Commit | Line | Data |
---|---|---|
c942fddf | 1 | // SPDX-License-Identifier: GPL-2.0-or-later |
03bee01d LL |
2 | /* |
3 | * Copyright (C) 2017, Microsoft Corporation. | |
4 | * | |
5 | * Author(s): Long Li <longli@microsoft.com> | |
03bee01d | 6 | */ |
f198186a | 7 | #include <linux/module.h> |
f64b78fd | 8 | #include <linux/highmem.h> |
03bee01d | 9 | #include "smbdirect.h" |
f198186a | 10 | #include "cifs_debug.h" |
b6903bcf | 11 | #include "cifsproto.h" |
35e2cc1b | 12 | #include "smb2proto.h" |
f198186a LL |
13 | |
14 | static struct smbd_response *get_empty_queue_buffer( | |
15 | struct smbd_connection *info); | |
16 | static struct smbd_response *get_receive_buffer( | |
17 | struct smbd_connection *info); | |
18 | static void put_receive_buffer( | |
19 | struct smbd_connection *info, | |
20 | struct smbd_response *response); | |
21 | static int allocate_receive_buffers(struct smbd_connection *info, int num_buf); | |
22 | static void destroy_receive_buffers(struct smbd_connection *info); | |
23 | ||
24 | static void put_empty_packet( | |
25 | struct smbd_connection *info, struct smbd_response *response); | |
26 | static void enqueue_reassembly( | |
27 | struct smbd_connection *info, | |
28 | struct smbd_response *response, int data_length); | |
29 | static struct smbd_response *_get_first_reassembly( | |
30 | struct smbd_connection *info); | |
31 | ||
32 | static int smbd_post_recv( | |
33 | struct smbd_connection *info, | |
34 | struct smbd_response *response); | |
35 | ||
36 | static int smbd_post_send_empty(struct smbd_connection *info); | |
03bee01d | 37 | |
c7398583 LL |
38 | static void destroy_mr_list(struct smbd_connection *info); |
39 | static int allocate_mr_list(struct smbd_connection *info); | |
40 | ||
e5fbdde4 DH |
41 | struct smb_extract_to_rdma { |
42 | struct ib_sge *sge; | |
43 | unsigned int nr_sge; | |
44 | unsigned int max_sge; | |
45 | struct ib_device *device; | |
46 | u32 local_dma_lkey; | |
47 | enum dma_data_direction direction; | |
48 | }; | |
49 | static ssize_t smb_extract_iter_to_rdma(struct iov_iter *iter, size_t len, | |
50 | struct smb_extract_to_rdma *rdma); | |
51 | ||
03bee01d LL |
52 | /* SMBD version number */ |
53 | #define SMBD_V1 0x0100 | |
54 | ||
55 | /* Port numbers for SMBD transport */ | |
56 | #define SMB_PORT 445 | |
57 | #define SMBD_PORT 5445 | |
58 | ||
59 | /* Address lookup and resolve timeout in ms */ | |
60 | #define RDMA_RESOLVE_TIMEOUT 5000 | |
61 | ||
62 | /* SMBD negotiation timeout in seconds */ | |
63 | #define SMBD_NEGOTIATE_TIMEOUT 120 | |
64 | ||
65 | /* SMBD minimum receive size and fragmented size defined in [MS-SMBD] */ | |
66 | #define SMBD_MIN_RECEIVE_SIZE 128 | |
67 | #define SMBD_MIN_FRAGMENTED_SIZE 131072 | |
68 | ||
69 | /* | |
70 | * Default maximum number of outstanding RDMA read/write operations on this connection | |
71 | * This value may be decreased during QP creation, depending on hardware limits | |
72 | */ | |
73 | #define SMBD_CM_RESPONDER_RESOURCES 32 | |
74 | ||
75 | /* Maximum number of retries on data transfer operations */ | |
76 | #define SMBD_CM_RETRY 6 | |
77 | /* No need to retry on Receiver Not Ready since SMBD manages credits */ | |
78 | #define SMBD_CM_RNR_RETRY 0 | |
79 | ||
80 | /* | |
81 | * User configurable initial values per SMBD transport connection | |
82 | * as defined in [MS-SMBD] 3.1.1.1 | |
83 | * These may change after an SMBD negotiation | |
84 | */ | |
85 | /* The local peer's maximum number of credits to grant to the peer */ | |
86 | int smbd_receive_credit_max = 255; | |
87 | ||
88 | /* The remote peer's credit request of local peer */ | |
89 | int smbd_send_credit_target = 255; | |
90 | ||
91 | /* The maximum single message size that can be sent to the remote peer */ | |
92 | int smbd_max_send_size = 1364; | |
93 | ||
94 | /* The maximum fragmented upper-layer payload receive size supported */ | |
95 | int smbd_max_fragmented_recv_size = 1024 * 1024; | |
96 | ||
97 | /* The maximum single-message size which can be received */ | |
3c62df55 | 98 | int smbd_max_receive_size = 1364; |
03bee01d LL |
99 | |
100 | /* The timeout to initiate send of a keepalive message on idle */ | |
101 | int smbd_keep_alive_interval = 120; | |
102 | ||
103 | /* | |
104 | * User configurable initial values for RDMA transport | |
105 | * The actual values used may be lower and are limited to hardware capabilities | |
106 | */ | |
d2e81f92 | 107 | /* Default maximum number of pages in a single RDMA write/read */ |
03bee01d LL |
108 | int smbd_max_frmr_depth = 2048; |
109 | ||
110 | /* If payload is smaller than this many bytes, use RDMA send/recv, not read/write */ | |
111 | int rdma_readwrite_threshold = 4096; | |
f198186a LL |
112 | |
113 | /* Transport logging functions | |
114 | * Logging is defined as classes. They can be OR'ed to define the actual | |
115 | * logging level via module parameter smbd_logging_class | |
116 | * e.g. cifs.smbd_logging_class=0xa0 will log all log_rdma_recv() and | |
117 | * log_rdma_event() | |
118 | */ | |
119 | #define LOG_OUTGOING 0x1 | |
120 | #define LOG_INCOMING 0x2 | |
121 | #define LOG_READ 0x4 | |
122 | #define LOG_WRITE 0x8 | |
123 | #define LOG_RDMA_SEND 0x10 | |
124 | #define LOG_RDMA_RECV 0x20 | |
125 | #define LOG_KEEP_ALIVE 0x40 | |
126 | #define LOG_RDMA_EVENT 0x80 | |
127 | #define LOG_RDMA_MR 0x100 | |
128 | static unsigned int smbd_logging_class; | |
129 | module_param(smbd_logging_class, uint, 0644); | |
130 | MODULE_PARM_DESC(smbd_logging_class, | |
131 | "Logging class for SMBD transport 0x0 to 0x100"); | |
132 | ||
133 | #define ERR 0x0 | |
134 | #define INFO 0x1 | |
135 | static unsigned int smbd_logging_level = ERR; | |
136 | module_param(smbd_logging_level, uint, 0644); | |
137 | MODULE_PARM_DESC(smbd_logging_level, | |
138 | "Logging level for SMBD transport, 0 (default): error, 1: info"); | |
139 | ||
140 | #define log_rdma(level, class, fmt, args...) \ | |
141 | do { \ | |
142 | if (level <= smbd_logging_level || class & smbd_logging_class) \ | |
143 | cifs_dbg(VFS, "%s:%d " fmt, __func__, __LINE__, ##args);\ | |
144 | } while (0) | |
145 | ||
146 | #define log_outgoing(level, fmt, args...) \ | |
147 | log_rdma(level, LOG_OUTGOING, fmt, ##args) | |
148 | #define log_incoming(level, fmt, args...) \ | |
149 | log_rdma(level, LOG_INCOMING, fmt, ##args) | |
150 | #define log_read(level, fmt, args...) log_rdma(level, LOG_READ, fmt, ##args) | |
151 | #define log_write(level, fmt, args...) log_rdma(level, LOG_WRITE, fmt, ##args) | |
152 | #define log_rdma_send(level, fmt, args...) \ | |
153 | log_rdma(level, LOG_RDMA_SEND, fmt, ##args) | |
154 | #define log_rdma_recv(level, fmt, args...) \ | |
155 | log_rdma(level, LOG_RDMA_RECV, fmt, ##args) | |
156 | #define log_keep_alive(level, fmt, args...) \ | |
157 | log_rdma(level, LOG_KEEP_ALIVE, fmt, ##args) | |
158 | #define log_rdma_event(level, fmt, args...) \ | |
159 | log_rdma(level, LOG_RDMA_EVENT, fmt, ##args) | |
160 | #define log_rdma_mr(level, fmt, args...) \ | |
161 | log_rdma(level, LOG_RDMA_MR, fmt, ##args) | |
162 | ||
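As a worked illustration of the class mask described in the comment above, the following standalone user-space sketch (illustrative only, not part of this file) shows why the example cifs.smbd_logging_class=0xa0 enables exactly log_rdma_recv() and log_rdma_event(): 0xa0 is LOG_RDMA_RECV (0x20) OR'ed with LOG_RDMA_EVENT (0x80), and log_rdma() emits a message whenever the message's class bit is present in the mask (or its level is at or below smbd_logging_level).

#include <stdio.h>

#define LOG_RDMA_RECV  0x20
#define LOG_RDMA_EVENT 0x80

int main(void)
{
	unsigned int smbd_logging_class = 0xa0;	/* LOG_RDMA_RECV | LOG_RDMA_EVENT */
	unsigned int classes[] = { 0x1, 0x2, 0x4, 0x8, 0x10,
				   0x20, 0x40, 0x80, 0x100 };
	unsigned int i;

	for (i = 0; i < sizeof(classes) / sizeof(classes[0]); i++)
		printf("class 0x%-3x %s\n", classes[i],
		       (classes[i] & smbd_logging_class) ? "logged" : "filtered");
	return 0;
}
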
f198186a LL |
163 | static void smbd_disconnect_rdma_work(struct work_struct *work) |
164 | { | |
165 | struct smbd_connection *info = | |
166 | container_of(work, struct smbd_connection, disconnect_work); | |
167 | ||
168 | if (info->transport_status == SMBD_CONNECTED) { | |
169 | info->transport_status = SMBD_DISCONNECTING; | |
170 | rdma_disconnect(info->id); | |
171 | } | |
172 | } | |
173 | ||
174 | static void smbd_disconnect_rdma_connection(struct smbd_connection *info) | |
175 | { | |
176 | queue_work(info->workqueue, &info->disconnect_work); | |
177 | } | |
178 | ||
179 | /* Upcall from RDMA CM */ | |
180 | static int smbd_conn_upcall( | |
181 | struct rdma_cm_id *id, struct rdma_cm_event *event) | |
182 | { | |
183 | struct smbd_connection *info = id->context; | |
184 | ||
185 | log_rdma_event(INFO, "event=%d status=%d\n", | |
186 | event->event, event->status); | |
187 | ||
188 | switch (event->event) { | |
189 | case RDMA_CM_EVENT_ADDR_RESOLVED: | |
190 | case RDMA_CM_EVENT_ROUTE_RESOLVED: | |
191 | info->ri_rc = 0; | |
192 | complete(&info->ri_done); | |
193 | break; | |
194 | ||
195 | case RDMA_CM_EVENT_ADDR_ERROR: | |
196 | info->ri_rc = -EHOSTUNREACH; | |
197 | complete(&info->ri_done); | |
198 | break; | |
199 | ||
200 | case RDMA_CM_EVENT_ROUTE_ERROR: | |
201 | info->ri_rc = -ENETUNREACH; | |
202 | complete(&info->ri_done); | |
203 | break; | |
204 | ||
205 | case RDMA_CM_EVENT_ESTABLISHED: | |
206 | log_rdma_event(INFO, "connected event=%d\n", event->event); | |
207 | info->transport_status = SMBD_CONNECTED; | |
208 | wake_up_interruptible(&info->conn_wait); | |
209 | break; | |
210 | ||
211 | case RDMA_CM_EVENT_CONNECT_ERROR: | |
212 | case RDMA_CM_EVENT_UNREACHABLE: | |
213 | case RDMA_CM_EVENT_REJECTED: | |
214 | log_rdma_event(INFO, "connecting failed event=%d\n", event->event); | |
215 | info->transport_status = SMBD_DISCONNECTED; | |
216 | wake_up_interruptible(&info->conn_wait); | |
217 | break; | |
218 | ||
219 | case RDMA_CM_EVENT_DEVICE_REMOVAL: | |
220 | case RDMA_CM_EVENT_DISCONNECTED: | |
221 | /* This happens when we fail the negotiation */ | |
222 | if (info->transport_status == SMBD_NEGOTIATE_FAILED) { | |
223 | info->transport_status = SMBD_DISCONNECTED; | |
224 | wake_up(&info->conn_wait); | |
225 | break; | |
226 | } | |
227 | ||
228 | info->transport_status = SMBD_DISCONNECTED; | |
e8b3bfe9 | 229 | wake_up_interruptible(&info->disconn_wait); |
050b8c37 LL |
230 | wake_up_interruptible(&info->wait_reassembly_queue); |
231 | wake_up_interruptible_all(&info->wait_send_queue); | |
f198186a LL |
232 | break; |
233 | ||
234 | default: | |
235 | break; | |
236 | } | |
237 | ||
238 | return 0; | |
239 | } | |
240 | ||
241 | /* Upcall from RDMA QP */ | |
242 | static void | |
243 | smbd_qp_async_error_upcall(struct ib_event *event, void *context) | |
244 | { | |
245 | struct smbd_connection *info = context; | |
246 | ||
247 | log_rdma_event(ERR, "%s on device %s info %p\n", | |
248 | ib_event_msg(event->event), event->device->name, info); | |
249 | ||
250 | switch (event->event) { | |
251 | case IB_EVENT_CQ_ERR: | |
252 | case IB_EVENT_QP_FATAL: | |
253 | smbd_disconnect_rdma_connection(info); | |
21ac58f4 | 254 | break; |
f198186a LL |
255 | |
256 | default: | |
257 | break; | |
258 | } | |
259 | } | |
260 | ||
261 | static inline void *smbd_request_payload(struct smbd_request *request) | |
262 | { | |
263 | return (void *)request->packet; | |
264 | } | |
265 | ||
266 | static inline void *smbd_response_payload(struct smbd_response *response) | |
267 | { | |
268 | return (void *)response->packet; | |
269 | } | |
270 | ||
271 | /* Called when a RDMA send is done */ | |
272 | static void send_done(struct ib_cq *cq, struct ib_wc *wc) | |
273 | { | |
274 | int i; | |
275 | struct smbd_request *request = | |
276 | container_of(wc->wr_cqe, struct smbd_request, cqe); | |
277 | ||
0350d7a3 | 278 | log_rdma_send(INFO, "smbd_request 0x%p completed wc->status=%d\n", |
f198186a LL |
279 | request, wc->status); |
280 | ||
281 | if (wc->status != IB_WC_SUCCESS || wc->opcode != IB_WC_SEND) { | |
282 | log_rdma_send(ERR, "wc->status=%d wc->opcode=%d\n", | |
283 | wc->status, wc->opcode); | |
284 | smbd_disconnect_rdma_connection(request->info); | |
285 | } | |
286 | ||
287 | for (i = 0; i < request->num_sge; i++) | |
288 | ib_dma_unmap_single(request->info->id->device, | |
289 | request->sge[i].addr, | |
290 | request->sge[i].length, | |
291 | DMA_TO_DEVICE); | |
292 | ||
072a14ec LL |
293 | if (atomic_dec_and_test(&request->info->send_pending)) |
294 | wake_up(&request->info->wait_send_pending); | |
295 | ||
3ffbe78a | 296 | wake_up(&request->info->wait_post_send); |
f198186a LL |
297 | |
298 | mempool_free(request, request->info->request_mempool); | |
299 | } | |
300 | ||
301 | static void dump_smbd_negotiate_resp(struct smbd_negotiate_resp *resp) | |
302 | { | |
a0a3036b JP |
303 | log_rdma_event(INFO, "resp message min_version %u max_version %u negotiated_version %u credits_requested %u credits_granted %u status %u max_readwrite_size %u preferred_send_size %u max_receive_size %u max_fragmented_size %u\n", |
304 | resp->min_version, resp->max_version, | |
305 | resp->negotiated_version, resp->credits_requested, | |
306 | resp->credits_granted, resp->status, | |
307 | resp->max_readwrite_size, resp->preferred_send_size, | |
308 | resp->max_receive_size, resp->max_fragmented_size); | |
f198186a LL |
309 | } |
310 | ||
311 | /* | |
312 | * Process a negotiation response message, according to [MS-SMBD] 3.1.5.7 | |
313 | * response, packet_length: the negotiation response message | |
314 | * return value: true if negotiation is a success, false if failed | |
315 | */ | |
316 | static bool process_negotiation_response( | |
317 | struct smbd_response *response, int packet_length) | |
318 | { | |
319 | struct smbd_connection *info = response->info; | |
320 | struct smbd_negotiate_resp *packet = smbd_response_payload(response); | |
321 | ||
322 | if (packet_length < sizeof(struct smbd_negotiate_resp)) { | |
323 | log_rdma_event(ERR, | |
324 | "error: packet_length=%d\n", packet_length); | |
325 | return false; | |
326 | } | |
327 | ||
328 | if (le16_to_cpu(packet->negotiated_version) != SMBD_V1) { | |
329 | log_rdma_event(ERR, "error: negotiated_version=%x\n", | |
330 | le16_to_cpu(packet->negotiated_version)); | |
331 | return false; | |
332 | } | |
333 | info->protocol = le16_to_cpu(packet->negotiated_version); | |
334 | ||
335 | if (packet->credits_requested == 0) { | |
336 | log_rdma_event(ERR, "error: credits_requested==0\n"); | |
337 | return false; | |
338 | } | |
339 | info->receive_credit_target = le16_to_cpu(packet->credits_requested); | |
340 | ||
341 | if (packet->credits_granted == 0) { | |
342 | log_rdma_event(ERR, "error: credits_granted==0\n"); | |
343 | return false; | |
344 | } | |
345 | atomic_set(&info->send_credits, le16_to_cpu(packet->credits_granted)); | |
346 | ||
347 | atomic_set(&info->receive_credits, 0); | |
348 | ||
349 | if (le32_to_cpu(packet->preferred_send_size) > info->max_receive_size) { | |
350 | log_rdma_event(ERR, "error: preferred_send_size=%d\n", | |
351 | le32_to_cpu(packet->preferred_send_size)); | |
352 | return false; | |
353 | } | |
354 | info->max_receive_size = le32_to_cpu(packet->preferred_send_size); | |
355 | ||
356 | if (le32_to_cpu(packet->max_receive_size) < SMBD_MIN_RECEIVE_SIZE) { | |
357 | log_rdma_event(ERR, "error: max_receive_size=%d\n", | |
358 | le32_to_cpu(packet->max_receive_size)); | |
359 | return false; | |
360 | } | |
361 | info->max_send_size = min_t(int, info->max_send_size, | |
362 | le32_to_cpu(packet->max_receive_size)); | |
363 | ||
364 | if (le32_to_cpu(packet->max_fragmented_size) < | |
365 | SMBD_MIN_FRAGMENTED_SIZE) { | |
366 | log_rdma_event(ERR, "error: max_fragmented_size=%d\n", | |
367 | le32_to_cpu(packet->max_fragmented_size)); | |
368 | return false; | |
369 | } | |
370 | info->max_fragmented_send_size = | |
371 | le32_to_cpu(packet->max_fragmented_size); | |
c7398583 LL |
372 | info->rdma_readwrite_threshold = |
373 | rdma_readwrite_threshold > info->max_fragmented_send_size ? | |
374 | info->max_fragmented_send_size : | |
375 | rdma_readwrite_threshold; | |
376 | ||
377 | ||
378 | info->max_readwrite_size = min_t(u32, | |
379 | le32_to_cpu(packet->max_readwrite_size), | |
380 | info->max_frmr_depth * PAGE_SIZE); | |
381 | info->max_frmr_depth = info->max_readwrite_size / PAGE_SIZE; | |
f198186a LL |
382 | |
383 | return true; | |
384 | } | |
385 | ||
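The size clamping at the end of process_negotiation_response() is easiest to follow with concrete numbers. The standalone sketch below (illustrative only, not driver code; the sample values are assumptions) mirrors that arithmetic: the local send size is capped by what the peer can receive, and the RDMA read/write size is capped by both the peer's max_readwrite_size and the local FRMR page depth.

#include <stdio.h>

int main(void)
{
	unsigned int page_size = 4096;			/* sample host page size */
	unsigned int max_frmr_depth = 2048;		/* local smbd_max_frmr_depth */
	unsigned int max_send_size = 1364;		/* local smbd_max_send_size */

	/* sample values a peer might advertise in its negotiate response */
	unsigned int peer_max_receive_size = 8192;
	unsigned int peer_max_readwrite_size = 1048576;	/* 1 MiB */

	unsigned int max_readwrite_size;

	/* local sends must fit into what the peer can receive */
	if (max_send_size > peer_max_receive_size)
		max_send_size = peer_max_receive_size;

	/* RDMA read/write is capped by the peer limit and the local FRMR depth */
	max_readwrite_size = peer_max_readwrite_size;
	if (max_readwrite_size > max_frmr_depth * page_size)
		max_readwrite_size = max_frmr_depth * page_size;
	max_frmr_depth = max_readwrite_size / page_size;

	/* prints: max_send_size=1364 max_readwrite_size=1048576 max_frmr_depth=256 */
	printf("max_send_size=%u max_readwrite_size=%u max_frmr_depth=%u\n",
	       max_send_size, max_readwrite_size, max_frmr_depth);
	return 0;
}
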
f198186a LL |
386 | static void smbd_post_send_credits(struct work_struct *work) |
387 | { | |
388 | int ret = 0; | |
389 | int use_receive_queue = 1; | |
390 | int rc; | |
391 | struct smbd_response *response; | |
392 | struct smbd_connection *info = | |
393 | container_of(work, struct smbd_connection, | |
394 | post_send_credits_work); | |
395 | ||
396 | if (info->transport_status != SMBD_CONNECTED) { | |
397 | wake_up(&info->wait_receive_queues); | |
398 | return; | |
399 | } | |
400 | ||
401 | if (info->receive_credit_target > | |
402 | atomic_read(&info->receive_credits)) { | |
403 | while (true) { | |
404 | if (use_receive_queue) | |
405 | response = get_receive_buffer(info); | |
406 | else | |
407 | response = get_empty_queue_buffer(info); | |
408 | if (!response) { | |
409 | /* now switch to empty packet queue */ | |
410 | if (use_receive_queue) { | |
411 | use_receive_queue = 0; | |
412 | continue; | |
413 | } else | |
414 | break; | |
415 | } | |
416 | ||
417 | response->type = SMBD_TRANSFER_DATA; | |
418 | response->first_segment = false; | |
419 | rc = smbd_post_recv(info, response); | |
420 | if (rc) { | |
421 | log_rdma_recv(ERR, | |
422 | "post_recv failed rc=%d\n", rc); | |
423 | put_receive_buffer(info, response); | |
424 | break; | |
425 | } | |
426 | ||
427 | ret++; | |
428 | } | |
429 | } | |
430 | ||
431 | spin_lock(&info->lock_new_credits_offered); | |
432 | info->new_credits_offered += ret; | |
433 | spin_unlock(&info->lock_new_credits_offered); | |
434 | ||
044b541c LL |
435 | /* Promptly send an immediate packet as defined in [MS-SMBD] 3.1.1.1 */ |
436 | info->send_immediate = true; | |
437 | if (atomic_read(&info->receive_credits) < | |
438 | info->receive_credit_target - 1) { | |
439 | if (info->keep_alive_requested == KEEP_ALIVE_PENDING || | |
440 | info->send_immediate) { | |
441 | log_keep_alive(INFO, "send an empty message\n"); | |
442 | smbd_post_send_empty(info); | |
443 | } | |
444 | } | |
f198186a LL |
445 | } |
446 | ||
f198186a LL |
447 | /* Called from softirq, when recv is done */ |
448 | static void recv_done(struct ib_cq *cq, struct ib_wc *wc) | |
449 | { | |
450 | struct smbd_data_transfer *data_transfer; | |
451 | struct smbd_response *response = | |
452 | container_of(wc->wr_cqe, struct smbd_response, cqe); | |
453 | struct smbd_connection *info = response->info; | |
454 | int data_length = 0; | |
455 | ||
0350d7a3 | 456 | log_rdma_recv(INFO, "response=0x%p type=%d wc status=%d wc opcode %d byte_len=%d pkey_index=%u\n", |
a0a3036b JP |
457 | response, response->type, wc->status, wc->opcode, |
458 | wc->byte_len, wc->pkey_index); | |
f198186a LL |
459 | |
460 | if (wc->status != IB_WC_SUCCESS || wc->opcode != IB_WC_RECV) { | |
461 | log_rdma_recv(INFO, "wc->status=%d opcode=%d\n", | |
462 | wc->status, wc->opcode); | |
463 | smbd_disconnect_rdma_connection(info); | |
464 | goto error; | |
465 | } | |
466 | ||
467 | ib_dma_sync_single_for_cpu( | |
468 | wc->qp->device, | |
469 | response->sge.addr, | |
470 | response->sge.length, | |
471 | DMA_FROM_DEVICE); | |
472 | ||
473 | switch (response->type) { | |
474 | /* SMBD negotiation response */ | |
475 | case SMBD_NEGOTIATE_RESP: | |
476 | dump_smbd_negotiate_resp(smbd_response_payload(response)); | |
477 | info->full_packet_received = true; | |
478 | info->negotiate_done = | |
479 | process_negotiation_response(response, wc->byte_len); | |
480 | complete(&info->negotiate_completion); | |
481 | break; | |
482 | ||
483 | /* SMBD data transfer packet */ | |
484 | case SMBD_TRANSFER_DATA: | |
485 | data_transfer = smbd_response_payload(response); | |
486 | data_length = le32_to_cpu(data_transfer->data_length); | |
487 | ||
488 | /* | |
489 | * If this is a packet with a data payload, place the data in the | |
490 | * reassembly queue and wake up the reading thread | |
491 | */ | |
492 | if (data_length) { | |
493 | if (info->full_packet_received) | |
494 | response->first_segment = true; | |
495 | ||
496 | if (le32_to_cpu(data_transfer->remaining_data_length)) | |
497 | info->full_packet_received = false; | |
498 | else | |
499 | info->full_packet_received = true; | |
500 | ||
501 | enqueue_reassembly( | |
502 | info, | |
503 | response, | |
504 | data_length); | |
505 | } else | |
506 | put_empty_packet(info, response); | |
507 | ||
508 | if (data_length) | |
509 | wake_up_interruptible(&info->wait_reassembly_queue); | |
510 | ||
511 | atomic_dec(&info->receive_credits); | |
512 | info->receive_credit_target = | |
513 | le16_to_cpu(data_transfer->credits_requested); | |
4ebb8795 LL |
514 | if (le16_to_cpu(data_transfer->credits_granted)) { |
515 | atomic_add(le16_to_cpu(data_transfer->credits_granted), | |
516 | &info->send_credits); | |
517 | /* | |
518 | * We have new send credits granted from remote peer | |
519 | * If any sender is waiting for credits, unblock it | |
520 | */ | |
521 | wake_up_interruptible(&info->wait_send_queue); | |
522 | } | |
f198186a | 523 | |
a0a3036b JP |
524 | log_incoming(INFO, "data flags %d data_offset %d data_length %d remaining_data_length %d\n", |
525 | le16_to_cpu(data_transfer->flags), | |
526 | le32_to_cpu(data_transfer->data_offset), | |
527 | le32_to_cpu(data_transfer->data_length), | |
528 | le32_to_cpu(data_transfer->remaining_data_length)); | |
f198186a LL |
529 | |
530 | /* Send a KEEP_ALIVE response right away if requested */ | |
531 | info->keep_alive_requested = KEEP_ALIVE_NONE; | |
532 | if (le16_to_cpu(data_transfer->flags) & | |
533 | SMB_DIRECT_RESPONSE_REQUESTED) { | |
534 | info->keep_alive_requested = KEEP_ALIVE_PENDING; | |
535 | } | |
536 | ||
f198186a LL |
537 | return; |
538 | ||
539 | default: | |
540 | log_rdma_recv(ERR, | |
541 | "unexpected response type=%d\n", response->type); | |
542 | } | |
543 | ||
544 | error: | |
545 | put_receive_buffer(info, response); | |
546 | } | |
547 | ||
548 | static struct rdma_cm_id *smbd_create_id( | |
549 | struct smbd_connection *info, | |
550 | struct sockaddr *dstaddr, int port) | |
551 | { | |
552 | struct rdma_cm_id *id; | |
553 | int rc; | |
554 | __be16 *sport; | |
555 | ||
556 | id = rdma_create_id(&init_net, smbd_conn_upcall, info, | |
557 | RDMA_PS_TCP, IB_QPT_RC); | |
558 | if (IS_ERR(id)) { | |
559 | rc = PTR_ERR(id); | |
560 | log_rdma_event(ERR, "rdma_create_id() failed %i\n", rc); | |
561 | return id; | |
562 | } | |
563 | ||
564 | if (dstaddr->sa_family == AF_INET6) | |
565 | sport = &((struct sockaddr_in6 *)dstaddr)->sin6_port; | |
566 | else | |
567 | sport = &((struct sockaddr_in *)dstaddr)->sin_port; | |
568 | ||
569 | *sport = htons(port); | |
570 | ||
571 | init_completion(&info->ri_done); | |
572 | info->ri_rc = -ETIMEDOUT; | |
573 | ||
574 | rc = rdma_resolve_addr(id, NULL, (struct sockaddr *)dstaddr, | |
575 | RDMA_RESOLVE_TIMEOUT); | |
576 | if (rc) { | |
577 | log_rdma_event(ERR, "rdma_resolve_addr() failed %i\n", rc); | |
578 | goto out; | |
579 | } | |
0555b221 | 580 | rc = wait_for_completion_interruptible_timeout( |
f198186a | 581 | &info->ri_done, msecs_to_jiffies(RDMA_RESOLVE_TIMEOUT)); |
0555b221 SF |
582 | /* e.g. if interrupted returns -ERESTARTSYS */ |
583 | if (rc < 0) { | |
584 | log_rdma_event(ERR, "rdma_resolve_addr timeout rc: %i\n", rc); | |
585 | goto out; | |
586 | } | |
f198186a LL |
587 | rc = info->ri_rc; |
588 | if (rc) { | |
589 | log_rdma_event(ERR, "rdma_resolve_addr() completed %i\n", rc); | |
590 | goto out; | |
591 | } | |
592 | ||
593 | info->ri_rc = -ETIMEDOUT; | |
594 | rc = rdma_resolve_route(id, RDMA_RESOLVE_TIMEOUT); | |
595 | if (rc) { | |
596 | log_rdma_event(ERR, "rdma_resolve_route() failed %i\n", rc); | |
597 | goto out; | |
598 | } | |
0555b221 | 599 | rc = wait_for_completion_interruptible_timeout( |
f198186a | 600 | &info->ri_done, msecs_to_jiffies(RDMA_RESOLVE_TIMEOUT)); |
0555b221 SF |
601 | /* e.g. if interrupted returns -ERESTARTSYS */ |
602 | if (rc < 0) { | |
603 | log_rdma_event(ERR, "rdma_resolve_route timeout rc: %i\n", rc); | |
604 | goto out; | |
605 | } | |
f198186a LL |
606 | rc = info->ri_rc; |
607 | if (rc) { | |
608 | log_rdma_event(ERR, "rdma_resolve_route() completed %i\n", rc); | |
609 | goto out; | |
610 | } | |
611 | ||
612 | return id; | |
613 | ||
614 | out: | |
615 | rdma_destroy_id(id); | |
616 | return ERR_PTR(rc); | |
617 | } | |
618 | ||
619 | /* | |
620 | * Test if FRWR (Fast Registration Work Requests) is supported on the device | |
621 | * This implementation requires FRWR for RDMA read/write | |
622 | * return value: true if it is supported | |
623 | */ | |
624 | static bool frwr_is_supported(struct ib_device_attr *attrs) | |
625 | { | |
626 | if (!(attrs->device_cap_flags & IB_DEVICE_MEM_MGT_EXTENSIONS)) | |
627 | return false; | |
628 | if (attrs->max_fast_reg_page_list_len == 0) | |
629 | return false; | |
630 | return true; | |
631 | } | |
632 | ||
633 | static int smbd_ia_open( | |
634 | struct smbd_connection *info, | |
635 | struct sockaddr *dstaddr, int port) | |
636 | { | |
637 | int rc; | |
638 | ||
639 | info->id = smbd_create_id(info, dstaddr, port); | |
640 | if (IS_ERR(info->id)) { | |
641 | rc = PTR_ERR(info->id); | |
642 | goto out1; | |
643 | } | |
644 | ||
645 | if (!frwr_is_supported(&info->id->device->attrs)) { | |
a0a3036b JP |
646 | log_rdma_event(ERR, "Fast Registration Work Requests (FRWR) is not supported\n"); |
647 | log_rdma_event(ERR, "Device capability flags = %llx max_fast_reg_page_list_len = %u\n", | |
648 | info->id->device->attrs.device_cap_flags, | |
649 | info->id->device->attrs.max_fast_reg_page_list_len); | |
f198186a LL |
650 | rc = -EPROTONOSUPPORT; |
651 | goto out2; | |
652 | } | |
c7398583 LL |
653 | info->max_frmr_depth = min_t(int, |
654 | smbd_max_frmr_depth, | |
655 | info->id->device->attrs.max_fast_reg_page_list_len); | |
656 | info->mr_type = IB_MR_TYPE_MEM_REG; | |
e945c653 | 657 | if (info->id->device->attrs.kernel_cap_flags & IBK_SG_GAPS_REG) |
c7398583 | 658 | info->mr_type = IB_MR_TYPE_SG_GAPS; |
f198186a LL |
659 | |
660 | info->pd = ib_alloc_pd(info->id->device, 0); | |
661 | if (IS_ERR(info->pd)) { | |
662 | rc = PTR_ERR(info->pd); | |
663 | log_rdma_event(ERR, "ib_alloc_pd() returned %d\n", rc); | |
664 | goto out2; | |
665 | } | |
666 | ||
667 | return 0; | |
668 | ||
669 | out2: | |
670 | rdma_destroy_id(info->id); | |
671 | info->id = NULL; | |
672 | ||
673 | out1: | |
674 | return rc; | |
675 | } | |
676 | ||
677 | /* | |
678 | * Send a negotiation request message to the peer | |
679 | * The negotiation procedure is in [MS-SMBD] 3.1.5.2 and 3.1.5.3 | |
680 | * After negotiation, the transport is connected and ready for | |
681 | * carrying upper layer SMB payload | |
682 | */ | |
683 | static int smbd_post_send_negotiate_req(struct smbd_connection *info) | |
684 | { | |
73930595 | 685 | struct ib_send_wr send_wr; |
f198186a LL |
686 | int rc = -ENOMEM; |
687 | struct smbd_request *request; | |
688 | struct smbd_negotiate_req *packet; | |
689 | ||
690 | request = mempool_alloc(info->request_mempool, GFP_KERNEL); | |
691 | if (!request) | |
692 | return rc; | |
693 | ||
694 | request->info = info; | |
695 | ||
696 | packet = smbd_request_payload(request); | |
697 | packet->min_version = cpu_to_le16(SMBD_V1); | |
698 | packet->max_version = cpu_to_le16(SMBD_V1); | |
699 | packet->reserved = 0; | |
700 | packet->credits_requested = cpu_to_le16(info->send_credit_target); | |
701 | packet->preferred_send_size = cpu_to_le32(info->max_send_size); | |
702 | packet->max_receive_size = cpu_to_le32(info->max_receive_size); | |
703 | packet->max_fragmented_size = | |
704 | cpu_to_le32(info->max_fragmented_recv_size); | |
705 | ||
706 | request->num_sge = 1; | |
707 | request->sge[0].addr = ib_dma_map_single( | |
708 | info->id->device, (void *)packet, | |
709 | sizeof(*packet), DMA_TO_DEVICE); | |
710 | if (ib_dma_mapping_error(info->id->device, request->sge[0].addr)) { | |
711 | rc = -EIO; | |
712 | goto dma_mapping_failed; | |
713 | } | |
714 | ||
715 | request->sge[0].length = sizeof(*packet); | |
716 | request->sge[0].lkey = info->pd->local_dma_lkey; | |
717 | ||
718 | ib_dma_sync_single_for_device( | |
719 | info->id->device, request->sge[0].addr, | |
720 | request->sge[0].length, DMA_TO_DEVICE); | |
721 | ||
722 | request->cqe.done = send_done; | |
723 | ||
724 | send_wr.next = NULL; | |
725 | send_wr.wr_cqe = &request->cqe; | |
726 | send_wr.sg_list = request->sge; | |
727 | send_wr.num_sge = request->num_sge; | |
728 | send_wr.opcode = IB_WR_SEND; | |
729 | send_wr.send_flags = IB_SEND_SIGNALED; | |
730 | ||
0350d7a3 | 731 | log_rdma_send(INFO, "sge addr=0x%llx length=%u lkey=0x%x\n", |
f198186a LL |
732 | request->sge[0].addr, |
733 | request->sge[0].length, request->sge[0].lkey); | |
734 | ||
f198186a | 735 | atomic_inc(&info->send_pending); |
73930595 | 736 | rc = ib_post_send(info->id->qp, &send_wr, NULL); |
f198186a LL |
737 | if (!rc) |
738 | return 0; | |
739 | ||
740 | /* if we reach here, post send failed */ | |
741 | log_rdma_send(ERR, "ib_post_send failed rc=%d\n", rc); | |
742 | atomic_dec(&info->send_pending); | |
743 | ib_dma_unmap_single(info->id->device, request->sge[0].addr, | |
744 | request->sge[0].length, DMA_TO_DEVICE); | |
745 | ||
21a4e14a LL |
746 | smbd_disconnect_rdma_connection(info); |
747 | ||
f198186a LL |
748 | dma_mapping_failed: |
749 | mempool_free(request, info->request_mempool); | |
750 | return rc; | |
751 | } | |
752 | ||
753 | /* | |
754 | * Extend the credits to remote peer | |
755 | * This implements [MS-SMBD] 3.1.5.9 | |
756 | * The idea is that we should extend credits to the remote peer as quickly | |
757 | * as allowed, to maintain data flow. We allocate as many receive | |
758 | * buffers as possible, and extend the receive credits to the remote peer. | |
759 | * return value: the new credits being granted. | |
760 | */ | |
761 | static int manage_credits_prior_sending(struct smbd_connection *info) | |
762 | { | |
763 | int new_credits; | |
764 | ||
765 | spin_lock(&info->lock_new_credits_offered); | |
766 | new_credits = info->new_credits_offered; | |
767 | info->new_credits_offered = 0; | |
768 | spin_unlock(&info->lock_new_credits_offered); | |
769 | ||
770 | return new_credits; | |
771 | } | |
772 | ||
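A compressed view of the credit accounting described above, as a standalone sketch (illustrative only; the names are assumed and the spinlock protecting new_credits_offered is omitted): receive buffers that have been posted accumulate in new_credits_offered, and the next outgoing packet takes that count, adds it to receive_credits, and advertises it to the peer in its credits_granted field.

#include <stdio.h>

struct conn {
	int new_credits_offered;	/* receives posted but not yet granted */
	int receive_credits;		/* credits currently extended to the peer */
};

/* mirrors manage_credits_prior_sending(): consume whatever has been offered */
static int take_offered_credits(struct conn *c)
{
	int n = c->new_credits_offered;

	c->new_credits_offered = 0;
	return n;
}

int main(void)
{
	struct conn c = { .new_credits_offered = 16, .receive_credits = 4 };
	int granted = take_offered_credits(&c);

	c.receive_credits += granted;	/* done before filling in the packet header */
	printf("credits_granted=%d receive_credits=%d\n",
	       granted, c.receive_credits);	/* prints 16 and 20 */
	return 0;
}
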
773 | /* | |
774 | * Check if we need to send a KEEP_ALIVE message | |
775 | * The idle connection timer triggers a KEEP_ALIVE message when it expires. | |
776 | * SMB_DIRECT_RESPONSE_REQUESTED is set in the message flags to have the peer | |
777 | * send back a response. | |
778 | * return value: | |
779 | * 1 if SMB_DIRECT_RESPONSE_REQUESTED needs to be set | |
780 | * 0: otherwise | |
781 | */ | |
782 | static int manage_keep_alive_before_sending(struct smbd_connection *info) | |
783 | { | |
784 | if (info->keep_alive_requested == KEEP_ALIVE_PENDING) { | |
785 | info->keep_alive_requested = KEEP_ALIVE_SENT; | |
786 | return 1; | |
787 | } | |
788 | return 0; | |
789 | } | |
790 | ||
f1b7b862 LL |
791 | /* Post the send request */ |
792 | static int smbd_post_send(struct smbd_connection *info, | |
793 | struct smbd_request *request) | |
794 | { | |
795 | struct ib_send_wr send_wr; | |
796 | int rc, i; | |
797 | ||
798 | for (i = 0; i < request->num_sge; i++) { | |
799 | log_rdma_send(INFO, | |
0350d7a3 | 800 | "rdma_request sge[%d] addr=0x%llx length=%u\n", |
f1b7b862 LL |
801 | i, request->sge[i].addr, request->sge[i].length); |
802 | ib_dma_sync_single_for_device( | |
803 | info->id->device, | |
804 | request->sge[i].addr, | |
805 | request->sge[i].length, | |
806 | DMA_TO_DEVICE); | |
807 | } | |
808 | ||
809 | request->cqe.done = send_done; | |
810 | ||
811 | send_wr.next = NULL; | |
812 | send_wr.wr_cqe = &request->cqe; | |
813 | send_wr.sg_list = request->sge; | |
814 | send_wr.num_sge = request->num_sge; | |
815 | send_wr.opcode = IB_WR_SEND; | |
816 | send_wr.send_flags = IB_SEND_SIGNALED; | |
817 | ||
818 | rc = ib_post_send(info->id->qp, &send_wr, NULL); | |
819 | if (rc) { | |
820 | log_rdma_send(ERR, "ib_post_send failed rc=%d\n", rc); | |
821 | smbd_disconnect_rdma_connection(info); | |
822 | rc = -EAGAIN; | |
823 | } else | |
824 | /* Reset timer for idle connection after packet is sent */ | |
825 | mod_delayed_work(info->workqueue, &info->idle_timer_work, | |
826 | info->keep_alive_interval*HZ); | |
827 | ||
828 | return rc; | |
829 | } | |
830 | ||
3d78fe73 DH |
831 | static int smbd_post_send_iter(struct smbd_connection *info, |
832 | struct iov_iter *iter, | |
833 | int *_remaining_data_length) | |
f198186a | 834 | { |
f1b7b862 LL |
835 | int i, rc; |
836 | int header_length; | |
3d78fe73 | 837 | int data_length; |
f198186a LL |
838 | struct smbd_request *request; |
839 | struct smbd_data_transfer *packet; | |
4bd3e4c7 | 840 | int new_credits = 0; |
f198186a | 841 | |
f1b7b862 | 842 | wait_credit: |
f198186a LL |
843 | /* Wait for send credits. A SMBD packet needs one credit */ |
844 | rc = wait_event_interruptible(info->wait_send_queue, | |
845 | atomic_read(&info->send_credits) > 0 || | |
846 | info->transport_status != SMBD_CONNECTED); | |
847 | if (rc) | |
f1b7b862 | 848 | goto err_wait_credit; |
f198186a LL |
849 | |
850 | if (info->transport_status != SMBD_CONNECTED) { | |
f1b7b862 LL |
851 | log_outgoing(ERR, "disconnected not sending on wait_credit\n"); |
852 | rc = -EAGAIN; | |
853 | goto err_wait_credit; | |
854 | } | |
855 | if (unlikely(atomic_dec_return(&info->send_credits) < 0)) { | |
856 | atomic_inc(&info->send_credits); | |
857 | goto wait_credit; | |
858 | } | |
859 | ||
860 | wait_send_queue: | |
861 | wait_event(info->wait_post_send, | |
862 | atomic_read(&info->send_pending) < info->send_credit_target || | |
863 | info->transport_status != SMBD_CONNECTED); | |
864 | ||
865 | if (info->transport_status != SMBD_CONNECTED) { | |
866 | log_outgoing(ERR, "disconnected not sending on wait_send_queue\n"); | |
867 | rc = -EAGAIN; | |
868 | goto err_wait_send_queue; | |
869 | } | |
870 | ||
871 | if (unlikely(atomic_inc_return(&info->send_pending) > | |
872 | info->send_credit_target)) { | |
873 | atomic_dec(&info->send_pending); | |
874 | goto wait_send_queue; | |
f198186a | 875 | } |
f198186a LL |
876 | |
877 | request = mempool_alloc(info->request_mempool, GFP_KERNEL); | |
878 | if (!request) { | |
879 | rc = -ENOMEM; | |
d4e5160d | 880 | goto err_alloc; |
f198186a LL |
881 | } |
882 | ||
883 | request->info = info; | |
3d78fe73 DH |
884 | memset(request->sge, 0, sizeof(request->sge)); |
885 | ||
886 | /* Fill in the data payload to find out how much data we can add */ | |
887 | if (iter) { | |
888 | struct smb_extract_to_rdma extract = { | |
889 | .nr_sge = 1, | |
890 | .max_sge = SMBDIRECT_MAX_SEND_SGE, | |
891 | .sge = request->sge, | |
892 | .device = info->id->device, | |
893 | .local_dma_lkey = info->pd->local_dma_lkey, | |
894 | .direction = DMA_TO_DEVICE, | |
895 | }; | |
896 | ||
897 | rc = smb_extract_iter_to_rdma(iter, *_remaining_data_length, | |
898 | &extract); | |
899 | if (rc < 0) | |
900 | goto err_dma; | |
901 | data_length = rc; | |
902 | request->num_sge = extract.nr_sge; | |
903 | *_remaining_data_length -= data_length; | |
904 | } else { | |
905 | data_length = 0; | |
906 | request->num_sge = 1; | |
907 | } | |
f198186a LL |
908 | |
909 | /* Fill in the packet header */ | |
910 | packet = smbd_request_payload(request); | |
911 | packet->credits_requested = cpu_to_le16(info->send_credit_target); | |
d4e5160d LL |
912 | |
913 | new_credits = manage_credits_prior_sending(info); | |
914 | atomic_add(new_credits, &info->receive_credits); | |
915 | packet->credits_granted = cpu_to_le16(new_credits); | |
916 | ||
f198186a LL |
917 | info->send_immediate = false; |
918 | ||
919 | packet->flags = 0; | |
920 | if (manage_keep_alive_before_sending(info)) | |
921 | packet->flags |= cpu_to_le16(SMB_DIRECT_RESPONSE_REQUESTED); | |
922 | ||
923 | packet->reserved = 0; | |
f1b7b862 | 924 | if (!data_length) |
f198186a LL |
925 | packet->data_offset = 0; |
926 | else | |
927 | packet->data_offset = cpu_to_le32(24); | |
f1b7b862 | 928 | packet->data_length = cpu_to_le32(data_length); |
3d78fe73 | 929 | packet->remaining_data_length = cpu_to_le32(*_remaining_data_length); |
f198186a LL |
930 | packet->padding = 0; |
931 | ||
a0a3036b JP |
932 | log_outgoing(INFO, "credits_requested=%d credits_granted=%d data_offset=%d data_length=%d remaining_data_length=%d\n", |
933 | le16_to_cpu(packet->credits_requested), | |
934 | le16_to_cpu(packet->credits_granted), | |
935 | le32_to_cpu(packet->data_offset), | |
936 | le32_to_cpu(packet->data_length), | |
937 | le32_to_cpu(packet->remaining_data_length)); | |
f198186a LL |
938 | |
939 | /* Map the packet to DMA */ | |
940 | header_length = sizeof(struct smbd_data_transfer); | |
941 | /* If this is a packet without payload, don't send padding */ | |
f1b7b862 | 942 | if (!data_length) |
f198186a LL |
943 | header_length = offsetof(struct smbd_data_transfer, padding); |
944 | ||
f198186a LL |
945 | request->sge[0].addr = ib_dma_map_single(info->id->device, |
946 | (void *)packet, | |
947 | header_length, | |
7f46d23e | 948 | DMA_TO_DEVICE); |
f198186a | 949 | if (ib_dma_mapping_error(info->id->device, request->sge[0].addr)) { |
f198186a | 950 | rc = -EIO; |
f1b7b862 | 951 | request->sge[0].addr = 0; |
d4e5160d | 952 | goto err_dma; |
f198186a LL |
953 | } |
954 | ||
955 | request->sge[0].length = header_length; | |
956 | request->sge[0].lkey = info->pd->local_dma_lkey; | |
957 | ||
072a14ec | 958 | rc = smbd_post_send(info, request); |
f198186a LL |
959 | if (!rc) |
960 | return 0; | |
961 | ||
f1b7b862 LL |
962 | err_dma: |
963 | for (i = 0; i < request->num_sge; i++) | |
f198186a LL |
964 | if (request->sge[i].addr) |
965 | ib_dma_unmap_single(info->id->device, | |
966 | request->sge[i].addr, | |
967 | request->sge[i].length, | |
968 | DMA_TO_DEVICE); | |
f1b7b862 LL |
969 | mempool_free(request, info->request_mempool); |
970 | ||
971 | /* roll back receive credits and credits to be offered */ | |
972 | spin_lock(&info->lock_new_credits_offered); | |
973 | info->new_credits_offered += new_credits; | |
974 | spin_unlock(&info->lock_new_credits_offered); | |
975 | atomic_sub(new_credits, &info->receive_credits); | |
976 | ||
977 | err_alloc: | |
978 | if (atomic_dec_and_test(&info->send_pending)) | |
979 | wake_up(&info->wait_send_pending); | |
980 | ||
981 | err_wait_send_queue: | |
982 | /* roll back send credits and pending */ | |
983 | atomic_inc(&info->send_credits); | |
984 | ||
985 | err_wait_credit: | |
f198186a LL |
986 | return rc; |
987 | } | |
988 | ||
989 | /* | |
990 | * Send an empty message | |
991 | * An empty message is used to extend credits to the peer or for keep-alive | |
992 | * while there is no upper layer payload to send at the time | |
993 | */ | |
994 | static int smbd_post_send_empty(struct smbd_connection *info) | |
995 | { | |
3d78fe73 DH |
996 | int remaining_data_length = 0; |
997 | ||
f198186a | 998 | info->count_send_empty++; |
3d78fe73 | 999 | return smbd_post_send_iter(info, NULL, &remaining_data_length); |
f198186a LL |
1000 | } |
1001 | ||
1002 | /* | |
1003 | * Post a receive request to the transport | |
1004 | * The remote peer can only send data when a receive request is posted | |
1005 | * The interaction is controlled by send/receive credit system | |
1006 | */ | |
1007 | static int smbd_post_recv( | |
1008 | struct smbd_connection *info, struct smbd_response *response) | |
1009 | { | |
73930595 | 1010 | struct ib_recv_wr recv_wr; |
f198186a LL |
1011 | int rc = -EIO; |
1012 | ||
1013 | response->sge.addr = ib_dma_map_single( | |
1014 | info->id->device, response->packet, | |
1015 | info->max_receive_size, DMA_FROM_DEVICE); | |
1016 | if (ib_dma_mapping_error(info->id->device, response->sge.addr)) | |
1017 | return rc; | |
1018 | ||
1019 | response->sge.length = info->max_receive_size; | |
1020 | response->sge.lkey = info->pd->local_dma_lkey; | |
1021 | ||
1022 | response->cqe.done = recv_done; | |
1023 | ||
1024 | recv_wr.wr_cqe = &response->cqe; | |
1025 | recv_wr.next = NULL; | |
1026 | recv_wr.sg_list = &response->sge; | |
1027 | recv_wr.num_sge = 1; | |
1028 | ||
73930595 | 1029 | rc = ib_post_recv(info->id->qp, &recv_wr, NULL); |
f198186a LL |
1030 | if (rc) { |
1031 | ib_dma_unmap_single(info->id->device, response->sge.addr, | |
1032 | response->sge.length, DMA_FROM_DEVICE); | |
21a4e14a | 1033 | smbd_disconnect_rdma_connection(info); |
f198186a LL |
1034 | log_rdma_recv(ERR, "ib_post_recv failed rc=%d\n", rc); |
1035 | } | |
1036 | ||
1037 | return rc; | |
1038 | } | |
1039 | ||
1040 | /* Perform SMBD negotiate according to [MS-SMBD] 3.1.5.2 */ | |
1041 | static int smbd_negotiate(struct smbd_connection *info) | |
1042 | { | |
1043 | int rc; | |
1044 | struct smbd_response *response = get_receive_buffer(info); | |
1045 | ||
1046 | response->type = SMBD_NEGOTIATE_RESP; | |
1047 | rc = smbd_post_recv(info, response); | |
0350d7a3 | 1048 | log_rdma_event(INFO, "smbd_post_recv rc=%d iov.addr=0x%llx iov.length=%u iov.lkey=0x%x\n", |
a0a3036b JP |
1049 | rc, response->sge.addr, |
1050 | response->sge.length, response->sge.lkey); | |
f198186a LL |
1051 | if (rc) |
1052 | return rc; | |
1053 | ||
1054 | init_completion(&info->negotiate_completion); | |
1055 | info->negotiate_done = false; | |
1056 | rc = smbd_post_send_negotiate_req(info); | |
1057 | if (rc) | |
1058 | return rc; | |
1059 | ||
1060 | rc = wait_for_completion_interruptible_timeout( | |
1061 | &info->negotiate_completion, SMBD_NEGOTIATE_TIMEOUT * HZ); | |
1062 | log_rdma_event(INFO, "wait_for_completion_timeout rc=%d\n", rc); | |
1063 | ||
1064 | if (info->negotiate_done) | |
1065 | return 0; | |
1066 | ||
1067 | if (rc == 0) | |
1068 | rc = -ETIMEDOUT; | |
1069 | else if (rc == -ERESTARTSYS) | |
1070 | rc = -EINTR; | |
1071 | else | |
1072 | rc = -ENOTCONN; | |
1073 | ||
1074 | return rc; | |
1075 | } | |
1076 | ||
1077 | static void put_empty_packet( | |
1078 | struct smbd_connection *info, struct smbd_response *response) | |
1079 | { | |
1080 | spin_lock(&info->empty_packet_queue_lock); | |
1081 | list_add_tail(&response->list, &info->empty_packet_queue); | |
1082 | info->count_empty_packet_queue++; | |
1083 | spin_unlock(&info->empty_packet_queue_lock); | |
1084 | ||
1085 | queue_work(info->workqueue, &info->post_send_credits_work); | |
1086 | } | |
1087 | ||
1088 | /* | |
1089 | * Implement Connection.FragmentReassemblyBuffer defined in [MS-SMBD] 3.1.1.1 | |
1090 | * This is a queue for reassembling upper layer payload and presenting it to | |
1091 | * the upper layer. All incoming payload goes to the reassembly queue, regardless | |
1092 | * of whether reassembly is required. The upper layer code reads from the queue for all | |
1093 | * incoming payloads. | |
1094 | * Put a received packet to the reassembly queue | |
1095 | * response: the packet received | |
1096 | * data_length: the size of payload in this packet | |
1097 | */ | |
1098 | static void enqueue_reassembly( | |
1099 | struct smbd_connection *info, | |
1100 | struct smbd_response *response, | |
1101 | int data_length) | |
1102 | { | |
1103 | spin_lock(&info->reassembly_queue_lock); | |
1104 | list_add_tail(&response->list, &info->reassembly_queue); | |
1105 | info->reassembly_queue_length++; | |
1106 | /* | |
1107 | * Make sure reassembly_data_length is updated after list and | |
1108 | * reassembly_queue_length are updated. On the dequeue side | |
1109 | * reassembly_data_length is checked without a lock to determine | |
1110 | * if reassembly_queue_length and the list are up to date | |
1111 | */ | |
1112 | virt_wmb(); | |
1113 | info->reassembly_data_length += data_length; | |
1114 | spin_unlock(&info->reassembly_queue_lock); | |
1115 | info->count_reassembly_queue++; | |
1116 | info->count_enqueue_reassembly_queue++; | |
1117 | } | |
1118 | ||
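The virt_wmb() in enqueue_reassembly() publishes reassembly_data_length only after the queue itself has been updated; the lockless reader on the dequeue side (in the receive path, outside this excerpt) is expected to pair it with a read barrier before walking the queue. Below is a standalone C11 sketch of that publish/consume ordering, with assumed names and fences standing in for the kernel barriers:

#include <stdatomic.h>
#include <stdio.h>

struct queue {
	int queue_length;		/* stand-in for the list + reassembly_queue_length */
	_Atomic int data_length;	/* published byte count, checked without a lock */
};

static void producer(struct queue *q, int len)
{
	q->queue_length++;				/* update the queue state first */
	atomic_thread_fence(memory_order_release);	/* plays the role of virt_wmb() */
	atomic_fetch_add_explicit(&q->data_length, len, memory_order_relaxed);
}

static int consumer(struct queue *q)
{
	int len = atomic_load_explicit(&q->data_length, memory_order_relaxed);

	atomic_thread_fence(memory_order_acquire);	/* pairs with the release fence */
	return len ? q->queue_length : 0;	/* queue state is now safe to inspect */
}

int main(void)
{
	struct queue q = { 0 };

	producer(&q, 512);
	if (consumer(&q))
		printf("reassembly queue has data\n");
	return 0;
}
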
1119 | /* | |
1120 | * Get the first entry at the front of reassembly queue | |
1121 | * Caller is responsible for locking | |
1122 | * return value: the first entry if any, NULL if queue is empty | |
1123 | */ | |
1124 | static struct smbd_response *_get_first_reassembly(struct smbd_connection *info) | |
1125 | { | |
1126 | struct smbd_response *ret = NULL; | |
1127 | ||
1128 | if (!list_empty(&info->reassembly_queue)) { | |
1129 | ret = list_first_entry( | |
1130 | &info->reassembly_queue, | |
1131 | struct smbd_response, list); | |
1132 | } | |
1133 | return ret; | |
1134 | } | |
1135 | ||
1136 | static struct smbd_response *get_empty_queue_buffer( | |
1137 | struct smbd_connection *info) | |
1138 | { | |
1139 | struct smbd_response *ret = NULL; | |
1140 | unsigned long flags; | |
1141 | ||
1142 | spin_lock_irqsave(&info->empty_packet_queue_lock, flags); | |
1143 | if (!list_empty(&info->empty_packet_queue)) { | |
1144 | ret = list_first_entry( | |
1145 | &info->empty_packet_queue, | |
1146 | struct smbd_response, list); | |
1147 | list_del(&ret->list); | |
1148 | info->count_empty_packet_queue--; | |
1149 | } | |
1150 | spin_unlock_irqrestore(&info->empty_packet_queue_lock, flags); | |
1151 | ||
1152 | return ret; | |
1153 | } | |
1154 | ||
1155 | /* | |
1156 | * Get a receive buffer | |
1157 | * For each remote send, we need to post a receive. The receive buffers are | |
1158 | * pre-allocated. | |
1159 | * return value: the receive buffer, NULL if none is available | |
1160 | */ | |
1161 | static struct smbd_response *get_receive_buffer(struct smbd_connection *info) | |
1162 | { | |
1163 | struct smbd_response *ret = NULL; | |
1164 | unsigned long flags; | |
1165 | ||
1166 | spin_lock_irqsave(&info->receive_queue_lock, flags); | |
1167 | if (!list_empty(&info->receive_queue)) { | |
1168 | ret = list_first_entry( | |
1169 | &info->receive_queue, | |
1170 | struct smbd_response, list); | |
1171 | list_del(&ret->list); | |
1172 | info->count_receive_queue--; | |
1173 | info->count_get_receive_buffer++; | |
1174 | } | |
1175 | spin_unlock_irqrestore(&info->receive_queue_lock, flags); | |
1176 | ||
1177 | return ret; | |
1178 | } | |
1179 | ||
1180 | /* | |
1181 | * Return a receive buffer | |
1182 | * When a receive buffer is returned, we can post a new receive and extend | |
1183 | * more receive credits to the remote peer. This is done immediately after a | |
1184 | * receive buffer is returned. | |
1185 | */ | |
1186 | static void put_receive_buffer( | |
1187 | struct smbd_connection *info, struct smbd_response *response) | |
1188 | { | |
1189 | unsigned long flags; | |
1190 | ||
1191 | ib_dma_unmap_single(info->id->device, response->sge.addr, | |
1192 | response->sge.length, DMA_FROM_DEVICE); | |
1193 | ||
1194 | spin_lock_irqsave(&info->receive_queue_lock, flags); | |
1195 | list_add_tail(&response->list, &info->receive_queue); | |
1196 | info->count_receive_queue++; | |
1197 | info->count_put_receive_buffer++; | |
1198 | spin_unlock_irqrestore(&info->receive_queue_lock, flags); | |
1199 | ||
1200 | queue_work(info->workqueue, &info->post_send_credits_work); | |
1201 | } | |
1202 | ||
1203 | /* Preallocate all receive buffers on transport establishment */ | |
1204 | static int allocate_receive_buffers(struct smbd_connection *info, int num_buf) | |
1205 | { | |
1206 | int i; | |
1207 | struct smbd_response *response; | |
1208 | ||
1209 | INIT_LIST_HEAD(&info->reassembly_queue); | |
1210 | spin_lock_init(&info->reassembly_queue_lock); | |
1211 | info->reassembly_data_length = 0; | |
1212 | info->reassembly_queue_length = 0; | |
1213 | ||
1214 | INIT_LIST_HEAD(&info->receive_queue); | |
1215 | spin_lock_init(&info->receive_queue_lock); | |
1216 | info->count_receive_queue = 0; | |
1217 | ||
1218 | INIT_LIST_HEAD(&info->empty_packet_queue); | |
1219 | spin_lock_init(&info->empty_packet_queue_lock); | |
1220 | info->count_empty_packet_queue = 0; | |
1221 | ||
1222 | init_waitqueue_head(&info->wait_receive_queues); | |
1223 | ||
1224 | for (i = 0; i < num_buf; i++) { | |
1225 | response = mempool_alloc(info->response_mempool, GFP_KERNEL); | |
1226 | if (!response) | |
1227 | goto allocate_failed; | |
1228 | ||
1229 | response->info = info; | |
1230 | list_add_tail(&response->list, &info->receive_queue); | |
1231 | info->count_receive_queue++; | |
1232 | } | |
1233 | ||
1234 | return 0; | |
1235 | ||
1236 | allocate_failed: | |
1237 | while (!list_empty(&info->receive_queue)) { | |
1238 | response = list_first_entry( | |
1239 | &info->receive_queue, | |
1240 | struct smbd_response, list); | |
1241 | list_del(&response->list); | |
1242 | info->count_receive_queue--; | |
1243 | ||
1244 | mempool_free(response, info->response_mempool); | |
1245 | } | |
1246 | return -ENOMEM; | |
1247 | } | |
1248 | ||
1249 | static void destroy_receive_buffers(struct smbd_connection *info) | |
1250 | { | |
1251 | struct smbd_response *response; | |
1252 | ||
1253 | while ((response = get_receive_buffer(info))) | |
1254 | mempool_free(response, info->response_mempool); | |
1255 | ||
1256 | while ((response = get_empty_queue_buffer(info))) | |
1257 | mempool_free(response, info->response_mempool); | |
1258 | } | |
1259 | ||
f198186a LL |
1260 | /* Implement idle connection timer [MS-SMBD] 3.1.6.2 */ |
1261 | static void idle_connection_timer(struct work_struct *work) | |
1262 | { | |
1263 | struct smbd_connection *info = container_of( | |
1264 | work, struct smbd_connection, | |
1265 | idle_timer_work.work); | |
1266 | ||
1267 | if (info->keep_alive_requested != KEEP_ALIVE_NONE) { | |
1268 | log_keep_alive(ERR, | |
1269 | "error status info->keep_alive_requested=%d\n", | |
1270 | info->keep_alive_requested); | |
1271 | smbd_disconnect_rdma_connection(info); | |
1272 | return; | |
1273 | } | |
1274 | ||
1275 | log_keep_alive(INFO, "about to send an empty idle message\n"); | |
1276 | smbd_post_send_empty(info); | |
1277 | ||
1278 | /* Setup the next idle timeout work */ | |
1279 | queue_delayed_work(info->workqueue, &info->idle_timer_work, | |
1280 | info->keep_alive_interval*HZ); | |
1281 | } | |
1282 | ||
050b8c37 LL |
1283 | /* |
1284 | * Destroy the transport and related RDMA and memory resources | |
1285 | * Need to go through all the pending counters and make sure no one is using | |
1286 | * the transport while it is destroyed | |
1287 | */ | |
1288 | void smbd_destroy(struct TCP_Server_Info *server) | |
8ef130f9 | 1289 | { |
050b8c37 LL |
1290 | struct smbd_connection *info = server->smbd_conn; |
1291 | struct smbd_response *response; | |
1292 | unsigned long flags; | |
1293 | ||
1294 | if (!info) { | |
1295 | log_rdma_event(INFO, "rdma session already destroyed\n"); | |
1296 | return; | |
1297 | } | |
1298 | ||
8ef130f9 | 1299 | log_rdma_event(INFO, "destroying rdma session\n"); |
050b8c37 LL |
1300 | if (info->transport_status != SMBD_DISCONNECTED) { |
1301 | rdma_disconnect(server->smbd_conn->id); | |
1302 | log_rdma_event(INFO, "wait for transport being disconnected\n"); | |
e8b3bfe9 | 1303 | wait_event_interruptible( |
050b8c37 LL |
1304 | info->disconn_wait, |
1305 | info->transport_status == SMBD_DISCONNECTED); | |
1306 | } | |
8ef130f9 | 1307 | |
050b8c37 LL |
1308 | log_rdma_event(INFO, "destroying qp\n"); |
1309 | ib_drain_qp(info->id->qp); | |
1310 | rdma_destroy_qp(info->id); | |
1311 | ||
1312 | log_rdma_event(INFO, "cancelling idle timer\n"); | |
1313 | cancel_delayed_work_sync(&info->idle_timer_work); | |
8ef130f9 | 1314 | |
050b8c37 LL |
1315 | log_rdma_event(INFO, "wait for all send posted to IB to finish\n"); |
1316 | wait_event(info->wait_send_pending, | |
1317 | atomic_read(&info->send_pending) == 0); | |
050b8c37 | 1318 | |
fb64f7f1 | 1319 | /* It's not possible for upper layer to get to reassembly */ |
050b8c37 LL |
1320 | log_rdma_event(INFO, "drain the reassembly queue\n"); |
1321 | do { | |
1322 | spin_lock_irqsave(&info->reassembly_queue_lock, flags); | |
1323 | response = _get_first_reassembly(info); | |
1324 | if (response) { | |
1325 | list_del(&response->list); | |
1326 | spin_unlock_irqrestore( | |
1327 | &info->reassembly_queue_lock, flags); | |
1328 | put_receive_buffer(info, response); | |
1329 | } else | |
1330 | spin_unlock_irqrestore( | |
1331 | &info->reassembly_queue_lock, flags); | |
1332 | } while (response); | |
1333 | info->reassembly_data_length = 0; | |
1334 | ||
1335 | log_rdma_event(INFO, "free receive buffers\n"); | |
1336 | wait_event(info->wait_receive_queues, | |
1337 | info->count_receive_queue + info->count_empty_packet_queue | |
1338 | == info->receive_credit_max); | |
1339 | destroy_receive_buffers(info); | |
1340 | ||
1341 | /* | |
1342 | * For performance reasons, memory registration and deregistration | |
1343 | * are not locked by srv_mutex. It is possible some processes are | |
1344 | * blocked on transport srv_mutex while holding memory registration. | |
1345 | * Release the transport srv_mutex to allow them to hit the failure | |
1346 | * path when sending data, and then release memory registrations. | |
1347 | */ | |
1348 | log_rdma_event(INFO, "freeing mr list\n"); | |
1349 | wake_up_interruptible_all(&info->wait_mr); | |
1350 | while (atomic_read(&info->mr_used_count)) { | |
cc391b69 | 1351 | cifs_server_unlock(server); |
050b8c37 | 1352 | msleep(1000); |
cc391b69 | 1353 | cifs_server_lock(server); |
050b8c37 LL |
1354 | } |
1355 | destroy_mr_list(info); | |
1356 | ||
1357 | ib_free_cq(info->send_cq); | |
1358 | ib_free_cq(info->recv_cq); | |
1359 | ib_dealloc_pd(info->pd); | |
1360 | rdma_destroy_id(info->id); | |
1361 | ||
1362 | /* free mempools */ | |
1363 | mempool_destroy(info->request_mempool); | |
1364 | kmem_cache_destroy(info->request_cache); | |
1365 | ||
1366 | mempool_destroy(info->response_mempool); | |
1367 | kmem_cache_destroy(info->response_cache); | |
1368 | ||
1369 | info->transport_status = SMBD_DESTROYED; | |
8ef130f9 LL |
1370 | |
1371 | destroy_workqueue(info->workqueue); | |
d63cdbae | 1372 | log_rdma_event(INFO, "rdma session destroyed\n"); |
8ef130f9 | 1373 | kfree(info); |
b7ab9161 | 1374 | server->smbd_conn = NULL; |
8ef130f9 LL |
1375 | } |
1376 | ||
ad57b8e1 LL |
1377 | /* |
1378 | * Reconnect this SMBD connection, called from upper layer | |
1379 | * return value: 0 on success, or actual error code | |
1380 | */ | |
1381 | int smbd_reconnect(struct TCP_Server_Info *server) | |
1382 | { | |
1383 | log_rdma_event(INFO, "reconnecting rdma session\n"); | |
1384 | ||
1385 | if (!server->smbd_conn) { | |
48f238a7 LL |
1386 | log_rdma_event(INFO, "rdma session already destroyed\n"); |
1387 | goto create_conn; | |
ad57b8e1 LL |
1388 | } |
1389 | ||
1390 | /* | |
1391 | * This is possible if transport is disconnected and we haven't received | |
1392 | * notification from RDMA, but upper layer has detected timeout | |
1393 | */ | |
1394 | if (server->smbd_conn->transport_status == SMBD_CONNECTED) { | |
1395 | log_rdma_event(INFO, "disconnecting transport\n"); | |
050b8c37 | 1396 | smbd_destroy(server); |
ad57b8e1 LL |
1397 | } |
1398 | ||
48f238a7 | 1399 | create_conn: |
ad57b8e1 LL |
1400 | log_rdma_event(INFO, "creating rdma session\n"); |
1401 | server->smbd_conn = smbd_get_connection( | |
1402 | server, (struct sockaddr *) &server->dstaddr); | |
d63cdbae LL |
1403 | |
1404 | if (server->smbd_conn) | |
1405 | cifs_dbg(VFS, "RDMA transport re-established\n"); | |
ad57b8e1 LL |
1406 | |
1407 | return server->smbd_conn ? 0 : -ENOENT; | |
1408 | } | |
1409 | ||
f198186a LL |
1410 | static void destroy_caches_and_workqueue(struct smbd_connection *info) |
1411 | { | |
1412 | destroy_receive_buffers(info); | |
1413 | destroy_workqueue(info->workqueue); | |
1414 | mempool_destroy(info->response_mempool); | |
1415 | kmem_cache_destroy(info->response_cache); | |
1416 | mempool_destroy(info->request_mempool); | |
1417 | kmem_cache_destroy(info->request_cache); | |
1418 | } | |
1419 | ||
1420 | #define MAX_NAME_LEN 80 | |
1421 | static int allocate_caches_and_workqueue(struct smbd_connection *info) | |
1422 | { | |
1423 | char name[MAX_NAME_LEN]; | |
1424 | int rc; | |
1425 | ||
74ea5f98 | 1426 | scnprintf(name, MAX_NAME_LEN, "smbd_request_%p", info); |
f198186a LL |
1427 | info->request_cache = |
1428 | kmem_cache_create( | |
1429 | name, | |
1430 | sizeof(struct smbd_request) + | |
1431 | sizeof(struct smbd_data_transfer), | |
1432 | 0, SLAB_HWCACHE_ALIGN, NULL); | |
1433 | if (!info->request_cache) | |
1434 | return -ENOMEM; | |
1435 | ||
1436 | info->request_mempool = | |
1437 | mempool_create(info->send_credit_target, mempool_alloc_slab, | |
1438 | mempool_free_slab, info->request_cache); | |
1439 | if (!info->request_mempool) | |
1440 | goto out1; | |
1441 | ||
74ea5f98 | 1442 | scnprintf(name, MAX_NAME_LEN, "smbd_response_%p", info); |
f198186a LL |
1443 | info->response_cache = |
1444 | kmem_cache_create( | |
1445 | name, | |
1446 | sizeof(struct smbd_response) + | |
1447 | info->max_receive_size, | |
1448 | 0, SLAB_HWCACHE_ALIGN, NULL); | |
1449 | if (!info->response_cache) | |
1450 | goto out2; | |
1451 | ||
1452 | info->response_mempool = | |
1453 | mempool_create(info->receive_credit_max, mempool_alloc_slab, | |
1454 | mempool_free_slab, info->response_cache); | |
1455 | if (!info->response_mempool) | |
1456 | goto out3; | |
1457 | ||
74ea5f98 | 1458 | scnprintf(name, MAX_NAME_LEN, "smbd_%p", info); |
f198186a LL |
1459 | info->workqueue = create_workqueue(name); |
1460 | if (!info->workqueue) | |
1461 | goto out4; | |
1462 | ||
1463 | rc = allocate_receive_buffers(info, info->receive_credit_max); | |
1464 | if (rc) { | |
1465 | log_rdma_event(ERR, "failed to allocate receive buffers\n"); | |
1466 | goto out5; | |
1467 | } | |
1468 | ||
1469 | return 0; | |
1470 | ||
1471 | out5: | |
1472 | destroy_workqueue(info->workqueue); | |
1473 | out4: | |
1474 | mempool_destroy(info->response_mempool); | |
1475 | out3: | |
1476 | kmem_cache_destroy(info->response_cache); | |
1477 | out2: | |
1478 | mempool_destroy(info->request_mempool); | |
1479 | out1: | |
1480 | kmem_cache_destroy(info->request_cache); | |
1481 | return -ENOMEM; | |
1482 | } | |
1483 | ||
1484 | /* Create a SMBD connection, called by upper layer */ | |
9084432c | 1485 | static struct smbd_connection *_smbd_get_connection( |
f198186a LL |
1486 | struct TCP_Server_Info *server, struct sockaddr *dstaddr, int port) |
1487 | { | |
1488 | int rc; | |
1489 | struct smbd_connection *info; | |
1490 | struct rdma_conn_param conn_param; | |
1491 | struct ib_qp_init_attr qp_attr; | |
1492 | struct sockaddr_in *addr_in = (struct sockaddr_in *) dstaddr; | |
c7398583 LL |
1493 | struct ib_port_immutable port_immutable; |
1494 | u32 ird_ord_hdr[2]; | |
f198186a LL |
1495 | |
1496 | info = kzalloc(sizeof(struct smbd_connection), GFP_KERNEL); | |
1497 | if (!info) | |
1498 | return NULL; | |
1499 | ||
1500 | info->transport_status = SMBD_CONNECTING; | |
1501 | rc = smbd_ia_open(info, dstaddr, port); | |
1502 | if (rc) { | |
1503 | log_rdma_event(INFO, "smbd_ia_open rc=%d\n", rc); | |
1504 | goto create_id_failed; | |
1505 | } | |
1506 | ||
1507 | if (smbd_send_credit_target > info->id->device->attrs.max_cqe || | |
1508 | smbd_send_credit_target > info->id->device->attrs.max_qp_wr) { | |
0350d7a3 | 1509 | log_rdma_event(ERR, "consider lowering send_credit_target = %d. Possible CQE overrun, device reporting max_cqe %d max_qp_wr %d\n", |
a0a3036b JP |
1510 | smbd_send_credit_target, |
1511 | info->id->device->attrs.max_cqe, | |
1512 | info->id->device->attrs.max_qp_wr); | |
f198186a LL |
1513 | goto config_failed; |
1514 | } | |
1515 | ||
1516 | if (smbd_receive_credit_max > info->id->device->attrs.max_cqe || | |
1517 | smbd_receive_credit_max > info->id->device->attrs.max_qp_wr) { | |
0350d7a3 | 1518 | log_rdma_event(ERR, "consider lowering receive_credit_max = %d. Possible CQE overrun, device reporting max_cqe %d max_qp_wr %d\n", |
a0a3036b JP |
1519 | smbd_receive_credit_max, |
1520 | info->id->device->attrs.max_cqe, | |
1521 | info->id->device->attrs.max_qp_wr); | |
f198186a LL |
1522 | goto config_failed; |
1523 | } | |
1524 | ||
1525 | info->receive_credit_max = smbd_receive_credit_max; | |
1526 | info->send_credit_target = smbd_send_credit_target; | |
1527 | info->max_send_size = smbd_max_send_size; | |
1528 | info->max_fragmented_recv_size = smbd_max_fragmented_recv_size; | |
1529 | info->max_receive_size = smbd_max_receive_size; | |
1530 | info->keep_alive_interval = smbd_keep_alive_interval; | |
1531 | ||
d2e81f92 TT |
1532 | if (info->id->device->attrs.max_send_sge < SMBDIRECT_MAX_SEND_SGE || |
1533 | info->id->device->attrs.max_recv_sge < SMBDIRECT_MAX_RECV_SGE) { | |
33023fb8 | 1534 | log_rdma_event(ERR, |
d2e81f92 TT |
1535 | "device %.*s max_send_sge/max_recv_sge = %d/%d too small\n", |
1536 | IB_DEVICE_NAME_MAX, | |
1537 | info->id->device->name, | |
1538 | info->id->device->attrs.max_send_sge, | |
33023fb8 | 1539 | info->id->device->attrs.max_recv_sge); |
d2e81f92 | 1540 | goto config_failed; |
f198186a LL |
1541 | } |
1542 | ||
1543 | info->send_cq = NULL; | |
1544 | info->recv_cq = NULL; | |
20cf4e02 CL |
1545 | info->send_cq = |
1546 | ib_alloc_cq_any(info->id->device, info, | |
1547 | info->send_credit_target, IB_POLL_SOFTIRQ); | |
f198186a LL |
1548 | if (IS_ERR(info->send_cq)) { |
1549 | info->send_cq = NULL; | |
1550 | goto alloc_cq_failed; | |
1551 | } | |
1552 | ||
20cf4e02 CL |
1553 | info->recv_cq = |
1554 | ib_alloc_cq_any(info->id->device, info, | |
1555 | info->receive_credit_max, IB_POLL_SOFTIRQ); | |
f198186a LL |
1556 | if (IS_ERR(info->recv_cq)) { |
1557 | info->recv_cq = NULL; | |
1558 | goto alloc_cq_failed; | |
1559 | } | |
1560 | ||
1561 | memset(&qp_attr, 0, sizeof(qp_attr)); | |
1562 | qp_attr.event_handler = smbd_qp_async_error_upcall; | |
1563 | qp_attr.qp_context = info; | |
1564 | qp_attr.cap.max_send_wr = info->send_credit_target; | |
1565 | qp_attr.cap.max_recv_wr = info->receive_credit_max; | |
d2e81f92 TT |
1566 | qp_attr.cap.max_send_sge = SMBDIRECT_MAX_SEND_SGE; |
1567 | qp_attr.cap.max_recv_sge = SMBDIRECT_MAX_RECV_SGE; | |
f198186a LL |
1568 | qp_attr.cap.max_inline_data = 0; |
1569 | qp_attr.sq_sig_type = IB_SIGNAL_REQ_WR; | |
1570 | qp_attr.qp_type = IB_QPT_RC; | |
1571 | qp_attr.send_cq = info->send_cq; | |
1572 | qp_attr.recv_cq = info->recv_cq; | |
1573 | qp_attr.port_num = ~0; | |
1574 | ||
1575 | rc = rdma_create_qp(info->id, info->pd, &qp_attr); | |
1576 | if (rc) { | |
1577 | log_rdma_event(ERR, "rdma_create_qp failed %i\n", rc); | |
1578 | goto create_qp_failed; | |
1579 | } | |
1580 | ||
1581 | memset(&conn_param, 0, sizeof(conn_param)); | |
1582 | conn_param.initiator_depth = 0; | |
1583 | ||
c7398583 LL |
1584 | conn_param.responder_resources = |
1585 | info->id->device->attrs.max_qp_rd_atom | |
1586 | < SMBD_CM_RESPONDER_RESOURCES ? | |
1587 | info->id->device->attrs.max_qp_rd_atom : | |
1588 | SMBD_CM_RESPONDER_RESOURCES; | |
1589 | info->responder_resources = conn_param.responder_resources; | |
1590 | log_rdma_mr(INFO, "responder_resources=%d\n", | |
1591 | info->responder_resources); | |
1592 | ||
1593 | /* Need to send IRD/ORD in private data for iWARP */ | |
3023a1e9 | 1594 | info->id->device->ops.get_port_immutable( |
c7398583 LL |
1595 | info->id->device, info->id->port_num, &port_immutable); |
1596 | if (port_immutable.core_cap_flags & RDMA_CORE_PORT_IWARP) { | |
1597 | ird_ord_hdr[0] = info->responder_resources; | |
1598 | ird_ord_hdr[1] = 1; | |
1599 | conn_param.private_data = ird_ord_hdr; | |
1600 | conn_param.private_data_len = sizeof(ird_ord_hdr); | |
1601 | } else { | |
1602 | conn_param.private_data = NULL; | |
1603 | conn_param.private_data_len = 0; | |
1604 | } | |
1605 | ||
f198186a LL |
1606 | conn_param.retry_count = SMBD_CM_RETRY; |
1607 | conn_param.rnr_retry_count = SMBD_CM_RNR_RETRY; | |
1608 | conn_param.flow_control = 0; | |
f198186a LL |
1609 | |
1610 | log_rdma_event(INFO, "connecting to IP %pI4 port %d\n", | |
1611 | &addr_in->sin_addr, port); | |
1612 | ||
1613 | init_waitqueue_head(&info->conn_wait); | |
050b8c37 LL |
1614 | init_waitqueue_head(&info->disconn_wait); |
1615 | init_waitqueue_head(&info->wait_reassembly_queue); | |
f198186a LL |
1616 | rc = rdma_connect(info->id, &conn_param); |
1617 | if (rc) { | |
1618 | log_rdma_event(ERR, "rdma_connect() failed with %i\n", rc); | |
1619 | goto rdma_connect_failed; | |
1620 | } | |
1621 | ||
1622 | wait_event_interruptible( | |
1623 | info->conn_wait, info->transport_status != SMBD_CONNECTING); | |
1624 | ||
1625 | if (info->transport_status != SMBD_CONNECTED) { | |
1626 | log_rdma_event(ERR, "rdma_connect failed port=%d\n", port); | |
1627 | goto rdma_connect_failed; | |
1628 | } | |
1629 | ||
1630 | log_rdma_event(INFO, "rdma_connect connected\n"); | |
1631 | ||
1632 | rc = allocate_caches_and_workqueue(info); | |
1633 | if (rc) { | |
1634 | log_rdma_event(ERR, "cache allocation failed\n"); | |
1635 | goto allocate_cache_failed; | |
1636 | } | |
1637 | ||
1638 | init_waitqueue_head(&info->wait_send_queue); | |
f198186a | 1639 | INIT_DELAYED_WORK(&info->idle_timer_work, idle_connection_timer); |
f198186a LL |
1640 | queue_delayed_work(info->workqueue, &info->idle_timer_work, |
1641 | info->keep_alive_interval*HZ); | |
1642 | ||
1643 | init_waitqueue_head(&info->wait_send_pending); | |
1644 | atomic_set(&info->send_pending, 0); | |
1645 | ||
3ffbe78a | 1646 | init_waitqueue_head(&info->wait_post_send); |
f198186a LL |
1647 | |
1648 | INIT_WORK(&info->disconnect_work, smbd_disconnect_rdma_work); | |
f198186a LL |
1649 | INIT_WORK(&info->post_send_credits_work, smbd_post_send_credits); |
1650 | info->new_credits_offered = 0; | |
1651 | spin_lock_init(&info->lock_new_credits_offered); | |
1652 | ||
1653 | rc = smbd_negotiate(info); | |
1654 | if (rc) { | |
1655 | log_rdma_event(ERR, "smbd_negotiate rc=%d\n", rc); | |
1656 | goto negotiation_failed; | |
1657 | } | |
1658 | ||
c7398583 LL |
1659 | rc = allocate_mr_list(info); |
1660 | if (rc) { | |
1661 | log_rdma_mr(ERR, "memory registration allocation failed\n"); | |
1662 | goto allocate_mr_failed; | |
1663 | } | |
1664 | ||
f198186a LL |
1665 | return info; |
1666 | ||
c7398583 LL |
1667 | allocate_mr_failed: |
1668 | /* At this point, we need a full transport shutdown */ |
e9d3401d | 1669 | server->smbd_conn = info; |
050b8c37 | 1670 | smbd_destroy(server); |
c7398583 LL |
1671 | return NULL; |
1672 | ||
f198186a LL |
1673 | negotiation_failed: |
1674 | cancel_delayed_work_sync(&info->idle_timer_work); | |
1675 | destroy_caches_and_workqueue(info); | |
1676 | info->transport_status = SMBD_NEGOTIATE_FAILED; | |
1677 | init_waitqueue_head(&info->conn_wait); | |
1678 | rdma_disconnect(info->id); | |
1679 | wait_event(info->conn_wait, | |
1680 | info->transport_status == SMBD_DISCONNECTED); | |
1681 | ||
1682 | allocate_cache_failed: | |
1683 | rdma_connect_failed: | |
1684 | rdma_destroy_qp(info->id); | |
1685 | ||
1686 | create_qp_failed: | |
1687 | alloc_cq_failed: | |
1688 | if (info->send_cq) | |
1689 | ib_free_cq(info->send_cq); | |
1690 | if (info->recv_cq) | |
1691 | ib_free_cq(info->recv_cq); | |
1692 | ||
1693 | config_failed: | |
1694 | ib_dealloc_pd(info->pd); | |
1695 | rdma_destroy_id(info->id); | |
1696 | ||
1697 | create_id_failed: | |
1698 | kfree(info); | |
1699 | return NULL; | |
1700 | } | |
399f9539 LL |
1701 | |
1702 | struct smbd_connection *smbd_get_connection( | |
1703 | struct TCP_Server_Info *server, struct sockaddr *dstaddr) | |
1704 | { | |
1705 | struct smbd_connection *ret; | |
1706 | int port = SMBD_PORT; | |
1707 | ||
1708 | try_again: | |
1709 | ret = _smbd_get_connection(server, dstaddr, port); | |
1710 | ||
1711 | /* Try SMB_PORT if SMBD_PORT doesn't work */ | |
1712 | if (!ret && port == SMBD_PORT) { | |
1713 | port = SMB_PORT; | |
1714 | goto try_again; | |
1715 | } | |
1716 | return ret; | |
1717 | } | |
f64b78fd LL |
1718 | |
1719 | /* | |
1720 | * Receive data from receive reassembly queue | |
1721 | * All the incoming data packets are placed in reassembly queue | |
1722 | * buf: the buffer to read data into | |
1723 | * size: the length of data to read | |
1724 | * return value: actual data read | |
1725 | * Note: this implementation copies the data from the reassembly queue to |
1726 | * receive buffers used by the upper layer. This is not the optimal code path. |
1727 | * A better way is to not have the upper layer allocate its receive buffers, |
1728 | * but rather borrow the buffer from the reassembly queue and return it after |
1729 | * the data is consumed. But this would require more changes to upper layer |
1730 | * code, and would also need to consider packet boundaries while they are still being reassembled. |
1731 | */ | |
2026b06e SF |
1732 | static int smbd_recv_buf(struct smbd_connection *info, char *buf, |
1733 | unsigned int size) | |
f64b78fd LL |
1734 | { |
1735 | struct smbd_response *response; | |
1736 | struct smbd_data_transfer *data_transfer; | |
1737 | int to_copy, to_read, data_read, offset; | |
1738 | u32 data_length, remaining_data_length, data_offset; | |
1739 | int rc; | |
f64b78fd LL |
1740 | |
1741 | again: | |
f64b78fd LL |
1742 | /* |
1743 | * No need to hold the reassembly queue lock all the time as we are | |
1744 | * the only one reading from the front of the queue. The transport | |
1745 | * may add more entries to the back of the queue at the same time | |
1746 | */ | |
1747 | log_read(INFO, "size=%d info->reassembly_data_length=%d\n", size, | |
1748 | info->reassembly_data_length); | |
1749 | if (info->reassembly_data_length >= size) { | |
1750 | int queue_length; | |
1751 | int queue_removed = 0; | |
1752 | ||
1753 | /* | |
1754 | * Need to make sure reassembly_data_length is read before | |
1755 | * reading reassembly_queue_length and calling | |
1756 | * _get_first_reassembly. This call is lock free | |
1757 | * as we never read the end of the queue, which is being |
1758 | * updated in SOFTIRQ context as more data is received |
1759 | */ | |
1760 | virt_rmb(); | |
1761 | queue_length = info->reassembly_queue_length; | |
1762 | data_read = 0; | |
1763 | to_read = size; | |
1764 | offset = info->first_entry_offset; | |
1765 | while (data_read < size) { | |
1766 | response = _get_first_reassembly(info); | |
1767 | data_transfer = smbd_response_payload(response); | |
1768 | data_length = le32_to_cpu(data_transfer->data_length); | |
1769 | remaining_data_length = | |
1770 | le32_to_cpu( | |
1771 | data_transfer->remaining_data_length); | |
1772 | data_offset = le32_to_cpu(data_transfer->data_offset); | |
1773 | ||
1774 | /* | |
1775 | * The upper layer expects the RFC1002 length at the |
1776 | * beginning of the payload. Return it to indicate |
1777 | * the total length of the packet. This minimizes the |
1778 | * change to the upper layer packet processing logic. This |
1779 | * will eventually be removed when an intermediate |
1780 | * transport layer is added |
1781 | */ | |
1782 | if (response->first_segment && size == 4) { | |
1783 | unsigned int rfc1002_len = | |
1784 | data_length + remaining_data_length; | |
1785 | *((__be32 *)buf) = cpu_to_be32(rfc1002_len); | |
1786 | data_read = 4; | |
1787 | response->first_segment = false; | |
1788 | log_read(INFO, "returning rfc1002 length %d\n", | |
1789 | rfc1002_len); | |
1790 | goto read_rfc1002_done; | |
1791 | } | |
1792 | ||
1793 | to_copy = min_t(int, data_length - offset, to_read); | |
1794 | memcpy( | |
1795 | buf + data_read, | |
1796 | (char *)data_transfer + data_offset + offset, | |
1797 | to_copy); | |
1798 | ||
1799 | /* move on to the next buffer? */ | |
1800 | if (to_copy == data_length - offset) { | |
1801 | queue_length--; | |
1802 | /* | |
1803 | * No need to lock if we are not at the | |
1804 | * end of the queue | |
1805 | */ | |
f9de151b SF |
1806 | if (queue_length) |
1807 | list_del(&response->list); | |
1808 | else { | |
e36c048a AB |
1809 | spin_lock_irq( |
1810 | &info->reassembly_queue_lock); | |
f9de151b | 1811 | list_del(&response->list); |
e36c048a AB |
1812 | spin_unlock_irq( |
1813 | &info->reassembly_queue_lock); | |
f9de151b SF |
1814 | } |
1815 | queue_removed++; | |
f64b78fd LL |
1816 | info->count_reassembly_queue--; |
1817 | info->count_dequeue_reassembly_queue++; | |
1818 | put_receive_buffer(info, response); | |
1819 | offset = 0; | |
1820 | log_read(INFO, "put_receive_buffer offset=0\n"); | |
1821 | } else | |
1822 | offset += to_copy; | |
1823 | ||
1824 | to_read -= to_copy; | |
1825 | data_read += to_copy; | |
1826 | ||
a0a3036b JP |
1827 | log_read(INFO, "_get_first_reassembly memcpy %d bytes data_transfer_length-offset=%d after that to_read=%d data_read=%d offset=%d\n", |
1828 | to_copy, data_length - offset, | |
1829 | to_read, data_read, offset); | |
f64b78fd LL |
1830 | } |
1831 | ||
e36c048a | 1832 | spin_lock_irq(&info->reassembly_queue_lock); |
f64b78fd LL |
1833 | info->reassembly_data_length -= data_read; |
1834 | info->reassembly_queue_length -= queue_removed; | |
e36c048a | 1835 | spin_unlock_irq(&info->reassembly_queue_lock); |
f64b78fd LL |
1836 | |
1837 | info->first_entry_offset = offset; | |
a0a3036b JP |
1838 | log_read(INFO, "returning to thread data_read=%d reassembly_data_length=%d first_entry_offset=%d\n", |
1839 | data_read, info->reassembly_data_length, | |
1840 | info->first_entry_offset); | |
f64b78fd LL |
1841 | read_rfc1002_done: |
1842 | return data_read; | |
1843 | } | |
1844 | ||
1845 | log_read(INFO, "wait_event on more data\n"); | |
1846 | rc = wait_event_interruptible( | |
1847 | info->wait_reassembly_queue, | |
1848 | info->reassembly_data_length >= size || | |
1849 | info->transport_status != SMBD_CONNECTED); | |
1850 | /* Don't return any data if interrupted */ | |
1851 | if (rc) | |
98e0d408 | 1852 | return rc; |
f64b78fd | 1853 | |
e8b3bfe9 LL |
1854 | if (info->transport_status != SMBD_CONNECTED) { |
1855 | log_read(ERR, "disconnected\n"); | |
acd4680e | 1856 | return -ECONNABORTED; |
e8b3bfe9 LL |
1857 | } |
1858 | ||
f64b78fd LL |
1859 | goto again; |
1860 | } | |
1861 | ||
1862 | /* | |
1863 | * Receive a page from receive reassembly queue | |
1864 | * page: the page to read data into | |
1865 | * to_read: the length of data to read | |
1866 | * return value: actual data read | |
1867 | */ | |
2026b06e | 1868 | static int smbd_recv_page(struct smbd_connection *info, |
6509f50c LL |
1869 | struct page *page, unsigned int page_offset, |
1870 | unsigned int to_read) | |
f64b78fd LL |
1871 | { |
1872 | int ret; | |
1873 | char *to_address; | |
6509f50c | 1874 | void *page_address; |
f64b78fd LL |
1875 | |
1876 | /* make sure we have the page ready for read */ | |
1877 | ret = wait_event_interruptible( | |
1878 | info->wait_reassembly_queue, | |
1879 | info->reassembly_data_length >= to_read || | |
1880 | info->transport_status != SMBD_CONNECTED); | |
1881 | if (ret) | |
6509f50c | 1882 | return ret; |
f64b78fd LL |
1883 | |
1884 | /* now we can read from reassembly queue and not sleep */ | |
6509f50c LL |
1885 | page_address = kmap_atomic(page); |
1886 | to_address = (char *) page_address + page_offset; | |
f64b78fd LL |
1887 | |
1888 | log_read(INFO, "reading from page=%p address=%p to_read=%d\n", | |
1889 | page, to_address, to_read); | |
1890 | ||
1891 | ret = smbd_recv_buf(info, to_address, to_read); | |
6509f50c | 1892 | kunmap_atomic(page_address); |
f64b78fd LL |
1893 | |
1894 | return ret; | |
1895 | } | |
1896 | ||
1897 | /* | |
1898 | * Receive data from transport | |
1899 | * msg: a msghdr pointing to the buffer, can be ITER_KVEC or ITER_BVEC |
1900 | * return: total bytes read, or a negative error code. SMB Direct will not do a partial read. |
1901 | */ | |
1902 | int smbd_recv(struct smbd_connection *info, struct msghdr *msg) | |
1903 | { | |
1904 | char *buf; | |
1905 | struct page *page; | |
6509f50c | 1906 | unsigned int to_read, page_offset; |
f64b78fd LL |
1907 | int rc; |
1908 | ||
00e23707 DH |
1909 | if (iov_iter_rw(&msg->msg_iter) == WRITE) { |
1910 | /* It's a bug in the upper layer to get here */ |
a0a3036b | 1911 | cifs_dbg(VFS, "Invalid msg iter dir %u\n", |
00e23707 DH |
1912 | iov_iter_rw(&msg->msg_iter)); |
1913 | rc = -EINVAL; | |
1914 | goto out; | |
1915 | } | |
1916 | ||
1917 | switch (iov_iter_type(&msg->msg_iter)) { | |
1918 | case ITER_KVEC: | |
f64b78fd LL |
1919 | buf = msg->msg_iter.kvec->iov_base; |
1920 | to_read = msg->msg_iter.kvec->iov_len; | |
1921 | rc = smbd_recv_buf(info, buf, to_read); | |
1922 | break; | |
1923 | ||
00e23707 | 1924 | case ITER_BVEC: |
f64b78fd | 1925 | page = msg->msg_iter.bvec->bv_page; |
6509f50c | 1926 | page_offset = msg->msg_iter.bvec->bv_offset; |
f64b78fd | 1927 | to_read = msg->msg_iter.bvec->bv_len; |
6509f50c | 1928 | rc = smbd_recv_page(info, page, page_offset, to_read); |
f64b78fd LL |
1929 | break; |
1930 | ||
1931 | default: | |
1932 | /* It's a bug in the upper layer to get here */ |
a0a3036b | 1933 | cifs_dbg(VFS, "Invalid msg type %d\n", |
00e23707 | 1934 | iov_iter_type(&msg->msg_iter)); |
6509f50c | 1935 | rc = -EINVAL; |
f64b78fd LL |
1936 | } |
1937 | ||
00e23707 | 1938 | out: |
f64b78fd LL |
1939 | /* SMBDirect will read it all or nothing */ |
1940 | if (rc > 0) | |
1941 | msg->msg_iter.count = 0; | |
1942 | return rc; | |
1943 | } | |
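
For context, a minimal sketch of how a consumer could drive smbd_recv(): the 4-byte RFC1002-style length is read first (synthesized by smbd_recv_buf() above for the first segment), then the SMB2 body. The helper name and buffer handling below are illustrative assumptions and not part of this file; in cifs the real consumer is the socket-read path used by the demultiplex thread.

static int example_read_one_pdu(struct smbd_connection *info,
				char *buf, unsigned int buflen)
{
	struct msghdr msg = {};
	struct kvec iov;
	unsigned int pdu_len;
	int rc;

	if (buflen < 4)
		return -EINVAL;

	/* First read: smbd_recv_buf() returns the 4-byte RFC1002 length */
	iov.iov_base = buf;
	iov.iov_len = 4;
	iov_iter_kvec(&msg.msg_iter, ITER_DEST, &iov, 1, 4);
	rc = smbd_recv(info, &msg);
	if (rc != 4)
		return rc < 0 ? rc : -EIO;

	pdu_len = be32_to_cpu(*(__be32 *)buf);
	if (pdu_len > buflen - 4)
		return -EMSGSIZE;

	/* Second read: copy the SMB2 body out of the reassembly queue */
	iov.iov_base = buf + 4;
	iov.iov_len = pdu_len;
	iov_iter_kvec(&msg.msg_iter, ITER_DEST, &iov, 1, pdu_len);
	rc = smbd_recv(info, &msg);
	return rc < 0 ? rc : rc + 4;
}
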
d649e1bb LL |
1944 | |
1945 | /* | |
1946 | * Send data to transport | |
1947 | * Each rqst is transported as an SMBDirect payload |
1948 | * rqst: the data to write |
1949 | * return value: 0 if successfully written, otherwise error code |
1950 | */ | |
4739f232 LL |
1951 | int smbd_send(struct TCP_Server_Info *server, |
1952 | int num_rqst, struct smb_rqst *rqst_array) | |
d649e1bb | 1953 | { |
81f39f95 | 1954 | struct smbd_connection *info = server->smbd_conn; |
4739f232 | 1955 | struct smb_rqst *rqst; |
d08089f6 DH |
1956 | struct iov_iter iter; |
1957 | unsigned int remaining_data_length, klen; | |
1958 | int rc, i, rqst_idx; | |
d649e1bb | 1959 | |
adeb964d TT |
1960 | if (info->transport_status != SMBD_CONNECTED) |
1961 | return -EAGAIN; | |
d649e1bb | 1962 | |
b6903bcf LL |
1963 | /* |
1964 | * Add in the page array if there is one. The caller needs to set | |
1965 | * rq_tailsz to PAGE_SIZE when the buffer has multiple pages and | |
1966 | * ends at page boundary | |
1967 | */ | |
4739f232 LL |
1968 | remaining_data_length = 0; |
1969 | for (i = 0; i < num_rqst; i++) | |
1970 | remaining_data_length += smb_rqst_len(server, &rqst_array[i]); | |
d649e1bb | 1971 | |
adeb964d TT |
1972 | if (unlikely(remaining_data_length > info->max_fragmented_send_size)) { |
1973 | /* assertion: payload never exceeds negotiated maximum */ | |
d649e1bb | 1974 | log_write(ERR, "payload size %d > max size %d\n", |
4739f232 | 1975 | remaining_data_length, info->max_fragmented_send_size); |
adeb964d | 1976 | return -EINVAL; |
d649e1bb LL |
1977 | } |
1978 | ||
7f46d23e LL |
1979 | log_write(INFO, "num_rqst=%d total length=%u\n", |
1980 | num_rqst, remaining_data_length); | |
4739f232 | 1981 | |
7f46d23e | 1982 | rqst_idx = 0; |
adeb964d TT |
1983 | do { |
1984 | rqst = &rqst_array[rqst_idx]; | |
adeb964d TT |
1985 | |
1986 | cifs_dbg(FYI, "Sending smb (RDMA): idx=%d smb_len=%lu\n", | |
d08089f6 DH |
1987 | rqst_idx, smb_rqst_len(server, rqst)); |
1988 | for (i = 0; i < rqst->rq_nvec; i++) | |
1989 | dump_smb(rqst->rq_iov[i].iov_base, rqst->rq_iov[i].iov_len); | |
1990 | ||
1991 | log_write(INFO, "RDMA-WR[%u] nvec=%d len=%u iter=%zu rqlen=%lu\n", | |
1992 | rqst_idx, rqst->rq_nvec, remaining_data_length, | |
1993 | iov_iter_count(&rqst->rq_iter), smb_rqst_len(server, rqst)); | |
1994 | ||
1995 | /* Send the metadata pages. */ | |
1996 | klen = 0; | |
1997 | for (i = 0; i < rqst->rq_nvec; i++) | |
1998 | klen += rqst->rq_iov[i].iov_len; | |
1999 | iov_iter_kvec(&iter, ITER_SOURCE, rqst->rq_iov, rqst->rq_nvec, klen); | |
2000 | ||
2001 | rc = smbd_post_send_iter(info, &iter, &remaining_data_length); | |
2002 | if (rc < 0) | |
2003 | break; | |
adeb964d | 2004 | |
d08089f6 DH |
2005 | if (iov_iter_count(&rqst->rq_iter) > 0) { |
2006 | /* And then the data pages if there are any */ | |
2007 | rc = smbd_post_send_iter(info, &rqst->rq_iter, | |
2008 | &remaining_data_length); | |
2009 | if (rc < 0) | |
2010 | break; | |
d649e1bb | 2011 | } |
d08089f6 | 2012 | |
adeb964d | 2013 | } while (++rqst_idx < num_rqst); |
4739f232 | 2014 | |
d649e1bb LL |
2015 | /* |
2016 | * As an optimization, we don't wait for individual I/O to finish | |
2017 | * before sending the next one. | |
2018 | * Send them all and wait for the pending send count to get to 0, |
2019 | * which means all the I/Os have gone out and we are good to return |
2020 | */ | |
2021 | ||
072a14ec LL |
2022 | wait_event(info->wait_send_pending, |
2023 | atomic_read(&info->send_pending) == 0); | |
d649e1bb | 2024 | |
d649e1bb LL |
2025 | return rc; |
2026 | } | |
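
A hedged sketch of the calling convention on the sending side: the upper layer packs its header kvecs (and, if present, a data iterator in rq_iter) into a struct smb_rqst and hands the array to smbd_send(). The helper below and its single-kvec layout are assumptions for illustration only.

static int example_send_one_request(struct TCP_Server_Info *server,
				    void *hdr, size_t hdr_len)
{
	struct kvec iov = {
		.iov_base = hdr,
		.iov_len = hdr_len,
	};
	struct smb_rqst rqst = {
		.rq_iov = &iov,
		.rq_nvec = 1,
		/* rq_iter left empty: no data payload in this example */
	};

	/* The single rqst is sent as one SMB Direct payload */
	return smbd_send(server, 1, &rqst);
}
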
c7398583 LL |
2027 | |
2028 | static void register_mr_done(struct ib_cq *cq, struct ib_wc *wc) | |
2029 | { | |
2030 | struct smbd_mr *mr; | |
2031 | struct ib_cqe *cqe; | |
2032 | ||
2033 | if (wc->status) { | |
2034 | log_rdma_mr(ERR, "status=%d\n", wc->status); | |
2035 | cqe = wc->wr_cqe; | |
2036 | mr = container_of(cqe, struct smbd_mr, cqe); | |
2037 | smbd_disconnect_rdma_connection(mr->conn); | |
2038 | } | |
2039 | } | |
2040 | ||
2041 | /* | |
2042 | * The work queue function that recovers MRs | |
2043 | * We need to call ib_dereg_mr() and ib_alloc_mr() before this MR can be used | |
2044 | * again. Both calls are slow, so finish them in a workqueue. This will not | |
2045 | * block the I/O path. |
2046 | * There is only one workqueue that recovers MRs, so there is no need to lock as the |
2047 | * I/O requests calling smbd_register_mr will never update the links in the | |
2048 | * mr_list. | |
2049 | */ | |
2050 | static void smbd_mr_recovery_work(struct work_struct *work) | |
2051 | { | |
2052 | struct smbd_connection *info = | |
2053 | container_of(work, struct smbd_connection, mr_recovery_work); | |
2054 | struct smbd_mr *smbdirect_mr; | |
2055 | int rc; | |
2056 | ||
2057 | list_for_each_entry(smbdirect_mr, &info->mr_list, list) { | |
c21ce58e | 2058 | if (smbdirect_mr->state == MR_ERROR) { |
c7398583 | 2059 | |
7cf20bce LL |
2060 | /* recover this MR entry */ |
2061 | rc = ib_dereg_mr(smbdirect_mr->mr); | |
2062 | if (rc) { | |
2063 | log_rdma_mr(ERR, | |
2064 | "ib_dereg_mr failed rc=%x\n", | |
2065 | rc); | |
2066 | smbd_disconnect_rdma_connection(info); | |
2067 | continue; | |
2068 | } | |
2069 | ||
2070 | smbdirect_mr->mr = ib_alloc_mr( | |
2071 | info->pd, info->mr_type, | |
2072 | info->max_frmr_depth); | |
2073 | if (IS_ERR(smbdirect_mr->mr)) { | |
a0a3036b JP |
2074 | log_rdma_mr(ERR, "ib_alloc_mr failed mr_type=%x max_frmr_depth=%x\n", |
2075 | info->mr_type, | |
2076 | info->max_frmr_depth); | |
7cf20bce LL |
2077 | smbd_disconnect_rdma_connection(info); |
2078 | continue; | |
2079 | } | |
ff526d86 LL |
2080 | } else |
2081 | /* This MR is being used, don't recover it */ | |
2082 | continue; | |
7cf20bce | 2083 | |
ff526d86 | 2084 | smbdirect_mr->state = MR_READY; |
c7398583 | 2085 | |
ff526d86 LL |
2086 | /* smbdirect_mr->state is updated by this function |
2087 | * and is read and updated by I/O issuing CPUs trying | |
2088 | * to get an MR; the call to atomic_inc_return |
2089 | * implies a memory barrier and guarantees this |
2090 | * value is updated before waking up any calls to | |
2091 | * get_mr() from the I/O issuing CPUs | |
2092 | */ | |
2093 | if (atomic_inc_return(&info->mr_ready_count) == 1) | |
2094 | wake_up_interruptible(&info->wait_mr); | |
c7398583 LL |
2095 | } |
2096 | } | |
2097 | ||
2098 | static void destroy_mr_list(struct smbd_connection *info) | |
2099 | { | |
2100 | struct smbd_mr *mr, *tmp; | |
2101 | ||
2102 | cancel_work_sync(&info->mr_recovery_work); | |
2103 | list_for_each_entry_safe(mr, tmp, &info->mr_list, list) { | |
2104 | if (mr->state == MR_INVALIDATED) | |
3d78fe73 DH |
2105 | ib_dma_unmap_sg(info->id->device, mr->sgt.sgl, |
2106 | mr->sgt.nents, mr->dir); | |
c7398583 | 2107 | ib_dereg_mr(mr->mr); |
3d78fe73 | 2108 | kfree(mr->sgt.sgl); |
c7398583 LL |
2109 | kfree(mr); |
2110 | } | |
2111 | } | |
2112 | ||
2113 | /* | |
2114 | * Allocate MRs used for RDMA read/write | |
2115 | * The number of MRs will not exceed hardware capability in responder_resources | |
2116 | * All MRs are kept in mr_list. The MR can be recovered after it's used | |
2117 | * Recovery is done in smbd_mr_recovery_work. The content of a list entry changes |
2118 | * as MRs are used and recovered for I/O, but the list links will not change | |
2119 | */ | |
2120 | static int allocate_mr_list(struct smbd_connection *info) | |
2121 | { | |
2122 | int i; | |
2123 | struct smbd_mr *smbdirect_mr, *tmp; | |
2124 | ||
2125 | INIT_LIST_HEAD(&info->mr_list); | |
2126 | init_waitqueue_head(&info->wait_mr); | |
2127 | spin_lock_init(&info->mr_list_lock); | |
2128 | atomic_set(&info->mr_ready_count, 0); | |
2129 | atomic_set(&info->mr_used_count, 0); | |
2130 | init_waitqueue_head(&info->wait_for_mr_cleanup); | |
3e161c27 | 2131 | INIT_WORK(&info->mr_recovery_work, smbd_mr_recovery_work); |
c7398583 LL |
2132 | /* Allocate more MRs (2x) than hardware responder_resources */ |
2133 | for (i = 0; i < info->responder_resources * 2; i++) { | |
2134 | smbdirect_mr = kzalloc(sizeof(*smbdirect_mr), GFP_KERNEL); | |
2135 | if (!smbdirect_mr) | |
2136 | goto out; | |
2137 | smbdirect_mr->mr = ib_alloc_mr(info->pd, info->mr_type, | |
2138 | info->max_frmr_depth); | |
2139 | if (IS_ERR(smbdirect_mr->mr)) { | |
a0a3036b JP |
2140 | log_rdma_mr(ERR, "ib_alloc_mr failed mr_type=%x max_frmr_depth=%x\n", |
2141 | info->mr_type, info->max_frmr_depth); | |
c7398583 LL |
2142 | goto out; |
2143 | } | |
3d78fe73 DH |
2144 | smbdirect_mr->sgt.sgl = kcalloc(info->max_frmr_depth, |
2145 | sizeof(struct scatterlist), | |
2146 | GFP_KERNEL); | |
2147 | if (!smbdirect_mr->sgt.sgl) { | |
c7398583 LL |
2148 | log_rdma_mr(ERR, "failed to allocate sgl\n"); |
2149 | ib_dereg_mr(smbdirect_mr->mr); | |
2150 | goto out; | |
2151 | } | |
2152 | smbdirect_mr->state = MR_READY; | |
2153 | smbdirect_mr->conn = info; | |
2154 | ||
2155 | list_add_tail(&smbdirect_mr->list, &info->mr_list); | |
2156 | atomic_inc(&info->mr_ready_count); | |
2157 | } | |
c7398583 LL |
2158 | return 0; |
2159 | ||
2160 | out: | |
2161 | kfree(smbdirect_mr); | |
2162 | ||
2163 | list_for_each_entry_safe(smbdirect_mr, tmp, &info->mr_list, list) { | |
3e161c27 | 2164 | list_del(&smbdirect_mr->list); |
c7398583 | 2165 | ib_dereg_mr(smbdirect_mr->mr); |
3d78fe73 | 2166 | kfree(smbdirect_mr->sgt.sgl); |
c7398583 LL |
2167 | kfree(smbdirect_mr); |
2168 | } | |
2169 | return -ENOMEM; | |
2170 | } | |
2171 | ||
2172 | /* | |
2173 | * Get an MR from mr_list. This function waits until there is at least one |
2174 | * MR available in the list. It may access the list while the |
2175 | * smbd_mr_recovery_work is recovering the MR list. This doesn't need a lock |
2176 | * as they never modify the same places. However, there may be several CPUs |
2177 | * issuing I/O trying to get an MR at the same time; mr_list_lock is used to |
2178 | * protect this situation. | |
2179 | */ | |
2180 | static struct smbd_mr *get_mr(struct smbd_connection *info) | |
2181 | { | |
2182 | struct smbd_mr *ret; | |
2183 | int rc; | |
2184 | again: | |
2185 | rc = wait_event_interruptible(info->wait_mr, | |
2186 | atomic_read(&info->mr_ready_count) || | |
2187 | info->transport_status != SMBD_CONNECTED); | |
2188 | if (rc) { | |
2189 | log_rdma_mr(ERR, "wait_event_interruptible rc=%x\n", rc); | |
2190 | return NULL; | |
2191 | } | |
2192 | ||
2193 | if (info->transport_status != SMBD_CONNECTED) { | |
2194 | log_rdma_mr(ERR, "info->transport_status=%x\n", | |
2195 | info->transport_status); | |
2196 | return NULL; | |
2197 | } | |
2198 | ||
2199 | spin_lock(&info->mr_list_lock); | |
2200 | list_for_each_entry(ret, &info->mr_list, list) { | |
2201 | if (ret->state == MR_READY) { | |
2202 | ret->state = MR_REGISTERED; | |
2203 | spin_unlock(&info->mr_list_lock); | |
2204 | atomic_dec(&info->mr_ready_count); | |
2205 | atomic_inc(&info->mr_used_count); | |
2206 | return ret; | |
2207 | } | |
2208 | } | |
2209 | ||
2210 | spin_unlock(&info->mr_list_lock); | |
2211 | /* | |
2212 | * It is possible that we could fail to get an MR because other processes may |
2213 | * try to acquire an MR at the same time. If this is the case, retry it. |
2214 | */ | |
2215 | goto again; | |
2216 | } | |
2217 | ||
d08089f6 DH |
2218 | /* |
2219 | * Transcribe the pages from an iterator into an MR scatterlist. | |
d08089f6 DH |
2220 | */ |
2221 | static int smbd_iter_to_mr(struct smbd_connection *info, | |
2222 | struct iov_iter *iter, | |
3d78fe73 DH |
2223 | struct sg_table *sgt, |
2224 | unsigned int max_sg) | |
d08089f6 | 2225 | { |
d08089f6 DH |
2226 | int ret; |
2227 | ||
3d78fe73 | 2228 | memset(sgt->sgl, 0, max_sg * sizeof(struct scatterlist)); |
d08089f6 | 2229 | |
3d78fe73 | 2230 | ret = netfs_extract_iter_to_sg(iter, iov_iter_count(iter), sgt, max_sg, 0); |
d08089f6 | 2231 | WARN_ON(ret < 0); |
3d78fe73 DH |
2232 | if (sgt->nents > 0) |
2233 | sg_mark_end(&sgt->sgl[sgt->nents - 1]); | |
d08089f6 DH |
2234 | return ret; |
2235 | } | |
2236 | ||
c7398583 LL |
2237 | /* |
2238 | * Register memory for RDMA read/write | |
d08089f6 | 2239 | * iter: the buffer to register memory with |
c7398583 LL |
2240 | * writing: true if this is an RDMA write (SMB read), false for RDMA read |
2241 | * need_invalidate: true if this MR needs to be locally invalidated after I/O | |
2242 | * return value: the MR registered, NULL if failed. | |
2243 | */ | |
d08089f6 DH |
2244 | struct smbd_mr *smbd_register_mr(struct smbd_connection *info, |
2245 | struct iov_iter *iter, | |
2246 | bool writing, bool need_invalidate) | |
c7398583 LL |
2247 | { |
2248 | struct smbd_mr *smbdirect_mr; | |
d08089f6 | 2249 | int rc, num_pages; |
c7398583 LL |
2250 | enum dma_data_direction dir; |
2251 | struct ib_reg_wr *reg_wr; | |
c7398583 | 2252 | |
d08089f6 | 2253 | num_pages = iov_iter_npages(iter, info->max_frmr_depth + 1); |
c7398583 LL |
2254 | if (num_pages > info->max_frmr_depth) { |
2255 | log_rdma_mr(ERR, "num_pages=%d max_frmr_depth=%d\n", | |
2256 | num_pages, info->max_frmr_depth); | |
d08089f6 | 2257 | WARN_ON_ONCE(1); |
c7398583 LL |
2258 | return NULL; |
2259 | } | |
2260 | ||
2261 | smbdirect_mr = get_mr(info); | |
2262 | if (!smbdirect_mr) { | |
2263 | log_rdma_mr(ERR, "get_mr returning NULL\n"); | |
2264 | return NULL; | |
2265 | } | |
d08089f6 DH |
2266 | |
2267 | dir = writing ? DMA_FROM_DEVICE : DMA_TO_DEVICE; | |
2268 | smbdirect_mr->dir = dir; | |
c7398583 | 2269 | smbdirect_mr->need_invalidate = need_invalidate; |
3d78fe73 DH |
2270 | smbdirect_mr->sgt.nents = 0; |
2271 | smbdirect_mr->sgt.orig_nents = 0; | |
c7398583 | 2272 | |
3d78fe73 DH |
2273 | log_rdma_mr(INFO, "num_pages=0x%x count=0x%zx depth=%u\n", |
2274 | num_pages, iov_iter_count(iter), info->max_frmr_depth); | |
2275 | smbd_iter_to_mr(info, iter, &smbdirect_mr->sgt, info->max_frmr_depth); | |
7cf20bce | 2276 | |
3d78fe73 DH |
2277 | rc = ib_dma_map_sg(info->id->device, smbdirect_mr->sgt.sgl, |
2278 | smbdirect_mr->sgt.nents, dir); | |
c7398583 | 2279 | if (!rc) { |
7cf20bce | 2280 | log_rdma_mr(ERR, "ib_dma_map_sg num_pages=%x dir=%x rc=%x\n", |
c7398583 LL |
2281 | num_pages, dir, rc); |
2282 | goto dma_map_error; | |
2283 | } | |
2284 | ||
3d78fe73 DH |
2285 | rc = ib_map_mr_sg(smbdirect_mr->mr, smbdirect_mr->sgt.sgl, |
2286 | smbdirect_mr->sgt.nents, NULL, PAGE_SIZE); | |
2287 | if (rc != smbdirect_mr->sgt.nents) { | |
7cf20bce | 2288 | log_rdma_mr(ERR, |
3d78fe73 DH |
2289 | "ib_map_mr_sg failed rc = %d nents = %x\n", |
2290 | rc, smbdirect_mr->sgt.nents); | |
c7398583 LL |
2291 | goto map_mr_error; |
2292 | } | |
2293 | ||
2294 | ib_update_fast_reg_key(smbdirect_mr->mr, | |
2295 | ib_inc_rkey(smbdirect_mr->mr->rkey)); | |
2296 | reg_wr = &smbdirect_mr->wr; | |
2297 | reg_wr->wr.opcode = IB_WR_REG_MR; | |
2298 | smbdirect_mr->cqe.done = register_mr_done; | |
2299 | reg_wr->wr.wr_cqe = &smbdirect_mr->cqe; | |
2300 | reg_wr->wr.num_sge = 0; | |
2301 | reg_wr->wr.send_flags = IB_SEND_SIGNALED; | |
2302 | reg_wr->mr = smbdirect_mr->mr; | |
2303 | reg_wr->key = smbdirect_mr->mr->rkey; | |
2304 | reg_wr->access = writing ? | |
2305 | IB_ACCESS_REMOTE_WRITE | IB_ACCESS_LOCAL_WRITE : | |
2306 | IB_ACCESS_REMOTE_READ; | |
2307 | ||
2308 | /* | |
2309 | * There is no need to wait for completion on ib_post_send |
2310 | * on IB_WR_REG_MR. Hardware enforces a barrier and order of execution |
2311 | * on the next ib_post_send when we actually send I/O to the remote peer |
2312 | */ | |
73930595 | 2313 | rc = ib_post_send(info->id->qp, ®_wr->wr, NULL); |
c7398583 LL |
2314 | if (!rc) |
2315 | return smbdirect_mr; | |
2316 | ||
2317 | log_rdma_mr(ERR, "ib_post_send failed rc=%x reg_wr->key=%x\n", | |
2318 | rc, reg_wr->key); | |
2319 | ||
2320 | /* If all failed, attempt to recover this MR by setting it to MR_ERROR */ |
2321 | map_mr_error: | |
3d78fe73 DH |
2322 | ib_dma_unmap_sg(info->id->device, smbdirect_mr->sgt.sgl, |
2323 | smbdirect_mr->sgt.nents, smbdirect_mr->dir); | |
c7398583 LL |
2324 | |
2325 | dma_map_error: | |
2326 | smbdirect_mr->state = MR_ERROR; | |
2327 | if (atomic_dec_and_test(&info->mr_used_count)) | |
2328 | wake_up(&info->wait_for_mr_cleanup); | |
2329 | ||
21a4e14a LL |
2330 | smbd_disconnect_rdma_connection(info); |
2331 | ||
c7398583 LL |
2332 | return NULL; |
2333 | } | |
2334 | ||
2335 | static void local_inv_done(struct ib_cq *cq, struct ib_wc *wc) | |
2336 | { | |
2337 | struct smbd_mr *smbdirect_mr; | |
2338 | struct ib_cqe *cqe; | |
2339 | ||
2340 | cqe = wc->wr_cqe; | |
2341 | smbdirect_mr = container_of(cqe, struct smbd_mr, cqe); | |
2342 | smbdirect_mr->state = MR_INVALIDATED; | |
2343 | if (wc->status != IB_WC_SUCCESS) { | |
2344 | log_rdma_mr(ERR, "invalidate failed status=%x\n", wc->status); | |
2345 | smbdirect_mr->state = MR_ERROR; | |
2346 | } | |
2347 | complete(&smbdirect_mr->invalidate_done); | |
2348 | } | |
2349 | ||
2350 | /* | |
2351 | * Deregister an MR after I/O is done |
2352 | * This function may wait if remote invalidation is not used |
2353 | * and we have to locally invalidate the buffer to prevent the data from being |
2354 | * modified by the remote peer after the upper layer consumes it |
2355 | */ | |
2356 | int smbd_deregister_mr(struct smbd_mr *smbdirect_mr) | |
2357 | { | |
73930595 | 2358 | struct ib_send_wr *wr; |
c7398583 LL |
2359 | struct smbd_connection *info = smbdirect_mr->conn; |
2360 | int rc = 0; | |
2361 | ||
2362 | if (smbdirect_mr->need_invalidate) { | |
2363 | /* Need to finish local invalidation before returning */ | |
2364 | wr = &smbdirect_mr->inv_wr; | |
2365 | wr->opcode = IB_WR_LOCAL_INV; | |
2366 | smbdirect_mr->cqe.done = local_inv_done; | |
2367 | wr->wr_cqe = &smbdirect_mr->cqe; | |
2368 | wr->num_sge = 0; | |
2369 | wr->ex.invalidate_rkey = smbdirect_mr->mr->rkey; | |
2370 | wr->send_flags = IB_SEND_SIGNALED; | |
2371 | ||
2372 | init_completion(&smbdirect_mr->invalidate_done); | |
73930595 | 2373 | rc = ib_post_send(info->id->qp, wr, NULL); |
c7398583 LL |
2374 | if (rc) { |
2375 | log_rdma_mr(ERR, "ib_post_send failed rc=%x\n", rc); | |
2376 | smbd_disconnect_rdma_connection(info); | |
2377 | goto done; | |
2378 | } | |
2379 | wait_for_completion(&smbdirect_mr->invalidate_done); | |
2380 | smbdirect_mr->need_invalidate = false; | |
2381 | } else | |
2382 | /* | |
2383 | * For remote invalidation, just set it to MR_INVALIDATED | |
2384 | * and defer to mr_recovery_work to recover the MR for next use | |
2385 | */ | |
2386 | smbdirect_mr->state = MR_INVALIDATED; | |
2387 | ||
c21ce58e LL |
2388 | if (smbdirect_mr->state == MR_INVALIDATED) { |
2389 | ib_dma_unmap_sg( | |
3d78fe73 DH |
2390 | info->id->device, smbdirect_mr->sgt.sgl, |
2391 | smbdirect_mr->sgt.nents, | |
c21ce58e LL |
2392 | smbdirect_mr->dir); |
2393 | smbdirect_mr->state = MR_READY; | |
2394 | if (atomic_inc_return(&info->mr_ready_count) == 1) | |
2395 | wake_up_interruptible(&info->wait_mr); | |
2396 | } else | |
2397 | /* | |
2398 | * Schedule the work to do MR recovery for future I/Os. MR |
2399 | * recovery is slow and we don't want it to block the current I/O |
2400 | */ | |
2401 | queue_work(info->workqueue, &info->mr_recovery_work); | |
c7398583 LL |
2402 | |
2403 | done: | |
2404 | if (atomic_dec_and_test(&info->mr_used_count)) | |
2405 | wake_up(&info->wait_for_mr_cleanup); | |
2406 | ||
2407 | return rc; | |
2408 | } | |
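
To tie the MR paths together, here is a rough, hypothetical lifecycle sketch: register a buffer, let the SMB2 layer advertise the token to the server, then deregister once the response arrives. The rkey/iova/length fields come from struct ib_mr; the helper itself is an illustration and not part of this file.

static int example_mr_lifecycle(struct smbd_connection *info,
				struct iov_iter *data)
{
	struct smbd_mr *mr;

	/* writing=true: the peer RDMA-writes into this buffer (an SMB read) */
	mr = smbd_register_mr(info, data, true, true);
	if (!mr)
		return -EAGAIN;

	/*
	 * A real caller would copy mr->mr->rkey, mr->mr->iova and
	 * mr->mr->length into the RDMA channel descriptor of the SMB2
	 * request, send it, and wait for the server's response here.
	 */

	/* Posts a local invalidation (need_invalidate was true) and waits */
	return smbd_deregister_mr(mr);
}
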
e5fbdde4 DH |
2409 | |
2410 | static bool smb_set_sge(struct smb_extract_to_rdma *rdma, | |
2411 | struct page *lowest_page, size_t off, size_t len) | |
2412 | { | |
2413 | struct ib_sge *sge = &rdma->sge[rdma->nr_sge]; | |
2414 | u64 addr; | |
2415 | ||
2416 | addr = ib_dma_map_page(rdma->device, lowest_page, | |
2417 | off, len, rdma->direction); | |
2418 | if (ib_dma_mapping_error(rdma->device, addr)) | |
2419 | return false; | |
2420 | ||
2421 | sge->addr = addr; | |
2422 | sge->length = len; | |
2423 | sge->lkey = rdma->local_dma_lkey; | |
2424 | rdma->nr_sge++; | |
2425 | return true; | |
2426 | } | |
2427 | ||
2428 | /* | |
2429 | * Extract page fragments from a BVEC-class iterator and add them to an RDMA | |
2430 | * element list. The pages are not pinned. | |
2431 | */ | |
2432 | static ssize_t smb_extract_bvec_to_rdma(struct iov_iter *iter, | |
2433 | struct smb_extract_to_rdma *rdma, | |
2434 | ssize_t maxsize) | |
2435 | { | |
2436 | const struct bio_vec *bv = iter->bvec; | |
2437 | unsigned long start = iter->iov_offset; | |
2438 | unsigned int i; | |
2439 | ssize_t ret = 0; | |
2440 | ||
2441 | for (i = 0; i < iter->nr_segs; i++) { | |
2442 | size_t off, len; | |
2443 | ||
2444 | len = bv[i].bv_len; | |
2445 | if (start >= len) { | |
2446 | start -= len; | |
2447 | continue; | |
2448 | } | |
2449 | ||
2450 | len = min_t(size_t, maxsize, len - start); | |
2451 | off = bv[i].bv_offset + start; | |
2452 | ||
2453 | if (!smb_set_sge(rdma, bv[i].bv_page, off, len)) | |
2454 | return -EIO; | |
2455 | ||
2456 | ret += len; | |
2457 | maxsize -= len; | |
2458 | if (rdma->nr_sge >= rdma->max_sge || maxsize <= 0) | |
2459 | break; | |
2460 | start = 0; | |
2461 | } | |
2462 | ||
2463 | return ret; | |
2464 | } | |
2465 | ||
2466 | /* | |
2467 | * Extract fragments from a KVEC-class iterator and add them to an RDMA list. | |
2468 | * This can deal with vmalloc'd buffers as well as kmalloc'd or static buffers. | |
2469 | * The pages are not pinned. | |
2470 | */ | |
2471 | static ssize_t smb_extract_kvec_to_rdma(struct iov_iter *iter, | |
2472 | struct smb_extract_to_rdma *rdma, | |
2473 | ssize_t maxsize) | |
2474 | { | |
2475 | const struct kvec *kv = iter->kvec; | |
2476 | unsigned long start = iter->iov_offset; | |
2477 | unsigned int i; | |
2478 | ssize_t ret = 0; | |
2479 | ||
2480 | for (i = 0; i < iter->nr_segs; i++) { | |
2481 | struct page *page; | |
2482 | unsigned long kaddr; | |
2483 | size_t off, len, seg; | |
2484 | ||
2485 | len = kv[i].iov_len; | |
2486 | if (start >= len) { | |
2487 | start -= len; | |
2488 | continue; | |
2489 | } | |
2490 | ||
2491 | kaddr = (unsigned long)kv[i].iov_base + start; | |
2492 | off = kaddr & ~PAGE_MASK; | |
2493 | len = min_t(size_t, maxsize, len - start); | |
2494 | kaddr &= PAGE_MASK; | |
2495 | ||
2496 | maxsize -= len; | |
2497 | do { | |
2498 | seg = min_t(size_t, len, PAGE_SIZE - off); | |
2499 | ||
2500 | if (is_vmalloc_or_module_addr((void *)kaddr)) | |
2501 | page = vmalloc_to_page((void *)kaddr); | |
2502 | else | |
2503 | page = virt_to_page(kaddr); | |
2504 | ||
2505 | if (!smb_set_sge(rdma, page, off, seg)) | |
2506 | return -EIO; | |
2507 | ||
2508 | ret += seg; | |
2509 | len -= seg; | |
2510 | kaddr += PAGE_SIZE; | |
2511 | off = 0; | |
2512 | } while (len > 0 && rdma->nr_sge < rdma->max_sge); | |
2513 | ||
2514 | if (rdma->nr_sge >= rdma->max_sge || maxsize <= 0) | |
2515 | break; | |
2516 | start = 0; | |
2517 | } | |
2518 | ||
2519 | return ret; | |
2520 | } | |
2521 | ||
2522 | /* | |
2523 | * Extract folio fragments from an XARRAY-class iterator and add them to an | |
2524 | * RDMA list. The folios are not pinned. | |
2525 | */ | |
2526 | static ssize_t smb_extract_xarray_to_rdma(struct iov_iter *iter, | |
2527 | struct smb_extract_to_rdma *rdma, | |
2528 | ssize_t maxsize) | |
2529 | { | |
2530 | struct xarray *xa = iter->xarray; | |
2531 | struct folio *folio; | |
2532 | loff_t start = iter->xarray_start + iter->iov_offset; | |
2533 | pgoff_t index = start / PAGE_SIZE; | |
2534 | ssize_t ret = 0; | |
2535 | size_t off, len; | |
2536 | XA_STATE(xas, xa, index); | |
2537 | ||
2538 | rcu_read_lock(); | |
2539 | ||
2540 | xas_for_each(&xas, folio, ULONG_MAX) { | |
2541 | if (xas_retry(&xas, folio)) | |
2542 | continue; | |
2543 | if (WARN_ON(xa_is_value(folio))) | |
2544 | break; | |
2545 | if (WARN_ON(folio_test_hugetlb(folio))) | |
2546 | break; | |
2547 | ||
2548 | off = offset_in_folio(folio, start); | |
2549 | len = min_t(size_t, maxsize, folio_size(folio) - off); | |
2550 | ||
2551 | if (!smb_set_sge(rdma, folio_page(folio, 0), off, len)) { | |
2552 | rcu_read_unlock(); | |
2553 | return -EIO; | |
2554 | } | |
2555 | ||
2556 | maxsize -= len; | |
2557 | ret += len; | |
2558 | if (rdma->nr_sge >= rdma->max_sge || maxsize <= 0) | |
2559 | break; | |
2560 | } | |
2561 | ||
2562 | rcu_read_unlock(); | |
2563 | return ret; | |
2564 | } | |
2565 | ||
2566 | /* | |
2567 | * Extract page fragments from up to the given amount of the source iterator | |
2568 | * and build up an RDMA list that refers to all of those bits. The RDMA list | |
2569 | * is appended to, up to the maximum number of elements set in the parameter | |
2570 | * block. | |
2571 | * | |
2572 | * The extracted page fragments are not pinned or ref'd in any way; if an | |
2573 | * IOVEC/UBUF-type iterator is to be used, it should be converted to a | |
2574 | * BVEC-type iterator and the pages pinned, ref'd or otherwise held in some | |
2575 | * way. | |
2576 | */ | |
2577 | static ssize_t smb_extract_iter_to_rdma(struct iov_iter *iter, size_t len, | |
2578 | struct smb_extract_to_rdma *rdma) | |
2579 | { | |
2580 | ssize_t ret; | |
2581 | int before = rdma->nr_sge; | |
2582 | ||
2583 | switch (iov_iter_type(iter)) { | |
2584 | case ITER_BVEC: | |
2585 | ret = smb_extract_bvec_to_rdma(iter, rdma, len); | |
2586 | break; | |
2587 | case ITER_KVEC: | |
2588 | ret = smb_extract_kvec_to_rdma(iter, rdma, len); | |
2589 | break; | |
2590 | case ITER_XARRAY: | |
2591 | ret = smb_extract_xarray_to_rdma(iter, rdma, len); | |
2592 | break; | |
2593 | default: | |
2594 | WARN_ON_ONCE(1); | |
2595 | return -EIO; | |
2596 | } | |
2597 | ||
2598 | if (ret > 0) { | |
2599 | iov_iter_advance(iter, ret); | |
2600 | } else if (ret < 0) { | |
2601 | while (rdma->nr_sge > before) { | |
2602 | struct ib_sge *sge = &rdma->sge[--rdma->nr_sge]; |
2603 | ||
2604 | ib_dma_unmap_single(rdma->device, sge->addr, sge->length, | |
2605 | rdma->direction); | |
2606 | sge->addr = 0; | |
2607 | } | |
2608 | } | |
2609 | ||
2610 | return ret; | |
2611 | } |
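
A condensed usage sketch of the extractor (hypothetical helper; it assumes info->pd->local_dma_lkey is the lkey to use, as the send path does): fill in struct smb_extract_to_rdma and let smb_extract_iter_to_rdma() map up to len bytes of the iterator into DMA-mapped SGEs for a work request.

static int example_build_send_sges(struct smbd_connection *info,
				   struct iov_iter *source, size_t len,
				   struct ib_sge *sges, unsigned int max_sge)
{
	struct smb_extract_to_rdma extract = {
		.nr_sge		= 0,
		.max_sge	= max_sge,
		.sge		= sges,
		.device		= info->id->device,
		.local_dma_lkey	= info->pd->local_dma_lkey,
		.direction	= DMA_TO_DEVICE,
	};
	ssize_t rc;

	rc = smb_extract_iter_to_rdma(source, len, &extract);
	if (rc < 0)
		return rc;	/* anything mapped so far was already unwound */

	/*
	 * sges[0..extract.nr_sge-1] are now DMA-mapped and can be hung off
	 * an ib_send_wr; each must be unmapped once the WR completes.
	 */
	return extract.nr_sge;
}
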