RDMA/siw: Fix broken RDMA Read Fence/Resume logic.
drivers/infiniband/sw/siw/siw.h

/* SPDX-License-Identifier: GPL-2.0 or BSD-3-Clause */

/* Authors: Bernard Metzler <bmt@zurich.ibm.com> */
/* Copyright (c) 2008-2019, IBM Corporation */

#ifndef _SIW_H
#define _SIW_H

#include <rdma/ib_verbs.h>
#include <rdma/restrack.h>
#include <linux/socket.h>
#include <linux/skbuff.h>
#include <crypto/hash.h>
#include <linux/crc32.h>
#include <linux/crc32c.h>

#include <rdma/siw-abi.h>
#include "iwarp.h"

#define SIW_VENDOR_ID 0x626d74 /* ascii 'bmt' for now */
#define SIW_VENDORT_PART_ID 0
#define SIW_MAX_QP (1024 * 100)
#define SIW_MAX_QP_WR (1024 * 32)
#define SIW_MAX_ORD_QP 128
#define SIW_MAX_IRD_QP 128
#define SIW_MAX_SGE_PBL 256 /* max num sge's for PBL */
#define SIW_MAX_SGE_RD 1 /* iwarp limitation. we could relax */
#define SIW_MAX_CQ (1024 * 100)
#define SIW_MAX_CQE (SIW_MAX_QP_WR * 100)
#define SIW_MAX_MR (SIW_MAX_QP * 10)
#define SIW_MAX_PD SIW_MAX_QP
#define SIW_MAX_MW 0 /* to be set if MW's are supported */
#define SIW_MAX_SRQ SIW_MAX_QP
#define SIW_MAX_SRQ_WR (SIW_MAX_QP_WR * 10)
#define SIW_MAX_CONTEXT SIW_MAX_PD

/* Min number of bytes for using zero copy transmit */
#define SENDPAGE_THRESH PAGE_SIZE

/* Maximum number of frames that can be sent in one SQ processing */
#define SQ_USER_MAXBURST 100

/* Maximum number of consecutive IRQ elements which get served
 * if SQ has pending work. Prevents starving local SQ processing
 * by serving peer Read Requests.
 */
#define SIW_IRQ_MAXBURST_SQ_ACTIVE 4
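
/*
 * Illustrative sketch only, not driver code: a TX loop honouring this
 * limit could alternate between both queues roughly as follows, where
 * irq_has_work(), sq_has_work() and siw_serve_irq_element() are
 * hypothetical helpers:
 *
 *	int served = 0;
 *
 *	while (irq_has_work(qp)) {
 *		siw_serve_irq_element(qp);
 *		if (sq_has_work(qp) &&
 *		    ++served == SIW_IRQ_MAXBURST_SQ_ACTIVE)
 *			break;
 *	}
 *	... fall through to local SQ processing ...
 */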

struct siw_dev_cap {
	int max_qp;
	int max_qp_wr;
	int max_ord; /* max. outbound read queue depth */
	int max_ird; /* max. inbound read queue depth */
	int max_sge;
	int max_sge_rd;
	int max_cq;
	int max_cqe;
	int max_mr;
	int max_pd;
	int max_mw;
	int max_srq;
	int max_srq_wr;
	int max_srq_sge;
};

struct siw_pd {
	struct ib_pd base_pd;
};

struct siw_device {
	struct ib_device base_dev;
	struct net_device *netdev;
	struct siw_dev_cap attrs;

	u32 vendor_part_id;
	int numa_node;

	/* physical port state (only one port per device) */
	enum ib_port_state state;

	spinlock_t lock;

	struct xarray qp_xa;
	struct xarray mem_xa;

	struct list_head cep_list;
	struct list_head qp_list;

	/* active objects statistics to enforce limits */
	atomic_t num_qp;
	atomic_t num_cq;
	atomic_t num_pd;
	atomic_t num_mr;
	atomic_t num_srq;
	atomic_t num_ctx;

	struct work_struct netdev_down;
};

struct siw_ucontext {
	struct ib_ucontext base_ucontext;
	struct siw_device *sdev;
};

/*
 * The RDMA core does not define LOCAL_READ access, which is always
 * enabled implicitly.
 */
#define IWARP_ACCESS_MASK					\
	(IB_ACCESS_LOCAL_WRITE | IB_ACCESS_REMOTE_WRITE |	\
	 IB_ACCESS_REMOTE_READ)
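
/*
 * Hypothetical helper, for illustration only: with the above mask, a
 * registration request asking for rights iWARP cannot express would be
 * rejected like this.
 */
static inline bool siw_access_supported(int access_rights)
{
	return !(access_rights & ~IWARP_ACCESS_MASK);
}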

/*
 * siw presentation of user memory registered as source
 * or target of RDMA operations.
 */

struct siw_page_chunk {
	struct page **plist;
};

struct siw_umem {
	struct siw_page_chunk *page_chunk;
	int num_pages;
	bool writable;
	u64 fp_addr; /* First page base address */
	struct mm_struct *owning_mm;
};

struct siw_pble {
	dma_addr_t addr; /* Address of assigned buffer */
	unsigned int size; /* Size of this entry */
	unsigned long pbl_off; /* Total offset from start of PBL */
};

struct siw_pbl {
	unsigned int num_buf;
	unsigned int max_buf;
	struct siw_pble pbe[];
};

/*
 * Generic memory representation for registered siw memory.
 * Memory lookup is always done via the upper 24 bits of the
 * STag (the STag index).
 */
struct siw_mem {
	struct siw_device *sdev;
	struct kref ref;
	u64 va; /* VA of memory */
	u64 len; /* length of the memory buffer in bytes */
	u32 stag; /* iWarp memory access steering tag */
	u8 stag_valid; /* VALID or INVALID */
	u8 is_pbl; /* PBL or user space mem */
	u8 is_mw; /* Memory Region or Memory Window */
	enum ib_access_flags perms; /* local/remote READ & WRITE */
	union {
		struct siw_umem *umem;
		struct siw_pbl *pbl;
		void *mem_obj;
	};
	struct ib_pd *pd;
};
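
/*
 * Illustrative sketch (hypothetical helper, no refcounting): resolving
 * a STag against the mem_xa xarray uses only the upper 24 bits; the
 * low-order 8 bits carry the consumer-owned key part of the STag and
 * do not take part in the lookup.
 */
static inline struct siw_mem *siw_mem_lookup_sketch(struct siw_device *sdev,
						    u32 stag)
{
	return xa_load(&sdev->mem_xa, stag >> 8);
}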

struct siw_mr {
	struct ib_mr base_mr;
	struct siw_mem *mem;
	struct rcu_head rcu;
};

/*
 * Error codes for local or remote
 * access to registered memory
 */
enum siw_access_state {
	E_ACCESS_OK,
	E_STAG_INVALID,
	E_BASE_BOUNDS,
	E_ACCESS_PERM,
	E_PD_MISMATCH
};

enum siw_wr_state {
	SIW_WR_IDLE,
	SIW_WR_QUEUED, /* processing has not started yet */
	SIW_WR_INPROGRESS /* initiated processing of the WR */
};

/* The WQE currently being processed (RX or TX) */
struct siw_wqe {
	/* Copy of the application's SQE or RQE */
	union {
		struct siw_sqe sqe;
		struct siw_rqe rqe;
	};
	struct siw_mem *mem[SIW_MAX_SGE]; /* per sge's resolved mem */
	enum siw_wr_state wr_status;
	enum siw_wc_status wc_status;
	u32 bytes; /* total bytes to process */
	u32 processed; /* bytes processed */
};

struct siw_cq {
	struct ib_cq base_cq;
	spinlock_t lock;
	struct siw_cq_ctrl *notify;
	struct siw_cqe *queue;
	u32 cq_put;
	u32 cq_get;
	u32 num_cqe;
	struct rdma_user_mmap_entry *cq_entry; /* mmap info for CQE array */
	u32 id; /* For debugging only */
};
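
/*
 * Hypothetical helper, for illustration: cq_put and cq_get are free
 * running counters; the CQE slot for a given counter value is found
 * modulo the queue size.
 */
static inline struct siw_cqe *siw_cq_slot_sketch(struct siw_cq *cq, u32 idx)
{
	return &cq->queue[idx % cq->num_cqe];
}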

enum siw_qp_state {
	SIW_QP_STATE_IDLE,
	SIW_QP_STATE_RTR,
	SIW_QP_STATE_RTS,
	SIW_QP_STATE_CLOSING,
	SIW_QP_STATE_TERMINATE,
	SIW_QP_STATE_ERROR,
	SIW_QP_STATE_COUNT
};

enum siw_qp_flags {
	SIW_RDMA_BIND_ENABLED = (1 << 0),
	SIW_RDMA_WRITE_ENABLED = (1 << 1),
	SIW_RDMA_READ_ENABLED = (1 << 2),
	SIW_SIGNAL_ALL_WR = (1 << 3),
	SIW_MPA_CRC = (1 << 4),
	SIW_QP_IN_DESTROY = (1 << 5)
};

enum siw_qp_attr_mask {
	SIW_QP_ATTR_STATE = (1 << 0),
	SIW_QP_ATTR_ACCESS_FLAGS = (1 << 1),
	SIW_QP_ATTR_LLP_HANDLE = (1 << 2),
	SIW_QP_ATTR_ORD = (1 << 3),
	SIW_QP_ATTR_IRD = (1 << 4),
	SIW_QP_ATTR_SQ_SIZE = (1 << 5),
	SIW_QP_ATTR_RQ_SIZE = (1 << 6),
	SIW_QP_ATTR_MPA = (1 << 7)
};

struct siw_srq {
	struct ib_srq base_srq;
	spinlock_t lock;
	u32 max_sge;
	u32 limit; /* low watermark for async event */
	struct siw_rqe *recvq;
	u32 rq_put;
	u32 rq_get;
	u32 num_rqe; /* max # of wqe's allowed */
	struct rdma_user_mmap_entry *srq_entry; /* mmap info for SRQ array */
	bool armed:1; /* inform user if limit hit */
	bool is_kernel_res:1; /* true if kernel client */
};

struct siw_qp_attrs {
	enum siw_qp_state state;
	u32 sq_size;
	u32 rq_size;
	u32 orq_size;
	u32 irq_size;
	u32 sq_max_sges;
	u32 rq_max_sges;
	enum siw_qp_flags flags;

	struct socket *sk;
};

enum siw_tx_ctx {
	SIW_SEND_HDR, /* start or continue sending HDR */
	SIW_SEND_DATA, /* start or continue sending DDP payload */
	SIW_SEND_TRAILER, /* start or continue sending TRAILER */
	SIW_SEND_SHORT_FPDU /* send whole FPDU hdr|data|trailer at once */
};

enum siw_rx_state {
	SIW_GET_HDR, /* await new hdr or within hdr */
	SIW_GET_DATA_START, /* start of inbound DDP payload */
	SIW_GET_DATA_MORE, /* continuation of (misaligned) DDP payload */
	SIW_GET_TRAILER /* await new trailer or within trailer */
};

struct siw_rx_stream {
	struct sk_buff *skb;
	int skb_new; /* pending unread bytes in skb */
	int skb_offset; /* offset in skb */
	int skb_copied; /* processed bytes in skb */

	union iwarp_hdr hdr;
	struct mpa_trailer trailer;

	enum siw_rx_state state;

	/*
	 * For each FPDU, the main RX loop runs through 3 stages:
	 * receiving protocol headers, placing DDP payload, and receiving
	 * trailer information (CRC + possibly padding).
	 * The next two variables keep state on the receive status of the
	 * current FPDU part (hdr, data, trailer).
	 */
	int fpdu_part_rcvd; /* bytes in pkt part copied */
	int fpdu_part_rem; /* bytes in pkt part not seen */

	/*
	 * Next expected DDP MSN for each QN +
	 * expected steering tag +
	 * expected DDP tagged offset (all HBO)
	 */
	u32 ddp_msn[RDMAP_UNTAGGED_QN_COUNT];
	u32 ddp_stag;
	u64 ddp_to;
	u32 inval_stag; /* Stag to be invalidated */

	struct shash_desc *mpa_crc_hd;
	u8 rx_suspend : 1;
	u8 pad : 2; /* # of pad bytes expected */
	u8 rdmap_op : 4; /* opcode of current frame */
};
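
/*
 * Note on siw_rx_stream: for the FPDU part currently worked on,
 * fpdu_part_rcvd and fpdu_part_rem complement each other; their sum
 * always equals the total size of that part (header, payload or
 * trailer).
 */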

struct siw_rx_fpdu {
	/*
	 * Local destination memory of inbound RDMA operation.
	 * Valid, according to wqe->wr_status
	 */
	struct siw_wqe wqe_active;

	unsigned int pbl_idx; /* Index into current PBL */
	unsigned int sge_idx; /* current sge in rx */
	unsigned int sge_off; /* already rcvd in curr. sge */

	char first_ddp_seg; /* this is the first DDP seg */
	char more_ddp_segs; /* more DDP segs expected */
	u8 prev_rdmap_op : 4; /* opcode of prev frame */
};

/*
 * Shorthands for short packets w/o payload
 * to be transmitted more efficiently.
 */
struct siw_send_pkt {
	struct iwarp_send send;
	__be32 crc;
};

struct siw_write_pkt {
	struct iwarp_rdma_write write;
	__be32 crc;
};

struct siw_rreq_pkt {
	struct iwarp_rdma_rreq rreq;
	__be32 crc;
};

struct siw_rresp_pkt {
	struct iwarp_rdma_rresp rresp;
	__be32 crc;
};

struct siw_iwarp_tx {
	union {
		union iwarp_hdr hdr;

		/* Generic part of FPDU header */
		struct iwarp_ctrl ctrl;
		struct iwarp_ctrl_untagged c_untagged;
		struct iwarp_ctrl_tagged c_tagged;

		/* FPDU headers */
		struct iwarp_rdma_write rwrite;
		struct iwarp_rdma_rreq rreq;
		struct iwarp_rdma_rresp rresp;
		struct iwarp_terminate terminate;
		struct iwarp_send send;
		struct iwarp_send_inv send_inv;

		/* complete short FPDUs */
		struct siw_send_pkt send_pkt;
		struct siw_write_pkt write_pkt;
		struct siw_rreq_pkt rreq_pkt;
		struct siw_rresp_pkt rresp_pkt;
	} pkt;

	struct mpa_trailer trailer;
	/* DDP MSN for untagged messages */
	u32 ddp_msn[RDMAP_UNTAGGED_QN_COUNT];

	enum siw_tx_ctx state;
	u16 ctrl_len; /* ddp+rdmap hdr */
	u16 ctrl_sent;
	int burst;
	int bytes_unsent; /* ddp payload bytes */

	struct shash_desc *mpa_crc_hd;

	u8 do_crc : 1; /* do crc for segment */
	u8 use_sendpage : 1; /* send w/o copy */
	u8 tx_suspend : 1; /* stop sending DDP segs. */
	u8 pad : 2; /* # pad in current fpdu */
	u8 orq_fence : 1; /* ORQ full or Send fenced */
	u8 in_syscall : 1; /* TX out of user context */
	u8 zcopy_tx : 1; /* Use TCP_SENDPAGE if possible */
	u8 gso_seg_limit; /* Maximum segments for GSO, 0 = unbound */

	u16 fpdu_len; /* len of FPDU to tx */
	unsigned int tcp_seglen; /* remaining tcp seg space */

	struct siw_wqe wqe_active;

	int pbl_idx; /* Index into current PBL */
	int sge_idx; /* current sge in tx */
	u32 sge_off; /* already sent in curr. sge */
};

struct siw_qp {
	struct ib_qp base_qp;
	struct siw_device *sdev;
	struct kref ref;
	struct list_head devq;
	int tx_cpu;
	struct siw_qp_attrs attrs;

	struct siw_cep *cep;
	struct rw_semaphore state_lock;

	struct ib_pd *pd;
	struct siw_cq *scq;
	struct siw_cq *rcq;
	struct siw_srq *srq;

	struct siw_iwarp_tx tx_ctx; /* Transmit context */
	spinlock_t sq_lock;
	struct siw_sqe *sendq; /* send queue element array */
	uint32_t sq_get; /* consumer index into sq array */
	uint32_t sq_put; /* kernel prod. index into sq array */
	struct llist_node tx_list;

	struct siw_sqe *orq; /* outbound read queue element array */
	spinlock_t orq_lock;
	uint32_t orq_get; /* consumer index into orq array */
	uint32_t orq_put; /* shared producer index for ORQ */

	struct siw_rx_stream rx_stream;
	struct siw_rx_fpdu *rx_fpdu;
	struct siw_rx_fpdu rx_tagged;
	struct siw_rx_fpdu rx_untagged;
	spinlock_t rq_lock;
	struct siw_rqe *recvq; /* recv queue element array */
	uint32_t rq_get; /* consumer index into rq array */
	uint32_t rq_put; /* kernel prod. index into rq array */

	struct siw_sqe *irq; /* inbound read queue element array */
	uint32_t irq_get; /* consumer index into irq array */
	uint32_t irq_put; /* producer index into irq array */
	int irq_burst;

	struct { /* information to be carried in TERMINATE pkt, if valid */
		u8 valid;
		u8 in_tx;
		u8 layer : 4, etype : 4;
		u8 ecode;
	} term_info;
	struct rdma_user_mmap_entry *sq_entry; /* mmap info for SQE array */
	struct rdma_user_mmap_entry *rq_entry; /* mmap info for RQE array */
	struct rcu_head rcu;
};
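
/*
 * All four work queues of a QP (SQ, RQ, IRQ, ORQ) use the same ring
 * convention also visible in the inline helpers further below: 'put'
 * is the free running producer counter, 'get' the free running
 * consumer counter, and a counter value maps to an array slot modulo
 * the queue size. Hypothetical helper, for illustration:
 */
static inline struct siw_sqe *siw_sq_slot_sketch(struct siw_qp *qp, u32 idx)
{
	return &qp->sendq[idx % qp->attrs.sq_size];
}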

/* helper macros */
#define rx_qp(rx) container_of(rx, struct siw_qp, rx_stream)
#define tx_qp(tx) container_of(tx, struct siw_qp, tx_ctx)
#define tx_wqe(qp) (&(qp)->tx_ctx.wqe_active)
#define rx_wqe(rctx) (&(rctx)->wqe_active)
#define rx_mem(rctx) ((rctx)->wqe_active.mem[0])
#define tx_type(wqe) ((wqe)->sqe.opcode)
#define rx_type(wqe) ((wqe)->rqe.opcode)
#define tx_flags(wqe) ((wqe)->sqe.flags)

struct iwarp_msg_info {
	int hdr_len;
	struct iwarp_ctrl ctrl;
	int (*rx_data)(struct siw_qp *qp);
};

struct siw_user_mmap_entry {
	struct rdma_user_mmap_entry rdma_entry;
	void *address;
};

/* Global siw parameters. Currently set in siw_main.c */
extern const bool zcopy_tx;
extern const bool try_gso;
extern const bool loopback_enabled;
extern const bool mpa_crc_required;
extern const bool mpa_crc_strict;
extern const bool siw_tcp_nagle;
extern u_char mpa_version;
extern const bool peer_to_peer;
extern struct task_struct *siw_tx_thread[];

extern struct crypto_shash *siw_crypto_shash;
extern struct iwarp_msg_info iwarp_pktinfo[RDMAP_TERMINATE + 1];

/* QP general functions */
int siw_qp_modify(struct siw_qp *qp, struct siw_qp_attrs *attr,
		  enum siw_qp_attr_mask mask);
int siw_qp_mpa_rts(struct siw_qp *qp, enum mpa_v2_ctrl ctrl);
void siw_qp_llp_close(struct siw_qp *qp);
void siw_qp_cm_drop(struct siw_qp *qp, int schedule);
void siw_send_terminate(struct siw_qp *qp);

void siw_qp_get_ref(struct ib_qp *qp);
void siw_qp_put_ref(struct ib_qp *qp);
int siw_qp_add(struct siw_device *sdev, struct siw_qp *qp);
void siw_free_qp(struct kref *ref);

void siw_init_terminate(struct siw_qp *qp, enum term_elayer layer,
			u8 etype, u8 ecode, int in_tx);
enum ddp_ecode siw_tagged_error(enum siw_access_state state);
enum rdmap_ecode siw_rdmap_error(enum siw_access_state state);

void siw_read_to_orq(struct siw_sqe *rreq, struct siw_sqe *sqe);
int siw_sqe_complete(struct siw_qp *qp, struct siw_sqe *sqe, u32 bytes,
		     enum siw_wc_status status);
int siw_rqe_complete(struct siw_qp *qp, struct siw_rqe *rqe, u32 bytes,
		     u32 inval_stag, enum siw_wc_status status);
void siw_qp_llp_data_ready(struct sock *sk);
void siw_qp_llp_write_space(struct sock *sk);

/* QP TX path functions */
int siw_run_sq(void *arg);
int siw_qp_sq_process(struct siw_qp *qp);
int siw_sq_start(struct siw_qp *qp);
int siw_activate_tx(struct siw_qp *qp);
void siw_stop_tx_thread(int nr_cpu);
int siw_get_tx_cpu(struct siw_device *sdev);
void siw_put_tx_cpu(int cpu);

/* QP RX path functions */
int siw_proc_send(struct siw_qp *qp);
int siw_proc_rreq(struct siw_qp *qp);
int siw_proc_rresp(struct siw_qp *qp);
int siw_proc_write(struct siw_qp *qp);
int siw_proc_terminate(struct siw_qp *qp);

int siw_tcp_rx_data(read_descriptor_t *rd_desc, struct sk_buff *skb,
		    unsigned int off, size_t len);
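
/*
 * RDMA WRITE and RDMA READ RESPONSE frames target tagged buffers and
 * are handled in the rx_tagged context; all other opcodes consume
 * untagged (receive queue) buffers via rx_untagged.
 */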
static inline void set_rx_fpdu_context(struct siw_qp *qp, u8 opcode)
{
	if (opcode == RDMAP_RDMA_WRITE || opcode == RDMAP_RDMA_READ_RESP)
		qp->rx_fpdu = &qp->rx_tagged;
	else
		qp->rx_fpdu = &qp->rx_untagged;

	qp->rx_stream.rdmap_op = opcode;
}

static inline struct siw_ucontext *to_siw_ctx(struct ib_ucontext *base_ctx)
{
	return container_of(base_ctx, struct siw_ucontext, base_ucontext);
}

static inline struct siw_qp *to_siw_qp(struct ib_qp *base_qp)
{
	return container_of(base_qp, struct siw_qp, base_qp);
}

static inline struct siw_cq *to_siw_cq(struct ib_cq *base_cq)
{
	return container_of(base_cq, struct siw_cq, base_cq);
}

static inline struct siw_srq *to_siw_srq(struct ib_srq *base_srq)
{
	return container_of(base_srq, struct siw_srq, base_srq);
}

static inline struct siw_device *to_siw_dev(struct ib_device *base_dev)
{
	return container_of(base_dev, struct siw_device, base_dev);
}

static inline struct siw_mr *to_siw_mr(struct ib_mr *base_mr)
{
	return container_of(base_mr, struct siw_mr, base_mr);
}

static inline struct siw_user_mmap_entry *
to_siw_mmap_entry(struct rdma_user_mmap_entry *rdma_mmap)
{
	return container_of(rdma_mmap, struct siw_user_mmap_entry, rdma_entry);
}
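
/*
 * Resolve a QP ID to its object. kref_get_unless_zero() makes the
 * lookup safe against a concurrent final kref_put(): a QP whose
 * refcount already dropped to zero is treated as not found.
 */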
static inline struct siw_qp *siw_qp_id2obj(struct siw_device *sdev, int id)
{
	struct siw_qp *qp;

	rcu_read_lock();
	qp = xa_load(&sdev->qp_xa, id);
	if (likely(qp && kref_get_unless_zero(&qp->ref))) {
		rcu_read_unlock();
		return qp;
	}
	rcu_read_unlock();
	return NULL;
}

static inline u32 qp_id(struct siw_qp *qp)
{
	return qp->base_qp.qp_num;
}

static inline void siw_qp_get(struct siw_qp *qp)
{
	kref_get(&qp->ref);
}

static inline void siw_qp_put(struct siw_qp *qp)
{
	kref_put(&qp->ref, siw_free_qp);
}
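
/*
 * Note on the queue element helpers below: a slot with flags == 0 is
 * free, while SIW_WQE_VALID marks a posted work request. Since the
 * arrays may be mmapped and written by the producer concurrently,
 * READ_ONCE() is used when inspecting the shared flags word.
 */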

static inline int siw_sq_empty(struct siw_qp *qp)
{
	struct siw_sqe *sqe = &qp->sendq[qp->sq_get % qp->attrs.sq_size];

	return READ_ONCE(sqe->flags) == 0;
}

static inline struct siw_sqe *sq_get_next(struct siw_qp *qp)
{
	struct siw_sqe *sqe = &qp->sendq[qp->sq_get % qp->attrs.sq_size];

	if (READ_ONCE(sqe->flags) & SIW_WQE_VALID)
		return sqe;

	return NULL;
}

static inline struct siw_sqe *orq_get_current(struct siw_qp *qp)
{
	return &qp->orq[qp->orq_get % qp->attrs.orq_size];
}

static inline struct siw_sqe *orq_get_free(struct siw_qp *qp)
{
	struct siw_sqe *orq_e = &qp->orq[qp->orq_put % qp->attrs.orq_size];

	if (READ_ONCE(orq_e->flags) == 0)
		return orq_e;

	return NULL;
}

static inline int siw_orq_empty(struct siw_qp *qp)
{
	return qp->orq[qp->orq_get % qp->attrs.orq_size].flags == 0 ? 1 : 0;
}

static inline struct siw_sqe *irq_alloc_free(struct siw_qp *qp)
{
	struct siw_sqe *irq_e = &qp->irq[qp->irq_put % qp->attrs.irq_size];

	if (READ_ONCE(irq_e->flags) == 0) {
		qp->irq_put++;
		return irq_e;
	}
	return NULL;
}
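
/*
 * Hypothetical usage sketch, for illustration only: the RX path would
 * reserve an IRQ slot for each inbound Read Request and treat a full
 * IRQ (peer exceeded the negotiated IRD) as a protocol error.
 */
static inline int siw_irq_reserve_sketch(struct siw_qp *qp,
					 const struct siw_sqe *rreq)
{
	struct siw_sqe *irq_e = irq_alloc_free(qp);

	if (!irq_e)
		return -EPROTO;
	*irq_e = *rreq;
	return 0;
}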

static inline __wsum siw_csum_update(const void *buff, int len, __wsum sum)
{
	return (__force __wsum)crc32c((__force __u32)sum, buff, len);
}

static inline __wsum siw_csum_combine(__wsum csum, __wsum csum2, int offset,
				      int len)
{
	return (__force __wsum)__crc32c_le_combine((__force __u32)csum,
						   (__force __u32)csum2, len);
}
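
/*
 * Update the MPA CRC with 'len' bytes of skb payload in place:
 * __skb_checksum() is reused with the crc32c based update/combine
 * callbacks above, and the running CRC value lives directly in the
 * shash descriptor context.
 */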
static inline void siw_crc_skb(struct siw_rx_stream *srx, unsigned int len)
{
	const struct skb_checksum_ops siw_cs_ops = {
		.update = siw_csum_update,
		.combine = siw_csum_combine,
	};
	__wsum crc = *(u32 *)shash_desc_ctx(srx->mpa_crc_hd);

	crc = __skb_checksum(srx->skb, srx->skb_offset, len, crc,
			     &siw_cs_ops);
	*(u32 *)shash_desc_ctx(srx->mpa_crc_hd) = crc;
}

#define siw_dbg(ibdev, fmt, ...) \
	ibdev_dbg(ibdev, "%s: " fmt, __func__, ##__VA_ARGS__)

#define siw_dbg_qp(qp, fmt, ...) \
	ibdev_dbg(&qp->sdev->base_dev, "QP[%u] %s: " fmt, qp_id(qp), __func__, \
		  ##__VA_ARGS__)

#define siw_dbg_cq(cq, fmt, ...) \
	ibdev_dbg(cq->base_cq.device, "CQ[%u] %s: " fmt, cq->id, __func__, \
		  ##__VA_ARGS__)

#define siw_dbg_pd(pd, fmt, ...) \
	ibdev_dbg(pd->device, "PD[%u] %s: " fmt, pd->res.id, __func__, \
		  ##__VA_ARGS__)

#define siw_dbg_mem(mem, fmt, ...) \
	ibdev_dbg(&mem->sdev->base_dev, \
		  "MEM[0x%08x] %s: " fmt, mem->stag, __func__, ##__VA_ARGS__)

#define siw_dbg_cep(cep, fmt, ...) \
	ibdev_dbg(&cep->sdev->base_dev, "CEP[0x%pK] %s: " fmt, \
		  cep, __func__, ##__VA_ARGS__)

void siw_cq_flush(struct siw_cq *cq);
void siw_sq_flush(struct siw_qp *qp);
void siw_rq_flush(struct siw_qp *qp);
int siw_reap_cqe(struct siw_cq *cq, struct ib_wc *wc);

#endif