net: ena: Add validation for completion descriptors consistency
drivers/net/ethernet/amazon/ena/ena_eth_com.c (linux-block.git)
// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
/*
 * Copyright 2015-2020 Amazon.com, Inc. or its affiliates. All rights reserved.
 */

#include "ena_eth_com.h"

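/* The device reports new completion descriptors by toggling a phase bit:
 * io_cq->phase flips each time the completion ring wraps, so a descriptor
 * whose PHASE field matches the expected phase was written by the device
 * in the current lap of the ring; any other value means the slot is stale.
 */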
static struct ena_eth_io_rx_cdesc_base *ena_com_get_next_rx_cdesc(
	struct ena_com_io_cq *io_cq)
{
	struct ena_eth_io_rx_cdesc_base *cdesc;
	u16 expected_phase, head_masked;
	u16 desc_phase;

	head_masked = io_cq->head & (io_cq->q_depth - 1);
	expected_phase = io_cq->phase;

	cdesc = (struct ena_eth_io_rx_cdesc_base *)(io_cq->cdesc_addr.virt_addr
			+ (head_masked * io_cq->cdesc_entry_size_in_bytes));

	desc_phase = (READ_ONCE(cdesc->status) & ENA_ETH_IO_RX_CDESC_BASE_PHASE_MASK) >>
		     ENA_ETH_IO_RX_CDESC_BASE_PHASE_SHIFT;

	if (desc_phase != expected_phase)
		return NULL;

	/* Make sure we read the rest of the descriptor after the phase bit
	 * has been read
	 */
	dma_rmb();

	return cdesc;
}

static void *get_sq_desc_regular_queue(struct ena_com_io_sq *io_sq)
{
	u16 tail_masked;
	u32 offset;

	tail_masked = io_sq->tail & (io_sq->q_depth - 1);

	offset = tail_masked * io_sq->desc_entry_size;

	return (void *)((uintptr_t)io_sq->desc_addr.virt_addr + offset);
}

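/* In LLQ (low latency queue) placement, descriptors are staged in a host
 * bounce buffer and then pushed into device memory one desc_list_entry at
 * a time via __iowrite64_copy(). When the device advertises a max tx
 * burst, each push consumes one unit of entries_in_tx_burst_left and the
 * write is refused once the budget runs out.
 */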
static int ena_com_write_bounce_buffer_to_dev(struct ena_com_io_sq *io_sq,
					      u8 *bounce_buffer)
{
	struct ena_com_llq_info *llq_info = &io_sq->llq_info;

	u16 dst_tail_mask;
	u32 dst_offset;

	dst_tail_mask = io_sq->tail & (io_sq->q_depth - 1);
	dst_offset = dst_tail_mask * llq_info->desc_list_entry_size;

	if (is_llq_max_tx_burst_exists(io_sq)) {
		if (unlikely(!io_sq->entries_in_tx_burst_left)) {
			netdev_err(ena_com_io_sq_to_ena_dev(io_sq)->net_device,
				   "Error: trying to send more packets than tx burst allows\n");
			return -ENOSPC;
		}

		io_sq->entries_in_tx_burst_left--;
		netdev_dbg(ena_com_io_sq_to_ena_dev(io_sq)->net_device,
			   "Decreasing entries_in_tx_burst_left of queue %d to %d\n", io_sq->qid,
			   io_sq->entries_in_tx_burst_left);
	}

	/* Make sure everything was written into the bounce buffer before
	 * writing the bounce buffer to the device
	 */
	wmb();

	/* The line is completed. Copy it to dev */
	__iowrite64_copy(io_sq->desc_addr.pbuf_dev_addr + dst_offset, bounce_buffer,
			 (llq_info->desc_list_entry_size) / 8);

	io_sq->tail++;

	/* Switch phase bit in case of wrap around */
	if (unlikely((io_sq->tail & (io_sq->q_depth - 1)) == 0))
		io_sq->phase ^= 1;

	return 0;
}

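/* For LLQ, the packet header is sent inline: it is copied into the same
 * bounce buffer entry as the descriptors, at the offset left after the
 * first descs_num_before_header descriptor slots. For host placement
 * this is a no-op.
 */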
static int ena_com_write_header_to_bounce(struct ena_com_io_sq *io_sq,
					  u8 *header_src,
					  u16 header_len)
{
	struct ena_com_llq_pkt_ctrl *pkt_ctrl = &io_sq->llq_buf_ctrl;
	struct ena_com_llq_info *llq_info = &io_sq->llq_info;
	u8 *bounce_buffer = pkt_ctrl->curr_bounce_buf;
	u16 header_offset;

	if (unlikely(io_sq->mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_HOST))
		return 0;

	header_offset =
		llq_info->descs_num_before_header * io_sq->desc_entry_size;

	if (unlikely((header_offset + header_len) > llq_info->desc_list_entry_size)) {
		netdev_err(ena_com_io_sq_to_ena_dev(io_sq)->net_device,
			   "Trying to write header larger than llq entry can accommodate\n");
		return -EFAULT;
	}

	if (unlikely(!bounce_buffer)) {
		netdev_err(ena_com_io_sq_to_ena_dev(io_sq)->net_device, "Bounce buffer is NULL\n");
		return -EFAULT;
	}

	memcpy(bounce_buffer + header_offset, header_src, header_len);

	return 0;
}

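/* LLQ variant of get_sq_desc(): descriptors are carved out of the current
 * bounce buffer line instead of the host-resident ring.
 */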
static void *get_sq_desc_llq(struct ena_com_io_sq *io_sq)
{
	struct ena_com_llq_pkt_ctrl *pkt_ctrl = &io_sq->llq_buf_ctrl;
	u8 *bounce_buffer;
	void *sq_desc;

	bounce_buffer = pkt_ctrl->curr_bounce_buf;

	if (unlikely(!bounce_buffer)) {
		netdev_err(ena_com_io_sq_to_ena_dev(io_sq)->net_device, "Bounce buffer is NULL\n");
		return NULL;
	}

	sq_desc = bounce_buffer + pkt_ctrl->idx * io_sq->desc_entry_size;
	pkt_ctrl->idx++;
	pkt_ctrl->descs_left_in_line--;

	return sq_desc;
}

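/* End-of-packet flush for LLQ: if the current bounce buffer line holds any
 * descriptors, write it to the device and start a fresh, zeroed line so
 * the next packet never shares an entry with this one.
 */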
static int ena_com_close_bounce_buffer(struct ena_com_io_sq *io_sq)
{
	struct ena_com_llq_pkt_ctrl *pkt_ctrl = &io_sq->llq_buf_ctrl;
	struct ena_com_llq_info *llq_info = &io_sq->llq_info;
	int rc;

	if (unlikely(io_sq->mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_HOST))
		return 0;

	/* bounce buffer was used, so write it and get a new one */
	if (likely(pkt_ctrl->idx)) {
		rc = ena_com_write_bounce_buffer_to_dev(io_sq,
							pkt_ctrl->curr_bounce_buf);
		if (unlikely(rc)) {
			netdev_err(ena_com_io_sq_to_ena_dev(io_sq)->net_device,
				   "Failed to write bounce buffer to device\n");
			return rc;
		}

		pkt_ctrl->curr_bounce_buf =
			ena_com_get_next_bounce_buffer(&io_sq->bounce_buf_ctrl);
		memset(io_sq->llq_buf_ctrl.curr_bounce_buf,
		       0x0, llq_info->desc_list_entry_size);
	}

	pkt_ctrl->idx = 0;
	pkt_ctrl->descs_left_in_line = llq_info->descs_num_before_header;
	return 0;
}

static void *get_sq_desc(struct ena_com_io_sq *io_sq)
{
	if (io_sq->mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_DEV)
		return get_sq_desc_llq(io_sq);

	return get_sq_desc_regular_queue(io_sq);
}

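/* LLQ tail update: once the current bounce buffer line has no descriptor
 * slots left, flush it to the device and reset the per-line bookkeeping
 * (idx and descs_left_in_line) according to the descriptor stride.
 */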
static int ena_com_sq_update_llq_tail(struct ena_com_io_sq *io_sq)
{
	struct ena_com_llq_pkt_ctrl *pkt_ctrl = &io_sq->llq_buf_ctrl;
	struct ena_com_llq_info *llq_info = &io_sq->llq_info;
	int rc;

	if (!pkt_ctrl->descs_left_in_line) {
		rc = ena_com_write_bounce_buffer_to_dev(io_sq,
							pkt_ctrl->curr_bounce_buf);
		if (unlikely(rc)) {
			netdev_err(ena_com_io_sq_to_ena_dev(io_sq)->net_device,
				   "Failed to write bounce buffer to device\n");
			return rc;
		}

		pkt_ctrl->curr_bounce_buf =
			ena_com_get_next_bounce_buffer(&io_sq->bounce_buf_ctrl);
		memset(io_sq->llq_buf_ctrl.curr_bounce_buf,
		       0x0, llq_info->desc_list_entry_size);

		pkt_ctrl->idx = 0;
		if (unlikely(llq_info->desc_stride_ctrl == ENA_ADMIN_SINGLE_DESC_PER_ENTRY))
			pkt_ctrl->descs_left_in_line = 1;
		else
			pkt_ctrl->descs_left_in_line =
				llq_info->desc_list_entry_size / io_sq->desc_entry_size;
	}

	return 0;
}

static int ena_com_sq_update_tail(struct ena_com_io_sq *io_sq)
{
	if (io_sq->mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_DEV)
		return ena_com_sq_update_llq_tail(io_sq);

	io_sq->tail++;

	/* Switch phase bit in case of wrap around */
	if (unlikely((io_sq->tail & (io_sq->q_depth - 1)) == 0))
		io_sq->phase ^= 1;

	return 0;
}

static struct ena_eth_io_rx_cdesc_base *
	ena_com_rx_cdesc_idx_to_ptr(struct ena_com_io_cq *io_cq, u16 idx)
{
	idx &= (io_cq->q_depth - 1);
	return (struct ena_eth_io_rx_cdesc_base *)
		((uintptr_t)io_cq->cdesc_addr.virt_addr +
			idx * io_cq->cdesc_entry_size_in_bytes);
}

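/* Gather all completion descriptors of a single RX packet, starting from
 * the current head. Consistency validation: the FIRST bit may only be set
 * on descriptor #0 of a packet; encountering it mid-packet means the
 * completion stream is corrupt, so the packet is rejected with -EFAULT
 * instead of being handed up the stack.
 */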
static int ena_com_cdesc_rx_pkt_get(struct ena_com_io_cq *io_cq,
				    u16 *first_cdesc_idx,
				    u16 *num_descs)
{
	u16 count = io_cq->cur_rx_pkt_cdesc_count, head_masked;
	struct ena_eth_io_rx_cdesc_base *cdesc;
	u32 last = 0;

	do {
		u32 status;

		cdesc = ena_com_get_next_rx_cdesc(io_cq);
		if (!cdesc)
			break;
		status = READ_ONCE(cdesc->status);

		ena_com_cq_inc_head(io_cq);
		if (unlikely((status & ENA_ETH_IO_RX_CDESC_BASE_FIRST_MASK) >>
			     ENA_ETH_IO_RX_CDESC_BASE_FIRST_SHIFT && count != 0)) {
			struct ena_com_dev *dev = ena_com_io_cq_to_ena_dev(io_cq);

			netdev_err(dev->net_device,
				   "First bit is on in descriptor #%d on q_id: %d, req_id: %u\n",
				   count, io_cq->qid, cdesc->req_id);
			return -EFAULT;
		}
		count++;
		last = (status & ENA_ETH_IO_RX_CDESC_BASE_LAST_MASK) >>
		       ENA_ETH_IO_RX_CDESC_BASE_LAST_SHIFT;
	} while (!last);

	if (last) {
		*first_cdesc_idx = io_cq->cur_rx_pkt_cdesc_start_idx;

		head_masked = io_cq->head & (io_cq->q_depth - 1);

		*num_descs = count;
		io_cq->cur_rx_pkt_cdesc_count = 0;
		io_cq->cur_rx_pkt_cdesc_start_idx = head_masked;

		netdev_dbg(ena_com_io_cq_to_ena_dev(io_cq)->net_device,
			   "ENA q_id: %d packets were completed. first desc idx %u descs# %d\n",
			   io_cq->qid, *first_cdesc_idx, count);
	} else {
		io_cq->cur_rx_pkt_cdesc_count = count;
		*num_descs = 0;
	}

	return 0;
}

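/* Build the extended TX meta descriptor carrying offload state: the MSS
 * (split into bits 0-9 in word2 and bits 10-13 in len_ctrl) plus the
 * L3/L4 header lengths and offsets used for TSO and checksum offload.
 */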
static int ena_com_create_meta(struct ena_com_io_sq *io_sq,
			       struct ena_com_tx_meta *ena_meta)
{
	struct ena_eth_io_tx_meta_desc *meta_desc = NULL;

	meta_desc = get_sq_desc(io_sq);
	if (unlikely(!meta_desc))
		return -EFAULT;

	memset(meta_desc, 0x0, sizeof(struct ena_eth_io_tx_meta_desc));

	meta_desc->len_ctrl |= ENA_ETH_IO_TX_META_DESC_META_DESC_MASK;

	meta_desc->len_ctrl |= ENA_ETH_IO_TX_META_DESC_EXT_VALID_MASK;

	/* bits 0-9 of the mss */
	meta_desc->word2 |= ((u32)ena_meta->mss <<
		ENA_ETH_IO_TX_META_DESC_MSS_LO_SHIFT) &
		ENA_ETH_IO_TX_META_DESC_MSS_LO_MASK;
	/* bits 10-13 of the mss */
	meta_desc->len_ctrl |= ((ena_meta->mss >> 10) <<
		ENA_ETH_IO_TX_META_DESC_MSS_HI_SHIFT) &
		ENA_ETH_IO_TX_META_DESC_MSS_HI_MASK;

	/* Extended meta desc */
	meta_desc->len_ctrl |= ENA_ETH_IO_TX_META_DESC_ETH_META_TYPE_MASK;
	meta_desc->len_ctrl |= ((u32)io_sq->phase <<
		ENA_ETH_IO_TX_META_DESC_PHASE_SHIFT) &
		ENA_ETH_IO_TX_META_DESC_PHASE_MASK;

	meta_desc->len_ctrl |= ENA_ETH_IO_TX_META_DESC_FIRST_MASK;
	meta_desc->len_ctrl |= ENA_ETH_IO_TX_META_DESC_META_STORE_MASK;

	meta_desc->word2 |= ena_meta->l3_hdr_len &
		ENA_ETH_IO_TX_META_DESC_L3_HDR_LEN_MASK;
	meta_desc->word2 |= (ena_meta->l3_hdr_offset <<
		ENA_ETH_IO_TX_META_DESC_L3_HDR_OFF_SHIFT) &
		ENA_ETH_IO_TX_META_DESC_L3_HDR_OFF_MASK;

	meta_desc->word2 |= ((u32)ena_meta->l4_hdr_len <<
		ENA_ETH_IO_TX_META_DESC_L4_HDR_LEN_IN_WORDS_SHIFT) &
		ENA_ETH_IO_TX_META_DESC_L4_HDR_LEN_IN_WORDS_MASK;

	return ena_com_sq_update_tail(io_sq);
}

static int ena_com_create_and_store_tx_meta_desc(struct ena_com_io_sq *io_sq,
						 struct ena_com_tx_ctx *ena_tx_ctx,
						 bool *have_meta)
{
	struct ena_com_tx_meta *ena_meta = &ena_tx_ctx->ena_meta;

	/* When disable meta caching is set, don't bother to save the meta and
	 * compare it to the stored version, just create the meta
	 */
	if (io_sq->disable_meta_caching) {
		*have_meta = true;
		return ena_com_create_meta(io_sq, ena_meta);
	}

	if (ena_com_meta_desc_changed(io_sq, ena_tx_ctx)) {
		*have_meta = true;
		/* Cache the meta desc */
		memcpy(&io_sq->cached_tx_meta, ena_meta,
		       sizeof(struct ena_com_tx_meta));
		return ena_com_create_meta(io_sq, ena_meta);
	}

	*have_meta = false;
	return 0;
}

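/* Decode the status word of a completion descriptor into the RX context:
 * L3/L4 protocol indices, checksum verdicts, hash and fragment flag.
 */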
static void ena_com_rx_set_flags(struct ena_com_io_cq *io_cq,
				 struct ena_com_rx_ctx *ena_rx_ctx,
				 struct ena_eth_io_rx_cdesc_base *cdesc)
{
	ena_rx_ctx->l3_proto = cdesc->status &
		ENA_ETH_IO_RX_CDESC_BASE_L3_PROTO_IDX_MASK;
	ena_rx_ctx->l4_proto =
		(cdesc->status & ENA_ETH_IO_RX_CDESC_BASE_L4_PROTO_IDX_MASK) >>
		ENA_ETH_IO_RX_CDESC_BASE_L4_PROTO_IDX_SHIFT;
	ena_rx_ctx->l3_csum_err =
		!!((cdesc->status & ENA_ETH_IO_RX_CDESC_BASE_L3_CSUM_ERR_MASK) >>
		   ENA_ETH_IO_RX_CDESC_BASE_L3_CSUM_ERR_SHIFT);
	ena_rx_ctx->l4_csum_err =
		!!((cdesc->status & ENA_ETH_IO_RX_CDESC_BASE_L4_CSUM_ERR_MASK) >>
		   ENA_ETH_IO_RX_CDESC_BASE_L4_CSUM_ERR_SHIFT);
	ena_rx_ctx->l4_csum_checked =
		!!((cdesc->status & ENA_ETH_IO_RX_CDESC_BASE_L4_CSUM_CHECKED_MASK) >>
		   ENA_ETH_IO_RX_CDESC_BASE_L4_CSUM_CHECKED_SHIFT);
	ena_rx_ctx->hash = cdesc->hash;
	ena_rx_ctx->frag =
		(cdesc->status & ENA_ETH_IO_RX_CDESC_BASE_IPV4_FRAG_MASK) >>
		ENA_ETH_IO_RX_CDESC_BASE_IPV4_FRAG_SHIFT;

	netdev_dbg(ena_com_io_cq_to_ena_dev(io_cq)->net_device,
		   "l3_proto %d l4_proto %d l3_csum_err %d l4_csum_err %d hash %d frag %d cdesc_status %x\n",
		   ena_rx_ctx->l3_proto, ena_rx_ctx->l4_proto, ena_rx_ctx->l3_csum_err,
		   ena_rx_ctx->l4_csum_err, ena_rx_ctx->hash, ena_rx_ctx->frag, cdesc->status);
}

/*****************************************************************************/
/*****************************    API    **********************************/
/*****************************************************************************/

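/* Main TX path. The flow is:
 *   1. check that num_bufs + 1 SQ entries are free (+1 for a possible
 *      meta descriptor)
 *   2. push the header (inline via the bounce buffer in LLQ mode)
 *   3. emit a meta descriptor if the offload state changed or meta
 *      caching is disabled
 *   4. emit one TX descriptor per buffer; the first buffer shares its
 *      descriptor with the header
 *   5. mark the last descriptor and flush any open bounce buffer line
 * On return, *nb_hw_desc holds the number of SQ entries consumed.
 */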
int ena_com_prepare_tx(struct ena_com_io_sq *io_sq,
		       struct ena_com_tx_ctx *ena_tx_ctx,
		       int *nb_hw_desc)
{
	struct ena_eth_io_tx_desc *desc = NULL;
	struct ena_com_buf *ena_bufs = ena_tx_ctx->ena_bufs;
	void *buffer_to_push = ena_tx_ctx->push_header;
	u16 header_len = ena_tx_ctx->header_len;
	u16 num_bufs = ena_tx_ctx->num_bufs;
	u16 start_tail = io_sq->tail;
	int i, rc;
	bool have_meta;
	u64 addr_hi;

	WARN(io_sq->direction != ENA_COM_IO_QUEUE_DIRECTION_TX, "wrong Q type");

	/* num_bufs + 1 for potential meta desc */
	if (unlikely(!ena_com_sq_have_enough_space(io_sq, num_bufs + 1))) {
		netdev_dbg(ena_com_io_sq_to_ena_dev(io_sq)->net_device,
			   "Not enough space in the tx queue\n");
		return -ENOMEM;
	}

	if (unlikely(header_len > io_sq->tx_max_header_size)) {
		netdev_err(ena_com_io_sq_to_ena_dev(io_sq)->net_device,
			   "Header size is too large %d max header: %d\n", header_len,
			   io_sq->tx_max_header_size);
		return -EINVAL;
	}

	if (unlikely(io_sq->mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_DEV && !buffer_to_push)) {
		netdev_err(ena_com_io_sq_to_ena_dev(io_sq)->net_device,
			   "Push header wasn't provided in LLQ mode\n");
		return -EINVAL;
	}

	rc = ena_com_write_header_to_bounce(io_sq, buffer_to_push, header_len);
	if (unlikely(rc))
		return rc;

	rc = ena_com_create_and_store_tx_meta_desc(io_sq, ena_tx_ctx, &have_meta);
	if (unlikely(rc)) {
		netdev_err(ena_com_io_sq_to_ena_dev(io_sq)->net_device,
			   "Failed to create and store tx meta desc\n");
		return rc;
	}

	/* If the caller doesn't want to send packets */
	if (unlikely(!num_bufs && !header_len)) {
		rc = ena_com_close_bounce_buffer(io_sq);
		if (rc)
			netdev_err(ena_com_io_sq_to_ena_dev(io_sq)->net_device,
				   "Failed to write buffers to LLQ\n");
		*nb_hw_desc = io_sq->tail - start_tail;
		return rc;
	}

	desc = get_sq_desc(io_sq);
	if (unlikely(!desc))
		return -EFAULT;
	memset(desc, 0x0, sizeof(struct ena_eth_io_tx_desc));

	/* Set first desc when we don't have meta descriptor */
	if (!have_meta)
		desc->len_ctrl |= ENA_ETH_IO_TX_DESC_FIRST_MASK;

	desc->buff_addr_hi_hdr_sz |= ((u32)header_len <<
		ENA_ETH_IO_TX_DESC_HEADER_LENGTH_SHIFT) &
		ENA_ETH_IO_TX_DESC_HEADER_LENGTH_MASK;
	desc->len_ctrl |= ((u32)io_sq->phase << ENA_ETH_IO_TX_DESC_PHASE_SHIFT) &
		ENA_ETH_IO_TX_DESC_PHASE_MASK;

	desc->len_ctrl |= ENA_ETH_IO_TX_DESC_COMP_REQ_MASK;

	/* Bits 0-9 */
	desc->meta_ctrl |= ((u32)ena_tx_ctx->req_id <<
		ENA_ETH_IO_TX_DESC_REQ_ID_LO_SHIFT) &
		ENA_ETH_IO_TX_DESC_REQ_ID_LO_MASK;

	desc->meta_ctrl |= (ena_tx_ctx->df <<
		ENA_ETH_IO_TX_DESC_DF_SHIFT) &
		ENA_ETH_IO_TX_DESC_DF_MASK;

	/* Bits 10-15 */
	desc->len_ctrl |= ((ena_tx_ctx->req_id >> 10) <<
		ENA_ETH_IO_TX_DESC_REQ_ID_HI_SHIFT) &
		ENA_ETH_IO_TX_DESC_REQ_ID_HI_MASK;

	if (ena_tx_ctx->meta_valid) {
		desc->meta_ctrl |= (ena_tx_ctx->tso_enable <<
			ENA_ETH_IO_TX_DESC_TSO_EN_SHIFT) &
			ENA_ETH_IO_TX_DESC_TSO_EN_MASK;
		desc->meta_ctrl |= ena_tx_ctx->l3_proto &
			ENA_ETH_IO_TX_DESC_L3_PROTO_IDX_MASK;
		desc->meta_ctrl |= (ena_tx_ctx->l4_proto <<
			ENA_ETH_IO_TX_DESC_L4_PROTO_IDX_SHIFT) &
			ENA_ETH_IO_TX_DESC_L4_PROTO_IDX_MASK;
		desc->meta_ctrl |= (ena_tx_ctx->l3_csum_enable <<
			ENA_ETH_IO_TX_DESC_L3_CSUM_EN_SHIFT) &
			ENA_ETH_IO_TX_DESC_L3_CSUM_EN_MASK;
		desc->meta_ctrl |= (ena_tx_ctx->l4_csum_enable <<
			ENA_ETH_IO_TX_DESC_L4_CSUM_EN_SHIFT) &
			ENA_ETH_IO_TX_DESC_L4_CSUM_EN_MASK;
		desc->meta_ctrl |= (ena_tx_ctx->l4_csum_partial <<
			ENA_ETH_IO_TX_DESC_L4_CSUM_PARTIAL_SHIFT) &
			ENA_ETH_IO_TX_DESC_L4_CSUM_PARTIAL_MASK;
	}

	for (i = 0; i < num_bufs; i++) {
		/* The first desc shares the same desc as the header */
		if (likely(i != 0)) {
			rc = ena_com_sq_update_tail(io_sq);
			if (unlikely(rc)) {
				netdev_err(ena_com_io_sq_to_ena_dev(io_sq)->net_device,
					   "Failed to update sq tail\n");
				return rc;
			}

			desc = get_sq_desc(io_sq);
			if (unlikely(!desc))
				return -EFAULT;

			memset(desc, 0x0, sizeof(struct ena_eth_io_tx_desc));

			desc->len_ctrl |= ((u32)io_sq->phase <<
				ENA_ETH_IO_TX_DESC_PHASE_SHIFT) &
				ENA_ETH_IO_TX_DESC_PHASE_MASK;
		}

		desc->len_ctrl |= ena_bufs->len &
			ENA_ETH_IO_TX_DESC_LENGTH_MASK;

		addr_hi = ((ena_bufs->paddr &
			GENMASK_ULL(io_sq->dma_addr_bits - 1, 32)) >> 32);

		desc->buff_addr_lo = (u32)ena_bufs->paddr;
		desc->buff_addr_hi_hdr_sz |= addr_hi &
			ENA_ETH_IO_TX_DESC_ADDR_HI_MASK;
		ena_bufs++;
	}

	/* set the last desc indicator */
	desc->len_ctrl |= ENA_ETH_IO_TX_DESC_LAST_MASK;

	rc = ena_com_sq_update_tail(io_sq);
	if (unlikely(rc)) {
		netdev_err(ena_com_io_sq_to_ena_dev(io_sq)->net_device,
			   "Failed to update sq tail of the last descriptor\n");
		return rc;
	}

	rc = ena_com_close_bounce_buffer(io_sq);

	*nb_hw_desc = io_sq->tail - start_tail;
	return rc;
}

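/* Poll one received packet from the completion queue: collect its
 * completion descriptors, record each buffer's length and req_id in
 * ena_rx_ctx (rejecting any req_id beyond the queue depth with -EIO),
 * and decode the flags from the last descriptor. ena_rx_ctx->descs == 0
 * means no complete packet is ready yet.
 */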
int ena_com_rx_pkt(struct ena_com_io_cq *io_cq,
		   struct ena_com_io_sq *io_sq,
		   struct ena_com_rx_ctx *ena_rx_ctx)
{
	struct ena_com_rx_buf_info *ena_buf = &ena_rx_ctx->ena_bufs[0];
	struct ena_eth_io_rx_cdesc_base *cdesc = NULL;
	u16 q_depth = io_cq->q_depth;
	u16 cdesc_idx = 0;
	u16 nb_hw_desc;
	u16 i = 0;
	int rc;

	WARN(io_cq->direction != ENA_COM_IO_QUEUE_DIRECTION_RX, "wrong Q type");

	rc = ena_com_cdesc_rx_pkt_get(io_cq, &cdesc_idx, &nb_hw_desc);
	if (unlikely(rc != 0))
		return -EFAULT;

	if (nb_hw_desc == 0) {
		ena_rx_ctx->descs = nb_hw_desc;
		return 0;
	}

	netdev_dbg(ena_com_io_cq_to_ena_dev(io_cq)->net_device,
		   "Fetch rx packet: queue %d completed desc: %d\n", io_cq->qid, nb_hw_desc);

	if (unlikely(nb_hw_desc > ena_rx_ctx->max_bufs)) {
		netdev_err(ena_com_io_cq_to_ena_dev(io_cq)->net_device,
			   "Too many RX cdescs (%d) > MAX(%d)\n", nb_hw_desc, ena_rx_ctx->max_bufs);
		return -ENOSPC;
	}

	cdesc = ena_com_rx_cdesc_idx_to_ptr(io_cq, cdesc_idx);
	ena_rx_ctx->pkt_offset = cdesc->offset;

	do {
		ena_buf[i].len = cdesc->length;
		ena_buf[i].req_id = cdesc->req_id;
		if (unlikely(ena_buf[i].req_id >= q_depth))
			return -EIO;

		if (++i >= nb_hw_desc)
			break;

		cdesc = ena_com_rx_cdesc_idx_to_ptr(io_cq, cdesc_idx + i);

	} while (1);

	/* Update SQ head ptr */
	io_sq->next_to_comp += nb_hw_desc;

	netdev_dbg(ena_com_io_cq_to_ena_dev(io_cq)->net_device,
		   "[%s][QID#%d] Updating SQ head to: %d\n", __func__, io_sq->qid,
		   io_sq->next_to_comp);

	/* Get rx flags from the last pkt */
	ena_com_rx_set_flags(io_cq, ena_rx_ctx, cdesc);

	ena_rx_ctx->descs = nb_hw_desc;

	return 0;
}

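/* Post a single RX buffer to the submission queue. Every RX descriptor is
 * marked FIRST | LAST and requests a completion.
 */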
int ena_com_add_single_rx_desc(struct ena_com_io_sq *io_sq,
			       struct ena_com_buf *ena_buf,
			       u16 req_id)
{
	struct ena_eth_io_rx_desc *desc;

	WARN(io_sq->direction != ENA_COM_IO_QUEUE_DIRECTION_RX, "wrong Q type");

	if (unlikely(!ena_com_sq_have_enough_space(io_sq, 1)))
		return -ENOSPC;

	desc = get_sq_desc(io_sq);
	if (unlikely(!desc))
		return -EFAULT;

	memset(desc, 0x0, sizeof(struct ena_eth_io_rx_desc));

	desc->length = ena_buf->len;

	desc->ctrl = ENA_ETH_IO_RX_DESC_FIRST_MASK |
		     ENA_ETH_IO_RX_DESC_LAST_MASK |
		     ENA_ETH_IO_RX_DESC_COMP_REQ_MASK |
		     (io_sq->phase & ENA_ETH_IO_RX_DESC_PHASE_MASK);

	desc->req_id = req_id;

	netdev_dbg(ena_com_io_sq_to_ena_dev(io_sq)->net_device,
		   "[%s] Adding single RX desc, Queue: %u, req_id: %u\n", __func__, io_sq->qid,
		   req_id);

	desc->buff_addr_lo = (u32)ena_buf->paddr;
	desc->buff_addr_hi =
		((ena_buf->paddr & GENMASK_ULL(io_sq->dma_addr_bits - 1, 32)) >> 32);

	return ena_com_sq_update_tail(io_sq);
}

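/* The CQ is considered empty when the descriptor at the current head does
 * not carry the expected phase bit, i.e. nothing new has been written by
 * the device since the last poll.
 */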
bool ena_com_cq_empty(struct ena_com_io_cq *io_cq)
{
	struct ena_eth_io_rx_cdesc_base *cdesc;

	cdesc = ena_com_get_next_rx_cdesc(io_cq);
	if (cdesc)
		return false;
	else
		return true;
}