/*
 * Copyright (C) 2015 Cavium, Inc.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of version 2 of the GNU General Public License
 * as published by the Free Software Foundation.
 */

#ifndef NICVF_QUEUES_H
#define NICVF_QUEUES_H

#include <linux/netdevice.h>
#include "q_struct.h"

#define MAX_QUEUE_SET 128
#define MAX_RCV_QUEUES_PER_QS 8
#define MAX_RCV_BUF_DESC_RINGS_PER_QS 2
#define MAX_SND_QUEUES_PER_QS 8
#define MAX_CMP_QUEUES_PER_QS 8

/* VF's queue interrupt ranges */
#define NICVF_INTR_ID_CQ 0
#define NICVF_INTR_ID_SQ 8
#define NICVF_INTR_ID_RBDR 16
#define NICVF_INTR_ID_MISC 18
#define NICVF_INTR_ID_QS_ERR 19

#define for_each_cq_irq(irq) \
	for (irq = NICVF_INTR_ID_CQ; irq < NICVF_INTR_ID_SQ; irq++)
#define for_each_sq_irq(irq) \
	for (irq = NICVF_INTR_ID_SQ; irq < NICVF_INTR_ID_RBDR; irq++)
#define for_each_rbdr_irq(irq) \
	for (irq = NICVF_INTR_ID_RBDR; irq < NICVF_INTR_ID_MISC; irq++)

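/*
 * Usage sketch (illustrative only; NICVF_INTR_CQ is assumed to be the
 * interrupt-type enum from nic.h): walking the CQ vector range to enable
 * every CQ interrupt of the VF, using nicvf_enable_intr() declared at the
 * bottom of this header.
 *
 *	int irq;
 *
 *	for_each_cq_irq(irq)
 *		nicvf_enable_intr(nic, NICVF_INTR_CQ, irq - NICVF_INTR_ID_CQ);
 */
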
#define RBDR_SIZE0 0ULL /* 8K entries */
#define RBDR_SIZE1 1ULL /* 16K entries */
#define RBDR_SIZE2 2ULL /* 32K entries */
#define RBDR_SIZE3 3ULL /* 64K entries */
#define RBDR_SIZE4 4ULL /* 128K entries */
#define RBDR_SIZE5 5ULL /* 256K entries */
#define RBDR_SIZE6 6ULL /* 512K entries */

#define SND_QUEUE_SIZE0 0ULL /* 1K entries */
#define SND_QUEUE_SIZE1 1ULL /* 2K entries */
#define SND_QUEUE_SIZE2 2ULL /* 4K entries */
#define SND_QUEUE_SIZE3 3ULL /* 8K entries */
#define SND_QUEUE_SIZE4 4ULL /* 16K entries */
#define SND_QUEUE_SIZE5 5ULL /* 32K entries */
#define SND_QUEUE_SIZE6 6ULL /* 64K entries */

#define CMP_QUEUE_SIZE0 0ULL /* 1K entries */
#define CMP_QUEUE_SIZE1 1ULL /* 2K entries */
#define CMP_QUEUE_SIZE2 2ULL /* 4K entries */
#define CMP_QUEUE_SIZE3 3ULL /* 8K entries */
#define CMP_QUEUE_SIZE4 4ULL /* 16K entries */
#define CMP_QUEUE_SIZE5 5ULL /* 32K entries */
#define CMP_QUEUE_SIZE6 6ULL /* 64K entries */

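/*
 * The *_SIZE values above are register encodings, not entry counts: a ring
 * with encoding n holds (1ULL << (n + 10)) entries for SQs/CQs and
 * (1ULL << (n + 13)) buffers for RBDRs, which is exactly how the
 * SND_QUEUE_LEN, CMP_QUEUE_LEN and RCV_BUF_COUNT defaults below are
 * derived. For example:
 *
 *	1ULL << (SND_QUEUE_SIZE4 + 10) = 16384 entries
 *	1ULL << (RBDR_SIZE0 + 13)      = 8192 buffers
 */
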
/* Default queue counts per QS, their lengths and threshold values */
#define RBDR_CNT 1
#define RCV_QUEUE_CNT 8
#define SND_QUEUE_CNT 8
#define CMP_QUEUE_CNT 8 /* Max of RCV and SND qcount */

#define SND_QSIZE SND_QUEUE_SIZE4
#define SND_QUEUE_LEN (1ULL << (SND_QSIZE + 10))
#define MAX_SND_QUEUE_LEN (1ULL << (SND_QUEUE_SIZE6 + 10))
#define SND_QUEUE_THRESH 2ULL
#define MIN_SQ_DESC_PER_PKT_XMIT 2
/* Timestamping is not enabled, otherwise this would be 2 */
#define MAX_CQE_PER_PKT_XMIT 1

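/*
 * A minimal transmit posts two SQ subdescriptors (assumed from the
 * subdescriptor layout in q_struct.h): one sq_hdr_subdesc followed by at
 * least one gather subdescriptor pointing at the packet data, hence
 * MIN_SQ_DESC_PER_PKT_XMIT = 2, while each packet completes with a single
 * TX CQE since timestamping is off.
 */
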
#define CMP_QSIZE CMP_QUEUE_SIZE4
#define CMP_QUEUE_LEN (1ULL << (CMP_QSIZE + 10))
#define CMP_QUEUE_CQE_THRESH 0
#define CMP_QUEUE_TIMER_THRESH 220 /* 10 usec */

#define RBDR_SIZE RBDR_SIZE0
#define RCV_BUF_COUNT (1ULL << (RBDR_SIZE + 13))
#define MAX_RCV_BUF_COUNT (1ULL << (RBDR_SIZE6 + 13))
#define RBDR_THRESH (RCV_BUF_COUNT / 2)
#define DMA_BUFFER_LEN 2048 /* In multiples of 128 bytes */
#define RCV_FRAG_LEN (SKB_DATA_ALIGN(DMA_BUFFER_LEN + NET_SKB_PAD) + \
	SKB_DATA_ALIGN(sizeof(struct skb_shared_info)) + \
	(NICVF_RCV_BUF_ALIGN_BYTES * 2))
#define RCV_DATA_OFFSET NICVF_RCV_BUF_ALIGN_BYTES

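/*
 * Worked example (illustrative; NET_SKB_PAD and sizeof(struct
 * skb_shared_info) are kernel-config dependent): assuming NET_SKB_PAD = 64,
 * a 320-byte skb_shared_info and 64-byte SKB_DATA_ALIGN granularity,
 *
 *	RCV_FRAG_LEN = SKB_DATA_ALIGN(2048 + 64)	-> 2112
 *		     + SKB_DATA_ALIGN(320)		->  320
 *		     + (128 * 2)			->  256
 *		     = 2688 bytes
 *
 * i.e. each receive fragment reserves room for the DMA buffer, the shared
 * info needed to later wrap the buffer in an skb, and alignment slack.
 */
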
#define MAX_CQES_FOR_TX ((SND_QUEUE_LEN / MIN_SQ_DESC_PER_PKT_XMIT) * \
	MAX_CQE_PER_PKT_XMIT)
#define RQ_CQ_DROP ((CMP_QUEUE_LEN - MAX_CQES_FOR_TX) / 256)

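/*
 * With the defaults above: SND_QUEUE_LEN = 16384, so MAX_CQES_FOR_TX =
 * (16384 / 2) * 1 = 8192 and, with CMP_QUEUE_LEN = 16384,
 * RQ_CQ_DROP = (16384 - 8192) / 256 = 32; the RX drop level is thus
 * expressed in units of 256 CQEs, reserving CQ space for the worst-case
 * number of TX completions.
 */
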
/* Descriptor size in bytes */
#define SND_QUEUE_DESC_SIZE 16
#define CMP_QUEUE_DESC_SIZE 512

/* Buffer / descriptor alignments */
#define NICVF_RCV_BUF_ALIGN 7
#define NICVF_RCV_BUF_ALIGN_BYTES (1ULL << NICVF_RCV_BUF_ALIGN)
#define NICVF_CQ_BASE_ALIGN_BYTES 512 /* 9 bits */
#define NICVF_SQ_BASE_ALIGN_BYTES 128 /* 7 bits */

#define NICVF_ALIGNED_ADDR(ADDR, ALIGN_BYTES) ALIGN(ADDR, ALIGN_BYTES)
#define NICVF_ADDR_ALIGN_LEN(ADDR, BYTES)\
	(NICVF_ALIGNED_ADDR(ADDR, BYTES) - BYTES)
#define NICVF_RCV_BUF_ALIGN_LEN(X)\
	(NICVF_ALIGNED_ADDR(X, NICVF_RCV_BUF_ALIGN_BYTES) - X)

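/*
 * Example (pure arithmetic on the macros above): with
 * NICVF_RCV_BUF_ALIGN_BYTES = 128,
 *
 *	NICVF_ALIGNED_ADDR(0x1008, 128) = 0x1080
 *	NICVF_RCV_BUF_ALIGN_LEN(0x1008) = 0x78 (bytes of padding needed)
 */
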
/* Queue enable/disable */
#define NICVF_SQ_EN BIT_ULL(19)

/* Queue reset */
#define NICVF_CQ_RESET BIT_ULL(41)
#define NICVF_SQ_RESET BIT_ULL(17)
#define NICVF_RBDR_RESET BIT_ULL(43)

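/*
 * Usage sketch (illustrative; NIC_QSET_CQ_0_7_CFG is assumed to be the
 * per-CQ config register from nic_reg.h): resetting a completion queue
 * before reconfiguring it.
 *
 *	nicvf_queue_reg_write(nic, NIC_QSET_CQ_0_7_CFG, qidx, NICVF_CQ_RESET);
 */
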
enum CQ_RX_ERRLVL_E {
	CQ_ERRLVL_MAC,
	CQ_ERRLVL_L2,
	CQ_ERRLVL_L3,
	CQ_ERRLVL_L4,
};

enum CQ_RX_ERROP_E {
	CQ_RX_ERROP_RE_NONE = 0x0,
	CQ_RX_ERROP_RE_PARTIAL = 0x1,
	CQ_RX_ERROP_RE_JABBER = 0x2,
	CQ_RX_ERROP_RE_FCS = 0x7,
	CQ_RX_ERROP_RE_TERMINATE = 0x9,
	CQ_RX_ERROP_RE_RX_CTL = 0xb,
	CQ_RX_ERROP_PREL2_ERR = 0x1f,
	CQ_RX_ERROP_L2_FRAGMENT = 0x20,
	CQ_RX_ERROP_L2_OVERRUN = 0x21,
	CQ_RX_ERROP_L2_PFCS = 0x22,
	CQ_RX_ERROP_L2_PUNY = 0x23,
	CQ_RX_ERROP_L2_MAL = 0x24,
	CQ_RX_ERROP_L2_OVERSIZE = 0x25,
	CQ_RX_ERROP_L2_UNDERSIZE = 0x26,
	CQ_RX_ERROP_L2_LENMISM = 0x27,
	CQ_RX_ERROP_L2_PCLP = 0x28,
	CQ_RX_ERROP_IP_NOT = 0x41,
	CQ_RX_ERROP_IP_CSUM_ERR = 0x42,
	CQ_RX_ERROP_IP_MAL = 0x43,
	CQ_RX_ERROP_IP_MALD = 0x44,
	CQ_RX_ERROP_IP_HOP = 0x45,
	CQ_RX_ERROP_L3_ICRC = 0x46,
	CQ_RX_ERROP_L3_PCLP = 0x47,
	CQ_RX_ERROP_L4_MAL = 0x61,
	CQ_RX_ERROP_L4_CHK = 0x62,
	CQ_RX_ERROP_UDP_LEN = 0x63,
	CQ_RX_ERROP_L4_PORT = 0x64,
	CQ_RX_ERROP_TCP_FLAG = 0x65,
	CQ_RX_ERROP_TCP_OFFSET = 0x66,
	CQ_RX_ERROP_L4_PCLP = 0x67,
	CQ_RX_ERROP_RBDR_TRUNC = 0x70,
};

enum CQ_TX_ERROP_E {
	CQ_TX_ERROP_GOOD = 0x0,
	CQ_TX_ERROP_DESC_FAULT = 0x10,
	CQ_TX_ERROP_HDR_CONS_ERR = 0x11,
	CQ_TX_ERROP_SUBDC_ERR = 0x12,
	CQ_TX_ERROP_IMM_SIZE_OFLOW = 0x80,
	CQ_TX_ERROP_DATA_SEQUENCE_ERR = 0x81,
	CQ_TX_ERROP_MEM_SEQUENCE_ERR = 0x82,
	CQ_TX_ERROP_LOCK_VIOL = 0x83,
	CQ_TX_ERROP_DATA_FAULT = 0x84,
	CQ_TX_ERROP_TSTMP_CONFLICT = 0x85,
	CQ_TX_ERROP_TSTMP_TIMEOUT = 0x86,
	CQ_TX_ERROP_MEM_FAULT = 0x87,
	CQ_TX_ERROP_CK_OVERLAP = 0x88,
	CQ_TX_ERROP_CK_OFLOW = 0x89,
	CQ_TX_ERROP_ENUM_LAST = 0x8a,
};

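/*
 * Usage sketch (illustrative; the send_status field is assumed from the
 * cqe_send_t layout in q_struct.h): nicvf_check_cqe_tx_errs(), declared at
 * the bottom of this header, maps these codes to error counters, e.g.:
 *
 *	switch (cqe_tx->send_status) {
 *	case CQ_TX_ERROP_GOOD:
 *		return 0;
 *	case CQ_TX_ERROP_DESC_FAULT:
 *		// bump a per-queue desc-fault counter
 *		break;
 *	}
 */
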
struct cmp_queue_stats {
	u64 l2_hdr_malformed;
	u64 ip_hdr_malformed;
	u64 ip_payload_malformed;
} ____cacheline_aligned_in_smp;

struct rx_tx_queue_stats {
	u64 bytes;
	u64 pkts;
} ____cacheline_aligned_in_smp;

struct q_desc_mem {
	dma_addr_t dma;
	u64 size;
	u16 q_len;
	dma_addr_t phys_base;
	void *base;
	void *unalign_base;
};

struct rbdr {
	bool enable;
	u32 dma_size;
	u32 frag_len;
	u32 thresh;	/* Threshold level for interrupt */
	void *desc;
	u32 head;
	u32 tail;
	struct q_desc_mem dmem;
} ____cacheline_aligned_in_smp;

struct rcv_queue {
	bool enable;
	struct rbdr *rbdr_start;
	struct rbdr *rbdr_cont;
	bool en_tcp_reassembly;
	u8 cq_qs;		/* CQ's QS to which this RQ is assigned */
	u8 cq_idx;		/* CQ index (0 to 7) in the QS */
	u8 cont_rbdr_qs;	/* Continue buffer ptrs - QS num */
	u8 cont_qs_rbdr_idx;	/* RBDR idx in the cont QS */
	u8 start_rbdr_qs;	/* First buffer ptrs - QS num */
	u8 start_qs_rbdr_idx;	/* RBDR idx in the above QS */
	u8 caching;
	struct rx_tx_queue_stats stats;
} ____cacheline_aligned_in_smp;

struct cmp_queue {
	bool enable;
	u16 thresh;
	spinlock_t lock;	/* lock to serialize processing CQEs */
	void *desc;
	struct q_desc_mem dmem;
	struct cmp_queue_stats stats;
} ____cacheline_aligned_in_smp;

struct snd_queue {
	bool enable;
	u8 cq_qs;	/* CQ's QS to which this SQ is pointing */
	u8 cq_idx;	/* CQ index (0 to 7) in the above QS */
	u16 thresh;
	atomic_t free_cnt;
	u32 head;
	u32 tail;
	u64 *skbuff;
	void *desc;

#define TSO_HEADER_SIZE 128
	/* For TSO segment's header */
	char *tso_hdrs;
	dma_addr_t tso_hdrs_phys;

	cpumask_t affinity_mask;
	struct q_desc_mem dmem;
	struct rx_tx_queue_stats stats;
} ____cacheline_aligned_in_smp;

struct queue_set {
	bool enable;
	bool be_en;
	u8 vnic_id;
	u8 rq_cnt;
	u8 cq_cnt;
	u64 cq_len;
	u8 sq_cnt;
	u64 sq_len;
	u8 rbdr_cnt;
	u64 rbdr_len;
	struct rcv_queue rq[MAX_RCV_QUEUES_PER_QS];
	struct cmp_queue cq[MAX_CMP_QUEUES_PER_QS];
	struct snd_queue sq[MAX_SND_QUEUES_PER_QS];
	struct rbdr rbdr[MAX_RCV_BUF_DESC_RINGS_PER_QS];
} ____cacheline_aligned_in_smp;

#define GET_RBDR_DESC(RING, idx)\
	(&(((struct rbdr_entry_t *)((RING)->desc))[idx]))
#define GET_SQ_DESC(RING, idx)\
	(&(((struct sq_hdr_subdesc *)((RING)->desc))[idx]))
#define GET_CQ_DESC(RING, idx)\
	(&(((union cq_desc_t *)((RING)->desc))[idx]))

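/*
 * Usage sketch (illustrative): descriptors are addressed by ring index,
 * e.g. the SQ header subdescriptor at the current tail:
 *
 *	struct sq_hdr_subdesc *hdr = GET_SQ_DESC(sq, sq->tail);
 */
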
/* CQ status bits */
#define CQ_WR_FULL BIT(26)
#define CQ_WR_DISABLE BIT(25)
#define CQ_WR_FAULT BIT(24)
#define CQ_CQE_COUNT (0xFFFF << 0)

#define CQ_ERR_MASK (CQ_WR_FULL | CQ_WR_DISABLE | CQ_WR_FAULT)

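/*
 * Usage sketch (illustrative; NIC_QSET_CQ_0_7_STATUS is assumed to be the
 * per-CQ status register from nic_reg.h): checking a CQ for write errors
 * and reading the count of pending CQEs.
 *
 *	u64 status = nicvf_queue_reg_read(nic, NIC_QSET_CQ_0_7_STATUS, qidx);
 *
 *	if (status & CQ_ERR_MASK)
 *		return;				// full/disabled/fault
 *	cqe_count = status & CQ_CQE_COUNT;
 */
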
int nicvf_set_qset_resources(struct nicvf *nic);
int nicvf_config_data_transfer(struct nicvf *nic, bool enable);
void nicvf_qset_config(struct nicvf *nic, bool enable);
void nicvf_cmp_queue_config(struct nicvf *nic, struct queue_set *qs,
			    int qidx, bool enable);

void nicvf_sq_enable(struct nicvf *nic, struct snd_queue *sq, int qidx);
void nicvf_sq_disable(struct nicvf *nic, int qidx);
void nicvf_put_sq_desc(struct snd_queue *sq, int desc_cnt);
void nicvf_sq_free_used_descs(struct net_device *netdev,
			      struct snd_queue *sq, int qidx);
int nicvf_sq_append_skb(struct nicvf *nic, struct sk_buff *skb);

struct sk_buff *nicvf_get_rcv_skb(struct nicvf *nic, struct cqe_rx_t *cqe_rx);
void nicvf_rbdr_task(unsigned long data);
void nicvf_rbdr_work(struct work_struct *work);

void nicvf_enable_intr(struct nicvf *nic, int int_type, int q_idx);
void nicvf_disable_intr(struct nicvf *nic, int int_type, int q_idx);
void nicvf_clear_intr(struct nicvf *nic, int int_type, int q_idx);
int nicvf_is_intr_enabled(struct nicvf *nic, int int_type, int q_idx);

/* Register access APIs */
void nicvf_reg_write(struct nicvf *nic, u64 offset, u64 val);
u64 nicvf_reg_read(struct nicvf *nic, u64 offset);
void nicvf_qset_reg_write(struct nicvf *nic, u64 offset, u64 val);
u64 nicvf_qset_reg_read(struct nicvf *nic, u64 offset);
void nicvf_queue_reg_write(struct nicvf *nic, u64 offset,
			   u64 qidx, u64 val);
u64 nicvf_queue_reg_read(struct nicvf *nic,
			 u64 offset, u64 qidx);

/* Stats */
void nicvf_update_rq_stats(struct nicvf *nic, int rq_idx);
void nicvf_update_sq_stats(struct nicvf *nic, int sq_idx);
int nicvf_check_cqe_rx_errs(struct nicvf *nic,
			    struct cmp_queue *cq, struct cqe_rx_t *cqe_rx);
int nicvf_check_cqe_tx_errs(struct nicvf *nic,
			    struct cmp_queue *cq, struct cqe_send_t *cqe_tx);
#endif /* NICVF_QUEUES_H */