2 * Driver for Marvell PPv2 network controller for Armada 375 SoC.
4 * Copyright (C) 2014 Marvell
6 * Marcin Wojtas <mw@semihalf.com>
8 * This file is licensed under the terms of the GNU General Public
9 * License version 2. This program is licensed "as is" without any
10 * warranty of any kind, whether express or implied.
13 #include <linux/acpi.h>
14 #include <linux/kernel.h>
15 #include <linux/netdevice.h>
16 #include <linux/etherdevice.h>
17 #include <linux/platform_device.h>
18 #include <linux/skbuff.h>
19 #include <linux/inetdevice.h>
20 #include <linux/mbus.h>
21 #include <linux/module.h>
22 #include <linux/mfd/syscon.h>
23 #include <linux/interrupt.h>
24 #include <linux/cpumask.h>
26 #include <linux/of_irq.h>
27 #include <linux/of_mdio.h>
28 #include <linux/of_net.h>
29 #include <linux/of_address.h>
30 #include <linux/of_device.h>
31 #include <linux/phy.h>
32 #include <linux/phy/phy.h>
33 #include <linux/clk.h>
34 #include <linux/hrtimer.h>
35 #include <linux/ktime.h>
36 #include <linux/regmap.h>
37 #include <uapi/linux/ppp_defs.h>
43 #define MVPP2_RX_DATA_FIFO_SIZE_REG(port) (0x00 + 4 * (port))
44 #define MVPP2_RX_ATTR_FIFO_SIZE_REG(port) (0x20 + 4 * (port))
45 #define MVPP2_RX_MIN_PKT_SIZE_REG 0x60
46 #define MVPP2_RX_FIFO_INIT_REG 0x64
47 #define MVPP22_TX_FIFO_THRESH_REG(port) (0x8840 + 4 * (port))
48 #define MVPP22_TX_FIFO_SIZE_REG(port) (0x8860 + 4 * (port))
50 /* RX DMA Top Registers */
51 #define MVPP2_RX_CTRL_REG(port) (0x140 + 4 * (port))
52 #define MVPP2_RX_LOW_LATENCY_PKT_SIZE(s) (((s) & 0xfff) << 16)
53 #define MVPP2_RX_USE_PSEUDO_FOR_CSUM_MASK BIT(31)
54 #define MVPP2_POOL_BUF_SIZE_REG(pool) (0x180 + 4 * (pool))
55 #define MVPP2_POOL_BUF_SIZE_OFFSET 5
56 #define MVPP2_RXQ_CONFIG_REG(rxq) (0x800 + 4 * (rxq))
57 #define MVPP2_SNOOP_PKT_SIZE_MASK 0x1ff
58 #define MVPP2_SNOOP_BUF_HDR_MASK BIT(9)
59 #define MVPP2_RXQ_POOL_SHORT_OFFS 20
60 #define MVPP21_RXQ_POOL_SHORT_MASK 0x700000
61 #define MVPP22_RXQ_POOL_SHORT_MASK 0xf00000
62 #define MVPP2_RXQ_POOL_LONG_OFFS 24
63 #define MVPP21_RXQ_POOL_LONG_MASK 0x7000000
64 #define MVPP22_RXQ_POOL_LONG_MASK 0xf000000
65 #define MVPP2_RXQ_PACKET_OFFSET_OFFS 28
66 #define MVPP2_RXQ_PACKET_OFFSET_MASK 0x70000000
67 #define MVPP2_RXQ_DISABLE_MASK BIT(31)
70 #define MVPP2_MH_REG(port) (0x5040 + 4 * (port))
71 #define MVPP2_DSA_EXTENDED BIT(5)
73 /* Parser Registers */
74 #define MVPP2_PRS_INIT_LOOKUP_REG 0x1000
75 #define MVPP2_PRS_PORT_LU_MAX 0xf
76 #define MVPP2_PRS_PORT_LU_MASK(port) (0xff << ((port) * 4))
77 #define MVPP2_PRS_PORT_LU_VAL(port, val) ((val) << ((port) * 4))
78 #define MVPP2_PRS_INIT_OFFS_REG(port) (0x1004 + ((port) & 4))
79 #define MVPP2_PRS_INIT_OFF_MASK(port) (0x3f << (((port) % 4) * 8))
80 #define MVPP2_PRS_INIT_OFF_VAL(port, val) ((val) << (((port) % 4) * 8))
81 #define MVPP2_PRS_MAX_LOOP_REG(port) (0x100c + ((port) & 4))
82 #define MVPP2_PRS_MAX_LOOP_MASK(port) (0xff << (((port) % 4) * 8))
83 #define MVPP2_PRS_MAX_LOOP_VAL(port, val) ((val) << (((port) % 4) * 8))
84 #define MVPP2_PRS_TCAM_IDX_REG 0x1100
85 #define MVPP2_PRS_TCAM_DATA_REG(idx) (0x1104 + (idx) * 4)
86 #define MVPP2_PRS_TCAM_INV_MASK BIT(31)
87 #define MVPP2_PRS_SRAM_IDX_REG 0x1200
88 #define MVPP2_PRS_SRAM_DATA_REG(idx) (0x1204 + (idx) * 4)
89 #define MVPP2_PRS_TCAM_CTRL_REG 0x1230
90 #define MVPP2_PRS_TCAM_EN_MASK BIT(0)
93 #define MVPP22_RSS_INDEX 0x1500
94 #define MVPP22_RSS_INDEX_TABLE_ENTRY(idx) (idx)
95 #define MVPP22_RSS_INDEX_TABLE(idx) ((idx) << 8)
96 #define MVPP22_RSS_INDEX_QUEUE(idx) ((idx) << 16)
97 #define MVPP22_RSS_TABLE_ENTRY 0x1508
98 #define MVPP22_RSS_TABLE 0x1510
99 #define MVPP22_RSS_TABLE_POINTER(p) (p)
100 #define MVPP22_RSS_WIDTH 0x150c
102 /* Classifier Registers */
103 #define MVPP2_CLS_MODE_REG 0x1800
104 #define MVPP2_CLS_MODE_ACTIVE_MASK BIT(0)
105 #define MVPP2_CLS_PORT_WAY_REG 0x1810
106 #define MVPP2_CLS_PORT_WAY_MASK(port) (1 << (port))
107 #define MVPP2_CLS_LKP_INDEX_REG 0x1814
108 #define MVPP2_CLS_LKP_INDEX_WAY_OFFS 6
109 #define MVPP2_CLS_LKP_TBL_REG 0x1818
110 #define MVPP2_CLS_LKP_TBL_RXQ_MASK 0xff
111 #define MVPP2_CLS_LKP_TBL_LOOKUP_EN_MASK BIT(25)
112 #define MVPP2_CLS_FLOW_INDEX_REG 0x1820
113 #define MVPP2_CLS_FLOW_TBL0_REG 0x1824
114 #define MVPP2_CLS_FLOW_TBL1_REG 0x1828
115 #define MVPP2_CLS_FLOW_TBL2_REG 0x182c
116 #define MVPP2_CLS_OVERSIZE_RXQ_LOW_REG(port) (0x1980 + ((port) * 4))
117 #define MVPP2_CLS_OVERSIZE_RXQ_LOW_BITS 3
118 #define MVPP2_CLS_OVERSIZE_RXQ_LOW_MASK 0x7
119 #define MVPP2_CLS_SWFWD_P2HQ_REG(port) (0x19b0 + ((port) * 4))
120 #define MVPP2_CLS_SWFWD_PCTRL_REG 0x19d0
121 #define MVPP2_CLS_SWFWD_PCTRL_MASK(port) (1 << (port))
123 /* Descriptor Manager Top Registers */
124 #define MVPP2_RXQ_NUM_REG 0x2040
125 #define MVPP2_RXQ_DESC_ADDR_REG 0x2044
126 #define MVPP22_DESC_ADDR_OFFS 8
127 #define MVPP2_RXQ_DESC_SIZE_REG 0x2048
128 #define MVPP2_RXQ_DESC_SIZE_MASK 0x3ff0
129 #define MVPP2_RXQ_STATUS_UPDATE_REG(rxq) (0x3000 + 4 * (rxq))
130 #define MVPP2_RXQ_NUM_PROCESSED_OFFSET 0
131 #define MVPP2_RXQ_NUM_NEW_OFFSET 16
132 #define MVPP2_RXQ_STATUS_REG(rxq) (0x3400 + 4 * (rxq))
133 #define MVPP2_RXQ_OCCUPIED_MASK 0x3fff
134 #define MVPP2_RXQ_NON_OCCUPIED_OFFSET 16
135 #define MVPP2_RXQ_NON_OCCUPIED_MASK 0x3fff0000
136 #define MVPP2_RXQ_THRESH_REG 0x204c
137 #define MVPP2_OCCUPIED_THRESH_OFFSET 0
138 #define MVPP2_OCCUPIED_THRESH_MASK 0x3fff
139 #define MVPP2_RXQ_INDEX_REG 0x2050
140 #define MVPP2_TXQ_NUM_REG 0x2080
141 #define MVPP2_TXQ_DESC_ADDR_REG 0x2084
142 #define MVPP2_TXQ_DESC_SIZE_REG 0x2088
143 #define MVPP2_TXQ_DESC_SIZE_MASK 0x3ff0
144 #define MVPP2_TXQ_THRESH_REG 0x2094
145 #define MVPP2_TXQ_THRESH_OFFSET 16
146 #define MVPP2_TXQ_THRESH_MASK 0x3fff
147 #define MVPP2_AGGR_TXQ_UPDATE_REG 0x2090
148 #define MVPP2_TXQ_INDEX_REG 0x2098
149 #define MVPP2_TXQ_PREF_BUF_REG 0x209c
150 #define MVPP2_PREF_BUF_PTR(desc) ((desc) & 0xfff)
151 #define MVPP2_PREF_BUF_SIZE_4 (BIT(12) | BIT(13))
152 #define MVPP2_PREF_BUF_SIZE_16 (BIT(12) | BIT(14))
153 #define MVPP2_PREF_BUF_THRESH(val) ((val) << 17)
154 #define MVPP2_TXQ_DRAIN_EN_MASK BIT(31)
155 #define MVPP2_TXQ_PENDING_REG 0x20a0
156 #define MVPP2_TXQ_PENDING_MASK 0x3fff
157 #define MVPP2_TXQ_INT_STATUS_REG 0x20a4
158 #define MVPP2_TXQ_SENT_REG(txq) (0x3c00 + 4 * (txq))
159 #define MVPP2_TRANSMITTED_COUNT_OFFSET 16
160 #define MVPP2_TRANSMITTED_COUNT_MASK 0x3fff0000
161 #define MVPP2_TXQ_RSVD_REQ_REG 0x20b0
162 #define MVPP2_TXQ_RSVD_REQ_Q_OFFSET 16
163 #define MVPP2_TXQ_RSVD_RSLT_REG 0x20b4
164 #define MVPP2_TXQ_RSVD_RSLT_MASK 0x3fff
165 #define MVPP2_TXQ_RSVD_CLR_REG 0x20b8
166 #define MVPP2_TXQ_RSVD_CLR_OFFSET 16
167 #define MVPP2_AGGR_TXQ_DESC_ADDR_REG(cpu) (0x2100 + 4 * (cpu))
168 #define MVPP22_AGGR_TXQ_DESC_ADDR_OFFS 8
169 #define MVPP2_AGGR_TXQ_DESC_SIZE_REG(cpu) (0x2140 + 4 * (cpu))
170 #define MVPP2_AGGR_TXQ_DESC_SIZE_MASK 0x3ff0
171 #define MVPP2_AGGR_TXQ_STATUS_REG(cpu) (0x2180 + 4 * (cpu))
172 #define MVPP2_AGGR_TXQ_PENDING_MASK 0x3fff
173 #define MVPP2_AGGR_TXQ_INDEX_REG(cpu) (0x21c0 + 4 * (cpu))
175 /* MBUS bridge registers */
176 #define MVPP2_WIN_BASE(w) (0x4000 + ((w) << 2))
177 #define MVPP2_WIN_SIZE(w) (0x4020 + ((w) << 2))
178 #define MVPP2_WIN_REMAP(w) (0x4040 + ((w) << 2))
179 #define MVPP2_BASE_ADDR_ENABLE 0x4060
181 /* AXI Bridge Registers */
182 #define MVPP22_AXI_BM_WR_ATTR_REG 0x4100
183 #define MVPP22_AXI_BM_RD_ATTR_REG 0x4104
184 #define MVPP22_AXI_AGGRQ_DESCR_RD_ATTR_REG 0x4110
185 #define MVPP22_AXI_TXQ_DESCR_WR_ATTR_REG 0x4114
186 #define MVPP22_AXI_TXQ_DESCR_RD_ATTR_REG 0x4118
187 #define MVPP22_AXI_RXQ_DESCR_WR_ATTR_REG 0x411c
188 #define MVPP22_AXI_RX_DATA_WR_ATTR_REG 0x4120
189 #define MVPP22_AXI_TX_DATA_RD_ATTR_REG 0x4130
190 #define MVPP22_AXI_RD_NORMAL_CODE_REG 0x4150
191 #define MVPP22_AXI_RD_SNOOP_CODE_REG 0x4154
192 #define MVPP22_AXI_WR_NORMAL_CODE_REG 0x4160
193 #define MVPP22_AXI_WR_SNOOP_CODE_REG 0x4164
195 /* Values for AXI Bridge registers */
196 #define MVPP22_AXI_ATTR_CACHE_OFFS 0
197 #define MVPP22_AXI_ATTR_DOMAIN_OFFS 12
199 #define MVPP22_AXI_CODE_CACHE_OFFS 0
200 #define MVPP22_AXI_CODE_DOMAIN_OFFS 4
202 #define MVPP22_AXI_CODE_CACHE_NON_CACHE 0x3
203 #define MVPP22_AXI_CODE_CACHE_WR_CACHE 0x7
204 #define MVPP22_AXI_CODE_CACHE_RD_CACHE 0xb
206 #define MVPP22_AXI_CODE_DOMAIN_OUTER_DOM 2
207 #define MVPP22_AXI_CODE_DOMAIN_SYSTEM 3
209 /* Interrupt Cause and Mask registers */
210 #define MVPP2_ISR_TX_THRESHOLD_REG(port) (0x5140 + 4 * (port))
211 #define MVPP2_MAX_ISR_TX_THRESHOLD 0xfffff0
213 #define MVPP2_ISR_RX_THRESHOLD_REG(rxq) (0x5200 + 4 * (rxq))
214 #define MVPP2_MAX_ISR_RX_THRESHOLD 0xfffff0
215 #define MVPP21_ISR_RXQ_GROUP_REG(port) (0x5400 + 4 * (port))
217 #define MVPP22_ISR_RXQ_GROUP_INDEX_REG 0x5400
218 #define MVPP22_ISR_RXQ_GROUP_INDEX_SUBGROUP_MASK 0xf
219 #define MVPP22_ISR_RXQ_GROUP_INDEX_GROUP_MASK 0x380
220 #define MVPP22_ISR_RXQ_GROUP_INDEX_GROUP_OFFSET 7
222 #define MVPP22_ISR_RXQ_GROUP_INDEX_SUBGROUP_MASK 0xf
223 #define MVPP22_ISR_RXQ_GROUP_INDEX_GROUP_MASK 0x380
225 #define MVPP22_ISR_RXQ_SUB_GROUP_CONFIG_REG 0x5404
226 #define MVPP22_ISR_RXQ_SUB_GROUP_STARTQ_MASK 0x1f
227 #define MVPP22_ISR_RXQ_SUB_GROUP_SIZE_MASK 0xf00
228 #define MVPP22_ISR_RXQ_SUB_GROUP_SIZE_OFFSET 8
230 #define MVPP2_ISR_ENABLE_REG(port) (0x5420 + 4 * (port))
231 #define MVPP2_ISR_ENABLE_INTERRUPT(mask) ((mask) & 0xffff)
232 #define MVPP2_ISR_DISABLE_INTERRUPT(mask) (((mask) << 16) & 0xffff0000)
233 #define MVPP2_ISR_RX_TX_CAUSE_REG(port) (0x5480 + 4 * (port))
234 #define MVPP2_CAUSE_RXQ_OCCUP_DESC_ALL_MASK 0xffff
235 #define MVPP2_CAUSE_TXQ_OCCUP_DESC_ALL_MASK 0xff0000
236 #define MVPP2_CAUSE_TXQ_OCCUP_DESC_ALL_OFFSET 16
237 #define MVPP2_CAUSE_RX_FIFO_OVERRUN_MASK BIT(24)
238 #define MVPP2_CAUSE_FCS_ERR_MASK BIT(25)
239 #define MVPP2_CAUSE_TX_FIFO_UNDERRUN_MASK BIT(26)
240 #define MVPP2_CAUSE_TX_EXCEPTION_SUM_MASK BIT(29)
241 #define MVPP2_CAUSE_RX_EXCEPTION_SUM_MASK BIT(30)
242 #define MVPP2_CAUSE_MISC_SUM_MASK BIT(31)
243 #define MVPP2_ISR_RX_TX_MASK_REG(port) (0x54a0 + 4 * (port))
244 #define MVPP2_ISR_PON_RX_TX_MASK_REG 0x54bc
245 #define MVPP2_PON_CAUSE_RXQ_OCCUP_DESC_ALL_MASK 0xffff
246 #define MVPP2_PON_CAUSE_TXP_OCCUP_DESC_ALL_MASK 0x3fc00000
247 #define MVPP2_PON_CAUSE_MISC_SUM_MASK BIT(31)
248 #define MVPP2_ISR_MISC_CAUSE_REG 0x55b0
250 /* Buffer Manager registers */
251 #define MVPP2_BM_POOL_BASE_REG(pool) (0x6000 + ((pool) * 4))
252 #define MVPP2_BM_POOL_BASE_ADDR_MASK 0xfffff80
253 #define MVPP2_BM_POOL_SIZE_REG(pool) (0x6040 + ((pool) * 4))
254 #define MVPP2_BM_POOL_SIZE_MASK 0xfff0
255 #define MVPP2_BM_POOL_READ_PTR_REG(pool) (0x6080 + ((pool) * 4))
256 #define MVPP2_BM_POOL_GET_READ_PTR_MASK 0xfff0
257 #define MVPP2_BM_POOL_PTRS_NUM_REG(pool) (0x60c0 + ((pool) * 4))
258 #define MVPP2_BM_POOL_PTRS_NUM_MASK 0xfff0
259 #define MVPP2_BM_BPPI_READ_PTR_REG(pool) (0x6100 + ((pool) * 4))
260 #define MVPP2_BM_BPPI_PTRS_NUM_REG(pool) (0x6140 + ((pool) * 4))
261 #define MVPP2_BM_BPPI_PTR_NUM_MASK 0x7ff
262 #define MVPP22_BM_POOL_PTRS_NUM_MASK 0xfff8
263 #define MVPP2_BM_BPPI_PREFETCH_FULL_MASK BIT(16)
264 #define MVPP2_BM_POOL_CTRL_REG(pool) (0x6200 + ((pool) * 4))
265 #define MVPP2_BM_START_MASK BIT(0)
266 #define MVPP2_BM_STOP_MASK BIT(1)
267 #define MVPP2_BM_STATE_MASK BIT(4)
268 #define MVPP2_BM_LOW_THRESH_OFFS 8
269 #define MVPP2_BM_LOW_THRESH_MASK 0x7f00
270 #define MVPP2_BM_LOW_THRESH_VALUE(val) ((val) << \
271 MVPP2_BM_LOW_THRESH_OFFS)
272 #define MVPP2_BM_HIGH_THRESH_OFFS 16
273 #define MVPP2_BM_HIGH_THRESH_MASK 0x7f0000
274 #define MVPP2_BM_HIGH_THRESH_VALUE(val) ((val) << \
275 MVPP2_BM_HIGH_THRESH_OFFS)
276 #define MVPP2_BM_INTR_CAUSE_REG(pool) (0x6240 + ((pool) * 4))
277 #define MVPP2_BM_RELEASED_DELAY_MASK BIT(0)
278 #define MVPP2_BM_ALLOC_FAILED_MASK BIT(1)
279 #define MVPP2_BM_BPPE_EMPTY_MASK BIT(2)
280 #define MVPP2_BM_BPPE_FULL_MASK BIT(3)
281 #define MVPP2_BM_AVAILABLE_BP_LOW_MASK BIT(4)
282 #define MVPP2_BM_INTR_MASK_REG(pool) (0x6280 + ((pool) * 4))
283 #define MVPP2_BM_PHY_ALLOC_REG(pool) (0x6400 + ((pool) * 4))
284 #define MVPP2_BM_PHY_ALLOC_GRNTD_MASK BIT(0)
285 #define MVPP2_BM_VIRT_ALLOC_REG 0x6440
286 #define MVPP22_BM_ADDR_HIGH_ALLOC 0x6444
287 #define MVPP22_BM_ADDR_HIGH_PHYS_MASK 0xff
288 #define MVPP22_BM_ADDR_HIGH_VIRT_MASK 0xff00
289 #define MVPP22_BM_ADDR_HIGH_VIRT_SHIFT 8
290 #define MVPP2_BM_PHY_RLS_REG(pool) (0x6480 + ((pool) * 4))
291 #define MVPP2_BM_PHY_RLS_MC_BUFF_MASK BIT(0)
292 #define MVPP2_BM_PHY_RLS_PRIO_EN_MASK BIT(1)
293 #define MVPP2_BM_PHY_RLS_GRNTD_MASK BIT(2)
294 #define MVPP2_BM_VIRT_RLS_REG 0x64c0
295 #define MVPP22_BM_ADDR_HIGH_RLS_REG 0x64c4
296 #define MVPP22_BM_ADDR_HIGH_PHYS_RLS_MASK 0xff
297 #define MVPP22_BM_ADDR_HIGH_VIRT_RLS_MASK 0xff00
298 #define MVPP22_BM_ADDR_HIGH_VIRT_RLS_SHIFT 8
300 /* TX Scheduler registers */
301 #define MVPP2_TXP_SCHED_PORT_INDEX_REG 0x8000
302 #define MVPP2_TXP_SCHED_Q_CMD_REG 0x8004
303 #define MVPP2_TXP_SCHED_ENQ_MASK 0xff
304 #define MVPP2_TXP_SCHED_DISQ_OFFSET 8
305 #define MVPP2_TXP_SCHED_CMD_1_REG 0x8010
306 #define MVPP2_TXP_SCHED_PERIOD_REG 0x8018
307 #define MVPP2_TXP_SCHED_MTU_REG 0x801c
308 #define MVPP2_TXP_MTU_MAX 0x7FFFF
309 #define MVPP2_TXP_SCHED_REFILL_REG 0x8020
310 #define MVPP2_TXP_REFILL_TOKENS_ALL_MASK 0x7ffff
311 #define MVPP2_TXP_REFILL_PERIOD_ALL_MASK 0x3ff00000
312 #define MVPP2_TXP_REFILL_PERIOD_MASK(v) ((v) << 20)
313 #define MVPP2_TXP_SCHED_TOKEN_SIZE_REG 0x8024
314 #define MVPP2_TXP_TOKEN_SIZE_MAX 0xffffffff
315 #define MVPP2_TXQ_SCHED_REFILL_REG(q) (0x8040 + ((q) << 2))
316 #define MVPP2_TXQ_REFILL_TOKENS_ALL_MASK 0x7ffff
317 #define MVPP2_TXQ_REFILL_PERIOD_ALL_MASK 0x3ff00000
318 #define MVPP2_TXQ_REFILL_PERIOD_MASK(v) ((v) << 20)
319 #define MVPP2_TXQ_SCHED_TOKEN_SIZE_REG(q) (0x8060 + ((q) << 2))
320 #define MVPP2_TXQ_TOKEN_SIZE_MAX 0x7fffffff
321 #define MVPP2_TXQ_SCHED_TOKEN_CNTR_REG(q) (0x8080 + ((q) << 2))
322 #define MVPP2_TXQ_TOKEN_CNTR_MAX 0xffffffff
324 /* TX general registers */
325 #define MVPP2_TX_SNOOP_REG 0x8800
326 #define MVPP2_TX_PORT_FLUSH_REG 0x8810
327 #define MVPP2_TX_PORT_FLUSH_MASK(port) (1 << (port))
330 #define MVPP2_SRC_ADDR_MIDDLE 0x24
331 #define MVPP2_SRC_ADDR_HIGH 0x28
332 #define MVPP2_PHY_AN_CFG0_REG 0x34
333 #define MVPP2_PHY_AN_STOP_SMI0_MASK BIT(7)
334 #define MVPP2_MNG_EXTENDED_GLOBAL_CTRL_REG 0x305c
335 #define MVPP2_EXT_GLOBAL_CTRL_DEFAULT 0x27
337 /* Per-port registers */
338 #define MVPP2_GMAC_CTRL_0_REG 0x0
339 #define MVPP2_GMAC_PORT_EN_MASK BIT(0)
340 #define MVPP2_GMAC_PORT_TYPE_MASK BIT(1)
341 #define MVPP2_GMAC_MAX_RX_SIZE_OFFS 2
342 #define MVPP2_GMAC_MAX_RX_SIZE_MASK 0x7ffc
343 #define MVPP2_GMAC_MIB_CNTR_EN_MASK BIT(15)
344 #define MVPP2_GMAC_CTRL_1_REG 0x4
345 #define MVPP2_GMAC_PERIODIC_XON_EN_MASK BIT(1)
346 #define MVPP2_GMAC_GMII_LB_EN_MASK BIT(5)
347 #define MVPP2_GMAC_PCS_LB_EN_BIT 6
348 #define MVPP2_GMAC_PCS_LB_EN_MASK BIT(6)
349 #define MVPP2_GMAC_SA_LOW_OFFS 7
350 #define MVPP2_GMAC_CTRL_2_REG 0x8
351 #define MVPP2_GMAC_INBAND_AN_MASK BIT(0)
352 #define MVPP2_GMAC_FLOW_CTRL_MASK GENMASK(2, 1)
353 #define MVPP2_GMAC_PCS_ENABLE_MASK BIT(3)
354 #define MVPP2_GMAC_INTERNAL_CLK_MASK BIT(4)
355 #define MVPP2_GMAC_DISABLE_PADDING BIT(5)
356 #define MVPP2_GMAC_PORT_RESET_MASK BIT(6)
357 #define MVPP2_GMAC_AUTONEG_CONFIG 0xc
358 #define MVPP2_GMAC_FORCE_LINK_DOWN BIT(0)
359 #define MVPP2_GMAC_FORCE_LINK_PASS BIT(1)
360 #define MVPP2_GMAC_IN_BAND_AUTONEG BIT(2)
361 #define MVPP2_GMAC_IN_BAND_AUTONEG_BYPASS BIT(3)
362 #define MVPP2_GMAC_CONFIG_MII_SPEED BIT(5)
363 #define MVPP2_GMAC_CONFIG_GMII_SPEED BIT(6)
364 #define MVPP2_GMAC_AN_SPEED_EN BIT(7)
365 #define MVPP2_GMAC_FC_ADV_EN BIT(9)
366 #define MVPP2_GMAC_FLOW_CTRL_AUTONEG BIT(11)
367 #define MVPP2_GMAC_CONFIG_FULL_DUPLEX BIT(12)
368 #define MVPP2_GMAC_AN_DUPLEX_EN BIT(13)
369 #define MVPP2_GMAC_STATUS0 0x10
370 #define MVPP2_GMAC_STATUS0_LINK_UP BIT(0)
371 #define MVPP2_GMAC_PORT_FIFO_CFG_1_REG 0x1c
372 #define MVPP2_GMAC_TX_FIFO_MIN_TH_OFFS 6
373 #define MVPP2_GMAC_TX_FIFO_MIN_TH_ALL_MASK 0x1fc0
374 #define MVPP2_GMAC_TX_FIFO_MIN_TH_MASK(v) (((v) << 6) & \
375 MVPP2_GMAC_TX_FIFO_MIN_TH_ALL_MASK)
376 #define MVPP22_GMAC_INT_STAT 0x20
377 #define MVPP22_GMAC_INT_STAT_LINK BIT(1)
378 #define MVPP22_GMAC_INT_MASK 0x24
379 #define MVPP22_GMAC_INT_MASK_LINK_STAT BIT(1)
380 #define MVPP22_GMAC_CTRL_4_REG 0x90
381 #define MVPP22_CTRL4_EXT_PIN_GMII_SEL BIT(0)
382 #define MVPP22_CTRL4_DP_CLK_SEL BIT(5)
383 #define MVPP22_CTRL4_SYNC_BYPASS_DIS BIT(6)
384 #define MVPP22_CTRL4_QSGMII_BYPASS_ACTIVE BIT(7)
385 #define MVPP22_GMAC_INT_SUM_MASK 0xa4
386 #define MVPP22_GMAC_INT_SUM_MASK_LINK_STAT BIT(1)
388 /* Per-port XGMAC registers. PPv2.2 only, only for GOP port 0,
389 * relative to port->base.
391 #define MVPP22_XLG_CTRL0_REG 0x100
392 #define MVPP22_XLG_CTRL0_PORT_EN BIT(0)
393 #define MVPP22_XLG_CTRL0_MAC_RESET_DIS BIT(1)
394 #define MVPP22_XLG_CTRL0_RX_FLOW_CTRL_EN BIT(7)
395 #define MVPP22_XLG_CTRL0_MIB_CNT_DIS BIT(14)
396 #define MVPP22_XLG_CTRL1_REG 0x104
397 #define MVPP22_XLG_CTRL1_FRAMESIZELIMIT_OFFS 0
398 #define MVPP22_XLG_CTRL1_FRAMESIZELIMIT_MASK 0x1fff
399 #define MVPP22_XLG_STATUS 0x10c
400 #define MVPP22_XLG_STATUS_LINK_UP BIT(0)
401 #define MVPP22_XLG_INT_STAT 0x114
402 #define MVPP22_XLG_INT_STAT_LINK BIT(1)
403 #define MVPP22_XLG_INT_MASK 0x118
404 #define MVPP22_XLG_INT_MASK_LINK BIT(1)
405 #define MVPP22_XLG_CTRL3_REG 0x11c
406 #define MVPP22_XLG_CTRL3_MACMODESELECT_MASK (7 << 13)
407 #define MVPP22_XLG_CTRL3_MACMODESELECT_GMAC (0 << 13)
408 #define MVPP22_XLG_CTRL3_MACMODESELECT_10G (1 << 13)
409 #define MVPP22_XLG_EXT_INT_MASK 0x15c
410 #define MVPP22_XLG_EXT_INT_MASK_XLG BIT(1)
411 #define MVPP22_XLG_EXT_INT_MASK_GIG BIT(2)
412 #define MVPP22_XLG_CTRL4_REG 0x184
413 #define MVPP22_XLG_CTRL4_FWD_FC BIT(5)
414 #define MVPP22_XLG_CTRL4_FWD_PFC BIT(6)
415 #define MVPP22_XLG_CTRL4_MACMODSELECT_GMAC BIT(12)
417 /* SMI registers. PPv2.2 only, relative to priv->iface_base. */
418 #define MVPP22_SMI_MISC_CFG_REG 0x1204
419 #define MVPP22_SMI_POLLING_EN BIT(10)
421 #define MVPP22_GMAC_BASE(port) (0x7000 + (port) * 0x1000 + 0xe00)
423 #define MVPP2_CAUSE_TXQ_SENT_DESC_ALL_MASK 0xff
425 /* Descriptor ring Macros */
426 #define MVPP2_QUEUE_NEXT_DESC(q, index) \
427 (((index) < (q)->last_desc) ? ((index) + 1) : 0)
429 /* XPCS registers. PPv2.2 only */
430 #define MVPP22_MPCS_BASE(port) (0x7000 + (port) * 0x1000)
431 #define MVPP22_MPCS_CTRL 0x14
432 #define MVPP22_MPCS_CTRL_FWD_ERR_CONN BIT(10)
433 #define MVPP22_MPCS_CLK_RESET 0x14c
434 #define MAC_CLK_RESET_SD_TX BIT(0)
435 #define MAC_CLK_RESET_SD_RX BIT(1)
436 #define MAC_CLK_RESET_MAC BIT(2)
437 #define MVPP22_MPCS_CLK_RESET_DIV_RATIO(n) ((n) << 4)
438 #define MVPP22_MPCS_CLK_RESET_DIV_SET BIT(11)
440 /* XPCS registers. PPv2.2 only */
441 #define MVPP22_XPCS_BASE(port) (0x7400 + (port) * 0x1000)
442 #define MVPP22_XPCS_CFG0 0x0
443 #define MVPP22_XPCS_CFG0_PCS_MODE(n) ((n) << 3)
444 #define MVPP22_XPCS_CFG0_ACTIVE_LANE(n) ((n) << 5)
446 /* System controller registers. Accessed through a regmap. */
447 #define GENCONF_SOFT_RESET1 0x1108
448 #define GENCONF_SOFT_RESET1_GOP BIT(6)
449 #define GENCONF_PORT_CTRL0 0x1110
450 #define GENCONF_PORT_CTRL0_BUS_WIDTH_SELECT BIT(1)
451 #define GENCONF_PORT_CTRL0_RX_DATA_SAMPLE BIT(29)
452 #define GENCONF_PORT_CTRL0_CLK_DIV_PHASE_CLR BIT(31)
453 #define GENCONF_PORT_CTRL1 0x1114
454 #define GENCONF_PORT_CTRL1_EN(p) BIT(p)
455 #define GENCONF_PORT_CTRL1_RESET(p) (BIT(p) << 28)
456 #define GENCONF_CTRL0 0x1120
457 #define GENCONF_CTRL0_PORT0_RGMII BIT(0)
458 #define GENCONF_CTRL0_PORT1_RGMII_MII BIT(1)
459 #define GENCONF_CTRL0_PORT1_RGMII BIT(2)
461 /* Various constants */
464 #define MVPP2_TXDONE_COAL_PKTS_THRESH 64
465 #define MVPP2_TXDONE_HRTIMER_PERIOD_NS 1000000UL
466 #define MVPP2_TXDONE_COAL_USEC 1000
467 #define MVPP2_RX_COAL_PKTS 32
468 #define MVPP2_RX_COAL_USEC 64
470 /* The two bytes Marvell header. Either contains a special value used
471 * by Marvell switches when a specific hardware mode is enabled (not
472 * supported by this driver) or is filled automatically by zeroes on
473 * the RX side. Those two bytes being at the front of the Ethernet
474 * header, they allow to have the IP header aligned on a 4 bytes
475 * boundary automatically: the hardware skips those two bytes on its
478 #define MVPP2_MH_SIZE 2
479 #define MVPP2_ETH_TYPE_LEN 2
480 #define MVPP2_PPPOE_HDR_SIZE 8
481 #define MVPP2_VLAN_TAG_LEN 4
482 #define MVPP2_VLAN_TAG_EDSA_LEN 8
484 /* Lbtd 802.3 type */
485 #define MVPP2_IP_LBDT_TYPE 0xfffa
487 #define MVPP2_TX_CSUM_MAX_SIZE 9800
489 /* Timeout constants */
490 #define MVPP2_TX_DISABLE_TIMEOUT_MSEC 1000
491 #define MVPP2_TX_PENDING_TIMEOUT_MSEC 1000
493 #define MVPP2_TX_MTU_MAX 0x7ffff
495 /* Maximum number of T-CONTs of PON port */
496 #define MVPP2_MAX_TCONT 16
498 /* Maximum number of supported ports */
499 #define MVPP2_MAX_PORTS 4
501 /* Maximum number of TXQs used by single port */
502 #define MVPP2_MAX_TXQ 8
504 /* MVPP2_MAX_TSO_SEGS is the maximum number of fragments to allow in the GSO
505 * skb. As we need a maxium of two descriptors per fragments (1 header, 1 data),
506 * multiply this value by two to count the maximum number of skb descs needed.
508 #define MVPP2_MAX_TSO_SEGS 300
509 #define MVPP2_MAX_SKB_DESCS (MVPP2_MAX_TSO_SEGS * 2 + MAX_SKB_FRAGS)
511 /* Dfault number of RXQs in use */
512 #define MVPP2_DEFAULT_RXQ 4
514 /* Max number of Rx descriptors */
515 #define MVPP2_MAX_RXD_MAX 1024
516 #define MVPP2_MAX_RXD_DFLT 128
518 /* Max number of Tx descriptors */
519 #define MVPP2_MAX_TXD_MAX 2048
520 #define MVPP2_MAX_TXD_DFLT 1024
522 /* Amount of Tx descriptors that can be reserved at once by CPU */
523 #define MVPP2_CPU_DESC_CHUNK 64
525 /* Max number of Tx descriptors in each aggregated queue */
526 #define MVPP2_AGGR_TXQ_SIZE 256
528 /* Descriptor aligned size */
529 #define MVPP2_DESC_ALIGNED_SIZE 32
531 /* Descriptor alignment mask */
532 #define MVPP2_TX_DESC_ALIGN (MVPP2_DESC_ALIGNED_SIZE - 1)
534 /* RX FIFO constants */
535 #define MVPP2_RX_FIFO_PORT_DATA_SIZE_32KB 0x8000
536 #define MVPP2_RX_FIFO_PORT_DATA_SIZE_8KB 0x2000
537 #define MVPP2_RX_FIFO_PORT_DATA_SIZE_4KB 0x1000
538 #define MVPP2_RX_FIFO_PORT_ATTR_SIZE_32KB 0x200
539 #define MVPP2_RX_FIFO_PORT_ATTR_SIZE_8KB 0x80
540 #define MVPP2_RX_FIFO_PORT_ATTR_SIZE_4KB 0x40
541 #define MVPP2_RX_FIFO_PORT_MIN_PKT 0x80
543 /* TX FIFO constants */
544 #define MVPP22_TX_FIFO_DATA_SIZE_10KB 0xa
545 #define MVPP22_TX_FIFO_DATA_SIZE_3KB 0x3
546 #define MVPP2_TX_FIFO_THRESHOLD_MIN 256
547 #define MVPP2_TX_FIFO_THRESHOLD_10KB \
548 (MVPP22_TX_FIFO_DATA_SIZE_10KB * 1024 - MVPP2_TX_FIFO_THRESHOLD_MIN)
549 #define MVPP2_TX_FIFO_THRESHOLD_3KB \
550 (MVPP22_TX_FIFO_DATA_SIZE_3KB * 1024 - MVPP2_TX_FIFO_THRESHOLD_MIN)
552 /* RX buffer constants */
553 #define MVPP2_SKB_SHINFO_SIZE \
554 SKB_DATA_ALIGN(sizeof(struct skb_shared_info))
556 #define MVPP2_RX_PKT_SIZE(mtu) \
557 ALIGN((mtu) + MVPP2_MH_SIZE + MVPP2_VLAN_TAG_LEN + \
558 ETH_HLEN + ETH_FCS_LEN, cache_line_size())
560 #define MVPP2_RX_BUF_SIZE(pkt_size) ((pkt_size) + NET_SKB_PAD)
561 #define MVPP2_RX_TOTAL_SIZE(buf_size) ((buf_size) + MVPP2_SKB_SHINFO_SIZE)
562 #define MVPP2_RX_MAX_PKT_SIZE(total_size) \
563 ((total_size) - NET_SKB_PAD - MVPP2_SKB_SHINFO_SIZE)
565 #define MVPP2_BIT_TO_BYTE(bit) ((bit) / 8)
567 /* IPv6 max L3 address size */
568 #define MVPP2_MAX_L3_ADDR_SIZE 16
571 #define MVPP2_F_LOOPBACK BIT(0)
573 /* Marvell tag types */
574 enum mvpp2_tag_type {
575 MVPP2_TAG_TYPE_NONE = 0,
576 MVPP2_TAG_TYPE_MH = 1,
577 MVPP2_TAG_TYPE_DSA = 2,
578 MVPP2_TAG_TYPE_EDSA = 3,
579 MVPP2_TAG_TYPE_VLAN = 4,
580 MVPP2_TAG_TYPE_LAST = 5
583 /* Parser constants */
584 #define MVPP2_PRS_TCAM_SRAM_SIZE 256
585 #define MVPP2_PRS_TCAM_WORDS 6
586 #define MVPP2_PRS_SRAM_WORDS 4
587 #define MVPP2_PRS_FLOW_ID_SIZE 64
588 #define MVPP2_PRS_FLOW_ID_MASK 0x3f
589 #define MVPP2_PRS_TCAM_ENTRY_INVALID 1
590 #define MVPP2_PRS_TCAM_DSA_TAGGED_BIT BIT(5)
591 #define MVPP2_PRS_IPV4_HEAD 0x40
592 #define MVPP2_PRS_IPV4_HEAD_MASK 0xf0
593 #define MVPP2_PRS_IPV4_MC 0xe0
594 #define MVPP2_PRS_IPV4_MC_MASK 0xf0
595 #define MVPP2_PRS_IPV4_BC_MASK 0xff
596 #define MVPP2_PRS_IPV4_IHL 0x5
597 #define MVPP2_PRS_IPV4_IHL_MASK 0xf
598 #define MVPP2_PRS_IPV6_MC 0xff
599 #define MVPP2_PRS_IPV6_MC_MASK 0xff
600 #define MVPP2_PRS_IPV6_HOP_MASK 0xff
601 #define MVPP2_PRS_TCAM_PROTO_MASK 0xff
602 #define MVPP2_PRS_TCAM_PROTO_MASK_L 0x3f
603 #define MVPP2_PRS_DBL_VLANS_MAX 100
604 #define MVPP2_PRS_CAST_MASK BIT(0)
605 #define MVPP2_PRS_MCAST_VAL BIT(0)
606 #define MVPP2_PRS_UCAST_VAL 0x0
609 * - lookup ID - 4 bits
611 * - additional information - 1 byte
612 * - header data - 8 bytes
613 * The fields are represented by MVPP2_PRS_TCAM_DATA_REG(5)->(0).
615 #define MVPP2_PRS_AI_BITS 8
616 #define MVPP2_PRS_PORT_MASK 0xff
617 #define MVPP2_PRS_LU_MASK 0xf
618 #define MVPP2_PRS_TCAM_DATA_BYTE(offs) \
619 (((offs) - ((offs) % 2)) * 2 + ((offs) % 2))
620 #define MVPP2_PRS_TCAM_DATA_BYTE_EN(offs) \
621 (((offs) * 2) - ((offs) % 2) + 2)
622 #define MVPP2_PRS_TCAM_AI_BYTE 16
623 #define MVPP2_PRS_TCAM_PORT_BYTE 17
624 #define MVPP2_PRS_TCAM_LU_BYTE 20
625 #define MVPP2_PRS_TCAM_EN_OFFS(offs) ((offs) + 2)
626 #define MVPP2_PRS_TCAM_INV_WORD 5
628 #define MVPP2_PRS_VID_TCAM_BYTE 2
630 /* TCAM range for unicast and multicast filtering. We have 25 entries per port,
631 * with 4 dedicated to UC filtering and the rest to multicast filtering.
632 * Additionnally we reserve one entry for the broadcast address, and one for
633 * each port's own address.
635 #define MVPP2_PRS_MAC_UC_MC_FILT_MAX 25
636 #define MVPP2_PRS_MAC_RANGE_SIZE 80
638 /* Number of entries per port dedicated to UC and MC filtering */
639 #define MVPP2_PRS_MAC_UC_FILT_MAX 4
640 #define MVPP2_PRS_MAC_MC_FILT_MAX (MVPP2_PRS_MAC_UC_MC_FILT_MAX - \
641 MVPP2_PRS_MAC_UC_FILT_MAX)
643 /* There is a TCAM range reserved for VLAN filtering entries, range size is 33
644 * 10 VLAN ID filter entries per port
645 * 1 default VLAN filter entry per port
646 * It is assumed that there are 3 ports for filter, not including loopback port
648 #define MVPP2_PRS_VLAN_FILT_MAX 11
649 #define MVPP2_PRS_VLAN_FILT_RANGE_SIZE 33
651 #define MVPP2_PRS_VLAN_FILT_MAX_ENTRY (MVPP2_PRS_VLAN_FILT_MAX - 2)
652 #define MVPP2_PRS_VLAN_FILT_DFLT_ENTRY (MVPP2_PRS_VLAN_FILT_MAX - 1)
654 /* Tcam entries ID */
655 #define MVPP2_PE_DROP_ALL 0
656 #define MVPP2_PE_FIRST_FREE_TID 1
658 /* MAC filtering range */
659 #define MVPP2_PE_MAC_RANGE_END (MVPP2_PE_VID_FILT_RANGE_START - 1)
660 #define MVPP2_PE_MAC_RANGE_START (MVPP2_PE_MAC_RANGE_END - \
661 MVPP2_PRS_MAC_RANGE_SIZE + 1)
662 /* VLAN filtering range */
663 #define MVPP2_PE_VID_FILT_RANGE_END (MVPP2_PRS_TCAM_SRAM_SIZE - 31)
664 #define MVPP2_PE_VID_FILT_RANGE_START (MVPP2_PE_VID_FILT_RANGE_END - \
665 MVPP2_PRS_VLAN_FILT_RANGE_SIZE + 1)
666 #define MVPP2_PE_LAST_FREE_TID (MVPP2_PE_MAC_RANGE_START - 1)
667 #define MVPP2_PE_IP6_EXT_PROTO_UN (MVPP2_PRS_TCAM_SRAM_SIZE - 30)
668 #define MVPP2_PE_IP6_ADDR_UN (MVPP2_PRS_TCAM_SRAM_SIZE - 29)
669 #define MVPP2_PE_IP4_ADDR_UN (MVPP2_PRS_TCAM_SRAM_SIZE - 28)
670 #define MVPP2_PE_LAST_DEFAULT_FLOW (MVPP2_PRS_TCAM_SRAM_SIZE - 27)
671 #define MVPP2_PE_FIRST_DEFAULT_FLOW (MVPP2_PRS_TCAM_SRAM_SIZE - 22)
672 #define MVPP2_PE_EDSA_TAGGED (MVPP2_PRS_TCAM_SRAM_SIZE - 21)
673 #define MVPP2_PE_EDSA_UNTAGGED (MVPP2_PRS_TCAM_SRAM_SIZE - 20)
674 #define MVPP2_PE_DSA_TAGGED (MVPP2_PRS_TCAM_SRAM_SIZE - 19)
675 #define MVPP2_PE_DSA_UNTAGGED (MVPP2_PRS_TCAM_SRAM_SIZE - 18)
676 #define MVPP2_PE_ETYPE_EDSA_TAGGED (MVPP2_PRS_TCAM_SRAM_SIZE - 17)
677 #define MVPP2_PE_ETYPE_EDSA_UNTAGGED (MVPP2_PRS_TCAM_SRAM_SIZE - 16)
678 #define MVPP2_PE_ETYPE_DSA_TAGGED (MVPP2_PRS_TCAM_SRAM_SIZE - 15)
679 #define MVPP2_PE_ETYPE_DSA_UNTAGGED (MVPP2_PRS_TCAM_SRAM_SIZE - 14)
680 #define MVPP2_PE_MH_DEFAULT (MVPP2_PRS_TCAM_SRAM_SIZE - 13)
681 #define MVPP2_PE_DSA_DEFAULT (MVPP2_PRS_TCAM_SRAM_SIZE - 12)
682 #define MVPP2_PE_IP6_PROTO_UN (MVPP2_PRS_TCAM_SRAM_SIZE - 11)
683 #define MVPP2_PE_IP4_PROTO_UN (MVPP2_PRS_TCAM_SRAM_SIZE - 10)
684 #define MVPP2_PE_ETH_TYPE_UN (MVPP2_PRS_TCAM_SRAM_SIZE - 9)
685 #define MVPP2_PE_VID_FLTR_DEFAULT (MVPP2_PRS_TCAM_SRAM_SIZE - 8)
686 #define MVPP2_PE_VID_EDSA_FLTR_DEFAULT (MVPP2_PRS_TCAM_SRAM_SIZE - 7)
687 #define MVPP2_PE_VLAN_DBL (MVPP2_PRS_TCAM_SRAM_SIZE - 6)
688 #define MVPP2_PE_VLAN_NONE (MVPP2_PRS_TCAM_SRAM_SIZE - 5)
690 #define MVPP2_PE_MAC_MC_PROMISCUOUS (MVPP2_PRS_TCAM_SRAM_SIZE - 3)
691 #define MVPP2_PE_MAC_UC_PROMISCUOUS (MVPP2_PRS_TCAM_SRAM_SIZE - 2)
692 #define MVPP2_PE_MAC_NON_PROMISCUOUS (MVPP2_PRS_TCAM_SRAM_SIZE - 1)
694 #define MVPP2_PRS_VID_PORT_FIRST(port) (MVPP2_PE_VID_FILT_RANGE_START + \
695 ((port) * MVPP2_PRS_VLAN_FILT_MAX))
696 #define MVPP2_PRS_VID_PORT_LAST(port) (MVPP2_PRS_VID_PORT_FIRST(port) \
697 + MVPP2_PRS_VLAN_FILT_MAX_ENTRY)
698 /* Index of default vid filter for given port */
699 #define MVPP2_PRS_VID_PORT_DFLT(port) (MVPP2_PRS_VID_PORT_FIRST(port) \
700 + MVPP2_PRS_VLAN_FILT_DFLT_ENTRY)
703 * The fields are represented by MVPP2_PRS_TCAM_DATA_REG(3)->(0).
705 #define MVPP2_PRS_SRAM_RI_OFFS 0
706 #define MVPP2_PRS_SRAM_RI_WORD 0
707 #define MVPP2_PRS_SRAM_RI_CTRL_OFFS 32
708 #define MVPP2_PRS_SRAM_RI_CTRL_WORD 1
709 #define MVPP2_PRS_SRAM_RI_CTRL_BITS 32
710 #define MVPP2_PRS_SRAM_SHIFT_OFFS 64
711 #define MVPP2_PRS_SRAM_SHIFT_SIGN_BIT 72
712 #define MVPP2_PRS_SRAM_UDF_OFFS 73
713 #define MVPP2_PRS_SRAM_UDF_BITS 8
714 #define MVPP2_PRS_SRAM_UDF_MASK 0xff
715 #define MVPP2_PRS_SRAM_UDF_SIGN_BIT 81
716 #define MVPP2_PRS_SRAM_UDF_TYPE_OFFS 82
717 #define MVPP2_PRS_SRAM_UDF_TYPE_MASK 0x7
718 #define MVPP2_PRS_SRAM_UDF_TYPE_L3 1
719 #define MVPP2_PRS_SRAM_UDF_TYPE_L4 4
720 #define MVPP2_PRS_SRAM_OP_SEL_SHIFT_OFFS 85
721 #define MVPP2_PRS_SRAM_OP_SEL_SHIFT_MASK 0x3
722 #define MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD 1
723 #define MVPP2_PRS_SRAM_OP_SEL_SHIFT_IP4_ADD 2
724 #define MVPP2_PRS_SRAM_OP_SEL_SHIFT_IP6_ADD 3
725 #define MVPP2_PRS_SRAM_OP_SEL_UDF_OFFS 87
726 #define MVPP2_PRS_SRAM_OP_SEL_UDF_BITS 2
727 #define MVPP2_PRS_SRAM_OP_SEL_UDF_MASK 0x3
728 #define MVPP2_PRS_SRAM_OP_SEL_UDF_ADD 0
729 #define MVPP2_PRS_SRAM_OP_SEL_UDF_IP4_ADD 2
730 #define MVPP2_PRS_SRAM_OP_SEL_UDF_IP6_ADD 3
731 #define MVPP2_PRS_SRAM_OP_SEL_BASE_OFFS 89
732 #define MVPP2_PRS_SRAM_AI_OFFS 90
733 #define MVPP2_PRS_SRAM_AI_CTRL_OFFS 98
734 #define MVPP2_PRS_SRAM_AI_CTRL_BITS 8
735 #define MVPP2_PRS_SRAM_AI_MASK 0xff
736 #define MVPP2_PRS_SRAM_NEXT_LU_OFFS 106
737 #define MVPP2_PRS_SRAM_NEXT_LU_MASK 0xf
738 #define MVPP2_PRS_SRAM_LU_DONE_BIT 110
739 #define MVPP2_PRS_SRAM_LU_GEN_BIT 111
741 /* Sram result info bits assignment */
742 #define MVPP2_PRS_RI_MAC_ME_MASK 0x1
743 #define MVPP2_PRS_RI_DSA_MASK 0x2
744 #define MVPP2_PRS_RI_VLAN_MASK (BIT(2) | BIT(3))
745 #define MVPP2_PRS_RI_VLAN_NONE 0x0
746 #define MVPP2_PRS_RI_VLAN_SINGLE BIT(2)
747 #define MVPP2_PRS_RI_VLAN_DOUBLE BIT(3)
748 #define MVPP2_PRS_RI_VLAN_TRIPLE (BIT(2) | BIT(3))
749 #define MVPP2_PRS_RI_CPU_CODE_MASK 0x70
750 #define MVPP2_PRS_RI_CPU_CODE_RX_SPEC BIT(4)
751 #define MVPP2_PRS_RI_L2_CAST_MASK (BIT(9) | BIT(10))
752 #define MVPP2_PRS_RI_L2_UCAST 0x0
753 #define MVPP2_PRS_RI_L2_MCAST BIT(9)
754 #define MVPP2_PRS_RI_L2_BCAST BIT(10)
755 #define MVPP2_PRS_RI_PPPOE_MASK 0x800
756 #define MVPP2_PRS_RI_L3_PROTO_MASK (BIT(12) | BIT(13) | BIT(14))
757 #define MVPP2_PRS_RI_L3_UN 0x0
758 #define MVPP2_PRS_RI_L3_IP4 BIT(12)
759 #define MVPP2_PRS_RI_L3_IP4_OPT BIT(13)
760 #define MVPP2_PRS_RI_L3_IP4_OTHER (BIT(12) | BIT(13))
761 #define MVPP2_PRS_RI_L3_IP6 BIT(14)
762 #define MVPP2_PRS_RI_L3_IP6_EXT (BIT(12) | BIT(14))
763 #define MVPP2_PRS_RI_L3_ARP (BIT(13) | BIT(14))
764 #define MVPP2_PRS_RI_L3_ADDR_MASK (BIT(15) | BIT(16))
765 #define MVPP2_PRS_RI_L3_UCAST 0x0
766 #define MVPP2_PRS_RI_L3_MCAST BIT(15)
767 #define MVPP2_PRS_RI_L3_BCAST (BIT(15) | BIT(16))
768 #define MVPP2_PRS_RI_IP_FRAG_MASK 0x20000
769 #define MVPP2_PRS_RI_IP_FRAG_TRUE BIT(17)
770 #define MVPP2_PRS_RI_UDF3_MASK 0x300000
771 #define MVPP2_PRS_RI_UDF3_RX_SPECIAL BIT(21)
772 #define MVPP2_PRS_RI_L4_PROTO_MASK 0x1c00000
773 #define MVPP2_PRS_RI_L4_TCP BIT(22)
774 #define MVPP2_PRS_RI_L4_UDP BIT(23)
775 #define MVPP2_PRS_RI_L4_OTHER (BIT(22) | BIT(23))
776 #define MVPP2_PRS_RI_UDF7_MASK 0x60000000
777 #define MVPP2_PRS_RI_UDF7_IP6_LITE BIT(29)
778 #define MVPP2_PRS_RI_DROP_MASK 0x80000000
780 /* Sram additional info bits assignment */
781 #define MVPP2_PRS_IPV4_DIP_AI_BIT BIT(0)
782 #define MVPP2_PRS_IPV6_NO_EXT_AI_BIT BIT(0)
783 #define MVPP2_PRS_IPV6_EXT_AI_BIT BIT(1)
784 #define MVPP2_PRS_IPV6_EXT_AH_AI_BIT BIT(2)
785 #define MVPP2_PRS_IPV6_EXT_AH_LEN_AI_BIT BIT(3)
786 #define MVPP2_PRS_IPV6_EXT_AH_L4_AI_BIT BIT(4)
787 #define MVPP2_PRS_SINGLE_VLAN_AI 0
788 #define MVPP2_PRS_DBL_VLAN_AI_BIT BIT(7)
789 #define MVPP2_PRS_EDSA_VID_AI_BIT BIT(0)
792 #define MVPP2_PRS_TAGGED true
793 #define MVPP2_PRS_UNTAGGED false
794 #define MVPP2_PRS_EDSA true
795 #define MVPP2_PRS_DSA false
797 /* MAC entries, shadow udf */
799 MVPP2_PRS_UDF_MAC_DEF,
800 MVPP2_PRS_UDF_MAC_RANGE,
801 MVPP2_PRS_UDF_L2_DEF,
802 MVPP2_PRS_UDF_L2_DEF_COPY,
803 MVPP2_PRS_UDF_L2_USER,
807 enum mvpp2_prs_lookup {
822 enum mvpp2_prs_l2_cast {
823 MVPP2_PRS_L2_UNI_CAST,
824 MVPP2_PRS_L2_MULTI_CAST,
828 enum mvpp2_prs_l3_cast {
829 MVPP2_PRS_L3_UNI_CAST,
830 MVPP2_PRS_L3_MULTI_CAST,
831 MVPP2_PRS_L3_BROAD_CAST
834 /* Classifier constants */
835 #define MVPP2_CLS_FLOWS_TBL_SIZE 512
836 #define MVPP2_CLS_FLOWS_TBL_DATA_WORDS 3
837 #define MVPP2_CLS_LKP_TBL_SIZE 64
838 #define MVPP2_CLS_RX_QUEUES 256
841 #define MVPP22_RSS_TABLE_ENTRIES 32
844 #define MVPP2_BM_JUMBO_BUF_NUM 512
845 #define MVPP2_BM_LONG_BUF_NUM 1024
846 #define MVPP2_BM_SHORT_BUF_NUM 2048
847 #define MVPP2_BM_POOL_SIZE_MAX (16*1024 - MVPP2_BM_POOL_PTR_ALIGN/4)
848 #define MVPP2_BM_POOL_PTR_ALIGN 128
850 /* BM cookie (32 bits) definition */
851 #define MVPP2_BM_COOKIE_POOL_OFFS 8
852 #define MVPP2_BM_COOKIE_CPU_OFFS 24
854 #define MVPP2_BM_SHORT_FRAME_SIZE 512
855 #define MVPP2_BM_LONG_FRAME_SIZE 2048
856 #define MVPP2_BM_JUMBO_FRAME_SIZE 10240
857 /* BM short pool packet size
858 * These value assure that for SWF the total number
859 * of bytes allocated for each buffer will be 512
861 #define MVPP2_BM_SHORT_PKT_SIZE MVPP2_RX_MAX_PKT_SIZE(MVPP2_BM_SHORT_FRAME_SIZE)
862 #define MVPP2_BM_LONG_PKT_SIZE MVPP2_RX_MAX_PKT_SIZE(MVPP2_BM_LONG_FRAME_SIZE)
863 #define MVPP2_BM_JUMBO_PKT_SIZE MVPP2_RX_MAX_PKT_SIZE(MVPP2_BM_JUMBO_FRAME_SIZE)
865 #define MVPP21_ADDR_SPACE_SZ 0
866 #define MVPP22_ADDR_SPACE_SZ SZ_64K
868 #define MVPP2_MAX_THREADS 8
869 #define MVPP2_MAX_QVECS MVPP2_MAX_THREADS
871 enum mvpp2_bm_pool_log_num {
881 } mvpp2_pools[MVPP2_BM_POOLS_NUM];
883 /* GMAC MIB Counters register definitions */
884 #define MVPP21_MIB_COUNTERS_OFFSET 0x1000
885 #define MVPP21_MIB_COUNTERS_PORT_SZ 0x400
886 #define MVPP22_MIB_COUNTERS_OFFSET 0x0
887 #define MVPP22_MIB_COUNTERS_PORT_SZ 0x100
889 #define MVPP2_MIB_GOOD_OCTETS_RCVD 0x0
890 #define MVPP2_MIB_BAD_OCTETS_RCVD 0x8
891 #define MVPP2_MIB_CRC_ERRORS_SENT 0xc
892 #define MVPP2_MIB_UNICAST_FRAMES_RCVD 0x10
893 #define MVPP2_MIB_BROADCAST_FRAMES_RCVD 0x18
894 #define MVPP2_MIB_MULTICAST_FRAMES_RCVD 0x1c
895 #define MVPP2_MIB_FRAMES_64_OCTETS 0x20
896 #define MVPP2_MIB_FRAMES_65_TO_127_OCTETS 0x24
897 #define MVPP2_MIB_FRAMES_128_TO_255_OCTETS 0x28
898 #define MVPP2_MIB_FRAMES_256_TO_511_OCTETS 0x2c
899 #define MVPP2_MIB_FRAMES_512_TO_1023_OCTETS 0x30
900 #define MVPP2_MIB_FRAMES_1024_TO_MAX_OCTETS 0x34
901 #define MVPP2_MIB_GOOD_OCTETS_SENT 0x38
902 #define MVPP2_MIB_UNICAST_FRAMES_SENT 0x40
903 #define MVPP2_MIB_MULTICAST_FRAMES_SENT 0x48
904 #define MVPP2_MIB_BROADCAST_FRAMES_SENT 0x4c
905 #define MVPP2_MIB_FC_SENT 0x54
906 #define MVPP2_MIB_FC_RCVD 0x58
907 #define MVPP2_MIB_RX_FIFO_OVERRUN 0x5c
908 #define MVPP2_MIB_UNDERSIZE_RCVD 0x60
909 #define MVPP2_MIB_FRAGMENTS_RCVD 0x64
910 #define MVPP2_MIB_OVERSIZE_RCVD 0x68
911 #define MVPP2_MIB_JABBER_RCVD 0x6c
912 #define MVPP2_MIB_MAC_RCV_ERROR 0x70
913 #define MVPP2_MIB_BAD_CRC_EVENT 0x74
914 #define MVPP2_MIB_COLLISION 0x78
915 #define MVPP2_MIB_LATE_COLLISION 0x7c
917 #define MVPP2_MIB_COUNTERS_STATS_DELAY (1 * HZ)
919 #define MVPP2_DESC_DMA_MASK DMA_BIT_MASK(40)
923 /* Shared Packet Processor resources */
925 /* Shared registers' base addresses */
926 void __iomem *lms_base;
927 void __iomem *iface_base;
929 /* On PPv2.2, each "software thread" can access the base
930 * register through a separate address space, each 64 KB apart
931 * from each other. Typically, such address spaces will be
934 void __iomem *swth_base[MVPP2_MAX_THREADS];
936 /* On PPv2.2, some port control registers are located into the system
937 * controller space. These registers are accessible through a regmap.
939 struct regmap *sysctrl_base;
945 struct clk *mg_core_clk;
948 /* List of pointers to port structures */
950 struct mvpp2_port *port_list[MVPP2_MAX_PORTS];
952 /* Aggregated TXQs */
953 struct mvpp2_tx_queue *aggr_txqs;
956 struct mvpp2_bm_pool *bm_pools;
958 /* PRS shadow table */
959 struct mvpp2_prs_shadow *prs_shadow;
960 /* PRS auxiliary table for double vlan entries control */
961 bool *prs_double_vlans;
967 enum { MVPP21, MVPP22 } hw_version;
969 /* Maximum number of RXQs per port */
970 unsigned int max_port_rxqs;
972 /* Workqueue to gather hardware statistics */
974 struct workqueue_struct *stats_queue;
977 struct mvpp2_pcpu_stats {
978 struct u64_stats_sync syncp;
985 /* Per-CPU port control */
986 struct mvpp2_port_pcpu {
987 struct hrtimer tx_done_timer;
988 bool timer_scheduled;
989 /* Tasklet for egress finalization */
990 struct tasklet_struct tx_done_tasklet;
993 struct mvpp2_queue_vector {
995 struct napi_struct napi;
996 enum { MVPP2_QUEUE_VECTOR_SHARED, MVPP2_QUEUE_VECTOR_PRIVATE } type;
1001 u32 pending_cause_rx;
1002 struct mvpp2_port *port;
1008 /* Index of the port from the "group of ports" complex point
1017 /* Firmware node associated to the port */
1018 struct fwnode_handle *fwnode;
1020 /* Per-port registers' base address */
1022 void __iomem *stats_base;
1024 struct mvpp2_rx_queue **rxqs;
1026 struct mvpp2_tx_queue **txqs;
1028 struct net_device *dev;
1032 /* Per-CPU port control */
1033 struct mvpp2_port_pcpu __percpu *pcpu;
1036 unsigned long flags;
1040 struct mvpp2_pcpu_stats __percpu *stats;
1043 /* Per-port work and its lock to gather hardware statistics */
1044 struct mutex gather_stats_lock;
1045 struct delayed_work stats_work;
1047 phy_interface_t phy_interface;
1048 struct device_node *phy_node;
1051 unsigned int duplex;
1054 struct mvpp2_bm_pool *pool_long;
1055 struct mvpp2_bm_pool *pool_short;
1057 /* Index of first port's physical RXQ */
1060 struct mvpp2_queue_vector qvecs[MVPP2_MAX_QVECS];
1061 unsigned int nqvecs;
1067 /* The mvpp2_tx_desc and mvpp2_rx_desc structures describe the
1068 * layout of the transmit and reception DMA descriptors, and their
1069 * layout is therefore defined by the hardware design
1072 #define MVPP2_TXD_L3_OFF_SHIFT 0
1073 #define MVPP2_TXD_IP_HLEN_SHIFT 8
1074 #define MVPP2_TXD_L4_CSUM_FRAG BIT(13)
1075 #define MVPP2_TXD_L4_CSUM_NOT BIT(14)
1076 #define MVPP2_TXD_IP_CSUM_DISABLE BIT(15)
1077 #define MVPP2_TXD_PADDING_DISABLE BIT(23)
1078 #define MVPP2_TXD_L4_UDP BIT(24)
1079 #define MVPP2_TXD_L3_IP6 BIT(26)
1080 #define MVPP2_TXD_L_DESC BIT(28)
1081 #define MVPP2_TXD_F_DESC BIT(29)
1083 #define MVPP2_RXD_ERR_SUMMARY BIT(15)
1084 #define MVPP2_RXD_ERR_CODE_MASK (BIT(13) | BIT(14))
1085 #define MVPP2_RXD_ERR_CRC 0x0
1086 #define MVPP2_RXD_ERR_OVERRUN BIT(13)
1087 #define MVPP2_RXD_ERR_RESOURCE (BIT(13) | BIT(14))
1088 #define MVPP2_RXD_BM_POOL_ID_OFFS 16
1089 #define MVPP2_RXD_BM_POOL_ID_MASK (BIT(16) | BIT(17) | BIT(18))
1090 #define MVPP2_RXD_HWF_SYNC BIT(21)
1091 #define MVPP2_RXD_L4_CSUM_OK BIT(22)
1092 #define MVPP2_RXD_IP4_HEADER_ERR BIT(24)
1093 #define MVPP2_RXD_L4_TCP BIT(25)
1094 #define MVPP2_RXD_L4_UDP BIT(26)
1095 #define MVPP2_RXD_L3_IP4 BIT(28)
1096 #define MVPP2_RXD_L3_IP6 BIT(30)
1097 #define MVPP2_RXD_BUF_HDR BIT(31)
1099 /* HW TX descriptor for PPv2.1 */
1100 struct mvpp21_tx_desc {
1101 u32 command; /* Options used by HW for packet transmitting.*/
1102 u8 packet_offset; /* the offset from the buffer beginning */
1103 u8 phys_txq; /* destination queue ID */
1104 u16 data_size; /* data size of transmitted packet in bytes */
1105 u32 buf_dma_addr; /* physical addr of transmitted buffer */
1106 u32 buf_cookie; /* cookie for access to TX buffer in tx path */
1107 u32 reserved1[3]; /* hw_cmd (for future use, BM, PON, PNC) */
1108 u32 reserved2; /* reserved (for future use) */
1111 /* HW RX descriptor for PPv2.1 */
1112 struct mvpp21_rx_desc {
1113 u32 status; /* info about received packet */
1114 u16 reserved1; /* parser_info (for future use, PnC) */
1115 u16 data_size; /* size of received packet in bytes */
1116 u32 buf_dma_addr; /* physical address of the buffer */
1117 u32 buf_cookie; /* cookie for access to RX buffer in rx path */
1118 u16 reserved2; /* gem_port_id (for future use, PON) */
1119 u16 reserved3; /* csum_l4 (for future use, PnC) */
1120 u8 reserved4; /* bm_qset (for future use, BM) */
1122 u16 reserved6; /* classify_info (for future use, PnC) */
1123 u32 reserved7; /* flow_id (for future use, PnC) */
1127 /* HW TX descriptor for PPv2.2 */
1128 struct mvpp22_tx_desc {
1134 u64 buf_dma_addr_ptp;
1135 u64 buf_cookie_misc;
1138 /* HW RX descriptor for PPv2.2 */
1139 struct mvpp22_rx_desc {
1145 u64 buf_dma_addr_key_hash;
1146 u64 buf_cookie_misc;
1149 /* Opaque type used by the driver to manipulate the HW TX and RX
1152 struct mvpp2_tx_desc {
1154 struct mvpp21_tx_desc pp21;
1155 struct mvpp22_tx_desc pp22;
1159 struct mvpp2_rx_desc {
1161 struct mvpp21_rx_desc pp21;
1162 struct mvpp22_rx_desc pp22;
1166 struct mvpp2_txq_pcpu_buf {
1167 /* Transmitted SKB */
1168 struct sk_buff *skb;
1170 /* Physical address of transmitted buffer */
1173 /* Size transmitted */
1177 /* Per-CPU Tx queue control */
1178 struct mvpp2_txq_pcpu {
1181 /* Number of Tx DMA descriptors in the descriptor ring */
1184 /* Number of currently used Tx DMA descriptor in the
1192 /* Number of Tx DMA descriptors reserved for each CPU */
1195 /* Infos about transmitted buffers */
1196 struct mvpp2_txq_pcpu_buf *buffs;
1198 /* Index of last TX DMA descriptor that was inserted */
1201 /* Index of the TX DMA descriptor to be cleaned up */
1204 /* DMA buffer for TSO headers */
1206 dma_addr_t tso_headers_dma;
1209 struct mvpp2_tx_queue {
1210 /* Physical number of this Tx queue */
1213 /* Logical number of this Tx queue */
1216 /* Number of Tx DMA descriptors in the descriptor ring */
1219 /* Number of currently used Tx DMA descriptor in the descriptor ring */
1222 /* Per-CPU control of physical Tx queues */
1223 struct mvpp2_txq_pcpu __percpu *pcpu;
1227 /* Virtual address of thex Tx DMA descriptors array */
1228 struct mvpp2_tx_desc *descs;
1230 /* DMA address of the Tx DMA descriptors array */
1231 dma_addr_t descs_dma;
1233 /* Index of the last Tx DMA descriptor */
1236 /* Index of the next Tx DMA descriptor to process */
1237 int next_desc_to_proc;
1240 struct mvpp2_rx_queue {
1241 /* RX queue number, in the range 0-31 for physical RXQs */
1244 /* Num of rx descriptors in the rx descriptor ring */
1250 /* Virtual address of the RX DMA descriptors array */
1251 struct mvpp2_rx_desc *descs;
1253 /* DMA address of the RX DMA descriptors array */
1254 dma_addr_t descs_dma;
1256 /* Index of the last RX DMA descriptor */
1259 /* Index of the next RX DMA descriptor to process */
1260 int next_desc_to_proc;
1262 /* ID of port to which physical RXQ is mapped */
1265 /* Port's logic RXQ number to which physical RXQ is mapped */
1269 union mvpp2_prs_tcam_entry {
1270 u32 word[MVPP2_PRS_TCAM_WORDS];
1271 u8 byte[MVPP2_PRS_TCAM_WORDS * 4];
1274 union mvpp2_prs_sram_entry {
1275 u32 word[MVPP2_PRS_SRAM_WORDS];
1276 u8 byte[MVPP2_PRS_SRAM_WORDS * 4];
1279 struct mvpp2_prs_entry {
1281 union mvpp2_prs_tcam_entry tcam;
1282 union mvpp2_prs_sram_entry sram;
1285 struct mvpp2_prs_shadow {
1292 /* User defined offset */
1300 struct mvpp2_cls_flow_entry {
1302 u32 data[MVPP2_CLS_FLOWS_TBL_DATA_WORDS];
1305 struct mvpp2_cls_lookup_entry {
1311 struct mvpp2_bm_pool {
1312 /* Pool number in the range 0-7 */
1315 /* Buffer Pointers Pool External (BPPE) size */
1317 /* BPPE size in bytes */
1319 /* Number of buffers for this pool */
1321 /* Pool buffer size */
1327 /* BPPE virtual base address */
1329 /* BPPE DMA base address */
1330 dma_addr_t dma_addr;
1332 /* Ports using BM pool */
1336 #define IS_TSO_HEADER(txq_pcpu, addr) \
1337 ((addr) >= (txq_pcpu)->tso_headers_dma && \
1338 (addr) < (txq_pcpu)->tso_headers_dma + \
1339 (txq_pcpu)->size * TSO_HEADER_SIZE)
1342 #define MVPP2_QDIST_SINGLE_MODE 0
1343 #define MVPP2_QDIST_MULTI_MODE 1
1345 static int queue_mode = MVPP2_QDIST_SINGLE_MODE;
1347 module_param(queue_mode, int, 0444);
1348 MODULE_PARM_DESC(queue_mode, "Set queue_mode (single=0, multi=1)");
1350 #define MVPP2_DRIVER_NAME "mvpp2"
1351 #define MVPP2_DRIVER_VERSION "1.0"
1353 /* Utility/helper methods */
1355 static void mvpp2_write(struct mvpp2 *priv, u32 offset, u32 data)
1357 writel(data, priv->swth_base[0] + offset);
1360 static u32 mvpp2_read(struct mvpp2 *priv, u32 offset)
1362 return readl(priv->swth_base[0] + offset);
1365 static u32 mvpp2_read_relaxed(struct mvpp2 *priv, u32 offset)
1367 return readl_relaxed(priv->swth_base[0] + offset);
1369 /* These accessors should be used to access:
1371 * - per-CPU registers, where each CPU has its own copy of the
1374 * MVPP2_BM_VIRT_ALLOC_REG
1375 * MVPP2_BM_ADDR_HIGH_ALLOC
1376 * MVPP22_BM_ADDR_HIGH_RLS_REG
1377 * MVPP2_BM_VIRT_RLS_REG
1378 * MVPP2_ISR_RX_TX_CAUSE_REG
1379 * MVPP2_ISR_RX_TX_MASK_REG
1381 * MVPP2_AGGR_TXQ_UPDATE_REG
1382 * MVPP2_TXQ_RSVD_REQ_REG
1383 * MVPP2_TXQ_RSVD_RSLT_REG
1384 * MVPP2_TXQ_SENT_REG
1387 * - global registers that must be accessed through a specific CPU
1388 * window, because they are related to an access to a per-CPU
1391 * MVPP2_BM_PHY_ALLOC_REG (related to MVPP2_BM_VIRT_ALLOC_REG)
1392 * MVPP2_BM_PHY_RLS_REG (related to MVPP2_BM_VIRT_RLS_REG)
1393 * MVPP2_RXQ_THRESH_REG (related to MVPP2_RXQ_NUM_REG)
1394 * MVPP2_RXQ_DESC_ADDR_REG (related to MVPP2_RXQ_NUM_REG)
1395 * MVPP2_RXQ_DESC_SIZE_REG (related to MVPP2_RXQ_NUM_REG)
1396 * MVPP2_RXQ_INDEX_REG (related to MVPP2_RXQ_NUM_REG)
1397 * MVPP2_TXQ_PENDING_REG (related to MVPP2_TXQ_NUM_REG)
1398 * MVPP2_TXQ_DESC_ADDR_REG (related to MVPP2_TXQ_NUM_REG)
1399 * MVPP2_TXQ_DESC_SIZE_REG (related to MVPP2_TXQ_NUM_REG)
1400 * MVPP2_TXQ_INDEX_REG (related to MVPP2_TXQ_NUM_REG)
1401 * MVPP2_TXQ_PENDING_REG (related to MVPP2_TXQ_NUM_REG)
1402 * MVPP2_TXQ_PREF_BUF_REG (related to MVPP2_TXQ_NUM_REG)
1403 * MVPP2_TXQ_PREF_BUF_REG (related to MVPP2_TXQ_NUM_REG)
1405 static void mvpp2_percpu_write(struct mvpp2 *priv, int cpu,
1406 u32 offset, u32 data)
1408 writel(data, priv->swth_base[cpu] + offset);
1411 static u32 mvpp2_percpu_read(struct mvpp2 *priv, int cpu,
1414 return readl(priv->swth_base[cpu] + offset);
1417 static void mvpp2_percpu_write_relaxed(struct mvpp2 *priv, int cpu,
1418 u32 offset, u32 data)
1420 writel_relaxed(data, priv->swth_base[cpu] + offset);
1423 static u32 mvpp2_percpu_read_relaxed(struct mvpp2 *priv, int cpu,
1426 return readl_relaxed(priv->swth_base[cpu] + offset);
1429 static dma_addr_t mvpp2_txdesc_dma_addr_get(struct mvpp2_port *port,
1430 struct mvpp2_tx_desc *tx_desc)
1432 if (port->priv->hw_version == MVPP21)
1433 return tx_desc->pp21.buf_dma_addr;
1435 return tx_desc->pp22.buf_dma_addr_ptp & MVPP2_DESC_DMA_MASK;
1438 static void mvpp2_txdesc_dma_addr_set(struct mvpp2_port *port,
1439 struct mvpp2_tx_desc *tx_desc,
1440 dma_addr_t dma_addr)
1442 dma_addr_t addr, offset;
1444 addr = dma_addr & ~MVPP2_TX_DESC_ALIGN;
1445 offset = dma_addr & MVPP2_TX_DESC_ALIGN;
1447 if (port->priv->hw_version == MVPP21) {
1448 tx_desc->pp21.buf_dma_addr = addr;
1449 tx_desc->pp21.packet_offset = offset;
1451 u64 val = (u64)addr;
1453 tx_desc->pp22.buf_dma_addr_ptp &= ~MVPP2_DESC_DMA_MASK;
1454 tx_desc->pp22.buf_dma_addr_ptp |= val;
1455 tx_desc->pp22.packet_offset = offset;
1459 static size_t mvpp2_txdesc_size_get(struct mvpp2_port *port,
1460 struct mvpp2_tx_desc *tx_desc)
1462 if (port->priv->hw_version == MVPP21)
1463 return tx_desc->pp21.data_size;
1465 return tx_desc->pp22.data_size;
1468 static void mvpp2_txdesc_size_set(struct mvpp2_port *port,
1469 struct mvpp2_tx_desc *tx_desc,
1472 if (port->priv->hw_version == MVPP21)
1473 tx_desc->pp21.data_size = size;
1475 tx_desc->pp22.data_size = size;
1478 static void mvpp2_txdesc_txq_set(struct mvpp2_port *port,
1479 struct mvpp2_tx_desc *tx_desc,
1482 if (port->priv->hw_version == MVPP21)
1483 tx_desc->pp21.phys_txq = txq;
1485 tx_desc->pp22.phys_txq = txq;
1488 static void mvpp2_txdesc_cmd_set(struct mvpp2_port *port,
1489 struct mvpp2_tx_desc *tx_desc,
1490 unsigned int command)
1492 if (port->priv->hw_version == MVPP21)
1493 tx_desc->pp21.command = command;
1495 tx_desc->pp22.command = command;
1498 static unsigned int mvpp2_txdesc_offset_get(struct mvpp2_port *port,
1499 struct mvpp2_tx_desc *tx_desc)
1501 if (port->priv->hw_version == MVPP21)
1502 return tx_desc->pp21.packet_offset;
1504 return tx_desc->pp22.packet_offset;
1507 static dma_addr_t mvpp2_rxdesc_dma_addr_get(struct mvpp2_port *port,
1508 struct mvpp2_rx_desc *rx_desc)
1510 if (port->priv->hw_version == MVPP21)
1511 return rx_desc->pp21.buf_dma_addr;
1513 return rx_desc->pp22.buf_dma_addr_key_hash & MVPP2_DESC_DMA_MASK;
1516 static unsigned long mvpp2_rxdesc_cookie_get(struct mvpp2_port *port,
1517 struct mvpp2_rx_desc *rx_desc)
1519 if (port->priv->hw_version == MVPP21)
1520 return rx_desc->pp21.buf_cookie;
1522 return rx_desc->pp22.buf_cookie_misc & MVPP2_DESC_DMA_MASK;
1525 static size_t mvpp2_rxdesc_size_get(struct mvpp2_port *port,
1526 struct mvpp2_rx_desc *rx_desc)
1528 if (port->priv->hw_version == MVPP21)
1529 return rx_desc->pp21.data_size;
1531 return rx_desc->pp22.data_size;
1534 static u32 mvpp2_rxdesc_status_get(struct mvpp2_port *port,
1535 struct mvpp2_rx_desc *rx_desc)
1537 if (port->priv->hw_version == MVPP21)
1538 return rx_desc->pp21.status;
1540 return rx_desc->pp22.status;
1543 static void mvpp2_txq_inc_get(struct mvpp2_txq_pcpu *txq_pcpu)
1545 txq_pcpu->txq_get_index++;
1546 if (txq_pcpu->txq_get_index == txq_pcpu->size)
1547 txq_pcpu->txq_get_index = 0;
1550 static void mvpp2_txq_inc_put(struct mvpp2_port *port,
1551 struct mvpp2_txq_pcpu *txq_pcpu,
1552 struct sk_buff *skb,
1553 struct mvpp2_tx_desc *tx_desc)
1555 struct mvpp2_txq_pcpu_buf *tx_buf =
1556 txq_pcpu->buffs + txq_pcpu->txq_put_index;
1558 tx_buf->size = mvpp2_txdesc_size_get(port, tx_desc);
1559 tx_buf->dma = mvpp2_txdesc_dma_addr_get(port, tx_desc) +
1560 mvpp2_txdesc_offset_get(port, tx_desc);
1561 txq_pcpu->txq_put_index++;
1562 if (txq_pcpu->txq_put_index == txq_pcpu->size)
1563 txq_pcpu->txq_put_index = 0;
1566 /* Get number of physical egress port */
1567 static inline int mvpp2_egress_port(struct mvpp2_port *port)
1569 return MVPP2_MAX_TCONT + port->id;
1572 /* Get number of physical TXQ */
1573 static inline int mvpp2_txq_phys(int port, int txq)
1575 return (MVPP2_MAX_TCONT + port) * MVPP2_MAX_TXQ + txq;
1578 /* Parser configuration routines */
1580 /* Update parser tcam and sram hw entries */
1581 static int mvpp2_prs_hw_write(struct mvpp2 *priv, struct mvpp2_prs_entry *pe)
1585 if (pe->index > MVPP2_PRS_TCAM_SRAM_SIZE - 1)
1588 /* Clear entry invalidation bit */
1589 pe->tcam.word[MVPP2_PRS_TCAM_INV_WORD] &= ~MVPP2_PRS_TCAM_INV_MASK;
1591 /* Write tcam index - indirect access */
1592 mvpp2_write(priv, MVPP2_PRS_TCAM_IDX_REG, pe->index);
1593 for (i = 0; i < MVPP2_PRS_TCAM_WORDS; i++)
1594 mvpp2_write(priv, MVPP2_PRS_TCAM_DATA_REG(i), pe->tcam.word[i]);
1596 /* Write sram index - indirect access */
1597 mvpp2_write(priv, MVPP2_PRS_SRAM_IDX_REG, pe->index);
1598 for (i = 0; i < MVPP2_PRS_SRAM_WORDS; i++)
1599 mvpp2_write(priv, MVPP2_PRS_SRAM_DATA_REG(i), pe->sram.word[i]);
1604 /* Initialize tcam entry from hw */
1605 static int mvpp2_prs_init_from_hw(struct mvpp2 *priv,
1606 struct mvpp2_prs_entry *pe, int tid)
1610 if (tid > MVPP2_PRS_TCAM_SRAM_SIZE - 1)
1613 memset(pe, 0, sizeof(*pe));
1616 /* Write tcam index - indirect access */
1617 mvpp2_write(priv, MVPP2_PRS_TCAM_IDX_REG, pe->index);
1619 pe->tcam.word[MVPP2_PRS_TCAM_INV_WORD] = mvpp2_read(priv,
1620 MVPP2_PRS_TCAM_DATA_REG(MVPP2_PRS_TCAM_INV_WORD));
1621 if (pe->tcam.word[MVPP2_PRS_TCAM_INV_WORD] & MVPP2_PRS_TCAM_INV_MASK)
1622 return MVPP2_PRS_TCAM_ENTRY_INVALID;
1624 for (i = 0; i < MVPP2_PRS_TCAM_WORDS; i++)
1625 pe->tcam.word[i] = mvpp2_read(priv, MVPP2_PRS_TCAM_DATA_REG(i));
1627 /* Write sram index - indirect access */
1628 mvpp2_write(priv, MVPP2_PRS_SRAM_IDX_REG, pe->index);
1629 for (i = 0; i < MVPP2_PRS_SRAM_WORDS; i++)
1630 pe->sram.word[i] = mvpp2_read(priv, MVPP2_PRS_SRAM_DATA_REG(i));
1635 /* Invalidate tcam hw entry */
1636 static void mvpp2_prs_hw_inv(struct mvpp2 *priv, int index)
1638 /* Write index - indirect access */
1639 mvpp2_write(priv, MVPP2_PRS_TCAM_IDX_REG, index);
1640 mvpp2_write(priv, MVPP2_PRS_TCAM_DATA_REG(MVPP2_PRS_TCAM_INV_WORD),
1641 MVPP2_PRS_TCAM_INV_MASK);
1644 /* Enable shadow table entry and set its lookup ID */
1645 static void mvpp2_prs_shadow_set(struct mvpp2 *priv, int index, int lu)
1647 priv->prs_shadow[index].valid = true;
1648 priv->prs_shadow[index].lu = lu;
1651 /* Update ri fields in shadow table entry */
1652 static void mvpp2_prs_shadow_ri_set(struct mvpp2 *priv, int index,
1653 unsigned int ri, unsigned int ri_mask)
1655 priv->prs_shadow[index].ri_mask = ri_mask;
1656 priv->prs_shadow[index].ri = ri;
1659 /* Update lookup field in tcam sw entry */
1660 static void mvpp2_prs_tcam_lu_set(struct mvpp2_prs_entry *pe, unsigned int lu)
1662 int enable_off = MVPP2_PRS_TCAM_EN_OFFS(MVPP2_PRS_TCAM_LU_BYTE);
1664 pe->tcam.byte[MVPP2_PRS_TCAM_LU_BYTE] = lu;
1665 pe->tcam.byte[enable_off] = MVPP2_PRS_LU_MASK;
1668 /* Update mask for single port in tcam sw entry */
1669 static void mvpp2_prs_tcam_port_set(struct mvpp2_prs_entry *pe,
1670 unsigned int port, bool add)
1672 int enable_off = MVPP2_PRS_TCAM_EN_OFFS(MVPP2_PRS_TCAM_PORT_BYTE);
1675 pe->tcam.byte[enable_off] &= ~(1 << port);
1677 pe->tcam.byte[enable_off] |= 1 << port;
1680 /* Update port map in tcam sw entry */
1681 static void mvpp2_prs_tcam_port_map_set(struct mvpp2_prs_entry *pe,
1684 unsigned char port_mask = MVPP2_PRS_PORT_MASK;
1685 int enable_off = MVPP2_PRS_TCAM_EN_OFFS(MVPP2_PRS_TCAM_PORT_BYTE);
1687 pe->tcam.byte[MVPP2_PRS_TCAM_PORT_BYTE] = 0;
1688 pe->tcam.byte[enable_off] &= ~port_mask;
1689 pe->tcam.byte[enable_off] |= ~ports & MVPP2_PRS_PORT_MASK;
1692 /* Obtain port map from tcam sw entry */
1693 static unsigned int mvpp2_prs_tcam_port_map_get(struct mvpp2_prs_entry *pe)
1695 int enable_off = MVPP2_PRS_TCAM_EN_OFFS(MVPP2_PRS_TCAM_PORT_BYTE);
1697 return ~(pe->tcam.byte[enable_off]) & MVPP2_PRS_PORT_MASK;
1700 /* Set byte of data and its enable bits in tcam sw entry */
1701 static void mvpp2_prs_tcam_data_byte_set(struct mvpp2_prs_entry *pe,
1702 unsigned int offs, unsigned char byte,
1703 unsigned char enable)
1705 pe->tcam.byte[MVPP2_PRS_TCAM_DATA_BYTE(offs)] = byte;
1706 pe->tcam.byte[MVPP2_PRS_TCAM_DATA_BYTE_EN(offs)] = enable;
1709 /* Get byte of data and its enable bits from tcam sw entry */
1710 static void mvpp2_prs_tcam_data_byte_get(struct mvpp2_prs_entry *pe,
1711 unsigned int offs, unsigned char *byte,
1712 unsigned char *enable)
1714 *byte = pe->tcam.byte[MVPP2_PRS_TCAM_DATA_BYTE(offs)];
1715 *enable = pe->tcam.byte[MVPP2_PRS_TCAM_DATA_BYTE_EN(offs)];
1718 /* Compare tcam data bytes with a pattern */
1719 static bool mvpp2_prs_tcam_data_cmp(struct mvpp2_prs_entry *pe, int offs,
1722 int off = MVPP2_PRS_TCAM_DATA_BYTE(offs);
1725 tcam_data = (pe->tcam.byte[off + 1] << 8) | pe->tcam.byte[off];
1726 if (tcam_data != data)
1731 /* Update ai bits in tcam sw entry */
1732 static void mvpp2_prs_tcam_ai_update(struct mvpp2_prs_entry *pe,
1733 unsigned int bits, unsigned int enable)
1735 int i, ai_idx = MVPP2_PRS_TCAM_AI_BYTE;
1737 for (i = 0; i < MVPP2_PRS_AI_BITS; i++) {
1739 if (!(enable & BIT(i)))
1743 pe->tcam.byte[ai_idx] |= 1 << i;
1745 pe->tcam.byte[ai_idx] &= ~(1 << i);
1748 pe->tcam.byte[MVPP2_PRS_TCAM_EN_OFFS(ai_idx)] |= enable;
1751 /* Get ai bits from tcam sw entry */
1752 static int mvpp2_prs_tcam_ai_get(struct mvpp2_prs_entry *pe)
1754 return pe->tcam.byte[MVPP2_PRS_TCAM_AI_BYTE];
1757 /* Set ethertype in tcam sw entry */
1758 static void mvpp2_prs_match_etype(struct mvpp2_prs_entry *pe, int offset,
1759 unsigned short ethertype)
1761 mvpp2_prs_tcam_data_byte_set(pe, offset + 0, ethertype >> 8, 0xff);
1762 mvpp2_prs_tcam_data_byte_set(pe, offset + 1, ethertype & 0xff, 0xff);
1765 /* Set vid in tcam sw entry */
1766 static void mvpp2_prs_match_vid(struct mvpp2_prs_entry *pe, int offset,
1769 mvpp2_prs_tcam_data_byte_set(pe, offset + 0, (vid & 0xf00) >> 8, 0xf);
1770 mvpp2_prs_tcam_data_byte_set(pe, offset + 1, vid & 0xff, 0xff);
1773 /* Set bits in sram sw entry */
1774 static void mvpp2_prs_sram_bits_set(struct mvpp2_prs_entry *pe, int bit_num,
1777 pe->sram.byte[MVPP2_BIT_TO_BYTE(bit_num)] |= (val << (bit_num % 8));
1780 /* Clear bits in sram sw entry */
1781 static void mvpp2_prs_sram_bits_clear(struct mvpp2_prs_entry *pe, int bit_num,
1784 pe->sram.byte[MVPP2_BIT_TO_BYTE(bit_num)] &= ~(val << (bit_num % 8));
1787 /* Update ri bits in sram sw entry */
1788 static void mvpp2_prs_sram_ri_update(struct mvpp2_prs_entry *pe,
1789 unsigned int bits, unsigned int mask)
1793 for (i = 0; i < MVPP2_PRS_SRAM_RI_CTRL_BITS; i++) {
1794 int ri_off = MVPP2_PRS_SRAM_RI_OFFS;
1796 if (!(mask & BIT(i)))
1800 mvpp2_prs_sram_bits_set(pe, ri_off + i, 1);
1802 mvpp2_prs_sram_bits_clear(pe, ri_off + i, 1);
1804 mvpp2_prs_sram_bits_set(pe, MVPP2_PRS_SRAM_RI_CTRL_OFFS + i, 1);
1808 /* Obtain ri bits from sram sw entry */
1809 static int mvpp2_prs_sram_ri_get(struct mvpp2_prs_entry *pe)
1811 return pe->sram.word[MVPP2_PRS_SRAM_RI_WORD];
1814 /* Update ai bits in sram sw entry */
1815 static void mvpp2_prs_sram_ai_update(struct mvpp2_prs_entry *pe,
1816 unsigned int bits, unsigned int mask)
1819 int ai_off = MVPP2_PRS_SRAM_AI_OFFS;
1821 for (i = 0; i < MVPP2_PRS_SRAM_AI_CTRL_BITS; i++) {
1823 if (!(mask & BIT(i)))
1827 mvpp2_prs_sram_bits_set(pe, ai_off + i, 1);
1829 mvpp2_prs_sram_bits_clear(pe, ai_off + i, 1);
1831 mvpp2_prs_sram_bits_set(pe, MVPP2_PRS_SRAM_AI_CTRL_OFFS + i, 1);
1835 /* Read ai bits from sram sw entry */
1836 static int mvpp2_prs_sram_ai_get(struct mvpp2_prs_entry *pe)
1839 int ai_off = MVPP2_BIT_TO_BYTE(MVPP2_PRS_SRAM_AI_OFFS);
1840 int ai_en_off = ai_off + 1;
1841 int ai_shift = MVPP2_PRS_SRAM_AI_OFFS % 8;
1843 bits = (pe->sram.byte[ai_off] >> ai_shift) |
1844 (pe->sram.byte[ai_en_off] << (8 - ai_shift));
1849 /* In sram sw entry set lookup ID field of the tcam key to be used in the next
1852 static void mvpp2_prs_sram_next_lu_set(struct mvpp2_prs_entry *pe,
1855 int sram_next_off = MVPP2_PRS_SRAM_NEXT_LU_OFFS;
1857 mvpp2_prs_sram_bits_clear(pe, sram_next_off,
1858 MVPP2_PRS_SRAM_NEXT_LU_MASK);
1859 mvpp2_prs_sram_bits_set(pe, sram_next_off, lu);
1862 /* In the sram sw entry set sign and value of the next lookup offset
1863 * and the offset value generated to the classifier
1865 static void mvpp2_prs_sram_shift_set(struct mvpp2_prs_entry *pe, int shift,
1870 mvpp2_prs_sram_bits_set(pe, MVPP2_PRS_SRAM_SHIFT_SIGN_BIT, 1);
1873 mvpp2_prs_sram_bits_clear(pe, MVPP2_PRS_SRAM_SHIFT_SIGN_BIT, 1);
1877 pe->sram.byte[MVPP2_BIT_TO_BYTE(MVPP2_PRS_SRAM_SHIFT_OFFS)] =
1878 (unsigned char)shift;
1880 /* Reset and set operation */
1881 mvpp2_prs_sram_bits_clear(pe, MVPP2_PRS_SRAM_OP_SEL_SHIFT_OFFS,
1882 MVPP2_PRS_SRAM_OP_SEL_SHIFT_MASK);
1883 mvpp2_prs_sram_bits_set(pe, MVPP2_PRS_SRAM_OP_SEL_SHIFT_OFFS, op);
1885 /* Set base offset as current */
1886 mvpp2_prs_sram_bits_clear(pe, MVPP2_PRS_SRAM_OP_SEL_BASE_OFFS, 1);
1889 /* In the sram sw entry set sign and value of the user defined offset
1890 * generated to the classifier
1892 static void mvpp2_prs_sram_offset_set(struct mvpp2_prs_entry *pe,
1893 unsigned int type, int offset,
1898 mvpp2_prs_sram_bits_set(pe, MVPP2_PRS_SRAM_UDF_SIGN_BIT, 1);
1899 offset = 0 - offset;
1901 mvpp2_prs_sram_bits_clear(pe, MVPP2_PRS_SRAM_UDF_SIGN_BIT, 1);
1905 mvpp2_prs_sram_bits_clear(pe, MVPP2_PRS_SRAM_UDF_OFFS,
1906 MVPP2_PRS_SRAM_UDF_MASK);
1907 mvpp2_prs_sram_bits_set(pe, MVPP2_PRS_SRAM_UDF_OFFS, offset);
1908 pe->sram.byte[MVPP2_BIT_TO_BYTE(MVPP2_PRS_SRAM_UDF_OFFS +
1909 MVPP2_PRS_SRAM_UDF_BITS)] &=
1910 ~(MVPP2_PRS_SRAM_UDF_MASK >> (8 - (MVPP2_PRS_SRAM_UDF_OFFS % 8)));
1911 pe->sram.byte[MVPP2_BIT_TO_BYTE(MVPP2_PRS_SRAM_UDF_OFFS +
1912 MVPP2_PRS_SRAM_UDF_BITS)] |=
1913 (offset >> (8 - (MVPP2_PRS_SRAM_UDF_OFFS % 8)));
1915 /* Set offset type */
1916 mvpp2_prs_sram_bits_clear(pe, MVPP2_PRS_SRAM_UDF_TYPE_OFFS,
1917 MVPP2_PRS_SRAM_UDF_TYPE_MASK);
1918 mvpp2_prs_sram_bits_set(pe, MVPP2_PRS_SRAM_UDF_TYPE_OFFS, type);
1920 /* Set offset operation */
1921 mvpp2_prs_sram_bits_clear(pe, MVPP2_PRS_SRAM_OP_SEL_UDF_OFFS,
1922 MVPP2_PRS_SRAM_OP_SEL_UDF_MASK);
1923 mvpp2_prs_sram_bits_set(pe, MVPP2_PRS_SRAM_OP_SEL_UDF_OFFS, op);
1925 pe->sram.byte[MVPP2_BIT_TO_BYTE(MVPP2_PRS_SRAM_OP_SEL_UDF_OFFS +
1926 MVPP2_PRS_SRAM_OP_SEL_UDF_BITS)] &=
1927 ~(MVPP2_PRS_SRAM_OP_SEL_UDF_MASK >>
1928 (8 - (MVPP2_PRS_SRAM_OP_SEL_UDF_OFFS % 8)));
1930 pe->sram.byte[MVPP2_BIT_TO_BYTE(MVPP2_PRS_SRAM_OP_SEL_UDF_OFFS +
1931 MVPP2_PRS_SRAM_OP_SEL_UDF_BITS)] |=
1932 (op >> (8 - (MVPP2_PRS_SRAM_OP_SEL_UDF_OFFS % 8)));
1934 /* Set base offset as current */
1935 mvpp2_prs_sram_bits_clear(pe, MVPP2_PRS_SRAM_OP_SEL_BASE_OFFS, 1);
1938 /* Find parser flow entry */
1939 static int mvpp2_prs_flow_find(struct mvpp2 *priv, int flow)
1941 struct mvpp2_prs_entry pe;
1944 /* Go through the all entires with MVPP2_PRS_LU_FLOWS */
1945 for (tid = MVPP2_PRS_TCAM_SRAM_SIZE - 1; tid >= 0; tid--) {
1948 if (!priv->prs_shadow[tid].valid ||
1949 priv->prs_shadow[tid].lu != MVPP2_PRS_LU_FLOWS)
1952 mvpp2_prs_init_from_hw(priv, &pe, tid);
1953 bits = mvpp2_prs_sram_ai_get(&pe);
1955 /* Sram store classification lookup ID in AI bits [5:0] */
1956 if ((bits & MVPP2_PRS_FLOW_ID_MASK) == flow)
1963 /* Return first free tcam index, seeking from start to end */
1964 static int mvpp2_prs_tcam_first_free(struct mvpp2 *priv, unsigned char start,
1972 if (end >= MVPP2_PRS_TCAM_SRAM_SIZE)
1973 end = MVPP2_PRS_TCAM_SRAM_SIZE - 1;
1975 for (tid = start; tid <= end; tid++) {
1976 if (!priv->prs_shadow[tid].valid)
1983 /* Enable/disable dropping all mac da's */
1984 static void mvpp2_prs_mac_drop_all_set(struct mvpp2 *priv, int port, bool add)
1986 struct mvpp2_prs_entry pe;
1988 if (priv->prs_shadow[MVPP2_PE_DROP_ALL].valid) {
1989 /* Entry exist - update port only */
1990 mvpp2_prs_init_from_hw(priv, &pe, MVPP2_PE_DROP_ALL);
1992 /* Entry doesn't exist - create new */
1993 memset(&pe, 0, sizeof(pe));
1994 mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_MAC);
1995 pe.index = MVPP2_PE_DROP_ALL;
1997 /* Non-promiscuous mode for all ports - DROP unknown packets */
1998 mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_DROP_MASK,
1999 MVPP2_PRS_RI_DROP_MASK);
2001 mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1);
2002 mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_FLOWS);
2004 /* Update shadow table */
2005 mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_MAC);
2007 /* Mask all ports */
2008 mvpp2_prs_tcam_port_map_set(&pe, 0);
2011 /* Update port mask */
2012 mvpp2_prs_tcam_port_set(&pe, port, add);
2014 mvpp2_prs_hw_write(priv, &pe);
2017 /* Set port to unicast or multicast promiscuous mode */
2018 static void mvpp2_prs_mac_promisc_set(struct mvpp2 *priv, int port,
2019 enum mvpp2_prs_l2_cast l2_cast, bool add)
2021 struct mvpp2_prs_entry pe;
2022 unsigned char cast_match;
2026 if (l2_cast == MVPP2_PRS_L2_UNI_CAST) {
2027 cast_match = MVPP2_PRS_UCAST_VAL;
2028 tid = MVPP2_PE_MAC_UC_PROMISCUOUS;
2029 ri = MVPP2_PRS_RI_L2_UCAST;
2031 cast_match = MVPP2_PRS_MCAST_VAL;
2032 tid = MVPP2_PE_MAC_MC_PROMISCUOUS;
2033 ri = MVPP2_PRS_RI_L2_MCAST;
2036 /* promiscuous mode - Accept unknown unicast or multicast packets */
2037 if (priv->prs_shadow[tid].valid) {
2038 mvpp2_prs_init_from_hw(priv, &pe, tid);
2040 memset(&pe, 0, sizeof(pe));
2041 mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_MAC);
2044 /* Continue - set next lookup */
2045 mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_DSA);
2047 /* Set result info bits */
2048 mvpp2_prs_sram_ri_update(&pe, ri, MVPP2_PRS_RI_L2_CAST_MASK);
2050 /* Match UC or MC addresses */
2051 mvpp2_prs_tcam_data_byte_set(&pe, 0, cast_match,
2052 MVPP2_PRS_CAST_MASK);
2054 /* Shift to ethertype */
2055 mvpp2_prs_sram_shift_set(&pe, 2 * ETH_ALEN,
2056 MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
2058 /* Mask all ports */
2059 mvpp2_prs_tcam_port_map_set(&pe, 0);
2061 /* Update shadow table */
2062 mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_MAC);
2065 /* Update port mask */
2066 mvpp2_prs_tcam_port_set(&pe, port, add);
2068 mvpp2_prs_hw_write(priv, &pe);
2071 /* Set entry for dsa packets */
2072 static void mvpp2_prs_dsa_tag_set(struct mvpp2 *priv, int port, bool add,
2073 bool tagged, bool extend)
2075 struct mvpp2_prs_entry pe;
2079 tid = tagged ? MVPP2_PE_EDSA_TAGGED : MVPP2_PE_EDSA_UNTAGGED;
2082 tid = tagged ? MVPP2_PE_DSA_TAGGED : MVPP2_PE_DSA_UNTAGGED;
2086 if (priv->prs_shadow[tid].valid) {
2087 /* Entry exist - update port only */
2088 mvpp2_prs_init_from_hw(priv, &pe, tid);
2090 /* Entry doesn't exist - create new */
2091 memset(&pe, 0, sizeof(pe));
2092 mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_DSA);
2095 /* Update shadow table */
2096 mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_DSA);
2099 /* Set tagged bit in DSA tag */
2100 mvpp2_prs_tcam_data_byte_set(&pe, 0,
2101 MVPP2_PRS_TCAM_DSA_TAGGED_BIT,
2102 MVPP2_PRS_TCAM_DSA_TAGGED_BIT);
2104 /* Set ai bits for next iteration */
2106 mvpp2_prs_sram_ai_update(&pe, 1,
2107 MVPP2_PRS_SRAM_AI_MASK);
2109 mvpp2_prs_sram_ai_update(&pe, 0,
2110 MVPP2_PRS_SRAM_AI_MASK);
2112 /* If packet is tagged continue check vid filtering */
2113 mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_VID);
2115 /* Shift 4 bytes for DSA tag or 8 bytes for EDSA tag*/
2116 mvpp2_prs_sram_shift_set(&pe, shift,
2117 MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
2119 /* Set result info bits to 'no vlans' */
2120 mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_VLAN_NONE,
2121 MVPP2_PRS_RI_VLAN_MASK);
2122 mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_L2);
2125 /* Mask all ports */
2126 mvpp2_prs_tcam_port_map_set(&pe, 0);
2129 /* Update port mask */
2130 mvpp2_prs_tcam_port_set(&pe, port, add);
2132 mvpp2_prs_hw_write(priv, &pe);
2135 /* Set entry for dsa ethertype */
2136 static void mvpp2_prs_dsa_tag_ethertype_set(struct mvpp2 *priv, int port,
2137 bool add, bool tagged, bool extend)
2139 struct mvpp2_prs_entry pe;
2140 int tid, shift, port_mask;
2143 tid = tagged ? MVPP2_PE_ETYPE_EDSA_TAGGED :
2144 MVPP2_PE_ETYPE_EDSA_UNTAGGED;
2148 tid = tagged ? MVPP2_PE_ETYPE_DSA_TAGGED :
2149 MVPP2_PE_ETYPE_DSA_UNTAGGED;
2150 port_mask = MVPP2_PRS_PORT_MASK;
2154 if (priv->prs_shadow[tid].valid) {
2155 /* Entry exist - update port only */
2156 mvpp2_prs_init_from_hw(priv, &pe, tid);
2158 /* Entry doesn't exist - create new */
2159 memset(&pe, 0, sizeof(pe));
2160 mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_DSA);
2164 mvpp2_prs_match_etype(&pe, 0, ETH_P_EDSA);
2165 mvpp2_prs_match_etype(&pe, 2, 0);
2167 mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_DSA_MASK,
2168 MVPP2_PRS_RI_DSA_MASK);
2169 /* Shift ethertype + 2 byte reserved + tag*/
2170 mvpp2_prs_sram_shift_set(&pe, 2 + MVPP2_ETH_TYPE_LEN + shift,
2171 MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
2173 /* Update shadow table */
2174 mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_DSA);
2177 /* Set tagged bit in DSA tag */
2178 mvpp2_prs_tcam_data_byte_set(&pe,
2179 MVPP2_ETH_TYPE_LEN + 2 + 3,
2180 MVPP2_PRS_TCAM_DSA_TAGGED_BIT,
2181 MVPP2_PRS_TCAM_DSA_TAGGED_BIT);
2182 /* Clear all ai bits for next iteration */
2183 mvpp2_prs_sram_ai_update(&pe, 0,
2184 MVPP2_PRS_SRAM_AI_MASK);
2185 /* If packet is tagged continue check vlans */
2186 mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_VLAN);
2188 /* Set result info bits to 'no vlans' */
2189 mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_VLAN_NONE,
2190 MVPP2_PRS_RI_VLAN_MASK);
2191 mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_L2);
2193 /* Mask/unmask all ports, depending on dsa type */
2194 mvpp2_prs_tcam_port_map_set(&pe, port_mask);
2197 /* Update port mask */
2198 mvpp2_prs_tcam_port_set(&pe, port, add);
2200 mvpp2_prs_hw_write(priv, &pe);
2203 /* Search for existing single/triple vlan entry */
2204 static int mvpp2_prs_vlan_find(struct mvpp2 *priv, unsigned short tpid, int ai)
2206 struct mvpp2_prs_entry pe;
2209 /* Go through the all entries with MVPP2_PRS_LU_VLAN */
2210 for (tid = MVPP2_PE_FIRST_FREE_TID;
2211 tid <= MVPP2_PE_LAST_FREE_TID; tid++) {
2212 unsigned int ri_bits, ai_bits;
2215 if (!priv->prs_shadow[tid].valid ||
2216 priv->prs_shadow[tid].lu != MVPP2_PRS_LU_VLAN)
2219 mvpp2_prs_init_from_hw(priv, &pe, tid);
2220 match = mvpp2_prs_tcam_data_cmp(&pe, 0, swab16(tpid));
2225 ri_bits = mvpp2_prs_sram_ri_get(&pe);
2226 ri_bits &= MVPP2_PRS_RI_VLAN_MASK;
2228 /* Get current ai value from tcam */
2229 ai_bits = mvpp2_prs_tcam_ai_get(&pe);
2230 /* Clear double vlan bit */
2231 ai_bits &= ~MVPP2_PRS_DBL_VLAN_AI_BIT;
2236 if (ri_bits == MVPP2_PRS_RI_VLAN_SINGLE ||
2237 ri_bits == MVPP2_PRS_RI_VLAN_TRIPLE)
2244 /* Add/update single/triple vlan entry */
2245 static int mvpp2_prs_vlan_add(struct mvpp2 *priv, unsigned short tpid, int ai,
2246 unsigned int port_map)
2248 struct mvpp2_prs_entry pe;
2252 memset(&pe, 0, sizeof(pe));
2254 tid = mvpp2_prs_vlan_find(priv, tpid, ai);
2257 /* Create new tcam entry */
2258 tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_LAST_FREE_TID,
2259 MVPP2_PE_FIRST_FREE_TID);
2263 /* Get last double vlan tid */
2264 for (tid_aux = MVPP2_PE_LAST_FREE_TID;
2265 tid_aux >= MVPP2_PE_FIRST_FREE_TID; tid_aux--) {
2266 unsigned int ri_bits;
2268 if (!priv->prs_shadow[tid_aux].valid ||
2269 priv->prs_shadow[tid_aux].lu != MVPP2_PRS_LU_VLAN)
2272 mvpp2_prs_init_from_hw(priv, &pe, tid_aux);
2273 ri_bits = mvpp2_prs_sram_ri_get(&pe);
2274 if ((ri_bits & MVPP2_PRS_RI_VLAN_MASK) ==
2275 MVPP2_PRS_RI_VLAN_DOUBLE)
2282 memset(&pe, 0, sizeof(pe));
2284 mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_VLAN);
2286 mvpp2_prs_match_etype(&pe, 0, tpid);
2288 /* VLAN tag detected, proceed with VID filtering */
2289 mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_VID);
2291 /* Clear all ai bits for next iteration */
2292 mvpp2_prs_sram_ai_update(&pe, 0, MVPP2_PRS_SRAM_AI_MASK);
2294 if (ai == MVPP2_PRS_SINGLE_VLAN_AI) {
2295 mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_VLAN_SINGLE,
2296 MVPP2_PRS_RI_VLAN_MASK);
2298 ai |= MVPP2_PRS_DBL_VLAN_AI_BIT;
2299 mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_VLAN_TRIPLE,
2300 MVPP2_PRS_RI_VLAN_MASK);
2302 mvpp2_prs_tcam_ai_update(&pe, ai, MVPP2_PRS_SRAM_AI_MASK);
2304 mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_VLAN);
2306 mvpp2_prs_init_from_hw(priv, &pe, tid);
2308 /* Update ports' mask */
2309 mvpp2_prs_tcam_port_map_set(&pe, port_map);
2311 mvpp2_prs_hw_write(priv, &pe);
2316 /* Get first free double vlan ai number */
2317 static int mvpp2_prs_double_vlan_ai_free_get(struct mvpp2 *priv)
2321 for (i = 1; i < MVPP2_PRS_DBL_VLANS_MAX; i++) {
2322 if (!priv->prs_double_vlans[i])
2329 /* Search for existing double vlan entry */
2330 static int mvpp2_prs_double_vlan_find(struct mvpp2 *priv, unsigned short tpid1,
2331 unsigned short tpid2)
2333 struct mvpp2_prs_entry pe;
2336 /* Go through the all entries with MVPP2_PRS_LU_VLAN */
2337 for (tid = MVPP2_PE_FIRST_FREE_TID;
2338 tid <= MVPP2_PE_LAST_FREE_TID; tid++) {
2339 unsigned int ri_mask;
2342 if (!priv->prs_shadow[tid].valid ||
2343 priv->prs_shadow[tid].lu != MVPP2_PRS_LU_VLAN)
2346 mvpp2_prs_init_from_hw(priv, &pe, tid);
2348 match = mvpp2_prs_tcam_data_cmp(&pe, 0, swab16(tpid1)) &&
2349 mvpp2_prs_tcam_data_cmp(&pe, 4, swab16(tpid2));
2354 ri_mask = mvpp2_prs_sram_ri_get(&pe) & MVPP2_PRS_RI_VLAN_MASK;
2355 if (ri_mask == MVPP2_PRS_RI_VLAN_DOUBLE)
2362 /* Add or update double vlan entry */
2363 static int mvpp2_prs_double_vlan_add(struct mvpp2 *priv, unsigned short tpid1,
2364 unsigned short tpid2,
2365 unsigned int port_map)
2367 int tid_aux, tid, ai, ret = 0;
2368 struct mvpp2_prs_entry pe;
2370 memset(&pe, 0, sizeof(pe));
2372 tid = mvpp2_prs_double_vlan_find(priv, tpid1, tpid2);
2375 /* Create new tcam entry */
2376 tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
2377 MVPP2_PE_LAST_FREE_TID);
2381 /* Set ai value for new double vlan entry */
2382 ai = mvpp2_prs_double_vlan_ai_free_get(priv);
2386 /* Get first single/triple vlan tid */
2387 for (tid_aux = MVPP2_PE_FIRST_FREE_TID;
2388 tid_aux <= MVPP2_PE_LAST_FREE_TID; tid_aux++) {
2389 unsigned int ri_bits;
2391 if (!priv->prs_shadow[tid_aux].valid ||
2392 priv->prs_shadow[tid_aux].lu != MVPP2_PRS_LU_VLAN)
2395 mvpp2_prs_init_from_hw(priv, &pe, tid_aux);
2396 ri_bits = mvpp2_prs_sram_ri_get(&pe);
2397 ri_bits &= MVPP2_PRS_RI_VLAN_MASK;
2398 if (ri_bits == MVPP2_PRS_RI_VLAN_SINGLE ||
2399 ri_bits == MVPP2_PRS_RI_VLAN_TRIPLE)
2406 memset(&pe, 0, sizeof(pe));
2407 mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_VLAN);
2410 priv->prs_double_vlans[ai] = true;
2412 mvpp2_prs_match_etype(&pe, 0, tpid1);
2413 mvpp2_prs_match_etype(&pe, 4, tpid2);
2415 mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_VLAN);
2416 /* Shift 4 bytes - skip outer vlan tag */
2417 mvpp2_prs_sram_shift_set(&pe, MVPP2_VLAN_TAG_LEN,
2418 MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
2419 mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_VLAN_DOUBLE,
2420 MVPP2_PRS_RI_VLAN_MASK);
2421 mvpp2_prs_sram_ai_update(&pe, ai | MVPP2_PRS_DBL_VLAN_AI_BIT,
2422 MVPP2_PRS_SRAM_AI_MASK);
2424 mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_VLAN);
2426 mvpp2_prs_init_from_hw(priv, &pe, tid);
2429 /* Update ports' mask */
2430 mvpp2_prs_tcam_port_map_set(&pe, port_map);
2431 mvpp2_prs_hw_write(priv, &pe);
2436 /* IPv4 header parsing for fragmentation and L4 offset */
2437 static int mvpp2_prs_ip4_proto(struct mvpp2 *priv, unsigned short proto,
2438 unsigned int ri, unsigned int ri_mask)
2440 struct mvpp2_prs_entry pe;
2443 if ((proto != IPPROTO_TCP) && (proto != IPPROTO_UDP) &&
2444 (proto != IPPROTO_IGMP))
2447 /* Not fragmented packet */
2448 tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
2449 MVPP2_PE_LAST_FREE_TID);
2453 memset(&pe, 0, sizeof(pe));
2454 mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_IP4);
2457 /* Set next lu to IPv4 */
2458 mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_IP4);
2459 mvpp2_prs_sram_shift_set(&pe, 12, MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
2461 mvpp2_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L4,
2462 sizeof(struct iphdr) - 4,
2463 MVPP2_PRS_SRAM_OP_SEL_UDF_ADD);
2464 mvpp2_prs_sram_ai_update(&pe, MVPP2_PRS_IPV4_DIP_AI_BIT,
2465 MVPP2_PRS_IPV4_DIP_AI_BIT);
2466 mvpp2_prs_sram_ri_update(&pe, ri, ri_mask | MVPP2_PRS_RI_IP_FRAG_MASK);
2468 mvpp2_prs_tcam_data_byte_set(&pe, 2, 0x00,
2469 MVPP2_PRS_TCAM_PROTO_MASK_L);
2470 mvpp2_prs_tcam_data_byte_set(&pe, 3, 0x00,
2471 MVPP2_PRS_TCAM_PROTO_MASK);
2473 mvpp2_prs_tcam_data_byte_set(&pe, 5, proto, MVPP2_PRS_TCAM_PROTO_MASK);
2474 mvpp2_prs_tcam_ai_update(&pe, 0, MVPP2_PRS_IPV4_DIP_AI_BIT);
2475 /* Unmask all ports */
2476 mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);
2478 /* Update shadow table and hw entry */
2479 mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_IP4);
2480 mvpp2_prs_hw_write(priv, &pe);
2482 /* Fragmented packet */
2483 tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
2484 MVPP2_PE_LAST_FREE_TID);
2489 /* Clear ri before updating */
2490 pe.sram.word[MVPP2_PRS_SRAM_RI_WORD] = 0x0;
2491 pe.sram.word[MVPP2_PRS_SRAM_RI_CTRL_WORD] = 0x0;
2492 mvpp2_prs_sram_ri_update(&pe, ri, ri_mask);
2494 mvpp2_prs_sram_ri_update(&pe, ri | MVPP2_PRS_RI_IP_FRAG_TRUE,
2495 ri_mask | MVPP2_PRS_RI_IP_FRAG_MASK);
2497 mvpp2_prs_tcam_data_byte_set(&pe, 2, 0x00, 0x0);
2498 mvpp2_prs_tcam_data_byte_set(&pe, 3, 0x00, 0x0);
2500 /* Update shadow table and hw entry */
2501 mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_IP4);
2502 mvpp2_prs_hw_write(priv, &pe);
2507 /* IPv4 L3 multicast or broadcast */
2508 static int mvpp2_prs_ip4_cast(struct mvpp2 *priv, unsigned short l3_cast)
2510 struct mvpp2_prs_entry pe;
2513 tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
2514 MVPP2_PE_LAST_FREE_TID);
2518 memset(&pe, 0, sizeof(pe));
2519 mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_IP4);
2523 case MVPP2_PRS_L3_MULTI_CAST:
2524 mvpp2_prs_tcam_data_byte_set(&pe, 0, MVPP2_PRS_IPV4_MC,
2525 MVPP2_PRS_IPV4_MC_MASK);
2526 mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_MCAST,
2527 MVPP2_PRS_RI_L3_ADDR_MASK);
2529 case MVPP2_PRS_L3_BROAD_CAST:
2530 mask = MVPP2_PRS_IPV4_BC_MASK;
2531 mvpp2_prs_tcam_data_byte_set(&pe, 0, mask, mask);
2532 mvpp2_prs_tcam_data_byte_set(&pe, 1, mask, mask);
2533 mvpp2_prs_tcam_data_byte_set(&pe, 2, mask, mask);
2534 mvpp2_prs_tcam_data_byte_set(&pe, 3, mask, mask);
2535 mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_BCAST,
2536 MVPP2_PRS_RI_L3_ADDR_MASK);
2542 /* Finished: go to flowid generation */
2543 mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_FLOWS);
2544 mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1);
2546 mvpp2_prs_tcam_ai_update(&pe, MVPP2_PRS_IPV4_DIP_AI_BIT,
2547 MVPP2_PRS_IPV4_DIP_AI_BIT);
2548 /* Unmask all ports */
2549 mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);
2551 /* Update shadow table and hw entry */
2552 mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_IP4);
2553 mvpp2_prs_hw_write(priv, &pe);
2558 /* Set entries for protocols over IPv6 */
2559 static int mvpp2_prs_ip6_proto(struct mvpp2 *priv, unsigned short proto,
2560 unsigned int ri, unsigned int ri_mask)
2562 struct mvpp2_prs_entry pe;
2565 if ((proto != IPPROTO_TCP) && (proto != IPPROTO_UDP) &&
2566 (proto != IPPROTO_ICMPV6) && (proto != IPPROTO_IPIP))
2569 tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
2570 MVPP2_PE_LAST_FREE_TID);
2574 memset(&pe, 0, sizeof(pe));
2575 mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_IP6);
2578 /* Finished: go to flowid generation */
2579 mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_FLOWS);
2580 mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1);
2581 mvpp2_prs_sram_ri_update(&pe, ri, ri_mask);
2582 mvpp2_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L4,
2583 sizeof(struct ipv6hdr) - 6,
2584 MVPP2_PRS_SRAM_OP_SEL_UDF_ADD);
2586 mvpp2_prs_tcam_data_byte_set(&pe, 0, proto, MVPP2_PRS_TCAM_PROTO_MASK);
2587 mvpp2_prs_tcam_ai_update(&pe, MVPP2_PRS_IPV6_NO_EXT_AI_BIT,
2588 MVPP2_PRS_IPV6_NO_EXT_AI_BIT);
2589 /* Unmask all ports */
2590 mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);
2593 mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_IP6);
2594 mvpp2_prs_hw_write(priv, &pe);
2599 /* IPv6 L3 multicast entry */
2600 static int mvpp2_prs_ip6_cast(struct mvpp2 *priv, unsigned short l3_cast)
2602 struct mvpp2_prs_entry pe;
2605 if (l3_cast != MVPP2_PRS_L3_MULTI_CAST)
2608 tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
2609 MVPP2_PE_LAST_FREE_TID);
2613 memset(&pe, 0, sizeof(pe));
2614 mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_IP6);
2617 /* Finished: go to flowid generation */
2618 mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_IP6);
2619 mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_MCAST,
2620 MVPP2_PRS_RI_L3_ADDR_MASK);
2621 mvpp2_prs_sram_ai_update(&pe, MVPP2_PRS_IPV6_NO_EXT_AI_BIT,
2622 MVPP2_PRS_IPV6_NO_EXT_AI_BIT);
2623 /* Shift back to IPv6 NH */
2624 mvpp2_prs_sram_shift_set(&pe, -18, MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
2626 mvpp2_prs_tcam_data_byte_set(&pe, 0, MVPP2_PRS_IPV6_MC,
2627 MVPP2_PRS_IPV6_MC_MASK);
2628 mvpp2_prs_tcam_ai_update(&pe, 0, MVPP2_PRS_IPV6_NO_EXT_AI_BIT);
2629 /* Unmask all ports */
2630 mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);
2632 /* Update shadow table and hw entry */
2633 mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_IP6);
2634 mvpp2_prs_hw_write(priv, &pe);
2639 /* Parser per-port initialization */
2640 static void mvpp2_prs_hw_port_init(struct mvpp2 *priv, int port, int lu_first,
2641 int lu_max, int offset)
2646 val = mvpp2_read(priv, MVPP2_PRS_INIT_LOOKUP_REG);
2647 val &= ~MVPP2_PRS_PORT_LU_MASK(port);
2648 val |= MVPP2_PRS_PORT_LU_VAL(port, lu_first);
2649 mvpp2_write(priv, MVPP2_PRS_INIT_LOOKUP_REG, val);
2651 /* Set maximum number of loops for packet received from port */
2652 val = mvpp2_read(priv, MVPP2_PRS_MAX_LOOP_REG(port));
2653 val &= ~MVPP2_PRS_MAX_LOOP_MASK(port);
2654 val |= MVPP2_PRS_MAX_LOOP_VAL(port, lu_max);
2655 mvpp2_write(priv, MVPP2_PRS_MAX_LOOP_REG(port), val);
2657 /* Set initial offset for packet header extraction for the first
2660 val = mvpp2_read(priv, MVPP2_PRS_INIT_OFFS_REG(port));
2661 val &= ~MVPP2_PRS_INIT_OFF_MASK(port);
2662 val |= MVPP2_PRS_INIT_OFF_VAL(port, offset);
2663 mvpp2_write(priv, MVPP2_PRS_INIT_OFFS_REG(port), val);
2666 /* Default flow entries initialization for all ports */
2667 static void mvpp2_prs_def_flow_init(struct mvpp2 *priv)
2669 struct mvpp2_prs_entry pe;
2672 for (port = 0; port < MVPP2_MAX_PORTS; port++) {
2673 memset(&pe, 0, sizeof(pe));
2674 mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_FLOWS);
2675 pe.index = MVPP2_PE_FIRST_DEFAULT_FLOW - port;
2677 /* Mask all ports */
2678 mvpp2_prs_tcam_port_map_set(&pe, 0);
2681 mvpp2_prs_sram_ai_update(&pe, port, MVPP2_PRS_FLOW_ID_MASK);
2682 mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_DONE_BIT, 1);
2684 /* Update shadow table and hw entry */
2685 mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_FLOWS);
2686 mvpp2_prs_hw_write(priv, &pe);
2690 /* Set default entry for Marvell Header field */
2691 static void mvpp2_prs_mh_init(struct mvpp2 *priv)
2693 struct mvpp2_prs_entry pe;
2695 memset(&pe, 0, sizeof(pe));
2697 pe.index = MVPP2_PE_MH_DEFAULT;
2698 mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_MH);
2699 mvpp2_prs_sram_shift_set(&pe, MVPP2_MH_SIZE,
2700 MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
2701 mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_MAC);
2703 /* Unmask all ports */
2704 mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);
2706 /* Update shadow table and hw entry */
2707 mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_MH);
2708 mvpp2_prs_hw_write(priv, &pe);
2711 /* Set default entires (place holder) for promiscuous, non-promiscuous and
2712 * multicast MAC addresses
2714 static void mvpp2_prs_mac_init(struct mvpp2 *priv)
2716 struct mvpp2_prs_entry pe;
2718 memset(&pe, 0, sizeof(pe));
2720 /* Non-promiscuous mode for all ports - DROP unknown packets */
2721 pe.index = MVPP2_PE_MAC_NON_PROMISCUOUS;
2722 mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_MAC);
2724 mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_DROP_MASK,
2725 MVPP2_PRS_RI_DROP_MASK);
2726 mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1);
2727 mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_FLOWS);
2729 /* Unmask all ports */
2730 mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);
2732 /* Update shadow table and hw entry */
2733 mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_MAC);
2734 mvpp2_prs_hw_write(priv, &pe);
2736 /* Create dummy entries for drop all and promiscuous modes */
2737 mvpp2_prs_mac_drop_all_set(priv, 0, false);
2738 mvpp2_prs_mac_promisc_set(priv, 0, MVPP2_PRS_L2_UNI_CAST, false);
2739 mvpp2_prs_mac_promisc_set(priv, 0, MVPP2_PRS_L2_MULTI_CAST, false);
2742 /* Set default entries for various types of dsa packets */
2743 static void mvpp2_prs_dsa_init(struct mvpp2 *priv)
2745 struct mvpp2_prs_entry pe;
2747 /* None tagged EDSA entry - place holder */
2748 mvpp2_prs_dsa_tag_set(priv, 0, false, MVPP2_PRS_UNTAGGED,
2751 /* Tagged EDSA entry - place holder */
2752 mvpp2_prs_dsa_tag_set(priv, 0, false, MVPP2_PRS_TAGGED, MVPP2_PRS_EDSA);
2754 /* None tagged DSA entry - place holder */
2755 mvpp2_prs_dsa_tag_set(priv, 0, false, MVPP2_PRS_UNTAGGED,
2758 /* Tagged DSA entry - place holder */
2759 mvpp2_prs_dsa_tag_set(priv, 0, false, MVPP2_PRS_TAGGED, MVPP2_PRS_DSA);
2761 /* None tagged EDSA ethertype entry - place holder*/
2762 mvpp2_prs_dsa_tag_ethertype_set(priv, 0, false,
2763 MVPP2_PRS_UNTAGGED, MVPP2_PRS_EDSA);
2765 /* Tagged EDSA ethertype entry - place holder*/
2766 mvpp2_prs_dsa_tag_ethertype_set(priv, 0, false,
2767 MVPP2_PRS_TAGGED, MVPP2_PRS_EDSA);
2769 /* None tagged DSA ethertype entry */
2770 mvpp2_prs_dsa_tag_ethertype_set(priv, 0, true,
2771 MVPP2_PRS_UNTAGGED, MVPP2_PRS_DSA);
2773 /* Tagged DSA ethertype entry */
2774 mvpp2_prs_dsa_tag_ethertype_set(priv, 0, true,
2775 MVPP2_PRS_TAGGED, MVPP2_PRS_DSA);
2777 /* Set default entry, in case DSA or EDSA tag not found */
2778 memset(&pe, 0, sizeof(pe));
2779 mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_DSA);
2780 pe.index = MVPP2_PE_DSA_DEFAULT;
2781 mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_VLAN);
2784 mvpp2_prs_sram_shift_set(&pe, 0, MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
2785 mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_MAC);
2787 /* Clear all sram ai bits for next iteration */
2788 mvpp2_prs_sram_ai_update(&pe, 0, MVPP2_PRS_SRAM_AI_MASK);
2790 /* Unmask all ports */
2791 mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);
2793 mvpp2_prs_hw_write(priv, &pe);
2796 /* Initialize parser entries for VID filtering */
2797 static void mvpp2_prs_vid_init(struct mvpp2 *priv)
2799 struct mvpp2_prs_entry pe;
2801 memset(&pe, 0, sizeof(pe));
2803 /* Set default vid entry */
2804 pe.index = MVPP2_PE_VID_FLTR_DEFAULT;
2805 mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_VID);
2807 mvpp2_prs_tcam_ai_update(&pe, 0, MVPP2_PRS_EDSA_VID_AI_BIT);
2809 /* Skip VLAN header - Set offset to 4 bytes */
2810 mvpp2_prs_sram_shift_set(&pe, MVPP2_VLAN_TAG_LEN,
2811 MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
2813 /* Clear all ai bits for next iteration */
2814 mvpp2_prs_sram_ai_update(&pe, 0, MVPP2_PRS_SRAM_AI_MASK);
2816 mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_L2);
2818 /* Unmask all ports */
2819 mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);
2821 /* Update shadow table and hw entry */
2822 mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_VID);
2823 mvpp2_prs_hw_write(priv, &pe);
2825 /* Set default vid entry for extended DSA*/
2826 memset(&pe, 0, sizeof(pe));
2828 /* Set default vid entry */
2829 pe.index = MVPP2_PE_VID_EDSA_FLTR_DEFAULT;
2830 mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_VID);
2832 mvpp2_prs_tcam_ai_update(&pe, MVPP2_PRS_EDSA_VID_AI_BIT,
2833 MVPP2_PRS_EDSA_VID_AI_BIT);
2835 /* Skip VLAN header - Set offset to 8 bytes */
2836 mvpp2_prs_sram_shift_set(&pe, MVPP2_VLAN_TAG_EDSA_LEN,
2837 MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
2839 /* Clear all ai bits for next iteration */
2840 mvpp2_prs_sram_ai_update(&pe, 0, MVPP2_PRS_SRAM_AI_MASK);
2842 mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_L2);
2844 /* Unmask all ports */
2845 mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);
2847 /* Update shadow table and hw entry */
2848 mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_VID);
2849 mvpp2_prs_hw_write(priv, &pe);
2852 /* Match basic ethertypes */
2853 static int mvpp2_prs_etype_init(struct mvpp2 *priv)
2855 struct mvpp2_prs_entry pe;
2858 /* Ethertype: PPPoE */
2859 tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
2860 MVPP2_PE_LAST_FREE_TID);
2864 memset(&pe, 0, sizeof(pe));
2865 mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_L2);
2868 mvpp2_prs_match_etype(&pe, 0, ETH_P_PPP_SES);
2870 mvpp2_prs_sram_shift_set(&pe, MVPP2_PPPOE_HDR_SIZE,
2871 MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
2872 mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_PPPOE);
2873 mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_PPPOE_MASK,
2874 MVPP2_PRS_RI_PPPOE_MASK);
2876 /* Update shadow table and hw entry */
2877 mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_L2);
2878 priv->prs_shadow[pe.index].udf = MVPP2_PRS_UDF_L2_DEF;
2879 priv->prs_shadow[pe.index].finish = false;
2880 mvpp2_prs_shadow_ri_set(priv, pe.index, MVPP2_PRS_RI_PPPOE_MASK,
2881 MVPP2_PRS_RI_PPPOE_MASK);
2882 mvpp2_prs_hw_write(priv, &pe);
2884 /* Ethertype: ARP */
2885 tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
2886 MVPP2_PE_LAST_FREE_TID);
2890 memset(&pe, 0, sizeof(pe));
2891 mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_L2);
2894 mvpp2_prs_match_etype(&pe, 0, ETH_P_ARP);
2896 /* Generate flow in the next iteration*/
2897 mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_FLOWS);
2898 mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1);
2899 mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_ARP,
2900 MVPP2_PRS_RI_L3_PROTO_MASK);
2902 mvpp2_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L3,
2904 MVPP2_PRS_SRAM_OP_SEL_UDF_ADD);
2906 /* Update shadow table and hw entry */
2907 mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_L2);
2908 priv->prs_shadow[pe.index].udf = MVPP2_PRS_UDF_L2_DEF;
2909 priv->prs_shadow[pe.index].finish = true;
2910 mvpp2_prs_shadow_ri_set(priv, pe.index, MVPP2_PRS_RI_L3_ARP,
2911 MVPP2_PRS_RI_L3_PROTO_MASK);
2912 mvpp2_prs_hw_write(priv, &pe);
2914 /* Ethertype: LBTD */
2915 tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
2916 MVPP2_PE_LAST_FREE_TID);
2920 memset(&pe, 0, sizeof(pe));
2921 mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_L2);
2924 mvpp2_prs_match_etype(&pe, 0, MVPP2_IP_LBDT_TYPE);
2926 /* Generate flow in the next iteration*/
2927 mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_FLOWS);
2928 mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1);
2929 mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_CPU_CODE_RX_SPEC |
2930 MVPP2_PRS_RI_UDF3_RX_SPECIAL,
2931 MVPP2_PRS_RI_CPU_CODE_MASK |
2932 MVPP2_PRS_RI_UDF3_MASK);
2934 mvpp2_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L3,
2936 MVPP2_PRS_SRAM_OP_SEL_UDF_ADD);
2938 /* Update shadow table and hw entry */
2939 mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_L2);
2940 priv->prs_shadow[pe.index].udf = MVPP2_PRS_UDF_L2_DEF;
2941 priv->prs_shadow[pe.index].finish = true;
2942 mvpp2_prs_shadow_ri_set(priv, pe.index, MVPP2_PRS_RI_CPU_CODE_RX_SPEC |
2943 MVPP2_PRS_RI_UDF3_RX_SPECIAL,
2944 MVPP2_PRS_RI_CPU_CODE_MASK |
2945 MVPP2_PRS_RI_UDF3_MASK);
2946 mvpp2_prs_hw_write(priv, &pe);
2948 /* Ethertype: IPv4 without options */
2949 tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
2950 MVPP2_PE_LAST_FREE_TID);
2954 memset(&pe, 0, sizeof(pe));
2955 mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_L2);
2958 mvpp2_prs_match_etype(&pe, 0, ETH_P_IP);
2959 mvpp2_prs_tcam_data_byte_set(&pe, MVPP2_ETH_TYPE_LEN,
2960 MVPP2_PRS_IPV4_HEAD | MVPP2_PRS_IPV4_IHL,
2961 MVPP2_PRS_IPV4_HEAD_MASK |
2962 MVPP2_PRS_IPV4_IHL_MASK);
2964 mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_IP4);
2965 mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_IP4,
2966 MVPP2_PRS_RI_L3_PROTO_MASK);
2967 /* Skip eth_type + 4 bytes of IP header */
2968 mvpp2_prs_sram_shift_set(&pe, MVPP2_ETH_TYPE_LEN + 4,
2969 MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
2971 mvpp2_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L3,
2973 MVPP2_PRS_SRAM_OP_SEL_UDF_ADD);
2975 /* Update shadow table and hw entry */
2976 mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_L2);
2977 priv->prs_shadow[pe.index].udf = MVPP2_PRS_UDF_L2_DEF;
2978 priv->prs_shadow[pe.index].finish = false;
2979 mvpp2_prs_shadow_ri_set(priv, pe.index, MVPP2_PRS_RI_L3_IP4,
2980 MVPP2_PRS_RI_L3_PROTO_MASK);
2981 mvpp2_prs_hw_write(priv, &pe);
2983 /* Ethertype: IPv4 with options */
2984 tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
2985 MVPP2_PE_LAST_FREE_TID);
2991 /* Clear tcam data before updating */
2992 pe.tcam.byte[MVPP2_PRS_TCAM_DATA_BYTE(MVPP2_ETH_TYPE_LEN)] = 0x0;
2993 pe.tcam.byte[MVPP2_PRS_TCAM_DATA_BYTE_EN(MVPP2_ETH_TYPE_LEN)] = 0x0;
2995 mvpp2_prs_tcam_data_byte_set(&pe, MVPP2_ETH_TYPE_LEN,
2996 MVPP2_PRS_IPV4_HEAD,
2997 MVPP2_PRS_IPV4_HEAD_MASK);
2999 /* Clear ri before updating */
3000 pe.sram.word[MVPP2_PRS_SRAM_RI_WORD] = 0x0;
3001 pe.sram.word[MVPP2_PRS_SRAM_RI_CTRL_WORD] = 0x0;
3002 mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_IP4_OPT,
3003 MVPP2_PRS_RI_L3_PROTO_MASK);
3005 /* Update shadow table and hw entry */
3006 mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_L2);
3007 priv->prs_shadow[pe.index].udf = MVPP2_PRS_UDF_L2_DEF;
3008 priv->prs_shadow[pe.index].finish = false;
3009 mvpp2_prs_shadow_ri_set(priv, pe.index, MVPP2_PRS_RI_L3_IP4_OPT,
3010 MVPP2_PRS_RI_L3_PROTO_MASK);
3011 mvpp2_prs_hw_write(priv, &pe);
3013 /* Ethertype: IPv6 without options */
3014 tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
3015 MVPP2_PE_LAST_FREE_TID);
3019 memset(&pe, 0, sizeof(pe));
3020 mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_L2);
3023 mvpp2_prs_match_etype(&pe, 0, ETH_P_IPV6);
3025 /* Skip DIP of IPV6 header */
3026 mvpp2_prs_sram_shift_set(&pe, MVPP2_ETH_TYPE_LEN + 8 +
3027 MVPP2_MAX_L3_ADDR_SIZE,
3028 MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
3029 mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_IP6);
3030 mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_IP6,
3031 MVPP2_PRS_RI_L3_PROTO_MASK);
3033 mvpp2_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L3,
3035 MVPP2_PRS_SRAM_OP_SEL_UDF_ADD);
3037 mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_L2);
3038 priv->prs_shadow[pe.index].udf = MVPP2_PRS_UDF_L2_DEF;
3039 priv->prs_shadow[pe.index].finish = false;
3040 mvpp2_prs_shadow_ri_set(priv, pe.index, MVPP2_PRS_RI_L3_IP6,
3041 MVPP2_PRS_RI_L3_PROTO_MASK);
3042 mvpp2_prs_hw_write(priv, &pe);
3044 /* Default entry for MVPP2_PRS_LU_L2 - Unknown ethtype */
3045 memset(&pe, 0, sizeof(struct mvpp2_prs_entry));
3046 mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_L2);
3047 pe.index = MVPP2_PE_ETH_TYPE_UN;
3049 /* Unmask all ports */
3050 mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);
3052 /* Generate flow in the next iteration*/
3053 mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1);
3054 mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_FLOWS);
3055 mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_UN,
3056 MVPP2_PRS_RI_L3_PROTO_MASK);
3057 /* Set L3 offset even it's unknown L3 */
3058 mvpp2_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L3,
3060 MVPP2_PRS_SRAM_OP_SEL_UDF_ADD);
3062 /* Update shadow table and hw entry */
3063 mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_L2);
3064 priv->prs_shadow[pe.index].udf = MVPP2_PRS_UDF_L2_DEF;
3065 priv->prs_shadow[pe.index].finish = true;
3066 mvpp2_prs_shadow_ri_set(priv, pe.index, MVPP2_PRS_RI_L3_UN,
3067 MVPP2_PRS_RI_L3_PROTO_MASK);
3068 mvpp2_prs_hw_write(priv, &pe);
3073 /* Configure vlan entries and detect up to 2 successive VLAN tags.
3080 static int mvpp2_prs_vlan_init(struct platform_device *pdev, struct mvpp2 *priv)
3082 struct mvpp2_prs_entry pe;
3085 priv->prs_double_vlans = devm_kcalloc(&pdev->dev, sizeof(bool),
3086 MVPP2_PRS_DBL_VLANS_MAX,
3088 if (!priv->prs_double_vlans)
3091 /* Double VLAN: 0x8100, 0x88A8 */
3092 err = mvpp2_prs_double_vlan_add(priv, ETH_P_8021Q, ETH_P_8021AD,
3093 MVPP2_PRS_PORT_MASK);
3097 /* Double VLAN: 0x8100, 0x8100 */
3098 err = mvpp2_prs_double_vlan_add(priv, ETH_P_8021Q, ETH_P_8021Q,
3099 MVPP2_PRS_PORT_MASK);
3103 /* Single VLAN: 0x88a8 */
3104 err = mvpp2_prs_vlan_add(priv, ETH_P_8021AD, MVPP2_PRS_SINGLE_VLAN_AI,
3105 MVPP2_PRS_PORT_MASK);
3109 /* Single VLAN: 0x8100 */
3110 err = mvpp2_prs_vlan_add(priv, ETH_P_8021Q, MVPP2_PRS_SINGLE_VLAN_AI,
3111 MVPP2_PRS_PORT_MASK);
3115 /* Set default double vlan entry */
3116 memset(&pe, 0, sizeof(pe));
3117 mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_VLAN);
3118 pe.index = MVPP2_PE_VLAN_DBL;
3120 mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_VID);
3122 /* Clear ai for next iterations */
3123 mvpp2_prs_sram_ai_update(&pe, 0, MVPP2_PRS_SRAM_AI_MASK);
3124 mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_VLAN_DOUBLE,
3125 MVPP2_PRS_RI_VLAN_MASK);
3127 mvpp2_prs_tcam_ai_update(&pe, MVPP2_PRS_DBL_VLAN_AI_BIT,
3128 MVPP2_PRS_DBL_VLAN_AI_BIT);
3129 /* Unmask all ports */
3130 mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);
3132 /* Update shadow table and hw entry */
3133 mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_VLAN);
3134 mvpp2_prs_hw_write(priv, &pe);
3136 /* Set default vlan none entry */
3137 memset(&pe, 0, sizeof(pe));
3138 mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_VLAN);
3139 pe.index = MVPP2_PE_VLAN_NONE;
3141 mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_L2);
3142 mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_VLAN_NONE,
3143 MVPP2_PRS_RI_VLAN_MASK);
3145 /* Unmask all ports */
3146 mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);
3148 /* Update shadow table and hw entry */
3149 mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_VLAN);
3150 mvpp2_prs_hw_write(priv, &pe);
3155 /* Set entries for PPPoE ethertype */
3156 static int mvpp2_prs_pppoe_init(struct mvpp2 *priv)
3158 struct mvpp2_prs_entry pe;
3161 /* IPv4 over PPPoE with options */
3162 tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
3163 MVPP2_PE_LAST_FREE_TID);
3167 memset(&pe, 0, sizeof(pe));
3168 mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_PPPOE);
3171 mvpp2_prs_match_etype(&pe, 0, PPP_IP);
3173 mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_IP4);
3174 mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_IP4_OPT,
3175 MVPP2_PRS_RI_L3_PROTO_MASK);
3176 /* Skip eth_type + 4 bytes of IP header */
3177 mvpp2_prs_sram_shift_set(&pe, MVPP2_ETH_TYPE_LEN + 4,
3178 MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
3180 mvpp2_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L3,
3182 MVPP2_PRS_SRAM_OP_SEL_UDF_ADD);
3184 /* Update shadow table and hw entry */
3185 mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_PPPOE);
3186 mvpp2_prs_hw_write(priv, &pe);
3188 /* IPv4 over PPPoE without options */
3189 tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
3190 MVPP2_PE_LAST_FREE_TID);
3196 mvpp2_prs_tcam_data_byte_set(&pe, MVPP2_ETH_TYPE_LEN,
3197 MVPP2_PRS_IPV4_HEAD | MVPP2_PRS_IPV4_IHL,
3198 MVPP2_PRS_IPV4_HEAD_MASK |
3199 MVPP2_PRS_IPV4_IHL_MASK);
3201 /* Clear ri before updating */
3202 pe.sram.word[MVPP2_PRS_SRAM_RI_WORD] = 0x0;
3203 pe.sram.word[MVPP2_PRS_SRAM_RI_CTRL_WORD] = 0x0;
3204 mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_IP4,
3205 MVPP2_PRS_RI_L3_PROTO_MASK);
3207 /* Update shadow table and hw entry */
3208 mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_PPPOE);
3209 mvpp2_prs_hw_write(priv, &pe);
3211 /* IPv6 over PPPoE */
3212 tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
3213 MVPP2_PE_LAST_FREE_TID);
3217 memset(&pe, 0, sizeof(pe));
3218 mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_PPPOE);
3221 mvpp2_prs_match_etype(&pe, 0, PPP_IPV6);
3223 mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_IP6);
3224 mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_IP6,
3225 MVPP2_PRS_RI_L3_PROTO_MASK);
3226 /* Skip eth_type + 4 bytes of IPv6 header */
3227 mvpp2_prs_sram_shift_set(&pe, MVPP2_ETH_TYPE_LEN + 4,
3228 MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
3230 mvpp2_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L3,
3232 MVPP2_PRS_SRAM_OP_SEL_UDF_ADD);
3234 /* Update shadow table and hw entry */
3235 mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_PPPOE);
3236 mvpp2_prs_hw_write(priv, &pe);
3238 /* Non-IP over PPPoE */
3239 tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
3240 MVPP2_PE_LAST_FREE_TID);
3244 memset(&pe, 0, sizeof(pe));
3245 mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_PPPOE);
3248 mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_UN,
3249 MVPP2_PRS_RI_L3_PROTO_MASK);
3251 /* Finished: go to flowid generation */
3252 mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_FLOWS);
3253 mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1);
3254 /* Set L3 offset even if it's unknown L3 */
3255 mvpp2_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L3,
3257 MVPP2_PRS_SRAM_OP_SEL_UDF_ADD);
3259 /* Update shadow table and hw entry */
3260 mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_PPPOE);
3261 mvpp2_prs_hw_write(priv, &pe);
3266 /* Initialize entries for IPv4 */
3267 static int mvpp2_prs_ip4_init(struct mvpp2 *priv)
3269 struct mvpp2_prs_entry pe;
3272 /* Set entries for TCP, UDP and IGMP over IPv4 */
3273 err = mvpp2_prs_ip4_proto(priv, IPPROTO_TCP, MVPP2_PRS_RI_L4_TCP,
3274 MVPP2_PRS_RI_L4_PROTO_MASK);
3278 err = mvpp2_prs_ip4_proto(priv, IPPROTO_UDP, MVPP2_PRS_RI_L4_UDP,
3279 MVPP2_PRS_RI_L4_PROTO_MASK);
3283 err = mvpp2_prs_ip4_proto(priv, IPPROTO_IGMP,
3284 MVPP2_PRS_RI_CPU_CODE_RX_SPEC |
3285 MVPP2_PRS_RI_UDF3_RX_SPECIAL,
3286 MVPP2_PRS_RI_CPU_CODE_MASK |
3287 MVPP2_PRS_RI_UDF3_MASK);
3291 /* IPv4 Broadcast */
3292 err = mvpp2_prs_ip4_cast(priv, MVPP2_PRS_L3_BROAD_CAST);
3296 /* IPv4 Multicast */
3297 err = mvpp2_prs_ip4_cast(priv, MVPP2_PRS_L3_MULTI_CAST);
3301 /* Default IPv4 entry for unknown protocols */
3302 memset(&pe, 0, sizeof(pe));
3303 mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_IP4);
3304 pe.index = MVPP2_PE_IP4_PROTO_UN;
3306 /* Set next lu to IPv4 */
3307 mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_IP4);
3308 mvpp2_prs_sram_shift_set(&pe, 12, MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
3310 mvpp2_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L4,
3311 sizeof(struct iphdr) - 4,
3312 MVPP2_PRS_SRAM_OP_SEL_UDF_ADD);
3313 mvpp2_prs_sram_ai_update(&pe, MVPP2_PRS_IPV4_DIP_AI_BIT,
3314 MVPP2_PRS_IPV4_DIP_AI_BIT);
3315 mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L4_OTHER,
3316 MVPP2_PRS_RI_L4_PROTO_MASK);
3318 mvpp2_prs_tcam_ai_update(&pe, 0, MVPP2_PRS_IPV4_DIP_AI_BIT);
3319 /* Unmask all ports */
3320 mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);
3322 /* Update shadow table and hw entry */
3323 mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_IP4);
3324 mvpp2_prs_hw_write(priv, &pe);
3326 /* Default IPv4 entry for unicast address */
3327 memset(&pe, 0, sizeof(pe));
3328 mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_IP4);
3329 pe.index = MVPP2_PE_IP4_ADDR_UN;
3331 /* Finished: go to flowid generation */
3332 mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_FLOWS);
3333 mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1);
3334 mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_UCAST,
3335 MVPP2_PRS_RI_L3_ADDR_MASK);
3337 mvpp2_prs_tcam_ai_update(&pe, MVPP2_PRS_IPV4_DIP_AI_BIT,
3338 MVPP2_PRS_IPV4_DIP_AI_BIT);
3339 /* Unmask all ports */
3340 mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);
3342 /* Update shadow table and hw entry */
3343 mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_IP4);
3344 mvpp2_prs_hw_write(priv, &pe);
3349 /* Initialize entries for IPv6 */
3350 static int mvpp2_prs_ip6_init(struct mvpp2 *priv)
3352 struct mvpp2_prs_entry pe;
3355 /* Set entries for TCP, UDP and ICMP over IPv6 */
3356 err = mvpp2_prs_ip6_proto(priv, IPPROTO_TCP,
3357 MVPP2_PRS_RI_L4_TCP,
3358 MVPP2_PRS_RI_L4_PROTO_MASK);
3362 err = mvpp2_prs_ip6_proto(priv, IPPROTO_UDP,
3363 MVPP2_PRS_RI_L4_UDP,
3364 MVPP2_PRS_RI_L4_PROTO_MASK);
3368 err = mvpp2_prs_ip6_proto(priv, IPPROTO_ICMPV6,
3369 MVPP2_PRS_RI_CPU_CODE_RX_SPEC |
3370 MVPP2_PRS_RI_UDF3_RX_SPECIAL,
3371 MVPP2_PRS_RI_CPU_CODE_MASK |
3372 MVPP2_PRS_RI_UDF3_MASK);
3376 /* IPv4 is the last header. This is similar case as 6-TCP or 17-UDP */
3377 /* Result Info: UDF7=1, DS lite */
3378 err = mvpp2_prs_ip6_proto(priv, IPPROTO_IPIP,
3379 MVPP2_PRS_RI_UDF7_IP6_LITE,
3380 MVPP2_PRS_RI_UDF7_MASK);
3384 /* IPv6 multicast */
3385 err = mvpp2_prs_ip6_cast(priv, MVPP2_PRS_L3_MULTI_CAST);
3389 /* Entry for checking hop limit */
3390 tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
3391 MVPP2_PE_LAST_FREE_TID);
3395 memset(&pe, 0, sizeof(pe));
3396 mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_IP6);
3399 /* Finished: go to flowid generation */
3400 mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_FLOWS);
3401 mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1);
3402 mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_UN |
3403 MVPP2_PRS_RI_DROP_MASK,
3404 MVPP2_PRS_RI_L3_PROTO_MASK |
3405 MVPP2_PRS_RI_DROP_MASK);
3407 mvpp2_prs_tcam_data_byte_set(&pe, 1, 0x00, MVPP2_PRS_IPV6_HOP_MASK);
3408 mvpp2_prs_tcam_ai_update(&pe, MVPP2_PRS_IPV6_NO_EXT_AI_BIT,
3409 MVPP2_PRS_IPV6_NO_EXT_AI_BIT);
3411 /* Update shadow table and hw entry */
3412 mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_IP4);
3413 mvpp2_prs_hw_write(priv, &pe);
3415 /* Default IPv6 entry for unknown protocols */
3416 memset(&pe, 0, sizeof(pe));
3417 mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_IP6);
3418 pe.index = MVPP2_PE_IP6_PROTO_UN;
3420 /* Finished: go to flowid generation */
3421 mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_FLOWS);
3422 mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1);
3423 mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L4_OTHER,
3424 MVPP2_PRS_RI_L4_PROTO_MASK);
3425 /* Set L4 offset relatively to our current place */
3426 mvpp2_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L4,
3427 sizeof(struct ipv6hdr) - 4,
3428 MVPP2_PRS_SRAM_OP_SEL_UDF_ADD);
3430 mvpp2_prs_tcam_ai_update(&pe, MVPP2_PRS_IPV6_NO_EXT_AI_BIT,
3431 MVPP2_PRS_IPV6_NO_EXT_AI_BIT);
3432 /* Unmask all ports */
3433 mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);
3435 /* Update shadow table and hw entry */
3436 mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_IP4);
3437 mvpp2_prs_hw_write(priv, &pe);
3439 /* Default IPv6 entry for unknown ext protocols */
3440 memset(&pe, 0, sizeof(struct mvpp2_prs_entry));
3441 mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_IP6);
3442 pe.index = MVPP2_PE_IP6_EXT_PROTO_UN;
3444 /* Finished: go to flowid generation */
3445 mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_FLOWS);
3446 mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1);
3447 mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L4_OTHER,
3448 MVPP2_PRS_RI_L4_PROTO_MASK);
3450 mvpp2_prs_tcam_ai_update(&pe, MVPP2_PRS_IPV6_EXT_AI_BIT,
3451 MVPP2_PRS_IPV6_EXT_AI_BIT);
3452 /* Unmask all ports */
3453 mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);
3455 /* Update shadow table and hw entry */
3456 mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_IP4);
3457 mvpp2_prs_hw_write(priv, &pe);
3459 /* Default IPv6 entry for unicast address */
3460 memset(&pe, 0, sizeof(struct mvpp2_prs_entry));
3461 mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_IP6);
3462 pe.index = MVPP2_PE_IP6_ADDR_UN;
3464 /* Finished: go to IPv6 again */
3465 mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_IP6);
3466 mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_UCAST,
3467 MVPP2_PRS_RI_L3_ADDR_MASK);
3468 mvpp2_prs_sram_ai_update(&pe, MVPP2_PRS_IPV6_NO_EXT_AI_BIT,
3469 MVPP2_PRS_IPV6_NO_EXT_AI_BIT);
3470 /* Shift back to IPV6 NH */
3471 mvpp2_prs_sram_shift_set(&pe, -18, MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
3473 mvpp2_prs_tcam_ai_update(&pe, 0, MVPP2_PRS_IPV6_NO_EXT_AI_BIT);
3474 /* Unmask all ports */
3475 mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);
3477 /* Update shadow table and hw entry */
3478 mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_IP6);
3479 mvpp2_prs_hw_write(priv, &pe);
3484 /* Find tcam entry with matched pair <vid,port> */
3485 static int mvpp2_prs_vid_range_find(struct mvpp2 *priv, int pmap, u16 vid,
3488 unsigned char byte[2], enable[2];
3489 struct mvpp2_prs_entry pe;
3493 /* Go through the all entries with MVPP2_PRS_LU_VID */
3494 for (tid = MVPP2_PE_VID_FILT_RANGE_START;
3495 tid <= MVPP2_PE_VID_FILT_RANGE_END; tid++) {
3496 if (!priv->prs_shadow[tid].valid ||
3497 priv->prs_shadow[tid].lu != MVPP2_PRS_LU_VID)
3500 mvpp2_prs_init_from_hw(priv, &pe, tid);
3502 mvpp2_prs_tcam_data_byte_get(&pe, 2, &byte[0], &enable[0]);
3503 mvpp2_prs_tcam_data_byte_get(&pe, 3, &byte[1], &enable[1]);
3505 rvid = ((byte[0] & 0xf) << 8) + byte[1];
3506 rmask = ((enable[0] & 0xf) << 8) + enable[1];
3508 if (rvid != vid || rmask != mask)
3517 /* Write parser entry for VID filtering */
3518 static int mvpp2_prs_vid_entry_add(struct mvpp2_port *port, u16 vid)
3520 unsigned int vid_start = MVPP2_PE_VID_FILT_RANGE_START +
3521 port->id * MVPP2_PRS_VLAN_FILT_MAX;
3522 unsigned int mask = 0xfff, reg_val, shift;
3523 struct mvpp2 *priv = port->priv;
3524 struct mvpp2_prs_entry pe;
3527 memset(&pe, 0, sizeof(pe));
3529 /* Scan TCAM and see if entry with this <vid,port> already exist */
3530 tid = mvpp2_prs_vid_range_find(priv, (1 << port->id), vid, mask);
3532 reg_val = mvpp2_read(priv, MVPP2_MH_REG(port->id));
3533 if (reg_val & MVPP2_DSA_EXTENDED)
3534 shift = MVPP2_VLAN_TAG_EDSA_LEN;
3536 shift = MVPP2_VLAN_TAG_LEN;
3541 /* Go through all entries from first to last in vlan range */
3542 tid = mvpp2_prs_tcam_first_free(priv, vid_start,
3544 MVPP2_PRS_VLAN_FILT_MAX_ENTRY);
3546 /* There isn't room for a new VID filter */
3550 mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_VID);
3553 /* Mask all ports */
3554 mvpp2_prs_tcam_port_map_set(&pe, 0);
3556 mvpp2_prs_init_from_hw(priv, &pe, tid);
3559 /* Enable the current port */
3560 mvpp2_prs_tcam_port_set(&pe, port->id, true);
3562 /* Continue - set next lookup */
3563 mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_L2);
3565 /* Skip VLAN header - Set offset to 4 or 8 bytes */
3566 mvpp2_prs_sram_shift_set(&pe, shift, MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
3568 /* Set match on VID */
3569 mvpp2_prs_match_vid(&pe, MVPP2_PRS_VID_TCAM_BYTE, vid);
3571 /* Clear all ai bits for next iteration */
3572 mvpp2_prs_sram_ai_update(&pe, 0, MVPP2_PRS_SRAM_AI_MASK);
3574 /* Update shadow table */
3575 mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_VID);
3576 mvpp2_prs_hw_write(priv, &pe);
3581 /* Write parser entry for VID filtering */
3582 static void mvpp2_prs_vid_entry_remove(struct mvpp2_port *port, u16 vid)
3584 struct mvpp2 *priv = port->priv;
3587 /* Scan TCAM and see if entry with this <vid,port> already exist */
3588 tid = mvpp2_prs_vid_range_find(priv, (1 << port->id), vid, 0xfff);
3594 mvpp2_prs_hw_inv(priv, tid);
3595 priv->prs_shadow[tid].valid = false;
3598 /* Remove all existing VID filters on this port */
3599 static void mvpp2_prs_vid_remove_all(struct mvpp2_port *port)
3601 struct mvpp2 *priv = port->priv;
3604 for (tid = MVPP2_PRS_VID_PORT_FIRST(port->id);
3605 tid <= MVPP2_PRS_VID_PORT_LAST(port->id); tid++) {
3606 if (priv->prs_shadow[tid].valid)
3607 mvpp2_prs_vid_entry_remove(port, tid);
3611 /* Remove VID filering entry for this port */
3612 static void mvpp2_prs_vid_disable_filtering(struct mvpp2_port *port)
3614 unsigned int tid = MVPP2_PRS_VID_PORT_DFLT(port->id);
3615 struct mvpp2 *priv = port->priv;
3617 /* Invalidate the guard entry */
3618 mvpp2_prs_hw_inv(priv, tid);
3620 priv->prs_shadow[tid].valid = false;
3623 /* Add guard entry that drops packets when no VID is matched on this port */
3624 static void mvpp2_prs_vid_enable_filtering(struct mvpp2_port *port)
3626 unsigned int tid = MVPP2_PRS_VID_PORT_DFLT(port->id);
3627 struct mvpp2 *priv = port->priv;
3628 unsigned int reg_val, shift;
3629 struct mvpp2_prs_entry pe;
3631 if (priv->prs_shadow[tid].valid)
3634 memset(&pe, 0, sizeof(pe));
3638 reg_val = mvpp2_read(priv, MVPP2_MH_REG(port->id));
3639 if (reg_val & MVPP2_DSA_EXTENDED)
3640 shift = MVPP2_VLAN_TAG_EDSA_LEN;
3642 shift = MVPP2_VLAN_TAG_LEN;
3644 mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_VID);
3646 /* Mask all ports */
3647 mvpp2_prs_tcam_port_map_set(&pe, 0);
3649 /* Update port mask */
3650 mvpp2_prs_tcam_port_set(&pe, port->id, true);
3652 /* Continue - set next lookup */
3653 mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_L2);
3655 /* Skip VLAN header - Set offset to 4 or 8 bytes */
3656 mvpp2_prs_sram_shift_set(&pe, shift, MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
3658 /* Drop VLAN packets that don't belong to any VIDs on this port */
3659 mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_DROP_MASK,
3660 MVPP2_PRS_RI_DROP_MASK);
3662 /* Clear all ai bits for next iteration */
3663 mvpp2_prs_sram_ai_update(&pe, 0, MVPP2_PRS_SRAM_AI_MASK);
3665 /* Update shadow table */
3666 mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_VID);
3667 mvpp2_prs_hw_write(priv, &pe);
3670 /* Parser default initialization */
3671 static int mvpp2_prs_default_init(struct platform_device *pdev,
3676 /* Enable tcam table */
3677 mvpp2_write(priv, MVPP2_PRS_TCAM_CTRL_REG, MVPP2_PRS_TCAM_EN_MASK);
3679 /* Clear all tcam and sram entries */
3680 for (index = 0; index < MVPP2_PRS_TCAM_SRAM_SIZE; index++) {
3681 mvpp2_write(priv, MVPP2_PRS_TCAM_IDX_REG, index);
3682 for (i = 0; i < MVPP2_PRS_TCAM_WORDS; i++)
3683 mvpp2_write(priv, MVPP2_PRS_TCAM_DATA_REG(i), 0);
3685 mvpp2_write(priv, MVPP2_PRS_SRAM_IDX_REG, index);
3686 for (i = 0; i < MVPP2_PRS_SRAM_WORDS; i++)
3687 mvpp2_write(priv, MVPP2_PRS_SRAM_DATA_REG(i), 0);
3690 /* Invalidate all tcam entries */
3691 for (index = 0; index < MVPP2_PRS_TCAM_SRAM_SIZE; index++)
3692 mvpp2_prs_hw_inv(priv, index);
3694 priv->prs_shadow = devm_kcalloc(&pdev->dev, MVPP2_PRS_TCAM_SRAM_SIZE,
3695 sizeof(*priv->prs_shadow),
3697 if (!priv->prs_shadow)
3700 /* Always start from lookup = 0 */
3701 for (index = 0; index < MVPP2_MAX_PORTS; index++)
3702 mvpp2_prs_hw_port_init(priv, index, MVPP2_PRS_LU_MH,
3703 MVPP2_PRS_PORT_LU_MAX, 0);
3705 mvpp2_prs_def_flow_init(priv);
3707 mvpp2_prs_mh_init(priv);
3709 mvpp2_prs_mac_init(priv);
3711 mvpp2_prs_dsa_init(priv);
3713 mvpp2_prs_vid_init(priv);
3715 err = mvpp2_prs_etype_init(priv);
3719 err = mvpp2_prs_vlan_init(pdev, priv);
3723 err = mvpp2_prs_pppoe_init(priv);
3727 err = mvpp2_prs_ip6_init(priv);
3731 err = mvpp2_prs_ip4_init(priv);
3738 /* Compare MAC DA with tcam entry data */
3739 static bool mvpp2_prs_mac_range_equals(struct mvpp2_prs_entry *pe,
3740 const u8 *da, unsigned char *mask)
3742 unsigned char tcam_byte, tcam_mask;
3745 for (index = 0; index < ETH_ALEN; index++) {
3746 mvpp2_prs_tcam_data_byte_get(pe, index, &tcam_byte, &tcam_mask);
3747 if (tcam_mask != mask[index])
3750 if ((tcam_mask & tcam_byte) != (da[index] & mask[index]))
3757 /* Find tcam entry with matched pair <MAC DA, port> */
3759 mvpp2_prs_mac_da_range_find(struct mvpp2 *priv, int pmap, const u8 *da,
3760 unsigned char *mask, int udf_type)
3762 struct mvpp2_prs_entry pe;
3765 /* Go through the all entires with MVPP2_PRS_LU_MAC */
3766 for (tid = MVPP2_PE_MAC_RANGE_START;
3767 tid <= MVPP2_PE_MAC_RANGE_END; tid++) {
3768 unsigned int entry_pmap;
3770 if (!priv->prs_shadow[tid].valid ||
3771 (priv->prs_shadow[tid].lu != MVPP2_PRS_LU_MAC) ||
3772 (priv->prs_shadow[tid].udf != udf_type))
3775 mvpp2_prs_init_from_hw(priv, &pe, tid);
3776 entry_pmap = mvpp2_prs_tcam_port_map_get(&pe);
3778 if (mvpp2_prs_mac_range_equals(&pe, da, mask) &&
3786 /* Update parser's mac da entry */
3787 static int mvpp2_prs_mac_da_accept(struct mvpp2_port *port, const u8 *da,
3790 unsigned char mask[ETH_ALEN] = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff };
3791 struct mvpp2 *priv = port->priv;
3792 unsigned int pmap, len, ri;
3793 struct mvpp2_prs_entry pe;
3796 memset(&pe, 0, sizeof(pe));
3798 /* Scan TCAM and see if entry with this <MAC DA, port> already exist */
3799 tid = mvpp2_prs_mac_da_range_find(priv, BIT(port->id), da, mask,
3800 MVPP2_PRS_UDF_MAC_DEF);
3807 /* Create new TCAM entry */
3808 /* Go through the all entries from first to last */
3809 tid = mvpp2_prs_tcam_first_free(priv,
3810 MVPP2_PE_MAC_RANGE_START,
3811 MVPP2_PE_MAC_RANGE_END);
3817 /* Mask all ports */
3818 mvpp2_prs_tcam_port_map_set(&pe, 0);
3820 mvpp2_prs_init_from_hw(priv, &pe, tid);
3823 mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_MAC);
3825 /* Update port mask */
3826 mvpp2_prs_tcam_port_set(&pe, port->id, add);
3828 /* Invalidate the entry if no ports are left enabled */
3829 pmap = mvpp2_prs_tcam_port_map_get(&pe);
3834 mvpp2_prs_hw_inv(priv, pe.index);
3835 priv->prs_shadow[pe.index].valid = false;
3839 /* Continue - set next lookup */
3840 mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_DSA);
3842 /* Set match on DA */
3845 mvpp2_prs_tcam_data_byte_set(&pe, len, da[len], 0xff);
3847 /* Set result info bits */
3848 if (is_broadcast_ether_addr(da)) {
3849 ri = MVPP2_PRS_RI_L2_BCAST;
3850 } else if (is_multicast_ether_addr(da)) {
3851 ri = MVPP2_PRS_RI_L2_MCAST;
3853 ri = MVPP2_PRS_RI_L2_UCAST;
3855 if (ether_addr_equal(da, port->dev->dev_addr))
3856 ri |= MVPP2_PRS_RI_MAC_ME_MASK;
3859 mvpp2_prs_sram_ri_update(&pe, ri, MVPP2_PRS_RI_L2_CAST_MASK |
3860 MVPP2_PRS_RI_MAC_ME_MASK);
3861 mvpp2_prs_shadow_ri_set(priv, pe.index, ri, MVPP2_PRS_RI_L2_CAST_MASK |
3862 MVPP2_PRS_RI_MAC_ME_MASK);
3864 /* Shift to ethertype */
3865 mvpp2_prs_sram_shift_set(&pe, 2 * ETH_ALEN,
3866 MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
3868 /* Update shadow table and hw entry */
3869 priv->prs_shadow[pe.index].udf = MVPP2_PRS_UDF_MAC_DEF;
3870 mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_MAC);
3871 mvpp2_prs_hw_write(priv, &pe);
3876 static int mvpp2_prs_update_mac_da(struct net_device *dev, const u8 *da)
3878 struct mvpp2_port *port = netdev_priv(dev);
3881 /* Remove old parser entry */
3882 err = mvpp2_prs_mac_da_accept(port, dev->dev_addr, false);
3886 /* Add new parser entry */
3887 err = mvpp2_prs_mac_da_accept(port, da, true);
3891 /* Set addr in the device */
3892 ether_addr_copy(dev->dev_addr, da);
3897 static void mvpp2_prs_mac_del_all(struct mvpp2_port *port)
3899 struct mvpp2 *priv = port->priv;
3900 struct mvpp2_prs_entry pe;
3904 for (tid = MVPP2_PE_MAC_RANGE_START;
3905 tid <= MVPP2_PE_MAC_RANGE_END; tid++) {
3906 unsigned char da[ETH_ALEN], da_mask[ETH_ALEN];
3908 if (!priv->prs_shadow[tid].valid ||
3909 (priv->prs_shadow[tid].lu != MVPP2_PRS_LU_MAC) ||
3910 (priv->prs_shadow[tid].udf != MVPP2_PRS_UDF_MAC_DEF))
3913 mvpp2_prs_init_from_hw(priv, &pe, tid);
3915 pmap = mvpp2_prs_tcam_port_map_get(&pe);
3917 /* We only want entries active on this port */
3918 if (!test_bit(port->id, &pmap))
3921 /* Read mac addr from entry */
3922 for (index = 0; index < ETH_ALEN; index++)
3923 mvpp2_prs_tcam_data_byte_get(&pe, index, &da[index],
3926 /* Special cases : Don't remove broadcast and port's own
3929 if (is_broadcast_ether_addr(da) ||
3930 ether_addr_equal(da, port->dev->dev_addr))
3933 /* Remove entry from TCAM */
3934 mvpp2_prs_mac_da_accept(port, da, false);
3938 static int mvpp2_prs_tag_mode_set(struct mvpp2 *priv, int port, int type)
3941 case MVPP2_TAG_TYPE_EDSA:
3942 /* Add port to EDSA entries */
3943 mvpp2_prs_dsa_tag_set(priv, port, true,
3944 MVPP2_PRS_TAGGED, MVPP2_PRS_EDSA);
3945 mvpp2_prs_dsa_tag_set(priv, port, true,
3946 MVPP2_PRS_UNTAGGED, MVPP2_PRS_EDSA);
3947 /* Remove port from DSA entries */
3948 mvpp2_prs_dsa_tag_set(priv, port, false,
3949 MVPP2_PRS_TAGGED, MVPP2_PRS_DSA);
3950 mvpp2_prs_dsa_tag_set(priv, port, false,
3951 MVPP2_PRS_UNTAGGED, MVPP2_PRS_DSA);
3954 case MVPP2_TAG_TYPE_DSA:
3955 /* Add port to DSA entries */
3956 mvpp2_prs_dsa_tag_set(priv, port, true,
3957 MVPP2_PRS_TAGGED, MVPP2_PRS_DSA);
3958 mvpp2_prs_dsa_tag_set(priv, port, true,
3959 MVPP2_PRS_UNTAGGED, MVPP2_PRS_DSA);
3960 /* Remove port from EDSA entries */
3961 mvpp2_prs_dsa_tag_set(priv, port, false,
3962 MVPP2_PRS_TAGGED, MVPP2_PRS_EDSA);
3963 mvpp2_prs_dsa_tag_set(priv, port, false,
3964 MVPP2_PRS_UNTAGGED, MVPP2_PRS_EDSA);
3967 case MVPP2_TAG_TYPE_MH:
3968 case MVPP2_TAG_TYPE_NONE:
3969 /* Remove port form EDSA and DSA entries */
3970 mvpp2_prs_dsa_tag_set(priv, port, false,
3971 MVPP2_PRS_TAGGED, MVPP2_PRS_DSA);
3972 mvpp2_prs_dsa_tag_set(priv, port, false,
3973 MVPP2_PRS_UNTAGGED, MVPP2_PRS_DSA);
3974 mvpp2_prs_dsa_tag_set(priv, port, false,
3975 MVPP2_PRS_TAGGED, MVPP2_PRS_EDSA);
3976 mvpp2_prs_dsa_tag_set(priv, port, false,
3977 MVPP2_PRS_UNTAGGED, MVPP2_PRS_EDSA);
3981 if ((type < 0) || (type > MVPP2_TAG_TYPE_EDSA))
3988 /* Set prs flow for the port */
3989 static int mvpp2_prs_def_flow(struct mvpp2_port *port)
3991 struct mvpp2_prs_entry pe;
3994 memset(&pe, 0, sizeof(pe));
3996 tid = mvpp2_prs_flow_find(port->priv, port->id);
3998 /* Such entry not exist */
4000 /* Go through the all entires from last to first */
4001 tid = mvpp2_prs_tcam_first_free(port->priv,
4002 MVPP2_PE_LAST_FREE_TID,
4003 MVPP2_PE_FIRST_FREE_TID);
4010 mvpp2_prs_sram_ai_update(&pe, port->id, MVPP2_PRS_FLOW_ID_MASK);
4011 mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_DONE_BIT, 1);
4013 /* Update shadow table */
4014 mvpp2_prs_shadow_set(port->priv, pe.index, MVPP2_PRS_LU_FLOWS);
4016 mvpp2_prs_init_from_hw(port->priv, &pe, tid);
4019 mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_FLOWS);
4020 mvpp2_prs_tcam_port_map_set(&pe, (1 << port->id));
4021 mvpp2_prs_hw_write(port->priv, &pe);
4026 /* Classifier configuration routines */
4028 /* Update classification flow table registers */
4029 static void mvpp2_cls_flow_write(struct mvpp2 *priv,
4030 struct mvpp2_cls_flow_entry *fe)
4032 mvpp2_write(priv, MVPP2_CLS_FLOW_INDEX_REG, fe->index);
4033 mvpp2_write(priv, MVPP2_CLS_FLOW_TBL0_REG, fe->data[0]);
4034 mvpp2_write(priv, MVPP2_CLS_FLOW_TBL1_REG, fe->data[1]);
4035 mvpp2_write(priv, MVPP2_CLS_FLOW_TBL2_REG, fe->data[2]);
4038 /* Update classification lookup table register */
4039 static void mvpp2_cls_lookup_write(struct mvpp2 *priv,
4040 struct mvpp2_cls_lookup_entry *le)
4044 val = (le->way << MVPP2_CLS_LKP_INDEX_WAY_OFFS) | le->lkpid;
4045 mvpp2_write(priv, MVPP2_CLS_LKP_INDEX_REG, val);
4046 mvpp2_write(priv, MVPP2_CLS_LKP_TBL_REG, le->data);
4049 /* Classifier default initialization */
4050 static void mvpp2_cls_init(struct mvpp2 *priv)
4052 struct mvpp2_cls_lookup_entry le;
4053 struct mvpp2_cls_flow_entry fe;
4056 /* Enable classifier */
4057 mvpp2_write(priv, MVPP2_CLS_MODE_REG, MVPP2_CLS_MODE_ACTIVE_MASK);
4059 /* Clear classifier flow table */
4060 memset(&fe.data, 0, sizeof(fe.data));
4061 for (index = 0; index < MVPP2_CLS_FLOWS_TBL_SIZE; index++) {
4063 mvpp2_cls_flow_write(priv, &fe);
4066 /* Clear classifier lookup table */
4068 for (index = 0; index < MVPP2_CLS_LKP_TBL_SIZE; index++) {
4071 mvpp2_cls_lookup_write(priv, &le);
4074 mvpp2_cls_lookup_write(priv, &le);
4078 static void mvpp2_cls_port_config(struct mvpp2_port *port)
4080 struct mvpp2_cls_lookup_entry le;
4083 /* Set way for the port */
4084 val = mvpp2_read(port->priv, MVPP2_CLS_PORT_WAY_REG);
4085 val &= ~MVPP2_CLS_PORT_WAY_MASK(port->id);
4086 mvpp2_write(port->priv, MVPP2_CLS_PORT_WAY_REG, val);
4088 /* Pick the entry to be accessed in lookup ID decoding table
4089 * according to the way and lkpid.
4091 le.lkpid = port->id;
4095 /* Set initial CPU queue for receiving packets */
4096 le.data &= ~MVPP2_CLS_LKP_TBL_RXQ_MASK;
4097 le.data |= port->first_rxq;
4099 /* Disable classification engines */
4100 le.data &= ~MVPP2_CLS_LKP_TBL_LOOKUP_EN_MASK;
4102 /* Update lookup ID table entry */
4103 mvpp2_cls_lookup_write(port->priv, &le);
4106 /* Set CPU queue number for oversize packets */
4107 static void mvpp2_cls_oversize_rxq_set(struct mvpp2_port *port)
4111 mvpp2_write(port->priv, MVPP2_CLS_OVERSIZE_RXQ_LOW_REG(port->id),
4112 port->first_rxq & MVPP2_CLS_OVERSIZE_RXQ_LOW_MASK);
4114 mvpp2_write(port->priv, MVPP2_CLS_SWFWD_P2HQ_REG(port->id),
4115 (port->first_rxq >> MVPP2_CLS_OVERSIZE_RXQ_LOW_BITS));
4117 val = mvpp2_read(port->priv, MVPP2_CLS_SWFWD_PCTRL_REG);
4118 val |= MVPP2_CLS_SWFWD_PCTRL_MASK(port->id);
4119 mvpp2_write(port->priv, MVPP2_CLS_SWFWD_PCTRL_REG, val);
4122 static void *mvpp2_frag_alloc(const struct mvpp2_bm_pool *pool)
4124 if (likely(pool->frag_size <= PAGE_SIZE))
4125 return netdev_alloc_frag(pool->frag_size);
4127 return kmalloc(pool->frag_size, GFP_ATOMIC);
4130 static void mvpp2_frag_free(const struct mvpp2_bm_pool *pool, void *data)
4132 if (likely(pool->frag_size <= PAGE_SIZE))
4133 skb_free_frag(data);
4138 /* Buffer Manager configuration routines */
4141 static int mvpp2_bm_pool_create(struct platform_device *pdev,
4143 struct mvpp2_bm_pool *bm_pool, int size)
4147 /* Number of buffer pointers must be a multiple of 16, as per
4148 * hardware constraints
4150 if (!IS_ALIGNED(size, 16))
4153 /* PPv2.1 needs 8 bytes per buffer pointer, PPv2.2 needs 16
4154 * bytes per buffer pointer
4156 if (priv->hw_version == MVPP21)
4157 bm_pool->size_bytes = 2 * sizeof(u32) * size;
4159 bm_pool->size_bytes = 2 * sizeof(u64) * size;
4161 bm_pool->virt_addr = dma_alloc_coherent(&pdev->dev, bm_pool->size_bytes,
4164 if (!bm_pool->virt_addr)
4167 if (!IS_ALIGNED((unsigned long)bm_pool->virt_addr,
4168 MVPP2_BM_POOL_PTR_ALIGN)) {
4169 dma_free_coherent(&pdev->dev, bm_pool->size_bytes,
4170 bm_pool->virt_addr, bm_pool->dma_addr);
4171 dev_err(&pdev->dev, "BM pool %d is not %d bytes aligned\n",
4172 bm_pool->id, MVPP2_BM_POOL_PTR_ALIGN);
4176 mvpp2_write(priv, MVPP2_BM_POOL_BASE_REG(bm_pool->id),
4177 lower_32_bits(bm_pool->dma_addr));
4178 mvpp2_write(priv, MVPP2_BM_POOL_SIZE_REG(bm_pool->id), size);
4180 val = mvpp2_read(priv, MVPP2_BM_POOL_CTRL_REG(bm_pool->id));
4181 val |= MVPP2_BM_START_MASK;
4182 mvpp2_write(priv, MVPP2_BM_POOL_CTRL_REG(bm_pool->id), val);
4184 bm_pool->size = size;
4185 bm_pool->pkt_size = 0;
4186 bm_pool->buf_num = 0;
4191 /* Set pool buffer size */
4192 static void mvpp2_bm_pool_bufsize_set(struct mvpp2 *priv,
4193 struct mvpp2_bm_pool *bm_pool,
4198 bm_pool->buf_size = buf_size;
4200 val = ALIGN(buf_size, 1 << MVPP2_POOL_BUF_SIZE_OFFSET);
4201 mvpp2_write(priv, MVPP2_POOL_BUF_SIZE_REG(bm_pool->id), val);
4204 static void mvpp2_bm_bufs_get_addrs(struct device *dev, struct mvpp2 *priv,
4205 struct mvpp2_bm_pool *bm_pool,
4206 dma_addr_t *dma_addr,
4207 phys_addr_t *phys_addr)
4209 int cpu = get_cpu();
4211 *dma_addr = mvpp2_percpu_read(priv, cpu,
4212 MVPP2_BM_PHY_ALLOC_REG(bm_pool->id));
4213 *phys_addr = mvpp2_percpu_read(priv, cpu, MVPP2_BM_VIRT_ALLOC_REG);
4215 if (priv->hw_version == MVPP22) {
4217 u32 dma_addr_highbits, phys_addr_highbits;
4219 val = mvpp2_percpu_read(priv, cpu, MVPP22_BM_ADDR_HIGH_ALLOC);
4220 dma_addr_highbits = (val & MVPP22_BM_ADDR_HIGH_PHYS_MASK);
4221 phys_addr_highbits = (val & MVPP22_BM_ADDR_HIGH_VIRT_MASK) >>
4222 MVPP22_BM_ADDR_HIGH_VIRT_SHIFT;
4224 if (sizeof(dma_addr_t) == 8)
4225 *dma_addr |= (u64)dma_addr_highbits << 32;
4227 if (sizeof(phys_addr_t) == 8)
4228 *phys_addr |= (u64)phys_addr_highbits << 32;
4234 /* Free all buffers from the pool */
4235 static void mvpp2_bm_bufs_free(struct device *dev, struct mvpp2 *priv,
4236 struct mvpp2_bm_pool *bm_pool, int buf_num)
4240 if (buf_num > bm_pool->buf_num) {
4241 WARN(1, "Pool does not have so many bufs pool(%d) bufs(%d)\n",
4242 bm_pool->id, buf_num);
4243 buf_num = bm_pool->buf_num;
4246 for (i = 0; i < buf_num; i++) {
4247 dma_addr_t buf_dma_addr;
4248 phys_addr_t buf_phys_addr;
4251 mvpp2_bm_bufs_get_addrs(dev, priv, bm_pool,
4252 &buf_dma_addr, &buf_phys_addr);
4254 dma_unmap_single(dev, buf_dma_addr,
4255 bm_pool->buf_size, DMA_FROM_DEVICE);
4257 data = (void *)phys_to_virt(buf_phys_addr);
4261 mvpp2_frag_free(bm_pool, data);
4264 /* Update BM driver with number of buffers removed from pool */
4265 bm_pool->buf_num -= i;
4268 /* Check number of buffers in BM pool */
4269 static int mvpp2_check_hw_buf_num(struct mvpp2 *priv, struct mvpp2_bm_pool *bm_pool)
4273 buf_num += mvpp2_read(priv, MVPP2_BM_POOL_PTRS_NUM_REG(bm_pool->id)) &
4274 MVPP22_BM_POOL_PTRS_NUM_MASK;
4275 buf_num += mvpp2_read(priv, MVPP2_BM_BPPI_PTRS_NUM_REG(bm_pool->id)) &
4276 MVPP2_BM_BPPI_PTR_NUM_MASK;
4278 /* HW has one buffer ready which is not reflected in the counters */
4286 static int mvpp2_bm_pool_destroy(struct platform_device *pdev,
4288 struct mvpp2_bm_pool *bm_pool)
4293 buf_num = mvpp2_check_hw_buf_num(priv, bm_pool);
4294 mvpp2_bm_bufs_free(&pdev->dev, priv, bm_pool, buf_num);
4296 /* Check buffer counters after free */
4297 buf_num = mvpp2_check_hw_buf_num(priv, bm_pool);
4299 WARN(1, "cannot free all buffers in pool %d, buf_num left %d\n",
4300 bm_pool->id, bm_pool->buf_num);
4304 val = mvpp2_read(priv, MVPP2_BM_POOL_CTRL_REG(bm_pool->id));
4305 val |= MVPP2_BM_STOP_MASK;
4306 mvpp2_write(priv, MVPP2_BM_POOL_CTRL_REG(bm_pool->id), val);
4308 dma_free_coherent(&pdev->dev, bm_pool->size_bytes,
4314 static int mvpp2_bm_pools_init(struct platform_device *pdev,
4318 struct mvpp2_bm_pool *bm_pool;
4320 /* Create all pools with maximum size */
4321 size = MVPP2_BM_POOL_SIZE_MAX;
4322 for (i = 0; i < MVPP2_BM_POOLS_NUM; i++) {
4323 bm_pool = &priv->bm_pools[i];
4325 err = mvpp2_bm_pool_create(pdev, priv, bm_pool, size);
4327 goto err_unroll_pools;
4328 mvpp2_bm_pool_bufsize_set(priv, bm_pool, 0);
4333 dev_err(&pdev->dev, "failed to create BM pool %d, size %d\n", i, size);
4334 for (i = i - 1; i >= 0; i--)
4335 mvpp2_bm_pool_destroy(pdev, priv, &priv->bm_pools[i]);
4339 static int mvpp2_bm_init(struct platform_device *pdev, struct mvpp2 *priv)
4343 for (i = 0; i < MVPP2_BM_POOLS_NUM; i++) {
4344 /* Mask BM all interrupts */
4345 mvpp2_write(priv, MVPP2_BM_INTR_MASK_REG(i), 0);
4346 /* Clear BM cause register */
4347 mvpp2_write(priv, MVPP2_BM_INTR_CAUSE_REG(i), 0);
4350 /* Allocate and initialize BM pools */
4351 priv->bm_pools = devm_kcalloc(&pdev->dev, MVPP2_BM_POOLS_NUM,
4352 sizeof(*priv->bm_pools), GFP_KERNEL);
4353 if (!priv->bm_pools)
4356 err = mvpp2_bm_pools_init(pdev, priv);
4362 static void mvpp2_setup_bm_pool(void)
4365 mvpp2_pools[MVPP2_BM_SHORT].buf_num = MVPP2_BM_SHORT_BUF_NUM;
4366 mvpp2_pools[MVPP2_BM_SHORT].pkt_size = MVPP2_BM_SHORT_PKT_SIZE;
4369 mvpp2_pools[MVPP2_BM_LONG].buf_num = MVPP2_BM_LONG_BUF_NUM;
4370 mvpp2_pools[MVPP2_BM_LONG].pkt_size = MVPP2_BM_LONG_PKT_SIZE;
4373 mvpp2_pools[MVPP2_BM_JUMBO].buf_num = MVPP2_BM_JUMBO_BUF_NUM;
4374 mvpp2_pools[MVPP2_BM_JUMBO].pkt_size = MVPP2_BM_JUMBO_PKT_SIZE;
4377 /* Attach long pool to rxq */
4378 static void mvpp2_rxq_long_pool_set(struct mvpp2_port *port,
4379 int lrxq, int long_pool)
4384 /* Get queue physical ID */
4385 prxq = port->rxqs[lrxq]->id;
4387 if (port->priv->hw_version == MVPP21)
4388 mask = MVPP21_RXQ_POOL_LONG_MASK;
4390 mask = MVPP22_RXQ_POOL_LONG_MASK;
4392 val = mvpp2_read(port->priv, MVPP2_RXQ_CONFIG_REG(prxq));
4394 val |= (long_pool << MVPP2_RXQ_POOL_LONG_OFFS) & mask;
4395 mvpp2_write(port->priv, MVPP2_RXQ_CONFIG_REG(prxq), val);
4398 /* Attach short pool to rxq */
4399 static void mvpp2_rxq_short_pool_set(struct mvpp2_port *port,
4400 int lrxq, int short_pool)
4405 /* Get queue physical ID */
4406 prxq = port->rxqs[lrxq]->id;
4408 if (port->priv->hw_version == MVPP21)
4409 mask = MVPP21_RXQ_POOL_SHORT_MASK;
4411 mask = MVPP22_RXQ_POOL_SHORT_MASK;
4413 val = mvpp2_read(port->priv, MVPP2_RXQ_CONFIG_REG(prxq));
4415 val |= (short_pool << MVPP2_RXQ_POOL_SHORT_OFFS) & mask;
4416 mvpp2_write(port->priv, MVPP2_RXQ_CONFIG_REG(prxq), val);
4419 static void *mvpp2_buf_alloc(struct mvpp2_port *port,
4420 struct mvpp2_bm_pool *bm_pool,
4421 dma_addr_t *buf_dma_addr,
4422 phys_addr_t *buf_phys_addr,
4425 dma_addr_t dma_addr;
4428 data = mvpp2_frag_alloc(bm_pool);
4432 dma_addr = dma_map_single(port->dev->dev.parent, data,
4433 MVPP2_RX_BUF_SIZE(bm_pool->pkt_size),
4435 if (unlikely(dma_mapping_error(port->dev->dev.parent, dma_addr))) {
4436 mvpp2_frag_free(bm_pool, data);
4439 *buf_dma_addr = dma_addr;
4440 *buf_phys_addr = virt_to_phys(data);
4445 /* Release buffer to BM */
4446 static inline void mvpp2_bm_pool_put(struct mvpp2_port *port, int pool,
4447 dma_addr_t buf_dma_addr,
4448 phys_addr_t buf_phys_addr)
4450 int cpu = get_cpu();
4452 if (port->priv->hw_version == MVPP22) {
4455 if (sizeof(dma_addr_t) == 8)
4456 val |= upper_32_bits(buf_dma_addr) &
4457 MVPP22_BM_ADDR_HIGH_PHYS_RLS_MASK;
4459 if (sizeof(phys_addr_t) == 8)
4460 val |= (upper_32_bits(buf_phys_addr)
4461 << MVPP22_BM_ADDR_HIGH_VIRT_RLS_SHIFT) &
4462 MVPP22_BM_ADDR_HIGH_VIRT_RLS_MASK;
4464 mvpp2_percpu_write_relaxed(port->priv, cpu,
4465 MVPP22_BM_ADDR_HIGH_RLS_REG, val);
4468 /* MVPP2_BM_VIRT_RLS_REG is not interpreted by HW, and simply
4469 * returned in the "cookie" field of the RX
4470 * descriptor. Instead of storing the virtual address, we
4471 * store the physical address
4473 mvpp2_percpu_write_relaxed(port->priv, cpu,
4474 MVPP2_BM_VIRT_RLS_REG, buf_phys_addr);
4475 mvpp2_percpu_write_relaxed(port->priv, cpu,
4476 MVPP2_BM_PHY_RLS_REG(pool), buf_dma_addr);
4481 /* Allocate buffers for the pool */
4482 static int mvpp2_bm_bufs_add(struct mvpp2_port *port,
4483 struct mvpp2_bm_pool *bm_pool, int buf_num)
4485 int i, buf_size, total_size;
4486 dma_addr_t dma_addr;
4487 phys_addr_t phys_addr;
4490 buf_size = MVPP2_RX_BUF_SIZE(bm_pool->pkt_size);
4491 total_size = MVPP2_RX_TOTAL_SIZE(buf_size);
4494 (buf_num + bm_pool->buf_num > bm_pool->size)) {
4495 netdev_err(port->dev,
4496 "cannot allocate %d buffers for pool %d\n",
4497 buf_num, bm_pool->id);
4501 for (i = 0; i < buf_num; i++) {
4502 buf = mvpp2_buf_alloc(port, bm_pool, &dma_addr,
4503 &phys_addr, GFP_KERNEL);
4507 mvpp2_bm_pool_put(port, bm_pool->id, dma_addr,
4511 /* Update BM driver with number of buffers added to pool */
4512 bm_pool->buf_num += i;
4514 netdev_dbg(port->dev,
4515 "pool %d: pkt_size=%4d, buf_size=%4d, total_size=%4d\n",
4516 bm_pool->id, bm_pool->pkt_size, buf_size, total_size);
4518 netdev_dbg(port->dev,
4519 "pool %d: %d of %d buffers added\n",
4520 bm_pool->id, i, buf_num);
4524 /* Notify the driver that BM pool is being used as specific type and return the
4525 * pool pointer on success
4527 static struct mvpp2_bm_pool *
4528 mvpp2_bm_pool_use(struct mvpp2_port *port, unsigned pool, int pkt_size)
4530 struct mvpp2_bm_pool *new_pool = &port->priv->bm_pools[pool];
4533 if (pool >= MVPP2_BM_POOLS_NUM) {
4534 netdev_err(port->dev, "Invalid pool %d\n", pool);
4538 /* Allocate buffers in case BM pool is used as long pool, but packet
4539 * size doesn't match MTU or BM pool hasn't being used yet
4541 if (new_pool->pkt_size == 0) {
4544 /* Set default buffer number or free all the buffers in case
4545 * the pool is not empty
4547 pkts_num = new_pool->buf_num;
4549 pkts_num = mvpp2_pools[pool].buf_num;
4551 mvpp2_bm_bufs_free(port->dev->dev.parent,
4552 port->priv, new_pool, pkts_num);
4554 new_pool->pkt_size = pkt_size;
4555 new_pool->frag_size =
4556 SKB_DATA_ALIGN(MVPP2_RX_BUF_SIZE(pkt_size)) +
4557 MVPP2_SKB_SHINFO_SIZE;
4559 /* Allocate buffers for this pool */
4560 num = mvpp2_bm_bufs_add(port, new_pool, pkts_num);
4561 if (num != pkts_num) {
4562 WARN(1, "pool %d: %d of %d allocated\n",
4563 new_pool->id, num, pkts_num);
4568 mvpp2_bm_pool_bufsize_set(port->priv, new_pool,
4569 MVPP2_RX_BUF_SIZE(new_pool->pkt_size));
4574 /* Initialize pools for swf */
4575 static int mvpp2_swf_bm_pool_init(struct mvpp2_port *port)
4578 enum mvpp2_bm_pool_log_num long_log_pool, short_log_pool;
4580 /* If port pkt_size is higher than 1518B:
4581 * HW Long pool - SW Jumbo pool, HW Short pool - SW Long pool
4582 * else: HW Long pool - SW Long pool, HW Short pool - SW Short pool
4584 if (port->pkt_size > MVPP2_BM_LONG_PKT_SIZE) {
4585 long_log_pool = MVPP2_BM_JUMBO;
4586 short_log_pool = MVPP2_BM_LONG;
4588 long_log_pool = MVPP2_BM_LONG;
4589 short_log_pool = MVPP2_BM_SHORT;
4592 if (!port->pool_long) {
4594 mvpp2_bm_pool_use(port, long_log_pool,
4595 mvpp2_pools[long_log_pool].pkt_size);
4596 if (!port->pool_long)
4599 port->pool_long->port_map |= BIT(port->id);
4601 for (rxq = 0; rxq < port->nrxqs; rxq++)
4602 mvpp2_rxq_long_pool_set(port, rxq, port->pool_long->id);
4605 if (!port->pool_short) {
4607 mvpp2_bm_pool_use(port, short_log_pool,
4608 mvpp2_pools[short_log_pool].pkt_size);
4609 if (!port->pool_short)
4612 port->pool_short->port_map |= BIT(port->id);
4614 for (rxq = 0; rxq < port->nrxqs; rxq++)
4615 mvpp2_rxq_short_pool_set(port, rxq,
4616 port->pool_short->id);
4622 static int mvpp2_bm_update_mtu(struct net_device *dev, int mtu)
4624 struct mvpp2_port *port = netdev_priv(dev);
4625 enum mvpp2_bm_pool_log_num new_long_pool;
4626 int pkt_size = MVPP2_RX_PKT_SIZE(mtu);
4628 /* If port MTU is higher than 1518B:
4629 * HW Long pool - SW Jumbo pool, HW Short pool - SW Long pool
4630 * else: HW Long pool - SW Long pool, HW Short pool - SW Short pool
4632 if (pkt_size > MVPP2_BM_LONG_PKT_SIZE)
4633 new_long_pool = MVPP2_BM_JUMBO;
4635 new_long_pool = MVPP2_BM_LONG;
4637 if (new_long_pool != port->pool_long->id) {
4638 /* Remove port from old short & long pool */
4639 port->pool_long = mvpp2_bm_pool_use(port, port->pool_long->id,
4640 port->pool_long->pkt_size);
4641 port->pool_long->port_map &= ~BIT(port->id);
4642 port->pool_long = NULL;
4644 port->pool_short = mvpp2_bm_pool_use(port, port->pool_short->id,
4645 port->pool_short->pkt_size);
4646 port->pool_short->port_map &= ~BIT(port->id);
4647 port->pool_short = NULL;
4649 port->pkt_size = pkt_size;
4651 /* Add port to new short & long pool */
4652 mvpp2_swf_bm_pool_init(port);
4654 /* Update L4 checksum when jumbo enable/disable on port */
4655 if (new_long_pool == MVPP2_BM_JUMBO && port->id != 0) {
4656 dev->features &= ~(NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM);
4657 dev->hw_features &= ~(NETIF_F_IP_CSUM |
4660 dev->features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;
4661 dev->hw_features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;
4666 dev->wanted_features = dev->features;
4668 netdev_update_features(dev);
4672 static inline void mvpp2_interrupts_enable(struct mvpp2_port *port)
4674 int i, sw_thread_mask = 0;
4676 for (i = 0; i < port->nqvecs; i++)
4677 sw_thread_mask |= port->qvecs[i].sw_thread_mask;
4679 mvpp2_write(port->priv, MVPP2_ISR_ENABLE_REG(port->id),
4680 MVPP2_ISR_ENABLE_INTERRUPT(sw_thread_mask));
4683 static inline void mvpp2_interrupts_disable(struct mvpp2_port *port)
4685 int i, sw_thread_mask = 0;
4687 for (i = 0; i < port->nqvecs; i++)
4688 sw_thread_mask |= port->qvecs[i].sw_thread_mask;
4690 mvpp2_write(port->priv, MVPP2_ISR_ENABLE_REG(port->id),
4691 MVPP2_ISR_DISABLE_INTERRUPT(sw_thread_mask));
4694 static inline void mvpp2_qvec_interrupt_enable(struct mvpp2_queue_vector *qvec)
4696 struct mvpp2_port *port = qvec->port;
4698 mvpp2_write(port->priv, MVPP2_ISR_ENABLE_REG(port->id),
4699 MVPP2_ISR_ENABLE_INTERRUPT(qvec->sw_thread_mask));
4702 static inline void mvpp2_qvec_interrupt_disable(struct mvpp2_queue_vector *qvec)
4704 struct mvpp2_port *port = qvec->port;
4706 mvpp2_write(port->priv, MVPP2_ISR_ENABLE_REG(port->id),
4707 MVPP2_ISR_DISABLE_INTERRUPT(qvec->sw_thread_mask));
4710 /* Mask the current CPU's Rx/Tx interrupts
4711 * Called by on_each_cpu(), guaranteed to run with migration disabled,
4712 * using smp_processor_id() is OK.
4714 static void mvpp2_interrupts_mask(void *arg)
4716 struct mvpp2_port *port = arg;
4718 mvpp2_percpu_write(port->priv, smp_processor_id(),
4719 MVPP2_ISR_RX_TX_MASK_REG(port->id), 0);
4722 /* Unmask the current CPU's Rx/Tx interrupts.
4723 * Called by on_each_cpu(), guaranteed to run with migration disabled,
4724 * using smp_processor_id() is OK.
4726 static void mvpp2_interrupts_unmask(void *arg)
4728 struct mvpp2_port *port = arg;
4731 val = MVPP2_CAUSE_MISC_SUM_MASK |
4732 MVPP2_CAUSE_RXQ_OCCUP_DESC_ALL_MASK;
4733 if (port->has_tx_irqs)
4734 val |= MVPP2_CAUSE_TXQ_OCCUP_DESC_ALL_MASK;
4736 mvpp2_percpu_write(port->priv, smp_processor_id(),
4737 MVPP2_ISR_RX_TX_MASK_REG(port->id), val);
4741 mvpp2_shared_interrupt_mask_unmask(struct mvpp2_port *port, bool mask)
4746 if (port->priv->hw_version != MVPP22)
4752 val = MVPP2_CAUSE_RXQ_OCCUP_DESC_ALL_MASK;
4754 for (i = 0; i < port->nqvecs; i++) {
4755 struct mvpp2_queue_vector *v = port->qvecs + i;
4757 if (v->type != MVPP2_QUEUE_VECTOR_SHARED)
4760 mvpp2_percpu_write(port->priv, v->sw_thread_id,
4761 MVPP2_ISR_RX_TX_MASK_REG(port->id), val);
4765 /* Port configuration routines */
4767 static void mvpp22_gop_init_rgmii(struct mvpp2_port *port)
4769 struct mvpp2 *priv = port->priv;
4772 regmap_read(priv->sysctrl_base, GENCONF_PORT_CTRL0, &val);
4773 val |= GENCONF_PORT_CTRL0_BUS_WIDTH_SELECT;
4774 regmap_write(priv->sysctrl_base, GENCONF_PORT_CTRL0, val);
4776 regmap_read(priv->sysctrl_base, GENCONF_CTRL0, &val);
4777 if (port->gop_id == 2)
4778 val |= GENCONF_CTRL0_PORT0_RGMII | GENCONF_CTRL0_PORT1_RGMII;
4779 else if (port->gop_id == 3)
4780 val |= GENCONF_CTRL0_PORT1_RGMII_MII;
4781 regmap_write(priv->sysctrl_base, GENCONF_CTRL0, val);
4784 static void mvpp22_gop_init_sgmii(struct mvpp2_port *port)
4786 struct mvpp2 *priv = port->priv;
4789 regmap_read(priv->sysctrl_base, GENCONF_PORT_CTRL0, &val);
4790 val |= GENCONF_PORT_CTRL0_BUS_WIDTH_SELECT |
4791 GENCONF_PORT_CTRL0_RX_DATA_SAMPLE;
4792 regmap_write(priv->sysctrl_base, GENCONF_PORT_CTRL0, val);
4794 if (port->gop_id > 1) {
4795 regmap_read(priv->sysctrl_base, GENCONF_CTRL0, &val);
4796 if (port->gop_id == 2)
4797 val &= ~GENCONF_CTRL0_PORT0_RGMII;
4798 else if (port->gop_id == 3)
4799 val &= ~GENCONF_CTRL0_PORT1_RGMII_MII;
4800 regmap_write(priv->sysctrl_base, GENCONF_CTRL0, val);
4804 static void mvpp22_gop_init_10gkr(struct mvpp2_port *port)
4806 struct mvpp2 *priv = port->priv;
4807 void __iomem *mpcs = priv->iface_base + MVPP22_MPCS_BASE(port->gop_id);
4808 void __iomem *xpcs = priv->iface_base + MVPP22_XPCS_BASE(port->gop_id);
4812 val = readl(xpcs + MVPP22_XPCS_CFG0);
4813 val &= ~(MVPP22_XPCS_CFG0_PCS_MODE(0x3) |
4814 MVPP22_XPCS_CFG0_ACTIVE_LANE(0x3));
4815 val |= MVPP22_XPCS_CFG0_ACTIVE_LANE(2);
4816 writel(val, xpcs + MVPP22_XPCS_CFG0);
4819 val = readl(mpcs + MVPP22_MPCS_CTRL);
4820 val &= ~MVPP22_MPCS_CTRL_FWD_ERR_CONN;
4821 writel(val, mpcs + MVPP22_MPCS_CTRL);
4823 val = readl(mpcs + MVPP22_MPCS_CLK_RESET);
4824 val &= ~(MVPP22_MPCS_CLK_RESET_DIV_RATIO(0x7) | MAC_CLK_RESET_MAC |
4825 MAC_CLK_RESET_SD_RX | MAC_CLK_RESET_SD_TX);
4826 val |= MVPP22_MPCS_CLK_RESET_DIV_RATIO(1);
4827 writel(val, mpcs + MVPP22_MPCS_CLK_RESET);
4829 val &= ~MVPP22_MPCS_CLK_RESET_DIV_SET;
4830 val |= MAC_CLK_RESET_MAC | MAC_CLK_RESET_SD_RX | MAC_CLK_RESET_SD_TX;
4831 writel(val, mpcs + MVPP22_MPCS_CLK_RESET);
4834 static int mvpp22_gop_init(struct mvpp2_port *port)
4836 struct mvpp2 *priv = port->priv;
4839 if (!priv->sysctrl_base)
4842 switch (port->phy_interface) {
4843 case PHY_INTERFACE_MODE_RGMII:
4844 case PHY_INTERFACE_MODE_RGMII_ID:
4845 case PHY_INTERFACE_MODE_RGMII_RXID:
4846 case PHY_INTERFACE_MODE_RGMII_TXID:
4847 if (port->gop_id == 0)
4849 mvpp22_gop_init_rgmii(port);
4851 case PHY_INTERFACE_MODE_SGMII:
4852 mvpp22_gop_init_sgmii(port);
4854 case PHY_INTERFACE_MODE_10GKR:
4855 if (port->gop_id != 0)
4857 mvpp22_gop_init_10gkr(port);
4860 goto unsupported_conf;
4863 regmap_read(priv->sysctrl_base, GENCONF_PORT_CTRL1, &val);
4864 val |= GENCONF_PORT_CTRL1_RESET(port->gop_id) |
4865 GENCONF_PORT_CTRL1_EN(port->gop_id);
4866 regmap_write(priv->sysctrl_base, GENCONF_PORT_CTRL1, val);
4868 regmap_read(priv->sysctrl_base, GENCONF_PORT_CTRL0, &val);
4869 val |= GENCONF_PORT_CTRL0_CLK_DIV_PHASE_CLR;
4870 regmap_write(priv->sysctrl_base, GENCONF_PORT_CTRL0, val);
4872 regmap_read(priv->sysctrl_base, GENCONF_SOFT_RESET1, &val);
4873 val |= GENCONF_SOFT_RESET1_GOP;
4874 regmap_write(priv->sysctrl_base, GENCONF_SOFT_RESET1, val);
4880 netdev_err(port->dev, "Invalid port configuration\n");
4884 static void mvpp22_gop_unmask_irq(struct mvpp2_port *port)
4888 if (phy_interface_mode_is_rgmii(port->phy_interface) ||
4889 port->phy_interface == PHY_INTERFACE_MODE_SGMII) {
4890 /* Enable the GMAC link status irq for this port */
4891 val = readl(port->base + MVPP22_GMAC_INT_SUM_MASK);
4892 val |= MVPP22_GMAC_INT_SUM_MASK_LINK_STAT;
4893 writel(val, port->base + MVPP22_GMAC_INT_SUM_MASK);
4896 if (port->gop_id == 0) {
4897 /* Enable the XLG/GIG irqs for this port */
4898 val = readl(port->base + MVPP22_XLG_EXT_INT_MASK);
4899 if (port->phy_interface == PHY_INTERFACE_MODE_10GKR)
4900 val |= MVPP22_XLG_EXT_INT_MASK_XLG;
4902 val |= MVPP22_XLG_EXT_INT_MASK_GIG;
4903 writel(val, port->base + MVPP22_XLG_EXT_INT_MASK);
4907 static void mvpp22_gop_mask_irq(struct mvpp2_port *port)
4911 if (port->gop_id == 0) {
4912 val = readl(port->base + MVPP22_XLG_EXT_INT_MASK);
4913 val &= ~(MVPP22_XLG_EXT_INT_MASK_XLG |
4914 MVPP22_XLG_EXT_INT_MASK_GIG);
4915 writel(val, port->base + MVPP22_XLG_EXT_INT_MASK);
4918 if (phy_interface_mode_is_rgmii(port->phy_interface) ||
4919 port->phy_interface == PHY_INTERFACE_MODE_SGMII) {
4920 val = readl(port->base + MVPP22_GMAC_INT_SUM_MASK);
4921 val &= ~MVPP22_GMAC_INT_SUM_MASK_LINK_STAT;
4922 writel(val, port->base + MVPP22_GMAC_INT_SUM_MASK);
4926 static void mvpp22_gop_setup_irq(struct mvpp2_port *port)
4930 if (phy_interface_mode_is_rgmii(port->phy_interface) ||
4931 port->phy_interface == PHY_INTERFACE_MODE_SGMII) {
4932 val = readl(port->base + MVPP22_GMAC_INT_MASK);
4933 val |= MVPP22_GMAC_INT_MASK_LINK_STAT;
4934 writel(val, port->base + MVPP22_GMAC_INT_MASK);
4937 if (port->gop_id == 0) {
4938 val = readl(port->base + MVPP22_XLG_INT_MASK);
4939 val |= MVPP22_XLG_INT_MASK_LINK;
4940 writel(val, port->base + MVPP22_XLG_INT_MASK);
4943 mvpp22_gop_unmask_irq(port);
4946 static int mvpp22_comphy_init(struct mvpp2_port *port)
4954 switch (port->phy_interface) {
4955 case PHY_INTERFACE_MODE_SGMII:
4956 mode = PHY_MODE_SGMII;
4958 case PHY_INTERFACE_MODE_10GKR:
4959 mode = PHY_MODE_10GKR;
4965 ret = phy_set_mode(port->comphy, mode);
4969 return phy_power_on(port->comphy);
4972 static void mvpp2_port_mii_gmac_configure_mode(struct mvpp2_port *port)
4976 if (port->phy_interface == PHY_INTERFACE_MODE_SGMII) {
4977 val = readl(port->base + MVPP22_GMAC_CTRL_4_REG);
4978 val |= MVPP22_CTRL4_SYNC_BYPASS_DIS | MVPP22_CTRL4_DP_CLK_SEL |
4979 MVPP22_CTRL4_QSGMII_BYPASS_ACTIVE;
4980 val &= ~MVPP22_CTRL4_EXT_PIN_GMII_SEL;
4981 writel(val, port->base + MVPP22_GMAC_CTRL_4_REG);
4982 } else if (phy_interface_mode_is_rgmii(port->phy_interface)) {
4983 val = readl(port->base + MVPP22_GMAC_CTRL_4_REG);
4984 val |= MVPP22_CTRL4_EXT_PIN_GMII_SEL |
4985 MVPP22_CTRL4_SYNC_BYPASS_DIS |
4986 MVPP22_CTRL4_QSGMII_BYPASS_ACTIVE;
4987 val &= ~MVPP22_CTRL4_DP_CLK_SEL;
4988 writel(val, port->base + MVPP22_GMAC_CTRL_4_REG);
4991 /* The port is connected to a copper PHY */
4992 val = readl(port->base + MVPP2_GMAC_CTRL_0_REG);
4993 val &= ~MVPP2_GMAC_PORT_TYPE_MASK;
4994 writel(val, port->base + MVPP2_GMAC_CTRL_0_REG);
4996 val = readl(port->base + MVPP2_GMAC_AUTONEG_CONFIG);
4997 val |= MVPP2_GMAC_IN_BAND_AUTONEG_BYPASS |
4998 MVPP2_GMAC_AN_SPEED_EN | MVPP2_GMAC_FLOW_CTRL_AUTONEG |
4999 MVPP2_GMAC_AN_DUPLEX_EN;
5000 if (port->phy_interface == PHY_INTERFACE_MODE_SGMII)
5001 val |= MVPP2_GMAC_IN_BAND_AUTONEG;
5002 writel(val, port->base + MVPP2_GMAC_AUTONEG_CONFIG);
5005 static void mvpp2_port_mii_gmac_configure(struct mvpp2_port *port)
5009 /* Force link down */
5010 val = readl(port->base + MVPP2_GMAC_AUTONEG_CONFIG);
5011 val &= ~MVPP2_GMAC_FORCE_LINK_PASS;
5012 val |= MVPP2_GMAC_FORCE_LINK_DOWN;
5013 writel(val, port->base + MVPP2_GMAC_AUTONEG_CONFIG);
5015 /* Set the GMAC in a reset state */
5016 val = readl(port->base + MVPP2_GMAC_CTRL_2_REG);
5017 val |= MVPP2_GMAC_PORT_RESET_MASK;
5018 writel(val, port->base + MVPP2_GMAC_CTRL_2_REG);
5020 /* Configure the PCS and in-band AN */
5021 val = readl(port->base + MVPP2_GMAC_CTRL_2_REG);
5022 if (port->phy_interface == PHY_INTERFACE_MODE_SGMII) {
5023 val |= MVPP2_GMAC_INBAND_AN_MASK | MVPP2_GMAC_PCS_ENABLE_MASK;
5024 } else if (phy_interface_mode_is_rgmii(port->phy_interface)) {
5025 val &= ~MVPP2_GMAC_PCS_ENABLE_MASK;
5027 writel(val, port->base + MVPP2_GMAC_CTRL_2_REG);
5029 mvpp2_port_mii_gmac_configure_mode(port);
5031 /* Unset the GMAC reset state */
5032 val = readl(port->base + MVPP2_GMAC_CTRL_2_REG);
5033 val &= ~MVPP2_GMAC_PORT_RESET_MASK;
5034 writel(val, port->base + MVPP2_GMAC_CTRL_2_REG);
5036 /* Stop forcing link down */
5037 val = readl(port->base + MVPP2_GMAC_AUTONEG_CONFIG);
5038 val &= ~MVPP2_GMAC_FORCE_LINK_DOWN;
5039 writel(val, port->base + MVPP2_GMAC_AUTONEG_CONFIG);
5042 static void mvpp2_port_mii_xlg_configure(struct mvpp2_port *port)
5046 if (port->gop_id != 0)
5049 val = readl(port->base + MVPP22_XLG_CTRL0_REG);
5050 val |= MVPP22_XLG_CTRL0_RX_FLOW_CTRL_EN;
5051 writel(val, port->base + MVPP22_XLG_CTRL0_REG);
5053 val = readl(port->base + MVPP22_XLG_CTRL4_REG);
5054 val &= ~MVPP22_XLG_CTRL4_MACMODSELECT_GMAC;
5055 val |= MVPP22_XLG_CTRL4_FWD_FC | MVPP22_XLG_CTRL4_FWD_PFC;
5056 writel(val, port->base + MVPP22_XLG_CTRL4_REG);
5059 static void mvpp22_port_mii_set(struct mvpp2_port *port)
5063 /* Only GOP port 0 has an XLG MAC */
5064 if (port->gop_id == 0) {
5065 val = readl(port->base + MVPP22_XLG_CTRL3_REG);
5066 val &= ~MVPP22_XLG_CTRL3_MACMODESELECT_MASK;
5068 if (port->phy_interface == PHY_INTERFACE_MODE_XAUI ||
5069 port->phy_interface == PHY_INTERFACE_MODE_10GKR)
5070 val |= MVPP22_XLG_CTRL3_MACMODESELECT_10G;
5072 val |= MVPP22_XLG_CTRL3_MACMODESELECT_GMAC;
5074 writel(val, port->base + MVPP22_XLG_CTRL3_REG);
5078 static void mvpp2_port_mii_set(struct mvpp2_port *port)
5080 if (port->priv->hw_version == MVPP22)
5081 mvpp22_port_mii_set(port);
5083 if (phy_interface_mode_is_rgmii(port->phy_interface) ||
5084 port->phy_interface == PHY_INTERFACE_MODE_SGMII)
5085 mvpp2_port_mii_gmac_configure(port);
5086 else if (port->phy_interface == PHY_INTERFACE_MODE_10GKR)
5087 mvpp2_port_mii_xlg_configure(port);
5090 static void mvpp2_port_fc_adv_enable(struct mvpp2_port *port)
5094 val = readl(port->base + MVPP2_GMAC_AUTONEG_CONFIG);
5095 val |= MVPP2_GMAC_FC_ADV_EN;
5096 writel(val, port->base + MVPP2_GMAC_AUTONEG_CONFIG);
5099 static void mvpp2_port_enable(struct mvpp2_port *port)
5103 /* Only GOP port 0 has an XLG MAC */
5104 if (port->gop_id == 0 &&
5105 (port->phy_interface == PHY_INTERFACE_MODE_XAUI ||
5106 port->phy_interface == PHY_INTERFACE_MODE_10GKR)) {
5107 val = readl(port->base + MVPP22_XLG_CTRL0_REG);
5108 val |= MVPP22_XLG_CTRL0_PORT_EN |
5109 MVPP22_XLG_CTRL0_MAC_RESET_DIS;
5110 val &= ~MVPP22_XLG_CTRL0_MIB_CNT_DIS;
5111 writel(val, port->base + MVPP22_XLG_CTRL0_REG);
5113 val = readl(port->base + MVPP2_GMAC_CTRL_0_REG);
5114 val |= MVPP2_GMAC_PORT_EN_MASK;
5115 val |= MVPP2_GMAC_MIB_CNTR_EN_MASK;
5116 writel(val, port->base + MVPP2_GMAC_CTRL_0_REG);
5120 static void mvpp2_port_disable(struct mvpp2_port *port)
5124 /* Only GOP port 0 has an XLG MAC */
5125 if (port->gop_id == 0 &&
5126 (port->phy_interface == PHY_INTERFACE_MODE_XAUI ||
5127 port->phy_interface == PHY_INTERFACE_MODE_10GKR)) {
5128 val = readl(port->base + MVPP22_XLG_CTRL0_REG);
5129 val &= ~(MVPP22_XLG_CTRL0_PORT_EN |
5130 MVPP22_XLG_CTRL0_MAC_RESET_DIS);
5131 writel(val, port->base + MVPP22_XLG_CTRL0_REG);
5133 val = readl(port->base + MVPP2_GMAC_CTRL_0_REG);
5134 val &= ~(MVPP2_GMAC_PORT_EN_MASK);
5135 writel(val, port->base + MVPP2_GMAC_CTRL_0_REG);
5139 /* Set IEEE 802.3x Flow Control Xon Packet Transmission Mode */
5140 static void mvpp2_port_periodic_xon_disable(struct mvpp2_port *port)
5144 val = readl(port->base + MVPP2_GMAC_CTRL_1_REG) &
5145 ~MVPP2_GMAC_PERIODIC_XON_EN_MASK;
5146 writel(val, port->base + MVPP2_GMAC_CTRL_1_REG);
5149 /* Configure loopback port */
5150 static void mvpp2_port_loopback_set(struct mvpp2_port *port)
5154 val = readl(port->base + MVPP2_GMAC_CTRL_1_REG);
5156 if (port->speed == 1000)
5157 val |= MVPP2_GMAC_GMII_LB_EN_MASK;
5159 val &= ~MVPP2_GMAC_GMII_LB_EN_MASK;
5161 if (port->phy_interface == PHY_INTERFACE_MODE_SGMII)
5162 val |= MVPP2_GMAC_PCS_LB_EN_MASK;
5164 val &= ~MVPP2_GMAC_PCS_LB_EN_MASK;
5166 writel(val, port->base + MVPP2_GMAC_CTRL_1_REG);
5169 struct mvpp2_ethtool_counter {
5170 unsigned int offset;
5171 const char string[ETH_GSTRING_LEN];
5175 static u64 mvpp2_read_count(struct mvpp2_port *port,
5176 const struct mvpp2_ethtool_counter *counter)
5180 val = readl(port->stats_base + counter->offset);
5181 if (counter->reg_is_64b)
5182 val += (u64)readl(port->stats_base + counter->offset + 4) << 32;
5187 /* Due to the fact that software statistics and hardware statistics are, by
5188 * design, incremented at different moments in the chain of packet processing,
5189 * it is very likely that incoming packets could have been dropped after being
5190 * counted by hardware but before reaching software statistics (most probably
5191 * multicast packets), and in the oppposite way, during transmission, FCS bytes
5192 * are added in between as well as TSO skb will be split and header bytes added.
5193 * Hence, statistics gathered from userspace with ifconfig (software) and
5194 * ethtool (hardware) cannot be compared.
5196 static const struct mvpp2_ethtool_counter mvpp2_ethtool_regs[] = {
5197 { MVPP2_MIB_GOOD_OCTETS_RCVD, "good_octets_received", true },
5198 { MVPP2_MIB_BAD_OCTETS_RCVD, "bad_octets_received" },
5199 { MVPP2_MIB_CRC_ERRORS_SENT, "crc_errors_sent" },
5200 { MVPP2_MIB_UNICAST_FRAMES_RCVD, "unicast_frames_received" },
5201 { MVPP2_MIB_BROADCAST_FRAMES_RCVD, "broadcast_frames_received" },
5202 { MVPP2_MIB_MULTICAST_FRAMES_RCVD, "multicast_frames_received" },
5203 { MVPP2_MIB_FRAMES_64_OCTETS, "frames_64_octets" },
5204 { MVPP2_MIB_FRAMES_65_TO_127_OCTETS, "frames_65_to_127_octet" },
5205 { MVPP2_MIB_FRAMES_128_TO_255_OCTETS, "frames_128_to_255_octet" },
5206 { MVPP2_MIB_FRAMES_256_TO_511_OCTETS, "frames_256_to_511_octet" },
5207 { MVPP2_MIB_FRAMES_512_TO_1023_OCTETS, "frames_512_to_1023_octet" },
5208 { MVPP2_MIB_FRAMES_1024_TO_MAX_OCTETS, "frames_1024_to_max_octet" },
5209 { MVPP2_MIB_GOOD_OCTETS_SENT, "good_octets_sent", true },
5210 { MVPP2_MIB_UNICAST_FRAMES_SENT, "unicast_frames_sent" },
5211 { MVPP2_MIB_MULTICAST_FRAMES_SENT, "multicast_frames_sent" },
5212 { MVPP2_MIB_BROADCAST_FRAMES_SENT, "broadcast_frames_sent" },
5213 { MVPP2_MIB_FC_SENT, "fc_sent" },
5214 { MVPP2_MIB_FC_RCVD, "fc_received" },
5215 { MVPP2_MIB_RX_FIFO_OVERRUN, "rx_fifo_overrun" },
5216 { MVPP2_MIB_UNDERSIZE_RCVD, "undersize_received" },
5217 { MVPP2_MIB_FRAGMENTS_RCVD, "fragments_received" },
5218 { MVPP2_MIB_OVERSIZE_RCVD, "oversize_received" },
5219 { MVPP2_MIB_JABBER_RCVD, "jabber_received" },
5220 { MVPP2_MIB_MAC_RCV_ERROR, "mac_receive_error" },
5221 { MVPP2_MIB_BAD_CRC_EVENT, "bad_crc_event" },
5222 { MVPP2_MIB_COLLISION, "collision" },
5223 { MVPP2_MIB_LATE_COLLISION, "late_collision" },
5226 static void mvpp2_ethtool_get_strings(struct net_device *netdev, u32 sset,
5229 if (sset == ETH_SS_STATS) {
5232 for (i = 0; i < ARRAY_SIZE(mvpp2_ethtool_regs); i++)
5233 memcpy(data + i * ETH_GSTRING_LEN,
5234 &mvpp2_ethtool_regs[i].string, ETH_GSTRING_LEN);
5238 static void mvpp2_gather_hw_statistics(struct work_struct *work)
5240 struct delayed_work *del_work = to_delayed_work(work);
5241 struct mvpp2_port *port = container_of(del_work, struct mvpp2_port,
5246 mutex_lock(&port->gather_stats_lock);
5248 pstats = port->ethtool_stats;
5249 for (i = 0; i < ARRAY_SIZE(mvpp2_ethtool_regs); i++)
5250 *pstats++ += mvpp2_read_count(port, &mvpp2_ethtool_regs[i]);
5252 /* No need to read again the counters right after this function if it
5253 * was called asynchronously by the user (ie. use of ethtool).
5255 cancel_delayed_work(&port->stats_work);
5256 queue_delayed_work(port->priv->stats_queue, &port->stats_work,
5257 MVPP2_MIB_COUNTERS_STATS_DELAY);
5259 mutex_unlock(&port->gather_stats_lock);
5262 static void mvpp2_ethtool_get_stats(struct net_device *dev,
5263 struct ethtool_stats *stats, u64 *data)
5265 struct mvpp2_port *port = netdev_priv(dev);
5267 /* Update statistics for the given port, then take the lock to avoid
5268 * concurrent accesses on the ethtool_stats structure during its copy.
5270 mvpp2_gather_hw_statistics(&port->stats_work.work);
5272 mutex_lock(&port->gather_stats_lock);
5273 memcpy(data, port->ethtool_stats,
5274 sizeof(u64) * ARRAY_SIZE(mvpp2_ethtool_regs));
5275 mutex_unlock(&port->gather_stats_lock);
5278 static int mvpp2_ethtool_get_sset_count(struct net_device *dev, int sset)
5280 if (sset == ETH_SS_STATS)
5281 return ARRAY_SIZE(mvpp2_ethtool_regs);
5286 static void mvpp2_port_reset(struct mvpp2_port *port)
5291 /* Read the GOP statistics to reset the hardware counters */
5292 for (i = 0; i < ARRAY_SIZE(mvpp2_ethtool_regs); i++)
5293 mvpp2_read_count(port, &mvpp2_ethtool_regs[i]);
5295 val = readl(port->base + MVPP2_GMAC_CTRL_2_REG) &
5296 ~MVPP2_GMAC_PORT_RESET_MASK;
5297 writel(val, port->base + MVPP2_GMAC_CTRL_2_REG);
5299 while (readl(port->base + MVPP2_GMAC_CTRL_2_REG) &
5300 MVPP2_GMAC_PORT_RESET_MASK)
5304 /* Change maximum receive size of the port */
5305 static inline void mvpp2_gmac_max_rx_size_set(struct mvpp2_port *port)
5309 val = readl(port->base + MVPP2_GMAC_CTRL_0_REG);
5310 val &= ~MVPP2_GMAC_MAX_RX_SIZE_MASK;
5311 val |= (((port->pkt_size - MVPP2_MH_SIZE) / 2) <<
5312 MVPP2_GMAC_MAX_RX_SIZE_OFFS);
5313 writel(val, port->base + MVPP2_GMAC_CTRL_0_REG);
5316 /* Change maximum receive size of the port */
5317 static inline void mvpp2_xlg_max_rx_size_set(struct mvpp2_port *port)
5321 val = readl(port->base + MVPP22_XLG_CTRL1_REG);
5322 val &= ~MVPP22_XLG_CTRL1_FRAMESIZELIMIT_MASK;
5323 val |= ((port->pkt_size - MVPP2_MH_SIZE) / 2) <<
5324 MVPP22_XLG_CTRL1_FRAMESIZELIMIT_OFFS;
5325 writel(val, port->base + MVPP22_XLG_CTRL1_REG);
5328 /* Set defaults to the MVPP2 port */
5329 static void mvpp2_defaults_set(struct mvpp2_port *port)
5331 int tx_port_num, val, queue, ptxq, lrxq;
5333 if (port->priv->hw_version == MVPP21) {
5334 /* Configure port to loopback if needed */
5335 if (port->flags & MVPP2_F_LOOPBACK)
5336 mvpp2_port_loopback_set(port);
5338 /* Update TX FIFO MIN Threshold */
5339 val = readl(port->base + MVPP2_GMAC_PORT_FIFO_CFG_1_REG);
5340 val &= ~MVPP2_GMAC_TX_FIFO_MIN_TH_ALL_MASK;
5341 /* Min. TX threshold must be less than minimal packet length */
5342 val |= MVPP2_GMAC_TX_FIFO_MIN_TH_MASK(64 - 4 - 2);
5343 writel(val, port->base + MVPP2_GMAC_PORT_FIFO_CFG_1_REG);
5346 /* Disable Legacy WRR, Disable EJP, Release from reset */
5347 tx_port_num = mvpp2_egress_port(port);
5348 mvpp2_write(port->priv, MVPP2_TXP_SCHED_PORT_INDEX_REG,
5350 mvpp2_write(port->priv, MVPP2_TXP_SCHED_CMD_1_REG, 0);
5352 /* Close bandwidth for all queues */
5353 for (queue = 0; queue < MVPP2_MAX_TXQ; queue++) {
5354 ptxq = mvpp2_txq_phys(port->id, queue);
5355 mvpp2_write(port->priv,
5356 MVPP2_TXQ_SCHED_TOKEN_CNTR_REG(ptxq), 0);
5359 /* Set refill period to 1 usec, refill tokens
5360 * and bucket size to maximum
5362 mvpp2_write(port->priv, MVPP2_TXP_SCHED_PERIOD_REG,
5363 port->priv->tclk / USEC_PER_SEC);
5364 val = mvpp2_read(port->priv, MVPP2_TXP_SCHED_REFILL_REG);
5365 val &= ~MVPP2_TXP_REFILL_PERIOD_ALL_MASK;
5366 val |= MVPP2_TXP_REFILL_PERIOD_MASK(1);
5367 val |= MVPP2_TXP_REFILL_TOKENS_ALL_MASK;
5368 mvpp2_write(port->priv, MVPP2_TXP_SCHED_REFILL_REG, val);
5369 val = MVPP2_TXP_TOKEN_SIZE_MAX;
5370 mvpp2_write(port->priv, MVPP2_TXP_SCHED_TOKEN_SIZE_REG, val);
5372 /* Set MaximumLowLatencyPacketSize value to 256 */
5373 mvpp2_write(port->priv, MVPP2_RX_CTRL_REG(port->id),
5374 MVPP2_RX_USE_PSEUDO_FOR_CSUM_MASK |
5375 MVPP2_RX_LOW_LATENCY_PKT_SIZE(256));
5377 /* Enable Rx cache snoop */
5378 for (lrxq = 0; lrxq < port->nrxqs; lrxq++) {
5379 queue = port->rxqs[lrxq]->id;
5380 val = mvpp2_read(port->priv, MVPP2_RXQ_CONFIG_REG(queue));
5381 val |= MVPP2_SNOOP_PKT_SIZE_MASK |
5382 MVPP2_SNOOP_BUF_HDR_MASK;
5383 mvpp2_write(port->priv, MVPP2_RXQ_CONFIG_REG(queue), val);
5386 /* At default, mask all interrupts to all present cpus */
5387 mvpp2_interrupts_disable(port);
5390 /* Enable/disable receiving packets */
5391 static void mvpp2_ingress_enable(struct mvpp2_port *port)
5396 for (lrxq = 0; lrxq < port->nrxqs; lrxq++) {
5397 queue = port->rxqs[lrxq]->id;
5398 val = mvpp2_read(port->priv, MVPP2_RXQ_CONFIG_REG(queue));
5399 val &= ~MVPP2_RXQ_DISABLE_MASK;
5400 mvpp2_write(port->priv, MVPP2_RXQ_CONFIG_REG(queue), val);
5404 static void mvpp2_ingress_disable(struct mvpp2_port *port)
5409 for (lrxq = 0; lrxq < port->nrxqs; lrxq++) {
5410 queue = port->rxqs[lrxq]->id;
5411 val = mvpp2_read(port->priv, MVPP2_RXQ_CONFIG_REG(queue));
5412 val |= MVPP2_RXQ_DISABLE_MASK;
5413 mvpp2_write(port->priv, MVPP2_RXQ_CONFIG_REG(queue), val);
5417 /* Enable transmit via physical egress queue
5418 * - HW starts take descriptors from DRAM
5420 static void mvpp2_egress_enable(struct mvpp2_port *port)
5424 int tx_port_num = mvpp2_egress_port(port);
5426 /* Enable all initialized TXs. */
5428 for (queue = 0; queue < port->ntxqs; queue++) {
5429 struct mvpp2_tx_queue *txq = port->txqs[queue];
5432 qmap |= (1 << queue);
5435 mvpp2_write(port->priv, MVPP2_TXP_SCHED_PORT_INDEX_REG, tx_port_num);
5436 mvpp2_write(port->priv, MVPP2_TXP_SCHED_Q_CMD_REG, qmap);
5439 /* Disable transmit via physical egress queue
5440 * - HW doesn't take descriptors from DRAM
5442 static void mvpp2_egress_disable(struct mvpp2_port *port)
5446 int tx_port_num = mvpp2_egress_port(port);
5448 /* Issue stop command for active channels only */
5449 mvpp2_write(port->priv, MVPP2_TXP_SCHED_PORT_INDEX_REG, tx_port_num);
5450 reg_data = (mvpp2_read(port->priv, MVPP2_TXP_SCHED_Q_CMD_REG)) &
5451 MVPP2_TXP_SCHED_ENQ_MASK;
5453 mvpp2_write(port->priv, MVPP2_TXP_SCHED_Q_CMD_REG,
5454 (reg_data << MVPP2_TXP_SCHED_DISQ_OFFSET));
5456 /* Wait for all Tx activity to terminate. */
5459 if (delay >= MVPP2_TX_DISABLE_TIMEOUT_MSEC) {
5460 netdev_warn(port->dev,
5461 "Tx stop timed out, status=0x%08x\n",
5468 /* Check port TX Command register that all
5469 * Tx queues are stopped
5471 reg_data = mvpp2_read(port->priv, MVPP2_TXP_SCHED_Q_CMD_REG);
5472 } while (reg_data & MVPP2_TXP_SCHED_ENQ_MASK);
5475 /* Rx descriptors helper methods */
5477 /* Get number of Rx descriptors occupied by received packets */
5479 mvpp2_rxq_received(struct mvpp2_port *port, int rxq_id)
5481 u32 val = mvpp2_read(port->priv, MVPP2_RXQ_STATUS_REG(rxq_id));
5483 return val & MVPP2_RXQ_OCCUPIED_MASK;
5486 /* Update Rx queue status with the number of occupied and available
5487 * Rx descriptor slots.
5490 mvpp2_rxq_status_update(struct mvpp2_port *port, int rxq_id,
5491 int used_count, int free_count)
5493 /* Decrement the number of used descriptors and increment count
5494 * increment the number of free descriptors.
5496 u32 val = used_count | (free_count << MVPP2_RXQ_NUM_NEW_OFFSET);
5498 mvpp2_write(port->priv, MVPP2_RXQ_STATUS_UPDATE_REG(rxq_id), val);
5501 /* Get pointer to next RX descriptor to be processed by SW */
5502 static inline struct mvpp2_rx_desc *
5503 mvpp2_rxq_next_desc_get(struct mvpp2_rx_queue *rxq)
5505 int rx_desc = rxq->next_desc_to_proc;
5507 rxq->next_desc_to_proc = MVPP2_QUEUE_NEXT_DESC(rxq, rx_desc);
5508 prefetch(rxq->descs + rxq->next_desc_to_proc);
5509 return rxq->descs + rx_desc;
5512 /* Set rx queue offset */
5513 static void mvpp2_rxq_offset_set(struct mvpp2_port *port,
5514 int prxq, int offset)
5518 /* Convert offset from bytes to units of 32 bytes */
5519 offset = offset >> 5;
5521 val = mvpp2_read(port->priv, MVPP2_RXQ_CONFIG_REG(prxq));
5522 val &= ~MVPP2_RXQ_PACKET_OFFSET_MASK;
5525 val |= ((offset << MVPP2_RXQ_PACKET_OFFSET_OFFS) &
5526 MVPP2_RXQ_PACKET_OFFSET_MASK);
5528 mvpp2_write(port->priv, MVPP2_RXQ_CONFIG_REG(prxq), val);
5531 /* Tx descriptors helper methods */
5533 /* Get pointer to next Tx descriptor to be processed (send) by HW */
5534 static struct mvpp2_tx_desc *
5535 mvpp2_txq_next_desc_get(struct mvpp2_tx_queue *txq)
5537 int tx_desc = txq->next_desc_to_proc;
5539 txq->next_desc_to_proc = MVPP2_QUEUE_NEXT_DESC(txq, tx_desc);
5540 return txq->descs + tx_desc;
5543 /* Update HW with number of aggregated Tx descriptors to be sent
5545 * Called only from mvpp2_tx(), so migration is disabled, using
5546 * smp_processor_id() is OK.
5548 static void mvpp2_aggr_txq_pend_desc_add(struct mvpp2_port *port, int pending)
5550 /* aggregated access - relevant TXQ number is written in TX desc */
5551 mvpp2_percpu_write(port->priv, smp_processor_id(),
5552 MVPP2_AGGR_TXQ_UPDATE_REG, pending);
5556 /* Check if there are enough free descriptors in aggregated txq.
5557 * If not, update the number of occupied descriptors and repeat the check.
5559 * Called only from mvpp2_tx(), so migration is disabled, using
5560 * smp_processor_id() is OK.
5562 static int mvpp2_aggr_desc_num_check(struct mvpp2 *priv,
5563 struct mvpp2_tx_queue *aggr_txq, int num)
5565 if ((aggr_txq->count + num) > MVPP2_AGGR_TXQ_SIZE) {
5566 /* Update number of occupied aggregated Tx descriptors */
5567 int cpu = smp_processor_id();
5568 u32 val = mvpp2_read_relaxed(priv,
5569 MVPP2_AGGR_TXQ_STATUS_REG(cpu));
5571 aggr_txq->count = val & MVPP2_AGGR_TXQ_PENDING_MASK;
5574 if ((aggr_txq->count + num) > MVPP2_AGGR_TXQ_SIZE)
5580 /* Reserved Tx descriptors allocation request
5582 * Called only from mvpp2_txq_reserved_desc_num_proc(), itself called
5583 * only by mvpp2_tx(), so migration is disabled, using
5584 * smp_processor_id() is OK.
5586 static int mvpp2_txq_alloc_reserved_desc(struct mvpp2 *priv,
5587 struct mvpp2_tx_queue *txq, int num)
5590 int cpu = smp_processor_id();
5592 val = (txq->id << MVPP2_TXQ_RSVD_REQ_Q_OFFSET) | num;
5593 mvpp2_percpu_write_relaxed(priv, cpu, MVPP2_TXQ_RSVD_REQ_REG, val);
5595 val = mvpp2_percpu_read_relaxed(priv, cpu, MVPP2_TXQ_RSVD_RSLT_REG);
5597 return val & MVPP2_TXQ_RSVD_RSLT_MASK;
5600 /* Check if there are enough reserved descriptors for transmission.
5601 * If not, request chunk of reserved descriptors and check again.
5603 static int mvpp2_txq_reserved_desc_num_proc(struct mvpp2 *priv,
5604 struct mvpp2_tx_queue *txq,
5605 struct mvpp2_txq_pcpu *txq_pcpu,
5608 int req, cpu, desc_count;
5610 if (txq_pcpu->reserved_num >= num)
5613 /* Not enough descriptors reserved! Update the reserved descriptor
5614 * count and check again.
5618 /* Compute total of used descriptors */
5619 for_each_present_cpu(cpu) {
5620 struct mvpp2_txq_pcpu *txq_pcpu_aux;
5622 txq_pcpu_aux = per_cpu_ptr(txq->pcpu, cpu);
5623 desc_count += txq_pcpu_aux->count;
5624 desc_count += txq_pcpu_aux->reserved_num;
5627 req = max(MVPP2_CPU_DESC_CHUNK, num - txq_pcpu->reserved_num);
5631 (txq->size - (num_present_cpus() * MVPP2_CPU_DESC_CHUNK)))
5634 txq_pcpu->reserved_num += mvpp2_txq_alloc_reserved_desc(priv, txq, req);
5636 /* OK, the descriptor cound has been updated: check again. */
5637 if (txq_pcpu->reserved_num < num)
5642 /* Release the last allocated Tx descriptor. Useful to handle DMA
5643 * mapping failures in the Tx path.
5645 static void mvpp2_txq_desc_put(struct mvpp2_tx_queue *txq)
5647 if (txq->next_desc_to_proc == 0)
5648 txq->next_desc_to_proc = txq->last_desc - 1;
5650 txq->next_desc_to_proc--;
5653 /* Set Tx descriptors fields relevant for CSUM calculation */
5654 static u32 mvpp2_txq_desc_csum(int l3_offs, int l3_proto,
5655 int ip_hdr_len, int l4_proto)
5659 /* fields: L3_offset, IP_hdrlen, L3_type, G_IPv4_chk,
5660 * G_L4_chk, L4_type required only for checksum calculation
5662 command = (l3_offs << MVPP2_TXD_L3_OFF_SHIFT);
5663 command |= (ip_hdr_len << MVPP2_TXD_IP_HLEN_SHIFT);
5664 command |= MVPP2_TXD_IP_CSUM_DISABLE;
5666 if (l3_proto == swab16(ETH_P_IP)) {
5667 command &= ~MVPP2_TXD_IP_CSUM_DISABLE; /* enable IPv4 csum */
5668 command &= ~MVPP2_TXD_L3_IP6; /* enable IPv4 */
5670 command |= MVPP2_TXD_L3_IP6; /* enable IPv6 */
5673 if (l4_proto == IPPROTO_TCP) {
5674 command &= ~MVPP2_TXD_L4_UDP; /* enable TCP */
5675 command &= ~MVPP2_TXD_L4_CSUM_FRAG; /* generate L4 csum */
5676 } else if (l4_proto == IPPROTO_UDP) {
5677 command |= MVPP2_TXD_L4_UDP; /* enable UDP */
5678 command &= ~MVPP2_TXD_L4_CSUM_FRAG; /* generate L4 csum */
5680 command |= MVPP2_TXD_L4_CSUM_NOT;
5686 /* Get number of sent descriptors and decrement counter.
5687 * The number of sent descriptors is returned.
5690 * Called only from mvpp2_txq_done(), called from mvpp2_tx()
5691 * (migration disabled) and from the TX completion tasklet (migration
5692 * disabled) so using smp_processor_id() is OK.
5694 static inline int mvpp2_txq_sent_desc_proc(struct mvpp2_port *port,
5695 struct mvpp2_tx_queue *txq)
5699 /* Reading status reg resets transmitted descriptor counter */
5700 val = mvpp2_percpu_read_relaxed(port->priv, smp_processor_id(),
5701 MVPP2_TXQ_SENT_REG(txq->id));
5703 return (val & MVPP2_TRANSMITTED_COUNT_MASK) >>
5704 MVPP2_TRANSMITTED_COUNT_OFFSET;
5707 /* Called through on_each_cpu(), so runs on all CPUs, with migration
5708 * disabled, therefore using smp_processor_id() is OK.
5710 static void mvpp2_txq_sent_counter_clear(void *arg)
5712 struct mvpp2_port *port = arg;
5715 for (queue = 0; queue < port->ntxqs; queue++) {
5716 int id = port->txqs[queue]->id;
5718 mvpp2_percpu_read(port->priv, smp_processor_id(),
5719 MVPP2_TXQ_SENT_REG(id));
5723 /* Set max sizes for Tx queues */
5724 static void mvpp2_txp_max_tx_size_set(struct mvpp2_port *port)
5727 int txq, tx_port_num;
5729 mtu = port->pkt_size * 8;
5730 if (mtu > MVPP2_TXP_MTU_MAX)
5731 mtu = MVPP2_TXP_MTU_MAX;
5733 /* WA for wrong Token bucket update: Set MTU value = 3*real MTU value */
5736 /* Indirect access to registers */
5737 tx_port_num = mvpp2_egress_port(port);
5738 mvpp2_write(port->priv, MVPP2_TXP_SCHED_PORT_INDEX_REG, tx_port_num);
5741 val = mvpp2_read(port->priv, MVPP2_TXP_SCHED_MTU_REG);
5742 val &= ~MVPP2_TXP_MTU_MAX;
5744 mvpp2_write(port->priv, MVPP2_TXP_SCHED_MTU_REG, val);
5746 /* TXP token size and all TXQs token size must be larger that MTU */
5747 val = mvpp2_read(port->priv, MVPP2_TXP_SCHED_TOKEN_SIZE_REG);
5748 size = val & MVPP2_TXP_TOKEN_SIZE_MAX;
5751 val &= ~MVPP2_TXP_TOKEN_SIZE_MAX;
5753 mvpp2_write(port->priv, MVPP2_TXP_SCHED_TOKEN_SIZE_REG, val);
5756 for (txq = 0; txq < port->ntxqs; txq++) {
5757 val = mvpp2_read(port->priv,
5758 MVPP2_TXQ_SCHED_TOKEN_SIZE_REG(txq));
5759 size = val & MVPP2_TXQ_TOKEN_SIZE_MAX;
5763 val &= ~MVPP2_TXQ_TOKEN_SIZE_MAX;
5765 mvpp2_write(port->priv,
5766 MVPP2_TXQ_SCHED_TOKEN_SIZE_REG(txq),
5772 /* Set the number of packets that will be received before Rx interrupt
5773 * will be generated by HW.
5775 static void mvpp2_rx_pkts_coal_set(struct mvpp2_port *port,
5776 struct mvpp2_rx_queue *rxq)
5778 int cpu = get_cpu();
5780 if (rxq->pkts_coal > MVPP2_OCCUPIED_THRESH_MASK)
5781 rxq->pkts_coal = MVPP2_OCCUPIED_THRESH_MASK;
5783 mvpp2_percpu_write(port->priv, cpu, MVPP2_RXQ_NUM_REG, rxq->id);
5784 mvpp2_percpu_write(port->priv, cpu, MVPP2_RXQ_THRESH_REG,
5790 /* For some reason in the LSP this is done on each CPU. Why ? */
5791 static void mvpp2_tx_pkts_coal_set(struct mvpp2_port *port,
5792 struct mvpp2_tx_queue *txq)
5794 int cpu = get_cpu();
5797 if (txq->done_pkts_coal > MVPP2_TXQ_THRESH_MASK)
5798 txq->done_pkts_coal = MVPP2_TXQ_THRESH_MASK;
5800 val = (txq->done_pkts_coal << MVPP2_TXQ_THRESH_OFFSET);
5801 mvpp2_percpu_write(port->priv, cpu, MVPP2_TXQ_NUM_REG, txq->id);
5802 mvpp2_percpu_write(port->priv, cpu, MVPP2_TXQ_THRESH_REG, val);
5807 static u32 mvpp2_usec_to_cycles(u32 usec, unsigned long clk_hz)
5809 u64 tmp = (u64)clk_hz * usec;
5811 do_div(tmp, USEC_PER_SEC);
5813 return tmp > U32_MAX ? U32_MAX : tmp;
5816 static u32 mvpp2_cycles_to_usec(u32 cycles, unsigned long clk_hz)
5818 u64 tmp = (u64)cycles * USEC_PER_SEC;
5820 do_div(tmp, clk_hz);
5822 return tmp > U32_MAX ? U32_MAX : tmp;
5825 /* Set the time delay in usec before Rx interrupt */
5826 static void mvpp2_rx_time_coal_set(struct mvpp2_port *port,
5827 struct mvpp2_rx_queue *rxq)
5829 unsigned long freq = port->priv->tclk;
5830 u32 val = mvpp2_usec_to_cycles(rxq->time_coal, freq);
5832 if (val > MVPP2_MAX_ISR_RX_THRESHOLD) {
5834 mvpp2_cycles_to_usec(MVPP2_MAX_ISR_RX_THRESHOLD, freq);
5836 /* re-evaluate to get actual register value */
5837 val = mvpp2_usec_to_cycles(rxq->time_coal, freq);
5840 mvpp2_write(port->priv, MVPP2_ISR_RX_THRESHOLD_REG(rxq->id), val);
5843 static void mvpp2_tx_time_coal_set(struct mvpp2_port *port)
5845 unsigned long freq = port->priv->tclk;
5846 u32 val = mvpp2_usec_to_cycles(port->tx_time_coal, freq);
5848 if (val > MVPP2_MAX_ISR_TX_THRESHOLD) {
5849 port->tx_time_coal =
5850 mvpp2_cycles_to_usec(MVPP2_MAX_ISR_TX_THRESHOLD, freq);
5852 /* re-evaluate to get actual register value */
5853 val = mvpp2_usec_to_cycles(port->tx_time_coal, freq);
5856 mvpp2_write(port->priv, MVPP2_ISR_TX_THRESHOLD_REG(port->id), val);
5859 /* Free Tx queue skbuffs */
5860 static void mvpp2_txq_bufs_free(struct mvpp2_port *port,
5861 struct mvpp2_tx_queue *txq,
5862 struct mvpp2_txq_pcpu *txq_pcpu, int num)
5866 for (i = 0; i < num; i++) {
5867 struct mvpp2_txq_pcpu_buf *tx_buf =
5868 txq_pcpu->buffs + txq_pcpu->txq_get_index;
5870 if (!IS_TSO_HEADER(txq_pcpu, tx_buf->dma))
5871 dma_unmap_single(port->dev->dev.parent, tx_buf->dma,
5872 tx_buf->size, DMA_TO_DEVICE);
5874 dev_kfree_skb_any(tx_buf->skb);
5876 mvpp2_txq_inc_get(txq_pcpu);
5880 static inline struct mvpp2_rx_queue *mvpp2_get_rx_queue(struct mvpp2_port *port,
5883 int queue = fls(cause) - 1;
5885 return port->rxqs[queue];
5888 static inline struct mvpp2_tx_queue *mvpp2_get_tx_queue(struct mvpp2_port *port,
5891 int queue = fls(cause) - 1;
5893 return port->txqs[queue];
5896 /* Handle end of transmission */
5897 static void mvpp2_txq_done(struct mvpp2_port *port, struct mvpp2_tx_queue *txq,
5898 struct mvpp2_txq_pcpu *txq_pcpu)
5900 struct netdev_queue *nq = netdev_get_tx_queue(port->dev, txq->log_id);
5903 if (txq_pcpu->cpu != smp_processor_id())
5904 netdev_err(port->dev, "wrong cpu on the end of Tx processing\n");
5906 tx_done = mvpp2_txq_sent_desc_proc(port, txq);
5909 mvpp2_txq_bufs_free(port, txq, txq_pcpu, tx_done);
5911 txq_pcpu->count -= tx_done;
5913 if (netif_tx_queue_stopped(nq))
5914 if (txq_pcpu->count <= txq_pcpu->wake_threshold)
5915 netif_tx_wake_queue(nq);
5918 static unsigned int mvpp2_tx_done(struct mvpp2_port *port, u32 cause,
5921 struct mvpp2_tx_queue *txq;
5922 struct mvpp2_txq_pcpu *txq_pcpu;
5923 unsigned int tx_todo = 0;
5926 txq = mvpp2_get_tx_queue(port, cause);
5930 txq_pcpu = per_cpu_ptr(txq->pcpu, cpu);
5932 if (txq_pcpu->count) {
5933 mvpp2_txq_done(port, txq, txq_pcpu);
5934 tx_todo += txq_pcpu->count;
5937 cause &= ~(1 << txq->log_id);
5942 /* Rx/Tx queue initialization/cleanup methods */
5944 /* Allocate and initialize descriptors for aggr TXQ */
5945 static int mvpp2_aggr_txq_init(struct platform_device *pdev,
5946 struct mvpp2_tx_queue *aggr_txq, int cpu,
5951 /* Allocate memory for TX descriptors */
5952 aggr_txq->descs = dma_zalloc_coherent(&pdev->dev,
5953 MVPP2_AGGR_TXQ_SIZE * MVPP2_DESC_ALIGNED_SIZE,
5954 &aggr_txq->descs_dma, GFP_KERNEL);
5955 if (!aggr_txq->descs)
5958 aggr_txq->last_desc = MVPP2_AGGR_TXQ_SIZE - 1;
5960 /* Aggr TXQ no reset WA */
5961 aggr_txq->next_desc_to_proc = mvpp2_read(priv,
5962 MVPP2_AGGR_TXQ_INDEX_REG(cpu));
5964 /* Set Tx descriptors queue starting address indirect
5967 if (priv->hw_version == MVPP21)
5968 txq_dma = aggr_txq->descs_dma;
5970 txq_dma = aggr_txq->descs_dma >>
5971 MVPP22_AGGR_TXQ_DESC_ADDR_OFFS;
5973 mvpp2_write(priv, MVPP2_AGGR_TXQ_DESC_ADDR_REG(cpu), txq_dma);
5974 mvpp2_write(priv, MVPP2_AGGR_TXQ_DESC_SIZE_REG(cpu),
5975 MVPP2_AGGR_TXQ_SIZE);
5980 /* Create a specified Rx queue */
5981 static int mvpp2_rxq_init(struct mvpp2_port *port,
5982 struct mvpp2_rx_queue *rxq)
5988 rxq->size = port->rx_ring_size;
5990 /* Allocate memory for RX descriptors */
5991 rxq->descs = dma_alloc_coherent(port->dev->dev.parent,
5992 rxq->size * MVPP2_DESC_ALIGNED_SIZE,
5993 &rxq->descs_dma, GFP_KERNEL);
5997 rxq->last_desc = rxq->size - 1;
5999 /* Zero occupied and non-occupied counters - direct access */
6000 mvpp2_write(port->priv, MVPP2_RXQ_STATUS_REG(rxq->id), 0);
6002 /* Set Rx descriptors queue starting address - indirect access */
6004 mvpp2_percpu_write(port->priv, cpu, MVPP2_RXQ_NUM_REG, rxq->id);
6005 if (port->priv->hw_version == MVPP21)
6006 rxq_dma = rxq->descs_dma;
6008 rxq_dma = rxq->descs_dma >> MVPP22_DESC_ADDR_OFFS;
6009 mvpp2_percpu_write(port->priv, cpu, MVPP2_RXQ_DESC_ADDR_REG, rxq_dma);
6010 mvpp2_percpu_write(port->priv, cpu, MVPP2_RXQ_DESC_SIZE_REG, rxq->size);
6011 mvpp2_percpu_write(port->priv, cpu, MVPP2_RXQ_INDEX_REG, 0);
6015 mvpp2_rxq_offset_set(port, rxq->id, NET_SKB_PAD);
6017 /* Set coalescing pkts and time */
6018 mvpp2_rx_pkts_coal_set(port, rxq);
6019 mvpp2_rx_time_coal_set(port, rxq);
6021 /* Add number of descriptors ready for receiving packets */
6022 mvpp2_rxq_status_update(port, rxq->id, 0, rxq->size);
6027 /* Push packets received by the RXQ to BM pool */
6028 static void mvpp2_rxq_drop_pkts(struct mvpp2_port *port,
6029 struct mvpp2_rx_queue *rxq)
6033 rx_received = mvpp2_rxq_received(port, rxq->id);
6037 for (i = 0; i < rx_received; i++) {
6038 struct mvpp2_rx_desc *rx_desc = mvpp2_rxq_next_desc_get(rxq);
6039 u32 status = mvpp2_rxdesc_status_get(port, rx_desc);
6042 pool = (status & MVPP2_RXD_BM_POOL_ID_MASK) >>
6043 MVPP2_RXD_BM_POOL_ID_OFFS;
6045 mvpp2_bm_pool_put(port, pool,
6046 mvpp2_rxdesc_dma_addr_get(port, rx_desc),
6047 mvpp2_rxdesc_cookie_get(port, rx_desc));
6049 mvpp2_rxq_status_update(port, rxq->id, rx_received, rx_received);
6052 /* Cleanup Rx queue */
6053 static void mvpp2_rxq_deinit(struct mvpp2_port *port,
6054 struct mvpp2_rx_queue *rxq)
6058 mvpp2_rxq_drop_pkts(port, rxq);
6061 dma_free_coherent(port->dev->dev.parent,
6062 rxq->size * MVPP2_DESC_ALIGNED_SIZE,
6068 rxq->next_desc_to_proc = 0;
6071 /* Clear Rx descriptors queue starting address and size;
6072 * free descriptor number
6074 mvpp2_write(port->priv, MVPP2_RXQ_STATUS_REG(rxq->id), 0);
6076 mvpp2_percpu_write(port->priv, cpu, MVPP2_RXQ_NUM_REG, rxq->id);
6077 mvpp2_percpu_write(port->priv, cpu, MVPP2_RXQ_DESC_ADDR_REG, 0);
6078 mvpp2_percpu_write(port->priv, cpu, MVPP2_RXQ_DESC_SIZE_REG, 0);
6082 /* Create and initialize a Tx queue */
6083 static int mvpp2_txq_init(struct mvpp2_port *port,
6084 struct mvpp2_tx_queue *txq)
6087 int cpu, desc, desc_per_txq, tx_port_num;
6088 struct mvpp2_txq_pcpu *txq_pcpu;
6090 txq->size = port->tx_ring_size;
6092 /* Allocate memory for Tx descriptors */
6093 txq->descs = dma_alloc_coherent(port->dev->dev.parent,
6094 txq->size * MVPP2_DESC_ALIGNED_SIZE,
6095 &txq->descs_dma, GFP_KERNEL);
6099 txq->last_desc = txq->size - 1;
6101 /* Set Tx descriptors queue starting address - indirect access */
6103 mvpp2_percpu_write(port->priv, cpu, MVPP2_TXQ_NUM_REG, txq->id);
6104 mvpp2_percpu_write(port->priv, cpu, MVPP2_TXQ_DESC_ADDR_REG,
6106 mvpp2_percpu_write(port->priv, cpu, MVPP2_TXQ_DESC_SIZE_REG,
6107 txq->size & MVPP2_TXQ_DESC_SIZE_MASK);
6108 mvpp2_percpu_write(port->priv, cpu, MVPP2_TXQ_INDEX_REG, 0);
6109 mvpp2_percpu_write(port->priv, cpu, MVPP2_TXQ_RSVD_CLR_REG,
6110 txq->id << MVPP2_TXQ_RSVD_CLR_OFFSET);
6111 val = mvpp2_percpu_read(port->priv, cpu, MVPP2_TXQ_PENDING_REG);
6112 val &= ~MVPP2_TXQ_PENDING_MASK;
6113 mvpp2_percpu_write(port->priv, cpu, MVPP2_TXQ_PENDING_REG, val);
6115 /* Calculate base address in prefetch buffer. We reserve 16 descriptors
6116 * for each existing TXQ.
6117 * TCONTS for PON port must be continuous from 0 to MVPP2_MAX_TCONT
6118 * GBE ports assumed to be continious from 0 to MVPP2_MAX_PORTS
6121 desc = (port->id * MVPP2_MAX_TXQ * desc_per_txq) +
6122 (txq->log_id * desc_per_txq);
6124 mvpp2_percpu_write(port->priv, cpu, MVPP2_TXQ_PREF_BUF_REG,
6125 MVPP2_PREF_BUF_PTR(desc) | MVPP2_PREF_BUF_SIZE_16 |
6126 MVPP2_PREF_BUF_THRESH(desc_per_txq / 2));
6129 /* WRR / EJP configuration - indirect access */
6130 tx_port_num = mvpp2_egress_port(port);
6131 mvpp2_write(port->priv, MVPP2_TXP_SCHED_PORT_INDEX_REG, tx_port_num);
6133 val = mvpp2_read(port->priv, MVPP2_TXQ_SCHED_REFILL_REG(txq->log_id));
6134 val &= ~MVPP2_TXQ_REFILL_PERIOD_ALL_MASK;
6135 val |= MVPP2_TXQ_REFILL_PERIOD_MASK(1);
6136 val |= MVPP2_TXQ_REFILL_TOKENS_ALL_MASK;
6137 mvpp2_write(port->priv, MVPP2_TXQ_SCHED_REFILL_REG(txq->log_id), val);
6139 val = MVPP2_TXQ_TOKEN_SIZE_MAX;
6140 mvpp2_write(port->priv, MVPP2_TXQ_SCHED_TOKEN_SIZE_REG(txq->log_id),
6143 for_each_present_cpu(cpu) {
6144 txq_pcpu = per_cpu_ptr(txq->pcpu, cpu);
6145 txq_pcpu->size = txq->size;
6146 txq_pcpu->buffs = kmalloc_array(txq_pcpu->size,
6147 sizeof(*txq_pcpu->buffs),
6149 if (!txq_pcpu->buffs)
6152 txq_pcpu->count = 0;
6153 txq_pcpu->reserved_num = 0;
6154 txq_pcpu->txq_put_index = 0;
6155 txq_pcpu->txq_get_index = 0;
6156 txq_pcpu->tso_headers = NULL;
6158 txq_pcpu->stop_threshold = txq->size - MVPP2_MAX_SKB_DESCS;
6159 txq_pcpu->wake_threshold = txq_pcpu->stop_threshold / 2;
6161 txq_pcpu->tso_headers =
6162 dma_alloc_coherent(port->dev->dev.parent,
6163 txq_pcpu->size * TSO_HEADER_SIZE,
6164 &txq_pcpu->tso_headers_dma,
6166 if (!txq_pcpu->tso_headers)
6173 /* Free allocated TXQ resources */
6174 static void mvpp2_txq_deinit(struct mvpp2_port *port,
6175 struct mvpp2_tx_queue *txq)
6177 struct mvpp2_txq_pcpu *txq_pcpu;
6180 for_each_present_cpu(cpu) {
6181 txq_pcpu = per_cpu_ptr(txq->pcpu, cpu);
6182 kfree(txq_pcpu->buffs);
6184 if (txq_pcpu->tso_headers)
6185 dma_free_coherent(port->dev->dev.parent,
6186 txq_pcpu->size * TSO_HEADER_SIZE,
6187 txq_pcpu->tso_headers,
6188 txq_pcpu->tso_headers_dma);
6190 txq_pcpu->tso_headers = NULL;
6194 dma_free_coherent(port->dev->dev.parent,
6195 txq->size * MVPP2_DESC_ALIGNED_SIZE,
6196 txq->descs, txq->descs_dma);
6200 txq->next_desc_to_proc = 0;
6203 /* Set minimum bandwidth for disabled TXQs */
6204 mvpp2_write(port->priv, MVPP2_TXQ_SCHED_TOKEN_CNTR_REG(txq->id), 0);
6206 /* Set Tx descriptors queue starting address and size */
6208 mvpp2_percpu_write(port->priv, cpu, MVPP2_TXQ_NUM_REG, txq->id);
6209 mvpp2_percpu_write(port->priv, cpu, MVPP2_TXQ_DESC_ADDR_REG, 0);
6210 mvpp2_percpu_write(port->priv, cpu, MVPP2_TXQ_DESC_SIZE_REG, 0);
6214 /* Cleanup Tx ports */
6215 static void mvpp2_txq_clean(struct mvpp2_port *port, struct mvpp2_tx_queue *txq)
6217 struct mvpp2_txq_pcpu *txq_pcpu;
6218 int delay, pending, cpu;
6222 mvpp2_percpu_write(port->priv, cpu, MVPP2_TXQ_NUM_REG, txq->id);
6223 val = mvpp2_percpu_read(port->priv, cpu, MVPP2_TXQ_PREF_BUF_REG);
6224 val |= MVPP2_TXQ_DRAIN_EN_MASK;
6225 mvpp2_percpu_write(port->priv, cpu, MVPP2_TXQ_PREF_BUF_REG, val);
6227 /* The napi queue has been stopped so wait for all packets
6228 * to be transmitted.
6232 if (delay >= MVPP2_TX_PENDING_TIMEOUT_MSEC) {
6233 netdev_warn(port->dev,
6234 "port %d: cleaning queue %d timed out\n",
6235 port->id, txq->log_id);
6241 pending = mvpp2_percpu_read(port->priv, cpu,
6242 MVPP2_TXQ_PENDING_REG);
6243 pending &= MVPP2_TXQ_PENDING_MASK;
6246 val &= ~MVPP2_TXQ_DRAIN_EN_MASK;
6247 mvpp2_percpu_write(port->priv, cpu, MVPP2_TXQ_PREF_BUF_REG, val);
6250 for_each_present_cpu(cpu) {
6251 txq_pcpu = per_cpu_ptr(txq->pcpu, cpu);
6253 /* Release all packets */
6254 mvpp2_txq_bufs_free(port, txq, txq_pcpu, txq_pcpu->count);
6257 txq_pcpu->count = 0;
6258 txq_pcpu->txq_put_index = 0;
6259 txq_pcpu->txq_get_index = 0;
6263 /* Cleanup all Tx queues */
6264 static void mvpp2_cleanup_txqs(struct mvpp2_port *port)
6266 struct mvpp2_tx_queue *txq;
6270 val = mvpp2_read(port->priv, MVPP2_TX_PORT_FLUSH_REG);
6272 /* Reset Tx ports and delete Tx queues */
6273 val |= MVPP2_TX_PORT_FLUSH_MASK(port->id);
6274 mvpp2_write(port->priv, MVPP2_TX_PORT_FLUSH_REG, val);
6276 for (queue = 0; queue < port->ntxqs; queue++) {
6277 txq = port->txqs[queue];
6278 mvpp2_txq_clean(port, txq);
6279 mvpp2_txq_deinit(port, txq);
6282 on_each_cpu(mvpp2_txq_sent_counter_clear, port, 1);
6284 val &= ~MVPP2_TX_PORT_FLUSH_MASK(port->id);
6285 mvpp2_write(port->priv, MVPP2_TX_PORT_FLUSH_REG, val);
6288 /* Cleanup all Rx queues */
6289 static void mvpp2_cleanup_rxqs(struct mvpp2_port *port)
6293 for (queue = 0; queue < port->nrxqs; queue++)
6294 mvpp2_rxq_deinit(port, port->rxqs[queue]);
6297 /* Init all Rx queues for port */
6298 static int mvpp2_setup_rxqs(struct mvpp2_port *port)
6302 for (queue = 0; queue < port->nrxqs; queue++) {
6303 err = mvpp2_rxq_init(port, port->rxqs[queue]);
6310 mvpp2_cleanup_rxqs(port);
6314 /* Init all tx queues for port */
6315 static int mvpp2_setup_txqs(struct mvpp2_port *port)
6317 struct mvpp2_tx_queue *txq;
6320 for (queue = 0; queue < port->ntxqs; queue++) {
6321 txq = port->txqs[queue];
6322 err = mvpp2_txq_init(port, txq);
6327 if (port->has_tx_irqs) {
6328 mvpp2_tx_time_coal_set(port);
6329 for (queue = 0; queue < port->ntxqs; queue++) {
6330 txq = port->txqs[queue];
6331 mvpp2_tx_pkts_coal_set(port, txq);
6335 on_each_cpu(mvpp2_txq_sent_counter_clear, port, 1);
6339 mvpp2_cleanup_txqs(port);
6343 /* The callback for per-port interrupt */
6344 static irqreturn_t mvpp2_isr(int irq, void *dev_id)
6346 struct mvpp2_queue_vector *qv = dev_id;
6348 mvpp2_qvec_interrupt_disable(qv);
6350 napi_schedule(&qv->napi);
6355 /* Per-port interrupt for link status changes */
6356 static irqreturn_t mvpp2_link_status_isr(int irq, void *dev_id)
6358 struct mvpp2_port *port = (struct mvpp2_port *)dev_id;
6359 struct net_device *dev = port->dev;
6360 bool event = false, link = false;
6363 mvpp22_gop_mask_irq(port);
6365 if (port->gop_id == 0 &&
6366 port->phy_interface == PHY_INTERFACE_MODE_10GKR) {
6367 val = readl(port->base + MVPP22_XLG_INT_STAT);
6368 if (val & MVPP22_XLG_INT_STAT_LINK) {
6370 val = readl(port->base + MVPP22_XLG_STATUS);
6371 if (val & MVPP22_XLG_STATUS_LINK_UP)
6374 } else if (phy_interface_mode_is_rgmii(port->phy_interface) ||
6375 port->phy_interface == PHY_INTERFACE_MODE_SGMII) {
6376 val = readl(port->base + MVPP22_GMAC_INT_STAT);
6377 if (val & MVPP22_GMAC_INT_STAT_LINK) {
6379 val = readl(port->base + MVPP2_GMAC_STATUS0);
6380 if (val & MVPP2_GMAC_STATUS0_LINK_UP)
6385 if (!netif_running(dev) || !event)
6389 mvpp2_interrupts_enable(port);
6391 mvpp2_egress_enable(port);
6392 mvpp2_ingress_enable(port);
6393 netif_carrier_on(dev);
6394 netif_tx_wake_all_queues(dev);
6396 netif_tx_stop_all_queues(dev);
6397 netif_carrier_off(dev);
6398 mvpp2_ingress_disable(port);
6399 mvpp2_egress_disable(port);
6401 mvpp2_interrupts_disable(port);
6405 mvpp22_gop_unmask_irq(port);
6409 static void mvpp2_gmac_set_autoneg(struct mvpp2_port *port,
6410 struct phy_device *phydev)
6414 if (port->phy_interface != PHY_INTERFACE_MODE_RGMII &&
6415 port->phy_interface != PHY_INTERFACE_MODE_RGMII_ID &&
6416 port->phy_interface != PHY_INTERFACE_MODE_RGMII_RXID &&
6417 port->phy_interface != PHY_INTERFACE_MODE_RGMII_TXID &&
6418 port->phy_interface != PHY_INTERFACE_MODE_SGMII)
6421 val = readl(port->base + MVPP2_GMAC_AUTONEG_CONFIG);
6422 val &= ~(MVPP2_GMAC_CONFIG_MII_SPEED |
6423 MVPP2_GMAC_CONFIG_GMII_SPEED |
6424 MVPP2_GMAC_CONFIG_FULL_DUPLEX |
6425 MVPP2_GMAC_AN_SPEED_EN |
6426 MVPP2_GMAC_AN_DUPLEX_EN);
6429 val |= MVPP2_GMAC_CONFIG_FULL_DUPLEX;
6431 if (phydev->speed == SPEED_1000)
6432 val |= MVPP2_GMAC_CONFIG_GMII_SPEED;
6433 else if (phydev->speed == SPEED_100)
6434 val |= MVPP2_GMAC_CONFIG_MII_SPEED;
6436 writel(val, port->base + MVPP2_GMAC_AUTONEG_CONFIG);
6440 static void mvpp2_link_event(struct net_device *dev)
6442 struct mvpp2_port *port = netdev_priv(dev);
6443 struct phy_device *phydev = dev->phydev;
6444 bool link_reconfigured = false;
6448 if (port->phy_interface != phydev->interface && port->comphy) {
6449 /* disable current port for reconfiguration */
6450 mvpp2_interrupts_disable(port);
6451 netif_carrier_off(port->dev);
6452 mvpp2_port_disable(port);
6453 phy_power_off(port->comphy);
6455 /* comphy reconfiguration */
6456 port->phy_interface = phydev->interface;
6457 mvpp22_comphy_init(port);
6459 /* gop/mac reconfiguration */
6460 mvpp22_gop_init(port);
6461 mvpp2_port_mii_set(port);
6463 link_reconfigured = true;
6466 if ((port->speed != phydev->speed) ||
6467 (port->duplex != phydev->duplex)) {
6468 mvpp2_gmac_set_autoneg(port, phydev);
6470 port->duplex = phydev->duplex;
6471 port->speed = phydev->speed;
6475 if (phydev->link != port->link || link_reconfigured) {
6476 port->link = phydev->link;
6479 if (port->phy_interface == PHY_INTERFACE_MODE_RGMII ||
6480 port->phy_interface == PHY_INTERFACE_MODE_RGMII_ID ||
6481 port->phy_interface == PHY_INTERFACE_MODE_RGMII_RXID ||
6482 port->phy_interface == PHY_INTERFACE_MODE_RGMII_TXID ||
6483 port->phy_interface == PHY_INTERFACE_MODE_SGMII) {
6484 val = readl(port->base + MVPP2_GMAC_AUTONEG_CONFIG);
6485 val |= (MVPP2_GMAC_FORCE_LINK_PASS |
6486 MVPP2_GMAC_FORCE_LINK_DOWN);
6487 writel(val, port->base + MVPP2_GMAC_AUTONEG_CONFIG);
6490 mvpp2_interrupts_enable(port);
6491 mvpp2_port_enable(port);
6493 mvpp2_egress_enable(port);
6494 mvpp2_ingress_enable(port);
6495 netif_carrier_on(dev);
6496 netif_tx_wake_all_queues(dev);
6501 netif_tx_stop_all_queues(dev);
6502 netif_carrier_off(dev);
6503 mvpp2_ingress_disable(port);
6504 mvpp2_egress_disable(port);
6506 mvpp2_port_disable(port);
6507 mvpp2_interrupts_disable(port);
6510 phy_print_status(phydev);
6514 static void mvpp2_timer_set(struct mvpp2_port_pcpu *port_pcpu)
6518 if (!port_pcpu->timer_scheduled) {
6519 port_pcpu->timer_scheduled = true;
6520 interval = MVPP2_TXDONE_HRTIMER_PERIOD_NS;
6521 hrtimer_start(&port_pcpu->tx_done_timer, interval,
6522 HRTIMER_MODE_REL_PINNED);
6526 static void mvpp2_tx_proc_cb(unsigned long data)
6528 struct net_device *dev = (struct net_device *)data;
6529 struct mvpp2_port *port = netdev_priv(dev);
6530 struct mvpp2_port_pcpu *port_pcpu = this_cpu_ptr(port->pcpu);
6531 unsigned int tx_todo, cause;
6533 if (!netif_running(dev))
6535 port_pcpu->timer_scheduled = false;
6537 /* Process all the Tx queues */
6538 cause = (1 << port->ntxqs) - 1;
6539 tx_todo = mvpp2_tx_done(port, cause, smp_processor_id());
6541 /* Set the timer in case not all the packets were processed */
6543 mvpp2_timer_set(port_pcpu);
6546 static enum hrtimer_restart mvpp2_hr_timer_cb(struct hrtimer *timer)
6548 struct mvpp2_port_pcpu *port_pcpu = container_of(timer,
6549 struct mvpp2_port_pcpu,
6552 tasklet_schedule(&port_pcpu->tx_done_tasklet);
6554 return HRTIMER_NORESTART;
6557 /* Main RX/TX processing routines */
6559 /* Display more error info */
6560 static void mvpp2_rx_error(struct mvpp2_port *port,
6561 struct mvpp2_rx_desc *rx_desc)
6563 u32 status = mvpp2_rxdesc_status_get(port, rx_desc);
6564 size_t sz = mvpp2_rxdesc_size_get(port, rx_desc);
6566 switch (status & MVPP2_RXD_ERR_CODE_MASK) {
6567 case MVPP2_RXD_ERR_CRC:
6568 netdev_err(port->dev, "bad rx status %08x (crc error), size=%zu\n",
6571 case MVPP2_RXD_ERR_OVERRUN:
6572 netdev_err(port->dev, "bad rx status %08x (overrun error), size=%zu\n",
6575 case MVPP2_RXD_ERR_RESOURCE:
6576 netdev_err(port->dev, "bad rx status %08x (resource error), size=%zu\n",
6582 /* Handle RX checksum offload */
6583 static void mvpp2_rx_csum(struct mvpp2_port *port, u32 status,
6584 struct sk_buff *skb)
6586 if (((status & MVPP2_RXD_L3_IP4) &&
6587 !(status & MVPP2_RXD_IP4_HEADER_ERR)) ||
6588 (status & MVPP2_RXD_L3_IP6))
6589 if (((status & MVPP2_RXD_L4_UDP) ||
6590 (status & MVPP2_RXD_L4_TCP)) &&
6591 (status & MVPP2_RXD_L4_CSUM_OK)) {
6593 skb->ip_summed = CHECKSUM_UNNECESSARY;
6597 skb->ip_summed = CHECKSUM_NONE;
6600 /* Reuse skb if possible, or allocate a new skb and add it to BM pool */
6601 static int mvpp2_rx_refill(struct mvpp2_port *port,
6602 struct mvpp2_bm_pool *bm_pool, int pool)
6604 dma_addr_t dma_addr;
6605 phys_addr_t phys_addr;
6608 /* No recycle or too many buffers are in use, so allocate a new skb */
6609 buf = mvpp2_buf_alloc(port, bm_pool, &dma_addr, &phys_addr,
6614 mvpp2_bm_pool_put(port, pool, dma_addr, phys_addr);
6619 /* Handle tx checksum */
6620 static u32 mvpp2_skb_tx_csum(struct mvpp2_port *port, struct sk_buff *skb)
6622 if (skb->ip_summed == CHECKSUM_PARTIAL) {
6626 if (skb->protocol == htons(ETH_P_IP)) {
6627 struct iphdr *ip4h = ip_hdr(skb);
6629 /* Calculate IPv4 checksum and L4 checksum */
6630 ip_hdr_len = ip4h->ihl;
6631 l4_proto = ip4h->protocol;
6632 } else if (skb->protocol == htons(ETH_P_IPV6)) {
6633 struct ipv6hdr *ip6h = ipv6_hdr(skb);
6635 /* Read l4_protocol from one of IPv6 extra headers */
6636 if (skb_network_header_len(skb) > 0)
6637 ip_hdr_len = (skb_network_header_len(skb) >> 2);
6638 l4_proto = ip6h->nexthdr;
6640 return MVPP2_TXD_L4_CSUM_NOT;
6643 return mvpp2_txq_desc_csum(skb_network_offset(skb),
6644 skb->protocol, ip_hdr_len, l4_proto);
6647 return MVPP2_TXD_L4_CSUM_NOT | MVPP2_TXD_IP_CSUM_DISABLE;
6650 /* Main rx processing */
6651 static int mvpp2_rx(struct mvpp2_port *port, struct napi_struct *napi,
6652 int rx_todo, struct mvpp2_rx_queue *rxq)
6654 struct net_device *dev = port->dev;
6660 /* Get number of received packets and clamp the to-do */
6661 rx_received = mvpp2_rxq_received(port, rxq->id);
6662 if (rx_todo > rx_received)
6663 rx_todo = rx_received;
6665 while (rx_done < rx_todo) {
6666 struct mvpp2_rx_desc *rx_desc = mvpp2_rxq_next_desc_get(rxq);
6667 struct mvpp2_bm_pool *bm_pool;
6668 struct sk_buff *skb;
6669 unsigned int frag_size;
6670 dma_addr_t dma_addr;
6671 phys_addr_t phys_addr;
6673 int pool, rx_bytes, err;
6677 rx_status = mvpp2_rxdesc_status_get(port, rx_desc);
6678 rx_bytes = mvpp2_rxdesc_size_get(port, rx_desc);
6679 rx_bytes -= MVPP2_MH_SIZE;
6680 dma_addr = mvpp2_rxdesc_dma_addr_get(port, rx_desc);
6681 phys_addr = mvpp2_rxdesc_cookie_get(port, rx_desc);
6682 data = (void *)phys_to_virt(phys_addr);
6684 pool = (rx_status & MVPP2_RXD_BM_POOL_ID_MASK) >>
6685 MVPP2_RXD_BM_POOL_ID_OFFS;
6686 bm_pool = &port->priv->bm_pools[pool];
6688 /* In case of an error, release the requested buffer pointer
6689 * to the Buffer Manager. This request process is controlled
6690 * by the hardware, and the information about the buffer is
6691 * comprised by the RX descriptor.
6693 if (rx_status & MVPP2_RXD_ERR_SUMMARY) {
6695 dev->stats.rx_errors++;
6696 mvpp2_rx_error(port, rx_desc);
6697 /* Return the buffer to the pool */
6698 mvpp2_bm_pool_put(port, pool, dma_addr, phys_addr);
6702 if (bm_pool->frag_size > PAGE_SIZE)
6705 frag_size = bm_pool->frag_size;
6707 skb = build_skb(data, frag_size);
6709 netdev_warn(port->dev, "skb build failed\n");
6710 goto err_drop_frame;
6713 err = mvpp2_rx_refill(port, bm_pool, pool);
6715 netdev_err(port->dev, "failed to refill BM pools\n");
6716 goto err_drop_frame;
6719 dma_unmap_single(dev->dev.parent, dma_addr,
6720 bm_pool->buf_size, DMA_FROM_DEVICE);
6723 rcvd_bytes += rx_bytes;
6725 skb_reserve(skb, MVPP2_MH_SIZE + NET_SKB_PAD);
6726 skb_put(skb, rx_bytes);
6727 skb->protocol = eth_type_trans(skb, dev);
6728 mvpp2_rx_csum(port, rx_status, skb);
6730 napi_gro_receive(napi, skb);
6734 struct mvpp2_pcpu_stats *stats = this_cpu_ptr(port->stats);
6736 u64_stats_update_begin(&stats->syncp);
6737 stats->rx_packets += rcvd_pkts;
6738 stats->rx_bytes += rcvd_bytes;
6739 u64_stats_update_end(&stats->syncp);
6742 /* Update Rx queue management counters */
6744 mvpp2_rxq_status_update(port, rxq->id, rx_done, rx_done);
6750 tx_desc_unmap_put(struct mvpp2_port *port, struct mvpp2_tx_queue *txq,
6751 struct mvpp2_tx_desc *desc)
6753 struct mvpp2_txq_pcpu *txq_pcpu = this_cpu_ptr(txq->pcpu);
6755 dma_addr_t buf_dma_addr =
6756 mvpp2_txdesc_dma_addr_get(port, desc);
6758 mvpp2_txdesc_size_get(port, desc);
6759 if (!IS_TSO_HEADER(txq_pcpu, buf_dma_addr))
6760 dma_unmap_single(port->dev->dev.parent, buf_dma_addr,
6761 buf_sz, DMA_TO_DEVICE);
6762 mvpp2_txq_desc_put(txq);
6765 /* Handle tx fragmentation processing */
6766 static int mvpp2_tx_frag_process(struct mvpp2_port *port, struct sk_buff *skb,
6767 struct mvpp2_tx_queue *aggr_txq,
6768 struct mvpp2_tx_queue *txq)
6770 struct mvpp2_txq_pcpu *txq_pcpu = this_cpu_ptr(txq->pcpu);
6771 struct mvpp2_tx_desc *tx_desc;
6773 dma_addr_t buf_dma_addr;
6775 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
6776 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
6777 void *addr = page_address(frag->page.p) + frag->page_offset;
6779 tx_desc = mvpp2_txq_next_desc_get(aggr_txq);
6780 mvpp2_txdesc_txq_set(port, tx_desc, txq->id);
6781 mvpp2_txdesc_size_set(port, tx_desc, frag->size);
6783 buf_dma_addr = dma_map_single(port->dev->dev.parent, addr,
6786 if (dma_mapping_error(port->dev->dev.parent, buf_dma_addr)) {
6787 mvpp2_txq_desc_put(txq);
6791 mvpp2_txdesc_dma_addr_set(port, tx_desc, buf_dma_addr);
6793 if (i == (skb_shinfo(skb)->nr_frags - 1)) {
6794 /* Last descriptor */
6795 mvpp2_txdesc_cmd_set(port, tx_desc,
6797 mvpp2_txq_inc_put(port, txq_pcpu, skb, tx_desc);
6799 /* Descriptor in the middle: Not First, Not Last */
6800 mvpp2_txdesc_cmd_set(port, tx_desc, 0);
6801 mvpp2_txq_inc_put(port, txq_pcpu, NULL, tx_desc);
6807 /* Release all descriptors that were used to map fragments of
6808 * this packet, as well as the corresponding DMA mappings
6810 for (i = i - 1; i >= 0; i--) {
6811 tx_desc = txq->descs + i;
6812 tx_desc_unmap_put(port, txq, tx_desc);
6818 static inline void mvpp2_tso_put_hdr(struct sk_buff *skb,
6819 struct net_device *dev,
6820 struct mvpp2_tx_queue *txq,
6821 struct mvpp2_tx_queue *aggr_txq,
6822 struct mvpp2_txq_pcpu *txq_pcpu,
6825 struct mvpp2_port *port = netdev_priv(dev);
6826 struct mvpp2_tx_desc *tx_desc = mvpp2_txq_next_desc_get(aggr_txq);
6829 mvpp2_txdesc_txq_set(port, tx_desc, txq->id);
6830 mvpp2_txdesc_size_set(port, tx_desc, hdr_sz);
6832 addr = txq_pcpu->tso_headers_dma +
6833 txq_pcpu->txq_put_index * TSO_HEADER_SIZE;
6834 mvpp2_txdesc_dma_addr_set(port, tx_desc, addr);
6836 mvpp2_txdesc_cmd_set(port, tx_desc, mvpp2_skb_tx_csum(port, skb) |
6838 MVPP2_TXD_PADDING_DISABLE);
6839 mvpp2_txq_inc_put(port, txq_pcpu, NULL, tx_desc);
6842 static inline int mvpp2_tso_put_data(struct sk_buff *skb,
6843 struct net_device *dev, struct tso_t *tso,
6844 struct mvpp2_tx_queue *txq,
6845 struct mvpp2_tx_queue *aggr_txq,
6846 struct mvpp2_txq_pcpu *txq_pcpu,
6847 int sz, bool left, bool last)
6849 struct mvpp2_port *port = netdev_priv(dev);
6850 struct mvpp2_tx_desc *tx_desc = mvpp2_txq_next_desc_get(aggr_txq);
6851 dma_addr_t buf_dma_addr;
6853 mvpp2_txdesc_txq_set(port, tx_desc, txq->id);
6854 mvpp2_txdesc_size_set(port, tx_desc, sz);
6856 buf_dma_addr = dma_map_single(dev->dev.parent, tso->data, sz,
6858 if (unlikely(dma_mapping_error(dev->dev.parent, buf_dma_addr))) {
6859 mvpp2_txq_desc_put(txq);
6863 mvpp2_txdesc_dma_addr_set(port, tx_desc, buf_dma_addr);
6866 mvpp2_txdesc_cmd_set(port, tx_desc, MVPP2_TXD_L_DESC);
6868 mvpp2_txq_inc_put(port, txq_pcpu, skb, tx_desc);
6872 mvpp2_txdesc_cmd_set(port, tx_desc, 0);
6875 mvpp2_txq_inc_put(port, txq_pcpu, NULL, tx_desc);
6879 static int mvpp2_tx_tso(struct sk_buff *skb, struct net_device *dev,
6880 struct mvpp2_tx_queue *txq,
6881 struct mvpp2_tx_queue *aggr_txq,
6882 struct mvpp2_txq_pcpu *txq_pcpu)
6884 struct mvpp2_port *port = netdev_priv(dev);
6886 int hdr_sz = skb_transport_offset(skb) + tcp_hdrlen(skb);
6887 int i, len, descs = 0;
6889 /* Check number of available descriptors */
6890 if (mvpp2_aggr_desc_num_check(port->priv, aggr_txq,
6891 tso_count_descs(skb)) ||
6892 mvpp2_txq_reserved_desc_num_proc(port->priv, txq, txq_pcpu,
6893 tso_count_descs(skb)))
6896 tso_start(skb, &tso);
6897 len = skb->len - hdr_sz;
6899 int left = min_t(int, skb_shinfo(skb)->gso_size, len);
6900 char *hdr = txq_pcpu->tso_headers +
6901 txq_pcpu->txq_put_index * TSO_HEADER_SIZE;
6906 tso_build_hdr(skb, hdr, &tso, left, len == 0);
6907 mvpp2_tso_put_hdr(skb, dev, txq, aggr_txq, txq_pcpu, hdr_sz);
6910 int sz = min_t(int, tso.size, left);
6914 if (mvpp2_tso_put_data(skb, dev, &tso, txq, aggr_txq,
6915 txq_pcpu, sz, left, len == 0))
6917 tso_build_data(skb, &tso, sz);
6924 for (i = descs - 1; i >= 0; i--) {
6925 struct mvpp2_tx_desc *tx_desc = txq->descs + i;
6926 tx_desc_unmap_put(port, txq, tx_desc);
6931 /* Main tx processing */
6932 static int mvpp2_tx(struct sk_buff *skb, struct net_device *dev)
6934 struct mvpp2_port *port = netdev_priv(dev);
6935 struct mvpp2_tx_queue *txq, *aggr_txq;
6936 struct mvpp2_txq_pcpu *txq_pcpu;
6937 struct mvpp2_tx_desc *tx_desc;
6938 dma_addr_t buf_dma_addr;
6943 txq_id = skb_get_queue_mapping(skb);
6944 txq = port->txqs[txq_id];
6945 txq_pcpu = this_cpu_ptr(txq->pcpu);
6946 aggr_txq = &port->priv->aggr_txqs[smp_processor_id()];
6948 if (skb_is_gso(skb)) {
6949 frags = mvpp2_tx_tso(skb, dev, txq, aggr_txq, txq_pcpu);
6952 frags = skb_shinfo(skb)->nr_frags + 1;
6954 /* Check number of available descriptors */
6955 if (mvpp2_aggr_desc_num_check(port->priv, aggr_txq, frags) ||
6956 mvpp2_txq_reserved_desc_num_proc(port->priv, txq,
6962 /* Get a descriptor for the first part of the packet */
6963 tx_desc = mvpp2_txq_next_desc_get(aggr_txq);
6964 mvpp2_txdesc_txq_set(port, tx_desc, txq->id);
6965 mvpp2_txdesc_size_set(port, tx_desc, skb_headlen(skb));
6967 buf_dma_addr = dma_map_single(dev->dev.parent, skb->data,
6968 skb_headlen(skb), DMA_TO_DEVICE);
6969 if (unlikely(dma_mapping_error(dev->dev.parent, buf_dma_addr))) {
6970 mvpp2_txq_desc_put(txq);
6975 mvpp2_txdesc_dma_addr_set(port, tx_desc, buf_dma_addr);
6977 tx_cmd = mvpp2_skb_tx_csum(port, skb);
6980 /* First and Last descriptor */
6981 tx_cmd |= MVPP2_TXD_F_DESC | MVPP2_TXD_L_DESC;
6982 mvpp2_txdesc_cmd_set(port, tx_desc, tx_cmd);
6983 mvpp2_txq_inc_put(port, txq_pcpu, skb, tx_desc);
6985 /* First but not Last */
6986 tx_cmd |= MVPP2_TXD_F_DESC | MVPP2_TXD_PADDING_DISABLE;
6987 mvpp2_txdesc_cmd_set(port, tx_desc, tx_cmd);
6988 mvpp2_txq_inc_put(port, txq_pcpu, NULL, tx_desc);
6990 /* Continue with other skb fragments */
6991 if (mvpp2_tx_frag_process(port, skb, aggr_txq, txq)) {
6992 tx_desc_unmap_put(port, txq, tx_desc);
6999 struct mvpp2_pcpu_stats *stats = this_cpu_ptr(port->stats);
7000 struct netdev_queue *nq = netdev_get_tx_queue(dev, txq_id);
7002 txq_pcpu->reserved_num -= frags;
7003 txq_pcpu->count += frags;
7004 aggr_txq->count += frags;
7006 /* Enable transmit */
7008 mvpp2_aggr_txq_pend_desc_add(port, frags);
7010 if (txq_pcpu->count >= txq_pcpu->stop_threshold)
7011 netif_tx_stop_queue(nq);
7013 u64_stats_update_begin(&stats->syncp);
7014 stats->tx_packets++;
7015 stats->tx_bytes += skb->len;
7016 u64_stats_update_end(&stats->syncp);
7018 dev->stats.tx_dropped++;
7019 dev_kfree_skb_any(skb);
7022 /* Finalize TX processing */
7023 if (!port->has_tx_irqs && txq_pcpu->count >= txq->done_pkts_coal)
7024 mvpp2_txq_done(port, txq, txq_pcpu);
7026 /* Set the timer in case not all frags were processed */
7027 if (!port->has_tx_irqs && txq_pcpu->count <= frags &&
7028 txq_pcpu->count > 0) {
7029 struct mvpp2_port_pcpu *port_pcpu = this_cpu_ptr(port->pcpu);
7031 mvpp2_timer_set(port_pcpu);
7034 return NETDEV_TX_OK;
7037 static inline void mvpp2_cause_error(struct net_device *dev, int cause)
7039 if (cause & MVPP2_CAUSE_FCS_ERR_MASK)
7040 netdev_err(dev, "FCS error\n");
7041 if (cause & MVPP2_CAUSE_RX_FIFO_OVERRUN_MASK)
7042 netdev_err(dev, "rx fifo overrun error\n");
7043 if (cause & MVPP2_CAUSE_TX_FIFO_UNDERRUN_MASK)
7044 netdev_err(dev, "tx fifo underrun error\n");
7047 static int mvpp2_poll(struct napi_struct *napi, int budget)
7049 u32 cause_rx_tx, cause_rx, cause_tx, cause_misc;
7051 struct mvpp2_port *port = netdev_priv(napi->dev);
7052 struct mvpp2_queue_vector *qv;
7053 int cpu = smp_processor_id();
7055 qv = container_of(napi, struct mvpp2_queue_vector, napi);
7057 /* Rx/Tx cause register
7059 * Bits 0-15: each bit indicates received packets on the Rx queue
7060 * (bit 0 is for Rx queue 0).
7062 * Bits 16-23: each bit indicates transmitted packets on the Tx queue
7063 * (bit 16 is for Tx queue 0).
7065 * Each CPU has its own Rx/Tx cause register
7067 cause_rx_tx = mvpp2_percpu_read_relaxed(port->priv, qv->sw_thread_id,
7068 MVPP2_ISR_RX_TX_CAUSE_REG(port->id));
7070 cause_misc = cause_rx_tx & MVPP2_CAUSE_MISC_SUM_MASK;
7072 mvpp2_cause_error(port->dev, cause_misc);
7074 /* Clear the cause register */
7075 mvpp2_write(port->priv, MVPP2_ISR_MISC_CAUSE_REG, 0);
7076 mvpp2_percpu_write(port->priv, cpu,
7077 MVPP2_ISR_RX_TX_CAUSE_REG(port->id),
7078 cause_rx_tx & ~MVPP2_CAUSE_MISC_SUM_MASK);
7081 cause_tx = cause_rx_tx & MVPP2_CAUSE_TXQ_OCCUP_DESC_ALL_MASK;
7083 cause_tx >>= MVPP2_CAUSE_TXQ_OCCUP_DESC_ALL_OFFSET;
7084 mvpp2_tx_done(port, cause_tx, qv->sw_thread_id);
7087 /* Process RX packets */
7088 cause_rx = cause_rx_tx & MVPP2_CAUSE_RXQ_OCCUP_DESC_ALL_MASK;
7089 cause_rx <<= qv->first_rxq;
7090 cause_rx |= qv->pending_cause_rx;
7091 while (cause_rx && budget > 0) {
7093 struct mvpp2_rx_queue *rxq;
7095 rxq = mvpp2_get_rx_queue(port, cause_rx);
7099 count = mvpp2_rx(port, napi, budget, rxq);
7103 /* Clear the bit associated to this Rx queue
7104 * so that next iteration will continue from
7105 * the next Rx queue.
7107 cause_rx &= ~(1 << rxq->logic_rxq);
7113 napi_complete_done(napi, rx_done);
7115 mvpp2_qvec_interrupt_enable(qv);
7117 qv->pending_cause_rx = cause_rx;
7121 /* Set hw internals when starting port */
7122 static void mvpp2_start_dev(struct mvpp2_port *port)
7124 struct net_device *ndev = port->dev;
7127 if (port->gop_id == 0 &&
7128 (port->phy_interface == PHY_INTERFACE_MODE_XAUI ||
7129 port->phy_interface == PHY_INTERFACE_MODE_10GKR))
7130 mvpp2_xlg_max_rx_size_set(port);
7132 mvpp2_gmac_max_rx_size_set(port);
7134 mvpp2_txp_max_tx_size_set(port);
7136 for (i = 0; i < port->nqvecs; i++)
7137 napi_enable(&port->qvecs[i].napi);
7139 /* Enable interrupts on all CPUs */
7140 mvpp2_interrupts_enable(port);
7142 if (port->priv->hw_version == MVPP22) {
7143 mvpp22_comphy_init(port);
7144 mvpp22_gop_init(port);
7147 mvpp2_port_mii_set(port);
7148 mvpp2_port_enable(port);
7150 phy_start(ndev->phydev);
7151 netif_tx_start_all_queues(port->dev);
7154 /* Set hw internals when stopping port */
7155 static void mvpp2_stop_dev(struct mvpp2_port *port)
7157 struct net_device *ndev = port->dev;
7160 /* Stop new packets from arriving to RXQs */
7161 mvpp2_ingress_disable(port);
7165 /* Disable interrupts on all CPUs */
7166 mvpp2_interrupts_disable(port);
7168 for (i = 0; i < port->nqvecs; i++)
7169 napi_disable(&port->qvecs[i].napi);
7171 netif_carrier_off(port->dev);
7172 netif_tx_stop_all_queues(port->dev);
7174 mvpp2_egress_disable(port);
7175 mvpp2_port_disable(port);
7177 phy_stop(ndev->phydev);
7178 phy_power_off(port->comphy);
7181 static int mvpp2_check_ringparam_valid(struct net_device *dev,
7182 struct ethtool_ringparam *ring)
7184 u16 new_rx_pending = ring->rx_pending;
7185 u16 new_tx_pending = ring->tx_pending;
7187 if (ring->rx_pending == 0 || ring->tx_pending == 0)
7190 if (ring->rx_pending > MVPP2_MAX_RXD_MAX)
7191 new_rx_pending = MVPP2_MAX_RXD_MAX;
7192 else if (!IS_ALIGNED(ring->rx_pending, 16))
7193 new_rx_pending = ALIGN(ring->rx_pending, 16);
7195 if (ring->tx_pending > MVPP2_MAX_TXD_MAX)
7196 new_tx_pending = MVPP2_MAX_TXD_MAX;
7197 else if (!IS_ALIGNED(ring->tx_pending, 32))
7198 new_tx_pending = ALIGN(ring->tx_pending, 32);
7200 /* The Tx ring size cannot be smaller than the minimum number of
7201 * descriptors needed for TSO.
7203 if (new_tx_pending < MVPP2_MAX_SKB_DESCS)
7204 new_tx_pending = ALIGN(MVPP2_MAX_SKB_DESCS, 32);
7206 if (ring->rx_pending != new_rx_pending) {
7207 netdev_info(dev, "illegal Rx ring size value %d, round to %d\n",
7208 ring->rx_pending, new_rx_pending);
7209 ring->rx_pending = new_rx_pending;
7212 if (ring->tx_pending != new_tx_pending) {
7213 netdev_info(dev, "illegal Tx ring size value %d, round to %d\n",
7214 ring->tx_pending, new_tx_pending);
7215 ring->tx_pending = new_tx_pending;
7221 static void mvpp21_get_mac_address(struct mvpp2_port *port, unsigned char *addr)
7223 u32 mac_addr_l, mac_addr_m, mac_addr_h;
7225 mac_addr_l = readl(port->base + MVPP2_GMAC_CTRL_1_REG);
7226 mac_addr_m = readl(port->priv->lms_base + MVPP2_SRC_ADDR_MIDDLE);
7227 mac_addr_h = readl(port->priv->lms_base + MVPP2_SRC_ADDR_HIGH);
7228 addr[0] = (mac_addr_h >> 24) & 0xFF;
7229 addr[1] = (mac_addr_h >> 16) & 0xFF;
7230 addr[2] = (mac_addr_h >> 8) & 0xFF;
7231 addr[3] = mac_addr_h & 0xFF;
7232 addr[4] = mac_addr_m & 0xFF;
7233 addr[5] = (mac_addr_l >> MVPP2_GMAC_SA_LOW_OFFS) & 0xFF;
7236 static int mvpp2_phy_connect(struct mvpp2_port *port)
7238 struct phy_device *phy_dev;
7240 /* No PHY is attached */
7241 if (!port->phy_node)
7244 phy_dev = of_phy_connect(port->dev, port->phy_node, mvpp2_link_event, 0,
7245 port->phy_interface);
7247 netdev_err(port->dev, "cannot connect to phy\n");
7250 phy_dev->supported &= PHY_GBIT_FEATURES;
7251 phy_dev->advertising = phy_dev->supported;
7260 static void mvpp2_phy_disconnect(struct mvpp2_port *port)
7262 struct net_device *ndev = port->dev;
7267 phy_disconnect(ndev->phydev);
7270 static int mvpp2_irqs_init(struct mvpp2_port *port)
7274 for (i = 0; i < port->nqvecs; i++) {
7275 struct mvpp2_queue_vector *qv = port->qvecs + i;
7277 if (qv->type == MVPP2_QUEUE_VECTOR_PRIVATE)
7278 irq_set_status_flags(qv->irq, IRQ_NO_BALANCING);
7280 err = request_irq(qv->irq, mvpp2_isr, 0, port->dev->name, qv);
7284 if (qv->type == MVPP2_QUEUE_VECTOR_PRIVATE)
7285 irq_set_affinity_hint(qv->irq,
7286 cpumask_of(qv->sw_thread_id));
7291 for (i = 0; i < port->nqvecs; i++) {
7292 struct mvpp2_queue_vector *qv = port->qvecs + i;
7294 irq_set_affinity_hint(qv->irq, NULL);
7295 free_irq(qv->irq, qv);
7301 static void mvpp2_irqs_deinit(struct mvpp2_port *port)
7305 for (i = 0; i < port->nqvecs; i++) {
7306 struct mvpp2_queue_vector *qv = port->qvecs + i;
7308 irq_set_affinity_hint(qv->irq, NULL);
7309 irq_clear_status_flags(qv->irq, IRQ_NO_BALANCING);
7310 free_irq(qv->irq, qv);
7314 static void mvpp22_init_rss(struct mvpp2_port *port)
7316 struct mvpp2 *priv = port->priv;
7319 /* Set the table width: replace the whole classifier Rx queue number
7320 * with the ones configured in RSS table entries.
7322 mvpp2_write(priv, MVPP22_RSS_INDEX, MVPP22_RSS_INDEX_TABLE(0));
7323 mvpp2_write(priv, MVPP22_RSS_WIDTH, 8);
7325 /* Loop through the classifier Rx Queues and map them to a RSS table.
7326 * Map them all to the first table (0) by default.
7328 for (i = 0; i < MVPP2_CLS_RX_QUEUES; i++) {
7329 mvpp2_write(priv, MVPP22_RSS_INDEX, MVPP22_RSS_INDEX_QUEUE(i));
7330 mvpp2_write(priv, MVPP22_RSS_TABLE,
7331 MVPP22_RSS_TABLE_POINTER(0));
7334 /* Configure the first table to evenly distribute the packets across
7335 * real Rx Queues. The table entries map a hash to an port Rx Queue.
7337 for (i = 0; i < MVPP22_RSS_TABLE_ENTRIES; i++) {
7338 u32 sel = MVPP22_RSS_INDEX_TABLE(0) |
7339 MVPP22_RSS_INDEX_TABLE_ENTRY(i);
7340 mvpp2_write(priv, MVPP22_RSS_INDEX, sel);
7342 mvpp2_write(priv, MVPP22_RSS_TABLE_ENTRY, i % port->nrxqs);
7347 static int mvpp2_open(struct net_device *dev)
7349 struct mvpp2_port *port = netdev_priv(dev);
7350 struct mvpp2 *priv = port->priv;
7351 unsigned char mac_bcast[ETH_ALEN] = {
7352 0xff, 0xff, 0xff, 0xff, 0xff, 0xff };
7355 err = mvpp2_prs_mac_da_accept(port, mac_bcast, true);
7357 netdev_err(dev, "mvpp2_prs_mac_da_accept BC failed\n");
7360 err = mvpp2_prs_mac_da_accept(port, dev->dev_addr, true);
7362 netdev_err(dev, "mvpp2_prs_mac_da_accept own addr failed\n");
7365 err = mvpp2_prs_tag_mode_set(port->priv, port->id, MVPP2_TAG_TYPE_MH);
7367 netdev_err(dev, "mvpp2_prs_tag_mode_set failed\n");
7370 err = mvpp2_prs_def_flow(port);
7372 netdev_err(dev, "mvpp2_prs_def_flow failed\n");
7376 /* Allocate the Rx/Tx queues */
7377 err = mvpp2_setup_rxqs(port);
7379 netdev_err(port->dev, "cannot allocate Rx queues\n");
7383 err = mvpp2_setup_txqs(port);
7385 netdev_err(port->dev, "cannot allocate Tx queues\n");
7386 goto err_cleanup_rxqs;
7389 err = mvpp2_irqs_init(port);
7391 netdev_err(port->dev, "cannot init IRQs\n");
7392 goto err_cleanup_txqs;
7395 if (priv->hw_version == MVPP22 && !port->phy_node && port->link_irq) {
7396 err = request_irq(port->link_irq, mvpp2_link_status_isr, 0,
7399 netdev_err(port->dev, "cannot request link IRQ %d\n",
7404 mvpp22_gop_setup_irq(port);
7407 /* In default link is down */
7408 netif_carrier_off(port->dev);
7410 err = mvpp2_phy_connect(port);
7412 goto err_free_link_irq;
7414 /* Unmask interrupts on all CPUs */
7415 on_each_cpu(mvpp2_interrupts_unmask, port, 1);
7416 mvpp2_shared_interrupt_mask_unmask(port, false);
7418 mvpp2_start_dev(port);
7420 if (priv->hw_version == MVPP22)
7421 mvpp22_init_rss(port);
7423 /* Start hardware statistics gathering */
7424 queue_delayed_work(priv->stats_queue, &port->stats_work,
7425 MVPP2_MIB_COUNTERS_STATS_DELAY);
7430 if (priv->hw_version == MVPP22 && !port->phy_node && port->link_irq)
7431 free_irq(port->link_irq, port);
7433 mvpp2_irqs_deinit(port);
7435 mvpp2_cleanup_txqs(port);
7437 mvpp2_cleanup_rxqs(port);
7441 static int mvpp2_stop(struct net_device *dev)
7443 struct mvpp2_port *port = netdev_priv(dev);
7444 struct mvpp2_port_pcpu *port_pcpu;
7445 struct mvpp2 *priv = port->priv;
7448 mvpp2_stop_dev(port);
7449 mvpp2_phy_disconnect(port);
7451 /* Mask interrupts on all CPUs */
7452 on_each_cpu(mvpp2_interrupts_mask, port, 1);
7453 mvpp2_shared_interrupt_mask_unmask(port, true);
7455 if (priv->hw_version == MVPP22 && !port->phy_node && port->link_irq)
7456 free_irq(port->link_irq, port);
7458 mvpp2_irqs_deinit(port);
7459 if (!port->has_tx_irqs) {
7460 for_each_present_cpu(cpu) {
7461 port_pcpu = per_cpu_ptr(port->pcpu, cpu);
7463 hrtimer_cancel(&port_pcpu->tx_done_timer);
7464 port_pcpu->timer_scheduled = false;
7465 tasklet_kill(&port_pcpu->tx_done_tasklet);
7468 mvpp2_cleanup_rxqs(port);
7469 mvpp2_cleanup_txqs(port);
7471 cancel_delayed_work_sync(&port->stats_work);
7476 static int mvpp2_prs_mac_da_accept_list(struct mvpp2_port *port,
7477 struct netdev_hw_addr_list *list)
7479 struct netdev_hw_addr *ha;
7482 netdev_hw_addr_list_for_each(ha, list) {
7483 ret = mvpp2_prs_mac_da_accept(port, ha->addr, true);
7491 static void mvpp2_set_rx_promisc(struct mvpp2_port *port, bool enable)
7493 if (!enable && (port->dev->features & NETIF_F_HW_VLAN_CTAG_FILTER))
7494 mvpp2_prs_vid_enable_filtering(port);
7496 mvpp2_prs_vid_disable_filtering(port);
7498 mvpp2_prs_mac_promisc_set(port->priv, port->id,
7499 MVPP2_PRS_L2_UNI_CAST, enable);
7501 mvpp2_prs_mac_promisc_set(port->priv, port->id,
7502 MVPP2_PRS_L2_MULTI_CAST, enable);
7505 static void mvpp2_set_rx_mode(struct net_device *dev)
7507 struct mvpp2_port *port = netdev_priv(dev);
7509 /* Clear the whole UC and MC list */
7510 mvpp2_prs_mac_del_all(port);
7512 if (dev->flags & IFF_PROMISC) {
7513 mvpp2_set_rx_promisc(port, true);
7517 mvpp2_set_rx_promisc(port, false);
7519 if (netdev_uc_count(dev) > MVPP2_PRS_MAC_UC_FILT_MAX ||
7520 mvpp2_prs_mac_da_accept_list(port, &dev->uc))
7521 mvpp2_prs_mac_promisc_set(port->priv, port->id,
7522 MVPP2_PRS_L2_UNI_CAST, true);
7524 if (dev->flags & IFF_ALLMULTI) {
7525 mvpp2_prs_mac_promisc_set(port->priv, port->id,
7526 MVPP2_PRS_L2_MULTI_CAST, true);
7530 if (netdev_mc_count(dev) > MVPP2_PRS_MAC_MC_FILT_MAX ||
7531 mvpp2_prs_mac_da_accept_list(port, &dev->mc))
7532 mvpp2_prs_mac_promisc_set(port->priv, port->id,
7533 MVPP2_PRS_L2_MULTI_CAST, true);
7536 static int mvpp2_set_mac_address(struct net_device *dev, void *p)
7538 struct mvpp2_port *port = netdev_priv(dev);
7539 const struct sockaddr *addr = p;
7542 if (!is_valid_ether_addr(addr->sa_data)) {
7543 err = -EADDRNOTAVAIL;
7547 if (!netif_running(dev)) {
7548 err = mvpp2_prs_update_mac_da(dev, addr->sa_data);
7551 /* Reconfigure parser to accept the original MAC address */
7552 err = mvpp2_prs_update_mac_da(dev, dev->dev_addr);
7557 mvpp2_stop_dev(port);
7559 err = mvpp2_prs_update_mac_da(dev, addr->sa_data);
7563 /* Reconfigure parser accept the original MAC address */
7564 err = mvpp2_prs_update_mac_da(dev, dev->dev_addr);
7568 mvpp2_start_dev(port);
7569 mvpp2_egress_enable(port);
7570 mvpp2_ingress_enable(port);
7573 netdev_err(dev, "failed to change MAC address\n");
7577 static int mvpp2_change_mtu(struct net_device *dev, int mtu)
7579 struct mvpp2_port *port = netdev_priv(dev);
7582 if (!IS_ALIGNED(MVPP2_RX_PKT_SIZE(mtu), 8)) {
7583 netdev_info(dev, "illegal MTU value %d, round to %d\n", mtu,
7584 ALIGN(MVPP2_RX_PKT_SIZE(mtu), 8));
7585 mtu = ALIGN(MVPP2_RX_PKT_SIZE(mtu), 8);
7588 if (!netif_running(dev)) {
7589 err = mvpp2_bm_update_mtu(dev, mtu);
7591 port->pkt_size = MVPP2_RX_PKT_SIZE(mtu);
7595 /* Reconfigure BM to the original MTU */
7596 err = mvpp2_bm_update_mtu(dev, dev->mtu);
7601 mvpp2_stop_dev(port);
7603 err = mvpp2_bm_update_mtu(dev, mtu);
7605 port->pkt_size = MVPP2_RX_PKT_SIZE(mtu);
7609 /* Reconfigure BM to the original MTU */
7610 err = mvpp2_bm_update_mtu(dev, dev->mtu);
7615 mvpp2_start_dev(port);
7616 mvpp2_egress_enable(port);
7617 mvpp2_ingress_enable(port);
7621 netdev_err(dev, "failed to change MTU\n");
7626 mvpp2_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats)
7628 struct mvpp2_port *port = netdev_priv(dev);
7632 for_each_possible_cpu(cpu) {
7633 struct mvpp2_pcpu_stats *cpu_stats;
7639 cpu_stats = per_cpu_ptr(port->stats, cpu);
7641 start = u64_stats_fetch_begin_irq(&cpu_stats->syncp);
7642 rx_packets = cpu_stats->rx_packets;
7643 rx_bytes = cpu_stats->rx_bytes;
7644 tx_packets = cpu_stats->tx_packets;
7645 tx_bytes = cpu_stats->tx_bytes;
7646 } while (u64_stats_fetch_retry_irq(&cpu_stats->syncp, start));
7648 stats->rx_packets += rx_packets;
7649 stats->rx_bytes += rx_bytes;
7650 stats->tx_packets += tx_packets;
7651 stats->tx_bytes += tx_bytes;
7654 stats->rx_errors = dev->stats.rx_errors;
7655 stats->rx_dropped = dev->stats.rx_dropped;
7656 stats->tx_dropped = dev->stats.tx_dropped;
7659 static int mvpp2_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
7666 ret = phy_mii_ioctl(dev->phydev, ifr, cmd);
7668 mvpp2_link_event(dev);
7673 static int mvpp2_vlan_rx_add_vid(struct net_device *dev, __be16 proto, u16 vid)
7675 struct mvpp2_port *port = netdev_priv(dev);
7678 ret = mvpp2_prs_vid_entry_add(port, vid);
7680 netdev_err(dev, "rx-vlan-filter offloading cannot accept more than %d VIDs per port\n",
7681 MVPP2_PRS_VLAN_FILT_MAX - 1);
7685 static int mvpp2_vlan_rx_kill_vid(struct net_device *dev, __be16 proto, u16 vid)
7687 struct mvpp2_port *port = netdev_priv(dev);
7689 mvpp2_prs_vid_entry_remove(port, vid);
7693 static int mvpp2_set_features(struct net_device *dev,
7694 netdev_features_t features)
7696 netdev_features_t changed = dev->features ^ features;
7697 struct mvpp2_port *port = netdev_priv(dev);
7699 if (changed & NETIF_F_HW_VLAN_CTAG_FILTER) {
7700 if (features & NETIF_F_HW_VLAN_CTAG_FILTER) {
7701 mvpp2_prs_vid_enable_filtering(port);
7703 /* Invalidate all registered VID filters for this
7706 mvpp2_prs_vid_remove_all(port);
7708 mvpp2_prs_vid_disable_filtering(port);
7715 /* Ethtool methods */
7717 /* Set interrupt coalescing for ethtools */
7718 static int mvpp2_ethtool_set_coalesce(struct net_device *dev,
7719 struct ethtool_coalesce *c)
7721 struct mvpp2_port *port = netdev_priv(dev);
7724 for (queue = 0; queue < port->nrxqs; queue++) {
7725 struct mvpp2_rx_queue *rxq = port->rxqs[queue];
7727 rxq->time_coal = c->rx_coalesce_usecs;
7728 rxq->pkts_coal = c->rx_max_coalesced_frames;
7729 mvpp2_rx_pkts_coal_set(port, rxq);
7730 mvpp2_rx_time_coal_set(port, rxq);
7733 if (port->has_tx_irqs) {
7734 port->tx_time_coal = c->tx_coalesce_usecs;
7735 mvpp2_tx_time_coal_set(port);
7738 for (queue = 0; queue < port->ntxqs; queue++) {
7739 struct mvpp2_tx_queue *txq = port->txqs[queue];
7741 txq->done_pkts_coal = c->tx_max_coalesced_frames;
7743 if (port->has_tx_irqs)
7744 mvpp2_tx_pkts_coal_set(port, txq);
7750 /* get coalescing for ethtools */
7751 static int mvpp2_ethtool_get_coalesce(struct net_device *dev,
7752 struct ethtool_coalesce *c)
7754 struct mvpp2_port *port = netdev_priv(dev);
7756 c->rx_coalesce_usecs = port->rxqs[0]->time_coal;
7757 c->rx_max_coalesced_frames = port->rxqs[0]->pkts_coal;
7758 c->tx_max_coalesced_frames = port->txqs[0]->done_pkts_coal;
7759 c->tx_coalesce_usecs = port->tx_time_coal;
7763 static void mvpp2_ethtool_get_drvinfo(struct net_device *dev,
7764 struct ethtool_drvinfo *drvinfo)
7766 strlcpy(drvinfo->driver, MVPP2_DRIVER_NAME,
7767 sizeof(drvinfo->driver));
7768 strlcpy(drvinfo->version, MVPP2_DRIVER_VERSION,
7769 sizeof(drvinfo->version));
7770 strlcpy(drvinfo->bus_info, dev_name(&dev->dev),
7771 sizeof(drvinfo->bus_info));
7774 static void mvpp2_ethtool_get_ringparam(struct net_device *dev,
7775 struct ethtool_ringparam *ring)
7777 struct mvpp2_port *port = netdev_priv(dev);
7779 ring->rx_max_pending = MVPP2_MAX_RXD_MAX;
7780 ring->tx_max_pending = MVPP2_MAX_TXD_MAX;
7781 ring->rx_pending = port->rx_ring_size;
7782 ring->tx_pending = port->tx_ring_size;
7785 static int mvpp2_ethtool_set_ringparam(struct net_device *dev,
7786 struct ethtool_ringparam *ring)
7788 struct mvpp2_port *port = netdev_priv(dev);
7789 u16 prev_rx_ring_size = port->rx_ring_size;
7790 u16 prev_tx_ring_size = port->tx_ring_size;
7793 err = mvpp2_check_ringparam_valid(dev, ring);
7797 if (!netif_running(dev)) {
7798 port->rx_ring_size = ring->rx_pending;
7799 port->tx_ring_size = ring->tx_pending;
7803 /* The interface is running, so we have to force a
7804 * reallocation of the queues
7806 mvpp2_stop_dev(port);
7807 mvpp2_cleanup_rxqs(port);
7808 mvpp2_cleanup_txqs(port);
7810 port->rx_ring_size = ring->rx_pending;
7811 port->tx_ring_size = ring->tx_pending;
7813 err = mvpp2_setup_rxqs(port);
7815 /* Reallocate Rx queues with the original ring size */
7816 port->rx_ring_size = prev_rx_ring_size;
7817 ring->rx_pending = prev_rx_ring_size;
7818 err = mvpp2_setup_rxqs(port);
7822 err = mvpp2_setup_txqs(port);
7824 /* Reallocate Tx queues with the original ring size */
7825 port->tx_ring_size = prev_tx_ring_size;
7826 ring->tx_pending = prev_tx_ring_size;
7827 err = mvpp2_setup_txqs(port);
7829 goto err_clean_rxqs;
7832 mvpp2_start_dev(port);
7833 mvpp2_egress_enable(port);
7834 mvpp2_ingress_enable(port);
7839 mvpp2_cleanup_rxqs(port);
7841 netdev_err(dev, "failed to change ring parameters");
7847 static const struct net_device_ops mvpp2_netdev_ops = {
7848 .ndo_open = mvpp2_open,
7849 .ndo_stop = mvpp2_stop,
7850 .ndo_start_xmit = mvpp2_tx,
7851 .ndo_set_rx_mode = mvpp2_set_rx_mode,
7852 .ndo_set_mac_address = mvpp2_set_mac_address,
7853 .ndo_change_mtu = mvpp2_change_mtu,
7854 .ndo_get_stats64 = mvpp2_get_stats64,
7855 .ndo_do_ioctl = mvpp2_ioctl,
7856 .ndo_vlan_rx_add_vid = mvpp2_vlan_rx_add_vid,
7857 .ndo_vlan_rx_kill_vid = mvpp2_vlan_rx_kill_vid,
7858 .ndo_set_features = mvpp2_set_features,
7861 static const struct ethtool_ops mvpp2_eth_tool_ops = {
7862 .nway_reset = phy_ethtool_nway_reset,
7863 .get_link = ethtool_op_get_link,
7864 .set_coalesce = mvpp2_ethtool_set_coalesce,
7865 .get_coalesce = mvpp2_ethtool_get_coalesce,
7866 .get_drvinfo = mvpp2_ethtool_get_drvinfo,
7867 .get_ringparam = mvpp2_ethtool_get_ringparam,
7868 .set_ringparam = mvpp2_ethtool_set_ringparam,
7869 .get_strings = mvpp2_ethtool_get_strings,
7870 .get_ethtool_stats = mvpp2_ethtool_get_stats,
7871 .get_sset_count = mvpp2_ethtool_get_sset_count,
7872 .get_link_ksettings = phy_ethtool_get_link_ksettings,
7873 .set_link_ksettings = phy_ethtool_set_link_ksettings,
7876 /* Used for PPv2.1, or PPv2.2 with the old Device Tree binding that
7877 * had a single IRQ defined per-port.
7879 static int mvpp2_simple_queue_vectors_init(struct mvpp2_port *port,
7880 struct device_node *port_node)
7882 struct mvpp2_queue_vector *v = &port->qvecs[0];
7885 v->nrxqs = port->nrxqs;
7886 v->type = MVPP2_QUEUE_VECTOR_SHARED;
7887 v->sw_thread_id = 0;
7888 v->sw_thread_mask = *cpumask_bits(cpu_online_mask);
7890 v->irq = irq_of_parse_and_map(port_node, 0);
7893 netif_napi_add(port->dev, &v->napi, mvpp2_poll,
7901 static int mvpp2_multi_queue_vectors_init(struct mvpp2_port *port,
7902 struct device_node *port_node)
7904 struct mvpp2_queue_vector *v;
7907 port->nqvecs = num_possible_cpus();
7908 if (queue_mode == MVPP2_QDIST_SINGLE_MODE)
7911 for (i = 0; i < port->nqvecs; i++) {
7914 v = port->qvecs + i;
7917 v->type = MVPP2_QUEUE_VECTOR_PRIVATE;
7918 v->sw_thread_id = i;
7919 v->sw_thread_mask = BIT(i);
7921 snprintf(irqname, sizeof(irqname), "tx-cpu%d", i);
7923 if (queue_mode == MVPP2_QDIST_MULTI_MODE) {
7924 v->first_rxq = i * MVPP2_DEFAULT_RXQ;
7925 v->nrxqs = MVPP2_DEFAULT_RXQ;
7926 } else if (queue_mode == MVPP2_QDIST_SINGLE_MODE &&
7927 i == (port->nqvecs - 1)) {
7929 v->nrxqs = port->nrxqs;
7930 v->type = MVPP2_QUEUE_VECTOR_SHARED;
7931 strncpy(irqname, "rx-shared", sizeof(irqname));
7935 v->irq = of_irq_get_byname(port_node, irqname);
7937 v->irq = fwnode_irq_get(port->fwnode, i);
7943 netif_napi_add(port->dev, &v->napi, mvpp2_poll,
7950 for (i = 0; i < port->nqvecs; i++)
7951 irq_dispose_mapping(port->qvecs[i].irq);
7955 static int mvpp2_queue_vectors_init(struct mvpp2_port *port,
7956 struct device_node *port_node)
7958 if (port->has_tx_irqs)
7959 return mvpp2_multi_queue_vectors_init(port, port_node);
7961 return mvpp2_simple_queue_vectors_init(port, port_node);
7964 static void mvpp2_queue_vectors_deinit(struct mvpp2_port *port)
7968 for (i = 0; i < port->nqvecs; i++)
7969 irq_dispose_mapping(port->qvecs[i].irq);
7972 /* Configure Rx queue group interrupt for this port */
7973 static void mvpp2_rx_irqs_setup(struct mvpp2_port *port)
7975 struct mvpp2 *priv = port->priv;
7979 if (priv->hw_version == MVPP21) {
7980 mvpp2_write(priv, MVPP21_ISR_RXQ_GROUP_REG(port->id),
7985 /* Handle the more complicated PPv2.2 case */
7986 for (i = 0; i < port->nqvecs; i++) {
7987 struct mvpp2_queue_vector *qv = port->qvecs + i;
7992 val = qv->sw_thread_id;
7993 val |= port->id << MVPP22_ISR_RXQ_GROUP_INDEX_GROUP_OFFSET;
7994 mvpp2_write(priv, MVPP22_ISR_RXQ_GROUP_INDEX_REG, val);
7996 val = qv->first_rxq;
7997 val |= qv->nrxqs << MVPP22_ISR_RXQ_SUB_GROUP_SIZE_OFFSET;
7998 mvpp2_write(priv, MVPP22_ISR_RXQ_SUB_GROUP_CONFIG_REG, val);
8002 /* Initialize port HW */
8003 static int mvpp2_port_init(struct mvpp2_port *port)
8005 struct device *dev = port->dev->dev.parent;
8006 struct mvpp2 *priv = port->priv;
8007 struct mvpp2_txq_pcpu *txq_pcpu;
8008 int queue, cpu, err;
8010 /* Checks for hardware constraints */
8011 if (port->first_rxq + port->nrxqs >
8012 MVPP2_MAX_PORTS * priv->max_port_rxqs)
8015 if (port->nrxqs % 4 || (port->nrxqs > priv->max_port_rxqs) ||
8016 (port->ntxqs > MVPP2_MAX_TXQ))
8020 mvpp2_egress_disable(port);
8021 mvpp2_port_disable(port);
8023 port->tx_time_coal = MVPP2_TXDONE_COAL_USEC;
8025 port->txqs = devm_kcalloc(dev, port->ntxqs, sizeof(*port->txqs),
8030 /* Associate physical Tx queues to this port and initialize.
8031 * The mapping is predefined.
8033 for (queue = 0; queue < port->ntxqs; queue++) {
8034 int queue_phy_id = mvpp2_txq_phys(port->id, queue);
8035 struct mvpp2_tx_queue *txq;
8037 txq = devm_kzalloc(dev, sizeof(*txq), GFP_KERNEL);
8040 goto err_free_percpu;
8043 txq->pcpu = alloc_percpu(struct mvpp2_txq_pcpu);
8046 goto err_free_percpu;
8049 txq->id = queue_phy_id;
8050 txq->log_id = queue;
8051 txq->done_pkts_coal = MVPP2_TXDONE_COAL_PKTS_THRESH;
8052 for_each_present_cpu(cpu) {
8053 txq_pcpu = per_cpu_ptr(txq->pcpu, cpu);
8054 txq_pcpu->cpu = cpu;
8057 port->txqs[queue] = txq;
8060 port->rxqs = devm_kcalloc(dev, port->nrxqs, sizeof(*port->rxqs),
8064 goto err_free_percpu;
8067 /* Allocate and initialize Rx queue for this port */
8068 for (queue = 0; queue < port->nrxqs; queue++) {
8069 struct mvpp2_rx_queue *rxq;
8071 /* Map physical Rx queue to port's logical Rx queue */
8072 rxq = devm_kzalloc(dev, sizeof(*rxq), GFP_KERNEL);
8075 goto err_free_percpu;
8077 /* Map this Rx queue to a physical queue */
8078 rxq->id = port->first_rxq + queue;
8079 rxq->port = port->id;
8080 rxq->logic_rxq = queue;
8082 port->rxqs[queue] = rxq;
8085 mvpp2_rx_irqs_setup(port);
8087 /* Create Rx descriptor rings */
8088 for (queue = 0; queue < port->nrxqs; queue++) {
8089 struct mvpp2_rx_queue *rxq = port->rxqs[queue];
8091 rxq->size = port->rx_ring_size;
8092 rxq->pkts_coal = MVPP2_RX_COAL_PKTS;
8093 rxq->time_coal = MVPP2_RX_COAL_USEC;
8096 mvpp2_ingress_disable(port);
8098 /* Port default configuration */
8099 mvpp2_defaults_set(port);
8101 /* Port's classifier configuration */
8102 mvpp2_cls_oversize_rxq_set(port);
8103 mvpp2_cls_port_config(port);
8105 /* Provide an initial Rx packet size */
8106 port->pkt_size = MVPP2_RX_PKT_SIZE(port->dev->mtu);
8108 /* Initialize pools for swf */
8109 err = mvpp2_swf_bm_pool_init(port);
8111 goto err_free_percpu;
8116 for (queue = 0; queue < port->ntxqs; queue++) {
8117 if (!port->txqs[queue])
8119 free_percpu(port->txqs[queue]->pcpu);
8124 /* Checks if the port DT description has the TX interrupts
8125 * described. On PPv2.1, there are no such interrupts. On PPv2.2,
8126 * there are available, but we need to keep support for old DTs.
8128 static bool mvpp2_port_has_tx_irqs(struct mvpp2 *priv,
8129 struct device_node *port_node)
8131 char *irqs[5] = { "rx-shared", "tx-cpu0", "tx-cpu1",
8132 "tx-cpu2", "tx-cpu3" };
8135 if (priv->hw_version == MVPP21)
8138 for (i = 0; i < 5; i++) {
8139 ret = of_property_match_string(port_node, "interrupt-names",
8148 static void mvpp2_port_copy_mac_addr(struct net_device *dev, struct mvpp2 *priv,
8149 struct fwnode_handle *fwnode,
8152 struct mvpp2_port *port = netdev_priv(dev);
8153 char hw_mac_addr[ETH_ALEN] = {0};
8154 char fw_mac_addr[ETH_ALEN];
8156 if (fwnode_get_mac_address(fwnode, fw_mac_addr, ETH_ALEN)) {
8157 *mac_from = "firmware node";
8158 ether_addr_copy(dev->dev_addr, fw_mac_addr);
8162 if (priv->hw_version == MVPP21) {
8163 mvpp21_get_mac_address(port, hw_mac_addr);
8164 if (is_valid_ether_addr(hw_mac_addr)) {
8165 *mac_from = "hardware";
8166 ether_addr_copy(dev->dev_addr, hw_mac_addr);
8171 *mac_from = "random";
8172 eth_hw_addr_random(dev);
8175 /* Ports initialization */
8176 static int mvpp2_port_probe(struct platform_device *pdev,
8177 struct fwnode_handle *port_fwnode,
8180 struct device_node *phy_node;
8181 struct phy *comphy = NULL;
8182 struct mvpp2_port *port;
8183 struct mvpp2_port_pcpu *port_pcpu;
8184 struct device_node *port_node = to_of_node(port_fwnode);
8185 struct net_device *dev;
8186 struct resource *res;
8187 char *mac_from = "";
8188 unsigned int ntxqs, nrxqs;
8196 has_tx_irqs = mvpp2_port_has_tx_irqs(priv, port_node);
8199 queue_mode = MVPP2_QDIST_MULTI_MODE;
8203 queue_mode = MVPP2_QDIST_SINGLE_MODE;
8205 ntxqs = MVPP2_MAX_TXQ;
8206 if (priv->hw_version == MVPP22 && queue_mode == MVPP2_QDIST_MULTI_MODE)
8207 nrxqs = MVPP2_DEFAULT_RXQ * num_possible_cpus();
8209 nrxqs = MVPP2_DEFAULT_RXQ;
8211 dev = alloc_etherdev_mqs(sizeof(*port), ntxqs, nrxqs);
8216 phy_node = of_parse_phandle(port_node, "phy", 0);
8220 phy_mode = fwnode_get_phy_mode(port_fwnode);
8222 dev_err(&pdev->dev, "incorrect phy mode\n");
8224 goto err_free_netdev;
8228 comphy = devm_of_phy_get(&pdev->dev, port_node, NULL);
8229 if (IS_ERR(comphy)) {
8230 if (PTR_ERR(comphy) == -EPROBE_DEFER) {
8231 err = -EPROBE_DEFER;
8232 goto err_free_netdev;
8238 if (fwnode_property_read_u32(port_fwnode, "port-id", &id)) {
8240 dev_err(&pdev->dev, "missing port-id value\n");
8241 goto err_free_netdev;
8244 dev->tx_queue_len = MVPP2_MAX_TXD_MAX;
8245 dev->watchdog_timeo = 5 * HZ;
8246 dev->netdev_ops = &mvpp2_netdev_ops;
8247 dev->ethtool_ops = &mvpp2_eth_tool_ops;
8249 port = netdev_priv(dev);
8251 port->fwnode = port_fwnode;
8252 port->ntxqs = ntxqs;
8253 port->nrxqs = nrxqs;
8255 port->has_tx_irqs = has_tx_irqs;
8257 err = mvpp2_queue_vectors_init(port, port_node);
8259 goto err_free_netdev;
8262 port->link_irq = of_irq_get_byname(port_node, "link");
8264 port->link_irq = fwnode_irq_get(port_fwnode, port->nqvecs + 1);
8265 if (port->link_irq == -EPROBE_DEFER) {
8266 err = -EPROBE_DEFER;
8267 goto err_deinit_qvecs;
8269 if (port->link_irq <= 0)
8270 /* the link irq is optional */
8273 if (fwnode_property_read_bool(port_fwnode, "marvell,loopback"))
8274 port->flags |= MVPP2_F_LOOPBACK;
8277 if (priv->hw_version == MVPP21)
8278 port->first_rxq = port->id * port->nrxqs;
8280 port->first_rxq = port->id * priv->max_port_rxqs;
8282 port->phy_node = phy_node;
8283 port->phy_interface = phy_mode;
8284 port->comphy = comphy;
8286 if (priv->hw_version == MVPP21) {
8287 res = platform_get_resource(pdev, IORESOURCE_MEM, 2 + id);
8288 port->base = devm_ioremap_resource(&pdev->dev, res);
8289 if (IS_ERR(port->base)) {
8290 err = PTR_ERR(port->base);
8294 port->stats_base = port->priv->lms_base +
8295 MVPP21_MIB_COUNTERS_OFFSET +
8296 port->gop_id * MVPP21_MIB_COUNTERS_PORT_SZ;
8298 if (fwnode_property_read_u32(port_fwnode, "gop-port-id",
8301 dev_err(&pdev->dev, "missing gop-port-id value\n");
8302 goto err_deinit_qvecs;
8305 port->base = priv->iface_base + MVPP22_GMAC_BASE(port->gop_id);
8306 port->stats_base = port->priv->iface_base +
8307 MVPP22_MIB_COUNTERS_OFFSET +
8308 port->gop_id * MVPP22_MIB_COUNTERS_PORT_SZ;
8311 /* Alloc per-cpu and ethtool stats */
8312 port->stats = netdev_alloc_pcpu_stats(struct mvpp2_pcpu_stats);
8318 port->ethtool_stats = devm_kcalloc(&pdev->dev,
8319 ARRAY_SIZE(mvpp2_ethtool_regs),
8320 sizeof(u64), GFP_KERNEL);
8321 if (!port->ethtool_stats) {
8323 goto err_free_stats;
8326 mutex_init(&port->gather_stats_lock);
8327 INIT_DELAYED_WORK(&port->stats_work, mvpp2_gather_hw_statistics);
8329 mvpp2_port_copy_mac_addr(dev, priv, port_fwnode, &mac_from);
8331 port->tx_ring_size = MVPP2_MAX_TXD_DFLT;
8332 port->rx_ring_size = MVPP2_MAX_RXD_DFLT;
8333 SET_NETDEV_DEV(dev, &pdev->dev);
8335 err = mvpp2_port_init(port);
8337 dev_err(&pdev->dev, "failed to init port %d\n", id);
8338 goto err_free_stats;
8341 mvpp2_port_periodic_xon_disable(port);
8343 if (priv->hw_version == MVPP21)
8344 mvpp2_port_fc_adv_enable(port);
8346 mvpp2_port_reset(port);
8348 port->pcpu = alloc_percpu(struct mvpp2_port_pcpu);
8351 goto err_free_txq_pcpu;
8354 if (!port->has_tx_irqs) {
8355 for_each_present_cpu(cpu) {
8356 port_pcpu = per_cpu_ptr(port->pcpu, cpu);
8358 hrtimer_init(&port_pcpu->tx_done_timer, CLOCK_MONOTONIC,
8359 HRTIMER_MODE_REL_PINNED);
8360 port_pcpu->tx_done_timer.function = mvpp2_hr_timer_cb;
8361 port_pcpu->timer_scheduled = false;
8363 tasklet_init(&port_pcpu->tx_done_tasklet,
8365 (unsigned long)dev);
8369 features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
8371 dev->features = features | NETIF_F_RXCSUM;
8372 dev->hw_features |= features | NETIF_F_RXCSUM | NETIF_F_GRO |
8373 NETIF_F_HW_VLAN_CTAG_FILTER;
8375 if (port->pool_long->id == MVPP2_BM_JUMBO && port->id != 0) {
8376 dev->features &= ~(NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM);
8377 dev->hw_features &= ~(NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM);
8380 dev->vlan_features |= features;
8381 dev->gso_max_segs = MVPP2_MAX_TSO_SEGS;
8382 dev->priv_flags |= IFF_UNICAST_FLT;
8384 /* MTU range: 68 - 9704 */
8385 dev->min_mtu = ETH_MIN_MTU;
8386 /* 9704 == 9728 - 20 and rounding to 8 */
8387 dev->max_mtu = MVPP2_BM_JUMBO_PKT_SIZE;
8389 err = register_netdev(dev);
8391 dev_err(&pdev->dev, "failed to register netdev\n");
8392 goto err_free_port_pcpu;
8394 netdev_info(dev, "Using %s mac address %pM\n", mac_from, dev->dev_addr);
8396 priv->port_list[priv->port_count++] = port;
8401 free_percpu(port->pcpu);
8403 for (i = 0; i < port->ntxqs; i++)
8404 free_percpu(port->txqs[i]->pcpu);
8406 free_percpu(port->stats);
8409 irq_dispose_mapping(port->link_irq);
8411 mvpp2_queue_vectors_deinit(port);
8413 of_node_put(phy_node);
8418 /* Ports removal routine */
8419 static void mvpp2_port_remove(struct mvpp2_port *port)
8423 unregister_netdev(port->dev);
8424 of_node_put(port->phy_node);
8425 free_percpu(port->pcpu);
8426 free_percpu(port->stats);
8427 for (i = 0; i < port->ntxqs; i++)
8428 free_percpu(port->txqs[i]->pcpu);
8429 mvpp2_queue_vectors_deinit(port);
8431 irq_dispose_mapping(port->link_irq);
8432 free_netdev(port->dev);
8435 /* Initialize decoding windows */
8436 static void mvpp2_conf_mbus_windows(const struct mbus_dram_target_info *dram,
8442 for (i = 0; i < 6; i++) {
8443 mvpp2_write(priv, MVPP2_WIN_BASE(i), 0);
8444 mvpp2_write(priv, MVPP2_WIN_SIZE(i), 0);
8447 mvpp2_write(priv, MVPP2_WIN_REMAP(i), 0);
8452 for (i = 0; i < dram->num_cs; i++) {
8453 const struct mbus_dram_window *cs = dram->cs + i;
8455 mvpp2_write(priv, MVPP2_WIN_BASE(i),
8456 (cs->base & 0xffff0000) | (cs->mbus_attr << 8) |
8457 dram->mbus_dram_target_id);
8459 mvpp2_write(priv, MVPP2_WIN_SIZE(i),
8460 (cs->size - 1) & 0xffff0000);
8462 win_enable |= (1 << i);
8465 mvpp2_write(priv, MVPP2_BASE_ADDR_ENABLE, win_enable);
8468 /* Initialize Rx FIFO's */
8469 static void mvpp2_rx_fifo_init(struct mvpp2 *priv)
8473 for (port = 0; port < MVPP2_MAX_PORTS; port++) {
8474 mvpp2_write(priv, MVPP2_RX_DATA_FIFO_SIZE_REG(port),
8475 MVPP2_RX_FIFO_PORT_DATA_SIZE_4KB);
8476 mvpp2_write(priv, MVPP2_RX_ATTR_FIFO_SIZE_REG(port),
8477 MVPP2_RX_FIFO_PORT_ATTR_SIZE_4KB);
8480 mvpp2_write(priv, MVPP2_RX_MIN_PKT_SIZE_REG,
8481 MVPP2_RX_FIFO_PORT_MIN_PKT);
8482 mvpp2_write(priv, MVPP2_RX_FIFO_INIT_REG, 0x1);
8485 static void mvpp22_rx_fifo_init(struct mvpp2 *priv)
8489 /* The FIFO size parameters are set depending on the maximum speed a
8490 * given port can handle:
8493 * - Ports 2 and 3: 1Gbps
8496 mvpp2_write(priv, MVPP2_RX_DATA_FIFO_SIZE_REG(0),
8497 MVPP2_RX_FIFO_PORT_DATA_SIZE_32KB);
8498 mvpp2_write(priv, MVPP2_RX_ATTR_FIFO_SIZE_REG(0),
8499 MVPP2_RX_FIFO_PORT_ATTR_SIZE_32KB);
8501 mvpp2_write(priv, MVPP2_RX_DATA_FIFO_SIZE_REG(1),
8502 MVPP2_RX_FIFO_PORT_DATA_SIZE_8KB);
8503 mvpp2_write(priv, MVPP2_RX_ATTR_FIFO_SIZE_REG(1),
8504 MVPP2_RX_FIFO_PORT_ATTR_SIZE_8KB);
8506 for (port = 2; port < MVPP2_MAX_PORTS; port++) {
8507 mvpp2_write(priv, MVPP2_RX_DATA_FIFO_SIZE_REG(port),
8508 MVPP2_RX_FIFO_PORT_DATA_SIZE_4KB);
8509 mvpp2_write(priv, MVPP2_RX_ATTR_FIFO_SIZE_REG(port),
8510 MVPP2_RX_FIFO_PORT_ATTR_SIZE_4KB);
8513 mvpp2_write(priv, MVPP2_RX_MIN_PKT_SIZE_REG,
8514 MVPP2_RX_FIFO_PORT_MIN_PKT);
8515 mvpp2_write(priv, MVPP2_RX_FIFO_INIT_REG, 0x1);
8518 /* Initialize Tx FIFO's: the total FIFO size is 19kB on PPv2.2 and 10G
8519 * interfaces must have a Tx FIFO size of 10kB. As only port 0 can do 10G,
8520 * configure its Tx FIFO size to 10kB and the others ports Tx FIFO size to 3kB.
8522 static void mvpp22_tx_fifo_init(struct mvpp2 *priv)
8524 int port, size, thrs;
8526 for (port = 0; port < MVPP2_MAX_PORTS; port++) {
8528 size = MVPP22_TX_FIFO_DATA_SIZE_10KB;
8529 thrs = MVPP2_TX_FIFO_THRESHOLD_10KB;
8531 size = MVPP22_TX_FIFO_DATA_SIZE_3KB;
8532 thrs = MVPP2_TX_FIFO_THRESHOLD_3KB;
8534 mvpp2_write(priv, MVPP22_TX_FIFO_SIZE_REG(port), size);
8535 mvpp2_write(priv, MVPP22_TX_FIFO_THRESH_REG(port), thrs);
8539 static void mvpp2_axi_init(struct mvpp2 *priv)
8541 u32 val, rdval, wrval;
8543 mvpp2_write(priv, MVPP22_BM_ADDR_HIGH_RLS_REG, 0x0);
8545 /* AXI Bridge Configuration */
8547 rdval = MVPP22_AXI_CODE_CACHE_RD_CACHE
8548 << MVPP22_AXI_ATTR_CACHE_OFFS;
8549 rdval |= MVPP22_AXI_CODE_DOMAIN_OUTER_DOM
8550 << MVPP22_AXI_ATTR_DOMAIN_OFFS;
8552 wrval = MVPP22_AXI_CODE_CACHE_WR_CACHE
8553 << MVPP22_AXI_ATTR_CACHE_OFFS;
8554 wrval |= MVPP22_AXI_CODE_DOMAIN_OUTER_DOM
8555 << MVPP22_AXI_ATTR_DOMAIN_OFFS;
8558 mvpp2_write(priv, MVPP22_AXI_BM_WR_ATTR_REG, wrval);
8559 mvpp2_write(priv, MVPP22_AXI_BM_RD_ATTR_REG, rdval);
8562 mvpp2_write(priv, MVPP22_AXI_AGGRQ_DESCR_RD_ATTR_REG, rdval);
8563 mvpp2_write(priv, MVPP22_AXI_TXQ_DESCR_WR_ATTR_REG, wrval);
8564 mvpp2_write(priv, MVPP22_AXI_TXQ_DESCR_RD_ATTR_REG, rdval);
8565 mvpp2_write(priv, MVPP22_AXI_RXQ_DESCR_WR_ATTR_REG, wrval);
8568 mvpp2_write(priv, MVPP22_AXI_TX_DATA_RD_ATTR_REG, rdval);
8569 mvpp2_write(priv, MVPP22_AXI_RX_DATA_WR_ATTR_REG, wrval);
8571 val = MVPP22_AXI_CODE_CACHE_NON_CACHE
8572 << MVPP22_AXI_CODE_CACHE_OFFS;
8573 val |= MVPP22_AXI_CODE_DOMAIN_SYSTEM
8574 << MVPP22_AXI_CODE_DOMAIN_OFFS;
8575 mvpp2_write(priv, MVPP22_AXI_RD_NORMAL_CODE_REG, val);
8576 mvpp2_write(priv, MVPP22_AXI_WR_NORMAL_CODE_REG, val);
8578 val = MVPP22_AXI_CODE_CACHE_RD_CACHE
8579 << MVPP22_AXI_CODE_CACHE_OFFS;
8580 val |= MVPP22_AXI_CODE_DOMAIN_OUTER_DOM
8581 << MVPP22_AXI_CODE_DOMAIN_OFFS;
8583 mvpp2_write(priv, MVPP22_AXI_RD_SNOOP_CODE_REG, val);
8585 val = MVPP22_AXI_CODE_CACHE_WR_CACHE
8586 << MVPP22_AXI_CODE_CACHE_OFFS;
8587 val |= MVPP22_AXI_CODE_DOMAIN_OUTER_DOM
8588 << MVPP22_AXI_CODE_DOMAIN_OFFS;
8590 mvpp2_write(priv, MVPP22_AXI_WR_SNOOP_CODE_REG, val);
8593 /* Initialize network controller common part HW */
8594 static int mvpp2_init(struct platform_device *pdev, struct mvpp2 *priv)
8596 const struct mbus_dram_target_info *dram_target_info;
8600 /* MBUS windows configuration */
8601 dram_target_info = mv_mbus_dram_info();
8602 if (dram_target_info)
8603 mvpp2_conf_mbus_windows(dram_target_info, priv);
8605 if (priv->hw_version == MVPP22)
8606 mvpp2_axi_init(priv);
8608 /* Disable HW PHY polling */
8609 if (priv->hw_version == MVPP21) {
8610 val = readl(priv->lms_base + MVPP2_PHY_AN_CFG0_REG);
8611 val |= MVPP2_PHY_AN_STOP_SMI0_MASK;
8612 writel(val, priv->lms_base + MVPP2_PHY_AN_CFG0_REG);
8614 val = readl(priv->iface_base + MVPP22_SMI_MISC_CFG_REG);
8615 val &= ~MVPP22_SMI_POLLING_EN;
8616 writel(val, priv->iface_base + MVPP22_SMI_MISC_CFG_REG);
8619 /* Allocate and initialize aggregated TXQs */
8620 priv->aggr_txqs = devm_kcalloc(&pdev->dev, num_present_cpus(),
8621 sizeof(*priv->aggr_txqs),
8623 if (!priv->aggr_txqs)
8626 for_each_present_cpu(i) {
8627 priv->aggr_txqs[i].id = i;
8628 priv->aggr_txqs[i].size = MVPP2_AGGR_TXQ_SIZE;
8629 err = mvpp2_aggr_txq_init(pdev, &priv->aggr_txqs[i], i, priv);
8635 if (priv->hw_version == MVPP21) {
8636 mvpp2_rx_fifo_init(priv);
8638 mvpp22_rx_fifo_init(priv);
8639 mvpp22_tx_fifo_init(priv);
8642 if (priv->hw_version == MVPP21)
8643 writel(MVPP2_EXT_GLOBAL_CTRL_DEFAULT,
8644 priv->lms_base + MVPP2_MNG_EXTENDED_GLOBAL_CTRL_REG);
8646 /* Allow cache snoop when transmiting packets */
8647 mvpp2_write(priv, MVPP2_TX_SNOOP_REG, 0x1);
8649 /* Buffer Manager initialization */
8650 err = mvpp2_bm_init(pdev, priv);
8654 /* Parser default initialization */
8655 err = mvpp2_prs_default_init(pdev, priv);
8659 /* Classifier default initialization */
8660 mvpp2_cls_init(priv);
8665 static int mvpp2_probe(struct platform_device *pdev)
8667 const struct acpi_device_id *acpi_id;
8668 struct fwnode_handle *fwnode = pdev->dev.fwnode;
8669 struct fwnode_handle *port_fwnode;
8671 struct resource *res;
8676 priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL);
8680 if (has_acpi_companion(&pdev->dev)) {
8681 acpi_id = acpi_match_device(pdev->dev.driver->acpi_match_table,
8683 priv->hw_version = (unsigned long)acpi_id->driver_data;
8686 (unsigned long)of_device_get_match_data(&pdev->dev);
8689 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
8690 base = devm_ioremap_resource(&pdev->dev, res);
8692 return PTR_ERR(base);
8694 if (priv->hw_version == MVPP21) {
8695 res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
8696 priv->lms_base = devm_ioremap_resource(&pdev->dev, res);
8697 if (IS_ERR(priv->lms_base))
8698 return PTR_ERR(priv->lms_base);
8700 res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
8701 if (has_acpi_companion(&pdev->dev)) {
8702 /* In case the MDIO memory region is declared in
8703 * the ACPI, it can already appear as 'in-use'
8704 * in the OS. Because it is overlapped by second
8705 * region of the network controller, make
8706 * sure it is released, before requesting it again.
8707 * The care is taken by mvpp2 driver to avoid
8708 * concurrent access to this memory region.
8710 release_resource(res);
8712 priv->iface_base = devm_ioremap_resource(&pdev->dev, res);
8713 if (IS_ERR(priv->iface_base))
8714 return PTR_ERR(priv->iface_base);
8717 if (priv->hw_version == MVPP22 && dev_of_node(&pdev->dev)) {
8718 priv->sysctrl_base =
8719 syscon_regmap_lookup_by_phandle(pdev->dev.of_node,
8720 "marvell,system-controller");
8721 if (IS_ERR(priv->sysctrl_base))
8722 /* The system controller regmap is optional for dt
8723 * compatibility reasons. When not provided, the
8724 * configuration of the GoP relies on the
8725 * firmware/bootloader.
8727 priv->sysctrl_base = NULL;
8730 mvpp2_setup_bm_pool();
8732 for (i = 0; i < MVPP2_MAX_THREADS; i++) {
8735 addr_space_sz = (priv->hw_version == MVPP21 ?
8736 MVPP21_ADDR_SPACE_SZ : MVPP22_ADDR_SPACE_SZ);
8737 priv->swth_base[i] = base + i * addr_space_sz;
8740 if (priv->hw_version == MVPP21)
8741 priv->max_port_rxqs = 8;
8743 priv->max_port_rxqs = 32;
8745 if (dev_of_node(&pdev->dev)) {
8746 priv->pp_clk = devm_clk_get(&pdev->dev, "pp_clk");
8747 if (IS_ERR(priv->pp_clk))
8748 return PTR_ERR(priv->pp_clk);
8749 err = clk_prepare_enable(priv->pp_clk);
8753 priv->gop_clk = devm_clk_get(&pdev->dev, "gop_clk");
8754 if (IS_ERR(priv->gop_clk)) {
8755 err = PTR_ERR(priv->gop_clk);
8758 err = clk_prepare_enable(priv->gop_clk);
8762 if (priv->hw_version == MVPP22) {
8763 priv->mg_clk = devm_clk_get(&pdev->dev, "mg_clk");
8764 if (IS_ERR(priv->mg_clk)) {
8765 err = PTR_ERR(priv->mg_clk);
8769 err = clk_prepare_enable(priv->mg_clk);
8773 priv->mg_core_clk = devm_clk_get(&pdev->dev, "mg_core_clk");
8774 if (IS_ERR(priv->mg_core_clk)) {
8775 priv->mg_core_clk = NULL;
8777 err = clk_prepare_enable(priv->mg_core_clk);
8783 priv->axi_clk = devm_clk_get(&pdev->dev, "axi_clk");
8784 if (IS_ERR(priv->axi_clk)) {
8785 err = PTR_ERR(priv->axi_clk);
8786 if (err == -EPROBE_DEFER)
8787 goto err_mg_core_clk;
8788 priv->axi_clk = NULL;
8790 err = clk_prepare_enable(priv->axi_clk);
8792 goto err_mg_core_clk;
8795 /* Get system's tclk rate */
8796 priv->tclk = clk_get_rate(priv->pp_clk);
8797 } else if (device_property_read_u32(&pdev->dev, "clock-frequency",
8799 dev_err(&pdev->dev, "missing clock-frequency value\n");
8803 if (priv->hw_version == MVPP22) {
8804 err = dma_set_mask(&pdev->dev, MVPP2_DESC_DMA_MASK);
8807 /* Sadly, the BM pools all share the same register to
8808 * store the high 32 bits of their address. So they
8809 * must all have the same high 32 bits, which forces
8810 * us to restrict coherent memory to DMA_BIT_MASK(32).
8812 err = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(32));
8817 /* Initialize network controller */
8818 err = mvpp2_init(pdev, priv);
8820 dev_err(&pdev->dev, "failed to initialize controller\n");
8824 /* Initialize ports */
8825 fwnode_for_each_available_child_node(fwnode, port_fwnode) {
8826 err = mvpp2_port_probe(pdev, port_fwnode, priv);
8828 goto err_port_probe;
8831 if (priv->port_count == 0) {
8832 dev_err(&pdev->dev, "no ports enabled\n");
8837 /* Statistics must be gathered regularly because some of them (like
8838 * packets counters) are 32-bit registers and could overflow quite
8839 * quickly. For instance, a 10Gb link used at full bandwidth with the
8840 * smallest packets (64B) will overflow a 32-bit counter in less than
8841 * 30 seconds. Then, use a workqueue to fill 64-bit counters.
8843 snprintf(priv->queue_name, sizeof(priv->queue_name),
8844 "stats-wq-%s%s", netdev_name(priv->port_list[0]->dev),
8845 priv->port_count > 1 ? "+" : "");
8846 priv->stats_queue = create_singlethread_workqueue(priv->queue_name);
8847 if (!priv->stats_queue) {
8849 goto err_port_probe;
8852 platform_set_drvdata(pdev, priv);
8857 fwnode_for_each_available_child_node(fwnode, port_fwnode) {
8858 if (priv->port_list[i])
8859 mvpp2_port_remove(priv->port_list[i]);
8863 clk_disable_unprepare(priv->axi_clk);
8866 if (priv->hw_version == MVPP22)
8867 clk_disable_unprepare(priv->mg_core_clk);
8869 if (priv->hw_version == MVPP22)
8870 clk_disable_unprepare(priv->mg_clk);
8872 clk_disable_unprepare(priv->gop_clk);
8874 clk_disable_unprepare(priv->pp_clk);
8878 static int mvpp2_remove(struct platform_device *pdev)
8880 struct mvpp2 *priv = platform_get_drvdata(pdev);
8881 struct fwnode_handle *fwnode = pdev->dev.fwnode;
8882 struct fwnode_handle *port_fwnode;
8885 flush_workqueue(priv->stats_queue);
8886 destroy_workqueue(priv->stats_queue);
8888 fwnode_for_each_available_child_node(fwnode, port_fwnode) {
8889 if (priv->port_list[i]) {
8890 mutex_destroy(&priv->port_list[i]->gather_stats_lock);
8891 mvpp2_port_remove(priv->port_list[i]);
8896 for (i = 0; i < MVPP2_BM_POOLS_NUM; i++) {
8897 struct mvpp2_bm_pool *bm_pool = &priv->bm_pools[i];
8899 mvpp2_bm_pool_destroy(pdev, priv, bm_pool);
8902 for_each_present_cpu(i) {
8903 struct mvpp2_tx_queue *aggr_txq = &priv->aggr_txqs[i];
8905 dma_free_coherent(&pdev->dev,
8906 MVPP2_AGGR_TXQ_SIZE * MVPP2_DESC_ALIGNED_SIZE,
8908 aggr_txq->descs_dma);
8911 if (is_acpi_node(port_fwnode))
8914 clk_disable_unprepare(priv->axi_clk);
8915 clk_disable_unprepare(priv->mg_core_clk);
8916 clk_disable_unprepare(priv->mg_clk);
8917 clk_disable_unprepare(priv->pp_clk);
8918 clk_disable_unprepare(priv->gop_clk);
8923 static const struct of_device_id mvpp2_match[] = {
8925 .compatible = "marvell,armada-375-pp2",
8926 .data = (void *)MVPP21,
8929 .compatible = "marvell,armada-7k-pp22",
8930 .data = (void *)MVPP22,
8934 MODULE_DEVICE_TABLE(of, mvpp2_match);
8936 static const struct acpi_device_id mvpp2_acpi_match[] = {
8937 { "MRVL0110", MVPP22 },
8940 MODULE_DEVICE_TABLE(acpi, mvpp2_acpi_match);
8942 static struct platform_driver mvpp2_driver = {
8943 .probe = mvpp2_probe,
8944 .remove = mvpp2_remove,
8946 .name = MVPP2_DRIVER_NAME,
8947 .of_match_table = mvpp2_match,
8948 .acpi_match_table = ACPI_PTR(mvpp2_acpi_match),
8952 module_platform_driver(mvpp2_driver);
8954 MODULE_DESCRIPTION("Marvell PPv2 Ethernet Driver - www.marvell.com");
8955 MODULE_AUTHOR("Marcin Wojtas <mw@semihalf.com>");
8956 MODULE_LICENSE("GPL v2");