mlxsw: spectrum_span: Allow setting policer on a SPAN agent
drivers/net/ethernet/mellanox/mlxsw/spectrum_matchall.c
// SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0
/* Copyright (c) 2017-2020 Mellanox Technologies. All rights reserved */

#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/netdevice.h>
#include <net/flow_offload.h>

#include "spectrum.h"
#include "spectrum_span.h"
#include "reg.h"

static struct mlxsw_sp_mall_entry *
mlxsw_sp_mall_entry_find(struct mlxsw_sp_flow_block *block, unsigned long cookie)
{
        struct mlxsw_sp_mall_entry *mall_entry;

        list_for_each_entry(mall_entry, &block->mall.list, list)
                if (mall_entry->cookie == cookie)
                        return mall_entry;

        return NULL;
}
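
/* Mirroring is offloaded by requesting a SPAN agent towards the destination
 * netdev, marking the port as analyzed and binding the agent to the port's
 * ingress or egress mirroring trigger.
 */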
static int
mlxsw_sp_mall_port_mirror_add(struct mlxsw_sp_port *mlxsw_sp_port,
                              struct mlxsw_sp_mall_entry *mall_entry)
{
        struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
        struct mlxsw_sp_span_agent_parms agent_parms = {};
        struct mlxsw_sp_span_trigger_parms parms;
        enum mlxsw_sp_span_trigger trigger;
        int err;

        if (!mall_entry->mirror.to_dev) {
                netdev_err(mlxsw_sp_port->dev, "Could not find requested device\n");
                return -EINVAL;
        }

        agent_parms.to_dev = mall_entry->mirror.to_dev;
        err = mlxsw_sp_span_agent_get(mlxsw_sp, &mall_entry->mirror.span_id,
                                      &agent_parms);
        if (err)
                return err;

        err = mlxsw_sp_span_analyzed_port_get(mlxsw_sp_port,
                                              mall_entry->ingress);
        if (err)
                goto err_analyzed_port_get;

        trigger = mall_entry->ingress ? MLXSW_SP_SPAN_TRIGGER_INGRESS :
                                        MLXSW_SP_SPAN_TRIGGER_EGRESS;
        parms.span_id = mall_entry->mirror.span_id;
        err = mlxsw_sp_span_agent_bind(mlxsw_sp, trigger, mlxsw_sp_port,
                                       &parms);
        if (err)
                goto err_agent_bind;

        return 0;

err_agent_bind:
        mlxsw_sp_span_analyzed_port_put(mlxsw_sp_port, mall_entry->ingress);
err_analyzed_port_get:
        mlxsw_sp_span_agent_put(mlxsw_sp, mall_entry->mirror.span_id);
        return err;
}

static void
mlxsw_sp_mall_port_mirror_del(struct mlxsw_sp_port *mlxsw_sp_port,
                              struct mlxsw_sp_mall_entry *mall_entry)
{
        struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
        struct mlxsw_sp_span_trigger_parms parms;
        enum mlxsw_sp_span_trigger trigger;

        trigger = mall_entry->ingress ? MLXSW_SP_SPAN_TRIGGER_INGRESS :
                                        MLXSW_SP_SPAN_TRIGGER_EGRESS;
        parms.span_id = mall_entry->mirror.span_id;
        mlxsw_sp_span_agent_unbind(mlxsw_sp, trigger, mlxsw_sp_port, &parms);
        mlxsw_sp_span_analyzed_port_put(mlxsw_sp_port, mall_entry->ingress);
        mlxsw_sp_span_agent_put(mlxsw_sp, mall_entry->mirror.span_id);
}
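
/* Enable or disable packet sampling on the port at the given rate by
 * writing the MPSC register.
 */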
static int mlxsw_sp_mall_port_sample_set(struct mlxsw_sp_port *mlxsw_sp_port,
                                         bool enable, u32 rate)
{
        struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
        char mpsc_pl[MLXSW_REG_MPSC_LEN];

        mlxsw_reg_mpsc_pack(mpsc_pl, mlxsw_sp_port->local_port, enable, rate);
        return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mpsc), mpsc_pl);
}
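
/* Only a single sampling rule may be installed per port. The sample
 * parameters are published with rcu_assign_pointer() before sampling is
 * enabled in hardware, since in-flight sampled packets may dereference them.
 */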
static int
mlxsw_sp_mall_port_sample_add(struct mlxsw_sp_port *mlxsw_sp_port,
                              struct mlxsw_sp_mall_entry *mall_entry)
{
        int err;

        if (rtnl_dereference(mlxsw_sp_port->sample)) {
                netdev_err(mlxsw_sp_port->dev, "sample already active\n");
                return -EEXIST;
        }
        rcu_assign_pointer(mlxsw_sp_port->sample, &mall_entry->sample);

        err = mlxsw_sp_mall_port_sample_set(mlxsw_sp_port, true,
                                            mall_entry->sample.rate);
        if (err)
                goto err_port_sample_set;
        return 0;

err_port_sample_set:
        RCU_INIT_POINTER(mlxsw_sp_port->sample, NULL);
        return err;
}

static void
mlxsw_sp_mall_port_sample_del(struct mlxsw_sp_port *mlxsw_sp_port)
{
        if (!mlxsw_sp_port->sample)
                return;

        mlxsw_sp_mall_port_sample_set(mlxsw_sp_port, false, 1);
        RCU_INIT_POINTER(mlxsw_sp_port->sample, NULL);
}
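
/* Install or remove a matchall entry on a single port according to the
 * entry's action type.
 */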
static int
mlxsw_sp_mall_port_rule_add(struct mlxsw_sp_port *mlxsw_sp_port,
                            struct mlxsw_sp_mall_entry *mall_entry)
{
        switch (mall_entry->type) {
        case MLXSW_SP_MALL_ACTION_TYPE_MIRROR:
                return mlxsw_sp_mall_port_mirror_add(mlxsw_sp_port, mall_entry);
        case MLXSW_SP_MALL_ACTION_TYPE_SAMPLE:
                return mlxsw_sp_mall_port_sample_add(mlxsw_sp_port, mall_entry);
        default:
                WARN_ON(1);
                return -EINVAL;
        }
}

static void
mlxsw_sp_mall_port_rule_del(struct mlxsw_sp_port *mlxsw_sp_port,
                            struct mlxsw_sp_mall_entry *mall_entry)
{
        switch (mall_entry->type) {
        case MLXSW_SP_MALL_ACTION_TYPE_MIRROR:
                mlxsw_sp_mall_port_mirror_del(mlxsw_sp_port, mall_entry);
                break;
        case MLXSW_SP_MALL_ACTION_TYPE_SAMPLE:
                mlxsw_sp_mall_port_sample_del(mlxsw_sp_port);
                break;
        default:
                WARN_ON(1);
        }
}
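
/* Recompute the minimum and maximum priority of the matchall entries on the
 * block. The bounds are exposed via mlxsw_sp_mall_prio_get() and mirror the
 * flower priority checks done in mlxsw_sp_mall_replace().
 */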
static void mlxsw_sp_mall_prio_update(struct mlxsw_sp_flow_block *block)
{
        struct mlxsw_sp_mall_entry *mall_entry;

        if (list_empty(&block->mall.list))
                return;
        block->mall.min_prio = UINT_MAX;
        block->mall.max_prio = 0;
        list_for_each_entry(mall_entry, &block->mall.list, list) {
                if (mall_entry->priority < block->mall.min_prio)
                        block->mall.min_prio = mall_entry->priority;
                if (mall_entry->priority > block->mall.max_prio)
                        block->mall.max_prio = mall_entry->priority;
        }
}
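
/* Offload a tc matchall classifier: validate the single action (mirror or
 * sample), check its priority against any installed flower rules and install
 * the resulting entry on every port bound to the block.
 */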
int mlxsw_sp_mall_replace(struct mlxsw_sp *mlxsw_sp,
                          struct mlxsw_sp_flow_block *block,
                          struct tc_cls_matchall_offload *f)
{
        struct mlxsw_sp_flow_block_binding *binding;
        struct mlxsw_sp_mall_entry *mall_entry;
        __be16 protocol = f->common.protocol;
        struct flow_action_entry *act;
        unsigned int flower_min_prio;
        unsigned int flower_max_prio;
        bool flower_prio_valid;
        int err;

        if (!flow_offload_has_one_action(&f->rule->action)) {
                NL_SET_ERR_MSG(f->common.extack, "Only singular actions are supported");
                return -EOPNOTSUPP;
        }

        if (f->common.chain_index) {
                NL_SET_ERR_MSG(f->common.extack, "Only chain 0 is supported");
                return -EOPNOTSUPP;
        }

        if (mlxsw_sp_flow_block_is_mixed_bound(block)) {
                NL_SET_ERR_MSG(f->common.extack, "Only not mixed bound blocks are supported");
                return -EOPNOTSUPP;
        }

        err = mlxsw_sp_flower_prio_get(mlxsw_sp, block, f->common.chain_index,
                                       &flower_min_prio, &flower_max_prio);
        if (err) {
                if (err != -ENOENT) {
                        NL_SET_ERR_MSG(f->common.extack, "Failed to get flower priorities");
                        return err;
                }
                flower_prio_valid = false;
                /* No flower filters are installed in specified chain. */
        } else {
                flower_prio_valid = true;
        }

        mall_entry = kzalloc(sizeof(*mall_entry), GFP_KERNEL);
        if (!mall_entry)
                return -ENOMEM;
        mall_entry->cookie = f->cookie;
        mall_entry->priority = f->common.prio;
        mall_entry->ingress = mlxsw_sp_flow_block_is_ingress_bound(block);

        act = &f->rule->action.entries[0];

        if (act->id == FLOW_ACTION_MIRRED && protocol == htons(ETH_P_ALL)) {
                if (flower_prio_valid && mall_entry->ingress &&
                    mall_entry->priority >= flower_min_prio) {
                        NL_SET_ERR_MSG(f->common.extack, "Failed to add behind existing flower rules");
                        err = -EOPNOTSUPP;
                        goto errout;
                }
                if (flower_prio_valid && !mall_entry->ingress &&
                    mall_entry->priority <= flower_max_prio) {
                        NL_SET_ERR_MSG(f->common.extack, "Failed to add in front of existing flower rules");
                        err = -EOPNOTSUPP;
                        goto errout;
                }
                mall_entry->type = MLXSW_SP_MALL_ACTION_TYPE_MIRROR;
                mall_entry->mirror.to_dev = act->dev;
        } else if (act->id == FLOW_ACTION_SAMPLE &&
                   protocol == htons(ETH_P_ALL)) {
                if (!mall_entry->ingress) {
                        NL_SET_ERR_MSG(f->common.extack, "Sample is not supported on egress");
                        err = -EOPNOTSUPP;
                        goto errout;
                }
                if (flower_prio_valid &&
                    mall_entry->priority >= flower_min_prio) {
                        NL_SET_ERR_MSG(f->common.extack, "Failed to add behind existing flower rules");
                        err = -EOPNOTSUPP;
                        goto errout;
                }
                if (act->sample.rate > MLXSW_REG_MPSC_RATE_MAX) {
                        NL_SET_ERR_MSG(f->common.extack, "Sample rate not supported");
                        err = -EOPNOTSUPP;
                        goto errout;
                }
                mall_entry->type = MLXSW_SP_MALL_ACTION_TYPE_SAMPLE;
                mall_entry->sample.psample_group = act->sample.psample_group;
                mall_entry->sample.truncate = act->sample.truncate;
                mall_entry->sample.trunc_size = act->sample.trunc_size;
                mall_entry->sample.rate = act->sample.rate;
        } else {
                err = -EOPNOTSUPP;
                goto errout;
        }

        list_for_each_entry(binding, &block->binding_list, list) {
                err = mlxsw_sp_mall_port_rule_add(binding->mlxsw_sp_port,
                                                  mall_entry);
                if (err)
                        goto rollback;
        }

        block->rule_count++;
        if (mall_entry->ingress)
                block->egress_blocker_rule_count++;
        else
                block->ingress_blocker_rule_count++;
        list_add_tail(&mall_entry->list, &block->mall.list);
        mlxsw_sp_mall_prio_update(block);
        return 0;

rollback:
        list_for_each_entry_continue_reverse(binding, &block->binding_list,
                                             list)
                mlxsw_sp_mall_port_rule_del(binding->mlxsw_sp_port, mall_entry);
errout:
        kfree(mall_entry);
        return err;
}
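
/* Remove an offloaded matchall classifier: unlink the entry, remove it from
 * all bound ports and free it once outstanding RCU readers are done.
 */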
void mlxsw_sp_mall_destroy(struct mlxsw_sp_flow_block *block,
                           struct tc_cls_matchall_offload *f)
{
        struct mlxsw_sp_flow_block_binding *binding;
        struct mlxsw_sp_mall_entry *mall_entry;

        mall_entry = mlxsw_sp_mall_entry_find(block, f->cookie);
        if (!mall_entry) {
                NL_SET_ERR_MSG(f->common.extack, "Entry not found");
                return;
        }

        list_del(&mall_entry->list);
        if (mall_entry->ingress)
                block->egress_blocker_rule_count--;
        else
                block->ingress_blocker_rule_count--;
        block->rule_count--;
        list_for_each_entry(binding, &block->binding_list, list)
                mlxsw_sp_mall_port_rule_del(binding->mlxsw_sp_port, mall_entry);
        kfree_rcu(mall_entry, rcu); /* sample RX packets may be in-flight */
        mlxsw_sp_mall_prio_update(block);
}
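
/* Replay all matchall entries of the block on a port that is being bound to
 * it, rolling back on failure. mlxsw_sp_mall_port_unbind() is the inverse.
 */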
int mlxsw_sp_mall_port_bind(struct mlxsw_sp_flow_block *block,
                            struct mlxsw_sp_port *mlxsw_sp_port)
{
        struct mlxsw_sp_mall_entry *mall_entry;
        int err;

        list_for_each_entry(mall_entry, &block->mall.list, list) {
                err = mlxsw_sp_mall_port_rule_add(mlxsw_sp_port, mall_entry);
                if (err)
                        goto rollback;
        }
        return 0;

rollback:
        list_for_each_entry_continue_reverse(mall_entry, &block->mall.list,
                                             list)
                mlxsw_sp_mall_port_rule_del(mlxsw_sp_port, mall_entry);
        return err;
}

void mlxsw_sp_mall_port_unbind(struct mlxsw_sp_flow_block *block,
                               struct mlxsw_sp_port *mlxsw_sp_port)
{
        struct mlxsw_sp_mall_entry *mall_entry;

        list_for_each_entry(mall_entry, &block->mall.list, list)
                mlxsw_sp_mall_port_rule_del(mlxsw_sp_port, mall_entry);
}
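
/* Report the priority range spanned by the block's matchall entries;
 * -ENOENT tells the caller there are no matchall rules to check against.
 */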
int mlxsw_sp_mall_prio_get(struct mlxsw_sp_flow_block *block, u32 chain_index,
                           unsigned int *p_min_prio, unsigned int *p_max_prio)
{
        if (chain_index || list_empty(&block->mall.list))
                /* In case there are no matchall rules, the caller
                 * receives -ENOENT to indicate there is no need
                 * to check the priorities.
                 */
                return -ENOENT;
        *p_min_prio = block->mall.min_prio;
        *p_max_prio = block->mall.max_prio;
        return 0;
}