Commit | Line | Data |
---|---|---|
93c1edb2 JP |
1 | /* |
2 | * drivers/net/ethernet/mellanox/mlxsw/core.c | |
3 | * Copyright (c) 2015 Mellanox Technologies. All rights reserved. | |
4 | * Copyright (c) 2015 Jiri Pirko <jiri@mellanox.com> | |
5 | * Copyright (c) 2015 Ido Schimmel <idosch@mellanox.com> | |
6 | * Copyright (c) 2015 Elad Raz <eladr@mellanox.com> | |
7 | * | |
8 | * Redistribution and use in source and binary forms, with or without | |
9 | * modification, are permitted provided that the following conditions are met: | |
10 | * | |
11 | * 1. Redistributions of source code must retain the above copyright | |
12 | * notice, this list of conditions and the following disclaimer. | |
13 | * 2. Redistributions in binary form must reproduce the above copyright | |
14 | * notice, this list of conditions and the following disclaimer in the | |
15 | * documentation and/or other materials provided with the distribution. | |
16 | * 3. Neither the names of the copyright holders nor the names of its | |
17 | * contributors may be used to endorse or promote products derived from | |
18 | * this software without specific prior written permission. | |
19 | * | |
20 | * Alternatively, this software may be distributed under the terms of the | |
21 | * GNU General Public License ("GPL") version 2 as published by the Free | |
22 | * Software Foundation. | |
23 | * | |
24 | * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" | |
25 | * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE | |
26 | * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE | |
27 | * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE | |
28 | * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR | |
29 | * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF | |
30 | * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS | |
31 | * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN | |
32 | * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) | |
33 | * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE | |
34 | * POSSIBILITY OF SUCH DAMAGE. | |
35 | */ | |
36 | ||
37 | #include <linux/kernel.h> | |
38 | #include <linux/module.h> | |
39 | #include <linux/device.h> | |
40 | #include <linux/export.h> | |
41 | #include <linux/err.h> | |
42 | #include <linux/if_link.h> | |
43 | #include <linux/debugfs.h> | |
44 | #include <linux/seq_file.h> | |
45 | #include <linux/u64_stats_sync.h> | |
46 | #include <linux/netdevice.h> | |
4ec14b76 IS |
47 | #include <linux/wait.h> |
48 | #include <linux/skbuff.h> | |
49 | #include <linux/etherdevice.h> | |
50 | #include <linux/types.h> | |
4ec14b76 IS |
51 | #include <linux/string.h> |
52 | #include <linux/gfp.h> | |
53 | #include <linux/random.h> | |
54 | #include <linux/jiffies.h> | |
55 | #include <linux/mutex.h> | |
56 | #include <linux/rcupdate.h> | |
57 | #include <linux/slab.h> | |
58 | #include <asm/byteorder.h> | |
93c1edb2 JP |
59 | |
60 | #include "core.h" | |
61 | #include "item.h" | |
62 | #include "cmd.h" | |
63 | #include "port.h" | |
64 | #include "trap.h" | |
4ec14b76 IS |
65 | #include "emad.h" |
66 | #include "reg.h" | |
93c1edb2 JP |
67 | |
68 | static LIST_HEAD(mlxsw_core_driver_list); | |
69 | static DEFINE_SPINLOCK(mlxsw_core_driver_list_lock); | |
70 | ||
71 | static const char mlxsw_core_driver_name[] = "mlxsw_core"; | |
72 | ||
73 | static struct dentry *mlxsw_core_dbg_root; | |
74 | ||
/* Per-CPU RX accounting, kept both per trap ID and per local port.
 * The 64-bit packet/byte counters are read under @syncp
 * (u64_stats_fetch_begin/retry); the 32-bit dropped/invalid counters
 * are summed without it.
 */
struct mlxsw_core_pcpu_stats {
	u64 trap_rx_packets[MLXSW_TRAP_ID_MAX];	/* RX packets per trap ID */
	u64 trap_rx_bytes[MLXSW_TRAP_ID_MAX];	/* RX bytes per trap ID */
	u64 port_rx_packets[MLXSW_PORT_MAX_PORTS]; /* RX packets per port */
	u64 port_rx_bytes[MLXSW_PORT_MAX_PORTS];   /* RX bytes per port */
	struct u64_stats_sync syncp;	/* protects the u64 counters above */
	u32 trap_rx_dropped[MLXSW_TRAP_ID_MAX];	/* dropped, per trap ID */
	u32 port_rx_dropped[MLXSW_PORT_MAX_PORTS]; /* dropped, per port */
	u32 trap_rx_invalid;	/* counted separately from per-ID stats */
	u32 port_rx_invalid;	/* counted separately from per-port stats */
};
86 | ||
/* Core instance state, one per bus device.  Allocated in
 * mlxsw_core_bus_device_register() with the driver's private area
 * appended at the end (see @driver_priv).
 */
struct mlxsw_core {
	struct mlxsw_driver *driver;		/* bound device-kind driver */
	const struct mlxsw_bus *bus;		/* bus ops (init/fini/transmit) */
	void *bus_priv;				/* opaque bus private data */
	const struct mlxsw_bus_info *bus_info;
	struct list_head rx_listener_list;	/* mlxsw_rx_listener_item's */
	struct list_head event_listener_list;	/* mlxsw_event_listener_item's */
	/* State of the single in-flight EMAD transaction. */
	struct {
		struct sk_buff *resp_skb;	/* response, set by RX listener */
		u64 tid;			/* current transaction ID */
		wait_queue_head_t wait;		/* waker: EMAD RX listener */
		bool trans_active;		/* response still pending */
		struct mutex lock; /* One EMAD transaction at a time. */
		bool use_emad;			/* EMAD path enabled */
	} emad;
	struct mlxsw_core_pcpu_stats __percpu *pcpu_stats;
	struct dentry *dbg_dir;			/* per-device debugfs dir */
	struct {
		struct debugfs_blob_wrapper vsd_blob;
		struct debugfs_blob_wrapper psid_blob;
	} dbg;
	unsigned long driver_priv[0];
	/* driver_priv has to be always the last item */
};
111 | ||
/* One registered RX listener; linked on mlxsw_core->rx_listener_list. */
struct mlxsw_rx_listener_item {
	struct list_head list;
	struct mlxsw_rx_listener rxl;	/* copy of the caller's listener */
	void *priv;			/* passed back to rxl.func */
};
117 | ||
4ec14b76 IS |
/* One registered event listener; linked on mlxsw_core->event_listener_list. */
struct mlxsw_event_listener_item {
	struct list_head list;
	struct mlxsw_event_listener el;	/* copy of the caller's listener */
	void *priv;			/* passed back to the callback */
};
123 | ||
124 | /****************** | |
125 | * EMAD processing | |
126 | ******************/ | |
127 | ||
/* emad_eth_hdr_dmac
 * Destination MAC in EMAD's Ethernet header.
 * Must be set to 01:02:c9:00:00:01
 */
MLXSW_ITEM_BUF(emad, eth_hdr, dmac, 0x00, 6);

/* emad_eth_hdr_smac
 * Source MAC in EMAD's Ethernet header.
 * Must be set to 00:02:c9:01:02:03
 */
MLXSW_ITEM_BUF(emad, eth_hdr, smac, 0x06, 6);

/* emad_eth_hdr_ethertype
 * Ethertype in EMAD's Ethernet header.
 * Must be set to 0x8932
 */
MLXSW_ITEM32(emad, eth_hdr, ethertype, 0x0C, 16, 16);

/* emad_eth_hdr_mlx_proto
 * Mellanox protocol.
 * Must be set to 0x0.
 */
MLXSW_ITEM32(emad, eth_hdr, mlx_proto, 0x0C, 8, 8);

/* emad_eth_hdr_ver
 * Mellanox protocol version.
 * Must be set to 0x0.
 */
MLXSW_ITEM32(emad, eth_hdr, ver, 0x0C, 4, 4);

/* emad_op_tlv_type
 * Type of the TLV.
 * Must be set to 0x1 (operation TLV).
 */
MLXSW_ITEM32(emad, op_tlv, type, 0x00, 27, 5);

/* emad_op_tlv_len
 * Length of the operation TLV in u32.
 * Must be set to 0x4.
 */
MLXSW_ITEM32(emad, op_tlv, len, 0x00, 16, 11);

/* emad_op_tlv_dr
 * Direct route bit. Setting to 1 indicates the EMAD is a direct route
 * EMAD. DR TLV must follow.
 *
 * Note: Currently not supported and must not be set.
 */
MLXSW_ITEM32(emad, op_tlv, dr, 0x00, 15, 1);

/* emad_op_tlv_status
 * Returned status in case of EMAD response. Must be set to 0 in case
 * of EMAD request.
 * 0x0 - success
 * 0x1 - device is busy. Requester should retry
 * 0x2 - Mellanox protocol version not supported
 * 0x3 - unknown TLV
 * 0x4 - register not supported
 * 0x5 - operation class not supported
 * 0x6 - EMAD method not supported
 * 0x7 - bad parameter (e.g. port out of range)
 * 0x8 - resource not available
 * 0x9 - message receipt acknowledgment. Requester should retry
 * 0x70 - internal error
 */
MLXSW_ITEM32(emad, op_tlv, status, 0x00, 8, 7);

/* emad_op_tlv_register_id
 * Register ID of register within register TLV.
 */
MLXSW_ITEM32(emad, op_tlv, register_id, 0x04, 16, 16);

/* emad_op_tlv_r
 * Response bit. Setting to 1 indicates Response, otherwise request.
 */
MLXSW_ITEM32(emad, op_tlv, r, 0x04, 15, 1);

/* emad_op_tlv_method
 * EMAD method type.
 * 0x1 - query
 * 0x2 - write
 * 0x3 - send (currently not supported)
 * 0x4 - event
 */
MLXSW_ITEM32(emad, op_tlv, method, 0x04, 8, 7);

/* emad_op_tlv_class
 * EMAD operation class. Must be set to 0x1 (REG_ACCESS).
 */
MLXSW_ITEM32(emad, op_tlv, class, 0x04, 0, 8);

/* emad_op_tlv_tid
 * EMAD transaction ID. Used for pairing request and response EMADs.
 */
MLXSW_ITEM64(emad, op_tlv, tid, 0x08, 0, 64);

/* emad_reg_tlv_type
 * Type of the TLV.
 * Must be set to 0x3 (register TLV).
 */
MLXSW_ITEM32(emad, reg_tlv, type, 0x00, 27, 5);

/* emad_reg_tlv_len
 * Length of the register TLV in u32.
 * (The previous comment said "operation TLV" - copy/paste error;
 * this field belongs to the register TLV, see mlxsw_emad_pack_reg_tlv.)
 */
MLXSW_ITEM32(emad, reg_tlv, len, 0x00, 16, 11);

/* emad_end_tlv_type
 * Type of the TLV.
 * Must be set to 0x0 (end TLV).
 */
MLXSW_ITEM32(emad, end_tlv, type, 0x00, 27, 5);

/* emad_end_tlv_len
 * Length of the end TLV in u32.
 * Must be set to 1.
 */
MLXSW_ITEM32(emad, end_tlv, len, 0x00, 16, 11);
246 | ||
/* Direction of a register access; selects the EMAD method (query/write). */
enum mlxsw_core_reg_access_type {
	MLXSW_CORE_REG_ACCESS_TYPE_QUERY,
	MLXSW_CORE_REG_ACCESS_TYPE_WRITE,
};
251 | ||
252 | static inline const char * | |
253 | mlxsw_core_reg_access_type_str(enum mlxsw_core_reg_access_type type) | |
254 | { | |
255 | switch (type) { | |
256 | case MLXSW_CORE_REG_ACCESS_TYPE_QUERY: | |
257 | return "query"; | |
258 | case MLXSW_CORE_REG_ACCESS_TYPE_WRITE: | |
259 | return "write"; | |
260 | } | |
261 | BUG(); | |
262 | } | |
263 | ||
264 | static void mlxsw_emad_pack_end_tlv(char *end_tlv) | |
265 | { | |
266 | mlxsw_emad_end_tlv_type_set(end_tlv, MLXSW_EMAD_TLV_TYPE_END); | |
267 | mlxsw_emad_end_tlv_len_set(end_tlv, MLXSW_EMAD_END_TLV_LEN); | |
268 | } | |
269 | ||
270 | static void mlxsw_emad_pack_reg_tlv(char *reg_tlv, | |
271 | const struct mlxsw_reg_info *reg, | |
272 | char *payload) | |
273 | { | |
274 | mlxsw_emad_reg_tlv_type_set(reg_tlv, MLXSW_EMAD_TLV_TYPE_REG); | |
275 | mlxsw_emad_reg_tlv_len_set(reg_tlv, reg->len / sizeof(u32) + 1); | |
276 | memcpy(reg_tlv + sizeof(u32), payload, reg->len); | |
277 | } | |
278 | ||
279 | static void mlxsw_emad_pack_op_tlv(char *op_tlv, | |
280 | const struct mlxsw_reg_info *reg, | |
281 | enum mlxsw_core_reg_access_type type, | |
282 | struct mlxsw_core *mlxsw_core) | |
283 | { | |
284 | mlxsw_emad_op_tlv_type_set(op_tlv, MLXSW_EMAD_TLV_TYPE_OP); | |
285 | mlxsw_emad_op_tlv_len_set(op_tlv, MLXSW_EMAD_OP_TLV_LEN); | |
286 | mlxsw_emad_op_tlv_dr_set(op_tlv, 0); | |
287 | mlxsw_emad_op_tlv_status_set(op_tlv, 0); | |
288 | mlxsw_emad_op_tlv_register_id_set(op_tlv, reg->id); | |
289 | mlxsw_emad_op_tlv_r_set(op_tlv, MLXSW_EMAD_OP_TLV_REQUEST); | |
290 | if (MLXSW_CORE_REG_ACCESS_TYPE_QUERY == type) | |
291 | mlxsw_emad_op_tlv_method_set(op_tlv, | |
292 | MLXSW_EMAD_OP_TLV_METHOD_QUERY); | |
293 | else | |
294 | mlxsw_emad_op_tlv_method_set(op_tlv, | |
295 | MLXSW_EMAD_OP_TLV_METHOD_WRITE); | |
296 | mlxsw_emad_op_tlv_class_set(op_tlv, | |
297 | MLXSW_EMAD_OP_TLV_CLASS_REG_ACCESS); | |
298 | mlxsw_emad_op_tlv_tid_set(op_tlv, mlxsw_core->emad.tid); | |
299 | } | |
300 | ||
301 | static int mlxsw_emad_construct_eth_hdr(struct sk_buff *skb) | |
302 | { | |
303 | char *eth_hdr = skb_push(skb, MLXSW_EMAD_ETH_HDR_LEN); | |
304 | ||
305 | mlxsw_emad_eth_hdr_dmac_memcpy_to(eth_hdr, MLXSW_EMAD_EH_DMAC); | |
306 | mlxsw_emad_eth_hdr_smac_memcpy_to(eth_hdr, MLXSW_EMAD_EH_SMAC); | |
307 | mlxsw_emad_eth_hdr_ethertype_set(eth_hdr, MLXSW_EMAD_EH_ETHERTYPE); | |
308 | mlxsw_emad_eth_hdr_mlx_proto_set(eth_hdr, MLXSW_EMAD_EH_MLX_PROTO); | |
309 | mlxsw_emad_eth_hdr_ver_set(eth_hdr, MLXSW_EMAD_EH_PROTO_VERSION); | |
310 | ||
311 | skb_reset_mac_header(skb); | |
312 | ||
313 | return 0; | |
314 | } | |
315 | ||
316 | static void mlxsw_emad_construct(struct sk_buff *skb, | |
317 | const struct mlxsw_reg_info *reg, | |
318 | char *payload, | |
319 | enum mlxsw_core_reg_access_type type, | |
320 | struct mlxsw_core *mlxsw_core) | |
321 | { | |
322 | char *buf; | |
323 | ||
324 | buf = skb_push(skb, MLXSW_EMAD_END_TLV_LEN * sizeof(u32)); | |
325 | mlxsw_emad_pack_end_tlv(buf); | |
326 | ||
327 | buf = skb_push(skb, reg->len + sizeof(u32)); | |
328 | mlxsw_emad_pack_reg_tlv(buf, reg, payload); | |
329 | ||
330 | buf = skb_push(skb, MLXSW_EMAD_OP_TLV_LEN * sizeof(u32)); | |
331 | mlxsw_emad_pack_op_tlv(buf, reg, type, mlxsw_core); | |
332 | ||
333 | mlxsw_emad_construct_eth_hdr(skb); | |
334 | } | |
335 | ||
336 | static char *mlxsw_emad_op_tlv(const struct sk_buff *skb) | |
337 | { | |
338 | return ((char *) (skb->data + MLXSW_EMAD_ETH_HDR_LEN)); | |
339 | } | |
340 | ||
341 | static char *mlxsw_emad_reg_tlv(const struct sk_buff *skb) | |
342 | { | |
343 | return ((char *) (skb->data + MLXSW_EMAD_ETH_HDR_LEN + | |
344 | MLXSW_EMAD_OP_TLV_LEN * sizeof(u32))); | |
345 | } | |
346 | ||
347 | static char *mlxsw_emad_reg_payload(const char *op_tlv) | |
348 | { | |
349 | return ((char *) (op_tlv + (MLXSW_EMAD_OP_TLV_LEN + 1) * sizeof(u32))); | |
350 | } | |
351 | ||
352 | static u64 mlxsw_emad_get_tid(const struct sk_buff *skb) | |
353 | { | |
354 | char *op_tlv; | |
355 | ||
356 | op_tlv = mlxsw_emad_op_tlv(skb); | |
357 | return mlxsw_emad_op_tlv_tid_get(op_tlv); | |
358 | } | |
359 | ||
360 | static bool mlxsw_emad_is_resp(const struct sk_buff *skb) | |
361 | { | |
362 | char *op_tlv; | |
363 | ||
364 | op_tlv = mlxsw_emad_op_tlv(skb); | |
365 | return (MLXSW_EMAD_OP_TLV_RESPONSE == mlxsw_emad_op_tlv_r_get(op_tlv)); | |
366 | } | |
367 | ||
368 | #define MLXSW_EMAD_TIMEOUT_MS 200 | |
369 | ||
370 | static int __mlxsw_emad_transmit(struct mlxsw_core *mlxsw_core, | |
371 | struct sk_buff *skb, | |
372 | const struct mlxsw_tx_info *tx_info) | |
373 | { | |
374 | int err; | |
375 | int ret; | |
376 | ||
377 | err = mlxsw_core_skb_transmit(mlxsw_core->driver_priv, skb, tx_info); | |
378 | if (err) { | |
262df691 JP |
379 | dev_err(mlxsw_core->bus_info->dev, "Failed to transmit EMAD (tid=%llx)\n", |
380 | mlxsw_core->emad.tid); | |
4ec14b76 IS |
381 | dev_kfree_skb(skb); |
382 | return err; | |
383 | } | |
384 | ||
385 | mlxsw_core->emad.trans_active = true; | |
386 | ret = wait_event_timeout(mlxsw_core->emad.wait, | |
387 | !(mlxsw_core->emad.trans_active), | |
388 | msecs_to_jiffies(MLXSW_EMAD_TIMEOUT_MS)); | |
389 | if (!ret) { | |
390 | dev_warn(mlxsw_core->bus_info->dev, "EMAD timed-out (tid=%llx)\n", | |
391 | mlxsw_core->emad.tid); | |
392 | mlxsw_core->emad.trans_active = false; | |
393 | return -EIO; | |
394 | } | |
395 | ||
396 | return 0; | |
397 | } | |
398 | ||
399 | static int mlxsw_emad_process_status(struct mlxsw_core *mlxsw_core, | |
400 | char *op_tlv) | |
401 | { | |
402 | enum mlxsw_emad_op_tlv_status status; | |
403 | u64 tid; | |
404 | ||
405 | status = mlxsw_emad_op_tlv_status_get(op_tlv); | |
406 | tid = mlxsw_emad_op_tlv_tid_get(op_tlv); | |
407 | ||
408 | switch (status) { | |
409 | case MLXSW_EMAD_OP_TLV_STATUS_SUCCESS: | |
410 | return 0; | |
411 | case MLXSW_EMAD_OP_TLV_STATUS_BUSY: | |
412 | case MLXSW_EMAD_OP_TLV_STATUS_MESSAGE_RECEIPT_ACK: | |
413 | dev_warn(mlxsw_core->bus_info->dev, "Reg access status again (tid=%llx,status=%x(%s))\n", | |
414 | tid, status, mlxsw_emad_op_tlv_status_str(status)); | |
415 | return -EAGAIN; | |
416 | case MLXSW_EMAD_OP_TLV_STATUS_VERSION_NOT_SUPPORTED: | |
417 | case MLXSW_EMAD_OP_TLV_STATUS_UNKNOWN_TLV: | |
418 | case MLXSW_EMAD_OP_TLV_STATUS_REGISTER_NOT_SUPPORTED: | |
419 | case MLXSW_EMAD_OP_TLV_STATUS_CLASS_NOT_SUPPORTED: | |
420 | case MLXSW_EMAD_OP_TLV_STATUS_METHOD_NOT_SUPPORTED: | |
421 | case MLXSW_EMAD_OP_TLV_STATUS_BAD_PARAMETER: | |
422 | case MLXSW_EMAD_OP_TLV_STATUS_RESOURCE_NOT_AVAILABLE: | |
423 | case MLXSW_EMAD_OP_TLV_STATUS_INTERNAL_ERROR: | |
424 | default: | |
425 | dev_err(mlxsw_core->bus_info->dev, "Reg access status failed (tid=%llx,status=%x(%s))\n", | |
426 | tid, status, mlxsw_emad_op_tlv_status_str(status)); | |
427 | return -EIO; | |
428 | } | |
429 | } | |
430 | ||
/* Convenience wrapper: decode the status of a response skb. */
static int mlxsw_emad_process_status_skb(struct mlxsw_core *mlxsw_core,
					 struct sk_buff *skb)
{
	char *op_tlv = mlxsw_emad_op_tlv(skb);

	return mlxsw_emad_process_status(mlxsw_core, op_tlv);
}
436 | ||
437 | static int mlxsw_emad_transmit(struct mlxsw_core *mlxsw_core, | |
438 | struct sk_buff *skb, | |
439 | const struct mlxsw_tx_info *tx_info) | |
440 | { | |
441 | struct sk_buff *trans_skb; | |
442 | int n_retry; | |
443 | int err; | |
444 | ||
445 | n_retry = 0; | |
446 | retry: | |
447 | /* We copy the EMAD to a new skb, since we might need | |
448 | * to retransmit it in case of failure. | |
449 | */ | |
450 | trans_skb = skb_copy(skb, GFP_KERNEL); | |
451 | if (!trans_skb) { | |
452 | err = -ENOMEM; | |
453 | goto out; | |
454 | } | |
455 | ||
456 | err = __mlxsw_emad_transmit(mlxsw_core, trans_skb, tx_info); | |
457 | if (!err) { | |
458 | struct sk_buff *resp_skb = mlxsw_core->emad.resp_skb; | |
459 | ||
460 | err = mlxsw_emad_process_status_skb(mlxsw_core, resp_skb); | |
461 | if (err) | |
462 | dev_kfree_skb(resp_skb); | |
463 | if (!err || err != -EAGAIN) | |
464 | goto out; | |
465 | } | |
466 | if (n_retry++ < MLXSW_EMAD_MAX_RETRY) | |
467 | goto retry; | |
468 | ||
469 | out: | |
470 | dev_kfree_skb(skb); | |
471 | mlxsw_core->emad.tid++; | |
472 | return err; | |
473 | } | |
474 | ||
475 | static void mlxsw_emad_rx_listener_func(struct sk_buff *skb, u8 local_port, | |
476 | void *priv) | |
477 | { | |
478 | struct mlxsw_core *mlxsw_core = priv; | |
479 | ||
480 | if (mlxsw_emad_is_resp(skb) && | |
481 | mlxsw_core->emad.trans_active && | |
482 | mlxsw_emad_get_tid(skb) == mlxsw_core->emad.tid) { | |
483 | mlxsw_core->emad.resp_skb = skb; | |
484 | mlxsw_core->emad.trans_active = false; | |
485 | wake_up(&mlxsw_core->emad.wait); | |
486 | } else { | |
487 | dev_kfree_skb(skb); | |
488 | } | |
489 | } | |
490 | ||
/* RX listener matching the EMAD trap on any local port. */
static const struct mlxsw_rx_listener mlxsw_emad_rx_listener = {
	.func = mlxsw_emad_rx_listener_func,
	.local_port = MLXSW_PORT_DONT_CARE,
	.trap_id = MLXSW_TRAP_ID_ETHEMAD,
};
496 | ||
497 | static int mlxsw_emad_traps_set(struct mlxsw_core *mlxsw_core) | |
498 | { | |
499 | char htgt_pl[MLXSW_REG_HTGT_LEN]; | |
500 | char hpkt_pl[MLXSW_REG_HPKT_LEN]; | |
501 | int err; | |
502 | ||
503 | mlxsw_reg_htgt_pack(htgt_pl, MLXSW_REG_HTGT_TRAP_GROUP_EMAD); | |
504 | err = mlxsw_reg_write(mlxsw_core, MLXSW_REG(htgt), htgt_pl); | |
505 | if (err) | |
506 | return err; | |
507 | ||
508 | mlxsw_reg_hpkt_pack(hpkt_pl, MLXSW_REG_HPKT_ACTION_TRAP_TO_CPU, | |
4ec14b76 IS |
509 | MLXSW_TRAP_ID_ETHEMAD); |
510 | return mlxsw_reg_write(mlxsw_core, MLXSW_REG(hpkt), hpkt_pl); | |
511 | } | |
512 | ||
513 | static int mlxsw_emad_init(struct mlxsw_core *mlxsw_core) | |
514 | { | |
515 | int err; | |
516 | ||
517 | /* Set the upper 32 bits of the transaction ID field to a random | |
518 | * number. This allows us to discard EMADs addressed to other | |
519 | * devices. | |
520 | */ | |
521 | get_random_bytes(&mlxsw_core->emad.tid, 4); | |
522 | mlxsw_core->emad.tid = mlxsw_core->emad.tid << 32; | |
523 | ||
524 | init_waitqueue_head(&mlxsw_core->emad.wait); | |
525 | mlxsw_core->emad.trans_active = false; | |
526 | mutex_init(&mlxsw_core->emad.lock); | |
527 | ||
528 | err = mlxsw_core_rx_listener_register(mlxsw_core, | |
529 | &mlxsw_emad_rx_listener, | |
530 | mlxsw_core); | |
531 | if (err) | |
532 | return err; | |
533 | ||
534 | err = mlxsw_emad_traps_set(mlxsw_core); | |
535 | if (err) | |
536 | goto err_emad_trap_set; | |
537 | ||
538 | mlxsw_core->emad.use_emad = true; | |
539 | ||
540 | return 0; | |
541 | ||
542 | err_emad_trap_set: | |
543 | mlxsw_core_rx_listener_unregister(mlxsw_core, | |
544 | &mlxsw_emad_rx_listener, | |
545 | mlxsw_core); | |
546 | return err; | |
547 | } | |
548 | ||
549 | static void mlxsw_emad_fini(struct mlxsw_core *mlxsw_core) | |
550 | { | |
551 | char hpkt_pl[MLXSW_REG_HPKT_LEN]; | |
552 | ||
18ea5445 | 553 | mlxsw_core->emad.use_emad = false; |
4ec14b76 | 554 | mlxsw_reg_hpkt_pack(hpkt_pl, MLXSW_REG_HPKT_ACTION_DISCARD, |
4ec14b76 IS |
555 | MLXSW_TRAP_ID_ETHEMAD); |
556 | mlxsw_reg_write(mlxsw_core, MLXSW_REG(hpkt), hpkt_pl); | |
557 | ||
558 | mlxsw_core_rx_listener_unregister(mlxsw_core, | |
559 | &mlxsw_emad_rx_listener, | |
560 | mlxsw_core); | |
561 | } | |
562 | ||
563 | static struct sk_buff *mlxsw_emad_alloc(const struct mlxsw_core *mlxsw_core, | |
564 | u16 reg_len) | |
565 | { | |
566 | struct sk_buff *skb; | |
567 | u16 emad_len; | |
568 | ||
569 | emad_len = (reg_len + sizeof(u32) + MLXSW_EMAD_ETH_HDR_LEN + | |
570 | (MLXSW_EMAD_OP_TLV_LEN + MLXSW_EMAD_END_TLV_LEN) * | |
571 | sizeof(u32) + mlxsw_core->driver->txhdr_len); | |
572 | if (emad_len > MLXSW_EMAD_MAX_FRAME_LEN) | |
573 | return NULL; | |
574 | ||
575 | skb = netdev_alloc_skb(NULL, emad_len); | |
576 | if (!skb) | |
577 | return NULL; | |
578 | memset(skb->data, 0, emad_len); | |
579 | skb_reserve(skb, emad_len); | |
580 | ||
581 | return skb; | |
582 | } | |
583 | ||
93c1edb2 JP |
584 | /***************** |
585 | * Core functions | |
586 | *****************/ | |
587 | ||
588 | static int mlxsw_core_rx_stats_dbg_read(struct seq_file *file, void *data) | |
589 | { | |
590 | struct mlxsw_core *mlxsw_core = file->private; | |
591 | struct mlxsw_core_pcpu_stats *p; | |
592 | u64 rx_packets, rx_bytes; | |
593 | u64 tmp_rx_packets, tmp_rx_bytes; | |
594 | u32 rx_dropped, rx_invalid; | |
595 | unsigned int start; | |
596 | int i; | |
597 | int j; | |
598 | static const char hdr[] = | |
599 | " NUM RX_PACKETS RX_BYTES RX_DROPPED\n"; | |
600 | ||
601 | seq_printf(file, hdr); | |
602 | for (i = 0; i < MLXSW_TRAP_ID_MAX; i++) { | |
603 | rx_packets = 0; | |
604 | rx_bytes = 0; | |
605 | rx_dropped = 0; | |
606 | for_each_possible_cpu(j) { | |
607 | p = per_cpu_ptr(mlxsw_core->pcpu_stats, j); | |
608 | do { | |
609 | start = u64_stats_fetch_begin(&p->syncp); | |
610 | tmp_rx_packets = p->trap_rx_packets[i]; | |
611 | tmp_rx_bytes = p->trap_rx_bytes[i]; | |
612 | } while (u64_stats_fetch_retry(&p->syncp, start)); | |
613 | ||
614 | rx_packets += tmp_rx_packets; | |
615 | rx_bytes += tmp_rx_bytes; | |
616 | rx_dropped += p->trap_rx_dropped[i]; | |
617 | } | |
618 | seq_printf(file, "trap %3d %12llu %12llu %10u\n", | |
619 | i, rx_packets, rx_bytes, rx_dropped); | |
620 | } | |
621 | rx_invalid = 0; | |
622 | for_each_possible_cpu(j) { | |
623 | p = per_cpu_ptr(mlxsw_core->pcpu_stats, j); | |
624 | rx_invalid += p->trap_rx_invalid; | |
625 | } | |
626 | seq_printf(file, "trap INV %10u\n", | |
627 | rx_invalid); | |
628 | ||
629 | for (i = 0; i < MLXSW_PORT_MAX_PORTS; i++) { | |
630 | rx_packets = 0; | |
631 | rx_bytes = 0; | |
632 | rx_dropped = 0; | |
633 | for_each_possible_cpu(j) { | |
634 | p = per_cpu_ptr(mlxsw_core->pcpu_stats, j); | |
635 | do { | |
636 | start = u64_stats_fetch_begin(&p->syncp); | |
637 | tmp_rx_packets = p->port_rx_packets[i]; | |
638 | tmp_rx_bytes = p->port_rx_bytes[i]; | |
639 | } while (u64_stats_fetch_retry(&p->syncp, start)); | |
640 | ||
641 | rx_packets += tmp_rx_packets; | |
642 | rx_bytes += tmp_rx_bytes; | |
643 | rx_dropped += p->port_rx_dropped[i]; | |
644 | } | |
645 | seq_printf(file, "port %3d %12llu %12llu %10u\n", | |
646 | i, rx_packets, rx_bytes, rx_dropped); | |
647 | } | |
648 | rx_invalid = 0; | |
649 | for_each_possible_cpu(j) { | |
650 | p = per_cpu_ptr(mlxsw_core->pcpu_stats, j); | |
651 | rx_invalid += p->port_rx_invalid; | |
652 | } | |
653 | seq_printf(file, "port INV %10u\n", | |
654 | rx_invalid); | |
655 | return 0; | |
656 | } | |
657 | ||
658 | static int mlxsw_core_rx_stats_dbg_open(struct inode *inode, struct file *f) | |
659 | { | |
660 | struct mlxsw_core *mlxsw_core = inode->i_private; | |
661 | ||
662 | return single_open(f, mlxsw_core_rx_stats_dbg_read, mlxsw_core); | |
663 | } | |
664 | ||
/* file_operations for the read-only, seq_file-backed "rx_stats" file. */
static const struct file_operations mlxsw_core_rx_stats_dbg_ops = {
	.owner = THIS_MODULE,
	.open = mlxsw_core_rx_stats_dbg_open,
	.release = single_release,
	.read = seq_read,
	.llseek = seq_lseek
};
672 | ||
/* Dump a buffer to the debug log as rows of four big-endian u32 words.
 * Trailing all-zero words are trimmed, but at least one row is always
 * printed.
 *
 * NOTE(review): the print loop advances four words at a time and reads
 * m[i + 1..3], so when the trimmed count is not a multiple of four it
 * reads (zero) words past the trimmed length - assumes @size is always
 * a multiple of 16 bytes; confirm with callers.
 */
static void mlxsw_core_buf_dump_dbg(struct mlxsw_core *mlxsw_core,
				    const char *buf, size_t size)
{
	__be32 *m = (__be32 *) buf;
	int i;
	int count = size / sizeof(__be32);

	/* Walk backwards to find the last non-zero word; i becomes the
	 * trimmed word count (minimum 1).
	 */
	for (i = count - 1; i >= 0; i--)
		if (m[i])
			break;
	i++;
	count = i ? i : 1;
	for (i = 0; i < count; i += 4)
		dev_dbg(mlxsw_core->bus_info->dev, "%04x - %08x %08x %08x %08x\n",
			i * 4, be32_to_cpu(m[i]), be32_to_cpu(m[i + 1]),
			be32_to_cpu(m[i + 2]), be32_to_cpu(m[i + 3]));
}
690 | ||
/* Add @mlxsw_driver to the global driver list under the list lock.
 * Always succeeds (returns 0).
 */
int mlxsw_core_driver_register(struct mlxsw_driver *mlxsw_driver)
{
	spin_lock(&mlxsw_core_driver_list_lock);
	list_add_tail(&mlxsw_driver->list, &mlxsw_core_driver_list);
	spin_unlock(&mlxsw_core_driver_list_lock);
	return 0;
}
EXPORT_SYMBOL(mlxsw_core_driver_register);
699 | ||
/* Remove @mlxsw_driver from the global driver list under the list lock. */
void mlxsw_core_driver_unregister(struct mlxsw_driver *mlxsw_driver)
{
	spin_lock(&mlxsw_core_driver_list_lock);
	list_del(&mlxsw_driver->list);
	spin_unlock(&mlxsw_core_driver_list_lock);
}
EXPORT_SYMBOL(mlxsw_core_driver_unregister);
707 | ||
708 | static struct mlxsw_driver *__driver_find(const char *kind) | |
709 | { | |
710 | struct mlxsw_driver *mlxsw_driver; | |
711 | ||
712 | list_for_each_entry(mlxsw_driver, &mlxsw_core_driver_list, list) { | |
713 | if (strcmp(mlxsw_driver->kind, kind) == 0) | |
714 | return mlxsw_driver; | |
715 | } | |
716 | return NULL; | |
717 | } | |
718 | ||
/* Find the driver for @kind, loading its module on demand, and take a
 * reference on the owning module.  Returns NULL if no driver can be
 * found or its module reference cannot be taken.
 */
static struct mlxsw_driver *mlxsw_core_driver_get(const char *kind)
{
	struct mlxsw_driver *mlxsw_driver;

	spin_lock(&mlxsw_core_driver_list_lock);
	mlxsw_driver = __driver_find(kind);
	if (!mlxsw_driver) {
		/* Drop the spinlock around request_module() (which may
		 * sleep), then search again after the module had a
		 * chance to register itself.
		 */
		spin_unlock(&mlxsw_core_driver_list_lock);
		request_module(MLXSW_MODULE_ALIAS_PREFIX "%s", kind);
		spin_lock(&mlxsw_core_driver_list_lock);
		mlxsw_driver = __driver_find(kind);
	}
	if (mlxsw_driver) {
		/* Pin the driver's module; give up if it is unloading. */
		if (!try_module_get(mlxsw_driver->owner))
			mlxsw_driver = NULL;
	}

	spin_unlock(&mlxsw_core_driver_list_lock);
	return mlxsw_driver;
}
739 | ||
740 | static void mlxsw_core_driver_put(const char *kind) | |
741 | { | |
742 | struct mlxsw_driver *mlxsw_driver; | |
743 | ||
744 | spin_lock(&mlxsw_core_driver_list_lock); | |
745 | mlxsw_driver = __driver_find(kind); | |
746 | spin_unlock(&mlxsw_core_driver_list_lock); | |
747 | if (!mlxsw_driver) | |
748 | return; | |
749 | module_put(mlxsw_driver->owner); | |
750 | } | |
751 | ||
/* Create the per-device debugfs directory with the "rx_stats" file and
 * read-only "vsd"/"psid" blobs taken from the bus info.
 * Returns -ENOMEM if the directory cannot be created.
 */
static int mlxsw_core_debugfs_init(struct mlxsw_core *mlxsw_core)
{
	const struct mlxsw_bus_info *bus_info = mlxsw_core->bus_info;

	mlxsw_core->dbg_dir = debugfs_create_dir(bus_info->device_name,
						 mlxsw_core_dbg_root);
	if (!mlxsw_core->dbg_dir)
		return -ENOMEM;
	debugfs_create_file("rx_stats", S_IRUGO, mlxsw_core->dbg_dir,
			    mlxsw_core, &mlxsw_core_rx_stats_dbg_ops);
	/* Blobs point directly at the bus_info fields; bus_info must
	 * outlive the debugfs entries (removed in debugfs_fini).
	 */
	mlxsw_core->dbg.vsd_blob.data = (void *) &bus_info->vsd;
	mlxsw_core->dbg.vsd_blob.size = sizeof(bus_info->vsd);
	debugfs_create_blob("vsd", S_IRUGO, mlxsw_core->dbg_dir,
			    &mlxsw_core->dbg.vsd_blob);
	mlxsw_core->dbg.psid_blob.data = (void *) &bus_info->psid;
	mlxsw_core->dbg.psid_blob.size = sizeof(bus_info->psid);
	debugfs_create_blob("psid", S_IRUGO, mlxsw_core->dbg_dir,
			    &mlxsw_core->dbg.psid_blob);
	return 0;
}
772 | ||
/* Remove the per-device debugfs directory and everything beneath it. */
static void mlxsw_core_debugfs_fini(struct mlxsw_core *mlxsw_core)
{
	debugfs_remove_recursive(mlxsw_core->dbg_dir);
}
777 | ||
/* Bring up a core instance for a newly probed bus device.
 *
 * Resolves the device-kind driver (pinning its module), allocates the
 * core with the driver's private area appended, then initializes in
 * order: per-CPU stats, bus, EMAD, driver, debugfs.  On any failure
 * the goto chain unwinds exactly the steps already completed, in
 * reverse order.
 */
int mlxsw_core_bus_device_register(const struct mlxsw_bus_info *mlxsw_bus_info,
				   const struct mlxsw_bus *mlxsw_bus,
				   void *bus_priv)
{
	const char *device_kind = mlxsw_bus_info->device_kind;
	struct mlxsw_core *mlxsw_core;
	struct mlxsw_driver *mlxsw_driver;
	size_t alloc_size;
	int err;

	mlxsw_driver = mlxsw_core_driver_get(device_kind);
	if (!mlxsw_driver)
		return -EINVAL;
	/* Core struct plus the driver's private area (driver_priv is the
	 * trailing member of struct mlxsw_core).
	 */
	alloc_size = sizeof(*mlxsw_core) + mlxsw_driver->priv_size;
	mlxsw_core = kzalloc(alloc_size, GFP_KERNEL);
	if (!mlxsw_core) {
		err = -ENOMEM;
		goto err_core_alloc;
	}

	INIT_LIST_HEAD(&mlxsw_core->rx_listener_list);
	INIT_LIST_HEAD(&mlxsw_core->event_listener_list);
	mlxsw_core->driver = mlxsw_driver;
	mlxsw_core->bus = mlxsw_bus;
	mlxsw_core->bus_priv = bus_priv;
	mlxsw_core->bus_info = mlxsw_bus_info;

	mlxsw_core->pcpu_stats =
		netdev_alloc_pcpu_stats(struct mlxsw_core_pcpu_stats);
	if (!mlxsw_core->pcpu_stats) {
		err = -ENOMEM;
		goto err_alloc_stats;
	}

	err = mlxsw_bus->init(bus_priv, mlxsw_core, mlxsw_driver->profile);
	if (err)
		goto err_bus_init;

	err = mlxsw_emad_init(mlxsw_core);
	if (err)
		goto err_emad_init;

	err = mlxsw_driver->init(mlxsw_core->driver_priv, mlxsw_core,
				 mlxsw_bus_info);
	if (err)
		goto err_driver_init;

	err = mlxsw_core_debugfs_init(mlxsw_core);
	if (err)
		goto err_debugfs_init;

	return 0;

err_debugfs_init:
	mlxsw_core->driver->fini(mlxsw_core->driver_priv);
err_driver_init:
	mlxsw_emad_fini(mlxsw_core);
err_emad_init:
	mlxsw_bus->fini(bus_priv);
err_bus_init:
	free_percpu(mlxsw_core->pcpu_stats);
err_alloc_stats:
	kfree(mlxsw_core);
err_core_alloc:
	mlxsw_core_driver_put(device_kind);
	return err;
}
EXPORT_SYMBOL(mlxsw_core_bus_device_register);
846 | ||
/* Tear down a core instance; exact reverse of
 * mlxsw_core_bus_device_register(): debugfs, driver, EMAD, bus, stats,
 * the core allocation, and finally the driver module reference.
 */
void mlxsw_core_bus_device_unregister(struct mlxsw_core *mlxsw_core)
{
	const char *device_kind = mlxsw_core->bus_info->device_kind;

	mlxsw_core_debugfs_fini(mlxsw_core);
	mlxsw_core->driver->fini(mlxsw_core->driver_priv);
	mlxsw_emad_fini(mlxsw_core);
	mlxsw_core->bus->fini(mlxsw_core->bus_priv);
	free_percpu(mlxsw_core->pcpu_stats);
	kfree(mlxsw_core);
	mlxsw_core_driver_put(device_kind);
}
EXPORT_SYMBOL(mlxsw_core_bus_device_unregister);
860 | ||
/* Map a driver's private-area pointer back to its containing
 * mlxsw_core (driver_priv is the trailing member of struct mlxsw_core).
 */
static struct mlxsw_core *__mlxsw_core_get(void *driver_priv)
{
	return container_of(driver_priv, struct mlxsw_core, driver_priv);
}
865 | ||
d003462a IS |
/* Ask the bus whether it can currently accept a transmit for @tx_info.
 * @driver_priv is the driver's private area within mlxsw_core.
 */
bool mlxsw_core_skb_transmit_busy(void *driver_priv,
				  const struct mlxsw_tx_info *tx_info)
{
	struct mlxsw_core *mlxsw_core = __mlxsw_core_get(driver_priv);

	return mlxsw_core->bus->skb_transmit_busy(mlxsw_core->bus_priv,
						  tx_info);
}
EXPORT_SYMBOL(mlxsw_core_skb_transmit_busy);
875 | ||
/* mlxsw_core_skb_transmit() - transmit a packet through the underlying bus.
 * @driver_priv: driver private area embedded in the core instance.
 * @skb: packet to send; ownership passes to the bus on success.
 * @tx_info: destination/priority information for the transmit.
 *
 * Return: whatever the bus-specific skb_transmit() callback returns
 * (0 on success, negative errno otherwise).
 */
int mlxsw_core_skb_transmit(void *driver_priv, struct sk_buff *skb,
			    const struct mlxsw_tx_info *tx_info)
{
	struct mlxsw_core *mlxsw_core = __mlxsw_core_get(driver_priv);

	return mlxsw_core->bus->skb_transmit(mlxsw_core->bus_priv, skb,
					     tx_info);
}
EXPORT_SYMBOL(mlxsw_core_skb_transmit);
885 | ||
886 | static bool __is_rx_listener_equal(const struct mlxsw_rx_listener *rxl_a, | |
887 | const struct mlxsw_rx_listener *rxl_b) | |
888 | { | |
889 | return (rxl_a->func == rxl_b->func && | |
890 | rxl_a->local_port == rxl_b->local_port && | |
891 | rxl_a->trap_id == rxl_b->trap_id); | |
892 | } | |
893 | ||
894 | static struct mlxsw_rx_listener_item * | |
895 | __find_rx_listener_item(struct mlxsw_core *mlxsw_core, | |
896 | const struct mlxsw_rx_listener *rxl, | |
897 | void *priv) | |
898 | { | |
899 | struct mlxsw_rx_listener_item *rxl_item; | |
900 | ||
901 | list_for_each_entry(rxl_item, &mlxsw_core->rx_listener_list, list) { | |
902 | if (__is_rx_listener_equal(&rxl_item->rxl, rxl) && | |
903 | rxl_item->priv == priv) | |
904 | return rxl_item; | |
905 | } | |
906 | return NULL; | |
907 | } | |
908 | ||
/* mlxsw_core_rx_listener_register() - register a packet RX listener.
 * @mlxsw_core: core instance.
 * @rxl: listener description (callback, local port, trap ID); copied.
 * @priv: opaque cookie handed back to the listener callback.
 *
 * Return: 0 on success, -EEXIST if the identical (listener, priv) pair
 * is already registered, -ENOMEM on allocation failure.
 *
 * NOTE(review): the duplicate lookup and list insertion are not done
 * under any lock here; presumably callers serialize registration —
 * confirm against the callers.
 */
int mlxsw_core_rx_listener_register(struct mlxsw_core *mlxsw_core,
				    const struct mlxsw_rx_listener *rxl,
				    void *priv)
{
	struct mlxsw_rx_listener_item *rxl_item;

	rxl_item = __find_rx_listener_item(mlxsw_core, rxl, priv);
	if (rxl_item)
		return -EEXIST;
	rxl_item = kmalloc(sizeof(*rxl_item), GFP_KERNEL);
	if (!rxl_item)
		return -ENOMEM;
	rxl_item->rxl = *rxl;
	rxl_item->priv = priv;

	/* RCU publish: mlxsw_core_skb_receive() walks this list under
	 * rcu_read_lock() only.
	 */
	list_add_rcu(&rxl_item->list, &mlxsw_core->rx_listener_list);
	return 0;
}
EXPORT_SYMBOL(mlxsw_core_rx_listener_register);
928 | ||
/* mlxsw_core_rx_listener_unregister() - remove a previously registered
 * RX listener.
 * @mlxsw_core: core instance.
 * @rxl: listener description used at registration time.
 * @priv: the same cookie passed to mlxsw_core_rx_listener_register().
 *
 * Silently returns if no matching registration exists.
 */
void mlxsw_core_rx_listener_unregister(struct mlxsw_core *mlxsw_core,
				       const struct mlxsw_rx_listener *rxl,
				       void *priv)
{
	struct mlxsw_rx_listener_item *rxl_item;

	rxl_item = __find_rx_listener_item(mlxsw_core, rxl, priv);
	if (!rxl_item)
		return;
	list_del_rcu(&rxl_item->list);
	/* Wait for RX-path readers in the current grace period to finish
	 * before the item is freed.
	 */
	synchronize_rcu();
	kfree(rxl_item);
}
EXPORT_SYMBOL(mlxsw_core_rx_listener_unregister);
943 | ||
/* RX-listener trampoline for event listeners: decodes the EMAD frame
 * carried in @skb into a (reg, payload) pair and forwards it to the
 * event listener callback stored in @priv.
 * Consumes @skb.
 */
static void mlxsw_core_event_listener_func(struct sk_buff *skb, u8 local_port,
					   void *priv)
{
	struct mlxsw_event_listener_item *event_listener_item = priv;
	struct mlxsw_reg_info reg;
	char *payload;
	char *op_tlv = mlxsw_emad_op_tlv(skb);
	char *reg_tlv = mlxsw_emad_reg_tlv(skb);

	reg.id = mlxsw_emad_op_tlv_register_id_get(op_tlv);
	/* TLV length is in 32-bit words and includes the one-word TLV
	 * header, hence the "- 1" before converting to bytes.
	 */
	reg.len = (mlxsw_emad_reg_tlv_len_get(reg_tlv) - 1) * sizeof(u32);
	payload = mlxsw_emad_reg_payload(op_tlv);
	event_listener_item->el.func(&reg, payload, event_listener_item->priv);
	dev_kfree_skb(skb);
}
959 | ||
960 | static bool __is_event_listener_equal(const struct mlxsw_event_listener *el_a, | |
961 | const struct mlxsw_event_listener *el_b) | |
962 | { | |
963 | return (el_a->func == el_b->func && | |
964 | el_a->trap_id == el_b->trap_id); | |
965 | } | |
966 | ||
967 | static struct mlxsw_event_listener_item * | |
968 | __find_event_listener_item(struct mlxsw_core *mlxsw_core, | |
969 | const struct mlxsw_event_listener *el, | |
970 | void *priv) | |
971 | { | |
972 | struct mlxsw_event_listener_item *el_item; | |
973 | ||
974 | list_for_each_entry(el_item, &mlxsw_core->event_listener_list, list) { | |
975 | if (__is_event_listener_equal(&el_item->el, el) && | |
976 | el_item->priv == priv) | |
977 | return el_item; | |
978 | } | |
979 | return NULL; | |
980 | } | |
981 | ||
/* mlxsw_core_event_listener_register() - register an event listener.
 * @mlxsw_core: core instance.
 * @el: event listener description (callback, trap ID); copied.
 * @priv: opaque cookie handed back to the event callback.
 *
 * Events arrive as EMAD packets, so the listener is backed by an RX
 * listener on the event's trap ID (any local port) whose callback
 * decodes the EMAD before invoking @el->func.
 *
 * Return: 0 on success, -EEXIST for a duplicate registration,
 * -ENOMEM on allocation failure, or the RX-listener registration error.
 */
int mlxsw_core_event_listener_register(struct mlxsw_core *mlxsw_core,
				       const struct mlxsw_event_listener *el,
				       void *priv)
{
	int err;
	struct mlxsw_event_listener_item *el_item;
	const struct mlxsw_rx_listener rxl = {
		.func = mlxsw_core_event_listener_func,
		.local_port = MLXSW_PORT_DONT_CARE,
		.trap_id = el->trap_id,
	};

	el_item = __find_event_listener_item(mlxsw_core, el, priv);
	if (el_item)
		return -EEXIST;
	el_item = kmalloc(sizeof(*el_item), GFP_KERNEL);
	if (!el_item)
		return -ENOMEM;
	el_item->el = *el;
	el_item->priv = priv;

	err = mlxsw_core_rx_listener_register(mlxsw_core, &rxl, el_item);
	if (err)
		goto err_rx_listener_register;

	/* Only save the item once the backing RX listener is in place;
	 * on failure it is freed below instead.
	 */
	list_add_rcu(&el_item->list, &mlxsw_core->event_listener_list);

	return 0;

err_rx_listener_register:
	kfree(el_item);
	return err;
}
EXPORT_SYMBOL(mlxsw_core_event_listener_register);
1019 | ||
/* mlxsw_core_event_listener_unregister() - remove an event listener.
 * @mlxsw_core: core instance.
 * @el: event listener description used at registration time.
 * @priv: the same cookie passed at registration time.
 *
 * Rebuilds the RX listener description that registration created so the
 * backing RX listener can be found and removed, then frees the item.
 * Silently returns if no matching registration exists.
 */
void mlxsw_core_event_listener_unregister(struct mlxsw_core *mlxsw_core,
					  const struct mlxsw_event_listener *el,
					  void *priv)
{
	struct mlxsw_event_listener_item *el_item;
	const struct mlxsw_rx_listener rxl = {
		.func = mlxsw_core_event_listener_func,
		.local_port = MLXSW_PORT_DONT_CARE,
		.trap_id = el->trap_id,
	};

	el_item = __find_event_listener_item(mlxsw_core, el, priv);
	if (!el_item)
		return;
	/* The RX-listener unregister includes synchronize_rcu(), so the
	 * item can be freed right after.
	 */
	mlxsw_core_rx_listener_unregister(mlxsw_core, &rxl, el_item);
	list_del(&el_item->list);
	kfree(el_item);
}
EXPORT_SYMBOL(mlxsw_core_event_listener_unregister);
1039 | ||
/* Perform a register access over the EMAD (Ethernet MAD) transport.
 * @payload is the register contents to send; on success it is
 * overwritten with the contents returned by the device.
 *
 * Caller must hold mlxsw_core->emad.lock (see mlxsw_core_reg_access());
 * the emad.tid and emad.resp_skb fields are shared per-core state.
 *
 * Return: 0 on success, -ENOMEM if the EMAD skb cannot be allocated,
 * or the error from mlxsw_emad_transmit().
 */
static int mlxsw_core_reg_access_emad(struct mlxsw_core *mlxsw_core,
				      const struct mlxsw_reg_info *reg,
				      char *payload,
				      enum mlxsw_core_reg_access_type type)
{
	int err;
	char *op_tlv;
	struct sk_buff *skb;
	struct mlxsw_tx_info tx_info = {
		.local_port = MLXSW_PORT_CPU_PORT,
		.is_emad = true,
	};

	skb = mlxsw_emad_alloc(mlxsw_core, reg->len);
	if (!skb)
		return -ENOMEM;

	mlxsw_emad_construct(skb, reg, payload, type, mlxsw_core);
	mlxsw_core->driver->txhdr_construct(skb, &tx_info);

	dev_dbg(mlxsw_core->bus_info->dev, "EMAD send (tid=%llx)\n",
		mlxsw_core->emad.tid);
	mlxsw_core_buf_dump_dbg(mlxsw_core, skb->data, skb->len);

	/* mlxsw_emad_transmit() waits for the response; on success the
	 * reply is left in mlxsw_core->emad.resp_skb for us to consume.
	 */
	err = mlxsw_emad_transmit(mlxsw_core, skb, &tx_info);
	if (!err) {
		op_tlv = mlxsw_emad_op_tlv(mlxsw_core->emad.resp_skb);
		memcpy(payload, mlxsw_emad_reg_payload(op_tlv),
		       reg->len);

		/* tid - 1: presumably the transmit path already bumped
		 * the transaction ID — confirm in mlxsw_emad_transmit().
		 */
		dev_dbg(mlxsw_core->bus_info->dev, "EMAD recv (tid=%llx)\n",
			mlxsw_core->emad.tid - 1);
		mlxsw_core_buf_dump_dbg(mlxsw_core,
					mlxsw_core->emad.resp_skb->data,
					mlxsw_core->emad.resp_skb->len);

		dev_kfree_skb(mlxsw_core->emad.resp_skb);
	}

	return err;
}
1081 | ||
/* Perform a register access through the command interface (firmware
 * mailboxes) instead of EMAD — used before EMAD traps are set up.
 *
 * The in-mailbox is packed with the same OP + REG TLV layout an EMAD
 * frame would carry; ACCESS_REG is retried on -EAGAIN up to
 * MLXSW_EMAD_MAX_RETRY times. On success the returned register payload
 * is copied back into @payload.
 *
 * Caller must hold mlxsw_core->emad.lock (the shared emad.tid counter
 * is incremented here).
 *
 * Return: 0 on success, -ENOMEM on mailbox allocation failure, or the
 * command/EMAD-status error.
 */
static int mlxsw_core_reg_access_cmd(struct mlxsw_core *mlxsw_core,
				     const struct mlxsw_reg_info *reg,
				     char *payload,
				     enum mlxsw_core_reg_access_type type)
{
	int err, n_retry;
	char *in_mbox, *out_mbox, *tmp;

	in_mbox = mlxsw_cmd_mbox_alloc();
	if (!in_mbox)
		return -ENOMEM;

	out_mbox = mlxsw_cmd_mbox_alloc();
	if (!out_mbox) {
		err = -ENOMEM;
		goto free_in_mbox;
	}

	mlxsw_emad_pack_op_tlv(in_mbox, reg, type, mlxsw_core);
	/* REG TLV immediately follows the fixed-size OP TLV. */
	tmp = in_mbox + MLXSW_EMAD_OP_TLV_LEN * sizeof(u32);
	mlxsw_emad_pack_reg_tlv(tmp, reg, payload);

	n_retry = 0;
retry:
	err = mlxsw_cmd_access_reg(mlxsw_core, in_mbox, out_mbox);
	if (!err) {
		err = mlxsw_emad_process_status(mlxsw_core, out_mbox);
		if (err == -EAGAIN && n_retry++ < MLXSW_EMAD_MAX_RETRY)
			goto retry;
	}

	if (!err)
		memcpy(payload, mlxsw_emad_reg_payload(out_mbox),
		       reg->len);

	/* Keep the transaction ID moving even on failure. */
	mlxsw_core->emad.tid++;
	mlxsw_cmd_mbox_free(out_mbox);
free_in_mbox:
	mlxsw_cmd_mbox_free(in_mbox);
	return err;
}
1123 | ||
/* Common entry point for register accesses: serializes all accesses on
 * emad.lock and dispatches to the EMAD transport or, before EMAD is
 * available, to the command interface.
 *
 * Return: 0 on success, -EINTR if interrupted while waiting for the
 * lock, or the transport's error code.
 */
static int mlxsw_core_reg_access(struct mlxsw_core *mlxsw_core,
				 const struct mlxsw_reg_info *reg,
				 char *payload,
				 enum mlxsw_core_reg_access_type type)
{
	u64 cur_tid;
	int err;

	if (mutex_lock_interruptible(&mlxsw_core->emad.lock)) {
		dev_err(mlxsw_core->bus_info->dev, "Reg access interrupted (reg_id=%x(%s),type=%s)\n",
			reg->id, mlxsw_reg_id_str(reg->id),
			mlxsw_core_reg_access_type_str(type));
		return -EINTR;
	}

	/* Snapshot the tid for logging; the transports advance it. */
	cur_tid = mlxsw_core->emad.tid;
	dev_dbg(mlxsw_core->bus_info->dev, "Reg access (tid=%llx,reg_id=%x(%s),type=%s)\n",
		cur_tid, reg->id, mlxsw_reg_id_str(reg->id),
		mlxsw_core_reg_access_type_str(type));

	/* During initialization EMAD interface is not available to us,
	 * so we default to command interface. We switch to EMAD interface
	 * after setting the appropriate traps.
	 */
	if (!mlxsw_core->emad.use_emad)
		err = mlxsw_core_reg_access_cmd(mlxsw_core, reg,
						payload, type);
	else
		err = mlxsw_core_reg_access_emad(mlxsw_core, reg,
						 payload, type);

	if (err)
		dev_err(mlxsw_core->bus_info->dev, "Reg access failed (tid=%llx,reg_id=%x(%s),type=%s)\n",
			cur_tid, reg->id, mlxsw_reg_id_str(reg->id),
			mlxsw_core_reg_access_type_str(type));

	mutex_unlock(&mlxsw_core->emad.lock);
	return err;
}
1163 | ||
1164 | int mlxsw_reg_query(struct mlxsw_core *mlxsw_core, | |
1165 | const struct mlxsw_reg_info *reg, char *payload) | |
1166 | { | |
1167 | return mlxsw_core_reg_access(mlxsw_core, reg, payload, | |
1168 | MLXSW_CORE_REG_ACCESS_TYPE_QUERY); | |
1169 | } | |
1170 | EXPORT_SYMBOL(mlxsw_reg_query); | |
1171 | ||
1172 | int mlxsw_reg_write(struct mlxsw_core *mlxsw_core, | |
1173 | const struct mlxsw_reg_info *reg, char *payload) | |
1174 | { | |
1175 | return mlxsw_core_reg_access(mlxsw_core, reg, payload, | |
1176 | MLXSW_CORE_REG_ACCESS_TYPE_WRITE); | |
1177 | } | |
1178 | EXPORT_SYMBOL(mlxsw_reg_write); | |
1179 | ||
93c1edb2 JP |
1180 | void mlxsw_core_skb_receive(struct mlxsw_core *mlxsw_core, struct sk_buff *skb, |
1181 | struct mlxsw_rx_info *rx_info) | |
1182 | { | |
1183 | struct mlxsw_rx_listener_item *rxl_item; | |
1184 | const struct mlxsw_rx_listener *rxl; | |
1185 | struct mlxsw_core_pcpu_stats *pcpu_stats; | |
1186 | u8 local_port = rx_info->sys_port; | |
1187 | bool found = false; | |
1188 | ||
1189 | dev_dbg_ratelimited(mlxsw_core->bus_info->dev, "%s: sys_port = %d, trap_id = 0x%x\n", | |
1190 | __func__, rx_info->sys_port, rx_info->trap_id); | |
1191 | ||
1192 | if ((rx_info->trap_id >= MLXSW_TRAP_ID_MAX) || | |
1193 | (local_port >= MLXSW_PORT_MAX_PORTS)) | |
1194 | goto drop; | |
1195 | ||
1196 | rcu_read_lock(); | |
1197 | list_for_each_entry_rcu(rxl_item, &mlxsw_core->rx_listener_list, list) { | |
1198 | rxl = &rxl_item->rxl; | |
1199 | if ((rxl->local_port == MLXSW_PORT_DONT_CARE || | |
1200 | rxl->local_port == local_port) && | |
1201 | rxl->trap_id == rx_info->trap_id) { | |
1202 | found = true; | |
1203 | break; | |
1204 | } | |
1205 | } | |
1206 | rcu_read_unlock(); | |
1207 | if (!found) | |
1208 | goto drop; | |
1209 | ||
1210 | pcpu_stats = this_cpu_ptr(mlxsw_core->pcpu_stats); | |
1211 | u64_stats_update_begin(&pcpu_stats->syncp); | |
1212 | pcpu_stats->port_rx_packets[local_port]++; | |
1213 | pcpu_stats->port_rx_bytes[local_port] += skb->len; | |
1214 | pcpu_stats->trap_rx_packets[rx_info->trap_id]++; | |
1215 | pcpu_stats->trap_rx_bytes[rx_info->trap_id] += skb->len; | |
1216 | u64_stats_update_end(&pcpu_stats->syncp); | |
1217 | ||
1218 | rxl->func(skb, local_port, rxl_item->priv); | |
1219 | return; | |
1220 | ||
1221 | drop: | |
1222 | if (rx_info->trap_id >= MLXSW_TRAP_ID_MAX) | |
1223 | this_cpu_inc(mlxsw_core->pcpu_stats->trap_rx_invalid); | |
1224 | else | |
1225 | this_cpu_inc(mlxsw_core->pcpu_stats->trap_rx_dropped[rx_info->trap_id]); | |
1226 | if (local_port >= MLXSW_PORT_MAX_PORTS) | |
1227 | this_cpu_inc(mlxsw_core->pcpu_stats->port_rx_invalid); | |
1228 | else | |
1229 | this_cpu_inc(mlxsw_core->pcpu_stats->port_rx_dropped[local_port]); | |
1230 | dev_kfree_skb(skb); | |
1231 | } | |
1232 | EXPORT_SYMBOL(mlxsw_core_skb_receive); | |
1233 | ||
/* mlxsw_cmd_exec() - execute a firmware command through the bus.
 * @mlxsw_core: core instance.
 * @opcode, @opcode_mod, @in_mod: command identification/modifiers.
 * @out_mbox_direct: whether the output mailbox is read directly rather
 *	than via a mailbox page (bus-specific semantics).
 * @in_mbox/@in_mbox_size: optional input mailbox (may be NULL/0).
 * @out_mbox/@out_mbox_size: optional output mailbox (may be NULL/0).
 *
 * Mailbox sizes must be 32-bit aligned. Logs a diagnostic when the
 * firmware reports a bad status (-EIO) or the command times out.
 *
 * Return: 0 on success, -EOPNOTSUPP if the bus has no cmd_exec
 * callback, or the bus callback's error.
 */
int mlxsw_cmd_exec(struct mlxsw_core *mlxsw_core, u16 opcode, u8 opcode_mod,
		   u32 in_mod, bool out_mbox_direct,
		   char *in_mbox, size_t in_mbox_size,
		   char *out_mbox, size_t out_mbox_size)
{
	u8 status;
	int err;

	BUG_ON(in_mbox_size % sizeof(u32) || out_mbox_size % sizeof(u32));
	if (!mlxsw_core->bus->cmd_exec)
		return -EOPNOTSUPP;

	dev_dbg(mlxsw_core->bus_info->dev, "Cmd exec (opcode=%x(%s),opcode_mod=%x,in_mod=%x)\n",
		opcode, mlxsw_cmd_opcode_str(opcode), opcode_mod, in_mod);
	if (in_mbox) {
		dev_dbg(mlxsw_core->bus_info->dev, "Input mailbox:\n");
		mlxsw_core_buf_dump_dbg(mlxsw_core, in_mbox, in_mbox_size);
	}

	err = mlxsw_core->bus->cmd_exec(mlxsw_core->bus_priv, opcode,
					opcode_mod, in_mod, out_mbox_direct,
					in_mbox, in_mbox_size,
					out_mbox, out_mbox_size, &status);

	/* -EIO with a non-OK status means the firmware rejected the
	 * command; report the firmware status string.
	 */
	if (err == -EIO && status != MLXSW_CMD_STATUS_OK) {
		dev_err(mlxsw_core->bus_info->dev, "Cmd exec failed (opcode=%x(%s),opcode_mod=%x,in_mod=%x,status=%x(%s))\n",
			opcode, mlxsw_cmd_opcode_str(opcode), opcode_mod,
			in_mod, status, mlxsw_cmd_status_str(status));
	} else if (err == -ETIMEDOUT) {
		dev_err(mlxsw_core->bus_info->dev, "Cmd exec timed-out (opcode=%x(%s),opcode_mod=%x,in_mod=%x)\n",
			opcode, mlxsw_cmd_opcode_str(opcode), opcode_mod,
			in_mod);
	}

	if (!err && out_mbox) {
		dev_dbg(mlxsw_core->bus_info->dev, "Output mailbox:\n");
		mlxsw_core_buf_dump_dbg(mlxsw_core, out_mbox, out_mbox_size);
	}
	return err;
}
EXPORT_SYMBOL(mlxsw_cmd_exec);
1275 | ||
/* Module init: create the top-level debugfs directory shared by all
 * mlxsw core instances. NULL return means debugfs creation failed
 * (NULL-on-error was the debugfs convention when this was written).
 */
static int __init mlxsw_core_module_init(void)
{
	mlxsw_core_dbg_root = debugfs_create_dir(mlxsw_core_driver_name, NULL);
	if (!mlxsw_core_dbg_root)
		return -ENOMEM;
	return 0;
}
1283 | ||
/* Module exit: remove the shared debugfs directory and everything
 * beneath it.
 */
static void __exit mlxsw_core_module_exit(void)
{
	debugfs_remove_recursive(mlxsw_core_dbg_root);
}
1288 | ||
/* Module plumbing and metadata. */
module_init(mlxsw_core_module_init);
module_exit(mlxsw_core_module_exit);

MODULE_LICENSE("Dual BSD/GPL");
MODULE_AUTHOR("Jiri Pirko <jiri@mellanox.com>");
MODULE_DESCRIPTION("Mellanox switch device core driver");