Commit | Line | Data |
---|---|---|
e28c00ad | 1 | /* |
759d5768 | 2 | * Copyright (c) 2006 QLogic, Inc. All rights reserved. |
e28c00ad BS |
3 | * Copyright (c) 2005, 2006 PathScale, Inc. All rights reserved. |
4 | * | |
5 | * This software is available to you under a choice of one of two | |
6 | * licenses. You may choose to be licensed under the terms of the GNU | |
7 | * General Public License (GPL) Version 2, available from the file | |
8 | * COPYING in the main directory of this source tree, or the | |
9 | * OpenIB.org BSD license below: | |
10 | * | |
11 | * Redistribution and use in source and binary forms, with or | |
12 | * without modification, are permitted provided that the following | |
13 | * conditions are met: | |
14 | * | |
15 | * - Redistributions of source code must retain the above | |
16 | * copyright notice, this list of conditions and the following | |
17 | * disclaimer. | |
18 | * | |
19 | * - Redistributions in binary form must reproduce the above | |
20 | * copyright notice, this list of conditions and the following | |
21 | * disclaimer in the documentation and/or other materials | |
22 | * provided with the distribution. | |
23 | * | |
24 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, | |
25 | * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF | |
26 | * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND | |
27 | * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS | |
28 | * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN | |
29 | * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN | |
30 | * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE | |
31 | * SOFTWARE. | |
32 | */ | |
33 | ||
34 | #include <rdma/ib_smi.h> | |
35 | ||
36 | #include "ipath_kernel.h" | |
37 | #include "ipath_verbs.h" | |
38 | #include "ips_common.h" | |
39 | ||
/*
 * MAD status-field codes stored into ib_smp->status.  These are kept
 * pre-swapped to network byte order (the status field is __be16) so
 * they can be OR'ed in directly.
 */
#define IB_SMP_UNSUP_VERSION	__constant_htons(0x0004)
#define IB_SMP_UNSUP_METHOD	__constant_htons(0x0008)
#define IB_SMP_UNSUP_METH_ATTR	__constant_htons(0x000C)
#define IB_SMP_INVALID_FIELD	__constant_htons(0x001C)
44 | ||
45 | static int reply(struct ib_smp *smp) | |
46 | { | |
47 | /* | |
48 | * The verbs framework will handle the directed/LID route | |
49 | * packet changes. | |
50 | */ | |
51 | smp->method = IB_MGMT_METHOD_GET_RESP; | |
52 | if (smp->mgmt_class == IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE) | |
53 | smp->status |= IB_SMP_DIRECTION; | |
54 | return IB_MAD_RESULT_SUCCESS | IB_MAD_RESULT_REPLY; | |
55 | } | |
56 | ||
57 | static int recv_subn_get_nodedescription(struct ib_smp *smp, | |
58 | struct ib_device *ibdev) | |
59 | { | |
60 | if (smp->attr_mod) | |
61 | smp->status |= IB_SMP_INVALID_FIELD; | |
62 | ||
63 | strncpy(smp->data, ibdev->node_desc, sizeof(smp->data)); | |
64 | ||
65 | return reply(smp); | |
66 | } | |
67 | ||
/*
 * Wire format of the SubnGet(NodeInfo) response payload (IBA ch.
 * 14.2.5.3).  Field order and packing match the on-the-wire layout,
 * so this struct must stay packed and unreordered.
 */
struct nodeinfo {
	u8 base_version;	/* supported MAD base version */
	u8 class_version;	/* supported class version */
	u8 node_type;		/* 1 == channel adapter (set by caller) */
	u8 num_ports;
	__be64 sys_guid;	/* system image GUID */
	__be64 node_guid;
	__be64 port_guid;
	__be16 partition_cap;	/* number of P_Key entries supported */
	__be16 device_id;
	__be32 revision;
	u8 local_port_num;	/* port this MAD arrived on */
	u8 vendor_id[3];	/* IEEE OUI, big-endian byte order */
} __attribute__ ((packed));
82 | ||
83 | static int recv_subn_get_nodeinfo(struct ib_smp *smp, | |
84 | struct ib_device *ibdev, u8 port) | |
85 | { | |
86 | struct nodeinfo *nip = (struct nodeinfo *)&smp->data; | |
87 | struct ipath_devdata *dd = to_idev(ibdev)->dd; | |
e8a88f09 | 88 | u32 vendor, majrev, minrev; |
e28c00ad BS |
89 | |
90 | if (smp->attr_mod) | |
91 | smp->status |= IB_SMP_INVALID_FIELD; | |
92 | ||
93 | nip->base_version = 1; | |
94 | nip->class_version = 1; | |
95 | nip->node_type = 1; /* channel adapter */ | |
96 | /* | |
97 | * XXX The num_ports value will need a layer function to get | |
98 | * the value if we ever have more than one IB port on a chip. | |
99 | * We will also need to get the GUID for the port. | |
100 | */ | |
101 | nip->num_ports = ibdev->phys_port_cnt; | |
102 | /* This is already in network order */ | |
103 | nip->sys_guid = to_idev(ibdev)->sys_image_guid; | |
104 | nip->node_guid = ipath_layer_get_guid(dd); | |
105 | nip->port_guid = nip->sys_guid; | |
106 | nip->partition_cap = cpu_to_be16(ipath_layer_get_npkeys(dd)); | |
107 | nip->device_id = cpu_to_be16(ipath_layer_get_deviceid(dd)); | |
e8a88f09 BS |
108 | majrev = ipath_layer_get_majrev(dd); |
109 | minrev = ipath_layer_get_minrev(dd); | |
e28c00ad BS |
110 | nip->revision = cpu_to_be32((majrev << 16) | minrev); |
111 | nip->local_port_num = port; | |
e8a88f09 | 112 | vendor = ipath_layer_get_vendorid(dd); |
e28c00ad BS |
113 | nip->vendor_id[0] = 0; |
114 | nip->vendor_id[1] = vendor >> 8; | |
115 | nip->vendor_id[2] = vendor; | |
116 | ||
117 | return reply(smp); | |
118 | } | |
119 | ||
120 | static int recv_subn_get_guidinfo(struct ib_smp *smp, | |
121 | struct ib_device *ibdev) | |
122 | { | |
123 | u32 startgx = 8 * be32_to_cpu(smp->attr_mod); | |
124 | __be64 *p = (__be64 *) smp->data; | |
125 | ||
126 | /* 32 blocks of 8 64-bit GUIDs per block */ | |
127 | ||
128 | memset(smp->data, 0, sizeof(smp->data)); | |
129 | ||
130 | /* | |
131 | * We only support one GUID for now. If this changes, the | |
132 | * portinfo.guid_cap field needs to be updated too. | |
133 | */ | |
134 | if (startgx == 0) | |
135 | /* The first is a copy of the read-only HW GUID. */ | |
136 | *p = ipath_layer_get_guid(to_idev(ibdev)->dd); | |
137 | else | |
138 | smp->status |= IB_SMP_INVALID_FIELD; | |
139 | ||
140 | return reply(smp); | |
141 | } | |
142 | ||
e28c00ad BS |
/*
 * Answer a SubnGet(PortInfo) request (IBA ch. 14.2.5.6): build the
 * PortInfo attribute in the SMP data area from device and software
 * state, then reply.  Also called by recv_subn_set_portinfo() to
 * produce the GetResp after a Set.
 */
static int recv_subn_get_portinfo(struct ib_smp *smp,
				  struct ib_device *ibdev, u8 port)
{
	struct ipath_ibdev *dev;
	struct ib_port_info *pip = (struct ib_port_info *)smp->data;
	u16 lid;
	u8 ibcstat;
	u8 mtu;
	int ret;

	/* The attribute modifier selects the port number. */
	if (be32_to_cpu(smp->attr_mod) > ibdev->phys_port_cnt) {
		smp->status |= IB_SMP_INVALID_FIELD;
		ret = reply(smp);
		goto bail;
	}

	dev = to_idev(ibdev);

	/* Clear all fields.  Only set the non-zero fields. */
	memset(smp->data, 0, sizeof(smp->data));

	/* Only return the mkey if the protection field allows it. */
	if (smp->method == IB_MGMT_METHOD_SET || dev->mkey == smp->mkey ||
	    (dev->mkeyprot_resv_lmc >> 6) == 0)
		pip->mkey = dev->mkey;
	pip->gid_prefix = dev->gid_prefix;
	lid = ipath_layer_get_lid(dev->dd);
	/* LID 0 means "not yet assigned"; report the permissive LID. */
	pip->lid = lid ? cpu_to_be16(lid) : IB_LID_PERMISSIVE;
	pip->sm_lid = cpu_to_be16(dev->sm_lid);
	pip->cap_mask = cpu_to_be32(dev->port_cap_flags);
	/* pip->diag_code; */
	pip->mkey_lease_period = cpu_to_be16(dev->mkey_lease_period);
	pip->local_port_num = port;
	pip->link_width_enabled = dev->link_width_enabled;
	pip->link_width_supported = 3;	/* 1x or 4x */
	pip->link_width_active = 2;	/* 4x */
	pip->linkspeed_portstate = 0x10;	/* 2.5Gbps */
	ibcstat = ipath_layer_get_lastibcstat(dev->dd);
	/* Low nibble: logical port state derived from IBC status. */
	pip->linkspeed_portstate |= ((ibcstat >> 4) & 0x3) + 1;
	pip->portphysstate_linkdown =
		(ipath_cvt_physportstate[ibcstat & 0xf] << 4) |
		(ipath_layer_get_linkdowndefaultstate(dev->dd) ? 1 : 2);
	pip->mkeyprot_resv_lmc = dev->mkeyprot_resv_lmc;
	pip->linkspeedactive_enabled = 0x11;	/* 2.5Gbps, 2.5Gbps */
	/* Map the chip MTU (bytes) onto the IB MTU enumeration. */
	switch (ipath_layer_get_ibmtu(dev->dd)) {
	case 4096:
		mtu = IB_MTU_4096;
		break;
	case 2048:
		mtu = IB_MTU_2048;
		break;
	case 1024:
		mtu = IB_MTU_1024;
		break;
	case 512:
		mtu = IB_MTU_512;
		break;
	case 256:
		mtu = IB_MTU_256;
		break;
	default:		/* oops, something is wrong */
		mtu = IB_MTU_2048;
		break;
	}
	pip->neighbormtu_mastersmsl = (mtu << 4) | dev->sm_sl;
	pip->vlcap_inittype = 0x10;	/* VLCap = VL0, InitType = 0 */
	pip->vl_high_limit = dev->vl_high_limit;
	/* pip->vl_arb_high_cap; // only one VL */
	/* pip->vl_arb_low_cap; // only one VL */
	/* InitTypeReply = 0 */
	pip->inittypereply_mtucap = IB_MTU_4096;
	/* HCAs ignore VLStallCount and HOQLife */
	/* pip->vlstallcnt_hoqlife; */
	pip->operationalvl_pei_peo_fpi_fpo = 0x10;	/* OVLs = 1 */
	pip->mkey_violations = cpu_to_be16(dev->mkey_violations);
	/*
	 * P_KeyViolations are counted by hardware and can't be reset,
	 * so subtract the snapshot taken at the last counter reset.
	 */
	pip->pkey_violations =
		cpu_to_be16((ipath_layer_get_cr_errpkey(dev->dd) -
			     dev->z_pkey_violations) & 0xFFFF);
	pip->qkey_violations = cpu_to_be16(dev->qkey_violations);
	/* Only the hardware GUID is supported for now */
	pip->guid_cap = 1;
	pip->clientrereg_resv_subnetto = dev->subnet_timeout;
	/* 32.768 usec. response time (guessing) */
	pip->resv_resptimevalue = 3;
	pip->localphyerrors_overrunerrors =
		(ipath_layer_get_phyerrthreshold(dev->dd) << 4) |
		ipath_layer_get_overrunthreshold(dev->dd);
	/* pip->max_credit_hint; */
	/* pip->link_roundtrip_latency[3]; */

	ret = reply(smp);

bail:
	return ret;
}
239 | ||
/*
 * Answer a SubnGet(P_KeyTable) request.  The full table is 64 blocks
 * of 32 16-bit P_Key entries; only block 0 is implemented.
 *
 * NOTE: p and q alias the same buffer (smp->data).  The layer call
 * fills it with CPU-order keys, and the loop then byte-swaps each
 * entry in place — safe because entry i is read before it is written.
 */
static int recv_subn_get_pkeytable(struct ib_smp *smp,
				   struct ib_device *ibdev)
{
	u32 startpx = 32 * (be32_to_cpu(smp->attr_mod) & 0xffff);
	u16 *p = (u16 *) smp->data;
	__be16 *q = (__be16 *) smp->data;

	/* 64 blocks of 32 16-bit P_Key entries */

	memset(smp->data, 0, sizeof(smp->data));
	if (startpx == 0) {
		struct ipath_ibdev *dev = to_idev(ibdev);
		unsigned i, n = ipath_layer_get_npkeys(dev->dd);

		ipath_layer_get_pkeys(dev->dd, p);

		for (i = 0; i < n; i++)
			q[i] = cpu_to_be16(p[i]);
	} else
		smp->status |= IB_SMP_INVALID_FIELD;

	return reply(smp);
}
263 | ||
/*
 * Handle a SubnSet(GUIDInfo) request.  The only GUID we support is
 * the first read-only entry, so nothing is writable and a Set is
 * answered exactly like a Get (invalid selectors are flagged there).
 */
static int recv_subn_set_guidinfo(struct ib_smp *smp,
				  struct ib_device *ibdev)
{
	return recv_subn_get_guidinfo(smp, ibdev);
}
270 | ||
/**
 * recv_subn_set_portinfo - set port information
 * @smp: the incoming SM packet
 * @ibdev: the infiniband device
 * @port: the port on the device
 *
 * Set Portinfo (see ch. 14.2.5.6).
 *
 * Fields are applied in order and several take effect immediately,
 * so a later validation failure can leave the port partially updated
 * (see the XXX comments below).  On both success and failure the
 * reply payload is rebuilt via recv_subn_get_portinfo() so the SM
 * sees the resulting state.
 */
static int recv_subn_set_portinfo(struct ib_smp *smp,
				  struct ib_device *ibdev, u8 port)
{
	struct ib_port_info *pip = (struct ib_port_info *)smp->data;
	struct ib_event event;
	struct ipath_ibdev *dev;
	u32 flags;
	char clientrereg = 0;
	u16 lid, smlid;
	u8 lwe;
	u8 lse;
	u8 state;
	u16 lstate;
	u32 mtu;
	int ret;

	/* The attribute modifier selects the port number. */
	if (be32_to_cpu(smp->attr_mod) > ibdev->phys_port_cnt)
		goto err;

	dev = to_idev(ibdev);
	event.device = ibdev;
	event.element.port_num = port;

	dev->mkey = pip->mkey;
	dev->gid_prefix = pip->gid_prefix;
	dev->mkey_lease_period = be16_to_cpu(pip->mkey_lease_period);

	lid = be16_to_cpu(pip->lid);
	if (lid != ipath_layer_get_lid(dev->dd)) {
		/* Must be a valid unicast LID address. */
		if (lid == 0 || lid >= IPS_MULTICAST_LID_BASE)
			goto err;
		ipath_set_sps_lid(dev->dd, lid, pip->mkeyprot_resv_lmc & 7);
		event.event = IB_EVENT_LID_CHANGE;
		ib_dispatch_event(&event);
	}

	smlid = be16_to_cpu(pip->sm_lid);
	if (smlid != dev->sm_lid) {
		/* Must be a valid unicast LID address. */
		if (smlid == 0 || smlid >= IPS_MULTICAST_LID_BASE)
			goto err;
		dev->sm_lid = smlid;
		event.event = IB_EVENT_SM_CHANGE;
		ib_dispatch_event(&event);
	}

	/* Only 4x supported but allow 1x or 4x to be set (see 14.2.6.6). */
	lwe = pip->link_width_enabled;
	if ((lwe >= 4 && lwe <= 8) || (lwe >= 0xC && lwe <= 0xFE))
		goto err;
	if (lwe == 0xFF)
		dev->link_width_enabled = 3;	/* 1x or 4x */
	else if (lwe)
		dev->link_width_enabled = lwe;

	/* Only 2.5 Gbs supported. */
	lse = pip->linkspeedactive_enabled & 0xF;
	if (lse >= 2 && lse <= 0xE)
		goto err;

	/* Set link down default state. */
	switch (pip->portphysstate_linkdown & 0xF) {
	case 0: /* NOP */
		break;
	case 1: /* SLEEP */
		if (ipath_layer_set_linkdowndefaultstate(dev->dd, 1))
			goto err;
		break;
	case 2: /* POLL */
		if (ipath_layer_set_linkdowndefaultstate(dev->dd, 0))
			goto err;
		break;
	default:
		goto err;
	}

	dev->mkeyprot_resv_lmc = pip->mkeyprot_resv_lmc;
	dev->vl_high_limit = pip->vl_high_limit;

	/* Translate the IB MTU enumeration back to bytes for the chip. */
	switch ((pip->neighbormtu_mastersmsl >> 4) & 0xF) {
	case IB_MTU_256:
		mtu = 256;
		break;
	case IB_MTU_512:
		mtu = 512;
		break;
	case IB_MTU_1024:
		mtu = 1024;
		break;
	case IB_MTU_2048:
		mtu = 2048;
		break;
	case IB_MTU_4096:
		mtu = 4096;
		break;
	default:
		/* XXX We have already partially updated our state! */
		goto err;
	}
	ipath_layer_set_mtu(dev->dd, mtu);

	dev->sm_sl = pip->neighbormtu_mastersmsl & 0xF;

	/* We only support VL0 */
	if (((pip->operationalvl_pei_peo_fpi_fpo >> 4) & 0xF) > 1)
		goto err;

	/* Writing 0 to a violation counter resets it (14.2.5.6). */
	if (pip->mkey_violations == 0)
		dev->mkey_violations = 0;

	/*
	 * Hardware counter can't be reset so snapshot and subtract
	 * later.
	 */
	if (pip->pkey_violations == 0)
		dev->z_pkey_violations =
			ipath_layer_get_cr_errpkey(dev->dd);

	if (pip->qkey_violations == 0)
		dev->qkey_violations = 0;

	if (ipath_layer_set_phyerrthreshold(
		    dev->dd,
		    (pip->localphyerrors_overrunerrors >> 4) & 0xF))
		goto err;

	if (ipath_layer_set_overrunthreshold(
		    dev->dd,
		    (pip->localphyerrors_overrunerrors & 0xF)))
		goto err;

	dev->subnet_timeout = pip->clientrereg_resv_subnetto & 0x1F;

	/* High bit is the ClientReregister request. */
	if (pip->clientrereg_resv_subnetto & 0x80) {
		clientrereg = 1;
		event.event = IB_EVENT_CLIENT_REREGISTER;
		ib_dispatch_event(&event);
	}

	/*
	 * Do the port state change now that the other link parameters
	 * have been set.
	 * Changing the port physical state only makes sense if the link
	 * is down or is being set to down.
	 */
	state = pip->linkspeed_portstate & 0xF;
	flags = ipath_layer_get_flags(dev->dd);
	lstate = (pip->portphysstate_linkdown >> 4) & 0xF;
	if (lstate && !(state == IB_PORT_DOWN || state == IB_PORT_NOP))
		goto err;

	/*
	 * Only state changes of DOWN, ARM, and ACTIVE are valid
	 * and must be in the correct state to take effect (see 7.2.6).
	 */
	switch (state) {
	case IB_PORT_NOP:
		if (lstate == 0)
			break;
		/* FALLTHROUGH */
	case IB_PORT_DOWN:
		/* lstate 0 means "use the configured linkdown default". */
		if (lstate == 0)
			if (ipath_layer_get_linkdowndefaultstate(dev->dd))
				lstate = IPATH_IB_LINKDOWN_SLEEP;
			else
				lstate = IPATH_IB_LINKDOWN;
		else if (lstate == 1)
			lstate = IPATH_IB_LINKDOWN_SLEEP;
		else if (lstate == 2)
			lstate = IPATH_IB_LINKDOWN;
		else if (lstate == 3)
			lstate = IPATH_IB_LINKDOWN_DISABLE;
		else
			goto err;
		ipath_layer_set_linkstate(dev->dd, lstate);
		if (flags & IPATH_LINKACTIVE) {
			event.event = IB_EVENT_PORT_ERR;
			ib_dispatch_event(&event);
		}
		break;
	case IB_PORT_ARMED:
		/* Arming only makes sense from INIT or ACTIVE. */
		if (!(flags & (IPATH_LINKINIT | IPATH_LINKACTIVE)))
			break;
		ipath_layer_set_linkstate(dev->dd, IPATH_IB_LINKARM);
		if (flags & IPATH_LINKACTIVE) {
			event.event = IB_EVENT_PORT_ERR;
			ib_dispatch_event(&event);
		}
		break;
	case IB_PORT_ACTIVE:
		if (!(flags & IPATH_LINKARMED))
			break;
		ipath_layer_set_linkstate(dev->dd, IPATH_IB_LINKACTIVE);
		event.event = IB_EVENT_PORT_ACTIVE;
		ib_dispatch_event(&event);
		break;
	default:
		/* XXX We have already partially updated our state! */
		goto err;
	}

	ret = recv_subn_get_portinfo(smp, ibdev, port);

	/* The Get rebuilt the payload; restore the ClientReregister bit. */
	if (clientrereg)
		pip->clientrereg_resv_subnetto |= 0x80;

	goto done;

err:
	smp->status |= IB_SMP_INVALID_FIELD;
	ret = recv_subn_get_portinfo(smp, ibdev, port);

done:
	return ret;
}
495 | ||
/*
 * Handle a SubnSet(P_KeyTable) request: convert the incoming
 * network-order keys to CPU order and hand them to the layer code,
 * then reply with the (re-read) table.
 *
 * NOTE: p and q alias the same buffer (smp->data); the loop
 * byte-swaps in place, which is safe because entry i is read before
 * it is written.  Only block 0 is writable.
 */
static int recv_subn_set_pkeytable(struct ib_smp *smp,
				   struct ib_device *ibdev)
{
	u32 startpx = 32 * (be32_to_cpu(smp->attr_mod) & 0xffff);
	__be16 *p = (__be16 *) smp->data;
	u16 *q = (u16 *) smp->data;
	struct ipath_ibdev *dev = to_idev(ibdev);
	unsigned i, n = ipath_layer_get_npkeys(dev->dd);

	for (i = 0; i < n; i++)
		q[i] = be16_to_cpu(p[i]);

	if (startpx != 0 ||
	    ipath_layer_set_pkeys(dev->dd, q) != 0)
		smp->status |= IB_SMP_INVALID_FIELD;

	return recv_subn_get_pkeytable(smp, ibdev);
}
514 | ||
/*
 * Performance management class attribute IDs (IBA ch. 16.1.2), kept
 * in network byte order to match ib_perf->attr_id.
 */
#define IB_PMA_CLASS_PORT_INFO		__constant_htons(0x0001)
#define IB_PMA_PORT_SAMPLES_CONTROL	__constant_htons(0x0010)
#define IB_PMA_PORT_SAMPLES_RESULT	__constant_htons(0x0011)
#define IB_PMA_PORT_COUNTERS		__constant_htons(0x0012)
#define IB_PMA_PORT_COUNTERS_EXT	__constant_htons(0x001D)
#define IB_PMA_PORT_SAMPLES_RESULT_EXT	__constant_htons(0x001E)
521 | ||
/*
 * Wire format of a performance management MAD: the common MAD header
 * followed by a 40-byte reserved area and 192 bytes of attribute
 * data.  Must stay packed to match the on-the-wire layout.
 */
struct ib_perf {
	u8 base_version;
	u8 mgmt_class;
	u8 class_version;
	u8 method;
	__be16 status;
	__be16 unused;
	__be64 tid;		/* transaction ID */
	__be16 attr_id;		/* one of the IB_PMA_* attribute IDs */
	__be16 resv;
	__be32 attr_mod;
	u8 reserved[40];
	u8 data[192];		/* attribute-specific payload */
} __attribute__ ((packed));
536 | ||
/*
 * Wire format of the PMA ClassPortInfo attribute (IBA ch. 13.4.8.1).
 * Overlays ib_perf->data; must stay packed and unreordered.
 */
struct ib_pma_classportinfo {
	u8 base_version;
	u8 class_version;
	__be16 cap_mask;
	u8 reserved[3];
	u8 resp_time_value;	/* only lower 5 bits */
	union ib_gid redirect_gid;
	__be32 redirect_tc_sl_fl;	/* 8, 4, 20 bits respectively */
	__be16 redirect_lid;
	__be16 redirect_pkey;
	__be32 redirect_qp;	/* only lower 24 bits */
	__be32 redirect_qkey;
	union ib_gid trap_gid;
	__be32 trap_tc_sl_fl;	/* 8, 4, 20 bits respectively */
	__be16 trap_lid;
	__be16 trap_pkey;
	__be32 trap_hl_qp;	/* 8, 24 bits respectively */
	__be32 trap_qkey;
} __attribute__ ((packed));
556 | ||
/*
 * Wire format of the PMA PortSamplesControl attribute (IBA ch.
 * 16.1.3.2).  Overlays ib_perf->data; must stay packed.
 */
struct ib_pma_portsamplescontrol {
	u8 opcode;
	u8 port_select;
	u8 tick;
	u8 counter_width;	/* only lower 3 bits */
	__be32 counter_mask0_9;	/* 2, 10 * 3, bits */
	__be16 counter_mask10_14;	/* 1, 5 * 3, bits */
	u8 sample_mechanisms;
	u8 sample_status;	/* only lower 2 bits */
	__be64 option_mask;
	__be64 vendor_mask;
	__be32 sample_start;
	__be32 sample_interval;
	__be16 tag;
	__be16 counter_select[15];
} __attribute__ ((packed));
573 | ||
/*
 * Wire format of the PMA PortSamplesResult attribute (32-bit
 * counters).  Overlays ib_perf->data; must stay packed.
 */
struct ib_pma_portsamplesresult {
	__be16 tag;
	__be16 sample_status;	/* only lower 2 bits */
	__be32 counter[15];
} __attribute__ ((packed));
579 | ||
/*
 * Wire format of the PMA PortSamplesResultExtended attribute (64-bit
 * counters).  Overlays ib_perf->data; must stay packed.
 */
struct ib_pma_portsamplesresult_ext {
	__be16 tag;
	__be16 sample_status;	/* only lower 2 bits */
	__be32 extended_width;	/* only upper 2 bits */
	__be64 counter[15];
} __attribute__ ((packed));
586 | ||
/*
 * Wire format of the PMA PortCounters attribute (IBA ch. 16.1.3.5).
 * Overlays ib_perf->data; must stay packed and unreordered.
 */
struct ib_pma_portcounters {
	u8 reserved;
	u8 port_select;
	__be16 counter_select;	/* which counters a Set resets */
	__be16 symbol_error_counter;
	u8 link_error_recovery_counter;
	u8 link_downed_counter;
	__be16 port_rcv_errors;
	__be16 port_rcv_remphys_errors;
	__be16 port_rcv_switch_relay_errors;
	__be16 port_xmit_discards;
	u8 port_xmit_constraint_errors;
	u8 port_rcv_constraint_errors;
	u8 reserved1;
	u8 lli_ebor_errors;	/* 4, 4, bits */
	__be16 reserved2;
	__be16 vl15_dropped;
	__be32 port_xmit_data;
	__be32 port_rcv_data;
	__be32 port_xmit_packets;
	__be32 port_rcv_packets;
} __attribute__ ((packed));
609 | ||
/*
 * PortCounters CounterSelect bits (network byte order, matching the
 * __be16 counter_select field): each bit names one counter that a
 * Set(PortCounters) request resets.
 */
#define IB_PMA_SEL_SYMBOL_ERROR			__constant_htons(0x0001)
#define IB_PMA_SEL_LINK_ERROR_RECOVERY		__constant_htons(0x0002)
#define IB_PMA_SEL_LINK_DOWNED			__constant_htons(0x0004)
#define IB_PMA_SEL_PORT_RCV_ERRORS		__constant_htons(0x0008)
#define IB_PMA_SEL_PORT_RCV_REMPHYS_ERRORS	__constant_htons(0x0010)
#define IB_PMA_SEL_PORT_XMIT_DISCARDS		__constant_htons(0x0040)
#define IB_PMA_SEL_PORT_XMIT_DATA		__constant_htons(0x1000)
#define IB_PMA_SEL_PORT_RCV_DATA		__constant_htons(0x2000)
#define IB_PMA_SEL_PORT_XMIT_PACKETS		__constant_htons(0x4000)
#define IB_PMA_SEL_PORT_RCV_PACKETS		__constant_htons(0x8000)
620 | ||
/*
 * Wire format of the PMA PortCountersExtended attribute (64-bit
 * counters).  Overlays ib_perf->data; must stay packed.
 */
struct ib_pma_portcounters_ext {
	u8 reserved;
	u8 port_select;
	__be16 counter_select;	/* which counters a Set resets */
	__be32 reserved1;
	__be64 port_xmit_data;
	__be64 port_rcv_data;
	__be64 port_xmit_packets;
	__be64 port_rcv_packets;
	__be64 port_unicast_xmit_packets;
	__be64 port_unicast_rcv_packets;
	__be64 port_multicast_xmit_packets;
	__be64 port_multicast_rcv_packets;
} __attribute__ ((packed));
635 | ||
/*
 * PortCountersExtended CounterSelect bits (network byte order); each
 * bit names one extended counter a Set request resets.
 */
#define IB_PMA_SELX_PORT_XMIT_DATA		__constant_htons(0x0001)
#define IB_PMA_SELX_PORT_RCV_DATA		__constant_htons(0x0002)
#define IB_PMA_SELX_PORT_XMIT_PACKETS		__constant_htons(0x0004)
#define IB_PMA_SELX_PORT_RCV_PACKETS		__constant_htons(0x0008)
#define IB_PMA_SELX_PORT_UNI_XMIT_PACKETS	__constant_htons(0x0010)
#define IB_PMA_SELX_PORT_UNI_RCV_PACKETS	__constant_htons(0x0020)
#define IB_PMA_SELX_PORT_MULTI_XMIT_PACKETS	__constant_htons(0x0040)
#define IB_PMA_SELX_PORT_MULTI_RCV_PACKETS	__constant_htons(0x0080)
644 | ||
645 | static int recv_pma_get_classportinfo(struct ib_perf *pmp) | |
646 | { | |
647 | struct ib_pma_classportinfo *p = | |
648 | (struct ib_pma_classportinfo *)pmp->data; | |
649 | ||
650 | memset(pmp->data, 0, sizeof(pmp->data)); | |
651 | ||
652 | if (pmp->attr_mod != 0) | |
653 | pmp->status |= IB_SMP_INVALID_FIELD; | |
654 | ||
655 | /* Indicate AllPortSelect is valid (only one port anyway) */ | |
656 | p->cap_mask = __constant_cpu_to_be16(1 << 8); | |
657 | p->base_version = 1; | |
658 | p->class_version = 1; | |
659 | /* | |
660 | * Expected response time is 4.096 usec. * 2^18 == 1.073741824 | |
661 | * sec. | |
662 | */ | |
663 | p->resp_time_value = 18; | |
664 | ||
665 | return reply((struct ib_smp *) pmp); | |
666 | } | |
667 | ||
/*
 * The PortSamplesControl.CounterMasks field is an array of 3 bit fields
 * which specify the N'th counter's capabilities. See ch. 16.1.3.2.
 * We support 5 counters which only count the mandatory quantities.
 *
 * Both macro parameters are fully parenthesized so that expression
 * arguments (e.g. "1|2") expand with the intended precedence.
 */
#define COUNTER_MASK(q, n) ((q) << ((9 - (n)) * 3))
#define COUNTER_MASK0_9 \
	__constant_cpu_to_be32(COUNTER_MASK(1, 0) | \
			       COUNTER_MASK(1, 1) | \
			       COUNTER_MASK(1, 2) | \
			       COUNTER_MASK(1, 3) | \
			       COUNTER_MASK(1, 4))
680 | ||
/*
 * Answer a PerfGet(PortSamplesControl) request: report the sampling
 * parameters and a consistent snapshot of the software sampling state
 * (taken under dev->pending_lock).
 */
static int recv_pma_get_portsamplescontrol(struct ib_perf *pmp,
					   struct ib_device *ibdev, u8 port)
{
	struct ib_pma_portsamplescontrol *p =
		(struct ib_pma_portsamplescontrol *)pmp->data;
	struct ipath_ibdev *dev = to_idev(ibdev);
	unsigned long flags;
	u8 port_select = p->port_select;	/* saved across the memset */

	memset(pmp->data, 0, sizeof(pmp->data));

	p->port_select = port_select;
	/* Only our own port (or AllPortSelect, 0xFF) is valid. */
	if (pmp->attr_mod != 0 ||
	    (port_select != port && port_select != 0xFF))
		pmp->status |= IB_SMP_INVALID_FIELD;
	/*
	 * Ticks are 10x the link transfer period which for 2.5Gbs is 4
	 * nsec.  0 == 4 nsec., 1 == 8 nsec., ..., 255 == 1020 nsec.  Sample
	 * intervals are counted in ticks.  Since we use Linux timers, that
	 * count in jiffies, we can't sample for less than 1000 ticks if HZ
	 * == 1000 (4000 ticks if HZ is 250).
	 */
	/* XXX This is WRONG. */
	p->tick = 250;		/* 1 usec. */
	p->counter_width = 4;	/* 32 bit counters */
	p->counter_mask0_9 = COUNTER_MASK0_9;
	/* Snapshot the mutable sampling state under the lock. */
	spin_lock_irqsave(&dev->pending_lock, flags);
	p->sample_status = dev->pma_sample_status;
	p->sample_start = cpu_to_be32(dev->pma_sample_start);
	p->sample_interval = cpu_to_be32(dev->pma_sample_interval);
	p->tag = cpu_to_be16(dev->pma_tag);
	p->counter_select[0] = dev->pma_counter_select[0];
	p->counter_select[1] = dev->pma_counter_select[1];
	p->counter_select[2] = dev->pma_counter_select[2];
	p->counter_select[3] = dev->pma_counter_select[3];
	p->counter_select[4] = dev->pma_counter_select[4];
	spin_unlock_irqrestore(&dev->pending_lock, flags);

	return reply((struct ib_smp *) pmp);
}
721 | ||
/*
 * Handle a PerfSet(PortSamplesControl) request: start a new sampling
 * run if one is requested (sample_start != 0) and no run is in
 * progress.  The state update is done under dev->pending_lock, and
 * the reply echoes the resulting state via the Get handler.
 */
static int recv_pma_set_portsamplescontrol(struct ib_perf *pmp,
					   struct ib_device *ibdev, u8 port)
{
	struct ib_pma_portsamplescontrol *p =
		(struct ib_pma_portsamplescontrol *)pmp->data;
	struct ipath_ibdev *dev = to_idev(ibdev);
	unsigned long flags;
	u32 start;
	int ret;

	/* Only our own port (or AllPortSelect, 0xFF) is valid. */
	if (pmp->attr_mod != 0 ||
	    (p->port_select != port && p->port_select != 0xFF)) {
		pmp->status |= IB_SMP_INVALID_FIELD;
		ret = reply((struct ib_smp *) pmp);
		goto bail;
	}

	start = be32_to_cpu(p->sample_start);
	if (start != 0) {
		spin_lock_irqsave(&dev->pending_lock, flags);
		/* Only start a new run when the previous one is done. */
		if (dev->pma_sample_status == IB_PMA_SAMPLE_STATUS_DONE) {
			dev->pma_sample_status =
				IB_PMA_SAMPLE_STATUS_STARTED;
			dev->pma_sample_start = start;
			dev->pma_sample_interval =
				be32_to_cpu(p->sample_interval);
			dev->pma_tag = be16_to_cpu(p->tag);
			/* A zero selector leaves the old selection alone. */
			if (p->counter_select[0])
				dev->pma_counter_select[0] =
					p->counter_select[0];
			if (p->counter_select[1])
				dev->pma_counter_select[1] =
					p->counter_select[1];
			if (p->counter_select[2])
				dev->pma_counter_select[2] =
					p->counter_select[2];
			if (p->counter_select[3])
				dev->pma_counter_select[3] =
					p->counter_select[3];
			if (p->counter_select[4])
				dev->pma_counter_select[4] =
					p->counter_select[4];
		}
		spin_unlock_irqrestore(&dev->pending_lock, flags);
	}
	ret = recv_pma_get_portsamplescontrol(pmp, ibdev, port);

bail:
	return ret;
}
772 | ||
773 | static u64 get_counter(struct ipath_ibdev *dev, __be16 sel) | |
774 | { | |
775 | u64 ret; | |
776 | ||
777 | switch (sel) { | |
778 | case IB_PMA_PORT_XMIT_DATA: | |
779 | ret = dev->ipath_sword; | |
780 | break; | |
781 | case IB_PMA_PORT_RCV_DATA: | |
782 | ret = dev->ipath_rword; | |
783 | break; | |
784 | case IB_PMA_PORT_XMIT_PKTS: | |
785 | ret = dev->ipath_spkts; | |
786 | break; | |
787 | case IB_PMA_PORT_RCV_PKTS: | |
788 | ret = dev->ipath_rpkts; | |
789 | break; | |
790 | case IB_PMA_PORT_XMIT_WAIT: | |
791 | ret = dev->ipath_xmit_wait; | |
792 | break; | |
793 | default: | |
794 | ret = 0; | |
795 | } | |
796 | ||
797 | return ret; | |
798 | } | |
799 | ||
800 | static int recv_pma_get_portsamplesresult(struct ib_perf *pmp, | |
801 | struct ib_device *ibdev) | |
802 | { | |
803 | struct ib_pma_portsamplesresult *p = | |
804 | (struct ib_pma_portsamplesresult *)pmp->data; | |
805 | struct ipath_ibdev *dev = to_idev(ibdev); | |
806 | int i; | |
807 | ||
808 | memset(pmp->data, 0, sizeof(pmp->data)); | |
809 | p->tag = cpu_to_be16(dev->pma_tag); | |
810 | p->sample_status = cpu_to_be16(dev->pma_sample_status); | |
811 | for (i = 0; i < ARRAY_SIZE(dev->pma_counter_select); i++) | |
812 | p->counter[i] = cpu_to_be32( | |
813 | get_counter(dev, dev->pma_counter_select[i])); | |
814 | ||
815 | return reply((struct ib_smp *) pmp); | |
816 | } | |
817 | ||
818 | static int recv_pma_get_portsamplesresult_ext(struct ib_perf *pmp, | |
819 | struct ib_device *ibdev) | |
820 | { | |
821 | struct ib_pma_portsamplesresult_ext *p = | |
822 | (struct ib_pma_portsamplesresult_ext *)pmp->data; | |
823 | struct ipath_ibdev *dev = to_idev(ibdev); | |
824 | int i; | |
825 | ||
826 | memset(pmp->data, 0, sizeof(pmp->data)); | |
827 | p->tag = cpu_to_be16(dev->pma_tag); | |
828 | p->sample_status = cpu_to_be16(dev->pma_sample_status); | |
829 | /* 64 bits */ | |
830 | p->extended_width = __constant_cpu_to_be32(0x80000000); | |
831 | for (i = 0; i < ARRAY_SIZE(dev->pma_counter_select); i++) | |
832 | p->counter[i] = cpu_to_be64( | |
833 | get_counter(dev, dev->pma_counter_select[i])); | |
834 | ||
835 | return reply((struct ib_smp *) pmp); | |
836 | } | |
837 | ||
/*
 * PMA PortCounters (Get): read the hardware counters, subtract the
 * values saved at the last "clear" (the hardware cannot actually reset
 * its counters), and report them clamped to the 16/8/32-bit field
 * widths that the PortCounters attribute defines.
 */
static int recv_pma_get_portcounters(struct ib_perf *pmp,
				     struct ib_device *ibdev, u8 port)
{
	struct ib_pma_portcounters *p = (struct ib_pma_portcounters *)
		pmp->data;
	struct ipath_ibdev *dev = to_idev(ibdev);
	struct ipath_layer_counters cntrs;
	/* p aliases pmp->data, so save port_select before the memset below. */
	u8 port_select = p->port_select;

	ipath_layer_get_counters(dev->dd, &cntrs);

	/* Adjust counters for any resets done. */
	cntrs.symbol_error_counter -= dev->z_symbol_error_counter;
	cntrs.link_error_recovery_counter -=
		dev->z_link_error_recovery_counter;
	cntrs.link_downed_counter -= dev->z_link_downed_counter;
	/* Fold in errors counted in software before subtracting the base. */
	cntrs.port_rcv_errors += dev->rcv_errors;
	cntrs.port_rcv_errors -= dev->z_port_rcv_errors;
	cntrs.port_rcv_remphys_errors -= dev->z_port_rcv_remphys_errors;
	cntrs.port_xmit_discards -= dev->z_port_xmit_discards;
	cntrs.port_xmit_data -= dev->z_port_xmit_data;
	cntrs.port_rcv_data -= dev->z_port_rcv_data;
	cntrs.port_xmit_packets -= dev->z_port_xmit_packets;
	cntrs.port_rcv_packets -= dev->z_port_rcv_packets;

	memset(pmp->data, 0, sizeof(pmp->data));

	p->port_select = port_select;
	/* Only attr_mod 0 and this port (or the 0xFF wildcard) are valid. */
	if (pmp->attr_mod != 0 ||
	    (port_select != port && port_select != 0xFF))
		pmp->status |= IB_SMP_INVALID_FIELD;

	/* Saturate each counter at its attribute field width. */
	if (cntrs.symbol_error_counter > 0xFFFFUL)
		p->symbol_error_counter = __constant_cpu_to_be16(0xFFFF);
	else
		p->symbol_error_counter =
			cpu_to_be16((u16)cntrs.symbol_error_counter);
	if (cntrs.link_error_recovery_counter > 0xFFUL)
		p->link_error_recovery_counter = 0xFF;
	else
		p->link_error_recovery_counter =
			(u8)cntrs.link_error_recovery_counter;
	if (cntrs.link_downed_counter > 0xFFUL)
		p->link_downed_counter = 0xFF;
	else
		p->link_downed_counter = (u8)cntrs.link_downed_counter;
	if (cntrs.port_rcv_errors > 0xFFFFUL)
		p->port_rcv_errors = __constant_cpu_to_be16(0xFFFF);
	else
		p->port_rcv_errors =
			cpu_to_be16((u16) cntrs.port_rcv_errors);
	if (cntrs.port_rcv_remphys_errors > 0xFFFFUL)
		p->port_rcv_remphys_errors = __constant_cpu_to_be16(0xFFFF);
	else
		p->port_rcv_remphys_errors =
			cpu_to_be16((u16)cntrs.port_rcv_remphys_errors);
	if (cntrs.port_xmit_discards > 0xFFFFUL)
		p->port_xmit_discards = __constant_cpu_to_be16(0xFFFF);
	else
		p->port_xmit_discards =
			cpu_to_be16((u16)cntrs.port_xmit_discards);
	if (cntrs.port_xmit_data > 0xFFFFFFFFUL)
		p->port_xmit_data = __constant_cpu_to_be32(0xFFFFFFFF);
	else
		p->port_xmit_data = cpu_to_be32((u32)cntrs.port_xmit_data);
	if (cntrs.port_rcv_data > 0xFFFFFFFFUL)
		p->port_rcv_data = __constant_cpu_to_be32(0xFFFFFFFF);
	else
		p->port_rcv_data = cpu_to_be32((u32)cntrs.port_rcv_data);
	if (cntrs.port_xmit_packets > 0xFFFFFFFFUL)
		p->port_xmit_packets = __constant_cpu_to_be32(0xFFFFFFFF);
	else
		p->port_xmit_packets =
			cpu_to_be32((u32)cntrs.port_xmit_packets);
	if (cntrs.port_rcv_packets > 0xFFFFFFFFUL)
		p->port_rcv_packets = __constant_cpu_to_be32(0xFFFFFFFF);
	else
		p->port_rcv_packets =
			cpu_to_be32((u32) cntrs.port_rcv_packets);

	return reply((struct ib_smp *) pmp);
}
920 | ||
921 | static int recv_pma_get_portcounters_ext(struct ib_perf *pmp, | |
922 | struct ib_device *ibdev, u8 port) | |
923 | { | |
924 | struct ib_pma_portcounters_ext *p = | |
925 | (struct ib_pma_portcounters_ext *)pmp->data; | |
926 | struct ipath_ibdev *dev = to_idev(ibdev); | |
927 | u64 swords, rwords, spkts, rpkts, xwait; | |
928 | u8 port_select = p->port_select; | |
929 | ||
930 | ipath_layer_snapshot_counters(dev->dd, &swords, &rwords, &spkts, | |
931 | &rpkts, &xwait); | |
932 | ||
933 | /* Adjust counters for any resets done. */ | |
443a64ab BS |
934 | swords -= dev->z_port_xmit_data; |
935 | rwords -= dev->z_port_rcv_data; | |
936 | spkts -= dev->z_port_xmit_packets; | |
937 | rpkts -= dev->z_port_rcv_packets; | |
e28c00ad BS |
938 | |
939 | memset(pmp->data, 0, sizeof(pmp->data)); | |
940 | ||
941 | p->port_select = port_select; | |
942 | if (pmp->attr_mod != 0 || | |
943 | (port_select != port && port_select != 0xFF)) | |
944 | pmp->status |= IB_SMP_INVALID_FIELD; | |
945 | ||
946 | p->port_xmit_data = cpu_to_be64(swords); | |
947 | p->port_rcv_data = cpu_to_be64(rwords); | |
948 | p->port_xmit_packets = cpu_to_be64(spkts); | |
949 | p->port_rcv_packets = cpu_to_be64(rpkts); | |
950 | p->port_unicast_xmit_packets = cpu_to_be64(dev->n_unicast_xmit); | |
951 | p->port_unicast_rcv_packets = cpu_to_be64(dev->n_unicast_rcv); | |
952 | p->port_multicast_xmit_packets = cpu_to_be64(dev->n_multicast_xmit); | |
953 | p->port_multicast_rcv_packets = cpu_to_be64(dev->n_multicast_rcv); | |
954 | ||
955 | return reply((struct ib_smp *) pmp); | |
956 | } | |
957 | ||
/*
 * PMA PortCounters (Set): "clear" the counters selected in
 * counter_select.  The hardware cannot reset its counters, so instead
 * we record the current value in the corresponding dev->z_* baseline;
 * the Get path subtracts these baselines before reporting.  Finishes
 * by returning the (now cleared) counters via the Get handler, as the
 * PMA Set semantics require a GetResp of the attribute.
 */
static int recv_pma_set_portcounters(struct ib_perf *pmp,
				     struct ib_device *ibdev, u8 port)
{
	struct ib_pma_portcounters *p = (struct ib_pma_portcounters *)
		pmp->data;
	struct ipath_ibdev *dev = to_idev(ibdev);
	struct ipath_layer_counters cntrs;

	/*
	 * Since the HW doesn't support clearing counters, we save the
	 * current count and subtract it from future responses.
	 */
	ipath_layer_get_counters(dev->dd, &cntrs);

	if (p->counter_select & IB_PMA_SEL_SYMBOL_ERROR)
		dev->z_symbol_error_counter = cntrs.symbol_error_counter;

	if (p->counter_select & IB_PMA_SEL_LINK_ERROR_RECOVERY)
		dev->z_link_error_recovery_counter =
			cntrs.link_error_recovery_counter;

	if (p->counter_select & IB_PMA_SEL_LINK_DOWNED)
		dev->z_link_downed_counter = cntrs.link_downed_counter;

	if (p->counter_select & IB_PMA_SEL_PORT_RCV_ERRORS)
		/* Include software-counted errors, matching the Get path. */
		dev->z_port_rcv_errors =
			cntrs.port_rcv_errors + dev->rcv_errors;

	if (p->counter_select & IB_PMA_SEL_PORT_RCV_REMPHYS_ERRORS)
		dev->z_port_rcv_remphys_errors =
			cntrs.port_rcv_remphys_errors;

	if (p->counter_select & IB_PMA_SEL_PORT_XMIT_DISCARDS)
		dev->z_port_xmit_discards = cntrs.port_xmit_discards;

	if (p->counter_select & IB_PMA_SEL_PORT_XMIT_DATA)
		dev->z_port_xmit_data = cntrs.port_xmit_data;

	if (p->counter_select & IB_PMA_SEL_PORT_RCV_DATA)
		dev->z_port_rcv_data = cntrs.port_rcv_data;

	if (p->counter_select & IB_PMA_SEL_PORT_XMIT_PACKETS)
		dev->z_port_xmit_packets = cntrs.port_xmit_packets;

	if (p->counter_select & IB_PMA_SEL_PORT_RCV_PACKETS)
		dev->z_port_rcv_packets = cntrs.port_rcv_packets;

	return recv_pma_get_portcounters(pmp, ibdev, port);
}
1007 | ||
/*
 * PMA PortCountersExtended (Set): "clear" the selected 64-bit counters
 * by saving baselines (hardware counters) or zeroing the software
 * counters outright, then reply via the Get handler.
 */
static int recv_pma_set_portcounters_ext(struct ib_perf *pmp,
					 struct ib_device *ibdev, u8 port)
{
	/*
	 * NOTE(review): the non-extended portcounters layout is used to
	 * locate counter_select here; presumably port_select/counter_select
	 * sit at the same offsets in the extended attribute — confirm
	 * against the IBA PortCountersExtended definition.
	 */
	struct ib_pma_portcounters *p = (struct ib_pma_portcounters *)
		pmp->data;
	struct ipath_ibdev *dev = to_idev(ibdev);
	u64 swords, rwords, spkts, rpkts, xwait;

	ipath_layer_snapshot_counters(dev->dd, &swords, &rwords, &spkts,
				      &rpkts, &xwait);

	if (p->counter_select & IB_PMA_SELX_PORT_XMIT_DATA)
		dev->z_port_xmit_data = swords;

	if (p->counter_select & IB_PMA_SELX_PORT_RCV_DATA)
		dev->z_port_rcv_data = rwords;

	if (p->counter_select & IB_PMA_SELX_PORT_XMIT_PACKETS)
		dev->z_port_xmit_packets = spkts;

	if (p->counter_select & IB_PMA_SELX_PORT_RCV_PACKETS)
		dev->z_port_rcv_packets = rpkts;

	/* The uni/multicast counts are software counters; zero them directly. */
	if (p->counter_select & IB_PMA_SELX_PORT_UNI_XMIT_PACKETS)
		dev->n_unicast_xmit = 0;

	if (p->counter_select & IB_PMA_SELX_PORT_UNI_RCV_PACKETS)
		dev->n_unicast_rcv = 0;

	if (p->counter_select & IB_PMA_SELX_PORT_MULTI_XMIT_PACKETS)
		dev->n_multicast_xmit = 0;

	if (p->counter_select & IB_PMA_SELX_PORT_MULTI_RCV_PACKETS)
		dev->n_multicast_rcv = 0;

	return recv_pma_get_portcounters_ext(pmp, ibdev, port);
}
1045 | ||
/*
 * Process an incoming Subnet Management Packet (SMP): validate the
 * class version, enforce M_Key protection (including lease expiry and
 * violation counting), then dispatch Get/Set requests by attribute ID.
 * The reply is built in place in out_mad (pre-seeded with a copy of
 * in_mad).  Returns IB_MAD_RESULT_* flags for the ib_mad layer.
 */
static int process_subn(struct ib_device *ibdev, int mad_flags,
			u8 port_num, struct ib_mad *in_mad,
			struct ib_mad *out_mad)
{
	struct ib_smp *smp = (struct ib_smp *)out_mad;
	struct ipath_ibdev *dev = to_idev(ibdev);
	int ret;

	*out_mad = *in_mad;
	if (smp->class_version != 1) {
		smp->status |= IB_SMP_UNSUP_VERSION;
		ret = reply(smp);
		goto bail;
	}

	/* Is the mkey in the process of expiring? */
	if (dev->mkey_lease_timeout && jiffies >= dev->mkey_lease_timeout) {
		/* Clear timeout and mkey protection field. */
		dev->mkey_lease_timeout = 0;
		/* Top two bits of mkeyprot_resv_lmc hold the protect bits. */
		dev->mkeyprot_resv_lmc &= 0x3F;
	}

	/*
	 * M_Key checking depends on
	 * Portinfo:M_Key_protect_bits
	 */
	if ((mad_flags & IB_MAD_IGNORE_MKEY) == 0 && dev->mkey != 0 &&
	    dev->mkey != smp->mkey &&
	    (smp->method == IB_MGMT_METHOD_SET ||
	     (smp->method == IB_MGMT_METHOD_GET &&
	      (dev->mkeyprot_resv_lmc >> 7) != 0))) {
		/* Count violations, saturating at the 16-bit field max. */
		if (dev->mkey_violations != 0xFFFF)
			++dev->mkey_violations;
		/* Lease already running, or no lease: consume silently. */
		if (dev->mkey_lease_timeout ||
		    dev->mkey_lease_period == 0) {
			ret = IB_MAD_RESULT_SUCCESS |
				IB_MAD_RESULT_CONSUMED;
			goto bail;
		}
		/* First violation with a lease period: start the lease timer. */
		dev->mkey_lease_timeout = jiffies +
			dev->mkey_lease_period * HZ;
		/* Future: Generate a trap notice. */
		ret = IB_MAD_RESULT_SUCCESS | IB_MAD_RESULT_CONSUMED;
		goto bail;
	} else if (dev->mkey_lease_timeout)
		/* A valid M_Key arrived; cancel the pending lease expiry. */
		dev->mkey_lease_timeout = 0;

	switch (smp->method) {
	case IB_MGMT_METHOD_GET:
		switch (smp->attr_id) {
		case IB_SMP_ATTR_NODE_DESC:
			ret = recv_subn_get_nodedescription(smp, ibdev);
			goto bail;
		case IB_SMP_ATTR_NODE_INFO:
			ret = recv_subn_get_nodeinfo(smp, ibdev, port_num);
			goto bail;
		case IB_SMP_ATTR_GUID_INFO:
			ret = recv_subn_get_guidinfo(smp, ibdev);
			goto bail;
		case IB_SMP_ATTR_PORT_INFO:
			ret = recv_subn_get_portinfo(smp, ibdev, port_num);
			goto bail;
		case IB_SMP_ATTR_PKEY_TABLE:
			ret = recv_subn_get_pkeytable(smp, ibdev);
			goto bail;
		case IB_SMP_ATTR_SM_INFO:
			if (dev->port_cap_flags & IB_PORT_SM_DISABLED) {
				ret = IB_MAD_RESULT_SUCCESS |
					IB_MAD_RESULT_CONSUMED;
				goto bail;
			}
			if (dev->port_cap_flags & IB_PORT_SM) {
				ret = IB_MAD_RESULT_SUCCESS;
				goto bail;
			}
			/* FALLTHROUGH */
		default:
			smp->status |= IB_SMP_UNSUP_METH_ATTR;
			ret = reply(smp);
			goto bail;
		}

	case IB_MGMT_METHOD_SET:
		switch (smp->attr_id) {
		case IB_SMP_ATTR_GUID_INFO:
			ret = recv_subn_set_guidinfo(smp, ibdev);
			goto bail;
		case IB_SMP_ATTR_PORT_INFO:
			ret = recv_subn_set_portinfo(smp, ibdev, port_num);
			goto bail;
		case IB_SMP_ATTR_PKEY_TABLE:
			ret = recv_subn_set_pkeytable(smp, ibdev);
			goto bail;
		case IB_SMP_ATTR_SM_INFO:
			if (dev->port_cap_flags & IB_PORT_SM_DISABLED) {
				ret = IB_MAD_RESULT_SUCCESS |
					IB_MAD_RESULT_CONSUMED;
				goto bail;
			}
			if (dev->port_cap_flags & IB_PORT_SM) {
				ret = IB_MAD_RESULT_SUCCESS;
				goto bail;
			}
			/* FALLTHROUGH */
		default:
			smp->status |= IB_SMP_UNSUP_METH_ATTR;
			ret = reply(smp);
			goto bail;
		}

	case IB_MGMT_METHOD_GET_RESP:
		/*
		 * The ib_mad module will call us to process responses
		 * before checking for other consumers.
		 * Just tell the caller to process it normally.
		 */
		ret = IB_MAD_RESULT_FAILURE;
		goto bail;
	default:
		smp->status |= IB_SMP_UNSUP_METHOD;
		ret = reply(smp);
	}

bail:
	return ret;
}
1172 | ||
/*
 * Process an incoming Performance Management MAD: validate the class
 * version, then dispatch Get/Set requests to the recv_pma_* handlers
 * by attribute ID.  The reply is built in place in out_mad (pre-seeded
 * with a copy of in_mad).  Returns IB_MAD_RESULT_* flags for the
 * ib_mad layer.
 */
static int process_perf(struct ib_device *ibdev, u8 port_num,
			struct ib_mad *in_mad,
			struct ib_mad *out_mad)
{
	struct ib_perf *pmp = (struct ib_perf *)out_mad;
	int ret;

	*out_mad = *in_mad;
	if (pmp->class_version != 1) {
		pmp->status |= IB_SMP_UNSUP_VERSION;
		ret = reply((struct ib_smp *) pmp);
		goto bail;
	}

	switch (pmp->method) {
	case IB_MGMT_METHOD_GET:
		switch (pmp->attr_id) {
		case IB_PMA_CLASS_PORT_INFO:
			ret = recv_pma_get_classportinfo(pmp);
			goto bail;
		case IB_PMA_PORT_SAMPLES_CONTROL:
			ret = recv_pma_get_portsamplescontrol(pmp, ibdev,
							      port_num);
			goto bail;
		case IB_PMA_PORT_SAMPLES_RESULT:
			ret = recv_pma_get_portsamplesresult(pmp, ibdev);
			goto bail;
		case IB_PMA_PORT_SAMPLES_RESULT_EXT:
			ret = recv_pma_get_portsamplesresult_ext(pmp,
								 ibdev);
			goto bail;
		case IB_PMA_PORT_COUNTERS:
			ret = recv_pma_get_portcounters(pmp, ibdev,
							port_num);
			goto bail;
		case IB_PMA_PORT_COUNTERS_EXT:
			ret = recv_pma_get_portcounters_ext(pmp, ibdev,
							    port_num);
			goto bail;
		default:
			pmp->status |= IB_SMP_UNSUP_METH_ATTR;
			ret = reply((struct ib_smp *) pmp);
			goto bail;
		}

	case IB_MGMT_METHOD_SET:
		switch (pmp->attr_id) {
		case IB_PMA_PORT_SAMPLES_CONTROL:
			ret = recv_pma_set_portsamplescontrol(pmp, ibdev,
							      port_num);
			goto bail;
		case IB_PMA_PORT_COUNTERS:
			ret = recv_pma_set_portcounters(pmp, ibdev,
							port_num);
			goto bail;
		case IB_PMA_PORT_COUNTERS_EXT:
			ret = recv_pma_set_portcounters_ext(pmp, ibdev,
							    port_num);
			goto bail;
		default:
			pmp->status |= IB_SMP_UNSUP_METH_ATTR;
			ret = reply((struct ib_smp *) pmp);
			goto bail;
		}

	case IB_MGMT_METHOD_GET_RESP:
		/*
		 * The ib_mad module will call us to process responses
		 * before checking for other consumers.
		 * Just tell the caller to process it normally.
		 */
		ret = IB_MAD_RESULT_FAILURE;
		goto bail;
	default:
		pmp->status |= IB_SMP_UNSUP_METHOD;
		ret = reply((struct ib_smp *) pmp);
	}

bail:
	return ret;
}
1254 | ||
/**
 * ipath_process_mad - process an incoming MAD packet
 * @ibdev: the infiniband device this packet came in on
 * @mad_flags: MAD flags
 * @port_num: the port number this packet came in on
 * @in_wc: the work completion entry for this packet
 * @in_grh: the global route header for this packet
 * @in_mad: the incoming MAD
 * @out_mad: any outgoing MAD reply
 *
 * Returns IB_MAD_RESULT_SUCCESS if this is a MAD that we are not
 * interested in processing.
 *
 * Note that the verbs framework has already done the MAD sanity checks,
 * and hop count/pointer updating for IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE
 * MADs.
 *
 * This is called by the ib_mad module.
 */
int ipath_process_mad(struct ib_device *ibdev, int mad_flags, u8 port_num,
		      struct ib_wc *in_wc, struct ib_grh *in_grh,
		      struct ib_mad *in_mad, struct ib_mad *out_mad)
{
	struct ipath_ibdev *dev = to_idev(ibdev);
	int ret;

	/*
	 * Snapshot current HW counters to "clear" them.
	 * This should be done when the driver is loaded except that for
	 * some reason we get a zillion errors when bringing up the link.
	 */
	if (dev->rcv_errors == 0) {
		/* First MAD seen: establish the z_* baselines once. */
		struct ipath_layer_counters cntrs;

		ipath_layer_get_counters(to_idev(ibdev)->dd, &cntrs);
		/* Marks this one-shot initialization as done. */
		dev->rcv_errors++;
		dev->z_symbol_error_counter = cntrs.symbol_error_counter;
		dev->z_link_error_recovery_counter =
			cntrs.link_error_recovery_counter;
		dev->z_link_downed_counter = cntrs.link_downed_counter;
		/* +1 balances the rcv_errors increment above in the Get path. */
		dev->z_port_rcv_errors = cntrs.port_rcv_errors + 1;
		dev->z_port_rcv_remphys_errors =
			cntrs.port_rcv_remphys_errors;
		dev->z_port_xmit_discards = cntrs.port_xmit_discards;
		dev->z_port_xmit_data = cntrs.port_xmit_data;
		dev->z_port_rcv_data = cntrs.port_rcv_data;
		dev->z_port_xmit_packets = cntrs.port_xmit_packets;
		dev->z_port_rcv_packets = cntrs.port_rcv_packets;
	}
	/* Route by management class; anything else is not ours. */
	switch (in_mad->mad_hdr.mgmt_class) {
	case IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE:
	case IB_MGMT_CLASS_SUBN_LID_ROUTED:
		ret = process_subn(ibdev, mad_flags, port_num,
				   in_mad, out_mad);
		goto bail;
	case IB_MGMT_CLASS_PERF_MGMT:
		ret = process_perf(ibdev, port_num, in_mad, out_mad);
		goto bail;
	default:
		ret = IB_MAD_RESULT_SUCCESS;
	}

bail:
	return ret;
}