/*
 *
 * This file is provided under a dual BSD/GPLv2 license.  When using or
 * redistributing this file, you may do so under either license.
 *
 * GPL LICENSE SUMMARY
 *
 * Copyright(c) 2015, 2016 Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * BSD LICENSE
 *
 * Copyright(c) 2015, 2016 Intel Corporation.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 *  - Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 *  - Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 *  - Neither the name of Intel Corporation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 */

#include <linux/net.h>
#define OPA_NUM_PKEY_BLOCKS_PER_SMP (OPA_SMP_DR_DATA_SIZE \
			/ (OPA_PARTITION_TABLE_BLK_SIZE * sizeof(u16)))
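/*
 * Illustrative arithmetic, assuming the opa_smi.h values
 * OPA_SMP_DR_DATA_SIZE == 1872 and OPA_PARTITION_TABLE_BLK_SIZE == 8:
 * 1872 / (8 * sizeof(u16)) == 117 pkey blocks per directed-route SMP.
 */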

#include "hfi.h"
#include "mad.h"
#include "trace.h"

/* the reset value from the FM is supposed to be 0xffff, handle both */
#define OPA_LINK_WIDTH_RESET_OLD 0x0fff
#define OPA_LINK_WIDTH_RESET 0xffff

static int reply(struct ib_mad_hdr *smp)
{
	/*
	 * The verbs framework will handle the directed/LID route
	 * packet changes.
	 */
	smp->method = IB_MGMT_METHOD_GET_RESP;
	if (smp->mgmt_class == IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE)
		smp->status |= IB_SMP_DIRECTION;
	return IB_MAD_RESULT_SUCCESS | IB_MAD_RESULT_REPLY;
}

static inline void clear_opa_smp_data(struct opa_smp *smp)
{
	void *data = opa_get_smp_data(smp);
	size_t size = opa_get_smp_data_size(smp);

	memset(data, 0, size);
}

static void send_trap(struct hfi1_ibport *ibp, void *data, unsigned len)
{
	struct ib_mad_send_buf *send_buf;
	struct ib_mad_agent *agent;
	struct opa_smp *smp;
	int ret;
	unsigned long flags;
	unsigned long timeout;
	int pkey_idx;
	u32 qpn = ppd_from_ibp(ibp)->sm_trap_qp;

	agent = ibp->rvp.send_agent;
	if (!agent)
		return;

	/* o14-3.2.1 */
	if (ppd_from_ibp(ibp)->lstate != IB_PORT_ACTIVE)
		return;

	/* o14-2 */
	if (ibp->rvp.trap_timeout && time_before(jiffies,
						 ibp->rvp.trap_timeout))
		return;

	pkey_idx = hfi1_lookup_pkey_idx(ibp, LIM_MGMT_P_KEY);
	if (pkey_idx < 0) {
		pr_warn("%s: failed to find limited mgmt pkey, defaulting 0x%x\n",
			__func__, hfi1_get_pkey(ibp, 1));
		pkey_idx = 1;
	}

	send_buf = ib_create_send_mad(agent, qpn, pkey_idx, 0,
				      IB_MGMT_MAD_HDR, IB_MGMT_MAD_DATA,
				      GFP_ATOMIC, IB_MGMT_BASE_VERSION);
	if (IS_ERR(send_buf))
		return;

	smp = send_buf->mad;
	smp->base_version = OPA_MGMT_BASE_VERSION;
	smp->mgmt_class = IB_MGMT_CLASS_SUBN_LID_ROUTED;
	smp->class_version = OPA_SMI_CLASS_VERSION;
	smp->method = IB_MGMT_METHOD_TRAP;
	ibp->rvp.tid++;
	smp->tid = cpu_to_be64(ibp->rvp.tid);
	smp->attr_id = IB_SMP_ATTR_NOTICE;
	/* o14-1: smp->mkey = 0; */
	memcpy(smp->route.lid.data, data, len);

	spin_lock_irqsave(&ibp->rvp.lock, flags);
	if (!ibp->rvp.sm_ah) {
		if (ibp->rvp.sm_lid != be16_to_cpu(IB_LID_PERMISSIVE)) {
			struct ib_ah *ah;

			ah = hfi1_create_qp0_ah(ibp, ibp->rvp.sm_lid);
			if (IS_ERR(ah))
				ret = PTR_ERR(ah);
			else {
				send_buf->ah = ah;
				ibp->rvp.sm_ah = ibah_to_rvtah(ah);
				ret = 0;
			}
		} else
			ret = -EINVAL;
	} else {
		send_buf->ah = &ibp->rvp.sm_ah->ibah;
		ret = 0;
	}
	spin_unlock_irqrestore(&ibp->rvp.lock, flags);

	if (!ret)
		ret = ib_post_send_mad(send_buf, NULL);
	if (!ret) {
		/* 4.096 usec. */
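		/*
		 * Illustrative: the interval is 4.096 usec * 2^subnet_timeout,
		 * converted to usec for usecs_to_jiffies(); subnet_timeout ==
		 * 18, for example, spaces traps roughly 1.07 s apart.
		 */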
		timeout = (4096 * (1UL << ibp->rvp.subnet_timeout)) / 1000;
		ibp->rvp.trap_timeout = jiffies + usecs_to_jiffies(timeout);
	} else {
		ib_free_send_mad(send_buf);
		ibp->rvp.trap_timeout = 0;
	}
}

/*
 * Send a bad [PQ]_Key trap (ch. 14.3.8).
 */
void hfi1_bad_pqkey(struct hfi1_ibport *ibp, __be16 trap_num, u32 key, u32 sl,
		    u32 qp1, u32 qp2, u16 lid1, u16 lid2)
{
	struct opa_mad_notice_attr data;
	u32 lid = ppd_from_ibp(ibp)->lid;
	u32 _lid1 = lid1;
	u32 _lid2 = lid2;

	memset(&data, 0, sizeof(data));

	if (trap_num == OPA_TRAP_BAD_P_KEY)
		ibp->rvp.pkey_violations++;
	else
		ibp->rvp.qkey_violations++;
	ibp->rvp.n_pkt_drops++;

	/* Send violation trap */
	data.generic_type = IB_NOTICE_TYPE_SECURITY;
	data.prod_type_lsb = IB_NOTICE_PROD_CA;
	data.trap_num = trap_num;
	data.issuer_lid = cpu_to_be32(lid);
	data.ntc_257_258.lid1 = cpu_to_be32(_lid1);
	data.ntc_257_258.lid2 = cpu_to_be32(_lid2);
	data.ntc_257_258.key = cpu_to_be32(key);
	data.ntc_257_258.sl = sl << 3;
	data.ntc_257_258.qp1 = cpu_to_be32(qp1);
	data.ntc_257_258.qp2 = cpu_to_be32(qp2);

	send_trap(ibp, &data, sizeof(data));
}

/*
 * Send a bad M_Key trap (ch. 14.3.9).
 */
static void bad_mkey(struct hfi1_ibport *ibp, struct ib_mad_hdr *mad,
		     __be64 mkey, __be32 dr_slid, u8 return_path[], u8 hop_cnt)
{
	struct opa_mad_notice_attr data;
	u32 lid = ppd_from_ibp(ibp)->lid;

	memset(&data, 0, sizeof(data));
	/* Send violation trap */
	data.generic_type = IB_NOTICE_TYPE_SECURITY;
	data.prod_type_lsb = IB_NOTICE_PROD_CA;
	data.trap_num = OPA_TRAP_BAD_M_KEY;
	data.issuer_lid = cpu_to_be32(lid);
	data.ntc_256.lid = data.issuer_lid;
	data.ntc_256.method = mad->method;
	data.ntc_256.attr_id = mad->attr_id;
	data.ntc_256.attr_mod = mad->attr_mod;
	data.ntc_256.mkey = mkey;
	if (mad->mgmt_class == IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE) {
		data.ntc_256.dr_slid = dr_slid;
		data.ntc_256.dr_trunc_hop = IB_NOTICE_TRAP_DR_NOTICE;
		if (hop_cnt > ARRAY_SIZE(data.ntc_256.dr_rtn_path)) {
			data.ntc_256.dr_trunc_hop |=
				IB_NOTICE_TRAP_DR_TRUNC;
			hop_cnt = ARRAY_SIZE(data.ntc_256.dr_rtn_path);
		}
		data.ntc_256.dr_trunc_hop |= hop_cnt;
		memcpy(data.ntc_256.dr_rtn_path, return_path,
		       hop_cnt);
	}

	send_trap(ibp, &data, sizeof(data));
}

/*
 * Send a Port Capability Mask Changed trap (ch. 14.3.11).
 */
void hfi1_cap_mask_chg(struct rvt_dev_info *rdi, u8 port_num)
{
	struct opa_mad_notice_attr data;
	struct hfi1_ibdev *verbs_dev = dev_from_rdi(rdi);
	struct hfi1_devdata *dd = dd_from_dev(verbs_dev);
	struct hfi1_ibport *ibp = &dd->pport[port_num - 1].ibport_data;
	u32 lid = ppd_from_ibp(ibp)->lid;

	memset(&data, 0, sizeof(data));

	data.generic_type = IB_NOTICE_TYPE_INFO;
	data.prod_type_lsb = IB_NOTICE_PROD_CA;
	data.trap_num = OPA_TRAP_CHANGE_CAPABILITY;
	data.issuer_lid = cpu_to_be32(lid);
	data.ntc_144.lid = data.issuer_lid;
	data.ntc_144.new_cap_mask = cpu_to_be32(ibp->rvp.port_cap_flags);

	send_trap(ibp, &data, sizeof(data));
}

/*
 * Send a System Image GUID Changed trap (ch. 14.3.12).
 */
void hfi1_sys_guid_chg(struct hfi1_ibport *ibp)
{
	struct opa_mad_notice_attr data;
	u32 lid = ppd_from_ibp(ibp)->lid;

	memset(&data, 0, sizeof(data));

	data.generic_type = IB_NOTICE_TYPE_INFO;
	data.prod_type_lsb = IB_NOTICE_PROD_CA;
	data.trap_num = OPA_TRAP_CHANGE_SYSGUID;
	data.issuer_lid = cpu_to_be32(lid);
	data.ntc_145.new_sys_guid = ib_hfi1_sys_image_guid;
	data.ntc_145.lid = data.issuer_lid;

	send_trap(ibp, &data, sizeof(data));
}

/*
 * Send a Node Description Changed trap (ch. 14.3.13).
 */
void hfi1_node_desc_chg(struct hfi1_ibport *ibp)
{
	struct opa_mad_notice_attr data;
	u32 lid = ppd_from_ibp(ibp)->lid;

	memset(&data, 0, sizeof(data));

	data.generic_type = IB_NOTICE_TYPE_INFO;
	data.prod_type_lsb = IB_NOTICE_PROD_CA;
	data.trap_num = OPA_TRAP_CHANGE_CAPABILITY;
	data.issuer_lid = cpu_to_be32(lid);
	data.ntc_144.lid = data.issuer_lid;
	data.ntc_144.change_flags =
		cpu_to_be16(OPA_NOTICE_TRAP_NODE_DESC_CHG);

	send_trap(ibp, &data, sizeof(data));
}

static int __subn_get_opa_nodedesc(struct opa_smp *smp, u32 am,
				   u8 *data, struct ib_device *ibdev,
				   u8 port, u32 *resp_len)
{
	struct opa_node_description *nd;

	if (am) {
		smp->status |= IB_SMP_INVALID_FIELD;
		return reply((struct ib_mad_hdr *)smp);
	}

	nd = (struct opa_node_description *)data;

	memcpy(nd->data, ibdev->node_desc, sizeof(nd->data));

	if (resp_len)
		*resp_len += sizeof(*nd);

	return reply((struct ib_mad_hdr *)smp);
}

static int __subn_get_opa_nodeinfo(struct opa_smp *smp, u32 am, u8 *data,
				   struct ib_device *ibdev, u8 port,
				   u32 *resp_len)
{
	struct opa_node_info *ni;
	struct hfi1_devdata *dd = dd_from_ibdev(ibdev);
	unsigned pidx = port - 1; /* IB number port from 1, hw from 0 */

	ni = (struct opa_node_info *)data;

	/* GUID 0 is illegal */
	if (am || pidx >= dd->num_pports || dd->pport[pidx].guid == 0) {
		smp->status |= IB_SMP_INVALID_FIELD;
		return reply((struct ib_mad_hdr *)smp);
	}

	ni->port_guid = cpu_to_be64(dd->pport[pidx].guid);
	ni->base_version = OPA_MGMT_BASE_VERSION;
	ni->class_version = OPA_SMI_CLASS_VERSION;
	ni->node_type = 1; /* channel adapter */
	ni->num_ports = ibdev->phys_port_cnt;
	/* This is already in network order */
	ni->system_image_guid = ib_hfi1_sys_image_guid;
	/* Use first-port GUID as node */
	ni->node_guid = cpu_to_be64(dd->pport->guid);
	ni->partition_cap = cpu_to_be16(hfi1_get_npkeys(dd));
	ni->device_id = cpu_to_be16(dd->pcidev->device);
	ni->revision = cpu_to_be32(dd->minrev);
	ni->local_port_num = port;
	ni->vendor_id[0] = dd->oui1;
	ni->vendor_id[1] = dd->oui2;
	ni->vendor_id[2] = dd->oui3;

	if (resp_len)
		*resp_len += sizeof(*ni);

	return reply((struct ib_mad_hdr *)smp);
}

static int subn_get_nodeinfo(struct ib_smp *smp, struct ib_device *ibdev,
			     u8 port)
{
	struct ib_node_info *nip = (struct ib_node_info *)&smp->data;
	struct hfi1_devdata *dd = dd_from_ibdev(ibdev);
	unsigned pidx = port - 1; /* IB number port from 1, hw from 0 */

	/* GUID 0 is illegal */
	if (smp->attr_mod || pidx >= dd->num_pports ||
	    dd->pport[pidx].guid == 0)
		smp->status |= IB_SMP_INVALID_FIELD;
	else
		nip->port_guid = cpu_to_be64(dd->pport[pidx].guid);

	nip->base_version = OPA_MGMT_BASE_VERSION;
	nip->class_version = OPA_SMI_CLASS_VERSION;
	nip->node_type = 1; /* channel adapter */
	nip->num_ports = ibdev->phys_port_cnt;
	/* This is already in network order */
	nip->sys_guid = ib_hfi1_sys_image_guid;
	/* Use first-port GUID as node */
	nip->node_guid = cpu_to_be64(dd->pport->guid);
	nip->partition_cap = cpu_to_be16(hfi1_get_npkeys(dd));
	nip->device_id = cpu_to_be16(dd->pcidev->device);
	nip->revision = cpu_to_be32(dd->minrev);
	nip->local_port_num = port;
	nip->vendor_id[0] = dd->oui1;
	nip->vendor_id[1] = dd->oui2;
	nip->vendor_id[2] = dd->oui3;

	return reply((struct ib_mad_hdr *)smp);
}

static void set_link_width_enabled(struct hfi1_pportdata *ppd, u32 w)
{
	(void)hfi1_set_ib_cfg(ppd, HFI1_IB_CFG_LWID_ENB, w);
}

static void set_link_width_downgrade_enabled(struct hfi1_pportdata *ppd, u32 w)
{
	(void)hfi1_set_ib_cfg(ppd, HFI1_IB_CFG_LWID_DG_ENB, w);
}

static void set_link_speed_enabled(struct hfi1_pportdata *ppd, u32 s)
{
	(void)hfi1_set_ib_cfg(ppd, HFI1_IB_CFG_SPD_ENB, s);
}

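/*
 * As implemented below: a Get with a bad M_Key only counts as a
 * violation when mkeyprot >= 2, while Set and TrapRepress always do;
 * a violation bumps the counter, may start the lease timer, and
 * fires a bad_mkey() trap.
 */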
static int check_mkey(struct hfi1_ibport *ibp, struct ib_mad_hdr *mad,
		      int mad_flags, __be64 mkey, __be32 dr_slid,
		      u8 return_path[], u8 hop_cnt)
{
	int valid_mkey = 0;
	int ret = 0;

	/* Is the mkey in the process of expiring? */
	if (ibp->rvp.mkey_lease_timeout &&
	    time_after_eq(jiffies, ibp->rvp.mkey_lease_timeout)) {
		/* Clear timeout and mkey protection field. */
		ibp->rvp.mkey_lease_timeout = 0;
		ibp->rvp.mkeyprot = 0;
	}

	if ((mad_flags & IB_MAD_IGNORE_MKEY) || ibp->rvp.mkey == 0 ||
	    ibp->rvp.mkey == mkey)
		valid_mkey = 1;

	/* Unset lease timeout on any valid Get/Set/TrapRepress */
	if (valid_mkey && ibp->rvp.mkey_lease_timeout &&
	    (mad->method == IB_MGMT_METHOD_GET ||
	     mad->method == IB_MGMT_METHOD_SET ||
	     mad->method == IB_MGMT_METHOD_TRAP_REPRESS))
		ibp->rvp.mkey_lease_timeout = 0;

	if (!valid_mkey) {
		switch (mad->method) {
		case IB_MGMT_METHOD_GET:
			/* Bad mkey not a violation below level 2 */
			if (ibp->rvp.mkeyprot < 2)
				break;
		case IB_MGMT_METHOD_SET:
		case IB_MGMT_METHOD_TRAP_REPRESS:
			if (ibp->rvp.mkey_violations != 0xFFFF)
				++ibp->rvp.mkey_violations;
			if (!ibp->rvp.mkey_lease_timeout &&
			    ibp->rvp.mkey_lease_period)
				ibp->rvp.mkey_lease_timeout = jiffies +
					ibp->rvp.mkey_lease_period * HZ;
			/* Generate a trap notice. */
			bad_mkey(ibp, mad, mkey, dr_slid, return_path,
				 hop_cnt);
			ret = 1;
		}
	}

	return ret;
}

/*
 * The SMA caches reads from LCB registers in case the LCB is unavailable.
 * (The LCB is unavailable in certain link states, for example.)
 */
struct lcb_datum {
	u32 off;
	u64 val;
};

static struct lcb_datum lcb_cache[] = {
	{ DC_LCB_STS_ROUND_TRIP_LTP_CNT, 0 },
};

static int write_lcb_cache(u32 off, u64 val)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(lcb_cache); i++) {
		if (lcb_cache[i].off == off) {
			lcb_cache[i].val = val;
			return 0;
		}
	}

	pr_warn("%s bad offset 0x%x\n", __func__, off);
	return -1;
}

static int read_lcb_cache(u32 off, u64 *val)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(lcb_cache); i++) {
		if (lcb_cache[i].off == off) {
			*val = lcb_cache[i].val;
			return 0;
		}
	}

	pr_warn("%s bad offset 0x%x\n", __func__, off);
	return -1;
}

void read_ltp_rtt(struct hfi1_devdata *dd)
{
	u64 reg;

	if (read_lcb_csr(dd, DC_LCB_STS_ROUND_TRIP_LTP_CNT, &reg))
		dd_dev_err(dd, "%s: unable to read LTP RTT\n", __func__);
	else
		write_lcb_cache(DC_LCB_STS_ROUND_TRIP_LTP_CNT, reg);
}

static int __subn_get_opa_portinfo(struct opa_smp *smp, u32 am, u8 *data,
				   struct ib_device *ibdev, u8 port,
				   u32 *resp_len)
{
	int i;
	struct hfi1_devdata *dd;
	struct hfi1_pportdata *ppd;
	struct hfi1_ibport *ibp;
	struct opa_port_info *pi = (struct opa_port_info *)data;
	u8 mtu;
	u8 credit_rate;
	u32 state;
	u32 num_ports = OPA_AM_NPORT(am);
	u32 start_of_sm_config = OPA_AM_START_SM_CFG(am);
	u32 buffer_units;
	u64 tmp = 0;

	if (num_ports != 1) {
		smp->status |= IB_SMP_INVALID_FIELD;
		return reply((struct ib_mad_hdr *)smp);
	}

	dd = dd_from_ibdev(ibdev);
	/* IB numbers ports from 1, hw from 0 */
	ppd = dd->pport + (port - 1);
	ibp = &ppd->ibport_data;

	if (ppd->vls_supported / 2 > ARRAY_SIZE(pi->neigh_mtu.pvlx_to_mtu) ||
	    ppd->vls_supported > ARRAY_SIZE(dd->vld)) {
		smp->status |= IB_SMP_INVALID_FIELD;
		return reply((struct ib_mad_hdr *)smp);
	}

	pi->lid = cpu_to_be32(ppd->lid);

	/* Only return the mkey if the protection field allows it. */
	if (!(smp->method == IB_MGMT_METHOD_GET &&
	      ibp->rvp.mkey != smp->mkey &&
	      ibp->rvp.mkeyprot == 1))
		pi->mkey = ibp->rvp.mkey;

	pi->subnet_prefix = ibp->rvp.gid_prefix;
	pi->sm_lid = cpu_to_be32(ibp->rvp.sm_lid);
	pi->ib_cap_mask = cpu_to_be32(ibp->rvp.port_cap_flags);
	pi->mkey_lease_period = cpu_to_be16(ibp->rvp.mkey_lease_period);
	pi->sm_trap_qp = cpu_to_be32(ppd->sm_trap_qp);
	pi->sa_qp = cpu_to_be32(ppd->sa_qp);

	pi->link_width.enabled = cpu_to_be16(ppd->link_width_enabled);
	pi->link_width.supported = cpu_to_be16(ppd->link_width_supported);
	pi->link_width.active = cpu_to_be16(ppd->link_width_active);

	pi->link_width_downgrade.supported =
		cpu_to_be16(ppd->link_width_downgrade_supported);
	pi->link_width_downgrade.enabled =
		cpu_to_be16(ppd->link_width_downgrade_enabled);
	pi->link_width_downgrade.tx_active =
		cpu_to_be16(ppd->link_width_downgrade_tx_active);
	pi->link_width_downgrade.rx_active =
		cpu_to_be16(ppd->link_width_downgrade_rx_active);

	pi->link_speed.supported = cpu_to_be16(ppd->link_speed_supported);
	pi->link_speed.active = cpu_to_be16(ppd->link_speed_active);
	pi->link_speed.enabled = cpu_to_be16(ppd->link_speed_enabled);

	state = driver_lstate(ppd);

	if (start_of_sm_config && (state == IB_PORT_INIT))
		ppd->is_sm_config_started = 1;

	pi->port_phys_conf = (ppd->port_type & 0xf);

#if PI_LED_ENABLE_SUP
	pi->port_states.ledenable_offlinereason = ppd->neighbor_normal << 4;
	pi->port_states.ledenable_offlinereason |=
		ppd->is_sm_config_started << 5;
	pi->port_states.ledenable_offlinereason |=
		ppd->offline_disabled_reason;
#else
	pi->port_states.offline_reason = ppd->neighbor_normal << 4;
	pi->port_states.offline_reason |= ppd->is_sm_config_started << 5;
	pi->port_states.offline_reason |= ppd->offline_disabled_reason;
#endif /* PI_LED_ENABLE_SUP */

	pi->port_states.portphysstate_portstate =
		(hfi1_ibphys_portstate(ppd) << 4) | state;

	pi->mkeyprotect_lmc = (ibp->rvp.mkeyprot << 6) | ppd->lmc;

	memset(pi->neigh_mtu.pvlx_to_mtu, 0, sizeof(pi->neigh_mtu.pvlx_to_mtu));
	for (i = 0; i < ppd->vls_supported; i++) {
		mtu = mtu_to_enum(dd->vld[i].mtu, HFI1_DEFAULT_ACTIVE_MTU);
		if ((i % 2) == 0)
			pi->neigh_mtu.pvlx_to_mtu[i / 2] |= (mtu << 4);
		else
			pi->neigh_mtu.pvlx_to_mtu[i / 2] |= mtu;
	}
	/* don't forget VL 15 */
	mtu = mtu_to_enum(dd->vld[15].mtu, 2048);
	pi->neigh_mtu.pvlx_to_mtu[15 / 2] |= mtu;
	pi->smsl = ibp->rvp.sm_sl & OPA_PI_MASK_SMSL;
	pi->operational_vls = hfi1_get_ib_cfg(ppd, HFI1_IB_CFG_OP_VLS);
	pi->partenforce_filterraw |=
		(ppd->linkinit_reason & OPA_PI_MASK_LINKINIT_REASON);
	if (ppd->part_enforce & HFI1_PART_ENFORCE_IN)
		pi->partenforce_filterraw |= OPA_PI_MASK_PARTITION_ENFORCE_IN;
	if (ppd->part_enforce & HFI1_PART_ENFORCE_OUT)
		pi->partenforce_filterraw |= OPA_PI_MASK_PARTITION_ENFORCE_OUT;
	pi->mkey_violations = cpu_to_be16(ibp->rvp.mkey_violations);
	/* P_KeyViolations are counted by hardware. */
	pi->pkey_violations = cpu_to_be16(ibp->rvp.pkey_violations);
	pi->qkey_violations = cpu_to_be16(ibp->rvp.qkey_violations);

	pi->vl.cap = ppd->vls_supported;
	pi->vl.high_limit = cpu_to_be16(ibp->rvp.vl_high_limit);
	pi->vl.arb_high_cap = (u8)hfi1_get_ib_cfg(ppd, HFI1_IB_CFG_VL_HIGH_CAP);
	pi->vl.arb_low_cap = (u8)hfi1_get_ib_cfg(ppd, HFI1_IB_CFG_VL_LOW_CAP);

	pi->clientrereg_subnettimeout = ibp->rvp.subnet_timeout;

	pi->port_link_mode = cpu_to_be16(OPA_PORT_LINK_MODE_OPA << 10 |
					 OPA_PORT_LINK_MODE_OPA << 5 |
					 OPA_PORT_LINK_MODE_OPA);

	pi->port_ltp_crc_mode = cpu_to_be16(ppd->port_ltp_crc_mode);

	pi->port_mode = cpu_to_be16(
				ppd->is_active_optimize_enabled ?
					OPA_PI_MASK_PORT_ACTIVE_OPTOMIZE : 0);

	pi->port_packet_format.supported =
		cpu_to_be16(OPA_PORT_PACKET_FORMAT_9B);
	pi->port_packet_format.enabled =
		cpu_to_be16(OPA_PORT_PACKET_FORMAT_9B);

	/* flit_control.interleave is (OPA V1, version .76):
	 * bits		use
	 * ----		---
	 * 2		res
	 * 2		DistanceSupported
	 * 2		DistanceEnabled
	 * 5		MaxNestLevelTxEnabled
	 * 5		MaxNestLevelRxSupported
	 *
	 * HFI supports only "distance mode 1" (see OPA V1, version .76,
	 * section 9.6.2), so set DistanceSupported, DistanceEnabled
	 * to 0x1.
	 */
	pi->flit_control.interleave = cpu_to_be16(0x1400);
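	/*
	 * Illustrative decode of 0x1400 (0b0001_0100_0000_0000), read
	 * MSB-first against the layout above: res = 0, DistanceSupported =
	 * 0x1, DistanceEnabled = 0x1, and both nest-level fields = 0.
	 */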

	pi->link_down_reason = ppd->local_link_down_reason.sma;
	pi->neigh_link_down_reason = ppd->neigh_link_down_reason.sma;
	pi->port_error_action = cpu_to_be32(ppd->port_error_action);
	pi->mtucap = mtu_to_enum(hfi1_max_mtu, IB_MTU_4096);

	/* 32.768 usec. response time (guessing) */
	pi->resptimevalue = 3;

	pi->local_port_num = port;

	/* buffer info for FM */
	pi->overall_buffer_space = cpu_to_be16(dd->link_credits);

	pi->neigh_node_guid = cpu_to_be64(ppd->neighbor_guid);
	pi->neigh_port_num = ppd->neighbor_port_number;
	pi->port_neigh_mode =
		(ppd->neighbor_type & OPA_PI_MASK_NEIGH_NODE_TYPE) |
		(ppd->mgmt_allowed ? OPA_PI_MASK_NEIGH_MGMT_ALLOWED : 0) |
		(ppd->neighbor_fm_security ?
			OPA_PI_MASK_NEIGH_FW_AUTH_BYPASS : 0);

	/* HFIs shall always return VL15 credits to their
	 * neighbor in a timely manner, without any credit return pacing.
	 */
	credit_rate = 0;
	buffer_units = (dd->vau) & OPA_PI_MASK_BUF_UNIT_BUF_ALLOC;
	buffer_units |= (dd->vcu << 3) & OPA_PI_MASK_BUF_UNIT_CREDIT_ACK;
	buffer_units |= (credit_rate << 6) &
				OPA_PI_MASK_BUF_UNIT_VL15_CREDIT_RATE;
	buffer_units |= (dd->vl15_init << 11) & OPA_PI_MASK_BUF_UNIT_VL15_INIT;
	pi->buffer_units = cpu_to_be32(buffer_units);
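	/*
	 * Field placement, per the shifts and masks above: vau at bit 0,
	 * vcu at bit 3, the (always zero) VL15 credit rate at bit 6, and
	 * vl15_init at bit 11 of buffer_units.
	 */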

	pi->opa_cap_mask = cpu_to_be16(OPA_CAP_MASK3_IsSharedSpaceSupported);

	/* HFI supports a replay buffer 128 LTPs in size */
	pi->replay_depth.buffer = 0x80;
	/* read the cached value of DC_LCB_STS_ROUND_TRIP_LTP_CNT */
	read_lcb_cache(DC_LCB_STS_ROUND_TRIP_LTP_CNT, &tmp);

	/* this counter is 16 bits wide, but the replay_depth.wire
	 * variable is only 8 bits */
	if (tmp > 0xff)
		tmp = 0xff;
	pi->replay_depth.wire = tmp;

	if (resp_len)
		*resp_len += sizeof(struct opa_port_info);

	return reply((struct ib_mad_hdr *)smp);
}

/**
 * get_pkeys - return the PKEY table
 * @dd: the hfi1_ib device
 * @port: the IB port number
 * @pkeys: the pkey table is placed here
 */
static int get_pkeys(struct hfi1_devdata *dd, u8 port, u16 *pkeys)
{
	struct hfi1_pportdata *ppd = dd->pport + port - 1;

	memcpy(pkeys, ppd->pkeys, sizeof(ppd->pkeys));

	return 0;
}

static int __subn_get_opa_pkeytable(struct opa_smp *smp, u32 am, u8 *data,
				    struct ib_device *ibdev, u8 port,
				    u32 *resp_len)
{
	struct hfi1_devdata *dd = dd_from_ibdev(ibdev);
	u32 n_blocks_req = OPA_AM_NBLK(am);
	u32 start_block = am & 0x7ff;
	__be16 *p;
	u16 *q;
	int i;
	u16 n_blocks_avail;
	unsigned npkeys = hfi1_get_npkeys(dd);
	size_t size;

	if (n_blocks_req == 0) {
		pr_warn("OPA Get PKey AM Invalid : P = %d; B = 0x%x; N = 0x%x\n",
			port, start_block, n_blocks_req);
		smp->status |= IB_SMP_INVALID_FIELD;
		return reply((struct ib_mad_hdr *)smp);
	}

	n_blocks_avail = (u16)(npkeys / OPA_PARTITION_TABLE_BLK_SIZE) + 1;
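	/*
	 * Illustrative: npkeys == 16 with 8-entry blocks makes the line
	 * above advertise n_blocks_avail == 16 / 8 + 1 == 3 to the FM.
	 */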

	size = (n_blocks_req * OPA_PARTITION_TABLE_BLK_SIZE) * sizeof(u16);

	if (start_block + n_blocks_req > n_blocks_avail ||
	    n_blocks_req > OPA_NUM_PKEY_BLOCKS_PER_SMP) {
		pr_warn("OPA Get PKey AM Invalid : s 0x%x; req 0x%x; avail 0x%x; blk/smp 0x%lx\n",
			start_block, n_blocks_req, n_blocks_avail,
			OPA_NUM_PKEY_BLOCKS_PER_SMP);
		smp->status |= IB_SMP_INVALID_FIELD;
		return reply((struct ib_mad_hdr *)smp);
	}

	p = (__be16 *)data;
	q = (u16 *)data;
	/* get the real pkeys if we are requesting the first block */
	if (start_block == 0) {
		get_pkeys(dd, port, q);
		for (i = 0; i < npkeys; i++)
			p[i] = cpu_to_be16(q[i]);
		if (resp_len)
			*resp_len += size;
	} else
		smp->status |= IB_SMP_INVALID_FIELD;

	return reply((struct ib_mad_hdr *)smp);
}

enum {
	HFI_TRANSITION_DISALLOWED,
	HFI_TRANSITION_IGNORED,
	HFI_TRANSITION_ALLOWED,
	HFI_TRANSITION_UNDEFINED,
};

/*
 * Use shortened names to improve readability of
 * {logical,physical}_state_transitions
 */
enum {
	__D = HFI_TRANSITION_DISALLOWED,
	__I = HFI_TRANSITION_IGNORED,
	__A = HFI_TRANSITION_ALLOWED,
	__U = HFI_TRANSITION_UNDEFINED,
};

/*
 * IB_PORTPHYSSTATE_POLLING (2) through OPA_PORTPHYSSTATE_MAX (11) are
 * represented in physical_state_transitions.
 */
#define __N_PHYSTATES (OPA_PORTPHYSSTATE_MAX - IB_PORTPHYSSTATE_POLLING + 1)

/*
 * Within physical_state_transitions, rows represent "old" states,
 * columns "new" states, and physical_state_transitions.allowed[old][new]
 * indicates if the transition from old state to new state is legal (see
 * OPAg1v1, Table 6-4).
 */
static const struct {
	u8 allowed[__N_PHYSTATES][__N_PHYSTATES];
} physical_state_transitions = {
	{
		/*      2    3    4    5    6    7    8    9   10   11 */
	/* 2 */	{ __A, __A, __D, __D, __D, __D, __D, __D, __D, __D },
	/* 3 */	{ __A, __I, __D, __D, __D, __D, __D, __D, __D, __A },
	/* 4 */	{ __U, __U, __U, __U, __U, __U, __U, __U, __U, __U },
	/* 5 */	{ __A, __A, __D, __I, __D, __D, __D, __D, __D, __D },
	/* 6 */	{ __U, __U, __U, __U, __U, __U, __U, __U, __U, __U },
	/* 7 */	{ __D, __A, __D, __D, __D, __I, __D, __D, __D, __D },
	/* 8 */	{ __U, __U, __U, __U, __U, __U, __U, __U, __U, __U },
	/* 9 */	{ __I, __A, __D, __D, __D, __D, __D, __I, __D, __D },
	/*10 */	{ __U, __U, __U, __U, __U, __U, __U, __U, __U, __U },
	/*11 */	{ __D, __A, __D, __D, __D, __D, __D, __D, __D, __I },
	}
};
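
/*
 * Illustrative lookup: old = 2 (Polling), new = 3 (Disabled) indexes
 * allowed[0][1] == __A once IB_PORTPHYSSTATE_POLLING is subtracted,
 * so that transition is permitted; all-__U rows are reserved states.
 */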

/*
 * IB_PORT_DOWN (1) through IB_PORT_ACTIVE_DEFER (5) are represented
 * in logical_state_transitions
 */

#define __N_LOGICAL_STATES (IB_PORT_ACTIVE_DEFER - IB_PORT_DOWN + 1)

/*
 * Within logical_state_transitions rows represent "old" states,
 * columns "new" states, and logical_state_transitions.allowed[old][new]
 * indicates if the transition from old state to new state is legal (see
 * OPAg1v1, Table 9-12).
 */
static const struct {
	u8 allowed[__N_LOGICAL_STATES][__N_LOGICAL_STATES];
} logical_state_transitions = {
	{
		/*      1    2    3    4    5 */
	/* 1 */	{ __I, __D, __D, __D, __U },
	/* 2 */	{ __D, __I, __A, __D, __U },
	/* 3 */	{ __D, __D, __I, __A, __U },
	/* 4 */	{ __D, __D, __I, __I, __U },
	/* 5 */	{ __U, __U, __U, __U, __U },
	}
};
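
/*
 * Illustrative lookup: old = IB_PORT_INIT (2), new = IB_PORT_ARMED (3)
 * indexes allowed[1][2] == __A once IB_PORT_DOWN is subtracted, so
 * Init -> Armed is permitted.
 */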

static int logical_transition_allowed(int old, int new)
{
	if (old < IB_PORT_NOP || old > IB_PORT_ACTIVE_DEFER ||
	    new < IB_PORT_NOP || new > IB_PORT_ACTIVE_DEFER) {
		pr_warn("invalid logical state(s) (old %d new %d)\n",
			old, new);
		return HFI_TRANSITION_UNDEFINED;
	}

	if (new == IB_PORT_NOP)
		return HFI_TRANSITION_ALLOWED; /* always allowed */

	/* adjust states for indexing into logical_state_transitions */
	old -= IB_PORT_DOWN;
	new -= IB_PORT_DOWN;

	if (old < 0 || new < 0)
		return HFI_TRANSITION_UNDEFINED;
	return logical_state_transitions.allowed[old][new];
}

static int physical_transition_allowed(int old, int new)
{
	if (old < IB_PORTPHYSSTATE_NOP || old > OPA_PORTPHYSSTATE_MAX ||
	    new < IB_PORTPHYSSTATE_NOP || new > OPA_PORTPHYSSTATE_MAX) {
		pr_warn("invalid physical state(s) (old %d new %d)\n",
			old, new);
		return HFI_TRANSITION_UNDEFINED;
	}

	if (new == IB_PORTPHYSSTATE_NOP)
		return HFI_TRANSITION_ALLOWED; /* always allowed */

	/* adjust states for indexing into physical_state_transitions */
	old -= IB_PORTPHYSSTATE_POLLING;
	new -= IB_PORTPHYSSTATE_POLLING;

	if (old < 0 || new < 0)
		return HFI_TRANSITION_UNDEFINED;
	return physical_state_transitions.allowed[old][new];
}

static int port_states_transition_allowed(struct hfi1_pportdata *ppd,
					  u32 logical_new, u32 physical_new)
{
	u32 physical_old = driver_physical_state(ppd);
	u32 logical_old = driver_logical_state(ppd);
	int ret, logical_allowed, physical_allowed;

	logical_allowed = ret =
		logical_transition_allowed(logical_old, logical_new);

	if (ret == HFI_TRANSITION_DISALLOWED ||
	    ret == HFI_TRANSITION_UNDEFINED) {
		pr_warn("invalid logical state transition %s -> %s\n",
			opa_lstate_name(logical_old),
			opa_lstate_name(logical_new));
		return ret;
	}

	physical_allowed = ret =
		physical_transition_allowed(physical_old, physical_new);

	if (ret == HFI_TRANSITION_DISALLOWED ||
	    ret == HFI_TRANSITION_UNDEFINED) {
		pr_warn("invalid physical state transition %s -> %s\n",
			opa_pstate_name(physical_old),
			opa_pstate_name(physical_new));
		return ret;
	}

	if (logical_allowed == HFI_TRANSITION_IGNORED &&
	    physical_allowed == HFI_TRANSITION_IGNORED)
		return HFI_TRANSITION_IGNORED;

	/*
	 * A change request of Physical Port State from
	 * 'Offline' to 'Polling' should be ignored.
	 */
	if ((physical_old == OPA_PORTPHYSSTATE_OFFLINE) &&
	    (physical_new == IB_PORTPHYSSTATE_POLLING))
		return HFI_TRANSITION_IGNORED;

	/*
	 * Either physical_allowed or logical_allowed is
	 * HFI_TRANSITION_ALLOWED.
	 */
	return HFI_TRANSITION_ALLOWED;
}

static int set_port_states(struct hfi1_pportdata *ppd, struct opa_smp *smp,
			   u32 logical_state, u32 phys_state,
			   int suppress_idle_sma)
{
	struct hfi1_devdata *dd = ppd->dd;
	u32 link_state;
	int ret;

	ret = port_states_transition_allowed(ppd, logical_state, phys_state);
	if (ret == HFI_TRANSITION_DISALLOWED ||
	    ret == HFI_TRANSITION_UNDEFINED) {
		/* error message emitted above */
		smp->status |= IB_SMP_INVALID_FIELD;
		return 0;
	}

	if (ret == HFI_TRANSITION_IGNORED)
		return 0;

	if ((phys_state != IB_PORTPHYSSTATE_NOP) &&
	    !(logical_state == IB_PORT_DOWN ||
	      logical_state == IB_PORT_NOP)) {
		pr_warn("SubnSet(OPA_PortInfo) port state invalid: logical_state 0x%x physical_state 0x%x\n",
			logical_state, phys_state);
		smp->status |= IB_SMP_INVALID_FIELD;
	}

	/*
	 * Logical state changes are summarized in OPAv1g1 spec.,
	 * Table 9-12; physical state changes are summarized in
	 * OPAv1g1 spec., Table 6.4.
	 */
	switch (logical_state) {
	case IB_PORT_NOP:
		if (phys_state == IB_PORTPHYSSTATE_NOP)
			break;
		/* FALLTHROUGH */
	case IB_PORT_DOWN:
		if (phys_state == IB_PORTPHYSSTATE_NOP)
			link_state = HLS_DN_DOWNDEF;
		else if (phys_state == IB_PORTPHYSSTATE_POLLING) {
			link_state = HLS_DN_POLL;
			set_link_down_reason(ppd,
					     OPA_LINKDOWN_REASON_FM_BOUNCE, 0,
					     OPA_LINKDOWN_REASON_FM_BOUNCE);
		} else if (phys_state == IB_PORTPHYSSTATE_DISABLED)
			link_state = HLS_DN_DISABLE;
		else {
			pr_warn("SubnSet(OPA_PortInfo) invalid physical state 0x%x\n",
				phys_state);
			smp->status |= IB_SMP_INVALID_FIELD;
			break;
		}

		set_link_state(ppd, link_state);
		if (link_state == HLS_DN_DISABLE &&
		    (ppd->offline_disabled_reason >
		     HFI1_ODR_MASK(OPA_LINKDOWN_REASON_SMA_DISABLED) ||
		     ppd->offline_disabled_reason ==
		     HFI1_ODR_MASK(OPA_LINKDOWN_REASON_NONE)))
			ppd->offline_disabled_reason =
			HFI1_ODR_MASK(OPA_LINKDOWN_REASON_SMA_DISABLED);
		/*
		 * Don't send a reply if the response would be sent
		 * through the disabled port.
		 */
		if (link_state == HLS_DN_DISABLE && smp->hop_cnt)
			return IB_MAD_RESULT_SUCCESS | IB_MAD_RESULT_CONSUMED;
		break;
	case IB_PORT_ARMED:
		ret = set_link_state(ppd, HLS_UP_ARMED);
		if ((ret == 0) && (suppress_idle_sma == 0))
			send_idle_sma(dd, SMA_IDLE_ARM);
		break;
	case IB_PORT_ACTIVE:
		if (ppd->neighbor_normal) {
			ret = set_link_state(ppd, HLS_UP_ACTIVE);
			if (ret == 0)
				send_idle_sma(dd, SMA_IDLE_ACTIVE);
		} else {
			pr_warn("SubnSet(OPA_PortInfo) Cannot move to Active with NeighborNormal 0\n");
			smp->status |= IB_SMP_INVALID_FIELD;
		}
		break;
	default:
		pr_warn("SubnSet(OPA_PortInfo) invalid logical state 0x%x\n",
			logical_state);
		smp->status |= IB_SMP_INVALID_FIELD;
	}

	return 0;
}
1032/**
1033 * subn_set_opa_portinfo - set port information
1034 * @smp: the incoming SM packet
1035 * @ibdev: the infiniband device
1036 * @port: the port on the device
1037 *
1038 */
1039static int __subn_set_opa_portinfo(struct opa_smp *smp, u32 am, u8 *data,
1040 struct ib_device *ibdev, u8 port,
1041 u32 *resp_len)
1042{
1043 struct opa_port_info *pi = (struct opa_port_info *)data;
1044 struct ib_event event;
1045 struct hfi1_devdata *dd;
1046 struct hfi1_pportdata *ppd;
1047 struct hfi1_ibport *ibp;
1048 u8 clientrereg;
1049 unsigned long flags;
1050 u32 smlid, opa_lid; /* tmp vars to hold LID values */
1051 u16 lid;
1052 u8 ls_old, ls_new, ps_new;
1053 u8 vls;
1054 u8 msl;
1055 u8 crc_enabled;
1056 u16 lse, lwe, mtu;
1057 u32 num_ports = OPA_AM_NPORT(am);
1058 u32 start_of_sm_config = OPA_AM_START_SM_CFG(am);
1059 int ret, i, invalid = 0, call_set_mtu = 0;
1060 int call_link_downgrade_policy = 0;
1061
1062 if (num_ports != 1) {
1063 smp->status |= IB_SMP_INVALID_FIELD;
1064 return reply((struct ib_mad_hdr *)smp);
1065 }
1066
1067 opa_lid = be32_to_cpu(pi->lid);
1068 if (opa_lid & 0xFFFF0000) {
1069 pr_warn("OPA_PortInfo lid out of range: %X\n", opa_lid);
1070 smp->status |= IB_SMP_INVALID_FIELD;
1071 goto get_only;
1072 }
1073
1074 lid = (u16)(opa_lid & 0x0000FFFF);
1075
1076 smlid = be32_to_cpu(pi->sm_lid);
1077 if (smlid & 0xFFFF0000) {
1078 pr_warn("OPA_PortInfo SM lid out of range: %X\n", smlid);
1079 smp->status |= IB_SMP_INVALID_FIELD;
1080 goto get_only;
1081 }
1082 smlid &= 0x0000FFFF;
1083
1084 clientrereg = (pi->clientrereg_subnettimeout &
1085 OPA_PI_MASK_CLIENT_REREGISTER);
1086
1087 dd = dd_from_ibdev(ibdev);
1088 /* IB numbers ports from 1, hw from 0 */
1089 ppd = dd->pport + (port - 1);
1090 ibp = &ppd->ibport_data;
1091 event.device = ibdev;
1092 event.element.port_num = port;
1093
1094 ls_old = driver_lstate(ppd);
1095
4eb06882
DD
1096 ibp->rvp.mkey = pi->mkey;
1097 ibp->rvp.gid_prefix = pi->subnet_prefix;
1098 ibp->rvp.mkey_lease_period = be16_to_cpu(pi->mkey_lease_period);
77241056
MM
1099
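	/*
	 * Unicast LIDs span 0x0001-0xBFFF; IB_MULTICAST_LID_BASE (0xC000)
	 * and above are multicast, hence the upper-bound checks on lid
	 * and smlid below.
	 */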
	/* Must be a valid unicast LID address. */
	if ((lid == 0 && ls_old > IB_PORT_INIT) ||
	    lid >= be16_to_cpu(IB_MULTICAST_LID_BASE)) {
		smp->status |= IB_SMP_INVALID_FIELD;
		pr_warn("SubnSet(OPA_PortInfo) lid invalid 0x%x\n",
			lid);
	} else if (ppd->lid != lid ||
		   ppd->lmc != (pi->mkeyprotect_lmc & OPA_PI_MASK_LMC)) {
		if (ppd->lid != lid)
			hfi1_set_uevent_bits(ppd, _HFI1_EVENT_LID_CHANGE_BIT);
		if (ppd->lmc != (pi->mkeyprotect_lmc & OPA_PI_MASK_LMC))
			hfi1_set_uevent_bits(ppd, _HFI1_EVENT_LMC_CHANGE_BIT);
		hfi1_set_lid(ppd, lid, pi->mkeyprotect_lmc & OPA_PI_MASK_LMC);
		event.event = IB_EVENT_LID_CHANGE;
		ib_dispatch_event(&event);
	}

	msl = pi->smsl & OPA_PI_MASK_SMSL;
	if (pi->partenforce_filterraw & OPA_PI_MASK_LINKINIT_REASON)
		ppd->linkinit_reason =
			(pi->partenforce_filterraw &
			 OPA_PI_MASK_LINKINIT_REASON);
	/* enable/disable SW pkey checking as per FM control */
	if (pi->partenforce_filterraw & OPA_PI_MASK_PARTITION_ENFORCE_IN)
		ppd->part_enforce |= HFI1_PART_ENFORCE_IN;
	else
		ppd->part_enforce &= ~HFI1_PART_ENFORCE_IN;

	if (pi->partenforce_filterraw & OPA_PI_MASK_PARTITION_ENFORCE_OUT)
		ppd->part_enforce |= HFI1_PART_ENFORCE_OUT;
	else
		ppd->part_enforce &= ~HFI1_PART_ENFORCE_OUT;

	/* Must be a valid unicast LID address. */
	if ((smlid == 0 && ls_old > IB_PORT_INIT) ||
	    smlid >= be16_to_cpu(IB_MULTICAST_LID_BASE)) {
		smp->status |= IB_SMP_INVALID_FIELD;
		pr_warn("SubnSet(OPA_PortInfo) smlid invalid 0x%x\n", smlid);
	} else if (smlid != ibp->rvp.sm_lid || msl != ibp->rvp.sm_sl) {
		pr_warn("SubnSet(OPA_PortInfo) smlid 0x%x\n", smlid);
		spin_lock_irqsave(&ibp->rvp.lock, flags);
		if (ibp->rvp.sm_ah) {
			if (smlid != ibp->rvp.sm_lid)
				ibp->rvp.sm_ah->attr.dlid = smlid;
			if (msl != ibp->rvp.sm_sl)
				ibp->rvp.sm_ah->attr.sl = msl;
		}
		spin_unlock_irqrestore(&ibp->rvp.lock, flags);
		if (smlid != ibp->rvp.sm_lid)
			ibp->rvp.sm_lid = smlid;
		if (msl != ibp->rvp.sm_sl)
			ibp->rvp.sm_sl = msl;
		event.event = IB_EVENT_SM_CHANGE;
		ib_dispatch_event(&event);
	}

	if (pi->link_down_reason == 0) {
		ppd->local_link_down_reason.sma = 0;
		ppd->local_link_down_reason.latest = 0;
	}

	if (pi->neigh_link_down_reason == 0) {
		ppd->neigh_link_down_reason.sma = 0;
		ppd->neigh_link_down_reason.latest = 0;
	}

	ppd->sm_trap_qp = be32_to_cpu(pi->sm_trap_qp);
	ppd->sa_qp = be32_to_cpu(pi->sa_qp);

	ppd->port_error_action = be32_to_cpu(pi->port_error_action);
	lwe = be16_to_cpu(pi->link_width.enabled);
	if (lwe) {
		if (lwe == OPA_LINK_WIDTH_RESET ||
		    lwe == OPA_LINK_WIDTH_RESET_OLD)
			set_link_width_enabled(ppd, ppd->link_width_supported);
		else if ((lwe & ~ppd->link_width_supported) == 0)
			set_link_width_enabled(ppd, lwe);
		else
			smp->status |= IB_SMP_INVALID_FIELD;
	}
	lwe = be16_to_cpu(pi->link_width_downgrade.enabled);
	/* LWD.E is always applied - 0 means "disabled" */
	if (lwe == OPA_LINK_WIDTH_RESET ||
	    lwe == OPA_LINK_WIDTH_RESET_OLD) {
		set_link_width_downgrade_enabled(ppd,
				ppd->link_width_downgrade_supported);
	} else if ((lwe & ~ppd->link_width_downgrade_supported) == 0) {
		/* only set and apply if something changed */
		if (lwe != ppd->link_width_downgrade_enabled) {
			set_link_width_downgrade_enabled(ppd, lwe);
			call_link_downgrade_policy = 1;
		}
	} else
		smp->status |= IB_SMP_INVALID_FIELD;

	lse = be16_to_cpu(pi->link_speed.enabled);
	if (lse) {
		if (lse & be16_to_cpu(pi->link_speed.supported))
			set_link_speed_enabled(ppd, lse);
		else
			smp->status |= IB_SMP_INVALID_FIELD;
	}

	ibp->rvp.mkeyprot =
		(pi->mkeyprotect_lmc & OPA_PI_MASK_MKEY_PROT_BIT) >> 6;
	ibp->rvp.vl_high_limit = be16_to_cpu(pi->vl.high_limit) & 0xFF;
	(void)hfi1_set_ib_cfg(ppd, HFI1_IB_CFG_VL_HIGH_LIMIT,
			      ibp->rvp.vl_high_limit);

	if (ppd->vls_supported / 2 > ARRAY_SIZE(pi->neigh_mtu.pvlx_to_mtu) ||
	    ppd->vls_supported > ARRAY_SIZE(dd->vld)) {
		smp->status |= IB_SMP_INVALID_FIELD;
		return reply((struct ib_mad_hdr *)smp);
	}
	for (i = 0; i < ppd->vls_supported; i++) {
		if ((i % 2) == 0)
			mtu = enum_to_mtu((pi->neigh_mtu.pvlx_to_mtu[i / 2] >> 4)
					  & 0xF);
		else
			mtu = enum_to_mtu(pi->neigh_mtu.pvlx_to_mtu[i / 2] & 0xF);
		if (mtu == 0xffff) {
			pr_warn("SubnSet(OPA_PortInfo) mtu invalid %d (0x%x)\n",
				mtu,
				(pi->neigh_mtu.pvlx_to_mtu[0] >> 4) & 0xF);
			smp->status |= IB_SMP_INVALID_FIELD;
			mtu = hfi1_max_mtu; /* use a valid MTU */
		}
		if (dd->vld[i].mtu != mtu) {
			dd_dev_info(dd,
				    "MTU change on vl %d from %d to %d\n",
				    i, dd->vld[i].mtu, mtu);
			dd->vld[i].mtu = mtu;
			call_set_mtu++;
		}
	}
	/* As per OPAV1 spec: VL15 must support and be configured
	 * for operation with a 2048 or larger MTU.
	 */
	mtu = enum_to_mtu(pi->neigh_mtu.pvlx_to_mtu[15 / 2] & 0xF);
	if (mtu < 2048 || mtu == 0xffff)
		mtu = 2048;
	if (dd->vld[15].mtu != mtu) {
		dd_dev_info(dd,
			    "MTU change on vl 15 from %d to %d\n",
			    dd->vld[15].mtu, mtu);
		dd->vld[15].mtu = mtu;
		call_set_mtu++;
	}
	if (call_set_mtu)
		set_mtu(ppd);

	/* Set operational VLs */
	vls = pi->operational_vls & OPA_PI_MASK_OPERATIONAL_VL;
	if (vls) {
		if (vls > ppd->vls_supported) {
			pr_warn("SubnSet(OPA_PortInfo) VL's supported invalid %d\n",
				pi->operational_vls);
			smp->status |= IB_SMP_INVALID_FIELD;
		} else {
			if (hfi1_set_ib_cfg(ppd, HFI1_IB_CFG_OP_VLS,
					    vls) == -EINVAL)
				smp->status |= IB_SMP_INVALID_FIELD;
		}
	}

	if (pi->mkey_violations == 0)
		ibp->rvp.mkey_violations = 0;

	if (pi->pkey_violations == 0)
		ibp->rvp.pkey_violations = 0;

	if (pi->qkey_violations == 0)
		ibp->rvp.qkey_violations = 0;

	ibp->rvp.subnet_timeout =
		pi->clientrereg_subnettimeout & OPA_PI_MASK_SUBNET_TIMEOUT;

	crc_enabled = be16_to_cpu(pi->port_ltp_crc_mode);
	crc_enabled >>= 4;
	crc_enabled &= 0xf;

	if (crc_enabled != 0)
		ppd->port_crc_mode_enabled = port_ltp_to_cap(crc_enabled);

	ppd->is_active_optimize_enabled =
			!!(be16_to_cpu(pi->port_mode)
					& OPA_PI_MASK_PORT_ACTIVE_OPTOMIZE);

	ls_new = pi->port_states.portphysstate_portstate &
			OPA_PI_MASK_PORT_STATE;
	ps_new = (pi->port_states.portphysstate_portstate &
			OPA_PI_MASK_PORT_PHYSICAL_STATE) >> 4;

	if (ls_old == IB_PORT_INIT) {
		if (start_of_sm_config) {
			if (ls_new == ls_old || (ls_new == IB_PORT_ARMED))
				ppd->is_sm_config_started = 1;
		} else if (ls_new == IB_PORT_ARMED) {
			if (ppd->is_sm_config_started == 0)
				invalid = 1;
		}
	}

	/* Handle CLIENT_REREGISTER event b/c SM asked us for it */
	if (clientrereg) {
		event.event = IB_EVENT_CLIENT_REREGISTER;
		ib_dispatch_event(&event);
	}

	/*
	 * Do the port state change now that the other link parameters
	 * have been set.
	 * Changing the port physical state only makes sense if the link
	 * is down or is being set to down.
	 */

	ret = set_port_states(ppd, smp, ls_new, ps_new, invalid);
	if (ret)
		return ret;

	ret = __subn_get_opa_portinfo(smp, am, data, ibdev, port, resp_len);

	/* restore re-reg bit per o14-12.2.1 */
	pi->clientrereg_subnettimeout |= clientrereg;

	/*
	 * Apply the new link downgrade policy. This may result in a link
	 * bounce. Do this after everything else so things are settled.
	 * Possible problem: if setting the port state above fails, then
	 * the policy change is not applied.
	 */
	if (call_link_downgrade_policy)
		apply_link_downgrade_policy(ppd, 0);

	return ret;

get_only:
	return __subn_get_opa_portinfo(smp, am, data, ibdev, port, resp_len);
}

/**
 * set_pkeys - set the PKEY table for ctxt 0
 * @dd: the hfi1_ib device
 * @port: the IB port number
 * @pkeys: the PKEY table
 */
static int set_pkeys(struct hfi1_devdata *dd, u8 port, u16 *pkeys)
{
	struct hfi1_pportdata *ppd;
	int i;
	int changed = 0;
	int update_includes_mgmt_partition = 0;

	/*
	 * IB port one/two always maps to context zero/one,
	 * always a kernel context, no locking needed
	 * If we get here with ppd setup, no need to check
	 * that rcd is valid.
	 */
	ppd = dd->pport + (port - 1);
	/*
	 * If the update does not include the management pkey, don't do it.
	 */
	for (i = 0; i < ARRAY_SIZE(ppd->pkeys); i++) {
		if (pkeys[i] == LIM_MGMT_P_KEY) {
			update_includes_mgmt_partition = 1;
			break;
		}
	}

	if (!update_includes_mgmt_partition)
		return 1;

	for (i = 0; i < ARRAY_SIZE(ppd->pkeys); i++) {
		u16 key = pkeys[i];
		u16 okey = ppd->pkeys[i];

		if (key == okey)
			continue;
		/*
		 * The SM gives us the complete PKey table. We have
		 * to ensure that we put the PKeys in the matching
		 * slots.
		 */
		ppd->pkeys[i] = key;
		changed = 1;
	}

	if (changed) {
		struct ib_event event;

		(void)hfi1_set_ib_cfg(ppd, HFI1_IB_CFG_PKEYS, 0);

		event.event = IB_EVENT_PKEY_CHANGE;
		event.device = &dd->verbs_dev.rdi.ibdev;
		event.element.port_num = port;
		ib_dispatch_event(&event);
	}
	return 0;
}

static int __subn_set_opa_pkeytable(struct opa_smp *smp, u32 am, u8 *data,
				    struct ib_device *ibdev, u8 port,
				    u32 *resp_len)
{
	struct hfi1_devdata *dd = dd_from_ibdev(ibdev);
	u32 n_blocks_sent = OPA_AM_NBLK(am);
	u32 start_block = am & 0x7ff;
	u16 *p = (u16 *)data;
	__be16 *q = (__be16 *)data;
	int i;
	u16 n_blocks_avail;
	unsigned npkeys = hfi1_get_npkeys(dd);

	if (n_blocks_sent == 0) {
		pr_warn("OPA Set PKey AM Invalid : P = %d; B = 0x%x; N = 0x%x\n",
			port, start_block, n_blocks_sent);
		smp->status |= IB_SMP_INVALID_FIELD;
		return reply((struct ib_mad_hdr *)smp);
	}

	n_blocks_avail = (u16)(npkeys / OPA_PARTITION_TABLE_BLK_SIZE) + 1;

	if (start_block + n_blocks_sent > n_blocks_avail ||
	    n_blocks_sent > OPA_NUM_PKEY_BLOCKS_PER_SMP) {
		pr_warn("OPA Set PKey AM Invalid : s 0x%x; req 0x%x; avail 0x%x; blk/smp 0x%lx\n",
			start_block, n_blocks_sent, n_blocks_avail,
			OPA_NUM_PKEY_BLOCKS_PER_SMP);
		smp->status |= IB_SMP_INVALID_FIELD;
		return reply((struct ib_mad_hdr *)smp);
	}

	for (i = 0; i < n_blocks_sent * OPA_PARTITION_TABLE_BLK_SIZE; i++)
		p[i] = be16_to_cpu(q[i]);

	if (start_block == 0 && set_pkeys(dd, port, p) != 0) {
		smp->status |= IB_SMP_INVALID_FIELD;
		return reply((struct ib_mad_hdr *)smp);
	}

	return __subn_get_opa_pkeytable(smp, am, data, ibdev, port, resp_len);
}

static int get_sc2vlt_tables(struct hfi1_devdata *dd, void *data)
{
	u64 *val = data;

	*val++ = read_csr(dd, SEND_SC2VLT0);
	*val++ = read_csr(dd, SEND_SC2VLT1);
	*val++ = read_csr(dd, SEND_SC2VLT2);
	*val++ = read_csr(dd, SEND_SC2VLT3);
	return 0;
}

#define ILLEGAL_VL 12
/*
 * filter_sc2vlt changes mappings to VL15 to ILLEGAL_VL (except
 * for SC15, which must map to VL15). If we don't remap things this
 * way it is possible for VL15 counters to increment when we try to
 * send on a SC which is mapped to an invalid VL.
 */
static void filter_sc2vlt(void *data)
{
	int i;
	u8 *pd = data;

	for (i = 0; i < OPA_MAX_SCS; i++) {
		if (i == 15)
			continue;
		if ((pd[i] & 0x1f) == 0xf)
			pd[i] = ILLEGAL_VL;
	}
}

static int set_sc2vlt_tables(struct hfi1_devdata *dd, void *data)
{
	u64 *val = data;

	filter_sc2vlt(data);

	write_csr(dd, SEND_SC2VLT0, *val++);
	write_csr(dd, SEND_SC2VLT1, *val++);
	write_csr(dd, SEND_SC2VLT2, *val++);
	write_csr(dd, SEND_SC2VLT3, *val++);
	write_seqlock_irq(&dd->sc2vl_lock);
	memcpy(dd->sc2vl, data, sizeof(dd->sc2vl));
	write_sequnlock_irq(&dd->sc2vl_lock);
	return 0;
}

static int __subn_get_opa_sl_to_sc(struct opa_smp *smp, u32 am, u8 *data,
				   struct ib_device *ibdev, u8 port,
				   u32 *resp_len)
{
	struct hfi1_ibport *ibp = to_iport(ibdev, port);
	u8 *p = data;
	size_t size = ARRAY_SIZE(ibp->sl_to_sc); /* == 32 */
	unsigned i;

	if (am) {
		smp->status |= IB_SMP_INVALID_FIELD;
		return reply((struct ib_mad_hdr *)smp);
	}

	for (i = 0; i < ARRAY_SIZE(ibp->sl_to_sc); i++)
		*p++ = ibp->sl_to_sc[i];

	if (resp_len)
		*resp_len += size;

	return reply((struct ib_mad_hdr *)smp);
}

static int __subn_set_opa_sl_to_sc(struct opa_smp *smp, u32 am, u8 *data,
				   struct ib_device *ibdev, u8 port,
				   u32 *resp_len)
{
	struct hfi1_ibport *ibp = to_iport(ibdev, port);
	u8 *p = data;
	int i;

	if (am) {
		smp->status |= IB_SMP_INVALID_FIELD;
		return reply((struct ib_mad_hdr *)smp);
	}

	for (i = 0; i < ARRAY_SIZE(ibp->sl_to_sc); i++)
		ibp->sl_to_sc[i] = *p++;

	return __subn_get_opa_sl_to_sc(smp, am, data, ibdev, port, resp_len);
}

static int __subn_get_opa_sc_to_sl(struct opa_smp *smp, u32 am, u8 *data,
				   struct ib_device *ibdev, u8 port,
				   u32 *resp_len)
{
	struct hfi1_ibport *ibp = to_iport(ibdev, port);
	u8 *p = data;
	size_t size = ARRAY_SIZE(ibp->sc_to_sl); /* == 32 */
	unsigned i;

	if (am) {
		smp->status |= IB_SMP_INVALID_FIELD;
		return reply((struct ib_mad_hdr *)smp);
	}

	for (i = 0; i < ARRAY_SIZE(ibp->sc_to_sl); i++)
		*p++ = ibp->sc_to_sl[i];

	if (resp_len)
		*resp_len += size;

	return reply((struct ib_mad_hdr *)smp);
}

static int __subn_set_opa_sc_to_sl(struct opa_smp *smp, u32 am, u8 *data,
				   struct ib_device *ibdev, u8 port,
				   u32 *resp_len)
{
	struct hfi1_ibport *ibp = to_iport(ibdev, port);
	u8 *p = data;
	int i;

	if (am) {
		smp->status |= IB_SMP_INVALID_FIELD;
		return reply((struct ib_mad_hdr *)smp);
	}

	for (i = 0; i < ARRAY_SIZE(ibp->sc_to_sl); i++)
		ibp->sc_to_sl[i] = *p++;

	return __subn_get_opa_sc_to_sl(smp, am, data, ibdev, port, resp_len);
}

static int __subn_get_opa_sc_to_vlt(struct opa_smp *smp, u32 am, u8 *data,
				    struct ib_device *ibdev, u8 port,
				    u32 *resp_len)
{
	u32 n_blocks = OPA_AM_NBLK(am);
	struct hfi1_devdata *dd = dd_from_ibdev(ibdev);
	void *vp = (void *)data;
	size_t size = 4 * sizeof(u64);

	if (n_blocks != 1) {
		smp->status |= IB_SMP_INVALID_FIELD;
		return reply((struct ib_mad_hdr *)smp);
	}

	get_sc2vlt_tables(dd, vp);

	if (resp_len)
		*resp_len += size;

	return reply((struct ib_mad_hdr *)smp);
}

static int __subn_set_opa_sc_to_vlt(struct opa_smp *smp, u32 am, u8 *data,
				    struct ib_device *ibdev, u8 port,
				    u32 *resp_len)
{
	u32 n_blocks = OPA_AM_NBLK(am);
	int async_update = OPA_AM_ASYNC(am);
	struct hfi1_devdata *dd = dd_from_ibdev(ibdev);
	void *vp = (void *)data;
	struct hfi1_pportdata *ppd;
	int lstate;

	if (n_blocks != 1 || async_update) {
		smp->status |= IB_SMP_INVALID_FIELD;
		return reply((struct ib_mad_hdr *)smp);
	}

	/* IB numbers ports from 1, hw from 0 */
	ppd = dd->pport + (port - 1);
	lstate = driver_lstate(ppd);
	/* it's known that async_update is 0 by this point, but include
	 * the explicit check for clarity */
	if (!async_update &&
	    (lstate == IB_PORT_ARMED || lstate == IB_PORT_ACTIVE)) {
		smp->status |= IB_SMP_INVALID_FIELD;
		return reply((struct ib_mad_hdr *)smp);
	}

	set_sc2vlt_tables(dd, vp);

	return __subn_get_opa_sc_to_vlt(smp, am, data, ibdev, port, resp_len);
}

static int __subn_get_opa_sc_to_vlnt(struct opa_smp *smp, u32 am, u8 *data,
				     struct ib_device *ibdev, u8 port,
				     u32 *resp_len)
{
	u32 n_blocks = OPA_AM_NPORT(am);
	struct hfi1_devdata *dd = dd_from_ibdev(ibdev);
	struct hfi1_pportdata *ppd;
	void *vp = (void *)data;
	int size;

	if (n_blocks != 1) {
		smp->status |= IB_SMP_INVALID_FIELD;
		return reply((struct ib_mad_hdr *)smp);
	}

	ppd = dd->pport + (port - 1);

	size = fm_get_table(ppd, FM_TBL_SC2VLNT, vp);

	if (resp_len)
		*resp_len += size;

	return reply((struct ib_mad_hdr *)smp);
}

static int __subn_set_opa_sc_to_vlnt(struct opa_smp *smp, u32 am, u8 *data,
				     struct ib_device *ibdev, u8 port,
				     u32 *resp_len)
{
	u32 n_blocks = OPA_AM_NPORT(am);
	struct hfi1_devdata *dd = dd_from_ibdev(ibdev);
	struct hfi1_pportdata *ppd;
	void *vp = (void *)data;
	int lstate;

	if (n_blocks != 1) {
		smp->status |= IB_SMP_INVALID_FIELD;
		return reply((struct ib_mad_hdr *)smp);
	}

	/* IB numbers ports from 1, hw from 0 */
	ppd = dd->pport + (port - 1);
	lstate = driver_lstate(ppd);
	if (lstate == IB_PORT_ARMED || lstate == IB_PORT_ACTIVE) {
		smp->status |= IB_SMP_INVALID_FIELD;
		return reply((struct ib_mad_hdr *)smp);
	}

	fm_set_table(ppd, FM_TBL_SC2VLNT, vp);

	return __subn_get_opa_sc_to_vlnt(smp, am, data, ibdev, port,
					 resp_len);
}

static int __subn_get_opa_psi(struct opa_smp *smp, u32 am, u8 *data,
			      struct ib_device *ibdev, u8 port,
			      u32 *resp_len)
{
	u32 nports = OPA_AM_NPORT(am);
	u32 start_of_sm_config = OPA_AM_START_SM_CFG(am);
	u32 lstate;
	struct hfi1_ibport *ibp;
	struct hfi1_pportdata *ppd;
	struct opa_port_state_info *psi = (struct opa_port_state_info *)data;

	if (nports != 1) {
		smp->status |= IB_SMP_INVALID_FIELD;
		return reply((struct ib_mad_hdr *)smp);
	}

	ibp = to_iport(ibdev, port);
	ppd = ppd_from_ibp(ibp);

	lstate = driver_lstate(ppd);

	if (start_of_sm_config && (lstate == IB_PORT_INIT))
		ppd->is_sm_config_started = 1;

#if PI_LED_ENABLE_SUP
	psi->port_states.ledenable_offlinereason = ppd->neighbor_normal << 4;
	psi->port_states.ledenable_offlinereason |=
		ppd->is_sm_config_started << 5;
	psi->port_states.ledenable_offlinereason |=
		ppd->offline_disabled_reason;
#else
	psi->port_states.offline_reason = ppd->neighbor_normal << 4;
	psi->port_states.offline_reason |= ppd->is_sm_config_started << 5;
	psi->port_states.offline_reason |= ppd->offline_disabled_reason;
#endif /* PI_LED_ENABLE_SUP */

	psi->port_states.portphysstate_portstate =
		(hfi1_ibphys_portstate(ppd) << 4) | (lstate & 0xf);
	psi->link_width_downgrade_tx_active =
		cpu_to_be16(ppd->link_width_downgrade_tx_active);
	psi->link_width_downgrade_rx_active =
		cpu_to_be16(ppd->link_width_downgrade_rx_active);
	if (resp_len)
		*resp_len += sizeof(struct opa_port_state_info);

	return reply((struct ib_mad_hdr *)smp);
}

static int __subn_set_opa_psi(struct opa_smp *smp, u32 am, u8 *data,
			      struct ib_device *ibdev, u8 port,
			      u32 *resp_len)
{
	u32 nports = OPA_AM_NPORT(am);
	u32 start_of_sm_config = OPA_AM_START_SM_CFG(am);
	u32 ls_old;
	u8 ls_new, ps_new;
	struct hfi1_ibport *ibp;
	struct hfi1_pportdata *ppd;
	struct opa_port_state_info *psi = (struct opa_port_state_info *)data;
	int ret, invalid = 0;

	if (nports != 1) {
		smp->status |= IB_SMP_INVALID_FIELD;
		return reply((struct ib_mad_hdr *)smp);
	}

	ibp = to_iport(ibdev, port);
	ppd = ppd_from_ibp(ibp);

	ls_old = driver_lstate(ppd);

	ls_new = port_states_to_logical_state(&psi->port_states);
	ps_new = port_states_to_phys_state(&psi->port_states);

	if (ls_old == IB_PORT_INIT) {
		if (start_of_sm_config) {
			if (ls_new == ls_old || (ls_new == IB_PORT_ARMED))
				ppd->is_sm_config_started = 1;
		} else if (ls_new == IB_PORT_ARMED) {
			if (ppd->is_sm_config_started == 0)
				invalid = 1;
		}
	}

	ret = set_port_states(ppd, smp, ls_new, ps_new, invalid);
	if (ret)
		return ret;

	if (invalid)
		smp->status |= IB_SMP_INVALID_FIELD;

	return __subn_get_opa_psi(smp, am, data, ibdev, port, resp_len);
}
1777
1778static int __subn_get_opa_cable_info(struct opa_smp *smp, u32 am, u8 *data,
1779 struct ib_device *ibdev, u8 port,
1780 u32 *resp_len)
1781{
1782 struct hfi1_devdata *dd = dd_from_ibdev(ibdev);
1783 u32 addr = OPA_AM_CI_ADDR(am);
1784 u32 len = OPA_AM_CI_LEN(am) + 1;
1785 int ret;
1786
1787#define __CI_PAGE_SIZE BIT(7) /* 128 bytes */
1788#define __CI_PAGE_MASK ~(__CI_PAGE_SIZE - 1)
1789#define __CI_PAGE_NUM(a) ((a) & __CI_PAGE_MASK)
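/*
 * e.g. with the 128-byte page above, addr = 0x70 and len = 32 give
 * __CI_PAGE_NUM(0x70) == 0x00 but __CI_PAGE_NUM(0x70 + 32 - 1) == 0x80,
 * so that request straddles a page boundary and is rejected below.
 */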
1790
1791 /* check that addr is within spec, and
1792 * addr and (addr + len - 1) are on the same "page" */
1793 if (addr >= 4096 ||
1794 (__CI_PAGE_NUM(addr) != __CI_PAGE_NUM(addr + len - 1))) {
1795 smp->status |= IB_SMP_INVALID_FIELD;
1796 return reply((struct ib_mad_hdr *)smp);
1797 }
1798
1799 ret = get_cable_info(dd, port, addr, len, data);
1800
1801 if (ret == -ENODEV) {
1802 smp->status |= IB_SMP_UNSUP_METH_ATTR;
1803 return reply((struct ib_mad_hdr *)smp);
1804 }
1805
1806 /* The address range for the CableInfo SMA query is wider than the
1807 * memory available on the QSFP cable. We want to return a valid
1808 * response, albeit zeroed out, for address ranges beyond available
1809 * memory but that are within the CableInfo query spec
1810 */
1811 if (ret < 0 && ret != -ERANGE) {
1812 smp->status |= IB_SMP_INVALID_FIELD;
1813 return reply((struct ib_mad_hdr *)smp);
1814 }
1815
1816 if (resp_len)
1817 *resp_len += len;
1818
1819 return reply((struct ib_mad_hdr *)smp);
1820}
1821
1822static int __subn_get_opa_bct(struct opa_smp *smp, u32 am, u8 *data,
1823 struct ib_device *ibdev, u8 port, u32 *resp_len)
1824{
1825 u32 num_ports = OPA_AM_NPORT(am);
1826 struct hfi1_devdata *dd = dd_from_ibdev(ibdev);
1827 struct hfi1_pportdata *ppd;
1828 struct buffer_control *p = (struct buffer_control *) data;
1829 int size;
1830
1831 if (num_ports != 1) {
1832 smp->status |= IB_SMP_INVALID_FIELD;
1833 return reply((struct ib_mad_hdr *)smp);
1834 }
1835
1836 ppd = dd->pport + (port - 1);
1837 size = fm_get_table(ppd, FM_TBL_BUFFER_CONTROL, p);
1838 trace_bct_get(dd, p);
1839 if (resp_len)
1840 *resp_len += size;
1841
1842 return reply((struct ib_mad_hdr *)smp);
1843}
1844
1845static int __subn_set_opa_bct(struct opa_smp *smp, u32 am, u8 *data,
1846 struct ib_device *ibdev, u8 port, u32 *resp_len)
1847{
1848 u32 num_ports = OPA_AM_NPORT(am);
1849 struct hfi1_devdata *dd = dd_from_ibdev(ibdev);
1850 struct hfi1_pportdata *ppd;
1851 struct buffer_control *p = (struct buffer_control *) data;
1852
1853 if (num_ports != 1) {
1854 smp->status |= IB_SMP_INVALID_FIELD;
1855 return reply((struct ib_mad_hdr *)smp);
1856 }
1857 ppd = dd->pport + (port - 1);
1858 trace_bct_set(dd, p);
1859 if (fm_set_table(ppd, FM_TBL_BUFFER_CONTROL, p) < 0) {
1860 smp->status |= IB_SMP_INVALID_FIELD;
1861 return reply((struct ib_mad_hdr *)smp);
1862 }
1863
1864 return __subn_get_opa_bct(smp, am, data, ibdev, port, resp_len);
1865}
1866
1867static int __subn_get_opa_vl_arb(struct opa_smp *smp, u32 am, u8 *data,
1868 struct ib_device *ibdev, u8 port,
1869 u32 *resp_len)
1870{
1871 struct hfi1_pportdata *ppd = ppd_from_ibp(to_iport(ibdev, port));
1872 u32 num_ports = OPA_AM_NPORT(am);
1873 u8 section = (am & 0x00ff0000) >> 16;
1874 u8 *p = data;
1875 int size = 0;
1876
1877 if (num_ports != 1) {
1878 smp->status |= IB_SMP_INVALID_FIELD;
1879 return reply((struct ib_mad_hdr *)smp);
1880 }
1881
1882 switch (section) {
1883 case OPA_VLARB_LOW_ELEMENTS:
1884 size = fm_get_table(ppd, FM_TBL_VL_LOW_ARB, p);
1885 break;
1886 case OPA_VLARB_HIGH_ELEMENTS:
1887 size = fm_get_table(ppd, FM_TBL_VL_HIGH_ARB, p);
1888 break;
1889 case OPA_VLARB_PREEMPT_ELEMENTS:
1890 size = fm_get_table(ppd, FM_TBL_VL_PREEMPT_ELEMS, p);
1891 break;
1892 case OPA_VLARB_PREEMPT_MATRIX:
1893 size = fm_get_table(ppd, FM_TBL_VL_PREEMPT_MATRIX, p);
1894 break;
1895 default:
1896 pr_warn("OPA SubnGet(VL Arb) AM Invalid : 0x%x\n",
1897 be32_to_cpu(smp->attr_mod));
1898 smp->status |= IB_SMP_INVALID_FIELD;
1899 break;
1900 }
1901
1902 if (size > 0 && resp_len)
1903 *resp_len += size;
1904
1905 return reply((struct ib_mad_hdr *)smp);
1906}
1907
1908static int __subn_set_opa_vl_arb(struct opa_smp *smp, u32 am, u8 *data,
1909 struct ib_device *ibdev, u8 port,
1910 u32 *resp_len)
1911{
1912 struct hfi1_pportdata *ppd = ppd_from_ibp(to_iport(ibdev, port));
1913 u32 num_ports = OPA_AM_NPORT(am);
1914 u8 section = (am & 0x00ff0000) >> 16;
1915 u8 *p = data;
1916
1917 if (num_ports != 1) {
1918 smp->status |= IB_SMP_INVALID_FIELD;
1919 return reply((struct ib_mad_hdr *)smp);
1920 }
1921
1922 switch (section) {
1923 case OPA_VLARB_LOW_ELEMENTS:
1924 (void) fm_set_table(ppd, FM_TBL_VL_LOW_ARB, p);
1925 break;
1926 case OPA_VLARB_HIGH_ELEMENTS:
1927 (void) fm_set_table(ppd, FM_TBL_VL_HIGH_ARB, p);
1928 break;
1929 /* neither OPA_VLARB_PREEMPT_ELEMENTS nor OPA_VLARB_PREEMPT_MATRIX
1930 * can be changed from the default values */
1931 case OPA_VLARB_PREEMPT_ELEMENTS:
1932 /* FALLTHROUGH */
1933 case OPA_VLARB_PREEMPT_MATRIX:
1934 smp->status |= IB_SMP_UNSUP_METH_ATTR;
1935 break;
1936 default:
1937 pr_warn("OPA SubnSet(VL Arb) AM Invalid : 0x%x\n",
1938 be32_to_cpu(smp->attr_mod));
1939 smp->status |= IB_SMP_INVALID_FIELD;
1940 break;
1941 }
1942
1943 return __subn_get_opa_vl_arb(smp, am, data, ibdev, port, resp_len);
1944}
1945
1946struct opa_pma_mad {
1947 struct ib_mad_hdr mad_hdr;
1948 u8 data[2024];
1949} __packed;
1950
1951struct opa_class_port_info {
1952 u8 base_version;
1953 u8 class_version;
1954 __be16 cap_mask;
1955 __be32 cap_mask2_resp_time;
1956
1957 u8 redirect_gid[16];
1958 __be32 redirect_tc_fl;
1959 __be32 redirect_lid;
1960 __be32 redirect_sl_qp;
1961 __be32 redirect_qkey;
1962
1963 u8 trap_gid[16];
1964 __be32 trap_tc_fl;
1965 __be32 trap_lid;
1966 __be32 trap_hl_qp;
1967 __be32 trap_qkey;
1968
1969 __be16 trap_pkey;
1970 __be16 redirect_pkey;
1971
1972 u8 trap_sl_rsvd;
1973 u8 reserved[3];
1974} __packed;
1975
1976struct opa_port_status_req {
1977 __u8 port_num;
1978 __u8 reserved[3];
1979 __be32 vl_select_mask;
1980};
1981
1982#define VL_MASK_ALL 0x000080ff
1983
1984struct opa_port_status_rsp {
1985 __u8 port_num;
1986 __u8 reserved[3];
1987 __be32 vl_select_mask;
1988
1989 /* Data counters */
1990 __be64 port_xmit_data;
1991 __be64 port_rcv_data;
1992 __be64 port_xmit_pkts;
1993 __be64 port_rcv_pkts;
1994 __be64 port_multicast_xmit_pkts;
1995 __be64 port_multicast_rcv_pkts;
1996 __be64 port_xmit_wait;
1997 __be64 sw_port_congestion;
1998 __be64 port_rcv_fecn;
1999 __be64 port_rcv_becn;
2000 __be64 port_xmit_time_cong;
2001 __be64 port_xmit_wasted_bw;
2002 __be64 port_xmit_wait_data;
2003 __be64 port_rcv_bubble;
2004 __be64 port_mark_fecn;
2005 /* Error counters */
2006 __be64 port_rcv_constraint_errors;
2007 __be64 port_rcv_switch_relay_errors;
2008 __be64 port_xmit_discards;
2009 __be64 port_xmit_constraint_errors;
2010 __be64 port_rcv_remote_physical_errors;
2011 __be64 local_link_integrity_errors;
2012 __be64 port_rcv_errors;
2013 __be64 excessive_buffer_overruns;
2014 __be64 fm_config_errors;
2015 __be32 link_error_recovery;
2016 __be32 link_downed;
2017 u8 uncorrectable_errors;
2018
2019 u8 link_quality_indicator; /* 5res, 3bit */
2020 u8 res2[6];
2021 struct _vls_pctrs {
2022 /* per-VL Data counters */
2023 __be64 port_vl_xmit_data;
2024 __be64 port_vl_rcv_data;
2025 __be64 port_vl_xmit_pkts;
2026 __be64 port_vl_rcv_pkts;
2027 __be64 port_vl_xmit_wait;
2028 __be64 sw_port_vl_congestion;
2029 __be64 port_vl_rcv_fecn;
2030 __be64 port_vl_rcv_becn;
2031 __be64 port_xmit_time_cong;
2032 __be64 port_vl_xmit_wasted_bw;
2033 __be64 port_vl_xmit_wait_data;
2034 __be64 port_vl_rcv_bubble;
2035 __be64 port_vl_mark_fecn;
2036 __be64 port_vl_xmit_discards;
2037 } vls[0]; /* real array size defined by # bits set in vl_select_mask */
2038};
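/*
 * vls[] above is a flexible array: the response carries one _vls_pctrs
 * entry per bit set in vl_select_mask, so the wire size is
 * sizeof(struct opa_port_status_rsp) +
 * hweight32(vl_select_mask) * sizeof(struct _vls_pctrs),
 * as computed in pma_get_opa_portstatus().
 */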
2039
2040enum counter_selects {
2041 CS_PORT_XMIT_DATA = (1 << 31),
2042 CS_PORT_RCV_DATA = (1 << 30),
2043 CS_PORT_XMIT_PKTS = (1 << 29),
2044 CS_PORT_RCV_PKTS = (1 << 28),
2045 CS_PORT_MCAST_XMIT_PKTS = (1 << 27),
2046 CS_PORT_MCAST_RCV_PKTS = (1 << 26),
2047 CS_PORT_XMIT_WAIT = (1 << 25),
2048 CS_SW_PORT_CONGESTION = (1 << 24),
2049 CS_PORT_RCV_FECN = (1 << 23),
2050 CS_PORT_RCV_BECN = (1 << 22),
2051 CS_PORT_XMIT_TIME_CONG = (1 << 21),
2052 CS_PORT_XMIT_WASTED_BW = (1 << 20),
2053 CS_PORT_XMIT_WAIT_DATA = (1 << 19),
2054 CS_PORT_RCV_BUBBLE = (1 << 18),
2055 CS_PORT_MARK_FECN = (1 << 17),
2056 CS_PORT_RCV_CONSTRAINT_ERRORS = (1 << 16),
2057 CS_PORT_RCV_SWITCH_RELAY_ERRORS = (1 << 15),
2058 CS_PORT_XMIT_DISCARDS = (1 << 14),
2059 CS_PORT_XMIT_CONSTRAINT_ERRORS = (1 << 13),
2060 CS_PORT_RCV_REMOTE_PHYSICAL_ERRORS = (1 << 12),
2061 CS_LOCAL_LINK_INTEGRITY_ERRORS = (1 << 11),
2062 CS_PORT_RCV_ERRORS = (1 << 10),
2063 CS_EXCESSIVE_BUFFER_OVERRUNS = (1 << 9),
2064 CS_FM_CONFIG_ERRORS = (1 << 8),
2065 CS_LINK_ERROR_RECOVERY = (1 << 7),
2066 CS_LINK_DOWNED = (1 << 6),
2067 CS_UNCORRECTABLE_ERRORS = (1 << 5),
2068};
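/*
 * Illustrative use of the counter-select bits: to clear only the raw
 * data-movement counters on port 1, an FM would send a ClearPortStatus
 * with port_select_mask[3] = cpu_to_be64(1 << 1) and
 * counter_select_mask = cpu_to_be32(CS_PORT_XMIT_DATA | CS_PORT_RCV_DATA);
 * pma_set_opa_portstatus() maps each bit onto the matching
 * write_dev_cntr() or write_port_cntr() call.
 */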
2069
2070struct opa_clear_port_status {
2071 __be64 port_select_mask[4];
2072 __be32 counter_select_mask;
2073};
2074
2075struct opa_aggregate {
2076 __be16 attr_id;
2077 __be16 err_reqlength; /* 1 bit, 8 res, 7 bit */
2078 __be32 attr_mod;
2079 u8 data[0];
2080};
2081
2082#define MSK_LLI 0x000000f0
2083#define MSK_LLI_SFT 4
2084#define MSK_LER 0x0000000f
2085#define MSK_LER_SFT 0
2086#define ADD_LLI 8
2087#define ADD_LER 2
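/*
 * The resolution fields select a right shift applied to the LLI and LER
 * counts before they are folded into port_error_counter_summary: a nonzero
 * 4-bit field f becomes a shift of f + ADD_LLI (or f + ADD_LER), so e.g.
 * an LLI resolution of 1 divides the replay counts by 2^9 = 512, while a
 * field of 0 applies no shift (see pma_get_opa_datacounters()).
 */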
2088
2089/* Request contains first three fields, response contains those plus the rest */
2090struct opa_port_data_counters_msg {
2091 __be64 port_select_mask[4];
2092 __be32 vl_select_mask;
2093 __be32 resolution;
2094
2095 /* Response fields follow */
2096 struct _port_dctrs {
2097 u8 port_number;
2098 u8 reserved2[3];
2099 __be32 link_quality_indicator; /* 29res, 3bit */
2100
2101 /* Data counters */
2102 __be64 port_xmit_data;
2103 __be64 port_rcv_data;
2104 __be64 port_xmit_pkts;
2105 __be64 port_rcv_pkts;
2106 __be64 port_multicast_xmit_pkts;
2107 __be64 port_multicast_rcv_pkts;
2108 __be64 port_xmit_wait;
2109 __be64 sw_port_congestion;
2110 __be64 port_rcv_fecn;
2111 __be64 port_rcv_becn;
2112 __be64 port_xmit_time_cong;
2113 __be64 port_xmit_wasted_bw;
2114 __be64 port_xmit_wait_data;
2115 __be64 port_rcv_bubble;
2116 __be64 port_mark_fecn;
2117
2118 __be64 port_error_counter_summary;
2119 /* Sum of error counts/port */
2120
2121 struct _vls_dctrs {
2122 /* per-VL Data counters */
2123 __be64 port_vl_xmit_data;
2124 __be64 port_vl_rcv_data;
2125 __be64 port_vl_xmit_pkts;
2126 __be64 port_vl_rcv_pkts;
2127 __be64 port_vl_xmit_wait;
2128 __be64 sw_port_vl_congestion;
2129 __be64 port_vl_rcv_fecn;
2130 __be64 port_vl_rcv_becn;
2131 __be64 port_xmit_time_cong;
2132 __be64 port_vl_xmit_wasted_bw;
2133 __be64 port_vl_xmit_wait_data;
2134 __be64 port_vl_rcv_bubble;
2135 __be64 port_vl_mark_fecn;
2136 } vls[0];
2137 /* array size defined by #bits set in vl_select_mask*/
2138 } port[1]; /* array size defined by #ports in attribute modifier */
2139};
2140
2141struct opa_port_error_counters64_msg {
2142 /* Request contains first two fields, response contains the
2143 * whole magilla */
2144 __be64 port_select_mask[4];
2145 __be32 vl_select_mask;
2146
2147 /* Response-only fields follow */
2148 __be32 reserved1;
2149 struct _port_ectrs {
2150 u8 port_number;
2151 u8 reserved2[7];
2152 __be64 port_rcv_constraint_errors;
2153 __be64 port_rcv_switch_relay_errors;
2154 __be64 port_xmit_discards;
2155 __be64 port_xmit_constraint_errors;
2156 __be64 port_rcv_remote_physical_errors;
2157 __be64 local_link_integrity_errors;
2158 __be64 port_rcv_errors;
2159 __be64 excessive_buffer_overruns;
2160 __be64 fm_config_errors;
2161 __be32 link_error_recovery;
2162 __be32 link_downed;
2163 u8 uncorrectable_errors;
2164 u8 reserved3[7];
2165 struct _vls_ectrs {
2166 __be64 port_vl_xmit_discards;
2167 } vls[0];
2168 /* array size defined by #bits set in vl_select_mask */
2169 } port[1]; /* array size defined by #ports in attribute modifier */
2170};
2171
2172struct opa_port_error_info_msg {
2173 __be64 port_select_mask[4];
2174 __be32 error_info_select_mask;
2175 __be32 reserved1;
2176 struct _port_ei {
2177
2178 u8 port_number;
2179 u8 reserved2[7];
2180
2181 /* PortRcvErrorInfo */
2182 struct {
2183 u8 status_and_code;
2184 union {
2185 u8 raw[17];
2186 struct {
2187 /* EI1to12 format */
2188 u8 packet_flit1[8];
2189 u8 packet_flit2[8];
2190 u8 remaining_flit_bits12;
2191 } ei1to12;
2192 struct {
2193 u8 packet_bytes[8];
2194 u8 remaining_flit_bits;
2195 } ei13;
2196 } ei;
2197 u8 reserved3[6];
2198 } __packed port_rcv_ei;
2199
2200 /* ExcessiveBufferOverrunInfo */
2201 struct {
2202 u8 status_and_sc;
2203 u8 reserved4[7];
2204 } __packed excessive_buffer_overrun_ei;
2205
2206 /* PortXmitConstraintErrorInfo */
2207 struct {
2208 u8 status;
2209 u8 reserved5;
2210 __be16 pkey;
2211 __be32 slid;
2212 } __packed port_xmit_constraint_ei;
2213
2214 /* PortRcvConstraintErrorInfo */
2215 struct {
2216 u8 status;
2217 u8 reserved6;
2218 __be16 pkey;
2219 __be32 slid;
2220 } __packed port_rcv_constraint_ei;
2221
2222 /* PortRcvSwitchRelayErrorInfo */
2223 struct {
2224 u8 status_and_code;
2225 u8 reserved7[3];
2226 __u32 error_info;
2227 } __packed port_rcv_switch_relay_ei;
2228
2229 /* UncorrectableErrorInfo */
2230 struct {
2231 u8 status_and_code;
2232 u8 reserved8;
2233 } __packed uncorrectable_ei;
2234
2235 /* FMConfigErrorInfo */
2236 struct {
2237 u8 status_and_code;
2238 u8 error_info;
2239 } __packed fm_config_ei;
2240 __u32 reserved9;
2241 } port[1]; /* actual array size defined by #ports in attr modifier */
2242};
2243
2244/* opa_port_error_info_msg error_info_select_mask bit definitions */
2245enum error_info_selects {
2246 ES_PORT_RCV_ERROR_INFO = (1 << 31),
2247 ES_EXCESSIVE_BUFFER_OVERRUN_INFO = (1 << 30),
2248 ES_PORT_XMIT_CONSTRAINT_ERROR_INFO = (1 << 29),
2249 ES_PORT_RCV_CONSTRAINT_ERROR_INFO = (1 << 28),
2250 ES_PORT_RCV_SWITCH_RELAY_ERROR_INFO = (1 << 27),
2251 ES_UNCORRECTABLE_ERROR_INFO = (1 << 26),
2252 ES_FM_CONFIG_ERROR_INFO = (1 << 25)
2253};
2254
2255static int pma_get_opa_classportinfo(struct opa_pma_mad *pmp,
2256 struct ib_device *ibdev, u32 *resp_len)
2257{
2258 struct opa_class_port_info *p =
2259 (struct opa_class_port_info *)pmp->data;
2260
2261 memset(pmp->data, 0, sizeof(pmp->data));
2262
2263 if (pmp->mad_hdr.attr_mod != 0)
2264 pmp->mad_hdr.status |= IB_SMP_INVALID_FIELD;
2265
2266 p->base_version = OPA_MGMT_BASE_VERSION;
2267 p->class_version = OPA_SMI_CLASS_VERSION;
2268 /*
2269 * Expected response time is 4.096 usec. * 2^18 == 1.073741824 sec.
2270 */
2271 p->cap_mask2_resp_time = cpu_to_be32(18);
2272
2273 if (resp_len)
2274 *resp_len += sizeof(*p);
2275
2276 return reply((struct ib_mad_hdr *)pmp);
2277}
2278
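/*
 * When !is_bx(ppd->dd) (A-step silicon), a0_portstatus() below clamps
 * port_xmit_wait to the sum of the per-VL xmit-wait counters.  The sum
 * saturates rather than wraps: e.g. sum = U64_MAX - 5 plus a per-VL count
 * of 10 wraps to tmp = 4 < sum, which is taken as a 64-bit overflow and
 * pegs the sum at ~0 (all ones).
 */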
2279static void a0_portstatus(struct hfi1_pportdata *ppd,
2280 struct opa_port_status_rsp *rsp, u32 vl_select_mask)
2281{
2282 if (!is_bx(ppd->dd)) {
2283 unsigned long vl;
2284 u64 sum_vl_xmit_wait = 0;
2285 u32 vl_all_mask = VL_MASK_ALL;
2286
2287 for_each_set_bit(vl, (unsigned long *)&(vl_all_mask),
2288 8 * sizeof(vl_all_mask)) {
2289 u64 tmp = sum_vl_xmit_wait +
2290 read_port_cntr(ppd, C_TX_WAIT_VL,
2291 idx_from_vl(vl));
2292 if (tmp < sum_vl_xmit_wait) {
2293 /* we wrapped */
2294 sum_vl_xmit_wait = (u64)~0;
2295 break;
2296 }
2297 sum_vl_xmit_wait = tmp;
2298 }
2299 if (be64_to_cpu(rsp->port_xmit_wait) > sum_vl_xmit_wait)
2300 rsp->port_xmit_wait = cpu_to_be64(sum_vl_xmit_wait);
2301 }
2302}
2303
2304
2305static int pma_get_opa_portstatus(struct opa_pma_mad *pmp,
2306 struct ib_device *ibdev, u8 port, u32 *resp_len)
2307{
2308 struct opa_port_status_req *req =
2309 (struct opa_port_status_req *)pmp->data;
2310 struct hfi1_devdata *dd = dd_from_ibdev(ibdev);
2311 struct opa_port_status_rsp *rsp;
2312 u32 vl_select_mask = be32_to_cpu(req->vl_select_mask);
2313 unsigned long vl;
2314 size_t response_data_size;
2315 u32 nports = be32_to_cpu(pmp->mad_hdr.attr_mod) >> 24;
2316 u8 port_num = req->port_num;
2317 u8 num_vls = hweight32(vl_select_mask);
2318 struct _vls_pctrs *vlinfo;
2319 struct hfi1_ibport *ibp = to_iport(ibdev, port);
2320 struct hfi1_pportdata *ppd = ppd_from_ibp(ibp);
2321 int vfi;
2322 u64 tmp, tmp2;
2323
2324 response_data_size = sizeof(struct opa_port_status_rsp) +
2325 num_vls * sizeof(struct _vls_pctrs);
2326 if (response_data_size > sizeof(pmp->data)) {
2327 pmp->mad_hdr.status |= OPA_PM_STATUS_REQUEST_TOO_LARGE;
2328 return reply((struct ib_mad_hdr *)pmp);
2329 }
2330
2331 if (nports != 1 || (port_num && port_num != port)
2332 || num_vls > OPA_MAX_VLS || (vl_select_mask & ~VL_MASK_ALL)) {
2333 pmp->mad_hdr.status |= IB_SMP_INVALID_FIELD;
2334 return reply((struct ib_mad_hdr *)pmp);
2335 }
2336
2337 memset(pmp->data, 0, sizeof(pmp->data));
2338
2339 rsp = (struct opa_port_status_rsp *)pmp->data;
2340 if (port_num)
2341 rsp->port_num = port_num;
2342 else
2343 rsp->port_num = port;
2344
2345 rsp->port_rcv_constraint_errors =
2346 cpu_to_be64(read_port_cntr(ppd, C_SW_RCV_CSTR_ERR,
2347 CNTR_INVALID_VL));
2348
2349 hfi1_read_link_quality(dd, &rsp->link_quality_indicator);
2350
2351 rsp->vl_select_mask = cpu_to_be32(vl_select_mask);
2352 rsp->port_xmit_data = cpu_to_be64(read_dev_cntr(dd, C_DC_XMIT_FLITS,
2353 CNTR_INVALID_VL));
2354 rsp->port_rcv_data = cpu_to_be64(read_dev_cntr(dd, C_DC_RCV_FLITS,
2355 CNTR_INVALID_VL));
2356 rsp->port_xmit_pkts = cpu_to_be64(read_dev_cntr(dd, C_DC_XMIT_PKTS,
2357 CNTR_INVALID_VL));
2358 rsp->port_rcv_pkts = cpu_to_be64(read_dev_cntr(dd, C_DC_RCV_PKTS,
2359 CNTR_INVALID_VL));
2360 rsp->port_multicast_xmit_pkts =
2361 cpu_to_be64(read_dev_cntr(dd, C_DC_MC_XMIT_PKTS,
2362 CNTR_INVALID_VL));
2363 rsp->port_multicast_rcv_pkts =
2364 cpu_to_be64(read_dev_cntr(dd, C_DC_MC_RCV_PKTS,
2365 CNTR_INVALID_VL));
2366 rsp->port_xmit_wait =
2367 cpu_to_be64(read_port_cntr(ppd, C_TX_WAIT, CNTR_INVALID_VL));
2368 rsp->port_rcv_fecn =
2369 cpu_to_be64(read_dev_cntr(dd, C_DC_RCV_FCN, CNTR_INVALID_VL));
2370 rsp->port_rcv_becn =
2371 cpu_to_be64(read_dev_cntr(dd, C_DC_RCV_BCN, CNTR_INVALID_VL));
2372 rsp->port_xmit_discards =
2373 cpu_to_be64(read_port_cntr(ppd, C_SW_XMIT_DSCD,
2374 CNTR_INVALID_VL));
2375 rsp->port_xmit_constraint_errors =
2376 cpu_to_be64(read_port_cntr(ppd, C_SW_XMIT_CSTR_ERR,
2377 CNTR_INVALID_VL));
2378 rsp->port_rcv_remote_physical_errors =
2379 cpu_to_be64(read_dev_cntr(dd, C_DC_RMT_PHY_ERR,
2380 CNTR_INVALID_VL));
2381 tmp = read_dev_cntr(dd, C_DC_RX_REPLAY, CNTR_INVALID_VL);
2382 tmp2 = tmp + read_dev_cntr(dd, C_DC_TX_REPLAY, CNTR_INVALID_VL);
2383 if (tmp2 < tmp) {
2384 /* overflow/wrapped */
2385 rsp->local_link_integrity_errors = cpu_to_be64(~0);
2386 } else {
2387 rsp->local_link_integrity_errors = cpu_to_be64(tmp2);
2388 }
2389 tmp = read_dev_cntr(dd, C_DC_SEQ_CRC_CNT, CNTR_INVALID_VL);
2390 tmp2 = tmp + read_dev_cntr(dd, C_DC_REINIT_FROM_PEER_CNT,
2391 CNTR_INVALID_VL);
2392 if (tmp2 > (u32)UINT_MAX || tmp2 < tmp) {
2393 /* overflow/wrapped */
2394 rsp->link_error_recovery = cpu_to_be32(~0);
2395 } else {
2396 rsp->link_error_recovery = cpu_to_be32(tmp2);
2397 }
2398 rsp->port_rcv_errors =
2399 cpu_to_be64(read_dev_cntr(dd, C_DC_RCV_ERR, CNTR_INVALID_VL));
2400 rsp->excessive_buffer_overruns =
2401 cpu_to_be64(read_dev_cntr(dd, C_RCV_OVF, CNTR_INVALID_VL));
2402 rsp->fm_config_errors =
2403 cpu_to_be64(read_dev_cntr(dd, C_DC_FM_CFG_ERR,
2404 CNTR_INVALID_VL));
2405 rsp->link_downed = cpu_to_be32(read_port_cntr(ppd, C_SW_LINK_DOWN,
2406 CNTR_INVALID_VL));
2407
2408 /* rsp->uncorrectable_errors is 8 bits wide, and it pegs at 0xff */
2409 tmp = read_dev_cntr(dd, C_DC_UNC_ERR, CNTR_INVALID_VL);
2410 rsp->uncorrectable_errors = tmp < 0x100 ? (tmp & 0xff) : 0xff;
2411
2412 vlinfo = &(rsp->vls[0]);
2413 vfi = 0;
2414 /* The vl_select_mask has been checked above, and we know
2415 * that it contains only entries which represent valid VLs.
2416 * So in the for_each_set_bit() loop below, we don't need
2417 * any additional checks for vl.
2418 */
2419 for_each_set_bit(vl, (unsigned long *)&(vl_select_mask),
2420 8 * sizeof(vl_select_mask)) {
2421 memset(vlinfo, 0, sizeof(*vlinfo));
2422
2423 tmp = read_dev_cntr(dd, C_DC_RX_FLIT_VL, idx_from_vl(vl));
2424 rsp->vls[vfi].port_vl_rcv_data = cpu_to_be64(tmp);
2425
2426 rsp->vls[vfi].port_vl_rcv_pkts =
2427 cpu_to_be64(read_dev_cntr(dd, C_DC_RX_PKT_VL,
2428 idx_from_vl(vl)));
2429
2430 rsp->vls[vfi].port_vl_xmit_data =
2431 cpu_to_be64(read_port_cntr(ppd, C_TX_FLIT_VL,
2432 idx_from_vl(vl)));
2433
2434 rsp->vls[vfi].port_vl_xmit_pkts =
2435 cpu_to_be64(read_port_cntr(ppd, C_TX_PKT_VL,
2436 idx_from_vl(vl)));
2437
2438 rsp->vls[vfi].port_vl_xmit_wait =
2439 cpu_to_be64(read_port_cntr(ppd, C_TX_WAIT_VL,
2440 idx_from_vl(vl)));
2441
2442 rsp->vls[vfi].port_vl_rcv_fecn =
2443 cpu_to_be64(read_dev_cntr(dd, C_DC_RCV_FCN_VL,
2444 idx_from_vl(vl)));
2445
2446 rsp->vls[vfi].port_vl_rcv_becn =
2447 cpu_to_be64(read_dev_cntr(dd, C_DC_RCV_BCN_VL,
2448 idx_from_vl(vl)));
2449
2450 vlinfo++;
2451 vfi++;
2452 }
2453
2454 a0_portstatus(ppd, rsp, vl_select_mask);
2455
2456 if (resp_len)
2457 *resp_len += response_data_size;
2458
2459 return reply((struct ib_mad_hdr *)pmp);
2460}
2461
2462static u64 get_error_counter_summary(struct ib_device *ibdev, u8 port,
2463 u8 res_lli, u8 res_ler)
2464{
2465 struct hfi1_devdata *dd = dd_from_ibdev(ibdev);
2466 struct hfi1_ibport *ibp = to_iport(ibdev, port);
2467 struct hfi1_pportdata *ppd = ppd_from_ibp(ibp);
2468 u64 error_counter_summary = 0, tmp;
2469
2470 error_counter_summary += read_port_cntr(ppd, C_SW_RCV_CSTR_ERR,
2471 CNTR_INVALID_VL);
2472 /* port_rcv_switch_relay_errors is 0 for HFIs */
2473 error_counter_summary += read_port_cntr(ppd, C_SW_XMIT_DSCD,
2474 CNTR_INVALID_VL);
2475 error_counter_summary += read_port_cntr(ppd, C_SW_XMIT_CSTR_ERR,
2476 CNTR_INVALID_VL);
2477 error_counter_summary += read_dev_cntr(dd, C_DC_RMT_PHY_ERR,
2478 CNTR_INVALID_VL);
2479 /* local link integrity must be right-shifted by the lli resolution */
2480 tmp = read_dev_cntr(dd, C_DC_RX_REPLAY, CNTR_INVALID_VL);
2481 tmp += read_dev_cntr(dd, C_DC_TX_REPLAY, CNTR_INVALID_VL);
2482 error_counter_summary += (tmp >> res_lli);
2483 /* link error recovery must be right-shifted by the ler resolution */
2484 tmp = read_dev_cntr(dd, C_DC_SEQ_CRC_CNT, CNTR_INVALID_VL);
2485 tmp += read_dev_cntr(dd, C_DC_REINIT_FROM_PEER_CNT, CNTR_INVALID_VL);
2486 error_counter_summary += (tmp >> res_ler);
2487 error_counter_summary += read_dev_cntr(dd, C_DC_RCV_ERR,
2488 CNTR_INVALID_VL);
2489 error_counter_summary += read_dev_cntr(dd, C_RCV_OVF, CNTR_INVALID_VL);
2490 error_counter_summary += read_dev_cntr(dd, C_DC_FM_CFG_ERR,
2491 CNTR_INVALID_VL);
2492 /* ppd->link_downed is a 32-bit value */
2493 error_counter_summary += read_port_cntr(ppd, C_SW_LINK_DOWN,
2494 CNTR_INVALID_VL);
2495 tmp = read_dev_cntr(dd, C_DC_UNC_ERR, CNTR_INVALID_VL);
2496 /* this is an 8-bit quantity */
2497 error_counter_summary += tmp < 0x100 ? (tmp & 0xff) : 0xff;
2498
2499 return error_counter_summary;
2500}
2501
2502static void a0_datacounters(struct hfi1_pportdata *ppd, struct _port_dctrs *rsp,
2503 u32 vl_select_mask)
2504{
2505 if (!is_bx(ppd->dd)) {
2506 unsigned long vl;
2507 u64 sum_vl_xmit_wait = 0;
2508 u32 vl_all_mask = VL_MASK_ALL;
2509
2510 for_each_set_bit(vl, (unsigned long *)&(vl_all_mask),
2511 8 * sizeof(vl_all_mask)) {
2512 u64 tmp = sum_vl_xmit_wait +
2513 read_port_cntr(ppd, C_TX_WAIT_VL,
2514 idx_from_vl(vl));
2515 if (tmp < sum_vl_xmit_wait) {
2516 /* we wrapped */
2517 sum_vl_xmit_wait = (u64) ~0;
2518 break;
2519 }
2520 sum_vl_xmit_wait = tmp;
2521 }
2522 if (be64_to_cpu(rsp->port_xmit_wait) > sum_vl_xmit_wait)
2523 rsp->port_xmit_wait = cpu_to_be64(sum_vl_xmit_wait);
2524 }
2525}
2526
2527static int pma_get_opa_datacounters(struct opa_pma_mad *pmp,
2528 struct ib_device *ibdev, u8 port, u32 *resp_len)
2529{
2530 struct opa_port_data_counters_msg *req =
2531 (struct opa_port_data_counters_msg *)pmp->data;
2532 struct hfi1_devdata *dd = dd_from_ibdev(ibdev);
2533 struct hfi1_ibport *ibp = to_iport(ibdev, port);
2534 struct hfi1_pportdata *ppd = ppd_from_ibp(ibp);
2535 struct _port_dctrs *rsp;
2536 struct _vls_dctrs *vlinfo;
2537 size_t response_data_size;
2538 u32 num_ports;
2539 u8 num_pslm;
2540 u8 lq, num_vls;
2541 u8 res_lli, res_ler;
2542 u64 port_mask;
2543 unsigned long port_num;
2544 unsigned long vl;
2545 u32 vl_select_mask;
2546 int vfi;
2547
2548 num_ports = be32_to_cpu(pmp->mad_hdr.attr_mod) >> 24;
2549 num_pslm = hweight64(be64_to_cpu(req->port_select_mask[3]));
2550 num_vls = hweight32(be32_to_cpu(req->vl_select_mask));
2551 vl_select_mask = be32_to_cpu(req->vl_select_mask);
2552 res_lli = (u8)(be32_to_cpu(req->resolution) & MSK_LLI) >> MSK_LLI_SFT;
2553 res_lli = res_lli ? res_lli + ADD_LLI : 0;
2554 res_ler = (u8)(be32_to_cpu(req->resolution) & MSK_LER) >> MSK_LER_SFT;
2555 res_ler = res_ler ? res_ler + ADD_LER : 0;
2556
2557 if (num_ports != 1 || (vl_select_mask & ~VL_MASK_ALL)) {
2558 pmp->mad_hdr.status |= IB_SMP_INVALID_FIELD;
2559 return reply((struct ib_mad_hdr *)pmp);
2560 }
2561
2562 /* Sanity check */
2563 response_data_size = sizeof(struct opa_port_data_counters_msg) +
2564 num_vls * sizeof(struct _vls_dctrs);
2565
2566 if (response_data_size > sizeof(pmp->data)) {
2567 pmp->mad_hdr.status |= IB_SMP_INVALID_FIELD;
2568 return reply((struct ib_mad_hdr *)pmp);
2569 }
2570
2571 /*
2572 * The bit set in the mask needs to be consistent with the
2573 * port the request came in on.
2574 */
2575 port_mask = be64_to_cpu(req->port_select_mask[3]);
2576 port_num = find_first_bit((unsigned long *)&port_mask,
2577 8 * sizeof(port_mask));
2578
2579 if ((u8)port_num != port) {
2580 pmp->mad_hdr.status |= IB_SMP_INVALID_FIELD;
2581 return reply((struct ib_mad_hdr *)pmp);
2582 }
2583
2584 rsp = (struct _port_dctrs *)&(req->port[0]);
2585 memset(rsp, 0, sizeof(*rsp));
2586
2587 rsp->port_number = port;
2588 /*
2589 * Note that link_quality_indicator is a 32 bit quantity in
2590 * 'datacounters' queries (as opposed to 'portinfo' queries,
2591 * where it's a byte).
2592 */
2593 hfi1_read_link_quality(dd, &lq);
2594 rsp->link_quality_indicator = cpu_to_be32((u32)lq);
2595
2596 /* rsp->sw_port_congestion is 0 for HFIs */
2597 /* rsp->port_xmit_time_cong is 0 for HFIs */
2598 /* rsp->port_xmit_wasted_bw ??? */
2599 /* rsp->port_xmit_wait_data ??? */
2600 /* rsp->port_mark_fecn is 0 for HFIs */
2601
2602 rsp->port_xmit_data = cpu_to_be64(read_dev_cntr(dd, C_DC_XMIT_FLITS,
2603 CNTR_INVALID_VL));
2604 rsp->port_rcv_data = cpu_to_be64(read_dev_cntr(dd, C_DC_RCV_FLITS,
2605 CNTR_INVALID_VL));
2606 rsp->port_xmit_pkts = cpu_to_be64(read_dev_cntr(dd, C_DC_XMIT_PKTS,
2607 CNTR_INVALID_VL));
2608 rsp->port_rcv_pkts = cpu_to_be64(read_dev_cntr(dd, C_DC_RCV_PKTS,
2609 CNTR_INVALID_VL));
2610 rsp->port_multicast_xmit_pkts =
2611 cpu_to_be64(read_dev_cntr(dd, C_DC_MC_XMIT_PKTS,
2612 CNTR_INVALID_VL));
2613 rsp->port_multicast_rcv_pkts =
2614 cpu_to_be64(read_dev_cntr(dd, C_DC_MC_RCV_PKTS,
2615 CNTR_INVALID_VL));
2616 rsp->port_xmit_wait =
2617 cpu_to_be64(read_port_cntr(ppd, C_TX_WAIT, CNTR_INVALID_VL));
2618 rsp->port_rcv_fecn =
2619 cpu_to_be64(read_dev_cntr(dd, C_DC_RCV_FCN, CNTR_INVALID_VL));
2620 rsp->port_rcv_becn =
2621 cpu_to_be64(read_dev_cntr(dd, C_DC_RCV_BCN, CNTR_INVALID_VL));
2622
2623 rsp->port_error_counter_summary =
2624 cpu_to_be64(get_error_counter_summary(ibdev, port,
2625 res_lli, res_ler));
2626
2627 vlinfo = &(rsp->vls[0]);
2628 vfi = 0;
2629 /* The vl_select_mask has been checked above, and we know
2630 * that it contains only entries which represent valid VLs.
2631 * So in the for_each_set_bit() loop below, we don't need
2632 * any additional checks for vl.
2633 */
2634 for_each_set_bit(vl, (unsigned long *)&(vl_select_mask),
2635 8 * sizeof(req->vl_select_mask)) {
2636 memset(vlinfo, 0, sizeof(*vlinfo));
2637
2638 rsp->vls[vfi].port_vl_xmit_data =
2639 cpu_to_be64(read_port_cntr(ppd, C_TX_FLIT_VL,
2640 idx_from_vl(vl)));
2641
2642 rsp->vls[vfi].port_vl_rcv_data =
2643 cpu_to_be64(read_dev_cntr(dd, C_DC_RX_FLIT_VL,
2644 idx_from_vl(vl)));
2645
2646 rsp->vls[vfi].port_vl_xmit_pkts =
2647 cpu_to_be64(read_port_cntr(ppd, C_TX_PKT_VL,
2648 idx_from_vl(vl)));
2649
2650 rsp->vls[vfi].port_vl_rcv_pkts =
2651 cpu_to_be64(read_dev_cntr(dd, C_DC_RX_PKT_VL,
2652 idx_from_vl(vl)));
2653
2654 rsp->vls[vfi].port_vl_xmit_wait =
2655 cpu_to_be64(read_port_cntr(ppd, C_TX_WAIT_VL,
2656 idx_from_vl(vl)));
2657
2658 rsp->vls[vfi].port_vl_rcv_fecn =
2659 cpu_to_be64(read_dev_cntr(dd, C_DC_RCV_FCN_VL,
2660 idx_from_vl(vl)));
2661 rsp->vls[vfi].port_vl_rcv_becn =
2662 cpu_to_be64(read_dev_cntr(dd, C_DC_RCV_BCN_VL,
2663 idx_from_vl(vl)));
2664
2665 /* rsp->port_vl_xmit_time_cong is 0 for HFIs */
2666 /* rsp->port_vl_xmit_wasted_bw ??? */
2667 /* port_vl_xmit_wait_data - TXE (table 13-9 HFI spec) ???
2668 * does this differ from rsp->vls[vfi].port_vl_xmit_wait */
2669 /*rsp->vls[vfi].port_vl_mark_fecn =
2670 cpu_to_be64(read_csr(dd, DCC_PRF_PORT_VL_MARK_FECN_CNT
2671 + offset));
2672 */
2673 vlinfo++;
2674 vfi++;
2675 }
2676
2677 a0_datacounters(ppd, rsp, vl_select_mask);
2678
2679 if (resp_len)
2680 *resp_len += response_data_size;
2681
2682 return reply((struct ib_mad_hdr *)pmp);
2683}
2684
2685static int pma_get_opa_porterrors(struct opa_pma_mad *pmp,
2686 struct ib_device *ibdev, u8 port, u32 *resp_len)
2687{
2688 size_t response_data_size;
2689 struct _port_ectrs *rsp;
2690 u8 port_num;
2691 struct opa_port_error_counters64_msg *req;
2692 struct hfi1_devdata *dd = dd_from_ibdev(ibdev);
2693 u32 num_ports;
2694 u8 num_pslm;
2695 u8 num_vls;
2696 struct hfi1_ibport *ibp;
2697 struct hfi1_pportdata *ppd;
2698 struct _vls_ectrs *vlinfo;
2699 unsigned long vl;
2700 u64 port_mask, tmp, tmp2;
2701 u32 vl_select_mask;
2702 int vfi;
2703
2704 req = (struct opa_port_error_counters64_msg *)pmp->data;
2705
2706 num_ports = be32_to_cpu(pmp->mad_hdr.attr_mod) >> 24;
2707
2708 num_pslm = hweight64(be64_to_cpu(req->port_select_mask[3]));
2709 num_vls = hweight32(be32_to_cpu(req->vl_select_mask));
2710
2711 if (num_ports != 1 || num_ports != num_pslm) {
2712 pmp->mad_hdr.status |= IB_SMP_INVALID_FIELD;
2713 return reply((struct ib_mad_hdr *)pmp);
2714 }
2715
2716 response_data_size = sizeof(struct opa_port_error_counters64_msg) +
2717 num_vls * sizeof(struct _vls_ectrs);
2718
2719 if (response_data_size > sizeof(pmp->data)) {
2720 pmp->mad_hdr.status |= IB_SMP_INVALID_FIELD;
2721 return reply((struct ib_mad_hdr *)pmp);
2722 }
2723 /*
2724 * The bit set in the mask needs to be consistent with the
2725 * port the request came in on.
2726 */
2727 port_mask = be64_to_cpu(req->port_select_mask[3]);
2728 port_num = find_first_bit((unsigned long *)&port_mask,
2729 8 * sizeof(port_mask));
2730
2731 if (port_num != port) {
2732 pmp->mad_hdr.status |= IB_SMP_INVALID_FIELD;
2733 return reply((struct ib_mad_hdr *)pmp);
2734 }
2735
2736 rsp = (struct _port_ectrs *)&(req->port[0]);
2737
2738 ibp = to_iport(ibdev, port_num);
2739 ppd = ppd_from_ibp(ibp);
2740
2741 memset(rsp, 0, sizeof(*rsp));
2742 rsp->port_number = port_num;
2743
2744 rsp->port_rcv_constraint_errors =
2745 cpu_to_be64(read_port_cntr(ppd, C_SW_RCV_CSTR_ERR,
2746 CNTR_INVALID_VL));
2747 /* port_rcv_switch_relay_errors is 0 for HFIs */
2748 rsp->port_xmit_discards =
2749 cpu_to_be64(read_port_cntr(ppd, C_SW_XMIT_DSCD,
2750 CNTR_INVALID_VL));
2751 rsp->port_rcv_remote_physical_errors =
2752 cpu_to_be64(read_dev_cntr(dd, C_DC_RMT_PHY_ERR,
2753 CNTR_INVALID_VL));
2754 tmp = read_dev_cntr(dd, C_DC_RX_REPLAY, CNTR_INVALID_VL);
2755 tmp2 = tmp + read_dev_cntr(dd, C_DC_TX_REPLAY, CNTR_INVALID_VL);
2756 if (tmp2 < tmp) {
2757 /* overflow/wrapped */
2758 rsp->local_link_integrity_errors = cpu_to_be64(~0);
2759 } else {
2760 rsp->local_link_integrity_errors = cpu_to_be64(tmp2);
2761 }
2762 tmp = read_dev_cntr(dd, C_DC_SEQ_CRC_CNT, CNTR_INVALID_VL);
2763 tmp2 = tmp + read_dev_cntr(dd, C_DC_REINIT_FROM_PEER_CNT,
2764 CNTR_INVALID_VL);
2765 if (tmp2 > (u32)UINT_MAX || tmp2 < tmp) {
2766 /* overflow/wrapped */
2767 rsp->link_error_recovery = cpu_to_be32(~0);
2768 } else {
2769 rsp->link_error_recovery = cpu_to_be32(tmp2);
2770 }
2771 rsp->port_xmit_constraint_errors =
2772 cpu_to_be64(read_port_cntr(ppd, C_SW_XMIT_CSTR_ERR,
2773 CNTR_INVALID_VL));
2774 rsp->excessive_buffer_overruns =
2775 cpu_to_be64(read_dev_cntr(dd, C_RCV_OVF, CNTR_INVALID_VL));
2776 rsp->fm_config_errors =
2777 cpu_to_be64(read_dev_cntr(dd, C_DC_FM_CFG_ERR,
2778 CNTR_INVALID_VL));
2779 rsp->link_downed = cpu_to_be32(read_port_cntr(ppd, C_SW_LINK_DOWN,
2780 CNTR_INVALID_VL));
2781 tmp = read_dev_cntr(dd, C_DC_UNC_ERR, CNTR_INVALID_VL);
2782 rsp->uncorrectable_errors = tmp < 0x100 ? (tmp & 0xff) : 0xff;
2783
2784 vlinfo = (struct _vls_ectrs *)&(rsp->vls[0]);
2785 vfi = 0;
2786 vl_select_mask = be32_to_cpu(req->vl_select_mask);
2787 for_each_set_bit(vl, (unsigned long *)&(vl_select_mask),
2788 8 * sizeof(req->vl_select_mask)) {
2789 memset(vlinfo, 0, sizeof(*vlinfo));
2790 /* vlinfo->vls[vfi].port_vl_xmit_discards ??? */
2791 vlinfo += 1;
2792 vfi++;
2793 }
2794
2795 if (resp_len)
2796 *resp_len += response_data_size;
2797
2798 return reply((struct ib_mad_hdr *)pmp);
2799}
2800
2801static int pma_get_opa_errorinfo(struct opa_pma_mad *pmp,
2802 struct ib_device *ibdev, u8 port, u32 *resp_len)
2803{
2804 size_t response_data_size;
2805 struct _port_ei *rsp;
2806 struct opa_port_error_info_msg *req;
2807 struct hfi1_devdata *dd = dd_from_ibdev(ibdev);
2808 u64 port_mask;
2809 u32 num_ports;
2810 u8 port_num;
2811 u8 num_pslm;
2812 u64 reg;
2813
2814 req = (struct opa_port_error_info_msg *)pmp->data;
2815 rsp = (struct _port_ei *)&(req->port[0]);
2816
2817 num_ports = OPA_AM_NPORT(be32_to_cpu(pmp->mad_hdr.attr_mod));
2818 num_pslm = hweight64(be64_to_cpu(req->port_select_mask[3]));
2819
2820 memset(rsp, 0, sizeof(*rsp));
2821
2822 if (num_ports != 1 || num_ports != num_pslm) {
2823 pmp->mad_hdr.status |= IB_SMP_INVALID_FIELD;
2824 return reply((struct ib_mad_hdr *)pmp);
2825 }
2826
2827 /* Sanity check */
2828 response_data_size = sizeof(struct opa_port_error_info_msg);
2829
2830 if (response_data_size > sizeof(pmp->data)) {
2831 pmp->mad_hdr.status |= IB_SMP_INVALID_FIELD;
2832 return reply((struct ib_mad_hdr *)pmp);
2833 }
2834
2835 /*
2836 * The bit set in the mask needs to be consistent with the port
2837 * the request came in on.
2838 */
2839 port_mask = be64_to_cpu(req->port_select_mask[3]);
2840 port_num = find_first_bit((unsigned long *)&port_mask,
2841 8 * sizeof(port_mask));
2842
2843 if (port_num != port) {
2844 pmp->mad_hdr.status |= IB_SMP_INVALID_FIELD;
2845 return reply((struct ib_mad_hdr *)pmp);
2846 }
2847
2848 /* PortRcvErrorInfo */
2849 rsp->port_rcv_ei.status_and_code =
2850 dd->err_info_rcvport.status_and_code;
2851 memcpy(&rsp->port_rcv_ei.ei.ei1to12.packet_flit1,
2852 &dd->err_info_rcvport.packet_flit1, sizeof(u64));
2853 memcpy(&rsp->port_rcv_ei.ei.ei1to12.packet_flit2,
2854 &dd->err_info_rcvport.packet_flit2, sizeof(u64));
2855
2856 /* ExcessiveBufferOverrunInfo */
2857 reg = read_csr(dd, RCV_ERR_INFO);
2858 if (reg & RCV_ERR_INFO_RCV_EXCESS_BUFFER_OVERRUN_SMASK) {
2859 /* if the RcvExcessBufferOverrun bit is set, save SC of
2860 * first pkt that encountered an excess buffer overrun */
2861 u8 tmp = (u8)reg;
2862
2863 tmp &= RCV_ERR_INFO_RCV_EXCESS_BUFFER_OVERRUN_SC_SMASK;
2864 tmp <<= 2;
2865 rsp->excessive_buffer_overrun_ei.status_and_sc = tmp;
2866 /* set the status bit */
2867 rsp->excessive_buffer_overrun_ei.status_and_sc |= 0x80;
2868 }
2869
2870 rsp->port_xmit_constraint_ei.status =
2871 dd->err_info_xmit_constraint.status;
2872 rsp->port_xmit_constraint_ei.pkey =
2873 cpu_to_be16(dd->err_info_xmit_constraint.pkey);
2874 rsp->port_xmit_constraint_ei.slid =
2875 cpu_to_be32(dd->err_info_xmit_constraint.slid);
2876
2877 rsp->port_rcv_constraint_ei.status =
2878 dd->err_info_rcv_constraint.status;
2879 rsp->port_rcv_constraint_ei.pkey =
2880 cpu_to_be16(dd->err_info_rcv_constraint.pkey);
2881 rsp->port_rcv_constraint_ei.slid =
2882 cpu_to_be32(dd->err_info_rcv_constraint.slid);
2883
2884 /* UncorrectableErrorInfo */
2885 rsp->uncorrectable_ei.status_and_code = dd->err_info_uncorrectable;
2886
2887 /* FMConfigErrorInfo */
2888 rsp->fm_config_ei.status_and_code = dd->err_info_fmconfig;
2889
2890 if (resp_len)
2891 *resp_len += response_data_size;
2892
2893 return reply((struct ib_mad_hdr *)pmp);
2894}
2895
2896static int pma_set_opa_portstatus(struct opa_pma_mad *pmp,
2897 struct ib_device *ibdev, u8 port, u32 *resp_len)
2898{
2899 struct opa_clear_port_status *req =
2900 (struct opa_clear_port_status *)pmp->data;
2901 struct hfi1_devdata *dd = dd_from_ibdev(ibdev);
2902 struct hfi1_ibport *ibp = to_iport(ibdev, port);
2903 struct hfi1_pportdata *ppd = ppd_from_ibp(ibp);
2904 u32 nports = be32_to_cpu(pmp->mad_hdr.attr_mod) >> 24;
2905 u64 portn = be64_to_cpu(req->port_select_mask[3]);
2906 u32 counter_select = be32_to_cpu(req->counter_select_mask);
2907 u32 vl_select_mask = VL_MASK_ALL; /* clear all per-vl cnts */
2908 unsigned long vl;
2909
2910 if ((nports != 1) || (portn != 1 << port)) {
2911 pmp->mad_hdr.status |= IB_SMP_INVALID_FIELD;
2912 return reply((struct ib_mad_hdr *)pmp);
2913 }
2914 /*
2915 * only counters returned by pma_get_opa_portstatus() are
2916 * handled, so when pma_get_opa_portstatus() gets a fix,
2917 * the corresponding change should be made here as well.
2918 */
2919
2920 if (counter_select & CS_PORT_XMIT_DATA)
2921 write_dev_cntr(dd, C_DC_XMIT_FLITS, CNTR_INVALID_VL, 0);
2922
2923 if (counter_select & CS_PORT_RCV_DATA)
2924 write_dev_cntr(dd, C_DC_RCV_FLITS, CNTR_INVALID_VL, 0);
2925
2926 if (counter_select & CS_PORT_XMIT_PKTS)
2927 write_dev_cntr(dd, C_DC_XMIT_PKTS, CNTR_INVALID_VL, 0);
2928
2929 if (counter_select & CS_PORT_RCV_PKTS)
2930 write_dev_cntr(dd, C_DC_RCV_PKTS, CNTR_INVALID_VL, 0);
2931
2932 if (counter_select & CS_PORT_MCAST_XMIT_PKTS)
2933 write_dev_cntr(dd, C_DC_MC_XMIT_PKTS, CNTR_INVALID_VL, 0);
2934
2935 if (counter_select & CS_PORT_MCAST_RCV_PKTS)
2936 write_dev_cntr(dd, C_DC_MC_RCV_PKTS, CNTR_INVALID_VL, 0);
2937
2938 if (counter_select & CS_PORT_XMIT_WAIT)
2939 write_port_cntr(ppd, C_TX_WAIT, CNTR_INVALID_VL, 0);
2940
2941 /* ignore cs_sw_portCongestion for HFIs */
2942
2943 if (counter_select & CS_PORT_RCV_FECN)
2944 write_dev_cntr(dd, C_DC_RCV_FCN, CNTR_INVALID_VL, 0);
2945
2946 if (counter_select & CS_PORT_RCV_BECN)
2947 write_dev_cntr(dd, C_DC_RCV_BCN, CNTR_INVALID_VL, 0);
2948
2949 /* ignore cs_port_xmit_time_cong for HFIs */
2950 /* ignore cs_port_xmit_wasted_bw for now */
2951 /* ignore cs_port_xmit_wait_data for now */
2952 if (counter_select & CS_PORT_RCV_BUBBLE)
2953 write_dev_cntr(dd, C_DC_RCV_BBL, CNTR_INVALID_VL, 0);
2954
2955 /* Only applicable for switch */
2956 /*if (counter_select & CS_PORT_MARK_FECN)
2957 write_csr(dd, DCC_PRF_PORT_MARK_FECN_CNT, 0);*/
2958
2959 if (counter_select & CS_PORT_RCV_CONSTRAINT_ERRORS)
2960 write_port_cntr(ppd, C_SW_RCV_CSTR_ERR, CNTR_INVALID_VL, 0);
2961
2962 /* ignore cs_port_rcv_switch_relay_errors for HFIs */
2963 if (counter_select & CS_PORT_XMIT_DISCARDS)
2964 write_port_cntr(ppd, C_SW_XMIT_DSCD, CNTR_INVALID_VL, 0);
2965
2966 if (counter_select & CS_PORT_XMIT_CONSTRAINT_ERRORS)
2967 write_port_cntr(ppd, C_SW_XMIT_CSTR_ERR, CNTR_INVALID_VL, 0);
2968
2969 if (counter_select & CS_PORT_RCV_REMOTE_PHYSICAL_ERRORS)
2970 write_dev_cntr(dd, C_DC_RMT_PHY_ERR, CNTR_INVALID_VL, 0);
2971
2972 if (counter_select & CS_LOCAL_LINK_INTEGRITY_ERRORS) {
2973 write_dev_cntr(dd, C_DC_TX_REPLAY, CNTR_INVALID_VL, 0);
2974 write_dev_cntr(dd, C_DC_RX_REPLAY, CNTR_INVALID_VL, 0);
2975 }
2976
2977 if (counter_select & CS_LINK_ERROR_RECOVERY) {
2978 write_dev_cntr(dd, C_DC_SEQ_CRC_CNT, CNTR_INVALID_VL, 0);
2979 write_dev_cntr(dd, C_DC_REINIT_FROM_PEER_CNT,
2980 CNTR_INVALID_VL, 0);
2981 }
2982
2983 if (counter_select & CS_PORT_RCV_ERRORS)
2984 write_dev_cntr(dd, C_DC_RCV_ERR, CNTR_INVALID_VL, 0);
2985
2986 if (counter_select & CS_EXCESSIVE_BUFFER_OVERRUNS) {
2987 write_dev_cntr(dd, C_RCV_OVF, CNTR_INVALID_VL, 0);
2988 dd->rcv_ovfl_cnt = 0;
2989 }
2990
2991 if (counter_select & CS_FM_CONFIG_ERRORS)
2992 write_dev_cntr(dd, C_DC_FM_CFG_ERR, CNTR_INVALID_VL, 0);
2993
2994 if (counter_select & CS_LINK_DOWNED)
2995 write_port_cntr(ppd, C_SW_LINK_DOWN, CNTR_INVALID_VL, 0);
2996
2997 if (counter_select & CS_UNCORRECTABLE_ERRORS)
2998 write_dev_cntr(dd, C_DC_UNC_ERR, CNTR_INVALID_VL, 0);
2999
3000 for_each_set_bit(vl, (unsigned long *)&(vl_select_mask),
3001 8 * sizeof(vl_select_mask)) {
3002
3003 if (counter_select & CS_PORT_XMIT_DATA)
3004 write_port_cntr(ppd, C_TX_FLIT_VL, idx_from_vl(vl), 0);
3005
3006 if (counter_select & CS_PORT_RCV_DATA)
3007 write_dev_cntr(dd, C_DC_RX_FLIT_VL, idx_from_vl(vl), 0);
3008
3009 if (counter_select & CS_PORT_XMIT_PKTS)
3010 write_port_cntr(ppd, C_TX_PKT_VL, idx_from_vl(vl), 0);
3011
3012 if (counter_select & CS_PORT_RCV_PKTS)
3013 write_dev_cntr(dd, C_DC_RX_PKT_VL, idx_from_vl(vl), 0);
3014
3015 if (counter_select & CS_PORT_XMIT_WAIT)
3016 write_port_cntr(ppd, C_TX_WAIT_VL, idx_from_vl(vl), 0);
3017
3018 /* sw_port_vl_congestion is 0 for HFIs */
3019 if (counter_select & CS_PORT_RCV_FECN)
3020 write_dev_cntr(dd, C_DC_RCV_FCN_VL, idx_from_vl(vl), 0);
3021
3022 if (counter_select & CS_PORT_RCV_BECN)
3023 write_dev_cntr(dd, C_DC_RCV_BCN_VL, idx_from_vl(vl), 0);
3024
3025 /* port_vl_xmit_time_cong is 0 for HFIs */
3026 /* port_vl_xmit_wasted_bw ??? */
3027 /* port_vl_xmit_wait_data - TXE (table 13-9 HFI spec) ??? */
3028 if (counter_select & CS_PORT_RCV_BUBBLE)
3029 write_dev_cntr(dd, C_DC_RCV_BBL_VL, idx_from_vl(vl), 0);
3030
3031 /*if (counter_select & CS_PORT_MARK_FECN)
3032 write_csr(dd, DCC_PRF_PORT_VL_MARK_FECN_CNT + offset, 0);
3033 */
3034 /* port_vl_xmit_discards ??? */
3035 }
3036
3037 if (resp_len)
3038 *resp_len += sizeof(*req);
3039
3040 return reply((struct ib_mad_hdr *)pmp);
3041}
3042
3043static int pma_set_opa_errorinfo(struct opa_pma_mad *pmp,
3044 struct ib_device *ibdev, u8 port, u32 *resp_len)
3045{
3046 struct _port_ei *rsp;
3047 struct opa_port_error_info_msg *req;
3048 struct hfi1_devdata *dd = dd_from_ibdev(ibdev);
3049 u64 port_mask;
3050 u32 num_ports;
3051 u8 port_num;
3052 u8 num_pslm;
3053 u32 error_info_select;
3054
3055 req = (struct opa_port_error_info_msg *)pmp->data;
3056 rsp = (struct _port_ei *)&(req->port[0]);
3057
3058 num_ports = OPA_AM_NPORT(be32_to_cpu(pmp->mad_hdr.attr_mod));
3059 num_pslm = hweight64(be64_to_cpu(req->port_select_mask[3]));
3060
3061 memset(rsp, 0, sizeof(*rsp));
3062
3063 if (num_ports != 1 || num_ports != num_pslm) {
3064 pmp->mad_hdr.status |= IB_SMP_INVALID_FIELD;
3065 return reply((struct ib_mad_hdr *)pmp);
3066 }
3067
3068 /*
3069 * The bit set in the mask needs to be consistent with the port
3070 * the request came in on.
3071 */
3072 port_mask = be64_to_cpu(req->port_select_mask[3]);
3073 port_num = find_first_bit((unsigned long *)&port_mask,
3074 8 * sizeof(port_mask));
3075
3076 if (port_num != port) {
3077 pmp->mad_hdr.status |= IB_SMP_INVALID_FIELD;
3078 return reply((struct ib_mad_hdr *)pmp);
3079 }
3080
3081 error_info_select = be32_to_cpu(req->error_info_select_mask);
3082
3083 /* PortRcvErrorInfo */
3084 if (error_info_select & ES_PORT_RCV_ERROR_INFO)
3085 /* turn off status bit */
3086 dd->err_info_rcvport.status_and_code &= ~OPA_EI_STATUS_SMASK;
3087
3088 /* ExcessiveBufferOverrunInfo */
3089 if (error_info_select & ES_EXCESSIVE_BUFFER_OVERRUN_INFO)
3090 /* status bit is essentially kept in the h/w - bit 5 of
3091 * RCV_ERR_INFO */
3092 write_csr(dd, RCV_ERR_INFO,
3093 RCV_ERR_INFO_RCV_EXCESS_BUFFER_OVERRUN_SMASK);
3094
3095 if (error_info_select & ES_PORT_XMIT_CONSTRAINT_ERROR_INFO)
3096 dd->err_info_xmit_constraint.status &= ~OPA_EI_STATUS_SMASK;
3097
3098 if (error_info_select & ES_PORT_RCV_CONSTRAINT_ERROR_INFO)
3099 dd->err_info_rcv_constraint.status &= ~OPA_EI_STATUS_SMASK;
3100
3101 /* UncorrectableErrorInfo */
3102 if (error_info_select & ES_UNCORRECTABLE_ERROR_INFO)
3103 /* turn off status bit */
3104 dd->err_info_uncorrectable &= ~OPA_EI_STATUS_SMASK;
3105
3106 /* FMConfigErrorInfo */
3107 if (error_info_select & ES_FM_CONFIG_ERROR_INFO)
3108 /* turn off status bit */
3109 dd->err_info_fmconfig &= ~OPA_EI_STATUS_SMASK;
3110
3111 if (resp_len)
3112 *resp_len += sizeof(*req);
3113
3114 return reply((struct ib_mad_hdr *)pmp);
3115}
3116
3117struct opa_congestion_info_attr {
3118 __be16 congestion_info;
3119 u8 control_table_cap; /* Multiple of 64 entry unit CCTs */
3120 u8 congestion_log_length;
3121} __packed;
3122
3123static int __subn_get_opa_cong_info(struct opa_smp *smp, u32 am, u8 *data,
3124 struct ib_device *ibdev, u8 port,
3125 u32 *resp_len)
3126{
3127 struct opa_congestion_info_attr *p =
3128 (struct opa_congestion_info_attr *)data;
3129 struct hfi1_ibport *ibp = to_iport(ibdev, port);
3130 struct hfi1_pportdata *ppd = ppd_from_ibp(ibp);
3131
3132 p->congestion_info = 0;
3133 p->control_table_cap = ppd->cc_max_table_entries;
3134 p->congestion_log_length = OPA_CONG_LOG_ELEMS;
3135
3136 if (resp_len)
3137 *resp_len += sizeof(*p);
3138
3139 return reply((struct ib_mad_hdr *)smp);
3140}
3141
3142static int __subn_get_opa_cong_setting(struct opa_smp *smp, u32 am,
3143 u8 *data,
3144 struct ib_device *ibdev,
3145 u8 port, u32 *resp_len)
3146{
3147 int i;
3148 struct opa_congestion_setting_attr *p =
3149 (struct opa_congestion_setting_attr *) data;
3150 struct hfi1_ibport *ibp = to_iport(ibdev, port);
3151 struct hfi1_pportdata *ppd = ppd_from_ibp(ibp);
3152 struct opa_congestion_setting_entry_shadow *entries;
3153 struct cc_state *cc_state;
3154
3155 rcu_read_lock();
3156
3157 cc_state = get_cc_state(ppd);
3158
3159 if (cc_state == NULL) {
3160 rcu_read_unlock();
3161 return reply((struct ib_mad_hdr *)smp);
3162 }
3163
3164 entries = cc_state->cong_setting.entries;
3165 p->port_control = cpu_to_be16(cc_state->cong_setting.port_control);
3166 p->control_map = cpu_to_be32(cc_state->cong_setting.control_map);
3167 for (i = 0; i < OPA_MAX_SLS; i++) {
3168 p->entries[i].ccti_increase = entries[i].ccti_increase;
3169 p->entries[i].ccti_timer = cpu_to_be16(entries[i].ccti_timer);
3170 p->entries[i].trigger_threshold =
3171 entries[i].trigger_threshold;
3172 p->entries[i].ccti_min = entries[i].ccti_min;
3173 }
3174
3175 rcu_read_unlock();
3176
3177 if (resp_len)
3178 *resp_len += sizeof(*p);
3179
3180 return reply((struct ib_mad_hdr *)smp);
3181}
3182
3183static int __subn_set_opa_cong_setting(struct opa_smp *smp, u32 am, u8 *data,
3184 struct ib_device *ibdev, u8 port,
3185 u32 *resp_len)
3186{
3187 struct opa_congestion_setting_attr *p =
3188 (struct opa_congestion_setting_attr *) data;
3189 struct hfi1_ibport *ibp = to_iport(ibdev, port);
3190 struct hfi1_pportdata *ppd = ppd_from_ibp(ibp);
3191 struct opa_congestion_setting_entry_shadow *entries;
3192 int i;
3193
3194 ppd->cc_sl_control_map = be32_to_cpu(p->control_map);
3195
3196 entries = ppd->congestion_entries;
3197 for (i = 0; i < OPA_MAX_SLS; i++) {
3198 entries[i].ccti_increase = p->entries[i].ccti_increase;
3199 entries[i].ccti_timer = be16_to_cpu(p->entries[i].ccti_timer);
3200 entries[i].trigger_threshold =
3201 p->entries[i].trigger_threshold;
3202 entries[i].ccti_min = p->entries[i].ccti_min;
3203 }
3204
3205 return __subn_get_opa_cong_setting(smp, am, data, ibdev, port,
3206 resp_len);
3207}
3208
3209static int __subn_get_opa_hfi1_cong_log(struct opa_smp *smp, u32 am,
3210 u8 *data, struct ib_device *ibdev,
3211 u8 port, u32 *resp_len)
3212{
3213 struct hfi1_ibport *ibp = to_iport(ibdev, port);
3214 struct hfi1_pportdata *ppd = ppd_from_ibp(ibp);
3215 struct opa_hfi1_cong_log *cong_log = (struct opa_hfi1_cong_log *)data;
3216 s64 ts;
3217 int i;
3218
3219 if (am != 0) {
3220 smp->status |= IB_SMP_INVALID_FIELD;
3221 return reply((struct ib_mad_hdr *)smp);
3222 }
3223
3224 spin_lock_irq(&ppd->cc_log_lock);
3225
3226 cong_log->log_type = OPA_CC_LOG_TYPE_HFI;
3227 cong_log->congestion_flags = 0;
3228 cong_log->threshold_event_counter =
3229 cpu_to_be16(ppd->threshold_event_counter);
3230 memcpy(cong_log->threshold_cong_event_map,
3231 ppd->threshold_cong_event_map,
3232 sizeof(cong_log->threshold_cong_event_map));
3233 /* keep timestamp in units of 1.024 usec */
3234 ts = ktime_to_ns(ktime_get()) / 1024;
3235 cong_log->current_time_stamp = cpu_to_be32(ts);
3236 for (i = 0; i < OPA_CONG_LOG_ELEMS; i++) {
3237 struct opa_hfi1_cong_log_event_internal *cce =
3238 &ppd->cc_events[ppd->cc_mad_idx++];
3239 if (ppd->cc_mad_idx == OPA_CONG_LOG_ELEMS)
3240 ppd->cc_mad_idx = 0;
3241 /*
3242 * Entries which are older than twice the time
3243 * required to wrap the counter are supposed to
3244 * be zeroed (CA10-49 IBTA, release 1.2.1, V1).
3245 */
3246 if ((u64)(ts - cce->timestamp) > (2 * UINT_MAX))
3247 continue;
3248 memcpy(cong_log->events[i].local_qp_cn_entry, &cce->lqpn, 3);
3249 memcpy(cong_log->events[i].remote_qp_number_cn_entry,
3250 &cce->rqpn, 3);
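		/* SL in bits 7:3 of the entry, service type in bits 2:0 */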
3251 cong_log->events[i].sl_svc_type_cn_entry =
3252 ((cce->sl & 0x1f) << 3) | (cce->svc_type & 0x7);
3253 cong_log->events[i].remote_lid_cn_entry =
3254 cpu_to_be32(cce->rlid);
3255 cong_log->events[i].timestamp_cn_entry =
3256 cpu_to_be32(cce->timestamp);
3257 }
3258
3259 /*
3260 * Reset threshold_cong_event_map, and threshold_event_counter
3261 * to 0 when log is read.
3262 */
3263 memset(ppd->threshold_cong_event_map, 0x0,
3264 sizeof(ppd->threshold_cong_event_map));
3265 ppd->threshold_event_counter = 0;
3266
3267 spin_unlock_irq(&ppd->cc_log_lock);
3268
3269 if (resp_len)
3270 *resp_len += sizeof(struct opa_hfi1_cong_log);
3271
3272 return reply((struct ib_mad_hdr *)smp);
3273}
3274
3275static int __subn_get_opa_cc_table(struct opa_smp *smp, u32 am, u8 *data,
3276 struct ib_device *ibdev, u8 port,
3277 u32 *resp_len)
3278{
3279 struct ib_cc_table_attr *cc_table_attr =
3280 (struct ib_cc_table_attr *) data;
3281 struct hfi1_ibport *ibp = to_iport(ibdev, port);
3282 struct hfi1_pportdata *ppd = ppd_from_ibp(ibp);
3283 u32 start_block = OPA_AM_START_BLK(am);
3284 u32 n_blocks = OPA_AM_NBLK(am);
3285 struct ib_cc_table_entry_shadow *entries;
3286 int i, j;
3287 u32 sentry, eentry;
3288 struct cc_state *cc_state;
3289
3290 /* sanity check n_blocks, start_block */
3291 if (n_blocks == 0 ||
3292 start_block + n_blocks > ppd->cc_max_table_entries) {
3293 smp->status |= IB_SMP_INVALID_FIELD;
3294 return reply((struct ib_mad_hdr *)smp);
3295 }
3296
3297 rcu_read_lock();
3298
3299 cc_state = get_cc_state(ppd);
3300
3301 if (cc_state == NULL) {
3302 rcu_read_unlock();
3303 return reply((struct ib_mad_hdr *)smp);
3304 }
3305
3306 sentry = start_block * IB_CCT_ENTRIES;
3307 eentry = sentry + (IB_CCT_ENTRIES * n_blocks);
3308
3309 cc_table_attr->ccti_limit = cpu_to_be16(cc_state->cct.ccti_limit);
3310
3311 entries = cc_state->cct.entries;
3312
3313 /* return n_blocks, though the last block may not be full */
3314 for (j = 0, i = sentry; i < eentry; j++, i++)
3315 cc_table_attr->ccti_entries[j].entry =
3316 cpu_to_be16(entries[i].entry);
3317
3318 rcu_read_unlock();
3319
3320 if (resp_len)
3321 *resp_len += sizeof(u16)*(IB_CCT_ENTRIES * n_blocks + 1);
3322
3323 return reply((struct ib_mad_hdr *)smp);
3324}
3325
3326void cc_state_reclaim(struct rcu_head *rcu)
3327{
3328 struct cc_state *cc_state = container_of(rcu, struct cc_state, rcu);
3329
3330 kfree(cc_state);
3331}
3332
3333static int __subn_set_opa_cc_table(struct opa_smp *smp, u32 am, u8 *data,
3334 struct ib_device *ibdev, u8 port,
3335 u32 *resp_len)
3336{
3337 struct ib_cc_table_attr *p = (struct ib_cc_table_attr *) data;
3338 struct hfi1_ibport *ibp = to_iport(ibdev, port);
3339 struct hfi1_pportdata *ppd = ppd_from_ibp(ibp);
3340 u32 start_block = OPA_AM_START_BLK(am);
3341 u32 n_blocks = OPA_AM_NBLK(am);
3342 struct ib_cc_table_entry_shadow *entries;
3343 int i, j;
3344 u32 sentry, eentry;
3345 u16 ccti_limit;
3346 struct cc_state *old_cc_state, *new_cc_state;
3347
3348 /* sanity check n_blocks, start_block */
3349 if (n_blocks == 0 ||
3350 start_block + n_blocks > ppd->cc_max_table_entries) {
3351 smp->status |= IB_SMP_INVALID_FIELD;
3352 return reply((struct ib_mad_hdr *)smp);
3353 }
3354
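	/*
	 * Worked example, assuming IB_CCT_ENTRIES == 64: start_block = 0,
	 * n_blocks = 1 and ccti_limit = 63 give sentry = 0 and
	 * eentry = 0 + 0 + (63 % 64) + 1 = 64, i.e. exactly one full
	 * block of table entries is written below.
	 */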
3355 sentry = start_block * IB_CCT_ENTRIES;
3356 eentry = sentry + ((n_blocks - 1) * IB_CCT_ENTRIES) +
3357 (be16_to_cpu(p->ccti_limit)) % IB_CCT_ENTRIES + 1;
3358
3359 /* sanity check ccti_limit */
3360 ccti_limit = be16_to_cpu(p->ccti_limit);
3361 if (ccti_limit + 1 > eentry) {
3362 smp->status |= IB_SMP_INVALID_FIELD;
3363 return reply((struct ib_mad_hdr *)smp);
3364 }
3365
3366 new_cc_state = kzalloc(sizeof(*new_cc_state), GFP_KERNEL);
3367 if (new_cc_state == NULL)
3368 goto getit;
3369
3370 spin_lock(&ppd->cc_state_lock);
3371
3372 old_cc_state = get_cc_state(ppd);
3373
3374 if (old_cc_state == NULL) {
3375 spin_unlock(&ppd->cc_state_lock);
3376 kfree(new_cc_state);
3377 return reply((struct ib_mad_hdr *)smp);
3378 }
3379
3380 *new_cc_state = *old_cc_state;
3381
3382 new_cc_state->cct.ccti_limit = ccti_limit;
3383
3384 entries = ppd->ccti_entries;
3385 ppd->total_cct_entry = ccti_limit + 1;
3386
3387 for (j = 0, i = sentry; i < eentry; j++, i++)
3388 entries[i].entry = be16_to_cpu(p->ccti_entries[j].entry);
3389
3390 memcpy(new_cc_state->cct.entries, entries,
3391 eentry * sizeof(struct ib_cc_table_entry));
3392
3393 new_cc_state->cong_setting.port_control = IB_CC_CCS_PC_SL_BASED;
3394 new_cc_state->cong_setting.control_map = ppd->cc_sl_control_map;
3395 memcpy(new_cc_state->cong_setting.entries, ppd->congestion_entries,
3396 OPA_MAX_SLS * sizeof(struct opa_congestion_setting_entry));
3397
3398 rcu_assign_pointer(ppd->cc_state, new_cc_state);
3399
3400 spin_unlock(&ppd->cc_state_lock);
3401
3402 call_rcu(&old_cc_state->rcu, cc_state_reclaim);
3403
3404getit:
3405 return __subn_get_opa_cc_table(smp, am, data, ibdev, port, resp_len);
3406}
3407
3408struct opa_led_info {
3409 __be32 rsvd_led_mask;
3410 __be32 rsvd;
3411};
3412
3413#define OPA_LED_SHIFT 31
3414#define OPA_LED_MASK BIT(OPA_LED_SHIFT)
3415
3416static int __subn_get_opa_led_info(struct opa_smp *smp, u32 am, u8 *data,
3417 struct ib_device *ibdev, u8 port,
3418 u32 *resp_len)
3419{
3420 struct hfi1_devdata *dd = dd_from_ibdev(ibdev);
3421 struct opa_led_info *p = (struct opa_led_info *) data;
3422 u32 nport = OPA_AM_NPORT(am);
3423 u64 reg;
3424
3425 if (nport != 1) {
3426 smp->status |= IB_SMP_INVALID_FIELD;
3427 return reply((struct ib_mad_hdr *)smp);
3428 }
3429
3430 reg = read_csr(dd, DCC_CFG_LED_CNTRL);
3431 if ((reg & DCC_CFG_LED_CNTRL_LED_CNTRL_SMASK) &&
3432 ((reg & DCC_CFG_LED_CNTRL_LED_SW_BLINK_RATE_SMASK) == 0xf))
3433 p->rsvd_led_mask = cpu_to_be32(OPA_LED_MASK);
3434
3435 if (resp_len)
3436 *resp_len += sizeof(struct opa_led_info);
3437
3438 return reply((struct ib_mad_hdr *)smp);
3439}
3440
3441static int __subn_set_opa_led_info(struct opa_smp *smp, u32 am, u8 *data,
3442 struct ib_device *ibdev, u8 port,
3443 u32 *resp_len)
3444{
3445 struct hfi1_devdata *dd = dd_from_ibdev(ibdev);
3446 struct opa_led_info *p = (struct opa_led_info *) data;
3447 u32 nport = OPA_AM_NPORT(am);
3448 int on = !!(be32_to_cpu(p->rsvd_led_mask) & OPA_LED_MASK);
3449
801cfd6d 3450 if (nport != 1) {
3451 smp->status |= IB_SMP_INVALID_FIELD;
3452 return reply((struct ib_mad_hdr *)smp);
3453 }
3454
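	/*
	 * Sketch of the override semantics, assuming the
	 * hfi1_set_led_override(ppd, timeon_ms, timeoff_ms) signature: a
	 * set mask bit blinks the LED at 2000ms on / 1500ms off, a clear
	 * bit cancels the override and returns the LED to hardware
	 * control.
	 */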
3455 if (on)
3456 hfi1_set_led_override(dd->pport, 2000, 1500);
3457 else
3458 hfi1_set_led_override(dd->pport, 0, 0);
3459
3460 return __subn_get_opa_led_info(smp, am, data, ibdev, port, resp_len);
3461}
3462
3463static int subn_get_opa_sma(__be16 attr_id, struct opa_smp *smp, u32 am,
3464 u8 *data, struct ib_device *ibdev, u8 port,
3465 u32 *resp_len)
3466{
3467 int ret;
3468 struct hfi1_ibport *ibp = to_iport(ibdev, port);
3469
3470 switch (attr_id) {
3471 case IB_SMP_ATTR_NODE_DESC:
3472 ret = __subn_get_opa_nodedesc(smp, am, data, ibdev, port,
3473 resp_len);
3474 break;
3475 case IB_SMP_ATTR_NODE_INFO:
3476 ret = __subn_get_opa_nodeinfo(smp, am, data, ibdev, port,
3477 resp_len);
3478 break;
3479 case IB_SMP_ATTR_PORT_INFO:
3480 ret = __subn_get_opa_portinfo(smp, am, data, ibdev, port,
3481 resp_len);
3482 break;
3483 case IB_SMP_ATTR_PKEY_TABLE:
3484 ret = __subn_get_opa_pkeytable(smp, am, data, ibdev, port,
3485 resp_len);
3486 break;
3487 case OPA_ATTRIB_ID_SL_TO_SC_MAP:
3488 ret = __subn_get_opa_sl_to_sc(smp, am, data, ibdev, port,
3489 resp_len);
3490 break;
3491 case OPA_ATTRIB_ID_SC_TO_SL_MAP:
3492 ret = __subn_get_opa_sc_to_sl(smp, am, data, ibdev, port,
3493 resp_len);
3494 break;
3495 case OPA_ATTRIB_ID_SC_TO_VLT_MAP:
3496 ret = __subn_get_opa_sc_to_vlt(smp, am, data, ibdev, port,
3497 resp_len);
3498 break;
3499 case OPA_ATTRIB_ID_SC_TO_VLNT_MAP:
3500 ret = __subn_get_opa_sc_to_vlnt(smp, am, data, ibdev, port,
3501 resp_len);
3502 break;
3503 case OPA_ATTRIB_ID_PORT_STATE_INFO:
3504 ret = __subn_get_opa_psi(smp, am, data, ibdev, port,
3505 resp_len);
3506 break;
3507 case OPA_ATTRIB_ID_BUFFER_CONTROL_TABLE:
3508 ret = __subn_get_opa_bct(smp, am, data, ibdev, port,
3509 resp_len);
3510 break;
3511 case OPA_ATTRIB_ID_CABLE_INFO:
3512 ret = __subn_get_opa_cable_info(smp, am, data, ibdev, port,
3513 resp_len);
3514 break;
3515 case IB_SMP_ATTR_VL_ARB_TABLE:
3516 ret = __subn_get_opa_vl_arb(smp, am, data, ibdev, port,
3517 resp_len);
3518 break;
3519 case OPA_ATTRIB_ID_CONGESTION_INFO:
3520 ret = __subn_get_opa_cong_info(smp, am, data, ibdev, port,
3521 resp_len);
3522 break;
3523 case OPA_ATTRIB_ID_HFI_CONGESTION_SETTING:
3524 ret = __subn_get_opa_cong_setting(smp, am, data, ibdev,
3525 port, resp_len);
3526 break;
3527 case OPA_ATTRIB_ID_HFI_CONGESTION_LOG:
3528 ret = __subn_get_opa_hfi1_cong_log(smp, am, data, ibdev,
3529 port, resp_len);
3530 break;
3531 case OPA_ATTRIB_ID_CONGESTION_CONTROL_TABLE:
3532 ret = __subn_get_opa_cc_table(smp, am, data, ibdev, port,
3533 resp_len);
3534 break;
3535 case IB_SMP_ATTR_LED_INFO:
3536 ret = __subn_get_opa_led_info(smp, am, data, ibdev, port,
3537 resp_len);
3538 break;
3539 case IB_SMP_ATTR_SM_INFO:
 3540 		if (ibp->rvp.port_cap_flags & IB_PORT_SM_DISABLED)
 3541 			return IB_MAD_RESULT_SUCCESS | IB_MAD_RESULT_CONSUMED;
 3542 		if (ibp->rvp.port_cap_flags & IB_PORT_SM)
3543 return IB_MAD_RESULT_SUCCESS;
3544 /* FALLTHROUGH */
3545 default:
3546 smp->status |= IB_SMP_UNSUP_METH_ATTR;
3547 ret = reply((struct ib_mad_hdr *)smp);
3548 break;
3549 }
3550 return ret;
3551}
3552
3553static int subn_set_opa_sma(__be16 attr_id, struct opa_smp *smp, u32 am,
3554 u8 *data, struct ib_device *ibdev, u8 port,
3555 u32 *resp_len)
3556{
3557 int ret;
3558 struct hfi1_ibport *ibp = to_iport(ibdev, port);
3559
3560 switch (attr_id) {
3561 case IB_SMP_ATTR_PORT_INFO:
3562 ret = __subn_set_opa_portinfo(smp, am, data, ibdev, port,
3563 resp_len);
3564 break;
3565 case IB_SMP_ATTR_PKEY_TABLE:
3566 ret = __subn_set_opa_pkeytable(smp, am, data, ibdev, port,
3567 resp_len);
3568 break;
3569 case OPA_ATTRIB_ID_SL_TO_SC_MAP:
3570 ret = __subn_set_opa_sl_to_sc(smp, am, data, ibdev, port,
3571 resp_len);
3572 break;
3573 case OPA_ATTRIB_ID_SC_TO_SL_MAP:
3574 ret = __subn_set_opa_sc_to_sl(smp, am, data, ibdev, port,
3575 resp_len);
3576 break;
3577 case OPA_ATTRIB_ID_SC_TO_VLT_MAP:
3578 ret = __subn_set_opa_sc_to_vlt(smp, am, data, ibdev, port,
3579 resp_len);
3580 break;
3581 case OPA_ATTRIB_ID_SC_TO_VLNT_MAP:
3582 ret = __subn_set_opa_sc_to_vlnt(smp, am, data, ibdev, port,
3583 resp_len);
3584 break;
3585 case OPA_ATTRIB_ID_PORT_STATE_INFO:
3586 ret = __subn_set_opa_psi(smp, am, data, ibdev, port,
3587 resp_len);
3588 break;
3589 case OPA_ATTRIB_ID_BUFFER_CONTROL_TABLE:
3590 ret = __subn_set_opa_bct(smp, am, data, ibdev, port,
3591 resp_len);
3592 break;
3593 case IB_SMP_ATTR_VL_ARB_TABLE:
3594 ret = __subn_set_opa_vl_arb(smp, am, data, ibdev, port,
3595 resp_len);
3596 break;
3597 case OPA_ATTRIB_ID_HFI_CONGESTION_SETTING:
3598 ret = __subn_set_opa_cong_setting(smp, am, data, ibdev,
3599 port, resp_len);
3600 break;
3601 case OPA_ATTRIB_ID_CONGESTION_CONTROL_TABLE:
3602 ret = __subn_set_opa_cc_table(smp, am, data, ibdev, port,
3603 resp_len);
3604 break;
3605 case IB_SMP_ATTR_LED_INFO:
3606 ret = __subn_set_opa_led_info(smp, am, data, ibdev, port,
3607 resp_len);
3608 break;
3609 case IB_SMP_ATTR_SM_INFO:
 3610 		if (ibp->rvp.port_cap_flags & IB_PORT_SM_DISABLED)
 3611 			return IB_MAD_RESULT_SUCCESS | IB_MAD_RESULT_CONSUMED;
 3612 		if (ibp->rvp.port_cap_flags & IB_PORT_SM)
3613 return IB_MAD_RESULT_SUCCESS;
3614 /* FALLTHROUGH */
3615 default:
3616 smp->status |= IB_SMP_UNSUP_METH_ATTR;
3617 ret = reply((struct ib_mad_hdr *)smp);
3618 break;
3619 }
3620 return ret;
3621}
3622
3623static inline void set_aggr_error(struct opa_aggregate *ag)
3624{
3625 ag->err_reqlength |= cpu_to_be16(0x8000);
3626}
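/*
 * Within an aggregate segment, err_reqlength packs the error flag into
 * bit 15 (set above on failure) and the request length, in units of
 * 8 bytes, into the low 7 bits; the bits in between are reserved.
 */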
3627
3628static int subn_get_opa_aggregate(struct opa_smp *smp,
3629 struct ib_device *ibdev, u8 port,
3630 u32 *resp_len)
3631{
3632 int i;
3633 u32 num_attr = be32_to_cpu(smp->attr_mod) & 0x000000ff;
3634 u8 *next_smp = opa_get_smp_data(smp);
3635
3636 if (num_attr < 1 || num_attr > 117) {
3637 smp->status |= IB_SMP_INVALID_FIELD;
3638 return reply((struct ib_mad_hdr *)smp);
3639 }
3640
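	/*
	 * Walk the concatenated segments: each one is an opa_aggregate
	 * header followed by its payload, and the Get response for each
	 * attribute is written back in place into the same segment.
	 */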
3641 for (i = 0; i < num_attr; i++) {
3642 struct opa_aggregate *agg;
3643 size_t agg_data_len;
3644 size_t agg_size;
3645 u32 am;
3646
3647 agg = (struct opa_aggregate *)next_smp;
3648 agg_data_len = (be16_to_cpu(agg->err_reqlength) & 0x007f) * 8;
3649 agg_size = sizeof(*agg) + agg_data_len;
3650 am = be32_to_cpu(agg->attr_mod);
3651
3652 *resp_len += agg_size;
3653
3654 if (next_smp + agg_size > ((u8 *)smp) + sizeof(*smp)) {
3655 smp->status |= IB_SMP_INVALID_FIELD;
3656 return reply((struct ib_mad_hdr *)smp);
3657 }
3658
3659 /* zero the payload for this segment */
3660 memset(next_smp + sizeof(*agg), 0, agg_data_len);
3661
 3662 		(void)subn_get_opa_sma(agg->attr_id, smp, am, agg->data,
3663 ibdev, port, NULL);
3664 if (smp->status & ~IB_SMP_DIRECTION) {
3665 set_aggr_error(agg);
3666 return reply((struct ib_mad_hdr *)smp);
3667 }
3668 next_smp += agg_size;
3669
3670 }
3671
3672 return reply((struct ib_mad_hdr *)smp);
3673}
3674
3675static int subn_set_opa_aggregate(struct opa_smp *smp,
3676 struct ib_device *ibdev, u8 port,
3677 u32 *resp_len)
3678{
3679 int i;
3680 u32 num_attr = be32_to_cpu(smp->attr_mod) & 0x000000ff;
3681 u8 *next_smp = opa_get_smp_data(smp);
3682
3683 if (num_attr < 1 || num_attr > 117) {
3684 smp->status |= IB_SMP_INVALID_FIELD;
3685 return reply((struct ib_mad_hdr *)smp);
3686 }
3687
3688 for (i = 0; i < num_attr; i++) {
3689 struct opa_aggregate *agg;
3690 size_t agg_data_len;
3691 size_t agg_size;
3692 u32 am;
3693
3694 agg = (struct opa_aggregate *)next_smp;
3695 agg_data_len = (be16_to_cpu(agg->err_reqlength) & 0x007f) * 8;
3696 agg_size = sizeof(*agg) + agg_data_len;
3697 am = be32_to_cpu(agg->attr_mod);
3698
3699 *resp_len += agg_size;
3700
3701 if (next_smp + agg_size > ((u8 *)smp) + sizeof(*smp)) {
3702 smp->status |= IB_SMP_INVALID_FIELD;
3703 return reply((struct ib_mad_hdr *)smp);
3704 }
3705
 3706 		(void)subn_set_opa_sma(agg->attr_id, smp, am, agg->data,
3707 ibdev, port, NULL);
3708 if (smp->status & ~IB_SMP_DIRECTION) {
3709 set_aggr_error(agg);
3710 return reply((struct ib_mad_hdr *)smp);
3711 }
3712 next_smp += agg_size;
3713
3714 }
3715
3716 return reply((struct ib_mad_hdr *)smp);
3717}
3718
3719/*
3720 * OPAv1 specifies that, on the transition to link up, these counters
3721 * are cleared:
3722 * PortRcvErrors [*]
3723 * LinkErrorRecovery
3724 * LocalLinkIntegrityErrors
3725 * ExcessiveBufferOverruns [*]
3726 *
3727 * [*] Error info associated with these counters is retained, but the
3728 * error info status is reset to 0.
3729 */
3730void clear_linkup_counters(struct hfi1_devdata *dd)
3731{
3732 /* PortRcvErrors */
3733 write_dev_cntr(dd, C_DC_RCV_ERR, CNTR_INVALID_VL, 0);
3734 dd->err_info_rcvport.status_and_code &= ~OPA_EI_STATUS_SMASK;
3735 /* LinkErrorRecovery */
3736 write_dev_cntr(dd, C_DC_SEQ_CRC_CNT, CNTR_INVALID_VL, 0);
3737 write_dev_cntr(dd, C_DC_REINIT_FROM_PEER_CNT, CNTR_INVALID_VL, 0);
3738 /* LocalLinkIntegrityErrors */
3739 write_dev_cntr(dd, C_DC_TX_REPLAY, CNTR_INVALID_VL, 0);
3740 write_dev_cntr(dd, C_DC_RX_REPLAY, CNTR_INVALID_VL, 0);
3741 /* ExcessiveBufferOverruns */
3742 write_dev_cntr(dd, C_RCV_OVF, CNTR_INVALID_VL, 0);
3743 dd->rcv_ovfl_cnt = 0;
3744 dd->err_info_xmit_constraint.status &= ~OPA_EI_STATUS_SMASK;
3745}
3746
3747/*
 3748  * is_local_mad() returns 1 if 'mad' was sent from, and is destined
 3749  * to, the local node; 0 otherwise.
3750 */
3751static int is_local_mad(struct hfi1_ibport *ibp, const struct opa_mad *mad,
3752 const struct ib_wc *in_wc)
3753{
3754 struct hfi1_pportdata *ppd = ppd_from_ibp(ibp);
3755 const struct opa_smp *smp = (const struct opa_smp *)mad;
3756
3757 if (smp->mgmt_class == IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE) {
3758 return (smp->hop_cnt == 0 &&
3759 smp->route.dr.dr_slid == OPA_LID_PERMISSIVE &&
3760 smp->route.dr.dr_dlid == OPA_LID_PERMISSIVE);
3761 }
3762
3763 return (in_wc->slid == ppd->lid);
3764}
3765
3766/*
3767 * opa_local_smp_check() should only be called on MADs for which
3768 * is_local_mad() returns true. It applies the SMP checks that are
3769 * specific to SMPs which are sent from, and destined to this node.
3770 * opa_local_smp_check() returns 0 if the SMP passes its checks, 1
3771 * otherwise.
3772 *
3773 * SMPs which arrive from other nodes are instead checked by
3774 * opa_smp_check().
3775 */
3776static int opa_local_smp_check(struct hfi1_ibport *ibp,
3777 const struct ib_wc *in_wc)
3778{
3779 struct hfi1_pportdata *ppd = ppd_from_ibp(ibp);
3780 u16 slid = in_wc->slid;
3781 u16 pkey;
3782
3783 if (in_wc->pkey_index >= ARRAY_SIZE(ppd->pkeys))
3784 return 1;
3785
3786 pkey = ppd->pkeys[in_wc->pkey_index];
3787 /*
3788 * We need to do the "node-local" checks specified in OPAv1,
3789 * rev 0.90, section 9.10.26, which are:
3790 * - pkey is 0x7fff, or 0xffff
3791 * - Source QPN == 0 || Destination QPN == 0
3792 * - the MAD header's management class is either
3793 * IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE or
3794 * IB_MGMT_CLASS_SUBN_LID_ROUTED
3795 * - SLID != 0
3796 *
3797 * However, we know (and so don't need to check again) that,
3798 * for local SMPs, the MAD stack passes MADs with:
3799 * - Source QPN of 0
3800 * - MAD mgmt_class is IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE
3801 * - SLID is either: OPA_LID_PERMISSIVE (0xFFFFFFFF), or
3802 * our own port's lid
3803 *
3804 */
3805 if (pkey == LIM_MGMT_P_KEY || pkey == FULL_MGMT_P_KEY)
3806 return 0;
3807 ingress_pkey_table_fail(ppd, pkey, slid);
3808 return 1;
3809}
3810
3811static int process_subn_opa(struct ib_device *ibdev, int mad_flags,
3812 u8 port, const struct opa_mad *in_mad,
3813 struct opa_mad *out_mad,
3814 u32 *resp_len)
3815{
3816 struct opa_smp *smp = (struct opa_smp *)out_mad;
3817 struct hfi1_ibport *ibp = to_iport(ibdev, port);
3818 u8 *data;
3819 u32 am;
3820 __be16 attr_id;
3821 int ret;
3822
3823 *out_mad = *in_mad;
3824 data = opa_get_smp_data(smp);
3825
3826 am = be32_to_cpu(smp->attr_mod);
3827 attr_id = smp->attr_id;
3828 if (smp->class_version != OPA_SMI_CLASS_VERSION) {
3829 smp->status |= IB_SMP_UNSUP_VERSION;
3830 ret = reply((struct ib_mad_hdr *)smp);
3831 goto bail;
3832 }
3833 ret = check_mkey(ibp, (struct ib_mad_hdr *)smp, mad_flags, smp->mkey,
3834 smp->route.dr.dr_slid, smp->route.dr.return_path,
3835 smp->hop_cnt);
3836 if (ret) {
3837 u32 port_num = be32_to_cpu(smp->attr_mod);
3838
3839 /*
3840 * If this is a get/set portinfo, we already check the
3841 * M_Key if the MAD is for another port and the M_Key
3842 * is OK on the receiving port. This check is needed
3843 * to increment the error counters when the M_Key
3844 * fails to match on *both* ports.
3845 */
3846 if (attr_id == IB_SMP_ATTR_PORT_INFO &&
3847 (smp->method == IB_MGMT_METHOD_GET ||
3848 smp->method == IB_MGMT_METHOD_SET) &&
3849 port_num && port_num <= ibdev->phys_port_cnt &&
3850 port != port_num)
 3851 			(void)check_mkey(to_iport(ibdev, port_num),
3852 (struct ib_mad_hdr *)smp, 0,
3853 smp->mkey, smp->route.dr.dr_slid,
3854 smp->route.dr.return_path,
3855 smp->hop_cnt);
3856 ret = IB_MAD_RESULT_FAILURE;
3857 goto bail;
3858 }
3859
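	/*
	 * The response length starts at the bare SMP header; each
	 * attribute handler below adds the size of whatever payload it
	 * writes.
	 */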
3860 *resp_len = opa_get_smp_header_size(smp);
3861
3862 switch (smp->method) {
3863 case IB_MGMT_METHOD_GET:
3864 switch (attr_id) {
3865 default:
3866 clear_opa_smp_data(smp);
3867 ret = subn_get_opa_sma(attr_id, smp, am, data,
3868 ibdev, port, resp_len);
3869 goto bail;
3870 case OPA_ATTRIB_ID_AGGREGATE:
3871 ret = subn_get_opa_aggregate(smp, ibdev, port,
3872 resp_len);
3873 goto bail;
3874 }
3875 case IB_MGMT_METHOD_SET:
3876 switch (attr_id) {
3877 default:
3878 ret = subn_set_opa_sma(attr_id, smp, am, data,
3879 ibdev, port, resp_len);
3880 goto bail;
3881 case OPA_ATTRIB_ID_AGGREGATE:
3882 ret = subn_set_opa_aggregate(smp, ibdev, port,
3883 resp_len);
3884 goto bail;
3885 }
3886 case IB_MGMT_METHOD_TRAP:
3887 case IB_MGMT_METHOD_REPORT:
3888 case IB_MGMT_METHOD_REPORT_RESP:
3889 case IB_MGMT_METHOD_GET_RESP:
3890 /*
3891 * The ib_mad module will call us to process responses
3892 * before checking for other consumers.
3893 * Just tell the caller to process it normally.
3894 */
3895 ret = IB_MAD_RESULT_SUCCESS;
3896 goto bail;
3897 default:
3898 smp->status |= IB_SMP_UNSUP_METHOD;
3899 ret = reply((struct ib_mad_hdr *)smp);
3900 }
3901
3902bail:
3903 return ret;
3904}
3905
3906static int process_subn(struct ib_device *ibdev, int mad_flags,
3907 u8 port, const struct ib_mad *in_mad,
3908 struct ib_mad *out_mad)
3909{
3910 struct ib_smp *smp = (struct ib_smp *)out_mad;
3911 struct hfi1_ibport *ibp = to_iport(ibdev, port);
3912 int ret;
3913
3914 *out_mad = *in_mad;
3915 if (smp->class_version != 1) {
3916 smp->status |= IB_SMP_UNSUP_VERSION;
3917 ret = reply((struct ib_mad_hdr *)smp);
3918 goto bail;
3919 }
3920
3921 ret = check_mkey(ibp, (struct ib_mad_hdr *)smp, mad_flags,
3922 smp->mkey, (__force __be32)smp->dr_slid,
3923 smp->return_path, smp->hop_cnt);
3924 if (ret) {
3925 u32 port_num = be32_to_cpu(smp->attr_mod);
3926
3927 /*
3928 * If this is a get/set portinfo, we already check the
3929 * M_Key if the MAD is for another port and the M_Key
3930 * is OK on the receiving port. This check is needed
3931 * to increment the error counters when the M_Key
3932 * fails to match on *both* ports.
3933 */
3934 if (in_mad->mad_hdr.attr_id == IB_SMP_ATTR_PORT_INFO &&
3935 (smp->method == IB_MGMT_METHOD_GET ||
3936 smp->method == IB_MGMT_METHOD_SET) &&
3937 port_num && port_num <= ibdev->phys_port_cnt &&
3938 port != port_num)
 3939 			(void)check_mkey(to_iport(ibdev, port_num),
3940 (struct ib_mad_hdr *)smp, 0,
3941 smp->mkey,
3942 (__force __be32)smp->dr_slid,
3943 smp->return_path, smp->hop_cnt);
3944 ret = IB_MAD_RESULT_FAILURE;
3945 goto bail;
3946 }
3947
3948 switch (smp->method) {
3949 case IB_MGMT_METHOD_GET:
3950 switch (smp->attr_id) {
3951 case IB_SMP_ATTR_NODE_INFO:
3952 ret = subn_get_nodeinfo(smp, ibdev, port);
3953 goto bail;
3954 default:
3955 smp->status |= IB_SMP_UNSUP_METH_ATTR;
3956 ret = reply((struct ib_mad_hdr *)smp);
3957 goto bail;
3958 }
3959 }
3960
3961bail:
3962 return ret;
3963}
3964
3965static int process_perf_opa(struct ib_device *ibdev, u8 port,
3966 const struct opa_mad *in_mad,
3967 struct opa_mad *out_mad, u32 *resp_len)
3968{
3969 struct opa_pma_mad *pmp = (struct opa_pma_mad *)out_mad;
3970 int ret;
3971
3972 *out_mad = *in_mad;
3973
3974 if (pmp->mad_hdr.class_version != OPA_SMI_CLASS_VERSION) {
3975 pmp->mad_hdr.status |= IB_SMP_UNSUP_VERSION;
3976 return reply((struct ib_mad_hdr *)pmp);
3977 }
3978
3979 *resp_len = sizeof(pmp->mad_hdr);
3980
3981 switch (pmp->mad_hdr.method) {
3982 case IB_MGMT_METHOD_GET:
3983 switch (pmp->mad_hdr.attr_id) {
3984 case IB_PMA_CLASS_PORT_INFO:
3985 ret = pma_get_opa_classportinfo(pmp, ibdev, resp_len);
3986 goto bail;
3987 case OPA_PM_ATTRIB_ID_PORT_STATUS:
3988 ret = pma_get_opa_portstatus(pmp, ibdev, port,
3989 resp_len);
3990 goto bail;
3991 case OPA_PM_ATTRIB_ID_DATA_PORT_COUNTERS:
3992 ret = pma_get_opa_datacounters(pmp, ibdev, port,
3993 resp_len);
3994 goto bail;
3995 case OPA_PM_ATTRIB_ID_ERROR_PORT_COUNTERS:
3996 ret = pma_get_opa_porterrors(pmp, ibdev, port,
3997 resp_len);
3998 goto bail;
3999 case OPA_PM_ATTRIB_ID_ERROR_INFO:
4000 ret = pma_get_opa_errorinfo(pmp, ibdev, port,
4001 resp_len);
4002 goto bail;
4003 default:
4004 pmp->mad_hdr.status |= IB_SMP_UNSUP_METH_ATTR;
4005 ret = reply((struct ib_mad_hdr *)pmp);
4006 goto bail;
4007 }
4008
4009 case IB_MGMT_METHOD_SET:
4010 switch (pmp->mad_hdr.attr_id) {
4011 case OPA_PM_ATTRIB_ID_CLEAR_PORT_STATUS:
4012 ret = pma_set_opa_portstatus(pmp, ibdev, port,
4013 resp_len);
4014 goto bail;
4015 case OPA_PM_ATTRIB_ID_ERROR_INFO:
4016 ret = pma_set_opa_errorinfo(pmp, ibdev, port,
4017 resp_len);
4018 goto bail;
4019 default:
4020 pmp->mad_hdr.status |= IB_SMP_UNSUP_METH_ATTR;
4021 ret = reply((struct ib_mad_hdr *)pmp);
4022 goto bail;
4023 }
4024
4025 case IB_MGMT_METHOD_TRAP:
4026 case IB_MGMT_METHOD_GET_RESP:
4027 /*
4028 * The ib_mad module will call us to process responses
4029 * before checking for other consumers.
4030 * Just tell the caller to process it normally.
4031 */
4032 ret = IB_MAD_RESULT_SUCCESS;
4033 goto bail;
4034
4035 default:
4036 pmp->mad_hdr.status |= IB_SMP_UNSUP_METHOD;
4037 ret = reply((struct ib_mad_hdr *)pmp);
4038 }
4039
4040bail:
4041 return ret;
4042}
4043
4044static int hfi1_process_opa_mad(struct ib_device *ibdev, int mad_flags,
4045 u8 port, const struct ib_wc *in_wc,
4046 const struct ib_grh *in_grh,
4047 const struct opa_mad *in_mad,
4048 struct opa_mad *out_mad, size_t *out_mad_size,
4049 u16 *out_mad_pkey_index)
4050{
4051 int ret;
4052 int pkey_idx;
4053 u32 resp_len = 0;
4054 struct hfi1_ibport *ibp = to_iport(ibdev, port);
4055
4056 pkey_idx = hfi1_lookup_pkey_idx(ibp, LIM_MGMT_P_KEY);
4057 if (pkey_idx < 0) {
4058 pr_warn("failed to find limited mgmt pkey, defaulting 0x%x\n",
4059 hfi1_get_pkey(ibp, 1));
4060 pkey_idx = 1;
4061 }
4062 *out_mad_pkey_index = (u16)pkey_idx;
4063
4064 switch (in_mad->mad_hdr.mgmt_class) {
4065 case IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE:
4066 case IB_MGMT_CLASS_SUBN_LID_ROUTED:
4067 if (is_local_mad(ibp, in_mad, in_wc)) {
4068 ret = opa_local_smp_check(ibp, in_wc);
4069 if (ret)
4070 return IB_MAD_RESULT_FAILURE;
4071 }
4072 ret = process_subn_opa(ibdev, mad_flags, port, in_mad,
4073 out_mad, &resp_len);
4074 goto bail;
4075 case IB_MGMT_CLASS_PERF_MGMT:
4076 ret = process_perf_opa(ibdev, port, in_mad, out_mad,
4077 &resp_len);
4078 goto bail;
4079
4080 default:
4081 ret = IB_MAD_RESULT_SUCCESS;
4082 }
4083
4084bail:
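	/*
	 * OPA MADs are padded to a multiple of 8 bytes on the wire, hence
	 * the round_up(); a pass-through success keeps the received
	 * length, minus the GRH, unchanged.
	 */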
4085 if (ret & IB_MAD_RESULT_REPLY)
4086 *out_mad_size = round_up(resp_len, 8);
4087 else if (ret & IB_MAD_RESULT_SUCCESS)
4088 *out_mad_size = in_wc->byte_len - sizeof(struct ib_grh);
4089
4090 return ret;
4091}
4092
4093static int hfi1_process_ib_mad(struct ib_device *ibdev, int mad_flags, u8 port,
4094 const struct ib_wc *in_wc,
4095 const struct ib_grh *in_grh,
4096 const struct ib_mad *in_mad,
4097 struct ib_mad *out_mad)
4098{
4099 int ret;
4100
4101 switch (in_mad->mad_hdr.mgmt_class) {
4102 case IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE:
4103 case IB_MGMT_CLASS_SUBN_LID_ROUTED:
4104 ret = process_subn(ibdev, mad_flags, port, in_mad, out_mad);
4105 goto bail;
4106 default:
4107 ret = IB_MAD_RESULT_SUCCESS;
4108 }
4109
4110bail:
4111 return ret;
4112}
4113
4114/**
4115 * hfi1_process_mad - process an incoming MAD packet
4116 * @ibdev: the infiniband device this packet came in on
4117 * @mad_flags: MAD flags
4118 * @port: the port number this packet came in on
4119 * @in_wc: the work completion entry for this packet
4120 * @in_grh: the global route header for this packet
 4121  * @in_mad: the incoming MAD
 * @in_mad_size: size of the incoming MAD buffer
 4122  * @out_mad: any outgoing MAD reply
 * @out_mad_size: size of any outgoing MAD reply
 * @out_mad_pkey_index: P_Key index of any outgoing MAD reply
4123 *
4124 * Returns IB_MAD_RESULT_SUCCESS if this is a MAD that we are not
4125 * interested in processing.
4126 *
4127 * Note that the verbs framework has already done the MAD sanity checks,
4128 * and hop count/pointer updating for IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE
4129 * MADs.
4130 *
4131 * This is called by the ib_mad module.
4132 */
4133int hfi1_process_mad(struct ib_device *ibdev, int mad_flags, u8 port,
4134 const struct ib_wc *in_wc, const struct ib_grh *in_grh,
4135 const struct ib_mad_hdr *in_mad, size_t in_mad_size,
4136 struct ib_mad_hdr *out_mad, size_t *out_mad_size,
4137 u16 *out_mad_pkey_index)
4138{
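	/*
	 * Dispatch on the wire format: OPA_MGMT_BASE_VERSION MADs are the
	 * 2KB jumbo OPA datagrams, IB_MGMT_BASE_VERSION MADs the classic
	 * 256-byte InfiniBand format.
	 */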
4139 switch (in_mad->base_version) {
4140 case OPA_MGMT_BASE_VERSION:
4141 if (unlikely(in_mad_size != sizeof(struct opa_mad))) {
4142 dev_err(ibdev->dma_device, "invalid in_mad_size\n");
4143 return IB_MAD_RESULT_FAILURE;
4144 }
4145 return hfi1_process_opa_mad(ibdev, mad_flags, port,
4146 in_wc, in_grh,
4147 (struct opa_mad *)in_mad,
4148 (struct opa_mad *)out_mad,
4149 out_mad_size,
4150 out_mad_pkey_index);
4151 case IB_MGMT_BASE_VERSION:
4152 return hfi1_process_ib_mad(ibdev, mad_flags, port,
4153 in_wc, in_grh,
4154 (const struct ib_mad *)in_mad,
4155 (struct ib_mad *)out_mad);
4156 default:
4157 break;
4158 }
4159
4160 return IB_MAD_RESULT_FAILURE;
4161}