/*
 * Copyright(c) 2015, 2016 Intel Corporation.
 *
 * This file is provided under a dual BSD/GPLv2 license. When using or
 * redistributing this file, you may do so under either license.
 *
 * GPL LICENSE SUMMARY
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * BSD LICENSE
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 *  - Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 *  - Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 *  - Neither the name of Intel Corporation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 */

#include <linux/net.h>
#define OPA_NUM_PKEY_BLOCKS_PER_SMP (OPA_SMP_DR_DATA_SIZE \
			/ (OPA_PARTITION_TABLE_BLK_SIZE * sizeof(u16)))

#include "hfi.h"
#include "mad.h"
#include "trace.h"
#include "qp.h"

/* the reset value from the FM is supposed to be 0xffff, handle both */
#define OPA_LINK_WIDTH_RESET_OLD 0x0fff
#define OPA_LINK_WIDTH_RESET 0xffff

static int reply(struct ib_mad_hdr *smp)
{
	/*
	 * The verbs framework will handle the directed/LID route
	 * packet changes.
	 */
	smp->method = IB_MGMT_METHOD_GET_RESP;
	if (smp->mgmt_class == IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE)
		smp->status |= IB_SMP_DIRECTION;
	return IB_MAD_RESULT_SUCCESS | IB_MAD_RESULT_REPLY;
}

static inline void clear_opa_smp_data(struct opa_smp *smp)
{
	void *data = opa_get_smp_data(smp);
	size_t size = opa_get_smp_data_size(smp);

	memset(data, 0, size);
}

void hfi1_event_pkey_change(struct hfi1_devdata *dd, u8 port)
{
	struct ib_event event;

	event.event = IB_EVENT_PKEY_CHANGE;
	event.device = &dd->verbs_dev.rdi.ibdev;
	event.element.port_num = port;
	ib_dispatch_event(&event);
}

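/*
 * Build and post an OPA SM trap MAD toward the subnet manager. Sends are
 * suppressed while the port is not ACTIVE (o14-3.2.1) and are rate limited
 * by the subnet timeout (o14-2).
 */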
static void send_trap(struct hfi1_ibport *ibp, void *data, unsigned len)
{
	struct ib_mad_send_buf *send_buf;
	struct ib_mad_agent *agent;
	struct opa_smp *smp;
	int ret;
	unsigned long flags;
	unsigned long timeout;
	int pkey_idx;
	u32 qpn = ppd_from_ibp(ibp)->sm_trap_qp;

	agent = ibp->rvp.send_agent;
	if (!agent)
		return;

	/* o14-3.2.1 */
	if (ppd_from_ibp(ibp)->lstate != IB_PORT_ACTIVE)
		return;

	/* o14-2 */
	if (ibp->rvp.trap_timeout && time_before(jiffies,
						 ibp->rvp.trap_timeout))
		return;

	pkey_idx = hfi1_lookup_pkey_idx(ibp, LIM_MGMT_P_KEY);
	if (pkey_idx < 0) {
		pr_warn("%s: failed to find limited mgmt pkey, defaulting 0x%x\n",
			__func__, hfi1_get_pkey(ibp, 1));
		pkey_idx = 1;
	}

	send_buf = ib_create_send_mad(agent, qpn, pkey_idx, 0,
				      IB_MGMT_MAD_HDR, IB_MGMT_MAD_DATA,
				      GFP_ATOMIC, IB_MGMT_BASE_VERSION);
	if (IS_ERR(send_buf))
		return;

	smp = send_buf->mad;
	smp->base_version = OPA_MGMT_BASE_VERSION;
	smp->mgmt_class = IB_MGMT_CLASS_SUBN_LID_ROUTED;
	smp->class_version = OPA_SMI_CLASS_VERSION;
	smp->method = IB_MGMT_METHOD_TRAP;
	ibp->rvp.tid++;
	smp->tid = cpu_to_be64(ibp->rvp.tid);
	smp->attr_id = IB_SMP_ATTR_NOTICE;
	/* o14-1: smp->mkey = 0; */
	memcpy(smp->route.lid.data, data, len);

	spin_lock_irqsave(&ibp->rvp.lock, flags);
	if (!ibp->rvp.sm_ah) {
		if (ibp->rvp.sm_lid != be16_to_cpu(IB_LID_PERMISSIVE)) {
			struct ib_ah *ah;

			ah = hfi1_create_qp0_ah(ibp, ibp->rvp.sm_lid);
			if (IS_ERR(ah)) {
				ret = PTR_ERR(ah);
			} else {
				send_buf->ah = ah;
				ibp->rvp.sm_ah = ibah_to_rvtah(ah);
				ret = 0;
			}
		} else {
			ret = -EINVAL;
		}
	} else {
		send_buf->ah = &ibp->rvp.sm_ah->ibah;
		ret = 0;
	}
	spin_unlock_irqrestore(&ibp->rvp.lock, flags);

	if (!ret)
		ret = ib_post_send_mad(send_buf, NULL);
	if (!ret) {
		/* 4.096 usec. */
		timeout = (4096 * (1UL << ibp->rvp.subnet_timeout)) / 1000;
		ibp->rvp.trap_timeout = jiffies + usecs_to_jiffies(timeout);
	} else {
		ib_free_send_mad(send_buf);
		ibp->rvp.trap_timeout = 0;
	}
}

/*
 * Send a bad [PQ]_Key trap (ch. 14.3.8).
 */
void hfi1_bad_pqkey(struct hfi1_ibport *ibp, __be16 trap_num, u32 key, u32 sl,
		    u32 qp1, u32 qp2, u16 lid1, u16 lid2)
{
	struct opa_mad_notice_attr data;
	u32 lid = ppd_from_ibp(ibp)->lid;
	u32 _lid1 = lid1;
	u32 _lid2 = lid2;

	memset(&data, 0, sizeof(data));

	if (trap_num == OPA_TRAP_BAD_P_KEY)
		ibp->rvp.pkey_violations++;
	else
		ibp->rvp.qkey_violations++;
	ibp->rvp.n_pkt_drops++;

	/* Send violation trap */
	data.generic_type = IB_NOTICE_TYPE_SECURITY;
	data.prod_type_lsb = IB_NOTICE_PROD_CA;
	data.trap_num = trap_num;
	data.issuer_lid = cpu_to_be32(lid);
	data.ntc_257_258.lid1 = cpu_to_be32(_lid1);
	data.ntc_257_258.lid2 = cpu_to_be32(_lid2);
	data.ntc_257_258.key = cpu_to_be32(key);
	data.ntc_257_258.sl = sl << 3;
	data.ntc_257_258.qp1 = cpu_to_be32(qp1);
	data.ntc_257_258.qp2 = cpu_to_be32(qp2);

	send_trap(ibp, &data, sizeof(data));
}

/*
 * Send a bad M_Key trap (ch. 14.3.9).
 */
static void bad_mkey(struct hfi1_ibport *ibp, struct ib_mad_hdr *mad,
		     __be64 mkey, __be32 dr_slid, u8 return_path[], u8 hop_cnt)
{
	struct opa_mad_notice_attr data;
	u32 lid = ppd_from_ibp(ibp)->lid;

	memset(&data, 0, sizeof(data));
	/* Send violation trap */
	data.generic_type = IB_NOTICE_TYPE_SECURITY;
	data.prod_type_lsb = IB_NOTICE_PROD_CA;
	data.trap_num = OPA_TRAP_BAD_M_KEY;
	data.issuer_lid = cpu_to_be32(lid);
	data.ntc_256.lid = data.issuer_lid;
	data.ntc_256.method = mad->method;
	data.ntc_256.attr_id = mad->attr_id;
	data.ntc_256.attr_mod = mad->attr_mod;
	data.ntc_256.mkey = mkey;
	if (mad->mgmt_class == IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE) {
		data.ntc_256.dr_slid = dr_slid;
		data.ntc_256.dr_trunc_hop = IB_NOTICE_TRAP_DR_NOTICE;
		if (hop_cnt > ARRAY_SIZE(data.ntc_256.dr_rtn_path)) {
			data.ntc_256.dr_trunc_hop |=
				IB_NOTICE_TRAP_DR_TRUNC;
			hop_cnt = ARRAY_SIZE(data.ntc_256.dr_rtn_path);
		}
		data.ntc_256.dr_trunc_hop |= hop_cnt;
		memcpy(data.ntc_256.dr_rtn_path, return_path,
		       hop_cnt);
	}

	send_trap(ibp, &data, sizeof(data));
}

/*
 * Send a Port Capability Mask Changed trap (ch. 14.3.11).
 */
void hfi1_cap_mask_chg(struct rvt_dev_info *rdi, u8 port_num)
{
	struct opa_mad_notice_attr data;
	struct hfi1_ibdev *verbs_dev = dev_from_rdi(rdi);
	struct hfi1_devdata *dd = dd_from_dev(verbs_dev);
	struct hfi1_ibport *ibp = &dd->pport[port_num - 1].ibport_data;
	u32 lid = ppd_from_ibp(ibp)->lid;

	memset(&data, 0, sizeof(data));

	data.generic_type = IB_NOTICE_TYPE_INFO;
	data.prod_type_lsb = IB_NOTICE_PROD_CA;
	data.trap_num = OPA_TRAP_CHANGE_CAPABILITY;
	data.issuer_lid = cpu_to_be32(lid);
	data.ntc_144.lid = data.issuer_lid;
	data.ntc_144.new_cap_mask = cpu_to_be32(ibp->rvp.port_cap_flags);

	send_trap(ibp, &data, sizeof(data));
}

/*
 * Send a System Image GUID Changed trap (ch. 14.3.12).
 */
void hfi1_sys_guid_chg(struct hfi1_ibport *ibp)
{
	struct opa_mad_notice_attr data;
	u32 lid = ppd_from_ibp(ibp)->lid;

	memset(&data, 0, sizeof(data));

	data.generic_type = IB_NOTICE_TYPE_INFO;
	data.prod_type_lsb = IB_NOTICE_PROD_CA;
	data.trap_num = OPA_TRAP_CHANGE_SYSGUID;
	data.issuer_lid = cpu_to_be32(lid);
	data.ntc_145.new_sys_guid = ib_hfi1_sys_image_guid;
	data.ntc_145.lid = data.issuer_lid;

	send_trap(ibp, &data, sizeof(data));
}

/*
 * Send a Node Description Changed trap (ch. 14.3.13).
 */
void hfi1_node_desc_chg(struct hfi1_ibport *ibp)
{
	struct opa_mad_notice_attr data;
	u32 lid = ppd_from_ibp(ibp)->lid;

	memset(&data, 0, sizeof(data));

	data.generic_type = IB_NOTICE_TYPE_INFO;
	data.prod_type_lsb = IB_NOTICE_PROD_CA;
	data.trap_num = OPA_TRAP_CHANGE_CAPABILITY;
	data.issuer_lid = cpu_to_be32(lid);
	data.ntc_144.lid = data.issuer_lid;
	data.ntc_144.change_flags =
		cpu_to_be16(OPA_NOTICE_TRAP_NODE_DESC_CHG);

	send_trap(ibp, &data, sizeof(data));
}

static int __subn_get_opa_nodedesc(struct opa_smp *smp, u32 am,
				   u8 *data, struct ib_device *ibdev,
				   u8 port, u32 *resp_len)
{
	struct opa_node_description *nd;

	if (am) {
		smp->status |= IB_SMP_INVALID_FIELD;
		return reply((struct ib_mad_hdr *)smp);
	}

	nd = (struct opa_node_description *)data;

	memcpy(nd->data, ibdev->node_desc, sizeof(nd->data));

	if (resp_len)
		*resp_len += sizeof(*nd);

	return reply((struct ib_mad_hdr *)smp);
}

static int __subn_get_opa_nodeinfo(struct opa_smp *smp, u32 am, u8 *data,
				   struct ib_device *ibdev, u8 port,
				   u32 *resp_len)
{
	struct opa_node_info *ni;
	struct hfi1_devdata *dd = dd_from_ibdev(ibdev);
	unsigned pidx = port - 1; /* IB number port from 1, hw from 0 */

	ni = (struct opa_node_info *)data;

	/* GUID 0 is illegal */
	if (am || pidx >= dd->num_pports || dd->pport[pidx].guid == 0) {
		smp->status |= IB_SMP_INVALID_FIELD;
		return reply((struct ib_mad_hdr *)smp);
	}

	ni->port_guid = cpu_to_be64(dd->pport[pidx].guid);
	ni->base_version = OPA_MGMT_BASE_VERSION;
	ni->class_version = OPA_SMI_CLASS_VERSION;
	ni->node_type = 1; /* channel adapter */
	ni->num_ports = ibdev->phys_port_cnt;
	/* This is already in network order */
	ni->system_image_guid = ib_hfi1_sys_image_guid;
	/* Use first-port GUID as node */
	ni->node_guid = cpu_to_be64(dd->pport->guid);
	ni->partition_cap = cpu_to_be16(hfi1_get_npkeys(dd));
	ni->device_id = cpu_to_be16(dd->pcidev->device);
	ni->revision = cpu_to_be32(dd->minrev);
	ni->local_port_num = port;
	ni->vendor_id[0] = dd->oui1;
	ni->vendor_id[1] = dd->oui2;
	ni->vendor_id[2] = dd->oui3;

	if (resp_len)
		*resp_len += sizeof(*ni);

	return reply((struct ib_mad_hdr *)smp);
}

static int subn_get_nodeinfo(struct ib_smp *smp, struct ib_device *ibdev,
			     u8 port)
{
	struct ib_node_info *nip = (struct ib_node_info *)&smp->data;
	struct hfi1_devdata *dd = dd_from_ibdev(ibdev);
	unsigned pidx = port - 1; /* IB number port from 1, hw from 0 */

	/* GUID 0 is illegal */
	if (smp->attr_mod || pidx >= dd->num_pports ||
	    dd->pport[pidx].guid == 0)
		smp->status |= IB_SMP_INVALID_FIELD;
	else
		nip->port_guid = cpu_to_be64(dd->pport[pidx].guid);

	nip->base_version = OPA_MGMT_BASE_VERSION;
	nip->class_version = OPA_SMI_CLASS_VERSION;
	nip->node_type = 1; /* channel adapter */
	nip->num_ports = ibdev->phys_port_cnt;
	/* This is already in network order */
	nip->sys_guid = ib_hfi1_sys_image_guid;
	/* Use first-port GUID as node */
	nip->node_guid = cpu_to_be64(dd->pport->guid);
	nip->partition_cap = cpu_to_be16(hfi1_get_npkeys(dd));
	nip->device_id = cpu_to_be16(dd->pcidev->device);
	nip->revision = cpu_to_be32(dd->minrev);
	nip->local_port_num = port;
	nip->vendor_id[0] = dd->oui1;
	nip->vendor_id[1] = dd->oui2;
	nip->vendor_id[2] = dd->oui3;

	return reply((struct ib_mad_hdr *)smp);
}

static void set_link_width_enabled(struct hfi1_pportdata *ppd, u32 w)
{
	(void)hfi1_set_ib_cfg(ppd, HFI1_IB_CFG_LWID_ENB, w);
}

static void set_link_width_downgrade_enabled(struct hfi1_pportdata *ppd, u32 w)
{
	(void)hfi1_set_ib_cfg(ppd, HFI1_IB_CFG_LWID_DG_ENB, w);
}

static void set_link_speed_enabled(struct hfi1_pportdata *ppd, u32 s)
{
	(void)hfi1_set_ib_cfg(ppd, HFI1_IB_CFG_SPD_ENB, s);
}

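/*
 * Check an incoming SMP against the port M_Key: clear an expired lease,
 * accept the MAD when the key matches (or checking is disabled), and
 * otherwise count the violation, arm the lease timer, and send a bad
 * M_Key trap. Returns nonzero when the MAD fails the M_Key check.
 */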
static int check_mkey(struct hfi1_ibport *ibp, struct ib_mad_hdr *mad,
		      int mad_flags, __be64 mkey, __be32 dr_slid,
		      u8 return_path[], u8 hop_cnt)
{
	int valid_mkey = 0;
	int ret = 0;

	/* Is the mkey in the process of expiring? */
	if (ibp->rvp.mkey_lease_timeout &&
	    time_after_eq(jiffies, ibp->rvp.mkey_lease_timeout)) {
		/* Clear timeout and mkey protection field. */
		ibp->rvp.mkey_lease_timeout = 0;
		ibp->rvp.mkeyprot = 0;
	}

	if ((mad_flags & IB_MAD_IGNORE_MKEY) || ibp->rvp.mkey == 0 ||
	    ibp->rvp.mkey == mkey)
		valid_mkey = 1;

	/* Unset lease timeout on any valid Get/Set/TrapRepress */
	if (valid_mkey && ibp->rvp.mkey_lease_timeout &&
	    (mad->method == IB_MGMT_METHOD_GET ||
	     mad->method == IB_MGMT_METHOD_SET ||
	     mad->method == IB_MGMT_METHOD_TRAP_REPRESS))
		ibp->rvp.mkey_lease_timeout = 0;

	if (!valid_mkey) {
		switch (mad->method) {
		case IB_MGMT_METHOD_GET:
			/* Bad mkey not a violation below level 2 */
			if (ibp->rvp.mkeyprot < 2)
				break;
		case IB_MGMT_METHOD_SET:
		case IB_MGMT_METHOD_TRAP_REPRESS:
			if (ibp->rvp.mkey_violations != 0xFFFF)
				++ibp->rvp.mkey_violations;
			if (!ibp->rvp.mkey_lease_timeout &&
			    ibp->rvp.mkey_lease_period)
				ibp->rvp.mkey_lease_timeout = jiffies +
					ibp->rvp.mkey_lease_period * HZ;
			/* Generate a trap notice. */
			bad_mkey(ibp, mad, mkey, dr_slid, return_path,
				 hop_cnt);
			ret = 1;
		}
	}

	return ret;
}

/*
 * The SMA caches reads from LCB registers in case the LCB is unavailable.
 * (The LCB is unavailable in certain link states, for example.)
 */
struct lcb_datum {
	u32 off;
	u64 val;
};

static struct lcb_datum lcb_cache[] = {
	{ DC_LCB_STS_ROUND_TRIP_LTP_CNT, 0 },
};

static int write_lcb_cache(u32 off, u64 val)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(lcb_cache); i++) {
		if (lcb_cache[i].off == off) {
			lcb_cache[i].val = val;
			return 0;
		}
	}

	pr_warn("%s bad offset 0x%x\n", __func__, off);
	return -1;
}

static int read_lcb_cache(u32 off, u64 *val)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(lcb_cache); i++) {
		if (lcb_cache[i].off == off) {
			*val = lcb_cache[i].val;
			return 0;
		}
	}

	pr_warn("%s bad offset 0x%x\n", __func__, off);
	return -1;
}

void read_ltp_rtt(struct hfi1_devdata *dd)
{
	u64 reg;

	if (read_lcb_csr(dd, DC_LCB_STS_ROUND_TRIP_LTP_CNT, &reg))
		dd_dev_err(dd, "%s: unable to read LTP RTT\n", __func__);
	else
		write_lcb_cache(DC_LCB_STS_ROUND_TRIP_LTP_CNT, reg);
}

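/*
 * OPA PortInfo Get: fill in the opa_port_info block for the single port
 * selected by the attribute modifier (link widths and speeds, port states,
 * per-VL MTUs, buffer units, and so on).
 */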
static int __subn_get_opa_portinfo(struct opa_smp *smp, u32 am, u8 *data,
				   struct ib_device *ibdev, u8 port,
				   u32 *resp_len)
{
	int i;
	struct hfi1_devdata *dd;
	struct hfi1_pportdata *ppd;
	struct hfi1_ibport *ibp;
	struct opa_port_info *pi = (struct opa_port_info *)data;
	u8 mtu;
	u8 credit_rate;
	u8 is_beaconing_active;
	u32 state;
	u32 num_ports = OPA_AM_NPORT(am);
	u32 start_of_sm_config = OPA_AM_START_SM_CFG(am);
	u32 buffer_units;
	u64 tmp = 0;

	if (num_ports != 1) {
		smp->status |= IB_SMP_INVALID_FIELD;
		return reply((struct ib_mad_hdr *)smp);
	}

	dd = dd_from_ibdev(ibdev);
	/* IB numbers ports from 1, hw from 0 */
	ppd = dd->pport + (port - 1);
	ibp = &ppd->ibport_data;

	if (ppd->vls_supported / 2 > ARRAY_SIZE(pi->neigh_mtu.pvlx_to_mtu) ||
	    ppd->vls_supported > ARRAY_SIZE(dd->vld)) {
		smp->status |= IB_SMP_INVALID_FIELD;
		return reply((struct ib_mad_hdr *)smp);
	}

	pi->lid = cpu_to_be32(ppd->lid);

	/* Only return the mkey if the protection field allows it. */
	if (!(smp->method == IB_MGMT_METHOD_GET &&
	      ibp->rvp.mkey != smp->mkey &&
	      ibp->rvp.mkeyprot == 1))
		pi->mkey = ibp->rvp.mkey;

	pi->subnet_prefix = ibp->rvp.gid_prefix;
	pi->sm_lid = cpu_to_be32(ibp->rvp.sm_lid);
	pi->ib_cap_mask = cpu_to_be32(ibp->rvp.port_cap_flags);
	pi->mkey_lease_period = cpu_to_be16(ibp->rvp.mkey_lease_period);
	pi->sm_trap_qp = cpu_to_be32(ppd->sm_trap_qp);
	pi->sa_qp = cpu_to_be32(ppd->sa_qp);

	pi->link_width.enabled = cpu_to_be16(ppd->link_width_enabled);
	pi->link_width.supported = cpu_to_be16(ppd->link_width_supported);
	pi->link_width.active = cpu_to_be16(ppd->link_width_active);

	pi->link_width_downgrade.supported =
		cpu_to_be16(ppd->link_width_downgrade_supported);
	pi->link_width_downgrade.enabled =
		cpu_to_be16(ppd->link_width_downgrade_enabled);
	pi->link_width_downgrade.tx_active =
		cpu_to_be16(ppd->link_width_downgrade_tx_active);
	pi->link_width_downgrade.rx_active =
		cpu_to_be16(ppd->link_width_downgrade_rx_active);

	pi->link_speed.supported = cpu_to_be16(ppd->link_speed_supported);
	pi->link_speed.active = cpu_to_be16(ppd->link_speed_active);
	pi->link_speed.enabled = cpu_to_be16(ppd->link_speed_enabled);

	state = driver_lstate(ppd);

	if (start_of_sm_config && (state == IB_PORT_INIT))
		ppd->is_sm_config_started = 1;

	pi->port_phys_conf = (ppd->port_type & 0xf);

	pi->port_states.ledenable_offlinereason = ppd->neighbor_normal << 4;
	pi->port_states.ledenable_offlinereason |=
		ppd->is_sm_config_started << 5;
	/*
	 * This pairs with the memory barrier in hfi1_start_led_override to
	 * ensure that we read the correct state of LED beaconing represented
	 * by led_override_timer_active
	 */
	smp_rmb();
	is_beaconing_active = !!atomic_read(&ppd->led_override_timer_active);
	pi->port_states.ledenable_offlinereason |= is_beaconing_active << 6;
	pi->port_states.ledenable_offlinereason |=
		ppd->offline_disabled_reason;

	pi->port_states.portphysstate_portstate =
		(hfi1_ibphys_portstate(ppd) << 4) | state;

	pi->mkeyprotect_lmc = (ibp->rvp.mkeyprot << 6) | ppd->lmc;

	memset(pi->neigh_mtu.pvlx_to_mtu, 0, sizeof(pi->neigh_mtu.pvlx_to_mtu));
	for (i = 0; i < ppd->vls_supported; i++) {
		mtu = mtu_to_enum(dd->vld[i].mtu, HFI1_DEFAULT_ACTIVE_MTU);
		if ((i % 2) == 0)
			pi->neigh_mtu.pvlx_to_mtu[i / 2] |= (mtu << 4);
		else
			pi->neigh_mtu.pvlx_to_mtu[i / 2] |= mtu;
	}
	/* don't forget VL 15 */
	mtu = mtu_to_enum(dd->vld[15].mtu, 2048);
	pi->neigh_mtu.pvlx_to_mtu[15 / 2] |= mtu;
	pi->smsl = ibp->rvp.sm_sl & OPA_PI_MASK_SMSL;
	pi->operational_vls = hfi1_get_ib_cfg(ppd, HFI1_IB_CFG_OP_VLS);
	pi->partenforce_filterraw |=
		(ppd->linkinit_reason & OPA_PI_MASK_LINKINIT_REASON);
	if (ppd->part_enforce & HFI1_PART_ENFORCE_IN)
		pi->partenforce_filterraw |= OPA_PI_MASK_PARTITION_ENFORCE_IN;
	if (ppd->part_enforce & HFI1_PART_ENFORCE_OUT)
		pi->partenforce_filterraw |= OPA_PI_MASK_PARTITION_ENFORCE_OUT;
	pi->mkey_violations = cpu_to_be16(ibp->rvp.mkey_violations);
	/* P_KeyViolations are counted by hardware. */
	pi->pkey_violations = cpu_to_be16(ibp->rvp.pkey_violations);
	pi->qkey_violations = cpu_to_be16(ibp->rvp.qkey_violations);

	pi->vl.cap = ppd->vls_supported;
	pi->vl.high_limit = cpu_to_be16(ibp->rvp.vl_high_limit);
	pi->vl.arb_high_cap = (u8)hfi1_get_ib_cfg(ppd, HFI1_IB_CFG_VL_HIGH_CAP);
	pi->vl.arb_low_cap = (u8)hfi1_get_ib_cfg(ppd, HFI1_IB_CFG_VL_LOW_CAP);

	pi->clientrereg_subnettimeout = ibp->rvp.subnet_timeout;

	pi->port_link_mode = cpu_to_be16(OPA_PORT_LINK_MODE_OPA << 10 |
					 OPA_PORT_LINK_MODE_OPA << 5 |
					 OPA_PORT_LINK_MODE_OPA);

	pi->port_ltp_crc_mode = cpu_to_be16(ppd->port_ltp_crc_mode);

	pi->port_mode = cpu_to_be16(
				ppd->is_active_optimize_enabled ?
					OPA_PI_MASK_PORT_ACTIVE_OPTOMIZE : 0);

	pi->port_packet_format.supported =
		cpu_to_be16(OPA_PORT_PACKET_FORMAT_9B);
	pi->port_packet_format.enabled =
		cpu_to_be16(OPA_PORT_PACKET_FORMAT_9B);

	/* flit_control.interleave is (OPA V1, version .76):
	 * bits		use
	 * ----		---
	 * 2		res
	 * 2		DistanceSupported
	 * 2		DistanceEnabled
	 * 5		MaxNextLevelTxEnabled
	 * 5		MaxNestLevelRxSupported
	 *
	 * HFI supports only "distance mode 1" (see OPA V1, version .76,
	 * section 9.6.2), so set DistanceSupported, DistanceEnabled
	 * to 0x1.
	 */
	pi->flit_control.interleave = cpu_to_be16(0x1400);

	pi->link_down_reason = ppd->local_link_down_reason.sma;
	pi->neigh_link_down_reason = ppd->neigh_link_down_reason.sma;
	pi->port_error_action = cpu_to_be32(ppd->port_error_action);
	pi->mtucap = mtu_to_enum(hfi1_max_mtu, IB_MTU_4096);

	/* 32.768 usec. response time (guessing) */
	pi->resptimevalue = 3;

	pi->local_port_num = port;

	/* buffer info for FM */
	pi->overall_buffer_space = cpu_to_be16(dd->link_credits);

	pi->neigh_node_guid = cpu_to_be64(ppd->neighbor_guid);
	pi->neigh_port_num = ppd->neighbor_port_number;
	pi->port_neigh_mode =
		(ppd->neighbor_type & OPA_PI_MASK_NEIGH_NODE_TYPE) |
		(ppd->mgmt_allowed ? OPA_PI_MASK_NEIGH_MGMT_ALLOWED : 0) |
		(ppd->neighbor_fm_security ?
			OPA_PI_MASK_NEIGH_FW_AUTH_BYPASS : 0);

	/* HFIs shall always return VL15 credits to their
	 * neighbor in a timely manner, without any credit return pacing.
	 */
	credit_rate = 0;
	buffer_units = (dd->vau) & OPA_PI_MASK_BUF_UNIT_BUF_ALLOC;
	buffer_units |= (dd->vcu << 3) & OPA_PI_MASK_BUF_UNIT_CREDIT_ACK;
	buffer_units |= (credit_rate << 6) &
				OPA_PI_MASK_BUF_UNIT_VL15_CREDIT_RATE;
	buffer_units |= (dd->vl15_init << 11) & OPA_PI_MASK_BUF_UNIT_VL15_INIT;
	pi->buffer_units = cpu_to_be32(buffer_units);

	pi->opa_cap_mask = cpu_to_be16(OPA_CAP_MASK3_IsSharedSpaceSupported);

	/* HFI supports a replay buffer 128 LTPs in size */
	pi->replay_depth.buffer = 0x80;
	/* read the cached value of DC_LCB_STS_ROUND_TRIP_LTP_CNT */
	read_lcb_cache(DC_LCB_STS_ROUND_TRIP_LTP_CNT, &tmp);

	/*
	 * this counter is 16 bits wide, but the replay_depth.wire
	 * variable is only 8 bits
	 */
	if (tmp > 0xff)
		tmp = 0xff;
	pi->replay_depth.wire = tmp;

	if (resp_len)
		*resp_len += sizeof(struct opa_port_info);

	return reply((struct ib_mad_hdr *)smp);
}

/**
 * get_pkeys - return the PKEY table
 * @dd: the hfi1_ib device
 * @port: the IB port number
 * @pkeys: the pkey table is placed here
 */
static int get_pkeys(struct hfi1_devdata *dd, u8 port, u16 *pkeys)
{
	struct hfi1_pportdata *ppd = dd->pport + port - 1;

	memcpy(pkeys, ppd->pkeys, sizeof(ppd->pkeys));

	return 0;
}

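/*
 * OPA PKeyTable Get: return the requested partition key blocks. Only block
 * 0 is populated from the port's pkey table; out-of-range requests are
 * rejected as invalid.
 */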
static int __subn_get_opa_pkeytable(struct opa_smp *smp, u32 am, u8 *data,
				    struct ib_device *ibdev, u8 port,
				    u32 *resp_len)
{
	struct hfi1_devdata *dd = dd_from_ibdev(ibdev);
	u32 n_blocks_req = OPA_AM_NBLK(am);
	u32 start_block = am & 0x7ff;
	__be16 *p;
	u16 *q;
	int i;
	u16 n_blocks_avail;
	unsigned npkeys = hfi1_get_npkeys(dd);
	size_t size;

	if (n_blocks_req == 0) {
		pr_warn("OPA Get PKey AM Invalid : P = %d; B = 0x%x; N = 0x%x\n",
			port, start_block, n_blocks_req);
		smp->status |= IB_SMP_INVALID_FIELD;
		return reply((struct ib_mad_hdr *)smp);
	}

	n_blocks_avail = (u16)(npkeys / OPA_PARTITION_TABLE_BLK_SIZE) + 1;

	size = (n_blocks_req * OPA_PARTITION_TABLE_BLK_SIZE) * sizeof(u16);

	if (start_block + n_blocks_req > n_blocks_avail ||
	    n_blocks_req > OPA_NUM_PKEY_BLOCKS_PER_SMP) {
		pr_warn("OPA Get PKey AM Invalid : s 0x%x; req 0x%x; avail 0x%x; blk/smp 0x%lx\n",
			start_block, n_blocks_req, n_blocks_avail,
			OPA_NUM_PKEY_BLOCKS_PER_SMP);
		smp->status |= IB_SMP_INVALID_FIELD;
		return reply((struct ib_mad_hdr *)smp);
	}

	p = (__be16 *)data;
	q = (u16 *)data;
	/* get the real pkeys if we are requesting the first block */
	if (start_block == 0) {
		get_pkeys(dd, port, q);
		for (i = 0; i < npkeys; i++)
			p[i] = cpu_to_be16(q[i]);
		if (resp_len)
			*resp_len += size;
	} else {
		smp->status |= IB_SMP_INVALID_FIELD;
	}
	return reply((struct ib_mad_hdr *)smp);
}

enum {
	HFI_TRANSITION_DISALLOWED,
	HFI_TRANSITION_IGNORED,
	HFI_TRANSITION_ALLOWED,
	HFI_TRANSITION_UNDEFINED,
};

/*
 * Use shortened names to improve readability of
 * {logical,physical}_state_transitions
 */
enum {
	__D = HFI_TRANSITION_DISALLOWED,
	__I = HFI_TRANSITION_IGNORED,
	__A = HFI_TRANSITION_ALLOWED,
	__U = HFI_TRANSITION_UNDEFINED,
};

/*
 * IB_PORTPHYSSTATE_POLLING (2) through OPA_PORTPHYSSTATE_MAX (11) are
 * represented in physical_state_transitions.
 */
#define __N_PHYSTATES (OPA_PORTPHYSSTATE_MAX - IB_PORTPHYSSTATE_POLLING + 1)

/*
 * Within physical_state_transitions, rows represent "old" states,
 * columns "new" states, and physical_state_transitions.allowed[old][new]
 * indicates if the transition from old state to new state is legal (see
 * OPAg1v1, Table 6-4).
 */
static const struct {
	u8 allowed[__N_PHYSTATES][__N_PHYSTATES];
} physical_state_transitions = {
	{
		/* 2    3    4    5    6    7    8    9   10   11 */
	/* 2 */	{ __A, __A, __D, __D, __D, __D, __D, __D, __D, __D },
	/* 3 */	{ __A, __I, __D, __D, __D, __D, __D, __D, __D, __A },
	/* 4 */	{ __U, __U, __U, __U, __U, __U, __U, __U, __U, __U },
	/* 5 */	{ __A, __A, __D, __I, __D, __D, __D, __D, __D, __D },
	/* 6 */	{ __U, __U, __U, __U, __U, __U, __U, __U, __U, __U },
	/* 7 */	{ __D, __A, __D, __D, __D, __I, __D, __D, __D, __D },
	/* 8 */	{ __U, __U, __U, __U, __U, __U, __U, __U, __U, __U },
	/* 9 */	{ __I, __A, __D, __D, __D, __D, __D, __I, __D, __D },
	/*10 */	{ __U, __U, __U, __U, __U, __U, __U, __U, __U, __U },
	/*11 */	{ __D, __A, __D, __D, __D, __D, __D, __D, __D, __I },
	}
};

/*
 * IB_PORT_DOWN (1) through IB_PORT_ACTIVE_DEFER (5) are represented in
 * logical_state_transitions
 */

#define __N_LOGICAL_STATES (IB_PORT_ACTIVE_DEFER - IB_PORT_DOWN + 1)

/*
 * Within logical_state_transitions rows represent "old" states,
 * columns "new" states, and logical_state_transitions.allowed[old][new]
 * indicates if the transition from old state to new state is legal (see
 * OPAg1v1, Table 9-12).
 */
static const struct {
	u8 allowed[__N_LOGICAL_STATES][__N_LOGICAL_STATES];
} logical_state_transitions = {
	{
		/* 1    2    3    4    5 */
	/* 1 */	{ __I, __D, __D, __D, __U},
	/* 2 */	{ __D, __I, __A, __D, __U},
	/* 3 */	{ __D, __D, __I, __A, __U},
	/* 4 */	{ __D, __D, __I, __I, __U},
	/* 5 */	{ __U, __U, __U, __U, __U},
	}
};

static int logical_transition_allowed(int old, int new)
{
	if (old < IB_PORT_NOP || old > IB_PORT_ACTIVE_DEFER ||
	    new < IB_PORT_NOP || new > IB_PORT_ACTIVE_DEFER) {
		pr_warn("invalid logical state(s) (old %d new %d)\n",
			old, new);
		return HFI_TRANSITION_UNDEFINED;
	}

	if (new == IB_PORT_NOP)
		return HFI_TRANSITION_ALLOWED; /* always allowed */

	/* adjust states for indexing into logical_state_transitions */
	old -= IB_PORT_DOWN;
	new -= IB_PORT_DOWN;

	if (old < 0 || new < 0)
		return HFI_TRANSITION_UNDEFINED;
	return logical_state_transitions.allowed[old][new];
}

static int physical_transition_allowed(int old, int new)
{
	if (old < IB_PORTPHYSSTATE_NOP || old > OPA_PORTPHYSSTATE_MAX ||
	    new < IB_PORTPHYSSTATE_NOP || new > OPA_PORTPHYSSTATE_MAX) {
		pr_warn("invalid physical state(s) (old %d new %d)\n",
			old, new);
		return HFI_TRANSITION_UNDEFINED;
	}

	if (new == IB_PORTPHYSSTATE_NOP)
		return HFI_TRANSITION_ALLOWED; /* always allowed */

	/* adjust states for indexing into physical_state_transitions */
	old -= IB_PORTPHYSSTATE_POLLING;
	new -= IB_PORTPHYSSTATE_POLLING;

	if (old < 0 || new < 0)
		return HFI_TRANSITION_UNDEFINED;
	return physical_state_transitions.allowed[old][new];
}

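/*
 * Validate a requested (logical, physical) port state change against the
 * transition tables above. Returns one of the HFI_TRANSITION_* values; an
 * Offline -> Polling physical state request is explicitly ignored.
 */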
static int port_states_transition_allowed(struct hfi1_pportdata *ppd,
					  u32 logical_new, u32 physical_new)
{
	u32 physical_old = driver_physical_state(ppd);
	u32 logical_old = driver_logical_state(ppd);
	int ret, logical_allowed, physical_allowed;

	ret = logical_transition_allowed(logical_old, logical_new);
	logical_allowed = ret;

	if (ret == HFI_TRANSITION_DISALLOWED ||
	    ret == HFI_TRANSITION_UNDEFINED) {
		pr_warn("invalid logical state transition %s -> %s\n",
			opa_lstate_name(logical_old),
			opa_lstate_name(logical_new));
		return ret;
	}

	ret = physical_transition_allowed(physical_old, physical_new);
	physical_allowed = ret;

	if (ret == HFI_TRANSITION_DISALLOWED ||
	    ret == HFI_TRANSITION_UNDEFINED) {
		pr_warn("invalid physical state transition %s -> %s\n",
			opa_pstate_name(physical_old),
			opa_pstate_name(physical_new));
		return ret;
	}

	if (logical_allowed == HFI_TRANSITION_IGNORED &&
	    physical_allowed == HFI_TRANSITION_IGNORED)
		return HFI_TRANSITION_IGNORED;

	/*
	 * A change request of Physical Port State from
	 * 'Offline' to 'Polling' should be ignored.
	 */
	if ((physical_old == OPA_PORTPHYSSTATE_OFFLINE) &&
	    (physical_new == IB_PORTPHYSSTATE_POLLING))
		return HFI_TRANSITION_IGNORED;

	/*
	 * Either physical_allowed or logical_allowed is
	 * HFI_TRANSITION_ALLOWED.
	 */
	return HFI_TRANSITION_ALLOWED;
}

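/*
 * Apply a validated logical/physical port state request from the FM:
 * translate it into the corresponding host link state (poll, disable,
 * armed, active, ...) and drive the link state machine accordingly.
 */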
static int set_port_states(struct hfi1_pportdata *ppd, struct opa_smp *smp,
			   u32 logical_state, u32 phys_state,
			   int suppress_idle_sma)
{
	struct hfi1_devdata *dd = ppd->dd;
	u32 link_state;
	int ret;

	ret = port_states_transition_allowed(ppd, logical_state, phys_state);
	if (ret == HFI_TRANSITION_DISALLOWED ||
	    ret == HFI_TRANSITION_UNDEFINED) {
		/* error message emitted above */
		smp->status |= IB_SMP_INVALID_FIELD;
		return 0;
	}

	if (ret == HFI_TRANSITION_IGNORED)
		return 0;

	if ((phys_state != IB_PORTPHYSSTATE_NOP) &&
	    !(logical_state == IB_PORT_DOWN ||
	      logical_state == IB_PORT_NOP)) {
		pr_warn("SubnSet(OPA_PortInfo) port state invalid: logical_state 0x%x physical_state 0x%x\n",
			logical_state, phys_state);
		smp->status |= IB_SMP_INVALID_FIELD;
	}

	/*
	 * Logical state changes are summarized in OPAv1g1 spec.,
	 * Table 9-12; physical state changes are summarized in
	 * OPAv1g1 spec., Table 6.4.
	 */
	switch (logical_state) {
	case IB_PORT_NOP:
		if (phys_state == IB_PORTPHYSSTATE_NOP)
			break;
		/* FALLTHROUGH */
	case IB_PORT_DOWN:
		if (phys_state == IB_PORTPHYSSTATE_NOP) {
			link_state = HLS_DN_DOWNDEF;
		} else if (phys_state == IB_PORTPHYSSTATE_POLLING) {
			link_state = HLS_DN_POLL;
			set_link_down_reason(ppd, OPA_LINKDOWN_REASON_FM_BOUNCE,
					     0, OPA_LINKDOWN_REASON_FM_BOUNCE);
		} else if (phys_state == IB_PORTPHYSSTATE_DISABLED) {
			link_state = HLS_DN_DISABLE;
		} else {
			pr_warn("SubnSet(OPA_PortInfo) invalid physical state 0x%x\n",
				phys_state);
			smp->status |= IB_SMP_INVALID_FIELD;
			break;
		}

		if ((link_state == HLS_DN_POLL ||
		     link_state == HLS_DN_DOWNDEF)) {
			/*
			 * Going to poll. No matter what the current state,
			 * always move offline first, then tune and start the
			 * link. This correctly handles a FM link bounce and
			 * a link enable. Going offline is a no-op if already
			 * offline.
			 */
			set_link_state(ppd, HLS_DN_OFFLINE);
			tune_serdes(ppd);
			start_link(ppd);
		} else {
			set_link_state(ppd, link_state);
		}
		if (link_state == HLS_DN_DISABLE &&
		    (ppd->offline_disabled_reason >
		     HFI1_ODR_MASK(OPA_LINKDOWN_REASON_SMA_DISABLED) ||
		     ppd->offline_disabled_reason ==
		     HFI1_ODR_MASK(OPA_LINKDOWN_REASON_NONE)))
			ppd->offline_disabled_reason =
			HFI1_ODR_MASK(OPA_LINKDOWN_REASON_SMA_DISABLED);
		/*
		 * Don't send a reply if the response would be sent
		 * through the disabled port.
		 */
		if (link_state == HLS_DN_DISABLE && smp->hop_cnt)
			return IB_MAD_RESULT_SUCCESS | IB_MAD_RESULT_CONSUMED;
		break;
	case IB_PORT_ARMED:
		ret = set_link_state(ppd, HLS_UP_ARMED);
		if ((ret == 0) && (suppress_idle_sma == 0))
			send_idle_sma(dd, SMA_IDLE_ARM);
		break;
	case IB_PORT_ACTIVE:
		if (ppd->neighbor_normal) {
			ret = set_link_state(ppd, HLS_UP_ACTIVE);
			if (ret == 0)
				send_idle_sma(dd, SMA_IDLE_ACTIVE);
		} else {
			pr_warn("SubnSet(OPA_PortInfo) Cannot move to Active with NeighborNormal 0\n");
			smp->status |= IB_SMP_INVALID_FIELD;
		}
		break;
	default:
		pr_warn("SubnSet(OPA_PortInfo) invalid logical state 0x%x\n",
			logical_state);
		smp->status |= IB_SMP_INVALID_FIELD;
	}

	return 0;
}

/**
 * subn_set_opa_portinfo - set port information
 * @smp: the incoming SM packet
 * @ibdev: the infiniband device
 * @port: the port on the device
 *
 */
static int __subn_set_opa_portinfo(struct opa_smp *smp, u32 am, u8 *data,
				   struct ib_device *ibdev, u8 port,
				   u32 *resp_len)
{
	struct opa_port_info *pi = (struct opa_port_info *)data;
	struct ib_event event;
	struct hfi1_devdata *dd;
	struct hfi1_pportdata *ppd;
	struct hfi1_ibport *ibp;
	u8 clientrereg;
	unsigned long flags;
	u32 smlid, opa_lid; /* tmp vars to hold LID values */
	u16 lid;
	u8 ls_old, ls_new, ps_new;
	u8 vls;
	u8 msl;
	u8 crc_enabled;
	u16 lse, lwe, mtu;
	u32 num_ports = OPA_AM_NPORT(am);
	u32 start_of_sm_config = OPA_AM_START_SM_CFG(am);
	int ret, i, invalid = 0, call_set_mtu = 0;
	int call_link_downgrade_policy = 0;

	if (num_ports != 1) {
		smp->status |= IB_SMP_INVALID_FIELD;
		return reply((struct ib_mad_hdr *)smp);
	}

	opa_lid = be32_to_cpu(pi->lid);
	if (opa_lid & 0xFFFF0000) {
		pr_warn("OPA_PortInfo lid out of range: %X\n", opa_lid);
		smp->status |= IB_SMP_INVALID_FIELD;
		goto get_only;
	}

	lid = (u16)(opa_lid & 0x0000FFFF);

	smlid = be32_to_cpu(pi->sm_lid);
	if (smlid & 0xFFFF0000) {
		pr_warn("OPA_PortInfo SM lid out of range: %X\n", smlid);
		smp->status |= IB_SMP_INVALID_FIELD;
		goto get_only;
	}
	smlid &= 0x0000FFFF;

	clientrereg = (pi->clientrereg_subnettimeout &
		       OPA_PI_MASK_CLIENT_REREGISTER);

	dd = dd_from_ibdev(ibdev);
	/* IB numbers ports from 1, hw from 0 */
	ppd = dd->pport + (port - 1);
	ibp = &ppd->ibport_data;
	event.device = ibdev;
	event.element.port_num = port;

	ls_old = driver_lstate(ppd);

	ibp->rvp.mkey = pi->mkey;
	ibp->rvp.gid_prefix = pi->subnet_prefix;
	ibp->rvp.mkey_lease_period = be16_to_cpu(pi->mkey_lease_period);

	/* Must be a valid unicast LID address. */
	if ((lid == 0 && ls_old > IB_PORT_INIT) ||
	    lid >= be16_to_cpu(IB_MULTICAST_LID_BASE)) {
		smp->status |= IB_SMP_INVALID_FIELD;
		pr_warn("SubnSet(OPA_PortInfo) lid invalid 0x%x\n",
			lid);
	} else if (ppd->lid != lid ||
		   ppd->lmc != (pi->mkeyprotect_lmc & OPA_PI_MASK_LMC)) {
		if (ppd->lid != lid)
			hfi1_set_uevent_bits(ppd, _HFI1_EVENT_LID_CHANGE_BIT);
		if (ppd->lmc != (pi->mkeyprotect_lmc & OPA_PI_MASK_LMC))
			hfi1_set_uevent_bits(ppd, _HFI1_EVENT_LMC_CHANGE_BIT);
		hfi1_set_lid(ppd, lid, pi->mkeyprotect_lmc & OPA_PI_MASK_LMC);
		event.event = IB_EVENT_LID_CHANGE;
		ib_dispatch_event(&event);
	}

	msl = pi->smsl & OPA_PI_MASK_SMSL;
	if (pi->partenforce_filterraw & OPA_PI_MASK_LINKINIT_REASON)
		ppd->linkinit_reason =
			(pi->partenforce_filterraw &
			 OPA_PI_MASK_LINKINIT_REASON);
	/* enable/disable SW pkey checking as per FM control */
	if (pi->partenforce_filterraw & OPA_PI_MASK_PARTITION_ENFORCE_IN)
		ppd->part_enforce |= HFI1_PART_ENFORCE_IN;
	else
		ppd->part_enforce &= ~HFI1_PART_ENFORCE_IN;

	if (pi->partenforce_filterraw & OPA_PI_MASK_PARTITION_ENFORCE_OUT)
		ppd->part_enforce |= HFI1_PART_ENFORCE_OUT;
	else
		ppd->part_enforce &= ~HFI1_PART_ENFORCE_OUT;

	/* Must be a valid unicast LID address. */
	if ((smlid == 0 && ls_old > IB_PORT_INIT) ||
	    smlid >= be16_to_cpu(IB_MULTICAST_LID_BASE)) {
		smp->status |= IB_SMP_INVALID_FIELD;
		pr_warn("SubnSet(OPA_PortInfo) smlid invalid 0x%x\n", smlid);
	} else if (smlid != ibp->rvp.sm_lid || msl != ibp->rvp.sm_sl) {
		pr_warn("SubnSet(OPA_PortInfo) smlid 0x%x\n", smlid);
		spin_lock_irqsave(&ibp->rvp.lock, flags);
		if (ibp->rvp.sm_ah) {
			if (smlid != ibp->rvp.sm_lid)
				ibp->rvp.sm_ah->attr.dlid = smlid;
			if (msl != ibp->rvp.sm_sl)
				ibp->rvp.sm_ah->attr.sl = msl;
		}
		spin_unlock_irqrestore(&ibp->rvp.lock, flags);
		if (smlid != ibp->rvp.sm_lid)
			ibp->rvp.sm_lid = smlid;
		if (msl != ibp->rvp.sm_sl)
			ibp->rvp.sm_sl = msl;
		event.event = IB_EVENT_SM_CHANGE;
		ib_dispatch_event(&event);
	}

	if (pi->link_down_reason == 0) {
		ppd->local_link_down_reason.sma = 0;
		ppd->local_link_down_reason.latest = 0;
	}

	if (pi->neigh_link_down_reason == 0) {
		ppd->neigh_link_down_reason.sma = 0;
		ppd->neigh_link_down_reason.latest = 0;
	}

	ppd->sm_trap_qp = be32_to_cpu(pi->sm_trap_qp);
	ppd->sa_qp = be32_to_cpu(pi->sa_qp);

	ppd->port_error_action = be32_to_cpu(pi->port_error_action);
	lwe = be16_to_cpu(pi->link_width.enabled);
	if (lwe) {
		if (lwe == OPA_LINK_WIDTH_RESET ||
		    lwe == OPA_LINK_WIDTH_RESET_OLD)
			set_link_width_enabled(ppd, ppd->link_width_supported);
		else if ((lwe & ~ppd->link_width_supported) == 0)
			set_link_width_enabled(ppd, lwe);
		else
			smp->status |= IB_SMP_INVALID_FIELD;
	}
	lwe = be16_to_cpu(pi->link_width_downgrade.enabled);
	/* LWD.E is always applied - 0 means "disabled" */
	if (lwe == OPA_LINK_WIDTH_RESET ||
	    lwe == OPA_LINK_WIDTH_RESET_OLD) {
		set_link_width_downgrade_enabled(ppd,
				ppd->link_width_downgrade_supported);
	} else if ((lwe & ~ppd->link_width_downgrade_supported) == 0) {
		/* only set and apply if something changed */
		if (lwe != ppd->link_width_downgrade_enabled) {
			set_link_width_downgrade_enabled(ppd, lwe);
			call_link_downgrade_policy = 1;
		}
	} else {
		smp->status |= IB_SMP_INVALID_FIELD;
	}
	lse = be16_to_cpu(pi->link_speed.enabled);
	if (lse) {
		if (lse & be16_to_cpu(pi->link_speed.supported))
			set_link_speed_enabled(ppd, lse);
		else
			smp->status |= IB_SMP_INVALID_FIELD;
	}

	ibp->rvp.mkeyprot =
		(pi->mkeyprotect_lmc & OPA_PI_MASK_MKEY_PROT_BIT) >> 6;
	ibp->rvp.vl_high_limit = be16_to_cpu(pi->vl.high_limit) & 0xFF;
	(void)hfi1_set_ib_cfg(ppd, HFI1_IB_CFG_VL_HIGH_LIMIT,
			      ibp->rvp.vl_high_limit);

	if (ppd->vls_supported / 2 > ARRAY_SIZE(pi->neigh_mtu.pvlx_to_mtu) ||
	    ppd->vls_supported > ARRAY_SIZE(dd->vld)) {
		smp->status |= IB_SMP_INVALID_FIELD;
		return reply((struct ib_mad_hdr *)smp);
	}
	for (i = 0; i < ppd->vls_supported; i++) {
		if ((i % 2) == 0)
			mtu = enum_to_mtu((pi->neigh_mtu.pvlx_to_mtu[i / 2] >>
					   4) & 0xF);
		else
			mtu = enum_to_mtu(pi->neigh_mtu.pvlx_to_mtu[i / 2] &
					  0xF);
		if (mtu == 0xffff) {
			pr_warn("SubnSet(OPA_PortInfo) mtu invalid %d (0x%x)\n",
				mtu,
				(pi->neigh_mtu.pvlx_to_mtu[0] >> 4) & 0xF);
			smp->status |= IB_SMP_INVALID_FIELD;
			mtu = hfi1_max_mtu; /* use a valid MTU */
		}
		if (dd->vld[i].mtu != mtu) {
			dd_dev_info(dd,
				    "MTU change on vl %d from %d to %d\n",
				    i, dd->vld[i].mtu, mtu);
			dd->vld[i].mtu = mtu;
			call_set_mtu++;
		}
	}
	/* As per OPAV1 spec: VL15 must support and be configured
	 * for operation with a 2048 or larger MTU.
	 */
	mtu = enum_to_mtu(pi->neigh_mtu.pvlx_to_mtu[15 / 2] & 0xF);
	if (mtu < 2048 || mtu == 0xffff)
		mtu = 2048;
	if (dd->vld[15].mtu != mtu) {
		dd_dev_info(dd,
			    "MTU change on vl 15 from %d to %d\n",
			    dd->vld[15].mtu, mtu);
		dd->vld[15].mtu = mtu;
		call_set_mtu++;
	}
	if (call_set_mtu)
		set_mtu(ppd);

	/* Set operational VLs */
	vls = pi->operational_vls & OPA_PI_MASK_OPERATIONAL_VL;
	if (vls) {
		if (vls > ppd->vls_supported) {
			pr_warn("SubnSet(OPA_PortInfo) VL's supported invalid %d\n",
				pi->operational_vls);
			smp->status |= IB_SMP_INVALID_FIELD;
		} else {
			if (hfi1_set_ib_cfg(ppd, HFI1_IB_CFG_OP_VLS,
					    vls) == -EINVAL)
				smp->status |= IB_SMP_INVALID_FIELD;
		}
	}

	if (pi->mkey_violations == 0)
		ibp->rvp.mkey_violations = 0;

	if (pi->pkey_violations == 0)
		ibp->rvp.pkey_violations = 0;

	if (pi->qkey_violations == 0)
		ibp->rvp.qkey_violations = 0;

	ibp->rvp.subnet_timeout =
		pi->clientrereg_subnettimeout & OPA_PI_MASK_SUBNET_TIMEOUT;

	crc_enabled = be16_to_cpu(pi->port_ltp_crc_mode);
	crc_enabled >>= 4;
	crc_enabled &= 0xf;

	if (crc_enabled != 0)
		ppd->port_crc_mode_enabled = port_ltp_to_cap(crc_enabled);

	ppd->is_active_optimize_enabled =
			!!(be16_to_cpu(pi->port_mode)
					& OPA_PI_MASK_PORT_ACTIVE_OPTOMIZE);

	ls_new = pi->port_states.portphysstate_portstate &
			OPA_PI_MASK_PORT_STATE;
	ps_new = (pi->port_states.portphysstate_portstate &
			OPA_PI_MASK_PORT_PHYSICAL_STATE) >> 4;

	if (ls_old == IB_PORT_INIT) {
		if (start_of_sm_config) {
			if (ls_new == ls_old || (ls_new == IB_PORT_ARMED))
				ppd->is_sm_config_started = 1;
		} else if (ls_new == IB_PORT_ARMED) {
			if (ppd->is_sm_config_started == 0)
				invalid = 1;
		}
	}

	/* Handle CLIENT_REREGISTER event b/c SM asked us for it */
	if (clientrereg) {
		event.event = IB_EVENT_CLIENT_REREGISTER;
		ib_dispatch_event(&event);
	}

	/*
	 * Do the port state change now that the other link parameters
	 * have been set.
	 * Changing the port physical state only makes sense if the link
	 * is down or is being set to down.
	 */

	ret = set_port_states(ppd, smp, ls_new, ps_new, invalid);
	if (ret)
		return ret;

	ret = __subn_get_opa_portinfo(smp, am, data, ibdev, port, resp_len);

	/* restore re-reg bit per o14-12.2.1 */
	pi->clientrereg_subnettimeout |= clientrereg;

	/*
	 * Apply the new link downgrade policy. This may result in a link
	 * bounce. Do this after everything else so things are settled.
	 * Possible problem: if setting the port state above fails, then
	 * the policy change is not applied.
	 */
	if (call_link_downgrade_policy)
		apply_link_downgrade_policy(ppd, 0);

	return ret;

get_only:
	return __subn_get_opa_portinfo(smp, am, data, ibdev, port, resp_len);
}

/**
 * set_pkeys - set the PKEY table for ctxt 0
 * @dd: the hfi1_ib device
 * @port: the IB port number
 * @pkeys: the PKEY table
 */
static int set_pkeys(struct hfi1_devdata *dd, u8 port, u16 *pkeys)
{
	struct hfi1_pportdata *ppd;
	int i;
	int changed = 0;
	int update_includes_mgmt_partition = 0;

	/*
	 * IB port one/two always maps to context zero/one,
	 * always a kernel context, no locking needed
	 * If we get here with ppd setup, no need to check
	 * that rcd is valid.
	 */
	ppd = dd->pport + (port - 1);
	/*
	 * If the update does not include the management pkey, don't do it.
	 */
	for (i = 0; i < ARRAY_SIZE(ppd->pkeys); i++) {
		if (pkeys[i] == LIM_MGMT_P_KEY) {
			update_includes_mgmt_partition = 1;
			break;
		}
	}

	if (!update_includes_mgmt_partition)
		return 1;

	for (i = 0; i < ARRAY_SIZE(ppd->pkeys); i++) {
		u16 key = pkeys[i];
		u16 okey = ppd->pkeys[i];

		if (key == okey)
			continue;
		/*
		 * Don't update pkeys[2], if an HFI port without MgmtAllowed
		 * by neighbor is a switch.
		 */
		if (i == 2 && !ppd->mgmt_allowed && ppd->neighbor_type == 1)
			continue;
		/*
		 * The SM gives us the complete PKey table. We have
		 * to ensure that we put the PKeys in the matching
		 * slots.
		 */
		ppd->pkeys[i] = key;
		changed = 1;
	}

	if (changed) {
		(void)hfi1_set_ib_cfg(ppd, HFI1_IB_CFG_PKEYS, 0);
		hfi1_event_pkey_change(dd, port);
	}

	return 0;
}

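/*
 * OPA PKeyTable Set: byte-swap the blocks sent by the FM and, for block 0,
 * apply them through set_pkeys() before echoing the table back via the
 * corresponding Get handler.
 */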
static int __subn_set_opa_pkeytable(struct opa_smp *smp, u32 am, u8 *data,
				    struct ib_device *ibdev, u8 port,
				    u32 *resp_len)
{
	struct hfi1_devdata *dd = dd_from_ibdev(ibdev);
	u32 n_blocks_sent = OPA_AM_NBLK(am);
	u32 start_block = am & 0x7ff;
	u16 *p = (u16 *)data;
	__be16 *q = (__be16 *)data;
	int i;
	u16 n_blocks_avail;
	unsigned npkeys = hfi1_get_npkeys(dd);

	if (n_blocks_sent == 0) {
		pr_warn("OPA Get PKey AM Invalid : P = %d; B = 0x%x; N = 0x%x\n",
			port, start_block, n_blocks_sent);
		smp->status |= IB_SMP_INVALID_FIELD;
		return reply((struct ib_mad_hdr *)smp);
	}

	n_blocks_avail = (u16)(npkeys / OPA_PARTITION_TABLE_BLK_SIZE) + 1;

	if (start_block + n_blocks_sent > n_blocks_avail ||
	    n_blocks_sent > OPA_NUM_PKEY_BLOCKS_PER_SMP) {
		pr_warn("OPA Set PKey AM Invalid : s 0x%x; req 0x%x; avail 0x%x; blk/smp 0x%lx\n",
			start_block, n_blocks_sent, n_blocks_avail,
			OPA_NUM_PKEY_BLOCKS_PER_SMP);
		smp->status |= IB_SMP_INVALID_FIELD;
		return reply((struct ib_mad_hdr *)smp);
	}

	for (i = 0; i < n_blocks_sent * OPA_PARTITION_TABLE_BLK_SIZE; i++)
		p[i] = be16_to_cpu(q[i]);

	if (start_block == 0 && set_pkeys(dd, port, p) != 0) {
		smp->status |= IB_SMP_INVALID_FIELD;
		return reply((struct ib_mad_hdr *)smp);
	}

	return __subn_get_opa_pkeytable(smp, am, data, ibdev, port, resp_len);
}

static int get_sc2vlt_tables(struct hfi1_devdata *dd, void *data)
{
	u64 *val = data;

	*val++ = read_csr(dd, SEND_SC2VLT0);
	*val++ = read_csr(dd, SEND_SC2VLT1);
	*val++ = read_csr(dd, SEND_SC2VLT2);
	*val++ = read_csr(dd, SEND_SC2VLT3);
	return 0;
}

#define ILLEGAL_VL 12
/*
 * filter_sc2vlt changes mappings to VL15 to ILLEGAL_VL (except
 * for SC15, which must map to VL15). If we don't remap things this
 * way it is possible for VL15 counters to increment when we try to
 * send on a SC which is mapped to an invalid VL.
 */
static void filter_sc2vlt(void *data)
{
	int i;
	u8 *pd = data;

	for (i = 0; i < OPA_MAX_SCS; i++) {
		if (i == 15)
			continue;
		if ((pd[i] & 0x1f) == 0xf)
			pd[i] = ILLEGAL_VL;
	}
}

static int set_sc2vlt_tables(struct hfi1_devdata *dd, void *data)
{
	u64 *val = data;

	filter_sc2vlt(data);

	write_csr(dd, SEND_SC2VLT0, *val++);
	write_csr(dd, SEND_SC2VLT1, *val++);
	write_csr(dd, SEND_SC2VLT2, *val++);
	write_csr(dd, SEND_SC2VLT3, *val++);
	write_seqlock_irq(&dd->sc2vl_lock);
	memcpy(dd->sc2vl, data, sizeof(dd->sc2vl));
	write_sequnlock_irq(&dd->sc2vl_lock);
	return 0;
}

static int __subn_get_opa_sl_to_sc(struct opa_smp *smp, u32 am, u8 *data,
				   struct ib_device *ibdev, u8 port,
				   u32 *resp_len)
{
	struct hfi1_ibport *ibp = to_iport(ibdev, port);
	u8 *p = data;
	size_t size = ARRAY_SIZE(ibp->sl_to_sc); /* == 32 */
	unsigned i;

	if (am) {
		smp->status |= IB_SMP_INVALID_FIELD;
		return reply((struct ib_mad_hdr *)smp);
	}

	for (i = 0; i < ARRAY_SIZE(ibp->sl_to_sc); i++)
		*p++ = ibp->sl_to_sc[i];

	if (resp_len)
		*resp_len += size;

	return reply((struct ib_mad_hdr *)smp);
}

static int __subn_set_opa_sl_to_sc(struct opa_smp *smp, u32 am, u8 *data,
				   struct ib_device *ibdev, u8 port,
				   u32 *resp_len)
{
	struct hfi1_ibport *ibp = to_iport(ibdev, port);
	u8 *p = data;
	int i;
	u8 sc;

	if (am) {
		smp->status |= IB_SMP_INVALID_FIELD;
		return reply((struct ib_mad_hdr *)smp);
	}

	for (i = 0; i < ARRAY_SIZE(ibp->sl_to_sc); i++) {
		sc = *p++;
		if (ibp->sl_to_sc[i] != sc) {
			ibp->sl_to_sc[i] = sc;

			/* Put all stale qps into error state */
			hfi1_error_port_qps(ibp, i);
		}
	}

	return __subn_get_opa_sl_to_sc(smp, am, data, ibdev, port, resp_len);
}

static int __subn_get_opa_sc_to_sl(struct opa_smp *smp, u32 am, u8 *data,
				   struct ib_device *ibdev, u8 port,
				   u32 *resp_len)
{
	struct hfi1_ibport *ibp = to_iport(ibdev, port);
	u8 *p = data;
	size_t size = ARRAY_SIZE(ibp->sc_to_sl); /* == 32 */
	unsigned i;

	if (am) {
		smp->status |= IB_SMP_INVALID_FIELD;
		return reply((struct ib_mad_hdr *)smp);
	}

	for (i = 0; i < ARRAY_SIZE(ibp->sc_to_sl); i++)
		*p++ = ibp->sc_to_sl[i];

	if (resp_len)
		*resp_len += size;

	return reply((struct ib_mad_hdr *)smp);
}

static int __subn_set_opa_sc_to_sl(struct opa_smp *smp, u32 am, u8 *data,
				   struct ib_device *ibdev, u8 port,
				   u32 *resp_len)
{
	struct hfi1_ibport *ibp = to_iport(ibdev, port);
	u8 *p = data;
	int i;

	if (am) {
		smp->status |= IB_SMP_INVALID_FIELD;
		return reply((struct ib_mad_hdr *)smp);
	}

	for (i = 0; i < ARRAY_SIZE(ibp->sc_to_sl); i++)
		ibp->sc_to_sl[i] = *p++;

	return __subn_get_opa_sc_to_sl(smp, am, data, ibdev, port, resp_len);
}

static int __subn_get_opa_sc_to_vlt(struct opa_smp *smp, u32 am, u8 *data,
				    struct ib_device *ibdev, u8 port,
				    u32 *resp_len)
{
	u32 n_blocks = OPA_AM_NBLK(am);
	struct hfi1_devdata *dd = dd_from_ibdev(ibdev);
	void *vp = (void *)data;
	size_t size = 4 * sizeof(u64);

	if (n_blocks != 1) {
		smp->status |= IB_SMP_INVALID_FIELD;
		return reply((struct ib_mad_hdr *)smp);
	}

	get_sc2vlt_tables(dd, vp);

	if (resp_len)
		*resp_len += size;

	return reply((struct ib_mad_hdr *)smp);
}

static int __subn_set_opa_sc_to_vlt(struct opa_smp *smp, u32 am, u8 *data,
				    struct ib_device *ibdev, u8 port,
				    u32 *resp_len)
{
	u32 n_blocks = OPA_AM_NBLK(am);
	int async_update = OPA_AM_ASYNC(am);
	struct hfi1_devdata *dd = dd_from_ibdev(ibdev);
	void *vp = (void *)data;
	struct hfi1_pportdata *ppd;
	int lstate;

	if (n_blocks != 1 || async_update) {
		smp->status |= IB_SMP_INVALID_FIELD;
		return reply((struct ib_mad_hdr *)smp);
	}

	/* IB numbers ports from 1, hw from 0 */
	ppd = dd->pport + (port - 1);
	lstate = driver_lstate(ppd);
	/*
	 * it's known that async_update is 0 by this point, but include
	 * the explicit check for clarity
	 */
	if (!async_update &&
	    (lstate == IB_PORT_ARMED || lstate == IB_PORT_ACTIVE)) {
		smp->status |= IB_SMP_INVALID_FIELD;
		return reply((struct ib_mad_hdr *)smp);
	}

	set_sc2vlt_tables(dd, vp);

	return __subn_get_opa_sc_to_vlt(smp, am, data, ibdev, port, resp_len);
}

static int __subn_get_opa_sc_to_vlnt(struct opa_smp *smp, u32 am, u8 *data,
				     struct ib_device *ibdev, u8 port,
				     u32 *resp_len)
{
	u32 n_blocks = OPA_AM_NPORT(am);
	struct hfi1_devdata *dd = dd_from_ibdev(ibdev);
	struct hfi1_pportdata *ppd;
	void *vp = (void *)data;
	int size;

	if (n_blocks != 1) {
		smp->status |= IB_SMP_INVALID_FIELD;
		return reply((struct ib_mad_hdr *)smp);
	}

	ppd = dd->pport + (port - 1);

	size = fm_get_table(ppd, FM_TBL_SC2VLNT, vp);

	if (resp_len)
		*resp_len += size;

	return reply((struct ib_mad_hdr *)smp);
}

static int __subn_set_opa_sc_to_vlnt(struct opa_smp *smp, u32 am, u8 *data,
				     struct ib_device *ibdev, u8 port,
				     u32 *resp_len)
{
	u32 n_blocks = OPA_AM_NPORT(am);
	struct hfi1_devdata *dd = dd_from_ibdev(ibdev);
	struct hfi1_pportdata *ppd;
	void *vp = (void *)data;
	int lstate;

	if (n_blocks != 1) {
		smp->status |= IB_SMP_INVALID_FIELD;
		return reply((struct ib_mad_hdr *)smp);
	}

	/* IB numbers ports from 1, hw from 0 */
	ppd = dd->pport + (port - 1);
	lstate = driver_lstate(ppd);
	if (lstate == IB_PORT_ARMED || lstate == IB_PORT_ACTIVE) {
		smp->status |= IB_SMP_INVALID_FIELD;
		return reply((struct ib_mad_hdr *)smp);
	}

	ppd = dd->pport + (port - 1);

	fm_set_table(ppd, FM_TBL_SC2VLNT, vp);

	return __subn_get_opa_sc_to_vlnt(smp, am, data, ibdev, port,
					 resp_len);
}

1725static int __subn_get_opa_psi(struct opa_smp *smp, u32 am, u8 *data,
1726 struct ib_device *ibdev, u8 port,
1727 u32 *resp_len)
1728{
1729 u32 nports = OPA_AM_NPORT(am);
1730 u32 start_of_sm_config = OPA_AM_START_SM_CFG(am);
1731 u32 lstate;
1732 struct hfi1_ibport *ibp;
1733 struct hfi1_pportdata *ppd;
50e5dcbe 1734 struct opa_port_state_info *psi = (struct opa_port_state_info *)data;
77241056
MM
1735
1736 if (nports != 1) {
1737 smp->status |= IB_SMP_INVALID_FIELD;
1738 return reply((struct ib_mad_hdr *)smp);
1739 }
1740
1741 ibp = to_iport(ibdev, port);
1742 ppd = ppd_from_ibp(ibp);
1743
1744 lstate = driver_lstate(ppd);
1745
1746 if (start_of_sm_config && (lstate == IB_PORT_INIT))
1747 ppd->is_sm_config_started = 1;
1748
77241056
MM
1749 psi->port_states.ledenable_offlinereason = ppd->neighbor_normal << 4;
1750 psi->port_states.ledenable_offlinereason |=
1751 ppd->is_sm_config_started << 5;
1752 psi->port_states.ledenable_offlinereason |=
a9c05e35 1753 ppd->offline_disabled_reason;
77241056
MM
1754
1755 psi->port_states.portphysstate_portstate =
1756 (hfi1_ibphys_portstate(ppd) << 4) | (lstate & 0xf);
1757 psi->link_width_downgrade_tx_active =
aadfc3b2 1758 cpu_to_be16(ppd->link_width_downgrade_tx_active);
77241056 1759 psi->link_width_downgrade_rx_active =
aadfc3b2 1760 cpu_to_be16(ppd->link_width_downgrade_rx_active);
77241056
MM
1761 if (resp_len)
1762 *resp_len += sizeof(struct opa_port_state_info);
1763
1764 return reply((struct ib_mad_hdr *)smp);
1765}
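
Editor's note: the PortStateInfo get handler above packs several sub-fields into single bytes -- is_sm_config_started lands in bit 5 and neighbor_normal in bit 4 of ledenable_offlinereason, with offline_disabled_reason OR'd into the low bits, while portphysstate_portstate carries the physical state in the high nibble and the logical state in the low nibble. A minimal user-space sketch of that packing with hypothetical values; the field widths here are assumptions read off the shifts above, not taken from the OPA spec:

	#include <stdint.h>
	#include <stdio.h>

	/* sketch only -- mirrors the shifts in __subn_get_opa_psi() */
	static uint8_t pack_led_offline(uint8_t neighbor_normal,
					uint8_t sm_config_started,
					uint8_t offline_reason)
	{
		return (uint8_t)(((sm_config_started & 1) << 5) |
				 ((neighbor_normal & 1) << 4) |
				 (offline_reason & 0xf));
	}

	static uint8_t pack_phys_logical(uint8_t physstate, uint8_t lstate)
	{
		return (uint8_t)((physstate << 4) | (lstate & 0xf));
	}

	int main(void)
	{
		/* hypothetical field values, just to show the layout */
		printf("0x%02x 0x%02x\n",
		       pack_led_offline(1, 1, 2),	/* -> 0x32 */
		       pack_phys_logical(5, 4));	/* -> 0x54 */
		return 0;
	}
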
1766
1767static int __subn_set_opa_psi(struct opa_smp *smp, u32 am, u8 *data,
1768 struct ib_device *ibdev, u8 port,
1769 u32 *resp_len)
1770{
1771 u32 nports = OPA_AM_NPORT(am);
1772 u32 start_of_sm_config = OPA_AM_START_SM_CFG(am);
1773 u32 ls_old;
1774 u8 ls_new, ps_new;
1775 struct hfi1_ibport *ibp;
1776 struct hfi1_pportdata *ppd;
50e5dcbe 1777 struct opa_port_state_info *psi = (struct opa_port_state_info *)data;
77241056
MM
1778 int ret, invalid = 0;
1779
1780 if (nports != 1) {
1781 smp->status |= IB_SMP_INVALID_FIELD;
1782 return reply((struct ib_mad_hdr *)smp);
1783 }
1784
1785 ibp = to_iport(ibdev, port);
1786 ppd = ppd_from_ibp(ibp);
1787
1788 ls_old = driver_lstate(ppd);
1789
1790 ls_new = port_states_to_logical_state(&psi->port_states);
1791 ps_new = port_states_to_phys_state(&psi->port_states);
1792
1793 if (ls_old == IB_PORT_INIT) {
1794 if (start_of_sm_config) {
1795 if (ls_new == ls_old || (ls_new == IB_PORT_ARMED))
1796 ppd->is_sm_config_started = 1;
1797 } else if (ls_new == IB_PORT_ARMED) {
1798 if (ppd->is_sm_config_started == 0)
1799 invalid = 1;
1800 }
1801 }
1802
1803 ret = set_port_states(ppd, smp, ls_new, ps_new, invalid);
1804 if (ret)
1805 return ret;
1806
1807 if (invalid)
1808 smp->status |= IB_SMP_INVALID_FIELD;
1809
1810 return __subn_get_opa_psi(smp, am, data, ibdev, port, resp_len);
1811}
1812
1813static int __subn_get_opa_cable_info(struct opa_smp *smp, u32 am, u8 *data,
1814 struct ib_device *ibdev, u8 port,
1815 u32 *resp_len)
1816{
1817 struct hfi1_devdata *dd = dd_from_ibdev(ibdev);
1818 u32 addr = OPA_AM_CI_ADDR(am);
1819 u32 len = OPA_AM_CI_LEN(am) + 1;
1820 int ret;
1821
f29a08dc
EH
1822 if (dd->pport->port_type != PORT_TYPE_QSFP) {
1823 smp->status |= IB_SMP_INVALID_FIELD;
1824 return reply((struct ib_mad_hdr *)smp);
1825 }
1826
349ac71f 1827#define __CI_PAGE_SIZE BIT(7) /* 128 bytes */
77241056
MM
1828#define __CI_PAGE_MASK ~(__CI_PAGE_SIZE - 1)
1829#define __CI_PAGE_NUM(a) ((a) & __CI_PAGE_MASK)
1830
4d114fdd
JJ
1831 /*
1832 * check that addr is within spec, and
1833 * addr and (addr + len - 1) are on the same "page"
1834 */
77241056 1835 if (addr >= 4096 ||
17fb4f29 1836 (__CI_PAGE_NUM(addr) != __CI_PAGE_NUM(addr + len - 1))) {
77241056
MM
1837 smp->status |= IB_SMP_INVALID_FIELD;
1838 return reply((struct ib_mad_hdr *)smp);
1839 }
1840
1841 ret = get_cable_info(dd, port, addr, len, data);
1842
1843 if (ret == -ENODEV) {
1844 smp->status |= IB_SMP_UNSUP_METH_ATTR;
1845 return reply((struct ib_mad_hdr *)smp);
1846 }
1847
1848 /* The address range for the CableInfo SMA query is wider than the
1849 * memory available on the QSFP cable. We want to return a valid
 1850 * response, albeit zeroed out, for address ranges that are beyond the
 1851 * available memory but still within the CableInfo query spec.
1852 */
1853 if (ret < 0 && ret != -ERANGE) {
1854 smp->status |= IB_SMP_INVALID_FIELD;
1855 return reply((struct ib_mad_hdr *)smp);
1856 }
1857
1858 if (resp_len)
1859 *resp_len += len;
1860
1861 return reply((struct ib_mad_hdr *)smp);
1862}
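
Editor's note: the address check above (the __CI_PAGE_* macros) accepts a CableInfo window only if it starts below 4096 and does not straddle a 128-byte page, i.e. addr and addr + len - 1 must fall on the same page. A small stand-alone sketch of that check with made-up addresses, for readers working out which SMA queries the handler rejects:

	#include <stdbool.h>
	#include <stdint.h>
	#include <stdio.h>

	/* sketch of the __CI_PAGE_* check, not the driver code */
	#define CI_PAGE_SIZE	128u
	#define CI_PAGE_MASK	(~(CI_PAGE_SIZE - 1))
	#define CI_PAGE_NUM(a)	((a) & CI_PAGE_MASK)

	static bool ci_addr_ok(uint32_t addr, uint32_t len)
	{
		return addr < 4096 &&
		       CI_PAGE_NUM(addr) == CI_PAGE_NUM(addr + len - 1);
	}

	int main(void)
	{
		printf("%d\n", ci_addr_ok(0x80, 64));	/* 1: stays in one page  */
		printf("%d\n", ci_addr_ok(0xf0, 32));	/* 0: crosses into 0x100 */
		printf("%d\n", ci_addr_ok(4096, 1));	/* 0: beyond the window  */
		return 0;
	}
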
1863
1864static int __subn_get_opa_bct(struct opa_smp *smp, u32 am, u8 *data,
1865 struct ib_device *ibdev, u8 port, u32 *resp_len)
1866{
1867 u32 num_ports = OPA_AM_NPORT(am);
1868 struct hfi1_devdata *dd = dd_from_ibdev(ibdev);
1869 struct hfi1_pportdata *ppd;
50e5dcbe 1870 struct buffer_control *p = (struct buffer_control *)data;
77241056
MM
1871 int size;
1872
1873 if (num_ports != 1) {
1874 smp->status |= IB_SMP_INVALID_FIELD;
1875 return reply((struct ib_mad_hdr *)smp);
1876 }
1877
1878 ppd = dd->pport + (port - 1);
1879 size = fm_get_table(ppd, FM_TBL_BUFFER_CONTROL, p);
1880 trace_bct_get(dd, p);
1881 if (resp_len)
1882 *resp_len += size;
1883
1884 return reply((struct ib_mad_hdr *)smp);
1885}
1886
1887static int __subn_set_opa_bct(struct opa_smp *smp, u32 am, u8 *data,
1888 struct ib_device *ibdev, u8 port, u32 *resp_len)
1889{
1890 u32 num_ports = OPA_AM_NPORT(am);
1891 struct hfi1_devdata *dd = dd_from_ibdev(ibdev);
1892 struct hfi1_pportdata *ppd;
50e5dcbe 1893 struct buffer_control *p = (struct buffer_control *)data;
77241056
MM
1894
1895 if (num_ports != 1) {
1896 smp->status |= IB_SMP_INVALID_FIELD;
1897 return reply((struct ib_mad_hdr *)smp);
1898 }
1899 ppd = dd->pport + (port - 1);
1900 trace_bct_set(dd, p);
1901 if (fm_set_table(ppd, FM_TBL_BUFFER_CONTROL, p) < 0) {
1902 smp->status |= IB_SMP_INVALID_FIELD;
1903 return reply((struct ib_mad_hdr *)smp);
1904 }
1905
1906 return __subn_get_opa_bct(smp, am, data, ibdev, port, resp_len);
1907}
1908
1909static int __subn_get_opa_vl_arb(struct opa_smp *smp, u32 am, u8 *data,
1910 struct ib_device *ibdev, u8 port,
1911 u32 *resp_len)
1912{
1913 struct hfi1_pportdata *ppd = ppd_from_ibp(to_iport(ibdev, port));
1914 u32 num_ports = OPA_AM_NPORT(am);
1915 u8 section = (am & 0x00ff0000) >> 16;
1916 u8 *p = data;
1917 int size = 0;
1918
1919 if (num_ports != 1) {
1920 smp->status |= IB_SMP_INVALID_FIELD;
1921 return reply((struct ib_mad_hdr *)smp);
1922 }
1923
1924 switch (section) {
1925 case OPA_VLARB_LOW_ELEMENTS:
1926 size = fm_get_table(ppd, FM_TBL_VL_LOW_ARB, p);
1927 break;
1928 case OPA_VLARB_HIGH_ELEMENTS:
1929 size = fm_get_table(ppd, FM_TBL_VL_HIGH_ARB, p);
1930 break;
1931 case OPA_VLARB_PREEMPT_ELEMENTS:
1932 size = fm_get_table(ppd, FM_TBL_VL_PREEMPT_ELEMS, p);
1933 break;
1934 case OPA_VLARB_PREEMPT_MATRIX:
1935 size = fm_get_table(ppd, FM_TBL_VL_PREEMPT_MATRIX, p);
1936 break;
1937 default:
1938 pr_warn("OPA SubnGet(VL Arb) AM Invalid : 0x%x\n",
1939 be32_to_cpu(smp->attr_mod));
1940 smp->status |= IB_SMP_INVALID_FIELD;
1941 break;
1942 }
1943
1944 if (size > 0 && resp_len)
1945 *resp_len += size;
1946
1947 return reply((struct ib_mad_hdr *)smp);
1948}
1949
1950static int __subn_set_opa_vl_arb(struct opa_smp *smp, u32 am, u8 *data,
1951 struct ib_device *ibdev, u8 port,
1952 u32 *resp_len)
1953{
1954 struct hfi1_pportdata *ppd = ppd_from_ibp(to_iport(ibdev, port));
1955 u32 num_ports = OPA_AM_NPORT(am);
1956 u8 section = (am & 0x00ff0000) >> 16;
1957 u8 *p = data;
1958
1959 if (num_ports != 1) {
1960 smp->status |= IB_SMP_INVALID_FIELD;
1961 return reply((struct ib_mad_hdr *)smp);
1962 }
1963
1964 switch (section) {
1965 case OPA_VLARB_LOW_ELEMENTS:
50e5dcbe 1966 (void)fm_set_table(ppd, FM_TBL_VL_LOW_ARB, p);
77241056
MM
1967 break;
1968 case OPA_VLARB_HIGH_ELEMENTS:
50e5dcbe 1969 (void)fm_set_table(ppd, FM_TBL_VL_HIGH_ARB, p);
77241056 1970 break;
4d114fdd
JJ
1971 /*
 1972 * neither OPA_VLARB_PREEMPT_ELEMENTS nor OPA_VLARB_PREEMPT_MATRIX
1973 * can be changed from the default values
1974 */
77241056
MM
1975 case OPA_VLARB_PREEMPT_ELEMENTS:
1976 /* FALLTHROUGH */
1977 case OPA_VLARB_PREEMPT_MATRIX:
1978 smp->status |= IB_SMP_UNSUP_METH_ATTR;
1979 break;
1980 default:
1981 pr_warn("OPA SubnSet(VL Arb) AM Invalid : 0x%x\n",
1982 be32_to_cpu(smp->attr_mod));
1983 smp->status |= IB_SMP_INVALID_FIELD;
1984 break;
1985 }
1986
1987 return __subn_get_opa_vl_arb(smp, am, data, ibdev, port, resp_len);
1988}
1989
1990struct opa_pma_mad {
1991 struct ib_mad_hdr mad_hdr;
1992 u8 data[2024];
1993} __packed;
1994
1995struct opa_class_port_info {
1996 u8 base_version;
1997 u8 class_version;
1998 __be16 cap_mask;
1999 __be32 cap_mask2_resp_time;
2000
2001 u8 redirect_gid[16];
2002 __be32 redirect_tc_fl;
2003 __be32 redirect_lid;
2004 __be32 redirect_sl_qp;
2005 __be32 redirect_qkey;
2006
2007 u8 trap_gid[16];
2008 __be32 trap_tc_fl;
2009 __be32 trap_lid;
2010 __be32 trap_hl_qp;
2011 __be32 trap_qkey;
2012
2013 __be16 trap_pkey;
2014 __be16 redirect_pkey;
2015
2016 u8 trap_sl_rsvd;
2017 u8 reserved[3];
2018} __packed;
2019
2020struct opa_port_status_req {
2021 __u8 port_num;
2022 __u8 reserved[3];
2023 __be32 vl_select_mask;
2024};
2025
2026#define VL_MASK_ALL 0x000080ff
2027
2028struct opa_port_status_rsp {
2029 __u8 port_num;
2030 __u8 reserved[3];
2031 __be32 vl_select_mask;
2032
2033 /* Data counters */
2034 __be64 port_xmit_data;
2035 __be64 port_rcv_data;
2036 __be64 port_xmit_pkts;
2037 __be64 port_rcv_pkts;
2038 __be64 port_multicast_xmit_pkts;
2039 __be64 port_multicast_rcv_pkts;
2040 __be64 port_xmit_wait;
2041 __be64 sw_port_congestion;
2042 __be64 port_rcv_fecn;
2043 __be64 port_rcv_becn;
2044 __be64 port_xmit_time_cong;
2045 __be64 port_xmit_wasted_bw;
2046 __be64 port_xmit_wait_data;
2047 __be64 port_rcv_bubble;
2048 __be64 port_mark_fecn;
2049 /* Error counters */
2050 __be64 port_rcv_constraint_errors;
2051 __be64 port_rcv_switch_relay_errors;
2052 __be64 port_xmit_discards;
2053 __be64 port_xmit_constraint_errors;
2054 __be64 port_rcv_remote_physical_errors;
2055 __be64 local_link_integrity_errors;
2056 __be64 port_rcv_errors;
2057 __be64 excessive_buffer_overruns;
2058 __be64 fm_config_errors;
2059 __be32 link_error_recovery;
2060 __be32 link_downed;
2061 u8 uncorrectable_errors;
2062
2063 u8 link_quality_indicator; /* 5res, 3bit */
2064 u8 res2[6];
2065 struct _vls_pctrs {
2066 /* per-VL Data counters */
2067 __be64 port_vl_xmit_data;
2068 __be64 port_vl_rcv_data;
2069 __be64 port_vl_xmit_pkts;
2070 __be64 port_vl_rcv_pkts;
2071 __be64 port_vl_xmit_wait;
2072 __be64 sw_port_vl_congestion;
2073 __be64 port_vl_rcv_fecn;
2074 __be64 port_vl_rcv_becn;
2075 __be64 port_xmit_time_cong;
2076 __be64 port_vl_xmit_wasted_bw;
2077 __be64 port_vl_xmit_wait_data;
2078 __be64 port_vl_rcv_bubble;
2079 __be64 port_vl_mark_fecn;
2080 __be64 port_vl_xmit_discards;
2081 } vls[0]; /* real array size defined by # bits set in vl_select_mask */
2082};
2083
2084enum counter_selects {
2085 CS_PORT_XMIT_DATA = (1 << 31),
2086 CS_PORT_RCV_DATA = (1 << 30),
2087 CS_PORT_XMIT_PKTS = (1 << 29),
2088 CS_PORT_RCV_PKTS = (1 << 28),
2089 CS_PORT_MCAST_XMIT_PKTS = (1 << 27),
2090 CS_PORT_MCAST_RCV_PKTS = (1 << 26),
2091 CS_PORT_XMIT_WAIT = (1 << 25),
2092 CS_SW_PORT_CONGESTION = (1 << 24),
2093 CS_PORT_RCV_FECN = (1 << 23),
2094 CS_PORT_RCV_BECN = (1 << 22),
2095 CS_PORT_XMIT_TIME_CONG = (1 << 21),
2096 CS_PORT_XMIT_WASTED_BW = (1 << 20),
2097 CS_PORT_XMIT_WAIT_DATA = (1 << 19),
2098 CS_PORT_RCV_BUBBLE = (1 << 18),
2099 CS_PORT_MARK_FECN = (1 << 17),
2100 CS_PORT_RCV_CONSTRAINT_ERRORS = (1 << 16),
2101 CS_PORT_RCV_SWITCH_RELAY_ERRORS = (1 << 15),
2102 CS_PORT_XMIT_DISCARDS = (1 << 14),
2103 CS_PORT_XMIT_CONSTRAINT_ERRORS = (1 << 13),
2104 CS_PORT_RCV_REMOTE_PHYSICAL_ERRORS = (1 << 12),
2105 CS_LOCAL_LINK_INTEGRITY_ERRORS = (1 << 11),
2106 CS_PORT_RCV_ERRORS = (1 << 10),
2107 CS_EXCESSIVE_BUFFER_OVERRUNS = (1 << 9),
2108 CS_FM_CONFIG_ERRORS = (1 << 8),
2109 CS_LINK_ERROR_RECOVERY = (1 << 7),
2110 CS_LINK_DOWNED = (1 << 6),
2111 CS_UNCORRECTABLE_ERRORS = (1 << 5),
2112};
2113
2114struct opa_clear_port_status {
2115 __be64 port_select_mask[4];
2116 __be32 counter_select_mask;
2117};
2118
2119struct opa_aggregate {
2120 __be16 attr_id;
2121 __be16 err_reqlength; /* 1 bit, 8 res, 7 bit */
2122 __be32 attr_mod;
2123 u8 data[0];
2124};
2125
f0852922
AL
2126#define MSK_LLI 0x000000f0
2127#define MSK_LLI_SFT 4
2128#define MSK_LER 0x0000000f
2129#define MSK_LER_SFT 0
2130#define ADD_LLI 8
2131#define ADD_LER 2
2132
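Editor's note: these masks and adders describe the resolution field of the DataPortCounters request. pma_get_opa_datacounters() further down decodes each nibble as (nibble ? nibble + ADD_* : 0), and get_error_counter_summary() then right-shifts the LocalLinkIntegrity and LinkErrorRecovery counts by the result. A user-space sketch of that arithmetic with a hypothetical resolution word and counter values:

	#include <stdint.h>
	#include <stdio.h>

	/* sketch of the resolution decode; values below are hypothetical */
	static uint8_t decode_res(uint32_t resolution, uint32_t mask,
				  uint32_t shift, uint8_t add)
	{
		uint8_t res = (uint8_t)((resolution & mask) >> shift);

		return res ? res + add : 0;
	}

	int main(void)
	{
		uint32_t resolution = 0x00000023;	/* lli nibble 2, ler nibble 3 */
		uint8_t res_lli = decode_res(resolution, 0x000000f0, 4, 8);	/* 10 */
		uint8_t res_ler = decode_res(resolution, 0x0000000f, 0, 2);	/* 5  */
		uint64_t lli_cnt = 4096, ler_cnt = 1024;

		/* counters are scaled down before being summed */
		printf("lli >> %u = %llu, ler >> %u = %llu\n",
		       res_lli, (unsigned long long)(lli_cnt >> res_lli),
		       res_ler, (unsigned long long)(ler_cnt >> res_ler));
		return 0;
	}
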
2133/* Request contains first three fields, response contains those plus the rest */
77241056
MM
2134struct opa_port_data_counters_msg {
2135 __be64 port_select_mask[4];
2136 __be32 vl_select_mask;
f0852922 2137 __be32 resolution;
77241056
MM
2138
2139 /* Response fields follow */
77241056
MM
2140 struct _port_dctrs {
2141 u8 port_number;
2142 u8 reserved2[3];
2143 __be32 link_quality_indicator; /* 29res, 3bit */
2144
2145 /* Data counters */
2146 __be64 port_xmit_data;
2147 __be64 port_rcv_data;
2148 __be64 port_xmit_pkts;
2149 __be64 port_rcv_pkts;
2150 __be64 port_multicast_xmit_pkts;
2151 __be64 port_multicast_rcv_pkts;
2152 __be64 port_xmit_wait;
2153 __be64 sw_port_congestion;
2154 __be64 port_rcv_fecn;
2155 __be64 port_rcv_becn;
2156 __be64 port_xmit_time_cong;
2157 __be64 port_xmit_wasted_bw;
2158 __be64 port_xmit_wait_data;
2159 __be64 port_rcv_bubble;
2160 __be64 port_mark_fecn;
2161
2162 __be64 port_error_counter_summary;
2163 /* Sum of error counts/port */
2164
2165 struct _vls_dctrs {
2166 /* per-VL Data counters */
2167 __be64 port_vl_xmit_data;
2168 __be64 port_vl_rcv_data;
2169 __be64 port_vl_xmit_pkts;
2170 __be64 port_vl_rcv_pkts;
2171 __be64 port_vl_xmit_wait;
2172 __be64 sw_port_vl_congestion;
2173 __be64 port_vl_rcv_fecn;
2174 __be64 port_vl_rcv_becn;
2175 __be64 port_xmit_time_cong;
2176 __be64 port_vl_xmit_wasted_bw;
2177 __be64 port_vl_xmit_wait_data;
2178 __be64 port_vl_rcv_bubble;
2179 __be64 port_vl_mark_fecn;
2180 } vls[0];
 2181 /* array size defined by #bits set in vl_select_mask */
2182 } port[1]; /* array size defined by #ports in attribute modifier */
2183};
2184
2185struct opa_port_error_counters64_msg {
4d114fdd
JJ
2186 /*
 2187 * Request contains the first two fields; the response contains the
 2188 * entire structure
2189 */
77241056
MM
2190 __be64 port_select_mask[4];
2191 __be32 vl_select_mask;
2192
2193 /* Response-only fields follow */
2194 __be32 reserved1;
2195 struct _port_ectrs {
2196 u8 port_number;
2197 u8 reserved2[7];
2198 __be64 port_rcv_constraint_errors;
2199 __be64 port_rcv_switch_relay_errors;
2200 __be64 port_xmit_discards;
2201 __be64 port_xmit_constraint_errors;
2202 __be64 port_rcv_remote_physical_errors;
2203 __be64 local_link_integrity_errors;
2204 __be64 port_rcv_errors;
2205 __be64 excessive_buffer_overruns;
2206 __be64 fm_config_errors;
2207 __be32 link_error_recovery;
2208 __be32 link_downed;
2209 u8 uncorrectable_errors;
2210 u8 reserved3[7];
2211 struct _vls_ectrs {
2212 __be64 port_vl_xmit_discards;
2213 } vls[0];
2214 /* array size defined by #bits set in vl_select_mask */
2215 } port[1]; /* array size defined by #ports in attribute modifier */
2216};
2217
2218struct opa_port_error_info_msg {
2219 __be64 port_select_mask[4];
2220 __be32 error_info_select_mask;
2221 __be32 reserved1;
2222 struct _port_ei {
77241056
MM
2223 u8 port_number;
2224 u8 reserved2[7];
2225
2226 /* PortRcvErrorInfo */
2227 struct {
2228 u8 status_and_code;
2229 union {
2230 u8 raw[17];
2231 struct {
2232 /* EI1to12 format */
2233 u8 packet_flit1[8];
2234 u8 packet_flit2[8];
2235 u8 remaining_flit_bits12;
2236 } ei1to12;
2237 struct {
2238 u8 packet_bytes[8];
2239 u8 remaining_flit_bits;
2240 } ei13;
2241 } ei;
2242 u8 reserved3[6];
2243 } __packed port_rcv_ei;
2244
2245 /* ExcessiveBufferOverrunInfo */
2246 struct {
2247 u8 status_and_sc;
2248 u8 reserved4[7];
2249 } __packed excessive_buffer_overrun_ei;
2250
2251 /* PortXmitConstraintErrorInfo */
2252 struct {
2253 u8 status;
2254 u8 reserved5;
2255 __be16 pkey;
2256 __be32 slid;
2257 } __packed port_xmit_constraint_ei;
2258
2259 /* PortRcvConstraintErrorInfo */
2260 struct {
2261 u8 status;
2262 u8 reserved6;
2263 __be16 pkey;
2264 __be32 slid;
2265 } __packed port_rcv_constraint_ei;
2266
2267 /* PortRcvSwitchRelayErrorInfo */
2268 struct {
2269 u8 status_and_code;
2270 u8 reserved7[3];
2271 __u32 error_info;
2272 } __packed port_rcv_switch_relay_ei;
2273
2274 /* UncorrectableErrorInfo */
2275 struct {
2276 u8 status_and_code;
2277 u8 reserved8;
2278 } __packed uncorrectable_ei;
2279
2280 /* FMConfigErrorInfo */
2281 struct {
2282 u8 status_and_code;
2283 u8 error_info;
2284 } __packed fm_config_ei;
2285 __u32 reserved9;
2286 } port[1]; /* actual array size defined by #ports in attr modifier */
2287};
2288
2289/* opa_port_error_info_msg error_info_select_mask bit definitions */
2290enum error_info_selects {
2291 ES_PORT_RCV_ERROR_INFO = (1 << 31),
2292 ES_EXCESSIVE_BUFFER_OVERRUN_INFO = (1 << 30),
2293 ES_PORT_XMIT_CONSTRAINT_ERROR_INFO = (1 << 29),
2294 ES_PORT_RCV_CONSTRAINT_ERROR_INFO = (1 << 28),
2295 ES_PORT_RCV_SWITCH_RELAY_ERROR_INFO = (1 << 27),
2296 ES_UNCORRECTABLE_ERROR_INFO = (1 << 26),
2297 ES_FM_CONFIG_ERROR_INFO = (1 << 25)
2298};
2299
2300static int pma_get_opa_classportinfo(struct opa_pma_mad *pmp,
17fb4f29 2301 struct ib_device *ibdev, u32 *resp_len)
77241056
MM
2302{
2303 struct opa_class_port_info *p =
2304 (struct opa_class_port_info *)pmp->data;
2305
2306 memset(pmp->data, 0, sizeof(pmp->data));
2307
2308 if (pmp->mad_hdr.attr_mod != 0)
2309 pmp->mad_hdr.status |= IB_SMP_INVALID_FIELD;
2310
2311 p->base_version = OPA_MGMT_BASE_VERSION;
2312 p->class_version = OPA_SMI_CLASS_VERSION;
2313 /*
 2314 * Expected response time is 4.096 usec * 2^18 == 1.073741824 sec.
2315 */
2316 p->cap_mask2_resp_time = cpu_to_be32(18);
2317
2318 if (resp_len)
2319 *resp_len += sizeof(*p);
2320
2321 return reply((struct ib_mad_hdr *)pmp);
2322}
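
Editor's note: the value 18 written into cap_mask2_resp_time above uses the standard ClassPortInfo response-time encoding, 4.096 usec * 2^N. A two-line check of the figure quoted in the comment:

	#include <stdio.h>

	/* editorial arithmetic only, not driver code */
	int main(void)
	{
		unsigned int n = 18;
		double usec = 4.096 * (double)(1u << n);

		printf("N=%u -> %.9f sec\n", n, usec / 1e6);	/* 1.073741824 */
		return 0;
	}
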
2323
2324static void a0_portstatus(struct hfi1_pportdata *ppd,
2325 struct opa_port_status_rsp *rsp, u32 vl_select_mask)
2326{
2327 if (!is_bx(ppd->dd)) {
2328 unsigned long vl;
d9d3e025 2329 u64 sum_vl_xmit_wait = 0;
77241056 2330 u32 vl_all_mask = VL_MASK_ALL;
77241056
MM
2331
2332 for_each_set_bit(vl, (unsigned long *)&(vl_all_mask),
2333 8 * sizeof(vl_all_mask)) {
d9d3e025
IW
2334 u64 tmp = sum_vl_xmit_wait +
2335 read_port_cntr(ppd, C_TX_WAIT_VL,
2336 idx_from_vl(vl));
2337 if (tmp < sum_vl_xmit_wait) {
2338 /* we wrapped */
2339 sum_vl_xmit_wait = (u64)~0;
2340 break;
2341 }
2342 sum_vl_xmit_wait = tmp;
77241056 2343 }
d9d3e025
IW
2344 if (be64_to_cpu(rsp->port_xmit_wait) > sum_vl_xmit_wait)
2345 rsp->port_xmit_wait = cpu_to_be64(sum_vl_xmit_wait);
77241056
MM
2346 }
2347}
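
Editor's note: a0_portstatus() (and a0_datacounters() below) accumulate the per-VL xmit-wait counters with an explicit wrap check, pegging the sum at ~0 instead of letting it wrap; the driver additionally breaks out of the loop once saturated. A stand-alone sketch of that saturating-add idiom with hypothetical counter values:

	#include <stdint.h>
	#include <stdio.h>

	/* saturate on 64-bit overflow instead of wrapping */
	static uint64_t sat_add_u64(uint64_t sum, uint64_t val)
	{
		uint64_t tmp = sum + val;

		return tmp < sum ? UINT64_MAX : tmp;	/* wrapped -> peg */
	}

	int main(void)
	{
		uint64_t sum = 0;
		const uint64_t vl_wait[3] = { 100, 250, UINT64_MAX };	/* hypothetical */

		for (int i = 0; i < 3; i++)
			sum = sat_add_u64(sum, vl_wait[i]);

		printf("%llu\n", (unsigned long long)sum);	/* UINT64_MAX */
		return 0;
	}
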
2348
77241056 2349static int pma_get_opa_portstatus(struct opa_pma_mad *pmp,
17fb4f29
JJ
2350 struct ib_device *ibdev,
2351 u8 port, u32 *resp_len)
77241056
MM
2352{
2353 struct opa_port_status_req *req =
2354 (struct opa_port_status_req *)pmp->data;
2355 struct hfi1_devdata *dd = dd_from_ibdev(ibdev);
2356 struct opa_port_status_rsp *rsp;
2357 u32 vl_select_mask = be32_to_cpu(req->vl_select_mask);
2358 unsigned long vl;
2359 size_t response_data_size;
2360 u32 nports = be32_to_cpu(pmp->mad_hdr.attr_mod) >> 24;
2361 u8 port_num = req->port_num;
2362 u8 num_vls = hweight32(vl_select_mask);
2363 struct _vls_pctrs *vlinfo;
2364 struct hfi1_ibport *ibp = to_iport(ibdev, port);
2365 struct hfi1_pportdata *ppd = ppd_from_ibp(ibp);
2366 int vfi;
2367 u64 tmp, tmp2;
2368
2369 response_data_size = sizeof(struct opa_port_status_rsp) +
2370 num_vls * sizeof(struct _vls_pctrs);
2371 if (response_data_size > sizeof(pmp->data)) {
2372 pmp->mad_hdr.status |= OPA_PM_STATUS_REQUEST_TOO_LARGE;
2373 return reply((struct ib_mad_hdr *)pmp);
2374 }
2375
d0d236ea
JJ
2376 if (nports != 1 || (port_num && port_num != port) ||
2377 num_vls > OPA_MAX_VLS || (vl_select_mask & ~VL_MASK_ALL)) {
77241056
MM
2378 pmp->mad_hdr.status |= IB_SMP_INVALID_FIELD;
2379 return reply((struct ib_mad_hdr *)pmp);
2380 }
2381
2382 memset(pmp->data, 0, sizeof(pmp->data));
2383
2384 rsp = (struct opa_port_status_rsp *)pmp->data;
2385 if (port_num)
2386 rsp->port_num = port_num;
2387 else
2388 rsp->port_num = port;
2389
2390 rsp->port_rcv_constraint_errors =
2391 cpu_to_be64(read_port_cntr(ppd, C_SW_RCV_CSTR_ERR,
2392 CNTR_INVALID_VL));
2393
2394 hfi1_read_link_quality(dd, &rsp->link_quality_indicator);
2395
2396 rsp->vl_select_mask = cpu_to_be32(vl_select_mask);
2397 rsp->port_xmit_data = cpu_to_be64(read_dev_cntr(dd, C_DC_XMIT_FLITS,
2398 CNTR_INVALID_VL));
2399 rsp->port_rcv_data = cpu_to_be64(read_dev_cntr(dd, C_DC_RCV_FLITS,
2400 CNTR_INVALID_VL));
77241056
MM
2401 rsp->port_xmit_pkts = cpu_to_be64(read_dev_cntr(dd, C_DC_XMIT_PKTS,
2402 CNTR_INVALID_VL));
2403 rsp->port_rcv_pkts = cpu_to_be64(read_dev_cntr(dd, C_DC_RCV_PKTS,
2404 CNTR_INVALID_VL));
2405 rsp->port_multicast_xmit_pkts =
2406 cpu_to_be64(read_dev_cntr(dd, C_DC_MC_XMIT_PKTS,
17fb4f29 2407 CNTR_INVALID_VL));
77241056
MM
2408 rsp->port_multicast_rcv_pkts =
2409 cpu_to_be64(read_dev_cntr(dd, C_DC_MC_RCV_PKTS,
2410 CNTR_INVALID_VL));
2411 rsp->port_xmit_wait =
2412 cpu_to_be64(read_port_cntr(ppd, C_TX_WAIT, CNTR_INVALID_VL));
2413 rsp->port_rcv_fecn =
2414 cpu_to_be64(read_dev_cntr(dd, C_DC_RCV_FCN, CNTR_INVALID_VL));
2415 rsp->port_rcv_becn =
2416 cpu_to_be64(read_dev_cntr(dd, C_DC_RCV_BCN, CNTR_INVALID_VL));
2417 rsp->port_xmit_discards =
2418 cpu_to_be64(read_port_cntr(ppd, C_SW_XMIT_DSCD,
2419 CNTR_INVALID_VL));
2420 rsp->port_xmit_constraint_errors =
2421 cpu_to_be64(read_port_cntr(ppd, C_SW_XMIT_CSTR_ERR,
2422 CNTR_INVALID_VL));
2423 rsp->port_rcv_remote_physical_errors =
2424 cpu_to_be64(read_dev_cntr(dd, C_DC_RMT_PHY_ERR,
2425 CNTR_INVALID_VL));
3210314a
JP
2426 rsp->local_link_integrity_errors =
2427 cpu_to_be64(read_dev_cntr(dd, C_DC_RX_REPLAY,
2428 CNTR_INVALID_VL));
77241056
MM
2429 tmp = read_dev_cntr(dd, C_DC_SEQ_CRC_CNT, CNTR_INVALID_VL);
2430 tmp2 = tmp + read_dev_cntr(dd, C_DC_REINIT_FROM_PEER_CNT,
17fb4f29 2431 CNTR_INVALID_VL);
77241056
MM
2432 if (tmp2 > (u32)UINT_MAX || tmp2 < tmp) {
2433 /* overflow/wrapped */
2434 rsp->link_error_recovery = cpu_to_be32(~0);
2435 } else {
2436 rsp->link_error_recovery = cpu_to_be32(tmp2);
2437 }
2438 rsp->port_rcv_errors =
2439 cpu_to_be64(read_dev_cntr(dd, C_DC_RCV_ERR, CNTR_INVALID_VL));
2440 rsp->excessive_buffer_overruns =
2441 cpu_to_be64(read_dev_cntr(dd, C_RCV_OVF, CNTR_INVALID_VL));
2442 rsp->fm_config_errors =
2443 cpu_to_be64(read_dev_cntr(dd, C_DC_FM_CFG_ERR,
2444 CNTR_INVALID_VL));
2445 rsp->link_downed = cpu_to_be32(read_port_cntr(ppd, C_SW_LINK_DOWN,
17fb4f29 2446 CNTR_INVALID_VL));
77241056
MM
2447
2448 /* rsp->uncorrectable_errors is 8 bits wide, and it pegs at 0xff */
2449 tmp = read_dev_cntr(dd, C_DC_UNC_ERR, CNTR_INVALID_VL);
2450 rsp->uncorrectable_errors = tmp < 0x100 ? (tmp & 0xff) : 0xff;
2451
58721b8f 2452 vlinfo = &rsp->vls[0];
77241056
MM
2453 vfi = 0;
2454 /* The vl_select_mask has been checked above, and we know
2455 * that it contains only entries which represent valid VLs.
2456 * So in the for_each_set_bit() loop below, we don't need
2457 * any additional checks for vl.
2458 */
2459 for_each_set_bit(vl, (unsigned long *)&(vl_select_mask),
2460 8 * sizeof(vl_select_mask)) {
2461 memset(vlinfo, 0, sizeof(*vlinfo));
2462
2463 tmp = read_dev_cntr(dd, C_DC_RX_FLIT_VL, idx_from_vl(vl));
2464 rsp->vls[vfi].port_vl_rcv_data = cpu_to_be64(tmp);
77241056
MM
2465
2466 rsp->vls[vfi].port_vl_rcv_pkts =
2467 cpu_to_be64(read_dev_cntr(dd, C_DC_RX_PKT_VL,
17fb4f29 2468 idx_from_vl(vl)));
77241056
MM
2469
2470 rsp->vls[vfi].port_vl_xmit_data =
2471 cpu_to_be64(read_port_cntr(ppd, C_TX_FLIT_VL,
17fb4f29 2472 idx_from_vl(vl)));
77241056
MM
2473
2474 rsp->vls[vfi].port_vl_xmit_pkts =
2475 cpu_to_be64(read_port_cntr(ppd, C_TX_PKT_VL,
17fb4f29 2476 idx_from_vl(vl)));
77241056
MM
2477
2478 rsp->vls[vfi].port_vl_xmit_wait =
2479 cpu_to_be64(read_port_cntr(ppd, C_TX_WAIT_VL,
17fb4f29 2480 idx_from_vl(vl)));
77241056
MM
2481
2482 rsp->vls[vfi].port_vl_rcv_fecn =
2483 cpu_to_be64(read_dev_cntr(dd, C_DC_RCV_FCN_VL,
17fb4f29 2484 idx_from_vl(vl)));
77241056
MM
2485
2486 rsp->vls[vfi].port_vl_rcv_becn =
2487 cpu_to_be64(read_dev_cntr(dd, C_DC_RCV_BCN_VL,
17fb4f29 2488 idx_from_vl(vl)));
77241056 2489
583eb8b8
JP
2490 rsp->vls[vfi].port_vl_xmit_discards =
2491 cpu_to_be64(read_port_cntr(ppd, C_SW_XMIT_DSCD_VL,
2492 idx_from_vl(vl)));
77241056
MM
2493 vlinfo++;
2494 vfi++;
2495 }
2496
2497 a0_portstatus(ppd, rsp, vl_select_mask);
2498
2499 if (resp_len)
2500 *resp_len += response_data_size;
2501
2502 return reply((struct ib_mad_hdr *)pmp);
2503}
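
Editor's note: both the response sizing and the per-VL fill above key off vl_select_mask -- the flexible vls[] array gets one entry per set bit, filled in ascending VL order by the for_each_set_bit() loop. A user-space approximation of that iteration with a hypothetical mask selecting VL0, VL1 and VL15:

	#include <stdint.h>
	#include <stdio.h>

	int main(void)
	{
		uint32_t vl_select_mask = 0x8003;	/* hypothetical: VL0, VL1, VL15 */
		unsigned int vfi = 0;

		for (unsigned int vl = 0; vl < 32; vl++) {
			if (!(vl_select_mask & (1u << vl)))
				continue;
			printf("vls[%u] <- counters for VL%u\n", vfi, vl);
			vfi++;
		}
		printf("response carries %u per-VL blocks\n", vfi);
		return 0;
	}
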
2504
f0852922
AL
2505static u64 get_error_counter_summary(struct ib_device *ibdev, u8 port,
2506 u8 res_lli, u8 res_ler)
77241056
MM
2507{
2508 struct hfi1_devdata *dd = dd_from_ibdev(ibdev);
2509 struct hfi1_ibport *ibp = to_iport(ibdev, port);
2510 struct hfi1_pportdata *ppd = ppd_from_ibp(ibp);
2511 u64 error_counter_summary = 0, tmp;
2512
2513 error_counter_summary += read_port_cntr(ppd, C_SW_RCV_CSTR_ERR,
2514 CNTR_INVALID_VL);
2515 /* port_rcv_switch_relay_errors is 0 for HFIs */
2516 error_counter_summary += read_port_cntr(ppd, C_SW_XMIT_DSCD,
2517 CNTR_INVALID_VL);
2518 error_counter_summary += read_port_cntr(ppd, C_SW_XMIT_CSTR_ERR,
2519 CNTR_INVALID_VL);
2520 error_counter_summary += read_dev_cntr(dd, C_DC_RMT_PHY_ERR,
17fb4f29 2521 CNTR_INVALID_VL);
f0852922 2522 /* local link integrity must be right-shifted by the lli resolution */
3210314a
JP
2523 error_counter_summary += (read_dev_cntr(dd, C_DC_RX_REPLAY,
2524 CNTR_INVALID_VL) >> res_lli);
f0852922
AL
 2525 /* link error recovery must be right-shifted by the ler resolution */
2526 tmp = read_dev_cntr(dd, C_DC_SEQ_CRC_CNT, CNTR_INVALID_VL);
2527 tmp += read_dev_cntr(dd, C_DC_REINIT_FROM_PEER_CNT, CNTR_INVALID_VL);
2528 error_counter_summary += (tmp >> res_ler);
77241056 2529 error_counter_summary += read_dev_cntr(dd, C_DC_RCV_ERR,
17fb4f29 2530 CNTR_INVALID_VL);
77241056
MM
2531 error_counter_summary += read_dev_cntr(dd, C_RCV_OVF, CNTR_INVALID_VL);
2532 error_counter_summary += read_dev_cntr(dd, C_DC_FM_CFG_ERR,
17fb4f29 2533 CNTR_INVALID_VL);
77241056
MM
2534 /* ppd->link_downed is a 32-bit value */
2535 error_counter_summary += read_port_cntr(ppd, C_SW_LINK_DOWN,
2536 CNTR_INVALID_VL);
2537 tmp = read_dev_cntr(dd, C_DC_UNC_ERR, CNTR_INVALID_VL);
2538 /* this is an 8-bit quantity */
2539 error_counter_summary += tmp < 0x100 ? (tmp & 0xff) : 0xff;
2540
2541 return error_counter_summary;
2542}
2543
d9d3e025 2544static void a0_datacounters(struct hfi1_pportdata *ppd, struct _port_dctrs *rsp,
77241056
MM
2545 u32 vl_select_mask)
2546{
d9d3e025 2547 if (!is_bx(ppd->dd)) {
77241056 2548 unsigned long vl;
db00a055 2549 u64 sum_vl_xmit_wait = 0;
d9d3e025 2550 u32 vl_all_mask = VL_MASK_ALL;
db00a055 2551
d9d3e025
IW
2552 for_each_set_bit(vl, (unsigned long *)&(vl_all_mask),
2553 8 * sizeof(vl_all_mask)) {
77241056 2554 u64 tmp = sum_vl_xmit_wait +
d9d3e025
IW
2555 read_port_cntr(ppd, C_TX_WAIT_VL,
2556 idx_from_vl(vl));
77241056
MM
2557 if (tmp < sum_vl_xmit_wait) {
2558 /* we wrapped */
50e5dcbe 2559 sum_vl_xmit_wait = (u64)~0;
77241056
MM
2560 break;
2561 }
2562 sum_vl_xmit_wait = tmp;
2563 }
2564 if (be64_to_cpu(rsp->port_xmit_wait) > sum_vl_xmit_wait)
2565 rsp->port_xmit_wait = cpu_to_be64(sum_vl_xmit_wait);
2566 }
2567}
2568
b8d114eb
SS
2569static void pma_get_opa_port_dctrs(struct ib_device *ibdev,
2570 struct _port_dctrs *rsp)
2571{
2572 struct hfi1_devdata *dd = dd_from_ibdev(ibdev);
2573
2574 rsp->port_xmit_data = cpu_to_be64(read_dev_cntr(dd, C_DC_XMIT_FLITS,
2575 CNTR_INVALID_VL));
2576 rsp->port_rcv_data = cpu_to_be64(read_dev_cntr(dd, C_DC_RCV_FLITS,
2577 CNTR_INVALID_VL));
2578 rsp->port_xmit_pkts = cpu_to_be64(read_dev_cntr(dd, C_DC_XMIT_PKTS,
2579 CNTR_INVALID_VL));
2580 rsp->port_rcv_pkts = cpu_to_be64(read_dev_cntr(dd, C_DC_RCV_PKTS,
2581 CNTR_INVALID_VL));
2582 rsp->port_multicast_xmit_pkts =
2583 cpu_to_be64(read_dev_cntr(dd, C_DC_MC_XMIT_PKTS,
2584 CNTR_INVALID_VL));
2585 rsp->port_multicast_rcv_pkts =
2586 cpu_to_be64(read_dev_cntr(dd, C_DC_MC_RCV_PKTS,
2587 CNTR_INVALID_VL));
2588}
2589
77241056 2590static int pma_get_opa_datacounters(struct opa_pma_mad *pmp,
17fb4f29
JJ
2591 struct ib_device *ibdev,
2592 u8 port, u32 *resp_len)
77241056
MM
2593{
2594 struct opa_port_data_counters_msg *req =
2595 (struct opa_port_data_counters_msg *)pmp->data;
2596 struct hfi1_devdata *dd = dd_from_ibdev(ibdev);
2597 struct hfi1_ibport *ibp = to_iport(ibdev, port);
2598 struct hfi1_pportdata *ppd = ppd_from_ibp(ibp);
2599 struct _port_dctrs *rsp;
2600 struct _vls_dctrs *vlinfo;
2601 size_t response_data_size;
2602 u32 num_ports;
2603 u8 num_pslm;
2604 u8 lq, num_vls;
f0852922 2605 u8 res_lli, res_ler;
77241056
MM
2606 u64 port_mask;
2607 unsigned long port_num;
2608 unsigned long vl;
2609 u32 vl_select_mask;
2610 int vfi;
2611
2612 num_ports = be32_to_cpu(pmp->mad_hdr.attr_mod) >> 24;
2613 num_pslm = hweight64(be64_to_cpu(req->port_select_mask[3]));
2614 num_vls = hweight32(be32_to_cpu(req->vl_select_mask));
2615 vl_select_mask = be32_to_cpu(req->vl_select_mask);
f0852922
AL
2616 res_lli = (u8)(be32_to_cpu(req->resolution) & MSK_LLI) >> MSK_LLI_SFT;
2617 res_lli = res_lli ? res_lli + ADD_LLI : 0;
2618 res_ler = (u8)(be32_to_cpu(req->resolution) & MSK_LER) >> MSK_LER_SFT;
2619 res_ler = res_ler ? res_ler + ADD_LER : 0;
77241056
MM
2620
2621 if (num_ports != 1 || (vl_select_mask & ~VL_MASK_ALL)) {
2622 pmp->mad_hdr.status |= IB_SMP_INVALID_FIELD;
2623 return reply((struct ib_mad_hdr *)pmp);
2624 }
2625
2626 /* Sanity check */
2627 response_data_size = sizeof(struct opa_port_data_counters_msg) +
2628 num_vls * sizeof(struct _vls_dctrs);
2629
2630 if (response_data_size > sizeof(pmp->data)) {
2631 pmp->mad_hdr.status |= IB_SMP_INVALID_FIELD;
2632 return reply((struct ib_mad_hdr *)pmp);
2633 }
2634
2635 /*
2636 * The bit set in the mask needs to be consistent with the
2637 * port the request came in on.
2638 */
2639 port_mask = be64_to_cpu(req->port_select_mask[3]);
2640 port_num = find_first_bit((unsigned long *)&port_mask,
2641 sizeof(port_mask));
2642
2643 if ((u8)port_num != port) {
2644 pmp->mad_hdr.status |= IB_SMP_INVALID_FIELD;
2645 return reply((struct ib_mad_hdr *)pmp);
2646 }
2647
acc17d67 2648 rsp = &req->port[0];
77241056
MM
2649 memset(rsp, 0, sizeof(*rsp));
2650
2651 rsp->port_number = port;
2652 /*
 2653 * Note that link_quality_indicator is a 32-bit quantity in
2654 * 'datacounters' queries (as opposed to 'portinfo' queries,
2655 * where it's a byte).
2656 */
2657 hfi1_read_link_quality(dd, &lq);
2658 rsp->link_quality_indicator = cpu_to_be32((u32)lq);
b8d114eb 2659 pma_get_opa_port_dctrs(ibdev, rsp);
77241056 2660
77241056
MM
2661 rsp->port_xmit_wait =
2662 cpu_to_be64(read_port_cntr(ppd, C_TX_WAIT, CNTR_INVALID_VL));
2663 rsp->port_rcv_fecn =
2664 cpu_to_be64(read_dev_cntr(dd, C_DC_RCV_FCN, CNTR_INVALID_VL));
2665 rsp->port_rcv_becn =
2666 cpu_to_be64(read_dev_cntr(dd, C_DC_RCV_BCN, CNTR_INVALID_VL));
77241056 2667 rsp->port_error_counter_summary =
f0852922
AL
2668 cpu_to_be64(get_error_counter_summary(ibdev, port,
2669 res_lli, res_ler));
77241056 2670
58721b8f 2671 vlinfo = &rsp->vls[0];
77241056
MM
2672 vfi = 0;
2673 /* The vl_select_mask has been checked above, and we know
2674 * that it contains only entries which represent valid VLs.
2675 * So in the for_each_set_bit() loop below, we don't need
2676 * any additional checks for vl.
2677 */
2678 for_each_set_bit(vl, (unsigned long *)&(vl_select_mask),
17fb4f29 2679 8 * sizeof(req->vl_select_mask)) {
77241056
MM
2680 memset(vlinfo, 0, sizeof(*vlinfo));
2681
2682 rsp->vls[vfi].port_vl_xmit_data =
2683 cpu_to_be64(read_port_cntr(ppd, C_TX_FLIT_VL,
17fb4f29 2684 idx_from_vl(vl)));
77241056
MM
2685
2686 rsp->vls[vfi].port_vl_rcv_data =
2687 cpu_to_be64(read_dev_cntr(dd, C_DC_RX_FLIT_VL,
17fb4f29 2688 idx_from_vl(vl)));
77241056
MM
2689
2690 rsp->vls[vfi].port_vl_xmit_pkts =
2691 cpu_to_be64(read_port_cntr(ppd, C_TX_PKT_VL,
17fb4f29 2692 idx_from_vl(vl)));
77241056
MM
2693
2694 rsp->vls[vfi].port_vl_rcv_pkts =
2695 cpu_to_be64(read_dev_cntr(dd, C_DC_RX_PKT_VL,
17fb4f29 2696 idx_from_vl(vl)));
77241056
MM
2697
2698 rsp->vls[vfi].port_vl_xmit_wait =
2699 cpu_to_be64(read_port_cntr(ppd, C_TX_WAIT_VL,
17fb4f29 2700 idx_from_vl(vl)));
77241056
MM
2701
2702 rsp->vls[vfi].port_vl_rcv_fecn =
2703 cpu_to_be64(read_dev_cntr(dd, C_DC_RCV_FCN_VL,
17fb4f29 2704 idx_from_vl(vl)));
77241056
MM
2705 rsp->vls[vfi].port_vl_rcv_becn =
2706 cpu_to_be64(read_dev_cntr(dd, C_DC_RCV_BCN_VL,
17fb4f29 2707 idx_from_vl(vl)));
77241056
MM
2708
2709 /* rsp->port_vl_xmit_time_cong is 0 for HFIs */
2710 /* rsp->port_vl_xmit_wasted_bw ??? */
2711 /* port_vl_xmit_wait_data - TXE (table 13-9 HFI spec) ???
4d114fdd
JJ
 2712 * how does this differ from rsp->vls[vfi].port_vl_xmit_wait?
2713 */
77241056 2714 /*rsp->vls[vfi].port_vl_mark_fecn =
4d114fdd
JJ
2715 * cpu_to_be64(read_csr(dd, DCC_PRF_PORT_VL_MARK_FECN_CNT
2716 * + offset));
2717 */
77241056
MM
2718 vlinfo++;
2719 vfi++;
2720 }
2721
d9d3e025 2722 a0_datacounters(ppd, rsp, vl_select_mask);
77241056
MM
2723
2724 if (resp_len)
2725 *resp_len += response_data_size;
2726
2727 return reply((struct ib_mad_hdr *)pmp);
2728}
2729
b8d114eb
SS
2730static int pma_get_ib_portcounters_ext(struct ib_pma_mad *pmp,
2731 struct ib_device *ibdev, u8 port)
2732{
2733 struct ib_pma_portcounters_ext *p = (struct ib_pma_portcounters_ext *)
2734 pmp->data;
2735 struct _port_dctrs rsp;
2736
2737 if (pmp->mad_hdr.attr_mod != 0 || p->port_select != port) {
2738 pmp->mad_hdr.status |= IB_SMP_INVALID_FIELD;
2739 goto bail;
2740 }
2741
2742 memset(&rsp, 0, sizeof(rsp));
2743 pma_get_opa_port_dctrs(ibdev, &rsp);
2744
2745 p->port_xmit_data = rsp.port_xmit_data;
2746 p->port_rcv_data = rsp.port_rcv_data;
2747 p->port_xmit_packets = rsp.port_xmit_pkts;
2748 p->port_rcv_packets = rsp.port_rcv_pkts;
2749 p->port_unicast_xmit_packets = 0;
2750 p->port_unicast_rcv_packets = 0;
2751 p->port_multicast_xmit_packets = rsp.port_multicast_xmit_pkts;
2752 p->port_multicast_rcv_packets = rsp.port_multicast_rcv_pkts;
2753
2754bail:
2755 return reply((struct ib_mad_hdr *)pmp);
2756}
2757
2758static void pma_get_opa_port_ectrs(struct ib_device *ibdev,
2759 struct _port_ectrs *rsp, u8 port)
2760{
2761 u64 tmp, tmp2;
2762 struct hfi1_devdata *dd = dd_from_ibdev(ibdev);
2763 struct hfi1_ibport *ibp = to_iport(ibdev, port);
2764 struct hfi1_pportdata *ppd = ppd_from_ibp(ibp);
2765
2766 tmp = read_dev_cntr(dd, C_DC_SEQ_CRC_CNT, CNTR_INVALID_VL);
2767 tmp2 = tmp + read_dev_cntr(dd, C_DC_REINIT_FROM_PEER_CNT,
2768 CNTR_INVALID_VL);
2769 if (tmp2 > (u32)UINT_MAX || tmp2 < tmp) {
2770 /* overflow/wrapped */
2771 rsp->link_error_recovery = cpu_to_be32(~0);
2772 } else {
2773 rsp->link_error_recovery = cpu_to_be32(tmp2);
2774 }
2775
2776 rsp->link_downed = cpu_to_be32(read_port_cntr(ppd, C_SW_LINK_DOWN,
2777 CNTR_INVALID_VL));
2778 rsp->port_rcv_errors =
2779 cpu_to_be64(read_dev_cntr(dd, C_DC_RCV_ERR, CNTR_INVALID_VL));
2780 rsp->port_rcv_remote_physical_errors =
2781 cpu_to_be64(read_dev_cntr(dd, C_DC_RMT_PHY_ERR,
2782 CNTR_INVALID_VL));
2783 rsp->port_rcv_switch_relay_errors = 0;
2784 rsp->port_xmit_discards =
2785 cpu_to_be64(read_port_cntr(ppd, C_SW_XMIT_DSCD,
2786 CNTR_INVALID_VL));
2787 rsp->port_xmit_constraint_errors =
2788 cpu_to_be64(read_port_cntr(ppd, C_SW_XMIT_CSTR_ERR,
2789 CNTR_INVALID_VL));
2790 rsp->port_rcv_constraint_errors =
2791 cpu_to_be64(read_port_cntr(ppd, C_SW_RCV_CSTR_ERR,
2792 CNTR_INVALID_VL));
3210314a
JP
2793 rsp->local_link_integrity_errors =
2794 cpu_to_be64(read_dev_cntr(dd, C_DC_RX_REPLAY,
2795 CNTR_INVALID_VL));
b8d114eb
SS
2796 rsp->excessive_buffer_overruns =
2797 cpu_to_be64(read_dev_cntr(dd, C_RCV_OVF, CNTR_INVALID_VL));
2798}
2799
77241056 2800static int pma_get_opa_porterrors(struct opa_pma_mad *pmp,
17fb4f29
JJ
2801 struct ib_device *ibdev,
2802 u8 port, u32 *resp_len)
77241056
MM
2803{
2804 size_t response_data_size;
2805 struct _port_ectrs *rsp;
eb2e557c 2806 u8 port_num;
77241056
MM
2807 struct opa_port_error_counters64_msg *req;
2808 struct hfi1_devdata *dd = dd_from_ibdev(ibdev);
2809 u32 num_ports;
2810 u8 num_pslm;
2811 u8 num_vls;
2812 struct hfi1_ibport *ibp;
2813 struct hfi1_pportdata *ppd;
2814 struct _vls_ectrs *vlinfo;
2815 unsigned long vl;
b8d114eb 2816 u64 port_mask, tmp;
77241056
MM
2817 u32 vl_select_mask;
2818 int vfi;
2819
2820 req = (struct opa_port_error_counters64_msg *)pmp->data;
2821
2822 num_ports = be32_to_cpu(pmp->mad_hdr.attr_mod) >> 24;
2823
2824 num_pslm = hweight64(be64_to_cpu(req->port_select_mask[3]));
2825 num_vls = hweight32(be32_to_cpu(req->vl_select_mask));
2826
2827 if (num_ports != 1 || num_ports != num_pslm) {
2828 pmp->mad_hdr.status |= IB_SMP_INVALID_FIELD;
2829 return reply((struct ib_mad_hdr *)pmp);
2830 }
2831
2832 response_data_size = sizeof(struct opa_port_error_counters64_msg) +
2833 num_vls * sizeof(struct _vls_ectrs);
2834
2835 if (response_data_size > sizeof(pmp->data)) {
2836 pmp->mad_hdr.status |= IB_SMP_INVALID_FIELD;
2837 return reply((struct ib_mad_hdr *)pmp);
2838 }
2839 /*
2840 * The bit set in the mask needs to be consistent with the
2841 * port the request came in on.
2842 */
2843 port_mask = be64_to_cpu(req->port_select_mask[3]);
2844 port_num = find_first_bit((unsigned long *)&port_mask,
17fb4f29 2845 sizeof(port_mask));
77241056 2846
eb2e557c 2847 if (port_num != port) {
77241056
MM
2848 pmp->mad_hdr.status |= IB_SMP_INVALID_FIELD;
2849 return reply((struct ib_mad_hdr *)pmp);
2850 }
2851
acc17d67 2852 rsp = &req->port[0];
77241056
MM
2853
2854 ibp = to_iport(ibdev, port_num);
2855 ppd = ppd_from_ibp(ibp);
2856
2857 memset(rsp, 0, sizeof(*rsp));
eb2e557c 2858 rsp->port_number = port_num;
77241056 2859
b8d114eb 2860 pma_get_opa_port_ectrs(ibdev, rsp, port_num);
77241056 2861
77241056
MM
2862 rsp->port_rcv_remote_physical_errors =
2863 cpu_to_be64(read_dev_cntr(dd, C_DC_RMT_PHY_ERR,
b8d114eb 2864 CNTR_INVALID_VL));
77241056
MM
2865 rsp->fm_config_errors =
2866 cpu_to_be64(read_dev_cntr(dd, C_DC_FM_CFG_ERR,
17fb4f29 2867 CNTR_INVALID_VL));
77241056 2868 tmp = read_dev_cntr(dd, C_DC_UNC_ERR, CNTR_INVALID_VL);
b8d114eb 2869
77241056 2870 rsp->uncorrectable_errors = tmp < 0x100 ? (tmp & 0xff) : 0xff;
2b719046
JP
2871 rsp->port_rcv_errors =
2872 cpu_to_be64(read_dev_cntr(dd, C_DC_RCV_ERR, CNTR_INVALID_VL));
acc17d67 2873 vlinfo = &rsp->vls[0];
77241056
MM
2874 vfi = 0;
2875 vl_select_mask = be32_to_cpu(req->vl_select_mask);
2876 for_each_set_bit(vl, (unsigned long *)&(vl_select_mask),
2877 8 * sizeof(req->vl_select_mask)) {
2878 memset(vlinfo, 0, sizeof(*vlinfo));
583eb8b8
JP
2879 rsp->vls[vfi].port_vl_xmit_discards =
2880 cpu_to_be64(read_port_cntr(ppd, C_SW_XMIT_DSCD_VL,
2881 idx_from_vl(vl)));
77241056
MM
2882 vlinfo += 1;
2883 vfi++;
2884 }
2885
2886 if (resp_len)
2887 *resp_len += response_data_size;
2888
2889 return reply((struct ib_mad_hdr *)pmp);
2890}
2891
b8d114eb
SS
2892static int pma_get_ib_portcounters(struct ib_pma_mad *pmp,
2893 struct ib_device *ibdev, u8 port)
2894{
2895 struct ib_pma_portcounters *p = (struct ib_pma_portcounters *)
2896 pmp->data;
2897 struct _port_ectrs rsp;
2898 u64 temp_link_overrun_errors;
2899 u64 temp_64;
2900 u32 temp_32;
2901
2902 memset(&rsp, 0, sizeof(rsp));
2903 pma_get_opa_port_ectrs(ibdev, &rsp, port);
2904
2905 if (pmp->mad_hdr.attr_mod != 0 || p->port_select != port) {
2906 pmp->mad_hdr.status |= IB_SMP_INVALID_FIELD;
2907 goto bail;
2908 }
2909
2910 p->symbol_error_counter = 0; /* N/A for OPA */
2911
2912 temp_32 = be32_to_cpu(rsp.link_error_recovery);
2913 if (temp_32 > 0xFFUL)
2914 p->link_error_recovery_counter = 0xFF;
2915 else
2916 p->link_error_recovery_counter = (u8)temp_32;
2917
2918 temp_32 = be32_to_cpu(rsp.link_downed);
2919 if (temp_32 > 0xFFUL)
2920 p->link_downed_counter = 0xFF;
2921 else
2922 p->link_downed_counter = (u8)temp_32;
2923
2924 temp_64 = be64_to_cpu(rsp.port_rcv_errors);
2925 if (temp_64 > 0xFFFFUL)
2926 p->port_rcv_errors = cpu_to_be16(0xFFFF);
2927 else
2928 p->port_rcv_errors = cpu_to_be16((u16)temp_64);
2929
2930 temp_64 = be64_to_cpu(rsp.port_rcv_remote_physical_errors);
2931 if (temp_64 > 0xFFFFUL)
2932 p->port_rcv_remphys_errors = cpu_to_be16(0xFFFF);
2933 else
2934 p->port_rcv_remphys_errors = cpu_to_be16((u16)temp_64);
2935
2936 temp_64 = be64_to_cpu(rsp.port_rcv_switch_relay_errors);
2937 p->port_rcv_switch_relay_errors = cpu_to_be16((u16)temp_64);
2938
2939 temp_64 = be64_to_cpu(rsp.port_xmit_discards);
2940 if (temp_64 > 0xFFFFUL)
2941 p->port_xmit_discards = cpu_to_be16(0xFFFF);
2942 else
2943 p->port_xmit_discards = cpu_to_be16((u16)temp_64);
2944
2945 temp_64 = be64_to_cpu(rsp.port_xmit_constraint_errors);
2946 if (temp_64 > 0xFFUL)
2947 p->port_xmit_constraint_errors = 0xFF;
2948 else
2949 p->port_xmit_constraint_errors = (u8)temp_64;
2950
2951 temp_64 = be64_to_cpu(rsp.port_rcv_constraint_errors);
2952 if (temp_64 > 0xFFUL)
2953 p->port_rcv_constraint_errors = 0xFFUL;
2954 else
2955 p->port_rcv_constraint_errors = (u8)temp_64;
2956
2957 /* LocalLink: 7:4, BufferOverrun: 3:0 */
2958 temp_64 = be64_to_cpu(rsp.local_link_integrity_errors);
2959 if (temp_64 > 0xFUL)
2960 temp_64 = 0xFUL;
2961
2962 temp_link_overrun_errors = temp_64 << 4;
2963
2964 temp_64 = be64_to_cpu(rsp.excessive_buffer_overruns);
2965 if (temp_64 > 0xFUL)
2966 temp_64 = 0xFUL;
2967 temp_link_overrun_errors |= temp_64;
2968
2969 p->link_overrun_errors = (u8)temp_link_overrun_errors;
2970
2971 p->vl15_dropped = 0; /* N/A for OPA */
2972
2973bail:
2974 return reply((struct ib_mad_hdr *)pmp);
2975}
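
Editor's note: pma_get_ib_portcounters() repeatedly narrows 64-bit OPA counters into the 8- and 16-bit fields of the legacy IB PortCounters attribute by clamping at the field maximum. A minimal sketch of that clamp with hypothetical counter readings:

	#include <stdint.h>
	#include <stdio.h>

	/* sketch of the saturating narrowing used above */
	static uint16_t clamp_u16(uint64_t v)
	{
		return v > 0xFFFFULL ? 0xFFFF : (uint16_t)v;
	}

	static uint8_t clamp_u8(uint64_t v)
	{
		return v > 0xFFULL ? 0xFF : (uint8_t)v;
	}

	int main(void)
	{
		/* hypothetical counter readings */
		printf("%u %u\n", clamp_u16(70000), clamp_u8(300));	/* 65535 255 */
		return 0;
	}
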
2976
77241056 2977static int pma_get_opa_errorinfo(struct opa_pma_mad *pmp,
17fb4f29
JJ
2978 struct ib_device *ibdev,
2979 u8 port, u32 *resp_len)
77241056
MM
2980{
2981 size_t response_data_size;
2982 struct _port_ei *rsp;
2983 struct opa_port_error_info_msg *req;
2984 struct hfi1_devdata *dd = dd_from_ibdev(ibdev);
2985 u64 port_mask;
2986 u32 num_ports;
eb2e557c 2987 u8 port_num;
77241056
MM
2988 u8 num_pslm;
2989 u64 reg;
2990
2991 req = (struct opa_port_error_info_msg *)pmp->data;
acc17d67 2992 rsp = &req->port[0];
77241056
MM
2993
2994 num_ports = OPA_AM_NPORT(be32_to_cpu(pmp->mad_hdr.attr_mod));
2995 num_pslm = hweight64(be64_to_cpu(req->port_select_mask[3]));
2996
2997 memset(rsp, 0, sizeof(*rsp));
2998
2999 if (num_ports != 1 || num_ports != num_pslm) {
3000 pmp->mad_hdr.status |= IB_SMP_INVALID_FIELD;
3001 return reply((struct ib_mad_hdr *)pmp);
3002 }
3003
3004 /* Sanity check */
3005 response_data_size = sizeof(struct opa_port_error_info_msg);
3006
3007 if (response_data_size > sizeof(pmp->data)) {
3008 pmp->mad_hdr.status |= IB_SMP_INVALID_FIELD;
3009 return reply((struct ib_mad_hdr *)pmp);
3010 }
3011
3012 /*
3013 * The bit set in the mask needs to be consistent with the port
3014 * the request came in on.
3015 */
3016 port_mask = be64_to_cpu(req->port_select_mask[3]);
3017 port_num = find_first_bit((unsigned long *)&port_mask,
3018 sizeof(port_mask));
3019
eb2e557c 3020 if (port_num != port) {
77241056
MM
3021 pmp->mad_hdr.status |= IB_SMP_INVALID_FIELD;
3022 return reply((struct ib_mad_hdr *)pmp);
3023 }
3024
3025 /* PortRcvErrorInfo */
3026 rsp->port_rcv_ei.status_and_code =
3027 dd->err_info_rcvport.status_and_code;
3028 memcpy(&rsp->port_rcv_ei.ei.ei1to12.packet_flit1,
17fb4f29 3029 &dd->err_info_rcvport.packet_flit1, sizeof(u64));
77241056 3030 memcpy(&rsp->port_rcv_ei.ei.ei1to12.packet_flit2,
17fb4f29 3031 &dd->err_info_rcvport.packet_flit2, sizeof(u64));
77241056
MM
3032
 3033 /* ExcessiveBufferOverrunInfo */
3034 reg = read_csr(dd, RCV_ERR_INFO);
3035 if (reg & RCV_ERR_INFO_RCV_EXCESS_BUFFER_OVERRUN_SMASK) {
4d114fdd
JJ
3036 /*
3037 * if the RcvExcessBufferOverrun bit is set, save SC of
3038 * first pkt that encountered an excess buffer overrun
3039 */
77241056
MM
3040 u8 tmp = (u8)reg;
3041
3042 tmp &= RCV_ERR_INFO_RCV_EXCESS_BUFFER_OVERRUN_SC_SMASK;
3043 tmp <<= 2;
3044 rsp->excessive_buffer_overrun_ei.status_and_sc = tmp;
3045 /* set the status bit */
3046 rsp->excessive_buffer_overrun_ei.status_and_sc |= 0x80;
3047 }
3048
3049 rsp->port_xmit_constraint_ei.status =
3050 dd->err_info_xmit_constraint.status;
3051 rsp->port_xmit_constraint_ei.pkey =
3052 cpu_to_be16(dd->err_info_xmit_constraint.pkey);
3053 rsp->port_xmit_constraint_ei.slid =
3054 cpu_to_be32(dd->err_info_xmit_constraint.slid);
3055
3056 rsp->port_rcv_constraint_ei.status =
3057 dd->err_info_rcv_constraint.status;
3058 rsp->port_rcv_constraint_ei.pkey =
3059 cpu_to_be16(dd->err_info_rcv_constraint.pkey);
3060 rsp->port_rcv_constraint_ei.slid =
3061 cpu_to_be32(dd->err_info_rcv_constraint.slid);
3062
3063 /* UncorrectableErrorInfo */
3064 rsp->uncorrectable_ei.status_and_code = dd->err_info_uncorrectable;
3065
3066 /* FMConfigErrorInfo */
3067 rsp->fm_config_ei.status_and_code = dd->err_info_fmconfig;
3068
3069 if (resp_len)
3070 *resp_len += response_data_size;
3071
3072 return reply((struct ib_mad_hdr *)pmp);
3073}
3074
3075static int pma_set_opa_portstatus(struct opa_pma_mad *pmp,
17fb4f29
JJ
3076 struct ib_device *ibdev,
3077 u8 port, u32 *resp_len)
77241056
MM
3078{
3079 struct opa_clear_port_status *req =
3080 (struct opa_clear_port_status *)pmp->data;
3081 struct hfi1_devdata *dd = dd_from_ibdev(ibdev);
3082 struct hfi1_ibport *ibp = to_iport(ibdev, port);
3083 struct hfi1_pportdata *ppd = ppd_from_ibp(ibp);
3084 u32 nports = be32_to_cpu(pmp->mad_hdr.attr_mod) >> 24;
3085 u64 portn = be64_to_cpu(req->port_select_mask[3]);
3086 u32 counter_select = be32_to_cpu(req->counter_select_mask);
3087 u32 vl_select_mask = VL_MASK_ALL; /* clear all per-vl cnts */
3088 unsigned long vl;
3089
3090 if ((nports != 1) || (portn != 1 << port)) {
3091 pmp->mad_hdr.status |= IB_SMP_INVALID_FIELD;
3092 return reply((struct ib_mad_hdr *)pmp);
3093 }
3094 /*
3095 * only counters returned by pma_get_opa_portstatus() are
3096 * handled, so when pma_get_opa_portstatus() gets a fix,
3097 * the corresponding change should be made here as well.
3098 */
3099
3100 if (counter_select & CS_PORT_XMIT_DATA)
3101 write_dev_cntr(dd, C_DC_XMIT_FLITS, CNTR_INVALID_VL, 0);
3102
3103 if (counter_select & CS_PORT_RCV_DATA)
3104 write_dev_cntr(dd, C_DC_RCV_FLITS, CNTR_INVALID_VL, 0);
3105
3106 if (counter_select & CS_PORT_XMIT_PKTS)
3107 write_dev_cntr(dd, C_DC_XMIT_PKTS, CNTR_INVALID_VL, 0);
3108
3109 if (counter_select & CS_PORT_RCV_PKTS)
3110 write_dev_cntr(dd, C_DC_RCV_PKTS, CNTR_INVALID_VL, 0);
3111
3112 if (counter_select & CS_PORT_MCAST_XMIT_PKTS)
3113 write_dev_cntr(dd, C_DC_MC_XMIT_PKTS, CNTR_INVALID_VL, 0);
3114
3115 if (counter_select & CS_PORT_MCAST_RCV_PKTS)
3116 write_dev_cntr(dd, C_DC_MC_RCV_PKTS, CNTR_INVALID_VL, 0);
3117
3118 if (counter_select & CS_PORT_XMIT_WAIT)
3119 write_port_cntr(ppd, C_TX_WAIT, CNTR_INVALID_VL, 0);
3120
 3121 /* ignore cs_sw_port_congestion for HFIs */
3122
3123 if (counter_select & CS_PORT_RCV_FECN)
3124 write_dev_cntr(dd, C_DC_RCV_FCN, CNTR_INVALID_VL, 0);
3125
3126 if (counter_select & CS_PORT_RCV_BECN)
3127 write_dev_cntr(dd, C_DC_RCV_BCN, CNTR_INVALID_VL, 0);
3128
3129 /* ignore cs_port_xmit_time_cong for HFIs */
3130 /* ignore cs_port_xmit_wasted_bw for now */
3131 /* ignore cs_port_xmit_wait_data for now */
3132 if (counter_select & CS_PORT_RCV_BUBBLE)
3133 write_dev_cntr(dd, C_DC_RCV_BBL, CNTR_INVALID_VL, 0);
3134
3135 /* Only applicable for switch */
4d114fdd
JJ
3136 /* if (counter_select & CS_PORT_MARK_FECN)
3137 * write_csr(dd, DCC_PRF_PORT_MARK_FECN_CNT, 0);
3138 */
77241056
MM
3139
3140 if (counter_select & CS_PORT_RCV_CONSTRAINT_ERRORS)
3141 write_port_cntr(ppd, C_SW_RCV_CSTR_ERR, CNTR_INVALID_VL, 0);
3142
3143 /* ignore cs_port_rcv_switch_relay_errors for HFIs */
3144 if (counter_select & CS_PORT_XMIT_DISCARDS)
3145 write_port_cntr(ppd, C_SW_XMIT_DSCD, CNTR_INVALID_VL, 0);
3146
3147 if (counter_select & CS_PORT_XMIT_CONSTRAINT_ERRORS)
3148 write_port_cntr(ppd, C_SW_XMIT_CSTR_ERR, CNTR_INVALID_VL, 0);
3149
3150 if (counter_select & CS_PORT_RCV_REMOTE_PHYSICAL_ERRORS)
3151 write_dev_cntr(dd, C_DC_RMT_PHY_ERR, CNTR_INVALID_VL, 0);
3152
3210314a 3153 if (counter_select & CS_LOCAL_LINK_INTEGRITY_ERRORS)
77241056 3154 write_dev_cntr(dd, C_DC_RX_REPLAY, CNTR_INVALID_VL, 0);
77241056
MM
3155
3156 if (counter_select & CS_LINK_ERROR_RECOVERY) {
3157 write_dev_cntr(dd, C_DC_SEQ_CRC_CNT, CNTR_INVALID_VL, 0);
3158 write_dev_cntr(dd, C_DC_REINIT_FROM_PEER_CNT,
17fb4f29 3159 CNTR_INVALID_VL, 0);
77241056
MM
3160 }
3161
3162 if (counter_select & CS_PORT_RCV_ERRORS)
3163 write_dev_cntr(dd, C_DC_RCV_ERR, CNTR_INVALID_VL, 0);
3164
3165 if (counter_select & CS_EXCESSIVE_BUFFER_OVERRUNS) {
3166 write_dev_cntr(dd, C_RCV_OVF, CNTR_INVALID_VL, 0);
3167 dd->rcv_ovfl_cnt = 0;
3168 }
3169
3170 if (counter_select & CS_FM_CONFIG_ERRORS)
3171 write_dev_cntr(dd, C_DC_FM_CFG_ERR, CNTR_INVALID_VL, 0);
3172
3173 if (counter_select & CS_LINK_DOWNED)
3174 write_port_cntr(ppd, C_SW_LINK_DOWN, CNTR_INVALID_VL, 0);
3175
3176 if (counter_select & CS_UNCORRECTABLE_ERRORS)
3177 write_dev_cntr(dd, C_DC_UNC_ERR, CNTR_INVALID_VL, 0);
3178
3179 for_each_set_bit(vl, (unsigned long *)&(vl_select_mask),
3180 8 * sizeof(vl_select_mask)) {
77241056
MM
3181 if (counter_select & CS_PORT_XMIT_DATA)
3182 write_port_cntr(ppd, C_TX_FLIT_VL, idx_from_vl(vl), 0);
3183
3184 if (counter_select & CS_PORT_RCV_DATA)
3185 write_dev_cntr(dd, C_DC_RX_FLIT_VL, idx_from_vl(vl), 0);
3186
3187 if (counter_select & CS_PORT_XMIT_PKTS)
3188 write_port_cntr(ppd, C_TX_PKT_VL, idx_from_vl(vl), 0);
3189
3190 if (counter_select & CS_PORT_RCV_PKTS)
3191 write_dev_cntr(dd, C_DC_RX_PKT_VL, idx_from_vl(vl), 0);
3192
3193 if (counter_select & CS_PORT_XMIT_WAIT)
3194 write_port_cntr(ppd, C_TX_WAIT_VL, idx_from_vl(vl), 0);
3195
3196 /* sw_port_vl_congestion is 0 for HFIs */
3197 if (counter_select & CS_PORT_RCV_FECN)
3198 write_dev_cntr(dd, C_DC_RCV_FCN_VL, idx_from_vl(vl), 0);
3199
3200 if (counter_select & CS_PORT_RCV_BECN)
3201 write_dev_cntr(dd, C_DC_RCV_BCN_VL, idx_from_vl(vl), 0);
3202
3203 /* port_vl_xmit_time_cong is 0 for HFIs */
3204 /* port_vl_xmit_wasted_bw ??? */
3205 /* port_vl_xmit_wait_data - TXE (table 13-9 HFI spec) ??? */
3206 if (counter_select & CS_PORT_RCV_BUBBLE)
3207 write_dev_cntr(dd, C_DC_RCV_BBL_VL, idx_from_vl(vl), 0);
3208
4d114fdd
JJ
3209 /* if (counter_select & CS_PORT_MARK_FECN)
3210 * write_csr(dd, DCC_PRF_PORT_VL_MARK_FECN_CNT + offset, 0);
3211 */
583eb8b8
JP
 3212 if (counter_select & CS_PORT_XMIT_DISCARDS)
3213 write_port_cntr(ppd, C_SW_XMIT_DSCD_VL,
3214 idx_from_vl(vl), 0);
77241056
MM
3215 }
3216
3217 if (resp_len)
3218 *resp_len += sizeof(*req);
3219
3220 return reply((struct ib_mad_hdr *)pmp);
3221}
3222
3223static int pma_set_opa_errorinfo(struct opa_pma_mad *pmp,
17fb4f29
JJ
3224 struct ib_device *ibdev,
3225 u8 port, u32 *resp_len)
77241056
MM
3226{
3227 struct _port_ei *rsp;
3228 struct opa_port_error_info_msg *req;
3229 struct hfi1_devdata *dd = dd_from_ibdev(ibdev);
3230 u64 port_mask;
3231 u32 num_ports;
eb2e557c 3232 u8 port_num;
77241056
MM
3233 u8 num_pslm;
3234 u32 error_info_select;
3235
3236 req = (struct opa_port_error_info_msg *)pmp->data;
acc17d67 3237 rsp = &req->port[0];
77241056
MM
3238
3239 num_ports = OPA_AM_NPORT(be32_to_cpu(pmp->mad_hdr.attr_mod));
3240 num_pslm = hweight64(be64_to_cpu(req->port_select_mask[3]));
3241
3242 memset(rsp, 0, sizeof(*rsp));
3243
3244 if (num_ports != 1 || num_ports != num_pslm) {
3245 pmp->mad_hdr.status |= IB_SMP_INVALID_FIELD;
3246 return reply((struct ib_mad_hdr *)pmp);
3247 }
3248
3249 /*
3250 * The bit set in the mask needs to be consistent with the port
3251 * the request came in on.
3252 */
3253 port_mask = be64_to_cpu(req->port_select_mask[3]);
3254 port_num = find_first_bit((unsigned long *)&port_mask,
3255 sizeof(port_mask));
3256
eb2e557c 3257 if (port_num != port) {
77241056
MM
3258 pmp->mad_hdr.status |= IB_SMP_INVALID_FIELD;
3259 return reply((struct ib_mad_hdr *)pmp);
3260 }
3261
3262 error_info_select = be32_to_cpu(req->error_info_select_mask);
3263
3264 /* PortRcvErrorInfo */
3265 if (error_info_select & ES_PORT_RCV_ERROR_INFO)
3266 /* turn off status bit */
3267 dd->err_info_rcvport.status_and_code &= ~OPA_EI_STATUS_SMASK;
3268
 3269 /* ExcessiveBufferOverrunInfo */
3270 if (error_info_select & ES_EXCESSIVE_BUFFER_OVERRUN_INFO)
4d114fdd
JJ
3271 /*
3272 * status bit is essentially kept in the h/w - bit 5 of
3273 * RCV_ERR_INFO
3274 */
77241056
MM
3275 write_csr(dd, RCV_ERR_INFO,
3276 RCV_ERR_INFO_RCV_EXCESS_BUFFER_OVERRUN_SMASK);
3277
3278 if (error_info_select & ES_PORT_XMIT_CONSTRAINT_ERROR_INFO)
3279 dd->err_info_xmit_constraint.status &= ~OPA_EI_STATUS_SMASK;
3280
3281 if (error_info_select & ES_PORT_RCV_CONSTRAINT_ERROR_INFO)
3282 dd->err_info_rcv_constraint.status &= ~OPA_EI_STATUS_SMASK;
3283
3284 /* UncorrectableErrorInfo */
3285 if (error_info_select & ES_UNCORRECTABLE_ERROR_INFO)
3286 /* turn off status bit */
3287 dd->err_info_uncorrectable &= ~OPA_EI_STATUS_SMASK;
3288
3289 /* FMConfigErrorInfo */
3290 if (error_info_select & ES_FM_CONFIG_ERROR_INFO)
3291 /* turn off status bit */
3292 dd->err_info_fmconfig &= ~OPA_EI_STATUS_SMASK;
3293
3294 if (resp_len)
3295 *resp_len += sizeof(*req);
3296
3297 return reply((struct ib_mad_hdr *)pmp);
3298}
3299
3300struct opa_congestion_info_attr {
3301 __be16 congestion_info;
3302 u8 control_table_cap; /* Multiple of 64 entry unit CCTs */
3303 u8 congestion_log_length;
3304} __packed;
3305
3306static int __subn_get_opa_cong_info(struct opa_smp *smp, u32 am, u8 *data,
3307 struct ib_device *ibdev, u8 port,
3308 u32 *resp_len)
3309{
3310 struct opa_congestion_info_attr *p =
3311 (struct opa_congestion_info_attr *)data;
3312 struct hfi1_ibport *ibp = to_iport(ibdev, port);
3313 struct hfi1_pportdata *ppd = ppd_from_ibp(ibp);
3314
3315 p->congestion_info = 0;
3316 p->control_table_cap = ppd->cc_max_table_entries;
3317 p->congestion_log_length = OPA_CONG_LOG_ELEMS;
3318
3319 if (resp_len)
3320 *resp_len += sizeof(*p);
3321
3322 return reply((struct ib_mad_hdr *)smp);
3323}
3324
3325static int __subn_get_opa_cong_setting(struct opa_smp *smp, u32 am,
17fb4f29
JJ
3326 u8 *data, struct ib_device *ibdev,
3327 u8 port, u32 *resp_len)
77241056
MM
3328{
3329 int i;
3330 struct opa_congestion_setting_attr *p =
50e5dcbe 3331 (struct opa_congestion_setting_attr *)data;
77241056
MM
3332 struct hfi1_ibport *ibp = to_iport(ibdev, port);
3333 struct hfi1_pportdata *ppd = ppd_from_ibp(ibp);
3334 struct opa_congestion_setting_entry_shadow *entries;
3335 struct cc_state *cc_state;
3336
3337 rcu_read_lock();
3338
3339 cc_state = get_cc_state(ppd);
3340
d125a6c6 3341 if (!cc_state) {
77241056
MM
3342 rcu_read_unlock();
3343 return reply((struct ib_mad_hdr *)smp);
3344 }
3345
3346 entries = cc_state->cong_setting.entries;
3347 p->port_control = cpu_to_be16(cc_state->cong_setting.port_control);
3348 p->control_map = cpu_to_be32(cc_state->cong_setting.control_map);
3349 for (i = 0; i < OPA_MAX_SLS; i++) {
3350 p->entries[i].ccti_increase = entries[i].ccti_increase;
3351 p->entries[i].ccti_timer = cpu_to_be16(entries[i].ccti_timer);
3352 p->entries[i].trigger_threshold =
3353 entries[i].trigger_threshold;
3354 p->entries[i].ccti_min = entries[i].ccti_min;
3355 }
3356
3357 rcu_read_unlock();
3358
3359 if (resp_len)
3360 *resp_len += sizeof(*p);
3361
3362 return reply((struct ib_mad_hdr *)smp);
3363}
3364
f036780b
DL
3365/*
3366 * Apply congestion control information stored in the ppd to the
3367 * active structure.
3368 */
3369static void apply_cc_state(struct hfi1_pportdata *ppd)
3370{
3371 struct cc_state *old_cc_state, *new_cc_state;
3372
3373 new_cc_state = kzalloc(sizeof(*new_cc_state), GFP_KERNEL);
3374 if (!new_cc_state)
3375 return;
3376
3377 /*
3378 * Hold the lock for updating *and* to prevent ppd information
3379 * from changing during the update.
3380 */
3381 spin_lock(&ppd->cc_state_lock);
3382
8adf71fa 3383 old_cc_state = get_cc_state_protected(ppd);
f036780b
DL
3384 if (!old_cc_state) {
3385 /* never active, or shutting down */
3386 spin_unlock(&ppd->cc_state_lock);
3387 kfree(new_cc_state);
3388 return;
3389 }
3390
3391 *new_cc_state = *old_cc_state;
3392
3393 new_cc_state->cct.ccti_limit = ppd->total_cct_entry - 1;
3394 memcpy(new_cc_state->cct.entries, ppd->ccti_entries,
3395 ppd->total_cct_entry * sizeof(struct ib_cc_table_entry));
3396
3397 new_cc_state->cong_setting.port_control = IB_CC_CCS_PC_SL_BASED;
3398 new_cc_state->cong_setting.control_map = ppd->cc_sl_control_map;
3399 memcpy(new_cc_state->cong_setting.entries, ppd->congestion_entries,
3400 OPA_MAX_SLS * sizeof(struct opa_congestion_setting_entry));
3401
3402 rcu_assign_pointer(ppd->cc_state, new_cc_state);
3403
3404 spin_unlock(&ppd->cc_state_lock);
3405
476d95bd 3406 kfree_rcu(old_cc_state, rcu);
3407}
3408
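/*
 * Set handler for OPA_ATTRIB_ID_HFI_CONGESTION_SETTING: save the new
 * per-SL settings in the ppd under cc_state_lock, publish them with
 * apply_cc_state(), and reply with the resulting active settings.
 */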
3409static int __subn_set_opa_cong_setting(struct opa_smp *smp, u32 am, u8 *data,
3410 struct ib_device *ibdev, u8 port,
3411 u32 *resp_len)
3412{
3413 struct opa_congestion_setting_attr *p =
50e5dcbe 3414 (struct opa_congestion_setting_attr *)data;
3415 struct hfi1_ibport *ibp = to_iport(ibdev, port);
3416 struct hfi1_pportdata *ppd = ppd_from_ibp(ibp);
3417 struct opa_congestion_setting_entry_shadow *entries;
3418 int i;
3419
3420 /*
3421 * Save details from the packet into the ppd. Hold the cc_state_lock so
3422 * our information is consistent with anyone trying to apply the state.
3423 */
3424 spin_lock(&ppd->cc_state_lock);
3425 ppd->cc_sl_control_map = be32_to_cpu(p->control_map);
3426
3427 entries = ppd->congestion_entries;
3428 for (i = 0; i < OPA_MAX_SLS; i++) {
3429 entries[i].ccti_increase = p->entries[i].ccti_increase;
3430 entries[i].ccti_timer = be16_to_cpu(p->entries[i].ccti_timer);
3431 entries[i].trigger_threshold =
3432 p->entries[i].trigger_threshold;
3433 entries[i].ccti_min = p->entries[i].ccti_min;
3434 }
3435 spin_unlock(&ppd->cc_state_lock);
3436
3437 /* now apply the information */
3438 apply_cc_state(ppd);
3439
3440 return __subn_get_opa_cong_setting(smp, am, data, ibdev, port,
3441 resp_len);
3442}
3443
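/*
 * Get handler for OPA_ATTRIB_ID_HFI_CONGESTION_LOG: snapshot the
 * congestion event log under cc_log_lock, oldest entry first, then
 * clear the threshold event map and counter now that the log has
 * been read.
 */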
3444static int __subn_get_opa_hfi1_cong_log(struct opa_smp *smp, u32 am,
3445 u8 *data, struct ib_device *ibdev,
3446 u8 port, u32 *resp_len)
3447{
3448 struct hfi1_ibport *ibp = to_iport(ibdev, port);
3449 struct hfi1_pportdata *ppd = ppd_from_ibp(ibp);
3450 struct opa_hfi1_cong_log *cong_log = (struct opa_hfi1_cong_log *)data;
3451 s64 ts;
3452 int i;
3453
3454 if (am != 0) {
3455 smp->status |= IB_SMP_INVALID_FIELD;
3456 return reply((struct ib_mad_hdr *)smp);
3457 }
3458
b77d713a 3459 spin_lock_irq(&ppd->cc_log_lock);
3460
3461 cong_log->log_type = OPA_CC_LOG_TYPE_HFI;
3462 cong_log->congestion_flags = 0;
3463 cong_log->threshold_event_counter =
3464 cpu_to_be16(ppd->threshold_event_counter);
3465 memcpy(cong_log->threshold_cong_event_map,
3466 ppd->threshold_cong_event_map,
3467 sizeof(cong_log->threshold_cong_event_map));
3468 /* keep timestamp in units of 1.024 usec */
3469 ts = ktime_to_ns(ktime_get()) / 1024;
3470 cong_log->current_time_stamp = cpu_to_be32(ts);
3471 for (i = 0; i < OPA_CONG_LOG_ELEMS; i++) {
3472 struct opa_hfi1_cong_log_event_internal *cce =
3473 &ppd->cc_events[ppd->cc_mad_idx++];
3474 if (ppd->cc_mad_idx == OPA_CONG_LOG_ELEMS)
3475 ppd->cc_mad_idx = 0;
3476 /*
3477 * Entries which are older than twice the time
3478 * required to wrap the counter are supposed to
3479 * be zeroed (CA10-49 IBTA, release 1.2.1, V1).
3480 */
3481 if ((u64)(ts - cce->timestamp) > (2 * UINT_MAX))
3482 continue;
3483 memcpy(cong_log->events[i].local_qp_cn_entry, &cce->lqpn, 3);
3484 memcpy(cong_log->events[i].remote_qp_number_cn_entry,
17fb4f29 3485 &cce->rqpn, 3);
3486 cong_log->events[i].sl_svc_type_cn_entry =
3487 ((cce->sl & 0x1f) << 3) | (cce->svc_type & 0x7);
3488 cong_log->events[i].remote_lid_cn_entry =
3489 cpu_to_be32(cce->rlid);
3490 cong_log->events[i].timestamp_cn_entry =
3491 cpu_to_be32(cce->timestamp);
3492 }
3493
3494 /*
3495 * Reset threshold_cong_event_map and threshold_event_counter
3496 * to 0 when the log is read.
3497 */
3498 memset(ppd->threshold_cong_event_map, 0x0,
3499 sizeof(ppd->threshold_cong_event_map));
3500 ppd->threshold_event_counter = 0;
3501
b77d713a 3502 spin_unlock_irq(&ppd->cc_log_lock);
3503
3504 if (resp_len)
3505 *resp_len += sizeof(struct opa_hfi1_cong_log);
3506
3507 return reply((struct ib_mad_hdr *)smp);
3508}
3509
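/*
 * Get handler for OPA_ATTRIB_ID_CONGESTION_CONTROL_TABLE: return
 * n_blocks of CCT entries starting at start_block, read from the
 * RCU-protected cc_state.
 */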
3510static int __subn_get_opa_cc_table(struct opa_smp *smp, u32 am, u8 *data,
3511 struct ib_device *ibdev, u8 port,
3512 u32 *resp_len)
3513{
3514 struct ib_cc_table_attr *cc_table_attr =
50e5dcbe 3515 (struct ib_cc_table_attr *)data;
3516 struct hfi1_ibport *ibp = to_iport(ibdev, port);
3517 struct hfi1_pportdata *ppd = ppd_from_ibp(ibp);
3518 u32 start_block = OPA_AM_START_BLK(am);
3519 u32 n_blocks = OPA_AM_NBLK(am);
3520 struct ib_cc_table_entry_shadow *entries;
3521 int i, j;
3522 u32 sentry, eentry;
3523 struct cc_state *cc_state;
3524
3525 /* sanity check n_blocks, start_block */
3526 if (n_blocks == 0 ||
3527 start_block + n_blocks > ppd->cc_max_table_entries) {
3528 smp->status |= IB_SMP_INVALID_FIELD;
3529 return reply((struct ib_mad_hdr *)smp);
3530 }
3531
3532 rcu_read_lock();
3533
3534 cc_state = get_cc_state(ppd);
3535
d125a6c6 3536 if (!cc_state) {
3537 rcu_read_unlock();
3538 return reply((struct ib_mad_hdr *)smp);
3539 }
3540
3541 sentry = start_block * IB_CCT_ENTRIES;
3542 eentry = sentry + (IB_CCT_ENTRIES * n_blocks);
3543
3544 cc_table_attr->ccti_limit = cpu_to_be16(cc_state->cct.ccti_limit);
3545
3546 entries = cc_state->cct.entries;
3547
3548 /* return n_blocks, though the last block may not be full */
3549 for (j = 0, i = sentry; i < eentry; j++, i++)
3550 cc_table_attr->ccti_entries[j].entry =
3551 cpu_to_be16(entries[i].entry);
3552
3553 rcu_read_unlock();
3554
3555 if (resp_len)
8638b77f 3556 *resp_len += sizeof(u16) * (IB_CCT_ENTRIES * n_blocks + 1);
3557
3558 return reply((struct ib_mad_hdr *)smp);
3559}
3560
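/*
 * Set handler for OPA_ATTRIB_ID_CONGESTION_CONTROL_TABLE: validate the
 * block range and ccti_limit, save the new entries in the ppd under
 * cc_state_lock, publish them with apply_cc_state(), and reply with
 * the active table.
 */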
3561static int __subn_set_opa_cc_table(struct opa_smp *smp, u32 am, u8 *data,
3562 struct ib_device *ibdev, u8 port,
3563 u32 *resp_len)
3564{
50e5dcbe 3565 struct ib_cc_table_attr *p = (struct ib_cc_table_attr *)data;
3566 struct hfi1_ibport *ibp = to_iport(ibdev, port);
3567 struct hfi1_pportdata *ppd = ppd_from_ibp(ibp);
3568 u32 start_block = OPA_AM_START_BLK(am);
3569 u32 n_blocks = OPA_AM_NBLK(am);
3570 struct ib_cc_table_entry_shadow *entries;
3571 int i, j;
3572 u32 sentry, eentry;
3573 u16 ccti_limit;
3574
3575 /* sanity check n_blocks, start_block */
3576 if (n_blocks == 0 ||
3577 start_block + n_blocks > ppd->cc_max_table_entries) {
3578 smp->status |= IB_SMP_INVALID_FIELD;
3579 return reply((struct ib_mad_hdr *)smp);
3580 }
3581
3582 sentry = start_block * IB_CCT_ENTRIES;
3583 eentry = sentry + ((n_blocks - 1) * IB_CCT_ENTRIES) +
3584 (be16_to_cpu(p->ccti_limit)) % IB_CCT_ENTRIES + 1;
3585
3586 /* sanity check ccti_limit */
3587 ccti_limit = be16_to_cpu(p->ccti_limit);
3588 if (ccti_limit + 1 > eentry) {
3589 smp->status |= IB_SMP_INVALID_FIELD;
3590 return reply((struct ib_mad_hdr *)smp);
3591 }
3592
3593 /*
3594 * Save details from the packet into the ppd. Hold the cc_state_lock so
3595 * our information is consistent with anyone trying to apply the state.
3596 */
77241056 3597 spin_lock(&ppd->cc_state_lock);
77241056 3598 ppd->total_cct_entry = ccti_limit + 1;
f036780b 3599 entries = ppd->ccti_entries;
3600 for (j = 0, i = sentry; i < eentry; j++, i++)
3601 entries[i].entry = be16_to_cpu(p->ccti_entries[j].entry);
3602 spin_unlock(&ppd->cc_state_lock);
3603
3604 /* now apply the information */
3605 apply_cc_state(ppd);
77241056 3606
3607 return __subn_get_opa_cc_table(smp, am, data, ibdev, port, resp_len);
3608}
3609
3610struct opa_led_info {
3611 __be32 rsvd_led_mask;
3612 __be32 rsvd;
3613};
3614
3615#define OPA_LED_SHIFT 31
349ac71f 3616#define OPA_LED_MASK BIT(OPA_LED_SHIFT)
3617
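/*
 * Get handler for IB_SMP_ATTR_LED_INFO: report whether LED beaconing
 * is currently active on the port.
 */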
3618static int __subn_get_opa_led_info(struct opa_smp *smp, u32 am, u8 *data,
3619 struct ib_device *ibdev, u8 port,
3620 u32 *resp_len)
3621{
3622 struct hfi1_devdata *dd = dd_from_ibdev(ibdev);
409b1462 3623 struct hfi1_pportdata *ppd = dd->pport;
50e5dcbe 3624 struct opa_led_info *p = (struct opa_led_info *)data;
77241056 3625 u32 nport = OPA_AM_NPORT(am);
409b1462 3626 u32 is_beaconing_active;
77241056 3627
801cfd6d 3628 if (nport != 1) {
3629 smp->status |= IB_SMP_INVALID_FIELD;
3630 return reply((struct ib_mad_hdr *)smp);
3631 }
3632
409b1462 3633 /*
3634 * This pairs with the memory barrier in hfi1_start_led_override to
3635 * ensure that we read the correct state of LED beaconing represented
3636 * by led_override_timer_active
409b1462 3637 */
2243472e 3638 smp_rmb();
3639 is_beaconing_active = !!atomic_read(&ppd->led_override_timer_active);
3640 p->rsvd_led_mask = cpu_to_be32(is_beaconing_active << OPA_LED_SHIFT);
3641
3642 if (resp_len)
3643 *resp_len += sizeof(struct opa_led_info);
3644
3645 return reply((struct ib_mad_hdr *)smp);
3646}
3647
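/*
 * Set handler for IB_SMP_ATTR_LED_INFO: start or stop LED beaconing
 * according to the requested LED mask bit, then reply with the
 * current state.
 */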
3648static int __subn_set_opa_led_info(struct opa_smp *smp, u32 am, u8 *data,
3649 struct ib_device *ibdev, u8 port,
3650 u32 *resp_len)
3651{
3652 struct hfi1_devdata *dd = dd_from_ibdev(ibdev);
50e5dcbe 3653 struct opa_led_info *p = (struct opa_led_info *)data;
3654 u32 nport = OPA_AM_NPORT(am);
3655 int on = !!(be32_to_cpu(p->rsvd_led_mask) & OPA_LED_MASK);
3656
801cfd6d 3657 if (nport != 1) {
3658 smp->status |= IB_SMP_INVALID_FIELD;
3659 return reply((struct ib_mad_hdr *)smp);
3660 }
3661
91ab4ed3 3662 if (on)
2243472e 3663 hfi1_start_led_override(dd->pport, 2000, 1500);
91ab4ed3 3664 else
2243472e 3665 shutdown_led_override(dd->pport);
3666
3667 return __subn_get_opa_led_info(smp, am, data, ibdev, port, resp_len);
3668}
3669
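/* Dispatch an OPA subnet management Get to its per-attribute handler. */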
3670static int subn_get_opa_sma(__be16 attr_id, struct opa_smp *smp, u32 am,
3671 u8 *data, struct ib_device *ibdev, u8 port,
3672 u32 *resp_len)
3673{
3674 int ret;
3675 struct hfi1_ibport *ibp = to_iport(ibdev, port);
3676
3677 switch (attr_id) {
3678 case IB_SMP_ATTR_NODE_DESC:
3679 ret = __subn_get_opa_nodedesc(smp, am, data, ibdev, port,
3680 resp_len);
3681 break;
3682 case IB_SMP_ATTR_NODE_INFO:
3683 ret = __subn_get_opa_nodeinfo(smp, am, data, ibdev, port,
3684 resp_len);
3685 break;
3686 case IB_SMP_ATTR_PORT_INFO:
3687 ret = __subn_get_opa_portinfo(smp, am, data, ibdev, port,
3688 resp_len);
3689 break;
3690 case IB_SMP_ATTR_PKEY_TABLE:
3691 ret = __subn_get_opa_pkeytable(smp, am, data, ibdev, port,
3692 resp_len);
3693 break;
3694 case OPA_ATTRIB_ID_SL_TO_SC_MAP:
3695 ret = __subn_get_opa_sl_to_sc(smp, am, data, ibdev, port,
3696 resp_len);
3697 break;
3698 case OPA_ATTRIB_ID_SC_TO_SL_MAP:
3699 ret = __subn_get_opa_sc_to_sl(smp, am, data, ibdev, port,
3700 resp_len);
3701 break;
3702 case OPA_ATTRIB_ID_SC_TO_VLT_MAP:
3703 ret = __subn_get_opa_sc_to_vlt(smp, am, data, ibdev, port,
3704 resp_len);
3705 break;
3706 case OPA_ATTRIB_ID_SC_TO_VLNT_MAP:
3707 ret = __subn_get_opa_sc_to_vlnt(smp, am, data, ibdev, port,
17fb4f29 3708 resp_len);
3709 break;
3710 case OPA_ATTRIB_ID_PORT_STATE_INFO:
3711 ret = __subn_get_opa_psi(smp, am, data, ibdev, port,
3712 resp_len);
3713 break;
3714 case OPA_ATTRIB_ID_BUFFER_CONTROL_TABLE:
3715 ret = __subn_get_opa_bct(smp, am, data, ibdev, port,
3716 resp_len);
3717 break;
3718 case OPA_ATTRIB_ID_CABLE_INFO:
3719 ret = __subn_get_opa_cable_info(smp, am, data, ibdev, port,
3720 resp_len);
3721 break;
3722 case IB_SMP_ATTR_VL_ARB_TABLE:
3723 ret = __subn_get_opa_vl_arb(smp, am, data, ibdev, port,
3724 resp_len);
3725 break;
3726 case OPA_ATTRIB_ID_CONGESTION_INFO:
3727 ret = __subn_get_opa_cong_info(smp, am, data, ibdev, port,
3728 resp_len);
3729 break;
3730 case OPA_ATTRIB_ID_HFI_CONGESTION_SETTING:
3731 ret = __subn_get_opa_cong_setting(smp, am, data, ibdev,
3732 port, resp_len);
3733 break;
3734 case OPA_ATTRIB_ID_HFI_CONGESTION_LOG:
3735 ret = __subn_get_opa_hfi1_cong_log(smp, am, data, ibdev,
3736 port, resp_len);
3737 break;
3738 case OPA_ATTRIB_ID_CONGESTION_CONTROL_TABLE:
3739 ret = __subn_get_opa_cc_table(smp, am, data, ibdev, port,
3740 resp_len);
3741 break;
3742 case IB_SMP_ATTR_LED_INFO:
3743 ret = __subn_get_opa_led_info(smp, am, data, ibdev, port,
3744 resp_len);
3745 break;
3746 case IB_SMP_ATTR_SM_INFO:
4eb06882 3747 if (ibp->rvp.port_cap_flags & IB_PORT_SM_DISABLED)
77241056 3748 return IB_MAD_RESULT_SUCCESS | IB_MAD_RESULT_CONSUMED;
4eb06882 3749 if (ibp->rvp.port_cap_flags & IB_PORT_SM)
3750 return IB_MAD_RESULT_SUCCESS;
3751 /* FALLTHROUGH */
3752 default:
3753 smp->status |= IB_SMP_UNSUP_METH_ATTR;
3754 ret = reply((struct ib_mad_hdr *)smp);
3755 break;
3756 }
3757 return ret;
3758}
3759
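/* Dispatch an OPA subnet management Set to its per-attribute handler. */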
3760static int subn_set_opa_sma(__be16 attr_id, struct opa_smp *smp, u32 am,
3761 u8 *data, struct ib_device *ibdev, u8 port,
3762 u32 *resp_len)
3763{
3764 int ret;
3765 struct hfi1_ibport *ibp = to_iport(ibdev, port);
3766
3767 switch (attr_id) {
3768 case IB_SMP_ATTR_PORT_INFO:
3769 ret = __subn_set_opa_portinfo(smp, am, data, ibdev, port,
3770 resp_len);
3771 break;
3772 case IB_SMP_ATTR_PKEY_TABLE:
3773 ret = __subn_set_opa_pkeytable(smp, am, data, ibdev, port,
3774 resp_len);
3775 break;
3776 case OPA_ATTRIB_ID_SL_TO_SC_MAP:
3777 ret = __subn_set_opa_sl_to_sc(smp, am, data, ibdev, port,
3778 resp_len);
3779 break;
3780 case OPA_ATTRIB_ID_SC_TO_SL_MAP:
3781 ret = __subn_set_opa_sc_to_sl(smp, am, data, ibdev, port,
3782 resp_len);
3783 break;
3784 case OPA_ATTRIB_ID_SC_TO_VLT_MAP:
3785 ret = __subn_set_opa_sc_to_vlt(smp, am, data, ibdev, port,
3786 resp_len);
3787 break;
3788 case OPA_ATTRIB_ID_SC_TO_VLNT_MAP:
3789 ret = __subn_set_opa_sc_to_vlnt(smp, am, data, ibdev, port,
17fb4f29 3790 resp_len);
3791 break;
3792 case OPA_ATTRIB_ID_PORT_STATE_INFO:
3793 ret = __subn_set_opa_psi(smp, am, data, ibdev, port,
3794 resp_len);
3795 break;
3796 case OPA_ATTRIB_ID_BUFFER_CONTROL_TABLE:
3797 ret = __subn_set_opa_bct(smp, am, data, ibdev, port,
3798 resp_len);
3799 break;
3800 case IB_SMP_ATTR_VL_ARB_TABLE:
3801 ret = __subn_set_opa_vl_arb(smp, am, data, ibdev, port,
3802 resp_len);
3803 break;
3804 case OPA_ATTRIB_ID_HFI_CONGESTION_SETTING:
3805 ret = __subn_set_opa_cong_setting(smp, am, data, ibdev,
3806 port, resp_len);
3807 break;
3808 case OPA_ATTRIB_ID_CONGESTION_CONTROL_TABLE:
3809 ret = __subn_set_opa_cc_table(smp, am, data, ibdev, port,
3810 resp_len);
3811 break;
3812 case IB_SMP_ATTR_LED_INFO:
3813 ret = __subn_set_opa_led_info(smp, am, data, ibdev, port,
3814 resp_len);
3815 break;
3816 case IB_SMP_ATTR_SM_INFO:
4eb06882 3817 if (ibp->rvp.port_cap_flags & IB_PORT_SM_DISABLED)
77241056 3818 return IB_MAD_RESULT_SUCCESS | IB_MAD_RESULT_CONSUMED;
4eb06882 3819 if (ibp->rvp.port_cap_flags & IB_PORT_SM)
3820 return IB_MAD_RESULT_SUCCESS;
3821 /* FALLTHROUGH */
3822 default:
3823 smp->status |= IB_SMP_UNSUP_METH_ATTR;
3824 ret = reply((struct ib_mad_hdr *)smp);
3825 break;
3826 }
3827 return ret;
3828}
3829
3830static inline void set_aggr_error(struct opa_aggregate *ag)
3831{
3832 ag->err_reqlength |= cpu_to_be16(0x8000);
3833}
3834
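/*
 * Get handler for OPA_ATTRIB_ID_AGGREGATE: walk the embedded attribute
 * segments, running the Get handler for each one in place; stop and
 * flag the failing segment if a handler reports an error status.
 */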
3835static int subn_get_opa_aggregate(struct opa_smp *smp,
3836 struct ib_device *ibdev, u8 port,
3837 u32 *resp_len)
3838{
3839 int i;
3840 u32 num_attr = be32_to_cpu(smp->attr_mod) & 0x000000ff;
3841 u8 *next_smp = opa_get_smp_data(smp);
3842
3843 if (num_attr < 1 || num_attr > 117) {
3844 smp->status |= IB_SMP_INVALID_FIELD;
3845 return reply((struct ib_mad_hdr *)smp);
3846 }
3847
3848 for (i = 0; i < num_attr; i++) {
3849 struct opa_aggregate *agg;
3850 size_t agg_data_len;
3851 size_t agg_size;
3852 u32 am;
3853
3854 agg = (struct opa_aggregate *)next_smp;
3855 agg_data_len = (be16_to_cpu(agg->err_reqlength) & 0x007f) * 8;
3856 agg_size = sizeof(*agg) + agg_data_len;
3857 am = be32_to_cpu(agg->attr_mod);
3858
3859 *resp_len += agg_size;
3860
3861 if (next_smp + agg_size > ((u8 *)smp) + sizeof(*smp)) {
3862 smp->status |= IB_SMP_INVALID_FIELD;
3863 return reply((struct ib_mad_hdr *)smp);
3864 }
3865
3866 /* zero the payload for this segment */
3867 memset(next_smp + sizeof(*agg), 0, agg_data_len);
3868
50e5dcbe 3869 (void)subn_get_opa_sma(agg->attr_id, smp, am, agg->data,
3870 ibdev, port, NULL);
3871 if (smp->status & ~IB_SMP_DIRECTION) {
3872 set_aggr_error(agg);
3873 return reply((struct ib_mad_hdr *)smp);
3874 }
3875 next_smp += agg_size;
3876 }
3877
3878 return reply((struct ib_mad_hdr *)smp);
3879}
3880
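/*
 * Set handler for OPA_ATTRIB_ID_AGGREGATE: walk the embedded attribute
 * segments, running the Set handler for each one in place; stop and
 * flag the failing segment if a handler reports an error status.
 */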
3881static int subn_set_opa_aggregate(struct opa_smp *smp,
3882 struct ib_device *ibdev, u8 port,
3883 u32 *resp_len)
3884{
3885 int i;
3886 u32 num_attr = be32_to_cpu(smp->attr_mod) & 0x000000ff;
3887 u8 *next_smp = opa_get_smp_data(smp);
3888
3889 if (num_attr < 1 || num_attr > 117) {
3890 smp->status |= IB_SMP_INVALID_FIELD;
3891 return reply((struct ib_mad_hdr *)smp);
3892 }
3893
3894 for (i = 0; i < num_attr; i++) {
3895 struct opa_aggregate *agg;
3896 size_t agg_data_len;
3897 size_t agg_size;
3898 u32 am;
3899
3900 agg = (struct opa_aggregate *)next_smp;
3901 agg_data_len = (be16_to_cpu(agg->err_reqlength) & 0x007f) * 8;
3902 agg_size = sizeof(*agg) + agg_data_len;
3903 am = be32_to_cpu(agg->attr_mod);
3904
3905 *resp_len += agg_size;
3906
3907 if (next_smp + agg_size > ((u8 *)smp) + sizeof(*smp)) {
3908 smp->status |= IB_SMP_INVALID_FIELD;
3909 return reply((struct ib_mad_hdr *)smp);
3910 }
3911
50e5dcbe 3912 (void)subn_set_opa_sma(agg->attr_id, smp, am, agg->data,
3913 ibdev, port, NULL);
3914 if (smp->status & ~IB_SMP_DIRECTION) {
3915 set_aggr_error(agg);
3916 return reply((struct ib_mad_hdr *)smp);
3917 }
3918 next_smp += agg_size;
3919 }
3920
3921 return reply((struct ib_mad_hdr *)smp);
3922}
3923
3924/*
3925 * OPAv1 specifies that, on the transition to link up, these counters
3926 * are cleared:
3927 * PortRcvErrors [*]
3928 * LinkErrorRecovery
3929 * LocalLinkIntegrityErrors
3930 * ExcessiveBufferOverruns [*]
3931 *
3932 * [*] Error info associated with these counters is retained, but the
3933 * error info status is reset to 0.
3934 */
3935void clear_linkup_counters(struct hfi1_devdata *dd)
3936{
3937 /* PortRcvErrors */
3938 write_dev_cntr(dd, C_DC_RCV_ERR, CNTR_INVALID_VL, 0);
3939 dd->err_info_rcvport.status_and_code &= ~OPA_EI_STATUS_SMASK;
3940 /* LinkErrorRecovery */
3941 write_dev_cntr(dd, C_DC_SEQ_CRC_CNT, CNTR_INVALID_VL, 0);
3942 write_dev_cntr(dd, C_DC_REINIT_FROM_PEER_CNT, CNTR_INVALID_VL, 0);
3943 /* LocalLinkIntegrityErrors */
3944 write_dev_cntr(dd, C_DC_RX_REPLAY, CNTR_INVALID_VL, 0);
3945 /* ExcessiveBufferOverruns */
3946 write_dev_cntr(dd, C_RCV_OVF, CNTR_INVALID_VL, 0);
3947 dd->rcv_ovfl_cnt = 0;
3948 dd->err_info_xmit_constraint.status &= ~OPA_EI_STATUS_SMASK;
3949}
3950
3951/*
3952 * is_local_mad() returns 1 if 'mad' was sent from, and is destined to,
3953 * the local node; 0 otherwise.
3954 */
3955static int is_local_mad(struct hfi1_ibport *ibp, const struct opa_mad *mad,
3956 const struct ib_wc *in_wc)
3957{
3958 struct hfi1_pportdata *ppd = ppd_from_ibp(ibp);
3959 const struct opa_smp *smp = (const struct opa_smp *)mad;
3960
3961 if (smp->mgmt_class == IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE) {
3962 return (smp->hop_cnt == 0 &&
3963 smp->route.dr.dr_slid == OPA_LID_PERMISSIVE &&
3964 smp->route.dr.dr_dlid == OPA_LID_PERMISSIVE);
3965 }
3966
3967 return (in_wc->slid == ppd->lid);
3968}
3969
3970/*
3971 * opa_local_smp_check() should only be called on MADs for which
3972 * is_local_mad() returns true. It applies the SMP checks that are
3973 * specific to SMPs which are sent from, and destined to this node.
3974 * opa_local_smp_check() returns 0 if the SMP passes its checks, 1
3975 * otherwise.
3976 *
3977 * SMPs which arrive from other nodes are instead checked by
3978 * opa_smp_check().
3979 */
3980static int opa_local_smp_check(struct hfi1_ibport *ibp,
3981 const struct ib_wc *in_wc)
3982{
3983 struct hfi1_pportdata *ppd = ppd_from_ibp(ibp);
3984 u16 slid = in_wc->slid;
3985 u16 pkey;
3986
3987 if (in_wc->pkey_index >= ARRAY_SIZE(ppd->pkeys))
3988 return 1;
3989
3990 pkey = ppd->pkeys[in_wc->pkey_index];
3991 /*
3992 * We need to do the "node-local" checks specified in OPAv1,
3993 * rev 0.90, section 9.10.26, which are:
3994 * - pkey is 0x7fff, or 0xffff
3995 * - Source QPN == 0 || Destination QPN == 0
3996 * - the MAD header's management class is either
3997 * IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE or
3998 * IB_MGMT_CLASS_SUBN_LID_ROUTED
3999 * - SLID != 0
4000 *
4001 * However, we know (and so don't need to check again) that,
4002 * for local SMPs, the MAD stack passes MADs with:
4003 * - Source QPN of 0
4004 * - MAD mgmt_class is IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE
4005 * - SLID is either: OPA_LID_PERMISSIVE (0xFFFFFFFF), or
4006 * our own port's lid
4007 *
4008 */
4009 if (pkey == LIM_MGMT_P_KEY || pkey == FULL_MGMT_P_KEY)
4010 return 0;
4011 ingress_pkey_table_fail(ppd, pkey, slid);
4012 return 1;
4013}
4014
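/*
 * Process an OPA subnet management packet: check the class version and
 * M_Key, then dispatch by method (Get/Set, plain or aggregate).
 */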
4015static int process_subn_opa(struct ib_device *ibdev, int mad_flags,
4016 u8 port, const struct opa_mad *in_mad,
4017 struct opa_mad *out_mad,
4018 u32 *resp_len)
4019{
4020 struct opa_smp *smp = (struct opa_smp *)out_mad;
4021 struct hfi1_ibport *ibp = to_iport(ibdev, port);
4022 u8 *data;
4023 u32 am;
4024 __be16 attr_id;
4025 int ret;
4026
4027 *out_mad = *in_mad;
4028 data = opa_get_smp_data(smp);
4029
4030 am = be32_to_cpu(smp->attr_mod);
4031 attr_id = smp->attr_id;
4032 if (smp->class_version != OPA_SMI_CLASS_VERSION) {
4033 smp->status |= IB_SMP_UNSUP_VERSION;
4034 ret = reply((struct ib_mad_hdr *)smp);
5950e9b1 4035 return ret;
4036 }
4037 ret = check_mkey(ibp, (struct ib_mad_hdr *)smp, mad_flags, smp->mkey,
4038 smp->route.dr.dr_slid, smp->route.dr.return_path,
4039 smp->hop_cnt);
4040 if (ret) {
4041 u32 port_num = be32_to_cpu(smp->attr_mod);
4042
4043 /*
4044 * If this is a get/set portinfo, we already check the
4045 * M_Key if the MAD is for another port and the M_Key
4046 * is OK on the receiving port. This check is needed
4047 * to increment the error counters when the M_Key
4048 * fails to match on *both* ports.
4049 */
4050 if (attr_id == IB_SMP_ATTR_PORT_INFO &&
4051 (smp->method == IB_MGMT_METHOD_GET ||
4052 smp->method == IB_MGMT_METHOD_SET) &&
4053 port_num && port_num <= ibdev->phys_port_cnt &&
4054 port != port_num)
50e5dcbe 4055 (void)check_mkey(to_iport(ibdev, port_num),
4056 (struct ib_mad_hdr *)smp, 0,
4057 smp->mkey, smp->route.dr.dr_slid,
4058 smp->route.dr.return_path,
4059 smp->hop_cnt);
4060 ret = IB_MAD_RESULT_FAILURE;
5950e9b1 4061 return ret;
4062 }
4063
4064 *resp_len = opa_get_smp_header_size(smp);
4065
4066 switch (smp->method) {
4067 case IB_MGMT_METHOD_GET:
4068 switch (attr_id) {
4069 default:
4070 clear_opa_smp_data(smp);
4071 ret = subn_get_opa_sma(attr_id, smp, am, data,
4072 ibdev, port, resp_len);
5950e9b1 4073 break;
4074 case OPA_ATTRIB_ID_AGGREGATE:
4075 ret = subn_get_opa_aggregate(smp, ibdev, port,
4076 resp_len);
5950e9b1 4077 break;
77241056 4078 }
5950e9b1 4079 break;
4080 case IB_MGMT_METHOD_SET:
4081 switch (attr_id) {
4082 default:
4083 ret = subn_set_opa_sma(attr_id, smp, am, data,
4084 ibdev, port, resp_len);
5950e9b1 4085 break;
4086 case OPA_ATTRIB_ID_AGGREGATE:
4087 ret = subn_set_opa_aggregate(smp, ibdev, port,
4088 resp_len);
5950e9b1 4089 break;
77241056 4090 }
5950e9b1 4091 break;
4092 case IB_MGMT_METHOD_TRAP:
4093 case IB_MGMT_METHOD_REPORT:
4094 case IB_MGMT_METHOD_REPORT_RESP:
4095 case IB_MGMT_METHOD_GET_RESP:
4096 /*
4097 * The ib_mad module will call us to process responses
4098 * before checking for other consumers.
4099 * Just tell the caller to process it normally.
4100 */
4101 ret = IB_MAD_RESULT_SUCCESS;
5950e9b1 4102 break;
4103 default:
4104 smp->status |= IB_SMP_UNSUP_METHOD;
4105 ret = reply((struct ib_mad_hdr *)smp);
5950e9b1 4106 break;
4107 }
4108
4109 return ret;
4110}
4111
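/*
 * Process a plain IB subnet management packet; only the NodeInfo
 * Get is handled here.
 */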
4112static int process_subn(struct ib_device *ibdev, int mad_flags,
4113 u8 port, const struct ib_mad *in_mad,
4114 struct ib_mad *out_mad)
4115{
4116 struct ib_smp *smp = (struct ib_smp *)out_mad;
4117 struct hfi1_ibport *ibp = to_iport(ibdev, port);
4118 int ret;
4119
4120 *out_mad = *in_mad;
4121 if (smp->class_version != 1) {
4122 smp->status |= IB_SMP_UNSUP_VERSION;
4123 ret = reply((struct ib_mad_hdr *)smp);
5950e9b1 4124 return ret;
4125 }
4126
4127 ret = check_mkey(ibp, (struct ib_mad_hdr *)smp, mad_flags,
4128 smp->mkey, (__force __be32)smp->dr_slid,
4129 smp->return_path, smp->hop_cnt);
4130 if (ret) {
4131 u32 port_num = be32_to_cpu(smp->attr_mod);
4132
4133 /*
4134 * If this is a get/set portinfo, we already check the
4135 * M_Key if the MAD is for another port and the M_Key
4136 * is OK on the receiving port. This check is needed
4137 * to increment the error counters when the M_Key
4138 * fails to match on *both* ports.
4139 */
4140 if (in_mad->mad_hdr.attr_id == IB_SMP_ATTR_PORT_INFO &&
4141 (smp->method == IB_MGMT_METHOD_GET ||
4142 smp->method == IB_MGMT_METHOD_SET) &&
4143 port_num && port_num <= ibdev->phys_port_cnt &&
4144 port != port_num)
50e5dcbe 4145 (void)check_mkey(to_iport(ibdev, port_num),
4146 (struct ib_mad_hdr *)smp, 0,
4147 smp->mkey,
4148 (__force __be32)smp->dr_slid,
4149 smp->return_path, smp->hop_cnt);
77241056 4150 ret = IB_MAD_RESULT_FAILURE;
5950e9b1 4151 return ret;
4152 }
4153
4154 switch (smp->method) {
4155 case IB_MGMT_METHOD_GET:
4156 switch (smp->attr_id) {
4157 case IB_SMP_ATTR_NODE_INFO:
4158 ret = subn_get_nodeinfo(smp, ibdev, port);
5950e9b1 4159 break;
4160 default:
4161 smp->status |= IB_SMP_UNSUP_METH_ATTR;
4162 ret = reply((struct ib_mad_hdr *)smp);
5950e9b1 4163 break;
77241056 4164 }
5950e9b1 4165 break;
4166 }
4167
4168 return ret;
4169}
4170
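/*
 * Process an IB performance management packet (PortCounters,
 * PortCountersExt, ClassPortInfo).
 */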
4171static int process_perf(struct ib_device *ibdev, u8 port,
4172 const struct ib_mad *in_mad,
4173 struct ib_mad *out_mad)
4174{
4175 struct ib_pma_mad *pmp = (struct ib_pma_mad *)out_mad;
4176 struct ib_class_port_info *cpi = (struct ib_class_port_info *)
4177 &pmp->data;
4178 int ret = IB_MAD_RESULT_FAILURE;
4179
4180 *out_mad = *in_mad;
4181 if (pmp->mad_hdr.class_version != 1) {
4182 pmp->mad_hdr.status |= IB_SMP_UNSUP_VERSION;
4183 ret = reply((struct ib_mad_hdr *)pmp);
4184 return ret;
4185 }
4186
4187 switch (pmp->mad_hdr.method) {
4188 case IB_MGMT_METHOD_GET:
4189 switch (pmp->mad_hdr.attr_id) {
4190 case IB_PMA_PORT_COUNTERS:
4191 ret = pma_get_ib_portcounters(pmp, ibdev, port);
4192 break;
4193 case IB_PMA_PORT_COUNTERS_EXT:
4194 ret = pma_get_ib_portcounters_ext(pmp, ibdev, port);
4195 break;
4196 case IB_PMA_CLASS_PORT_INFO:
4197 cpi->capability_mask = IB_PMA_CLASS_CAP_EXT_WIDTH;
4198 ret = reply((struct ib_mad_hdr *)pmp);
4199 break;
4200 default:
4201 pmp->mad_hdr.status |= IB_SMP_UNSUP_METH_ATTR;
4202 ret = reply((struct ib_mad_hdr *)pmp);
4203 break;
4204 }
4205 break;
4206
4207 case IB_MGMT_METHOD_SET:
4208 if (pmp->mad_hdr.attr_id) {
4209 pmp->mad_hdr.status |= IB_SMP_UNSUP_METH_ATTR;
4210 ret = reply((struct ib_mad_hdr *)pmp);
4211 }
4212 break;
4213
4214 case IB_MGMT_METHOD_TRAP:
4215 case IB_MGMT_METHOD_GET_RESP:
4216 /*
4217 * The ib_mad module will call us to process responses
4218 * before checking for other consumers.
4219 * Just tell the caller to process it normally.
4220 */
4221 ret = IB_MAD_RESULT_SUCCESS;
4222 break;
4223
4224 default:
4225 pmp->mad_hdr.status |= IB_SMP_UNSUP_METHOD;
4226 ret = reply((struct ib_mad_hdr *)pmp);
4227 break;
4228 }
4229
4230 return ret;
4231}
4232
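/*
 * Process an OPA performance management packet: dispatch Gets and Sets
 * of the OPA PM attributes (class port info, port status, data and
 * error counters, error info).
 */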
4233static int process_perf_opa(struct ib_device *ibdev, u8 port,
4234 const struct opa_mad *in_mad,
4235 struct opa_mad *out_mad, u32 *resp_len)
4236{
4237 struct opa_pma_mad *pmp = (struct opa_pma_mad *)out_mad;
4238 int ret;
4239
4240 *out_mad = *in_mad;
4241
4242 if (pmp->mad_hdr.class_version != OPA_SMI_CLASS_VERSION) {
4243 pmp->mad_hdr.status |= IB_SMP_UNSUP_VERSION;
4244 return reply((struct ib_mad_hdr *)pmp);
4245 }
4246
4247 *resp_len = sizeof(pmp->mad_hdr);
4248
4249 switch (pmp->mad_hdr.method) {
4250 case IB_MGMT_METHOD_GET:
4251 switch (pmp->mad_hdr.attr_id) {
4252 case IB_PMA_CLASS_PORT_INFO:
4253 ret = pma_get_opa_classportinfo(pmp, ibdev, resp_len);
5950e9b1 4254 break;
4255 case OPA_PM_ATTRIB_ID_PORT_STATUS:
4256 ret = pma_get_opa_portstatus(pmp, ibdev, port,
17fb4f29 4257 resp_len);
5950e9b1 4258 break;
4259 case OPA_PM_ATTRIB_ID_DATA_PORT_COUNTERS:
4260 ret = pma_get_opa_datacounters(pmp, ibdev, port,
17fb4f29 4261 resp_len);
5950e9b1 4262 break;
4263 case OPA_PM_ATTRIB_ID_ERROR_PORT_COUNTERS:
4264 ret = pma_get_opa_porterrors(pmp, ibdev, port,
17fb4f29 4265 resp_len);
5950e9b1 4266 break;
4267 case OPA_PM_ATTRIB_ID_ERROR_INFO:
4268 ret = pma_get_opa_errorinfo(pmp, ibdev, port,
17fb4f29 4269 resp_len);
5950e9b1 4270 break;
4271 default:
4272 pmp->mad_hdr.status |= IB_SMP_UNSUP_METH_ATTR;
4273 ret = reply((struct ib_mad_hdr *)pmp);
5950e9b1 4274 break;
77241056 4275 }
5950e9b1 4276 break;
4277
4278 case IB_MGMT_METHOD_SET:
4279 switch (pmp->mad_hdr.attr_id) {
4280 case OPA_PM_ATTRIB_ID_CLEAR_PORT_STATUS:
4281 ret = pma_set_opa_portstatus(pmp, ibdev, port,
17fb4f29 4282 resp_len);
5950e9b1 4283 break;
4284 case OPA_PM_ATTRIB_ID_ERROR_INFO:
4285 ret = pma_set_opa_errorinfo(pmp, ibdev, port,
17fb4f29 4286 resp_len);
5950e9b1 4287 break;
4288 default:
4289 pmp->mad_hdr.status |= IB_SMP_UNSUP_METH_ATTR;
4290 ret = reply((struct ib_mad_hdr *)pmp);
5950e9b1 4291 break;
77241056 4292 }
5950e9b1 4293 break;
4294
4295 case IB_MGMT_METHOD_TRAP:
4296 case IB_MGMT_METHOD_GET_RESP:
4297 /*
4298 * The ib_mad module will call us to process responses
4299 * before checking for other consumers.
4300 * Just tell the caller to process it normally.
4301 */
4302 ret = IB_MAD_RESULT_SUCCESS;
5950e9b1 4303 break;
4304
4305 default:
4306 pmp->mad_hdr.status |= IB_SMP_UNSUP_METHOD;
4307 ret = reply((struct ib_mad_hdr *)pmp);
5950e9b1 4308 break;
4309 }
4310
4311 return ret;
4312}
4313
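/*
 * Handle an incoming OPA MAD: pick the P_Key index for the reply,
 * apply the local SMP checks where required, dispatch by management
 * class, and report the reply size via out_mad_size.
 */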
4314static int hfi1_process_opa_mad(struct ib_device *ibdev, int mad_flags,
4315 u8 port, const struct ib_wc *in_wc,
4316 const struct ib_grh *in_grh,
4317 const struct opa_mad *in_mad,
4318 struct opa_mad *out_mad, size_t *out_mad_size,
4319 u16 *out_mad_pkey_index)
4320{
4321 int ret;
4322 int pkey_idx;
4323 u32 resp_len = 0;
4324 struct hfi1_ibport *ibp = to_iport(ibdev, port);
4325
4326 pkey_idx = hfi1_lookup_pkey_idx(ibp, LIM_MGMT_P_KEY);
4327 if (pkey_idx < 0) {
4328 pr_warn("failed to find limited mgmt pkey, defaulting 0x%x\n",
4329 hfi1_get_pkey(ibp, 1));
4330 pkey_idx = 1;
4331 }
4332 *out_mad_pkey_index = (u16)pkey_idx;
4333
4334 switch (in_mad->mad_hdr.mgmt_class) {
4335 case IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE:
4336 case IB_MGMT_CLASS_SUBN_LID_ROUTED:
4337 if (is_local_mad(ibp, in_mad, in_wc)) {
4338 ret = opa_local_smp_check(ibp, in_wc);
4339 if (ret)
4340 return IB_MAD_RESULT_FAILURE;
4341 }
4342 ret = process_subn_opa(ibdev, mad_flags, port, in_mad,
4343 out_mad, &resp_len);
4344 goto bail;
4345 case IB_MGMT_CLASS_PERF_MGMT:
4346 ret = process_perf_opa(ibdev, port, in_mad, out_mad,
4347 &resp_len);
4348 goto bail;
4349
4350 default:
4351 ret = IB_MAD_RESULT_SUCCESS;
4352 }
4353
4354bail:
4355 if (ret & IB_MAD_RESULT_REPLY)
4356 *out_mad_size = round_up(resp_len, 8);
4357 else if (ret & IB_MAD_RESULT_SUCCESS)
4358 *out_mad_size = in_wc->byte_len - sizeof(struct ib_grh);
4359
4360 return ret;
4361}
4362
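/* Handle an incoming IB MAD by dispatching on its management class. */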
4363static int hfi1_process_ib_mad(struct ib_device *ibdev, int mad_flags, u8 port,
4364 const struct ib_wc *in_wc,
4365 const struct ib_grh *in_grh,
4366 const struct ib_mad *in_mad,
4367 struct ib_mad *out_mad)
4368{
4369 int ret;
4370
4371 switch (in_mad->mad_hdr.mgmt_class) {
4372 case IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE:
4373 case IB_MGMT_CLASS_SUBN_LID_ROUTED:
4374 ret = process_subn(ibdev, mad_flags, port, in_mad, out_mad);
5950e9b1 4375 break;
4376 case IB_MGMT_CLASS_PERF_MGMT:
4377 ret = process_perf(ibdev, port, in_mad, out_mad);
4378 break;
4379 default:
4380 ret = IB_MAD_RESULT_SUCCESS;
5950e9b1 4381 break;
4382 }
4383
4384 return ret;
4385}
4386
4387/**
4388 * hfi1_process_mad - process an incoming MAD packet
4389 * @ibdev: the infiniband device this packet came in on
4390 * @mad_flags: MAD flags
4391 * @port: the port number this packet came in on
4392 * @in_wc: the work completion entry for this packet
4393 * @in_grh: the global route header for this packet
4394 * @in_mad: the incoming MAD
4395 * @out_mad: any outgoing MAD reply
4396 *
4397 * Returns IB_MAD_RESULT_SUCCESS if this is a MAD that we are not
4398 * interested in processing.
4399 *
4400 * Note that the verbs framework has already done the MAD sanity checks,
4401 * and hop count/pointer updating for IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE
4402 * MADs.
4403 *
4404 * This is called by the ib_mad module.
4405 */
4406int hfi1_process_mad(struct ib_device *ibdev, int mad_flags, u8 port,
4407 const struct ib_wc *in_wc, const struct ib_grh *in_grh,
4408 const struct ib_mad_hdr *in_mad, size_t in_mad_size,
4409 struct ib_mad_hdr *out_mad, size_t *out_mad_size,
4410 u16 *out_mad_pkey_index)
4411{
4412 switch (in_mad->base_version) {
4413 case OPA_MGMT_BASE_VERSION:
4414 if (unlikely(in_mad_size != sizeof(struct opa_mad))) {
4415 dev_err(ibdev->dma_device, "invalid in_mad_size\n");
4416 return IB_MAD_RESULT_FAILURE;
4417 }
4418 return hfi1_process_opa_mad(ibdev, mad_flags, port,
4419 in_wc, in_grh,
4420 (struct opa_mad *)in_mad,
4421 (struct opa_mad *)out_mad,
4422 out_mad_size,
4423 out_mad_pkey_index);
4424 case IB_MGMT_BASE_VERSION:
4425 return hfi1_process_ib_mad(ibdev, mad_flags, port,
4426 in_wc, in_grh,
4427 (const struct ib_mad *)in_mad,
4428 (struct ib_mad *)out_mad);
4429 default:
4430 break;
4431 }
4432
4433 return IB_MAD_RESULT_FAILURE;
4434}