77241056 1/*
3ca4fbc8 2 * Copyright(c) 2015-2017 Intel Corporation.
3 *
4 * This file is provided under a dual BSD/GPLv2 license. When using or
5 * redistributing this file, you may do so under either license.
6 *
7 * GPL LICENSE SUMMARY
8 *
9 * This program is free software; you can redistribute it and/or modify
10 * it under the terms of version 2 of the GNU General Public License as
11 * published by the Free Software Foundation.
12 *
13 * This program is distributed in the hope that it will be useful, but
14 * WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
16 * General Public License for more details.
17 *
18 * BSD LICENSE
19 *
20 * Redistribution and use in source and binary forms, with or without
21 * modification, are permitted provided that the following conditions
22 * are met:
23 *
24 * - Redistributions of source code must retain the above copyright
25 * notice, this list of conditions and the following disclaimer.
26 * - Redistributions in binary form must reproduce the above copyright
27 * notice, this list of conditions and the following disclaimer in
28 * the documentation and/or other materials provided with the
29 * distribution.
30 * - Neither the name of Intel Corporation nor the names of its
31 * contributors may be used to endorse or promote products derived
32 * from this software without specific prior written permission.
33 *
34 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
35 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
36 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
37 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
38 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
39 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
40 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
41 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
42 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
43 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
44 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
45 *
46 */
47
48#include <linux/net.h>
13c19222 49#include <rdma/opa_addr.h>
50#define OPA_NUM_PKEY_BLOCKS_PER_SMP (OPA_SMP_DR_DATA_SIZE \
51 / (OPA_PARTITION_TABLE_BLK_SIZE * sizeof(u16)))
52
53#include "hfi.h"
54#include "mad.h"
55#include "trace.h"
0ec79e87 56#include "qp.h"
2280740f 57#include "vnic.h"
58
59/* the reset value from the FM is supposed to be 0xffff, handle both */
60#define OPA_LINK_WIDTH_RESET_OLD 0x0fff
61#define OPA_LINK_WIDTH_RESET 0xffff
62
63struct trap_node {
64 struct list_head list;
65 struct opa_mad_notice_attr data;
66 __be64 tid;
67 int len;
68 u32 retry;
69 u8 in_use;
70 u8 repress;
71};
72
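/*
 * Verify that the SMP payload is large enough to hold the attribute
 * data being read or written.
 */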
73static int smp_length_check(u32 data_size, u32 request_len)
74{
75 if (unlikely(request_len < data_size))
76 return -EINVAL;
77
78 return 0;
79}
80
81static int reply(struct ib_mad_hdr *smp)
82{
83 /*
84 * The verbs framework will handle the directed/LID route
85 * packet changes.
86 */
87 smp->method = IB_MGMT_METHOD_GET_RESP;
88 if (smp->mgmt_class == IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE)
89 smp->status |= IB_SMP_DIRECTION;
90 return IB_MAD_RESULT_SUCCESS | IB_MAD_RESULT_REPLY;
91}
92
93static inline void clear_opa_smp_data(struct opa_smp *smp)
94{
95 void *data = opa_get_smp_data(smp);
96 size_t size = opa_get_smp_data_size(smp);
97
98 memset(data, 0, size);
99}
100
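/* Return the pkey at the given index, or 0 if the index is out of range. */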
101static u16 hfi1_lookup_pkey_value(struct hfi1_ibport *ibp, int pkey_idx)
102{
103 struct hfi1_pportdata *ppd = ppd_from_ibp(ibp);
104
105 if (pkey_idx < ARRAY_SIZE(ppd->pkeys))
106 return ppd->pkeys[pkey_idx];
107
108 return 0;
109}
110
111void hfi1_event_pkey_change(struct hfi1_devdata *dd, u8 port)
112{
113 struct ib_event event;
114
115 event.event = IB_EVENT_PKEY_CHANGE;
116 event.device = &dd->verbs_dev.rdi.ibdev;
117 event.element.port_num = port;
118 ib_dispatch_event(&event);
119}
120
121/*
122 * If the port is down, clean up all pending traps. We need to be careful
123 * with the given trap, because it may be queued.
124 */
125static void cleanup_traps(struct hfi1_ibport *ibp, struct trap_node *trap)
126{
127 struct trap_node *node, *q;
128 unsigned long flags;
129 struct list_head trap_list;
130 int i;
131
132 for (i = 0; i < RVT_MAX_TRAP_LISTS; i++) {
133 spin_lock_irqsave(&ibp->rvp.lock, flags);
134 list_replace_init(&ibp->rvp.trap_lists[i].list, &trap_list);
135 ibp->rvp.trap_lists[i].list_len = 0;
136 spin_unlock_irqrestore(&ibp->rvp.lock, flags);
137
138 /*
139 * Remove all items from the list, freeing all the non-given
140 * traps.
141 */
142 list_for_each_entry_safe(node, q, &trap_list, list) {
143 list_del(&node->list);
144 if (node != trap)
145 kfree(node);
146 }
147 }
148
149 /*
150 * If this wasn't on one of the lists it would not be freed. If it
151 * was on the list, it is now safe to free.
152 */
153 kfree(trap);
154}
155
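/*
 * Queue the trap on its per-type list (bumping the retry count if it is
 * already queued, dropping it if the list is full).  If no trap timer is
 * pending, arm it and return the first trap to send now; otherwise return
 * NULL and let the timer handler send it later.
 */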
156static struct trap_node *check_and_add_trap(struct hfi1_ibport *ibp,
157 struct trap_node *trap)
158{
159 struct trap_node *node;
160 struct trap_list *trap_list;
161 unsigned long flags;
162 unsigned long timeout;
163 int found = 0;
164 unsigned int queue_id;
165 static int trap_count;
166
167 queue_id = trap->data.generic_type & 0x0F;
168 if (queue_id >= RVT_MAX_TRAP_LISTS) {
169 trap_count++;
170 pr_err_ratelimited("hfi1: Invalid trap 0x%0x dropped. Total dropped: %d\n",
d68e68e5 171 trap->data.generic_type, trap_count);
172 kfree(trap);
173 return NULL;
174 }
175
176 /*
177 * Since the retry (handle timeout) does not remove a trap request
178 * from the list, all we have to do is compare the node.
179 */
180 spin_lock_irqsave(&ibp->rvp.lock, flags);
ec0d8b8a 181 trap_list = &ibp->rvp.trap_lists[queue_id];
182
183 list_for_each_entry(node, &trap_list->list, list) {
184 if (node == trap) {
185 node->retry++;
186 found = 1;
187 break;
188 }
189 }
190
 191 /* If it is not on the list, add it, limited to RVT_MAX_TRAP_LEN. */
192 if (!found) {
193 if (trap_list->list_len < RVT_MAX_TRAP_LEN) {
194 trap_list->list_len++;
195 list_add_tail(&trap->list, &trap_list->list);
196 } else {
028e0a67 197 pr_warn_ratelimited("hfi1: Maximum trap limit reached for 0x%0x traps\n",
198 trap->data.generic_type);
199 kfree(trap);
200 }
201 }
202
203 /*
204 * Next check to see if there is a timer pending. If not, set it up
205 * and get the first trap from the list.
206 */
207 node = NULL;
208 if (!timer_pending(&ibp->rvp.trap_timer)) {
209 /*
210 * o14-2
211 * If the time out is set we have to wait until it expires
212 * before the trap can be sent.
213 * This should be > RVT_TRAP_TIMEOUT
214 */
215 timeout = (RVT_TRAP_TIMEOUT *
216 (1UL << ibp->rvp.subnet_timeout)) / 1000;
217 mod_timer(&ibp->rvp.trap_timer,
218 jiffies + usecs_to_jiffies(timeout));
219 node = list_first_entry(&trap_list->list, struct trap_node,
220 list);
221 node->in_use = 1;
222 }
223 spin_unlock_irqrestore(&ibp->rvp.lock, flags);
224
225 return node;
226}
227
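/*
 * Handle a TrapRepress() for a queued trap: if a send of that trap is in
 * flight, just mark it repressed; otherwise unlink and free it.
 */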
228static void subn_handle_opa_trap_repress(struct hfi1_ibport *ibp,
229 struct opa_smp *smp)
230{
231 struct trap_list *trap_list;
232 struct trap_node *trap;
233 unsigned long flags;
234 int i;
235
236 if (smp->attr_id != IB_SMP_ATTR_NOTICE)
237 return;
238
239 spin_lock_irqsave(&ibp->rvp.lock, flags);
240 for (i = 0; i < RVT_MAX_TRAP_LISTS; i++) {
241 trap_list = &ibp->rvp.trap_lists[i];
242 trap = list_first_entry_or_null(&trap_list->list,
243 struct trap_node, list);
244 if (trap && trap->tid == smp->tid) {
245 if (trap->in_use) {
246 trap->repress = 1;
247 } else {
248 trap_list->list_len--;
249 list_del(&trap->list);
250 kfree(trap);
251 }
252 break;
253 }
254 }
255 spin_unlock_irqrestore(&ibp->rvp.lock, flags);
256}
257
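/*
 * Fill in an address handle attribute for the given DLID.  Extended LIDs
 * (at or above the multicast base) need a GRH with the LID encoded in the
 * DGID.
 */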
258static void hfi1_update_sm_ah_attr(struct hfi1_ibport *ibp,
259 struct rdma_ah_attr *attr, u32 dlid)
260{
261 rdma_ah_set_dlid(attr, dlid);
262 rdma_ah_set_port_num(attr, ppd_from_ibp(ibp)->port);
263 if (dlid >= be16_to_cpu(IB_MULTICAST_LID_BASE)) {
264 struct ib_global_route *grh = rdma_ah_retrieve_grh(attr);
265
266 rdma_ah_set_ah_flags(attr, IB_AH_GRH);
267 grh->sgid_index = 0;
268 grh->hop_limit = 1;
269 grh->dgid.global.subnet_prefix =
270 ibp->rvp.gid_prefix;
271 grh->dgid.global.interface_id = OPA_MAKE_ID(dlid);
272 }
273}
274
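/* Re-target an existing SM address handle at a new DLID (QP0 must exist). */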
275static int hfi1_modify_qp0_ah(struct hfi1_ibport *ibp,
276 struct rvt_ah *ah, u32 dlid)
277{
278 struct rdma_ah_attr attr;
279 struct rvt_qp *qp0;
280 int ret = -EINVAL;
281
282 memset(&attr, 0, sizeof(attr));
283 attr.type = ah->ibah.type;
284 hfi1_update_sm_ah_attr(ibp, &attr, dlid);
285 rcu_read_lock();
286 qp0 = rcu_dereference(ibp->rvp.qp[0]);
287 if (qp0)
288 ret = rdma_modify_ah(&ah->ibah, &attr);
289 rcu_read_unlock();
290 return ret;
291}
292
293static struct ib_ah *hfi1_create_qp0_ah(struct hfi1_ibport *ibp, u32 dlid)
294{
295 struct rdma_ah_attr attr;
296 struct ib_ah *ah = ERR_PTR(-EINVAL);
297 struct rvt_qp *qp0;
298 struct hfi1_pportdata *ppd = ppd_from_ibp(ibp);
299 struct hfi1_devdata *dd = dd_from_ppd(ppd);
300 u8 port_num = ppd->port;
301
302 memset(&attr, 0, sizeof(attr));
303 attr.type = rdma_ah_find_type(&dd->verbs_dev.rdi.ibdev, port_num);
304 hfi1_update_sm_ah_attr(ibp, &attr, dlid);
305 rcu_read_lock();
306 qp0 = rcu_dereference(ibp->rvp.qp[0]);
307 if (qp0)
308 ah = rdma_create_ah(qp0->ibqp.pd, &attr);
309 rcu_read_unlock();
310 return ah;
311}
312
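/*
 * Queue the trap if necessary and, when the port is active and the trap
 * timer allows it, build a Trap() SMP and post it to the SM through the
 * port's QP0 MAD agent.
 */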
bf90aadd 313static void send_trap(struct hfi1_ibport *ibp, struct trap_node *trap)
314{
315 struct ib_mad_send_buf *send_buf;
316 struct ib_mad_agent *agent;
5cd24119 317 struct opa_smp *smp;
77241056 318 unsigned long flags;
319 int pkey_idx;
320 u32 qpn = ppd_from_ibp(ibp)->sm_trap_qp;
321
4eb06882 322 agent = ibp->rvp.send_agent;
323 if (!agent) {
324 cleanup_traps(ibp, trap);
77241056 325 return;
bf90aadd 326 }
327
328 /* o14-3.2.1 */
329 if (driver_lstate(ppd_from_ibp(ibp)) != IB_PORT_ACTIVE) {
330 cleanup_traps(ibp, trap);
77241056 331 return;
bf90aadd 332 }
77241056 333
334 /* Add the trap to the list if necessary and see if we can send it */
335 trap = check_and_add_trap(ibp, trap);
336 if (!trap)
337 return;
338
339 pkey_idx = hfi1_lookup_pkey_idx(ibp, LIM_MGMT_P_KEY);
340 if (pkey_idx < 0) {
341 pr_warn("%s: failed to find limited mgmt pkey, defaulting 0x%x\n",
342 __func__, hfi1_get_pkey(ibp, 1));
343 pkey_idx = 1;
344 }
345
346 send_buf = ib_create_send_mad(agent, qpn, pkey_idx, 0,
347 IB_MGMT_MAD_HDR, IB_MGMT_MAD_DATA,
348 GFP_ATOMIC, IB_MGMT_BASE_VERSION);
349 if (IS_ERR(send_buf))
350 return;
351
352 smp = send_buf->mad;
5cd24119 353 smp->base_version = OPA_MGMT_BASE_VERSION;
77241056 354 smp->mgmt_class = IB_MGMT_CLASS_SUBN_LID_ROUTED;
9fa240bb 355 smp->class_version = OPA_SM_CLASS_VERSION;
77241056 356 smp->method = IB_MGMT_METHOD_TRAP;
357
358 /* Only update the transaction ID for new traps (o13-5). */
359 if (trap->tid == 0) {
360 ibp->rvp.tid++;
361 /* make sure that tid != 0 */
362 if (ibp->rvp.tid == 0)
363 ibp->rvp.tid++;
364 trap->tid = cpu_to_be64(ibp->rvp.tid);
365 }
366 smp->tid = trap->tid;
367
368 smp->attr_id = IB_SMP_ATTR_NOTICE;
369 /* o14-1: smp->mkey = 0; */
370
371 memcpy(smp->route.lid.data, &trap->data, trap->len);
77241056 372
4eb06882 373 spin_lock_irqsave(&ibp->rvp.lock, flags);
9c4a311e 374 if (!ibp->rvp.sm_ah) {
4eb06882 375 if (ibp->rvp.sm_lid != be16_to_cpu(IB_LID_PERMISSIVE)) {
376 struct ib_ah *ah;
377
4eb06882 378 ah = hfi1_create_qp0_ah(ibp, ibp->rvp.sm_lid);
e490974e 379 if (IS_ERR(ah)) {
380 spin_unlock_irqrestore(&ibp->rvp.lock, flags);
381 return;
77241056 382 }
383 send_buf->ah = ah;
384 ibp->rvp.sm_ah = ibah_to_rvtah(ah);
e490974e 385 } else {
386 spin_unlock_irqrestore(&ibp->rvp.lock, flags);
387 return;
e490974e 388 }
77241056 389 } else {
9c4a311e 390 send_buf->ah = &ibp->rvp.sm_ah->ibah;
77241056 391 }
392
393 /*
394 * If the trap was repressed while things were getting set up, don't
395 * bother sending it. This could happen for a retry.
396 */
397 if (trap->repress) {
398 list_del(&trap->list);
399 spin_unlock_irqrestore(&ibp->rvp.lock, flags);
400 kfree(trap);
401 ib_free_send_mad(send_buf);
402 return;
403 }
404
405 trap->in_use = 0;
4eb06882 406 spin_unlock_irqrestore(&ibp->rvp.lock, flags);
77241056 407
bf90aadd 408 if (ib_post_send_mad(send_buf, NULL))
77241056 409 ib_free_send_mad(send_buf);
410}
411
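/* Trap timer callback: (re)send the highest priority pending trap, if any. */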
8064135e 412void hfi1_handle_trap_timer(struct timer_list *t)
bf90aadd 413{
8064135e 414 struct hfi1_ibport *ibp = from_timer(ibp, t, rvp.trap_timer);
415 struct trap_node *trap = NULL;
416 unsigned long flags;
417 int i;
418
419 /* Find the trap with the highest priority */
420 spin_lock_irqsave(&ibp->rvp.lock, flags);
421 for (i = 0; !trap && i < RVT_MAX_TRAP_LISTS; i++) {
422 trap = list_first_entry_or_null(&ibp->rvp.trap_lists[i].list,
423 struct trap_node, list);
77241056 424 }
425 spin_unlock_irqrestore(&ibp->rvp.lock, flags);
426
427 if (trap)
428 send_trap(ibp, trap);
429}
430
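/*
 * Allocate a trap_node and fill in the notice fields common to all traps:
 * generic type, producer type, trap number and issuer LID.
 */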
431static struct trap_node *create_trap_node(u8 type, __be16 trap_num, u32 lid)
432{
433 struct trap_node *trap;
434
435 trap = kzalloc(sizeof(*trap), GFP_ATOMIC);
436 if (!trap)
437 return NULL;
438
439 INIT_LIST_HEAD(&trap->list);
440 trap->data.generic_type = type;
441 trap->data.prod_type_lsb = IB_NOTICE_PROD_CA;
442 trap->data.trap_num = trap_num;
443 trap->data.issuer_lid = cpu_to_be32(lid);
444
445 return trap;
446}
447
448/*
13d84914 449 * Send a bad P_Key trap (ch. 14.3.8).
77241056 450 */
13d84914 451void hfi1_bad_pkey(struct hfi1_ibport *ibp, u32 key, u32 sl,
88733e3b 452 u32 qp1, u32 qp2, u32 lid1, u32 lid2)
77241056 453{
bf90aadd 454 struct trap_node *trap;
5cd24119 455 u32 lid = ppd_from_ibp(ibp)->lid;
77241056 456
4eb06882 457 ibp->rvp.n_pkt_drops++;
13d84914 458 ibp->rvp.pkey_violations++;
77241056 459
460 trap = create_trap_node(IB_NOTICE_TYPE_SECURITY, OPA_TRAP_BAD_P_KEY,
461 lid);
462 if (!trap)
463 return;
464
77241056 465 /* Send violation trap */
466 trap->data.ntc_257_258.lid1 = cpu_to_be32(lid1);
467 trap->data.ntc_257_258.lid2 = cpu_to_be32(lid2);
468 trap->data.ntc_257_258.key = cpu_to_be32(key);
469 trap->data.ntc_257_258.sl = sl << 3;
470 trap->data.ntc_257_258.qp1 = cpu_to_be32(qp1);
471 trap->data.ntc_257_258.qp2 = cpu_to_be32(qp2);
472
473 trap->len = sizeof(trap->data);
474 send_trap(ibp, trap);
475}
476
477/*
478 * Send a bad M_Key trap (ch. 14.3.9).
479 */
480static void bad_mkey(struct hfi1_ibport *ibp, struct ib_mad_hdr *mad,
481 __be64 mkey, __be32 dr_slid, u8 return_path[], u8 hop_cnt)
482{
bf90aadd 483 struct trap_node *trap;
5cd24119 484 u32 lid = ppd_from_ibp(ibp)->lid;
77241056 485
486 trap = create_trap_node(IB_NOTICE_TYPE_SECURITY, OPA_TRAP_BAD_M_KEY,
487 lid);
488 if (!trap)
489 return;
490
77241056 491 /* Send violation trap */
492 trap->data.ntc_256.lid = trap->data.issuer_lid;
493 trap->data.ntc_256.method = mad->method;
494 trap->data.ntc_256.attr_id = mad->attr_id;
495 trap->data.ntc_256.attr_mod = mad->attr_mod;
496 trap->data.ntc_256.mkey = mkey;
77241056 497 if (mad->mgmt_class == IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE) {
498 trap->data.ntc_256.dr_slid = dr_slid;
499 trap->data.ntc_256.dr_trunc_hop = IB_NOTICE_TRAP_DR_NOTICE;
500 if (hop_cnt > ARRAY_SIZE(trap->data.ntc_256.dr_rtn_path)) {
501 trap->data.ntc_256.dr_trunc_hop |=
77241056 502 IB_NOTICE_TRAP_DR_TRUNC;
bf90aadd 503 hop_cnt = ARRAY_SIZE(trap->data.ntc_256.dr_rtn_path);
77241056 504 }
505 trap->data.ntc_256.dr_trunc_hop |= hop_cnt;
506 memcpy(trap->data.ntc_256.dr_rtn_path, return_path,
507 hop_cnt);
508 }
509
510 trap->len = sizeof(trap->data);
511
512 send_trap(ibp, trap);
513}
514
515/*
516 * Send a Port Capability Mask Changed trap (ch. 14.3.11).
517 */
45b59eef 518void hfi1_cap_mask_chg(struct rvt_dev_info *rdi, u8 port_num)
77241056 519{
bf90aadd 520 struct trap_node *trap;
521 struct hfi1_ibdev *verbs_dev = dev_from_rdi(rdi);
522 struct hfi1_devdata *dd = dd_from_dev(verbs_dev);
523 struct hfi1_ibport *ibp = &dd->pport[port_num - 1].ibport_data;
524 u32 lid = ppd_from_ibp(ibp)->lid;
525
526 trap = create_trap_node(IB_NOTICE_TYPE_INFO,
527 OPA_TRAP_CHANGE_CAPABILITY,
528 lid);
529 if (!trap)
530 return;
77241056 531
532 trap->data.ntc_144.lid = trap->data.issuer_lid;
533 trap->data.ntc_144.new_cap_mask = cpu_to_be32(ibp->rvp.port_cap_flags);
534 trap->data.ntc_144.cap_mask3 = cpu_to_be16(ibp->rvp.port_cap3_flags);
77241056 535
536 trap->len = sizeof(trap->data);
537 send_trap(ibp, trap);
538}
539
540/*
541 * Send a System Image GUID Changed trap (ch. 14.3.12).
542 */
543void hfi1_sys_guid_chg(struct hfi1_ibport *ibp)
544{
bf90aadd 545 struct trap_node *trap;
546 u32 lid = ppd_from_ibp(ibp)->lid;
547
548 trap = create_trap_node(IB_NOTICE_TYPE_INFO, OPA_TRAP_CHANGE_SYSGUID,
549 lid);
550 if (!trap)
551 return;
77241056 552
553 trap->data.ntc_145.new_sys_guid = ib_hfi1_sys_image_guid;
554 trap->data.ntc_145.lid = trap->data.issuer_lid;
77241056 555
556 trap->len = sizeof(trap->data);
557 send_trap(ibp, trap);
558}
559
560/*
561 * Send a Node Description Changed trap (ch. 14.3.13).
562 */
563void hfi1_node_desc_chg(struct hfi1_ibport *ibp)
564{
bf90aadd 565 struct trap_node *trap;
566 u32 lid = ppd_from_ibp(ibp)->lid;
567
568 trap = create_trap_node(IB_NOTICE_TYPE_INFO,
569 OPA_TRAP_CHANGE_CAPABILITY,
570 lid);
571 if (!trap)
572 return;
77241056 573
574 trap->data.ntc_144.lid = trap->data.issuer_lid;
575 trap->data.ntc_144.change_flags =
5cd24119 576 cpu_to_be16(OPA_NOTICE_TRAP_NODE_DESC_CHG);
77241056 577
578 trap->len = sizeof(trap->data);
579 send_trap(ibp, trap);
580}
581
582static int __subn_get_opa_nodedesc(struct opa_smp *smp, u32 am,
583 u8 *data, struct ib_device *ibdev,
f1685179 584 u8 port, u32 *resp_len, u32 max_len)
585{
586 struct opa_node_description *nd;
587
f1685179 588 if (am || smp_length_check(sizeof(*nd), max_len)) {
589 smp->status |= IB_SMP_INVALID_FIELD;
590 return reply((struct ib_mad_hdr *)smp);
591 }
592
593 nd = (struct opa_node_description *)data;
594
595 memcpy(nd->data, ibdev->node_desc, sizeof(nd->data));
596
597 if (resp_len)
598 *resp_len += sizeof(*nd);
599
600 return reply((struct ib_mad_hdr *)smp);
601}
602
603static int __subn_get_opa_nodeinfo(struct opa_smp *smp, u32 am, u8 *data,
604 struct ib_device *ibdev, u8 port,
f1685179 605 u32 *resp_len, u32 max_len)
606{
607 struct opa_node_info *ni;
608 struct hfi1_devdata *dd = dd_from_ibdev(ibdev);
609 unsigned pidx = port - 1; /* IB number port from 1, hw from 0 */
610
611 ni = (struct opa_node_info *)data;
612
613 /* GUID 0 is illegal */
a6cd5f08 614 if (am || pidx >= dd->num_pports || ibdev->node_guid == 0 ||
f1685179 615 smp_length_check(sizeof(*ni), max_len) ||
a6cd5f08 616 get_sguid(to_iport(ibdev, port), HFI1_PORT_GUID_INDEX) == 0) {
617 smp->status |= IB_SMP_INVALID_FIELD;
618 return reply((struct ib_mad_hdr *)smp);
619 }
620
a6cd5f08 621 ni->port_guid = get_sguid(to_iport(ibdev, port), HFI1_PORT_GUID_INDEX);
77241056 622 ni->base_version = OPA_MGMT_BASE_VERSION;
9fa240bb 623 ni->class_version = OPA_SM_CLASS_VERSION;
624 ni->node_type = 1; /* channel adapter */
625 ni->num_ports = ibdev->phys_port_cnt;
626 /* This is already in network order */
627 ni->system_image_guid = ib_hfi1_sys_image_guid;
a6cd5f08 628 ni->node_guid = ibdev->node_guid;
629 ni->partition_cap = cpu_to_be16(hfi1_get_npkeys(dd));
630 ni->device_id = cpu_to_be16(dd->pcidev->device);
631 ni->revision = cpu_to_be32(dd->minrev);
632 ni->local_port_num = port;
633 ni->vendor_id[0] = dd->oui1;
634 ni->vendor_id[1] = dd->oui2;
635 ni->vendor_id[2] = dd->oui3;
636
637 if (resp_len)
638 *resp_len += sizeof(*ni);
639
640 return reply((struct ib_mad_hdr *)smp);
641}
642
643static int subn_get_nodeinfo(struct ib_smp *smp, struct ib_device *ibdev,
644 u8 port)
645{
646 struct ib_node_info *nip = (struct ib_node_info *)&smp->data;
647 struct hfi1_devdata *dd = dd_from_ibdev(ibdev);
648 unsigned pidx = port - 1; /* IB number port from 1, hw from 0 */
649
650 /* GUID 0 is illegal */
651 if (smp->attr_mod || pidx >= dd->num_pports ||
652 ibdev->node_guid == 0 ||
653 get_sguid(to_iport(ibdev, port), HFI1_PORT_GUID_INDEX) == 0) {
77241056 654 smp->status |= IB_SMP_INVALID_FIELD;
655 return reply((struct ib_mad_hdr *)smp);
656 }
77241056 657
a6cd5f08 658 nip->port_guid = get_sguid(to_iport(ibdev, port), HFI1_PORT_GUID_INDEX);
77241056 659 nip->base_version = OPA_MGMT_BASE_VERSION;
9fa240bb 660 nip->class_version = OPA_SM_CLASS_VERSION;
661 nip->node_type = 1; /* channel adapter */
662 nip->num_ports = ibdev->phys_port_cnt;
663 /* This is already in network order */
664 nip->sys_guid = ib_hfi1_sys_image_guid;
a6cd5f08 665 nip->node_guid = ibdev->node_guid;
666 nip->partition_cap = cpu_to_be16(hfi1_get_npkeys(dd));
667 nip->device_id = cpu_to_be16(dd->pcidev->device);
668 nip->revision = cpu_to_be32(dd->minrev);
669 nip->local_port_num = port;
670 nip->vendor_id[0] = dd->oui1;
671 nip->vendor_id[1] = dd->oui2;
672 nip->vendor_id[2] = dd->oui3;
673
674 return reply((struct ib_mad_hdr *)smp);
675}
676
677static void set_link_width_enabled(struct hfi1_pportdata *ppd, u32 w)
678{
679 (void)hfi1_set_ib_cfg(ppd, HFI1_IB_CFG_LWID_ENB, w);
680}
681
682static void set_link_width_downgrade_enabled(struct hfi1_pportdata *ppd, u32 w)
683{
684 (void)hfi1_set_ib_cfg(ppd, HFI1_IB_CFG_LWID_DG_ENB, w);
685}
686
687static void set_link_speed_enabled(struct hfi1_pportdata *ppd, u32 s)
688{
689 (void)hfi1_set_ib_cfg(ppd, HFI1_IB_CFG_SPD_ENB, s);
690}
691
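/*
 * Validate the MKey carried in an incoming SMP: handle lease expiry, accept
 * a matching (or unset) key, clear the lease timer on a valid
 * Get/Set/TrapRepress, and otherwise count the violation, start the lease
 * timer and send a bad M_Key trap.  Returns 1 when the MAD must not be
 * processed further.
 */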
692static int check_mkey(struct hfi1_ibport *ibp, struct ib_mad_hdr *mad,
693 int mad_flags, __be64 mkey, __be32 dr_slid,
694 u8 return_path[], u8 hop_cnt)
695{
696 int valid_mkey = 0;
697 int ret = 0;
698
699 /* Is the mkey in the process of expiring? */
700 if (ibp->rvp.mkey_lease_timeout &&
701 time_after_eq(jiffies, ibp->rvp.mkey_lease_timeout)) {
77241056 702 /* Clear timeout and mkey protection field. */
703 ibp->rvp.mkey_lease_timeout = 0;
704 ibp->rvp.mkeyprot = 0;
705 }
706
707 if ((mad_flags & IB_MAD_IGNORE_MKEY) || ibp->rvp.mkey == 0 ||
708 ibp->rvp.mkey == mkey)
709 valid_mkey = 1;
710
711 /* Unset lease timeout on any valid Get/Set/TrapRepress */
4eb06882 712 if (valid_mkey && ibp->rvp.mkey_lease_timeout &&
713 (mad->method == IB_MGMT_METHOD_GET ||
714 mad->method == IB_MGMT_METHOD_SET ||
715 mad->method == IB_MGMT_METHOD_TRAP_REPRESS))
4eb06882 716 ibp->rvp.mkey_lease_timeout = 0;
717
718 if (!valid_mkey) {
719 switch (mad->method) {
720 case IB_MGMT_METHOD_GET:
721 /* Bad mkey not a violation below level 2 */
4eb06882 722 if (ibp->rvp.mkeyprot < 2)
77241056 723 break;
6ffeb21f 724 /* fall through */
725 case IB_MGMT_METHOD_SET:
726 case IB_MGMT_METHOD_TRAP_REPRESS:
727 if (ibp->rvp.mkey_violations != 0xFFFF)
728 ++ibp->rvp.mkey_violations;
729 if (!ibp->rvp.mkey_lease_timeout &&
730 ibp->rvp.mkey_lease_period)
731 ibp->rvp.mkey_lease_timeout = jiffies +
732 ibp->rvp.mkey_lease_period * HZ;
733 /* Generate a trap notice. */
734 bad_mkey(ibp, mad, mkey, dr_slid, return_path,
735 hop_cnt);
736 ret = 1;
737 }
738 }
739
740 return ret;
741}
742
743/*
744 * The SMA caches reads from LCB registers in case the LCB is unavailable.
745 * (The LCB is unavailable in certain link states, for example.)
746 */
747struct lcb_datum {
748 u32 off;
749 u64 val;
750};
751
752static struct lcb_datum lcb_cache[] = {
753 { DC_LCB_STS_ROUND_TRIP_LTP_CNT, 0 },
754};
755
756static int write_lcb_cache(u32 off, u64 val)
757{
758 int i;
759
760 for (i = 0; i < ARRAY_SIZE(lcb_cache); i++) {
761 if (lcb_cache[i].off == off) {
762 lcb_cache[i].val = val;
763 return 0;
764 }
765 }
766
767 pr_warn("%s bad offset 0x%x\n", __func__, off);
768 return -1;
769}
770
771static int read_lcb_cache(u32 off, u64 *val)
772{
773 int i;
774
775 for (i = 0; i < ARRAY_SIZE(lcb_cache); i++) {
776 if (lcb_cache[i].off == off) {
777 *val = lcb_cache[i].val;
778 return 0;
779 }
780 }
781
782 pr_warn("%s bad offset 0x%x\n", __func__, off);
783 return -1;
784}
785
786void read_ltp_rtt(struct hfi1_devdata *dd)
787{
788 u64 reg;
789
790 if (read_lcb_csr(dd, DC_LCB_STS_ROUND_TRIP_LTP_CNT, &reg))
791 dd_dev_err(dd, "%s: unable to read LTP RTT\n", __func__);
792 else
793 write_lcb_cache(DC_LCB_STS_ROUND_TRIP_LTP_CNT, reg);
794}
795
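/*
 * Get(PortInfo): validate the attribute modifier and SMP length, then fill
 * the OPA PortInfo attribute from the per-port and per-ibport state.
 */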
796static int __subn_get_opa_portinfo(struct opa_smp *smp, u32 am, u8 *data,
797 struct ib_device *ibdev, u8 port,
f1685179 798 u32 *resp_len, u32 max_len)
799{
800 int i;
801 struct hfi1_devdata *dd;
802 struct hfi1_pportdata *ppd;
803 struct hfi1_ibport *ibp;
804 struct opa_port_info *pi = (struct opa_port_info *)data;
805 u8 mtu;
806 u8 credit_rate;
409b1462 807 u8 is_beaconing_active;
808 u32 state;
809 u32 num_ports = OPA_AM_NPORT(am);
810 u32 start_of_sm_config = OPA_AM_START_SM_CFG(am);
811 u32 buffer_units;
812 u64 tmp = 0;
813
f1685179 814 if (num_ports != 1 || smp_length_check(sizeof(*pi), max_len)) {
815 smp->status |= IB_SMP_INVALID_FIELD;
816 return reply((struct ib_mad_hdr *)smp);
817 }
818
819 dd = dd_from_ibdev(ibdev);
820 /* IB numbers ports from 1, hw from 0 */
821 ppd = dd->pport + (port - 1);
822 ibp = &ppd->ibport_data;
823
8638b77f 824 if (ppd->vls_supported / 2 > ARRAY_SIZE(pi->neigh_mtu.pvlx_to_mtu) ||
17fb4f29 825 ppd->vls_supported > ARRAY_SIZE(dd->vld)) {
826 smp->status |= IB_SMP_INVALID_FIELD;
827 return reply((struct ib_mad_hdr *)smp);
828 }
829
830 pi->lid = cpu_to_be32(ppd->lid);
831
832 /* Only return the mkey if the protection field allows it. */
833 if (!(smp->method == IB_MGMT_METHOD_GET &&
834 ibp->rvp.mkey != smp->mkey &&
835 ibp->rvp.mkeyprot == 1))
836 pi->mkey = ibp->rvp.mkey;
837
838 pi->subnet_prefix = ibp->rvp.gid_prefix;
839 pi->sm_lid = cpu_to_be32(ibp->rvp.sm_lid);
840 pi->ib_cap_mask = cpu_to_be32(ibp->rvp.port_cap_flags);
841 pi->mkey_lease_period = cpu_to_be16(ibp->rvp.mkey_lease_period);
842 pi->sm_trap_qp = cpu_to_be32(ppd->sm_trap_qp);
843 pi->sa_qp = cpu_to_be32(ppd->sa_qp);
844
845 pi->link_width.enabled = cpu_to_be16(ppd->link_width_enabled);
846 pi->link_width.supported = cpu_to_be16(ppd->link_width_supported);
847 pi->link_width.active = cpu_to_be16(ppd->link_width_active);
848
849 pi->link_width_downgrade.supported =
850 cpu_to_be16(ppd->link_width_downgrade_supported);
851 pi->link_width_downgrade.enabled =
852 cpu_to_be16(ppd->link_width_downgrade_enabled);
853 pi->link_width_downgrade.tx_active =
854 cpu_to_be16(ppd->link_width_downgrade_tx_active);
855 pi->link_width_downgrade.rx_active =
856 cpu_to_be16(ppd->link_width_downgrade_rx_active);
857
858 pi->link_speed.supported = cpu_to_be16(ppd->link_speed_supported);
859 pi->link_speed.active = cpu_to_be16(ppd->link_speed_active);
860 pi->link_speed.enabled = cpu_to_be16(ppd->link_speed_enabled);
861
862 state = driver_lstate(ppd);
863
864 if (start_of_sm_config && (state == IB_PORT_INIT))
865 ppd->is_sm_config_started = 1;
866
1d01cf33 867 pi->port_phys_conf = (ppd->port_type & 0xf);
77241056 868
869 pi->port_states.ledenable_offlinereason = ppd->neighbor_normal << 4;
870 pi->port_states.ledenable_offlinereason |=
871 ppd->is_sm_config_started << 5;
409b1462 872 /*
873 * This pairs with the memory barrier in hfi1_start_led_override to
874 * ensure that we read the correct state of LED beaconing represented
875 * by led_override_timer_active
409b1462 876 */
2243472e 877 smp_rmb();
878 is_beaconing_active = !!atomic_read(&ppd->led_override_timer_active);
879 pi->port_states.ledenable_offlinereason |= is_beaconing_active << 6;
77241056 880 pi->port_states.ledenable_offlinereason |=
a9c05e35 881 ppd->offline_disabled_reason;
882
883 pi->port_states.portphysstate_portstate =
bec7c79c 884 (driver_pstate(ppd) << 4) | state;
77241056 885
4eb06882 886 pi->mkeyprotect_lmc = (ibp->rvp.mkeyprot << 6) | ppd->lmc;
887
888 memset(pi->neigh_mtu.pvlx_to_mtu, 0, sizeof(pi->neigh_mtu.pvlx_to_mtu));
889 for (i = 0; i < ppd->vls_supported; i++) {
890 mtu = mtu_to_enum(dd->vld[i].mtu, HFI1_DEFAULT_ACTIVE_MTU);
891 if ((i % 2) == 0)
8638b77f 892 pi->neigh_mtu.pvlx_to_mtu[i / 2] |= (mtu << 4);
77241056 893 else
8638b77f 894 pi->neigh_mtu.pvlx_to_mtu[i / 2] |= mtu;
895 }
896 /* don't forget VL 15 */
897 mtu = mtu_to_enum(dd->vld[15].mtu, 2048);
8638b77f 898 pi->neigh_mtu.pvlx_to_mtu[15 / 2] |= mtu;
4eb06882 899 pi->smsl = ibp->rvp.sm_sl & OPA_PI_MASK_SMSL;
900 pi->operational_vls = hfi1_get_ib_cfg(ppd, HFI1_IB_CFG_OP_VLS);
901 pi->partenforce_filterraw |=
902 (ppd->linkinit_reason & OPA_PI_MASK_LINKINIT_REASON);
903 if (ppd->part_enforce & HFI1_PART_ENFORCE_IN)
904 pi->partenforce_filterraw |= OPA_PI_MASK_PARTITION_ENFORCE_IN;
905 if (ppd->part_enforce & HFI1_PART_ENFORCE_OUT)
906 pi->partenforce_filterraw |= OPA_PI_MASK_PARTITION_ENFORCE_OUT;
4eb06882 907 pi->mkey_violations = cpu_to_be16(ibp->rvp.mkey_violations);
77241056 908 /* P_KeyViolations are counted by hardware. */
909 pi->pkey_violations = cpu_to_be16(ibp->rvp.pkey_violations);
910 pi->qkey_violations = cpu_to_be16(ibp->rvp.qkey_violations);
911
912 pi->vl.cap = ppd->vls_supported;
4eb06882 913 pi->vl.high_limit = cpu_to_be16(ibp->rvp.vl_high_limit);
914 pi->vl.arb_high_cap = (u8)hfi1_get_ib_cfg(ppd, HFI1_IB_CFG_VL_HIGH_CAP);
915 pi->vl.arb_low_cap = (u8)hfi1_get_ib_cfg(ppd, HFI1_IB_CFG_VL_LOW_CAP);
916
4eb06882 917 pi->clientrereg_subnettimeout = ibp->rvp.subnet_timeout;
918
919 pi->port_link_mode = cpu_to_be16(OPA_PORT_LINK_MODE_OPA << 10 |
920 OPA_PORT_LINK_MODE_OPA << 5 |
921 OPA_PORT_LINK_MODE_OPA);
922
923 pi->port_ltp_crc_mode = cpu_to_be16(ppd->port_ltp_crc_mode);
924
925 pi->port_mode = cpu_to_be16(
926 ppd->is_active_optimize_enabled ?
927 OPA_PI_MASK_PORT_ACTIVE_OPTOMIZE : 0);
928
929 pi->port_packet_format.supported =
930 cpu_to_be16(OPA_PORT_PACKET_FORMAT_9B |
931 OPA_PORT_PACKET_FORMAT_16B);
77241056 932 pi->port_packet_format.enabled =
933 cpu_to_be16(OPA_PORT_PACKET_FORMAT_9B |
934 OPA_PORT_PACKET_FORMAT_16B);
935
936 /* flit_control.interleave is (OPA V1, version .76):
937 * bits use
938 * ---- ---
939 * 2 res
940 * 2 DistanceSupported
941 * 2 DistanceEnabled
 942 * 5 MaxNestLevelTxEnabled
 943 * 5 MaxNestLevelRxSupported
944 *
945 * HFI supports only "distance mode 1" (see OPA V1, version .76,
946 * section 9.6.2), so set DistanceSupported, DistanceEnabled
947 * to 0x1.
948 */
949 pi->flit_control.interleave = cpu_to_be16(0x1400);
950
951 pi->link_down_reason = ppd->local_link_down_reason.sma;
952 pi->neigh_link_down_reason = ppd->neigh_link_down_reason.sma;
953 pi->port_error_action = cpu_to_be32(ppd->port_error_action);
954 pi->mtucap = mtu_to_enum(hfi1_max_mtu, IB_MTU_4096);
955
956 /* 32.768 usec. response time (guessing) */
957 pi->resptimevalue = 3;
958
959 pi->local_port_num = port;
960
961 /* buffer info for FM */
962 pi->overall_buffer_space = cpu_to_be16(dd->link_credits);
963
964 pi->neigh_node_guid = cpu_to_be64(ppd->neighbor_guid);
965 pi->neigh_port_num = ppd->neighbor_port_number;
966 pi->port_neigh_mode =
967 (ppd->neighbor_type & OPA_PI_MASK_NEIGH_NODE_TYPE) |
968 (ppd->mgmt_allowed ? OPA_PI_MASK_NEIGH_MGMT_ALLOWED : 0) |
969 (ppd->neighbor_fm_security ?
970 OPA_PI_MASK_NEIGH_FW_AUTH_BYPASS : 0);
971
972 /* HFIs shall always return VL15 credits to their
973 * neighbor in a timely manner, without any credit return pacing.
974 */
975 credit_rate = 0;
976 buffer_units = (dd->vau) & OPA_PI_MASK_BUF_UNIT_BUF_ALLOC;
977 buffer_units |= (dd->vcu << 3) & OPA_PI_MASK_BUF_UNIT_CREDIT_ACK;
978 buffer_units |= (credit_rate << 6) &
979 OPA_PI_MASK_BUF_UNIT_VL15_CREDIT_RATE;
980 buffer_units |= (dd->vl15_init << 11) & OPA_PI_MASK_BUF_UNIT_VL15_INIT;
981 pi->buffer_units = cpu_to_be32(buffer_units);
982
cb49366f 983 pi->opa_cap_mask = cpu_to_be16(ibp->rvp.port_cap3_flags);
13c19222
DH
984 pi->collectivemask_multicastmask = ((OPA_COLLECTIVE_NR & 0x7)
985 << 3 | (OPA_MCAST_NR & 0x7));
986
987 /* HFI supports a replay buffer 128 LTPs in size */
988 pi->replay_depth.buffer = 0x80;
989 /* read the cached value of DC_LCB_STS_ROUND_TRIP_LTP_CNT */
990 read_lcb_cache(DC_LCB_STS_ROUND_TRIP_LTP_CNT, &tmp);
991
992 /*
993 * this counter is 16 bits wide, but the replay_depth.wire
994 * variable is only 8 bits
995 */
996 if (tmp > 0xff)
997 tmp = 0xff;
998 pi->replay_depth.wire = tmp;
999
1000 if (resp_len)
1001 *resp_len += sizeof(struct opa_port_info);
1002
1003 return reply((struct ib_mad_hdr *)smp);
1004}
1005
1006/**
1007 * get_pkeys - return the PKEY table
1008 * @dd: the hfi1_ib device
1009 * @port: the IB port number
1010 * @pkeys: the pkey table is placed here
1011 */
1012static int get_pkeys(struct hfi1_devdata *dd, u8 port, u16 *pkeys)
1013{
1014 struct hfi1_pportdata *ppd = dd->pport + port - 1;
1015
1016 memcpy(pkeys, ppd->pkeys, sizeof(ppd->pkeys));
1017
1018 return 0;
1019}
1020
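/*
 * Get(P_KeyTable): validate the requested block range against the AM and
 * SMP length; only a request starting at block 0 returns the real pkeys,
 * converted to network byte order.
 */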
1021static int __subn_get_opa_pkeytable(struct opa_smp *smp, u32 am, u8 *data,
1022 struct ib_device *ibdev, u8 port,
f1685179 1023 u32 *resp_len, u32 max_len)
1024{
1025 struct hfi1_devdata *dd = dd_from_ibdev(ibdev);
1026 u32 n_blocks_req = OPA_AM_NBLK(am);
1027 u32 start_block = am & 0x7ff;
1028 __be16 *p;
1029 u16 *q;
1030 int i;
1031 u16 n_blocks_avail;
1032 unsigned npkeys = hfi1_get_npkeys(dd);
1033 size_t size;
1034
1035 if (n_blocks_req == 0) {
1036 pr_warn("OPA Get PKey AM Invalid : P = %d; B = 0x%x; N = 0x%x\n",
1037 port, start_block, n_blocks_req);
1038 smp->status |= IB_SMP_INVALID_FIELD;
1039 return reply((struct ib_mad_hdr *)smp);
1040 }
1041
50e5dcbe 1042 n_blocks_avail = (u16)(npkeys / OPA_PARTITION_TABLE_BLK_SIZE) + 1;
1043
1044 size = (n_blocks_req * OPA_PARTITION_TABLE_BLK_SIZE) * sizeof(u16);
1045
1046 if (smp_length_check(size, max_len)) {
1047 smp->status |= IB_SMP_INVALID_FIELD;
1048 return reply((struct ib_mad_hdr *)smp);
1049 }
1050
1051 if (start_block + n_blocks_req > n_blocks_avail ||
1052 n_blocks_req > OPA_NUM_PKEY_BLOCKS_PER_SMP) {
1053 pr_warn("OPA Get PKey AM Invalid : s 0x%x; req 0x%x; "
1054 "avail 0x%x; blk/smp 0x%lx\n",
1055 start_block, n_blocks_req, n_blocks_avail,
1056 OPA_NUM_PKEY_BLOCKS_PER_SMP);
1057 smp->status |= IB_SMP_INVALID_FIELD;
1058 return reply((struct ib_mad_hdr *)smp);
1059 }
1060
50e5dcbe 1061 p = (__be16 *)data;
1062 q = (u16 *)data;
1063 /* get the real pkeys if we are requesting the first block */
1064 if (start_block == 0) {
1065 get_pkeys(dd, port, q);
1066 for (i = 0; i < npkeys; i++)
1067 p[i] = cpu_to_be16(q[i]);
1068 if (resp_len)
1069 *resp_len += size;
e490974e 1070 } else {
77241056 1071 smp->status |= IB_SMP_INVALID_FIELD;
e490974e 1072 }
1073 return reply((struct ib_mad_hdr *)smp);
1074}
1075
1076enum {
1077 HFI_TRANSITION_DISALLOWED,
1078 HFI_TRANSITION_IGNORED,
1079 HFI_TRANSITION_ALLOWED,
1080 HFI_TRANSITION_UNDEFINED,
1081};
1082
1083/*
1084 * Use shortened names to improve readability of
1085 * {logical,physical}_state_transitions
1086 */
1087enum {
1088 __D = HFI_TRANSITION_DISALLOWED,
1089 __I = HFI_TRANSITION_IGNORED,
1090 __A = HFI_TRANSITION_ALLOWED,
1091 __U = HFI_TRANSITION_UNDEFINED,
1092};
1093
1094/*
1095 * IB_PORTPHYSSTATE_POLLING (2) through OPA_PORTPHYSSTATE_MAX (11) are
1096 * represented in physical_state_transitions.
1097 */
1098#define __N_PHYSTATES (OPA_PORTPHYSSTATE_MAX - IB_PORTPHYSSTATE_POLLING + 1)
1099
1100/*
1101 * Within physical_state_transitions, rows represent "old" states,
1102 * columns "new" states, and physical_state_transitions.allowed[old][new]
1103 * indicates if the transition from old state to new state is legal (see
1104 * OPAg1v1, Table 6-4).
1105 */
1106static const struct {
1107 u8 allowed[__N_PHYSTATES][__N_PHYSTATES];
1108} physical_state_transitions = {
1109 {
1110 /* 2 3 4 5 6 7 8 9 10 11 */
1111 /* 2 */ { __A, __A, __D, __D, __D, __D, __D, __D, __D, __D },
1112 /* 3 */ { __A, __I, __D, __D, __D, __D, __D, __D, __D, __A },
1113 /* 4 */ { __U, __U, __U, __U, __U, __U, __U, __U, __U, __U },
1114 /* 5 */ { __A, __A, __D, __I, __D, __D, __D, __D, __D, __D },
1115 /* 6 */ { __U, __U, __U, __U, __U, __U, __U, __U, __U, __U },
1116 /* 7 */ { __D, __A, __D, __D, __D, __I, __D, __D, __D, __D },
1117 /* 8 */ { __U, __U, __U, __U, __U, __U, __U, __U, __U, __U },
1118 /* 9 */ { __I, __A, __D, __D, __D, __D, __D, __I, __D, __D },
1119 /*10 */ { __U, __U, __U, __U, __U, __U, __U, __U, __U, __U },
1120 /*11 */ { __D, __A, __D, __D, __D, __D, __D, __D, __D, __I },
1121 }
1122};
1123
1124/*
1125 * IB_PORT_DOWN (1) through IB_PORT_ACTIVE_DEFER (5) are represented
1126 * logical_state_transitions
1127 */
1128
1129#define __N_LOGICAL_STATES (IB_PORT_ACTIVE_DEFER - IB_PORT_DOWN + 1)
1130
1131/*
1132 * Within logical_state_transitions rows represent "old" states,
1133 * columns "new" states, and logical_state_transitions.allowed[old][new]
1134 * indicates if the transition from old state to new state is legal (see
1135 * OPAg1v1, Table 9-12).
1136 */
1137static const struct {
1138 u8 allowed[__N_LOGICAL_STATES][__N_LOGICAL_STATES];
1139} logical_state_transitions = {
1140 {
1141 /* 1 2 3 4 5 */
1142 /* 1 */ { __I, __D, __D, __D, __U},
1143 /* 2 */ { __D, __I, __A, __D, __U},
1144 /* 3 */ { __D, __D, __I, __A, __U},
1145 /* 4 */ { __D, __D, __I, __I, __U},
1146 /* 5 */ { __U, __U, __U, __U, __U},
1147 }
1148};
1149
1150static int logical_transition_allowed(int old, int new)
1151{
1152 if (old < IB_PORT_NOP || old > IB_PORT_ACTIVE_DEFER ||
1153 new < IB_PORT_NOP || new > IB_PORT_ACTIVE_DEFER) {
1154 pr_warn("invalid logical state(s) (old %d new %d)\n",
1155 old, new);
1156 return HFI_TRANSITION_UNDEFINED;
1157 }
1158
1159 if (new == IB_PORT_NOP)
1160 return HFI_TRANSITION_ALLOWED; /* always allowed */
1161
1162 /* adjust states for indexing into logical_state_transitions */
1163 old -= IB_PORT_DOWN;
1164 new -= IB_PORT_DOWN;
1165
1166 if (old < 0 || new < 0)
1167 return HFI_TRANSITION_UNDEFINED;
1168 return logical_state_transitions.allowed[old][new];
1169}
1170
1171static int physical_transition_allowed(int old, int new)
1172{
1173 if (old < IB_PORTPHYSSTATE_NOP || old > OPA_PORTPHYSSTATE_MAX ||
1174 new < IB_PORTPHYSSTATE_NOP || new > OPA_PORTPHYSSTATE_MAX) {
1175 pr_warn("invalid physical state(s) (old %d new %d)\n",
1176 old, new);
1177 return HFI_TRANSITION_UNDEFINED;
1178 }
1179
1180 if (new == IB_PORTPHYSSTATE_NOP)
1181 return HFI_TRANSITION_ALLOWED; /* always allowed */
1182
1183 /* adjust states for indexing into physical_state_transitions */
1184 old -= IB_PORTPHYSSTATE_POLLING;
1185 new -= IB_PORTPHYSSTATE_POLLING;
1186
1187 if (old < 0 || new < 0)
1188 return HFI_TRANSITION_UNDEFINED;
1189 return physical_state_transitions.allowed[old][new];
1190}
1191
1192static int port_states_transition_allowed(struct hfi1_pportdata *ppd,
1193 u32 logical_new, u32 physical_new)
1194{
d392a673 1195 u32 physical_old = driver_pstate(ppd);
02a222c7 1196 u32 logical_old = driver_lstate(ppd);
1197 int ret, logical_allowed, physical_allowed;
1198
1199 ret = logical_transition_allowed(logical_old, logical_new);
1200 logical_allowed = ret;
1201
1202 if (ret == HFI_TRANSITION_DISALLOWED ||
1203 ret == HFI_TRANSITION_UNDEFINED) {
1204 pr_warn("invalid logical state transition %s -> %s\n",
1205 opa_lstate_name(logical_old),
1206 opa_lstate_name(logical_new));
1207 return ret;
1208 }
1209
1210 ret = physical_transition_allowed(physical_old, physical_new);
1211 physical_allowed = ret;
1212
1213 if (ret == HFI_TRANSITION_DISALLOWED ||
1214 ret == HFI_TRANSITION_UNDEFINED) {
1215 pr_warn("invalid physical state transition %s -> %s\n",
1216 opa_pstate_name(physical_old),
1217 opa_pstate_name(physical_new));
1218 return ret;
1219 }
1220
1221 if (logical_allowed == HFI_TRANSITION_IGNORED &&
1222 physical_allowed == HFI_TRANSITION_IGNORED)
1223 return HFI_TRANSITION_IGNORED;
1224
1225 /*
1226 * A change request of Physical Port State from
1227 * 'Offline' to 'Polling' should be ignored.
1228 */
1229 if ((physical_old == OPA_PORTPHYSSTATE_OFFLINE) &&
1230 (physical_new == IB_PORTPHYSSTATE_POLLING))
1231 return HFI_TRANSITION_IGNORED;
1232
1233 /*
1234 * Either physical_allowed or logical_allowed is
1235 * HFI_TRANSITION_ALLOWED.
1236 */
1237 return HFI_TRANSITION_ALLOWED;
1238}
1239
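/*
 * Apply a requested logical/physical port state change after checking it
 * against the transition tables above.  Drives the link state machine
 * (offline/poll, disable, armed, active) and sends the idle SMA messages
 * required when arming or activating the link.
 */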
1240static int set_port_states(struct hfi1_pportdata *ppd, struct opa_smp *smp,
a276672e 1241 u32 logical_state, u32 phys_state)
1242{
1243 struct hfi1_devdata *dd = ppd->dd;
1244 u32 link_state;
1245 int ret;
1246
1247 ret = port_states_transition_allowed(ppd, logical_state, phys_state);
1248 if (ret == HFI_TRANSITION_DISALLOWED ||
1249 ret == HFI_TRANSITION_UNDEFINED) {
1250 /* error message emitted above */
1251 smp->status |= IB_SMP_INVALID_FIELD;
1252 return 0;
1253 }
1254
1255 if (ret == HFI_TRANSITION_IGNORED)
1256 return 0;
1257
1258 if ((phys_state != IB_PORTPHYSSTATE_NOP) &&
1259 !(logical_state == IB_PORT_DOWN ||
1260 logical_state == IB_PORT_NOP)){
1261 pr_warn("SubnSet(OPA_PortInfo) port state invalid: logical_state 0x%x physical_state 0x%x\n",
1262 logical_state, phys_state);
1263 smp->status |= IB_SMP_INVALID_FIELD;
1264 }
1265
1266 /*
1267 * Logical state changes are summarized in OPAv1g1 spec.,
1268 * Table 9-12; physical state changes are summarized in
1269 * OPAv1g1 spec., Table 6.4.
1270 */
1271 switch (logical_state) {
1272 case IB_PORT_NOP:
1273 if (phys_state == IB_PORTPHYSSTATE_NOP)
1274 break;
1275 /* FALLTHROUGH */
1276 case IB_PORT_DOWN:
e490974e 1277 if (phys_state == IB_PORTPHYSSTATE_NOP) {
77241056 1278 link_state = HLS_DN_DOWNDEF;
e490974e 1279 } else if (phys_state == IB_PORTPHYSSTATE_POLLING) {
77241056 1280 link_state = HLS_DN_POLL;
1281 set_link_down_reason(ppd, OPA_LINKDOWN_REASON_FM_BOUNCE,
1282 0, OPA_LINKDOWN_REASON_FM_BOUNCE);
e490974e 1283 } else if (phys_state == IB_PORTPHYSSTATE_DISABLED) {
77241056 1284 link_state = HLS_DN_DISABLE;
e490974e 1285 } else {
1286 pr_warn("SubnSet(OPA_PortInfo) invalid physical state 0x%x\n",
1287 phys_state);
1288 smp->status |= IB_SMP_INVALID_FIELD;
1289 break;
1290 }
1291
1292 if ((link_state == HLS_DN_POLL ||
1293 link_state == HLS_DN_DOWNDEF)) {
1294 /*
1295 * Going to poll. No matter what the current state,
1296 * always move offline first, then tune and start the
1297 * link. This correctly handles a FM link bounce and
1298 * a link enable. Going offline is a no-op if already
1299 * offline.
1300 */
1301 set_link_state(ppd, HLS_DN_OFFLINE);
1302 start_link(ppd);
1303 } else {
1304 set_link_state(ppd, link_state);
1305 }
1306 if (link_state == HLS_DN_DISABLE &&
1307 (ppd->offline_disabled_reason >
a9c05e35 1308 HFI1_ODR_MASK(OPA_LINKDOWN_REASON_SMA_DISABLED) ||
77241056 1309 ppd->offline_disabled_reason ==
a9c05e35 1310 HFI1_ODR_MASK(OPA_LINKDOWN_REASON_NONE)))
77241056 1311 ppd->offline_disabled_reason =
a9c05e35 1312 HFI1_ODR_MASK(OPA_LINKDOWN_REASON_SMA_DISABLED);
1313 /*
1314 * Don't send a reply if the response would be sent
1315 * through the disabled port.
1316 */
1317 if (link_state == HLS_DN_DISABLE && smp->hop_cnt)
1318 return IB_MAD_RESULT_SUCCESS | IB_MAD_RESULT_CONSUMED;
1319 break;
1320 case IB_PORT_ARMED:
1321 ret = set_link_state(ppd, HLS_UP_ARMED);
a276672e 1322 if (!ret)
1323 send_idle_sma(dd, SMA_IDLE_ARM);
1324 break;
1325 case IB_PORT_ACTIVE:
1326 if (ppd->neighbor_normal) {
1327 ret = set_link_state(ppd, HLS_UP_ACTIVE);
1328 if (ret == 0)
1329 send_idle_sma(dd, SMA_IDLE_ACTIVE);
1330 } else {
1331 pr_warn("SubnSet(OPA_PortInfo) Cannot move to Active with NeighborNormal 0\n");
1332 smp->status |= IB_SMP_INVALID_FIELD;
1333 }
1334 break;
1335 default:
1336 pr_warn("SubnSet(OPA_PortInfo) invalid logical state 0x%x\n",
1337 logical_state);
1338 smp->status |= IB_SMP_INVALID_FIELD;
1339 }
1340
1341 return 0;
1342}
1343
1344/**
1345 * subn_set_opa_portinfo - set port information
1346 * @smp: the incoming SM packet
1347 * @ibdev: the infiniband device
1348 * @port: the port on the device
1349 *
1350 */
1351static int __subn_set_opa_portinfo(struct opa_smp *smp, u32 am, u8 *data,
1352 struct ib_device *ibdev, u8 port,
f1685179 1353 u32 *resp_len, u32 max_len)
1354{
1355 struct opa_port_info *pi = (struct opa_port_info *)data;
1356 struct ib_event event;
1357 struct hfi1_devdata *dd;
1358 struct hfi1_pportdata *ppd;
1359 struct hfi1_ibport *ibp;
1360 u8 clientrereg;
1361 unsigned long flags;
1362 u32 smlid;
1363 u32 lid;
1364 u8 ls_old, ls_new, ps_new;
1365 u8 vls;
1366 u8 msl;
1367 u8 crc_enabled;
1368 u16 lse, lwe, mtu;
1369 u32 num_ports = OPA_AM_NPORT(am);
1370 u32 start_of_sm_config = OPA_AM_START_SM_CFG(am);
1371 int ret, i, invalid = 0, call_set_mtu = 0;
1372 int call_link_downgrade_policy = 0;
1373
1374 if (num_ports != 1 ||
1375 smp_length_check(sizeof(*pi), max_len)) {
1376 smp->status |= IB_SMP_INVALID_FIELD;
1377 return reply((struct ib_mad_hdr *)smp);
1378 }
1379
1380 lid = be32_to_cpu(pi->lid);
1381 if (lid & 0xFF000000) {
1382 pr_warn("OPA_PortInfo lid out of range: %X\n", lid);
1383 smp->status |= IB_SMP_INVALID_FIELD;
1384 goto get_only;
1385 }
1386
1387
1388 smlid = be32_to_cpu(pi->sm_lid);
51e658f5 1389 if (smlid & 0xFF000000) {
1390 pr_warn("OPA_PortInfo SM lid out of range: %X\n", smlid);
1391 smp->status |= IB_SMP_INVALID_FIELD;
1392 goto get_only;
1393 }
1394
1395 clientrereg = (pi->clientrereg_subnettimeout &
1396 OPA_PI_MASK_CLIENT_REREGISTER);
1397
1398 dd = dd_from_ibdev(ibdev);
1399 /* IB numbers ports from 1, hw from 0 */
1400 ppd = dd->pport + (port - 1);
1401 ibp = &ppd->ibport_data;
1402 event.device = ibdev;
1403 event.element.port_num = port;
1404
1405 ls_old = driver_lstate(ppd);
1406
4eb06882 1407 ibp->rvp.mkey = pi->mkey;
1408 if (ibp->rvp.gid_prefix != pi->subnet_prefix) {
1409 ibp->rvp.gid_prefix = pi->subnet_prefix;
1410 event.event = IB_EVENT_GID_CHANGE;
1411 ib_dispatch_event(&event);
1412 }
4eb06882 1413 ibp->rvp.mkey_lease_period = be16_to_cpu(pi->mkey_lease_period);
1414
1415 /* Must be a valid unicast LID address. */
1416 if ((lid == 0 && ls_old > IB_PORT_INIT) ||
51e658f5 1417 (hfi1_is_16B_mcast(lid))) {
77241056
MM
1418 smp->status |= IB_SMP_INVALID_FIELD;
1419 pr_warn("SubnSet(OPA_PortInfo) lid invalid 0x%x\n",
1420 lid);
1421 } else if (ppd->lid != lid ||
1422 ppd->lmc != (pi->mkeyprotect_lmc & OPA_PI_MASK_LMC)) {
1423 if (ppd->lid != lid)
1424 hfi1_set_uevent_bits(ppd, _HFI1_EVENT_LID_CHANGE_BIT);
1425 if (ppd->lmc != (pi->mkeyprotect_lmc & OPA_PI_MASK_LMC))
1426 hfi1_set_uevent_bits(ppd, _HFI1_EVENT_LMC_CHANGE_BIT);
1427 hfi1_set_lid(ppd, lid, pi->mkeyprotect_lmc & OPA_PI_MASK_LMC);
1428 event.event = IB_EVENT_LID_CHANGE;
1429 ib_dispatch_event(&event);
1430
1431 if (HFI1_PORT_GUID_INDEX + 1 < HFI1_GUIDS_PER_PORT) {
1432 /* Manufacture GID from LID to support extended
1433 * addresses
1434 */
1435 ppd->guids[HFI1_PORT_GUID_INDEX + 1] =
1436 be64_to_cpu(OPA_MAKE_ID(lid));
1437 event.event = IB_EVENT_GID_CHANGE;
1438 ib_dispatch_event(&event);
1439 }
1440 }
1441
1442 msl = pi->smsl & OPA_PI_MASK_SMSL;
1443 if (pi->partenforce_filterraw & OPA_PI_MASK_LINKINIT_REASON)
1444 ppd->linkinit_reason =
1445 (pi->partenforce_filterraw &
1446 OPA_PI_MASK_LINKINIT_REASON);
1447
1448 /* Must be a valid unicast LID address. */
1449 if ((smlid == 0 && ls_old > IB_PORT_INIT) ||
51e658f5 1450 (hfi1_is_16B_mcast(smlid))) {
1451 smp->status |= IB_SMP_INVALID_FIELD;
1452 pr_warn("SubnSet(OPA_PortInfo) smlid invalid 0x%x\n", smlid);
4eb06882 1453 } else if (smlid != ibp->rvp.sm_lid || msl != ibp->rvp.sm_sl) {
77241056 1454 pr_warn("SubnSet(OPA_PortInfo) smlid 0x%x\n", smlid);
4eb06882 1455 spin_lock_irqsave(&ibp->rvp.lock, flags);
9c4a311e 1456 if (ibp->rvp.sm_ah) {
4eb06882 1457 if (smlid != ibp->rvp.sm_lid)
51e658f5 1458 hfi1_modify_qp0_ah(ibp, ibp->rvp.sm_ah, smlid);
4eb06882 1459 if (msl != ibp->rvp.sm_sl)
d8966fcd 1460 rdma_ah_set_sl(&ibp->rvp.sm_ah->attr, msl);
77241056 1461 }
1462 spin_unlock_irqrestore(&ibp->rvp.lock, flags);
1463 if (smlid != ibp->rvp.sm_lid)
1464 ibp->rvp.sm_lid = smlid;
1465 if (msl != ibp->rvp.sm_sl)
1466 ibp->rvp.sm_sl = msl;
1467 event.event = IB_EVENT_SM_CHANGE;
1468 ib_dispatch_event(&event);
1469 }
1470
1471 if (pi->link_down_reason == 0) {
1472 ppd->local_link_down_reason.sma = 0;
1473 ppd->local_link_down_reason.latest = 0;
1474 }
1475
1476 if (pi->neigh_link_down_reason == 0) {
1477 ppd->neigh_link_down_reason.sma = 0;
1478 ppd->neigh_link_down_reason.latest = 0;
1479 }
1480
1481 ppd->sm_trap_qp = be32_to_cpu(pi->sm_trap_qp);
1482 ppd->sa_qp = be32_to_cpu(pi->sa_qp);
1483
1484 ppd->port_error_action = be32_to_cpu(pi->port_error_action);
1485 lwe = be16_to_cpu(pi->link_width.enabled);
1486 if (lwe) {
1487 if (lwe == OPA_LINK_WIDTH_RESET ||
1488 lwe == OPA_LINK_WIDTH_RESET_OLD)
1489 set_link_width_enabled(ppd, ppd->link_width_supported);
1490 else if ((lwe & ~ppd->link_width_supported) == 0)
1491 set_link_width_enabled(ppd, lwe);
1492 else
1493 smp->status |= IB_SMP_INVALID_FIELD;
1494 }
1495 lwe = be16_to_cpu(pi->link_width_downgrade.enabled);
1496 /* LWD.E is always applied - 0 means "disabled" */
1497 if (lwe == OPA_LINK_WIDTH_RESET ||
1498 lwe == OPA_LINK_WIDTH_RESET_OLD) {
77241056 1499 set_link_width_downgrade_enabled(ppd,
1500 ppd->
1501 link_width_downgrade_supported
1502 );
1503 } else if ((lwe & ~ppd->link_width_downgrade_supported) == 0) {
1504 /* only set and apply if something changed */
1505 if (lwe != ppd->link_width_downgrade_enabled) {
1506 set_link_width_downgrade_enabled(ppd, lwe);
1507 call_link_downgrade_policy = 1;
1508 }
e490974e 1509 } else {
77241056 1510 smp->status |= IB_SMP_INVALID_FIELD;
e490974e 1511 }
1512 lse = be16_to_cpu(pi->link_speed.enabled);
1513 if (lse) {
1514 if (lse & be16_to_cpu(pi->link_speed.supported))
1515 set_link_speed_enabled(ppd, lse);
1516 else
1517 smp->status |= IB_SMP_INVALID_FIELD;
1518 }
1519
1520 ibp->rvp.mkeyprot =
1521 (pi->mkeyprotect_lmc & OPA_PI_MASK_MKEY_PROT_BIT) >> 6;
1522 ibp->rvp.vl_high_limit = be16_to_cpu(pi->vl.high_limit) & 0xFF;
77241056 1523 (void)hfi1_set_ib_cfg(ppd, HFI1_IB_CFG_VL_HIGH_LIMIT,
4eb06882 1524 ibp->rvp.vl_high_limit);
77241056 1525
8638b77f 1526 if (ppd->vls_supported / 2 > ARRAY_SIZE(pi->neigh_mtu.pvlx_to_mtu) ||
17fb4f29 1527 ppd->vls_supported > ARRAY_SIZE(dd->vld)) {
1528 smp->status |= IB_SMP_INVALID_FIELD;
1529 return reply((struct ib_mad_hdr *)smp);
1530 }
1531 for (i = 0; i < ppd->vls_supported; i++) {
1532 if ((i % 2) == 0)
1533 mtu = enum_to_mtu((pi->neigh_mtu.pvlx_to_mtu[i / 2] >>
1534 4) & 0xF);
77241056 1535 else
1536 mtu = enum_to_mtu(pi->neigh_mtu.pvlx_to_mtu[i / 2] &
1537 0xF);
1538 if (mtu == 0xffff) {
1539 pr_warn("SubnSet(OPA_PortInfo) mtu invalid %d (0x%x)\n",
1540 mtu,
1541 (pi->neigh_mtu.pvlx_to_mtu[0] >> 4) & 0xF);
1542 smp->status |= IB_SMP_INVALID_FIELD;
1543 mtu = hfi1_max_mtu; /* use a valid MTU */
1544 }
1545 if (dd->vld[i].mtu != mtu) {
1546 dd_dev_info(dd,
1547 "MTU change on vl %d from %d to %d\n",
1548 i, dd->vld[i].mtu, mtu);
1549 dd->vld[i].mtu = mtu;
1550 call_set_mtu++;
1551 }
1552 }
1553 /* As per OPAV1 spec: VL15 must support and be configured
1554 * for operation with a 2048 or larger MTU.
1555 */
8638b77f 1556 mtu = enum_to_mtu(pi->neigh_mtu.pvlx_to_mtu[15 / 2] & 0xF);
1557 if (mtu < 2048 || mtu == 0xffff)
1558 mtu = 2048;
1559 if (dd->vld[15].mtu != mtu) {
1560 dd_dev_info(dd,
1561 "MTU change on vl 15 from %d to %d\n",
1562 dd->vld[15].mtu, mtu);
1563 dd->vld[15].mtu = mtu;
1564 call_set_mtu++;
1565 }
1566 if (call_set_mtu)
1567 set_mtu(ppd);
1568
1569 /* Set operational VLs */
1570 vls = pi->operational_vls & OPA_PI_MASK_OPERATIONAL_VL;
1571 if (vls) {
1572 if (vls > ppd->vls_supported) {
1573 pr_warn("SubnSet(OPA_PortInfo) VL's supported invalid %d\n",
1574 pi->operational_vls);
1575 smp->status |= IB_SMP_INVALID_FIELD;
1576 } else {
1577 if (hfi1_set_ib_cfg(ppd, HFI1_IB_CFG_OP_VLS,
17fb4f29 1578 vls) == -EINVAL)
1579 smp->status |= IB_SMP_INVALID_FIELD;
1580 }
1581 }
1582
1583 if (pi->mkey_violations == 0)
4eb06882 1584 ibp->rvp.mkey_violations = 0;
1585
1586 if (pi->pkey_violations == 0)
4eb06882 1587 ibp->rvp.pkey_violations = 0;
1588
1589 if (pi->qkey_violations == 0)
4eb06882 1590 ibp->rvp.qkey_violations = 0;
77241056 1591
4eb06882 1592 ibp->rvp.subnet_timeout =
1593 pi->clientrereg_subnettimeout & OPA_PI_MASK_SUBNET_TIMEOUT;
1594
1595 crc_enabled = be16_to_cpu(pi->port_ltp_crc_mode);
1596 crc_enabled >>= 4;
1597 crc_enabled &= 0xf;
1598
1599 if (crc_enabled != 0)
1600 ppd->port_crc_mode_enabled = port_ltp_to_cap(crc_enabled);
1601
1602 ppd->is_active_optimize_enabled =
1603 !!(be16_to_cpu(pi->port_mode)
1604 & OPA_PI_MASK_PORT_ACTIVE_OPTOMIZE);
1605
1606 ls_new = pi->port_states.portphysstate_portstate &
1607 OPA_PI_MASK_PORT_STATE;
1608 ps_new = (pi->port_states.portphysstate_portstate &
1609 OPA_PI_MASK_PORT_PHYSICAL_STATE) >> 4;
1610
1611 if (ls_old == IB_PORT_INIT) {
1612 if (start_of_sm_config) {
1613 if (ls_new == ls_old || (ls_new == IB_PORT_ARMED))
1614 ppd->is_sm_config_started = 1;
1615 } else if (ls_new == IB_PORT_ARMED) {
a276672e 1616 if (ppd->is_sm_config_started == 0) {
77241056 1617 invalid = 1;
1618 smp->status |= IB_SMP_INVALID_FIELD;
1619 }
1620 }
1621 }
1622
1623 /* Handle CLIENT_REREGISTER event b/c SM asked us for it */
1624 if (clientrereg) {
1625 event.event = IB_EVENT_CLIENT_REREGISTER;
1626 ib_dispatch_event(&event);
1627 }
1628
1629 /*
1630 * Do the port state change now that the other link parameters
1631 * have been set.
1632 * Changing the port physical state only makes sense if the link
1633 * is down or is being set to down.
1634 */
1635
1636 if (!invalid) {
1637 ret = set_port_states(ppd, smp, ls_new, ps_new);
1638 if (ret)
1639 return ret;
1640 }
77241056 1641
1642 ret = __subn_get_opa_portinfo(smp, am, data, ibdev, port, resp_len,
1643 max_len);
1644
1645 /* restore re-reg bit per o14-12.2.1 */
1646 pi->clientrereg_subnettimeout |= clientrereg;
1647
1648 /*
1649 * Apply the new link downgrade policy. This may result in a link
1650 * bounce. Do this after everything else so things are settled.
1651 * Possible problem: if setting the port state above fails, then
1652 * the policy change is not applied.
1653 */
1654 if (call_link_downgrade_policy)
1655 apply_link_downgrade_policy(ppd, 0);
1656
1657 return ret;
1658
1659get_only:
1660 return __subn_get_opa_portinfo(smp, am, data, ibdev, port, resp_len,
1661 max_len);
1662}
1663
1664/**
1665 * set_pkeys - set the PKEY table for ctxt 0
1666 * @dd: the hfi1_ib device
1667 * @port: the IB port number
1668 * @pkeys: the PKEY table
1669 */
1670static int set_pkeys(struct hfi1_devdata *dd, u8 port, u16 *pkeys)
1671{
1672 struct hfi1_pportdata *ppd;
1673 int i;
1674 int changed = 0;
1675 int update_includes_mgmt_partition = 0;
1676
1677 /*
1678 * IB port one/two always maps to context zero/one,
1679 * always a kernel context, no locking needed
1680 * If we get here with ppd setup, no need to check
1681 * that rcd is valid.
1682 */
1683 ppd = dd->pport + (port - 1);
1684 /*
1685 * If the update does not include the management pkey, don't do it.
1686 */
1687 for (i = 0; i < ARRAY_SIZE(ppd->pkeys); i++) {
1688 if (pkeys[i] == LIM_MGMT_P_KEY) {
1689 update_includes_mgmt_partition = 1;
1690 break;
1691 }
1692 }
1693
1694 if (!update_includes_mgmt_partition)
1695 return 1;
1696
1697 for (i = 0; i < ARRAY_SIZE(ppd->pkeys); i++) {
1698 u16 key = pkeys[i];
1699 u16 okey = ppd->pkeys[i];
1700
1701 if (key == okey)
1702 continue;
1703 /*
1704 * The SM gives us the complete PKey table. We have
1705 * to ensure that we put the PKeys in the matching
1706 * slots.
1707 */
1708 ppd->pkeys[i] = key;
1709 changed = 1;
1710 }
1711
1712 if (changed) {
77241056 1713 (void)hfi1_set_ib_cfg(ppd, HFI1_IB_CFG_PKEYS, 0);
34d351f8 1714 hfi1_event_pkey_change(dd, port);
77241056 1715 }
34d351f8 1716
1717 return 0;
1718}
1719
1720static int __subn_set_opa_pkeytable(struct opa_smp *smp, u32 am, u8 *data,
1721 struct ib_device *ibdev, u8 port,
f1685179 1722 u32 *resp_len, u32 max_len)
1723{
1724 struct hfi1_devdata *dd = dd_from_ibdev(ibdev);
1725 u32 n_blocks_sent = OPA_AM_NBLK(am);
1726 u32 start_block = am & 0x7ff;
50e5dcbe 1727 u16 *p = (u16 *)data;
1728 __be16 *q = (__be16 *)data;
1729 int i;
1730 u16 n_blocks_avail;
1731 unsigned npkeys = hfi1_get_npkeys(dd);
f1685179 1732 u32 size = 0;
1733
1734 if (n_blocks_sent == 0) {
1735 pr_warn("OPA Get PKey AM Invalid : P = %d; B = 0x%x; N = 0x%x\n",
1736 port, start_block, n_blocks_sent);
1737 smp->status |= IB_SMP_INVALID_FIELD;
1738 return reply((struct ib_mad_hdr *)smp);
1739 }
1740
8638b77f 1741 n_blocks_avail = (u16)(npkeys / OPA_PARTITION_TABLE_BLK_SIZE) + 1;
77241056 1742
1743 size = sizeof(u16) * (n_blocks_sent * OPA_PARTITION_TABLE_BLK_SIZE);
1744
1745 if (smp_length_check(size, max_len)) {
1746 smp->status |= IB_SMP_INVALID_FIELD;
1747 return reply((struct ib_mad_hdr *)smp);
1748 }
1749
1750 if (start_block + n_blocks_sent > n_blocks_avail ||
1751 n_blocks_sent > OPA_NUM_PKEY_BLOCKS_PER_SMP) {
1752 pr_warn("OPA Set PKey AM Invalid : s 0x%x; req 0x%x; avail 0x%x; blk/smp 0x%lx\n",
1753 start_block, n_blocks_sent, n_blocks_avail,
1754 OPA_NUM_PKEY_BLOCKS_PER_SMP);
1755 smp->status |= IB_SMP_INVALID_FIELD;
1756 return reply((struct ib_mad_hdr *)smp);
1757 }
1758
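	/*
	 * The PKey blocks arrive in wire (big-endian) order; convert them in
	 * place to host order (p and q alias the same buffer) before handing
	 * the table to set_pkeys() below.
	 */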
1759 for (i = 0; i < n_blocks_sent * OPA_PARTITION_TABLE_BLK_SIZE; i++)
1760 p[i] = be16_to_cpu(q[i]);
1761
1762 if (start_block == 0 && set_pkeys(dd, port, p) != 0) {
1763 smp->status |= IB_SMP_INVALID_FIELD;
1764 return reply((struct ib_mad_hdr *)smp);
1765 }
1766
1767 return __subn_get_opa_pkeytable(smp, am, data, ibdev, port, resp_len,
1768 max_len);
1769}
1770
1771#define ILLEGAL_VL 12
1772/*
1773 * filter_sc2vlt changes mappings to VL15 to ILLEGAL_VL (except
1774 * for SC15, which must map to VL15). If we don't remap things this
1775 * way it is possible for VL15 counters to increment when we try to
1776 * send on a SC which is mapped to an invalid VL.
3ca4fbc8 1777 * When getting the table convert ILLEGAL_VL back to VL15.
77241056 1778 */
3ca4fbc8 1779static void filter_sc2vlt(void *data, bool set)
1780{
1781 int i;
a787bde8 1782 u8 *pd = data;
1783
1784 for (i = 0; i < OPA_MAX_SCS; i++) {
1785 if (i == 15)
1786 continue;
1787
1788 if (set) {
1789 if ((pd[i] & 0x1f) == 0xf)
1790 pd[i] = ILLEGAL_VL;
1791 } else {
1792 if ((pd[i] & 0x1f) == ILLEGAL_VL)
1793 pd[i] = 0xf;
1794 }
1795 }
1796}
1797
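/*
 * Write the 32-entry SC-to-VLt map into the four SendSC2VLT CSRs and cache
 * a copy in dd->sc2vl under sc2vl_lock.
 */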
1798static int set_sc2vlt_tables(struct hfi1_devdata *dd, void *data)
1799{
a787bde8 1800 u64 *val = data;
77241056 1801
3ca4fbc8 1802 filter_sc2vlt(data, true);
1803
1804 write_csr(dd, SEND_SC2VLT0, *val++);
1805 write_csr(dd, SEND_SC2VLT1, *val++);
1806 write_csr(dd, SEND_SC2VLT2, *val++);
1807 write_csr(dd, SEND_SC2VLT3, *val++);
1808 write_seqlock_irq(&dd->sc2vl_lock);
a787bde8 1809 memcpy(dd->sc2vl, data, sizeof(dd->sc2vl));
1810 write_sequnlock_irq(&dd->sc2vl_lock);
1811 return 0;
1812}
1813
1814static int get_sc2vlt_tables(struct hfi1_devdata *dd, void *data)
1815{
1816 u64 *val = (u64 *)data;
1817
1818 *val++ = read_csr(dd, SEND_SC2VLT0);
1819 *val++ = read_csr(dd, SEND_SC2VLT1);
1820 *val++ = read_csr(dd, SEND_SC2VLT2);
1821 *val++ = read_csr(dd, SEND_SC2VLT3);
1822
1823 filter_sc2vlt((u64 *)data, false);
1824 return 0;
1825}
1826
1827static int __subn_get_opa_sl_to_sc(struct opa_smp *smp, u32 am, u8 *data,
1828 struct ib_device *ibdev, u8 port,
f1685179 1829 u32 *resp_len, u32 max_len)
1830{
1831 struct hfi1_ibport *ibp = to_iport(ibdev, port);
6618c051 1832 u8 *p = data;
1833 size_t size = ARRAY_SIZE(ibp->sl_to_sc); /* == 32 */
1834 unsigned i;
1835
f1685179 1836 if (am || smp_length_check(size, max_len)) {
1837 smp->status |= IB_SMP_INVALID_FIELD;
1838 return reply((struct ib_mad_hdr *)smp);
1839 }
1840
1841 for (i = 0; i < ARRAY_SIZE(ibp->sl_to_sc); i++)
1842 *p++ = ibp->sl_to_sc[i];
1843
1844 if (resp_len)
1845 *resp_len += size;
1846
1847 return reply((struct ib_mad_hdr *)smp);
1848}
1849
1850static int __subn_set_opa_sl_to_sc(struct opa_smp *smp, u32 am, u8 *data,
1851 struct ib_device *ibdev, u8 port,
f1685179 1852 u32 *resp_len, u32 max_len)
1853{
1854 struct hfi1_ibport *ibp = to_iport(ibdev, port);
6618c051 1855 u8 *p = data;
f1685179 1856 size_t size = ARRAY_SIZE(ibp->sl_to_sc);
77241056 1857 int i;
0ec79e87 1858 u8 sc;
77241056 1859
f1685179 1860 if (am || smp_length_check(size, max_len)) {
1861 smp->status |= IB_SMP_INVALID_FIELD;
1862 return reply((struct ib_mad_hdr *)smp);
1863 }
1864
1865 for (i = 0; i < ARRAY_SIZE(ibp->sl_to_sc); i++) {
1866 sc = *p++;
1867 if (ibp->sl_to_sc[i] != sc) {
1868 ibp->sl_to_sc[i] = sc;
1869
1870 /* Put all stale qps into error state */
1871 hfi1_error_port_qps(ibp, i);
1872 }
1873 }
77241056 1874
1875 return __subn_get_opa_sl_to_sc(smp, am, data, ibdev, port, resp_len,
1876 max_len);
1877}
1878
1879static int __subn_get_opa_sc_to_sl(struct opa_smp *smp, u32 am, u8 *data,
1880 struct ib_device *ibdev, u8 port,
f1685179 1881 u32 *resp_len, u32 max_len)
1882{
1883 struct hfi1_ibport *ibp = to_iport(ibdev, port);
6618c051 1884 u8 *p = data;
1885 size_t size = ARRAY_SIZE(ibp->sc_to_sl); /* == 32 */
1886 unsigned i;
1887
f1685179 1888 if (am || smp_length_check(size, max_len)) {
1889 smp->status |= IB_SMP_INVALID_FIELD;
1890 return reply((struct ib_mad_hdr *)smp);
1891 }
1892
1893 for (i = 0; i < ARRAY_SIZE(ibp->sc_to_sl); i++)
1894 *p++ = ibp->sc_to_sl[i];
1895
1896 if (resp_len)
1897 *resp_len += size;
1898
1899 return reply((struct ib_mad_hdr *)smp);
1900}
1901
1902static int __subn_set_opa_sc_to_sl(struct opa_smp *smp, u32 am, u8 *data,
1903 struct ib_device *ibdev, u8 port,
f1685179 1904 u32 *resp_len, u32 max_len)
1905{
1906 struct hfi1_ibport *ibp = to_iport(ibdev, port);
f1685179 1907 size_t size = ARRAY_SIZE(ibp->sc_to_sl);
6618c051 1908 u8 *p = data;
1909 int i;
1910
f1685179 1911 if (am || smp_length_check(size, max_len)) {
1912 smp->status |= IB_SMP_INVALID_FIELD;
1913 return reply((struct ib_mad_hdr *)smp);
1914 }
1915
1916 for (i = 0; i < ARRAY_SIZE(ibp->sc_to_sl); i++)
1917 ibp->sc_to_sl[i] = *p++;
1918
1919 return __subn_get_opa_sc_to_sl(smp, am, data, ibdev, port, resp_len,
1920 max_len);
1921}
1922
1923static int __subn_get_opa_sc_to_vlt(struct opa_smp *smp, u32 am, u8 *data,
1924 struct ib_device *ibdev, u8 port,
f1685179 1925 u32 *resp_len, u32 max_len)
1926{
1927 u32 n_blocks = OPA_AM_NBLK(am);
1928 struct hfi1_devdata *dd = dd_from_ibdev(ibdev);
50e5dcbe 1929 void *vp = (void *)data;
1930 size_t size = 4 * sizeof(u64);
1931
f1685179 1932 if (n_blocks != 1 || smp_length_check(size, max_len)) {
1933 smp->status |= IB_SMP_INVALID_FIELD;
1934 return reply((struct ib_mad_hdr *)smp);
1935 }
1936
1937 get_sc2vlt_tables(dd, vp);
1938
1939 if (resp_len)
1940 *resp_len += size;
1941
1942 return reply((struct ib_mad_hdr *)smp);
1943}
1944
1945static int __subn_set_opa_sc_to_vlt(struct opa_smp *smp, u32 am, u8 *data,
1946 struct ib_device *ibdev, u8 port,
f1685179 1947 u32 *resp_len, u32 max_len)
1948{
1949 u32 n_blocks = OPA_AM_NBLK(am);
1950 int async_update = OPA_AM_ASYNC(am);
1951 struct hfi1_devdata *dd = dd_from_ibdev(ibdev);
50e5dcbe 1952 void *vp = (void *)data;
1953 struct hfi1_pportdata *ppd;
1954 int lstate;
1955 /*
1956 * set_sc2vlt_tables writes the information contained in *data
1957 * to four 64-bit registers SendSC2VLt[0-3]. We need to make
1958 * sure *max_len is not greater than the total size of the four
1959 * SendSC2VLt[0-3] registers.
1960 */
1961 size_t size = 4 * sizeof(u64);
77241056 1962
f1685179 1963 if (n_blocks != 1 || async_update || smp_length_check(size, max_len)) {
1964 smp->status |= IB_SMP_INVALID_FIELD;
1965 return reply((struct ib_mad_hdr *)smp);
1966 }
1967
1968 /* IB numbers ports from 1, hw from 0 */
1969 ppd = dd->pport + (port - 1);
1970 lstate = driver_lstate(ppd);
1971 /*
1972 * it's known that async_update is 0 by this point, but include
1973 * the explicit check for clarity
1974 */
1975 if (!async_update &&
1976 (lstate == IB_PORT_ARMED || lstate == IB_PORT_ACTIVE)) {
1977 smp->status |= IB_SMP_INVALID_FIELD;
1978 return reply((struct ib_mad_hdr *)smp);
1979 }
1980
1981 set_sc2vlt_tables(dd, vp);
1982
1983 return __subn_get_opa_sc_to_vlt(smp, am, data, ibdev, port, resp_len,
1984 max_len);
1985}
1986
1987static int __subn_get_opa_sc_to_vlnt(struct opa_smp *smp, u32 am, u8 *data,
1988 struct ib_device *ibdev, u8 port,
f1685179 1989 u32 *resp_len, u32 max_len)
1990{
1991 u32 n_blocks = OPA_AM_NPORT(am);
1992 struct hfi1_devdata *dd = dd_from_ibdev(ibdev);
1993 struct hfi1_pportdata *ppd;
50e5dcbe 1994 void *vp = (void *)data;
f1685179 1995 int size = sizeof(struct sc2vlnt);
77241056 1996
f1685179 1997 if (n_blocks != 1 || smp_length_check(size, max_len)) {
1998 smp->status |= IB_SMP_INVALID_FIELD;
1999 return reply((struct ib_mad_hdr *)smp);
2000 }
2001
2002 ppd = dd->pport + (port - 1);
2003
f1685179 2004 fm_get_table(ppd, FM_TBL_SC2VLNT, vp);
2005
2006 if (resp_len)
2007 *resp_len += size;
2008
2009 return reply((struct ib_mad_hdr *)smp);
2010}
2011
2012static int __subn_set_opa_sc_to_vlnt(struct opa_smp *smp, u32 am, u8 *data,
2013 struct ib_device *ibdev, u8 port,
f1685179 2014 u32 *resp_len, u32 max_len)
2015{
2016 u32 n_blocks = OPA_AM_NPORT(am);
2017 struct hfi1_devdata *dd = dd_from_ibdev(ibdev);
2018 struct hfi1_pportdata *ppd;
50e5dcbe 2019 void *vp = (void *)data;
77241056 2020 int lstate;
f1685179 2021 int size = sizeof(struct sc2vlnt);
77241056 2022
f1685179 2023 if (n_blocks != 1 || smp_length_check(size, max_len)) {
2024 smp->status |= IB_SMP_INVALID_FIELD;
2025 return reply((struct ib_mad_hdr *)smp);
2026 }
2027
2028 /* IB numbers ports from 1, hw from 0 */
2029 ppd = dd->pport + (port - 1);
2030 lstate = driver_lstate(ppd);
2031 if (lstate == IB_PORT_ARMED || lstate == IB_PORT_ACTIVE) {
2032 smp->status |= IB_SMP_INVALID_FIELD;
2033 return reply((struct ib_mad_hdr *)smp);
2034 }
2035
2036 ppd = dd->pport + (port - 1);
2037
2038 fm_set_table(ppd, FM_TBL_SC2VLNT, vp);
2039
2040 return __subn_get_opa_sc_to_vlnt(smp, am, data, ibdev, port,
f1685179 2041 resp_len, max_len);
2042}
2043
2044static int __subn_get_opa_psi(struct opa_smp *smp, u32 am, u8 *data,
2045 struct ib_device *ibdev, u8 port,
f1685179 2046 u32 *resp_len, u32 max_len)
2047{
2048 u32 nports = OPA_AM_NPORT(am);
2049 u32 start_of_sm_config = OPA_AM_START_SM_CFG(am);
2050 u32 lstate;
2051 struct hfi1_ibport *ibp;
2052 struct hfi1_pportdata *ppd;
50e5dcbe 2053 struct opa_port_state_info *psi = (struct opa_port_state_info *)data;
77241056 2054
f1685179 2055 if (nports != 1 || smp_length_check(sizeof(*psi), max_len)) {
2056 smp->status |= IB_SMP_INVALID_FIELD;
2057 return reply((struct ib_mad_hdr *)smp);
2058 }
2059
2060 ibp = to_iport(ibdev, port);
2061 ppd = ppd_from_ibp(ibp);
2062
2063 lstate = driver_lstate(ppd);
2064
2065 if (start_of_sm_config && (lstate == IB_PORT_INIT))
2066 ppd->is_sm_config_started = 1;
2067
2068 psi->port_states.ledenable_offlinereason = ppd->neighbor_normal << 4;
2069 psi->port_states.ledenable_offlinereason |=
2070 ppd->is_sm_config_started << 5;
2071 psi->port_states.ledenable_offlinereason |=
a9c05e35 2072 ppd->offline_disabled_reason;
2073
2074 psi->port_states.portphysstate_portstate =
bec7c79c 2075 (driver_pstate(ppd) << 4) | (lstate & 0xf);
77241056 2076 psi->link_width_downgrade_tx_active =
aadfc3b2 2077 cpu_to_be16(ppd->link_width_downgrade_tx_active);
77241056 2078 psi->link_width_downgrade_rx_active =
aadfc3b2 2079 cpu_to_be16(ppd->link_width_downgrade_rx_active);
2080 if (resp_len)
2081 *resp_len += sizeof(struct opa_port_state_info);
2082
2083 return reply((struct ib_mad_hdr *)smp);
2084}
2085
2086static int __subn_set_opa_psi(struct opa_smp *smp, u32 am, u8 *data,
2087 struct ib_device *ibdev, u8 port,
f1685179 2088 u32 *resp_len, u32 max_len)
2089{
2090 u32 nports = OPA_AM_NPORT(am);
2091 u32 start_of_sm_config = OPA_AM_START_SM_CFG(am);
2092 u32 ls_old;
2093 u8 ls_new, ps_new;
2094 struct hfi1_ibport *ibp;
2095 struct hfi1_pportdata *ppd;
50e5dcbe 2096 struct opa_port_state_info *psi = (struct opa_port_state_info *)data;
2097 int ret, invalid = 0;
2098
f1685179 2099 if (nports != 1 || smp_length_check(sizeof(*psi), max_len)) {
2100 smp->status |= IB_SMP_INVALID_FIELD;
2101 return reply((struct ib_mad_hdr *)smp);
2102 }
2103
2104 ibp = to_iport(ibdev, port);
2105 ppd = ppd_from_ibp(ibp);
2106
2107 ls_old = driver_lstate(ppd);
2108
2109 ls_new = port_states_to_logical_state(&psi->port_states);
2110 ps_new = port_states_to_phys_state(&psi->port_states);
2111
2112 if (ls_old == IB_PORT_INIT) {
2113 if (start_of_sm_config) {
2114 if (ls_new == ls_old || (ls_new == IB_PORT_ARMED))
2115 ppd->is_sm_config_started = 1;
2116 } else if (ls_new == IB_PORT_ARMED) {
a276672e 2117 if (ppd->is_sm_config_started == 0) {
77241056 2118 invalid = 1;
2119 smp->status |= IB_SMP_INVALID_FIELD;
2120 }
2121 }
2122 }
2123
2124 if (!invalid) {
2125 ret = set_port_states(ppd, smp, ls_new, ps_new);
2126 if (ret)
2127 return ret;
2128 }
77241056 2129
2130 return __subn_get_opa_psi(smp, am, data, ibdev, port, resp_len,
2131 max_len);
2132}
2133
2134static int __subn_get_opa_cable_info(struct opa_smp *smp, u32 am, u8 *data,
2135 struct ib_device *ibdev, u8 port,
f1685179 2136 u32 *resp_len, u32 max_len)
2137{
2138 struct hfi1_devdata *dd = dd_from_ibdev(ibdev);
2139 u32 addr = OPA_AM_CI_ADDR(am);
2140 u32 len = OPA_AM_CI_LEN(am) + 1;
2141 int ret;
2142
2143 if (dd->pport->port_type != PORT_TYPE_QSFP ||
2144 smp_length_check(len, max_len)) {
2145 smp->status |= IB_SMP_INVALID_FIELD;
2146 return reply((struct ib_mad_hdr *)smp);
2147 }
2148
349ac71f 2149#define __CI_PAGE_SIZE BIT(7) /* 128 bytes */
2150#define __CI_PAGE_MASK ~(__CI_PAGE_SIZE - 1)
2151#define __CI_PAGE_NUM(a) ((a) & __CI_PAGE_MASK)
2152
2153 /*
2154 * check that addr is within spec, and
2155 * addr and (addr + len - 1) are on the same "page"
2156 */
77241056 2157 if (addr >= 4096 ||
17fb4f29 2158 (__CI_PAGE_NUM(addr) != __CI_PAGE_NUM(addr + len - 1))) {
2159 smp->status |= IB_SMP_INVALID_FIELD;
2160 return reply((struct ib_mad_hdr *)smp);
2161 }
2162
2163 ret = get_cable_info(dd, port, addr, len, data);
2164
2165 if (ret == -ENODEV) {
2166 smp->status |= IB_SMP_UNSUP_METH_ATTR;
2167 return reply((struct ib_mad_hdr *)smp);
2168 }
2169
2170 /* The address range for the CableInfo SMA query is wider than the
2171 * memory available on the QSFP cable. We want to return a valid
2172 * response, albeit zeroed out, for address ranges beyond available
2173 * memory but that are within the CableInfo query spec
2174 */
2175 if (ret < 0 && ret != -ERANGE) {
2176 smp->status |= IB_SMP_INVALID_FIELD;
2177 return reply((struct ib_mad_hdr *)smp);
2178 }
2179
2180 if (resp_len)
2181 *resp_len += len;
2182
2183 return reply((struct ib_mad_hdr *)smp);
2184}
2185
2186static int __subn_get_opa_bct(struct opa_smp *smp, u32 am, u8 *data,
2187 struct ib_device *ibdev, u8 port, u32 *resp_len,
2188 u32 max_len)
2189{
2190 u32 num_ports = OPA_AM_NPORT(am);
2191 struct hfi1_devdata *dd = dd_from_ibdev(ibdev);
2192 struct hfi1_pportdata *ppd;
50e5dcbe 2193 struct buffer_control *p = (struct buffer_control *)data;
f1685179 2194 int size = sizeof(struct buffer_control);
77241056 2195
f1685179 2196 if (num_ports != 1 || smp_length_check(size, max_len)) {
2197 smp->status |= IB_SMP_INVALID_FIELD;
2198 return reply((struct ib_mad_hdr *)smp);
2199 }
2200
2201 ppd = dd->pport + (port - 1);
f1685179 2202 fm_get_table(ppd, FM_TBL_BUFFER_CONTROL, p);
2203 trace_bct_get(dd, p);
2204 if (resp_len)
2205 *resp_len += size;
2206
2207 return reply((struct ib_mad_hdr *)smp);
2208}
2209
2210static int __subn_set_opa_bct(struct opa_smp *smp, u32 am, u8 *data,
2211 struct ib_device *ibdev, u8 port, u32 *resp_len,
2212 u32 max_len)
2213{
2214 u32 num_ports = OPA_AM_NPORT(am);
2215 struct hfi1_devdata *dd = dd_from_ibdev(ibdev);
2216 struct hfi1_pportdata *ppd;
50e5dcbe 2217 struct buffer_control *p = (struct buffer_control *)data;
77241056 2218
f1685179 2219 if (num_ports != 1 || smp_length_check(sizeof(*p), max_len)) {
2220 smp->status |= IB_SMP_INVALID_FIELD;
2221 return reply((struct ib_mad_hdr *)smp);
2222 }
2223 ppd = dd->pport + (port - 1);
2224 trace_bct_set(dd, p);
2225 if (fm_set_table(ppd, FM_TBL_BUFFER_CONTROL, p) < 0) {
2226 smp->status |= IB_SMP_INVALID_FIELD;
2227 return reply((struct ib_mad_hdr *)smp);
2228 }
2229
2230 return __subn_get_opa_bct(smp, am, data, ibdev, port, resp_len,
2231 max_len);
2232}
2233
2234static int __subn_get_opa_vl_arb(struct opa_smp *smp, u32 am, u8 *data,
2235 struct ib_device *ibdev, u8 port,
f1685179 2236 u32 *resp_len, u32 max_len)
2237{
2238 struct hfi1_pportdata *ppd = ppd_from_ibp(to_iport(ibdev, port));
2239 u32 num_ports = OPA_AM_NPORT(am);
2240 u8 section = (am & 0x00ff0000) >> 16;
2241 u8 *p = data;
f1685179 2242 int size = 256;
77241056 2243
f1685179 2244 if (num_ports != 1 || smp_length_check(size, max_len)) {
2245 smp->status |= IB_SMP_INVALID_FIELD;
2246 return reply((struct ib_mad_hdr *)smp);
2247 }
2248
2249 switch (section) {
2250 case OPA_VLARB_LOW_ELEMENTS:
f1685179 2251 fm_get_table(ppd, FM_TBL_VL_LOW_ARB, p);
2252 break;
2253 case OPA_VLARB_HIGH_ELEMENTS:
f1685179 2254 fm_get_table(ppd, FM_TBL_VL_HIGH_ARB, p);
2255 break;
2256 case OPA_VLARB_PREEMPT_ELEMENTS:
f1685179 2257 fm_get_table(ppd, FM_TBL_VL_PREEMPT_ELEMS, p);
2258 break;
2259 case OPA_VLARB_PREEMPT_MATRIX:
f1685179 2260 fm_get_table(ppd, FM_TBL_VL_PREEMPT_MATRIX, p);
2261 break;
2262 default:
2263 pr_warn("OPA SubnGet(VL Arb) AM Invalid : 0x%x\n",
2264 be32_to_cpu(smp->attr_mod));
2265 smp->status |= IB_SMP_INVALID_FIELD;
f1685179 2266 size = 0;
2267 break;
2268 }
2269
2270 if (size > 0 && resp_len)
2271 *resp_len += size;
2272
2273 return reply((struct ib_mad_hdr *)smp);
2274}
2275
2276static int __subn_set_opa_vl_arb(struct opa_smp *smp, u32 am, u8 *data,
2277 struct ib_device *ibdev, u8 port,
f1685179 2278 u32 *resp_len, u32 max_len)
2279{
2280 struct hfi1_pportdata *ppd = ppd_from_ibp(to_iport(ibdev, port));
2281 u32 num_ports = OPA_AM_NPORT(am);
2282 u8 section = (am & 0x00ff0000) >> 16;
2283 u8 *p = data;
f1685179 2284 int size = 256;
77241056 2285
f1685179 2286 if (num_ports != 1 || smp_length_check(size, max_len)) {
2287 smp->status |= IB_SMP_INVALID_FIELD;
2288 return reply((struct ib_mad_hdr *)smp);
2289 }
2290
2291 switch (section) {
2292 case OPA_VLARB_LOW_ELEMENTS:
50e5dcbe 2293 (void)fm_set_table(ppd, FM_TBL_VL_LOW_ARB, p);
2294 break;
2295 case OPA_VLARB_HIGH_ELEMENTS:
50e5dcbe 2296 (void)fm_set_table(ppd, FM_TBL_VL_HIGH_ARB, p);
77241056 2297 break;
2298 /*
 2299 * neither OPA_VLARB_PREEMPT_ELEMENTS nor OPA_VLARB_PREEMPT_MATRIX
2300 * can be changed from the default values
2301 */
2302 case OPA_VLARB_PREEMPT_ELEMENTS:
2303 /* FALLTHROUGH */
2304 case OPA_VLARB_PREEMPT_MATRIX:
2305 smp->status |= IB_SMP_UNSUP_METH_ATTR;
2306 break;
2307 default:
2308 pr_warn("OPA SubnSet(VL Arb) AM Invalid : 0x%x\n",
2309 be32_to_cpu(smp->attr_mod));
2310 smp->status |= IB_SMP_INVALID_FIELD;
2311 break;
2312 }
2313
2314 return __subn_get_opa_vl_arb(smp, am, data, ibdev, port, resp_len,
2315 max_len);
2316}
2317
2318struct opa_pma_mad {
2319 struct ib_mad_hdr mad_hdr;
2320 u8 data[2024];
2321} __packed;
2322
2323struct opa_port_status_req {
2324 __u8 port_num;
2325 __u8 reserved[3];
2326 __be32 vl_select_mask;
2327};
2328
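/* bits 0-7 (data VLs 0-7) plus bit 15 (VL15) */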
2329#define VL_MASK_ALL 0x000080ff
2330
2331struct opa_port_status_rsp {
2332 __u8 port_num;
2333 __u8 reserved[3];
2334 __be32 vl_select_mask;
2335
2336 /* Data counters */
2337 __be64 port_xmit_data;
2338 __be64 port_rcv_data;
2339 __be64 port_xmit_pkts;
2340 __be64 port_rcv_pkts;
2341 __be64 port_multicast_xmit_pkts;
2342 __be64 port_multicast_rcv_pkts;
2343 __be64 port_xmit_wait;
2344 __be64 sw_port_congestion;
2345 __be64 port_rcv_fecn;
2346 __be64 port_rcv_becn;
2347 __be64 port_xmit_time_cong;
2348 __be64 port_xmit_wasted_bw;
2349 __be64 port_xmit_wait_data;
2350 __be64 port_rcv_bubble;
2351 __be64 port_mark_fecn;
2352 /* Error counters */
2353 __be64 port_rcv_constraint_errors;
2354 __be64 port_rcv_switch_relay_errors;
2355 __be64 port_xmit_discards;
2356 __be64 port_xmit_constraint_errors;
2357 __be64 port_rcv_remote_physical_errors;
2358 __be64 local_link_integrity_errors;
2359 __be64 port_rcv_errors;
2360 __be64 excessive_buffer_overruns;
2361 __be64 fm_config_errors;
2362 __be32 link_error_recovery;
2363 __be32 link_downed;
2364 u8 uncorrectable_errors;
2365
2366 u8 link_quality_indicator; /* 5res, 3bit */
2367 u8 res2[6];
2368 struct _vls_pctrs {
2369 /* per-VL Data counters */
2370 __be64 port_vl_xmit_data;
2371 __be64 port_vl_rcv_data;
2372 __be64 port_vl_xmit_pkts;
2373 __be64 port_vl_rcv_pkts;
2374 __be64 port_vl_xmit_wait;
2375 __be64 sw_port_vl_congestion;
2376 __be64 port_vl_rcv_fecn;
2377 __be64 port_vl_rcv_becn;
2378 __be64 port_xmit_time_cong;
2379 __be64 port_vl_xmit_wasted_bw;
2380 __be64 port_vl_xmit_wait_data;
2381 __be64 port_vl_rcv_bubble;
2382 __be64 port_vl_mark_fecn;
2383 __be64 port_vl_xmit_discards;
2384 } vls[0]; /* real array size defined by # bits set in vl_select_mask */
2385};
2386
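/*
 * Bits for opa_clear_port_status.counter_select_mask; each bit selects the
 * corresponding PortStatus counter for clearing in pma_set_opa_portstatus().
 */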
2387enum counter_selects {
2388 CS_PORT_XMIT_DATA = (1 << 31),
2389 CS_PORT_RCV_DATA = (1 << 30),
2390 CS_PORT_XMIT_PKTS = (1 << 29),
2391 CS_PORT_RCV_PKTS = (1 << 28),
2392 CS_PORT_MCAST_XMIT_PKTS = (1 << 27),
2393 CS_PORT_MCAST_RCV_PKTS = (1 << 26),
2394 CS_PORT_XMIT_WAIT = (1 << 25),
2395 CS_SW_PORT_CONGESTION = (1 << 24),
2396 CS_PORT_RCV_FECN = (1 << 23),
2397 CS_PORT_RCV_BECN = (1 << 22),
2398 CS_PORT_XMIT_TIME_CONG = (1 << 21),
2399 CS_PORT_XMIT_WASTED_BW = (1 << 20),
2400 CS_PORT_XMIT_WAIT_DATA = (1 << 19),
2401 CS_PORT_RCV_BUBBLE = (1 << 18),
2402 CS_PORT_MARK_FECN = (1 << 17),
2403 CS_PORT_RCV_CONSTRAINT_ERRORS = (1 << 16),
2404 CS_PORT_RCV_SWITCH_RELAY_ERRORS = (1 << 15),
2405 CS_PORT_XMIT_DISCARDS = (1 << 14),
2406 CS_PORT_XMIT_CONSTRAINT_ERRORS = (1 << 13),
2407 CS_PORT_RCV_REMOTE_PHYSICAL_ERRORS = (1 << 12),
2408 CS_LOCAL_LINK_INTEGRITY_ERRORS = (1 << 11),
2409 CS_PORT_RCV_ERRORS = (1 << 10),
2410 CS_EXCESSIVE_BUFFER_OVERRUNS = (1 << 9),
2411 CS_FM_CONFIG_ERRORS = (1 << 8),
2412 CS_LINK_ERROR_RECOVERY = (1 << 7),
2413 CS_LINK_DOWNED = (1 << 6),
2414 CS_UNCORRECTABLE_ERRORS = (1 << 5),
2415};
2416
2417struct opa_clear_port_status {
2418 __be64 port_select_mask[4];
2419 __be32 counter_select_mask;
2420};
2421
2422struct opa_aggregate {
2423 __be16 attr_id;
2424 __be16 err_reqlength; /* 1 bit, 8 res, 7 bit */
2425 __be32 attr_mod;
2426 u8 data[0];
2427};
2428
2429#define MSK_LLI 0x000000f0
2430#define MSK_LLI_SFT 4
2431#define MSK_LER 0x0000000f
2432#define MSK_LER_SFT 0
2433#define ADD_LLI 8
2434#define ADD_LER 2
2435
2436/* Request contains first three fields, response contains those plus the rest */
2437struct opa_port_data_counters_msg {
2438 __be64 port_select_mask[4];
2439 __be32 vl_select_mask;
f0852922 2440 __be32 resolution;
2441
2442 /* Response fields follow */
2443 struct _port_dctrs {
2444 u8 port_number;
2445 u8 reserved2[3];
2446 __be32 link_quality_indicator; /* 29res, 3bit */
2447
2448 /* Data counters */
2449 __be64 port_xmit_data;
2450 __be64 port_rcv_data;
2451 __be64 port_xmit_pkts;
2452 __be64 port_rcv_pkts;
2453 __be64 port_multicast_xmit_pkts;
2454 __be64 port_multicast_rcv_pkts;
2455 __be64 port_xmit_wait;
2456 __be64 sw_port_congestion;
2457 __be64 port_rcv_fecn;
2458 __be64 port_rcv_becn;
2459 __be64 port_xmit_time_cong;
2460 __be64 port_xmit_wasted_bw;
2461 __be64 port_xmit_wait_data;
2462 __be64 port_rcv_bubble;
2463 __be64 port_mark_fecn;
2464
2465 __be64 port_error_counter_summary;
2466 /* Sum of error counts/port */
2467
2468 struct _vls_dctrs {
2469 /* per-VL Data counters */
2470 __be64 port_vl_xmit_data;
2471 __be64 port_vl_rcv_data;
2472 __be64 port_vl_xmit_pkts;
2473 __be64 port_vl_rcv_pkts;
2474 __be64 port_vl_xmit_wait;
2475 __be64 sw_port_vl_congestion;
2476 __be64 port_vl_rcv_fecn;
2477 __be64 port_vl_rcv_becn;
2478 __be64 port_xmit_time_cong;
2479 __be64 port_vl_xmit_wasted_bw;
2480 __be64 port_vl_xmit_wait_data;
2481 __be64 port_vl_rcv_bubble;
2482 __be64 port_vl_mark_fecn;
2483 } vls[0];
2484 /* array size defined by #bits set in vl_select_mask*/
2485 } port[1]; /* array size defined by #ports in attribute modifier */
2486};
2487
2488struct opa_port_error_counters64_msg {
2489 /*
2490 * Request contains first two fields, response contains the
2491 * whole magilla
2492 */
2493 __be64 port_select_mask[4];
2494 __be32 vl_select_mask;
2495
2496 /* Response-only fields follow */
2497 __be32 reserved1;
2498 struct _port_ectrs {
2499 u8 port_number;
2500 u8 reserved2[7];
2501 __be64 port_rcv_constraint_errors;
2502 __be64 port_rcv_switch_relay_errors;
2503 __be64 port_xmit_discards;
2504 __be64 port_xmit_constraint_errors;
2505 __be64 port_rcv_remote_physical_errors;
2506 __be64 local_link_integrity_errors;
2507 __be64 port_rcv_errors;
2508 __be64 excessive_buffer_overruns;
2509 __be64 fm_config_errors;
2510 __be32 link_error_recovery;
2511 __be32 link_downed;
2512 u8 uncorrectable_errors;
2513 u8 reserved3[7];
2514 struct _vls_ectrs {
2515 __be64 port_vl_xmit_discards;
2516 } vls[0];
2517 /* array size defined by #bits set in vl_select_mask */
2518 } port[1]; /* array size defined by #ports in attribute modifier */
2519};
2520
2521struct opa_port_error_info_msg {
2522 __be64 port_select_mask[4];
2523 __be32 error_info_select_mask;
2524 __be32 reserved1;
2525 struct _port_ei {
2526 u8 port_number;
2527 u8 reserved2[7];
2528
2529 /* PortRcvErrorInfo */
2530 struct {
2531 u8 status_and_code;
2532 union {
2533 u8 raw[17];
2534 struct {
2535 /* EI1to12 format */
2536 u8 packet_flit1[8];
2537 u8 packet_flit2[8];
2538 u8 remaining_flit_bits12;
2539 } ei1to12;
2540 struct {
2541 u8 packet_bytes[8];
2542 u8 remaining_flit_bits;
2543 } ei13;
2544 } ei;
2545 u8 reserved3[6];
2546 } __packed port_rcv_ei;
2547
2548 /* ExcessiveBufferOverrunInfo */
2549 struct {
2550 u8 status_and_sc;
2551 u8 reserved4[7];
2552 } __packed excessive_buffer_overrun_ei;
2553
2554 /* PortXmitConstraintErrorInfo */
2555 struct {
2556 u8 status;
2557 u8 reserved5;
2558 __be16 pkey;
2559 __be32 slid;
2560 } __packed port_xmit_constraint_ei;
2561
2562 /* PortRcvConstraintErrorInfo */
2563 struct {
2564 u8 status;
2565 u8 reserved6;
2566 __be16 pkey;
2567 __be32 slid;
2568 } __packed port_rcv_constraint_ei;
2569
2570 /* PortRcvSwitchRelayErrorInfo */
2571 struct {
2572 u8 status_and_code;
2573 u8 reserved7[3];
2574 __u32 error_info;
2575 } __packed port_rcv_switch_relay_ei;
2576
2577 /* UncorrectableErrorInfo */
2578 struct {
2579 u8 status_and_code;
2580 u8 reserved8;
2581 } __packed uncorrectable_ei;
2582
2583 /* FMConfigErrorInfo */
2584 struct {
2585 u8 status_and_code;
2586 u8 error_info;
2587 } __packed fm_config_ei;
2588 __u32 reserved9;
2589 } port[1]; /* actual array size defined by #ports in attr modifier */
2590};
2591
2592/* opa_port_error_info_msg error_info_select_mask bit definitions */
2593enum error_info_selects {
2594 ES_PORT_RCV_ERROR_INFO = (1 << 31),
2595 ES_EXCESSIVE_BUFFER_OVERRUN_INFO = (1 << 30),
2596 ES_PORT_XMIT_CONSTRAINT_ERROR_INFO = (1 << 29),
2597 ES_PORT_RCV_CONSTRAINT_ERROR_INFO = (1 << 28),
2598 ES_PORT_RCV_SWITCH_RELAY_ERROR_INFO = (1 << 27),
2599 ES_UNCORRECTABLE_ERROR_INFO = (1 << 26),
2600 ES_FM_CONFIG_ERROR_INFO = (1 << 25)
2601};
2602
2603static int pma_get_opa_classportinfo(struct opa_pma_mad *pmp,
17fb4f29 2604 struct ib_device *ibdev, u32 *resp_len)
2605{
2606 struct opa_class_port_info *p =
2607 (struct opa_class_port_info *)pmp->data;
2608
2609 memset(pmp->data, 0, sizeof(pmp->data));
2610
2611 if (pmp->mad_hdr.attr_mod != 0)
2612 pmp->mad_hdr.status |= IB_SMP_INVALID_FIELD;
2613
2614 p->base_version = OPA_MGMT_BASE_VERSION;
9fa240bb 2615 p->class_version = OPA_SM_CLASS_VERSION;
2616 /*
2617 * Expected response time is 4.096 usec. * 2^18 == 1.073741824 sec.
2618 */
2619 p->cap_mask2_resp_time = cpu_to_be32(18);
2620
2621 if (resp_len)
2622 *resp_len += sizeof(*p);
2623
2624 return reply((struct ib_mad_hdr *)pmp);
2625}
2626
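/*
 * Clamp port_xmit_wait to the sum of the per-VL transmit-wait counters,
 * saturating if the sum wraps; only applied when !is_bx().
 */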
2627static void a0_portstatus(struct hfi1_pportdata *ppd,
2628 struct opa_port_status_rsp *rsp, u32 vl_select_mask)
2629{
2630 if (!is_bx(ppd->dd)) {
2631 unsigned long vl;
d9d3e025 2632 u64 sum_vl_xmit_wait = 0;
77241056 2633 u32 vl_all_mask = VL_MASK_ALL;
2634
2635 for_each_set_bit(vl, (unsigned long *)&(vl_all_mask),
2636 8 * sizeof(vl_all_mask)) {
2637 u64 tmp = sum_vl_xmit_wait +
2638 read_port_cntr(ppd, C_TX_WAIT_VL,
2639 idx_from_vl(vl));
2640 if (tmp < sum_vl_xmit_wait) {
2641 /* we wrapped */
2642 sum_vl_xmit_wait = (u64)~0;
2643 break;
2644 }
2645 sum_vl_xmit_wait = tmp;
77241056 2646 }
2647 if (be64_to_cpu(rsp->port_xmit_wait) > sum_vl_xmit_wait)
2648 rsp->port_xmit_wait = cpu_to_be64(sum_vl_xmit_wait);
2649 }
2650}
2651
77241056 2652static int pma_get_opa_portstatus(struct opa_pma_mad *pmp,
2653 struct ib_device *ibdev,
2654 u8 port, u32 *resp_len)
2655{
2656 struct opa_port_status_req *req =
2657 (struct opa_port_status_req *)pmp->data;
2658 struct hfi1_devdata *dd = dd_from_ibdev(ibdev);
2659 struct opa_port_status_rsp *rsp;
2660 u32 vl_select_mask = be32_to_cpu(req->vl_select_mask);
2661 unsigned long vl;
2662 size_t response_data_size;
2663 u32 nports = be32_to_cpu(pmp->mad_hdr.attr_mod) >> 24;
2664 u8 port_num = req->port_num;
2665 u8 num_vls = hweight32(vl_select_mask);
2666 struct _vls_pctrs *vlinfo;
2667 struct hfi1_ibport *ibp = to_iport(ibdev, port);
2668 struct hfi1_pportdata *ppd = ppd_from_ibp(ibp);
2669 int vfi;
2670 u64 tmp, tmp2;
2671
2672 response_data_size = sizeof(struct opa_port_status_rsp) +
2673 num_vls * sizeof(struct _vls_pctrs);
2674 if (response_data_size > sizeof(pmp->data)) {
2675 pmp->mad_hdr.status |= OPA_PM_STATUS_REQUEST_TOO_LARGE;
2676 return reply((struct ib_mad_hdr *)pmp);
2677 }
2678
2679 if (nports != 1 || (port_num && port_num != port) ||
2680 num_vls > OPA_MAX_VLS || (vl_select_mask & ~VL_MASK_ALL)) {
2681 pmp->mad_hdr.status |= IB_SMP_INVALID_FIELD;
2682 return reply((struct ib_mad_hdr *)pmp);
2683 }
2684
2685 memset(pmp->data, 0, sizeof(pmp->data));
2686
2687 rsp = (struct opa_port_status_rsp *)pmp->data;
2688 if (port_num)
2689 rsp->port_num = port_num;
2690 else
2691 rsp->port_num = port;
2692
2693 rsp->port_rcv_constraint_errors =
2694 cpu_to_be64(read_port_cntr(ppd, C_SW_RCV_CSTR_ERR,
2695 CNTR_INVALID_VL));
2696
2697 hfi1_read_link_quality(dd, &rsp->link_quality_indicator);
2698
2699 rsp->vl_select_mask = cpu_to_be32(vl_select_mask);
2700 rsp->port_xmit_data = cpu_to_be64(read_dev_cntr(dd, C_DC_XMIT_FLITS,
2701 CNTR_INVALID_VL));
2702 rsp->port_rcv_data = cpu_to_be64(read_dev_cntr(dd, C_DC_RCV_FLITS,
2703 CNTR_INVALID_VL));
2704 rsp->port_xmit_pkts = cpu_to_be64(read_dev_cntr(dd, C_DC_XMIT_PKTS,
2705 CNTR_INVALID_VL));
2706 rsp->port_rcv_pkts = cpu_to_be64(read_dev_cntr(dd, C_DC_RCV_PKTS,
2707 CNTR_INVALID_VL));
2708 rsp->port_multicast_xmit_pkts =
2709 cpu_to_be64(read_dev_cntr(dd, C_DC_MC_XMIT_PKTS,
17fb4f29 2710 CNTR_INVALID_VL));
2711 rsp->port_multicast_rcv_pkts =
2712 cpu_to_be64(read_dev_cntr(dd, C_DC_MC_RCV_PKTS,
2713 CNTR_INVALID_VL));
2714 rsp->port_xmit_wait =
2715 cpu_to_be64(read_port_cntr(ppd, C_TX_WAIT, CNTR_INVALID_VL));
2716 rsp->port_rcv_fecn =
2717 cpu_to_be64(read_dev_cntr(dd, C_DC_RCV_FCN, CNTR_INVALID_VL));
2718 rsp->port_rcv_becn =
2719 cpu_to_be64(read_dev_cntr(dd, C_DC_RCV_BCN, CNTR_INVALID_VL));
2720 rsp->port_xmit_discards =
2721 cpu_to_be64(read_port_cntr(ppd, C_SW_XMIT_DSCD,
2722 CNTR_INVALID_VL));
2723 rsp->port_xmit_constraint_errors =
2724 cpu_to_be64(read_port_cntr(ppd, C_SW_XMIT_CSTR_ERR,
2725 CNTR_INVALID_VL));
2726 rsp->port_rcv_remote_physical_errors =
2727 cpu_to_be64(read_dev_cntr(dd, C_DC_RMT_PHY_ERR,
2728 CNTR_INVALID_VL));
2729 rsp->local_link_integrity_errors =
2730 cpu_to_be64(read_dev_cntr(dd, C_DC_RX_REPLAY,
2731 CNTR_INVALID_VL));
2732 tmp = read_dev_cntr(dd, C_DC_SEQ_CRC_CNT, CNTR_INVALID_VL);
2733 tmp2 = tmp + read_dev_cntr(dd, C_DC_REINIT_FROM_PEER_CNT,
17fb4f29 2734 CNTR_INVALID_VL);
2735 if (tmp2 > (u32)UINT_MAX || tmp2 < tmp) {
2736 /* overflow/wrapped */
2737 rsp->link_error_recovery = cpu_to_be32(~0);
2738 } else {
2739 rsp->link_error_recovery = cpu_to_be32(tmp2);
2740 }
2741 rsp->port_rcv_errors =
2742 cpu_to_be64(read_dev_cntr(dd, C_DC_RCV_ERR, CNTR_INVALID_VL));
2743 rsp->excessive_buffer_overruns =
2744 cpu_to_be64(read_dev_cntr(dd, C_RCV_OVF, CNTR_INVALID_VL));
2745 rsp->fm_config_errors =
2746 cpu_to_be64(read_dev_cntr(dd, C_DC_FM_CFG_ERR,
2747 CNTR_INVALID_VL));
2748 rsp->link_downed = cpu_to_be32(read_port_cntr(ppd, C_SW_LINK_DOWN,
17fb4f29 2749 CNTR_INVALID_VL));
2750
2751 /* rsp->uncorrectable_errors is 8 bits wide, and it pegs at 0xff */
2752 tmp = read_dev_cntr(dd, C_DC_UNC_ERR, CNTR_INVALID_VL);
2753 rsp->uncorrectable_errors = tmp < 0x100 ? (tmp & 0xff) : 0xff;
2754
58721b8f 2755 vlinfo = &rsp->vls[0];
2756 vfi = 0;
2757 /* The vl_select_mask has been checked above, and we know
2758 * that it contains only entries which represent valid VLs.
2759 * So in the for_each_set_bit() loop below, we don't need
2760 * any additional checks for vl.
2761 */
2762 for_each_set_bit(vl, (unsigned long *)&(vl_select_mask),
2763 8 * sizeof(vl_select_mask)) {
2764 memset(vlinfo, 0, sizeof(*vlinfo));
2765
2766 tmp = read_dev_cntr(dd, C_DC_RX_FLIT_VL, idx_from_vl(vl));
2767 rsp->vls[vfi].port_vl_rcv_data = cpu_to_be64(tmp);
2768
2769 rsp->vls[vfi].port_vl_rcv_pkts =
2770 cpu_to_be64(read_dev_cntr(dd, C_DC_RX_PKT_VL,
17fb4f29 2771 idx_from_vl(vl)));
2772
2773 rsp->vls[vfi].port_vl_xmit_data =
2774 cpu_to_be64(read_port_cntr(ppd, C_TX_FLIT_VL,
17fb4f29 2775 idx_from_vl(vl)));
2776
2777 rsp->vls[vfi].port_vl_xmit_pkts =
2778 cpu_to_be64(read_port_cntr(ppd, C_TX_PKT_VL,
17fb4f29 2779 idx_from_vl(vl)));
2780
2781 rsp->vls[vfi].port_vl_xmit_wait =
2782 cpu_to_be64(read_port_cntr(ppd, C_TX_WAIT_VL,
17fb4f29 2783 idx_from_vl(vl)));
2784
2785 rsp->vls[vfi].port_vl_rcv_fecn =
2786 cpu_to_be64(read_dev_cntr(dd, C_DC_RCV_FCN_VL,
17fb4f29 2787 idx_from_vl(vl)));
2788
2789 rsp->vls[vfi].port_vl_rcv_becn =
2790 cpu_to_be64(read_dev_cntr(dd, C_DC_RCV_BCN_VL,
17fb4f29 2791 idx_from_vl(vl)));
77241056 2792
2793 rsp->vls[vfi].port_vl_xmit_discards =
2794 cpu_to_be64(read_port_cntr(ppd, C_SW_XMIT_DSCD_VL,
2795 idx_from_vl(vl)));
2796 vlinfo++;
2797 vfi++;
2798 }
2799
2800 a0_portstatus(ppd, rsp, vl_select_mask);
2801
2802 if (resp_len)
2803 *resp_len += response_data_size;
2804
2805 return reply((struct ib_mad_hdr *)pmp);
2806}
2807
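/*
 * Sum the individual error counters into the 64-bit error counter summary,
 * with the local link integrity and link error recovery contributions
 * right-shifted by their respective resolutions.
 */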
2808static u64 get_error_counter_summary(struct ib_device *ibdev, u8 port,
2809 u8 res_lli, u8 res_ler)
2810{
2811 struct hfi1_devdata *dd = dd_from_ibdev(ibdev);
2812 struct hfi1_ibport *ibp = to_iport(ibdev, port);
2813 struct hfi1_pportdata *ppd = ppd_from_ibp(ibp);
2814 u64 error_counter_summary = 0, tmp;
2815
2816 error_counter_summary += read_port_cntr(ppd, C_SW_RCV_CSTR_ERR,
2817 CNTR_INVALID_VL);
2818 /* port_rcv_switch_relay_errors is 0 for HFIs */
2819 error_counter_summary += read_port_cntr(ppd, C_SW_XMIT_DSCD,
2820 CNTR_INVALID_VL);
2821 error_counter_summary += read_port_cntr(ppd, C_SW_XMIT_CSTR_ERR,
2822 CNTR_INVALID_VL);
2823 error_counter_summary += read_dev_cntr(dd, C_DC_RMT_PHY_ERR,
17fb4f29 2824 CNTR_INVALID_VL);
f0852922 2825 /* local link integrity must be right-shifted by the lli resolution */
2826 error_counter_summary += (read_dev_cntr(dd, C_DC_RX_REPLAY,
2827 CNTR_INVALID_VL) >> res_lli);
 2828 /* link error recovery must be right-shifted by the ler resolution */
2829 tmp = read_dev_cntr(dd, C_DC_SEQ_CRC_CNT, CNTR_INVALID_VL);
2830 tmp += read_dev_cntr(dd, C_DC_REINIT_FROM_PEER_CNT, CNTR_INVALID_VL);
2831 error_counter_summary += (tmp >> res_ler);
77241056 2832 error_counter_summary += read_dev_cntr(dd, C_DC_RCV_ERR,
17fb4f29 2833 CNTR_INVALID_VL);
2834 error_counter_summary += read_dev_cntr(dd, C_RCV_OVF, CNTR_INVALID_VL);
2835 error_counter_summary += read_dev_cntr(dd, C_DC_FM_CFG_ERR,
17fb4f29 2836 CNTR_INVALID_VL);
2837 /* ppd->link_downed is a 32-bit value */
2838 error_counter_summary += read_port_cntr(ppd, C_SW_LINK_DOWN,
2839 CNTR_INVALID_VL);
2840 tmp = read_dev_cntr(dd, C_DC_UNC_ERR, CNTR_INVALID_VL);
2841 /* this is an 8-bit quantity */
2842 error_counter_summary += tmp < 0x100 ? (tmp & 0xff) : 0xff;
2843
2844 return error_counter_summary;
2845}
2846
d9d3e025 2847static void a0_datacounters(struct hfi1_pportdata *ppd, struct _port_dctrs *rsp,
2848 u32 vl_select_mask)
2849{
d9d3e025 2850 if (!is_bx(ppd->dd)) {
77241056 2851 unsigned long vl;
db00a055 2852 u64 sum_vl_xmit_wait = 0;
d9d3e025 2853 u32 vl_all_mask = VL_MASK_ALL;
db00a055 2854
2855 for_each_set_bit(vl, (unsigned long *)&(vl_all_mask),
2856 8 * sizeof(vl_all_mask)) {
77241056 2857 u64 tmp = sum_vl_xmit_wait +
2858 read_port_cntr(ppd, C_TX_WAIT_VL,
2859 idx_from_vl(vl));
2860 if (tmp < sum_vl_xmit_wait) {
2861 /* we wrapped */
50e5dcbe 2862 sum_vl_xmit_wait = (u64)~0;
2863 break;
2864 }
2865 sum_vl_xmit_wait = tmp;
2866 }
2867 if (be64_to_cpu(rsp->port_xmit_wait) > sum_vl_xmit_wait)
2868 rsp->port_xmit_wait = cpu_to_be64(sum_vl_xmit_wait);
2869 }
2870}
2871
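/*
 * Fill in the common data counters; shared by the OPA DataPortCounters
 * query and the IB PortCountersExt query below.
 */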
2872static void pma_get_opa_port_dctrs(struct ib_device *ibdev,
2873 struct _port_dctrs *rsp)
2874{
2875 struct hfi1_devdata *dd = dd_from_ibdev(ibdev);
2876
2877 rsp->port_xmit_data = cpu_to_be64(read_dev_cntr(dd, C_DC_XMIT_FLITS,
2878 CNTR_INVALID_VL));
2879 rsp->port_rcv_data = cpu_to_be64(read_dev_cntr(dd, C_DC_RCV_FLITS,
2880 CNTR_INVALID_VL));
2881 rsp->port_xmit_pkts = cpu_to_be64(read_dev_cntr(dd, C_DC_XMIT_PKTS,
2882 CNTR_INVALID_VL));
2883 rsp->port_rcv_pkts = cpu_to_be64(read_dev_cntr(dd, C_DC_RCV_PKTS,
2884 CNTR_INVALID_VL));
2885 rsp->port_multicast_xmit_pkts =
2886 cpu_to_be64(read_dev_cntr(dd, C_DC_MC_XMIT_PKTS,
2887 CNTR_INVALID_VL));
2888 rsp->port_multicast_rcv_pkts =
2889 cpu_to_be64(read_dev_cntr(dd, C_DC_MC_RCV_PKTS,
2890 CNTR_INVALID_VL));
2891}
2892
77241056 2893static int pma_get_opa_datacounters(struct opa_pma_mad *pmp,
2894 struct ib_device *ibdev,
2895 u8 port, u32 *resp_len)
2896{
2897 struct opa_port_data_counters_msg *req =
2898 (struct opa_port_data_counters_msg *)pmp->data;
2899 struct hfi1_devdata *dd = dd_from_ibdev(ibdev);
2900 struct hfi1_ibport *ibp = to_iport(ibdev, port);
2901 struct hfi1_pportdata *ppd = ppd_from_ibp(ibp);
2902 struct _port_dctrs *rsp;
2903 struct _vls_dctrs *vlinfo;
2904 size_t response_data_size;
2905 u32 num_ports;
77241056 2906 u8 lq, num_vls;
f0852922 2907 u8 res_lli, res_ler;
77241056 2908 u64 port_mask;
61a28d2b 2909 u8 port_num;
2910 unsigned long vl;
2911 u32 vl_select_mask;
2912 int vfi;
2913
2914 num_ports = be32_to_cpu(pmp->mad_hdr.attr_mod) >> 24;
2915 num_vls = hweight32(be32_to_cpu(req->vl_select_mask));
2916 vl_select_mask = be32_to_cpu(req->vl_select_mask);
2917 res_lli = (u8)(be32_to_cpu(req->resolution) & MSK_LLI) >> MSK_LLI_SFT;
2918 res_lli = res_lli ? res_lli + ADD_LLI : 0;
2919 res_ler = (u8)(be32_to_cpu(req->resolution) & MSK_LER) >> MSK_LER_SFT;
2920 res_ler = res_ler ? res_ler + ADD_LER : 0;
2921
2922 if (num_ports != 1 || (vl_select_mask & ~VL_MASK_ALL)) {
2923 pmp->mad_hdr.status |= IB_SMP_INVALID_FIELD;
2924 return reply((struct ib_mad_hdr *)pmp);
2925 }
2926
2927 /* Sanity check */
2928 response_data_size = sizeof(struct opa_port_data_counters_msg) +
2929 num_vls * sizeof(struct _vls_dctrs);
2930
2931 if (response_data_size > sizeof(pmp->data)) {
2932 pmp->mad_hdr.status |= IB_SMP_INVALID_FIELD;
2933 return reply((struct ib_mad_hdr *)pmp);
2934 }
2935
2936 /*
2937 * The bit set in the mask needs to be consistent with the
2938 * port the request came in on.
2939 */
2940 port_mask = be64_to_cpu(req->port_select_mask[3]);
2941 port_num = find_first_bit((unsigned long *)&port_mask,
6aaa382f 2942 sizeof(port_mask) * 8);
77241056 2943
61a28d2b 2944 if (port_num != port) {
2945 pmp->mad_hdr.status |= IB_SMP_INVALID_FIELD;
2946 return reply((struct ib_mad_hdr *)pmp);
2947 }
2948
acc17d67 2949 rsp = &req->port[0];
2950 memset(rsp, 0, sizeof(*rsp));
2951
2952 rsp->port_number = port;
2953 /*
2954 * Note that link_quality_indicator is a 32 bit quantity in
2955 * 'datacounters' queries (as opposed to 'portinfo' queries,
2956 * where it's a byte).
2957 */
2958 hfi1_read_link_quality(dd, &lq);
2959 rsp->link_quality_indicator = cpu_to_be32((u32)lq);
b8d114eb 2960 pma_get_opa_port_dctrs(ibdev, rsp);
77241056 2961
2962 rsp->port_xmit_wait =
2963 cpu_to_be64(read_port_cntr(ppd, C_TX_WAIT, CNTR_INVALID_VL));
2964 rsp->port_rcv_fecn =
2965 cpu_to_be64(read_dev_cntr(dd, C_DC_RCV_FCN, CNTR_INVALID_VL));
2966 rsp->port_rcv_becn =
2967 cpu_to_be64(read_dev_cntr(dd, C_DC_RCV_BCN, CNTR_INVALID_VL));
77241056 2968 rsp->port_error_counter_summary =
2969 cpu_to_be64(get_error_counter_summary(ibdev, port,
2970 res_lli, res_ler));
77241056 2971
58721b8f 2972 vlinfo = &rsp->vls[0];
2973 vfi = 0;
2974 /* The vl_select_mask has been checked above, and we know
2975 * that it contains only entries which represent valid VLs.
2976 * So in the for_each_set_bit() loop below, we don't need
2977 * any additional checks for vl.
2978 */
2979 for_each_set_bit(vl, (unsigned long *)&(vl_select_mask),
17fb4f29 2980 8 * sizeof(req->vl_select_mask)) {
2981 memset(vlinfo, 0, sizeof(*vlinfo));
2982
2983 rsp->vls[vfi].port_vl_xmit_data =
2984 cpu_to_be64(read_port_cntr(ppd, C_TX_FLIT_VL,
17fb4f29 2985 idx_from_vl(vl)));
2986
2987 rsp->vls[vfi].port_vl_rcv_data =
2988 cpu_to_be64(read_dev_cntr(dd, C_DC_RX_FLIT_VL,
17fb4f29 2989 idx_from_vl(vl)));
2990
2991 rsp->vls[vfi].port_vl_xmit_pkts =
2992 cpu_to_be64(read_port_cntr(ppd, C_TX_PKT_VL,
17fb4f29 2993 idx_from_vl(vl)));
2994
2995 rsp->vls[vfi].port_vl_rcv_pkts =
2996 cpu_to_be64(read_dev_cntr(dd, C_DC_RX_PKT_VL,
17fb4f29 2997 idx_from_vl(vl)));
2998
2999 rsp->vls[vfi].port_vl_xmit_wait =
3000 cpu_to_be64(read_port_cntr(ppd, C_TX_WAIT_VL,
17fb4f29 3001 idx_from_vl(vl)));
3002
3003 rsp->vls[vfi].port_vl_rcv_fecn =
3004 cpu_to_be64(read_dev_cntr(dd, C_DC_RCV_FCN_VL,
17fb4f29 3005 idx_from_vl(vl)));
3006 rsp->vls[vfi].port_vl_rcv_becn =
3007 cpu_to_be64(read_dev_cntr(dd, C_DC_RCV_BCN_VL,
17fb4f29 3008 idx_from_vl(vl)));
3009
3010 /* rsp->port_vl_xmit_time_cong is 0 for HFIs */
3011 /* rsp->port_vl_xmit_wasted_bw ??? */
3012 /* port_vl_xmit_wait_data - TXE (table 13-9 HFI spec) ???
3013 * does this differ from rsp->vls[vfi].port_vl_xmit_wait
3014 */
77241056 3015 /*rsp->vls[vfi].port_vl_mark_fecn =
3016 * cpu_to_be64(read_csr(dd, DCC_PRF_PORT_VL_MARK_FECN_CNT
3017 * + offset));
3018 */
3019 vlinfo++;
3020 vfi++;
3021 }
3022
d9d3e025 3023 a0_datacounters(ppd, rsp, vl_select_mask);
3024
3025 if (resp_len)
3026 *resp_len += response_data_size;
3027
3028 return reply((struct ib_mad_hdr *)pmp);
3029}
3030
3031static int pma_get_ib_portcounters_ext(struct ib_pma_mad *pmp,
3032 struct ib_device *ibdev, u8 port)
3033{
3034 struct ib_pma_portcounters_ext *p = (struct ib_pma_portcounters_ext *)
3035 pmp->data;
3036 struct _port_dctrs rsp;
3037
3038 if (pmp->mad_hdr.attr_mod != 0 || p->port_select != port) {
3039 pmp->mad_hdr.status |= IB_SMP_INVALID_FIELD;
3040 goto bail;
3041 }
3042
3043 memset(&rsp, 0, sizeof(rsp));
3044 pma_get_opa_port_dctrs(ibdev, &rsp);
3045
3046 p->port_xmit_data = rsp.port_xmit_data;
3047 p->port_rcv_data = rsp.port_rcv_data;
3048 p->port_xmit_packets = rsp.port_xmit_pkts;
3049 p->port_rcv_packets = rsp.port_rcv_pkts;
3050 p->port_unicast_xmit_packets = 0;
3051 p->port_unicast_rcv_packets = 0;
3052 p->port_multicast_xmit_packets = rsp.port_multicast_xmit_pkts;
3053 p->port_multicast_rcv_packets = rsp.port_multicast_rcv_pkts;
3054
3055bail:
3056 return reply((struct ib_mad_hdr *)pmp);
3057}
3058
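/*
 * Fill in the common error counters; shared by the OPA PortErrorCounters
 * query and the IB PortCounters query below.
 */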
3059static void pma_get_opa_port_ectrs(struct ib_device *ibdev,
3060 struct _port_ectrs *rsp, u8 port)
3061{
3062 u64 tmp, tmp2;
3063 struct hfi1_devdata *dd = dd_from_ibdev(ibdev);
3064 struct hfi1_ibport *ibp = to_iport(ibdev, port);
3065 struct hfi1_pportdata *ppd = ppd_from_ibp(ibp);
3066
3067 tmp = read_dev_cntr(dd, C_DC_SEQ_CRC_CNT, CNTR_INVALID_VL);
3068 tmp2 = tmp + read_dev_cntr(dd, C_DC_REINIT_FROM_PEER_CNT,
3069 CNTR_INVALID_VL);
3070 if (tmp2 > (u32)UINT_MAX || tmp2 < tmp) {
3071 /* overflow/wrapped */
3072 rsp->link_error_recovery = cpu_to_be32(~0);
3073 } else {
3074 rsp->link_error_recovery = cpu_to_be32(tmp2);
3075 }
3076
3077 rsp->link_downed = cpu_to_be32(read_port_cntr(ppd, C_SW_LINK_DOWN,
3078 CNTR_INVALID_VL));
3079 rsp->port_rcv_errors =
3080 cpu_to_be64(read_dev_cntr(dd, C_DC_RCV_ERR, CNTR_INVALID_VL));
3081 rsp->port_rcv_remote_physical_errors =
3082 cpu_to_be64(read_dev_cntr(dd, C_DC_RMT_PHY_ERR,
3083 CNTR_INVALID_VL));
3084 rsp->port_rcv_switch_relay_errors = 0;
3085 rsp->port_xmit_discards =
3086 cpu_to_be64(read_port_cntr(ppd, C_SW_XMIT_DSCD,
3087 CNTR_INVALID_VL));
3088 rsp->port_xmit_constraint_errors =
3089 cpu_to_be64(read_port_cntr(ppd, C_SW_XMIT_CSTR_ERR,
3090 CNTR_INVALID_VL));
3091 rsp->port_rcv_constraint_errors =
3092 cpu_to_be64(read_port_cntr(ppd, C_SW_RCV_CSTR_ERR,
3093 CNTR_INVALID_VL));
3094 rsp->local_link_integrity_errors =
3095 cpu_to_be64(read_dev_cntr(dd, C_DC_RX_REPLAY,
3096 CNTR_INVALID_VL));
3097 rsp->excessive_buffer_overruns =
3098 cpu_to_be64(read_dev_cntr(dd, C_RCV_OVF, CNTR_INVALID_VL));
3099}
3100
77241056 3101static int pma_get_opa_porterrors(struct opa_pma_mad *pmp,
3102 struct ib_device *ibdev,
3103 u8 port, u32 *resp_len)
3104{
3105 size_t response_data_size;
3106 struct _port_ectrs *rsp;
eb2e557c 3107 u8 port_num;
3108 struct opa_port_error_counters64_msg *req;
3109 struct hfi1_devdata *dd = dd_from_ibdev(ibdev);
3110 u32 num_ports;
3111 u8 num_pslm;
3112 u8 num_vls;
3113 struct hfi1_ibport *ibp;
3114 struct hfi1_pportdata *ppd;
3115 struct _vls_ectrs *vlinfo;
3116 unsigned long vl;
b8d114eb 3117 u64 port_mask, tmp;
3118 u32 vl_select_mask;
3119 int vfi;
3120
3121 req = (struct opa_port_error_counters64_msg *)pmp->data;
3122
3123 num_ports = be32_to_cpu(pmp->mad_hdr.attr_mod) >> 24;
3124
3125 num_pslm = hweight64(be64_to_cpu(req->port_select_mask[3]));
3126 num_vls = hweight32(be32_to_cpu(req->vl_select_mask));
3127
3128 if (num_ports != 1 || num_ports != num_pslm) {
3129 pmp->mad_hdr.status |= IB_SMP_INVALID_FIELD;
3130 return reply((struct ib_mad_hdr *)pmp);
3131 }
3132
3133 response_data_size = sizeof(struct opa_port_error_counters64_msg) +
3134 num_vls * sizeof(struct _vls_ectrs);
3135
3136 if (response_data_size > sizeof(pmp->data)) {
3137 pmp->mad_hdr.status |= IB_SMP_INVALID_FIELD;
3138 return reply((struct ib_mad_hdr *)pmp);
3139 }
3140 /*
3141 * The bit set in the mask needs to be consistent with the
3142 * port the request came in on.
3143 */
3144 port_mask = be64_to_cpu(req->port_select_mask[3]);
3145 port_num = find_first_bit((unsigned long *)&port_mask,
6aaa382f 3146 sizeof(port_mask) * 8);
77241056 3147
eb2e557c 3148 if (port_num != port) {
3149 pmp->mad_hdr.status |= IB_SMP_INVALID_FIELD;
3150 return reply((struct ib_mad_hdr *)pmp);
3151 }
3152
acc17d67 3153 rsp = &req->port[0];
3154
3155 ibp = to_iport(ibdev, port_num);
3156 ppd = ppd_from_ibp(ibp);
3157
3158 memset(rsp, 0, sizeof(*rsp));
eb2e557c 3159 rsp->port_number = port_num;
77241056 3160
b8d114eb 3161 pma_get_opa_port_ectrs(ibdev, rsp, port_num);
77241056 3162
3163 rsp->port_rcv_remote_physical_errors =
3164 cpu_to_be64(read_dev_cntr(dd, C_DC_RMT_PHY_ERR,
b8d114eb 3165 CNTR_INVALID_VL));
3166 rsp->fm_config_errors =
3167 cpu_to_be64(read_dev_cntr(dd, C_DC_FM_CFG_ERR,
17fb4f29 3168 CNTR_INVALID_VL));
77241056 3169 tmp = read_dev_cntr(dd, C_DC_UNC_ERR, CNTR_INVALID_VL);
b8d114eb 3170
77241056 3171 rsp->uncorrectable_errors = tmp < 0x100 ? (tmp & 0xff) : 0xff;
3172 rsp->port_rcv_errors =
3173 cpu_to_be64(read_dev_cntr(dd, C_DC_RCV_ERR, CNTR_INVALID_VL));
acc17d67 3174 vlinfo = &rsp->vls[0];
3175 vfi = 0;
3176 vl_select_mask = be32_to_cpu(req->vl_select_mask);
3177 for_each_set_bit(vl, (unsigned long *)&(vl_select_mask),
3178 8 * sizeof(req->vl_select_mask)) {
3179 memset(vlinfo, 0, sizeof(*vlinfo));
3180 rsp->vls[vfi].port_vl_xmit_discards =
3181 cpu_to_be64(read_port_cntr(ppd, C_SW_XMIT_DSCD_VL,
3182 idx_from_vl(vl)));
3183 vlinfo += 1;
3184 vfi++;
3185 }
3186
3187 if (resp_len)
3188 *resp_len += response_data_size;
3189
3190 return reply((struct ib_mad_hdr *)pmp);
3191}
3192
3193static int pma_get_ib_portcounters(struct ib_pma_mad *pmp,
3194 struct ib_device *ibdev, u8 port)
3195{
3196 struct ib_pma_portcounters *p = (struct ib_pma_portcounters *)
3197 pmp->data;
3198 struct _port_ectrs rsp;
3199 u64 temp_link_overrun_errors;
3200 u64 temp_64;
3201 u32 temp_32;
3202
3203 memset(&rsp, 0, sizeof(rsp));
3204 pma_get_opa_port_ectrs(ibdev, &rsp, port);
3205
3206 if (pmp->mad_hdr.attr_mod != 0 || p->port_select != port) {
3207 pmp->mad_hdr.status |= IB_SMP_INVALID_FIELD;
3208 goto bail;
3209 }
3210
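	/*
	 * Fold the 64-bit OPA counters into the narrower IB PortCounters
	 * fields, saturating at each field's maximum.
	 */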
3211 p->symbol_error_counter = 0; /* N/A for OPA */
3212
3213 temp_32 = be32_to_cpu(rsp.link_error_recovery);
3214 if (temp_32 > 0xFFUL)
3215 p->link_error_recovery_counter = 0xFF;
3216 else
3217 p->link_error_recovery_counter = (u8)temp_32;
3218
3219 temp_32 = be32_to_cpu(rsp.link_downed);
3220 if (temp_32 > 0xFFUL)
3221 p->link_downed_counter = 0xFF;
3222 else
3223 p->link_downed_counter = (u8)temp_32;
3224
3225 temp_64 = be64_to_cpu(rsp.port_rcv_errors);
3226 if (temp_64 > 0xFFFFUL)
3227 p->port_rcv_errors = cpu_to_be16(0xFFFF);
3228 else
3229 p->port_rcv_errors = cpu_to_be16((u16)temp_64);
3230
3231 temp_64 = be64_to_cpu(rsp.port_rcv_remote_physical_errors);
3232 if (temp_64 > 0xFFFFUL)
3233 p->port_rcv_remphys_errors = cpu_to_be16(0xFFFF);
3234 else
3235 p->port_rcv_remphys_errors = cpu_to_be16((u16)temp_64);
3236
3237 temp_64 = be64_to_cpu(rsp.port_rcv_switch_relay_errors);
3238 p->port_rcv_switch_relay_errors = cpu_to_be16((u16)temp_64);
3239
3240 temp_64 = be64_to_cpu(rsp.port_xmit_discards);
3241 if (temp_64 > 0xFFFFUL)
3242 p->port_xmit_discards = cpu_to_be16(0xFFFF);
3243 else
3244 p->port_xmit_discards = cpu_to_be16((u16)temp_64);
3245
3246 temp_64 = be64_to_cpu(rsp.port_xmit_constraint_errors);
3247 if (temp_64 > 0xFFUL)
3248 p->port_xmit_constraint_errors = 0xFF;
3249 else
3250 p->port_xmit_constraint_errors = (u8)temp_64;
3251
3252 temp_64 = be64_to_cpu(rsp.port_rcv_constraint_errors);
3253 if (temp_64 > 0xFFUL)
3254 p->port_rcv_constraint_errors = 0xFFUL;
3255 else
3256 p->port_rcv_constraint_errors = (u8)temp_64;
3257
3258 /* LocalLink: 7:4, BufferOverrun: 3:0 */
3259 temp_64 = be64_to_cpu(rsp.local_link_integrity_errors);
3260 if (temp_64 > 0xFUL)
3261 temp_64 = 0xFUL;
3262
3263 temp_link_overrun_errors = temp_64 << 4;
3264
3265 temp_64 = be64_to_cpu(rsp.excessive_buffer_overruns);
3266 if (temp_64 > 0xFUL)
3267 temp_64 = 0xFUL;
3268 temp_link_overrun_errors |= temp_64;
3269
3270 p->link_overrun_errors = (u8)temp_link_overrun_errors;
3271
3272 p->vl15_dropped = 0; /* N/A for OPA */
3273
3274bail:
3275 return reply((struct ib_mad_hdr *)pmp);
3276}
3277
77241056 3278static int pma_get_opa_errorinfo(struct opa_pma_mad *pmp,
3279 struct ib_device *ibdev,
3280 u8 port, u32 *resp_len)
3281{
3282 size_t response_data_size;
3283 struct _port_ei *rsp;
3284 struct opa_port_error_info_msg *req;
3285 struct hfi1_devdata *dd = dd_from_ibdev(ibdev);
3286 u64 port_mask;
3287 u32 num_ports;
eb2e557c 3288 u8 port_num;
3289 u8 num_pslm;
3290 u64 reg;
3291
3292 req = (struct opa_port_error_info_msg *)pmp->data;
acc17d67 3293 rsp = &req->port[0];
3294
3295 num_ports = OPA_AM_NPORT(be32_to_cpu(pmp->mad_hdr.attr_mod));
3296 num_pslm = hweight64(be64_to_cpu(req->port_select_mask[3]));
3297
3298 memset(rsp, 0, sizeof(*rsp));
3299
3300 if (num_ports != 1 || num_ports != num_pslm) {
3301 pmp->mad_hdr.status |= IB_SMP_INVALID_FIELD;
3302 return reply((struct ib_mad_hdr *)pmp);
3303 }
3304
3305 /* Sanity check */
3306 response_data_size = sizeof(struct opa_port_error_info_msg);
3307
3308 if (response_data_size > sizeof(pmp->data)) {
3309 pmp->mad_hdr.status |= IB_SMP_INVALID_FIELD;
3310 return reply((struct ib_mad_hdr *)pmp);
3311 }
3312
3313 /*
3314 * The bit set in the mask needs to be consistent with the port
3315 * the request came in on.
3316 */
3317 port_mask = be64_to_cpu(req->port_select_mask[3]);
3318 port_num = find_first_bit((unsigned long *)&port_mask,
6aaa382f 3319 sizeof(port_mask) * 8);
77241056 3320
eb2e557c 3321 if (port_num != port) {
3322 pmp->mad_hdr.status |= IB_SMP_INVALID_FIELD;
3323 return reply((struct ib_mad_hdr *)pmp);
3324 }
3325
3326 /* PortRcvErrorInfo */
3327 rsp->port_rcv_ei.status_and_code =
3328 dd->err_info_rcvport.status_and_code;
3329 memcpy(&rsp->port_rcv_ei.ei.ei1to12.packet_flit1,
17fb4f29 3330 &dd->err_info_rcvport.packet_flit1, sizeof(u64));
77241056 3331 memcpy(&rsp->port_rcv_ei.ei.ei1to12.packet_flit2,
17fb4f29 3332 &dd->err_info_rcvport.packet_flit2, sizeof(u64));
3333
 3334 /* ExcessiveBufferOverrunInfo */
3335 reg = read_csr(dd, RCV_ERR_INFO);
3336 if (reg & RCV_ERR_INFO_RCV_EXCESS_BUFFER_OVERRUN_SMASK) {
3337 /*
3338 * if the RcvExcessBufferOverrun bit is set, save SC of
3339 * first pkt that encountered an excess buffer overrun
3340 */
3341 u8 tmp = (u8)reg;
3342
3343 tmp &= RCV_ERR_INFO_RCV_EXCESS_BUFFER_OVERRUN_SC_SMASK;
3344 tmp <<= 2;
3345 rsp->excessive_buffer_overrun_ei.status_and_sc = tmp;
3346 /* set the status bit */
3347 rsp->excessive_buffer_overrun_ei.status_and_sc |= 0x80;
3348 }
3349
3350 rsp->port_xmit_constraint_ei.status =
3351 dd->err_info_xmit_constraint.status;
3352 rsp->port_xmit_constraint_ei.pkey =
3353 cpu_to_be16(dd->err_info_xmit_constraint.pkey);
3354 rsp->port_xmit_constraint_ei.slid =
3355 cpu_to_be32(dd->err_info_xmit_constraint.slid);
3356
3357 rsp->port_rcv_constraint_ei.status =
3358 dd->err_info_rcv_constraint.status;
3359 rsp->port_rcv_constraint_ei.pkey =
3360 cpu_to_be16(dd->err_info_rcv_constraint.pkey);
3361 rsp->port_rcv_constraint_ei.slid =
3362 cpu_to_be32(dd->err_info_rcv_constraint.slid);
3363
3364 /* UncorrectableErrorInfo */
3365 rsp->uncorrectable_ei.status_and_code = dd->err_info_uncorrectable;
3366
3367 /* FMConfigErrorInfo */
3368 rsp->fm_config_ei.status_and_code = dd->err_info_fmconfig;
3369
3370 if (resp_len)
3371 *resp_len += response_data_size;
3372
3373 return reply((struct ib_mad_hdr *)pmp);
3374}
3375
3376static int pma_set_opa_portstatus(struct opa_pma_mad *pmp,
17fb4f29
JJ
3377 struct ib_device *ibdev,
3378 u8 port, u32 *resp_len)
77241056
MM
3379{
3380 struct opa_clear_port_status *req =
3381 (struct opa_clear_port_status *)pmp->data;
3382 struct hfi1_devdata *dd = dd_from_ibdev(ibdev);
3383 struct hfi1_ibport *ibp = to_iport(ibdev, port);
3384 struct hfi1_pportdata *ppd = ppd_from_ibp(ibp);
3385 u32 nports = be32_to_cpu(pmp->mad_hdr.attr_mod) >> 24;
3386 u64 portn = be64_to_cpu(req->port_select_mask[3]);
3387 u32 counter_select = be32_to_cpu(req->counter_select_mask);
3388 u32 vl_select_mask = VL_MASK_ALL; /* clear all per-vl cnts */
3389 unsigned long vl;
3390
3391 if ((nports != 1) || (portn != 1 << port)) {
3392 pmp->mad_hdr.status |= IB_SMP_INVALID_FIELD;
3393 return reply((struct ib_mad_hdr *)pmp);
3394 }
3395 /*
3396 * only counters returned by pma_get_opa_portstatus() are
3397 * handled, so when pma_get_opa_portstatus() gets a fix,
3398 * the corresponding change should be made here as well.
3399 */
3400
3401 if (counter_select & CS_PORT_XMIT_DATA)
3402 write_dev_cntr(dd, C_DC_XMIT_FLITS, CNTR_INVALID_VL, 0);
3403
3404 if (counter_select & CS_PORT_RCV_DATA)
3405 write_dev_cntr(dd, C_DC_RCV_FLITS, CNTR_INVALID_VL, 0);
3406
3407 if (counter_select & CS_PORT_XMIT_PKTS)
3408 write_dev_cntr(dd, C_DC_XMIT_PKTS, CNTR_INVALID_VL, 0);
3409
3410 if (counter_select & CS_PORT_RCV_PKTS)
3411 write_dev_cntr(dd, C_DC_RCV_PKTS, CNTR_INVALID_VL, 0);
3412
3413 if (counter_select & CS_PORT_MCAST_XMIT_PKTS)
3414 write_dev_cntr(dd, C_DC_MC_XMIT_PKTS, CNTR_INVALID_VL, 0);
3415
3416 if (counter_select & CS_PORT_MCAST_RCV_PKTS)
3417 write_dev_cntr(dd, C_DC_MC_RCV_PKTS, CNTR_INVALID_VL, 0);
3418
3419 if (counter_select & CS_PORT_XMIT_WAIT)
3420 write_port_cntr(ppd, C_TX_WAIT, CNTR_INVALID_VL, 0);
3421
3422 /* ignore cs_sw_portCongestion for HFIs */
3423
3424 if (counter_select & CS_PORT_RCV_FECN)
3425 write_dev_cntr(dd, C_DC_RCV_FCN, CNTR_INVALID_VL, 0);
3426
3427 if (counter_select & CS_PORT_RCV_BECN)
3428 write_dev_cntr(dd, C_DC_RCV_BCN, CNTR_INVALID_VL, 0);
3429
3430 /* ignore cs_port_xmit_time_cong for HFIs */
3431 /* ignore cs_port_xmit_wasted_bw for now */
3432 /* ignore cs_port_xmit_wait_data for now */
3433 if (counter_select & CS_PORT_RCV_BUBBLE)
3434 write_dev_cntr(dd, C_DC_RCV_BBL, CNTR_INVALID_VL, 0);
3435
3436 /* Only applicable for switch */
4d114fdd
JJ
3437 /* if (counter_select & CS_PORT_MARK_FECN)
3438 * write_csr(dd, DCC_PRF_PORT_MARK_FECN_CNT, 0);
3439 */
77241056
MM
3440
3441 if (counter_select & CS_PORT_RCV_CONSTRAINT_ERRORS)
3442 write_port_cntr(ppd, C_SW_RCV_CSTR_ERR, CNTR_INVALID_VL, 0);
3443
3444 /* ignore cs_port_rcv_switch_relay_errors for HFIs */
3445 if (counter_select & CS_PORT_XMIT_DISCARDS)
3446 write_port_cntr(ppd, C_SW_XMIT_DSCD, CNTR_INVALID_VL, 0);
3447
3448 if (counter_select & CS_PORT_XMIT_CONSTRAINT_ERRORS)
3449 write_port_cntr(ppd, C_SW_XMIT_CSTR_ERR, CNTR_INVALID_VL, 0);
3450
3451 if (counter_select & CS_PORT_RCV_REMOTE_PHYSICAL_ERRORS)
3452 write_dev_cntr(dd, C_DC_RMT_PHY_ERR, CNTR_INVALID_VL, 0);
3453
3210314a 3454 if (counter_select & CS_LOCAL_LINK_INTEGRITY_ERRORS)
77241056 3455 write_dev_cntr(dd, C_DC_RX_REPLAY, CNTR_INVALID_VL, 0);
77241056
MM
3456
3457 if (counter_select & CS_LINK_ERROR_RECOVERY) {
3458 write_dev_cntr(dd, C_DC_SEQ_CRC_CNT, CNTR_INVALID_VL, 0);
3459 write_dev_cntr(dd, C_DC_REINIT_FROM_PEER_CNT,
17fb4f29 3460 CNTR_INVALID_VL, 0);
77241056
MM
3461 }
3462
3463 if (counter_select & CS_PORT_RCV_ERRORS)
3464 write_dev_cntr(dd, C_DC_RCV_ERR, CNTR_INVALID_VL, 0);
3465
3466 if (counter_select & CS_EXCESSIVE_BUFFER_OVERRUNS) {
3467 write_dev_cntr(dd, C_RCV_OVF, CNTR_INVALID_VL, 0);
3468 dd->rcv_ovfl_cnt = 0;
3469 }
3470
3471 if (counter_select & CS_FM_CONFIG_ERRORS)
3472 write_dev_cntr(dd, C_DC_FM_CFG_ERR, CNTR_INVALID_VL, 0);
3473
3474 if (counter_select & CS_LINK_DOWNED)
3475 write_port_cntr(ppd, C_SW_LINK_DOWN, CNTR_INVALID_VL, 0);
3476
3477 if (counter_select & CS_UNCORRECTABLE_ERRORS)
3478 write_dev_cntr(dd, C_DC_UNC_ERR, CNTR_INVALID_VL, 0);
3479
3480 for_each_set_bit(vl, (unsigned long *)&(vl_select_mask),
3481 8 * sizeof(vl_select_mask)) {
77241056
MM
3482 if (counter_select & CS_PORT_XMIT_DATA)
3483 write_port_cntr(ppd, C_TX_FLIT_VL, idx_from_vl(vl), 0);
3484
3485 if (counter_select & CS_PORT_RCV_DATA)
3486 write_dev_cntr(dd, C_DC_RX_FLIT_VL, idx_from_vl(vl), 0);
3487
3488 if (counter_select & CS_PORT_XMIT_PKTS)
3489 write_port_cntr(ppd, C_TX_PKT_VL, idx_from_vl(vl), 0);
3490
3491 if (counter_select & CS_PORT_RCV_PKTS)
3492 write_dev_cntr(dd, C_DC_RX_PKT_VL, idx_from_vl(vl), 0);
3493
3494 if (counter_select & CS_PORT_XMIT_WAIT)
3495 write_port_cntr(ppd, C_TX_WAIT_VL, idx_from_vl(vl), 0);
3496
3497 /* sw_port_vl_congestion is 0 for HFIs */
3498 if (counter_select & CS_PORT_RCV_FECN)
3499 write_dev_cntr(dd, C_DC_RCV_FCN_VL, idx_from_vl(vl), 0);
3500
3501 if (counter_select & CS_PORT_RCV_BECN)
3502 write_dev_cntr(dd, C_DC_RCV_BCN_VL, idx_from_vl(vl), 0);
3503
3504 /* port_vl_xmit_time_cong is 0 for HFIs */
3505 /* port_vl_xmit_wasted_bw ??? */
3506 /* port_vl_xmit_wait_data - TXE (table 13-9 HFI spec) ??? */
3507 if (counter_select & CS_PORT_RCV_BUBBLE)
3508 write_dev_cntr(dd, C_DC_RCV_BBL_VL, idx_from_vl(vl), 0);
3509
4d114fdd
JJ
3510 /* if (counter_select & CS_PORT_MARK_FECN)
3511 * write_csr(dd, DCC_PRF_PORT_VL_MARK_FECN_CNT + offset, 0);
3512 */
583eb8b8
JP
3513 if (counter_select & C_SW_XMIT_DSCD_VL)
3514 write_port_cntr(ppd, C_SW_XMIT_DSCD_VL,
3515 idx_from_vl(vl), 0);
77241056
MM
3516 }
3517
3518 if (resp_len)
3519 *resp_len += sizeof(*req);
3520
3521 return reply((struct ib_mad_hdr *)pmp);
3522}
3523
3524static int pma_set_opa_errorinfo(struct opa_pma_mad *pmp,
17fb4f29
JJ
3525 struct ib_device *ibdev,
3526 u8 port, u32 *resp_len)
77241056
MM
3527{
3528 struct _port_ei *rsp;
3529 struct opa_port_error_info_msg *req;
3530 struct hfi1_devdata *dd = dd_from_ibdev(ibdev);
3531 u64 port_mask;
3532 u32 num_ports;
eb2e557c 3533 u8 port_num;
77241056
MM
3534 u8 num_pslm;
3535 u32 error_info_select;
3536
3537 req = (struct opa_port_error_info_msg *)pmp->data;
acc17d67 3538 rsp = &req->port[0];
77241056
MM
3539
3540 num_ports = OPA_AM_NPORT(be32_to_cpu(pmp->mad_hdr.attr_mod));
3541 num_pslm = hweight64(be64_to_cpu(req->port_select_mask[3]));
3542
3543 memset(rsp, 0, sizeof(*rsp));
3544
3545 if (num_ports != 1 || num_ports != num_pslm) {
3546 pmp->mad_hdr.status |= IB_SMP_INVALID_FIELD;
3547 return reply((struct ib_mad_hdr *)pmp);
3548 }
3549
3550 /*
3551 * The bit set in the mask needs to be consistent with the port
3552 * the request came in on.
3553 */
3554 port_mask = be64_to_cpu(req->port_select_mask[3]);
3555 port_num = find_first_bit((unsigned long *)&port_mask,
6aaa382f 3556 sizeof(port_mask) * 8);
77241056 3557
eb2e557c 3558 if (port_num != port) {
77241056
MM
3559 pmp->mad_hdr.status |= IB_SMP_INVALID_FIELD;
3560 return reply((struct ib_mad_hdr *)pmp);
3561 }
3562
3563 error_info_select = be32_to_cpu(req->error_info_select_mask);
3564
3565 /* PortRcvErrorInfo */
3566 if (error_info_select & ES_PORT_RCV_ERROR_INFO)
3567 /* turn off status bit */
3568 dd->err_info_rcvport.status_and_code &= ~OPA_EI_STATUS_SMASK;
3569
 3570	/* ExcessiveBufferOverrunInfo */
3571 if (error_info_select & ES_EXCESSIVE_BUFFER_OVERRUN_INFO)
4d114fdd
JJ
3572 /*
3573 * status bit is essentially kept in the h/w - bit 5 of
3574 * RCV_ERR_INFO
3575 */
77241056
MM
3576 write_csr(dd, RCV_ERR_INFO,
3577 RCV_ERR_INFO_RCV_EXCESS_BUFFER_OVERRUN_SMASK);
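	/*
	 * The write above is what clears the saved status: the overrun bit
	 * in RCV_ERR_INFO behaves as write-one-to-clear.
	 */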
3578
3579 if (error_info_select & ES_PORT_XMIT_CONSTRAINT_ERROR_INFO)
3580 dd->err_info_xmit_constraint.status &= ~OPA_EI_STATUS_SMASK;
3581
3582 if (error_info_select & ES_PORT_RCV_CONSTRAINT_ERROR_INFO)
3583 dd->err_info_rcv_constraint.status &= ~OPA_EI_STATUS_SMASK;
3584
3585 /* UncorrectableErrorInfo */
3586 if (error_info_select & ES_UNCORRECTABLE_ERROR_INFO)
3587 /* turn off status bit */
3588 dd->err_info_uncorrectable &= ~OPA_EI_STATUS_SMASK;
3589
3590 /* FMConfigErrorInfo */
3591 if (error_info_select & ES_FM_CONFIG_ERROR_INFO)
3592 /* turn off status bit */
3593 dd->err_info_fmconfig &= ~OPA_EI_STATUS_SMASK;
3594
3595 if (resp_len)
3596 *resp_len += sizeof(*req);
3597
3598 return reply((struct ib_mad_hdr *)pmp);
3599}
3600
3601struct opa_congestion_info_attr {
3602 __be16 congestion_info;
3603 u8 control_table_cap; /* Multiple of 64 entry unit CCTs */
3604 u8 congestion_log_length;
3605} __packed;
3606
3607static int __subn_get_opa_cong_info(struct opa_smp *smp, u32 am, u8 *data,
3608 struct ib_device *ibdev, u8 port,
f1685179 3609 u32 *resp_len, u32 max_len)
77241056
MM
3610{
3611 struct opa_congestion_info_attr *p =
3612 (struct opa_congestion_info_attr *)data;
3613 struct hfi1_ibport *ibp = to_iport(ibdev, port);
3614 struct hfi1_pportdata *ppd = ppd_from_ibp(ibp);
3615
f1685179
ND
3616 if (smp_length_check(sizeof(*p), max_len)) {
3617 smp->status |= IB_SMP_INVALID_FIELD;
3618 return reply((struct ib_mad_hdr *)smp);
3619 }
3620
77241056
MM
3621 p->congestion_info = 0;
3622 p->control_table_cap = ppd->cc_max_table_entries;
3623 p->congestion_log_length = OPA_CONG_LOG_ELEMS;
3624
3625 if (resp_len)
3626 *resp_len += sizeof(*p);
3627
3628 return reply((struct ib_mad_hdr *)smp);
3629}
3630
3631static int __subn_get_opa_cong_setting(struct opa_smp *smp, u32 am,
17fb4f29 3632 u8 *data, struct ib_device *ibdev,
f1685179 3633 u8 port, u32 *resp_len, u32 max_len)
77241056
MM
3634{
3635 int i;
3636 struct opa_congestion_setting_attr *p =
50e5dcbe 3637 (struct opa_congestion_setting_attr *)data;
77241056
MM
3638 struct hfi1_ibport *ibp = to_iport(ibdev, port);
3639 struct hfi1_pportdata *ppd = ppd_from_ibp(ibp);
3640 struct opa_congestion_setting_entry_shadow *entries;
3641 struct cc_state *cc_state;
3642
f1685179
ND
3643 if (smp_length_check(sizeof(*p), max_len)) {
3644 smp->status |= IB_SMP_INVALID_FIELD;
3645 return reply((struct ib_mad_hdr *)smp);
3646 }
3647
77241056
MM
3648 rcu_read_lock();
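	/*
	 * cc_state is published by apply_cc_state() via rcu_assign_pointer(),
	 * so only dereference it under rcu_read_lock().
	 */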
3649
3650 cc_state = get_cc_state(ppd);
3651
d125a6c6 3652 if (!cc_state) {
77241056
MM
3653 rcu_read_unlock();
3654 return reply((struct ib_mad_hdr *)smp);
3655 }
3656
3657 entries = cc_state->cong_setting.entries;
3658 p->port_control = cpu_to_be16(cc_state->cong_setting.port_control);
3659 p->control_map = cpu_to_be32(cc_state->cong_setting.control_map);
3660 for (i = 0; i < OPA_MAX_SLS; i++) {
3661 p->entries[i].ccti_increase = entries[i].ccti_increase;
3662 p->entries[i].ccti_timer = cpu_to_be16(entries[i].ccti_timer);
3663 p->entries[i].trigger_threshold =
3664 entries[i].trigger_threshold;
3665 p->entries[i].ccti_min = entries[i].ccti_min;
3666 }
3667
3668 rcu_read_unlock();
3669
3670 if (resp_len)
3671 *resp_len += sizeof(*p);
3672
3673 return reply((struct ib_mad_hdr *)smp);
3674}
3675
f036780b
DL
3676/*
3677 * Apply congestion control information stored in the ppd to the
3678 * active structure.
3679 */
3680static void apply_cc_state(struct hfi1_pportdata *ppd)
3681{
3682 struct cc_state *old_cc_state, *new_cc_state;
3683
3684 new_cc_state = kzalloc(sizeof(*new_cc_state), GFP_KERNEL);
3685 if (!new_cc_state)
3686 return;
3687
3688 /*
3689 * Hold the lock for updating *and* to prevent ppd information
3690 * from changing during the update.
3691 */
3692 spin_lock(&ppd->cc_state_lock);
3693
8adf71fa 3694 old_cc_state = get_cc_state_protected(ppd);
f036780b
DL
3695 if (!old_cc_state) {
3696 /* never active, or shutting down */
3697 spin_unlock(&ppd->cc_state_lock);
3698 kfree(new_cc_state);
3699 return;
3700 }
3701
3702 *new_cc_state = *old_cc_state;
3703
685894dd
DD
3704 if (ppd->total_cct_entry)
3705 new_cc_state->cct.ccti_limit = ppd->total_cct_entry - 1;
3706 else
3707 new_cc_state->cct.ccti_limit = 0;
3708
f036780b
DL
3709 memcpy(new_cc_state->cct.entries, ppd->ccti_entries,
3710 ppd->total_cct_entry * sizeof(struct ib_cc_table_entry));
3711
3712 new_cc_state->cong_setting.port_control = IB_CC_CCS_PC_SL_BASED;
3713 new_cc_state->cong_setting.control_map = ppd->cc_sl_control_map;
3714 memcpy(new_cc_state->cong_setting.entries, ppd->congestion_entries,
3715 OPA_MAX_SLS * sizeof(struct opa_congestion_setting_entry));
3716
3717 rcu_assign_pointer(ppd->cc_state, new_cc_state);
3718
3719 spin_unlock(&ppd->cc_state_lock);
3720
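	/*
	 * Readers may still hold the old state under rcu_read_lock(), so
	 * defer freeing it until a grace period has elapsed.
	 */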
476d95bd 3721 kfree_rcu(old_cc_state, rcu);
f036780b
DL
3722}
3723
77241056
MM
3724static int __subn_set_opa_cong_setting(struct opa_smp *smp, u32 am, u8 *data,
3725 struct ib_device *ibdev, u8 port,
f1685179 3726 u32 *resp_len, u32 max_len)
77241056
MM
3727{
3728 struct opa_congestion_setting_attr *p =
50e5dcbe 3729 (struct opa_congestion_setting_attr *)data;
77241056
MM
3730 struct hfi1_ibport *ibp = to_iport(ibdev, port);
3731 struct hfi1_pportdata *ppd = ppd_from_ibp(ibp);
3732 struct opa_congestion_setting_entry_shadow *entries;
3733 int i;
3734
f1685179
ND
3735 if (smp_length_check(sizeof(*p), max_len)) {
3736 smp->status |= IB_SMP_INVALID_FIELD;
3737 return reply((struct ib_mad_hdr *)smp);
3738 }
3739
f036780b
DL
3740 /*
3741 * Save details from packet into the ppd. Hold the cc_state_lock so
3742 * our information is consistent with anyone trying to apply the state.
3743 */
3744 spin_lock(&ppd->cc_state_lock);
77241056
MM
3745 ppd->cc_sl_control_map = be32_to_cpu(p->control_map);
3746
3747 entries = ppd->congestion_entries;
3748 for (i = 0; i < OPA_MAX_SLS; i++) {
3749 entries[i].ccti_increase = p->entries[i].ccti_increase;
3750 entries[i].ccti_timer = be16_to_cpu(p->entries[i].ccti_timer);
3751 entries[i].trigger_threshold =
3752 p->entries[i].trigger_threshold;
3753 entries[i].ccti_min = p->entries[i].ccti_min;
3754 }
f036780b
DL
3755 spin_unlock(&ppd->cc_state_lock);
3756
3757 /* now apply the information */
3758 apply_cc_state(ppd);
77241056
MM
3759
3760 return __subn_get_opa_cong_setting(smp, am, data, ibdev, port,
f1685179 3761 resp_len, max_len);
77241056
MM
3762}
3763
3764static int __subn_get_opa_hfi1_cong_log(struct opa_smp *smp, u32 am,
3765 u8 *data, struct ib_device *ibdev,
f1685179 3766 u8 port, u32 *resp_len, u32 max_len)
77241056
MM
3767{
3768 struct hfi1_ibport *ibp = to_iport(ibdev, port);
3769 struct hfi1_pportdata *ppd = ppd_from_ibp(ibp);
3770 struct opa_hfi1_cong_log *cong_log = (struct opa_hfi1_cong_log *)data;
d61ea075 3771 u64 ts;
77241056
MM
3772 int i;
3773
f1685179 3774 if (am || smp_length_check(sizeof(*cong_log), max_len)) {
77241056
MM
3775 smp->status |= IB_SMP_INVALID_FIELD;
3776 return reply((struct ib_mad_hdr *)smp);
3777 }
3778
b77d713a 3779 spin_lock_irq(&ppd->cc_log_lock);
77241056
MM
3780
3781 cong_log->log_type = OPA_CC_LOG_TYPE_HFI;
3782 cong_log->congestion_flags = 0;
3783 cong_log->threshold_event_counter =
3784 cpu_to_be16(ppd->threshold_event_counter);
3785 memcpy(cong_log->threshold_cong_event_map,
3786 ppd->threshold_cong_event_map,
3787 sizeof(cong_log->threshold_cong_event_map));
3788 /* keep timestamp in units of 1.024 usec */
d61ea075 3789 ts = ktime_get_ns() / 1024;
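	/* at 1024 ns per tick, a 32-bit timestamp wraps roughly every 73 minutes */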
77241056
MM
3790 cong_log->current_time_stamp = cpu_to_be32(ts);
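	/*
	 * cc_events is treated as a circular buffer: walk it starting at
	 * cc_mad_idx, wrapping at OPA_CONG_LOG_ELEMS, and skip entries that
	 * are too old to report.
	 */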
3791 for (i = 0; i < OPA_CONG_LOG_ELEMS; i++) {
3792 struct opa_hfi1_cong_log_event_internal *cce =
3793 &ppd->cc_events[ppd->cc_mad_idx++];
3794 if (ppd->cc_mad_idx == OPA_CONG_LOG_ELEMS)
3795 ppd->cc_mad_idx = 0;
3796 /*
3797 * Entries which are older than twice the time
3798 * required to wrap the counter are supposed to
3799 * be zeroed (CA10-49 IBTA, release 1.2.1, V1).
3800 */
d61ea075 3801 if ((ts - cce->timestamp) / 2 > U32_MAX)
77241056
MM
3802 continue;
3803 memcpy(cong_log->events[i].local_qp_cn_entry, &cce->lqpn, 3);
3804 memcpy(cong_log->events[i].remote_qp_number_cn_entry,
17fb4f29 3805 &cce->rqpn, 3);
77241056
MM
3806 cong_log->events[i].sl_svc_type_cn_entry =
3807 ((cce->sl & 0x1f) << 3) | (cce->svc_type & 0x7);
3808 cong_log->events[i].remote_lid_cn_entry =
3809 cpu_to_be32(cce->rlid);
3810 cong_log->events[i].timestamp_cn_entry =
3811 cpu_to_be32(cce->timestamp);
3812 }
3813
3814 /*
 3815	 * Reset threshold_cong_event_map and threshold_event_counter
 3816	 * to 0 when the log is read.
3817 */
3818 memset(ppd->threshold_cong_event_map, 0x0,
3819 sizeof(ppd->threshold_cong_event_map));
3820 ppd->threshold_event_counter = 0;
3821
b77d713a 3822 spin_unlock_irq(&ppd->cc_log_lock);
77241056
MM
3823
3824 if (resp_len)
3825 *resp_len += sizeof(struct opa_hfi1_cong_log);
3826
3827 return reply((struct ib_mad_hdr *)smp);
3828}
3829
3830static int __subn_get_opa_cc_table(struct opa_smp *smp, u32 am, u8 *data,
3831 struct ib_device *ibdev, u8 port,
f1685179 3832 u32 *resp_len, u32 max_len)
77241056
MM
3833{
3834 struct ib_cc_table_attr *cc_table_attr =
50e5dcbe 3835 (struct ib_cc_table_attr *)data;
77241056
MM
3836 struct hfi1_ibport *ibp = to_iport(ibdev, port);
3837 struct hfi1_pportdata *ppd = ppd_from_ibp(ibp);
3838 u32 start_block = OPA_AM_START_BLK(am);
3839 u32 n_blocks = OPA_AM_NBLK(am);
3840 struct ib_cc_table_entry_shadow *entries;
3841 int i, j;
3842 u32 sentry, eentry;
3843 struct cc_state *cc_state;
f1685179 3844 u32 size = sizeof(u16) * (IB_CCT_ENTRIES * n_blocks + 1);
77241056
MM
3845
3846 /* sanity check n_blocks, start_block */
f1685179 3847 if (n_blocks == 0 || smp_length_check(size, max_len) ||
77241056
MM
3848 start_block + n_blocks > ppd->cc_max_table_entries) {
3849 smp->status |= IB_SMP_INVALID_FIELD;
3850 return reply((struct ib_mad_hdr *)smp);
3851 }
3852
3853 rcu_read_lock();
3854
3855 cc_state = get_cc_state(ppd);
3856
d125a6c6 3857 if (!cc_state) {
77241056
MM
3858 rcu_read_unlock();
3859 return reply((struct ib_mad_hdr *)smp);
3860 }
3861
3862 sentry = start_block * IB_CCT_ENTRIES;
3863 eentry = sentry + (IB_CCT_ENTRIES * n_blocks);
3864
3865 cc_table_attr->ccti_limit = cpu_to_be16(cc_state->cct.ccti_limit);
3866
3867 entries = cc_state->cct.entries;
3868
3869 /* return n_blocks, though the last block may not be full */
3870 for (j = 0, i = sentry; i < eentry; j++, i++)
3871 cc_table_attr->ccti_entries[j].entry =
3872 cpu_to_be16(entries[i].entry);
3873
3874 rcu_read_unlock();
3875
3876 if (resp_len)
f1685179 3877 *resp_len += size;
77241056
MM
3878
3879 return reply((struct ib_mad_hdr *)smp);
3880}
3881
77241056
MM
3882static int __subn_set_opa_cc_table(struct opa_smp *smp, u32 am, u8 *data,
3883 struct ib_device *ibdev, u8 port,
f1685179 3884 u32 *resp_len, u32 max_len)
77241056 3885{
50e5dcbe 3886 struct ib_cc_table_attr *p = (struct ib_cc_table_attr *)data;
77241056
MM
3887 struct hfi1_ibport *ibp = to_iport(ibdev, port);
3888 struct hfi1_pportdata *ppd = ppd_from_ibp(ibp);
3889 u32 start_block = OPA_AM_START_BLK(am);
3890 u32 n_blocks = OPA_AM_NBLK(am);
3891 struct ib_cc_table_entry_shadow *entries;
3892 int i, j;
3893 u32 sentry, eentry;
3894 u16 ccti_limit;
f1685179 3895 u32 size = sizeof(u16) * (IB_CCT_ENTRIES * n_blocks + 1);
77241056
MM
3896
3897 /* sanity check n_blocks, start_block */
f1685179 3898 if (n_blocks == 0 || smp_length_check(size, max_len) ||
77241056
MM
3899 start_block + n_blocks > ppd->cc_max_table_entries) {
3900 smp->status |= IB_SMP_INVALID_FIELD;
3901 return reply((struct ib_mad_hdr *)smp);
3902 }
3903
3904 sentry = start_block * IB_CCT_ENTRIES;
3905 eentry = sentry + ((n_blocks - 1) * IB_CCT_ENTRIES) +
3906 (be16_to_cpu(p->ccti_limit)) % IB_CCT_ENTRIES + 1;
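	/*
	 * The final block may be partial: it contributes
	 * (ccti_limit % IB_CCT_ENTRIES) + 1 entries rather than a full block.
	 */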
3907
3908 /* sanity check ccti_limit */
3909 ccti_limit = be16_to_cpu(p->ccti_limit);
3910 if (ccti_limit + 1 > eentry) {
3911 smp->status |= IB_SMP_INVALID_FIELD;
3912 return reply((struct ib_mad_hdr *)smp);
3913 }
3914
f036780b
DL
3915 /*
3916 * Save details from packet into the ppd. Hold the cc_state_lock so
3917 * our information is consistent with anyone trying to apply the state.
3918 */
77241056 3919 spin_lock(&ppd->cc_state_lock);
77241056 3920 ppd->total_cct_entry = ccti_limit + 1;
f036780b 3921 entries = ppd->ccti_entries;
77241056
MM
3922 for (j = 0, i = sentry; i < eentry; j++, i++)
3923 entries[i].entry = be16_to_cpu(p->ccti_entries[j].entry);
77241056
MM
3924 spin_unlock(&ppd->cc_state_lock);
3925
f036780b
DL
3926 /* now apply the information */
3927 apply_cc_state(ppd);
77241056 3928
f1685179
ND
3929 return __subn_get_opa_cc_table(smp, am, data, ibdev, port, resp_len,
3930 max_len);
77241056
MM
3931}
3932
3933struct opa_led_info {
3934 __be32 rsvd_led_mask;
3935 __be32 rsvd;
3936};
3937
3938#define OPA_LED_SHIFT 31
349ac71f 3939#define OPA_LED_MASK BIT(OPA_LED_SHIFT)
77241056
MM
3940
3941static int __subn_get_opa_led_info(struct opa_smp *smp, u32 am, u8 *data,
3942 struct ib_device *ibdev, u8 port,
f1685179 3943 u32 *resp_len, u32 max_len)
77241056
MM
3944{
3945 struct hfi1_devdata *dd = dd_from_ibdev(ibdev);
409b1462 3946 struct hfi1_pportdata *ppd = dd->pport;
50e5dcbe 3947 struct opa_led_info *p = (struct opa_led_info *)data;
77241056 3948 u32 nport = OPA_AM_NPORT(am);
409b1462 3949 u32 is_beaconing_active;
77241056 3950
f1685179 3951 if (nport != 1 || smp_length_check(sizeof(*p), max_len)) {
77241056
MM
3952 smp->status |= IB_SMP_INVALID_FIELD;
3953 return reply((struct ib_mad_hdr *)smp);
3954 }
3955
409b1462 3956 /*
2243472e
EH
3957 * This pairs with the memory barrier in hfi1_start_led_override to
3958 * ensure that we read the correct state of LED beaconing represented
3959 * by led_override_timer_active
409b1462 3960 */
2243472e 3961 smp_rmb();
409b1462
EH
3962 is_beaconing_active = !!atomic_read(&ppd->led_override_timer_active);
3963 p->rsvd_led_mask = cpu_to_be32(is_beaconing_active << OPA_LED_SHIFT);
77241056
MM
3964
3965 if (resp_len)
3966 *resp_len += sizeof(struct opa_led_info);
3967
3968 return reply((struct ib_mad_hdr *)smp);
3969}
3970
3971static int __subn_set_opa_led_info(struct opa_smp *smp, u32 am, u8 *data,
3972 struct ib_device *ibdev, u8 port,
f1685179 3973 u32 *resp_len, u32 max_len)
77241056
MM
3974{
3975 struct hfi1_devdata *dd = dd_from_ibdev(ibdev);
50e5dcbe 3976 struct opa_led_info *p = (struct opa_led_info *)data;
77241056
MM
3977 u32 nport = OPA_AM_NPORT(am);
3978 int on = !!(be32_to_cpu(p->rsvd_led_mask) & OPA_LED_MASK);
3979
f1685179 3980 if (nport != 1 || smp_length_check(sizeof(*p), max_len)) {
77241056
MM
3981 smp->status |= IB_SMP_INVALID_FIELD;
3982 return reply((struct ib_mad_hdr *)smp);
3983 }
3984
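	/*
	 * Turn beaconing on or off based on the requested mask bit; the two
	 * numeric arguments below are the override timer's on/off periods.
	 */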
91ab4ed3 3985 if (on)
2243472e 3986 hfi1_start_led_override(dd->pport, 2000, 1500);
91ab4ed3 3987 else
2243472e 3988 shutdown_led_override(dd->pport);
77241056 3989
f1685179
ND
3990 return __subn_get_opa_led_info(smp, am, data, ibdev, port, resp_len,
3991 max_len);
77241056
MM
3992}
3993
3994static int subn_get_opa_sma(__be16 attr_id, struct opa_smp *smp, u32 am,
3995 u8 *data, struct ib_device *ibdev, u8 port,
f1685179 3996 u32 *resp_len, u32 max_len)
77241056
MM
3997{
3998 int ret;
3999 struct hfi1_ibport *ibp = to_iport(ibdev, port);
4000
4001 switch (attr_id) {
4002 case IB_SMP_ATTR_NODE_DESC:
4003 ret = __subn_get_opa_nodedesc(smp, am, data, ibdev, port,
f1685179 4004 resp_len, max_len);
77241056
MM
4005 break;
4006 case IB_SMP_ATTR_NODE_INFO:
4007 ret = __subn_get_opa_nodeinfo(smp, am, data, ibdev, port,
f1685179 4008 resp_len, max_len);
77241056
MM
4009 break;
4010 case IB_SMP_ATTR_PORT_INFO:
4011 ret = __subn_get_opa_portinfo(smp, am, data, ibdev, port,
f1685179 4012 resp_len, max_len);
77241056
MM
4013 break;
4014 case IB_SMP_ATTR_PKEY_TABLE:
4015 ret = __subn_get_opa_pkeytable(smp, am, data, ibdev, port,
f1685179 4016 resp_len, max_len);
77241056
MM
4017 break;
4018 case OPA_ATTRIB_ID_SL_TO_SC_MAP:
4019 ret = __subn_get_opa_sl_to_sc(smp, am, data, ibdev, port,
f1685179 4020 resp_len, max_len);
77241056
MM
4021 break;
4022 case OPA_ATTRIB_ID_SC_TO_SL_MAP:
4023 ret = __subn_get_opa_sc_to_sl(smp, am, data, ibdev, port,
f1685179 4024 resp_len, max_len);
77241056
MM
4025 break;
4026 case OPA_ATTRIB_ID_SC_TO_VLT_MAP:
4027 ret = __subn_get_opa_sc_to_vlt(smp, am, data, ibdev, port,
f1685179 4028 resp_len, max_len);
77241056
MM
4029 break;
4030 case OPA_ATTRIB_ID_SC_TO_VLNT_MAP:
4031 ret = __subn_get_opa_sc_to_vlnt(smp, am, data, ibdev, port,
f1685179 4032 resp_len, max_len);
77241056
MM
4033 break;
4034 case OPA_ATTRIB_ID_PORT_STATE_INFO:
4035 ret = __subn_get_opa_psi(smp, am, data, ibdev, port,
f1685179 4036 resp_len, max_len);
77241056
MM
4037 break;
4038 case OPA_ATTRIB_ID_BUFFER_CONTROL_TABLE:
4039 ret = __subn_get_opa_bct(smp, am, data, ibdev, port,
f1685179 4040 resp_len, max_len);
77241056
MM
4041 break;
4042 case OPA_ATTRIB_ID_CABLE_INFO:
4043 ret = __subn_get_opa_cable_info(smp, am, data, ibdev, port,
f1685179 4044 resp_len, max_len);
77241056
MM
4045 break;
4046 case IB_SMP_ATTR_VL_ARB_TABLE:
4047 ret = __subn_get_opa_vl_arb(smp, am, data, ibdev, port,
f1685179 4048 resp_len, max_len);
77241056
MM
4049 break;
4050 case OPA_ATTRIB_ID_CONGESTION_INFO:
4051 ret = __subn_get_opa_cong_info(smp, am, data, ibdev, port,
f1685179 4052 resp_len, max_len);
77241056
MM
4053 break;
4054 case OPA_ATTRIB_ID_HFI_CONGESTION_SETTING:
4055 ret = __subn_get_opa_cong_setting(smp, am, data, ibdev,
f1685179 4056 port, resp_len, max_len);
77241056
MM
4057 break;
4058 case OPA_ATTRIB_ID_HFI_CONGESTION_LOG:
4059 ret = __subn_get_opa_hfi1_cong_log(smp, am, data, ibdev,
f1685179 4060 port, resp_len, max_len);
77241056
MM
4061 break;
4062 case OPA_ATTRIB_ID_CONGESTION_CONTROL_TABLE:
4063 ret = __subn_get_opa_cc_table(smp, am, data, ibdev, port,
f1685179 4064 resp_len, max_len);
77241056
MM
4065 break;
4066 case IB_SMP_ATTR_LED_INFO:
4067 ret = __subn_get_opa_led_info(smp, am, data, ibdev, port,
f1685179 4068 resp_len, max_len);
77241056
MM
4069 break;
4070 case IB_SMP_ATTR_SM_INFO:
4eb06882 4071 if (ibp->rvp.port_cap_flags & IB_PORT_SM_DISABLED)
77241056 4072 return IB_MAD_RESULT_SUCCESS | IB_MAD_RESULT_CONSUMED;
4eb06882 4073 if (ibp->rvp.port_cap_flags & IB_PORT_SM)
77241056
MM
4074 return IB_MAD_RESULT_SUCCESS;
4075 /* FALLTHROUGH */
4076 default:
4077 smp->status |= IB_SMP_UNSUP_METH_ATTR;
4078 ret = reply((struct ib_mad_hdr *)smp);
4079 break;
4080 }
4081 return ret;
4082}
4083
4084static int subn_set_opa_sma(__be16 attr_id, struct opa_smp *smp, u32 am,
4085 u8 *data, struct ib_device *ibdev, u8 port,
f1685179 4086 u32 *resp_len, u32 max_len)
77241056
MM
4087{
4088 int ret;
4089 struct hfi1_ibport *ibp = to_iport(ibdev, port);
4090
4091 switch (attr_id) {
4092 case IB_SMP_ATTR_PORT_INFO:
4093 ret = __subn_set_opa_portinfo(smp, am, data, ibdev, port,
f1685179 4094 resp_len, max_len);
77241056
MM
4095 break;
4096 case IB_SMP_ATTR_PKEY_TABLE:
4097 ret = __subn_set_opa_pkeytable(smp, am, data, ibdev, port,
f1685179 4098 resp_len, max_len);
77241056
MM
4099 break;
4100 case OPA_ATTRIB_ID_SL_TO_SC_MAP:
4101 ret = __subn_set_opa_sl_to_sc(smp, am, data, ibdev, port,
f1685179 4102 resp_len, max_len);
77241056
MM
4103 break;
4104 case OPA_ATTRIB_ID_SC_TO_SL_MAP:
4105 ret = __subn_set_opa_sc_to_sl(smp, am, data, ibdev, port,
f1685179 4106 resp_len, max_len);
77241056
MM
4107 break;
4108 case OPA_ATTRIB_ID_SC_TO_VLT_MAP:
4109 ret = __subn_set_opa_sc_to_vlt(smp, am, data, ibdev, port,
f1685179 4110 resp_len, max_len);
77241056
MM
4111 break;
4112 case OPA_ATTRIB_ID_SC_TO_VLNT_MAP:
4113 ret = __subn_set_opa_sc_to_vlnt(smp, am, data, ibdev, port,
f1685179 4114 resp_len, max_len);
77241056
MM
4115 break;
4116 case OPA_ATTRIB_ID_PORT_STATE_INFO:
4117 ret = __subn_set_opa_psi(smp, am, data, ibdev, port,
f1685179 4118 resp_len, max_len);
77241056
MM
4119 break;
4120 case OPA_ATTRIB_ID_BUFFER_CONTROL_TABLE:
4121 ret = __subn_set_opa_bct(smp, am, data, ibdev, port,
f1685179 4122 resp_len, max_len);
77241056
MM
4123 break;
4124 case IB_SMP_ATTR_VL_ARB_TABLE:
4125 ret = __subn_set_opa_vl_arb(smp, am, data, ibdev, port,
f1685179 4126 resp_len, max_len);
77241056
MM
4127 break;
4128 case OPA_ATTRIB_ID_HFI_CONGESTION_SETTING:
4129 ret = __subn_set_opa_cong_setting(smp, am, data, ibdev,
f1685179 4130 port, resp_len, max_len);
77241056
MM
4131 break;
4132 case OPA_ATTRIB_ID_CONGESTION_CONTROL_TABLE:
4133 ret = __subn_set_opa_cc_table(smp, am, data, ibdev, port,
f1685179 4134 resp_len, max_len);
77241056
MM
4135 break;
4136 case IB_SMP_ATTR_LED_INFO:
4137 ret = __subn_set_opa_led_info(smp, am, data, ibdev, port,
f1685179 4138 resp_len, max_len);
77241056
MM
4139 break;
4140 case IB_SMP_ATTR_SM_INFO:
4eb06882 4141 if (ibp->rvp.port_cap_flags & IB_PORT_SM_DISABLED)
77241056 4142 return IB_MAD_RESULT_SUCCESS | IB_MAD_RESULT_CONSUMED;
4eb06882 4143 if (ibp->rvp.port_cap_flags & IB_PORT_SM)
77241056
MM
4144 return IB_MAD_RESULT_SUCCESS;
4145 /* FALLTHROUGH */
4146 default:
4147 smp->status |= IB_SMP_UNSUP_METH_ATTR;
4148 ret = reply((struct ib_mad_hdr *)smp);
4149 break;
4150 }
4151 return ret;
4152}
4153
4154static inline void set_aggr_error(struct opa_aggregate *ag)
4155{
4156 ag->err_reqlength |= cpu_to_be16(0x8000);
4157}
4158
4159static int subn_get_opa_aggregate(struct opa_smp *smp,
4160 struct ib_device *ibdev, u8 port,
4161 u32 *resp_len)
4162{
4163 int i;
4164 u32 num_attr = be32_to_cpu(smp->attr_mod) & 0x000000ff;
4165 u8 *next_smp = opa_get_smp_data(smp);
4166
4167 if (num_attr < 1 || num_attr > 117) {
4168 smp->status |= IB_SMP_INVALID_FIELD;
4169 return reply((struct ib_mad_hdr *)smp);
4170 }
4171
4172 for (i = 0; i < num_attr; i++) {
4173 struct opa_aggregate *agg;
4174 size_t agg_data_len;
4175 size_t agg_size;
4176 u32 am;
4177
4178 agg = (struct opa_aggregate *)next_smp;
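		/*
		 * err_reqlength encoding: the low 7 bits give the attribute
		 * data length in 8-byte units (e.g. a value of 8 means 64
		 * bytes); bit 15 is the error flag set by set_aggr_error().
		 */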
4179 agg_data_len = (be16_to_cpu(agg->err_reqlength) & 0x007f) * 8;
4180 agg_size = sizeof(*agg) + agg_data_len;
4181 am = be32_to_cpu(agg->attr_mod);
4182
4183 *resp_len += agg_size;
4184
4185 if (next_smp + agg_size > ((u8 *)smp) + sizeof(*smp)) {
4186 smp->status |= IB_SMP_INVALID_FIELD;
4187 return reply((struct ib_mad_hdr *)smp);
4188 }
4189
4190 /* zero the payload for this segment */
4191 memset(next_smp + sizeof(*agg), 0, agg_data_len);
4192
50e5dcbe 4193 (void)subn_get_opa_sma(agg->attr_id, smp, am, agg->data,
f1685179
ND
4194 ibdev, port, NULL, (u32)agg_data_len);
4195
4196 if (smp->status & IB_SMP_INVALID_FIELD)
4197 break;
77241056
MM
4198 if (smp->status & ~IB_SMP_DIRECTION) {
4199 set_aggr_error(agg);
4200 return reply((struct ib_mad_hdr *)smp);
4201 }
4202 next_smp += agg_size;
77241056
MM
4203 }
4204
4205 return reply((struct ib_mad_hdr *)smp);
4206}
4207
4208static int subn_set_opa_aggregate(struct opa_smp *smp,
4209 struct ib_device *ibdev, u8 port,
4210 u32 *resp_len)
4211{
4212 int i;
4213 u32 num_attr = be32_to_cpu(smp->attr_mod) & 0x000000ff;
4214 u8 *next_smp = opa_get_smp_data(smp);
4215
4216 if (num_attr < 1 || num_attr > 117) {
4217 smp->status |= IB_SMP_INVALID_FIELD;
4218 return reply((struct ib_mad_hdr *)smp);
4219 }
4220
4221 for (i = 0; i < num_attr; i++) {
4222 struct opa_aggregate *agg;
4223 size_t agg_data_len;
4224 size_t agg_size;
4225 u32 am;
4226
4227 agg = (struct opa_aggregate *)next_smp;
4228 agg_data_len = (be16_to_cpu(agg->err_reqlength) & 0x007f) * 8;
4229 agg_size = sizeof(*agg) + agg_data_len;
4230 am = be32_to_cpu(agg->attr_mod);
4231
4232 *resp_len += agg_size;
4233
4234 if (next_smp + agg_size > ((u8 *)smp) + sizeof(*smp)) {
4235 smp->status |= IB_SMP_INVALID_FIELD;
4236 return reply((struct ib_mad_hdr *)smp);
4237 }
4238
50e5dcbe 4239 (void)subn_set_opa_sma(agg->attr_id, smp, am, agg->data,
f1685179
ND
4240 ibdev, port, NULL, (u32)agg_data_len);
4241 if (smp->status & IB_SMP_INVALID_FIELD)
4242 break;
77241056
MM
4243 if (smp->status & ~IB_SMP_DIRECTION) {
4244 set_aggr_error(agg);
4245 return reply((struct ib_mad_hdr *)smp);
4246 }
4247 next_smp += agg_size;
77241056
MM
4248 }
4249
4250 return reply((struct ib_mad_hdr *)smp);
4251}
4252
4253/*
4254 * OPAv1 specifies that, on the transition to link up, these counters
4255 * are cleared:
4256 * PortRcvErrors [*]
4257 * LinkErrorRecovery
4258 * LocalLinkIntegrityErrors
4259 * ExcessiveBufferOverruns [*]
4260 *
4261 * [*] Error info associated with these counters is retained, but the
4262 * error info status is reset to 0.
4263 */
4264void clear_linkup_counters(struct hfi1_devdata *dd)
4265{
4266 /* PortRcvErrors */
4267 write_dev_cntr(dd, C_DC_RCV_ERR, CNTR_INVALID_VL, 0);
4268 dd->err_info_rcvport.status_and_code &= ~OPA_EI_STATUS_SMASK;
4269 /* LinkErrorRecovery */
4270 write_dev_cntr(dd, C_DC_SEQ_CRC_CNT, CNTR_INVALID_VL, 0);
4271 write_dev_cntr(dd, C_DC_REINIT_FROM_PEER_CNT, CNTR_INVALID_VL, 0);
4272 /* LocalLinkIntegrityErrors */
77241056
MM
4273 write_dev_cntr(dd, C_DC_RX_REPLAY, CNTR_INVALID_VL, 0);
4274 /* ExcessiveBufferOverruns */
4275 write_dev_cntr(dd, C_RCV_OVF, CNTR_INVALID_VL, 0);
4276 dd->rcv_ovfl_cnt = 0;
4277 dd->err_info_xmit_constraint.status &= ~OPA_EI_STATUS_SMASK;
4278}
4279
406310c6
SS
4280static int is_full_mgmt_pkey_in_table(struct hfi1_ibport *ibp)
4281{
4282 unsigned int i;
4283 struct hfi1_pportdata *ppd = ppd_from_ibp(ibp);
4284
4285 for (i = 0; i < ARRAY_SIZE(ppd->pkeys); ++i)
4286 if (ppd->pkeys[i] == FULL_MGMT_P_KEY)
4287 return 1;
4288
4289 return 0;
4290}
4291
77241056
MM
4292/*
4293 * is_local_mad() returns 1 if 'mad' is sent from, and destined to the
4294 * local node, 0 otherwise.
4295 */
4296static int is_local_mad(struct hfi1_ibport *ibp, const struct opa_mad *mad,
4297 const struct ib_wc *in_wc)
4298{
4299 struct hfi1_pportdata *ppd = ppd_from_ibp(ibp);
4300 const struct opa_smp *smp = (const struct opa_smp *)mad;
4301
4302 if (smp->mgmt_class == IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE) {
4303 return (smp->hop_cnt == 0 &&
4304 smp->route.dr.dr_slid == OPA_LID_PERMISSIVE &&
4305 smp->route.dr.dr_dlid == OPA_LID_PERMISSIVE);
4306 }
4307
4308 return (in_wc->slid == ppd->lid);
4309}
4310
4311/*
4312 * opa_local_smp_check() should only be called on MADs for which
4313 * is_local_mad() returns true. It applies the SMP checks that are
4314 * specific to SMPs which are sent from, and destined to this node.
4315 * opa_local_smp_check() returns 0 if the SMP passes its checks, 1
4316 * otherwise.
4317 *
4318 * SMPs which arrive from other nodes are instead checked by
4319 * opa_smp_check().
4320 */
4321static int opa_local_smp_check(struct hfi1_ibport *ibp,
4322 const struct ib_wc *in_wc)
4323{
4324 struct hfi1_pportdata *ppd = ppd_from_ibp(ibp);
77241056
MM
4325 u16 pkey;
4326
4327 if (in_wc->pkey_index >= ARRAY_SIZE(ppd->pkeys))
4328 return 1;
4329
4330 pkey = ppd->pkeys[in_wc->pkey_index];
4331 /*
4332 * We need to do the "node-local" checks specified in OPAv1,
4333 * rev 0.90, section 9.10.26, which are:
4334 * - pkey is 0x7fff, or 0xffff
4335 * - Source QPN == 0 || Destination QPN == 0
4336 * - the MAD header's management class is either
4337 * IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE or
4338 * IB_MGMT_CLASS_SUBN_LID_ROUTED
4339 * - SLID != 0
4340 *
4341 * However, we know (and so don't need to check again) that,
4342 * for local SMPs, the MAD stack passes MADs with:
4343 * - Source QPN of 0
4344 * - MAD mgmt_class is IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE
4345 * - SLID is either: OPA_LID_PERMISSIVE (0xFFFFFFFF), or
4346 * our own port's lid
4347 *
4348 */
4349 if (pkey == LIM_MGMT_P_KEY || pkey == FULL_MGMT_P_KEY)
4350 return 0;
2e903b61 4351 ingress_pkey_table_fail(ppd, pkey, in_wc->slid);
77241056
MM
4352 return 1;
4353}
4354
406310c6
SS
4355/**
4356 * hfi1_pkey_validation_pma - It validates PKEYs for incoming PMA MAD packets.
4357 * @ibp: IB port data
4358 * @in_mad: MAD packet with header and data
4359 * @in_wc: Work completion data such as source LID, port number, etc.
4360 *
4361 * These are all the possible logic rules for validating a pkey:
4362 *
4363 * a) If pkey neither FULL_MGMT_P_KEY nor LIM_MGMT_P_KEY,
4364 * and NOT self-originated packet:
4365 * Drop MAD packet as it should always be part of the
4366 * management partition unless it's a self-originated packet.
4367 *
4368 * b) If pkey_index -> FULL_MGMT_P_KEY, and LIM_MGMT_P_KEY in pkey table:
4369 * The packet is coming from a management node and the receiving node
4370 * is also a management node, so it is safe for the packet to go through.
4371 *
4372 * c) If pkey_index -> FULL_MGMT_P_KEY, and LIM_MGMT_P_KEY is NOT in pkey table:
4373 * Drop the packet as LIM_MGMT_P_KEY should always be in the pkey table.
4374 * It could be an FM misconfiguration.
4375 *
4376 * d) If pkey_index -> LIM_MGMT_P_KEY and FULL_MGMT_P_KEY is NOT in pkey table:
4377 * It is safe for the packet to go through since a non-management node is
4378 * talking to another non-management node.
4379 *
4380 * e) If pkey_index -> LIM_MGMT_P_KEY and FULL_MGMT_P_KEY in pkey table:
4381 * Drop the packet because a non-management node is talking to a
4382 * management node, and it could be an attack.
4383 *
 4384 * For the implementation, these rules can be simplified to only checking
4385 * for (a) and (e). There's no need to check for rule (b) as
4386 * the packet doesn't need to be dropped. Rule (c) is not possible in
4387 * the driver as LIM_MGMT_P_KEY is always in the pkey table.
4388 *
4389 * Return:
4390 * 0 - pkey is okay, -EINVAL it's a bad pkey
4391 */
4392static int hfi1_pkey_validation_pma(struct hfi1_ibport *ibp,
4393 const struct opa_mad *in_mad,
4394 const struct ib_wc *in_wc)
4395{
4396 u16 pkey_value = hfi1_lookup_pkey_value(ibp, in_wc->pkey_index);
4397
4398 /* Rule (a) from above */
4399 if (!is_local_mad(ibp, in_mad, in_wc) &&
4400 pkey_value != LIM_MGMT_P_KEY &&
4401 pkey_value != FULL_MGMT_P_KEY)
4402 return -EINVAL;
4403
4404 /* Rule (e) from above */
4405 if (pkey_value == LIM_MGMT_P_KEY &&
4406 is_full_mgmt_pkey_in_table(ibp))
4407 return -EINVAL;
4408
4409 return 0;
4410}
4411
77241056
MM
4412static int process_subn_opa(struct ib_device *ibdev, int mad_flags,
4413 u8 port, const struct opa_mad *in_mad,
4414 struct opa_mad *out_mad,
4415 u32 *resp_len)
4416{
4417 struct opa_smp *smp = (struct opa_smp *)out_mad;
4418 struct hfi1_ibport *ibp = to_iport(ibdev, port);
4419 u8 *data;
f1685179 4420 u32 am, data_size;
77241056
MM
4421 __be16 attr_id;
4422 int ret;
4423
4424 *out_mad = *in_mad;
4425 data = opa_get_smp_data(smp);
f1685179 4426 data_size = (u32)opa_get_smp_data_size(smp);
77241056
MM
4427
4428 am = be32_to_cpu(smp->attr_mod);
4429 attr_id = smp->attr_id;
9fa240bb 4430 if (smp->class_version != OPA_SM_CLASS_VERSION) {
77241056
MM
4431 smp->status |= IB_SMP_UNSUP_VERSION;
4432 ret = reply((struct ib_mad_hdr *)smp);
5950e9b1 4433 return ret;
77241056
MM
4434 }
4435 ret = check_mkey(ibp, (struct ib_mad_hdr *)smp, mad_flags, smp->mkey,
4436 smp->route.dr.dr_slid, smp->route.dr.return_path,
4437 smp->hop_cnt);
4438 if (ret) {
4439 u32 port_num = be32_to_cpu(smp->attr_mod);
4440
4441 /*
4442 * If this is a get/set portinfo, we already check the
4443 * M_Key if the MAD is for another port and the M_Key
4444 * is OK on the receiving port. This check is needed
4445 * to increment the error counters when the M_Key
4446 * fails to match on *both* ports.
4447 */
4448 if (attr_id == IB_SMP_ATTR_PORT_INFO &&
4449 (smp->method == IB_MGMT_METHOD_GET ||
4450 smp->method == IB_MGMT_METHOD_SET) &&
4451 port_num && port_num <= ibdev->phys_port_cnt &&
4452 port != port_num)
50e5dcbe 4453 (void)check_mkey(to_iport(ibdev, port_num),
77241056
MM
4454 (struct ib_mad_hdr *)smp, 0,
4455 smp->mkey, smp->route.dr.dr_slid,
4456 smp->route.dr.return_path,
4457 smp->hop_cnt);
4458 ret = IB_MAD_RESULT_FAILURE;
5950e9b1 4459 return ret;
77241056
MM
4460 }
4461
4462 *resp_len = opa_get_smp_header_size(smp);
4463
4464 switch (smp->method) {
4465 case IB_MGMT_METHOD_GET:
4466 switch (attr_id) {
4467 default:
4468 clear_opa_smp_data(smp);
4469 ret = subn_get_opa_sma(attr_id, smp, am, data,
f1685179
ND
4470 ibdev, port, resp_len,
4471 data_size);
5950e9b1 4472 break;
77241056
MM
4473 case OPA_ATTRIB_ID_AGGREGATE:
4474 ret = subn_get_opa_aggregate(smp, ibdev, port,
4475 resp_len);
5950e9b1 4476 break;
77241056 4477 }
5950e9b1 4478 break;
77241056
MM
4479 case IB_MGMT_METHOD_SET:
4480 switch (attr_id) {
4481 default:
4482 ret = subn_set_opa_sma(attr_id, smp, am, data,
f1685179
ND
4483 ibdev, port, resp_len,
4484 data_size);
5950e9b1 4485 break;
77241056
MM
4486 case OPA_ATTRIB_ID_AGGREGATE:
4487 ret = subn_set_opa_aggregate(smp, ibdev, port,
4488 resp_len);
5950e9b1 4489 break;
77241056 4490 }
5950e9b1 4491 break;
77241056
MM
4492 case IB_MGMT_METHOD_TRAP:
4493 case IB_MGMT_METHOD_REPORT:
4494 case IB_MGMT_METHOD_REPORT_RESP:
4495 case IB_MGMT_METHOD_GET_RESP:
4496 /*
4497 * The ib_mad module will call us to process responses
4498 * before checking for other consumers.
4499 * Just tell the caller to process it normally.
4500 */
4501 ret = IB_MAD_RESULT_SUCCESS;
5950e9b1 4502 break;
bf90aadd
MR
4503 case IB_MGMT_METHOD_TRAP_REPRESS:
4504 subn_handle_opa_trap_repress(ibp, smp);
4505 /* Always successful */
4506 ret = IB_MAD_RESULT_SUCCESS;
4507 break;
77241056
MM
4508 default:
4509 smp->status |= IB_SMP_UNSUP_METHOD;
4510 ret = reply((struct ib_mad_hdr *)smp);
5950e9b1 4511 break;
77241056
MM
4512 }
4513
77241056
MM
4514 return ret;
4515}
4516
4517static int process_subn(struct ib_device *ibdev, int mad_flags,
4518 u8 port, const struct ib_mad *in_mad,
4519 struct ib_mad *out_mad)
4520{
4521 struct ib_smp *smp = (struct ib_smp *)out_mad;
4522 struct hfi1_ibport *ibp = to_iport(ibdev, port);
4523 int ret;
4524
4525 *out_mad = *in_mad;
4526 if (smp->class_version != 1) {
4527 smp->status |= IB_SMP_UNSUP_VERSION;
4528 ret = reply((struct ib_mad_hdr *)smp);
5950e9b1 4529 return ret;
77241056
MM
4530 }
4531
4532 ret = check_mkey(ibp, (struct ib_mad_hdr *)smp, mad_flags,
4533 smp->mkey, (__force __be32)smp->dr_slid,
4534 smp->return_path, smp->hop_cnt);
4535 if (ret) {
4536 u32 port_num = be32_to_cpu(smp->attr_mod);
4537
4538 /*
4539 * If this is a get/set portinfo, we already check the
4540 * M_Key if the MAD is for another port and the M_Key
4541 * is OK on the receiving port. This check is needed
4542 * to increment the error counters when the M_Key
4543 * fails to match on *both* ports.
4544 */
4545 if (in_mad->mad_hdr.attr_id == IB_SMP_ATTR_PORT_INFO &&
4546 (smp->method == IB_MGMT_METHOD_GET ||
4547 smp->method == IB_MGMT_METHOD_SET) &&
4548 port_num && port_num <= ibdev->phys_port_cnt &&
4549 port != port_num)
50e5dcbe 4550 (void)check_mkey(to_iport(ibdev, port_num),
17fb4f29
JJ
4551 (struct ib_mad_hdr *)smp, 0,
4552 smp->mkey,
4553 (__force __be32)smp->dr_slid,
4554 smp->return_path, smp->hop_cnt);
77241056 4555 ret = IB_MAD_RESULT_FAILURE;
5950e9b1 4556 return ret;
77241056
MM
4557 }
4558
4559 switch (smp->method) {
4560 case IB_MGMT_METHOD_GET:
4561 switch (smp->attr_id) {
4562 case IB_SMP_ATTR_NODE_INFO:
4563 ret = subn_get_nodeinfo(smp, ibdev, port);
5950e9b1 4564 break;
77241056
MM
4565 default:
4566 smp->status |= IB_SMP_UNSUP_METH_ATTR;
4567 ret = reply((struct ib_mad_hdr *)smp);
5950e9b1 4568 break;
77241056 4569 }
5950e9b1 4570 break;
77241056
MM
4571 }
4572
77241056
MM
4573 return ret;
4574}
4575
b8d114eb
SS
4576static int process_perf(struct ib_device *ibdev, u8 port,
4577 const struct ib_mad *in_mad,
4578 struct ib_mad *out_mad)
4579{
4580 struct ib_pma_mad *pmp = (struct ib_pma_mad *)out_mad;
4581 struct ib_class_port_info *cpi = (struct ib_class_port_info *)
4582 &pmp->data;
4583 int ret = IB_MAD_RESULT_FAILURE;
4584
4585 *out_mad = *in_mad;
4586 if (pmp->mad_hdr.class_version != 1) {
4587 pmp->mad_hdr.status |= IB_SMP_UNSUP_VERSION;
4588 ret = reply((struct ib_mad_hdr *)pmp);
4589 return ret;
4590 }
4591
4592 switch (pmp->mad_hdr.method) {
4593 case IB_MGMT_METHOD_GET:
4594 switch (pmp->mad_hdr.attr_id) {
4595 case IB_PMA_PORT_COUNTERS:
4596 ret = pma_get_ib_portcounters(pmp, ibdev, port);
4597 break;
4598 case IB_PMA_PORT_COUNTERS_EXT:
4599 ret = pma_get_ib_portcounters_ext(pmp, ibdev, port);
4600 break;
4601 case IB_PMA_CLASS_PORT_INFO:
4602 cpi->capability_mask = IB_PMA_CLASS_CAP_EXT_WIDTH;
4603 ret = reply((struct ib_mad_hdr *)pmp);
4604 break;
4605 default:
4606 pmp->mad_hdr.status |= IB_SMP_UNSUP_METH_ATTR;
4607 ret = reply((struct ib_mad_hdr *)pmp);
4608 break;
4609 }
4610 break;
4611
4612 case IB_MGMT_METHOD_SET:
4613 if (pmp->mad_hdr.attr_id) {
4614 pmp->mad_hdr.status |= IB_SMP_UNSUP_METH_ATTR;
4615 ret = reply((struct ib_mad_hdr *)pmp);
4616 }
4617 break;
4618
4619 case IB_MGMT_METHOD_TRAP:
4620 case IB_MGMT_METHOD_GET_RESP:
4621 /*
4622 * The ib_mad module will call us to process responses
4623 * before checking for other consumers.
4624 * Just tell the caller to process it normally.
4625 */
4626 ret = IB_MAD_RESULT_SUCCESS;
4627 break;
4628
4629 default:
4630 pmp->mad_hdr.status |= IB_SMP_UNSUP_METHOD;
4631 ret = reply((struct ib_mad_hdr *)pmp);
4632 break;
77241056
MM
4633 }
4634
77241056
MM
4635 return ret;
4636}
4637
4638static int process_perf_opa(struct ib_device *ibdev, u8 port,
4639 const struct opa_mad *in_mad,
4640 struct opa_mad *out_mad, u32 *resp_len)
4641{
4642 struct opa_pma_mad *pmp = (struct opa_pma_mad *)out_mad;
4643 int ret;
4644
4645 *out_mad = *in_mad;
4646
9fa240bb 4647 if (pmp->mad_hdr.class_version != OPA_SM_CLASS_VERSION) {
77241056
MM
4648 pmp->mad_hdr.status |= IB_SMP_UNSUP_VERSION;
4649 return reply((struct ib_mad_hdr *)pmp);
4650 }
4651
4652 *resp_len = sizeof(pmp->mad_hdr);
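	/* each attribute handler below adds its payload size to *resp_len */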
4653
4654 switch (pmp->mad_hdr.method) {
4655 case IB_MGMT_METHOD_GET:
4656 switch (pmp->mad_hdr.attr_id) {
4657 case IB_PMA_CLASS_PORT_INFO:
4658 ret = pma_get_opa_classportinfo(pmp, ibdev, resp_len);
5950e9b1 4659 break;
77241056
MM
4660 case OPA_PM_ATTRIB_ID_PORT_STATUS:
4661 ret = pma_get_opa_portstatus(pmp, ibdev, port,
17fb4f29 4662 resp_len);
5950e9b1 4663 break;
77241056
MM
4664 case OPA_PM_ATTRIB_ID_DATA_PORT_COUNTERS:
4665 ret = pma_get_opa_datacounters(pmp, ibdev, port,
17fb4f29 4666 resp_len);
5950e9b1 4667 break;
77241056
MM
4668 case OPA_PM_ATTRIB_ID_ERROR_PORT_COUNTERS:
4669 ret = pma_get_opa_porterrors(pmp, ibdev, port,
17fb4f29 4670 resp_len);
5950e9b1 4671 break;
77241056
MM
4672 case OPA_PM_ATTRIB_ID_ERROR_INFO:
4673 ret = pma_get_opa_errorinfo(pmp, ibdev, port,
17fb4f29 4674 resp_len);
5950e9b1 4675 break;
77241056
MM
4676 default:
4677 pmp->mad_hdr.status |= IB_SMP_UNSUP_METH_ATTR;
4678 ret = reply((struct ib_mad_hdr *)pmp);
5950e9b1 4679 break;
77241056 4680 }
5950e9b1 4681 break;
77241056
MM
4682
4683 case IB_MGMT_METHOD_SET:
4684 switch (pmp->mad_hdr.attr_id) {
4685 case OPA_PM_ATTRIB_ID_CLEAR_PORT_STATUS:
4686 ret = pma_set_opa_portstatus(pmp, ibdev, port,
17fb4f29 4687 resp_len);
5950e9b1 4688 break;
77241056
MM
4689 case OPA_PM_ATTRIB_ID_ERROR_INFO:
4690 ret = pma_set_opa_errorinfo(pmp, ibdev, port,
17fb4f29 4691 resp_len);
5950e9b1 4692 break;
77241056
MM
4693 default:
4694 pmp->mad_hdr.status |= IB_SMP_UNSUP_METH_ATTR;
4695 ret = reply((struct ib_mad_hdr *)pmp);
5950e9b1 4696 break;
77241056 4697 }
5950e9b1 4698 break;
77241056
MM
4699
4700 case IB_MGMT_METHOD_TRAP:
4701 case IB_MGMT_METHOD_GET_RESP:
4702 /*
4703 * The ib_mad module will call us to process responses
4704 * before checking for other consumers.
4705 * Just tell the caller to process it normally.
4706 */
4707 ret = IB_MAD_RESULT_SUCCESS;
5950e9b1 4708 break;
77241056
MM
4709
4710 default:
4711 pmp->mad_hdr.status |= IB_SMP_UNSUP_METHOD;
4712 ret = reply((struct ib_mad_hdr *)pmp);
5950e9b1 4713 break;
77241056
MM
4714 }
4715
77241056
MM
4716 return ret;
4717}
4718
4719static int hfi1_process_opa_mad(struct ib_device *ibdev, int mad_flags,
a724648e
JB
4720 u8 port, const struct ib_wc *in_wc,
4721 const struct ib_grh *in_grh,
4722 const struct opa_mad *in_mad,
4723 struct opa_mad *out_mad, size_t *out_mad_size,
4724 u16 *out_mad_pkey_index)
77241056
MM
4725{
4726 int ret;
4727 int pkey_idx;
4728 u32 resp_len = 0;
4729 struct hfi1_ibport *ibp = to_iport(ibdev, port);
4730
4731 pkey_idx = hfi1_lookup_pkey_idx(ibp, LIM_MGMT_P_KEY);
4732 if (pkey_idx < 0) {
4733 pr_warn("failed to find limited mgmt pkey, defaulting 0x%x\n",
4734 hfi1_get_pkey(ibp, 1));
4735 pkey_idx = 1;
4736 }
4737 *out_mad_pkey_index = (u16)pkey_idx;
4738
4739 switch (in_mad->mad_hdr.mgmt_class) {
4740 case IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE:
4741 case IB_MGMT_CLASS_SUBN_LID_ROUTED:
4742 if (is_local_mad(ibp, in_mad, in_wc)) {
4743 ret = opa_local_smp_check(ibp, in_wc);
4744 if (ret)
4745 return IB_MAD_RESULT_FAILURE;
4746 }
4747 ret = process_subn_opa(ibdev, mad_flags, port, in_mad,
4748 out_mad, &resp_len);
4749 goto bail;
4750 case IB_MGMT_CLASS_PERF_MGMT:
406310c6
SS
4751 ret = hfi1_pkey_validation_pma(ibp, in_mad, in_wc);
4752 if (ret)
4753 return IB_MAD_RESULT_FAILURE;
4754
4755 ret = process_perf_opa(ibdev, port, in_mad, out_mad, &resp_len);
77241056
MM
4756 goto bail;
4757
4758 default:
4759 ret = IB_MAD_RESULT_SUCCESS;
4760 }
4761
4762bail:
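	/* replies are padded to a multiple of 8 bytes; pass-through MADs keep the received size */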
4763 if (ret & IB_MAD_RESULT_REPLY)
4764 *out_mad_size = round_up(resp_len, 8);
4765 else if (ret & IB_MAD_RESULT_SUCCESS)
4766 *out_mad_size = in_wc->byte_len - sizeof(struct ib_grh);
4767
4768 return ret;
4769}
4770
4771static int hfi1_process_ib_mad(struct ib_device *ibdev, int mad_flags, u8 port,
4772 const struct ib_wc *in_wc,
4773 const struct ib_grh *in_grh,
4774 const struct ib_mad *in_mad,
4775 struct ib_mad *out_mad)
4776{
4777 int ret;
4778
4779 switch (in_mad->mad_hdr.mgmt_class) {
4780 case IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE:
4781 case IB_MGMT_CLASS_SUBN_LID_ROUTED:
4782 ret = process_subn(ibdev, mad_flags, port, in_mad, out_mad);
5950e9b1 4783 break;
b8d114eb
SS
4784 case IB_MGMT_CLASS_PERF_MGMT:
4785 ret = process_perf(ibdev, port, in_mad, out_mad);
4786 break;
77241056
MM
4787 default:
4788 ret = IB_MAD_RESULT_SUCCESS;
5950e9b1 4789 break;
77241056
MM
4790 }
4791
77241056
MM
4792 return ret;
4793}
4794
4795/**
4796 * hfi1_process_mad - process an incoming MAD packet
4797 * @ibdev: the infiniband device this packet came in on
4798 * @mad_flags: MAD flags
4799 * @port: the port number this packet came in on
4800 * @in_wc: the work completion entry for this packet
4801 * @in_grh: the global route header for this packet
4802 * @in_mad: the incoming MAD
4803 * @out_mad: any outgoing MAD reply
4804 *
4805 * Returns IB_MAD_RESULT_SUCCESS if this is a MAD that we are not
4806 * interested in processing.
4807 *
4808 * Note that the verbs framework has already done the MAD sanity checks,
4809 * and hop count/pointer updating for IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE
4810 * MADs.
4811 *
4812 * This is called by the ib_mad module.
4813 */
4814int hfi1_process_mad(struct ib_device *ibdev, int mad_flags, u8 port,
4815 const struct ib_wc *in_wc, const struct ib_grh *in_grh,
4816 const struct ib_mad_hdr *in_mad, size_t in_mad_size,
4817 struct ib_mad_hdr *out_mad, size_t *out_mad_size,
4818 u16 *out_mad_pkey_index)
4819{
4820 switch (in_mad->base_version) {
4821 case OPA_MGMT_BASE_VERSION:
4822 if (unlikely(in_mad_size != sizeof(struct opa_mad))) {
3067771c 4823 dev_err(ibdev->dev.parent, "invalid in_mad_size\n");
77241056
MM
4824 return IB_MAD_RESULT_FAILURE;
4825 }
4826 return hfi1_process_opa_mad(ibdev, mad_flags, port,
4827 in_wc, in_grh,
4828 (struct opa_mad *)in_mad,
4829 (struct opa_mad *)out_mad,
4830 out_mad_size,
4831 out_mad_pkey_index);
4832 case IB_MGMT_BASE_VERSION:
4833 return hfi1_process_ib_mad(ibdev, mad_flags, port,
4834 in_wc, in_grh,
4835 (const struct ib_mad *)in_mad,
4836 (struct ib_mad *)out_mad);
4837 default:
4838 break;
4839 }
4840
4841 return IB_MAD_RESULT_FAILURE;
4842}