drivers/infiniband/core/mad.c
/*
 * Copyright (c) 2004-2007 Voltaire, Inc.  All rights reserved.
 * Copyright (c) 2005 Intel Corporation.  All rights reserved.
 * Copyright (c) 2005 Mellanox Technologies Ltd.  All rights reserved.
 * Copyright (c) 2009 HNR Consulting. All rights reserved.
 * Copyright (c) 2014 Intel Corporation.  All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/dma-mapping.h>
#include <linux/idr.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/security.h>
#include <rdma/ib_cache.h>

#include "mad_priv.h"
#include "core_priv.h"
#include "mad_rmpp.h"
#include "smi.h"
#include "opa_smi.h"
#include "agent.h"

static int mad_sendq_size = IB_MAD_QP_SEND_SIZE;
static int mad_recvq_size = IB_MAD_QP_RECV_SIZE;

module_param_named(send_queue_size, mad_sendq_size, int, 0444);
MODULE_PARM_DESC(send_queue_size, "Size of send queue in number of work requests");
module_param_named(recv_queue_size, mad_recvq_size, int, 0444);
MODULE_PARM_DESC(recv_queue_size, "Size of receive queue in number of work requests");

/*
 * The mlx4 driver uses the top byte to distinguish which virtual function
 * generated the MAD, so we must avoid using it.
 */
#define AGENT_ID_LIMIT		(1 << 24)
static DEFINE_IDR(ib_mad_clients);
static struct list_head ib_mad_port_list;

/* Port list lock */
static DEFINE_SPINLOCK(ib_mad_port_list_lock);

/* Forward declarations */
static int method_in_use(struct ib_mad_mgmt_method_table **method,
			 struct ib_mad_reg_req *mad_reg_req);
static void remove_mad_reg_req(struct ib_mad_agent_private *priv);
static struct ib_mad_agent_private *find_mad_agent(
					struct ib_mad_port_private *port_priv,
					const struct ib_mad_hdr *mad);
static int ib_mad_post_receive_mads(struct ib_mad_qp_info *qp_info,
				    struct ib_mad_private *mad);
static void cancel_mads(struct ib_mad_agent_private *mad_agent_priv);
static void timeout_sends(struct work_struct *work);
static void local_completions(struct work_struct *work);
static int add_nonoui_reg_req(struct ib_mad_reg_req *mad_reg_req,
			      struct ib_mad_agent_private *agent_priv,
			      u8 mgmt_class);
static int add_oui_reg_req(struct ib_mad_reg_req *mad_reg_req,
			   struct ib_mad_agent_private *agent_priv);
static bool ib_mad_send_error(struct ib_mad_port_private *port_priv,
			      struct ib_wc *wc);
static void ib_mad_send_done(struct ib_cq *cq, struct ib_wc *wc);

/*
 * Returns an ib_mad_port_private structure or NULL for a device/port.
 * Assumes ib_mad_port_list_lock is held.
 */
static inline struct ib_mad_port_private *
__ib_get_mad_port(struct ib_device *device, int port_num)
{
	struct ib_mad_port_private *entry;

	list_for_each_entry(entry, &ib_mad_port_list, port_list) {
		if (entry->device == device && entry->port_num == port_num)
			return entry;
	}
	return NULL;
}

/*
 * Wrapper function to return an ib_mad_port_private structure or NULL
 * for a device/port.
 */
static inline struct ib_mad_port_private *
ib_get_mad_port(struct ib_device *device, int port_num)
{
	struct ib_mad_port_private *entry;
	unsigned long flags;

	spin_lock_irqsave(&ib_mad_port_list_lock, flags);
	entry = __ib_get_mad_port(device, port_num);
	spin_unlock_irqrestore(&ib_mad_port_list_lock, flags);

	return entry;
}

static inline u8 convert_mgmt_class(u8 mgmt_class)
{
	/* Alias IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE to 0 */
	return mgmt_class == IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE ?
		0 : mgmt_class;
}

static int get_spl_qp_index(enum ib_qp_type qp_type)
{
	switch (qp_type) {
	case IB_QPT_SMI:
		return 0;
	case IB_QPT_GSI:
		return 1;
	default:
		return -1;
	}
}

static int vendor_class_index(u8 mgmt_class)
{
	return mgmt_class - IB_MGMT_CLASS_VENDOR_RANGE2_START;
}

static int is_vendor_class(u8 mgmt_class)
{
	if ((mgmt_class < IB_MGMT_CLASS_VENDOR_RANGE2_START) ||
	    (mgmt_class > IB_MGMT_CLASS_VENDOR_RANGE2_END))
		return 0;
	return 1;
}

static int is_vendor_oui(char *oui)
{
	if (oui[0] || oui[1] || oui[2])
		return 1;
	return 0;
}

static int is_vendor_method_in_use(
		struct ib_mad_mgmt_vendor_class *vendor_class,
		struct ib_mad_reg_req *mad_reg_req)
{
	struct ib_mad_mgmt_method_table *method;
	int i;

	for (i = 0; i < MAX_MGMT_OUI; i++) {
		if (!memcmp(vendor_class->oui[i], mad_reg_req->oui, 3)) {
			method = vendor_class->method_table[i];
			if (method) {
				if (method_in_use(&method, mad_reg_req))
					return 1;
				else
					break;
			}
		}
	}
	return 0;
}

int ib_response_mad(const struct ib_mad_hdr *hdr)
{
	return ((hdr->method & IB_MGMT_METHOD_RESP) ||
		(hdr->method == IB_MGMT_METHOD_TRAP_REPRESS) ||
		((hdr->mgmt_class == IB_MGMT_CLASS_BM) &&
		 (hdr->attr_mod & IB_BM_ATTR_MOD_RESP)));
}
EXPORT_SYMBOL(ib_response_mad);
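
/*
 * Illustrative sketch (not part of this file, compiled out): a client's
 * recv_handler might use ib_response_mad() to separate solicited
 * responses from unsolicited requests. The handler name below is
 * hypothetical; for responses the core also passes the matched send_buf.
 */
#if 0
static void example_recv_handler(struct ib_mad_agent *agent,
				 struct ib_mad_send_buf *send_buf,
				 struct ib_mad_recv_wc *mad_recv_wc)
{
	const struct ib_mad_hdr *hdr = &mad_recv_wc->recv_buf.mad->mad_hdr;

	if (ib_response_mad(hdr)) {
		/* Response to a MAD we sent: complete the request. */
	} else {
		/* Unsolicited request: process it and possibly reply. */
	}
	ib_free_recv_mad(mad_recv_wc);
}
#endif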

/*
 * ib_register_mad_agent - Register to send/receive MADs
 *
 * Context: Process context.
 */
struct ib_mad_agent *ib_register_mad_agent(struct ib_device *device,
					   u8 port_num,
					   enum ib_qp_type qp_type,
					   struct ib_mad_reg_req *mad_reg_req,
					   u8 rmpp_version,
					   ib_mad_send_handler send_handler,
					   ib_mad_recv_handler recv_handler,
					   void *context,
					   u32 registration_flags)
{
	struct ib_mad_port_private *port_priv;
	struct ib_mad_agent *ret = ERR_PTR(-EINVAL);
	struct ib_mad_agent_private *mad_agent_priv;
	struct ib_mad_reg_req *reg_req = NULL;
	struct ib_mad_mgmt_class_table *class;
	struct ib_mad_mgmt_vendor_class_table *vendor;
	struct ib_mad_mgmt_vendor_class *vendor_class;
	struct ib_mad_mgmt_method_table *method;
	int ret2, qpn;
	u8 mgmt_class, vclass;

	if ((qp_type == IB_QPT_SMI && !rdma_cap_ib_smi(device, port_num)) ||
	    (qp_type == IB_QPT_GSI && !rdma_cap_ib_cm(device, port_num)))
		return ERR_PTR(-EPROTONOSUPPORT);

	/* Validate parameters */
	qpn = get_spl_qp_index(qp_type);
	if (qpn == -1) {
		dev_dbg_ratelimited(&device->dev, "%s: invalid QP Type %d\n",
				    __func__, qp_type);
		goto error1;
	}

	if (rmpp_version && rmpp_version != IB_MGMT_RMPP_VERSION) {
		dev_dbg_ratelimited(&device->dev,
				    "%s: invalid RMPP Version %u\n",
				    __func__, rmpp_version);
		goto error1;
	}

	/* Validate MAD registration request if supplied */
	if (mad_reg_req) {
		if (mad_reg_req->mgmt_class_version >= MAX_MGMT_VERSION) {
			dev_dbg_ratelimited(&device->dev,
					    "%s: invalid Class Version %u\n",
					    __func__,
					    mad_reg_req->mgmt_class_version);
			goto error1;
		}
		if (!recv_handler) {
			dev_dbg_ratelimited(&device->dev,
					    "%s: no recv_handler\n", __func__);
			goto error1;
		}
		if (mad_reg_req->mgmt_class >= MAX_MGMT_CLASS) {
			/*
			 * IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE is the only
			 * one in this range currently allowed
			 */
			if (mad_reg_req->mgmt_class !=
			    IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE) {
				dev_dbg_ratelimited(&device->dev,
					"%s: Invalid Mgmt Class 0x%x\n",
					__func__, mad_reg_req->mgmt_class);
				goto error1;
			}
		} else if (mad_reg_req->mgmt_class == 0) {
			/*
			 * Class 0 is reserved in IBA and is used for
			 * aliasing of IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE
			 */
			dev_dbg_ratelimited(&device->dev,
					    "%s: Invalid Mgmt Class 0\n",
					    __func__);
			goto error1;
		} else if (is_vendor_class(mad_reg_req->mgmt_class)) {
			/*
			 * If class is in "new" vendor range,
			 * ensure supplied OUI is not zero
			 */
			if (!is_vendor_oui(mad_reg_req->oui)) {
				dev_dbg_ratelimited(&device->dev,
					"%s: No OUI specified for class 0x%x\n",
					__func__,
					mad_reg_req->mgmt_class);
				goto error1;
			}
		}
		/* Make sure class supplied is consistent with RMPP */
		if (!ib_is_mad_class_rmpp(mad_reg_req->mgmt_class)) {
			if (rmpp_version) {
				dev_dbg_ratelimited(&device->dev,
					"%s: RMPP version for non-RMPP class 0x%x\n",
					__func__, mad_reg_req->mgmt_class);
				goto error1;
			}
		}

		/* Make sure class supplied is consistent with QP type */
		if (qp_type == IB_QPT_SMI) {
			if ((mad_reg_req->mgmt_class !=
					IB_MGMT_CLASS_SUBN_LID_ROUTED) &&
			    (mad_reg_req->mgmt_class !=
					IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE)) {
				dev_dbg_ratelimited(&device->dev,
					"%s: Invalid SM QP type: class 0x%x\n",
					__func__, mad_reg_req->mgmt_class);
				goto error1;
			}
		} else {
			if ((mad_reg_req->mgmt_class ==
					IB_MGMT_CLASS_SUBN_LID_ROUTED) ||
			    (mad_reg_req->mgmt_class ==
					IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE)) {
				dev_dbg_ratelimited(&device->dev,
					"%s: Invalid GS QP type: class 0x%x\n",
					__func__, mad_reg_req->mgmt_class);
				goto error1;
			}
		}
	} else {
		/* No registration request supplied */
		if (!send_handler)
			goto error1;
		if (registration_flags & IB_MAD_USER_RMPP)
			goto error1;
	}

	/* Validate device and port */
	port_priv = ib_get_mad_port(device, port_num);
	if (!port_priv) {
		dev_dbg_ratelimited(&device->dev, "%s: Invalid port %d\n",
				    __func__, port_num);
		ret = ERR_PTR(-ENODEV);
		goto error1;
	}

	/* Verify the QP requested is supported. For example, Ethernet devices
	 * will not have QP0.
	 */
	if (!port_priv->qp_info[qpn].qp) {
		dev_dbg_ratelimited(&device->dev, "%s: QP %d not supported\n",
				    __func__, qpn);
		ret = ERR_PTR(-EPROTONOSUPPORT);
		goto error1;
	}

	/* Allocate structures */
	mad_agent_priv = kzalloc(sizeof *mad_agent_priv, GFP_KERNEL);
	if (!mad_agent_priv) {
		ret = ERR_PTR(-ENOMEM);
		goto error1;
	}

	if (mad_reg_req) {
		reg_req = kmemdup(mad_reg_req, sizeof *reg_req, GFP_KERNEL);
		if (!reg_req) {
			ret = ERR_PTR(-ENOMEM);
			goto error3;
		}
	}

	/* Now, fill in the various structures */
	mad_agent_priv->qp_info = &port_priv->qp_info[qpn];
	mad_agent_priv->reg_req = reg_req;
	mad_agent_priv->agent.rmpp_version = rmpp_version;
	mad_agent_priv->agent.device = device;
	mad_agent_priv->agent.recv_handler = recv_handler;
	mad_agent_priv->agent.send_handler = send_handler;
	mad_agent_priv->agent.context = context;
	mad_agent_priv->agent.qp = port_priv->qp_info[qpn].qp;
	mad_agent_priv->agent.port_num = port_num;
	mad_agent_priv->agent.flags = registration_flags;
	spin_lock_init(&mad_agent_priv->lock);
	INIT_LIST_HEAD(&mad_agent_priv->send_list);
	INIT_LIST_HEAD(&mad_agent_priv->wait_list);
	INIT_LIST_HEAD(&mad_agent_priv->done_list);
	INIT_LIST_HEAD(&mad_agent_priv->rmpp_list);
	INIT_DELAYED_WORK(&mad_agent_priv->timed_work, timeout_sends);
	INIT_LIST_HEAD(&mad_agent_priv->local_list);
	INIT_WORK(&mad_agent_priv->local_work, local_completions);
	atomic_set(&mad_agent_priv->refcount, 1);
	init_completion(&mad_agent_priv->comp);

	ret2 = ib_mad_agent_security_setup(&mad_agent_priv->agent, qp_type);
	if (ret2) {
		ret = ERR_PTR(ret2);
		goto error4;
	}

	idr_preload(GFP_KERNEL);
	idr_lock(&ib_mad_clients);
	ret2 = idr_alloc_cyclic(&ib_mad_clients, mad_agent_priv, 0,
				AGENT_ID_LIMIT, GFP_ATOMIC);
	idr_unlock(&ib_mad_clients);
	idr_preload_end();

	if (ret2 < 0) {
		ret = ERR_PTR(ret2);
		goto error5;
	}
	mad_agent_priv->agent.hi_tid = ret2;

	/*
	 * Make sure MAD registration (if supplied)
	 * is non overlapping with any existing ones
	 */
	spin_lock_irq(&port_priv->reg_lock);
	if (mad_reg_req) {
		mgmt_class = convert_mgmt_class(mad_reg_req->mgmt_class);
		if (!is_vendor_class(mgmt_class)) {
			class = port_priv->version[mad_reg_req->
						   mgmt_class_version].class;
			if (class) {
				method = class->method_table[mgmt_class];
				if (method) {
					if (method_in_use(&method,
							   mad_reg_req))
						goto error6;
				}
			}
			ret2 = add_nonoui_reg_req(mad_reg_req, mad_agent_priv,
						  mgmt_class);
		} else {
			/* "New" vendor class range */
			vendor = port_priv->version[mad_reg_req->
						    mgmt_class_version].vendor;
			if (vendor) {
				vclass = vendor_class_index(mgmt_class);
				vendor_class = vendor->vendor_class[vclass];
				if (vendor_class) {
					if (is_vendor_method_in_use(
							vendor_class,
							mad_reg_req))
						goto error6;
				}
			}
			ret2 = add_oui_reg_req(mad_reg_req, mad_agent_priv);
		}
		if (ret2) {
			ret = ERR_PTR(ret2);
			goto error6;
		}
	}
	spin_unlock_irq(&port_priv->reg_lock);

	return &mad_agent_priv->agent;
error6:
	spin_unlock_irq(&port_priv->reg_lock);
	idr_lock(&ib_mad_clients);
	idr_remove(&ib_mad_clients, mad_agent_priv->agent.hi_tid);
	idr_unlock(&ib_mad_clients);
error5:
	ib_mad_agent_security_cleanup(&mad_agent_priv->agent);
error4:
	kfree(reg_req);
error3:
	kfree(mad_agent_priv);
error1:
	return ret;
}
EXPORT_SYMBOL(ib_register_mad_agent);
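
/*
 * Illustrative sketch (not part of this file, compiled out): registering
 * an agent on the GSI QP for the Device Management class, receiving Get
 * and Set methods, mirroring the pattern used by in-kernel clients. The
 * function and handler names are hypothetical; the agent would later be
 * released with ib_unregister_mad_agent().
 */
#if 0
static struct ib_mad_agent *example_register(struct ib_device *device)
{
	struct ib_mad_reg_req reg_req = {};

	reg_req.mgmt_class = IB_MGMT_CLASS_DEVICE_MGMT;
	reg_req.mgmt_class_version = 1;
	set_bit(IB_MGMT_METHOD_GET, reg_req.method_mask);
	set_bit(IB_MGMT_METHOD_SET, reg_req.method_mask);

	/* Port 1, GSI QP, no RMPP, default registration flags. */
	return ib_register_mad_agent(device, 1, IB_QPT_GSI, &reg_req, 0,
				     example_send_handler,
				     example_recv_handler, NULL, 0);
}
#endif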

static inline int is_snooping_sends(int mad_snoop_flags)
{
	return (mad_snoop_flags &
		(/*IB_MAD_SNOOP_POSTED_SENDS |
		 IB_MAD_SNOOP_RMPP_SENDS |*/
		 IB_MAD_SNOOP_SEND_COMPLETIONS /*|
		 IB_MAD_SNOOP_RMPP_SEND_COMPLETIONS*/));
}

static inline int is_snooping_recvs(int mad_snoop_flags)
{
	return (mad_snoop_flags &
		(IB_MAD_SNOOP_RECVS /*|
		 IB_MAD_SNOOP_RMPP_RECVS*/));
}

static int register_snoop_agent(struct ib_mad_qp_info *qp_info,
				struct ib_mad_snoop_private *mad_snoop_priv)
{
	struct ib_mad_snoop_private **new_snoop_table;
	unsigned long flags;
	int i;

	spin_lock_irqsave(&qp_info->snoop_lock, flags);
	/* Check for empty slot in array. */
	for (i = 0; i < qp_info->snoop_table_size; i++)
		if (!qp_info->snoop_table[i])
			break;

	if (i == qp_info->snoop_table_size) {
		/* Grow table. */
		new_snoop_table = krealloc(qp_info->snoop_table,
					   sizeof mad_snoop_priv *
					   (qp_info->snoop_table_size + 1),
					   GFP_ATOMIC);
		if (!new_snoop_table) {
			i = -ENOMEM;
			goto out;
		}

		qp_info->snoop_table = new_snoop_table;
		qp_info->snoop_table_size++;
	}
	qp_info->snoop_table[i] = mad_snoop_priv;
	atomic_inc(&qp_info->snoop_count);
out:
	spin_unlock_irqrestore(&qp_info->snoop_lock, flags);
	return i;
}

struct ib_mad_agent *ib_register_mad_snoop(struct ib_device *device,
					   u8 port_num,
					   enum ib_qp_type qp_type,
					   int mad_snoop_flags,
					   ib_mad_snoop_handler snoop_handler,
					   ib_mad_recv_handler recv_handler,
					   void *context)
{
	struct ib_mad_port_private *port_priv;
	struct ib_mad_agent *ret;
	struct ib_mad_snoop_private *mad_snoop_priv;
	int qpn;
	int err;

	/* Validate parameters */
	if ((is_snooping_sends(mad_snoop_flags) && !snoop_handler) ||
	    (is_snooping_recvs(mad_snoop_flags) && !recv_handler)) {
		ret = ERR_PTR(-EINVAL);
		goto error1;
	}
	qpn = get_spl_qp_index(qp_type);
	if (qpn == -1) {
		ret = ERR_PTR(-EINVAL);
		goto error1;
	}
	port_priv = ib_get_mad_port(device, port_num);
	if (!port_priv) {
		ret = ERR_PTR(-ENODEV);
		goto error1;
	}
	/* Allocate structures */
	mad_snoop_priv = kzalloc(sizeof *mad_snoop_priv, GFP_KERNEL);
	if (!mad_snoop_priv) {
		ret = ERR_PTR(-ENOMEM);
		goto error1;
	}

	/* Now, fill in the various structures */
	mad_snoop_priv->qp_info = &port_priv->qp_info[qpn];
	mad_snoop_priv->agent.device = device;
	mad_snoop_priv->agent.recv_handler = recv_handler;
	mad_snoop_priv->agent.snoop_handler = snoop_handler;
	mad_snoop_priv->agent.context = context;
	mad_snoop_priv->agent.qp = port_priv->qp_info[qpn].qp;
	mad_snoop_priv->agent.port_num = port_num;
	mad_snoop_priv->mad_snoop_flags = mad_snoop_flags;
	init_completion(&mad_snoop_priv->comp);

	err = ib_mad_agent_security_setup(&mad_snoop_priv->agent, qp_type);
	if (err) {
		ret = ERR_PTR(err);
		goto error2;
	}

	mad_snoop_priv->snoop_index = register_snoop_agent(
						&port_priv->qp_info[qpn],
						mad_snoop_priv);
	if (mad_snoop_priv->snoop_index < 0) {
		ret = ERR_PTR(mad_snoop_priv->snoop_index);
		goto error3;
	}

	atomic_set(&mad_snoop_priv->refcount, 1);
	return &mad_snoop_priv->agent;
error3:
	ib_mad_agent_security_cleanup(&mad_snoop_priv->agent);
error2:
	kfree(mad_snoop_priv);
error1:
	return ret;
}
EXPORT_SYMBOL(ib_register_mad_snoop);

static inline void deref_mad_agent(struct ib_mad_agent_private *mad_agent_priv)
{
	if (atomic_dec_and_test(&mad_agent_priv->refcount))
		complete(&mad_agent_priv->comp);
}

static inline void deref_snoop_agent(struct ib_mad_snoop_private *mad_snoop_priv)
{
	if (atomic_dec_and_test(&mad_snoop_priv->refcount))
		complete(&mad_snoop_priv->comp);
}

static void unregister_mad_agent(struct ib_mad_agent_private *mad_agent_priv)
{
	struct ib_mad_port_private *port_priv;

	/* Note that we could still be handling received MADs */

	/*
	 * Canceling all sends results in dropping received response
	 * MADs, preventing us from queuing additional work
	 */
	cancel_mads(mad_agent_priv);
	port_priv = mad_agent_priv->qp_info->port_priv;
	cancel_delayed_work(&mad_agent_priv->timed_work);

	spin_lock_irq(&port_priv->reg_lock);
	remove_mad_reg_req(mad_agent_priv);
	spin_unlock_irq(&port_priv->reg_lock);
	idr_lock(&ib_mad_clients);
	idr_remove(&ib_mad_clients, mad_agent_priv->agent.hi_tid);
	idr_unlock(&ib_mad_clients);

	flush_workqueue(port_priv->wq);
	ib_cancel_rmpp_recvs(mad_agent_priv);

	deref_mad_agent(mad_agent_priv);
	wait_for_completion(&mad_agent_priv->comp);

	ib_mad_agent_security_cleanup(&mad_agent_priv->agent);

	kfree(mad_agent_priv->reg_req);
	kfree_rcu(mad_agent_priv, rcu);
}

static void unregister_mad_snoop(struct ib_mad_snoop_private *mad_snoop_priv)
{
	struct ib_mad_qp_info *qp_info;
	unsigned long flags;

	qp_info = mad_snoop_priv->qp_info;
	spin_lock_irqsave(&qp_info->snoop_lock, flags);
	qp_info->snoop_table[mad_snoop_priv->snoop_index] = NULL;
	atomic_dec(&qp_info->snoop_count);
	spin_unlock_irqrestore(&qp_info->snoop_lock, flags);

	deref_snoop_agent(mad_snoop_priv);
	wait_for_completion(&mad_snoop_priv->comp);

	ib_mad_agent_security_cleanup(&mad_snoop_priv->agent);

	kfree(mad_snoop_priv);
}

/*
 * ib_unregister_mad_agent - Unregisters a client from using MAD services
 *
 * Context: Process context.
 */
void ib_unregister_mad_agent(struct ib_mad_agent *mad_agent)
{
	struct ib_mad_agent_private *mad_agent_priv;
	struct ib_mad_snoop_private *mad_snoop_priv;

	/* If the TID is zero, the agent can only snoop. */
	if (mad_agent->hi_tid) {
		mad_agent_priv = container_of(mad_agent,
					      struct ib_mad_agent_private,
					      agent);
		unregister_mad_agent(mad_agent_priv);
	} else {
		mad_snoop_priv = container_of(mad_agent,
					      struct ib_mad_snoop_private,
					      agent);
		unregister_mad_snoop(mad_snoop_priv);
	}
}
EXPORT_SYMBOL(ib_unregister_mad_agent);

static void dequeue_mad(struct ib_mad_list_head *mad_list)
{
	struct ib_mad_queue *mad_queue;
	unsigned long flags;

	mad_queue = mad_list->mad_queue;
	spin_lock_irqsave(&mad_queue->lock, flags);
	list_del(&mad_list->list);
	mad_queue->count--;
	spin_unlock_irqrestore(&mad_queue->lock, flags);
}

static void snoop_send(struct ib_mad_qp_info *qp_info,
		       struct ib_mad_send_buf *send_buf,
		       struct ib_mad_send_wc *mad_send_wc,
		       int mad_snoop_flags)
{
	struct ib_mad_snoop_private *mad_snoop_priv;
	unsigned long flags;
	int i;

	spin_lock_irqsave(&qp_info->snoop_lock, flags);
	for (i = 0; i < qp_info->snoop_table_size; i++) {
		mad_snoop_priv = qp_info->snoop_table[i];
		if (!mad_snoop_priv ||
		    !(mad_snoop_priv->mad_snoop_flags & mad_snoop_flags))
			continue;

		atomic_inc(&mad_snoop_priv->refcount);
		spin_unlock_irqrestore(&qp_info->snoop_lock, flags);
		mad_snoop_priv->agent.snoop_handler(&mad_snoop_priv->agent,
						    send_buf, mad_send_wc);
		deref_snoop_agent(mad_snoop_priv);
		spin_lock_irqsave(&qp_info->snoop_lock, flags);
	}
	spin_unlock_irqrestore(&qp_info->snoop_lock, flags);
}

static void snoop_recv(struct ib_mad_qp_info *qp_info,
		       struct ib_mad_recv_wc *mad_recv_wc,
		       int mad_snoop_flags)
{
	struct ib_mad_snoop_private *mad_snoop_priv;
	unsigned long flags;
	int i;

	spin_lock_irqsave(&qp_info->snoop_lock, flags);
	for (i = 0; i < qp_info->snoop_table_size; i++) {
		mad_snoop_priv = qp_info->snoop_table[i];
		if (!mad_snoop_priv ||
		    !(mad_snoop_priv->mad_snoop_flags & mad_snoop_flags))
			continue;

		atomic_inc(&mad_snoop_priv->refcount);
		spin_unlock_irqrestore(&qp_info->snoop_lock, flags);
		mad_snoop_priv->agent.recv_handler(&mad_snoop_priv->agent, NULL,
						   mad_recv_wc);
		deref_snoop_agent(mad_snoop_priv);
		spin_lock_irqsave(&qp_info->snoop_lock, flags);
	}
	spin_unlock_irqrestore(&qp_info->snoop_lock, flags);
}

static void build_smp_wc(struct ib_qp *qp, struct ib_cqe *cqe, u16 slid,
		u16 pkey_index, u8 port_num, struct ib_wc *wc)
{
	memset(wc, 0, sizeof *wc);
	wc->wr_cqe = cqe;
	wc->status = IB_WC_SUCCESS;
	wc->opcode = IB_WC_RECV;
	wc->pkey_index = pkey_index;
	wc->byte_len = sizeof(struct ib_mad) + sizeof(struct ib_grh);
	wc->src_qp = IB_QP0;
	wc->qp = qp;
	wc->slid = slid;
	wc->sl = 0;
	wc->dlid_path_bits = 0;
	wc->port_num = port_num;
}

static size_t mad_priv_size(const struct ib_mad_private *mp)
{
	return sizeof(struct ib_mad_private) + mp->mad_size;
}

static struct ib_mad_private *alloc_mad_private(size_t mad_size, gfp_t flags)
{
	size_t size = sizeof(struct ib_mad_private) + mad_size;
	struct ib_mad_private *ret = kzalloc(size, flags);

	if (ret)
		ret->mad_size = mad_size;

	return ret;
}

static size_t port_mad_size(const struct ib_mad_port_private *port_priv)
{
	return rdma_max_mad_size(port_priv->device, port_priv->port_num);
}

static size_t mad_priv_dma_size(const struct ib_mad_private *mp)
{
	return sizeof(struct ib_grh) + mp->mad_size;
}

/*
 * Return 0 if SMP is to be sent
 * Return 1 if SMP was consumed locally (whether or not solicited)
 * Return < 0 if error
 */
static int handle_outgoing_dr_smp(struct ib_mad_agent_private *mad_agent_priv,
				  struct ib_mad_send_wr_private *mad_send_wr)
{
	int ret = 0;
	struct ib_smp *smp = mad_send_wr->send_buf.mad;
	struct opa_smp *opa_smp = (struct opa_smp *)smp;
	unsigned long flags;
	struct ib_mad_local_private *local;
	struct ib_mad_private *mad_priv;
	struct ib_mad_port_private *port_priv;
	struct ib_mad_agent_private *recv_mad_agent = NULL;
	struct ib_device *device = mad_agent_priv->agent.device;
	u8 port_num;
	struct ib_wc mad_wc;
	struct ib_ud_wr *send_wr = &mad_send_wr->send_wr;
	size_t mad_size = port_mad_size(mad_agent_priv->qp_info->port_priv);
	u16 out_mad_pkey_index = 0;
	u16 drslid;
	bool opa = rdma_cap_opa_mad(mad_agent_priv->qp_info->port_priv->device,
				    mad_agent_priv->qp_info->port_priv->port_num);

	if (rdma_cap_ib_switch(device) &&
	    smp->mgmt_class == IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE)
		port_num = send_wr->port_num;
	else
		port_num = mad_agent_priv->agent.port_num;

	/*
	 * Directed route handling starts if the initial LID routed part of
	 * a request or the ending LID routed part of a response is empty.
	 * If we are at the start of the LID routed part, don't update the
	 * hop_ptr or hop_cnt.  See section 14.2.2, Vol 1 IB spec.
	 */
	if (opa && smp->class_version == OPA_SM_CLASS_VERSION) {
		u32 opa_drslid;

		if ((opa_get_smp_direction(opa_smp)
		     ? opa_smp->route.dr.dr_dlid : opa_smp->route.dr.dr_slid) ==
		     OPA_LID_PERMISSIVE &&
		     opa_smi_handle_dr_smp_send(opa_smp,
						rdma_cap_ib_switch(device),
						port_num) == IB_SMI_DISCARD) {
			ret = -EINVAL;
			dev_err(&device->dev, "OPA Invalid directed route\n");
			goto out;
		}
		opa_drslid = be32_to_cpu(opa_smp->route.dr.dr_slid);
		if (opa_drslid != be32_to_cpu(OPA_LID_PERMISSIVE) &&
		    opa_drslid & 0xffff0000) {
			ret = -EINVAL;
			dev_err(&device->dev, "OPA Invalid dr_slid 0x%x\n",
				opa_drslid);
			goto out;
		}
		drslid = (u16)(opa_drslid & 0x0000ffff);

		/* Check to post send on QP or process locally */
		if (opa_smi_check_local_smp(opa_smp, device) == IB_SMI_DISCARD &&
		    opa_smi_check_local_returning_smp(opa_smp, device) == IB_SMI_DISCARD)
			goto out;
	} else {
		if ((ib_get_smp_direction(smp) ? smp->dr_dlid : smp->dr_slid) ==
		     IB_LID_PERMISSIVE &&
		     smi_handle_dr_smp_send(smp, rdma_cap_ib_switch(device), port_num) ==
		     IB_SMI_DISCARD) {
			ret = -EINVAL;
			dev_err(&device->dev, "Invalid directed route\n");
			goto out;
		}
		drslid = be16_to_cpu(smp->dr_slid);

		/* Check to post send on QP or process locally */
		if (smi_check_local_smp(smp, device) == IB_SMI_DISCARD &&
		    smi_check_local_returning_smp(smp, device) == IB_SMI_DISCARD)
			goto out;
	}

	local = kmalloc(sizeof *local, GFP_ATOMIC);
	if (!local) {
		ret = -ENOMEM;
		goto out;
	}
	local->mad_priv = NULL;
	local->recv_mad_agent = NULL;
	mad_priv = alloc_mad_private(mad_size, GFP_ATOMIC);
	if (!mad_priv) {
		ret = -ENOMEM;
		kfree(local);
		goto out;
	}

	build_smp_wc(mad_agent_priv->agent.qp,
		     send_wr->wr.wr_cqe, drslid,
		     send_wr->pkey_index,
		     send_wr->port_num, &mad_wc);

	if (opa && smp->base_version == OPA_MGMT_BASE_VERSION) {
		mad_wc.byte_len = mad_send_wr->send_buf.hdr_len
					+ mad_send_wr->send_buf.data_len
					+ sizeof(struct ib_grh);
	}

	/* No GRH for DR SMP */
	ret = device->ops.process_mad(device, 0, port_num, &mad_wc, NULL,
				      (const struct ib_mad_hdr *)smp, mad_size,
				      (struct ib_mad_hdr *)mad_priv->mad,
				      &mad_size, &out_mad_pkey_index);
	switch (ret) {
	case IB_MAD_RESULT_SUCCESS | IB_MAD_RESULT_REPLY:
		if (ib_response_mad((const struct ib_mad_hdr *)mad_priv->mad) &&
		    mad_agent_priv->agent.recv_handler) {
			local->mad_priv = mad_priv;
			local->recv_mad_agent = mad_agent_priv;
			/*
			 * Reference MAD agent until receive
			 * side of local completion handled
			 */
			atomic_inc(&mad_agent_priv->refcount);
		} else
			kfree(mad_priv);
		break;
	case IB_MAD_RESULT_SUCCESS | IB_MAD_RESULT_CONSUMED:
		kfree(mad_priv);
		break;
	case IB_MAD_RESULT_SUCCESS:
		/* Treat like an incoming receive MAD */
		port_priv = ib_get_mad_port(mad_agent_priv->agent.device,
					    mad_agent_priv->agent.port_num);
		if (port_priv) {
			memcpy(mad_priv->mad, smp, mad_priv->mad_size);
			recv_mad_agent = find_mad_agent(port_priv,
						(const struct ib_mad_hdr *)mad_priv->mad);
		}
		if (!port_priv || !recv_mad_agent) {
			/*
			 * No receiving agent so drop packet and
			 * generate send completion.
			 */
			kfree(mad_priv);
			break;
		}
		local->mad_priv = mad_priv;
		local->recv_mad_agent = recv_mad_agent;
		break;
	default:
		kfree(mad_priv);
		kfree(local);
		ret = -EINVAL;
		goto out;
	}

	local->mad_send_wr = mad_send_wr;
	if (opa) {
		local->mad_send_wr->send_wr.pkey_index = out_mad_pkey_index;
		local->return_wc_byte_len = mad_size;
	}
	/* Reference MAD agent until send side of local completion handled */
	atomic_inc(&mad_agent_priv->refcount);
	/* Queue local completion to local list */
	spin_lock_irqsave(&mad_agent_priv->lock, flags);
	list_add_tail(&local->completion_list, &mad_agent_priv->local_list);
	spin_unlock_irqrestore(&mad_agent_priv->lock, flags);
	queue_work(mad_agent_priv->qp_info->port_priv->wq,
		   &mad_agent_priv->local_work);
	ret = 1;
out:
	return ret;
}

static int get_pad_size(int hdr_len, int data_len, size_t mad_size)
{
	int seg_size, pad;

	seg_size = mad_size - hdr_len;
	if (data_len && seg_size) {
		pad = seg_size - data_len % seg_size;
		return pad == seg_size ? 0 : pad;
	} else
		return seg_size;
}

static void free_send_rmpp_list(struct ib_mad_send_wr_private *mad_send_wr)
{
	struct ib_rmpp_segment *s, *t;

	list_for_each_entry_safe(s, t, &mad_send_wr->rmpp_list, list) {
		list_del(&s->list);
		kfree(s);
	}
}

static int alloc_send_rmpp_list(struct ib_mad_send_wr_private *send_wr,
				size_t mad_size, gfp_t gfp_mask)
{
	struct ib_mad_send_buf *send_buf = &send_wr->send_buf;
	struct ib_rmpp_mad *rmpp_mad = send_buf->mad;
	struct ib_rmpp_segment *seg = NULL;
	int left, seg_size, pad;

	send_buf->seg_size = mad_size - send_buf->hdr_len;
	send_buf->seg_rmpp_size = mad_size - IB_MGMT_RMPP_HDR;
	seg_size = send_buf->seg_size;
	pad = send_wr->pad;

	/* Allocate data segments. */
	for (left = send_buf->data_len + pad; left > 0; left -= seg_size) {
		seg = kmalloc(sizeof (*seg) + seg_size, gfp_mask);
		if (!seg) {
			free_send_rmpp_list(send_wr);
			return -ENOMEM;
		}
		seg->num = ++send_buf->seg_count;
		list_add_tail(&seg->list, &send_wr->rmpp_list);
	}

	/* Zero any padding */
	if (pad)
		memset(seg->data + seg_size - pad, 0, pad);

	rmpp_mad->rmpp_hdr.rmpp_version = send_wr->mad_agent_priv->
					  agent.rmpp_version;
	rmpp_mad->rmpp_hdr.rmpp_type = IB_MGMT_RMPP_TYPE_DATA;
	ib_set_rmpp_flags(&rmpp_mad->rmpp_hdr, IB_MGMT_RMPP_FLAG_ACTIVE);

	send_wr->cur_seg = container_of(send_wr->rmpp_list.next,
					struct ib_rmpp_segment, list);
	send_wr->last_ack_seg = send_wr->cur_seg;
	return 0;
}

int ib_mad_kernel_rmpp_agent(const struct ib_mad_agent *agent)
{
	return agent->rmpp_version && !(agent->flags & IB_MAD_USER_RMPP);
}
EXPORT_SYMBOL(ib_mad_kernel_rmpp_agent);
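
/*
 * Note: an agent can be RMPP-capable in two ways. If it registered with a
 * nonzero rmpp_version and without IB_MAD_USER_RMPP, the kernel segments
 * and reassembles transfers on its behalf (see mad_rmpp.c); with
 * IB_MAD_USER_RMPP set, the client (e.g. userspace via ib_umad) handles
 * RMPP itself and the core passes segments through unmodified.
 */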

struct ib_mad_send_buf *ib_create_send_mad(struct ib_mad_agent *mad_agent,
					   u32 remote_qpn, u16 pkey_index,
					   int rmpp_active,
					   int hdr_len, int data_len,
					   gfp_t gfp_mask,
					   u8 base_version)
{
	struct ib_mad_agent_private *mad_agent_priv;
	struct ib_mad_send_wr_private *mad_send_wr;
	int pad, message_size, ret, size;
	void *buf;
	size_t mad_size;
	bool opa;

	mad_agent_priv = container_of(mad_agent, struct ib_mad_agent_private,
				      agent);

	opa = rdma_cap_opa_mad(mad_agent->device, mad_agent->port_num);

	if (opa && base_version == OPA_MGMT_BASE_VERSION)
		mad_size = sizeof(struct opa_mad);
	else
		mad_size = sizeof(struct ib_mad);

	pad = get_pad_size(hdr_len, data_len, mad_size);
	message_size = hdr_len + data_len + pad;

	if (ib_mad_kernel_rmpp_agent(mad_agent)) {
		if (!rmpp_active && message_size > mad_size)
			return ERR_PTR(-EINVAL);
	} else
		if (rmpp_active || message_size > mad_size)
			return ERR_PTR(-EINVAL);

	size = rmpp_active ? hdr_len : mad_size;
	buf = kzalloc(sizeof *mad_send_wr + size, gfp_mask);
	if (!buf)
		return ERR_PTR(-ENOMEM);

	mad_send_wr = buf + size;
	INIT_LIST_HEAD(&mad_send_wr->rmpp_list);
	mad_send_wr->send_buf.mad = buf;
	mad_send_wr->send_buf.hdr_len = hdr_len;
	mad_send_wr->send_buf.data_len = data_len;
	mad_send_wr->pad = pad;

	mad_send_wr->mad_agent_priv = mad_agent_priv;
	mad_send_wr->sg_list[0].length = hdr_len;
	mad_send_wr->sg_list[0].lkey = mad_agent->qp->pd->local_dma_lkey;

	/* OPA MADs don't have to be the full 2048 bytes */
	if (opa && base_version == OPA_MGMT_BASE_VERSION &&
	    data_len < mad_size - hdr_len)
		mad_send_wr->sg_list[1].length = data_len;
	else
		mad_send_wr->sg_list[1].length = mad_size - hdr_len;

	mad_send_wr->sg_list[1].lkey = mad_agent->qp->pd->local_dma_lkey;

	mad_send_wr->mad_list.cqe.done = ib_mad_send_done;

	mad_send_wr->send_wr.wr.wr_cqe = &mad_send_wr->mad_list.cqe;
	mad_send_wr->send_wr.wr.sg_list = mad_send_wr->sg_list;
	mad_send_wr->send_wr.wr.num_sge = 2;
	mad_send_wr->send_wr.wr.opcode = IB_WR_SEND;
	mad_send_wr->send_wr.wr.send_flags = IB_SEND_SIGNALED;
	mad_send_wr->send_wr.remote_qpn = remote_qpn;
	mad_send_wr->send_wr.remote_qkey = IB_QP_SET_QKEY;
	mad_send_wr->send_wr.pkey_index = pkey_index;

	if (rmpp_active) {
		ret = alloc_send_rmpp_list(mad_send_wr, mad_size, gfp_mask);
		if (ret) {
			kfree(buf);
			return ERR_PTR(ret);
		}
	}

	mad_send_wr->send_buf.mad_agent = mad_agent;
	atomic_inc(&mad_agent_priv->refcount);
	return &mad_send_wr->send_buf;
}
EXPORT_SYMBOL(ib_create_send_mad);

int ib_get_mad_data_offset(u8 mgmt_class)
{
	if (mgmt_class == IB_MGMT_CLASS_SUBN_ADM)
		return IB_MGMT_SA_HDR;
	else if ((mgmt_class == IB_MGMT_CLASS_DEVICE_MGMT) ||
		 (mgmt_class == IB_MGMT_CLASS_DEVICE_ADM) ||
		 (mgmt_class == IB_MGMT_CLASS_BIS))
		return IB_MGMT_DEVICE_HDR;
	else if ((mgmt_class >= IB_MGMT_CLASS_VENDOR_RANGE2_START) &&
		 (mgmt_class <= IB_MGMT_CLASS_VENDOR_RANGE2_END))
		return IB_MGMT_VENDOR_HDR;
	else
		return IB_MGMT_MAD_HDR;
}
EXPORT_SYMBOL(ib_get_mad_data_offset);
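
/*
 * Illustrative sketch (not part of this file): ib_get_mad_data_offset()
 * returns the class-specific header length, i.e. where the payload
 * starts within a MAD. For example, for an SA MAD the data begins
 * IB_MGMT_SA_HDR bytes in:
 *
 *	struct ib_mad *mad = ...;
 *	void *data = (u8 *)mad +
 *		     ib_get_mad_data_offset(mad->mad_hdr.mgmt_class);
 */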

int ib_is_mad_class_rmpp(u8 mgmt_class)
{
	if ((mgmt_class == IB_MGMT_CLASS_SUBN_ADM) ||
	    (mgmt_class == IB_MGMT_CLASS_DEVICE_MGMT) ||
	    (mgmt_class == IB_MGMT_CLASS_DEVICE_ADM) ||
	    (mgmt_class == IB_MGMT_CLASS_BIS) ||
	    ((mgmt_class >= IB_MGMT_CLASS_VENDOR_RANGE2_START) &&
	     (mgmt_class <= IB_MGMT_CLASS_VENDOR_RANGE2_END)))
		return 1;
	return 0;
}
EXPORT_SYMBOL(ib_is_mad_class_rmpp);

void *ib_get_rmpp_segment(struct ib_mad_send_buf *send_buf, int seg_num)
{
	struct ib_mad_send_wr_private *mad_send_wr;
	struct list_head *list;

	mad_send_wr = container_of(send_buf, struct ib_mad_send_wr_private,
				   send_buf);
	list = &mad_send_wr->cur_seg->list;

	if (mad_send_wr->cur_seg->num < seg_num) {
		list_for_each_entry(mad_send_wr->cur_seg, list, list)
			if (mad_send_wr->cur_seg->num == seg_num)
				break;
	} else if (mad_send_wr->cur_seg->num > seg_num) {
		list_for_each_entry_reverse(mad_send_wr->cur_seg, list, list)
			if (mad_send_wr->cur_seg->num == seg_num)
				break;
	}
	return mad_send_wr->cur_seg->data;
}
EXPORT_SYMBOL(ib_get_rmpp_segment);

static inline void *ib_get_payload(struct ib_mad_send_wr_private *mad_send_wr)
{
	if (mad_send_wr->send_buf.seg_count)
		return ib_get_rmpp_segment(&mad_send_wr->send_buf,
					   mad_send_wr->seg_num);
	else
		return mad_send_wr->send_buf.mad +
		       mad_send_wr->send_buf.hdr_len;
}

void ib_free_send_mad(struct ib_mad_send_buf *send_buf)
{
	struct ib_mad_agent_private *mad_agent_priv;
	struct ib_mad_send_wr_private *mad_send_wr;

	mad_agent_priv = container_of(send_buf->mad_agent,
				      struct ib_mad_agent_private, agent);
	mad_send_wr = container_of(send_buf, struct ib_mad_send_wr_private,
				   send_buf);

	free_send_rmpp_list(mad_send_wr);
	kfree(send_buf->mad);
	deref_mad_agent(mad_agent_priv);
}
EXPORT_SYMBOL(ib_free_send_mad);

int ib_send_mad(struct ib_mad_send_wr_private *mad_send_wr)
{
	struct ib_mad_qp_info *qp_info;
	struct list_head *list;
	struct ib_mad_agent *mad_agent;
	struct ib_sge *sge;
	unsigned long flags;
	int ret;

	/* Set WR ID to find mad_send_wr upon completion */
	qp_info = mad_send_wr->mad_agent_priv->qp_info;
	mad_send_wr->mad_list.mad_queue = &qp_info->send_queue;
	mad_send_wr->mad_list.cqe.done = ib_mad_send_done;
	mad_send_wr->send_wr.wr.wr_cqe = &mad_send_wr->mad_list.cqe;

	mad_agent = mad_send_wr->send_buf.mad_agent;
	sge = mad_send_wr->sg_list;
	sge[0].addr = ib_dma_map_single(mad_agent->device,
					mad_send_wr->send_buf.mad,
					sge[0].length,
					DMA_TO_DEVICE);
	if (unlikely(ib_dma_mapping_error(mad_agent->device, sge[0].addr)))
		return -ENOMEM;

	mad_send_wr->header_mapping = sge[0].addr;

	sge[1].addr = ib_dma_map_single(mad_agent->device,
					ib_get_payload(mad_send_wr),
					sge[1].length,
					DMA_TO_DEVICE);
	if (unlikely(ib_dma_mapping_error(mad_agent->device, sge[1].addr))) {
		ib_dma_unmap_single(mad_agent->device,
				    mad_send_wr->header_mapping,
				    sge[0].length, DMA_TO_DEVICE);
		return -ENOMEM;
	}
	mad_send_wr->payload_mapping = sge[1].addr;

	spin_lock_irqsave(&qp_info->send_queue.lock, flags);
	if (qp_info->send_queue.count < qp_info->send_queue.max_active) {
		ret = ib_post_send(mad_agent->qp, &mad_send_wr->send_wr.wr,
				   NULL);
		list = &qp_info->send_queue.list;
	} else {
		ret = 0;
		list = &qp_info->overflow_list;
	}

	if (!ret) {
		qp_info->send_queue.count++;
		list_add_tail(&mad_send_wr->mad_list.list, list);
	}
	spin_unlock_irqrestore(&qp_info->send_queue.lock, flags);
	if (ret) {
		ib_dma_unmap_single(mad_agent->device,
				    mad_send_wr->header_mapping,
				    sge[0].length, DMA_TO_DEVICE);
		ib_dma_unmap_single(mad_agent->device,
				    mad_send_wr->payload_mapping,
				    sge[1].length, DMA_TO_DEVICE);
	}
	return ret;
}

/*
 * ib_post_send_mad - Posts MAD(s) to the send queue of the QP associated
 *  with the registered client
 */
int ib_post_send_mad(struct ib_mad_send_buf *send_buf,
		     struct ib_mad_send_buf **bad_send_buf)
{
	struct ib_mad_agent_private *mad_agent_priv;
	struct ib_mad_send_buf *next_send_buf;
	struct ib_mad_send_wr_private *mad_send_wr;
	unsigned long flags;
	int ret = -EINVAL;

	/* Walk list of send WRs and post each on send list */
	for (; send_buf; send_buf = next_send_buf) {
		mad_send_wr = container_of(send_buf,
					   struct ib_mad_send_wr_private,
					   send_buf);
		mad_agent_priv = mad_send_wr->mad_agent_priv;

		ret = ib_mad_enforce_security(mad_agent_priv,
					      mad_send_wr->send_wr.pkey_index);
		if (ret)
			goto error;

		if (!send_buf->mad_agent->send_handler ||
		    (send_buf->timeout_ms &&
		     !send_buf->mad_agent->recv_handler)) {
			ret = -EINVAL;
			goto error;
		}

		if (!ib_is_mad_class_rmpp(((struct ib_mad_hdr *) send_buf->mad)->mgmt_class)) {
			if (mad_agent_priv->agent.rmpp_version) {
				ret = -EINVAL;
				goto error;
			}
		}

		/*
		 * Save pointer to next work request to post in case the
		 * current one completes, and the user modifies the work
		 * request associated with the completion
		 */
		next_send_buf = send_buf->next;
		mad_send_wr->send_wr.ah = send_buf->ah;

		if (((struct ib_mad_hdr *) send_buf->mad)->mgmt_class ==
		    IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE) {
			ret = handle_outgoing_dr_smp(mad_agent_priv,
						     mad_send_wr);
			if (ret < 0)		/* error */
				goto error;
			else if (ret == 1)	/* locally consumed */
				continue;
		}

		mad_send_wr->tid = ((struct ib_mad_hdr *) send_buf->mad)->tid;
		/* Timeout will be updated after send completes */
		mad_send_wr->timeout = msecs_to_jiffies(send_buf->timeout_ms);
		mad_send_wr->max_retries = send_buf->retries;
		mad_send_wr->retries_left = send_buf->retries;
		send_buf->retries = 0;
		/* Reference for work request to QP + response */
		mad_send_wr->refcount = 1 + (mad_send_wr->timeout > 0);
		mad_send_wr->status = IB_WC_SUCCESS;

		/* Reference MAD agent until send completes */
		atomic_inc(&mad_agent_priv->refcount);
		spin_lock_irqsave(&mad_agent_priv->lock, flags);
		list_add_tail(&mad_send_wr->agent_list,
			      &mad_agent_priv->send_list);
		spin_unlock_irqrestore(&mad_agent_priv->lock, flags);

		if (ib_mad_kernel_rmpp_agent(&mad_agent_priv->agent)) {
			ret = ib_send_rmpp_mad(mad_send_wr);
			if (ret >= 0 && ret != IB_RMPP_RESULT_CONSUMED)
				ret = ib_send_mad(mad_send_wr);
		} else
			ret = ib_send_mad(mad_send_wr);
		if (ret < 0) {
			/* Fail send request */
			spin_lock_irqsave(&mad_agent_priv->lock, flags);
			list_del(&mad_send_wr->agent_list);
			spin_unlock_irqrestore(&mad_agent_priv->lock, flags);
			atomic_dec(&mad_agent_priv->refcount);
			goto error;
		}
	}
	return 0;
error:
	if (bad_send_buf)
		*bad_send_buf = send_buf;
	return ret;
}
EXPORT_SYMBOL(ib_post_send_mad);
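
/*
 * Illustrative sketch (not part of this file, compiled out): allocating,
 * filling, and posting a single unsegmented MAD. The function name is
 * hypothetical; the agent and address handle must come from the caller,
 * and filling in the class header/payload is elided.
 */
#if 0
static int example_send(struct ib_mad_agent *agent, struct ib_ah *ah)
{
	struct ib_mad_send_buf *msg;
	int ret;

	msg = ib_create_send_mad(agent, 1, 0, 0, IB_MGMT_DEVICE_HDR,
				 IB_MGMT_DEVICE_DATA, GFP_KERNEL,
				 IB_MGMT_BASE_VERSION);
	if (IS_ERR(msg))
		return PTR_ERR(msg);

	msg->ah = ah;
	msg->timeout_ms = 100;	/* wait for a response before retrying */
	msg->retries = 3;
	/* Fill in msg->mad (class header + payload) here. */

	ret = ib_post_send_mad(msg, NULL);
	if (ret)
		ib_free_send_mad(msg);	/* core does not own it on failure */
	return ret;
}
#endif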
1346
1347/*
1348 * ib_free_recv_mad - Returns data buffers used to receive
1349 * a MAD to the access layer
1350 */
1351void ib_free_recv_mad(struct ib_mad_recv_wc *mad_recv_wc)
1352{
fa619a77 1353 struct ib_mad_recv_buf *mad_recv_buf, *temp_recv_buf;
1da177e4
LT
1354 struct ib_mad_private_header *mad_priv_hdr;
1355 struct ib_mad_private *priv;
fa619a77 1356 struct list_head free_list;
1da177e4 1357
fa619a77
HR
1358 INIT_LIST_HEAD(&free_list);
1359 list_splice_init(&mad_recv_wc->rmpp_list, &free_list);
1da177e4 1360
fa619a77
HR
1361 list_for_each_entry_safe(mad_recv_buf, temp_recv_buf,
1362 &free_list, list) {
1363 mad_recv_wc = container_of(mad_recv_buf, struct ib_mad_recv_wc,
1364 recv_buf);
1da177e4
LT
1365 mad_priv_hdr = container_of(mad_recv_wc,
1366 struct ib_mad_private_header,
1367 recv_wc);
1368 priv = container_of(mad_priv_hdr, struct ib_mad_private,
1369 header);
c9082e51 1370 kfree(priv);
1da177e4 1371 }
1da177e4
LT
1372}
1373EXPORT_SYMBOL(ib_free_recv_mad);
1374
1da177e4
LT
1375struct ib_mad_agent *ib_redirect_mad_qp(struct ib_qp *qp,
1376 u8 rmpp_version,
1377 ib_mad_send_handler send_handler,
1378 ib_mad_recv_handler recv_handler,
1379 void *context)
1380{
1381 return ERR_PTR(-EINVAL); /* XXX: for now */
1382}
1383EXPORT_SYMBOL(ib_redirect_mad_qp);
1384
1385int ib_process_mad_wc(struct ib_mad_agent *mad_agent,
1386 struct ib_wc *wc)
1387{
7ef5d4b0
IW
1388 dev_err(&mad_agent->device->dev,
1389 "ib_process_mad_wc() not implemented yet\n");
1da177e4
LT
1390 return 0;
1391}
1392EXPORT_SYMBOL(ib_process_mad_wc);
1393
1394static int method_in_use(struct ib_mad_mgmt_method_table **method,
1395 struct ib_mad_reg_req *mad_reg_req)
1396{
1397 int i;
1398
19b629f5 1399 for_each_set_bit(i, mad_reg_req->method_mask, IB_MGMT_MAX_METHODS) {
1da177e4 1400 if ((*method)->agent[i]) {
7ef5d4b0 1401 pr_err("Method %d already in use\n", i);
1da177e4
LT
1402 return -EINVAL;
1403 }
1404 }
1405 return 0;
1406}
1407
1408static int allocate_method_table(struct ib_mad_mgmt_method_table **method)
1409{
1410 /* Allocate management method table */
de6eb66b 1411 *method = kzalloc(sizeof **method, GFP_ATOMIC);
27162432 1412 return (*method) ? 0 : (-ENOMEM);
1da177e4
LT
1413}
1414
1415/*
1416 * Check to see if there are any methods still in use
1417 */
1418static int check_method_table(struct ib_mad_mgmt_method_table *method)
1419{
1420 int i;
1421
1422 for (i = 0; i < IB_MGMT_MAX_METHODS; i++)
1423 if (method->agent[i])
1424 return 1;
1425 return 0;
1426}
1427
1428/*
1429 * Check to see if there are any method tables for this class still in use
1430 */
1431static int check_class_table(struct ib_mad_mgmt_class_table *class)
1432{
1433 int i;
1434
1435 for (i = 0; i < MAX_MGMT_CLASS; i++)
1436 if (class->method_table[i])
1437 return 1;
1438 return 0;
1439}
1440
1441static int check_vendor_class(struct ib_mad_mgmt_vendor_class *vendor_class)
1442{
1443 int i;
1444
1445 for (i = 0; i < MAX_MGMT_OUI; i++)
1446 if (vendor_class->method_table[i])
1447 return 1;
1448 return 0;
1449}
1450
1451static int find_vendor_oui(struct ib_mad_mgmt_vendor_class *vendor_class,
d94bd266 1452 const char *oui)
1da177e4
LT
1453{
1454 int i;
1455
1456 for (i = 0; i < MAX_MGMT_OUI; i++)
3cd96564
RD
1457 /* Is there matching OUI for this vendor class ? */
1458 if (!memcmp(vendor_class->oui[i], oui, 3))
1da177e4
LT
1459 return i;
1460
1461 return -1;
1462}
1463
1464static int check_vendor_table(struct ib_mad_mgmt_vendor_class_table *vendor)
1465{
1466 int i;
1467
1468 for (i = 0; i < MAX_MGMT_VENDOR_RANGE2; i++)
1469 if (vendor->vendor_class[i])
1470 return 1;
1471
1472 return 0;
1473}
1474
1475static void remove_methods_mad_agent(struct ib_mad_mgmt_method_table *method,
1476 struct ib_mad_agent_private *agent)
1477{
1478 int i;
1479
1480 /* Remove any methods for this mad agent */
1481 for (i = 0; i < IB_MGMT_MAX_METHODS; i++) {
1482 if (method->agent[i] == agent) {
1483 method->agent[i] = NULL;
1484 }
1485 }
1486}
1487
1488static int add_nonoui_reg_req(struct ib_mad_reg_req *mad_reg_req,
1489 struct ib_mad_agent_private *agent_priv,
1490 u8 mgmt_class)
1491{
1492 struct ib_mad_port_private *port_priv;
1493 struct ib_mad_mgmt_class_table **class;
1494 struct ib_mad_mgmt_method_table **method;
1495 int i, ret;
1496
1497 port_priv = agent_priv->qp_info->port_priv;
1498 class = &port_priv->version[mad_reg_req->mgmt_class_version].class;
1499 if (!*class) {
1500 /* Allocate management class table for "new" class version */
de6eb66b 1501 *class = kzalloc(sizeof **class, GFP_ATOMIC);
1da177e4 1502 if (!*class) {
1da177e4
LT
1503 ret = -ENOMEM;
1504 goto error1;
1505 }
de6eb66b 1506
1da177e4
LT
1507 /* Allocate method table for this management class */
1508 method = &(*class)->method_table[mgmt_class];
1509 if ((ret = allocate_method_table(method)))
1510 goto error2;
1511 } else {
1512 method = &(*class)->method_table[mgmt_class];
1513 if (!*method) {
1514 /* Allocate method table for this management class */
1515 if ((ret = allocate_method_table(method)))
1516 goto error1;
1517 }
1518 }
1519
1520 /* Now, make sure methods are not already in use */
1521 if (method_in_use(method, mad_reg_req))
1522 goto error3;
1523
1524 /* Finally, add in methods being registered */
19b629f5 1525 for_each_set_bit(i, mad_reg_req->method_mask, IB_MGMT_MAX_METHODS)
1da177e4 1526 (*method)->agent[i] = agent_priv;
19b629f5 1527
1da177e4
LT
1528 return 0;
1529
1530error3:
1531 /* Remove any methods for this mad agent */
1532 remove_methods_mad_agent(*method, agent_priv);
1533 /* Now, check to see if there are any methods in use */
1534 if (!check_method_table(*method)) {
1535 /* If not, release management method table */
1536 kfree(*method);
1537 *method = NULL;
1538 }
1539 ret = -EINVAL;
1540 goto error1;
1541error2:
1542 kfree(*class);
1543 *class = NULL;
1544error1:
1545 return ret;
1546}
1547
1548static int add_oui_reg_req(struct ib_mad_reg_req *mad_reg_req,
1549 struct ib_mad_agent_private *agent_priv)
1550{
1551 struct ib_mad_port_private *port_priv;
1552 struct ib_mad_mgmt_vendor_class_table **vendor_table;
1553 struct ib_mad_mgmt_vendor_class_table *vendor = NULL;
1554 struct ib_mad_mgmt_vendor_class *vendor_class = NULL;
1555 struct ib_mad_mgmt_method_table **method;
1556 int i, ret = -ENOMEM;
1557 u8 vclass;
1558
1559 /* "New" vendor (with OUI) class */
1560 vclass = vendor_class_index(mad_reg_req->mgmt_class);
1561 port_priv = agent_priv->qp_info->port_priv;
1562 vendor_table = &port_priv->version[
1563 mad_reg_req->mgmt_class_version].vendor;
1564 if (!*vendor_table) {
1565 /* Allocate mgmt vendor class table for "new" class version */
de6eb66b 1566 vendor = kzalloc(sizeof *vendor, GFP_ATOMIC);
27162432 1567 if (!vendor)
1da177e4 1568 goto error1;
de6eb66b 1569
1da177e4
LT
1570 *vendor_table = vendor;
1571 }
1572 if (!(*vendor_table)->vendor_class[vclass]) {
1573 /* Allocate table for this management vendor class */
de6eb66b 1574 vendor_class = kzalloc(sizeof *vendor_class, GFP_ATOMIC);
27162432 1575 if (!vendor_class)
1da177e4 1576 goto error2;
de6eb66b 1577
1da177e4
LT
1578 (*vendor_table)->vendor_class[vclass] = vendor_class;
1579 }
1580 for (i = 0; i < MAX_MGMT_OUI; i++) {
1581 /* Is there matching OUI for this vendor class ? */
1582 if (!memcmp((*vendor_table)->vendor_class[vclass]->oui[i],
1583 mad_reg_req->oui, 3)) {
1584 method = &(*vendor_table)->vendor_class[
1585 vclass]->method_table[i];
2468b82d
LR
1586 if (!*method)
1587 goto error3;
1da177e4
LT
1588 goto check_in_use;
1589 }
1590 }
1591 for (i = 0; i < MAX_MGMT_OUI; i++) {
1592 /* OUI slot available ? */
1593 if (!is_vendor_oui((*vendor_table)->vendor_class[
1594 vclass]->oui[i])) {
1595 method = &(*vendor_table)->vendor_class[
1596 vclass]->method_table[i];
1da177e4 1597 /* Allocate method table for this OUI */
2468b82d
LR
1598 if (!*method) {
1599 ret = allocate_method_table(method);
1600 if (ret)
1601 goto error3;
1602 }
1da177e4
LT
1603 memcpy((*vendor_table)->vendor_class[vclass]->oui[i],
1604 mad_reg_req->oui, 3);
1605 goto check_in_use;
1606 }
1607 }
7ef5d4b0 1608 dev_err(&agent_priv->agent.device->dev, "All OUI slots in use\n");
1da177e4
LT
1609 goto error3;
1610
1611check_in_use:
1612 /* Now, make sure methods are not already in use */
1613 if (method_in_use(method, mad_reg_req))
1614 goto error4;
1615
1616 /* Finally, add in methods being registered */
19b629f5 1617 for_each_set_bit(i, mad_reg_req->method_mask, IB_MGMT_MAX_METHODS)
1da177e4 1618 (*method)->agent[i] = agent_priv;
19b629f5 1619
1da177e4
LT
1620 return 0;
1621
1622error4:
1623 /* Remove any methods for this mad agent */
1624 remove_methods_mad_agent(*method, agent_priv);
1625 /* Now, check to see if there are any methods in use */
1626 if (!check_method_table(*method)) {
1627 /* If not, release management method table */
1628 kfree(*method);
1629 *method = NULL;
1630 }
1631 ret = -EINVAL;
1632error3:
1633 if (vendor_class) {
1634 (*vendor_table)->vendor_class[vclass] = NULL;
1635 kfree(vendor_class);
1636 }
1637error2:
1638 if (vendor) {
1639 *vendor_table = NULL;
1640 kfree(vendor);
1641 }
1642error1:
1643 return ret;
1644}
1645
1646static void remove_mad_reg_req(struct ib_mad_agent_private *agent_priv)
1647{
1648 struct ib_mad_port_private *port_priv;
1649 struct ib_mad_mgmt_class_table *class;
1650 struct ib_mad_mgmt_method_table *method;
1651 struct ib_mad_mgmt_vendor_class_table *vendor;
1652 struct ib_mad_mgmt_vendor_class *vendor_class;
1653 int index;
1654 u8 mgmt_class;
1655
1656 /*
1657 * Was MAD registration request supplied
1658 * with original registration ?
1659 */
1660 if (!agent_priv->reg_req) {
1661 goto out;
1662 }
1663
1664 port_priv = agent_priv->qp_info->port_priv;
1665 mgmt_class = convert_mgmt_class(agent_priv->reg_req->mgmt_class);
1666 class = port_priv->version[
1667 agent_priv->reg_req->mgmt_class_version].class;
1668 if (!class)
1669 goto vendor_check;
1670
1671 method = class->method_table[mgmt_class];
1672 if (method) {
1673 /* Remove any methods for this mad agent */
1674 remove_methods_mad_agent(method, agent_priv);
1675 /* Now, check to see if there are any methods still in use */
1676 if (!check_method_table(method)) {
1677 /* If not, release management method table */
2190d10d
BVA
1678 kfree(method);
1679 class->method_table[mgmt_class] = NULL;
1680 /* Any management classes left ? */
1da177e4
LT
1681 if (!check_class_table(class)) {
1682 /* If not, release management class table */
1683 kfree(class);
1684 port_priv->version[
1685 agent_priv->reg_req->
1686 mgmt_class_version].class = NULL;
1687 }
1688 }
1689 }
1690
1691vendor_check:
1692 if (!is_vendor_class(mgmt_class))
1693 goto out;
1694
1695 /* normalize mgmt_class to vendor range 2 */
1696 mgmt_class = vendor_class_index(agent_priv->reg_req->mgmt_class);
1697 vendor = port_priv->version[
1698 agent_priv->reg_req->mgmt_class_version].vendor;
1699
1700 if (!vendor)
1701 goto out;
1702
1703 vendor_class = vendor->vendor_class[mgmt_class];
1704 if (vendor_class) {
1705 index = find_vendor_oui(vendor_class, agent_priv->reg_req->oui);
1706 if (index < 0)
1707 goto out;
1708 method = vendor_class->method_table[index];
1709 if (method) {
1710 /* Remove any methods for this mad agent */
1711 remove_methods_mad_agent(method, agent_priv);
1712 /*
1713 * Now, check to see if there are
1714 * any methods still in use
1715 */
1716 if (!check_method_table(method)) {
1717 /* If not, release management method table */
1718 kfree(method);
1719 vendor_class->method_table[index] = NULL;
1720 memset(vendor_class->oui[index], 0, 3);
 1721 /* Any OUIs left? */
1722 if (!check_vendor_class(vendor_class)) {
1723 /* If not, release vendor class table */
1724 kfree(vendor_class);
1725 vendor->vendor_class[mgmt_class] = NULL;
 1726 /* Any other vendor classes left? */
1727 if (!check_vendor_table(vendor)) {
1728 kfree(vendor);
1729 port_priv->version[
1730 agent_priv->reg_req->
1731 mgmt_class_version].
1732 vendor = NULL;
1733 }
1734 }
1735 }
1736 }
1737 }
1738
1739out:
1740 return;
1741}
1742
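/*
 * Route a received MAD to its registered agent.  Responses are looked up
 * by the high 32 bits of the TID (the agent ID in the ib_mad_clients IDR);
 * requests go through the per-port class/vendor method tables.  On success
 * a reference is taken that the caller drops via deref_mad_agent().
 */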
1743static struct ib_mad_agent_private *
1744find_mad_agent(struct ib_mad_port_private *port_priv,
d94bd266 1745 const struct ib_mad_hdr *mad_hdr)
1746{
1747 struct ib_mad_agent_private *mad_agent = NULL;
1748 unsigned long flags;
1749
d94bd266 1750 if (ib_response_mad(mad_hdr)) {
1da177e4 1751 u32 hi_tid;
1752
1753 /*
 1754 * Routing is based on the high 32 bits of
 1755 * the MAD's transaction ID.
1756 */
d94bd266 1757 hi_tid = be64_to_cpu(mad_hdr->tid) >> 32;
9a41e38a 1758 rcu_read_lock();
1759 mad_agent = idr_find(&ib_mad_clients, hi_tid);
1760 if (mad_agent && !atomic_inc_not_zero(&mad_agent->refcount))
1761 mad_agent = NULL;
1762 rcu_read_unlock();
1763 } else {
1764 struct ib_mad_mgmt_class_table *class;
1765 struct ib_mad_mgmt_method_table *method;
1766 struct ib_mad_mgmt_vendor_class_table *vendor;
1767 struct ib_mad_mgmt_vendor_class *vendor_class;
d94bd266 1768 const struct ib_vendor_mad *vendor_mad;
1769 int index;
1770
9a41e38a 1771 spin_lock_irqsave(&port_priv->reg_lock, flags);
1772 /*
 1773 * Routing is based on version, class, and method.
 1774 * For "newer" vendor MADs, it is also based on the OUI.
1775 */
d94bd266 1776 if (mad_hdr->class_version >= MAX_MGMT_VERSION)
1da177e4 1777 goto out;
d94bd266 1778 if (!is_vendor_class(mad_hdr->mgmt_class)) {
1da177e4 1779 class = port_priv->version[
d94bd266 1780 mad_hdr->class_version].class;
1781 if (!class)
1782 goto out;
d94bd266 1783 if (convert_mgmt_class(mad_hdr->mgmt_class) >=
2fe2f378 1784 ARRAY_SIZE(class->method_table))
b7ab0b19 1785 goto out;
1da177e4 1786 method = class->method_table[convert_mgmt_class(
d94bd266 1787 mad_hdr->mgmt_class)];
1da177e4 1788 if (method)
d94bd266 1789 mad_agent = method->agent[mad_hdr->method &
1790 ~IB_MGMT_METHOD_RESP];
1791 } else {
1792 vendor = port_priv->version[
d94bd266 1793 mad_hdr->class_version].vendor;
1794 if (!vendor)
1795 goto out;
1796 vendor_class = vendor->vendor_class[vendor_class_index(
d94bd266 1797 mad_hdr->mgmt_class)];
1798 if (!vendor_class)
1799 goto out;
1800 /* Find matching OUI */
d94bd266 1801 vendor_mad = (const struct ib_vendor_mad *)mad_hdr;
1802 index = find_vendor_oui(vendor_class, vendor_mad->oui);
1803 if (index == -1)
1804 goto out;
1805 method = vendor_class->method_table[index];
1806 if (method) {
d94bd266 1807 mad_agent = method->agent[mad_hdr->method &
1808 ~IB_MGMT_METHOD_RESP];
1809 }
1810 }
9a41e38a 1811 if (mad_agent)
1812 atomic_inc(&mad_agent->refcount);
1813out:
1814 spin_unlock_irqrestore(&port_priv->reg_lock, flags);
1815 }
1816
9a41e38a 1817 if (mad_agent && !mad_agent->agent.recv_handler) {
1818 dev_notice(&port_priv->device->dev,
1819 "No receive handler for client %p on port %d\n",
1820 &mad_agent->agent, port_priv->port_num);
1821 deref_mad_agent(mad_agent);
1822 mad_agent = NULL;
1da177e4 1823 }
1da177e4
LT
1824
1825 return mad_agent;
1826}
1827
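/*
 * Basic sanity checks on a received MAD: the base version must be one we
 * understand (IB, or OPA when the port supports it), SMP classes may only
 * arrive on QP0, everything else only on QP1, and CM attributes other
 * than ClassPortInfo must use the Send method.
 */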
1828static int validate_mad(const struct ib_mad_hdr *mad_hdr,
1829 const struct ib_mad_qp_info *qp_info,
1830 bool opa)
1831{
1832 int valid = 0;
8e4349d1 1833 u32 qp_num = qp_info->qp->qp_num;
1834
1835 /* Make sure MAD base version is understood */
1836 if (mad_hdr->base_version != IB_MGMT_BASE_VERSION &&
1837 (!opa || mad_hdr->base_version != OPA_MGMT_BASE_VERSION)) {
1838 pr_err("MAD received with unsupported base version %d %s\n",
1839 mad_hdr->base_version, opa ? "(opa)" : "");
1840 goto out;
1841 }
1842
 1843 /* Filter SMI packets sent to QPs other than QP0 */
1844 if ((mad_hdr->mgmt_class == IB_MGMT_CLASS_SUBN_LID_ROUTED) ||
1845 (mad_hdr->mgmt_class == IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE)) {
1846 if (qp_num == 0)
1847 valid = 1;
1848 } else {
1849 /* CM attributes other than ClassPortInfo only use Send method */
1850 if ((mad_hdr->mgmt_class == IB_MGMT_CLASS_CM) &&
1851 (mad_hdr->attr_id != IB_MGMT_CLASSPORTINFO_ATTR_ID) &&
1852 (mad_hdr->method != IB_MGMT_METHOD_SEND))
1853 goto out;
1854 /* Filter GSI packets sent to QP0 */
1855 if (qp_num != 0)
1856 valid = 1;
1857 }
1858
1859out:
1860 return valid;
1861}
1862
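/*
 * True unless this MAD is a non-DATA segment of a kernel-managed RMPP
 * transfer, i.e. it actually carries payload rather than RMPP protocol.
 */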
1863static int is_rmpp_data_mad(const struct ib_mad_agent_private *mad_agent_priv,
1864 const struct ib_mad_hdr *mad_hdr)
1865{
1866 struct ib_rmpp_mad *rmpp_mad;
1867
1868 rmpp_mad = (struct ib_rmpp_mad *)mad_hdr;
1869 return !mad_agent_priv->agent.rmpp_version ||
1471cb6c 1870 !ib_mad_kernel_rmpp_agent(&mad_agent_priv->agent) ||
1871 !(ib_get_rmpp_flags(&rmpp_mad->rmpp_hdr) &
1872 IB_MGMT_RMPP_FLAG_ACTIVE) ||
1873 (rmpp_mad->rmpp_hdr.rmpp_type == IB_MGMT_RMPP_TYPE_DATA);
1874}
1875
1876static inline int rcv_has_same_class(const struct ib_mad_send_wr_private *wr,
1877 const struct ib_mad_recv_wc *rwc)
fa9656bb 1878{
8bf4b30c 1879 return ((struct ib_mad_hdr *)(wr->send_buf.mad))->mgmt_class ==
1880 rwc->recv_buf.mad->mad_hdr.mgmt_class;
1881}
1882
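/*
 * Best-effort check that a received response came from the destination of
 * the candidate send: compare GRH/GID when present, otherwise LID (masking
 * path bits by the port's LMC).  Returns 0 whenever in doubt, so we never
 * match the wrong request.
 */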
1883static inline int rcv_has_same_gid(const struct ib_mad_agent_private *mad_agent_priv,
1884 const struct ib_mad_send_wr_private *wr,
 1885 const struct ib_mad_recv_wc *rwc)
fa9656bb 1886{
90898850 1887 struct rdma_ah_attr attr;
fa9656bb 1888 u8 send_resp, rcv_resp;
1889 union ib_gid sgid;
1890 struct ib_device *device = mad_agent_priv->agent.device;
1891 u8 port_num = mad_agent_priv->agent.port_num;
1892 u8 lmc;
d8966fcd 1893 bool has_grh;
fa9656bb 1894
1895 send_resp = ib_response_mad((struct ib_mad_hdr *)wr->send_buf.mad);
1896 rcv_resp = ib_response_mad(&rwc->recv_buf.mad->mad_hdr);
fa9656bb 1897
1898 if (send_resp == rcv_resp)
1899 /* both requests, or both responses. GIDs different */
1900 return 0;
1901
bfbfd661 1902 if (rdma_query_ah(wr->send_buf.ah, &attr))
1903 /* Assume not equal, to avoid false positives. */
1904 return 0;
1905
1906 has_grh = !!(rdma_ah_get_ah_flags(&attr) & IB_AH_GRH);
1907 if (has_grh != !!(rwc->wc->wc_flags & IB_WC_GRH))
1908 /* one has GID, other does not. Assume different */
1909 return 0;
1910
1911 if (!send_resp && rcv_resp) {
1912 /* is request/response. */
d8966fcd 1913 if (!has_grh) {
1914 if (ib_get_cached_lmc(device, port_num, &lmc))
1915 return 0;
d8966fcd 1916 return (!lmc || !((rdma_ah_get_path_bits(&attr) ^
1917 rwc->wc->dlid_path_bits) &
1918 ((1 << lmc) - 1)));
1919 } else {
1920 const struct ib_global_route *grh =
1921 rdma_ah_read_grh(&attr);
1922
1923 if (rdma_query_gid(device, port_num,
1924 grh->sgid_index, &sgid))
1925 return 0;
1926 return !memcmp(sgid.raw, rwc->recv_buf.grh->dgid.raw,
1927 16);
1928 }
1929 }
1930
1931 if (!has_grh)
1932 return rdma_ah_get_dlid(&attr) == rwc->wc->slid;
9874e746 1933 else
1934 return !memcmp(rdma_ah_read_grh(&attr)->dgid.raw,
1935 rwc->recv_buf.grh->sgid.raw,
1936 16);
1937}
1938
1939static inline int is_direct(u8 class)
1940{
1941 return (class == IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE);
fa9656bb 1942}
9874e746 1943
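/*
 * Match a received response to an outstanding send by TID and management
 * class (and, except for directed-route MADs, by GID/LID).  The send list
 * is scanned as well as the wait list, since a response can arrive before
 * the send completion has been processed.
 */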
fa619a77 1944struct ib_mad_send_wr_private*
1945ib_find_send_mad(const struct ib_mad_agent_private *mad_agent_priv,
1946 const struct ib_mad_recv_wc *wc)
1da177e4 1947{
9874e746 1948 struct ib_mad_send_wr_private *wr;
83a1d228 1949 const struct ib_mad_hdr *mad_hdr;
fa9656bb 1950
83a1d228 1951 mad_hdr = &wc->recv_buf.mad->mad_hdr;
1952
1953 list_for_each_entry(wr, &mad_agent_priv->wait_list, agent_list) {
83a1d228 1954 if ((wr->tid == mad_hdr->tid) &&
1955 rcv_has_same_class(wr, wc) &&
1956 /*
1957 * Don't check GID for direct routed MADs.
1958 * These might have permissive LIDs.
1959 */
83a1d228 1960 (is_direct(mad_hdr->mgmt_class) ||
9874e746 1961 rcv_has_same_gid(mad_agent_priv, wr, wc)))
39798695 1962 return (wr->status == IB_WC_SUCCESS) ? wr : NULL;
1963 }
1964
1965 /*
1966 * It's possible to receive the response before we've
1967 * been notified that the send has completed
1968 */
9874e746 1969 list_for_each_entry(wr, &mad_agent_priv->send_list, agent_list) {
c597eee5 1970 if (is_rmpp_data_mad(mad_agent_priv, wr->send_buf.mad) &&
83a1d228 1971 wr->tid == mad_hdr->tid &&
1972 wr->timeout &&
1973 rcv_has_same_class(wr, wc) &&
1974 /*
1975 * Don't check GID for direct routed MADs.
1976 * These might have permissive LIDs.
1977 */
83a1d228 1978 (is_direct(mad_hdr->mgmt_class) ||
9874e746 1979 rcv_has_same_gid(mad_agent_priv, wr, wc)))
1da177e4 1980 /* Verify request has not been canceled */
9874e746 1981 return (wr->status == IB_WC_SUCCESS) ? wr : NULL;
1982 }
1983 return NULL;
1984}
1985
fa619a77 1986void ib_mark_mad_done(struct ib_mad_send_wr_private *mad_send_wr)
1987{
1988 mad_send_wr->timeout = 0;
1989 if (mad_send_wr->refcount == 1)
1990 list_move_tail(&mad_send_wr->agent_list,
6a0c435e 1991 &mad_send_wr->mad_agent_priv->done_list);
1992}
1993
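/*
 * Deliver a received MAD to its agent: enforce pkey security, run RMPP
 * reassembly for kernel RMPP agents, pair responses with the matching
 * send, and finally invoke the agent's recv_handler.
 */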
1da177e4 1994static void ib_mad_complete_recv(struct ib_mad_agent_private *mad_agent_priv,
4a0754fa 1995 struct ib_mad_recv_wc *mad_recv_wc)
1996{
1997 struct ib_mad_send_wr_private *mad_send_wr;
1998 struct ib_mad_send_wc mad_send_wc;
1999 unsigned long flags;
2000 int ret;
2001
89548bca 2002 INIT_LIST_HEAD(&mad_recv_wc->rmpp_list);
2003 ret = ib_mad_enforce_security(mad_agent_priv,
2004 mad_recv_wc->wc->pkey_index);
2005 if (ret) {
2006 ib_free_recv_mad(mad_recv_wc);
2007 deref_mad_agent(mad_agent_priv);
89548bca 2008 return;
47a2b338 2009 }
1da177e4 2010
fa619a77 2011 list_add(&mad_recv_wc->recv_buf.list, &mad_recv_wc->rmpp_list);
1471cb6c 2012 if (ib_mad_kernel_rmpp_agent(&mad_agent_priv->agent)) {
2013 mad_recv_wc = ib_process_rmpp_recv_wc(mad_agent_priv,
2014 mad_recv_wc);
2015 if (!mad_recv_wc) {
1b52fa98 2016 deref_mad_agent(mad_agent_priv);
2017 return;
2018 }
2019 }
2020
1da177e4 2021 /* Complete corresponding request */
96909308 2022 if (ib_response_mad(&mad_recv_wc->recv_buf.mad->mad_hdr)) {
1da177e4 2023 spin_lock_irqsave(&mad_agent_priv->lock, flags);
fa9656bb 2024 mad_send_wr = ib_find_send_mad(mad_agent_priv, mad_recv_wc);
2025 if (!mad_send_wr) {
2026 spin_unlock_irqrestore(&mad_agent_priv->lock, flags);
2027 if (!ib_mad_kernel_rmpp_agent(&mad_agent_priv->agent)
2028 && ib_is_mad_class_rmpp(mad_recv_wc->recv_buf.mad->mad_hdr.mgmt_class)
2029 && (ib_get_rmpp_flags(&((struct ib_rmpp_mad *)mad_recv_wc->recv_buf.mad)->rmpp_hdr)
2030 & IB_MGMT_RMPP_FLAG_ACTIVE)) {
2031 /* user rmpp is in effect
2032 * and this is an active RMPP MAD
2033 */
2034 mad_agent_priv->agent.recv_handler(
2035 &mad_agent_priv->agent, NULL,
2036 mad_recv_wc);
2037 atomic_dec(&mad_agent_priv->refcount);
2038 } else {
2039 /* not user rmpp, revert to normal behavior and
2040 * drop the mad */
2041 ib_free_recv_mad(mad_recv_wc);
2042 deref_mad_agent(mad_agent_priv);
2043 return;
2044 }
2045 } else {
2046 ib_mark_mad_done(mad_send_wr);
2047 spin_unlock_irqrestore(&mad_agent_priv->lock, flags);
1da177e4 2048
1471cb6c 2049 /* Defined behavior is to complete response before request */
2050 mad_agent_priv->agent.recv_handler(
2051 &mad_agent_priv->agent,
2052 &mad_send_wr->send_buf,
2053 mad_recv_wc);
1471cb6c 2054 atomic_dec(&mad_agent_priv->refcount);
1da177e4 2055
2056 mad_send_wc.status = IB_WC_SUCCESS;
2057 mad_send_wc.vendor_err = 0;
2058 mad_send_wc.send_buf = &mad_send_wr->send_buf;
2059 ib_mad_complete_send_wr(mad_send_wr, &mad_send_wc);
2060 }
1da177e4 2061 } else {
ca281265 2062 mad_agent_priv->agent.recv_handler(&mad_agent_priv->agent, NULL,
4a0754fa 2063 mad_recv_wc);
1b52fa98 2064 deref_mad_agent(mad_agent_priv);
1da177e4 2065 }
2066
2067 return;
2068}
2069
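/*
 * Process a directed-route SMP: discard it, handle it locally, or (on a
 * switch) forward it out the port indicated by the SMP, per the verdicts
 * of the smi_* helpers.
 */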
2070static enum smi_action handle_ib_smi(const struct ib_mad_port_private *port_priv,
2071 const struct ib_mad_qp_info *qp_info,
2072 const struct ib_wc *wc,
2073 int port_num,
2074 struct ib_mad_private *recv,
2075 struct ib_mad_private *response)
2076{
2077 enum smi_forward_action retsmi;
c9082e51 2078 struct ib_smp *smp = (struct ib_smp *)recv->mad;
e11ae8aa 2079
c9082e51 2080 if (smi_handle_dr_smp_recv(smp,
4139032b 2081 rdma_cap_ib_switch(port_priv->device),
2082 port_num,
2083 port_priv->device->phys_port_cnt) ==
2084 IB_SMI_DISCARD)
2085 return IB_SMI_DISCARD;
2086
c9082e51 2087 retsmi = smi_check_forward_dr_smp(smp);
2088 if (retsmi == IB_SMI_LOCAL)
2089 return IB_SMI_HANDLE;
2090
2091 if (retsmi == IB_SMI_SEND) { /* don't forward */
c9082e51 2092 if (smi_handle_dr_smp_send(smp,
4139032b 2093 rdma_cap_ib_switch(port_priv->device),
2094 port_num) == IB_SMI_DISCARD)
2095 return IB_SMI_DISCARD;
2096
c9082e51 2097 if (smi_check_local_smp(smp, port_priv->device) == IB_SMI_DISCARD)
e11ae8aa 2098 return IB_SMI_DISCARD;
4139032b 2099 } else if (rdma_cap_ib_switch(port_priv->device)) {
e11ae8aa 2100 /* forward case for switches */
c9082e51 2101 memcpy(response, recv, mad_priv_size(response));
e11ae8aa 2102 response->header.recv_wc.wc = &response->header.wc;
c9082e51 2103 response->header.recv_wc.recv_buf.mad = (struct ib_mad *)response->mad;
2104 response->header.recv_wc.recv_buf.grh = &response->grh;
2105
c9082e51 2106 agent_send_response((const struct ib_mad_hdr *)response->mad,
e11ae8aa
IW
2107 &response->grh, wc,
2108 port_priv->device,
2109 smi_get_fwd_port(smp),
2110 qp_info->qp->qp_num,
2111 response->mad_size,
2112 false);
2113
2114 return IB_SMI_DISCARD;
2115 }
2116 return IB_SMI_HANDLE;
2117}
2118
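/*
 * No agent claimed this MAD.  For Get/Set requests, turn the receive
 * buffer into a GET_RESP carrying an "unsupported method/attribute"
 * status so the sender is not left waiting for a timeout.
 */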
c9082e51 2119static bool generate_unmatched_resp(const struct ib_mad_private *recv,
2120 struct ib_mad_private *response,
2121 size_t *resp_len, bool opa)
0b307043 2122{
2123 const struct ib_mad_hdr *recv_hdr = (const struct ib_mad_hdr *)recv->mad;
2124 struct ib_mad_hdr *resp_hdr = (struct ib_mad_hdr *)response->mad;
2125
2126 if (recv_hdr->method == IB_MGMT_METHOD_GET ||
2127 recv_hdr->method == IB_MGMT_METHOD_SET) {
2128 memcpy(response, recv, mad_priv_size(response));
0b307043 2129 response->header.recv_wc.wc = &response->header.wc;
c9082e51 2130 response->header.recv_wc.recv_buf.mad = (struct ib_mad *)response->mad;
0b307043 2131 response->header.recv_wc.recv_buf.grh = &response->grh;
2132 resp_hdr->method = IB_MGMT_METHOD_GET_RESP;
2133 resp_hdr->status = cpu_to_be16(IB_MGMT_MAD_STATUS_UNSUPPORTED_METHOD_ATTRIB);
2134 if (recv_hdr->mgmt_class == IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE)
2135 resp_hdr->status |= IB_SMP_DIRECTION;
0b307043 2136
2137 if (opa && recv_hdr->base_version == OPA_MGMT_BASE_VERSION) {
2138 if (recv_hdr->mgmt_class ==
2139 IB_MGMT_CLASS_SUBN_LID_ROUTED ||
2140 recv_hdr->mgmt_class ==
2141 IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE)
2142 *resp_len = opa_get_smp_header_size(
2143 (struct opa_smp *)recv->mad);
2144 else
2145 *resp_len = sizeof(struct ib_mad_hdr);
2146 }
2147
2148 return true;
2149 } else {
2150 return false;
2151 }
2152}
2153
2154static enum smi_action
2155handle_opa_smi(struct ib_mad_port_private *port_priv,
2156 struct ib_mad_qp_info *qp_info,
2157 struct ib_wc *wc,
2158 int port_num,
2159 struct ib_mad_private *recv,
2160 struct ib_mad_private *response)
2161{
2162 enum smi_forward_action retsmi;
2163 struct opa_smp *smp = (struct opa_smp *)recv->mad;
2164
2165 if (opa_smi_handle_dr_smp_recv(smp,
4139032b 2166 rdma_cap_ib_switch(port_priv->device),
2167 port_num,
2168 port_priv->device->phys_port_cnt) ==
2169 IB_SMI_DISCARD)
2170 return IB_SMI_DISCARD;
2171
2172 retsmi = opa_smi_check_forward_dr_smp(smp);
2173 if (retsmi == IB_SMI_LOCAL)
2174 return IB_SMI_HANDLE;
2175
2176 if (retsmi == IB_SMI_SEND) { /* don't forward */
2177 if (opa_smi_handle_dr_smp_send(smp,
4139032b 2178 rdma_cap_ib_switch(port_priv->device),
2179 port_num) == IB_SMI_DISCARD)
2180 return IB_SMI_DISCARD;
2181
2182 if (opa_smi_check_local_smp(smp, port_priv->device) ==
2183 IB_SMI_DISCARD)
2184 return IB_SMI_DISCARD;
2185
4139032b 2186 } else if (rdma_cap_ib_switch(port_priv->device)) {
2187 /* forward case for switches */
2188 memcpy(response, recv, mad_priv_size(response));
2189 response->header.recv_wc.wc = &response->header.wc;
2190 response->header.recv_wc.recv_buf.opa_mad =
2191 (struct opa_mad *)response->mad;
2192 response->header.recv_wc.recv_buf.grh = &response->grh;
2193
2194 agent_send_response((const struct ib_mad_hdr *)response->mad,
2195 &response->grh, wc,
2196 port_priv->device,
2197 opa_smi_get_fwd_port(smp),
2198 qp_info->qp->qp_num,
2199 recv->header.wc.byte_len,
2200 true);
2201
2202 return IB_SMI_DISCARD;
2203 }
2204
2205 return IB_SMI_HANDLE;
2206}
2207
2208static enum smi_action
2209handle_smi(struct ib_mad_port_private *port_priv,
2210 struct ib_mad_qp_info *qp_info,
2211 struct ib_wc *wc,
2212 int port_num,
2213 struct ib_mad_private *recv,
2214 struct ib_mad_private *response,
2215 bool opa)
2216{
2217 struct ib_mad_hdr *mad_hdr = (struct ib_mad_hdr *)recv->mad;
2218
2219 if (opa && mad_hdr->base_version == OPA_MGMT_BASE_VERSION &&
9fa240bb 2220 mad_hdr->class_version == OPA_SM_CLASS_VERSION)
2221 return handle_opa_smi(port_priv, qp_info, wc, port_num, recv,
2222 response);
2223
2224 return handle_ib_smi(port_priv, qp_info, wc, port_num, recv, response);
2225}
2226
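/*
 * Receive completion handler: unmap and validate the buffer, let SMP
 * handling and the driver's process_mad() have first crack at the MAD,
 * then dispatch it to the matching agent.  A fresh receive is always
 * reposted on the way out.
 */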
d53e11fd 2227static void ib_mad_recv_done(struct ib_cq *cq, struct ib_wc *wc)
1da177e4 2228{
2229 struct ib_mad_port_private *port_priv = cq->cq_context;
2230 struct ib_mad_list_head *mad_list =
2231 container_of(wc->wr_cqe, struct ib_mad_list_head, cqe);
2232 struct ib_mad_qp_info *qp_info;
2233 struct ib_mad_private_header *mad_priv_hdr;
445d6807 2234 struct ib_mad_private *recv, *response = NULL;
1da177e4 2235 struct ib_mad_agent_private *mad_agent;
1bae4dbf 2236 int port_num;
a9e74323 2237 int ret = IB_MAD_RESULT_SUCCESS;
2238 size_t mad_size;
2239 u16 resp_mad_pkey_index = 0;
8e4349d1 2240 bool opa;
1da177e4 2241
2242 if (list_empty_careful(&port_priv->port_list))
2243 return;
2244
2245 if (wc->status != IB_WC_SUCCESS) {
2246 /*
2247 * Receive errors indicate that the QP has entered the error
2248 * state - error handling/shutdown code will cleanup
2249 */
2250 return;
2251 }
2252
2253 qp_info = mad_list->mad_queue->qp_info;
2254 dequeue_mad(mad_list);
2255
2256 opa = rdma_cap_opa_mad(qp_info->port_priv->device,
2257 qp_info->port_priv->port_num);
2258
2259 mad_priv_hdr = container_of(mad_list, struct ib_mad_private_header,
2260 mad_list);
2261 recv = container_of(mad_priv_hdr, struct ib_mad_private, header);
2262 ib_dma_unmap_single(port_priv->device,
2263 recv->header.mapping,
c9082e51 2264 mad_priv_dma_size(recv),
1527106f 2265 DMA_FROM_DEVICE);
2266
2267 /* Setup MAD receive work completion from "normal" work completion */
2268 recv->header.wc = *wc;
2269 recv->header.recv_wc.wc = &recv->header.wc;
2270
2271 if (opa && ((struct ib_mad_hdr *)(recv->mad))->base_version == OPA_MGMT_BASE_VERSION) {
2272 recv->header.recv_wc.mad_len = wc->byte_len - sizeof(struct ib_grh);
2273 recv->header.recv_wc.mad_seg_size = sizeof(struct opa_mad);
2274 } else {
2275 recv->header.recv_wc.mad_len = sizeof(struct ib_mad);
2276 recv->header.recv_wc.mad_seg_size = sizeof(struct ib_mad);
2277 }
2278
c9082e51 2279 recv->header.recv_wc.recv_buf.mad = (struct ib_mad *)recv->mad;
2280 recv->header.recv_wc.recv_buf.grh = &recv->grh;
2281
2282 if (atomic_read(&qp_info->snoop_count))
2283 snoop_recv(qp_info, &recv->header.recv_wc, IB_MAD_SNOOP_RECVS);
2284
2285 /* Validate MAD */
8e4349d1 2286 if (!validate_mad((const struct ib_mad_hdr *)recv->mad, qp_info, opa))
2287 goto out;
2288
2289 mad_size = recv->mad_size;
2290 response = alloc_mad_private(mad_size, GFP_KERNEL);
27162432 2291 if (!response)
445d6807 2292 goto out;
445d6807 2293
4139032b 2294 if (rdma_cap_ib_switch(port_priv->device))
2295 port_num = wc->port_num;
2296 else
2297 port_num = port_priv->port_num;
2298
c9082e51 2299 if (((struct ib_mad_hdr *)recv->mad)->mgmt_class ==
1da177e4 2300 IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE) {
2301 if (handle_smi(port_priv, qp_info, wc, port_num, recv,
2302 response, opa)
e11ae8aa 2303 == IB_SMI_DISCARD)
1da177e4 2304 goto out;
2305 }
2306
1da177e4 2307 /* Give driver "right of first refusal" on incoming MAD */
2308 if (port_priv->device->ops.process_mad) {
2309 ret = port_priv->device->ops.process_mad(
2310 port_priv->device, 0, port_priv->port_num, wc,
2311 &recv->grh, (const struct ib_mad_hdr *)recv->mad,
2312 recv->mad_size, (struct ib_mad_hdr *)response->mad,
2313 &mad_size, &resp_mad_pkey_index);
2314
2315 if (opa)
2316 wc->pkey_index = resp_mad_pkey_index;
2317
2318 if (ret & IB_MAD_RESULT_SUCCESS) {
2319 if (ret & IB_MAD_RESULT_CONSUMED)
2320 goto out;
2321 if (ret & IB_MAD_RESULT_REPLY) {
c9082e51 2322 agent_send_response((const struct ib_mad_hdr *)response->mad,
2323 &recv->grh, wc,
2324 port_priv->device,
1bae4dbf 2325 port_num,
c9082e51 2326 qp_info->qp->qp_num,
8e4349d1 2327 mad_size, opa);
2328 goto out;
2329 }
2330 }
2331 }
2332
c9082e51 2333 mad_agent = find_mad_agent(port_priv, (const struct ib_mad_hdr *)recv->mad);
1da177e4 2334 if (mad_agent) {
4a0754fa 2335 ib_mad_complete_recv(mad_agent, &recv->header.recv_wc);
2336 /*
 2337 * recv is freed inside ib_mad_complete_recv(), either in its
 2338 * error paths or by the agent's recv_handler
2339 */
2340 recv = NULL;
a9e74323 2341 } else if ((ret & IB_MAD_RESULT_SUCCESS) &&
8e4349d1 2342 generate_unmatched_resp(recv, response, &mad_size, opa)) {
2343 agent_send_response((const struct ib_mad_hdr *)response->mad, &recv->grh, wc,
2344 port_priv->device, port_num,
8e4349d1 2345 qp_info->qp->qp_num, mad_size, opa);
2346 }
2347
2348out:
2349 /* Post another receive request for this QP */
2350 if (response) {
2351 ib_mad_post_receive_mads(qp_info, response);
c9082e51 2352 kfree(recv);
2353 } else
2354 ib_mad_post_receive_mads(qp_info, recv);
2355}
2356
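/* Re-arm the agent's delayed work for the earliest pending timeout. */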
2357static void adjust_timeout(struct ib_mad_agent_private *mad_agent_priv)
2358{
2359 struct ib_mad_send_wr_private *mad_send_wr;
2360 unsigned long delay;
2361
2362 if (list_empty(&mad_agent_priv->wait_list)) {
136b5721 2363 cancel_delayed_work(&mad_agent_priv->timed_work);
2364 } else {
2365 mad_send_wr = list_entry(mad_agent_priv->wait_list.next,
2366 struct ib_mad_send_wr_private,
2367 agent_list);
2368
2369 if (time_after(mad_agent_priv->timeout,
2370 mad_send_wr->timeout)) {
2371 mad_agent_priv->timeout = mad_send_wr->timeout;
2372 delay = mad_send_wr->timeout - jiffies;
2373 if ((long)delay <= 0)
2374 delay = 1;
2375 mod_delayed_work(mad_agent_priv->qp_info->port_priv->wq,
2376 &mad_agent_priv->timed_work, delay);
2377 }
2378 }
2379}
2380
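/*
 * Move a send onto the wait list, kept sorted by absolute timeout so the
 * head is always the next request to expire.
 */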
d760ce8f 2381static void wait_for_response(struct ib_mad_send_wr_private *mad_send_wr)
1da177e4 2382{
d760ce8f 2383 struct ib_mad_agent_private *mad_agent_priv;
2384 struct ib_mad_send_wr_private *temp_mad_send_wr;
2385 struct list_head *list_item;
2386 unsigned long delay;
2387
d760ce8f 2388 mad_agent_priv = mad_send_wr->mad_agent_priv;
2389 list_del(&mad_send_wr->agent_list);
2390
2391 delay = mad_send_wr->timeout;
2392 mad_send_wr->timeout += jiffies;
2393
2394 if (delay) {
2395 list_for_each_prev(list_item, &mad_agent_priv->wait_list) {
2396 temp_mad_send_wr = list_entry(list_item,
2397 struct ib_mad_send_wr_private,
2398 agent_list);
2399 if (time_after(mad_send_wr->timeout,
2400 temp_mad_send_wr->timeout))
2401 break;
2402 }
1da177e4 2403 }
2404 else
2405 list_item = &mad_agent_priv->wait_list;
2406 list_add(&mad_send_wr->agent_list, list_item);
2407
2408 /* Reschedule a work item if we have a shorter timeout */
2409 if (mad_agent_priv->wait_list.next == &mad_send_wr->agent_list)
2410 mod_delayed_work(mad_agent_priv->qp_info->port_priv->wq,
2411 &mad_agent_priv->timed_work, delay);
2412}
2413
03b61ad2 2414void ib_reset_mad_timeout(struct ib_mad_send_wr_private *mad_send_wr,
dbace111 2415 unsigned long timeout_ms)
2416{
2417 mad_send_wr->timeout = msecs_to_jiffies(timeout_ms);
2418 wait_for_response(mad_send_wr);
2419}
2420
2421/*
2422 * Process a send work completion
2423 */
2424void ib_mad_complete_send_wr(struct ib_mad_send_wr_private *mad_send_wr,
2425 struct ib_mad_send_wc *mad_send_wc)
2426{
2427 struct ib_mad_agent_private *mad_agent_priv;
2428 unsigned long flags;
fa619a77 2429 int ret;
1da177e4 2430
d760ce8f 2431 mad_agent_priv = mad_send_wr->mad_agent_priv;
1da177e4 2432 spin_lock_irqsave(&mad_agent_priv->lock, flags);
1471cb6c 2433 if (ib_mad_kernel_rmpp_agent(&mad_agent_priv->agent)) {
2434 ret = ib_process_rmpp_send_wc(mad_send_wr, mad_send_wc);
2435 if (ret == IB_RMPP_RESULT_CONSUMED)
2436 goto done;
2437 } else
2438 ret = IB_RMPP_RESULT_UNHANDLED;
2439
2440 if (mad_send_wc->status != IB_WC_SUCCESS &&
2441 mad_send_wr->status == IB_WC_SUCCESS) {
2442 mad_send_wr->status = mad_send_wc->status;
2443 mad_send_wr->refcount -= (mad_send_wr->timeout > 0);
2444 }
2445
2446 if (--mad_send_wr->refcount > 0) {
2447 if (mad_send_wr->refcount == 1 && mad_send_wr->timeout &&
2448 mad_send_wr->status == IB_WC_SUCCESS) {
d760ce8f 2449 wait_for_response(mad_send_wr);
1da177e4 2450 }
fa619a77 2451 goto done;
2452 }
2453
2454 /* Remove send from MAD agent and notify client of completion */
2455 list_del(&mad_send_wr->agent_list);
2456 adjust_timeout(mad_agent_priv);
2457 spin_unlock_irqrestore(&mad_agent_priv->lock, flags);
2458
 2459 if (mad_send_wr->status != IB_WC_SUCCESS)
2460 mad_send_wc->status = mad_send_wr->status;
2461 if (ret == IB_RMPP_RESULT_INTERNAL)
2462 ib_rmpp_send_handler(mad_send_wc);
2463 else
2464 mad_agent_priv->agent.send_handler(&mad_agent_priv->agent,
2465 mad_send_wc);
2466
2467 /* Release reference on agent taken when sending */
1b52fa98 2468 deref_mad_agent(mad_agent_priv);
2469 return;
2470done:
2471 spin_unlock_irqrestore(&mad_agent_priv->lock, flags);
2472}
2473
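/*
 * Send completion handler: unmap the header and payload buffers, promote
 * a queued overflow send onto the hardware queue, and complete the work
 * request.
 */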
d53e11fd 2474static void ib_mad_send_done(struct ib_cq *cq, struct ib_wc *wc)
1da177e4 2475{
2476 struct ib_mad_port_private *port_priv = cq->cq_context;
2477 struct ib_mad_list_head *mad_list =
2478 container_of(wc->wr_cqe, struct ib_mad_list_head, cqe);
1da177e4 2479 struct ib_mad_send_wr_private *mad_send_wr, *queued_send_wr;
2480 struct ib_mad_qp_info *qp_info;
2481 struct ib_mad_queue *send_queue;
34816ad9 2482 struct ib_mad_send_wc mad_send_wc;
2483 unsigned long flags;
2484 int ret;
2485
2486 if (list_empty_careful(&port_priv->port_list))
2487 return;
2488
2489 if (wc->status != IB_WC_SUCCESS) {
2490 if (!ib_mad_send_error(port_priv, wc))
2491 return;
2492 }
2493
2494 mad_send_wr = container_of(mad_list, struct ib_mad_send_wr_private,
2495 mad_list);
2496 send_queue = mad_list->mad_queue;
2497 qp_info = send_queue->qp_info;
2498
2499retry:
2500 ib_dma_unmap_single(mad_send_wr->send_buf.mad_agent->device,
2501 mad_send_wr->header_mapping,
2502 mad_send_wr->sg_list[0].length, DMA_TO_DEVICE);
2503 ib_dma_unmap_single(mad_send_wr->send_buf.mad_agent->device,
2504 mad_send_wr->payload_mapping,
2505 mad_send_wr->sg_list[1].length, DMA_TO_DEVICE);
2506 queued_send_wr = NULL;
2507 spin_lock_irqsave(&send_queue->lock, flags);
2508 list_del(&mad_list->list);
2509
2510 /* Move queued send to the send queue */
2511 if (send_queue->count-- > send_queue->max_active) {
2512 mad_list = container_of(qp_info->overflow_list.next,
2513 struct ib_mad_list_head, list);
2514 queued_send_wr = container_of(mad_list,
2515 struct ib_mad_send_wr_private,
2516 mad_list);
179e0917 2517 list_move_tail(&mad_list->list, &send_queue->list);
2518 }
2519 spin_unlock_irqrestore(&send_queue->lock, flags);
2520
2521 mad_send_wc.send_buf = &mad_send_wr->send_buf;
2522 mad_send_wc.status = wc->status;
2523 mad_send_wc.vendor_err = wc->vendor_err;
1da177e4 2524 if (atomic_read(&qp_info->snoop_count))
34816ad9 2525 snoop_send(qp_info, &mad_send_wr->send_buf, &mad_send_wc,
1da177e4 2526 IB_MAD_SNOOP_SEND_COMPLETIONS);
34816ad9 2527 ib_mad_complete_send_wr(mad_send_wr, &mad_send_wc);
2528
2529 if (queued_send_wr) {
e622f2f4 2530 ret = ib_post_send(qp_info->qp, &queued_send_wr->send_wr.wr,
1fec77bf 2531 NULL);
1da177e4 2532 if (ret) {
2533 dev_err(&port_priv->device->dev,
2534 "ib_post_send failed: %d\n", ret);
2535 mad_send_wr = queued_send_wr;
2536 wc->status = IB_WC_LOC_QP_OP_ERR;
2537 goto retry;
2538 }
2539 }
2540}
2541
2542static void mark_sends_for_retry(struct ib_mad_qp_info *qp_info)
2543{
2544 struct ib_mad_send_wr_private *mad_send_wr;
2545 struct ib_mad_list_head *mad_list;
2546 unsigned long flags;
2547
2548 spin_lock_irqsave(&qp_info->send_queue.lock, flags);
2549 list_for_each_entry(mad_list, &qp_info->send_queue.list, list) {
2550 mad_send_wr = container_of(mad_list,
2551 struct ib_mad_send_wr_private,
2552 mad_list);
2553 mad_send_wr->retry = 1;
2554 }
2555 spin_unlock_irqrestore(&qp_info->send_queue.lock, flags);
2556}
2557
2558static bool ib_mad_send_error(struct ib_mad_port_private *port_priv,
2559 struct ib_wc *wc)
1da177e4 2560{
2561 struct ib_mad_list_head *mad_list =
2562 container_of(wc->wr_cqe, struct ib_mad_list_head, cqe);
2563 struct ib_mad_qp_info *qp_info = mad_list->mad_queue->qp_info;
2564 struct ib_mad_send_wr_private *mad_send_wr;
2565 int ret;
2566
2567 /*
2568 * Send errors will transition the QP to SQE - move
2569 * QP to RTS and repost flushed work requests
2570 */
2571 mad_send_wr = container_of(mad_list, struct ib_mad_send_wr_private,
2572 mad_list);
2573 if (wc->status == IB_WC_WR_FLUSH_ERR) {
2574 if (mad_send_wr->retry) {
2575 /* Repost send */
1da177e4 2576 mad_send_wr->retry = 0;
e622f2f4 2577 ret = ib_post_send(qp_info->qp, &mad_send_wr->send_wr.wr,
1fec77bf 2578 NULL);
2579 if (!ret)
2580 return false;
2581 }
2582 } else {
2583 struct ib_qp_attr *attr;
2584
2585 /* Transition QP to RTS and fail offending send */
2586 attr = kmalloc(sizeof *attr, GFP_KERNEL);
2587 if (attr) {
2588 attr->qp_state = IB_QPS_RTS;
2589 attr->cur_qp_state = IB_QPS_SQE;
2590 ret = ib_modify_qp(qp_info->qp, attr,
2591 IB_QP_STATE | IB_QP_CUR_STATE);
2592 kfree(attr);
2593 if (ret)
7ef5d4b0 2594 dev_err(&port_priv->device->dev,
2595 "%s - ib_modify_qp to RTS: %d\n",
2596 __func__, ret);
2597 else
2598 mark_sends_for_retry(qp_info);
2599 }
1da177e4 2600 }
1da177e4 2601
d53e11fd 2602 return true;
2603}
2604
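/*
 * Flush every outstanding send for an agent being torn down, reporting
 * each one to the client with IB_WC_WR_FLUSH_ERR.
 */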
2605static void cancel_mads(struct ib_mad_agent_private *mad_agent_priv)
2606{
2607 unsigned long flags;
2608 struct ib_mad_send_wr_private *mad_send_wr, *temp_mad_send_wr;
2609 struct ib_mad_send_wc mad_send_wc;
2610 struct list_head cancel_list;
2611
2612 INIT_LIST_HEAD(&cancel_list);
2613
2614 spin_lock_irqsave(&mad_agent_priv->lock, flags);
2615 list_for_each_entry_safe(mad_send_wr, temp_mad_send_wr,
2616 &mad_agent_priv->send_list, agent_list) {
2617 if (mad_send_wr->status == IB_WC_SUCCESS) {
3cd96564 2618 mad_send_wr->status = IB_WC_WR_FLUSH_ERR;
2619 mad_send_wr->refcount -= (mad_send_wr->timeout > 0);
2620 }
2621 }
2622
2623 /* Empty wait list to prevent receives from finding a request */
2624 list_splice_init(&mad_agent_priv->wait_list, &cancel_list);
2625 spin_unlock_irqrestore(&mad_agent_priv->lock, flags);
2626
2627 /* Report all cancelled requests */
2628 mad_send_wc.status = IB_WC_WR_FLUSH_ERR;
2629 mad_send_wc.vendor_err = 0;
2630
2631 list_for_each_entry_safe(mad_send_wr, temp_mad_send_wr,
2632 &cancel_list, agent_list) {
2633 mad_send_wc.send_buf = &mad_send_wr->send_buf;
2634 list_del(&mad_send_wr->agent_list);
2635 mad_agent_priv->agent.send_handler(&mad_agent_priv->agent,
2636 &mad_send_wc);
2637 atomic_dec(&mad_agent_priv->refcount);
2638 }
2639}
2640
2641static struct ib_mad_send_wr_private*
2642find_send_wr(struct ib_mad_agent_private *mad_agent_priv,
2643 struct ib_mad_send_buf *send_buf)
2644{
2645 struct ib_mad_send_wr_private *mad_send_wr;
2646
2647 list_for_each_entry(mad_send_wr, &mad_agent_priv->wait_list,
2648 agent_list) {
34816ad9 2649 if (&mad_send_wr->send_buf == send_buf)
2650 return mad_send_wr;
2651 }
2652
2653 list_for_each_entry(mad_send_wr, &mad_agent_priv->send_list,
2654 agent_list) {
2655 if (is_rmpp_data_mad(mad_agent_priv,
2656 mad_send_wr->send_buf.mad) &&
34816ad9 2657 &mad_send_wr->send_buf == send_buf)
2658 return mad_send_wr;
2659 }
2660 return NULL;
2661}
2662
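/*
 * Adjust the timeout of an outstanding send; a timeout_ms of 0 cancels it.
 * Sketch of typical client usage (agent and send_buf are hypothetical
 * handles obtained from ib_register_mad_agent() and ib_create_send_mad()):
 *
 *	ib_modify_mad(agent, send_buf, 2000);	- allow two more seconds
 *	ib_cancel_mad(agent, send_buf);		- or abort the send entirely
 */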
2663int ib_modify_mad(struct ib_mad_agent *mad_agent,
2664 struct ib_mad_send_buf *send_buf, u32 timeout_ms)
2665{
2666 struct ib_mad_agent_private *mad_agent_priv;
2667 struct ib_mad_send_wr_private *mad_send_wr;
2668 unsigned long flags;
cabe3cbc 2669 int active;
2670
2671 mad_agent_priv = container_of(mad_agent, struct ib_mad_agent_private,
2672 agent);
2673 spin_lock_irqsave(&mad_agent_priv->lock, flags);
34816ad9 2674 mad_send_wr = find_send_wr(mad_agent_priv, send_buf);
03b61ad2 2675 if (!mad_send_wr || mad_send_wr->status != IB_WC_SUCCESS) {
1da177e4 2676 spin_unlock_irqrestore(&mad_agent_priv->lock, flags);
03b61ad2 2677 return -EINVAL;
2678 }
2679
cabe3cbc 2680 active = (!mad_send_wr->timeout || mad_send_wr->refcount > 1);
03b61ad2 2681 if (!timeout_ms) {
1da177e4 2682 mad_send_wr->status = IB_WC_WR_FLUSH_ERR;
03b61ad2 2683 mad_send_wr->refcount -= (mad_send_wr->timeout > 0);
2684 }
2685
34816ad9 2686 mad_send_wr->send_buf.timeout_ms = timeout_ms;
cabe3cbc 2687 if (active)
2688 mad_send_wr->timeout = msecs_to_jiffies(timeout_ms);
2689 else
2690 ib_reset_mad_timeout(mad_send_wr, timeout_ms);
2691
1da177e4 2692 spin_unlock_irqrestore(&mad_agent_priv->lock, flags);
2693 return 0;
2694}
2695EXPORT_SYMBOL(ib_modify_mad);
1da177e4 2696
2697void ib_cancel_mad(struct ib_mad_agent *mad_agent,
2698 struct ib_mad_send_buf *send_buf)
03b61ad2 2699{
34816ad9 2700 ib_modify_mad(mad_agent, send_buf, 0);
2701}
2702EXPORT_SYMBOL(ib_cancel_mad);
2703
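/*
 * Work handler for MADs addressed to the local port: deliver the
 * "received" MAD to the destination agent with a synthesized work
 * completion, then complete the send on the originating agent, all
 * without touching the wire.
 */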
c4028958 2704static void local_completions(struct work_struct *work)
2705{
2706 struct ib_mad_agent_private *mad_agent_priv;
2707 struct ib_mad_local_private *local;
2708 struct ib_mad_agent_private *recv_mad_agent;
2709 unsigned long flags;
1d9bc6d6 2710 int free_mad;
2711 struct ib_wc wc;
2712 struct ib_mad_send_wc mad_send_wc;
8e4349d1 2713 bool opa;
1da177e4 2714
2715 mad_agent_priv =
2716 container_of(work, struct ib_mad_agent_private, local_work);
1da177e4 2717
2718 opa = rdma_cap_opa_mad(mad_agent_priv->qp_info->port_priv->device,
2719 mad_agent_priv->qp_info->port_priv->port_num);
2720
2721 spin_lock_irqsave(&mad_agent_priv->lock, flags);
2722 while (!list_empty(&mad_agent_priv->local_list)) {
2723 local = list_entry(mad_agent_priv->local_list.next,
2724 struct ib_mad_local_private,
2725 completion_list);
37289efe 2726 list_del(&local->completion_list);
1da177e4 2727 spin_unlock_irqrestore(&mad_agent_priv->lock, flags);
1d9bc6d6 2728 free_mad = 0;
1da177e4 2729 if (local->mad_priv) {
8e4349d1 2730 u8 base_version;
2731 recv_mad_agent = local->recv_mad_agent;
2732 if (!recv_mad_agent) {
2733 dev_err(&mad_agent_priv->agent.device->dev,
2734 "No receive MAD agent for local completion\n");
1d9bc6d6 2735 free_mad = 1;
2736 goto local_send_completion;
2737 }
2738
2739 /*
2740 * Defined behavior is to complete response
2741 * before request
2742 */
062dbb69 2743 build_smp_wc(recv_mad_agent->agent.qp,
d53e11fd 2744 local->mad_send_wr->send_wr.wr.wr_cqe,
97f52eb4 2745 be16_to_cpu(IB_LID_PERMISSIVE),
e622f2f4 2746 local->mad_send_wr->send_wr.pkey_index,
8e4349d1 2747 recv_mad_agent->agent.port_num, &wc);
2748
2749 local->mad_priv->header.recv_wc.wc = &wc;
2750
2751 base_version = ((struct ib_mad_hdr *)(local->mad_priv->mad))->base_version;
2752 if (opa && base_version == OPA_MGMT_BASE_VERSION) {
2753 local->mad_priv->header.recv_wc.mad_len = local->return_wc_byte_len;
2754 local->mad_priv->header.recv_wc.mad_seg_size = sizeof(struct opa_mad);
2755 } else {
2756 local->mad_priv->header.recv_wc.mad_len = sizeof(struct ib_mad);
2757 local->mad_priv->header.recv_wc.mad_seg_size = sizeof(struct ib_mad);
2758 }
2759
2760 INIT_LIST_HEAD(&local->mad_priv->header.recv_wc.rmpp_list);
2761 list_add(&local->mad_priv->header.recv_wc.recv_buf.list,
2762 &local->mad_priv->header.recv_wc.rmpp_list);
2763 local->mad_priv->header.recv_wc.recv_buf.grh = NULL;
2764 local->mad_priv->header.recv_wc.recv_buf.mad =
c9082e51 2765 (struct ib_mad *)local->mad_priv->mad;
2766 if (atomic_read(&recv_mad_agent->qp_info->snoop_count))
2767 snoop_recv(recv_mad_agent->qp_info,
2768 &local->mad_priv->header.recv_wc,
2769 IB_MAD_SNOOP_RECVS);
2770 recv_mad_agent->agent.recv_handler(
2771 &recv_mad_agent->agent,
ca281265 2772 &local->mad_send_wr->send_buf,
2773 &local->mad_priv->header.recv_wc);
2774 spin_lock_irqsave(&recv_mad_agent->lock, flags);
2775 atomic_dec(&recv_mad_agent->refcount);
2776 spin_unlock_irqrestore(&recv_mad_agent->lock, flags);
2777 }
2778
2779local_send_completion:
2780 /* Complete send */
2781 mad_send_wc.status = IB_WC_SUCCESS;
2782 mad_send_wc.vendor_err = 0;
34816ad9 2783 mad_send_wc.send_buf = &local->mad_send_wr->send_buf;
1da177e4 2784 if (atomic_read(&mad_agent_priv->qp_info->snoop_count))
2785 snoop_send(mad_agent_priv->qp_info,
2786 &local->mad_send_wr->send_buf,
2787 &mad_send_wc, IB_MAD_SNOOP_SEND_COMPLETIONS);
2788 mad_agent_priv->agent.send_handler(&mad_agent_priv->agent,
2789 &mad_send_wc);
2790
2791 spin_lock_irqsave(&mad_agent_priv->lock, flags);
1da177e4 2792 atomic_dec(&mad_agent_priv->refcount);
1d9bc6d6 2793 if (free_mad)
c9082e51 2794 kfree(local->mad_priv);
2795 kfree(local);
2796 }
2797 spin_unlock_irqrestore(&mad_agent_priv->lock, flags);
2798}
2799
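/*
 * Consume one retry and repost the send; RMPP sends are retried through
 * ib_retry_rmpp().  Returns -ETIMEDOUT once retries_left is exhausted.
 */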
2800static int retry_send(struct ib_mad_send_wr_private *mad_send_wr)
2801{
2802 int ret;
2803
4fc8cd49 2804 if (!mad_send_wr->retries_left)
2805 return -ETIMEDOUT;
2806
2807 mad_send_wr->retries_left--;
2808 mad_send_wr->send_buf.retries++;
2809
34816ad9 2810 mad_send_wr->timeout = msecs_to_jiffies(mad_send_wr->send_buf.timeout_ms);
f75b7a52 2811
1471cb6c 2812 if (ib_mad_kernel_rmpp_agent(&mad_send_wr->mad_agent_priv->agent)) {
2813 ret = ib_retry_rmpp(mad_send_wr);
2814 switch (ret) {
2815 case IB_RMPP_RESULT_UNHANDLED:
2816 ret = ib_send_mad(mad_send_wr);
2817 break;
2818 case IB_RMPP_RESULT_CONSUMED:
2819 ret = 0;
2820 break;
2821 default:
2822 ret = -ECOMM;
2823 break;
2824 }
2825 } else
2826 ret = ib_send_mad(mad_send_wr);
2827
2828 if (!ret) {
2829 mad_send_wr->refcount++;
2830 list_add_tail(&mad_send_wr->agent_list,
2831 &mad_send_wr->mad_agent_priv->send_list);
2832 }
2833 return ret;
2834}
2835
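/*
 * Delayed work handler that expires the wait list: sends with retries
 * remaining are reposted, the rest complete with IB_WC_RESP_TIMEOUT_ERR.
 */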
c4028958 2836static void timeout_sends(struct work_struct *work)
2837{
2838 struct ib_mad_agent_private *mad_agent_priv;
2839 struct ib_mad_send_wr_private *mad_send_wr;
2840 struct ib_mad_send_wc mad_send_wc;
2841 unsigned long flags, delay;
2842
2843 mad_agent_priv = container_of(work, struct ib_mad_agent_private,
2844 timed_work.work);
2845 mad_send_wc.vendor_err = 0;
2846
2847 spin_lock_irqsave(&mad_agent_priv->lock, flags);
2848 while (!list_empty(&mad_agent_priv->wait_list)) {
2849 mad_send_wr = list_entry(mad_agent_priv->wait_list.next,
2850 struct ib_mad_send_wr_private,
2851 agent_list);
2852
2853 if (time_after(mad_send_wr->timeout, jiffies)) {
2854 delay = mad_send_wr->timeout - jiffies;
2855 if ((long)delay <= 0)
2856 delay = 1;
2857 queue_delayed_work(mad_agent_priv->qp_info->
2858 port_priv->wq,
2859 &mad_agent_priv->timed_work, delay);
2860 break;
2861 }
2862
dbf9227b 2863 list_del(&mad_send_wr->agent_list);
2864 if (mad_send_wr->status == IB_WC_SUCCESS &&
2865 !retry_send(mad_send_wr))
2866 continue;
2867
1da177e4
LT
2868 spin_unlock_irqrestore(&mad_agent_priv->lock, flags);
2869
03b61ad2
HR
2870 if (mad_send_wr->status == IB_WC_SUCCESS)
2871 mad_send_wc.status = IB_WC_RESP_TIMEOUT_ERR;
2872 else
2873 mad_send_wc.status = mad_send_wr->status;
34816ad9 2874 mad_send_wc.send_buf = &mad_send_wr->send_buf;
2875 mad_agent_priv->agent.send_handler(&mad_agent_priv->agent,
2876 &mad_send_wc);
2877
2878 atomic_dec(&mad_agent_priv->refcount);
2879 spin_lock_irqsave(&mad_agent_priv->lock, flags);
2880 }
2881 spin_unlock_irqrestore(&mad_agent_priv->lock, flags);
2882}
2883
2884/*
2885 * Allocate receive MADs and post receive WRs for them
2886 */
2887static int ib_mad_post_receive_mads(struct ib_mad_qp_info *qp_info,
2888 struct ib_mad_private *mad)
2889{
2890 unsigned long flags;
2891 int post, ret;
2892 struct ib_mad_private *mad_priv;
2893 struct ib_sge sg_list;
1fec77bf 2894 struct ib_recv_wr recv_wr;
2895 struct ib_mad_queue *recv_queue = &qp_info->recv_queue;
2896
2897 /* Initialize common scatter list fields */
4be90bc6 2898 sg_list.lkey = qp_info->port_priv->pd->local_dma_lkey;
2899
2900 /* Initialize common receive WR fields */
2901 recv_wr.next = NULL;
2902 recv_wr.sg_list = &sg_list;
2903 recv_wr.num_sge = 1;
2904
2905 do {
2906 /* Allocate and map receive buffer */
2907 if (mad) {
2908 mad_priv = mad;
2909 mad = NULL;
2910 } else {
2911 mad_priv = alloc_mad_private(port_mad_size(qp_info->port_priv),
2912 GFP_ATOMIC);
1da177e4 2913 if (!mad_priv) {
2914 ret = -ENOMEM;
2915 break;
2916 }
2917 }
c9082e51 2918 sg_list.length = mad_priv_dma_size(mad_priv);
2919 sg_list.addr = ib_dma_map_single(qp_info->port_priv->device,
2920 &mad_priv->grh,
c9082e51 2921 mad_priv_dma_size(mad_priv),
1527106f 2922 DMA_FROM_DEVICE);
2923 if (unlikely(ib_dma_mapping_error(qp_info->port_priv->device,
2924 sg_list.addr))) {
2925 ret = -ENOMEM;
2926 break;
2927 }
1527106f 2928 mad_priv->header.mapping = sg_list.addr;
1da177e4 2929 mad_priv->header.mad_list.mad_queue = recv_queue;
2930 mad_priv->header.mad_list.cqe.done = ib_mad_recv_done;
2931 recv_wr.wr_cqe = &mad_priv->header.mad_list.cqe;
2932
2933 /* Post receive WR */
2934 spin_lock_irqsave(&recv_queue->lock, flags);
2935 post = (++recv_queue->count < recv_queue->max_active);
2936 list_add_tail(&mad_priv->header.mad_list.list, &recv_queue->list);
2937 spin_unlock_irqrestore(&recv_queue->lock, flags);
1fec77bf 2938 ret = ib_post_recv(qp_info->qp, &recv_wr, NULL);
2939 if (ret) {
2940 spin_lock_irqsave(&recv_queue->lock, flags);
2941 list_del(&mad_priv->header.mad_list.list);
2942 recv_queue->count--;
2943 spin_unlock_irqrestore(&recv_queue->lock, flags);
2944 ib_dma_unmap_single(qp_info->port_priv->device,
2945 mad_priv->header.mapping,
c9082e51 2946 mad_priv_dma_size(mad_priv),
1527106f 2947 DMA_FROM_DEVICE);
c9082e51 2948 kfree(mad_priv);
2949 dev_err(&qp_info->port_priv->device->dev,
2950 "ib_post_recv failed: %d\n", ret);
2951 break;
2952 }
2953 } while (post);
2954
2955 return ret;
2956}
2957
2958/*
2959 * Return all the posted receive MADs
2960 */
2961static void cleanup_recv_queue(struct ib_mad_qp_info *qp_info)
2962{
2963 struct ib_mad_private_header *mad_priv_hdr;
2964 struct ib_mad_private *recv;
2965 struct ib_mad_list_head *mad_list;
2966
2967 if (!qp_info->qp)
2968 return;
2969
2970 while (!list_empty(&qp_info->recv_queue.list)) {
2971
2972 mad_list = list_entry(qp_info->recv_queue.list.next,
2973 struct ib_mad_list_head, list);
2974 mad_priv_hdr = container_of(mad_list,
2975 struct ib_mad_private_header,
2976 mad_list);
2977 recv = container_of(mad_priv_hdr, struct ib_mad_private,
2978 header);
2979
2980 /* Remove from posted receive MAD list */
2981 list_del(&mad_list->list);
2982
2983 ib_dma_unmap_single(qp_info->port_priv->device,
2984 recv->header.mapping,
c9082e51 2985 mad_priv_dma_size(recv),
1527106f 2986 DMA_FROM_DEVICE);
c9082e51 2987 kfree(recv);
2988 }
2989
2990 qp_info->recv_queue.count = 0;
2991}
2992
2993/*
2994 * Start the port
2995 */
2996static int ib_mad_port_start(struct ib_mad_port_private *port_priv)
2997{
2998 int ret, i;
2999 struct ib_qp_attr *attr;
3000 struct ib_qp *qp;
ef5ed416 3001 u16 pkey_index;
3002
3003 attr = kmalloc(sizeof *attr, GFP_KERNEL);
27162432 3004 if (!attr)
1da177e4 3005 return -ENOMEM;
1da177e4 3006
3007 ret = ib_find_pkey(port_priv->device, port_priv->port_num,
3008 IB_DEFAULT_PKEY_FULL, &pkey_index);
3009 if (ret)
3010 pkey_index = 0;
3011
3012 for (i = 0; i < IB_MAD_QPS_CORE; i++) {
3013 qp = port_priv->qp_info[i].qp;
3014 if (!qp)
3015 continue;
3016
3017 /*
3018 * PKey index for QP1 is irrelevant but
3019 * one is needed for the Reset to Init transition
3020 */
3021 attr->qp_state = IB_QPS_INIT;
ef5ed416 3022 attr->pkey_index = pkey_index;
3023 attr->qkey = (qp->qp_num == 0) ? 0 : IB_QP1_QKEY;
3024 ret = ib_modify_qp(qp, attr, IB_QP_STATE |
3025 IB_QP_PKEY_INDEX | IB_QP_QKEY);
3026 if (ret) {
3027 dev_err(&port_priv->device->dev,
3028 "Couldn't change QP%d state to INIT: %d\n",
3029 i, ret);
3030 goto out;
3031 }
3032
3033 attr->qp_state = IB_QPS_RTR;
3034 ret = ib_modify_qp(qp, attr, IB_QP_STATE);
3035 if (ret) {
3036 dev_err(&port_priv->device->dev,
3037 "Couldn't change QP%d state to RTR: %d\n",
3038 i, ret);
3039 goto out;
3040 }
3041
3042 attr->qp_state = IB_QPS_RTS;
3043 attr->sq_psn = IB_MAD_SEND_Q_PSN;
3044 ret = ib_modify_qp(qp, attr, IB_QP_STATE | IB_QP_SQ_PSN);
3045 if (ret) {
3046 dev_err(&port_priv->device->dev,
3047 "Couldn't change QP%d state to RTS: %d\n",
3048 i, ret);
3049 goto out;
3050 }
3051 }
3052
3053 ret = ib_req_notify_cq(port_priv->cq, IB_CQ_NEXT_COMP);
3054 if (ret) {
3055 dev_err(&port_priv->device->dev,
3056 "Failed to request completion notification: %d\n",
3057 ret);
3058 goto out;
3059 }
3060
3061 for (i = 0; i < IB_MAD_QPS_CORE; i++) {
3062 if (!port_priv->qp_info[i].qp)
3063 continue;
3064
3065 ret = ib_mad_post_receive_mads(&port_priv->qp_info[i], NULL);
3066 if (ret) {
3067 dev_err(&port_priv->device->dev,
3068 "Couldn't post receive WRs\n");
3069 goto out;
3070 }
3071 }
3072out:
3073 kfree(attr);
3074 return ret;
3075}
3076
3077static void qp_event_handler(struct ib_event *event, void *qp_context)
3078{
3079 struct ib_mad_qp_info *qp_info = qp_context;
3080
3081 /* It's worse than that! He's dead, Jim! */
3082 dev_err(&qp_info->port_priv->device->dev,
3083 "Fatal error (%d) on MAD QP (%d)\n",
3084 event->event, qp_info->qp->qp_num);
3085}
3086
3087static void init_mad_queue(struct ib_mad_qp_info *qp_info,
3088 struct ib_mad_queue *mad_queue)
3089{
3090 mad_queue->qp_info = qp_info;
3091 mad_queue->count = 0;
3092 spin_lock_init(&mad_queue->lock);
3093 INIT_LIST_HEAD(&mad_queue->list);
3094}
3095
3096static void init_mad_qp(struct ib_mad_port_private *port_priv,
3097 struct ib_mad_qp_info *qp_info)
3098{
3099 qp_info->port_priv = port_priv;
3100 init_mad_queue(qp_info, &qp_info->send_queue);
3101 init_mad_queue(qp_info, &qp_info->recv_queue);
3102 INIT_LIST_HEAD(&qp_info->overflow_list);
3103 spin_lock_init(&qp_info->snoop_lock);
3104 qp_info->snoop_table = NULL;
3105 qp_info->snoop_table_size = 0;
3106 atomic_set(&qp_info->snoop_count, 0);
3107}
3108
3109static int create_mad_qp(struct ib_mad_qp_info *qp_info,
3110 enum ib_qp_type qp_type)
3111{
3112 struct ib_qp_init_attr qp_init_attr;
3113 int ret;
3114
3115 memset(&qp_init_attr, 0, sizeof qp_init_attr);
3116 qp_init_attr.send_cq = qp_info->port_priv->cq;
3117 qp_init_attr.recv_cq = qp_info->port_priv->cq;
3118 qp_init_attr.sq_sig_type = IB_SIGNAL_ALL_WR;
3119 qp_init_attr.cap.max_send_wr = mad_sendq_size;
3120 qp_init_attr.cap.max_recv_wr = mad_recvq_size;
3121 qp_init_attr.cap.max_send_sge = IB_MAD_SEND_REQ_MAX_SG;
3122 qp_init_attr.cap.max_recv_sge = IB_MAD_RECV_REQ_MAX_SG;
3123 qp_init_attr.qp_type = qp_type;
3124 qp_init_attr.port_num = qp_info->port_priv->port_num;
3125 qp_init_attr.qp_context = qp_info;
3126 qp_init_attr.event_handler = qp_event_handler;
3127 qp_info->qp = ib_create_qp(qp_info->port_priv->pd, &qp_init_attr);
3128 if (IS_ERR(qp_info->qp)) {
3129 dev_err(&qp_info->port_priv->device->dev,
3130 "Couldn't create ib_mad QP%d\n",
3131 get_spl_qp_index(qp_type));
3132 ret = PTR_ERR(qp_info->qp);
3133 goto error;
3134 }
3135 /* Use minimum queue sizes unless the CQ is resized */
3136 qp_info->send_queue.max_active = mad_sendq_size;
3137 qp_info->recv_queue.max_active = mad_recvq_size;
3138 return 0;
3139
3140error:
3141 return ret;
3142}
3143
3144static void destroy_mad_qp(struct ib_mad_qp_info *qp_info)
3145{
3146 if (!qp_info->qp)
3147 return;
3148
1da177e4 3149 ib_destroy_qp(qp_info->qp);
6044ec88 3150 kfree(qp_info->snoop_table);
3151}
3152
3153/*
3154 * Open the port
3155 * Create the QP, PD, MR, and CQ if needed
3156 */
3157static int ib_mad_port_open(struct ib_device *device,
3158 int port_num)
3159{
3160 int ret, cq_size;
3161 struct ib_mad_port_private *port_priv;
3162 unsigned long flags;
3163 char name[sizeof "ib_mad123"];
fac70d51 3164 int has_smi;
1da177e4 3165
3166 if (WARN_ON(rdma_max_mad_size(device, port_num) < IB_MGMT_MAD_SIZE))
3167 return -EFAULT;
3168
3169 if (WARN_ON(rdma_cap_opa_mad(device, port_num) &&
3170 rdma_max_mad_size(device, port_num) < OPA_MGMT_MAD_SIZE))
3171 return -EFAULT;
3172
1da177e4 3173 /* Create new device info */
de6eb66b 3174 port_priv = kzalloc(sizeof *port_priv, GFP_KERNEL);
27162432 3175 if (!port_priv)
1da177e4 3176 return -ENOMEM;
de6eb66b 3177
3178 port_priv->device = device;
3179 port_priv->port_num = port_num;
3180 spin_lock_init(&port_priv->reg_lock);
3181 init_mad_qp(port_priv, &port_priv->qp_info[0]);
3182 init_mad_qp(port_priv, &port_priv->qp_info[1]);
3183
fac70d51 3184 cq_size = mad_sendq_size + mad_recvq_size;
29541e3a 3185 has_smi = rdma_cap_ib_smi(device, port_num);
3186 if (has_smi)
3187 cq_size *= 2;
3188
d53e11fd 3189 port_priv->cq = ib_alloc_cq(port_priv->device, port_priv, cq_size, 0,
f794809a 3190 IB_POLL_UNBOUND_WORKQUEUE);
1da177e4 3191 if (IS_ERR(port_priv->cq)) {
7ef5d4b0 3192 dev_err(&device->dev, "Couldn't create ib_mad CQ\n");
1da177e4
LT
3193 ret = PTR_ERR(port_priv->cq);
3194 goto error3;
3195 }
3196
ed082d36 3197 port_priv->pd = ib_alloc_pd(device, 0);
1da177e4 3198 if (IS_ERR(port_priv->pd)) {
7ef5d4b0 3199 dev_err(&device->dev, "Couldn't create ib_mad PD\n");
3200 ret = PTR_ERR(port_priv->pd);
3201 goto error4;
3202 }
3203
3204 if (has_smi) {
3205 ret = create_mad_qp(&port_priv->qp_info[0], IB_QPT_SMI);
3206 if (ret)
3207 goto error6;
3208 }
1da177e4
LT
3209 ret = create_mad_qp(&port_priv->qp_info[1], IB_QPT_GSI);
3210 if (ret)
3211 goto error7;
3212
3213 snprintf(name, sizeof name, "ib_mad%d", port_num);
1c99e299 3214 port_priv->wq = alloc_ordered_workqueue(name, WQ_MEM_RECLAIM);
3215 if (!port_priv->wq) {
3216 ret = -ENOMEM;
3217 goto error8;
3218 }
1da177e4 3219
3220 spin_lock_irqsave(&ib_mad_port_list_lock, flags);
3221 list_add_tail(&port_priv->port_list, &ib_mad_port_list);
3222 spin_unlock_irqrestore(&ib_mad_port_list_lock, flags);
3223
3224 ret = ib_mad_port_start(port_priv);
3225 if (ret) {
7ef5d4b0 3226 dev_err(&device->dev, "Couldn't start port\n");
1da177e4
LT
3227 goto error9;
3228 }
3229
3230 return 0;
3231
3232error9:
3233 spin_lock_irqsave(&ib_mad_port_list_lock, flags);
3234 list_del_init(&port_priv->port_list);
3235 spin_unlock_irqrestore(&ib_mad_port_list_lock, flags);
3236
3237 destroy_workqueue(port_priv->wq);
3238error8:
3239 destroy_mad_qp(&port_priv->qp_info[1]);
3240error7:
3241 destroy_mad_qp(&port_priv->qp_info[0]);
3242error6:
3243 ib_dealloc_pd(port_priv->pd);
3244error4:
d53e11fd 3245 ib_free_cq(port_priv->cq);
3246 cleanup_recv_queue(&port_priv->qp_info[1]);
3247 cleanup_recv_queue(&port_priv->qp_info[0]);
3248error3:
3249 kfree(port_priv);
3250
3251 return ret;
3252}
3253
3254/*
3255 * Close the port
3256 * If there are no classes using the port, free the port
3257 * resources (CQ, MR, PD, QP) and remove the port's info structure
3258 */
3259static int ib_mad_port_close(struct ib_device *device, int port_num)
3260{
3261 struct ib_mad_port_private *port_priv;
3262 unsigned long flags;
3263
3264 spin_lock_irqsave(&ib_mad_port_list_lock, flags);
3265 port_priv = __ib_get_mad_port(device, port_num);
3266 if (port_priv == NULL) {
3267 spin_unlock_irqrestore(&ib_mad_port_list_lock, flags);
7ef5d4b0 3268 dev_err(&device->dev, "Port %d not found\n", port_num);
3269 return -ENODEV;
3270 }
dc05980d 3271 list_del_init(&port_priv->port_list);
3272 spin_unlock_irqrestore(&ib_mad_port_list_lock, flags);
3273
3274 destroy_workqueue(port_priv->wq);
3275 destroy_mad_qp(&port_priv->qp_info[1]);
3276 destroy_mad_qp(&port_priv->qp_info[0]);
1da177e4 3277 ib_dealloc_pd(port_priv->pd);
d53e11fd 3278 ib_free_cq(port_priv->cq);
3279 cleanup_recv_queue(&port_priv->qp_info[1]);
3280 cleanup_recv_queue(&port_priv->qp_info[0]);
3281 /* XXX: Handle deallocation of MAD registration tables */
3282
3283 kfree(port_priv);
3284
3285 return 0;
3286}
3287
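/*
 * Client "add" callback: bring up MAD and agent services on every port
 * that supports MADs, unwinding already-opened ports on failure.
 */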
3288static void ib_mad_init_device(struct ib_device *device)
3289{
4139032b 3290 int start, i;
1da177e4 3291
4139032b 3292 start = rdma_start_port(device);
4ab6fb7e 3293
4139032b 3294 for (i = start; i <= rdma_end_port(device); i++) {
c757dea8 3295 if (!rdma_cap_ib_mad(device, i))
3296 continue;
3297
4ab6fb7e 3298 if (ib_mad_port_open(device, i)) {
7ef5d4b0 3299 dev_err(&device->dev, "Couldn't open port %d\n", i);
4ab6fb7e 3300 goto error;
1da177e4 3301 }
4ab6fb7e 3302 if (ib_agent_port_open(device, i)) {
3303 dev_err(&device->dev,
3304 "Couldn't open port %d for agents\n", i);
4ab6fb7e 3305 goto error_agent;
3306 }
3307 }
f68bcc2d 3308 return;
1da177e4 3309
3310error_agent:
3311 if (ib_mad_port_close(device, i))
7ef5d4b0 3312 dev_err(&device->dev, "Couldn't close port %d\n", i);
3313
3314error:
827f2a8b 3315 while (--i >= start) {
c757dea8 3316 if (!rdma_cap_ib_mad(device, i))
827f2a8b 3317 continue;
4ab6fb7e 3318
4ab6fb7e 3319 if (ib_agent_port_close(device, i))
3320 dev_err(&device->dev,
3321 "Couldn't close port %d for agents\n", i);
4ab6fb7e 3322 if (ib_mad_port_close(device, i))
7ef5d4b0 3323 dev_err(&device->dev, "Couldn't close port %d\n", i);
1da177e4 3324 }
3325}
3326
7c1eb45a 3327static void ib_mad_remove_device(struct ib_device *device, void *client_data)
1da177e4 3328{
ea1075ed 3329 unsigned int i;
827f2a8b 3330
ea1075ed 3331 rdma_for_each_port (device, i) {
c757dea8 3332 if (!rdma_cap_ib_mad(device, i))
3333 continue;
3334
3335 if (ib_agent_port_close(device, i))
7ef5d4b0 3336 dev_err(&device->dev,
3337 "Couldn't close port %d for agents\n", i);
3338 if (ib_mad_port_close(device, i))
3339 dev_err(&device->dev, "Couldn't close port %d\n", i);
3340 }
3341}
3342
3343static struct ib_client mad_client = {
3344 .name = "mad",
3345 .add = ib_mad_init_device,
3346 .remove = ib_mad_remove_device
3347};
3348
4c2cb422 3349int ib_mad_init(void)
1da177e4 3350{
3351 mad_recvq_size = min(mad_recvq_size, IB_MAD_QP_MAX_SIZE);
3352 mad_recvq_size = max(mad_recvq_size, IB_MAD_QP_MIN_SIZE);
3353
3354 mad_sendq_size = min(mad_sendq_size, IB_MAD_QP_MAX_SIZE);
3355 mad_sendq_size = max(mad_sendq_size, IB_MAD_QP_MIN_SIZE);
3356
3357 INIT_LIST_HEAD(&ib_mad_port_list);
3358
9a41e38a 3359 /* Client ID 0 is used for snoop-only clients */
3360 idr_alloc(&ib_mad_clients, NULL, 0, 0, GFP_KERNEL);
3361
1da177e4 3362 if (ib_register_client(&mad_client)) {
7ef5d4b0 3363 pr_err("Couldn't register ib_mad client\n");
c9082e51 3364 return -EINVAL;
3365 }
3366
3367 return 0;
3368}
3369
4c2cb422 3370void ib_mad_cleanup(void)
3371{
3372 ib_unregister_client(&mad_client);
1da177e4 3373}