/*
 * Copyright (c) 2004-2007 Voltaire, Inc.  All rights reserved.
 * Copyright (c) 2005 Intel Corporation.  All rights reserved.
 * Copyright (c) 2005 Mellanox Technologies Ltd.  All rights reserved.
 * Copyright (c) 2009 HNR Consulting. All rights reserved.
 * Copyright (c) 2014 Intel Corporation.  All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/dma-mapping.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <rdma/ib_cache.h>

#include "mad_priv.h"
#include "mad_rmpp.h"
#include "smi.h"
#include "opa_smi.h"
#include "agent.h"
#include "core_priv.h"

static int mad_sendq_size = IB_MAD_QP_SEND_SIZE;
static int mad_recvq_size = IB_MAD_QP_RECV_SIZE;

module_param_named(send_queue_size, mad_sendq_size, int, 0444);
MODULE_PARM_DESC(send_queue_size, "Size of send queue in number of work requests");
module_param_named(recv_queue_size, mad_recvq_size, int, 0444);
MODULE_PARM_DESC(recv_queue_size, "Size of receive queue in number of work requests");

static struct list_head ib_mad_port_list;
static u32 ib_mad_client_id = 0;

/* Port list lock */
static DEFINE_SPINLOCK(ib_mad_port_list_lock);

/* Forward declarations */
static int method_in_use(struct ib_mad_mgmt_method_table **method,
			 struct ib_mad_reg_req *mad_reg_req);
static void remove_mad_reg_req(struct ib_mad_agent_private *priv);
static struct ib_mad_agent_private *find_mad_agent(
					struct ib_mad_port_private *port_priv,
					const struct ib_mad_hdr *mad);
static int ib_mad_post_receive_mads(struct ib_mad_qp_info *qp_info,
				    struct ib_mad_private *mad);
static void cancel_mads(struct ib_mad_agent_private *mad_agent_priv);
static void timeout_sends(struct work_struct *work);
static void local_completions(struct work_struct *work);
static int add_nonoui_reg_req(struct ib_mad_reg_req *mad_reg_req,
			      struct ib_mad_agent_private *agent_priv,
			      u8 mgmt_class);
static int add_oui_reg_req(struct ib_mad_reg_req *mad_reg_req,
			   struct ib_mad_agent_private *agent_priv);
static bool ib_mad_send_error(struct ib_mad_port_private *port_priv,
			      struct ib_wc *wc);
static void ib_mad_send_done(struct ib_cq *cq, struct ib_wc *wc);

/*
 * Returns a ib_mad_port_private structure or NULL for a device/port
 * Assumes ib_mad_port_list_lock is being held
 */
static inline struct ib_mad_port_private *
__ib_get_mad_port(struct ib_device *device, int port_num)
{
	struct ib_mad_port_private *entry;

	list_for_each_entry(entry, &ib_mad_port_list, port_list) {
		if (entry->device == device && entry->port_num == port_num)
			return entry;
	}
	return NULL;
}

/*
 * Wrapper function to return a ib_mad_port_private structure or NULL
 * for a device/port
 */
static inline struct ib_mad_port_private *
ib_get_mad_port(struct ib_device *device, int port_num)
{
	struct ib_mad_port_private *entry;
	unsigned long flags;

	spin_lock_irqsave(&ib_mad_port_list_lock, flags);
	entry = __ib_get_mad_port(device, port_num);
	spin_unlock_irqrestore(&ib_mad_port_list_lock, flags);

	return entry;
}

static inline u8 convert_mgmt_class(u8 mgmt_class)
{
	/* Alias IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE to 0 */
	return mgmt_class == IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE ?
		0 : mgmt_class;
}

static int get_spl_qp_index(enum ib_qp_type qp_type)
{
	switch (qp_type)
	{
	case IB_QPT_SMI:
		return 0;
	case IB_QPT_GSI:
		return 1;
	default:
		return -1;
	}
}

static int vendor_class_index(u8 mgmt_class)
{
	return mgmt_class - IB_MGMT_CLASS_VENDOR_RANGE2_START;
}

static int is_vendor_class(u8 mgmt_class)
{
	if ((mgmt_class < IB_MGMT_CLASS_VENDOR_RANGE2_START) ||
	    (mgmt_class > IB_MGMT_CLASS_VENDOR_RANGE2_END))
		return 0;
	return 1;
}

static int is_vendor_oui(char *oui)
{
	if (oui[0] || oui[1] || oui[2])
		return 1;
	return 0;
}

static int is_vendor_method_in_use(
		struct ib_mad_mgmt_vendor_class *vendor_class,
		struct ib_mad_reg_req *mad_reg_req)
{
	struct ib_mad_mgmt_method_table *method;
	int i;

	for (i = 0; i < MAX_MGMT_OUI; i++) {
		if (!memcmp(vendor_class->oui[i], mad_reg_req->oui, 3)) {
			method = vendor_class->method_table[i];
			if (method) {
				if (method_in_use(&method, mad_reg_req))
					return 1;
				else
					break;
			}
		}
	}
	return 0;
}

int ib_response_mad(const struct ib_mad_hdr *hdr)
{
	return ((hdr->method & IB_MGMT_METHOD_RESP) ||
		(hdr->method == IB_MGMT_METHOD_TRAP_REPRESS) ||
		((hdr->mgmt_class == IB_MGMT_CLASS_BM) &&
		 (hdr->attr_mod & IB_BM_ATTR_MOD_RESP)));
}
EXPORT_SYMBOL(ib_response_mad);

/*
 * ib_register_mad_agent - Register to send/receive MADs
 */
struct ib_mad_agent *ib_register_mad_agent(struct ib_device *device,
					   u8 port_num,
					   enum ib_qp_type qp_type,
					   struct ib_mad_reg_req *mad_reg_req,
					   u8 rmpp_version,
					   ib_mad_send_handler send_handler,
					   ib_mad_recv_handler recv_handler,
					   void *context,
					   u32 registration_flags)
{
	struct ib_mad_port_private *port_priv;
	struct ib_mad_agent *ret = ERR_PTR(-EINVAL);
	struct ib_mad_agent_private *mad_agent_priv;
	struct ib_mad_reg_req *reg_req = NULL;
	struct ib_mad_mgmt_class_table *class;
	struct ib_mad_mgmt_vendor_class_table *vendor;
	struct ib_mad_mgmt_vendor_class *vendor_class;
	struct ib_mad_mgmt_method_table *method;
	int ret2, qpn;
	unsigned long flags;
	u8 mgmt_class, vclass;

	/* Validate parameters */
	qpn = get_spl_qp_index(qp_type);
	if (qpn == -1) {
		dev_notice(&device->dev,
			   "ib_register_mad_agent: invalid QP Type %d\n",
			   qp_type);
		goto error1;
	}

	if (rmpp_version && rmpp_version != IB_MGMT_RMPP_VERSION) {
		dev_notice(&device->dev,
			   "ib_register_mad_agent: invalid RMPP Version %u\n",
			   rmpp_version);
		goto error1;
	}

	/* Validate MAD registration request if supplied */
	if (mad_reg_req) {
		if (mad_reg_req->mgmt_class_version >= MAX_MGMT_VERSION) {
			dev_notice(&device->dev,
				   "ib_register_mad_agent: invalid Class Version %u\n",
				   mad_reg_req->mgmt_class_version);
			goto error1;
		}
		if (!recv_handler) {
			dev_notice(&device->dev,
				   "ib_register_mad_agent: no recv_handler\n");
			goto error1;
		}
		if (mad_reg_req->mgmt_class >= MAX_MGMT_CLASS) {
			/*
			 * IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE is the only
			 * one in this range currently allowed
			 */
			if (mad_reg_req->mgmt_class !=
			    IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE) {
				dev_notice(&device->dev,
					   "ib_register_mad_agent: Invalid Mgmt Class 0x%x\n",
					   mad_reg_req->mgmt_class);
				goto error1;
			}
		} else if (mad_reg_req->mgmt_class == 0) {
			/*
			 * Class 0 is reserved in IBA and is used for
			 * aliasing of IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE
			 */
			dev_notice(&device->dev,
				   "ib_register_mad_agent: Invalid Mgmt Class 0\n");
			goto error1;
		} else if (is_vendor_class(mad_reg_req->mgmt_class)) {
			/*
			 * If class is in "new" vendor range,
			 * ensure supplied OUI is not zero
			 */
			if (!is_vendor_oui(mad_reg_req->oui)) {
				dev_notice(&device->dev,
					   "ib_register_mad_agent: No OUI specified for class 0x%x\n",
					   mad_reg_req->mgmt_class);
				goto error1;
			}
		}
		/* Make sure class supplied is consistent with RMPP */
		if (!ib_is_mad_class_rmpp(mad_reg_req->mgmt_class)) {
			if (rmpp_version) {
				dev_notice(&device->dev,
					   "ib_register_mad_agent: RMPP version for non-RMPP class 0x%x\n",
					   mad_reg_req->mgmt_class);
				goto error1;
			}
		}

		/* Make sure class supplied is consistent with QP type */
		if (qp_type == IB_QPT_SMI) {
			if ((mad_reg_req->mgmt_class !=
					IB_MGMT_CLASS_SUBN_LID_ROUTED) &&
			    (mad_reg_req->mgmt_class !=
					IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE)) {
				dev_notice(&device->dev,
					   "ib_register_mad_agent: Invalid SM QP type: class 0x%x\n",
					   mad_reg_req->mgmt_class);
				goto error1;
			}
		} else {
			if ((mad_reg_req->mgmt_class ==
					IB_MGMT_CLASS_SUBN_LID_ROUTED) ||
			    (mad_reg_req->mgmt_class ==
					IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE)) {
				dev_notice(&device->dev,
					   "ib_register_mad_agent: Invalid GS QP type: class 0x%x\n",
					   mad_reg_req->mgmt_class);
				goto error1;
			}
		}
	} else {
		/* No registration request supplied */
		if (!send_handler)
			goto error1;
		if (registration_flags & IB_MAD_USER_RMPP)
			goto error1;
	}

	/* Validate device and port */
	port_priv = ib_get_mad_port(device, port_num);
	if (!port_priv) {
		dev_notice(&device->dev, "ib_register_mad_agent: Invalid port\n");
		ret = ERR_PTR(-ENODEV);
		goto error1;
	}

	/* Verify the QP requested is supported.  For example, Ethernet devices
	 * will not have QP0 */
	if (!port_priv->qp_info[qpn].qp) {
		dev_notice(&device->dev,
			   "ib_register_mad_agent: QP %d not supported\n", qpn);
		ret = ERR_PTR(-EPROTONOSUPPORT);
		goto error1;
	}

	/* Allocate structures */
	mad_agent_priv = kzalloc(sizeof *mad_agent_priv, GFP_KERNEL);
	if (!mad_agent_priv) {
		ret = ERR_PTR(-ENOMEM);
		goto error1;
	}

	if (mad_reg_req) {
		reg_req = kmemdup(mad_reg_req, sizeof *reg_req, GFP_KERNEL);
		if (!reg_req) {
			ret = ERR_PTR(-ENOMEM);
			goto error3;
		}
	}

	/* Now, fill in the various structures */
	mad_agent_priv->qp_info = &port_priv->qp_info[qpn];
	mad_agent_priv->reg_req = reg_req;
	mad_agent_priv->agent.rmpp_version = rmpp_version;
	mad_agent_priv->agent.device = device;
	mad_agent_priv->agent.recv_handler = recv_handler;
	mad_agent_priv->agent.send_handler = send_handler;
	mad_agent_priv->agent.context = context;
	mad_agent_priv->agent.qp = port_priv->qp_info[qpn].qp;
	mad_agent_priv->agent.port_num = port_num;
	mad_agent_priv->agent.flags = registration_flags;
	spin_lock_init(&mad_agent_priv->lock);
	INIT_LIST_HEAD(&mad_agent_priv->send_list);
	INIT_LIST_HEAD(&mad_agent_priv->wait_list);
	INIT_LIST_HEAD(&mad_agent_priv->done_list);
	INIT_LIST_HEAD(&mad_agent_priv->rmpp_list);
	INIT_DELAYED_WORK(&mad_agent_priv->timed_work, timeout_sends);
	INIT_LIST_HEAD(&mad_agent_priv->local_list);
	INIT_WORK(&mad_agent_priv->local_work, local_completions);
	atomic_set(&mad_agent_priv->refcount, 1);
	init_completion(&mad_agent_priv->comp);

	spin_lock_irqsave(&port_priv->reg_lock, flags);
	mad_agent_priv->agent.hi_tid = ++ib_mad_client_id;

	/*
	 * Make sure MAD registration (if supplied)
	 * is non overlapping with any existing ones
	 */
	if (mad_reg_req) {
		mgmt_class = convert_mgmt_class(mad_reg_req->mgmt_class);
		if (!is_vendor_class(mgmt_class)) {
			class = port_priv->version[mad_reg_req->
						   mgmt_class_version].class;
			if (class) {
				method = class->method_table[mgmt_class];
				if (method) {
					if (method_in_use(&method,
							   mad_reg_req))
						goto error4;
				}
			}
			ret2 = add_nonoui_reg_req(mad_reg_req, mad_agent_priv,
						  mgmt_class);
		} else {
			/* "New" vendor class range */
			vendor = port_priv->version[mad_reg_req->
						    mgmt_class_version].vendor;
			if (vendor) {
				vclass = vendor_class_index(mgmt_class);
				vendor_class = vendor->vendor_class[vclass];
				if (vendor_class) {
					if (is_vendor_method_in_use(
							vendor_class,
							mad_reg_req))
						goto error4;
				}
			}
			ret2 = add_oui_reg_req(mad_reg_req, mad_agent_priv);
		}
		if (ret2) {
			ret = ERR_PTR(ret2);
			goto error4;
		}
	}

	/* Add mad agent into port's agent list */
	list_add_tail(&mad_agent_priv->agent_list, &port_priv->agent_list);
	spin_unlock_irqrestore(&port_priv->reg_lock, flags);

	return &mad_agent_priv->agent;

error4:
	spin_unlock_irqrestore(&port_priv->reg_lock, flags);
	kfree(reg_req);
error3:
	kfree(mad_agent_priv);
error1:
	return ret;
}
EXPORT_SYMBOL(ib_register_mad_agent);

static inline int is_snooping_sends(int mad_snoop_flags)
{
	return (mad_snoop_flags &
		(/*IB_MAD_SNOOP_POSTED_SENDS |
		 IB_MAD_SNOOP_RMPP_SENDS |*/
		 IB_MAD_SNOOP_SEND_COMPLETIONS /*|
		 IB_MAD_SNOOP_RMPP_SEND_COMPLETIONS*/));
}

static inline int is_snooping_recvs(int mad_snoop_flags)
{
	return (mad_snoop_flags &
		(IB_MAD_SNOOP_RECVS /*|
		 IB_MAD_SNOOP_RMPP_RECVS*/));
}

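/*
 * Add a snoop agent to the QP's snoop table: reuse a free slot if one
 * exists, otherwise grow the table by one entry under snoop_lock.
 * Returns the table index used, or a negative errno.
 */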
static int register_snoop_agent(struct ib_mad_qp_info *qp_info,
				struct ib_mad_snoop_private *mad_snoop_priv)
{
	struct ib_mad_snoop_private **new_snoop_table;
	unsigned long flags;
	int i;

	spin_lock_irqsave(&qp_info->snoop_lock, flags);
	/* Check for empty slot in array. */
	for (i = 0; i < qp_info->snoop_table_size; i++)
		if (!qp_info->snoop_table[i])
			break;

	if (i == qp_info->snoop_table_size) {
		/* Grow table. */
		new_snoop_table = krealloc(qp_info->snoop_table,
					   sizeof mad_snoop_priv *
					   (qp_info->snoop_table_size + 1),
					   GFP_ATOMIC);
		if (!new_snoop_table) {
			i = -ENOMEM;
			goto out;
		}

		qp_info->snoop_table = new_snoop_table;
		qp_info->snoop_table_size++;
	}
	qp_info->snoop_table[i] = mad_snoop_priv;
	atomic_inc(&qp_info->snoop_count);
out:
	spin_unlock_irqrestore(&qp_info->snoop_lock, flags);
	return i;
}

struct ib_mad_agent *ib_register_mad_snoop(struct ib_device *device,
					   u8 port_num,
					   enum ib_qp_type qp_type,
					   int mad_snoop_flags,
					   ib_mad_snoop_handler snoop_handler,
					   ib_mad_recv_handler recv_handler,
					   void *context)
{
	struct ib_mad_port_private *port_priv;
	struct ib_mad_agent *ret;
	struct ib_mad_snoop_private *mad_snoop_priv;
	int qpn;

	/* Validate parameters */
	if ((is_snooping_sends(mad_snoop_flags) && !snoop_handler) ||
	    (is_snooping_recvs(mad_snoop_flags) && !recv_handler)) {
		ret = ERR_PTR(-EINVAL);
		goto error1;
	}
	qpn = get_spl_qp_index(qp_type);
	if (qpn == -1) {
		ret = ERR_PTR(-EINVAL);
		goto error1;
	}
	port_priv = ib_get_mad_port(device, port_num);
	if (!port_priv) {
		ret = ERR_PTR(-ENODEV);
		goto error1;
	}
	/* Allocate structures */
	mad_snoop_priv = kzalloc(sizeof *mad_snoop_priv, GFP_KERNEL);
	if (!mad_snoop_priv) {
		ret = ERR_PTR(-ENOMEM);
		goto error1;
	}

	/* Now, fill in the various structures */
	mad_snoop_priv->qp_info = &port_priv->qp_info[qpn];
	mad_snoop_priv->agent.device = device;
	mad_snoop_priv->agent.recv_handler = recv_handler;
	mad_snoop_priv->agent.snoop_handler = snoop_handler;
	mad_snoop_priv->agent.context = context;
	mad_snoop_priv->agent.qp = port_priv->qp_info[qpn].qp;
	mad_snoop_priv->agent.port_num = port_num;
	mad_snoop_priv->mad_snoop_flags = mad_snoop_flags;
	init_completion(&mad_snoop_priv->comp);
	mad_snoop_priv->snoop_index = register_snoop_agent(
						&port_priv->qp_info[qpn],
						mad_snoop_priv);
	if (mad_snoop_priv->snoop_index < 0) {
		ret = ERR_PTR(mad_snoop_priv->snoop_index);
		goto error2;
	}

	atomic_set(&mad_snoop_priv->refcount, 1);
	return &mad_snoop_priv->agent;

error2:
	kfree(mad_snoop_priv);
error1:
	return ret;
}
EXPORT_SYMBOL(ib_register_mad_snoop);

static inline void deref_mad_agent(struct ib_mad_agent_private *mad_agent_priv)
{
	if (atomic_dec_and_test(&mad_agent_priv->refcount))
		complete(&mad_agent_priv->comp);
}

static inline void deref_snoop_agent(struct ib_mad_snoop_private *mad_snoop_priv)
{
	if (atomic_dec_and_test(&mad_snoop_priv->refcount))
		complete(&mad_snoop_priv->comp);
}

static void unregister_mad_agent(struct ib_mad_agent_private *mad_agent_priv)
{
	struct ib_mad_port_private *port_priv;
	unsigned long flags;

	/* Note that we could still be handling received MADs */

	/*
	 * Canceling all sends results in dropping received response
	 * MADs, preventing us from queuing additional work
	 */
	cancel_mads(mad_agent_priv);
	port_priv = mad_agent_priv->qp_info->port_priv;
	cancel_delayed_work(&mad_agent_priv->timed_work);

	spin_lock_irqsave(&port_priv->reg_lock, flags);
	remove_mad_reg_req(mad_agent_priv);
	list_del(&mad_agent_priv->agent_list);
	spin_unlock_irqrestore(&port_priv->reg_lock, flags);

	flush_workqueue(port_priv->wq);
	ib_cancel_rmpp_recvs(mad_agent_priv);

	deref_mad_agent(mad_agent_priv);
	wait_for_completion(&mad_agent_priv->comp);

	kfree(mad_agent_priv->reg_req);
	kfree(mad_agent_priv);
}

static void unregister_mad_snoop(struct ib_mad_snoop_private *mad_snoop_priv)
{
	struct ib_mad_qp_info *qp_info;
	unsigned long flags;

	qp_info = mad_snoop_priv->qp_info;
	spin_lock_irqsave(&qp_info->snoop_lock, flags);
	qp_info->snoop_table[mad_snoop_priv->snoop_index] = NULL;
	atomic_dec(&qp_info->snoop_count);
	spin_unlock_irqrestore(&qp_info->snoop_lock, flags);

	deref_snoop_agent(mad_snoop_priv);
	wait_for_completion(&mad_snoop_priv->comp);

	kfree(mad_snoop_priv);
}

/*
 * ib_unregister_mad_agent - Unregisters a client from using MAD services
 */
int ib_unregister_mad_agent(struct ib_mad_agent *mad_agent)
{
	struct ib_mad_agent_private *mad_agent_priv;
	struct ib_mad_snoop_private *mad_snoop_priv;

	/* If the TID is zero, the agent can only snoop. */
	if (mad_agent->hi_tid) {
		mad_agent_priv = container_of(mad_agent,
					      struct ib_mad_agent_private,
					      agent);
		unregister_mad_agent(mad_agent_priv);
	} else {
		mad_snoop_priv = container_of(mad_agent,
					      struct ib_mad_snoop_private,
					      agent);
		unregister_mad_snoop(mad_snoop_priv);
	}
	return 0;
}
EXPORT_SYMBOL(ib_unregister_mad_agent);

static void dequeue_mad(struct ib_mad_list_head *mad_list)
{
	struct ib_mad_queue *mad_queue;
	unsigned long flags;

	BUG_ON(!mad_list->mad_queue);
	mad_queue = mad_list->mad_queue;
	spin_lock_irqsave(&mad_queue->lock, flags);
	list_del(&mad_list->list);
	mad_queue->count--;
	spin_unlock_irqrestore(&mad_queue->lock, flags);
}

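/*
 * snoop_send() and snoop_recv() hand each send completion or received MAD
 * to every snoop agent whose flags match, dropping snoop_lock around each
 * callback while holding a reference on the snoop agent.
 */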
static void snoop_send(struct ib_mad_qp_info *qp_info,
		       struct ib_mad_send_buf *send_buf,
		       struct ib_mad_send_wc *mad_send_wc,
		       int mad_snoop_flags)
{
	struct ib_mad_snoop_private *mad_snoop_priv;
	unsigned long flags;
	int i;

	spin_lock_irqsave(&qp_info->snoop_lock, flags);
	for (i = 0; i < qp_info->snoop_table_size; i++) {
		mad_snoop_priv = qp_info->snoop_table[i];
		if (!mad_snoop_priv ||
		    !(mad_snoop_priv->mad_snoop_flags & mad_snoop_flags))
			continue;

		atomic_inc(&mad_snoop_priv->refcount);
		spin_unlock_irqrestore(&qp_info->snoop_lock, flags);
		mad_snoop_priv->agent.snoop_handler(&mad_snoop_priv->agent,
						    send_buf, mad_send_wc);
		deref_snoop_agent(mad_snoop_priv);
		spin_lock_irqsave(&qp_info->snoop_lock, flags);
	}
	spin_unlock_irqrestore(&qp_info->snoop_lock, flags);
}

static void snoop_recv(struct ib_mad_qp_info *qp_info,
		       struct ib_mad_recv_wc *mad_recv_wc,
		       int mad_snoop_flags)
{
	struct ib_mad_snoop_private *mad_snoop_priv;
	unsigned long flags;
	int i;

	spin_lock_irqsave(&qp_info->snoop_lock, flags);
	for (i = 0; i < qp_info->snoop_table_size; i++) {
		mad_snoop_priv = qp_info->snoop_table[i];
		if (!mad_snoop_priv ||
		    !(mad_snoop_priv->mad_snoop_flags & mad_snoop_flags))
			continue;

		atomic_inc(&mad_snoop_priv->refcount);
		spin_unlock_irqrestore(&qp_info->snoop_lock, flags);
		mad_snoop_priv->agent.recv_handler(&mad_snoop_priv->agent, NULL,
						   mad_recv_wc);
		deref_snoop_agent(mad_snoop_priv);
		spin_lock_irqsave(&qp_info->snoop_lock, flags);
	}
	spin_unlock_irqrestore(&qp_info->snoop_lock, flags);
}

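/*
 * Synthesize a receive work completion for an SMP that is processed
 * locally (loopback), so it can be handed to the receive path as if it
 * had arrived on the wire from QP0.
 */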
static void build_smp_wc(struct ib_qp *qp, struct ib_cqe *cqe, u16 slid,
			 u16 pkey_index, u8 port_num, struct ib_wc *wc)
{
	memset(wc, 0, sizeof *wc);
	wc->wr_cqe = cqe;
	wc->status = IB_WC_SUCCESS;
	wc->opcode = IB_WC_RECV;
	wc->pkey_index = pkey_index;
	wc->byte_len = sizeof(struct ib_mad) + sizeof(struct ib_grh);
	wc->src_qp = IB_QP0;
	wc->qp = qp;
	wc->slid = slid;
	wc->sl = 0;
	wc->dlid_path_bits = 0;
	wc->port_num = port_num;
}

static size_t mad_priv_size(const struct ib_mad_private *mp)
{
	return sizeof(struct ib_mad_private) + mp->mad_size;
}

static struct ib_mad_private *alloc_mad_private(size_t mad_size, gfp_t flags)
{
	size_t size = sizeof(struct ib_mad_private) + mad_size;
	struct ib_mad_private *ret = kzalloc(size, flags);

	if (ret)
		ret->mad_size = mad_size;

	return ret;
}

static size_t port_mad_size(const struct ib_mad_port_private *port_priv)
{
	return rdma_max_mad_size(port_priv->device, port_priv->port_num);
}

static size_t mad_priv_dma_size(const struct ib_mad_private *mp)
{
	return sizeof(struct ib_grh) + mp->mad_size;
}

/*
 * Return 0 if SMP is to be sent
 * Return 1 if SMP was consumed locally (whether or not solicited)
 * Return < 0 if error
 */
static int handle_outgoing_dr_smp(struct ib_mad_agent_private *mad_agent_priv,
				  struct ib_mad_send_wr_private *mad_send_wr)
{
	int ret = 0;
	struct ib_smp *smp = mad_send_wr->send_buf.mad;
	struct opa_smp *opa_smp = (struct opa_smp *)smp;
	unsigned long flags;
	struct ib_mad_local_private *local;
	struct ib_mad_private *mad_priv;
	struct ib_mad_port_private *port_priv;
	struct ib_mad_agent_private *recv_mad_agent = NULL;
	struct ib_device *device = mad_agent_priv->agent.device;
	u8 port_num;
	struct ib_wc mad_wc;
	struct ib_ud_wr *send_wr = &mad_send_wr->send_wr;
	size_t mad_size = port_mad_size(mad_agent_priv->qp_info->port_priv);
	u16 out_mad_pkey_index = 0;
	u16 drslid;
	bool opa = rdma_cap_opa_mad(mad_agent_priv->qp_info->port_priv->device,
				    mad_agent_priv->qp_info->port_priv->port_num);

	if (rdma_cap_ib_switch(device) &&
	    smp->mgmt_class == IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE)
		port_num = send_wr->port_num;
	else
		port_num = mad_agent_priv->agent.port_num;

	/*
	 * Directed route handling starts if the initial LID routed part of
	 * a request or the ending LID routed part of a response is empty.
	 * If we are at the start of the LID routed part, don't update the
	 * hop_ptr or hop_cnt.  See section 14.2.2, Vol 1 IB spec.
	 */
	if (opa && smp->class_version == OPA_SMP_CLASS_VERSION) {
		u32 opa_drslid;

		if ((opa_get_smp_direction(opa_smp)
		     ? opa_smp->route.dr.dr_dlid : opa_smp->route.dr.dr_slid) ==
		     OPA_LID_PERMISSIVE &&
		     opa_smi_handle_dr_smp_send(opa_smp,
						rdma_cap_ib_switch(device),
						port_num) == IB_SMI_DISCARD) {
			ret = -EINVAL;
			dev_err(&device->dev, "OPA Invalid directed route\n");
			goto out;
		}
		opa_drslid = be32_to_cpu(opa_smp->route.dr.dr_slid);
		if (opa_drslid != be32_to_cpu(OPA_LID_PERMISSIVE) &&
		    opa_drslid & 0xffff0000) {
			ret = -EINVAL;
			dev_err(&device->dev, "OPA Invalid dr_slid 0x%x\n",
				opa_drslid);
			goto out;
		}
		drslid = (u16)(opa_drslid & 0x0000ffff);

		/* Check to post send on QP or process locally */
		if (opa_smi_check_local_smp(opa_smp, device) == IB_SMI_DISCARD &&
		    opa_smi_check_local_returning_smp(opa_smp, device) == IB_SMI_DISCARD)
			goto out;
	} else {
		if ((ib_get_smp_direction(smp) ? smp->dr_dlid : smp->dr_slid) ==
		     IB_LID_PERMISSIVE &&
		     smi_handle_dr_smp_send(smp, rdma_cap_ib_switch(device), port_num) ==
		     IB_SMI_DISCARD) {
			ret = -EINVAL;
			dev_err(&device->dev, "Invalid directed route\n");
			goto out;
		}
		drslid = be16_to_cpu(smp->dr_slid);

		/* Check to post send on QP or process locally */
		if (smi_check_local_smp(smp, device) == IB_SMI_DISCARD &&
		    smi_check_local_returning_smp(smp, device) == IB_SMI_DISCARD)
			goto out;
	}

	local = kmalloc(sizeof *local, GFP_ATOMIC);
	if (!local) {
		ret = -ENOMEM;
		dev_err(&device->dev, "No memory for ib_mad_local_private\n");
		goto out;
	}
	local->mad_priv = NULL;
	local->recv_mad_agent = NULL;
	mad_priv = alloc_mad_private(mad_size, GFP_ATOMIC);
	if (!mad_priv) {
		ret = -ENOMEM;
		dev_err(&device->dev, "No memory for local response MAD\n");
		kfree(local);
		goto out;
	}

	build_smp_wc(mad_agent_priv->agent.qp,
		     send_wr->wr.wr_cqe, drslid,
		     send_wr->pkey_index,
		     send_wr->port_num, &mad_wc);

	if (opa && smp->base_version == OPA_MGMT_BASE_VERSION) {
		mad_wc.byte_len = mad_send_wr->send_buf.hdr_len
					+ mad_send_wr->send_buf.data_len
					+ sizeof(struct ib_grh);
	}

	/* No GRH for DR SMP */
	ret = device->process_mad(device, 0, port_num, &mad_wc, NULL,
				  (const struct ib_mad_hdr *)smp, mad_size,
				  (struct ib_mad_hdr *)mad_priv->mad,
				  &mad_size, &out_mad_pkey_index);
	switch (ret)
	{
	case IB_MAD_RESULT_SUCCESS | IB_MAD_RESULT_REPLY:
		if (ib_response_mad((const struct ib_mad_hdr *)mad_priv->mad) &&
		    mad_agent_priv->agent.recv_handler) {
			local->mad_priv = mad_priv;
			local->recv_mad_agent = mad_agent_priv;
			/*
			 * Reference MAD agent until receive
			 * side of local completion handled
			 */
			atomic_inc(&mad_agent_priv->refcount);
		} else
			kfree(mad_priv);
		break;
	case IB_MAD_RESULT_SUCCESS | IB_MAD_RESULT_CONSUMED:
		kfree(mad_priv);
		break;
	case IB_MAD_RESULT_SUCCESS:
		/* Treat like an incoming receive MAD */
		port_priv = ib_get_mad_port(mad_agent_priv->agent.device,
					    mad_agent_priv->agent.port_num);
		if (port_priv) {
			memcpy(mad_priv->mad, smp, mad_priv->mad_size);
			recv_mad_agent = find_mad_agent(port_priv,
						(const struct ib_mad_hdr *)mad_priv->mad);
		}
		if (!port_priv || !recv_mad_agent) {
			/*
			 * No receiving agent so drop packet and
			 * generate send completion.
			 */
			kfree(mad_priv);
			break;
		}
		local->mad_priv = mad_priv;
		local->recv_mad_agent = recv_mad_agent;
		break;
	default:
		kfree(mad_priv);
		kfree(local);
		ret = -EINVAL;
		goto out;
	}

	local->mad_send_wr = mad_send_wr;
	if (opa) {
		local->mad_send_wr->send_wr.pkey_index = out_mad_pkey_index;
		local->return_wc_byte_len = mad_size;
	}
	/* Reference MAD agent until send side of local completion handled */
	atomic_inc(&mad_agent_priv->refcount);
	/* Queue local completion to local list */
	spin_lock_irqsave(&mad_agent_priv->lock, flags);
	list_add_tail(&local->completion_list, &mad_agent_priv->local_list);
	spin_unlock_irqrestore(&mad_agent_priv->lock, flags);
	queue_work(mad_agent_priv->qp_info->port_priv->wq,
		   &mad_agent_priv->local_work);
	ret = 1;
out:
	return ret;
}

static int get_pad_size(int hdr_len, int data_len, size_t mad_size)
{
	int seg_size, pad;

	seg_size = mad_size - hdr_len;
	if (data_len && seg_size) {
		pad = seg_size - data_len % seg_size;
		return pad == seg_size ? 0 : pad;
	} else
		return seg_size;
}

static void free_send_rmpp_list(struct ib_mad_send_wr_private *mad_send_wr)
{
	struct ib_rmpp_segment *s, *t;

	list_for_each_entry_safe(s, t, &mad_send_wr->rmpp_list, list) {
		list_del(&s->list);
		kfree(s);
	}
}

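/*
 * Allocate the per-segment data buffers for an RMPP send, zero any pad at
 * the end of the last segment, and set up the RMPP header (version, DATA
 * type, ACTIVE flag) in the send buffer.
 */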
static int alloc_send_rmpp_list(struct ib_mad_send_wr_private *send_wr,
				size_t mad_size, gfp_t gfp_mask)
{
	struct ib_mad_send_buf *send_buf = &send_wr->send_buf;
	struct ib_rmpp_mad *rmpp_mad = send_buf->mad;
	struct ib_rmpp_segment *seg = NULL;
	int left, seg_size, pad;

	send_buf->seg_size = mad_size - send_buf->hdr_len;
	send_buf->seg_rmpp_size = mad_size - IB_MGMT_RMPP_HDR;
	seg_size = send_buf->seg_size;
	pad = send_wr->pad;

	/* Allocate data segments. */
	for (left = send_buf->data_len + pad; left > 0; left -= seg_size) {
		seg = kmalloc(sizeof (*seg) + seg_size, gfp_mask);
		if (!seg) {
			dev_err(&send_buf->mad_agent->device->dev,
				"alloc_send_rmpp_segs: RMPP mem alloc failed for len %zd, gfp %#x\n",
				sizeof (*seg) + seg_size, gfp_mask);
			free_send_rmpp_list(send_wr);
			return -ENOMEM;
		}
		seg->num = ++send_buf->seg_count;
		list_add_tail(&seg->list, &send_wr->rmpp_list);
	}

	/* Zero any padding */
	if (pad)
		memset(seg->data + seg_size - pad, 0, pad);

	rmpp_mad->rmpp_hdr.rmpp_version = send_wr->mad_agent_priv->
					  agent.rmpp_version;
	rmpp_mad->rmpp_hdr.rmpp_type = IB_MGMT_RMPP_TYPE_DATA;
	ib_set_rmpp_flags(&rmpp_mad->rmpp_hdr, IB_MGMT_RMPP_FLAG_ACTIVE);

	send_wr->cur_seg = container_of(send_wr->rmpp_list.next,
					struct ib_rmpp_segment, list);
	send_wr->last_ack_seg = send_wr->cur_seg;
	return 0;
}

int ib_mad_kernel_rmpp_agent(const struct ib_mad_agent *agent)
{
	return agent->rmpp_version && !(agent->flags & IB_MAD_USER_RMPP);
}
EXPORT_SYMBOL(ib_mad_kernel_rmpp_agent);

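/*
 * ib_create_send_mad - Allocate and initialize a MAD send buffer.  The
 * buffer is sized for an IB or an OPA MAD depending on the port's
 * capabilities, and RMPP segments are allocated when rmpp_active is set.
 */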
struct ib_mad_send_buf * ib_create_send_mad(struct ib_mad_agent *mad_agent,
					    u32 remote_qpn, u16 pkey_index,
					    int rmpp_active,
					    int hdr_len, int data_len,
					    gfp_t gfp_mask,
					    u8 base_version)
{
	struct ib_mad_agent_private *mad_agent_priv;
	struct ib_mad_send_wr_private *mad_send_wr;
	int pad, message_size, ret, size;
	void *buf;
	size_t mad_size;
	bool opa;

	mad_agent_priv = container_of(mad_agent, struct ib_mad_agent_private,
				      agent);

	opa = rdma_cap_opa_mad(mad_agent->device, mad_agent->port_num);

	if (opa && base_version == OPA_MGMT_BASE_VERSION)
		mad_size = sizeof(struct opa_mad);
	else
		mad_size = sizeof(struct ib_mad);

	pad = get_pad_size(hdr_len, data_len, mad_size);
	message_size = hdr_len + data_len + pad;

	if (ib_mad_kernel_rmpp_agent(mad_agent)) {
		if (!rmpp_active && message_size > mad_size)
			return ERR_PTR(-EINVAL);
	} else
		if (rmpp_active || message_size > mad_size)
			return ERR_PTR(-EINVAL);

	size = rmpp_active ? hdr_len : mad_size;
	buf = kzalloc(sizeof *mad_send_wr + size, gfp_mask);
	if (!buf)
		return ERR_PTR(-ENOMEM);

	mad_send_wr = buf + size;
	INIT_LIST_HEAD(&mad_send_wr->rmpp_list);
	mad_send_wr->send_buf.mad = buf;
	mad_send_wr->send_buf.hdr_len = hdr_len;
	mad_send_wr->send_buf.data_len = data_len;
	mad_send_wr->pad = pad;

	mad_send_wr->mad_agent_priv = mad_agent_priv;
	mad_send_wr->sg_list[0].length = hdr_len;
	mad_send_wr->sg_list[0].lkey = mad_agent->qp->pd->local_dma_lkey;

	/* OPA MADs don't have to be the full 2048 bytes */
	if (opa && base_version == OPA_MGMT_BASE_VERSION &&
	    data_len < mad_size - hdr_len)
		mad_send_wr->sg_list[1].length = data_len;
	else
		mad_send_wr->sg_list[1].length = mad_size - hdr_len;

	mad_send_wr->sg_list[1].lkey = mad_agent->qp->pd->local_dma_lkey;

	mad_send_wr->mad_list.cqe.done = ib_mad_send_done;

	mad_send_wr->send_wr.wr.wr_cqe = &mad_send_wr->mad_list.cqe;
	mad_send_wr->send_wr.wr.sg_list = mad_send_wr->sg_list;
	mad_send_wr->send_wr.wr.num_sge = 2;
	mad_send_wr->send_wr.wr.opcode = IB_WR_SEND;
	mad_send_wr->send_wr.wr.send_flags = IB_SEND_SIGNALED;
	mad_send_wr->send_wr.remote_qpn = remote_qpn;
	mad_send_wr->send_wr.remote_qkey = IB_QP_SET_QKEY;
	mad_send_wr->send_wr.pkey_index = pkey_index;

	if (rmpp_active) {
		ret = alloc_send_rmpp_list(mad_send_wr, mad_size, gfp_mask);
		if (ret) {
			kfree(buf);
			return ERR_PTR(ret);
		}
	}

	mad_send_wr->send_buf.mad_agent = mad_agent;
	atomic_inc(&mad_agent_priv->refcount);
	return &mad_send_wr->send_buf;
}
EXPORT_SYMBOL(ib_create_send_mad);

int ib_get_mad_data_offset(u8 mgmt_class)
{
	if (mgmt_class == IB_MGMT_CLASS_SUBN_ADM)
		return IB_MGMT_SA_HDR;
	else if ((mgmt_class == IB_MGMT_CLASS_DEVICE_MGMT) ||
		 (mgmt_class == IB_MGMT_CLASS_DEVICE_ADM) ||
		 (mgmt_class == IB_MGMT_CLASS_BIS))
		return IB_MGMT_DEVICE_HDR;
	else if ((mgmt_class >= IB_MGMT_CLASS_VENDOR_RANGE2_START) &&
		 (mgmt_class <= IB_MGMT_CLASS_VENDOR_RANGE2_END))
		return IB_MGMT_VENDOR_HDR;
	else
		return IB_MGMT_MAD_HDR;
}
EXPORT_SYMBOL(ib_get_mad_data_offset);

int ib_is_mad_class_rmpp(u8 mgmt_class)
{
	if ((mgmt_class == IB_MGMT_CLASS_SUBN_ADM) ||
	    (mgmt_class == IB_MGMT_CLASS_DEVICE_MGMT) ||
	    (mgmt_class == IB_MGMT_CLASS_DEVICE_ADM) ||
	    (mgmt_class == IB_MGMT_CLASS_BIS) ||
	    ((mgmt_class >= IB_MGMT_CLASS_VENDOR_RANGE2_START) &&
	     (mgmt_class <= IB_MGMT_CLASS_VENDOR_RANGE2_END)))
		return 1;
	return 0;
}
EXPORT_SYMBOL(ib_is_mad_class_rmpp);

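/*
 * ib_get_rmpp_segment - Walk the RMPP segment list (forward or backward
 * from the cached cur_seg) and return the data pointer for the requested
 * segment number.
 */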
void *ib_get_rmpp_segment(struct ib_mad_send_buf *send_buf, int seg_num)
{
	struct ib_mad_send_wr_private *mad_send_wr;
	struct list_head *list;

	mad_send_wr = container_of(send_buf, struct ib_mad_send_wr_private,
				   send_buf);
	list = &mad_send_wr->cur_seg->list;

	if (mad_send_wr->cur_seg->num < seg_num) {
		list_for_each_entry(mad_send_wr->cur_seg, list, list)
			if (mad_send_wr->cur_seg->num == seg_num)
				break;
	} else if (mad_send_wr->cur_seg->num > seg_num) {
		list_for_each_entry_reverse(mad_send_wr->cur_seg, list, list)
			if (mad_send_wr->cur_seg->num == seg_num)
				break;
	}
	return mad_send_wr->cur_seg->data;
}
EXPORT_SYMBOL(ib_get_rmpp_segment);

static inline void *ib_get_payload(struct ib_mad_send_wr_private *mad_send_wr)
{
	if (mad_send_wr->send_buf.seg_count)
		return ib_get_rmpp_segment(&mad_send_wr->send_buf,
					   mad_send_wr->seg_num);
	else
		return mad_send_wr->send_buf.mad +
		       mad_send_wr->send_buf.hdr_len;
}

void ib_free_send_mad(struct ib_mad_send_buf *send_buf)
{
	struct ib_mad_agent_private *mad_agent_priv;
	struct ib_mad_send_wr_private *mad_send_wr;

	mad_agent_priv = container_of(send_buf->mad_agent,
				      struct ib_mad_agent_private, agent);
	mad_send_wr = container_of(send_buf, struct ib_mad_send_wr_private,
				   send_buf);

	free_send_rmpp_list(mad_send_wr);
	kfree(send_buf->mad);
	deref_mad_agent(mad_agent_priv);
}
EXPORT_SYMBOL(ib_free_send_mad);

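/*
 * ib_send_mad - DMA-map the MAD header and payload and post the work
 * request on the send queue, or park it on the overflow list when the
 * send queue is already full.
 */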
int ib_send_mad(struct ib_mad_send_wr_private *mad_send_wr)
{
	struct ib_mad_qp_info *qp_info;
	struct list_head *list;
	struct ib_send_wr *bad_send_wr;
	struct ib_mad_agent *mad_agent;
	struct ib_sge *sge;
	unsigned long flags;
	int ret;

	/* Set WR ID to find mad_send_wr upon completion */
	qp_info = mad_send_wr->mad_agent_priv->qp_info;
	mad_send_wr->mad_list.mad_queue = &qp_info->send_queue;
	mad_send_wr->mad_list.cqe.done = ib_mad_send_done;
	mad_send_wr->send_wr.wr.wr_cqe = &mad_send_wr->mad_list.cqe;

	mad_agent = mad_send_wr->send_buf.mad_agent;
	sge = mad_send_wr->sg_list;
	sge[0].addr = ib_dma_map_single(mad_agent->device,
					mad_send_wr->send_buf.mad,
					sge[0].length,
					DMA_TO_DEVICE);
	if (unlikely(ib_dma_mapping_error(mad_agent->device, sge[0].addr)))
		return -ENOMEM;

	mad_send_wr->header_mapping = sge[0].addr;

	sge[1].addr = ib_dma_map_single(mad_agent->device,
					ib_get_payload(mad_send_wr),
					sge[1].length,
					DMA_TO_DEVICE);
	if (unlikely(ib_dma_mapping_error(mad_agent->device, sge[1].addr))) {
		ib_dma_unmap_single(mad_agent->device,
				    mad_send_wr->header_mapping,
				    sge[0].length, DMA_TO_DEVICE);
		return -ENOMEM;
	}
	mad_send_wr->payload_mapping = sge[1].addr;

	spin_lock_irqsave(&qp_info->send_queue.lock, flags);
	if (qp_info->send_queue.count < qp_info->send_queue.max_active) {
		ret = ib_post_send(mad_agent->qp, &mad_send_wr->send_wr.wr,
				   &bad_send_wr);
		list = &qp_info->send_queue.list;
	} else {
		ret = 0;
		list = &qp_info->overflow_list;
	}

	if (!ret) {
		qp_info->send_queue.count++;
		list_add_tail(&mad_send_wr->mad_list.list, list);
	}
	spin_unlock_irqrestore(&qp_info->send_queue.lock, flags);
	if (ret) {
		ib_dma_unmap_single(mad_agent->device,
				    mad_send_wr->header_mapping,
				    sge[0].length, DMA_TO_DEVICE);
		ib_dma_unmap_single(mad_agent->device,
				    mad_send_wr->payload_mapping,
				    sge[1].length, DMA_TO_DEVICE);
	}
	return ret;
}

/*
 * ib_post_send_mad - Posts MAD(s) to the send queue of the QP associated
 *  with the registered client
 */
int ib_post_send_mad(struct ib_mad_send_buf *send_buf,
		     struct ib_mad_send_buf **bad_send_buf)
{
	struct ib_mad_agent_private *mad_agent_priv;
	struct ib_mad_send_buf *next_send_buf;
	struct ib_mad_send_wr_private *mad_send_wr;
	unsigned long flags;
	int ret = -EINVAL;

	/* Walk list of send WRs and post each on send list */
	for (; send_buf; send_buf = next_send_buf) {

		mad_send_wr = container_of(send_buf,
					   struct ib_mad_send_wr_private,
					   send_buf);
		mad_agent_priv = mad_send_wr->mad_agent_priv;

		if (!send_buf->mad_agent->send_handler ||
		    (send_buf->timeout_ms &&
		     !send_buf->mad_agent->recv_handler)) {
			ret = -EINVAL;
			goto error;
		}

		if (!ib_is_mad_class_rmpp(((struct ib_mad_hdr *) send_buf->mad)->mgmt_class)) {
			if (mad_agent_priv->agent.rmpp_version) {
				ret = -EINVAL;
				goto error;
			}
		}

		/*
		 * Save pointer to next work request to post in case the
		 * current one completes, and the user modifies the work
		 * request associated with the completion
		 */
		next_send_buf = send_buf->next;
		mad_send_wr->send_wr.ah = send_buf->ah;

		if (((struct ib_mad_hdr *) send_buf->mad)->mgmt_class ==
		    IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE) {
			ret = handle_outgoing_dr_smp(mad_agent_priv,
						     mad_send_wr);
			if (ret < 0)		/* error */
				goto error;
			else if (ret == 1)	/* locally consumed */
				continue;
		}

		mad_send_wr->tid = ((struct ib_mad_hdr *) send_buf->mad)->tid;
		/* Timeout will be updated after send completes */
		mad_send_wr->timeout = msecs_to_jiffies(send_buf->timeout_ms);
		mad_send_wr->max_retries = send_buf->retries;
		mad_send_wr->retries_left = send_buf->retries;
		send_buf->retries = 0;
		/* Reference for work request to QP + response */
		mad_send_wr->refcount = 1 + (mad_send_wr->timeout > 0);
		mad_send_wr->status = IB_WC_SUCCESS;

		/* Reference MAD agent until send completes */
		atomic_inc(&mad_agent_priv->refcount);
		spin_lock_irqsave(&mad_agent_priv->lock, flags);
		list_add_tail(&mad_send_wr->agent_list,
			      &mad_agent_priv->send_list);
		spin_unlock_irqrestore(&mad_agent_priv->lock, flags);

		if (ib_mad_kernel_rmpp_agent(&mad_agent_priv->agent)) {
			ret = ib_send_rmpp_mad(mad_send_wr);
			if (ret >= 0 && ret != IB_RMPP_RESULT_CONSUMED)
				ret = ib_send_mad(mad_send_wr);
		} else
			ret = ib_send_mad(mad_send_wr);
		if (ret < 0) {
			/* Fail send request */
			spin_lock_irqsave(&mad_agent_priv->lock, flags);
			list_del(&mad_send_wr->agent_list);
			spin_unlock_irqrestore(&mad_agent_priv->lock, flags);
			atomic_dec(&mad_agent_priv->refcount);
			goto error;
		}
	}
	return 0;
error:
	if (bad_send_buf)
		*bad_send_buf = send_buf;
	return ret;
}
EXPORT_SYMBOL(ib_post_send_mad);

/*
 * ib_free_recv_mad - Returns data buffers used to receive
 *  a MAD to the access layer
 */
void ib_free_recv_mad(struct ib_mad_recv_wc *mad_recv_wc)
{
	struct ib_mad_recv_buf *mad_recv_buf, *temp_recv_buf;
	struct ib_mad_private_header *mad_priv_hdr;
	struct ib_mad_private *priv;
	struct list_head free_list;

	INIT_LIST_HEAD(&free_list);
	list_splice_init(&mad_recv_wc->rmpp_list, &free_list);

	list_for_each_entry_safe(mad_recv_buf, temp_recv_buf,
					&free_list, list) {
		mad_recv_wc = container_of(mad_recv_buf, struct ib_mad_recv_wc,
					   recv_buf);
		mad_priv_hdr = container_of(mad_recv_wc,
					    struct ib_mad_private_header,
					    recv_wc);
		priv = container_of(mad_priv_hdr, struct ib_mad_private,
				    header);
		kfree(priv);
	}
}
EXPORT_SYMBOL(ib_free_recv_mad);

struct ib_mad_agent *ib_redirect_mad_qp(struct ib_qp *qp,
					u8 rmpp_version,
					ib_mad_send_handler send_handler,
					ib_mad_recv_handler recv_handler,
					void *context)
{
	return ERR_PTR(-EINVAL);	/* XXX: for now */
}
EXPORT_SYMBOL(ib_redirect_mad_qp);

int ib_process_mad_wc(struct ib_mad_agent *mad_agent,
		      struct ib_wc *wc)
{
	dev_err(&mad_agent->device->dev,
		"ib_process_mad_wc() not implemented yet\n");
	return 0;
}
EXPORT_SYMBOL(ib_process_mad_wc);

static int method_in_use(struct ib_mad_mgmt_method_table **method,
			 struct ib_mad_reg_req *mad_reg_req)
{
	int i;

	for_each_set_bit(i, mad_reg_req->method_mask, IB_MGMT_MAX_METHODS) {
		if ((*method)->agent[i]) {
			pr_err("Method %d already in use\n", i);
			return -EINVAL;
		}
	}
	return 0;
}

static int allocate_method_table(struct ib_mad_mgmt_method_table **method)
{
	/* Allocate management method table */
	*method = kzalloc(sizeof **method, GFP_ATOMIC);
	if (!*method) {
		pr_err("No memory for ib_mad_mgmt_method_table\n");
		return -ENOMEM;
	}

	return 0;
}

/*
 * Check to see if there are any methods still in use
 */
static int check_method_table(struct ib_mad_mgmt_method_table *method)
{
	int i;

	for (i = 0; i < IB_MGMT_MAX_METHODS; i++)
		if (method->agent[i])
			return 1;
	return 0;
}

/*
 * Check to see if there are any method tables for this class still in use
 */
static int check_class_table(struct ib_mad_mgmt_class_table *class)
{
	int i;

	for (i = 0; i < MAX_MGMT_CLASS; i++)
		if (class->method_table[i])
			return 1;
	return 0;
}

static int check_vendor_class(struct ib_mad_mgmt_vendor_class *vendor_class)
{
	int i;

	for (i = 0; i < MAX_MGMT_OUI; i++)
		if (vendor_class->method_table[i])
			return 1;
	return 0;
}

static int find_vendor_oui(struct ib_mad_mgmt_vendor_class *vendor_class,
			   const char *oui)
{
	int i;

	for (i = 0; i < MAX_MGMT_OUI; i++)
		/* Is there matching OUI for this vendor class ? */
		if (!memcmp(vendor_class->oui[i], oui, 3))
			return i;

	return -1;
}

static int check_vendor_table(struct ib_mad_mgmt_vendor_class_table *vendor)
{
	int i;

	for (i = 0; i < MAX_MGMT_VENDOR_RANGE2; i++)
		if (vendor->vendor_class[i])
			return 1;

	return 0;
}

static void remove_methods_mad_agent(struct ib_mad_mgmt_method_table *method,
				     struct ib_mad_agent_private *agent)
{
	int i;

	/* Remove any methods for this mad agent */
	for (i = 0; i < IB_MGMT_MAX_METHODS; i++) {
		if (method->agent[i] == agent) {
			method->agent[i] = NULL;
		}
	}
}

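/*
 * Record a non-vendor-OUI registration: allocate the per-version class
 * table and per-class method table as needed, verify the requested methods
 * are free, then point each requested method at this agent.
 */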
static int add_nonoui_reg_req(struct ib_mad_reg_req *mad_reg_req,
			      struct ib_mad_agent_private *agent_priv,
			      u8 mgmt_class)
{
	struct ib_mad_port_private *port_priv;
	struct ib_mad_mgmt_class_table **class;
	struct ib_mad_mgmt_method_table **method;
	int i, ret;

	port_priv = agent_priv->qp_info->port_priv;
	class = &port_priv->version[mad_reg_req->mgmt_class_version].class;
	if (!*class) {
		/* Allocate management class table for "new" class version */
		*class = kzalloc(sizeof **class, GFP_ATOMIC);
		if (!*class) {
			dev_err(&agent_priv->agent.device->dev,
				"No memory for ib_mad_mgmt_class_table\n");
			ret = -ENOMEM;
			goto error1;
		}

		/* Allocate method table for this management class */
		method = &(*class)->method_table[mgmt_class];
		if ((ret = allocate_method_table(method)))
			goto error2;
	} else {
		method = &(*class)->method_table[mgmt_class];
		if (!*method) {
			/* Allocate method table for this management class */
			if ((ret = allocate_method_table(method)))
				goto error1;
		}
	}

	/* Now, make sure methods are not already in use */
	if (method_in_use(method, mad_reg_req))
		goto error3;

	/* Finally, add in methods being registered */
	for_each_set_bit(i, mad_reg_req->method_mask, IB_MGMT_MAX_METHODS)
		(*method)->agent[i] = agent_priv;

	return 0;

error3:
	/* Remove any methods for this mad agent */
	remove_methods_mad_agent(*method, agent_priv);
	/* Now, check to see if there are any methods in use */
	if (!check_method_table(*method)) {
		/* If not, release management method table */
		kfree(*method);
		*method = NULL;
	}
	ret = -EINVAL;
	goto error1;
error2:
	kfree(*class);
	*class = NULL;
error1:
	return ret;
}

static int add_oui_reg_req(struct ib_mad_reg_req *mad_reg_req,
			   struct ib_mad_agent_private *agent_priv)
{
	struct ib_mad_port_private *port_priv;
	struct ib_mad_mgmt_vendor_class_table **vendor_table;
	struct ib_mad_mgmt_vendor_class_table *vendor = NULL;
	struct ib_mad_mgmt_vendor_class *vendor_class = NULL;
	struct ib_mad_mgmt_method_table **method;
	int i, ret = -ENOMEM;
	u8 vclass;

	/* "New" vendor (with OUI) class */
	vclass = vendor_class_index(mad_reg_req->mgmt_class);
	port_priv = agent_priv->qp_info->port_priv;
	vendor_table = &port_priv->version[
				mad_reg_req->mgmt_class_version].vendor;
	if (!*vendor_table) {
		/* Allocate mgmt vendor class table for "new" class version */
		vendor = kzalloc(sizeof *vendor, GFP_ATOMIC);
		if (!vendor) {
			dev_err(&agent_priv->agent.device->dev,
				"No memory for ib_mad_mgmt_vendor_class_table\n");
			goto error1;
		}

		*vendor_table = vendor;
	}
	if (!(*vendor_table)->vendor_class[vclass]) {
		/* Allocate table for this management vendor class */
		vendor_class = kzalloc(sizeof *vendor_class, GFP_ATOMIC);
		if (!vendor_class) {
			dev_err(&agent_priv->agent.device->dev,
				"No memory for ib_mad_mgmt_vendor_class\n");
			goto error2;
		}

		(*vendor_table)->vendor_class[vclass] = vendor_class;
	}
	for (i = 0; i < MAX_MGMT_OUI; i++) {
		/* Is there matching OUI for this vendor class ? */
		if (!memcmp((*vendor_table)->vendor_class[vclass]->oui[i],
			    mad_reg_req->oui, 3)) {
			method = &(*vendor_table)->vendor_class[
						vclass]->method_table[i];
			BUG_ON(!*method);
			goto check_in_use;
		}
	}
	for (i = 0; i < MAX_MGMT_OUI; i++) {
		/* OUI slot available ? */
		if (!is_vendor_oui((*vendor_table)->vendor_class[
				vclass]->oui[i])) {
			method = &(*vendor_table)->vendor_class[
				vclass]->method_table[i];
			BUG_ON(*method);
			/* Allocate method table for this OUI */
			if ((ret = allocate_method_table(method)))
				goto error3;
			memcpy((*vendor_table)->vendor_class[vclass]->oui[i],
			       mad_reg_req->oui, 3);
			goto check_in_use;
		}
	}
	dev_err(&agent_priv->agent.device->dev, "All OUI slots in use\n");
	goto error3;

check_in_use:
	/* Now, make sure methods are not already in use */
	if (method_in_use(method, mad_reg_req))
		goto error4;

	/* Finally, add in methods being registered */
	for_each_set_bit(i, mad_reg_req->method_mask, IB_MGMT_MAX_METHODS)
		(*method)->agent[i] = agent_priv;

	return 0;

error4:
	/* Remove any methods for this mad agent */
	remove_methods_mad_agent(*method, agent_priv);
	/* Now, check to see if there are any methods in use */
	if (!check_method_table(*method)) {
		/* If not, release management method table */
		kfree(*method);
		*method = NULL;
	}
	ret = -EINVAL;
error3:
	if (vendor_class) {
		(*vendor_table)->vendor_class[vclass] = NULL;
		kfree(vendor_class);
	}
error2:
	if (vendor) {
		*vendor_table = NULL;
		kfree(vendor);
	}
error1:
	return ret;
}

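/*
 * Undo an agent's registration: clear its methods from the class or vendor
 * method tables and free any table that ends up empty, walking back up to
 * the per-version class/vendor tables.
 */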
static void remove_mad_reg_req(struct ib_mad_agent_private *agent_priv)
{
	struct ib_mad_port_private *port_priv;
	struct ib_mad_mgmt_class_table *class;
	struct ib_mad_mgmt_method_table *method;
	struct ib_mad_mgmt_vendor_class_table *vendor;
	struct ib_mad_mgmt_vendor_class *vendor_class;
	int index;
	u8 mgmt_class;

	/*
	 * Was MAD registration request supplied
	 * with original registration ?
	 */
	if (!agent_priv->reg_req) {
		goto out;
	}

	port_priv = agent_priv->qp_info->port_priv;
	mgmt_class = convert_mgmt_class(agent_priv->reg_req->mgmt_class);
	class = port_priv->version[
			agent_priv->reg_req->mgmt_class_version].class;
	if (!class)
		goto vendor_check;

	method = class->method_table[mgmt_class];
	if (method) {
		/* Remove any methods for this mad agent */
		remove_methods_mad_agent(method, agent_priv);
		/* Now, check to see if there are any methods still in use */
		if (!check_method_table(method)) {
			/* If not, release management method table */
			kfree(method);
			class->method_table[mgmt_class] = NULL;
			/* Any management classes left ? */
			if (!check_class_table(class)) {
				/* If not, release management class table */
				kfree(class);
				port_priv->version[
					agent_priv->reg_req->
					mgmt_class_version].class = NULL;
			}
		}
	}

vendor_check:
	if (!is_vendor_class(mgmt_class))
		goto out;

	/* normalize mgmt_class to vendor range 2 */
	mgmt_class = vendor_class_index(agent_priv->reg_req->mgmt_class);
	vendor = port_priv->version[
			agent_priv->reg_req->mgmt_class_version].vendor;

	if (!vendor)
		goto out;

	vendor_class = vendor->vendor_class[mgmt_class];
	if (vendor_class) {
		index = find_vendor_oui(vendor_class, agent_priv->reg_req->oui);
		if (index < 0)
			goto out;
		method = vendor_class->method_table[index];
		if (method) {
			/* Remove any methods for this mad agent */
			remove_methods_mad_agent(method, agent_priv);
			/*
			 * Now, check to see if there are
			 * any methods still in use
			 */
			if (!check_method_table(method)) {
				/* If not, release management method table */
				kfree(method);
				vendor_class->method_table[index] = NULL;
				memset(vendor_class->oui[index], 0, 3);
				/* Any OUIs left ? */
				if (!check_vendor_class(vendor_class)) {
					/* If not, release vendor class table */
					kfree(vendor_class);
					vendor->vendor_class[mgmt_class] = NULL;
					/* Any other vendor classes left ? */
					if (!check_vendor_table(vendor)) {
						kfree(vendor);
						port_priv->version[
							agent_priv->reg_req->
							mgmt_class_version].
							vendor = NULL;
					}
				}
			}
		}
	}

out:
	return;
}

1da177e4
LT
1706static struct ib_mad_agent_private *
1707find_mad_agent(struct ib_mad_port_private *port_priv,
d94bd266 1708 const struct ib_mad_hdr *mad_hdr)
1da177e4
LT
1709{
1710 struct ib_mad_agent_private *mad_agent = NULL;
1711 unsigned long flags;
1712
1713 spin_lock_irqsave(&port_priv->reg_lock, flags);
d94bd266 1714 if (ib_response_mad(mad_hdr)) {
1da177e4
LT
1715 u32 hi_tid;
1716 struct ib_mad_agent_private *entry;
1717
1718 /*
1719 * Routing is based on high 32 bits of transaction ID
1720 * of MAD.
1721 */
d94bd266 1722 hi_tid = be64_to_cpu(mad_hdr->tid) >> 32;
34816ad9 1723 list_for_each_entry(entry, &port_priv->agent_list, agent_list) {
1da177e4
LT
1724 if (entry->agent.hi_tid == hi_tid) {
1725 mad_agent = entry;
1726 break;
1727 }
1728 }
1729 } else {
1730 struct ib_mad_mgmt_class_table *class;
1731 struct ib_mad_mgmt_method_table *method;
1732 struct ib_mad_mgmt_vendor_class_table *vendor;
1733 struct ib_mad_mgmt_vendor_class *vendor_class;
d94bd266 1734 const struct ib_vendor_mad *vendor_mad;
1da177e4
LT
1735 int index;
1736
1737 /*
1738 * Routing is based on version, class, and method
1739 * For "newer" vendor MADs, also based on OUI
1740 */
d94bd266 1741 if (mad_hdr->class_version >= MAX_MGMT_VERSION)
1da177e4 1742 goto out;
d94bd266 1743 if (!is_vendor_class(mad_hdr->mgmt_class)) {
1da177e4 1744 class = port_priv->version[
d94bd266 1745 mad_hdr->class_version].class;
1da177e4
LT
1746 if (!class)
1747 goto out;
d94bd266 1748 if (convert_mgmt_class(mad_hdr->mgmt_class) >=
b7ab0b19
HS
1749 IB_MGMT_MAX_METHODS)
1750 goto out;
1da177e4 1751 method = class->method_table[convert_mgmt_class(
d94bd266 1752 mad_hdr->mgmt_class)];
1da177e4 1753 if (method)
d94bd266 1754 mad_agent = method->agent[mad_hdr->method &
1da177e4
LT
1755 ~IB_MGMT_METHOD_RESP];
1756 } else {
1757 vendor = port_priv->version[
d94bd266 1758 mad_hdr->class_version].vendor;
1da177e4
LT
1759 if (!vendor)
1760 goto out;
1761 vendor_class = vendor->vendor_class[vendor_class_index(
d94bd266 1762 mad_hdr->mgmt_class)];
1da177e4
LT
1763 if (!vendor_class)
1764 goto out;
1765 /* Find matching OUI */
d94bd266 1766 vendor_mad = (const struct ib_vendor_mad *)mad_hdr;
1da177e4
LT
1767 index = find_vendor_oui(vendor_class, vendor_mad->oui);
1768 if (index == -1)
1769 goto out;
1770 method = vendor_class->method_table[index];
1771 if (method) {
d94bd266 1772 mad_agent = method->agent[mad_hdr->method &
1da177e4
LT
1773 ~IB_MGMT_METHOD_RESP];
1774 }
1775 }
1776 }
1777
1778 if (mad_agent) {
1779 if (mad_agent->agent.recv_handler)
1780 atomic_inc(&mad_agent->refcount);
1781 else {
7ef5d4b0
IW
1782 dev_notice(&port_priv->device->dev,
1783 "No receive handler for client %p on port %d\n",
1784 &mad_agent->agent, port_priv->port_num);
1da177e4
LT
1785 mad_agent = NULL;
1786 }
1787 }
1788out:
1789 spin_unlock_irqrestore(&port_priv->reg_lock, flags);
1790
1791 return mad_agent;
1792}
1793
8e4349d1
IW
1794static int validate_mad(const struct ib_mad_hdr *mad_hdr,
1795 const struct ib_mad_qp_info *qp_info,
1796 bool opa)
1da177e4
LT
1797{
1798 int valid = 0;
8e4349d1 1799 u32 qp_num = qp_info->qp->qp_num;
1da177e4
LT
1800
1801 /* Make sure MAD base version is understood */
8e4349d1
IW
1802 if (mad_hdr->base_version != IB_MGMT_BASE_VERSION &&
1803 (!opa || mad_hdr->base_version != OPA_MGMT_BASE_VERSION)) {
1804 pr_err("MAD received with unsupported base version %d %s\n",
1805 mad_hdr->base_version, opa ? "(opa)" : "");
1da177e4
LT
1806 goto out;
1807 }
1808
1809 /* Filter SMI packets sent to other than QP0 */
77f60833
IW
1810 if ((mad_hdr->mgmt_class == IB_MGMT_CLASS_SUBN_LID_ROUTED) ||
1811 (mad_hdr->mgmt_class == IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE)) {
1da177e4
LT
1812 if (qp_num == 0)
1813 valid = 1;
1814 } else {
53370886
HR
1815 /* CM attributes other than ClassPortInfo only use Send method */
1816 if ((mad_hdr->mgmt_class == IB_MGMT_CLASS_CM) &&
1817 (mad_hdr->attr_id != IB_MGMT_CLASSPORTINFO_ATTR_ID) &&
1818 (mad_hdr->method != IB_MGMT_METHOD_SEND))
1819 goto out;
1da177e4
LT
1820 /* Filter GSI packets sent to QP0 */
1821 if (qp_num != 0)
1822 valid = 1;
1823 }
1824
1825out:
1826 return valid;
1827}
1828
f766c58f
IW
1829static int is_rmpp_data_mad(const struct ib_mad_agent_private *mad_agent_priv,
1830 const struct ib_mad_hdr *mad_hdr)
fa619a77
HR
1831{
1832 struct ib_rmpp_mad *rmpp_mad;
1833
1834 rmpp_mad = (struct ib_rmpp_mad *)mad_hdr;
1835 return !mad_agent_priv->agent.rmpp_version ||
1471cb6c 1836 !ib_mad_kernel_rmpp_agent(&mad_agent_priv->agent) ||
fa619a77
HR
1837 !(ib_get_rmpp_flags(&rmpp_mad->rmpp_hdr) &
1838 IB_MGMT_RMPP_FLAG_ACTIVE) ||
1839 (rmpp_mad->rmpp_hdr.rmpp_type == IB_MGMT_RMPP_TYPE_DATA);
1840}
1841
8bf4b30c
IW
1842static inline int rcv_has_same_class(const struct ib_mad_send_wr_private *wr,
1843 const struct ib_mad_recv_wc *rwc)
fa9656bb 1844{
8bf4b30c 1845 return ((struct ib_mad_hdr *)(wr->send_buf.mad))->mgmt_class ==
fa9656bb
JM
1846 rwc->recv_buf.mad->mad_hdr.mgmt_class;
1847}
1848
f766c58f
IW
1849static inline int rcv_has_same_gid(const struct ib_mad_agent_private *mad_agent_priv,
1850 const struct ib_mad_send_wr_private *wr,
1851 const struct ib_mad_recv_wc *rwc )
fa9656bb
JM
1852{
1853 struct ib_ah_attr attr;
1854 u8 send_resp, rcv_resp;
9874e746
JM
1855 union ib_gid sgid;
1856 struct ib_device *device = mad_agent_priv->agent.device;
1857 u8 port_num = mad_agent_priv->agent.port_num;
1858 u8 lmc;
fa9656bb 1859
96909308
IW
1860 send_resp = ib_response_mad((struct ib_mad_hdr *)wr->send_buf.mad);
1861 rcv_resp = ib_response_mad(&rwc->recv_buf.mad->mad_hdr);
fa9656bb 1862
fa9656bb
JM
1863 if (send_resp == rcv_resp)
1864 /* both requests, or both responses. GIDs different */
1865 return 0;
1866
1867 if (ib_query_ah(wr->send_buf.ah, &attr))
1868 /* Assume not equal, to avoid false positives. */
1869 return 0;
1870
9874e746
JM
1871 if (!!(attr.ah_flags & IB_AH_GRH) !=
1872 !!(rwc->wc->wc_flags & IB_WC_GRH))
fa9656bb
JM
1873 /* one has GID, other does not. Assume different */
1874 return 0;
9874e746
JM
1875
1876 if (!send_resp && rcv_resp) {
1877 /* is request/response. */
1878 if (!(attr.ah_flags & IB_AH_GRH)) {
1879 if (ib_get_cached_lmc(device, port_num, &lmc))
1880 return 0;
1881 return (!lmc || !((attr.src_path_bits ^
1882 rwc->wc->dlid_path_bits) &
1883 ((1 << lmc) - 1)));
1884 } else {
1885 if (ib_get_cached_gid(device, port_num,
55ee3ab2 1886 attr.grh.sgid_index, &sgid, NULL))
9874e746
JM
1887 return 0;
1888 return !memcmp(sgid.raw, rwc->recv_buf.grh->dgid.raw,
1889 16);
1890 }
1891 }
1892
1893 if (!(attr.ah_flags & IB_AH_GRH))
1894 return attr.dlid == rwc->wc->slid;
1895 else
1896 return !memcmp(attr.grh.dgid.raw, rwc->recv_buf.grh->sgid.raw,
1897 16);
1898}
1899
1900static inline int is_direct(u8 class)
1901{
1902 return (class == IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE);
fa9656bb 1903}
9874e746 1904
fa619a77 1905struct ib_mad_send_wr_private*
f766c58f
IW
1906ib_find_send_mad(const struct ib_mad_agent_private *mad_agent_priv,
1907 const struct ib_mad_recv_wc *wc)
1da177e4 1908{
9874e746 1909 struct ib_mad_send_wr_private *wr;
83a1d228 1910 const struct ib_mad_hdr *mad_hdr;
fa9656bb 1911
83a1d228 1912 mad_hdr = &wc->recv_buf.mad->mad_hdr;
9874e746
JM
1913
1914 list_for_each_entry(wr, &mad_agent_priv->wait_list, agent_list) {
83a1d228 1915 if ((wr->tid == mad_hdr->tid) &&
9874e746
JM
1916 rcv_has_same_class(wr, wc) &&
1917 /*
1918 * Don't check GID for direct routed MADs.
1919 * These might have permissive LIDs.
1920 */
83a1d228 1921 (is_direct(mad_hdr->mgmt_class) ||
9874e746 1922 rcv_has_same_gid(mad_agent_priv, wr, wc)))
39798695 1923 return (wr->status == IB_WC_SUCCESS) ? wr : NULL;
1da177e4
LT
1924 }
1925
1926 /*
1927 * It's possible to receive the response before we've
1928 * been notified that the send has completed
1929 */
9874e746 1930 list_for_each_entry(wr, &mad_agent_priv->send_list, agent_list) {
c597eee5 1931 if (is_rmpp_data_mad(mad_agent_priv, wr->send_buf.mad) &&
83a1d228 1932 wr->tid == mad_hdr->tid &&
9874e746
JM
1933 wr->timeout &&
1934 rcv_has_same_class(wr, wc) &&
1935 /*
1936 * Don't check GID for direct routed MADs.
1937 * These might have permissive LIDs.
1938 */
83a1d228 1939 (is_direct(mad_hdr->mgmt_class) ||
9874e746 1940 rcv_has_same_gid(mad_agent_priv, wr, wc)))
1da177e4 1941 /* Verify request has not been canceled */
9874e746 1942 return (wr->status == IB_WC_SUCCESS) ? wr : NULL;
1da177e4
LT
1943 }
1944 return NULL;
1945}
1946
fa619a77 1947void ib_mark_mad_done(struct ib_mad_send_wr_private *mad_send_wr)
6a0c435e
HR
1948{
1949 mad_send_wr->timeout = 0;
179e0917
AM
1950 if (mad_send_wr->refcount == 1)
1951 list_move_tail(&mad_send_wr->agent_list,
6a0c435e 1952 &mad_send_wr->mad_agent_priv->done_list);
6a0c435e
HR
1953}
1954
1da177e4 1955static void ib_mad_complete_recv(struct ib_mad_agent_private *mad_agent_priv,
4a0754fa 1956 struct ib_mad_recv_wc *mad_recv_wc)
1da177e4
LT
1957{
1958 struct ib_mad_send_wr_private *mad_send_wr;
1959 struct ib_mad_send_wc mad_send_wc;
1960 unsigned long flags;
1961
fa619a77
HR
1962 INIT_LIST_HEAD(&mad_recv_wc->rmpp_list);
1963 list_add(&mad_recv_wc->recv_buf.list, &mad_recv_wc->rmpp_list);
1471cb6c 1964 if (ib_mad_kernel_rmpp_agent(&mad_agent_priv->agent)) {
fa619a77
HR
1965 mad_recv_wc = ib_process_rmpp_recv_wc(mad_agent_priv,
1966 mad_recv_wc);
1967 if (!mad_recv_wc) {
1b52fa98 1968 deref_mad_agent(mad_agent_priv);
fa619a77
HR
1969 return;
1970 }
1971 }
1972
1da177e4 1973 /* Complete corresponding request */
96909308 1974 if (ib_response_mad(&mad_recv_wc->recv_buf.mad->mad_hdr)) {
1da177e4 1975 spin_lock_irqsave(&mad_agent_priv->lock, flags);
fa9656bb 1976 mad_send_wr = ib_find_send_mad(mad_agent_priv, mad_recv_wc);
1da177e4
LT
1977 if (!mad_send_wr) {
1978 spin_unlock_irqrestore(&mad_agent_priv->lock, flags);
1471cb6c
IW
1979 if (!ib_mad_kernel_rmpp_agent(&mad_agent_priv->agent)
1980 && ib_is_mad_class_rmpp(mad_recv_wc->recv_buf.mad->mad_hdr.mgmt_class)
1981 && (ib_get_rmpp_flags(&((struct ib_rmpp_mad *)mad_recv_wc->recv_buf.mad)->rmpp_hdr)
1982 & IB_MGMT_RMPP_FLAG_ACTIVE)) {
1983 /* user rmpp is in effect
1984 * and this is an active RMPP MAD
1985 */
ca281265
CH
1986 mad_agent_priv->agent.recv_handler(
1987 &mad_agent_priv->agent, NULL,
1988 mad_recv_wc);
1471cb6c
IW
1989 atomic_dec(&mad_agent_priv->refcount);
1990 } else {
1991 /* not user rmpp, revert to normal behavior and
1992 * drop the mad */
1993 ib_free_recv_mad(mad_recv_wc);
1994 deref_mad_agent(mad_agent_priv);
1995 return;
1996 }
1997 } else {
1998 ib_mark_mad_done(mad_send_wr);
1999 spin_unlock_irqrestore(&mad_agent_priv->lock, flags);
1da177e4 2000
1471cb6c 2001 /* Defined behavior is to complete response before request */
ca281265
CH
2002 mad_agent_priv->agent.recv_handler(
2003 &mad_agent_priv->agent,
2004 &mad_send_wr->send_buf,
2005 mad_recv_wc);
1471cb6c 2006 atomic_dec(&mad_agent_priv->refcount);
1da177e4 2007
1471cb6c
IW
2008 mad_send_wc.status = IB_WC_SUCCESS;
2009 mad_send_wc.vendor_err = 0;
2010 mad_send_wc.send_buf = &mad_send_wr->send_buf;
2011 ib_mad_complete_send_wr(mad_send_wr, &mad_send_wc);
2012 }
1da177e4 2013 } else {
ca281265 2014 mad_agent_priv->agent.recv_handler(&mad_agent_priv->agent, NULL,
4a0754fa 2015 mad_recv_wc);
1b52fa98 2016 deref_mad_agent(mad_agent_priv);
1da177e4
LT
2017 }
2018}
2019
e11ae8aa
IW
2020static enum smi_action handle_ib_smi(const struct ib_mad_port_private *port_priv,
2021 const struct ib_mad_qp_info *qp_info,
2022 const struct ib_wc *wc,
2023 int port_num,
2024 struct ib_mad_private *recv,
2025 struct ib_mad_private *response)
2026{
2027 enum smi_forward_action retsmi;
c9082e51 2028 struct ib_smp *smp = (struct ib_smp *)recv->mad;
e11ae8aa 2029
c9082e51 2030 if (smi_handle_dr_smp_recv(smp,
4139032b 2031 rdma_cap_ib_switch(port_priv->device),
e11ae8aa
IW
2032 port_num,
2033 port_priv->device->phys_port_cnt) ==
2034 IB_SMI_DISCARD)
2035 return IB_SMI_DISCARD;
2036
c9082e51 2037 retsmi = smi_check_forward_dr_smp(smp);
e11ae8aa
IW
2038 if (retsmi == IB_SMI_LOCAL)
2039 return IB_SMI_HANDLE;
2040
2041 if (retsmi == IB_SMI_SEND) { /* don't forward */
c9082e51 2042 if (smi_handle_dr_smp_send(smp,
4139032b 2043 rdma_cap_ib_switch(port_priv->device),
e11ae8aa
IW
2044 port_num) == IB_SMI_DISCARD)
2045 return IB_SMI_DISCARD;
2046
c9082e51 2047 if (smi_check_local_smp(smp, port_priv->device) == IB_SMI_DISCARD)
e11ae8aa 2048 return IB_SMI_DISCARD;
4139032b 2049 } else if (rdma_cap_ib_switch(port_priv->device)) {
e11ae8aa 2050 /* forward case for switches */
c9082e51 2051 memcpy(response, recv, mad_priv_size(response));
e11ae8aa 2052 response->header.recv_wc.wc = &response->header.wc;
c9082e51 2053 response->header.recv_wc.recv_buf.mad = (struct ib_mad *)response->mad;
e11ae8aa
IW
2054 response->header.recv_wc.recv_buf.grh = &response->grh;
2055
c9082e51 2056 agent_send_response((const struct ib_mad_hdr *)response->mad,
e11ae8aa
IW
2057 &response->grh, wc,
2058 port_priv->device,
c9082e51
IW
2059 smi_get_fwd_port(smp),
2060 qp_info->qp->qp_num,
8e4349d1
IW
2061 response->mad_size,
2062 false);
e11ae8aa
IW
2063
2064 return IB_SMI_DISCARD;
2065 }
2066 return IB_SMI_HANDLE;
2067}
2068
c9082e51 2069static bool generate_unmatched_resp(const struct ib_mad_private *recv,
8e4349d1
IW
2070 struct ib_mad_private *response,
2071 size_t *resp_len, bool opa)
0b307043 2072{
c9082e51
IW
2073 const struct ib_mad_hdr *recv_hdr = (const struct ib_mad_hdr *)recv->mad;
2074 struct ib_mad_hdr *resp_hdr = (struct ib_mad_hdr *)response->mad;
2075
2076 if (recv_hdr->method == IB_MGMT_METHOD_GET ||
2077 recv_hdr->method == IB_MGMT_METHOD_SET) {
2078 memcpy(response, recv, mad_priv_size(response));
0b307043 2079 response->header.recv_wc.wc = &response->header.wc;
c9082e51 2080 response->header.recv_wc.recv_buf.mad = (struct ib_mad *)response->mad;
0b307043 2081 response->header.recv_wc.recv_buf.grh = &response->grh;
c9082e51
IW
2082 resp_hdr->method = IB_MGMT_METHOD_GET_RESP;
2083 resp_hdr->status = cpu_to_be16(IB_MGMT_MAD_STATUS_UNSUPPORTED_METHOD_ATTRIB);
2084 if (recv_hdr->mgmt_class == IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE)
2085 resp_hdr->status |= IB_SMP_DIRECTION;
0b307043 2086
8e4349d1
IW
2087 if (opa && recv_hdr->base_version == OPA_MGMT_BASE_VERSION) {
2088 if (recv_hdr->mgmt_class ==
2089 IB_MGMT_CLASS_SUBN_LID_ROUTED ||
2090 recv_hdr->mgmt_class ==
2091 IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE)
2092 *resp_len = opa_get_smp_header_size(
2093 (struct opa_smp *)recv->mad);
2094 else
2095 *resp_len = sizeof(struct ib_mad_hdr);
2096 }
2097
0b307043
ST
2098 return true;
2099 } else {
2100 return false;
2101 }
2102}
8e4349d1
IW
2103
2104static enum smi_action
2105handle_opa_smi(struct ib_mad_port_private *port_priv,
2106 struct ib_mad_qp_info *qp_info,
2107 struct ib_wc *wc,
2108 int port_num,
2109 struct ib_mad_private *recv,
2110 struct ib_mad_private *response)
2111{
2112 enum smi_forward_action retsmi;
2113 struct opa_smp *smp = (struct opa_smp *)recv->mad;
2114
2115 if (opa_smi_handle_dr_smp_recv(smp,
4139032b 2116 rdma_cap_ib_switch(port_priv->device),
8e4349d1
IW
2117 port_num,
2118 port_priv->device->phys_port_cnt) ==
2119 IB_SMI_DISCARD)
2120 return IB_SMI_DISCARD;
2121
2122 retsmi = opa_smi_check_forward_dr_smp(smp);
2123 if (retsmi == IB_SMI_LOCAL)
2124 return IB_SMI_HANDLE;
2125
2126 if (retsmi == IB_SMI_SEND) { /* don't forward */
2127 if (opa_smi_handle_dr_smp_send(smp,
4139032b 2128 rdma_cap_ib_switch(port_priv->device),
8e4349d1
IW
2129 port_num) == IB_SMI_DISCARD)
2130 return IB_SMI_DISCARD;
2131
2132 if (opa_smi_check_local_smp(smp, port_priv->device) ==
2133 IB_SMI_DISCARD)
2134 return IB_SMI_DISCARD;
2135
4139032b 2136 } else if (rdma_cap_ib_switch(port_priv->device)) {
8e4349d1
IW
2137 /* forward case for switches */
2138 memcpy(response, recv, mad_priv_size(response));
2139 response->header.recv_wc.wc = &response->header.wc;
2140 response->header.recv_wc.recv_buf.opa_mad =
2141 (struct opa_mad *)response->mad;
2142 response->header.recv_wc.recv_buf.grh = &response->grh;
2143
2144 agent_send_response((const struct ib_mad_hdr *)response->mad,
2145 &response->grh, wc,
2146 port_priv->device,
2147 opa_smi_get_fwd_port(smp),
2148 qp_info->qp->qp_num,
2149 recv->header.wc.byte_len,
2150 true);
2151
2152 return IB_SMI_DISCARD;
2153 }
2154
2155 return IB_SMI_HANDLE;
2156}
2157
2158static enum smi_action
2159handle_smi(struct ib_mad_port_private *port_priv,
2160 struct ib_mad_qp_info *qp_info,
2161 struct ib_wc *wc,
2162 int port_num,
2163 struct ib_mad_private *recv,
2164 struct ib_mad_private *response,
2165 bool opa)
2166{
2167 struct ib_mad_hdr *mad_hdr = (struct ib_mad_hdr *)recv->mad;
2168
2169 if (opa && mad_hdr->base_version == OPA_MGMT_BASE_VERSION &&
2170 mad_hdr->class_version == OPA_SMI_CLASS_VERSION)
2171 return handle_opa_smi(port_priv, qp_info, wc, port_num, recv,
2172 response);
2173
2174 return handle_ib_smi(port_priv, qp_info, wc, port_num, recv, response);
2175}
2176
d53e11fd 2177static void ib_mad_recv_done(struct ib_cq *cq, struct ib_wc *wc)
1da177e4 2178{
d53e11fd
CH
2179 struct ib_mad_port_private *port_priv = cq->cq_context;
2180 struct ib_mad_list_head *mad_list =
2181 container_of(wc->wr_cqe, struct ib_mad_list_head, cqe);
1da177e4
LT
2182 struct ib_mad_qp_info *qp_info;
2183 struct ib_mad_private_header *mad_priv_hdr;
445d6807 2184 struct ib_mad_private *recv, *response = NULL;
1da177e4 2185 struct ib_mad_agent_private *mad_agent;
1bae4dbf 2186 int port_num;
a9e74323 2187 int ret = IB_MAD_RESULT_SUCCESS;
4cd7c947
IW
2188 size_t mad_size;
2189 u16 resp_mad_pkey_index = 0;
8e4349d1 2190 bool opa;
1da177e4 2191
d53e11fd
CH
2192 if (list_empty_careful(&port_priv->port_list))
2193 return;
2194
2195 if (wc->status != IB_WC_SUCCESS) {
2196 /*
2197 * Receive errors indicate that the QP has entered the error
2198 * state - error handling/shutdown code will cleanup
2199 */
2200 return;
2201 }
2202
1da177e4
LT
2203 qp_info = mad_list->mad_queue->qp_info;
2204 dequeue_mad(mad_list);
2205
8e4349d1
IW
2206 opa = rdma_cap_opa_mad(qp_info->port_priv->device,
2207 qp_info->port_priv->port_num);
2208
1da177e4
LT
2209 mad_priv_hdr = container_of(mad_list, struct ib_mad_private_header,
2210 mad_list);
2211 recv = container_of(mad_priv_hdr, struct ib_mad_private, header);
1527106f
RC
2212 ib_dma_unmap_single(port_priv->device,
2213 recv->header.mapping,
c9082e51 2214 mad_priv_dma_size(recv),
1527106f 2215 DMA_FROM_DEVICE);
1da177e4
LT
2216
2217 /* Setup MAD receive work completion from "normal" work completion */
24239aff
SH
2218 recv->header.wc = *wc;
2219 recv->header.recv_wc.wc = &recv->header.wc;
8e4349d1
IW
2220
2221 if (opa && ((struct ib_mad_hdr *)(recv->mad))->base_version == OPA_MGMT_BASE_VERSION) {
2222 recv->header.recv_wc.mad_len = wc->byte_len - sizeof(struct ib_grh);
2223 recv->header.recv_wc.mad_seg_size = sizeof(struct opa_mad);
2224 } else {
2225 recv->header.recv_wc.mad_len = sizeof(struct ib_mad);
2226 recv->header.recv_wc.mad_seg_size = sizeof(struct ib_mad);
2227 }
2228
c9082e51 2229 recv->header.recv_wc.recv_buf.mad = (struct ib_mad *)recv->mad;
1da177e4
LT
2230 recv->header.recv_wc.recv_buf.grh = &recv->grh;
2231
2232 if (atomic_read(&qp_info->snoop_count))
2233 snoop_recv(qp_info, &recv->header.recv_wc, IB_MAD_SNOOP_RECVS);
2234
2235 /* Validate MAD */
8e4349d1 2236 if (!validate_mad((const struct ib_mad_hdr *)recv->mad, qp_info, opa))
1da177e4
LT
2237 goto out;
2238
4cd7c947
IW
2239 mad_size = recv->mad_size;
2240 response = alloc_mad_private(mad_size, GFP_KERNEL);
445d6807 2241 if (!response) {
7ef5d4b0 2242 dev_err(&port_priv->device->dev,
d53e11fd 2243 "%s: no memory for response buffer\n", __func__);
445d6807
HR
2244 goto out;
2245 }
2246
4139032b 2247 if (rdma_cap_ib_switch(port_priv->device))
1bae4dbf
HR
2248 port_num = wc->port_num;
2249 else
2250 port_num = port_priv->port_num;
2251
c9082e51 2252 if (((struct ib_mad_hdr *)recv->mad)->mgmt_class ==
1da177e4 2253 IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE) {
8e4349d1
IW
2254 if (handle_smi(port_priv, qp_info, wc, port_num, recv,
2255 response, opa)
e11ae8aa 2256 == IB_SMI_DISCARD)
1da177e4 2257 goto out;
1da177e4
LT
2258 }
2259
1da177e4
LT
2260 /* Give driver "right of first refusal" on incoming MAD */
2261 if (port_priv->device->process_mad) {
1da177e4
LT
2262 ret = port_priv->device->process_mad(port_priv->device, 0,
2263 port_priv->port_num,
2264 wc, &recv->grh,
4cd7c947
IW
2265 (const struct ib_mad_hdr *)recv->mad,
2266 recv->mad_size,
2267 (struct ib_mad_hdr *)response->mad,
2268 &mad_size, &resp_mad_pkey_index);
8e4349d1
IW
2269
2270 if (opa)
2271 wc->pkey_index = resp_mad_pkey_index;
2272
1da177e4
LT
2273 if (ret & IB_MAD_RESULT_SUCCESS) {
2274 if (ret & IB_MAD_RESULT_CONSUMED)
2275 goto out;
2276 if (ret & IB_MAD_RESULT_REPLY) {
c9082e51 2277 agent_send_response((const struct ib_mad_hdr *)response->mad,
34816ad9
SH
2278 &recv->grh, wc,
2279 port_priv->device,
1bae4dbf 2280 port_num,
c9082e51 2281 qp_info->qp->qp_num,
8e4349d1 2282 mad_size, opa);
1da177e4
LT
2283 goto out;
2284 }
2285 }
2286 }
2287
c9082e51 2288 mad_agent = find_mad_agent(port_priv, (const struct ib_mad_hdr *)recv->mad);
1da177e4 2289 if (mad_agent) {
4a0754fa 2290 ib_mad_complete_recv(mad_agent, &recv->header.recv_wc);
1da177e4
LT
2291 /*
2292 * recv is freed up in error cases in ib_mad_complete_recv
2293 * or via recv_handler in ib_mad_complete_recv()
2294 */
2295 recv = NULL;
a9e74323 2296 } else if ((ret & IB_MAD_RESULT_SUCCESS) &&
8e4349d1 2297 generate_unmatched_resp(recv, response, &mad_size, opa)) {
c9082e51
IW
2298 agent_send_response((const struct ib_mad_hdr *)response->mad, &recv->grh, wc,
2299 port_priv->device, port_num,
8e4349d1 2300 qp_info->qp->qp_num, mad_size, opa);
1da177e4
LT
2301 }
2302
2303out:
2304 /* Post another receive request for this QP */
2305 if (response) {
2306 ib_mad_post_receive_mads(qp_info, response);
c9082e51 2307 kfree(recv);
1da177e4
LT
2308 } else
2309 ib_mad_post_receive_mads(qp_info, recv);
2310}
2311
2312static void adjust_timeout(struct ib_mad_agent_private *mad_agent_priv)
2313{
2314 struct ib_mad_send_wr_private *mad_send_wr;
2315 unsigned long delay;
2316
2317 if (list_empty(&mad_agent_priv->wait_list)) {
136b5721 2318 cancel_delayed_work(&mad_agent_priv->timed_work);
1da177e4
LT
2319 } else {
2320 mad_send_wr = list_entry(mad_agent_priv->wait_list.next,
2321 struct ib_mad_send_wr_private,
2322 agent_list);
2323
2324 if (time_after(mad_agent_priv->timeout,
2325 mad_send_wr->timeout)) {
2326 mad_agent_priv->timeout = mad_send_wr->timeout;
1da177e4
LT
2327 delay = mad_send_wr->timeout - jiffies;
2328 if ((long)delay <= 0)
2329 delay = 1;
e7c2f967
TH
2330 mod_delayed_work(mad_agent_priv->qp_info->port_priv->wq,
2331 &mad_agent_priv->timed_work, delay);
1da177e4
LT
2332 }
2333 }
2334}
2335
d760ce8f 2336static void wait_for_response(struct ib_mad_send_wr_private *mad_send_wr)
1da177e4 2337{
d760ce8f 2338 struct ib_mad_agent_private *mad_agent_priv;
1da177e4
LT
2339 struct ib_mad_send_wr_private *temp_mad_send_wr;
2340 struct list_head *list_item;
2341 unsigned long delay;
2342
d760ce8f 2343 mad_agent_priv = mad_send_wr->mad_agent_priv;
1da177e4
LT
2344 list_del(&mad_send_wr->agent_list);
2345
2346 delay = mad_send_wr->timeout;
2347 mad_send_wr->timeout += jiffies;
2348
29bb33dd
HR
2349 if (delay) {
2350 list_for_each_prev(list_item, &mad_agent_priv->wait_list) {
2351 temp_mad_send_wr = list_entry(list_item,
2352 struct ib_mad_send_wr_private,
2353 agent_list);
2354 if (time_after(mad_send_wr->timeout,
2355 temp_mad_send_wr->timeout))
2356 break;
2357 }
1da177e4 2358 }
29bb33dd
HR
2359 else
2360 list_item = &mad_agent_priv->wait_list;
1da177e4
LT
2361 list_add(&mad_send_wr->agent_list, list_item);
2362
2363 /* Reschedule a work item if we have a shorter timeout */
e7c2f967
TH
2364 if (mad_agent_priv->wait_list.next == &mad_send_wr->agent_list)
2365 mod_delayed_work(mad_agent_priv->qp_info->port_priv->wq,
2366 &mad_agent_priv->timed_work, delay);
1da177e4
LT
2367}
2368
03b61ad2
HR
2369void ib_reset_mad_timeout(struct ib_mad_send_wr_private *mad_send_wr,
2370 int timeout_ms)
2371{
2372 mad_send_wr->timeout = msecs_to_jiffies(timeout_ms);
2373 wait_for_response(mad_send_wr);
2374}
2375
1da177e4
LT
2376/*
2377 * Process a send work completion
2378 */
fa619a77
HR
2379void ib_mad_complete_send_wr(struct ib_mad_send_wr_private *mad_send_wr,
2380 struct ib_mad_send_wc *mad_send_wc)
1da177e4
LT
2381{
2382 struct ib_mad_agent_private *mad_agent_priv;
2383 unsigned long flags;
fa619a77 2384 int ret;
1da177e4 2385
d760ce8f 2386 mad_agent_priv = mad_send_wr->mad_agent_priv;
1da177e4 2387 spin_lock_irqsave(&mad_agent_priv->lock, flags);
1471cb6c 2388 if (ib_mad_kernel_rmpp_agent(&mad_agent_priv->agent)) {
fa619a77
HR
2389 ret = ib_process_rmpp_send_wc(mad_send_wr, mad_send_wc);
2390 if (ret == IB_RMPP_RESULT_CONSUMED)
2391 goto done;
2392 } else
2393 ret = IB_RMPP_RESULT_UNHANDLED;
2394
1da177e4
LT
2395 if (mad_send_wc->status != IB_WC_SUCCESS &&
2396 mad_send_wr->status == IB_WC_SUCCESS) {
2397 mad_send_wr->status = mad_send_wc->status;
2398 mad_send_wr->refcount -= (mad_send_wr->timeout > 0);
2399 }
2400
2401 if (--mad_send_wr->refcount > 0) {
2402 if (mad_send_wr->refcount == 1 && mad_send_wr->timeout &&
2403 mad_send_wr->status == IB_WC_SUCCESS) {
d760ce8f 2404 wait_for_response(mad_send_wr);
1da177e4 2405 }
fa619a77 2406 goto done;
1da177e4
LT
2407 }
2408
2409 /* Remove send from MAD agent and notify client of completion */
2410 list_del(&mad_send_wr->agent_list);
2411 adjust_timeout(mad_agent_priv);
2412 spin_unlock_irqrestore(&mad_agent_priv->lock, flags);
2413
2414 if (mad_send_wr->status != IB_WC_SUCCESS )
2415 mad_send_wc->status = mad_send_wr->status;
34816ad9
SH
2416 if (ret == IB_RMPP_RESULT_INTERNAL)
2417 ib_rmpp_send_handler(mad_send_wc);
2418 else
fa619a77
HR
2419 mad_agent_priv->agent.send_handler(&mad_agent_priv->agent,
2420 mad_send_wc);
1da177e4
LT
2421
2422 /* Release reference on agent taken when sending */
1b52fa98 2423 deref_mad_agent(mad_agent_priv);
fa619a77
HR
2424 return;
2425done:
2426 spin_unlock_irqrestore(&mad_agent_priv->lock, flags);
1da177e4
LT
2427}
2428
d53e11fd 2429static void ib_mad_send_done(struct ib_cq *cq, struct ib_wc *wc)
1da177e4 2430{
d53e11fd
CH
2431 struct ib_mad_port_private *port_priv = cq->cq_context;
2432 struct ib_mad_list_head *mad_list =
2433 container_of(wc->wr_cqe, struct ib_mad_list_head, cqe);
1da177e4 2434 struct ib_mad_send_wr_private *mad_send_wr, *queued_send_wr;
1da177e4
LT
2435 struct ib_mad_qp_info *qp_info;
2436 struct ib_mad_queue *send_queue;
2437 struct ib_send_wr *bad_send_wr;
34816ad9 2438 struct ib_mad_send_wc mad_send_wc;
1da177e4
LT
2439 unsigned long flags;
2440 int ret;
2441
d53e11fd
CH
2442 if (list_empty_careful(&port_priv->port_list))
2443 return;
2444
2445 if (wc->status != IB_WC_SUCCESS) {
2446 if (!ib_mad_send_error(port_priv, wc))
2447 return;
2448 }
2449
1da177e4
LT
2450 mad_send_wr = container_of(mad_list, struct ib_mad_send_wr_private,
2451 mad_list);
2452 send_queue = mad_list->mad_queue;
2453 qp_info = send_queue->qp_info;
2454
2455retry:
1527106f
RC
2456 ib_dma_unmap_single(mad_send_wr->send_buf.mad_agent->device,
2457 mad_send_wr->header_mapping,
2458 mad_send_wr->sg_list[0].length, DMA_TO_DEVICE);
2459 ib_dma_unmap_single(mad_send_wr->send_buf.mad_agent->device,
2460 mad_send_wr->payload_mapping,
2461 mad_send_wr->sg_list[1].length, DMA_TO_DEVICE);
1da177e4
LT
2462 queued_send_wr = NULL;
2463 spin_lock_irqsave(&send_queue->lock, flags);
2464 list_del(&mad_list->list);
2465
2466 /* Move queued send to the send queue */
2467 if (send_queue->count-- > send_queue->max_active) {
2468 mad_list = container_of(qp_info->overflow_list.next,
2469 struct ib_mad_list_head, list);
2470 queued_send_wr = container_of(mad_list,
2471 struct ib_mad_send_wr_private,
2472 mad_list);
179e0917 2473 list_move_tail(&mad_list->list, &send_queue->list);
1da177e4
LT
2474 }
2475 spin_unlock_irqrestore(&send_queue->lock, flags);
2476
34816ad9
SH
2477 mad_send_wc.send_buf = &mad_send_wr->send_buf;
2478 mad_send_wc.status = wc->status;
2479 mad_send_wc.vendor_err = wc->vendor_err;
1da177e4 2480 if (atomic_read(&qp_info->snoop_count))
34816ad9 2481 snoop_send(qp_info, &mad_send_wr->send_buf, &mad_send_wc,
1da177e4 2482 IB_MAD_SNOOP_SEND_COMPLETIONS);
34816ad9 2483 ib_mad_complete_send_wr(mad_send_wr, &mad_send_wc);
1da177e4
LT
2484
2485 if (queued_send_wr) {
e622f2f4 2486 ret = ib_post_send(qp_info->qp, &queued_send_wr->send_wr.wr,
34816ad9 2487 &bad_send_wr);
1da177e4 2488 if (ret) {
7ef5d4b0
IW
2489 dev_err(&port_priv->device->dev,
2490 "ib_post_send failed: %d\n", ret);
1da177e4
LT
2491 mad_send_wr = queued_send_wr;
2492 wc->status = IB_WC_LOC_QP_OP_ERR;
2493 goto retry;
2494 }
2495 }
2496}
2497
2498static void mark_sends_for_retry(struct ib_mad_qp_info *qp_info)
2499{
2500 struct ib_mad_send_wr_private *mad_send_wr;
2501 struct ib_mad_list_head *mad_list;
2502 unsigned long flags;
2503
2504 spin_lock_irqsave(&qp_info->send_queue.lock, flags);
2505 list_for_each_entry(mad_list, &qp_info->send_queue.list, list) {
2506 mad_send_wr = container_of(mad_list,
2507 struct ib_mad_send_wr_private,
2508 mad_list);
2509 mad_send_wr->retry = 1;
2510 }
2511 spin_unlock_irqrestore(&qp_info->send_queue.lock, flags);
2512}
2513
d53e11fd
CH
2514static bool ib_mad_send_error(struct ib_mad_port_private *port_priv,
2515 struct ib_wc *wc)
1da177e4 2516{
d53e11fd
CH
2517 struct ib_mad_list_head *mad_list =
2518 container_of(wc->wr_cqe, struct ib_mad_list_head, cqe);
2519 struct ib_mad_qp_info *qp_info = mad_list->mad_queue->qp_info;
1da177e4
LT
2520 struct ib_mad_send_wr_private *mad_send_wr;
2521 int ret;
2522
1da177e4
LT
2523 /*
2524 * Send errors will transition the QP to SQE - move
2525 * QP to RTS and repost flushed work requests
2526 */
2527 mad_send_wr = container_of(mad_list, struct ib_mad_send_wr_private,
2528 mad_list);
2529 if (wc->status == IB_WC_WR_FLUSH_ERR) {
2530 if (mad_send_wr->retry) {
2531 /* Repost send */
2532 struct ib_send_wr *bad_send_wr;
2533
2534 mad_send_wr->retry = 0;
e622f2f4 2535 ret = ib_post_send(qp_info->qp, &mad_send_wr->send_wr.wr,
1da177e4 2536 &bad_send_wr);
d53e11fd
CH
2537 if (!ret)
2538 return false;
2539 }
1da177e4
LT
2540 } else {
2541 struct ib_qp_attr *attr;
2542
2543 /* Transition QP to RTS and fail offending send */
2544 attr = kmalloc(sizeof *attr, GFP_KERNEL);
2545 if (attr) {
2546 attr->qp_state = IB_QPS_RTS;
2547 attr->cur_qp_state = IB_QPS_SQE;
2548 ret = ib_modify_qp(qp_info->qp, attr,
2549 IB_QP_STATE | IB_QP_CUR_STATE);
2550 kfree(attr);
2551 if (ret)
7ef5d4b0 2552 dev_err(&port_priv->device->dev,
d53e11fd
CH
2553 "%s - ib_modify_qp to RTS: %d\n",
2554 __func__, ret);
1da177e4
LT
2555 else
2556 mark_sends_for_retry(qp_info);
2557 }
1da177e4 2558 }
1da177e4 2559
d53e11fd 2560 return true;
1da177e4
LT
2561}
2562
2563static void cancel_mads(struct ib_mad_agent_private *mad_agent_priv)
2564{
2565 unsigned long flags;
2566 struct ib_mad_send_wr_private *mad_send_wr, *temp_mad_send_wr;
2567 struct ib_mad_send_wc mad_send_wc;
2568 struct list_head cancel_list;
2569
2570 INIT_LIST_HEAD(&cancel_list);
2571
2572 spin_lock_irqsave(&mad_agent_priv->lock, flags);
2573 list_for_each_entry_safe(mad_send_wr, temp_mad_send_wr,
2574 &mad_agent_priv->send_list, agent_list) {
2575 if (mad_send_wr->status == IB_WC_SUCCESS) {
3cd96564 2576 mad_send_wr->status = IB_WC_WR_FLUSH_ERR;
1da177e4
LT
2577 mad_send_wr->refcount -= (mad_send_wr->timeout > 0);
2578 }
2579 }
2580
2581 /* Empty wait list to prevent receives from finding a request */
2582 list_splice_init(&mad_agent_priv->wait_list, &cancel_list);
2583 spin_unlock_irqrestore(&mad_agent_priv->lock, flags);
2584
2585 /* Report all cancelled requests */
2586 mad_send_wc.status = IB_WC_WR_FLUSH_ERR;
2587 mad_send_wc.vendor_err = 0;
2588
2589 list_for_each_entry_safe(mad_send_wr, temp_mad_send_wr,
2590 &cancel_list, agent_list) {
34816ad9
SH
2591 mad_send_wc.send_buf = &mad_send_wr->send_buf;
2592 list_del(&mad_send_wr->agent_list);
1da177e4
LT
2593 mad_agent_priv->agent.send_handler(&mad_agent_priv->agent,
2594 &mad_send_wc);
1da177e4
LT
2595 atomic_dec(&mad_agent_priv->refcount);
2596 }
2597}
2598
2599static struct ib_mad_send_wr_private*
34816ad9
SH
2600find_send_wr(struct ib_mad_agent_private *mad_agent_priv,
2601 struct ib_mad_send_buf *send_buf)
1da177e4
LT
2602{
2603 struct ib_mad_send_wr_private *mad_send_wr;
2604
2605 list_for_each_entry(mad_send_wr, &mad_agent_priv->wait_list,
2606 agent_list) {
34816ad9 2607 if (&mad_send_wr->send_buf == send_buf)
1da177e4
LT
2608 return mad_send_wr;
2609 }
2610
2611 list_for_each_entry(mad_send_wr, &mad_agent_priv->send_list,
2612 agent_list) {
c597eee5
IW
2613 if (is_rmpp_data_mad(mad_agent_priv,
2614 mad_send_wr->send_buf.mad) &&
34816ad9 2615 &mad_send_wr->send_buf == send_buf)
1da177e4
LT
2616 return mad_send_wr;
2617 }
2618 return NULL;
2619}
2620
34816ad9
SH
2621int ib_modify_mad(struct ib_mad_agent *mad_agent,
2622 struct ib_mad_send_buf *send_buf, u32 timeout_ms)
1da177e4
LT
2623{
2624 struct ib_mad_agent_private *mad_agent_priv;
2625 struct ib_mad_send_wr_private *mad_send_wr;
2626 unsigned long flags;
cabe3cbc 2627 int active;
1da177e4
LT
2628
2629 mad_agent_priv = container_of(mad_agent, struct ib_mad_agent_private,
2630 agent);
2631 spin_lock_irqsave(&mad_agent_priv->lock, flags);
34816ad9 2632 mad_send_wr = find_send_wr(mad_agent_priv, send_buf);
03b61ad2 2633 if (!mad_send_wr || mad_send_wr->status != IB_WC_SUCCESS) {
1da177e4 2634 spin_unlock_irqrestore(&mad_agent_priv->lock, flags);
03b61ad2 2635 return -EINVAL;
1da177e4
LT
2636 }
2637
cabe3cbc 2638 active = (!mad_send_wr->timeout || mad_send_wr->refcount > 1);
03b61ad2 2639 if (!timeout_ms) {
1da177e4 2640 mad_send_wr->status = IB_WC_WR_FLUSH_ERR;
03b61ad2 2641 mad_send_wr->refcount -= (mad_send_wr->timeout > 0);
1da177e4
LT
2642 }
2643
34816ad9 2644 mad_send_wr->send_buf.timeout_ms = timeout_ms;
cabe3cbc 2645 if (active)
03b61ad2
HR
2646 mad_send_wr->timeout = msecs_to_jiffies(timeout_ms);
2647 else
2648 ib_reset_mad_timeout(mad_send_wr, timeout_ms);
2649
1da177e4 2650 spin_unlock_irqrestore(&mad_agent_priv->lock, flags);
03b61ad2
HR
2651 return 0;
2652}
2653EXPORT_SYMBOL(ib_modify_mad);
1da177e4 2654
34816ad9
SH
2655void ib_cancel_mad(struct ib_mad_agent *mad_agent,
2656 struct ib_mad_send_buf *send_buf)
03b61ad2 2657{
34816ad9 2658 ib_modify_mad(mad_agent, send_buf, 0);
1da177e4
LT
2659}
2660EXPORT_SYMBOL(ib_cancel_mad);
2661
c4028958 2662static void local_completions(struct work_struct *work)
1da177e4
LT
2663{
2664 struct ib_mad_agent_private *mad_agent_priv;
2665 struct ib_mad_local_private *local;
2666 struct ib_mad_agent_private *recv_mad_agent;
2667 unsigned long flags;
1d9bc6d6 2668 int free_mad;
1da177e4
LT
2669 struct ib_wc wc;
2670 struct ib_mad_send_wc mad_send_wc;
8e4349d1 2671 bool opa;
1da177e4 2672
c4028958
DH
2673 mad_agent_priv =
2674 container_of(work, struct ib_mad_agent_private, local_work);
1da177e4 2675
8e4349d1
IW
2676 opa = rdma_cap_opa_mad(mad_agent_priv->qp_info->port_priv->device,
2677 mad_agent_priv->qp_info->port_priv->port_num);
2678
1da177e4
LT
2679 spin_lock_irqsave(&mad_agent_priv->lock, flags);
2680 while (!list_empty(&mad_agent_priv->local_list)) {
2681 local = list_entry(mad_agent_priv->local_list.next,
2682 struct ib_mad_local_private,
2683 completion_list);
37289efe 2684 list_del(&local->completion_list);
1da177e4 2685 spin_unlock_irqrestore(&mad_agent_priv->lock, flags);
1d9bc6d6 2686 free_mad = 0;
1da177e4 2687 if (local->mad_priv) {
8e4349d1 2688 u8 base_version;
1da177e4
LT
2689 recv_mad_agent = local->recv_mad_agent;
2690 if (!recv_mad_agent) {
7ef5d4b0
IW
2691 dev_err(&mad_agent_priv->agent.device->dev,
2692 "No receive MAD agent for local completion\n");
1d9bc6d6 2693 free_mad = 1;
1da177e4
LT
2694 goto local_send_completion;
2695 }
2696
2697 /*
2698 * Defined behavior is to complete response
2699 * before request
2700 */
062dbb69 2701 build_smp_wc(recv_mad_agent->agent.qp,
d53e11fd 2702 local->mad_send_wr->send_wr.wr.wr_cqe,
97f52eb4 2703 be16_to_cpu(IB_LID_PERMISSIVE),
e622f2f4 2704 local->mad_send_wr->send_wr.pkey_index,
8e4349d1 2705 recv_mad_agent->agent.port_num, &wc);
1da177e4
LT
2706
2707 local->mad_priv->header.recv_wc.wc = &wc;
8e4349d1
IW
2708
2709 base_version = ((struct ib_mad_hdr *)(local->mad_priv->mad))->base_version;
2710 if (opa && base_version == OPA_MGMT_BASE_VERSION) {
2711 local->mad_priv->header.recv_wc.mad_len = local->return_wc_byte_len;
2712 local->mad_priv->header.recv_wc.mad_seg_size = sizeof(struct opa_mad);
2713 } else {
2714 local->mad_priv->header.recv_wc.mad_len = sizeof(struct ib_mad);
2715 local->mad_priv->header.recv_wc.mad_seg_size = sizeof(struct ib_mad);
2716 }
2717
fa619a77
HR
2718 INIT_LIST_HEAD(&local->mad_priv->header.recv_wc.rmpp_list);
2719 list_add(&local->mad_priv->header.recv_wc.recv_buf.list,
2720 &local->mad_priv->header.recv_wc.rmpp_list);
1da177e4
LT
2721 local->mad_priv->header.recv_wc.recv_buf.grh = NULL;
2722 local->mad_priv->header.recv_wc.recv_buf.mad =
c9082e51 2723 (struct ib_mad *)local->mad_priv->mad;
1da177e4
LT
2724 if (atomic_read(&recv_mad_agent->qp_info->snoop_count))
2725 snoop_recv(recv_mad_agent->qp_info,
2726 &local->mad_priv->header.recv_wc,
2727 IB_MAD_SNOOP_RECVS);
2728 recv_mad_agent->agent.recv_handler(
2729 &recv_mad_agent->agent,
ca281265 2730 &local->mad_send_wr->send_buf,
1da177e4
LT
2731 &local->mad_priv->header.recv_wc);
2732 spin_lock_irqsave(&recv_mad_agent->lock, flags);
2733 atomic_dec(&recv_mad_agent->refcount);
2734 spin_unlock_irqrestore(&recv_mad_agent->lock, flags);
2735 }
2736
2737local_send_completion:
2738 /* Complete send */
2739 mad_send_wc.status = IB_WC_SUCCESS;
2740 mad_send_wc.vendor_err = 0;
34816ad9 2741 mad_send_wc.send_buf = &local->mad_send_wr->send_buf;
1da177e4 2742 if (atomic_read(&mad_agent_priv->qp_info->snoop_count))
34816ad9
SH
2743 snoop_send(mad_agent_priv->qp_info,
2744 &local->mad_send_wr->send_buf,
2745 &mad_send_wc, IB_MAD_SNOOP_SEND_COMPLETIONS);
1da177e4
LT
2746 mad_agent_priv->agent.send_handler(&mad_agent_priv->agent,
2747 &mad_send_wc);
2748
2749 spin_lock_irqsave(&mad_agent_priv->lock, flags);
1da177e4 2750 atomic_dec(&mad_agent_priv->refcount);
1d9bc6d6 2751 if (free_mad)
c9082e51 2752 kfree(local->mad_priv);
1da177e4
LT
2753 kfree(local);
2754 }
2755 spin_unlock_irqrestore(&mad_agent_priv->lock, flags);
2756}
2757
f75b7a52
HR
2758static int retry_send(struct ib_mad_send_wr_private *mad_send_wr)
2759{
2760 int ret;
2761
4fc8cd49 2762 if (!mad_send_wr->retries_left)
f75b7a52
HR
2763 return -ETIMEDOUT;
2764
4fc8cd49
SH
2765 mad_send_wr->retries_left--;
2766 mad_send_wr->send_buf.retries++;
2767
34816ad9 2768 mad_send_wr->timeout = msecs_to_jiffies(mad_send_wr->send_buf.timeout_ms);
f75b7a52 2769
1471cb6c 2770 if (ib_mad_kernel_rmpp_agent(&mad_send_wr->mad_agent_priv->agent)) {
fa619a77
HR
2771 ret = ib_retry_rmpp(mad_send_wr);
2772 switch (ret) {
2773 case IB_RMPP_RESULT_UNHANDLED:
2774 ret = ib_send_mad(mad_send_wr);
2775 break;
2776 case IB_RMPP_RESULT_CONSUMED:
2777 ret = 0;
2778 break;
2779 default:
2780 ret = -ECOMM;
2781 break;
2782 }
2783 } else
2784 ret = ib_send_mad(mad_send_wr);
f75b7a52
HR
2785
2786 if (!ret) {
2787 mad_send_wr->refcount++;
f75b7a52
HR
2788 list_add_tail(&mad_send_wr->agent_list,
2789 &mad_send_wr->mad_agent_priv->send_list);
2790 }
2791 return ret;
2792}
2793
c4028958 2794static void timeout_sends(struct work_struct *work)
1da177e4
LT
2795{
2796 struct ib_mad_agent_private *mad_agent_priv;
2797 struct ib_mad_send_wr_private *mad_send_wr;
2798 struct ib_mad_send_wc mad_send_wc;
2799 unsigned long flags, delay;
2800
c4028958
DH
2801 mad_agent_priv = container_of(work, struct ib_mad_agent_private,
2802 timed_work.work);
1da177e4
LT
2803 mad_send_wc.vendor_err = 0;
2804
2805 spin_lock_irqsave(&mad_agent_priv->lock, flags);
2806 while (!list_empty(&mad_agent_priv->wait_list)) {
2807 mad_send_wr = list_entry(mad_agent_priv->wait_list.next,
2808 struct ib_mad_send_wr_private,
2809 agent_list);
2810
2811 if (time_after(mad_send_wr->timeout, jiffies)) {
2812 delay = mad_send_wr->timeout - jiffies;
2813 if ((long)delay <= 0)
2814 delay = 1;
2815 queue_delayed_work(mad_agent_priv->qp_info->
2816 port_priv->wq,
2817 &mad_agent_priv->timed_work, delay);
2818 break;
2819 }
2820
dbf9227b 2821 list_del(&mad_send_wr->agent_list);
29bb33dd
HR
2822 if (mad_send_wr->status == IB_WC_SUCCESS &&
2823 !retry_send(mad_send_wr))
f75b7a52
HR
2824 continue;
2825
1da177e4
LT
2826 spin_unlock_irqrestore(&mad_agent_priv->lock, flags);
2827
03b61ad2
HR
2828 if (mad_send_wr->status == IB_WC_SUCCESS)
2829 mad_send_wc.status = IB_WC_RESP_TIMEOUT_ERR;
2830 else
2831 mad_send_wc.status = mad_send_wr->status;
34816ad9 2832 mad_send_wc.send_buf = &mad_send_wr->send_buf;
1da177e4
LT
2833 mad_agent_priv->agent.send_handler(&mad_agent_priv->agent,
2834 &mad_send_wc);
2835
1da177e4
LT
2836 atomic_dec(&mad_agent_priv->refcount);
2837 spin_lock_irqsave(&mad_agent_priv->lock, flags);
2838 }
2839 spin_unlock_irqrestore(&mad_agent_priv->lock, flags);
2840}
2841
1da177e4
LT
2842/*
2843 * Allocate receive MADs and post receive WRs for them
2844 */
2845static int ib_mad_post_receive_mads(struct ib_mad_qp_info *qp_info,
2846 struct ib_mad_private *mad)
2847{
2848 unsigned long flags;
2849 int post, ret;
2850 struct ib_mad_private *mad_priv;
2851 struct ib_sge sg_list;
2852 struct ib_recv_wr recv_wr, *bad_recv_wr;
2853 struct ib_mad_queue *recv_queue = &qp_info->recv_queue;
2854
2855 /* Initialize common scatter list fields */
4be90bc6 2856 sg_list.lkey = qp_info->port_priv->pd->local_dma_lkey;
1da177e4
LT
2857
2858 /* Initialize common receive WR fields */
2859 recv_wr.next = NULL;
2860 recv_wr.sg_list = &sg_list;
2861 recv_wr.num_sge = 1;
2862
2863 do {
2864 /* Allocate and map receive buffer */
2865 if (mad) {
2866 mad_priv = mad;
2867 mad = NULL;
2868 } else {
c9082e51
IW
2869 mad_priv = alloc_mad_private(port_mad_size(qp_info->port_priv),
2870 GFP_ATOMIC);
1da177e4 2871 if (!mad_priv) {
7ef5d4b0
IW
2872 dev_err(&qp_info->port_priv->device->dev,
2873 "No memory for receive buffer\n");
1da177e4
LT
2874 ret = -ENOMEM;
2875 break;
2876 }
2877 }
c9082e51 2878 sg_list.length = mad_priv_dma_size(mad_priv);
1527106f
RC
2879 sg_list.addr = ib_dma_map_single(qp_info->port_priv->device,
2880 &mad_priv->grh,
c9082e51 2881 mad_priv_dma_size(mad_priv),
1527106f 2882 DMA_FROM_DEVICE);
2c34e68f
YB
2883 if (unlikely(ib_dma_mapping_error(qp_info->port_priv->device,
2884 sg_list.addr))) {
2885 ret = -ENOMEM;
2886 break;
2887 }
1527106f 2888 mad_priv->header.mapping = sg_list.addr;
1da177e4 2889 mad_priv->header.mad_list.mad_queue = recv_queue;
d53e11fd
CH
2890 mad_priv->header.mad_list.cqe.done = ib_mad_recv_done;
2891 recv_wr.wr_cqe = &mad_priv->header.mad_list.cqe;
1da177e4
LT
2892
2893 /* Post receive WR */
2894 spin_lock_irqsave(&recv_queue->lock, flags);
2895 post = (++recv_queue->count < recv_queue->max_active);
2896 list_add_tail(&mad_priv->header.mad_list.list, &recv_queue->list);
2897 spin_unlock_irqrestore(&recv_queue->lock, flags);
2898 ret = ib_post_recv(qp_info->qp, &recv_wr, &bad_recv_wr);
2899 if (ret) {
2900 spin_lock_irqsave(&recv_queue->lock, flags);
2901 list_del(&mad_priv->header.mad_list.list);
2902 recv_queue->count--;
2903 spin_unlock_irqrestore(&recv_queue->lock, flags);
1527106f
RC
2904 ib_dma_unmap_single(qp_info->port_priv->device,
2905 mad_priv->header.mapping,
c9082e51 2906 mad_priv_dma_size(mad_priv),
1527106f 2907 DMA_FROM_DEVICE);
c9082e51 2908 kfree(mad_priv);
7ef5d4b0
IW
2909 dev_err(&qp_info->port_priv->device->dev,
2910 "ib_post_recv failed: %d\n", ret);
1da177e4
LT
2911 break;
2912 }
2913 } while (post);
2914
2915 return ret;
2916}
2917
2918/*
2919 * Return all the posted receive MADs
2920 */
2921static void cleanup_recv_queue(struct ib_mad_qp_info *qp_info)
2922{
2923 struct ib_mad_private_header *mad_priv_hdr;
2924 struct ib_mad_private *recv;
2925 struct ib_mad_list_head *mad_list;
2926
fac70d51
EC
2927 if (!qp_info->qp)
2928 return;
2929
1da177e4
LT
2930 while (!list_empty(&qp_info->recv_queue.list)) {
2931
2932 mad_list = list_entry(qp_info->recv_queue.list.next,
2933 struct ib_mad_list_head, list);
2934 mad_priv_hdr = container_of(mad_list,
2935 struct ib_mad_private_header,
2936 mad_list);
2937 recv = container_of(mad_priv_hdr, struct ib_mad_private,
2938 header);
2939
2940 /* Remove from posted receive MAD list */
2941 list_del(&mad_list->list);
2942
1527106f
RC
2943 ib_dma_unmap_single(qp_info->port_priv->device,
2944 recv->header.mapping,
c9082e51 2945 mad_priv_dma_size(recv),
1527106f 2946 DMA_FROM_DEVICE);
c9082e51 2947 kfree(recv);
1da177e4
LT
2948 }
2949
2950 qp_info->recv_queue.count = 0;
2951}
2952
2953/*
2954 * Start the port
2955 */
2956static int ib_mad_port_start(struct ib_mad_port_private *port_priv)
2957{
2958 int ret, i;
2959 struct ib_qp_attr *attr;
2960 struct ib_qp *qp;
ef5ed416 2961 u16 pkey_index;
1da177e4
LT
2962
2963 attr = kmalloc(sizeof *attr, GFP_KERNEL);
3cd96564 2964 if (!attr) {
7ef5d4b0
IW
2965 dev_err(&port_priv->device->dev,
2966 "Couldn't kmalloc ib_qp_attr\n");
1da177e4
LT
2967 return -ENOMEM;
2968 }
2969
ef5ed416
JM
2970 ret = ib_find_pkey(port_priv->device, port_priv->port_num,
2971 IB_DEFAULT_PKEY_FULL, &pkey_index);
2972 if (ret)
2973 pkey_index = 0;
2974
1da177e4
LT
2975 for (i = 0; i < IB_MAD_QPS_CORE; i++) {
2976 qp = port_priv->qp_info[i].qp;
fac70d51
EC
2977 if (!qp)
2978 continue;
2979
1da177e4
LT
2980 /*
2981 * PKey index for QP1 is irrelevant but
2982 * one is needed for the Reset to Init transition
2983 */
2984 attr->qp_state = IB_QPS_INIT;
ef5ed416 2985 attr->pkey_index = pkey_index;
1da177e4
LT
2986 attr->qkey = (qp->qp_num == 0) ? 0 : IB_QP1_QKEY;
2987 ret = ib_modify_qp(qp, attr, IB_QP_STATE |
2988 IB_QP_PKEY_INDEX | IB_QP_QKEY);
2989 if (ret) {
7ef5d4b0
IW
2990 dev_err(&port_priv->device->dev,
2991 "Couldn't change QP%d state to INIT: %d\n",
2992 i, ret);
1da177e4
LT
2993 goto out;
2994 }
2995
2996 attr->qp_state = IB_QPS_RTR;
2997 ret = ib_modify_qp(qp, attr, IB_QP_STATE);
2998 if (ret) {
7ef5d4b0
IW
2999 dev_err(&port_priv->device->dev,
3000 "Couldn't change QP%d state to RTR: %d\n",
3001 i, ret);
1da177e4
LT
3002 goto out;
3003 }
3004
3005 attr->qp_state = IB_QPS_RTS;
3006 attr->sq_psn = IB_MAD_SEND_Q_PSN;
3007 ret = ib_modify_qp(qp, attr, IB_QP_STATE | IB_QP_SQ_PSN);
3008 if (ret) {
7ef5d4b0
IW
3009 dev_err(&port_priv->device->dev,
3010 "Couldn't change QP%d state to RTS: %d\n",
3011 i, ret);
1da177e4
LT
3012 goto out;
3013 }
3014 }
3015
3016 ret = ib_req_notify_cq(port_priv->cq, IB_CQ_NEXT_COMP);
3017 if (ret) {
7ef5d4b0
IW
3018 dev_err(&port_priv->device->dev,
3019 "Failed to request completion notification: %d\n",
3020 ret);
1da177e4
LT
3021 goto out;
3022 }
3023
3024 for (i = 0; i < IB_MAD_QPS_CORE; i++) {
fac70d51
EC
3025 if (!port_priv->qp_info[i].qp)
3026 continue;
3027
1da177e4
LT
3028 ret = ib_mad_post_receive_mads(&port_priv->qp_info[i], NULL);
3029 if (ret) {
7ef5d4b0
IW
3030 dev_err(&port_priv->device->dev,
3031 "Couldn't post receive WRs\n");
1da177e4
LT
3032 goto out;
3033 }
3034 }
3035out:
3036 kfree(attr);
3037 return ret;
3038}
3039
3040static void qp_event_handler(struct ib_event *event, void *qp_context)
3041{
3042 struct ib_mad_qp_info *qp_info = qp_context;
3043
3044 /* It's worse than that! He's dead, Jim! */
7ef5d4b0
IW
3045 dev_err(&qp_info->port_priv->device->dev,
3046 "Fatal error (%d) on MAD QP (%d)\n",
1da177e4
LT
3047 event->event, qp_info->qp->qp_num);
3048}
3049
3050static void init_mad_queue(struct ib_mad_qp_info *qp_info,
3051 struct ib_mad_queue *mad_queue)
3052{
3053 mad_queue->qp_info = qp_info;
3054 mad_queue->count = 0;
3055 spin_lock_init(&mad_queue->lock);
3056 INIT_LIST_HEAD(&mad_queue->list);
3057}
3058
3059static void init_mad_qp(struct ib_mad_port_private *port_priv,
3060 struct ib_mad_qp_info *qp_info)
3061{
3062 qp_info->port_priv = port_priv;
3063 init_mad_queue(qp_info, &qp_info->send_queue);
3064 init_mad_queue(qp_info, &qp_info->recv_queue);
3065 INIT_LIST_HEAD(&qp_info->overflow_list);
3066 spin_lock_init(&qp_info->snoop_lock);
3067 qp_info->snoop_table = NULL;
3068 qp_info->snoop_table_size = 0;
3069 atomic_set(&qp_info->snoop_count, 0);
3070}
3071
3072static int create_mad_qp(struct ib_mad_qp_info *qp_info,
3073 enum ib_qp_type qp_type)
3074{
3075 struct ib_qp_init_attr qp_init_attr;
3076 int ret;
3077
3078 memset(&qp_init_attr, 0, sizeof qp_init_attr);
3079 qp_init_attr.send_cq = qp_info->port_priv->cq;
3080 qp_init_attr.recv_cq = qp_info->port_priv->cq;
3081 qp_init_attr.sq_sig_type = IB_SIGNAL_ALL_WR;
b76aabc3
HR
3082 qp_init_attr.cap.max_send_wr = mad_sendq_size;
3083 qp_init_attr.cap.max_recv_wr = mad_recvq_size;
1da177e4
LT
3084 qp_init_attr.cap.max_send_sge = IB_MAD_SEND_REQ_MAX_SG;
3085 qp_init_attr.cap.max_recv_sge = IB_MAD_RECV_REQ_MAX_SG;
3086 qp_init_attr.qp_type = qp_type;
3087 qp_init_attr.port_num = qp_info->port_priv->port_num;
3088 qp_init_attr.qp_context = qp_info;
3089 qp_init_attr.event_handler = qp_event_handler;
3090 qp_info->qp = ib_create_qp(qp_info->port_priv->pd, &qp_init_attr);
3091 if (IS_ERR(qp_info->qp)) {
7ef5d4b0
IW
3092 dev_err(&qp_info->port_priv->device->dev,
3093 "Couldn't create ib_mad QP%d\n",
3094 get_spl_qp_index(qp_type));
1da177e4
LT
3095 ret = PTR_ERR(qp_info->qp);
3096 goto error;
3097 }
3098 /* Use minimum queue sizes unless the CQ is resized */
b76aabc3
HR
3099 qp_info->send_queue.max_active = mad_sendq_size;
3100 qp_info->recv_queue.max_active = mad_recvq_size;
1da177e4
LT
3101 return 0;
3102
3103error:
3104 return ret;
3105}
3106
3107static void destroy_mad_qp(struct ib_mad_qp_info *qp_info)
3108{
fac70d51
EC
3109 if (!qp_info->qp)
3110 return;
3111
1da177e4 3112 ib_destroy_qp(qp_info->qp);
6044ec88 3113 kfree(qp_info->snoop_table);
1da177e4
LT
3114}
3115
3116/*
3117 * Open the port
3118 * Create the QP, PD, MR, and CQ if needed
3119 */
3120static int ib_mad_port_open(struct ib_device *device,
3121 int port_num)
3122{
3123 int ret, cq_size;
3124 struct ib_mad_port_private *port_priv;
3125 unsigned long flags;
3126 char name[sizeof "ib_mad123"];
fac70d51 3127 int has_smi;
1da177e4 3128
337877a4
IW
3129 if (WARN_ON(rdma_max_mad_size(device, port_num) < IB_MGMT_MAD_SIZE))
3130 return -EFAULT;
3131
548ead17
IW
3132 if (WARN_ON(rdma_cap_opa_mad(device, port_num) &&
3133 rdma_max_mad_size(device, port_num) < OPA_MGMT_MAD_SIZE))
3134 return -EFAULT;
3135
1da177e4 3136 /* Create new device info */
de6eb66b 3137 port_priv = kzalloc(sizeof *port_priv, GFP_KERNEL);
1da177e4 3138 if (!port_priv) {
7ef5d4b0 3139 dev_err(&device->dev, "No memory for ib_mad_port_private\n");
1da177e4
LT
3140 return -ENOMEM;
3141 }
de6eb66b 3142
1da177e4
LT
3143 port_priv->device = device;
3144 port_priv->port_num = port_num;
3145 spin_lock_init(&port_priv->reg_lock);
3146 INIT_LIST_HEAD(&port_priv->agent_list);
3147 init_mad_qp(port_priv, &port_priv->qp_info[0]);
3148 init_mad_qp(port_priv, &port_priv->qp_info[1]);
3149
fac70d51 3150 cq_size = mad_sendq_size + mad_recvq_size;
29541e3a 3151 has_smi = rdma_cap_ib_smi(device, port_num);
fac70d51
EC
3152 if (has_smi)
3153 cq_size *= 2;
3154
d53e11fd
CH
3155 port_priv->cq = ib_alloc_cq(port_priv->device, port_priv, cq_size, 0,
3156 IB_POLL_WORKQUEUE);
1da177e4 3157 if (IS_ERR(port_priv->cq)) {
7ef5d4b0 3158 dev_err(&device->dev, "Couldn't create ib_mad CQ\n");
1da177e4
LT
3159 ret = PTR_ERR(port_priv->cq);
3160 goto error3;
3161 }
3162
3163 port_priv->pd = ib_alloc_pd(device);
3164 if (IS_ERR(port_priv->pd)) {
7ef5d4b0 3165 dev_err(&device->dev, "Couldn't create ib_mad PD\n");
1da177e4
LT
3166 ret = PTR_ERR(port_priv->pd);
3167 goto error4;
3168 }
3169
fac70d51
EC
3170 if (has_smi) {
3171 ret = create_mad_qp(&port_priv->qp_info[0], IB_QPT_SMI);
3172 if (ret)
3173 goto error6;
3174 }
1da177e4
LT
3175 ret = create_mad_qp(&port_priv->qp_info[1], IB_QPT_GSI);
3176 if (ret)
3177 goto error7;
3178
3179 snprintf(name, sizeof name, "ib_mad%d", port_num);
3180 port_priv->wq = create_singlethread_workqueue(name);
3181 if (!port_priv->wq) {
3182 ret = -ENOMEM;
3183 goto error8;
3184 }
1da177e4 3185
dc05980d
MT
3186 spin_lock_irqsave(&ib_mad_port_list_lock, flags);
3187 list_add_tail(&port_priv->port_list, &ib_mad_port_list);
3188 spin_unlock_irqrestore(&ib_mad_port_list_lock, flags);
3189
1da177e4
LT
3190 ret = ib_mad_port_start(port_priv);
3191 if (ret) {
7ef5d4b0 3192 dev_err(&device->dev, "Couldn't start port\n");
1da177e4
LT
3193 goto error9;
3194 }
3195
1da177e4
LT
3196 return 0;
3197
3198error9:
dc05980d
MT
3199 spin_lock_irqsave(&ib_mad_port_list_lock, flags);
3200 list_del_init(&port_priv->port_list);
3201 spin_unlock_irqrestore(&ib_mad_port_list_lock, flags);
3202
1da177e4
LT
3203 destroy_workqueue(port_priv->wq);
3204error8:
3205 destroy_mad_qp(&port_priv->qp_info[1]);
3206error7:
3207 destroy_mad_qp(&port_priv->qp_info[0]);
3208error6:
1da177e4
LT
3209 ib_dealloc_pd(port_priv->pd);
3210error4:
d53e11fd 3211 ib_free_cq(port_priv->cq);
1da177e4
LT
3212 cleanup_recv_queue(&port_priv->qp_info[1]);
3213 cleanup_recv_queue(&port_priv->qp_info[0]);
3214error3:
3215 kfree(port_priv);
3216
3217 return ret;
3218}
3219
3220/*
3221 * Close the port
3222 * If there are no classes using the port, free the port
3223 * resources (CQ, MR, PD, QP) and remove the port's info structure
3224 */
3225static int ib_mad_port_close(struct ib_device *device, int port_num)
3226{
3227 struct ib_mad_port_private *port_priv;
3228 unsigned long flags;
3229
3230 spin_lock_irqsave(&ib_mad_port_list_lock, flags);
3231 port_priv = __ib_get_mad_port(device, port_num);
3232 if (port_priv == NULL) {
3233 spin_unlock_irqrestore(&ib_mad_port_list_lock, flags);
7ef5d4b0 3234 dev_err(&device->dev, "Port %d not found\n", port_num);
1da177e4
LT
3235 return -ENODEV;
3236 }
dc05980d 3237 list_del_init(&port_priv->port_list);
1da177e4
LT
3238 spin_unlock_irqrestore(&ib_mad_port_list_lock, flags);
3239
1da177e4
LT
3240 destroy_workqueue(port_priv->wq);
3241 destroy_mad_qp(&port_priv->qp_info[1]);
3242 destroy_mad_qp(&port_priv->qp_info[0]);
1da177e4 3243 ib_dealloc_pd(port_priv->pd);
d53e11fd 3244 ib_free_cq(port_priv->cq);
1da177e4
LT
3245 cleanup_recv_queue(&port_priv->qp_info[1]);
3246 cleanup_recv_queue(&port_priv->qp_info[0]);
3247 /* XXX: Handle deallocation of MAD registration tables */
3248
3249 kfree(port_priv);
3250
3251 return 0;
3252}
3253
3254static void ib_mad_init_device(struct ib_device *device)
3255{
4139032b 3256 int start, i;
1da177e4 3257
4139032b 3258 start = rdma_start_port(device);
4ab6fb7e 3259
4139032b 3260 for (i = start; i <= rdma_end_port(device); i++) {
c757dea8 3261 if (!rdma_cap_ib_mad(device, i))
827f2a8b
MW
3262 continue;
3263
4ab6fb7e 3264 if (ib_mad_port_open(device, i)) {
7ef5d4b0 3265 dev_err(&device->dev, "Couldn't open port %d\n", i);
4ab6fb7e 3266 goto error;
1da177e4 3267 }
4ab6fb7e 3268 if (ib_agent_port_open(device, i)) {
7ef5d4b0
IW
3269 dev_err(&device->dev,
3270 "Couldn't open port %d for agents\n", i);
4ab6fb7e 3271 goto error_agent;
1da177e4
LT
3272 }
3273 }
f68bcc2d 3274 return;
1da177e4 3275
4ab6fb7e
RD
3276error_agent:
3277 if (ib_mad_port_close(device, i))
7ef5d4b0 3278 dev_err(&device->dev, "Couldn't close port %d\n", i);
4ab6fb7e
RD
3279
3280error:
827f2a8b 3281 while (--i >= start) {
c757dea8 3282 if (!rdma_cap_ib_mad(device, i))
827f2a8b 3283 continue;
4ab6fb7e 3284
4ab6fb7e 3285 if (ib_agent_port_close(device, i))
7ef5d4b0
IW
3286 dev_err(&device->dev,
3287 "Couldn't close port %d for agents\n", i);
4ab6fb7e 3288 if (ib_mad_port_close(device, i))
7ef5d4b0 3289 dev_err(&device->dev, "Couldn't close port %d\n", i);
1da177e4 3290 }
1da177e4
LT
3291}
3292
7c1eb45a 3293static void ib_mad_remove_device(struct ib_device *device, void *client_data)
1da177e4 3294{
4139032b 3295 int i;
827f2a8b 3296
4139032b 3297 for (i = rdma_start_port(device); i <= rdma_end_port(device); i++) {
c757dea8 3298 if (!rdma_cap_ib_mad(device, i))
827f2a8b
MW
3299 continue;
3300
3301 if (ib_agent_port_close(device, i))
7ef5d4b0 3302 dev_err(&device->dev,
827f2a8b
MW
3303 "Couldn't close port %d for agents\n", i);
3304 if (ib_mad_port_close(device, i))
3305 dev_err(&device->dev, "Couldn't close port %d\n", i);
1da177e4
LT
3306 }
3307}
3308
3309static struct ib_client mad_client = {
3310 .name = "mad",
3311 .add = ib_mad_init_device,
3312 .remove = ib_mad_remove_device
3313};
3314
4c2cb422 3315int ib_mad_init(void)
1da177e4 3316{
b76aabc3
HR
3317 mad_recvq_size = min(mad_recvq_size, IB_MAD_QP_MAX_SIZE);
3318 mad_recvq_size = max(mad_recvq_size, IB_MAD_QP_MIN_SIZE);
3319
3320 mad_sendq_size = min(mad_sendq_size, IB_MAD_QP_MAX_SIZE);
3321 mad_sendq_size = max(mad_sendq_size, IB_MAD_QP_MIN_SIZE);
3322
1da177e4
LT
3323 INIT_LIST_HEAD(&ib_mad_port_list);
3324
3325 if (ib_register_client(&mad_client)) {
7ef5d4b0 3326 pr_err("Couldn't register ib_mad client\n");
c9082e51 3327 return -EINVAL;
1da177e4
LT
3328 }
3329
3330 return 0;
1da177e4
LT
3331}
3332
4c2cb422 3333void ib_mad_cleanup(void)
1da177e4
LT
3334{
3335 ib_unregister_client(&mad_client);
1da177e4 3336}