IB/mad: Update module to [pr|dev]_* style print messages
[linux-2.6-block.git] drivers/infiniband/core/mad.c
1da177e4 1/*
de493d47 2 * Copyright (c) 2004-2007 Voltaire, Inc. All rights reserved.
3 * Copyright (c) 2005 Intel Corporation. All rights reserved.
4 * Copyright (c) 2005 Mellanox Technologies Ltd. All rights reserved.
b76aabc3 5 * Copyright (c) 2009 HNR Consulting. All rights reserved.
6 *
7 * This software is available to you under a choice of one of two
8 * licenses. You may choose to be licensed under the terms of the GNU
9 * General Public License (GPL) Version 2, available from the file
10 * COPYING in the main directory of this source tree, or the
11 * OpenIB.org BSD license below:
12 *
13 * Redistribution and use in source and binary forms, with or
14 * without modification, are permitted provided that the following
15 * conditions are met:
16 *
17 * - Redistributions of source code must retain the above
18 * copyright notice, this list of conditions and the following
19 * disclaimer.
20 *
21 * - Redistributions in binary form must reproduce the above
22 * copyright notice, this list of conditions and the following
23 * disclaimer in the documentation and/or other materials
24 * provided with the distribution.
25 *
26 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
27 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
28 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
29 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
30 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
31 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
32 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
33 * SOFTWARE.
34 *
1da177e4 35 */
36
37#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
38
1da177e4 39#include <linux/dma-mapping.h>
5a0e3ad6 40#include <linux/slab.h>
e4dd23d7 41#include <linux/module.h>
9874e746 42#include <rdma/ib_cache.h>
43
44#include "mad_priv.h"
fa619a77 45#include "mad_rmpp.h"
46#include "smi.h"
47#include "agent.h"
48
49MODULE_LICENSE("Dual BSD/GPL");
50MODULE_DESCRIPTION("kernel IB MAD API");
51MODULE_AUTHOR("Hal Rosenstock");
52MODULE_AUTHOR("Sean Hefty");
53
54static int mad_sendq_size = IB_MAD_QP_SEND_SIZE;
55static int mad_recvq_size = IB_MAD_QP_RECV_SIZE;
56
57module_param_named(send_queue_size, mad_sendq_size, int, 0444);
58MODULE_PARM_DESC(send_queue_size, "Size of send queue in number of work requests");
59module_param_named(recv_queue_size, mad_recvq_size, int, 0444);
60MODULE_PARM_DESC(recv_queue_size, "Size of receive queue in number of work requests");
61
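/*
 * Usage sketch (illustrative, not part of the original file): both
 * parameters above use mode 0444, so they are read-only in sysfs and are
 * normally given at module load time.  Assuming this file is built into
 * the ib_mad module, the defaults could be overridden with e.g.:
 *
 *	modprobe ib_mad send_queue_size=256 recv_queue_size=1024
 *
 * The module name and the values shown are assumptions for illustration.
 */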
e54f8188 62static struct kmem_cache *ib_mad_cache;
fa619a77 63
64static struct list_head ib_mad_port_list;
65static u32 ib_mad_client_id = 0;
66
67/* Port list lock */
6276e08a 68static DEFINE_SPINLOCK(ib_mad_port_list_lock);
69
70/* Forward declarations */
71static int method_in_use(struct ib_mad_mgmt_method_table **method,
72 struct ib_mad_reg_req *mad_reg_req);
73static void remove_mad_reg_req(struct ib_mad_agent_private *priv);
74static struct ib_mad_agent_private *find_mad_agent(
75 struct ib_mad_port_private *port_priv,
4a0754fa 76 struct ib_mad *mad);
77static int ib_mad_post_receive_mads(struct ib_mad_qp_info *qp_info,
78 struct ib_mad_private *mad);
79static void cancel_mads(struct ib_mad_agent_private *mad_agent_priv);
80static void timeout_sends(struct work_struct *work);
81static void local_completions(struct work_struct *work);
82static int add_nonoui_reg_req(struct ib_mad_reg_req *mad_reg_req,
83 struct ib_mad_agent_private *agent_priv,
84 u8 mgmt_class);
85static int add_oui_reg_req(struct ib_mad_reg_req *mad_reg_req,
86 struct ib_mad_agent_private *agent_priv);
87
88/*
89 * Returns an ib_mad_port_private structure or NULL for a device/port
90 * Assumes ib_mad_port_list_lock is being held
91 */
92static inline struct ib_mad_port_private *
93__ib_get_mad_port(struct ib_device *device, int port_num)
94{
95 struct ib_mad_port_private *entry;
96
97 list_for_each_entry(entry, &ib_mad_port_list, port_list) {
98 if (entry->device == device && entry->port_num == port_num)
99 return entry;
100 }
101 return NULL;
102}
103
104/*
105 * Wrapper function to return an ib_mad_port_private structure or NULL
106 * for a device/port
107 */
108static inline struct ib_mad_port_private *
109ib_get_mad_port(struct ib_device *device, int port_num)
110{
111 struct ib_mad_port_private *entry;
112 unsigned long flags;
113
114 spin_lock_irqsave(&ib_mad_port_list_lock, flags);
115 entry = __ib_get_mad_port(device, port_num);
116 spin_unlock_irqrestore(&ib_mad_port_list_lock, flags);
117
118 return entry;
119}
120
121static inline u8 convert_mgmt_class(u8 mgmt_class)
122{
123 /* Alias IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE to 0 */
124 return mgmt_class == IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE ?
125 0 : mgmt_class;
126}
127
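/*
 * Example for convert_mgmt_class() above (illustrative):
 * IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE is 0x81, outside the 0..MAX_MGMT_CLASS-1
 * table range, so it is mapped to slot 0 -- the slot left free because
 * management class 0 is reserved by the IBA specification.
 */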
128static int get_spl_qp_index(enum ib_qp_type qp_type)
129{
130 switch (qp_type)
131 {
132 case IB_QPT_SMI:
133 return 0;
134 case IB_QPT_GSI:
135 return 1;
136 default:
137 return -1;
138 }
139}
140
141static int vendor_class_index(u8 mgmt_class)
142{
143 return mgmt_class - IB_MGMT_CLASS_VENDOR_RANGE2_START;
144}
145
146static int is_vendor_class(u8 mgmt_class)
147{
148 if ((mgmt_class < IB_MGMT_CLASS_VENDOR_RANGE2_START) ||
149 (mgmt_class > IB_MGMT_CLASS_VENDOR_RANGE2_END))
150 return 0;
151 return 1;
152}
153
154static int is_vendor_oui(char *oui)
155{
156 if (oui[0] || oui[1] || oui[2])
157 return 1;
158 return 0;
159}
160
161static int is_vendor_method_in_use(
162 struct ib_mad_mgmt_vendor_class *vendor_class,
163 struct ib_mad_reg_req *mad_reg_req)
164{
165 struct ib_mad_mgmt_method_table *method;
166 int i;
167
168 for (i = 0; i < MAX_MGMT_OUI; i++) {
169 if (!memcmp(vendor_class->oui[i], mad_reg_req->oui, 3)) {
170 method = vendor_class->method_table[i];
171 if (method) {
172 if (method_in_use(&method, mad_reg_req))
173 return 1;
174 else
175 break;
176 }
177 }
178 }
179 return 0;
180}
181
182int ib_response_mad(struct ib_mad *mad)
183{
184 return ((mad->mad_hdr.method & IB_MGMT_METHOD_RESP) ||
185 (mad->mad_hdr.method == IB_MGMT_METHOD_TRAP_REPRESS) ||
186 ((mad->mad_hdr.mgmt_class == IB_MGMT_CLASS_BM) &&
187 (mad->mad_hdr.attr_mod & IB_BM_ATTR_MOD_RESP)));
188}
189EXPORT_SYMBOL(ib_response_mad);
190
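/*
 * Example (illustrative): a GetResp MAD carries method 0x81, i.e. 0x01 with
 * the IB_MGMT_METHOD_RESP (0x80) bit set, so ib_response_mad() returns
 * nonzero and the receive path treats it as the answer to an earlier
 * request (matched by TID in ib_find_send_mad()) rather than a new request.
 */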
191/*
192 * ib_register_mad_agent - Register to send/receive MADs
193 */
194struct ib_mad_agent *ib_register_mad_agent(struct ib_device *device,
195 u8 port_num,
196 enum ib_qp_type qp_type,
197 struct ib_mad_reg_req *mad_reg_req,
198 u8 rmpp_version,
199 ib_mad_send_handler send_handler,
200 ib_mad_recv_handler recv_handler,
201 void *context)
202{
203 struct ib_mad_port_private *port_priv;
204 struct ib_mad_agent *ret = ERR_PTR(-EINVAL);
205 struct ib_mad_agent_private *mad_agent_priv;
206 struct ib_mad_reg_req *reg_req = NULL;
207 struct ib_mad_mgmt_class_table *class;
208 struct ib_mad_mgmt_vendor_class_table *vendor;
209 struct ib_mad_mgmt_vendor_class *vendor_class;
210 struct ib_mad_mgmt_method_table *method;
211 int ret2, qpn;
212 unsigned long flags;
213 u8 mgmt_class, vclass;
214
215 /* Validate parameters */
216 qpn = get_spl_qp_index(qp_type);
217 if (qpn == -1)
218 goto error1;
219
220 if (rmpp_version && rmpp_version != IB_MGMT_RMPP_VERSION)
221 goto error1;
222
223 /* Validate MAD registration request if supplied */
224 if (mad_reg_req) {
225 if (mad_reg_req->mgmt_class_version >= MAX_MGMT_VERSION)
226 goto error1;
227 if (!recv_handler)
228 goto error1;
229 if (mad_reg_req->mgmt_class >= MAX_MGMT_CLASS) {
230 /*
231 * IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE is the only
232 * one in this range currently allowed
233 */
234 if (mad_reg_req->mgmt_class !=
235 IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE)
236 goto error1;
237 } else if (mad_reg_req->mgmt_class == 0) {
238 /*
239 * Class 0 is reserved in IBA and is used for
240 * aliasing of IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE
241 */
242 goto error1;
243 } else if (is_vendor_class(mad_reg_req->mgmt_class)) {
244 /*
245 * If class is in "new" vendor range,
246 * ensure supplied OUI is not zero
247 */
248 if (!is_vendor_oui(mad_reg_req->oui))
249 goto error1;
250 }
618a3c03 251 /* Make sure class supplied is consistent with RMPP */
64cb9c6a 252 if (!ib_is_mad_class_rmpp(mad_reg_req->mgmt_class)) {
253 if (rmpp_version)
254 goto error1;
255 }
256 /* Make sure class supplied is consistent with QP type */
257 if (qp_type == IB_QPT_SMI) {
258 if ((mad_reg_req->mgmt_class !=
259 IB_MGMT_CLASS_SUBN_LID_ROUTED) &&
260 (mad_reg_req->mgmt_class !=
261 IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE))
262 goto error1;
263 } else {
264 if ((mad_reg_req->mgmt_class ==
265 IB_MGMT_CLASS_SUBN_LID_ROUTED) ||
266 (mad_reg_req->mgmt_class ==
267 IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE))
268 goto error1;
269 }
270 } else {
271 /* No registration request supplied */
272 if (!send_handler)
273 goto error1;
274 }
275
276 /* Validate device and port */
277 port_priv = ib_get_mad_port(device, port_num);
278 if (!port_priv) {
279 ret = ERR_PTR(-ENODEV);
280 goto error1;
281 }
282
283 /* Verify the QP requested is supported. For example, Ethernet devices
284 * will not have QP0 */
285 if (!port_priv->qp_info[qpn].qp) {
286 ret = ERR_PTR(-EPROTONOSUPPORT);
287 goto error1;
288 }
289
1da177e4 290 /* Allocate structures */
de6eb66b 291 mad_agent_priv = kzalloc(sizeof *mad_agent_priv, GFP_KERNEL);
292 if (!mad_agent_priv) {
293 ret = ERR_PTR(-ENOMEM);
294 goto error1;
295 }
296
297 mad_agent_priv->agent.mr = ib_get_dma_mr(port_priv->qp_info[qpn].qp->pd,
298 IB_ACCESS_LOCAL_WRITE);
299 if (IS_ERR(mad_agent_priv->agent.mr)) {
300 ret = ERR_PTR(-ENOMEM);
301 goto error2;
302 }
303
304 if (mad_reg_req) {
9893e742 305 reg_req = kmemdup(mad_reg_req, sizeof *reg_req, GFP_KERNEL);
306 if (!reg_req) {
307 ret = ERR_PTR(-ENOMEM);
b82cab6b 308 goto error3;
1da177e4 309 }
310 }
311
312 /* Now, fill in the various structures */
313 mad_agent_priv->qp_info = &port_priv->qp_info[qpn];
314 mad_agent_priv->reg_req = reg_req;
fa619a77 315 mad_agent_priv->agent.rmpp_version = rmpp_version;
316 mad_agent_priv->agent.device = device;
317 mad_agent_priv->agent.recv_handler = recv_handler;
318 mad_agent_priv->agent.send_handler = send_handler;
319 mad_agent_priv->agent.context = context;
320 mad_agent_priv->agent.qp = port_priv->qp_info[qpn].qp;
321 mad_agent_priv->agent.port_num = port_num;
322 spin_lock_init(&mad_agent_priv->lock);
323 INIT_LIST_HEAD(&mad_agent_priv->send_list);
324 INIT_LIST_HEAD(&mad_agent_priv->wait_list);
325 INIT_LIST_HEAD(&mad_agent_priv->done_list);
326 INIT_LIST_HEAD(&mad_agent_priv->rmpp_list);
327 INIT_DELAYED_WORK(&mad_agent_priv->timed_work, timeout_sends);
328 INIT_LIST_HEAD(&mad_agent_priv->local_list);
329 INIT_WORK(&mad_agent_priv->local_work, local_completions);
330 atomic_set(&mad_agent_priv->refcount, 1);
331 init_completion(&mad_agent_priv->comp);
332
333 spin_lock_irqsave(&port_priv->reg_lock, flags);
334 mad_agent_priv->agent.hi_tid = ++ib_mad_client_id;
335
336 /*
337 * Make sure MAD registration (if supplied)
338 * is non overlapping with any existing ones
339 */
340 if (mad_reg_req) {
341 mgmt_class = convert_mgmt_class(mad_reg_req->mgmt_class);
342 if (!is_vendor_class(mgmt_class)) {
343 class = port_priv->version[mad_reg_req->
344 mgmt_class_version].class;
345 if (class) {
346 method = class->method_table[mgmt_class];
347 if (method) {
348 if (method_in_use(&method,
349 mad_reg_req))
b82cab6b 350 goto error4;
351 }
352 }
353 ret2 = add_nonoui_reg_req(mad_reg_req, mad_agent_priv,
354 mgmt_class);
355 } else {
356 /* "New" vendor class range */
357 vendor = port_priv->version[mad_reg_req->
358 mgmt_class_version].vendor;
359 if (vendor) {
360 vclass = vendor_class_index(mgmt_class);
361 vendor_class = vendor->vendor_class[vclass];
362 if (vendor_class) {
363 if (is_vendor_method_in_use(
364 vendor_class,
365 mad_reg_req))
b82cab6b 366 goto error4;
367 }
368 }
369 ret2 = add_oui_reg_req(mad_reg_req, mad_agent_priv);
370 }
371 if (ret2) {
372 ret = ERR_PTR(ret2);
b82cab6b 373 goto error4;
374 }
375 }
376
377 /* Add mad agent into port's agent list */
378 list_add_tail(&mad_agent_priv->agent_list, &port_priv->agent_list);
379 spin_unlock_irqrestore(&port_priv->reg_lock, flags);
380
381 return &mad_agent_priv->agent;
382
b82cab6b 383error4:
384 spin_unlock_irqrestore(&port_priv->reg_lock, flags);
385 kfree(reg_req);
b82cab6b 386error3:
b82cab6b 387 ib_dereg_mr(mad_agent_priv->agent.mr);
388error2:
389 kfree(mad_agent_priv);
390error1:
391 return ret;
392}
393EXPORT_SYMBOL(ib_register_mad_agent);
394
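/*
 * Minimal registration sketch (illustrative only; "device", port 1 and the
 * my_send_handler/my_recv_handler callbacks are assumed to be supplied by
 * the caller, and error handling is trimmed).  It registers to receive
 * unsolicited Performance Management Get MADs on the GSI QP:
 *
 *	struct ib_mad_reg_req reg_req = {
 *		.mgmt_class		= IB_MGMT_CLASS_PERF_MGMT,
 *		.mgmt_class_version	= 1,
 *	};
 *	struct ib_mad_agent *agent;
 *
 *	set_bit(IB_MGMT_METHOD_GET, reg_req.method_mask);
 *	agent = ib_register_mad_agent(device, 1, IB_QPT_GSI, &reg_req, 0,
 *				      my_send_handler, my_recv_handler, NULL);
 *	if (IS_ERR(agent))
 *		return PTR_ERR(agent);
 *
 * A matching ib_unregister_mad_agent(agent) releases the registration.
 */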
395static inline int is_snooping_sends(int mad_snoop_flags)
396{
397 return (mad_snoop_flags &
398 (/*IB_MAD_SNOOP_POSTED_SENDS |
399 IB_MAD_SNOOP_RMPP_SENDS |*/
400 IB_MAD_SNOOP_SEND_COMPLETIONS /*|
401 IB_MAD_SNOOP_RMPP_SEND_COMPLETIONS*/));
402}
403
404static inline int is_snooping_recvs(int mad_snoop_flags)
405{
406 return (mad_snoop_flags &
407 (IB_MAD_SNOOP_RECVS /*|
408 IB_MAD_SNOOP_RMPP_RECVS*/));
409}
410
411static int register_snoop_agent(struct ib_mad_qp_info *qp_info,
412 struct ib_mad_snoop_private *mad_snoop_priv)
413{
414 struct ib_mad_snoop_private **new_snoop_table;
415 unsigned long flags;
416 int i;
417
418 spin_lock_irqsave(&qp_info->snoop_lock, flags);
419 /* Check for empty slot in array. */
420 for (i = 0; i < qp_info->snoop_table_size; i++)
421 if (!qp_info->snoop_table[i])
422 break;
423
424 if (i == qp_info->snoop_table_size) {
425 /* Grow table. */
426 new_snoop_table = krealloc(qp_info->snoop_table,
427 sizeof mad_snoop_priv *
428 (qp_info->snoop_table_size + 1),
429 GFP_ATOMIC);
430 if (!new_snoop_table) {
431 i = -ENOMEM;
432 goto out;
433 }
52805174 434
435 qp_info->snoop_table = new_snoop_table;
436 qp_info->snoop_table_size++;
437 }
438 qp_info->snoop_table[i] = mad_snoop_priv;
439 atomic_inc(&qp_info->snoop_count);
440out:
441 spin_unlock_irqrestore(&qp_info->snoop_lock, flags);
442 return i;
443}
444
445struct ib_mad_agent *ib_register_mad_snoop(struct ib_device *device,
446 u8 port_num,
447 enum ib_qp_type qp_type,
448 int mad_snoop_flags,
449 ib_mad_snoop_handler snoop_handler,
450 ib_mad_recv_handler recv_handler,
451 void *context)
452{
453 struct ib_mad_port_private *port_priv;
454 struct ib_mad_agent *ret;
455 struct ib_mad_snoop_private *mad_snoop_priv;
456 int qpn;
457
458 /* Validate parameters */
459 if ((is_snooping_sends(mad_snoop_flags) && !snoop_handler) ||
460 (is_snooping_recvs(mad_snoop_flags) && !recv_handler)) {
461 ret = ERR_PTR(-EINVAL);
462 goto error1;
463 }
464 qpn = get_spl_qp_index(qp_type);
465 if (qpn == -1) {
466 ret = ERR_PTR(-EINVAL);
467 goto error1;
468 }
469 port_priv = ib_get_mad_port(device, port_num);
470 if (!port_priv) {
471 ret = ERR_PTR(-ENODEV);
472 goto error1;
473 }
474 /* Allocate structures */
de6eb66b 475 mad_snoop_priv = kzalloc(sizeof *mad_snoop_priv, GFP_KERNEL);
476 if (!mad_snoop_priv) {
477 ret = ERR_PTR(-ENOMEM);
478 goto error1;
479 }
480
481 /* Now, fill in the various structures */
482 mad_snoop_priv->qp_info = &port_priv->qp_info[qpn];
483 mad_snoop_priv->agent.device = device;
484 mad_snoop_priv->agent.recv_handler = recv_handler;
485 mad_snoop_priv->agent.snoop_handler = snoop_handler;
486 mad_snoop_priv->agent.context = context;
487 mad_snoop_priv->agent.qp = port_priv->qp_info[qpn].qp;
488 mad_snoop_priv->agent.port_num = port_num;
489 mad_snoop_priv->mad_snoop_flags = mad_snoop_flags;
1b52fa98 490 init_completion(&mad_snoop_priv->comp);
491 mad_snoop_priv->snoop_index = register_snoop_agent(
492 &port_priv->qp_info[qpn],
493 mad_snoop_priv);
494 if (mad_snoop_priv->snoop_index < 0) {
495 ret = ERR_PTR(mad_snoop_priv->snoop_index);
496 goto error2;
497 }
498
499 atomic_set(&mad_snoop_priv->refcount, 1);
500 return &mad_snoop_priv->agent;
501
502error2:
503 kfree(mad_snoop_priv);
504error1:
505 return ret;
506}
507EXPORT_SYMBOL(ib_register_mad_snoop);
508
509static inline void deref_mad_agent(struct ib_mad_agent_private *mad_agent_priv)
510{
511 if (atomic_dec_and_test(&mad_agent_priv->refcount))
512 complete(&mad_agent_priv->comp);
513}
514
515static inline void deref_snoop_agent(struct ib_mad_snoop_private *mad_snoop_priv)
516{
517 if (atomic_dec_and_test(&mad_snoop_priv->refcount))
518 complete(&mad_snoop_priv->comp);
519}
520
521static void unregister_mad_agent(struct ib_mad_agent_private *mad_agent_priv)
522{
523 struct ib_mad_port_private *port_priv;
524 unsigned long flags;
525
526 /* Note that we could still be handling received MADs */
527
528 /*
529 * Canceling all sends results in dropping received response
530 * MADs, preventing us from queuing additional work
531 */
532 cancel_mads(mad_agent_priv);
1da177e4 533 port_priv = mad_agent_priv->qp_info->port_priv;
1da177e4 534 cancel_delayed_work(&mad_agent_priv->timed_work);
535
536 spin_lock_irqsave(&port_priv->reg_lock, flags);
537 remove_mad_reg_req(mad_agent_priv);
538 list_del(&mad_agent_priv->agent_list);
539 spin_unlock_irqrestore(&port_priv->reg_lock, flags);
540
b82cab6b 541 flush_workqueue(port_priv->wq);
fa619a77 542 ib_cancel_rmpp_recvs(mad_agent_priv);
1da177e4 543
544 deref_mad_agent(mad_agent_priv);
545 wait_for_completion(&mad_agent_priv->comp);
1da177e4 546
6044ec88 547 kfree(mad_agent_priv->reg_req);
b82cab6b 548 ib_dereg_mr(mad_agent_priv->agent.mr);
549 kfree(mad_agent_priv);
550}
551
552static void unregister_mad_snoop(struct ib_mad_snoop_private *mad_snoop_priv)
553{
554 struct ib_mad_qp_info *qp_info;
555 unsigned long flags;
556
557 qp_info = mad_snoop_priv->qp_info;
558 spin_lock_irqsave(&qp_info->snoop_lock, flags);
559 qp_info->snoop_table[mad_snoop_priv->snoop_index] = NULL;
560 atomic_dec(&qp_info->snoop_count);
561 spin_unlock_irqrestore(&qp_info->snoop_lock, flags);
562
563 deref_snoop_agent(mad_snoop_priv);
564 wait_for_completion(&mad_snoop_priv->comp);
565
566 kfree(mad_snoop_priv);
567}
568
569/*
570 * ib_unregister_mad_agent - Unregisters a client from using MAD services
571 */
572int ib_unregister_mad_agent(struct ib_mad_agent *mad_agent)
573{
574 struct ib_mad_agent_private *mad_agent_priv;
575 struct ib_mad_snoop_private *mad_snoop_priv;
576
577 /* If the TID is zero, the agent can only snoop. */
578 if (mad_agent->hi_tid) {
579 mad_agent_priv = container_of(mad_agent,
580 struct ib_mad_agent_private,
581 agent);
582 unregister_mad_agent(mad_agent_priv);
583 } else {
584 mad_snoop_priv = container_of(mad_agent,
585 struct ib_mad_snoop_private,
586 agent);
587 unregister_mad_snoop(mad_snoop_priv);
588 }
589 return 0;
590}
591EXPORT_SYMBOL(ib_unregister_mad_agent);
592
593static void dequeue_mad(struct ib_mad_list_head *mad_list)
594{
595 struct ib_mad_queue *mad_queue;
596 unsigned long flags;
597
598 BUG_ON(!mad_list->mad_queue);
599 mad_queue = mad_list->mad_queue;
600 spin_lock_irqsave(&mad_queue->lock, flags);
601 list_del(&mad_list->list);
602 mad_queue->count--;
603 spin_unlock_irqrestore(&mad_queue->lock, flags);
604}
605
606static void snoop_send(struct ib_mad_qp_info *qp_info,
34816ad9 607 struct ib_mad_send_buf *send_buf,
608 struct ib_mad_send_wc *mad_send_wc,
609 int mad_snoop_flags)
610{
611 struct ib_mad_snoop_private *mad_snoop_priv;
612 unsigned long flags;
613 int i;
614
615 spin_lock_irqsave(&qp_info->snoop_lock, flags);
616 for (i = 0; i < qp_info->snoop_table_size; i++) {
617 mad_snoop_priv = qp_info->snoop_table[i];
618 if (!mad_snoop_priv ||
619 !(mad_snoop_priv->mad_snoop_flags & mad_snoop_flags))
620 continue;
621
622 atomic_inc(&mad_snoop_priv->refcount);
623 spin_unlock_irqrestore(&qp_info->snoop_lock, flags);
624 mad_snoop_priv->agent.snoop_handler(&mad_snoop_priv->agent,
34816ad9 625 send_buf, mad_send_wc);
1b52fa98 626 deref_snoop_agent(mad_snoop_priv);
627 spin_lock_irqsave(&qp_info->snoop_lock, flags);
628 }
629 spin_unlock_irqrestore(&qp_info->snoop_lock, flags);
630}
631
632static void snoop_recv(struct ib_mad_qp_info *qp_info,
633 struct ib_mad_recv_wc *mad_recv_wc,
634 int mad_snoop_flags)
635{
636 struct ib_mad_snoop_private *mad_snoop_priv;
637 unsigned long flags;
638 int i;
639
640 spin_lock_irqsave(&qp_info->snoop_lock, flags);
641 for (i = 0; i < qp_info->snoop_table_size; i++) {
642 mad_snoop_priv = qp_info->snoop_table[i];
643 if (!mad_snoop_priv ||
644 !(mad_snoop_priv->mad_snoop_flags & mad_snoop_flags))
645 continue;
646
647 atomic_inc(&mad_snoop_priv->refcount);
648 spin_unlock_irqrestore(&qp_info->snoop_lock, flags);
649 mad_snoop_priv->agent.recv_handler(&mad_snoop_priv->agent,
650 mad_recv_wc);
1b52fa98 651 deref_snoop_agent(mad_snoop_priv);
652 spin_lock_irqsave(&qp_info->snoop_lock, flags);
653 }
654 spin_unlock_irqrestore(&qp_info->snoop_lock, flags);
655}
656
657static void build_smp_wc(struct ib_qp *qp,
658 u64 wr_id, u16 slid, u16 pkey_index, u8 port_num,
659 struct ib_wc *wc)
660{
661 memset(wc, 0, sizeof *wc);
662 wc->wr_id = wr_id;
663 wc->status = IB_WC_SUCCESS;
664 wc->opcode = IB_WC_RECV;
665 wc->pkey_index = pkey_index;
666 wc->byte_len = sizeof(struct ib_mad) + sizeof(struct ib_grh);
667 wc->src_qp = IB_QP0;
062dbb69 668 wc->qp = qp;
669 wc->slid = slid;
670 wc->sl = 0;
671 wc->dlid_path_bits = 0;
672 wc->port_num = port_num;
673}
674
675/*
676 * Return 0 if SMP is to be sent
677 * Return 1 if SMP was consumed locally (whether or not solicited)
678 * Return < 0 if error
679 */
680static int handle_outgoing_dr_smp(struct ib_mad_agent_private *mad_agent_priv,
34816ad9 681 struct ib_mad_send_wr_private *mad_send_wr)
1da177e4 682{
de493d47 683 int ret = 0;
34816ad9 684 struct ib_smp *smp = mad_send_wr->send_buf.mad;
685 unsigned long flags;
686 struct ib_mad_local_private *local;
687 struct ib_mad_private *mad_priv;
688 struct ib_mad_port_private *port_priv;
689 struct ib_mad_agent_private *recv_mad_agent = NULL;
690 struct ib_device *device = mad_agent_priv->agent.device;
1bae4dbf 691 u8 port_num;
1da177e4 692 struct ib_wc mad_wc;
34816ad9 693 struct ib_send_wr *send_wr = &mad_send_wr->send_wr;
1da177e4 694
695 if (device->node_type == RDMA_NODE_IB_SWITCH &&
696 smp->mgmt_class == IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE)
697 port_num = send_wr->wr.ud.port_num;
698 else
699 port_num = mad_agent_priv->agent.port_num;
700
701 /*
702 * Directed route handling starts if the initial LID routed part of
703 * a request or the ending LID routed part of a response is empty.
704 * If we are at the start of the LID routed part, don't update the
705 * hop_ptr or hop_cnt. See section 14.2.2, Vol 1 IB spec.
706 */
707 if ((ib_get_smp_direction(smp) ? smp->dr_dlid : smp->dr_slid) ==
708 IB_LID_PERMISSIVE &&
709 smi_handle_dr_smp_send(smp, device->node_type, port_num) ==
710 IB_SMI_DISCARD) {
1da177e4 711 ret = -EINVAL;
7ef5d4b0 712 dev_err(&device->dev, "Invalid directed route\n");
713 goto out;
714 }
de493d47 715
1da177e4 716 /* Check to post send on QP or process locally */
717 if (smi_check_local_smp(smp, device) == IB_SMI_DISCARD &&
718 smi_check_local_returning_smp(smp, device) == IB_SMI_DISCARD)
719 goto out;
720
721 local = kmalloc(sizeof *local, GFP_ATOMIC);
722 if (!local) {
723 ret = -ENOMEM;
7ef5d4b0 724 dev_err(&device->dev, "No memory for ib_mad_local_private\n");
725 goto out;
726 }
727 local->mad_priv = NULL;
728 local->recv_mad_agent = NULL;
729 mad_priv = kmem_cache_alloc(ib_mad_cache, GFP_ATOMIC);
730 if (!mad_priv) {
731 ret = -ENOMEM;
7ef5d4b0 732 dev_err(&device->dev, "No memory for local response MAD\n");
733 kfree(local);
734 goto out;
735 }
736
737 build_smp_wc(mad_agent_priv->agent.qp,
738 send_wr->wr_id, be16_to_cpu(smp->dr_slid),
97f52eb4 739 send_wr->wr.ud.pkey_index,
740 send_wr->wr.ud.port_num, &mad_wc);
741
742 /* No GRH for DR SMP */
743 ret = device->process_mad(device, 0, port_num, &mad_wc, NULL,
744 (struct ib_mad *)smp,
745 (struct ib_mad *)&mad_priv->mad);
746 switch (ret)
747 {
748 case IB_MAD_RESULT_SUCCESS | IB_MAD_RESULT_REPLY:
2527e681 749 if (ib_response_mad(&mad_priv->mad.mad) &&
750 mad_agent_priv->agent.recv_handler) {
751 local->mad_priv = mad_priv;
752 local->recv_mad_agent = mad_agent_priv;
753 /*
754 * Reference MAD agent until receive
755 * side of local completion handled
756 */
757 atomic_inc(&mad_agent_priv->refcount);
758 } else
759 kmem_cache_free(ib_mad_cache, mad_priv);
760 break;
761 case IB_MAD_RESULT_SUCCESS | IB_MAD_RESULT_CONSUMED:
762 kmem_cache_free(ib_mad_cache, mad_priv);
4780c195 763 break;
764 case IB_MAD_RESULT_SUCCESS:
765 /* Treat like an incoming receive MAD */
766 port_priv = ib_get_mad_port(mad_agent_priv->agent.device,
767 mad_agent_priv->agent.port_num);
768 if (port_priv) {
727792da 769 memcpy(&mad_priv->mad.mad, smp, sizeof(struct ib_mad));
1da177e4 770 recv_mad_agent = find_mad_agent(port_priv,
4a0754fa 771 &mad_priv->mad.mad);
772 }
773 if (!port_priv || !recv_mad_agent) {
774 /*
775 * No receiving agent so drop packet and
776 * generate send completion.
777 */
1da177e4 778 kmem_cache_free(ib_mad_cache, mad_priv);
4780c195 779 break;
780 }
781 local->mad_priv = mad_priv;
782 local->recv_mad_agent = recv_mad_agent;
783 break;
784 default:
785 kmem_cache_free(ib_mad_cache, mad_priv);
786 kfree(local);
787 ret = -EINVAL;
788 goto out;
789 }
790
34816ad9 791 local->mad_send_wr = mad_send_wr;
792 /* Reference MAD agent until send side of local completion handled */
793 atomic_inc(&mad_agent_priv->refcount);
794 /* Queue local completion to local list */
795 spin_lock_irqsave(&mad_agent_priv->lock, flags);
796 list_add_tail(&local->completion_list, &mad_agent_priv->local_list);
797 spin_unlock_irqrestore(&mad_agent_priv->lock, flags);
798 queue_work(mad_agent_priv->qp_info->port_priv->wq,
b82cab6b 799 &mad_agent_priv->local_work);
800 ret = 1;
801out:
802 return ret;
803}
804
f36e1793 805static int get_pad_size(int hdr_len, int data_len)
806{
807 int seg_size, pad;
808
809 seg_size = sizeof(struct ib_mad) - hdr_len;
810 if (data_len && seg_size) {
811 pad = seg_size - data_len % seg_size;
f36e1793 812 return pad == seg_size ? 0 : pad;
824c8ae7 813 } else
814 return seg_size;
815}
816
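/*
 * Worked example for get_pad_size() above: sizeof(struct ib_mad) is 256, so
 * with hdr_len == 56 each RMPP segment carries 256 - 56 = 200 bytes of data.
 * For data_len == 500, 500 % 200 = 100, giving a pad of 200 - 100 = 100, and
 * alloc_send_rmpp_list() below allocates (500 + 100) / 200 = 3 segments.
 * (The hdr_len/data_len values are illustrative; only the 256-byte MAD size
 * is fixed.)
 */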
817static void free_send_rmpp_list(struct ib_mad_send_wr_private *mad_send_wr)
818{
819 struct ib_rmpp_segment *s, *t;
820
821 list_for_each_entry_safe(s, t, &mad_send_wr->rmpp_list, list) {
822 list_del(&s->list);
823 kfree(s);
824 }
825}
826
827static int alloc_send_rmpp_list(struct ib_mad_send_wr_private *send_wr,
828 gfp_t gfp_mask)
829{
830 struct ib_mad_send_buf *send_buf = &send_wr->send_buf;
831 struct ib_rmpp_mad *rmpp_mad = send_buf->mad;
832 struct ib_rmpp_segment *seg = NULL;
833 int left, seg_size, pad;
834
835 send_buf->seg_size = sizeof (struct ib_mad) - send_buf->hdr_len;
836 seg_size = send_buf->seg_size;
837 pad = send_wr->pad;
838
839 /* Allocate data segments. */
840 for (left = send_buf->data_len + pad; left > 0; left -= seg_size) {
841 seg = kmalloc(sizeof (*seg) + seg_size, gfp_mask);
842 if (!seg) {
843 dev_err(&send_buf->mad_agent->device->dev,
844 "alloc_send_rmpp_segs: RMPP mem alloc failed for len %zd, gfp %#x\n",
845 sizeof (*seg) + seg_size, gfp_mask);
846 free_send_rmpp_list(send_wr);
847 return -ENOMEM;
848 }
849 seg->num = ++send_buf->seg_count;
850 list_add_tail(&seg->list, &send_wr->rmpp_list);
851 }
852
853 /* Zero any padding */
854 if (pad)
855 memset(seg->data + seg_size - pad, 0, pad);
856
857 rmpp_mad->rmpp_hdr.rmpp_version = send_wr->mad_agent_priv->
858 agent.rmpp_version;
859 rmpp_mad->rmpp_hdr.rmpp_type = IB_MGMT_RMPP_TYPE_DATA;
860 ib_set_rmpp_flags(&rmpp_mad->rmpp_hdr, IB_MGMT_RMPP_FLAG_ACTIVE);
861
862 send_wr->cur_seg = container_of(send_wr->rmpp_list.next,
863 struct ib_rmpp_segment, list);
864 send_wr->last_ack_seg = send_wr->cur_seg;
865 return 0;
866}
867
868struct ib_mad_send_buf * ib_create_send_mad(struct ib_mad_agent *mad_agent,
869 u32 remote_qpn, u16 pkey_index,
34816ad9 870 int rmpp_active,
824c8ae7 871 int hdr_len, int data_len,
dd0fc66f 872 gfp_t gfp_mask)
873{
874 struct ib_mad_agent_private *mad_agent_priv;
34816ad9 875 struct ib_mad_send_wr_private *mad_send_wr;
f36e1793 876 int pad, message_size, ret, size;
877 void *buf;
878
879 mad_agent_priv = container_of(mad_agent, struct ib_mad_agent_private,
880 agent);
881 pad = get_pad_size(hdr_len, data_len);
882 message_size = hdr_len + data_len + pad;
824c8ae7 883
fa619a77 884 if ((!mad_agent->rmpp_version &&
885 (rmpp_active || message_size > sizeof(struct ib_mad))) ||
886 (!rmpp_active && message_size > sizeof(struct ib_mad)))
887 return ERR_PTR(-EINVAL);
888
889 size = rmpp_active ? hdr_len : sizeof(struct ib_mad);
890 buf = kzalloc(sizeof *mad_send_wr + size, gfp_mask);
891 if (!buf)
892 return ERR_PTR(-ENOMEM);
34816ad9 893
894 mad_send_wr = buf + size;
895 INIT_LIST_HEAD(&mad_send_wr->rmpp_list);
34816ad9 896 mad_send_wr->send_buf.mad = buf;
897 mad_send_wr->send_buf.hdr_len = hdr_len;
898 mad_send_wr->send_buf.data_len = data_len;
899 mad_send_wr->pad = pad;
900
901 mad_send_wr->mad_agent_priv = mad_agent_priv;
f36e1793 902 mad_send_wr->sg_list[0].length = hdr_len;
34816ad9 903 mad_send_wr->sg_list[0].lkey = mad_agent->mr->lkey;
904 mad_send_wr->sg_list[1].length = sizeof(struct ib_mad) - hdr_len;
905 mad_send_wr->sg_list[1].lkey = mad_agent->mr->lkey;
906
907 mad_send_wr->send_wr.wr_id = (unsigned long) mad_send_wr;
908 mad_send_wr->send_wr.sg_list = mad_send_wr->sg_list;
f36e1793 909 mad_send_wr->send_wr.num_sge = 2;
910 mad_send_wr->send_wr.opcode = IB_WR_SEND;
911 mad_send_wr->send_wr.send_flags = IB_SEND_SIGNALED;
912 mad_send_wr->send_wr.wr.ud.remote_qpn = remote_qpn;
913 mad_send_wr->send_wr.wr.ud.remote_qkey = IB_QP_SET_QKEY;
914 mad_send_wr->send_wr.wr.ud.pkey_index = pkey_index;
915
916 if (rmpp_active) {
917 ret = alloc_send_rmpp_list(mad_send_wr, gfp_mask);
918 if (ret) {
919 kfree(buf);
920 return ERR_PTR(ret);
921 }
922 }
923
34816ad9 924 mad_send_wr->send_buf.mad_agent = mad_agent;
824c8ae7 925 atomic_inc(&mad_agent_priv->refcount);
34816ad9 926 return &mad_send_wr->send_buf;
927}
928EXPORT_SYMBOL(ib_create_send_mad);
929
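/*
 * Send-buffer sketch (illustrative; "agent", "ah", "remote_qpn" and
 * "pkey_index" are assumed to come from the caller, and error handling is
 * trimmed).  A single, non-RMPP MAD with a plain MAD header:
 *
 *	struct ib_mad_send_buf *msg;
 *
 *	msg = ib_create_send_mad(agent, remote_qpn, pkey_index, 0,
 *				 IB_MGMT_MAD_HDR,
 *				 sizeof(struct ib_mad) - IB_MGMT_MAD_HDR,
 *				 GFP_KERNEL);
 *	if (IS_ERR(msg))
 *		return PTR_ERR(msg);
 *	msg->ah = ah;
 *	msg->timeout_ms = 1000;
 *	msg->retries = 2;
 *
 * If the buffer is never posted it is released with ib_free_send_mad(msg).
 */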
930int ib_get_mad_data_offset(u8 mgmt_class)
931{
932 if (mgmt_class == IB_MGMT_CLASS_SUBN_ADM)
933 return IB_MGMT_SA_HDR;
934 else if ((mgmt_class == IB_MGMT_CLASS_DEVICE_MGMT) ||
935 (mgmt_class == IB_MGMT_CLASS_DEVICE_ADM) ||
936 (mgmt_class == IB_MGMT_CLASS_BIS))
937 return IB_MGMT_DEVICE_HDR;
938 else if ((mgmt_class >= IB_MGMT_CLASS_VENDOR_RANGE2_START) &&
939 (mgmt_class <= IB_MGMT_CLASS_VENDOR_RANGE2_END))
940 return IB_MGMT_VENDOR_HDR;
941 else
942 return IB_MGMT_MAD_HDR;
943}
944EXPORT_SYMBOL(ib_get_mad_data_offset);
945
946int ib_is_mad_class_rmpp(u8 mgmt_class)
947{
948 if ((mgmt_class == IB_MGMT_CLASS_SUBN_ADM) ||
949 (mgmt_class == IB_MGMT_CLASS_DEVICE_MGMT) ||
950 (mgmt_class == IB_MGMT_CLASS_DEVICE_ADM) ||
951 (mgmt_class == IB_MGMT_CLASS_BIS) ||
952 ((mgmt_class >= IB_MGMT_CLASS_VENDOR_RANGE2_START) &&
953 (mgmt_class <= IB_MGMT_CLASS_VENDOR_RANGE2_END)))
954 return 1;
955 return 0;
956}
957EXPORT_SYMBOL(ib_is_mad_class_rmpp);
958
959void *ib_get_rmpp_segment(struct ib_mad_send_buf *send_buf, int seg_num)
960{
961 struct ib_mad_send_wr_private *mad_send_wr;
962 struct list_head *list;
963
964 mad_send_wr = container_of(send_buf, struct ib_mad_send_wr_private,
965 send_buf);
966 list = &mad_send_wr->cur_seg->list;
967
968 if (mad_send_wr->cur_seg->num < seg_num) {
969 list_for_each_entry(mad_send_wr->cur_seg, list, list)
970 if (mad_send_wr->cur_seg->num == seg_num)
971 break;
972 } else if (mad_send_wr->cur_seg->num > seg_num) {
973 list_for_each_entry_reverse(mad_send_wr->cur_seg, list, list)
974 if (mad_send_wr->cur_seg->num == seg_num)
975 break;
976 }
977 return mad_send_wr->cur_seg->data;
978}
979EXPORT_SYMBOL(ib_get_rmpp_segment);
980
981static inline void *ib_get_payload(struct ib_mad_send_wr_private *mad_send_wr)
982{
983 if (mad_send_wr->send_buf.seg_count)
984 return ib_get_rmpp_segment(&mad_send_wr->send_buf,
985 mad_send_wr->seg_num);
986 else
987 return mad_send_wr->send_buf.mad +
988 mad_send_wr->send_buf.hdr_len;
989}
990
991void ib_free_send_mad(struct ib_mad_send_buf *send_buf)
992{
993 struct ib_mad_agent_private *mad_agent_priv;
f36e1793 994 struct ib_mad_send_wr_private *mad_send_wr;
824c8ae7
HR
995
996 mad_agent_priv = container_of(send_buf->mad_agent,
997 struct ib_mad_agent_private, agent);
998 mad_send_wr = container_of(send_buf, struct ib_mad_send_wr_private,
999 send_buf);
824c8ae7 1000
1001 free_send_rmpp_list(mad_send_wr);
1002 kfree(send_buf->mad);
1b52fa98 1003 deref_mad_agent(mad_agent_priv);
1004}
1005EXPORT_SYMBOL(ib_free_send_mad);
1006
fa619a77 1007int ib_send_mad(struct ib_mad_send_wr_private *mad_send_wr)
1008{
1009 struct ib_mad_qp_info *qp_info;
cabe3cbc 1010 struct list_head *list;
1011 struct ib_send_wr *bad_send_wr;
1012 struct ib_mad_agent *mad_agent;
1013 struct ib_sge *sge;
1014 unsigned long flags;
1015 int ret;
1016
f8197a4e 1017 /* Set WR ID to find mad_send_wr upon completion */
d760ce8f 1018 qp_info = mad_send_wr->mad_agent_priv->qp_info;
1019 mad_send_wr->send_wr.wr_id = (unsigned long)&mad_send_wr->mad_list;
1020 mad_send_wr->mad_list.mad_queue = &qp_info->send_queue;
1021
1022 mad_agent = mad_send_wr->send_buf.mad_agent;
1023 sge = mad_send_wr->sg_list;
1024 sge[0].addr = ib_dma_map_single(mad_agent->device,
1025 mad_send_wr->send_buf.mad,
1026 sge[0].length,
1027 DMA_TO_DEVICE);
1028 if (unlikely(ib_dma_mapping_error(mad_agent->device, sge[0].addr)))
1029 return -ENOMEM;
1030
1031 mad_send_wr->header_mapping = sge[0].addr;
1032
1033 sge[1].addr = ib_dma_map_single(mad_agent->device,
1034 ib_get_payload(mad_send_wr),
1035 sge[1].length,
1036 DMA_TO_DEVICE);
1037 if (unlikely(ib_dma_mapping_error(mad_agent->device, sge[1].addr))) {
1038 ib_dma_unmap_single(mad_agent->device,
1039 mad_send_wr->header_mapping,
1040 sge[0].length, DMA_TO_DEVICE);
1041 return -ENOMEM;
1042 }
1527106f 1043 mad_send_wr->payload_mapping = sge[1].addr;
34816ad9 1044
1da177e4 1045 spin_lock_irqsave(&qp_info->send_queue.lock, flags);
cabe3cbc 1046 if (qp_info->send_queue.count < qp_info->send_queue.max_active) {
1047 ret = ib_post_send(mad_agent->qp, &mad_send_wr->send_wr,
1048 &bad_send_wr);
cabe3cbc 1049 list = &qp_info->send_queue.list;
1da177e4 1050 } else {
1da177e4 1051 ret = 0;
cabe3cbc 1052 list = &qp_info->overflow_list;
1da177e4 1053 }
1054
1055 if (!ret) {
1056 qp_info->send_queue.count++;
1057 list_add_tail(&mad_send_wr->mad_list.list, list);
1058 }
1059 spin_unlock_irqrestore(&qp_info->send_queue.lock, flags);
f36e1793 1060 if (ret) {
1061 ib_dma_unmap_single(mad_agent->device,
1062 mad_send_wr->header_mapping,
1063 sge[0].length, DMA_TO_DEVICE);
1064 ib_dma_unmap_single(mad_agent->device,
1065 mad_send_wr->payload_mapping,
1066 sge[1].length, DMA_TO_DEVICE);
f36e1793 1067 }
1068 return ret;
1069}
1070
1071/*
1072 * ib_post_send_mad - Posts MAD(s) to the send queue of the QP associated
1073 * with the registered client
1074 */
1075int ib_post_send_mad(struct ib_mad_send_buf *send_buf,
1076 struct ib_mad_send_buf **bad_send_buf)
1da177e4 1077{
1da177e4 1078 struct ib_mad_agent_private *mad_agent_priv;
1079 struct ib_mad_send_buf *next_send_buf;
1080 struct ib_mad_send_wr_private *mad_send_wr;
1081 unsigned long flags;
1082 int ret = -EINVAL;
1083
1084 /* Walk list of send WRs and post each on send list */
34816ad9 1085 for (; send_buf; send_buf = next_send_buf) {
1da177e4 1086
1087 mad_send_wr = container_of(send_buf,
1088 struct ib_mad_send_wr_private,
1089 send_buf);
1090 mad_agent_priv = mad_send_wr->mad_agent_priv;
1da177e4 1091
1092 if (!send_buf->mad_agent->send_handler ||
1093 (send_buf->timeout_ms &&
1094 !send_buf->mad_agent->recv_handler)) {
1095 ret = -EINVAL;
1096 goto error;
1097 }
1098
1099 if (!ib_is_mad_class_rmpp(((struct ib_mad_hdr *) send_buf->mad)->mgmt_class)) {
1100 if (mad_agent_priv->agent.rmpp_version) {
1101 ret = -EINVAL;
1102 goto error;
1103 }
1104 }
1105
1106 /*
1107 * Save pointer to next work request to post in case the
1108 * current one completes, and the user modifies the work
1109 * request associated with the completion
1110 */
1111 next_send_buf = send_buf->next;
1112 mad_send_wr->send_wr.wr.ud.ah = send_buf->ah;
1da177e4 1113
1114 if (((struct ib_mad_hdr *) send_buf->mad)->mgmt_class ==
1115 IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE) {
1116 ret = handle_outgoing_dr_smp(mad_agent_priv,
1117 mad_send_wr);
1da177e4 1118 if (ret < 0) /* error */
34816ad9 1119 goto error;
1da177e4 1120 else if (ret == 1) /* locally consumed */
34816ad9 1121 continue;
1122 }
1123
34816ad9 1124 mad_send_wr->tid = ((struct ib_mad_hdr *) send_buf->mad)->tid;
1da177e4 1125 /* Timeout will be updated after send completes */
34816ad9 1126 mad_send_wr->timeout = msecs_to_jiffies(send_buf->timeout_ms);
1127 mad_send_wr->max_retries = send_buf->retries;
1128 mad_send_wr->retries_left = send_buf->retries;
1129 send_buf->retries = 0;
34816ad9 1130 /* Reference for work request to QP + response */
1131 mad_send_wr->refcount = 1 + (mad_send_wr->timeout > 0);
1132 mad_send_wr->status = IB_WC_SUCCESS;
1133
1134 /* Reference MAD agent until send completes */
1135 atomic_inc(&mad_agent_priv->refcount);
1136 spin_lock_irqsave(&mad_agent_priv->lock, flags);
1137 list_add_tail(&mad_send_wr->agent_list,
1138 &mad_agent_priv->send_list);
1139 spin_unlock_irqrestore(&mad_agent_priv->lock, flags);
1140
1141 if (mad_agent_priv->agent.rmpp_version) {
1142 ret = ib_send_rmpp_mad(mad_send_wr);
1143 if (ret >= 0 && ret != IB_RMPP_RESULT_CONSUMED)
1144 ret = ib_send_mad(mad_send_wr);
1145 } else
1146 ret = ib_send_mad(mad_send_wr);
1147 if (ret < 0) {
1148 /* Fail send request */
1149 spin_lock_irqsave(&mad_agent_priv->lock, flags);
1150 list_del(&mad_send_wr->agent_list);
1151 spin_unlock_irqrestore(&mad_agent_priv->lock, flags);
1152 atomic_dec(&mad_agent_priv->refcount);
34816ad9 1153 goto error;
1da177e4 1154 }
1155 }
1156 return 0;
1157error:
1158 if (bad_send_buf)
1159 *bad_send_buf = send_buf;
1160 return ret;
1161}
1162EXPORT_SYMBOL(ib_post_send_mad);
1163
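/*
 * Posting sketch (continuing the ib_create_send_mad() example above; "msg"
 * is the buffer created there and error handling is abbreviated):
 *
 *	ret = ib_post_send_mad(msg, NULL);
 *	if (ret)
 *		ib_free_send_mad(msg);
 *
 * On success the buffer is owned by the MAD layer until the agent's
 * send_handler runs; it is typically freed there with ib_free_send_mad().
 */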
1164/*
1165 * ib_free_recv_mad - Returns data buffers used to receive
1166 * a MAD to the access layer
1167 */
1168void ib_free_recv_mad(struct ib_mad_recv_wc *mad_recv_wc)
1169{
fa619a77 1170 struct ib_mad_recv_buf *mad_recv_buf, *temp_recv_buf;
1171 struct ib_mad_private_header *mad_priv_hdr;
1172 struct ib_mad_private *priv;
fa619a77 1173 struct list_head free_list;
1da177e4 1174
1175 INIT_LIST_HEAD(&free_list);
1176 list_splice_init(&mad_recv_wc->rmpp_list, &free_list);
1da177e4 1177
1178 list_for_each_entry_safe(mad_recv_buf, temp_recv_buf,
1179 &free_list, list) {
1180 mad_recv_wc = container_of(mad_recv_buf, struct ib_mad_recv_wc,
1181 recv_buf);
1182 mad_priv_hdr = container_of(mad_recv_wc,
1183 struct ib_mad_private_header,
1184 recv_wc);
1185 priv = container_of(mad_priv_hdr, struct ib_mad_private,
1186 header);
fa619a77 1187 kmem_cache_free(ib_mad_cache, priv);
1da177e4 1188 }
1189}
1190EXPORT_SYMBOL(ib_free_recv_mad);
1191
1192struct ib_mad_agent *ib_redirect_mad_qp(struct ib_qp *qp,
1193 u8 rmpp_version,
1194 ib_mad_send_handler send_handler,
1195 ib_mad_recv_handler recv_handler,
1196 void *context)
1197{
1198 return ERR_PTR(-EINVAL); /* XXX: for now */
1199}
1200EXPORT_SYMBOL(ib_redirect_mad_qp);
1201
1202int ib_process_mad_wc(struct ib_mad_agent *mad_agent,
1203 struct ib_wc *wc)
1204{
1205 dev_err(&mad_agent->device->dev,
1206 "ib_process_mad_wc() not implemented yet\n");
1207 return 0;
1208}
1209EXPORT_SYMBOL(ib_process_mad_wc);
1210
1211static int method_in_use(struct ib_mad_mgmt_method_table **method,
1212 struct ib_mad_reg_req *mad_reg_req)
1213{
1214 int i;
1215
19b629f5 1216 for_each_set_bit(i, mad_reg_req->method_mask, IB_MGMT_MAX_METHODS) {
1da177e4 1217 if ((*method)->agent[i]) {
7ef5d4b0 1218 pr_err("Method %d already in use\n", i);
1219 return -EINVAL;
1220 }
1221 }
1222 return 0;
1223}
1224
1225static int allocate_method_table(struct ib_mad_mgmt_method_table **method)
1226{
1227 /* Allocate management method table */
de6eb66b 1228 *method = kzalloc(sizeof **method, GFP_ATOMIC);
1da177e4 1229 if (!*method) {
7ef5d4b0 1230 pr_err("No memory for ib_mad_mgmt_method_table\n");
1231 return -ENOMEM;
1232 }
1233
1234 return 0;
1235}
1236
1237/*
1238 * Check to see if there are any methods still in use
1239 */
1240static int check_method_table(struct ib_mad_mgmt_method_table *method)
1241{
1242 int i;
1243
1244 for (i = 0; i < IB_MGMT_MAX_METHODS; i++)
1245 if (method->agent[i])
1246 return 1;
1247 return 0;
1248}
1249
1250/*
1251 * Check to see if there are any method tables for this class still in use
1252 */
1253static int check_class_table(struct ib_mad_mgmt_class_table *class)
1254{
1255 int i;
1256
1257 for (i = 0; i < MAX_MGMT_CLASS; i++)
1258 if (class->method_table[i])
1259 return 1;
1260 return 0;
1261}
1262
1263static int check_vendor_class(struct ib_mad_mgmt_vendor_class *vendor_class)
1264{
1265 int i;
1266
1267 for (i = 0; i < MAX_MGMT_OUI; i++)
1268 if (vendor_class->method_table[i])
1269 return 1;
1270 return 0;
1271}
1272
1273static int find_vendor_oui(struct ib_mad_mgmt_vendor_class *vendor_class,
1274 char *oui)
1275{
1276 int i;
1277
1278 for (i = 0; i < MAX_MGMT_OUI; i++)
1279 /* Is there matching OUI for this vendor class ? */
1280 if (!memcmp(vendor_class->oui[i], oui, 3))
1281 return i;
1282
1283 return -1;
1284}
1285
1286static int check_vendor_table(struct ib_mad_mgmt_vendor_class_table *vendor)
1287{
1288 int i;
1289
1290 for (i = 0; i < MAX_MGMT_VENDOR_RANGE2; i++)
1291 if (vendor->vendor_class[i])
1292 return 1;
1293
1294 return 0;
1295}
1296
1297static void remove_methods_mad_agent(struct ib_mad_mgmt_method_table *method,
1298 struct ib_mad_agent_private *agent)
1299{
1300 int i;
1301
1302 /* Remove any methods for this mad agent */
1303 for (i = 0; i < IB_MGMT_MAX_METHODS; i++) {
1304 if (method->agent[i] == agent) {
1305 method->agent[i] = NULL;
1306 }
1307 }
1308}
1309
1310static int add_nonoui_reg_req(struct ib_mad_reg_req *mad_reg_req,
1311 struct ib_mad_agent_private *agent_priv,
1312 u8 mgmt_class)
1313{
1314 struct ib_mad_port_private *port_priv;
1315 struct ib_mad_mgmt_class_table **class;
1316 struct ib_mad_mgmt_method_table **method;
1317 int i, ret;
1318
1319 port_priv = agent_priv->qp_info->port_priv;
1320 class = &port_priv->version[mad_reg_req->mgmt_class_version].class;
1321 if (!*class) {
1322 /* Allocate management class table for "new" class version */
de6eb66b 1323 *class = kzalloc(sizeof **class, GFP_ATOMIC);
1da177e4 1324 if (!*class) {
1325 dev_err(&agent_priv->agent.device->dev,
1326 "No memory for ib_mad_mgmt_class_table\n");
1327 ret = -ENOMEM;
1328 goto error1;
1329 }
de6eb66b 1330
1331 /* Allocate method table for this management class */
1332 method = &(*class)->method_table[mgmt_class];
1333 if ((ret = allocate_method_table(method)))
1334 goto error2;
1335 } else {
1336 method = &(*class)->method_table[mgmt_class];
1337 if (!*method) {
1338 /* Allocate method table for this management class */
1339 if ((ret = allocate_method_table(method)))
1340 goto error1;
1341 }
1342 }
1343
1344 /* Now, make sure methods are not already in use */
1345 if (method_in_use(method, mad_reg_req))
1346 goto error3;
1347
1348 /* Finally, add in methods being registered */
19b629f5 1349 for_each_set_bit(i, mad_reg_req->method_mask, IB_MGMT_MAX_METHODS)
1da177e4 1350 (*method)->agent[i] = agent_priv;
19b629f5 1351
1352 return 0;
1353
1354error3:
1355 /* Remove any methods for this mad agent */
1356 remove_methods_mad_agent(*method, agent_priv);
1357 /* Now, check to see if there are any methods in use */
1358 if (!check_method_table(*method)) {
1359 /* If not, release management method table */
1360 kfree(*method);
1361 *method = NULL;
1362 }
1363 ret = -EINVAL;
1364 goto error1;
1365error2:
1366 kfree(*class);
1367 *class = NULL;
1368error1:
1369 return ret;
1370}
1371
1372static int add_oui_reg_req(struct ib_mad_reg_req *mad_reg_req,
1373 struct ib_mad_agent_private *agent_priv)
1374{
1375 struct ib_mad_port_private *port_priv;
1376 struct ib_mad_mgmt_vendor_class_table **vendor_table;
1377 struct ib_mad_mgmt_vendor_class_table *vendor = NULL;
1378 struct ib_mad_mgmt_vendor_class *vendor_class = NULL;
1379 struct ib_mad_mgmt_method_table **method;
1380 int i, ret = -ENOMEM;
1381 u8 vclass;
1382
1383 /* "New" vendor (with OUI) class */
1384 vclass = vendor_class_index(mad_reg_req->mgmt_class);
1385 port_priv = agent_priv->qp_info->port_priv;
1386 vendor_table = &port_priv->version[
1387 mad_reg_req->mgmt_class_version].vendor;
1388 if (!*vendor_table) {
1389 /* Allocate mgmt vendor class table for "new" class version */
de6eb66b 1390 vendor = kzalloc(sizeof *vendor, GFP_ATOMIC);
1da177e4 1391 if (!vendor) {
1392 dev_err(&agent_priv->agent.device->dev,
1393 "No memory for ib_mad_mgmt_vendor_class_table\n");
1394 goto error1;
1395 }
de6eb66b 1396
1397 *vendor_table = vendor;
1398 }
1399 if (!(*vendor_table)->vendor_class[vclass]) {
1400 /* Allocate table for this management vendor class */
de6eb66b 1401 vendor_class = kzalloc(sizeof *vendor_class, GFP_ATOMIC);
1da177e4 1402 if (!vendor_class) {
1403 dev_err(&agent_priv->agent.device->dev,
1404 "No memory for ib_mad_mgmt_vendor_class\n");
1405 goto error2;
1406 }
de6eb66b 1407
1408 (*vendor_table)->vendor_class[vclass] = vendor_class;
1409 }
1410 for (i = 0; i < MAX_MGMT_OUI; i++) {
1411 /* Is there matching OUI for this vendor class ? */
1412 if (!memcmp((*vendor_table)->vendor_class[vclass]->oui[i],
1413 mad_reg_req->oui, 3)) {
1414 method = &(*vendor_table)->vendor_class[
1415 vclass]->method_table[i];
1416 BUG_ON(!*method);
1417 goto check_in_use;
1418 }
1419 }
1420 for (i = 0; i < MAX_MGMT_OUI; i++) {
1421 /* OUI slot available ? */
1422 if (!is_vendor_oui((*vendor_table)->vendor_class[
1423 vclass]->oui[i])) {
1424 method = &(*vendor_table)->vendor_class[
1425 vclass]->method_table[i];
1426 BUG_ON(*method);
1427 /* Allocate method table for this OUI */
1428 if ((ret = allocate_method_table(method)))
1429 goto error3;
1430 memcpy((*vendor_table)->vendor_class[vclass]->oui[i],
1431 mad_reg_req->oui, 3);
1432 goto check_in_use;
1433 }
1434 }
7ef5d4b0 1435 dev_err(&agent_priv->agent.device->dev, "All OUI slots in use\n");
1436 goto error3;
1437
1438check_in_use:
1439 /* Now, make sure methods are not already in use */
1440 if (method_in_use(method, mad_reg_req))
1441 goto error4;
1442
1443 /* Finally, add in methods being registered */
19b629f5 1444 for_each_set_bit(i, mad_reg_req->method_mask, IB_MGMT_MAX_METHODS)
1da177e4 1445 (*method)->agent[i] = agent_priv;
19b629f5 1446
1447 return 0;
1448
1449error4:
1450 /* Remove any methods for this mad agent */
1451 remove_methods_mad_agent(*method, agent_priv);
1452 /* Now, check to see if there are any methods in use */
1453 if (!check_method_table(*method)) {
1454 /* If not, release management method table */
1455 kfree(*method);
1456 *method = NULL;
1457 }
1458 ret = -EINVAL;
1459error3:
1460 if (vendor_class) {
1461 (*vendor_table)->vendor_class[vclass] = NULL;
1462 kfree(vendor_class);
1463 }
1464error2:
1465 if (vendor) {
1466 *vendor_table = NULL;
1467 kfree(vendor);
1468 }
1469error1:
1470 return ret;
1471}
1472
1473static void remove_mad_reg_req(struct ib_mad_agent_private *agent_priv)
1474{
1475 struct ib_mad_port_private *port_priv;
1476 struct ib_mad_mgmt_class_table *class;
1477 struct ib_mad_mgmt_method_table *method;
1478 struct ib_mad_mgmt_vendor_class_table *vendor;
1479 struct ib_mad_mgmt_vendor_class *vendor_class;
1480 int index;
1481 u8 mgmt_class;
1482
1483 /*
1484 * Was MAD registration request supplied
1485 * with original registration ?
1486 */
1487 if (!agent_priv->reg_req) {
1488 goto out;
1489 }
1490
1491 port_priv = agent_priv->qp_info->port_priv;
1492 mgmt_class = convert_mgmt_class(agent_priv->reg_req->mgmt_class);
1493 class = port_priv->version[
1494 agent_priv->reg_req->mgmt_class_version].class;
1495 if (!class)
1496 goto vendor_check;
1497
1498 method = class->method_table[mgmt_class];
1499 if (method) {
1500 /* Remove any methods for this mad agent */
1501 remove_methods_mad_agent(method, agent_priv);
1502 /* Now, check to see if there are any methods still in use */
1503 if (!check_method_table(method)) {
1504 /* If not, release management method table */
1505 kfree(method);
1506 class->method_table[mgmt_class] = NULL;
1507 /* Any management classes left ? */
1508 if (!check_class_table(class)) {
1509 /* If not, release management class table */
1510 kfree(class);
1511 port_priv->version[
1512 agent_priv->reg_req->
1513 mgmt_class_version].class = NULL;
1514 }
1515 }
1516 }
1517
1518vendor_check:
1519 if (!is_vendor_class(mgmt_class))
1520 goto out;
1521
1522 /* normalize mgmt_class to vendor range 2 */
1523 mgmt_class = vendor_class_index(agent_priv->reg_req->mgmt_class);
1524 vendor = port_priv->version[
1525 agent_priv->reg_req->mgmt_class_version].vendor;
1526
1527 if (!vendor)
1528 goto out;
1529
1530 vendor_class = vendor->vendor_class[mgmt_class];
1531 if (vendor_class) {
1532 index = find_vendor_oui(vendor_class, agent_priv->reg_req->oui);
1533 if (index < 0)
1534 goto out;
1535 method = vendor_class->method_table[index];
1536 if (method) {
1537 /* Remove any methods for this mad agent */
1538 remove_methods_mad_agent(method, agent_priv);
1539 /*
1540 * Now, check to see if there are
1541 * any methods still in use
1542 */
1543 if (!check_method_table(method)) {
1544 /* If not, release management method table */
1545 kfree(method);
1546 vendor_class->method_table[index] = NULL;
1547 memset(vendor_class->oui[index], 0, 3);
1548 /* Any OUIs left ? */
1549 if (!check_vendor_class(vendor_class)) {
1550 /* If not, release vendor class table */
1551 kfree(vendor_class);
1552 vendor->vendor_class[mgmt_class] = NULL;
1553 /* Any other vendor classes left ? */
1554 if (!check_vendor_table(vendor)) {
1555 kfree(vendor);
1556 port_priv->version[
1557 agent_priv->reg_req->
1558 mgmt_class_version].
1559 vendor = NULL;
1560 }
1561 }
1562 }
1563 }
1564 }
1565
1566out:
1567 return;
1568}
1569
1570static struct ib_mad_agent_private *
1571find_mad_agent(struct ib_mad_port_private *port_priv,
4a0754fa 1572 struct ib_mad *mad)
1573{
1574 struct ib_mad_agent_private *mad_agent = NULL;
1575 unsigned long flags;
1576
1577 spin_lock_irqsave(&port_priv->reg_lock, flags);
2527e681 1578 if (ib_response_mad(mad)) {
1579 u32 hi_tid;
1580 struct ib_mad_agent_private *entry;
1581
1582 /*
1583 * Routing is based on high 32 bits of transaction ID
1584 * of MAD.
1585 */
1586 hi_tid = be64_to_cpu(mad->mad_hdr.tid) >> 32;
34816ad9 1587 list_for_each_entry(entry, &port_priv->agent_list, agent_list) {
1588 if (entry->agent.hi_tid == hi_tid) {
1589 mad_agent = entry;
1590 break;
1591 }
1592 }
1593 } else {
1594 struct ib_mad_mgmt_class_table *class;
1595 struct ib_mad_mgmt_method_table *method;
1596 struct ib_mad_mgmt_vendor_class_table *vendor;
1597 struct ib_mad_mgmt_vendor_class *vendor_class;
1598 struct ib_vendor_mad *vendor_mad;
1599 int index;
1600
1601 /*
1602 * Routing is based on version, class, and method
1603 * For "newer" vendor MADs, also based on OUI
1604 */
1605 if (mad->mad_hdr.class_version >= MAX_MGMT_VERSION)
1606 goto out;
1607 if (!is_vendor_class(mad->mad_hdr.mgmt_class)) {
1608 class = port_priv->version[
1609 mad->mad_hdr.class_version].class;
1610 if (!class)
1611 goto out;
1612 if (convert_mgmt_class(mad->mad_hdr.mgmt_class) >=
1613 IB_MGMT_MAX_METHODS)
1614 goto out;
1615 method = class->method_table[convert_mgmt_class(
1616 mad->mad_hdr.mgmt_class)];
1617 if (method)
1618 mad_agent = method->agent[mad->mad_hdr.method &
1619 ~IB_MGMT_METHOD_RESP];
1620 } else {
1621 vendor = port_priv->version[
1622 mad->mad_hdr.class_version].vendor;
1623 if (!vendor)
1624 goto out;
1625 vendor_class = vendor->vendor_class[vendor_class_index(
1626 mad->mad_hdr.mgmt_class)];
1627 if (!vendor_class)
1628 goto out;
1629 /* Find matching OUI */
1630 vendor_mad = (struct ib_vendor_mad *)mad;
1631 index = find_vendor_oui(vendor_class, vendor_mad->oui);
1632 if (index == -1)
1633 goto out;
1634 method = vendor_class->method_table[index];
1635 if (method) {
1636 mad_agent = method->agent[mad->mad_hdr.method &
1637 ~IB_MGMT_METHOD_RESP];
1638 }
1639 }
1640 }
1641
1642 if (mad_agent) {
1643 if (mad_agent->agent.recv_handler)
1644 atomic_inc(&mad_agent->refcount);
1645 else {
1646 dev_notice(&port_priv->device->dev,
1647 "No receive handler for client %p on port %d\n",
1648 &mad_agent->agent, port_priv->port_num);
1649 mad_agent = NULL;
1650 }
1651 }
1652out:
1653 spin_unlock_irqrestore(&port_priv->reg_lock, flags);
1654
1655 return mad_agent;
1656}
1657
1658static int validate_mad(struct ib_mad *mad, u32 qp_num)
1659{
1660 int valid = 0;
1661
1662 /* Make sure MAD base version is understood */
1663 if (mad->mad_hdr.base_version != IB_MGMT_BASE_VERSION) {
1664 pr_err("MAD received with unsupported base version %d\n",
1665 mad->mad_hdr.base_version);
1666 goto out;
1667 }
1668
1669 /* Filter SMI packets sent to other than QP0 */
1670 if ((mad->mad_hdr.mgmt_class == IB_MGMT_CLASS_SUBN_LID_ROUTED) ||
1671 (mad->mad_hdr.mgmt_class == IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE)) {
1672 if (qp_num == 0)
1673 valid = 1;
1674 } else {
1675 /* Filter GSI packets sent to QP0 */
1676 if (qp_num != 0)
1677 valid = 1;
1678 }
1679
1680out:
1681 return valid;
1682}
1683
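/*
 * Example of the filtering in validate_mad() above: a LID-routed SubnGet
 * (class 0x01) arriving on QP1 is dropped, since SMP classes are only valid
 * on QP0, while a PerfMgmt Get (class 0x04) on QP1 falls into the else
 * branch and is accepted.
 */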
1684static int is_data_mad(struct ib_mad_agent_private *mad_agent_priv,
1685 struct ib_mad_hdr *mad_hdr)
1686{
1687 struct ib_rmpp_mad *rmpp_mad;
1688
1689 rmpp_mad = (struct ib_rmpp_mad *)mad_hdr;
1690 return !mad_agent_priv->agent.rmpp_version ||
1691 !(ib_get_rmpp_flags(&rmpp_mad->rmpp_hdr) &
1692 IB_MGMT_RMPP_FLAG_ACTIVE) ||
1693 (rmpp_mad->rmpp_hdr.rmpp_type == IB_MGMT_RMPP_TYPE_DATA);
1694}
1695
1696static inline int rcv_has_same_class(struct ib_mad_send_wr_private *wr,
1697 struct ib_mad_recv_wc *rwc)
1698{
1699 return ((struct ib_mad *)(wr->send_buf.mad))->mad_hdr.mgmt_class ==
1700 rwc->recv_buf.mad->mad_hdr.mgmt_class;
1701}
1702
1703static inline int rcv_has_same_gid(struct ib_mad_agent_private *mad_agent_priv,
1704 struct ib_mad_send_wr_private *wr,
1705 struct ib_mad_recv_wc *rwc )
1706{
1707 struct ib_ah_attr attr;
1708 u8 send_resp, rcv_resp;
1709 union ib_gid sgid;
1710 struct ib_device *device = mad_agent_priv->agent.device;
1711 u8 port_num = mad_agent_priv->agent.port_num;
1712 u8 lmc;
fa9656bb 1713
1714 send_resp = ib_response_mad((struct ib_mad *)wr->send_buf.mad);
1715 rcv_resp = ib_response_mad(rwc->recv_buf.mad);
fa9656bb 1716
1717 if (send_resp == rcv_resp)
1718 /* both requests, or both responses. GIDs different */
1719 return 0;
1720
1721 if (ib_query_ah(wr->send_buf.ah, &attr))
1722 /* Assume not equal, to avoid false positives. */
1723 return 0;
1724
1725 if (!!(attr.ah_flags & IB_AH_GRH) !=
1726 !!(rwc->wc->wc_flags & IB_WC_GRH))
1727 /* one has GID, other does not. Assume different */
1728 return 0;
1729
1730 if (!send_resp && rcv_resp) {
1731 /* is request/response. */
1732 if (!(attr.ah_flags & IB_AH_GRH)) {
1733 if (ib_get_cached_lmc(device, port_num, &lmc))
1734 return 0;
1735 return (!lmc || !((attr.src_path_bits ^
1736 rwc->wc->dlid_path_bits) &
1737 ((1 << lmc) - 1)));
1738 } else {
1739 if (ib_get_cached_gid(device, port_num,
1740 attr.grh.sgid_index, &sgid))
1741 return 0;
1742 return !memcmp(sgid.raw, rwc->recv_buf.grh->dgid.raw,
1743 16);
1744 }
1745 }
1746
1747 if (!(attr.ah_flags & IB_AH_GRH))
1748 return attr.dlid == rwc->wc->slid;
1749 else
1750 return !memcmp(attr.grh.dgid.raw, rwc->recv_buf.grh->sgid.raw,
1751 16);
1752}
1753
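/*
 * Worked example for the LMC masking in rcv_has_same_gid() above (the
 * example_* helper is hypothetical, not part of mad.c): with lmc = 3 a
 * port answers to 2^3 consecutive LIDs, so only the low three path bits
 * identify the sender and the rest are masked off with (1 << lmc) - 1.
 */
static int example_path_bits_match(u8 src_path_bits, u8 dlid_path_bits, u8 lmc)
{
	u8 mask = (1 << lmc) - 1;	/* lmc = 3 -> mask = 0x07 */

	return !lmc || !((src_path_bits ^ dlid_path_bits) & mask);
}
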
1754static inline int is_direct(u8 class)
1755{
1756 return (class == IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE);
fa9656bb 1757}
9874e746 1758
fa619a77 1759struct ib_mad_send_wr_private*
fa9656bb 1760ib_find_send_mad(struct ib_mad_agent_private *mad_agent_priv,
9874e746 1761 struct ib_mad_recv_wc *wc)
1da177e4 1762{
9874e746 1763 struct ib_mad_send_wr_private *wr;
fa9656bb
JM
1764 struct ib_mad *mad;
1765
9874e746
JM
1766 mad = (struct ib_mad *)wc->recv_buf.mad;
1767
1768 list_for_each_entry(wr, &mad_agent_priv->wait_list, agent_list) {
1769 if ((wr->tid == mad->mad_hdr.tid) &&
1770 rcv_has_same_class(wr, wc) &&
1771 /*
1772 * Don't check GID for direct routed MADs.
1773 * These might have permissive LIDs.
1774 */
1775 (is_direct(wc->recv_buf.mad->mad_hdr.mgmt_class) ||
1776 rcv_has_same_gid(mad_agent_priv, wr, wc)))
39798695 1777 return (wr->status == IB_WC_SUCCESS) ? wr : NULL;
1da177e4
LT
1778 }
1779
1780 /*
1781 * It's possible to receive the response before we've
1782 * been notified that the send has completed
1783 */
9874e746
JM
1784 list_for_each_entry(wr, &mad_agent_priv->send_list, agent_list) {
1785 if (is_data_mad(mad_agent_priv, wr->send_buf.mad) &&
1786 wr->tid == mad->mad_hdr.tid &&
1787 wr->timeout &&
1788 rcv_has_same_class(wr, wc) &&
1789 /*
1790 * Don't check GID for direct routed MADs.
1791 * These might have permissive LIDs.
1792 */
1793 (is_direct(wc->recv_buf.mad->mad_hdr.mgmt_class) ||
1794 rcv_has_same_gid(mad_agent_priv, wr, wc)))
1da177e4 1795 /* Verify request has not been canceled */
9874e746 1796 return (wr->status == IB_WC_SUCCESS) ? wr : NULL;
1da177e4
LT
1797 }
1798 return NULL;
1799}
1800
fa619a77 1801void ib_mark_mad_done(struct ib_mad_send_wr_private *mad_send_wr)
6a0c435e
HR
1802{
1803 mad_send_wr->timeout = 0;
179e0917
AM
1804 if (mad_send_wr->refcount == 1)
1805 list_move_tail(&mad_send_wr->agent_list,
6a0c435e 1806 &mad_send_wr->mad_agent_priv->done_list);
6a0c435e
HR
1807}
1808
1da177e4 1809static void ib_mad_complete_recv(struct ib_mad_agent_private *mad_agent_priv,
4a0754fa 1810 struct ib_mad_recv_wc *mad_recv_wc)
1da177e4
LT
1811{
1812 struct ib_mad_send_wr_private *mad_send_wr;
1813 struct ib_mad_send_wc mad_send_wc;
1814 unsigned long flags;
1815
fa619a77
HR
1816 INIT_LIST_HEAD(&mad_recv_wc->rmpp_list);
1817 list_add(&mad_recv_wc->recv_buf.list, &mad_recv_wc->rmpp_list);
1818 if (mad_agent_priv->agent.rmpp_version) {
1819 mad_recv_wc = ib_process_rmpp_recv_wc(mad_agent_priv,
1820 mad_recv_wc);
1821 if (!mad_recv_wc) {
1b52fa98 1822 deref_mad_agent(mad_agent_priv);
fa619a77
HR
1823 return;
1824 }
1825 }
1826
1da177e4 1827 /* Complete corresponding request */
2527e681 1828 if (ib_response_mad(mad_recv_wc->recv_buf.mad)) {
1da177e4 1829 spin_lock_irqsave(&mad_agent_priv->lock, flags);
fa9656bb 1830 mad_send_wr = ib_find_send_mad(mad_agent_priv, mad_recv_wc);
1da177e4
LT
1831 if (!mad_send_wr) {
1832 spin_unlock_irqrestore(&mad_agent_priv->lock, flags);
4a0754fa 1833 ib_free_recv_mad(mad_recv_wc);
1b52fa98 1834 deref_mad_agent(mad_agent_priv);
1da177e4
LT
1835 return;
1836 }
fa619a77 1837 ib_mark_mad_done(mad_send_wr);
1da177e4
LT
1838 spin_unlock_irqrestore(&mad_agent_priv->lock, flags);
1839
1840 /* Defined behavior is to complete response before request */
34816ad9 1841 mad_recv_wc->wc->wr_id = (unsigned long) &mad_send_wr->send_buf;
4a0754fa
HR
1842 mad_agent_priv->agent.recv_handler(&mad_agent_priv->agent,
1843 mad_recv_wc);
1da177e4
LT
1844 atomic_dec(&mad_agent_priv->refcount);
1845
1846 mad_send_wc.status = IB_WC_SUCCESS;
1847 mad_send_wc.vendor_err = 0;
34816ad9 1848 mad_send_wc.send_buf = &mad_send_wr->send_buf;
1da177e4
LT
1849 ib_mad_complete_send_wr(mad_send_wr, &mad_send_wc);
1850 } else {
4a0754fa
HR
1851 mad_agent_priv->agent.recv_handler(&mad_agent_priv->agent,
1852 mad_recv_wc);
1b52fa98 1853 deref_mad_agent(mad_agent_priv);
1da177e4
LT
1854 }
1855}
1856
0b307043
ST
1857static bool generate_unmatched_resp(struct ib_mad_private *recv,
1858 struct ib_mad_private *response)
1859{
1860 if (recv->mad.mad.mad_hdr.method == IB_MGMT_METHOD_GET ||
1861 recv->mad.mad.mad_hdr.method == IB_MGMT_METHOD_SET) {
1862 memcpy(response, recv, sizeof *response);
1863 response->header.recv_wc.wc = &response->header.wc;
1864 response->header.recv_wc.recv_buf.mad = &response->mad.mad;
1865 response->header.recv_wc.recv_buf.grh = &response->grh;
1866 response->mad.mad.mad_hdr.method = IB_MGMT_METHOD_GET_RESP;
1867 response->mad.mad.mad_hdr.status =
1868 cpu_to_be16(IB_MGMT_MAD_STATUS_UNSUPPORTED_METHOD_ATTRIB);
840777de
JM
1869 if (recv->mad.mad.mad_hdr.mgmt_class == IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE)
1870 response->mad.mad.mad_hdr.status |= IB_SMP_DIRECTION;
0b307043
ST
1871
1872 return true;
1873 } else {
1874 return false;
1875 }
1876}
1da177e4
LT
1877static void ib_mad_recv_done_handler(struct ib_mad_port_private *port_priv,
1878 struct ib_wc *wc)
1879{
1880 struct ib_mad_qp_info *qp_info;
1881 struct ib_mad_private_header *mad_priv_hdr;
445d6807 1882 struct ib_mad_private *recv, *response = NULL;
1da177e4
LT
1883 struct ib_mad_list_head *mad_list;
1884 struct ib_mad_agent_private *mad_agent;
1bae4dbf 1885 int port_num;
a9e74323 1886 int ret = IB_MAD_RESULT_SUCCESS;
1da177e4 1887
1da177e4
LT
1888 mad_list = (struct ib_mad_list_head *)(unsigned long)wc->wr_id;
1889 qp_info = mad_list->mad_queue->qp_info;
1890 dequeue_mad(mad_list);
1891
1892 mad_priv_hdr = container_of(mad_list, struct ib_mad_private_header,
1893 mad_list);
1894 recv = container_of(mad_priv_hdr, struct ib_mad_private, header);
1527106f
RC
1895 ib_dma_unmap_single(port_priv->device,
1896 recv->header.mapping,
1897 sizeof(struct ib_mad_private) -
1898 sizeof(struct ib_mad_private_header),
1899 DMA_FROM_DEVICE);
1da177e4
LT
1900
1901 /* Setup MAD receive work completion from "normal" work completion */
24239aff
SH
1902 recv->header.wc = *wc;
1903 recv->header.recv_wc.wc = &recv->header.wc;
1da177e4
LT
1904 recv->header.recv_wc.mad_len = sizeof(struct ib_mad);
1905 recv->header.recv_wc.recv_buf.mad = &recv->mad.mad;
1906 recv->header.recv_wc.recv_buf.grh = &recv->grh;
1907
1908 if (atomic_read(&qp_info->snoop_count))
1909 snoop_recv(qp_info, &recv->header.recv_wc, IB_MAD_SNOOP_RECVS);
1910
1911 /* Validate MAD */
1912 if (!validate_mad(&recv->mad.mad, qp_info->qp->qp_num))
1913 goto out;
1914
445d6807
HR
1915 response = kmem_cache_alloc(ib_mad_cache, GFP_KERNEL);
1916 if (!response) {
7ef5d4b0
IW
1917 dev_err(&port_priv->device->dev,
1918 "ib_mad_recv_done_handler no memory for response buffer\n");
445d6807
HR
1919 goto out;
1920 }
1921
1bae4dbf
HR
1922 if (port_priv->device->node_type == RDMA_NODE_IB_SWITCH)
1923 port_num = wc->port_num;
1924 else
1925 port_num = port_priv->port_num;
1926
1da177e4
LT
1927 if (recv->mad.mad.mad_hdr.mgmt_class ==
1928 IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE) {
1bae4dbf
HR
1929 enum smi_forward_action retsmi;
1930
de493d47
HR
1931 if (smi_handle_dr_smp_recv(&recv->mad.smp,
1932 port_priv->device->node_type,
1bae4dbf 1933 port_num,
de493d47
HR
1934 port_priv->device->phys_port_cnt) ==
1935 IB_SMI_DISCARD)
1da177e4 1936 goto out;
de493d47 1937
1bae4dbf
HR
1938 retsmi = smi_check_forward_dr_smp(&recv->mad.smp);
1939 if (retsmi == IB_SMI_LOCAL)
1da177e4 1940 goto local;
de493d47 1941
1bae4dbf
HR
1942 if (retsmi == IB_SMI_SEND) { /* don't forward */
1943 if (smi_handle_dr_smp_send(&recv->mad.smp,
1944 port_priv->device->node_type,
1945 port_num) == IB_SMI_DISCARD)
1946 goto out;
1947
1948 if (smi_check_local_smp(&recv->mad.smp, port_priv->device) == IB_SMI_DISCARD)
1949 goto out;
1950 } else if (port_priv->device->node_type == RDMA_NODE_IB_SWITCH) {
1951 /* forward case for switches */
1952 memcpy(response, recv, sizeof(*response));
1953 response->header.recv_wc.wc = &response->header.wc;
1954 response->header.recv_wc.recv_buf.mad = &response->mad.mad;
1955 response->header.recv_wc.recv_buf.grh = &response->grh;
1956
86dfbecd
HR
1957 agent_send_response(&response->mad.mad,
1958 &response->grh, wc,
1959 port_priv->device,
1960 smi_get_fwd_port(&recv->mad.smp),
1961 qp_info->qp->qp_num);
de493d47 1962
1da177e4 1963 goto out;
1bae4dbf 1964 }
1da177e4
LT
1965 }
1966
1967local:
1968 /* Give driver "right of first refusal" on incoming MAD */
1969 if (port_priv->device->process_mad) {
1da177e4
LT
1970 ret = port_priv->device->process_mad(port_priv->device, 0,
1971 port_priv->port_num,
1972 wc, &recv->grh,
1973 &recv->mad.mad,
1974 &response->mad.mad);
1975 if (ret & IB_MAD_RESULT_SUCCESS) {
1976 if (ret & IB_MAD_RESULT_CONSUMED)
1977 goto out;
1978 if (ret & IB_MAD_RESULT_REPLY) {
34816ad9
SH
1979 agent_send_response(&response->mad.mad,
1980 &recv->grh, wc,
1981 port_priv->device,
1bae4dbf 1982 port_num,
34816ad9 1983 qp_info->qp->qp_num);
1da177e4
LT
1984 goto out;
1985 }
1986 }
1987 }
1988
4a0754fa 1989 mad_agent = find_mad_agent(port_priv, &recv->mad.mad);
1da177e4 1990 if (mad_agent) {
4a0754fa 1991 ib_mad_complete_recv(mad_agent, &recv->header.recv_wc);
1da177e4
LT
1992 /*
1993 * recv is freed inside ib_mad_complete_recv(), either in its
1994 * error paths or via the agent's recv_handler
1995 */
1996 recv = NULL;
a9e74323
JM
1997 } else if ((ret & IB_MAD_RESULT_SUCCESS) &&
1998 generate_unmatched_resp(recv, response)) {
0b307043
ST
1999 agent_send_response(&response->mad.mad, &recv->grh, wc,
2000 port_priv->device, port_num, qp_info->qp->qp_num);
1da177e4
LT
2001 }
2002
2003out:
2004 /* Post another receive request for this QP */
2005 if (response) {
2006 ib_mad_post_receive_mads(qp_info, response);
2007 if (recv)
2008 kmem_cache_free(ib_mad_cache, recv);
2009 } else
2010 ib_mad_post_receive_mads(qp_info, recv);
2011}
2012
2013static void adjust_timeout(struct ib_mad_agent_private *mad_agent_priv)
2014{
2015 struct ib_mad_send_wr_private *mad_send_wr;
2016 unsigned long delay;
2017
2018 if (list_empty(&mad_agent_priv->wait_list)) {
136b5721 2019 cancel_delayed_work(&mad_agent_priv->timed_work);
1da177e4
LT
2020 } else {
2021 mad_send_wr = list_entry(mad_agent_priv->wait_list.next,
2022 struct ib_mad_send_wr_private,
2023 agent_list);
2024
2025 if (time_after(mad_agent_priv->timeout,
2026 mad_send_wr->timeout)) {
2027 mad_agent_priv->timeout = mad_send_wr->timeout;
1da177e4
LT
2028 delay = mad_send_wr->timeout - jiffies;
2029 if ((long)delay <= 0)
2030 delay = 1;
e7c2f967
TH
2031 mod_delayed_work(mad_agent_priv->qp_info->port_priv->wq,
2032 &mad_agent_priv->timed_work, delay);
1da177e4
LT
2033 }
2034 }
2035}
2036
d760ce8f 2037static void wait_for_response(struct ib_mad_send_wr_private *mad_send_wr)
1da177e4 2038{
d760ce8f 2039 struct ib_mad_agent_private *mad_agent_priv;
1da177e4
LT
2040 struct ib_mad_send_wr_private *temp_mad_send_wr;
2041 struct list_head *list_item;
2042 unsigned long delay;
2043
d760ce8f 2044 mad_agent_priv = mad_send_wr->mad_agent_priv;
1da177e4
LT
2045 list_del(&mad_send_wr->agent_list);
2046
2047 delay = mad_send_wr->timeout;
2048 mad_send_wr->timeout += jiffies;
2049
29bb33dd
HR
2050 if (delay) {
2051 list_for_each_prev(list_item, &mad_agent_priv->wait_list) {
2052 temp_mad_send_wr = list_entry(list_item,
2053 struct ib_mad_send_wr_private,
2054 agent_list);
2055 if (time_after(mad_send_wr->timeout,
2056 temp_mad_send_wr->timeout))
2057 break;
2058 }
1da177e4 2059 }
29bb33dd
HR
2060 else
2061 list_item = &mad_agent_priv->wait_list;
1da177e4
LT
2062 list_add(&mad_send_wr->agent_list, list_item);
2063
2064 /* Reschedule a work item if we have a shorter timeout */
e7c2f967
TH
2065 if (mad_agent_priv->wait_list.next == &mad_send_wr->agent_list)
2066 mod_delayed_work(mad_agent_priv->qp_info->port_priv->wq,
2067 &mad_agent_priv->timed_work, delay);
1da177e4
LT
2068}
2069
03b61ad2
HR
2070void ib_reset_mad_timeout(struct ib_mad_send_wr_private *mad_send_wr,
2071 int timeout_ms)
2072{
2073 mad_send_wr->timeout = msecs_to_jiffies(timeout_ms);
2074 wait_for_response(mad_send_wr);
2075}
2076
1da177e4
LT
2077/*
2078 * Process a send work completion
2079 */
fa619a77
HR
2080void ib_mad_complete_send_wr(struct ib_mad_send_wr_private *mad_send_wr,
2081 struct ib_mad_send_wc *mad_send_wc)
1da177e4
LT
2082{
2083 struct ib_mad_agent_private *mad_agent_priv;
2084 unsigned long flags;
fa619a77 2085 int ret;
1da177e4 2086
d760ce8f 2087 mad_agent_priv = mad_send_wr->mad_agent_priv;
1da177e4 2088 spin_lock_irqsave(&mad_agent_priv->lock, flags);
fa619a77
HR
2089 if (mad_agent_priv->agent.rmpp_version) {
2090 ret = ib_process_rmpp_send_wc(mad_send_wr, mad_send_wc);
2091 if (ret == IB_RMPP_RESULT_CONSUMED)
2092 goto done;
2093 } else
2094 ret = IB_RMPP_RESULT_UNHANDLED;
2095
1da177e4
LT
2096 if (mad_send_wc->status != IB_WC_SUCCESS &&
2097 mad_send_wr->status == IB_WC_SUCCESS) {
2098 mad_send_wr->status = mad_send_wc->status;
2099 mad_send_wr->refcount -= (mad_send_wr->timeout > 0);
2100 }
2101
2102 if (--mad_send_wr->refcount > 0) {
2103 if (mad_send_wr->refcount == 1 && mad_send_wr->timeout &&
2104 mad_send_wr->status == IB_WC_SUCCESS) {
d760ce8f 2105 wait_for_response(mad_send_wr);
1da177e4 2106 }
fa619a77 2107 goto done;
1da177e4
LT
2108 }
2109
2110 /* Remove send from MAD agent and notify client of completion */
2111 list_del(&mad_send_wr->agent_list);
2112 adjust_timeout(mad_agent_priv);
2113 spin_unlock_irqrestore(&mad_agent_priv->lock, flags);
2114
2115 if (mad_send_wr->status != IB_WC_SUCCESS)
2116 mad_send_wc->status = mad_send_wr->status;
34816ad9
SH
2117 if (ret == IB_RMPP_RESULT_INTERNAL)
2118 ib_rmpp_send_handler(mad_send_wc);
2119 else
fa619a77
HR
2120 mad_agent_priv->agent.send_handler(&mad_agent_priv->agent,
2121 mad_send_wc);
1da177e4
LT
2122
2123 /* Release reference on agent taken when sending */
1b52fa98 2124 deref_mad_agent(mad_agent_priv);
fa619a77
HR
2125 return;
2126done:
2127 spin_unlock_irqrestore(&mad_agent_priv->lock, flags);
1da177e4
LT
2128}
2129
2130static void ib_mad_send_done_handler(struct ib_mad_port_private *port_priv,
2131 struct ib_wc *wc)
2132{
2133 struct ib_mad_send_wr_private *mad_send_wr, *queued_send_wr;
2134 struct ib_mad_list_head *mad_list;
2135 struct ib_mad_qp_info *qp_info;
2136 struct ib_mad_queue *send_queue;
2137 struct ib_send_wr *bad_send_wr;
34816ad9 2138 struct ib_mad_send_wc mad_send_wc;
1da177e4
LT
2139 unsigned long flags;
2140 int ret;
2141
2142 mad_list = (struct ib_mad_list_head *)(unsigned long)wc->wr_id;
2143 mad_send_wr = container_of(mad_list, struct ib_mad_send_wr_private,
2144 mad_list);
2145 send_queue = mad_list->mad_queue;
2146 qp_info = send_queue->qp_info;
2147
2148retry:
1527106f
RC
2149 ib_dma_unmap_single(mad_send_wr->send_buf.mad_agent->device,
2150 mad_send_wr->header_mapping,
2151 mad_send_wr->sg_list[0].length, DMA_TO_DEVICE);
2152 ib_dma_unmap_single(mad_send_wr->send_buf.mad_agent->device,
2153 mad_send_wr->payload_mapping,
2154 mad_send_wr->sg_list[1].length, DMA_TO_DEVICE);
1da177e4
LT
2155 queued_send_wr = NULL;
2156 spin_lock_irqsave(&send_queue->lock, flags);
2157 list_del(&mad_list->list);
2158
2159 /* Move queued send to the send queue */
2160 if (send_queue->count-- > send_queue->max_active) {
2161 mad_list = container_of(qp_info->overflow_list.next,
2162 struct ib_mad_list_head, list);
2163 queued_send_wr = container_of(mad_list,
2164 struct ib_mad_send_wr_private,
2165 mad_list);
179e0917 2166 list_move_tail(&mad_list->list, &send_queue->list);
1da177e4
LT
2167 }
2168 spin_unlock_irqrestore(&send_queue->lock, flags);
2169
34816ad9
SH
2170 mad_send_wc.send_buf = &mad_send_wr->send_buf;
2171 mad_send_wc.status = wc->status;
2172 mad_send_wc.vendor_err = wc->vendor_err;
1da177e4 2173 if (atomic_read(&qp_info->snoop_count))
34816ad9 2174 snoop_send(qp_info, &mad_send_wr->send_buf, &mad_send_wc,
1da177e4 2175 IB_MAD_SNOOP_SEND_COMPLETIONS);
34816ad9 2176 ib_mad_complete_send_wr(mad_send_wr, &mad_send_wc);
1da177e4
LT
2177
2178 if (queued_send_wr) {
2179 ret = ib_post_send(qp_info->qp, &queued_send_wr->send_wr,
34816ad9 2180 &bad_send_wr);
1da177e4 2181 if (ret) {
7ef5d4b0
IW
2182 dev_err(&port_priv->device->dev,
2183 "ib_post_send failed: %d\n", ret);
1da177e4
LT
2184 mad_send_wr = queued_send_wr;
2185 wc->status = IB_WC_LOC_QP_OP_ERR;
2186 goto retry;
2187 }
2188 }
2189}
2190
2191static void mark_sends_for_retry(struct ib_mad_qp_info *qp_info)
2192{
2193 struct ib_mad_send_wr_private *mad_send_wr;
2194 struct ib_mad_list_head *mad_list;
2195 unsigned long flags;
2196
2197 spin_lock_irqsave(&qp_info->send_queue.lock, flags);
2198 list_for_each_entry(mad_list, &qp_info->send_queue.list, list) {
2199 mad_send_wr = container_of(mad_list,
2200 struct ib_mad_send_wr_private,
2201 mad_list);
2202 mad_send_wr->retry = 1;
2203 }
2204 spin_unlock_irqrestore(&qp_info->send_queue.lock, flags);
2205}
2206
2207static void mad_error_handler(struct ib_mad_port_private *port_priv,
2208 struct ib_wc *wc)
2209{
2210 struct ib_mad_list_head *mad_list;
2211 struct ib_mad_qp_info *qp_info;
2212 struct ib_mad_send_wr_private *mad_send_wr;
2213 int ret;
2214
2215 /* Determine if failure was a send or receive */
2216 mad_list = (struct ib_mad_list_head *)(unsigned long)wc->wr_id;
2217 qp_info = mad_list->mad_queue->qp_info;
2218 if (mad_list->mad_queue == &qp_info->recv_queue)
2219 /*
2220 * Receive errors indicate that the QP has entered the error
2221 * state - error handling/shutdown code will cleanup
2222 */
2223 return;
2224
2225 /*
2226 * Send errors will transition the QP to SQE - move
2227 * QP to RTS and repost flushed work requests
2228 */
2229 mad_send_wr = container_of(mad_list, struct ib_mad_send_wr_private,
2230 mad_list);
2231 if (wc->status == IB_WC_WR_FLUSH_ERR) {
2232 if (mad_send_wr->retry) {
2233 /* Repost send */
2234 struct ib_send_wr *bad_send_wr;
2235
2236 mad_send_wr->retry = 0;
2237 ret = ib_post_send(qp_info->qp, &mad_send_wr->send_wr,
2238 &bad_send_wr);
2239 if (ret)
2240 ib_mad_send_done_handler(port_priv, wc);
2241 } else
2242 ib_mad_send_done_handler(port_priv, wc);
2243 } else {
2244 struct ib_qp_attr *attr;
2245
2246 /* Transition QP to RTS and fail offending send */
2247 attr = kmalloc(sizeof *attr, GFP_KERNEL);
2248 if (attr) {
2249 attr->qp_state = IB_QPS_RTS;
2250 attr->cur_qp_state = IB_QPS_SQE;
2251 ret = ib_modify_qp(qp_info->qp, attr,
2252 IB_QP_STATE | IB_QP_CUR_STATE);
2253 kfree(attr);
2254 if (ret)
7ef5d4b0
IW
2255 dev_err(&port_priv->device->dev,
2256 "mad_error_handler - ib_modify_qp to RTS : %d\n",
2257 ret);
1da177e4
LT
2258 else
2259 mark_sends_for_retry(qp_info);
2260 }
2261 ib_mad_send_done_handler(port_priv, wc);
2262 }
2263}
2264
2265/*
2266 * IB MAD completion callback
2267 */
c4028958 2268static void ib_mad_completion_handler(struct work_struct *work)
1da177e4
LT
2269{
2270 struct ib_mad_port_private *port_priv;
2271 struct ib_wc wc;
2272
c4028958 2273 port_priv = container_of(work, struct ib_mad_port_private, work);
1da177e4
LT
2274 ib_req_notify_cq(port_priv->cq, IB_CQ_NEXT_COMP);
2275
2276 while (ib_poll_cq(port_priv->cq, 1, &wc) == 1) {
2277 if (wc.status == IB_WC_SUCCESS) {
2278 switch (wc.opcode) {
2279 case IB_WC_SEND:
2280 ib_mad_send_done_handler(port_priv, &wc);
2281 break;
2282 case IB_WC_RECV:
2283 ib_mad_recv_done_handler(port_priv, &wc);
2284 break;
2285 default:
2286 BUG_ON(1);
2287 break;
2288 }
2289 } else
2290 mad_error_handler(port_priv, &wc);
2291 }
2292}
2293
2294static void cancel_mads(struct ib_mad_agent_private *mad_agent_priv)
2295{
2296 unsigned long flags;
2297 struct ib_mad_send_wr_private *mad_send_wr, *temp_mad_send_wr;
2298 struct ib_mad_send_wc mad_send_wc;
2299 struct list_head cancel_list;
2300
2301 INIT_LIST_HEAD(&cancel_list);
2302
2303 spin_lock_irqsave(&mad_agent_priv->lock, flags);
2304 list_for_each_entry_safe(mad_send_wr, temp_mad_send_wr,
2305 &mad_agent_priv->send_list, agent_list) {
2306 if (mad_send_wr->status == IB_WC_SUCCESS) {
3cd96564 2307 mad_send_wr->status = IB_WC_WR_FLUSH_ERR;
1da177e4
LT
2308 mad_send_wr->refcount -= (mad_send_wr->timeout > 0);
2309 }
2310 }
2311
2312 /* Empty wait list to prevent receives from finding a request */
2313 list_splice_init(&mad_agent_priv->wait_list, &cancel_list);
2314 spin_unlock_irqrestore(&mad_agent_priv->lock, flags);
2315
2316 /* Report all cancelled requests */
2317 mad_send_wc.status = IB_WC_WR_FLUSH_ERR;
2318 mad_send_wc.vendor_err = 0;
2319
2320 list_for_each_entry_safe(mad_send_wr, temp_mad_send_wr,
2321 &cancel_list, agent_list) {
34816ad9
SH
2322 mad_send_wc.send_buf = &mad_send_wr->send_buf;
2323 list_del(&mad_send_wr->agent_list);
1da177e4
LT
2324 mad_agent_priv->agent.send_handler(&mad_agent_priv->agent,
2325 &mad_send_wc);
1da177e4
LT
2326 atomic_dec(&mad_agent_priv->refcount);
2327 }
2328}
2329
2330static struct ib_mad_send_wr_private*
34816ad9
SH
2331find_send_wr(struct ib_mad_agent_private *mad_agent_priv,
2332 struct ib_mad_send_buf *send_buf)
1da177e4
LT
2333{
2334 struct ib_mad_send_wr_private *mad_send_wr;
2335
2336 list_for_each_entry(mad_send_wr, &mad_agent_priv->wait_list,
2337 agent_list) {
34816ad9 2338 if (&mad_send_wr->send_buf == send_buf)
1da177e4
LT
2339 return mad_send_wr;
2340 }
2341
2342 list_for_each_entry(mad_send_wr, &mad_agent_priv->send_list,
2343 agent_list) {
34816ad9
SH
2344 if (is_data_mad(mad_agent_priv, mad_send_wr->send_buf.mad) &&
2345 &mad_send_wr->send_buf == send_buf)
1da177e4
LT
2346 return mad_send_wr;
2347 }
2348 return NULL;
2349}
2350
34816ad9
SH
2351int ib_modify_mad(struct ib_mad_agent *mad_agent,
2352 struct ib_mad_send_buf *send_buf, u32 timeout_ms)
1da177e4
LT
2353{
2354 struct ib_mad_agent_private *mad_agent_priv;
2355 struct ib_mad_send_wr_private *mad_send_wr;
2356 unsigned long flags;
cabe3cbc 2357 int active;
1da177e4
LT
2358
2359 mad_agent_priv = container_of(mad_agent, struct ib_mad_agent_private,
2360 agent);
2361 spin_lock_irqsave(&mad_agent_priv->lock, flags);
34816ad9 2362 mad_send_wr = find_send_wr(mad_agent_priv, send_buf);
03b61ad2 2363 if (!mad_send_wr || mad_send_wr->status != IB_WC_SUCCESS) {
1da177e4 2364 spin_unlock_irqrestore(&mad_agent_priv->lock, flags);
03b61ad2 2365 return -EINVAL;
1da177e4
LT
2366 }
2367
cabe3cbc 2368 active = (!mad_send_wr->timeout || mad_send_wr->refcount > 1);
03b61ad2 2369 if (!timeout_ms) {
1da177e4 2370 mad_send_wr->status = IB_WC_WR_FLUSH_ERR;
03b61ad2 2371 mad_send_wr->refcount -= (mad_send_wr->timeout > 0);
1da177e4
LT
2372 }
2373
34816ad9 2374 mad_send_wr->send_buf.timeout_ms = timeout_ms;
cabe3cbc 2375 if (active)
03b61ad2
HR
2376 mad_send_wr->timeout = msecs_to_jiffies(timeout_ms);
2377 else
2378 ib_reset_mad_timeout(mad_send_wr, timeout_ms);
2379
1da177e4 2380 spin_unlock_irqrestore(&mad_agent_priv->lock, flags);
03b61ad2
HR
2381 return 0;
2382}
2383EXPORT_SYMBOL(ib_modify_mad);
1da177e4 2384
34816ad9
SH
2385void ib_cancel_mad(struct ib_mad_agent *mad_agent,
2386 struct ib_mad_send_buf *send_buf)
03b61ad2 2387{
34816ad9 2388 ib_modify_mad(mad_agent, send_buf, 0);
1da177e4
LT
2389}
2390EXPORT_SYMBOL(ib_cancel_mad);
2391
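/*
 * Usage sketch (hypothetical client code, not part of mad.c): giving up on
 * an outstanding request.  Passing 0 for timeout_ms cancels the send --
 * that is all ib_cancel_mad() does -- and the flushed request is then
 * reported to the client's send_handler with IB_WC_WR_FLUSH_ERR.
 */
static void example_abort_request(struct ib_mad_agent *agent,
				  struct ib_mad_send_buf *send_buf)
{
	/*
	 * Shorten the remaining timeout to 100 ms; passing 0 instead
	 * cancels the send, which is exactly what ib_cancel_mad() does.
	 */
	if (ib_modify_mad(agent, send_buf, 100))
		pr_debug("request already completed or was canceled\n");
}
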
c4028958 2392static void local_completions(struct work_struct *work)
1da177e4
LT
2393{
2394 struct ib_mad_agent_private *mad_agent_priv;
2395 struct ib_mad_local_private *local;
2396 struct ib_mad_agent_private *recv_mad_agent;
2397 unsigned long flags;
1d9bc6d6 2398 int free_mad;
1da177e4
LT
2399 struct ib_wc wc;
2400 struct ib_mad_send_wc mad_send_wc;
2401
c4028958
DH
2402 mad_agent_priv =
2403 container_of(work, struct ib_mad_agent_private, local_work);
1da177e4
LT
2404
2405 spin_lock_irqsave(&mad_agent_priv->lock, flags);
2406 while (!list_empty(&mad_agent_priv->local_list)) {
2407 local = list_entry(mad_agent_priv->local_list.next,
2408 struct ib_mad_local_private,
2409 completion_list);
37289efe 2410 list_del(&local->completion_list);
1da177e4 2411 spin_unlock_irqrestore(&mad_agent_priv->lock, flags);
1d9bc6d6 2412 free_mad = 0;
1da177e4
LT
2413 if (local->mad_priv) {
2414 recv_mad_agent = local->recv_mad_agent;
2415 if (!recv_mad_agent) {
7ef5d4b0
IW
2416 dev_err(&mad_agent_priv->agent.device->dev,
2417 "No receive MAD agent for local completion\n");
1d9bc6d6 2418 free_mad = 1;
1da177e4
LT
2419 goto local_send_completion;
2420 }
2421
2422 /*
2423 * Defined behavior is to complete response
2424 * before request
2425 */
062dbb69
MT
2426 build_smp_wc(recv_mad_agent->agent.qp,
2427 (unsigned long) local->mad_send_wr,
97f52eb4 2428 be16_to_cpu(IB_LID_PERMISSIVE),
34816ad9 2429 0, recv_mad_agent->agent.port_num, &wc);
1da177e4
LT
2430
2431 local->mad_priv->header.recv_wc.wc = &wc;
2432 local->mad_priv->header.recv_wc.mad_len =
2433 sizeof(struct ib_mad);
fa619a77
HR
2434 INIT_LIST_HEAD(&local->mad_priv->header.recv_wc.rmpp_list);
2435 list_add(&local->mad_priv->header.recv_wc.recv_buf.list,
2436 &local->mad_priv->header.recv_wc.rmpp_list);
1da177e4
LT
2437 local->mad_priv->header.recv_wc.recv_buf.grh = NULL;
2438 local->mad_priv->header.recv_wc.recv_buf.mad =
2439 &local->mad_priv->mad.mad;
2440 if (atomic_read(&recv_mad_agent->qp_info->snoop_count))
2441 snoop_recv(recv_mad_agent->qp_info,
2442 &local->mad_priv->header.recv_wc,
2443 IB_MAD_SNOOP_RECVS);
2444 recv_mad_agent->agent.recv_handler(
2445 &recv_mad_agent->agent,
2446 &local->mad_priv->header.recv_wc);
2447 spin_lock_irqsave(&recv_mad_agent->lock, flags);
2448 atomic_dec(&recv_mad_agent->refcount);
2449 spin_unlock_irqrestore(&recv_mad_agent->lock, flags);
2450 }
2451
2452local_send_completion:
2453 /* Complete send */
2454 mad_send_wc.status = IB_WC_SUCCESS;
2455 mad_send_wc.vendor_err = 0;
34816ad9 2456 mad_send_wc.send_buf = &local->mad_send_wr->send_buf;
1da177e4 2457 if (atomic_read(&mad_agent_priv->qp_info->snoop_count))
34816ad9
SH
2458 snoop_send(mad_agent_priv->qp_info,
2459 &local->mad_send_wr->send_buf,
2460 &mad_send_wc, IB_MAD_SNOOP_SEND_COMPLETIONS);
1da177e4
LT
2461 mad_agent_priv->agent.send_handler(&mad_agent_priv->agent,
2462 &mad_send_wc);
2463
2464 spin_lock_irqsave(&mad_agent_priv->lock, flags);
1da177e4 2465 atomic_dec(&mad_agent_priv->refcount);
1d9bc6d6 2466 if (free_mad)
2c153b93 2467 kmem_cache_free(ib_mad_cache, local->mad_priv);
1da177e4
LT
2468 kfree(local);
2469 }
2470 spin_unlock_irqrestore(&mad_agent_priv->lock, flags);
2471}
2472
f75b7a52
HR
2473static int retry_send(struct ib_mad_send_wr_private *mad_send_wr)
2474{
2475 int ret;
2476
4fc8cd49 2477 if (!mad_send_wr->retries_left)
f75b7a52
HR
2478 return -ETIMEDOUT;
2479
4fc8cd49
SH
2480 mad_send_wr->retries_left--;
2481 mad_send_wr->send_buf.retries++;
2482
34816ad9 2483 mad_send_wr->timeout = msecs_to_jiffies(mad_send_wr->send_buf.timeout_ms);
f75b7a52 2484
fa619a77
HR
2485 if (mad_send_wr->mad_agent_priv->agent.rmpp_version) {
2486 ret = ib_retry_rmpp(mad_send_wr);
2487 switch (ret) {
2488 case IB_RMPP_RESULT_UNHANDLED:
2489 ret = ib_send_mad(mad_send_wr);
2490 break;
2491 case IB_RMPP_RESULT_CONSUMED:
2492 ret = 0;
2493 break;
2494 default:
2495 ret = -ECOMM;
2496 break;
2497 }
2498 } else
2499 ret = ib_send_mad(mad_send_wr);
f75b7a52
HR
2500
2501 if (!ret) {
2502 mad_send_wr->refcount++;
f75b7a52
HR
2503 list_add_tail(&mad_send_wr->agent_list,
2504 &mad_send_wr->mad_agent_priv->send_list);
2505 }
2506 return ret;
2507}
2508
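/*
 * Usage sketch (hypothetical client code, not part of mad.c; assumes a
 * send buffer already created for this agent): the retry machinery above
 * is driven by what the client sets on the send buffer before posting --
 * retries_left is seeded from send_buf->retries when the MAD is posted,
 * and the response timeout from send_buf->timeout_ms.
 */
static int example_post_with_retries(struct ib_mad_send_buf *send_buf)
{
	send_buf->timeout_ms = 1000;	/* wait up to 1 s for the response */
	send_buf->retries = 3;		/* then resend at most three times */

	return ib_post_send_mad(send_buf, NULL);
}
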
c4028958 2509static void timeout_sends(struct work_struct *work)
1da177e4
LT
2510{
2511 struct ib_mad_agent_private *mad_agent_priv;
2512 struct ib_mad_send_wr_private *mad_send_wr;
2513 struct ib_mad_send_wc mad_send_wc;
2514 unsigned long flags, delay;
2515
c4028958
DH
2516 mad_agent_priv = container_of(work, struct ib_mad_agent_private,
2517 timed_work.work);
1da177e4
LT
2518 mad_send_wc.vendor_err = 0;
2519
2520 spin_lock_irqsave(&mad_agent_priv->lock, flags);
2521 while (!list_empty(&mad_agent_priv->wait_list)) {
2522 mad_send_wr = list_entry(mad_agent_priv->wait_list.next,
2523 struct ib_mad_send_wr_private,
2524 agent_list);
2525
2526 if (time_after(mad_send_wr->timeout, jiffies)) {
2527 delay = mad_send_wr->timeout - jiffies;
2528 if ((long)delay <= 0)
2529 delay = 1;
2530 queue_delayed_work(mad_agent_priv->qp_info->
2531 port_priv->wq,
2532 &mad_agent_priv->timed_work, delay);
2533 break;
2534 }
2535
dbf9227b 2536 list_del(&mad_send_wr->agent_list);
29bb33dd
HR
2537 if (mad_send_wr->status == IB_WC_SUCCESS &&
2538 !retry_send(mad_send_wr))
f75b7a52
HR
2539 continue;
2540
1da177e4
LT
2541 spin_unlock_irqrestore(&mad_agent_priv->lock, flags);
2542
03b61ad2
HR
2543 if (mad_send_wr->status == IB_WC_SUCCESS)
2544 mad_send_wc.status = IB_WC_RESP_TIMEOUT_ERR;
2545 else
2546 mad_send_wc.status = mad_send_wr->status;
34816ad9 2547 mad_send_wc.send_buf = &mad_send_wr->send_buf;
1da177e4
LT
2548 mad_agent_priv->agent.send_handler(&mad_agent_priv->agent,
2549 &mad_send_wc);
2550
1da177e4
LT
2551 atomic_dec(&mad_agent_priv->refcount);
2552 spin_lock_irqsave(&mad_agent_priv->lock, flags);
2553 }
2554 spin_unlock_irqrestore(&mad_agent_priv->lock, flags);
2555}
2556
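/*
 * Sketch of the client side of the timeout path above (hypothetical
 * callback, not part of mad.c): an expired request reaches the agent's
 * send_handler with IB_WC_RESP_TIMEOUT_ERR instead of ever showing up as
 * a receive completion.
 */
static void example_send_handler(struct ib_mad_agent *agent,
				 struct ib_mad_send_wc *mad_send_wc)
{
	if (mad_send_wc->status == IB_WC_RESP_TIMEOUT_ERR)
		pr_debug("request timed out without a response\n");

	ib_free_send_mad(mad_send_wc->send_buf);
}
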
5dd2ce12 2557static void ib_mad_thread_completion_handler(struct ib_cq *cq, void *arg)
1da177e4
LT
2558{
2559 struct ib_mad_port_private *port_priv = cq->cq_context;
dc05980d 2560 unsigned long flags;
1da177e4 2561
dc05980d
MT
2562 spin_lock_irqsave(&ib_mad_port_list_lock, flags);
2563 if (!list_empty(&port_priv->port_list))
2564 queue_work(port_priv->wq, &port_priv->work);
2565 spin_unlock_irqrestore(&ib_mad_port_list_lock, flags);
1da177e4
LT
2566}
2567
2568/*
2569 * Allocate receive MADs and post receive WRs for them
2570 */
2571static int ib_mad_post_receive_mads(struct ib_mad_qp_info *qp_info,
2572 struct ib_mad_private *mad)
2573{
2574 unsigned long flags;
2575 int post, ret;
2576 struct ib_mad_private *mad_priv;
2577 struct ib_sge sg_list;
2578 struct ib_recv_wr recv_wr, *bad_recv_wr;
2579 struct ib_mad_queue *recv_queue = &qp_info->recv_queue;
2580
2581 /* Initialize common scatter list fields */
2582 sg_list.length = sizeof *mad_priv - sizeof mad_priv->header;
2583 sg_list.lkey = (*qp_info->port_priv->mr).lkey;
2584
2585 /* Initialize common receive WR fields */
2586 recv_wr.next = NULL;
2587 recv_wr.sg_list = &sg_list;
2588 recv_wr.num_sge = 1;
2589
2590 do {
2591 /* Allocate and map receive buffer */
2592 if (mad) {
2593 mad_priv = mad;
2594 mad = NULL;
2595 } else {
2596 mad_priv = kmem_cache_alloc(ib_mad_cache, GFP_KERNEL);
2597 if (!mad_priv) {
7ef5d4b0
IW
2598 dev_err(&qp_info->port_priv->device->dev,
2599 "No memory for receive buffer\n");
1da177e4
LT
2600 ret = -ENOMEM;
2601 break;
2602 }
2603 }
1527106f
RC
2604 sg_list.addr = ib_dma_map_single(qp_info->port_priv->device,
2605 &mad_priv->grh,
2606 sizeof *mad_priv -
2607 sizeof mad_priv->header,
2608 DMA_FROM_DEVICE);
2c34e68f
YB
2609 if (unlikely(ib_dma_mapping_error(qp_info->port_priv->device,
2610 sg_list.addr))) {
2611 ret = -ENOMEM;
2612 break;
2613 }
1527106f 2614 mad_priv->header.mapping = sg_list.addr;
1da177e4
LT
2615 recv_wr.wr_id = (unsigned long)&mad_priv->header.mad_list;
2616 mad_priv->header.mad_list.mad_queue = recv_queue;
2617
2618 /* Post receive WR */
2619 spin_lock_irqsave(&recv_queue->lock, flags);
2620 post = (++recv_queue->count < recv_queue->max_active);
2621 list_add_tail(&mad_priv->header.mad_list.list, &recv_queue->list);
2622 spin_unlock_irqrestore(&recv_queue->lock, flags);
2623 ret = ib_post_recv(qp_info->qp, &recv_wr, &bad_recv_wr);
2624 if (ret) {
2625 spin_lock_irqsave(&recv_queue->lock, flags);
2626 list_del(&mad_priv->header.mad_list.list);
2627 recv_queue->count--;
2628 spin_unlock_irqrestore(&recv_queue->lock, flags);
1527106f
RC
2629 ib_dma_unmap_single(qp_info->port_priv->device,
2630 mad_priv->header.mapping,
2631 sizeof *mad_priv -
2632 sizeof mad_priv->header,
2633 DMA_FROM_DEVICE);
1da177e4 2634 kmem_cache_free(ib_mad_cache, mad_priv);
7ef5d4b0
IW
2635 dev_err(&qp_info->port_priv->device->dev,
2636 "ib_post_recv failed: %d\n", ret);
1da177e4
LT
2637 break;
2638 }
2639 } while (post);
2640
2641 return ret;
2642}
2643
2644/*
2645 * Return all the posted receive MADs
2646 */
2647static void cleanup_recv_queue(struct ib_mad_qp_info *qp_info)
2648{
2649 struct ib_mad_private_header *mad_priv_hdr;
2650 struct ib_mad_private *recv;
2651 struct ib_mad_list_head *mad_list;
2652
fac70d51
EC
2653 if (!qp_info->qp)
2654 return;
2655
1da177e4
LT
2656 while (!list_empty(&qp_info->recv_queue.list)) {
2657
2658 mad_list = list_entry(qp_info->recv_queue.list.next,
2659 struct ib_mad_list_head, list);
2660 mad_priv_hdr = container_of(mad_list,
2661 struct ib_mad_private_header,
2662 mad_list);
2663 recv = container_of(mad_priv_hdr, struct ib_mad_private,
2664 header);
2665
2666 /* Remove from posted receive MAD list */
2667 list_del(&mad_list->list);
2668
1527106f
RC
2669 ib_dma_unmap_single(qp_info->port_priv->device,
2670 recv->header.mapping,
2671 sizeof(struct ib_mad_private) -
2672 sizeof(struct ib_mad_private_header),
2673 DMA_FROM_DEVICE);
1da177e4
LT
2674 kmem_cache_free(ib_mad_cache, recv);
2675 }
2676
2677 qp_info->recv_queue.count = 0;
2678}
2679
2680/*
2681 * Start the port
2682 */
2683static int ib_mad_port_start(struct ib_mad_port_private *port_priv)
2684{
2685 int ret, i;
2686 struct ib_qp_attr *attr;
2687 struct ib_qp *qp;
ef5ed416 2688 u16 pkey_index;
1da177e4
LT
2689
2690 attr = kmalloc(sizeof *attr, GFP_KERNEL);
3cd96564 2691 if (!attr) {
7ef5d4b0
IW
2692 dev_err(&port_priv->device->dev,
2693 "Couldn't kmalloc ib_qp_attr\n");
1da177e4
LT
2694 return -ENOMEM;
2695 }
2696
ef5ed416
JM
2697 ret = ib_find_pkey(port_priv->device, port_priv->port_num,
2698 IB_DEFAULT_PKEY_FULL, &pkey_index);
2699 if (ret)
2700 pkey_index = 0;
2701
1da177e4
LT
2702 for (i = 0; i < IB_MAD_QPS_CORE; i++) {
2703 qp = port_priv->qp_info[i].qp;
fac70d51
EC
2704 if (!qp)
2705 continue;
2706
1da177e4
LT
2707 /*
2708 * PKey index for QP1 is irrelevant but
2709 * one is needed for the Reset to Init transition
2710 */
2711 attr->qp_state = IB_QPS_INIT;
ef5ed416 2712 attr->pkey_index = pkey_index;
1da177e4
LT
2713 attr->qkey = (qp->qp_num == 0) ? 0 : IB_QP1_QKEY;
2714 ret = ib_modify_qp(qp, attr, IB_QP_STATE |
2715 IB_QP_PKEY_INDEX | IB_QP_QKEY);
2716 if (ret) {
7ef5d4b0
IW
2717 dev_err(&port_priv->device->dev,
2718 "Couldn't change QP%d state to INIT: %d\n",
2719 i, ret);
1da177e4
LT
2720 goto out;
2721 }
2722
2723 attr->qp_state = IB_QPS_RTR;
2724 ret = ib_modify_qp(qp, attr, IB_QP_STATE);
2725 if (ret) {
7ef5d4b0
IW
2726 dev_err(&port_priv->device->dev,
2727 "Couldn't change QP%d state to RTR: %d\n",
2728 i, ret);
1da177e4
LT
2729 goto out;
2730 }
2731
2732 attr->qp_state = IB_QPS_RTS;
2733 attr->sq_psn = IB_MAD_SEND_Q_PSN;
2734 ret = ib_modify_qp(qp, attr, IB_QP_STATE | IB_QP_SQ_PSN);
2735 if (ret) {
7ef5d4b0
IW
2736 dev_err(&port_priv->device->dev,
2737 "Couldn't change QP%d state to RTS: %d\n",
2738 i, ret);
1da177e4
LT
2739 goto out;
2740 }
2741 }
2742
2743 ret = ib_req_notify_cq(port_priv->cq, IB_CQ_NEXT_COMP);
2744 if (ret) {
7ef5d4b0
IW
2745 dev_err(&port_priv->device->dev,
2746 "Failed to request completion notification: %d\n",
2747 ret);
1da177e4
LT
2748 goto out;
2749 }
2750
2751 for (i = 0; i < IB_MAD_QPS_CORE; i++) {
fac70d51
EC
2752 if (!port_priv->qp_info[i].qp)
2753 continue;
2754
1da177e4
LT
2755 ret = ib_mad_post_receive_mads(&port_priv->qp_info[i], NULL);
2756 if (ret) {
7ef5d4b0
IW
2757 dev_err(&port_priv->device->dev,
2758 "Couldn't post receive WRs\n");
1da177e4
LT
2759 goto out;
2760 }
2761 }
2762out:
2763 kfree(attr);
2764 return ret;
2765}
2766
2767static void qp_event_handler(struct ib_event *event, void *qp_context)
2768{
2769 struct ib_mad_qp_info *qp_info = qp_context;
2770
2771 /* It's worse than that! He's dead, Jim! */
7ef5d4b0
IW
2772 dev_err(&qp_info->port_priv->device->dev,
2773 "Fatal error (%d) on MAD QP (%d)\n",
1da177e4
LT
2774 event->event, qp_info->qp->qp_num);
2775}
2776
2777static void init_mad_queue(struct ib_mad_qp_info *qp_info,
2778 struct ib_mad_queue *mad_queue)
2779{
2780 mad_queue->qp_info = qp_info;
2781 mad_queue->count = 0;
2782 spin_lock_init(&mad_queue->lock);
2783 INIT_LIST_HEAD(&mad_queue->list);
2784}
2785
2786static void init_mad_qp(struct ib_mad_port_private *port_priv,
2787 struct ib_mad_qp_info *qp_info)
2788{
2789 qp_info->port_priv = port_priv;
2790 init_mad_queue(qp_info, &qp_info->send_queue);
2791 init_mad_queue(qp_info, &qp_info->recv_queue);
2792 INIT_LIST_HEAD(&qp_info->overflow_list);
2793 spin_lock_init(&qp_info->snoop_lock);
2794 qp_info->snoop_table = NULL;
2795 qp_info->snoop_table_size = 0;
2796 atomic_set(&qp_info->snoop_count, 0);
2797}
2798
2799static int create_mad_qp(struct ib_mad_qp_info *qp_info,
2800 enum ib_qp_type qp_type)
2801{
2802 struct ib_qp_init_attr qp_init_attr;
2803 int ret;
2804
2805 memset(&qp_init_attr, 0, sizeof qp_init_attr);
2806 qp_init_attr.send_cq = qp_info->port_priv->cq;
2807 qp_init_attr.recv_cq = qp_info->port_priv->cq;
2808 qp_init_attr.sq_sig_type = IB_SIGNAL_ALL_WR;
b76aabc3
HR
2809 qp_init_attr.cap.max_send_wr = mad_sendq_size;
2810 qp_init_attr.cap.max_recv_wr = mad_recvq_size;
1da177e4
LT
2811 qp_init_attr.cap.max_send_sge = IB_MAD_SEND_REQ_MAX_SG;
2812 qp_init_attr.cap.max_recv_sge = IB_MAD_RECV_REQ_MAX_SG;
2813 qp_init_attr.qp_type = qp_type;
2814 qp_init_attr.port_num = qp_info->port_priv->port_num;
2815 qp_init_attr.qp_context = qp_info;
2816 qp_init_attr.event_handler = qp_event_handler;
2817 qp_info->qp = ib_create_qp(qp_info->port_priv->pd, &qp_init_attr);
2818 if (IS_ERR(qp_info->qp)) {
7ef5d4b0
IW
2819 dev_err(&qp_info->port_priv->device->dev,
2820 "Couldn't create ib_mad QP%d\n",
2821 get_spl_qp_index(qp_type));
1da177e4
LT
2822 ret = PTR_ERR(qp_info->qp);
2823 goto error;
2824 }
2825 /* Use minimum queue sizes unless the CQ is resized */
b76aabc3
HR
2826 qp_info->send_queue.max_active = mad_sendq_size;
2827 qp_info->recv_queue.max_active = mad_recvq_size;
1da177e4
LT
2828 return 0;
2829
2830error:
2831 return ret;
2832}
2833
2834static void destroy_mad_qp(struct ib_mad_qp_info *qp_info)
2835{
fac70d51
EC
2836 if (!qp_info->qp)
2837 return;
2838
1da177e4 2839 ib_destroy_qp(qp_info->qp);
6044ec88 2840 kfree(qp_info->snoop_table);
1da177e4
LT
2841}
2842
2843/*
2844 * Open the port
2845 * Create the QP, PD, MR, and CQ if needed
2846 */
2847static int ib_mad_port_open(struct ib_device *device,
2848 int port_num)
2849{
2850 int ret, cq_size;
2851 struct ib_mad_port_private *port_priv;
2852 unsigned long flags;
2853 char name[sizeof "ib_mad123"];
fac70d51 2854 int has_smi;
1da177e4 2855
1da177e4 2856 /* Create new device info */
de6eb66b 2857 port_priv = kzalloc(sizeof *port_priv, GFP_KERNEL);
1da177e4 2858 if (!port_priv) {
7ef5d4b0 2859 dev_err(&device->dev, "No memory for ib_mad_port_private\n");
1da177e4
LT
2860 return -ENOMEM;
2861 }
de6eb66b 2862
1da177e4
LT
2863 port_priv->device = device;
2864 port_priv->port_num = port_num;
2865 spin_lock_init(&port_priv->reg_lock);
2866 INIT_LIST_HEAD(&port_priv->agent_list);
2867 init_mad_qp(port_priv, &port_priv->qp_info[0]);
2868 init_mad_qp(port_priv, &port_priv->qp_info[1]);
2869
fac70d51
EC
2870 cq_size = mad_sendq_size + mad_recvq_size;
2871 has_smi = rdma_port_get_link_layer(device, port_num) == IB_LINK_LAYER_INFINIBAND;
2872 if (has_smi)
2873 cq_size *= 2;
2874
1da177e4 2875 port_priv->cq = ib_create_cq(port_priv->device,
5dd2ce12 2876 ib_mad_thread_completion_handler,
f4fd0b22 2877 NULL, port_priv, cq_size, 0);
1da177e4 2878 if (IS_ERR(port_priv->cq)) {
7ef5d4b0 2879 dev_err(&device->dev, "Couldn't create ib_mad CQ\n");
1da177e4
LT
2880 ret = PTR_ERR(port_priv->cq);
2881 goto error3;
2882 }
2883
2884 port_priv->pd = ib_alloc_pd(device);
2885 if (IS_ERR(port_priv->pd)) {
7ef5d4b0 2886 dev_err(&device->dev, "Couldn't create ib_mad PD\n");
1da177e4
LT
2887 ret = PTR_ERR(port_priv->pd);
2888 goto error4;
2889 }
2890
2891 port_priv->mr = ib_get_dma_mr(port_priv->pd, IB_ACCESS_LOCAL_WRITE);
2892 if (IS_ERR(port_priv->mr)) {
7ef5d4b0 2893 dev_err(&device->dev, "Couldn't get ib_mad DMA MR\n");
1da177e4
LT
2894 ret = PTR_ERR(port_priv->mr);
2895 goto error5;
2896 }
2897
fac70d51
EC
2898 if (has_smi) {
2899 ret = create_mad_qp(&port_priv->qp_info[0], IB_QPT_SMI);
2900 if (ret)
2901 goto error6;
2902 }
1da177e4
LT
2903 ret = create_mad_qp(&port_priv->qp_info[1], IB_QPT_GSI);
2904 if (ret)
2905 goto error7;
2906
2907 snprintf(name, sizeof name, "ib_mad%d", port_num);
2908 port_priv->wq = create_singlethread_workqueue(name);
2909 if (!port_priv->wq) {
2910 ret = -ENOMEM;
2911 goto error8;
2912 }
c4028958 2913 INIT_WORK(&port_priv->work, ib_mad_completion_handler);
1da177e4 2914
dc05980d
MT
2915 spin_lock_irqsave(&ib_mad_port_list_lock, flags);
2916 list_add_tail(&port_priv->port_list, &ib_mad_port_list);
2917 spin_unlock_irqrestore(&ib_mad_port_list_lock, flags);
2918
1da177e4
LT
2919 ret = ib_mad_port_start(port_priv);
2920 if (ret) {
7ef5d4b0 2921 dev_err(&device->dev, "Couldn't start port\n");
1da177e4
LT
2922 goto error9;
2923 }
2924
1da177e4
LT
2925 return 0;
2926
2927error9:
dc05980d
MT
2928 spin_lock_irqsave(&ib_mad_port_list_lock, flags);
2929 list_del_init(&port_priv->port_list);
2930 spin_unlock_irqrestore(&ib_mad_port_list_lock, flags);
2931
1da177e4
LT
2932 destroy_workqueue(port_priv->wq);
2933error8:
2934 destroy_mad_qp(&port_priv->qp_info[1]);
2935error7:
2936 destroy_mad_qp(&port_priv->qp_info[0]);
2937error6:
2938 ib_dereg_mr(port_priv->mr);
2939error5:
2940 ib_dealloc_pd(port_priv->pd);
2941error4:
2942 ib_destroy_cq(port_priv->cq);
2943 cleanup_recv_queue(&port_priv->qp_info[1]);
2944 cleanup_recv_queue(&port_priv->qp_info[0]);
2945error3:
2946 kfree(port_priv);
2947
2948 return ret;
2949}
2950
2951/*
2952 * Close the port
2953 * If there are no classes using the port, free the port
2954 * resources (CQ, MR, PD, QP) and remove the port's info structure
2955 */
2956static int ib_mad_port_close(struct ib_device *device, int port_num)
2957{
2958 struct ib_mad_port_private *port_priv;
2959 unsigned long flags;
2960
2961 spin_lock_irqsave(&ib_mad_port_list_lock, flags);
2962 port_priv = __ib_get_mad_port(device, port_num);
2963 if (port_priv == NULL) {
2964 spin_unlock_irqrestore(&ib_mad_port_list_lock, flags);
7ef5d4b0 2965 dev_err(&device->dev, "Port %d not found\n", port_num);
1da177e4
LT
2966 return -ENODEV;
2967 }
dc05980d 2968 list_del_init(&port_priv->port_list);
1da177e4
LT
2969 spin_unlock_irqrestore(&ib_mad_port_list_lock, flags);
2970
1da177e4
LT
2971 destroy_workqueue(port_priv->wq);
2972 destroy_mad_qp(&port_priv->qp_info[1]);
2973 destroy_mad_qp(&port_priv->qp_info[0]);
2974 ib_dereg_mr(port_priv->mr);
2975 ib_dealloc_pd(port_priv->pd);
2976 ib_destroy_cq(port_priv->cq);
2977 cleanup_recv_queue(&port_priv->qp_info[1]);
2978 cleanup_recv_queue(&port_priv->qp_info[0]);
2979 /* XXX: Handle deallocation of MAD registration tables */
2980
2981 kfree(port_priv);
2982
2983 return 0;
2984}
2985
2986static void ib_mad_init_device(struct ib_device *device)
2987{
4ab6fb7e 2988 int start, end, i;
1da177e4 2989
07ebafba
TT
2990 if (rdma_node_get_transport(device->node_type) != RDMA_TRANSPORT_IB)
2991 return;
2992
2993 if (device->node_type == RDMA_NODE_IB_SWITCH) {
4ab6fb7e
RD
2994 start = 0;
2995 end = 0;
1da177e4 2996 } else {
4ab6fb7e
RD
2997 start = 1;
2998 end = device->phys_port_cnt;
1da177e4 2999 }
4ab6fb7e
RD
3000
3001 for (i = start; i <= end; i++) {
3002 if (ib_mad_port_open(device, i)) {
7ef5d4b0 3003 dev_err(&device->dev, "Couldn't open port %d\n", i);
4ab6fb7e 3004 goto error;
1da177e4 3005 }
4ab6fb7e 3006 if (ib_agent_port_open(device, i)) {
7ef5d4b0
IW
3007 dev_err(&device->dev,
3008 "Couldn't open port %d for agents\n", i);
4ab6fb7e 3009 goto error_agent;
1da177e4
LT
3010 }
3011 }
f68bcc2d 3012 return;
1da177e4 3013
4ab6fb7e
RD
3014error_agent:
3015 if (ib_mad_port_close(device, i))
7ef5d4b0 3016 dev_err(&device->dev, "Couldn't close port %d\n", i);
4ab6fb7e
RD
3017
3018error:
3019 i--;
3020
3021 while (i >= start) {
3022 if (ib_agent_port_close(device, i))
7ef5d4b0
IW
3023 dev_err(&device->dev,
3024 "Couldn't close port %d for agents\n", i);
4ab6fb7e 3025 if (ib_mad_port_close(device, i))
7ef5d4b0 3026 dev_err(&device->dev, "Couldn't close port %d\n", i);
1da177e4
LT
3027 i--;
3028 }
1da177e4
LT
3029}
3030
3031static void ib_mad_remove_device(struct ib_device *device)
3032{
f68bcc2d 3033 int i, num_ports, cur_port;
1da177e4 3034
070e140c
SW
3035 if (rdma_node_get_transport(device->node_type) != RDMA_TRANSPORT_IB)
3036 return;
3037
07ebafba 3038 if (device->node_type == RDMA_NODE_IB_SWITCH) {
1da177e4
LT
3039 num_ports = 1;
3040 cur_port = 0;
3041 } else {
3042 num_ports = device->phys_port_cnt;
3043 cur_port = 1;
3044 }
3045 for (i = 0; i < num_ports; i++, cur_port++) {
f68bcc2d 3046 if (ib_agent_port_close(device, cur_port))
7ef5d4b0
IW
3047 dev_err(&device->dev,
3048 "Couldn't close port %d for agents\n",
3049 cur_port);
f68bcc2d 3050 if (ib_mad_port_close(device, cur_port))
7ef5d4b0
IW
3051 dev_err(&device->dev, "Couldn't close port %d\n",
3052 cur_port);
1da177e4
LT
3053 }
3054}
3055
3056static struct ib_client mad_client = {
3057 .name = "mad",
3058 .add = ib_mad_init_device,
3059 .remove = ib_mad_remove_device
3060};
3061
3062static int __init ib_mad_init_module(void)
3063{
3064 int ret;
3065
b76aabc3
HR
3066 mad_recvq_size = min(mad_recvq_size, IB_MAD_QP_MAX_SIZE);
3067 mad_recvq_size = max(mad_recvq_size, IB_MAD_QP_MIN_SIZE);
3068
3069 mad_sendq_size = min(mad_sendq_size, IB_MAD_QP_MAX_SIZE);
3070 mad_sendq_size = max(mad_sendq_size, IB_MAD_QP_MIN_SIZE);
3071
1da177e4
LT
3072 ib_mad_cache = kmem_cache_create("ib_mad",
3073 sizeof(struct ib_mad_private),
3074 0,
3075 SLAB_HWCACHE_ALIGN,
1da177e4
LT
3076 NULL);
3077 if (!ib_mad_cache) {
7ef5d4b0 3078 pr_err("Couldn't create ib_mad cache\n");
1da177e4
LT
3079 ret = -ENOMEM;
3080 goto error1;
3081 }
3082
3083 INIT_LIST_HEAD(&ib_mad_port_list);
3084
3085 if (ib_register_client(&mad_client)) {
7ef5d4b0 3086 pr_err("Couldn't register ib_mad client\n");
1da177e4
LT
3087 ret = -EINVAL;
3088 goto error2;
3089 }
3090
3091 return 0;
3092
3093error2:
3094 kmem_cache_destroy(ib_mad_cache);
3095error1:
3096 return ret;
3097}
3098
3099static void __exit ib_mad_cleanup_module(void)
3100{
3101 ib_unregister_client(&mad_client);
1a1d92c1 3102 kmem_cache_destroy(ib_mad_cache);
1da177e4
LT
3103}
3104
3105module_init(ib_mad_init_module);
3106module_exit(ib_mad_cleanup_module);