IB: Use DEFINE_SPINLOCK() for static spinlocks
[linux-2.6-block.git] / drivers / infiniband / core / mad.c
1/*
2 * Copyright (c) 2004-2007 Voltaire, Inc. All rights reserved.
3 * Copyright (c) 2005 Intel Corporation. All rights reserved.
4 * Copyright (c) 2005 Mellanox Technologies Ltd. All rights reserved.
5 *
6 * This software is available to you under a choice of one of two
7 * licenses. You may choose to be licensed under the terms of the GNU
8 * General Public License (GPL) Version 2, available from the file
9 * COPYING in the main directory of this source tree, or the
10 * OpenIB.org BSD license below:
11 *
12 * Redistribution and use in source and binary forms, with or
13 * without modification, are permitted provided that the following
14 * conditions are met:
15 *
16 * - Redistributions of source code must retain the above
17 * copyright notice, this list of conditions and the following
18 * disclaimer.
19 *
20 * - Redistributions in binary form must reproduce the above
21 * copyright notice, this list of conditions and the following
22 * disclaimer in the documentation and/or other materials
23 * provided with the distribution.
24 *
25 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
26 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
27 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
28 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
29 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
30 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
31 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
32 * SOFTWARE.
33 *
34 */
35#include <linux/dma-mapping.h>
36#include <rdma/ib_cache.h>
37
38#include "mad_priv.h"
39#include "mad_rmpp.h"
40#include "smi.h"
41#include "agent.h"
42
43MODULE_LICENSE("Dual BSD/GPL");
44MODULE_DESCRIPTION("kernel IB MAD API");
45MODULE_AUTHOR("Hal Rosenstock");
46MODULE_AUTHOR("Sean Hefty");
47
48static struct kmem_cache *ib_mad_cache;
49
50static struct list_head ib_mad_port_list;
51static u32 ib_mad_client_id = 0;
52
53/* Port list lock */
54static DEFINE_SPINLOCK(ib_mad_port_list_lock);
55
56/* Forward declarations */
57static int method_in_use(struct ib_mad_mgmt_method_table **method,
58 struct ib_mad_reg_req *mad_reg_req);
59static void remove_mad_reg_req(struct ib_mad_agent_private *priv);
60static struct ib_mad_agent_private *find_mad_agent(
61 struct ib_mad_port_private *port_priv,
62 struct ib_mad *mad);
63static int ib_mad_post_receive_mads(struct ib_mad_qp_info *qp_info,
64 struct ib_mad_private *mad);
65static void cancel_mads(struct ib_mad_agent_private *mad_agent_priv);
66static void timeout_sends(struct work_struct *work);
67static void local_completions(struct work_struct *work);
68static int add_nonoui_reg_req(struct ib_mad_reg_req *mad_reg_req,
69 struct ib_mad_agent_private *agent_priv,
70 u8 mgmt_class);
71static int add_oui_reg_req(struct ib_mad_reg_req *mad_reg_req,
72 struct ib_mad_agent_private *agent_priv);
73
74/*
75 * Returns an ib_mad_port_private structure or NULL for a device/port
76 * Assumes ib_mad_port_list_lock is being held
77 */
78static inline struct ib_mad_port_private *
79__ib_get_mad_port(struct ib_device *device, int port_num)
80{
81 struct ib_mad_port_private *entry;
82
83 list_for_each_entry(entry, &ib_mad_port_list, port_list) {
84 if (entry->device == device && entry->port_num == port_num)
85 return entry;
86 }
87 return NULL;
88}
89
90/*
91 * Wrapper function to return an ib_mad_port_private structure or NULL
92 * for a device/port
93 */
94static inline struct ib_mad_port_private *
95ib_get_mad_port(struct ib_device *device, int port_num)
96{
97 struct ib_mad_port_private *entry;
98 unsigned long flags;
99
100 spin_lock_irqsave(&ib_mad_port_list_lock, flags);
101 entry = __ib_get_mad_port(device, port_num);
102 spin_unlock_irqrestore(&ib_mad_port_list_lock, flags);
103
104 return entry;
105}
106
107static inline u8 convert_mgmt_class(u8 mgmt_class)
108{
109 /* Alias IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE to 0 */
110 return mgmt_class == IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE ?
111 0 : mgmt_class;
112}
113
114static int get_spl_qp_index(enum ib_qp_type qp_type)
115{
116 switch (qp_type)
117 {
118 case IB_QPT_SMI:
119 return 0;
120 case IB_QPT_GSI:
121 return 1;
122 default:
123 return -1;
124 }
125}
126
127static int vendor_class_index(u8 mgmt_class)
128{
129 return mgmt_class - IB_MGMT_CLASS_VENDOR_RANGE2_START;
130}
131
132static int is_vendor_class(u8 mgmt_class)
133{
134 if ((mgmt_class < IB_MGMT_CLASS_VENDOR_RANGE2_START) ||
135 (mgmt_class > IB_MGMT_CLASS_VENDOR_RANGE2_END))
136 return 0;
137 return 1;
138}
139
140static int is_vendor_oui(char *oui)
141{
142 if (oui[0] || oui[1] || oui[2])
143 return 1;
144 return 0;
145}
146
147static int is_vendor_method_in_use(
148 struct ib_mad_mgmt_vendor_class *vendor_class,
149 struct ib_mad_reg_req *mad_reg_req)
150{
151 struct ib_mad_mgmt_method_table *method;
152 int i;
153
154 for (i = 0; i < MAX_MGMT_OUI; i++) {
155 if (!memcmp(vendor_class->oui[i], mad_reg_req->oui, 3)) {
156 method = vendor_class->method_table[i];
157 if (method) {
158 if (method_in_use(&method, mad_reg_req))
159 return 1;
160 else
161 break;
162 }
163 }
164 }
165 return 0;
166}
167
168int ib_response_mad(struct ib_mad *mad)
169{
170 return ((mad->mad_hdr.method & IB_MGMT_METHOD_RESP) ||
171 (mad->mad_hdr.method == IB_MGMT_METHOD_TRAP_REPRESS) ||
172 ((mad->mad_hdr.mgmt_class == IB_MGMT_CLASS_BM) &&
173 (mad->mad_hdr.attr_mod & IB_BM_ATTR_MOD_RESP)));
174}
175EXPORT_SYMBOL(ib_response_mad);
176
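/*
 * Illustrative sketch (not part of mad.c): how a dispatcher can use
 * ib_response_mad().  A GetResp MAD carries IB_MGMT_METHOD_GET_RESP, whose
 * IB_MGMT_METHOD_RESP bit is set, so it is treated as a response and routed
 * back to the requester by the high 32 bits of its TID; anything else is
 * dispatched by class/method to a registered agent, as find_mad_agent()
 * does below.
 *
 *	if (ib_response_mad(mad))
 *		hi_tid = be64_to_cpu(mad->mad_hdr.tid) >> 32; /" match sender "/
 *	else
 *		mgmt_class = mad->mad_hdr.mgmt_class;	      /" look up agent "/
 */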
177/*
178 * ib_register_mad_agent - Register to send/receive MADs
179 */
180struct ib_mad_agent *ib_register_mad_agent(struct ib_device *device,
181 u8 port_num,
182 enum ib_qp_type qp_type,
183 struct ib_mad_reg_req *mad_reg_req,
184 u8 rmpp_version,
185 ib_mad_send_handler send_handler,
186 ib_mad_recv_handler recv_handler,
187 void *context)
188{
189 struct ib_mad_port_private *port_priv;
190 struct ib_mad_agent *ret = ERR_PTR(-EINVAL);
191 struct ib_mad_agent_private *mad_agent_priv;
192 struct ib_mad_reg_req *reg_req = NULL;
193 struct ib_mad_mgmt_class_table *class;
194 struct ib_mad_mgmt_vendor_class_table *vendor;
195 struct ib_mad_mgmt_vendor_class *vendor_class;
196 struct ib_mad_mgmt_method_table *method;
197 int ret2, qpn;
198 unsigned long flags;
199 u8 mgmt_class, vclass;
200
201 /* Validate parameters */
202 qpn = get_spl_qp_index(qp_type);
203 if (qpn == -1)
204 goto error1;
205
206 if (rmpp_version && rmpp_version != IB_MGMT_RMPP_VERSION)
207 goto error1;
208
209 /* Validate MAD registration request if supplied */
210 if (mad_reg_req) {
211 if (mad_reg_req->mgmt_class_version >= MAX_MGMT_VERSION)
212 goto error1;
213 if (!recv_handler)
214 goto error1;
215 if (mad_reg_req->mgmt_class >= MAX_MGMT_CLASS) {
216 /*
217 * IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE is the only
218 * one in this range currently allowed
219 */
220 if (mad_reg_req->mgmt_class !=
221 IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE)
222 goto error1;
223 } else if (mad_reg_req->mgmt_class == 0) {
224 /*
225 * Class 0 is reserved in IBA and is used for
226 * aliasing of IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE
227 */
228 goto error1;
229 } else if (is_vendor_class(mad_reg_req->mgmt_class)) {
230 /*
231 * If class is in "new" vendor range,
232 * ensure supplied OUI is not zero
233 */
234 if (!is_vendor_oui(mad_reg_req->oui))
235 goto error1;
236 }
237 /* Make sure class supplied is consistent with RMPP */
238 if (!ib_is_mad_class_rmpp(mad_reg_req->mgmt_class)) {
239 if (rmpp_version)
240 goto error1;
241 }
242 /* Make sure class supplied is consistent with QP type */
243 if (qp_type == IB_QPT_SMI) {
244 if ((mad_reg_req->mgmt_class !=
245 IB_MGMT_CLASS_SUBN_LID_ROUTED) &&
246 (mad_reg_req->mgmt_class !=
247 IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE))
248 goto error1;
249 } else {
250 if ((mad_reg_req->mgmt_class ==
251 IB_MGMT_CLASS_SUBN_LID_ROUTED) ||
252 (mad_reg_req->mgmt_class ==
253 IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE))
254 goto error1;
255 }
256 } else {
257 /* No registration request supplied */
258 if (!send_handler)
259 goto error1;
260 }
261
262 /* Validate device and port */
263 port_priv = ib_get_mad_port(device, port_num);
264 if (!port_priv) {
265 ret = ERR_PTR(-ENODEV);
266 goto error1;
267 }
268
269 /* Allocate structures */
270 mad_agent_priv = kzalloc(sizeof *mad_agent_priv, GFP_KERNEL);
271 if (!mad_agent_priv) {
272 ret = ERR_PTR(-ENOMEM);
273 goto error1;
274 }
275
276 mad_agent_priv->agent.mr = ib_get_dma_mr(port_priv->qp_info[qpn].qp->pd,
277 IB_ACCESS_LOCAL_WRITE);
278 if (IS_ERR(mad_agent_priv->agent.mr)) {
279 ret = ERR_PTR(-ENOMEM);
280 goto error2;
281 }
282
283 if (mad_reg_req) {
284 reg_req = kmalloc(sizeof *reg_req, GFP_KERNEL);
285 if (!reg_req) {
286 ret = ERR_PTR(-ENOMEM);
287 goto error3;
288 }
289 /* Make a copy of the MAD registration request */
290 memcpy(reg_req, mad_reg_req, sizeof *reg_req);
291 }
292
293 /* Now, fill in the various structures */
294 mad_agent_priv->qp_info = &port_priv->qp_info[qpn];
295 mad_agent_priv->reg_req = reg_req;
296 mad_agent_priv->agent.rmpp_version = rmpp_version;
297 mad_agent_priv->agent.device = device;
298 mad_agent_priv->agent.recv_handler = recv_handler;
299 mad_agent_priv->agent.send_handler = send_handler;
300 mad_agent_priv->agent.context = context;
301 mad_agent_priv->agent.qp = port_priv->qp_info[qpn].qp;
302 mad_agent_priv->agent.port_num = port_num;
303 spin_lock_init(&mad_agent_priv->lock);
304 INIT_LIST_HEAD(&mad_agent_priv->send_list);
305 INIT_LIST_HEAD(&mad_agent_priv->wait_list);
306 INIT_LIST_HEAD(&mad_agent_priv->done_list);
307 INIT_LIST_HEAD(&mad_agent_priv->rmpp_list);
308 INIT_DELAYED_WORK(&mad_agent_priv->timed_work, timeout_sends);
309 INIT_LIST_HEAD(&mad_agent_priv->local_list);
310 INIT_WORK(&mad_agent_priv->local_work, local_completions);
311 atomic_set(&mad_agent_priv->refcount, 1);
312 init_completion(&mad_agent_priv->comp);
313
314 spin_lock_irqsave(&port_priv->reg_lock, flags);
315 mad_agent_priv->agent.hi_tid = ++ib_mad_client_id;
316
317 /*
318 * Make sure MAD registration (if supplied)
319 * is non overlapping with any existing ones
320 */
321 if (mad_reg_req) {
322 mgmt_class = convert_mgmt_class(mad_reg_req->mgmt_class);
323 if (!is_vendor_class(mgmt_class)) {
324 class = port_priv->version[mad_reg_req->
325 mgmt_class_version].class;
326 if (class) {
327 method = class->method_table[mgmt_class];
328 if (method) {
329 if (method_in_use(&method,
330 mad_reg_req))
331 goto error4;
332 }
333 }
334 ret2 = add_nonoui_reg_req(mad_reg_req, mad_agent_priv,
335 mgmt_class);
336 } else {
337 /* "New" vendor class range */
338 vendor = port_priv->version[mad_reg_req->
339 mgmt_class_version].vendor;
340 if (vendor) {
341 vclass = vendor_class_index(mgmt_class);
342 vendor_class = vendor->vendor_class[vclass];
343 if (vendor_class) {
344 if (is_vendor_method_in_use(
345 vendor_class,
346 mad_reg_req))
347 goto error4;
348 }
349 }
350 ret2 = add_oui_reg_req(mad_reg_req, mad_agent_priv);
351 }
352 if (ret2) {
353 ret = ERR_PTR(ret2);
354 goto error4;
355 }
356 }
357
358 /* Add mad agent into port's agent list */
359 list_add_tail(&mad_agent_priv->agent_list, &port_priv->agent_list);
360 spin_unlock_irqrestore(&port_priv->reg_lock, flags);
361
362 return &mad_agent_priv->agent;
363
364error4:
365 spin_unlock_irqrestore(&port_priv->reg_lock, flags);
366 kfree(reg_req);
367error3:
368 ib_dereg_mr(mad_agent_priv->agent.mr);
369error2:
370 kfree(mad_agent_priv);
371error1:
372 return ret;
373}
374EXPORT_SYMBOL(ib_register_mad_agent);
375
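/*
 * Illustrative usage sketch (not part of mad.c; the handler, device and
 * variable names are hypothetical): a client registers a GSI agent for one
 * management class and releases it later with ib_unregister_mad_agent().
 *
 *	struct ib_mad_reg_req req = {
 *		.mgmt_class		= IB_MGMT_CLASS_SUBN_ADM,
 *		.mgmt_class_version	= 2,
 *	};
 *	struct ib_mad_agent *agent;
 *
 *	set_bit(IB_MGMT_METHOD_GET, req.method_mask);
 *	agent = ib_register_mad_agent(device, port_num, IB_QPT_GSI, &req, 0,
 *				      my_send_handler, my_recv_handler,
 *				      my_context);
 *	if (IS_ERR(agent))
 *		return PTR_ERR(agent);
 *	...
 *	ib_unregister_mad_agent(agent);
 */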
376static inline int is_snooping_sends(int mad_snoop_flags)
377{
378 return (mad_snoop_flags &
379 (/*IB_MAD_SNOOP_POSTED_SENDS |
380 IB_MAD_SNOOP_RMPP_SENDS |*/
381 IB_MAD_SNOOP_SEND_COMPLETIONS /*|
382 IB_MAD_SNOOP_RMPP_SEND_COMPLETIONS*/));
383}
384
385static inline int is_snooping_recvs(int mad_snoop_flags)
386{
387 return (mad_snoop_flags &
388 (IB_MAD_SNOOP_RECVS /*|
389 IB_MAD_SNOOP_RMPP_RECVS*/));
390}
391
392static int register_snoop_agent(struct ib_mad_qp_info *qp_info,
393 struct ib_mad_snoop_private *mad_snoop_priv)
394{
395 struct ib_mad_snoop_private **new_snoop_table;
396 unsigned long flags;
397 int i;
398
399 spin_lock_irqsave(&qp_info->snoop_lock, flags);
400 /* Check for empty slot in array. */
401 for (i = 0; i < qp_info->snoop_table_size; i++)
402 if (!qp_info->snoop_table[i])
403 break;
404
405 if (i == qp_info->snoop_table_size) {
406 /* Grow table. */
407 new_snoop_table = krealloc(qp_info->snoop_table,
408 sizeof mad_snoop_priv *
409 (qp_info->snoop_table_size + 1),
410 GFP_ATOMIC);
411 if (!new_snoop_table) {
412 i = -ENOMEM;
413 goto out;
414 }
415
416 qp_info->snoop_table = new_snoop_table;
417 qp_info->snoop_table_size++;
418 }
419 qp_info->snoop_table[i] = mad_snoop_priv;
420 atomic_inc(&qp_info->snoop_count);
421out:
422 spin_unlock_irqrestore(&qp_info->snoop_lock, flags);
423 return i;
424}
425
426struct ib_mad_agent *ib_register_mad_snoop(struct ib_device *device,
427 u8 port_num,
428 enum ib_qp_type qp_type,
429 int mad_snoop_flags,
430 ib_mad_snoop_handler snoop_handler,
431 ib_mad_recv_handler recv_handler,
432 void *context)
433{
434 struct ib_mad_port_private *port_priv;
435 struct ib_mad_agent *ret;
436 struct ib_mad_snoop_private *mad_snoop_priv;
437 int qpn;
438
439 /* Validate parameters */
440 if ((is_snooping_sends(mad_snoop_flags) && !snoop_handler) ||
441 (is_snooping_recvs(mad_snoop_flags) && !recv_handler)) {
442 ret = ERR_PTR(-EINVAL);
443 goto error1;
444 }
445 qpn = get_spl_qp_index(qp_type);
446 if (qpn == -1) {
447 ret = ERR_PTR(-EINVAL);
448 goto error1;
449 }
450 port_priv = ib_get_mad_port(device, port_num);
451 if (!port_priv) {
452 ret = ERR_PTR(-ENODEV);
453 goto error1;
454 }
455 /* Allocate structures */
456 mad_snoop_priv = kzalloc(sizeof *mad_snoop_priv, GFP_KERNEL);
457 if (!mad_snoop_priv) {
458 ret = ERR_PTR(-ENOMEM);
459 goto error1;
460 }
461
462 /* Now, fill in the various structures */
463 mad_snoop_priv->qp_info = &port_priv->qp_info[qpn];
464 mad_snoop_priv->agent.device = device;
465 mad_snoop_priv->agent.recv_handler = recv_handler;
466 mad_snoop_priv->agent.snoop_handler = snoop_handler;
467 mad_snoop_priv->agent.context = context;
468 mad_snoop_priv->agent.qp = port_priv->qp_info[qpn].qp;
469 mad_snoop_priv->agent.port_num = port_num;
470 mad_snoop_priv->mad_snoop_flags = mad_snoop_flags;
471 init_completion(&mad_snoop_priv->comp);
472 mad_snoop_priv->snoop_index = register_snoop_agent(
473 &port_priv->qp_info[qpn],
474 mad_snoop_priv);
475 if (mad_snoop_priv->snoop_index < 0) {
476 ret = ERR_PTR(mad_snoop_priv->snoop_index);
477 goto error2;
478 }
479
480 atomic_set(&mad_snoop_priv->refcount, 1);
481 return &mad_snoop_priv->agent;
482
483error2:
484 kfree(mad_snoop_priv);
485error1:
486 return ret;
487}
488EXPORT_SYMBOL(ib_register_mad_snoop);
489
490static inline void deref_mad_agent(struct ib_mad_agent_private *mad_agent_priv)
491{
492 if (atomic_dec_and_test(&mad_agent_priv->refcount))
493 complete(&mad_agent_priv->comp);
494}
495
496static inline void deref_snoop_agent(struct ib_mad_snoop_private *mad_snoop_priv)
497{
498 if (atomic_dec_and_test(&mad_snoop_priv->refcount))
499 complete(&mad_snoop_priv->comp);
500}
501
502static void unregister_mad_agent(struct ib_mad_agent_private *mad_agent_priv)
503{
504 struct ib_mad_port_private *port_priv;
505 unsigned long flags;
506
507 /* Note that we could still be handling received MADs */
508
509 /*
510 * Canceling all sends results in dropping received response
511 * MADs, preventing us from queuing additional work
512 */
513 cancel_mads(mad_agent_priv);
514 port_priv = mad_agent_priv->qp_info->port_priv;
515 cancel_delayed_work(&mad_agent_priv->timed_work);
516
517 spin_lock_irqsave(&port_priv->reg_lock, flags);
518 remove_mad_reg_req(mad_agent_priv);
519 list_del(&mad_agent_priv->agent_list);
520 spin_unlock_irqrestore(&port_priv->reg_lock, flags);
521
522 flush_workqueue(port_priv->wq);
523 ib_cancel_rmpp_recvs(mad_agent_priv);
524
525 deref_mad_agent(mad_agent_priv);
526 wait_for_completion(&mad_agent_priv->comp);
527
528 kfree(mad_agent_priv->reg_req);
529 ib_dereg_mr(mad_agent_priv->agent.mr);
530 kfree(mad_agent_priv);
531}
532
533static void unregister_mad_snoop(struct ib_mad_snoop_private *mad_snoop_priv)
534{
535 struct ib_mad_qp_info *qp_info;
536 unsigned long flags;
537
538 qp_info = mad_snoop_priv->qp_info;
539 spin_lock_irqsave(&qp_info->snoop_lock, flags);
540 qp_info->snoop_table[mad_snoop_priv->snoop_index] = NULL;
541 atomic_dec(&qp_info->snoop_count);
542 spin_unlock_irqrestore(&qp_info->snoop_lock, flags);
543
544 deref_snoop_agent(mad_snoop_priv);
545 wait_for_completion(&mad_snoop_priv->comp);
546
547 kfree(mad_snoop_priv);
548}
549
550/*
551 * ib_unregister_mad_agent - Unregisters a client from using MAD services
552 */
553int ib_unregister_mad_agent(struct ib_mad_agent *mad_agent)
554{
555 struct ib_mad_agent_private *mad_agent_priv;
556 struct ib_mad_snoop_private *mad_snoop_priv;
557
558 /* If the TID is zero, the agent can only snoop. */
559 if (mad_agent->hi_tid) {
560 mad_agent_priv = container_of(mad_agent,
561 struct ib_mad_agent_private,
562 agent);
563 unregister_mad_agent(mad_agent_priv);
564 } else {
565 mad_snoop_priv = container_of(mad_agent,
566 struct ib_mad_snoop_private,
567 agent);
568 unregister_mad_snoop(mad_snoop_priv);
569 }
570 return 0;
571}
572EXPORT_SYMBOL(ib_unregister_mad_agent);
573
574static void dequeue_mad(struct ib_mad_list_head *mad_list)
575{
576 struct ib_mad_queue *mad_queue;
577 unsigned long flags;
578
579 BUG_ON(!mad_list->mad_queue);
580 mad_queue = mad_list->mad_queue;
581 spin_lock_irqsave(&mad_queue->lock, flags);
582 list_del(&mad_list->list);
583 mad_queue->count--;
584 spin_unlock_irqrestore(&mad_queue->lock, flags);
585}
586
587static void snoop_send(struct ib_mad_qp_info *qp_info,
588 struct ib_mad_send_buf *send_buf,
589 struct ib_mad_send_wc *mad_send_wc,
590 int mad_snoop_flags)
591{
592 struct ib_mad_snoop_private *mad_snoop_priv;
593 unsigned long flags;
594 int i;
595
596 spin_lock_irqsave(&qp_info->snoop_lock, flags);
597 for (i = 0; i < qp_info->snoop_table_size; i++) {
598 mad_snoop_priv = qp_info->snoop_table[i];
599 if (!mad_snoop_priv ||
600 !(mad_snoop_priv->mad_snoop_flags & mad_snoop_flags))
601 continue;
602
603 atomic_inc(&mad_snoop_priv->refcount);
604 spin_unlock_irqrestore(&qp_info->snoop_lock, flags);
605 mad_snoop_priv->agent.snoop_handler(&mad_snoop_priv->agent,
606 send_buf, mad_send_wc);
607 deref_snoop_agent(mad_snoop_priv);
608 spin_lock_irqsave(&qp_info->snoop_lock, flags);
609 }
610 spin_unlock_irqrestore(&qp_info->snoop_lock, flags);
611}
612
613static void snoop_recv(struct ib_mad_qp_info *qp_info,
614 struct ib_mad_recv_wc *mad_recv_wc,
615 int mad_snoop_flags)
616{
617 struct ib_mad_snoop_private *mad_snoop_priv;
618 unsigned long flags;
619 int i;
620
621 spin_lock_irqsave(&qp_info->snoop_lock, flags);
622 for (i = 0; i < qp_info->snoop_table_size; i++) {
623 mad_snoop_priv = qp_info->snoop_table[i];
624 if (!mad_snoop_priv ||
625 !(mad_snoop_priv->mad_snoop_flags & mad_snoop_flags))
626 continue;
627
628 atomic_inc(&mad_snoop_priv->refcount);
629 spin_unlock_irqrestore(&qp_info->snoop_lock, flags);
630 mad_snoop_priv->agent.recv_handler(&mad_snoop_priv->agent,
631 mad_recv_wc);
632 deref_snoop_agent(mad_snoop_priv);
633 spin_lock_irqsave(&qp_info->snoop_lock, flags);
634 }
635 spin_unlock_irqrestore(&qp_info->snoop_lock, flags);
636}
637
638static void build_smp_wc(struct ib_qp *qp,
639 u64 wr_id, u16 slid, u16 pkey_index, u8 port_num,
640 struct ib_wc *wc)
641{
642 memset(wc, 0, sizeof *wc);
643 wc->wr_id = wr_id;
644 wc->status = IB_WC_SUCCESS;
645 wc->opcode = IB_WC_RECV;
646 wc->pkey_index = pkey_index;
647 wc->byte_len = sizeof(struct ib_mad) + sizeof(struct ib_grh);
648 wc->src_qp = IB_QP0;
649 wc->qp = qp;
650 wc->slid = slid;
651 wc->sl = 0;
652 wc->dlid_path_bits = 0;
653 wc->port_num = port_num;
654}
655
656/*
657 * Return 0 if SMP is to be sent
658 * Return 1 if SMP was consumed locally (whether or not solicited)
659 * Return < 0 if error
660 */
661static int handle_outgoing_dr_smp(struct ib_mad_agent_private *mad_agent_priv,
662 struct ib_mad_send_wr_private *mad_send_wr)
663 {
664 int ret = 0;
665 struct ib_smp *smp = mad_send_wr->send_buf.mad;
666 unsigned long flags;
667 struct ib_mad_local_private *local;
668 struct ib_mad_private *mad_priv;
669 struct ib_mad_port_private *port_priv;
670 struct ib_mad_agent_private *recv_mad_agent = NULL;
671 struct ib_device *device = mad_agent_priv->agent.device;
672 u8 port_num;
673 struct ib_wc mad_wc;
674 struct ib_send_wr *send_wr = &mad_send_wr->send_wr;
675
676 if (device->node_type == RDMA_NODE_IB_SWITCH &&
677 smp->mgmt_class == IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE)
678 port_num = send_wr->wr.ud.port_num;
679 else
680 port_num = mad_agent_priv->agent.port_num;
681
682 /*
683 * Directed route handling starts if the initial LID routed part of
684 * a request or the ending LID routed part of a response is empty.
685 * If we are at the start of the LID routed part, don't update the
686 * hop_ptr or hop_cnt. See section 14.2.2, Vol 1 IB spec.
687 */
688 if ((ib_get_smp_direction(smp) ? smp->dr_dlid : smp->dr_slid) ==
689 IB_LID_PERMISSIVE &&
690 smi_handle_dr_smp_send(smp, device->node_type, port_num) ==
691 IB_SMI_DISCARD) {
692 ret = -EINVAL;
693 printk(KERN_ERR PFX "Invalid directed route\n");
694 goto out;
695 }
696
697 /* Check to post send on QP or process locally */
698 if (smi_check_local_smp(smp, device) == IB_SMI_DISCARD &&
699 smi_check_local_returning_smp(smp, device) == IB_SMI_DISCARD)
700 goto out;
701
702 local = kmalloc(sizeof *local, GFP_ATOMIC);
703 if (!local) {
704 ret = -ENOMEM;
705 printk(KERN_ERR PFX "No memory for ib_mad_local_private\n");
706 goto out;
707 }
708 local->mad_priv = NULL;
709 local->recv_mad_agent = NULL;
710 mad_priv = kmem_cache_alloc(ib_mad_cache, GFP_ATOMIC);
711 if (!mad_priv) {
712 ret = -ENOMEM;
713 printk(KERN_ERR PFX "No memory for local response MAD\n");
714 kfree(local);
715 goto out;
716 }
717
718 build_smp_wc(mad_agent_priv->agent.qp,
719 send_wr->wr_id, be16_to_cpu(smp->dr_slid),
720 send_wr->wr.ud.pkey_index,
721 send_wr->wr.ud.port_num, &mad_wc);
722
723 /* No GRH for DR SMP */
724 ret = device->process_mad(device, 0, port_num, &mad_wc, NULL,
725 (struct ib_mad *)smp,
726 (struct ib_mad *)&mad_priv->mad);
727 switch (ret)
728 {
729 case IB_MAD_RESULT_SUCCESS | IB_MAD_RESULT_REPLY:
730 if (ib_response_mad(&mad_priv->mad.mad) &&
731 mad_agent_priv->agent.recv_handler) {
732 local->mad_priv = mad_priv;
733 local->recv_mad_agent = mad_agent_priv;
734 /*
735 * Reference MAD agent until receive
736 * side of local completion handled
737 */
738 atomic_inc(&mad_agent_priv->refcount);
739 } else
740 kmem_cache_free(ib_mad_cache, mad_priv);
741 break;
742 case IB_MAD_RESULT_SUCCESS | IB_MAD_RESULT_CONSUMED:
743 kmem_cache_free(ib_mad_cache, mad_priv);
744 break;
745 case IB_MAD_RESULT_SUCCESS:
746 /* Treat like an incoming receive MAD */
747 port_priv = ib_get_mad_port(mad_agent_priv->agent.device,
748 mad_agent_priv->agent.port_num);
749 if (port_priv) {
750 memcpy(&mad_priv->mad.mad, smp, sizeof(struct ib_mad));
751 recv_mad_agent = find_mad_agent(port_priv,
752 &mad_priv->mad.mad);
753 }
754 if (!port_priv || !recv_mad_agent) {
755 /*
756 * No receiving agent so drop packet and
757 * generate send completion.
758 */
759 kmem_cache_free(ib_mad_cache, mad_priv);
760 break;
761 }
762 local->mad_priv = mad_priv;
763 local->recv_mad_agent = recv_mad_agent;
764 break;
765 default:
766 kmem_cache_free(ib_mad_cache, mad_priv);
767 kfree(local);
768 ret = -EINVAL;
769 goto out;
770 }
771
772 local->mad_send_wr = mad_send_wr;
773 /* Reference MAD agent until send side of local completion handled */
774 atomic_inc(&mad_agent_priv->refcount);
775 /* Queue local completion to local list */
776 spin_lock_irqsave(&mad_agent_priv->lock, flags);
777 list_add_tail(&local->completion_list, &mad_agent_priv->local_list);
778 spin_unlock_irqrestore(&mad_agent_priv->lock, flags);
779 queue_work(mad_agent_priv->qp_info->port_priv->wq,
780 &mad_agent_priv->local_work);
781 ret = 1;
782out:
783 return ret;
784}
785
786static int get_pad_size(int hdr_len, int data_len)
787{
788 int seg_size, pad;
789
790 seg_size = sizeof(struct ib_mad) - hdr_len;
791 if (data_len && seg_size) {
792 pad = seg_size - data_len % seg_size;
793 return pad == seg_size ? 0 : pad;
794 } else
795 return seg_size;
796}
797
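/*
 * Worked example (illustrative only, assuming the usual SA header size of
 * IB_MGMT_SA_HDR = 56 bytes): each RMPP segment of an SA MAD then carries
 * sizeof(struct ib_mad) - 56 = 200 payload bytes.  A 250-byte payload fills
 * one full segment plus 50 bytes of a second, so
 * get_pad_size(56, 250) returns 200 - (250 % 200) = 150 bytes of padding,
 * and the padded payload occupies exactly two segments.
 */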
798static void free_send_rmpp_list(struct ib_mad_send_wr_private *mad_send_wr)
799{
800 struct ib_rmpp_segment *s, *t;
801
802 list_for_each_entry_safe(s, t, &mad_send_wr->rmpp_list, list) {
803 list_del(&s->list);
804 kfree(s);
805 }
806}
807
808static int alloc_send_rmpp_list(struct ib_mad_send_wr_private *send_wr,
809 gfp_t gfp_mask)
810{
811 struct ib_mad_send_buf *send_buf = &send_wr->send_buf;
812 struct ib_rmpp_mad *rmpp_mad = send_buf->mad;
813 struct ib_rmpp_segment *seg = NULL;
814 int left, seg_size, pad;
815
816 send_buf->seg_size = sizeof (struct ib_mad) - send_buf->hdr_len;
817 seg_size = send_buf->seg_size;
818 pad = send_wr->pad;
819
820 /* Allocate data segments. */
821 for (left = send_buf->data_len + pad; left > 0; left -= seg_size) {
822 seg = kmalloc(sizeof (*seg) + seg_size, gfp_mask);
823 if (!seg) {
824 printk(KERN_ERR "alloc_send_rmpp_segs: RMPP mem "
825 "alloc failed for len %zd, gfp %#x\n",
826 sizeof (*seg) + seg_size, gfp_mask);
827 free_send_rmpp_list(send_wr);
828 return -ENOMEM;
829 }
830 seg->num = ++send_buf->seg_count;
831 list_add_tail(&seg->list, &send_wr->rmpp_list);
832 }
833
834 /* Zero any padding */
835 if (pad)
836 memset(seg->data + seg_size - pad, 0, pad);
837
838 rmpp_mad->rmpp_hdr.rmpp_version = send_wr->mad_agent_priv->
839 agent.rmpp_version;
840 rmpp_mad->rmpp_hdr.rmpp_type = IB_MGMT_RMPP_TYPE_DATA;
841 ib_set_rmpp_flags(&rmpp_mad->rmpp_hdr, IB_MGMT_RMPP_FLAG_ACTIVE);
842
843 send_wr->cur_seg = container_of(send_wr->rmpp_list.next,
844 struct ib_rmpp_segment, list);
845 send_wr->last_ack_seg = send_wr->cur_seg;
846 return 0;
847}
848
849struct ib_mad_send_buf * ib_create_send_mad(struct ib_mad_agent *mad_agent,
850 u32 remote_qpn, u16 pkey_index,
851 int rmpp_active,
852 int hdr_len, int data_len,
853 gfp_t gfp_mask)
854{
855 struct ib_mad_agent_private *mad_agent_priv;
856 struct ib_mad_send_wr_private *mad_send_wr;
857 int pad, message_size, ret, size;
858 void *buf;
859
860 mad_agent_priv = container_of(mad_agent, struct ib_mad_agent_private,
861 agent);
862 pad = get_pad_size(hdr_len, data_len);
863 message_size = hdr_len + data_len + pad;
864
865 if ((!mad_agent->rmpp_version &&
866 (rmpp_active || message_size > sizeof(struct ib_mad))) ||
867 (!rmpp_active && message_size > sizeof(struct ib_mad)))
868 return ERR_PTR(-EINVAL);
869
870 size = rmpp_active ? hdr_len : sizeof(struct ib_mad);
871 buf = kzalloc(sizeof *mad_send_wr + size, gfp_mask);
872 if (!buf)
873 return ERR_PTR(-ENOMEM);
874
875 mad_send_wr = buf + size;
876 INIT_LIST_HEAD(&mad_send_wr->rmpp_list);
877 mad_send_wr->send_buf.mad = buf;
878 mad_send_wr->send_buf.hdr_len = hdr_len;
879 mad_send_wr->send_buf.data_len = data_len;
880 mad_send_wr->pad = pad;
881
882 mad_send_wr->mad_agent_priv = mad_agent_priv;
883 mad_send_wr->sg_list[0].length = hdr_len;
884 mad_send_wr->sg_list[0].lkey = mad_agent->mr->lkey;
885 mad_send_wr->sg_list[1].length = sizeof(struct ib_mad) - hdr_len;
886 mad_send_wr->sg_list[1].lkey = mad_agent->mr->lkey;
887
888 mad_send_wr->send_wr.wr_id = (unsigned long) mad_send_wr;
889 mad_send_wr->send_wr.sg_list = mad_send_wr->sg_list;
890 mad_send_wr->send_wr.num_sge = 2;
891 mad_send_wr->send_wr.opcode = IB_WR_SEND;
892 mad_send_wr->send_wr.send_flags = IB_SEND_SIGNALED;
893 mad_send_wr->send_wr.wr.ud.remote_qpn = remote_qpn;
894 mad_send_wr->send_wr.wr.ud.remote_qkey = IB_QP_SET_QKEY;
895 mad_send_wr->send_wr.wr.ud.pkey_index = pkey_index;
896
897 if (rmpp_active) {
898 ret = alloc_send_rmpp_list(mad_send_wr, gfp_mask);
899 if (ret) {
900 kfree(buf);
901 return ERR_PTR(ret);
902 }
903 }
904
905 mad_send_wr->send_buf.mad_agent = mad_agent;
906 atomic_inc(&mad_agent_priv->refcount);
907 return &mad_send_wr->send_buf;
908}
909EXPORT_SYMBOL(ib_create_send_mad);
910
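/*
 * Illustrative sketch (not part of mad.c; variable names are hypothetical):
 * allocating a single-segment SA request buffer on an already-registered
 * agent.  hdr_len/data_len decide how the two send SGEs split the MAD.
 *
 *	struct ib_mad_send_buf *msg;
 *
 *	msg = ib_create_send_mad(agent, 1, pkey_index, 0,
 *				 IB_MGMT_SA_HDR, IB_MGMT_SA_DATA,
 *				 GFP_KERNEL);
 *	if (IS_ERR(msg))
 *		return PTR_ERR(msg);
 *	/" fill in msg->mad, then set msg->ah, msg->timeout_ms, msg->retries "/
 */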
911int ib_get_mad_data_offset(u8 mgmt_class)
912{
913 if (mgmt_class == IB_MGMT_CLASS_SUBN_ADM)
914 return IB_MGMT_SA_HDR;
915 else if ((mgmt_class == IB_MGMT_CLASS_DEVICE_MGMT) ||
916 (mgmt_class == IB_MGMT_CLASS_DEVICE_ADM) ||
917 (mgmt_class == IB_MGMT_CLASS_BIS))
918 return IB_MGMT_DEVICE_HDR;
919 else if ((mgmt_class >= IB_MGMT_CLASS_VENDOR_RANGE2_START) &&
920 (mgmt_class <= IB_MGMT_CLASS_VENDOR_RANGE2_END))
921 return IB_MGMT_VENDOR_HDR;
922 else
923 return IB_MGMT_MAD_HDR;
924}
925EXPORT_SYMBOL(ib_get_mad_data_offset);
926
927int ib_is_mad_class_rmpp(u8 mgmt_class)
928{
929 if ((mgmt_class == IB_MGMT_CLASS_SUBN_ADM) ||
930 (mgmt_class == IB_MGMT_CLASS_DEVICE_MGMT) ||
931 (mgmt_class == IB_MGMT_CLASS_DEVICE_ADM) ||
932 (mgmt_class == IB_MGMT_CLASS_BIS) ||
933 ((mgmt_class >= IB_MGMT_CLASS_VENDOR_RANGE2_START) &&
934 (mgmt_class <= IB_MGMT_CLASS_VENDOR_RANGE2_END)))
935 return 1;
936 return 0;
937}
938EXPORT_SYMBOL(ib_is_mad_class_rmpp);
939
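/*
 * Illustrative sketch (not part of mad.c): the class payload begins at the
 * class-specific data offset, and only the classes reported by
 * ib_is_mad_class_rmpp() may be created with rmpp_active set.
 *
 *	int offset = ib_get_mad_data_offset(IB_MGMT_CLASS_SUBN_ADM);
 *						/" == IB_MGMT_SA_HDR "/
 *	void *data = (u8 *) send_buf->mad + offset;
 *
 *	if (ib_is_mad_class_rmpp(IB_MGMT_CLASS_SUBN_ADM))
 *		/" SA MADs may be segmented via RMPP "/
 */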
940void *ib_get_rmpp_segment(struct ib_mad_send_buf *send_buf, int seg_num)
941{
942 struct ib_mad_send_wr_private *mad_send_wr;
943 struct list_head *list;
944
945 mad_send_wr = container_of(send_buf, struct ib_mad_send_wr_private,
946 send_buf);
947 list = &mad_send_wr->cur_seg->list;
948
949 if (mad_send_wr->cur_seg->num < seg_num) {
950 list_for_each_entry(mad_send_wr->cur_seg, list, list)
951 if (mad_send_wr->cur_seg->num == seg_num)
952 break;
953 } else if (mad_send_wr->cur_seg->num > seg_num) {
954 list_for_each_entry_reverse(mad_send_wr->cur_seg, list, list)
955 if (mad_send_wr->cur_seg->num == seg_num)
956 break;
957 }
958 return mad_send_wr->cur_seg->data;
959}
960EXPORT_SYMBOL(ib_get_rmpp_segment);
961
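/*
 * Illustrative sketch (not part of mad.c): filling the payload of a
 * multi-segment send buffer.  Segments are numbered from 1, and each holds
 * up to send_buf->seg_size bytes.
 *
 *	for (i = 1; i <= send_buf->seg_count; i++) {
 *		void *seg = ib_get_rmpp_segment(send_buf, i);
 *		/" copy up to send_buf->seg_size bytes into seg "/
 *	}
 */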
962static inline void *ib_get_payload(struct ib_mad_send_wr_private *mad_send_wr)
963{
964 if (mad_send_wr->send_buf.seg_count)
965 return ib_get_rmpp_segment(&mad_send_wr->send_buf,
966 mad_send_wr->seg_num);
967 else
968 return mad_send_wr->send_buf.mad +
969 mad_send_wr->send_buf.hdr_len;
970}
971
972void ib_free_send_mad(struct ib_mad_send_buf *send_buf)
973{
974 struct ib_mad_agent_private *mad_agent_priv;
975 struct ib_mad_send_wr_private *mad_send_wr;
976
977 mad_agent_priv = container_of(send_buf->mad_agent,
978 struct ib_mad_agent_private, agent);
979 mad_send_wr = container_of(send_buf, struct ib_mad_send_wr_private,
980 send_buf);
981
982 free_send_rmpp_list(mad_send_wr);
983 kfree(send_buf->mad);
984 deref_mad_agent(mad_agent_priv);
985}
986EXPORT_SYMBOL(ib_free_send_mad);
987
988int ib_send_mad(struct ib_mad_send_wr_private *mad_send_wr)
989{
990 struct ib_mad_qp_info *qp_info;
991 struct list_head *list;
992 struct ib_send_wr *bad_send_wr;
993 struct ib_mad_agent *mad_agent;
994 struct ib_sge *sge;
995 unsigned long flags;
996 int ret;
997
998 /* Set WR ID to find mad_send_wr upon completion */
999 qp_info = mad_send_wr->mad_agent_priv->qp_info;
1000 mad_send_wr->send_wr.wr_id = (unsigned long)&mad_send_wr->mad_list;
1001 mad_send_wr->mad_list.mad_queue = &qp_info->send_queue;
1002
1003 mad_agent = mad_send_wr->send_buf.mad_agent;
1004 sge = mad_send_wr->sg_list;
1005 sge[0].addr = ib_dma_map_single(mad_agent->device,
1006 mad_send_wr->send_buf.mad,
1007 sge[0].length,
1008 DMA_TO_DEVICE);
1009 mad_send_wr->header_mapping = sge[0].addr;
1010
1011 sge[1].addr = ib_dma_map_single(mad_agent->device,
1012 ib_get_payload(mad_send_wr),
1013 sge[1].length,
1014 DMA_TO_DEVICE);
1015 mad_send_wr->payload_mapping = sge[1].addr;
1016
1017 spin_lock_irqsave(&qp_info->send_queue.lock, flags);
1018 if (qp_info->send_queue.count < qp_info->send_queue.max_active) {
1019 ret = ib_post_send(mad_agent->qp, &mad_send_wr->send_wr,
1020 &bad_send_wr);
1021 list = &qp_info->send_queue.list;
1022 } else {
1023 ret = 0;
1024 list = &qp_info->overflow_list;
1025 }
1026
1027 if (!ret) {
1028 qp_info->send_queue.count++;
1029 list_add_tail(&mad_send_wr->mad_list.list, list);
1030 }
1031 spin_unlock_irqrestore(&qp_info->send_queue.lock, flags);
1032 if (ret) {
1033 ib_dma_unmap_single(mad_agent->device,
1034 mad_send_wr->header_mapping,
1035 sge[0].length, DMA_TO_DEVICE);
1036 ib_dma_unmap_single(mad_agent->device,
1037 mad_send_wr->payload_mapping,
1038 sge[1].length, DMA_TO_DEVICE);
1039 }
1040 return ret;
1041}
1042
1043/*
1044 * ib_post_send_mad - Posts MAD(s) to the send queue of the QP associated
1045 * with the registered client
1046 */
1047int ib_post_send_mad(struct ib_mad_send_buf *send_buf,
1048 struct ib_mad_send_buf **bad_send_buf)
1049 {
1050 struct ib_mad_agent_private *mad_agent_priv;
1051 struct ib_mad_send_buf *next_send_buf;
1052 struct ib_mad_send_wr_private *mad_send_wr;
1053 unsigned long flags;
1054 int ret = -EINVAL;
1055
1056 /* Walk list of send WRs and post each on send list */
1057 for (; send_buf; send_buf = next_send_buf) {
1058
1059 mad_send_wr = container_of(send_buf,
1060 struct ib_mad_send_wr_private,
1061 send_buf);
1062 mad_agent_priv = mad_send_wr->mad_agent_priv;
1063
1064 if (!send_buf->mad_agent->send_handler ||
1065 (send_buf->timeout_ms &&
1066 !send_buf->mad_agent->recv_handler)) {
1067 ret = -EINVAL;
1068 goto error;
1069 }
1070
1071 if (!ib_is_mad_class_rmpp(((struct ib_mad_hdr *) send_buf->mad)->mgmt_class)) {
1072 if (mad_agent_priv->agent.rmpp_version) {
1073 ret = -EINVAL;
1074 goto error;
1075 }
1076 }
1077
1078 /*
1079 * Save pointer to next work request to post in case the
1080 * current one completes, and the user modifies the work
1081 * request associated with the completion
1082 */
1083 next_send_buf = send_buf->next;
1084 mad_send_wr->send_wr.wr.ud.ah = send_buf->ah;
1085
1086 if (((struct ib_mad_hdr *) send_buf->mad)->mgmt_class ==
1087 IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE) {
1088 ret = handle_outgoing_dr_smp(mad_agent_priv,
1089 mad_send_wr);
1090 if (ret < 0) /* error */
1091 goto error;
1092 else if (ret == 1) /* locally consumed */
1093 continue;
1094 }
1095
1096 mad_send_wr->tid = ((struct ib_mad_hdr *) send_buf->mad)->tid;
1097 /* Timeout will be updated after send completes */
1098 mad_send_wr->timeout = msecs_to_jiffies(send_buf->timeout_ms);
1099 mad_send_wr->max_retries = send_buf->retries;
1100 mad_send_wr->retries_left = send_buf->retries;
1101 send_buf->retries = 0;
1102 /* Reference for work request to QP + response */
1103 mad_send_wr->refcount = 1 + (mad_send_wr->timeout > 0);
1104 mad_send_wr->status = IB_WC_SUCCESS;
1105
1106 /* Reference MAD agent until send completes */
1107 atomic_inc(&mad_agent_priv->refcount);
1108 spin_lock_irqsave(&mad_agent_priv->lock, flags);
1109 list_add_tail(&mad_send_wr->agent_list,
1110 &mad_agent_priv->send_list);
1111 spin_unlock_irqrestore(&mad_agent_priv->lock, flags);
1112
1113 if (mad_agent_priv->agent.rmpp_version) {
1114 ret = ib_send_rmpp_mad(mad_send_wr);
1115 if (ret >= 0 && ret != IB_RMPP_RESULT_CONSUMED)
1116 ret = ib_send_mad(mad_send_wr);
1117 } else
1118 ret = ib_send_mad(mad_send_wr);
1119 if (ret < 0) {
1120 /* Fail send request */
1121 spin_lock_irqsave(&mad_agent_priv->lock, flags);
1122 list_del(&mad_send_wr->agent_list);
1123 spin_unlock_irqrestore(&mad_agent_priv->lock, flags);
1124 atomic_dec(&mad_agent_priv->refcount);
1125 goto error;
1126 }
1127 }
1128 return 0;
1129error:
1130 if (bad_send_buf)
1131 *bad_send_buf = send_buf;
1132 return ret;
1133}
1134EXPORT_SYMBOL(ib_post_send_mad);
1135
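/*
 * Illustrative sketch (not part of mad.c; names are hypothetical): posting a
 * buffer created by ib_create_send_mad().  The buffer is owned by the MAD
 * layer until the send handler runs, which then frees it; on a failed post
 * the caller frees it itself.
 *
 *	msg->ah = ah;
 *	msg->timeout_ms = 1000;
 *	msg->retries = 3;
 *	ret = ib_post_send_mad(msg, NULL);
 *	if (ret)
 *		ib_free_send_mad(msg);
 *
 *	static void my_send_handler(struct ib_mad_agent *agent,
 *				    struct ib_mad_send_wc *mad_send_wc)
 *	{
 *		ib_free_send_mad(mad_send_wc->send_buf);
 *	}
 */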
1136/*
1137 * ib_free_recv_mad - Returns data buffers used to receive
1138 * a MAD to the access layer
1139 */
1140void ib_free_recv_mad(struct ib_mad_recv_wc *mad_recv_wc)
1141{
1142 struct ib_mad_recv_buf *mad_recv_buf, *temp_recv_buf;
1143 struct ib_mad_private_header *mad_priv_hdr;
1144 struct ib_mad_private *priv;
1145 struct list_head free_list;
1146
1147 INIT_LIST_HEAD(&free_list);
1148 list_splice_init(&mad_recv_wc->rmpp_list, &free_list);
1149
1150 list_for_each_entry_safe(mad_recv_buf, temp_recv_buf,
1151 &free_list, list) {
1152 mad_recv_wc = container_of(mad_recv_buf, struct ib_mad_recv_wc,
1153 recv_buf);
1154 mad_priv_hdr = container_of(mad_recv_wc,
1155 struct ib_mad_private_header,
1156 recv_wc);
1157 priv = container_of(mad_priv_hdr, struct ib_mad_private,
1158 header);
1159 kmem_cache_free(ib_mad_cache, priv);
1160 }
1161}
1162EXPORT_SYMBOL(ib_free_recv_mad);
1163
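/*
 * Illustrative sketch (not part of mad.c; names are hypothetical): a receive
 * handler consumes the MAD and then hands the receive buffers back with
 * ib_free_recv_mad().
 *
 *	static void my_recv_handler(struct ib_mad_agent *agent,
 *				    struct ib_mad_recv_wc *mad_recv_wc)
 *	{
 *		/" examine mad_recv_wc->recv_buf.mad ... "/
 *		ib_free_recv_mad(mad_recv_wc);
 *	}
 */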
1164struct ib_mad_agent *ib_redirect_mad_qp(struct ib_qp *qp,
1165 u8 rmpp_version,
1166 ib_mad_send_handler send_handler,
1167 ib_mad_recv_handler recv_handler,
1168 void *context)
1169{
1170 return ERR_PTR(-EINVAL); /* XXX: for now */
1171}
1172EXPORT_SYMBOL(ib_redirect_mad_qp);
1173
1174int ib_process_mad_wc(struct ib_mad_agent *mad_agent,
1175 struct ib_wc *wc)
1176{
1177 printk(KERN_ERR PFX "ib_process_mad_wc() not implemented yet\n");
1178 return 0;
1179}
1180EXPORT_SYMBOL(ib_process_mad_wc);
1181
1182static int method_in_use(struct ib_mad_mgmt_method_table **method,
1183 struct ib_mad_reg_req *mad_reg_req)
1184{
1185 int i;
1186
1187 for (i = find_first_bit(mad_reg_req->method_mask, IB_MGMT_MAX_METHODS);
1188 i < IB_MGMT_MAX_METHODS;
1189 i = find_next_bit(mad_reg_req->method_mask, IB_MGMT_MAX_METHODS,
1190 1+i)) {
1191 if ((*method)->agent[i]) {
1192 printk(KERN_ERR PFX "Method %d already in use\n", i);
1193 return -EINVAL;
1194 }
1195 }
1196 return 0;
1197}
1198
1199static int allocate_method_table(struct ib_mad_mgmt_method_table **method)
1200{
1201 /* Allocate management method table */
1202 *method = kzalloc(sizeof **method, GFP_ATOMIC);
1203 if (!*method) {
1204 printk(KERN_ERR PFX "No memory for "
1205 "ib_mad_mgmt_method_table\n");
1206 return -ENOMEM;
1207 }
1208
1209 return 0;
1210}
1211
1212/*
1213 * Check to see if there are any methods still in use
1214 */
1215static int check_method_table(struct ib_mad_mgmt_method_table *method)
1216{
1217 int i;
1218
1219 for (i = 0; i < IB_MGMT_MAX_METHODS; i++)
1220 if (method->agent[i])
1221 return 1;
1222 return 0;
1223}
1224
1225/*
1226 * Check to see if there are any method tables for this class still in use
1227 */
1228static int check_class_table(struct ib_mad_mgmt_class_table *class)
1229{
1230 int i;
1231
1232 for (i = 0; i < MAX_MGMT_CLASS; i++)
1233 if (class->method_table[i])
1234 return 1;
1235 return 0;
1236}
1237
1238static int check_vendor_class(struct ib_mad_mgmt_vendor_class *vendor_class)
1239{
1240 int i;
1241
1242 for (i = 0; i < MAX_MGMT_OUI; i++)
1243 if (vendor_class->method_table[i])
1244 return 1;
1245 return 0;
1246}
1247
1248static int find_vendor_oui(struct ib_mad_mgmt_vendor_class *vendor_class,
1249 char *oui)
1250{
1251 int i;
1252
1253 for (i = 0; i < MAX_MGMT_OUI; i++)
1254 /* Is there a matching OUI for this vendor class? */
1255 if (!memcmp(vendor_class->oui[i], oui, 3))
1256 return i;
1257
1258 return -1;
1259}
1260
1261static int check_vendor_table(struct ib_mad_mgmt_vendor_class_table *vendor)
1262{
1263 int i;
1264
1265 for (i = 0; i < MAX_MGMT_VENDOR_RANGE2; i++)
1266 if (vendor->vendor_class[i])
1267 return 1;
1268
1269 return 0;
1270}
1271
1272static void remove_methods_mad_agent(struct ib_mad_mgmt_method_table *method,
1273 struct ib_mad_agent_private *agent)
1274{
1275 int i;
1276
1277 /* Remove any methods for this mad agent */
1278 for (i = 0; i < IB_MGMT_MAX_METHODS; i++) {
1279 if (method->agent[i] == agent) {
1280 method->agent[i] = NULL;
1281 }
1282 }
1283}
1284
1285static int add_nonoui_reg_req(struct ib_mad_reg_req *mad_reg_req,
1286 struct ib_mad_agent_private *agent_priv,
1287 u8 mgmt_class)
1288{
1289 struct ib_mad_port_private *port_priv;
1290 struct ib_mad_mgmt_class_table **class;
1291 struct ib_mad_mgmt_method_table **method;
1292 int i, ret;
1293
1294 port_priv = agent_priv->qp_info->port_priv;
1295 class = &port_priv->version[mad_reg_req->mgmt_class_version].class;
1296 if (!*class) {
1297 /* Allocate management class table for "new" class version */
1298 *class = kzalloc(sizeof **class, GFP_ATOMIC);
1299 if (!*class) {
1300 printk(KERN_ERR PFX "No memory for "
1301 "ib_mad_mgmt_class_table\n");
1302 ret = -ENOMEM;
1303 goto error1;
1304 }
1305
1306 /* Allocate method table for this management class */
1307 method = &(*class)->method_table[mgmt_class];
1308 if ((ret = allocate_method_table(method)))
1309 goto error2;
1310 } else {
1311 method = &(*class)->method_table[mgmt_class];
1312 if (!*method) {
1313 /* Allocate method table for this management class */
1314 if ((ret = allocate_method_table(method)))
1315 goto error1;
1316 }
1317 }
1318
1319 /* Now, make sure methods are not already in use */
1320 if (method_in_use(method, mad_reg_req))
1321 goto error3;
1322
1323 /* Finally, add in methods being registered */
1324 for (i = find_first_bit(mad_reg_req->method_mask,
1325 IB_MGMT_MAX_METHODS);
1326 i < IB_MGMT_MAX_METHODS;
1327 i = find_next_bit(mad_reg_req->method_mask, IB_MGMT_MAX_METHODS,
1328 1+i)) {
1329 (*method)->agent[i] = agent_priv;
1330 }
1331 return 0;
1332
1333error3:
1334 /* Remove any methods for this mad agent */
1335 remove_methods_mad_agent(*method, agent_priv);
1336 /* Now, check to see if there are any methods in use */
1337 if (!check_method_table(*method)) {
1338 /* If not, release management method table */
1339 kfree(*method);
1340 *method = NULL;
1341 }
1342 ret = -EINVAL;
1343 goto error1;
1344error2:
1345 kfree(*class);
1346 *class = NULL;
1347error1:
1348 return ret;
1349}
1350
1351static int add_oui_reg_req(struct ib_mad_reg_req *mad_reg_req,
1352 struct ib_mad_agent_private *agent_priv)
1353{
1354 struct ib_mad_port_private *port_priv;
1355 struct ib_mad_mgmt_vendor_class_table **vendor_table;
1356 struct ib_mad_mgmt_vendor_class_table *vendor = NULL;
1357 struct ib_mad_mgmt_vendor_class *vendor_class = NULL;
1358 struct ib_mad_mgmt_method_table **method;
1359 int i, ret = -ENOMEM;
1360 u8 vclass;
1361
1362 /* "New" vendor (with OUI) class */
1363 vclass = vendor_class_index(mad_reg_req->mgmt_class);
1364 port_priv = agent_priv->qp_info->port_priv;
1365 vendor_table = &port_priv->version[
1366 mad_reg_req->mgmt_class_version].vendor;
1367 if (!*vendor_table) {
1368 /* Allocate mgmt vendor class table for "new" class version */
1369 vendor = kzalloc(sizeof *vendor, GFP_ATOMIC);
1370 if (!vendor) {
1371 printk(KERN_ERR PFX "No memory for "
1372 "ib_mad_mgmt_vendor_class_table\n");
1373 goto error1;
1374 }
1375
1376 *vendor_table = vendor;
1377 }
1378 if (!(*vendor_table)->vendor_class[vclass]) {
1379 /* Allocate table for this management vendor class */
1380 vendor_class = kzalloc(sizeof *vendor_class, GFP_ATOMIC);
1381 if (!vendor_class) {
1382 printk(KERN_ERR PFX "No memory for "
1383 "ib_mad_mgmt_vendor_class\n");
1384 goto error2;
1385 }
1386
1387 (*vendor_table)->vendor_class[vclass] = vendor_class;
1388 }
1389 for (i = 0; i < MAX_MGMT_OUI; i++) {
1390 /* Is there a matching OUI for this vendor class? */
1391 if (!memcmp((*vendor_table)->vendor_class[vclass]->oui[i],
1392 mad_reg_req->oui, 3)) {
1393 method = &(*vendor_table)->vendor_class[
1394 vclass]->method_table[i];
1395 BUG_ON(!*method);
1396 goto check_in_use;
1397 }
1398 }
1399 for (i = 0; i < MAX_MGMT_OUI; i++) {
1400 /* OUI slot available ? */
1401 if (!is_vendor_oui((*vendor_table)->vendor_class[
1402 vclass]->oui[i])) {
1403 method = &(*vendor_table)->vendor_class[
1404 vclass]->method_table[i];
1405 BUG_ON(*method);
1406 /* Allocate method table for this OUI */
1407 if ((ret = allocate_method_table(method)))
1408 goto error3;
1409 memcpy((*vendor_table)->vendor_class[vclass]->oui[i],
1410 mad_reg_req->oui, 3);
1411 goto check_in_use;
1412 }
1413 }
1414 printk(KERN_ERR PFX "All OUI slots in use\n");
1415 goto error3;
1416
1417check_in_use:
1418 /* Now, make sure methods are not already in use */
1419 if (method_in_use(method, mad_reg_req))
1420 goto error4;
1421
1422 /* Finally, add in methods being registered */
1423 for (i = find_first_bit(mad_reg_req->method_mask,
1424 IB_MGMT_MAX_METHODS);
1425 i < IB_MGMT_MAX_METHODS;
1426 i = find_next_bit(mad_reg_req->method_mask, IB_MGMT_MAX_METHODS,
1427 1+i)) {
1428 (*method)->agent[i] = agent_priv;
1429 }
1430 return 0;
1431
1432error4:
1433 /* Remove any methods for this mad agent */
1434 remove_methods_mad_agent(*method, agent_priv);
1435 /* Now, check to see if there are any methods in use */
1436 if (!check_method_table(*method)) {
1437 /* If not, release management method table */
1438 kfree(*method);
1439 *method = NULL;
1440 }
1441 ret = -EINVAL;
1442error3:
1443 if (vendor_class) {
1444 (*vendor_table)->vendor_class[vclass] = NULL;
1445 kfree(vendor_class);
1446 }
1447error2:
1448 if (vendor) {
1449 *vendor_table = NULL;
1450 kfree(vendor);
1451 }
1452error1:
1453 return ret;
1454}
1455
1456static void remove_mad_reg_req(struct ib_mad_agent_private *agent_priv)
1457{
1458 struct ib_mad_port_private *port_priv;
1459 struct ib_mad_mgmt_class_table *class;
1460 struct ib_mad_mgmt_method_table *method;
1461 struct ib_mad_mgmt_vendor_class_table *vendor;
1462 struct ib_mad_mgmt_vendor_class *vendor_class;
1463 int index;
1464 u8 mgmt_class;
1465
1466 /*
1467 * Was MAD registration request supplied
1468 * with original registration ?
1469 */
1470 if (!agent_priv->reg_req) {
1471 goto out;
1472 }
1473
1474 port_priv = agent_priv->qp_info->port_priv;
1475 mgmt_class = convert_mgmt_class(agent_priv->reg_req->mgmt_class);
1476 class = port_priv->version[
1477 agent_priv->reg_req->mgmt_class_version].class;
1478 if (!class)
1479 goto vendor_check;
1480
1481 method = class->method_table[mgmt_class];
1482 if (method) {
1483 /* Remove any methods for this mad agent */
1484 remove_methods_mad_agent(method, agent_priv);
1485 /* Now, check to see if there are any methods still in use */
1486 if (!check_method_table(method)) {
1487 /* If not, release management method table */
1488 kfree(method);
1489 class->method_table[mgmt_class] = NULL;
1490 /* Any management classes left ? */
1491 if (!check_class_table(class)) {
1492 /* If not, release management class table */
1493 kfree(class);
1494 port_priv->version[
1495 agent_priv->reg_req->
1496 mgmt_class_version].class = NULL;
1497 }
1498 }
1499 }
1500
1501vendor_check:
1502 if (!is_vendor_class(mgmt_class))
1503 goto out;
1504
1505 /* normalize mgmt_class to vendor range 2 */
1506 mgmt_class = vendor_class_index(agent_priv->reg_req->mgmt_class);
1507 vendor = port_priv->version[
1508 agent_priv->reg_req->mgmt_class_version].vendor;
1509
1510 if (!vendor)
1511 goto out;
1512
1513 vendor_class = vendor->vendor_class[mgmt_class];
1514 if (vendor_class) {
1515 index = find_vendor_oui(vendor_class, agent_priv->reg_req->oui);
1516 if (index < 0)
1517 goto out;
1518 method = vendor_class->method_table[index];
1519 if (method) {
1520 /* Remove any methods for this mad agent */
1521 remove_methods_mad_agent(method, agent_priv);
1522 /*
1523 * Now, check to see if there are
1524 * any methods still in use
1525 */
1526 if (!check_method_table(method)) {
1527 /* If not, release management method table */
1528 kfree(method);
1529 vendor_class->method_table[index] = NULL;
1530 memset(vendor_class->oui[index], 0, 3);
1531 /* Any OUIs left ? */
1532 if (!check_vendor_class(vendor_class)) {
1533 /* If not, release vendor class table */
1534 kfree(vendor_class);
1535 vendor->vendor_class[mgmt_class] = NULL;
1536 /* Any other vendor classes left ? */
1537 if (!check_vendor_table(vendor)) {
1538 kfree(vendor);
1539 port_priv->version[
1540 agent_priv->reg_req->
1541 mgmt_class_version].
1542 vendor = NULL;
1543 }
1544 }
1545 }
1546 }
1547 }
1548
1549out:
1550 return;
1551}
1552
1553static struct ib_mad_agent_private *
1554find_mad_agent(struct ib_mad_port_private *port_priv,
1555 struct ib_mad *mad)
1556{
1557 struct ib_mad_agent_private *mad_agent = NULL;
1558 unsigned long flags;
1559
1560 spin_lock_irqsave(&port_priv->reg_lock, flags);
1561 if (ib_response_mad(mad)) {
1562 u32 hi_tid;
1563 struct ib_mad_agent_private *entry;
1564
1565 /*
1566 * Routing is based on high 32 bits of transaction ID
1567 * of MAD.
1568 */
1569 hi_tid = be64_to_cpu(mad->mad_hdr.tid) >> 32;
1570 list_for_each_entry(entry, &port_priv->agent_list, agent_list) {
1571 if (entry->agent.hi_tid == hi_tid) {
1572 mad_agent = entry;
1573 break;
1574 }
1575 }
1576 } else {
1577 struct ib_mad_mgmt_class_table *class;
1578 struct ib_mad_mgmt_method_table *method;
1579 struct ib_mad_mgmt_vendor_class_table *vendor;
1580 struct ib_mad_mgmt_vendor_class *vendor_class;
1581 struct ib_vendor_mad *vendor_mad;
1582 int index;
1583
1584 /*
1585 * Routing is based on version, class, and method
1586 * For "newer" vendor MADs, also based on OUI
1587 */
1588 if (mad->mad_hdr.class_version >= MAX_MGMT_VERSION)
1589 goto out;
1590 if (!is_vendor_class(mad->mad_hdr.mgmt_class)) {
1591 class = port_priv->version[
1592 mad->mad_hdr.class_version].class;
1593 if (!class)
1594 goto out;
1595 method = class->method_table[convert_mgmt_class(
1596 mad->mad_hdr.mgmt_class)];
1597 if (method)
1598 mad_agent = method->agent[mad->mad_hdr.method &
1599 ~IB_MGMT_METHOD_RESP];
1600 } else {
1601 vendor = port_priv->version[
1602 mad->mad_hdr.class_version].vendor;
1603 if (!vendor)
1604 goto out;
1605 vendor_class = vendor->vendor_class[vendor_class_index(
1606 mad->mad_hdr.mgmt_class)];
1607 if (!vendor_class)
1608 goto out;
1609 /* Find matching OUI */
1610 vendor_mad = (struct ib_vendor_mad *)mad;
1611 index = find_vendor_oui(vendor_class, vendor_mad->oui);
1612 if (index == -1)
1613 goto out;
1614 method = vendor_class->method_table[index];
1615 if (method) {
1616 mad_agent = method->agent[mad->mad_hdr.method &
1617 ~IB_MGMT_METHOD_RESP];
1618 }
1619 }
1620 }
1621
1622 if (mad_agent) {
1623 if (mad_agent->agent.recv_handler)
1624 atomic_inc(&mad_agent->refcount);
1625 else {
1626 printk(KERN_NOTICE PFX "No receive handler for client "
1627 "%p on port %d\n",
1628 &mad_agent->agent, port_priv->port_num);
1629 mad_agent = NULL;
1630 }
1631 }
1632out:
1633 spin_unlock_irqrestore(&port_priv->reg_lock, flags);
1634
1635 return mad_agent;
1636}
1637
1638static int validate_mad(struct ib_mad *mad, u32 qp_num)
1639{
1640 int valid = 0;
1641
1642 /* Make sure MAD base version is understood */
1643 if (mad->mad_hdr.base_version != IB_MGMT_BASE_VERSION) {
1644 printk(KERN_ERR PFX "MAD received with unsupported base "
1645 "version %d\n", mad->mad_hdr.base_version);
1646 goto out;
1647 }
1648
1649 /* Filter SMI packets sent to other than QP0 */
1650 if ((mad->mad_hdr.mgmt_class == IB_MGMT_CLASS_SUBN_LID_ROUTED) ||
1651 (mad->mad_hdr.mgmt_class == IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE)) {
1652 if (qp_num == 0)
1653 valid = 1;
1654 } else {
1655 /* Filter GSI packets sent to QP0 */
1656 if (qp_num != 0)
1657 valid = 1;
1658 }
1659
1660out:
1661 return valid;
1662}
1663
1664static int is_data_mad(struct ib_mad_agent_private *mad_agent_priv,
1665 struct ib_mad_hdr *mad_hdr)
1666{
1667 struct ib_rmpp_mad *rmpp_mad;
1668
1669 rmpp_mad = (struct ib_rmpp_mad *)mad_hdr;
1670 return !mad_agent_priv->agent.rmpp_version ||
1671 !(ib_get_rmpp_flags(&rmpp_mad->rmpp_hdr) &
1672 IB_MGMT_RMPP_FLAG_ACTIVE) ||
1673 (rmpp_mad->rmpp_hdr.rmpp_type == IB_MGMT_RMPP_TYPE_DATA);
1674}
1675
1676static inline int rcv_has_same_class(struct ib_mad_send_wr_private *wr,
1677 struct ib_mad_recv_wc *rwc)
1678{
1679 return ((struct ib_mad *)(wr->send_buf.mad))->mad_hdr.mgmt_class ==
1680 rwc->recv_buf.mad->mad_hdr.mgmt_class;
1681}
1682
1683static inline int rcv_has_same_gid(struct ib_mad_agent_private *mad_agent_priv,
1684 struct ib_mad_send_wr_private *wr,
1685 struct ib_mad_recv_wc *rwc )
1686{
1687 struct ib_ah_attr attr;
1688 u8 send_resp, rcv_resp;
1689 union ib_gid sgid;
1690 struct ib_device *device = mad_agent_priv->agent.device;
1691 u8 port_num = mad_agent_priv->agent.port_num;
1692 u8 lmc;
1693
1694 send_resp = ib_response_mad((struct ib_mad *)wr->send_buf.mad);
1695 rcv_resp = ib_response_mad(rwc->recv_buf.mad);
1696
1697 if (send_resp == rcv_resp)
1698 /* both requests, or both responses. GIDs different */
1699 return 0;
1700
1701 if (ib_query_ah(wr->send_buf.ah, &attr))
1702 /* Assume not equal, to avoid false positives. */
1703 return 0;
1704
1705 if (!!(attr.ah_flags & IB_AH_GRH) !=
1706 !!(rwc->wc->wc_flags & IB_WC_GRH))
1707 /* one has GID, other does not. Assume different */
1708 return 0;
1709
1710 if (!send_resp && rcv_resp) {
1711 /* is request/response. */
1712 if (!(attr.ah_flags & IB_AH_GRH)) {
1713 if (ib_get_cached_lmc(device, port_num, &lmc))
1714 return 0;
1715 return (!lmc || !((attr.src_path_bits ^
1716 rwc->wc->dlid_path_bits) &
1717 ((1 << lmc) - 1)));
1718 } else {
1719 if (ib_get_cached_gid(device, port_num,
1720 attr.grh.sgid_index, &sgid))
1721 return 0;
1722 return !memcmp(sgid.raw, rwc->recv_buf.grh->dgid.raw,
1723 16);
1724 }
1725 }
1726
1727 if (!(attr.ah_flags & IB_AH_GRH))
1728 return attr.dlid == rwc->wc->slid;
1729 else
1730 return !memcmp(attr.grh.dgid.raw, rwc->recv_buf.grh->sgid.raw,
1731 16);
1732}
1733
1734static inline int is_direct(u8 class)
1735{
1736 return (class == IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE);
1737}
1738
1739struct ib_mad_send_wr_private*
1740ib_find_send_mad(struct ib_mad_agent_private *mad_agent_priv,
1741 struct ib_mad_recv_wc *wc)
1742 {
1743 struct ib_mad_send_wr_private *wr;
1744 struct ib_mad *mad;
1745
1746 mad = (struct ib_mad *)wc->recv_buf.mad;
1747
1748 list_for_each_entry(wr, &mad_agent_priv->wait_list, agent_list) {
1749 if ((wr->tid == mad->mad_hdr.tid) &&
1750 rcv_has_same_class(wr, wc) &&
1751 /*
1752 * Don't check GID for direct routed MADs.
1753 * These might have permissive LIDs.
1754 */
1755 (is_direct(wc->recv_buf.mad->mad_hdr.mgmt_class) ||
1756 rcv_has_same_gid(mad_agent_priv, wr, wc)))
39798695 1757 return (wr->status == IB_WC_SUCCESS) ? wr : NULL;
1758 }
1759
1760 /*
1761 * It's possible to receive the response before we've
1762 * been notified that the send has completed
1763 */
1764 list_for_each_entry(wr, &mad_agent_priv->send_list, agent_list) {
1765 if (is_data_mad(mad_agent_priv, wr->send_buf.mad) &&
1766 wr->tid == mad->mad_hdr.tid &&
1767 wr->timeout &&
1768 rcv_has_same_class(wr, wc) &&
1769 /*
1770 * Don't check GID for direct routed MADs.
1771 * These might have permissive LIDs.
1772 */
1773 (is_direct(wc->recv_buf.mad->mad_hdr.mgmt_class) ||
1774 rcv_has_same_gid(mad_agent_priv, wr, wc)))
1da177e4 1775 /* Verify request has not been canceled */
9874e746 1776 return (wr->status == IB_WC_SUCCESS) ? wr : NULL;
1777 }
1778 return NULL;
1779}
1780
fa619a77 1781void ib_mark_mad_done(struct ib_mad_send_wr_private *mad_send_wr)
1782{
1783 mad_send_wr->timeout = 0;
1784 if (mad_send_wr->refcount == 1)
1785 list_move_tail(&mad_send_wr->agent_list,
6a0c435e 1786 &mad_send_wr->mad_agent_priv->done_list);
1787}
1788
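/*
 * Deliver a received MAD to its agent.  For RMPP agents the segment is
 * first passed to ib_process_rmpp_recv_wc(), which may consume it and
 * return NULL until the whole transfer has been reassembled.  If the
 * MAD is a response, the matching request is completed here: the
 * response goes to recv_handler before the request's send completion
 * is reported through send_handler.
 */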
1da177e4 1789static void ib_mad_complete_recv(struct ib_mad_agent_private *mad_agent_priv,
4a0754fa 1790 struct ib_mad_recv_wc *mad_recv_wc)
1791{
1792 struct ib_mad_send_wr_private *mad_send_wr;
1793 struct ib_mad_send_wc mad_send_wc;
1794 unsigned long flags;
1795
1796 INIT_LIST_HEAD(&mad_recv_wc->rmpp_list);
1797 list_add(&mad_recv_wc->recv_buf.list, &mad_recv_wc->rmpp_list);
1798 if (mad_agent_priv->agent.rmpp_version) {
1799 mad_recv_wc = ib_process_rmpp_recv_wc(mad_agent_priv,
1800 mad_recv_wc);
1801 if (!mad_recv_wc) {
1b52fa98 1802 deref_mad_agent(mad_agent_priv);
1803 return;
1804 }
1805 }
1806
1da177e4 1807 /* Complete corresponding request */
2527e681 1808 if (ib_response_mad(mad_recv_wc->recv_buf.mad)) {
1da177e4 1809 spin_lock_irqsave(&mad_agent_priv->lock, flags);
fa9656bb 1810 mad_send_wr = ib_find_send_mad(mad_agent_priv, mad_recv_wc);
1811 if (!mad_send_wr) {
1812 spin_unlock_irqrestore(&mad_agent_priv->lock, flags);
4a0754fa 1813 ib_free_recv_mad(mad_recv_wc);
1b52fa98 1814 deref_mad_agent(mad_agent_priv);
1815 return;
1816 }
fa619a77 1817 ib_mark_mad_done(mad_send_wr);
1818 spin_unlock_irqrestore(&mad_agent_priv->lock, flags);
1819
1820 /* Defined behavior is to complete response before request */
34816ad9 1821 mad_recv_wc->wc->wr_id = (unsigned long) &mad_send_wr->send_buf;
1822 mad_agent_priv->agent.recv_handler(&mad_agent_priv->agent,
1823 mad_recv_wc);
1824 atomic_dec(&mad_agent_priv->refcount);
1825
1826 mad_send_wc.status = IB_WC_SUCCESS;
1827 mad_send_wc.vendor_err = 0;
34816ad9 1828 mad_send_wc.send_buf = &mad_send_wr->send_buf;
1829 ib_mad_complete_send_wr(mad_send_wr, &mad_send_wc);
1830 } else {
1831 mad_agent_priv->agent.recv_handler(&mad_agent_priv->agent,
1832 mad_recv_wc);
1b52fa98 1833 deref_mad_agent(mad_agent_priv);
1834 }
1835}
1836
1837static void ib_mad_recv_done_handler(struct ib_mad_port_private *port_priv,
1838 struct ib_wc *wc)
1839{
1840 struct ib_mad_qp_info *qp_info;
1841 struct ib_mad_private_header *mad_priv_hdr;
445d6807 1842 struct ib_mad_private *recv, *response = NULL;
1843 struct ib_mad_list_head *mad_list;
1844 struct ib_mad_agent_private *mad_agent;
1bae4dbf 1845 int port_num;
1da177e4 1846
1847 mad_list = (struct ib_mad_list_head *)(unsigned long)wc->wr_id;
1848 qp_info = mad_list->mad_queue->qp_info;
1849 dequeue_mad(mad_list);
1850
1851 mad_priv_hdr = container_of(mad_list, struct ib_mad_private_header,
1852 mad_list);
1853 recv = container_of(mad_priv_hdr, struct ib_mad_private, header);
1854 ib_dma_unmap_single(port_priv->device,
1855 recv->header.mapping,
1856 sizeof(struct ib_mad_private) -
1857 sizeof(struct ib_mad_private_header),
1858 DMA_FROM_DEVICE);
1859
1860 /* Setup MAD receive work completion from "normal" work completion */
1861 recv->header.wc = *wc;
1862 recv->header.recv_wc.wc = &recv->header.wc;
1863 recv->header.recv_wc.mad_len = sizeof(struct ib_mad);
1864 recv->header.recv_wc.recv_buf.mad = &recv->mad.mad;
1865 recv->header.recv_wc.recv_buf.grh = &recv->grh;
1866
1867 if (atomic_read(&qp_info->snoop_count))
1868 snoop_recv(qp_info, &recv->header.recv_wc, IB_MAD_SNOOP_RECVS);
1869
1870 /* Validate MAD */
1871 if (!validate_mad(&recv->mad.mad, qp_info->qp->qp_num))
1872 goto out;
1873
1874 response = kmem_cache_alloc(ib_mad_cache, GFP_KERNEL);
1875 if (!response) {
1876 printk(KERN_ERR PFX "ib_mad_recv_done_handler no memory "
1877 "for response buffer\n");
1878 goto out;
1879 }
1880
1881 if (port_priv->device->node_type == RDMA_NODE_IB_SWITCH)
1882 port_num = wc->port_num;
1883 else
1884 port_num = port_priv->port_num;
1885
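	/*
	 * Directed-route SMPs get SMI processing first: MADs with bad
	 * hop fields are discarded, MADs addressed to this port fall
	 * through to local processing, and on a switch a MAD still in
	 * transit is forwarded out the egress port.
	 */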
1886 if (recv->mad.mad.mad_hdr.mgmt_class ==
1887 IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE) {
1888 enum smi_forward_action retsmi;
1889
1890 if (smi_handle_dr_smp_recv(&recv->mad.smp,
1891 port_priv->device->node_type,
1bae4dbf 1892 port_num,
1893 port_priv->device->phys_port_cnt) ==
1894 IB_SMI_DISCARD)
1da177e4 1895 goto out;
de493d47 1896
1897 retsmi = smi_check_forward_dr_smp(&recv->mad.smp);
1898 if (retsmi == IB_SMI_LOCAL)
1da177e4 1899 goto local;
de493d47 1900
1901 if (retsmi == IB_SMI_SEND) { /* don't forward */
1902 if (smi_handle_dr_smp_send(&recv->mad.smp,
1903 port_priv->device->node_type,
1904 port_num) == IB_SMI_DISCARD)
1905 goto out;
1906
1907 if (smi_check_local_smp(&recv->mad.smp, port_priv->device) == IB_SMI_DISCARD)
1908 goto out;
1909 } else if (port_priv->device->node_type == RDMA_NODE_IB_SWITCH) {
1910 /* forward case for switches */
1911 memcpy(response, recv, sizeof(*response));
1912 response->header.recv_wc.wc = &response->header.wc;
1913 response->header.recv_wc.recv_buf.mad = &response->mad.mad;
1914 response->header.recv_wc.recv_buf.grh = &response->grh;
1915
1916 agent_send_response(&response->mad.mad,
1917 &response->grh, wc,
1918 port_priv->device,
1919 smi_get_fwd_port(&recv->mad.smp),
1920 qp_info->qp->qp_num);
de493d47 1921
1da177e4 1922 goto out;
1bae4dbf 1923 }
1924 }
1925
1926local:
1927 /* Give driver "right of first refusal" on incoming MAD */
1928 if (port_priv->device->process_mad) {
1929 int ret;
1930
1931 ret = port_priv->device->process_mad(port_priv->device, 0,
1932 port_priv->port_num,
1933 wc, &recv->grh,
1934 &recv->mad.mad,
1935 &response->mad.mad);
1936 if (ret & IB_MAD_RESULT_SUCCESS) {
1937 if (ret & IB_MAD_RESULT_CONSUMED)
1938 goto out;
1939 if (ret & IB_MAD_RESULT_REPLY) {
1940 agent_send_response(&response->mad.mad,
1941 &recv->grh, wc,
1942 port_priv->device,
1bae4dbf 1943 port_num,
34816ad9 1944 qp_info->qp->qp_num);
1945 goto out;
1946 }
1947 }
1948 }
1949
4a0754fa 1950 mad_agent = find_mad_agent(port_priv, &recv->mad.mad);
1da177e4 1951 if (mad_agent) {
4a0754fa 1952 ib_mad_complete_recv(mad_agent, &recv->header.recv_wc);
1953 /*
 1954 * recv is freed inside ib_mad_complete_recv(): either on its
 1955 * error paths or by the agent's recv_handler
1956 */
1957 recv = NULL;
1958 }
1959
1960out:
1961 /* Post another receive request for this QP */
1962 if (response) {
1963 ib_mad_post_receive_mads(qp_info, response);
1964 if (recv)
1965 kmem_cache_free(ib_mad_cache, recv);
1966 } else
1967 ib_mad_post_receive_mads(qp_info, recv);
1968}
1969
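/*
 * The wait list is kept sorted by absolute expiry time, so only its
 * head has to be examined to decide whether the delayed timeout work
 * needs to be rescheduled to run sooner.
 */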
1970static void adjust_timeout(struct ib_mad_agent_private *mad_agent_priv)
1971{
1972 struct ib_mad_send_wr_private *mad_send_wr;
1973 unsigned long delay;
1974
1975 if (list_empty(&mad_agent_priv->wait_list)) {
1976 cancel_delayed_work(&mad_agent_priv->timed_work);
1977 } else {
1978 mad_send_wr = list_entry(mad_agent_priv->wait_list.next,
1979 struct ib_mad_send_wr_private,
1980 agent_list);
1981
1982 if (time_after(mad_agent_priv->timeout,
1983 mad_send_wr->timeout)) {
1984 mad_agent_priv->timeout = mad_send_wr->timeout;
1985 cancel_delayed_work(&mad_agent_priv->timed_work);
1986 delay = mad_send_wr->timeout - jiffies;
1987 if ((long)delay <= 0)
1988 delay = 1;
1989 queue_delayed_work(mad_agent_priv->qp_info->
1990 port_priv->wq,
1991 &mad_agent_priv->timed_work, delay);
1992 }
1993 }
1994}
1995
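/*
 * Queue a request on the wait list in expiry order and, if it becomes
 * the new head of the list, pull the timeout work forward to match.
 */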
d760ce8f 1996static void wait_for_response(struct ib_mad_send_wr_private *mad_send_wr)
1da177e4 1997{
d760ce8f 1998 struct ib_mad_agent_private *mad_agent_priv;
1999 struct ib_mad_send_wr_private *temp_mad_send_wr;
2000 struct list_head *list_item;
2001 unsigned long delay;
2002
d760ce8f 2003 mad_agent_priv = mad_send_wr->mad_agent_priv;
2004 list_del(&mad_send_wr->agent_list);
2005
2006 delay = mad_send_wr->timeout;
2007 mad_send_wr->timeout += jiffies;
2008
2009 if (delay) {
2010 list_for_each_prev(list_item, &mad_agent_priv->wait_list) {
2011 temp_mad_send_wr = list_entry(list_item,
2012 struct ib_mad_send_wr_private,
2013 agent_list);
2014 if (time_after(mad_send_wr->timeout,
2015 temp_mad_send_wr->timeout))
2016 break;
2017 }
1da177e4 2018 }
2019 else
2020 list_item = &mad_agent_priv->wait_list;
2021 list_add(&mad_send_wr->agent_list, list_item);
2022
2023 /* Reschedule a work item if we have a shorter timeout */
2024 if (mad_agent_priv->wait_list.next == &mad_send_wr->agent_list) {
2025 cancel_delayed_work(&mad_agent_priv->timed_work);
2026 queue_delayed_work(mad_agent_priv->qp_info->port_priv->wq,
2027 &mad_agent_priv->timed_work, delay);
2028 }
2029}
2030
2031void ib_reset_mad_timeout(struct ib_mad_send_wr_private *mad_send_wr,
2032 int timeout_ms)
2033{
2034 mad_send_wr->timeout = msecs_to_jiffies(timeout_ms);
2035 wait_for_response(mad_send_wr);
2036}
2037
2038/*
2039 * Process a send work completion
2040 */
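/*
 * For RMPP agents, ib_process_rmpp_send_wc() may consume the
 * completion (e.g. for intermediate segments); otherwise the send's
 * reference count determines when the completion is finally reported,
 * either internally or through the client's send_handler.
 */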
2041void ib_mad_complete_send_wr(struct ib_mad_send_wr_private *mad_send_wr,
2042 struct ib_mad_send_wc *mad_send_wc)
2043{
2044 struct ib_mad_agent_private *mad_agent_priv;
2045 unsigned long flags;
fa619a77 2046 int ret;
1da177e4 2047
d760ce8f 2048 mad_agent_priv = mad_send_wr->mad_agent_priv;
1da177e4 2049 spin_lock_irqsave(&mad_agent_priv->lock, flags);
2050 if (mad_agent_priv->agent.rmpp_version) {
2051 ret = ib_process_rmpp_send_wc(mad_send_wr, mad_send_wc);
2052 if (ret == IB_RMPP_RESULT_CONSUMED)
2053 goto done;
2054 } else
2055 ret = IB_RMPP_RESULT_UNHANDLED;
2056
2057 if (mad_send_wc->status != IB_WC_SUCCESS &&
2058 mad_send_wr->status == IB_WC_SUCCESS) {
2059 mad_send_wr->status = mad_send_wc->status;
2060 mad_send_wr->refcount -= (mad_send_wr->timeout > 0);
2061 }
2062
2063 if (--mad_send_wr->refcount > 0) {
2064 if (mad_send_wr->refcount == 1 && mad_send_wr->timeout &&
2065 mad_send_wr->status == IB_WC_SUCCESS) {
d760ce8f 2066 wait_for_response(mad_send_wr);
1da177e4 2067 }
fa619a77 2068 goto done;
2069 }
2070
2071 /* Remove send from MAD agent and notify client of completion */
2072 list_del(&mad_send_wr->agent_list);
2073 adjust_timeout(mad_agent_priv);
2074 spin_unlock_irqrestore(&mad_agent_priv->lock, flags);
2075
 2076 if (mad_send_wr->status != IB_WC_SUCCESS)
2077 mad_send_wc->status = mad_send_wr->status;
2078 if (ret == IB_RMPP_RESULT_INTERNAL)
2079 ib_rmpp_send_handler(mad_send_wc);
2080 else
2081 mad_agent_priv->agent.send_handler(&mad_agent_priv->agent,
2082 mad_send_wc);
2083
2084 /* Release reference on agent taken when sending */
1b52fa98 2085 deref_mad_agent(mad_agent_priv);
2086 return;
2087done:
2088 spin_unlock_irqrestore(&mad_agent_priv->lock, flags);
2089}
2090
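/*
 * Completion of a posted send: unmap the header and payload buffers
 * and, if sends were parked on the overflow list because the QP's
 * send queue was full, post the next one into the freed slot.
 */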
2091static void ib_mad_send_done_handler(struct ib_mad_port_private *port_priv,
2092 struct ib_wc *wc)
2093{
2094 struct ib_mad_send_wr_private *mad_send_wr, *queued_send_wr;
2095 struct ib_mad_list_head *mad_list;
2096 struct ib_mad_qp_info *qp_info;
2097 struct ib_mad_queue *send_queue;
2098 struct ib_send_wr *bad_send_wr;
34816ad9 2099 struct ib_mad_send_wc mad_send_wc;
2100 unsigned long flags;
2101 int ret;
2102
2103 mad_list = (struct ib_mad_list_head *)(unsigned long)wc->wr_id;
2104 mad_send_wr = container_of(mad_list, struct ib_mad_send_wr_private,
2105 mad_list);
2106 send_queue = mad_list->mad_queue;
2107 qp_info = send_queue->qp_info;
2108
2109retry:
2110 ib_dma_unmap_single(mad_send_wr->send_buf.mad_agent->device,
2111 mad_send_wr->header_mapping,
2112 mad_send_wr->sg_list[0].length, DMA_TO_DEVICE);
2113 ib_dma_unmap_single(mad_send_wr->send_buf.mad_agent->device,
2114 mad_send_wr->payload_mapping,
2115 mad_send_wr->sg_list[1].length, DMA_TO_DEVICE);
2116 queued_send_wr = NULL;
2117 spin_lock_irqsave(&send_queue->lock, flags);
2118 list_del(&mad_list->list);
2119
2120 /* Move queued send to the send queue */
2121 if (send_queue->count-- > send_queue->max_active) {
2122 mad_list = container_of(qp_info->overflow_list.next,
2123 struct ib_mad_list_head, list);
2124 queued_send_wr = container_of(mad_list,
2125 struct ib_mad_send_wr_private,
2126 mad_list);
179e0917 2127 list_move_tail(&mad_list->list, &send_queue->list);
2128 }
2129 spin_unlock_irqrestore(&send_queue->lock, flags);
2130
2131 mad_send_wc.send_buf = &mad_send_wr->send_buf;
2132 mad_send_wc.status = wc->status;
2133 mad_send_wc.vendor_err = wc->vendor_err;
1da177e4 2134 if (atomic_read(&qp_info->snoop_count))
34816ad9 2135 snoop_send(qp_info, &mad_send_wr->send_buf, &mad_send_wc,
1da177e4 2136 IB_MAD_SNOOP_SEND_COMPLETIONS);
34816ad9 2137 ib_mad_complete_send_wr(mad_send_wr, &mad_send_wc);
2138
2139 if (queued_send_wr) {
2140 ret = ib_post_send(qp_info->qp, &queued_send_wr->send_wr,
34816ad9 2141 &bad_send_wr);
2142 if (ret) {
2143 printk(KERN_ERR PFX "ib_post_send failed: %d\n", ret);
2144 mad_send_wr = queued_send_wr;
2145 wc->status = IB_WC_LOC_QP_OP_ERR;
2146 goto retry;
2147 }
2148 }
2149}
2150
2151static void mark_sends_for_retry(struct ib_mad_qp_info *qp_info)
2152{
2153 struct ib_mad_send_wr_private *mad_send_wr;
2154 struct ib_mad_list_head *mad_list;
2155 unsigned long flags;
2156
2157 spin_lock_irqsave(&qp_info->send_queue.lock, flags);
2158 list_for_each_entry(mad_list, &qp_info->send_queue.list, list) {
2159 mad_send_wr = container_of(mad_list,
2160 struct ib_mad_send_wr_private,
2161 mad_list);
2162 mad_send_wr->retry = 1;
2163 }
2164 spin_unlock_irqrestore(&qp_info->send_queue.lock, flags);
2165}
2166
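/*
 * A flushed send is reposted once (guarded by the retry flag); any
 * other send error means the QP has dropped to SQE, so move it back
 * to RTS and mark the remaining queued sends for retry.
 */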
2167static void mad_error_handler(struct ib_mad_port_private *port_priv,
2168 struct ib_wc *wc)
2169{
2170 struct ib_mad_list_head *mad_list;
2171 struct ib_mad_qp_info *qp_info;
2172 struct ib_mad_send_wr_private *mad_send_wr;
2173 int ret;
2174
2175 /* Determine if failure was a send or receive */
2176 mad_list = (struct ib_mad_list_head *)(unsigned long)wc->wr_id;
2177 qp_info = mad_list->mad_queue->qp_info;
2178 if (mad_list->mad_queue == &qp_info->recv_queue)
2179 /*
2180 * Receive errors indicate that the QP has entered the error
2181 * state - error handling/shutdown code will cleanup
2182 */
2183 return;
2184
2185 /*
2186 * Send errors will transition the QP to SQE - move
2187 * QP to RTS and repost flushed work requests
2188 */
2189 mad_send_wr = container_of(mad_list, struct ib_mad_send_wr_private,
2190 mad_list);
2191 if (wc->status == IB_WC_WR_FLUSH_ERR) {
2192 if (mad_send_wr->retry) {
2193 /* Repost send */
2194 struct ib_send_wr *bad_send_wr;
2195
2196 mad_send_wr->retry = 0;
2197 ret = ib_post_send(qp_info->qp, &mad_send_wr->send_wr,
2198 &bad_send_wr);
2199 if (ret)
2200 ib_mad_send_done_handler(port_priv, wc);
2201 } else
2202 ib_mad_send_done_handler(port_priv, wc);
2203 } else {
2204 struct ib_qp_attr *attr;
2205
2206 /* Transition QP to RTS and fail offending send */
2207 attr = kmalloc(sizeof *attr, GFP_KERNEL);
2208 if (attr) {
2209 attr->qp_state = IB_QPS_RTS;
2210 attr->cur_qp_state = IB_QPS_SQE;
2211 ret = ib_modify_qp(qp_info->qp, attr,
2212 IB_QP_STATE | IB_QP_CUR_STATE);
2213 kfree(attr);
2214 if (ret)
2215 printk(KERN_ERR PFX "mad_error_handler - "
2216 "ib_modify_qp to RTS : %d\n", ret);
2217 else
2218 mark_sends_for_retry(qp_info);
2219 }
2220 ib_mad_send_done_handler(port_priv, wc);
2221 }
2222}
2223
2224/*
2225 * IB MAD completion callback
2226 */
c4028958 2227static void ib_mad_completion_handler(struct work_struct *work)
2228{
2229 struct ib_mad_port_private *port_priv;
2230 struct ib_wc wc;
2231
c4028958 2232 port_priv = container_of(work, struct ib_mad_port_private, work);
2233 ib_req_notify_cq(port_priv->cq, IB_CQ_NEXT_COMP);
2234
2235 while (ib_poll_cq(port_priv->cq, 1, &wc) == 1) {
2236 if (wc.status == IB_WC_SUCCESS) {
2237 switch (wc.opcode) {
2238 case IB_WC_SEND:
2239 ib_mad_send_done_handler(port_priv, &wc);
2240 break;
2241 case IB_WC_RECV:
2242 ib_mad_recv_done_handler(port_priv, &wc);
2243 break;
2244 default:
2245 BUG_ON(1);
2246 break;
2247 }
2248 } else
2249 mad_error_handler(port_priv, &wc);
2250 }
2251}
2252
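/*
 * Flush every outstanding send for this agent: requests are pulled off
 * the send and wait lists and completed back to the client with
 * IB_WC_WR_FLUSH_ERR.
 */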
2253static void cancel_mads(struct ib_mad_agent_private *mad_agent_priv)
2254{
2255 unsigned long flags;
2256 struct ib_mad_send_wr_private *mad_send_wr, *temp_mad_send_wr;
2257 struct ib_mad_send_wc mad_send_wc;
2258 struct list_head cancel_list;
2259
2260 INIT_LIST_HEAD(&cancel_list);
2261
2262 spin_lock_irqsave(&mad_agent_priv->lock, flags);
2263 list_for_each_entry_safe(mad_send_wr, temp_mad_send_wr,
2264 &mad_agent_priv->send_list, agent_list) {
2265 if (mad_send_wr->status == IB_WC_SUCCESS) {
3cd96564 2266 mad_send_wr->status = IB_WC_WR_FLUSH_ERR;
2267 mad_send_wr->refcount -= (mad_send_wr->timeout > 0);
2268 }
2269 }
2270
2271 /* Empty wait list to prevent receives from finding a request */
2272 list_splice_init(&mad_agent_priv->wait_list, &cancel_list);
2273 spin_unlock_irqrestore(&mad_agent_priv->lock, flags);
2274
2275 /* Report all cancelled requests */
2276 mad_send_wc.status = IB_WC_WR_FLUSH_ERR;
2277 mad_send_wc.vendor_err = 0;
2278
2279 list_for_each_entry_safe(mad_send_wr, temp_mad_send_wr,
2280 &cancel_list, agent_list) {
2281 mad_send_wc.send_buf = &mad_send_wr->send_buf;
2282 list_del(&mad_send_wr->agent_list);
2283 mad_agent_priv->agent.send_handler(&mad_agent_priv->agent,
2284 &mad_send_wc);
2285 atomic_dec(&mad_agent_priv->refcount);
2286 }
2287}
2288
2289static struct ib_mad_send_wr_private*
2290find_send_wr(struct ib_mad_agent_private *mad_agent_priv,
2291 struct ib_mad_send_buf *send_buf)
2292{
2293 struct ib_mad_send_wr_private *mad_send_wr;
2294
2295 list_for_each_entry(mad_send_wr, &mad_agent_priv->wait_list,
2296 agent_list) {
34816ad9 2297 if (&mad_send_wr->send_buf == send_buf)
2298 return mad_send_wr;
2299 }
2300
2301 list_for_each_entry(mad_send_wr, &mad_agent_priv->send_list,
2302 agent_list) {
2303 if (is_data_mad(mad_agent_priv, mad_send_wr->send_buf.mad) &&
2304 &mad_send_wr->send_buf == send_buf)
2305 return mad_send_wr;
2306 }
2307 return NULL;
2308}
2309
2310int ib_modify_mad(struct ib_mad_agent *mad_agent,
2311 struct ib_mad_send_buf *send_buf, u32 timeout_ms)
2312{
2313 struct ib_mad_agent_private *mad_agent_priv;
2314 struct ib_mad_send_wr_private *mad_send_wr;
2315 unsigned long flags;
cabe3cbc 2316 int active;
2317
2318 mad_agent_priv = container_of(mad_agent, struct ib_mad_agent_private,
2319 agent);
2320 spin_lock_irqsave(&mad_agent_priv->lock, flags);
34816ad9 2321 mad_send_wr = find_send_wr(mad_agent_priv, send_buf);
03b61ad2 2322 if (!mad_send_wr || mad_send_wr->status != IB_WC_SUCCESS) {
1da177e4 2323 spin_unlock_irqrestore(&mad_agent_priv->lock, flags);
03b61ad2 2324 return -EINVAL;
2325 }
2326
cabe3cbc 2327 active = (!mad_send_wr->timeout || mad_send_wr->refcount > 1);
03b61ad2 2328 if (!timeout_ms) {
1da177e4 2329 mad_send_wr->status = IB_WC_WR_FLUSH_ERR;
03b61ad2 2330 mad_send_wr->refcount -= (mad_send_wr->timeout > 0);
2331 }
2332
34816ad9 2333 mad_send_wr->send_buf.timeout_ms = timeout_ms;
cabe3cbc 2334 if (active)
2335 mad_send_wr->timeout = msecs_to_jiffies(timeout_ms);
2336 else
2337 ib_reset_mad_timeout(mad_send_wr, timeout_ms);
2338
1da177e4 2339 spin_unlock_irqrestore(&mad_agent_priv->lock, flags);
2340 return 0;
2341}
2342EXPORT_SYMBOL(ib_modify_mad);
1da177e4 2343
2344void ib_cancel_mad(struct ib_mad_agent *mad_agent,
2345 struct ib_mad_send_buf *send_buf)
03b61ad2 2346{
34816ad9 2347 ib_modify_mad(mad_agent, send_buf, 0);
2348}
2349EXPORT_SYMBOL(ib_cancel_mad);
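/*
 * Illustrative usage (not part of this file): a client holding a
 * registered ib_mad_agent and an outstanding ib_mad_send_buf can
 * shorten the response timeout or abandon the send entirely:
 *
 *	ib_modify_mad(mad_agent, send_buf, 100);   (wait ~100 ms more)
 *	ib_cancel_mad(mad_agent, send_buf);        (give up immediately)
 *
 * A canceled send is still reported through the agent's send_handler,
 * with status IB_WC_WR_FLUSH_ERR.
 */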
2350
c4028958 2351static void local_completions(struct work_struct *work)
2352{
2353 struct ib_mad_agent_private *mad_agent_priv;
2354 struct ib_mad_local_private *local;
2355 struct ib_mad_agent_private *recv_mad_agent;
2356 unsigned long flags;
1d9bc6d6 2357 int free_mad;
2358 struct ib_wc wc;
2359 struct ib_mad_send_wc mad_send_wc;
2360
2361 mad_agent_priv =
2362 container_of(work, struct ib_mad_agent_private, local_work);
2363
2364 spin_lock_irqsave(&mad_agent_priv->lock, flags);
2365 while (!list_empty(&mad_agent_priv->local_list)) {
2366 local = list_entry(mad_agent_priv->local_list.next,
2367 struct ib_mad_local_private,
2368 completion_list);
37289efe 2369 list_del(&local->completion_list);
1da177e4 2370 spin_unlock_irqrestore(&mad_agent_priv->lock, flags);
1d9bc6d6 2371 free_mad = 0;
2372 if (local->mad_priv) {
2373 recv_mad_agent = local->recv_mad_agent;
2374 if (!recv_mad_agent) {
2375 printk(KERN_ERR PFX "No receive MAD agent for local completion\n");
1d9bc6d6 2376 free_mad = 1;
2377 goto local_send_completion;
2378 }
2379
2380 /*
2381 * Defined behavior is to complete response
2382 * before request
2383 */
2384 build_smp_wc(recv_mad_agent->agent.qp,
2385 (unsigned long) local->mad_send_wr,
97f52eb4 2386 be16_to_cpu(IB_LID_PERMISSIVE),
34816ad9 2387 0, recv_mad_agent->agent.port_num, &wc);
2388
2389 local->mad_priv->header.recv_wc.wc = &wc;
2390 local->mad_priv->header.recv_wc.mad_len =
2391 sizeof(struct ib_mad);
2392 INIT_LIST_HEAD(&local->mad_priv->header.recv_wc.rmpp_list);
2393 list_add(&local->mad_priv->header.recv_wc.recv_buf.list,
2394 &local->mad_priv->header.recv_wc.rmpp_list);
2395 local->mad_priv->header.recv_wc.recv_buf.grh = NULL;
2396 local->mad_priv->header.recv_wc.recv_buf.mad =
2397 &local->mad_priv->mad.mad;
2398 if (atomic_read(&recv_mad_agent->qp_info->snoop_count))
2399 snoop_recv(recv_mad_agent->qp_info,
2400 &local->mad_priv->header.recv_wc,
2401 IB_MAD_SNOOP_RECVS);
2402 recv_mad_agent->agent.recv_handler(
2403 &recv_mad_agent->agent,
2404 &local->mad_priv->header.recv_wc);
2405 spin_lock_irqsave(&recv_mad_agent->lock, flags);
2406 atomic_dec(&recv_mad_agent->refcount);
2407 spin_unlock_irqrestore(&recv_mad_agent->lock, flags);
2408 }
2409
2410local_send_completion:
2411 /* Complete send */
2412 mad_send_wc.status = IB_WC_SUCCESS;
2413 mad_send_wc.vendor_err = 0;
34816ad9 2414 mad_send_wc.send_buf = &local->mad_send_wr->send_buf;
1da177e4 2415 if (atomic_read(&mad_agent_priv->qp_info->snoop_count))
2416 snoop_send(mad_agent_priv->qp_info,
2417 &local->mad_send_wr->send_buf,
2418 &mad_send_wc, IB_MAD_SNOOP_SEND_COMPLETIONS);
2419 mad_agent_priv->agent.send_handler(&mad_agent_priv->agent,
2420 &mad_send_wc);
2421
2422 spin_lock_irqsave(&mad_agent_priv->lock, flags);
1da177e4 2423 atomic_dec(&mad_agent_priv->refcount);
1d9bc6d6 2424 if (free_mad)
2c153b93 2425 kmem_cache_free(ib_mad_cache, local->mad_priv);
2426 kfree(local);
2427 }
2428 spin_unlock_irqrestore(&mad_agent_priv->lock, flags);
2429}
2430
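/*
 * Resend a request whose response timed out, provided retries remain.
 * RMPP transfers go through ib_retry_rmpp(), which may either hand the
 * MAD back for a normal resend or consume the retry itself.
 */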
2431static int retry_send(struct ib_mad_send_wr_private *mad_send_wr)
2432{
2433 int ret;
2434
4fc8cd49 2435 if (!mad_send_wr->retries_left)
2436 return -ETIMEDOUT;
2437
2438 mad_send_wr->retries_left--;
2439 mad_send_wr->send_buf.retries++;
2440
34816ad9 2441 mad_send_wr->timeout = msecs_to_jiffies(mad_send_wr->send_buf.timeout_ms);
f75b7a52 2442
2443 if (mad_send_wr->mad_agent_priv->agent.rmpp_version) {
2444 ret = ib_retry_rmpp(mad_send_wr);
2445 switch (ret) {
2446 case IB_RMPP_RESULT_UNHANDLED:
2447 ret = ib_send_mad(mad_send_wr);
2448 break;
2449 case IB_RMPP_RESULT_CONSUMED:
2450 ret = 0;
2451 break;
2452 default:
2453 ret = -ECOMM;
2454 break;
2455 }
2456 } else
2457 ret = ib_send_mad(mad_send_wr);
2458
2459 if (!ret) {
2460 mad_send_wr->refcount++;
2461 list_add_tail(&mad_send_wr->agent_list,
2462 &mad_send_wr->mad_agent_priv->send_list);
2463 }
2464 return ret;
2465}
2466
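/*
 * Delayed work run when the head of the wait list expires: retry what
 * can still be retried and complete everything else to the client,
 * normally with IB_WC_RESP_TIMEOUT_ERR.
 */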
c4028958 2467static void timeout_sends(struct work_struct *work)
2468{
2469 struct ib_mad_agent_private *mad_agent_priv;
2470 struct ib_mad_send_wr_private *mad_send_wr;
2471 struct ib_mad_send_wc mad_send_wc;
2472 unsigned long flags, delay;
2473
2474 mad_agent_priv = container_of(work, struct ib_mad_agent_private,
2475 timed_work.work);
2476 mad_send_wc.vendor_err = 0;
2477
2478 spin_lock_irqsave(&mad_agent_priv->lock, flags);
2479 while (!list_empty(&mad_agent_priv->wait_list)) {
2480 mad_send_wr = list_entry(mad_agent_priv->wait_list.next,
2481 struct ib_mad_send_wr_private,
2482 agent_list);
2483
2484 if (time_after(mad_send_wr->timeout, jiffies)) {
2485 delay = mad_send_wr->timeout - jiffies;
2486 if ((long)delay <= 0)
2487 delay = 1;
2488 queue_delayed_work(mad_agent_priv->qp_info->
2489 port_priv->wq,
2490 &mad_agent_priv->timed_work, delay);
2491 break;
2492 }
2493
dbf9227b 2494 list_del(&mad_send_wr->agent_list);
2495 if (mad_send_wr->status == IB_WC_SUCCESS &&
2496 !retry_send(mad_send_wr))
2497 continue;
2498
2499 spin_unlock_irqrestore(&mad_agent_priv->lock, flags);
2500
2501 if (mad_send_wr->status == IB_WC_SUCCESS)
2502 mad_send_wc.status = IB_WC_RESP_TIMEOUT_ERR;
2503 else
2504 mad_send_wc.status = mad_send_wr->status;
34816ad9 2505 mad_send_wc.send_buf = &mad_send_wr->send_buf;
2506 mad_agent_priv->agent.send_handler(&mad_agent_priv->agent,
2507 &mad_send_wc);
2508
2509 atomic_dec(&mad_agent_priv->refcount);
2510 spin_lock_irqsave(&mad_agent_priv->lock, flags);
2511 }
2512 spin_unlock_irqrestore(&mad_agent_priv->lock, flags);
2513}
2514
5dd2ce12 2515static void ib_mad_thread_completion_handler(struct ib_cq *cq, void *arg)
2516{
2517 struct ib_mad_port_private *port_priv = cq->cq_context;
dc05980d 2518 unsigned long flags;
1da177e4 2519
2520 spin_lock_irqsave(&ib_mad_port_list_lock, flags);
2521 if (!list_empty(&port_priv->port_list))
2522 queue_work(port_priv->wq, &port_priv->work);
2523 spin_unlock_irqrestore(&ib_mad_port_list_lock, flags);
2524}
2525
2526/*
2527 * Allocate receive MADs and post receive WRs for them
2528 */
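/*
 * Each receive buffer is DMA-mapped starting at the GRH, so the
 * ib_mad_private_header is excluded from the mapping.  Buffers are
 * posted until the receive queue reaches its max_active limit.
 */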
2529static int ib_mad_post_receive_mads(struct ib_mad_qp_info *qp_info,
2530 struct ib_mad_private *mad)
2531{
2532 unsigned long flags;
2533 int post, ret;
2534 struct ib_mad_private *mad_priv;
2535 struct ib_sge sg_list;
2536 struct ib_recv_wr recv_wr, *bad_recv_wr;
2537 struct ib_mad_queue *recv_queue = &qp_info->recv_queue;
2538
2539 /* Initialize common scatter list fields */
2540 sg_list.length = sizeof *mad_priv - sizeof mad_priv->header;
2541 sg_list.lkey = (*qp_info->port_priv->mr).lkey;
2542
2543 /* Initialize common receive WR fields */
2544 recv_wr.next = NULL;
2545 recv_wr.sg_list = &sg_list;
2546 recv_wr.num_sge = 1;
2547
2548 do {
2549 /* Allocate and map receive buffer */
2550 if (mad) {
2551 mad_priv = mad;
2552 mad = NULL;
2553 } else {
2554 mad_priv = kmem_cache_alloc(ib_mad_cache, GFP_KERNEL);
2555 if (!mad_priv) {
2556 printk(KERN_ERR PFX "No memory for receive buffer\n");
2557 ret = -ENOMEM;
2558 break;
2559 }
2560 }
2561 sg_list.addr = ib_dma_map_single(qp_info->port_priv->device,
2562 &mad_priv->grh,
2563 sizeof *mad_priv -
2564 sizeof mad_priv->header,
2565 DMA_FROM_DEVICE);
2566 mad_priv->header.mapping = sg_list.addr;
2567 recv_wr.wr_id = (unsigned long)&mad_priv->header.mad_list;
2568 mad_priv->header.mad_list.mad_queue = recv_queue;
2569
2570 /* Post receive WR */
2571 spin_lock_irqsave(&recv_queue->lock, flags);
2572 post = (++recv_queue->count < recv_queue->max_active);
2573 list_add_tail(&mad_priv->header.mad_list.list, &recv_queue->list);
2574 spin_unlock_irqrestore(&recv_queue->lock, flags);
2575 ret = ib_post_recv(qp_info->qp, &recv_wr, &bad_recv_wr);
2576 if (ret) {
2577 spin_lock_irqsave(&recv_queue->lock, flags);
2578 list_del(&mad_priv->header.mad_list.list);
2579 recv_queue->count--;
2580 spin_unlock_irqrestore(&recv_queue->lock, flags);
2581 ib_dma_unmap_single(qp_info->port_priv->device,
2582 mad_priv->header.mapping,
2583 sizeof *mad_priv -
2584 sizeof mad_priv->header,
2585 DMA_FROM_DEVICE);
2586 kmem_cache_free(ib_mad_cache, mad_priv);
2587 printk(KERN_ERR PFX "ib_post_recv failed: %d\n", ret);
2588 break;
2589 }
2590 } while (post);
2591
2592 return ret;
2593}
2594
2595/*
2596 * Return all the posted receive MADs
2597 */
2598static void cleanup_recv_queue(struct ib_mad_qp_info *qp_info)
2599{
2600 struct ib_mad_private_header *mad_priv_hdr;
2601 struct ib_mad_private *recv;
2602 struct ib_mad_list_head *mad_list;
2603
2604 while (!list_empty(&qp_info->recv_queue.list)) {
2605
2606 mad_list = list_entry(qp_info->recv_queue.list.next,
2607 struct ib_mad_list_head, list);
2608 mad_priv_hdr = container_of(mad_list,
2609 struct ib_mad_private_header,
2610 mad_list);
2611 recv = container_of(mad_priv_hdr, struct ib_mad_private,
2612 header);
2613
2614 /* Remove from posted receive MAD list */
2615 list_del(&mad_list->list);
2616
2617 ib_dma_unmap_single(qp_info->port_priv->device,
2618 recv->header.mapping,
2619 sizeof(struct ib_mad_private) -
2620 sizeof(struct ib_mad_private_header),
2621 DMA_FROM_DEVICE);
2622 kmem_cache_free(ib_mad_cache, recv);
2623 }
2624
2625 qp_info->recv_queue.count = 0;
2626}
2627
2628/*
2629 * Start the port
2630 */
2631static int ib_mad_port_start(struct ib_mad_port_private *port_priv)
2632{
2633 int ret, i;
2634 struct ib_qp_attr *attr;
2635 struct ib_qp *qp;
2636
2637 attr = kmalloc(sizeof *attr, GFP_KERNEL);
3cd96564 2638 if (!attr) {
2639 printk(KERN_ERR PFX "Couldn't kmalloc ib_qp_attr\n");
2640 return -ENOMEM;
2641 }
2642
2643 for (i = 0; i < IB_MAD_QPS_CORE; i++) {
2644 qp = port_priv->qp_info[i].qp;
2645 /*
2646 * PKey index for QP1 is irrelevant but
2647 * one is needed for the Reset to Init transition
2648 */
2649 attr->qp_state = IB_QPS_INIT;
2650 attr->pkey_index = 0;
2651 attr->qkey = (qp->qp_num == 0) ? 0 : IB_QP1_QKEY;
2652 ret = ib_modify_qp(qp, attr, IB_QP_STATE |
2653 IB_QP_PKEY_INDEX | IB_QP_QKEY);
2654 if (ret) {
2655 printk(KERN_ERR PFX "Couldn't change QP%d state to "
2656 "INIT: %d\n", i, ret);
2657 goto out;
2658 }
2659
2660 attr->qp_state = IB_QPS_RTR;
2661 ret = ib_modify_qp(qp, attr, IB_QP_STATE);
2662 if (ret) {
2663 printk(KERN_ERR PFX "Couldn't change QP%d state to "
2664 "RTR: %d\n", i, ret);
2665 goto out;
2666 }
2667
2668 attr->qp_state = IB_QPS_RTS;
2669 attr->sq_psn = IB_MAD_SEND_Q_PSN;
2670 ret = ib_modify_qp(qp, attr, IB_QP_STATE | IB_QP_SQ_PSN);
2671 if (ret) {
2672 printk(KERN_ERR PFX "Couldn't change QP%d state to "
2673 "RTS: %d\n", i, ret);
2674 goto out;
2675 }
2676 }
2677
2678 ret = ib_req_notify_cq(port_priv->cq, IB_CQ_NEXT_COMP);
2679 if (ret) {
2680 printk(KERN_ERR PFX "Failed to request completion "
2681 "notification: %d\n", ret);
2682 goto out;
2683 }
2684
2685 for (i = 0; i < IB_MAD_QPS_CORE; i++) {
2686 ret = ib_mad_post_receive_mads(&port_priv->qp_info[i], NULL);
2687 if (ret) {
2688 printk(KERN_ERR PFX "Couldn't post receive WRs\n");
2689 goto out;
2690 }
2691 }
2692out:
2693 kfree(attr);
2694 return ret;
2695}
2696
2697static void qp_event_handler(struct ib_event *event, void *qp_context)
2698{
2699 struct ib_mad_qp_info *qp_info = qp_context;
2700
2701 /* It's worse than that! He's dead, Jim! */
2702 printk(KERN_ERR PFX "Fatal error (%d) on MAD QP (%d)\n",
2703 event->event, qp_info->qp->qp_num);
2704}
2705
2706static void init_mad_queue(struct ib_mad_qp_info *qp_info,
2707 struct ib_mad_queue *mad_queue)
2708{
2709 mad_queue->qp_info = qp_info;
2710 mad_queue->count = 0;
2711 spin_lock_init(&mad_queue->lock);
2712 INIT_LIST_HEAD(&mad_queue->list);
2713}
2714
2715static void init_mad_qp(struct ib_mad_port_private *port_priv,
2716 struct ib_mad_qp_info *qp_info)
2717{
2718 qp_info->port_priv = port_priv;
2719 init_mad_queue(qp_info, &qp_info->send_queue);
2720 init_mad_queue(qp_info, &qp_info->recv_queue);
2721 INIT_LIST_HEAD(&qp_info->overflow_list);
2722 spin_lock_init(&qp_info->snoop_lock);
2723 qp_info->snoop_table = NULL;
2724 qp_info->snoop_table_size = 0;
2725 atomic_set(&qp_info->snoop_count, 0);
2726}
2727
2728static int create_mad_qp(struct ib_mad_qp_info *qp_info,
2729 enum ib_qp_type qp_type)
2730{
2731 struct ib_qp_init_attr qp_init_attr;
2732 int ret;
2733
2734 memset(&qp_init_attr, 0, sizeof qp_init_attr);
2735 qp_init_attr.send_cq = qp_info->port_priv->cq;
2736 qp_init_attr.recv_cq = qp_info->port_priv->cq;
2737 qp_init_attr.sq_sig_type = IB_SIGNAL_ALL_WR;
2738 qp_init_attr.cap.max_send_wr = IB_MAD_QP_SEND_SIZE;
2739 qp_init_attr.cap.max_recv_wr = IB_MAD_QP_RECV_SIZE;
2740 qp_init_attr.cap.max_send_sge = IB_MAD_SEND_REQ_MAX_SG;
2741 qp_init_attr.cap.max_recv_sge = IB_MAD_RECV_REQ_MAX_SG;
2742 qp_init_attr.qp_type = qp_type;
2743 qp_init_attr.port_num = qp_info->port_priv->port_num;
2744 qp_init_attr.qp_context = qp_info;
2745 qp_init_attr.event_handler = qp_event_handler;
2746 qp_info->qp = ib_create_qp(qp_info->port_priv->pd, &qp_init_attr);
2747 if (IS_ERR(qp_info->qp)) {
2748 printk(KERN_ERR PFX "Couldn't create ib_mad QP%d\n",
2749 get_spl_qp_index(qp_type));
2750 ret = PTR_ERR(qp_info->qp);
2751 goto error;
2752 }
2753 /* Use minimum queue sizes unless the CQ is resized */
2754 qp_info->send_queue.max_active = IB_MAD_QP_SEND_SIZE;
2755 qp_info->recv_queue.max_active = IB_MAD_QP_RECV_SIZE;
2756 return 0;
2757
2758error:
2759 return ret;
2760}
2761
2762static void destroy_mad_qp(struct ib_mad_qp_info *qp_info)
2763{
2764 ib_destroy_qp(qp_info->qp);
6044ec88 2765 kfree(qp_info->snoop_table);
2766}
2767
2768/*
2769 * Open the port
2770 * Create the QP, PD, MR, and CQ if needed
2771 */
2772static int ib_mad_port_open(struct ib_device *device,
2773 int port_num)
2774{
2775 int ret, cq_size;
2776 struct ib_mad_port_private *port_priv;
2777 unsigned long flags;
2778 char name[sizeof "ib_mad123"];
2779
1da177e4 2780 /* Create new device info */
de6eb66b 2781 port_priv = kzalloc(sizeof *port_priv, GFP_KERNEL);
2782 if (!port_priv) {
2783 printk(KERN_ERR PFX "No memory for ib_mad_port_private\n");
2784 return -ENOMEM;
2785 }
de6eb66b 2786
2787 port_priv->device = device;
2788 port_priv->port_num = port_num;
2789 spin_lock_init(&port_priv->reg_lock);
2790 INIT_LIST_HEAD(&port_priv->agent_list);
2791 init_mad_qp(port_priv, &port_priv->qp_info[0]);
2792 init_mad_qp(port_priv, &port_priv->qp_info[1]);
2793
2794 cq_size = (IB_MAD_QP_SEND_SIZE + IB_MAD_QP_RECV_SIZE) * 2;
2795 port_priv->cq = ib_create_cq(port_priv->device,
5dd2ce12 2796 ib_mad_thread_completion_handler,
f4fd0b22 2797 NULL, port_priv, cq_size, 0);
2798 if (IS_ERR(port_priv->cq)) {
2799 printk(KERN_ERR PFX "Couldn't create ib_mad CQ\n");
2800 ret = PTR_ERR(port_priv->cq);
2801 goto error3;
2802 }
2803
2804 port_priv->pd = ib_alloc_pd(device);
2805 if (IS_ERR(port_priv->pd)) {
2806 printk(KERN_ERR PFX "Couldn't create ib_mad PD\n");
2807 ret = PTR_ERR(port_priv->pd);
2808 goto error4;
2809 }
2810
2811 port_priv->mr = ib_get_dma_mr(port_priv->pd, IB_ACCESS_LOCAL_WRITE);
2812 if (IS_ERR(port_priv->mr)) {
2813 printk(KERN_ERR PFX "Couldn't get ib_mad DMA MR\n");
2814 ret = PTR_ERR(port_priv->mr);
2815 goto error5;
2816 }
2817
2818 ret = create_mad_qp(&port_priv->qp_info[0], IB_QPT_SMI);
2819 if (ret)
2820 goto error6;
2821 ret = create_mad_qp(&port_priv->qp_info[1], IB_QPT_GSI);
2822 if (ret)
2823 goto error7;
2824
2825 snprintf(name, sizeof name, "ib_mad%d", port_num);
2826 port_priv->wq = create_singlethread_workqueue(name);
2827 if (!port_priv->wq) {
2828 ret = -ENOMEM;
2829 goto error8;
2830 }
c4028958 2831 INIT_WORK(&port_priv->work, ib_mad_completion_handler);
1da177e4 2832
2833 spin_lock_irqsave(&ib_mad_port_list_lock, flags);
2834 list_add_tail(&port_priv->port_list, &ib_mad_port_list);
2835 spin_unlock_irqrestore(&ib_mad_port_list_lock, flags);
2836
2837 ret = ib_mad_port_start(port_priv);
2838 if (ret) {
2839 printk(KERN_ERR PFX "Couldn't start port\n");
2840 goto error9;
2841 }
2842
2843 return 0;
2844
2845error9:
2846 spin_lock_irqsave(&ib_mad_port_list_lock, flags);
2847 list_del_init(&port_priv->port_list);
2848 spin_unlock_irqrestore(&ib_mad_port_list_lock, flags);
2849
2850 destroy_workqueue(port_priv->wq);
2851error8:
2852 destroy_mad_qp(&port_priv->qp_info[1]);
2853error7:
2854 destroy_mad_qp(&port_priv->qp_info[0]);
2855error6:
2856 ib_dereg_mr(port_priv->mr);
2857error5:
2858 ib_dealloc_pd(port_priv->pd);
2859error4:
2860 ib_destroy_cq(port_priv->cq);
2861 cleanup_recv_queue(&port_priv->qp_info[1]);
2862 cleanup_recv_queue(&port_priv->qp_info[0]);
2863error3:
2864 kfree(port_priv);
2865
2866 return ret;
2867}
2868
2869/*
2870 * Close the port
2871 * If there are no classes using the port, free the port
2872 * resources (CQ, MR, PD, QP) and remove the port's info structure
2873 */
2874static int ib_mad_port_close(struct ib_device *device, int port_num)
2875{
2876 struct ib_mad_port_private *port_priv;
2877 unsigned long flags;
2878
2879 spin_lock_irqsave(&ib_mad_port_list_lock, flags);
2880 port_priv = __ib_get_mad_port(device, port_num);
2881 if (port_priv == NULL) {
2882 spin_unlock_irqrestore(&ib_mad_port_list_lock, flags);
2883 printk(KERN_ERR PFX "Port %d not found\n", port_num);
2884 return -ENODEV;
2885 }
dc05980d 2886 list_del_init(&port_priv->port_list);
2887 spin_unlock_irqrestore(&ib_mad_port_list_lock, flags);
2888
2889 destroy_workqueue(port_priv->wq);
2890 destroy_mad_qp(&port_priv->qp_info[1]);
2891 destroy_mad_qp(&port_priv->qp_info[0]);
2892 ib_dereg_mr(port_priv->mr);
2893 ib_dealloc_pd(port_priv->pd);
2894 ib_destroy_cq(port_priv->cq);
2895 cleanup_recv_queue(&port_priv->qp_info[1]);
2896 cleanup_recv_queue(&port_priv->qp_info[0]);
2897 /* XXX: Handle deallocation of MAD registration tables */
2898
2899 kfree(port_priv);
2900
2901 return 0;
2902}
2903
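/*
 * Per-device setup: a switch exposes management traffic only through
 * port 0, so a single MAD port is opened; an HCA gets a MAD port and
 * agent for each physical port, 1..phys_port_cnt.
 */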
2904static void ib_mad_init_device(struct ib_device *device)
2905{
4ab6fb7e 2906 int start, end, i;
1da177e4 2907
2908 if (rdma_node_get_transport(device->node_type) != RDMA_TRANSPORT_IB)
2909 return;
2910
2911 if (device->node_type == RDMA_NODE_IB_SWITCH) {
2912 start = 0;
2913 end = 0;
1da177e4 2914 } else {
2915 start = 1;
2916 end = device->phys_port_cnt;
1da177e4 2917 }
2918
2919 for (i = start; i <= end; i++) {
2920 if (ib_mad_port_open(device, i)) {
1da177e4 2921 printk(KERN_ERR PFX "Couldn't open %s port %d\n",
2922 device->name, i);
2923 goto error;
1da177e4 2924 }
4ab6fb7e 2925 if (ib_agent_port_open(device, i)) {
2926 printk(KERN_ERR PFX "Couldn't open %s port %d "
2927 "for agents\n",
2928 device->name, i);
2929 goto error_agent;
2930 }
2931 }
f68bcc2d 2932 return;
1da177e4 2933
2934error_agent:
2935 if (ib_mad_port_close(device, i))
2936 printk(KERN_ERR PFX "Couldn't close %s port %d\n",
2937 device->name, i);
2938
2939error:
2940 i--;
2941
2942 while (i >= start) {
2943 if (ib_agent_port_close(device, i))
2944 printk(KERN_ERR PFX "Couldn't close %s port %d "
2945 "for agents\n",
2946 device->name, i);
2947 if (ib_mad_port_close(device, i))
1da177e4 2948 printk(KERN_ERR PFX "Couldn't close %s port %d\n",
4ab6fb7e 2949 device->name, i);
2950 i--;
2951 }
2952}
2953
2954static void ib_mad_remove_device(struct ib_device *device)
2955{
f68bcc2d 2956 int i, num_ports, cur_port;
1da177e4 2957
07ebafba 2958 if (device->node_type == RDMA_NODE_IB_SWITCH) {
2959 num_ports = 1;
2960 cur_port = 0;
2961 } else {
2962 num_ports = device->phys_port_cnt;
2963 cur_port = 1;
2964 }
2965 for (i = 0; i < num_ports; i++, cur_port++) {
f68bcc2d 2966 if (ib_agent_port_close(device, cur_port))
2967 printk(KERN_ERR PFX "Couldn't close %s port %d "
2968 "for agents\n",
2969 device->name, cur_port);
f68bcc2d 2970 if (ib_mad_port_close(device, cur_port))
2971 printk(KERN_ERR PFX "Couldn't close %s port %d\n",
2972 device->name, cur_port);
2973 }
2974}
2975
2976static struct ib_client mad_client = {
2977 .name = "mad",
2978 .add = ib_mad_init_device,
2979 .remove = ib_mad_remove_device
2980};
2981
2982static int __init ib_mad_init_module(void)
2983{
2984 int ret;
2985
2986 ib_mad_cache = kmem_cache_create("ib_mad",
2987 sizeof(struct ib_mad_private),
2988 0,
2989 SLAB_HWCACHE_ALIGN,
2990 NULL);
2991 if (!ib_mad_cache) {
2992 printk(KERN_ERR PFX "Couldn't create ib_mad cache\n");
2993 ret = -ENOMEM;
2994 goto error1;
2995 }
2996
2997 INIT_LIST_HEAD(&ib_mad_port_list);
2998
2999 if (ib_register_client(&mad_client)) {
3000 printk(KERN_ERR PFX "Couldn't register ib_mad client\n");
3001 ret = -EINVAL;
3002 goto error2;
3003 }
3004
3005 return 0;
3006
3007error2:
3008 kmem_cache_destroy(ib_mad_cache);
3009error1:
3010 return ret;
3011}
3012
3013static void __exit ib_mad_cleanup_module(void)
3014{
3015 ib_unregister_client(&mad_client);
1a1d92c1 3016 kmem_cache_destroy(ib_mad_cache);
3017}
3018
3019module_init(ib_mad_init_module);
3020module_exit(ib_mad_cleanup_module);