[PATCH] IB: Add MAD helper functions
drivers/infiniband/core/mad.c
1/*
2 * Copyright (c) 2004, 2005 Voltaire, Inc. All rights reserved.
3 *
4 * This software is available to you under a choice of one of two
5 * licenses. You may choose to be licensed under the terms of the GNU
6 * General Public License (GPL) Version 2, available from the file
7 * COPYING in the main directory of this source tree, or the
8 * OpenIB.org BSD license below:
9 *
10 * Redistribution and use in source and binary forms, with or
11 * without modification, are permitted provided that the following
12 * conditions are met:
13 *
14 * - Redistributions of source code must retain the above
15 * copyright notice, this list of conditions and the following
16 * disclaimer.
17 *
18 * - Redistributions in binary form must reproduce the above
19 * copyright notice, this list of conditions and the following
20 * disclaimer in the documentation and/or other materials
21 * provided with the distribution.
22 *
23 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
24 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
25 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
26 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
27 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
28 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
29 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
30 * SOFTWARE.
31 *
32 * $Id: mad.c 1389 2004-12-27 22:56:47Z roland $
33 */
34
35#include <linux/dma-mapping.h>
36
37#include "mad_priv.h"
38#include "smi.h"
39#include "agent.h"
40
41MODULE_LICENSE("Dual BSD/GPL");
42MODULE_DESCRIPTION("kernel IB MAD API");
43MODULE_AUTHOR("Hal Rosenstock");
44MODULE_AUTHOR("Sean Hefty");
45
46
47kmem_cache_t *ib_mad_cache;
48static struct list_head ib_mad_port_list;
49static u32 ib_mad_client_id = 0;
50
51/* Port list lock */
52static spinlock_t ib_mad_port_list_lock;
53
54
55/* Forward declarations */
56static int method_in_use(struct ib_mad_mgmt_method_table **method,
57 struct ib_mad_reg_req *mad_reg_req);
58static void remove_mad_reg_req(struct ib_mad_agent_private *priv);
59static struct ib_mad_agent_private *find_mad_agent(
60 struct ib_mad_port_private *port_priv,
61 struct ib_mad *mad, int solicited);
62static int ib_mad_post_receive_mads(struct ib_mad_qp_info *qp_info,
63 struct ib_mad_private *mad);
64static void cancel_mads(struct ib_mad_agent_private *mad_agent_priv);
65static void ib_mad_complete_send_wr(struct ib_mad_send_wr_private *mad_send_wr,
66 struct ib_mad_send_wc *mad_send_wc);
67static void timeout_sends(void *data);
68static void cancel_sends(void *data);
69static void local_completions(void *data);
70static int solicited_mad(struct ib_mad *mad);
71static int add_nonoui_reg_req(struct ib_mad_reg_req *mad_reg_req,
72 struct ib_mad_agent_private *agent_priv,
73 u8 mgmt_class);
74static int add_oui_reg_req(struct ib_mad_reg_req *mad_reg_req,
75 struct ib_mad_agent_private *agent_priv);
76
77/*
78 * Returns an ib_mad_port_private structure or NULL for a device/port.
79 * Assumes ib_mad_port_list_lock is held.
80 */
81static inline struct ib_mad_port_private *
82__ib_get_mad_port(struct ib_device *device, int port_num)
83{
84 struct ib_mad_port_private *entry;
85
86 list_for_each_entry(entry, &ib_mad_port_list, port_list) {
87 if (entry->device == device && entry->port_num == port_num)
88 return entry;
89 }
90 return NULL;
91}
92
93/*
94 * Wrapper function to return an ib_mad_port_private structure or NULL
95 * for a device/port
96 */
97static inline struct ib_mad_port_private *
98ib_get_mad_port(struct ib_device *device, int port_num)
99{
100 struct ib_mad_port_private *entry;
101 unsigned long flags;
102
103 spin_lock_irqsave(&ib_mad_port_list_lock, flags);
104 entry = __ib_get_mad_port(device, port_num);
105 spin_unlock_irqrestore(&ib_mad_port_list_lock, flags);
106
107 return entry;
108}
109
110static inline u8 convert_mgmt_class(u8 mgmt_class)
111{
112 /* Alias IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE to 0 */
113 return mgmt_class == IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE ?
114 0 : mgmt_class;
115}
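/*
 * Example: convert_mgmt_class(IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE) (0x81)
 * yields 0, so directed route SMPs reuse the otherwise-reserved class 0
 * slot of the method tables; every other class, e.g.
 * IB_MGMT_CLASS_SUBN_LID_ROUTED (0x01), indexes the tables directly.
 */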
116
117static int get_spl_qp_index(enum ib_qp_type qp_type)
118{
119 switch (qp_type)
120 {
121 case IB_QPT_SMI:
122 return 0;
123 case IB_QPT_GSI:
124 return 1;
125 default:
126 return -1;
127 }
128}
129
130static int vendor_class_index(u8 mgmt_class)
131{
132 return mgmt_class - IB_MGMT_CLASS_VENDOR_RANGE2_START;
133}
134
135static int is_vendor_class(u8 mgmt_class)
136{
137 if ((mgmt_class < IB_MGMT_CLASS_VENDOR_RANGE2_START) ||
138 (mgmt_class > IB_MGMT_CLASS_VENDOR_RANGE2_END))
139 return 0;
140 return 1;
141}
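/*
 * Example: the "new" vendor classes span IB_MGMT_CLASS_VENDOR_RANGE2_START
 * (0x30) through IB_MGMT_CLASS_VENDOR_RANGE2_END (0x4f), so
 * vendor_class_index() maps them onto vendor table slots 0..31.
 */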
142
143static int is_vendor_oui(char *oui)
144{
145 if (oui[0] || oui[1] || oui[2])
146 return 1;
147 return 0;
148}
149
150static int is_vendor_method_in_use(
151 struct ib_mad_mgmt_vendor_class *vendor_class,
152 struct ib_mad_reg_req *mad_reg_req)
153{
154 struct ib_mad_mgmt_method_table *method;
155 int i;
156
157 for (i = 0; i < MAX_MGMT_OUI; i++) {
158 if (!memcmp(vendor_class->oui[i], mad_reg_req->oui, 3)) {
159 method = vendor_class->method_table[i];
160 if (method) {
161 if (method_in_use(&method, mad_reg_req))
162 return 1;
163 else
164 break;
165 }
166 }
167 }
168 return 0;
169}
170
171/*
172 * ib_register_mad_agent - Register to send/receive MADs
173 */
174struct ib_mad_agent *ib_register_mad_agent(struct ib_device *device,
175 u8 port_num,
176 enum ib_qp_type qp_type,
177 struct ib_mad_reg_req *mad_reg_req,
178 u8 rmpp_version,
179 ib_mad_send_handler send_handler,
180 ib_mad_recv_handler recv_handler,
181 void *context)
182{
183 struct ib_mad_port_private *port_priv;
184 struct ib_mad_agent *ret = ERR_PTR(-EINVAL);
185 struct ib_mad_agent_private *mad_agent_priv;
186 struct ib_mad_reg_req *reg_req = NULL;
187 struct ib_mad_mgmt_class_table *class;
188 struct ib_mad_mgmt_vendor_class_table *vendor;
189 struct ib_mad_mgmt_vendor_class *vendor_class;
190 struct ib_mad_mgmt_method_table *method;
191 int ret2, qpn;
192 unsigned long flags;
193 u8 mgmt_class, vclass;
194
195 /* Validate parameters */
196 qpn = get_spl_qp_index(qp_type);
197 if (qpn == -1)
198 goto error1;
199
200 if (rmpp_version)
201 goto error1; /* XXX: until RMPP implemented */
202
203 /* Validate MAD registration request if supplied */
204 if (mad_reg_req) {
205 if (mad_reg_req->mgmt_class_version >= MAX_MGMT_VERSION)
206 goto error1;
207 if (!recv_handler)
208 goto error1;
209 if (mad_reg_req->mgmt_class >= MAX_MGMT_CLASS) {
210 /*
211 * IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE is the only
212 * one in this range currently allowed
213 */
214 if (mad_reg_req->mgmt_class !=
215 IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE)
216 goto error1;
217 } else if (mad_reg_req->mgmt_class == 0) {
218 /*
219 * Class 0 is reserved in IBA and is used for
220 * aliasing of IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE
221 */
222 goto error1;
223 } else if (is_vendor_class(mad_reg_req->mgmt_class)) {
224 /*
225 * If class is in "new" vendor range,
226 * ensure supplied OUI is not zero
227 */
228 if (!is_vendor_oui(mad_reg_req->oui))
229 goto error1;
230 }
231 /* Make sure class supplied is consistent with QP type */
232 if (qp_type == IB_QPT_SMI) {
233 if ((mad_reg_req->mgmt_class !=
234 IB_MGMT_CLASS_SUBN_LID_ROUTED) &&
235 (mad_reg_req->mgmt_class !=
236 IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE))
237 goto error1;
238 } else {
239 if ((mad_reg_req->mgmt_class ==
240 IB_MGMT_CLASS_SUBN_LID_ROUTED) ||
241 (mad_reg_req->mgmt_class ==
242 IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE))
243 goto error1;
244 }
245 } else {
246 /* No registration request supplied */
247 if (!send_handler)
248 goto error1;
249 }
250
251 /* Validate device and port */
252 port_priv = ib_get_mad_port(device, port_num);
253 if (!port_priv) {
254 ret = ERR_PTR(-ENODEV);
255 goto error1;
256 }
257
258 /* Allocate structures */
259 mad_agent_priv = kmalloc(sizeof *mad_agent_priv, GFP_KERNEL);
260 if (!mad_agent_priv) {
261 ret = ERR_PTR(-ENOMEM);
262 goto error1;
263 }
264 memset(mad_agent_priv, 0, sizeof *mad_agent_priv);
265
266 mad_agent_priv->agent.mr = ib_get_dma_mr(port_priv->qp_info[qpn].qp->pd,
267 IB_ACCESS_LOCAL_WRITE);
268 if (IS_ERR(mad_agent_priv->agent.mr)) {
269 ret = ERR_PTR(-ENOMEM);
270 goto error2;
271 }
272
273 if (mad_reg_req) {
274 reg_req = kmalloc(sizeof *reg_req, GFP_KERNEL);
275 if (!reg_req) {
276 ret = ERR_PTR(-ENOMEM);
277 goto error3;
278 }
279 /* Make a copy of the MAD registration request */
280 memcpy(reg_req, mad_reg_req, sizeof *reg_req);
281 }
282
283 /* Now, fill in the various structures */
284 mad_agent_priv->qp_info = &port_priv->qp_info[qpn];
285 mad_agent_priv->reg_req = reg_req;
286 mad_agent_priv->rmpp_version = rmpp_version;
287 mad_agent_priv->agent.device = device;
288 mad_agent_priv->agent.recv_handler = recv_handler;
289 mad_agent_priv->agent.send_handler = send_handler;
290 mad_agent_priv->agent.context = context;
291 mad_agent_priv->agent.qp = port_priv->qp_info[qpn].qp;
292 mad_agent_priv->agent.port_num = port_num;
293
294 spin_lock_irqsave(&port_priv->reg_lock, flags);
295 mad_agent_priv->agent.hi_tid = ++ib_mad_client_id;
296
297 /*
298 * Make sure MAD registration (if supplied)
299 * is non overlapping with any existing ones
300 */
301 if (mad_reg_req) {
302 mgmt_class = convert_mgmt_class(mad_reg_req->mgmt_class);
303 if (!is_vendor_class(mgmt_class)) {
304 class = port_priv->version[mad_reg_req->
305 mgmt_class_version].class;
306 if (class) {
307 method = class->method_table[mgmt_class];
308 if (method) {
309 if (method_in_use(&method,
310 mad_reg_req))
311 goto error4;
312 }
313 }
314 ret2 = add_nonoui_reg_req(mad_reg_req, mad_agent_priv,
315 mgmt_class);
316 } else {
317 /* "New" vendor class range */
318 vendor = port_priv->version[mad_reg_req->
319 mgmt_class_version].vendor;
320 if (vendor) {
321 vclass = vendor_class_index(mgmt_class);
322 vendor_class = vendor->vendor_class[vclass];
323 if (vendor_class) {
324 if (is_vendor_method_in_use(
325 vendor_class,
326 mad_reg_req))
327 goto error4;
328 }
329 }
330 ret2 = add_oui_reg_req(mad_reg_req, mad_agent_priv);
331 }
332 if (ret2) {
333 ret = ERR_PTR(ret2);
334 goto error4;
335 }
336 }
337
338 /* Add mad agent into port's agent list */
339 list_add_tail(&mad_agent_priv->agent_list, &port_priv->agent_list);
340 spin_unlock_irqrestore(&port_priv->reg_lock, flags);
341
342 spin_lock_init(&mad_agent_priv->lock);
343 INIT_LIST_HEAD(&mad_agent_priv->send_list);
344 INIT_LIST_HEAD(&mad_agent_priv->wait_list);
345 INIT_WORK(&mad_agent_priv->timed_work, timeout_sends, mad_agent_priv);
346 INIT_LIST_HEAD(&mad_agent_priv->local_list);
347 INIT_WORK(&mad_agent_priv->local_work, local_completions,
348 mad_agent_priv);
349 INIT_LIST_HEAD(&mad_agent_priv->canceled_list);
350 INIT_WORK(&mad_agent_priv->canceled_work, cancel_sends, mad_agent_priv);
351 atomic_set(&mad_agent_priv->refcount, 1);
352 init_waitqueue_head(&mad_agent_priv->wait);
353
354 return &mad_agent_priv->agent;
355
356error4:
357 spin_unlock_irqrestore(&port_priv->reg_lock, flags);
358 kfree(reg_req);
359error3:
360 ib_dereg_mr(mad_agent_priv->agent.mr);
361error2:
362 kfree(mad_agent_priv);
363error1:
364 return ret;
365}
366EXPORT_SYMBOL(ib_register_mad_agent);
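/*
 * Illustrative usage sketch, not part of this file: registering a GSI
 * agent that owns the Get method of the PerfMgmt class.  The example_*
 * names are hypothetical; the structure fields, QP type and call
 * signature are the ones validated above.  Teardown is simply
 * ib_unregister_mad_agent() on the returned agent.
 */
#if 0
static void example_send_done(struct ib_mad_agent *agent,
			      struct ib_mad_send_wc *send_wc)
{
	/* send_wc->wr_id is the value supplied when the send was posted */
}

static void example_recv_done(struct ib_mad_agent *agent,
			      struct ib_mad_recv_wc *recv_wc)
{
	/* ... inspect recv_wc->recv_buf.mad ... */
	ib_free_recv_mad(recv_wc);	/* return buffers to the MAD layer */
}

static struct ib_mad_agent *example_register(struct ib_device *device,
					     u8 port_num)
{
	struct ib_mad_reg_req reg_req = {
		.mgmt_class	    = IB_MGMT_CLASS_PERF_MGMT,
		.mgmt_class_version = 1,
	};

	set_bit(IB_MGMT_METHOD_GET, reg_req.method_mask);
	return ib_register_mad_agent(device, port_num, IB_QPT_GSI, &reg_req,
				     0 /* no RMPP yet */, example_send_done,
				     example_recv_done, NULL);
}
#endif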
367
368static inline int is_snooping_sends(int mad_snoop_flags)
369{
370 return (mad_snoop_flags &
371 (/*IB_MAD_SNOOP_POSTED_SENDS |
372 IB_MAD_SNOOP_RMPP_SENDS |*/
373 IB_MAD_SNOOP_SEND_COMPLETIONS /*|
374 IB_MAD_SNOOP_RMPP_SEND_COMPLETIONS*/));
375}
376
377static inline int is_snooping_recvs(int mad_snoop_flags)
378{
379 return (mad_snoop_flags &
380 (IB_MAD_SNOOP_RECVS /*|
381 IB_MAD_SNOOP_RMPP_RECVS*/));
382}
383
384static int register_snoop_agent(struct ib_mad_qp_info *qp_info,
385 struct ib_mad_snoop_private *mad_snoop_priv)
386{
387 struct ib_mad_snoop_private **new_snoop_table;
388 unsigned long flags;
389 int i;
390
391 spin_lock_irqsave(&qp_info->snoop_lock, flags);
392 /* Check for empty slot in array. */
393 for (i = 0; i < qp_info->snoop_table_size; i++)
394 if (!qp_info->snoop_table[i])
395 break;
396
397 if (i == qp_info->snoop_table_size) {
398 /* Grow table. */
399 new_snoop_table = kmalloc(sizeof mad_snoop_priv *
400 (qp_info->snoop_table_size + 1),
401 GFP_ATOMIC);
402 if (!new_snoop_table) {
403 i = -ENOMEM;
404 goto out;
405 }
406 if (qp_info->snoop_table) {
407 memcpy(new_snoop_table, qp_info->snoop_table,
408 sizeof mad_snoop_priv *
409 qp_info->snoop_table_size);
410 kfree(qp_info->snoop_table);
411 }
412 qp_info->snoop_table = new_snoop_table;
413 qp_info->snoop_table_size++;
414 }
415 qp_info->snoop_table[i] = mad_snoop_priv;
416 atomic_inc(&qp_info->snoop_count);
417out:
418 spin_unlock_irqrestore(&qp_info->snoop_lock, flags);
419 return i;
420}
421
422struct ib_mad_agent *ib_register_mad_snoop(struct ib_device *device,
423 u8 port_num,
424 enum ib_qp_type qp_type,
425 int mad_snoop_flags,
426 ib_mad_snoop_handler snoop_handler,
427 ib_mad_recv_handler recv_handler,
428 void *context)
429{
430 struct ib_mad_port_private *port_priv;
431 struct ib_mad_agent *ret;
432 struct ib_mad_snoop_private *mad_snoop_priv;
433 int qpn;
434
435 /* Validate parameters */
436 if ((is_snooping_sends(mad_snoop_flags) && !snoop_handler) ||
437 (is_snooping_recvs(mad_snoop_flags) && !recv_handler)) {
438 ret = ERR_PTR(-EINVAL);
439 goto error1;
440 }
441 qpn = get_spl_qp_index(qp_type);
442 if (qpn == -1) {
443 ret = ERR_PTR(-EINVAL);
444 goto error1;
445 }
446 port_priv = ib_get_mad_port(device, port_num);
447 if (!port_priv) {
448 ret = ERR_PTR(-ENODEV);
449 goto error1;
450 }
451 /* Allocate structures */
452 mad_snoop_priv = kmalloc(sizeof *mad_snoop_priv, GFP_KERNEL);
453 if (!mad_snoop_priv) {
454 ret = ERR_PTR(-ENOMEM);
455 goto error1;
456 }
457
458 /* Now, fill in the various structures */
459 memset(mad_snoop_priv, 0, sizeof *mad_snoop_priv);
460 mad_snoop_priv->qp_info = &port_priv->qp_info[qpn];
461 mad_snoop_priv->agent.device = device;
462 mad_snoop_priv->agent.recv_handler = recv_handler;
463 mad_snoop_priv->agent.snoop_handler = snoop_handler;
464 mad_snoop_priv->agent.context = context;
465 mad_snoop_priv->agent.qp = port_priv->qp_info[qpn].qp;
466 mad_snoop_priv->agent.port_num = port_num;
467 mad_snoop_priv->mad_snoop_flags = mad_snoop_flags;
468 init_waitqueue_head(&mad_snoop_priv->wait);
469 mad_snoop_priv->snoop_index = register_snoop_agent(
470 &port_priv->qp_info[qpn],
471 mad_snoop_priv);
472 if (mad_snoop_priv->snoop_index < 0) {
473 ret = ERR_PTR(mad_snoop_priv->snoop_index);
474 goto error2;
475 }
476
477 atomic_set(&mad_snoop_priv->refcount, 1);
478 return &mad_snoop_priv->agent;
479
480error2:
481 kfree(mad_snoop_priv);
482error1:
483 return ret;
484}
485EXPORT_SYMBOL(ib_register_mad_snoop);
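/*
 * Illustrative sketch, not part of this file: a passive snoop agent on
 * QP1 observing received MADs and send completions.  The example_* names
 * are hypothetical; the flags and handler signatures are the ones
 * snoop_send()/snoop_recv() below invoke.
 */
#if 0
static void example_snoop_send(struct ib_mad_agent *agent,
			       struct ib_send_wr *send_wr,
			       struct ib_mad_send_wc *send_wc)
{
	/* Observe only; the send buffers still belong to the sender */
}

static void example_snoop_recv(struct ib_mad_agent *agent,
			       struct ib_mad_recv_wc *recv_wc)
{
	/* Observe only; the MAD layer still delivers this to its owner,
	 * so a snoop handler must not call ib_free_recv_mad() */
}

static struct ib_mad_agent *example_register_snoop(struct ib_device *device,
						   u8 port_num)
{
	return ib_register_mad_snoop(device, port_num, IB_QPT_GSI,
				     IB_MAD_SNOOP_SEND_COMPLETIONS |
				     IB_MAD_SNOOP_RECVS,
				     example_snoop_send,
				     example_snoop_recv, NULL);
}
#endif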
486
487static void unregister_mad_agent(struct ib_mad_agent_private *mad_agent_priv)
488{
489 struct ib_mad_port_private *port_priv;
490 unsigned long flags;
491
492 /* Note that we could still be handling received MADs */
493
494 /*
495 * Canceling all sends results in dropping received response
496 * MADs, preventing us from queuing additional work
497 */
498 cancel_mads(mad_agent_priv);
499 port_priv = mad_agent_priv->qp_info->port_priv;
500 cancel_delayed_work(&mad_agent_priv->timed_work);
501
502 spin_lock_irqsave(&port_priv->reg_lock, flags);
503 remove_mad_reg_req(mad_agent_priv);
504 list_del(&mad_agent_priv->agent_list);
505 spin_unlock_irqrestore(&port_priv->reg_lock, flags);
506
507 flush_workqueue(port_priv->wq);
508
509 atomic_dec(&mad_agent_priv->refcount);
510 wait_event(mad_agent_priv->wait,
511 !atomic_read(&mad_agent_priv->refcount));
512
513 if (mad_agent_priv->reg_req)
514 kfree(mad_agent_priv->reg_req);
515 ib_dereg_mr(mad_agent_priv->agent.mr);
516 kfree(mad_agent_priv);
517}
518
519static void unregister_mad_snoop(struct ib_mad_snoop_private *mad_snoop_priv)
520{
521 struct ib_mad_qp_info *qp_info;
522 unsigned long flags;
523
524 qp_info = mad_snoop_priv->qp_info;
525 spin_lock_irqsave(&qp_info->snoop_lock, flags);
526 qp_info->snoop_table[mad_snoop_priv->snoop_index] = NULL;
527 atomic_dec(&qp_info->snoop_count);
528 spin_unlock_irqrestore(&qp_info->snoop_lock, flags);
529
530 atomic_dec(&mad_snoop_priv->refcount);
531 wait_event(mad_snoop_priv->wait,
532 !atomic_read(&mad_snoop_priv->refcount));
533
534 kfree(mad_snoop_priv);
535}
536
537/*
538 * ib_unregister_mad_agent - Unregisters a client from using MAD services
539 */
540int ib_unregister_mad_agent(struct ib_mad_agent *mad_agent)
541{
542 struct ib_mad_agent_private *mad_agent_priv;
543 struct ib_mad_snoop_private *mad_snoop_priv;
544
545 /* If the TID is zero, the agent can only snoop. */
546 if (mad_agent->hi_tid) {
547 mad_agent_priv = container_of(mad_agent,
548 struct ib_mad_agent_private,
549 agent);
550 unregister_mad_agent(mad_agent_priv);
551 } else {
552 mad_snoop_priv = container_of(mad_agent,
553 struct ib_mad_snoop_private,
554 agent);
555 unregister_mad_snoop(mad_snoop_priv);
556 }
557 return 0;
558}
559EXPORT_SYMBOL(ib_unregister_mad_agent);
560
561static void dequeue_mad(struct ib_mad_list_head *mad_list)
562{
563 struct ib_mad_queue *mad_queue;
564 unsigned long flags;
565
566 BUG_ON(!mad_list->mad_queue);
567 mad_queue = mad_list->mad_queue;
568 spin_lock_irqsave(&mad_queue->lock, flags);
569 list_del(&mad_list->list);
570 mad_queue->count--;
571 spin_unlock_irqrestore(&mad_queue->lock, flags);
572}
573
574static void snoop_send(struct ib_mad_qp_info *qp_info,
575 struct ib_send_wr *send_wr,
576 struct ib_mad_send_wc *mad_send_wc,
577 int mad_snoop_flags)
578{
579 struct ib_mad_snoop_private *mad_snoop_priv;
580 unsigned long flags;
581 int i;
582
583 spin_lock_irqsave(&qp_info->snoop_lock, flags);
584 for (i = 0; i < qp_info->snoop_table_size; i++) {
585 mad_snoop_priv = qp_info->snoop_table[i];
586 if (!mad_snoop_priv ||
587 !(mad_snoop_priv->mad_snoop_flags & mad_snoop_flags))
588 continue;
589
590 atomic_inc(&mad_snoop_priv->refcount);
591 spin_unlock_irqrestore(&qp_info->snoop_lock, flags);
592 mad_snoop_priv->agent.snoop_handler(&mad_snoop_priv->agent,
593 send_wr, mad_send_wc);
594 if (atomic_dec_and_test(&mad_snoop_priv->refcount))
595 wake_up(&mad_snoop_priv->wait);
596 spin_lock_irqsave(&qp_info->snoop_lock, flags);
597 }
598 spin_unlock_irqrestore(&qp_info->snoop_lock, flags);
599}
600
601static void snoop_recv(struct ib_mad_qp_info *qp_info,
602 struct ib_mad_recv_wc *mad_recv_wc,
603 int mad_snoop_flags)
604{
605 struct ib_mad_snoop_private *mad_snoop_priv;
606 unsigned long flags;
607 int i;
608
609 spin_lock_irqsave(&qp_info->snoop_lock, flags);
610 for (i = 0; i < qp_info->snoop_table_size; i++) {
611 mad_snoop_priv = qp_info->snoop_table[i];
612 if (!mad_snoop_priv ||
613 !(mad_snoop_priv->mad_snoop_flags & mad_snoop_flags))
614 continue;
615
616 atomic_inc(&mad_snoop_priv->refcount);
617 spin_unlock_irqrestore(&qp_info->snoop_lock, flags);
618 mad_snoop_priv->agent.recv_handler(&mad_snoop_priv->agent,
619 mad_recv_wc);
620 if (atomic_dec_and_test(&mad_snoop_priv->refcount))
621 wake_up(&mad_snoop_priv->wait);
622 spin_lock_irqsave(&qp_info->snoop_lock, flags);
623 }
624 spin_unlock_irqrestore(&qp_info->snoop_lock, flags);
625}
626
627static void build_smp_wc(u64 wr_id, u16 slid, u16 pkey_index, u8 port_num,
628 struct ib_wc *wc)
629{
630 memset(wc, 0, sizeof *wc);
631 wc->wr_id = wr_id;
632 wc->status = IB_WC_SUCCESS;
633 wc->opcode = IB_WC_RECV;
634 wc->pkey_index = pkey_index;
635 wc->byte_len = sizeof(struct ib_mad) + sizeof(struct ib_grh);
636 wc->src_qp = IB_QP0;
637 wc->qp_num = IB_QP0;
638 wc->slid = slid;
639 wc->sl = 0;
640 wc->dlid_path_bits = 0;
641 wc->port_num = port_num;
642}
643
644/*
645 * Return 0 if SMP is to be sent
646 * Return 1 if SMP was consumed locally (whether or not solicited)
647 * Return < 0 if error
648 */
649static int handle_outgoing_dr_smp(struct ib_mad_agent_private *mad_agent_priv,
650 struct ib_smp *smp,
651 struct ib_send_wr *send_wr)
652{
653 int ret, solicited;
654 unsigned long flags;
655 struct ib_mad_local_private *local;
656 struct ib_mad_private *mad_priv;
657 struct ib_mad_port_private *port_priv;
658 struct ib_mad_agent_private *recv_mad_agent = NULL;
659 struct ib_device *device = mad_agent_priv->agent.device;
660 u8 port_num = mad_agent_priv->agent.port_num;
661 struct ib_wc mad_wc;
662
663 if (!smi_handle_dr_smp_send(smp, device->node_type, port_num)) {
664 ret = -EINVAL;
665 printk(KERN_ERR PFX "Invalid directed route\n");
666 goto out;
667 }
668 /* Check to post send on QP or process locally */
669 ret = smi_check_local_dr_smp(smp, device, port_num);
670 if (!ret || !device->process_mad)
671 goto out;
672
673 local = kmalloc(sizeof *local, GFP_ATOMIC);
674 if (!local) {
675 ret = -ENOMEM;
676 printk(KERN_ERR PFX "No memory for ib_mad_local_private\n");
677 goto out;
678 }
679 local->mad_priv = NULL;
680 local->recv_mad_agent = NULL;
681 mad_priv = kmem_cache_alloc(ib_mad_cache, GFP_ATOMIC);
682 if (!mad_priv) {
683 ret = -ENOMEM;
684 printk(KERN_ERR PFX "No memory for local response MAD\n");
685 kfree(local);
686 goto out;
687 }
688
689 build_smp_wc(send_wr->wr_id, smp->dr_slid, send_wr->wr.ud.pkey_index,
690 send_wr->wr.ud.port_num, &mad_wc);
691
692 /* No GRH for DR SMP */
693 ret = device->process_mad(device, 0, port_num, &mad_wc, NULL,
694 (struct ib_mad *)smp,
695 (struct ib_mad *)&mad_priv->mad);
696 switch (ret)
697 {
698 case IB_MAD_RESULT_SUCCESS | IB_MAD_RESULT_REPLY:
699 /*
700 * See if response is solicited and
701 * there is a recv handler
702 */
703 if (solicited_mad(&mad_priv->mad.mad) &&
704 mad_agent_priv->agent.recv_handler) {
705 local->mad_priv = mad_priv;
706 local->recv_mad_agent = mad_agent_priv;
707 /*
708 * Reference MAD agent until receive
709 * side of local completion handled
710 */
711 atomic_inc(&mad_agent_priv->refcount);
712 } else
713 kmem_cache_free(ib_mad_cache, mad_priv);
714 break;
715 case IB_MAD_RESULT_SUCCESS | IB_MAD_RESULT_CONSUMED:
716 kmem_cache_free(ib_mad_cache, mad_priv);
717 break;
718 case IB_MAD_RESULT_SUCCESS:
719 /* Treat like an incoming receive MAD */
720 solicited = solicited_mad(&mad_priv->mad.mad);
721 port_priv = ib_get_mad_port(mad_agent_priv->agent.device,
722 mad_agent_priv->agent.port_num);
723 if (port_priv) {
724 mad_priv->mad.mad.mad_hdr.tid =
725 ((struct ib_mad *)smp)->mad_hdr.tid;
726 recv_mad_agent = find_mad_agent(port_priv,
727 &mad_priv->mad.mad,
728 solicited);
729 }
730 if (!port_priv || !recv_mad_agent) {
731 kmem_cache_free(ib_mad_cache, mad_priv);
732 kfree(local);
733 ret = 0;
734 goto out;
735 }
736 local->mad_priv = mad_priv;
737 local->recv_mad_agent = recv_mad_agent;
738 break;
739 default:
740 kmem_cache_free(ib_mad_cache, mad_priv);
741 kfree(local);
742 ret = -EINVAL;
743 goto out;
744 }
745
746 local->send_wr = *send_wr;
747 local->send_wr.sg_list = local->sg_list;
748 memcpy(local->sg_list, send_wr->sg_list,
749 sizeof *send_wr->sg_list * send_wr->num_sge);
750 local->send_wr.next = NULL;
751 local->tid = send_wr->wr.ud.mad_hdr->tid;
752 local->wr_id = send_wr->wr_id;
753 /* Reference MAD agent until send side of local completion handled */
754 atomic_inc(&mad_agent_priv->refcount);
755 /* Queue local completion to local list */
756 spin_lock_irqsave(&mad_agent_priv->lock, flags);
757 list_add_tail(&local->completion_list, &mad_agent_priv->local_list);
758 spin_unlock_irqrestore(&mad_agent_priv->lock, flags);
759 queue_work(mad_agent_priv->qp_info->port_priv->wq,
760 &mad_agent_priv->local_work);
761 ret = 1;
762out:
763 return ret;
764}
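/*
 * Illustrative sketch, not part of this file: how a device driver's
 * ->process_mad() hook interacts with the result flags tested above (and
 * again in ib_mad_recv_done_handler() later in this file).  The function
 * name and the PerfMgmt check are hypothetical; the signature and flag
 * combinations mirror the call sites here.
 */
#if 0
static int example_process_mad(struct ib_device *device, int mad_flags,
			       u8 port_num, struct ib_wc *in_wc,
			       struct ib_grh *in_grh, struct ib_mad *in_mad,
			       struct ib_mad *out_mad)
{
	if (in_mad->mad_hdr.mgmt_class != IB_MGMT_CLASS_PERF_MGMT)
		return IB_MAD_RESULT_SUCCESS;	/* not handled; MAD layer routes it */

	/* Build the response in *out_mad ... */
	*out_mad = *in_mad;
	out_mad->mad_hdr.method |= IB_MGMT_METHOD_RESP;

	/* Handled, and a reply was generated for the caller to deliver */
	return IB_MAD_RESULT_SUCCESS | IB_MAD_RESULT_REPLY;
}
#endif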
765
766static int get_buf_length(int hdr_len, int data_len)
767{
768 int seg_size, pad;
769
770 seg_size = sizeof(struct ib_mad) - hdr_len;
771 if (data_len && seg_size) {
772 pad = seg_size - data_len % seg_size;
773 if (pad == seg_size)
774 pad = 0;
775 } else
776 pad = seg_size;
777 return hdr_len + data_len + pad;
778}
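/*
 * Worked example of the padding math above: with hdr_len = 24 (a bare
 * struct ib_mad_hdr) and data_len = 200, seg_size = 256 - 24 = 232 and
 * pad = 232 - (200 % 232) = 32, so the buffer is 24 + 200 + 32 = 256
 * bytes -- exactly one struct ib_mad.  When data_len is an exact
 * multiple of seg_size the pad collapses to 0.
 */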
779
780struct ib_mad_send_buf * ib_create_send_mad(struct ib_mad_agent *mad_agent,
781 u32 remote_qpn, u16 pkey_index,
782 struct ib_ah *ah,
783 int hdr_len, int data_len,
784 unsigned int __nocast gfp_mask)
785{
786 struct ib_mad_agent_private *mad_agent_priv;
787 struct ib_mad_send_buf *send_buf;
788 int buf_size;
789 void *buf;
790
791 mad_agent_priv = container_of(mad_agent,
792 struct ib_mad_agent_private, agent);
793 buf_size = get_buf_length(hdr_len, data_len);
794
795 buf = kmalloc(sizeof *send_buf + buf_size, gfp_mask);
796 if (!buf)
797 return ERR_PTR(-ENOMEM);
798
799 send_buf = buf + buf_size;
800 memset(send_buf, 0, sizeof *send_buf);
801 send_buf->mad = buf;
802
803 send_buf->sge.addr = dma_map_single(mad_agent->device->dma_device,
804 buf, buf_size, DMA_TO_DEVICE);
805 pci_unmap_addr_set(send_buf, mapping, send_buf->sge.addr);
806 send_buf->sge.length = buf_size;
807 send_buf->sge.lkey = mad_agent->mr->lkey;
808
809 send_buf->send_wr.wr_id = (unsigned long) send_buf;
810 send_buf->send_wr.sg_list = &send_buf->sge;
811 send_buf->send_wr.num_sge = 1;
812 send_buf->send_wr.opcode = IB_WR_SEND;
813 send_buf->send_wr.send_flags = IB_SEND_SIGNALED;
814 send_buf->send_wr.wr.ud.ah = ah;
815 send_buf->send_wr.wr.ud.mad_hdr = &send_buf->mad->mad_hdr;
816 send_buf->send_wr.wr.ud.remote_qpn = remote_qpn;
817 send_buf->send_wr.wr.ud.remote_qkey = IB_QP_SET_QKEY;
818 send_buf->send_wr.wr.ud.pkey_index = pkey_index;
819 send_buf->mad_agent = mad_agent;
820 atomic_inc(&mad_agent_priv->refcount);
821 return send_buf;
822}
823EXPORT_SYMBOL(ib_create_send_mad);
824
825void ib_free_send_mad(struct ib_mad_send_buf *send_buf)
826{
827 struct ib_mad_agent_private *mad_agent_priv;
828
829 mad_agent_priv = container_of(send_buf->mad_agent,
830 struct ib_mad_agent_private, agent);
831
832 dma_unmap_single(send_buf->mad_agent->device->dma_device,
833 pci_unmap_addr(send_buf, mapping),
834 send_buf->sge.length, DMA_TO_DEVICE);
835 kfree(send_buf->mad);
836
837 if (atomic_dec_and_test(&mad_agent_priv->refcount))
838 wake_up(&mad_agent_priv->wait);
839}
840EXPORT_SYMBOL(ib_free_send_mad);
841
842static int ib_send_mad(struct ib_mad_agent_private *mad_agent_priv,
843 struct ib_mad_send_wr_private *mad_send_wr)
844{
845 struct ib_mad_qp_info *qp_info;
846 struct ib_send_wr *bad_send_wr;
847 unsigned long flags;
848 int ret;
849
850 /* Replace user's WR ID with our own to find WR upon completion */
851 qp_info = mad_agent_priv->qp_info;
852 mad_send_wr->wr_id = mad_send_wr->send_wr.wr_id;
853 mad_send_wr->send_wr.wr_id = (unsigned long)&mad_send_wr->mad_list;
854 mad_send_wr->mad_list.mad_queue = &qp_info->send_queue;
855
856 spin_lock_irqsave(&qp_info->send_queue.lock, flags);
857 if (qp_info->send_queue.count++ < qp_info->send_queue.max_active) {
858 list_add_tail(&mad_send_wr->mad_list.list,
859 &qp_info->send_queue.list);
860 spin_unlock_irqrestore(&qp_info->send_queue.lock, flags);
861 ret = ib_post_send(mad_agent_priv->agent.qp,
862 &mad_send_wr->send_wr, &bad_send_wr);
863 if (ret) {
864 printk(KERN_ERR PFX "ib_post_send failed: %d\n", ret);
865 dequeue_mad(&mad_send_wr->mad_list);
866 }
867 } else {
868 list_add_tail(&mad_send_wr->mad_list.list,
869 &qp_info->overflow_list);
870 spin_unlock_irqrestore(&qp_info->send_queue.lock, flags);
871 ret = 0;
872 }
873 return ret;
874}
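/*
 * Send flow control, in brief: ib_send_mad() posts at most
 * send_queue.max_active work requests to the QP at once; anything beyond
 * that is parked on qp_info->overflow_list and posted later by
 * ib_mad_send_done_handler() as completions free up slots.
 */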
875
876/*
877 * ib_post_send_mad - Posts MAD(s) to the send queue of the QP associated
878 * with the registered client
879 */
880int ib_post_send_mad(struct ib_mad_agent *mad_agent,
881 struct ib_send_wr *send_wr,
882 struct ib_send_wr **bad_send_wr)
883{
884 int ret = -EINVAL;
885 struct ib_mad_agent_private *mad_agent_priv;
886
887 /* Validate supplied parameters */
888 if (!bad_send_wr)
889 goto error1;
890
891 if (!mad_agent || !send_wr)
892 goto error2;
893
894 if (!mad_agent->send_handler)
895 goto error2;
896
897 mad_agent_priv = container_of(mad_agent,
898 struct ib_mad_agent_private,
899 agent);
900
901 /* Walk list of send WRs and post each on send list */
902 while (send_wr) {
903 unsigned long flags;
904 struct ib_send_wr *next_send_wr;
905 struct ib_mad_send_wr_private *mad_send_wr;
906 struct ib_smp *smp;
907
908 /* Validate more parameters */
909 if (send_wr->num_sge > IB_MAD_SEND_REQ_MAX_SG)
910 goto error2;
911
912 if (send_wr->wr.ud.timeout_ms && !mad_agent->recv_handler)
913 goto error2;
914
915 if (!send_wr->wr.ud.mad_hdr) {
916 printk(KERN_ERR PFX "MAD header must be supplied "
917 "in WR %p\n", send_wr);
918 goto error2;
919 }
920
921 /*
922 * Save pointer to next work request to post in case the
923 * current one completes, and the user modifies the work
924 * request associated with the completion
925 */
926 next_send_wr = (struct ib_send_wr *)send_wr->next;
927
928 smp = (struct ib_smp *)send_wr->wr.ud.mad_hdr;
929 if (smp->mgmt_class == IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE) {
930 ret = handle_outgoing_dr_smp(mad_agent_priv, smp,
931 send_wr);
932 if (ret < 0) /* error */
933 goto error2;
934 else if (ret == 1) /* locally consumed */
935 goto next;
936 }
937
938 /* Allocate MAD send WR tracking structure */
939 mad_send_wr = kmalloc(sizeof *mad_send_wr, GFP_ATOMIC);
940 if (!mad_send_wr) {
941 printk(KERN_ERR PFX "No memory for "
942 "ib_mad_send_wr_private\n");
943 ret = -ENOMEM;
944 goto error2;
945 }
946
947 mad_send_wr->send_wr = *send_wr;
948 mad_send_wr->send_wr.sg_list = mad_send_wr->sg_list;
949 memcpy(mad_send_wr->sg_list, send_wr->sg_list,
950 sizeof *send_wr->sg_list * send_wr->num_sge);
951 mad_send_wr->send_wr.next = NULL;
952 mad_send_wr->tid = send_wr->wr.ud.mad_hdr->tid;
953 mad_send_wr->agent = mad_agent;
954 /* Timeout will be updated after send completes */
955 mad_send_wr->timeout = msecs_to_jiffies(send_wr->wr.
956 ud.timeout_ms);
957 mad_send_wr->retry = 0;
958 /* One reference for each work request to QP + response */
959 mad_send_wr->refcount = 1 + (mad_send_wr->timeout > 0);
960 mad_send_wr->status = IB_WC_SUCCESS;
961
962 /* Reference MAD agent until send completes */
963 atomic_inc(&mad_agent_priv->refcount);
964 spin_lock_irqsave(&mad_agent_priv->lock, flags);
965 list_add_tail(&mad_send_wr->agent_list,
966 &mad_agent_priv->send_list);
967 spin_unlock_irqrestore(&mad_agent_priv->lock, flags);
968
969 ret = ib_send_mad(mad_agent_priv, mad_send_wr);
970 if (ret) {
971 /* Fail send request */
972 spin_lock_irqsave(&mad_agent_priv->lock, flags);
973 list_del(&mad_send_wr->agent_list);
974 spin_unlock_irqrestore(&mad_agent_priv->lock, flags);
975 atomic_dec(&mad_agent_priv->refcount);
976 goto error2;
977 }
978next:
979 send_wr = next_send_wr;
980 }
981 return 0;
982
983error2:
984 *bad_send_wr = send_wr;
985error1:
986 return ret;
987}
988EXPORT_SYMBOL(ib_post_send_mad);
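/*
 * Illustrative sketch, not part of this file: the usual client-side send
 * path through ib_create_send_mad()/ib_post_send_mad()/ib_free_send_mad().
 * The agent, ah and TID value are hypothetical; the fields set here are
 * the ones the helpers above fill in or expect.
 */
#if 0
static int example_send_get(struct ib_mad_agent *agent, struct ib_ah *ah,
			    u32 remote_qpn, u16 pkey_index, u32 tid_low)
{
	struct ib_mad_send_buf *send_buf;
	struct ib_send_wr *bad_send_wr;
	int ret;

	send_buf = ib_create_send_mad(agent, remote_qpn, pkey_index, ah,
				      sizeof(struct ib_mad_hdr),
				      sizeof(struct ib_mad) -
				      sizeof(struct ib_mad_hdr), GFP_KERNEL);
	if (IS_ERR(send_buf))
		return PTR_ERR(send_buf);

	send_buf->mad->mad_hdr.base_version  = IB_MGMT_BASE_VERSION;
	send_buf->mad->mad_hdr.mgmt_class    = IB_MGMT_CLASS_PERF_MGMT;
	send_buf->mad->mad_hdr.class_version = 1;
	send_buf->mad->mad_hdr.method	     = IB_MGMT_METHOD_GET;
	/* High 32 bits of the TID route the response back to this agent */
	send_buf->mad->mad_hdr.tid =
		cpu_to_be64(((u64) agent->hi_tid << 32) | tid_low);

	/* Wait up to one second for the response before timing out */
	send_buf->send_wr.wr.ud.timeout_ms = 1000;

	ret = ib_post_send_mad(agent, &send_buf->send_wr, &bad_send_wr);
	if (ret)
		ib_free_send_mad(send_buf);	/* never reached the QP */
	/* On success the client's send_handler eventually frees send_buf */
	return ret;
}
#endif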
989
990/*
991 * ib_free_recv_mad - Returns data buffers used to receive
992 * a MAD to the access layer
993 */
994void ib_free_recv_mad(struct ib_mad_recv_wc *mad_recv_wc)
995{
996 struct ib_mad_recv_buf *entry;
997 struct ib_mad_private_header *mad_priv_hdr;
998 struct ib_mad_private *priv;
999
1000 mad_priv_hdr = container_of(mad_recv_wc,
1001 struct ib_mad_private_header,
1002 recv_wc);
1003 priv = container_of(mad_priv_hdr, struct ib_mad_private, header);
1004
1005 /*
1006 * Walk receive buffer list associated with this WC
1007 * No need to remove them from list of receive buffers
1008 */
1009 list_for_each_entry(entry, &mad_recv_wc->recv_buf.list, list) {
1010 /* Free previous receive buffer */
1011 kmem_cache_free(ib_mad_cache, priv);
1012 mad_priv_hdr = container_of(mad_recv_wc,
1013 struct ib_mad_private_header,
1014 recv_wc);
1015 priv = container_of(mad_priv_hdr, struct ib_mad_private,
1016 header);
1017 }
1018
1019 /* Free last buffer */
1020 kmem_cache_free(ib_mad_cache, priv);
1021}
1022EXPORT_SYMBOL(ib_free_recv_mad);
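/*
 * Ownership note: the ib_mad_recv_wc handed to a client's recv_handler
 * remains valid until the client returns it via ib_free_recv_mad();
 * after that the underlying buffers go back to ib_mad_cache and must not
 * be touched again.
 */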
1023
1024void ib_coalesce_recv_mad(struct ib_mad_recv_wc *mad_recv_wc,
1025 void *buf)
1026{
1027 printk(KERN_ERR PFX "ib_coalesce_recv_mad() not implemented yet\n");
1028}
1029EXPORT_SYMBOL(ib_coalesce_recv_mad);
1030
1031struct ib_mad_agent *ib_redirect_mad_qp(struct ib_qp *qp,
1032 u8 rmpp_version,
1033 ib_mad_send_handler send_handler,
1034 ib_mad_recv_handler recv_handler,
1035 void *context)
1036{
1037 return ERR_PTR(-EINVAL); /* XXX: for now */
1038}
1039EXPORT_SYMBOL(ib_redirect_mad_qp);
1040
1041int ib_process_mad_wc(struct ib_mad_agent *mad_agent,
1042 struct ib_wc *wc)
1043{
1044 printk(KERN_ERR PFX "ib_process_mad_wc() not implemented yet\n");
1045 return 0;
1046}
1047EXPORT_SYMBOL(ib_process_mad_wc);
1048
1049static int method_in_use(struct ib_mad_mgmt_method_table **method,
1050 struct ib_mad_reg_req *mad_reg_req)
1051{
1052 int i;
1053
1054 for (i = find_first_bit(mad_reg_req->method_mask, IB_MGMT_MAX_METHODS);
1055 i < IB_MGMT_MAX_METHODS;
1056 i = find_next_bit(mad_reg_req->method_mask, IB_MGMT_MAX_METHODS,
1057 1+i)) {
1058 if ((*method)->agent[i]) {
1059 printk(KERN_ERR PFX "Method %d already in use\n", i);
1060 return -EINVAL;
1061 }
1062 }
1063 return 0;
1064}
1065
1066static int allocate_method_table(struct ib_mad_mgmt_method_table **method)
1067{
1068 /* Allocate management method table */
1069 *method = kmalloc(sizeof **method, GFP_ATOMIC);
1070 if (!*method) {
1071 printk(KERN_ERR PFX "No memory for "
1072 "ib_mad_mgmt_method_table\n");
1073 return -ENOMEM;
1074 }
1075 /* Clear management method table */
1076 memset(*method, 0, sizeof **method);
1077
1078 return 0;
1079}
1080
1081/*
1082 * Check to see if there are any methods still in use
1083 */
1084static int check_method_table(struct ib_mad_mgmt_method_table *method)
1085{
1086 int i;
1087
1088 for (i = 0; i < IB_MGMT_MAX_METHODS; i++)
1089 if (method->agent[i])
1090 return 1;
1091 return 0;
1092}
1093
1094/*
1095 * Check to see if there are any method tables for this class still in use
1096 */
1097static int check_class_table(struct ib_mad_mgmt_class_table *class)
1098{
1099 int i;
1100
1101 for (i = 0; i < MAX_MGMT_CLASS; i++)
1102 if (class->method_table[i])
1103 return 1;
1104 return 0;
1105}
1106
1107static int check_vendor_class(struct ib_mad_mgmt_vendor_class *vendor_class)
1108{
1109 int i;
1110
1111 for (i = 0; i < MAX_MGMT_OUI; i++)
1112 if (vendor_class->method_table[i])
1113 return 1;
1114 return 0;
1115}
1116
1117static int find_vendor_oui(struct ib_mad_mgmt_vendor_class *vendor_class,
1118 char *oui)
1119{
1120 int i;
1121
1122 for (i = 0; i < MAX_MGMT_OUI; i++)
1123 /* Is there a matching OUI for this vendor class? */
1124 if (!memcmp(vendor_class->oui[i], oui, 3))
1125 return i;
1126
1127 return -1;
1128}
1129
1130static int check_vendor_table(struct ib_mad_mgmt_vendor_class_table *vendor)
1131{
1132 int i;
1133
1134 for (i = 0; i < MAX_MGMT_VENDOR_RANGE2; i++)
1135 if (vendor->vendor_class[i])
1136 return 1;
1137
1138 return 0;
1139}
1140
1141static void remove_methods_mad_agent(struct ib_mad_mgmt_method_table *method,
1142 struct ib_mad_agent_private *agent)
1143{
1144 int i;
1145
1146 /* Remove any methods for this mad agent */
1147 for (i = 0; i < IB_MGMT_MAX_METHODS; i++) {
1148 if (method->agent[i] == agent) {
1149 method->agent[i] = NULL;
1150 }
1151 }
1152}
1153
1154static int add_nonoui_reg_req(struct ib_mad_reg_req *mad_reg_req,
1155 struct ib_mad_agent_private *agent_priv,
1156 u8 mgmt_class)
1157{
1158 struct ib_mad_port_private *port_priv;
1159 struct ib_mad_mgmt_class_table **class;
1160 struct ib_mad_mgmt_method_table **method;
1161 int i, ret;
1162
1163 port_priv = agent_priv->qp_info->port_priv;
1164 class = &port_priv->version[mad_reg_req->mgmt_class_version].class;
1165 if (!*class) {
1166 /* Allocate management class table for "new" class version */
1167 *class = kmalloc(sizeof **class, GFP_ATOMIC);
1168 if (!*class) {
1169 printk(KERN_ERR PFX "No memory for "
1170 "ib_mad_mgmt_class_table\n");
1171 ret = -ENOMEM;
1172 goto error1;
1173 }
1174 /* Clear management class table */
1175 memset(*class, 0, sizeof(**class));
1176 /* Allocate method table for this management class */
1177 method = &(*class)->method_table[mgmt_class];
1178 if ((ret = allocate_method_table(method)))
1179 goto error2;
1180 } else {
1181 method = &(*class)->method_table[mgmt_class];
1182 if (!*method) {
1183 /* Allocate method table for this management class */
1184 if ((ret = allocate_method_table(method)))
1185 goto error1;
1186 }
1187 }
1188
1189 /* Now, make sure methods are not already in use */
1190 if (method_in_use(method, mad_reg_req))
1191 goto error3;
1192
1193 /* Finally, add in methods being registered */
1194 for (i = find_first_bit(mad_reg_req->method_mask,
1195 IB_MGMT_MAX_METHODS);
1196 i < IB_MGMT_MAX_METHODS;
1197 i = find_next_bit(mad_reg_req->method_mask, IB_MGMT_MAX_METHODS,
1198 1+i)) {
1199 (*method)->agent[i] = agent_priv;
1200 }
1201 return 0;
1202
1203error3:
1204 /* Remove any methods for this mad agent */
1205 remove_methods_mad_agent(*method, agent_priv);
1206 /* Now, check to see if there are any methods in use */
1207 if (!check_method_table(*method)) {
1208 /* If not, release management method table */
1209 kfree(*method);
1210 *method = NULL;
1211 }
1212 ret = -EINVAL;
1213 goto error1;
1214error2:
1215 kfree(*class);
1216 *class = NULL;
1217error1:
1218 return ret;
1219}
1220
1221static int add_oui_reg_req(struct ib_mad_reg_req *mad_reg_req,
1222 struct ib_mad_agent_private *agent_priv)
1223{
1224 struct ib_mad_port_private *port_priv;
1225 struct ib_mad_mgmt_vendor_class_table **vendor_table;
1226 struct ib_mad_mgmt_vendor_class_table *vendor = NULL;
1227 struct ib_mad_mgmt_vendor_class *vendor_class = NULL;
1228 struct ib_mad_mgmt_method_table **method;
1229 int i, ret = -ENOMEM;
1230 u8 vclass;
1231
1232 /* "New" vendor (with OUI) class */
1233 vclass = vendor_class_index(mad_reg_req->mgmt_class);
1234 port_priv = agent_priv->qp_info->port_priv;
1235 vendor_table = &port_priv->version[
1236 mad_reg_req->mgmt_class_version].vendor;
1237 if (!*vendor_table) {
1238 /* Allocate mgmt vendor class table for "new" class version */
1239 vendor = kmalloc(sizeof *vendor, GFP_ATOMIC);
1240 if (!vendor) {
1241 printk(KERN_ERR PFX "No memory for "
1242 "ib_mad_mgmt_vendor_class_table\n");
1243 goto error1;
1244 }
1245 /* Clear management vendor class table */
1246 memset(vendor, 0, sizeof(*vendor));
1247 *vendor_table = vendor;
1248 }
1249 if (!(*vendor_table)->vendor_class[vclass]) {
1250 /* Allocate table for this management vendor class */
1251 vendor_class = kmalloc(sizeof *vendor_class, GFP_ATOMIC);
1252 if (!vendor_class) {
1253 printk(KERN_ERR PFX "No memory for "
1254 "ib_mad_mgmt_vendor_class\n");
1255 goto error2;
1256 }
1257 memset(vendor_class, 0, sizeof(*vendor_class));
1258 (*vendor_table)->vendor_class[vclass] = vendor_class;
1259 }
1260 for (i = 0; i < MAX_MGMT_OUI; i++) {
1261 /* Is there a matching OUI for this vendor class? */
1262 if (!memcmp((*vendor_table)->vendor_class[vclass]->oui[i],
1263 mad_reg_req->oui, 3)) {
1264 method = &(*vendor_table)->vendor_class[
1265 vclass]->method_table[i];
1266 BUG_ON(!*method);
1267 goto check_in_use;
1268 }
1269 }
1270 for (i = 0; i < MAX_MGMT_OUI; i++) {
1271 /* Is an OUI slot available? */
1272 if (!is_vendor_oui((*vendor_table)->vendor_class[
1273 vclass]->oui[i])) {
1274 method = &(*vendor_table)->vendor_class[
1275 vclass]->method_table[i];
1276 BUG_ON(*method);
1277 /* Allocate method table for this OUI */
1278 if ((ret = allocate_method_table(method)))
1279 goto error3;
1280 memcpy((*vendor_table)->vendor_class[vclass]->oui[i],
1281 mad_reg_req->oui, 3);
1282 goto check_in_use;
1283 }
1284 }
1285 printk(KERN_ERR PFX "All OUI slots in use\n");
1286 goto error3;
1287
1288check_in_use:
1289 /* Now, make sure methods are not already in use */
1290 if (method_in_use(method, mad_reg_req))
1291 goto error4;
1292
1293 /* Finally, add in methods being registered */
1294 for (i = find_first_bit(mad_reg_req->method_mask,
1295 IB_MGMT_MAX_METHODS);
1296 i < IB_MGMT_MAX_METHODS;
1297 i = find_next_bit(mad_reg_req->method_mask, IB_MGMT_MAX_METHODS,
1298 1+i)) {
1299 (*method)->agent[i] = agent_priv;
1300 }
1301 return 0;
1302
1303error4:
1304 /* Remove any methods for this mad agent */
1305 remove_methods_mad_agent(*method, agent_priv);
1306 /* Now, check to see if there are any methods in use */
1307 if (!check_method_table(*method)) {
1308 /* If not, release management method table */
1309 kfree(*method);
1310 *method = NULL;
1311 }
1312 ret = -EINVAL;
1313error3:
1314 if (vendor_class) {
1315 (*vendor_table)->vendor_class[vclass] = NULL;
1316 kfree(vendor_class);
1317 }
1318error2:
1319 if (vendor) {
1320 *vendor_table = NULL;
1321 kfree(vendor);
1322 }
1323error1:
1324 return ret;
1325}
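/*
 * The two helpers above populate the per-port routing tree that
 * find_mad_agent() walks for unsolicited MADs:
 *
 *   port_priv->version[class_version].class
 *       ->method_table[mgmt_class]->agent[method]         (standard classes)
 *   port_priv->version[class_version].vendor
 *       ->vendor_class[vclass]->method_table[oui_index]
 *       ->agent[method]                                    (vendor range 2)
 *
 * All tables are allocated lazily on first registration and released by
 * remove_mad_reg_req() once their last method goes away.
 */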
1326
1327static void remove_mad_reg_req(struct ib_mad_agent_private *agent_priv)
1328{
1329 struct ib_mad_port_private *port_priv;
1330 struct ib_mad_mgmt_class_table *class;
1331 struct ib_mad_mgmt_method_table *method;
1332 struct ib_mad_mgmt_vendor_class_table *vendor;
1333 struct ib_mad_mgmt_vendor_class *vendor_class;
1334 int index;
1335 u8 mgmt_class;
1336
1337 /*
1338 * Was a MAD registration request supplied
1339 * with the original registration?
1340 */
1341 if (!agent_priv->reg_req) {
1342 goto out;
1343 }
1344
1345 port_priv = agent_priv->qp_info->port_priv;
1346 mgmt_class = convert_mgmt_class(agent_priv->reg_req->mgmt_class);
1347 class = port_priv->version[
1348 agent_priv->reg_req->mgmt_class_version].class;
1349 if (!class)
1350 goto vendor_check;
1351
1352 method = class->method_table[mgmt_class];
1353 if (method) {
1354 /* Remove any methods for this mad agent */
1355 remove_methods_mad_agent(method, agent_priv);
1356 /* Now, check to see if there are any methods still in use */
1357 if (!check_method_table(method)) {
1358 /* If not, release management method table */
1359 kfree(method);
1360 class->method_table[mgmt_class] = NULL;
1361 /* Any management classes left ? */
1362 if (!check_class_table(class)) {
1363 /* If not, release management class table */
1364 kfree(class);
1365 port_priv->version[
1366 agent_priv->reg_req->
1367 mgmt_class_version].class = NULL;
1368 }
1369 }
1370 }
1371
1372vendor_check:
1373 if (!is_vendor_class(mgmt_class))
1374 goto out;
1375
1376 /* normalize mgmt_class to vendor range 2 */
1377 mgmt_class = vendor_class_index(agent_priv->reg_req->mgmt_class);
1378 vendor = port_priv->version[
1379 agent_priv->reg_req->mgmt_class_version].vendor;
1380
1381 if (!vendor)
1382 goto out;
1383
1384 vendor_class = vendor->vendor_class[mgmt_class];
1385 if (vendor_class) {
1386 index = find_vendor_oui(vendor_class, agent_priv->reg_req->oui);
1387 if (index < 0)
1388 goto out;
1389 method = vendor_class->method_table[index];
1390 if (method) {
1391 /* Remove any methods for this mad agent */
1392 remove_methods_mad_agent(method, agent_priv);
1393 /*
1394 * Now, check to see if there are
1395 * any methods still in use
1396 */
1397 if (!check_method_table(method)) {
1398 /* If not, release management method table */
1399 kfree(method);
1400 vendor_class->method_table[index] = NULL;
1401 memset(vendor_class->oui[index], 0, 3);
1402 /* Any OUIs left ? */
1403 if (!check_vendor_class(vendor_class)) {
1404 /* If not, release vendor class table */
1405 kfree(vendor_class);
1406 vendor->vendor_class[mgmt_class] = NULL;
1407 /* Any other vendor classes left ? */
1408 if (!check_vendor_table(vendor)) {
1409 kfree(vendor);
1410 port_priv->version[
1411 agent_priv->reg_req->
1412 mgmt_class_version].
1413 vendor = NULL;
1414 }
1415 }
1416 }
1417 }
1418 }
1419
1420out:
1421 return;
1422}
1423
1424static int response_mad(struct ib_mad *mad)
1425{
1426 /* Trap represses are responses even though the response bit is not set */
1427 return ((mad->mad_hdr.method == IB_MGMT_METHOD_TRAP_REPRESS) ||
1428 (mad->mad_hdr.method & IB_MGMT_METHOD_RESP));
1429}
1430
1431static int solicited_mad(struct ib_mad *mad)
1432{
1433 /* CM MADs are never solicited */
1434 if (mad->mad_hdr.mgmt_class == IB_MGMT_CLASS_CM) {
1435 return 0;
1436 }
1437
1438 /* XXX: Determine whether MAD is using RMPP */
1439
1440 /* Not using RMPP */
1441 /* Is this MAD a response to a previous MAD ? */
1442 return response_mad(mad);
1443}
1444
1445static struct ib_mad_agent_private *
1446find_mad_agent(struct ib_mad_port_private *port_priv,
1447 struct ib_mad *mad,
1448 int solicited)
1449{
1450 struct ib_mad_agent_private *mad_agent = NULL;
1451 unsigned long flags;
1452
1453 spin_lock_irqsave(&port_priv->reg_lock, flags);
1454
1455 /*
1456 * Whether MAD was solicited determines type of routing to
1457 * MAD client.
1458 */
1459 if (solicited) {
1460 u32 hi_tid;
1461 struct ib_mad_agent_private *entry;
1462
1463 /*
1464 * Routing is based on high 32 bits of transaction ID
1465 * of MAD.
1466 */
1467 hi_tid = be64_to_cpu(mad->mad_hdr.tid) >> 32;
1468 list_for_each_entry(entry, &port_priv->agent_list,
1469 agent_list) {
1470 if (entry->agent.hi_tid == hi_tid) {
1471 mad_agent = entry;
1472 break;
1473 }
1474 }
1475 } else {
1476 struct ib_mad_mgmt_class_table *class;
1477 struct ib_mad_mgmt_method_table *method;
1478 struct ib_mad_mgmt_vendor_class_table *vendor;
1479 struct ib_mad_mgmt_vendor_class *vendor_class;
1480 struct ib_vendor_mad *vendor_mad;
1481 int index;
1482
1483 /*
1484 * Routing is based on version, class, and method
1485 * For "newer" vendor MADs, also based on OUI
1486 */
1487 if (mad->mad_hdr.class_version >= MAX_MGMT_VERSION)
1488 goto out;
1489 if (!is_vendor_class(mad->mad_hdr.mgmt_class)) {
1490 class = port_priv->version[
1491 mad->mad_hdr.class_version].class;
1492 if (!class)
1493 goto out;
1494 method = class->method_table[convert_mgmt_class(
1495 mad->mad_hdr.mgmt_class)];
1496 if (method)
1497 mad_agent = method->agent[mad->mad_hdr.method &
1498 ~IB_MGMT_METHOD_RESP];
1499 } else {
1500 vendor = port_priv->version[
1501 mad->mad_hdr.class_version].vendor;
1502 if (!vendor)
1503 goto out;
1504 vendor_class = vendor->vendor_class[vendor_class_index(
1505 mad->mad_hdr.mgmt_class)];
1506 if (!vendor_class)
1507 goto out;
1508 /* Find matching OUI */
1509 vendor_mad = (struct ib_vendor_mad *)mad;
1510 index = find_vendor_oui(vendor_class, vendor_mad->oui);
1511 if (index == -1)
1512 goto out;
1513 method = vendor_class->method_table[index];
1514 if (method) {
1515 mad_agent = method->agent[mad->mad_hdr.method &
1516 ~IB_MGMT_METHOD_RESP];
1517 }
1518 }
1519 }
1520
1521 if (mad_agent) {
1522 if (mad_agent->agent.recv_handler)
1523 atomic_inc(&mad_agent->refcount);
1524 else {
1525 printk(KERN_NOTICE PFX "No receive handler for client "
1526 "%p on port %d\n",
1527 &mad_agent->agent, port_priv->port_num);
1528 mad_agent = NULL;
1529 }
1530 }
1531out:
1532 spin_unlock_irqrestore(&port_priv->reg_lock, flags);
1533
1534 return mad_agent;
1535}
1536
1537static int validate_mad(struct ib_mad *mad, u32 qp_num)
1538{
1539 int valid = 0;
1540
1541 /* Make sure MAD base version is understood */
1542 if (mad->mad_hdr.base_version != IB_MGMT_BASE_VERSION) {
1543 printk(KERN_ERR PFX "MAD received with unsupported base "
1544 "version %d\n", mad->mad_hdr.base_version);
1545 goto out;
1546 }
1547
1548 /* Filter SMI packets sent to other than QP0 */
1549 if ((mad->mad_hdr.mgmt_class == IB_MGMT_CLASS_SUBN_LID_ROUTED) ||
1550 (mad->mad_hdr.mgmt_class == IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE)) {
1551 if (qp_num == 0)
1552 valid = 1;
1553 } else {
1554 /* Filter GSI packets sent to QP0 */
1555 if (qp_num != 0)
1556 valid = 1;
1557 }
1558
1559out:
1560 return valid;
1561}
1562
1563/*
1564 * Return the start of the fully reassembled MAD, or NULL if the MAD isn't assembled yet
1565 */
1566static struct ib_mad_private *
1567reassemble_recv(struct ib_mad_agent_private *mad_agent_priv,
1568 struct ib_mad_private *recv)
1569{
1570 /* Until we have RMPP, all receives are reassembled!... */
1571 INIT_LIST_HEAD(&recv->header.recv_wc.recv_buf.list);
1572 return recv;
1573}
1574
1575static struct ib_mad_send_wr_private*
1576find_send_req(struct ib_mad_agent_private *mad_agent_priv,
1577 u64 tid)
1578{
1579 struct ib_mad_send_wr_private *mad_send_wr;
1580
1581 list_for_each_entry(mad_send_wr, &mad_agent_priv->wait_list,
1582 agent_list) {
1583 if (mad_send_wr->tid == tid)
1584 return mad_send_wr;
1585 }
1586
1587 /*
1588 * It's possible to receive the response before we've
1589 * been notified that the send has completed
1590 */
1591 list_for_each_entry(mad_send_wr, &mad_agent_priv->send_list,
1592 agent_list) {
1593 if (mad_send_wr->tid == tid && mad_send_wr->timeout) {
1594 /* Verify request has not been canceled */
1595 return (mad_send_wr->status == IB_WC_SUCCESS) ?
1596 mad_send_wr : NULL;
1597 }
1598 }
1599 return NULL;
1600}
1601
1602static void ib_mad_complete_recv(struct ib_mad_agent_private *mad_agent_priv,
1603 struct ib_mad_private *recv,
1604 int solicited)
1605{
1606 struct ib_mad_send_wr_private *mad_send_wr;
1607 struct ib_mad_send_wc mad_send_wc;
1608 unsigned long flags;
1609
1610 /* Fully reassemble receive before processing */
1611 recv = reassemble_recv(mad_agent_priv, recv);
1612 if (!recv) {
1613 if (atomic_dec_and_test(&mad_agent_priv->refcount))
1614 wake_up(&mad_agent_priv->wait);
1615 return;
1616 }
1617
1618 /* Complete corresponding request */
1619 if (solicited) {
1620 spin_lock_irqsave(&mad_agent_priv->lock, flags);
1621 mad_send_wr = find_send_req(mad_agent_priv,
1622 recv->mad.mad.mad_hdr.tid);
1623 if (!mad_send_wr) {
1624 spin_unlock_irqrestore(&mad_agent_priv->lock, flags);
1625 ib_free_recv_mad(&recv->header.recv_wc);
1626 if (atomic_dec_and_test(&mad_agent_priv->refcount))
1627 wake_up(&mad_agent_priv->wait);
1628 return;
1629 }
1630 /* Timeout = 0 means that we won't wait for a response */
1631 mad_send_wr->timeout = 0;
1632 spin_unlock_irqrestore(&mad_agent_priv->lock, flags);
1633
1634 /* Defined behavior is to complete response before request */
1635 recv->header.recv_wc.wc->wr_id = mad_send_wr->wr_id;
1636 mad_agent_priv->agent.recv_handler(
1637 &mad_agent_priv->agent,
1638 &recv->header.recv_wc);
1639 atomic_dec(&mad_agent_priv->refcount);
1640
1641 mad_send_wc.status = IB_WC_SUCCESS;
1642 mad_send_wc.vendor_err = 0;
1643 mad_send_wc.wr_id = mad_send_wr->wr_id;
1644 ib_mad_complete_send_wr(mad_send_wr, &mad_send_wc);
1645 } else {
1646 mad_agent_priv->agent.recv_handler(
1647 &mad_agent_priv->agent,
1648 &recv->header.recv_wc);
1649 if (atomic_dec_and_test(&mad_agent_priv->refcount))
1650 wake_up(&mad_agent_priv->wait);
1651 }
1652}
1653
1654static void ib_mad_recv_done_handler(struct ib_mad_port_private *port_priv,
1655 struct ib_wc *wc)
1656{
1657 struct ib_mad_qp_info *qp_info;
1658 struct ib_mad_private_header *mad_priv_hdr;
1659 struct ib_mad_private *recv, *response;
1660 struct ib_mad_list_head *mad_list;
1661 struct ib_mad_agent_private *mad_agent;
1662 int solicited;
1663
1664 response = kmem_cache_alloc(ib_mad_cache, GFP_KERNEL);
1665 if (!response)
1666 printk(KERN_ERR PFX "ib_mad_recv_done_handler no memory "
1667 "for response buffer\n");
1668
1669 mad_list = (struct ib_mad_list_head *)(unsigned long)wc->wr_id;
1670 qp_info = mad_list->mad_queue->qp_info;
1671 dequeue_mad(mad_list);
1672
1673 mad_priv_hdr = container_of(mad_list, struct ib_mad_private_header,
1674 mad_list);
1675 recv = container_of(mad_priv_hdr, struct ib_mad_private, header);
1676 dma_unmap_single(port_priv->device->dma_device,
1677 pci_unmap_addr(&recv->header, mapping),
1678 sizeof(struct ib_mad_private) -
1679 sizeof(struct ib_mad_private_header),
1680 DMA_FROM_DEVICE);
1681
1682 /* Setup MAD receive work completion from "normal" work completion */
1683 recv->header.wc = *wc;
1684 recv->header.recv_wc.wc = &recv->header.wc;
1685 recv->header.recv_wc.mad_len = sizeof(struct ib_mad);
1686 recv->header.recv_wc.recv_buf.mad = &recv->mad.mad;
1687 recv->header.recv_wc.recv_buf.grh = &recv->grh;
1688
1689 if (atomic_read(&qp_info->snoop_count))
1690 snoop_recv(qp_info, &recv->header.recv_wc, IB_MAD_SNOOP_RECVS);
1691
1692 /* Validate MAD */
1693 if (!validate_mad(&recv->mad.mad, qp_info->qp->qp_num))
1694 goto out;
1695
1696 if (recv->mad.mad.mad_hdr.mgmt_class ==
1697 IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE) {
1698 if (!smi_handle_dr_smp_recv(&recv->mad.smp,
1699 port_priv->device->node_type,
1700 port_priv->port_num,
1701 port_priv->device->phys_port_cnt))
1702 goto out;
1703 if (!smi_check_forward_dr_smp(&recv->mad.smp))
1704 goto local;
1705 if (!smi_handle_dr_smp_send(&recv->mad.smp,
1706 port_priv->device->node_type,
1707 port_priv->port_num))
1708 goto out;
1709 if (!smi_check_local_dr_smp(&recv->mad.smp,
1710 port_priv->device,
1711 port_priv->port_num))
1712 goto out;
1713 }
1714
1715local:
1716 /* Give driver "right of first refusal" on incoming MAD */
1717 if (port_priv->device->process_mad) {
1718 int ret;
1719
1720 if (!response) {
1721 printk(KERN_ERR PFX "No memory for response MAD\n");
1722 /*
1723 * Is it better to assume that
1724 * it wouldn't be processed ?
1725 */
1726 goto out;
1727 }
1728
1729 ret = port_priv->device->process_mad(port_priv->device, 0,
1730 port_priv->port_num,
1731 wc, &recv->grh,
1732 &recv->mad.mad,
1733 &response->mad.mad);
1734 if (ret & IB_MAD_RESULT_SUCCESS) {
1735 if (ret & IB_MAD_RESULT_CONSUMED)
1736 goto out;
1737 if (ret & IB_MAD_RESULT_REPLY) {
1738 /* Send response */
1739 if (!agent_send(response, &recv->grh, wc,
1740 port_priv->device,
1741 port_priv->port_num))
1742 response = NULL;
1743 goto out;
1744 }
1745 }
1746 }
1747
1748 /* Determine corresponding MAD agent for incoming receive MAD */
1749 solicited = solicited_mad(&recv->mad.mad);
1750 mad_agent = find_mad_agent(port_priv, &recv->mad.mad, solicited);
1751 if (mad_agent) {
1752 ib_mad_complete_recv(mad_agent, recv, solicited);
1753 /*
1754 * recv is freed up in error cases in ib_mad_complete_recv
1755 * or via recv_handler in ib_mad_complete_recv()
1756 */
1757 recv = NULL;
1758 }
1759
1760out:
1761 /* Post another receive request for this QP */
1762 if (response) {
1763 ib_mad_post_receive_mads(qp_info, response);
1764 if (recv)
1765 kmem_cache_free(ib_mad_cache, recv);
1766 } else
1767 ib_mad_post_receive_mads(qp_info, recv);
1768}
1769
1770static void adjust_timeout(struct ib_mad_agent_private *mad_agent_priv)
1771{
1772 struct ib_mad_send_wr_private *mad_send_wr;
1773 unsigned long delay;
1774
1775 if (list_empty(&mad_agent_priv->wait_list)) {
1776 cancel_delayed_work(&mad_agent_priv->timed_work);
1777 } else {
1778 mad_send_wr = list_entry(mad_agent_priv->wait_list.next,
1779 struct ib_mad_send_wr_private,
1780 agent_list);
1781
1782 if (time_after(mad_agent_priv->timeout,
1783 mad_send_wr->timeout)) {
1784 mad_agent_priv->timeout = mad_send_wr->timeout;
1785 cancel_delayed_work(&mad_agent_priv->timed_work);
1786 delay = mad_send_wr->timeout - jiffies;
1787 if ((long)delay <= 0)
1788 delay = 1;
1789 queue_delayed_work(mad_agent_priv->qp_info->
1790 port_priv->wq,
1791 &mad_agent_priv->timed_work, delay);
1792 }
1793 }
1794}
1795
1796static void wait_for_response(struct ib_mad_agent_private *mad_agent_priv,
1797 struct ib_mad_send_wr_private *mad_send_wr)
1798{
1799 struct ib_mad_send_wr_private *temp_mad_send_wr;
1800 struct list_head *list_item;
1801 unsigned long delay;
1802
1803 list_del(&mad_send_wr->agent_list);
1804
1805 delay = mad_send_wr->timeout;
1806 mad_send_wr->timeout += jiffies;
1807
1808 list_for_each_prev(list_item, &mad_agent_priv->wait_list) {
1809 temp_mad_send_wr = list_entry(list_item,
1810 struct ib_mad_send_wr_private,
1811 agent_list);
1812 if (time_after(mad_send_wr->timeout,
1813 temp_mad_send_wr->timeout))
1814 break;
1815 }
1816 list_add(&mad_send_wr->agent_list, list_item);
1817
1818 /* Reschedule a work item if we have a shorter timeout */
1819 if (mad_agent_priv->wait_list.next == &mad_send_wr->agent_list) {
1820 cancel_delayed_work(&mad_agent_priv->timed_work);
1821 queue_delayed_work(mad_agent_priv->qp_info->port_priv->wq,
1822 &mad_agent_priv->timed_work, delay);
1823 }
1824}
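/*
 * Note on the wait list: entries are kept sorted by absolute expiration
 * time (in jiffies), earliest at the head.  wait_for_response() walks
 * backward from the tail to find the insertion point and reschedules
 * timed_work only when the new entry becomes the head, i.e. the next
 * send to time out.
 */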
1825
1826/*
1827 * Process a send work completion
1828 */
1829static void ib_mad_complete_send_wr(struct ib_mad_send_wr_private *mad_send_wr,
1830 struct ib_mad_send_wc *mad_send_wc)
1831{
1832 struct ib_mad_agent_private *mad_agent_priv;
1833 unsigned long flags;
1834
1835 mad_agent_priv = container_of(mad_send_wr->agent,
1836 struct ib_mad_agent_private, agent);
1837
1838 spin_lock_irqsave(&mad_agent_priv->lock, flags);
1839 if (mad_send_wc->status != IB_WC_SUCCESS &&
1840 mad_send_wr->status == IB_WC_SUCCESS) {
1841 mad_send_wr->status = mad_send_wc->status;
1842 mad_send_wr->refcount -= (mad_send_wr->timeout > 0);
1843 }
1844
1845 if (--mad_send_wr->refcount > 0) {
1846 if (mad_send_wr->refcount == 1 && mad_send_wr->timeout &&
1847 mad_send_wr->status == IB_WC_SUCCESS) {
1848 wait_for_response(mad_agent_priv, mad_send_wr);
1849 }
1850 spin_unlock_irqrestore(&mad_agent_priv->lock, flags);
1851 return;
1852 }
1853
1854 /* Remove send from MAD agent and notify client of completion */
1855 list_del(&mad_send_wr->agent_list);
1856 adjust_timeout(mad_agent_priv);
1857 spin_unlock_irqrestore(&mad_agent_priv->lock, flags);
1858
1859	if (mad_send_wr->status != IB_WC_SUCCESS)
1860 mad_send_wc->status = mad_send_wr->status;
1861 mad_agent_priv->agent.send_handler(&mad_agent_priv->agent,
1862 mad_send_wc);
1863
1864 /* Release reference on agent taken when sending */
1865 if (atomic_dec_and_test(&mad_agent_priv->refcount))
1866 wake_up(&mad_agent_priv->wait);
1867
1868 kfree(mad_send_wr);
1869}
1870
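/*
 * Handle a successful send completion.  The work request is recovered
 * from the wr_id stashed in the completion, removed from the software
 * send queue, and completed with the client's original wr_id restored.
 * If sends had overflowed the hardware send queue, the oldest entry on
 * the overflow list is moved onto the send queue and posted now; should
 * that post fail, the retry loop completes it with IB_WC_LOC_QP_OP_ERR.
 */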
1871static void ib_mad_send_done_handler(struct ib_mad_port_private *port_priv,
1872 struct ib_wc *wc)
1873{
1874 struct ib_mad_send_wr_private *mad_send_wr, *queued_send_wr;
1875 struct ib_mad_list_head *mad_list;
1876 struct ib_mad_qp_info *qp_info;
1877 struct ib_mad_queue *send_queue;
1878 struct ib_send_wr *bad_send_wr;
1879 unsigned long flags;
1880 int ret;
1881
1882 mad_list = (struct ib_mad_list_head *)(unsigned long)wc->wr_id;
1883 mad_send_wr = container_of(mad_list, struct ib_mad_send_wr_private,
1884 mad_list);
1885 send_queue = mad_list->mad_queue;
1886 qp_info = send_queue->qp_info;
1887
1888retry:
1889 queued_send_wr = NULL;
1890 spin_lock_irqsave(&send_queue->lock, flags);
1891 list_del(&mad_list->list);
1892
1893 /* Move queued send to the send queue */
1894 if (send_queue->count-- > send_queue->max_active) {
1895 mad_list = container_of(qp_info->overflow_list.next,
1896 struct ib_mad_list_head, list);
1897 queued_send_wr = container_of(mad_list,
1898 struct ib_mad_send_wr_private,
1899 mad_list);
1900 list_del(&mad_list->list);
1901 list_add_tail(&mad_list->list, &send_queue->list);
1902 }
1903 spin_unlock_irqrestore(&send_queue->lock, flags);
1904
1905 /* Restore client wr_id in WC and complete send */
1906 wc->wr_id = mad_send_wr->wr_id;
1907 if (atomic_read(&qp_info->snoop_count))
1908 snoop_send(qp_info, &mad_send_wr->send_wr,
1909 (struct ib_mad_send_wc *)wc,
1910 IB_MAD_SNOOP_SEND_COMPLETIONS);
1911 ib_mad_complete_send_wr(mad_send_wr, (struct ib_mad_send_wc *)wc);
1912
1913 if (queued_send_wr) {
1914 ret = ib_post_send(qp_info->qp, &queued_send_wr->send_wr,
1915 &bad_send_wr);
1916 if (ret) {
1917 printk(KERN_ERR PFX "ib_post_send failed: %d\n", ret);
1918 mad_send_wr = queued_send_wr;
1919 wc->status = IB_WC_LOC_QP_OP_ERR;
1920 goto retry;
1921 }
1922 }
1923}
1924
1925static void mark_sends_for_retry(struct ib_mad_qp_info *qp_info)
1926{
1927 struct ib_mad_send_wr_private *mad_send_wr;
1928 struct ib_mad_list_head *mad_list;
1929 unsigned long flags;
1930
1931 spin_lock_irqsave(&qp_info->send_queue.lock, flags);
1932 list_for_each_entry(mad_list, &qp_info->send_queue.list, list) {
1933 mad_send_wr = container_of(mad_list,
1934 struct ib_mad_send_wr_private,
1935 mad_list);
1936 mad_send_wr->retry = 1;
1937 }
1938 spin_unlock_irqrestore(&qp_info->send_queue.lock, flags);
1939}
1940
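/*
 * Handle a completion with an error status.  Receive errors mean the QP
 * has already entered the error state and are left to the shutdown path.
 * A send flushed after an SQE recovery is reposted once if it was marked
 * for retry by mark_sends_for_retry(); otherwise it is completed with
 * the flush status.  Any other send error has moved the QP to SQE, so
 * the QP is returned to RTS, the still-posted sends are marked for a
 * retry, and the offending send is completed with its error status.
 */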
1941static void mad_error_handler(struct ib_mad_port_private *port_priv,
1942 struct ib_wc *wc)
1943{
1944 struct ib_mad_list_head *mad_list;
1945 struct ib_mad_qp_info *qp_info;
1946 struct ib_mad_send_wr_private *mad_send_wr;
1947 int ret;
1948
1949 /* Determine if failure was a send or receive */
1950 mad_list = (struct ib_mad_list_head *)(unsigned long)wc->wr_id;
1951 qp_info = mad_list->mad_queue->qp_info;
1952 if (mad_list->mad_queue == &qp_info->recv_queue)
1953 /*
1954 * Receive errors indicate that the QP has entered the error
1955 * state - error handling/shutdown code will cleanup
1956 */
1957 return;
1958
1959 /*
1960 * Send errors will transition the QP to SQE - move
1961 * QP to RTS and repost flushed work requests
1962 */
1963 mad_send_wr = container_of(mad_list, struct ib_mad_send_wr_private,
1964 mad_list);
1965 if (wc->status == IB_WC_WR_FLUSH_ERR) {
1966 if (mad_send_wr->retry) {
1967 /* Repost send */
1968 struct ib_send_wr *bad_send_wr;
1969
1970 mad_send_wr->retry = 0;
1971 ret = ib_post_send(qp_info->qp, &mad_send_wr->send_wr,
1972 &bad_send_wr);
1973 if (ret)
1974 ib_mad_send_done_handler(port_priv, wc);
1975 } else
1976 ib_mad_send_done_handler(port_priv, wc);
1977 } else {
1978 struct ib_qp_attr *attr;
1979
1980 /* Transition QP to RTS and fail offending send */
1981 attr = kmalloc(sizeof *attr, GFP_KERNEL);
1982 if (attr) {
1983 attr->qp_state = IB_QPS_RTS;
1984 attr->cur_qp_state = IB_QPS_SQE;
1985 ret = ib_modify_qp(qp_info->qp, attr,
1986 IB_QP_STATE | IB_QP_CUR_STATE);
1987 kfree(attr);
1988 if (ret)
1989 printk(KERN_ERR PFX "mad_error_handler - "
1990 "ib_modify_qp to RTS : %d\n", ret);
1991 else
1992 mark_sends_for_retry(qp_info);
1993 }
1994 ib_mad_send_done_handler(port_priv, wc);
1995 }
1996}
1997
1998/*
1999 * IB MAD completion callback
2000 */
2001static void ib_mad_completion_handler(void *data)
2002{
2003 struct ib_mad_port_private *port_priv;
2004 struct ib_wc wc;
2005
2006 port_priv = (struct ib_mad_port_private *)data;
2007 ib_req_notify_cq(port_priv->cq, IB_CQ_NEXT_COMP);
2008
2009 while (ib_poll_cq(port_priv->cq, 1, &wc) == 1) {
2010 if (wc.status == IB_WC_SUCCESS) {
2011 switch (wc.opcode) {
2012 case IB_WC_SEND:
2013 ib_mad_send_done_handler(port_priv, &wc);
2014 break;
2015 case IB_WC_RECV:
2016 ib_mad_recv_done_handler(port_priv, &wc);
2017 break;
2018 default:
2019 BUG_ON(1);
2020 break;
2021 }
2022 } else
2023 mad_error_handler(port_priv, &wc);
2024 }
2025}
2026
2027static void cancel_mads(struct ib_mad_agent_private *mad_agent_priv)
2028{
2029 unsigned long flags;
2030 struct ib_mad_send_wr_private *mad_send_wr, *temp_mad_send_wr;
2031 struct ib_mad_send_wc mad_send_wc;
2032 struct list_head cancel_list;
2033
2034 INIT_LIST_HEAD(&cancel_list);
2035
2036 spin_lock_irqsave(&mad_agent_priv->lock, flags);
2037 list_for_each_entry_safe(mad_send_wr, temp_mad_send_wr,
2038 &mad_agent_priv->send_list, agent_list) {
2039 if (mad_send_wr->status == IB_WC_SUCCESS) {
2040 mad_send_wr->status = IB_WC_WR_FLUSH_ERR;
2041 mad_send_wr->refcount -= (mad_send_wr->timeout > 0);
2042 }
2043 }
2044
2045 /* Empty wait list to prevent receives from finding a request */
2046 list_splice_init(&mad_agent_priv->wait_list, &cancel_list);
2047 spin_unlock_irqrestore(&mad_agent_priv->lock, flags);
2048
2049 /* Report all cancelled requests */
2050 mad_send_wc.status = IB_WC_WR_FLUSH_ERR;
2051 mad_send_wc.vendor_err = 0;
2052
2053 list_for_each_entry_safe(mad_send_wr, temp_mad_send_wr,
2054 &cancel_list, agent_list) {
2055 mad_send_wc.wr_id = mad_send_wr->wr_id;
2056 mad_agent_priv->agent.send_handler(&mad_agent_priv->agent,
2057 &mad_send_wc);
2058
2059 list_del(&mad_send_wr->agent_list);
2060 kfree(mad_send_wr);
2061 atomic_dec(&mad_agent_priv->refcount);
2062 }
2063}
2064
2065static struct ib_mad_send_wr_private*
2066find_send_by_wr_id(struct ib_mad_agent_private *mad_agent_priv,
2067 u64 wr_id)
2068{
2069 struct ib_mad_send_wr_private *mad_send_wr;
2070
2071 list_for_each_entry(mad_send_wr, &mad_agent_priv->wait_list,
2072 agent_list) {
2073 if (mad_send_wr->wr_id == wr_id)
2074 return mad_send_wr;
2075 }
2076
2077 list_for_each_entry(mad_send_wr, &mad_agent_priv->send_list,
2078 agent_list) {
2079 if (mad_send_wr->wr_id == wr_id)
2080 return mad_send_wr;
2081 }
2082 return NULL;
2083}
2084
2085static void cancel_sends(void *data)
2086{
2087 struct ib_mad_agent_private *mad_agent_priv;
2088 struct ib_mad_send_wr_private *mad_send_wr;
2089 struct ib_mad_send_wc mad_send_wc;
2090 unsigned long flags;
2091
2092 mad_agent_priv = data;
2093
2094 mad_send_wc.status = IB_WC_WR_FLUSH_ERR;
2095 mad_send_wc.vendor_err = 0;
2096
2097 spin_lock_irqsave(&mad_agent_priv->lock, flags);
2098 while (!list_empty(&mad_agent_priv->canceled_list)) {
2099 mad_send_wr = list_entry(mad_agent_priv->canceled_list.next,
2100 struct ib_mad_send_wr_private,
2101 agent_list);
2102
2103 list_del(&mad_send_wr->agent_list);
2104 spin_unlock_irqrestore(&mad_agent_priv->lock, flags);
2105
2106 mad_send_wc.wr_id = mad_send_wr->wr_id;
2107 mad_agent_priv->agent.send_handler(&mad_agent_priv->agent,
2108 &mad_send_wc);
2109
2110 kfree(mad_send_wr);
2111 if (atomic_dec_and_test(&mad_agent_priv->refcount))
2112 wake_up(&mad_agent_priv->wait);
2113 spin_lock_irqsave(&mad_agent_priv->lock, flags);
2114 }
2115 spin_unlock_irqrestore(&mad_agent_priv->lock, flags);
2116}
2117
2118void ib_cancel_mad(struct ib_mad_agent *mad_agent,
2119 u64 wr_id)
2120{
2121 struct ib_mad_agent_private *mad_agent_priv;
2122 struct ib_mad_send_wr_private *mad_send_wr;
2123 unsigned long flags;
2124
2125 mad_agent_priv = container_of(mad_agent, struct ib_mad_agent_private,
2126 agent);
2127 spin_lock_irqsave(&mad_agent_priv->lock, flags);
2128 mad_send_wr = find_send_by_wr_id(mad_agent_priv, wr_id);
2129 if (!mad_send_wr) {
2130 spin_unlock_irqrestore(&mad_agent_priv->lock, flags);
2131 goto out;
2132 }
2133
2134 if (mad_send_wr->status == IB_WC_SUCCESS)
2135 mad_send_wr->refcount -= (mad_send_wr->timeout > 0);
2136
2137 if (mad_send_wr->refcount != 0) {
2138 mad_send_wr->status = IB_WC_WR_FLUSH_ERR;
2139 spin_unlock_irqrestore(&mad_agent_priv->lock, flags);
2140 goto out;
2141 }
2142
2143 list_del(&mad_send_wr->agent_list);
2144 list_add_tail(&mad_send_wr->agent_list, &mad_agent_priv->canceled_list);
2145 adjust_timeout(mad_agent_priv);
2146 spin_unlock_irqrestore(&mad_agent_priv->lock, flags);
2147
2148 queue_work(mad_agent_priv->qp_info->port_priv->wq,
2149 &mad_agent_priv->canceled_work);
2150out:
2151 return;
2152}
2153EXPORT_SYMBOL(ib_cancel_mad);
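/*
 * Cancellation semantics: if the request still has completions
 * outstanding it is only marked IB_WC_WR_FLUSH_ERR and reported once
 * those drain; otherwise it is moved to the canceled list and reported
 * from the agent's canceled_work item, keeping the client callback out
 * of the agent lock.
 *
 * Illustrative client-side sketch (hypothetical code, not part of this
 * file; "my_agent" and "my_wr_id" stand for whatever the client used
 * when posting the MAD):
 *
 *	ib_cancel_mad(my_agent, my_wr_id);
 *	// If the MAD was still outstanding, my_agent->send_handler is
 *	// later called with a mad_send_wc whose status is
 *	// IB_WC_WR_FLUSH_ERR.
 */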
2154
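/*
 * Deliver completions for MADs that were handled locally rather than
 * put on the wire.  For each queued entry a work completion is
 * synthesized, the receive is handed to the receiving agent first
 * (response before request), and the originating client's send is then
 * completed with IB_WC_SUCCESS.
 */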
2155static void local_completions(void *data)
2156{
2157 struct ib_mad_agent_private *mad_agent_priv;
2158 struct ib_mad_local_private *local;
2159 struct ib_mad_agent_private *recv_mad_agent;
2160 unsigned long flags;
2161 struct ib_wc wc;
2162 struct ib_mad_send_wc mad_send_wc;
2163
2164 mad_agent_priv = (struct ib_mad_agent_private *)data;
2165
2166 spin_lock_irqsave(&mad_agent_priv->lock, flags);
2167 while (!list_empty(&mad_agent_priv->local_list)) {
2168 local = list_entry(mad_agent_priv->local_list.next,
2169 struct ib_mad_local_private,
2170 completion_list);
2171 spin_unlock_irqrestore(&mad_agent_priv->lock, flags);
2172 if (local->mad_priv) {
2173 recv_mad_agent = local->recv_mad_agent;
2174 if (!recv_mad_agent) {
2175 printk(KERN_ERR PFX "No receive MAD agent for local completion\n");
2176 kmem_cache_free(ib_mad_cache, local->mad_priv);
2177 goto local_send_completion;
2178 }
2179
2180 /*
2181 * Defined behavior is to complete response
2182 * before request
2183 */
2184 build_smp_wc(local->wr_id, IB_LID_PERMISSIVE,
2185 0 /* pkey index */,
2186 recv_mad_agent->agent.port_num, &wc);
2187
2188 local->mad_priv->header.recv_wc.wc = &wc;
2189 local->mad_priv->header.recv_wc.mad_len =
2190 sizeof(struct ib_mad);
2191 INIT_LIST_HEAD(&local->mad_priv->header.recv_wc.recv_buf.list);
2192 local->mad_priv->header.recv_wc.recv_buf.grh = NULL;
2193 local->mad_priv->header.recv_wc.recv_buf.mad =
2194 &local->mad_priv->mad.mad;
2195 if (atomic_read(&recv_mad_agent->qp_info->snoop_count))
2196 snoop_recv(recv_mad_agent->qp_info,
2197 &local->mad_priv->header.recv_wc,
2198 IB_MAD_SNOOP_RECVS);
2199 recv_mad_agent->agent.recv_handler(
2200 &recv_mad_agent->agent,
2201 &local->mad_priv->header.recv_wc);
2202 spin_lock_irqsave(&recv_mad_agent->lock, flags);
2203 atomic_dec(&recv_mad_agent->refcount);
2204 spin_unlock_irqrestore(&recv_mad_agent->lock, flags);
2205 }
2206
2207local_send_completion:
2208 /* Complete send */
2209 mad_send_wc.status = IB_WC_SUCCESS;
2210 mad_send_wc.vendor_err = 0;
2211 mad_send_wc.wr_id = local->wr_id;
2212 if (atomic_read(&mad_agent_priv->qp_info->snoop_count))
2213 snoop_send(mad_agent_priv->qp_info, &local->send_wr,
2214 &mad_send_wc,
2215 IB_MAD_SNOOP_SEND_COMPLETIONS);
2216 mad_agent_priv->agent.send_handler(&mad_agent_priv->agent,
2217 &mad_send_wc);
2218
2219 spin_lock_irqsave(&mad_agent_priv->lock, flags);
2220 list_del(&local->completion_list);
2221 atomic_dec(&mad_agent_priv->refcount);
2222 kfree(local);
2223 }
2224 spin_unlock_irqrestore(&mad_agent_priv->lock, flags);
2225}
2226
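/*
 * Delayed-work handler for response timeouts.  Walks the wait list,
 * which is kept sorted by expiry, reporting IB_WC_RESP_TIMEOUT_ERR for
 * every request whose deadline has passed, and re-arms itself for the
 * first entry that has not yet expired.
 */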
2227static void timeout_sends(void *data)
2228{
2229 struct ib_mad_agent_private *mad_agent_priv;
2230 struct ib_mad_send_wr_private *mad_send_wr;
2231 struct ib_mad_send_wc mad_send_wc;
2232 unsigned long flags, delay;
2233
2234 mad_agent_priv = (struct ib_mad_agent_private *)data;
2235
2236 mad_send_wc.status = IB_WC_RESP_TIMEOUT_ERR;
2237 mad_send_wc.vendor_err = 0;
2238
2239 spin_lock_irqsave(&mad_agent_priv->lock, flags);
2240 while (!list_empty(&mad_agent_priv->wait_list)) {
2241 mad_send_wr = list_entry(mad_agent_priv->wait_list.next,
2242 struct ib_mad_send_wr_private,
2243 agent_list);
2244
2245 if (time_after(mad_send_wr->timeout, jiffies)) {
2246 delay = mad_send_wr->timeout - jiffies;
2247 if ((long)delay <= 0)
2248 delay = 1;
2249 queue_delayed_work(mad_agent_priv->qp_info->
2250 port_priv->wq,
2251 &mad_agent_priv->timed_work, delay);
2252 break;
2253 }
2254
2255 list_del(&mad_send_wr->agent_list);
2256 spin_unlock_irqrestore(&mad_agent_priv->lock, flags);
2257
2258 mad_send_wc.wr_id = mad_send_wr->wr_id;
2259 mad_agent_priv->agent.send_handler(&mad_agent_priv->agent,
2260 &mad_send_wc);
2261
2262 kfree(mad_send_wr);
2263 atomic_dec(&mad_agent_priv->refcount);
2264 spin_lock_irqsave(&mad_agent_priv->lock, flags);
2265 }
2266 spin_unlock_irqrestore(&mad_agent_priv->lock, flags);
2267}
2268
2269static void ib_mad_thread_completion_handler(struct ib_cq *cq)
2270{
2271 struct ib_mad_port_private *port_priv = cq->cq_context;
2272
2273 queue_work(port_priv->wq, &port_priv->work);
2274}
2275
2276/*
2277 * Allocate receive MADs and post receive WRs for them
2278 */
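/*
 * The receive queue is kept filled up to max_active entries.  A buffer
 * passed in by the caller (for example one being recycled by the
 * receive completion path) is reused for the first post; further
 * buffers come from the ib_mad slab cache.  Each buffer is DMA-mapped
 * and the address of its embedded ib_mad_list_head serves as the work
 * request's wr_id.
 */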
2279static int ib_mad_post_receive_mads(struct ib_mad_qp_info *qp_info,
2280 struct ib_mad_private *mad)
2281{
2282 unsigned long flags;
2283 int post, ret;
2284 struct ib_mad_private *mad_priv;
2285 struct ib_sge sg_list;
2286 struct ib_recv_wr recv_wr, *bad_recv_wr;
2287 struct ib_mad_queue *recv_queue = &qp_info->recv_queue;
2288
2289 /* Initialize common scatter list fields */
2290 sg_list.length = sizeof *mad_priv - sizeof mad_priv->header;
2291	sg_list.lkey = qp_info->port_priv->mr->lkey;
2292
2293 /* Initialize common receive WR fields */
2294 recv_wr.next = NULL;
2295 recv_wr.sg_list = &sg_list;
2296 recv_wr.num_sge = 1;
2297
2298 do {
2299 /* Allocate and map receive buffer */
2300 if (mad) {
2301 mad_priv = mad;
2302 mad = NULL;
2303 } else {
2304 mad_priv = kmem_cache_alloc(ib_mad_cache, GFP_KERNEL);
2305 if (!mad_priv) {
2306 printk(KERN_ERR PFX "No memory for receive buffer\n");
2307 ret = -ENOMEM;
2308 break;
2309 }
2310 }
2311 sg_list.addr = dma_map_single(qp_info->port_priv->
2312 device->dma_device,
2313 &mad_priv->grh,
2314 sizeof *mad_priv -
2315 sizeof mad_priv->header,
2316 DMA_FROM_DEVICE);
2317 pci_unmap_addr_set(&mad_priv->header, mapping, sg_list.addr);
2318 recv_wr.wr_id = (unsigned long)&mad_priv->header.mad_list;
2319 mad_priv->header.mad_list.mad_queue = recv_queue;
2320
2321 /* Post receive WR */
2322 spin_lock_irqsave(&recv_queue->lock, flags);
2323 post = (++recv_queue->count < recv_queue->max_active);
2324 list_add_tail(&mad_priv->header.mad_list.list, &recv_queue->list);
2325 spin_unlock_irqrestore(&recv_queue->lock, flags);
2326 ret = ib_post_recv(qp_info->qp, &recv_wr, &bad_recv_wr);
2327 if (ret) {
2328 spin_lock_irqsave(&recv_queue->lock, flags);
2329 list_del(&mad_priv->header.mad_list.list);
2330 recv_queue->count--;
2331 spin_unlock_irqrestore(&recv_queue->lock, flags);
2332 dma_unmap_single(qp_info->port_priv->device->dma_device,
2333 pci_unmap_addr(&mad_priv->header,
2334 mapping),
2335 sizeof *mad_priv -
2336 sizeof mad_priv->header,
2337 DMA_FROM_DEVICE);
2338 kmem_cache_free(ib_mad_cache, mad_priv);
2339 printk(KERN_ERR PFX "ib_post_recv failed: %d\n", ret);
2340 break;
2341 }
2342 } while (post);
2343
2344 return ret;
2345}
2346
2347/*
2348 * Return all the posted receive MADs
2349 */
2350static void cleanup_recv_queue(struct ib_mad_qp_info *qp_info)
2351{
2352 struct ib_mad_private_header *mad_priv_hdr;
2353 struct ib_mad_private *recv;
2354 struct ib_mad_list_head *mad_list;
2355
2356 while (!list_empty(&qp_info->recv_queue.list)) {
2357
2358 mad_list = list_entry(qp_info->recv_queue.list.next,
2359 struct ib_mad_list_head, list);
2360 mad_priv_hdr = container_of(mad_list,
2361 struct ib_mad_private_header,
2362 mad_list);
2363 recv = container_of(mad_priv_hdr, struct ib_mad_private,
2364 header);
2365
2366 /* Remove from posted receive MAD list */
2367 list_del(&mad_list->list);
2368
2369 dma_unmap_single(qp_info->port_priv->device->dma_device,
2370 pci_unmap_addr(&recv->header, mapping),
2371 sizeof(struct ib_mad_private) -
2372 sizeof(struct ib_mad_private_header),
2373 DMA_FROM_DEVICE);
2374 kmem_cache_free(ib_mad_cache, recv);
2375 }
2376
2377 qp_info->recv_queue.count = 0;
2378}
2379
2380/*
2381 * Start the port
2382 */
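/*
 * Both MAD QPs (QP0/SMI and QP1/GSI) are taken through the
 * INIT -> RTR -> RTS transitions, completion notification is requested
 * on the shared CQ, and the receive work requests are pre-posted for
 * each QP.
 */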
2383static int ib_mad_port_start(struct ib_mad_port_private *port_priv)
2384{
2385 int ret, i;
2386 struct ib_qp_attr *attr;
2387 struct ib_qp *qp;
2388
2389 attr = kmalloc(sizeof *attr, GFP_KERNEL);
2390 if (!attr) {
2391 printk(KERN_ERR PFX "Couldn't kmalloc ib_qp_attr\n");
2392 return -ENOMEM;
2393 }
2394
2395 for (i = 0; i < IB_MAD_QPS_CORE; i++) {
2396 qp = port_priv->qp_info[i].qp;
2397 /*
2398 * PKey index for QP1 is irrelevant but
2399 * one is needed for the Reset to Init transition
2400 */
2401 attr->qp_state = IB_QPS_INIT;
2402 attr->pkey_index = 0;
2403 attr->qkey = (qp->qp_num == 0) ? 0 : IB_QP1_QKEY;
2404 ret = ib_modify_qp(qp, attr, IB_QP_STATE |
2405 IB_QP_PKEY_INDEX | IB_QP_QKEY);
2406 if (ret) {
2407 printk(KERN_ERR PFX "Couldn't change QP%d state to "
2408 "INIT: %d\n", i, ret);
2409 goto out;
2410 }
2411
2412 attr->qp_state = IB_QPS_RTR;
2413 ret = ib_modify_qp(qp, attr, IB_QP_STATE);
2414 if (ret) {
2415 printk(KERN_ERR PFX "Couldn't change QP%d state to "
2416 "RTR: %d\n", i, ret);
2417 goto out;
2418 }
2419
2420 attr->qp_state = IB_QPS_RTS;
2421 attr->sq_psn = IB_MAD_SEND_Q_PSN;
2422 ret = ib_modify_qp(qp, attr, IB_QP_STATE | IB_QP_SQ_PSN);
2423 if (ret) {
2424 printk(KERN_ERR PFX "Couldn't change QP%d state to "
2425 "RTS: %d\n", i, ret);
2426 goto out;
2427 }
2428 }
2429
2430 ret = ib_req_notify_cq(port_priv->cq, IB_CQ_NEXT_COMP);
2431 if (ret) {
2432 printk(KERN_ERR PFX "Failed to request completion "
2433 "notification: %d\n", ret);
2434 goto out;
2435 }
2436
2437 for (i = 0; i < IB_MAD_QPS_CORE; i++) {
2438 ret = ib_mad_post_receive_mads(&port_priv->qp_info[i], NULL);
2439 if (ret) {
2440 printk(KERN_ERR PFX "Couldn't post receive WRs\n");
2441 goto out;
2442 }
2443 }
2444out:
2445 kfree(attr);
2446 return ret;
2447}
2448
2449static void qp_event_handler(struct ib_event *event, void *qp_context)
2450{
2451 struct ib_mad_qp_info *qp_info = qp_context;
2452
2453 /* It's worse than that! He's dead, Jim! */
2454 printk(KERN_ERR PFX "Fatal error (%d) on MAD QP (%d)\n",
2455 event->event, qp_info->qp->qp_num);
2456}
2457
2458static void init_mad_queue(struct ib_mad_qp_info *qp_info,
2459 struct ib_mad_queue *mad_queue)
2460{
2461 mad_queue->qp_info = qp_info;
2462 mad_queue->count = 0;
2463 spin_lock_init(&mad_queue->lock);
2464 INIT_LIST_HEAD(&mad_queue->list);
2465}
2466
2467static void init_mad_qp(struct ib_mad_port_private *port_priv,
2468 struct ib_mad_qp_info *qp_info)
2469{
2470 qp_info->port_priv = port_priv;
2471 init_mad_queue(qp_info, &qp_info->send_queue);
2472 init_mad_queue(qp_info, &qp_info->recv_queue);
2473 INIT_LIST_HEAD(&qp_info->overflow_list);
2474 spin_lock_init(&qp_info->snoop_lock);
2475 qp_info->snoop_table = NULL;
2476 qp_info->snoop_table_size = 0;
2477 atomic_set(&qp_info->snoop_count, 0);
2478}
2479
2480static int create_mad_qp(struct ib_mad_qp_info *qp_info,
2481 enum ib_qp_type qp_type)
2482{
2483 struct ib_qp_init_attr qp_init_attr;
2484 int ret;
2485
2486 memset(&qp_init_attr, 0, sizeof qp_init_attr);
2487 qp_init_attr.send_cq = qp_info->port_priv->cq;
2488 qp_init_attr.recv_cq = qp_info->port_priv->cq;
2489 qp_init_attr.sq_sig_type = IB_SIGNAL_ALL_WR;
2490 qp_init_attr.cap.max_send_wr = IB_MAD_QP_SEND_SIZE;
2491 qp_init_attr.cap.max_recv_wr = IB_MAD_QP_RECV_SIZE;
2492 qp_init_attr.cap.max_send_sge = IB_MAD_SEND_REQ_MAX_SG;
2493 qp_init_attr.cap.max_recv_sge = IB_MAD_RECV_REQ_MAX_SG;
2494 qp_init_attr.qp_type = qp_type;
2495 qp_init_attr.port_num = qp_info->port_priv->port_num;
2496 qp_init_attr.qp_context = qp_info;
2497 qp_init_attr.event_handler = qp_event_handler;
2498 qp_info->qp = ib_create_qp(qp_info->port_priv->pd, &qp_init_attr);
2499 if (IS_ERR(qp_info->qp)) {
2500 printk(KERN_ERR PFX "Couldn't create ib_mad QP%d\n",
2501 get_spl_qp_index(qp_type));
2502 ret = PTR_ERR(qp_info->qp);
2503 goto error;
2504 }
2505 /* Use minimum queue sizes unless the CQ is resized */
2506 qp_info->send_queue.max_active = IB_MAD_QP_SEND_SIZE;
2507 qp_info->recv_queue.max_active = IB_MAD_QP_RECV_SIZE;
2508 return 0;
2509
2510error:
2511 return ret;
2512}
2513
2514static void destroy_mad_qp(struct ib_mad_qp_info *qp_info)
2515{
2516 ib_destroy_qp(qp_info->qp);
2517 if (qp_info->snoop_table)
2518 kfree(qp_info->snoop_table);
2519}
2520
2521/*
2522 * Open the port
2523 * Create the QP, PD, MR, and CQ if needed
2524 */
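/*
 * Resources are created in order: a CQ sized for the send and receive
 * queues of both MAD QPs, a PD, a local DMA MR, the SMI and GSI QPs,
 * and a single-threaded workqueue that runs the completion handler.
 * Only after ib_mad_port_start() succeeds is the port added to the
 * global port list; failures unwind in reverse through the error
 * labels.
 */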
2525static int ib_mad_port_open(struct ib_device *device,
2526 int port_num)
2527{
2528 int ret, cq_size;
2529 struct ib_mad_port_private *port_priv;
2530 unsigned long flags;
2531 char name[sizeof "ib_mad123"];
2532
2533 /* First, check if port already open at MAD layer */
2534 port_priv = ib_get_mad_port(device, port_num);
2535 if (port_priv) {
2536 printk(KERN_DEBUG PFX "%s port %d already open\n",
2537 device->name, port_num);
2538 return 0;
2539 }
2540
2541 /* Create new device info */
2542 port_priv = kmalloc(sizeof *port_priv, GFP_KERNEL);
2543 if (!port_priv) {
2544 printk(KERN_ERR PFX "No memory for ib_mad_port_private\n");
2545 return -ENOMEM;
2546 }
2547 memset(port_priv, 0, sizeof *port_priv);
2548 port_priv->device = device;
2549 port_priv->port_num = port_num;
2550 spin_lock_init(&port_priv->reg_lock);
2551 INIT_LIST_HEAD(&port_priv->agent_list);
2552 init_mad_qp(port_priv, &port_priv->qp_info[0]);
2553 init_mad_qp(port_priv, &port_priv->qp_info[1]);
2554
2555 cq_size = (IB_MAD_QP_SEND_SIZE + IB_MAD_QP_RECV_SIZE) * 2;
2556 port_priv->cq = ib_create_cq(port_priv->device,
2557 (ib_comp_handler)
2558 ib_mad_thread_completion_handler,
2559 NULL, port_priv, cq_size);
2560 if (IS_ERR(port_priv->cq)) {
2561 printk(KERN_ERR PFX "Couldn't create ib_mad CQ\n");
2562 ret = PTR_ERR(port_priv->cq);
2563 goto error3;
2564 }
2565
2566 port_priv->pd = ib_alloc_pd(device);
2567 if (IS_ERR(port_priv->pd)) {
2568 printk(KERN_ERR PFX "Couldn't create ib_mad PD\n");
2569 ret = PTR_ERR(port_priv->pd);
2570 goto error4;
2571 }
2572
2573 port_priv->mr = ib_get_dma_mr(port_priv->pd, IB_ACCESS_LOCAL_WRITE);
2574 if (IS_ERR(port_priv->mr)) {
2575 printk(KERN_ERR PFX "Couldn't get ib_mad DMA MR\n");
2576 ret = PTR_ERR(port_priv->mr);
2577 goto error5;
2578 }
2579
2580 ret = create_mad_qp(&port_priv->qp_info[0], IB_QPT_SMI);
2581 if (ret)
2582 goto error6;
2583 ret = create_mad_qp(&port_priv->qp_info[1], IB_QPT_GSI);
2584 if (ret)
2585 goto error7;
2586
2587 snprintf(name, sizeof name, "ib_mad%d", port_num);
2588 port_priv->wq = create_singlethread_workqueue(name);
2589 if (!port_priv->wq) {
2590 ret = -ENOMEM;
2591 goto error8;
2592 }
2593 INIT_WORK(&port_priv->work, ib_mad_completion_handler, port_priv);
2594
2595 ret = ib_mad_port_start(port_priv);
2596 if (ret) {
2597 printk(KERN_ERR PFX "Couldn't start port\n");
2598 goto error9;
2599 }
2600
2601 spin_lock_irqsave(&ib_mad_port_list_lock, flags);
2602 list_add_tail(&port_priv->port_list, &ib_mad_port_list);
2603 spin_unlock_irqrestore(&ib_mad_port_list_lock, flags);
2604 return 0;
2605
2606error9:
2607 destroy_workqueue(port_priv->wq);
2608error8:
2609 destroy_mad_qp(&port_priv->qp_info[1]);
2610error7:
2611 destroy_mad_qp(&port_priv->qp_info[0]);
2612error6:
2613 ib_dereg_mr(port_priv->mr);
2614error5:
2615 ib_dealloc_pd(port_priv->pd);
2616error4:
2617 ib_destroy_cq(port_priv->cq);
2618 cleanup_recv_queue(&port_priv->qp_info[1]);
2619 cleanup_recv_queue(&port_priv->qp_info[0]);
2620error3:
2621 kfree(port_priv);
2622
2623 return ret;
2624}
2625
2626/*
2627 * Close the port
2628 * If there are no classes using the port, free the port
2629 * resources (CQ, MR, PD, QP) and remove the port's info structure
2630 */
2631static int ib_mad_port_close(struct ib_device *device, int port_num)
2632{
2633 struct ib_mad_port_private *port_priv;
2634 unsigned long flags;
2635
2636 spin_lock_irqsave(&ib_mad_port_list_lock, flags);
2637 port_priv = __ib_get_mad_port(device, port_num);
2638 if (port_priv == NULL) {
2639 spin_unlock_irqrestore(&ib_mad_port_list_lock, flags);
2640 printk(KERN_ERR PFX "Port %d not found\n", port_num);
2641 return -ENODEV;
2642 }
2643 list_del(&port_priv->port_list);
2644 spin_unlock_irqrestore(&ib_mad_port_list_lock, flags);
2645
2646 /* Stop processing completions. */
2647 flush_workqueue(port_priv->wq);
2648 destroy_workqueue(port_priv->wq);
2649 destroy_mad_qp(&port_priv->qp_info[1]);
2650 destroy_mad_qp(&port_priv->qp_info[0]);
2651 ib_dereg_mr(port_priv->mr);
2652 ib_dealloc_pd(port_priv->pd);
2653 ib_destroy_cq(port_priv->cq);
2654 cleanup_recv_queue(&port_priv->qp_info[1]);
2655 cleanup_recv_queue(&port_priv->qp_info[0]);
2656 /* XXX: Handle deallocation of MAD registration tables */
2657
2658 kfree(port_priv);
2659
2660 return 0;
2661}
2662
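/*
 * Per-device client hook: opens the MAD port, and the agent layer on
 * top of it, for every physical port, or for the single management
 * port 0 of a switch.  If any port fails to open, the ports opened so
 * far are closed again in reverse order.
 */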
2663static void ib_mad_init_device(struct ib_device *device)
2664{
2665 int ret, num_ports, cur_port, i, ret2;
2666
2667 if (device->node_type == IB_NODE_SWITCH) {
2668 num_ports = 1;
2669 cur_port = 0;
2670 } else {
2671 num_ports = device->phys_port_cnt;
2672 cur_port = 1;
2673 }
2674 for (i = 0; i < num_ports; i++, cur_port++) {
2675 ret = ib_mad_port_open(device, cur_port);
2676 if (ret) {
2677 printk(KERN_ERR PFX "Couldn't open %s port %d\n",
2678 device->name, cur_port);
2679 goto error_device_open;
2680 }
2681 ret = ib_agent_port_open(device, cur_port);
2682 if (ret) {
2683 printk(KERN_ERR PFX "Couldn't open %s port %d "
2684 "for agents\n",
2685 device->name, cur_port);
2686 goto error_device_open;
2687 }
2688 }
2689
2690 goto error_device_query;
2691
2692error_device_open:
2693 while (i > 0) {
2694 cur_port--;
2695 ret2 = ib_agent_port_close(device, cur_port);
2696 if (ret2) {
2697 printk(KERN_ERR PFX "Couldn't close %s port %d "
2698 "for agents\n",
2699 device->name, cur_port);
2700 }
2701 ret2 = ib_mad_port_close(device, cur_port);
2702 if (ret2) {
2703 printk(KERN_ERR PFX "Couldn't close %s port %d\n",
2704 device->name, cur_port);
2705 }
2706 i--;
2707 }
2708
2709error_device_query:
2710 return;
2711}
2712
2713static void ib_mad_remove_device(struct ib_device *device)
2714{
2715 int ret = 0, i, num_ports, cur_port, ret2;
2716
2717 if (device->node_type == IB_NODE_SWITCH) {
2718 num_ports = 1;
2719 cur_port = 0;
2720 } else {
2721 num_ports = device->phys_port_cnt;
2722 cur_port = 1;
2723 }
2724 for (i = 0; i < num_ports; i++, cur_port++) {
2725 ret2 = ib_agent_port_close(device, cur_port);
2726 if (ret2) {
2727 printk(KERN_ERR PFX "Couldn't close %s port %d "
2728 "for agents\n",
2729 device->name, cur_port);
2730 if (!ret)
2731 ret = ret2;
2732 }
2733 ret2 = ib_mad_port_close(device, cur_port);
2734 if (ret2) {
2735 printk(KERN_ERR PFX "Couldn't close %s port %d\n",
2736 device->name, cur_port);
2737 if (!ret)
2738 ret = ret2;
2739 }
2740 }
2741}
2742
2743static struct ib_client mad_client = {
2744 .name = "mad",
2745 .add = ib_mad_init_device,
2746 .remove = ib_mad_remove_device
2747};
2748
2749static int __init ib_mad_init_module(void)
2750{
2751 int ret;
2752
2753 spin_lock_init(&ib_mad_port_list_lock);
2754 spin_lock_init(&ib_agent_port_list_lock);
2755
2756 ib_mad_cache = kmem_cache_create("ib_mad",
2757 sizeof(struct ib_mad_private),
2758 0,
2759 SLAB_HWCACHE_ALIGN,
2760 NULL,
2761 NULL);
2762 if (!ib_mad_cache) {
2763 printk(KERN_ERR PFX "Couldn't create ib_mad cache\n");
2764 ret = -ENOMEM;
2765 goto error1;
2766 }
2767
2768 INIT_LIST_HEAD(&ib_mad_port_list);
2769
2770 if (ib_register_client(&mad_client)) {
2771 printk(KERN_ERR PFX "Couldn't register ib_mad client\n");
2772 ret = -EINVAL;
2773 goto error2;
2774 }
2775
2776 return 0;
2777
2778error2:
2779 kmem_cache_destroy(ib_mad_cache);
2780error1:
2781 return ret;
2782}
2783
2784static void __exit ib_mad_cleanup_module(void)
2785{
2786 ib_unregister_client(&mad_client);
2787
2788 if (kmem_cache_destroy(ib_mad_cache)) {
2789 printk(KERN_DEBUG PFX "Failed to destroy ib_mad cache\n");
2790 }
2791}
2792
2793module_init(ib_mad_init_module);
2794module_exit(ib_mad_cleanup_module);