[PATCH] IB: Change ib_mad_send_wr_private struct
drivers/infiniband/core/mad.c
1/*
2 * Copyright (c) 2004, 2005 Voltaire, Inc. All rights reserved.
3 *
4 * This software is available to you under a choice of one of two
5 * licenses. You may choose to be licensed under the terms of the GNU
6 * General Public License (GPL) Version 2, available from the file
7 * COPYING in the main directory of this source tree, or the
8 * OpenIB.org BSD license below:
9 *
10 * Redistribution and use in source and binary forms, with or
11 * without modification, are permitted provided that the following
12 * conditions are met:
13 *
14 * - Redistributions of source code must retain the above
15 * copyright notice, this list of conditions and the following
16 * disclaimer.
17 *
18 * - Redistributions in binary form must reproduce the above
19 * copyright notice, this list of conditions and the following
20 * disclaimer in the documentation and/or other materials
21 * provided with the distribution.
22 *
23 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
24 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
25 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
26 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
27 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
28 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
29 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
30 * SOFTWARE.
31 *
32 * $Id: mad.c 1389 2004-12-27 22:56:47Z roland $
33 */
34
35#include <linux/dma-mapping.h>
36
37#include "mad_priv.h"
38#include "smi.h"
39#include "agent.h"
40
41MODULE_LICENSE("Dual BSD/GPL");
42MODULE_DESCRIPTION("kernel IB MAD API");
43MODULE_AUTHOR("Hal Rosenstock");
44MODULE_AUTHOR("Sean Hefty");
45
46
47kmem_cache_t *ib_mad_cache;
48static struct list_head ib_mad_port_list;
49static u32 ib_mad_client_id = 0;
50
51/* Port list lock */
52static spinlock_t ib_mad_port_list_lock;
53
54
55/* Forward declarations */
56static int method_in_use(struct ib_mad_mgmt_method_table **method,
57 struct ib_mad_reg_req *mad_reg_req);
58static void remove_mad_reg_req(struct ib_mad_agent_private *priv);
59static struct ib_mad_agent_private *find_mad_agent(
60 struct ib_mad_port_private *port_priv,
61 struct ib_mad *mad);
62static int ib_mad_post_receive_mads(struct ib_mad_qp_info *qp_info,
63 struct ib_mad_private *mad);
64static void cancel_mads(struct ib_mad_agent_private *mad_agent_priv);
65static void ib_mad_complete_send_wr(struct ib_mad_send_wr_private *mad_send_wr,
66 struct ib_mad_send_wc *mad_send_wc);
67static void timeout_sends(void *data);
68static void cancel_sends(void *data);
69static void local_completions(void *data);
70static int add_nonoui_reg_req(struct ib_mad_reg_req *mad_reg_req,
71 struct ib_mad_agent_private *agent_priv,
72 u8 mgmt_class);
73static int add_oui_reg_req(struct ib_mad_reg_req *mad_reg_req,
74 struct ib_mad_agent_private *agent_priv);
75
76/*
77 * Returns an ib_mad_port_private structure or NULL for a device/port
78 * Assumes ib_mad_port_list_lock is being held
79 */
80static inline struct ib_mad_port_private *
81__ib_get_mad_port(struct ib_device *device, int port_num)
82{
83 struct ib_mad_port_private *entry;
84
85 list_for_each_entry(entry, &ib_mad_port_list, port_list) {
86 if (entry->device == device && entry->port_num == port_num)
87 return entry;
88 }
89 return NULL;
90}
91
92/*
93 * Wrapper function to return an ib_mad_port_private structure or NULL
94 * for a device/port
95 */
96static inline struct ib_mad_port_private *
97ib_get_mad_port(struct ib_device *device, int port_num)
98{
99 struct ib_mad_port_private *entry;
100 unsigned long flags;
101
102 spin_lock_irqsave(&ib_mad_port_list_lock, flags);
103 entry = __ib_get_mad_port(device, port_num);
104 spin_unlock_irqrestore(&ib_mad_port_list_lock, flags);
105
106 return entry;
107}
108
109static inline u8 convert_mgmt_class(u8 mgmt_class)
110{
111 /* Alias IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE to 0 */
112 return mgmt_class == IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE ?
113 0 : mgmt_class;
114}
115
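/* Map a special QP type to its index in the per-port QP array: 0 for QP0 (SMI), 1 for QP1 (GSI). */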
116static int get_spl_qp_index(enum ib_qp_type qp_type)
117{
118 switch (qp_type)
119 {
120 case IB_QPT_SMI:
121 return 0;
122 case IB_QPT_GSI:
123 return 1;
124 default:
125 return -1;
126 }
127}
128
129static int vendor_class_index(u8 mgmt_class)
130{
131 return mgmt_class - IB_MGMT_CLASS_VENDOR_RANGE2_START;
132}
133
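/* "New" vendor classes occupy IBA vendor range 2 (0x30-0x4f) and carry an OUI, so they are tracked in per-OUI method tables. */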
134static int is_vendor_class(u8 mgmt_class)
135{
136 if ((mgmt_class < IB_MGMT_CLASS_VENDOR_RANGE2_START) ||
137 (mgmt_class > IB_MGMT_CLASS_VENDOR_RANGE2_END))
138 return 0;
139 return 1;
140}
141
142static int is_vendor_oui(char *oui)
143{
144 if (oui[0] || oui[1] || oui[2])
145 return 1;
146 return 0;
147}
148
149static int is_vendor_method_in_use(
150 struct ib_mad_mgmt_vendor_class *vendor_class,
151 struct ib_mad_reg_req *mad_reg_req)
152{
153 struct ib_mad_mgmt_method_table *method;
154 int i;
155
156 for (i = 0; i < MAX_MGMT_OUI; i++) {
157 if (!memcmp(vendor_class->oui[i], mad_reg_req->oui, 3)) {
158 method = vendor_class->method_table[i];
159 if (method) {
160 if (method_in_use(&method, mad_reg_req))
161 return 1;
162 else
163 break;
164 }
165 }
166 }
167 return 0;
168}
169
170/*
171 * ib_register_mad_agent - Register to send/receive MADs
172 */
173struct ib_mad_agent *ib_register_mad_agent(struct ib_device *device,
174 u8 port_num,
175 enum ib_qp_type qp_type,
176 struct ib_mad_reg_req *mad_reg_req,
177 u8 rmpp_version,
178 ib_mad_send_handler send_handler,
179 ib_mad_recv_handler recv_handler,
180 void *context)
181{
182 struct ib_mad_port_private *port_priv;
183 struct ib_mad_agent *ret = ERR_PTR(-EINVAL);
184 struct ib_mad_agent_private *mad_agent_priv;
185 struct ib_mad_reg_req *reg_req = NULL;
186 struct ib_mad_mgmt_class_table *class;
187 struct ib_mad_mgmt_vendor_class_table *vendor;
188 struct ib_mad_mgmt_vendor_class *vendor_class;
189 struct ib_mad_mgmt_method_table *method;
190 int ret2, qpn;
191 unsigned long flags;
192 u8 mgmt_class, vclass;
193
194 /* Validate parameters */
195 qpn = get_spl_qp_index(qp_type);
196 if (qpn == -1)
197 goto error1;
198
199 if (rmpp_version)
200 goto error1; /* XXX: until RMPP implemented */
201
202 /* Validate MAD registration request if supplied */
203 if (mad_reg_req) {
204 if (mad_reg_req->mgmt_class_version >= MAX_MGMT_VERSION)
205 goto error1;
206 if (!recv_handler)
207 goto error1;
208 if (mad_reg_req->mgmt_class >= MAX_MGMT_CLASS) {
209 /*
210 * IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE is the only
211 * one in this range currently allowed
212 */
213 if (mad_reg_req->mgmt_class !=
214 IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE)
215 goto error1;
216 } else if (mad_reg_req->mgmt_class == 0) {
217 /*
218 * Class 0 is reserved in IBA and is used for
219 * aliasing of IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE
220 */
221 goto error1;
222 } else if (is_vendor_class(mad_reg_req->mgmt_class)) {
223 /*
224 * If class is in "new" vendor range,
225 * ensure supplied OUI is not zero
226 */
227 if (!is_vendor_oui(mad_reg_req->oui))
228 goto error1;
229 }
230 /* Make sure class supplied is consistent with QP type */
231 if (qp_type == IB_QPT_SMI) {
232 if ((mad_reg_req->mgmt_class !=
233 IB_MGMT_CLASS_SUBN_LID_ROUTED) &&
234 (mad_reg_req->mgmt_class !=
235 IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE))
236 goto error1;
237 } else {
238 if ((mad_reg_req->mgmt_class ==
239 IB_MGMT_CLASS_SUBN_LID_ROUTED) ||
240 (mad_reg_req->mgmt_class ==
241 IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE))
242 goto error1;
243 }
244 } else {
245 /* No registration request supplied */
246 if (!send_handler)
247 goto error1;
248 }
249
250 /* Validate device and port */
251 port_priv = ib_get_mad_port(device, port_num);
252 if (!port_priv) {
253 ret = ERR_PTR(-ENODEV);
254 goto error1;
255 }
256
257 /* Allocate structures */
258 mad_agent_priv = kmalloc(sizeof *mad_agent_priv, GFP_KERNEL);
259 if (!mad_agent_priv) {
260 ret = ERR_PTR(-ENOMEM);
261 goto error1;
262 }
263 memset(mad_agent_priv, 0, sizeof *mad_agent_priv);
264
265 mad_agent_priv->agent.mr = ib_get_dma_mr(port_priv->qp_info[qpn].qp->pd,
266 IB_ACCESS_LOCAL_WRITE);
267 if (IS_ERR(mad_agent_priv->agent.mr)) {
268 ret = ERR_PTR(-ENOMEM);
269 goto error2;
270 }
271
272 if (mad_reg_req) {
273 reg_req = kmalloc(sizeof *reg_req, GFP_KERNEL);
274 if (!reg_req) {
275 ret = ERR_PTR(-ENOMEM);
276 goto error3;
277 }
278 /* Make a copy of the MAD registration request */
279 memcpy(reg_req, mad_reg_req, sizeof *reg_req);
280 }
281
282 /* Now, fill in the various structures */
283 mad_agent_priv->qp_info = &port_priv->qp_info[qpn];
284 mad_agent_priv->reg_req = reg_req;
285 mad_agent_priv->rmpp_version = rmpp_version;
286 mad_agent_priv->agent.device = device;
287 mad_agent_priv->agent.recv_handler = recv_handler;
288 mad_agent_priv->agent.send_handler = send_handler;
289 mad_agent_priv->agent.context = context;
290 mad_agent_priv->agent.qp = port_priv->qp_info[qpn].qp;
291 mad_agent_priv->agent.port_num = port_num;
292
293 spin_lock_irqsave(&port_priv->reg_lock, flags);
294 mad_agent_priv->agent.hi_tid = ++ib_mad_client_id;
295
296 /*
297 * Make sure MAD registration (if supplied)
298 * is non overlapping with any existing ones
299 */
300 if (mad_reg_req) {
301 mgmt_class = convert_mgmt_class(mad_reg_req->mgmt_class);
302 if (!is_vendor_class(mgmt_class)) {
303 class = port_priv->version[mad_reg_req->
304 mgmt_class_version].class;
305 if (class) {
306 method = class->method_table[mgmt_class];
307 if (method) {
308 if (method_in_use(&method,
309 mad_reg_req))
310 goto error4;
311 }
312 }
313 ret2 = add_nonoui_reg_req(mad_reg_req, mad_agent_priv,
314 mgmt_class);
315 } else {
316 /* "New" vendor class range */
317 vendor = port_priv->version[mad_reg_req->
318 mgmt_class_version].vendor;
319 if (vendor) {
320 vclass = vendor_class_index(mgmt_class);
321 vendor_class = vendor->vendor_class[vclass];
322 if (vendor_class) {
323 if (is_vendor_method_in_use(
324 vendor_class,
325 mad_reg_req))
326 goto error4;
327 }
328 }
329 ret2 = add_oui_reg_req(mad_reg_req, mad_agent_priv);
330 }
331 if (ret2) {
332 ret = ERR_PTR(ret2);
333 goto error4;
334 }
335 }
336
337 /* Add mad agent into port's agent list */
338 list_add_tail(&mad_agent_priv->agent_list, &port_priv->agent_list);
339 spin_unlock_irqrestore(&port_priv->reg_lock, flags);
340
341 spin_lock_init(&mad_agent_priv->lock);
342 INIT_LIST_HEAD(&mad_agent_priv->send_list);
343 INIT_LIST_HEAD(&mad_agent_priv->wait_list);
344 INIT_WORK(&mad_agent_priv->timed_work, timeout_sends, mad_agent_priv);
345 INIT_LIST_HEAD(&mad_agent_priv->local_list);
346 INIT_WORK(&mad_agent_priv->local_work, local_completions,
347 mad_agent_priv);
348 INIT_LIST_HEAD(&mad_agent_priv->canceled_list);
349 INIT_WORK(&mad_agent_priv->canceled_work, cancel_sends, mad_agent_priv);
350 atomic_set(&mad_agent_priv->refcount, 1);
351 init_waitqueue_head(&mad_agent_priv->wait);
352
353 return &mad_agent_priv->agent;
354
355error4:
356 spin_unlock_irqrestore(&port_priv->reg_lock, flags);
357 kfree(reg_req);
358error3:
359 ib_dereg_mr(mad_agent_priv->agent.mr);
360error2:
361 kfree(mad_agent_priv);
362error1:
363 return ret;
364}
365EXPORT_SYMBOL(ib_register_mad_agent);
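/*
 * Typical registration from a client, shown as an illustrative sketch only
 * (the handler and context names below are hypothetical):
 *
 *	struct ib_mad_reg_req reg_req = {
 *		.mgmt_class		= IB_MGMT_CLASS_PERF_MGMT,
 *		.mgmt_class_version	= 1,
 *	};
 *	struct ib_mad_agent *agent;
 *
 *	set_bit(IB_MGMT_METHOD_GET, reg_req.method_mask);
 *	agent = ib_register_mad_agent(device, port_num, IB_QPT_GSI,
 *				      &reg_req, 0, my_send_handler,
 *				      my_recv_handler, my_context);
 *	if (IS_ERR(agent))
 *		return PTR_ERR(agent);
 */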
366
367static inline int is_snooping_sends(int mad_snoop_flags)
368{
369 return (mad_snoop_flags &
370 (/*IB_MAD_SNOOP_POSTED_SENDS |
371 IB_MAD_SNOOP_RMPP_SENDS |*/
372 IB_MAD_SNOOP_SEND_COMPLETIONS /*|
373 IB_MAD_SNOOP_RMPP_SEND_COMPLETIONS*/));
374}
375
376static inline int is_snooping_recvs(int mad_snoop_flags)
377{
378 return (mad_snoop_flags &
379 (IB_MAD_SNOOP_RECVS /*|
380 IB_MAD_SNOOP_RMPP_RECVS*/));
381}
382
383static int register_snoop_agent(struct ib_mad_qp_info *qp_info,
384 struct ib_mad_snoop_private *mad_snoop_priv)
385{
386 struct ib_mad_snoop_private **new_snoop_table;
387 unsigned long flags;
388 int i;
389
390 spin_lock_irqsave(&qp_info->snoop_lock, flags);
391 /* Check for empty slot in array. */
392 for (i = 0; i < qp_info->snoop_table_size; i++)
393 if (!qp_info->snoop_table[i])
394 break;
395
396 if (i == qp_info->snoop_table_size) {
397 /* Grow table. */
398 new_snoop_table = kmalloc(sizeof mad_snoop_priv *
399 (qp_info->snoop_table_size + 1),
400 GFP_ATOMIC);
401 if (!new_snoop_table) {
402 i = -ENOMEM;
403 goto out;
404 }
405 if (qp_info->snoop_table) {
406 memcpy(new_snoop_table, qp_info->snoop_table,
407 sizeof mad_snoop_priv *
408 qp_info->snoop_table_size);
409 kfree(qp_info->snoop_table);
410 }
411 qp_info->snoop_table = new_snoop_table;
412 qp_info->snoop_table_size++;
413 }
414 qp_info->snoop_table[i] = mad_snoop_priv;
415 atomic_inc(&qp_info->snoop_count);
416out:
417 spin_unlock_irqrestore(&qp_info->snoop_lock, flags);
418 return i;
419}
420
421struct ib_mad_agent *ib_register_mad_snoop(struct ib_device *device,
422 u8 port_num,
423 enum ib_qp_type qp_type,
424 int mad_snoop_flags,
425 ib_mad_snoop_handler snoop_handler,
426 ib_mad_recv_handler recv_handler,
427 void *context)
428{
429 struct ib_mad_port_private *port_priv;
430 struct ib_mad_agent *ret;
431 struct ib_mad_snoop_private *mad_snoop_priv;
432 int qpn;
433
434 /* Validate parameters */
435 if ((is_snooping_sends(mad_snoop_flags) && !snoop_handler) ||
436 (is_snooping_recvs(mad_snoop_flags) && !recv_handler)) {
437 ret = ERR_PTR(-EINVAL);
438 goto error1;
439 }
440 qpn = get_spl_qp_index(qp_type);
441 if (qpn == -1) {
442 ret = ERR_PTR(-EINVAL);
443 goto error1;
444 }
445 port_priv = ib_get_mad_port(device, port_num);
446 if (!port_priv) {
447 ret = ERR_PTR(-ENODEV);
448 goto error1;
449 }
450 /* Allocate structures */
451 mad_snoop_priv = kmalloc(sizeof *mad_snoop_priv, GFP_KERNEL);
452 if (!mad_snoop_priv) {
453 ret = ERR_PTR(-ENOMEM);
454 goto error1;
455 }
456
457 /* Now, fill in the various structures */
458 memset(mad_snoop_priv, 0, sizeof *mad_snoop_priv);
459 mad_snoop_priv->qp_info = &port_priv->qp_info[qpn];
460 mad_snoop_priv->agent.device = device;
461 mad_snoop_priv->agent.recv_handler = recv_handler;
462 mad_snoop_priv->agent.snoop_handler = snoop_handler;
463 mad_snoop_priv->agent.context = context;
464 mad_snoop_priv->agent.qp = port_priv->qp_info[qpn].qp;
465 mad_snoop_priv->agent.port_num = port_num;
466 mad_snoop_priv->mad_snoop_flags = mad_snoop_flags;
467 init_waitqueue_head(&mad_snoop_priv->wait);
468 mad_snoop_priv->snoop_index = register_snoop_agent(
469 &port_priv->qp_info[qpn],
470 mad_snoop_priv);
471 if (mad_snoop_priv->snoop_index < 0) {
472 ret = ERR_PTR(mad_snoop_priv->snoop_index);
473 goto error2;
474 }
475
476 atomic_set(&mad_snoop_priv->refcount, 1);
477 return &mad_snoop_priv->agent;
478
479error2:
480 kfree(mad_snoop_priv);
481error1:
482 return ret;
483}
484EXPORT_SYMBOL(ib_register_mad_snoop);
485
486static void unregister_mad_agent(struct ib_mad_agent_private *mad_agent_priv)
487{
488 struct ib_mad_port_private *port_priv;
489 unsigned long flags;
490
491 /* Note that we could still be handling received MADs */
492
493 /*
494 * Canceling all sends results in dropping received response
495 * MADs, preventing us from queuing additional work
496 */
497 cancel_mads(mad_agent_priv);
498 port_priv = mad_agent_priv->qp_info->port_priv;
499 cancel_delayed_work(&mad_agent_priv->timed_work);
500
501 spin_lock_irqsave(&port_priv->reg_lock, flags);
502 remove_mad_reg_req(mad_agent_priv);
503 list_del(&mad_agent_priv->agent_list);
504 spin_unlock_irqrestore(&port_priv->reg_lock, flags);
505
506 flush_workqueue(port_priv->wq);
507
508 atomic_dec(&mad_agent_priv->refcount);
509 wait_event(mad_agent_priv->wait,
510 !atomic_read(&mad_agent_priv->refcount));
511
512 if (mad_agent_priv->reg_req)
513 kfree(mad_agent_priv->reg_req);
514 ib_dereg_mr(mad_agent_priv->agent.mr);
515 kfree(mad_agent_priv);
516}
517
518static void unregister_mad_snoop(struct ib_mad_snoop_private *mad_snoop_priv)
519{
520 struct ib_mad_qp_info *qp_info;
521 unsigned long flags;
522
523 qp_info = mad_snoop_priv->qp_info;
524 spin_lock_irqsave(&qp_info->snoop_lock, flags);
525 qp_info->snoop_table[mad_snoop_priv->snoop_index] = NULL;
526 atomic_dec(&qp_info->snoop_count);
527 spin_unlock_irqrestore(&qp_info->snoop_lock, flags);
528
529 atomic_dec(&mad_snoop_priv->refcount);
530 wait_event(mad_snoop_priv->wait,
531 !atomic_read(&mad_snoop_priv->refcount));
532
533 kfree(mad_snoop_priv);
534}
535
536/*
537 * ib_unregister_mad_agent - Unregisters a client from using MAD services
538 */
539int ib_unregister_mad_agent(struct ib_mad_agent *mad_agent)
540{
541 struct ib_mad_agent_private *mad_agent_priv;
542 struct ib_mad_snoop_private *mad_snoop_priv;
543
544 /* If the TID is zero, the agent can only snoop. */
545 if (mad_agent->hi_tid) {
546 mad_agent_priv = container_of(mad_agent,
547 struct ib_mad_agent_private,
548 agent);
549 unregister_mad_agent(mad_agent_priv);
550 } else {
551 mad_snoop_priv = container_of(mad_agent,
552 struct ib_mad_snoop_private,
553 agent);
554 unregister_mad_snoop(mad_snoop_priv);
555 }
556 return 0;
557}
558EXPORT_SYMBOL(ib_unregister_mad_agent);
559
560static inline int response_mad(struct ib_mad *mad)
561{
562 /* Trap represses are responses although response bit is reset */
563 return ((mad->mad_hdr.method == IB_MGMT_METHOD_TRAP_REPRESS) ||
564 (mad->mad_hdr.method & IB_MGMT_METHOD_RESP));
565}
566
567static void dequeue_mad(struct ib_mad_list_head *mad_list)
568{
569 struct ib_mad_queue *mad_queue;
570 unsigned long flags;
571
572 BUG_ON(!mad_list->mad_queue);
573 mad_queue = mad_list->mad_queue;
574 spin_lock_irqsave(&mad_queue->lock, flags);
575 list_del(&mad_list->list);
576 mad_queue->count--;
577 spin_unlock_irqrestore(&mad_queue->lock, flags);
578}
579
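/* Deliver a send completion to every snoop agent that registered for send completions on this QP. */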
580static void snoop_send(struct ib_mad_qp_info *qp_info,
581 struct ib_send_wr *send_wr,
582 struct ib_mad_send_wc *mad_send_wc,
583 int mad_snoop_flags)
584{
585 struct ib_mad_snoop_private *mad_snoop_priv;
586 unsigned long flags;
587 int i;
588
589 spin_lock_irqsave(&qp_info->snoop_lock, flags);
590 for (i = 0; i < qp_info->snoop_table_size; i++) {
591 mad_snoop_priv = qp_info->snoop_table[i];
592 if (!mad_snoop_priv ||
593 !(mad_snoop_priv->mad_snoop_flags & mad_snoop_flags))
594 continue;
595
596 atomic_inc(&mad_snoop_priv->refcount);
597 spin_unlock_irqrestore(&qp_info->snoop_lock, flags);
598 mad_snoop_priv->agent.snoop_handler(&mad_snoop_priv->agent,
599 send_wr, mad_send_wc);
600 if (atomic_dec_and_test(&mad_snoop_priv->refcount))
601 wake_up(&mad_snoop_priv->wait);
602 spin_lock_irqsave(&qp_info->snoop_lock, flags);
603 }
604 spin_unlock_irqrestore(&qp_info->snoop_lock, flags);
605}
606
607static void snoop_recv(struct ib_mad_qp_info *qp_info,
608 struct ib_mad_recv_wc *mad_recv_wc,
609 int mad_snoop_flags)
610{
611 struct ib_mad_snoop_private *mad_snoop_priv;
612 unsigned long flags;
613 int i;
614
615 spin_lock_irqsave(&qp_info->snoop_lock, flags);
616 for (i = 0; i < qp_info->snoop_table_size; i++) {
617 mad_snoop_priv = qp_info->snoop_table[i];
618 if (!mad_snoop_priv ||
619 !(mad_snoop_priv->mad_snoop_flags & mad_snoop_flags))
620 continue;
621
622 atomic_inc(&mad_snoop_priv->refcount);
623 spin_unlock_irqrestore(&qp_info->snoop_lock, flags);
624 mad_snoop_priv->agent.recv_handler(&mad_snoop_priv->agent,
625 mad_recv_wc);
626 if (atomic_dec_and_test(&mad_snoop_priv->refcount))
627 wake_up(&mad_snoop_priv->wait);
628 spin_lock_irqsave(&qp_info->snoop_lock, flags);
629 }
630 spin_unlock_irqrestore(&qp_info->snoop_lock, flags);
631}
632
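/* Fabricate a receive work completion for a MAD that is processed locally instead of being placed on the wire. */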
633static void build_smp_wc(u64 wr_id, u16 slid, u16 pkey_index, u8 port_num,
634 struct ib_wc *wc)
635{
636 memset(wc, 0, sizeof *wc);
637 wc->wr_id = wr_id;
638 wc->status = IB_WC_SUCCESS;
639 wc->opcode = IB_WC_RECV;
640 wc->pkey_index = pkey_index;
641 wc->byte_len = sizeof(struct ib_mad) + sizeof(struct ib_grh);
642 wc->src_qp = IB_QP0;
643 wc->qp_num = IB_QP0;
644 wc->slid = slid;
645 wc->sl = 0;
646 wc->dlid_path_bits = 0;
647 wc->port_num = port_num;
648}
649
650/*
651 * Return 0 if SMP is to be sent
652 * Return 1 if SMP was consumed locally (whether or not solicited)
653 * Return < 0 if error
654 */
655static int handle_outgoing_dr_smp(struct ib_mad_agent_private *mad_agent_priv,
656 struct ib_smp *smp,
657 struct ib_send_wr *send_wr)
658{
659 int ret;
660 unsigned long flags;
661 struct ib_mad_local_private *local;
662 struct ib_mad_private *mad_priv;
663 struct ib_mad_port_private *port_priv;
664 struct ib_mad_agent_private *recv_mad_agent = NULL;
665 struct ib_device *device = mad_agent_priv->agent.device;
666 u8 port_num = mad_agent_priv->agent.port_num;
667 struct ib_wc mad_wc;
668
669 if (!smi_handle_dr_smp_send(smp, device->node_type, port_num)) {
670 ret = -EINVAL;
671 printk(KERN_ERR PFX "Invalid directed route\n");
672 goto out;
673 }
674 /* Check to post send on QP or process locally */
675 ret = smi_check_local_dr_smp(smp, device, port_num);
676 if (!ret || !device->process_mad)
677 goto out;
678
679 local = kmalloc(sizeof *local, GFP_ATOMIC);
680 if (!local) {
681 ret = -ENOMEM;
682 printk(KERN_ERR PFX "No memory for ib_mad_local_private\n");
683 goto out;
684 }
685 local->mad_priv = NULL;
686 local->recv_mad_agent = NULL;
687 mad_priv = kmem_cache_alloc(ib_mad_cache, GFP_ATOMIC);
688 if (!mad_priv) {
689 ret = -ENOMEM;
690 printk(KERN_ERR PFX "No memory for local response MAD\n");
691 kfree(local);
692 goto out;
693 }
694
695 build_smp_wc(send_wr->wr_id, smp->dr_slid, send_wr->wr.ud.pkey_index,
696 send_wr->wr.ud.port_num, &mad_wc);
697
698 /* No GRH for DR SMP */
699 ret = device->process_mad(device, 0, port_num, &mad_wc, NULL,
700 (struct ib_mad *)smp,
701 (struct ib_mad *)&mad_priv->mad);
702 switch (ret)
703 {
704 case IB_MAD_RESULT_SUCCESS | IB_MAD_RESULT_REPLY:
705 if (response_mad(&mad_priv->mad.mad) &&
706 mad_agent_priv->agent.recv_handler) {
707 local->mad_priv = mad_priv;
708 local->recv_mad_agent = mad_agent_priv;
709 /*
710 * Reference MAD agent until receive
711 * side of local completion handled
712 */
713 atomic_inc(&mad_agent_priv->refcount);
714 } else
715 kmem_cache_free(ib_mad_cache, mad_priv);
716 break;
717 case IB_MAD_RESULT_SUCCESS | IB_MAD_RESULT_CONSUMED:
718 kmem_cache_free(ib_mad_cache, mad_priv);
719 break;
720 case IB_MAD_RESULT_SUCCESS:
721 /* Treat like an incoming receive MAD */
722 port_priv = ib_get_mad_port(mad_agent_priv->agent.device,
723 mad_agent_priv->agent.port_num);
724 if (port_priv) {
725 mad_priv->mad.mad.mad_hdr.tid =
726 ((struct ib_mad *)smp)->mad_hdr.tid;
727 recv_mad_agent = find_mad_agent(port_priv,
728 &mad_priv->mad.mad);
729 }
730 if (!port_priv || !recv_mad_agent) {
731 kmem_cache_free(ib_mad_cache, mad_priv);
732 kfree(local);
733 ret = 0;
734 goto out;
735 }
736 local->mad_priv = mad_priv;
737 local->recv_mad_agent = recv_mad_agent;
738 break;
739 default:
740 kmem_cache_free(ib_mad_cache, mad_priv);
741 kfree(local);
742 ret = -EINVAL;
743 goto out;
744 }
745
746 local->send_wr = *send_wr;
747 local->send_wr.sg_list = local->sg_list;
748 memcpy(local->sg_list, send_wr->sg_list,
749 sizeof *send_wr->sg_list * send_wr->num_sge);
750 local->send_wr.next = NULL;
751 local->tid = send_wr->wr.ud.mad_hdr->tid;
752 local->wr_id = send_wr->wr_id;
753 /* Reference MAD agent until send side of local completion handled */
754 atomic_inc(&mad_agent_priv->refcount);
755 /* Queue local completion to local list */
756 spin_lock_irqsave(&mad_agent_priv->lock, flags);
757 list_add_tail(&local->completion_list, &mad_agent_priv->local_list);
758 spin_unlock_irqrestore(&mad_agent_priv->lock, flags);
759 queue_work(mad_agent_priv->qp_info->port_priv->wq,
760 &mad_agent_priv->local_work);
761 ret = 1;
762out:
763 return ret;
764}
765
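/* Length of the send buffer: header plus data, with the data padded up to a whole number of MAD payload segments. */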
766static int get_buf_length(int hdr_len, int data_len)
767{
768 int seg_size, pad;
769
770 seg_size = sizeof(struct ib_mad) - hdr_len;
771 if (data_len && seg_size) {
772 pad = seg_size - data_len % seg_size;
773 if (pad == seg_size)
774 pad = 0;
775 } else
776 pad = seg_size;
777 return hdr_len + data_len + pad;
778}
779
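/* Allocate a single buffer holding the MAD itself followed by the ib_mad_send_buf tracking structure, DMA-map it, and fill in a UD send WR for it. */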
780struct ib_mad_send_buf * ib_create_send_mad(struct ib_mad_agent *mad_agent,
781 u32 remote_qpn, u16 pkey_index,
782 struct ib_ah *ah,
783 int hdr_len, int data_len,
784 unsigned int __nocast gfp_mask)
785{
786 struct ib_mad_agent_private *mad_agent_priv;
787 struct ib_mad_send_buf *send_buf;
788 int buf_size;
789 void *buf;
790
791 mad_agent_priv = container_of(mad_agent,
792 struct ib_mad_agent_private, agent);
793 buf_size = get_buf_length(hdr_len, data_len);
794
795 buf = kmalloc(sizeof *send_buf + buf_size, gfp_mask);
796 if (!buf)
797 return ERR_PTR(-ENOMEM);
798
799 send_buf = buf + buf_size;
800 memset(send_buf, 0, sizeof *send_buf);
801 send_buf->mad = buf;
802
803 send_buf->sge.addr = dma_map_single(mad_agent->device->dma_device,
804 buf, buf_size, DMA_TO_DEVICE);
805 pci_unmap_addr_set(send_buf, mapping, send_buf->sge.addr);
806 send_buf->sge.length = buf_size;
807 send_buf->sge.lkey = mad_agent->mr->lkey;
808
809 send_buf->send_wr.wr_id = (unsigned long) send_buf;
810 send_buf->send_wr.sg_list = &send_buf->sge;
811 send_buf->send_wr.num_sge = 1;
812 send_buf->send_wr.opcode = IB_WR_SEND;
813 send_buf->send_wr.send_flags = IB_SEND_SIGNALED;
814 send_buf->send_wr.wr.ud.ah = ah;
815 send_buf->send_wr.wr.ud.mad_hdr = &send_buf->mad->mad_hdr;
816 send_buf->send_wr.wr.ud.remote_qpn = remote_qpn;
817 send_buf->send_wr.wr.ud.remote_qkey = IB_QP_SET_QKEY;
818 send_buf->send_wr.wr.ud.pkey_index = pkey_index;
819 send_buf->mad_agent = mad_agent;
820 atomic_inc(&mad_agent_priv->refcount);
821 return send_buf;
822}
823EXPORT_SYMBOL(ib_create_send_mad);
824
825void ib_free_send_mad(struct ib_mad_send_buf *send_buf)
826{
827 struct ib_mad_agent_private *mad_agent_priv;
828
829 mad_agent_priv = container_of(send_buf->mad_agent,
830 struct ib_mad_agent_private, agent);
831
832 dma_unmap_single(send_buf->mad_agent->device->dma_device,
833 pci_unmap_addr(send_buf, mapping),
834 send_buf->sge.length, DMA_TO_DEVICE);
835 kfree(send_buf->mad);
836
837 if (atomic_dec_and_test(&mad_agent_priv->refcount))
838 wake_up(&mad_agent_priv->wait);
839}
840EXPORT_SYMBOL(ib_free_send_mad);
841
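/* Post a send WR on the QP, or queue it on the overflow list when the send queue already has max_active WRs outstanding. */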
842static int ib_send_mad(struct ib_mad_send_wr_private *mad_send_wr)
843{
844 struct ib_mad_qp_info *qp_info;
845 struct ib_send_wr *bad_send_wr;
846 unsigned long flags;
847 int ret;
848
849 /* Set WR ID to find mad_send_wr upon completion */
850 qp_info = mad_send_wr->mad_agent_priv->qp_info;
851 mad_send_wr->send_wr.wr_id = (unsigned long)&mad_send_wr->mad_list;
852 mad_send_wr->mad_list.mad_queue = &qp_info->send_queue;
853
854 spin_lock_irqsave(&qp_info->send_queue.lock, flags);
855 if (qp_info->send_queue.count++ < qp_info->send_queue.max_active) {
856 list_add_tail(&mad_send_wr->mad_list.list,
857 &qp_info->send_queue.list);
858 spin_unlock_irqrestore(&qp_info->send_queue.lock, flags);
859 ret = ib_post_send(mad_send_wr->mad_agent_priv->agent.qp,
860 &mad_send_wr->send_wr, &bad_send_wr);
861 if (ret) {
862 printk(KERN_ERR PFX "ib_post_send failed: %d\n", ret);
863 dequeue_mad(&mad_send_wr->mad_list);
864 }
865 } else {
866 list_add_tail(&mad_send_wr->mad_list.list,
867 &qp_info->overflow_list);
868 spin_unlock_irqrestore(&qp_info->send_queue.lock, flags);
869 ret = 0;
870 }
871 return ret;
872}
873
874/*
875 * ib_post_send_mad - Posts MAD(s) to the send queue of the QP associated
876 * with the registered client
877 */
878int ib_post_send_mad(struct ib_mad_agent *mad_agent,
879 struct ib_send_wr *send_wr,
880 struct ib_send_wr **bad_send_wr)
881{
882 int ret = -EINVAL;
883 struct ib_mad_agent_private *mad_agent_priv;
884
885 /* Validate supplied parameters */
886 if (!bad_send_wr)
887 goto error1;
888
889 if (!mad_agent || !send_wr)
890 goto error2;
891
892 if (!mad_agent->send_handler)
893 goto error2;
894
895 mad_agent_priv = container_of(mad_agent,
896 struct ib_mad_agent_private,
897 agent);
898
899 /* Walk list of send WRs and post each on send list */
900 while (send_wr) {
901 unsigned long flags;
902 struct ib_send_wr *next_send_wr;
903 struct ib_mad_send_wr_private *mad_send_wr;
904 struct ib_smp *smp;
905
906 /* Validate more parameters */
907 if (send_wr->num_sge > IB_MAD_SEND_REQ_MAX_SG)
908 goto error2;
909
910 if (send_wr->wr.ud.timeout_ms && !mad_agent->recv_handler)
911 goto error2;
912
913 if (!send_wr->wr.ud.mad_hdr) {
914 printk(KERN_ERR PFX "MAD header must be supplied "
915 "in WR %p\n", send_wr);
916 goto error2;
917 }
918
919 /*
920 * Save pointer to next work request to post in case the
921 * current one completes, and the user modifies the work
922 * request associated with the completion
923 */
924 next_send_wr = (struct ib_send_wr *)send_wr->next;
925
926 smp = (struct ib_smp *)send_wr->wr.ud.mad_hdr;
927 if (smp->mgmt_class == IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE) {
928 ret = handle_outgoing_dr_smp(mad_agent_priv, smp,
929 send_wr);
930 if (ret < 0) /* error */
931 goto error2;
932 else if (ret == 1) /* locally consumed */
933 goto next;
934 }
935
936 /* Allocate MAD send WR tracking structure */
937 mad_send_wr = kmalloc(sizeof *mad_send_wr, GFP_ATOMIC);
938 if (!mad_send_wr) {
939 printk(KERN_ERR PFX "No memory for "
940 "ib_mad_send_wr_private\n");
941 ret = -ENOMEM;
942 goto error2;
943 }
944
945 mad_send_wr->send_wr = *send_wr;
946 mad_send_wr->send_wr.sg_list = mad_send_wr->sg_list;
947 memcpy(mad_send_wr->sg_list, send_wr->sg_list,
948 sizeof *send_wr->sg_list * send_wr->num_sge);
949 mad_send_wr->wr_id = mad_send_wr->send_wr.wr_id;
950 mad_send_wr->send_wr.next = NULL;
951 mad_send_wr->tid = send_wr->wr.ud.mad_hdr->tid;
952 mad_send_wr->mad_agent_priv = mad_agent_priv;
953 /* Timeout will be updated after send completes */
954 mad_send_wr->timeout = msecs_to_jiffies(send_wr->wr.
955 ud.timeout_ms);
956 mad_send_wr->retry = 0;
957 /* One reference for each work request to QP + response */
958 mad_send_wr->refcount = 1 + (mad_send_wr->timeout > 0);
959 mad_send_wr->status = IB_WC_SUCCESS;
960
961 /* Reference MAD agent until send completes */
962 atomic_inc(&mad_agent_priv->refcount);
963 spin_lock_irqsave(&mad_agent_priv->lock, flags);
964 list_add_tail(&mad_send_wr->agent_list,
965 &mad_agent_priv->send_list);
966 spin_unlock_irqrestore(&mad_agent_priv->lock, flags);
967
968 ret = ib_send_mad(mad_send_wr);
969 if (ret) {
970 /* Fail send request */
971 spin_lock_irqsave(&mad_agent_priv->lock, flags);
972 list_del(&mad_send_wr->agent_list);
973 spin_unlock_irqrestore(&mad_agent_priv->lock, flags);
974 atomic_dec(&mad_agent_priv->refcount);
975 goto error2;
976 }
977next:
978 send_wr = next_send_wr;
979 }
980 return 0;
981
982error2:
983 *bad_send_wr = send_wr;
984error1:
985 return ret;
986}
987EXPORT_SYMBOL(ib_post_send_mad);
988
989/*
990 * ib_free_recv_mad - Returns the data buffers used to receive
991 * a MAD back to the access layer
992 */
993void ib_free_recv_mad(struct ib_mad_recv_wc *mad_recv_wc)
994{
995 struct ib_mad_recv_buf *entry;
996 struct ib_mad_private_header *mad_priv_hdr;
997 struct ib_mad_private *priv;
998
999 mad_priv_hdr = container_of(mad_recv_wc,
1000 struct ib_mad_private_header,
1001 recv_wc);
1002 priv = container_of(mad_priv_hdr, struct ib_mad_private, header);
1003
1004 /*
1005 * Walk receive buffer list associated with this WC
1006 * No need to remove them from list of receive buffers
1007 */
1008 list_for_each_entry(entry, &mad_recv_wc->recv_buf.list, list) {
1009 /* Free previous receive buffer */
1010 kmem_cache_free(ib_mad_cache, priv);
1011 mad_priv_hdr = container_of(mad_recv_wc,
1012 struct ib_mad_private_header,
1013 recv_wc);
1014 priv = container_of(mad_priv_hdr, struct ib_mad_private,
1015 header);
1016 }
1017
1018 /* Free last buffer */
1019 kmem_cache_free(ib_mad_cache, priv);
1020}
1021EXPORT_SYMBOL(ib_free_recv_mad);
1022
1023void ib_coalesce_recv_mad(struct ib_mad_recv_wc *mad_recv_wc,
1024 void *buf)
1025{
1026 printk(KERN_ERR PFX "ib_coalesce_recv_mad() not implemented yet\n");
1027}
1028EXPORT_SYMBOL(ib_coalesce_recv_mad);
1029
1030struct ib_mad_agent *ib_redirect_mad_qp(struct ib_qp *qp,
1031 u8 rmpp_version,
1032 ib_mad_send_handler send_handler,
1033 ib_mad_recv_handler recv_handler,
1034 void *context)
1035{
1036 return ERR_PTR(-EINVAL); /* XXX: for now */
1037}
1038EXPORT_SYMBOL(ib_redirect_mad_qp);
1039
1040int ib_process_mad_wc(struct ib_mad_agent *mad_agent,
1041 struct ib_wc *wc)
1042{
1043 printk(KERN_ERR PFX "ib_process_mad_wc() not implemented yet\n");
1044 return 0;
1045}
1046EXPORT_SYMBOL(ib_process_mad_wc);
1047
1048static int method_in_use(struct ib_mad_mgmt_method_table **method,
1049 struct ib_mad_reg_req *mad_reg_req)
1050{
1051 int i;
1052
1053 for (i = find_first_bit(mad_reg_req->method_mask, IB_MGMT_MAX_METHODS);
1054 i < IB_MGMT_MAX_METHODS;
1055 i = find_next_bit(mad_reg_req->method_mask, IB_MGMT_MAX_METHODS,
1056 1+i)) {
1057 if ((*method)->agent[i]) {
1058 printk(KERN_ERR PFX "Method %d already in use\n", i);
1059 return -EINVAL;
1060 }
1061 }
1062 return 0;
1063}
1064
1065static int allocate_method_table(struct ib_mad_mgmt_method_table **method)
1066{
1067 /* Allocate management method table */
1068 *method = kmalloc(sizeof **method, GFP_ATOMIC);
1069 if (!*method) {
1070 printk(KERN_ERR PFX "No memory for "
1071 "ib_mad_mgmt_method_table\n");
1072 return -ENOMEM;
1073 }
1074 /* Clear management method table */
1075 memset(*method, 0, sizeof **method);
1076
1077 return 0;
1078}
1079
1080/*
1081 * Check to see if there are any methods still in use
1082 */
1083static int check_method_table(struct ib_mad_mgmt_method_table *method)
1084{
1085 int i;
1086
1087 for (i = 0; i < IB_MGMT_MAX_METHODS; i++)
1088 if (method->agent[i])
1089 return 1;
1090 return 0;
1091}
1092
1093/*
1094 * Check to see if there are any method tables for this class still in use
1095 */
1096static int check_class_table(struct ib_mad_mgmt_class_table *class)
1097{
1098 int i;
1099
1100 for (i = 0; i < MAX_MGMT_CLASS; i++)
1101 if (class->method_table[i])
1102 return 1;
1103 return 0;
1104}
1105
1106static int check_vendor_class(struct ib_mad_mgmt_vendor_class *vendor_class)
1107{
1108 int i;
1109
1110 for (i = 0; i < MAX_MGMT_OUI; i++)
1111 if (vendor_class->method_table[i])
1112 return 1;
1113 return 0;
1114}
1115
1116static int find_vendor_oui(struct ib_mad_mgmt_vendor_class *vendor_class,
1117 char *oui)
1118{
1119 int i;
1120
1121 for (i = 0; i < MAX_MGMT_OUI; i++)
1122 /* Is there matching OUI for this vendor class ? */
1123 if (!memcmp(vendor_class->oui[i], oui, 3))
1124 return i;
1125
1126 return -1;
1127}
1128
1129static int check_vendor_table(struct ib_mad_mgmt_vendor_class_table *vendor)
1130{
1131 int i;
1132
1133 for (i = 0; i < MAX_MGMT_VENDOR_RANGE2; i++)
1134 if (vendor->vendor_class[i])
1135 return 1;
1136
1137 return 0;
1138}
1139
1140static void remove_methods_mad_agent(struct ib_mad_mgmt_method_table *method,
1141 struct ib_mad_agent_private *agent)
1142{
1143 int i;
1144
1145 /* Remove any methods for this mad agent */
1146 for (i = 0; i < IB_MGMT_MAX_METHODS; i++) {
1147 if (method->agent[i] == agent) {
1148 method->agent[i] = NULL;
1149 }
1150 }
1151}
1152
1153static int add_nonoui_reg_req(struct ib_mad_reg_req *mad_reg_req,
1154 struct ib_mad_agent_private *agent_priv,
1155 u8 mgmt_class)
1156{
1157 struct ib_mad_port_private *port_priv;
1158 struct ib_mad_mgmt_class_table **class;
1159 struct ib_mad_mgmt_method_table **method;
1160 int i, ret;
1161
1162 port_priv = agent_priv->qp_info->port_priv;
1163 class = &port_priv->version[mad_reg_req->mgmt_class_version].class;
1164 if (!*class) {
1165 /* Allocate management class table for "new" class version */
1166 *class = kmalloc(sizeof **class, GFP_ATOMIC);
1167 if (!*class) {
1168 printk(KERN_ERR PFX "No memory for "
1169 "ib_mad_mgmt_class_table\n");
1170 ret = -ENOMEM;
1171 goto error1;
1172 }
1173 /* Clear management class table */
1174 memset(*class, 0, sizeof(**class));
1175 /* Allocate method table for this management class */
1176 method = &(*class)->method_table[mgmt_class];
1177 if ((ret = allocate_method_table(method)))
1178 goto error2;
1179 } else {
1180 method = &(*class)->method_table[mgmt_class];
1181 if (!*method) {
1182 /* Allocate method table for this management class */
1183 if ((ret = allocate_method_table(method)))
1184 goto error1;
1185 }
1186 }
1187
1188 /* Now, make sure methods are not already in use */
1189 if (method_in_use(method, mad_reg_req))
1190 goto error3;
1191
1192 /* Finally, add in methods being registered */
1193 for (i = find_first_bit(mad_reg_req->method_mask,
1194 IB_MGMT_MAX_METHODS);
1195 i < IB_MGMT_MAX_METHODS;
1196 i = find_next_bit(mad_reg_req->method_mask, IB_MGMT_MAX_METHODS,
1197 1+i)) {
1198 (*method)->agent[i] = agent_priv;
1199 }
1200 return 0;
1201
1202error3:
1203 /* Remove any methods for this mad agent */
1204 remove_methods_mad_agent(*method, agent_priv);
1205 /* Now, check to see if there are any methods in use */
1206 if (!check_method_table(*method)) {
1207 /* If not, release management method table */
1208 kfree(*method);
1209 *method = NULL;
1210 }
1211 ret = -EINVAL;
1212 goto error1;
1213error2:
1214 kfree(*class);
1215 *class = NULL;
1216error1:
1217 return ret;
1218}
1219
1220static int add_oui_reg_req(struct ib_mad_reg_req *mad_reg_req,
1221 struct ib_mad_agent_private *agent_priv)
1222{
1223 struct ib_mad_port_private *port_priv;
1224 struct ib_mad_mgmt_vendor_class_table **vendor_table;
1225 struct ib_mad_mgmt_vendor_class_table *vendor = NULL;
1226 struct ib_mad_mgmt_vendor_class *vendor_class = NULL;
1227 struct ib_mad_mgmt_method_table **method;
1228 int i, ret = -ENOMEM;
1229 u8 vclass;
1230
1231 /* "New" vendor (with OUI) class */
1232 vclass = vendor_class_index(mad_reg_req->mgmt_class);
1233 port_priv = agent_priv->qp_info->port_priv;
1234 vendor_table = &port_priv->version[
1235 mad_reg_req->mgmt_class_version].vendor;
1236 if (!*vendor_table) {
1237 /* Allocate mgmt vendor class table for "new" class version */
1238 vendor = kmalloc(sizeof *vendor, GFP_ATOMIC);
1239 if (!vendor) {
1240 printk(KERN_ERR PFX "No memory for "
1241 "ib_mad_mgmt_vendor_class_table\n");
1242 goto error1;
1243 }
1244 /* Clear management vendor class table */
1245 memset(vendor, 0, sizeof(*vendor));
1246 *vendor_table = vendor;
1247 }
1248 if (!(*vendor_table)->vendor_class[vclass]) {
1249 /* Allocate table for this management vendor class */
1250 vendor_class = kmalloc(sizeof *vendor_class, GFP_ATOMIC);
1251 if (!vendor_class) {
1252 printk(KERN_ERR PFX "No memory for "
1253 "ib_mad_mgmt_vendor_class\n");
1254 goto error2;
1255 }
1256 memset(vendor_class, 0, sizeof(*vendor_class));
1257 (*vendor_table)->vendor_class[vclass] = vendor_class;
1258 }
1259 for (i = 0; i < MAX_MGMT_OUI; i++) {
1260 /* Is there matching OUI for this vendor class ? */
1261 if (!memcmp((*vendor_table)->vendor_class[vclass]->oui[i],
1262 mad_reg_req->oui, 3)) {
1263 method = &(*vendor_table)->vendor_class[
1264 vclass]->method_table[i];
1265 BUG_ON(!*method);
1266 goto check_in_use;
1267 }
1268 }
1269 for (i = 0; i < MAX_MGMT_OUI; i++) {
1270 /* OUI slot available ? */
1271 if (!is_vendor_oui((*vendor_table)->vendor_class[
1272 vclass]->oui[i])) {
1273 method = &(*vendor_table)->vendor_class[
1274 vclass]->method_table[i];
1275 BUG_ON(*method);
1276 /* Allocate method table for this OUI */
1277 if ((ret = allocate_method_table(method)))
1278 goto error3;
1279 memcpy((*vendor_table)->vendor_class[vclass]->oui[i],
1280 mad_reg_req->oui, 3);
1281 goto check_in_use;
1282 }
1283 }
1284 printk(KERN_ERR PFX "All OUI slots in use\n");
1285 goto error3;
1286
1287check_in_use:
1288 /* Now, make sure methods are not already in use */
1289 if (method_in_use(method, mad_reg_req))
1290 goto error4;
1291
1292 /* Finally, add in methods being registered */
1293 for (i = find_first_bit(mad_reg_req->method_mask,
1294 IB_MGMT_MAX_METHODS);
1295 i < IB_MGMT_MAX_METHODS;
1296 i = find_next_bit(mad_reg_req->method_mask, IB_MGMT_MAX_METHODS,
1297 1+i)) {
1298 (*method)->agent[i] = agent_priv;
1299 }
1300 return 0;
1301
1302error4:
1303 /* Remove any methods for this mad agent */
1304 remove_methods_mad_agent(*method, agent_priv);
1305 /* Now, check to see if there are any methods in use */
1306 if (!check_method_table(*method)) {
1307 /* If not, release management method table */
1308 kfree(*method);
1309 *method = NULL;
1310 }
1311 ret = -EINVAL;
1312error3:
1313 if (vendor_class) {
1314 (*vendor_table)->vendor_class[vclass] = NULL;
1315 kfree(vendor_class);
1316 }
1317error2:
1318 if (vendor) {
1319 *vendor_table = NULL;
1320 kfree(vendor);
1321 }
1322error1:
1323 return ret;
1324}
1325
1326static void remove_mad_reg_req(struct ib_mad_agent_private *agent_priv)
1327{
1328 struct ib_mad_port_private *port_priv;
1329 struct ib_mad_mgmt_class_table *class;
1330 struct ib_mad_mgmt_method_table *method;
1331 struct ib_mad_mgmt_vendor_class_table *vendor;
1332 struct ib_mad_mgmt_vendor_class *vendor_class;
1333 int index;
1334 u8 mgmt_class;
1335
1336 /*
1337 * Was MAD registration request supplied
1338 * with original registration ?
1339 */
1340 if (!agent_priv->reg_req) {
1341 goto out;
1342 }
1343
1344 port_priv = agent_priv->qp_info->port_priv;
1345 mgmt_class = convert_mgmt_class(agent_priv->reg_req->mgmt_class);
1346 class = port_priv->version[
1347 agent_priv->reg_req->mgmt_class_version].class;
1348 if (!class)
1349 goto vendor_check;
1350
1351 method = class->method_table[mgmt_class];
1352 if (method) {
1353 /* Remove any methods for this mad agent */
1354 remove_methods_mad_agent(method, agent_priv);
1355 /* Now, check to see if there are any methods still in use */
1356 if (!check_method_table(method)) {
1357 /* If not, release management method table */
1358 kfree(method);
1359 class->method_table[mgmt_class] = NULL;
1360 /* Any management classes left ? */
1361 if (!check_class_table(class)) {
1362 /* If not, release management class table */
1363 kfree(class);
1364 port_priv->version[
1365 agent_priv->reg_req->
1366 mgmt_class_version].class = NULL;
1367 }
1368 }
1369 }
1370
1371vendor_check:
1372 if (!is_vendor_class(mgmt_class))
1373 goto out;
1374
1375 /* normalize mgmt_class to vendor range 2 */
1376 mgmt_class = vendor_class_index(agent_priv->reg_req->mgmt_class);
1377 vendor = port_priv->version[
1378 agent_priv->reg_req->mgmt_class_version].vendor;
1379
1380 if (!vendor)
1381 goto out;
1382
1383 vendor_class = vendor->vendor_class[mgmt_class];
1384 if (vendor_class) {
1385 index = find_vendor_oui(vendor_class, agent_priv->reg_req->oui);
1386 if (index < 0)
1387 goto out;
1388 method = vendor_class->method_table[index];
1389 if (method) {
1390 /* Remove any methods for this mad agent */
1391 remove_methods_mad_agent(method, agent_priv);
1392 /*
1393 * Now, check to see if there are
1394 * any methods still in use
1395 */
1396 if (!check_method_table(method)) {
1397 /* If not, release management method table */
1398 kfree(method);
1399 vendor_class->method_table[index] = NULL;
1400 memset(vendor_class->oui[index], 0, 3);
1401 /* Any OUIs left ? */
1402 if (!check_vendor_class(vendor_class)) {
1403 /* If not, release vendor class table */
1404 kfree(vendor_class);
1405 vendor->vendor_class[mgmt_class] = NULL;
1406 /* Any other vendor classes left ? */
1407 if (!check_vendor_table(vendor)) {
1408 kfree(vendor);
1409 port_priv->version[
1410 agent_priv->reg_req->
1411 mgmt_class_version].
1412 vendor = NULL;
1413 }
1414 }
1415 }
1416 }
1417 }
1418
1419out:
1420 return;
1421}
1422
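/* Route an incoming MAD to a registered agent: responses are matched on the high 32 bits of the TID, other MADs on version, class, method, and (for vendor classes) OUI. */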
1423static struct ib_mad_agent_private *
1424find_mad_agent(struct ib_mad_port_private *port_priv,
1425 struct ib_mad *mad)
1426{
1427 struct ib_mad_agent_private *mad_agent = NULL;
1428 unsigned long flags;
1429
1430 spin_lock_irqsave(&port_priv->reg_lock, flags);
1431 if (response_mad(mad)) {
1432 u32 hi_tid;
1433 struct ib_mad_agent_private *entry;
1434
1435 /*
1436 * Routing is based on high 32 bits of transaction ID
1437 * of MAD.
1438 */
1439 hi_tid = be64_to_cpu(mad->mad_hdr.tid) >> 32;
1440 list_for_each_entry(entry, &port_priv->agent_list,
1441 agent_list) {
1442 if (entry->agent.hi_tid == hi_tid) {
1443 mad_agent = entry;
1444 break;
1445 }
1446 }
1447 } else {
1448 struct ib_mad_mgmt_class_table *class;
1449 struct ib_mad_mgmt_method_table *method;
1450 struct ib_mad_mgmt_vendor_class_table *vendor;
1451 struct ib_mad_mgmt_vendor_class *vendor_class;
1452 struct ib_vendor_mad *vendor_mad;
1453 int index;
1454
1455 /*
1456 * Routing is based on version, class, and method
1457 * For "newer" vendor MADs, also based on OUI
1458 */
1459 if (mad->mad_hdr.class_version >= MAX_MGMT_VERSION)
1460 goto out;
1461 if (!is_vendor_class(mad->mad_hdr.mgmt_class)) {
1462 class = port_priv->version[
1463 mad->mad_hdr.class_version].class;
1464 if (!class)
1465 goto out;
1466 method = class->method_table[convert_mgmt_class(
1467 mad->mad_hdr.mgmt_class)];
1468 if (method)
1469 mad_agent = method->agent[mad->mad_hdr.method &
1470 ~IB_MGMT_METHOD_RESP];
1471 } else {
1472 vendor = port_priv->version[
1473 mad->mad_hdr.class_version].vendor;
1474 if (!vendor)
1475 goto out;
1476 vendor_class = vendor->vendor_class[vendor_class_index(
1477 mad->mad_hdr.mgmt_class)];
1478 if (!vendor_class)
1479 goto out;
1480 /* Find matching OUI */
1481 vendor_mad = (struct ib_vendor_mad *)mad;
1482 index = find_vendor_oui(vendor_class, vendor_mad->oui);
1483 if (index == -1)
1484 goto out;
1485 method = vendor_class->method_table[index];
1486 if (method) {
1487 mad_agent = method->agent[mad->mad_hdr.method &
1488 ~IB_MGMT_METHOD_RESP];
1489 }
1490 }
1491 }
1492
1493 if (mad_agent) {
1494 if (mad_agent->agent.recv_handler)
1495 atomic_inc(&mad_agent->refcount);
1496 else {
1497 printk(KERN_NOTICE PFX "No receive handler for client "
1498 "%p on port %d\n",
1499 &mad_agent->agent, port_priv->port_num);
1500 mad_agent = NULL;
1501 }
1502 }
1503out:
1504 spin_unlock_irqrestore(&port_priv->reg_lock, flags);
1505
1506 return mad_agent;
1507}
1508
1509static int validate_mad(struct ib_mad *mad, u32 qp_num)
1510{
1511 int valid = 0;
1512
1513 /* Make sure MAD base version is understood */
1514 if (mad->mad_hdr.base_version != IB_MGMT_BASE_VERSION) {
1515 printk(KERN_ERR PFX "MAD received with unsupported base "
1516 "version %d\n", mad->mad_hdr.base_version);
1517 goto out;
1518 }
1519
1520 /* Filter SMI packets sent to other than QP0 */
1521 if ((mad->mad_hdr.mgmt_class == IB_MGMT_CLASS_SUBN_LID_ROUTED) ||
1522 (mad->mad_hdr.mgmt_class == IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE)) {
1523 if (qp_num == 0)
1524 valid = 1;
1525 } else {
1526 /* Filter GSI packets sent to QP0 */
1527 if (qp_num != 0)
1528 valid = 1;
1529 }
1530
1531out:
1532 return valid;
1533}
1534
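/* Find the outstanding send request whose TID matches a received response. */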
1535static struct ib_mad_send_wr_private*
1536find_send_req(struct ib_mad_agent_private *mad_agent_priv,
1537 u64 tid)
1538{
1539 struct ib_mad_send_wr_private *mad_send_wr;
1540
1541 list_for_each_entry(mad_send_wr, &mad_agent_priv->wait_list,
1542 agent_list) {
1543 if (mad_send_wr->tid == tid)
1544 return mad_send_wr;
1545 }
1546
1547 /*
1548 * It's possible to receive the response before we've
1549 * been notified that the send has completed
1550 */
1551 list_for_each_entry(mad_send_wr, &mad_agent_priv->send_list,
1552 agent_list) {
1553 if (mad_send_wr->tid == tid && mad_send_wr->timeout) {
1554 /* Verify request has not been canceled */
1555 return (mad_send_wr->status == IB_WC_SUCCESS) ?
1556 mad_send_wr : NULL;
1557 }
1558 }
1559 return NULL;
1560}
1561
1562static void ib_mad_complete_recv(struct ib_mad_agent_private *mad_agent_priv,
1563 struct ib_mad_recv_wc *mad_recv_wc)
1564{
1565 struct ib_mad_send_wr_private *mad_send_wr;
1566 struct ib_mad_send_wc mad_send_wc;
1567 unsigned long flags;
1568 u64 tid;
1569
1570 INIT_LIST_HEAD(&mad_recv_wc->recv_buf.list);
1571 /* Complete corresponding request */
1572 if (response_mad(mad_recv_wc->recv_buf.mad)) {
1573 tid = mad_recv_wc->recv_buf.mad->mad_hdr.tid;
1574 spin_lock_irqsave(&mad_agent_priv->lock, flags);
1575 mad_send_wr = find_send_req(mad_agent_priv, tid);
1576 if (!mad_send_wr) {
1577 spin_unlock_irqrestore(&mad_agent_priv->lock, flags);
1578 ib_free_recv_mad(mad_recv_wc);
1579 if (atomic_dec_and_test(&mad_agent_priv->refcount))
1580 wake_up(&mad_agent_priv->wait);
1581 return;
1582 }
1583 /* Timeout = 0 means that we won't wait for a response */
1584 mad_send_wr->timeout = 0;
1585 spin_unlock_irqrestore(&mad_agent_priv->lock, flags);
1586
1587 /* Defined behavior is to complete response before request */
1588 mad_recv_wc->wc->wr_id = mad_send_wr->wr_id;
1589 mad_agent_priv->agent.recv_handler(&mad_agent_priv->agent,
1590 mad_recv_wc);
1591 atomic_dec(&mad_agent_priv->refcount);
1592
1593 mad_send_wc.status = IB_WC_SUCCESS;
1594 mad_send_wc.vendor_err = 0;
1595 mad_send_wc.wr_id = mad_send_wr->wr_id;
1596 ib_mad_complete_send_wr(mad_send_wr, &mad_send_wc);
1597 } else {
1598 mad_agent_priv->agent.recv_handler(&mad_agent_priv->agent,
1599 mad_recv_wc);
1600 if (atomic_dec_and_test(&mad_agent_priv->refcount))
1601 wake_up(&mad_agent_priv->wait);
1602 }
1603}
1604
1605static void ib_mad_recv_done_handler(struct ib_mad_port_private *port_priv,
1606 struct ib_wc *wc)
1607{
1608 struct ib_mad_qp_info *qp_info;
1609 struct ib_mad_private_header *mad_priv_hdr;
1610 struct ib_mad_private *recv, *response;
1611 struct ib_mad_list_head *mad_list;
1612 struct ib_mad_agent_private *mad_agent;
1613
1614 response = kmem_cache_alloc(ib_mad_cache, GFP_KERNEL);
1615 if (!response)
1616 printk(KERN_ERR PFX "ib_mad_recv_done_handler no memory "
1617 "for response buffer\n");
1618
1619 mad_list = (struct ib_mad_list_head *)(unsigned long)wc->wr_id;
1620 qp_info = mad_list->mad_queue->qp_info;
1621 dequeue_mad(mad_list);
1622
1623 mad_priv_hdr = container_of(mad_list, struct ib_mad_private_header,
1624 mad_list);
1625 recv = container_of(mad_priv_hdr, struct ib_mad_private, header);
1626 dma_unmap_single(port_priv->device->dma_device,
1627 pci_unmap_addr(&recv->header, mapping),
1628 sizeof(struct ib_mad_private) -
1629 sizeof(struct ib_mad_private_header),
1630 DMA_FROM_DEVICE);
1631
1632 /* Setup MAD receive work completion from "normal" work completion */
1633 recv->header.wc = *wc;
1634 recv->header.recv_wc.wc = &recv->header.wc;
1635 recv->header.recv_wc.mad_len = sizeof(struct ib_mad);
1636 recv->header.recv_wc.recv_buf.mad = &recv->mad.mad;
1637 recv->header.recv_wc.recv_buf.grh = &recv->grh;
1638
1639 if (atomic_read(&qp_info->snoop_count))
1640 snoop_recv(qp_info, &recv->header.recv_wc, IB_MAD_SNOOP_RECVS);
1641
1642 /* Validate MAD */
1643 if (!validate_mad(&recv->mad.mad, qp_info->qp->qp_num))
1644 goto out;
1645
1646 if (recv->mad.mad.mad_hdr.mgmt_class ==
1647 IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE) {
1648 if (!smi_handle_dr_smp_recv(&recv->mad.smp,
1649 port_priv->device->node_type,
1650 port_priv->port_num,
1651 port_priv->device->phys_port_cnt))
1652 goto out;
1653 if (!smi_check_forward_dr_smp(&recv->mad.smp))
1654 goto local;
1655 if (!smi_handle_dr_smp_send(&recv->mad.smp,
1656 port_priv->device->node_type,
1657 port_priv->port_num))
1658 goto out;
1659 if (!smi_check_local_dr_smp(&recv->mad.smp,
1660 port_priv->device,
1661 port_priv->port_num))
1662 goto out;
1663 }
1664
1665local:
1666 /* Give driver "right of first refusal" on incoming MAD */
1667 if (port_priv->device->process_mad) {
1668 int ret;
1669
1670 if (!response) {
1671 printk(KERN_ERR PFX "No memory for response MAD\n");
1672 /*
1673 * Is it better to assume that
1674 * it wouldn't be processed ?
1675 */
1676 goto out;
1677 }
1678
1679 ret = port_priv->device->process_mad(port_priv->device, 0,
1680 port_priv->port_num,
1681 wc, &recv->grh,
1682 &recv->mad.mad,
1683 &response->mad.mad);
1684 if (ret & IB_MAD_RESULT_SUCCESS) {
1685 if (ret & IB_MAD_RESULT_CONSUMED)
1686 goto out;
1687 if (ret & IB_MAD_RESULT_REPLY) {
1688 /* Send response */
1689 if (!agent_send(response, &recv->grh, wc,
1690 port_priv->device,
1691 port_priv->port_num))
1692 response = NULL;
1693 goto out;
1694 }
1695 }
1696 }
1697
1698 mad_agent = find_mad_agent(port_priv, &recv->mad.mad);
1699 if (mad_agent) {
1700 ib_mad_complete_recv(mad_agent, &recv->header.recv_wc);
1701 /*
1702 * recv is freed up in error cases in ib_mad_complete_recv
1703 * or via recv_handler in ib_mad_complete_recv()
1704 */
1705 recv = NULL;
1706 }
1707
1708out:
1709 /* Post another receive request for this QP */
1710 if (response) {
1711 ib_mad_post_receive_mads(qp_info, response);
1712 if (recv)
1713 kmem_cache_free(ib_mad_cache, recv);
1714 } else
1715 ib_mad_post_receive_mads(qp_info, recv);
1716}
1717
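/* Re-arm the agent's delayed timeout work for the earliest pending request, or cancel it if the wait list is empty. */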
1718static void adjust_timeout(struct ib_mad_agent_private *mad_agent_priv)
1719{
1720 struct ib_mad_send_wr_private *mad_send_wr;
1721 unsigned long delay;
1722
1723 if (list_empty(&mad_agent_priv->wait_list)) {
1724 cancel_delayed_work(&mad_agent_priv->timed_work);
1725 } else {
1726 mad_send_wr = list_entry(mad_agent_priv->wait_list.next,
1727 struct ib_mad_send_wr_private,
1728 agent_list);
1729
1730 if (time_after(mad_agent_priv->timeout,
1731 mad_send_wr->timeout)) {
1732 mad_agent_priv->timeout = mad_send_wr->timeout;
1733 cancel_delayed_work(&mad_agent_priv->timed_work);
1734 delay = mad_send_wr->timeout - jiffies;
1735 if ((long)delay <= 0)
1736 delay = 1;
1737 queue_delayed_work(mad_agent_priv->qp_info->
1738 port_priv->wq,
1739 &mad_agent_priv->timed_work, delay);
1740 }
1741 }
1742}
1743
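/* Move a completed send that still expects a response onto the wait list, keeping the list sorted by absolute timeout. */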
1744static void wait_for_response(struct ib_mad_send_wr_private *mad_send_wr)
1745 {
1746 struct ib_mad_agent_private *mad_agent_priv;
1747 struct ib_mad_send_wr_private *temp_mad_send_wr;
1748 struct list_head *list_item;
1749 unsigned long delay;
1750
1751 mad_agent_priv = mad_send_wr->mad_agent_priv;
1752 list_del(&mad_send_wr->agent_list);
1753
1754 delay = mad_send_wr->timeout;
1755 mad_send_wr->timeout += jiffies;
1756
1757 list_for_each_prev(list_item, &mad_agent_priv->wait_list) {
1758 temp_mad_send_wr = list_entry(list_item,
1759 struct ib_mad_send_wr_private,
1760 agent_list);
1761 if (time_after(mad_send_wr->timeout,
1762 temp_mad_send_wr->timeout))
1763 break;
1764 }
1765 list_add(&mad_send_wr->agent_list, list_item);
1766
1767 /* Reschedule a work item if we have a shorter timeout */
1768 if (mad_agent_priv->wait_list.next == &mad_send_wr->agent_list) {
1769 cancel_delayed_work(&mad_agent_priv->timed_work);
1770 queue_delayed_work(mad_agent_priv->qp_info->port_priv->wq,
1771 &mad_agent_priv->timed_work, delay);
1772 }
1773}
1774
1775/*
1776 * Process a send work completion
1777 */
1778static void ib_mad_complete_send_wr(struct ib_mad_send_wr_private *mad_send_wr,
1779 struct ib_mad_send_wc *mad_send_wc)
1780{
1781 struct ib_mad_agent_private *mad_agent_priv;
1782 unsigned long flags;
1783
1784 mad_agent_priv = mad_send_wr->mad_agent_priv;
1785 spin_lock_irqsave(&mad_agent_priv->lock, flags);
1786 if (mad_send_wc->status != IB_WC_SUCCESS &&
1787 mad_send_wr->status == IB_WC_SUCCESS) {
1788 mad_send_wr->status = mad_send_wc->status;
1789 mad_send_wr->refcount -= (mad_send_wr->timeout > 0);
1790 }
1791
1792 if (--mad_send_wr->refcount > 0) {
1793 if (mad_send_wr->refcount == 1 && mad_send_wr->timeout &&
1794 mad_send_wr->status == IB_WC_SUCCESS) {
1795 wait_for_response(mad_send_wr);
1796 }
1797 spin_unlock_irqrestore(&mad_agent_priv->lock, flags);
1798 return;
1799 }
1800
1801 /* Remove send from MAD agent and notify client of completion */
1802 list_del(&mad_send_wr->agent_list);
1803 adjust_timeout(mad_agent_priv);
1804 spin_unlock_irqrestore(&mad_agent_priv->lock, flags);
1805
1806 if (mad_send_wr->status != IB_WC_SUCCESS )
1807 mad_send_wc->status = mad_send_wr->status;
1808 mad_agent_priv->agent.send_handler(&mad_agent_priv->agent,
1809 mad_send_wc);
1810
1811 /* Release reference on agent taken when sending */
1812 if (atomic_dec_and_test(&mad_agent_priv->refcount))
1813 wake_up(&mad_agent_priv->wait);
1814
1815 kfree(mad_send_wr);
1816}
1817
1818static void ib_mad_send_done_handler(struct ib_mad_port_private *port_priv,
1819 struct ib_wc *wc)
1820{
1821 struct ib_mad_send_wr_private *mad_send_wr, *queued_send_wr;
1822 struct ib_mad_list_head *mad_list;
1823 struct ib_mad_qp_info *qp_info;
1824 struct ib_mad_queue *send_queue;
1825 struct ib_send_wr *bad_send_wr;
1826 unsigned long flags;
1827 int ret;
1828
1829 mad_list = (struct ib_mad_list_head *)(unsigned long)wc->wr_id;
1830 mad_send_wr = container_of(mad_list, struct ib_mad_send_wr_private,
1831 mad_list);
1832 send_queue = mad_list->mad_queue;
1833 qp_info = send_queue->qp_info;
1834
1835retry:
1836 queued_send_wr = NULL;
1837 spin_lock_irqsave(&send_queue->lock, flags);
1838 list_del(&mad_list->list);
1839
1840 /* Move queued send to the send queue */
1841 if (send_queue->count-- > send_queue->max_active) {
1842 mad_list = container_of(qp_info->overflow_list.next,
1843 struct ib_mad_list_head, list);
1844 queued_send_wr = container_of(mad_list,
1845 struct ib_mad_send_wr_private,
1846 mad_list);
1847 list_del(&mad_list->list);
1848 list_add_tail(&mad_list->list, &send_queue->list);
1849 }
1850 spin_unlock_irqrestore(&send_queue->lock, flags);
1851
1852 /* Restore client wr_id in WC and complete send */
1853 wc->wr_id = mad_send_wr->wr_id;
1854 if (atomic_read(&qp_info->snoop_count))
1855 snoop_send(qp_info, &mad_send_wr->send_wr,
1856 (struct ib_mad_send_wc *)wc,
1857 IB_MAD_SNOOP_SEND_COMPLETIONS);
1858 ib_mad_complete_send_wr(mad_send_wr, (struct ib_mad_send_wc *)wc);
1859
1860 if (queued_send_wr) {
1861 ret = ib_post_send(qp_info->qp, &queued_send_wr->send_wr,
1862 &bad_send_wr);
1863 if (ret) {
1864 printk(KERN_ERR PFX "ib_post_send failed: %d\n", ret);
1865 mad_send_wr = queued_send_wr;
1866 wc->status = IB_WC_LOC_QP_OP_ERR;
1867 goto retry;
1868 }
1869 }
1870}
1871
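/*
 * Flag every send still on the QP's send queue so that the flush errors
 * generated while the QP recovers from SQE are reposted by
 * mad_error_handler() instead of being reported to clients as failures.
 */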
1872static void mark_sends_for_retry(struct ib_mad_qp_info *qp_info)
1873{
1874 struct ib_mad_send_wr_private *mad_send_wr;
1875 struct ib_mad_list_head *mad_list;
1876 unsigned long flags;
1877
1878 spin_lock_irqsave(&qp_info->send_queue.lock, flags);
1879 list_for_each_entry(mad_list, &qp_info->send_queue.list, list) {
1880 mad_send_wr = container_of(mad_list,
1881 struct ib_mad_send_wr_private,
1882 mad_list);
1883 mad_send_wr->retry = 1;
1884 }
1885 spin_unlock_irqrestore(&qp_info->send_queue.lock, flags);
1886}
1887
1888static void mad_error_handler(struct ib_mad_port_private *port_priv,
1889 struct ib_wc *wc)
1890{
1891 struct ib_mad_list_head *mad_list;
1892 struct ib_mad_qp_info *qp_info;
1893 struct ib_mad_send_wr_private *mad_send_wr;
1894 int ret;
1895
1896 /* Determine if failure was a send or receive */
1897 mad_list = (struct ib_mad_list_head *)(unsigned long)wc->wr_id;
1898 qp_info = mad_list->mad_queue->qp_info;
1899 if (mad_list->mad_queue == &qp_info->recv_queue)
1900 /*
1901 * Receive errors indicate that the QP has entered the error
1902 * state - error handling/shutdown code will cleanup
1903 */
1904 return;
1905
1906 /*
1907 * Send errors will transition the QP to SQE - move
1908 * QP to RTS and repost flushed work requests
1909 */
1910 mad_send_wr = container_of(mad_list, struct ib_mad_send_wr_private,
1911 mad_list);
1912 if (wc->status == IB_WC_WR_FLUSH_ERR) {
1913 if (mad_send_wr->retry) {
1914 /* Repost send */
1915 struct ib_send_wr *bad_send_wr;
1916
1917 mad_send_wr->retry = 0;
1918 ret = ib_post_send(qp_info->qp, &mad_send_wr->send_wr,
1919 &bad_send_wr);
1920 if (ret)
1921 ib_mad_send_done_handler(port_priv, wc);
1922 } else
1923 ib_mad_send_done_handler(port_priv, wc);
1924 } else {
1925 struct ib_qp_attr *attr;
1926
1927 /* Transition QP to RTS and fail offending send */
1928 attr = kmalloc(sizeof *attr, GFP_KERNEL);
1929 if (attr) {
1930 attr->qp_state = IB_QPS_RTS;
1931 attr->cur_qp_state = IB_QPS_SQE;
1932 ret = ib_modify_qp(qp_info->qp, attr,
1933 IB_QP_STATE | IB_QP_CUR_STATE);
1934 kfree(attr);
1935 if (ret)
1936 printk(KERN_ERR PFX "mad_error_handler - "
1937 "ib_modify_qp to RTS : %d\n", ret);
1938 else
1939 mark_sends_for_retry(qp_info);
1940 }
1941 ib_mad_send_done_handler(port_priv, wc);
1942 }
1943}
1944
1945/*
1946 * IB MAD completion callback
1947 */
1948static void ib_mad_completion_handler(void *data)
1949{
1950 struct ib_mad_port_private *port_priv;
1951 struct ib_wc wc;
1952
1953 port_priv = (struct ib_mad_port_private *)data;
1954 ib_req_notify_cq(port_priv->cq, IB_CQ_NEXT_COMP);
1955
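	/*
	 * The CQ is re-armed before draining it so that a completion
	 * arriving after the final poll still triggers another callback;
	 * at worst this costs a spurious run of the work item.
	 */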
1956 while (ib_poll_cq(port_priv->cq, 1, &wc) == 1) {
1957 if (wc.status == IB_WC_SUCCESS) {
1958 switch (wc.opcode) {
1959 case IB_WC_SEND:
1960 ib_mad_send_done_handler(port_priv, &wc);
1961 break;
1962 case IB_WC_RECV:
1963 ib_mad_recv_done_handler(port_priv, &wc);
1964 break;
1965 default:
1966 BUG_ON(1);
1967 break;
1968 }
1969 } else
1970 mad_error_handler(port_priv, &wc);
1971 }
1972}
1973
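/*
 * Cancel every outstanding send for an agent: mark active sends as
 * flushed, splice the entire wait list onto a private cancel list so
 * incoming responses can no longer match a request, and report each
 * request taken from the wait list to the client outside the agent lock.
 */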
1974static void cancel_mads(struct ib_mad_agent_private *mad_agent_priv)
1975{
1976 unsigned long flags;
1977 struct ib_mad_send_wr_private *mad_send_wr, *temp_mad_send_wr;
1978 struct ib_mad_send_wc mad_send_wc;
1979 struct list_head cancel_list;
1980
1981 INIT_LIST_HEAD(&cancel_list);
1982
1983 spin_lock_irqsave(&mad_agent_priv->lock, flags);
1984 list_for_each_entry_safe(mad_send_wr, temp_mad_send_wr,
1985 &mad_agent_priv->send_list, agent_list) {
1986 if (mad_send_wr->status == IB_WC_SUCCESS) {
1987 mad_send_wr->status = IB_WC_WR_FLUSH_ERR;
1988 mad_send_wr->refcount -= (mad_send_wr->timeout > 0);
1989 }
1990 }
1991
1992 /* Empty wait list to prevent receives from finding a request */
1993 list_splice_init(&mad_agent_priv->wait_list, &cancel_list);
1994 spin_unlock_irqrestore(&mad_agent_priv->lock, flags);
1995
1996 /* Report all cancelled requests */
1997 mad_send_wc.status = IB_WC_WR_FLUSH_ERR;
1998 mad_send_wc.vendor_err = 0;
1999
2000 list_for_each_entry_safe(mad_send_wr, temp_mad_send_wr,
2001 &cancel_list, agent_list) {
2002 mad_send_wc.wr_id = mad_send_wr->wr_id;
2003 mad_agent_priv->agent.send_handler(&mad_agent_priv->agent,
2004 &mad_send_wc);
2005
2006 list_del(&mad_send_wr->agent_list);
2007 kfree(mad_send_wr);
2008 atomic_dec(&mad_agent_priv->refcount);
2009 }
2010}
2011
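/*
 * Look up an outstanding send by the wr_id the client supplied,
 * searching first the wait list and then the active send list.
 * Must be called with the agent lock held.
 */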
2012static struct ib_mad_send_wr_private*
2013find_send_by_wr_id(struct ib_mad_agent_private *mad_agent_priv,
2014 u64 wr_id)
2015{
2016 struct ib_mad_send_wr_private *mad_send_wr;
2017
2018 list_for_each_entry(mad_send_wr, &mad_agent_priv->wait_list,
2019 agent_list) {
2020 if (mad_send_wr->wr_id == wr_id)
2021 return mad_send_wr;
2022 }
2023
2024 list_for_each_entry(mad_send_wr, &mad_agent_priv->send_list,
2025 agent_list) {
2026 if (mad_send_wr->wr_id == wr_id)
2027 return mad_send_wr;
2028 }
2029 return NULL;
2030}
2031
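/*
 * Work handler for the agent's canceled list: report each send that
 * ib_cancel_mad() queued there as flushed.  The agent lock is dropped
 * around each callback so the client may issue or cancel MADs from its
 * send_handler.
 */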
2032static void cancel_sends(void *data)
2033{
2034 struct ib_mad_agent_private *mad_agent_priv;
2035 struct ib_mad_send_wr_private *mad_send_wr;
2036 struct ib_mad_send_wc mad_send_wc;
2037 unsigned long flags;
2038
2039 mad_agent_priv = data;
2040
2041 mad_send_wc.status = IB_WC_WR_FLUSH_ERR;
2042 mad_send_wc.vendor_err = 0;
2043
2044 spin_lock_irqsave(&mad_agent_priv->lock, flags);
2045 while (!list_empty(&mad_agent_priv->canceled_list)) {
2046 mad_send_wr = list_entry(mad_agent_priv->canceled_list.next,
2047 struct ib_mad_send_wr_private,
2048 agent_list);
2049
2050 list_del(&mad_send_wr->agent_list);
2051 spin_unlock_irqrestore(&mad_agent_priv->lock, flags);
2052
2053 mad_send_wc.wr_id = mad_send_wr->wr_id;
2054 mad_agent_priv->agent.send_handler(&mad_agent_priv->agent,
2055 &mad_send_wc);
2056
2057 kfree(mad_send_wr);
2058 if (atomic_dec_and_test(&mad_agent_priv->refcount))
2059 wake_up(&mad_agent_priv->wait);
2060 spin_lock_irqsave(&mad_agent_priv->lock, flags);
2061 }
2062 spin_unlock_irqrestore(&mad_agent_priv->lock, flags);
2063}
2064
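/*
 * Cancel a single outstanding request identified by the wr_id the
 * client used when posting it.  If hardware completions are still
 * pending, the request is only marked flushed and is reported when the
 * last completion arrives; otherwise it is moved to the canceled list
 * and reported from the cancel work item in process context.
 *
 * Illustrative use from a client (names are the caller's own):
 *
 *	ib_cancel_mad(my_agent, my_send_wr.wr_id);
 *
 * The cancelled request is reported through the agent's send_handler
 * with status IB_WC_WR_FLUSH_ERR.
 */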
2065void ib_cancel_mad(struct ib_mad_agent *mad_agent,
2066 u64 wr_id)
2067{
2068 struct ib_mad_agent_private *mad_agent_priv;
2069 struct ib_mad_send_wr_private *mad_send_wr;
2070 unsigned long flags;
2071
2072 mad_agent_priv = container_of(mad_agent, struct ib_mad_agent_private,
2073 agent);
2074 spin_lock_irqsave(&mad_agent_priv->lock, flags);
2075 mad_send_wr = find_send_by_wr_id(mad_agent_priv, wr_id);
2076 if (!mad_send_wr) {
2077 spin_unlock_irqrestore(&mad_agent_priv->lock, flags);
2078 goto out;
2079 }
2080
2081 if (mad_send_wr->status == IB_WC_SUCCESS)
2082 mad_send_wr->refcount -= (mad_send_wr->timeout > 0);
2083
2084 if (mad_send_wr->refcount != 0) {
2085 mad_send_wr->status = IB_WC_WR_FLUSH_ERR;
2086 spin_unlock_irqrestore(&mad_agent_priv->lock, flags);
2087 goto out;
2088 }
2089
2090 list_del(&mad_send_wr->agent_list);
2091 list_add_tail(&mad_send_wr->agent_list, &mad_agent_priv->canceled_list);
2092 adjust_timeout(mad_agent_priv);
2093 spin_unlock_irqrestore(&mad_agent_priv->lock, flags);
2094
2095 queue_work(mad_agent_priv->qp_info->port_priv->wq,
2096 &mad_agent_priv->canceled_work);
2097out:
2098 return;
2099}
2100EXPORT_SYMBOL(ib_cancel_mad);
2101
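/*
 * Work handler for sends that were processed locally instead of being
 * posted to the QP.  If local processing produced a MAD for a receiving
 * agent, a synthetic work completion is built and that agent's
 * recv_handler is invoked first (response before request); the
 * originating send is then completed back to the sender.
 */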
2102static void local_completions(void *data)
2103{
2104 struct ib_mad_agent_private *mad_agent_priv;
2105 struct ib_mad_local_private *local;
2106 struct ib_mad_agent_private *recv_mad_agent;
2107 unsigned long flags;
2108 struct ib_wc wc;
2109 struct ib_mad_send_wc mad_send_wc;
2110
2111 mad_agent_priv = (struct ib_mad_agent_private *)data;
2112
2113 spin_lock_irqsave(&mad_agent_priv->lock, flags);
2114 while (!list_empty(&mad_agent_priv->local_list)) {
2115 local = list_entry(mad_agent_priv->local_list.next,
2116 struct ib_mad_local_private,
2117 completion_list);
2118 spin_unlock_irqrestore(&mad_agent_priv->lock, flags);
2119 if (local->mad_priv) {
2120 recv_mad_agent = local->recv_mad_agent;
2121 if (!recv_mad_agent) {
2122 printk(KERN_ERR PFX "No receive MAD agent for local completion\n");
2123 kmem_cache_free(ib_mad_cache, local->mad_priv);
2124 goto local_send_completion;
2125 }
2126
2127 /*
2128 * Defined behavior is to complete response
2129 * before request
2130 */
2131 build_smp_wc(local->wr_id, IB_LID_PERMISSIVE,
2132 0 /* pkey index */,
2133 recv_mad_agent->agent.port_num, &wc);
2134
2135 local->mad_priv->header.recv_wc.wc = &wc;
2136 local->mad_priv->header.recv_wc.mad_len =
2137 sizeof(struct ib_mad);
2138 INIT_LIST_HEAD(&local->mad_priv->header.recv_wc.recv_buf.list);
2139 local->mad_priv->header.recv_wc.recv_buf.grh = NULL;
2140 local->mad_priv->header.recv_wc.recv_buf.mad =
2141 &local->mad_priv->mad.mad;
2142 if (atomic_read(&recv_mad_agent->qp_info->snoop_count))
2143 snoop_recv(recv_mad_agent->qp_info,
2144 &local->mad_priv->header.recv_wc,
2145 IB_MAD_SNOOP_RECVS);
2146 recv_mad_agent->agent.recv_handler(
2147 &recv_mad_agent->agent,
2148 &local->mad_priv->header.recv_wc);
2149 spin_lock_irqsave(&recv_mad_agent->lock, flags);
2150 atomic_dec(&recv_mad_agent->refcount);
2151 spin_unlock_irqrestore(&recv_mad_agent->lock, flags);
2152 }
2153
2154local_send_completion:
2155 /* Complete send */
2156 mad_send_wc.status = IB_WC_SUCCESS;
2157 mad_send_wc.vendor_err = 0;
2158 mad_send_wc.wr_id = local->wr_id;
2159 if (atomic_read(&mad_agent_priv->qp_info->snoop_count))
2160 snoop_send(mad_agent_priv->qp_info, &local->send_wr,
2161 &mad_send_wc,
2162 IB_MAD_SNOOP_SEND_COMPLETIONS);
2163 mad_agent_priv->agent.send_handler(&mad_agent_priv->agent,
2164 &mad_send_wc);
2165
2166 spin_lock_irqsave(&mad_agent_priv->lock, flags);
2167 list_del(&local->completion_list);
2168 atomic_dec(&mad_agent_priv->refcount);
2169 kfree(local);
2170 }
2171 spin_unlock_irqrestore(&mad_agent_priv->lock, flags);
2172}
2173
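/*
 * Delayed-work handler for response timeouts: report every request on
 * the wait list whose absolute deadline has passed with
 * IB_WC_RESP_TIMEOUT_ERR, and re-arm the delayed work for the next
 * pending deadline, if any.
 */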
2174static void timeout_sends(void *data)
2175{
2176 struct ib_mad_agent_private *mad_agent_priv;
2177 struct ib_mad_send_wr_private *mad_send_wr;
2178 struct ib_mad_send_wc mad_send_wc;
2179 unsigned long flags, delay;
2180
2181 mad_agent_priv = (struct ib_mad_agent_private *)data;
2182
2183 mad_send_wc.status = IB_WC_RESP_TIMEOUT_ERR;
2184 mad_send_wc.vendor_err = 0;
2185
2186 spin_lock_irqsave(&mad_agent_priv->lock, flags);
2187 while (!list_empty(&mad_agent_priv->wait_list)) {
2188 mad_send_wr = list_entry(mad_agent_priv->wait_list.next,
2189 struct ib_mad_send_wr_private,
2190 agent_list);
2191
2192 if (time_after(mad_send_wr->timeout, jiffies)) {
2193 delay = mad_send_wr->timeout - jiffies;
2194 if ((long)delay <= 0)
2195 delay = 1;
2196 queue_delayed_work(mad_agent_priv->qp_info->
2197 port_priv->wq,
2198 &mad_agent_priv->timed_work, delay);
2199 break;
2200 }
2201
2202 list_del(&mad_send_wr->agent_list);
2203 spin_unlock_irqrestore(&mad_agent_priv->lock, flags);
2204
2205 mad_send_wc.wr_id = mad_send_wr->wr_id;
2206 mad_agent_priv->agent.send_handler(&mad_agent_priv->agent,
2207 &mad_send_wc);
2208
2209 kfree(mad_send_wr);
2210 atomic_dec(&mad_agent_priv->refcount);
2211 spin_lock_irqsave(&mad_agent_priv->lock, flags);
2212 }
2213 spin_unlock_irqrestore(&mad_agent_priv->lock, flags);
2214}
2215
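/*
 * CQ completion callback: may be invoked from interrupt context, so the
 * actual completion processing is deferred to the port's
 * single-threaded workqueue.
 */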
2216static void ib_mad_thread_completion_handler(struct ib_cq *cq)
2217{
2218 struct ib_mad_port_private *port_priv = cq->cq_context;
2219
2220 queue_work(port_priv->wq, &port_priv->work);
2221}
2222
2223/*
2224 * Allocate receive MADs and post receive WRs for them
2225 */
2226static int ib_mad_post_receive_mads(struct ib_mad_qp_info *qp_info,
2227 struct ib_mad_private *mad)
2228{
2229 unsigned long flags;
2230 int post, ret;
2231 struct ib_mad_private *mad_priv;
2232 struct ib_sge sg_list;
2233 struct ib_recv_wr recv_wr, *bad_recv_wr;
2234 struct ib_mad_queue *recv_queue = &qp_info->recv_queue;
2235
2236 /* Initialize common scatter list fields */
2237 sg_list.length = sizeof *mad_priv - sizeof mad_priv->header;
2238 sg_list.lkey = (*qp_info->port_priv->mr).lkey;
2239
2240 /* Initialize common receive WR fields */
2241 recv_wr.next = NULL;
2242 recv_wr.sg_list = &sg_list;
2243 recv_wr.num_sge = 1;
2244
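	/*
	 * Keep allocating, mapping, and posting receive buffers until the
	 * receive queue reaches max_active or a failure occurs.  An
	 * optional caller-supplied buffer (mad) is consumed first.
	 */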
2245 do {
2246 /* Allocate and map receive buffer */
2247 if (mad) {
2248 mad_priv = mad;
2249 mad = NULL;
2250 } else {
2251 mad_priv = kmem_cache_alloc(ib_mad_cache, GFP_KERNEL);
2252 if (!mad_priv) {
2253 printk(KERN_ERR PFX "No memory for receive buffer\n");
2254 ret = -ENOMEM;
2255 break;
2256 }
2257 }
2258 sg_list.addr = dma_map_single(qp_info->port_priv->
2259 device->dma_device,
2260 &mad_priv->grh,
2261 sizeof *mad_priv -
2262 sizeof mad_priv->header,
2263 DMA_FROM_DEVICE);
2264 pci_unmap_addr_set(&mad_priv->header, mapping, sg_list.addr);
2265 recv_wr.wr_id = (unsigned long)&mad_priv->header.mad_list;
2266 mad_priv->header.mad_list.mad_queue = recv_queue;
2267
2268 /* Post receive WR */
2269 spin_lock_irqsave(&recv_queue->lock, flags);
2270 post = (++recv_queue->count < recv_queue->max_active);
2271 list_add_tail(&mad_priv->header.mad_list.list, &recv_queue->list);
2272 spin_unlock_irqrestore(&recv_queue->lock, flags);
2273 ret = ib_post_recv(qp_info->qp, &recv_wr, &bad_recv_wr);
2274 if (ret) {
2275 spin_lock_irqsave(&recv_queue->lock, flags);
2276 list_del(&mad_priv->header.mad_list.list);
2277 recv_queue->count--;
2278 spin_unlock_irqrestore(&recv_queue->lock, flags);
2279 dma_unmap_single(qp_info->port_priv->device->dma_device,
2280 pci_unmap_addr(&mad_priv->header,
2281 mapping),
2282 sizeof *mad_priv -
2283 sizeof mad_priv->header,
2284 DMA_FROM_DEVICE);
2285 kmem_cache_free(ib_mad_cache, mad_priv);
2286 printk(KERN_ERR PFX "ib_post_recv failed: %d\n", ret);
2287 break;
2288 }
2289 } while (post);
2290
2291 return ret;
2292}
2293
2294/*
2295 * Return all the posted receive MADs
2296 */
2297static void cleanup_recv_queue(struct ib_mad_qp_info *qp_info)
2298{
2299 struct ib_mad_private_header *mad_priv_hdr;
2300 struct ib_mad_private *recv;
2301 struct ib_mad_list_head *mad_list;
2302
2303 while (!list_empty(&qp_info->recv_queue.list)) {
2304
2305 mad_list = list_entry(qp_info->recv_queue.list.next,
2306 struct ib_mad_list_head, list);
2307 mad_priv_hdr = container_of(mad_list,
2308 struct ib_mad_private_header,
2309 mad_list);
2310 recv = container_of(mad_priv_hdr, struct ib_mad_private,
2311 header);
2312
2313 /* Remove from posted receive MAD list */
2314 list_del(&mad_list->list);
2315
2316 dma_unmap_single(qp_info->port_priv->device->dma_device,
2317 pci_unmap_addr(&recv->header, mapping),
2318 sizeof(struct ib_mad_private) -
2319 sizeof(struct ib_mad_private_header),
2320 DMA_FROM_DEVICE);
2321 kmem_cache_free(ib_mad_cache, recv);
2322 }
2323
2324 qp_info->recv_queue.count = 0;
2325}
2326
2327/*
2328 * Start the port
2329 */
2330static int ib_mad_port_start(struct ib_mad_port_private *port_priv)
2331{
2332 int ret, i;
2333 struct ib_qp_attr *attr;
2334 struct ib_qp *qp;
2335
2336 attr = kmalloc(sizeof *attr, GFP_KERNEL);
2337 if (!attr) {
2338 printk(KERN_ERR PFX "Couldn't kmalloc ib_qp_attr\n");
2339 return -ENOMEM;
2340 }
2341
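	/*
	 * Drive both special QPs (QP0/SMI and QP1/GSI) through the
	 * INIT -> RTR -> RTS transitions.
	 */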
2342 for (i = 0; i < IB_MAD_QPS_CORE; i++) {
2343 qp = port_priv->qp_info[i].qp;
2344 /*
2345 * PKey index for QP1 is irrelevant but
2346 * one is needed for the Reset to Init transition
2347 */
2348 attr->qp_state = IB_QPS_INIT;
2349 attr->pkey_index = 0;
2350 attr->qkey = (qp->qp_num == 0) ? 0 : IB_QP1_QKEY;
2351 ret = ib_modify_qp(qp, attr, IB_QP_STATE |
2352 IB_QP_PKEY_INDEX | IB_QP_QKEY);
2353 if (ret) {
2354 printk(KERN_ERR PFX "Couldn't change QP%d state to "
2355 "INIT: %d\n", i, ret);
2356 goto out;
2357 }
2358
2359 attr->qp_state = IB_QPS_RTR;
2360 ret = ib_modify_qp(qp, attr, IB_QP_STATE);
2361 if (ret) {
2362 printk(KERN_ERR PFX "Couldn't change QP%d state to "
2363 "RTR: %d\n", i, ret);
2364 goto out;
2365 }
2366
2367 attr->qp_state = IB_QPS_RTS;
2368 attr->sq_psn = IB_MAD_SEND_Q_PSN;
2369 ret = ib_modify_qp(qp, attr, IB_QP_STATE | IB_QP_SQ_PSN);
2370 if (ret) {
2371 printk(KERN_ERR PFX "Couldn't change QP%d state to "
2372 "RTS: %d\n", i, ret);
2373 goto out;
2374 }
2375 }
2376
2377 ret = ib_req_notify_cq(port_priv->cq, IB_CQ_NEXT_COMP);
2378 if (ret) {
2379 printk(KERN_ERR PFX "Failed to request completion "
2380 "notification: %d\n", ret);
2381 goto out;
2382 }
2383
2384 for (i = 0; i < IB_MAD_QPS_CORE; i++) {
2385 ret = ib_mad_post_receive_mads(&port_priv->qp_info[i], NULL);
2386 if (ret) {
2387 printk(KERN_ERR PFX "Couldn't post receive WRs\n");
2388 goto out;
2389 }
2390 }
2391out:
2392 kfree(attr);
2393 return ret;
2394}
2395
2396static void qp_event_handler(struct ib_event *event, void *qp_context)
2397{
2398 struct ib_mad_qp_info *qp_info = qp_context;
2399
2400 /* It's worse than that! He's dead, Jim! */
2401 printk(KERN_ERR PFX "Fatal error (%d) on MAD QP (%d)\n",
2402 event->event, qp_info->qp->qp_num);
2403}
2404
2405static void init_mad_queue(struct ib_mad_qp_info *qp_info,
2406 struct ib_mad_queue *mad_queue)
2407{
2408 mad_queue->qp_info = qp_info;
2409 mad_queue->count = 0;
2410 spin_lock_init(&mad_queue->lock);
2411 INIT_LIST_HEAD(&mad_queue->list);
2412}
2413
2414static void init_mad_qp(struct ib_mad_port_private *port_priv,
2415 struct ib_mad_qp_info *qp_info)
2416{
2417 qp_info->port_priv = port_priv;
2418 init_mad_queue(qp_info, &qp_info->send_queue);
2419 init_mad_queue(qp_info, &qp_info->recv_queue);
2420 INIT_LIST_HEAD(&qp_info->overflow_list);
2421 spin_lock_init(&qp_info->snoop_lock);
2422 qp_info->snoop_table = NULL;
2423 qp_info->snoop_table_size = 0;
2424 atomic_set(&qp_info->snoop_count, 0);
2425}
2426
2427static int create_mad_qp(struct ib_mad_qp_info *qp_info,
2428 enum ib_qp_type qp_type)
2429{
2430 struct ib_qp_init_attr qp_init_attr;
2431 int ret;
2432
2433 memset(&qp_init_attr, 0, sizeof qp_init_attr);
2434 qp_init_attr.send_cq = qp_info->port_priv->cq;
2435 qp_init_attr.recv_cq = qp_info->port_priv->cq;
2436 qp_init_attr.sq_sig_type = IB_SIGNAL_ALL_WR;
2437 qp_init_attr.cap.max_send_wr = IB_MAD_QP_SEND_SIZE;
2438 qp_init_attr.cap.max_recv_wr = IB_MAD_QP_RECV_SIZE;
2439 qp_init_attr.cap.max_send_sge = IB_MAD_SEND_REQ_MAX_SG;
2440 qp_init_attr.cap.max_recv_sge = IB_MAD_RECV_REQ_MAX_SG;
2441 qp_init_attr.qp_type = qp_type;
2442 qp_init_attr.port_num = qp_info->port_priv->port_num;
2443 qp_init_attr.qp_context = qp_info;
2444 qp_init_attr.event_handler = qp_event_handler;
2445 qp_info->qp = ib_create_qp(qp_info->port_priv->pd, &qp_init_attr);
2446 if (IS_ERR(qp_info->qp)) {
2447 printk(KERN_ERR PFX "Couldn't create ib_mad QP%d\n",
2448 get_spl_qp_index(qp_type));
2449 ret = PTR_ERR(qp_info->qp);
2450 goto error;
2451 }
2452 /* Use minimum queue sizes unless the CQ is resized */
2453 qp_info->send_queue.max_active = IB_MAD_QP_SEND_SIZE;
2454 qp_info->recv_queue.max_active = IB_MAD_QP_RECV_SIZE;
2455 return 0;
2456
2457error:
2458 return ret;
2459}
2460
2461static void destroy_mad_qp(struct ib_mad_qp_info *qp_info)
2462{
2463 ib_destroy_qp(qp_info->qp);
2464 if (qp_info->snoop_table)
2465 kfree(qp_info->snoop_table);
2466}
2467
2468/*
2469 * Open the port
2470 * Create the QP, PD, MR, and CQ if needed
2471 */
2472static int ib_mad_port_open(struct ib_device *device,
2473 int port_num)
2474{
2475 int ret, cq_size;
2476 struct ib_mad_port_private *port_priv;
2477 unsigned long flags;
2478 char name[sizeof "ib_mad123"];
2479
2480 /* First, check if port already open at MAD layer */
2481 port_priv = ib_get_mad_port(device, port_num);
2482 if (port_priv) {
2483 printk(KERN_DEBUG PFX "%s port %d already open\n",
2484 device->name, port_num);
2485 return 0;
2486 }
2487
2488 /* Create new device info */
2489 port_priv = kmalloc(sizeof *port_priv, GFP_KERNEL);
2490 if (!port_priv) {
2491 printk(KERN_ERR PFX "No memory for ib_mad_port_private\n");
2492 return -ENOMEM;
2493 }
2494 memset(port_priv, 0, sizeof *port_priv);
2495 port_priv->device = device;
2496 port_priv->port_num = port_num;
2497 spin_lock_init(&port_priv->reg_lock);
2498 INIT_LIST_HEAD(&port_priv->agent_list);
2499 init_mad_qp(port_priv, &port_priv->qp_info[0]);
2500 init_mad_qp(port_priv, &port_priv->qp_info[1]);
2501
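	/*
	 * A single CQ is shared by both MAD QPs, sized for one entry per
	 * send and receive WR on each.
	 */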
2502 cq_size = (IB_MAD_QP_SEND_SIZE + IB_MAD_QP_RECV_SIZE) * 2;
2503 port_priv->cq = ib_create_cq(port_priv->device,
2504 (ib_comp_handler)
2505 ib_mad_thread_completion_handler,
2506 NULL, port_priv, cq_size);
2507 if (IS_ERR(port_priv->cq)) {
2508 printk(KERN_ERR PFX "Couldn't create ib_mad CQ\n");
2509 ret = PTR_ERR(port_priv->cq);
2510 goto error3;
2511 }
2512
2513 port_priv->pd = ib_alloc_pd(device);
2514 if (IS_ERR(port_priv->pd)) {
2515 printk(KERN_ERR PFX "Couldn't create ib_mad PD\n");
2516 ret = PTR_ERR(port_priv->pd);
2517 goto error4;
2518 }
2519
2520 port_priv->mr = ib_get_dma_mr(port_priv->pd, IB_ACCESS_LOCAL_WRITE);
2521 if (IS_ERR(port_priv->mr)) {
2522 printk(KERN_ERR PFX "Couldn't get ib_mad DMA MR\n");
2523 ret = PTR_ERR(port_priv->mr);
2524 goto error5;
2525 }
2526
2527 ret = create_mad_qp(&port_priv->qp_info[0], IB_QPT_SMI);
2528 if (ret)
2529 goto error6;
2530 ret = create_mad_qp(&port_priv->qp_info[1], IB_QPT_GSI);
2531 if (ret)
2532 goto error7;
2533
2534 snprintf(name, sizeof name, "ib_mad%d", port_num);
2535 port_priv->wq = create_singlethread_workqueue(name);
2536 if (!port_priv->wq) {
2537 ret = -ENOMEM;
2538 goto error8;
2539 }
2540 INIT_WORK(&port_priv->work, ib_mad_completion_handler, port_priv);
2541
2542 ret = ib_mad_port_start(port_priv);
2543 if (ret) {
2544 printk(KERN_ERR PFX "Couldn't start port\n");
2545 goto error9;
2546 }
2547
2548 spin_lock_irqsave(&ib_mad_port_list_lock, flags);
2549 list_add_tail(&port_priv->port_list, &ib_mad_port_list);
2550 spin_unlock_irqrestore(&ib_mad_port_list_lock, flags);
2551 return 0;
2552
2553error9:
2554 destroy_workqueue(port_priv->wq);
2555error8:
2556 destroy_mad_qp(&port_priv->qp_info[1]);
2557error7:
2558 destroy_mad_qp(&port_priv->qp_info[0]);
2559error6:
2560 ib_dereg_mr(port_priv->mr);
2561error5:
2562 ib_dealloc_pd(port_priv->pd);
2563error4:
2564 ib_destroy_cq(port_priv->cq);
2565 cleanup_recv_queue(&port_priv->qp_info[1]);
2566 cleanup_recv_queue(&port_priv->qp_info[0]);
2567error3:
2568 kfree(port_priv);
2569
2570 return ret;
2571}
2572
2573/*
2574 * Close the port
2575 * If there are no classes using the port, free the port
2576 * resources (CQ, MR, PD, QP) and remove the port's info structure
2577 */
2578static int ib_mad_port_close(struct ib_device *device, int port_num)
2579{
2580 struct ib_mad_port_private *port_priv;
2581 unsigned long flags;
2582
2583 spin_lock_irqsave(&ib_mad_port_list_lock, flags);
2584 port_priv = __ib_get_mad_port(device, port_num);
2585 if (port_priv == NULL) {
2586 spin_unlock_irqrestore(&ib_mad_port_list_lock, flags);
2587 printk(KERN_ERR PFX "Port %d not found\n", port_num);
2588 return -ENODEV;
2589 }
2590 list_del(&port_priv->port_list);
2591 spin_unlock_irqrestore(&ib_mad_port_list_lock, flags);
2592
2593 /* Stop processing completions. */
2594 flush_workqueue(port_priv->wq);
2595 destroy_workqueue(port_priv->wq);
2596 destroy_mad_qp(&port_priv->qp_info[1]);
2597 destroy_mad_qp(&port_priv->qp_info[0]);
2598 ib_dereg_mr(port_priv->mr);
2599 ib_dealloc_pd(port_priv->pd);
2600 ib_destroy_cq(port_priv->cq);
2601 cleanup_recv_queue(&port_priv->qp_info[1]);
2602 cleanup_recv_queue(&port_priv->qp_info[0]);
2603 /* XXX: Handle deallocation of MAD registration tables */
2604
2605 kfree(port_priv);
2606
2607 return 0;
2608}
2609
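/*
 * IB client "add" callback: open every port of the new device at the
 * MAD layer and then for the agents.  Switches expose only port 0 for
 * management; CAs and routers use ports 1..phys_port_cnt.  On failure,
 * ports already opened are closed again in reverse order.
 */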
2610static void ib_mad_init_device(struct ib_device *device)
2611{
2612 int ret, num_ports, cur_port, i, ret2;
2613
2614 if (device->node_type == IB_NODE_SWITCH) {
2615 num_ports = 1;
2616 cur_port = 0;
2617 } else {
2618 num_ports = device->phys_port_cnt;
2619 cur_port = 1;
2620 }
2621 for (i = 0; i < num_ports; i++, cur_port++) {
2622 ret = ib_mad_port_open(device, cur_port);
2623 if (ret) {
2624 printk(KERN_ERR PFX "Couldn't open %s port %d\n",
2625 device->name, cur_port);
2626 goto error_device_open;
2627 }
2628 ret = ib_agent_port_open(device, cur_port);
2629 if (ret) {
2630 printk(KERN_ERR PFX "Couldn't open %s port %d "
2631 "for agents\n",
2632 device->name, cur_port);
2633 goto error_device_open;
2634 }
2635 }
2636
2637 goto error_device_query;
2638
2639error_device_open:
2640 while (i > 0) {
2641 cur_port--;
2642 ret2 = ib_agent_port_close(device, cur_port);
2643 if (ret2) {
2644 printk(KERN_ERR PFX "Couldn't close %s port %d "
2645 "for agents\n",
2646 device->name, cur_port);
2647 }
2648 ret2 = ib_mad_port_close(device, cur_port);
2649 if (ret2) {
2650 printk(KERN_ERR PFX "Couldn't close %s port %d\n",
2651 device->name, cur_port);
2652 }
2653 i--;
2654 }
2655
2656error_device_query:
2657 return;
2658}
2659
2660static void ib_mad_remove_device(struct ib_device *device)
2661{
2662 int ret = 0, i, num_ports, cur_port, ret2;
2663
2664 if (device->node_type == IB_NODE_SWITCH) {
2665 num_ports = 1;
2666 cur_port = 0;
2667 } else {
2668 num_ports = device->phys_port_cnt;
2669 cur_port = 1;
2670 }
2671 for (i = 0; i < num_ports; i++, cur_port++) {
2672 ret2 = ib_agent_port_close(device, cur_port);
2673 if (ret2) {
2674 printk(KERN_ERR PFX "Couldn't close %s port %d "
2675 "for agents\n",
2676 device->name, cur_port);
2677 if (!ret)
2678 ret = ret2;
2679 }
2680 ret2 = ib_mad_port_close(device, cur_port);
2681 if (ret2) {
2682 printk(KERN_ERR PFX "Couldn't close %s port %d\n",
2683 device->name, cur_port);
2684 if (!ret)
2685 ret = ret2;
2686 }
2687 }
2688}
2689
2690static struct ib_client mad_client = {
2691 .name = "mad",
2692 .add = ib_mad_init_device,
2693 .remove = ib_mad_remove_device
2694};
2695
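/*
 * Module init: create the slab cache used for receive and local MAD
 * buffers, then register as an IB client so ports are opened as
 * devices appear.
 */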
2696static int __init ib_mad_init_module(void)
2697{
2698 int ret;
2699
2700 spin_lock_init(&ib_mad_port_list_lock);
2701 spin_lock_init(&ib_agent_port_list_lock);
2702
2703 ib_mad_cache = kmem_cache_create("ib_mad",
2704 sizeof(struct ib_mad_private),
2705 0,
2706 SLAB_HWCACHE_ALIGN,
2707 NULL,
2708 NULL);
2709 if (!ib_mad_cache) {
2710 printk(KERN_ERR PFX "Couldn't create ib_mad cache\n");
2711 ret = -ENOMEM;
2712 goto error1;
2713 }
2714
2715 INIT_LIST_HEAD(&ib_mad_port_list);
2716
2717 if (ib_register_client(&mad_client)) {
2718 printk(KERN_ERR PFX "Couldn't register ib_mad client\n");
2719 ret = -EINVAL;
2720 goto error2;
2721 }
2722
2723 return 0;
2724
2725error2:
2726 kmem_cache_destroy(ib_mad_cache);
2727error1:
2728 return ret;
2729}
2730
2731static void __exit ib_mad_cleanup_module(void)
2732{
2733 ib_unregister_client(&mad_client);
2734
2735 if (kmem_cache_destroy(ib_mad_cache)) {
2736 printk(KERN_DEBUG PFX "Failed to destroy ib_mad cache\n");
2737 }
2738}
2739
2740module_init(ib_mad_init_module);
2741module_exit(ib_mad_cleanup_module);