2 * Copyright (c) 2004-2007 Voltaire, Inc. All rights reserved.
3 * Copyright (c) 2005 Intel Corporation. All rights reserved.
4 * Copyright (c) 2005 Mellanox Technologies Ltd. All rights reserved.
5 * Copyright (c) 2009 HNR Consulting. All rights reserved.
7 * This software is available to you under a choice of one of two
8 * licenses. You may choose to be licensed under the terms of the GNU
9 * General Public License (GPL) Version 2, available from the file
10 * COPYING in the main directory of this source tree, or the
11 * OpenIB.org BSD license below:
13 * Redistribution and use in source and binary forms, with or
14 * without modification, are permitted provided that the following
17 * - Redistributions of source code must retain the above
18 * copyright notice, this list of conditions and the following
21 * - Redistributions in binary form must reproduce the above
22 * copyright notice, this list of conditions and the following
23 * disclaimer in the documentation and/or other materials
24 * provided with the distribution.
26 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
27 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
28 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
29 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
30 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
31 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
32 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
37 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
39 #include <linux/dma-mapping.h>
40 #include <linux/slab.h>
41 #include <linux/module.h>
42 #include <rdma/ib_cache.h>
49 MODULE_LICENSE("Dual BSD/GPL");
50 MODULE_DESCRIPTION("kernel IB MAD API");
51 MODULE_AUTHOR("Hal Rosenstock");
52 MODULE_AUTHOR("Sean Hefty");
54 static int mad_sendq_size = IB_MAD_QP_SEND_SIZE;
55 static int mad_recvq_size = IB_MAD_QP_RECV_SIZE;
57 module_param_named(send_queue_size, mad_sendq_size, int, 0444);
58 MODULE_PARM_DESC(send_queue_size, "Size of send queue in number of work requests");
59 module_param_named(recv_queue_size, mad_recvq_size, int, 0444);
60 MODULE_PARM_DESC(recv_queue_size, "Size of receive queue in number of work requests");
62 static struct kmem_cache *ib_mad_cache;
64 static struct list_head ib_mad_port_list;
65 static u32 ib_mad_client_id = 0;
68 static DEFINE_SPINLOCK(ib_mad_port_list_lock);
70 /* Forward declarations */
71 static int method_in_use(struct ib_mad_mgmt_method_table **method,
72 struct ib_mad_reg_req *mad_reg_req);
73 static void remove_mad_reg_req(struct ib_mad_agent_private *priv);
74 static struct ib_mad_agent_private *find_mad_agent(
75 struct ib_mad_port_private *port_priv,
77 static int ib_mad_post_receive_mads(struct ib_mad_qp_info *qp_info,
78 struct ib_mad_private *mad);
79 static void cancel_mads(struct ib_mad_agent_private *mad_agent_priv);
80 static void timeout_sends(struct work_struct *work);
81 static void local_completions(struct work_struct *work);
82 static int add_nonoui_reg_req(struct ib_mad_reg_req *mad_reg_req,
83 struct ib_mad_agent_private *agent_priv,
85 static int add_oui_reg_req(struct ib_mad_reg_req *mad_reg_req,
86 struct ib_mad_agent_private *agent_priv);
89 * Returns a ib_mad_port_private structure or NULL for a device/port
90 * Assumes ib_mad_port_list_lock is being held
92 static inline struct ib_mad_port_private *
93 __ib_get_mad_port(struct ib_device *device, int port_num)
95 struct ib_mad_port_private *entry;
97 list_for_each_entry(entry, &ib_mad_port_list, port_list) {
98 if (entry->device == device && entry->port_num == port_num)
105 * Wrapper function to return a ib_mad_port_private structure or NULL
108 static inline struct ib_mad_port_private *
109 ib_get_mad_port(struct ib_device *device, int port_num)
111 struct ib_mad_port_private *entry;
114 spin_lock_irqsave(&ib_mad_port_list_lock, flags);
115 entry = __ib_get_mad_port(device, port_num);
116 spin_unlock_irqrestore(&ib_mad_port_list_lock, flags);
121 static inline u8 convert_mgmt_class(u8 mgmt_class)
123 /* Alias IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE to 0 */
124 return mgmt_class == IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE ?
128 static int get_spl_qp_index(enum ib_qp_type qp_type)
141 static int vendor_class_index(u8 mgmt_class)
143 return mgmt_class - IB_MGMT_CLASS_VENDOR_RANGE2_START;
146 static int is_vendor_class(u8 mgmt_class)
148 if ((mgmt_class < IB_MGMT_CLASS_VENDOR_RANGE2_START) ||
149 (mgmt_class > IB_MGMT_CLASS_VENDOR_RANGE2_END))
154 static int is_vendor_oui(char *oui)
156 if (oui[0] || oui[1] || oui[2])
161 static int is_vendor_method_in_use(
162 struct ib_mad_mgmt_vendor_class *vendor_class,
163 struct ib_mad_reg_req *mad_reg_req)
165 struct ib_mad_mgmt_method_table *method;
168 for (i = 0; i < MAX_MGMT_OUI; i++) {
169 if (!memcmp(vendor_class->oui[i], mad_reg_req->oui, 3)) {
170 method = vendor_class->method_table[i];
172 if (method_in_use(&method, mad_reg_req))
182 int ib_response_mad(struct ib_mad *mad)
184 return ((mad->mad_hdr.method & IB_MGMT_METHOD_RESP) ||
185 (mad->mad_hdr.method == IB_MGMT_METHOD_TRAP_REPRESS) ||
186 ((mad->mad_hdr.mgmt_class == IB_MGMT_CLASS_BM) &&
187 (mad->mad_hdr.attr_mod & IB_BM_ATTR_MOD_RESP)));
189 EXPORT_SYMBOL(ib_response_mad);
192 * ib_register_mad_agent - Register to send/receive MADs
194 struct ib_mad_agent *ib_register_mad_agent(struct ib_device *device,
196 enum ib_qp_type qp_type,
197 struct ib_mad_reg_req *mad_reg_req,
199 ib_mad_send_handler send_handler,
200 ib_mad_recv_handler recv_handler,
202 u32 registration_flags)
204 struct ib_mad_port_private *port_priv;
205 struct ib_mad_agent *ret = ERR_PTR(-EINVAL);
206 struct ib_mad_agent_private *mad_agent_priv;
207 struct ib_mad_reg_req *reg_req = NULL;
208 struct ib_mad_mgmt_class_table *class;
209 struct ib_mad_mgmt_vendor_class_table *vendor;
210 struct ib_mad_mgmt_vendor_class *vendor_class;
211 struct ib_mad_mgmt_method_table *method;
214 u8 mgmt_class, vclass;
216 /* Validate parameters */
217 qpn = get_spl_qp_index(qp_type);
219 dev_notice(&device->dev,
220 "ib_register_mad_agent: invalid QP Type %d\n",
225 if (rmpp_version && rmpp_version != IB_MGMT_RMPP_VERSION) {
226 dev_notice(&device->dev,
227 "ib_register_mad_agent: invalid RMPP Version %u\n",
232 /* Validate MAD registration request if supplied */
234 if (mad_reg_req->mgmt_class_version >= MAX_MGMT_VERSION) {
235 dev_notice(&device->dev,
236 "ib_register_mad_agent: invalid Class Version %u\n",
237 mad_reg_req->mgmt_class_version);
241 dev_notice(&device->dev,
242 "ib_register_mad_agent: no recv_handler\n");
245 if (mad_reg_req->mgmt_class >= MAX_MGMT_CLASS) {
247 * IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE is the only
248 * one in this range currently allowed
250 if (mad_reg_req->mgmt_class !=
251 IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE) {
252 dev_notice(&device->dev,
253 "ib_register_mad_agent: Invalid Mgmt Class 0x%x\n",
254 mad_reg_req->mgmt_class);
257 } else if (mad_reg_req->mgmt_class == 0) {
259 * Class 0 is reserved in IBA and is used for
260 * aliasing of IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE
262 dev_notice(&device->dev,
263 "ib_register_mad_agent: Invalid Mgmt Class 0\n");
265 } else if (is_vendor_class(mad_reg_req->mgmt_class)) {
267 * If class is in "new" vendor range,
268 * ensure supplied OUI is not zero
270 if (!is_vendor_oui(mad_reg_req->oui)) {
271 dev_notice(&device->dev,
272 "ib_register_mad_agent: No OUI specified for class 0x%x\n",
273 mad_reg_req->mgmt_class);
277 /* Make sure class supplied is consistent with RMPP */
278 if (!ib_is_mad_class_rmpp(mad_reg_req->mgmt_class)) {
280 dev_notice(&device->dev,
281 "ib_register_mad_agent: RMPP version for non-RMPP class 0x%x\n",
282 mad_reg_req->mgmt_class);
286 /* Make sure class supplied is consistent with QP type */
287 if (qp_type == IB_QPT_SMI) {
288 if ((mad_reg_req->mgmt_class !=
289 IB_MGMT_CLASS_SUBN_LID_ROUTED) &&
290 (mad_reg_req->mgmt_class !=
291 IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE)) {
292 dev_notice(&device->dev,
293 "ib_register_mad_agent: Invalid SM QP type: class 0x%x\n",
294 mad_reg_req->mgmt_class);
298 if ((mad_reg_req->mgmt_class ==
299 IB_MGMT_CLASS_SUBN_LID_ROUTED) ||
300 (mad_reg_req->mgmt_class ==
301 IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE)) {
302 dev_notice(&device->dev,
303 "ib_register_mad_agent: Invalid GS QP type: class 0x%x\n",
304 mad_reg_req->mgmt_class);
309 /* No registration request supplied */
314 /* Validate device and port */
315 port_priv = ib_get_mad_port(device, port_num);
317 dev_notice(&device->dev, "ib_register_mad_agent: Invalid port\n");
318 ret = ERR_PTR(-ENODEV);
322 /* Verify the QP requested is supported. For example, Ethernet devices
323 * will not have QP0 */
324 if (!port_priv->qp_info[qpn].qp) {
325 dev_notice(&device->dev,
326 "ib_register_mad_agent: QP %d not supported\n", qpn);
327 ret = ERR_PTR(-EPROTONOSUPPORT);
331 /* Allocate structures */
332 mad_agent_priv = kzalloc(sizeof *mad_agent_priv, GFP_KERNEL);
333 if (!mad_agent_priv) {
334 ret = ERR_PTR(-ENOMEM);
338 mad_agent_priv->agent.mr = ib_get_dma_mr(port_priv->qp_info[qpn].qp->pd,
339 IB_ACCESS_LOCAL_WRITE);
340 if (IS_ERR(mad_agent_priv->agent.mr)) {
341 ret = ERR_PTR(-ENOMEM);
346 reg_req = kmemdup(mad_reg_req, sizeof *reg_req, GFP_KERNEL);
348 ret = ERR_PTR(-ENOMEM);
353 /* Now, fill in the various structures */
354 mad_agent_priv->qp_info = &port_priv->qp_info[qpn];
355 mad_agent_priv->reg_req = reg_req;
356 mad_agent_priv->agent.rmpp_version = rmpp_version;
357 mad_agent_priv->agent.device = device;
358 mad_agent_priv->agent.recv_handler = recv_handler;
359 mad_agent_priv->agent.send_handler = send_handler;
360 mad_agent_priv->agent.context = context;
361 mad_agent_priv->agent.qp = port_priv->qp_info[qpn].qp;
362 mad_agent_priv->agent.port_num = port_num;
363 mad_agent_priv->agent.flags = registration_flags;
364 spin_lock_init(&mad_agent_priv->lock);
365 INIT_LIST_HEAD(&mad_agent_priv->send_list);
366 INIT_LIST_HEAD(&mad_agent_priv->wait_list);
367 INIT_LIST_HEAD(&mad_agent_priv->done_list);
368 INIT_LIST_HEAD(&mad_agent_priv->rmpp_list);
369 INIT_DELAYED_WORK(&mad_agent_priv->timed_work, timeout_sends);
370 INIT_LIST_HEAD(&mad_agent_priv->local_list);
371 INIT_WORK(&mad_agent_priv->local_work, local_completions);
372 atomic_set(&mad_agent_priv->refcount, 1);
373 init_completion(&mad_agent_priv->comp);
375 spin_lock_irqsave(&port_priv->reg_lock, flags);
376 mad_agent_priv->agent.hi_tid = ++ib_mad_client_id;
379 * Make sure MAD registration (if supplied)
380 * is non overlapping with any existing ones
383 mgmt_class = convert_mgmt_class(mad_reg_req->mgmt_class);
384 if (!is_vendor_class(mgmt_class)) {
385 class = port_priv->version[mad_reg_req->
386 mgmt_class_version].class;
388 method = class->method_table[mgmt_class];
390 if (method_in_use(&method,
395 ret2 = add_nonoui_reg_req(mad_reg_req, mad_agent_priv,
398 /* "New" vendor class range */
399 vendor = port_priv->version[mad_reg_req->
400 mgmt_class_version].vendor;
402 vclass = vendor_class_index(mgmt_class);
403 vendor_class = vendor->vendor_class[vclass];
405 if (is_vendor_method_in_use(
411 ret2 = add_oui_reg_req(mad_reg_req, mad_agent_priv);
419 /* Add mad agent into port's agent list */
420 list_add_tail(&mad_agent_priv->agent_list, &port_priv->agent_list);
421 spin_unlock_irqrestore(&port_priv->reg_lock, flags);
423 return &mad_agent_priv->agent;
426 spin_unlock_irqrestore(&port_priv->reg_lock, flags);
429 ib_dereg_mr(mad_agent_priv->agent.mr);
431 kfree(mad_agent_priv);
435 EXPORT_SYMBOL(ib_register_mad_agent);
437 static inline int is_snooping_sends(int mad_snoop_flags)
439 return (mad_snoop_flags &
440 (/*IB_MAD_SNOOP_POSTED_SENDS |
441 IB_MAD_SNOOP_RMPP_SENDS |*/
442 IB_MAD_SNOOP_SEND_COMPLETIONS /*|
443 IB_MAD_SNOOP_RMPP_SEND_COMPLETIONS*/));
446 static inline int is_snooping_recvs(int mad_snoop_flags)
448 return (mad_snoop_flags &
449 (IB_MAD_SNOOP_RECVS /*|
450 IB_MAD_SNOOP_RMPP_RECVS*/));
453 static int register_snoop_agent(struct ib_mad_qp_info *qp_info,
454 struct ib_mad_snoop_private *mad_snoop_priv)
456 struct ib_mad_snoop_private **new_snoop_table;
460 spin_lock_irqsave(&qp_info->snoop_lock, flags);
461 /* Check for empty slot in array. */
462 for (i = 0; i < qp_info->snoop_table_size; i++)
463 if (!qp_info->snoop_table[i])
466 if (i == qp_info->snoop_table_size) {
468 new_snoop_table = krealloc(qp_info->snoop_table,
469 sizeof mad_snoop_priv *
470 (qp_info->snoop_table_size + 1),
472 if (!new_snoop_table) {
477 qp_info->snoop_table = new_snoop_table;
478 qp_info->snoop_table_size++;
480 qp_info->snoop_table[i] = mad_snoop_priv;
481 atomic_inc(&qp_info->snoop_count);
483 spin_unlock_irqrestore(&qp_info->snoop_lock, flags);
487 struct ib_mad_agent *ib_register_mad_snoop(struct ib_device *device,
489 enum ib_qp_type qp_type,
491 ib_mad_snoop_handler snoop_handler,
492 ib_mad_recv_handler recv_handler,
495 struct ib_mad_port_private *port_priv;
496 struct ib_mad_agent *ret;
497 struct ib_mad_snoop_private *mad_snoop_priv;
500 /* Validate parameters */
501 if ((is_snooping_sends(mad_snoop_flags) && !snoop_handler) ||
502 (is_snooping_recvs(mad_snoop_flags) && !recv_handler)) {
503 ret = ERR_PTR(-EINVAL);
506 qpn = get_spl_qp_index(qp_type);
508 ret = ERR_PTR(-EINVAL);
511 port_priv = ib_get_mad_port(device, port_num);
513 ret = ERR_PTR(-ENODEV);
516 /* Allocate structures */
517 mad_snoop_priv = kzalloc(sizeof *mad_snoop_priv, GFP_KERNEL);
518 if (!mad_snoop_priv) {
519 ret = ERR_PTR(-ENOMEM);
523 /* Now, fill in the various structures */
524 mad_snoop_priv->qp_info = &port_priv->qp_info[qpn];
525 mad_snoop_priv->agent.device = device;
526 mad_snoop_priv->agent.recv_handler = recv_handler;
527 mad_snoop_priv->agent.snoop_handler = snoop_handler;
528 mad_snoop_priv->agent.context = context;
529 mad_snoop_priv->agent.qp = port_priv->qp_info[qpn].qp;
530 mad_snoop_priv->agent.port_num = port_num;
531 mad_snoop_priv->mad_snoop_flags = mad_snoop_flags;
532 init_completion(&mad_snoop_priv->comp);
533 mad_snoop_priv->snoop_index = register_snoop_agent(
534 &port_priv->qp_info[qpn],
536 if (mad_snoop_priv->snoop_index < 0) {
537 ret = ERR_PTR(mad_snoop_priv->snoop_index);
541 atomic_set(&mad_snoop_priv->refcount, 1);
542 return &mad_snoop_priv->agent;
545 kfree(mad_snoop_priv);
549 EXPORT_SYMBOL(ib_register_mad_snoop);
551 static inline void deref_mad_agent(struct ib_mad_agent_private *mad_agent_priv)
553 if (atomic_dec_and_test(&mad_agent_priv->refcount))
554 complete(&mad_agent_priv->comp);
557 static inline void deref_snoop_agent(struct ib_mad_snoop_private *mad_snoop_priv)
559 if (atomic_dec_and_test(&mad_snoop_priv->refcount))
560 complete(&mad_snoop_priv->comp);
563 static void unregister_mad_agent(struct ib_mad_agent_private *mad_agent_priv)
565 struct ib_mad_port_private *port_priv;
568 /* Note that we could still be handling received MADs */
571 * Canceling all sends results in dropping received response
572 * MADs, preventing us from queuing additional work
574 cancel_mads(mad_agent_priv);
575 port_priv = mad_agent_priv->qp_info->port_priv;
576 cancel_delayed_work(&mad_agent_priv->timed_work);
578 spin_lock_irqsave(&port_priv->reg_lock, flags);
579 remove_mad_reg_req(mad_agent_priv);
580 list_del(&mad_agent_priv->agent_list);
581 spin_unlock_irqrestore(&port_priv->reg_lock, flags);
583 flush_workqueue(port_priv->wq);
584 ib_cancel_rmpp_recvs(mad_agent_priv);
586 deref_mad_agent(mad_agent_priv);
587 wait_for_completion(&mad_agent_priv->comp);
589 kfree(mad_agent_priv->reg_req);
590 ib_dereg_mr(mad_agent_priv->agent.mr);
591 kfree(mad_agent_priv);
594 static void unregister_mad_snoop(struct ib_mad_snoop_private *mad_snoop_priv)
596 struct ib_mad_qp_info *qp_info;
599 qp_info = mad_snoop_priv->qp_info;
600 spin_lock_irqsave(&qp_info->snoop_lock, flags);
601 qp_info->snoop_table[mad_snoop_priv->snoop_index] = NULL;
602 atomic_dec(&qp_info->snoop_count);
603 spin_unlock_irqrestore(&qp_info->snoop_lock, flags);
605 deref_snoop_agent(mad_snoop_priv);
606 wait_for_completion(&mad_snoop_priv->comp);
608 kfree(mad_snoop_priv);
612 * ib_unregister_mad_agent - Unregisters a client from using MAD services
614 int ib_unregister_mad_agent(struct ib_mad_agent *mad_agent)
616 struct ib_mad_agent_private *mad_agent_priv;
617 struct ib_mad_snoop_private *mad_snoop_priv;
619 /* If the TID is zero, the agent can only snoop. */
620 if (mad_agent->hi_tid) {
621 mad_agent_priv = container_of(mad_agent,
622 struct ib_mad_agent_private,
624 unregister_mad_agent(mad_agent_priv);
626 mad_snoop_priv = container_of(mad_agent,
627 struct ib_mad_snoop_private,
629 unregister_mad_snoop(mad_snoop_priv);
633 EXPORT_SYMBOL(ib_unregister_mad_agent);
635 static void dequeue_mad(struct ib_mad_list_head *mad_list)
637 struct ib_mad_queue *mad_queue;
640 BUG_ON(!mad_list->mad_queue);
641 mad_queue = mad_list->mad_queue;
642 spin_lock_irqsave(&mad_queue->lock, flags);
643 list_del(&mad_list->list);
645 spin_unlock_irqrestore(&mad_queue->lock, flags);
648 static void snoop_send(struct ib_mad_qp_info *qp_info,
649 struct ib_mad_send_buf *send_buf,
650 struct ib_mad_send_wc *mad_send_wc,
653 struct ib_mad_snoop_private *mad_snoop_priv;
657 spin_lock_irqsave(&qp_info->snoop_lock, flags);
658 for (i = 0; i < qp_info->snoop_table_size; i++) {
659 mad_snoop_priv = qp_info->snoop_table[i];
660 if (!mad_snoop_priv ||
661 !(mad_snoop_priv->mad_snoop_flags & mad_snoop_flags))
664 atomic_inc(&mad_snoop_priv->refcount);
665 spin_unlock_irqrestore(&qp_info->snoop_lock, flags);
666 mad_snoop_priv->agent.snoop_handler(&mad_snoop_priv->agent,
667 send_buf, mad_send_wc);
668 deref_snoop_agent(mad_snoop_priv);
669 spin_lock_irqsave(&qp_info->snoop_lock, flags);
671 spin_unlock_irqrestore(&qp_info->snoop_lock, flags);
674 static void snoop_recv(struct ib_mad_qp_info *qp_info,
675 struct ib_mad_recv_wc *mad_recv_wc,
678 struct ib_mad_snoop_private *mad_snoop_priv;
682 spin_lock_irqsave(&qp_info->snoop_lock, flags);
683 for (i = 0; i < qp_info->snoop_table_size; i++) {
684 mad_snoop_priv = qp_info->snoop_table[i];
685 if (!mad_snoop_priv ||
686 !(mad_snoop_priv->mad_snoop_flags & mad_snoop_flags))
689 atomic_inc(&mad_snoop_priv->refcount);
690 spin_unlock_irqrestore(&qp_info->snoop_lock, flags);
691 mad_snoop_priv->agent.recv_handler(&mad_snoop_priv->agent,
693 deref_snoop_agent(mad_snoop_priv);
694 spin_lock_irqsave(&qp_info->snoop_lock, flags);
696 spin_unlock_irqrestore(&qp_info->snoop_lock, flags);
699 static void build_smp_wc(struct ib_qp *qp,
700 u64 wr_id, u16 slid, u16 pkey_index, u8 port_num,
703 memset(wc, 0, sizeof *wc);
705 wc->status = IB_WC_SUCCESS;
706 wc->opcode = IB_WC_RECV;
707 wc->pkey_index = pkey_index;
708 wc->byte_len = sizeof(struct ib_mad) + sizeof(struct ib_grh);
713 wc->dlid_path_bits = 0;
714 wc->port_num = port_num;
718 * Return 0 if SMP is to be sent
719 * Return 1 if SMP was consumed locally (whether or not solicited)
720 * Return < 0 if error
722 static int handle_outgoing_dr_smp(struct ib_mad_agent_private *mad_agent_priv,
723 struct ib_mad_send_wr_private *mad_send_wr)
726 struct ib_smp *smp = mad_send_wr->send_buf.mad;
728 struct ib_mad_local_private *local;
729 struct ib_mad_private *mad_priv;
730 struct ib_mad_port_private *port_priv;
731 struct ib_mad_agent_private *recv_mad_agent = NULL;
732 struct ib_device *device = mad_agent_priv->agent.device;
735 struct ib_send_wr *send_wr = &mad_send_wr->send_wr;
737 if (device->node_type == RDMA_NODE_IB_SWITCH &&
738 smp->mgmt_class == IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE)
739 port_num = send_wr->wr.ud.port_num;
741 port_num = mad_agent_priv->agent.port_num;
744 * Directed route handling starts if the initial LID routed part of
745 * a request or the ending LID routed part of a response is empty.
746 * If we are at the start of the LID routed part, don't update the
747 * hop_ptr or hop_cnt. See section 14.2.2, Vol 1 IB spec.
749 if ((ib_get_smp_direction(smp) ? smp->dr_dlid : smp->dr_slid) ==
751 smi_handle_dr_smp_send(smp, device->node_type, port_num) ==
754 dev_err(&device->dev, "Invalid directed route\n");
758 /* Check to post send on QP or process locally */
759 if (smi_check_local_smp(smp, device) == IB_SMI_DISCARD &&
760 smi_check_local_returning_smp(smp, device) == IB_SMI_DISCARD)
763 local = kmalloc(sizeof *local, GFP_ATOMIC);
766 dev_err(&device->dev, "No memory for ib_mad_local_private\n");
769 local->mad_priv = NULL;
770 local->recv_mad_agent = NULL;
771 mad_priv = kmem_cache_alloc(ib_mad_cache, GFP_ATOMIC);
774 dev_err(&device->dev, "No memory for local response MAD\n");
779 build_smp_wc(mad_agent_priv->agent.qp,
780 send_wr->wr_id, be16_to_cpu(smp->dr_slid),
781 send_wr->wr.ud.pkey_index,
782 send_wr->wr.ud.port_num, &mad_wc);
784 /* No GRH for DR SMP */
785 ret = device->process_mad(device, 0, port_num, &mad_wc, NULL,
786 (struct ib_mad *)smp,
787 (struct ib_mad *)&mad_priv->mad);
790 case IB_MAD_RESULT_SUCCESS | IB_MAD_RESULT_REPLY:
791 if (ib_response_mad(&mad_priv->mad.mad) &&
792 mad_agent_priv->agent.recv_handler) {
793 local->mad_priv = mad_priv;
794 local->recv_mad_agent = mad_agent_priv;
796 * Reference MAD agent until receive
797 * side of local completion handled
799 atomic_inc(&mad_agent_priv->refcount);
801 kmem_cache_free(ib_mad_cache, mad_priv);
803 case IB_MAD_RESULT_SUCCESS | IB_MAD_RESULT_CONSUMED:
804 kmem_cache_free(ib_mad_cache, mad_priv);
806 case IB_MAD_RESULT_SUCCESS:
807 /* Treat like an incoming receive MAD */
808 port_priv = ib_get_mad_port(mad_agent_priv->agent.device,
809 mad_agent_priv->agent.port_num);
811 memcpy(&mad_priv->mad.mad, smp, sizeof(struct ib_mad));
812 recv_mad_agent = find_mad_agent(port_priv,
815 if (!port_priv || !recv_mad_agent) {
817 * No receiving agent so drop packet and
818 * generate send completion.
820 kmem_cache_free(ib_mad_cache, mad_priv);
823 local->mad_priv = mad_priv;
824 local->recv_mad_agent = recv_mad_agent;
827 kmem_cache_free(ib_mad_cache, mad_priv);
833 local->mad_send_wr = mad_send_wr;
834 /* Reference MAD agent until send side of local completion handled */
835 atomic_inc(&mad_agent_priv->refcount);
836 /* Queue local completion to local list */
837 spin_lock_irqsave(&mad_agent_priv->lock, flags);
838 list_add_tail(&local->completion_list, &mad_agent_priv->local_list);
839 spin_unlock_irqrestore(&mad_agent_priv->lock, flags);
840 queue_work(mad_agent_priv->qp_info->port_priv->wq,
841 &mad_agent_priv->local_work);
847 static int get_pad_size(int hdr_len, int data_len)
851 seg_size = sizeof(struct ib_mad) - hdr_len;
852 if (data_len && seg_size) {
853 pad = seg_size - data_len % seg_size;
854 return pad == seg_size ? 0 : pad;
859 static void free_send_rmpp_list(struct ib_mad_send_wr_private *mad_send_wr)
861 struct ib_rmpp_segment *s, *t;
863 list_for_each_entry_safe(s, t, &mad_send_wr->rmpp_list, list) {
869 static int alloc_send_rmpp_list(struct ib_mad_send_wr_private *send_wr,
872 struct ib_mad_send_buf *send_buf = &send_wr->send_buf;
873 struct ib_rmpp_mad *rmpp_mad = send_buf->mad;
874 struct ib_rmpp_segment *seg = NULL;
875 int left, seg_size, pad;
877 send_buf->seg_size = sizeof (struct ib_mad) - send_buf->hdr_len;
878 seg_size = send_buf->seg_size;
881 /* Allocate data segments. */
882 for (left = send_buf->data_len + pad; left > 0; left -= seg_size) {
883 seg = kmalloc(sizeof (*seg) + seg_size, gfp_mask);
885 dev_err(&send_buf->mad_agent->device->dev,
886 "alloc_send_rmpp_segs: RMPP mem alloc failed for len %zd, gfp %#x\n",
887 sizeof (*seg) + seg_size, gfp_mask);
888 free_send_rmpp_list(send_wr);
891 seg->num = ++send_buf->seg_count;
892 list_add_tail(&seg->list, &send_wr->rmpp_list);
895 /* Zero any padding */
897 memset(seg->data + seg_size - pad, 0, pad);
899 rmpp_mad->rmpp_hdr.rmpp_version = send_wr->mad_agent_priv->
901 rmpp_mad->rmpp_hdr.rmpp_type = IB_MGMT_RMPP_TYPE_DATA;
902 ib_set_rmpp_flags(&rmpp_mad->rmpp_hdr, IB_MGMT_RMPP_FLAG_ACTIVE);
904 send_wr->cur_seg = container_of(send_wr->rmpp_list.next,
905 struct ib_rmpp_segment, list);
906 send_wr->last_ack_seg = send_wr->cur_seg;
910 struct ib_mad_send_buf * ib_create_send_mad(struct ib_mad_agent *mad_agent,
911 u32 remote_qpn, u16 pkey_index,
913 int hdr_len, int data_len,
916 struct ib_mad_agent_private *mad_agent_priv;
917 struct ib_mad_send_wr_private *mad_send_wr;
918 int pad, message_size, ret, size;
921 mad_agent_priv = container_of(mad_agent, struct ib_mad_agent_private,
923 pad = get_pad_size(hdr_len, data_len);
924 message_size = hdr_len + data_len + pad;
926 if ((!mad_agent->rmpp_version &&
927 (rmpp_active || message_size > sizeof(struct ib_mad))) ||
928 (!rmpp_active && message_size > sizeof(struct ib_mad)))
929 return ERR_PTR(-EINVAL);
931 size = rmpp_active ? hdr_len : sizeof(struct ib_mad);
932 buf = kzalloc(sizeof *mad_send_wr + size, gfp_mask);
934 return ERR_PTR(-ENOMEM);
936 mad_send_wr = buf + size;
937 INIT_LIST_HEAD(&mad_send_wr->rmpp_list);
938 mad_send_wr->send_buf.mad = buf;
939 mad_send_wr->send_buf.hdr_len = hdr_len;
940 mad_send_wr->send_buf.data_len = data_len;
941 mad_send_wr->pad = pad;
943 mad_send_wr->mad_agent_priv = mad_agent_priv;
944 mad_send_wr->sg_list[0].length = hdr_len;
945 mad_send_wr->sg_list[0].lkey = mad_agent->mr->lkey;
946 mad_send_wr->sg_list[1].length = sizeof(struct ib_mad) - hdr_len;
947 mad_send_wr->sg_list[1].lkey = mad_agent->mr->lkey;
949 mad_send_wr->send_wr.wr_id = (unsigned long) mad_send_wr;
950 mad_send_wr->send_wr.sg_list = mad_send_wr->sg_list;
951 mad_send_wr->send_wr.num_sge = 2;
952 mad_send_wr->send_wr.opcode = IB_WR_SEND;
953 mad_send_wr->send_wr.send_flags = IB_SEND_SIGNALED;
954 mad_send_wr->send_wr.wr.ud.remote_qpn = remote_qpn;
955 mad_send_wr->send_wr.wr.ud.remote_qkey = IB_QP_SET_QKEY;
956 mad_send_wr->send_wr.wr.ud.pkey_index = pkey_index;
959 ret = alloc_send_rmpp_list(mad_send_wr, gfp_mask);
966 mad_send_wr->send_buf.mad_agent = mad_agent;
967 atomic_inc(&mad_agent_priv->refcount);
968 return &mad_send_wr->send_buf;
970 EXPORT_SYMBOL(ib_create_send_mad);
972 int ib_get_mad_data_offset(u8 mgmt_class)
974 if (mgmt_class == IB_MGMT_CLASS_SUBN_ADM)
975 return IB_MGMT_SA_HDR;
976 else if ((mgmt_class == IB_MGMT_CLASS_DEVICE_MGMT) ||
977 (mgmt_class == IB_MGMT_CLASS_DEVICE_ADM) ||
978 (mgmt_class == IB_MGMT_CLASS_BIS))
979 return IB_MGMT_DEVICE_HDR;
980 else if ((mgmt_class >= IB_MGMT_CLASS_VENDOR_RANGE2_START) &&
981 (mgmt_class <= IB_MGMT_CLASS_VENDOR_RANGE2_END))
982 return IB_MGMT_VENDOR_HDR;
984 return IB_MGMT_MAD_HDR;
986 EXPORT_SYMBOL(ib_get_mad_data_offset);
988 int ib_is_mad_class_rmpp(u8 mgmt_class)
990 if ((mgmt_class == IB_MGMT_CLASS_SUBN_ADM) ||
991 (mgmt_class == IB_MGMT_CLASS_DEVICE_MGMT) ||
992 (mgmt_class == IB_MGMT_CLASS_DEVICE_ADM) ||
993 (mgmt_class == IB_MGMT_CLASS_BIS) ||
994 ((mgmt_class >= IB_MGMT_CLASS_VENDOR_RANGE2_START) &&
995 (mgmt_class <= IB_MGMT_CLASS_VENDOR_RANGE2_END)))
999 EXPORT_SYMBOL(ib_is_mad_class_rmpp);
1001 void *ib_get_rmpp_segment(struct ib_mad_send_buf *send_buf, int seg_num)
1003 struct ib_mad_send_wr_private *mad_send_wr;
1004 struct list_head *list;
1006 mad_send_wr = container_of(send_buf, struct ib_mad_send_wr_private,
1008 list = &mad_send_wr->cur_seg->list;
1010 if (mad_send_wr->cur_seg->num < seg_num) {
1011 list_for_each_entry(mad_send_wr->cur_seg, list, list)
1012 if (mad_send_wr->cur_seg->num == seg_num)
1014 } else if (mad_send_wr->cur_seg->num > seg_num) {
1015 list_for_each_entry_reverse(mad_send_wr->cur_seg, list, list)
1016 if (mad_send_wr->cur_seg->num == seg_num)
1019 return mad_send_wr->cur_seg->data;
1021 EXPORT_SYMBOL(ib_get_rmpp_segment);
1023 static inline void *ib_get_payload(struct ib_mad_send_wr_private *mad_send_wr)
1025 if (mad_send_wr->send_buf.seg_count)
1026 return ib_get_rmpp_segment(&mad_send_wr->send_buf,
1027 mad_send_wr->seg_num);
1029 return mad_send_wr->send_buf.mad +
1030 mad_send_wr->send_buf.hdr_len;
1033 void ib_free_send_mad(struct ib_mad_send_buf *send_buf)
1035 struct ib_mad_agent_private *mad_agent_priv;
1036 struct ib_mad_send_wr_private *mad_send_wr;
1038 mad_agent_priv = container_of(send_buf->mad_agent,
1039 struct ib_mad_agent_private, agent);
1040 mad_send_wr = container_of(send_buf, struct ib_mad_send_wr_private,
1043 free_send_rmpp_list(mad_send_wr);
1044 kfree(send_buf->mad);
1045 deref_mad_agent(mad_agent_priv);
1047 EXPORT_SYMBOL(ib_free_send_mad);
1049 int ib_send_mad(struct ib_mad_send_wr_private *mad_send_wr)
1051 struct ib_mad_qp_info *qp_info;
1052 struct list_head *list;
1053 struct ib_send_wr *bad_send_wr;
1054 struct ib_mad_agent *mad_agent;
1056 unsigned long flags;
1059 /* Set WR ID to find mad_send_wr upon completion */
1060 qp_info = mad_send_wr->mad_agent_priv->qp_info;
1061 mad_send_wr->send_wr.wr_id = (unsigned long)&mad_send_wr->mad_list;
1062 mad_send_wr->mad_list.mad_queue = &qp_info->send_queue;
1064 mad_agent = mad_send_wr->send_buf.mad_agent;
1065 sge = mad_send_wr->sg_list;
1066 sge[0].addr = ib_dma_map_single(mad_agent->device,
1067 mad_send_wr->send_buf.mad,
1070 if (unlikely(ib_dma_mapping_error(mad_agent->device, sge[0].addr)))
1073 mad_send_wr->header_mapping = sge[0].addr;
1075 sge[1].addr = ib_dma_map_single(mad_agent->device,
1076 ib_get_payload(mad_send_wr),
1079 if (unlikely(ib_dma_mapping_error(mad_agent->device, sge[1].addr))) {
1080 ib_dma_unmap_single(mad_agent->device,
1081 mad_send_wr->header_mapping,
1082 sge[0].length, DMA_TO_DEVICE);
1085 mad_send_wr->payload_mapping = sge[1].addr;
1087 spin_lock_irqsave(&qp_info->send_queue.lock, flags);
1088 if (qp_info->send_queue.count < qp_info->send_queue.max_active) {
1089 ret = ib_post_send(mad_agent->qp, &mad_send_wr->send_wr,
1091 list = &qp_info->send_queue.list;
1094 list = &qp_info->overflow_list;
1098 qp_info->send_queue.count++;
1099 list_add_tail(&mad_send_wr->mad_list.list, list);
1101 spin_unlock_irqrestore(&qp_info->send_queue.lock, flags);
1103 ib_dma_unmap_single(mad_agent->device,
1104 mad_send_wr->header_mapping,
1105 sge[0].length, DMA_TO_DEVICE);
1106 ib_dma_unmap_single(mad_agent->device,
1107 mad_send_wr->payload_mapping,
1108 sge[1].length, DMA_TO_DEVICE);
1114 * ib_post_send_mad - Posts MAD(s) to the send queue of the QP associated
1115 * with the registered client
1117 int ib_post_send_mad(struct ib_mad_send_buf *send_buf,
1118 struct ib_mad_send_buf **bad_send_buf)
1120 struct ib_mad_agent_private *mad_agent_priv;
1121 struct ib_mad_send_buf *next_send_buf;
1122 struct ib_mad_send_wr_private *mad_send_wr;
1123 unsigned long flags;
1126 /* Walk list of send WRs and post each on send list */
1127 for (; send_buf; send_buf = next_send_buf) {
1129 mad_send_wr = container_of(send_buf,
1130 struct ib_mad_send_wr_private,
1132 mad_agent_priv = mad_send_wr->mad_agent_priv;
1134 if (!send_buf->mad_agent->send_handler ||
1135 (send_buf->timeout_ms &&
1136 !send_buf->mad_agent->recv_handler)) {
1141 if (!ib_is_mad_class_rmpp(((struct ib_mad_hdr *) send_buf->mad)->mgmt_class)) {
1142 if (mad_agent_priv->agent.rmpp_version) {
1149 * Save pointer to next work request to post in case the
1150 * current one completes, and the user modifies the work
1151 * request associated with the completion
1153 next_send_buf = send_buf->next;
1154 mad_send_wr->send_wr.wr.ud.ah = send_buf->ah;
1156 if (((struct ib_mad_hdr *) send_buf->mad)->mgmt_class ==
1157 IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE) {
1158 ret = handle_outgoing_dr_smp(mad_agent_priv,
1160 if (ret < 0) /* error */
1162 else if (ret == 1) /* locally consumed */
1166 mad_send_wr->tid = ((struct ib_mad_hdr *) send_buf->mad)->tid;
1167 /* Timeout will be updated after send completes */
1168 mad_send_wr->timeout = msecs_to_jiffies(send_buf->timeout_ms);
1169 mad_send_wr->max_retries = send_buf->retries;
1170 mad_send_wr->retries_left = send_buf->retries;
1171 send_buf->retries = 0;
1172 /* Reference for work request to QP + response */
1173 mad_send_wr->refcount = 1 + (mad_send_wr->timeout > 0);
1174 mad_send_wr->status = IB_WC_SUCCESS;
1176 /* Reference MAD agent until send completes */
1177 atomic_inc(&mad_agent_priv->refcount);
1178 spin_lock_irqsave(&mad_agent_priv->lock, flags);
1179 list_add_tail(&mad_send_wr->agent_list,
1180 &mad_agent_priv->send_list);
1181 spin_unlock_irqrestore(&mad_agent_priv->lock, flags);
1183 if (mad_agent_priv->agent.rmpp_version) {
1184 ret = ib_send_rmpp_mad(mad_send_wr);
1185 if (ret >= 0 && ret != IB_RMPP_RESULT_CONSUMED)
1186 ret = ib_send_mad(mad_send_wr);
1188 ret = ib_send_mad(mad_send_wr);
1190 /* Fail send request */
1191 spin_lock_irqsave(&mad_agent_priv->lock, flags);
1192 list_del(&mad_send_wr->agent_list);
1193 spin_unlock_irqrestore(&mad_agent_priv->lock, flags);
1194 atomic_dec(&mad_agent_priv->refcount);
1201 *bad_send_buf = send_buf;
1204 EXPORT_SYMBOL(ib_post_send_mad);
1207 * ib_free_recv_mad - Returns data buffers used to receive
1208 * a MAD to the access layer
1210 void ib_free_recv_mad(struct ib_mad_recv_wc *mad_recv_wc)
1212 struct ib_mad_recv_buf *mad_recv_buf, *temp_recv_buf;
1213 struct ib_mad_private_header *mad_priv_hdr;
1214 struct ib_mad_private *priv;
1215 struct list_head free_list;
1217 INIT_LIST_HEAD(&free_list);
1218 list_splice_init(&mad_recv_wc->rmpp_list, &free_list);
1220 list_for_each_entry_safe(mad_recv_buf, temp_recv_buf,
1222 mad_recv_wc = container_of(mad_recv_buf, struct ib_mad_recv_wc,
1224 mad_priv_hdr = container_of(mad_recv_wc,
1225 struct ib_mad_private_header,
1227 priv = container_of(mad_priv_hdr, struct ib_mad_private,
1229 kmem_cache_free(ib_mad_cache, priv);
1232 EXPORT_SYMBOL(ib_free_recv_mad);
1234 struct ib_mad_agent *ib_redirect_mad_qp(struct ib_qp *qp,
1236 ib_mad_send_handler send_handler,
1237 ib_mad_recv_handler recv_handler,
1240 return ERR_PTR(-EINVAL); /* XXX: for now */
1242 EXPORT_SYMBOL(ib_redirect_mad_qp);
1244 int ib_process_mad_wc(struct ib_mad_agent *mad_agent,
1247 dev_err(&mad_agent->device->dev,
1248 "ib_process_mad_wc() not implemented yet\n");
1251 EXPORT_SYMBOL(ib_process_mad_wc);
1253 static int method_in_use(struct ib_mad_mgmt_method_table **method,
1254 struct ib_mad_reg_req *mad_reg_req)
1258 for_each_set_bit(i, mad_reg_req->method_mask, IB_MGMT_MAX_METHODS) {
1259 if ((*method)->agent[i]) {
1260 pr_err("Method %d already in use\n", i);
1267 static int allocate_method_table(struct ib_mad_mgmt_method_table **method)
1269 /* Allocate management method table */
1270 *method = kzalloc(sizeof **method, GFP_ATOMIC);
1272 pr_err("No memory for ib_mad_mgmt_method_table\n");
1280 * Check to see if there are any methods still in use
1282 static int check_method_table(struct ib_mad_mgmt_method_table *method)
1286 for (i = 0; i < IB_MGMT_MAX_METHODS; i++)
1287 if (method->agent[i])
1293 * Check to see if there are any method tables for this class still in use
1295 static int check_class_table(struct ib_mad_mgmt_class_table *class)
1299 for (i = 0; i < MAX_MGMT_CLASS; i++)
1300 if (class->method_table[i])
1305 static int check_vendor_class(struct ib_mad_mgmt_vendor_class *vendor_class)
1309 for (i = 0; i < MAX_MGMT_OUI; i++)
1310 if (vendor_class->method_table[i])
1315 static int find_vendor_oui(struct ib_mad_mgmt_vendor_class *vendor_class,
1320 for (i = 0; i < MAX_MGMT_OUI; i++)
1321 /* Is there matching OUI for this vendor class ? */
1322 if (!memcmp(vendor_class->oui[i], oui, 3))
1328 static int check_vendor_table(struct ib_mad_mgmt_vendor_class_table *vendor)
1332 for (i = 0; i < MAX_MGMT_VENDOR_RANGE2; i++)
1333 if (vendor->vendor_class[i])
1339 static void remove_methods_mad_agent(struct ib_mad_mgmt_method_table *method,
1340 struct ib_mad_agent_private *agent)
1344 /* Remove any methods for this mad agent */
1345 for (i = 0; i < IB_MGMT_MAX_METHODS; i++) {
1346 if (method->agent[i] == agent) {
1347 method->agent[i] = NULL;
1352 static int add_nonoui_reg_req(struct ib_mad_reg_req *mad_reg_req,
1353 struct ib_mad_agent_private *agent_priv,
1356 struct ib_mad_port_private *port_priv;
1357 struct ib_mad_mgmt_class_table **class;
1358 struct ib_mad_mgmt_method_table **method;
1361 port_priv = agent_priv->qp_info->port_priv;
1362 class = &port_priv->version[mad_reg_req->mgmt_class_version].class;
1364 /* Allocate management class table for "new" class version */
1365 *class = kzalloc(sizeof **class, GFP_ATOMIC);
1367 dev_err(&agent_priv->agent.device->dev,
1368 "No memory for ib_mad_mgmt_class_table\n");
1373 /* Allocate method table for this management class */
1374 method = &(*class)->method_table[mgmt_class];
1375 if ((ret = allocate_method_table(method)))
1378 method = &(*class)->method_table[mgmt_class];
1380 /* Allocate method table for this management class */
1381 if ((ret = allocate_method_table(method)))
1386 /* Now, make sure methods are not already in use */
1387 if (method_in_use(method, mad_reg_req))
1390 /* Finally, add in methods being registered */
1391 for_each_set_bit(i, mad_reg_req->method_mask, IB_MGMT_MAX_METHODS)
1392 (*method)->agent[i] = agent_priv;
1397 /* Remove any methods for this mad agent */
1398 remove_methods_mad_agent(*method, agent_priv);
1399 /* Now, check to see if there are any methods in use */
1400 if (!check_method_table(*method)) {
1401 /* If not, release management method table */
1414 static int add_oui_reg_req(struct ib_mad_reg_req *mad_reg_req,
1415 struct ib_mad_agent_private *agent_priv)
1417 struct ib_mad_port_private *port_priv;
1418 struct ib_mad_mgmt_vendor_class_table **vendor_table;
1419 struct ib_mad_mgmt_vendor_class_table *vendor = NULL;
1420 struct ib_mad_mgmt_vendor_class *vendor_class = NULL;
1421 struct ib_mad_mgmt_method_table **method;
1422 int i, ret = -ENOMEM;
1425 /* "New" vendor (with OUI) class */
1426 vclass = vendor_class_index(mad_reg_req->mgmt_class);
1427 port_priv = agent_priv->qp_info->port_priv;
1428 vendor_table = &port_priv->version[
1429 mad_reg_req->mgmt_class_version].vendor;
1430 if (!*vendor_table) {
1431 /* Allocate mgmt vendor class table for "new" class version */
1432 vendor = kzalloc(sizeof *vendor, GFP_ATOMIC);
1434 dev_err(&agent_priv->agent.device->dev,
1435 "No memory for ib_mad_mgmt_vendor_class_table\n");
1439 *vendor_table = vendor;
1441 if (!(*vendor_table)->vendor_class[vclass]) {
1442 /* Allocate table for this management vendor class */
1443 vendor_class = kzalloc(sizeof *vendor_class, GFP_ATOMIC);
1444 if (!vendor_class) {
1445 dev_err(&agent_priv->agent.device->dev,
1446 "No memory for ib_mad_mgmt_vendor_class\n");
1450 (*vendor_table)->vendor_class[vclass] = vendor_class;
1452 for (i = 0; i < MAX_MGMT_OUI; i++) {
1453 /* Is there matching OUI for this vendor class ? */
1454 if (!memcmp((*vendor_table)->vendor_class[vclass]->oui[i],
1455 mad_reg_req->oui, 3)) {
1456 method = &(*vendor_table)->vendor_class[
1457 vclass]->method_table[i];
1462 for (i = 0; i < MAX_MGMT_OUI; i++) {
1463 /* OUI slot available ? */
1464 if (!is_vendor_oui((*vendor_table)->vendor_class[
1466 method = &(*vendor_table)->vendor_class[
1467 vclass]->method_table[i];
1469 /* Allocate method table for this OUI */
1470 if ((ret = allocate_method_table(method)))
1472 memcpy((*vendor_table)->vendor_class[vclass]->oui[i],
1473 mad_reg_req->oui, 3);
1477 dev_err(&agent_priv->agent.device->dev, "All OUI slots in use\n");
1481 /* Now, make sure methods are not already in use */
1482 if (method_in_use(method, mad_reg_req))
1485 /* Finally, add in methods being registered */
1486 for_each_set_bit(i, mad_reg_req->method_mask, IB_MGMT_MAX_METHODS)
1487 (*method)->agent[i] = agent_priv;
1492 /* Remove any methods for this mad agent */
1493 remove_methods_mad_agent(*method, agent_priv);
1494 /* Now, check to see if there are any methods in use */
1495 if (!check_method_table(*method)) {
1496 /* If not, release management method table */
1503 (*vendor_table)->vendor_class[vclass] = NULL;
1504 kfree(vendor_class);
1508 *vendor_table = NULL;
1515 static void remove_mad_reg_req(struct ib_mad_agent_private *agent_priv)
1517 struct ib_mad_port_private *port_priv;
1518 struct ib_mad_mgmt_class_table *class;
1519 struct ib_mad_mgmt_method_table *method;
1520 struct ib_mad_mgmt_vendor_class_table *vendor;
1521 struct ib_mad_mgmt_vendor_class *vendor_class;
1526 * Was MAD registration request supplied
1527 * with original registration ?
1529 if (!agent_priv->reg_req) {
1533 port_priv = agent_priv->qp_info->port_priv;
1534 mgmt_class = convert_mgmt_class(agent_priv->reg_req->mgmt_class);
1535 class = port_priv->version[
1536 agent_priv->reg_req->mgmt_class_version].class;
1540 method = class->method_table[mgmt_class];
1542 /* Remove any methods for this mad agent */
1543 remove_methods_mad_agent(method, agent_priv);
1544 /* Now, check to see if there are any methods still in use */
1545 if (!check_method_table(method)) {
1546 /* If not, release management method table */
1548 class->method_table[mgmt_class] = NULL;
1549 /* Any management classes left ? */
1550 if (!check_class_table(class)) {
1551 /* If not, release management class table */
1554 agent_priv->reg_req->
1555 mgmt_class_version].class = NULL;
1561 if (!is_vendor_class(mgmt_class))
1564 /* normalize mgmt_class to vendor range 2 */
1565 mgmt_class = vendor_class_index(agent_priv->reg_req->mgmt_class);
1566 vendor = port_priv->version[
1567 agent_priv->reg_req->mgmt_class_version].vendor;
1572 vendor_class = vendor->vendor_class[mgmt_class];
1574 index = find_vendor_oui(vendor_class, agent_priv->reg_req->oui);
1577 method = vendor_class->method_table[index];
1579 /* Remove any methods for this mad agent */
1580 remove_methods_mad_agent(method, agent_priv);
1582 * Now, check to see if there are
1583 * any methods still in use
1585 if (!check_method_table(method)) {
1586 /* If not, release management method table */
1588 vendor_class->method_table[index] = NULL;
1589 memset(vendor_class->oui[index], 0, 3);
1590 /* Any OUIs left ? */
1591 if (!check_vendor_class(vendor_class)) {
1592 /* If not, release vendor class table */
1593 kfree(vendor_class);
1594 vendor->vendor_class[mgmt_class] = NULL;
1595 /* Any other vendor classes left ? */
1596 if (!check_vendor_table(vendor)) {
1599 agent_priv->reg_req->
1600 mgmt_class_version].
1612 static struct ib_mad_agent_private *
1613 find_mad_agent(struct ib_mad_port_private *port_priv,
1616 struct ib_mad_agent_private *mad_agent = NULL;
1617 unsigned long flags;
1619 spin_lock_irqsave(&port_priv->reg_lock, flags);
1620 if (ib_response_mad(mad)) {
1622 struct ib_mad_agent_private *entry;
1625 * Routing is based on high 32 bits of transaction ID
1628 hi_tid = be64_to_cpu(mad->mad_hdr.tid) >> 32;
1629 list_for_each_entry(entry, &port_priv->agent_list, agent_list) {
1630 if (entry->agent.hi_tid == hi_tid) {
1636 struct ib_mad_mgmt_class_table *class;
1637 struct ib_mad_mgmt_method_table *method;
1638 struct ib_mad_mgmt_vendor_class_table *vendor;
1639 struct ib_mad_mgmt_vendor_class *vendor_class;
1640 struct ib_vendor_mad *vendor_mad;
1644 * Routing is based on version, class, and method
1645 * For "newer" vendor MADs, also based on OUI
1647 if (mad->mad_hdr.class_version >= MAX_MGMT_VERSION)
1649 if (!is_vendor_class(mad->mad_hdr.mgmt_class)) {
1650 class = port_priv->version[
1651 mad->mad_hdr.class_version].class;
1654 if (convert_mgmt_class(mad->mad_hdr.mgmt_class) >=
1655 IB_MGMT_MAX_METHODS)
1657 method = class->method_table[convert_mgmt_class(
1658 mad->mad_hdr.mgmt_class)];
1660 mad_agent = method->agent[mad->mad_hdr.method &
1661 ~IB_MGMT_METHOD_RESP];
1663 vendor = port_priv->version[
1664 mad->mad_hdr.class_version].vendor;
1667 vendor_class = vendor->vendor_class[vendor_class_index(
1668 mad->mad_hdr.mgmt_class)];
1671 /* Find matching OUI */
1672 vendor_mad = (struct ib_vendor_mad *)mad;
1673 index = find_vendor_oui(vendor_class, vendor_mad->oui);
1676 method = vendor_class->method_table[index];
1678 mad_agent = method->agent[mad->mad_hdr.method &
1679 ~IB_MGMT_METHOD_RESP];
1685 if (mad_agent->agent.recv_handler)
1686 atomic_inc(&mad_agent->refcount);
1688 dev_notice(&port_priv->device->dev,
1689 "No receive handler for client %p on port %d\n",
1690 &mad_agent->agent, port_priv->port_num);
1695 spin_unlock_irqrestore(&port_priv->reg_lock, flags);
1700 static int validate_mad(struct ib_mad *mad, u32 qp_num)
1704 /* Make sure MAD base version is understood */
1705 if (mad->mad_hdr.base_version != IB_MGMT_BASE_VERSION) {
1706 pr_err("MAD received with unsupported base version %d\n",
1707 mad->mad_hdr.base_version);
1711 /* Filter SMI packets sent to other than QP0 */
1712 if ((mad->mad_hdr.mgmt_class == IB_MGMT_CLASS_SUBN_LID_ROUTED) ||
1713 (mad->mad_hdr.mgmt_class == IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE)) {
1717 /* Filter GSI packets sent to QP0 */
1726 static int is_data_mad(struct ib_mad_agent_private *mad_agent_priv,
1727 struct ib_mad_hdr *mad_hdr)
1729 struct ib_rmpp_mad *rmpp_mad;
1731 rmpp_mad = (struct ib_rmpp_mad *)mad_hdr;
1732 return !mad_agent_priv->agent.rmpp_version ||
1733 !(ib_get_rmpp_flags(&rmpp_mad->rmpp_hdr) &
1734 IB_MGMT_RMPP_FLAG_ACTIVE) ||
1735 (rmpp_mad->rmpp_hdr.rmpp_type == IB_MGMT_RMPP_TYPE_DATA);
1738 static inline int rcv_has_same_class(struct ib_mad_send_wr_private *wr,
1739 struct ib_mad_recv_wc *rwc)
1741 return ((struct ib_mad *)(wr->send_buf.mad))->mad_hdr.mgmt_class ==
1742 rwc->recv_buf.mad->mad_hdr.mgmt_class;
1745 static inline int rcv_has_same_gid(struct ib_mad_agent_private *mad_agent_priv,
1746 struct ib_mad_send_wr_private *wr,
1747 struct ib_mad_recv_wc *rwc )
1749 struct ib_ah_attr attr;
1750 u8 send_resp, rcv_resp;
1752 struct ib_device *device = mad_agent_priv->agent.device;
1753 u8 port_num = mad_agent_priv->agent.port_num;
1756 send_resp = ib_response_mad((struct ib_mad *)wr->send_buf.mad);
1757 rcv_resp = ib_response_mad(rwc->recv_buf.mad);
1759 if (send_resp == rcv_resp)
1760 /* both requests, or both responses. GIDs different */
1763 if (ib_query_ah(wr->send_buf.ah, &attr))
1764 /* Assume not equal, to avoid false positives. */
1767 if (!!(attr.ah_flags & IB_AH_GRH) !=
1768 !!(rwc->wc->wc_flags & IB_WC_GRH))
1769 /* one has GID, other does not. Assume different */
1772 if (!send_resp && rcv_resp) {
1773 /* is request/response. */
1774 if (!(attr.ah_flags & IB_AH_GRH)) {
1775 if (ib_get_cached_lmc(device, port_num, &lmc))
1777 return (!lmc || !((attr.src_path_bits ^
1778 rwc->wc->dlid_path_bits) &
1781 if (ib_get_cached_gid(device, port_num,
1782 attr.grh.sgid_index, &sgid))
1784 return !memcmp(sgid.raw, rwc->recv_buf.grh->dgid.raw,
1789 if (!(attr.ah_flags & IB_AH_GRH))
1790 return attr.dlid == rwc->wc->slid;
1792 return !memcmp(attr.grh.dgid.raw, rwc->recv_buf.grh->sgid.raw,
1796 static inline int is_direct(u8 class)
1798 return (class == IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE);
1801 struct ib_mad_send_wr_private*
1802 ib_find_send_mad(struct ib_mad_agent_private *mad_agent_priv,
1803 struct ib_mad_recv_wc *wc)
1805 struct ib_mad_send_wr_private *wr;
1808 mad = (struct ib_mad *)wc->recv_buf.mad;
1810 list_for_each_entry(wr, &mad_agent_priv->wait_list, agent_list) {
1811 if ((wr->tid == mad->mad_hdr.tid) &&
1812 rcv_has_same_class(wr, wc) &&
1814 * Don't check GID for direct routed MADs.
1815 * These might have permissive LIDs.
1817 (is_direct(wc->recv_buf.mad->mad_hdr.mgmt_class) ||
1818 rcv_has_same_gid(mad_agent_priv, wr, wc)))
1819 return (wr->status == IB_WC_SUCCESS) ? wr : NULL;
1823 * It's possible to receive the response before we've
1824 * been notified that the send has completed
1826 list_for_each_entry(wr, &mad_agent_priv->send_list, agent_list) {
1827 if (is_data_mad(mad_agent_priv, wr->send_buf.mad) &&
1828 wr->tid == mad->mad_hdr.tid &&
1830 rcv_has_same_class(wr, wc) &&
1832 * Don't check GID for direct routed MADs.
1833 * These might have permissive LIDs.
1835 (is_direct(wc->recv_buf.mad->mad_hdr.mgmt_class) ||
1836 rcv_has_same_gid(mad_agent_priv, wr, wc)))
1837 /* Verify request has not been canceled */
1838 return (wr->status == IB_WC_SUCCESS) ? wr : NULL;
1843 void ib_mark_mad_done(struct ib_mad_send_wr_private *mad_send_wr)
1845 mad_send_wr->timeout = 0;
1846 if (mad_send_wr->refcount == 1)
1847 list_move_tail(&mad_send_wr->agent_list,
1848 &mad_send_wr->mad_agent_priv->done_list);
1851 static void ib_mad_complete_recv(struct ib_mad_agent_private *mad_agent_priv,
1852 struct ib_mad_recv_wc *mad_recv_wc)
1854 struct ib_mad_send_wr_private *mad_send_wr;
1855 struct ib_mad_send_wc mad_send_wc;
1856 unsigned long flags;
1858 INIT_LIST_HEAD(&mad_recv_wc->rmpp_list);
1859 list_add(&mad_recv_wc->recv_buf.list, &mad_recv_wc->rmpp_list);
1860 if (mad_agent_priv->agent.rmpp_version) {
1861 mad_recv_wc = ib_process_rmpp_recv_wc(mad_agent_priv,
1864 deref_mad_agent(mad_agent_priv);
1869 /* Complete corresponding request */
1870 if (ib_response_mad(mad_recv_wc->recv_buf.mad)) {
1871 spin_lock_irqsave(&mad_agent_priv->lock, flags);
1872 mad_send_wr = ib_find_send_mad(mad_agent_priv, mad_recv_wc);
1874 spin_unlock_irqrestore(&mad_agent_priv->lock, flags);
1875 ib_free_recv_mad(mad_recv_wc);
1876 deref_mad_agent(mad_agent_priv);
1879 ib_mark_mad_done(mad_send_wr);
1880 spin_unlock_irqrestore(&mad_agent_priv->lock, flags);
1882 /* Defined behavior is to complete response before request */
1883 mad_recv_wc->wc->wr_id = (unsigned long) &mad_send_wr->send_buf;
1884 mad_agent_priv->agent.recv_handler(&mad_agent_priv->agent,
1886 atomic_dec(&mad_agent_priv->refcount);
1888 mad_send_wc.status = IB_WC_SUCCESS;
1889 mad_send_wc.vendor_err = 0;
1890 mad_send_wc.send_buf = &mad_send_wr->send_buf;
1891 ib_mad_complete_send_wr(mad_send_wr, &mad_send_wc);
1893 mad_agent_priv->agent.recv_handler(&mad_agent_priv->agent,
1895 deref_mad_agent(mad_agent_priv);
1899 static bool generate_unmatched_resp(struct ib_mad_private *recv,
1900 struct ib_mad_private *response)
1902 if (recv->mad.mad.mad_hdr.method == IB_MGMT_METHOD_GET ||
1903 recv->mad.mad.mad_hdr.method == IB_MGMT_METHOD_SET) {
1904 memcpy(response, recv, sizeof *response);
1905 response->header.recv_wc.wc = &response->header.wc;
1906 response->header.recv_wc.recv_buf.mad = &response->mad.mad;
1907 response->header.recv_wc.recv_buf.grh = &response->grh;
1908 response->mad.mad.mad_hdr.method = IB_MGMT_METHOD_GET_RESP;
1909 response->mad.mad.mad_hdr.status =
1910 cpu_to_be16(IB_MGMT_MAD_STATUS_UNSUPPORTED_METHOD_ATTRIB);
1911 if (recv->mad.mad.mad_hdr.mgmt_class == IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE)
1912 response->mad.mad.mad_hdr.status |= IB_SMP_DIRECTION;
1919 static void ib_mad_recv_done_handler(struct ib_mad_port_private *port_priv,
1922 struct ib_mad_qp_info *qp_info;
1923 struct ib_mad_private_header *mad_priv_hdr;
1924 struct ib_mad_private *recv, *response = NULL;
1925 struct ib_mad_list_head *mad_list;
1926 struct ib_mad_agent_private *mad_agent;
1928 int ret = IB_MAD_RESULT_SUCCESS;
1930 mad_list = (struct ib_mad_list_head *)(unsigned long)wc->wr_id;
1931 qp_info = mad_list->mad_queue->qp_info;
1932 dequeue_mad(mad_list);
1934 mad_priv_hdr = container_of(mad_list, struct ib_mad_private_header,
1936 recv = container_of(mad_priv_hdr, struct ib_mad_private, header);
1937 ib_dma_unmap_single(port_priv->device,
1938 recv->header.mapping,
1939 sizeof(struct ib_mad_private) -
1940 sizeof(struct ib_mad_private_header),
1943 /* Setup MAD receive work completion from "normal" work completion */
1944 recv->header.wc = *wc;
1945 recv->header.recv_wc.wc = &recv->header.wc;
1946 recv->header.recv_wc.mad_len = sizeof(struct ib_mad);
1947 recv->header.recv_wc.recv_buf.mad = &recv->mad.mad;
1948 recv->header.recv_wc.recv_buf.grh = &recv->grh;
1950 if (atomic_read(&qp_info->snoop_count))
1951 snoop_recv(qp_info, &recv->header.recv_wc, IB_MAD_SNOOP_RECVS);
1954 if (!validate_mad(&recv->mad.mad, qp_info->qp->qp_num))
1957 response = kmem_cache_alloc(ib_mad_cache, GFP_KERNEL);
1959 dev_err(&port_priv->device->dev,
1960 "ib_mad_recv_done_handler no memory for response buffer\n");
1964 if (port_priv->device->node_type == RDMA_NODE_IB_SWITCH)
1965 port_num = wc->port_num;
1967 port_num = port_priv->port_num;
1969 if (recv->mad.mad.mad_hdr.mgmt_class ==
1970 IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE) {
1971 enum smi_forward_action retsmi;
1973 if (smi_handle_dr_smp_recv(&recv->mad.smp,
1974 port_priv->device->node_type,
1976 port_priv->device->phys_port_cnt) ==
1980 retsmi = smi_check_forward_dr_smp(&recv->mad.smp);
1981 if (retsmi == IB_SMI_LOCAL)
1984 if (retsmi == IB_SMI_SEND) { /* don't forward */
1985 if (smi_handle_dr_smp_send(&recv->mad.smp,
1986 port_priv->device->node_type,
1987 port_num) == IB_SMI_DISCARD)
1990 if (smi_check_local_smp(&recv->mad.smp, port_priv->device) == IB_SMI_DISCARD)
1992 } else if (port_priv->device->node_type == RDMA_NODE_IB_SWITCH) {
1993 /* forward case for switches */
1994 memcpy(response, recv, sizeof(*response));
1995 response->header.recv_wc.wc = &response->header.wc;
1996 response->header.recv_wc.recv_buf.mad = &response->mad.mad;
1997 response->header.recv_wc.recv_buf.grh = &response->grh;
1999 agent_send_response(&response->mad.mad,
2002 smi_get_fwd_port(&recv->mad.smp),
2003 qp_info->qp->qp_num);
2010 /* Give driver "right of first refusal" on incoming MAD */
2011 if (port_priv->device->process_mad) {
2012 ret = port_priv->device->process_mad(port_priv->device, 0,
2013 port_priv->port_num,
2016 &response->mad.mad);
2017 if (ret & IB_MAD_RESULT_SUCCESS) {
2018 if (ret & IB_MAD_RESULT_CONSUMED)
2020 if (ret & IB_MAD_RESULT_REPLY) {
2021 agent_send_response(&response->mad.mad,
2025 qp_info->qp->qp_num);
2031 mad_agent = find_mad_agent(port_priv, &recv->mad.mad);
2033 ib_mad_complete_recv(mad_agent, &recv->header.recv_wc);
2035 * recv is freed up in error cases in ib_mad_complete_recv
2036 * or via recv_handler in ib_mad_complete_recv()
2039 } else if ((ret & IB_MAD_RESULT_SUCCESS) &&
2040 generate_unmatched_resp(recv, response)) {
2041 agent_send_response(&response->mad.mad, &recv->grh, wc,
2042 port_priv->device, port_num, qp_info->qp->qp_num);
2046 /* Post another receive request for this QP */
2048 ib_mad_post_receive_mads(qp_info, response);
2050 kmem_cache_free(ib_mad_cache, recv);
2052 ib_mad_post_receive_mads(qp_info, recv);
2055 static void adjust_timeout(struct ib_mad_agent_private *mad_agent_priv)
2057 struct ib_mad_send_wr_private *mad_send_wr;
2058 unsigned long delay;
2060 if (list_empty(&mad_agent_priv->wait_list)) {
2061 cancel_delayed_work(&mad_agent_priv->timed_work);
2063 mad_send_wr = list_entry(mad_agent_priv->wait_list.next,
2064 struct ib_mad_send_wr_private,
2067 if (time_after(mad_agent_priv->timeout,
2068 mad_send_wr->timeout)) {
2069 mad_agent_priv->timeout = mad_send_wr->timeout;
2070 delay = mad_send_wr->timeout - jiffies;
2071 if ((long)delay <= 0)
2073 mod_delayed_work(mad_agent_priv->qp_info->port_priv->wq,
2074 &mad_agent_priv->timed_work, delay);
2079 static void wait_for_response(struct ib_mad_send_wr_private *mad_send_wr)
2081 struct ib_mad_agent_private *mad_agent_priv;
2082 struct ib_mad_send_wr_private *temp_mad_send_wr;
2083 struct list_head *list_item;
2084 unsigned long delay;
2086 mad_agent_priv = mad_send_wr->mad_agent_priv;
2087 list_del(&mad_send_wr->agent_list);
2089 delay = mad_send_wr->timeout;
2090 mad_send_wr->timeout += jiffies;
2093 list_for_each_prev(list_item, &mad_agent_priv->wait_list) {
2094 temp_mad_send_wr = list_entry(list_item,
2095 struct ib_mad_send_wr_private,
2097 if (time_after(mad_send_wr->timeout,
2098 temp_mad_send_wr->timeout))
2103 list_item = &mad_agent_priv->wait_list;
2104 list_add(&mad_send_wr->agent_list, list_item);
2106 /* Reschedule a work item if we have a shorter timeout */
2107 if (mad_agent_priv->wait_list.next == &mad_send_wr->agent_list)
2108 mod_delayed_work(mad_agent_priv->qp_info->port_priv->wq,
2109 &mad_agent_priv->timed_work, delay);
2112 void ib_reset_mad_timeout(struct ib_mad_send_wr_private *mad_send_wr,
2115 mad_send_wr->timeout = msecs_to_jiffies(timeout_ms);
2116 wait_for_response(mad_send_wr);
/*
 * Process a send work completion
 */
void ib_mad_complete_send_wr(struct ib_mad_send_wr_private *mad_send_wr,
			     struct ib_mad_send_wc *mad_send_wc)
{
	struct ib_mad_agent_private *mad_agent_priv;
	unsigned long flags;
	int ret;

	mad_agent_priv = mad_send_wr->mad_agent_priv;
	spin_lock_irqsave(&mad_agent_priv->lock, flags);
	if (mad_agent_priv->agent.rmpp_version) {
		ret = ib_process_rmpp_send_wc(mad_send_wr, mad_send_wc);
		if (ret == IB_RMPP_RESULT_CONSUMED)
			goto done;
	} else
		ret = IB_RMPP_RESULT_UNHANDLED;

	if (mad_send_wc->status != IB_WC_SUCCESS &&
	    mad_send_wr->status == IB_WC_SUCCESS) {
		mad_send_wr->status = mad_send_wc->status;
		mad_send_wr->refcount -= (mad_send_wr->timeout > 0);
	}

	if (--mad_send_wr->refcount > 0) {
		if (mad_send_wr->refcount == 1 && mad_send_wr->timeout &&
		    mad_send_wr->status == IB_WC_SUCCESS) {
			wait_for_response(mad_send_wr);
		}
		goto done;
	}

	/* Remove send from MAD agent and notify client of completion */
	list_del(&mad_send_wr->agent_list);
	adjust_timeout(mad_agent_priv);
	spin_unlock_irqrestore(&mad_agent_priv->lock, flags);

	if (mad_send_wr->status != IB_WC_SUCCESS)
		mad_send_wc->status = mad_send_wr->status;
	if (ret == IB_RMPP_RESULT_INTERNAL)
		ib_rmpp_send_handler(mad_send_wc);
	else
		mad_agent_priv->agent.send_handler(&mad_agent_priv->agent,
						   mad_send_wc);

	/* Release reference on agent taken when sending */
	deref_mad_agent(mad_agent_priv);
	return;
done:
	spin_unlock_irqrestore(&mad_agent_priv->lock, flags);
}

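/*
 * Handle a send completion on a MAD QP: unmap the request's DMA buffers,
 * promote any overflow entry onto the hardware send queue, and pass the
 * completion to ib_mad_complete_send_wr().
 */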
static void ib_mad_send_done_handler(struct ib_mad_port_private *port_priv,
				     struct ib_wc *wc)
{
	struct ib_mad_send_wr_private *mad_send_wr, *queued_send_wr;
	struct ib_mad_list_head *mad_list;
	struct ib_mad_qp_info *qp_info;
	struct ib_mad_queue *send_queue;
	struct ib_send_wr *bad_send_wr;
	struct ib_mad_send_wc mad_send_wc;
	unsigned long flags;
	int ret;

	mad_list = (struct ib_mad_list_head *)(unsigned long)wc->wr_id;
	mad_send_wr = container_of(mad_list, struct ib_mad_send_wr_private,
				   mad_list);
	send_queue = mad_list->mad_queue;
	qp_info = send_queue->qp_info;

retry:
	ib_dma_unmap_single(mad_send_wr->send_buf.mad_agent->device,
			    mad_send_wr->header_mapping,
			    mad_send_wr->sg_list[0].length, DMA_TO_DEVICE);
	ib_dma_unmap_single(mad_send_wr->send_buf.mad_agent->device,
			    mad_send_wr->payload_mapping,
			    mad_send_wr->sg_list[1].length, DMA_TO_DEVICE);
	queued_send_wr = NULL;
	spin_lock_irqsave(&send_queue->lock, flags);
	list_del(&mad_list->list);

	/* Move queued send to the send queue */
	if (send_queue->count-- > send_queue->max_active) {
		mad_list = container_of(qp_info->overflow_list.next,
					struct ib_mad_list_head, list);
		queued_send_wr = container_of(mad_list,
					struct ib_mad_send_wr_private,
					mad_list);
		list_move_tail(&mad_list->list, &send_queue->list);
	}
	spin_unlock_irqrestore(&send_queue->lock, flags);

	mad_send_wc.send_buf = &mad_send_wr->send_buf;
	mad_send_wc.status = wc->status;
	mad_send_wc.vendor_err = wc->vendor_err;
	if (atomic_read(&qp_info->snoop_count))
		snoop_send(qp_info, &mad_send_wr->send_buf, &mad_send_wc,
			   IB_MAD_SNOOP_SEND_COMPLETIONS);
	ib_mad_complete_send_wr(mad_send_wr, &mad_send_wc);

	if (queued_send_wr) {
		ret = ib_post_send(qp_info->qp, &queued_send_wr->send_wr,
				   &bad_send_wr);
		if (ret) {
			dev_err(&port_priv->device->dev,
				"ib_post_send failed: %d\n", ret);
			mad_send_wr = queued_send_wr;
			wc->status = IB_WC_LOC_QP_OP_ERR;
			goto retry;
		}
	}
}

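/*
 * Flag every send still on the QP's send queue for reposting once the QP
 * has been returned to RTS after an SQE error.
 */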
static void mark_sends_for_retry(struct ib_mad_qp_info *qp_info)
{
	struct ib_mad_send_wr_private *mad_send_wr;
	struct ib_mad_list_head *mad_list;
	unsigned long flags;

	spin_lock_irqsave(&qp_info->send_queue.lock, flags);
	list_for_each_entry(mad_list, &qp_info->send_queue.list, list) {
		mad_send_wr = container_of(mad_list,
					   struct ib_mad_send_wr_private,
					   mad_list);
		mad_send_wr->retry = 1;
	}
	spin_unlock_irqrestore(&qp_info->send_queue.lock, flags);
}

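/*
 * Handle a completion with error status.  Receive errors mean the QP is in
 * the error state and will be cleaned up elsewhere; send (SQE) errors are
 * recovered by moving the QP back to RTS and retrying flushed sends.
 */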
static void mad_error_handler(struct ib_mad_port_private *port_priv,
			      struct ib_wc *wc)
{
	struct ib_mad_list_head *mad_list;
	struct ib_mad_qp_info *qp_info;
	struct ib_mad_send_wr_private *mad_send_wr;
	int ret;

	/* Determine if failure was a send or receive */
	mad_list = (struct ib_mad_list_head *)(unsigned long)wc->wr_id;
	qp_info = mad_list->mad_queue->qp_info;
	if (mad_list->mad_queue == &qp_info->recv_queue)
		/*
		 * Receive errors indicate that the QP has entered the error
		 * state - error handling/shutdown code will cleanup
		 */
		return;

	/*
	 * Send errors will transition the QP to SQE - move
	 * QP to RTS and repost flushed work requests
	 */
	mad_send_wr = container_of(mad_list, struct ib_mad_send_wr_private,
				   mad_list);
	if (wc->status == IB_WC_WR_FLUSH_ERR) {
		if (mad_send_wr->retry) {
			/* Repost send */
			struct ib_send_wr *bad_send_wr;

			mad_send_wr->retry = 0;
			ret = ib_post_send(qp_info->qp, &mad_send_wr->send_wr,
					   &bad_send_wr);
			if (ret)
				ib_mad_send_done_handler(port_priv, wc);
		} else
			ib_mad_send_done_handler(port_priv, wc);
	} else {
		struct ib_qp_attr *attr;

		/* Transition QP to RTS and fail offending send */
		attr = kmalloc(sizeof *attr, GFP_KERNEL);
		if (attr) {
			attr->qp_state = IB_QPS_RTS;
			attr->cur_qp_state = IB_QPS_SQE;
			ret = ib_modify_qp(qp_info->qp, attr,
					   IB_QP_STATE | IB_QP_CUR_STATE);
			kfree(attr);
			if (ret)
				dev_err(&port_priv->device->dev,
					"mad_error_handler - ib_modify_qp to RTS : %d\n",
					ret);
			else
				mark_sends_for_retry(qp_info);
		}
		ib_mad_send_done_handler(port_priv, wc);
	}
}

/*
 * IB MAD completion callback
 */
static void ib_mad_completion_handler(struct work_struct *work)
{
	struct ib_mad_port_private *port_priv;
	struct ib_wc wc;

	port_priv = container_of(work, struct ib_mad_port_private, work);
	ib_req_notify_cq(port_priv->cq, IB_CQ_NEXT_COMP);

	while (ib_poll_cq(port_priv->cq, 1, &wc) == 1) {
		if (wc.status == IB_WC_SUCCESS) {
			switch (wc.opcode) {
			case IB_WC_SEND:
				ib_mad_send_done_handler(port_priv, &wc);
				break;
			case IB_WC_RECV:
				ib_mad_recv_done_handler(port_priv, &wc);
				break;
			default:
				BUG_ON(1);
				break;
			}
		} else
			mad_error_handler(port_priv, &wc);
	}
}

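/*
 * Flush an agent's outstanding sends: mark them with IB_WC_WR_FLUSH_ERR,
 * drain the wait list, and report each request back to the client.
 */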
static void cancel_mads(struct ib_mad_agent_private *mad_agent_priv)
{
	unsigned long flags;
	struct ib_mad_send_wr_private *mad_send_wr, *temp_mad_send_wr;
	struct ib_mad_send_wc mad_send_wc;
	struct list_head cancel_list;

	INIT_LIST_HEAD(&cancel_list);

	spin_lock_irqsave(&mad_agent_priv->lock, flags);
	list_for_each_entry_safe(mad_send_wr, temp_mad_send_wr,
				 &mad_agent_priv->send_list, agent_list) {
		if (mad_send_wr->status == IB_WC_SUCCESS) {
			mad_send_wr->status = IB_WC_WR_FLUSH_ERR;
			mad_send_wr->refcount -= (mad_send_wr->timeout > 0);
		}
	}

	/* Empty wait list to prevent receives from finding a request */
	list_splice_init(&mad_agent_priv->wait_list, &cancel_list);
	spin_unlock_irqrestore(&mad_agent_priv->lock, flags);

	/* Report all cancelled requests */
	mad_send_wc.status = IB_WC_WR_FLUSH_ERR;
	mad_send_wc.vendor_err = 0;

	list_for_each_entry_safe(mad_send_wr, temp_mad_send_wr,
				 &cancel_list, agent_list) {
		mad_send_wc.send_buf = &mad_send_wr->send_buf;
		list_del(&mad_send_wr->agent_list);
		mad_agent_priv->agent.send_handler(&mad_agent_priv->agent,
						   &mad_send_wc);
		atomic_dec(&mad_agent_priv->refcount);
	}
}

static struct ib_mad_send_wr_private*
find_send_wr(struct ib_mad_agent_private *mad_agent_priv,
	     struct ib_mad_send_buf *send_buf)
{
	struct ib_mad_send_wr_private *mad_send_wr;

	list_for_each_entry(mad_send_wr, &mad_agent_priv->wait_list,
			    agent_list) {
		if (&mad_send_wr->send_buf == send_buf)
			return mad_send_wr;
	}

	list_for_each_entry(mad_send_wr, &mad_agent_priv->send_list,
			    agent_list) {
		if (is_data_mad(mad_agent_priv, mad_send_wr->send_buf.mad) &&
		    &mad_send_wr->send_buf == send_buf)
			return mad_send_wr;
	}
	return NULL;
}

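/*
 * Modify the timeout of an outstanding send; a timeout of 0 cancels it.
 */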
int ib_modify_mad(struct ib_mad_agent *mad_agent,
		  struct ib_mad_send_buf *send_buf, u32 timeout_ms)
{
	struct ib_mad_agent_private *mad_agent_priv;
	struct ib_mad_send_wr_private *mad_send_wr;
	unsigned long flags;
	int active;

	mad_agent_priv = container_of(mad_agent, struct ib_mad_agent_private,
				      agent);
	spin_lock_irqsave(&mad_agent_priv->lock, flags);
	mad_send_wr = find_send_wr(mad_agent_priv, send_buf);
	if (!mad_send_wr || mad_send_wr->status != IB_WC_SUCCESS) {
		spin_unlock_irqrestore(&mad_agent_priv->lock, flags);
		return -EINVAL;
	}

	active = (!mad_send_wr->timeout || mad_send_wr->refcount > 1);
	if (!timeout_ms) {
		mad_send_wr->status = IB_WC_WR_FLUSH_ERR;
		mad_send_wr->refcount -= (mad_send_wr->timeout > 0);
	}

	mad_send_wr->send_buf.timeout_ms = timeout_ms;
	if (active)
		mad_send_wr->timeout = msecs_to_jiffies(timeout_ms);
	else
		ib_reset_mad_timeout(mad_send_wr, timeout_ms);

	spin_unlock_irqrestore(&mad_agent_priv->lock, flags);
	return 0;
}
EXPORT_SYMBOL(ib_modify_mad);

void ib_cancel_mad(struct ib_mad_agent *mad_agent,
		   struct ib_mad_send_buf *send_buf)
{
	ib_modify_mad(mad_agent, send_buf, 0);
}
EXPORT_SYMBOL(ib_cancel_mad);

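/*
 * Complete MADs that were answered locally (e.g. by the local SMA) without
 * going onto the wire, delivering both the receive and the send completion
 * to the registered handlers.
 */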
static void local_completions(struct work_struct *work)
{
	struct ib_mad_agent_private *mad_agent_priv;
	struct ib_mad_local_private *local;
	struct ib_mad_agent_private *recv_mad_agent;
	unsigned long flags;
	int free_mad;
	struct ib_wc wc;
	struct ib_mad_send_wc mad_send_wc;

	mad_agent_priv =
		container_of(work, struct ib_mad_agent_private, local_work);

	spin_lock_irqsave(&mad_agent_priv->lock, flags);
	while (!list_empty(&mad_agent_priv->local_list)) {
		local = list_entry(mad_agent_priv->local_list.next,
				   struct ib_mad_local_private,
				   completion_list);
		list_del(&local->completion_list);
		spin_unlock_irqrestore(&mad_agent_priv->lock, flags);
		free_mad = 0;
		if (local->mad_priv) {
			recv_mad_agent = local->recv_mad_agent;
			if (!recv_mad_agent) {
				dev_err(&mad_agent_priv->agent.device->dev,
					"No receive MAD agent for local completion\n");
				free_mad = 1;
				goto local_send_completion;
			}

			/*
			 * Defined behavior is to complete response
			 * before request
			 */
			build_smp_wc(recv_mad_agent->agent.qp,
				     (unsigned long) local->mad_send_wr,
				     be16_to_cpu(IB_LID_PERMISSIVE),
				     0, recv_mad_agent->agent.port_num, &wc);

			local->mad_priv->header.recv_wc.wc = &wc;
			local->mad_priv->header.recv_wc.mad_len =
						sizeof(struct ib_mad);
			INIT_LIST_HEAD(&local->mad_priv->header.recv_wc.rmpp_list);
			list_add(&local->mad_priv->header.recv_wc.recv_buf.list,
				 &local->mad_priv->header.recv_wc.rmpp_list);
			local->mad_priv->header.recv_wc.recv_buf.grh = NULL;
			local->mad_priv->header.recv_wc.recv_buf.mad =
						&local->mad_priv->mad.mad;
			if (atomic_read(&recv_mad_agent->qp_info->snoop_count))
				snoop_recv(recv_mad_agent->qp_info,
					   &local->mad_priv->header.recv_wc,
					   IB_MAD_SNOOP_RECVS);
			recv_mad_agent->agent.recv_handler(
						&recv_mad_agent->agent,
						&local->mad_priv->header.recv_wc);
			spin_lock_irqsave(&recv_mad_agent->lock, flags);
			atomic_dec(&recv_mad_agent->refcount);
			spin_unlock_irqrestore(&recv_mad_agent->lock, flags);
		}

local_send_completion:
		/* Complete send */
		mad_send_wc.status = IB_WC_SUCCESS;
		mad_send_wc.vendor_err = 0;
		mad_send_wc.send_buf = &local->mad_send_wr->send_buf;
		if (atomic_read(&mad_agent_priv->qp_info->snoop_count))
			snoop_send(mad_agent_priv->qp_info,
				   &local->mad_send_wr->send_buf,
				   &mad_send_wc, IB_MAD_SNOOP_SEND_COMPLETIONS);
		mad_agent_priv->agent.send_handler(&mad_agent_priv->agent,
						   &mad_send_wc);

		spin_lock_irqsave(&mad_agent_priv->lock, flags);
		atomic_dec(&mad_agent_priv->refcount);
		if (free_mad)
			kmem_cache_free(ib_mad_cache, local->mad_priv);
		kfree(local);
	}
	spin_unlock_irqrestore(&mad_agent_priv->lock, flags);
}

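/*
 * Resend a timed-out request if it has retries left; returns 0 when a
 * retry was posted and non-zero when the request should be failed.
 */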
static int retry_send(struct ib_mad_send_wr_private *mad_send_wr)
{
	int ret;

	if (!mad_send_wr->retries_left)
		return -ETIMEDOUT;

	mad_send_wr->retries_left--;
	mad_send_wr->send_buf.retries++;

	mad_send_wr->timeout = msecs_to_jiffies(mad_send_wr->send_buf.timeout_ms);

	if (mad_send_wr->mad_agent_priv->agent.rmpp_version) {
		ret = ib_retry_rmpp(mad_send_wr);
		switch (ret) {
		case IB_RMPP_RESULT_UNHANDLED:
			ret = ib_send_mad(mad_send_wr);
			break;
		case IB_RMPP_RESULT_CONSUMED:
			ret = 0;
			break;
		default:
			ret = -ECOMM;
			break;
		}
	} else
		ret = ib_send_mad(mad_send_wr);

	if (!ret) {
		mad_send_wr->refcount++;
		list_add_tail(&mad_send_wr->agent_list,
			      &mad_send_wr->mad_agent_priv->send_list);
	}
	return ret;
}

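/*
 * Delayed-work handler: retry or fail requests whose response timeout has
 * expired and re-arm the work item for the next deadline.
 */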
static void timeout_sends(struct work_struct *work)
{
	struct ib_mad_agent_private *mad_agent_priv;
	struct ib_mad_send_wr_private *mad_send_wr;
	struct ib_mad_send_wc mad_send_wc;
	unsigned long flags, delay;

	mad_agent_priv = container_of(work, struct ib_mad_agent_private,
				      timed_work.work);
	mad_send_wc.vendor_err = 0;

	spin_lock_irqsave(&mad_agent_priv->lock, flags);
	while (!list_empty(&mad_agent_priv->wait_list)) {
		mad_send_wr = list_entry(mad_agent_priv->wait_list.next,
					 struct ib_mad_send_wr_private,
					 agent_list);

		if (time_after(mad_send_wr->timeout, jiffies)) {
			delay = mad_send_wr->timeout - jiffies;
			if ((long)delay <= 0)
				delay = 1;
			queue_delayed_work(mad_agent_priv->qp_info->
					   port_priv->wq,
					   &mad_agent_priv->timed_work, delay);
			break;
		}

		list_del(&mad_send_wr->agent_list);
		if (mad_send_wr->status == IB_WC_SUCCESS &&
		    !retry_send(mad_send_wr))
			continue;

		spin_unlock_irqrestore(&mad_agent_priv->lock, flags);

		if (mad_send_wr->status == IB_WC_SUCCESS)
			mad_send_wc.status = IB_WC_RESP_TIMEOUT_ERR;
		else
			mad_send_wc.status = mad_send_wr->status;
		mad_send_wc.send_buf = &mad_send_wr->send_buf;
		mad_agent_priv->agent.send_handler(&mad_agent_priv->agent,
						   &mad_send_wc);

		atomic_dec(&mad_agent_priv->refcount);
		spin_lock_irqsave(&mad_agent_priv->lock, flags);
	}
	spin_unlock_irqrestore(&mad_agent_priv->lock, flags);
}

static void ib_mad_thread_completion_handler(struct ib_cq *cq, void *arg)
{
	struct ib_mad_port_private *port_priv = cq->cq_context;
	unsigned long flags;

	spin_lock_irqsave(&ib_mad_port_list_lock, flags);
	if (!list_empty(&port_priv->port_list))
		queue_work(port_priv->wq, &port_priv->work);
	spin_unlock_irqrestore(&ib_mad_port_list_lock, flags);
}

/*
 * Allocate receive MADs and post receive WRs for them
 */
static int ib_mad_post_receive_mads(struct ib_mad_qp_info *qp_info,
				    struct ib_mad_private *mad)
{
	unsigned long flags;
	int post, ret;
	struct ib_mad_private *mad_priv;
	struct ib_sge sg_list;
	struct ib_recv_wr recv_wr, *bad_recv_wr;
	struct ib_mad_queue *recv_queue = &qp_info->recv_queue;

	/* Initialize common scatter list fields */
	sg_list.length = sizeof *mad_priv - sizeof mad_priv->header;
	sg_list.lkey = (*qp_info->port_priv->mr).lkey;

	/* Initialize common receive WR fields */
	recv_wr.next = NULL;
	recv_wr.sg_list = &sg_list;
	recv_wr.num_sge = 1;

	do {
		/* Allocate and map receive buffer */
		if (mad) {
			mad_priv = mad;
			mad = NULL;
		} else {
			mad_priv = kmem_cache_alloc(ib_mad_cache, GFP_KERNEL);
			if (!mad_priv) {
				dev_err(&qp_info->port_priv->device->dev,
					"No memory for receive buffer\n");
				ret = -ENOMEM;
				break;
			}
		}
		sg_list.addr = ib_dma_map_single(qp_info->port_priv->device,
						 &mad_priv->grh,
						 sizeof *mad_priv -
						   sizeof mad_priv->header,
						 DMA_FROM_DEVICE);
		if (unlikely(ib_dma_mapping_error(qp_info->port_priv->device,
						  sg_list.addr))) {
			ret = -ENOMEM;
			break;
		}
		mad_priv->header.mapping = sg_list.addr;
		recv_wr.wr_id = (unsigned long)&mad_priv->header.mad_list;
		mad_priv->header.mad_list.mad_queue = recv_queue;

		/* Post receive WR */
		spin_lock_irqsave(&recv_queue->lock, flags);
		post = (++recv_queue->count < recv_queue->max_active);
		list_add_tail(&mad_priv->header.mad_list.list, &recv_queue->list);
		spin_unlock_irqrestore(&recv_queue->lock, flags);
		ret = ib_post_recv(qp_info->qp, &recv_wr, &bad_recv_wr);
		if (ret) {
			spin_lock_irqsave(&recv_queue->lock, flags);
			list_del(&mad_priv->header.mad_list.list);
			recv_queue->count--;
			spin_unlock_irqrestore(&recv_queue->lock, flags);
			ib_dma_unmap_single(qp_info->port_priv->device,
					    mad_priv->header.mapping,
					    sizeof *mad_priv -
					      sizeof mad_priv->header,
					    DMA_FROM_DEVICE);
			kmem_cache_free(ib_mad_cache, mad_priv);
			dev_err(&qp_info->port_priv->device->dev,
				"ib_post_recv failed: %d\n", ret);
			break;
		}
	} while (post);

	return ret;
}

/*
 * Return all the posted receive MADs
 */
static void cleanup_recv_queue(struct ib_mad_qp_info *qp_info)
{
	struct ib_mad_private_header *mad_priv_hdr;
	struct ib_mad_private *recv;
	struct ib_mad_list_head *mad_list;

	if (!qp_info->qp)
		return;

	while (!list_empty(&qp_info->recv_queue.list)) {

		mad_list = list_entry(qp_info->recv_queue.list.next,
				      struct ib_mad_list_head, list);
		mad_priv_hdr = container_of(mad_list,
					    struct ib_mad_private_header,
					    mad_list);
		recv = container_of(mad_priv_hdr, struct ib_mad_private,
				    header);

		/* Remove from posted receive MAD list */
		list_del(&mad_list->list);

		ib_dma_unmap_single(qp_info->port_priv->device,
				    recv->header.mapping,
				    sizeof(struct ib_mad_private) -
				    sizeof(struct ib_mad_private_header),
				    DMA_FROM_DEVICE);
		kmem_cache_free(ib_mad_cache, recv);
	}

	qp_info->recv_queue.count = 0;
}

/*
 * Start the port and allocate any resources required
 */
static int ib_mad_port_start(struct ib_mad_port_private *port_priv)
{
	int ret, i;
	struct ib_qp_attr *attr;
	struct ib_qp *qp;
	u16 pkey_index;

	attr = kmalloc(sizeof *attr, GFP_KERNEL);
	if (!attr) {
		dev_err(&port_priv->device->dev,
			"Couldn't kmalloc ib_qp_attr\n");
		return -ENOMEM;
	}

	ret = ib_find_pkey(port_priv->device, port_priv->port_num,
			   IB_DEFAULT_PKEY_FULL, &pkey_index);
	if (ret)
		pkey_index = 0;

	for (i = 0; i < IB_MAD_QPS_CORE; i++) {
		qp = port_priv->qp_info[i].qp;
		if (!qp)
			continue;

		/*
		 * PKey index for QP1 is irrelevant but
		 * one is needed for the Reset to Init transition
		 */
		attr->qp_state = IB_QPS_INIT;
		attr->pkey_index = pkey_index;
		attr->qkey = (qp->qp_num == 0) ? 0 : IB_QP1_QKEY;
		ret = ib_modify_qp(qp, attr, IB_QP_STATE |
					     IB_QP_PKEY_INDEX | IB_QP_QKEY);
		if (ret) {
			dev_err(&port_priv->device->dev,
				"Couldn't change QP%d state to INIT: %d\n",
				i, ret);
			goto out;
		}

		attr->qp_state = IB_QPS_RTR;
		ret = ib_modify_qp(qp, attr, IB_QP_STATE);
		if (ret) {
			dev_err(&port_priv->device->dev,
				"Couldn't change QP%d state to RTR: %d\n",
				i, ret);
			goto out;
		}

		attr->qp_state = IB_QPS_RTS;
		attr->sq_psn = IB_MAD_SEND_Q_PSN;
		ret = ib_modify_qp(qp, attr, IB_QP_STATE | IB_QP_SQ_PSN);
		if (ret) {
			dev_err(&port_priv->device->dev,
				"Couldn't change QP%d state to RTS: %d\n",
				i, ret);
			goto out;
		}
	}

	ret = ib_req_notify_cq(port_priv->cq, IB_CQ_NEXT_COMP);
	if (ret) {
		dev_err(&port_priv->device->dev,
			"Failed to request completion notification: %d\n",
			ret);
		goto out;
	}

	for (i = 0; i < IB_MAD_QPS_CORE; i++) {
		if (!port_priv->qp_info[i].qp)
			continue;

		ret = ib_mad_post_receive_mads(&port_priv->qp_info[i], NULL);
		if (ret) {
			dev_err(&port_priv->device->dev,
				"Couldn't post receive WRs\n");
			goto out;
		}
	}
out:
	kfree(attr);
	return ret;
}

static void qp_event_handler(struct ib_event *event, void *qp_context)
{
	struct ib_mad_qp_info *qp_info = qp_context;

	/* It's worse than that! He's dead, Jim! */
	dev_err(&qp_info->port_priv->device->dev,
		"Fatal error (%d) on MAD QP (%d)\n",
		event->event, qp_info->qp->qp_num);
}

static void init_mad_queue(struct ib_mad_qp_info *qp_info,
			   struct ib_mad_queue *mad_queue)
{
	mad_queue->qp_info = qp_info;
	mad_queue->count = 0;
	spin_lock_init(&mad_queue->lock);
	INIT_LIST_HEAD(&mad_queue->list);
}

static void init_mad_qp(struct ib_mad_port_private *port_priv,
			struct ib_mad_qp_info *qp_info)
{
	qp_info->port_priv = port_priv;
	init_mad_queue(qp_info, &qp_info->send_queue);
	init_mad_queue(qp_info, &qp_info->recv_queue);
	INIT_LIST_HEAD(&qp_info->overflow_list);
	spin_lock_init(&qp_info->snoop_lock);
	qp_info->snoop_table = NULL;
	qp_info->snoop_table_size = 0;
	atomic_set(&qp_info->snoop_count, 0);
}

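/*
 * Create one of the special MAD QPs (SMI for QP0, GSI for QP1) using the
 * queue depths configured via the module parameters.
 */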
static int create_mad_qp(struct ib_mad_qp_info *qp_info,
			 enum ib_qp_type qp_type)
{
	struct ib_qp_init_attr qp_init_attr;
	int ret;

	memset(&qp_init_attr, 0, sizeof qp_init_attr);
	qp_init_attr.send_cq = qp_info->port_priv->cq;
	qp_init_attr.recv_cq = qp_info->port_priv->cq;
	qp_init_attr.sq_sig_type = IB_SIGNAL_ALL_WR;
	qp_init_attr.cap.max_send_wr = mad_sendq_size;
	qp_init_attr.cap.max_recv_wr = mad_recvq_size;
	qp_init_attr.cap.max_send_sge = IB_MAD_SEND_REQ_MAX_SG;
	qp_init_attr.cap.max_recv_sge = IB_MAD_RECV_REQ_MAX_SG;
	qp_init_attr.qp_type = qp_type;
	qp_init_attr.port_num = qp_info->port_priv->port_num;
	qp_init_attr.qp_context = qp_info;
	qp_init_attr.event_handler = qp_event_handler;
	qp_info->qp = ib_create_qp(qp_info->port_priv->pd, &qp_init_attr);
	if (IS_ERR(qp_info->qp)) {
		dev_err(&qp_info->port_priv->device->dev,
			"Couldn't create ib_mad QP%d\n",
			get_spl_qp_index(qp_type));
		ret = PTR_ERR(qp_info->qp);
		goto error;
	}
	/* Use minimum queue sizes unless the CQ is resized */
	qp_info->send_queue.max_active = mad_sendq_size;
	qp_info->recv_queue.max_active = mad_recvq_size;
	return 0;

error:
	return ret;
}

static void destroy_mad_qp(struct ib_mad_qp_info *qp_info)
{
	if (!qp_info->qp)
		return;

	ib_destroy_qp(qp_info->qp);
	kfree(qp_info->snoop_table);
}

/*
 * Open the port
 * Create the QP, PD, MR, and CQ if needed
 */
static int ib_mad_port_open(struct ib_device *device,
			    int port_num)
{
	int ret, cq_size;
	struct ib_mad_port_private *port_priv;
	unsigned long flags;
	char name[sizeof "ib_mad123"];
	int has_smi;

	/* Create new device info */
	port_priv = kzalloc(sizeof *port_priv, GFP_KERNEL);
	if (!port_priv) {
		dev_err(&device->dev, "No memory for ib_mad_port_private\n");
		return -ENOMEM;
	}

	port_priv->device = device;
	port_priv->port_num = port_num;
	spin_lock_init(&port_priv->reg_lock);
	INIT_LIST_HEAD(&port_priv->agent_list);
	init_mad_qp(port_priv, &port_priv->qp_info[0]);
	init_mad_qp(port_priv, &port_priv->qp_info[1]);

	cq_size = mad_sendq_size + mad_recvq_size;
	has_smi = rdma_port_get_link_layer(device, port_num) == IB_LINK_LAYER_INFINIBAND;
	if (has_smi)
		cq_size *= 2;

	port_priv->cq = ib_create_cq(port_priv->device,
				     ib_mad_thread_completion_handler,
				     NULL, port_priv, cq_size, 0);
	if (IS_ERR(port_priv->cq)) {
		dev_err(&device->dev, "Couldn't create ib_mad CQ\n");
		ret = PTR_ERR(port_priv->cq);
		goto error3;
	}

	port_priv->pd = ib_alloc_pd(device);
	if (IS_ERR(port_priv->pd)) {
		dev_err(&device->dev, "Couldn't create ib_mad PD\n");
		ret = PTR_ERR(port_priv->pd);
		goto error4;
	}

	port_priv->mr = ib_get_dma_mr(port_priv->pd, IB_ACCESS_LOCAL_WRITE);
	if (IS_ERR(port_priv->mr)) {
		dev_err(&device->dev, "Couldn't get ib_mad DMA MR\n");
		ret = PTR_ERR(port_priv->mr);
		goto error5;
	}

	if (has_smi) {
		ret = create_mad_qp(&port_priv->qp_info[0], IB_QPT_SMI);
		if (ret)
			goto error6;
	}
	ret = create_mad_qp(&port_priv->qp_info[1], IB_QPT_GSI);
	if (ret)
		goto error7;

	snprintf(name, sizeof name, "ib_mad%d", port_num);
	port_priv->wq = create_singlethread_workqueue(name);
	if (!port_priv->wq) {
		ret = -ENOMEM;
		goto error8;
	}
	INIT_WORK(&port_priv->work, ib_mad_completion_handler);

	spin_lock_irqsave(&ib_mad_port_list_lock, flags);
	list_add_tail(&port_priv->port_list, &ib_mad_port_list);
	spin_unlock_irqrestore(&ib_mad_port_list_lock, flags);

	ret = ib_mad_port_start(port_priv);
	if (ret) {
		dev_err(&device->dev, "Couldn't start port\n");
		goto error9;
	}

	return 0;

error9:
	spin_lock_irqsave(&ib_mad_port_list_lock, flags);
	list_del_init(&port_priv->port_list);
	spin_unlock_irqrestore(&ib_mad_port_list_lock, flags);

	destroy_workqueue(port_priv->wq);
error8:
	destroy_mad_qp(&port_priv->qp_info[1]);
error7:
	destroy_mad_qp(&port_priv->qp_info[0]);
error6:
	ib_dereg_mr(port_priv->mr);
error5:
	ib_dealloc_pd(port_priv->pd);
error4:
	ib_destroy_cq(port_priv->cq);
	cleanup_recv_queue(&port_priv->qp_info[1]);
	cleanup_recv_queue(&port_priv->qp_info[0]);
error3:
	kfree(port_priv);

	return ret;
}

/*
 * If there are no classes using the port, free the port
 * resources (CQ, MR, PD, QP) and remove the port's info structure
 */
static int ib_mad_port_close(struct ib_device *device, int port_num)
{
	struct ib_mad_port_private *port_priv;
	unsigned long flags;

	spin_lock_irqsave(&ib_mad_port_list_lock, flags);
	port_priv = __ib_get_mad_port(device, port_num);
	if (port_priv == NULL) {
		spin_unlock_irqrestore(&ib_mad_port_list_lock, flags);
		dev_err(&device->dev, "Port %d not found\n", port_num);
		return -ENODEV;
	}
	list_del_init(&port_priv->port_list);
	spin_unlock_irqrestore(&ib_mad_port_list_lock, flags);

	destroy_workqueue(port_priv->wq);
	destroy_mad_qp(&port_priv->qp_info[1]);
	destroy_mad_qp(&port_priv->qp_info[0]);
	ib_dereg_mr(port_priv->mr);
	ib_dealloc_pd(port_priv->pd);
	ib_destroy_cq(port_priv->cq);
	cleanup_recv_queue(&port_priv->qp_info[1]);
	cleanup_recv_queue(&port_priv->qp_info[0]);
	/* XXX: Handle deallocation of MAD registration tables */

	kfree(port_priv);

	return 0;
}

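/*
 * Client callback: open MAD services on each port of a newly registered
 * IB device, unwinding any partially opened ports on failure.
 */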
static void ib_mad_init_device(struct ib_device *device)
{
	int start, end, i;

	if (rdma_node_get_transport(device->node_type) != RDMA_TRANSPORT_IB)
		return;

	if (device->node_type == RDMA_NODE_IB_SWITCH) {
		start = 0;
		end   = 0;
	} else {
		start = 1;
		end   = device->phys_port_cnt;
	}

	for (i = start; i <= end; i++) {
		if (ib_mad_port_open(device, i)) {
			dev_err(&device->dev, "Couldn't open port %d\n", i);
			goto error;
		}
		if (ib_agent_port_open(device, i)) {
			dev_err(&device->dev,
				"Couldn't open port %d for agents\n", i);
			goto error_agent;
		}
	}
	return;

error_agent:
	if (ib_mad_port_close(device, i))
		dev_err(&device->dev, "Couldn't close port %d\n", i);

error:
	i--;

	while (i >= start) {
		if (ib_agent_port_close(device, i))
			dev_err(&device->dev,
				"Couldn't close port %d for agents\n", i);
		if (ib_mad_port_close(device, i))
			dev_err(&device->dev, "Couldn't close port %d\n", i);
		i--;
	}
}

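/*
 * Client callback: close MAD services on every port when an IB device is
 * unregistered.
 */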
static void ib_mad_remove_device(struct ib_device *device)
{
	int i, num_ports, cur_port;

	if (rdma_node_get_transport(device->node_type) != RDMA_TRANSPORT_IB)
		return;

	if (device->node_type == RDMA_NODE_IB_SWITCH) {
		num_ports = 1;
		cur_port = 0;
	} else {
		num_ports = device->phys_port_cnt;
		cur_port = 1;
	}
	for (i = 0; i < num_ports; i++, cur_port++) {
		if (ib_agent_port_close(device, cur_port))
			dev_err(&device->dev,
				"Couldn't close port %d for agents\n",
				cur_port);
		if (ib_mad_port_close(device, cur_port))
			dev_err(&device->dev, "Couldn't close port %d\n",
				cur_port);
	}
}

static struct ib_client mad_client = {
	.name   = "mad",
	.add = ib_mad_init_device,
	.remove = ib_mad_remove_device
};

static int __init ib_mad_init_module(void)
{
	int ret;

	mad_recvq_size = min(mad_recvq_size, IB_MAD_QP_MAX_SIZE);
	mad_recvq_size = max(mad_recvq_size, IB_MAD_QP_MIN_SIZE);

	mad_sendq_size = min(mad_sendq_size, IB_MAD_QP_MAX_SIZE);
	mad_sendq_size = max(mad_sendq_size, IB_MAD_QP_MIN_SIZE);

	ib_mad_cache = kmem_cache_create("ib_mad",
					 sizeof(struct ib_mad_private),
					 0,
					 SLAB_HWCACHE_ALIGN,
					 NULL);
	if (!ib_mad_cache) {
		pr_err("Couldn't create ib_mad cache\n");
		ret = -ENOMEM;
		goto error1;
	}

	INIT_LIST_HEAD(&ib_mad_port_list);

	if (ib_register_client(&mad_client)) {
		pr_err("Couldn't register ib_mad client\n");
		ret = -EINVAL;
		goto error2;
	}

	return 0;

error2:
	kmem_cache_destroy(ib_mad_cache);
error1:
	return ret;
}

static void __exit ib_mad_cleanup_module(void)
{
	ib_unregister_client(&mad_client);
	kmem_cache_destroy(ib_mad_cache);
}

module_init(ib_mad_init_module);
module_exit(ib_mad_cleanup_module);