/*
 * Copyright (c) 2004-2007 Voltaire, Inc. All rights reserved.
 * Copyright (c) 2005 Intel Corporation. All rights reserved.
 * Copyright (c) 2005 Mellanox Technologies Ltd. All rights reserved.
 * Copyright (c) 2009 HNR Consulting. All rights reserved.
 * Copyright (c) 2014,2018 Intel Corporation. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/dma-mapping.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/security.h>
#include <linux/xarray.h>
#include <rdma/ib_cache.h>

#include "mad_priv.h"
#include "core_priv.h"
#include "mad_rmpp.h"
#include "smi.h"
#include "opa_smi.h"
#include "agent.h"

#define CREATE_TRACE_POINTS
#include <trace/events/ib_mad.h>
#ifdef CONFIG_TRACEPOINTS
static void create_mad_addr_info(struct ib_mad_send_wr_private *mad_send_wr,
				 struct ib_mad_qp_info *qp_info,
				 struct trace_event_raw_ib_mad_send_template *entry)
{
	struct ib_ud_wr *wr = &mad_send_wr->send_wr;
	struct rdma_ah_attr attr = {};

	rdma_query_ah(wr->ah, &attr);

	/* These are common */
	entry->sl = attr.sl;
	entry->rqpn = wr->remote_qpn;
	entry->rqkey = wr->remote_qkey;
	entry->dlid = rdma_ah_get_dlid(&attr);
}
#endif
static int mad_sendq_size = IB_MAD_QP_SEND_SIZE;
static int mad_recvq_size = IB_MAD_QP_RECV_SIZE;

module_param_named(send_queue_size, mad_sendq_size, int, 0444);
MODULE_PARM_DESC(send_queue_size, "Size of send queue in number of work requests");
module_param_named(recv_queue_size, mad_recvq_size, int, 0444);
MODULE_PARM_DESC(recv_queue_size, "Size of receive queue in number of work requests");
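/*
 * Example (illustrative, not part of this file): the queue depths can be
 * tuned at module load time, e.g.:
 *
 *	modprobe ib_core send_queue_size=256 recv_queue_size=1024
 *
 * Both parameters are read-only (mode 0444) once the module is loaded.
 */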
static DEFINE_XARRAY_ALLOC1(ib_mad_clients);
static u32 ib_mad_client_next;
static struct list_head ib_mad_port_list;

static DEFINE_SPINLOCK(ib_mad_port_list_lock);
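/*
 * Each registered agent is stored in ib_mad_clients, keyed by the high
 * 32 bits of the MAD transaction ID (hi_tid).  Response MADs carry the
 * same hi_tid back, so find_mad_agent() can route them with a single
 * xa_load() instead of walking a registration table.
 */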
/* Forward declarations */
static int method_in_use(struct ib_mad_mgmt_method_table **method,
			 struct ib_mad_reg_req *mad_reg_req);
static void remove_mad_reg_req(struct ib_mad_agent_private *priv);
static struct ib_mad_agent_private *find_mad_agent(
					struct ib_mad_port_private *port_priv,
					const struct ib_mad_hdr *mad);
static int ib_mad_post_receive_mads(struct ib_mad_qp_info *qp_info,
				    struct ib_mad_private *mad);
static void cancel_mads(struct ib_mad_agent_private *mad_agent_priv);
static void timeout_sends(struct work_struct *work);
static void local_completions(struct work_struct *work);
static int add_nonoui_reg_req(struct ib_mad_reg_req *mad_reg_req,
			      struct ib_mad_agent_private *agent_priv,
			      u8 mgmt_class);
static int add_oui_reg_req(struct ib_mad_reg_req *mad_reg_req,
			   struct ib_mad_agent_private *agent_priv);
static bool ib_mad_send_error(struct ib_mad_port_private *port_priv,
			      struct ib_wc *wc);
static void ib_mad_send_done(struct ib_cq *cq, struct ib_wc *wc);
/*
 * Returns an ib_mad_port_private structure or NULL for a device/port.
 * Assumes ib_mad_port_list_lock is held.
 */
static inline struct ib_mad_port_private *
__ib_get_mad_port(struct ib_device *device, u32 port_num)
{
	struct ib_mad_port_private *entry;

	list_for_each_entry(entry, &ib_mad_port_list, port_list) {
		if (entry->device == device && entry->port_num == port_num)
			return entry;
	}
	return NULL;
}
/*
 * Wrapper function to return an ib_mad_port_private structure or NULL.
 * All access to the port list is done while holding ib_mad_port_list_lock.
 */
static inline struct ib_mad_port_private *
ib_get_mad_port(struct ib_device *device, u32 port_num)
{
	struct ib_mad_port_private *entry;
	unsigned long flags;

	spin_lock_irqsave(&ib_mad_port_list_lock, flags);
	entry = __ib_get_mad_port(device, port_num);
	spin_unlock_irqrestore(&ib_mad_port_list_lock, flags);

	return entry;
}
static inline u8 convert_mgmt_class(u8 mgmt_class)
{
	/* Alias IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE to 0 */
	return mgmt_class == IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE ?
		0 : mgmt_class;
}

static int get_spl_qp_index(enum ib_qp_type qp_type)
{
	switch (qp_type) {
	case IB_QPT_SMI:
		return 0;
	case IB_QPT_GSI:
		return 1;
	default:
		return -1;
	}
}
static int vendor_class_index(u8 mgmt_class)
{
	return mgmt_class - IB_MGMT_CLASS_VENDOR_RANGE2_START;
}

static int is_vendor_class(u8 mgmt_class)
{
	if ((mgmt_class < IB_MGMT_CLASS_VENDOR_RANGE2_START) ||
	    (mgmt_class > IB_MGMT_CLASS_VENDOR_RANGE2_END))
		return 0;
	return 1;
}

static int is_vendor_oui(char *oui)
{
	if (oui[0] || oui[1] || oui[2])
		return 1;
	return 0;
}
static int is_vendor_method_in_use(
		struct ib_mad_mgmt_vendor_class *vendor_class,
		struct ib_mad_reg_req *mad_reg_req)
{
	struct ib_mad_mgmt_method_table *method;
	int i;

	for (i = 0; i < MAX_MGMT_OUI; i++) {
		if (!memcmp(vendor_class->oui[i], mad_reg_req->oui, 3)) {
			method = vendor_class->method_table[i];
			if (method) {
				if (method_in_use(&method, mad_reg_req))
					return 1;
				break;
			}
		}
	}
	return 0;
}
int ib_response_mad(const struct ib_mad_hdr *hdr)
{
	return ((hdr->method & IB_MGMT_METHOD_RESP) ||
		(hdr->method == IB_MGMT_METHOD_TRAP_REPRESS) ||
		((hdr->mgmt_class == IB_MGMT_CLASS_BM) &&
		 (hdr->attr_mod & IB_BM_ATTR_MOD_RESP)));
}
EXPORT_SYMBOL(ib_response_mad);
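/*
 * Illustrative examples: a GetResp (method 0x81) has IB_MGMT_METHOD_RESP
 * (0x80) set and is classified as a response; TrapRepress (method 0x07)
 * and a BM-class MAD with the response bit in attr_mod are the two
 * special cases that do not use the response bit in the method field.
 */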
#define SOL_FC_MAX_DEFAULT_FRAC 4
#define SOL_FC_MAX_SA_FRAC 32

static int get_sol_fc_max_outstanding(struct ib_mad_reg_req *mad_reg_req)
{
	if (!mad_reg_req)
		/* Send only agent */
		return mad_recvq_size / SOL_FC_MAX_DEFAULT_FRAC;

	switch (mad_reg_req->mgmt_class) {
	case IB_MGMT_CLASS_CM:
		return mad_recvq_size / SOL_FC_MAX_DEFAULT_FRAC;
	case IB_MGMT_CLASS_SUBN_ADM:
		return mad_recvq_size / SOL_FC_MAX_SA_FRAC;
	case IB_MGMT_CLASS_SUBN_LID_ROUTED:
	case IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE:
		return min(mad_recvq_size, IB_MAD_QP_RECV_SIZE) /
		       SOL_FC_MAX_DEFAULT_FRAC;
	default:
		return 0;
	}
}
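/*
 * Worked example (assuming the default mad_recvq_size of
 * IB_MAD_QP_RECV_SIZE): a CM agent may have recvq/4 solicited MADs
 * outstanding, while an SA agent is limited to recvq/32.  The SMP
 * classes take min(mad_recvq_size, IB_MAD_QP_RECV_SIZE) first, so a
 * user-enlarged receive queue does not raise their cap.
 */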
/*
 * ib_register_mad_agent - Register to send/receive MADs
 *
 * Context: Process context.
 */
struct ib_mad_agent *ib_register_mad_agent(struct ib_device *device,
					   u32 port_num,
					   enum ib_qp_type qp_type,
					   struct ib_mad_reg_req *mad_reg_req,
					   u8 rmpp_version,
					   ib_mad_send_handler send_handler,
					   ib_mad_recv_handler recv_handler,
					   void *context,
					   u32 registration_flags)
{
	struct ib_mad_port_private *port_priv;
	struct ib_mad_agent *ret = ERR_PTR(-EINVAL);
	struct ib_mad_agent_private *mad_agent_priv;
	struct ib_mad_reg_req *reg_req = NULL;
	struct ib_mad_mgmt_class_table *class;
	struct ib_mad_mgmt_vendor_class_table *vendor;
	struct ib_mad_mgmt_vendor_class *vendor_class;
	struct ib_mad_mgmt_method_table *method;
	int ret2, qpn;
	u8 mgmt_class, vclass;
	if ((qp_type == IB_QPT_SMI && !rdma_cap_ib_smi(device, port_num)) ||
	    (qp_type == IB_QPT_GSI && !rdma_cap_ib_cm(device, port_num)))
		return ERR_PTR(-EPROTONOSUPPORT);

	/* Validate parameters */
	qpn = get_spl_qp_index(qp_type);
	if (qpn == -1) {
		dev_dbg_ratelimited(&device->dev, "%s: invalid QP Type %d\n",
				    __func__, qp_type);
		goto error1;
	}

	if (rmpp_version && rmpp_version != IB_MGMT_RMPP_VERSION) {
		dev_dbg_ratelimited(&device->dev,
				    "%s: invalid RMPP Version %u\n",
				    __func__, rmpp_version);
		goto error1;
	}
	/* Validate MAD registration request if supplied */
	if (mad_reg_req) {
		if (mad_reg_req->mgmt_class_version >= MAX_MGMT_VERSION) {
			dev_dbg_ratelimited(&device->dev,
					    "%s: invalid Class Version %u\n",
					    __func__,
					    mad_reg_req->mgmt_class_version);
			goto error1;
		}
		if (!recv_handler) {
			dev_dbg_ratelimited(&device->dev,
					    "%s: no recv_handler\n", __func__);
			goto error1;
		}
		if (mad_reg_req->mgmt_class >= MAX_MGMT_CLASS) {
			/*
			 * IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE is the only
			 * one in this range currently allowed
			 */
			if (mad_reg_req->mgmt_class !=
			    IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE) {
				dev_dbg_ratelimited(&device->dev,
					"%s: Invalid Mgmt Class 0x%x\n",
					__func__, mad_reg_req->mgmt_class);
				goto error1;
			}
		} else if (mad_reg_req->mgmt_class == 0) {
			/*
			 * Class 0 is reserved in IBA and is used for
			 * aliasing of IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE
			 */
			dev_dbg_ratelimited(&device->dev,
					    "%s: Invalid Mgmt Class 0\n",
					    __func__);
			goto error1;
		} else if (is_vendor_class(mad_reg_req->mgmt_class)) {
			/*
			 * If class is in "new" vendor range,
			 * ensure supplied OUI is not zero
			 */
			if (!is_vendor_oui(mad_reg_req->oui)) {
				dev_dbg_ratelimited(&device->dev,
					"%s: No OUI specified for class 0x%x\n",
					__func__,
					mad_reg_req->mgmt_class);
				goto error1;
			}
		}
		/* Make sure class supplied is consistent with RMPP */
		if (!ib_is_mad_class_rmpp(mad_reg_req->mgmt_class)) {
			if (rmpp_version) {
				dev_dbg_ratelimited(&device->dev,
					"%s: RMPP version for non-RMPP class 0x%x\n",
					__func__, mad_reg_req->mgmt_class);
				goto error1;
			}
		}
		/* Make sure class supplied is consistent with QP type */
		if (qp_type == IB_QPT_SMI) {
			if ((mad_reg_req->mgmt_class !=
					IB_MGMT_CLASS_SUBN_LID_ROUTED) &&
			    (mad_reg_req->mgmt_class !=
					IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE)) {
				dev_dbg_ratelimited(&device->dev,
					"%s: Invalid SM QP type: class 0x%x\n",
					__func__, mad_reg_req->mgmt_class);
				goto error1;
			}
		} else {
			if ((mad_reg_req->mgmt_class ==
					IB_MGMT_CLASS_SUBN_LID_ROUTED) ||
			    (mad_reg_req->mgmt_class ==
					IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE)) {
				dev_dbg_ratelimited(&device->dev,
					"%s: Invalid GS QP type: class 0x%x\n",
					__func__, mad_reg_req->mgmt_class);
				goto error1;
			}
		}
	} else {
		/* No registration request supplied */
		if (!send_handler)
			goto error1;
		if (registration_flags & IB_MAD_USER_RMPP)
			goto error1;
	}
	/* Validate device and port */
	port_priv = ib_get_mad_port(device, port_num);
	if (!port_priv) {
		dev_dbg_ratelimited(&device->dev, "%s: Invalid port %u\n",
				    __func__, port_num);
		ret = ERR_PTR(-ENODEV);
		goto error1;
	}

	/* Verify the QP requested is supported. For example, Ethernet devices
	 * will not have QP0.
	 */
	if (!port_priv->qp_info[qpn].qp) {
		dev_dbg_ratelimited(&device->dev, "%s: QP %d not supported\n",
				    __func__, qpn);
		ret = ERR_PTR(-EPROTONOSUPPORT);
		goto error1;
	}
	/* Allocate structures */
	mad_agent_priv = kzalloc(sizeof *mad_agent_priv, GFP_KERNEL);
	if (!mad_agent_priv) {
		ret = ERR_PTR(-ENOMEM);
		goto error1;
	}

	if (mad_reg_req) {
		reg_req = kmemdup(mad_reg_req, sizeof *reg_req, GFP_KERNEL);
		if (!reg_req) {
			ret = ERR_PTR(-ENOMEM);
			goto error3;
		}
	}
	/* Now, fill in the various structures */
	mad_agent_priv->qp_info = &port_priv->qp_info[qpn];
	mad_agent_priv->reg_req = reg_req;
	mad_agent_priv->agent.rmpp_version = rmpp_version;
	mad_agent_priv->agent.device = device;
	mad_agent_priv->agent.recv_handler = recv_handler;
	mad_agent_priv->agent.send_handler = send_handler;
	mad_agent_priv->agent.context = context;
	mad_agent_priv->agent.qp = port_priv->qp_info[qpn].qp;
	mad_agent_priv->agent.port_num = port_num;
	mad_agent_priv->agent.flags = registration_flags;
	spin_lock_init(&mad_agent_priv->lock);
	INIT_LIST_HEAD(&mad_agent_priv->send_list);
	INIT_LIST_HEAD(&mad_agent_priv->wait_list);
	INIT_LIST_HEAD(&mad_agent_priv->rmpp_list);
	INIT_LIST_HEAD(&mad_agent_priv->backlog_list);
	INIT_DELAYED_WORK(&mad_agent_priv->timed_work, timeout_sends);
	INIT_LIST_HEAD(&mad_agent_priv->local_list);
	INIT_WORK(&mad_agent_priv->local_work, local_completions);
	refcount_set(&mad_agent_priv->refcount, 1);
	init_completion(&mad_agent_priv->comp);
	mad_agent_priv->sol_fc_send_count = 0;
	mad_agent_priv->sol_fc_wait_count = 0;
	mad_agent_priv->sol_fc_max =
		recv_handler ? get_sol_fc_max_outstanding(mad_reg_req) : 0;
	ret2 = ib_mad_agent_security_setup(&mad_agent_priv->agent, qp_type);
	if (ret2) {
		ret = ERR_PTR(ret2);
		goto error4;
	}

	/*
	 * The mlx4 driver uses the top byte to distinguish which virtual
	 * function generated the MAD, so we must avoid using it.
	 */
	ret2 = xa_alloc_cyclic(&ib_mad_clients, &mad_agent_priv->agent.hi_tid,
			       mad_agent_priv, XA_LIMIT(0, (1 << 24) - 1),
			       &ib_mad_client_next, GFP_KERNEL);
	if (ret2 < 0) {
		ret = ERR_PTR(ret2);
		goto error5;
	}
	/*
	 * Make sure MAD registration (if supplied)
	 * is non overlapping with any existing ones
	 */
	spin_lock_irq(&port_priv->reg_lock);
	if (mad_reg_req) {
		mgmt_class = convert_mgmt_class(mad_reg_req->mgmt_class);
		if (!is_vendor_class(mgmt_class)) {
			class = port_priv->version[mad_reg_req->
						   mgmt_class_version].class;
			if (class) {
				method = class->method_table[mgmt_class];
				if (method) {
					if (method_in_use(&method,
							  mad_reg_req))
						goto error6;
				}
			}
			ret2 = add_nonoui_reg_req(mad_reg_req, mad_agent_priv,
						  mgmt_class);
		} else {
			/* "New" vendor class range */
			vendor = port_priv->version[mad_reg_req->
						    mgmt_class_version].vendor;
			if (vendor) {
				vclass = vendor_class_index(mgmt_class);
				vendor_class = vendor->vendor_class[vclass];
				if (vendor_class) {
					if (is_vendor_method_in_use(
							vendor_class,
							mad_reg_req))
						goto error6;
				}
			}
			ret2 = add_oui_reg_req(mad_reg_req, mad_agent_priv);
		}
		if (ret2) {
			ret = ERR_PTR(ret2);
			goto error6;
		}
	}
	spin_unlock_irq(&port_priv->reg_lock);
	trace_ib_mad_create_agent(mad_agent_priv);
	return &mad_agent_priv->agent;

error6:
	spin_unlock_irq(&port_priv->reg_lock);
	xa_erase(&ib_mad_clients, mad_agent_priv->agent.hi_tid);
error5:
	ib_mad_agent_security_cleanup(&mad_agent_priv->agent);
error4:
	kfree(reg_req);
error3:
	kfree(mad_agent_priv);
error1:
	return ret;
}
EXPORT_SYMBOL(ib_register_mad_agent);
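/*
 * Illustrative usage sketch (not part of this file): a client that only
 * sends GSI MADs and polls for responses might register as:
 *
 *	struct ib_mad_agent *agent;
 *
 *	agent = ib_register_mad_agent(device, port, IB_QPT_GSI,
 *				      NULL, 0,
 *				      my_send_handler, my_recv_handler,
 *				      my_ctx, 0);
 *	if (IS_ERR(agent))
 *		return PTR_ERR(agent);
 *
 * (my_send_handler, my_recv_handler and my_ctx are hypothetical.)  With
 * mad_reg_req == NULL the agent receives only responses to MADs it sent,
 * routed by hi_tid; a non-NULL ib_mad_reg_req is required to receive
 * unsolicited MADs for a management class.
 */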
static inline void deref_mad_agent(struct ib_mad_agent_private *mad_agent_priv)
{
	if (refcount_dec_and_test(&mad_agent_priv->refcount))
		complete(&mad_agent_priv->comp);
}
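/*
 * Teardown relies on this pairing: unregister_mad_agent() drops the
 * initial reference and then sleeps in wait_for_completion() until the
 * last deref_mad_agent() call signals ->comp, guaranteeing no handler
 * is still running when the agent memory is freed.
 */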
static void unregister_mad_agent(struct ib_mad_agent_private *mad_agent_priv)
{
	struct ib_mad_port_private *port_priv;

	/* Note that we could still be handling received MADs */
	trace_ib_mad_unregister_agent(mad_agent_priv);

	/*
	 * Canceling all sends results in dropping received response
	 * MADs, preventing us from queuing additional work
	 */
	cancel_mads(mad_agent_priv);
	port_priv = mad_agent_priv->qp_info->port_priv;
	cancel_delayed_work(&mad_agent_priv->timed_work);

	spin_lock_irq(&port_priv->reg_lock);
	remove_mad_reg_req(mad_agent_priv);
	spin_unlock_irq(&port_priv->reg_lock);
	xa_erase(&ib_mad_clients, mad_agent_priv->agent.hi_tid);

	flush_workqueue(port_priv->wq);

	deref_mad_agent(mad_agent_priv);
	wait_for_completion(&mad_agent_priv->comp);
	ib_cancel_rmpp_recvs(mad_agent_priv);

	ib_mad_agent_security_cleanup(&mad_agent_priv->agent);

	kfree(mad_agent_priv->reg_req);
	kfree_rcu(mad_agent_priv, rcu);
}
/*
 * ib_unregister_mad_agent - Unregisters a client from using MAD services
 *
 * Context: Process context.
 */
void ib_unregister_mad_agent(struct ib_mad_agent *mad_agent)
{
	struct ib_mad_agent_private *mad_agent_priv;

	mad_agent_priv = container_of(mad_agent,
				      struct ib_mad_agent_private,
				      agent);
	unregister_mad_agent(mad_agent_priv);
}
EXPORT_SYMBOL(ib_unregister_mad_agent);
static void dequeue_mad(struct ib_mad_list_head *mad_list)
{
	struct ib_mad_queue *mad_queue;
	unsigned long flags;

	mad_queue = mad_list->mad_queue;
	spin_lock_irqsave(&mad_queue->lock, flags);
	list_del(&mad_list->list);
	mad_queue->count--;
	spin_unlock_irqrestore(&mad_queue->lock, flags);
}
static void build_smp_wc(struct ib_qp *qp, struct ib_cqe *cqe, u16 slid,
			 u16 pkey_index, u32 port_num, struct ib_wc *wc)
{
	memset(wc, 0, sizeof *wc);
	wc->wr_cqe = cqe;
	wc->status = IB_WC_SUCCESS;
	wc->opcode = IB_WC_RECV;
	wc->pkey_index = pkey_index;
	wc->byte_len = sizeof(struct ib_mad) + sizeof(struct ib_grh);
	wc->src_qp = IB_QP0;
	wc->qp = qp;
	wc->slid = slid;
	wc->sl = 0;
	wc->dlid_path_bits = 0;
	wc->port_num = port_num;
}
static size_t mad_priv_size(const struct ib_mad_private *mp)
{
	return sizeof(struct ib_mad_private) + mp->mad_size;
}

static struct ib_mad_private *alloc_mad_private(size_t mad_size, gfp_t flags)
{
	size_t size = sizeof(struct ib_mad_private) + mad_size;
	struct ib_mad_private *ret = kzalloc(size, flags);

	if (ret)
		ret->mad_size = mad_size;

	return ret;
}

static size_t port_mad_size(const struct ib_mad_port_private *port_priv)
{
	return rdma_max_mad_size(port_priv->device, port_priv->port_num);
}

static size_t mad_priv_dma_size(const struct ib_mad_private *mp)
{
	return sizeof(struct ib_grh) + mp->mad_size;
}
/*
 * Return 0 if SMP is to be sent
 * Return 1 if SMP was consumed locally (whether or not solicited)
 * Return < 0 if error
 */
static int handle_outgoing_dr_smp(struct ib_mad_agent_private *mad_agent_priv,
				  struct ib_mad_send_wr_private *mad_send_wr)
{
	int ret = 0;
	struct ib_smp *smp = mad_send_wr->send_buf.mad;
	struct opa_smp *opa_smp = (struct opa_smp *)smp;
	unsigned long flags;
	struct ib_mad_local_private *local;
	struct ib_mad_private *mad_priv;
	struct ib_mad_port_private *port_priv;
	struct ib_mad_agent_private *recv_mad_agent = NULL;
	struct ib_device *device = mad_agent_priv->agent.device;
	u32 port_num;
	struct ib_wc mad_wc;
	struct ib_ud_wr *send_wr = &mad_send_wr->send_wr;
	size_t mad_size = port_mad_size(mad_agent_priv->qp_info->port_priv);
	u16 out_mad_pkey_index = 0;
	u16 drslid;
	bool opa = rdma_cap_opa_mad(mad_agent_priv->qp_info->port_priv->device,
				    mad_agent_priv->qp_info->port_priv->port_num);

	if (rdma_cap_ib_switch(device) &&
	    smp->mgmt_class == IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE)
		port_num = send_wr->port_num;
	else
		port_num = mad_agent_priv->agent.port_num;
	/*
	 * Directed route handling starts if the initial LID routed part of
	 * a request or the ending LID routed part of a response is empty.
	 * If we are at the start of the LID routed part, don't update the
	 * hop_ptr or hop_cnt.  See section 14.2.2, Vol 1 IB spec.
	 */
	if (opa && smp->class_version == OPA_SM_CLASS_VERSION) {
		u32 opa_drslid;

		trace_ib_mad_handle_out_opa_smi(opa_smp);

		if ((opa_get_smp_direction(opa_smp)
		     ? opa_smp->route.dr.dr_dlid : opa_smp->route.dr.dr_slid) ==
		     OPA_LID_PERMISSIVE &&
		     opa_smi_handle_dr_smp_send(opa_smp,
						rdma_cap_ib_switch(device),
						port_num) == IB_SMI_DISCARD) {
			ret = -EINVAL;
			dev_err(&device->dev, "OPA Invalid directed route\n");
			goto out;
		}
		opa_drslid = be32_to_cpu(opa_smp->route.dr.dr_slid);
		if (opa_drslid != be32_to_cpu(OPA_LID_PERMISSIVE) &&
		    opa_drslid & 0xffff0000) {
			ret = -EINVAL;
			dev_err(&device->dev, "OPA Invalid dr_slid 0x%x\n",
				opa_drslid);
			goto out;
		}
		drslid = (u16)(opa_drslid & 0x0000ffff);

		/* Check to post send on QP or process locally */
		if (opa_smi_check_local_smp(opa_smp, device) == IB_SMI_DISCARD &&
		    opa_smi_check_local_returning_smp(opa_smp, device) == IB_SMI_DISCARD)
			goto out;
	} else {
		trace_ib_mad_handle_out_ib_smi(smp);

		if ((ib_get_smp_direction(smp) ? smp->dr_dlid : smp->dr_slid) ==
		     IB_LID_PERMISSIVE &&
		     smi_handle_dr_smp_send(smp, rdma_cap_ib_switch(device), port_num) ==
		     IB_SMI_DISCARD) {
			ret = -EINVAL;
			dev_err(&device->dev, "Invalid directed route\n");
			goto out;
		}
		drslid = be16_to_cpu(smp->dr_slid);

		/* Check to post send on QP or process locally */
		if (smi_check_local_smp(smp, device) == IB_SMI_DISCARD &&
		    smi_check_local_returning_smp(smp, device) == IB_SMI_DISCARD)
			goto out;
	}
	local = kmalloc(sizeof *local, GFP_ATOMIC);
	if (!local) {
		ret = -ENOMEM;
		goto out;
	}
	local->mad_priv = NULL;
	local->recv_mad_agent = NULL;
	mad_priv = alloc_mad_private(mad_size, GFP_ATOMIC);
	if (!mad_priv) {
		ret = -ENOMEM;
		kfree(local);
		goto out;
	}

	build_smp_wc(mad_agent_priv->agent.qp,
		     send_wr->wr.wr_cqe, drslid,
		     send_wr->pkey_index,
		     send_wr->port_num, &mad_wc);

	if (opa && smp->base_version == OPA_MGMT_BASE_VERSION) {
		mad_wc.byte_len = mad_send_wr->send_buf.hdr_len
					+ mad_send_wr->send_buf.data_len
					+ sizeof(struct ib_grh);
	}

	/* No GRH for DR SMP */
	ret = device->ops.process_mad(device, 0, port_num, &mad_wc, NULL,
				      (const struct ib_mad *)smp,
				      (struct ib_mad *)mad_priv->mad, &mad_size,
				      &out_mad_pkey_index);
	switch (ret) {
	case IB_MAD_RESULT_SUCCESS | IB_MAD_RESULT_REPLY:
		if (ib_response_mad((const struct ib_mad_hdr *)mad_priv->mad) &&
		    mad_agent_priv->agent.recv_handler) {
			local->mad_priv = mad_priv;
			local->recv_mad_agent = mad_agent_priv;
			/*
			 * Reference MAD agent until receive
			 * side of local completion handled
			 */
			refcount_inc(&mad_agent_priv->refcount);
		} else
			kfree(mad_priv);
		break;
	case IB_MAD_RESULT_SUCCESS | IB_MAD_RESULT_CONSUMED:
		kfree(mad_priv);
		break;
	case IB_MAD_RESULT_SUCCESS:
		/* Treat like an incoming receive MAD */
		port_priv = ib_get_mad_port(mad_agent_priv->agent.device,
					    mad_agent_priv->agent.port_num);
		if (port_priv) {
			memcpy(mad_priv->mad, smp, mad_priv->mad_size);
			recv_mad_agent = find_mad_agent(port_priv,
						(const struct ib_mad_hdr *)mad_priv->mad);
		}
		if (!port_priv || !recv_mad_agent) {
			/*
			 * No receiving agent so drop packet and
			 * generate send completion.
			 */
			kfree(mad_priv);
			break;
		}
		local->mad_priv = mad_priv;
		local->recv_mad_agent = recv_mad_agent;
		break;
	default:
		kfree(mad_priv);
		kfree(local);
		ret = -EINVAL;
		goto out;
	}
	local->mad_send_wr = mad_send_wr;
	if (opa) {
		local->mad_send_wr->send_wr.pkey_index = out_mad_pkey_index;
		local->return_wc_byte_len = mad_size;
	}
	/* Reference MAD agent until send side of local completion handled */
	refcount_inc(&mad_agent_priv->refcount);
	/* Queue local completion to local list */
	spin_lock_irqsave(&mad_agent_priv->lock, flags);
	list_add_tail(&local->completion_list, &mad_agent_priv->local_list);
	spin_unlock_irqrestore(&mad_agent_priv->lock, flags);
	queue_work(mad_agent_priv->qp_info->port_priv->wq,
		   &mad_agent_priv->local_work);
	ret = 1;
out:
	return ret;
}
static int get_pad_size(int hdr_len, int data_len, size_t mad_size)
{
	int seg_size, pad;

	seg_size = mad_size - hdr_len;
	if (data_len && seg_size) {
		pad = seg_size - data_len % seg_size;
		return pad == seg_size ? 0 : pad;
	} else
		return 0;
}
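/*
 * Worked example: with a 256-byte MAD, hdr_len = 24 and data_len = 200,
 * seg_size = 256 - 24 = 232 and pad = 232 - (200 % 232) = 32, so the
 * single RMPP segment is padded out to a full 232-byte payload.  When
 * data_len is an exact multiple of seg_size the pad collapses to 0.
 */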
static void free_send_rmpp_list(struct ib_mad_send_wr_private *mad_send_wr)
{
	struct ib_rmpp_segment *s, *t;

	list_for_each_entry_safe(s, t, &mad_send_wr->rmpp_list, list) {
		list_del(&s->list);
		kfree(s);
	}
}
static int alloc_send_rmpp_list(struct ib_mad_send_wr_private *send_wr,
				size_t mad_size, gfp_t gfp_mask)
{
	struct ib_mad_send_buf *send_buf = &send_wr->send_buf;
	struct ib_rmpp_mad *rmpp_mad = send_buf->mad;
	struct ib_rmpp_segment *seg = NULL;
	int left, seg_size, pad;

	send_buf->seg_size = mad_size - send_buf->hdr_len;
	send_buf->seg_rmpp_size = mad_size - IB_MGMT_RMPP_HDR;
	seg_size = send_buf->seg_size;
	pad = send_wr->pad;

	/* Allocate data segments. */
	for (left = send_buf->data_len + pad; left > 0; left -= seg_size) {
		seg = kmalloc(sizeof(*seg) + seg_size, gfp_mask);
		if (!seg) {
			free_send_rmpp_list(send_wr);
			return -ENOMEM;
		}
		seg->num = ++send_buf->seg_count;
		list_add_tail(&seg->list, &send_wr->rmpp_list);
	}

	/* Zero any padding */
	if (pad)
		memset(seg->data + seg_size - pad, 0, pad);

	rmpp_mad->rmpp_hdr.rmpp_version = send_wr->mad_agent_priv->
					  agent.rmpp_version;
	rmpp_mad->rmpp_hdr.rmpp_type = IB_MGMT_RMPP_TYPE_DATA;
	ib_set_rmpp_flags(&rmpp_mad->rmpp_hdr, IB_MGMT_RMPP_FLAG_ACTIVE);

	send_wr->cur_seg = container_of(send_wr->rmpp_list.next,
					struct ib_rmpp_segment, list);
	send_wr->last_ack_seg = send_wr->cur_seg;
	return 0;
}
int ib_mad_kernel_rmpp_agent(const struct ib_mad_agent *agent)
{
	return agent->rmpp_version && !(agent->flags & IB_MAD_USER_RMPP);
}
EXPORT_SYMBOL(ib_mad_kernel_rmpp_agent);
struct ib_mad_send_buf *ib_create_send_mad(struct ib_mad_agent *mad_agent,
					   u32 remote_qpn, u16 pkey_index,
					   int rmpp_active, int hdr_len,
					   int data_len, gfp_t gfp_mask,
					   u8 base_version)
{
	struct ib_mad_agent_private *mad_agent_priv;
	struct ib_mad_send_wr_private *mad_send_wr;
	int pad, message_size, ret, size;
	void *buf;
	size_t mad_size;
	bool opa;

	mad_agent_priv = container_of(mad_agent, struct ib_mad_agent_private,
				      agent);

	opa = rdma_cap_opa_mad(mad_agent->device, mad_agent->port_num);

	if (opa && base_version == OPA_MGMT_BASE_VERSION)
		mad_size = sizeof(struct opa_mad);
	else
		mad_size = sizeof(struct ib_mad);

	pad = get_pad_size(hdr_len, data_len, mad_size);
	message_size = hdr_len + data_len + pad;

	if (ib_mad_kernel_rmpp_agent(mad_agent)) {
		if (!rmpp_active && message_size > mad_size)
			return ERR_PTR(-EINVAL);
	} else
		if (rmpp_active || message_size > mad_size)
			return ERR_PTR(-EINVAL);

	size = rmpp_active ? hdr_len : mad_size;
	buf = kzalloc(sizeof *mad_send_wr + size, gfp_mask);
	if (!buf)
		return ERR_PTR(-ENOMEM);
	mad_send_wr = buf + size;
	INIT_LIST_HEAD(&mad_send_wr->rmpp_list);
	mad_send_wr->send_buf.mad = buf;
	mad_send_wr->send_buf.hdr_len = hdr_len;
	mad_send_wr->send_buf.data_len = data_len;
	mad_send_wr->pad = pad;

	mad_send_wr->mad_agent_priv = mad_agent_priv;
	mad_send_wr->sg_list[0].length = hdr_len;
	mad_send_wr->sg_list[0].lkey = mad_agent->qp->pd->local_dma_lkey;

	/* OPA MADs don't have to be the full 2048 bytes */
	if (opa && base_version == OPA_MGMT_BASE_VERSION &&
	    data_len < mad_size - hdr_len)
		mad_send_wr->sg_list[1].length = data_len;
	else
		mad_send_wr->sg_list[1].length = mad_size - hdr_len;

	mad_send_wr->sg_list[1].lkey = mad_agent->qp->pd->local_dma_lkey;

	mad_send_wr->mad_list.cqe.done = ib_mad_send_done;

	mad_send_wr->send_wr.wr.wr_cqe = &mad_send_wr->mad_list.cqe;
	mad_send_wr->send_wr.wr.sg_list = mad_send_wr->sg_list;
	mad_send_wr->send_wr.wr.num_sge = 2;
	mad_send_wr->send_wr.wr.opcode = IB_WR_SEND;
	mad_send_wr->send_wr.wr.send_flags = IB_SEND_SIGNALED;
	mad_send_wr->send_wr.remote_qpn = remote_qpn;
	mad_send_wr->send_wr.remote_qkey = IB_QP_SET_QKEY;
	mad_send_wr->send_wr.pkey_index = pkey_index;

	if (rmpp_active) {
		ret = alloc_send_rmpp_list(mad_send_wr, mad_size, gfp_mask);
		if (ret) {
			kfree(buf);
			return ERR_PTR(ret);
		}
	}

	mad_send_wr->send_buf.mad_agent = mad_agent;
	refcount_inc(&mad_agent_priv->refcount);
	return &mad_send_wr->send_buf;
}
EXPORT_SYMBOL(ib_create_send_mad);
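/*
 * Illustrative usage sketch (not part of this file): a single-segment,
 * non-RMPP SA-class request buffer could be built as:
 *
 *	struct ib_mad_send_buf *msg;
 *
 *	msg = ib_create_send_mad(agent, remote_qpn, pkey_index,
 *				 0,			// rmpp_active
 *				 IB_MGMT_SA_HDR,
 *				 IB_MGMT_SA_DATA,
 *				 GFP_KERNEL,
 *				 IB_MGMT_BASE_VERSION);
 *	if (IS_ERR(msg))
 *		return PTR_ERR(msg);
 *	msg->ah = ah;			// caller supplies the address handle
 *	msg->timeout_ms = 1000;
 *	msg->retries = 3;
 *
 * The buffer is released with ib_free_send_mad() once the send
 * completion handler has run (or on error).
 */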
int ib_get_mad_data_offset(u8 mgmt_class)
{
	if (mgmt_class == IB_MGMT_CLASS_SUBN_ADM)
		return IB_MGMT_SA_HDR;
	else if ((mgmt_class == IB_MGMT_CLASS_DEVICE_MGMT) ||
		 (mgmt_class == IB_MGMT_CLASS_DEVICE_ADM) ||
		 (mgmt_class == IB_MGMT_CLASS_BIS))
		return IB_MGMT_DEVICE_HDR;
	else if ((mgmt_class >= IB_MGMT_CLASS_VENDOR_RANGE2_START) &&
		 (mgmt_class <= IB_MGMT_CLASS_VENDOR_RANGE2_END))
		return IB_MGMT_VENDOR_HDR;
	else
		return IB_MGMT_MAD_HDR;
}
EXPORT_SYMBOL(ib_get_mad_data_offset);
int ib_is_mad_class_rmpp(u8 mgmt_class)
{
	if ((mgmt_class == IB_MGMT_CLASS_SUBN_ADM) ||
	    (mgmt_class == IB_MGMT_CLASS_DEVICE_MGMT) ||
	    (mgmt_class == IB_MGMT_CLASS_DEVICE_ADM) ||
	    (mgmt_class == IB_MGMT_CLASS_BIS) ||
	    ((mgmt_class >= IB_MGMT_CLASS_VENDOR_RANGE2_START) &&
	     (mgmt_class <= IB_MGMT_CLASS_VENDOR_RANGE2_END)))
		return 1;
	return 0;
}
EXPORT_SYMBOL(ib_is_mad_class_rmpp);
void *ib_get_rmpp_segment(struct ib_mad_send_buf *send_buf, int seg_num)
{
	struct ib_mad_send_wr_private *mad_send_wr;
	struct list_head *list;

	mad_send_wr = container_of(send_buf, struct ib_mad_send_wr_private,
				   send_buf);
	list = &mad_send_wr->cur_seg->list;

	if (mad_send_wr->cur_seg->num < seg_num) {
		list_for_each_entry(mad_send_wr->cur_seg, list, list)
			if (mad_send_wr->cur_seg->num == seg_num)
				break;
	} else if (mad_send_wr->cur_seg->num > seg_num) {
		list_for_each_entry_reverse(mad_send_wr->cur_seg, list, list)
			if (mad_send_wr->cur_seg->num == seg_num)
				break;
	}
	return mad_send_wr->cur_seg->data;
}
EXPORT_SYMBOL(ib_get_rmpp_segment);
static inline void *ib_get_payload(struct ib_mad_send_wr_private *mad_send_wr)
{
	if (mad_send_wr->send_buf.seg_count)
		return ib_get_rmpp_segment(&mad_send_wr->send_buf,
					   mad_send_wr->seg_num);
	else
		return mad_send_wr->send_buf.mad +
		       mad_send_wr->send_buf.hdr_len;
}
void ib_free_send_mad(struct ib_mad_send_buf *send_buf)
{
	struct ib_mad_agent_private *mad_agent_priv;
	struct ib_mad_send_wr_private *mad_send_wr;

	mad_agent_priv = container_of(send_buf->mad_agent,
				      struct ib_mad_agent_private, agent);
	mad_send_wr = container_of(send_buf, struct ib_mad_send_wr_private,
				   send_buf);

	free_send_rmpp_list(mad_send_wr);
	kfree(send_buf->mad);
	deref_mad_agent(mad_agent_priv);
}
EXPORT_SYMBOL(ib_free_send_mad);
int ib_send_mad(struct ib_mad_send_wr_private *mad_send_wr)
{
	struct ib_mad_qp_info *qp_info;
	struct list_head *list;
	struct ib_mad_agent *mad_agent;
	struct ib_sge *sge;
	unsigned long flags;
	int ret;

	/* Set WR ID to find mad_send_wr upon completion */
	qp_info = mad_send_wr->mad_agent_priv->qp_info;
	mad_send_wr->mad_list.mad_queue = &qp_info->send_queue;
	mad_send_wr->mad_list.cqe.done = ib_mad_send_done;
	mad_send_wr->send_wr.wr.wr_cqe = &mad_send_wr->mad_list.cqe;

	mad_agent = mad_send_wr->send_buf.mad_agent;
	sge = mad_send_wr->sg_list;
	sge[0].addr = ib_dma_map_single(mad_agent->device,
					mad_send_wr->send_buf.mad,
					sge[0].length,
					DMA_TO_DEVICE);
	if (unlikely(ib_dma_mapping_error(mad_agent->device, sge[0].addr)))
		return -ENOMEM;

	mad_send_wr->header_mapping = sge[0].addr;

	sge[1].addr = ib_dma_map_single(mad_agent->device,
					ib_get_payload(mad_send_wr),
					sge[1].length,
					DMA_TO_DEVICE);
	if (unlikely(ib_dma_mapping_error(mad_agent->device, sge[1].addr))) {
		ib_dma_unmap_single(mad_agent->device,
				    mad_send_wr->header_mapping,
				    sge[0].length, DMA_TO_DEVICE);
		return -ENOMEM;
	}
	mad_send_wr->payload_mapping = sge[1].addr;

	spin_lock_irqsave(&qp_info->send_queue.lock, flags);
	if (qp_info->send_queue.count < qp_info->send_queue.max_active) {
		trace_ib_mad_ib_send_mad(mad_send_wr, qp_info);
		ret = ib_post_send(mad_agent->qp, &mad_send_wr->send_wr.wr,
				   NULL);
		list = &qp_info->send_queue.list;
	} else {
		ret = 0;
		list = &qp_info->overflow_list;
	}

	if (!ret) {
		qp_info->send_queue.count++;
		list_add_tail(&mad_send_wr->mad_list.list, list);
	}
	spin_unlock_irqrestore(&qp_info->send_queue.lock, flags);
	if (ret) {
		ib_dma_unmap_single(mad_agent->device,
				    mad_send_wr->header_mapping,
				    sge[0].length, DMA_TO_DEVICE);
		ib_dma_unmap_single(mad_agent->device,
				    mad_send_wr->payload_mapping,
				    sge[1].length, DMA_TO_DEVICE);
	}
	return ret;
}
static void handle_queued_state(struct ib_mad_send_wr_private *mad_send_wr,
				struct ib_mad_agent_private *mad_agent_priv)
{
	if (mad_send_wr->state == IB_MAD_STATE_WAIT_RESP) {
		mad_agent_priv->sol_fc_wait_count--;
		list_move_tail(&mad_send_wr->agent_list,
			       &mad_agent_priv->backlog_list);
	} else {
		expect_mad_state(mad_send_wr, IB_MAD_STATE_INIT);
		list_add_tail(&mad_send_wr->agent_list,
			      &mad_agent_priv->backlog_list);
	}
}
static void handle_send_state(struct ib_mad_send_wr_private *mad_send_wr,
			      struct ib_mad_agent_private *mad_agent_priv)
{
	if (mad_send_wr->state == IB_MAD_STATE_INIT) {
		list_add_tail(&mad_send_wr->agent_list,
			      &mad_agent_priv->send_list);
	} else {
		expect_mad_state2(mad_send_wr, IB_MAD_STATE_WAIT_RESP,
				  IB_MAD_STATE_QUEUED);
		list_move_tail(&mad_send_wr->agent_list,
			       &mad_agent_priv->send_list);
	}

	if (mad_send_wr->is_solicited_fc) {
		if (mad_send_wr->state == IB_MAD_STATE_WAIT_RESP)
			mad_agent_priv->sol_fc_wait_count--;
		mad_agent_priv->sol_fc_send_count++;
	}
}
static void handle_wait_state(struct ib_mad_send_wr_private *mad_send_wr,
			      struct ib_mad_agent_private *mad_agent_priv)
{
	struct ib_mad_send_wr_private *temp_mad_send_wr;
	struct list_head *list_item;
	unsigned long delay;

	expect_mad_state3(mad_send_wr, IB_MAD_STATE_SEND_START,
			  IB_MAD_STATE_WAIT_RESP, IB_MAD_STATE_CANCELED);
	if (mad_send_wr->state == IB_MAD_STATE_SEND_START &&
	    mad_send_wr->is_solicited_fc) {
		mad_agent_priv->sol_fc_send_count--;
		mad_agent_priv->sol_fc_wait_count++;
	}

	list_del_init(&mad_send_wr->agent_list);
	delay = mad_send_wr->timeout;
	mad_send_wr->timeout += jiffies;

	if (delay) {
		list_for_each_prev(list_item,
				   &mad_agent_priv->wait_list) {
			temp_mad_send_wr = list_entry(
					list_item,
					struct ib_mad_send_wr_private,
					agent_list);
			if (time_after(mad_send_wr->timeout,
				       temp_mad_send_wr->timeout))
				break;
		}
	} else {
		list_item = &mad_agent_priv->wait_list;
	}

	list_add(&mad_send_wr->agent_list, list_item);
}
static void handle_early_resp_state(struct ib_mad_send_wr_private *mad_send_wr,
				    struct ib_mad_agent_private *mad_agent_priv)
{
	expect_mad_state(mad_send_wr, IB_MAD_STATE_SEND_START);
	mad_agent_priv->sol_fc_send_count -= mad_send_wr->is_solicited_fc;
}
static void handle_canceled_state(struct ib_mad_send_wr_private *mad_send_wr,
				  struct ib_mad_agent_private *mad_agent_priv)
{
	not_expect_mad_state(mad_send_wr, IB_MAD_STATE_DONE);
	if (mad_send_wr->is_solicited_fc) {
		if (mad_send_wr->state == IB_MAD_STATE_SEND_START)
			mad_agent_priv->sol_fc_send_count--;
		else if (mad_send_wr->state == IB_MAD_STATE_WAIT_RESP)
			mad_agent_priv->sol_fc_wait_count--;
	}
}
static void handle_done_state(struct ib_mad_send_wr_private *mad_send_wr,
			      struct ib_mad_agent_private *mad_agent_priv)
{
	if (mad_send_wr->is_solicited_fc) {
		if (mad_send_wr->state == IB_MAD_STATE_SEND_START)
			mad_agent_priv->sol_fc_send_count--;
		else if (mad_send_wr->state == IB_MAD_STATE_WAIT_RESP)
			mad_agent_priv->sol_fc_wait_count--;
	}

	list_del_init(&mad_send_wr->agent_list);
}
void change_mad_state(struct ib_mad_send_wr_private *mad_send_wr,
		      enum ib_mad_state new_state)
{
	struct ib_mad_agent_private *mad_agent_priv =
		mad_send_wr->mad_agent_priv;

	switch (new_state) {
	case IB_MAD_STATE_INIT:
		break;
	case IB_MAD_STATE_QUEUED:
		handle_queued_state(mad_send_wr, mad_agent_priv);
		break;
	case IB_MAD_STATE_SEND_START:
		handle_send_state(mad_send_wr, mad_agent_priv);
		break;
	case IB_MAD_STATE_WAIT_RESP:
		handle_wait_state(mad_send_wr, mad_agent_priv);
		if (mad_send_wr->state == IB_MAD_STATE_CANCELED)
			return;
		break;
	case IB_MAD_STATE_EARLY_RESP:
		handle_early_resp_state(mad_send_wr, mad_agent_priv);
		break;
	case IB_MAD_STATE_CANCELED:
		handle_canceled_state(mad_send_wr, mad_agent_priv);
		break;
	case IB_MAD_STATE_DONE:
		handle_done_state(mad_send_wr, mad_agent_priv);
		break;
	}

	mad_send_wr->state = new_state;
}
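/*
 * State transitions, in summary (derived from the handlers above):
 *
 *	INIT       -> SEND_START | QUEUED
 *	QUEUED     -> SEND_START | CANCELED | DONE
 *	SEND_START -> WAIT_RESP | EARLY_RESP | CANCELED | DONE
 *	WAIT_RESP  -> SEND_START (retry) | QUEUED | CANCELED | DONE
 *
 * The sol_fc_send_count/sol_fc_wait_count bookkeeping in the handlers
 * keeps the solicited flow-control totals in step with these moves.
 */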
static bool is_solicited_fc_mad(struct ib_mad_send_wr_private *mad_send_wr)
{
	struct ib_rmpp_mad *rmpp_mad;
	u8 mgmt_class;

	if (!mad_send_wr->timeout)
		return false;

	rmpp_mad = mad_send_wr->send_buf.mad;
	if (mad_send_wr->mad_agent_priv->agent.rmpp_version &&
	    (ib_get_rmpp_flags(&rmpp_mad->rmpp_hdr) & IB_MGMT_RMPP_FLAG_ACTIVE))
		return false;

	mgmt_class =
		((struct ib_mad_hdr *)mad_send_wr->send_buf.mad)->mgmt_class;
	return mgmt_class == IB_MGMT_CLASS_CM ||
	       mgmt_class == IB_MGMT_CLASS_SUBN_ADM ||
	       mgmt_class == IB_MGMT_CLASS_SUBN_LID_ROUTED ||
	       mgmt_class == IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE;
}
static bool mad_is_for_backlog(struct ib_mad_send_wr_private *mad_send_wr)
{
	struct ib_mad_agent_private *mad_agent_priv =
		mad_send_wr->mad_agent_priv;

	if (!mad_send_wr->is_solicited_fc || !mad_agent_priv->sol_fc_max)
		return false;

	if (!list_empty(&mad_agent_priv->backlog_list))
		return true;

	return mad_agent_priv->sol_fc_send_count +
		       mad_agent_priv->sol_fc_wait_count >=
	       mad_agent_priv->sol_fc_max;
}
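/*
 * Together with is_solicited_fc_mad(), this implements a simple
 * send-side flow control: once sol_fc_max solicited request MADs are
 * in flight (sending or waiting for a response), further ones are
 * parked on backlog_list and released by process_backlog_mads() as
 * earlier requests complete.
 */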
/*
 * ib_post_send_mad - Posts MAD(s) to the send queue of the QP associated
 *		      with the registered client
 */
int ib_post_send_mad(struct ib_mad_send_buf *send_buf,
		     struct ib_mad_send_buf **bad_send_buf)
{
	struct ib_mad_agent_private *mad_agent_priv;
	struct ib_mad_send_buf *next_send_buf;
	struct ib_mad_send_wr_private *mad_send_wr;
	unsigned long flags;
	int ret = -EINVAL;

	/* Walk list of send WRs and post each on send list */
	for (; send_buf; send_buf = next_send_buf) {
		mad_send_wr = container_of(send_buf,
					   struct ib_mad_send_wr_private,
					   send_buf);
		mad_agent_priv = mad_send_wr->mad_agent_priv;

		ret = ib_mad_enforce_security(mad_agent_priv,
					      mad_send_wr->send_wr.pkey_index);
		if (ret)
			goto error;

		if (!send_buf->mad_agent->send_handler) {
			ret = -EINVAL;
			goto error;
		}

		if (!ib_is_mad_class_rmpp(((struct ib_mad_hdr *) send_buf->mad)->mgmt_class)) {
			if (mad_agent_priv->agent.rmpp_version) {
				ret = -EINVAL;
				goto error;
			}
		}

		/*
		 * Save pointer to next work request to post in case the
		 * current one completes, and the user modifies the work
		 * request associated with the completion
		 */
		next_send_buf = send_buf->next;
		mad_send_wr->send_wr.ah = send_buf->ah;

		if (((struct ib_mad_hdr *) send_buf->mad)->mgmt_class ==
		    IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE) {
			ret = handle_outgoing_dr_smp(mad_agent_priv,
						     mad_send_wr);
			if (ret < 0)		/* error */
				goto error;
			else if (ret == 1)	/* locally consumed */
				continue;
		}
		mad_send_wr->tid = ((struct ib_mad_hdr *) send_buf->mad)->tid;
		/* Timeout will be updated after send completes */
		mad_send_wr->timeout = msecs_to_jiffies(send_buf->timeout_ms);
		mad_send_wr->max_retries = send_buf->retries;
		mad_send_wr->retries_left = send_buf->retries;
		send_buf->retries = 0;
		change_mad_state(mad_send_wr, IB_MAD_STATE_INIT);

		/* Reference MAD agent until send completes */
		refcount_inc(&mad_agent_priv->refcount);
		spin_lock_irqsave(&mad_agent_priv->lock, flags);
		mad_send_wr->is_solicited_fc = is_solicited_fc_mad(mad_send_wr);
		if (mad_is_for_backlog(mad_send_wr)) {
			change_mad_state(mad_send_wr, IB_MAD_STATE_QUEUED);
			spin_unlock_irqrestore(&mad_agent_priv->lock, flags);
			continue;
		}

		change_mad_state(mad_send_wr, IB_MAD_STATE_SEND_START);
		spin_unlock_irqrestore(&mad_agent_priv->lock, flags);

		if (ib_mad_kernel_rmpp_agent(&mad_agent_priv->agent)) {
			ret = ib_send_rmpp_mad(mad_send_wr);
			if (ret >= 0 && ret != IB_RMPP_RESULT_CONSUMED)
				ret = ib_send_mad(mad_send_wr);
		} else
			ret = ib_send_mad(mad_send_wr);
		if (ret < 0) {
			/* Fail send request */
			spin_lock_irqsave(&mad_agent_priv->lock, flags);
			change_mad_state(mad_send_wr, IB_MAD_STATE_DONE);
			spin_unlock_irqrestore(&mad_agent_priv->lock, flags);
			deref_mad_agent(mad_agent_priv);
			goto error;
		}
	}
	return 0;
error:
	if (bad_send_buf)
		*bad_send_buf = send_buf;
	return ret;
}
EXPORT_SYMBOL(ib_post_send_mad);
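/*
 * Illustrative usage sketch (not part of this file):
 *
 *	struct ib_mad_send_buf *bad;
 *	int ret = ib_post_send_mad(msg, &bad);
 *	if (ret)
 *		pr_err("posting stopped at %p: %d\n", bad, ret);
 *
 * On failure, *bad_send_buf points at the first send buffer in the
 * chain that was not posted; buffers before it were already accepted.
 */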
/*
 * ib_free_recv_mad - Returns data buffers used to receive
 *		      a MAD to the access layer
 */
void ib_free_recv_mad(struct ib_mad_recv_wc *mad_recv_wc)
{
	struct ib_mad_recv_buf *mad_recv_buf, *temp_recv_buf;
	struct ib_mad_private_header *mad_priv_hdr;
	struct ib_mad_private *priv;
	struct list_head free_list;

	INIT_LIST_HEAD(&free_list);
	list_splice_init(&mad_recv_wc->rmpp_list, &free_list);

	list_for_each_entry_safe(mad_recv_buf, temp_recv_buf,
				 &free_list, list) {
		mad_recv_wc = container_of(mad_recv_buf, struct ib_mad_recv_wc,
					   recv_buf);
		mad_priv_hdr = container_of(mad_recv_wc,
					    struct ib_mad_private_header,
					    recv_wc);
		priv = container_of(mad_priv_hdr, struct ib_mad_private,
				    header);
		kfree(priv);
	}
}
EXPORT_SYMBOL(ib_free_recv_mad);
static int method_in_use(struct ib_mad_mgmt_method_table **method,
			 struct ib_mad_reg_req *mad_reg_req)
{
	int i;

	for_each_set_bit(i, mad_reg_req->method_mask, IB_MGMT_MAX_METHODS) {
		if ((*method)->agent[i]) {
			pr_err("Method %d already in use\n", i);
			return -EINVAL;
		}
	}
	return 0;
}

static int allocate_method_table(struct ib_mad_mgmt_method_table **method)
{
	/* Allocate management method table */
	*method = kzalloc(sizeof **method, GFP_ATOMIC);
	return (*method) ? 0 : (-ENOMEM);
}
/*
 * Check to see if there are any methods still in use
 */
static int check_method_table(struct ib_mad_mgmt_method_table *method)
{
	int i;

	for (i = 0; i < IB_MGMT_MAX_METHODS; i++)
		if (method->agent[i])
			return 1;
	return 0;
}

/*
 * Check to see if there are any method tables for this class still in use
 */
static int check_class_table(struct ib_mad_mgmt_class_table *class)
{
	int i;

	for (i = 0; i < MAX_MGMT_CLASS; i++)
		if (class->method_table[i])
			return 1;
	return 0;
}

static int check_vendor_class(struct ib_mad_mgmt_vendor_class *vendor_class)
{
	int i;

	for (i = 0; i < MAX_MGMT_OUI; i++)
		if (vendor_class->method_table[i])
			return 1;
	return 0;
}

static int find_vendor_oui(struct ib_mad_mgmt_vendor_class *vendor_class,
			   const char *oui)
{
	int i;

	for (i = 0; i < MAX_MGMT_OUI; i++)
		/* Is there matching OUI for this vendor class ? */
		if (!memcmp(vendor_class->oui[i], oui, 3))
			return i;
	return -1;
}

static int check_vendor_table(struct ib_mad_mgmt_vendor_class_table *vendor)
{
	int i;

	for (i = 0; i < MAX_MGMT_VENDOR_RANGE2; i++)
		if (vendor->vendor_class[i])
			return 1;
	return 0;
}

static void remove_methods_mad_agent(struct ib_mad_mgmt_method_table *method,
				     struct ib_mad_agent_private *agent)
{
	int i;

	/* Remove any methods for this mad agent */
	for (i = 0; i < IB_MGMT_MAX_METHODS; i++)
		if (method->agent[i] == agent)
			method->agent[i] = NULL;
}
static int add_nonoui_reg_req(struct ib_mad_reg_req *mad_reg_req,
			      struct ib_mad_agent_private *agent_priv,
			      u8 mgmt_class)
{
	struct ib_mad_port_private *port_priv;
	struct ib_mad_mgmt_class_table **class;
	struct ib_mad_mgmt_method_table **method;
	int i, ret;

	port_priv = agent_priv->qp_info->port_priv;
	class = &port_priv->version[mad_reg_req->mgmt_class_version].class;
	if (!*class) {
		/* Allocate management class table for "new" class version */
		*class = kzalloc(sizeof **class, GFP_ATOMIC);
		if (!*class) {
			ret = -ENOMEM;
			goto error1;
		}

		/* Allocate method table for this management class */
		method = &(*class)->method_table[mgmt_class];
		if ((ret = allocate_method_table(method)))
			goto error2;
	} else {
		method = &(*class)->method_table[mgmt_class];
		if (!*method) {
			/* Allocate method table for this management class */
			if ((ret = allocate_method_table(method)))
				goto error1;
		}
	}

	/* Now, make sure methods are not already in use */
	if (method_in_use(method, mad_reg_req))
		goto error3;

	/* Finally, add in methods being registered */
	for_each_set_bit(i, mad_reg_req->method_mask, IB_MGMT_MAX_METHODS)
		(*method)->agent[i] = agent_priv;

	return 0;

error3:
	/* Remove any methods for this mad agent */
	remove_methods_mad_agent(*method, agent_priv);
	/* Now, check to see if there are any methods in use */
	if (!check_method_table(*method)) {
		/* If not, release management method table */
		kfree(*method);
		*method = NULL;
	}
	ret = -EINVAL;
	goto error1;
error2:
	kfree(*class);
	*class = NULL;
error1:
	return ret;
}
static int add_oui_reg_req(struct ib_mad_reg_req *mad_reg_req,
			   struct ib_mad_agent_private *agent_priv)
{
	struct ib_mad_port_private *port_priv;
	struct ib_mad_mgmt_vendor_class_table **vendor_table;
	struct ib_mad_mgmt_vendor_class_table *vendor = NULL;
	struct ib_mad_mgmt_vendor_class *vendor_class = NULL;
	struct ib_mad_mgmt_method_table **method;
	int i, ret = -ENOMEM;
	u8 vclass;

	/* "New" vendor (with OUI) class */
	vclass = vendor_class_index(mad_reg_req->mgmt_class);
	port_priv = agent_priv->qp_info->port_priv;
	vendor_table = &port_priv->version[
				mad_reg_req->mgmt_class_version].vendor;
	if (!*vendor_table) {
		/* Allocate mgmt vendor class table for "new" class version */
		vendor = kzalloc(sizeof *vendor, GFP_ATOMIC);
		if (!vendor)
			goto error1;

		*vendor_table = vendor;
	}
	if (!(*vendor_table)->vendor_class[vclass]) {
		/* Allocate table for this management vendor class */
		vendor_class = kzalloc(sizeof *vendor_class, GFP_ATOMIC);
		if (!vendor_class)
			goto error2;

		(*vendor_table)->vendor_class[vclass] = vendor_class;
	}
	for (i = 0; i < MAX_MGMT_OUI; i++) {
		/* Is there matching OUI for this vendor class ? */
		if (!memcmp((*vendor_table)->vendor_class[vclass]->oui[i],
			    mad_reg_req->oui, 3)) {
			method = &(*vendor_table)->vendor_class[
						vclass]->method_table[i];
			if (!*method)
				goto error3;
			goto check_in_use;
		}
	}
	for (i = 0; i < MAX_MGMT_OUI; i++) {
		/* OUI slot available ? */
		if (!is_vendor_oui((*vendor_table)->vendor_class[
				vclass]->oui[i])) {
			method = &(*vendor_table)->vendor_class[
				vclass]->method_table[i];
			/* Allocate method table for this OUI */
			if (!*method) {
				ret = allocate_method_table(method);
				if (ret)
					goto error3;
			}
			memcpy((*vendor_table)->vendor_class[vclass]->oui[i],
			       mad_reg_req->oui, 3);
			goto check_in_use;
		}
	}
	dev_err(&agent_priv->agent.device->dev, "All OUI slots in use\n");
	goto error3;
check_in_use:
	/* Now, make sure methods are not already in use */
	if (method_in_use(method, mad_reg_req))
		goto error4;

	/* Finally, add in methods being registered */
	for_each_set_bit(i, mad_reg_req->method_mask, IB_MGMT_MAX_METHODS)
		(*method)->agent[i] = agent_priv;

	return 0;

error4:
	/* Remove any methods for this mad agent */
	remove_methods_mad_agent(*method, agent_priv);
	/* Now, check to see if there are any methods in use */
	if (!check_method_table(*method)) {
		/* If not, release management method table */
		kfree(*method);
		*method = NULL;
	}
	ret = -EINVAL;
error3:
	if (vendor_class) {
		(*vendor_table)->vendor_class[vclass] = NULL;
		kfree(vendor_class);
	}
error2:
	if (vendor) {
		*vendor_table = NULL;
		kfree(vendor);
	}
error1:
	return ret;
}
static void remove_mad_reg_req(struct ib_mad_agent_private *agent_priv)
{
	struct ib_mad_port_private *port_priv;
	struct ib_mad_mgmt_class_table *class;
	struct ib_mad_mgmt_method_table *method;
	struct ib_mad_mgmt_vendor_class_table *vendor;
	struct ib_mad_mgmt_vendor_class *vendor_class;
	int index;
	u8 mgmt_class;

	/*
	 * Was MAD registration request supplied
	 * with original registration ?
	 */
	if (!agent_priv->reg_req)
		goto out;

	port_priv = agent_priv->qp_info->port_priv;
	mgmt_class = convert_mgmt_class(agent_priv->reg_req->mgmt_class);
	class = port_priv->version[
			agent_priv->reg_req->mgmt_class_version].class;
	if (!class)
		goto vendor_check;

	method = class->method_table[mgmt_class];
	if (method) {
		/* Remove any methods for this mad agent */
		remove_methods_mad_agent(method, agent_priv);
		/* Now, check to see if there are any methods still in use */
		if (!check_method_table(method)) {
			/* If not, release management method table */
			kfree(method);
			class->method_table[mgmt_class] = NULL;
			/* Any management classes left ? */
			if (!check_class_table(class)) {
				/* If not, release management class table */
				kfree(class);
				port_priv->version[
					agent_priv->reg_req->
					mgmt_class_version].class = NULL;
			}
		}
	}
vendor_check:
	if (!is_vendor_class(mgmt_class))
		goto out;

	/* normalize mgmt_class to vendor range 2 */
	mgmt_class = vendor_class_index(agent_priv->reg_req->mgmt_class);
	vendor = port_priv->version[
			agent_priv->reg_req->mgmt_class_version].vendor;
	if (!vendor)
		goto out;

	vendor_class = vendor->vendor_class[mgmt_class];
	if (vendor_class) {
		index = find_vendor_oui(vendor_class, agent_priv->reg_req->oui);
		if (index < 0)
			goto out;
		method = vendor_class->method_table[index];
		if (method) {
			/* Remove any methods for this mad agent */
			remove_methods_mad_agent(method, agent_priv);
			/*
			 * Now, check to see if there are
			 * any methods still in use
			 */
			if (!check_method_table(method)) {
				/* If not, release management method table */
				kfree(method);
				vendor_class->method_table[index] = NULL;
				memset(vendor_class->oui[index], 0, 3);
				/* Any OUIs left ? */
				if (!check_vendor_class(vendor_class)) {
					/* If not, release vendor class table */
					kfree(vendor_class);
					vendor->vendor_class[mgmt_class] = NULL;
					/* Any other vendor classes left ? */
					if (!check_vendor_table(vendor)) {
						kfree(vendor);
						port_priv->version[
							agent_priv->reg_req->
							mgmt_class_version].
							vendor = NULL;
					}
				}
			}
		}
	}

out:
	return;
}
static struct ib_mad_agent_private *
find_mad_agent(struct ib_mad_port_private *port_priv,
	       const struct ib_mad_hdr *mad_hdr)
{
	struct ib_mad_agent_private *mad_agent = NULL;
	unsigned long flags;

	if (ib_response_mad(mad_hdr)) {
		u32 hi_tid;

		/*
		 * Routing is based on high 32 bits of transaction ID
		 * of MAD.
		 */
		hi_tid = be64_to_cpu(mad_hdr->tid) >> 32;
		rcu_read_lock();
		mad_agent = xa_load(&ib_mad_clients, hi_tid);
		if (mad_agent && !refcount_inc_not_zero(&mad_agent->refcount))
			mad_agent = NULL;
		rcu_read_unlock();
	} else {
		struct ib_mad_mgmt_class_table *class;
		struct ib_mad_mgmt_method_table *method;
		struct ib_mad_mgmt_vendor_class_table *vendor;
		struct ib_mad_mgmt_vendor_class *vendor_class;
		const struct ib_vendor_mad *vendor_mad;
		int index;

		spin_lock_irqsave(&port_priv->reg_lock, flags);
		/*
		 * Routing is based on version, class, and method
		 * For "newer" vendor MADs, also based on OUI
		 */
		if (mad_hdr->class_version >= MAX_MGMT_VERSION)
			goto out;
		if (!is_vendor_class(mad_hdr->mgmt_class)) {
			class = port_priv->version[
					mad_hdr->class_version].class;
			if (!class)
				goto out;
			if (convert_mgmt_class(mad_hdr->mgmt_class) >=
			    ARRAY_SIZE(class->method_table))
				goto out;
			method = class->method_table[convert_mgmt_class(
							mad_hdr->mgmt_class)];
			if (method)
				mad_agent = method->agent[mad_hdr->method &
							  ~IB_MGMT_METHOD_RESP];
		} else {
			vendor = port_priv->version[
					mad_hdr->class_version].vendor;
			if (!vendor)
				goto out;
			vendor_class = vendor->vendor_class[vendor_class_index(
						mad_hdr->mgmt_class)];
			if (!vendor_class)
				goto out;
			/* Find matching OUI */
			vendor_mad = (const struct ib_vendor_mad *)mad_hdr;
			index = find_vendor_oui(vendor_class, vendor_mad->oui);
			if (index == -1)
				goto out;
			method = vendor_class->method_table[index];
			if (method)
				mad_agent = method->agent[mad_hdr->method &
							  ~IB_MGMT_METHOD_RESP];
		}
		if (mad_agent)
			refcount_inc(&mad_agent->refcount);
out:
		spin_unlock_irqrestore(&port_priv->reg_lock, flags);
	}

	if (mad_agent && !mad_agent->agent.recv_handler) {
		dev_notice(&port_priv->device->dev,
			   "No receive handler for client %p on port %u\n",
			   &mad_agent->agent, port_priv->port_num);
		deref_mad_agent(mad_agent);
		mad_agent = NULL;
	}

	return mad_agent;
}
static int validate_mad(const struct ib_mad_hdr *mad_hdr,
			const struct ib_mad_qp_info *qp_info,
			bool opa)
{
	int valid = 0;
	u32 qp_num = qp_info->qp->qp_num;

	/* Make sure MAD base version is understood */
	if (mad_hdr->base_version != IB_MGMT_BASE_VERSION &&
	    (!opa || mad_hdr->base_version != OPA_MGMT_BASE_VERSION)) {
		pr_err("MAD received with unsupported base version %u %s\n",
		       mad_hdr->base_version, opa ? "(opa)" : "");
		goto out;
	}

	/* Filter SMI packets sent to other than QP0 */
	if ((mad_hdr->mgmt_class == IB_MGMT_CLASS_SUBN_LID_ROUTED) ||
	    (mad_hdr->mgmt_class == IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE)) {
		if (qp_num == 0)
			valid = 1;
	} else {
		/* CM attributes other than ClassPortInfo only use Send method */
		if ((mad_hdr->mgmt_class == IB_MGMT_CLASS_CM) &&
		    (mad_hdr->attr_id != IB_MGMT_CLASSPORTINFO_ATTR_ID) &&
		    (mad_hdr->method != IB_MGMT_METHOD_SEND))
			goto out;
		/* Filter GSI packets sent to QP0 */
		if (qp_num != 0)
			valid = 1;
	}

out:
	return valid;
}
static int is_rmpp_data_mad(const struct ib_mad_agent_private *mad_agent_priv,
			    const struct ib_mad_hdr *mad_hdr)
{
	struct ib_rmpp_mad *rmpp_mad;

	rmpp_mad = (struct ib_rmpp_mad *)mad_hdr;
	return !mad_agent_priv->agent.rmpp_version ||
		!ib_mad_kernel_rmpp_agent(&mad_agent_priv->agent) ||
		!(ib_get_rmpp_flags(&rmpp_mad->rmpp_hdr) &
		  IB_MGMT_RMPP_FLAG_ACTIVE) ||
		(rmpp_mad->rmpp_hdr.rmpp_type == IB_MGMT_RMPP_TYPE_DATA);
}
static inline int rcv_has_same_class(const struct ib_mad_send_wr_private *wr,
				     const struct ib_mad_recv_wc *rwc)
{
	return ((struct ib_mad_hdr *)(wr->send_buf.mad))->mgmt_class ==
		rwc->recv_buf.mad->mad_hdr.mgmt_class;
}
static inline int
rcv_has_same_gid(const struct ib_mad_agent_private *mad_agent_priv,
		 const struct ib_mad_send_wr_private *wr,
		 const struct ib_mad_recv_wc *rwc)
{
	struct rdma_ah_attr attr;
	u8 send_resp, rcv_resp;
	union ib_gid sgid;
	struct ib_device *device = mad_agent_priv->agent.device;
	u32 port_num = mad_agent_priv->agent.port_num;
	u8 lmc;
	bool has_grh;

	send_resp = ib_response_mad((struct ib_mad_hdr *)wr->send_buf.mad);
	rcv_resp = ib_response_mad(&rwc->recv_buf.mad->mad_hdr);

	if (send_resp == rcv_resp)
		/* both requests, or both responses. GIDs different */
		return 0;

	if (rdma_query_ah(wr->send_buf.ah, &attr))
		/* Assume not equal, to avoid false positives. */
		return 0;

	has_grh = !!(rdma_ah_get_ah_flags(&attr) & IB_AH_GRH);
	if (has_grh != !!(rwc->wc->wc_flags & IB_WC_GRH))
		/* one has GID, other does not.  Assume different */
		return 0;

	if (!send_resp && rcv_resp) {
		/* is request/response. */
		if (!has_grh) {
			if (ib_get_cached_lmc(device, port_num, &lmc))
				return 0;
			return (!lmc || !((rdma_ah_get_path_bits(&attr) ^
					   rwc->wc->dlid_path_bits) &
					  ((1 << lmc) - 1)));
		} else {
			const struct ib_global_route *grh =
					rdma_ah_read_grh(&attr);

			if (rdma_query_gid(device, port_num,
					   grh->sgid_index, &sgid))
				return 0;
			return !memcmp(sgid.raw, rwc->recv_buf.grh->dgid.raw,
				       16);
		}
	}

	if (!has_grh)
		return rdma_ah_get_dlid(&attr) == rwc->wc->slid;
	else
		return !memcmp(rdma_ah_read_grh(&attr)->dgid.raw,
			       rwc->recv_buf.grh->sgid.raw,
			       16);
}
static inline int is_direct(u8 class)
{
	return (class == IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE);
}
struct ib_mad_send_wr_private*
ib_find_send_mad(const struct ib_mad_agent_private *mad_agent_priv,
		 const struct ib_mad_recv_wc *wc)
{
	struct ib_mad_send_wr_private *wr;
	const struct ib_mad_hdr *mad_hdr;

	mad_hdr = &wc->recv_buf.mad->mad_hdr;

	list_for_each_entry(wr, &mad_agent_priv->wait_list, agent_list) {
		if ((wr->tid == mad_hdr->tid) &&
		    rcv_has_same_class(wr, wc) &&
		    /*
		     * Don't check GID for direct routed MADs.
		     * These might have permissive LIDs.
		     */
		    (is_direct(mad_hdr->mgmt_class) ||
		     rcv_has_same_gid(mad_agent_priv, wr, wc)))
			return (wr->state != IB_MAD_STATE_CANCELED) ? wr : NULL;
	}

	list_for_each_entry(wr, &mad_agent_priv->backlog_list, agent_list) {
		if ((wr->tid == mad_hdr->tid) &&
		    rcv_has_same_class(wr, wc) &&
		    /*
		     * Don't check GID for direct routed MADs.
		     * These might have permissive LIDs.
		     */
		    (is_direct(mad_hdr->mgmt_class) ||
		     rcv_has_same_gid(mad_agent_priv, wr, wc)))
			return (wr->state != IB_MAD_STATE_CANCELED) ? wr : NULL;
	}

	/*
	 * It's possible to receive the response before we've
	 * been notified that the send has completed
	 */
	list_for_each_entry(wr, &mad_agent_priv->send_list, agent_list) {
		if (is_rmpp_data_mad(mad_agent_priv, wr->send_buf.mad) &&
		    wr->tid == mad_hdr->tid &&
		    wr->timeout &&
		    rcv_has_same_class(wr, wc) &&
		    /*
		     * Don't check GID for direct routed MADs.
		     * These might have permissive LIDs.
		     */
		    (is_direct(mad_hdr->mgmt_class) ||
		     rcv_has_same_gid(mad_agent_priv, wr, wc)))
			/* Verify request has not been canceled */
			return (wr->state != IB_MAD_STATE_CANCELED) ? wr : NULL;
	}
	return NULL;
}
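/*
 * Note the search order above: wait_list (sent, awaiting a response),
 * backlog_list (queued by flow control), then send_list, since a
 * response can arrive before the send completion has been processed.
 * A match in the CANCELED state is deliberately reported as NULL.
 */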
static void
process_backlog_mads(struct ib_mad_agent_private *mad_agent_priv)
{
	struct ib_mad_send_wr_private *mad_send_wr;
	struct ib_mad_send_wc mad_send_wc = {};
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&mad_agent_priv->lock, flags);
	while (!list_empty(&mad_agent_priv->backlog_list) &&
	       (mad_agent_priv->sol_fc_send_count +
			mad_agent_priv->sol_fc_wait_count <
		mad_agent_priv->sol_fc_max)) {
		mad_send_wr = list_entry(mad_agent_priv->backlog_list.next,
					 struct ib_mad_send_wr_private,
					 agent_list);
		change_mad_state(mad_send_wr, IB_MAD_STATE_SEND_START);
		spin_unlock_irqrestore(&mad_agent_priv->lock, flags);
		ret = ib_send_mad(mad_send_wr);
		if (ret) {
			spin_lock_irqsave(&mad_agent_priv->lock, flags);
			deref_mad_agent(mad_agent_priv);
			change_mad_state(mad_send_wr, IB_MAD_STATE_DONE);
			spin_unlock_irqrestore(&mad_agent_priv->lock, flags);
			mad_send_wc.send_buf = &mad_send_wr->send_buf;
			mad_send_wc.status = IB_WC_LOC_QP_OP_ERR;
			mad_agent_priv->agent.send_handler(
				&mad_agent_priv->agent, &mad_send_wc);
		}
		spin_lock_irqsave(&mad_agent_priv->lock, flags);
	}
	spin_unlock_irqrestore(&mad_agent_priv->lock, flags);
}
void ib_mark_mad_done(struct ib_mad_send_wr_private *mad_send_wr)
{
	mad_send_wr->timeout = 0;
	if (mad_send_wr->state == IB_MAD_STATE_WAIT_RESP ||
	    mad_send_wr->state == IB_MAD_STATE_QUEUED)
		change_mad_state(mad_send_wr, IB_MAD_STATE_DONE);
	else
		change_mad_state(mad_send_wr, IB_MAD_STATE_EARLY_RESP);
}
static void ib_mad_complete_recv(struct ib_mad_agent_private *mad_agent_priv,
				 struct ib_mad_recv_wc *mad_recv_wc)
{
	struct ib_mad_send_wr_private *mad_send_wr;
	struct ib_mad_send_wc mad_send_wc;
	unsigned long flags;
	int ret;
	bool is_mad_done;

	INIT_LIST_HEAD(&mad_recv_wc->rmpp_list);
	ret = ib_mad_enforce_security(mad_agent_priv,
				      mad_recv_wc->wc->pkey_index);
	if (ret) {
		ib_free_recv_mad(mad_recv_wc);
		deref_mad_agent(mad_agent_priv);
		return;
	}

	list_add(&mad_recv_wc->recv_buf.list, &mad_recv_wc->rmpp_list);
	if (ib_mad_kernel_rmpp_agent(&mad_agent_priv->agent)) {
		mad_recv_wc = ib_process_rmpp_recv_wc(mad_agent_priv,
						      mad_recv_wc);
		if (!mad_recv_wc) {
			deref_mad_agent(mad_agent_priv);
			return;
		}
	}
	/* Complete corresponding request */
	if (ib_response_mad(&mad_recv_wc->recv_buf.mad->mad_hdr)) {
		spin_lock_irqsave(&mad_agent_priv->lock, flags);
		mad_send_wr = ib_find_send_mad(mad_agent_priv, mad_recv_wc);
		if (!mad_send_wr) {
			spin_unlock_irqrestore(&mad_agent_priv->lock, flags);
			if (!ib_mad_kernel_rmpp_agent(&mad_agent_priv->agent)
			   && ib_is_mad_class_rmpp(mad_recv_wc->recv_buf.mad->mad_hdr.mgmt_class)
			   && (ib_get_rmpp_flags(&((struct ib_rmpp_mad *)mad_recv_wc->recv_buf.mad)->rmpp_hdr)
					& IB_MGMT_RMPP_FLAG_ACTIVE)) {
				/* user rmpp is in effect
				 * and this is an active RMPP MAD
				 */
				mad_agent_priv->agent.recv_handler(
						&mad_agent_priv->agent, NULL,
						mad_recv_wc);
				deref_mad_agent(mad_agent_priv);
			} else {
				/* not user rmpp, revert to normal behavior and
				 * drop the mad
				 */
				ib_free_recv_mad(mad_recv_wc);
				deref_mad_agent(mad_agent_priv);
			}
		} else {
			ib_mark_mad_done(mad_send_wr);
			is_mad_done = (mad_send_wr->state == IB_MAD_STATE_DONE);
			spin_unlock_irqrestore(&mad_agent_priv->lock, flags);

			/* Defined behavior is to complete response before request */
			mad_agent_priv->agent.recv_handler(
					&mad_agent_priv->agent,
					&mad_send_wr->send_buf,
					mad_recv_wc);
			deref_mad_agent(mad_agent_priv);

			if (is_mad_done) {
				mad_send_wc.status = IB_WC_SUCCESS;
				mad_send_wc.vendor_err = 0;
				mad_send_wc.send_buf = &mad_send_wr->send_buf;
				ib_mad_complete_send_wr(mad_send_wr,
							&mad_send_wc);
			}
		}
	} else {
		mad_agent_priv->agent.recv_handler(&mad_agent_priv->agent, NULL,
						   mad_recv_wc);
		deref_mad_agent(mad_agent_priv);
	}
}
static enum smi_action handle_ib_smi(const struct ib_mad_port_private *port_priv,
				     const struct ib_mad_qp_info *qp_info,
				     const struct ib_wc *wc,
				     u32 port_num,
				     struct ib_mad_private *recv,
				     struct ib_mad_private *response)
{
	enum smi_forward_action retsmi;
	struct ib_smp *smp = (struct ib_smp *)recv->mad;

	trace_ib_mad_handle_ib_smi(smp);

	if (smi_handle_dr_smp_recv(smp,
				   rdma_cap_ib_switch(port_priv->device),
				   port_num,
				   port_priv->device->phys_port_cnt) ==
				   IB_SMI_DISCARD)
		return IB_SMI_DISCARD;

	retsmi = smi_check_forward_dr_smp(smp);
	if (retsmi == IB_SMI_LOCAL)
		return IB_SMI_HANDLE;

	if (retsmi == IB_SMI_SEND) { /* don't forward */
		if (smi_handle_dr_smp_send(smp,
					   rdma_cap_ib_switch(port_priv->device),
					   port_num) == IB_SMI_DISCARD)
			return IB_SMI_DISCARD;

		if (smi_check_local_smp(smp, port_priv->device) == IB_SMI_DISCARD)
			return IB_SMI_DISCARD;
	} else if (rdma_cap_ib_switch(port_priv->device)) {
		/* forward case for switches */
		memcpy(response, recv, mad_priv_size(response));
		response->header.recv_wc.wc = &response->header.wc;
		response->header.recv_wc.recv_buf.mad = (struct ib_mad *)response->mad;
		response->header.recv_wc.recv_buf.grh = &response->grh;

		agent_send_response((const struct ib_mad_hdr *)response->mad,
				    &response->grh, wc,
				    port_priv->device,
				    smi_get_fwd_port(smp),
				    qp_info->qp->qp_num,
				    response->mad_size,
				    false);

		return IB_SMI_DISCARD;
	}
	return IB_SMI_HANDLE;
}
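/*
 * Build a "method/attribute not supported" GET_RESP for a Get or Set
 * request that no agent claimed.  Returns true if a response was built.
 */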
static bool generate_unmatched_resp(const struct ib_mad_private *recv,
				    struct ib_mad_private *response,
				    size_t *resp_len, bool opa)
{
	const struct ib_mad_hdr *recv_hdr = (const struct ib_mad_hdr *)recv->mad;
	struct ib_mad_hdr *resp_hdr = (struct ib_mad_hdr *)response->mad;

	if (recv_hdr->method == IB_MGMT_METHOD_GET ||
	    recv_hdr->method == IB_MGMT_METHOD_SET) {
		memcpy(response, recv, mad_priv_size(response));
		response->header.recv_wc.wc = &response->header.wc;
		response->header.recv_wc.recv_buf.mad = (struct ib_mad *)response->mad;
		response->header.recv_wc.recv_buf.grh = &response->grh;
		resp_hdr->method = IB_MGMT_METHOD_GET_RESP;
		resp_hdr->status = cpu_to_be16(IB_MGMT_MAD_STATUS_UNSUPPORTED_METHOD_ATTRIB);
		if (recv_hdr->mgmt_class == IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE)
			resp_hdr->status |= IB_SMP_DIRECTION;

		if (opa && recv_hdr->base_version == OPA_MGMT_BASE_VERSION) {
			if (recv_hdr->mgmt_class ==
			    IB_MGMT_CLASS_SUBN_LID_ROUTED ||
			    recv_hdr->mgmt_class ==
			    IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE)
				*resp_len = opa_get_smp_header_size(
							(struct opa_smp *)recv->mad);
			else
				*resp_len = sizeof(struct ib_mad_hdr);
		}

		return true;
	} else {
		return false;
	}
}
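/*
 * OPA counterpart of handle_ib_smi(): same consume/discard/forward logic,
 * but operating on OPA SMPs.
 */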
static enum smi_action
handle_opa_smi(struct ib_mad_port_private *port_priv,
	       struct ib_mad_qp_info *qp_info,
	       struct ib_wc *wc,
	       u32 port_num,
	       struct ib_mad_private *recv,
	       struct ib_mad_private *response)
{
	enum smi_forward_action retsmi;
	struct opa_smp *smp = (struct opa_smp *)recv->mad;

	trace_ib_mad_handle_opa_smi(smp);

	if (opa_smi_handle_dr_smp_recv(smp,
				       rdma_cap_ib_switch(port_priv->device),
				       port_num,
				       port_priv->device->phys_port_cnt) ==
				       IB_SMI_DISCARD)
		return IB_SMI_DISCARD;

	retsmi = opa_smi_check_forward_dr_smp(smp);
	if (retsmi == IB_SMI_LOCAL)
		return IB_SMI_HANDLE;

	if (retsmi == IB_SMI_SEND) { /* don't forward */
		if (opa_smi_handle_dr_smp_send(smp,
					       rdma_cap_ib_switch(port_priv->device),
					       port_num) == IB_SMI_DISCARD)
			return IB_SMI_DISCARD;

		if (opa_smi_check_local_smp(smp, port_priv->device) ==
		    IB_SMI_DISCARD)
			return IB_SMI_DISCARD;

	} else if (rdma_cap_ib_switch(port_priv->device)) {
		/* forward case for switches */
		memcpy(response, recv, mad_priv_size(response));
		response->header.recv_wc.wc = &response->header.wc;
		response->header.recv_wc.recv_buf.opa_mad =
				(struct opa_mad *)response->mad;
		response->header.recv_wc.recv_buf.grh = &response->grh;

		agent_send_response((const struct ib_mad_hdr *)response->mad,
				    &response->grh, wc,
				    port_priv->device,
				    opa_smi_get_fwd_port(smp),
				    qp_info->qp->qp_num,
				    recv->header.wc.byte_len,
				    true);

		return IB_SMI_DISCARD;
	}

	return IB_SMI_HANDLE;
}
static enum smi_action
handle_smi(struct ib_mad_port_private *port_priv,
	   struct ib_mad_qp_info *qp_info,
	   struct ib_wc *wc,
	   u32 port_num,
	   struct ib_mad_private *recv,
	   struct ib_mad_private *response,
	   bool opa)
{
	struct ib_mad_hdr *mad_hdr = (struct ib_mad_hdr *)recv->mad;

	if (opa && mad_hdr->base_version == OPA_MGMT_BASE_VERSION &&
	    mad_hdr->class_version == OPA_SM_CLASS_VERSION)
		return handle_opa_smi(port_priv, qp_info, wc, port_num, recv,
				      response);

	return handle_ib_smi(port_priv, qp_info, wc, port_num, recv, response);
}
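/*
 * Completion handler for receive CQEs: unmap the buffer, validate the MAD,
 * let SMI/driver code claim it, then hand it to the matching agent or
 * generate an unmatched response.  Always reposts a receive buffer.
 */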
static void ib_mad_recv_done(struct ib_cq *cq, struct ib_wc *wc)
{
	struct ib_mad_port_private *port_priv = cq->cq_context;
	struct ib_mad_list_head *mad_list =
		container_of(wc->wr_cqe, struct ib_mad_list_head, cqe);
	struct ib_mad_qp_info *qp_info;
	struct ib_mad_private_header *mad_priv_hdr;
	struct ib_mad_private *recv, *response = NULL;
	struct ib_mad_agent_private *mad_agent;
	u32 port_num;
	int ret = IB_MAD_RESULT_SUCCESS;
	size_t mad_size;
	u16 resp_mad_pkey_index = 0;
	bool opa;

	if (list_empty_careful(&port_priv->port_list))
		return;

	if (wc->status != IB_WC_SUCCESS) {
		/*
		 * Receive errors indicate that the QP has entered the error
		 * state - error handling/shutdown code will cleanup
		 */
		return;
	}

	qp_info = mad_list->mad_queue->qp_info;
	dequeue_mad(mad_list);

	opa = rdma_cap_opa_mad(qp_info->port_priv->device,
			       qp_info->port_priv->port_num);

	mad_priv_hdr = container_of(mad_list, struct ib_mad_private_header,
				    mad_list);
	recv = container_of(mad_priv_hdr, struct ib_mad_private, header);
	ib_dma_unmap_single(port_priv->device,
			    recv->header.mapping,
			    mad_priv_dma_size(recv),
			    DMA_FROM_DEVICE);

	/* Setup MAD receive work completion from "normal" work completion */
	recv->header.wc = *wc;
	recv->header.recv_wc.wc = &recv->header.wc;

	if (opa && ((struct ib_mad_hdr *)(recv->mad))->base_version == OPA_MGMT_BASE_VERSION) {
		recv->header.recv_wc.mad_len = wc->byte_len - sizeof(struct ib_grh);
		recv->header.recv_wc.mad_seg_size = sizeof(struct opa_mad);
	} else {
		recv->header.recv_wc.mad_len = sizeof(struct ib_mad);
		recv->header.recv_wc.mad_seg_size = sizeof(struct ib_mad);
	}

	recv->header.recv_wc.recv_buf.mad = (struct ib_mad *)recv->mad;
	recv->header.recv_wc.recv_buf.grh = &recv->grh;

	/* Validate MAD */
	if (!validate_mad((const struct ib_mad_hdr *)recv->mad, qp_info, opa))
		goto out;

	trace_ib_mad_recv_done_handler(qp_info, wc,
				       (struct ib_mad_hdr *)recv->mad);

	mad_size = recv->mad_size;
	response = alloc_mad_private(mad_size, GFP_KERNEL);
	if (!response)
		goto out;

	if (rdma_cap_ib_switch(port_priv->device))
		port_num = wc->port_num;
	else
		port_num = port_priv->port_num;

	if (((struct ib_mad_hdr *)recv->mad)->mgmt_class ==
	    IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE) {
		if (handle_smi(port_priv, qp_info, wc, port_num, recv,
			       response, opa)
		    == IB_SMI_DISCARD)
			goto out;
	}

	/* Give driver "right of first refusal" on incoming MAD */
	if (port_priv->device->ops.process_mad) {
		ret = port_priv->device->ops.process_mad(
			port_priv->device, 0, port_priv->port_num, wc,
			&recv->grh, (const struct ib_mad *)recv->mad,
			(struct ib_mad *)response->mad, &mad_size,
			&resp_mad_pkey_index);

		if (opa)
			wc->pkey_index = resp_mad_pkey_index;

		if (ret & IB_MAD_RESULT_SUCCESS) {
			if (ret & IB_MAD_RESULT_CONSUMED)
				goto out;
			if (ret & IB_MAD_RESULT_REPLY) {
				agent_send_response((const struct ib_mad_hdr *)response->mad,
						    &recv->grh, wc,
						    port_priv->device,
						    port_num,
						    qp_info->qp->qp_num,
						    mad_size, opa);
				goto out;
			}
		}
	}

	mad_agent = find_mad_agent(port_priv, (const struct ib_mad_hdr *)recv->mad);
	if (mad_agent) {
		trace_ib_mad_recv_done_agent(mad_agent);
		ib_mad_complete_recv(mad_agent, &recv->header.recv_wc);
		/*
		 * recv is freed up in error cases in ib_mad_complete_recv
		 * or via recv_handler in ib_mad_complete_recv()
		 */
		recv = NULL;
	} else if ((ret & IB_MAD_RESULT_SUCCESS) &&
		   generate_unmatched_resp(recv, response, &mad_size, opa)) {
		agent_send_response((const struct ib_mad_hdr *)response->mad, &recv->grh, wc,
				    port_priv->device, port_num,
				    qp_info->qp->qp_num, mad_size, opa);
	}

out:
	/* Post another receive request for this QP */
	if (response) {
		ib_mad_post_receive_mads(qp_info, response);
		kfree(recv);
	} else
		ib_mad_post_receive_mads(qp_info, recv);
}
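/*
 * Re-arm (or cancel) the timeout work to fire when the request at the head
 * of the wait list expires.
 */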
static void adjust_timeout(struct ib_mad_agent_private *mad_agent_priv)
{
	struct ib_mad_send_wr_private *mad_send_wr;
	unsigned long delay;

	if (list_empty(&mad_agent_priv->wait_list)) {
		cancel_delayed_work(&mad_agent_priv->timed_work);
	} else {
		mad_send_wr = list_entry(mad_agent_priv->wait_list.next,
					 struct ib_mad_send_wr_private,
					 agent_list);

		if (time_after(mad_agent_priv->timeout,
			       mad_send_wr->timeout)) {
			mad_agent_priv->timeout = mad_send_wr->timeout;
			delay = mad_send_wr->timeout - jiffies;
			if ((long)delay <= 0)
				delay = 1;
			mod_delayed_work(mad_agent_priv->qp_info->port_priv->wq,
					 &mad_agent_priv->timed_work, delay);
		}
	}
}
static void wait_for_response(struct ib_mad_send_wr_private *mad_send_wr)
{
	struct ib_mad_agent_private *mad_agent_priv;
	unsigned long delay;

	mad_agent_priv = mad_send_wr->mad_agent_priv;
	delay = mad_send_wr->timeout;
	change_mad_state(mad_send_wr, IB_MAD_STATE_WAIT_RESP);

	/* Reschedule a work item if we have a shorter timeout */
	if (mad_agent_priv->wait_list.next == &mad_send_wr->agent_list)
		mod_delayed_work(mad_agent_priv->qp_info->port_priv->wq,
				 &mad_agent_priv->timed_work, delay);
}
void ib_reset_mad_timeout(struct ib_mad_send_wr_private *mad_send_wr,
			  unsigned long timeout_ms)
{
	mad_send_wr->timeout = msecs_to_jiffies(timeout_ms);
	wait_for_response(mad_send_wr);
}
/*
 * Process a send work completion
 */
void ib_mad_complete_send_wr(struct ib_mad_send_wr_private *mad_send_wr,
			     struct ib_mad_send_wc *mad_send_wc)
{
	struct ib_mad_agent_private *mad_agent_priv;
	unsigned long flags;
	int ret;

	mad_agent_priv = mad_send_wr->mad_agent_priv;
	spin_lock_irqsave(&mad_agent_priv->lock, flags);
	if (ib_mad_kernel_rmpp_agent(&mad_agent_priv->agent)) {
		ret = ib_process_rmpp_send_wc(mad_send_wr, mad_send_wc);
		if (ret == IB_RMPP_RESULT_CONSUMED)
			goto done;
	} else
		ret = IB_RMPP_RESULT_UNHANDLED;

	if (mad_send_wr->state == IB_MAD_STATE_CANCELED)
		mad_send_wc->status = IB_WC_WR_FLUSH_ERR;
	else if (mad_send_wr->state == IB_MAD_STATE_SEND_START &&
		 mad_send_wr->timeout) {
		wait_for_response(mad_send_wr);
		goto done;
	}

	/* Remove send from MAD agent and notify client of completion */
	if (mad_send_wr->state != IB_MAD_STATE_DONE)
		change_mad_state(mad_send_wr, IB_MAD_STATE_DONE);
	adjust_timeout(mad_agent_priv);
	spin_unlock_irqrestore(&mad_agent_priv->lock, flags);

	if (ret == IB_RMPP_RESULT_INTERNAL) {
		ib_rmpp_send_handler(mad_send_wc);
	} else {
		if (mad_send_wr->is_solicited_fc)
			process_backlog_mads(mad_agent_priv);
		mad_agent_priv->agent.send_handler(&mad_agent_priv->agent,
						   mad_send_wc);
	}

	/* Release reference on agent taken when sending */
	deref_mad_agent(mad_agent_priv);
	return;
done:
	spin_unlock_irqrestore(&mad_agent_priv->lock, flags);
}
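/*
 * Completion handler for send CQEs: unmap the request's buffers, complete
 * it, and post the next send from the overflow list, if any.
 */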
static void ib_mad_send_done(struct ib_cq *cq, struct ib_wc *wc)
{
	struct ib_mad_port_private *port_priv = cq->cq_context;
	struct ib_mad_list_head *mad_list =
		container_of(wc->wr_cqe, struct ib_mad_list_head, cqe);
	struct ib_mad_send_wr_private *mad_send_wr, *queued_send_wr;
	struct ib_mad_qp_info *qp_info;
	struct ib_mad_queue *send_queue;
	struct ib_mad_send_wc mad_send_wc;
	unsigned long flags;
	int ret;

	if (list_empty_careful(&port_priv->port_list))
		return;

	if (wc->status != IB_WC_SUCCESS) {
		if (!ib_mad_send_error(port_priv, wc))
			return;
	}

	mad_send_wr = container_of(mad_list, struct ib_mad_send_wr_private,
				   mad_list);
	send_queue = mad_list->mad_queue;
	qp_info = send_queue->qp_info;

	trace_ib_mad_send_done_agent(mad_send_wr->mad_agent_priv);
	trace_ib_mad_send_done_handler(mad_send_wr, wc);

retry:
	ib_dma_unmap_single(mad_send_wr->send_buf.mad_agent->device,
			    mad_send_wr->header_mapping,
			    mad_send_wr->sg_list[0].length, DMA_TO_DEVICE);
	ib_dma_unmap_single(mad_send_wr->send_buf.mad_agent->device,
			    mad_send_wr->payload_mapping,
			    mad_send_wr->sg_list[1].length, DMA_TO_DEVICE);
	queued_send_wr = NULL;
	spin_lock_irqsave(&send_queue->lock, flags);
	list_del(&mad_list->list);

	/* Move queued send to the send queue */
	if (send_queue->count-- > send_queue->max_active) {
		mad_list = container_of(qp_info->overflow_list.next,
					struct ib_mad_list_head, list);
		queued_send_wr = container_of(mad_list,
					      struct ib_mad_send_wr_private,
					      mad_list);
		list_move_tail(&mad_list->list, &send_queue->list);
	}
	spin_unlock_irqrestore(&send_queue->lock, flags);

	mad_send_wc.send_buf = &mad_send_wr->send_buf;
	mad_send_wc.status = wc->status;
	mad_send_wc.vendor_err = wc->vendor_err;
	ib_mad_complete_send_wr(mad_send_wr, &mad_send_wc);

	if (queued_send_wr) {
		trace_ib_mad_send_done_resend(queued_send_wr, qp_info);
		ret = ib_post_send(qp_info->qp, &queued_send_wr->send_wr.wr,
				   NULL);
		if (ret) {
			dev_err(&port_priv->device->dev,
				"ib_post_send failed: %d\n", ret);
			mad_send_wr = queued_send_wr;
			wc->status = IB_WC_LOC_QP_OP_ERR;
			goto retry;
		}
	}
}
static void mark_sends_for_retry(struct ib_mad_qp_info *qp_info)
{
	struct ib_mad_send_wr_private *mad_send_wr;
	struct ib_mad_list_head *mad_list;
	unsigned long flags;

	spin_lock_irqsave(&qp_info->send_queue.lock, flags);
	list_for_each_entry(mad_list, &qp_info->send_queue.list, list) {
		mad_send_wr = container_of(mad_list,
					   struct ib_mad_send_wr_private,
					   mad_list);
		mad_send_wr->retry = 1;
	}
	spin_unlock_irqrestore(&qp_info->send_queue.lock, flags);
}
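/*
 * Handle a send completion error.  Returns true if the offending send
 * should be completed as failed, false if it was successfully reposted.
 */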
static bool ib_mad_send_error(struct ib_mad_port_private *port_priv,
			      struct ib_wc *wc)
{
	struct ib_mad_list_head *mad_list =
		container_of(wc->wr_cqe, struct ib_mad_list_head, cqe);
	struct ib_mad_qp_info *qp_info = mad_list->mad_queue->qp_info;
	struct ib_mad_send_wr_private *mad_send_wr;
	int ret;

	/*
	 * Send errors will transition the QP to SQE - move
	 * QP to RTS and repost flushed work requests
	 */
	mad_send_wr = container_of(mad_list, struct ib_mad_send_wr_private,
				   mad_list);
	if (wc->status == IB_WC_WR_FLUSH_ERR) {
		if (mad_send_wr->retry) {
			/* Repost send */
			mad_send_wr->retry = 0;
			trace_ib_mad_error_handler(mad_send_wr, qp_info);
			ret = ib_post_send(qp_info->qp, &mad_send_wr->send_wr.wr,
					   NULL);
			if (!ret)
				return false;
		}
	} else {
		struct ib_qp_attr *attr;

		/* Transition QP to RTS and fail offending send */
		attr = kmalloc(sizeof *attr, GFP_KERNEL);
		if (attr) {
			attr->qp_state = IB_QPS_RTS;
			attr->cur_qp_state = IB_QPS_SQE;
			ret = ib_modify_qp(qp_info->qp, attr,
					   IB_QP_STATE | IB_QP_CUR_STATE);
			kfree(attr);
			if (ret)
				dev_err(&port_priv->device->dev,
					"%s - ib_modify_qp to RTS: %d\n",
					__func__, ret);
			else
				mark_sends_for_retry(qp_info);
		}
	}

	return true;
}
static void clear_mad_error_list(struct list_head *list,
				 enum ib_wc_status wc_status,
				 struct ib_mad_agent_private *mad_agent_priv)
{
	struct ib_mad_send_wr_private *mad_send_wr, *n;
	struct ib_mad_send_wc mad_send_wc;

	mad_send_wc.status = wc_status;
	mad_send_wc.vendor_err = 0;

	list_for_each_entry_safe(mad_send_wr, n, list, agent_list) {
		mad_send_wc.send_buf = &mad_send_wr->send_buf;
		mad_agent_priv->agent.send_handler(&mad_agent_priv->agent,
						   &mad_send_wc);
		deref_mad_agent(mad_agent_priv);
	}
}
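/*
 * Cancel every outstanding send for an agent and report each one to the
 * client with IB_WC_WR_FLUSH_ERR.  Called when unregistering the agent.
 */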
static void cancel_mads(struct ib_mad_agent_private *mad_agent_priv)
{
	unsigned long flags;
	struct ib_mad_send_wr_private *mad_send_wr, *temp_mad_send_wr;
	struct list_head cancel_list;

	INIT_LIST_HEAD(&cancel_list);

	spin_lock_irqsave(&mad_agent_priv->lock, flags);
	list_for_each_entry_safe(mad_send_wr, temp_mad_send_wr,
				 &mad_agent_priv->send_list, agent_list)
		change_mad_state(mad_send_wr, IB_MAD_STATE_CANCELED);

	/* Empty wait & backlog list to prevent receives from finding request */
	list_for_each_entry_safe(mad_send_wr, temp_mad_send_wr,
				 &mad_agent_priv->wait_list, agent_list) {
		change_mad_state(mad_send_wr, IB_MAD_STATE_DONE);
		list_add_tail(&mad_send_wr->agent_list, &cancel_list);
	}

	list_for_each_entry_safe(mad_send_wr, temp_mad_send_wr,
				 &mad_agent_priv->backlog_list, agent_list) {
		change_mad_state(mad_send_wr, IB_MAD_STATE_DONE);
		list_add_tail(&mad_send_wr->agent_list, &cancel_list);
	}

	spin_unlock_irqrestore(&mad_agent_priv->lock, flags);
	/* Report all cancelled requests */
	clear_mad_error_list(&cancel_list, IB_WC_WR_FLUSH_ERR, mad_agent_priv);
}
static struct ib_mad_send_wr_private*
find_send_wr(struct ib_mad_agent_private *mad_agent_priv,
	     struct ib_mad_send_buf *send_buf)
{
	struct ib_mad_send_wr_private *mad_send_wr;

	list_for_each_entry(mad_send_wr, &mad_agent_priv->wait_list,
			    agent_list) {
		if (&mad_send_wr->send_buf == send_buf)
			return mad_send_wr;
	}

	list_for_each_entry(mad_send_wr, &mad_agent_priv->send_list,
			    agent_list) {
		if (is_rmpp_data_mad(mad_agent_priv,
				     mad_send_wr->send_buf.mad) &&
		    &mad_send_wr->send_buf == send_buf)
			return mad_send_wr;
	}

	list_for_each_entry(mad_send_wr, &mad_agent_priv->backlog_list,
			    agent_list) {
		if (&mad_send_wr->send_buf == send_buf)
			return mad_send_wr;
	}

	return NULL;
}
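/*
 * Cancel an outstanding MAD (timeout_ms == 0) or give it a new timeout.
 * A request that is still actively being transmitted only has its timeout
 * value updated; one that is already waiting is re-queued on the wait list
 * with the new timeout.  Cancelling is the timeout_ms == 0 case, e.g.:
 *
 *	ib_modify_mad(send_buf, 0);
 */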
int ib_modify_mad(struct ib_mad_send_buf *send_buf, u32 timeout_ms)
{
	struct ib_mad_agent_private *mad_agent_priv;
	struct ib_mad_send_wr_private *mad_send_wr;
	unsigned long flags;
	bool active;

	if (!send_buf)
		return -EINVAL;

	mad_agent_priv = container_of(send_buf->mad_agent,
				      struct ib_mad_agent_private, agent);
	spin_lock_irqsave(&mad_agent_priv->lock, flags);
	mad_send_wr = find_send_wr(mad_agent_priv, send_buf);
	if (!mad_send_wr || mad_send_wr->state == IB_MAD_STATE_CANCELED) {
		spin_unlock_irqrestore(&mad_agent_priv->lock, flags);
		return -EINVAL;
	}

	active = ((mad_send_wr->state == IB_MAD_STATE_SEND_START) ||
		  (mad_send_wr->state == IB_MAD_STATE_EARLY_RESP) ||
		  (mad_send_wr->state == IB_MAD_STATE_QUEUED && timeout_ms));
	if (!timeout_ms)
		change_mad_state(mad_send_wr, IB_MAD_STATE_CANCELED);

	mad_send_wr->send_buf.timeout_ms = timeout_ms;
	if (active)
		mad_send_wr->timeout = msecs_to_jiffies(timeout_ms);
	else
		ib_reset_mad_timeout(mad_send_wr, timeout_ms);

	spin_unlock_irqrestore(&mad_agent_priv->lock, flags);
	return 0;
}
EXPORT_SYMBOL(ib_modify_mad);
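/*
 * Work handler that completes MADs which were processed entirely locally
 * (request and response on the same port), delivering the receive side
 * before the send completion.
 */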
static void local_completions(struct work_struct *work)
{
	struct ib_mad_agent_private *mad_agent_priv;
	struct ib_mad_local_private *local;
	struct ib_mad_agent_private *recv_mad_agent;
	unsigned long flags;
	int free_mad;
	struct ib_wc wc;
	struct ib_mad_send_wc mad_send_wc;
	bool opa;

	mad_agent_priv =
		container_of(work, struct ib_mad_agent_private, local_work);

	opa = rdma_cap_opa_mad(mad_agent_priv->qp_info->port_priv->device,
			       mad_agent_priv->qp_info->port_priv->port_num);

	spin_lock_irqsave(&mad_agent_priv->lock, flags);
	while (!list_empty(&mad_agent_priv->local_list)) {
		local = list_entry(mad_agent_priv->local_list.next,
				   struct ib_mad_local_private,
				   completion_list);
		list_del(&local->completion_list);
		spin_unlock_irqrestore(&mad_agent_priv->lock, flags);
		free_mad = 0;
		if (local->mad_priv) {
			u8 base_version;

			recv_mad_agent = local->recv_mad_agent;
			if (!recv_mad_agent) {
				dev_err(&mad_agent_priv->agent.device->dev,
					"No receive MAD agent for local completion\n");
				free_mad = 1;
				goto local_send_completion;
			}

			/*
			 * Defined behavior is to complete response
			 * before request
			 */
			build_smp_wc(recv_mad_agent->agent.qp,
				     local->mad_send_wr->send_wr.wr.wr_cqe,
				     be16_to_cpu(IB_LID_PERMISSIVE),
				     local->mad_send_wr->send_wr.pkey_index,
				     recv_mad_agent->agent.port_num, &wc);

			local->mad_priv->header.recv_wc.wc = &wc;

			base_version = ((struct ib_mad_hdr *)(local->mad_priv->mad))->base_version;
			if (opa && base_version == OPA_MGMT_BASE_VERSION) {
				local->mad_priv->header.recv_wc.mad_len = local->return_wc_byte_len;
				local->mad_priv->header.recv_wc.mad_seg_size = sizeof(struct opa_mad);
			} else {
				local->mad_priv->header.recv_wc.mad_len = sizeof(struct ib_mad);
				local->mad_priv->header.recv_wc.mad_seg_size = sizeof(struct ib_mad);
			}

			INIT_LIST_HEAD(&local->mad_priv->header.recv_wc.rmpp_list);
			list_add(&local->mad_priv->header.recv_wc.recv_buf.list,
				 &local->mad_priv->header.recv_wc.rmpp_list);
			local->mad_priv->header.recv_wc.recv_buf.grh = NULL;
			local->mad_priv->header.recv_wc.recv_buf.mad =
						(struct ib_mad *)local->mad_priv->mad;
			recv_mad_agent->agent.recv_handler(
						&recv_mad_agent->agent,
						&local->mad_send_wr->send_buf,
						&local->mad_priv->header.recv_wc);
			spin_lock_irqsave(&recv_mad_agent->lock, flags);
			deref_mad_agent(recv_mad_agent);
			spin_unlock_irqrestore(&recv_mad_agent->lock, flags);
		}

local_send_completion:
		/* Complete send */
		mad_send_wc.status = IB_WC_SUCCESS;
		mad_send_wc.vendor_err = 0;
		mad_send_wc.send_buf = &local->mad_send_wr->send_buf;
		mad_agent_priv->agent.send_handler(&mad_agent_priv->agent,
						   &mad_send_wc);

		spin_lock_irqsave(&mad_agent_priv->lock, flags);
		deref_mad_agent(mad_agent_priv);
		if (free_mad)
			kfree(local->mad_priv);
		kfree(local);
	}
	spin_unlock_irqrestore(&mad_agent_priv->lock, flags);
}
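/*
 * Resend a request that timed out.  Returns 0 if the retry was queued or
 * (re)posted, non-zero if the request is out of retries or the send failed.
 */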
static int retry_send(struct ib_mad_send_wr_private *mad_send_wr)
{
	int ret;

	if (!mad_send_wr->retries_left)
		return -ETIMEDOUT;

	mad_send_wr->retries_left--;
	mad_send_wr->send_buf.retries++;

	mad_send_wr->timeout = msecs_to_jiffies(mad_send_wr->send_buf.timeout_ms);
	if (mad_send_wr->is_solicited_fc &&
	    !list_empty(&mad_send_wr->mad_agent_priv->backlog_list)) {
		change_mad_state(mad_send_wr, IB_MAD_STATE_QUEUED);
		return 0;
	}

	if (ib_mad_kernel_rmpp_agent(&mad_send_wr->mad_agent_priv->agent)) {
		ret = ib_retry_rmpp(mad_send_wr);
		switch (ret) {
		case IB_RMPP_RESULT_UNHANDLED:
			ret = ib_send_mad(mad_send_wr);
			break;
		case IB_RMPP_RESULT_CONSUMED:
			ret = 0;
			break;
		default:
			ret = -ECOMM;
			break;
		}
	} else
		ret = ib_send_mad(mad_send_wr);

	if (!ret)
		change_mad_state(mad_send_wr, IB_MAD_STATE_SEND_START);

	return ret;
}
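/*
 * Delayed-work handler that expires requests on the wait list: canceled
 * requests are flushed, timed-out requests are retried, and those out of
 * retries are completed with IB_WC_RESP_TIMEOUT_ERR.
 */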
static void timeout_sends(struct work_struct *work)
{
	struct ib_mad_send_wr_private *mad_send_wr;
	struct ib_mad_agent_private *mad_agent_priv;
	struct list_head timeout_list;
	struct list_head cancel_list;
	struct list_head *list_item;
	unsigned long flags, delay;

	mad_agent_priv = container_of(work, struct ib_mad_agent_private,
				      timed_work.work);
	INIT_LIST_HEAD(&timeout_list);
	INIT_LIST_HEAD(&cancel_list);

	spin_lock_irqsave(&mad_agent_priv->lock, flags);
	while (!list_empty(&mad_agent_priv->wait_list)) {
		mad_send_wr = list_entry(mad_agent_priv->wait_list.next,
					 struct ib_mad_send_wr_private,
					 agent_list);

		if (time_after(mad_send_wr->timeout, jiffies)) {
			delay = mad_send_wr->timeout - jiffies;
			if ((long)delay <= 0)
				delay = 1;
			queue_delayed_work(mad_agent_priv->qp_info->
					   port_priv->wq,
					   &mad_agent_priv->timed_work, delay);
			break;
		}

		if (mad_send_wr->state == IB_MAD_STATE_CANCELED)
			list_item = &cancel_list;
		else if (retry_send(mad_send_wr))
			list_item = &timeout_list;
		else
			continue;

		change_mad_state(mad_send_wr, IB_MAD_STATE_DONE);
		list_add_tail(&mad_send_wr->agent_list, list_item);
	}

	spin_unlock_irqrestore(&mad_agent_priv->lock, flags);
	process_backlog_mads(mad_agent_priv);
	clear_mad_error_list(&timeout_list, IB_WC_RESP_TIMEOUT_ERR,
			     mad_agent_priv);
	clear_mad_error_list(&cancel_list, IB_WC_WR_FLUSH_ERR, mad_agent_priv);
}
/*
 * Allocate receive MADs and post receive WRs for them
 */
static int ib_mad_post_receive_mads(struct ib_mad_qp_info *qp_info,
				    struct ib_mad_private *mad)
{
	unsigned long flags;
	struct ib_mad_private *mad_priv;
	struct ib_sge sg_list;
	struct ib_recv_wr recv_wr;
	struct ib_mad_queue *recv_queue = &qp_info->recv_queue;
	int ret = 0;

	/* Initialize common scatter list fields */
	sg_list.lkey = qp_info->port_priv->pd->local_dma_lkey;

	/* Initialize common receive WR fields */
	recv_wr.next = NULL;
	recv_wr.sg_list = &sg_list;
	recv_wr.num_sge = 1;

	while (true) {
		/* Allocate and map receive buffer */
		if (mad) {
			mad_priv = mad;
			mad = NULL;
		} else {
			mad_priv = alloc_mad_private(port_mad_size(qp_info->port_priv),
						     GFP_ATOMIC);
			if (!mad_priv)
				return -ENOMEM;
		}
		sg_list.length = mad_priv_dma_size(mad_priv);
		sg_list.addr = ib_dma_map_single(qp_info->port_priv->device,
						 &mad_priv->grh,
						 mad_priv_dma_size(mad_priv),
						 DMA_FROM_DEVICE);
		if (unlikely(ib_dma_mapping_error(qp_info->port_priv->device,
						  sg_list.addr))) {
			kfree(mad_priv);
			return -ENOMEM;
		}
		mad_priv->header.mapping = sg_list.addr;
		mad_priv->header.mad_list.mad_queue = recv_queue;
		mad_priv->header.mad_list.cqe.done = ib_mad_recv_done;
		recv_wr.wr_cqe = &mad_priv->header.mad_list.cqe;

		/* Post receive WR */
		spin_lock_irqsave(&recv_queue->lock, flags);
		if (recv_queue->count >= recv_queue->max_active) {
			/* Fully populated the receive queue */
			spin_unlock_irqrestore(&recv_queue->lock, flags);
			break;
		}
		recv_queue->count++;
		list_add_tail(&mad_priv->header.mad_list.list,
			      &recv_queue->list);
		spin_unlock_irqrestore(&recv_queue->lock, flags);

		ret = ib_post_recv(qp_info->qp, &recv_wr, NULL);
		if (ret) {
			spin_lock_irqsave(&recv_queue->lock, flags);
			list_del(&mad_priv->header.mad_list.list);
			recv_queue->count--;
			spin_unlock_irqrestore(&recv_queue->lock, flags);
			dev_err(&qp_info->port_priv->device->dev,
				"ib_post_recv failed: %d\n", ret);
			break;
		}
	}

	/* Clean up the buffer that was mapped but never posted */
	ib_dma_unmap_single(qp_info->port_priv->device,
			    mad_priv->header.mapping,
			    mad_priv_dma_size(mad_priv), DMA_FROM_DEVICE);
	kfree(mad_priv);
	return ret;
}
/*
 * Return all the posted receive MADs
 */
static void cleanup_recv_queue(struct ib_mad_qp_info *qp_info)
{
	struct ib_mad_private_header *mad_priv_hdr;
	struct ib_mad_private *recv;
	struct ib_mad_list_head *mad_list;

	if (!qp_info->qp)
		return;

	while (!list_empty(&qp_info->recv_queue.list)) {

		mad_list = list_entry(qp_info->recv_queue.list.next,
				      struct ib_mad_list_head, list);

		mad_priv_hdr = container_of(mad_list,
					    struct ib_mad_private_header,
					    mad_list);
		recv = container_of(mad_priv_hdr, struct ib_mad_private,
				    header);

		/* Remove from posted receive MAD list */
		list_del(&mad_list->list);

		ib_dma_unmap_single(qp_info->port_priv->device,
				    recv->header.mapping,
				    mad_priv_dma_size(recv),
				    DMA_FROM_DEVICE);
		kfree(recv);
	}

	qp_info->recv_queue.count = 0;
}
static int ib_mad_port_start(struct ib_mad_port_private *port_priv)
{
	int ret, i;
	struct ib_qp_attr *attr;
	struct ib_qp *qp;
	u16 pkey_index;

	attr = kmalloc(sizeof *attr, GFP_KERNEL);
	if (!attr)
		return -ENOMEM;

	ret = ib_find_pkey(port_priv->device, port_priv->port_num,
			   IB_DEFAULT_PKEY_FULL, &pkey_index);
	if (ret)
		pkey_index = 0;

	for (i = 0; i < IB_MAD_QPS_CORE; i++) {
		qp = port_priv->qp_info[i].qp;
		if (!qp)
			continue;

		/*
		 * PKey index for QP1 is irrelevant but
		 * one is needed for the Reset to Init transition
		 */
		attr->qp_state = IB_QPS_INIT;
		attr->pkey_index = pkey_index;
		attr->qkey = (qp->qp_num == 0) ? 0 : IB_QP1_QKEY;
		ret = ib_modify_qp(qp, attr, IB_QP_STATE |
					     IB_QP_PKEY_INDEX | IB_QP_QKEY);
		if (ret) {
			dev_err(&port_priv->device->dev,
				"Couldn't change QP%d state to INIT: %d\n",
				i, ret);
			goto out;
		}

		attr->qp_state = IB_QPS_RTR;
		ret = ib_modify_qp(qp, attr, IB_QP_STATE);
		if (ret) {
			dev_err(&port_priv->device->dev,
				"Couldn't change QP%d state to RTR: %d\n",
				i, ret);
			goto out;
		}

		attr->qp_state = IB_QPS_RTS;
		attr->sq_psn = IB_MAD_SEND_Q_PSN;
		ret = ib_modify_qp(qp, attr, IB_QP_STATE | IB_QP_SQ_PSN);
		if (ret) {
			dev_err(&port_priv->device->dev,
				"Couldn't change QP%d state to RTS: %d\n",
				i, ret);
			goto out;
		}
	}

	ret = ib_req_notify_cq(port_priv->cq, IB_CQ_NEXT_COMP);
	if (ret) {
		dev_err(&port_priv->device->dev,
			"Failed to request completion notification: %d\n",
			ret);
		goto out;
	}

	for (i = 0; i < IB_MAD_QPS_CORE; i++) {
		if (!port_priv->qp_info[i].qp)
			continue;

		ret = ib_mad_post_receive_mads(&port_priv->qp_info[i], NULL);
		if (ret) {
			dev_err(&port_priv->device->dev,
				"Couldn't post receive WRs\n");
			goto out;
		}
	}
out:
	kfree(attr);
	return ret;
}
static void qp_event_handler(struct ib_event *event, void *qp_context)
{
	struct ib_mad_qp_info *qp_info = qp_context;

	/* It's worse than that! He's dead, Jim! */
	dev_err(&qp_info->port_priv->device->dev,
		"Fatal error (%d) on MAD QP (%u)\n",
		event->event, qp_info->qp->qp_num);
}
static void init_mad_queue(struct ib_mad_qp_info *qp_info,
			   struct ib_mad_queue *mad_queue)
{
	mad_queue->qp_info = qp_info;
	mad_queue->count = 0;
	spin_lock_init(&mad_queue->lock);
	INIT_LIST_HEAD(&mad_queue->list);
}
static void init_mad_qp(struct ib_mad_port_private *port_priv,
			struct ib_mad_qp_info *qp_info)
{
	qp_info->port_priv = port_priv;
	init_mad_queue(qp_info, &qp_info->send_queue);
	init_mad_queue(qp_info, &qp_info->recv_queue);
	INIT_LIST_HEAD(&qp_info->overflow_list);
}
static int create_mad_qp(struct ib_mad_qp_info *qp_info,
			 enum ib_qp_type qp_type)
{
	struct ib_qp_init_attr qp_init_attr;
	int ret;

	memset(&qp_init_attr, 0, sizeof qp_init_attr);
	qp_init_attr.send_cq = qp_info->port_priv->cq;
	qp_init_attr.recv_cq = qp_info->port_priv->cq;
	qp_init_attr.sq_sig_type = IB_SIGNAL_ALL_WR;
	qp_init_attr.cap.max_send_wr = mad_sendq_size;
	qp_init_attr.cap.max_recv_wr = mad_recvq_size;
	qp_init_attr.cap.max_send_sge = IB_MAD_SEND_REQ_MAX_SG;
	qp_init_attr.cap.max_recv_sge = IB_MAD_RECV_REQ_MAX_SG;
	qp_init_attr.qp_type = qp_type;
	qp_init_attr.port_num = qp_info->port_priv->port_num;
	qp_init_attr.qp_context = qp_info;
	qp_init_attr.event_handler = qp_event_handler;
	qp_info->qp = ib_create_qp(qp_info->port_priv->pd, &qp_init_attr);
	if (IS_ERR(qp_info->qp)) {
		dev_err(&qp_info->port_priv->device->dev,
			"Couldn't create ib_mad QP%d\n",
			get_spl_qp_index(qp_type));
		ret = PTR_ERR(qp_info->qp);
		goto error;
	}
	/* Use minimum queue sizes unless the CQ is resized */
	qp_info->send_queue.max_active = mad_sendq_size;
	qp_info->recv_queue.max_active = mad_recvq_size;
	return 0;

error:
	return ret;
}
static void destroy_mad_qp(struct ib_mad_qp_info *qp_info)
{
	if (!qp_info->qp)
		return;

	ib_destroy_qp(qp_info->qp);
}
/*
 * Open the port
 * Create the QP, PD, MR, and CQ if needed
 */
static int ib_mad_port_open(struct ib_device *device,
			    u32 port_num)
{
	int ret, cq_size;
	struct ib_mad_port_private *port_priv;
	unsigned long flags;
	int has_smi;

	if (WARN_ON(rdma_max_mad_size(device, port_num) < IB_MGMT_MAD_SIZE))
		return -EFAULT;

	if (WARN_ON(rdma_cap_opa_mad(device, port_num) &&
		    rdma_max_mad_size(device, port_num) < OPA_MGMT_MAD_SIZE))
		return -EFAULT;

	/* Create new device info */
	port_priv = kzalloc(sizeof *port_priv, GFP_KERNEL);
	if (!port_priv)
		return -ENOMEM;

	port_priv->device = device;
	port_priv->port_num = port_num;
	spin_lock_init(&port_priv->reg_lock);
	init_mad_qp(port_priv, &port_priv->qp_info[0]);
	init_mad_qp(port_priv, &port_priv->qp_info[1]);

	cq_size = mad_sendq_size + mad_recvq_size;
	has_smi = rdma_cap_ib_smi(device, port_num);
	if (has_smi)
		cq_size *= 2;

	port_priv->pd = ib_alloc_pd(device, 0);
	if (IS_ERR(port_priv->pd)) {
		dev_err(&device->dev, "Couldn't create ib_mad PD\n");
		ret = PTR_ERR(port_priv->pd);
		goto error1;
	}

	port_priv->cq = ib_alloc_cq(port_priv->device, port_priv, cq_size, 0,
			IB_POLL_UNBOUND_WORKQUEUE);
	if (IS_ERR(port_priv->cq)) {
		dev_err(&device->dev, "Couldn't create ib_mad CQ\n");
		ret = PTR_ERR(port_priv->cq);
		goto error4;
	}

	if (has_smi) {
		ret = create_mad_qp(&port_priv->qp_info[0], IB_QPT_SMI);
		if (ret)
			goto error6;
	}
	if (rdma_cap_ib_cm(device, port_num)) {
		ret = create_mad_qp(&port_priv->qp_info[1], IB_QPT_GSI);
		if (ret)
			goto error7;
	}

	port_priv->wq = alloc_ordered_workqueue("ib_mad%u", WQ_MEM_RECLAIM,
						port_num);
	if (!port_priv->wq) {
		ret = -ENOMEM;
		goto error8;
	}

	spin_lock_irqsave(&ib_mad_port_list_lock, flags);
	list_add_tail(&port_priv->port_list, &ib_mad_port_list);
	spin_unlock_irqrestore(&ib_mad_port_list_lock, flags);

	ret = ib_mad_port_start(port_priv);
	if (ret) {
		dev_err(&device->dev, "Couldn't start port\n");
		goto error9;
	}

	return 0;

error9:
	spin_lock_irqsave(&ib_mad_port_list_lock, flags);
	list_del_init(&port_priv->port_list);
	spin_unlock_irqrestore(&ib_mad_port_list_lock, flags);

	destroy_workqueue(port_priv->wq);
error8:
	destroy_mad_qp(&port_priv->qp_info[1]);
error7:
	destroy_mad_qp(&port_priv->qp_info[0]);
error6:
	ib_free_cq(port_priv->cq);
	cleanup_recv_queue(&port_priv->qp_info[1]);
	cleanup_recv_queue(&port_priv->qp_info[0]);
error4:
	ib_dealloc_pd(port_priv->pd);
error1:
	kfree(port_priv);

	return ret;
}
/*
 * If there are no classes using the port, free the port
 * resources (CQ, MR, PD, QP) and remove the port's info structure
 */
static int ib_mad_port_close(struct ib_device *device, u32 port_num)
{
	struct ib_mad_port_private *port_priv;
	unsigned long flags;

	spin_lock_irqsave(&ib_mad_port_list_lock, flags);
	port_priv = __ib_get_mad_port(device, port_num);
	if (port_priv == NULL) {
		spin_unlock_irqrestore(&ib_mad_port_list_lock, flags);
		dev_err(&device->dev, "Port %u not found\n", port_num);
		return -ENODEV;
	}
	list_del_init(&port_priv->port_list);
	spin_unlock_irqrestore(&ib_mad_port_list_lock, flags);

	destroy_workqueue(port_priv->wq);
	destroy_mad_qp(&port_priv->qp_info[1]);
	destroy_mad_qp(&port_priv->qp_info[0]);
	ib_free_cq(port_priv->cq);
	ib_dealloc_pd(port_priv->pd);
	cleanup_recv_queue(&port_priv->qp_info[1]);
	cleanup_recv_queue(&port_priv->qp_info[0]);
	/* XXX: Handle deallocation of MAD registration tables */

	kfree(port_priv);

	return 0;
}
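/*
 * Open MAD services on every MAD-capable port when a device is registered.
 * Fails with -EOPNOTSUPP if no port supports MADs.
 */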
static int ib_mad_init_device(struct ib_device *device)
{
	int start, i;
	unsigned int count = 0;
	int ret;

	start = rdma_start_port(device);
	for (i = start; i <= rdma_end_port(device); i++) {
		if (!rdma_cap_ib_mad(device, i))
			continue;

		ret = ib_mad_port_open(device, i);
		if (ret) {
			dev_err(&device->dev, "Couldn't open port %d\n", i);
			goto error;
		}
		ret = ib_agent_port_open(device, i);
		if (ret) {
			dev_err(&device->dev,
				"Couldn't open port %d for agents\n", i);
			goto error_agent;
		}
		count++;
	}
	if (!count)
		return -EOPNOTSUPP;

	return 0;

error_agent:
	if (ib_mad_port_close(device, i))
		dev_err(&device->dev, "Couldn't close port %d\n", i);

error:
	while (--i >= start) {
		if (!rdma_cap_ib_mad(device, i))
			continue;

		if (ib_agent_port_close(device, i))
			dev_err(&device->dev,
				"Couldn't close port %d for agents\n", i);
		if (ib_mad_port_close(device, i))
			dev_err(&device->dev, "Couldn't close port %d\n", i);
	}
	return ret;
}
static void ib_mad_remove_device(struct ib_device *device, void *client_data)
{
	unsigned int i;

	rdma_for_each_port (device, i) {
		if (!rdma_cap_ib_mad(device, i))
			continue;

		if (ib_agent_port_close(device, i))
			dev_err(&device->dev,
				"Couldn't close port %u for agents\n", i);
		if (ib_mad_port_close(device, i))
			dev_err(&device->dev, "Couldn't close port %u\n", i);
	}
}
static struct ib_client mad_client = {
	.name   = "mad",
	.add = ib_mad_init_device,
	.remove = ib_mad_remove_device
};
int ib_mad_init(void)
{
	mad_recvq_size = min(mad_recvq_size, IB_MAD_QP_MAX_SIZE);
	mad_recvq_size = max(mad_recvq_size, IB_MAD_QP_MIN_SIZE);

	mad_sendq_size = min(mad_sendq_size, IB_MAD_QP_MAX_SIZE);
	mad_sendq_size = max(mad_sendq_size, IB_MAD_QP_MIN_SIZE);

	INIT_LIST_HEAD(&ib_mad_port_list);

	if (ib_register_client(&mad_client)) {
		pr_err("Couldn't register ib_mad client\n");
		return -EINVAL;
	}

	return 0;
}
void ib_mad_cleanup(void)
{
	ib_unregister_client(&mad_client);
}