/*
 * Copyright (c) 2004-2007 Voltaire, Inc. All rights reserved.
 * Copyright (c) 2005 Intel Corporation. All rights reserved.
 * Copyright (c) 2005 Mellanox Technologies Ltd. All rights reserved.
 * Copyright (c) 2009 HNR Consulting. All rights reserved.
 * Copyright (c) 2014 Intel Corporation. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/dma-mapping.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <rdma/ib_cache.h>

#include "mad_priv.h"
#include "mad_rmpp.h"
#include "smi.h"
#include "opa_smi.h"
#include "agent.h"

MODULE_LICENSE("Dual BSD/GPL");
MODULE_DESCRIPTION("kernel IB MAD API");
MODULE_AUTHOR("Hal Rosenstock");
MODULE_AUTHOR("Sean Hefty");

static int mad_sendq_size = IB_MAD_QP_SEND_SIZE;
static int mad_recvq_size = IB_MAD_QP_RECV_SIZE;

module_param_named(send_queue_size, mad_sendq_size, int, 0444);
MODULE_PARM_DESC(send_queue_size, "Size of send queue in number of work requests");
MODULE_PARM_DESC(recv_queue_size, "Size of receive queue in number of work requests");
module_param_named(recv_queue_size, mad_recvq_size, int, 0444);

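/*
 * Note: both parameters are 0444 (read-only once loaded), so they can
 * only be set at load time.  An illustrative invocation (values are
 * examples, not recommendations) would be:
 *
 *	modprobe ib_mad send_queue_size=128 recv_queue_size=512
 *
 * or, when the MAD layer is built in, via the kernel command line as
 * ib_mad.send_queue_size=128.
 */
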
static struct list_head ib_mad_port_list;
static u32 ib_mad_client_id = 0;

/* Port list lock */
static DEFINE_SPINLOCK(ib_mad_port_list_lock);

/* Forward declarations */
static int method_in_use(struct ib_mad_mgmt_method_table **method,
			 struct ib_mad_reg_req *mad_reg_req);
static void remove_mad_reg_req(struct ib_mad_agent_private *priv);
static struct ib_mad_agent_private *find_mad_agent(
					struct ib_mad_port_private *port_priv,
					const struct ib_mad_hdr *mad);
static int ib_mad_post_receive_mads(struct ib_mad_qp_info *qp_info,
				    struct ib_mad_private *mad);
static void cancel_mads(struct ib_mad_agent_private *mad_agent_priv);
static void timeout_sends(struct work_struct *work);
static void local_completions(struct work_struct *work);
static int add_nonoui_reg_req(struct ib_mad_reg_req *mad_reg_req,
			      struct ib_mad_agent_private *agent_priv,
			      u8 mgmt_class);
static int add_oui_reg_req(struct ib_mad_reg_req *mad_reg_req,
			   struct ib_mad_agent_private *agent_priv);

/*
 * Returns an ib_mad_port_private structure or NULL for a device/port.
 * Assumes ib_mad_port_list_lock is being held.
 */
static inline struct ib_mad_port_private *
__ib_get_mad_port(struct ib_device *device, int port_num)
{
	struct ib_mad_port_private *entry;

	list_for_each_entry(entry, &ib_mad_port_list, port_list) {
		if (entry->device == device && entry->port_num == port_num)
			return entry;
	}
	return NULL;
}

/*
 * Wrapper function to return an ib_mad_port_private structure or NULL
 * for a device/port.
 */
static inline struct ib_mad_port_private *
ib_get_mad_port(struct ib_device *device, int port_num)
{
	struct ib_mad_port_private *entry;
	unsigned long flags;

	spin_lock_irqsave(&ib_mad_port_list_lock, flags);
	entry = __ib_get_mad_port(device, port_num);
	spin_unlock_irqrestore(&ib_mad_port_list_lock, flags);

	return entry;
}

static inline u8 convert_mgmt_class(u8 mgmt_class)
{
	/* Alias IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE to 0 */
	return mgmt_class == IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE ?
		0 : mgmt_class;
}

static int get_spl_qp_index(enum ib_qp_type qp_type)
{
	switch (qp_type)
	{
	case IB_QPT_SMI:
		return 0;
	case IB_QPT_GSI:
		return 1;
	default:
		return -1;
	}
}

static int vendor_class_index(u8 mgmt_class)
{
	return mgmt_class - IB_MGMT_CLASS_VENDOR_RANGE2_START;
}

static int is_vendor_class(u8 mgmt_class)
{
	if ((mgmt_class < IB_MGMT_CLASS_VENDOR_RANGE2_START) ||
	    (mgmt_class > IB_MGMT_CLASS_VENDOR_RANGE2_END))
		return 0;
	return 1;
}

static int is_vendor_oui(char *oui)
{
	if (oui[0] || oui[1] || oui[2])
		return 1;
	return 0;
}

static int is_vendor_method_in_use(
		struct ib_mad_mgmt_vendor_class *vendor_class,
		struct ib_mad_reg_req *mad_reg_req)
{
	struct ib_mad_mgmt_method_table *method;
	int i;

	for (i = 0; i < MAX_MGMT_OUI; i++) {
		if (!memcmp(vendor_class->oui[i], mad_reg_req->oui, 3)) {
			method = vendor_class->method_table[i];
			if (method) {
				if (method_in_use(&method, mad_reg_req))
					return 1;
				else
					break;
			}
		}
	}
	return 0;
}

int ib_response_mad(const struct ib_mad_hdr *hdr)
{
	return ((hdr->method & IB_MGMT_METHOD_RESP) ||
		(hdr->method == IB_MGMT_METHOD_TRAP_REPRESS) ||
		((hdr->mgmt_class == IB_MGMT_CLASS_BM) &&
		 (hdr->attr_mod & IB_BM_ATTR_MOD_RESP)));
}
EXPORT_SYMBOL(ib_response_mad);

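/*
 * Worked example of the classification above, using the method values
 * from <rdma/ib_mad.h>: a GetResp MAD carries method 0x81
 * (IB_MGMT_METHOD_GET | IB_MGMT_METHOD_RESP), so the
 * IB_MGMT_METHOD_RESP bit test reports it as a response, while a Get
 * MAD (method 0x01) is treated as a request.
 */
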
/*
 * ib_register_mad_agent - Register to send/receive MADs
 */
struct ib_mad_agent *ib_register_mad_agent(struct ib_device *device,
					   u8 port_num,
					   enum ib_qp_type qp_type,
					   struct ib_mad_reg_req *mad_reg_req,
					   u8 rmpp_version,
					   ib_mad_send_handler send_handler,
					   ib_mad_recv_handler recv_handler,
					   void *context,
					   u32 registration_flags)
{
	struct ib_mad_port_private *port_priv;
	struct ib_mad_agent *ret = ERR_PTR(-EINVAL);
	struct ib_mad_agent_private *mad_agent_priv;
	struct ib_mad_reg_req *reg_req = NULL;
	struct ib_mad_mgmt_class_table *class;
	struct ib_mad_mgmt_vendor_class_table *vendor;
	struct ib_mad_mgmt_vendor_class *vendor_class;
	struct ib_mad_mgmt_method_table *method;
	int ret2, qpn;
	unsigned long flags;
	u8 mgmt_class, vclass;

	/* Validate parameters */
	qpn = get_spl_qp_index(qp_type);
	if (qpn == -1) {
		dev_notice(&device->dev,
			   "ib_register_mad_agent: invalid QP Type %d\n",
			   qp_type);
		goto error1;
	}

	if (rmpp_version && rmpp_version != IB_MGMT_RMPP_VERSION) {
		dev_notice(&device->dev,
			   "ib_register_mad_agent: invalid RMPP Version %u\n",
			   rmpp_version);
		goto error1;
	}

	/* Validate MAD registration request if supplied */
	if (mad_reg_req) {
		if (mad_reg_req->mgmt_class_version >= MAX_MGMT_VERSION) {
			dev_notice(&device->dev,
				   "ib_register_mad_agent: invalid Class Version %u\n",
				   mad_reg_req->mgmt_class_version);
			goto error1;
		}
		if (!recv_handler) {
			dev_notice(&device->dev,
				   "ib_register_mad_agent: no recv_handler\n");
			goto error1;
		}
		if (mad_reg_req->mgmt_class >= MAX_MGMT_CLASS) {
			/*
			 * IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE is the only
			 * one in this range currently allowed
			 */
			if (mad_reg_req->mgmt_class !=
			    IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE) {
				dev_notice(&device->dev,
					   "ib_register_mad_agent: Invalid Mgmt Class 0x%x\n",
					   mad_reg_req->mgmt_class);
				goto error1;
			}
		} else if (mad_reg_req->mgmt_class == 0) {
			/*
			 * Class 0 is reserved in IBA and is used for
			 * aliasing of IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE
			 */
			dev_notice(&device->dev,
				   "ib_register_mad_agent: Invalid Mgmt Class 0\n");
			goto error1;
		} else if (is_vendor_class(mad_reg_req->mgmt_class)) {
			/*
			 * If class is in "new" vendor range,
			 * ensure supplied OUI is not zero
			 */
			if (!is_vendor_oui(mad_reg_req->oui)) {
				dev_notice(&device->dev,
					   "ib_register_mad_agent: No OUI specified for class 0x%x\n",
					   mad_reg_req->mgmt_class);
				goto error1;
			}
		}
		/* Make sure class supplied is consistent with RMPP */
		if (!ib_is_mad_class_rmpp(mad_reg_req->mgmt_class)) {
			if (rmpp_version) {
				dev_notice(&device->dev,
					   "ib_register_mad_agent: RMPP version for non-RMPP class 0x%x\n",
					   mad_reg_req->mgmt_class);
				goto error1;
			}
		}

		/* Make sure class supplied is consistent with QP type */
		if (qp_type == IB_QPT_SMI) {
			if ((mad_reg_req->mgmt_class !=
					IB_MGMT_CLASS_SUBN_LID_ROUTED) &&
			    (mad_reg_req->mgmt_class !=
					IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE)) {
				dev_notice(&device->dev,
					   "ib_register_mad_agent: Invalid SM QP type: class 0x%x\n",
					   mad_reg_req->mgmt_class);
				goto error1;
			}
		} else {
			if ((mad_reg_req->mgmt_class ==
					IB_MGMT_CLASS_SUBN_LID_ROUTED) ||
			    (mad_reg_req->mgmt_class ==
					IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE)) {
				dev_notice(&device->dev,
					   "ib_register_mad_agent: Invalid GS QP type: class 0x%x\n",
					   mad_reg_req->mgmt_class);
				goto error1;
			}
		}
	} else {
		/* No registration request supplied */
		if (!send_handler)
			goto error1;
		if (registration_flags & IB_MAD_USER_RMPP)
			goto error1;
	}

	/* Validate device and port */
	port_priv = ib_get_mad_port(device, port_num);
	if (!port_priv) {
		dev_notice(&device->dev, "ib_register_mad_agent: Invalid port\n");
		ret = ERR_PTR(-ENODEV);
		goto error1;
	}

	/* Verify the QP requested is supported. For example, Ethernet devices
	 * will not have QP0 */
	if (!port_priv->qp_info[qpn].qp) {
		dev_notice(&device->dev,
			   "ib_register_mad_agent: QP %d not supported\n", qpn);
		ret = ERR_PTR(-EPROTONOSUPPORT);
		goto error1;
	}

	/* Allocate structures */
	mad_agent_priv = kzalloc(sizeof *mad_agent_priv, GFP_KERNEL);
	if (!mad_agent_priv) {
		ret = ERR_PTR(-ENOMEM);
		goto error1;
	}

	if (mad_reg_req) {
		reg_req = kmemdup(mad_reg_req, sizeof *reg_req, GFP_KERNEL);
		if (!reg_req) {
			ret = ERR_PTR(-ENOMEM);
			goto error3;
		}
	}

	/* Now, fill in the various structures */
	mad_agent_priv->qp_info = &port_priv->qp_info[qpn];
	mad_agent_priv->reg_req = reg_req;
	mad_agent_priv->agent.rmpp_version = rmpp_version;
	mad_agent_priv->agent.device = device;
	mad_agent_priv->agent.recv_handler = recv_handler;
	mad_agent_priv->agent.send_handler = send_handler;
	mad_agent_priv->agent.context = context;
	mad_agent_priv->agent.qp = port_priv->qp_info[qpn].qp;
	mad_agent_priv->agent.port_num = port_num;
	mad_agent_priv->agent.flags = registration_flags;
	spin_lock_init(&mad_agent_priv->lock);
	INIT_LIST_HEAD(&mad_agent_priv->send_list);
	INIT_LIST_HEAD(&mad_agent_priv->wait_list);
	INIT_LIST_HEAD(&mad_agent_priv->done_list);
	INIT_LIST_HEAD(&mad_agent_priv->rmpp_list);
	INIT_DELAYED_WORK(&mad_agent_priv->timed_work, timeout_sends);
	INIT_LIST_HEAD(&mad_agent_priv->local_list);
	INIT_WORK(&mad_agent_priv->local_work, local_completions);
	atomic_set(&mad_agent_priv->refcount, 1);
	init_completion(&mad_agent_priv->comp);

	spin_lock_irqsave(&port_priv->reg_lock, flags);
	mad_agent_priv->agent.hi_tid = ++ib_mad_client_id;

	/*
	 * Make sure the MAD registration (if supplied) does not overlap
	 * with any existing ones
	 */
	if (mad_reg_req) {
		mgmt_class = convert_mgmt_class(mad_reg_req->mgmt_class);
		if (!is_vendor_class(mgmt_class)) {
			class = port_priv->version[mad_reg_req->
						   mgmt_class_version].class;
			if (class) {
				method = class->method_table[mgmt_class];
				if (method) {
					if (method_in_use(&method,
							   mad_reg_req))
						goto error4;
				}
			}
			ret2 = add_nonoui_reg_req(mad_reg_req, mad_agent_priv,
						  mgmt_class);
		} else {
			/* "New" vendor class range */
			vendor = port_priv->version[mad_reg_req->
						    mgmt_class_version].vendor;
			if (vendor) {
				vclass = vendor_class_index(mgmt_class);
				vendor_class = vendor->vendor_class[vclass];
				if (vendor_class) {
					if (is_vendor_method_in_use(
							vendor_class,
							mad_reg_req))
						goto error4;
				}
			}
			ret2 = add_oui_reg_req(mad_reg_req, mad_agent_priv);
		}
		if (ret2) {
			ret = ERR_PTR(ret2);
			goto error4;
		}
	}

	/* Add mad agent into port's agent list */
	list_add_tail(&mad_agent_priv->agent_list, &port_priv->agent_list);
	spin_unlock_irqrestore(&port_priv->reg_lock, flags);

	return &mad_agent_priv->agent;

error4:
	spin_unlock_irqrestore(&port_priv->reg_lock, flags);
	kfree(reg_req);
error3:
	kfree(mad_agent_priv);
error1:
	return ret;
}
EXPORT_SYMBOL(ib_register_mad_agent);

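/*
 * A minimal registration sketch (illustrative only, not taken from a
 * real client): an agent that wants Get/Set requests of the Performance
 * Management class on the GSI QP of port 1 could do:
 *
 *	struct ib_mad_reg_req req = {
 *		.mgmt_class	    = IB_MGMT_CLASS_PERF_MGMT,
 *		.mgmt_class_version = 1,
 *	};
 *	struct ib_mad_agent *agent;
 *
 *	set_bit(IB_MGMT_METHOD_GET, req.method_mask);
 *	set_bit(IB_MGMT_METHOD_SET, req.method_mask);
 *	agent = ib_register_mad_agent(device, 1, IB_QPT_GSI, &req, 0,
 *				      my_send_handler, my_recv_handler,
 *				      my_context, 0);
 *	if (IS_ERR(agent))
 *		return PTR_ERR(agent);
 *
 * my_send_handler, my_recv_handler and my_context are placeholders
 * supplied by the caller.
 */
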
static inline int is_snooping_sends(int mad_snoop_flags)
{
	return (mad_snoop_flags &
		(/*IB_MAD_SNOOP_POSTED_SENDS |
		 IB_MAD_SNOOP_RMPP_SENDS |*/
		 IB_MAD_SNOOP_SEND_COMPLETIONS /*|
		 IB_MAD_SNOOP_RMPP_SEND_COMPLETIONS*/));
}

static inline int is_snooping_recvs(int mad_snoop_flags)
{
	return (mad_snoop_flags &
		(IB_MAD_SNOOP_RECVS /*|
		 IB_MAD_SNOOP_RMPP_RECVS*/));
}

static int register_snoop_agent(struct ib_mad_qp_info *qp_info,
				struct ib_mad_snoop_private *mad_snoop_priv)
{
	struct ib_mad_snoop_private **new_snoop_table;
	unsigned long flags;
	int i;

	spin_lock_irqsave(&qp_info->snoop_lock, flags);
	/* Check for empty slot in array. */
	for (i = 0; i < qp_info->snoop_table_size; i++)
		if (!qp_info->snoop_table[i])
			break;

	if (i == qp_info->snoop_table_size) {
		/* Grow table. */
		new_snoop_table = krealloc(qp_info->snoop_table,
					   sizeof mad_snoop_priv *
					   (qp_info->snoop_table_size + 1),
					   GFP_ATOMIC);
		if (!new_snoop_table) {
			i = -ENOMEM;
			goto out;
		}

		qp_info->snoop_table = new_snoop_table;
		qp_info->snoop_table_size++;
	}
	qp_info->snoop_table[i] = mad_snoop_priv;
	atomic_inc(&qp_info->snoop_count);
out:
	spin_unlock_irqrestore(&qp_info->snoop_lock, flags);
	return i;
}

struct ib_mad_agent *ib_register_mad_snoop(struct ib_device *device,
					   u8 port_num,
					   enum ib_qp_type qp_type,
					   int mad_snoop_flags,
					   ib_mad_snoop_handler snoop_handler,
					   ib_mad_recv_handler recv_handler,
					   void *context)
{
	struct ib_mad_port_private *port_priv;
	struct ib_mad_agent *ret;
	struct ib_mad_snoop_private *mad_snoop_priv;
	int qpn;

	/* Validate parameters */
	if ((is_snooping_sends(mad_snoop_flags) && !snoop_handler) ||
	    (is_snooping_recvs(mad_snoop_flags) && !recv_handler)) {
		ret = ERR_PTR(-EINVAL);
		goto error1;
	}
	qpn = get_spl_qp_index(qp_type);
	if (qpn == -1) {
		ret = ERR_PTR(-EINVAL);
		goto error1;
	}
	port_priv = ib_get_mad_port(device, port_num);
	if (!port_priv) {
		ret = ERR_PTR(-ENODEV);
		goto error1;
	}
	/* Allocate structures */
	mad_snoop_priv = kzalloc(sizeof *mad_snoop_priv, GFP_KERNEL);
	if (!mad_snoop_priv) {
		ret = ERR_PTR(-ENOMEM);
		goto error1;
	}

	/* Now, fill in the various structures */
	mad_snoop_priv->qp_info = &port_priv->qp_info[qpn];
	mad_snoop_priv->agent.device = device;
	mad_snoop_priv->agent.recv_handler = recv_handler;
	mad_snoop_priv->agent.snoop_handler = snoop_handler;
	mad_snoop_priv->agent.context = context;
	mad_snoop_priv->agent.qp = port_priv->qp_info[qpn].qp;
	mad_snoop_priv->agent.port_num = port_num;
	mad_snoop_priv->mad_snoop_flags = mad_snoop_flags;
	init_completion(&mad_snoop_priv->comp);
	mad_snoop_priv->snoop_index = register_snoop_agent(
						&port_priv->qp_info[qpn],
						mad_snoop_priv);
	if (mad_snoop_priv->snoop_index < 0) {
		ret = ERR_PTR(mad_snoop_priv->snoop_index);
		goto error2;
	}

	atomic_set(&mad_snoop_priv->refcount, 1);
	return &mad_snoop_priv->agent;

error2:
	kfree(mad_snoop_priv);
error1:
	return ret;
}
EXPORT_SYMBOL(ib_register_mad_snoop);

static inline void deref_mad_agent(struct ib_mad_agent_private *mad_agent_priv)
{
	if (atomic_dec_and_test(&mad_agent_priv->refcount))
		complete(&mad_agent_priv->comp);
}

static inline void deref_snoop_agent(struct ib_mad_snoop_private *mad_snoop_priv)
{
	if (atomic_dec_and_test(&mad_snoop_priv->refcount))
		complete(&mad_snoop_priv->comp);
}

static void unregister_mad_agent(struct ib_mad_agent_private *mad_agent_priv)
{
	struct ib_mad_port_private *port_priv;
	unsigned long flags;

	/* Note that we could still be handling received MADs */

	/*
	 * Canceling all sends results in dropping received response
	 * MADs, preventing us from queuing additional work
	 */
	cancel_mads(mad_agent_priv);
	port_priv = mad_agent_priv->qp_info->port_priv;
	cancel_delayed_work(&mad_agent_priv->timed_work);

	spin_lock_irqsave(&port_priv->reg_lock, flags);
	remove_mad_reg_req(mad_agent_priv);
	list_del(&mad_agent_priv->agent_list);
	spin_unlock_irqrestore(&port_priv->reg_lock, flags);

	flush_workqueue(port_priv->wq);
	ib_cancel_rmpp_recvs(mad_agent_priv);

	deref_mad_agent(mad_agent_priv);
	wait_for_completion(&mad_agent_priv->comp);

	kfree(mad_agent_priv->reg_req);
	kfree(mad_agent_priv);
}

static void unregister_mad_snoop(struct ib_mad_snoop_private *mad_snoop_priv)
{
	struct ib_mad_qp_info *qp_info;
	unsigned long flags;

	qp_info = mad_snoop_priv->qp_info;
	spin_lock_irqsave(&qp_info->snoop_lock, flags);
	qp_info->snoop_table[mad_snoop_priv->snoop_index] = NULL;
	atomic_dec(&qp_info->snoop_count);
	spin_unlock_irqrestore(&qp_info->snoop_lock, flags);

	deref_snoop_agent(mad_snoop_priv);
	wait_for_completion(&mad_snoop_priv->comp);

	kfree(mad_snoop_priv);
}

/*
 * ib_unregister_mad_agent - Unregisters a client from using MAD services
 */
int ib_unregister_mad_agent(struct ib_mad_agent *mad_agent)
{
	struct ib_mad_agent_private *mad_agent_priv;
	struct ib_mad_snoop_private *mad_snoop_priv;

	/* If the TID is zero, the agent can only snoop. */
	if (mad_agent->hi_tid) {
		mad_agent_priv = container_of(mad_agent,
					      struct ib_mad_agent_private,
					      agent);
		unregister_mad_agent(mad_agent_priv);
	} else {
		mad_snoop_priv = container_of(mad_agent,
					      struct ib_mad_snoop_private,
					      agent);
		unregister_mad_snoop(mad_snoop_priv);
	}
	return 0;
}
EXPORT_SYMBOL(ib_unregister_mad_agent);

static void dequeue_mad(struct ib_mad_list_head *mad_list)
{
	struct ib_mad_queue *mad_queue;
	unsigned long flags;

	BUG_ON(!mad_list->mad_queue);
	mad_queue = mad_list->mad_queue;
	spin_lock_irqsave(&mad_queue->lock, flags);
	list_del(&mad_list->list);
	mad_queue->count--;
	spin_unlock_irqrestore(&mad_queue->lock, flags);
}

static void snoop_send(struct ib_mad_qp_info *qp_info,
		       struct ib_mad_send_buf *send_buf,
		       struct ib_mad_send_wc *mad_send_wc,
		       int mad_snoop_flags)
{
	struct ib_mad_snoop_private *mad_snoop_priv;
	unsigned long flags;
	int i;

	spin_lock_irqsave(&qp_info->snoop_lock, flags);
	for (i = 0; i < qp_info->snoop_table_size; i++) {
		mad_snoop_priv = qp_info->snoop_table[i];
		if (!mad_snoop_priv ||
		    !(mad_snoop_priv->mad_snoop_flags & mad_snoop_flags))
			continue;

		atomic_inc(&mad_snoop_priv->refcount);
		spin_unlock_irqrestore(&qp_info->snoop_lock, flags);
		mad_snoop_priv->agent.snoop_handler(&mad_snoop_priv->agent,
						    send_buf, mad_send_wc);
		deref_snoop_agent(mad_snoop_priv);
		spin_lock_irqsave(&qp_info->snoop_lock, flags);
	}
	spin_unlock_irqrestore(&qp_info->snoop_lock, flags);
}

static void snoop_recv(struct ib_mad_qp_info *qp_info,
		       struct ib_mad_recv_wc *mad_recv_wc,
		       int mad_snoop_flags)
{
	struct ib_mad_snoop_private *mad_snoop_priv;
	unsigned long flags;
	int i;

	spin_lock_irqsave(&qp_info->snoop_lock, flags);
	for (i = 0; i < qp_info->snoop_table_size; i++) {
		mad_snoop_priv = qp_info->snoop_table[i];
		if (!mad_snoop_priv ||
		    !(mad_snoop_priv->mad_snoop_flags & mad_snoop_flags))
			continue;

		atomic_inc(&mad_snoop_priv->refcount);
		spin_unlock_irqrestore(&qp_info->snoop_lock, flags);
		mad_snoop_priv->agent.recv_handler(&mad_snoop_priv->agent,
						   mad_recv_wc);
		deref_snoop_agent(mad_snoop_priv);
		spin_lock_irqsave(&qp_info->snoop_lock, flags);
	}
	spin_unlock_irqrestore(&qp_info->snoop_lock, flags);
}

static void build_smp_wc(struct ib_qp *qp,
			 u64 wr_id, u16 slid, u16 pkey_index, u8 port_num,
			 struct ib_wc *wc)
{
	memset(wc, 0, sizeof *wc);
	wc->wr_id = wr_id;
	wc->status = IB_WC_SUCCESS;
	wc->opcode = IB_WC_RECV;
	wc->pkey_index = pkey_index;
	wc->byte_len = sizeof(struct ib_mad) + sizeof(struct ib_grh);
	wc->src_qp = IB_QP0;
	wc->qp = qp;
	wc->slid = slid;
	wc->sl = 0;
	wc->dlid_path_bits = 0;
	wc->port_num = port_num;
}

static size_t mad_priv_size(const struct ib_mad_private *mp)
{
	return sizeof(struct ib_mad_private) + mp->mad_size;
}

static struct ib_mad_private *alloc_mad_private(size_t mad_size, gfp_t flags)
{
	size_t size = sizeof(struct ib_mad_private) + mad_size;
	struct ib_mad_private *ret = kzalloc(size, flags);

	if (ret)
		ret->mad_size = mad_size;

	return ret;
}

static size_t port_mad_size(const struct ib_mad_port_private *port_priv)
{
	return rdma_max_mad_size(port_priv->device, port_priv->port_num);
}

static size_t mad_priv_dma_size(const struct ib_mad_private *mp)
{
	return sizeof(struct ib_grh) + mp->mad_size;
}

/*
 * Return 0 if SMP is to be sent
 * Return 1 if SMP was consumed locally (whether or not solicited)
 * Return < 0 if error
 */
static int handle_outgoing_dr_smp(struct ib_mad_agent_private *mad_agent_priv,
				  struct ib_mad_send_wr_private *mad_send_wr)
{
	int ret = 0;
	struct ib_smp *smp = mad_send_wr->send_buf.mad;
	struct opa_smp *opa_smp = (struct opa_smp *)smp;
	unsigned long flags;
	struct ib_mad_local_private *local;
	struct ib_mad_private *mad_priv;
	struct ib_mad_port_private *port_priv;
	struct ib_mad_agent_private *recv_mad_agent = NULL;
	struct ib_device *device = mad_agent_priv->agent.device;
	u8 port_num;
	struct ib_wc mad_wc;
	struct ib_send_wr *send_wr = &mad_send_wr->send_wr;
	size_t mad_size = port_mad_size(mad_agent_priv->qp_info->port_priv);
	u16 out_mad_pkey_index = 0;
	u16 drslid;
	bool opa = rdma_cap_opa_mad(mad_agent_priv->qp_info->port_priv->device,
				    mad_agent_priv->qp_info->port_priv->port_num);

	if (rdma_cap_ib_switch(device) &&
	    smp->mgmt_class == IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE)
		port_num = send_wr->wr.ud.port_num;
	else
		port_num = mad_agent_priv->agent.port_num;

	/*
	 * Directed route handling starts if the initial LID routed part of
	 * a request or the ending LID routed part of a response is empty.
	 * If we are at the start of the LID routed part, don't update the
	 * hop_ptr or hop_cnt.  See section 14.2.2, Vol 1 IB spec.
	 */
	if (opa && smp->class_version == OPA_SMP_CLASS_VERSION) {
		u32 opa_drslid;

		if ((opa_get_smp_direction(opa_smp)
		     ? opa_smp->route.dr.dr_dlid : opa_smp->route.dr.dr_slid) ==
		     OPA_LID_PERMISSIVE &&
		     opa_smi_handle_dr_smp_send(opa_smp,
						rdma_cap_ib_switch(device),
						port_num) == IB_SMI_DISCARD) {
			ret = -EINVAL;
			dev_err(&device->dev, "OPA Invalid directed route\n");
			goto out;
		}
		opa_drslid = be32_to_cpu(opa_smp->route.dr.dr_slid);
		if (opa_drslid != be32_to_cpu(OPA_LID_PERMISSIVE) &&
		    opa_drslid & 0xffff0000) {
			ret = -EINVAL;
			dev_err(&device->dev, "OPA Invalid dr_slid 0x%x\n",
			       opa_drslid);
			goto out;
		}
		drslid = (u16)(opa_drslid & 0x0000ffff);

		/* Check to post send on QP or process locally */
		if (opa_smi_check_local_smp(opa_smp, device) == IB_SMI_DISCARD &&
		    opa_smi_check_local_returning_smp(opa_smp, device) == IB_SMI_DISCARD)
			goto out;
	} else {
		if ((ib_get_smp_direction(smp) ? smp->dr_dlid : smp->dr_slid) ==
		     IB_LID_PERMISSIVE &&
		     smi_handle_dr_smp_send(smp, rdma_cap_ib_switch(device), port_num) ==
		     IB_SMI_DISCARD) {
			ret = -EINVAL;
			dev_err(&device->dev, "Invalid directed route\n");
			goto out;
		}
		drslid = be16_to_cpu(smp->dr_slid);

		/* Check to post send on QP or process locally */
		if (smi_check_local_smp(smp, device) == IB_SMI_DISCARD &&
		    smi_check_local_returning_smp(smp, device) == IB_SMI_DISCARD)
			goto out;
	}

	local = kmalloc(sizeof *local, GFP_ATOMIC);
	if (!local) {
		ret = -ENOMEM;
		dev_err(&device->dev, "No memory for ib_mad_local_private\n");
		goto out;
	}
	local->mad_priv = NULL;
	local->recv_mad_agent = NULL;
	mad_priv = alloc_mad_private(mad_size, GFP_ATOMIC);
	if (!mad_priv) {
		ret = -ENOMEM;
		dev_err(&device->dev, "No memory for local response MAD\n");
		kfree(local);
		goto out;
	}

	build_smp_wc(mad_agent_priv->agent.qp,
		     send_wr->wr_id, drslid,
		     send_wr->wr.ud.pkey_index,
		     send_wr->wr.ud.port_num, &mad_wc);

	if (opa && smp->base_version == OPA_MGMT_BASE_VERSION) {
		mad_wc.byte_len = mad_send_wr->send_buf.hdr_len
					+ mad_send_wr->send_buf.data_len
					+ sizeof(struct ib_grh);
	}

	/* No GRH for DR SMP */
	ret = device->process_mad(device, 0, port_num, &mad_wc, NULL,
				  (const struct ib_mad_hdr *)smp, mad_size,
				  (struct ib_mad_hdr *)mad_priv->mad,
				  &mad_size, &out_mad_pkey_index);
	switch (ret)
	{
	case IB_MAD_RESULT_SUCCESS | IB_MAD_RESULT_REPLY:
		if (ib_response_mad((const struct ib_mad_hdr *)mad_priv->mad) &&
		    mad_agent_priv->agent.recv_handler) {
			local->mad_priv = mad_priv;
			local->recv_mad_agent = mad_agent_priv;
			/*
			 * Reference MAD agent until receive
			 * side of local completion handled
			 */
			atomic_inc(&mad_agent_priv->refcount);
		} else
			kfree(mad_priv);
		break;
	case IB_MAD_RESULT_SUCCESS | IB_MAD_RESULT_CONSUMED:
		kfree(mad_priv);
		break;
	case IB_MAD_RESULT_SUCCESS:
		/* Treat like an incoming receive MAD */
		port_priv = ib_get_mad_port(mad_agent_priv->agent.device,
					    mad_agent_priv->agent.port_num);
		if (port_priv) {
			memcpy(mad_priv->mad, smp, mad_priv->mad_size);
			recv_mad_agent = find_mad_agent(port_priv,
						        (const struct ib_mad_hdr *)mad_priv->mad);
		}
		if (!port_priv || !recv_mad_agent) {
			/*
			 * No receiving agent so drop packet and
			 * generate send completion.
			 */
			kfree(mad_priv);
			break;
		}
		local->mad_priv = mad_priv;
		local->recv_mad_agent = recv_mad_agent;
		break;
	default:
		kfree(mad_priv);
		kfree(local);
		ret = -EINVAL;
		goto out;
	}

	local->mad_send_wr = mad_send_wr;
	if (opa) {
		local->mad_send_wr->send_wr.wr.ud.pkey_index = out_mad_pkey_index;
		local->return_wc_byte_len = mad_size;
	}
	/* Reference MAD agent until send side of local completion handled */
	atomic_inc(&mad_agent_priv->refcount);
	/* Queue local completion to local list */
	spin_lock_irqsave(&mad_agent_priv->lock, flags);
	list_add_tail(&local->completion_list, &mad_agent_priv->local_list);
	spin_unlock_irqrestore(&mad_agent_priv->lock, flags);
	queue_work(mad_agent_priv->qp_info->port_priv->wq,
		   &mad_agent_priv->local_work);
	ret = 1;
out:
	return ret;
}

static int get_pad_size(int hdr_len, int data_len, size_t mad_size)
{
	int seg_size, pad;

	seg_size = mad_size - hdr_len;
	if (data_len && seg_size) {
		pad = seg_size - data_len % seg_size;
		return pad == seg_size ? 0 : pad;
	} else
		return seg_size;
}

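/*
 * Worked example (assuming an IB-sized MAD, mad_size = 256 bytes, with
 * hdr_len = IB_MGMT_SA_HDR = 56): seg_size is 200, so a send of
 * data_len = 360 yields pad = 200 - (360 % 200) = 40, i.e. two
 * 200-byte RMPP segments carrying 360 data bytes plus 40 bytes of
 * zeroed padding.
 */
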
static void free_send_rmpp_list(struct ib_mad_send_wr_private *mad_send_wr)
{
	struct ib_rmpp_segment *s, *t;

	list_for_each_entry_safe(s, t, &mad_send_wr->rmpp_list, list) {
		list_del(&s->list);
		kfree(s);
	}
}

static int alloc_send_rmpp_list(struct ib_mad_send_wr_private *send_wr,
				size_t mad_size, gfp_t gfp_mask)
{
	struct ib_mad_send_buf *send_buf = &send_wr->send_buf;
	struct ib_rmpp_mad *rmpp_mad = send_buf->mad;
	struct ib_rmpp_segment *seg = NULL;
	int left, seg_size, pad;

	send_buf->seg_size = mad_size - send_buf->hdr_len;
	send_buf->seg_rmpp_size = mad_size - IB_MGMT_RMPP_HDR;
	seg_size = send_buf->seg_size;
	pad = send_wr->pad;

	/* Allocate data segments. */
	for (left = send_buf->data_len + pad; left > 0; left -= seg_size) {
		seg = kmalloc(sizeof (*seg) + seg_size, gfp_mask);
		if (!seg) {
			dev_err(&send_buf->mad_agent->device->dev,
				"alloc_send_rmpp_segs: RMPP mem alloc failed for len %zd, gfp %#x\n",
				sizeof (*seg) + seg_size, gfp_mask);
			free_send_rmpp_list(send_wr);
			return -ENOMEM;
		}
		seg->num = ++send_buf->seg_count;
		list_add_tail(&seg->list, &send_wr->rmpp_list);
	}

	/* Zero any padding */
	if (pad)
		memset(seg->data + seg_size - pad, 0, pad);

	rmpp_mad->rmpp_hdr.rmpp_version = send_wr->mad_agent_priv->
					  agent.rmpp_version;
	rmpp_mad->rmpp_hdr.rmpp_type = IB_MGMT_RMPP_TYPE_DATA;
	ib_set_rmpp_flags(&rmpp_mad->rmpp_hdr, IB_MGMT_RMPP_FLAG_ACTIVE);

	send_wr->cur_seg = container_of(send_wr->rmpp_list.next,
					struct ib_rmpp_segment, list);
	send_wr->last_ack_seg = send_wr->cur_seg;
	return 0;
}

int ib_mad_kernel_rmpp_agent(const struct ib_mad_agent *agent)
{
	return agent->rmpp_version && !(agent->flags & IB_MAD_USER_RMPP);
}
EXPORT_SYMBOL(ib_mad_kernel_rmpp_agent);

struct ib_mad_send_buf * ib_create_send_mad(struct ib_mad_agent *mad_agent,
					    u32 remote_qpn, u16 pkey_index,
					    int rmpp_active,
					    int hdr_len, int data_len,
					    gfp_t gfp_mask,
					    u8 base_version)
{
	struct ib_mad_agent_private *mad_agent_priv;
	struct ib_mad_send_wr_private *mad_send_wr;
	int pad, message_size, ret, size;
	void *buf;
	size_t mad_size;
	bool opa;

	mad_agent_priv = container_of(mad_agent, struct ib_mad_agent_private,
				      agent);

	opa = rdma_cap_opa_mad(mad_agent->device, mad_agent->port_num);

	if (opa && base_version == OPA_MGMT_BASE_VERSION)
		mad_size = sizeof(struct opa_mad);
	else
		mad_size = sizeof(struct ib_mad);

	pad = get_pad_size(hdr_len, data_len, mad_size);
	message_size = hdr_len + data_len + pad;

	if (ib_mad_kernel_rmpp_agent(mad_agent)) {
		if (!rmpp_active && message_size > mad_size)
			return ERR_PTR(-EINVAL);
	} else
		if (rmpp_active || message_size > mad_size)
			return ERR_PTR(-EINVAL);

	size = rmpp_active ? hdr_len : mad_size;
	buf = kzalloc(sizeof *mad_send_wr + size, gfp_mask);
	if (!buf)
		return ERR_PTR(-ENOMEM);

	mad_send_wr = buf + size;
	INIT_LIST_HEAD(&mad_send_wr->rmpp_list);
	mad_send_wr->send_buf.mad = buf;
	mad_send_wr->send_buf.hdr_len = hdr_len;
	mad_send_wr->send_buf.data_len = data_len;
	mad_send_wr->pad = pad;

	mad_send_wr->mad_agent_priv = mad_agent_priv;
	mad_send_wr->sg_list[0].length = hdr_len;
	mad_send_wr->sg_list[0].lkey = mad_agent->qp->pd->local_dma_lkey;

	/* OPA MADs don't have to be the full 2048 bytes */
	if (opa && base_version == OPA_MGMT_BASE_VERSION &&
	    data_len < mad_size - hdr_len)
		mad_send_wr->sg_list[1].length = data_len;
	else
		mad_send_wr->sg_list[1].length = mad_size - hdr_len;

	mad_send_wr->sg_list[1].lkey = mad_agent->qp->pd->local_dma_lkey;

	mad_send_wr->send_wr.wr_id = (unsigned long) mad_send_wr;
	mad_send_wr->send_wr.sg_list = mad_send_wr->sg_list;
	mad_send_wr->send_wr.num_sge = 2;
	mad_send_wr->send_wr.opcode = IB_WR_SEND;
	mad_send_wr->send_wr.send_flags = IB_SEND_SIGNALED;
	mad_send_wr->send_wr.wr.ud.remote_qpn = remote_qpn;
	mad_send_wr->send_wr.wr.ud.remote_qkey = IB_QP_SET_QKEY;
	mad_send_wr->send_wr.wr.ud.pkey_index = pkey_index;

	if (rmpp_active) {
		ret = alloc_send_rmpp_list(mad_send_wr, mad_size, gfp_mask);
		if (ret) {
			kfree(buf);
			return ERR_PTR(ret);
		}
	}

	mad_send_wr->send_buf.mad_agent = mad_agent;
	atomic_inc(&mad_agent_priv->refcount);
	return &mad_send_wr->send_buf;
}
EXPORT_SYMBOL(ib_create_send_mad);

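/*
 * Illustrative call (a sketch loosely modeled on in-kernel SA clients,
 * not a verbatim quote of one): building an RMPP-capable SA request on
 * an agent registered earlier might look like
 *
 *	msg = ib_create_send_mad(agent, remote_qpn, pkey_index, 1,
 *				 IB_MGMT_SA_HDR, IB_MGMT_SA_DATA,
 *				 GFP_KERNEL, IB_MGMT_BASE_VERSION);
 *	if (IS_ERR(msg))
 *		return PTR_ERR(msg);
 *
 * The caller then fills in msg->mad, sets msg->ah, msg->timeout_ms and
 * msg->retries, and posts with ib_post_send_mad(msg, NULL).  agent,
 * remote_qpn and pkey_index are placeholders owned by the caller.
 */
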
int ib_get_mad_data_offset(u8 mgmt_class)
{
	if (mgmt_class == IB_MGMT_CLASS_SUBN_ADM)
		return IB_MGMT_SA_HDR;
	else if ((mgmt_class == IB_MGMT_CLASS_DEVICE_MGMT) ||
		 (mgmt_class == IB_MGMT_CLASS_DEVICE_ADM) ||
		 (mgmt_class == IB_MGMT_CLASS_BIS))
		return IB_MGMT_DEVICE_HDR;
	else if ((mgmt_class >= IB_MGMT_CLASS_VENDOR_RANGE2_START) &&
		 (mgmt_class <= IB_MGMT_CLASS_VENDOR_RANGE2_END))
		return IB_MGMT_VENDOR_HDR;
	else
		return IB_MGMT_MAD_HDR;
}
EXPORT_SYMBOL(ib_get_mad_data_offset);

int ib_is_mad_class_rmpp(u8 mgmt_class)
{
	if ((mgmt_class == IB_MGMT_CLASS_SUBN_ADM) ||
	    (mgmt_class == IB_MGMT_CLASS_DEVICE_MGMT) ||
	    (mgmt_class == IB_MGMT_CLASS_DEVICE_ADM) ||
	    (mgmt_class == IB_MGMT_CLASS_BIS) ||
	    ((mgmt_class >= IB_MGMT_CLASS_VENDOR_RANGE2_START) &&
	     (mgmt_class <= IB_MGMT_CLASS_VENDOR_RANGE2_END)))
		return 1;
	return 0;
}
EXPORT_SYMBOL(ib_is_mad_class_rmpp);

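/*
 * Taken together: for the SA class (IB_MGMT_CLASS_SUBN_ADM, 0x03) these
 * two helpers report a data offset of IB_MGMT_SA_HDR (56 bytes) and
 * that the class is RMPP-capable, which is how callers split hdr_len
 * and data_len when calling ib_create_send_mad() above.
 */
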
void *ib_get_rmpp_segment(struct ib_mad_send_buf *send_buf, int seg_num)
{
	struct ib_mad_send_wr_private *mad_send_wr;
	struct list_head *list;

	mad_send_wr = container_of(send_buf, struct ib_mad_send_wr_private,
				   send_buf);
	list = &mad_send_wr->cur_seg->list;

	if (mad_send_wr->cur_seg->num < seg_num) {
		list_for_each_entry(mad_send_wr->cur_seg, list, list)
			if (mad_send_wr->cur_seg->num == seg_num)
				break;
	} else if (mad_send_wr->cur_seg->num > seg_num) {
		list_for_each_entry_reverse(mad_send_wr->cur_seg, list, list)
			if (mad_send_wr->cur_seg->num == seg_num)
				break;
	}
	return mad_send_wr->cur_seg->data;
}
EXPORT_SYMBOL(ib_get_rmpp_segment);

static inline void *ib_get_payload(struct ib_mad_send_wr_private *mad_send_wr)
{
	if (mad_send_wr->send_buf.seg_count)
		return ib_get_rmpp_segment(&mad_send_wr->send_buf,
					   mad_send_wr->seg_num);
	else
		return mad_send_wr->send_buf.mad +
		       mad_send_wr->send_buf.hdr_len;
}

void ib_free_send_mad(struct ib_mad_send_buf *send_buf)
{
	struct ib_mad_agent_private *mad_agent_priv;
	struct ib_mad_send_wr_private *mad_send_wr;

	mad_agent_priv = container_of(send_buf->mad_agent,
				      struct ib_mad_agent_private, agent);
	mad_send_wr = container_of(send_buf, struct ib_mad_send_wr_private,
				   send_buf);

	free_send_rmpp_list(mad_send_wr);
	kfree(send_buf->mad);
	deref_mad_agent(mad_agent_priv);
}
EXPORT_SYMBOL(ib_free_send_mad);

int ib_send_mad(struct ib_mad_send_wr_private *mad_send_wr)
{
	struct ib_mad_qp_info *qp_info;
	struct list_head *list;
	struct ib_send_wr *bad_send_wr;
	struct ib_mad_agent *mad_agent;
	struct ib_sge *sge;
	unsigned long flags;
	int ret;

	/* Set WR ID to find mad_send_wr upon completion */
	qp_info = mad_send_wr->mad_agent_priv->qp_info;
	mad_send_wr->send_wr.wr_id = (unsigned long)&mad_send_wr->mad_list;
	mad_send_wr->mad_list.mad_queue = &qp_info->send_queue;

	mad_agent = mad_send_wr->send_buf.mad_agent;
	sge = mad_send_wr->sg_list;
	sge[0].addr = ib_dma_map_single(mad_agent->device,
					mad_send_wr->send_buf.mad,
					sge[0].length,
					DMA_TO_DEVICE);
	if (unlikely(ib_dma_mapping_error(mad_agent->device, sge[0].addr)))
		return -ENOMEM;

	mad_send_wr->header_mapping = sge[0].addr;

	sge[1].addr = ib_dma_map_single(mad_agent->device,
					ib_get_payload(mad_send_wr),
					sge[1].length,
					DMA_TO_DEVICE);
	if (unlikely(ib_dma_mapping_error(mad_agent->device, sge[1].addr))) {
		ib_dma_unmap_single(mad_agent->device,
				    mad_send_wr->header_mapping,
				    sge[0].length, DMA_TO_DEVICE);
		return -ENOMEM;
	}
	mad_send_wr->payload_mapping = sge[1].addr;

	spin_lock_irqsave(&qp_info->send_queue.lock, flags);
	if (qp_info->send_queue.count < qp_info->send_queue.max_active) {
		ret = ib_post_send(mad_agent->qp, &mad_send_wr->send_wr,
				   &bad_send_wr);
		list = &qp_info->send_queue.list;
	} else {
		ret = 0;
		list = &qp_info->overflow_list;
	}

	if (!ret) {
		qp_info->send_queue.count++;
		list_add_tail(&mad_send_wr->mad_list.list, list);
	}
	spin_unlock_irqrestore(&qp_info->send_queue.lock, flags);
	if (ret) {
		ib_dma_unmap_single(mad_agent->device,
				    mad_send_wr->header_mapping,
				    sge[0].length, DMA_TO_DEVICE);
		ib_dma_unmap_single(mad_agent->device,
				    mad_send_wr->payload_mapping,
				    sge[1].length, DMA_TO_DEVICE);
	}
	return ret;
}

/*
 * ib_post_send_mad - Posts MAD(s) to the send queue of the QP associated
 * with the registered client
 */
int ib_post_send_mad(struct ib_mad_send_buf *send_buf,
		     struct ib_mad_send_buf **bad_send_buf)
{
	struct ib_mad_agent_private *mad_agent_priv;
	struct ib_mad_send_buf *next_send_buf;
	struct ib_mad_send_wr_private *mad_send_wr;
	unsigned long flags;
	int ret = -EINVAL;

	/* Walk list of send WRs and post each on send list */
	for (; send_buf; send_buf = next_send_buf) {

		mad_send_wr = container_of(send_buf,
					   struct ib_mad_send_wr_private,
					   send_buf);
		mad_agent_priv = mad_send_wr->mad_agent_priv;

		if (!send_buf->mad_agent->send_handler ||
		    (send_buf->timeout_ms &&
		     !send_buf->mad_agent->recv_handler)) {
			ret = -EINVAL;
			goto error;
		}

		if (!ib_is_mad_class_rmpp(((struct ib_mad_hdr *) send_buf->mad)->mgmt_class)) {
			if (mad_agent_priv->agent.rmpp_version) {
				ret = -EINVAL;
				goto error;
			}
		}

		/*
		 * Save pointer to next work request to post in case the
		 * current one completes, and the user modifies the work
		 * request associated with the completion
		 */
		next_send_buf = send_buf->next;
		mad_send_wr->send_wr.wr.ud.ah = send_buf->ah;

		if (((struct ib_mad_hdr *) send_buf->mad)->mgmt_class ==
		    IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE) {
			ret = handle_outgoing_dr_smp(mad_agent_priv,
						     mad_send_wr);
			if (ret < 0)		/* error */
				goto error;
			else if (ret == 1)	/* locally consumed */
				continue;
		}

		mad_send_wr->tid = ((struct ib_mad_hdr *) send_buf->mad)->tid;
		/* Timeout will be updated after send completes */
		mad_send_wr->timeout = msecs_to_jiffies(send_buf->timeout_ms);
		mad_send_wr->max_retries = send_buf->retries;
		mad_send_wr->retries_left = send_buf->retries;
		send_buf->retries = 0;
		/* Reference for work request to QP + response */
		mad_send_wr->refcount = 1 + (mad_send_wr->timeout > 0);
		mad_send_wr->status = IB_WC_SUCCESS;

		/* Reference MAD agent until send completes */
		atomic_inc(&mad_agent_priv->refcount);
		spin_lock_irqsave(&mad_agent_priv->lock, flags);
		list_add_tail(&mad_send_wr->agent_list,
			      &mad_agent_priv->send_list);
		spin_unlock_irqrestore(&mad_agent_priv->lock, flags);

		if (ib_mad_kernel_rmpp_agent(&mad_agent_priv->agent)) {
			ret = ib_send_rmpp_mad(mad_send_wr);
			if (ret >= 0 && ret != IB_RMPP_RESULT_CONSUMED)
				ret = ib_send_mad(mad_send_wr);
		} else
			ret = ib_send_mad(mad_send_wr);
		if (ret < 0) {
			/* Fail send request */
			spin_lock_irqsave(&mad_agent_priv->lock, flags);
			list_del(&mad_send_wr->agent_list);
			spin_unlock_irqrestore(&mad_agent_priv->lock, flags);
			atomic_dec(&mad_agent_priv->refcount);
			goto error;
		}
	}
	return 0;
error:
	if (bad_send_buf)
		*bad_send_buf = send_buf;
	return ret;
}
EXPORT_SYMBOL(ib_post_send_mad);

/*
 * ib_free_recv_mad - Returns data buffers used to receive
 * a MAD to the access layer
 */
void ib_free_recv_mad(struct ib_mad_recv_wc *mad_recv_wc)
{
	struct ib_mad_recv_buf *mad_recv_buf, *temp_recv_buf;
	struct ib_mad_private_header *mad_priv_hdr;
	struct ib_mad_private *priv;
	struct list_head free_list;

	INIT_LIST_HEAD(&free_list);
	list_splice_init(&mad_recv_wc->rmpp_list, &free_list);

	list_for_each_entry_safe(mad_recv_buf, temp_recv_buf,
					&free_list, list) {
		mad_recv_wc = container_of(mad_recv_buf, struct ib_mad_recv_wc,
					   recv_buf);
		mad_priv_hdr = container_of(mad_recv_wc,
					    struct ib_mad_private_header,
					    recv_wc);
		priv = container_of(mad_priv_hdr, struct ib_mad_private,
				    header);
		kfree(priv);
	}
}
EXPORT_SYMBOL(ib_free_recv_mad);

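/*
 * The expected call site is a client's recv_handler: once the handler
 * is done with the (possibly multi-buffer RMPP) receive, it returns
 * the buffers here.  A sketch, where my_recv_handler and process are
 * caller-supplied placeholders:
 *
 *	static void my_recv_handler(struct ib_mad_agent *agent,
 *				    struct ib_mad_recv_wc *mad_recv_wc)
 *	{
 *		process(mad_recv_wc->recv_buf.mad);
 *		ib_free_recv_mad(mad_recv_wc);
 *	}
 */
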
1da177e4 LT |
1327 | struct ib_mad_agent *ib_redirect_mad_qp(struct ib_qp *qp, |
1328 | u8 rmpp_version, | |
1329 | ib_mad_send_handler send_handler, | |
1330 | ib_mad_recv_handler recv_handler, | |
1331 | void *context) | |
1332 | { | |
1333 | return ERR_PTR(-EINVAL); /* XXX: for now */ | |
1334 | } | |
1335 | EXPORT_SYMBOL(ib_redirect_mad_qp); | |
1336 | ||
1337 | int ib_process_mad_wc(struct ib_mad_agent *mad_agent, | |
1338 | struct ib_wc *wc) | |
1339 | { | |
7ef5d4b0 IW |
1340 | dev_err(&mad_agent->device->dev, |
1341 | "ib_process_mad_wc() not implemented yet\n"); | |
1da177e4 LT |
1342 | return 0; |
1343 | } | |
1344 | EXPORT_SYMBOL(ib_process_mad_wc); | |
1345 | ||
1346 | static int method_in_use(struct ib_mad_mgmt_method_table **method, | |
1347 | struct ib_mad_reg_req *mad_reg_req) | |
1348 | { | |
1349 | int i; | |
1350 | ||
19b629f5 | 1351 | for_each_set_bit(i, mad_reg_req->method_mask, IB_MGMT_MAX_METHODS) { |
1da177e4 | 1352 | if ((*method)->agent[i]) { |
7ef5d4b0 | 1353 | pr_err("Method %d already in use\n", i); |
1da177e4 LT |
1354 | return -EINVAL; |
1355 | } | |
1356 | } | |
1357 | return 0; | |
1358 | } | |
1359 | ||
1360 | static int allocate_method_table(struct ib_mad_mgmt_method_table **method) | |
1361 | { | |
1362 | /* Allocate management method table */ | |
de6eb66b | 1363 | *method = kzalloc(sizeof **method, GFP_ATOMIC); |
1da177e4 | 1364 | if (!*method) { |
7ef5d4b0 | 1365 | pr_err("No memory for ib_mad_mgmt_method_table\n"); |
1da177e4 LT |
1366 | return -ENOMEM; |
1367 | } | |
1da177e4 LT |
1368 | |
1369 | return 0; | |
1370 | } | |
1371 | ||
1372 | /* | |
1373 | * Check to see if there are any methods still in use | |
1374 | */ | |
1375 | static int check_method_table(struct ib_mad_mgmt_method_table *method) | |
1376 | { | |
1377 | int i; | |
1378 | ||
1379 | for (i = 0; i < IB_MGMT_MAX_METHODS; i++) | |
1380 | if (method->agent[i]) | |
1381 | return 1; | |
1382 | return 0; | |
1383 | } | |
1384 | ||
1385 | /* | |
1386 | * Check to see if there are any method tables for this class still in use | |
1387 | */ | |
1388 | static int check_class_table(struct ib_mad_mgmt_class_table *class) | |
1389 | { | |
1390 | int i; | |
1391 | ||
1392 | for (i = 0; i < MAX_MGMT_CLASS; i++) | |
1393 | if (class->method_table[i]) | |
1394 | return 1; | |
1395 | return 0; | |
1396 | } | |
1397 | ||
1398 | static int check_vendor_class(struct ib_mad_mgmt_vendor_class *vendor_class) | |
1399 | { | |
1400 | int i; | |
1401 | ||
1402 | for (i = 0; i < MAX_MGMT_OUI; i++) | |
1403 | if (vendor_class->method_table[i]) | |
1404 | return 1; | |
1405 | return 0; | |
1406 | } | |
1407 | ||
1408 | static int find_vendor_oui(struct ib_mad_mgmt_vendor_class *vendor_class, | |
d94bd266 | 1409 | const char *oui) |
1da177e4 LT |
1410 | { |
1411 | int i; | |
1412 | ||
1413 | for (i = 0; i < MAX_MGMT_OUI; i++) | |
3cd96564 RD |
1414 | /* Is there matching OUI for this vendor class ? */ |
1415 | if (!memcmp(vendor_class->oui[i], oui, 3)) | |
1da177e4 LT |
1416 | return i; |
1417 | ||
1418 | return -1; | |
1419 | } | |
1420 | ||
1421 | static int check_vendor_table(struct ib_mad_mgmt_vendor_class_table *vendor) | |
1422 | { | |
1423 | int i; | |
1424 | ||
1425 | for (i = 0; i < MAX_MGMT_VENDOR_RANGE2; i++) | |
1426 | if (vendor->vendor_class[i]) | |
1427 | return 1; | |
1428 | ||
1429 | return 0; | |
1430 | } | |
1431 | ||
1432 | static void remove_methods_mad_agent(struct ib_mad_mgmt_method_table *method, | |
1433 | struct ib_mad_agent_private *agent) | |
1434 | { | |
1435 | int i; | |
1436 | ||
1437 | /* Remove any methods for this mad agent */ | |
1438 | for (i = 0; i < IB_MGMT_MAX_METHODS; i++) { | |
1439 | if (method->agent[i] == agent)
1440 | method->agent[i] = NULL;
1442 | } | |
1443 | } | |
1444 | ||
1445 | static int add_nonoui_reg_req(struct ib_mad_reg_req *mad_reg_req, | |
1446 | struct ib_mad_agent_private *agent_priv, | |
1447 | u8 mgmt_class) | |
1448 | { | |
1449 | struct ib_mad_port_private *port_priv; | |
1450 | struct ib_mad_mgmt_class_table **class; | |
1451 | struct ib_mad_mgmt_method_table **method; | |
1452 | int i, ret; | |
1453 | ||
1454 | port_priv = agent_priv->qp_info->port_priv; | |
1455 | class = &port_priv->version[mad_reg_req->mgmt_class_version].class; | |
1456 | if (!*class) { | |
1457 | /* Allocate management class table for "new" class version */ | |
de6eb66b | 1458 | *class = kzalloc(sizeof **class, GFP_ATOMIC); |
1da177e4 | 1459 | if (!*class) { |
7ef5d4b0 IW |
1460 | dev_err(&agent_priv->agent.device->dev, |
1461 | "No memory for ib_mad_mgmt_class_table\n"); | |
1da177e4 LT |
1462 | ret = -ENOMEM; |
1463 | goto error1; | |
1464 | } | |
de6eb66b | 1465 | |
1da177e4 LT |
1466 | /* Allocate method table for this management class */ |
1467 | method = &(*class)->method_table[mgmt_class]; | |
1468 | if ((ret = allocate_method_table(method))) | |
1469 | goto error2; | |
1470 | } else { | |
1471 | method = &(*class)->method_table[mgmt_class]; | |
1472 | if (!*method) { | |
1473 | /* Allocate method table for this management class */ | |
1474 | if ((ret = allocate_method_table(method))) | |
1475 | goto error1; | |
1476 | } | |
1477 | } | |
1478 | ||
1479 | /* Now, make sure methods are not already in use */ | |
1480 | if (method_in_use(method, mad_reg_req)) | |
1481 | goto error3; | |
1482 | ||
1483 | /* Finally, add in methods being registered */ | |
19b629f5 | 1484 | for_each_set_bit(i, mad_reg_req->method_mask, IB_MGMT_MAX_METHODS) |
1da177e4 | 1485 | (*method)->agent[i] = agent_priv; |
19b629f5 | 1486 | |
1da177e4 LT |
1487 | return 0; |
1488 | ||
1489 | error3: | |
1490 | /* Remove any methods for this mad agent */ | |
1491 | remove_methods_mad_agent(*method, agent_priv); | |
1492 | /* Now, check to see if there are any methods in use */ | |
1493 | if (!check_method_table(*method)) { | |
1494 | /* If not, release management method table */ | |
1495 | kfree(*method); | |
1496 | *method = NULL; | |
1497 | } | |
1498 | ret = -EINVAL; | |
1499 | goto error1; | |
1500 | error2: | |
1501 | kfree(*class); | |
1502 | *class = NULL; | |
1503 | error1: | |
1504 | return ret; | |
1505 | } | |
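/*
 * Editorial sketch of the routing structure that the registration paths
 * above and below populate (hypothetical stand-alone types): a received
 * MAD is resolved version -> class -> method, with one extra OUI level
 * for vendor classes. Sizes are illustrative only.
 */
struct sketch_method_table { void *agent[128]; };
struct sketch_class_table { struct sketch_method_table *method_table[80]; };
struct sketch_port_version { struct sketch_class_table *class; };
/* lookup: port->version[ver].class->method_table[cls]->agent[method] */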
1506 | ||
1507 | static int add_oui_reg_req(struct ib_mad_reg_req *mad_reg_req, | |
1508 | struct ib_mad_agent_private *agent_priv) | |
1509 | { | |
1510 | struct ib_mad_port_private *port_priv; | |
1511 | struct ib_mad_mgmt_vendor_class_table **vendor_table; | |
1512 | struct ib_mad_mgmt_vendor_class_table *vendor = NULL; | |
1513 | struct ib_mad_mgmt_vendor_class *vendor_class = NULL; | |
1514 | struct ib_mad_mgmt_method_table **method; | |
1515 | int i, ret = -ENOMEM; | |
1516 | u8 vclass; | |
1517 | ||
1518 | /* "New" vendor (with OUI) class */ | |
1519 | vclass = vendor_class_index(mad_reg_req->mgmt_class); | |
1520 | port_priv = agent_priv->qp_info->port_priv; | |
1521 | vendor_table = &port_priv->version[ | |
1522 | mad_reg_req->mgmt_class_version].vendor; | |
1523 | if (!*vendor_table) { | |
1524 | /* Allocate mgmt vendor class table for "new" class version */ | |
de6eb66b | 1525 | vendor = kzalloc(sizeof *vendor, GFP_ATOMIC); |
1da177e4 | 1526 | if (!vendor) { |
7ef5d4b0 IW |
1527 | dev_err(&agent_priv->agent.device->dev, |
1528 | "No memory for ib_mad_mgmt_vendor_class_table\n"); | |
1da177e4 LT |
1529 | goto error1; |
1530 | } | |
de6eb66b | 1531 | |
1da177e4 LT |
1532 | *vendor_table = vendor; |
1533 | } | |
1534 | if (!(*vendor_table)->vendor_class[vclass]) { | |
1535 | /* Allocate table for this management vendor class */ | |
de6eb66b | 1536 | vendor_class = kzalloc(sizeof *vendor_class, GFP_ATOMIC); |
1da177e4 | 1537 | if (!vendor_class) { |
7ef5d4b0 IW |
1538 | dev_err(&agent_priv->agent.device->dev, |
1539 | "No memory for ib_mad_mgmt_vendor_class\n"); | |
1da177e4 LT |
1540 | goto error2; |
1541 | } | |
de6eb66b | 1542 | |
1da177e4 LT |
1543 | (*vendor_table)->vendor_class[vclass] = vendor_class; |
1544 | } | |
1545 | for (i = 0; i < MAX_MGMT_OUI; i++) { | |
1546 | /* Is there a matching OUI for this vendor class? */
1547 | if (!memcmp((*vendor_table)->vendor_class[vclass]->oui[i], | |
1548 | mad_reg_req->oui, 3)) { | |
1549 | method = &(*vendor_table)->vendor_class[ | |
1550 | vclass]->method_table[i]; | |
1551 | BUG_ON(!*method); | |
1552 | goto check_in_use; | |
1553 | } | |
1554 | } | |
1555 | for (i = 0; i < MAX_MGMT_OUI; i++) { | |
1556 | /* OUI slot available? */
1557 | if (!is_vendor_oui((*vendor_table)->vendor_class[ | |
1558 | vclass]->oui[i])) { | |
1559 | method = &(*vendor_table)->vendor_class[ | |
1560 | vclass]->method_table[i]; | |
1561 | BUG_ON(*method); | |
1562 | /* Allocate method table for this OUI */ | |
1563 | if ((ret = allocate_method_table(method))) | |
1564 | goto error3; | |
1565 | memcpy((*vendor_table)->vendor_class[vclass]->oui[i], | |
1566 | mad_reg_req->oui, 3); | |
1567 | goto check_in_use; | |
1568 | } | |
1569 | } | |
7ef5d4b0 | 1570 | dev_err(&agent_priv->agent.device->dev, "All OUI slots in use\n"); |
1da177e4 LT |
1571 | goto error3; |
1572 | ||
1573 | check_in_use: | |
1574 | /* Now, make sure methods are not already in use */ | |
1575 | if (method_in_use(method, mad_reg_req)) | |
1576 | goto error4; | |
1577 | ||
1578 | /* Finally, add in methods being registered */ | |
19b629f5 | 1579 | for_each_set_bit(i, mad_reg_req->method_mask, IB_MGMT_MAX_METHODS) |
1da177e4 | 1580 | (*method)->agent[i] = agent_priv; |
19b629f5 | 1581 | |
1da177e4 LT |
1582 | return 0; |
1583 | ||
1584 | error4: | |
1585 | /* Remove any methods for this mad agent */ | |
1586 | remove_methods_mad_agent(*method, agent_priv); | |
1587 | /* Now, check to see if there are any methods in use */ | |
1588 | if (!check_method_table(*method)) { | |
1589 | /* If not, release management method table */ | |
1590 | kfree(*method); | |
1591 | *method = NULL; | |
1592 | } | |
1593 | ret = -EINVAL; | |
1594 | error3: | |
1595 | if (vendor_class) { | |
1596 | (*vendor_table)->vendor_class[vclass] = NULL; | |
1597 | kfree(vendor_class); | |
1598 | } | |
1599 | error2: | |
1600 | if (vendor) { | |
1601 | *vendor_table = NULL; | |
1602 | kfree(vendor); | |
1603 | } | |
1604 | error1: | |
1605 | return ret; | |
1606 | } | |
1607 | ||
1608 | static void remove_mad_reg_req(struct ib_mad_agent_private *agent_priv) | |
1609 | { | |
1610 | struct ib_mad_port_private *port_priv; | |
1611 | struct ib_mad_mgmt_class_table *class; | |
1612 | struct ib_mad_mgmt_method_table *method; | |
1613 | struct ib_mad_mgmt_vendor_class_table *vendor; | |
1614 | struct ib_mad_mgmt_vendor_class *vendor_class; | |
1615 | int index; | |
1616 | u8 mgmt_class; | |
1617 | ||
1618 | /* | |
1619 | * Was a MAD registration request supplied
1620 | * with the original registration?
1621 | */ | |
1622 | if (!agent_priv->reg_req)
1623 | goto out;
1625 | ||
1626 | port_priv = agent_priv->qp_info->port_priv; | |
1627 | mgmt_class = convert_mgmt_class(agent_priv->reg_req->mgmt_class); | |
1628 | class = port_priv->version[ | |
1629 | agent_priv->reg_req->mgmt_class_version].class; | |
1630 | if (!class) | |
1631 | goto vendor_check; | |
1632 | ||
1633 | method = class->method_table[mgmt_class]; | |
1634 | if (method) { | |
1635 | /* Remove any methods for this mad agent */ | |
1636 | remove_methods_mad_agent(method, agent_priv); | |
1637 | /* Now, check to see if there are any methods still in use */ | |
1638 | if (!check_method_table(method)) { | |
1639 | /* If not, release management method table */ | |
1640 | kfree(method); | |
1641 | class->method_table[mgmt_class] = NULL; | |
1642 | /* Any management classes left? */
1643 | if (!check_class_table(class)) { | |
1644 | /* If not, release management class table */ | |
1645 | kfree(class); | |
1646 | port_priv->version[ | |
1647 | agent_priv->reg_req-> | |
1648 | mgmt_class_version].class = NULL; | |
1649 | } | |
1650 | } | |
1651 | } | |
1652 | ||
1653 | vendor_check: | |
1654 | if (!is_vendor_class(mgmt_class)) | |
1655 | goto out; | |
1656 | ||
1657 | /* normalize mgmt_class to vendor range 2 */ | |
1658 | mgmt_class = vendor_class_index(agent_priv->reg_req->mgmt_class); | |
1659 | vendor = port_priv->version[ | |
1660 | agent_priv->reg_req->mgmt_class_version].vendor; | |
1661 | ||
1662 | if (!vendor) | |
1663 | goto out; | |
1664 | ||
1665 | vendor_class = vendor->vendor_class[mgmt_class]; | |
1666 | if (vendor_class) { | |
1667 | index = find_vendor_oui(vendor_class, agent_priv->reg_req->oui); | |
1668 | if (index < 0) | |
1669 | goto out; | |
1670 | method = vendor_class->method_table[index]; | |
1671 | if (method) { | |
1672 | /* Remove any methods for this mad agent */ | |
1673 | remove_methods_mad_agent(method, agent_priv); | |
1674 | /* | |
1675 | * Now, check to see if there are | |
1676 | * any methods still in use | |
1677 | */ | |
1678 | if (!check_method_table(method)) { | |
1679 | /* If not, release management method table */ | |
1680 | kfree(method); | |
1681 | vendor_class->method_table[index] = NULL; | |
1682 | memset(vendor_class->oui[index], 0, 3); | |
1683 | /* Any OUIs left? */
1684 | if (!check_vendor_class(vendor_class)) { | |
1685 | /* If not, release vendor class table */ | |
1686 | kfree(vendor_class); | |
1687 | vendor->vendor_class[mgmt_class] = NULL; | |
1688 | /* Any other vendor classes left? */
1689 | if (!check_vendor_table(vendor)) { | |
1690 | kfree(vendor); | |
1691 | port_priv->version[ | |
1692 | agent_priv->reg_req-> | |
1693 | mgmt_class_version]. | |
1694 | vendor = NULL; | |
1695 | } | |
1696 | } | |
1697 | } | |
1698 | } | |
1699 | } | |
1700 | ||
1701 | out: | |
1702 | return; | |
1703 | } | |
1704 | ||
1da177e4 LT |
1705 | static struct ib_mad_agent_private * |
1706 | find_mad_agent(struct ib_mad_port_private *port_priv, | |
d94bd266 | 1707 | const struct ib_mad_hdr *mad_hdr) |
1da177e4 LT |
1708 | { |
1709 | struct ib_mad_agent_private *mad_agent = NULL; | |
1710 | unsigned long flags; | |
1711 | ||
1712 | spin_lock_irqsave(&port_priv->reg_lock, flags); | |
d94bd266 | 1713 | if (ib_response_mad(mad_hdr)) { |
1da177e4 LT |
1714 | u32 hi_tid; |
1715 | struct ib_mad_agent_private *entry; | |
1716 | ||
1717 | /* | |
1718 | * Routing is based on high 32 bits of transaction ID | |
1719 | * of MAD. | |
1720 | */ | |
d94bd266 | 1721 | hi_tid = be64_to_cpu(mad_hdr->tid) >> 32; |
34816ad9 | 1722 | list_for_each_entry(entry, &port_priv->agent_list, agent_list) { |
1da177e4 LT |
1723 | if (entry->agent.hi_tid == hi_tid) { |
1724 | mad_agent = entry; | |
1725 | break; | |
1726 | } | |
1727 | } | |
1728 | } else { | |
1729 | struct ib_mad_mgmt_class_table *class; | |
1730 | struct ib_mad_mgmt_method_table *method; | |
1731 | struct ib_mad_mgmt_vendor_class_table *vendor; | |
1732 | struct ib_mad_mgmt_vendor_class *vendor_class; | |
d94bd266 | 1733 | const struct ib_vendor_mad *vendor_mad; |
1da177e4 LT |
1734 | int index; |
1735 | ||
1736 | /* | |
1737 | * Routing is based on version, class, and method | |
1738 | * For "newer" vendor MADs, also based on OUI | |
1739 | */ | |
d94bd266 | 1740 | if (mad_hdr->class_version >= MAX_MGMT_VERSION) |
1da177e4 | 1741 | goto out; |
d94bd266 | 1742 | if (!is_vendor_class(mad_hdr->mgmt_class)) { |
1da177e4 | 1743 | class = port_priv->version[ |
d94bd266 | 1744 | mad_hdr->class_version].class; |
1da177e4 LT |
1745 | if (!class) |
1746 | goto out; | |
d94bd266 | 1747 | if (convert_mgmt_class(mad_hdr->mgmt_class) >= |
b7ab0b19 HS |
1748 | IB_MGMT_MAX_METHODS) |
1749 | goto out; | |
1da177e4 | 1750 | method = class->method_table[convert_mgmt_class( |
d94bd266 | 1751 | mad_hdr->mgmt_class)]; |
1da177e4 | 1752 | if (method) |
d94bd266 | 1753 | mad_agent = method->agent[mad_hdr->method & |
1da177e4 LT |
1754 | ~IB_MGMT_METHOD_RESP]; |
1755 | } else { | |
1756 | vendor = port_priv->version[ | |
d94bd266 | 1757 | mad_hdr->class_version].vendor; |
1da177e4 LT |
1758 | if (!vendor) |
1759 | goto out; | |
1760 | vendor_class = vendor->vendor_class[vendor_class_index( | |
d94bd266 | 1761 | mad_hdr->mgmt_class)]; |
1da177e4 LT |
1762 | if (!vendor_class) |
1763 | goto out; | |
1764 | /* Find matching OUI */ | |
d94bd266 | 1765 | vendor_mad = (const struct ib_vendor_mad *)mad_hdr; |
1da177e4 LT |
1766 | index = find_vendor_oui(vendor_class, vendor_mad->oui); |
1767 | if (index == -1) | |
1768 | goto out; | |
1769 | method = vendor_class->method_table[index]; | |
1770 | if (method) { | |
d94bd266 | 1771 | mad_agent = method->agent[mad_hdr->method & |
1da177e4 LT |
1772 | ~IB_MGMT_METHOD_RESP]; |
1773 | } | |
1774 | } | |
1775 | } | |
1776 | ||
1777 | if (mad_agent) { | |
1778 | if (mad_agent->agent.recv_handler) | |
1779 | atomic_inc(&mad_agent->refcount); | |
1780 | else { | |
7ef5d4b0 IW |
1781 | dev_notice(&port_priv->device->dev, |
1782 | "No receive handler for client %p on port %d\n", | |
1783 | &mad_agent->agent, port_priv->port_num); | |
1da177e4 LT |
1784 | mad_agent = NULL; |
1785 | } | |
1786 | } | |
1787 | out: | |
1788 | spin_unlock_irqrestore(&port_priv->reg_lock, flags); | |
1789 | ||
1790 | return mad_agent; | |
1791 | } | |
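/*
 * Editorial sketch (host byte order, hypothetical names): how the 64-bit
 * transaction ID routes a response back to its sender. The sending agent
 * owns the high 32 bits (agent.hi_tid); the low 32 bits are per-request.
 * On the wire the TID is big-endian, hence the be64_to_cpu() above.
 */
#include <stdint.h>

static uint64_t sketch_make_tid(uint32_t hi_tid, uint32_t seqno)
{
	return ((uint64_t)hi_tid << 32) | seqno;
}

static uint32_t sketch_tid_to_agent(uint64_t tid)
{
	return (uint32_t)(tid >> 32);
}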
1792 | ||
8e4349d1 IW |
1793 | static int validate_mad(const struct ib_mad_hdr *mad_hdr, |
1794 | const struct ib_mad_qp_info *qp_info, | |
1795 | bool opa) | |
1da177e4 LT |
1796 | { |
1797 | int valid = 0; | |
8e4349d1 | 1798 | u32 qp_num = qp_info->qp->qp_num; |
1da177e4 LT |
1799 | |
1800 | /* Make sure MAD base version is understood */ | |
8e4349d1 IW |
1801 | if (mad_hdr->base_version != IB_MGMT_BASE_VERSION && |
1802 | (!opa || mad_hdr->base_version != OPA_MGMT_BASE_VERSION)) { | |
1803 | pr_err("MAD received with unsupported base version %d %s\n", | |
1804 | mad_hdr->base_version, opa ? "(opa)" : ""); | |
1da177e4 LT |
1805 | goto out; |
1806 | } | |
1807 | ||
1808 | /* Filter SMI packets sent to other than QP0 */ | |
77f60833 IW |
1809 | if ((mad_hdr->mgmt_class == IB_MGMT_CLASS_SUBN_LID_ROUTED) || |
1810 | (mad_hdr->mgmt_class == IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE)) { | |
1da177e4 LT |
1811 | if (qp_num == 0) |
1812 | valid = 1; | |
1813 | } else { | |
1814 | /* Filter GSI packets sent to QP0 */ | |
1815 | if (qp_num != 0) | |
1816 | valid = 1; | |
1817 | } | |
1818 | ||
1819 | out: | |
1820 | return valid; | |
1821 | } | |
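/*
 * Editorial sketch of the QP filter above: SMP classes may only arrive
 * on QP0, every other class only on QP1 and up. Class values are from
 * the IBA spec (SUBN_LID_ROUTED = 0x01, SUBN_DIRECTED_ROUTE = 0x81);
 * the helper name is hypothetical.
 */
static int sketch_mad_on_valid_qp(unsigned char mgmt_class, unsigned int qp_num)
{
	int is_smp = mgmt_class == 0x01 || mgmt_class == 0x81;

	return is_smp ? qp_num == 0 : qp_num != 0;
}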
1822 | ||
f766c58f IW |
1823 | static int is_rmpp_data_mad(const struct ib_mad_agent_private *mad_agent_priv, |
1824 | const struct ib_mad_hdr *mad_hdr) | |
fa619a77 HR |
1825 | { |
1826 | struct ib_rmpp_mad *rmpp_mad; | |
1827 | ||
1828 | rmpp_mad = (struct ib_rmpp_mad *)mad_hdr; | |
1829 | return !mad_agent_priv->agent.rmpp_version || | |
1471cb6c | 1830 | !ib_mad_kernel_rmpp_agent(&mad_agent_priv->agent) || |
fa619a77 HR |
1831 | !(ib_get_rmpp_flags(&rmpp_mad->rmpp_hdr) & |
1832 | IB_MGMT_RMPP_FLAG_ACTIVE) || | |
1833 | (rmpp_mad->rmpp_hdr.rmpp_type == IB_MGMT_RMPP_TYPE_DATA); | |
1834 | } | |
1835 | ||
8bf4b30c IW |
1836 | static inline int rcv_has_same_class(const struct ib_mad_send_wr_private *wr, |
1837 | const struct ib_mad_recv_wc *rwc) | |
fa9656bb | 1838 | { |
8bf4b30c | 1839 | return ((struct ib_mad_hdr *)(wr->send_buf.mad))->mgmt_class == |
fa9656bb JM |
1840 | rwc->recv_buf.mad->mad_hdr.mgmt_class; |
1841 | } | |
1842 | ||
f766c58f IW |
1843 | static inline int rcv_has_same_gid(const struct ib_mad_agent_private *mad_agent_priv, |
1844 | const struct ib_mad_send_wr_private *wr, | |
1845 | const struct ib_mad_recv_wc *rwc)
fa9656bb JM |
1846 | { |
1847 | struct ib_ah_attr attr; | |
1848 | u8 send_resp, rcv_resp; | |
9874e746 JM |
1849 | union ib_gid sgid; |
1850 | struct ib_device *device = mad_agent_priv->agent.device; | |
1851 | u8 port_num = mad_agent_priv->agent.port_num; | |
1852 | u8 lmc; | |
fa9656bb | 1853 | |
96909308 IW |
1854 | send_resp = ib_response_mad((struct ib_mad_hdr *)wr->send_buf.mad); |
1855 | rcv_resp = ib_response_mad(&rwc->recv_buf.mad->mad_hdr); | |
fa9656bb | 1856 | |
fa9656bb JM |
1857 | if (send_resp == rcv_resp) |
1858 | /* both requests, or both responses. GIDs different */ | |
1859 | return 0; | |
1860 | ||
1861 | if (ib_query_ah(wr->send_buf.ah, &attr)) | |
1862 | /* Assume not equal, to avoid false positives. */ | |
1863 | return 0; | |
1864 | ||
9874e746 JM |
1865 | if (!!(attr.ah_flags & IB_AH_GRH) != |
1866 | !!(rwc->wc->wc_flags & IB_WC_GRH)) | |
fa9656bb JM |
1867 | /* one has GID, other does not. Assume different */ |
1868 | return 0; | |
9874e746 JM |
1869 | |
1870 | if (!send_resp && rcv_resp) { | |
1871 | /* is request/response. */ | |
1872 | if (!(attr.ah_flags & IB_AH_GRH)) { | |
1873 | if (ib_get_cached_lmc(device, port_num, &lmc)) | |
1874 | return 0; | |
1875 | return (!lmc || !((attr.src_path_bits ^ | |
1876 | rwc->wc->dlid_path_bits) & | |
1877 | ((1 << lmc) - 1))); | |
1878 | } else { | |
1879 | if (ib_get_cached_gid(device, port_num, | |
55ee3ab2 | 1880 | attr.grh.sgid_index, &sgid, NULL)) |
9874e746 JM |
1881 | return 0; |
1882 | return !memcmp(sgid.raw, rwc->recv_buf.grh->dgid.raw, | |
1883 | 16); | |
1884 | } | |
1885 | } | |
1886 | ||
1887 | if (!(attr.ah_flags & IB_AH_GRH)) | |
1888 | return attr.dlid == rwc->wc->slid; | |
1889 | else | |
1890 | return !memcmp(attr.grh.dgid.raw, rwc->recv_buf.grh->sgid.raw, | |
1891 | 16); | |
1892 | } | |
1893 | ||
1894 | static inline int is_direct(u8 class) | |
1895 | { | |
1896 | return (class == IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE); | |
fa9656bb | 1897 | } |
9874e746 | 1898 | |
fa619a77 | 1899 | struct ib_mad_send_wr_private* |
f766c58f IW |
1900 | ib_find_send_mad(const struct ib_mad_agent_private *mad_agent_priv, |
1901 | const struct ib_mad_recv_wc *wc) | |
1da177e4 | 1902 | { |
9874e746 | 1903 | struct ib_mad_send_wr_private *wr; |
83a1d228 | 1904 | const struct ib_mad_hdr *mad_hdr; |
fa9656bb | 1905 | |
83a1d228 | 1906 | mad_hdr = &wc->recv_buf.mad->mad_hdr; |
9874e746 JM |
1907 | |
1908 | list_for_each_entry(wr, &mad_agent_priv->wait_list, agent_list) { | |
83a1d228 | 1909 | if ((wr->tid == mad_hdr->tid) && |
9874e746 JM |
1910 | rcv_has_same_class(wr, wc) && |
1911 | /* | |
1912 | * Don't check GID for direct routed MADs. | |
1913 | * These might have permissive LIDs. | |
1914 | */ | |
83a1d228 | 1915 | (is_direct(mad_hdr->mgmt_class) || |
9874e746 | 1916 | rcv_has_same_gid(mad_agent_priv, wr, wc))) |
39798695 | 1917 | return (wr->status == IB_WC_SUCCESS) ? wr : NULL; |
1da177e4 LT |
1918 | } |
1919 | ||
1920 | /* | |
1921 | * It's possible to receive the response before we've | |
1922 | * been notified that the send has completed | |
1923 | */ | |
9874e746 | 1924 | list_for_each_entry(wr, &mad_agent_priv->send_list, agent_list) { |
c597eee5 | 1925 | if (is_rmpp_data_mad(mad_agent_priv, wr->send_buf.mad) && |
83a1d228 | 1926 | wr->tid == mad_hdr->tid && |
9874e746 JM |
1927 | wr->timeout && |
1928 | rcv_has_same_class(wr, wc) && | |
1929 | /* | |
1930 | * Don't check GID for direct routed MADs. | |
1931 | * These might have permissive LIDs. | |
1932 | */ | |
83a1d228 | 1933 | (is_direct(mad_hdr->mgmt_class) || |
9874e746 | 1934 | rcv_has_same_gid(mad_agent_priv, wr, wc))) |
1da177e4 | 1935 | /* Verify request has not been canceled */ |
9874e746 | 1936 | return (wr->status == IB_WC_SUCCESS) ? wr : NULL; |
1da177e4 LT |
1937 | } |
1938 | return NULL; | |
1939 | } | |
1940 | ||
fa619a77 | 1941 | void ib_mark_mad_done(struct ib_mad_send_wr_private *mad_send_wr) |
6a0c435e HR |
1942 | { |
1943 | mad_send_wr->timeout = 0; | |
179e0917 AM |
1944 | if (mad_send_wr->refcount == 1) |
1945 | list_move_tail(&mad_send_wr->agent_list, | |
6a0c435e | 1946 | &mad_send_wr->mad_agent_priv->done_list); |
6a0c435e HR |
1947 | } |
1948 | ||
1da177e4 | 1949 | static void ib_mad_complete_recv(struct ib_mad_agent_private *mad_agent_priv, |
4a0754fa | 1950 | struct ib_mad_recv_wc *mad_recv_wc) |
1da177e4 LT |
1951 | { |
1952 | struct ib_mad_send_wr_private *mad_send_wr; | |
1953 | struct ib_mad_send_wc mad_send_wc; | |
1954 | unsigned long flags; | |
1955 | ||
fa619a77 HR |
1956 | INIT_LIST_HEAD(&mad_recv_wc->rmpp_list); |
1957 | list_add(&mad_recv_wc->recv_buf.list, &mad_recv_wc->rmpp_list); | |
1471cb6c | 1958 | if (ib_mad_kernel_rmpp_agent(&mad_agent_priv->agent)) { |
fa619a77 HR |
1959 | mad_recv_wc = ib_process_rmpp_recv_wc(mad_agent_priv, |
1960 | mad_recv_wc); | |
1961 | if (!mad_recv_wc) { | |
1b52fa98 | 1962 | deref_mad_agent(mad_agent_priv); |
fa619a77 HR |
1963 | return; |
1964 | } | |
1965 | } | |
1966 | ||
1da177e4 | 1967 | /* Complete corresponding request */ |
96909308 | 1968 | if (ib_response_mad(&mad_recv_wc->recv_buf.mad->mad_hdr)) { |
1da177e4 | 1969 | spin_lock_irqsave(&mad_agent_priv->lock, flags); |
fa9656bb | 1970 | mad_send_wr = ib_find_send_mad(mad_agent_priv, mad_recv_wc); |
1da177e4 LT |
1971 | if (!mad_send_wr) { |
1972 | spin_unlock_irqrestore(&mad_agent_priv->lock, flags); | |
1471cb6c IW |
1973 | if (!ib_mad_kernel_rmpp_agent(&mad_agent_priv->agent) |
1974 | && ib_is_mad_class_rmpp(mad_recv_wc->recv_buf.mad->mad_hdr.mgmt_class) | |
1975 | && (ib_get_rmpp_flags(&((struct ib_rmpp_mad *)mad_recv_wc->recv_buf.mad)->rmpp_hdr) | |
1976 | & IB_MGMT_RMPP_FLAG_ACTIVE)) { | |
1977 | /* user-space RMPP is in effect
1978 | * and this is an active RMPP MAD
1979 | */
1980 | mad_recv_wc->wc->wr_id = 0; | |
1981 | mad_agent_priv->agent.recv_handler(&mad_agent_priv->agent, | |
1982 | mad_recv_wc); | |
1983 | atomic_dec(&mad_agent_priv->refcount); | |
1984 | } else { | |
1985 | /* not user-space RMPP; revert to normal behavior and
1986 | * drop the MAD */
1987 | ib_free_recv_mad(mad_recv_wc); | |
1988 | deref_mad_agent(mad_agent_priv); | |
1989 | return; | |
1990 | } | |
1991 | } else { | |
1992 | ib_mark_mad_done(mad_send_wr); | |
1993 | spin_unlock_irqrestore(&mad_agent_priv->lock, flags); | |
1da177e4 | 1994 | |
1471cb6c IW |
1995 | /* Defined behavior is to complete response before request */ |
1996 | mad_recv_wc->wc->wr_id = (unsigned long) &mad_send_wr->send_buf; | |
1997 | mad_agent_priv->agent.recv_handler(&mad_agent_priv->agent, | |
1998 | mad_recv_wc); | |
1999 | atomic_dec(&mad_agent_priv->refcount); | |
1da177e4 | 2000 | |
1471cb6c IW |
2001 | mad_send_wc.status = IB_WC_SUCCESS; |
2002 | mad_send_wc.vendor_err = 0; | |
2003 | mad_send_wc.send_buf = &mad_send_wr->send_buf; | |
2004 | ib_mad_complete_send_wr(mad_send_wr, &mad_send_wc); | |
2005 | } | |
1da177e4 | 2006 | } else { |
4a0754fa HR |
2007 | mad_agent_priv->agent.recv_handler(&mad_agent_priv->agent, |
2008 | mad_recv_wc); | |
1b52fa98 | 2009 | deref_mad_agent(mad_agent_priv); |
1da177e4 LT |
2010 | } |
2011 | } | |
2012 | ||
e11ae8aa IW |
2013 | static enum smi_action handle_ib_smi(const struct ib_mad_port_private *port_priv, |
2014 | const struct ib_mad_qp_info *qp_info, | |
2015 | const struct ib_wc *wc, | |
2016 | int port_num, | |
2017 | struct ib_mad_private *recv, | |
2018 | struct ib_mad_private *response) | |
2019 | { | |
2020 | enum smi_forward_action retsmi; | |
c9082e51 | 2021 | struct ib_smp *smp = (struct ib_smp *)recv->mad; |
e11ae8aa | 2022 | |
c9082e51 | 2023 | if (smi_handle_dr_smp_recv(smp, |
4139032b | 2024 | rdma_cap_ib_switch(port_priv->device), |
e11ae8aa IW |
2025 | port_num, |
2026 | port_priv->device->phys_port_cnt) == | |
2027 | IB_SMI_DISCARD) | |
2028 | return IB_SMI_DISCARD; | |
2029 | ||
c9082e51 | 2030 | retsmi = smi_check_forward_dr_smp(smp); |
e11ae8aa IW |
2031 | if (retsmi == IB_SMI_LOCAL) |
2032 | return IB_SMI_HANDLE; | |
2033 | ||
2034 | if (retsmi == IB_SMI_SEND) { /* don't forward */ | |
c9082e51 | 2035 | if (smi_handle_dr_smp_send(smp, |
4139032b | 2036 | rdma_cap_ib_switch(port_priv->device), |
e11ae8aa IW |
2037 | port_num) == IB_SMI_DISCARD) |
2038 | return IB_SMI_DISCARD; | |
2039 | ||
c9082e51 | 2040 | if (smi_check_local_smp(smp, port_priv->device) == IB_SMI_DISCARD) |
e11ae8aa | 2041 | return IB_SMI_DISCARD; |
4139032b | 2042 | } else if (rdma_cap_ib_switch(port_priv->device)) { |
e11ae8aa | 2043 | /* forward case for switches */ |
c9082e51 | 2044 | memcpy(response, recv, mad_priv_size(response)); |
e11ae8aa | 2045 | response->header.recv_wc.wc = &response->header.wc; |
c9082e51 | 2046 | response->header.recv_wc.recv_buf.mad = (struct ib_mad *)response->mad; |
e11ae8aa IW |
2047 | response->header.recv_wc.recv_buf.grh = &response->grh; |
2048 | ||
c9082e51 | 2049 | agent_send_response((const struct ib_mad_hdr *)response->mad, |
e11ae8aa IW |
2050 | &response->grh, wc, |
2051 | port_priv->device, | |
c9082e51 IW |
2052 | smi_get_fwd_port(smp), |
2053 | qp_info->qp->qp_num, | |
8e4349d1 IW |
2054 | response->mad_size, |
2055 | false); | |
e11ae8aa IW |
2056 | |
2057 | return IB_SMI_DISCARD; | |
2058 | } | |
2059 | return IB_SMI_HANDLE; | |
2060 | } | |
2061 | ||
c9082e51 | 2062 | static bool generate_unmatched_resp(const struct ib_mad_private *recv, |
8e4349d1 IW |
2063 | struct ib_mad_private *response, |
2064 | size_t *resp_len, bool opa) | |
0b307043 | 2065 | { |
c9082e51 IW |
2066 | const struct ib_mad_hdr *recv_hdr = (const struct ib_mad_hdr *)recv->mad; |
2067 | struct ib_mad_hdr *resp_hdr = (struct ib_mad_hdr *)response->mad; | |
2068 | ||
2069 | if (recv_hdr->method == IB_MGMT_METHOD_GET || | |
2070 | recv_hdr->method == IB_MGMT_METHOD_SET) { | |
2071 | memcpy(response, recv, mad_priv_size(response)); | |
0b307043 | 2072 | response->header.recv_wc.wc = &response->header.wc; |
c9082e51 | 2073 | response->header.recv_wc.recv_buf.mad = (struct ib_mad *)response->mad; |
0b307043 | 2074 | response->header.recv_wc.recv_buf.grh = &response->grh; |
c9082e51 IW |
2075 | resp_hdr->method = IB_MGMT_METHOD_GET_RESP; |
2076 | resp_hdr->status = cpu_to_be16(IB_MGMT_MAD_STATUS_UNSUPPORTED_METHOD_ATTRIB); | |
2077 | if (recv_hdr->mgmt_class == IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE) | |
2078 | resp_hdr->status |= IB_SMP_DIRECTION; | |
0b307043 | 2079 | |
8e4349d1 IW |
2080 | if (opa && recv_hdr->base_version == OPA_MGMT_BASE_VERSION) { |
2081 | if (recv_hdr->mgmt_class == | |
2082 | IB_MGMT_CLASS_SUBN_LID_ROUTED || | |
2083 | recv_hdr->mgmt_class == | |
2084 | IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE) | |
2085 | *resp_len = opa_get_smp_header_size( | |
2086 | (struct opa_smp *)recv->mad); | |
2087 | else | |
2088 | *resp_len = sizeof(struct ib_mad_hdr); | |
2089 | } | |
2090 | ||
0b307043 ST |
2091 | return true; |
2092 | } else { | |
2093 | return false; | |
2094 | } | |
2095 | } | |
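/*
 * Editorial sketch of the header rewrite above: an unhandled Get/Set is
 * bounced as GetResp carrying "unsupported method/attribute" status, and
 * a directed-route SMP response additionally sets the direction bit so
 * it retraces its path. Method and status values are from the IBA spec;
 * the struct and helper are hypothetical.
 */
struct sketch_mad_hdr { unsigned char method; unsigned short status; };

static void sketch_bounce_unmatched(struct sketch_mad_hdr *resp)
{
	resp->method = 0x81;	/* GetResp (0x80 | Get) */
	resp->status = 0x000c;	/* unsupported method/attribute combination */
}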
8e4349d1 IW |
2096 | |
2097 | static enum smi_action | |
2098 | handle_opa_smi(struct ib_mad_port_private *port_priv, | |
2099 | struct ib_mad_qp_info *qp_info, | |
2100 | struct ib_wc *wc, | |
2101 | int port_num, | |
2102 | struct ib_mad_private *recv, | |
2103 | struct ib_mad_private *response) | |
2104 | { | |
2105 | enum smi_forward_action retsmi; | |
2106 | struct opa_smp *smp = (struct opa_smp *)recv->mad; | |
2107 | ||
2108 | if (opa_smi_handle_dr_smp_recv(smp, | |
4139032b | 2109 | rdma_cap_ib_switch(port_priv->device), |
8e4349d1 IW |
2110 | port_num, |
2111 | port_priv->device->phys_port_cnt) == | |
2112 | IB_SMI_DISCARD) | |
2113 | return IB_SMI_DISCARD; | |
2114 | ||
2115 | retsmi = opa_smi_check_forward_dr_smp(smp); | |
2116 | if (retsmi == IB_SMI_LOCAL) | |
2117 | return IB_SMI_HANDLE; | |
2118 | ||
2119 | if (retsmi == IB_SMI_SEND) { /* don't forward */ | |
2120 | if (opa_smi_handle_dr_smp_send(smp, | |
4139032b | 2121 | rdma_cap_ib_switch(port_priv->device), |
8e4349d1 IW |
2122 | port_num) == IB_SMI_DISCARD) |
2123 | return IB_SMI_DISCARD; | |
2124 | ||
2125 | if (opa_smi_check_local_smp(smp, port_priv->device) == | |
2126 | IB_SMI_DISCARD) | |
2127 | return IB_SMI_DISCARD; | |
2128 | ||
4139032b | 2129 | } else if (rdma_cap_ib_switch(port_priv->device)) { |
8e4349d1 IW |
2130 | /* forward case for switches */ |
2131 | memcpy(response, recv, mad_priv_size(response)); | |
2132 | response->header.recv_wc.wc = &response->header.wc; | |
2133 | response->header.recv_wc.recv_buf.opa_mad = | |
2134 | (struct opa_mad *)response->mad; | |
2135 | response->header.recv_wc.recv_buf.grh = &response->grh; | |
2136 | ||
2137 | agent_send_response((const struct ib_mad_hdr *)response->mad, | |
2138 | &response->grh, wc, | |
2139 | port_priv->device, | |
2140 | opa_smi_get_fwd_port(smp), | |
2141 | qp_info->qp->qp_num, | |
2142 | recv->header.wc.byte_len, | |
2143 | true); | |
2144 | ||
2145 | return IB_SMI_DISCARD; | |
2146 | } | |
2147 | ||
2148 | return IB_SMI_HANDLE; | |
2149 | } | |
2150 | ||
2151 | static enum smi_action | |
2152 | handle_smi(struct ib_mad_port_private *port_priv, | |
2153 | struct ib_mad_qp_info *qp_info, | |
2154 | struct ib_wc *wc, | |
2155 | int port_num, | |
2156 | struct ib_mad_private *recv, | |
2157 | struct ib_mad_private *response, | |
2158 | bool opa) | |
2159 | { | |
2160 | struct ib_mad_hdr *mad_hdr = (struct ib_mad_hdr *)recv->mad; | |
2161 | ||
2162 | if (opa && mad_hdr->base_version == OPA_MGMT_BASE_VERSION && | |
2163 | mad_hdr->class_version == OPA_SMI_CLASS_VERSION) | |
2164 | return handle_opa_smi(port_priv, qp_info, wc, port_num, recv, | |
2165 | response); | |
2166 | ||
2167 | return handle_ib_smi(port_priv, qp_info, wc, port_num, recv, response); | |
2168 | } | |
2169 | ||
1da177e4 LT |
2170 | static void ib_mad_recv_done_handler(struct ib_mad_port_private *port_priv, |
2171 | struct ib_wc *wc) | |
2172 | { | |
2173 | struct ib_mad_qp_info *qp_info; | |
2174 | struct ib_mad_private_header *mad_priv_hdr; | |
445d6807 | 2175 | struct ib_mad_private *recv, *response = NULL; |
1da177e4 LT |
2176 | struct ib_mad_list_head *mad_list; |
2177 | struct ib_mad_agent_private *mad_agent; | |
1bae4dbf | 2178 | int port_num; |
a9e74323 | 2179 | int ret = IB_MAD_RESULT_SUCCESS; |
4cd7c947 IW |
2180 | size_t mad_size; |
2181 | u16 resp_mad_pkey_index = 0; | |
8e4349d1 | 2182 | bool opa; |
1da177e4 | 2183 | |
1da177e4 LT |
2184 | mad_list = (struct ib_mad_list_head *)(unsigned long)wc->wr_id; |
2185 | qp_info = mad_list->mad_queue->qp_info; | |
2186 | dequeue_mad(mad_list); | |
2187 | ||
8e4349d1 IW |
2188 | opa = rdma_cap_opa_mad(qp_info->port_priv->device, |
2189 | qp_info->port_priv->port_num); | |
2190 | ||
1da177e4 LT |
2191 | mad_priv_hdr = container_of(mad_list, struct ib_mad_private_header, |
2192 | mad_list); | |
2193 | recv = container_of(mad_priv_hdr, struct ib_mad_private, header); | |
1527106f RC |
2194 | ib_dma_unmap_single(port_priv->device, |
2195 | recv->header.mapping, | |
c9082e51 | 2196 | mad_priv_dma_size(recv), |
1527106f | 2197 | DMA_FROM_DEVICE); |
1da177e4 LT |
2198 | |
2199 | /* Set up the MAD receive work completion from the "normal" work completion */
24239aff SH |
2200 | recv->header.wc = *wc; |
2201 | recv->header.recv_wc.wc = &recv->header.wc; | |
8e4349d1 IW |
2202 | |
2203 | if (opa && ((struct ib_mad_hdr *)(recv->mad))->base_version == OPA_MGMT_BASE_VERSION) { | |
2204 | recv->header.recv_wc.mad_len = wc->byte_len - sizeof(struct ib_grh); | |
2205 | recv->header.recv_wc.mad_seg_size = sizeof(struct opa_mad); | |
2206 | } else { | |
2207 | recv->header.recv_wc.mad_len = sizeof(struct ib_mad); | |
2208 | recv->header.recv_wc.mad_seg_size = sizeof(struct ib_mad); | |
2209 | } | |
2210 | ||
c9082e51 | 2211 | recv->header.recv_wc.recv_buf.mad = (struct ib_mad *)recv->mad; |
1da177e4 LT |
2212 | recv->header.recv_wc.recv_buf.grh = &recv->grh; |
2213 | ||
2214 | if (atomic_read(&qp_info->snoop_count)) | |
2215 | snoop_recv(qp_info, &recv->header.recv_wc, IB_MAD_SNOOP_RECVS); | |
2216 | ||
2217 | /* Validate MAD */ | |
8e4349d1 | 2218 | if (!validate_mad((const struct ib_mad_hdr *)recv->mad, qp_info, opa)) |
1da177e4 LT |
2219 | goto out; |
2220 | ||
4cd7c947 IW |
2221 | mad_size = recv->mad_size; |
2222 | response = alloc_mad_private(mad_size, GFP_KERNEL); | |
445d6807 | 2223 | if (!response) { |
7ef5d4b0 IW |
2224 | dev_err(&port_priv->device->dev, |
2225 | "ib_mad_recv_done_handler no memory for response buffer\n"); | |
445d6807 HR |
2226 | goto out; |
2227 | } | |
2228 | ||
4139032b | 2229 | if (rdma_cap_ib_switch(port_priv->device)) |
1bae4dbf HR |
2230 | port_num = wc->port_num; |
2231 | else | |
2232 | port_num = port_priv->port_num; | |
2233 | ||
c9082e51 | 2234 | if (((struct ib_mad_hdr *)recv->mad)->mgmt_class == |
1da177e4 | 2235 | IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE) { |
8e4349d1 IW |
2236 | if (handle_smi(port_priv, qp_info, wc, port_num, recv, |
2237 | response, opa) | |
e11ae8aa | 2238 | == IB_SMI_DISCARD) |
1da177e4 | 2239 | goto out; |
1da177e4 LT |
2240 | } |
2241 | ||
1da177e4 LT |
2242 | /* Give driver "right of first refusal" on incoming MAD */ |
2243 | if (port_priv->device->process_mad) { | |
1da177e4 LT |
2244 | ret = port_priv->device->process_mad(port_priv->device, 0, |
2245 | port_priv->port_num, | |
2246 | wc, &recv->grh, | |
4cd7c947 IW |
2247 | (const struct ib_mad_hdr *)recv->mad, |
2248 | recv->mad_size, | |
2249 | (struct ib_mad_hdr *)response->mad, | |
2250 | &mad_size, &resp_mad_pkey_index); | |
8e4349d1 IW |
2251 | |
2252 | if (opa) | |
2253 | wc->pkey_index = resp_mad_pkey_index; | |
2254 | ||
1da177e4 LT |
2255 | if (ret & IB_MAD_RESULT_SUCCESS) { |
2256 | if (ret & IB_MAD_RESULT_CONSUMED) | |
2257 | goto out; | |
2258 | if (ret & IB_MAD_RESULT_REPLY) { | |
c9082e51 | 2259 | agent_send_response((const struct ib_mad_hdr *)response->mad, |
34816ad9 SH |
2260 | &recv->grh, wc, |
2261 | port_priv->device, | |
1bae4dbf | 2262 | port_num, |
c9082e51 | 2263 | qp_info->qp->qp_num, |
8e4349d1 | 2264 | mad_size, opa); |
1da177e4 LT |
2265 | goto out; |
2266 | } | |
2267 | } | |
2268 | } | |
2269 | ||
c9082e51 | 2270 | mad_agent = find_mad_agent(port_priv, (const struct ib_mad_hdr *)recv->mad); |
1da177e4 | 2271 | if (mad_agent) { |
4a0754fa | 2272 | ib_mad_complete_recv(mad_agent, &recv->header.recv_wc); |
1da177e4 LT |
2273 | /* |
2274 | * recv is freed in error cases by ib_mad_complete_recv()
2275 | * or by the recv_handler it invokes
2276 | */ | |
2277 | recv = NULL; | |
a9e74323 | 2278 | } else if ((ret & IB_MAD_RESULT_SUCCESS) && |
8e4349d1 | 2279 | generate_unmatched_resp(recv, response, &mad_size, opa)) { |
c9082e51 IW |
2280 | agent_send_response((const struct ib_mad_hdr *)response->mad, &recv->grh, wc, |
2281 | port_priv->device, port_num, | |
8e4349d1 | 2282 | qp_info->qp->qp_num, mad_size, opa); |
1da177e4 LT |
2283 | } |
2284 | ||
2285 | out: | |
2286 | /* Post another receive request for this QP */ | |
2287 | if (response) { | |
2288 | ib_mad_post_receive_mads(qp_info, response); | |
c9082e51 | 2289 | kfree(recv); |
1da177e4 LT |
2290 | } else |
2291 | ib_mad_post_receive_mads(qp_info, recv); | |
2292 | } | |
2293 | ||
2294 | static void adjust_timeout(struct ib_mad_agent_private *mad_agent_priv) | |
2295 | { | |
2296 | struct ib_mad_send_wr_private *mad_send_wr; | |
2297 | unsigned long delay; | |
2298 | ||
2299 | if (list_empty(&mad_agent_priv->wait_list)) { | |
136b5721 | 2300 | cancel_delayed_work(&mad_agent_priv->timed_work); |
1da177e4 LT |
2301 | } else { |
2302 | mad_send_wr = list_entry(mad_agent_priv->wait_list.next, | |
2303 | struct ib_mad_send_wr_private, | |
2304 | agent_list); | |
2305 | ||
2306 | if (time_after(mad_agent_priv->timeout, | |
2307 | mad_send_wr->timeout)) { | |
2308 | mad_agent_priv->timeout = mad_send_wr->timeout; | |
1da177e4 LT |
2309 | delay = mad_send_wr->timeout - jiffies; |
2310 | if ((long)delay <= 0) | |
2311 | delay = 1; | |
e7c2f967 TH |
2312 | mod_delayed_work(mad_agent_priv->qp_info->port_priv->wq, |
2313 | &mad_agent_priv->timed_work, delay); | |
1da177e4 LT |
2314 | } |
2315 | } | |
2316 | } | |
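/*
 * Editorial sketch of the jiffies arithmetic above: timeouts are stored
 * as absolute jiffies, so the remaining delay is a wrap-safe signed
 * difference, clamped to 1 tick so the work item is never queued with a
 * zero delay. Stand-alone helper, hypothetical name.
 */
static unsigned long sketch_delay_until(unsigned long timeout_abs,
					unsigned long now)
{
	long delay = (long)(timeout_abs - now);

	return delay <= 0 ? 1 : (unsigned long)delay;
}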
2317 | ||
d760ce8f | 2318 | static void wait_for_response(struct ib_mad_send_wr_private *mad_send_wr) |
1da177e4 | 2319 | { |
d760ce8f | 2320 | struct ib_mad_agent_private *mad_agent_priv; |
1da177e4 LT |
2321 | struct ib_mad_send_wr_private *temp_mad_send_wr; |
2322 | struct list_head *list_item; | |
2323 | unsigned long delay; | |
2324 | ||
d760ce8f | 2325 | mad_agent_priv = mad_send_wr->mad_agent_priv; |
1da177e4 LT |
2326 | list_del(&mad_send_wr->agent_list); |
2327 | ||
2328 | delay = mad_send_wr->timeout; | |
2329 | mad_send_wr->timeout += jiffies; | |
2330 | ||
29bb33dd HR |
2331 | if (delay) { |
2332 | list_for_each_prev(list_item, &mad_agent_priv->wait_list) { | |
2333 | temp_mad_send_wr = list_entry(list_item, | |
2334 | struct ib_mad_send_wr_private, | |
2335 | agent_list); | |
2336 | if (time_after(mad_send_wr->timeout, | |
2337 | temp_mad_send_wr->timeout)) | |
2338 | break; | |
 2339 | }
1da177e4 |  2340 | } else {
29bb33dd HR |
 2341 | list_item = &mad_agent_priv->wait_list;
 2342 | }
1da177e4 LT |
2343 | list_add(&mad_send_wr->agent_list, list_item); |
2344 | ||
2345 | /* Reschedule a work item if we have a shorter timeout */ | |
e7c2f967 TH |
2346 | if (mad_agent_priv->wait_list.next == &mad_send_wr->agent_list) |
2347 | mod_delayed_work(mad_agent_priv->qp_info->port_priv->wq, | |
2348 | &mad_agent_priv->timed_work, delay); | |
1da177e4 LT |
2349 | } |
2350 | ||
03b61ad2 HR |
2351 | void ib_reset_mad_timeout(struct ib_mad_send_wr_private *mad_send_wr, |
2352 | int timeout_ms) | |
2353 | { | |
2354 | mad_send_wr->timeout = msecs_to_jiffies(timeout_ms); | |
2355 | wait_for_response(mad_send_wr); | |
2356 | } | |
2357 | ||
1da177e4 LT |
2358 | /* |
2359 | * Process a send work completion | |
2360 | */ | |
fa619a77 HR |
2361 | void ib_mad_complete_send_wr(struct ib_mad_send_wr_private *mad_send_wr, |
2362 | struct ib_mad_send_wc *mad_send_wc) | |
1da177e4 LT |
2363 | { |
2364 | struct ib_mad_agent_private *mad_agent_priv; | |
2365 | unsigned long flags; | |
fa619a77 | 2366 | int ret; |
1da177e4 | 2367 | |
d760ce8f | 2368 | mad_agent_priv = mad_send_wr->mad_agent_priv; |
1da177e4 | 2369 | spin_lock_irqsave(&mad_agent_priv->lock, flags); |
1471cb6c | 2370 | if (ib_mad_kernel_rmpp_agent(&mad_agent_priv->agent)) { |
fa619a77 HR |
2371 | ret = ib_process_rmpp_send_wc(mad_send_wr, mad_send_wc); |
2372 | if (ret == IB_RMPP_RESULT_CONSUMED) | |
2373 | goto done; | |
2374 | } else | |
2375 | ret = IB_RMPP_RESULT_UNHANDLED; | |
2376 | ||
1da177e4 LT |
2377 | if (mad_send_wc->status != IB_WC_SUCCESS && |
2378 | mad_send_wr->status == IB_WC_SUCCESS) { | |
2379 | mad_send_wr->status = mad_send_wc->status; | |
2380 | mad_send_wr->refcount -= (mad_send_wr->timeout > 0); | |
2381 | } | |
2382 | ||
2383 | if (--mad_send_wr->refcount > 0) { | |
2384 | if (mad_send_wr->refcount == 1 && mad_send_wr->timeout && | |
2385 | mad_send_wr->status == IB_WC_SUCCESS) { | |
d760ce8f | 2386 | wait_for_response(mad_send_wr); |
1da177e4 | 2387 | } |
fa619a77 | 2388 | goto done; |
1da177e4 LT |
2389 | } |
2390 | ||
2391 | /* Remove send from MAD agent and notify client of completion */ | |
2392 | list_del(&mad_send_wr->agent_list); | |
2393 | adjust_timeout(mad_agent_priv); | |
2394 | spin_unlock_irqrestore(&mad_agent_priv->lock, flags); | |
2395 | ||
2396 | if (mad_send_wr->status != IB_WC_SUCCESS)
2397 | mad_send_wc->status = mad_send_wr->status; | |
34816ad9 SH |
2398 | if (ret == IB_RMPP_RESULT_INTERNAL) |
2399 | ib_rmpp_send_handler(mad_send_wc); | |
2400 | else | |
fa619a77 HR |
2401 | mad_agent_priv->agent.send_handler(&mad_agent_priv->agent, |
2402 | mad_send_wc); | |
1da177e4 LT |
2403 | |
2404 | /* Release reference on agent taken when sending */ | |
1b52fa98 | 2405 | deref_mad_agent(mad_agent_priv); |
fa619a77 HR |
2406 | return; |
2407 | done: | |
2408 | spin_unlock_irqrestore(&mad_agent_priv->lock, flags); | |
1da177e4 LT |
2409 | } |
2410 | ||
2411 | static void ib_mad_send_done_handler(struct ib_mad_port_private *port_priv, | |
2412 | struct ib_wc *wc) | |
2413 | { | |
2414 | struct ib_mad_send_wr_private *mad_send_wr, *queued_send_wr; | |
2415 | struct ib_mad_list_head *mad_list; | |
2416 | struct ib_mad_qp_info *qp_info; | |
2417 | struct ib_mad_queue *send_queue; | |
2418 | struct ib_send_wr *bad_send_wr; | |
34816ad9 | 2419 | struct ib_mad_send_wc mad_send_wc; |
1da177e4 LT |
2420 | unsigned long flags; |
2421 | int ret; | |
2422 | ||
2423 | mad_list = (struct ib_mad_list_head *)(unsigned long)wc->wr_id; | |
2424 | mad_send_wr = container_of(mad_list, struct ib_mad_send_wr_private, | |
2425 | mad_list); | |
2426 | send_queue = mad_list->mad_queue; | |
2427 | qp_info = send_queue->qp_info; | |
2428 | ||
2429 | retry: | |
1527106f RC |
2430 | ib_dma_unmap_single(mad_send_wr->send_buf.mad_agent->device, |
2431 | mad_send_wr->header_mapping, | |
2432 | mad_send_wr->sg_list[0].length, DMA_TO_DEVICE); | |
2433 | ib_dma_unmap_single(mad_send_wr->send_buf.mad_agent->device, | |
2434 | mad_send_wr->payload_mapping, | |
2435 | mad_send_wr->sg_list[1].length, DMA_TO_DEVICE); | |
1da177e4 LT |
2436 | queued_send_wr = NULL; |
2437 | spin_lock_irqsave(&send_queue->lock, flags); | |
2438 | list_del(&mad_list->list); | |
2439 | ||
2440 | /* Move queued send to the send queue */ | |
2441 | if (send_queue->count-- > send_queue->max_active) { | |
2442 | mad_list = container_of(qp_info->overflow_list.next, | |
2443 | struct ib_mad_list_head, list); | |
2444 | queued_send_wr = container_of(mad_list, | |
2445 | struct ib_mad_send_wr_private, | |
2446 | mad_list); | |
179e0917 | 2447 | list_move_tail(&mad_list->list, &send_queue->list); |
1da177e4 LT |
2448 | } |
2449 | spin_unlock_irqrestore(&send_queue->lock, flags); | |
2450 | ||
34816ad9 SH |
2451 | mad_send_wc.send_buf = &mad_send_wr->send_buf; |
2452 | mad_send_wc.status = wc->status; | |
2453 | mad_send_wc.vendor_err = wc->vendor_err; | |
1da177e4 | 2454 | if (atomic_read(&qp_info->snoop_count)) |
34816ad9 | 2455 | snoop_send(qp_info, &mad_send_wr->send_buf, &mad_send_wc, |
1da177e4 | 2456 | IB_MAD_SNOOP_SEND_COMPLETIONS); |
34816ad9 | 2457 | ib_mad_complete_send_wr(mad_send_wr, &mad_send_wc); |
1da177e4 LT |
2458 | |
2459 | if (queued_send_wr) { | |
2460 | ret = ib_post_send(qp_info->qp, &queued_send_wr->send_wr, | |
34816ad9 | 2461 | &bad_send_wr); |
1da177e4 | 2462 | if (ret) { |
7ef5d4b0 IW |
2463 | dev_err(&port_priv->device->dev, |
2464 | "ib_post_send failed: %d\n", ret); | |
1da177e4 LT |
2465 | mad_send_wr = queued_send_wr; |
2466 | wc->status = IB_WC_LOC_QP_OP_ERR; | |
2467 | goto retry; | |
2468 | } | |
2469 | } | |
2470 | } | |
2471 | ||
2472 | static void mark_sends_for_retry(struct ib_mad_qp_info *qp_info) | |
2473 | { | |
2474 | struct ib_mad_send_wr_private *mad_send_wr; | |
2475 | struct ib_mad_list_head *mad_list; | |
2476 | unsigned long flags; | |
2477 | ||
2478 | spin_lock_irqsave(&qp_info->send_queue.lock, flags); | |
2479 | list_for_each_entry(mad_list, &qp_info->send_queue.list, list) { | |
2480 | mad_send_wr = container_of(mad_list, | |
2481 | struct ib_mad_send_wr_private, | |
2482 | mad_list); | |
2483 | mad_send_wr->retry = 1; | |
2484 | } | |
2485 | spin_unlock_irqrestore(&qp_info->send_queue.lock, flags); | |
2486 | } | |
2487 | ||
2488 | static void mad_error_handler(struct ib_mad_port_private *port_priv, | |
2489 | struct ib_wc *wc) | |
2490 | { | |
2491 | struct ib_mad_list_head *mad_list; | |
2492 | struct ib_mad_qp_info *qp_info; | |
2493 | struct ib_mad_send_wr_private *mad_send_wr; | |
2494 | int ret; | |
2495 | ||
2496 | /* Determine if failure was a send or receive */ | |
2497 | mad_list = (struct ib_mad_list_head *)(unsigned long)wc->wr_id; | |
2498 | qp_info = mad_list->mad_queue->qp_info; | |
2499 | if (mad_list->mad_queue == &qp_info->recv_queue) | |
2500 | /* | |
2501 | * Receive errors indicate that the QP has entered the error | |
2502 | * state - error handling/shutdown code will cleanup | |
2503 | */ | |
2504 | return; | |
2505 | ||
2506 | /* | |
2507 | * Send errors will transition the QP to SQE - move | |
2508 | * QP to RTS and repost flushed work requests | |
2509 | */ | |
2510 | mad_send_wr = container_of(mad_list, struct ib_mad_send_wr_private, | |
2511 | mad_list); | |
2512 | if (wc->status == IB_WC_WR_FLUSH_ERR) { | |
2513 | if (mad_send_wr->retry) { | |
2514 | /* Repost send */ | |
2515 | struct ib_send_wr *bad_send_wr; | |
2516 | ||
2517 | mad_send_wr->retry = 0; | |
2518 | ret = ib_post_send(qp_info->qp, &mad_send_wr->send_wr, | |
2519 | &bad_send_wr); | |
2520 | if (ret) | |
2521 | ib_mad_send_done_handler(port_priv, wc); | |
2522 | } else | |
2523 | ib_mad_send_done_handler(port_priv, wc); | |
2524 | } else { | |
2525 | struct ib_qp_attr *attr; | |
2526 | ||
2527 | /* Transition QP to RTS and fail offending send */ | |
2528 | attr = kmalloc(sizeof *attr, GFP_KERNEL); | |
2529 | if (attr) { | |
2530 | attr->qp_state = IB_QPS_RTS; | |
2531 | attr->cur_qp_state = IB_QPS_SQE; | |
2532 | ret = ib_modify_qp(qp_info->qp, attr, | |
2533 | IB_QP_STATE | IB_QP_CUR_STATE); | |
2534 | kfree(attr); | |
2535 | if (ret) | |
7ef5d4b0 IW |
2536 | dev_err(&port_priv->device->dev, |
2537 | "mad_error_handler - ib_modify_qp to RTS : %d\n", | |
2538 | ret); | |
1da177e4 LT |
2539 | else |
2540 | mark_sends_for_retry(qp_info); | |
2541 | } | |
2542 | ib_mad_send_done_handler(port_priv, wc); | |
2543 | } | |
2544 | } | |
2545 | ||
2546 | /* | |
2547 | * IB MAD completion callback | |
2548 | */ | |
c4028958 | 2549 | static void ib_mad_completion_handler(struct work_struct *work) |
1da177e4 LT |
2550 | { |
2551 | struct ib_mad_port_private *port_priv; | |
2552 | struct ib_wc wc; | |
2553 | ||
c4028958 | 2554 | port_priv = container_of(work, struct ib_mad_port_private, work); |
1da177e4 LT |
2555 | ib_req_notify_cq(port_priv->cq, IB_CQ_NEXT_COMP); |
2556 | ||
2557 | while (ib_poll_cq(port_priv->cq, 1, &wc) == 1) { | |
2558 | if (wc.status == IB_WC_SUCCESS) { | |
2559 | switch (wc.opcode) { | |
2560 | case IB_WC_SEND: | |
2561 | ib_mad_send_done_handler(port_priv, &wc); | |
2562 | break; | |
2563 | case IB_WC_RECV: | |
2564 | ib_mad_recv_done_handler(port_priv, &wc); | |
2565 | break; | |
2566 | default: | |
2567 | BUG_ON(1); | |
2568 | break; | |
2569 | } | |
2570 | } else | |
2571 | mad_error_handler(port_priv, &wc); | |
2572 | } | |
2573 | } | |
2574 | ||
2575 | static void cancel_mads(struct ib_mad_agent_private *mad_agent_priv) | |
2576 | { | |
2577 | unsigned long flags; | |
2578 | struct ib_mad_send_wr_private *mad_send_wr, *temp_mad_send_wr; | |
2579 | struct ib_mad_send_wc mad_send_wc; | |
2580 | struct list_head cancel_list; | |
2581 | ||
2582 | INIT_LIST_HEAD(&cancel_list); | |
2583 | ||
2584 | spin_lock_irqsave(&mad_agent_priv->lock, flags); | |
2585 | list_for_each_entry_safe(mad_send_wr, temp_mad_send_wr, | |
2586 | &mad_agent_priv->send_list, agent_list) { | |
2587 | if (mad_send_wr->status == IB_WC_SUCCESS) { | |
3cd96564 | 2588 | mad_send_wr->status = IB_WC_WR_FLUSH_ERR; |
1da177e4 LT |
2589 | mad_send_wr->refcount -= (mad_send_wr->timeout > 0); |
2590 | } | |
2591 | } | |
2592 | ||
2593 | /* Empty wait list to prevent receives from finding a request */ | |
2594 | list_splice_init(&mad_agent_priv->wait_list, &cancel_list); | |
2595 | spin_unlock_irqrestore(&mad_agent_priv->lock, flags); | |
2596 | ||
2597 | /* Report all cancelled requests */ | |
2598 | mad_send_wc.status = IB_WC_WR_FLUSH_ERR; | |
2599 | mad_send_wc.vendor_err = 0; | |
2600 | ||
2601 | list_for_each_entry_safe(mad_send_wr, temp_mad_send_wr, | |
2602 | &cancel_list, agent_list) { | |
34816ad9 SH |
2603 | mad_send_wc.send_buf = &mad_send_wr->send_buf; |
2604 | list_del(&mad_send_wr->agent_list); | |
1da177e4 LT |
2605 | mad_agent_priv->agent.send_handler(&mad_agent_priv->agent, |
2606 | &mad_send_wc); | |
1da177e4 LT |
2607 | atomic_dec(&mad_agent_priv->refcount); |
2608 | } | |
2609 | } | |
2610 | ||
2611 | static struct ib_mad_send_wr_private* | |
34816ad9 SH |
2612 | find_send_wr(struct ib_mad_agent_private *mad_agent_priv, |
2613 | struct ib_mad_send_buf *send_buf) | |
1da177e4 LT |
2614 | { |
2615 | struct ib_mad_send_wr_private *mad_send_wr; | |
2616 | ||
2617 | list_for_each_entry(mad_send_wr, &mad_agent_priv->wait_list, | |
2618 | agent_list) { | |
34816ad9 | 2619 | if (&mad_send_wr->send_buf == send_buf) |
1da177e4 LT |
2620 | return mad_send_wr; |
2621 | } | |
2622 | ||
2623 | list_for_each_entry(mad_send_wr, &mad_agent_priv->send_list, | |
2624 | agent_list) { | |
c597eee5 IW |
2625 | if (is_rmpp_data_mad(mad_agent_priv, |
2626 | mad_send_wr->send_buf.mad) && | |
34816ad9 | 2627 | &mad_send_wr->send_buf == send_buf) |
1da177e4 LT |
2628 | return mad_send_wr; |
2629 | } | |
2630 | return NULL; | |
2631 | } | |
2632 | ||
34816ad9 SH |
2633 | int ib_modify_mad(struct ib_mad_agent *mad_agent, |
2634 | struct ib_mad_send_buf *send_buf, u32 timeout_ms) | |
1da177e4 LT |
2635 | { |
2636 | struct ib_mad_agent_private *mad_agent_priv; | |
2637 | struct ib_mad_send_wr_private *mad_send_wr; | |
2638 | unsigned long flags; | |
cabe3cbc | 2639 | int active; |
1da177e4 LT |
2640 | |
2641 | mad_agent_priv = container_of(mad_agent, struct ib_mad_agent_private, | |
2642 | agent); | |
2643 | spin_lock_irqsave(&mad_agent_priv->lock, flags); | |
34816ad9 | 2644 | mad_send_wr = find_send_wr(mad_agent_priv, send_buf); |
03b61ad2 | 2645 | if (!mad_send_wr || mad_send_wr->status != IB_WC_SUCCESS) { |
1da177e4 | 2646 | spin_unlock_irqrestore(&mad_agent_priv->lock, flags); |
03b61ad2 | 2647 | return -EINVAL; |
1da177e4 LT |
2648 | } |
2649 | ||
cabe3cbc | 2650 | active = (!mad_send_wr->timeout || mad_send_wr->refcount > 1); |
03b61ad2 | 2651 | if (!timeout_ms) { |
1da177e4 | 2652 | mad_send_wr->status = IB_WC_WR_FLUSH_ERR; |
03b61ad2 | 2653 | mad_send_wr->refcount -= (mad_send_wr->timeout > 0); |
1da177e4 LT |
2654 | } |
2655 | ||
34816ad9 | 2656 | mad_send_wr->send_buf.timeout_ms = timeout_ms; |
cabe3cbc | 2657 | if (active) |
03b61ad2 HR |
2658 | mad_send_wr->timeout = msecs_to_jiffies(timeout_ms); |
2659 | else | |
2660 | ib_reset_mad_timeout(mad_send_wr, timeout_ms); | |
2661 | ||
1da177e4 | 2662 | spin_unlock_irqrestore(&mad_agent_priv->lock, flags); |
03b61ad2 HR |
2663 | return 0; |
2664 | } | |
2665 | EXPORT_SYMBOL(ib_modify_mad); | |
1da177e4 | 2666 | |
34816ad9 SH |
2667 | void ib_cancel_mad(struct ib_mad_agent *mad_agent, |
2668 | struct ib_mad_send_buf *send_buf) | |
03b61ad2 | 2669 | { |
34816ad9 | 2670 | ib_modify_mad(mad_agent, send_buf, 0); |
1da177e4 LT |
2671 | } |
2672 | EXPORT_SYMBOL(ib_cancel_mad); | |
2673 | ||
c4028958 | 2674 | static void local_completions(struct work_struct *work) |
1da177e4 LT |
2675 | { |
2676 | struct ib_mad_agent_private *mad_agent_priv; | |
2677 | struct ib_mad_local_private *local; | |
2678 | struct ib_mad_agent_private *recv_mad_agent; | |
2679 | unsigned long flags; | |
1d9bc6d6 | 2680 | int free_mad; |
1da177e4 LT |
2681 | struct ib_wc wc; |
2682 | struct ib_mad_send_wc mad_send_wc; | |
8e4349d1 | 2683 | bool opa; |
1da177e4 | 2684 | |
c4028958 DH |
2685 | mad_agent_priv = |
2686 | container_of(work, struct ib_mad_agent_private, local_work); | |
1da177e4 | 2687 | |
8e4349d1 IW |
2688 | opa = rdma_cap_opa_mad(mad_agent_priv->qp_info->port_priv->device, |
2689 | mad_agent_priv->qp_info->port_priv->port_num); | |
2690 | ||
1da177e4 LT |
2691 | spin_lock_irqsave(&mad_agent_priv->lock, flags); |
2692 | while (!list_empty(&mad_agent_priv->local_list)) { | |
2693 | local = list_entry(mad_agent_priv->local_list.next, | |
2694 | struct ib_mad_local_private, | |
2695 | completion_list); | |
37289efe | 2696 | list_del(&local->completion_list); |
1da177e4 | 2697 | spin_unlock_irqrestore(&mad_agent_priv->lock, flags); |
1d9bc6d6 | 2698 | free_mad = 0; |
1da177e4 | 2699 | if (local->mad_priv) { |
8e4349d1 | 2700 | u8 base_version; |
1da177e4 LT |
2701 | recv_mad_agent = local->recv_mad_agent; |
2702 | if (!recv_mad_agent) { | |
7ef5d4b0 IW |
2703 | dev_err(&mad_agent_priv->agent.device->dev, |
2704 | "No receive MAD agent for local completion\n"); | |
1d9bc6d6 | 2705 | free_mad = 1; |
1da177e4 LT |
2706 | goto local_send_completion; |
2707 | } | |
2708 | ||
2709 | /* | |
2710 | * Defined behavior is to complete response | |
2711 | * before request | |
2712 | */ | |
062dbb69 MT |
2713 | build_smp_wc(recv_mad_agent->agent.qp, |
2714 | (unsigned long) local->mad_send_wr, | |
97f52eb4 | 2715 | be16_to_cpu(IB_LID_PERMISSIVE), |
8e4349d1 IW |
2716 | local->mad_send_wr->send_wr.wr.ud.pkey_index, |
2717 | recv_mad_agent->agent.port_num, &wc); | |
1da177e4 LT |
2718 | |
2719 | local->mad_priv->header.recv_wc.wc = &wc; | |
8e4349d1 IW |
2720 | |
2721 | base_version = ((struct ib_mad_hdr *)(local->mad_priv->mad))->base_version; | |
2722 | if (opa && base_version == OPA_MGMT_BASE_VERSION) { | |
2723 | local->mad_priv->header.recv_wc.mad_len = local->return_wc_byte_len; | |
2724 | local->mad_priv->header.recv_wc.mad_seg_size = sizeof(struct opa_mad); | |
2725 | } else { | |
2726 | local->mad_priv->header.recv_wc.mad_len = sizeof(struct ib_mad); | |
2727 | local->mad_priv->header.recv_wc.mad_seg_size = sizeof(struct ib_mad); | |
2728 | } | |
2729 | ||
fa619a77 HR |
2730 | INIT_LIST_HEAD(&local->mad_priv->header.recv_wc.rmpp_list); |
2731 | list_add(&local->mad_priv->header.recv_wc.recv_buf.list, | |
2732 | &local->mad_priv->header.recv_wc.rmpp_list); | |
1da177e4 LT |
2733 | local->mad_priv->header.recv_wc.recv_buf.grh = NULL; |
2734 | local->mad_priv->header.recv_wc.recv_buf.mad = | |
c9082e51 | 2735 | (struct ib_mad *)local->mad_priv->mad; |
1da177e4 LT |
2736 | if (atomic_read(&recv_mad_agent->qp_info->snoop_count)) |
2737 | snoop_recv(recv_mad_agent->qp_info, | |
2738 | &local->mad_priv->header.recv_wc, | |
2739 | IB_MAD_SNOOP_RECVS); | |
2740 | recv_mad_agent->agent.recv_handler( | |
2741 | &recv_mad_agent->agent, | |
2742 | &local->mad_priv->header.recv_wc); | |
2743 | spin_lock_irqsave(&recv_mad_agent->lock, flags); | |
2744 | atomic_dec(&recv_mad_agent->refcount); | |
2745 | spin_unlock_irqrestore(&recv_mad_agent->lock, flags); | |
2746 | } | |
2747 | ||
2748 | local_send_completion: | |
2749 | /* Complete send */ | |
2750 | mad_send_wc.status = IB_WC_SUCCESS; | |
2751 | mad_send_wc.vendor_err = 0; | |
34816ad9 | 2752 | mad_send_wc.send_buf = &local->mad_send_wr->send_buf; |
1da177e4 | 2753 | if (atomic_read(&mad_agent_priv->qp_info->snoop_count)) |
34816ad9 SH |
2754 | snoop_send(mad_agent_priv->qp_info, |
2755 | &local->mad_send_wr->send_buf, | |
2756 | &mad_send_wc, IB_MAD_SNOOP_SEND_COMPLETIONS); | |
1da177e4 LT |
2757 | mad_agent_priv->agent.send_handler(&mad_agent_priv->agent, |
2758 | &mad_send_wc); | |
2759 | ||
2760 | spin_lock_irqsave(&mad_agent_priv->lock, flags); | |
1da177e4 | 2761 | atomic_dec(&mad_agent_priv->refcount); |
1d9bc6d6 | 2762 | if (free_mad) |
c9082e51 | 2763 | kfree(local->mad_priv); |
1da177e4 LT |
2764 | kfree(local); |
2765 | } | |
2766 | spin_unlock_irqrestore(&mad_agent_priv->lock, flags); | |
2767 | } | |
2768 | ||
f75b7a52 HR |
2769 | static int retry_send(struct ib_mad_send_wr_private *mad_send_wr) |
2770 | { | |
2771 | int ret; | |
2772 | ||
4fc8cd49 | 2773 | if (!mad_send_wr->retries_left) |
f75b7a52 HR |
2774 | return -ETIMEDOUT; |
2775 | ||
4fc8cd49 SH |
2776 | mad_send_wr->retries_left--; |
2777 | mad_send_wr->send_buf.retries++; | |
2778 | ||
34816ad9 | 2779 | mad_send_wr->timeout = msecs_to_jiffies(mad_send_wr->send_buf.timeout_ms); |
f75b7a52 | 2780 | |
1471cb6c | 2781 | if (ib_mad_kernel_rmpp_agent(&mad_send_wr->mad_agent_priv->agent)) { |
fa619a77 HR |
2782 | ret = ib_retry_rmpp(mad_send_wr); |
2783 | switch (ret) { | |
2784 | case IB_RMPP_RESULT_UNHANDLED: | |
2785 | ret = ib_send_mad(mad_send_wr); | |
2786 | break; | |
2787 | case IB_RMPP_RESULT_CONSUMED: | |
2788 | ret = 0; | |
2789 | break; | |
2790 | default: | |
2791 | ret = -ECOMM; | |
2792 | break; | |
2793 | } | |
2794 | } else | |
2795 | ret = ib_send_mad(mad_send_wr); | |
f75b7a52 HR |
2796 | |
2797 | if (!ret) { | |
2798 | mad_send_wr->refcount++; | |
f75b7a52 HR |
2799 | list_add_tail(&mad_send_wr->agent_list, |
2800 | &mad_send_wr->mad_agent_priv->send_list); | |
2801 | } | |
2802 | return ret; | |
2803 | } | |
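/*
 * Editorial sketch of the retry bookkeeping above (stand-alone types,
 * hypothetical names): retries_left counts down until the send times
 * out, while send_buf.retries counts up so the client can see how many
 * resends actually happened.
 */
struct sketch_send_state { int retries_left; int retries; };

static int sketch_try_retry(struct sketch_send_state *s)
{
	if (!s->retries_left)
		return -1;	/* give up: report a timeout */
	s->retries_left--;
	s->retries++;
	return 0;		/* caller may repost the send */
}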
2804 | ||
static void timeout_sends(struct work_struct *work)
{
        struct ib_mad_agent_private *mad_agent_priv;
        struct ib_mad_send_wr_private *mad_send_wr;
        struct ib_mad_send_wc mad_send_wc;
        unsigned long flags, delay;

        mad_agent_priv = container_of(work, struct ib_mad_agent_private,
                                      timed_work.work);
        mad_send_wc.vendor_err = 0;

        spin_lock_irqsave(&mad_agent_priv->lock, flags);
        while (!list_empty(&mad_agent_priv->wait_list)) {
                mad_send_wr = list_entry(mad_agent_priv->wait_list.next,
                                         struct ib_mad_send_wr_private,
                                         agent_list);

                if (time_after(mad_send_wr->timeout, jiffies)) {
                        delay = mad_send_wr->timeout - jiffies;
                        if ((long)delay <= 0)
                                delay = 1;
                        queue_delayed_work(mad_agent_priv->qp_info->
                                           port_priv->wq,
                                           &mad_agent_priv->timed_work, delay);
                        break;
                }

                list_del(&mad_send_wr->agent_list);
                if (mad_send_wr->status == IB_WC_SUCCESS &&
                    !retry_send(mad_send_wr))
                        continue;

                spin_unlock_irqrestore(&mad_agent_priv->lock, flags);

                if (mad_send_wr->status == IB_WC_SUCCESS)
                        mad_send_wc.status = IB_WC_RESP_TIMEOUT_ERR;
                else
                        mad_send_wc.status = mad_send_wr->status;
                mad_send_wc.send_buf = &mad_send_wr->send_buf;
                mad_agent_priv->agent.send_handler(&mad_agent_priv->agent,
                                                   &mad_send_wc);

                atomic_dec(&mad_agent_priv->refcount);
                spin_lock_irqsave(&mad_agent_priv->lock, flags);
        }
        spin_unlock_irqrestore(&mad_agent_priv->lock, flags);
}

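/*
 * CQ event callback.  MAD processing happens in workqueue context, so
 * this only kicks port_priv->wq; checking port_list under
 * ib_mad_port_list_lock avoids queueing work for a port that has
 * already been unlinked by ib_mad_port_close().
 */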
static void ib_mad_thread_completion_handler(struct ib_cq *cq, void *arg)
{
        struct ib_mad_port_private *port_priv = cq->cq_context;
        unsigned long flags;

        spin_lock_irqsave(&ib_mad_port_list_lock, flags);
        if (!list_empty(&port_priv->port_list))
                queue_work(port_priv->wq, &port_priv->work);
        spin_unlock_irqrestore(&ib_mad_port_list_lock, flags);
}

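/*
 * Receive buffer layout note: each buffer is DMA-mapped starting at
 * mad_priv->grh, so the GRH (valid only for globally routed packets)
 * sits immediately in front of the MAD itself and both are covered by
 * one mapping of mad_priv_dma_size() bytes.
 */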
/*
 * Allocate receive MADs and post receive WRs for them
 */
static int ib_mad_post_receive_mads(struct ib_mad_qp_info *qp_info,
                                    struct ib_mad_private *mad)
{
        unsigned long flags;
        int post, ret;
        struct ib_mad_private *mad_priv;
        struct ib_sge sg_list;
        struct ib_recv_wr recv_wr, *bad_recv_wr;
        struct ib_mad_queue *recv_queue = &qp_info->recv_queue;

        /* Initialize common scatter list fields */
        sg_list.lkey = qp_info->port_priv->pd->local_dma_lkey;

        /* Initialize common receive WR fields */
        recv_wr.next = NULL;
        recv_wr.sg_list = &sg_list;
        recv_wr.num_sge = 1;

        do {
                /* Allocate and map receive buffer */
                if (mad) {
                        mad_priv = mad;
                        mad = NULL;
                } else {
                        mad_priv = alloc_mad_private(port_mad_size(qp_info->port_priv),
                                                     GFP_ATOMIC);
                        if (!mad_priv) {
                                dev_err(&qp_info->port_priv->device->dev,
                                        "No memory for receive buffer\n");
                                ret = -ENOMEM;
                                break;
                        }
                }
                sg_list.length = mad_priv_dma_size(mad_priv);
                sg_list.addr = ib_dma_map_single(qp_info->port_priv->device,
                                                 &mad_priv->grh,
                                                 mad_priv_dma_size(mad_priv),
                                                 DMA_FROM_DEVICE);
                if (unlikely(ib_dma_mapping_error(qp_info->port_priv->device,
                                                  sg_list.addr))) {
                        /* We own the buffer here and no caller frees it
                         * on error, so free it before bailing out. */
                        kfree(mad_priv);
                        ret = -ENOMEM;
                        break;
                }
                mad_priv->header.mapping = sg_list.addr;
                recv_wr.wr_id = (unsigned long)&mad_priv->header.mad_list;
                mad_priv->header.mad_list.mad_queue = recv_queue;

                /* Post receive WR */
                spin_lock_irqsave(&recv_queue->lock, flags);
                post = (++recv_queue->count < recv_queue->max_active);
                list_add_tail(&mad_priv->header.mad_list.list, &recv_queue->list);
                spin_unlock_irqrestore(&recv_queue->lock, flags);
                ret = ib_post_recv(qp_info->qp, &recv_wr, &bad_recv_wr);
                if (ret) {
                        spin_lock_irqsave(&recv_queue->lock, flags);
                        list_del(&mad_priv->header.mad_list.list);
                        recv_queue->count--;
                        spin_unlock_irqrestore(&recv_queue->lock, flags);
                        ib_dma_unmap_single(qp_info->port_priv->device,
                                            mad_priv->header.mapping,
                                            mad_priv_dma_size(mad_priv),
                                            DMA_FROM_DEVICE);
                        kfree(mad_priv);
                        dev_err(&qp_info->port_priv->device->dev,
                                "ib_post_recv failed: %d\n", ret);
                        break;
                }
        } while (post);

        return ret;
}

/*
 * Return all the posted receive MADs
 */
static void cleanup_recv_queue(struct ib_mad_qp_info *qp_info)
{
        struct ib_mad_private_header *mad_priv_hdr;
        struct ib_mad_private *recv;
        struct ib_mad_list_head *mad_list;

        if (!qp_info->qp)
                return;

        while (!list_empty(&qp_info->recv_queue.list)) {
                mad_list = list_entry(qp_info->recv_queue.list.next,
                                      struct ib_mad_list_head, list);
                mad_priv_hdr = container_of(mad_list,
                                            struct ib_mad_private_header,
                                            mad_list);
                recv = container_of(mad_priv_hdr, struct ib_mad_private,
                                    header);

                /* Remove from posted receive MAD list */
                list_del(&mad_list->list);

                ib_dma_unmap_single(qp_info->port_priv->device,
                                    recv->header.mapping,
                                    mad_priv_dma_size(recv),
                                    DMA_FROM_DEVICE);
                kfree(recv);
        }

        qp_info->recv_queue.count = 0;
}

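/*
 * Bringing a MAD QP to a usable state follows the usual verbs state
 * machine: RESET -> INIT (P_Key index and Q_Key needed) -> RTR ->
 * RTS (send queue PSN needed).  QP0/QP1 are UD QPs, so no address
 * vector is required for the RTR transition.
 */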
/*
 * Start the port
 */
static int ib_mad_port_start(struct ib_mad_port_private *port_priv)
{
        int ret, i;
        struct ib_qp_attr *attr;
        struct ib_qp *qp;
        u16 pkey_index;

        attr = kmalloc(sizeof *attr, GFP_KERNEL);
        if (!attr) {
                dev_err(&port_priv->device->dev,
                        "Couldn't kmalloc ib_qp_attr\n");
                return -ENOMEM;
        }

        ret = ib_find_pkey(port_priv->device, port_priv->port_num,
                           IB_DEFAULT_PKEY_FULL, &pkey_index);
        if (ret)
                pkey_index = 0;

        for (i = 0; i < IB_MAD_QPS_CORE; i++) {
                qp = port_priv->qp_info[i].qp;
                if (!qp)
                        continue;

                /*
                 * PKey index for QP1 is irrelevant but
                 * one is needed for the Reset to Init transition
                 */
                attr->qp_state = IB_QPS_INIT;
                attr->pkey_index = pkey_index;
                attr->qkey = (qp->qp_num == 0) ? 0 : IB_QP1_QKEY;
                ret = ib_modify_qp(qp, attr, IB_QP_STATE |
                                             IB_QP_PKEY_INDEX | IB_QP_QKEY);
                if (ret) {
                        dev_err(&port_priv->device->dev,
                                "Couldn't change QP%d state to INIT: %d\n",
                                i, ret);
                        goto out;
                }

                attr->qp_state = IB_QPS_RTR;
                ret = ib_modify_qp(qp, attr, IB_QP_STATE);
                if (ret) {
                        dev_err(&port_priv->device->dev,
                                "Couldn't change QP%d state to RTR: %d\n",
                                i, ret);
                        goto out;
                }

                attr->qp_state = IB_QPS_RTS;
                attr->sq_psn = IB_MAD_SEND_Q_PSN;
                ret = ib_modify_qp(qp, attr, IB_QP_STATE | IB_QP_SQ_PSN);
                if (ret) {
                        dev_err(&port_priv->device->dev,
                                "Couldn't change QP%d state to RTS: %d\n",
                                i, ret);
                        goto out;
                }
        }

        ret = ib_req_notify_cq(port_priv->cq, IB_CQ_NEXT_COMP);
        if (ret) {
                dev_err(&port_priv->device->dev,
                        "Failed to request completion notification: %d\n",
                        ret);
                goto out;
        }

        for (i = 0; i < IB_MAD_QPS_CORE; i++) {
                if (!port_priv->qp_info[i].qp)
                        continue;

                ret = ib_mad_post_receive_mads(&port_priv->qp_info[i], NULL);
                if (ret) {
                        dev_err(&port_priv->device->dev,
                                "Couldn't post receive WRs\n");
                        goto out;
                }
        }
out:
        kfree(attr);
        return ret;
}

static void qp_event_handler(struct ib_event *event, void *qp_context)
{
        struct ib_mad_qp_info *qp_info = qp_context;

        /* It's worse than that! He's dead, Jim! */
        dev_err(&qp_info->port_priv->device->dev,
                "Fatal error (%d) on MAD QP (%d)\n",
                event->event, qp_info->qp->qp_num);
}

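/*
 * Per-QP bookkeeping: posted send and receive WRs are tracked on
 * plain lists under a per-queue spinlock, with 'count' checked
 * against 'max_active' to decide when sends must be parked on
 * overflow_list and when receive refilling should stop.
 */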
static void init_mad_queue(struct ib_mad_qp_info *qp_info,
                           struct ib_mad_queue *mad_queue)
{
        mad_queue->qp_info = qp_info;
        mad_queue->count = 0;
        spin_lock_init(&mad_queue->lock);
        INIT_LIST_HEAD(&mad_queue->list);
}

static void init_mad_qp(struct ib_mad_port_private *port_priv,
                        struct ib_mad_qp_info *qp_info)
{
        qp_info->port_priv = port_priv;
        init_mad_queue(qp_info, &qp_info->send_queue);
        init_mad_queue(qp_info, &qp_info->recv_queue);
        INIT_LIST_HEAD(&qp_info->overflow_list);
        spin_lock_init(&qp_info->snoop_lock);
        qp_info->snoop_table = NULL;
        qp_info->snoop_table_size = 0;
        atomic_set(&qp_info->snoop_count, 0);
}

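/*
 * Both special QPs share the port CQ for send and receive completions,
 * which is why ib_mad_port_open() sizes that CQ at mad_sendq_size +
 * mad_recvq_size per QP.  Sends use IB_SIGNAL_ALL_WR, so every send WR
 * generates a completion for the accounting above.
 */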
static int create_mad_qp(struct ib_mad_qp_info *qp_info,
                         enum ib_qp_type qp_type)
{
        struct ib_qp_init_attr qp_init_attr;
        int ret;

        memset(&qp_init_attr, 0, sizeof qp_init_attr);
        qp_init_attr.send_cq = qp_info->port_priv->cq;
        qp_init_attr.recv_cq = qp_info->port_priv->cq;
        qp_init_attr.sq_sig_type = IB_SIGNAL_ALL_WR;
        qp_init_attr.cap.max_send_wr = mad_sendq_size;
        qp_init_attr.cap.max_recv_wr = mad_recvq_size;
        qp_init_attr.cap.max_send_sge = IB_MAD_SEND_REQ_MAX_SG;
        qp_init_attr.cap.max_recv_sge = IB_MAD_RECV_REQ_MAX_SG;
        qp_init_attr.qp_type = qp_type;
        qp_init_attr.port_num = qp_info->port_priv->port_num;
        qp_init_attr.qp_context = qp_info;
        qp_init_attr.event_handler = qp_event_handler;
        qp_info->qp = ib_create_qp(qp_info->port_priv->pd, &qp_init_attr);
        if (IS_ERR(qp_info->qp)) {
                dev_err(&qp_info->port_priv->device->dev,
                        "Couldn't create ib_mad QP%d\n",
                        get_spl_qp_index(qp_type));
                ret = PTR_ERR(qp_info->qp);
                goto error;
        }
        /* Use minimum queue sizes unless the CQ is resized */
        qp_info->send_queue.max_active = mad_sendq_size;
        qp_info->recv_queue.max_active = mad_recvq_size;
        return 0;

error:
        return ret;
}

static void destroy_mad_qp(struct ib_mad_qp_info *qp_info)
{
        if (!qp_info->qp)
                return;

        ib_destroy_qp(qp_info->qp);
        kfree(qp_info->snoop_table);
}

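/*
 * The WARN_ON()s below sanity-check the device's advertised limits: a
 * MAD-capable port must take at least an IB_MGMT_MAD_SIZE buffer, and
 * an OPA MAD capable port must also take OPA_MGMT_MAD_SIZE (the larger
 * 2KB OPA MAD).
 */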
/*
 * Open the port
 * Create the QP, PD, and CQ if needed
 */
static int ib_mad_port_open(struct ib_device *device,
                            int port_num)
{
        int ret, cq_size;
        struct ib_mad_port_private *port_priv;
        unsigned long flags;
        char name[sizeof "ib_mad123"];
        int has_smi;
        struct ib_cq_init_attr cq_attr = {};

        if (WARN_ON(rdma_max_mad_size(device, port_num) < IB_MGMT_MAD_SIZE))
                return -EFAULT;

        if (WARN_ON(rdma_cap_opa_mad(device, port_num) &&
                    rdma_max_mad_size(device, port_num) < OPA_MGMT_MAD_SIZE))
                return -EFAULT;

        /* Create new device info */
        port_priv = kzalloc(sizeof *port_priv, GFP_KERNEL);
        if (!port_priv) {
                dev_err(&device->dev, "No memory for ib_mad_port_private\n");
                return -ENOMEM;
        }

        port_priv->device = device;
        port_priv->port_num = port_num;
        spin_lock_init(&port_priv->reg_lock);
        INIT_LIST_HEAD(&port_priv->agent_list);
        init_mad_qp(port_priv, &port_priv->qp_info[0]);
        init_mad_qp(port_priv, &port_priv->qp_info[1]);

        cq_size = mad_sendq_size + mad_recvq_size;
        has_smi = rdma_cap_ib_smi(device, port_num);
        if (has_smi)
                cq_size *= 2;

        cq_attr.cqe = cq_size;
        port_priv->cq = ib_create_cq(port_priv->device,
                                     ib_mad_thread_completion_handler,
                                     NULL, port_priv, &cq_attr);
        if (IS_ERR(port_priv->cq)) {
                dev_err(&device->dev, "Couldn't create ib_mad CQ\n");
                ret = PTR_ERR(port_priv->cq);
                goto error3;
        }

        port_priv->pd = ib_alloc_pd(device);
        if (IS_ERR(port_priv->pd)) {
                dev_err(&device->dev, "Couldn't create ib_mad PD\n");
                ret = PTR_ERR(port_priv->pd);
                goto error4;
        }

        if (has_smi) {
                ret = create_mad_qp(&port_priv->qp_info[0], IB_QPT_SMI);
                if (ret)
                        goto error6;
        }
        ret = create_mad_qp(&port_priv->qp_info[1], IB_QPT_GSI);
        if (ret)
                goto error7;

        snprintf(name, sizeof name, "ib_mad%d", port_num);
        port_priv->wq = create_singlethread_workqueue(name);
        if (!port_priv->wq) {
                ret = -ENOMEM;
                goto error8;
        }
        INIT_WORK(&port_priv->work, ib_mad_completion_handler);

        spin_lock_irqsave(&ib_mad_port_list_lock, flags);
        list_add_tail(&port_priv->port_list, &ib_mad_port_list);
        spin_unlock_irqrestore(&ib_mad_port_list_lock, flags);

        ret = ib_mad_port_start(port_priv);
        if (ret) {
                dev_err(&device->dev, "Couldn't start port\n");
                goto error9;
        }

        return 0;

error9:
        spin_lock_irqsave(&ib_mad_port_list_lock, flags);
        list_del_init(&port_priv->port_list);
        spin_unlock_irqrestore(&ib_mad_port_list_lock, flags);

        destroy_workqueue(port_priv->wq);
error8:
        destroy_mad_qp(&port_priv->qp_info[1]);
error7:
        destroy_mad_qp(&port_priv->qp_info[0]);
error6:
        ib_dealloc_pd(port_priv->pd);
error4:
        ib_destroy_cq(port_priv->cq);
        cleanup_recv_queue(&port_priv->qp_info[1]);
        cleanup_recv_queue(&port_priv->qp_info[0]);
error3:
        kfree(port_priv);

        return ret;
}

/*
 * Close the port
 * If there are no classes using the port, free the port
 * resources (CQ, PD, QP) and remove the port's info structure
 */
static int ib_mad_port_close(struct ib_device *device, int port_num)
{
        struct ib_mad_port_private *port_priv;
        unsigned long flags;

        spin_lock_irqsave(&ib_mad_port_list_lock, flags);
        port_priv = __ib_get_mad_port(device, port_num);
        if (port_priv == NULL) {
                spin_unlock_irqrestore(&ib_mad_port_list_lock, flags);
                dev_err(&device->dev, "Port %d not found\n", port_num);
                return -ENODEV;
        }
        list_del_init(&port_priv->port_list);
        spin_unlock_irqrestore(&ib_mad_port_list_lock, flags);

        destroy_workqueue(port_priv->wq);
        destroy_mad_qp(&port_priv->qp_info[1]);
        destroy_mad_qp(&port_priv->qp_info[0]);
        ib_dealloc_pd(port_priv->pd);
        ib_destroy_cq(port_priv->cq);
        cleanup_recv_queue(&port_priv->qp_info[1]);
        cleanup_recv_queue(&port_priv->qp_info[0]);
        /* XXX: Handle deallocation of MAD registration tables */

        kfree(port_priv);

        return 0;
}

static void ib_mad_init_device(struct ib_device *device)
{
        int start, i;

        start = rdma_start_port(device);

        for (i = start; i <= rdma_end_port(device); i++) {
                if (!rdma_cap_ib_mad(device, i))
                        continue;

                if (ib_mad_port_open(device, i)) {
                        dev_err(&device->dev, "Couldn't open port %d\n", i);
                        goto error;
                }
                if (ib_agent_port_open(device, i)) {
                        dev_err(&device->dev,
                                "Couldn't open port %d for agents\n", i);
                        goto error_agent;
                }
        }
        return;

error_agent:
        if (ib_mad_port_close(device, i))
                dev_err(&device->dev, "Couldn't close port %d\n", i);

error:
        while (--i >= start) {
                if (!rdma_cap_ib_mad(device, i))
                        continue;

                if (ib_agent_port_close(device, i))
                        dev_err(&device->dev,
                                "Couldn't close port %d for agents\n", i);
                if (ib_mad_port_close(device, i))
                        dev_err(&device->dev, "Couldn't close port %d\n", i);
        }
}

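/*
 * Device removal mirrors ib_mad_init_device(): agents are closed
 * before the port itself, and ports without MAD support are skipped
 * with the same rdma_cap_ib_mad() test used at add time.
 */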
static void ib_mad_remove_device(struct ib_device *device, void *client_data)
{
        int i;

        for (i = rdma_start_port(device); i <= rdma_end_port(device); i++) {
                if (!rdma_cap_ib_mad(device, i))
                        continue;

                if (ib_agent_port_close(device, i))
                        dev_err(&device->dev,
                                "Couldn't close port %d for agents\n", i);
                if (ib_mad_port_close(device, i))
                        dev_err(&device->dev, "Couldn't close port %d\n", i);
        }
}

static struct ib_client mad_client = {
        .name   = "mad",
        .add    = ib_mad_init_device,
        .remove = ib_mad_remove_device
};

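/*
 * Module load clamps the queue-size parameters rather than rejecting
 * them: out-of-range send_queue_size/recv_queue_size values are pulled
 * back into [IB_MAD_QP_MIN_SIZE, IB_MAD_QP_MAX_SIZE].
 */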
static int __init ib_mad_init_module(void)
{
        mad_recvq_size = min(mad_recvq_size, IB_MAD_QP_MAX_SIZE);
        mad_recvq_size = max(mad_recvq_size, IB_MAD_QP_MIN_SIZE);

        mad_sendq_size = min(mad_sendq_size, IB_MAD_QP_MAX_SIZE);
        mad_sendq_size = max(mad_sendq_size, IB_MAD_QP_MIN_SIZE);

        INIT_LIST_HEAD(&ib_mad_port_list);

        if (ib_register_client(&mad_client)) {
                pr_err("Couldn't register ib_mad client\n");
                return -EINVAL;
        }

        return 0;
}

static void __exit ib_mad_cleanup_module(void)
{
        ib_unregister_client(&mad_client);
}

module_init(ib_mad_init_module);
module_exit(ib_mad_cleanup_module);