drivers/infiniband/core/mad.c
1 /*
2  * Copyright (c) 2004, 2005 Voltaire, Inc. All rights reserved.
3  * Copyright (c) 2005 Intel Corporation.  All rights reserved.
4  * Copyright (c) 2005 Mellanox Technologies Ltd.  All rights reserved.
5  *
6  * This software is available to you under a choice of one of two
7  * licenses.  You may choose to be licensed under the terms of the GNU
8  * General Public License (GPL) Version 2, available from the file
9  * COPYING in the main directory of this source tree, or the
10  * OpenIB.org BSD license below:
11  *
12  *     Redistribution and use in source and binary forms, with or
13  *     without modification, are permitted provided that the following
14  *     conditions are met:
15  *
16  *      - Redistributions of source code must retain the above
17  *        copyright notice, this list of conditions and the following
18  *        disclaimer.
19  *
20  *      - Redistributions in binary form must reproduce the above
21  *        copyright notice, this list of conditions and the following
22  *        disclaimer in the documentation and/or other materials
23  *        provided with the distribution.
24  *
25  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
26  * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
27  * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
28  * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
29  * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
30  * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
31  * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
32  * SOFTWARE.
33  *
34  * $Id: mad.c 2817 2005-07-07 11:29:26Z halr $
35  */
36 #include <linux/dma-mapping.h>
37
38 #include "mad_priv.h"
39 #include "mad_rmpp.h"
40 #include "smi.h"
41 #include "agent.h"
42
43 MODULE_LICENSE("Dual BSD/GPL");
44 MODULE_DESCRIPTION("kernel IB MAD API");
45 MODULE_AUTHOR("Hal Rosenstock");
46 MODULE_AUTHOR("Sean Hefty");
47
48
49 kmem_cache_t *ib_mad_cache;
50
51 static struct list_head ib_mad_port_list;
52 static u32 ib_mad_client_id = 0;
53
54 /* Port list lock */
55 static spinlock_t ib_mad_port_list_lock;
56
57
58 /* Forward declarations */
59 static int method_in_use(struct ib_mad_mgmt_method_table **method,
60                          struct ib_mad_reg_req *mad_reg_req);
61 static void remove_mad_reg_req(struct ib_mad_agent_private *priv);
62 static struct ib_mad_agent_private *find_mad_agent(
63                                         struct ib_mad_port_private *port_priv,
64                                         struct ib_mad *mad);
65 static int ib_mad_post_receive_mads(struct ib_mad_qp_info *qp_info,
66                                     struct ib_mad_private *mad);
67 static void cancel_mads(struct ib_mad_agent_private *mad_agent_priv);
68 static void timeout_sends(void *data);
69 static void local_completions(void *data);
70 static int add_nonoui_reg_req(struct ib_mad_reg_req *mad_reg_req,
71                               struct ib_mad_agent_private *agent_priv,
72                               u8 mgmt_class);
73 static int add_oui_reg_req(struct ib_mad_reg_req *mad_reg_req,
74                            struct ib_mad_agent_private *agent_priv);
75
76 /*
77  * Returns an ib_mad_port_private structure or NULL for a device/port
78  * Assumes the caller holds ib_mad_port_list_lock
79  */
80 static inline struct ib_mad_port_private *
81 __ib_get_mad_port(struct ib_device *device, int port_num)
82 {
83         struct ib_mad_port_private *entry;
84
85         list_for_each_entry(entry, &ib_mad_port_list, port_list) {
86                 if (entry->device == device && entry->port_num == port_num)
87                         return entry;
88         }
89         return NULL;
90 }
91
92 /*
93  * Wrapper function to return an ib_mad_port_private structure or NULL
94  * for a device/port
95  */
96 static inline struct ib_mad_port_private *
97 ib_get_mad_port(struct ib_device *device, int port_num)
98 {
99         struct ib_mad_port_private *entry;
100         unsigned long flags;
101
102         spin_lock_irqsave(&ib_mad_port_list_lock, flags);
103         entry = __ib_get_mad_port(device, port_num);
104         spin_unlock_irqrestore(&ib_mad_port_list_lock, flags);
105
106         return entry;
107 }
108
109 static inline u8 convert_mgmt_class(u8 mgmt_class)
110 {
111         /* Alias IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE to 0 */
112         return mgmt_class == IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE ?
113                 0 : mgmt_class;
114 }
115
116 static int get_spl_qp_index(enum ib_qp_type qp_type)
117 {
118         switch (qp_type)
119         {
120         case IB_QPT_SMI:
121                 return 0;
122         case IB_QPT_GSI:
123                 return 1;
124         default:
125                 return -1;
126         }
127 }
128
129 static int vendor_class_index(u8 mgmt_class)
130 {
131         return mgmt_class - IB_MGMT_CLASS_VENDOR_RANGE2_START;
132 }
133
134 static int is_vendor_class(u8 mgmt_class)
135 {
136         if ((mgmt_class < IB_MGMT_CLASS_VENDOR_RANGE2_START) ||
137             (mgmt_class > IB_MGMT_CLASS_VENDOR_RANGE2_END))
138                 return 0;
139         return 1;
140 }
141
142 static int is_vendor_oui(char *oui)
143 {
144         if (oui[0] || oui[1] || oui[2])
145                 return 1;
146         return 0;
147 }
148
149 static int is_vendor_method_in_use(
150                 struct ib_mad_mgmt_vendor_class *vendor_class,
151                 struct ib_mad_reg_req *mad_reg_req)
152 {
153         struct ib_mad_mgmt_method_table *method;
154         int i;
155
156         for (i = 0; i < MAX_MGMT_OUI; i++) {
157                 if (!memcmp(vendor_class->oui[i], mad_reg_req->oui, 3)) {
158                         method = vendor_class->method_table[i];
159                         if (method) {
160                                 if (method_in_use(&method, mad_reg_req))
161                                         return 1;
162                                 else
163                                         break;
164                         }
165                 }
166         }
167         return 0;
168 }
169
170 /*
171  * ib_register_mad_agent - Register to send/receive MADs
172  */
173 struct ib_mad_agent *ib_register_mad_agent(struct ib_device *device,
174                                            u8 port_num,
175                                            enum ib_qp_type qp_type,
176                                            struct ib_mad_reg_req *mad_reg_req,
177                                            u8 rmpp_version,
178                                            ib_mad_send_handler send_handler,
179                                            ib_mad_recv_handler recv_handler,
180                                            void *context)
181 {
182         struct ib_mad_port_private *port_priv;
183         struct ib_mad_agent *ret = ERR_PTR(-EINVAL);
184         struct ib_mad_agent_private *mad_agent_priv;
185         struct ib_mad_reg_req *reg_req = NULL;
186         struct ib_mad_mgmt_class_table *class;
187         struct ib_mad_mgmt_vendor_class_table *vendor;
188         struct ib_mad_mgmt_vendor_class *vendor_class;
189         struct ib_mad_mgmt_method_table *method;
190         int ret2, qpn;
191         unsigned long flags;
192         u8 mgmt_class, vclass;
193
194         /* Validate parameters */
195         qpn = get_spl_qp_index(qp_type);
196         if (qpn == -1)
197                 goto error1;
198
199         if (rmpp_version && rmpp_version != IB_MGMT_RMPP_VERSION)
200                 goto error1;
201
202         /* Validate MAD registration request if supplied */
203         if (mad_reg_req) {
204                 if (mad_reg_req->mgmt_class_version >= MAX_MGMT_VERSION)
205                         goto error1;
206                 if (!recv_handler)
207                         goto error1;
208                 if (mad_reg_req->mgmt_class >= MAX_MGMT_CLASS) {
209                         /*
210                          * IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE is the only
211                          * one in this range currently allowed
212                          */
213                         if (mad_reg_req->mgmt_class !=
214                             IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE)
215                                 goto error1;
216                 } else if (mad_reg_req->mgmt_class == 0) {
217                         /*
218                          * Class 0 is reserved in IBA and is used for
219                          * aliasing of IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE
220                          */
221                         goto error1;
222                 } else if (is_vendor_class(mad_reg_req->mgmt_class)) {
223                         /*
224                          * If class is in "new" vendor range,
225                          * ensure supplied OUI is not zero
226                          */
227                         if (!is_vendor_oui(mad_reg_req->oui))
228                                 goto error1;
229                 }
230                 /* Make sure class supplied is consistent with QP type */
231                 if (qp_type == IB_QPT_SMI) {
232                         if ((mad_reg_req->mgmt_class !=
233                                         IB_MGMT_CLASS_SUBN_LID_ROUTED) &&
234                             (mad_reg_req->mgmt_class !=
235                                         IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE))
236                                 goto error1;
237                 } else {
238                         if ((mad_reg_req->mgmt_class ==
239                                         IB_MGMT_CLASS_SUBN_LID_ROUTED) ||
240                             (mad_reg_req->mgmt_class ==
241                                         IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE))
242                                 goto error1;
243                 }
244         } else {
245                 /* No registration request supplied */
246                 if (!send_handler)
247                         goto error1;
248         }
249
250         /* Validate device and port */
251         port_priv = ib_get_mad_port(device, port_num);
252         if (!port_priv) {
253                 ret = ERR_PTR(-ENODEV);
254                 goto error1;
255         }
256
257         /* Allocate structures */
258         mad_agent_priv = kzalloc(sizeof *mad_agent_priv, GFP_KERNEL);
259         if (!mad_agent_priv) {
260                 ret = ERR_PTR(-ENOMEM);
261                 goto error1;
262         }
263
264         mad_agent_priv->agent.mr = ib_get_dma_mr(port_priv->qp_info[qpn].qp->pd,
265                                                  IB_ACCESS_LOCAL_WRITE);
266         if (IS_ERR(mad_agent_priv->agent.mr)) {
267                 ret = ERR_PTR(-ENOMEM);
268                 goto error2;
269         }
270
271         if (mad_reg_req) {
272                 reg_req = kmalloc(sizeof *reg_req, GFP_KERNEL);
273                 if (!reg_req) {
274                         ret = ERR_PTR(-ENOMEM);
275                         goto error3;
276                 }
277                 /* Make a copy of the MAD registration request */
278                 memcpy(reg_req, mad_reg_req, sizeof *reg_req);
279         }
280
281         /* Now, fill in the various structures */
282         mad_agent_priv->qp_info = &port_priv->qp_info[qpn];
283         mad_agent_priv->reg_req = reg_req;
284         mad_agent_priv->agent.rmpp_version = rmpp_version;
285         mad_agent_priv->agent.device = device;
286         mad_agent_priv->agent.recv_handler = recv_handler;
287         mad_agent_priv->agent.send_handler = send_handler;
288         mad_agent_priv->agent.context = context;
289         mad_agent_priv->agent.qp = port_priv->qp_info[qpn].qp;
290         mad_agent_priv->agent.port_num = port_num;
291
292         spin_lock_irqsave(&port_priv->reg_lock, flags);
293         mad_agent_priv->agent.hi_tid = ++ib_mad_client_id;
294
295         /*
296          * Make sure MAD registration (if supplied)
297          * is non overlapping with any existing ones
298          */
299         if (mad_reg_req) {
300                 mgmt_class = convert_mgmt_class(mad_reg_req->mgmt_class);
301                 if (!is_vendor_class(mgmt_class)) {
302                         class = port_priv->version[mad_reg_req->
303                                                    mgmt_class_version].class;
304                         if (class) {
305                                 method = class->method_table[mgmt_class];
306                                 if (method) {
307                                         if (method_in_use(&method,
308                                                            mad_reg_req))
309                                                 goto error4;
310                                 }
311                         }
312                         ret2 = add_nonoui_reg_req(mad_reg_req, mad_agent_priv,
313                                                   mgmt_class);
314                 } else {
315                         /* "New" vendor class range */
316                         vendor = port_priv->version[mad_reg_req->
317                                                     mgmt_class_version].vendor;
318                         if (vendor) {
319                                 vclass = vendor_class_index(mgmt_class);
320                                 vendor_class = vendor->vendor_class[vclass];
321                                 if (vendor_class) {
322                                         if (is_vendor_method_in_use(
323                                                         vendor_class,
324                                                         mad_reg_req))
325                                                 goto error4;
326                                 }
327                         }
328                         ret2 = add_oui_reg_req(mad_reg_req, mad_agent_priv);
329                 }
330                 if (ret2) {
331                         ret = ERR_PTR(ret2);
332                         goto error4;
333                 }
334         }
335
336         /* Add mad agent into port's agent list */
337         list_add_tail(&mad_agent_priv->agent_list, &port_priv->agent_list);
338         spin_unlock_irqrestore(&port_priv->reg_lock, flags);
339
340         spin_lock_init(&mad_agent_priv->lock);
341         INIT_LIST_HEAD(&mad_agent_priv->send_list);
342         INIT_LIST_HEAD(&mad_agent_priv->wait_list);
343         INIT_LIST_HEAD(&mad_agent_priv->done_list);
344         INIT_LIST_HEAD(&mad_agent_priv->rmpp_list);
345         INIT_WORK(&mad_agent_priv->timed_work, timeout_sends, mad_agent_priv);
346         INIT_LIST_HEAD(&mad_agent_priv->local_list);
347         INIT_WORK(&mad_agent_priv->local_work, local_completions,
348                    mad_agent_priv);
349         atomic_set(&mad_agent_priv->refcount, 1);
350         init_waitqueue_head(&mad_agent_priv->wait);
351
352         return &mad_agent_priv->agent;
353
354 error4:
355         spin_unlock_irqrestore(&port_priv->reg_lock, flags);
356         kfree(reg_req);
357 error3:
358         ib_dereg_mr(mad_agent_priv->agent.mr);
359 error2:
360         kfree(mad_agent_priv);
361 error1:
362         return ret;
363 }
364 EXPORT_SYMBOL(ib_register_mad_agent);
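/*
 * Editor's illustrative sketch (not part of the driver logic): how a client
 * of this API might register a GSI MAD agent that services PerfMgmt Get
 * requests, and release it again.  The handler names, context pointer and
 * the class/method choice below are hypothetical; only
 * ib_register_mad_agent()/ib_unregister_mad_agent() as defined in this file
 * are assumed.
 *
 *      struct ib_mad_reg_req reg_req = {
 *              .mgmt_class         = IB_MGMT_CLASS_PERF_MGMT,
 *              .mgmt_class_version = 1,
 *      };
 *      struct ib_mad_agent *agent;
 *
 *      set_bit(IB_MGMT_METHOD_GET, reg_req.method_mask);
 *      agent = ib_register_mad_agent(device, port_num, IB_QPT_GSI,
 *                                    &reg_req, 0, my_send_handler,
 *                                    my_recv_handler, my_context);
 *      if (IS_ERR(agent))
 *              return PTR_ERR(agent);
 *      ...
 *      ib_unregister_mad_agent(agent);
 *
 * Passing a NULL mad_reg_req registers a send-only agent; responses to its
 * requests are still routed back to it by transaction ID (see the
 * validation above and find_mad_agent() below).
 */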
365
366 static inline int is_snooping_sends(int mad_snoop_flags)
367 {
368         return (mad_snoop_flags &
369                 (/*IB_MAD_SNOOP_POSTED_SENDS |
370                  IB_MAD_SNOOP_RMPP_SENDS |*/
371                  IB_MAD_SNOOP_SEND_COMPLETIONS /*|
372                  IB_MAD_SNOOP_RMPP_SEND_COMPLETIONS*/));
373 }
374
375 static inline int is_snooping_recvs(int mad_snoop_flags)
376 {
377         return (mad_snoop_flags &
378                 (IB_MAD_SNOOP_RECVS /*|
379                  IB_MAD_SNOOP_RMPP_RECVS*/));
380 }
381
382 static int register_snoop_agent(struct ib_mad_qp_info *qp_info,
383                                 struct ib_mad_snoop_private *mad_snoop_priv)
384 {
385         struct ib_mad_snoop_private **new_snoop_table;
386         unsigned long flags;
387         int i;
388
389         spin_lock_irqsave(&qp_info->snoop_lock, flags);
390         /* Check for empty slot in array. */
391         for (i = 0; i < qp_info->snoop_table_size; i++)
392                 if (!qp_info->snoop_table[i])
393                         break;
394
395         if (i == qp_info->snoop_table_size) {
396                 /* Grow table. */
397                 new_snoop_table = kmalloc(sizeof mad_snoop_priv *
398                                           (qp_info->snoop_table_size + 1),
399                                           GFP_ATOMIC);
400                 if (!new_snoop_table) {
401                         i = -ENOMEM;
402                         goto out;
403                 }
404                 if (qp_info->snoop_table) {
405                         memcpy(new_snoop_table, qp_info->snoop_table,
406                                sizeof mad_snoop_priv *
407                                qp_info->snoop_table_size);
408                         kfree(qp_info->snoop_table);
409                 }
410                 qp_info->snoop_table = new_snoop_table;
411                 qp_info->snoop_table_size++;
412         }
413         qp_info->snoop_table[i] = mad_snoop_priv;
414         atomic_inc(&qp_info->snoop_count);
415 out:
416         spin_unlock_irqrestore(&qp_info->snoop_lock, flags);
417         return i;
418 }
419
420 struct ib_mad_agent *ib_register_mad_snoop(struct ib_device *device,
421                                            u8 port_num,
422                                            enum ib_qp_type qp_type,
423                                            int mad_snoop_flags,
424                                            ib_mad_snoop_handler snoop_handler,
425                                            ib_mad_recv_handler recv_handler,
426                                            void *context)
427 {
428         struct ib_mad_port_private *port_priv;
429         struct ib_mad_agent *ret;
430         struct ib_mad_snoop_private *mad_snoop_priv;
431         int qpn;
432
433         /* Validate parameters */
434         if ((is_snooping_sends(mad_snoop_flags) && !snoop_handler) ||
435             (is_snooping_recvs(mad_snoop_flags) && !recv_handler)) {
436                 ret = ERR_PTR(-EINVAL);
437                 goto error1;
438         }
439         qpn = get_spl_qp_index(qp_type);
440         if (qpn == -1) {
441                 ret = ERR_PTR(-EINVAL);
442                 goto error1;
443         }
444         port_priv = ib_get_mad_port(device, port_num);
445         if (!port_priv) {
446                 ret = ERR_PTR(-ENODEV);
447                 goto error1;
448         }
449         /* Allocate structures */
450         mad_snoop_priv = kzalloc(sizeof *mad_snoop_priv, GFP_KERNEL);
451         if (!mad_snoop_priv) {
452                 ret = ERR_PTR(-ENOMEM);
453                 goto error1;
454         }
455
456         /* Now, fill in the various structures */
457         mad_snoop_priv->qp_info = &port_priv->qp_info[qpn];
458         mad_snoop_priv->agent.device = device;
459         mad_snoop_priv->agent.recv_handler = recv_handler;
460         mad_snoop_priv->agent.snoop_handler = snoop_handler;
461         mad_snoop_priv->agent.context = context;
462         mad_snoop_priv->agent.qp = port_priv->qp_info[qpn].qp;
463         mad_snoop_priv->agent.port_num = port_num;
464         mad_snoop_priv->mad_snoop_flags = mad_snoop_flags;
465         init_waitqueue_head(&mad_snoop_priv->wait);
466         mad_snoop_priv->snoop_index = register_snoop_agent(
467                                                 &port_priv->qp_info[qpn],
468                                                 mad_snoop_priv);
469         if (mad_snoop_priv->snoop_index < 0) {
470                 ret = ERR_PTR(mad_snoop_priv->snoop_index);
471                 goto error2;
472         }
473
474         atomic_set(&mad_snoop_priv->refcount, 1);
475         return &mad_snoop_priv->agent;
476
477 error2:
478         kfree(mad_snoop_priv);
479 error1:
480         return ret;
481 }
482 EXPORT_SYMBOL(ib_register_mad_snoop);
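/*
 * Editor's illustrative sketch (hypothetical handler names): attaching a
 * snoop agent that mirrors GSI send completions and receives, using only
 * the flags tested in is_snooping_sends()/is_snooping_recvs() above.
 *
 *      agent = ib_register_mad_snoop(device, port_num, IB_QPT_GSI,
 *                                    IB_MAD_SNOOP_SEND_COMPLETIONS |
 *                                    IB_MAD_SNOOP_RECVS,
 *                                    my_snoop_handler, my_recv_handler,
 *                                    my_context);
 *      if (IS_ERR(agent))
 *              return PTR_ERR(agent);
 */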
483
484 static void unregister_mad_agent(struct ib_mad_agent_private *mad_agent_priv)
485 {
486         struct ib_mad_port_private *port_priv;
487         unsigned long flags;
488
489         /* Note that we could still be handling received MADs */
490
491         /*
492          * Canceling all sends results in dropping received response
493          * MADs, preventing us from queuing additional work
494          */
495         cancel_mads(mad_agent_priv);
496         port_priv = mad_agent_priv->qp_info->port_priv;
497         cancel_delayed_work(&mad_agent_priv->timed_work);
498
499         spin_lock_irqsave(&port_priv->reg_lock, flags);
500         remove_mad_reg_req(mad_agent_priv);
501         list_del(&mad_agent_priv->agent_list);
502         spin_unlock_irqrestore(&port_priv->reg_lock, flags);
503
504         flush_workqueue(port_priv->wq);
505         ib_cancel_rmpp_recvs(mad_agent_priv);
506
507         atomic_dec(&mad_agent_priv->refcount);
508         wait_event(mad_agent_priv->wait,
509                    !atomic_read(&mad_agent_priv->refcount));
510
511         if (mad_agent_priv->reg_req)
512                 kfree(mad_agent_priv->reg_req);
513         ib_dereg_mr(mad_agent_priv->agent.mr);
514         kfree(mad_agent_priv);
515 }
516
517 static void unregister_mad_snoop(struct ib_mad_snoop_private *mad_snoop_priv)
518 {
519         struct ib_mad_qp_info *qp_info;
520         unsigned long flags;
521
522         qp_info = mad_snoop_priv->qp_info;
523         spin_lock_irqsave(&qp_info->snoop_lock, flags);
524         qp_info->snoop_table[mad_snoop_priv->snoop_index] = NULL;
525         atomic_dec(&qp_info->snoop_count);
526         spin_unlock_irqrestore(&qp_info->snoop_lock, flags);
527
528         atomic_dec(&mad_snoop_priv->refcount);
529         wait_event(mad_snoop_priv->wait,
530                    !atomic_read(&mad_snoop_priv->refcount));
531
532         kfree(mad_snoop_priv);
533 }
534
535 /*
536  * ib_unregister_mad_agent - Unregisters a client from using MAD services
537  */
538 int ib_unregister_mad_agent(struct ib_mad_agent *mad_agent)
539 {
540         struct ib_mad_agent_private *mad_agent_priv;
541         struct ib_mad_snoop_private *mad_snoop_priv;
542
543         /* If the TID is zero, the agent can only snoop. */
544         if (mad_agent->hi_tid) {
545                 mad_agent_priv = container_of(mad_agent,
546                                               struct ib_mad_agent_private,
547                                               agent);
548                 unregister_mad_agent(mad_agent_priv);
549         } else {
550                 mad_snoop_priv = container_of(mad_agent,
551                                               struct ib_mad_snoop_private,
552                                               agent);
553                 unregister_mad_snoop(mad_snoop_priv);
554         }
555         return 0;
556 }
557 EXPORT_SYMBOL(ib_unregister_mad_agent);
558
559 static inline int response_mad(struct ib_mad *mad)
560 {
561         /* Trap represses are responses even though the response bit is not set */
562         return ((mad->mad_hdr.method == IB_MGMT_METHOD_TRAP_REPRESS) ||
563                 (mad->mad_hdr.method & IB_MGMT_METHOD_RESP));
564 }
565
566 static void dequeue_mad(struct ib_mad_list_head *mad_list)
567 {
568         struct ib_mad_queue *mad_queue;
569         unsigned long flags;
570
571         BUG_ON(!mad_list->mad_queue);
572         mad_queue = mad_list->mad_queue;
573         spin_lock_irqsave(&mad_queue->lock, flags);
574         list_del(&mad_list->list);
575         mad_queue->count--;
576         spin_unlock_irqrestore(&mad_queue->lock, flags);
577 }
578
579 static void snoop_send(struct ib_mad_qp_info *qp_info,
580                        struct ib_mad_send_buf *send_buf,
581                        struct ib_mad_send_wc *mad_send_wc,
582                        int mad_snoop_flags)
583 {
584         struct ib_mad_snoop_private *mad_snoop_priv;
585         unsigned long flags;
586         int i;
587
588         spin_lock_irqsave(&qp_info->snoop_lock, flags);
589         for (i = 0; i < qp_info->snoop_table_size; i++) {
590                 mad_snoop_priv = qp_info->snoop_table[i];
591                 if (!mad_snoop_priv ||
592                     !(mad_snoop_priv->mad_snoop_flags & mad_snoop_flags))
593                         continue;
594
595                 atomic_inc(&mad_snoop_priv->refcount);
596                 spin_unlock_irqrestore(&qp_info->snoop_lock, flags);
597                 mad_snoop_priv->agent.snoop_handler(&mad_snoop_priv->agent,
598                                                     send_buf, mad_send_wc);
599                 if (atomic_dec_and_test(&mad_snoop_priv->refcount))
600                         wake_up(&mad_snoop_priv->wait);
601                 spin_lock_irqsave(&qp_info->snoop_lock, flags);
602         }
603         spin_unlock_irqrestore(&qp_info->snoop_lock, flags);
604 }
605
606 static void snoop_recv(struct ib_mad_qp_info *qp_info,
607                        struct ib_mad_recv_wc *mad_recv_wc,
608                        int mad_snoop_flags)
609 {
610         struct ib_mad_snoop_private *mad_snoop_priv;
611         unsigned long flags;
612         int i;
613
614         spin_lock_irqsave(&qp_info->snoop_lock, flags);
615         for (i = 0; i < qp_info->snoop_table_size; i++) {
616                 mad_snoop_priv = qp_info->snoop_table[i];
617                 if (!mad_snoop_priv ||
618                     !(mad_snoop_priv->mad_snoop_flags & mad_snoop_flags))
619                         continue;
620
621                 atomic_inc(&mad_snoop_priv->refcount);
622                 spin_unlock_irqrestore(&qp_info->snoop_lock, flags);
623                 mad_snoop_priv->agent.recv_handler(&mad_snoop_priv->agent,
624                                                    mad_recv_wc);
625                 if (atomic_dec_and_test(&mad_snoop_priv->refcount))
626                         wake_up(&mad_snoop_priv->wait);
627                 spin_lock_irqsave(&qp_info->snoop_lock, flags);
628         }
629         spin_unlock_irqrestore(&qp_info->snoop_lock, flags);
630 }
631
632 static void build_smp_wc(u64 wr_id, u16 slid, u16 pkey_index, u8 port_num,
633                          struct ib_wc *wc)
634 {
635         memset(wc, 0, sizeof *wc);
636         wc->wr_id = wr_id;
637         wc->status = IB_WC_SUCCESS;
638         wc->opcode = IB_WC_RECV;
639         wc->pkey_index = pkey_index;
640         wc->byte_len = sizeof(struct ib_mad) + sizeof(struct ib_grh);
641         wc->src_qp = IB_QP0;
642         wc->qp_num = IB_QP0;
643         wc->slid = slid;
644         wc->sl = 0;
645         wc->dlid_path_bits = 0;
646         wc->port_num = port_num;
647 }
648
649 /*
650  * Return 0 if SMP is to be sent
651  * Return 1 if SMP was consumed locally (whether or not solicited)
652  * Return < 0 if error
653  */
654 static int handle_outgoing_dr_smp(struct ib_mad_agent_private *mad_agent_priv,
655                                   struct ib_mad_send_wr_private *mad_send_wr)
656 {
657         int ret;
658         struct ib_smp *smp = mad_send_wr->send_buf.mad;
659         unsigned long flags;
660         struct ib_mad_local_private *local;
661         struct ib_mad_private *mad_priv;
662         struct ib_mad_port_private *port_priv;
663         struct ib_mad_agent_private *recv_mad_agent = NULL;
664         struct ib_device *device = mad_agent_priv->agent.device;
665         u8 port_num = mad_agent_priv->agent.port_num;
666         struct ib_wc mad_wc;
667         struct ib_send_wr *send_wr = &mad_send_wr->send_wr;
668
669         if (!smi_handle_dr_smp_send(smp, device->node_type, port_num)) {
670                 ret = -EINVAL;
671                 printk(KERN_ERR PFX "Invalid directed route\n");
672                 goto out;
673         }
674         /* Check to post send on QP or process locally */
675         ret = smi_check_local_dr_smp(smp, device, port_num);
676         if (!ret || !device->process_mad)
677                 goto out;
678
679         local = kmalloc(sizeof *local, GFP_ATOMIC);
680         if (!local) {
681                 ret = -ENOMEM;
682                 printk(KERN_ERR PFX "No memory for ib_mad_local_private\n");
683                 goto out;
684         }
685         local->mad_priv = NULL;
686         local->recv_mad_agent = NULL;
687         mad_priv = kmem_cache_alloc(ib_mad_cache, GFP_ATOMIC);
688         if (!mad_priv) {
689                 ret = -ENOMEM;
690                 printk(KERN_ERR PFX "No memory for local response MAD\n");
691                 kfree(local);
692                 goto out;
693         }
694
695         build_smp_wc(send_wr->wr_id, be16_to_cpu(smp->dr_slid),
696                      send_wr->wr.ud.pkey_index,
697                      send_wr->wr.ud.port_num, &mad_wc);
698
699         /* No GRH for DR SMP */
700         ret = device->process_mad(device, 0, port_num, &mad_wc, NULL,
701                                   (struct ib_mad *)smp,
702                                   (struct ib_mad *)&mad_priv->mad);
703         switch (ret)
704         {
705         case IB_MAD_RESULT_SUCCESS | IB_MAD_RESULT_REPLY:
706                 if (response_mad(&mad_priv->mad.mad) &&
707                     mad_agent_priv->agent.recv_handler) {
708                         local->mad_priv = mad_priv;
709                         local->recv_mad_agent = mad_agent_priv;
710                         /*
711                          * Reference MAD agent until receive
712                          * side of local completion handled
713                          */
714                         atomic_inc(&mad_agent_priv->refcount);
715                 } else
716                         kmem_cache_free(ib_mad_cache, mad_priv);
717                 break;
718         case IB_MAD_RESULT_SUCCESS | IB_MAD_RESULT_CONSUMED:
719                 kmem_cache_free(ib_mad_cache, mad_priv);
720                 break;
721         case IB_MAD_RESULT_SUCCESS:
722                 /* Treat like an incoming receive MAD */
723                 port_priv = ib_get_mad_port(mad_agent_priv->agent.device,
724                                             mad_agent_priv->agent.port_num);
725                 if (port_priv) {
726                         mad_priv->mad.mad.mad_hdr.tid =
727                                 ((struct ib_mad *)smp)->mad_hdr.tid;
728                         recv_mad_agent = find_mad_agent(port_priv,
729                                                         &mad_priv->mad.mad);
730                 }
731                 if (!port_priv || !recv_mad_agent) {
732                         kmem_cache_free(ib_mad_cache, mad_priv);
733                         kfree(local);
734                         ret = 0;
735                         goto out;
736                 }
737                 local->mad_priv = mad_priv;
738                 local->recv_mad_agent = recv_mad_agent;
739                 break;
740         default:
741                 kmem_cache_free(ib_mad_cache, mad_priv);
742                 kfree(local);
743                 ret = -EINVAL;
744                 goto out;
745         }
746
747         local->mad_send_wr = mad_send_wr;
748         /* Reference MAD agent until send side of local completion handled */
749         atomic_inc(&mad_agent_priv->refcount);
750         /* Queue local completion to local list */
751         spin_lock_irqsave(&mad_agent_priv->lock, flags);
752         list_add_tail(&local->completion_list, &mad_agent_priv->local_list);
753         spin_unlock_irqrestore(&mad_agent_priv->lock, flags);
754         queue_work(mad_agent_priv->qp_info->port_priv->wq,
755                    &mad_agent_priv->local_work);
756         ret = 1;
757 out:
758         return ret;
759 }
760
761 static int get_buf_length(int hdr_len, int data_len)
762 {
763         int seg_size, pad;
764
765         seg_size = sizeof(struct ib_mad) - hdr_len;
766         if (data_len && seg_size) {
767                 pad = seg_size - data_len % seg_size;
768                 if (pad == seg_size)
769                         pad = 0;
770         } else
771                 pad = seg_size;
772         return hdr_len + data_len + pad;
773 }
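/*
 * Worked example for get_buf_length(): with sizeof(struct ib_mad) == 256
 * and an SA-sized header of 56 bytes, seg_size is 200.  data_len = 150
 * gives pad = 50, so the buffer is 56 + 150 + 50 = 256 bytes (one MAD);
 * data_len = 500 gives pad = 100 and a 656-byte buffer whose data spans
 * three 200-byte RMPP segments.
 */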
774
775 struct ib_mad_send_buf * ib_create_send_mad(struct ib_mad_agent *mad_agent,
776                                             u32 remote_qpn, u16 pkey_index,
777                                             int rmpp_active,
778                                             int hdr_len, int data_len,
779                                             gfp_t gfp_mask)
780 {
781         struct ib_mad_agent_private *mad_agent_priv;
782         struct ib_mad_send_wr_private *mad_send_wr;
783         int buf_size;
784         void *buf;
785
786         mad_agent_priv = container_of(mad_agent, struct ib_mad_agent_private,
787                                       agent);
788         buf_size = get_buf_length(hdr_len, data_len);
789
790         if ((!mad_agent->rmpp_version &&
791              (rmpp_active || buf_size > sizeof(struct ib_mad))) ||
792             (!rmpp_active && buf_size > sizeof(struct ib_mad)))
793                 return ERR_PTR(-EINVAL);
794
795         buf = kzalloc(sizeof *mad_send_wr + buf_size, gfp_mask);
796         if (!buf)
797                 return ERR_PTR(-ENOMEM);
798
799         mad_send_wr = buf + buf_size;
800         mad_send_wr->send_buf.mad = buf;
801
802         mad_send_wr->mad_agent_priv = mad_agent_priv;
803         mad_send_wr->sg_list[0].length = buf_size;
804         mad_send_wr->sg_list[0].lkey = mad_agent->mr->lkey;
805
806         mad_send_wr->send_wr.wr_id = (unsigned long) mad_send_wr;
807         mad_send_wr->send_wr.sg_list = mad_send_wr->sg_list;
808         mad_send_wr->send_wr.num_sge = 1;
809         mad_send_wr->send_wr.opcode = IB_WR_SEND;
810         mad_send_wr->send_wr.send_flags = IB_SEND_SIGNALED;
811         mad_send_wr->send_wr.wr.ud.remote_qpn = remote_qpn;
812         mad_send_wr->send_wr.wr.ud.remote_qkey = IB_QP_SET_QKEY;
813         mad_send_wr->send_wr.wr.ud.pkey_index = pkey_index;
814
815         if (rmpp_active) {
816                 struct ib_rmpp_mad *rmpp_mad = mad_send_wr->send_buf.mad;
817                 rmpp_mad->rmpp_hdr.paylen_newwin = cpu_to_be32(hdr_len -
818                                                    IB_MGMT_RMPP_HDR + data_len);
819                 rmpp_mad->rmpp_hdr.rmpp_version = mad_agent->rmpp_version;
820                 rmpp_mad->rmpp_hdr.rmpp_type = IB_MGMT_RMPP_TYPE_DATA;
821                 ib_set_rmpp_flags(&rmpp_mad->rmpp_hdr,
822                                   IB_MGMT_RMPP_FLAG_ACTIVE);
823         }
824
825         mad_send_wr->send_buf.mad_agent = mad_agent;
826         atomic_inc(&mad_agent_priv->refcount);
827         return &mad_send_wr->send_buf;
828 }
829 EXPORT_SYMBOL(ib_create_send_mad);
830
831 void ib_free_send_mad(struct ib_mad_send_buf *send_buf)
832 {
833         struct ib_mad_agent_private *mad_agent_priv;
834
835         mad_agent_priv = container_of(send_buf->mad_agent,
836                                       struct ib_mad_agent_private, agent);
837         kfree(send_buf->mad);
838
839         if (atomic_dec_and_test(&mad_agent_priv->refcount))
840                 wake_up(&mad_agent_priv->wait);
841 }
842 EXPORT_SYMBOL(ib_free_send_mad);
843
844 int ib_send_mad(struct ib_mad_send_wr_private *mad_send_wr)
845 {
846         struct ib_mad_qp_info *qp_info;
847         struct list_head *list;
848         struct ib_send_wr *bad_send_wr;
849         struct ib_mad_agent *mad_agent;
850         struct ib_sge *sge;
851         unsigned long flags;
852         int ret;
853
854         /* Set WR ID to find mad_send_wr upon completion */
855         qp_info = mad_send_wr->mad_agent_priv->qp_info;
856         mad_send_wr->send_wr.wr_id = (unsigned long)&mad_send_wr->mad_list;
857         mad_send_wr->mad_list.mad_queue = &qp_info->send_queue;
858
859         mad_agent = mad_send_wr->send_buf.mad_agent;
860         sge = mad_send_wr->sg_list;
861         sge->addr = dma_map_single(mad_agent->device->dma_device,
862                                    mad_send_wr->send_buf.mad, sge->length,
863                                    DMA_TO_DEVICE);
864         pci_unmap_addr_set(mad_send_wr, mapping, sge->addr);
865
866         spin_lock_irqsave(&qp_info->send_queue.lock, flags);
867         if (qp_info->send_queue.count < qp_info->send_queue.max_active) {
868                 ret = ib_post_send(mad_agent->qp, &mad_send_wr->send_wr,
869                                    &bad_send_wr);
870                 list = &qp_info->send_queue.list;
871         } else {
872                 ret = 0;
873                 list = &qp_info->overflow_list;
874         }
875
876         if (!ret) {
877                 qp_info->send_queue.count++;
878                 list_add_tail(&mad_send_wr->mad_list.list, list);
879         }
880         spin_unlock_irqrestore(&qp_info->send_queue.lock, flags);
881         if (ret)
882                 dma_unmap_single(mad_agent->device->dma_device,
883                                  pci_unmap_addr(mad_send_wr, mapping),
884                                  sge->length, DMA_TO_DEVICE);
885
886         return ret;
887 }
888
889 /*
890  * ib_post_send_mad - Posts MAD(s) to the send queue of the QP associated
891  *  with the registered client
892  */
893 int ib_post_send_mad(struct ib_mad_send_buf *send_buf,
894                      struct ib_mad_send_buf **bad_send_buf)
895 {
896         struct ib_mad_agent_private *mad_agent_priv;
897         struct ib_mad_send_buf *next_send_buf;
898         struct ib_mad_send_wr_private *mad_send_wr;
899         unsigned long flags;
900         int ret = -EINVAL;
901
902         /* Walk list of send WRs and post each on send list */
903         for (; send_buf; send_buf = next_send_buf) {
904
905                 mad_send_wr = container_of(send_buf,
906                                            struct ib_mad_send_wr_private,
907                                            send_buf);
908                 mad_agent_priv = mad_send_wr->mad_agent_priv;
909
910                 if (!send_buf->mad_agent->send_handler ||
911                     (send_buf->timeout_ms &&
912                      !send_buf->mad_agent->recv_handler)) {
913                         ret = -EINVAL;
914                         goto error;
915                 }
916
917                 /*
918                  * Save pointer to next work request to post in case the
919                  * current one completes, and the user modifies the work
920                  * request associated with the completion
921                  */
922                 next_send_buf = send_buf->next;
923                 mad_send_wr->send_wr.wr.ud.ah = send_buf->ah;
924
925                 if (((struct ib_mad_hdr *) send_buf->mad)->mgmt_class ==
926                     IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE) {
927                         ret = handle_outgoing_dr_smp(mad_agent_priv,
928                                                      mad_send_wr);
929                         if (ret < 0)            /* error */
930                                 goto error;
931                         else if (ret == 1)      /* locally consumed */
932                                 continue;
933                 }
934
935                 mad_send_wr->tid = ((struct ib_mad_hdr *) send_buf->mad)->tid;
936                 /* Timeout will be updated after send completes */
937                 mad_send_wr->timeout = msecs_to_jiffies(send_buf->timeout_ms);
938                 mad_send_wr->retries = send_buf->retries;
939                 /* Reference for work request to QP + response */
940                 mad_send_wr->refcount = 1 + (mad_send_wr->timeout > 0);
941                 mad_send_wr->status = IB_WC_SUCCESS;
942
943                 /* Reference MAD agent until send completes */
944                 atomic_inc(&mad_agent_priv->refcount);
945                 spin_lock_irqsave(&mad_agent_priv->lock, flags);
946                 list_add_tail(&mad_send_wr->agent_list,
947                               &mad_agent_priv->send_list);
948                 spin_unlock_irqrestore(&mad_agent_priv->lock, flags);
949
950                 if (mad_agent_priv->agent.rmpp_version) {
951                         ret = ib_send_rmpp_mad(mad_send_wr);
952                         if (ret >= 0 && ret != IB_RMPP_RESULT_CONSUMED)
953                                 ret = ib_send_mad(mad_send_wr);
954                 } else
955                         ret = ib_send_mad(mad_send_wr);
956                 if (ret < 0) {
957                         /* Fail send request */
958                         spin_lock_irqsave(&mad_agent_priv->lock, flags);
959                         list_del(&mad_send_wr->agent_list);
960                         spin_unlock_irqrestore(&mad_agent_priv->lock, flags);
961                         atomic_dec(&mad_agent_priv->refcount);
962                         goto error;
963                 }
964         }
965         return 0;
966 error:
967         if (bad_send_buf)
968                 *bad_send_buf = send_buf;
969         return ret;
970 }
971 EXPORT_SYMBOL(ib_post_send_mad);
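/*
 * Editor's illustrative sketch of the send path (hypothetical names:
 * build_request() stands in for filling the MAD, and 'ah' must be an
 * address handle the caller already created).  It uses only
 * ib_create_send_mad(), the send_buf fields referenced by
 * ib_post_send_mad() above (ah, timeout_ms, retries) and
 * ib_free_send_mad(); the IB_MGMT_MAD_HDR/IB_MGMT_MAD_DATA sizes are
 * assumed to come from ib_mad.h.
 *
 *      send_buf = ib_create_send_mad(agent, remote_qpn, pkey_index, 0,
 *                                    IB_MGMT_MAD_HDR, IB_MGMT_MAD_DATA,
 *                                    GFP_KERNEL);
 *      if (IS_ERR(send_buf))
 *              return PTR_ERR(send_buf);
 *      build_request(send_buf->mad);
 *      send_buf->ah = ah;
 *      send_buf->timeout_ms = 100;
 *      send_buf->retries = 2;
 *      ret = ib_post_send_mad(send_buf, NULL);
 *      if (ret)
 *              ib_free_send_mad(send_buf);
 *
 * On success the buffer is owned by the MAD layer until the agent's
 * send_handler runs, which is where ib_free_send_mad() is normally called.
 */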
972
973 /*
974  * ib_free_recv_mad - Returns data buffers used to receive
975  *  a MAD to the access layer
976  */
977 void ib_free_recv_mad(struct ib_mad_recv_wc *mad_recv_wc)
978 {
979         struct ib_mad_recv_buf *mad_recv_buf, *temp_recv_buf;
980         struct ib_mad_private_header *mad_priv_hdr;
981         struct ib_mad_private *priv;
982         struct list_head free_list;
983
984         INIT_LIST_HEAD(&free_list);
985         list_splice_init(&mad_recv_wc->rmpp_list, &free_list);
986
987         list_for_each_entry_safe(mad_recv_buf, temp_recv_buf,
988                                         &free_list, list) {
989                 mad_recv_wc = container_of(mad_recv_buf, struct ib_mad_recv_wc,
990                                            recv_buf);
991                 mad_priv_hdr = container_of(mad_recv_wc,
992                                             struct ib_mad_private_header,
993                                             recv_wc);
994                 priv = container_of(mad_priv_hdr, struct ib_mad_private,
995                                     header);
996                 kmem_cache_free(ib_mad_cache, priv);
997         }
998 }
999 EXPORT_SYMBOL(ib_free_recv_mad);
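/*
 * Editor's illustrative sketch: a minimal receive handler that consumes a
 * MAD and returns its buffers via ib_free_recv_mad().  The handler and
 * process_response() names are hypothetical; the two-argument callback
 * form matches how snoop_recv() above invokes agent.recv_handler.
 *
 *      static void my_recv_handler(struct ib_mad_agent *mad_agent,
 *                                  struct ib_mad_recv_wc *mad_recv_wc)
 *      {
 *              process_response(mad_recv_wc->recv_buf.mad);
 *              ib_free_recv_mad(mad_recv_wc);
 *      }
 */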
1000
1001 struct ib_mad_agent *ib_redirect_mad_qp(struct ib_qp *qp,
1002                                         u8 rmpp_version,
1003                                         ib_mad_send_handler send_handler,
1004                                         ib_mad_recv_handler recv_handler,
1005                                         void *context)
1006 {
1007         return ERR_PTR(-EINVAL);        /* XXX: for now */
1008 }
1009 EXPORT_SYMBOL(ib_redirect_mad_qp);
1010
1011 int ib_process_mad_wc(struct ib_mad_agent *mad_agent,
1012                       struct ib_wc *wc)
1013 {
1014         printk(KERN_ERR PFX "ib_process_mad_wc() not implemented yet\n");
1015         return 0;
1016 }
1017 EXPORT_SYMBOL(ib_process_mad_wc);
1018
1019 static int method_in_use(struct ib_mad_mgmt_method_table **method,
1020                          struct ib_mad_reg_req *mad_reg_req)
1021 {
1022         int i;
1023
1024         for (i = find_first_bit(mad_reg_req->method_mask, IB_MGMT_MAX_METHODS);
1025              i < IB_MGMT_MAX_METHODS;
1026              i = find_next_bit(mad_reg_req->method_mask, IB_MGMT_MAX_METHODS,
1027                                1+i)) {
1028                 if ((*method)->agent[i]) {
1029                         printk(KERN_ERR PFX "Method %d already in use\n", i);
1030                         return -EINVAL;
1031                 }
1032         }
1033         return 0;
1034 }
1035
1036 static int allocate_method_table(struct ib_mad_mgmt_method_table **method)
1037 {
1038         /* Allocate management method table */
1039         *method = kzalloc(sizeof **method, GFP_ATOMIC);
1040         if (!*method) {
1041                 printk(KERN_ERR PFX "No memory for "
1042                        "ib_mad_mgmt_method_table\n");
1043                 return -ENOMEM;
1044         }
1045
1046         return 0;
1047 }
1048
1049 /*
1050  * Check to see if there are any methods still in use
1051  */
1052 static int check_method_table(struct ib_mad_mgmt_method_table *method)
1053 {
1054         int i;
1055
1056         for (i = 0; i < IB_MGMT_MAX_METHODS; i++)
1057                 if (method->agent[i])
1058                         return 1;
1059         return 0;
1060 }
1061
1062 /*
1063  * Check to see if there are any method tables for this class still in use
1064  */
1065 static int check_class_table(struct ib_mad_mgmt_class_table *class)
1066 {
1067         int i;
1068
1069         for (i = 0; i < MAX_MGMT_CLASS; i++)
1070                 if (class->method_table[i])
1071                         return 1;
1072         return 0;
1073 }
1074
1075 static int check_vendor_class(struct ib_mad_mgmt_vendor_class *vendor_class)
1076 {
1077         int i;
1078
1079         for (i = 0; i < MAX_MGMT_OUI; i++)
1080                 if (vendor_class->method_table[i])
1081                         return 1;
1082         return 0;
1083 }
1084
1085 static int find_vendor_oui(struct ib_mad_mgmt_vendor_class *vendor_class,
1086                            char *oui)
1087 {
1088         int i;
1089
1090         for (i = 0; i < MAX_MGMT_OUI; i++)
1091                 /* Is there a matching OUI for this vendor class? */
1092                 if (!memcmp(vendor_class->oui[i], oui, 3))
1093                         return i;
1094
1095         return -1;
1096 }
1097
1098 static int check_vendor_table(struct ib_mad_mgmt_vendor_class_table *vendor)
1099 {
1100         int i;
1101
1102         for (i = 0; i < MAX_MGMT_VENDOR_RANGE2; i++)
1103                 if (vendor->vendor_class[i])
1104                         return 1;
1105
1106         return 0;
1107 }
1108
1109 static void remove_methods_mad_agent(struct ib_mad_mgmt_method_table *method,
1110                                      struct ib_mad_agent_private *agent)
1111 {
1112         int i;
1113
1114         /* Remove any methods for this mad agent */
1115         for (i = 0; i < IB_MGMT_MAX_METHODS; i++) {
1116                 if (method->agent[i] == agent) {
1117                         method->agent[i] = NULL;
1118                 }
1119         }
1120 }
1121
1122 static int add_nonoui_reg_req(struct ib_mad_reg_req *mad_reg_req,
1123                               struct ib_mad_agent_private *agent_priv,
1124                               u8 mgmt_class)
1125 {
1126         struct ib_mad_port_private *port_priv;
1127         struct ib_mad_mgmt_class_table **class;
1128         struct ib_mad_mgmt_method_table **method;
1129         int i, ret;
1130
1131         port_priv = agent_priv->qp_info->port_priv;
1132         class = &port_priv->version[mad_reg_req->mgmt_class_version].class;
1133         if (!*class) {
1134                 /* Allocate management class table for "new" class version */
1135                 *class = kzalloc(sizeof **class, GFP_ATOMIC);
1136                 if (!*class) {
1137                         printk(KERN_ERR PFX "No memory for "
1138                                "ib_mad_mgmt_class_table\n");
1139                         ret = -ENOMEM;
1140                         goto error1;
1141                 }
1142
1143                 /* Allocate method table for this management class */
1144                 method = &(*class)->method_table[mgmt_class];
1145                 if ((ret = allocate_method_table(method)))
1146                         goto error2;
1147         } else {
1148                 method = &(*class)->method_table[mgmt_class];
1149                 if (!*method) {
1150                         /* Allocate method table for this management class */
1151                         if ((ret = allocate_method_table(method)))
1152                                 goto error1;
1153                 }
1154         }
1155
1156         /* Now, make sure methods are not already in use */
1157         if (method_in_use(method, mad_reg_req))
1158                 goto error3;
1159
1160         /* Finally, add in methods being registered */
1161         for (i = find_first_bit(mad_reg_req->method_mask,
1162                                 IB_MGMT_MAX_METHODS);
1163              i < IB_MGMT_MAX_METHODS;
1164              i = find_next_bit(mad_reg_req->method_mask, IB_MGMT_MAX_METHODS,
1165                                1+i)) {
1166                 (*method)->agent[i] = agent_priv;
1167         }
1168         return 0;
1169
1170 error3:
1171         /* Remove any methods for this mad agent */
1172         remove_methods_mad_agent(*method, agent_priv);
1173         /* Now, check to see if there are any methods in use */
1174         if (!check_method_table(*method)) {
1175                 /* If not, release management method table */
1176                 kfree(*method);
1177                 *method = NULL;
1178         }
1179         ret = -EINVAL;
1180         goto error1;
1181 error2:
1182         kfree(*class);
1183         *class = NULL;
1184 error1:
1185         return ret;
1186 }
1187
1188 static int add_oui_reg_req(struct ib_mad_reg_req *mad_reg_req,
1189                            struct ib_mad_agent_private *agent_priv)
1190 {
1191         struct ib_mad_port_private *port_priv;
1192         struct ib_mad_mgmt_vendor_class_table **vendor_table;
1193         struct ib_mad_mgmt_vendor_class_table *vendor = NULL;
1194         struct ib_mad_mgmt_vendor_class *vendor_class = NULL;
1195         struct ib_mad_mgmt_method_table **method;
1196         int i, ret = -ENOMEM;
1197         u8 vclass;
1198
1199         /* "New" vendor (with OUI) class */
1200         vclass = vendor_class_index(mad_reg_req->mgmt_class);
1201         port_priv = agent_priv->qp_info->port_priv;
1202         vendor_table = &port_priv->version[
1203                                 mad_reg_req->mgmt_class_version].vendor;
1204         if (!*vendor_table) {
1205                 /* Allocate mgmt vendor class table for "new" class version */
1206                 vendor = kzalloc(sizeof *vendor, GFP_ATOMIC);
1207                 if (!vendor) {
1208                         printk(KERN_ERR PFX "No memory for "
1209                                "ib_mad_mgmt_vendor_class_table\n");
1210                         goto error1;
1211                 }
1212
1213                 *vendor_table = vendor;
1214         }
1215         if (!(*vendor_table)->vendor_class[vclass]) {
1216                 /* Allocate table for this management vendor class */
1217                 vendor_class = kzalloc(sizeof *vendor_class, GFP_ATOMIC);
1218                 if (!vendor_class) {
1219                         printk(KERN_ERR PFX "No memory for "
1220                                "ib_mad_mgmt_vendor_class\n");
1221                         goto error2;
1222                 }
1223
1224                 (*vendor_table)->vendor_class[vclass] = vendor_class;
1225         }
1226         for (i = 0; i < MAX_MGMT_OUI; i++) {
1227                 /* Is there a matching OUI for this vendor class? */
1228                 if (!memcmp((*vendor_table)->vendor_class[vclass]->oui[i],
1229                             mad_reg_req->oui, 3)) {
1230                         method = &(*vendor_table)->vendor_class[
1231                                                 vclass]->method_table[i];
1232                         BUG_ON(!*method);
1233                         goto check_in_use;
1234                 }
1235         }
1236         for (i = 0; i < MAX_MGMT_OUI; i++) {
1237                 /* OUI slot available ? */
1238                 if (!is_vendor_oui((*vendor_table)->vendor_class[
1239                                 vclass]->oui[i])) {
1240                         method = &(*vendor_table)->vendor_class[
1241                                 vclass]->method_table[i];
1242                         BUG_ON(*method);
1243                         /* Allocate method table for this OUI */
1244                         if ((ret = allocate_method_table(method)))
1245                                 goto error3;
1246                         memcpy((*vendor_table)->vendor_class[vclass]->oui[i],
1247                                mad_reg_req->oui, 3);
1248                         goto check_in_use;
1249                 }
1250         }
1251         printk(KERN_ERR PFX "All OUI slots in use\n");
1252         goto error3;
1253
1254 check_in_use:
1255         /* Now, make sure methods are not already in use */
1256         if (method_in_use(method, mad_reg_req))
1257                 goto error4;
1258
1259         /* Finally, add in methods being registered */
1260         for (i = find_first_bit(mad_reg_req->method_mask,
1261                                 IB_MGMT_MAX_METHODS);
1262              i < IB_MGMT_MAX_METHODS;
1263              i = find_next_bit(mad_reg_req->method_mask, IB_MGMT_MAX_METHODS,
1264                                1+i)) {
1265                 (*method)->agent[i] = agent_priv;
1266         }
1267         return 0;
1268
1269 error4:
1270         /* Remove any methods for this mad agent */
1271         remove_methods_mad_agent(*method, agent_priv);
1272         /* Now, check to see if there are any methods in use */
1273         if (!check_method_table(*method)) {
1274                 /* If not, release management method table */
1275                 kfree(*method);
1276                 *method = NULL;
1277         }
1278         ret = -EINVAL;
1279 error3:
1280         if (vendor_class) {
1281                 (*vendor_table)->vendor_class[vclass] = NULL;
1282                 kfree(vendor_class);
1283         }
1284 error2:
1285         if (vendor) {
1286                 *vendor_table = NULL;
1287                 kfree(vendor);
1288         }
1289 error1:
1290         return ret;
1291 }
1292
1293 static void remove_mad_reg_req(struct ib_mad_agent_private *agent_priv)
1294 {
1295         struct ib_mad_port_private *port_priv;
1296         struct ib_mad_mgmt_class_table *class;
1297         struct ib_mad_mgmt_method_table *method;
1298         struct ib_mad_mgmt_vendor_class_table *vendor;
1299         struct ib_mad_mgmt_vendor_class *vendor_class;
1300         int index;
1301         u8 mgmt_class;
1302
1303         /*
1304          * Was MAD registration request supplied
1305          * with original registration ?
1306          */
1307         if (!agent_priv->reg_req) {
1308                 goto out;
1309         }
1310
1311         port_priv = agent_priv->qp_info->port_priv;
1312         mgmt_class = convert_mgmt_class(agent_priv->reg_req->mgmt_class);
1313         class = port_priv->version[
1314                         agent_priv->reg_req->mgmt_class_version].class;
1315         if (!class)
1316                 goto vendor_check;
1317
1318         method = class->method_table[mgmt_class];
1319         if (method) {
1320                 /* Remove any methods for this mad agent */
1321                 remove_methods_mad_agent(method, agent_priv);
1322                 /* Now, check to see if there are any methods still in use */
1323                 if (!check_method_table(method)) {
1324                         /* If not, release management method table */
1325                         kfree(method);
1326                         class->method_table[mgmt_class] = NULL;
1327                         /* Any management classes left? */
1328                         if (!check_class_table(class)) {
1329                                 /* If not, release management class table */
1330                                 kfree(class);
1331                                 port_priv->version[
1332                                         agent_priv->reg_req->
1333                                         mgmt_class_version].class = NULL;
1334                         }
1335                 }
1336         }
1337
1338 vendor_check:
1339         if (!is_vendor_class(mgmt_class))
1340                 goto out;
1341
1342         /* normalize mgmt_class to vendor range 2 */
1343         mgmt_class = vendor_class_index(agent_priv->reg_req->mgmt_class);
1344         vendor = port_priv->version[
1345                         agent_priv->reg_req->mgmt_class_version].vendor;
1346
1347         if (!vendor)
1348                 goto out;
1349
1350         vendor_class = vendor->vendor_class[mgmt_class];
1351         if (vendor_class) {
1352                 index = find_vendor_oui(vendor_class, agent_priv->reg_req->oui);
1353                 if (index < 0)
1354                         goto out;
1355                 method = vendor_class->method_table[index];
1356                 if (method) {
1357                         /* Remove any methods for this mad agent */
1358                         remove_methods_mad_agent(method, agent_priv);
1359                         /*
1360                          * Now, check to see if there are
1361                          * any methods still in use
1362                          */
1363                         if (!check_method_table(method)) {
1364                                 /* If not, release management method table */
1365                                 kfree(method);
1366                                 vendor_class->method_table[index] = NULL;
1367                                 memset(vendor_class->oui[index], 0, 3);
1368                                 /* Any OUIs left? */
1369                                 if (!check_vendor_class(vendor_class)) {
1370                                         /* If not, release vendor class table */
1371                                         kfree(vendor_class);
1372                                         vendor->vendor_class[mgmt_class] = NULL;
1373                                         /* Any other vendor classes left? */
1374                                         if (!check_vendor_table(vendor)) {
1375                                                 kfree(vendor);
1376                                                 port_priv->version[
1377                                                         agent_priv->reg_req->
1378                                                         mgmt_class_version].
1379                                                         vendor = NULL;
1380                                         }
1381                                 }
1382                         }
1383                 }
1384         }
1385
1386 out:
1387         return;
1388 }
1389
1390 static struct ib_mad_agent_private *
1391 find_mad_agent(struct ib_mad_port_private *port_priv,
1392                struct ib_mad *mad)
1393 {
1394         struct ib_mad_agent_private *mad_agent = NULL;
1395         unsigned long flags;
1396
1397         spin_lock_irqsave(&port_priv->reg_lock, flags);
1398         if (response_mad(mad)) {
1399                 u32 hi_tid;
1400                 struct ib_mad_agent_private *entry;
1401
1402                 /*
1403                  * Routing is based on the high 32 bits of the
1404                  * transaction ID of the MAD.
1405                  */
1406                 hi_tid = be64_to_cpu(mad->mad_hdr.tid) >> 32;
1407                 list_for_each_entry(entry, &port_priv->agent_list, agent_list) {
1408                         if (entry->agent.hi_tid == hi_tid) {
1409                                 mad_agent = entry;
1410                                 break;
1411                         }
1412                 }
1413         } else {
1414                 struct ib_mad_mgmt_class_table *class;
1415                 struct ib_mad_mgmt_method_table *method;
1416                 struct ib_mad_mgmt_vendor_class_table *vendor;
1417                 struct ib_mad_mgmt_vendor_class *vendor_class;
1418                 struct ib_vendor_mad *vendor_mad;
1419                 int index;
1420
1421                 /*
1422                  * Routing is based on version, class, and method;
1423                  * for "newer" vendor MADs, also on the OUI.
1424                  */
1425                 if (mad->mad_hdr.class_version >= MAX_MGMT_VERSION)
1426                         goto out;
1427                 if (!is_vendor_class(mad->mad_hdr.mgmt_class)) {
1428                         class = port_priv->version[
1429                                         mad->mad_hdr.class_version].class;
1430                         if (!class)
1431                                 goto out;
1432                         method = class->method_table[convert_mgmt_class(
1433                                                         mad->mad_hdr.mgmt_class)];
1434                         if (method)
1435                                 mad_agent = method->agent[mad->mad_hdr.method &
1436                                                           ~IB_MGMT_METHOD_RESP];
1437                 } else {
1438                         vendor = port_priv->version[
1439                                         mad->mad_hdr.class_version].vendor;
1440                         if (!vendor)
1441                                 goto out;
1442                         vendor_class = vendor->vendor_class[vendor_class_index(
1443                                                 mad->mad_hdr.mgmt_class)];
1444                         if (!vendor_class)
1445                                 goto out;
1446                         /* Find matching OUI */
1447                         vendor_mad = (struct ib_vendor_mad *)mad;
1448                         index = find_vendor_oui(vendor_class, vendor_mad->oui);
1449                         if (index < 0)
1450                                 goto out;
1451                         method = vendor_class->method_table[index];
1452                         if (method) {
1453                                 mad_agent = method->agent[mad->mad_hdr.method &
1454                                                           ~IB_MGMT_METHOD_RESP];
1455                         }
1456                 }
1457         }
1458
1459         if (mad_agent) {
1460                 if (mad_agent->agent.recv_handler)
1461                         atomic_inc(&mad_agent->refcount);
1462                 else {
1463                         printk(KERN_NOTICE PFX "No receive handler for client "
1464                                "%p on port %d\n",
1465                                &mad_agent->agent, port_priv->port_num);
1466                         mad_agent = NULL;
1467                 }
1468         }
1469 out:
1470         spin_unlock_irqrestore(&port_priv->reg_lock, flags);
1471
1472         return mad_agent;
1473 }
1474
1475 static int validate_mad(struct ib_mad *mad, u32 qp_num)
1476 {
1477         int valid = 0;
1478
1479         /* Make sure MAD base version is understood */
1480         if (mad->mad_hdr.base_version != IB_MGMT_BASE_VERSION) {
1481                 printk(KERN_ERR PFX "MAD received with unsupported base "
1482                        "version %d\n", mad->mad_hdr.base_version);
1483                 goto out;
1484         }
1485
1486         /* Filter SMI packets sent to other than QP0 */
1487         if ((mad->mad_hdr.mgmt_class == IB_MGMT_CLASS_SUBN_LID_ROUTED) ||
1488             (mad->mad_hdr.mgmt_class == IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE)) {
1489                 if (qp_num == 0)
1490                         valid = 1;
1491         } else {
1492                 /* Filter GSI packets sent to QP0 */
1493                 if (qp_num != 0)
1494                         valid = 1;
1495         }
1496
1497 out:
1498         return valid;
1499 }
1500
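     /*
      * A send buffer counts as a data MAD if the agent does not use RMPP,
      * the MAD's RMPP active flag is not set, or the RMPP segment type is
      * RMPP DATA.
      */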
1501 static int is_data_mad(struct ib_mad_agent_private *mad_agent_priv,
1502                        struct ib_mad_hdr *mad_hdr)
1503 {
1504         struct ib_rmpp_mad *rmpp_mad;
1505
1506         rmpp_mad = (struct ib_rmpp_mad *)mad_hdr;
1507         return !mad_agent_priv->agent.rmpp_version ||
1508                 !(ib_get_rmpp_flags(&rmpp_mad->rmpp_hdr) &
1509                                     IB_MGMT_RMPP_FLAG_ACTIVE) ||
1510                 (rmpp_mad->rmpp_hdr.rmpp_type == IB_MGMT_RMPP_TYPE_DATA);
1511 }
1512
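     /*
      * Find the send WR that a received response with this TID belongs to,
      * checking the wait list first and then sends still awaiting their
      * send completion.
      */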
1513 struct ib_mad_send_wr_private*
1514 ib_find_send_mad(struct ib_mad_agent_private *mad_agent_priv, __be64 tid)
1515 {
1516         struct ib_mad_send_wr_private *mad_send_wr;
1517
1518         list_for_each_entry(mad_send_wr, &mad_agent_priv->wait_list,
1519                             agent_list) {
1520                 if (mad_send_wr->tid == tid)
1521                         return mad_send_wr;
1522         }
1523
1524         /*
1525          * It's possible to receive the response before we've
1526          * been notified that the send has completed
1527          */
1528         list_for_each_entry(mad_send_wr, &mad_agent_priv->send_list,
1529                             agent_list) {
1530                 if (is_data_mad(mad_agent_priv, mad_send_wr->send_buf.mad) &&
1531                     mad_send_wr->tid == tid && mad_send_wr->timeout) {
1532                         /* Verify request has not been canceled */
1533                         return (mad_send_wr->status == IB_WC_SUCCESS) ?
1534                                 mad_send_wr : NULL;
1535                 }
1536         }
1537         return NULL;
1538 }
1539
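     /*
      * The response for this request has arrived: clear its timeout and,
      * if only a single reference on it remains (its send has completed),
      * move it to the done list.
      */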
1540 void ib_mark_mad_done(struct ib_mad_send_wr_private *mad_send_wr)
1541 {
1542         mad_send_wr->timeout = 0;
1543         if (mad_send_wr->refcount == 1) {
1544                 list_del(&mad_send_wr->agent_list);
1545                 list_add_tail(&mad_send_wr->agent_list,
1546                               &mad_send_wr->mad_agent_priv->done_list);
1547         }
1548 }
1549
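     /*
      * Deliver a completed receive to the client after any RMPP
      * reassembly.  Responses are first matched with their outstanding
      * request, which is then completed after the response has been
      * handed to the recv_handler.
      */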
1550 static void ib_mad_complete_recv(struct ib_mad_agent_private *mad_agent_priv,
1551                                  struct ib_mad_recv_wc *mad_recv_wc)
1552 {
1553         struct ib_mad_send_wr_private *mad_send_wr;
1554         struct ib_mad_send_wc mad_send_wc;
1555         unsigned long flags;
1556         __be64 tid;
1557
1558         INIT_LIST_HEAD(&mad_recv_wc->rmpp_list);
1559         list_add(&mad_recv_wc->recv_buf.list, &mad_recv_wc->rmpp_list);
1560         if (mad_agent_priv->agent.rmpp_version) {
1561                 mad_recv_wc = ib_process_rmpp_recv_wc(mad_agent_priv,
1562                                                       mad_recv_wc);
1563                 if (!mad_recv_wc) {
1564                         if (atomic_dec_and_test(&mad_agent_priv->refcount))
1565                                 wake_up(&mad_agent_priv->wait);
1566                         return;
1567                 }
1568         }
1569
1570         /* Complete corresponding request */
1571         if (response_mad(mad_recv_wc->recv_buf.mad)) {
1572                 tid = mad_recv_wc->recv_buf.mad->mad_hdr.tid;
1573                 spin_lock_irqsave(&mad_agent_priv->lock, flags);
1574                 mad_send_wr = ib_find_send_mad(mad_agent_priv, tid);
1575                 if (!mad_send_wr) {
1576                         spin_unlock_irqrestore(&mad_agent_priv->lock, flags);
1577                         ib_free_recv_mad(mad_recv_wc);
1578                         if (atomic_dec_and_test(&mad_agent_priv->refcount))
1579                                 wake_up(&mad_agent_priv->wait);
1580                         return;
1581                 }
1582                 ib_mark_mad_done(mad_send_wr);
1583                 spin_unlock_irqrestore(&mad_agent_priv->lock, flags);
1584
1585                 /* Defined behavior is to complete response before request */
1586                 mad_recv_wc->wc->wr_id = (unsigned long) &mad_send_wr->send_buf;
1587                 mad_agent_priv->agent.recv_handler(&mad_agent_priv->agent,
1588                                                    mad_recv_wc);
1589                 atomic_dec(&mad_agent_priv->refcount);
1590
1591                 mad_send_wc.status = IB_WC_SUCCESS;
1592                 mad_send_wc.vendor_err = 0;
1593                 mad_send_wc.send_buf = &mad_send_wr->send_buf;
1594                 ib_mad_complete_send_wr(mad_send_wr, &mad_send_wc);
1595         } else {
1596                 mad_agent_priv->agent.recv_handler(&mad_agent_priv->agent,
1597                                                    mad_recv_wc);
1598                 if (atomic_dec_and_test(&mad_agent_priv->refcount))
1599                         wake_up(&mad_agent_priv->wait);
1600         }
1601 }
1602
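     /*
      * Completion handler for a receive work request: unmap the buffer,
      * validate the MAD, handle directed route SMPs, give the driver its
      * "right of first refusal" via process_mad(), then hand the MAD to
      * the matching agent and post a replacement receive buffer.
      */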
1603 static void ib_mad_recv_done_handler(struct ib_mad_port_private *port_priv,
1604                                      struct ib_wc *wc)
1605 {
1606         struct ib_mad_qp_info *qp_info;
1607         struct ib_mad_private_header *mad_priv_hdr;
1608         struct ib_mad_private *recv, *response;
1609         struct ib_mad_list_head *mad_list;
1610         struct ib_mad_agent_private *mad_agent;
1611
1612         response = kmem_cache_alloc(ib_mad_cache, GFP_KERNEL);
1613         if (!response)
1614                 printk(KERN_ERR PFX "ib_mad_recv_done_handler no memory "
1615                        "for response buffer\n");
1616
1617         mad_list = (struct ib_mad_list_head *)(unsigned long)wc->wr_id;
1618         qp_info = mad_list->mad_queue->qp_info;
1619         dequeue_mad(mad_list);
1620
1621         mad_priv_hdr = container_of(mad_list, struct ib_mad_private_header,
1622                                     mad_list);
1623         recv = container_of(mad_priv_hdr, struct ib_mad_private, header);
1624         dma_unmap_single(port_priv->device->dma_device,
1625                          pci_unmap_addr(&recv->header, mapping),
1626                          sizeof(struct ib_mad_private) -
1627                          sizeof(struct ib_mad_private_header),
1628                          DMA_FROM_DEVICE);
1629
1630         /* Set up the MAD receive work completion from the "normal" work completion */
1631         recv->header.wc = *wc;
1632         recv->header.recv_wc.wc = &recv->header.wc;
1633         recv->header.recv_wc.mad_len = sizeof(struct ib_mad);
1634         recv->header.recv_wc.recv_buf.mad = &recv->mad.mad;
1635         recv->header.recv_wc.recv_buf.grh = &recv->grh;
1636
1637         if (atomic_read(&qp_info->snoop_count))
1638                 snoop_recv(qp_info, &recv->header.recv_wc, IB_MAD_SNOOP_RECVS);
1639
1640         /* Validate MAD */
1641         if (!validate_mad(&recv->mad.mad, qp_info->qp->qp_num))
1642                 goto out;
1643
1644         if (recv->mad.mad.mad_hdr.mgmt_class ==
1645             IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE) {
1646                 if (!smi_handle_dr_smp_recv(&recv->mad.smp,
1647                                             port_priv->device->node_type,
1648                                             port_priv->port_num,
1649                                             port_priv->device->phys_port_cnt))
1650                         goto out;
1651                 if (!smi_check_forward_dr_smp(&recv->mad.smp))
1652                         goto local;
1653                 if (!smi_handle_dr_smp_send(&recv->mad.smp,
1654                                             port_priv->device->node_type,
1655                                             port_priv->port_num))
1656                         goto out;
1657                 if (!smi_check_local_dr_smp(&recv->mad.smp,
1658                                             port_priv->device,
1659                                             port_priv->port_num))
1660                         goto out;
1661         }
1662
1663 local:
1664         /* Give driver "right of first refusal" on incoming MAD */
1665         if (port_priv->device->process_mad) {
1666                 int ret;
1667
1668                 if (!response) {
1669                         printk(KERN_ERR PFX "No memory for response MAD\n");
1670                         /*
1671                          * No response buffer; is it better to assume
1672                          * that the MAD would not be processed?
1673                          */
1674                         goto out;
1675                 }
1676
1677                 ret = port_priv->device->process_mad(port_priv->device, 0,
1678                                                      port_priv->port_num,
1679                                                      wc, &recv->grh,
1680                                                      &recv->mad.mad,
1681                                                      &response->mad.mad);
1682                 if (ret & IB_MAD_RESULT_SUCCESS) {
1683                         if (ret & IB_MAD_RESULT_CONSUMED)
1684                                 goto out;
1685                         if (ret & IB_MAD_RESULT_REPLY) {
1686                                 agent_send_response(&response->mad.mad,
1687                                                     &recv->grh, wc,
1688                                                     port_priv->device,
1689                                                     port_priv->port_num,
1690                                                     qp_info->qp->qp_num);
1691                                 goto out;
1692                         }
1693                 }
1694         }
1695
1696         mad_agent = find_mad_agent(port_priv, &recv->mad.mad);
1697         if (mad_agent) {
1698                 ib_mad_complete_recv(mad_agent, &recv->header.recv_wc);
1699                 /*
1700                  * recv is freed up by ib_mad_complete_recv() in error
1701                  * cases, or later via the client's recv_handler
1702                  */
1703                 recv = NULL;
1704         }
1705
1706 out:
1707         /* Post another receive request for this QP */
1708         if (response) {
1709                 ib_mad_post_receive_mads(qp_info, response);
1710                 if (recv)
1711                         kmem_cache_free(ib_mad_cache, recv);
1712         } else
1713                 ib_mad_post_receive_mads(qp_info, recv);
1714 }
1715
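     /*
      * Re-arm the timeout work after the wait list has changed: cancel it
      * when the list is empty, or pull it forward if the new head of the
      * list expires sooner than the currently scheduled timeout.
      */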
1716 static void adjust_timeout(struct ib_mad_agent_private *mad_agent_priv)
1717 {
1718         struct ib_mad_send_wr_private *mad_send_wr;
1719         unsigned long delay;
1720
1721         if (list_empty(&mad_agent_priv->wait_list)) {
1722                 cancel_delayed_work(&mad_agent_priv->timed_work);
1723         } else {
1724                 mad_send_wr = list_entry(mad_agent_priv->wait_list.next,
1725                                          struct ib_mad_send_wr_private,
1726                                          agent_list);
1727
1728                 if (time_after(mad_agent_priv->timeout,
1729                                mad_send_wr->timeout)) {
1730                         mad_agent_priv->timeout = mad_send_wr->timeout;
1731                         cancel_delayed_work(&mad_agent_priv->timed_work);
1732                         delay = mad_send_wr->timeout - jiffies;
1733                         if ((long)delay <= 0)
1734                                 delay = 1;
1735                         queue_delayed_work(mad_agent_priv->qp_info->
1736                                            port_priv->wq,
1737                                            &mad_agent_priv->timed_work, delay);
1738                 }
1739         }
1740 }
1741
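     /*
      * Move a request onto the wait list to await its response, keeping
      * the list sorted by absolute timeout, and reschedule the timeout
      * work if this request now expires first.
      */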
1742 static void wait_for_response(struct ib_mad_send_wr_private *mad_send_wr)
1743 {
1744         struct ib_mad_agent_private *mad_agent_priv;
1745         struct ib_mad_send_wr_private *temp_mad_send_wr;
1746         struct list_head *list_item;
1747         unsigned long delay;
1748
1749         mad_agent_priv = mad_send_wr->mad_agent_priv;
1750         list_del(&mad_send_wr->agent_list);
1751
1752         delay = mad_send_wr->timeout;
1753         mad_send_wr->timeout += jiffies;
1754
1755         if (delay) {
1756                 list_for_each_prev(list_item, &mad_agent_priv->wait_list) {
1757                         temp_mad_send_wr = list_entry(list_item,
1758                                                 struct ib_mad_send_wr_private,
1759                                                 agent_list);
1760                         if (time_after(mad_send_wr->timeout,
1761                                        temp_mad_send_wr->timeout))
1762                                 break;
1763                 }
1764         }
1765         } else
1767         list_add(&mad_send_wr->agent_list, list_item);
1768
1769         /* Reschedule a work item if we have a shorter timeout */
1770         if (mad_agent_priv->wait_list.next == &mad_send_wr->agent_list) {
1771                 cancel_delayed_work(&mad_agent_priv->timed_work);
1772                 queue_delayed_work(mad_agent_priv->qp_info->port_priv->wq,
1773                                    &mad_agent_priv->timed_work, delay);
1774         }
1775 }
1776
1777 void ib_reset_mad_timeout(struct ib_mad_send_wr_private *mad_send_wr,
1778                           int timeout_ms)
1779 {
1780         mad_send_wr->timeout = msecs_to_jiffies(timeout_ms);
1781         wait_for_response(mad_send_wr);
1782 }
1783
1784 /*
1785  * Process a send work completion
1786  */
1787 void ib_mad_complete_send_wr(struct ib_mad_send_wr_private *mad_send_wr,
1788                              struct ib_mad_send_wc *mad_send_wc)
1789 {
1790         struct ib_mad_agent_private     *mad_agent_priv;
1791         unsigned long                   flags;
1792         int                             ret;
1793
1794         mad_agent_priv = mad_send_wr->mad_agent_priv;
1795         spin_lock_irqsave(&mad_agent_priv->lock, flags);
1796         if (mad_agent_priv->agent.rmpp_version) {
1797                 ret = ib_process_rmpp_send_wc(mad_send_wr, mad_send_wc);
1798                 if (ret == IB_RMPP_RESULT_CONSUMED)
1799                         goto done;
1800         } else
1801                 ret = IB_RMPP_RESULT_UNHANDLED;
1802
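             /*
              * Record the first error and, if a response was still
              * expected, drop the reference being held for it.
              */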
1803         if (mad_send_wc->status != IB_WC_SUCCESS &&
1804             mad_send_wr->status == IB_WC_SUCCESS) {
1805                 mad_send_wr->status = mad_send_wc->status;
1806                 mad_send_wr->refcount -= (mad_send_wr->timeout > 0);
1807         }
1808
1809         if (--mad_send_wr->refcount > 0) {
1810                 if (mad_send_wr->refcount == 1 && mad_send_wr->timeout &&
1811                     mad_send_wr->status == IB_WC_SUCCESS) {
1812                         wait_for_response(mad_send_wr);
1813                 }
1814                 goto done;
1815         }
1816
1817         /* Remove send from MAD agent and notify client of completion */
1818         list_del(&mad_send_wr->agent_list);
1819         adjust_timeout(mad_agent_priv);
1820         spin_unlock_irqrestore(&mad_agent_priv->lock, flags);
1821
1822         if (mad_send_wr->status != IB_WC_SUCCESS)
1823                 mad_send_wc->status = mad_send_wr->status;
1824         if (ret == IB_RMPP_RESULT_INTERNAL)
1825                 ib_rmpp_send_handler(mad_send_wc);
1826         else
1827                 mad_agent_priv->agent.send_handler(&mad_agent_priv->agent,
1828                                                    mad_send_wc);
1829
1830         /* Release reference on agent taken when sending */
1831         if (atomic_dec_and_test(&mad_agent_priv->refcount))
1832                 wake_up(&mad_agent_priv->wait);
1833         return;
1834 done:
1835         spin_unlock_irqrestore(&mad_agent_priv->lock, flags);
1836 }
1837
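     /*
      * Completion handler for a send work request: unmap the payload,
      * move the next overflowed send onto the hardware send queue, report
      * the completion, and post the queued send, completing it in error
      * if the post fails.
      */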
1838 static void ib_mad_send_done_handler(struct ib_mad_port_private *port_priv,
1839                                      struct ib_wc *wc)
1840 {
1841         struct ib_mad_send_wr_private   *mad_send_wr, *queued_send_wr;
1842         struct ib_mad_list_head         *mad_list;
1843         struct ib_mad_qp_info           *qp_info;
1844         struct ib_mad_queue             *send_queue;
1845         struct ib_send_wr               *bad_send_wr;
1846         struct ib_mad_send_wc           mad_send_wc;
1847         unsigned long flags;
1848         int ret;
1849
1850         mad_list = (struct ib_mad_list_head *)(unsigned long)wc->wr_id;
1851         mad_send_wr = container_of(mad_list, struct ib_mad_send_wr_private,
1852                                    mad_list);
1853         send_queue = mad_list->mad_queue;
1854         qp_info = send_queue->qp_info;
1855
1856 retry:
1857         dma_unmap_single(mad_send_wr->send_buf.mad_agent->device->dma_device,
1858                          pci_unmap_addr(mad_send_wr, mapping),
1859                          mad_send_wr->sg_list[0].length, DMA_TO_DEVICE);
1860         queued_send_wr = NULL;
1861         spin_lock_irqsave(&send_queue->lock, flags);
1862         list_del(&mad_list->list);
1863
1864         /* Move a queued send from the overflow list to the send queue */
1865         if (send_queue->count-- > send_queue->max_active) {
1866                 mad_list = container_of(qp_info->overflow_list.next,
1867                                         struct ib_mad_list_head, list);
1868                 queued_send_wr = container_of(mad_list,
1869                                         struct ib_mad_send_wr_private,
1870                                         mad_list);
1871                 list_del(&mad_list->list);
1872                 list_add_tail(&mad_list->list, &send_queue->list);
1873         }
1874         spin_unlock_irqrestore(&send_queue->lock, flags);
1875
1876         mad_send_wc.send_buf = &mad_send_wr->send_buf;
1877         mad_send_wc.status = wc->status;
1878         mad_send_wc.vendor_err = wc->vendor_err;
1879         if (atomic_read(&qp_info->snoop_count))
1880                 snoop_send(qp_info, &mad_send_wr->send_buf, &mad_send_wc,
1881                            IB_MAD_SNOOP_SEND_COMPLETIONS);
1882         ib_mad_complete_send_wr(mad_send_wr, &mad_send_wc);
1883
1884         if (queued_send_wr) {
1885                 ret = ib_post_send(qp_info->qp, &queued_send_wr->send_wr,
1886                                    &bad_send_wr);
1887                 if (ret) {
1888                         printk(KERN_ERR PFX "ib_post_send failed: %d\n", ret);
1889                         mad_send_wr = queued_send_wr;
1890                         wc->status = IB_WC_LOC_QP_OP_ERR;
1891                         goto retry;
1892                 }
1893         }
1894 }
1895
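     /*
      * Flag every send still on the send queue so that the flush
      * completions generated after a QP error cause them to be reposted.
      */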
1896 static void mark_sends_for_retry(struct ib_mad_qp_info *qp_info)
1897 {
1898         struct ib_mad_send_wr_private *mad_send_wr;
1899         struct ib_mad_list_head *mad_list;
1900         unsigned long flags;
1901
1902         spin_lock_irqsave(&qp_info->send_queue.lock, flags);
1903         list_for_each_entry(mad_list, &qp_info->send_queue.list, list) {
1904                 mad_send_wr = container_of(mad_list,
1905                                            struct ib_mad_send_wr_private,
1906                                            mad_list);
1907                 mad_send_wr->retry = 1;
1908         }
1909         spin_unlock_irqrestore(&qp_info->send_queue.lock, flags);
1910 }
1911
1912 static void mad_error_handler(struct ib_mad_port_private *port_priv,
1913                               struct ib_wc *wc)
1914 {
1915         struct ib_mad_list_head *mad_list;
1916         struct ib_mad_qp_info *qp_info;
1917         struct ib_mad_send_wr_private *mad_send_wr;
1918         int ret;
1919
1920         /* Determine if failure was a send or receive */
1921         mad_list = (struct ib_mad_list_head *)(unsigned long)wc->wr_id;
1922         qp_info = mad_list->mad_queue->qp_info;
1923         if (mad_list->mad_queue == &qp_info->recv_queue)
1924                 /*
1925                  * Receive errors indicate that the QP has entered the error
1926                  * state - error handling/shutdown code will clean up
1927                  */
1928                 return;
1929
1930         /*
1931          * Send errors will transition the QP to SQE - move
1932          * QP to RTS and repost flushed work requests
1933          */
1934         mad_send_wr = container_of(mad_list, struct ib_mad_send_wr_private,
1935                                    mad_list);
1936         if (wc->status == IB_WC_WR_FLUSH_ERR) {
1937                 if (mad_send_wr->retry) {
1938                         /* Repost send */
1939                         struct ib_send_wr *bad_send_wr;
1940
1941                         mad_send_wr->retry = 0;
1942                         ret = ib_post_send(qp_info->qp, &mad_send_wr->send_wr,
1943                                         &bad_send_wr);
1944                         if (ret)
1945                                 ib_mad_send_done_handler(port_priv, wc);
1946                 } else
1947                         ib_mad_send_done_handler(port_priv, wc);
1948         } else {
1949                 struct ib_qp_attr *attr;
1950
1951                 /* Transition QP to RTS and fail offending send */
1952                 attr = kmalloc(sizeof *attr, GFP_KERNEL);
1953                 if (attr) {
1954                         attr->qp_state = IB_QPS_RTS;
1955                         attr->cur_qp_state = IB_QPS_SQE;
1956                         ret = ib_modify_qp(qp_info->qp, attr,
1957                                            IB_QP_STATE | IB_QP_CUR_STATE);
1958                         kfree(attr);
1959                         if (ret)
1960                                 printk(KERN_ERR PFX "mad_error_handler - "
1961                                        "ib_modify_qp to RTS: %d\n", ret);
1962                         else
1963                                 mark_sends_for_retry(qp_info);
1964                 }
1965                 ib_mad_send_done_handler(port_priv, wc);
1966         }
1967 }
1968
1969 /*
1970  * IB MAD completion callback
1971  */
1972 static void ib_mad_completion_handler(void *data)
1973 {
1974         struct ib_mad_port_private *port_priv;
1975         struct ib_wc wc;
1976
1977         port_priv = (struct ib_mad_port_private *)data;
1978         ib_req_notify_cq(port_priv->cq, IB_CQ_NEXT_COMP);
1979
1980         while (ib_poll_cq(port_priv->cq, 1, &wc) == 1) {
1981                 if (wc.status == IB_WC_SUCCESS) {
1982                         switch (wc.opcode) {
1983                         case IB_WC_SEND:
1984                                 ib_mad_send_done_handler(port_priv, &wc);
1985                                 break;
1986                         case IB_WC_RECV:
1987                                 ib_mad_recv_done_handler(port_priv, &wc);
1988                                 break;
1989                         default:
1990                                 BUG_ON(1);
1991                                 break;
1992                         }
1993                 } else
1994                         mad_error_handler(port_priv, &wc);
1995         }
1996 }
1997
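     /*
      * Cancel everything this agent has outstanding: mark active sends as
      * flushed and complete all waiting and local requests with a flush
      * error status.
      */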
1998 static void cancel_mads(struct ib_mad_agent_private *mad_agent_priv)
1999 {
2000         unsigned long flags;
2001         struct ib_mad_send_wr_private *mad_send_wr, *temp_mad_send_wr;
2002         struct ib_mad_send_wc mad_send_wc;
2003         struct list_head cancel_list;
2004
2005         INIT_LIST_HEAD(&cancel_list);
2006
2007         spin_lock_irqsave(&mad_agent_priv->lock, flags);
2008         list_for_each_entry_safe(mad_send_wr, temp_mad_send_wr,
2009                                  &mad_agent_priv->send_list, agent_list) {
2010                 if (mad_send_wr->status == IB_WC_SUCCESS) {
2011                         mad_send_wr->status = IB_WC_WR_FLUSH_ERR;
2012                         mad_send_wr->refcount -= (mad_send_wr->timeout > 0);
2013                 }
2014         }
2015
2016         /* Empty wait list to prevent receives from finding a request */
2017         list_splice_init(&mad_agent_priv->wait_list, &cancel_list);
2018         /* Empty local completion list as well */
2019         list_splice_init(&mad_agent_priv->local_list, &cancel_list);
2020         spin_unlock_irqrestore(&mad_agent_priv->lock, flags);
2021
2022         /* Report all cancelled requests */
2023         mad_send_wc.status = IB_WC_WR_FLUSH_ERR;
2024         mad_send_wc.vendor_err = 0;
2025
2026         list_for_each_entry_safe(mad_send_wr, temp_mad_send_wr,
2027                                  &cancel_list, agent_list) {
2028                 mad_send_wc.send_buf = &mad_send_wr->send_buf;
2029                 list_del(&mad_send_wr->agent_list);
2030                 mad_agent_priv->agent.send_handler(&mad_agent_priv->agent,
2031                                                    &mad_send_wc);
2032                 atomic_dec(&mad_agent_priv->refcount);
2033         }
2034 }
2035
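     /*
      * Find the send WR that owns the given send buffer, on either the
      * wait list or the send list.
      */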
2036 static struct ib_mad_send_wr_private*
2037 find_send_wr(struct ib_mad_agent_private *mad_agent_priv,
2038              struct ib_mad_send_buf *send_buf)
2039 {
2040         struct ib_mad_send_wr_private *mad_send_wr;
2041
2042         list_for_each_entry(mad_send_wr, &mad_agent_priv->wait_list,
2043                             agent_list) {
2044                 if (&mad_send_wr->send_buf == send_buf)
2045                         return mad_send_wr;
2046         }
2047
2048         list_for_each_entry(mad_send_wr, &mad_agent_priv->send_list,
2049                             agent_list) {
2050                 if (is_data_mad(mad_agent_priv, mad_send_wr->send_buf.mad) &&
2051                     &mad_send_wr->send_buf == send_buf)
2052                         return mad_send_wr;
2053         }
2054         return NULL;
2055 }
2056
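     /*
      * Modify the timeout of an outstanding send.  A timeout of zero marks
      * the request as cancelled, which is how ib_cancel_mad() is built on
      * top of this routine.
      */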
2057 int ib_modify_mad(struct ib_mad_agent *mad_agent,
2058                   struct ib_mad_send_buf *send_buf, u32 timeout_ms)
2059 {
2060         struct ib_mad_agent_private *mad_agent_priv;
2061         struct ib_mad_send_wr_private *mad_send_wr;
2062         unsigned long flags;
2063         int active;
2064
2065         mad_agent_priv = container_of(mad_agent, struct ib_mad_agent_private,
2066                                       agent);
2067         spin_lock_irqsave(&mad_agent_priv->lock, flags);
2068         mad_send_wr = find_send_wr(mad_agent_priv, send_buf);
2069         if (!mad_send_wr || mad_send_wr->status != IB_WC_SUCCESS) {
2070                 spin_unlock_irqrestore(&mad_agent_priv->lock, flags);
2071                 return -EINVAL;
2072         }
2073
2074         active = (!mad_send_wr->timeout || mad_send_wr->refcount > 1);
2075         if (!timeout_ms) {
2076                 mad_send_wr->status = IB_WC_WR_FLUSH_ERR;
2077                 mad_send_wr->refcount -= (mad_send_wr->timeout > 0);
2078         }
2079
2080         mad_send_wr->send_buf.timeout_ms = timeout_ms;
2081         if (active)
2082                 mad_send_wr->timeout = msecs_to_jiffies(timeout_ms);
2083         else
2084                 ib_reset_mad_timeout(mad_send_wr, timeout_ms);
2085
2086         spin_unlock_irqrestore(&mad_agent_priv->lock, flags);
2087         return 0;
2088 }
2089 EXPORT_SYMBOL(ib_modify_mad);
2090
2091 void ib_cancel_mad(struct ib_mad_agent *mad_agent,
2092                    struct ib_mad_send_buf *send_buf)
2093 {
2094         ib_modify_mad(mad_agent, send_buf, 0);
2095 }
2096 EXPORT_SYMBOL(ib_cancel_mad);
2097
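     /*
      * Work handler for MADs that were processed locally (loopback):
      * deliver the receive side to the destination agent, then complete
      * the send back to the originating client.
      */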
2098 static void local_completions(void *data)
2099 {
2100         struct ib_mad_agent_private *mad_agent_priv;
2101         struct ib_mad_local_private *local;
2102         struct ib_mad_agent_private *recv_mad_agent;
2103         unsigned long flags;
2104         int recv = 0;
2105         struct ib_wc wc;
2106         struct ib_mad_send_wc mad_send_wc;
2107
2108         mad_agent_priv = (struct ib_mad_agent_private *)data;
2109
2110         spin_lock_irqsave(&mad_agent_priv->lock, flags);
2111         while (!list_empty(&mad_agent_priv->local_list)) {
2112                 local = list_entry(mad_agent_priv->local_list.next,
2113                                    struct ib_mad_local_private,
2114                                    completion_list);
2115                 spin_unlock_irqrestore(&mad_agent_priv->lock, flags);
2116                 if (local->mad_priv) {
2117                         recv_mad_agent = local->recv_mad_agent;
2118                         if (!recv_mad_agent) {
2119                                 printk(KERN_ERR PFX "No receive MAD agent for local completion\n");
2120                                 goto local_send_completion;
2121                         }
2122
2123                         recv = 1;
2124                         /*
2125                          * Defined behavior is to complete response
2126                          * before request
2127                          */
2128                         build_smp_wc((unsigned long) local->mad_send_wr,
2129                                      be16_to_cpu(IB_LID_PERMISSIVE),
2130                                      0, recv_mad_agent->agent.port_num, &wc);
2131
2132                         local->mad_priv->header.recv_wc.wc = &wc;
2133                         local->mad_priv->header.recv_wc.mad_len =
2134                                                 sizeof(struct ib_mad);
2135                         INIT_LIST_HEAD(&local->mad_priv->header.recv_wc.rmpp_list);
2136                         list_add(&local->mad_priv->header.recv_wc.recv_buf.list,
2137                                  &local->mad_priv->header.recv_wc.rmpp_list);
2138                         local->mad_priv->header.recv_wc.recv_buf.grh = NULL;
2139                         local->mad_priv->header.recv_wc.recv_buf.mad =
2140                                                 &local->mad_priv->mad.mad;
2141                         if (atomic_read(&recv_mad_agent->qp_info->snoop_count))
2142                                 snoop_recv(recv_mad_agent->qp_info,
2143                                            &local->mad_priv->header.recv_wc,
2144                                            IB_MAD_SNOOP_RECVS);
2145                         recv_mad_agent->agent.recv_handler(
2146                                                 &recv_mad_agent->agent,
2147                                                 &local->mad_priv->header.recv_wc);
2148                         spin_lock_irqsave(&recv_mad_agent->lock, flags);
2149                         atomic_dec(&recv_mad_agent->refcount);
2150                         spin_unlock_irqrestore(&recv_mad_agent->lock, flags);
2151                 }
2152
2153 local_send_completion:
2154                 /* Complete send */
2155                 mad_send_wc.status = IB_WC_SUCCESS;
2156                 mad_send_wc.vendor_err = 0;
2157                 mad_send_wc.send_buf = &local->mad_send_wr->send_buf;
2158                 if (atomic_read(&mad_agent_priv->qp_info->snoop_count))
2159                         snoop_send(mad_agent_priv->qp_info,
2160                                    &local->mad_send_wr->send_buf,
2161                                    &mad_send_wc, IB_MAD_SNOOP_SEND_COMPLETIONS);
2162                 mad_agent_priv->agent.send_handler(&mad_agent_priv->agent,
2163                                                    &mad_send_wc);
2164
2165                 spin_lock_irqsave(&mad_agent_priv->lock, flags);
2166                 list_del(&local->completion_list);
2167                 atomic_dec(&mad_agent_priv->refcount);
2168                 if (!recv)
2169                         kmem_cache_free(ib_mad_cache, local->mad_priv);
2170                 kfree(local);
2171         }
2172         spin_unlock_irqrestore(&mad_agent_priv->lock, flags);
2173 }
2174
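     /*
      * Retransmit a timed-out request if it has retries left, putting it
      * back on the send list on success.
      */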
2175 static int retry_send(struct ib_mad_send_wr_private *mad_send_wr)
2176 {
2177         int ret;
2178
2179         if (!mad_send_wr->retries--)
2180                 return -ETIMEDOUT;
2181
2182         mad_send_wr->timeout = msecs_to_jiffies(mad_send_wr->send_buf.timeout_ms);
2183
2184         if (mad_send_wr->mad_agent_priv->agent.rmpp_version) {
2185                 ret = ib_retry_rmpp(mad_send_wr);
2186                 switch (ret) {
2187                 case IB_RMPP_RESULT_UNHANDLED:
2188                         ret = ib_send_mad(mad_send_wr);
2189                         break;
2190                 case IB_RMPP_RESULT_CONSUMED:
2191                         ret = 0;
2192                         break;
2193                 default:
2194                         ret = -ECOMM;
2195                         break;
2196                 }
2197         } else
2198                 ret = ib_send_mad(mad_send_wr);
2199
2200         if (!ret) {
2201                 mad_send_wr->refcount++;
2202                 list_add_tail(&mad_send_wr->agent_list,
2203                               &mad_send_wr->mad_agent_priv->send_list);
2204         }
2205         return ret;
2206 }
2207
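     /*
      * Work handler that expires requests on the wait list: requests with
      * retries remaining are resent, the rest are reported to the client
      * as timed out (or with their recorded error status).
      */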
2208 static void timeout_sends(void *data)
2209 {
2210         struct ib_mad_agent_private *mad_agent_priv;
2211         struct ib_mad_send_wr_private *mad_send_wr;
2212         struct ib_mad_send_wc mad_send_wc;
2213         unsigned long flags, delay;
2214
2215         mad_agent_priv = (struct ib_mad_agent_private *)data;
2216         mad_send_wc.vendor_err = 0;
2217
2218         spin_lock_irqsave(&mad_agent_priv->lock, flags);
2219         while (!list_empty(&mad_agent_priv->wait_list)) {
2220                 mad_send_wr = list_entry(mad_agent_priv->wait_list.next,
2221                                          struct ib_mad_send_wr_private,
2222                                          agent_list);
2223
2224                 if (time_after(mad_send_wr->timeout, jiffies)) {
2225                         delay = mad_send_wr->timeout - jiffies;
2226                         if ((long)delay <= 0)
2227                                 delay = 1;
2228                         queue_delayed_work(mad_agent_priv->qp_info->
2229                                            port_priv->wq,
2230                                            &mad_agent_priv->timed_work, delay);
2231                         break;
2232                 }
2233
2234                 list_del(&mad_send_wr->agent_list);
2235                 if (mad_send_wr->status == IB_WC_SUCCESS &&
2236                     !retry_send(mad_send_wr))
2237                         continue;
2238
2239                 spin_unlock_irqrestore(&mad_agent_priv->lock, flags);
2240
2241                 if (mad_send_wr->status == IB_WC_SUCCESS)
2242                         mad_send_wc.status = IB_WC_RESP_TIMEOUT_ERR;
2243                 else
2244                         mad_send_wc.status = mad_send_wr->status;
2245                 mad_send_wc.send_buf = &mad_send_wr->send_buf;
2246                 mad_agent_priv->agent.send_handler(&mad_agent_priv->agent,
2247                                                    &mad_send_wc);
2248
2249                 atomic_dec(&mad_agent_priv->refcount);
2250                 spin_lock_irqsave(&mad_agent_priv->lock, flags);
2251         }
2252         spin_unlock_irqrestore(&mad_agent_priv->lock, flags);
2253 }
2254
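     /*
      * CQ completion event callback: defer all completion processing to
      * the port's single-threaded workqueue.
      */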
2255 static void ib_mad_thread_completion_handler(struct ib_cq *cq, void *arg)
2256 {
2257         struct ib_mad_port_private *port_priv = cq->cq_context;
2258
2259         queue_work(port_priv->wq, &port_priv->work);
2260 }
2261
2262 /*
2263  * Allocate receive MADs and post receive WRs for them
2264  */
2265 static int ib_mad_post_receive_mads(struct ib_mad_qp_info *qp_info,
2266                                     struct ib_mad_private *mad)
2267 {
2268         unsigned long flags;
2269         int post, ret;
2270         struct ib_mad_private *mad_priv;
2271         struct ib_sge sg_list;
2272         struct ib_recv_wr recv_wr, *bad_recv_wr;
2273         struct ib_mad_queue *recv_queue = &qp_info->recv_queue;
2274
2275         /* Initialize common scatter list fields */
2276         sg_list.length = sizeof *mad_priv - sizeof mad_priv->header;
2277         sg_list.lkey = qp_info->port_priv->mr->lkey;
2278
2279         /* Initialize common receive WR fields */
2280         recv_wr.next = NULL;
2281         recv_wr.sg_list = &sg_list;
2282         recv_wr.num_sge = 1;
2283
2284         do {
2285                 /* Allocate and map receive buffer */
2286                 if (mad) {
2287                         mad_priv = mad;
2288                         mad = NULL;
2289                 } else {
2290                         mad_priv = kmem_cache_alloc(ib_mad_cache, GFP_KERNEL);
2291                         if (!mad_priv) {
2292                                 printk(KERN_ERR PFX "No memory for receive buffer\n");
2293                                 ret = -ENOMEM;
2294                                 break;
2295                         }
2296                 }
2297                 sg_list.addr = dma_map_single(qp_info->port_priv->
2298                                                 device->dma_device,
2299                                         &mad_priv->grh,
2300                                         sizeof *mad_priv -
2301                                                 sizeof mad_priv->header,
2302                                         DMA_FROM_DEVICE);
2303                 pci_unmap_addr_set(&mad_priv->header, mapping, sg_list.addr);
2304                 recv_wr.wr_id = (unsigned long)&mad_priv->header.mad_list;
2305                 mad_priv->header.mad_list.mad_queue = recv_queue;
2306
2307                 /* Post receive WR */
2308                 spin_lock_irqsave(&recv_queue->lock, flags);
2309                 post = (++recv_queue->count < recv_queue->max_active);
2310                 list_add_tail(&mad_priv->header.mad_list.list, &recv_queue->list);
2311                 spin_unlock_irqrestore(&recv_queue->lock, flags);
2312                 ret = ib_post_recv(qp_info->qp, &recv_wr, &bad_recv_wr);
2313                 if (ret) {
2314                         spin_lock_irqsave(&recv_queue->lock, flags);
2315                         list_del(&mad_priv->header.mad_list.list);
2316                         recv_queue->count--;
2317                         spin_unlock_irqrestore(&recv_queue->lock, flags);
2318                         dma_unmap_single(qp_info->port_priv->device->dma_device,
2319                                          pci_unmap_addr(&mad_priv->header,
2320                                                         mapping),
2321                                          sizeof *mad_priv -
2322                                            sizeof mad_priv->header,
2323                                          DMA_FROM_DEVICE);
2324                         kmem_cache_free(ib_mad_cache, mad_priv);
2325                         printk(KERN_ERR PFX "ib_post_recv failed: %d\n", ret);
2326                         break;
2327                 }
2328         } while (post);
2329
2330         return ret;
2331 }
2332
2333 /*
2334  * Return all the posted receive MADs
2335  */
2336 static void cleanup_recv_queue(struct ib_mad_qp_info *qp_info)
2337 {
2338         struct ib_mad_private_header *mad_priv_hdr;
2339         struct ib_mad_private *recv;
2340         struct ib_mad_list_head *mad_list;
2341
2342         while (!list_empty(&qp_info->recv_queue.list)) {
2343
2344                 mad_list = list_entry(qp_info->recv_queue.list.next,
2345                                       struct ib_mad_list_head, list);
2346                 mad_priv_hdr = container_of(mad_list,
2347                                             struct ib_mad_private_header,
2348                                             mad_list);
2349                 recv = container_of(mad_priv_hdr, struct ib_mad_private,
2350                                     header);
2351
2352                 /* Remove from posted receive MAD list */
2353                 list_del(&mad_list->list);
2354
2355                 dma_unmap_single(qp_info->port_priv->device->dma_device,
2356                                  pci_unmap_addr(&recv->header, mapping),
2357                                  sizeof(struct ib_mad_private) -
2358                                  sizeof(struct ib_mad_private_header),
2359                                  DMA_FROM_DEVICE);
2360                 kmem_cache_free(ib_mad_cache, recv);
2361         }
2362
2363         qp_info->recv_queue.count = 0;
2364 }
2365
2366 /*
2367  * Start the port
2368  */
2369 static int ib_mad_port_start(struct ib_mad_port_private *port_priv)
2370 {
2371         int ret, i;
2372         struct ib_qp_attr *attr;
2373         struct ib_qp *qp;
2374
2375         attr = kmalloc(sizeof *attr, GFP_KERNEL);
2376         if (!attr) {
2377                 printk(KERN_ERR PFX "Couldn't kmalloc ib_qp_attr\n");
2378                 return -ENOMEM;
2379         }
2380
2381         for (i = 0; i < IB_MAD_QPS_CORE; i++) {
2382                 qp = port_priv->qp_info[i].qp;
2383                 /*
2384                  * PKey index for QP1 is irrelevant but
2385                  * one is needed for the Reset to Init transition
2386                  */
2387                 attr->qp_state = IB_QPS_INIT;
2388                 attr->pkey_index = 0;
2389                 attr->qkey = (qp->qp_num == 0) ? 0 : IB_QP1_QKEY;
2390                 ret = ib_modify_qp(qp, attr, IB_QP_STATE |
2391                                              IB_QP_PKEY_INDEX | IB_QP_QKEY);
2392                 if (ret) {
2393                         printk(KERN_ERR PFX "Couldn't change QP%d state to "
2394                                "INIT: %d\n", i, ret);
2395                         goto out;
2396                 }
2397
2398                 attr->qp_state = IB_QPS_RTR;
2399                 ret = ib_modify_qp(qp, attr, IB_QP_STATE);
2400                 if (ret) {
2401                         printk(KERN_ERR PFX "Couldn't change QP%d state to "
2402                                "RTR: %d\n", i, ret);
2403                         goto out;
2404                 }
2405
2406                 attr->qp_state = IB_QPS_RTS;
2407                 attr->sq_psn = IB_MAD_SEND_Q_PSN;
2408                 ret = ib_modify_qp(qp, attr, IB_QP_STATE | IB_QP_SQ_PSN);
2409                 if (ret) {
2410                         printk(KERN_ERR PFX "Couldn't change QP%d state to "
2411                                "RTS: %d\n", i, ret);
2412                         goto out;
2413                 }
2414         }
2415
2416         ret = ib_req_notify_cq(port_priv->cq, IB_CQ_NEXT_COMP);
2417         if (ret) {
2418                 printk(KERN_ERR PFX "Failed to request completion "
2419                        "notification: %d\n", ret);
2420                 goto out;
2421         }
2422
2423         for (i = 0; i < IB_MAD_QPS_CORE; i++) {
2424                 ret = ib_mad_post_receive_mads(&port_priv->qp_info[i], NULL);
2425                 if (ret) {
2426                         printk(KERN_ERR PFX "Couldn't post receive WRs\n");
2427                         goto out;
2428                 }
2429         }
2430 out:
2431         kfree(attr);
2432         return ret;
2433 }
2434
2435 static void qp_event_handler(struct ib_event *event, void *qp_context)
2436 {
2437         struct ib_mad_qp_info   *qp_info = qp_context;
2438
2439         /* It's worse than that! He's dead, Jim! */
2440         printk(KERN_ERR PFX "Fatal error (%d) on MAD QP (%d)\n",
2441                 event->event, qp_info->qp->qp_num);
2442 }
2443
2444 static void init_mad_queue(struct ib_mad_qp_info *qp_info,
2445                            struct ib_mad_queue *mad_queue)
2446 {
2447         mad_queue->qp_info = qp_info;
2448         mad_queue->count = 0;
2449         spin_lock_init(&mad_queue->lock);
2450         INIT_LIST_HEAD(&mad_queue->list);
2451 }
2452
2453 static void init_mad_qp(struct ib_mad_port_private *port_priv,
2454                         struct ib_mad_qp_info *qp_info)
2455 {
2456         qp_info->port_priv = port_priv;
2457         init_mad_queue(qp_info, &qp_info->send_queue);
2458         init_mad_queue(qp_info, &qp_info->recv_queue);
2459         INIT_LIST_HEAD(&qp_info->overflow_list);
2460         spin_lock_init(&qp_info->snoop_lock);
2461         qp_info->snoop_table = NULL;
2462         qp_info->snoop_table_size = 0;
2463         atomic_set(&qp_info->snoop_count, 0);
2464 }
2465
2466 static int create_mad_qp(struct ib_mad_qp_info *qp_info,
2467                          enum ib_qp_type qp_type)
2468 {
2469         struct ib_qp_init_attr  qp_init_attr;
2470         int ret;
2471
2472         memset(&qp_init_attr, 0, sizeof qp_init_attr);
2473         qp_init_attr.send_cq = qp_info->port_priv->cq;
2474         qp_init_attr.recv_cq = qp_info->port_priv->cq;
2475         qp_init_attr.sq_sig_type = IB_SIGNAL_ALL_WR;
2476         qp_init_attr.cap.max_send_wr = IB_MAD_QP_SEND_SIZE;
2477         qp_init_attr.cap.max_recv_wr = IB_MAD_QP_RECV_SIZE;
2478         qp_init_attr.cap.max_send_sge = IB_MAD_SEND_REQ_MAX_SG;
2479         qp_init_attr.cap.max_recv_sge = IB_MAD_RECV_REQ_MAX_SG;
2480         qp_init_attr.qp_type = qp_type;
2481         qp_init_attr.port_num = qp_info->port_priv->port_num;
2482         qp_init_attr.qp_context = qp_info;
2483         qp_init_attr.event_handler = qp_event_handler;
2484         qp_info->qp = ib_create_qp(qp_info->port_priv->pd, &qp_init_attr);
2485         if (IS_ERR(qp_info->qp)) {
2486                 printk(KERN_ERR PFX "Couldn't create ib_mad QP%d\n",
2487                        get_spl_qp_index(qp_type));
2488                 ret = PTR_ERR(qp_info->qp);
2489                 goto error;
2490         }
2491         /* Use minimum queue sizes unless the CQ is resized */
2492         qp_info->send_queue.max_active = IB_MAD_QP_SEND_SIZE;
2493         qp_info->recv_queue.max_active = IB_MAD_QP_RECV_SIZE;
2494         return 0;
2495
2496 error:
2497         return ret;
2498 }
2499
2500 static void destroy_mad_qp(struct ib_mad_qp_info *qp_info)
2501 {
2502         ib_destroy_qp(qp_info->qp);
2503         kfree(qp_info->snoop_table);
2505 }
2506
2507 /*
2508  * Open the port
2509  * Create the QP, PD, MR, and CQ if needed
2510  */
2511 static int ib_mad_port_open(struct ib_device *device,
2512                             int port_num)
2513 {
2514         int ret, cq_size;
2515         struct ib_mad_port_private *port_priv;
2516         unsigned long flags;
2517         char name[sizeof "ib_mad123"];
2518
2519         /* Create new device info */
2520         port_priv = kzalloc(sizeof *port_priv, GFP_KERNEL);
2521         if (!port_priv) {
2522                 printk(KERN_ERR PFX "No memory for ib_mad_port_private\n");
2523                 return -ENOMEM;
2524         }
2525
2526         port_priv->device = device;
2527         port_priv->port_num = port_num;
2528         spin_lock_init(&port_priv->reg_lock);
2529         INIT_LIST_HEAD(&port_priv->agent_list);
2530         init_mad_qp(port_priv, &port_priv->qp_info[0]);
2531         init_mad_qp(port_priv, &port_priv->qp_info[1]);
2532
2533         cq_size = (IB_MAD_QP_SEND_SIZE + IB_MAD_QP_RECV_SIZE) * 2;
2534         port_priv->cq = ib_create_cq(port_priv->device,
2535                                      ib_mad_thread_completion_handler,
2536                                      NULL, port_priv, cq_size);
2537         if (IS_ERR(port_priv->cq)) {
2538                 printk(KERN_ERR PFX "Couldn't create ib_mad CQ\n");
2539                 ret = PTR_ERR(port_priv->cq);
2540                 goto error3;
2541         }
2542
2543         port_priv->pd = ib_alloc_pd(device);
2544         if (IS_ERR(port_priv->pd)) {
2545                 printk(KERN_ERR PFX "Couldn't create ib_mad PD\n");
2546                 ret = PTR_ERR(port_priv->pd);
2547                 goto error4;
2548         }
2549
2550         port_priv->mr = ib_get_dma_mr(port_priv->pd, IB_ACCESS_LOCAL_WRITE);
2551         if (IS_ERR(port_priv->mr)) {
2552                 printk(KERN_ERR PFX "Couldn't get ib_mad DMA MR\n");
2553                 ret = PTR_ERR(port_priv->mr);
2554                 goto error5;
2555         }
2556
2557         ret = create_mad_qp(&port_priv->qp_info[0], IB_QPT_SMI);
2558         if (ret)
2559                 goto error6;
2560         ret = create_mad_qp(&port_priv->qp_info[1], IB_QPT_GSI);
2561         if (ret)
2562                 goto error7;
2563
2564         snprintf(name, sizeof name, "ib_mad%d", port_num);
2565         port_priv->wq = create_singlethread_workqueue(name);
2566         if (!port_priv->wq) {
2567                 ret = -ENOMEM;
2568                 goto error8;
2569         }
2570         INIT_WORK(&port_priv->work, ib_mad_completion_handler, port_priv);
2571
2572         ret = ib_mad_port_start(port_priv);
2573         if (ret) {
2574                 printk(KERN_ERR PFX "Couldn't start port\n");
2575                 goto error9;
2576         }
2577
2578         spin_lock_irqsave(&ib_mad_port_list_lock, flags);
2579         list_add_tail(&port_priv->port_list, &ib_mad_port_list);
2580         spin_unlock_irqrestore(&ib_mad_port_list_lock, flags);
2581         return 0;
2582
2583 error9:
2584         destroy_workqueue(port_priv->wq);
2585 error8:
2586         destroy_mad_qp(&port_priv->qp_info[1]);
2587 error7:
2588         destroy_mad_qp(&port_priv->qp_info[0]);
2589 error6:
2590         ib_dereg_mr(port_priv->mr);
2591 error5:
2592         ib_dealloc_pd(port_priv->pd);
2593 error4:
2594         ib_destroy_cq(port_priv->cq);
2595         cleanup_recv_queue(&port_priv->qp_info[1]);
2596         cleanup_recv_queue(&port_priv->qp_info[0]);
2597 error3:
2598         kfree(port_priv);
2599
2600         return ret;
2601 }
2602
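/*
 * ib_mad_port_open() above follows the usual "unwind in reverse order via
 * goto labels" idiom: each resource that was acquired successfully gets a
 * label that releases it together with everything acquired before it.  A
 * minimal, compiled-out sketch of the same idiom using only verbs calls
 * that already appear in this file; example_alloc_verbs() is a
 * hypothetical helper, not part of the MAD layer.
 */
#if 0
static int example_alloc_verbs(struct ib_device *device,
			       struct ib_pd **pd, struct ib_mr **mr)
{
	int ret;

	*pd = ib_alloc_pd(device);
	if (IS_ERR(*pd))
		return PTR_ERR(*pd);

	*mr = ib_get_dma_mr(*pd, IB_ACCESS_LOCAL_WRITE);
	if (IS_ERR(*mr)) {
		ret = PTR_ERR(*mr);
		goto error1;		/* PD allocated, MR was not */
	}
	return 0;

error1:
	ib_dealloc_pd(*pd);
	return ret;
}
#endif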
2603 /*
2604  * Close the port
2605  * Stop completion processing, free the port resources
2606  * (QPs, MR, PD, CQ), and remove the port's info structure
2607  */
2608 static int ib_mad_port_close(struct ib_device *device, int port_num)
2609 {
2610         struct ib_mad_port_private *port_priv;
2611         unsigned long flags;
2612
2613         spin_lock_irqsave(&ib_mad_port_list_lock, flags);
2614         port_priv = __ib_get_mad_port(device, port_num);
2615         if (!port_priv) {
2616                 spin_unlock_irqrestore(&ib_mad_port_list_lock, flags);
2617                 printk(KERN_ERR PFX "Port %d not found\n", port_num);
2618                 return -ENODEV;
2619         }
2620         list_del(&port_priv->port_list);
2621         spin_unlock_irqrestore(&ib_mad_port_list_lock, flags);
2622
2623         /* Stop processing completions. */
2624         flush_workqueue(port_priv->wq);
2625         destroy_workqueue(port_priv->wq);
2626         destroy_mad_qp(&port_priv->qp_info[1]);
2627         destroy_mad_qp(&port_priv->qp_info[0]);
2628         ib_dereg_mr(port_priv->mr);
2629         ib_dealloc_pd(port_priv->pd);
2630         ib_destroy_cq(port_priv->cq);
2631         cleanup_recv_queue(&port_priv->qp_info[1]);
2632         cleanup_recv_queue(&port_priv->qp_info[0]);
2633         /* XXX: Handle deallocation of MAD registration tables */
2634
2635         kfree(port_priv);
2636
2637         return 0;
2638 }
2639
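/*
 * ib_mad_port_close() above quiesces before it frees: pending completion
 * work is flushed and the workqueue destroyed before the QPs, MR, PD and
 * CQ that the work handler touches are torn down.  A compiled-out sketch
 * of that ordering; example_quiesce() and its workqueue/QP pairing are
 * illustrative only.
 */
#if 0
static void example_quiesce(struct workqueue_struct *wq, struct ib_qp *qp)
{
	flush_workqueue(wq);	/* wait for any in-flight handler to finish */
	destroy_workqueue(wq);	/* nothing new can run after this */
	ib_destroy_qp(qp);	/* now safe to free what the handler used */
}
#endif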
2640 static void ib_mad_init_device(struct ib_device *device)
2641 {
2642         int start, end, i;
2643
2644         if (device->node_type == IB_NODE_SWITCH) {
2645                 start = 0;
2646                 end   = 0;
2647         } else {
2648                 start = 1;
2649                 end   = device->phys_port_cnt;
2650         }
2651
2652         for (i = start; i <= end; i++) {
2653                 if (ib_mad_port_open(device, i)) {
2654                         printk(KERN_ERR PFX "Couldn't open %s port %d\n",
2655                                device->name, i);
2656                         goto error;
2657                 }
2658                 if (ib_agent_port_open(device, i)) {
2659                         printk(KERN_ERR PFX "Couldn't open %s port %d "
2660                                "for agents\n",
2661                                device->name, i);
2662                         goto error_agent;
2663                 }
2664         }
2665         return;
2666
2667 error_agent:
2668         if (ib_mad_port_close(device, i))
2669                 printk(KERN_ERR PFX "Couldn't close %s port %d\n",
2670                        device->name, i);
2671
2672 error:
2673         i--;
2674
2675         while (i >= start) {
2676                 if (ib_agent_port_close(device, i))
2677                         printk(KERN_ERR PFX "Couldn't close %s port %d "
2678                                "for agents\n",
2679                                device->name, i);
2680                 if (ib_mad_port_close(device, i))
2681                         printk(KERN_ERR PFX "Couldn't close %s port %d\n",
2682                                device->name, i);
2683                 i--;
2684         }
2685 }
2686
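/*
 * The port ranges above reflect IB node types: a switch does all of its
 * management on port 0, while a CA (or router) has ports 1 through
 * phys_port_cnt.  A compiled-out sketch of the same iteration with a
 * hypothetical per-port callback:
 */
#if 0
static void example_for_each_port(struct ib_device *device,
				  void (*per_port)(struct ib_device *, int))
{
	int start, end, i;

	if (device->node_type == IB_NODE_SWITCH) {
		start = 0;	/* switch management port */
		end   = 0;
	} else {
		start = 1;	/* physical ports of a CA/router */
		end   = device->phys_port_cnt;
	}
	for (i = start; i <= end; i++)
		per_port(device, i);
}
#endif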
2687 static void ib_mad_remove_device(struct ib_device *device)
2688 {
2689         int i, num_ports, cur_port;
2690
2691         if (device->node_type == IB_NODE_SWITCH) {
2692                 num_ports = 1;
2693                 cur_port = 0;
2694         } else {
2695                 num_ports = device->phys_port_cnt;
2696                 cur_port = 1;
2697         }
2698         for (i = 0; i < num_ports; i++, cur_port++) {
2699                 if (ib_agent_port_close(device, cur_port))
2700                         printk(KERN_ERR PFX "Couldn't close %s port %d "
2701                                "for agents\n",
2702                                device->name, cur_port);
2703                 if (ib_mad_port_close(device, cur_port))
2704                         printk(KERN_ERR PFX "Couldn't close %s port %d\n",
2705                                device->name, cur_port);
2706         }
2707 }
2708
2709 static struct ib_client mad_client = {
2710         .name   = "mad",
2711         .add    = ib_mad_init_device,
2712         .remove = ib_mad_remove_device
2713 };
2714
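/*
 * mad_client above is how this module learns about IB devices: the core
 * calls .add for each existing and newly registered device and .remove
 * when a device goes away.  A compiled-out sketch of how another kernel
 * module would use the same interface; the example_* names are
 * illustrative only.
 */
#if 0
static void example_add_one(struct ib_device *device)
{
	/* per-device setup for the hypothetical client */
}

static void example_remove_one(struct ib_device *device)
{
	/* per-device teardown, mirroring example_add_one() */
}

static struct ib_client example_client = {
	.name   = "example",
	.add    = example_add_one,
	.remove = example_remove_one
};

static int example_register(void)
{
	return ib_register_client(&example_client);	/* typically from module_init() */
}

static void example_unregister(void)
{
	ib_unregister_client(&example_client);		/* typically from module_exit() */
}
#endif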
2715 static int __init ib_mad_init_module(void)
2716 {
2717         int ret;
2718
2719         spin_lock_init(&ib_mad_port_list_lock);
2720
2721         ib_mad_cache = kmem_cache_create("ib_mad",
2722                                          sizeof(struct ib_mad_private),
2723                                          0,
2724                                          SLAB_HWCACHE_ALIGN,
2725                                          NULL,
2726                                          NULL);
2727         if (!ib_mad_cache) {
2728                 printk(KERN_ERR PFX "Couldn't create ib_mad cache\n");
2729                 ret = -ENOMEM;
2730                 goto error1;
2731         }
2732
2733         INIT_LIST_HEAD(&ib_mad_port_list);
2734
2735         if (ib_register_client(&mad_client)) {
2736                 printk(KERN_ERR PFX "Couldn't register ib_mad client\n");
2737                 ret = -EINVAL;
2738                 goto error2;
2739         }
2740
2741         return 0;
2742
2743 error2:
2744         kmem_cache_destroy(ib_mad_cache);
2745 error1:
2746         return ret;
2747 }
2748
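/*
 * The "ib_mad" slab created above backs the struct ib_mad_private buffers
 * this layer allocates.  A compiled-out sketch of how a consumer in this
 * layer would typically take an entry from the cache and return it;
 * example_mad_get()/example_mad_put() are hypothetical names.
 */
#if 0
static struct ib_mad_private *example_mad_get(void)
{
	/* one ib_mad_private per buffer, carved from the slab cache */
	return kmem_cache_alloc(ib_mad_cache, GFP_KERNEL);
}

static void example_mad_put(struct ib_mad_private *mad)
{
	kmem_cache_free(ib_mad_cache, mad);
}
#endif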
2749 static void __exit ib_mad_cleanup_module(void)
2750 {
2751         ib_unregister_client(&mad_client);
2752
2753         if (kmem_cache_destroy(ib_mad_cache)) {
2754                 printk(KERN_DEBUG PFX "Failed to destroy ib_mad cache\n");
2755         }
2756 }
2757
2758 module_init(ib_mad_init_module);
2759 module_exit(ib_mad_cleanup_module);
2760