Commit | Line | Data |
---|---|---|
1da177e4 LT |
1 | /* |
2 | * Copyright (c) 2004 Topspin Communications. All rights reserved. | |
2a1d9b7f | 3 | * Copyright (c) 2005 Voltaire, Inc. All rights reserved. |
cb183a06 | 4 | * Copyright (c) 2005 Sun Microsystems, Inc. All rights reserved. |
1da177e4 LT |
5 | * |
6 | * This software is available to you under a choice of one of two | |
7 | * licenses. You may choose to be licensed under the terms of the GNU | |
8 | * General Public License (GPL) Version 2, available from the file | |
9 | * COPYING in the main directory of this source tree, or the | |
10 | * OpenIB.org BSD license below: | |
11 | * | |
12 | * Redistribution and use in source and binary forms, with or | |
13 | * without modification, are permitted provided that the following | |
14 | * conditions are met: | |
15 | * | |
16 | * - Redistributions of source code must retain the above | |
17 | * copyright notice, this list of conditions and the following | |
18 | * disclaimer. | |
19 | * | |
20 | * - Redistributions in binary form must reproduce the above | |
21 | * copyright notice, this list of conditions and the following | |
22 | * disclaimer in the documentation and/or other materials | |
23 | * provided with the distribution. | |
24 | * | |
25 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, | |
26 | * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF | |
27 | * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND | |
28 | * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS | |
29 | * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN | |
30 | * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN | |
31 | * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE | |
32 | * SOFTWARE. | |
33 | * | |
cb183a06 | 34 | * $Id: user_mad.c 2814 2005-07-06 19:14:09Z halr $ |
1da177e4 LT |
35 | */ |
36 | ||
37 | #include <linux/module.h> | |
38 | #include <linux/init.h> | |
39 | #include <linux/device.h> | |
40 | #include <linux/err.h> | |
41 | #include <linux/fs.h> | |
42 | #include <linux/cdev.h> | |
43 | #include <linux/pci.h> | |
44 | #include <linux/dma-mapping.h> | |
45 | #include <linux/poll.h> | |
46 | #include <linux/rwsem.h> | |
47 | #include <linux/kref.h> | |
48 | ||
49 | #include <asm/uaccess.h> | |
50 | #include <asm/semaphore.h> | |
51 | ||
52 | #include <ib_mad.h> | |
53 | #include <ib_user_mad.h> | |
54 | ||
55 | MODULE_AUTHOR("Roland Dreier"); | |
56 | MODULE_DESCRIPTION("InfiniBand userspace MAD packet access"); | |
57 | MODULE_LICENSE("Dual BSD/GPL"); | |
58 | ||
enum {
	IB_UMAD_MAX_PORTS  = 64,	/* max ports handled by this module */
	IB_UMAD_MAX_AGENTS = 32,	/* max MAD agents per open file */

	IB_UMAD_MAJOR      = 231,	/* fixed char device major number */
	IB_UMAD_MINOR_BASE = 0		/* first minor; umad uses the low half,
					   issm the high half of the range */
};
66 | ||
/*
 * Per-port state: one "umad" char device for MAD send/receive and one
 * "issm" char device used to claim/release the SM role on the port.
 */
struct ib_umad_port {
	int devnum;			/* minor index for the umad device */
	struct cdev dev;		/* umad%d char device */
	struct class_device class_dev;

	int sm_devnum;			/* minor index for the issm device */
	struct cdev sm_dev;		/* issm%d char device */
	struct class_device sm_class_dev;
	struct semaphore sm_sem;	/* held while a process owns the SM device */

	struct ib_device *ib_dev;
	struct ib_umad_device *umad_dev; /* owning device, for refcounting */
	u8 port_num;
};
81 | ||
/*
 * Per-IB-device state; port[] is a trailing variable-length array with
 * one entry per physical port (port[0] corresponds to start_port).
 * Freed via ib_umad_release_dev() when the last kref is dropped.
 */
struct ib_umad_device {
	int start_port, end_port;
	struct kref ref;
	struct ib_umad_port port[0];
};
87 | ||
/* Per-open-file state for a umad device. */
struct ib_umad_file {
	struct ib_umad_port *port;
	spinlock_t recv_lock;		/* protects recv_list */
	struct list_head recv_list;	/* ib_umad_packets waiting for read() */
	wait_queue_head_t recv_wait;	/* readers block here */
	struct rw_semaphore agent_mutex; /* protects agent[] and mr[] */
	struct ib_mad_agent *agent[IB_UMAD_MAX_AGENTS];
	struct ib_mr *mr[IB_UMAD_MAX_AGENTS];
};
97 | ||
/*
 * One MAD either in flight (send side) or queued for read (receive
 * side).  The user-visible struct ib_user_mad must stay last: its
 * variable-length data[] is allocated past the end of this struct.
 */
struct ib_umad_packet {
	struct ib_ah *ah;		/* address handle used for the send */
	struct ib_mad_send_buf *msg;
	struct list_head list;		/* entry on file->recv_list */
	int length;			/* length of mad.data in bytes */
	DECLARE_PCI_UNMAP_ADDR(mapping)
	struct ib_user_mad mad;		/* must be last */
};
106 | ||
static const dev_t base_dev = MKDEV(IB_UMAD_MAJOR, IB_UMAD_MINOR_BASE);
static spinlock_t map_lock;		/* protects dev_map */
/* Low half of the bitmap tracks umad minors, high half tracks issm minors. */
static DECLARE_BITMAP(dev_map, IB_UMAD_MAX_PORTS * 2);

static void ib_umad_add_one(struct ib_device *device);
static void ib_umad_remove_one(struct ib_device *device);
113 | ||
114 | static int queue_packet(struct ib_umad_file *file, | |
115 | struct ib_mad_agent *agent, | |
116 | struct ib_umad_packet *packet) | |
117 | { | |
118 | int ret = 1; | |
119 | ||
120 | down_read(&file->agent_mutex); | |
cb183a06 HR |
121 | for (packet->mad.hdr.id = 0; |
122 | packet->mad.hdr.id < IB_UMAD_MAX_AGENTS; | |
123 | packet->mad.hdr.id++) | |
124 | if (agent == file->agent[packet->mad.hdr.id]) { | |
1da177e4 LT |
125 | spin_lock_irq(&file->recv_lock); |
126 | list_add_tail(&packet->list, &file->recv_list); | |
127 | spin_unlock_irq(&file->recv_lock); | |
128 | wake_up_interruptible(&file->recv_wait); | |
129 | ret = 0; | |
130 | break; | |
131 | } | |
132 | ||
133 | up_read(&file->agent_mutex); | |
134 | ||
135 | return ret; | |
136 | } | |
137 | ||
138 | static void send_handler(struct ib_mad_agent *agent, | |
139 | struct ib_mad_send_wc *send_wc) | |
140 | { | |
141 | struct ib_umad_file *file = agent->context; | |
cb183a06 | 142 | struct ib_umad_packet *timeout, *packet = |
1da177e4 LT |
143 | (void *) (unsigned long) send_wc->wr_id; |
144 | ||
cb183a06 HR |
145 | ib_destroy_ah(packet->msg->send_wr.wr.ud.ah); |
146 | ib_free_send_mad(packet->msg); | |
1da177e4 LT |
147 | |
148 | if (send_wc->status == IB_WC_RESP_TIMEOUT_ERR) { | |
cb183a06 HR |
149 | timeout = kmalloc(sizeof *timeout + sizeof (struct ib_mad_hdr), |
150 | GFP_KERNEL); | |
151 | if (!timeout) | |
152 | goto out; | |
1da177e4 | 153 | |
cb183a06 | 154 | memset(timeout, 0, sizeof *timeout + sizeof (struct ib_mad_hdr)); |
1da177e4 | 155 | |
cb183a06 HR |
156 | timeout->length = sizeof (struct ib_mad_hdr); |
157 | timeout->mad.hdr.id = packet->mad.hdr.id; | |
158 | timeout->mad.hdr.status = ETIMEDOUT; | |
159 | memcpy(timeout->mad.data, packet->mad.data, | |
160 | sizeof (struct ib_mad_hdr)); | |
161 | ||
162 | if (!queue_packet(file, agent, timeout)) | |
163 | return; | |
164 | } | |
165 | out: | |
1da177e4 LT |
166 | kfree(packet); |
167 | } | |
168 | ||
/*
 * Completion handler for received MADs.  Copies the (possibly
 * multi-segment RMPP) MAD into a freshly allocated ib_umad_packet,
 * fills in the userspace-visible header from the work completion, and
 * queues it for read().  The receive WC is always freed before return.
 */
static void recv_handler(struct ib_mad_agent *agent,
			 struct ib_mad_recv_wc *mad_recv_wc)
{
	struct ib_umad_file *file = agent->context;
	struct ib_umad_packet *packet;
	int length;

	if (mad_recv_wc->wc->status != IB_WC_SUCCESS)
		goto out;

	length = mad_recv_wc->mad_len;
	packet = kmalloc(sizeof *packet + length, GFP_KERNEL);
	if (!packet)
		goto out;	/* OOM: silently drop the MAD */

	memset(packet, 0, sizeof *packet + length);
	packet->length = length;

	/* Gather RMPP segments into one contiguous buffer. */
	ib_coalesce_recv_mad(mad_recv_wc, packet->mad.data);

	packet->mad.hdr.status = 0;
	packet->mad.hdr.length = length + sizeof (struct ib_user_mad);
	packet->mad.hdr.qpn = cpu_to_be32(mad_recv_wc->wc->src_qp);
	packet->mad.hdr.lid = cpu_to_be16(mad_recv_wc->wc->slid);
	packet->mad.hdr.sl = mad_recv_wc->wc->sl;
	packet->mad.hdr.path_bits = mad_recv_wc->wc->dlid_path_bits;
	packet->mad.hdr.grh_present = !!(mad_recv_wc->wc->wc_flags & IB_WC_GRH);
	if (packet->mad.hdr.grh_present) {
		/* XXX parse GRH */
		packet->mad.hdr.gid_index = 0;
		packet->mad.hdr.hop_limit = 0;
		packet->mad.hdr.traffic_class = 0;
		memset(packet->mad.hdr.gid, 0, 16);
		packet->mad.hdr.flow_label = 0;
	}

	/* queue_packet() fails only if the agent is gone: drop the MAD. */
	if (queue_packet(file, agent, packet))
		kfree(packet);

out:
	ib_free_recv_mad(mad_recv_wc);
}
211 | ||
/*
 * read() on a umad device: hand the oldest queued MAD to userspace.
 * Blocks (unless O_NONBLOCK) until a packet is available.  If the
 * caller's buffer is too small for the full (possibly RMPP) MAD, only
 * the first segment is copied and -ENOSPC is returned so the caller
 * can retry with a bigger buffer; the packet is requeued on any error.
 */
static ssize_t ib_umad_read(struct file *filp, char __user *buf,
			    size_t count, loff_t *pos)
{
	struct ib_umad_file *file = filp->private_data;
	struct ib_umad_packet *packet;
	ssize_t ret;

	/* Buffer must hold at least the header plus one full MAD. */
	if (count < sizeof (struct ib_user_mad) + sizeof (struct ib_mad))
		return -EINVAL;

	spin_lock_irq(&file->recv_lock);

	while (list_empty(&file->recv_list)) {
		spin_unlock_irq(&file->recv_lock);

		if (filp->f_flags & O_NONBLOCK)
			return -EAGAIN;

		if (wait_event_interruptible(file->recv_wait,
					     !list_empty(&file->recv_list)))
			return -ERESTARTSYS;

		spin_lock_irq(&file->recv_lock);
	}

	packet = list_entry(file->recv_list.next, struct ib_umad_packet, list);
	list_del(&packet->list);

	spin_unlock_irq(&file->recv_lock);

	if (count < packet->length + sizeof (struct ib_user_mad)) {
		/* Return length needed (and first RMPP segment) if too small */
		if (copy_to_user(buf, &packet->mad,
				 sizeof (struct ib_user_mad) + sizeof (struct ib_mad)))
			ret = -EFAULT;
		else
			ret = -ENOSPC;
	} else if (copy_to_user(buf, &packet->mad,
				packet->length + sizeof (struct ib_user_mad)))
		ret = -EFAULT;
	else
		ret = packet->length + sizeof (struct ib_user_mad);
	if (ret < 0) {
		/* Requeue packet */
		spin_lock_irq(&file->recv_lock);
		list_add(&packet->list, &file->recv_list);
		spin_unlock_irq(&file->recv_lock);
	} else
		kfree(packet);
	return ret;
}
263 | ||
/*
 * write() on a umad device: parse the userspace ib_user_mad, build an
 * address handle and a MAD send buffer (RMPP-aware), fix up the
 * transaction ID for request MADs, and post the send.  Completion (and
 * resource teardown) happens in send_handler().
 */
static ssize_t ib_umad_write(struct file *filp, const char __user *buf,
			     size_t count, loff_t *pos)
{
	struct ib_umad_file *file = filp->private_data;
	struct ib_umad_packet *packet;
	struct ib_mad_agent *agent;
	struct ib_ah_attr ah_attr;
	struct ib_send_wr *bad_wr;
	struct ib_rmpp_mad *rmpp_mad;
	u8 method;
	u64 *tid;
	int ret, length, hdr_len, data_len, rmpp_hdr_size;
	int rmpp_active = 0;

	if (count < sizeof (struct ib_user_mad))
		return -EINVAL;

	length = count - sizeof (struct ib_user_mad);
	/* Only headers are staged here; payload is copied straight into
	   the send buffer further down. */
	packet = kmalloc(sizeof *packet + sizeof(struct ib_mad_hdr) +
			 sizeof(struct ib_rmpp_hdr), GFP_KERNEL);
	if (!packet)
		return -ENOMEM;

	/* NOTE(review): copies MAD+RMPP headers without checking that
	 * count actually covers them -- assumes callers always pass at
	 * least a full MAD header; verify against the userspace ABI. */
	if (copy_from_user(&packet->mad, buf,
			    sizeof (struct ib_user_mad) +
			    sizeof(struct ib_mad_hdr) +
			    sizeof(struct ib_rmpp_hdr))) {
		ret = -EFAULT;
		goto err;
	}

	if (packet->mad.hdr.id < 0 ||
	    packet->mad.hdr.id >= IB_UMAD_MAX_AGENTS) {
		ret = -EINVAL;
		goto err;
	}

	packet->length = length;

	down_read(&file->agent_mutex);

	agent = file->agent[packet->mad.hdr.id];
	if (!agent) {
		ret = -EINVAL;
		goto err_up;
	}

	/* Build the address handle from the userspace-supplied fields. */
	memset(&ah_attr, 0, sizeof ah_attr);
	ah_attr.dlid = be16_to_cpu(packet->mad.hdr.lid);
	ah_attr.sl = packet->mad.hdr.sl;
	ah_attr.src_path_bits = packet->mad.hdr.path_bits;
	ah_attr.port_num = file->port->port_num;
	if (packet->mad.hdr.grh_present) {
		ah_attr.ah_flags = IB_AH_GRH;
		memcpy(ah_attr.grh.dgid.raw, packet->mad.hdr.gid, 16);
		ah_attr.grh.flow_label = packet->mad.hdr.flow_label;
		ah_attr.grh.hop_limit = packet->mad.hdr.hop_limit;
		ah_attr.grh.traffic_class = packet->mad.hdr.traffic_class;
	}

	packet->ah = ib_create_ah(agent->qp->pd, &ah_attr);
	if (IS_ERR(packet->ah)) {
		ret = PTR_ERR(packet->ah);
		goto err_up;
	}

	rmpp_mad = (struct ib_rmpp_mad *) packet->mad.data;
	if (ib_get_rmpp_flags(&rmpp_mad->rmpp_hdr) & IB_MGMT_RMPP_FLAG_ACTIVE) {
		/* RMPP active */
		if (!agent->rmpp_version) {
			ret = -EINVAL;
			goto err_ah;
		}
		/* Validate that management class can support RMPP */
		if (rmpp_mad->mad_hdr.mgmt_class == IB_MGMT_CLASS_SUBN_ADM) {
			hdr_len = offsetof(struct ib_sa_mad, data);
			data_len = length;
		} else if ((rmpp_mad->mad_hdr.mgmt_class >= IB_MGMT_CLASS_VENDOR_RANGE2_START) &&
			   (rmpp_mad->mad_hdr.mgmt_class <= IB_MGMT_CLASS_VENDOR_RANGE2_END)) {
			hdr_len = offsetof(struct ib_vendor_mad, data);
			data_len = length - hdr_len;
		} else {
			ret = -EINVAL;
			goto err_ah;
		}
		rmpp_active = 1;
	} else {
		/* Non-RMPP sends must fit in a single MAD. */
		if (length > sizeof(struct ib_mad)) {
			ret = -EINVAL;
			goto err_ah;
		}
		hdr_len = offsetof(struct ib_mad, data);
		/* NOTE(review): data_len goes negative if length < hdr_len;
		 * presumably ib_create_send_mad rejects that -- confirm. */
		data_len = length - hdr_len;
	}

	packet->msg = ib_create_send_mad(agent,
					 be32_to_cpu(packet->mad.hdr.qpn),
					 0, packet->ah, rmpp_active,
					 hdr_len, data_len,
					 GFP_KERNEL);
	if (IS_ERR(packet->msg)) {
		ret = PTR_ERR(packet->msg);
		goto err_ah;
	}

	packet->msg->send_wr.wr.ud.timeout_ms = packet->mad.hdr.timeout_ms;
	packet->msg->send_wr.wr.ud.retries = packet->mad.hdr.retries;

	/* Override send WR WRID initialized in ib_create_send_mad */
	packet->msg->send_wr.wr_id = (unsigned long) packet;

	if (!rmpp_active) {
		/* Copy message from user into send buffer */
		if (copy_from_user(packet->msg->mad,
				   buf + sizeof(struct ib_user_mad), length)) {
			ret = -EFAULT;
			goto err_msg;
		}
	} else {
		rmpp_hdr_size = sizeof(struct ib_mad_hdr) +
			sizeof(struct ib_rmpp_hdr);

		/* Only copy MAD headers (RMPP header in place) */
		memcpy(packet->msg->mad, packet->mad.data,
		       sizeof(struct ib_mad_hdr));

		/* Now, copy rest of message from user into send buffer */
		if (copy_from_user(((struct ib_rmpp_mad *) packet->msg->mad)->data,
				   buf + sizeof (struct ib_user_mad) + rmpp_hdr_size,
				   length - rmpp_hdr_size)) {
			ret = -EFAULT;
			goto err_msg;
		}
	}

	/*
	 * If userspace is generating a request that will generate a
	 * response, we need to make sure the high-order part of the
	 * transaction ID matches the agent being used to send the
	 * MAD.
	 */
	method = packet->msg->mad->mad_hdr.method;

	if (!(method & IB_MGMT_METHOD_RESP)       &&
	    method != IB_MGMT_METHOD_TRAP_REPRESS &&
	    method != IB_MGMT_METHOD_SEND) {
		tid = &packet->msg->mad->mad_hdr.tid;
		*tid = cpu_to_be64(((u64) agent->hi_tid) << 32 |
				   (be64_to_cpup(tid) & 0xffffffff));
	}

	ret = ib_post_send_mad(agent, &packet->msg->send_wr, &bad_wr);
	if (ret)
		goto err_msg;

	up_read(&file->agent_mutex);

	/* packet is now owned by the send path; freed in send_handler(). */
	return sizeof (struct ib_user_mad_hdr) + packet->length;

err_msg:
	ib_free_send_mad(packet->msg);

err_ah:
	ib_destroy_ah(packet->ah);

err_up:
	up_read(&file->agent_mutex);

err:
	kfree(packet);
	return ret;
}
436 | ||
437 | static unsigned int ib_umad_poll(struct file *filp, struct poll_table_struct *wait) | |
438 | { | |
439 | struct ib_umad_file *file = filp->private_data; | |
440 | ||
441 | /* we will always be able to post a MAD send */ | |
442 | unsigned int mask = POLLOUT | POLLWRNORM; | |
443 | ||
444 | poll_wait(filp, &file->recv_wait, wait); | |
445 | ||
446 | if (!list_empty(&file->recv_list)) | |
447 | mask |= POLLIN | POLLRDNORM; | |
448 | ||
449 | return mask; | |
450 | } | |
451 | ||
/*
 * IB_USER_MAD_REGISTER_AGENT ioctl: register a MAD agent for this open
 * file in the first free agent[] slot and return the slot id to
 * userspace via the 'id' field of the request struct.  Holds
 * agent_mutex for write for the whole operation.
 */
static int ib_umad_reg_agent(struct ib_umad_file *file, unsigned long arg)
{
	struct ib_user_mad_reg_req ureq;
	struct ib_mad_reg_req req;
	struct ib_mad_agent *agent;
	int agent_id;
	int ret;

	down_write(&file->agent_mutex);

	if (copy_from_user(&ureq, (void __user *) arg, sizeof ureq)) {
		ret = -EFAULT;
		goto out;
	}

	/* Only QP0 (SMI) and QP1 (GSI) are valid MAD QPs. */
	if (ureq.qpn != 0 && ureq.qpn != 1) {
		ret = -EINVAL;
		goto out;
	}

	for (agent_id = 0; agent_id < IB_UMAD_MAX_AGENTS; ++agent_id)
		if (!file->agent[agent_id])
			goto found;

	ret = -ENOMEM;
	goto out;

found:
	/* mgmt_class == 0 means "send only": register with a NULL req. */
	if (ureq.mgmt_class) {
		req.mgmt_class         = ureq.mgmt_class;
		req.mgmt_class_version = ureq.mgmt_class_version;
		memcpy(req.method_mask, ureq.method_mask, sizeof req.method_mask);
		memcpy(req.oui,         ureq.oui,         sizeof req.oui);
	}

	agent = ib_register_mad_agent(file->port->ib_dev, file->port->port_num,
				      ureq.qpn ? IB_QPT_GSI : IB_QPT_SMI,
				      ureq.mgmt_class ? &req : NULL,
				      ureq.rmpp_version,
				      send_handler, recv_handler, file);
	if (IS_ERR(agent)) {
		ret = PTR_ERR(agent);
		goto out;
	}

	file->agent[agent_id] = agent;

	file->mr[agent_id] = ib_get_dma_mr(agent->qp->pd, IB_ACCESS_LOCAL_WRITE);
	if (IS_ERR(file->mr[agent_id])) {
		ret = -ENOMEM;
		goto err;
	}

	/* Hand the allocated slot id back to userspace. */
	if (put_user(agent_id,
		     (u32 __user *) (arg + offsetof(struct ib_user_mad_reg_req, id)))) {
		ret = -EFAULT;
		goto err_mr;
	}

	ret = 0;
	goto out;

err_mr:
	ib_dereg_mr(file->mr[agent_id]);

err:
	file->agent[agent_id] = NULL;
	ib_unregister_mad_agent(agent);

out:
	up_write(&file->agent_mutex);
	return ret;
}
525 | ||
526 | static int ib_umad_unreg_agent(struct ib_umad_file *file, unsigned long arg) | |
527 | { | |
528 | u32 id; | |
529 | int ret = 0; | |
530 | ||
531 | down_write(&file->agent_mutex); | |
532 | ||
533 | if (get_user(id, (u32 __user *) arg)) { | |
534 | ret = -EFAULT; | |
535 | goto out; | |
536 | } | |
537 | ||
538 | if (id < 0 || id >= IB_UMAD_MAX_AGENTS || !file->agent[id]) { | |
539 | ret = -EINVAL; | |
540 | goto out; | |
541 | } | |
542 | ||
543 | ib_dereg_mr(file->mr[id]); | |
544 | ib_unregister_mad_agent(file->agent[id]); | |
545 | file->agent[id] = NULL; | |
546 | ||
547 | out: | |
548 | up_write(&file->agent_mutex); | |
549 | return ret; | |
550 | } | |
551 | ||
cb183a06 HR |
552 | static long ib_umad_ioctl(struct file *filp, unsigned int cmd, |
553 | unsigned long arg) | |
1da177e4 LT |
554 | { |
555 | switch (cmd) { | |
556 | case IB_USER_MAD_REGISTER_AGENT: | |
557 | return ib_umad_reg_agent(filp->private_data, arg); | |
558 | case IB_USER_MAD_UNREGISTER_AGENT: | |
559 | return ib_umad_unreg_agent(filp->private_data, arg); | |
560 | default: | |
561 | return -ENOIOCTLCMD; | |
562 | } | |
563 | } | |
564 | ||
565 | static int ib_umad_open(struct inode *inode, struct file *filp) | |
566 | { | |
567 | struct ib_umad_port *port = | |
568 | container_of(inode->i_cdev, struct ib_umad_port, dev); | |
569 | struct ib_umad_file *file; | |
570 | ||
571 | file = kmalloc(sizeof *file, GFP_KERNEL); | |
572 | if (!file) | |
573 | return -ENOMEM; | |
574 | ||
575 | memset(file, 0, sizeof *file); | |
576 | ||
577 | spin_lock_init(&file->recv_lock); | |
578 | init_rwsem(&file->agent_mutex); | |
579 | INIT_LIST_HEAD(&file->recv_list); | |
580 | init_waitqueue_head(&file->recv_wait); | |
581 | ||
582 | file->port = port; | |
583 | filp->private_data = file; | |
584 | ||
585 | return 0; | |
586 | } | |
587 | ||
/*
 * release() on a umad device: unregister every remaining agent (and
 * its MR), free any packets still queued for read, then free the
 * per-file state.
 */
static int ib_umad_close(struct inode *inode, struct file *filp)
{
	struct ib_umad_file *file = filp->private_data;
	struct ib_umad_packet *packet, *tmp;
	int i;

	for (i = 0; i < IB_UMAD_MAX_AGENTS; ++i)
		if (file->agent[i]) {
			ib_dereg_mr(file->mr[i]);
			ib_unregister_mad_agent(file->agent[i]);
		}

	/* No more handlers can run: safe to drain recv_list unlocked. */
	list_for_each_entry_safe(packet, tmp, &file->recv_list, list)
		kfree(packet);

	kfree(file);

	return 0;
}
607 | ||
/* File operations for the per-port umad%d devices. */
static struct file_operations umad_fops = {
	.owner          = THIS_MODULE,
	.read           = ib_umad_read,
	.write          = ib_umad_write,
	.poll           = ib_umad_poll,
	.unlocked_ioctl = ib_umad_ioctl,
	.compat_ioctl   = ib_umad_ioctl,	/* ioctl args are layout-compatible */
	.open           = ib_umad_open,
	.release        = ib_umad_close
};
618 | ||
619 | static int ib_umad_sm_open(struct inode *inode, struct file *filp) | |
620 | { | |
621 | struct ib_umad_port *port = | |
622 | container_of(inode->i_cdev, struct ib_umad_port, sm_dev); | |
623 | struct ib_port_modify props = { | |
624 | .set_port_cap_mask = IB_PORT_SM | |
625 | }; | |
626 | int ret; | |
627 | ||
628 | if (filp->f_flags & O_NONBLOCK) { | |
629 | if (down_trylock(&port->sm_sem)) | |
630 | return -EAGAIN; | |
631 | } else { | |
632 | if (down_interruptible(&port->sm_sem)) | |
633 | return -ERESTARTSYS; | |
634 | } | |
635 | ||
636 | ret = ib_modify_port(port->ib_dev, port->port_num, 0, &props); | |
637 | if (ret) { | |
638 | up(&port->sm_sem); | |
639 | return ret; | |
640 | } | |
641 | ||
642 | filp->private_data = port; | |
643 | ||
644 | return 0; | |
645 | } | |
646 | ||
647 | static int ib_umad_sm_close(struct inode *inode, struct file *filp) | |
648 | { | |
649 | struct ib_umad_port *port = filp->private_data; | |
650 | struct ib_port_modify props = { | |
651 | .clr_port_cap_mask = IB_PORT_SM | |
652 | }; | |
653 | int ret; | |
654 | ||
655 | ret = ib_modify_port(port->ib_dev, port->port_num, 0, &props); | |
656 | up(&port->sm_sem); | |
657 | ||
658 | return ret; | |
659 | } | |
660 | ||
/* File operations for the per-port issm%d (SM-claim) devices. */
static struct file_operations umad_sm_fops = {
	.owner   = THIS_MODULE,
	.open    = ib_umad_sm_open,
	.release = ib_umad_sm_close
};
666 | ||
/* Hooks called by the IB core as devices come and go. */
static struct ib_client umad_client = {
	.name   = "umad",
	.add    = ib_umad_add_one,
	.remove = ib_umad_remove_one
};
672 | ||
673 | static ssize_t show_dev(struct class_device *class_dev, char *buf) | |
674 | { | |
675 | struct ib_umad_port *port = class_get_devdata(class_dev); | |
676 | ||
677 | if (class_dev == &port->class_dev) | |
678 | return print_dev_t(buf, port->dev.dev); | |
679 | else | |
680 | return print_dev_t(buf, port->sm_dev.dev); | |
681 | } | |
682 | static CLASS_DEVICE_ATTR(dev, S_IRUGO, show_dev, NULL); | |
683 | ||
684 | static ssize_t show_ibdev(struct class_device *class_dev, char *buf) | |
685 | { | |
686 | struct ib_umad_port *port = class_get_devdata(class_dev); | |
687 | ||
688 | return sprintf(buf, "%s\n", port->ib_dev->name); | |
689 | } | |
690 | static CLASS_DEVICE_ATTR(ibdev, S_IRUGO, show_ibdev, NULL); | |
691 | ||
692 | static ssize_t show_port(struct class_device *class_dev, char *buf) | |
693 | { | |
694 | struct ib_umad_port *port = class_get_devdata(class_dev); | |
695 | ||
696 | return sprintf(buf, "%d\n", port->port_num); | |
697 | } | |
698 | static CLASS_DEVICE_ATTR(port, S_IRUGO, show_port, NULL); | |
699 | ||
700 | static void ib_umad_release_dev(struct kref *ref) | |
701 | { | |
702 | struct ib_umad_device *dev = | |
703 | container_of(ref, struct ib_umad_device, ref); | |
704 | ||
705 | kfree(dev); | |
706 | } | |
707 | ||
/*
 * class_device release callback: delete the cdev that backed the
 * released class device, return its minor to dev_map, and drop the
 * reference on the owning ib_umad_device.
 */
static void ib_umad_release_port(struct class_device *class_dev)
{
	struct ib_umad_port *port = class_get_devdata(class_dev);

	if (class_dev == &port->class_dev) {
		cdev_del(&port->dev);
		clear_bit(port->devnum, dev_map);
	} else {
		cdev_del(&port->sm_dev);
		clear_bit(port->sm_devnum, dev_map);
	}

	kref_put(&port->umad_dev->ref, ib_umad_release_dev);
}
722 | ||
/* Class for both the umad and issm class devices. */
static struct class umad_class = {
	.name    = "infiniband_mad",
	.release = ib_umad_release_port
};
727 | ||
728 | static ssize_t show_abi_version(struct class *class, char *buf) | |
729 | { | |
730 | return sprintf(buf, "%d\n", IB_USER_MAD_ABI_VERSION); | |
731 | } | |
732 | static CLASS_ATTR(abi_version, S_IRUGO, show_abi_version, NULL); | |
733 | ||
734 | static int ib_umad_init_port(struct ib_device *device, int port_num, | |
735 | struct ib_umad_port *port) | |
736 | { | |
737 | spin_lock(&map_lock); | |
738 | port->devnum = find_first_zero_bit(dev_map, IB_UMAD_MAX_PORTS); | |
739 | if (port->devnum >= IB_UMAD_MAX_PORTS) { | |
740 | spin_unlock(&map_lock); | |
741 | return -1; | |
742 | } | |
743 | port->sm_devnum = find_next_zero_bit(dev_map, IB_UMAD_MAX_PORTS * 2, IB_UMAD_MAX_PORTS); | |
744 | if (port->sm_devnum >= IB_UMAD_MAX_PORTS * 2) { | |
745 | spin_unlock(&map_lock); | |
746 | return -1; | |
747 | } | |
748 | set_bit(port->devnum, dev_map); | |
749 | set_bit(port->sm_devnum, dev_map); | |
750 | spin_unlock(&map_lock); | |
751 | ||
752 | port->ib_dev = device; | |
753 | port->port_num = port_num; | |
754 | init_MUTEX(&port->sm_sem); | |
755 | ||
756 | cdev_init(&port->dev, &umad_fops); | |
757 | port->dev.owner = THIS_MODULE; | |
758 | kobject_set_name(&port->dev.kobj, "umad%d", port->devnum); | |
759 | if (cdev_add(&port->dev, base_dev + port->devnum, 1)) | |
760 | return -1; | |
761 | ||
762 | port->class_dev.class = &umad_class; | |
763 | port->class_dev.dev = device->dma_device; | |
764 | ||
765 | snprintf(port->class_dev.class_id, BUS_ID_SIZE, "umad%d", port->devnum); | |
766 | ||
767 | if (class_device_register(&port->class_dev)) | |
768 | goto err_cdev; | |
769 | ||
770 | class_set_devdata(&port->class_dev, port); | |
771 | kref_get(&port->umad_dev->ref); | |
772 | ||
773 | if (class_device_create_file(&port->class_dev, &class_device_attr_dev)) | |
774 | goto err_class; | |
775 | if (class_device_create_file(&port->class_dev, &class_device_attr_ibdev)) | |
776 | goto err_class; | |
777 | if (class_device_create_file(&port->class_dev, &class_device_attr_port)) | |
778 | goto err_class; | |
779 | ||
780 | cdev_init(&port->sm_dev, &umad_sm_fops); | |
781 | port->sm_dev.owner = THIS_MODULE; | |
782 | kobject_set_name(&port->dev.kobj, "issm%d", port->sm_devnum - IB_UMAD_MAX_PORTS); | |
783 | if (cdev_add(&port->sm_dev, base_dev + port->sm_devnum, 1)) | |
784 | return -1; | |
785 | ||
786 | port->sm_class_dev.class = &umad_class; | |
787 | port->sm_class_dev.dev = device->dma_device; | |
788 | ||
789 | snprintf(port->sm_class_dev.class_id, BUS_ID_SIZE, "issm%d", port->sm_devnum - IB_UMAD_MAX_PORTS); | |
790 | ||
791 | if (class_device_register(&port->sm_class_dev)) | |
792 | goto err_sm_cdev; | |
793 | ||
794 | class_set_devdata(&port->sm_class_dev, port); | |
795 | kref_get(&port->umad_dev->ref); | |
796 | ||
797 | if (class_device_create_file(&port->sm_class_dev, &class_device_attr_dev)) | |
798 | goto err_sm_class; | |
799 | if (class_device_create_file(&port->sm_class_dev, &class_device_attr_ibdev)) | |
800 | goto err_sm_class; | |
801 | if (class_device_create_file(&port->sm_class_dev, &class_device_attr_port)) | |
802 | goto err_sm_class; | |
803 | ||
804 | return 0; | |
805 | ||
806 | err_sm_class: | |
807 | class_device_unregister(&port->sm_class_dev); | |
808 | ||
809 | err_sm_cdev: | |
810 | cdev_del(&port->sm_dev); | |
811 | ||
812 | err_class: | |
813 | class_device_unregister(&port->class_dev); | |
814 | ||
815 | err_cdev: | |
816 | cdev_del(&port->dev); | |
817 | clear_bit(port->devnum, dev_map); | |
818 | ||
819 | return -1; | |
820 | } | |
821 | ||
/*
 * IB core "add" callback: allocate per-device state with one
 * ib_umad_port per physical port and initialize each port's devices.
 * Switches expose a single port 0; CAs/routers expose 1..phys_port_cnt.
 */
static void ib_umad_add_one(struct ib_device *device)
{
	struct ib_umad_device *umad_dev;
	int s, e, i;

	if (device->node_type == IB_NODE_SWITCH)
		s = e = 0;
	else {
		s = 1;
		e = device->phys_port_cnt;
	}

	umad_dev = kmalloc(sizeof *umad_dev +
			   (e - s + 1) * sizeof (struct ib_umad_port),
			   GFP_KERNEL);
	if (!umad_dev)
		return;

	memset(umad_dev, 0, sizeof *umad_dev +
	       (e - s + 1) * sizeof (struct ib_umad_port));

	kref_init(&umad_dev->ref);

	umad_dev->start_port = s;
	umad_dev->end_port   = e;

	for (i = s; i <= e; ++i) {
		umad_dev->port[i - s].umad_dev = umad_dev;

		if (ib_umad_init_port(device, i, &umad_dev->port[i - s]))
			goto err;
	}

	ib_set_client_data(device, &umad_client, umad_dev);

	return;

err:
	/* Unwind the ports initialized so far; unregistering the class
	 * devices drops the refs ib_umad_init_port() took. */
	while (--i >= s) {
		class_device_unregister(&umad_dev->port[i - s].class_dev);
		class_device_unregister(&umad_dev->port[i - s].sm_class_dev);
	}

	kref_put(&umad_dev->ref, ib_umad_release_dev);
}
867 | ||
/*
 * IB core "remove" callback: unregister every port's class devices
 * (which tears down the cdevs via ib_umad_release_port) and drop the
 * initial device reference.
 */
static void ib_umad_remove_one(struct ib_device *device)
{
	struct ib_umad_device *umad_dev = ib_get_client_data(device, &umad_client);
	int i;

	if (!umad_dev)
		return;

	for (i = 0; i <= umad_dev->end_port - umad_dev->start_port; ++i) {
		class_device_unregister(&umad_dev->port[i].class_dev);
		class_device_unregister(&umad_dev->port[i].sm_class_dev);
	}

	kref_put(&umad_dev->ref, ib_umad_release_dev);
}
883 | ||
/*
 * Module init: reserve the char device region (umad + issm halves),
 * register the sysfs class and its abi_version attribute, then hook
 * into the IB core as a client.
 */
static int __init ib_umad_init(void)
{
	int ret;

	spin_lock_init(&map_lock);

	ret = register_chrdev_region(base_dev, IB_UMAD_MAX_PORTS * 2,
				     "infiniband_mad");
	if (ret) {
		printk(KERN_ERR "user_mad: couldn't register device number\n");
		goto out;
	}

	ret = class_register(&umad_class);
	if (ret) {
		printk(KERN_ERR "user_mad: couldn't create class infiniband_mad\n");
		goto out_chrdev;
	}

	ret = class_create_file(&umad_class, &class_attr_abi_version);
	if (ret) {
		printk(KERN_ERR "user_mad: couldn't create abi_version attribute\n");
		goto out_class;
	}

	ret = ib_register_client(&umad_client);
	if (ret) {
		printk(KERN_ERR "user_mad: couldn't register ib_umad client\n");
		goto out_class;
	}

	return 0;

out_class:
	class_unregister(&umad_class);

out_chrdev:
	unregister_chrdev_region(base_dev, IB_UMAD_MAX_PORTS * 2);

out:
	return ret;
}
926 | ||
/* Module exit: undo ib_umad_init() in reverse order. */
static void __exit ib_umad_cleanup(void)
{
	ib_unregister_client(&umad_client);
	class_unregister(&umad_class);
	unregister_chrdev_region(base_dev, IB_UMAD_MAX_PORTS * 2);
}
933 | ||
/* Module entry/exit points. */
module_init(ib_umad_init);
module_exit(ib_umad_cleanup);