/*
 * The USB Monitor, inspired by Dave Harding's USBMon.
 *
 * This is a binary format reader.
 *
 * Copyright (C) 2006 Paolo Abeni (paolo.abeni@email.it)
 * Copyright (C) 2006,2007 Pete Zaitcev (zaitcev@redhat.com)
 */

#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/fs.h>
#include <linux/cdev.h>
#include <linux/usb.h>
#include <linux/poll.h>
#include <linux/compat.h>
#include <linux/mm.h>
#include <linux/smp_lock.h>

#include <asm/uaccess.h>

#include "usb_mon.h"

/*
 * Defined by USB 2.0 clause 9.3, table 9.2.
 */
#define SETUP_LEN 8

/* ioctl macros */
#define MON_IOC_MAGIC 0x92

#define MON_IOCQ_URB_LEN _IO(MON_IOC_MAGIC, 1)
/* #2 used to be MON_IOCX_URB, removed before it got into Linus tree */
#define MON_IOCG_STATS _IOR(MON_IOC_MAGIC, 3, struct mon_bin_stats)
#define MON_IOCT_RING_SIZE _IO(MON_IOC_MAGIC, 4)
#define MON_IOCQ_RING_SIZE _IO(MON_IOC_MAGIC, 5)
#define MON_IOCX_GET _IOW(MON_IOC_MAGIC, 6, struct mon_bin_get)
#define MON_IOCX_MFETCH _IOWR(MON_IOC_MAGIC, 7, struct mon_bin_mfetch)
#define MON_IOCH_MFLUSH _IO(MON_IOC_MAGIC, 8)
/* #9 was MON_IOCT_SETAPI */
#define MON_IOCX_GETX _IOW(MON_IOC_MAGIC, 10, struct mon_bin_get)

#ifdef CONFIG_COMPAT
#define MON_IOCX_GET32 _IOW(MON_IOC_MAGIC, 6, struct mon_bin_get32)
#define MON_IOCX_MFETCH32 _IOWR(MON_IOC_MAGIC, 7, struct mon_bin_mfetch32)
#define MON_IOCX_GETX32 _IOW(MON_IOC_MAGIC, 10, struct mon_bin_get32)
#endif

/*
 * Some architectures have enormous basic pages (16KB for ia64, 64KB for ppc).
 * But it's all right. Just use a simple way to make sure the chunk is never
 * smaller than a page.
 *
 * N.B. An application does not know our chunk size.
 *
 * Woops, get_zeroed_page() returns a single page. I guess we're stuck with
 * page-sized chunks for the time being.
 */
#define CHUNK_SIZE PAGE_SIZE
#define CHUNK_ALIGN(x) (((x)+CHUNK_SIZE-1) & ~(CHUNK_SIZE-1))
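/*
 * For example, with 4 KB pages CHUNK_ALIGN(5000) yields 8192: every
 * requested buffer size is rounded up to a whole number of chunks.
 */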

/*
 * The magic limit was calculated so that it allows the monitoring
 * application to pick data once in two ticks. This way, another application,
 * which presumably drives the bus, gets to hog CPU, yet we collect our data.
 * If HZ is 100, a 480 mbit/s bus drives 614 KB every jiffy. USB has an
 * enormous overhead built into the bus protocol, so we need about 1000 KB.
 *
 * This is still too much for most cases, where we just snoop a few
 * descriptor fetches for enumeration. So, the default is a "reasonable"
 * amount for systems with HZ=250 and incomplete bus saturation.
 *
 * XXX What about multi-megabyte URBs which take minutes to transfer?
 */
#define BUFF_MAX	CHUNK_ALIGN(1200*1024)
#define BUFF_DFL	CHUNK_ALIGN(300*1024)
#define BUFF_MIN	CHUNK_ALIGN(8*1024)
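/*
 * Arithmetic behind the figures above: reading "480 mbit" as 480*2^20
 * bits, the bus moves 480*2^20/8 = 62914560 bytes/s, which at HZ=100
 * is the 614 KB per jiffy quoted; picking data once in two ticks then
 * needs roughly 2*614 = 1229 KB, which the 1200 KB BUFF_MAX
 * approximates.
 */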

/*
 * The per-event API header (2 per URB).
 *
 * This structure is seen in userland as defined by the documentation.
 */
struct mon_bin_hdr {
	u64 id;			/* URB ID - from submission to callback */
	unsigned char type;	/* Same as in text API; extensible. */
	unsigned char xfer_type; /* ISO, Intr, Control, Bulk */
	unsigned char epnum;	/* Endpoint number and transfer direction */
	unsigned char devnum;	/* Device address */
	unsigned short busnum;	/* Bus number */
	char flag_setup;
	char flag_data;
	s64 ts_sec;		/* gettimeofday */
	s32 ts_usec;		/* gettimeofday */
	int status;
	unsigned int len_urb;	/* Length of data (submitted or actual) */
	unsigned int len_cap;	/* Delivered length */
	union {
		unsigned char setup[SETUP_LEN];	/* Only for Control S-type */
		struct iso_rec {
			int error_count;
			int numdesc;
		} iso;
	} s;
	int interval;
	int start_frame;
	unsigned int xfer_flags;
	unsigned int ndesc;	/* Actual number of ISO descriptors */
};
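/*
 * Note for readers of the stream: a URB produces a submission event
 * ('S') and a callback event ('C') which carry the same id, so the
 * two halves of a transfer can be paired up in userland.
 */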

/*
 * ISO vector, packed into the head of data stream.
 * This has to take 16 bytes to make sure that the end of buffer
 * wrap is not happening in the middle of a descriptor.
 */
struct mon_bin_isodesc {
	int iso_status;
	unsigned int iso_off;
	unsigned int iso_len;
	u32 _pad;
};

/* per file statistic */
struct mon_bin_stats {
	u32 queued;
	u32 dropped;
};

struct mon_bin_get {
	struct mon_bin_hdr __user *hdr;	/* Can be 48 bytes or 64. */
	void __user *data;
	size_t alloc;		/* Length of data (can be zero) */
};
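/*
 * A minimal userland sketch of fetching one event through this
 * structure (illustration only; the "/dev/usbmon0" node name is an
 * assumption, derived from the usbmon class device created below):
 *
 *	struct mon_bin_hdr hdr;
 *	unsigned char data[4096];
 *	struct mon_bin_get getb = { &hdr, data, sizeof(data) };
 *	int fd = open("/dev/usbmon0", O_RDONLY);
 *	if (fd >= 0 && ioctl(fd, MON_IOCX_GETX, &getb) == 0)
 *		printf("captured %u of %u bytes\n", hdr.len_cap, hdr.len_urb);
 */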

struct mon_bin_mfetch {
	u32 __user *offvec;	/* Vector of events fetched */
	u32 nfetch;		/* Number of events to fetch (out: fetched) */
	u32 nflush;		/* Number of events to flush */
};
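/*
 * Hypothetical flow for an mmap-based reader (a sketch; assumes fd is
 * an open usbmon node whose ring was already mmap-ed, and "process"
 * stands for any user code):
 *
 *	u32 offs[64];
 *	struct mon_bin_mfetch fetch = { offs, 64, nconsumed };
 *	if (ioctl(fd, MON_IOCX_MFETCH, &fetch) == 0)
 *		process(offs, fetch.nfetch);
 *
 * Each returned offset locates a mon_bin_hdr inside the mmap-ed ring;
 * events consumed in one round are flushed on the next call via nflush.
 */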

#ifdef CONFIG_COMPAT
struct mon_bin_get32 {
	u32 hdr32;
	u32 data32;
	u32 alloc32;
};

struct mon_bin_mfetch32 {
	u32 offvec32;
	u32 nfetch32;
	u32 nflush32;
};
#endif

/* Having these two values same prevents wrapping of the mon_bin_hdr */
#define PKT_ALIGN   64
#define PKT_SIZE    64
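/*
 * Example of the invariant: a 100-byte data capture allocates
 * 64 + 100 = 164 bytes, rounded up to 192, a multiple of PKT_ALIGN.
 * Every record thus starts on a 64-byte boundary, and because PKT_SIZE
 * equals PKT_ALIGN, the header itself can never straddle the
 * end-of-buffer wrap.
 */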

#define PKT_SZ_API0 48	/* API 0 (2.6.20) size */
#define PKT_SZ_API1 64	/* API 1 size: extra fields */

#define ISODESC_MAX 128	/* Same number as usbfs allows, 2048 bytes. */

/* max number of USB bus supported */
#define MON_BIN_MAX_MINOR 128

/*
 * The buffer: map of used pages.
 */
struct mon_pgmap {
	struct page *pg;
	unsigned char *ptr;	/* XXX just use page_to_virt everywhere? */
};

/*
 * This gets associated with an open file struct.
 */
struct mon_reader_bin {
	/* The buffer: one per open. */
	spinlock_t b_lock;		/* Protect b_cnt, b_in */
	unsigned int b_size;		/* Current size of the buffer - bytes */
	unsigned int b_cnt;		/* Bytes used */
	unsigned int b_in, b_out;	/* Offsets into buffer - bytes */
	unsigned int b_read;		/* Amount of read data in curr. pkt. */
	struct mon_pgmap *b_vec;	/* The map array */
	wait_queue_head_t b_wait;	/* Wait for data here */

	struct mutex fetch_lock;	/* Protect b_read, b_out */
	int mmap_active;

	/* A list of these is needed for "bus 0". Some time later. */
	struct mon_reader r;

	/* Stats */
	unsigned int cnt_lost;
};

static inline struct mon_bin_hdr *MON_OFF2HDR(const struct mon_reader_bin *rp,
    unsigned int offset)
{
	return (struct mon_bin_hdr *)
	    (rp->b_vec[offset / CHUNK_SIZE].ptr + offset % CHUNK_SIZE);
}

#define MON_RING_EMPTY(rp)	((rp)->b_cnt == 0)
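/*
 * Ring invariants, for reference: b_in is where the next event is
 * written, b_out is where the oldest unread event starts, and b_cnt
 * counts the bytes in between; the ring is empty when b_cnt is zero
 * and full when b_cnt reaches b_size.
 */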

static unsigned char xfer_to_pipe[4] = {
	PIPE_CONTROL, PIPE_ISOCHRONOUS, PIPE_BULK, PIPE_INTERRUPT
};
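/*
 * The table above is indexed by usb_endpoint_type(), i.e. by the
 * USB_ENDPOINT_XFER_{CONTROL,ISOC,BULK,INT} values 0..3 from ch9.h.
 */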

static struct class *mon_bin_class;
static dev_t mon_bin_dev0;
static struct cdev mon_bin_cdev;

static void mon_buff_area_fill(const struct mon_reader_bin *rp,
    unsigned int offset, unsigned int size);
static int mon_bin_wait_event(struct file *file, struct mon_reader_bin *rp);
static int mon_alloc_buff(struct mon_pgmap *map, int npages);
static void mon_free_buff(struct mon_pgmap *map, int npages);

/*
 * This is a "chunked memcpy". It does not manipulate any counters.
 * But it returns the new offset for repeated application.
 */
unsigned int mon_copy_to_buff(const struct mon_reader_bin *this,
    unsigned int off, const unsigned char *from, unsigned int length)
{
	unsigned int step_len;
	unsigned char *buf;
	unsigned int in_page;

	while (length) {
		/*
		 * Determine step_len.
		 */
		step_len = length;
		in_page = CHUNK_SIZE - (off & (CHUNK_SIZE-1));
		if (in_page < step_len)
			step_len = in_page;

		/*
		 * Copy data and advance pointers.
		 */
		buf = this->b_vec[off / CHUNK_SIZE].ptr + off % CHUNK_SIZE;
		memcpy(buf, from, step_len);
		if ((off += step_len) >= this->b_size) off = 0;
		from += step_len;
		length -= step_len;
	}
	return off;
}

/*
 * This is a little worse than the above because it's "chunked copy_to_user".
 * The return value is an error code, not an offset.
 */
static int copy_from_buf(const struct mon_reader_bin *this, unsigned int off,
    char __user *to, int length)
{
	unsigned int step_len;
	unsigned char *buf;
	unsigned int in_page;

	while (length) {
		/*
		 * Determine step_len.
		 */
		step_len = length;
		in_page = CHUNK_SIZE - (off & (CHUNK_SIZE-1));
		if (in_page < step_len)
			step_len = in_page;

		/*
		 * Copy data and advance pointers.
		 */
		buf = this->b_vec[off / CHUNK_SIZE].ptr + off % CHUNK_SIZE;
		if (copy_to_user(to, buf, step_len))
			return -EINVAL;
		if ((off += step_len) >= this->b_size) off = 0;
		to += step_len;
		length -= step_len;
	}
	return 0;
}

/*
 * Allocate an (aligned) area in the buffer.
 * This is called under b_lock.
 * Returns ~0 on failure.
 */
static unsigned int mon_buff_area_alloc(struct mon_reader_bin *rp,
    unsigned int size)
{
	unsigned int offset;

	size = (size + PKT_ALIGN-1) & ~(PKT_ALIGN-1);
	if (rp->b_cnt + size > rp->b_size)
		return ~0;
	offset = rp->b_in;
	rp->b_cnt += size;
	if ((rp->b_in += size) >= rp->b_size)
		rp->b_in -= rp->b_size;
	return offset;
}

/*
 * This is the same thing as mon_buff_area_alloc, only it does not allow
 * buffers to wrap. This is needed by applications which pass references
 * into mmap-ed buffers up their stacks (libpcap can do that).
 *
 * Currently, we always have the header stuck with the data, although
 * it is not strictly speaking necessary.
 *
 * When a buffer would wrap, we place a filler packet to mark the space.
 */
static unsigned int mon_buff_area_alloc_contiguous(struct mon_reader_bin *rp,
    unsigned int size)
{
	unsigned int offset;
	unsigned int fill_size;

	size = (size + PKT_ALIGN-1) & ~(PKT_ALIGN-1);
	if (rp->b_cnt + size > rp->b_size)
		return ~0;
	if (rp->b_in + size > rp->b_size) {
		/*
		 * This would wrap. Find if we still have space after
		 * skipping to the end of the buffer. If we do, place
		 * a filler packet and allocate a new packet.
		 */
		fill_size = rp->b_size - rp->b_in;
		if (rp->b_cnt + size + fill_size > rp->b_size)
			return ~0;
		mon_buff_area_fill(rp, rp->b_in, fill_size);

		offset = 0;
		rp->b_in = size;
		rp->b_cnt += size + fill_size;
	} else if (rp->b_in + size == rp->b_size) {
		offset = rp->b_in;
		rp->b_in = 0;
		rp->b_cnt += size;
	} else {
		offset = rp->b_in;
		rp->b_in += size;
		rp->b_cnt += size;
	}
	return offset;
}
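/*
 * Worked case: with 64 bytes left before the end of the ring and a
 * 192-byte request, a 64-byte '@' filler marks the tail, the new
 * record starts at offset 0, and b_cnt grows by 192 + 64.
 */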

/*
 * Return a few (kilo-)bytes to the head of the buffer.
 * This is used if a DMA fetch fails.
 */
static void mon_buff_area_shrink(struct mon_reader_bin *rp, unsigned int size)
{

	size = (size + PKT_ALIGN-1) & ~(PKT_ALIGN-1);
	rp->b_cnt -= size;
	if (rp->b_in < size)
		rp->b_in += rp->b_size;
	rp->b_in -= size;
}

/*
 * This has to be called under both b_lock and fetch_lock, because
 * it accesses both b_cnt and b_out.
 */
static void mon_buff_area_free(struct mon_reader_bin *rp, unsigned int size)
{

	size = (size + PKT_ALIGN-1) & ~(PKT_ALIGN-1);
	rp->b_cnt -= size;
	if ((rp->b_out += size) >= rp->b_size)
		rp->b_out -= rp->b_size;
}

static void mon_buff_area_fill(const struct mon_reader_bin *rp,
    unsigned int offset, unsigned int size)
{
	struct mon_bin_hdr *ep;

	ep = MON_OFF2HDR(rp, offset);
	memset(ep, 0, PKT_SIZE);
	ep->type = '@';
	ep->len_cap = size - PKT_SIZE;
}

static inline char mon_bin_get_setup(unsigned char *setupb,
    const struct urb *urb, char ev_type)
{

	if (urb->setup_packet == NULL)
		return 'Z';
	memcpy(setupb, urb->setup_packet, SETUP_LEN);
	return 0;
}

static char mon_bin_get_data(const struct mon_reader_bin *rp,
    unsigned int offset, struct urb *urb, unsigned int length)
{

	if (urb->dev->bus->uses_dma &&
	    (urb->transfer_flags & URB_NO_TRANSFER_DMA_MAP)) {
		mon_dmapeek_vec(rp, offset, urb->transfer_dma, length);
		return 0;
	}

	if (urb->transfer_buffer == NULL)
		return 'Z';

	mon_copy_to_buff(rp, offset, urb->transfer_buffer, length);
	return 0;
}

static void mon_bin_get_isodesc(const struct mon_reader_bin *rp,
    unsigned int offset, struct urb *urb, char ev_type, unsigned int ndesc)
{
	struct mon_bin_isodesc *dp;
	struct usb_iso_packet_descriptor *fp;

	fp = urb->iso_frame_desc;
	while (ndesc-- != 0) {
		dp = (struct mon_bin_isodesc *)
		    (rp->b_vec[offset / CHUNK_SIZE].ptr + offset % CHUNK_SIZE);
		dp->iso_status = fp->status;
		dp->iso_off = fp->offset;
		dp->iso_len = (ev_type == 'S') ? fp->length : fp->actual_length;
		dp->_pad = 0;
		if ((offset += sizeof(struct mon_bin_isodesc)) >= rp->b_size)
			offset = 0;
		fp++;
	}
}

static void mon_bin_event(struct mon_reader_bin *rp, struct urb *urb,
    char ev_type, int status)
{
	const struct usb_endpoint_descriptor *epd = &urb->ep->desc;
	unsigned long flags;
	struct timeval ts;
	unsigned int urb_length;
	unsigned int offset;
	unsigned int length;
	unsigned int ndesc, lendesc;
	unsigned char dir;
	struct mon_bin_hdr *ep;
	char data_tag = 0;

	do_gettimeofday(&ts);

	spin_lock_irqsave(&rp->b_lock, flags);

	/*
	 * Find the maximum allowable length, then allocate space.
	 */
	if (usb_endpoint_xfer_isoc(epd)) {
		if (urb->number_of_packets < 0) {
			ndesc = 0;
		} else if (urb->number_of_packets >= ISODESC_MAX) {
			ndesc = ISODESC_MAX;
		} else {
			ndesc = urb->number_of_packets;
		}
	} else {
		ndesc = 0;
	}
	lendesc = ndesc*sizeof(struct mon_bin_isodesc);

	urb_length = (ev_type == 'S') ?
	    urb->transfer_buffer_length : urb->actual_length;
	length = urb_length;

	if (length >= rp->b_size/5)
		length = rp->b_size/5;

	if (usb_urb_dir_in(urb)) {
		if (ev_type == 'S') {
			length = 0;
			data_tag = '<';
		}
		/* Cannot rely on endpoint number in case of control ep.0 */
		dir = USB_DIR_IN;
	} else {
		if (ev_type == 'C') {
			length = 0;
			data_tag = '>';
		}
		dir = 0;
	}

	if (rp->mmap_active) {
		offset = mon_buff_area_alloc_contiguous(rp,
		    length + PKT_SIZE + lendesc);
	} else {
		offset = mon_buff_area_alloc(rp, length + PKT_SIZE + lendesc);
	}
	if (offset == ~0) {
		rp->cnt_lost++;
		spin_unlock_irqrestore(&rp->b_lock, flags);
		return;
	}

	ep = MON_OFF2HDR(rp, offset);
	if ((offset += PKT_SIZE) >= rp->b_size) offset = 0;

	/*
	 * Fill the allocated area.
	 */
	memset(ep, 0, PKT_SIZE);
	ep->type = ev_type;
	ep->xfer_type = xfer_to_pipe[usb_endpoint_type(epd)];
	ep->epnum = dir | usb_endpoint_num(epd);
	ep->devnum = urb->dev->devnum;
	ep->busnum = urb->dev->bus->busnum;
	ep->id = (unsigned long) urb;
	ep->ts_sec = ts.tv_sec;
	ep->ts_usec = ts.tv_usec;
	ep->status = status;
	ep->len_urb = urb_length;
	ep->len_cap = length + lendesc;
	ep->xfer_flags = urb->transfer_flags;

	if (usb_endpoint_xfer_int(epd)) {
		ep->interval = urb->interval;
	} else if (usb_endpoint_xfer_isoc(epd)) {
		ep->interval = urb->interval;
		ep->start_frame = urb->start_frame;
		ep->s.iso.error_count = urb->error_count;
		ep->s.iso.numdesc = urb->number_of_packets;
	}

	if (usb_endpoint_xfer_control(epd) && ev_type == 'S') {
		ep->flag_setup = mon_bin_get_setup(ep->s.setup, urb, ev_type);
	} else {
		ep->flag_setup = '-';
	}

	if (ndesc != 0) {
		ep->ndesc = ndesc;
		mon_bin_get_isodesc(rp, offset, urb, ev_type, ndesc);
		if ((offset += lendesc) >= rp->b_size)
			offset -= rp->b_size;
	}

	if (length != 0) {
		ep->flag_data = mon_bin_get_data(rp, offset, urb, length);
		if (ep->flag_data != 0) {	/* Yes, it's 0x00, not '0' */
			ep->len_cap = 0;
			mon_buff_area_shrink(rp, length);
		}
	} else {
		ep->flag_data = data_tag;
	}

	spin_unlock_irqrestore(&rp->b_lock, flags);

	wake_up(&rp->b_wait);
}

static void mon_bin_submit(void *data, struct urb *urb)
{
	struct mon_reader_bin *rp = data;
	mon_bin_event(rp, urb, 'S', -EINPROGRESS);
}

static void mon_bin_complete(void *data, struct urb *urb, int status)
{
	struct mon_reader_bin *rp = data;
	mon_bin_event(rp, urb, 'C', status);
}

static void mon_bin_error(void *data, struct urb *urb, int error)
{
	struct mon_reader_bin *rp = data;
	unsigned long flags;
	unsigned int offset;
	struct mon_bin_hdr *ep;

	spin_lock_irqsave(&rp->b_lock, flags);

	offset = mon_buff_area_alloc(rp, PKT_SIZE);
	if (offset == ~0) {
		/* Not incrementing cnt_lost. Just because. */
		spin_unlock_irqrestore(&rp->b_lock, flags);
		return;
	}

	ep = MON_OFF2HDR(rp, offset);

	memset(ep, 0, PKT_SIZE);
	ep->type = 'E';
	ep->xfer_type = xfer_to_pipe[usb_endpoint_type(&urb->ep->desc)];
	ep->epnum = usb_urb_dir_in(urb) ? USB_DIR_IN : 0;
	ep->epnum |= usb_endpoint_num(&urb->ep->desc);
	ep->devnum = urb->dev->devnum;
	ep->busnum = urb->dev->bus->busnum;
	ep->id = (unsigned long) urb;
	ep->status = error;

	ep->flag_setup = '-';
	ep->flag_data = 'E';

	spin_unlock_irqrestore(&rp->b_lock, flags);

	wake_up(&rp->b_wait);
}

static int mon_bin_open(struct inode *inode, struct file *file)
{
	struct mon_bus *mbus;
	struct mon_reader_bin *rp;
	size_t size;
	int rc;

	lock_kernel();
	mutex_lock(&mon_lock);
	if ((mbus = mon_bus_lookup(iminor(inode))) == NULL) {
		mutex_unlock(&mon_lock);
		unlock_kernel();
		return -ENODEV;
	}
	if (mbus != &mon_bus0 && mbus->u_bus == NULL) {
		printk(KERN_ERR TAG ": consistency error on open\n");
		mutex_unlock(&mon_lock);
		unlock_kernel();
		return -ENODEV;
	}

	rp = kzalloc(sizeof(struct mon_reader_bin), GFP_KERNEL);
	if (rp == NULL) {
		rc = -ENOMEM;
		goto err_alloc;
	}
	spin_lock_init(&rp->b_lock);
	init_waitqueue_head(&rp->b_wait);
	mutex_init(&rp->fetch_lock);

	rp->b_size = BUFF_DFL;

	size = sizeof(struct mon_pgmap) * (rp->b_size/CHUNK_SIZE);
	if ((rp->b_vec = kzalloc(size, GFP_KERNEL)) == NULL) {
		rc = -ENOMEM;
		goto err_allocvec;
	}

	if ((rc = mon_alloc_buff(rp->b_vec, rp->b_size/CHUNK_SIZE)) < 0)
		goto err_allocbuff;

	rp->r.m_bus = mbus;
	rp->r.r_data = rp;
	rp->r.rnf_submit = mon_bin_submit;
	rp->r.rnf_error = mon_bin_error;
	rp->r.rnf_complete = mon_bin_complete;

	mon_reader_add(mbus, &rp->r);

	file->private_data = rp;
	mutex_unlock(&mon_lock);
	unlock_kernel();
	return 0;

err_allocbuff:
	kfree(rp->b_vec);
err_allocvec:
	kfree(rp);
err_alloc:
	mutex_unlock(&mon_lock);
	unlock_kernel();
	return rc;
}

/*
 * Extract an event from buffer and copy it to user space.
 * Wait if there is no event ready.
 * Returns zero or error.
 */
static int mon_bin_get_event(struct file *file, struct mon_reader_bin *rp,
    struct mon_bin_hdr __user *hdr, unsigned int hdrbytes,
    void __user *data, unsigned int nbytes)
{
	unsigned long flags;
	struct mon_bin_hdr *ep;
	size_t step_len;
	unsigned int offset;
	int rc;

	mutex_lock(&rp->fetch_lock);

	if ((rc = mon_bin_wait_event(file, rp)) < 0) {
		mutex_unlock(&rp->fetch_lock);
		return rc;
	}

	ep = MON_OFF2HDR(rp, rp->b_out);

	if (copy_to_user(hdr, ep, hdrbytes)) {
		mutex_unlock(&rp->fetch_lock);
		return -EFAULT;
	}

	step_len = min(ep->len_cap, nbytes);
	if ((offset = rp->b_out + PKT_SIZE) >= rp->b_size) offset = 0;

	if (copy_from_buf(rp, offset, data, step_len)) {
		mutex_unlock(&rp->fetch_lock);
		return -EFAULT;
	}

	spin_lock_irqsave(&rp->b_lock, flags);
	mon_buff_area_free(rp, PKT_SIZE + ep->len_cap);
	spin_unlock_irqrestore(&rp->b_lock, flags);
	rp->b_read = 0;

	mutex_unlock(&rp->fetch_lock);
	return 0;
}

static int mon_bin_release(struct inode *inode, struct file *file)
{
	struct mon_reader_bin *rp = file->private_data;
	struct mon_bus *mbus = rp->r.m_bus;

	mutex_lock(&mon_lock);

	if (mbus->nreaders <= 0) {
		printk(KERN_ERR TAG ": consistency error on close\n");
		mutex_unlock(&mon_lock);
		return 0;
	}
	mon_reader_del(mbus, &rp->r);

	mon_free_buff(rp->b_vec, rp->b_size/CHUNK_SIZE);
	kfree(rp->b_vec);
	kfree(rp);

	mutex_unlock(&mon_lock);
	return 0;
}

static ssize_t mon_bin_read(struct file *file, char __user *buf,
    size_t nbytes, loff_t *ppos)
{
	struct mon_reader_bin *rp = file->private_data;
	unsigned int hdrbytes = PKT_SZ_API0;
	unsigned long flags;
	struct mon_bin_hdr *ep;
	unsigned int offset;
	size_t step_len;
	char *ptr;
	ssize_t done = 0;
	int rc;

	mutex_lock(&rp->fetch_lock);

	if ((rc = mon_bin_wait_event(file, rp)) < 0) {
		mutex_unlock(&rp->fetch_lock);
		return rc;
	}

	ep = MON_OFF2HDR(rp, rp->b_out);

	if (rp->b_read < hdrbytes) {
		step_len = min(nbytes, (size_t)(hdrbytes - rp->b_read));
		ptr = ((char *)ep) + rp->b_read;
		if (step_len && copy_to_user(buf, ptr, step_len)) {
			mutex_unlock(&rp->fetch_lock);
			return -EFAULT;
		}
		nbytes -= step_len;
		buf += step_len;
		rp->b_read += step_len;
		done += step_len;
	}

	if (rp->b_read >= hdrbytes) {
		step_len = ep->len_cap;
		step_len -= rp->b_read - hdrbytes;
		if (step_len > nbytes)
			step_len = nbytes;
		offset = rp->b_out + PKT_SIZE;
		offset += rp->b_read - hdrbytes;
		if (offset >= rp->b_size)
			offset -= rp->b_size;
		if (copy_from_buf(rp, offset, buf, step_len)) {
			mutex_unlock(&rp->fetch_lock);
			return -EFAULT;
		}
		nbytes -= step_len;
		buf += step_len;
		rp->b_read += step_len;
		done += step_len;
	}

	/*
	 * Check if whole packet was read, and if so, jump to the next one.
	 */
	if (rp->b_read >= hdrbytes + ep->len_cap) {
		spin_lock_irqsave(&rp->b_lock, flags);
		mon_buff_area_free(rp, PKT_SIZE + ep->len_cap);
		spin_unlock_irqrestore(&rp->b_lock, flags);
		rp->b_read = 0;
	}

	mutex_unlock(&rp->fetch_lock);
	return done;
}

/*
 * Remove at most nevents from chunked buffer.
 * Returns the number of removed events.
 */
static int mon_bin_flush(struct mon_reader_bin *rp, unsigned nevents)
{
	unsigned long flags;
	struct mon_bin_hdr *ep;
	int i;

	mutex_lock(&rp->fetch_lock);
	spin_lock_irqsave(&rp->b_lock, flags);
	for (i = 0; i < nevents; ++i) {
		if (MON_RING_EMPTY(rp))
			break;

		ep = MON_OFF2HDR(rp, rp->b_out);
		mon_buff_area_free(rp, PKT_SIZE + ep->len_cap);
	}
	spin_unlock_irqrestore(&rp->b_lock, flags);
	rp->b_read = 0;
	mutex_unlock(&rp->fetch_lock);
	return i;
}

/*
 * Fetch at most max event offsets into the buffer and put them into vec.
 * The events are usually freed later with mon_bin_flush.
 * Return the effective number of events fetched.
 */
static int mon_bin_fetch(struct file *file, struct mon_reader_bin *rp,
    u32 __user *vec, unsigned int max)
{
	unsigned int cur_out;
	unsigned int bytes, avail;
	unsigned int size;
	unsigned int nevents;
	struct mon_bin_hdr *ep;
	unsigned long flags;
	int rc;

	mutex_lock(&rp->fetch_lock);

	if ((rc = mon_bin_wait_event(file, rp)) < 0) {
		mutex_unlock(&rp->fetch_lock);
		return rc;
	}

	spin_lock_irqsave(&rp->b_lock, flags);
	avail = rp->b_cnt;
	spin_unlock_irqrestore(&rp->b_lock, flags);

	cur_out = rp->b_out;
	nevents = 0;
	bytes = 0;
	while (bytes < avail) {
		if (nevents >= max)
			break;

		ep = MON_OFF2HDR(rp, cur_out);
		if (put_user(cur_out, &vec[nevents])) {
			mutex_unlock(&rp->fetch_lock);
			return -EFAULT;
		}

		nevents++;
		size = ep->len_cap + PKT_SIZE;
		size = (size + PKT_ALIGN-1) & ~(PKT_ALIGN-1);
		if ((cur_out += size) >= rp->b_size)
			cur_out -= rp->b_size;
		bytes += size;
	}

	mutex_unlock(&rp->fetch_lock);
	return nevents;
}

/*
 * Count events. This is almost the same as the above mon_bin_fetch,
 * only we do not store offsets into user vector, and we have no limit.
 */
static int mon_bin_queued(struct mon_reader_bin *rp)
{
	unsigned int cur_out;
	unsigned int bytes, avail;
	unsigned int size;
	unsigned int nevents;
	struct mon_bin_hdr *ep;
	unsigned long flags;

	mutex_lock(&rp->fetch_lock);

	spin_lock_irqsave(&rp->b_lock, flags);
	avail = rp->b_cnt;
	spin_unlock_irqrestore(&rp->b_lock, flags);

	cur_out = rp->b_out;
	nevents = 0;
	bytes = 0;
	while (bytes < avail) {
		ep = MON_OFF2HDR(rp, cur_out);

		nevents++;
		size = ep->len_cap + PKT_SIZE;
		size = (size + PKT_ALIGN-1) & ~(PKT_ALIGN-1);
		if ((cur_out += size) >= rp->b_size)
			cur_out -= rp->b_size;
		bytes += size;
	}

	mutex_unlock(&rp->fetch_lock);
	return nevents;
}

/*
 */
static int mon_bin_ioctl(struct inode *inode, struct file *file,
    unsigned int cmd, unsigned long arg)
{
	struct mon_reader_bin *rp = file->private_data;
	// struct mon_bus* mbus = rp->r.m_bus;
	int ret = 0;
	struct mon_bin_hdr *ep;
	unsigned long flags;

	switch (cmd) {

	case MON_IOCQ_URB_LEN:
		/*
		 * N.B. This only returns the size of data, without the header.
		 */
		spin_lock_irqsave(&rp->b_lock, flags);
		if (!MON_RING_EMPTY(rp)) {
			ep = MON_OFF2HDR(rp, rp->b_out);
			ret = ep->len_cap;
		}
		spin_unlock_irqrestore(&rp->b_lock, flags);
		break;

	case MON_IOCQ_RING_SIZE:
		ret = rp->b_size;
		break;

	case MON_IOCT_RING_SIZE:
		/*
		 * Changing the buffer size will flush its contents; the new
		 * buffer is allocated before releasing the old one to be sure
		 * the device will stay functional also in case of memory
		 * pressure.
		 */
		{
		int size;
		struct mon_pgmap *vec;

		if (arg < BUFF_MIN || arg > BUFF_MAX)
			return -EINVAL;

		size = CHUNK_ALIGN(arg);
		if ((vec = kzalloc(sizeof(struct mon_pgmap) * (size/CHUNK_SIZE),
		    GFP_KERNEL)) == NULL) {
			ret = -ENOMEM;
			break;
		}

		ret = mon_alloc_buff(vec, size/CHUNK_SIZE);
		if (ret < 0) {
			kfree(vec);
			break;
		}

		mutex_lock(&rp->fetch_lock);
		spin_lock_irqsave(&rp->b_lock, flags);
		/* The old map holds rp->b_size/CHUNK_SIZE pages, not size/... */
		mon_free_buff(rp->b_vec, rp->b_size/CHUNK_SIZE);
		kfree(rp->b_vec);
		rp->b_vec  = vec;
		rp->b_size = size;
		rp->b_read = rp->b_in = rp->b_out = rp->b_cnt = 0;
		rp->cnt_lost = 0;
		spin_unlock_irqrestore(&rp->b_lock, flags);
		mutex_unlock(&rp->fetch_lock);
		}
		break;

	case MON_IOCH_MFLUSH:
		ret = mon_bin_flush(rp, arg);
		break;

	case MON_IOCX_GET:
	case MON_IOCX_GETX:
		{
		struct mon_bin_get getb;

		if (copy_from_user(&getb, (void __user *)arg,
		    sizeof(struct mon_bin_get)))
			return -EFAULT;

		if (getb.alloc > 0x10000000)	/* Want to cast to u32 */
			return -EINVAL;
		ret = mon_bin_get_event(file, rp, getb.hdr,
		    (cmd == MON_IOCX_GET)? PKT_SZ_API0: PKT_SZ_API1,
		    getb.data, (unsigned int)getb.alloc);
		}
		break;

	case MON_IOCX_MFETCH:
		{
		struct mon_bin_mfetch mfetch;
		struct mon_bin_mfetch __user *uptr;

		uptr = (struct mon_bin_mfetch __user *)arg;

		if (copy_from_user(&mfetch, uptr, sizeof(mfetch)))
			return -EFAULT;

		if (mfetch.nflush) {
			ret = mon_bin_flush(rp, mfetch.nflush);
			if (ret < 0)
				return ret;
			if (put_user(ret, &uptr->nflush))
				return -EFAULT;
		}
		ret = mon_bin_fetch(file, rp, mfetch.offvec, mfetch.nfetch);
		if (ret < 0)
			return ret;
		if (put_user(ret, &uptr->nfetch))
			return -EFAULT;
		ret = 0;
		}
		break;

	case MON_IOCG_STATS: {
		struct mon_bin_stats __user *sp;
		unsigned int nevents;
		unsigned int ndropped;

		spin_lock_irqsave(&rp->b_lock, flags);
		ndropped = rp->cnt_lost;
		rp->cnt_lost = 0;
		spin_unlock_irqrestore(&rp->b_lock, flags);
		nevents = mon_bin_queued(rp);

		sp = (struct mon_bin_stats __user *)arg;
		/* Report the snapshot; cnt_lost was already zeroed above. */
		if (put_user(ndropped, &sp->dropped))
			return -EFAULT;
		if (put_user(nevents, &sp->queued))
			return -EFAULT;

		}
		break;

	default:
		return -ENOTTY;
	}

	return ret;
}

#ifdef CONFIG_COMPAT
static long mon_bin_compat_ioctl(struct file *file,
    unsigned int cmd, unsigned long arg)
{
	struct mon_reader_bin *rp = file->private_data;
	int ret;

	switch (cmd) {

	case MON_IOCX_GET32:
	case MON_IOCX_GETX32:
		{
		struct mon_bin_get32 getb;

		if (copy_from_user(&getb, (void __user *)arg,
		    sizeof(struct mon_bin_get32)))
			return -EFAULT;

		ret = mon_bin_get_event(file, rp, compat_ptr(getb.hdr32),
		    (cmd == MON_IOCX_GET32)? PKT_SZ_API0: PKT_SZ_API1,
		    compat_ptr(getb.data32), getb.alloc32);
		if (ret < 0)
			return ret;
		}
		return 0;

	case MON_IOCX_MFETCH32:
		{
		struct mon_bin_mfetch32 mfetch;
		struct mon_bin_mfetch32 __user *uptr;

		uptr = (struct mon_bin_mfetch32 __user *) compat_ptr(arg);

		if (copy_from_user(&mfetch, uptr, sizeof(mfetch)))
			return -EFAULT;

		if (mfetch.nflush32) {
			ret = mon_bin_flush(rp, mfetch.nflush32);
			if (ret < 0)
				return ret;
			if (put_user(ret, &uptr->nflush32))
				return -EFAULT;
		}
		ret = mon_bin_fetch(file, rp, compat_ptr(mfetch.offvec32),
		    mfetch.nfetch32);
		if (ret < 0)
			return ret;
		if (put_user(ret, &uptr->nfetch32))
			return -EFAULT;
		}
		return 0;

	case MON_IOCG_STATS:
		return mon_bin_ioctl(NULL, file, cmd,
		    (unsigned long) compat_ptr(arg));

	case MON_IOCQ_URB_LEN:
	case MON_IOCQ_RING_SIZE:
	case MON_IOCT_RING_SIZE:
	case MON_IOCH_MFLUSH:
		return mon_bin_ioctl(NULL, file, cmd, arg);

	default:
		;
	}
	return -ENOTTY;
}
#endif /* CONFIG_COMPAT */

static unsigned int
mon_bin_poll(struct file *file, struct poll_table_struct *wait)
{
	struct mon_reader_bin *rp = file->private_data;
	unsigned int mask = 0;
	unsigned long flags;

	if (file->f_mode & FMODE_READ)
		poll_wait(file, &rp->b_wait, wait);

	spin_lock_irqsave(&rp->b_lock, flags);
	if (!MON_RING_EMPTY(rp))
		mask |= POLLIN | POLLRDNORM;	/* readable */
	spin_unlock_irqrestore(&rp->b_lock, flags);
	return mask;
}

/*
 * open and close: just keep track of how many times the device is
 * mapped, to use the proper memory allocation function.
 */
static void mon_bin_vma_open(struct vm_area_struct *vma)
{
	struct mon_reader_bin *rp = vma->vm_private_data;
	rp->mmap_active++;
}

static void mon_bin_vma_close(struct vm_area_struct *vma)
{
	struct mon_reader_bin *rp = vma->vm_private_data;
	rp->mmap_active--;
}

/*
 * Map ring pages to user space.
 */
static int mon_bin_vma_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	struct mon_reader_bin *rp = vma->vm_private_data;
	unsigned long offset, chunk_idx;
	struct page *pageptr;

	offset = vmf->pgoff << PAGE_SHIFT;
	if (offset >= rp->b_size)
		return VM_FAULT_SIGBUS;
	chunk_idx = offset / CHUNK_SIZE;
	pageptr = rp->b_vec[chunk_idx].pg;
	get_page(pageptr);
	vmf->page = pageptr;
	return 0;
}

static struct vm_operations_struct mon_bin_vm_ops = {
	.open =		mon_bin_vma_open,
	.close =	mon_bin_vma_close,
	.fault =	mon_bin_vma_fault,
};

static int mon_bin_mmap(struct file *filp, struct vm_area_struct *vma)
{
	/* don't do anything here: "fault" will set up page table entries */
	vma->vm_ops = &mon_bin_vm_ops;
	vma->vm_flags |= VM_RESERVED;
	vma->vm_private_data = filp->private_data;
	mon_bin_vma_open(vma);
	return 0;
}

static const struct file_operations mon_fops_binary = {
	.owner =	THIS_MODULE,
	.open =		mon_bin_open,
	.llseek =	no_llseek,
	.read =		mon_bin_read,
	/* .write =	mon_text_write, */
	.poll =		mon_bin_poll,
	.ioctl =	mon_bin_ioctl,
#ifdef CONFIG_COMPAT
	.compat_ioctl =	mon_bin_compat_ioctl,
#endif
	.release =	mon_bin_release,
	.mmap =		mon_bin_mmap,
};

static int mon_bin_wait_event(struct file *file, struct mon_reader_bin *rp)
{
	DECLARE_WAITQUEUE(waita, current);
	unsigned long flags;

	add_wait_queue(&rp->b_wait, &waita);
	set_current_state(TASK_INTERRUPTIBLE);

	spin_lock_irqsave(&rp->b_lock, flags);
	while (MON_RING_EMPTY(rp)) {
		spin_unlock_irqrestore(&rp->b_lock, flags);

		if (file->f_flags & O_NONBLOCK) {
			set_current_state(TASK_RUNNING);
			remove_wait_queue(&rp->b_wait, &waita);
			return -EWOULDBLOCK; /* Same as EAGAIN in Linux */
		}
		schedule();
		if (signal_pending(current)) {
			remove_wait_queue(&rp->b_wait, &waita);
			return -EINTR;
		}
		set_current_state(TASK_INTERRUPTIBLE);

		spin_lock_irqsave(&rp->b_lock, flags);
	}
	spin_unlock_irqrestore(&rp->b_lock, flags);

	set_current_state(TASK_RUNNING);
	remove_wait_queue(&rp->b_wait, &waita);
	return 0;
}

static int mon_alloc_buff(struct mon_pgmap *map, int npages)
{
	int n;
	unsigned long vaddr;

	for (n = 0; n < npages; n++) {
		vaddr = get_zeroed_page(GFP_KERNEL);
		if (vaddr == 0) {
			while (n-- != 0)
				free_page((unsigned long) map[n].ptr);
			return -ENOMEM;
		}
		map[n].ptr = (unsigned char *) vaddr;
		map[n].pg = virt_to_page(vaddr);
	}
	return 0;
}

static void mon_free_buff(struct mon_pgmap *map, int npages)
{
	int n;

	for (n = 0; n < npages; n++)
		free_page((unsigned long) map[n].ptr);
}
int mon_bin_add(struct mon_bus *mbus, const struct usb_bus *ubus)
{
	struct device *dev;
	unsigned minor = ubus? ubus->busnum: 0;

	if (minor >= MON_BIN_MAX_MINOR)
		return 0;

	dev = device_create(mon_bin_class, ubus ? ubus->controller : NULL,
			    MKDEV(MAJOR(mon_bin_dev0), minor), NULL,
			    "usbmon%d", minor);
	if (IS_ERR(dev))
		return 0;

	mbus->classdev = dev;
	return 1;
}

void mon_bin_del(struct mon_bus *mbus)
{
	device_destroy(mon_bin_class, mbus->classdev->devt);
}

int __init mon_bin_init(void)
{
	int rc;

	mon_bin_class = class_create(THIS_MODULE, "usbmon");
	if (IS_ERR(mon_bin_class)) {
		rc = PTR_ERR(mon_bin_class);
		goto err_class;
	}

	rc = alloc_chrdev_region(&mon_bin_dev0, 0, MON_BIN_MAX_MINOR, "usbmon");
	if (rc < 0)
		goto err_dev;

	cdev_init(&mon_bin_cdev, &mon_fops_binary);
	mon_bin_cdev.owner = THIS_MODULE;

	rc = cdev_add(&mon_bin_cdev, mon_bin_dev0, MON_BIN_MAX_MINOR);
	if (rc < 0)
		goto err_add;

	return 0;

err_add:
	unregister_chrdev_region(mon_bin_dev0, MON_BIN_MAX_MINOR);
err_dev:
	class_destroy(mon_bin_class);
err_class:
	return rc;
}

void mon_bin_exit(void)
{
	cdev_del(&mon_bin_cdev);
	unregister_chrdev_region(mon_bin_dev0, MON_BIN_MAX_MINOR);
	class_destroy(mon_bin_class);
}