/*
 * The USB Monitor, inspired by Dave Harding's USBMon.
 *
 * This is a binary format reader.
 *
 * Copyright (C) 2006 Paolo Abeni (paolo.abeni@email.it)
 * Copyright (C) 2006 Pete Zaitcev (zaitcev@redhat.com)
 */

#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/fs.h>
#include <linux/cdev.h>
#include <linux/usb.h>
#include <linux/poll.h>
#include <linux/compat.h>
#include <linux/mm.h>

#include <asm/uaccess.h>

#include "usb_mon.h"

/*
 * Defined by USB 2.0 clause 9.3, table 9.2.
 */
#define SETUP_LEN 8

/* ioctl macros */
#define MON_IOC_MAGIC 0x92

#define MON_IOCQ_URB_LEN _IO(MON_IOC_MAGIC, 1)
/* #2 used to be MON_IOCX_URB, removed before it got into Linus tree */
#define MON_IOCG_STATS _IOR(MON_IOC_MAGIC, 3, struct mon_bin_stats)
#define MON_IOCT_RING_SIZE _IO(MON_IOC_MAGIC, 4)
#define MON_IOCQ_RING_SIZE _IO(MON_IOC_MAGIC, 5)
#define MON_IOCX_GET _IOW(MON_IOC_MAGIC, 6, struct mon_bin_get)
#define MON_IOCX_MFETCH _IOWR(MON_IOC_MAGIC, 7, struct mon_bin_mfetch)
#define MON_IOCH_MFLUSH _IO(MON_IOC_MAGIC, 8)
#ifdef CONFIG_COMPAT
#define MON_IOCX_GET32 _IOW(MON_IOC_MAGIC, 6, struct mon_bin_get32)
#define MON_IOCX_MFETCH32 _IOWR(MON_IOC_MAGIC, 7, struct mon_bin_mfetch32)
#endif

/*
 * Some architectures have enormous basic pages (16KB for ia64, 64KB for ppc).
 * But it's all right. Just use a simple way to make sure the chunk is never
 * smaller than a page.
 *
 * N.B. An application does not know our chunk size.
 *
 * Woops, get_zeroed_page() returns a single page. I guess we're stuck with
 * page-sized chunks for the time being.
 */
#define CHUNK_SIZE PAGE_SIZE
#define CHUNK_ALIGN(x) (((x)+CHUNK_SIZE-1) & ~(CHUNK_SIZE-1))
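
/*
 * For example, with 4 KB pages CHUNK_ALIGN(1) is 4096, and
 * CHUNK_ALIGN(300*1024) is 307200, already page-aligned, so BUFF_DFL
 * below comes out to exactly 75 chunks.
 */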

/*
 * The magic limit was calculated so that it allows the monitoring
 * application to pick data once in two ticks. This way, another application,
 * which presumably drives the bus, gets to hog CPU, yet we collect our data.
 * If HZ is 100, a 480 Mbit/s bus drives 614 KB every jiffy. USB has an
 * enormous overhead built into the bus protocol, so we need about 1000 KB.
 *
 * This is still too much for most cases, where we just snoop a few
 * descriptor fetches for enumeration. So, the default is a "reasonable"
 * amount for systems with HZ=250 and incomplete bus saturation.
 *
 * XXX What about multi-megabyte URBs which take minutes to transfer?
 */
#define BUFF_MAX CHUNK_ALIGN(1200*1024)
#define BUFF_DFL CHUNK_ALIGN(300*1024)
#define BUFF_MIN CHUNK_ALIGN(8*1024)

/*
 * The per-event API header (2 per URB).
 *
 * This structure is seen in userland as defined by the documentation.
 */
struct mon_bin_hdr {
	u64 id;			/* URB ID - from submission to callback */
	unsigned char type;	/* Same as in text API; extensible. */
	unsigned char xfer_type; /* ISO, Intr, Control, Bulk */
	unsigned char epnum;	/* Endpoint number and transfer direction */
	unsigned char devnum;	/* Device address */
	unsigned short busnum;	/* Bus number */
	char flag_setup;
	char flag_data;
	s64 ts_sec;		/* gettimeofday */
	s32 ts_usec;		/* gettimeofday */
	int status;
	unsigned int len_urb;	/* Length of data (submitted or actual) */
	unsigned int len_cap;	/* Delivered length */
	unsigned char setup[SETUP_LEN];	/* Only for Control S-type */
};

/* per file statistic */
struct mon_bin_stats {
	u32 queued;
	u32 dropped;
};

struct mon_bin_get {
	struct mon_bin_hdr __user *hdr;	/* Only 48 bytes, not 64. */
	void __user *data;
	size_t alloc;		/* Length of data (can be zero) */
};
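
/*
 * A hedged sketch of the matching userspace call, not part of this
 * driver; "fd" is an open usbmon character device and the 4 KB data
 * buffer is an arbitrary choice:
 *
 *	struct mon_bin_hdr hdr;
 *	unsigned char data[4096];
 *	struct mon_bin_get getb = { &hdr, data, sizeof(data) };
 *
 *	if (ioctl(fd, MON_IOCX_GET, &getb) == 0) {
 *		... hdr describes one event, and data holds
 *		... min(hdr.len_cap, getb.alloc) bytes of its payload
 *	}
 */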

struct mon_bin_mfetch {
	u32 __user *offvec;	/* Vector of events fetched */
	u32 nfetch;		/* Number of events to fetch (out: fetched) */
	u32 nflush;		/* Number of events to flush */
};
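
/*
 * A minimal, hypothetical fetch/flush loop for the mmap path; "fd" and
 * "ring" (the mmap-ed buffer, as a char pointer) are assumed to exist,
 * and 32 is arbitrary:
 *
 *	u32 offs[32];
 *	struct mon_bin_mfetch fetch = { offs, 32, 0 };
 *	int i;
 *
 *	for (;;) {
 *		if (ioctl(fd, MON_IOCX_MFETCH, &fetch) < 0)
 *			break;
 *		for (i = 0; i < fetch.nfetch; i++) {
 *			struct mon_bin_hdr *hdr = (void *)(ring + offs[i]);
 *			... consume hdr and the data that follows it ...
 *		}
 *		fetch.nflush = fetch.nfetch;	... free consumed events
 *		fetch.nfetch = 32;
 *	}
 */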

#ifdef CONFIG_COMPAT
struct mon_bin_get32 {
	u32 hdr32;
	u32 data32;
	u32 alloc32;
};

struct mon_bin_mfetch32 {
	u32 offvec32;
	u32 nfetch32;
	u32 nflush32;
};
#endif

/*
 * Having these two values the same prevents wrapping of the mon_bin_hdr:
 * records start at PKT_ALIGN-aligned offsets and the ring size is a
 * multiple of PKT_ALIGN, so the PKT_SIZE header never straddles the end
 * of the buffer.
 */
#define PKT_ALIGN 64
#define PKT_SIZE 64

/* max number of USB buses supported */
#define MON_BIN_MAX_MINOR 128

/*
 * The buffer: map of used pages.
 */
struct mon_pgmap {
	struct page *pg;
	unsigned char *ptr;	/* XXX just use page_to_virt everywhere? */
};

/*
 * This gets associated with an open file struct.
 */
struct mon_reader_bin {
	/* The buffer: one per open. */
	spinlock_t b_lock;		/* Protect b_cnt, b_in */
	unsigned int b_size;		/* Current size of the buffer - bytes */
	unsigned int b_cnt;		/* Bytes used */
	unsigned int b_in, b_out;	/* Offsets into buffer - bytes */
	unsigned int b_read;		/* Amount of read data in curr. pkt. */
	struct mon_pgmap *b_vec;	/* The map array */
	wait_queue_head_t b_wait;	/* Wait for data here */

	struct mutex fetch_lock;	/* Protect b_read, b_out */
	int mmap_active;

	/* A list of these is needed for "bus 0". Some time later. */
	struct mon_reader r;

	/* Stats */
	unsigned int cnt_lost;
};

static inline struct mon_bin_hdr *MON_OFF2HDR(const struct mon_reader_bin *rp,
    unsigned int offset)
{
	return (struct mon_bin_hdr *)
	    (rp->b_vec[offset / CHUNK_SIZE].ptr + offset % CHUNK_SIZE);
}

#define MON_RING_EMPTY(rp)	((rp)->b_cnt == 0)

static dev_t mon_bin_dev0;
static struct cdev mon_bin_cdev;

static void mon_buff_area_fill(const struct mon_reader_bin *rp,
    unsigned int offset, unsigned int size);
static int mon_bin_wait_event(struct file *file, struct mon_reader_bin *rp);
static int mon_alloc_buff(struct mon_pgmap *map, int npages);
static void mon_free_buff(struct mon_pgmap *map, int npages);

/*
 * This is a "chunked memcpy". It does not manipulate any counters.
 * But it returns the new offset for repeated application.
 */
unsigned int mon_copy_to_buff(const struct mon_reader_bin *this,
    unsigned int off, const unsigned char *from, unsigned int length)
{
	unsigned int step_len;
	unsigned char *buf;
	unsigned int in_page;

	while (length) {
		/*
		 * Determine step_len.
		 */
		step_len = length;
		in_page = CHUNK_SIZE - (off & (CHUNK_SIZE-1));
		if (in_page < step_len)
			step_len = in_page;

		/*
		 * Copy data and advance pointers.
		 */
		buf = this->b_vec[off / CHUNK_SIZE].ptr + off % CHUNK_SIZE;
		memcpy(buf, from, step_len);
		if ((off += step_len) >= this->b_size)
			off = 0;
		from += step_len;
		length -= step_len;
	}
	return off;
}

/*
 * This is a little worse than the above because it's "chunked copy_to_user".
 * The return value is an error code, not an offset.
 */
static int copy_from_buf(const struct mon_reader_bin *this, unsigned int off,
    char __user *to, int length)
{
	unsigned int step_len;
	unsigned char *buf;
	unsigned int in_page;

	while (length) {
		/*
		 * Determine step_len.
		 */
		step_len = length;
		in_page = CHUNK_SIZE - (off & (CHUNK_SIZE-1));
		if (in_page < step_len)
			step_len = in_page;

		/*
		 * Copy data and advance pointers.
		 */
		buf = this->b_vec[off / CHUNK_SIZE].ptr + off % CHUNK_SIZE;
		if (copy_to_user(to, buf, step_len))
			return -EINVAL;
		if ((off += step_len) >= this->b_size)
			off = 0;
		to += step_len;
		length -= step_len;
	}
	return 0;
}

/*
 * Allocate an (aligned) area in the buffer.
 * This is called under b_lock.
 * Returns ~0 on failure.
 */
static unsigned int mon_buff_area_alloc(struct mon_reader_bin *rp,
    unsigned int size)
{
	unsigned int offset;

	size = (size + PKT_ALIGN-1) & ~(PKT_ALIGN-1);
	if (rp->b_cnt + size > rp->b_size)
		return ~0;
	offset = rp->b_in;
	rp->b_cnt += size;
	if ((rp->b_in += size) >= rp->b_size)
		rp->b_in -= rp->b_size;
	return offset;
}

/*
 * This is the same thing as mon_buff_area_alloc, only it does not allow
 * buffers to wrap. This is needed by applications which pass references
 * into mmap-ed buffers up their stacks (libpcap can do that).
 *
 * Currently, we always have the header stuck with the data, although
 * it is not strictly speaking necessary.
 *
 * When a buffer would wrap, we place a filler packet to mark the space.
 */
static unsigned int mon_buff_area_alloc_contiguous(struct mon_reader_bin *rp,
    unsigned int size)
{
	unsigned int offset;
	unsigned int fill_size;

	size = (size + PKT_ALIGN-1) & ~(PKT_ALIGN-1);
	if (rp->b_cnt + size > rp->b_size)
		return ~0;
	if (rp->b_in + size > rp->b_size) {
		/*
		 * This would wrap. Find if we still have space after
		 * skipping to the end of the buffer. If we do, place
		 * a filler packet and allocate a new packet.
		 */
		fill_size = rp->b_size - rp->b_in;
		if (rp->b_cnt + size + fill_size > rp->b_size)
			return ~0;
		mon_buff_area_fill(rp, rp->b_in, fill_size);

		offset = 0;
		rp->b_in = size;
		rp->b_cnt += size + fill_size;
	} else if (rp->b_in + size == rp->b_size) {
		offset = rp->b_in;
		rp->b_in = 0;
		rp->b_cnt += size;
	} else {
		offset = rp->b_in;
		rp->b_in += size;
		rp->b_cnt += size;
	}
	return offset;
}

/*
 * Return a few (kilo-)bytes to the head of the buffer.
 * This is used if a DMA fetch fails.
 */
static void mon_buff_area_shrink(struct mon_reader_bin *rp, unsigned int size)
{

	size = (size + PKT_ALIGN-1) & ~(PKT_ALIGN-1);
	rp->b_cnt -= size;
	if (rp->b_in < size)
		rp->b_in += rp->b_size;
	rp->b_in -= size;
}

/*
 * This has to be called under both b_lock and fetch_lock, because
 * it accesses both b_cnt and b_out.
 */
static void mon_buff_area_free(struct mon_reader_bin *rp, unsigned int size)
{

	size = (size + PKT_ALIGN-1) & ~(PKT_ALIGN-1);
	rp->b_cnt -= size;
	if ((rp->b_out += size) >= rp->b_size)
		rp->b_out -= rp->b_size;
}

static void mon_buff_area_fill(const struct mon_reader_bin *rp,
    unsigned int offset, unsigned int size)
{
	struct mon_bin_hdr *ep;

	ep = MON_OFF2HDR(rp, offset);
	memset(ep, 0, PKT_SIZE);
	ep->type = '@';
	ep->len_cap = size - PKT_SIZE;
}
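
/*
 * Note for consumers of the mmap-ed ring: the '@' record written above
 * is a filler marking the unused tail of the buffer before a wrap. It
 * carries no captured data; its len_cap spans the dead space, so it is
 * skipped exactly like an ordinary event.
 */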

static inline char mon_bin_get_setup(unsigned char *setupb,
    const struct urb *urb, char ev_type)
{

	if (!usb_pipecontrol(urb->pipe) || ev_type != 'S')
		return '-';

	if (urb->dev->bus->uses_dma &&
	    (urb->transfer_flags & URB_NO_SETUP_DMA_MAP)) {
		return mon_dmapeek(setupb, urb->setup_dma, SETUP_LEN);
	}
	if (urb->setup_packet == NULL)
		return 'Z';

	memcpy(setupb, urb->setup_packet, SETUP_LEN);
	return 0;
}

static char mon_bin_get_data(const struct mon_reader_bin *rp,
    unsigned int offset, struct urb *urb, unsigned int length)
{

	if (urb->dev->bus->uses_dma &&
	    (urb->transfer_flags & URB_NO_TRANSFER_DMA_MAP)) {
		mon_dmapeek_vec(rp, offset, urb->transfer_dma, length);
		return 0;
	}

	if (urb->transfer_buffer == NULL)
		return 'Z';

	mon_copy_to_buff(rp, offset, urb->transfer_buffer, length);
	return 0;
}

static void mon_bin_event(struct mon_reader_bin *rp, struct urb *urb,
    char ev_type)
{
	unsigned long flags;
	struct timeval ts;
	unsigned int urb_length;
	unsigned int offset;
	unsigned int length;
	struct mon_bin_hdr *ep;
	char data_tag = 0;

	do_gettimeofday(&ts);

	spin_lock_irqsave(&rp->b_lock, flags);

	/*
	 * Find the maximum allowable length, then allocate space.
	 */
	urb_length = (ev_type == 'S') ?
	    urb->transfer_buffer_length : urb->actual_length;
	length = urb_length;

	if (length >= rp->b_size/5)
		length = rp->b_size/5;

	if (usb_pipein(urb->pipe)) {
		if (ev_type == 'S') {
			length = 0;
			data_tag = '<';
		}
	} else {
		if (ev_type == 'C') {
			length = 0;
			data_tag = '>';
		}
	}

	if (rp->mmap_active)
		offset = mon_buff_area_alloc_contiguous(rp, length + PKT_SIZE);
	else
		offset = mon_buff_area_alloc(rp, length + PKT_SIZE);
	if (offset == ~0) {
		rp->cnt_lost++;
		spin_unlock_irqrestore(&rp->b_lock, flags);
		return;
	}

	ep = MON_OFF2HDR(rp, offset);
	if ((offset += PKT_SIZE) >= rp->b_size)
		offset = 0;

	/*
	 * Fill the allocated area.
	 */
	memset(ep, 0, PKT_SIZE);
	ep->type = ev_type;
	ep->xfer_type = usb_pipetype(urb->pipe);
	/* We use the fact that usb_pipein() returns 0x80 */
	ep->epnum = usb_pipeendpoint(urb->pipe) | usb_pipein(urb->pipe);
	ep->devnum = usb_pipedevice(urb->pipe);
	ep->busnum = urb->dev->bus->busnum;
	ep->id = (unsigned long) urb;
	ep->ts_sec = ts.tv_sec;
	ep->ts_usec = ts.tv_usec;
	ep->status = urb->status;
	ep->len_urb = urb_length;
	ep->len_cap = length;

	ep->flag_setup = mon_bin_get_setup(ep->setup, urb, ev_type);
	if (length != 0) {
		ep->flag_data = mon_bin_get_data(rp, offset, urb, length);
		if (ep->flag_data != 0) {	/* Yes, it's 0x00, not '0' */
			ep->len_cap = 0;
			mon_buff_area_shrink(rp, length);
		}
	} else {
		ep->flag_data = data_tag;
	}

	spin_unlock_irqrestore(&rp->b_lock, flags);

	wake_up(&rp->b_wait);
}

static void mon_bin_submit(void *data, struct urb *urb)
{
	struct mon_reader_bin *rp = data;
	mon_bin_event(rp, urb, 'S');
}

static void mon_bin_complete(void *data, struct urb *urb)
{
	struct mon_reader_bin *rp = data;
	mon_bin_event(rp, urb, 'C');
}

static void mon_bin_error(void *data, struct urb *urb, int error)
{
	struct mon_reader_bin *rp = data;
	unsigned long flags;
	unsigned int offset;
	struct mon_bin_hdr *ep;

	spin_lock_irqsave(&rp->b_lock, flags);

	offset = mon_buff_area_alloc(rp, PKT_SIZE);
	if (offset == ~0) {
		/* Not incrementing cnt_lost. Just because. */
		spin_unlock_irqrestore(&rp->b_lock, flags);
		return;
	}

	ep = MON_OFF2HDR(rp, offset);

	memset(ep, 0, PKT_SIZE);
	ep->type = 'E';
	ep->xfer_type = usb_pipetype(urb->pipe);
	/* We use the fact that usb_pipein() returns 0x80 */
	ep->epnum = usb_pipeendpoint(urb->pipe) | usb_pipein(urb->pipe);
	ep->devnum = usb_pipedevice(urb->pipe);
	ep->busnum = urb->dev->bus->busnum;
	ep->id = (unsigned long) urb;
	ep->status = error;

	ep->flag_setup = '-';
	ep->flag_data = 'E';

	spin_unlock_irqrestore(&rp->b_lock, flags);

	wake_up(&rp->b_wait);
}

static int mon_bin_open(struct inode *inode, struct file *file)
{
	struct mon_bus *mbus;
	struct mon_reader_bin *rp;
	size_t size;
	int rc;

	mutex_lock(&mon_lock);
	if ((mbus = mon_bus_lookup(iminor(inode))) == NULL) {
		mutex_unlock(&mon_lock);
		return -ENODEV;
	}
	if (mbus != &mon_bus0 && mbus->u_bus == NULL) {
		printk(KERN_ERR TAG ": consistency error on open\n");
		mutex_unlock(&mon_lock);
		return -ENODEV;
	}

	rp = kzalloc(sizeof(struct mon_reader_bin), GFP_KERNEL);
	if (rp == NULL) {
		rc = -ENOMEM;
		goto err_alloc;
	}
	spin_lock_init(&rp->b_lock);
	init_waitqueue_head(&rp->b_wait);
	mutex_init(&rp->fetch_lock);

	rp->b_size = BUFF_DFL;

	size = sizeof(struct mon_pgmap) * (rp->b_size/CHUNK_SIZE);
	if ((rp->b_vec = kzalloc(size, GFP_KERNEL)) == NULL) {
		rc = -ENOMEM;
		goto err_allocvec;
	}

	if ((rc = mon_alloc_buff(rp->b_vec, rp->b_size/CHUNK_SIZE)) < 0)
		goto err_allocbuff;

	rp->r.m_bus = mbus;
	rp->r.r_data = rp;
	rp->r.rnf_submit = mon_bin_submit;
	rp->r.rnf_error = mon_bin_error;
	rp->r.rnf_complete = mon_bin_complete;

	mon_reader_add(mbus, &rp->r);

	file->private_data = rp;
	mutex_unlock(&mon_lock);
	return 0;

err_allocbuff:
	kfree(rp->b_vec);
err_allocvec:
	kfree(rp);
err_alloc:
	mutex_unlock(&mon_lock);
	return rc;
}

/*
 * Extract an event from the buffer and copy it to user space.
 * Wait if there is no event ready.
 * Returns zero or error.
 */
static int mon_bin_get_event(struct file *file, struct mon_reader_bin *rp,
    struct mon_bin_hdr __user *hdr, void __user *data, unsigned int nbytes)
{
	unsigned long flags;
	struct mon_bin_hdr *ep;
	size_t step_len;
	unsigned int offset;
	int rc;

	mutex_lock(&rp->fetch_lock);

	if ((rc = mon_bin_wait_event(file, rp)) < 0) {
		mutex_unlock(&rp->fetch_lock);
		return rc;
	}

	ep = MON_OFF2HDR(rp, rp->b_out);

	if (copy_to_user(hdr, ep, sizeof(struct mon_bin_hdr))) {
		mutex_unlock(&rp->fetch_lock);
		return -EFAULT;
	}

	step_len = min(ep->len_cap, nbytes);
	if ((offset = rp->b_out + PKT_SIZE) >= rp->b_size)
		offset = 0;

	if (copy_from_buf(rp, offset, data, step_len)) {
		mutex_unlock(&rp->fetch_lock);
		return -EFAULT;
	}

	spin_lock_irqsave(&rp->b_lock, flags);
	mon_buff_area_free(rp, PKT_SIZE + ep->len_cap);
	spin_unlock_irqrestore(&rp->b_lock, flags);
	rp->b_read = 0;

	mutex_unlock(&rp->fetch_lock);
	return 0;
}

static int mon_bin_release(struct inode *inode, struct file *file)
{
	struct mon_reader_bin *rp = file->private_data;
	struct mon_bus *mbus = rp->r.m_bus;

	mutex_lock(&mon_lock);

	if (mbus->nreaders <= 0) {
		printk(KERN_ERR TAG ": consistency error on close\n");
		mutex_unlock(&mon_lock);
		return 0;
	}
	mon_reader_del(mbus, &rp->r);

	mon_free_buff(rp->b_vec, rp->b_size/CHUNK_SIZE);
	kfree(rp->b_vec);
	kfree(rp);

	mutex_unlock(&mon_lock);
	return 0;
}

static ssize_t mon_bin_read(struct file *file, char __user *buf,
    size_t nbytes, loff_t *ppos)
{
	struct mon_reader_bin *rp = file->private_data;
	unsigned long flags;
	struct mon_bin_hdr *ep;
	unsigned int offset;
	size_t step_len;
	char *ptr;
	ssize_t done = 0;
	int rc;

	mutex_lock(&rp->fetch_lock);

	if ((rc = mon_bin_wait_event(file, rp)) < 0) {
		mutex_unlock(&rp->fetch_lock);
		return rc;
	}

	ep = MON_OFF2HDR(rp, rp->b_out);

	if (rp->b_read < sizeof(struct mon_bin_hdr)) {
		step_len = min(nbytes, sizeof(struct mon_bin_hdr) - rp->b_read);
		ptr = ((char *)ep) + rp->b_read;
		if (step_len && copy_to_user(buf, ptr, step_len)) {
			mutex_unlock(&rp->fetch_lock);
			return -EFAULT;
		}
		nbytes -= step_len;
		buf += step_len;
		rp->b_read += step_len;
		done += step_len;
	}

	if (rp->b_read >= sizeof(struct mon_bin_hdr)) {
		step_len = min(nbytes, (size_t)ep->len_cap);
		offset = rp->b_out + PKT_SIZE;
		offset += rp->b_read - sizeof(struct mon_bin_hdr);
		if (offset >= rp->b_size)
			offset -= rp->b_size;
		if (copy_from_buf(rp, offset, buf, step_len)) {
			mutex_unlock(&rp->fetch_lock);
			return -EFAULT;
		}
		nbytes -= step_len;
		buf += step_len;
		rp->b_read += step_len;
		done += step_len;
	}

	/*
	 * Check if whole packet was read, and if so, jump to the next one.
	 */
	if (rp->b_read >= sizeof(struct mon_bin_hdr) + ep->len_cap) {
		spin_lock_irqsave(&rp->b_lock, flags);
		mon_buff_area_free(rp, PKT_SIZE + ep->len_cap);
		spin_unlock_irqrestore(&rp->b_lock, flags);
		rp->b_read = 0;
	}

	mutex_unlock(&rp->fetch_lock);
	return done;
}

/*
 * Remove at most nevents from the chunked buffer.
 * Returns the number of removed events.
 */
static int mon_bin_flush(struct mon_reader_bin *rp, unsigned nevents)
{
	unsigned long flags;
	struct mon_bin_hdr *ep;
	int i;

	mutex_lock(&rp->fetch_lock);
	spin_lock_irqsave(&rp->b_lock, flags);
	for (i = 0; i < nevents; ++i) {
		if (MON_RING_EMPTY(rp))
			break;

		ep = MON_OFF2HDR(rp, rp->b_out);
		mon_buff_area_free(rp, PKT_SIZE + ep->len_cap);
	}
	spin_unlock_irqrestore(&rp->b_lock, flags);
	rp->b_read = 0;
	mutex_unlock(&rp->fetch_lock);
	return i;
}

/*
 * Fetch at most max event offsets into the buffer and put them into vec.
 * The events are usually freed later with mon_bin_flush.
 * Return the effective number of events fetched.
 */
static int mon_bin_fetch(struct file *file, struct mon_reader_bin *rp,
    u32 __user *vec, unsigned int max)
{
	unsigned int cur_out;
	unsigned int bytes, avail;
	unsigned int size;
	unsigned int nevents;
	struct mon_bin_hdr *ep;
	unsigned long flags;
	int rc;

	mutex_lock(&rp->fetch_lock);

	if ((rc = mon_bin_wait_event(file, rp)) < 0) {
		mutex_unlock(&rp->fetch_lock);
		return rc;
	}

	spin_lock_irqsave(&rp->b_lock, flags);
	avail = rp->b_cnt;
	spin_unlock_irqrestore(&rp->b_lock, flags);

	cur_out = rp->b_out;
	nevents = 0;
	bytes = 0;
	while (bytes < avail) {
		if (nevents >= max)
			break;

		ep = MON_OFF2HDR(rp, cur_out);
		if (put_user(cur_out, &vec[nevents])) {
			mutex_unlock(&rp->fetch_lock);
			return -EFAULT;
		}

		nevents++;
		size = ep->len_cap + PKT_SIZE;
		size = (size + PKT_ALIGN-1) & ~(PKT_ALIGN-1);
		if ((cur_out += size) >= rp->b_size)
			cur_out -= rp->b_size;
		bytes += size;
	}

	mutex_unlock(&rp->fetch_lock);
	return nevents;
}

/*
 * Count events. This is almost the same as the above mon_bin_fetch,
 * only we do not store offsets into the user vector, and we have no limit.
 */
static int mon_bin_queued(struct mon_reader_bin *rp)
{
	unsigned int cur_out;
	unsigned int bytes, avail;
	unsigned int size;
	unsigned int nevents;
	struct mon_bin_hdr *ep;
	unsigned long flags;

	mutex_lock(&rp->fetch_lock);

	spin_lock_irqsave(&rp->b_lock, flags);
	avail = rp->b_cnt;
	spin_unlock_irqrestore(&rp->b_lock, flags);

	cur_out = rp->b_out;
	nevents = 0;
	bytes = 0;
	while (bytes < avail) {
		ep = MON_OFF2HDR(rp, cur_out);

		nevents++;
		size = ep->len_cap + PKT_SIZE;
		size = (size + PKT_ALIGN-1) & ~(PKT_ALIGN-1);
		if ((cur_out += size) >= rp->b_size)
			cur_out -= rp->b_size;
		bytes += size;
	}

	mutex_unlock(&rp->fetch_lock);
	return nevents;
}

/*
 */
static int mon_bin_ioctl(struct inode *inode, struct file *file,
    unsigned int cmd, unsigned long arg)
{
	struct mon_reader_bin *rp = file->private_data;
	// struct mon_bus *mbus = rp->r.m_bus;
	int ret = 0;
	struct mon_bin_hdr *ep;
	unsigned long flags;

	switch (cmd) {

	case MON_IOCQ_URB_LEN:
		/*
		 * N.B. This only returns the size of data, without the header.
		 */
		spin_lock_irqsave(&rp->b_lock, flags);
		if (!MON_RING_EMPTY(rp)) {
			ep = MON_OFF2HDR(rp, rp->b_out);
			ret = ep->len_cap;
		}
		spin_unlock_irqrestore(&rp->b_lock, flags);
		break;

	case MON_IOCQ_RING_SIZE:
		ret = rp->b_size;
		break;

	case MON_IOCT_RING_SIZE:
		/*
		 * Changing the buffer size will flush its contents; the new
		 * buffer is allocated before releasing the old one to be sure
		 * the device will stay functional also in case of memory
		 * pressure.
		 */
		{
		int size;
		struct mon_pgmap *vec;

		if (arg < BUFF_MIN || arg > BUFF_MAX)
			return -EINVAL;

		size = CHUNK_ALIGN(arg);
		if ((vec = kzalloc(sizeof(struct mon_pgmap) * (size/CHUNK_SIZE),
		    GFP_KERNEL)) == NULL) {
			ret = -ENOMEM;
			break;
		}

		ret = mon_alloc_buff(vec, size/CHUNK_SIZE);
		if (ret < 0) {
			kfree(vec);
			break;
		}

		mutex_lock(&rp->fetch_lock);
		spin_lock_irqsave(&rp->b_lock, flags);
		/* Free the old buffer with the old size, not the new one. */
		mon_free_buff(rp->b_vec, rp->b_size/CHUNK_SIZE);
		kfree(rp->b_vec);
		rp->b_vec = vec;
		rp->b_size = size;
		rp->b_read = rp->b_in = rp->b_out = rp->b_cnt = 0;
		rp->cnt_lost = 0;
		spin_unlock_irqrestore(&rp->b_lock, flags);
		mutex_unlock(&rp->fetch_lock);
		}
		break;

	case MON_IOCH_MFLUSH:
		ret = mon_bin_flush(rp, arg);
		break;

	case MON_IOCX_GET:
		{
		struct mon_bin_get getb;

		if (copy_from_user(&getb, (void __user *)arg,
		    sizeof(struct mon_bin_get)))
			return -EFAULT;

		if (getb.alloc > 0x10000000)	/* Want to cast to u32 */
			return -EINVAL;
		ret = mon_bin_get_event(file, rp,
		    getb.hdr, getb.data, (unsigned int)getb.alloc);
		}
		break;

#ifdef CONFIG_COMPAT
	case MON_IOCX_GET32:
		{
		struct mon_bin_get32 getb;

		if (copy_from_user(&getb, (void __user *)arg,
		    sizeof(struct mon_bin_get32)))
			return -EFAULT;

		ret = mon_bin_get_event(file, rp,
		    compat_ptr(getb.hdr32), compat_ptr(getb.data32),
		    getb.alloc32);
		}
		break;
#endif

	case MON_IOCX_MFETCH:
		{
		struct mon_bin_mfetch mfetch;
		struct mon_bin_mfetch __user *uptr;

		uptr = (struct mon_bin_mfetch __user *)arg;

		if (copy_from_user(&mfetch, uptr, sizeof(mfetch)))
			return -EFAULT;

		if (mfetch.nflush) {
			ret = mon_bin_flush(rp, mfetch.nflush);
			if (ret < 0)
				return ret;
			if (put_user(ret, &uptr->nflush))
				return -EFAULT;
		}
		ret = mon_bin_fetch(file, rp, mfetch.offvec, mfetch.nfetch);
		if (ret < 0)
			return ret;
		if (put_user(ret, &uptr->nfetch))
			return -EFAULT;
		ret = 0;
		}
		break;

#ifdef CONFIG_COMPAT
	case MON_IOCX_MFETCH32:
		{
		struct mon_bin_mfetch32 mfetch;
		struct mon_bin_mfetch32 __user *uptr;

		uptr = (struct mon_bin_mfetch32 __user *) compat_ptr(arg);

		if (copy_from_user(&mfetch, uptr, sizeof(mfetch)))
			return -EFAULT;

		if (mfetch.nflush32) {
			ret = mon_bin_flush(rp, mfetch.nflush32);
			if (ret < 0)
				return ret;
			if (put_user(ret, &uptr->nflush32))
				return -EFAULT;
		}
		ret = mon_bin_fetch(file, rp, compat_ptr(mfetch.offvec32),
		    mfetch.nfetch32);
		if (ret < 0)
			return ret;
		if (put_user(ret, &uptr->nfetch32))
			return -EFAULT;
		ret = 0;
		}
		break;
#endif

	case MON_IOCG_STATS:
		{
		struct mon_bin_stats __user *sp;
		unsigned int nevents;
		unsigned int ndropped;

		spin_lock_irqsave(&rp->b_lock, flags);
		ndropped = rp->cnt_lost;
		rp->cnt_lost = 0;
		spin_unlock_irqrestore(&rp->b_lock, flags);
		nevents = mon_bin_queued(rp);

		sp = (struct mon_bin_stats __user *)arg;
		/* Report the count saved under the lock; cnt_lost is 0 now. */
		if (put_user(ndropped, &sp->dropped))
			return -EFAULT;
		if (put_user(nevents, &sp->queued))
			return -EFAULT;
		}
		break;

	default:
		return -ENOTTY;
	}

	return ret;
}

static unsigned int
mon_bin_poll(struct file *file, struct poll_table_struct *wait)
{
	struct mon_reader_bin *rp = file->private_data;
	unsigned int mask = 0;
	unsigned long flags;

	if (file->f_mode & FMODE_READ)
		poll_wait(file, &rp->b_wait, wait);

	spin_lock_irqsave(&rp->b_lock, flags);
	if (!MON_RING_EMPTY(rp))
		mask |= POLLIN | POLLRDNORM;	/* readable */
	spin_unlock_irqrestore(&rp->b_lock, flags);
	return mask;
}

/*
 * open and close: just keep track of how many times the device is
 * mapped, to use the proper memory allocation function.
 */
static void mon_bin_vma_open(struct vm_area_struct *vma)
{
	struct mon_reader_bin *rp = vma->vm_private_data;
	rp->mmap_active++;
}

static void mon_bin_vma_close(struct vm_area_struct *vma)
{
	struct mon_reader_bin *rp = vma->vm_private_data;
	rp->mmap_active--;
}

/*
 * Map ring pages to user space.
 */
struct page *mon_bin_vma_nopage(struct vm_area_struct *vma,
    unsigned long address, int *type)
{
	struct mon_reader_bin *rp = vma->vm_private_data;
	unsigned long offset, chunk_idx;
	struct page *pageptr;

	offset = (address - vma->vm_start) + (vma->vm_pgoff << PAGE_SHIFT);
	if (offset >= rp->b_size)
		return NOPAGE_SIGBUS;
	chunk_idx = offset / CHUNK_SIZE;
	pageptr = rp->b_vec[chunk_idx].pg;
	get_page(pageptr);
	if (type)
		*type = VM_FAULT_MINOR;
	return pageptr;
}

struct vm_operations_struct mon_bin_vm_ops = {
	.open =		mon_bin_vma_open,
	.close =	mon_bin_vma_close,
	.nopage =	mon_bin_vma_nopage,
};

int mon_bin_mmap(struct file *filp, struct vm_area_struct *vma)
{
	/* don't do anything here: "nopage" will set up page table entries */
	vma->vm_ops = &mon_bin_vm_ops;
	vma->vm_flags |= VM_RESERVED;
	vma->vm_private_data = filp->private_data;
	mon_bin_vma_open(vma);
	return 0;
}
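
/*
 * A hedged userspace sketch of setting up the mmap path (illustrative
 * only): query the current ring size, then map the whole ring read-only:
 *
 *	int size = ioctl(fd, MON_IOCQ_RING_SIZE, 0);
 *	char *ring = mmap(NULL, size, PROT_READ, MAP_SHARED, fd, 0);
 *
 * The offsets returned by MON_IOCX_MFETCH index into this mapping.
 */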

struct file_operations mon_fops_binary = {
	.owner =	THIS_MODULE,
	.open =		mon_bin_open,
	.llseek =	no_llseek,
	.read =		mon_bin_read,
	/* .write =	mon_text_write, */
	.poll =		mon_bin_poll,
	.ioctl =	mon_bin_ioctl,
	.release =	mon_bin_release,
};

static int mon_bin_wait_event(struct file *file, struct mon_reader_bin *rp)
{
	DECLARE_WAITQUEUE(waita, current);
	unsigned long flags;

	add_wait_queue(&rp->b_wait, &waita);
	set_current_state(TASK_INTERRUPTIBLE);

	spin_lock_irqsave(&rp->b_lock, flags);
	while (MON_RING_EMPTY(rp)) {
		spin_unlock_irqrestore(&rp->b_lock, flags);

		if (file->f_flags & O_NONBLOCK) {
			set_current_state(TASK_RUNNING);
			remove_wait_queue(&rp->b_wait, &waita);
			return -EWOULDBLOCK;	/* Same as EAGAIN in Linux */
		}
		schedule();
		if (signal_pending(current)) {
			remove_wait_queue(&rp->b_wait, &waita);
			return -EINTR;
		}
		set_current_state(TASK_INTERRUPTIBLE);

		spin_lock_irqsave(&rp->b_lock, flags);
	}
	spin_unlock_irqrestore(&rp->b_lock, flags);

	set_current_state(TASK_RUNNING);
	remove_wait_queue(&rp->b_wait, &waita);
	return 0;
}

static int mon_alloc_buff(struct mon_pgmap *map, int npages)
{
	int n;
	unsigned long vaddr;

	for (n = 0; n < npages; n++) {
		vaddr = get_zeroed_page(GFP_KERNEL);
		if (vaddr == 0) {
			while (n-- != 0)
				free_page((unsigned long) map[n].ptr);
			return -ENOMEM;
		}
		map[n].ptr = (unsigned char *) vaddr;
		map[n].pg = virt_to_page(vaddr);
	}
	return 0;
}

static void mon_free_buff(struct mon_pgmap *map, int npages)
{
	int n;

	for (n = 0; n < npages; n++)
		free_page((unsigned long) map[n].ptr);
}

int __init mon_bin_init(void)
{
	int rc;

	rc = alloc_chrdev_region(&mon_bin_dev0, 0, MON_BIN_MAX_MINOR, "usbmon");
	if (rc < 0)
		goto err_dev;

	cdev_init(&mon_bin_cdev, &mon_fops_binary);
	mon_bin_cdev.owner = THIS_MODULE;

	rc = cdev_add(&mon_bin_cdev, mon_bin_dev0, MON_BIN_MAX_MINOR);
	if (rc < 0)
		goto err_add;

	return 0;

err_add:
	unregister_chrdev_region(mon_bin_dev0, MON_BIN_MAX_MINOR);
err_dev:
	return rc;
}

void mon_bin_exit(void)
{
	cdev_del(&mon_bin_cdev);
	unregister_chrdev_region(mon_bin_dev0, MON_BIN_MAX_MINOR);
}