/*
 * The USB Monitor, inspired by Dave Harding's USBMon.
 *
 * This is a binary format reader.
 *
 * Copyright (C) 2006 Paolo Abeni (paolo.abeni@email.it)
 * Copyright (C) 2006,2007 Pete Zaitcev (zaitcev@redhat.com)
 */

#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/fs.h>
#include <linux/cdev.h>
#include <linux/usb.h>
#include <linux/poll.h>
#include <linux/compat.h>
#include <linux/mm.h>
#include <linux/smp_lock.h>

#include <asm/uaccess.h>

#include "usb_mon.h"

/*
 * Defined by USB 2.0 clause 9.3, table 9.2.
 */
#define SETUP_LEN 8

/* ioctl macros */
#define MON_IOC_MAGIC 0x92

#define MON_IOCQ_URB_LEN _IO(MON_IOC_MAGIC, 1)
/* #2 used to be MON_IOCX_URB, removed before it got into Linus tree */
#define MON_IOCG_STATS _IOR(MON_IOC_MAGIC, 3, struct mon_bin_stats)
#define MON_IOCT_RING_SIZE _IO(MON_IOC_MAGIC, 4)
#define MON_IOCQ_RING_SIZE _IO(MON_IOC_MAGIC, 5)
#define MON_IOCX_GET _IOW(MON_IOC_MAGIC, 6, struct mon_bin_get)
#define MON_IOCX_MFETCH _IOWR(MON_IOC_MAGIC, 7, struct mon_bin_mfetch)
#define MON_IOCH_MFLUSH _IO(MON_IOC_MAGIC, 8)
#ifdef CONFIG_COMPAT
#define MON_IOCX_GET32 _IOW(MON_IOC_MAGIC, 6, struct mon_bin_get32)
#define MON_IOCX_MFETCH32 _IOWR(MON_IOC_MAGIC, 7, struct mon_bin_mfetch32)
#endif

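/*
 * Illustrative userspace sketch (not part of this driver): applications
 * duplicate the ioctl numbers and structures above, open the matching
 * /dev/usbmonN node, and drive the binary interface with them. The device
 * path, includes and error handling below are assumptions of the example,
 * not something this file provides.
 *
 *	#include <stdio.h>
 *	#include <fcntl.h>
 *	#include <unistd.h>
 *	#include <sys/ioctl.h>
 *
 *	static void print_usbmon_stats(void)
 *	{
 *		struct mon_bin_stats st;
 *		int fd = open("/dev/usbmon0", O_RDONLY);
 *
 *		if (fd >= 0 && ioctl(fd, MON_IOCG_STATS, &st) == 0)
 *			printf("queued %u, dropped %u\n", st.queued, st.dropped);
 *		if (fd >= 0)
 *			close(fd);
 *	}
 */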
/*
 * Some architectures have enormous basic pages (16KB for ia64, 64KB for ppc).
 * But it's all right. Just use a simple way to make sure the chunk is never
 * smaller than a page.
 *
 * N.B. An application does not know our chunk size.
 *
 * Woops, get_zeroed_page() returns a single page. I guess we're stuck with
 * page-sized chunks for the time being.
 */
#define CHUNK_SIZE PAGE_SIZE
#define CHUNK_ALIGN(x) (((x)+CHUNK_SIZE-1) & ~(CHUNK_SIZE-1))

/*
 * The magic limit was calculated so that it allows the monitoring
 * application to pick data once in two ticks. This way, another application,
 * which presumably drives the bus, gets to hog CPU, yet we collect our data.
 * If HZ is 100, a 480 mbit/s bus drives 614 KB every jiffy. USB has an
 * enormous overhead built into the bus protocol, so we need about 1000 KB.
 *
 * This is still too much for most cases, where we just snoop a few
 * descriptor fetches for enumeration. So, the default is a "reasonable"
 * amount for systems with HZ=250 and incomplete bus saturation.
 *
 * XXX What about multi-megabyte URBs which take minutes to transfer?
 */
#define BUFF_MAX CHUNK_ALIGN(1200*1024)
#define BUFF_DFL CHUNK_ALIGN(300*1024)
#define BUFF_MIN CHUNK_ALIGN(8*1024)

/*
 * The per-event API header (2 per URB).
 *
 * This structure is seen in userland as defined by the documentation.
 */
struct mon_bin_hdr {
	u64 id;			/* URB ID - from submission to callback */
	unsigned char type;	/* Same as in text API; extensible. */
	unsigned char xfer_type; /* ISO, Intr, Control, Bulk */
	unsigned char epnum;	/* Endpoint number and transfer direction */
	unsigned char devnum;	/* Device address */
	unsigned short busnum;	/* Bus number */
	char flag_setup;
	char flag_data;
	s64 ts_sec;		/* gettimeofday */
	s32 ts_usec;		/* gettimeofday */
	int status;
	unsigned int len_urb;	/* Length of data (submitted or actual) */
	unsigned int len_cap;	/* Delivered length */
	unsigned char setup[SETUP_LEN];	/* Only for Control S-type */
};

/* per file statistic */
struct mon_bin_stats {
	u32 queued;
	u32 dropped;
};

struct mon_bin_get {
	struct mon_bin_hdr __user *hdr;	/* Only 48 bytes, not 64. */
	void __user *data;
	size_t alloc;		/* Length of data (can be zero) */
};

struct mon_bin_mfetch {
	u32 __user *offvec;	/* Vector of events fetched */
	u32 nfetch;		/* Number of events to fetch (out: fetched) */
	u32 nflush;		/* Number of events to flush */
};

#ifdef CONFIG_COMPAT
struct mon_bin_get32 {
	u32 hdr32;
	u32 data32;
	u32 alloc32;
};

struct mon_bin_mfetch32 {
	u32 offvec32;
	u32 nfetch32;
	u32 nflush32;
};
#endif

/* Having these two values same prevents wrapping of the mon_bin_hdr */
#define PKT_ALIGN 64
#define PKT_SIZE 64

/* max number of USB bus supported */
#define MON_BIN_MAX_MINOR 128

/*
 * The buffer: map of used pages.
 */
struct mon_pgmap {
	struct page *pg;
	unsigned char *ptr;	/* XXX just use page_to_virt everywhere? */
};

/*
 * This gets associated with an open file struct.
 */
struct mon_reader_bin {
	/* The buffer: one per open. */
	spinlock_t b_lock;		/* Protect b_cnt, b_in */
	unsigned int b_size;		/* Current size of the buffer - bytes */
	unsigned int b_cnt;		/* Bytes used */
	unsigned int b_in, b_out;	/* Offsets into buffer - bytes */
	unsigned int b_read;		/* Amount of read data in curr. pkt. */
	struct mon_pgmap *b_vec;	/* The map array */
	wait_queue_head_t b_wait;	/* Wait for data here */

	struct mutex fetch_lock;	/* Protect b_read, b_out */
	int mmap_active;

	/* A list of these is needed for "bus 0". Some time later. */
	struct mon_reader r;

	/* Stats */
	unsigned int cnt_lost;
};

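/*
 * Convert a byte offset within the ring buffer into a pointer to the
 * mon_bin_hdr stored there (the buffer is a vector of page-sized chunks).
 */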
static inline struct mon_bin_hdr *MON_OFF2HDR(const struct mon_reader_bin *rp,
    unsigned int offset)
{
	return (struct mon_bin_hdr *)
	    (rp->b_vec[offset / CHUNK_SIZE].ptr + offset % CHUNK_SIZE);
}

#define MON_RING_EMPTY(rp)	((rp)->b_cnt == 0)

static unsigned char xfer_to_pipe[4] = {
	PIPE_CONTROL, PIPE_ISOCHRONOUS, PIPE_BULK, PIPE_INTERRUPT
};

static struct class *mon_bin_class;
static dev_t mon_bin_dev0;
static struct cdev mon_bin_cdev;

static void mon_buff_area_fill(const struct mon_reader_bin *rp,
    unsigned int offset, unsigned int size);
static int mon_bin_wait_event(struct file *file, struct mon_reader_bin *rp);
static int mon_alloc_buff(struct mon_pgmap *map, int npages);
static void mon_free_buff(struct mon_pgmap *map, int npages);

/*
 * This is a "chunked memcpy". It does not manipulate any counters.
 * But it returns the new offset for repeated application.
 */
unsigned int mon_copy_to_buff(const struct mon_reader_bin *this,
    unsigned int off, const unsigned char *from, unsigned int length)
{
	unsigned int step_len;
	unsigned char *buf;
	unsigned int in_page;

	while (length) {
		/*
		 * Determine step_len.
		 */
		step_len = length;
		in_page = CHUNK_SIZE - (off & (CHUNK_SIZE-1));
		if (in_page < step_len)
			step_len = in_page;

		/*
		 * Copy data and advance pointers.
		 */
		buf = this->b_vec[off / CHUNK_SIZE].ptr + off % CHUNK_SIZE;
		memcpy(buf, from, step_len);
		if ((off += step_len) >= this->b_size) off = 0;
		from += step_len;
		length -= step_len;
	}
	return off;
}

/*
 * This is a little worse than the above because it's "chunked copy_to_user".
 * The return value is an error code, not an offset.
 */
static int copy_from_buf(const struct mon_reader_bin *this, unsigned int off,
    char __user *to, int length)
{
	unsigned int step_len;
	unsigned char *buf;
	unsigned int in_page;

	while (length) {
		/*
		 * Determine step_len.
		 */
		step_len = length;
		in_page = CHUNK_SIZE - (off & (CHUNK_SIZE-1));
		if (in_page < step_len)
			step_len = in_page;

		/*
		 * Copy data and advance pointers.
		 */
		buf = this->b_vec[off / CHUNK_SIZE].ptr + off % CHUNK_SIZE;
		if (copy_to_user(to, buf, step_len))
			return -EINVAL;
		if ((off += step_len) >= this->b_size) off = 0;
		to += step_len;
		length -= step_len;
	}
	return 0;
}

/*
 * Allocate an (aligned) area in the buffer.
 * This is called under b_lock.
 * Returns ~0 on failure.
 */
static unsigned int mon_buff_area_alloc(struct mon_reader_bin *rp,
    unsigned int size)
{
	unsigned int offset;

	size = (size + PKT_ALIGN-1) & ~(PKT_ALIGN-1);
	if (rp->b_cnt + size > rp->b_size)
		return ~0;
	offset = rp->b_in;
	rp->b_cnt += size;
	if ((rp->b_in += size) >= rp->b_size)
		rp->b_in -= rp->b_size;
	return offset;
}

/*
 * This is the same thing as mon_buff_area_alloc, only it does not allow
 * buffers to wrap. This is needed by applications which pass references
 * into mmap-ed buffers up their stacks (libpcap can do that).
 *
 * Currently, we always have the header stuck with the data, although
 * it is not strictly speaking necessary.
 *
 * When a buffer would wrap, we place a filler packet to mark the space.
 */
static unsigned int mon_buff_area_alloc_contiguous(struct mon_reader_bin *rp,
    unsigned int size)
{
	unsigned int offset;
	unsigned int fill_size;

	size = (size + PKT_ALIGN-1) & ~(PKT_ALIGN-1);
	if (rp->b_cnt + size > rp->b_size)
		return ~0;
	if (rp->b_in + size > rp->b_size) {
		/*
		 * This would wrap. Find if we still have space after
		 * skipping to the end of the buffer. If we do, place
		 * a filler packet and allocate a new packet.
		 */
		fill_size = rp->b_size - rp->b_in;
		if (rp->b_cnt + size + fill_size > rp->b_size)
			return ~0;
		mon_buff_area_fill(rp, rp->b_in, fill_size);

		offset = 0;
		rp->b_in = size;
		rp->b_cnt += size + fill_size;
	} else if (rp->b_in + size == rp->b_size) {
		offset = rp->b_in;
		rp->b_in = 0;
		rp->b_cnt += size;
	} else {
		offset = rp->b_in;
		rp->b_in += size;
		rp->b_cnt += size;
	}
	return offset;
}

/*
 * Return a few (kilo-)bytes to the head of the buffer.
 * This is used if a DMA fetch fails.
 */
static void mon_buff_area_shrink(struct mon_reader_bin *rp, unsigned int size)
{

	size = (size + PKT_ALIGN-1) & ~(PKT_ALIGN-1);
	rp->b_cnt -= size;
	if (rp->b_in < size)
		rp->b_in += rp->b_size;
	rp->b_in -= size;
}

/*
 * This has to be called under both b_lock and fetch_lock, because
 * it accesses both b_cnt and b_out.
 */
static void mon_buff_area_free(struct mon_reader_bin *rp, unsigned int size)
{

	size = (size + PKT_ALIGN-1) & ~(PKT_ALIGN-1);
	rp->b_cnt -= size;
	if ((rp->b_out += size) >= rp->b_size)
		rp->b_out -= rp->b_size;
}

static void mon_buff_area_fill(const struct mon_reader_bin *rp,
    unsigned int offset, unsigned int size)
{
	struct mon_bin_hdr *ep;

	ep = MON_OFF2HDR(rp, offset);
	memset(ep, 0, PKT_SIZE);
	ep->type = '@';
	ep->len_cap = size - PKT_SIZE;
}

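/*
 * Capture the setup packet of a control submission into the event header.
 * Returns 0 on success, or a flag character for flag_setup: '-' when the
 * URB is not a control submission, 'Z' when the setup packet is missing.
 */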
static inline char mon_bin_get_setup(unsigned char *setupb,
    const struct urb *urb, char ev_type)
{

	if (!usb_endpoint_xfer_control(&urb->ep->desc) || ev_type != 'S')
		return '-';

	if (urb->setup_packet == NULL)
		return 'Z';

	memcpy(setupb, urb->setup_packet, SETUP_LEN);
	return 0;
}

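/*
 * Store the URB data into the ring at the given offset, peeking through
 * DMA when the transfer buffer is mapped by the HCD. Returns 0 on success,
 * or 'Z' when there is no transfer buffer to copy from.
 */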
static char mon_bin_get_data(const struct mon_reader_bin *rp,
    unsigned int offset, struct urb *urb, unsigned int length)
{

	if (urb->dev->bus->uses_dma &&
	    (urb->transfer_flags & URB_NO_TRANSFER_DMA_MAP)) {
		mon_dmapeek_vec(rp, offset, urb->transfer_dma, length);
		return 0;
	}

	if (urb->transfer_buffer == NULL)
		return 'Z';

	mon_copy_to_buff(rp, offset, urb->transfer_buffer, length);
	return 0;
}

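/*
 * Record one submission or completion event: allocate space in the ring,
 * fill in a mon_bin_hdr, optionally capture setup and data, and wake up
 * any sleeping reader. Called from the submit/complete callbacks; takes
 * b_lock itself.
 */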
static void mon_bin_event(struct mon_reader_bin *rp, struct urb *urb,
    char ev_type, int status)
{
	const struct usb_endpoint_descriptor *epd = &urb->ep->desc;
	unsigned long flags;
	struct timeval ts;
	unsigned int urb_length;
	unsigned int offset;
	unsigned int length;
	unsigned char dir;
	struct mon_bin_hdr *ep;
	char data_tag = 0;

	do_gettimeofday(&ts);

	spin_lock_irqsave(&rp->b_lock, flags);

	/*
	 * Find the maximum allowable length, then allocate space.
	 */
	urb_length = (ev_type == 'S') ?
	    urb->transfer_buffer_length : urb->actual_length;
	length = urb_length;

	if (length >= rp->b_size/5)
		length = rp->b_size/5;

	if (usb_urb_dir_in(urb)) {
		if (ev_type == 'S') {
			length = 0;
			data_tag = '<';
		}
		/* Cannot rely on endpoint number in case of control ep.0 */
		dir = USB_DIR_IN;
	} else {
		if (ev_type == 'C') {
			length = 0;
			data_tag = '>';
		}
		dir = 0;
	}

	if (rp->mmap_active)
		offset = mon_buff_area_alloc_contiguous(rp, length + PKT_SIZE);
	else
		offset = mon_buff_area_alloc(rp, length + PKT_SIZE);
	if (offset == ~0) {
		rp->cnt_lost++;
		spin_unlock_irqrestore(&rp->b_lock, flags);
		return;
	}

	ep = MON_OFF2HDR(rp, offset);
	if ((offset += PKT_SIZE) >= rp->b_size) offset = 0;

	/*
	 * Fill the allocated area.
	 */
	memset(ep, 0, PKT_SIZE);
	ep->type = ev_type;
	ep->xfer_type = xfer_to_pipe[usb_endpoint_type(epd)];
	ep->epnum = dir | usb_endpoint_num(epd);
	ep->devnum = urb->dev->devnum;
	ep->busnum = urb->dev->bus->busnum;
	ep->id = (unsigned long) urb;
	ep->ts_sec = ts.tv_sec;
	ep->ts_usec = ts.tv_usec;
	ep->status = status;
	ep->len_urb = urb_length;
	ep->len_cap = length;

	ep->flag_setup = mon_bin_get_setup(ep->setup, urb, ev_type);
	if (length != 0) {
		ep->flag_data = mon_bin_get_data(rp, offset, urb, length);
		if (ep->flag_data != 0) {	/* Yes, it's 0x00, not '0' */
			ep->len_cap = 0;
			mon_buff_area_shrink(rp, length);
		}
	} else {
		ep->flag_data = data_tag;
	}

	spin_unlock_irqrestore(&rp->b_lock, flags);

	wake_up(&rp->b_wait);
}

static void mon_bin_submit(void *data, struct urb *urb)
{
	struct mon_reader_bin *rp = data;
	mon_bin_event(rp, urb, 'S', -EINPROGRESS);
}

static void mon_bin_complete(void *data, struct urb *urb, int status)
{
	struct mon_reader_bin *rp = data;
	mon_bin_event(rp, urb, 'C', status);
}

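/*
 * The URB could not be submitted at all; record a bare 'E' event that
 * carries the error code but no setup or data payload.
 */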
static void mon_bin_error(void *data, struct urb *urb, int error)
{
	struct mon_reader_bin *rp = data;
	unsigned long flags;
	unsigned int offset;
	struct mon_bin_hdr *ep;

	spin_lock_irqsave(&rp->b_lock, flags);

	offset = mon_buff_area_alloc(rp, PKT_SIZE);
	if (offset == ~0) {
		/* Not incrementing cnt_lost. Just because. */
		spin_unlock_irqrestore(&rp->b_lock, flags);
		return;
	}

	ep = MON_OFF2HDR(rp, offset);

	memset(ep, 0, PKT_SIZE);
	ep->type = 'E';
	ep->xfer_type = xfer_to_pipe[usb_endpoint_type(&urb->ep->desc)];
	ep->epnum = usb_urb_dir_in(urb) ? USB_DIR_IN : 0;
	ep->epnum |= usb_endpoint_num(&urb->ep->desc);
	ep->devnum = urb->dev->devnum;
	ep->busnum = urb->dev->bus->busnum;
	ep->id = (unsigned long) urb;
	ep->status = error;

	ep->flag_setup = '-';
	ep->flag_data = 'E';

	spin_unlock_irqrestore(&rp->b_lock, flags);

	wake_up(&rp->b_wait);
}

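/*
 * Open a reader: allocate the per-open ring buffer (BUFF_DFL bytes by
 * default) and register this reader with the monitored bus.
 */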
static int mon_bin_open(struct inode *inode, struct file *file)
{
	struct mon_bus *mbus;
	struct mon_reader_bin *rp;
	size_t size;
	int rc;

	lock_kernel();
	mutex_lock(&mon_lock);
	if ((mbus = mon_bus_lookup(iminor(inode))) == NULL) {
		mutex_unlock(&mon_lock);
		unlock_kernel();
		return -ENODEV;
	}
	if (mbus != &mon_bus0 && mbus->u_bus == NULL) {
		printk(KERN_ERR TAG ": consistency error on open\n");
		mutex_unlock(&mon_lock);
		unlock_kernel();
		return -ENODEV;
	}

	rp = kzalloc(sizeof(struct mon_reader_bin), GFP_KERNEL);
	if (rp == NULL) {
		rc = -ENOMEM;
		goto err_alloc;
	}
	spin_lock_init(&rp->b_lock);
	init_waitqueue_head(&rp->b_wait);
	mutex_init(&rp->fetch_lock);

	rp->b_size = BUFF_DFL;

	size = sizeof(struct mon_pgmap) * (rp->b_size/CHUNK_SIZE);
	if ((rp->b_vec = kzalloc(size, GFP_KERNEL)) == NULL) {
		rc = -ENOMEM;
		goto err_allocvec;
	}

	if ((rc = mon_alloc_buff(rp->b_vec, rp->b_size/CHUNK_SIZE)) < 0)
		goto err_allocbuff;

	rp->r.m_bus = mbus;
	rp->r.r_data = rp;
	rp->r.rnf_submit = mon_bin_submit;
	rp->r.rnf_error = mon_bin_error;
	rp->r.rnf_complete = mon_bin_complete;

	mon_reader_add(mbus, &rp->r);

	file->private_data = rp;
	mutex_unlock(&mon_lock);
	unlock_kernel();
	return 0;

err_allocbuff:
	kfree(rp->b_vec);
err_allocvec:
	kfree(rp);
err_alloc:
	mutex_unlock(&mon_lock);
	unlock_kernel();
	return rc;
}

/*
 * Extract an event from buffer and copy it to user space.
 * Wait if there is no event ready.
 * Returns zero or error.
 */
static int mon_bin_get_event(struct file *file, struct mon_reader_bin *rp,
    struct mon_bin_hdr __user *hdr, void __user *data, unsigned int nbytes)
{
	unsigned long flags;
	struct mon_bin_hdr *ep;
	size_t step_len;
	unsigned int offset;
	int rc;

	mutex_lock(&rp->fetch_lock);

	if ((rc = mon_bin_wait_event(file, rp)) < 0) {
		mutex_unlock(&rp->fetch_lock);
		return rc;
	}

	ep = MON_OFF2HDR(rp, rp->b_out);

	if (copy_to_user(hdr, ep, sizeof(struct mon_bin_hdr))) {
		mutex_unlock(&rp->fetch_lock);
		return -EFAULT;
	}

	step_len = min(ep->len_cap, nbytes);
	if ((offset = rp->b_out + PKT_SIZE) >= rp->b_size) offset = 0;

	if (copy_from_buf(rp, offset, data, step_len)) {
		mutex_unlock(&rp->fetch_lock);
		return -EFAULT;
	}

	spin_lock_irqsave(&rp->b_lock, flags);
	mon_buff_area_free(rp, PKT_SIZE + ep->len_cap);
	spin_unlock_irqrestore(&rp->b_lock, flags);
	rp->b_read = 0;

	mutex_unlock(&rp->fetch_lock);
	return 0;
}

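/*
 * Close a reader: unregister it from the bus and free the ring buffer.
 */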
static int mon_bin_release(struct inode *inode, struct file *file)
{
	struct mon_reader_bin *rp = file->private_data;
	struct mon_bus* mbus = rp->r.m_bus;

	mutex_lock(&mon_lock);

	if (mbus->nreaders <= 0) {
		printk(KERN_ERR TAG ": consistency error on close\n");
		mutex_unlock(&mon_lock);
		return 0;
	}
	mon_reader_del(mbus, &rp->r);

	mon_free_buff(rp->b_vec, rp->b_size/CHUNK_SIZE);
	kfree(rp->b_vec);
	kfree(rp);

	mutex_unlock(&mon_lock);
	return 0;
}

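/*
 * read(2) interface: copy out the header of the oldest event, then its
 * captured data, possibly over several calls; the event is freed from the
 * ring once it has been consumed completely.
 */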
static ssize_t mon_bin_read(struct file *file, char __user *buf,
    size_t nbytes, loff_t *ppos)
{
	struct mon_reader_bin *rp = file->private_data;
	unsigned long flags;
	struct mon_bin_hdr *ep;
	unsigned int offset;
	size_t step_len;
	char *ptr;
	ssize_t done = 0;
	int rc;

	mutex_lock(&rp->fetch_lock);

	if ((rc = mon_bin_wait_event(file, rp)) < 0) {
		mutex_unlock(&rp->fetch_lock);
		return rc;
	}

	ep = MON_OFF2HDR(rp, rp->b_out);

	if (rp->b_read < sizeof(struct mon_bin_hdr)) {
		step_len = min(nbytes, sizeof(struct mon_bin_hdr) - rp->b_read);
		ptr = ((char *)ep) + rp->b_read;
		if (step_len && copy_to_user(buf, ptr, step_len)) {
			mutex_unlock(&rp->fetch_lock);
			return -EFAULT;
		}
		nbytes -= step_len;
		buf += step_len;
		rp->b_read += step_len;
		done += step_len;
	}

	if (rp->b_read >= sizeof(struct mon_bin_hdr)) {
		step_len = min(nbytes, (size_t)ep->len_cap);
		offset = rp->b_out + PKT_SIZE;
		offset += rp->b_read - sizeof(struct mon_bin_hdr);
		if (offset >= rp->b_size)
			offset -= rp->b_size;
		if (copy_from_buf(rp, offset, buf, step_len)) {
			mutex_unlock(&rp->fetch_lock);
			return -EFAULT;
		}
		nbytes -= step_len;
		buf += step_len;
		rp->b_read += step_len;
		done += step_len;
	}

	/*
	 * Check if whole packet was read, and if so, jump to the next one.
	 */
	if (rp->b_read >= sizeof(struct mon_bin_hdr) + ep->len_cap) {
		spin_lock_irqsave(&rp->b_lock, flags);
		mon_buff_area_free(rp, PKT_SIZE + ep->len_cap);
		spin_unlock_irqrestore(&rp->b_lock, flags);
		rp->b_read = 0;
	}

	mutex_unlock(&rp->fetch_lock);
	return done;
}

/*
 * Remove at most nevents from chunked buffer.
 * Returns the number of removed events.
 */
static int mon_bin_flush(struct mon_reader_bin *rp, unsigned nevents)
{
	unsigned long flags;
	struct mon_bin_hdr *ep;
	int i;

	mutex_lock(&rp->fetch_lock);
	spin_lock_irqsave(&rp->b_lock, flags);
	for (i = 0; i < nevents; ++i) {
		if (MON_RING_EMPTY(rp))
			break;

		ep = MON_OFF2HDR(rp, rp->b_out);
		mon_buff_area_free(rp, PKT_SIZE + ep->len_cap);
	}
	spin_unlock_irqrestore(&rp->b_lock, flags);
	rp->b_read = 0;
	mutex_unlock(&rp->fetch_lock);
	return i;
}

/*
 * Fetch at most max event offsets into the buffer and put them into vec.
 * The events are usually freed later with mon_bin_flush.
 * Return the effective number of events fetched.
 */
static int mon_bin_fetch(struct file *file, struct mon_reader_bin *rp,
    u32 __user *vec, unsigned int max)
{
	unsigned int cur_out;
	unsigned int bytes, avail;
	unsigned int size;
	unsigned int nevents;
	struct mon_bin_hdr *ep;
	unsigned long flags;
	int rc;

	mutex_lock(&rp->fetch_lock);

	if ((rc = mon_bin_wait_event(file, rp)) < 0) {
		mutex_unlock(&rp->fetch_lock);
		return rc;
	}

	spin_lock_irqsave(&rp->b_lock, flags);
	avail = rp->b_cnt;
	spin_unlock_irqrestore(&rp->b_lock, flags);

	cur_out = rp->b_out;
	nevents = 0;
	bytes = 0;
	while (bytes < avail) {
		if (nevents >= max)
			break;

		ep = MON_OFF2HDR(rp, cur_out);
		if (put_user(cur_out, &vec[nevents])) {
			mutex_unlock(&rp->fetch_lock);
			return -EFAULT;
		}

		nevents++;
		size = ep->len_cap + PKT_SIZE;
		size = (size + PKT_ALIGN-1) & ~(PKT_ALIGN-1);
		if ((cur_out += size) >= rp->b_size)
			cur_out -= rp->b_size;
		bytes += size;
	}

	mutex_unlock(&rp->fetch_lock);
	return nevents;
}

/*
 * Count events. This is almost the same as the above mon_bin_fetch,
 * only we do not store offsets into user vector, and we have no limit.
 */
static int mon_bin_queued(struct mon_reader_bin *rp)
{
	unsigned int cur_out;
	unsigned int bytes, avail;
	unsigned int size;
	unsigned int nevents;
	struct mon_bin_hdr *ep;
	unsigned long flags;

	mutex_lock(&rp->fetch_lock);

	spin_lock_irqsave(&rp->b_lock, flags);
	avail = rp->b_cnt;
	spin_unlock_irqrestore(&rp->b_lock, flags);

	cur_out = rp->b_out;
	nevents = 0;
	bytes = 0;
	while (bytes < avail) {
		ep = MON_OFF2HDR(rp, cur_out);

		nevents++;
		size = ep->len_cap + PKT_SIZE;
		size = (size + PKT_ALIGN-1) & ~(PKT_ALIGN-1);
		if ((cur_out += size) >= rp->b_size)
			cur_out -= rp->b_size;
		bytes += size;
	}

	mutex_unlock(&rp->fetch_lock);
	return nevents;
}

/*
 * ioctl(2) handler for the binary interface.
 */
static int mon_bin_ioctl(struct inode *inode, struct file *file,
    unsigned int cmd, unsigned long arg)
{
	struct mon_reader_bin *rp = file->private_data;
	// struct mon_bus* mbus = rp->r.m_bus;
	int ret = 0;
	struct mon_bin_hdr *ep;
	unsigned long flags;

	switch (cmd) {

	case MON_IOCQ_URB_LEN:
		/*
		 * N.B. This only returns the size of data, without the header.
		 */
		spin_lock_irqsave(&rp->b_lock, flags);
		if (!MON_RING_EMPTY(rp)) {
			ep = MON_OFF2HDR(rp, rp->b_out);
			ret = ep->len_cap;
		}
		spin_unlock_irqrestore(&rp->b_lock, flags);
		break;

	case MON_IOCQ_RING_SIZE:
		ret = rp->b_size;
		break;

	case MON_IOCT_RING_SIZE:
		/*
		 * Changing the buffer size will flush its contents; the new
		 * buffer is allocated before releasing the old one to be sure
		 * the device will stay functional also in case of memory
		 * pressure.
		 */
		{
		int size;
		struct mon_pgmap *vec;

		if (arg < BUFF_MIN || arg > BUFF_MAX)
			return -EINVAL;

		size = CHUNK_ALIGN(arg);
		if ((vec = kzalloc(sizeof(struct mon_pgmap) * (size/CHUNK_SIZE),
		    GFP_KERNEL)) == NULL) {
			ret = -ENOMEM;
			break;
		}

		ret = mon_alloc_buff(vec, size/CHUNK_SIZE);
		if (ret < 0) {
			kfree(vec);
			break;
		}

		mutex_lock(&rp->fetch_lock);
		spin_lock_irqsave(&rp->b_lock, flags);
		mon_free_buff(rp->b_vec, rp->b_size/CHUNK_SIZE);
		kfree(rp->b_vec);
		rp->b_vec = vec;
		rp->b_size = size;
		rp->b_read = rp->b_in = rp->b_out = rp->b_cnt = 0;
		rp->cnt_lost = 0;
		spin_unlock_irqrestore(&rp->b_lock, flags);
		mutex_unlock(&rp->fetch_lock);
		}
		break;

	case MON_IOCH_MFLUSH:
		ret = mon_bin_flush(rp, arg);
		break;

	case MON_IOCX_GET:
		{
		struct mon_bin_get getb;

		if (copy_from_user(&getb, (void __user *)arg,
		    sizeof(struct mon_bin_get)))
			return -EFAULT;

		if (getb.alloc > 0x10000000)	/* Want to cast to u32 */
			return -EINVAL;
		ret = mon_bin_get_event(file, rp,
		    getb.hdr, getb.data, (unsigned int)getb.alloc);
		}
		break;

#ifdef CONFIG_COMPAT
	case MON_IOCX_GET32: {
		struct mon_bin_get32 getb;

		if (copy_from_user(&getb, (void __user *)arg,
		    sizeof(struct mon_bin_get32)))
			return -EFAULT;

		ret = mon_bin_get_event(file, rp,
		    compat_ptr(getb.hdr32), compat_ptr(getb.data32),
		    getb.alloc32);
	}
		break;
#endif

	case MON_IOCX_MFETCH:
		{
		struct mon_bin_mfetch mfetch;
		struct mon_bin_mfetch __user *uptr;

		uptr = (struct mon_bin_mfetch __user *)arg;

		if (copy_from_user(&mfetch, uptr, sizeof(mfetch)))
			return -EFAULT;

		if (mfetch.nflush) {
			ret = mon_bin_flush(rp, mfetch.nflush);
			if (ret < 0)
				return ret;
			if (put_user(ret, &uptr->nflush))
				return -EFAULT;
		}
		ret = mon_bin_fetch(file, rp, mfetch.offvec, mfetch.nfetch);
		if (ret < 0)
			return ret;
		if (put_user(ret, &uptr->nfetch))
			return -EFAULT;
		ret = 0;
		}
		break;

#ifdef CONFIG_COMPAT
	case MON_IOCX_MFETCH32:
		{
		struct mon_bin_mfetch32 mfetch;
		struct mon_bin_mfetch32 __user *uptr;

		uptr = (struct mon_bin_mfetch32 __user *) compat_ptr(arg);

		if (copy_from_user(&mfetch, uptr, sizeof(mfetch)))
			return -EFAULT;

		if (mfetch.nflush32) {
			ret = mon_bin_flush(rp, mfetch.nflush32);
			if (ret < 0)
				return ret;
			if (put_user(ret, &uptr->nflush32))
				return -EFAULT;
		}
		ret = mon_bin_fetch(file, rp, compat_ptr(mfetch.offvec32),
		    mfetch.nfetch32);
		if (ret < 0)
			return ret;
		if (put_user(ret, &uptr->nfetch32))
			return -EFAULT;
		ret = 0;
		}
		break;
#endif

	case MON_IOCG_STATS: {
		struct mon_bin_stats __user *sp;
		unsigned int nevents;
		unsigned int ndropped;

		spin_lock_irqsave(&rp->b_lock, flags);
		ndropped = rp->cnt_lost;
		rp->cnt_lost = 0;
		spin_unlock_irqrestore(&rp->b_lock, flags);
		nevents = mon_bin_queued(rp);

		sp = (struct mon_bin_stats __user *)arg;
		if (put_user(ndropped, &sp->dropped))
			return -EFAULT;
		if (put_user(nevents, &sp->queued))
			return -EFAULT;

	}
		break;

	default:
		return -ENOTTY;
	}

	return ret;
}

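/*
 * poll(2)/select(2) support: report the device readable whenever the
 * ring buffer holds at least one complete event.
 */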
static unsigned int
mon_bin_poll(struct file *file, struct poll_table_struct *wait)
{
	struct mon_reader_bin *rp = file->private_data;
	unsigned int mask = 0;
	unsigned long flags;

	if (file->f_mode & FMODE_READ)
		poll_wait(file, &rp->b_wait, wait);

	spin_lock_irqsave(&rp->b_lock, flags);
	if (!MON_RING_EMPTY(rp))
		mask |= POLLIN | POLLRDNORM;	/* readable */
	spin_unlock_irqrestore(&rp->b_lock, flags);
	return mask;
}

/*
 * open and close: just keep track of how many times the device is
 * mapped, to use the proper memory allocation function.
 */
static void mon_bin_vma_open(struct vm_area_struct *vma)
{
	struct mon_reader_bin *rp = vma->vm_private_data;
	rp->mmap_active++;
}

static void mon_bin_vma_close(struct vm_area_struct *vma)
{
	struct mon_reader_bin *rp = vma->vm_private_data;
	rp->mmap_active--;
}

/*
 * Map ring pages to user space.
 */
static int mon_bin_vma_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	struct mon_reader_bin *rp = vma->vm_private_data;
	unsigned long offset, chunk_idx;
	struct page *pageptr;

	offset = vmf->pgoff << PAGE_SHIFT;
	if (offset >= rp->b_size)
		return VM_FAULT_SIGBUS;
	chunk_idx = offset / CHUNK_SIZE;
	pageptr = rp->b_vec[chunk_idx].pg;
	get_page(pageptr);
	vmf->page = pageptr;
	return 0;
}

static struct vm_operations_struct mon_bin_vm_ops = {
	.open =		mon_bin_vma_open,
	.close =	mon_bin_vma_close,
	.fault =	mon_bin_vma_fault,
};

static int mon_bin_mmap(struct file *filp, struct vm_area_struct *vma)
{
	/* don't do anything here: "fault" will set up page table entries */
	vma->vm_ops = &mon_bin_vm_ops;
	vma->vm_flags |= VM_RESERVED;
	vma->vm_private_data = filp->private_data;
	mon_bin_vma_open(vma);
	return 0;
}

static const struct file_operations mon_fops_binary = {
	.owner =	THIS_MODULE,
	.open =		mon_bin_open,
	.llseek =	no_llseek,
	.read =		mon_bin_read,
	/* .write =	mon_text_write, */
	.poll =		mon_bin_poll,
	.ioctl =	mon_bin_ioctl,
	.release =	mon_bin_release,
	.mmap =		mon_bin_mmap,
};

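/*
 * Sleep until at least one event is present in the ring buffer.
 * Returns 0 when data is available, -EWOULDBLOCK for non-blocking opens,
 * or -EINTR if interrupted by a signal.
 */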
static int mon_bin_wait_event(struct file *file, struct mon_reader_bin *rp)
{
	DECLARE_WAITQUEUE(waita, current);
	unsigned long flags;

	add_wait_queue(&rp->b_wait, &waita);
	set_current_state(TASK_INTERRUPTIBLE);

	spin_lock_irqsave(&rp->b_lock, flags);
	while (MON_RING_EMPTY(rp)) {
		spin_unlock_irqrestore(&rp->b_lock, flags);

		if (file->f_flags & O_NONBLOCK) {
			set_current_state(TASK_RUNNING);
			remove_wait_queue(&rp->b_wait, &waita);
			return -EWOULDBLOCK; /* Same as EAGAIN in Linux */
		}
		schedule();
		if (signal_pending(current)) {
			remove_wait_queue(&rp->b_wait, &waita);
			return -EINTR;
		}
		set_current_state(TASK_INTERRUPTIBLE);

		spin_lock_irqsave(&rp->b_lock, flags);
	}
	spin_unlock_irqrestore(&rp->b_lock, flags);

	set_current_state(TASK_RUNNING);
	remove_wait_queue(&rp->b_wait, &waita);
	return 0;
}

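/*
 * Allocate and free the page-sized chunks backing the ring buffer.
 */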
static int mon_alloc_buff(struct mon_pgmap *map, int npages)
{
	int n;
	unsigned long vaddr;

	for (n = 0; n < npages; n++) {
		vaddr = get_zeroed_page(GFP_KERNEL);
		if (vaddr == 0) {
			while (n-- != 0)
				free_page((unsigned long) map[n].ptr);
			return -ENOMEM;
		}
		map[n].ptr = (unsigned char *) vaddr;
		map[n].pg = virt_to_page(vaddr);
	}
	return 0;
}

static void mon_free_buff(struct mon_pgmap *map, int npages)
{
	int n;

	for (n = 0; n < npages; n++)
		free_page((unsigned long) map[n].ptr);
}

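/*
 * Create (and later destroy) the "usbmonN" class device for a monitored
 * bus; minor 0 is reserved for the catch-all "bus 0" reader.
 */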
int mon_bin_add(struct mon_bus *mbus, const struct usb_bus *ubus)
{
	struct device *dev;
	unsigned minor = ubus? ubus->busnum: 0;

	if (minor >= MON_BIN_MAX_MINOR)
		return 0;

	dev = device_create_drvdata(mon_bin_class, ubus? ubus->controller: NULL,
			MKDEV(MAJOR(mon_bin_dev0), minor), NULL,
			"usbmon%d", minor);
	if (IS_ERR(dev))
		return 0;

	mbus->classdev = dev;
	return 1;
}

void mon_bin_del(struct mon_bus *mbus)
{
	device_destroy(mon_bin_class, mbus->classdev->devt);
}

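/*
 * Module-level setup and teardown: the "usbmon" class, the character
 * device region and the cdev shared by all binary readers.
 */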
int __init mon_bin_init(void)
{
	int rc;

	mon_bin_class = class_create(THIS_MODULE, "usbmon");
	if (IS_ERR(mon_bin_class)) {
		rc = PTR_ERR(mon_bin_class);
		goto err_class;
	}

	rc = alloc_chrdev_region(&mon_bin_dev0, 0, MON_BIN_MAX_MINOR, "usbmon");
	if (rc < 0)
		goto err_dev;

	cdev_init(&mon_bin_cdev, &mon_fops_binary);
	mon_bin_cdev.owner = THIS_MODULE;

	rc = cdev_add(&mon_bin_cdev, mon_bin_dev0, MON_BIN_MAX_MINOR);
	if (rc < 0)
		goto err_add;

	return 0;

err_add:
	unregister_chrdev_region(mon_bin_dev0, MON_BIN_MAX_MINOR);
err_dev:
	class_destroy(mon_bin_class);
err_class:
	return rc;
}

void mon_bin_exit(void)
{
	cdev_del(&mon_bin_cdev);
	unregister_chrdev_region(mon_bin_dev0, MON_BIN_MAX_MINOR);
	class_destroy(mon_bin_class);
}