fs_parse: fold fs_parameter_desc/fs_parameter_spec
[linux-block.git] / drivers / usb / gadget / function / f_fs.c
1 // SPDX-License-Identifier: GPL-2.0+
2 /*
3  * f_fs.c -- user mode file system API for USB composite function controllers
4  *
5  * Copyright (C) 2010 Samsung Electronics
6  * Author: Michal Nazarewicz <mina86@mina86.com>
7  *
8  * Based on inode.c (GadgetFS) which was:
9  * Copyright (C) 2003-2004 David Brownell
10  * Copyright (C) 2003 Agilent Technologies
11  */
12
13
14 /* #define DEBUG */
15 /* #define VERBOSE_DEBUG */
16
17 #include <linux/blkdev.h>
18 #include <linux/pagemap.h>
19 #include <linux/export.h>
20 #include <linux/fs_parser.h>
21 #include <linux/hid.h>
22 #include <linux/mm.h>
23 #include <linux/module.h>
24 #include <linux/scatterlist.h>
25 #include <linux/sched/signal.h>
26 #include <linux/uio.h>
27 #include <linux/vmalloc.h>
28 #include <asm/unaligned.h>
29
30 #include <linux/usb/ccid.h>
31 #include <linux/usb/composite.h>
32 #include <linux/usb/functionfs.h>
33
34 #include <linux/aio.h>
35 #include <linux/mmu_context.h>
36 #include <linux/poll.h>
37 #include <linux/eventfd.h>
38
39 #include "u_fs.h"
40 #include "u_f.h"
41 #include "u_os_desc.h"
42 #include "configfs.h"
43
44 #define FUNCTIONFS_MAGIC        0xa647361 /* Chosen by a honest dice roll ;) */
45
46 /* Reference counter handling */
47 static void ffs_data_get(struct ffs_data *ffs);
48 static void ffs_data_put(struct ffs_data *ffs);
49 /* Creates new ffs_data object. */
50 static struct ffs_data *__must_check ffs_data_new(const char *dev_name)
51         __attribute__((malloc));
52
53 /* Opened counter handling. */
54 static void ffs_data_opened(struct ffs_data *ffs);
55 static void ffs_data_closed(struct ffs_data *ffs);
56
57 /* Called with ffs->mutex held; take over ownership of data. */
58 static int __must_check
59 __ffs_data_got_descs(struct ffs_data *ffs, char *data, size_t len);
60 static int __must_check
61 __ffs_data_got_strings(struct ffs_data *ffs, char *data, size_t len);
62
63
64 /* The function structure ***************************************************/
65
66 struct ffs_ep;
67
68 struct ffs_function {
69         struct usb_configuration        *conf;
70         struct usb_gadget               *gadget;
71         struct ffs_data                 *ffs;
72
73         struct ffs_ep                   *eps;
74         u8                              eps_revmap[16];
75         short                           *interfaces_nums;
76
77         struct usb_function             function;
78 };
79
80
81 static struct ffs_function *ffs_func_from_usb(struct usb_function *f)
82 {
83         return container_of(f, struct ffs_function, function);
84 }
85
86
87 static inline enum ffs_setup_state
88 ffs_setup_state_clear_cancelled(struct ffs_data *ffs)
89 {
90         return (enum ffs_setup_state)
91                 cmpxchg(&ffs->setup_state, FFS_SETUP_CANCELLED, FFS_NO_SETUP);
92 }
93
94
95 static void ffs_func_eps_disable(struct ffs_function *func);
96 static int __must_check ffs_func_eps_enable(struct ffs_function *func);
97
98 static int ffs_func_bind(struct usb_configuration *,
99                          struct usb_function *);
100 static int ffs_func_set_alt(struct usb_function *, unsigned, unsigned);
101 static void ffs_func_disable(struct usb_function *);
102 static int ffs_func_setup(struct usb_function *,
103                           const struct usb_ctrlrequest *);
104 static bool ffs_func_req_match(struct usb_function *,
105                                const struct usb_ctrlrequest *,
106                                bool config0);
107 static void ffs_func_suspend(struct usb_function *);
108 static void ffs_func_resume(struct usb_function *);
109
110
111 static int ffs_func_revmap_ep(struct ffs_function *func, u8 num);
112 static int ffs_func_revmap_intf(struct ffs_function *func, u8 intf);
113
114
115 /* The endpoints structures *************************************************/
116
117 struct ffs_ep {
118         struct usb_ep                   *ep;    /* P: ffs->eps_lock */
119         struct usb_request              *req;   /* P: epfile->mutex */
120
121         /* [0]: full speed, [1]: high speed, [2]: super speed */
122         struct usb_endpoint_descriptor  *descs[3];
123
124         u8                              num;
125
126         int                             status; /* P: epfile->mutex */
127 };
128
129 struct ffs_epfile {
130         /* Protects ep->ep and ep->req. */
131         struct mutex                    mutex;
132
133         struct ffs_data                 *ffs;
134         struct ffs_ep                   *ep;    /* P: ffs->eps_lock */
135
136         struct dentry                   *dentry;
137
138         /*
139          * Buffer for holding data from partial reads which may happen since
140          * we’re rounding user read requests to a multiple of a max packet size.
141          *
142          * The pointer is initialised with NULL value and may be set by
143          * __ffs_epfile_read_data function to point to a temporary buffer.
144          *
145          * In normal operation, calls to __ffs_epfile_read_buffered will consume
146          * data from said buffer and eventually free it.  Importantly, while the
147          * function is using the buffer, it sets the pointer to NULL.  This is
148          * all right since __ffs_epfile_read_data and __ffs_epfile_read_buffered
149          * can never run concurrently (they are synchronised by epfile->mutex)
150          * so the latter will not assign a new value to the pointer.
151          *
152          * Meanwhile ffs_func_eps_disable frees the buffer (if the pointer is
153          * valid) and sets the pointer to READ_BUFFER_DROP value.  This special
154          * value is crux of the synchronisation between ffs_func_eps_disable and
155          * __ffs_epfile_read_data.
156          *
157          * Once __ffs_epfile_read_data is about to finish it will try to set the
158          * pointer back to its old value (as described above), but seeing as the
159          * pointer is not-NULL (namely READ_BUFFER_DROP) it will instead free
160          * the buffer.
161          *
162          * == State transitions ==
163          *
164          * • ptr == NULL:  (initial state)
165          *   ◦ __ffs_epfile_read_buffer_free: go to ptr == DROP
166          *   ◦ __ffs_epfile_read_buffered:    nop
167          *   ◦ __ffs_epfile_read_data allocates temp buffer: go to ptr == buf
168          *   ◦ reading finishes:              n/a, not in ‘and reading’ state
169          * • ptr == DROP:
170          *   ◦ __ffs_epfile_read_buffer_free: nop
171          *   ◦ __ffs_epfile_read_buffered:    go to ptr == NULL
172          *   ◦ __ffs_epfile_read_data allocates temp buffer: free buf, nop
173          *   ◦ reading finishes:              n/a, not in ‘and reading’ state
174          * • ptr == buf:
175          *   ◦ __ffs_epfile_read_buffer_free: free buf, go to ptr == DROP
176          *   ◦ __ffs_epfile_read_buffered:    go to ptr == NULL and reading
177          *   ◦ __ffs_epfile_read_data:        n/a, __ffs_epfile_read_buffered
178          *                                    is always called first
179          *   ◦ reading finishes:              n/a, not in ‘and reading’ state
180          * • ptr == NULL and reading:
181          *   ◦ __ffs_epfile_read_buffer_free: go to ptr == DROP and reading
182          *   ◦ __ffs_epfile_read_buffered:    n/a, mutex is held
183          *   ◦ __ffs_epfile_read_data:        n/a, mutex is held
184          *   ◦ reading finishes and …
185          *     … all data read:               free buf, go to ptr == NULL
186          *     … otherwise:                   go to ptr == buf and reading
187          * • ptr == DROP and reading:
188          *   ◦ __ffs_epfile_read_buffer_free: nop
189          *   ◦ __ffs_epfile_read_buffered:    n/a, mutex is held
190          *   ◦ __ffs_epfile_read_data:        n/a, mutex is held
191          *   ◦ reading finishes:              free buf, go to ptr == DROP
192          */
193         struct ffs_buffer               *read_buffer;
194 #define READ_BUFFER_DROP ((struct ffs_buffer *)ERR_PTR(-ESHUTDOWN))
195
196         char                            name[5];
197
198         unsigned char                   in;     /* P: ffs->eps_lock */
199         unsigned char                   isoc;   /* P: ffs->eps_lock */
200
201         unsigned char                   _pad;
202 };
203
204 struct ffs_buffer {
205         size_t length;
206         char *data;
207         char storage[];
208 };
209
210 /*  ffs_io_data structure ***************************************************/
211
212 struct ffs_io_data {
213         bool aio;
214         bool read;
215
216         struct kiocb *kiocb;
217         struct iov_iter data;
218         const void *to_free;
219         char *buf;
220
221         struct mm_struct *mm;
222         struct work_struct work;
223
224         struct usb_ep *ep;
225         struct usb_request *req;
226         struct sg_table sgt;
227         bool use_sg;
228
229         struct ffs_data *ffs;
230 };
231
232 struct ffs_desc_helper {
233         struct ffs_data *ffs;
234         unsigned interfaces_count;
235         unsigned eps_count;
236 };
237
238 static int  __must_check ffs_epfiles_create(struct ffs_data *ffs);
239 static void ffs_epfiles_destroy(struct ffs_epfile *epfiles, unsigned count);
240
241 static struct dentry *
242 ffs_sb_create_file(struct super_block *sb, const char *name, void *data,
243                    const struct file_operations *fops);
244
245 /* Devices management *******************************************************/
246
247 DEFINE_MUTEX(ffs_lock);
248 EXPORT_SYMBOL_GPL(ffs_lock);
249
250 static struct ffs_dev *_ffs_find_dev(const char *name);
251 static struct ffs_dev *_ffs_alloc_dev(void);
252 static void _ffs_free_dev(struct ffs_dev *dev);
253 static void *ffs_acquire_dev(const char *dev_name);
254 static void ffs_release_dev(struct ffs_data *ffs_data);
255 static int ffs_ready(struct ffs_data *ffs);
256 static void ffs_closed(struct ffs_data *ffs);
257
258 /* Misc helper functions ****************************************************/
259
260 static int ffs_mutex_lock(struct mutex *mutex, unsigned nonblock)
261         __attribute__((warn_unused_result, nonnull));
262 static char *ffs_prepare_buffer(const char __user *buf, size_t len)
263         __attribute__((warn_unused_result, nonnull));
264
265
266 /* Control file aka ep0 *****************************************************/
267
268 static void ffs_ep0_complete(struct usb_ep *ep, struct usb_request *req)
269 {
270         struct ffs_data *ffs = req->context;
271
272         complete(&ffs->ep0req_completion);
273 }
274
275 static int __ffs_ep0_queue_wait(struct ffs_data *ffs, char *data, size_t len)
276         __releases(&ffs->ev.waitq.lock)
277 {
278         struct usb_request *req = ffs->ep0req;
279         int ret;
280
281         req->zero     = len < le16_to_cpu(ffs->ev.setup.wLength);
282
283         spin_unlock_irq(&ffs->ev.waitq.lock);
284
285         req->buf      = data;
286         req->length   = len;
287
288         /*
289          * UDC layer requires to provide a buffer even for ZLP, but should
290          * not use it at all. Let's provide some poisoned pointer to catch
291          * possible bug in the driver.
292          */
293         if (req->buf == NULL)
294                 req->buf = (void *)0xDEADBABE;
295
296         reinit_completion(&ffs->ep0req_completion);
297
298         ret = usb_ep_queue(ffs->gadget->ep0, req, GFP_ATOMIC);
299         if (unlikely(ret < 0))
300                 return ret;
301
302         ret = wait_for_completion_interruptible(&ffs->ep0req_completion);
303         if (unlikely(ret)) {
304                 usb_ep_dequeue(ffs->gadget->ep0, req);
305                 return -EINTR;
306         }
307
308         ffs->setup_state = FFS_NO_SETUP;
309         return req->status ? req->status : req->actual;
310 }
311
312 static int __ffs_ep0_stall(struct ffs_data *ffs)
313 {
314         if (ffs->ev.can_stall) {
315                 pr_vdebug("ep0 stall\n");
316                 usb_ep_set_halt(ffs->gadget->ep0);
317                 ffs->setup_state = FFS_NO_SETUP;
318                 return -EL2HLT;
319         } else {
320                 pr_debug("bogus ep0 stall!\n");
321                 return -ESRCH;
322         }
323 }
324
325 static ssize_t ffs_ep0_write(struct file *file, const char __user *buf,
326                              size_t len, loff_t *ptr)
327 {
328         struct ffs_data *ffs = file->private_data;
329         ssize_t ret;
330         char *data;
331
332         ENTER();
333
334         /* Fast check if setup was canceled */
335         if (ffs_setup_state_clear_cancelled(ffs) == FFS_SETUP_CANCELLED)
336                 return -EIDRM;
337
338         /* Acquire mutex */
339         ret = ffs_mutex_lock(&ffs->mutex, file->f_flags & O_NONBLOCK);
340         if (unlikely(ret < 0))
341                 return ret;
342
343         /* Check state */
344         switch (ffs->state) {
345         case FFS_READ_DESCRIPTORS:
346         case FFS_READ_STRINGS:
347                 /* Copy data */
348                 if (unlikely(len < 16)) {
349                         ret = -EINVAL;
350                         break;
351                 }
352
353                 data = ffs_prepare_buffer(buf, len);
354                 if (IS_ERR(data)) {
355                         ret = PTR_ERR(data);
356                         break;
357                 }
358
359                 /* Handle data */
360                 if (ffs->state == FFS_READ_DESCRIPTORS) {
361                         pr_info("read descriptors\n");
362                         ret = __ffs_data_got_descs(ffs, data, len);
363                         if (unlikely(ret < 0))
364                                 break;
365
366                         ffs->state = FFS_READ_STRINGS;
367                         ret = len;
368                 } else {
369                         pr_info("read strings\n");
370                         ret = __ffs_data_got_strings(ffs, data, len);
371                         if (unlikely(ret < 0))
372                                 break;
373
374                         ret = ffs_epfiles_create(ffs);
375                         if (unlikely(ret)) {
376                                 ffs->state = FFS_CLOSING;
377                                 break;
378                         }
379
380                         ffs->state = FFS_ACTIVE;
381                         mutex_unlock(&ffs->mutex);
382
383                         ret = ffs_ready(ffs);
384                         if (unlikely(ret < 0)) {
385                                 ffs->state = FFS_CLOSING;
386                                 return ret;
387                         }
388
389                         return len;
390                 }
391                 break;
392
393         case FFS_ACTIVE:
394                 data = NULL;
395                 /*
396                  * We're called from user space, we can use _irq
397                  * rather then _irqsave
398                  */
399                 spin_lock_irq(&ffs->ev.waitq.lock);
400                 switch (ffs_setup_state_clear_cancelled(ffs)) {
401                 case FFS_SETUP_CANCELLED:
402                         ret = -EIDRM;
403                         goto done_spin;
404
405                 case FFS_NO_SETUP:
406                         ret = -ESRCH;
407                         goto done_spin;
408
409                 case FFS_SETUP_PENDING:
410                         break;
411                 }
412
413                 /* FFS_SETUP_PENDING */
414                 if (!(ffs->ev.setup.bRequestType & USB_DIR_IN)) {
415                         spin_unlock_irq(&ffs->ev.waitq.lock);
416                         ret = __ffs_ep0_stall(ffs);
417                         break;
418                 }
419
420                 /* FFS_SETUP_PENDING and not stall */
421                 len = min(len, (size_t)le16_to_cpu(ffs->ev.setup.wLength));
422
423                 spin_unlock_irq(&ffs->ev.waitq.lock);
424
425                 data = ffs_prepare_buffer(buf, len);
426                 if (IS_ERR(data)) {
427                         ret = PTR_ERR(data);
428                         break;
429                 }
430
431                 spin_lock_irq(&ffs->ev.waitq.lock);
432
433                 /*
434                  * We are guaranteed to be still in FFS_ACTIVE state
435                  * but the state of setup could have changed from
436                  * FFS_SETUP_PENDING to FFS_SETUP_CANCELLED so we need
437                  * to check for that.  If that happened we copied data
438                  * from user space in vain but it's unlikely.
439                  *
440                  * For sure we are not in FFS_NO_SETUP since this is
441                  * the only place FFS_SETUP_PENDING -> FFS_NO_SETUP
442                  * transition can be performed and it's protected by
443                  * mutex.
444                  */
445                 if (ffs_setup_state_clear_cancelled(ffs) ==
446                     FFS_SETUP_CANCELLED) {
447                         ret = -EIDRM;
448 done_spin:
449                         spin_unlock_irq(&ffs->ev.waitq.lock);
450                 } else {
451                         /* unlocks spinlock */
452                         ret = __ffs_ep0_queue_wait(ffs, data, len);
453                 }
454                 kfree(data);
455                 break;
456
457         default:
458                 ret = -EBADFD;
459                 break;
460         }
461
462         mutex_unlock(&ffs->mutex);
463         return ret;
464 }
465
466 /* Called with ffs->ev.waitq.lock and ffs->mutex held, both released on exit. */
467 static ssize_t __ffs_ep0_read_events(struct ffs_data *ffs, char __user *buf,
468                                      size_t n)
469         __releases(&ffs->ev.waitq.lock)
470 {
471         /*
472          * n cannot be bigger than ffs->ev.count, which cannot be bigger than
473          * size of ffs->ev.types array (which is four) so that's how much space
474          * we reserve.
475          */
476         struct usb_functionfs_event events[ARRAY_SIZE(ffs->ev.types)];
477         const size_t size = n * sizeof *events;
478         unsigned i = 0;
479
480         memset(events, 0, size);
481
482         do {
483                 events[i].type = ffs->ev.types[i];
484                 if (events[i].type == FUNCTIONFS_SETUP) {
485                         events[i].u.setup = ffs->ev.setup;
486                         ffs->setup_state = FFS_SETUP_PENDING;
487                 }
488         } while (++i < n);
489
490         ffs->ev.count -= n;
491         if (ffs->ev.count)
492                 memmove(ffs->ev.types, ffs->ev.types + n,
493                         ffs->ev.count * sizeof *ffs->ev.types);
494
495         spin_unlock_irq(&ffs->ev.waitq.lock);
496         mutex_unlock(&ffs->mutex);
497
498         return unlikely(copy_to_user(buf, events, size)) ? -EFAULT : size;
499 }
500
501 static ssize_t ffs_ep0_read(struct file *file, char __user *buf,
502                             size_t len, loff_t *ptr)
503 {
504         struct ffs_data *ffs = file->private_data;
505         char *data = NULL;
506         size_t n;
507         int ret;
508
509         ENTER();
510
511         /* Fast check if setup was canceled */
512         if (ffs_setup_state_clear_cancelled(ffs) == FFS_SETUP_CANCELLED)
513                 return -EIDRM;
514
515         /* Acquire mutex */
516         ret = ffs_mutex_lock(&ffs->mutex, file->f_flags & O_NONBLOCK);
517         if (unlikely(ret < 0))
518                 return ret;
519
520         /* Check state */
521         if (ffs->state != FFS_ACTIVE) {
522                 ret = -EBADFD;
523                 goto done_mutex;
524         }
525
526         /*
527          * We're called from user space, we can use _irq rather then
528          * _irqsave
529          */
530         spin_lock_irq(&ffs->ev.waitq.lock);
531
532         switch (ffs_setup_state_clear_cancelled(ffs)) {
533         case FFS_SETUP_CANCELLED:
534                 ret = -EIDRM;
535                 break;
536
537         case FFS_NO_SETUP:
538                 n = len / sizeof(struct usb_functionfs_event);
539                 if (unlikely(!n)) {
540                         ret = -EINVAL;
541                         break;
542                 }
543
544                 if ((file->f_flags & O_NONBLOCK) && !ffs->ev.count) {
545                         ret = -EAGAIN;
546                         break;
547                 }
548
549                 if (wait_event_interruptible_exclusive_locked_irq(ffs->ev.waitq,
550                                                         ffs->ev.count)) {
551                         ret = -EINTR;
552                         break;
553                 }
554
555                 /* unlocks spinlock */
556                 return __ffs_ep0_read_events(ffs, buf,
557                                              min(n, (size_t)ffs->ev.count));
558
559         case FFS_SETUP_PENDING:
560                 if (ffs->ev.setup.bRequestType & USB_DIR_IN) {
561                         spin_unlock_irq(&ffs->ev.waitq.lock);
562                         ret = __ffs_ep0_stall(ffs);
563                         goto done_mutex;
564                 }
565
566                 len = min(len, (size_t)le16_to_cpu(ffs->ev.setup.wLength));
567
568                 spin_unlock_irq(&ffs->ev.waitq.lock);
569
570                 if (likely(len)) {
571                         data = kmalloc(len, GFP_KERNEL);
572                         if (unlikely(!data)) {
573                                 ret = -ENOMEM;
574                                 goto done_mutex;
575                         }
576                 }
577
578                 spin_lock_irq(&ffs->ev.waitq.lock);
579
580                 /* See ffs_ep0_write() */
581                 if (ffs_setup_state_clear_cancelled(ffs) ==
582                     FFS_SETUP_CANCELLED) {
583                         ret = -EIDRM;
584                         break;
585                 }
586
587                 /* unlocks spinlock */
588                 ret = __ffs_ep0_queue_wait(ffs, data, len);
589                 if (likely(ret > 0) && unlikely(copy_to_user(buf, data, len)))
590                         ret = -EFAULT;
591                 goto done_mutex;
592
593         default:
594                 ret = -EBADFD;
595                 break;
596         }
597
598         spin_unlock_irq(&ffs->ev.waitq.lock);
599 done_mutex:
600         mutex_unlock(&ffs->mutex);
601         kfree(data);
602         return ret;
603 }
604
605 static int ffs_ep0_open(struct inode *inode, struct file *file)
606 {
607         struct ffs_data *ffs = inode->i_private;
608
609         ENTER();
610
611         if (unlikely(ffs->state == FFS_CLOSING))
612                 return -EBUSY;
613
614         file->private_data = ffs;
615         ffs_data_opened(ffs);
616
617         return 0;
618 }
619
620 static int ffs_ep0_release(struct inode *inode, struct file *file)
621 {
622         struct ffs_data *ffs = file->private_data;
623
624         ENTER();
625
626         ffs_data_closed(ffs);
627
628         return 0;
629 }
630
631 static long ffs_ep0_ioctl(struct file *file, unsigned code, unsigned long value)
632 {
633         struct ffs_data *ffs = file->private_data;
634         struct usb_gadget *gadget = ffs->gadget;
635         long ret;
636
637         ENTER();
638
639         if (code == FUNCTIONFS_INTERFACE_REVMAP) {
640                 struct ffs_function *func = ffs->func;
641                 ret = func ? ffs_func_revmap_intf(func, value) : -ENODEV;
642         } else if (gadget && gadget->ops->ioctl) {
643                 ret = gadget->ops->ioctl(gadget, code, value);
644         } else {
645                 ret = -ENOTTY;
646         }
647
648         return ret;
649 }
650
651 static __poll_t ffs_ep0_poll(struct file *file, poll_table *wait)
652 {
653         struct ffs_data *ffs = file->private_data;
654         __poll_t mask = EPOLLWRNORM;
655         int ret;
656
657         poll_wait(file, &ffs->ev.waitq, wait);
658
659         ret = ffs_mutex_lock(&ffs->mutex, file->f_flags & O_NONBLOCK);
660         if (unlikely(ret < 0))
661                 return mask;
662
663         switch (ffs->state) {
664         case FFS_READ_DESCRIPTORS:
665         case FFS_READ_STRINGS:
666                 mask |= EPOLLOUT;
667                 break;
668
669         case FFS_ACTIVE:
670                 switch (ffs->setup_state) {
671                 case FFS_NO_SETUP:
672                         if (ffs->ev.count)
673                                 mask |= EPOLLIN;
674                         break;
675
676                 case FFS_SETUP_PENDING:
677                 case FFS_SETUP_CANCELLED:
678                         mask |= (EPOLLIN | EPOLLOUT);
679                         break;
680                 }
681         case FFS_CLOSING:
682                 break;
683         case FFS_DEACTIVATED:
684                 break;
685         }
686
687         mutex_unlock(&ffs->mutex);
688
689         return mask;
690 }
691
692 static const struct file_operations ffs_ep0_operations = {
693         .llseek =       no_llseek,
694
695         .open =         ffs_ep0_open,
696         .write =        ffs_ep0_write,
697         .read =         ffs_ep0_read,
698         .release =      ffs_ep0_release,
699         .unlocked_ioctl =       ffs_ep0_ioctl,
700         .poll =         ffs_ep0_poll,
701 };
702
703
704 /* "Normal" endpoints operations ********************************************/
705
706 static void ffs_epfile_io_complete(struct usb_ep *_ep, struct usb_request *req)
707 {
708         ENTER();
709         if (likely(req->context)) {
710                 struct ffs_ep *ep = _ep->driver_data;
711                 ep->status = req->status ? req->status : req->actual;
712                 complete(req->context);
713         }
714 }
715
716 static ssize_t ffs_copy_to_iter(void *data, int data_len, struct iov_iter *iter)
717 {
718         ssize_t ret = copy_to_iter(data, data_len, iter);
719         if (likely(ret == data_len))
720                 return ret;
721
722         if (unlikely(iov_iter_count(iter)))
723                 return -EFAULT;
724
725         /*
726          * Dear user space developer!
727          *
728          * TL;DR: To stop getting below error message in your kernel log, change
729          * user space code using functionfs to align read buffers to a max
730          * packet size.
731          *
732          * Some UDCs (e.g. dwc3) require request sizes to be a multiple of a max
733          * packet size.  When unaligned buffer is passed to functionfs, it
734          * internally uses a larger, aligned buffer so that such UDCs are happy.
735          *
736          * Unfortunately, this means that host may send more data than was
737          * requested in read(2) system call.  f_fs doesn’t know what to do with
738          * that excess data so it simply drops it.
739          *
740          * Was the buffer aligned in the first place, no such problem would
741          * happen.
742          *
743          * Data may be dropped only in AIO reads.  Synchronous reads are handled
744          * by splitting a request into multiple parts.  This splitting may still
745          * be a problem though so it’s likely best to align the buffer
746          * regardless of it being AIO or not..
747          *
748          * This only affects OUT endpoints, i.e. reading data with a read(2),
749          * aio_read(2) etc. system calls.  Writing data to an IN endpoint is not
750          * affected.
751          */
752         pr_err("functionfs read size %d > requested size %zd, dropping excess data. "
753                "Align read buffer size to max packet size to avoid the problem.\n",
754                data_len, ret);
755
756         return ret;
757 }
758
759 /*
760  * allocate a virtually contiguous buffer and create a scatterlist describing it
761  * @sg_table    - pointer to a place to be filled with sg_table contents
762  * @size        - required buffer size
763  */
764 static void *ffs_build_sg_list(struct sg_table *sgt, size_t sz)
765 {
766         struct page **pages;
767         void *vaddr, *ptr;
768         unsigned int n_pages;
769         int i;
770
771         vaddr = vmalloc(sz);
772         if (!vaddr)
773                 return NULL;
774
775         n_pages = PAGE_ALIGN(sz) >> PAGE_SHIFT;
776         pages = kvmalloc_array(n_pages, sizeof(struct page *), GFP_KERNEL);
777         if (!pages) {
778                 vfree(vaddr);
779
780                 return NULL;
781         }
782         for (i = 0, ptr = vaddr; i < n_pages; ++i, ptr += PAGE_SIZE)
783                 pages[i] = vmalloc_to_page(ptr);
784
785         if (sg_alloc_table_from_pages(sgt, pages, n_pages, 0, sz, GFP_KERNEL)) {
786                 kvfree(pages);
787                 vfree(vaddr);
788
789                 return NULL;
790         }
791         kvfree(pages);
792
793         return vaddr;
794 }
795
796 static inline void *ffs_alloc_buffer(struct ffs_io_data *io_data,
797         size_t data_len)
798 {
799         if (io_data->use_sg)
800                 return ffs_build_sg_list(&io_data->sgt, data_len);
801
802         return kmalloc(data_len, GFP_KERNEL);
803 }
804
805 static inline void ffs_free_buffer(struct ffs_io_data *io_data)
806 {
807         if (!io_data->buf)
808                 return;
809
810         if (io_data->use_sg) {
811                 sg_free_table(&io_data->sgt);
812                 vfree(io_data->buf);
813         } else {
814                 kfree(io_data->buf);
815         }
816 }
817
818 static void ffs_user_copy_worker(struct work_struct *work)
819 {
820         struct ffs_io_data *io_data = container_of(work, struct ffs_io_data,
821                                                    work);
822         int ret = io_data->req->status ? io_data->req->status :
823                                          io_data->req->actual;
824         bool kiocb_has_eventfd = io_data->kiocb->ki_flags & IOCB_EVENTFD;
825
826         if (io_data->read && ret > 0) {
827                 mm_segment_t oldfs = get_fs();
828
829                 set_fs(USER_DS);
830                 use_mm(io_data->mm);
831                 ret = ffs_copy_to_iter(io_data->buf, ret, &io_data->data);
832                 unuse_mm(io_data->mm);
833                 set_fs(oldfs);
834         }
835
836         io_data->kiocb->ki_complete(io_data->kiocb, ret, ret);
837
838         if (io_data->ffs->ffs_eventfd && !kiocb_has_eventfd)
839                 eventfd_signal(io_data->ffs->ffs_eventfd, 1);
840
841         usb_ep_free_request(io_data->ep, io_data->req);
842
843         if (io_data->read)
844                 kfree(io_data->to_free);
845         ffs_free_buffer(io_data);
846         kfree(io_data);
847 }
848
849 static void ffs_epfile_async_io_complete(struct usb_ep *_ep,
850                                          struct usb_request *req)
851 {
852         struct ffs_io_data *io_data = req->context;
853         struct ffs_data *ffs = io_data->ffs;
854
855         ENTER();
856
857         INIT_WORK(&io_data->work, ffs_user_copy_worker);
858         queue_work(ffs->io_completion_wq, &io_data->work);
859 }
860
861 static void __ffs_epfile_read_buffer_free(struct ffs_epfile *epfile)
862 {
863         /*
864          * See comment in struct ffs_epfile for full read_buffer pointer
865          * synchronisation story.
866          */
867         struct ffs_buffer *buf = xchg(&epfile->read_buffer, READ_BUFFER_DROP);
868         if (buf && buf != READ_BUFFER_DROP)
869                 kfree(buf);
870 }
871
872 /* Assumes epfile->mutex is held. */
873 static ssize_t __ffs_epfile_read_buffered(struct ffs_epfile *epfile,
874                                           struct iov_iter *iter)
875 {
876         /*
877          * Null out epfile->read_buffer so ffs_func_eps_disable does not free
878          * the buffer while we are using it.  See comment in struct ffs_epfile
879          * for full read_buffer pointer synchronisation story.
880          */
881         struct ffs_buffer *buf = xchg(&epfile->read_buffer, NULL);
882         ssize_t ret;
883         if (!buf || buf == READ_BUFFER_DROP)
884                 return 0;
885
886         ret = copy_to_iter(buf->data, buf->length, iter);
887         if (buf->length == ret) {
888                 kfree(buf);
889                 return ret;
890         }
891
892         if (unlikely(iov_iter_count(iter))) {
893                 ret = -EFAULT;
894         } else {
895                 buf->length -= ret;
896                 buf->data += ret;
897         }
898
899         if (cmpxchg(&epfile->read_buffer, NULL, buf))
900                 kfree(buf);
901
902         return ret;
903 }
904
905 /* Assumes epfile->mutex is held. */
906 static ssize_t __ffs_epfile_read_data(struct ffs_epfile *epfile,
907                                       void *data, int data_len,
908                                       struct iov_iter *iter)
909 {
910         struct ffs_buffer *buf;
911
912         ssize_t ret = copy_to_iter(data, data_len, iter);
913         if (likely(data_len == ret))
914                 return ret;
915
916         if (unlikely(iov_iter_count(iter)))
917                 return -EFAULT;
918
919         /* See ffs_copy_to_iter for more context. */
920         pr_warn("functionfs read size %d > requested size %zd, splitting request into multiple reads.",
921                 data_len, ret);
922
923         data_len -= ret;
924         buf = kmalloc(sizeof(*buf) + data_len, GFP_KERNEL);
925         if (!buf)
926                 return -ENOMEM;
927         buf->length = data_len;
928         buf->data = buf->storage;
929         memcpy(buf->storage, data + ret, data_len);
930
931         /*
932          * At this point read_buffer is NULL or READ_BUFFER_DROP (if
933          * ffs_func_eps_disable has been called in the meanwhile).  See comment
934          * in struct ffs_epfile for full read_buffer pointer synchronisation
935          * story.
936          */
937         if (unlikely(cmpxchg(&epfile->read_buffer, NULL, buf)))
938                 kfree(buf);
939
940         return ret;
941 }
942
943 static ssize_t ffs_epfile_io(struct file *file, struct ffs_io_data *io_data)
944 {
945         struct ffs_epfile *epfile = file->private_data;
946         struct usb_request *req;
947         struct ffs_ep *ep;
948         char *data = NULL;
949         ssize_t ret, data_len = -EINVAL;
950         int halt;
951
952         /* Are we still active? */
953         if (WARN_ON(epfile->ffs->state != FFS_ACTIVE))
954                 return -ENODEV;
955
956         /* Wait for endpoint to be enabled */
957         ep = epfile->ep;
958         if (!ep) {
959                 if (file->f_flags & O_NONBLOCK)
960                         return -EAGAIN;
961
962                 ret = wait_event_interruptible(
963                                 epfile->ffs->wait, (ep = epfile->ep));
964                 if (ret)
965                         return -EINTR;
966         }
967
968         /* Do we halt? */
969         halt = (!io_data->read == !epfile->in);
970         if (halt && epfile->isoc)
971                 return -EINVAL;
972
973         /* We will be using request and read_buffer */
974         ret = ffs_mutex_lock(&epfile->mutex, file->f_flags & O_NONBLOCK);
975         if (unlikely(ret))
976                 goto error;
977
978         /* Allocate & copy */
979         if (!halt) {
980                 struct usb_gadget *gadget;
981
982                 /*
983                  * Do we have buffered data from previous partial read?  Check
984                  * that for synchronous case only because we do not have
985                  * facility to ‘wake up’ a pending asynchronous read and push
986                  * buffered data to it which we would need to make things behave
987                  * consistently.
988                  */
989                 if (!io_data->aio && io_data->read) {
990                         ret = __ffs_epfile_read_buffered(epfile, &io_data->data);
991                         if (ret)
992                                 goto error_mutex;
993                 }
994
995                 /*
996                  * if we _do_ wait above, the epfile->ffs->gadget might be NULL
997                  * before the waiting completes, so do not assign to 'gadget'
998                  * earlier
999                  */
1000                 gadget = epfile->ffs->gadget;
1001
1002                 spin_lock_irq(&epfile->ffs->eps_lock);
1003                 /* In the meantime, endpoint got disabled or changed. */
1004                 if (epfile->ep != ep) {
1005                         ret = -ESHUTDOWN;
1006                         goto error_lock;
1007                 }
1008                 data_len = iov_iter_count(&io_data->data);
1009                 /*
1010                  * Controller may require buffer size to be aligned to
1011                  * maxpacketsize of an out endpoint.
1012                  */
1013                 if (io_data->read)
1014                         data_len = usb_ep_align_maybe(gadget, ep->ep, data_len);
1015
1016                 io_data->use_sg = gadget->sg_supported && data_len > PAGE_SIZE;
1017                 spin_unlock_irq(&epfile->ffs->eps_lock);
1018
1019                 data = ffs_alloc_buffer(io_data, data_len);
1020                 if (unlikely(!data)) {
1021                         ret = -ENOMEM;
1022                         goto error_mutex;
1023                 }
1024                 if (!io_data->read &&
1025                     !copy_from_iter_full(data, data_len, &io_data->data)) {
1026                         ret = -EFAULT;
1027                         goto error_mutex;
1028                 }
1029         }
1030
1031         spin_lock_irq(&epfile->ffs->eps_lock);
1032
1033         if (epfile->ep != ep) {
1034                 /* In the meantime, endpoint got disabled or changed. */
1035                 ret = -ESHUTDOWN;
1036         } else if (halt) {
1037                 ret = usb_ep_set_halt(ep->ep);
1038                 if (!ret)
1039                         ret = -EBADMSG;
1040         } else if (unlikely(data_len == -EINVAL)) {
1041                 /*
1042                  * Sanity Check: even though data_len can't be used
1043                  * uninitialized at the time I write this comment, some
1044                  * compilers complain about this situation.
1045                  * In order to keep the code clean from warnings, data_len is
1046                  * being initialized to -EINVAL during its declaration, which
1047                  * means we can't rely on compiler anymore to warn no future
1048                  * changes won't result in data_len being used uninitialized.
1049                  * For such reason, we're adding this redundant sanity check
1050                  * here.
1051                  */
1052                 WARN(1, "%s: data_len == -EINVAL\n", __func__);
1053                 ret = -EINVAL;
1054         } else if (!io_data->aio) {
1055                 DECLARE_COMPLETION_ONSTACK(done);
1056                 bool interrupted = false;
1057
1058                 req = ep->req;
1059                 if (io_data->use_sg) {
1060                         req->buf = NULL;
1061                         req->sg = io_data->sgt.sgl;
1062                         req->num_sgs = io_data->sgt.nents;
1063                 } else {
1064                         req->buf = data;
1065                 }
1066                 req->length = data_len;
1067
1068                 io_data->buf = data;
1069
1070                 req->context  = &done;
1071                 req->complete = ffs_epfile_io_complete;
1072
1073                 ret = usb_ep_queue(ep->ep, req, GFP_ATOMIC);
1074                 if (unlikely(ret < 0))
1075                         goto error_lock;
1076
1077                 spin_unlock_irq(&epfile->ffs->eps_lock);
1078
1079                 if (unlikely(wait_for_completion_interruptible(&done))) {
1080                         /*
1081                          * To avoid race condition with ffs_epfile_io_complete,
1082                          * dequeue the request first then check
1083                          * status. usb_ep_dequeue API should guarantee no race
1084                          * condition with req->complete callback.
1085                          */
1086                         usb_ep_dequeue(ep->ep, req);
1087                         wait_for_completion(&done);
1088                         interrupted = ep->status < 0;
1089                 }
1090
1091                 if (interrupted)
1092                         ret = -EINTR;
1093                 else if (io_data->read && ep->status > 0)
1094                         ret = __ffs_epfile_read_data(epfile, data, ep->status,
1095                                                      &io_data->data);
1096                 else
1097                         ret = ep->status;
1098                 goto error_mutex;
1099         } else if (!(req = usb_ep_alloc_request(ep->ep, GFP_ATOMIC))) {
1100                 ret = -ENOMEM;
1101         } else {
1102                 if (io_data->use_sg) {
1103                         req->buf = NULL;
1104                         req->sg = io_data->sgt.sgl;
1105                         req->num_sgs = io_data->sgt.nents;
1106                 } else {
1107                         req->buf = data;
1108                 }
1109                 req->length = data_len;
1110
1111                 io_data->buf = data;
1112                 io_data->ep = ep->ep;
1113                 io_data->req = req;
1114                 io_data->ffs = epfile->ffs;
1115
1116                 req->context  = io_data;
1117                 req->complete = ffs_epfile_async_io_complete;
1118
1119                 ret = usb_ep_queue(ep->ep, req, GFP_ATOMIC);
1120                 if (unlikely(ret)) {
1121                         usb_ep_free_request(ep->ep, req);
1122                         goto error_lock;
1123                 }
1124
1125                 ret = -EIOCBQUEUED;
1126                 /*
1127                  * Do not kfree the buffer in this function.  It will be freed
1128                  * by ffs_user_copy_worker.
1129                  */
1130                 data = NULL;
1131         }
1132
1133 error_lock:
1134         spin_unlock_irq(&epfile->ffs->eps_lock);
1135 error_mutex:
1136         mutex_unlock(&epfile->mutex);
1137 error:
1138         if (ret != -EIOCBQUEUED) /* don't free if there is iocb queued */
1139                 ffs_free_buffer(io_data);
1140         return ret;
1141 }
1142
1143 static int
1144 ffs_epfile_open(struct inode *inode, struct file *file)
1145 {
1146         struct ffs_epfile *epfile = inode->i_private;
1147
1148         ENTER();
1149
1150         if (WARN_ON(epfile->ffs->state != FFS_ACTIVE))
1151                 return -ENODEV;
1152
1153         file->private_data = epfile;
1154         ffs_data_opened(epfile->ffs);
1155
1156         return 0;
1157 }
1158
1159 static int ffs_aio_cancel(struct kiocb *kiocb)
1160 {
1161         struct ffs_io_data *io_data = kiocb->private;
1162         struct ffs_epfile *epfile = kiocb->ki_filp->private_data;
1163         int value;
1164
1165         ENTER();
1166
1167         spin_lock_irq(&epfile->ffs->eps_lock);
1168
1169         if (likely(io_data && io_data->ep && io_data->req))
1170                 value = usb_ep_dequeue(io_data->ep, io_data->req);
1171         else
1172                 value = -EINVAL;
1173
1174         spin_unlock_irq(&epfile->ffs->eps_lock);
1175
1176         return value;
1177 }
1178
1179 static ssize_t ffs_epfile_write_iter(struct kiocb *kiocb, struct iov_iter *from)
1180 {
1181         struct ffs_io_data io_data, *p = &io_data;
1182         ssize_t res;
1183
1184         ENTER();
1185
1186         if (!is_sync_kiocb(kiocb)) {
1187                 p = kzalloc(sizeof(io_data), GFP_KERNEL);
1188                 if (unlikely(!p))
1189                         return -ENOMEM;
1190                 p->aio = true;
1191         } else {
1192                 memset(p, 0, sizeof(*p));
1193                 p->aio = false;
1194         }
1195
1196         p->read = false;
1197         p->kiocb = kiocb;
1198         p->data = *from;
1199         p->mm = current->mm;
1200
1201         kiocb->private = p;
1202
1203         if (p->aio)
1204                 kiocb_set_cancel_fn(kiocb, ffs_aio_cancel);
1205
1206         res = ffs_epfile_io(kiocb->ki_filp, p);
1207         if (res == -EIOCBQUEUED)
1208                 return res;
1209         if (p->aio)
1210                 kfree(p);
1211         else
1212                 *from = p->data;
1213         return res;
1214 }
1215
1216 static ssize_t ffs_epfile_read_iter(struct kiocb *kiocb, struct iov_iter *to)
1217 {
1218         struct ffs_io_data io_data, *p = &io_data;
1219         ssize_t res;
1220
1221         ENTER();
1222
1223         if (!is_sync_kiocb(kiocb)) {
1224                 p = kzalloc(sizeof(io_data), GFP_KERNEL);
1225                 if (unlikely(!p))
1226                         return -ENOMEM;
1227                 p->aio = true;
1228         } else {
1229                 memset(p, 0, sizeof(*p));
1230                 p->aio = false;
1231         }
1232
1233         p->read = true;
1234         p->kiocb = kiocb;
1235         if (p->aio) {
1236                 p->to_free = dup_iter(&p->data, to, GFP_KERNEL);
1237                 if (!p->to_free) {
1238                         kfree(p);
1239                         return -ENOMEM;
1240                 }
1241         } else {
1242                 p->data = *to;
1243                 p->to_free = NULL;
1244         }
1245         p->mm = current->mm;
1246
1247         kiocb->private = p;
1248
1249         if (p->aio)
1250                 kiocb_set_cancel_fn(kiocb, ffs_aio_cancel);
1251
1252         res = ffs_epfile_io(kiocb->ki_filp, p);
1253         if (res == -EIOCBQUEUED)
1254                 return res;
1255
1256         if (p->aio) {
1257                 kfree(p->to_free);
1258                 kfree(p);
1259         } else {
1260                 *to = p->data;
1261         }
1262         return res;
1263 }
1264
1265 static int
1266 ffs_epfile_release(struct inode *inode, struct file *file)
1267 {
1268         struct ffs_epfile *epfile = inode->i_private;
1269
1270         ENTER();
1271
1272         __ffs_epfile_read_buffer_free(epfile);
1273         ffs_data_closed(epfile->ffs);
1274
1275         return 0;
1276 }
1277
1278 static long ffs_epfile_ioctl(struct file *file, unsigned code,
1279                              unsigned long value)
1280 {
1281         struct ffs_epfile *epfile = file->private_data;
1282         struct ffs_ep *ep;
1283         int ret;
1284
1285         ENTER();
1286
1287         if (WARN_ON(epfile->ffs->state != FFS_ACTIVE))
1288                 return -ENODEV;
1289
1290         /* Wait for endpoint to be enabled */
1291         ep = epfile->ep;
1292         if (!ep) {
1293                 if (file->f_flags & O_NONBLOCK)
1294                         return -EAGAIN;
1295
1296                 ret = wait_event_interruptible(
1297                                 epfile->ffs->wait, (ep = epfile->ep));
1298                 if (ret)
1299                         return -EINTR;
1300         }
1301
1302         spin_lock_irq(&epfile->ffs->eps_lock);
1303
1304         /* In the meantime, endpoint got disabled or changed. */
1305         if (epfile->ep != ep) {
1306                 spin_unlock_irq(&epfile->ffs->eps_lock);
1307                 return -ESHUTDOWN;
1308         }
1309
1310         switch (code) {
1311         case FUNCTIONFS_FIFO_STATUS:
1312                 ret = usb_ep_fifo_status(epfile->ep->ep);
1313                 break;
1314         case FUNCTIONFS_FIFO_FLUSH:
1315                 usb_ep_fifo_flush(epfile->ep->ep);
1316                 ret = 0;
1317                 break;
1318         case FUNCTIONFS_CLEAR_HALT:
1319                 ret = usb_ep_clear_halt(epfile->ep->ep);
1320                 break;
1321         case FUNCTIONFS_ENDPOINT_REVMAP:
1322                 ret = epfile->ep->num;
1323                 break;
1324         case FUNCTIONFS_ENDPOINT_DESC:
1325         {
1326                 int desc_idx;
1327                 struct usb_endpoint_descriptor *desc;
1328
1329                 switch (epfile->ffs->gadget->speed) {
1330                 case USB_SPEED_SUPER:
1331                         desc_idx = 2;
1332                         break;
1333                 case USB_SPEED_HIGH:
1334                         desc_idx = 1;
1335                         break;
1336                 default:
1337                         desc_idx = 0;
1338                 }
1339                 desc = epfile->ep->descs[desc_idx];
1340
1341                 spin_unlock_irq(&epfile->ffs->eps_lock);
1342                 ret = copy_to_user((void __user *)value, desc, desc->bLength);
1343                 if (ret)
1344                         ret = -EFAULT;
1345                 return ret;
1346         }
1347         default:
1348                 ret = -ENOTTY;
1349         }
1350         spin_unlock_irq(&epfile->ffs->eps_lock);
1351
1352         return ret;
1353 }
1354
1355 static const struct file_operations ffs_epfile_operations = {
1356         .llseek =       no_llseek,
1357
1358         .open =         ffs_epfile_open,
1359         .write_iter =   ffs_epfile_write_iter,
1360         .read_iter =    ffs_epfile_read_iter,
1361         .release =      ffs_epfile_release,
1362         .unlocked_ioctl =       ffs_epfile_ioctl,
1363         .compat_ioctl = compat_ptr_ioctl,
1364 };
1365
1366
1367 /* File system and super block operations ***********************************/
1368
1369 /*
1370  * Mounting the file system creates a controller file, used first for
1371  * function configuration then later for event monitoring.
1372  */
1373
1374 static struct inode *__must_check
1375 ffs_sb_make_inode(struct super_block *sb, void *data,
1376                   const struct file_operations *fops,
1377                   const struct inode_operations *iops,
1378                   struct ffs_file_perms *perms)
1379 {
1380         struct inode *inode;
1381
1382         ENTER();
1383
1384         inode = new_inode(sb);
1385
1386         if (likely(inode)) {
1387                 struct timespec64 ts = current_time(inode);
1388
1389                 inode->i_ino     = get_next_ino();
1390                 inode->i_mode    = perms->mode;
1391                 inode->i_uid     = perms->uid;
1392                 inode->i_gid     = perms->gid;
1393                 inode->i_atime   = ts;
1394                 inode->i_mtime   = ts;
1395                 inode->i_ctime   = ts;
1396                 inode->i_private = data;
1397                 if (fops)
1398                         inode->i_fop = fops;
1399                 if (iops)
1400                         inode->i_op  = iops;
1401         }
1402
1403         return inode;
1404 }
1405
1406 /* Create "regular" file */
1407 static struct dentry *ffs_sb_create_file(struct super_block *sb,
1408                                         const char *name, void *data,
1409                                         const struct file_operations *fops)
1410 {
1411         struct ffs_data *ffs = sb->s_fs_info;
1412         struct dentry   *dentry;
1413         struct inode    *inode;
1414
1415         ENTER();
1416
1417         dentry = d_alloc_name(sb->s_root, name);
1418         if (unlikely(!dentry))
1419                 return NULL;
1420
1421         inode = ffs_sb_make_inode(sb, data, fops, NULL, &ffs->file_perms);
1422         if (unlikely(!inode)) {
1423                 dput(dentry);
1424                 return NULL;
1425         }
1426
1427         d_add(dentry, inode);
1428         return dentry;
1429 }
1430
1431 /* Super block */
1432 static const struct super_operations ffs_sb_operations = {
1433         .statfs =       simple_statfs,
1434         .drop_inode =   generic_delete_inode,
1435 };
1436
1437 struct ffs_sb_fill_data {
1438         struct ffs_file_perms perms;
1439         umode_t root_mode;
1440         const char *dev_name;
1441         bool no_disconnect;
1442         struct ffs_data *ffs_data;
1443 };
1444
1445 static int ffs_sb_fill(struct super_block *sb, struct fs_context *fc)
1446 {
1447         struct ffs_sb_fill_data *data = fc->fs_private;
1448         struct inode    *inode;
1449         struct ffs_data *ffs = data->ffs_data;
1450
1451         ENTER();
1452
1453         ffs->sb              = sb;
1454         data->ffs_data       = NULL;
1455         sb->s_fs_info        = ffs;
1456         sb->s_blocksize      = PAGE_SIZE;
1457         sb->s_blocksize_bits = PAGE_SHIFT;
1458         sb->s_magic          = FUNCTIONFS_MAGIC;
1459         sb->s_op             = &ffs_sb_operations;
1460         sb->s_time_gran      = 1;
1461
1462         /* Root inode */
1463         data->perms.mode = data->root_mode;
1464         inode = ffs_sb_make_inode(sb, NULL,
1465                                   &simple_dir_operations,
1466                                   &simple_dir_inode_operations,
1467                                   &data->perms);
1468         sb->s_root = d_make_root(inode);
1469         if (unlikely(!sb->s_root))
1470                 return -ENOMEM;
1471
1472         /* EP0 file */
1473         if (unlikely(!ffs_sb_create_file(sb, "ep0", ffs,
1474                                          &ffs_ep0_operations)))
1475                 return -ENOMEM;
1476
1477         return 0;
1478 }
1479
1480 enum {
1481         Opt_no_disconnect,
1482         Opt_rmode,
1483         Opt_fmode,
1484         Opt_mode,
1485         Opt_uid,
1486         Opt_gid,
1487 };
1488
1489 static const struct fs_parameter_spec ffs_fs_fs_parameters[] = {
1490         fsparam_bool    ("no_disconnect",       Opt_no_disconnect),
1491         fsparam_u32     ("rmode",               Opt_rmode),
1492         fsparam_u32     ("fmode",               Opt_fmode),
1493         fsparam_u32     ("mode",                Opt_mode),
1494         fsparam_u32     ("uid",                 Opt_uid),
1495         fsparam_u32     ("gid",                 Opt_gid),
1496         {}
1497 };
1498
1499 static int ffs_fs_parse_param(struct fs_context *fc, struct fs_parameter *param)
1500 {
1501         struct ffs_sb_fill_data *data = fc->fs_private;
1502         struct fs_parse_result result;
1503         int opt;
1504
1505         ENTER();
1506
1507         opt = fs_parse(fc, ffs_fs_fs_parameters, param, &result);
1508         if (opt < 0)
1509                 return opt;
1510
1511         switch (opt) {
1512         case Opt_no_disconnect:
1513                 data->no_disconnect = result.boolean;
1514                 break;
1515         case Opt_rmode:
1516                 data->root_mode  = (result.uint_32 & 0555) | S_IFDIR;
1517                 break;
1518         case Opt_fmode:
1519                 data->perms.mode = (result.uint_32 & 0666) | S_IFREG;
1520                 break;
1521         case Opt_mode:
1522                 data->root_mode  = (result.uint_32 & 0555) | S_IFDIR;
1523                 data->perms.mode = (result.uint_32 & 0666) | S_IFREG;
1524                 break;
1525
1526         case Opt_uid:
1527                 data->perms.uid = make_kuid(current_user_ns(), result.uint_32);
1528                 if (!uid_valid(data->perms.uid))
1529                         goto unmapped_value;
1530                 break;
1531         case Opt_gid:
1532                 data->perms.gid = make_kgid(current_user_ns(), result.uint_32);
1533                 if (!gid_valid(data->perms.gid))
1534                         goto unmapped_value;
1535                 break;
1536
1537         default:
1538                 return -ENOPARAM;
1539         }
1540
1541         return 0;
1542
1543 unmapped_value:
1544         return invalf(fc, "%s: unmapped value: %u", param->key, result.uint_32);
1545 }
1546
1547 /*
1548  * Set up the superblock for a mount.
1549  */
1550 static int ffs_fs_get_tree(struct fs_context *fc)
1551 {
1552         struct ffs_sb_fill_data *ctx = fc->fs_private;
1553         void *ffs_dev;
1554         struct ffs_data *ffs;
1555
1556         ENTER();
1557
1558         if (!fc->source)
1559                 return invalf(fc, "No source specified");
1560
1561         ffs = ffs_data_new(fc->source);
1562         if (unlikely(!ffs))
1563                 return -ENOMEM;
1564         ffs->file_perms = ctx->perms;
1565         ffs->no_disconnect = ctx->no_disconnect;
1566
1567         ffs->dev_name = kstrdup(fc->source, GFP_KERNEL);
1568         if (unlikely(!ffs->dev_name)) {
1569                 ffs_data_put(ffs);
1570                 return -ENOMEM;
1571         }
1572
1573         ffs_dev = ffs_acquire_dev(ffs->dev_name);
1574         if (IS_ERR(ffs_dev)) {
1575                 ffs_data_put(ffs);
1576                 return PTR_ERR(ffs_dev);
1577         }
1578
1579         ffs->private_data = ffs_dev;
1580         ctx->ffs_data = ffs;
1581         return get_tree_nodev(fc, ffs_sb_fill);
1582 }
1583
1584 static void ffs_fs_free_fc(struct fs_context *fc)
1585 {
1586         struct ffs_sb_fill_data *ctx = fc->fs_private;
1587
1588         if (ctx) {
1589                 if (ctx->ffs_data) {
1590                         ffs_release_dev(ctx->ffs_data);
1591                         ffs_data_put(ctx->ffs_data);
1592                 }
1593
1594                 kfree(ctx);
1595         }
1596 }
1597
1598 static const struct fs_context_operations ffs_fs_context_ops = {
1599         .free           = ffs_fs_free_fc,
1600         .parse_param    = ffs_fs_parse_param,
1601         .get_tree       = ffs_fs_get_tree,
1602 };
1603
1604 static int ffs_fs_init_fs_context(struct fs_context *fc)
1605 {
1606         struct ffs_sb_fill_data *ctx;
1607
1608         ctx = kzalloc(sizeof(struct ffs_sb_fill_data), GFP_KERNEL);
1609         if (!ctx)
1610                 return -ENOMEM;
1611
1612         ctx->perms.mode = S_IFREG | 0600;
1613         ctx->perms.uid = GLOBAL_ROOT_UID;
1614         ctx->perms.gid = GLOBAL_ROOT_GID;
1615         ctx->root_mode = S_IFDIR | 0500;
1616         ctx->no_disconnect = false;
1617
1618         fc->fs_private = ctx;
1619         fc->ops = &ffs_fs_context_ops;
1620         return 0;
1621 }
1622
1623 static void
1624 ffs_fs_kill_sb(struct super_block *sb)
1625 {
1626         ENTER();
1627
1628         kill_litter_super(sb);
1629         if (sb->s_fs_info) {
1630                 ffs_release_dev(sb->s_fs_info);
1631                 ffs_data_closed(sb->s_fs_info);
1632         }
1633 }
1634
1635 static struct file_system_type ffs_fs_type = {
1636         .owner          = THIS_MODULE,
1637         .name           = "functionfs",
1638         .init_fs_context = ffs_fs_init_fs_context,
1639         .parameters     = ffs_fs_fs_parameters,
1640         .kill_sb        = ffs_fs_kill_sb,
1641 };
1642 MODULE_ALIAS_FS("functionfs");
1643
1644
1645 /* Driver's main init/cleanup functions *************************************/
1646
1647 static int functionfs_init(void)
1648 {
1649         int ret;
1650
1651         ENTER();
1652
1653         ret = register_filesystem(&ffs_fs_type);
1654         if (likely(!ret))
1655                 pr_info("file system registered\n");
1656         else
1657                 pr_err("failed registering file system (%d)\n", ret);
1658
1659         return ret;
1660 }
1661
1662 static void functionfs_cleanup(void)
1663 {
1664         ENTER();
1665
1666         pr_info("unloading\n");
1667         unregister_filesystem(&ffs_fs_type);
1668 }
1669
1670
1671 /* ffs_data and ffs_function construction and destruction code **************/
1672
1673 static void ffs_data_clear(struct ffs_data *ffs);
1674 static void ffs_data_reset(struct ffs_data *ffs);
1675
1676 static void ffs_data_get(struct ffs_data *ffs)
1677 {
1678         ENTER();
1679
1680         refcount_inc(&ffs->ref);
1681 }
1682
1683 static void ffs_data_opened(struct ffs_data *ffs)
1684 {
1685         ENTER();
1686
1687         refcount_inc(&ffs->ref);
1688         if (atomic_add_return(1, &ffs->opened) == 1 &&
1689                         ffs->state == FFS_DEACTIVATED) {
1690                 ffs->state = FFS_CLOSING;
1691                 ffs_data_reset(ffs);
1692         }
1693 }
1694
1695 static void ffs_data_put(struct ffs_data *ffs)
1696 {
1697         ENTER();
1698
1699         if (unlikely(refcount_dec_and_test(&ffs->ref))) {
1700                 pr_info("%s(): freeing\n", __func__);
1701                 ffs_data_clear(ffs);
1702                 BUG_ON(waitqueue_active(&ffs->ev.waitq) ||
1703                        waitqueue_active(&ffs->ep0req_completion.wait) ||
1704                        waitqueue_active(&ffs->wait));
1705                 destroy_workqueue(ffs->io_completion_wq);
1706                 kfree(ffs->dev_name);
1707                 kfree(ffs);
1708         }
1709 }
1710
1711 static void ffs_data_closed(struct ffs_data *ffs)
1712 {
1713         ENTER();
1714
1715         if (atomic_dec_and_test(&ffs->opened)) {
1716                 if (ffs->no_disconnect) {
1717                         ffs->state = FFS_DEACTIVATED;
1718                         if (ffs->epfiles) {
1719                                 ffs_epfiles_destroy(ffs->epfiles,
1720                                                    ffs->eps_count);
1721                                 ffs->epfiles = NULL;
1722                         }
1723                         if (ffs->setup_state == FFS_SETUP_PENDING)
1724                                 __ffs_ep0_stall(ffs);
1725                 } else {
1726                         ffs->state = FFS_CLOSING;
1727                         ffs_data_reset(ffs);
1728                 }
1729         }
1730         if (atomic_read(&ffs->opened) < 0) {
1731                 ffs->state = FFS_CLOSING;
1732                 ffs_data_reset(ffs);
1733         }
1734
1735         ffs_data_put(ffs);
1736 }
1737
1738 static struct ffs_data *ffs_data_new(const char *dev_name)
1739 {
1740         struct ffs_data *ffs = kzalloc(sizeof *ffs, GFP_KERNEL);
1741         if (unlikely(!ffs))
1742                 return NULL;
1743
1744         ENTER();
1745
1746         ffs->io_completion_wq = alloc_ordered_workqueue("%s", 0, dev_name);
1747         if (!ffs->io_completion_wq) {
1748                 kfree(ffs);
1749                 return NULL;
1750         }
1751
1752         refcount_set(&ffs->ref, 1);
1753         atomic_set(&ffs->opened, 0);
1754         ffs->state = FFS_READ_DESCRIPTORS;
1755         mutex_init(&ffs->mutex);
1756         spin_lock_init(&ffs->eps_lock);
1757         init_waitqueue_head(&ffs->ev.waitq);
1758         init_waitqueue_head(&ffs->wait);
1759         init_completion(&ffs->ep0req_completion);
1760
1761         /* XXX REVISIT need to update it in some places, or do we? */
1762         ffs->ev.can_stall = 1;
1763
1764         return ffs;
1765 }
1766
1767 static void ffs_data_clear(struct ffs_data *ffs)
1768 {
1769         ENTER();
1770
1771         ffs_closed(ffs);
1772
1773         BUG_ON(ffs->gadget);
1774
1775         if (ffs->epfiles)
1776                 ffs_epfiles_destroy(ffs->epfiles, ffs->eps_count);
1777
1778         if (ffs->ffs_eventfd)
1779                 eventfd_ctx_put(ffs->ffs_eventfd);
1780
1781         kfree(ffs->raw_descs_data);
1782         kfree(ffs->raw_strings);
1783         kfree(ffs->stringtabs);
1784 }
1785
1786 static void ffs_data_reset(struct ffs_data *ffs)
1787 {
1788         ENTER();
1789
1790         ffs_data_clear(ffs);
1791
1792         ffs->epfiles = NULL;
1793         ffs->raw_descs_data = NULL;
1794         ffs->raw_descs = NULL;
1795         ffs->raw_strings = NULL;
1796         ffs->stringtabs = NULL;
1797
1798         ffs->raw_descs_length = 0;
1799         ffs->fs_descs_count = 0;
1800         ffs->hs_descs_count = 0;
1801         ffs->ss_descs_count = 0;
1802
1803         ffs->strings_count = 0;
1804         ffs->interfaces_count = 0;
1805         ffs->eps_count = 0;
1806
1807         ffs->ev.count = 0;
1808
1809         ffs->state = FFS_READ_DESCRIPTORS;
1810         ffs->setup_state = FFS_NO_SETUP;
1811         ffs->flags = 0;
1812 }
1813
1814
1815 static int functionfs_bind(struct ffs_data *ffs, struct usb_composite_dev *cdev)
1816 {
1817         struct usb_gadget_strings **lang;
1818         int first_id;
1819
1820         ENTER();
1821
1822         if (WARN_ON(ffs->state != FFS_ACTIVE
1823                  || test_and_set_bit(FFS_FL_BOUND, &ffs->flags)))
1824                 return -EBADFD;
1825
1826         first_id = usb_string_ids_n(cdev, ffs->strings_count);
1827         if (unlikely(first_id < 0))
1828                 return first_id;
1829
1830         ffs->ep0req = usb_ep_alloc_request(cdev->gadget->ep0, GFP_KERNEL);
1831         if (unlikely(!ffs->ep0req))
1832                 return -ENOMEM;
1833         ffs->ep0req->complete = ffs_ep0_complete;
1834         ffs->ep0req->context = ffs;
1835
1836         lang = ffs->stringtabs;
1837         if (lang) {
1838                 for (; *lang; ++lang) {
1839                         struct usb_string *str = (*lang)->strings;
1840                         int id = first_id;
1841                         for (; str->s; ++id, ++str)
1842                                 str->id = id;
1843                 }
1844         }
1845
1846         ffs->gadget = cdev->gadget;
1847         ffs_data_get(ffs);
1848         return 0;
1849 }
1850
1851 static void functionfs_unbind(struct ffs_data *ffs)
1852 {
1853         ENTER();
1854
1855         if (!WARN_ON(!ffs->gadget)) {
1856                 usb_ep_free_request(ffs->gadget->ep0, ffs->ep0req);
1857                 ffs->ep0req = NULL;
1858                 ffs->gadget = NULL;
1859                 clear_bit(FFS_FL_BOUND, &ffs->flags);
1860                 ffs_data_put(ffs);
1861         }
1862 }
1863
1864 static int ffs_epfiles_create(struct ffs_data *ffs)
1865 {
1866         struct ffs_epfile *epfile, *epfiles;
1867         unsigned i, count;
1868
1869         ENTER();
1870
1871         count = ffs->eps_count;
1872         epfiles = kcalloc(count, sizeof(*epfiles), GFP_KERNEL);
1873         if (!epfiles)
1874                 return -ENOMEM;
1875
1876         epfile = epfiles;
1877         for (i = 1; i <= count; ++i, ++epfile) {
1878                 epfile->ffs = ffs;
1879                 mutex_init(&epfile->mutex);
1880                 if (ffs->user_flags & FUNCTIONFS_VIRTUAL_ADDR)
1881                         sprintf(epfile->name, "ep%02x", ffs->eps_addrmap[i]);
1882                 else
1883                         sprintf(epfile->name, "ep%u", i);
1884                 epfile->dentry = ffs_sb_create_file(ffs->sb, epfile->name,
1885                                                  epfile,
1886                                                  &ffs_epfile_operations);
1887                 if (unlikely(!epfile->dentry)) {
1888                         ffs_epfiles_destroy(epfiles, i - 1);
1889                         return -ENOMEM;
1890                 }
1891         }
1892
1893         ffs->epfiles = epfiles;
1894         return 0;
1895 }
1896
1897 static void ffs_epfiles_destroy(struct ffs_epfile *epfiles, unsigned count)
1898 {
1899         struct ffs_epfile *epfile = epfiles;
1900
1901         ENTER();
1902
1903         for (; count; --count, ++epfile) {
1904                 BUG_ON(mutex_is_locked(&epfile->mutex));
1905                 if (epfile->dentry) {
1906                         d_delete(epfile->dentry);
1907                         dput(epfile->dentry);
1908                         epfile->dentry = NULL;
1909                 }
1910         }
1911
1912         kfree(epfiles);
1913 }
1914
1915 static void ffs_func_eps_disable(struct ffs_function *func)
1916 {
1917         struct ffs_ep *ep         = func->eps;
1918         struct ffs_epfile *epfile = func->ffs->epfiles;
1919         unsigned count            = func->ffs->eps_count;
1920         unsigned long flags;
1921
1922         spin_lock_irqsave(&func->ffs->eps_lock, flags);
1923         while (count--) {
1924                 /* pending requests get nuked */
1925                 if (likely(ep->ep))
1926                         usb_ep_disable(ep->ep);
1927                 ++ep;
1928
1929                 if (epfile) {
1930                         epfile->ep = NULL;
1931                         __ffs_epfile_read_buffer_free(epfile);
1932                         ++epfile;
1933                 }
1934         }
1935         spin_unlock_irqrestore(&func->ffs->eps_lock, flags);
1936 }
1937
1938 static int ffs_func_eps_enable(struct ffs_function *func)
1939 {
1940         struct ffs_data *ffs      = func->ffs;
1941         struct ffs_ep *ep         = func->eps;
1942         struct ffs_epfile *epfile = ffs->epfiles;
1943         unsigned count            = ffs->eps_count;
1944         unsigned long flags;
1945         int ret = 0;
1946
1947         spin_lock_irqsave(&func->ffs->eps_lock, flags);
1948         while(count--) {
1949                 ep->ep->driver_data = ep;
1950
1951                 ret = config_ep_by_speed(func->gadget, &func->function, ep->ep);
1952                 if (ret) {
1953                         pr_err("%s: config_ep_by_speed(%s) returned %d\n",
1954                                         __func__, ep->ep->name, ret);
1955                         break;
1956                 }
1957
1958                 ret = usb_ep_enable(ep->ep);
1959                 if (likely(!ret)) {
1960                         epfile->ep = ep;
1961                         epfile->in = usb_endpoint_dir_in(ep->ep->desc);
1962                         epfile->isoc = usb_endpoint_xfer_isoc(ep->ep->desc);
1963                 } else {
1964                         break;
1965                 }
1966
1967                 ++ep;
1968                 ++epfile;
1969         }
1970
1971         wake_up_interruptible(&ffs->wait);
1972         spin_unlock_irqrestore(&func->ffs->eps_lock, flags);
1973
1974         return ret;
1975 }
1976
1977
1978 /* Parsing and building descriptors and strings *****************************/
1979
1980 /*
1981  * This validates if data pointed by data is a valid USB descriptor as
1982  * well as record how many interfaces, endpoints and strings are
1983  * required by given configuration.  Returns address after the
1984  * descriptor or NULL if data is invalid.
1985  */
1986
1987 enum ffs_entity_type {
1988         FFS_DESCRIPTOR, FFS_INTERFACE, FFS_STRING, FFS_ENDPOINT
1989 };
1990
1991 enum ffs_os_desc_type {
1992         FFS_OS_DESC, FFS_OS_DESC_EXT_COMPAT, FFS_OS_DESC_EXT_PROP
1993 };
1994
1995 typedef int (*ffs_entity_callback)(enum ffs_entity_type entity,
1996                                    u8 *valuep,
1997                                    struct usb_descriptor_header *desc,
1998                                    void *priv);
1999
2000 typedef int (*ffs_os_desc_callback)(enum ffs_os_desc_type entity,
2001                                     struct usb_os_desc_header *h, void *data,
2002                                     unsigned len, void *priv);
2003
2004 static int __must_check ffs_do_single_desc(char *data, unsigned len,
2005                                            ffs_entity_callback entity,
2006                                            void *priv, int *current_class)
2007 {
2008         struct usb_descriptor_header *_ds = (void *)data;
2009         u8 length;
2010         int ret;
2011
2012         ENTER();
2013
2014         /* At least two bytes are required: length and type */
2015         if (len < 2) {
2016                 pr_vdebug("descriptor too short\n");
2017                 return -EINVAL;
2018         }
2019
2020         /* If we have at least as many bytes as the descriptor takes? */
2021         length = _ds->bLength;
2022         if (len < length) {
2023                 pr_vdebug("descriptor longer then available data\n");
2024                 return -EINVAL;
2025         }
2026
2027 #define __entity_check_INTERFACE(val)  1
2028 #define __entity_check_STRING(val)     (val)
2029 #define __entity_check_ENDPOINT(val)   ((val) & USB_ENDPOINT_NUMBER_MASK)
2030 #define __entity(type, val) do {                                        \
2031                 pr_vdebug("entity " #type "(%02x)\n", (val));           \
2032                 if (unlikely(!__entity_check_ ##type(val))) {           \
2033                         pr_vdebug("invalid entity's value\n");          \
2034                         return -EINVAL;                                 \
2035                 }                                                       \
2036                 ret = entity(FFS_ ##type, &val, _ds, priv);             \
2037                 if (unlikely(ret < 0)) {                                \
2038                         pr_debug("entity " #type "(%02x); ret = %d\n",  \
2039                                  (val), ret);                           \
2040                         return ret;                                     \
2041                 }                                                       \
2042         } while (0)
2043
2044         /* Parse descriptor depending on type. */
2045         switch (_ds->bDescriptorType) {
2046         case USB_DT_DEVICE:
2047         case USB_DT_CONFIG:
2048         case USB_DT_STRING:
2049         case USB_DT_DEVICE_QUALIFIER:
2050                 /* function can't have any of those */
2051                 pr_vdebug("descriptor reserved for gadget: %d\n",
2052                       _ds->bDescriptorType);
2053                 return -EINVAL;
2054
2055         case USB_DT_INTERFACE: {
2056                 struct usb_interface_descriptor *ds = (void *)_ds;
2057                 pr_vdebug("interface descriptor\n");
2058                 if (length != sizeof *ds)
2059                         goto inv_length;
2060
2061                 __entity(INTERFACE, ds->bInterfaceNumber);
2062                 if (ds->iInterface)
2063                         __entity(STRING, ds->iInterface);
2064                 *current_class = ds->bInterfaceClass;
2065         }
2066                 break;
2067
2068         case USB_DT_ENDPOINT: {
2069                 struct usb_endpoint_descriptor *ds = (void *)_ds;
2070                 pr_vdebug("endpoint descriptor\n");
2071                 if (length != USB_DT_ENDPOINT_SIZE &&
2072                     length != USB_DT_ENDPOINT_AUDIO_SIZE)
2073                         goto inv_length;
2074                 __entity(ENDPOINT, ds->bEndpointAddress);
2075         }
2076                 break;
2077
2078         case USB_TYPE_CLASS | 0x01:
2079                 if (*current_class == USB_INTERFACE_CLASS_HID) {
2080                         pr_vdebug("hid descriptor\n");
2081                         if (length != sizeof(struct hid_descriptor))
2082                                 goto inv_length;
2083                         break;
2084                 } else if (*current_class == USB_INTERFACE_CLASS_CCID) {
2085                         pr_vdebug("ccid descriptor\n");
2086                         if (length != sizeof(struct ccid_descriptor))
2087                                 goto inv_length;
2088                         break;
2089                 } else {
2090                         pr_vdebug("unknown descriptor: %d for class %d\n",
2091                               _ds->bDescriptorType, *current_class);
2092                         return -EINVAL;
2093                 }
2094
2095         case USB_DT_OTG:
2096                 if (length != sizeof(struct usb_otg_descriptor))
2097                         goto inv_length;
2098                 break;
2099
2100         case USB_DT_INTERFACE_ASSOCIATION: {
2101                 struct usb_interface_assoc_descriptor *ds = (void *)_ds;
2102                 pr_vdebug("interface association descriptor\n");
2103                 if (length != sizeof *ds)
2104                         goto inv_length;
2105                 if (ds->iFunction)
2106                         __entity(STRING, ds->iFunction);
2107         }
2108                 break;
2109
2110         case USB_DT_SS_ENDPOINT_COMP:
2111                 pr_vdebug("EP SS companion descriptor\n");
2112                 if (length != sizeof(struct usb_ss_ep_comp_descriptor))
2113                         goto inv_length;
2114                 break;
2115
2116         case USB_DT_OTHER_SPEED_CONFIG:
2117         case USB_DT_INTERFACE_POWER:
2118         case USB_DT_DEBUG:
2119         case USB_DT_SECURITY:
2120         case USB_DT_CS_RADIO_CONTROL:
2121                 /* TODO */
2122                 pr_vdebug("unimplemented descriptor: %d\n", _ds->bDescriptorType);
2123                 return -EINVAL;
2124
2125         default:
2126                 /* We should never be here */
2127                 pr_vdebug("unknown descriptor: %d\n", _ds->bDescriptorType);
2128                 return -EINVAL;
2129
2130 inv_length:
2131                 pr_vdebug("invalid length: %d (descriptor %d)\n",
2132                           _ds->bLength, _ds->bDescriptorType);
2133                 return -EINVAL;
2134         }
2135
2136 #undef __entity
2137 #undef __entity_check_DESCRIPTOR
2138 #undef __entity_check_INTERFACE
2139 #undef __entity_check_STRING
2140 #undef __entity_check_ENDPOINT
2141
2142         return length;
2143 }
2144
2145 static int __must_check ffs_do_descs(unsigned count, char *data, unsigned len,
2146                                      ffs_entity_callback entity, void *priv)
2147 {
2148         const unsigned _len = len;
2149         unsigned long num = 0;
2150         int current_class = -1;
2151
2152         ENTER();
2153
2154         for (;;) {
2155                 int ret;
2156
2157                 if (num == count)
2158                         data = NULL;
2159
2160                 /* Record "descriptor" entity */
2161                 ret = entity(FFS_DESCRIPTOR, (u8 *)num, (void *)data, priv);
2162                 if (unlikely(ret < 0)) {
2163                         pr_debug("entity DESCRIPTOR(%02lx); ret = %d\n",
2164                                  num, ret);
2165                         return ret;
2166                 }
2167
2168                 if (!data)
2169                         return _len - len;
2170
2171                 ret = ffs_do_single_desc(data, len, entity, priv,
2172                         &current_class);
2173                 if (unlikely(ret < 0)) {
2174                         pr_debug("%s returns %d\n", __func__, ret);
2175                         return ret;
2176                 }
2177
2178                 len -= ret;
2179                 data += ret;
2180                 ++num;
2181         }
2182 }
2183
2184 static int __ffs_data_do_entity(enum ffs_entity_type type,
2185                                 u8 *valuep, struct usb_descriptor_header *desc,
2186                                 void *priv)
2187 {
2188         struct ffs_desc_helper *helper = priv;
2189         struct usb_endpoint_descriptor *d;
2190
2191         ENTER();
2192
2193         switch (type) {
2194         case FFS_DESCRIPTOR:
2195                 break;
2196
2197         case FFS_INTERFACE:
2198                 /*
2199                  * Interfaces are indexed from zero so if we
2200                  * encountered interface "n" then there are at least
2201                  * "n+1" interfaces.
2202                  */
2203                 if (*valuep >= helper->interfaces_count)
2204                         helper->interfaces_count = *valuep + 1;
2205                 break;
2206
2207         case FFS_STRING:
2208                 /*
2209                  * Strings are indexed from 1 (0 is reserved
2210                  * for languages list)
2211                  */
2212                 if (*valuep > helper->ffs->strings_count)
2213                         helper->ffs->strings_count = *valuep;
2214                 break;
2215
2216         case FFS_ENDPOINT:
2217                 d = (void *)desc;
2218                 helper->eps_count++;
2219                 if (helper->eps_count >= FFS_MAX_EPS_COUNT)
2220                         return -EINVAL;
2221                 /* Check if descriptors for any speed were already parsed */
2222                 if (!helper->ffs->eps_count && !helper->ffs->interfaces_count)
2223                         helper->ffs->eps_addrmap[helper->eps_count] =
2224                                 d->bEndpointAddress;
2225                 else if (helper->ffs->eps_addrmap[helper->eps_count] !=
2226                                 d->bEndpointAddress)
2227                         return -EINVAL;
2228                 break;
2229         }
2230
2231         return 0;
2232 }
2233
2234 static int __ffs_do_os_desc_header(enum ffs_os_desc_type *next_type,
2235                                    struct usb_os_desc_header *desc)
2236 {
2237         u16 bcd_version = le16_to_cpu(desc->bcdVersion);
2238         u16 w_index = le16_to_cpu(desc->wIndex);
2239
2240         if (bcd_version != 1) {
2241                 pr_vdebug("unsupported os descriptors version: %d",
2242                           bcd_version);
2243                 return -EINVAL;
2244         }
2245         switch (w_index) {
2246         case 0x4:
2247                 *next_type = FFS_OS_DESC_EXT_COMPAT;
2248                 break;
2249         case 0x5:
2250                 *next_type = FFS_OS_DESC_EXT_PROP;
2251                 break;
2252         default:
2253                 pr_vdebug("unsupported os descriptor type: %d", w_index);
2254                 return -EINVAL;
2255         }
2256
2257         return sizeof(*desc);
2258 }
2259
2260 /*
2261  * Process all extended compatibility/extended property descriptors
2262  * of a feature descriptor
2263  */
2264 static int __must_check ffs_do_single_os_desc(char *data, unsigned len,
2265                                               enum ffs_os_desc_type type,
2266                                               u16 feature_count,
2267                                               ffs_os_desc_callback entity,
2268                                               void *priv,
2269                                               struct usb_os_desc_header *h)
2270 {
2271         int ret;
2272         const unsigned _len = len;
2273
2274         ENTER();
2275
2276         /* loop over all ext compat/ext prop descriptors */
2277         while (feature_count--) {
2278                 ret = entity(type, h, data, len, priv);
2279                 if (unlikely(ret < 0)) {
2280                         pr_debug("bad OS descriptor, type: %d\n", type);
2281                         return ret;
2282                 }
2283                 data += ret;
2284                 len -= ret;
2285         }
2286         return _len - len;
2287 }
2288
2289 /* Process a number of complete Feature Descriptors (Ext Compat or Ext Prop) */
2290 static int __must_check ffs_do_os_descs(unsigned count,
2291                                         char *data, unsigned len,
2292                                         ffs_os_desc_callback entity, void *priv)
2293 {
2294         const unsigned _len = len;
2295         unsigned long num = 0;
2296
2297         ENTER();
2298
2299         for (num = 0; num < count; ++num) {
2300                 int ret;
2301                 enum ffs_os_desc_type type;
2302                 u16 feature_count;
2303                 struct usb_os_desc_header *desc = (void *)data;
2304
2305                 if (len < sizeof(*desc))
2306                         return -EINVAL;
2307
2308                 /*
2309                  * Record "descriptor" entity.
2310                  * Process dwLength, bcdVersion, wIndex, get b/wCount.
2311                  * Move the data pointer to the beginning of extended
2312                  * compatibilities proper or extended properties proper
2313                  * portions of the data
2314                  */
2315                 if (le32_to_cpu(desc->dwLength) > len)
2316                         return -EINVAL;
2317
2318                 ret = __ffs_do_os_desc_header(&type, desc);
2319                 if (unlikely(ret < 0)) {
2320                         pr_debug("entity OS_DESCRIPTOR(%02lx); ret = %d\n",
2321                                  num, ret);
2322                         return ret;
2323                 }
2324                 /*
2325                  * 16-bit hex "?? 00" Little Endian looks like 8-bit hex "??"
2326                  */
2327                 feature_count = le16_to_cpu(desc->wCount);
2328                 if (type == FFS_OS_DESC_EXT_COMPAT &&
2329                     (feature_count > 255 || desc->Reserved))
2330                                 return -EINVAL;
2331                 len -= ret;
2332                 data += ret;
2333
2334                 /*
2335                  * Process all function/property descriptors
2336                  * of this Feature Descriptor
2337                  */
2338                 ret = ffs_do_single_os_desc(data, len, type,
2339                                             feature_count, entity, priv, desc);
2340                 if (unlikely(ret < 0)) {
2341                         pr_debug("%s returns %d\n", __func__, ret);
2342                         return ret;
2343                 }
2344
2345                 len -= ret;
2346                 data += ret;
2347         }
2348         return _len - len;
2349 }
2350
2351 /**
2352  * Validate contents of the buffer from userspace related to OS descriptors.
2353  */
2354 static int __ffs_data_do_os_desc(enum ffs_os_desc_type type,
2355                                  struct usb_os_desc_header *h, void *data,
2356                                  unsigned len, void *priv)
2357 {
2358         struct ffs_data *ffs = priv;
2359         u8 length;
2360
2361         ENTER();
2362
2363         switch (type) {
2364         case FFS_OS_DESC_EXT_COMPAT: {
2365                 struct usb_ext_compat_desc *d = data;
2366                 int i;
2367
2368                 if (len < sizeof(*d) ||
2369                     d->bFirstInterfaceNumber >= ffs->interfaces_count)
2370                         return -EINVAL;
2371                 if (d->Reserved1 != 1) {
2372                         /*
2373                          * According to the spec, Reserved1 must be set to 1
2374                          * but older kernels incorrectly rejected non-zero
2375                          * values.  We fix it here to avoid returning EINVAL
2376                          * in response to values we used to accept.
2377                          */
2378                         pr_debug("usb_ext_compat_desc::Reserved1 forced to 1\n");
2379                         d->Reserved1 = 1;
2380                 }
2381                 for (i = 0; i < ARRAY_SIZE(d->Reserved2); ++i)
2382                         if (d->Reserved2[i])
2383                                 return -EINVAL;
2384
2385                 length = sizeof(struct usb_ext_compat_desc);
2386         }
2387                 break;
2388         case FFS_OS_DESC_EXT_PROP: {
2389                 struct usb_ext_prop_desc *d = data;
2390                 u32 type, pdl;
2391                 u16 pnl;
2392
2393                 if (len < sizeof(*d) || h->interface >= ffs->interfaces_count)
2394                         return -EINVAL;
2395                 length = le32_to_cpu(d->dwSize);
2396                 if (len < length)
2397                         return -EINVAL;
2398                 type = le32_to_cpu(d->dwPropertyDataType);
2399                 if (type < USB_EXT_PROP_UNICODE ||
2400                     type > USB_EXT_PROP_UNICODE_MULTI) {
2401                         pr_vdebug("unsupported os descriptor property type: %d",
2402                                   type);
2403                         return -EINVAL;
2404                 }
2405                 pnl = le16_to_cpu(d->wPropertyNameLength);
2406                 if (length < 14 + pnl) {
2407                         pr_vdebug("invalid os descriptor length: %d pnl:%d (descriptor %d)\n",
2408                                   length, pnl, type);
2409                         return -EINVAL;
2410                 }
2411                 pdl = le32_to_cpu(*(__le32 *)((u8 *)data + 10 + pnl));
2412                 if (length != 14 + pnl + pdl) {
2413                         pr_vdebug("invalid os descriptor length: %d pnl:%d pdl:%d (descriptor %d)\n",
2414                                   length, pnl, pdl, type);
2415                         return -EINVAL;
2416                 }
2417                 ++ffs->ms_os_descs_ext_prop_count;
2418                 /* property name reported to the host as "WCHAR"s */
2419                 ffs->ms_os_descs_ext_prop_name_len += pnl * 2;
2420                 ffs->ms_os_descs_ext_prop_data_len += pdl;
2421         }
2422                 break;
2423         default:
2424                 pr_vdebug("unknown descriptor: %d\n", type);
2425                 return -EINVAL;
2426         }
2427         return length;
2428 }
2429
2430 static int __ffs_data_got_descs(struct ffs_data *ffs,
2431                                 char *const _data, size_t len)
2432 {
2433         char *data = _data, *raw_descs;
2434         unsigned os_descs_count = 0, counts[3], flags;
2435         int ret = -EINVAL, i;
2436         struct ffs_desc_helper helper;
2437
2438         ENTER();
2439
2440         if (get_unaligned_le32(data + 4) != len)
2441                 goto error;
2442
2443         switch (get_unaligned_le32(data)) {
2444         case FUNCTIONFS_DESCRIPTORS_MAGIC:
2445                 flags = FUNCTIONFS_HAS_FS_DESC | FUNCTIONFS_HAS_HS_DESC;
2446                 data += 8;
2447                 len  -= 8;
2448                 break;
2449         case FUNCTIONFS_DESCRIPTORS_MAGIC_V2:
2450                 flags = get_unaligned_le32(data + 8);
2451                 ffs->user_flags = flags;
2452                 if (flags & ~(FUNCTIONFS_HAS_FS_DESC |
2453                               FUNCTIONFS_HAS_HS_DESC |
2454                               FUNCTIONFS_HAS_SS_DESC |
2455                               FUNCTIONFS_HAS_MS_OS_DESC |
2456                               FUNCTIONFS_VIRTUAL_ADDR |
2457                               FUNCTIONFS_EVENTFD |
2458                               FUNCTIONFS_ALL_CTRL_RECIP |
2459                               FUNCTIONFS_CONFIG0_SETUP)) {
2460                         ret = -ENOSYS;
2461                         goto error;
2462                 }
2463                 data += 12;
2464                 len  -= 12;
2465                 break;
2466         default:
2467                 goto error;
2468         }
2469
2470         if (flags & FUNCTIONFS_EVENTFD) {
2471                 if (len < 4)
2472                         goto error;
2473                 ffs->ffs_eventfd =
2474                         eventfd_ctx_fdget((int)get_unaligned_le32(data));
2475                 if (IS_ERR(ffs->ffs_eventfd)) {
2476                         ret = PTR_ERR(ffs->ffs_eventfd);
2477                         ffs->ffs_eventfd = NULL;
2478                         goto error;
2479                 }
2480                 data += 4;
2481                 len  -= 4;
2482         }
2483
2484         /* Read fs_count, hs_count and ss_count (if present) */
2485         for (i = 0; i < 3; ++i) {
2486                 if (!(flags & (1 << i))) {
2487                         counts[i] = 0;
2488                 } else if (len < 4) {
2489                         goto error;
2490                 } else {
2491                         counts[i] = get_unaligned_le32(data);
2492                         data += 4;
2493                         len  -= 4;
2494                 }
2495         }
2496         if (flags & (1 << i)) {
2497                 if (len < 4) {
2498                         goto error;
2499                 }
2500                 os_descs_count = get_unaligned_le32(data);
2501                 data += 4;
2502                 len -= 4;
2503         };
2504
2505         /* Read descriptors */
2506         raw_descs = data;
2507         helper.ffs = ffs;
2508         for (i = 0; i < 3; ++i) {
2509                 if (!counts[i])
2510                         continue;
2511                 helper.interfaces_count = 0;
2512                 helper.eps_count = 0;
2513                 ret = ffs_do_descs(counts[i], data, len,
2514                                    __ffs_data_do_entity, &helper);
2515                 if (ret < 0)
2516                         goto error;
2517                 if (!ffs->eps_count && !ffs->interfaces_count) {
2518                         ffs->eps_count = helper.eps_count;
2519                         ffs->interfaces_count = helper.interfaces_count;
2520                 } else {
2521                         if (ffs->eps_count != helper.eps_count) {
2522                                 ret = -EINVAL;
2523                                 goto error;
2524                         }
2525                         if (ffs->interfaces_count != helper.interfaces_count) {
2526                                 ret = -EINVAL;
2527                                 goto error;
2528                         }
2529                 }
2530                 data += ret;
2531                 len  -= ret;
2532         }
2533         if (os_descs_count) {
2534                 ret = ffs_do_os_descs(os_descs_count, data, len,
2535                                       __ffs_data_do_os_desc, ffs);
2536                 if (ret < 0)
2537                         goto error;
2538                 data += ret;
2539                 len -= ret;
2540         }
2541
2542         if (raw_descs == data || len) {
2543                 ret = -EINVAL;
2544                 goto error;
2545         }
2546
2547         ffs->raw_descs_data     = _data;
2548         ffs->raw_descs          = raw_descs;
2549         ffs->raw_descs_length   = data - raw_descs;
2550         ffs->fs_descs_count     = counts[0];
2551         ffs->hs_descs_count     = counts[1];
2552         ffs->ss_descs_count     = counts[2];
2553         ffs->ms_os_descs_count  = os_descs_count;
2554
2555         return 0;
2556
2557 error:
2558         kfree(_data);
2559         return ret;
2560 }
2561
2562 static int __ffs_data_got_strings(struct ffs_data *ffs,
2563                                   char *const _data, size_t len)
2564 {
2565         u32 str_count, needed_count, lang_count;
2566         struct usb_gadget_strings **stringtabs, *t;
2567         const char *data = _data;
2568         struct usb_string *s;
2569
2570         ENTER();
2571
2572         if (unlikely(len < 16 ||
2573                      get_unaligned_le32(data) != FUNCTIONFS_STRINGS_MAGIC ||
2574                      get_unaligned_le32(data + 4) != len))
2575                 goto error;
2576         str_count  = get_unaligned_le32(data + 8);
2577         lang_count = get_unaligned_le32(data + 12);
2578
2579         /* if one is zero the other must be zero */
2580         if (unlikely(!str_count != !lang_count))
2581                 goto error;
2582
2583         /* Do we have at least as many strings as descriptors need? */
2584         needed_count = ffs->strings_count;
2585         if (unlikely(str_count < needed_count))
2586                 goto error;
2587
2588         /*
2589          * If we don't need any strings just return and free all
2590          * memory.
2591          */
2592         if (!needed_count) {
2593                 kfree(_data);
2594                 return 0;
2595         }
2596
2597         /* Allocate everything in one chunk so there's less maintenance. */
2598         {
2599                 unsigned i = 0;
2600                 vla_group(d);
2601                 vla_item(d, struct usb_gadget_strings *, stringtabs,
2602                         lang_count + 1);
2603                 vla_item(d, struct usb_gadget_strings, stringtab, lang_count);
2604                 vla_item(d, struct usb_string, strings,
2605                         lang_count*(needed_count+1));
2606
2607                 char *vlabuf = kmalloc(vla_group_size(d), GFP_KERNEL);
2608
2609                 if (unlikely(!vlabuf)) {
2610                         kfree(_data);
2611                         return -ENOMEM;
2612                 }
2613
2614                 /* Initialize the VLA pointers */
2615                 stringtabs = vla_ptr(vlabuf, d, stringtabs);
2616                 t = vla_ptr(vlabuf, d, stringtab);
2617                 i = lang_count;
2618                 do {
2619                         *stringtabs++ = t++;
2620                 } while (--i);
2621                 *stringtabs = NULL;
2622
2623                 /* stringtabs = vlabuf = d_stringtabs for later kfree */
2624                 stringtabs = vla_ptr(vlabuf, d, stringtabs);
2625                 t = vla_ptr(vlabuf, d, stringtab);
2626                 s = vla_ptr(vlabuf, d, strings);
2627         }
2628
2629         /* For each language */
2630         data += 16;
2631         len -= 16;
2632
2633         do { /* lang_count > 0 so we can use do-while */
2634                 unsigned needed = needed_count;
2635
2636                 if (unlikely(len < 3))
2637                         goto error_free;
2638                 t->language = get_unaligned_le16(data);
2639                 t->strings  = s;
2640                 ++t;
2641
2642                 data += 2;
2643                 len -= 2;
2644
2645                 /* For each string */
2646                 do { /* str_count > 0 so we can use do-while */
2647                         size_t length = strnlen(data, len);
2648
2649                         if (unlikely(length == len))
2650                                 goto error_free;
2651
2652                         /*
2653                          * User may provide more strings then we need,
2654                          * if that's the case we simply ignore the
2655                          * rest
2656                          */
2657                         if (likely(needed)) {
2658                                 /*
2659                                  * s->id will be set while adding
2660                                  * function to configuration so for
2661                                  * now just leave garbage here.
2662                                  */
2663                                 s->s = data;
2664                                 --needed;
2665                                 ++s;
2666                         }
2667
2668                         data += length + 1;
2669                         len -= length + 1;
2670                 } while (--str_count);
2671
2672                 s->id = 0;   /* terminator */
2673                 s->s = NULL;
2674                 ++s;
2675
2676         } while (--lang_count);
2677
2678         /* Some garbage left? */
2679         if (unlikely(len))
2680                 goto error_free;
2681
2682         /* Done! */
2683         ffs->stringtabs = stringtabs;
2684         ffs->raw_strings = _data;
2685
2686         return 0;
2687
2688 error_free:
2689         kfree(stringtabs);
2690 error:
2691         kfree(_data);
2692         return -EINVAL;
2693 }
2694
2695
2696 /* Events handling and management *******************************************/
2697
2698 static void __ffs_event_add(struct ffs_data *ffs,
2699                             enum usb_functionfs_event_type type)
2700 {
2701         enum usb_functionfs_event_type rem_type1, rem_type2 = type;
2702         int neg = 0;
2703
2704         /*
2705          * Abort any unhandled setup
2706          *
2707          * We do not need to worry about some cmpxchg() changing value
2708          * of ffs->setup_state without holding the lock because when
2709          * state is FFS_SETUP_PENDING cmpxchg() in several places in
2710          * the source does nothing.
2711          */
2712         if (ffs->setup_state == FFS_SETUP_PENDING)
2713                 ffs->setup_state = FFS_SETUP_CANCELLED;
2714
2715         /*
2716          * Logic of this function guarantees that there are at most four pending
2717          * evens on ffs->ev.types queue.  This is important because the queue
2718          * has space for four elements only and __ffs_ep0_read_events function
2719          * depends on that limit as well.  If more event types are added, those
2720          * limits have to be revisited or guaranteed to still hold.
2721          */
2722         switch (type) {
2723         case FUNCTIONFS_RESUME:
2724                 rem_type2 = FUNCTIONFS_SUSPEND;
2725                 /* FALL THROUGH */
2726         case FUNCTIONFS_SUSPEND:
2727         case FUNCTIONFS_SETUP:
2728                 rem_type1 = type;
2729                 /* Discard all similar events */
2730                 break;
2731
2732         case FUNCTIONFS_BIND:
2733         case FUNCTIONFS_UNBIND:
2734         case FUNCTIONFS_DISABLE:
2735         case FUNCTIONFS_ENABLE:
2736                 /* Discard everything other then power management. */
2737                 rem_type1 = FUNCTIONFS_SUSPEND;
2738                 rem_type2 = FUNCTIONFS_RESUME;
2739                 neg = 1;
2740                 break;
2741
2742         default:
2743                 WARN(1, "%d: unknown event, this should not happen\n", type);
2744                 return;
2745         }
2746
2747         {
2748                 u8 *ev  = ffs->ev.types, *out = ev;
2749                 unsigned n = ffs->ev.count;
2750                 for (; n; --n, ++ev)
2751                         if ((*ev == rem_type1 || *ev == rem_type2) == neg)
2752                                 *out++ = *ev;
2753                         else
2754                                 pr_vdebug("purging event %d\n", *ev);
2755                 ffs->ev.count = out - ffs->ev.types;
2756         }
2757
2758         pr_vdebug("adding event %d\n", type);
2759         ffs->ev.types[ffs->ev.count++] = type;
2760         wake_up_locked(&ffs->ev.waitq);
2761         if (ffs->ffs_eventfd)
2762                 eventfd_signal(ffs->ffs_eventfd, 1);
2763 }
2764
2765 static void ffs_event_add(struct ffs_data *ffs,
2766                           enum usb_functionfs_event_type type)
2767 {
2768         unsigned long flags;
2769         spin_lock_irqsave(&ffs->ev.waitq.lock, flags);
2770         __ffs_event_add(ffs, type);
2771         spin_unlock_irqrestore(&ffs->ev.waitq.lock, flags);
2772 }
2773
2774 /* Bind/unbind USB function hooks *******************************************/
2775
2776 static int ffs_ep_addr2idx(struct ffs_data *ffs, u8 endpoint_address)
2777 {
2778         int i;
2779
2780         for (i = 1; i < ARRAY_SIZE(ffs->eps_addrmap); ++i)
2781                 if (ffs->eps_addrmap[i] == endpoint_address)
2782                         return i;
2783         return -ENOENT;
2784 }
2785
2786 static int __ffs_func_bind_do_descs(enum ffs_entity_type type, u8 *valuep,
2787                                     struct usb_descriptor_header *desc,
2788                                     void *priv)
2789 {
2790         struct usb_endpoint_descriptor *ds = (void *)desc;
2791         struct ffs_function *func = priv;
2792         struct ffs_ep *ffs_ep;
2793         unsigned ep_desc_id;
2794         int idx;
2795         static const char *speed_names[] = { "full", "high", "super" };
2796
2797         if (type != FFS_DESCRIPTOR)
2798                 return 0;
2799
2800         /*
2801          * If ss_descriptors is not NULL, we are reading super speed
2802          * descriptors; if hs_descriptors is not NULL, we are reading high
2803          * speed descriptors; otherwise, we are reading full speed
2804          * descriptors.
2805          */
2806         if (func->function.ss_descriptors) {
2807                 ep_desc_id = 2;
2808                 func->function.ss_descriptors[(long)valuep] = desc;
2809         } else if (func->function.hs_descriptors) {
2810                 ep_desc_id = 1;
2811                 func->function.hs_descriptors[(long)valuep] = desc;
2812         } else {
2813                 ep_desc_id = 0;
2814                 func->function.fs_descriptors[(long)valuep]    = desc;
2815         }
2816
2817         if (!desc || desc->bDescriptorType != USB_DT_ENDPOINT)
2818                 return 0;
2819
2820         idx = ffs_ep_addr2idx(func->ffs, ds->bEndpointAddress) - 1;
2821         if (idx < 0)
2822                 return idx;
2823
2824         ffs_ep = func->eps + idx;
2825
2826         if (unlikely(ffs_ep->descs[ep_desc_id])) {
2827                 pr_err("two %sspeed descriptors for EP %d\n",
2828                           speed_names[ep_desc_id],
2829                           ds->bEndpointAddress & USB_ENDPOINT_NUMBER_MASK);
2830                 return -EINVAL;
2831         }
2832         ffs_ep->descs[ep_desc_id] = ds;
2833
2834         ffs_dump_mem(": Original  ep desc", ds, ds->bLength);
2835         if (ffs_ep->ep) {
2836                 ds->bEndpointAddress = ffs_ep->descs[0]->bEndpointAddress;
2837                 if (!ds->wMaxPacketSize)
2838                         ds->wMaxPacketSize = ffs_ep->descs[0]->wMaxPacketSize;
2839         } else {
2840                 struct usb_request *req;
2841                 struct usb_ep *ep;
2842                 u8 bEndpointAddress;
2843                 u16 wMaxPacketSize;
2844
2845                 /*
2846                  * We back up bEndpointAddress because autoconfig overwrites
2847                  * it with physical endpoint address.
2848                  */
2849                 bEndpointAddress = ds->bEndpointAddress;
2850                 /*
2851                  * We back up wMaxPacketSize because autoconfig treats
2852                  * endpoint descriptors as if they were full speed.
2853                  */
2854                 wMaxPacketSize = ds->wMaxPacketSize;
2855                 pr_vdebug("autoconfig\n");
2856                 ep = usb_ep_autoconfig(func->gadget, ds);
2857                 if (unlikely(!ep))
2858                         return -ENOTSUPP;
2859                 ep->driver_data = func->eps + idx;
2860
2861                 req = usb_ep_alloc_request(ep, GFP_KERNEL);
2862                 if (unlikely(!req))
2863                         return -ENOMEM;
2864
2865                 ffs_ep->ep  = ep;
2866                 ffs_ep->req = req;
2867                 func->eps_revmap[ds->bEndpointAddress &
2868                                  USB_ENDPOINT_NUMBER_MASK] = idx + 1;
2869                 /*
2870                  * If we use virtual address mapping, we restore
2871                  * original bEndpointAddress value.
2872                  */
2873                 if (func->ffs->user_flags & FUNCTIONFS_VIRTUAL_ADDR)
2874                         ds->bEndpointAddress = bEndpointAddress;
2875                 /*
2876                  * Restore wMaxPacketSize which was potentially
2877                  * overwritten by autoconfig.
2878                  */
2879                 ds->wMaxPacketSize = wMaxPacketSize;
2880         }
2881         ffs_dump_mem(": Rewritten ep desc", ds, ds->bLength);
2882
2883         return 0;
2884 }
2885
2886 static int __ffs_func_bind_do_nums(enum ffs_entity_type type, u8 *valuep,
2887                                    struct usb_descriptor_header *desc,
2888                                    void *priv)
2889 {
2890         struct ffs_function *func = priv;
2891         unsigned idx;
2892         u8 newValue;
2893
2894         switch (type) {
2895         default:
2896         case FFS_DESCRIPTOR:
2897                 /* Handled in previous pass by __ffs_func_bind_do_descs() */
2898                 return 0;
2899
2900         case FFS_INTERFACE:
2901                 idx = *valuep;
2902                 if (func->interfaces_nums[idx] < 0) {
2903                         int id = usb_interface_id(func->conf, &func->function);
2904                         if (unlikely(id < 0))
2905                                 return id;
2906                         func->interfaces_nums[idx] = id;
2907                 }
2908                 newValue = func->interfaces_nums[idx];
2909                 break;
2910
2911         case FFS_STRING:
2912                 /* String' IDs are allocated when fsf_data is bound to cdev */
2913                 newValue = func->ffs->stringtabs[0]->strings[*valuep - 1].id;
2914                 break;
2915
2916         case FFS_ENDPOINT:
2917                 /*
2918                  * USB_DT_ENDPOINT are handled in
2919                  * __ffs_func_bind_do_descs().
2920                  */
2921                 if (desc->bDescriptorType == USB_DT_ENDPOINT)
2922                         return 0;
2923
2924                 idx = (*valuep & USB_ENDPOINT_NUMBER_MASK) - 1;
2925                 if (unlikely(!func->eps[idx].ep))
2926                         return -EINVAL;
2927
2928                 {
2929                         struct usb_endpoint_descriptor **descs;
2930                         descs = func->eps[idx].descs;
2931                         newValue = descs[descs[0] ? 0 : 1]->bEndpointAddress;
2932                 }
2933                 break;
2934         }
2935
2936         pr_vdebug("%02x -> %02x\n", *valuep, newValue);
2937         *valuep = newValue;
2938         return 0;
2939 }
2940
2941 static int __ffs_func_bind_do_os_desc(enum ffs_os_desc_type type,
2942                                       struct usb_os_desc_header *h, void *data,
2943                                       unsigned len, void *priv)
2944 {
2945         struct ffs_function *func = priv;
2946         u8 length = 0;
2947
2948         switch (type) {
2949         case FFS_OS_DESC_EXT_COMPAT: {
2950                 struct usb_ext_compat_desc *desc = data;
2951                 struct usb_os_desc_table *t;
2952
2953                 t = &func->function.os_desc_table[desc->bFirstInterfaceNumber];
2954                 t->if_id = func->interfaces_nums[desc->bFirstInterfaceNumber];
2955                 memcpy(t->os_desc->ext_compat_id, &desc->CompatibleID,
2956                        ARRAY_SIZE(desc->CompatibleID) +
2957                        ARRAY_SIZE(desc->SubCompatibleID));
2958                 length = sizeof(*desc);
2959         }
2960                 break;
2961         case FFS_OS_DESC_EXT_PROP: {
2962                 struct usb_ext_prop_desc *desc = data;
2963                 struct usb_os_desc_table *t;
2964                 struct usb_os_desc_ext_prop *ext_prop;
2965                 char *ext_prop_name;
2966                 char *ext_prop_data;
2967
2968                 t = &func->function.os_desc_table[h->interface];
2969                 t->if_id = func->interfaces_nums[h->interface];
2970
2971                 ext_prop = func->ffs->ms_os_descs_ext_prop_avail;
2972                 func->ffs->ms_os_descs_ext_prop_avail += sizeof(*ext_prop);
2973
2974                 ext_prop->type = le32_to_cpu(desc->dwPropertyDataType);
2975                 ext_prop->name_len = le16_to_cpu(desc->wPropertyNameLength);
2976                 ext_prop->data_len = le32_to_cpu(*(__le32 *)
2977                         usb_ext_prop_data_len_ptr(data, ext_prop->name_len));
2978                 length = ext_prop->name_len + ext_prop->data_len + 14;
2979
2980                 ext_prop_name = func->ffs->ms_os_descs_ext_prop_name_avail;
2981                 func->ffs->ms_os_descs_ext_prop_name_avail +=
2982                         ext_prop->name_len;
2983
2984                 ext_prop_data = func->ffs->ms_os_descs_ext_prop_data_avail;
2985                 func->ffs->ms_os_descs_ext_prop_data_avail +=
2986                         ext_prop->data_len;
2987                 memcpy(ext_prop_data,
2988                        usb_ext_prop_data_ptr(data, ext_prop->name_len),
2989                        ext_prop->data_len);
2990                 /* unicode data reported to the host as "WCHAR"s */
2991                 switch (ext_prop->type) {
2992                 case USB_EXT_PROP_UNICODE:
2993                 case USB_EXT_PROP_UNICODE_ENV:
2994                 case USB_EXT_PROP_UNICODE_LINK:
2995                 case USB_EXT_PROP_UNICODE_MULTI:
2996                         ext_prop->data_len *= 2;
2997                         break;
2998                 }
2999                 ext_prop->data = ext_prop_data;
3000
3001                 memcpy(ext_prop_name, usb_ext_prop_name_ptr(data),
3002                        ext_prop->name_len);
3003                 /* property name reported to the host as "WCHAR"s */
3004                 ext_prop->name_len *= 2;
3005                 ext_prop->name = ext_prop_name;
3006
3007                 t->os_desc->ext_prop_len +=
3008                         ext_prop->name_len + ext_prop->data_len + 14;
3009                 ++t->os_desc->ext_prop_count;
3010                 list_add_tail(&ext_prop->entry, &t->os_desc->ext_prop);
3011         }
3012                 break;
3013         default:
3014                 pr_vdebug("unknown descriptor: %d\n", type);
3015         }
3016
3017         return length;
3018 }
3019
3020 static inline struct f_fs_opts *ffs_do_functionfs_bind(struct usb_function *f,
3021                                                 struct usb_configuration *c)
3022 {
3023         struct ffs_function *func = ffs_func_from_usb(f);
3024         struct f_fs_opts *ffs_opts =
3025                 container_of(f->fi, struct f_fs_opts, func_inst);
3026         int ret;
3027
3028         ENTER();
3029
3030         /*
3031          * Legacy gadget triggers binding in functionfs_ready_callback,
3032          * which already uses locking; taking the same lock here would
3033          * cause a deadlock.
3034          *
3035          * Configfs-enabled gadgets however do need ffs_dev_lock.
3036          */
3037         if (!ffs_opts->no_configfs)
3038                 ffs_dev_lock();
3039         ret = ffs_opts->dev->desc_ready ? 0 : -ENODEV;
3040         func->ffs = ffs_opts->dev->ffs_data;
3041         if (!ffs_opts->no_configfs)
3042                 ffs_dev_unlock();
3043         if (ret)
3044                 return ERR_PTR(ret);
3045
3046         func->conf = c;
3047         func->gadget = c->cdev->gadget;
3048
3049         /*
3050          * in drivers/usb/gadget/configfs.c:configfs_composite_bind()
3051          * configurations are bound in sequence with list_for_each_entry,
3052          * in each configuration its functions are bound in sequence
3053          * with list_for_each_entry, so we assume no race condition
3054          * with regard to ffs_opts->bound access
3055          */
3056         if (!ffs_opts->refcnt) {
3057                 ret = functionfs_bind(func->ffs, c->cdev);
3058                 if (ret)
3059                         return ERR_PTR(ret);
3060         }
3061         ffs_opts->refcnt++;
3062         func->function.strings = func->ffs->stringtabs;
3063
3064         return ffs_opts;
3065 }
3066
3067 static int _ffs_func_bind(struct usb_configuration *c,
3068                           struct usb_function *f)
3069 {
3070         struct ffs_function *func = ffs_func_from_usb(f);
3071         struct ffs_data *ffs = func->ffs;
3072
3073         const int full = !!func->ffs->fs_descs_count;
3074         const int high = !!func->ffs->hs_descs_count;
3075         const int super = !!func->ffs->ss_descs_count;
3076
3077         int fs_len, hs_len, ss_len, ret, i;
3078         struct ffs_ep *eps_ptr;
3079
3080         /* Make it a single chunk, less management later on */
3081         vla_group(d);
3082         vla_item_with_sz(d, struct ffs_ep, eps, ffs->eps_count);
3083         vla_item_with_sz(d, struct usb_descriptor_header *, fs_descs,
3084                 full ? ffs->fs_descs_count + 1 : 0);
3085         vla_item_with_sz(d, struct usb_descriptor_header *, hs_descs,
3086                 high ? ffs->hs_descs_count + 1 : 0);
3087         vla_item_with_sz(d, struct usb_descriptor_header *, ss_descs,
3088                 super ? ffs->ss_descs_count + 1 : 0);
3089         vla_item_with_sz(d, short, inums, ffs->interfaces_count);
3090         vla_item_with_sz(d, struct usb_os_desc_table, os_desc_table,
3091                          c->cdev->use_os_string ? ffs->interfaces_count : 0);
3092         vla_item_with_sz(d, char[16], ext_compat,
3093                          c->cdev->use_os_string ? ffs->interfaces_count : 0);
3094         vla_item_with_sz(d, struct usb_os_desc, os_desc,
3095                          c->cdev->use_os_string ? ffs->interfaces_count : 0);
3096         vla_item_with_sz(d, struct usb_os_desc_ext_prop, ext_prop,
3097                          ffs->ms_os_descs_ext_prop_count);
3098         vla_item_with_sz(d, char, ext_prop_name,
3099                          ffs->ms_os_descs_ext_prop_name_len);
3100         vla_item_with_sz(d, char, ext_prop_data,
3101                          ffs->ms_os_descs_ext_prop_data_len);
3102         vla_item_with_sz(d, char, raw_descs, ffs->raw_descs_length);
3103         char *vlabuf;
3104
3105         ENTER();
3106
3107         /* Has descriptors only for speeds gadget does not support */
3108         if (unlikely(!(full | high | super)))
3109                 return -ENOTSUPP;
3110
3111         /* Allocate a single chunk, less management later on */
3112         vlabuf = kzalloc(vla_group_size(d), GFP_KERNEL);
3113         if (unlikely(!vlabuf))
3114                 return -ENOMEM;
3115
3116         ffs->ms_os_descs_ext_prop_avail = vla_ptr(vlabuf, d, ext_prop);
3117         ffs->ms_os_descs_ext_prop_name_avail =
3118                 vla_ptr(vlabuf, d, ext_prop_name);
3119         ffs->ms_os_descs_ext_prop_data_avail =
3120                 vla_ptr(vlabuf, d, ext_prop_data);
3121
3122         /* Copy descriptors  */
3123         memcpy(vla_ptr(vlabuf, d, raw_descs), ffs->raw_descs,
3124                ffs->raw_descs_length);
3125
3126         memset(vla_ptr(vlabuf, d, inums), 0xff, d_inums__sz);
3127         eps_ptr = vla_ptr(vlabuf, d, eps);
3128         for (i = 0; i < ffs->eps_count; i++)
3129                 eps_ptr[i].num = -1;
3130
3131         /* Save pointers
3132          * d_eps == vlabuf, func->eps used to kfree vlabuf later
3133         */
3134         func->eps             = vla_ptr(vlabuf, d, eps);
3135         func->interfaces_nums = vla_ptr(vlabuf, d, inums);
3136
3137         /*
3138          * Go through all the endpoint descriptors and allocate
3139          * endpoints first, so that later we can rewrite the endpoint
3140          * numbers without worrying that it may be described later on.
3141          */
3142         if (likely(full)) {
3143                 func->function.fs_descriptors = vla_ptr(vlabuf, d, fs_descs);
3144                 fs_len = ffs_do_descs(ffs->fs_descs_count,
3145                                       vla_ptr(vlabuf, d, raw_descs),
3146                                       d_raw_descs__sz,
3147                                       __ffs_func_bind_do_descs, func);
3148                 if (unlikely(fs_len < 0)) {
3149                         ret = fs_len;
3150                         goto error;
3151                 }
3152         } else {
3153                 fs_len = 0;
3154         }
3155
3156         if (likely(high)) {
3157                 func->function.hs_descriptors = vla_ptr(vlabuf, d, hs_descs);
3158                 hs_len = ffs_do_descs(ffs->hs_descs_count,
3159                                       vla_ptr(vlabuf, d, raw_descs) + fs_len,
3160                                       d_raw_descs__sz - fs_len,
3161                                       __ffs_func_bind_do_descs, func);
3162                 if (unlikely(hs_len < 0)) {
3163                         ret = hs_len;
3164                         goto error;
3165                 }
3166         } else {
3167                 hs_len = 0;
3168         }
3169
3170         if (likely(super)) {
3171                 func->function.ss_descriptors = vla_ptr(vlabuf, d, ss_descs);
3172                 ss_len = ffs_do_descs(ffs->ss_descs_count,
3173                                 vla_ptr(vlabuf, d, raw_descs) + fs_len + hs_len,
3174                                 d_raw_descs__sz - fs_len - hs_len,
3175                                 __ffs_func_bind_do_descs, func);
3176                 if (unlikely(ss_len < 0)) {
3177                         ret = ss_len;
3178                         goto error;
3179                 }
3180         } else {
3181                 ss_len = 0;
3182         }
3183
3184         /*
3185          * Now handle interface numbers allocation and interface and
3186          * endpoint numbers rewriting.  We can do that in one go
3187          * now.
3188          */
3189         ret = ffs_do_descs(ffs->fs_descs_count +
3190                            (high ? ffs->hs_descs_count : 0) +
3191                            (super ? ffs->ss_descs_count : 0),
3192                            vla_ptr(vlabuf, d, raw_descs), d_raw_descs__sz,
3193                            __ffs_func_bind_do_nums, func);
3194         if (unlikely(ret < 0))
3195                 goto error;
3196
3197         func->function.os_desc_table = vla_ptr(vlabuf, d, os_desc_table);
3198         if (c->cdev->use_os_string) {
3199                 for (i = 0; i < ffs->interfaces_count; ++i) {
3200                         struct usb_os_desc *desc;
3201
3202                         desc = func->function.os_desc_table[i].os_desc =
3203                                 vla_ptr(vlabuf, d, os_desc) +
3204                                 i * sizeof(struct usb_os_desc);
3205                         desc->ext_compat_id =
3206                                 vla_ptr(vlabuf, d, ext_compat) + i * 16;
3207                         INIT_LIST_HEAD(&desc->ext_prop);
3208                 }
3209                 ret = ffs_do_os_descs(ffs->ms_os_descs_count,
3210                                       vla_ptr(vlabuf, d, raw_descs) +
3211                                       fs_len + hs_len + ss_len,
3212                                       d_raw_descs__sz - fs_len - hs_len -
3213                                       ss_len,
3214                                       __ffs_func_bind_do_os_desc, func);
3215                 if (unlikely(ret < 0))
3216                         goto error;
3217         }
3218         func->function.os_desc_n =
3219                 c->cdev->use_os_string ? ffs->interfaces_count : 0;
3220
3221         /* And we're done */
3222         ffs_event_add(ffs, FUNCTIONFS_BIND);
3223         return 0;
3224
3225 error:
3226         /* XXX Do we need to release all claimed endpoints here? */
3227         return ret;
3228 }
3229
3230 static int ffs_func_bind(struct usb_configuration *c,
3231                          struct usb_function *f)
3232 {
3233         struct f_fs_opts *ffs_opts = ffs_do_functionfs_bind(f, c);
3234         struct ffs_function *func = ffs_func_from_usb(f);
3235         int ret;
3236
3237         if (IS_ERR(ffs_opts))
3238                 return PTR_ERR(ffs_opts);
3239
3240         ret = _ffs_func_bind(c, f);
3241         if (ret && !--ffs_opts->refcnt)
3242                 functionfs_unbind(func->ffs);
3243
3244         return ret;
3245 }
3246
3247
3248 /* Other USB function hooks *************************************************/
3249
3250 static void ffs_reset_work(struct work_struct *work)
3251 {
3252         struct ffs_data *ffs = container_of(work,
3253                 struct ffs_data, reset_work);
3254         ffs_data_reset(ffs);
3255 }
3256
3257 static int ffs_func_set_alt(struct usb_function *f,
3258                             unsigned interface, unsigned alt)
3259 {
3260         struct ffs_function *func = ffs_func_from_usb(f);
3261         struct ffs_data *ffs = func->ffs;
3262         int ret = 0, intf;
3263
3264         if (alt != (unsigned)-1) {
3265                 intf = ffs_func_revmap_intf(func, interface);
3266                 if (unlikely(intf < 0))
3267                         return intf;
3268         }
3269
3270         if (ffs->func)
3271                 ffs_func_eps_disable(ffs->func);
3272
3273         if (ffs->state == FFS_DEACTIVATED) {
3274                 ffs->state = FFS_CLOSING;
3275                 INIT_WORK(&ffs->reset_work, ffs_reset_work);
3276                 schedule_work(&ffs->reset_work);
3277                 return -ENODEV;
3278         }
3279
3280         if (ffs->state != FFS_ACTIVE)
3281                 return -ENODEV;
3282
3283         if (alt == (unsigned)-1) {
3284                 ffs->func = NULL;
3285                 ffs_event_add(ffs, FUNCTIONFS_DISABLE);
3286                 return 0;
3287         }
3288
3289         ffs->func = func;
3290         ret = ffs_func_eps_enable(func);
3291         if (likely(ret >= 0))
3292                 ffs_event_add(ffs, FUNCTIONFS_ENABLE);
3293         return ret;
3294 }
3295
3296 static void ffs_func_disable(struct usb_function *f)
3297 {
3298         ffs_func_set_alt(f, 0, (unsigned)-1);
3299 }
3300
3301 static int ffs_func_setup(struct usb_function *f,
3302                           const struct usb_ctrlrequest *creq)
3303 {
3304         struct ffs_function *func = ffs_func_from_usb(f);
3305         struct ffs_data *ffs = func->ffs;
3306         unsigned long flags;
3307         int ret;
3308
3309         ENTER();
3310
3311         pr_vdebug("creq->bRequestType = %02x\n", creq->bRequestType);
3312         pr_vdebug("creq->bRequest     = %02x\n", creq->bRequest);
3313         pr_vdebug("creq->wValue       = %04x\n", le16_to_cpu(creq->wValue));
3314         pr_vdebug("creq->wIndex       = %04x\n", le16_to_cpu(creq->wIndex));
3315         pr_vdebug("creq->wLength      = %04x\n", le16_to_cpu(creq->wLength));
3316
3317         /*
3318          * Most requests directed to interface go through here
3319          * (notable exceptions are set/get interface) so we need to
3320          * handle them.  All other either handled by composite or
3321          * passed to usb_configuration->setup() (if one is set).  No
3322          * matter, we will handle requests directed to endpoint here
3323          * as well (as it's straightforward).  Other request recipient
3324          * types are only handled when the user flag FUNCTIONFS_ALL_CTRL_RECIP
3325          * is being used.
3326          */
3327         if (ffs->state != FFS_ACTIVE)
3328                 return -ENODEV;
3329
3330         switch (creq->bRequestType & USB_RECIP_MASK) {
3331         case USB_RECIP_INTERFACE:
3332                 ret = ffs_func_revmap_intf(func, le16_to_cpu(creq->wIndex));
3333                 if (unlikely(ret < 0))
3334                         return ret;
3335                 break;
3336
3337         case USB_RECIP_ENDPOINT:
3338                 ret = ffs_func_revmap_ep(func, le16_to_cpu(creq->wIndex));
3339                 if (unlikely(ret < 0))
3340                         return ret;
3341                 if (func->ffs->user_flags & FUNCTIONFS_VIRTUAL_ADDR)
3342                         ret = func->ffs->eps_addrmap[ret];
3343                 break;
3344
3345         default:
3346                 if (func->ffs->user_flags & FUNCTIONFS_ALL_CTRL_RECIP)
3347                         ret = le16_to_cpu(creq->wIndex);
3348                 else
3349                         return -EOPNOTSUPP;
3350         }
3351
3352         spin_lock_irqsave(&ffs->ev.waitq.lock, flags);
3353         ffs->ev.setup = *creq;
3354         ffs->ev.setup.wIndex = cpu_to_le16(ret);
3355         __ffs_event_add(ffs, FUNCTIONFS_SETUP);
3356         spin_unlock_irqrestore(&ffs->ev.waitq.lock, flags);
3357
3358         return creq->wLength == 0 ? USB_GADGET_DELAYED_STATUS : 0;
3359 }
3360
3361 static bool ffs_func_req_match(struct usb_function *f,
3362                                const struct usb_ctrlrequest *creq,
3363                                bool config0)
3364 {
3365         struct ffs_function *func = ffs_func_from_usb(f);
3366
3367         if (config0 && !(func->ffs->user_flags & FUNCTIONFS_CONFIG0_SETUP))
3368                 return false;
3369
3370         switch (creq->bRequestType & USB_RECIP_MASK) {
3371         case USB_RECIP_INTERFACE:
3372                 return (ffs_func_revmap_intf(func,
3373                                              le16_to_cpu(creq->wIndex)) >= 0);
3374         case USB_RECIP_ENDPOINT:
3375                 return (ffs_func_revmap_ep(func,
3376                                            le16_to_cpu(creq->wIndex)) >= 0);
3377         default:
3378                 return (bool) (func->ffs->user_flags &
3379                                FUNCTIONFS_ALL_CTRL_RECIP);
3380         }
3381 }
3382
3383 static void ffs_func_suspend(struct usb_function *f)
3384 {
3385         ENTER();
3386         ffs_event_add(ffs_func_from_usb(f)->ffs, FUNCTIONFS_SUSPEND);
3387 }
3388
3389 static void ffs_func_resume(struct usb_function *f)
3390 {
3391         ENTER();
3392         ffs_event_add(ffs_func_from_usb(f)->ffs, FUNCTIONFS_RESUME);
3393 }
3394
3395
3396 /* Endpoint and interface numbers reverse mapping ***************************/
3397
3398 static int ffs_func_revmap_ep(struct ffs_function *func, u8 num)
3399 {
3400         num = func->eps_revmap[num & USB_ENDPOINT_NUMBER_MASK];
3401         return num ? num : -EDOM;
3402 }
3403
3404 static int ffs_func_revmap_intf(struct ffs_function *func, u8 intf)
3405 {
3406         short *nums = func->interfaces_nums;
3407         unsigned count = func->ffs->interfaces_count;
3408
3409         for (; count; --count, ++nums) {
3410                 if (*nums >= 0 && *nums == intf)
3411                         return nums - func->interfaces_nums;
3412         }
3413
3414         return -EDOM;
3415 }
3416
3417
3418 /* Devices management *******************************************************/
3419
3420 static LIST_HEAD(ffs_devices);
3421
3422 static struct ffs_dev *_ffs_do_find_dev(const char *name)
3423 {
3424         struct ffs_dev *dev;
3425
3426         if (!name)
3427                 return NULL;
3428
3429         list_for_each_entry(dev, &ffs_devices, entry) {
3430                 if (strcmp(dev->name, name) == 0)
3431                         return dev;
3432         }
3433
3434         return NULL;
3435 }
3436
3437 /*
3438  * ffs_lock must be taken by the caller of this function
3439  */
3440 static struct ffs_dev *_ffs_get_single_dev(void)
3441 {
3442         struct ffs_dev *dev;
3443
3444         if (list_is_singular(&ffs_devices)) {
3445                 dev = list_first_entry(&ffs_devices, struct ffs_dev, entry);
3446                 if (dev->single)
3447                         return dev;
3448         }
3449
3450         return NULL;
3451 }
3452
3453 /*
3454  * ffs_lock must be taken by the caller of this function
3455  */
3456 static struct ffs_dev *_ffs_find_dev(const char *name)
3457 {
3458         struct ffs_dev *dev;
3459
3460         dev = _ffs_get_single_dev();
3461         if (dev)
3462                 return dev;
3463
3464         return _ffs_do_find_dev(name);
3465 }
3466
3467 /* Configfs support *********************************************************/
3468
3469 static inline struct f_fs_opts *to_ffs_opts(struct config_item *item)
3470 {
3471         return container_of(to_config_group(item), struct f_fs_opts,
3472                             func_inst.group);
3473 }
3474
3475 static void ffs_attr_release(struct config_item *item)
3476 {
3477         struct f_fs_opts *opts = to_ffs_opts(item);
3478
3479         usb_put_function_instance(&opts->func_inst);
3480 }
3481
3482 static struct configfs_item_operations ffs_item_ops = {
3483         .release        = ffs_attr_release,
3484 };
3485
3486 static const struct config_item_type ffs_func_type = {
3487         .ct_item_ops    = &ffs_item_ops,
3488         .ct_owner       = THIS_MODULE,
3489 };
3490
3491
3492 /* Function registration interface ******************************************/
3493
3494 static void ffs_free_inst(struct usb_function_instance *f)
3495 {
3496         struct f_fs_opts *opts;
3497
3498         opts = to_f_fs_opts(f);
3499         ffs_dev_lock();
3500         _ffs_free_dev(opts->dev);
3501         ffs_dev_unlock();
3502         kfree(opts);
3503 }
3504
3505 static int ffs_set_inst_name(struct usb_function_instance *fi, const char *name)
3506 {
3507         if (strlen(name) >= sizeof_field(struct ffs_dev, name))
3508                 return -ENAMETOOLONG;
3509         return ffs_name_dev(to_f_fs_opts(fi)->dev, name);
3510 }
3511
3512 static struct usb_function_instance *ffs_alloc_inst(void)
3513 {
3514         struct f_fs_opts *opts;
3515         struct ffs_dev *dev;
3516
3517         opts = kzalloc(sizeof(*opts), GFP_KERNEL);
3518         if (!opts)
3519                 return ERR_PTR(-ENOMEM);
3520
3521         opts->func_inst.set_inst_name = ffs_set_inst_name;
3522         opts->func_inst.free_func_inst = ffs_free_inst;
3523         ffs_dev_lock();
3524         dev = _ffs_alloc_dev();
3525         ffs_dev_unlock();
3526         if (IS_ERR(dev)) {
3527                 kfree(opts);
3528                 return ERR_CAST(dev);
3529         }
3530         opts->dev = dev;
3531         dev->opts = opts;
3532
3533         config_group_init_type_name(&opts->func_inst.group, "",
3534                                     &ffs_func_type);
3535         return &opts->func_inst;
3536 }
3537
3538 static void ffs_free(struct usb_function *f)
3539 {
3540         kfree(ffs_func_from_usb(f));
3541 }
3542
3543 static void ffs_func_unbind(struct usb_configuration *c,
3544                             struct usb_function *f)
3545 {
3546         struct ffs_function *func = ffs_func_from_usb(f);
3547         struct ffs_data *ffs = func->ffs;
3548         struct f_fs_opts *opts =
3549                 container_of(f->fi, struct f_fs_opts, func_inst);
3550         struct ffs_ep *ep = func->eps;
3551         unsigned count = ffs->eps_count;
3552         unsigned long flags;
3553
3554         ENTER();
3555         if (ffs->func == func) {
3556                 ffs_func_eps_disable(func);
3557                 ffs->func = NULL;
3558         }
3559
3560         if (!--opts->refcnt)
3561                 functionfs_unbind(ffs);
3562
3563         /* cleanup after autoconfig */
3564         spin_lock_irqsave(&func->ffs->eps_lock, flags);
3565         while (count--) {
3566                 if (ep->ep && ep->req)
3567                         usb_ep_free_request(ep->ep, ep->req);
3568                 ep->req = NULL;
3569                 ++ep;
3570         }
3571         spin_unlock_irqrestore(&func->ffs->eps_lock, flags);
3572         kfree(func->eps);
3573         func->eps = NULL;
3574         /*
3575          * eps, descriptors and interfaces_nums are allocated in the
3576          * same chunk so only one free is required.
3577          */
3578         func->function.fs_descriptors = NULL;
3579         func->function.hs_descriptors = NULL;
3580         func->function.ss_descriptors = NULL;
3581         func->interfaces_nums = NULL;
3582
3583         ffs_event_add(ffs, FUNCTIONFS_UNBIND);
3584 }
3585
3586 static struct usb_function *ffs_alloc(struct usb_function_instance *fi)
3587 {
3588         struct ffs_function *func;
3589
3590         ENTER();
3591
3592         func = kzalloc(sizeof(*func), GFP_KERNEL);
3593         if (unlikely(!func))
3594                 return ERR_PTR(-ENOMEM);
3595
3596         func->function.name    = "Function FS Gadget";
3597
3598         func->function.bind    = ffs_func_bind;
3599         func->function.unbind  = ffs_func_unbind;
3600         func->function.set_alt = ffs_func_set_alt;
3601         func->function.disable = ffs_func_disable;
3602         func->function.setup   = ffs_func_setup;
3603         func->function.req_match = ffs_func_req_match;
3604         func->function.suspend = ffs_func_suspend;
3605         func->function.resume  = ffs_func_resume;
3606         func->function.free_func = ffs_free;
3607
3608         return &func->function;
3609 }
3610
3611 /*
3612  * ffs_lock must be taken by the caller of this function
3613  */
3614 static struct ffs_dev *_ffs_alloc_dev(void)
3615 {
3616         struct ffs_dev *dev;
3617         int ret;
3618
3619         if (_ffs_get_single_dev())
3620                         return ERR_PTR(-EBUSY);
3621
3622         dev = kzalloc(sizeof(*dev), GFP_KERNEL);
3623         if (!dev)
3624                 return ERR_PTR(-ENOMEM);
3625
3626         if (list_empty(&ffs_devices)) {
3627                 ret = functionfs_init();
3628                 if (ret) {
3629                         kfree(dev);
3630                         return ERR_PTR(ret);
3631                 }
3632         }
3633
3634         list_add(&dev->entry, &ffs_devices);
3635
3636         return dev;
3637 }
3638
3639 int ffs_name_dev(struct ffs_dev *dev, const char *name)
3640 {
3641         struct ffs_dev *existing;
3642         int ret = 0;
3643
3644         ffs_dev_lock();
3645
3646         existing = _ffs_do_find_dev(name);
3647         if (!existing)
3648                 strlcpy(dev->name, name, ARRAY_SIZE(dev->name));
3649         else if (existing != dev)
3650                 ret = -EBUSY;
3651
3652         ffs_dev_unlock();
3653
3654         return ret;
3655 }
3656 EXPORT_SYMBOL_GPL(ffs_name_dev);
3657
3658 int ffs_single_dev(struct ffs_dev *dev)
3659 {
3660         int ret;
3661
3662         ret = 0;
3663         ffs_dev_lock();
3664
3665         if (!list_is_singular(&ffs_devices))
3666                 ret = -EBUSY;
3667         else
3668                 dev->single = true;
3669
3670         ffs_dev_unlock();
3671         return ret;
3672 }
3673 EXPORT_SYMBOL_GPL(ffs_single_dev);
3674
3675 /*
3676  * ffs_lock must be taken by the caller of this function
3677  */
3678 static void _ffs_free_dev(struct ffs_dev *dev)
3679 {
3680         list_del(&dev->entry);
3681
3682         /* Clear the private_data pointer to stop incorrect dev access */
3683         if (dev->ffs_data)
3684                 dev->ffs_data->private_data = NULL;
3685
3686         kfree(dev);
3687         if (list_empty(&ffs_devices))
3688                 functionfs_cleanup();
3689 }
3690
3691 static void *ffs_acquire_dev(const char *dev_name)
3692 {
3693         struct ffs_dev *ffs_dev;
3694
3695         ENTER();
3696         ffs_dev_lock();
3697
3698         ffs_dev = _ffs_find_dev(dev_name);
3699         if (!ffs_dev)
3700                 ffs_dev = ERR_PTR(-ENOENT);
3701         else if (ffs_dev->mounted)
3702                 ffs_dev = ERR_PTR(-EBUSY);
3703         else if (ffs_dev->ffs_acquire_dev_callback &&
3704             ffs_dev->ffs_acquire_dev_callback(ffs_dev))
3705                 ffs_dev = ERR_PTR(-ENOENT);
3706         else
3707                 ffs_dev->mounted = true;
3708
3709         ffs_dev_unlock();
3710         return ffs_dev;
3711 }
3712
3713 static void ffs_release_dev(struct ffs_data *ffs_data)
3714 {
3715         struct ffs_dev *ffs_dev;
3716
3717         ENTER();
3718         ffs_dev_lock();
3719
3720         ffs_dev = ffs_data->private_data;
3721         if (ffs_dev) {
3722                 ffs_dev->mounted = false;
3723
3724                 if (ffs_dev->ffs_release_dev_callback)
3725                         ffs_dev->ffs_release_dev_callback(ffs_dev);
3726         }
3727
3728         ffs_dev_unlock();
3729 }
3730
3731 static int ffs_ready(struct ffs_data *ffs)
3732 {
3733         struct ffs_dev *ffs_obj;
3734         int ret = 0;
3735
3736         ENTER();
3737         ffs_dev_lock();
3738
3739         ffs_obj = ffs->private_data;
3740         if (!ffs_obj) {
3741                 ret = -EINVAL;
3742                 goto done;
3743         }
3744         if (WARN_ON(ffs_obj->desc_ready)) {
3745                 ret = -EBUSY;
3746                 goto done;
3747         }
3748
3749         ffs_obj->desc_ready = true;
3750         ffs_obj->ffs_data = ffs;
3751
3752         if (ffs_obj->ffs_ready_callback) {
3753                 ret = ffs_obj->ffs_ready_callback(ffs);
3754                 if (ret)
3755                         goto done;
3756         }
3757
3758         set_bit(FFS_FL_CALL_CLOSED_CALLBACK, &ffs->flags);
3759 done:
3760         ffs_dev_unlock();
3761         return ret;
3762 }
3763
3764 static void ffs_closed(struct ffs_data *ffs)
3765 {
3766         struct ffs_dev *ffs_obj;
3767         struct f_fs_opts *opts;
3768         struct config_item *ci;
3769
3770         ENTER();
3771         ffs_dev_lock();
3772
3773         ffs_obj = ffs->private_data;
3774         if (!ffs_obj)
3775                 goto done;
3776
3777         ffs_obj->desc_ready = false;
3778         ffs_obj->ffs_data = NULL;
3779
3780         if (test_and_clear_bit(FFS_FL_CALL_CLOSED_CALLBACK, &ffs->flags) &&
3781             ffs_obj->ffs_closed_callback)
3782                 ffs_obj->ffs_closed_callback(ffs);
3783
3784         if (ffs_obj->opts)
3785                 opts = ffs_obj->opts;
3786         else
3787                 goto done;
3788
3789         if (opts->no_configfs || !opts->func_inst.group.cg_item.ci_parent
3790             || !kref_read(&opts->func_inst.group.cg_item.ci_kref))
3791                 goto done;
3792
3793         ci = opts->func_inst.group.cg_item.ci_parent->ci_parent;
3794         ffs_dev_unlock();
3795
3796         if (test_bit(FFS_FL_BOUND, &ffs->flags))
3797                 unregister_gadget_item(ci);
3798         return;
3799 done:
3800         ffs_dev_unlock();
3801 }
3802
3803 /* Misc helper functions ****************************************************/
3804
3805 static int ffs_mutex_lock(struct mutex *mutex, unsigned nonblock)
3806 {
3807         return nonblock
3808                 ? likely(mutex_trylock(mutex)) ? 0 : -EAGAIN
3809                 : mutex_lock_interruptible(mutex);
3810 }
3811
3812 static char *ffs_prepare_buffer(const char __user *buf, size_t len)
3813 {
3814         char *data;
3815
3816         if (unlikely(!len))
3817                 return NULL;
3818
3819         data = kmalloc(len, GFP_KERNEL);
3820         if (unlikely(!data))
3821                 return ERR_PTR(-ENOMEM);
3822
3823         if (unlikely(copy_from_user(data, buf, len))) {
3824                 kfree(data);
3825                 return ERR_PTR(-EFAULT);
3826         }
3827
3828         pr_vdebug("Buffer from user space:\n");
3829         ffs_dump_mem("", data, len);
3830
3831         return data;
3832 }
3833
3834 DECLARE_USB_FUNCTION_INIT(ffs, ffs_alloc_inst, ffs_alloc);
3835 MODULE_LICENSE("GPL");
3836 MODULE_AUTHOR("Michal Nazarewicz");