staging: most: fix comment sections
[linux-2.6-block.git] / drivers / staging / most / core.c
1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * core.c - Implementation of core module of MOST Linux driver stack
4  *
5  * Copyright (C) 2013-2015 Microchip Technology Germany II GmbH & Co. KG
6  */
7
8 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
9 #include <linux/module.h>
10 #include <linux/fs.h>
11 #include <linux/slab.h>
12 #include <linux/init.h>
13 #include <linux/device.h>
14 #include <linux/list.h>
15 #include <linux/poll.h>
16 #include <linux/wait.h>
17 #include <linux/kobject.h>
18 #include <linux/mutex.h>
19 #include <linux/completion.h>
20 #include <linux/sysfs.h>
21 #include <linux/kthread.h>
22 #include <linux/dma-mapping.h>
23 #include <linux/idr.h>
24 #include <most/core.h>
25
26 #define MAX_CHANNELS    64
27 #define STRING_SIZE     80
28
/* ida handing out unique ids for interface devices registered on the bus */
static struct ida mdev_id;
/*
 * Fallback buffer counter; presumably used as the num_buffers_ptr target
 * when no component tracks buffers -- TODO confirm (users not in this chunk).
 */
static int dummy_num_buffers;
31
/*
 * Singleton holding the core's driver-model objects: the pseudo device,
 * the driver, the bus all MOST interfaces register on, the device class
 * and the list of registered core components.
 */
static struct mostcore {
	struct device dev;
	struct device_driver drv;
	struct bus_type bus;
	struct class *class;
	struct list_head comp_list;	/* registered core_component objects */
} mc;
39
/*
 * to_driver - get the mostcore instance embedding a device_driver
 * @d: pointer to the embedded 'drv' member
 *
 * Fix: the original definition carried a trailing semicolon, which breaks
 * any use of the macro inside an expression; it is dropped here.
 */
#define to_driver(d) container_of(d, struct mostcore, drv)
41
/*
 * Per-direction link between a channel and a core component.
 * @comp: attached component, NULL while the pipe is unused
 * @refs: users currently holding this pipe
 * @num_buffers: buffer count tracked for the component -- presumably the
 *               target of mbo->num_buffers_ptr; TODO confirm
 */
struct pipe {
	struct core_component *comp;
	int refs;
	int num_buffers;
};
47
/* Runtime state of one channel of a MOST interface. */
struct most_channel {
	struct device dev;
	struct completion cleanup;	/* completed when the last MBO was freed */
	atomic_t mbo_ref;		/* MBOs still alive for this channel */
	atomic_t mbo_nq_level;
	u16 channel_id;
	char name[STRING_SIZE];
	bool is_poisoned;		/* channel torn down; MBOs go to trash_fifo */
	struct mutex start_mutex;
	struct mutex nq_mutex; /* nq thread synchronization */
	int is_starving;
	struct most_interface *iface;
	struct most_channel_config cfg;
	bool keep_mbo;
	bool enqueue_halt;		/* suppress handing MBOs to the HDM */
	struct list_head fifo;		/* armed MBOs; protected by fifo_lock */
	spinlock_t fifo_lock;		/* protects fifo, halt_fifo, trash_fifo */
	struct list_head halt_fifo;	/* MBOs waiting for hdm_enqueue_thread */
	struct list_head list;		/* entry in interface_private.channel_list */
	struct pipe pipe0;
	struct pipe pipe1;
	struct list_head trash_fifo;	/* MBOs scheduled for freeing */
	struct task_struct *hdm_enqueue_task;
	wait_queue_head_t hdm_fifo_wq;	/* wakes hdm_enqueue_thread */

};
74
/* to_channel - get the most_channel embedding device @d */
#define to_channel(d) container_of(d, struct most_channel, dev)
76
/* Core-private per-interface state (reached via iface->p). */
struct interface_private {
	int dev_id;			/* presumably allocated from mdev_id -- TODO confirm */
	char name[STRING_SIZE];
	struct most_channel *channel[MAX_CHANNELS];
	struct list_head channel_list;	/* list of most_channel.list entries */
};
83
/*
 * Mapping of channel data-type flags to the strings used by the
 * set_datatype sysfs attribute.  Each name keeps its trailing '\n' so
 * entries can be strcmp()ed directly against raw sysfs input and printed
 * verbatim.  "isoc_avp" is a second spelling for MOST_CH_ISOC.
 */
static const struct {
	int most_ch_data_type;
	const char *name;
} ch_data_type[] = {
	{ MOST_CH_CONTROL, "control\n" },
	{ MOST_CH_ASYNC, "async\n" },
	{ MOST_CH_SYNC, "sync\n" },
	{ MOST_CH_ISOC, "isoc\n"},
	{ MOST_CH_ISOC, "isoc_avp\n"},
};
94
/**
 * list_pop_mbo - retrieves the first MBO of the list and removes it
 * @ptr: the list head to grab the MBO from.
 *
 * The caller must hold the fifo_lock protecting @ptr and must ensure the
 * list is not empty: list_first_entry() on an empty list would hand back
 * an invalid pointer.
 */
#define list_pop_mbo(ptr)						\
({									\
	struct mbo *_mbo = list_first_entry(ptr, struct mbo, list);	\
	list_del(&_mbo->list);						\
	_mbo;								\
})
105
106 /**
107  * most_free_mbo_coherent - free an MBO and its coherent buffer
108  * @mbo: most buffer
109  */
110 static void most_free_mbo_coherent(struct mbo *mbo)
111 {
112         struct most_channel *c = mbo->context;
113         u16 const coherent_buf_size = c->cfg.buffer_size + c->cfg.extra_len;
114
115         dma_free_coherent(NULL, coherent_buf_size, mbo->virt_address,
116                           mbo->bus_address);
117         kfree(mbo);
118         if (atomic_sub_and_test(1, &c->mbo_ref))
119                 complete(&c->cleanup);
120 }
121
/**
 * flush_channel_fifos - clear the channel fifos
 * @c: pointer to channel object
 *
 * Frees every MBO queued on the channel's fifo and halt_fifo.  fifo_lock
 * is dropped around each most_free_mbo_coherent() call so the free does
 * not run under the spinlock; the _safe iterator tolerates the removal.
 */
static void flush_channel_fifos(struct most_channel *c)
{
	unsigned long flags, hf_flags;
	struct mbo *mbo, *tmp;

	if (list_empty(&c->fifo) && list_empty(&c->halt_fifo))
		return;

	spin_lock_irqsave(&c->fifo_lock, flags);
	list_for_each_entry_safe(mbo, tmp, &c->fifo, list) {
		list_del(&mbo->list);
		/* drop the lock while freeing; see note above */
		spin_unlock_irqrestore(&c->fifo_lock, flags);
		most_free_mbo_coherent(mbo);
		spin_lock_irqsave(&c->fifo_lock, flags);
	}
	spin_unlock_irqrestore(&c->fifo_lock, flags);

	spin_lock_irqsave(&c->fifo_lock, hf_flags);
	list_for_each_entry_safe(mbo, tmp, &c->halt_fifo, list) {
		list_del(&mbo->list);
		spin_unlock_irqrestore(&c->fifo_lock, hf_flags);
		most_free_mbo_coherent(mbo);
		spin_lock_irqsave(&c->fifo_lock, hf_flags);
	}
	spin_unlock_irqrestore(&c->fifo_lock, hf_flags);

	/* NOTE(review): message says "trash fifo" but halt_fifo is checked */
	if (unlikely((!list_empty(&c->fifo) || !list_empty(&c->halt_fifo))))
		pr_info("WARN: fifo | trash fifo not empty\n");
}
155
/**
 * flush_trash_fifo - clear the trash fifo
 * @c: pointer to channel object
 *
 * Frees every MBO parked on trash_fifo (by trash_mbo()).  As in
 * flush_channel_fifos(), the spinlock is released around the actual free.
 *
 * Always returns 0.
 */
static int flush_trash_fifo(struct most_channel *c)
{
	struct mbo *mbo, *tmp;
	unsigned long flags;

	spin_lock_irqsave(&c->fifo_lock, flags);
	list_for_each_entry_safe(mbo, tmp, &c->trash_fifo, list) {
		list_del(&mbo->list);
		/* freeing must not happen under the spinlock */
		spin_unlock_irqrestore(&c->fifo_lock, flags);
		most_free_mbo_coherent(mbo);
		spin_lock_irqsave(&c->fifo_lock, flags);
	}
	spin_unlock_irqrestore(&c->fifo_lock, flags);
	return 0;
}
175
176 static ssize_t available_directions_show(struct device *dev,
177                                          struct device_attribute *attr,
178                                          char *buf)
179 {
180         struct most_channel *c = to_channel(dev);
181         unsigned int i = c->channel_id;
182
183         strcpy(buf, "");
184         if (c->iface->channel_vector[i].direction & MOST_CH_RX)
185                 strcat(buf, "rx ");
186         if (c->iface->channel_vector[i].direction & MOST_CH_TX)
187                 strcat(buf, "tx ");
188         strcat(buf, "\n");
189         return strlen(buf);
190 }
191
192 static ssize_t available_datatypes_show(struct device *dev,
193                                         struct device_attribute *attr,
194                                         char *buf)
195 {
196         struct most_channel *c = to_channel(dev);
197         unsigned int i = c->channel_id;
198
199         strcpy(buf, "");
200         if (c->iface->channel_vector[i].data_type & MOST_CH_CONTROL)
201                 strcat(buf, "control ");
202         if (c->iface->channel_vector[i].data_type & MOST_CH_ASYNC)
203                 strcat(buf, "async ");
204         if (c->iface->channel_vector[i].data_type & MOST_CH_SYNC)
205                 strcat(buf, "sync ");
206         if (c->iface->channel_vector[i].data_type & MOST_CH_ISOC)
207                 strcat(buf, "isoc ");
208         strcat(buf, "\n");
209         return strlen(buf);
210 }
211
212 static ssize_t number_of_packet_buffers_show(struct device *dev,
213                                              struct device_attribute *attr,
214                                              char *buf)
215 {
216         struct most_channel *c = to_channel(dev);
217         unsigned int i = c->channel_id;
218
219         return snprintf(buf, PAGE_SIZE, "%d\n",
220                         c->iface->channel_vector[i].num_buffers_packet);
221 }
222
223 static ssize_t number_of_stream_buffers_show(struct device *dev,
224                                              struct device_attribute *attr,
225                                              char *buf)
226 {
227         struct most_channel *c = to_channel(dev);
228         unsigned int i = c->channel_id;
229
230         return snprintf(buf, PAGE_SIZE, "%d\n",
231                         c->iface->channel_vector[i].num_buffers_streaming);
232 }
233
234 static ssize_t size_of_packet_buffer_show(struct device *dev,
235                                           struct device_attribute *attr,
236                                           char *buf)
237 {
238         struct most_channel *c = to_channel(dev);
239         unsigned int i = c->channel_id;
240
241         return snprintf(buf, PAGE_SIZE, "%d\n",
242                         c->iface->channel_vector[i].buffer_size_packet);
243 }
244
245 static ssize_t size_of_stream_buffer_show(struct device *dev,
246                                           struct device_attribute *attr,
247                                           char *buf)
248 {
249         struct most_channel *c = to_channel(dev);
250         unsigned int i = c->channel_id;
251
252         return snprintf(buf, PAGE_SIZE, "%d\n",
253                         c->iface->channel_vector[i].buffer_size_streaming);
254 }
255
256 static ssize_t channel_starving_show(struct device *dev,
257                                      struct device_attribute *attr,
258                                      char *buf)
259 {
260         struct most_channel *c = to_channel(dev);
261
262         return snprintf(buf, PAGE_SIZE, "%d\n", c->is_starving);
263 }
264
265 static ssize_t set_number_of_buffers_show(struct device *dev,
266                                           struct device_attribute *attr,
267                                           char *buf)
268 {
269         struct most_channel *c = to_channel(dev);
270
271         return snprintf(buf, PAGE_SIZE, "%d\n", c->cfg.num_buffers);
272 }
273
274 static ssize_t set_number_of_buffers_store(struct device *dev,
275                                            struct device_attribute *attr,
276                                            const char *buf,
277                                            size_t count)
278 {
279         struct most_channel *c = to_channel(dev);
280
281         int ret = kstrtou16(buf, 0, &c->cfg.num_buffers);
282
283         if (ret)
284                 return ret;
285         return count;
286 }
287
288 static ssize_t set_buffer_size_show(struct device *dev,
289                                     struct device_attribute *attr,
290                                     char *buf)
291 {
292         struct most_channel *c = to_channel(dev);
293
294         return snprintf(buf, PAGE_SIZE, "%d\n", c->cfg.buffer_size);
295 }
296
297 static ssize_t set_buffer_size_store(struct device *dev,
298                                      struct device_attribute *attr,
299                                      const char *buf,
300                                      size_t count)
301 {
302         struct most_channel *c = to_channel(dev);
303         int ret = kstrtou16(buf, 0, &c->cfg.buffer_size);
304
305         if (ret)
306                 return ret;
307         return count;
308 }
309
310 static ssize_t set_direction_show(struct device *dev,
311                                   struct device_attribute *attr,
312                                   char *buf)
313 {
314         struct most_channel *c = to_channel(dev);
315
316         if (c->cfg.direction & MOST_CH_TX)
317                 return snprintf(buf, PAGE_SIZE, "tx\n");
318         else if (c->cfg.direction & MOST_CH_RX)
319                 return snprintf(buf, PAGE_SIZE, "rx\n");
320         return snprintf(buf, PAGE_SIZE, "unconfigured\n");
321 }
322
323 static ssize_t set_direction_store(struct device *dev,
324                                    struct device_attribute *attr,
325                                    const char *buf,
326                                    size_t count)
327 {
328         struct most_channel *c = to_channel(dev);
329
330         if (!strcmp(buf, "dir_rx\n")) {
331                 c->cfg.direction = MOST_CH_RX;
332         } else if (!strcmp(buf, "rx\n")) {
333                 c->cfg.direction = MOST_CH_RX;
334         } else if (!strcmp(buf, "dir_tx\n")) {
335                 c->cfg.direction = MOST_CH_TX;
336         } else if (!strcmp(buf, "tx\n")) {
337                 c->cfg.direction = MOST_CH_TX;
338         } else {
339                 pr_info("WARN: invalid attribute settings\n");
340                 return -EINVAL;
341         }
342         return count;
343 }
344
345 static ssize_t set_datatype_show(struct device *dev,
346                                  struct device_attribute *attr,
347                                  char *buf)
348 {
349         int i;
350         struct most_channel *c = to_channel(dev);
351
352         for (i = 0; i < ARRAY_SIZE(ch_data_type); i++) {
353                 if (c->cfg.data_type & ch_data_type[i].most_ch_data_type)
354                         return snprintf(buf, PAGE_SIZE, ch_data_type[i].name);
355         }
356         return snprintf(buf, PAGE_SIZE, "unconfigured\n");
357 }
358
359 static ssize_t set_datatype_store(struct device *dev,
360                                   struct device_attribute *attr,
361                                   const char *buf,
362                                   size_t count)
363 {
364         int i;
365         struct most_channel *c = to_channel(dev);
366
367         for (i = 0; i < ARRAY_SIZE(ch_data_type); i++) {
368                 if (!strcmp(buf, ch_data_type[i].name)) {
369                         c->cfg.data_type = ch_data_type[i].most_ch_data_type;
370                         break;
371                 }
372         }
373
374         if (i == ARRAY_SIZE(ch_data_type)) {
375                 pr_info("WARN: invalid attribute settings\n");
376                 return -EINVAL;
377         }
378         return count;
379 }
380
381 static ssize_t set_subbuffer_size_show(struct device *dev,
382                                        struct device_attribute *attr,
383                                        char *buf)
384 {
385         struct most_channel *c = to_channel(dev);
386
387         return snprintf(buf, PAGE_SIZE, "%d\n", c->cfg.subbuffer_size);
388 }
389
390 static ssize_t set_subbuffer_size_store(struct device *dev,
391                                         struct device_attribute *attr,
392                                         const char *buf,
393                                         size_t count)
394 {
395         struct most_channel *c = to_channel(dev);
396         int ret = kstrtou16(buf, 0, &c->cfg.subbuffer_size);
397
398         if (ret)
399                 return ret;
400         return count;
401 }
402
403 static ssize_t set_packets_per_xact_show(struct device *dev,
404                                          struct device_attribute *attr,
405                                          char *buf)
406 {
407         struct most_channel *c = to_channel(dev);
408
409         return snprintf(buf, PAGE_SIZE, "%d\n", c->cfg.packets_per_xact);
410 }
411
412 static ssize_t set_packets_per_xact_store(struct device *dev,
413                                           struct device_attribute *attr,
414                                           const char *buf,
415                                           size_t count)
416 {
417         struct most_channel *c = to_channel(dev);
418         int ret = kstrtou16(buf, 0, &c->cfg.packets_per_xact);
419
420         if (ret)
421                 return ret;
422         return count;
423 }
424
/* shorthand for referencing a DEVICE_ATTR's embedded struct attribute */
#define DEV_ATTR(_name)  (&dev_attr_##_name.attr)

/* read-only attributes reporting hardware capabilities of a channel */
static DEVICE_ATTR_RO(available_directions);
static DEVICE_ATTR_RO(available_datatypes);
static DEVICE_ATTR_RO(number_of_packet_buffers);
static DEVICE_ATTR_RO(number_of_stream_buffers);
static DEVICE_ATTR_RO(size_of_stream_buffer);
static DEVICE_ATTR_RO(size_of_packet_buffer);
static DEVICE_ATTR_RO(channel_starving);
/* read-write attributes configuring the channel */
static DEVICE_ATTR_RW(set_buffer_size);
static DEVICE_ATTR_RW(set_number_of_buffers);
static DEVICE_ATTR_RW(set_direction);
static DEVICE_ATTR_RW(set_datatype);
static DEVICE_ATTR_RW(set_subbuffer_size);
static DEVICE_ATTR_RW(set_packets_per_xact);

/* sysfs attributes exposed by every channel device */
static struct attribute *channel_attrs[] = {
	DEV_ATTR(available_directions),
	DEV_ATTR(available_datatypes),
	DEV_ATTR(number_of_packet_buffers),
	DEV_ATTR(number_of_stream_buffers),
	DEV_ATTR(size_of_stream_buffer),
	DEV_ATTR(size_of_packet_buffer),
	DEV_ATTR(channel_starving),
	DEV_ATTR(set_buffer_size),
	DEV_ATTR(set_number_of_buffers),
	DEV_ATTR(set_direction),
	DEV_ATTR(set_datatype),
	DEV_ATTR(set_subbuffer_size),
	DEV_ATTR(set_packets_per_xact),
	NULL,
};

static struct attribute_group channel_attr_group = {
	.attrs = channel_attrs,
};

static const struct attribute_group *channel_attr_groups[] = {
	&channel_attr_group,
	NULL,
};
466
467 static ssize_t description_show(struct device *dev,
468                                 struct device_attribute *attr,
469                                 char *buf)
470 {
471         struct most_interface *iface = to_most_interface(dev);
472
473         return snprintf(buf, PAGE_SIZE, "%s\n", iface->description);
474 }
475
476 static ssize_t interface_show(struct device *dev,
477                               struct device_attribute *attr,
478                               char *buf)
479 {
480         struct most_interface *iface = to_most_interface(dev);
481
482         switch (iface->interface) {
483         case ITYPE_LOOPBACK:
484                 return snprintf(buf, PAGE_SIZE, "loopback\n");
485         case ITYPE_I2C:
486                 return snprintf(buf, PAGE_SIZE, "i2c\n");
487         case ITYPE_I2S:
488                 return snprintf(buf, PAGE_SIZE, "i2s\n");
489         case ITYPE_TSI:
490                 return snprintf(buf, PAGE_SIZE, "tsi\n");
491         case ITYPE_HBI:
492                 return snprintf(buf, PAGE_SIZE, "hbi\n");
493         case ITYPE_MEDIALB_DIM:
494                 return snprintf(buf, PAGE_SIZE, "mlb_dim\n");
495         case ITYPE_MEDIALB_DIM2:
496                 return snprintf(buf, PAGE_SIZE, "mlb_dim2\n");
497         case ITYPE_USB:
498                 return snprintf(buf, PAGE_SIZE, "usb\n");
499         case ITYPE_PCIE:
500                 return snprintf(buf, PAGE_SIZE, "pcie\n");
501         }
502         return snprintf(buf, PAGE_SIZE, "unknown\n");
503 }
504
static DEVICE_ATTR_RO(description);
static DEVICE_ATTR_RO(interface);

/* sysfs attributes exposed by every registered interface device */
static struct attribute *interface_attrs[] = {
	DEV_ATTR(description),
	DEV_ATTR(interface),
	NULL,
};

static struct attribute_group interface_attr_group = {
	.attrs = interface_attrs,
};

static const struct attribute_group *interface_attr_groups[] = {
	&interface_attr_group,
	NULL,
};
522
523 static struct core_component *match_module(char *name)
524 {
525         struct core_component *comp;
526
527         list_for_each_entry(comp, &mc.comp_list, list) {
528                 if (!strcmp(comp->name, name))
529                         return comp;
530         }
531         return NULL;
532 }
533
/**
 * print_links - append the component links of one interface to a buffer
 * @dev: device representing a registered most_interface
 * @data: destination char buffer (a sysfs page)
 *
 * Called via bus_for_each_dev() from links_show().  For every channel of
 * the interface that has a component attached on pipe0 or pipe1 a line of
 * the form "<component>:<interface>:<channel>" is written.
 *
 * NOTE(review): offs restarts at 0 on every invocation, so with more than
 * one interface on the bus each call overwrites the previous device's
 * output -- verify against a multi-interface setup.
 * NOTE(review): not static; confirm it is not referenced from another
 * file, otherwise it should get internal linkage.
 */
int print_links(struct device *dev, void *data)
{
	int offs = 0;
	char *buf = data;
	struct most_channel *c;
	struct most_interface *iface = to_most_interface(dev);

	list_for_each_entry(c, &iface->p->channel_list, list) {
		if (c->pipe0.comp) {
			offs += snprintf(buf + offs,
					 PAGE_SIZE - offs,
					 "%s:%s:%s\n",
					 c->pipe0.comp->name,
					 dev_name(&iface->dev),
					 dev_name(&c->dev));
		}
		if (c->pipe1.comp) {
			offs += snprintf(buf + offs,
					 PAGE_SIZE - offs,
					 "%s:%s:%s\n",
					 c->pipe1.comp->name,
					 dev_name(&iface->dev),
					 dev_name(&c->dev));
		}
	}
	return 0;
}
561
562 static ssize_t links_show(struct device_driver *drv, char *buf)
563 {
564         bus_for_each_dev(&mc.bus, NULL, buf, print_links);
565         return strlen(buf);
566 }
567
568 static ssize_t modules_show(struct device_driver *drv, char *buf)
569 {
570         struct core_component *comp;
571         int offs = 0;
572
573         list_for_each_entry(comp, &mc.comp_list, list) {
574                 offs += snprintf(buf + offs, PAGE_SIZE - offs, "%s\n",
575                                  comp->name);
576         }
577         return offs;
578 }
579 /**
580  * split_string - parses buf and extracts ':' separated substrings.
581  *
582  * @buf: complete string from attribute 'add_channel'
583  * @a: storage for 1st substring (=interface name)
584  * @b: storage for 2nd substring (=channel name)
585  * @c: storage for 3rd substring (=component name)
586  * @d: storage optional 4th substring (=user defined name)
587  *
588  * Examples:
589  *
590  * Input: "mdev0:ch6:cdev:my_channel\n" or
591  *        "mdev0:ch6:cdev:my_channel"
592  *
593  * Output: *a -> "mdev0", *b -> "ch6", *c -> "cdev" *d -> "my_channel"
594  *
595  * Input: "mdev1:ep81:cdev\n"
596  * Output: *a -> "mdev1", *b -> "ep81", *c -> "cdev" *d -> ""
597  *
598  * Input: "mdev1:ep81"
599  * Output: *a -> "mdev1", *b -> "ep81", *c -> "cdev" *d == NULL
600  */
601 static int split_string(char *buf, char **a, char **b, char **c, char **d)
602 {
603         *a = strsep(&buf, ":");
604         if (!*a)
605                 return -EIO;
606
607         *b = strsep(&buf, ":\n");
608         if (!*b)
609                 return -EIO;
610
611         *c = strsep(&buf, ":\n");
612         if (!*c)
613                 return -EIO;
614
615         if (d)
616                 *d = strsep(&buf, ":\n");
617
618         return 0;
619 }
620
/* bus_find_device() callback: match a bus device by its exact name. */
static int match_bus_dev(struct device *dev, void *data)
{
	const char *wanted_name = data;

	return strcmp(dev_name(dev), wanted_name) == 0;
}
627
/**
 * get_channel - get pointer to channel
 * @mdev: name of the device interface
 * @mdev_ch: name of channel
 *
 * Looks up the bus device named @mdev and scans its channel list for a
 * channel whose device name matches @mdev_ch.  Returns NULL when either
 * lookup fails.
 *
 * NOTE(review): bus_find_device() returns the device with a reference
 * taken and no matching put_device() is visible here -- confirm the
 * reference is dropped elsewhere or this leaks a refcount per lookup.
 */
static struct most_channel *get_channel(char *mdev, char *mdev_ch)
{
	struct device *dev = NULL;
	struct most_interface *iface;
	struct most_channel *c, *tmp;

	dev = bus_find_device(&mc.bus, NULL, mdev, match_bus_dev);
	if (!dev)
		return NULL;
	iface = to_most_interface(dev);
	list_for_each_entry_safe(c, tmp, &iface->p->channel_list, list) {
		if (!strcmp(dev_name(&c->dev), mdev_ch))
			return c;
	}
	return NULL;
}
649
650 static
651 inline int link_channel_to_component(struct most_channel *c,
652                                      struct core_component *comp,
653                                      char *comp_param)
654 {
655         int ret;
656         struct core_component **comp_ptr;
657
658         if (!c->pipe0.comp)
659                 comp_ptr = &c->pipe0.comp;
660         else if (!c->pipe1.comp)
661                 comp_ptr = &c->pipe1.comp;
662         else
663                 return -ENOSPC;
664
665         *comp_ptr = comp;
666         ret = comp->probe_channel(c->iface, c->channel_id,
667                                  &c->cfg, comp_param);
668         if (ret) {
669                 *comp_ptr = NULL;
670                 return ret;
671         }
672
673         return 0;
674 }
675
676 /**
677  * add_link_store - store function for add_link attribute
678  * @drv: device driver
679  * @buf: buffer
680  * @len: buffer length
681  *
682  * This parses the string given by buf and splits it into
683  * four substrings. Note: last substring is optional. In case a cdev
684  * component is loaded the optional 4th substring will make up the name of
685  * device node in the /dev directory. If omitted, the device node will
686  * inherit the channel's name within sysfs.
687  *
688  * Searches for (device, channel) pair and probes the component
689  *
690  * Example:
691  * (1) echo "mdev0:ch6:cdev:my_rxchannel" >add_link
692  * (2) echo "mdev1:ep81:cdev" >add_link
693  *
694  * (1) would create the device node /dev/my_rxchannel
695  * (2) would create the device node /dev/mdev1-ep81
696  */
697 static ssize_t add_link_store(struct device_driver *drv,
698                               const char *buf,
699                               size_t len)
700 {
701         struct most_channel *c;
702         struct core_component *comp;
703         char buffer[STRING_SIZE];
704         char *mdev;
705         char *mdev_ch;
706         char *comp_name;
707         char *comp_param;
708         char devnod_buf[STRING_SIZE];
709         int ret;
710         size_t max_len = min_t(size_t, len + 1, STRING_SIZE);
711
712         strlcpy(buffer, buf, max_len);
713
714         ret = split_string(buffer, &mdev, &mdev_ch, &comp_name, &comp_param);
715         if (ret)
716                 return ret;
717         comp = match_module(comp_name);
718         if (!comp_param || *comp_param == 0) {
719                 snprintf(devnod_buf, sizeof(devnod_buf), "%s-%s", mdev,
720                          mdev_ch);
721                 comp_param = devnod_buf;
722         }
723
724         c = get_channel(mdev, mdev_ch);
725         if (!c)
726                 return -ENODEV;
727
728         ret = link_channel_to_component(c, comp, comp_param);
729         if (ret)
730                 return ret;
731
732         return len;
733 }
734
735 /**
736  * remove_link_store - store function for remove_link attribute
737  * @drv: device driver
738  * @buf: buffer
739  * @len: buffer length
740  *
741  * Example:
742  * echo "mdev0:ep81" >remove_link
743  */
744 static ssize_t remove_link_store(struct device_driver *drv,
745                                  const char *buf,
746                                  size_t len)
747 {
748         struct most_channel *c;
749         struct core_component *comp;
750         char buffer[STRING_SIZE];
751         char *mdev;
752         char *mdev_ch;
753         char *comp_name;
754         int ret;
755         size_t max_len = min_t(size_t, len + 1, STRING_SIZE);
756
757         strlcpy(buffer, buf, max_len);
758         ret = split_string(buffer, &mdev, &mdev_ch, &comp_name, NULL);
759         if (ret)
760                 return ret;
761         comp = match_module(comp_name);
762         c = get_channel(mdev, mdev_ch);
763         if (!c)
764                 return -ENODEV;
765
766         if (comp->disconnect_channel(c->iface, c->channel_id))
767                 return -EIO;
768         if (c->pipe0.comp == comp)
769                 c->pipe0.comp = NULL;
770         if (c->pipe1.comp == comp)
771                 c->pipe1.comp = NULL;
772         return len;
773 }
774
/* shorthand for referencing a DRIVER_ATTR's embedded struct attribute */
#define DRV_ATTR(_name)  (&driver_attr_##_name.attr)

static DRIVER_ATTR_RO(links);
static DRIVER_ATTR_RO(modules);
static DRIVER_ATTR_WO(add_link);
static DRIVER_ATTR_WO(remove_link);

/* sysfs attributes exposed by the mostcore driver itself */
static struct attribute *module_attrs[] = {
	DRV_ATTR(links),
	DRV_ATTR(modules),
	DRV_ATTR(add_link),
	DRV_ATTR(remove_link),
	NULL,
};

static struct attribute_group module_attr_group = {
	.attrs = module_attrs,
};

static const struct attribute_group *module_attr_groups[] = {
	&module_attr_group,
	NULL,
};
798
/*
 * Bus match callback: every device except the core's own pseudo-device
 * (named "most") matches the driver.
 */
int most_match(struct device *dev, struct device_driver *drv)
{
	return strcmp(dev_name(dev), "most") != 0;
}
806
807 static inline void trash_mbo(struct mbo *mbo)
808 {
809         unsigned long flags;
810         struct most_channel *c = mbo->context;
811
812         spin_lock_irqsave(&c->fifo_lock, flags);
813         list_add(&mbo->list, &c->trash_fifo);
814         spin_unlock_irqrestore(&c->fifo_lock, flags);
815 }
816
817 static bool hdm_mbo_ready(struct most_channel *c)
818 {
819         bool empty;
820
821         if (c->enqueue_halt)
822                 return false;
823
824         spin_lock_irq(&c->fifo_lock);
825         empty = list_empty(&c->halt_fifo);
826         spin_unlock_irq(&c->fifo_lock);
827
828         return !empty;
829 }
830
831 static void nq_hdm_mbo(struct mbo *mbo)
832 {
833         unsigned long flags;
834         struct most_channel *c = mbo->context;
835
836         spin_lock_irqsave(&c->fifo_lock, flags);
837         list_add_tail(&mbo->list, &c->halt_fifo);
838         spin_unlock_irqrestore(&c->fifo_lock, flags);
839         wake_up_interruptible(&c->hdm_fifo_wq);
840 }
841
/**
 * hdm_enqueue_thread - kthread feeding MBOs from halt_fifo to the HDM
 * @data: the channel this thread serves (struct most_channel *)
 *
 * Sleeps until an MBO is ready (or the thread is stopped), pops it from
 * halt_fifo and hands it to the interface's enqueue() routine.  nq_mutex
 * serializes the enqueue call against concurrent halt paths; fifo_lock is
 * held only around the list manipulation.
 */
static int hdm_enqueue_thread(void *data)
{
	struct most_channel *c = data;
	struct mbo *mbo;
	int ret;
	typeof(c->iface->enqueue) enqueue = c->iface->enqueue;

	while (likely(!kthread_should_stop())) {
		wait_event_interruptible(c->hdm_fifo_wq,
					 hdm_mbo_ready(c) ||
					 kthread_should_stop());

		/* recheck under the locks: state may have changed meanwhile */
		mutex_lock(&c->nq_mutex);
		spin_lock_irq(&c->fifo_lock);
		if (unlikely(c->enqueue_halt || list_empty(&c->halt_fifo))) {
			spin_unlock_irq(&c->fifo_lock);
			mutex_unlock(&c->nq_mutex);
			continue;
		}

		mbo = list_pop_mbo(&c->halt_fifo);
		spin_unlock_irq(&c->fifo_lock);

		/* Rx buffers are always offered with the full configured size */
		if (c->cfg.direction == MOST_CH_RX)
			mbo->buffer_length = c->cfg.buffer_size;

		ret = enqueue(mbo->ifp, mbo->hdm_channel_id, mbo);
		mutex_unlock(&c->nq_mutex);

		if (unlikely(ret)) {
			pr_err("hdm enqueue failed\n");
			/* requeue the MBO; thread exits and drops its handle */
			nq_hdm_mbo(mbo);
			c->hdm_enqueue_task = NULL;
			return 0;
		}
	}

	return 0;
}
881
882 static int run_enqueue_thread(struct most_channel *c, int channel_id)
883 {
884         struct task_struct *task =
885                 kthread_run(hdm_enqueue_thread, c, "hdm_fifo_%d",
886                             channel_id);
887
888         if (IS_ERR(task))
889                 return PTR_ERR(task);
890
891         c->hdm_enqueue_task = task;
892         return 0;
893 }
894
/**
 * arm_mbo - recycle MBO for further usage
 * @mbo: most buffer
 *
 * This puts an MBO back to the list to have it ready for up coming
 * tx transactions.
 *
 * In case the MBO belongs to a channel that recently has been
 * poisoned, the MBO is scheduled to be trashed.
 * Calls the completion handler of an attached component.
 */
static void arm_mbo(struct mbo *mbo)
{
	unsigned long flags;
	struct most_channel *c;

	BUG_ON((!mbo) || (!mbo->context));
	c = mbo->context;

	if (c->is_poisoned) {
		/* channel is going away: park the MBO for later freeing */
		trash_mbo(mbo);
		return;
	}

	spin_lock_irqsave(&c->fifo_lock, flags);
	/* return the buffer to its owner's budget */
	++*mbo->num_buffers_ptr;
	list_add_tail(&mbo->list, &c->fifo);
	spin_unlock_irqrestore(&c->fifo_lock, flags);

	/* notify attached components that a tx buffer is available again */
	if (c->pipe0.refs && c->pipe0.comp->tx_completion)
		c->pipe0.comp->tx_completion(c->iface, c->channel_id);

	if (c->pipe1.refs && c->pipe1.comp->tx_completion)
		c->pipe1.comp->tx_completion(c->iface, c->channel_id);
}
930
931 /**
932  * arm_mbo_chain - helper function that arms an MBO chain for the HDM
933  * @c: pointer to interface channel
934  * @dir: direction of the channel
935  * @compl: pointer to completion function
936  *
937  * This allocates buffer objects including the containing DMA coherent
938  * buffer and puts them in the fifo.
939  * Buffers of Rx channels are put in the kthread fifo, hence immediately
940  * submitted to the HDM.
941  *
942  * Returns the number of allocated and enqueued MBOs.
943  */
944 static int arm_mbo_chain(struct most_channel *c, int dir,
945                          void (*compl)(struct mbo *))
946 {
947         unsigned int i;
948         int retval;
949         struct mbo *mbo;
950         u32 coherent_buf_size = c->cfg.buffer_size + c->cfg.extra_len;
951
952         atomic_set(&c->mbo_nq_level, 0);
953
954         for (i = 0; i < c->cfg.num_buffers; i++) {
955                 mbo = kzalloc(sizeof(*mbo), GFP_KERNEL);
956                 if (!mbo) {
957                         retval = i;
958                         goto _exit;
959                 }
960                 mbo->context = c;
961                 mbo->ifp = c->iface;
962                 mbo->hdm_channel_id = c->channel_id;
963                 mbo->virt_address = dma_alloc_coherent(NULL,
964                                                        coherent_buf_size,
965                                                        &mbo->bus_address,
966                                                        GFP_KERNEL);
967                 if (!mbo->virt_address) {
968                         pr_info("WARN: No DMA coherent buffer.\n");
969                         retval = i;
970                         goto _error1;
971                 }
972                 mbo->complete = compl;
973                 mbo->num_buffers_ptr = &dummy_num_buffers;
974                 if (dir == MOST_CH_RX) {
975                         nq_hdm_mbo(mbo);
976                         atomic_inc(&c->mbo_nq_level);
977                 } else {
978                         arm_mbo(mbo);
979                 }
980         }
981         return i;
982
983 _error1:
984         kfree(mbo);
985 _exit:
986         return retval;
987 }
988
/**
 * most_submit_mbo - submits an MBO to fifo
 * @mbo: most buffer
 *
 * Queues the buffer for (re)submission to the HDM by the channel's
 * enqueue thread. A NULL @mbo or an MBO without a channel context
 * triggers a one-time warning and is silently dropped.
 */
void most_submit_mbo(struct mbo *mbo)
{
	if (WARN_ONCE(!mbo || !mbo->context,
		      "bad mbo or missing channel reference\n"))
		return;

	nq_hdm_mbo(mbo);
}
EXPORT_SYMBOL_GPL(most_submit_mbo);
1002
1003 /**
1004  * most_write_completion - write completion handler
1005  * @mbo: most buffer
1006  *
1007  * This recycles the MBO for further usage. In case the channel has been
1008  * poisoned, the MBO is scheduled to be trashed.
1009  */
1010 static void most_write_completion(struct mbo *mbo)
1011 {
1012         struct most_channel *c;
1013
1014         BUG_ON((!mbo) || (!mbo->context));
1015
1016         c = mbo->context;
1017         if (mbo->status == MBO_E_INVAL)
1018                 pr_info("WARN: Tx MBO status: invalid\n");
1019         if (unlikely(c->is_poisoned || (mbo->status == MBO_E_CLOSE)))
1020                 trash_mbo(mbo);
1021         else
1022                 arm_mbo(mbo);
1023 }
1024
1025 int channel_has_mbo(struct most_interface *iface, int id,
1026                     struct core_component *comp)
1027 {
1028         struct most_channel *c = iface->p->channel[id];
1029         unsigned long flags;
1030         int empty;
1031
1032         if (unlikely(!c))
1033                 return -EINVAL;
1034
1035         if (c->pipe0.refs && c->pipe1.refs &&
1036             ((comp == c->pipe0.comp && c->pipe0.num_buffers <= 0) ||
1037              (comp == c->pipe1.comp && c->pipe1.num_buffers <= 0)))
1038                 return 0;
1039
1040         spin_lock_irqsave(&c->fifo_lock, flags);
1041         empty = list_empty(&c->fifo);
1042         spin_unlock_irqrestore(&c->fifo_lock, flags);
1043         return !empty;
1044 }
1045 EXPORT_SYMBOL_GPL(channel_has_mbo);
1046
1047 /**
1048  * most_get_mbo - get pointer to an MBO of pool
1049  * @iface: pointer to interface instance
1050  * @id: channel ID
1051  * @comp: driver component
1052  *
1053  * This attempts to get a free buffer out of the channel fifo.
1054  * Returns a pointer to MBO on success or NULL otherwise.
1055  */
1056 struct mbo *most_get_mbo(struct most_interface *iface, int id,
1057                          struct core_component *comp)
1058 {
1059         struct mbo *mbo;
1060         struct most_channel *c;
1061         unsigned long flags;
1062         int *num_buffers_ptr;
1063
1064         c = iface->p->channel[id];
1065         if (unlikely(!c))
1066                 return NULL;
1067
1068         if (c->pipe0.refs && c->pipe1.refs &&
1069             ((comp == c->pipe0.comp && c->pipe0.num_buffers <= 0) ||
1070              (comp == c->pipe1.comp && c->pipe1.num_buffers <= 0)))
1071                 return NULL;
1072
1073         if (comp == c->pipe0.comp)
1074                 num_buffers_ptr = &c->pipe0.num_buffers;
1075         else if (comp == c->pipe1.comp)
1076                 num_buffers_ptr = &c->pipe1.num_buffers;
1077         else
1078                 num_buffers_ptr = &dummy_num_buffers;
1079
1080         spin_lock_irqsave(&c->fifo_lock, flags);
1081         if (list_empty(&c->fifo)) {
1082                 spin_unlock_irqrestore(&c->fifo_lock, flags);
1083                 return NULL;
1084         }
1085         mbo = list_pop_mbo(&c->fifo);
1086         --*num_buffers_ptr;
1087         spin_unlock_irqrestore(&c->fifo_lock, flags);
1088
1089         mbo->num_buffers_ptr = num_buffers_ptr;
1090         mbo->buffer_length = c->cfg.buffer_size;
1091         return mbo;
1092 }
1093 EXPORT_SYMBOL_GPL(most_get_mbo);
1094
1095 /**
1096  * most_put_mbo - return buffer to pool
1097  * @mbo: most buffer
1098  */
1099 void most_put_mbo(struct mbo *mbo)
1100 {
1101         struct most_channel *c = mbo->context;
1102
1103         if (c->cfg.direction == MOST_CH_TX) {
1104                 arm_mbo(mbo);
1105                 return;
1106         }
1107         nq_hdm_mbo(mbo);
1108         atomic_inc(&c->mbo_nq_level);
1109 }
1110 EXPORT_SYMBOL_GPL(most_put_mbo);
1111
1112 /**
1113  * most_read_completion - read completion handler
1114  * @mbo: most buffer
1115  *
1116  * This function is called by the HDM when data has been received from the
1117  * hardware and copied to the buffer of the MBO.
1118  *
1119  * In case the channel has been poisoned it puts the buffer in the trash queue.
1120  * Otherwise, it passes the buffer to an component for further processing.
1121  */
1122 static void most_read_completion(struct mbo *mbo)
1123 {
1124         struct most_channel *c = mbo->context;
1125
1126         if (unlikely(c->is_poisoned || (mbo->status == MBO_E_CLOSE))) {
1127                 trash_mbo(mbo);
1128                 return;
1129         }
1130
1131         if (mbo->status == MBO_E_INVAL) {
1132                 nq_hdm_mbo(mbo);
1133                 atomic_inc(&c->mbo_nq_level);
1134                 return;
1135         }
1136
1137         if (atomic_sub_and_test(1, &c->mbo_nq_level))
1138                 c->is_starving = 1;
1139
1140         if (c->pipe0.refs && c->pipe0.comp->rx_completion &&
1141             c->pipe0.comp->rx_completion(mbo) == 0)
1142                 return;
1143
1144         if (c->pipe1.refs && c->pipe1.comp->rx_completion &&
1145             c->pipe1.comp->rx_completion(mbo) == 0)
1146                 return;
1147
1148         most_put_mbo(mbo);
1149 }
1150
/**
 * most_start_channel - prepares a channel for communication
 * @iface: pointer to interface instance
 * @id: channel ID
 * @comp: driver component
 *
 * This prepares the channel for usage. Cross-checks whether the
 * channel's been properly configured. The heavy setup (configure,
 * buffer allocation, enqueue thread) only runs for the first user;
 * later callers just take a reference under start_mutex.
 *
 * Returns 0 on success or error code otherwise.
 */
int most_start_channel(struct most_interface *iface, int id,
		       struct core_component *comp)
{
	int num_buffer;
	int ret;
	struct most_channel *c = iface->p->channel[id];

	if (unlikely(!c))
		return -EINVAL;

	mutex_lock(&c->start_mutex);
	if (c->pipe0.refs + c->pipe1.refs > 0)
		goto out; /* already started by another component */

	/* pin the HDM module for the lifetime of the running channel */
	if (!try_module_get(iface->mod)) {
		pr_info("failed to acquire HDM lock\n");
		mutex_unlock(&c->start_mutex);
		return -ENOLCK;
	}

	c->cfg.extra_len = 0;
	if (c->iface->configure(c->iface, c->channel_id, &c->cfg)) {
		pr_info("channel configuration failed. Go check settings...\n");
		ret = -EINVAL;
		goto error;
	}

	init_waitqueue_head(&c->hdm_fifo_wq);

	if (c->cfg.direction == MOST_CH_RX)
		num_buffer = arm_mbo_chain(c, c->cfg.direction,
					   most_read_completion);
	else
		num_buffer = arm_mbo_chain(c, c->cfg.direction,
					   most_write_completion);
	/* zero means not a single MBO could be allocated */
	if (unlikely(!num_buffer)) {
		pr_info("failed to allocate memory\n");
		ret = -ENOMEM;
		goto error;
	}

	ret = run_enqueue_thread(c, id);
	if (ret)
		goto error;

	c->is_starving = 0;
	/* split the buffer budget between the two possible pipes */
	c->pipe0.num_buffers = c->cfg.num_buffers / 2;
	c->pipe1.num_buffers = c->cfg.num_buffers - c->pipe0.num_buffers;
	atomic_set(&c->mbo_ref, num_buffer);

out:
	if (comp == c->pipe0.comp)
		c->pipe0.refs++;
	if (comp == c->pipe1.comp)
		c->pipe1.refs++;
	mutex_unlock(&c->start_mutex);
	return 0;

error:
	module_put(iface->mod);
	mutex_unlock(&c->start_mutex);
	return ret;
}
EXPORT_SYMBOL_GPL(most_start_channel);
1226
/**
 * most_stop_channel - stops a running channel
 * @iface: pointer to interface instance
 * @id: channel ID
 * @comp: driver component
 *
 * Drops @comp's reference on the channel. The channel is actually torn
 * down (enqueue thread stopped, channel poisoned, fifos flushed) only
 * when this call removes the last reference.
 * Returns 0 on success or a negative error code otherwise.
 */
int most_stop_channel(struct most_interface *iface, int id,
		      struct core_component *comp)
{
	struct most_channel *c;

	if (unlikely((!iface) || (id >= iface->num_channels) || (id < 0))) {
		pr_err("Bad interface or index out of range\n");
		return -EINVAL;
	}
	c = iface->p->channel[id];
	if (unlikely(!c))
		return -EINVAL;

	mutex_lock(&c->start_mutex);
	/* another component still uses the channel; just drop our ref */
	if (c->pipe0.refs + c->pipe1.refs >= 2)
		goto out;

	if (c->hdm_enqueue_task)
		kthread_stop(c->hdm_enqueue_task);
	c->hdm_enqueue_task = NULL;

	if (iface->mod)
		module_put(iface->mod);

	c->is_poisoned = true;
	if (c->iface->poison_channel(c->iface, c->channel_id)) {
		pr_err("Cannot stop channel %d of mdev %s\n", c->channel_id,
		       c->iface->description);
		mutex_unlock(&c->start_mutex);
		return -EAGAIN;
	}
	flush_trash_fifo(c);
	flush_channel_fifos(c);

#ifdef CMPL_INTERRUPTIBLE
	if (wait_for_completion_interruptible(&c->cleanup)) {
		pr_info("Interrupted while clean up ch %d\n", c->channel_id);
		mutex_unlock(&c->start_mutex);
		return -EINTR;
	}
#else
	/* wait until the HDM has returned all buffers */
	wait_for_completion(&c->cleanup);
#endif
	c->is_poisoned = false;

out:
	if (comp == c->pipe0.comp)
		c->pipe0.refs--;
	if (comp == c->pipe1.comp)
		c->pipe1.refs--;
	mutex_unlock(&c->start_mutex);
	return 0;
}
EXPORT_SYMBOL_GPL(most_stop_channel);
1287
1288 /**
1289  * most_register_component - registers a driver component with the core
1290  * @comp: driver component
1291  */
1292 int most_register_component(struct core_component *comp)
1293 {
1294         if (!comp) {
1295                 pr_err("Bad component\n");
1296                 return -EINVAL;
1297         }
1298         list_add_tail(&comp->list, &mc.comp_list);
1299         pr_info("registered new core component %s\n", comp->name);
1300         return 0;
1301 }
1302 EXPORT_SYMBOL_GPL(most_register_component);
1303
1304 static int disconnect_channels(struct device *dev, void *data)
1305 {
1306         struct most_interface *iface;
1307         struct most_channel *c, *tmp;
1308         struct core_component *comp = data;
1309
1310         iface = to_most_interface(dev);
1311         list_for_each_entry_safe(c, tmp, &iface->p->channel_list, list) {
1312                 if (c->pipe0.comp == comp || c->pipe1.comp == comp)
1313                         comp->disconnect_channel(c->iface, c->channel_id);
1314                 if (c->pipe0.comp == comp)
1315                         c->pipe0.comp = NULL;
1316                 if (c->pipe1.comp == comp)
1317                         c->pipe1.comp = NULL;
1318         }
1319         return 0;
1320 }
1321
1322 /**
1323  * most_deregister_component - deregisters a driver component with the core
1324  * @comp: driver component
1325  */
1326 int most_deregister_component(struct core_component *comp)
1327 {
1328         if (!comp) {
1329                 pr_err("Bad component\n");
1330                 return -EINVAL;
1331         }
1332
1333         bus_for_each_dev(&mc.bus, NULL, comp, disconnect_channels);
1334         list_del(&comp->list);
1335         pr_info("deregistering component %s\n", comp->name);
1336         return 0;
1337 }
1338 EXPORT_SYMBOL_GPL(most_deregister_component);
1339
/*
 * Device release callback for interface devices. Only logs; the core
 * does not free the interface object here (iface->p is freed in
 * most_deregister_interface()).
 */
static void release_interface(struct device *dev)
{
	pr_info("releasing interface dev %s...\n", dev_name(dev));
}
1344
/*
 * Device release callback for channel devices. Only logs; the channel
 * struct is kfree()d by the callers after device_unregister().
 * NOTE(review): freeing outside the release callback bypasses device
 * refcounting - confirm no other references can be held at that point.
 */
static void release_channel(struct device *dev)
{
	pr_info("releasing channel dev %s...\n", dev_name(dev));
}
1349
/**
 * most_register_interface - registers an interface with core
 * @iface: device interface
 *
 * Allocates and initializes a new interface instance and all of its channels.
 * Returns 0 on success or a negative error code otherwise.
 */
int most_register_interface(struct most_interface *iface)
{
	unsigned int i;
	int id;
	struct most_channel *c;

	if (!iface || !iface->enqueue || !iface->configure ||
	    !iface->poison_channel || (iface->num_channels > MAX_CHANNELS)) {
		pr_err("Bad interface or channel overflow\n");
		return -EINVAL;
	}

	/* grab a unique device id for the "mdev%d" name */
	id = ida_simple_get(&mdev_id, 0, 0, GFP_KERNEL);
	if (id < 0) {
		pr_info("Failed to alloc mdev ID\n");
		return id;
	}

	iface->p = kzalloc(sizeof(*iface->p), GFP_KERNEL);
	if (!iface->p) {
		pr_info("Failed to allocate interface instance\n");
		ida_simple_remove(&mdev_id, id);
		return -ENOMEM;
	}

	INIT_LIST_HEAD(&iface->p->channel_list);
	iface->p->dev_id = id;
	snprintf(iface->p->name, STRING_SIZE, "mdev%d", id);
	iface->dev.init_name = iface->p->name;
	iface->dev.bus = &mc.bus;
	iface->dev.parent = &mc.dev;
	iface->dev.groups = interface_attr_groups;
	iface->dev.release = release_interface;
	if (device_register(&iface->dev)) {
		pr_err("registering iface->dev failed\n");
		kfree(iface->p);
		ida_simple_remove(&mdev_id, id);
		return -ENOMEM;
	}

	/* create one child device per channel of the interface */
	for (i = 0; i < iface->num_channels; i++) {
		const char *name_suffix = iface->channel_vector[i].name_suffix;

		c = kzalloc(sizeof(*c), GFP_KERNEL);
		if (!c)
			goto free_instance;
		/* fall back to a generic "ch%d" name if none was supplied */
		if (!name_suffix)
			snprintf(c->name, STRING_SIZE, "ch%d", i);
		else
			snprintf(c->name, STRING_SIZE, "%s", name_suffix);
		c->dev.init_name = c->name;
		c->dev.parent = &iface->dev;
		c->dev.groups = channel_attr_groups;
		c->dev.release = release_channel;
		if (device_register(&c->dev)) {
			pr_err("registering c->dev failed\n");
			goto free_instance_nodev;
		}
		iface->p->channel[i] = c;
		c->is_starving = 0;
		c->iface = iface;
		c->channel_id = i;
		c->keep_mbo = false;
		c->enqueue_halt = false;
		c->is_poisoned = false;
		c->cfg.direction = 0;
		c->cfg.data_type = 0;
		c->cfg.num_buffers = 0;
		c->cfg.buffer_size = 0;
		c->cfg.subbuffer_size = 0;
		c->cfg.packets_per_xact = 0;
		spin_lock_init(&c->fifo_lock);
		INIT_LIST_HEAD(&c->fifo);
		INIT_LIST_HEAD(&c->trash_fifo);
		INIT_LIST_HEAD(&c->halt_fifo);
		init_completion(&c->cleanup);
		atomic_set(&c->mbo_ref, 0);
		mutex_init(&c->start_mutex);
		mutex_init(&c->nq_mutex);
		list_add_tail(&c->list, &iface->p->channel_list);
	}
	pr_info("registered new device mdev%d (%s)\n",
		id, iface->description);
	return 0;

free_instance_nodev:
	/*
	 * NOTE(review): kfree() after a failed device_register() bypasses
	 * the driver core's refcounting; put_device() is the documented
	 * cleanup - confirm and fix separately.
	 */
	kfree(c);

free_instance:
	/* unwind the channels that were fully registered */
	while (i > 0) {
		c = iface->p->channel[--i];
		device_unregister(&c->dev);
		kfree(c);
	}
	kfree(iface->p);
	device_unregister(&iface->dev);
	ida_simple_remove(&mdev_id, id);
	return -ENOMEM;
}
EXPORT_SYMBOL_GPL(most_register_interface);
1457
1458 /**
1459  * most_deregister_interface - deregisters an interface with core
1460  * @iface: device interface
1461  *
1462  * Before removing an interface instance from the list, all running
1463  * channels are stopped and poisoned.
1464  */
1465 void most_deregister_interface(struct most_interface *iface)
1466 {
1467         int i;
1468         struct most_channel *c;
1469
1470         pr_info("deregistering device %s (%s)\n", dev_name(&iface->dev), iface->description);
1471         for (i = 0; i < iface->num_channels; i++) {
1472                 c = iface->p->channel[i];
1473                 if (c->pipe0.comp)
1474                         c->pipe0.comp->disconnect_channel(c->iface,
1475                                                         c->channel_id);
1476                 if (c->pipe1.comp)
1477                         c->pipe1.comp->disconnect_channel(c->iface,
1478                                                         c->channel_id);
1479                 c->pipe0.comp = NULL;
1480                 c->pipe1.comp = NULL;
1481                 list_del(&c->list);
1482                 device_unregister(&c->dev);
1483                 kfree(c);
1484         }
1485
1486         ida_simple_remove(&mdev_id, iface->p->dev_id);
1487         kfree(iface->p);
1488         device_unregister(&iface->dev);
1489 }
1490 EXPORT_SYMBOL_GPL(most_deregister_interface);
1491
/**
 * most_stop_enqueue - prevents core from enqueueing MBOs
 * @iface: pointer to interface
 * @id: channel id
 *
 * This is called by an HDM that _cannot_ attend to its duties and
 * is imminent to get run over by the core. The core is not going to
 * enqueue any further packets unless the flagging HDM calls
 * most_resume_enqueue().
 */
void most_stop_enqueue(struct most_interface *iface, int id)
{
	struct most_channel *c = iface->p->channel[id];

	if (!c)
		return;

	/* nq_mutex ensures the flag is not flipped in the middle of an
	 * in-flight enqueue() call of the kthread
	 */
	mutex_lock(&c->nq_mutex);
	c->enqueue_halt = true;
	mutex_unlock(&c->nq_mutex);
}
EXPORT_SYMBOL_GPL(most_stop_enqueue);
1514
/**
 * most_resume_enqueue - allow core to enqueue MBOs again
 * @iface: pointer to interface
 * @id: channel id
 *
 * This clears the enqueue halt flag and enqueues all MBOs currently
 * sitting in the wait fifo.
 */
void most_resume_enqueue(struct most_interface *iface, int id)
{
	struct most_channel *c = iface->p->channel[id];

	if (!c)
		return;

	mutex_lock(&c->nq_mutex);
	c->enqueue_halt = false;
	mutex_unlock(&c->nq_mutex);

	/* kick the enqueue thread so it drains the halt fifo */
	wake_up_interruptible(&c->hdm_fifo_wq);
}
EXPORT_SYMBOL_GPL(most_resume_enqueue);
1537
/* Device release callback for the core's "most_bus" device; log only. */
static void release_most_sub(struct device *dev)
{
	pr_info("releasing most_subsystem\n");
}
1542
1543 static int __init most_init(void)
1544 {
1545         int err;
1546
1547         pr_info("init()\n");
1548         INIT_LIST_HEAD(&mc.comp_list);
1549         ida_init(&mdev_id);
1550
1551         mc.bus.name = "most",
1552         mc.bus.match = most_match,
1553         mc.drv.name = "most_core",
1554         mc.drv.bus = &mc.bus,
1555         mc.drv.groups = module_attr_groups;
1556
1557         err = bus_register(&mc.bus);
1558         if (err) {
1559                 pr_info("Cannot register most bus\n");
1560                 return err;
1561         }
1562         mc.class = class_create(THIS_MODULE, "most");
1563         if (IS_ERR(mc.class)) {
1564                 pr_info("No udev support.\n");
1565                 err = PTR_ERR(mc.class);
1566                 goto exit_bus;
1567         }
1568
1569         err = driver_register(&mc.drv);
1570         if (err) {
1571                 pr_info("Cannot register core driver\n");
1572                 goto exit_class;
1573         }
1574         mc.dev.init_name = "most_bus";
1575         mc.dev.release = release_most_sub;
1576         if (device_register(&mc.dev)) {
1577                 err = -ENOMEM;
1578                 goto exit_driver;
1579         }
1580
1581         return 0;
1582
1583 exit_driver:
1584         driver_unregister(&mc.drv);
1585 exit_class:
1586         class_destroy(mc.class);
1587 exit_bus:
1588         bus_unregister(&mc.bus);
1589         return err;
1590 }
1591
/* Module exit: tear everything down in reverse order of most_init(). */
static void __exit most_exit(void)
{
	pr_info("exit core module\n");
	device_unregister(&mc.dev);
	driver_unregister(&mc.drv);
	class_destroy(mc.class);
	bus_unregister(&mc.bus);
	ida_destroy(&mdev_id);
}
1601
/* Module entry/exit points and metadata. */
module_init(most_init);
module_exit(most_exit);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Christian Gromm <christian.gromm@microchip.com>");
MODULE_DESCRIPTION("Core module of stacked MOST Linux driver");