staging: rtl8723bs: Remove commented code
[linux-2.6-block.git] / drivers / staging / most / core.c
CommitLineData
1a79f22d 1// SPDX-License-Identifier: GPL-2.0
57562a72
CG
2/*
3 * core.c - Implementation of core module of MOST Linux driver stack
4 *
5 * Copyright (C) 2013-2015 Microchip Technology Germany II GmbH & Co. KG
57562a72
CG
6 */
7
8#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
9#include <linux/module.h>
10#include <linux/fs.h>
11#include <linux/slab.h>
12#include <linux/init.h>
13#include <linux/device.h>
14#include <linux/list.h>
15#include <linux/poll.h>
16#include <linux/wait.h>
17#include <linux/kobject.h>
18#include <linux/mutex.h>
19#include <linux/completion.h>
20#include <linux/sysfs.h>
21#include <linux/kthread.h>
22#include <linux/dma-mapping.h>
23#include <linux/idr.h>
057301cd 24#include <most/core.h>
57562a72
CG
25
26#define MAX_CHANNELS 64
27#define STRING_SIZE 80
28
57562a72 29static struct ida mdev_id;
71457d48 30static int dummy_num_buffers;
57562a72 31
14ae5f03
CG
32static struct mostcore {
33 struct device dev;
34 struct device_driver drv;
35 struct bus_type bus;
81ce26b7 36 struct list_head comp_list;
14ae5f03
CG
37} mc;
38
37d641ef 39#define to_driver(d) container_of(d, struct mostcore, drv)
14ae5f03 40
7faeffec 41struct pipe {
5a5abf02 42 struct core_component *comp;
ccfbaee0
CG
43 int refs;
44 int num_buffers;
45};
46
fcb7fad8 47struct most_channel {
4d5f022f 48 struct device dev;
57562a72
CG
49 struct completion cleanup;
50 atomic_t mbo_ref;
51 atomic_t mbo_nq_level;
2aa9b96f 52 u16 channel_id;
845101be 53 char name[STRING_SIZE];
57562a72 54 bool is_poisoned;
f13f6981 55 struct mutex start_mutex;
bf9503f1 56 struct mutex nq_mutex; /* nq thread synchronization */
57562a72
CG
57 int is_starving;
58 struct most_interface *iface;
57562a72
CG
59 struct most_channel_config cfg;
60 bool keep_mbo;
61 bool enqueue_halt;
62 struct list_head fifo;
63 spinlock_t fifo_lock;
64 struct list_head halt_fifo;
65 struct list_head list;
f898f989
CG
66 struct pipe pipe0;
67 struct pipe pipe1;
57562a72
CG
68 struct list_head trash_fifo;
69 struct task_struct *hdm_enqueue_task;
57562a72 70 wait_queue_head_t hdm_fifo_wq;
ed021a0f 71
57562a72 72};
9cbe5aa6 73
fcb7fad8 74#define to_channel(d) container_of(d, struct most_channel, dev)
57562a72 75
9136fccf 76struct interface_private {
57562a72 77 int dev_id;
9136fccf 78 char name[STRING_SIZE];
fcb7fad8 79 struct most_channel *channel[MAX_CHANNELS];
9136fccf 80 struct list_head channel_list;
57562a72 81};
9cbe5aa6 82
e7f2b70f
HPGE
83static const struct {
84 int most_ch_data_type;
06324664 85 const char *name;
95f73013 86} ch_data_type[] = {
4df0991b
CG
87 { MOST_CH_CONTROL, "control" },
88 { MOST_CH_ASYNC, "async" },
89 { MOST_CH_SYNC, "sync" },
90 { MOST_CH_ISOC, "isoc"},
91 { MOST_CH_ISOC, "isoc_avp"},
95f73013 92};
e7f2b70f 93
57562a72
CG
94/**
95 * list_pop_mbo - retrieves the first MBO of the list and removes it
96 * @ptr: the list head to grab the MBO from.
97 */
98#define list_pop_mbo(ptr) \
99({ \
100 struct mbo *_mbo = list_first_entry(ptr, struct mbo, list); \
101 list_del(&_mbo->list); \
102 _mbo; \
103})
104
57562a72
CG
/**
 * most_free_mbo_coherent - free an MBO and its coherent buffer
 * @mbo: most buffer
 *
 * Releases the data buffer through the interface's dma_free hook when one
 * is provided, otherwise via kfree(), then frees the MBO itself. When the
 * last MBO reference of the channel is dropped, channel cleanup is
 * signalled via the completion.
 */
static void most_free_mbo_coherent(struct mbo *mbo)
{
	struct most_channel *c = mbo->context;
	u16 const coherent_buf_size = c->cfg.buffer_size + c->cfg.extra_len;

	if (c->iface->dma_free)
		c->iface->dma_free(mbo, coherent_buf_size);
	else
		kfree(mbo->virt_address);
	kfree(mbo);
	/* last MBO gone: wake whoever waits in channel cleanup */
	if (atomic_sub_and_test(1, &c->mbo_ref))
		complete(&c->cleanup);
}
122
/**
 * flush_channel_fifos - clear the channel fifos
 * @c: pointer to channel object
 *
 * Frees every MBO queued on the channel's fifo and halt_fifo. The
 * spinlock is dropped around each most_free_mbo_coherent() call because
 * freeing the coherent buffer must not happen under the lock; the safe
 * iterator tolerates the list changing while unlocked.
 */
static void flush_channel_fifos(struct most_channel *c)
{
	unsigned long flags, hf_flags;
	struct mbo *mbo, *tmp;

	if (list_empty(&c->fifo) && list_empty(&c->halt_fifo))
		return;

	spin_lock_irqsave(&c->fifo_lock, flags);
	list_for_each_entry_safe(mbo, tmp, &c->fifo, list) {
		list_del(&mbo->list);
		spin_unlock_irqrestore(&c->fifo_lock, flags);
		most_free_mbo_coherent(mbo);
		spin_lock_irqsave(&c->fifo_lock, flags);
	}
	spin_unlock_irqrestore(&c->fifo_lock, flags);

	spin_lock_irqsave(&c->fifo_lock, hf_flags);
	list_for_each_entry_safe(mbo, tmp, &c->halt_fifo, list) {
		list_del(&mbo->list);
		spin_unlock_irqrestore(&c->fifo_lock, hf_flags);
		most_free_mbo_coherent(mbo);
		spin_lock_irqsave(&c->fifo_lock, hf_flags);
	}
	spin_unlock_irqrestore(&c->fifo_lock, hf_flags);

	/* both lists should be empty now; anything left indicates a race */
	if (unlikely((!list_empty(&c->fifo) || !list_empty(&c->halt_fifo))))
		pr_info("WARN: fifo | trash fifo not empty\n");
}
156
/**
 * flush_trash_fifo - clear the trash fifo
 * @c: pointer to channel object
 *
 * Frees all MBOs that were parked on the trash fifo by trash_mbo(),
 * dropping the lock around each free (same pattern as
 * flush_channel_fifos()). Always returns 0.
 */
static int flush_trash_fifo(struct most_channel *c)
{
	struct mbo *mbo, *tmp;
	unsigned long flags;

	spin_lock_irqsave(&c->fifo_lock, flags);
	list_for_each_entry_safe(mbo, tmp, &c->trash_fifo, list) {
		list_del(&mbo->list);
		spin_unlock_irqrestore(&c->fifo_lock, flags);
		most_free_mbo_coherent(mbo);
		spin_lock_irqsave(&c->fifo_lock, flags);
	}
	spin_unlock_irqrestore(&c->fifo_lock, flags);
	return 0;
}
176
4d5f022f
CG
177static ssize_t available_directions_show(struct device *dev,
178 struct device_attribute *attr,
edaa1e33 179 char *buf)
57562a72 180{
fcb7fad8 181 struct most_channel *c = to_channel(dev);
57562a72
CG
182 unsigned int i = c->channel_id;
183
184 strcpy(buf, "");
185 if (c->iface->channel_vector[i].direction & MOST_CH_RX)
95f73013 186 strcat(buf, "rx ");
57562a72 187 if (c->iface->channel_vector[i].direction & MOST_CH_TX)
95f73013 188 strcat(buf, "tx ");
57562a72 189 strcat(buf, "\n");
22ff195b 190 return strlen(buf);
57562a72
CG
191}
192
4d5f022f
CG
193static ssize_t available_datatypes_show(struct device *dev,
194 struct device_attribute *attr,
57562a72
CG
195 char *buf)
196{
fcb7fad8 197 struct most_channel *c = to_channel(dev);
57562a72
CG
198 unsigned int i = c->channel_id;
199
200 strcpy(buf, "");
201 if (c->iface->channel_vector[i].data_type & MOST_CH_CONTROL)
202 strcat(buf, "control ");
203 if (c->iface->channel_vector[i].data_type & MOST_CH_ASYNC)
204 strcat(buf, "async ");
205 if (c->iface->channel_vector[i].data_type & MOST_CH_SYNC)
206 strcat(buf, "sync ");
0540609f 207 if (c->iface->channel_vector[i].data_type & MOST_CH_ISOC)
95f73013 208 strcat(buf, "isoc ");
57562a72 209 strcat(buf, "\n");
22ff195b 210 return strlen(buf);
57562a72
CG
211}
212
4d5f022f
CG
213static ssize_t number_of_packet_buffers_show(struct device *dev,
214 struct device_attribute *attr,
4dd7c7c7 215 char *buf)
57562a72 216{
fcb7fad8 217 struct most_channel *c = to_channel(dev);
57562a72
CG
218 unsigned int i = c->channel_id;
219
220 return snprintf(buf, PAGE_SIZE, "%d\n",
221 c->iface->channel_vector[i].num_buffers_packet);
222}
223
4d5f022f
CG
224static ssize_t number_of_stream_buffers_show(struct device *dev,
225 struct device_attribute *attr,
4dd7c7c7 226 char *buf)
57562a72 227{
fcb7fad8 228 struct most_channel *c = to_channel(dev);
57562a72
CG
229 unsigned int i = c->channel_id;
230
231 return snprintf(buf, PAGE_SIZE, "%d\n",
232 c->iface->channel_vector[i].num_buffers_streaming);
233}
234
4d5f022f
CG
235static ssize_t size_of_packet_buffer_show(struct device *dev,
236 struct device_attribute *attr,
4dd7c7c7 237 char *buf)
57562a72 238{
fcb7fad8 239 struct most_channel *c = to_channel(dev);
57562a72
CG
240 unsigned int i = c->channel_id;
241
242 return snprintf(buf, PAGE_SIZE, "%d\n",
243 c->iface->channel_vector[i].buffer_size_packet);
244}
245
4d5f022f
CG
246static ssize_t size_of_stream_buffer_show(struct device *dev,
247 struct device_attribute *attr,
4dd7c7c7 248 char *buf)
57562a72 249{
fcb7fad8 250 struct most_channel *c = to_channel(dev);
57562a72
CG
251 unsigned int i = c->channel_id;
252
253 return snprintf(buf, PAGE_SIZE, "%d\n",
254 c->iface->channel_vector[i].buffer_size_streaming);
255}
256
4d5f022f
CG
257static ssize_t channel_starving_show(struct device *dev,
258 struct device_attribute *attr,
57562a72
CG
259 char *buf)
260{
fcb7fad8 261 struct most_channel *c = to_channel(dev);
4d5f022f 262
57562a72
CG
263 return snprintf(buf, PAGE_SIZE, "%d\n", c->is_starving);
264}
265
4d5f022f
CG
266static ssize_t set_number_of_buffers_show(struct device *dev,
267 struct device_attribute *attr,
57562a72
CG
268 char *buf)
269{
fcb7fad8 270 struct most_channel *c = to_channel(dev);
4d5f022f 271
57562a72
CG
272 return snprintf(buf, PAGE_SIZE, "%d\n", c->cfg.num_buffers);
273}
274
4d5f022f
CG
275static ssize_t set_buffer_size_show(struct device *dev,
276 struct device_attribute *attr,
57562a72
CG
277 char *buf)
278{
fcb7fad8 279 struct most_channel *c = to_channel(dev);
4d5f022f 280
57562a72
CG
281 return snprintf(buf, PAGE_SIZE, "%d\n", c->cfg.buffer_size);
282}
283
4d5f022f
CG
284static ssize_t set_direction_show(struct device *dev,
285 struct device_attribute *attr,
57562a72
CG
286 char *buf)
287{
fcb7fad8 288 struct most_channel *c = to_channel(dev);
4d5f022f 289
57562a72 290 if (c->cfg.direction & MOST_CH_TX)
95f73013 291 return snprintf(buf, PAGE_SIZE, "tx\n");
57562a72 292 else if (c->cfg.direction & MOST_CH_RX)
95f73013 293 return snprintf(buf, PAGE_SIZE, "rx\n");
57562a72
CG
294 return snprintf(buf, PAGE_SIZE, "unconfigured\n");
295}
296
4d5f022f
CG
297static ssize_t set_datatype_show(struct device *dev,
298 struct device_attribute *attr,
57562a72
CG
299 char *buf)
300{
e7f2b70f 301 int i;
fcb7fad8 302 struct most_channel *c = to_channel(dev);
e7f2b70f
HPGE
303
304 for (i = 0; i < ARRAY_SIZE(ch_data_type); i++) {
305 if (c->cfg.data_type & ch_data_type[i].most_ch_data_type)
f419f889
PT
306 return snprintf(buf, PAGE_SIZE, "%s",
307 ch_data_type[i].name);
e7f2b70f 308 }
57562a72
CG
309 return snprintf(buf, PAGE_SIZE, "unconfigured\n");
310}
311
4d5f022f
CG
312static ssize_t set_subbuffer_size_show(struct device *dev,
313 struct device_attribute *attr,
57562a72
CG
314 char *buf)
315{
fcb7fad8 316 struct most_channel *c = to_channel(dev);
4d5f022f 317
57562a72
CG
318 return snprintf(buf, PAGE_SIZE, "%d\n", c->cfg.subbuffer_size);
319}
320
4d5f022f
CG
321static ssize_t set_packets_per_xact_show(struct device *dev,
322 struct device_attribute *attr,
57562a72
CG
323 char *buf)
324{
fcb7fad8 325 struct most_channel *c = to_channel(dev);
4d5f022f 326
57562a72
CG
327 return snprintf(buf, PAGE_SIZE, "%d\n", c->cfg.packets_per_xact);
328}
329
dbd36d57
CG
330static ssize_t set_dbr_size_show(struct device *dev,
331 struct device_attribute *attr, char *buf)
332{
333 struct most_channel *c = to_channel(dev);
334
335 return snprintf(buf, PAGE_SIZE, "%d\n", c->cfg.dbr_size);
336}
337
4ad86623
CG
#define to_dev_attr(a) container_of(a, struct device_attribute, attr)

/*
 * Hide channel attributes that do not apply to the underlying interface
 * type: set_dbr_size is MediaLB DIM2-only, set_packets_per_xact is
 * USB-only. Returning 0 makes the attribute invisible in sysfs.
 */
static umode_t channel_attr_is_visible(struct kobject *kobj,
				       struct attribute *attr, int index)
{
	struct device_attribute *dev_attr = to_dev_attr(attr);
	struct device *dev = kobj_to_dev(kobj);
	struct most_channel *c = to_channel(dev);

	if (!strcmp(dev_attr->attr.name, "set_dbr_size") &&
	    (c->iface->interface != ITYPE_MEDIALB_DIM2))
		return 0;
	if (!strcmp(dev_attr->attr.name, "set_packets_per_xact") &&
	    (c->iface->interface != ITYPE_USB))
		return 0;

	return attr->mode;
}
355
4d5f022f
CG
356#define DEV_ATTR(_name) (&dev_attr_##_name.attr)
357
358static DEVICE_ATTR_RO(available_directions);
359static DEVICE_ATTR_RO(available_datatypes);
360static DEVICE_ATTR_RO(number_of_packet_buffers);
361static DEVICE_ATTR_RO(number_of_stream_buffers);
362static DEVICE_ATTR_RO(size_of_stream_buffer);
363static DEVICE_ATTR_RO(size_of_packet_buffer);
364static DEVICE_ATTR_RO(channel_starving);
787105b3
CG
365static DEVICE_ATTR_RO(set_buffer_size);
366static DEVICE_ATTR_RO(set_number_of_buffers);
367static DEVICE_ATTR_RO(set_direction);
368static DEVICE_ATTR_RO(set_datatype);
369static DEVICE_ATTR_RO(set_subbuffer_size);
370static DEVICE_ATTR_RO(set_packets_per_xact);
371static DEVICE_ATTR_RO(set_dbr_size);
4d5f022f
CG
372
373static struct attribute *channel_attrs[] = {
374 DEV_ATTR(available_directions),
375 DEV_ATTR(available_datatypes),
376 DEV_ATTR(number_of_packet_buffers),
377 DEV_ATTR(number_of_stream_buffers),
378 DEV_ATTR(size_of_stream_buffer),
379 DEV_ATTR(size_of_packet_buffer),
380 DEV_ATTR(channel_starving),
381 DEV_ATTR(set_buffer_size),
382 DEV_ATTR(set_number_of_buffers),
383 DEV_ATTR(set_direction),
384 DEV_ATTR(set_datatype),
385 DEV_ATTR(set_subbuffer_size),
386 DEV_ATTR(set_packets_per_xact),
dbd36d57 387 DEV_ATTR(set_dbr_size),
57562a72
CG
388 NULL,
389};
390
4d5f022f
CG
391static struct attribute_group channel_attr_group = {
392 .attrs = channel_attrs,
4ad86623 393 .is_visible = channel_attr_is_visible,
57562a72
CG
394};
395
4d5f022f
CG
396static const struct attribute_group *channel_attr_groups[] = {
397 &channel_attr_group,
398 NULL,
399};
57562a72 400
4d5f022f
CG
401static ssize_t description_show(struct device *dev,
402 struct device_attribute *attr,
57562a72
CG
403 char *buf)
404{
4d5f022f
CG
405 struct most_interface *iface = to_most_interface(dev);
406
407 return snprintf(buf, PAGE_SIZE, "%s\n", iface->description);
57562a72
CG
408}
409
4d5f022f
CG
410static ssize_t interface_show(struct device *dev,
411 struct device_attribute *attr,
57562a72
CG
412 char *buf)
413{
4d5f022f
CG
414 struct most_interface *iface = to_most_interface(dev);
415
416 switch (iface->interface) {
57562a72
CG
417 case ITYPE_LOOPBACK:
418 return snprintf(buf, PAGE_SIZE, "loopback\n");
419 case ITYPE_I2C:
420 return snprintf(buf, PAGE_SIZE, "i2c\n");
421 case ITYPE_I2S:
422 return snprintf(buf, PAGE_SIZE, "i2s\n");
423 case ITYPE_TSI:
424 return snprintf(buf, PAGE_SIZE, "tsi\n");
425 case ITYPE_HBI:
426 return snprintf(buf, PAGE_SIZE, "hbi\n");
427 case ITYPE_MEDIALB_DIM:
428 return snprintf(buf, PAGE_SIZE, "mlb_dim\n");
429 case ITYPE_MEDIALB_DIM2:
430 return snprintf(buf, PAGE_SIZE, "mlb_dim2\n");
431 case ITYPE_USB:
432 return snprintf(buf, PAGE_SIZE, "usb\n");
433 case ITYPE_PCIE:
434 return snprintf(buf, PAGE_SIZE, "pcie\n");
435 }
436 return snprintf(buf, PAGE_SIZE, "unknown\n");
437}
438
4d5f022f
CG
439static DEVICE_ATTR_RO(description);
440static DEVICE_ATTR_RO(interface);
57562a72 441
4d5f022f
CG
442static struct attribute *interface_attrs[] = {
443 DEV_ATTR(description),
444 DEV_ATTR(interface),
57562a72
CG
445 NULL,
446};
447
4d5f022f
CG
448static struct attribute_group interface_attr_group = {
449 .attrs = interface_attrs,
57562a72
CG
450};
451
4d5f022f
CG
452static const struct attribute_group *interface_attr_groups[] = {
453 &interface_attr_group,
454 NULL,
455};
57562a72 456
fdbdc0e6 457static struct core_component *match_component(char *name)
bdafb7e8 458{
5a5abf02 459 struct core_component *comp;
bdafb7e8 460
5a5abf02
CG
461 list_for_each_entry(comp, &mc.comp_list, list) {
462 if (!strcmp(comp->name, name))
463 return comp;
bdafb7e8
CG
464 }
465 return NULL;
466}
467
e7e3ce04
AS
struct show_links_data {
	int offs;	/* running write offset into buf */
	char *buf;	/* sysfs page buffer (PAGE_SIZE bytes) */
};

/*
 * bus_for_each_dev() callback used by links_show(): appends one
 * "component:interface:channel" line per linked pipe of every channel
 * of the given interface device.
 */
static int print_links(struct device *dev, void *data)
{
	struct show_links_data *d = data;
	int offs = d->offs;
	char *buf = d->buf;
	struct most_channel *c;
	struct most_interface *iface = to_most_interface(dev);

	list_for_each_entry(c, &iface->p->channel_list, list) {
		if (c->pipe0.comp) {
			offs += snprintf(buf + offs,
					 PAGE_SIZE - offs,
					 "%s:%s:%s\n",
					 c->pipe0.comp->name,
					 dev_name(&iface->dev),
					 dev_name(&c->dev));
		}
		if (c->pipe1.comp) {
			offs += snprintf(buf + offs,
					 PAGE_SIZE - offs,
					 "%s:%s:%s\n",
					 c->pipe1.comp->name,
					 dev_name(&iface->dev),
					 dev_name(&c->dev));
		}
	}
	d->offs = offs;
	return 0;
}
502
/* sysfs read: list all component:interface:channel links on the bus */
static ssize_t links_show(struct device_driver *drv, char *buf)
{
	struct show_links_data d = { .buf = buf };

	bus_for_each_dev(&mc.bus, NULL, &d, print_links);
	return d.offs;
}
510
fdbdc0e6 511static ssize_t components_show(struct device_driver *drv, char *buf)
bdafb7e8 512{
5a5abf02 513 struct core_component *comp;
bdafb7e8
CG
514 int offs = 0;
515
5a5abf02 516 list_for_each_entry(comp, &mc.comp_list, list) {
bdafb7e8 517 offs += snprintf(buf + offs, PAGE_SIZE - offs, "%s\n",
5a5abf02 518 comp->name);
bdafb7e8
CG
519 }
520 return offs;
521}
ed49a3bd 522
/**
 * split_string - parses buf and extracts ':' separated substrings.
 *
 * @buf: complete string from attribute 'add_channel'
 * @a: storage for 1st substring (=interface name)
 * @b: storage for 2nd substring (=channel name)
 * @c: storage for 3rd substring (=component name)
 * @d: storage optional 4th substring (=user defined name)
 *
 * Examples:
 *
 * Input: "mdev0:ch6:cdev:my_channel\n" or
 *        "mdev0:ch6:cdev:my_channel"
 *
 * Output: *a -> "mdev0", *b -> "ch6", *c -> "cdev" *d -> "my_channel"
 *
 * Input: "mdev1:ep81:cdev\n"
 * Output: *a -> "mdev1", *b -> "ep81", *c -> "cdev" *d -> ""
 *
 * Input: "mdev1:ep81"
 * Output: *a -> "mdev1", *b -> "ep81", *c -> "cdev" *d == NULL
 */
static int split_string(char *buf, char **a, char **b, char **c, char **d)
{
	char **out[] = { a, b, c };
	const char *sep = ":";
	int i;

	/* first three fields are mandatory; only the first separator
	 * excludes '\n', matching the original field-by-field parse
	 */
	for (i = 0; i < 3; i++) {
		*out[i] = strsep(&buf, sep);
		if (!*out[i])
			return -EIO;
		sep = ":\n";
	}

	if (d)
		*d = strsep(&buf, ":\n");

	return 0;
}
564
/**
 * get_channel - get pointer to channel
 * @mdev: name of the device interface
 * @mdev_ch: name of channel
 *
 * Finds the interface device by name on the most bus, then scans its
 * channel list for a channel device named @mdev_ch. Returns NULL when
 * either lookup fails.
 */
static struct most_channel *get_channel(char *mdev, char *mdev_ch)
{
	struct device *dev = NULL;
	struct most_interface *iface;
	struct most_channel *c, *tmp;

	dev = bus_find_device_by_name(&mc.bus, NULL, mdev);
	if (!dev)
		return NULL;
	iface = to_most_interface(dev);
	list_for_each_entry_safe(c, tmp, &iface->p->channel_list, list) {
		if (!strcmp(dev_name(&c->dev), mdev_ch))
			return c;
	}
	return NULL;
}
586
/*
 * Attach @comp to a free pipe of channel @c (each channel holds at most
 * two components) and run the component's probe_channel() hook. On probe
 * failure the pipe is released again; returns -ENOSPC when both pipes
 * are taken, otherwise the probe result.
 */
static
inline int link_channel_to_component(struct most_channel *c,
				     struct core_component *comp,
				     char *name,
				     char *comp_param)
{
	int ret;
	struct core_component **comp_ptr;

	if (!c->pipe0.comp)
		comp_ptr = &c->pipe0.comp;
	else if (!c->pipe1.comp)
		comp_ptr = &c->pipe1.comp;
	else
		return -ENOSPC;

	*comp_ptr = comp;
	ret = comp->probe_channel(c->iface, c->channel_id, &c->cfg, name,
				  comp_param);
	if (ret) {
		*comp_ptr = NULL;
		return ret;
	}
	return 0;
}
612
3d89b273
CG
613int most_set_cfg_buffer_size(char *mdev, char *mdev_ch, u16 val)
614{
615 struct most_channel *c = get_channel(mdev, mdev_ch);
616
617 if (!c)
618 return -ENODEV;
619 c->cfg.buffer_size = val;
620 return 0;
621}
622
623int most_set_cfg_subbuffer_size(char *mdev, char *mdev_ch, u16 val)
624{
625 struct most_channel *c = get_channel(mdev, mdev_ch);
626
627 if (!c)
628 return -ENODEV;
629 c->cfg.subbuffer_size = val;
630 return 0;
631}
632
633int most_set_cfg_dbr_size(char *mdev, char *mdev_ch, u16 val)
634{
635 struct most_channel *c = get_channel(mdev, mdev_ch);
636
637 if (!c)
638 return -ENODEV;
639 c->cfg.dbr_size = val;
640 return 0;
641}
642
643int most_set_cfg_num_buffers(char *mdev, char *mdev_ch, u16 val)
644{
645 struct most_channel *c = get_channel(mdev, mdev_ch);
646
647 if (!c)
648 return -ENODEV;
649 c->cfg.num_buffers = val;
650 return 0;
651}
652
/*
 * Set the channel's data type from its textual name (see ch_data_type
 * table). An unknown name is only warned about: the previous setting is
 * kept and 0 is still returned.
 */
int most_set_cfg_datatype(char *mdev, char *mdev_ch, char *buf)
{
	int i;
	struct most_channel *c = get_channel(mdev, mdev_ch);

	if (!c)
		return -ENODEV;
	for (i = 0; i < ARRAY_SIZE(ch_data_type); i++) {
		if (!strcmp(buf, ch_data_type[i].name)) {
			c->cfg.data_type = ch_data_type[i].most_ch_data_type;
			break;
		}
	}

	if (i == ARRAY_SIZE(ch_data_type))
		pr_info("WARN: invalid attribute settings\n");
	return 0;
}
671
672int most_set_cfg_direction(char *mdev, char *mdev_ch, char *buf)
673{
674 struct most_channel *c = get_channel(mdev, mdev_ch);
675
676 if (!c)
677 return -ENODEV;
4df0991b 678 if (!strcmp(buf, "dir_rx")) {
3d89b273 679 c->cfg.direction = MOST_CH_RX;
4df0991b 680 } else if (!strcmp(buf, "rx")) {
3d89b273 681 c->cfg.direction = MOST_CH_RX;
4df0991b 682 } else if (!strcmp(buf, "dir_tx")) {
3d89b273 683 c->cfg.direction = MOST_CH_TX;
4df0991b 684 } else if (!strcmp(buf, "tx")) {
3d89b273
CG
685 c->cfg.direction = MOST_CH_TX;
686 } else {
687 pr_info("Invalid direction\n");
688 return -ENODATA;
689 }
690 return 0;
691}
692
693int most_set_cfg_packets_xact(char *mdev, char *mdev_ch, u16 val)
694{
695 struct most_channel *c = get_channel(mdev, mdev_ch);
696
697 if (!c)
698 return -ENODEV;
699 c->cfg.packets_per_xact = val;
700 return 0;
701}
702
/* Forward the configuration-complete notification to the named component. */
int most_cfg_complete(char *comp_name)
{
	struct core_component *comp;

	comp = match_component(comp_name);
	if (!comp)
		return -ENODEV;

	return comp->cfg_complete();
}
713
/*
 * Resolve channel and component by name and link them together via
 * link_channel_to_component().
 */
int most_add_link(char *mdev, char *mdev_ch, char *comp_name, char *link_name,
		  char *comp_param)
{
	struct most_channel *c = get_channel(mdev, mdev_ch);
	struct core_component *comp = match_component(comp_name);

	if (!c || !comp)
		return -ENODEV;

	return link_channel_to_component(c, comp, link_name, comp_param);
}
f419f889 725
57562a72 726/**
bf676f4c 727 * remove_link_store - store function for remove_link attribute
b7937dc4 728 * @drv: device driver
57562a72
CG
729 * @buf: buffer
730 * @len: buffer length
731 *
732 * Example:
eefb2a84 733 * echo "mdev0:ep81" >remove_link
57562a72 734 */
bdafb7e8 735static ssize_t remove_link_store(struct device_driver *drv,
57562a72
CG
736 const char *buf,
737 size_t len)
738{
fcb7fad8 739 struct most_channel *c;
5a5abf02 740 struct core_component *comp;
57562a72
CG
741 char buffer[STRING_SIZE];
742 char *mdev;
743 char *mdev_ch;
5a5abf02 744 char *comp_name;
57562a72 745 int ret;
3f78f611 746 size_t max_len = min_t(size_t, len + 1, STRING_SIZE);
57562a72
CG
747
748 strlcpy(buffer, buf, max_len);
5a5abf02 749 ret = split_string(buffer, &mdev, &mdev_ch, &comp_name, NULL);
57562a72
CG
750 if (ret)
751 return ret;
fdbdc0e6 752 comp = match_component(comp_name);
3ba5515b
CG
753 if (!comp)
754 return -ENODEV;
ec0c2f62 755 c = get_channel(mdev, mdev_ch);
9136fccf 756 if (!c)
57562a72
CG
757 return -ENODEV;
758
5a5abf02 759 if (comp->disconnect_channel(c->iface, c->channel_id))
44fe5781 760 return -EIO;
5a5abf02
CG
761 if (c->pipe0.comp == comp)
762 c->pipe0.comp = NULL;
763 if (c->pipe1.comp == comp)
764 c->pipe1.comp = NULL;
57562a72
CG
765 return len;
766}
767
3d89b273
CG
/*
 * Detach the named component from the named channel: call the
 * component's disconnect_channel() hook and clear whichever pipe(s) of
 * the channel reference it. Returns -ENODEV for unknown names, -EIO if
 * the component refuses to disconnect, 0 on success.
 */
int most_remove_link(char *mdev, char *mdev_ch, char *comp_name)
{
	struct most_channel *c;
	struct core_component *comp;

	comp = match_component(comp_name);
	if (!comp)
		return -ENODEV;
	c = get_channel(mdev, mdev_ch);
	if (!c)
		return -ENODEV;

	if (comp->disconnect_channel(c->iface, c->channel_id))
		return -EIO;
	if (c->pipe0.comp == comp)
		c->pipe0.comp = NULL;
	if (c->pipe1.comp == comp)
		c->pipe1.comp = NULL;
	return 0;
}
788
bdafb7e8
CG
789#define DRV_ATTR(_name) (&driver_attr_##_name.attr)
790
791static DRIVER_ATTR_RO(links);
fdbdc0e6 792static DRIVER_ATTR_RO(components);
bdafb7e8 793static DRIVER_ATTR_WO(remove_link);
57562a72 794
fdbdc0e6 795static struct attribute *mc_attrs[] = {
bdafb7e8 796 DRV_ATTR(links),
fdbdc0e6 797 DRV_ATTR(components),
bdafb7e8 798 DRV_ATTR(remove_link),
57562a72
CG
799 NULL,
800};
801
fdbdc0e6
CG
802static struct attribute_group mc_attr_group = {
803 .attrs = mc_attrs,
57562a72
CG
804};
805
fdbdc0e6
CG
806static const struct attribute_group *mc_attr_groups[] = {
807 &mc_attr_group,
4d5f022f
CG
808 NULL,
809};
57562a72 810
/*
 * Bus match: bind the driver to every device on the bus except the core
 * "most" device itself.
 */
static int most_match(struct device *dev, struct device_driver *drv)
{
	return strcmp(dev_name(dev), "most") ? 1 : 0;
}
818
57562a72
CG
/* Park an MBO on the channel's trash fifo; freed later by
 * flush_trash_fifo().
 */
static inline void trash_mbo(struct mbo *mbo)
{
	unsigned long flags;
	struct most_channel *c = mbo->context;

	spin_lock_irqsave(&c->fifo_lock, flags);
	list_add(&mbo->list, &c->trash_fifo);
	spin_unlock_irqrestore(&c->fifo_lock, flags);
}
828
/* True when the enqueue thread has work: enqueueing is not halted and
 * the halt_fifo holds at least one MBO.
 */
static bool hdm_mbo_ready(struct most_channel *c)
{
	bool empty;

	if (c->enqueue_halt)
		return false;

	spin_lock_irq(&c->fifo_lock);
	empty = list_empty(&c->halt_fifo);
	spin_unlock_irq(&c->fifo_lock);

	return !empty;
}
842
/* Queue an MBO for submission to the HDM and wake the enqueue thread. */
static void nq_hdm_mbo(struct mbo *mbo)
{
	unsigned long flags;
	struct most_channel *c = mbo->context;

	spin_lock_irqsave(&c->fifo_lock, flags);
	list_add_tail(&mbo->list, &c->halt_fifo);
	spin_unlock_irqrestore(&c->fifo_lock, flags);
	wake_up_interruptible(&c->hdm_fifo_wq);
}
853
/*
 * Per-channel kthread: waits for MBOs on the halt_fifo and hands them to
 * the interface's enqueue() hook. nq_mutex serializes the enqueue call
 * (see its declaration comment); enqueue_halt is re-checked under the
 * lock since it may have been raised between wakeup and here. On an
 * enqueue failure the MBO is requeued and the thread exits.
 */
static int hdm_enqueue_thread(void *data)
{
	struct most_channel *c = data;
	struct mbo *mbo;
	int ret;
	typeof(c->iface->enqueue) enqueue = c->iface->enqueue;

	while (likely(!kthread_should_stop())) {
		wait_event_interruptible(c->hdm_fifo_wq,
					 hdm_mbo_ready(c) ||
					 kthread_should_stop());

		mutex_lock(&c->nq_mutex);
		spin_lock_irq(&c->fifo_lock);
		if (unlikely(c->enqueue_halt || list_empty(&c->halt_fifo))) {
			spin_unlock_irq(&c->fifo_lock);
			mutex_unlock(&c->nq_mutex);
			continue;
		}

		mbo = list_pop_mbo(&c->halt_fifo);
		spin_unlock_irq(&c->fifo_lock);

		/* Rx buffers are always offered at full size */
		if (c->cfg.direction == MOST_CH_RX)
			mbo->buffer_length = c->cfg.buffer_size;

		ret = enqueue(mbo->ifp, mbo->hdm_channel_id, mbo);
		mutex_unlock(&c->nq_mutex);

		if (unlikely(ret)) {
			pr_err("hdm enqueue failed\n");
			nq_hdm_mbo(mbo);
			c->hdm_enqueue_task = NULL;
			return 0;
		}
	}

	return 0;
}
893
fcb7fad8 894static int run_enqueue_thread(struct most_channel *c, int channel_id)
57562a72
CG
895{
896 struct task_struct *task =
246ed517
SB
897 kthread_run(hdm_enqueue_thread, c, "hdm_fifo_%d",
898 channel_id);
57562a72
CG
899
900 if (IS_ERR(task))
901 return PTR_ERR(task);
902
903 c->hdm_enqueue_task = task;
904 return 0;
905}
906
/**
 * arm_mbo - recycle MBO for further usage
 * @mbo: most buffer
 *
 * This puts an MBO back to the list to have it ready for up coming
 * tx transactions.
 *
 * In case the MBO belongs to a channel that recently has been
 * poisoned, the MBO is scheduled to be trashed.
 * Calls the completion handler of an attached component.
 */
static void arm_mbo(struct mbo *mbo)
{
	unsigned long flags;
	struct most_channel *c;

	c = mbo->context;

	if (c->is_poisoned) {
		trash_mbo(mbo);
		return;
	}

	spin_lock_irqsave(&c->fifo_lock, flags);
	/* hand the buffer back to its component's quota counter */
	++*mbo->num_buffers_ptr;
	list_add_tail(&mbo->list, &c->fifo);
	spin_unlock_irqrestore(&c->fifo_lock, flags);

	if (c->pipe0.refs && c->pipe0.comp->tx_completion)
		c->pipe0.comp->tx_completion(c->iface, c->channel_id);

	if (c->pipe1.refs && c->pipe1.comp->tx_completion)
		c->pipe1.comp->tx_completion(c->iface, c->channel_id);
}
941
/**
 * arm_mbo_chain - helper function that arms an MBO chain for the HDM
 * @c: pointer to interface channel
 * @dir: direction of the channel
 * @compl: pointer to completion function
 *
 * This allocates buffer objects including the containing DMA coherent
 * buffer and puts them in the fifo.
 * Buffers of Rx channels are put in the kthread fifo, hence immediately
 * submitted to the HDM.
 *
 * Returns the number of allocated and enqueued MBOs.
 */
static int arm_mbo_chain(struct most_channel *c, int dir,
			 void (*compl)(struct mbo *))
{
	unsigned int i;
	struct mbo *mbo;
	unsigned long flags;
	u32 coherent_buf_size = c->cfg.buffer_size + c->cfg.extra_len;

	atomic_set(&c->mbo_nq_level, 0);

	for (i = 0; i < c->cfg.num_buffers; i++) {
		mbo = kzalloc(sizeof(*mbo), GFP_KERNEL);
		if (!mbo)
			goto flush_fifos;

		mbo->context = c;
		mbo->ifp = c->iface;
		mbo->hdm_channel_id = c->channel_id;
		/* data buffer comes from the interface's allocator, if any */
		if (c->iface->dma_alloc) {
			mbo->virt_address =
				c->iface->dma_alloc(mbo, coherent_buf_size);
		} else {
			mbo->virt_address =
				kzalloc(coherent_buf_size, GFP_KERNEL);
		}
		if (!mbo->virt_address)
			goto release_mbo;

		mbo->complete = compl;
		mbo->num_buffers_ptr = &dummy_num_buffers;
		if (dir == MOST_CH_RX) {
			/* Rx buffers go straight to the enqueue thread */
			nq_hdm_mbo(mbo);
			atomic_inc(&c->mbo_nq_level);
		} else {
			spin_lock_irqsave(&c->fifo_lock, flags);
			list_add_tail(&mbo->list, &c->fifo);
			spin_unlock_irqrestore(&c->fifo_lock, flags);
		}
	}
	return c->cfg.num_buffers;

release_mbo:
	kfree(mbo);

flush_fifos:
	flush_channel_fifos(c);
	return 0;
}
1003
/**
 * most_submit_mbo - submits an MBO to fifo
 * @mbo: most buffer
 *
 * Hands a (filled) MBO back to the HDM enqueue path. A NULL MBO or one
 * without a channel context only triggers a one-time warning.
 */
void most_submit_mbo(struct mbo *mbo)
{
	if (WARN_ONCE(!mbo || !mbo->context,
		      "bad mbo or missing channel reference\n"))
		return;

	nq_hdm_mbo(mbo);
}
EXPORT_SYMBOL_GPL(most_submit_mbo);
1017
/**
 * most_write_completion - write completion handler
 * @mbo: most buffer
 *
 * This recycles the MBO for further usage. In case the channel has been
 * poisoned, the MBO is scheduled to be trashed.
 */
static void most_write_completion(struct mbo *mbo)
{
	struct most_channel *c;

	c = mbo->context;
	if (mbo->status == MBO_E_INVAL)
		pr_info("WARN: Tx MBO status: invalid\n");
	if (unlikely(c->is_poisoned || (mbo->status == MBO_E_CLOSE)))
		trash_mbo(mbo);
	else
		arm_mbo(mbo);
}
1037
5a5abf02
CG
/*
 * channel_has_mbo - check whether a free buffer is available for @comp
 * @iface: pointer to interface instance
 * @id: channel ID
 * @comp: driver component
 *
 * Returns 1 if the fifo holds a buffer, 0 if not, -EINVAL for a bad
 * channel id. When two components share the channel, a component whose
 * per-pipe buffer quota is exhausted gets 0 even if the fifo is not
 * empty.
 */
int channel_has_mbo(struct most_interface *iface, int id,
		    struct core_component *comp)
{
	struct most_channel *c = iface->p->channel[id];
	unsigned long flags;
	int empty;

	if (unlikely(!c))
		return -EINVAL;

	if (c->pipe0.refs && c->pipe1.refs &&
	    ((comp == c->pipe0.comp && c->pipe0.num_buffers <= 0) ||
	     (comp == c->pipe1.comp && c->pipe1.num_buffers <= 0)))
		return 0;

	spin_lock_irqsave(&c->fifo_lock, flags);
	empty = list_empty(&c->fifo);
	spin_unlock_irqrestore(&c->fifo_lock, flags);
	return !empty;
}
EXPORT_SYMBOL_GPL(channel_has_mbo);
1059
57562a72
CG
1060/**
1061 * most_get_mbo - get pointer to an MBO of pool
1062 * @iface: pointer to interface instance
1063 * @id: channel ID
b7937dc4 1064 * @comp: driver component
57562a72
CG
1065 *
1066 * This attempts to get a free buffer out of the channel fifo.
1067 * Returns a pointer to MBO on success or NULL otherwise.
1068 */
71457d48 1069struct mbo *most_get_mbo(struct most_interface *iface, int id,
5a5abf02 1070 struct core_component *comp)
57562a72
CG
1071{
1072 struct mbo *mbo;
fcb7fad8 1073 struct most_channel *c;
57562a72 1074 unsigned long flags;
71457d48 1075 int *num_buffers_ptr;
57562a72 1076
9136fccf 1077 c = iface->p->channel[id];
57562a72
CG
1078 if (unlikely(!c))
1079 return NULL;
71457d48 1080
f898f989 1081 if (c->pipe0.refs && c->pipe1.refs &&
5a5abf02
CG
1082 ((comp == c->pipe0.comp && c->pipe0.num_buffers <= 0) ||
1083 (comp == c->pipe1.comp && c->pipe1.num_buffers <= 0)))
71457d48
CG
1084 return NULL;
1085
5a5abf02 1086 if (comp == c->pipe0.comp)
f898f989 1087 num_buffers_ptr = &c->pipe0.num_buffers;
5a5abf02 1088 else if (comp == c->pipe1.comp)
f898f989 1089 num_buffers_ptr = &c->pipe1.num_buffers;
71457d48
CG
1090 else
1091 num_buffers_ptr = &dummy_num_buffers;
1092
57562a72
CG
1093 spin_lock_irqsave(&c->fifo_lock, flags);
1094 if (list_empty(&c->fifo)) {
1095 spin_unlock_irqrestore(&c->fifo_lock, flags);
1096 return NULL;
1097 }
1098 mbo = list_pop_mbo(&c->fifo);
71457d48 1099 --*num_buffers_ptr;
57562a72 1100 spin_unlock_irqrestore(&c->fifo_lock, flags);
71457d48
CG
1101
1102 mbo->num_buffers_ptr = num_buffers_ptr;
57562a72
CG
1103 mbo->buffer_length = c->cfg.buffer_size;
1104 return mbo;
1105}
1106EXPORT_SYMBOL_GPL(most_get_mbo);
1107
57562a72
CG
1108/**
1109 * most_put_mbo - return buffer to pool
b7937dc4 1110 * @mbo: most buffer
57562a72
CG
1111 */
1112void most_put_mbo(struct mbo *mbo)
1113{
fcb7fad8 1114 struct most_channel *c = mbo->context;
57562a72 1115
57562a72
CG
1116 if (c->cfg.direction == MOST_CH_TX) {
1117 arm_mbo(mbo);
1118 return;
1119 }
1120 nq_hdm_mbo(mbo);
1121 atomic_inc(&c->mbo_nq_level);
1122}
1123EXPORT_SYMBOL_GPL(most_put_mbo);
1124
1125/**
1126 * most_read_completion - read completion handler
b7937dc4 1127 * @mbo: most buffer
57562a72
CG
1128 *
1129 * This function is called by the HDM when data has been received from the
1130 * hardware and copied to the buffer of the MBO.
1131 *
1132 * In case the channel has been poisoned it puts the buffer in the trash queue.
b7937dc4 1133 * Otherwise, it passes the buffer to an component for further processing.
57562a72
CG
1134 */
1135static void most_read_completion(struct mbo *mbo)
1136{
fcb7fad8 1137 struct most_channel *c = mbo->context;
57562a72 1138
f13f6981
CG
1139 if (unlikely(c->is_poisoned || (mbo->status == MBO_E_CLOSE))) {
1140 trash_mbo(mbo);
1141 return;
1142 }
57562a72
CG
1143
1144 if (mbo->status == MBO_E_INVAL) {
1145 nq_hdm_mbo(mbo);
1146 atomic_inc(&c->mbo_nq_level);
1147 return;
1148 }
1149
5a63e23a 1150 if (atomic_sub_and_test(1, &c->mbo_nq_level))
57562a72 1151 c->is_starving = 1;
57562a72 1152
5a5abf02
CG
1153 if (c->pipe0.refs && c->pipe0.comp->rx_completion &&
1154 c->pipe0.comp->rx_completion(mbo) == 0)
57562a72 1155 return;
f13f6981 1156
5a5abf02
CG
1157 if (c->pipe1.refs && c->pipe1.comp->rx_completion &&
1158 c->pipe1.comp->rx_completion(mbo) == 0)
57562a72 1159 return;
f13f6981
CG
1160
1161 most_put_mbo(mbo);
57562a72
CG
1162}
1163
1164/**
1165 * most_start_channel - prepares a channel for communication
1166 * @iface: pointer to interface instance
1167 * @id: channel ID
b7937dc4 1168 * @comp: driver component
57562a72
CG
1169 *
1170 * This prepares the channel for usage. Cross-checks whether the
1171 * channel's been properly configured.
1172 *
1173 * Returns 0 on success or error code otherwise.
1174 */
f13f6981 1175int most_start_channel(struct most_interface *iface, int id,
5a5abf02 1176 struct core_component *comp)
57562a72
CG
1177{
1178 int num_buffer;
1179 int ret;
9136fccf 1180 struct most_channel *c = iface->p->channel[id];
57562a72
CG
1181
1182 if (unlikely(!c))
1183 return -EINVAL;
1184
f13f6981 1185 mutex_lock(&c->start_mutex);
f898f989 1186 if (c->pipe0.refs + c->pipe1.refs > 0)
b7937dc4 1187 goto out; /* already started by another component */
57562a72
CG
1188
1189 if (!try_module_get(iface->mod)) {
1190 pr_info("failed to acquire HDM lock\n");
f13f6981 1191 mutex_unlock(&c->start_mutex);
57562a72
CG
1192 return -ENOLCK;
1193 }
57562a72
CG
1194
1195 c->cfg.extra_len = 0;
1196 if (c->iface->configure(c->iface, c->channel_id, &c->cfg)) {
1197 pr_info("channel configuration failed. Go check settings...\n");
1198 ret = -EINVAL;
bddd3c25 1199 goto err_put_module;
57562a72
CG
1200 }
1201
1202 init_waitqueue_head(&c->hdm_fifo_wq);
1203
1204 if (c->cfg.direction == MOST_CH_RX)
1205 num_buffer = arm_mbo_chain(c, c->cfg.direction,
1206 most_read_completion);
1207 else
1208 num_buffer = arm_mbo_chain(c, c->cfg.direction,
1209 most_write_completion);
47af41b0 1210 if (unlikely(!num_buffer)) {
57562a72 1211 ret = -ENOMEM;
bddd3c25 1212 goto err_put_module;
57562a72
CG
1213 }
1214
1215 ret = run_enqueue_thread(c, id);
1216 if (ret)
bddd3c25 1217 goto err_put_module;
57562a72 1218
57562a72 1219 c->is_starving = 0;
f898f989
CG
1220 c->pipe0.num_buffers = c->cfg.num_buffers / 2;
1221 c->pipe1.num_buffers = c->cfg.num_buffers - c->pipe0.num_buffers;
57562a72 1222 atomic_set(&c->mbo_ref, num_buffer);
f13f6981
CG
1223
1224out:
5a5abf02 1225 if (comp == c->pipe0.comp)
f898f989 1226 c->pipe0.refs++;
5a5abf02 1227 if (comp == c->pipe1.comp)
f898f989 1228 c->pipe1.refs++;
f13f6981 1229 mutex_unlock(&c->start_mutex);
57562a72 1230 return 0;
f13f6981 1231
bddd3c25 1232err_put_module:
e23afff9 1233 module_put(iface->mod);
f13f6981 1234 mutex_unlock(&c->start_mutex);
57562a72
CG
1235 return ret;
1236}
1237EXPORT_SYMBOL_GPL(most_start_channel);
1238
1239/**
1240 * most_stop_channel - stops a running channel
1241 * @iface: pointer to interface instance
1242 * @id: channel ID
b7937dc4 1243 * @comp: driver component
57562a72 1244 */
f13f6981 1245int most_stop_channel(struct most_interface *iface, int id,
5a5abf02 1246 struct core_component *comp)
57562a72 1247{
fcb7fad8 1248 struct most_channel *c;
57562a72
CG
1249
1250 if (unlikely((!iface) || (id >= iface->num_channels) || (id < 0))) {
1251 pr_err("Bad interface or index out of range\n");
1252 return -EINVAL;
1253 }
9136fccf 1254 c = iface->p->channel[id];
57562a72
CG
1255 if (unlikely(!c))
1256 return -EINVAL;
1257
f13f6981 1258 mutex_lock(&c->start_mutex);
f898f989 1259 if (c->pipe0.refs + c->pipe1.refs >= 2)
f13f6981 1260 goto out;
57562a72 1261
57562a72
CG
1262 if (c->hdm_enqueue_task)
1263 kthread_stop(c->hdm_enqueue_task);
1264 c->hdm_enqueue_task = NULL;
57562a72 1265
9cda3007 1266 if (iface->mod)
57562a72 1267 module_put(iface->mod);
57562a72
CG
1268
1269 c->is_poisoned = true;
1270 if (c->iface->poison_channel(c->iface, c->channel_id)) {
1271 pr_err("Cannot stop channel %d of mdev %s\n", c->channel_id,
1272 c->iface->description);
f13f6981 1273 mutex_unlock(&c->start_mutex);
57562a72
CG
1274 return -EAGAIN;
1275 }
1276 flush_trash_fifo(c);
1277 flush_channel_fifos(c);
1278
1279#ifdef CMPL_INTERRUPTIBLE
1280 if (wait_for_completion_interruptible(&c->cleanup)) {
1281 pr_info("Interrupted while clean up ch %d\n", c->channel_id);
f13f6981 1282 mutex_unlock(&c->start_mutex);
57562a72
CG
1283 return -EINTR;
1284 }
1285#else
1286 wait_for_completion(&c->cleanup);
1287#endif
1288 c->is_poisoned = false;
f13f6981
CG
1289
1290out:
5a5abf02 1291 if (comp == c->pipe0.comp)
f898f989 1292 c->pipe0.refs--;
5a5abf02 1293 if (comp == c->pipe1.comp)
f898f989 1294 c->pipe1.refs--;
f13f6981 1295 mutex_unlock(&c->start_mutex);
57562a72
CG
1296 return 0;
1297}
1298EXPORT_SYMBOL_GPL(most_stop_channel);
1299
1300/**
b7937dc4
CG
1301 * most_register_component - registers a driver component with the core
1302 * @comp: driver component
57562a72 1303 */
5a5abf02 1304int most_register_component(struct core_component *comp)
57562a72 1305{
5a5abf02 1306 if (!comp) {
b7937dc4 1307 pr_err("Bad component\n");
57562a72
CG
1308 return -EINVAL;
1309 }
5a5abf02 1310 list_add_tail(&comp->list, &mc.comp_list);
b7937dc4 1311 pr_info("registered new core component %s\n", comp->name);
57562a72
CG
1312 return 0;
1313}
ed021a0f 1314EXPORT_SYMBOL_GPL(most_register_component);
57562a72 1315
9136fccf
CG
1316static int disconnect_channels(struct device *dev, void *data)
1317{
1318 struct most_interface *iface;
1319 struct most_channel *c, *tmp;
5a5abf02 1320 struct core_component *comp = data;
9136fccf
CG
1321
1322 iface = to_most_interface(dev);
1323 list_for_each_entry_safe(c, tmp, &iface->p->channel_list, list) {
5a5abf02
CG
1324 if (c->pipe0.comp == comp || c->pipe1.comp == comp)
1325 comp->disconnect_channel(c->iface, c->channel_id);
1326 if (c->pipe0.comp == comp)
1327 c->pipe0.comp = NULL;
1328 if (c->pipe1.comp == comp)
1329 c->pipe1.comp = NULL;
9136fccf
CG
1330 }
1331 return 0;
1332}
1333
57562a72 1334/**
b7937dc4
CG
1335 * most_deregister_component - deregisters a driver component with the core
1336 * @comp: driver component
57562a72 1337 */
5a5abf02 1338int most_deregister_component(struct core_component *comp)
57562a72 1339{
5a5abf02 1340 if (!comp) {
b7937dc4 1341 pr_err("Bad component\n");
57562a72
CG
1342 return -EINVAL;
1343 }
1344
5a5abf02
CG
1345 bus_for_each_dev(&mc.bus, NULL, comp, disconnect_channels);
1346 list_del(&comp->list);
b7937dc4 1347 pr_info("deregistering component %s\n", comp->name);
57562a72
CG
1348 return 0;
1349}
ed021a0f 1350EXPORT_SYMBOL_GPL(most_deregister_component);
57562a72 1351
/* Device-core release callback for interface devices; memory is owned by
 * the HDM, so only log the release here.
 */
static void release_interface(struct device *dev)
{
	pr_info("releasing interface dev %s...\n", dev_name(dev));
}
1356
/* Device-core release callback for channel devices; the channel struct is
 * freed by the core elsewhere, so only log the release here.
 */
static void release_channel(struct device *dev)
{
	pr_info("releasing channel dev %s...\n", dev_name(dev));
}
1361
57562a72
CG
1362/**
1363 * most_register_interface - registers an interface with core
b7937dc4 1364 * @iface: device interface
57562a72
CG
1365 *
1366 * Allocates and initializes a new interface instance and all of its channels.
1367 * Returns a pointer to kobject or an error pointer.
1368 */
4d5f022f 1369int most_register_interface(struct most_interface *iface)
57562a72
CG
1370{
1371 unsigned int i;
1372 int id;
fcb7fad8 1373 struct most_channel *c;
57562a72
CG
1374
1375 if (!iface || !iface->enqueue || !iface->configure ||
1376 !iface->poison_channel || (iface->num_channels > MAX_CHANNELS)) {
1377 pr_err("Bad interface or channel overflow\n");
4d5f022f 1378 return -EINVAL;
57562a72
CG
1379 }
1380
1381 id = ida_simple_get(&mdev_id, 0, 0, GFP_KERNEL);
1382 if (id < 0) {
1383 pr_info("Failed to alloc mdev ID\n");
4d5f022f 1384 return id;
57562a72 1385 }
57562a72 1386
9136fccf
CG
1387 iface->p = kzalloc(sizeof(*iface->p), GFP_KERNEL);
1388 if (!iface->p) {
b7382d44 1389 ida_simple_remove(&mdev_id, id);
4d5f022f 1390 return -ENOMEM;
57562a72
CG
1391 }
1392
9136fccf
CG
1393 INIT_LIST_HEAD(&iface->p->channel_list);
1394 iface->p->dev_id = id;
3970d0d8 1395 strscpy(iface->p->name, iface->description, sizeof(iface->p->name));
9136fccf 1396 iface->dev.init_name = iface->p->name;
14ae5f03
CG
1397 iface->dev.bus = &mc.bus;
1398 iface->dev.parent = &mc.dev;
4d5f022f
CG
1399 iface->dev.groups = interface_attr_groups;
1400 iface->dev.release = release_interface;
1401 if (device_register(&iface->dev)) {
1402 pr_err("registering iface->dev failed\n");
9136fccf 1403 kfree(iface->p);
4d5f022f
CG
1404 ida_simple_remove(&mdev_id, id);
1405 return -ENOMEM;
1406 }
57562a72
CG
1407
1408 for (i = 0; i < iface->num_channels; i++) {
1409 const char *name_suffix = iface->channel_vector[i].name_suffix;
1410
4d5f022f 1411 c = kzalloc(sizeof(*c), GFP_KERNEL);
57562a72 1412 if (!c)
bddd3c25 1413 goto err_free_resources;
845101be
CG
1414 if (!name_suffix)
1415 snprintf(c->name, STRING_SIZE, "ch%d", i);
1416 else
1417 snprintf(c->name, STRING_SIZE, "%s", name_suffix);
1418 c->dev.init_name = c->name;
4d5f022f
CG
1419 c->dev.parent = &iface->dev;
1420 c->dev.groups = channel_attr_groups;
1421 c->dev.release = release_channel;
9136fccf 1422 iface->p->channel[i] = c;
57562a72
CG
1423 c->is_starving = 0;
1424 c->iface = iface;
57562a72
CG
1425 c->channel_id = i;
1426 c->keep_mbo = false;
1427 c->enqueue_halt = false;
1428 c->is_poisoned = false;
57562a72
CG
1429 c->cfg.direction = 0;
1430 c->cfg.data_type = 0;
1431 c->cfg.num_buffers = 0;
1432 c->cfg.buffer_size = 0;
1433 c->cfg.subbuffer_size = 0;
1434 c->cfg.packets_per_xact = 0;
1435 spin_lock_init(&c->fifo_lock);
1436 INIT_LIST_HEAD(&c->fifo);
1437 INIT_LIST_HEAD(&c->trash_fifo);
1438 INIT_LIST_HEAD(&c->halt_fifo);
1439 init_completion(&c->cleanup);
1440 atomic_set(&c->mbo_ref, 0);
f13f6981 1441 mutex_init(&c->start_mutex);
bf9503f1 1442 mutex_init(&c->nq_mutex);
9136fccf 1443 list_add_tail(&c->list, &iface->p->channel_list);
f0b4a22a
CG
1444 if (device_register(&c->dev)) {
1445 pr_err("registering c->dev failed\n");
bddd3c25 1446 goto err_free_most_channel;
f0b4a22a 1447 }
57562a72 1448 }
b7937dc4 1449 pr_info("registered new device mdev%d (%s)\n",
4d5f022f 1450 id, iface->description);
acdbb897 1451 most_interface_register_notify(iface->description);
4d5f022f 1452 return 0;
57562a72 1453
bddd3c25 1454err_free_most_channel:
9136fccf
CG
1455 kfree(c);
1456
bddd3c25 1457err_free_resources:
9136fccf
CG
1458 while (i > 0) {
1459 c = iface->p->channel[--i];
1460 device_unregister(&c->dev);
1461 kfree(c);
1462 }
1463 kfree(iface->p);
4d5f022f 1464 device_unregister(&iface->dev);
b7382d44 1465 ida_simple_remove(&mdev_id, id);
4d5f022f 1466 return -ENOMEM;
57562a72
CG
1467}
1468EXPORT_SYMBOL_GPL(most_register_interface);
1469
1470/**
1471 * most_deregister_interface - deregisters an interface with core
b7937dc4 1472 * @iface: device interface
57562a72
CG
1473 *
1474 * Before removing an interface instance from the list, all running
1475 * channels are stopped and poisoned.
1476 */
1477void most_deregister_interface(struct most_interface *iface)
1478{
4d5f022f 1479 int i;
fcb7fad8 1480 struct most_channel *c;
57562a72 1481
92d01a56
QK
1482 pr_info("deregistering device %s (%s)\n", dev_name(&iface->dev),
1483 iface->description);
4d5f022f 1484 for (i = 0; i < iface->num_channels; i++) {
9136fccf 1485 c = iface->p->channel[i];
5a5abf02
CG
1486 if (c->pipe0.comp)
1487 c->pipe0.comp->disconnect_channel(c->iface,
a0fceb1f 1488 c->channel_id);
5a5abf02
CG
1489 if (c->pipe1.comp)
1490 c->pipe1.comp->disconnect_channel(c->iface,
a0fceb1f 1491 c->channel_id);
5a5abf02
CG
1492 c->pipe0.comp = NULL;
1493 c->pipe1.comp = NULL;
4d5f022f
CG
1494 list_del(&c->list);
1495 device_unregister(&c->dev);
1496 kfree(c);
a0fceb1f
CG
1497 }
1498
9136fccf
CG
1499 ida_simple_remove(&mdev_id, iface->p->dev_id);
1500 kfree(iface->p);
4d5f022f 1501 device_unregister(&iface->dev);
57562a72
CG
1502}
1503EXPORT_SYMBOL_GPL(most_deregister_interface);
1504
1505/**
1506 * most_stop_enqueue - prevents core from enqueueing MBOs
1507 * @iface: pointer to interface
1508 * @id: channel id
1509 *
1510 * This is called by an HDM that _cannot_ attend to its duties and
1511 * is imminent to get run over by the core. The core is not going to
1512 * enqueue any further packets unless the flagging HDM calls
1513 * most_resume enqueue().
1514 */
1515void most_stop_enqueue(struct most_interface *iface, int id)
1516{
9136fccf 1517 struct most_channel *c = iface->p->channel[id];
57562a72 1518
bf9503f1
CG
1519 if (!c)
1520 return;
1521
1522 mutex_lock(&c->nq_mutex);
1523 c->enqueue_halt = true;
1524 mutex_unlock(&c->nq_mutex);
57562a72
CG
1525}
1526EXPORT_SYMBOL_GPL(most_stop_enqueue);
1527
1528/**
1529 * most_resume_enqueue - allow core to enqueue MBOs again
1530 * @iface: pointer to interface
1531 * @id: channel id
1532 *
1533 * This clears the enqueue halt flag and enqueues all MBOs currently
1534 * sitting in the wait fifo.
1535 */
1536void most_resume_enqueue(struct most_interface *iface, int id)
1537{
9136fccf 1538 struct most_channel *c = iface->p->channel[id];
57562a72 1539
bf9503f1 1540 if (!c)
57562a72 1541 return;
bf9503f1
CG
1542
1543 mutex_lock(&c->nq_mutex);
57562a72 1544 c->enqueue_halt = false;
bf9503f1 1545 mutex_unlock(&c->nq_mutex);
57562a72
CG
1546
1547 wake_up_interruptible(&c->hdm_fifo_wq);
1548}
1549EXPORT_SYMBOL_GPL(most_resume_enqueue);
1550
/* Release callback for the core's root device; nothing to free. */
static void release_most_sub(struct device *dev)
{
	pr_info("releasing most_subsystem\n");
}
1555
57562a72
CG
1556static int __init most_init(void)
1557{
cc4188b6
SM
1558 int err;
1559
57562a72 1560 pr_info("init()\n");
81ce26b7 1561 INIT_LIST_HEAD(&mc.comp_list);
57562a72
CG
1562 ida_init(&mdev_id);
1563
14ae5f03
CG
1564 mc.bus.name = "most",
1565 mc.bus.match = most_match,
1566 mc.drv.name = "most_core",
1567 mc.drv.bus = &mc.bus,
fdbdc0e6 1568 mc.drv.groups = mc_attr_groups;
14ae5f03
CG
1569
1570 err = bus_register(&mc.bus);
cc4188b6 1571 if (err) {
57562a72 1572 pr_info("Cannot register most bus\n");
cc4188b6 1573 return err;
57562a72 1574 }
14ae5f03 1575 err = driver_register(&mc.drv);
cc4188b6 1576 if (err) {
57562a72 1577 pr_info("Cannot register core driver\n");
bddd3c25 1578 goto err_unregister_bus;
57562a72 1579 }
14ae5f03
CG
1580 mc.dev.init_name = "most_bus";
1581 mc.dev.release = release_most_sub;
1582 if (device_register(&mc.dev)) {
cc4188b6 1583 err = -ENOMEM;
bddd3c25 1584 goto err_unregister_driver;
cc4188b6 1585 }
919c03ae 1586 configfs_init();
57562a72
CG
1587 return 0;
1588
bddd3c25 1589err_unregister_driver:
14ae5f03 1590 driver_unregister(&mc.drv);
bddd3c25 1591err_unregister_bus:
14ae5f03 1592 bus_unregister(&mc.bus);
cc4188b6 1593 return err;
57562a72
CG
1594}
1595
1596static void __exit most_exit(void)
1597{
57562a72 1598 pr_info("exit core module\n");
14ae5f03
CG
1599 device_unregister(&mc.dev);
1600 driver_unregister(&mc.drv);
14ae5f03 1601 bus_unregister(&mc.bus);
57562a72
CG
1602 ida_destroy(&mdev_id);
1603}
1604
1605module_init(most_init);
1606module_exit(most_exit);
1607MODULE_LICENSE("GPL");
1608MODULE_AUTHOR("Christian Gromm <christian.gromm@microchip.com>");
1609MODULE_DESCRIPTION("Core module of stacked MOST Linux driver");