// SPDX-License-Identifier: GPL-2.0
/*
 * core.c - Implementation of core module of MOST Linux driver stack
 *
 * Copyright (C) 2013-2015 Microchip Technology Germany II GmbH & Co. KG
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/module.h>
#include <linux/fs.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/device.h>
#include <linux/list.h>
#include <linux/poll.h>
#include <linux/wait.h>
#include <linux/kobject.h>
#include <linux/mutex.h>
#include <linux/completion.h>
#include <linux/sysfs.h>
#include <linux/kthread.h>
#include <linux/dma-mapping.h>
#include <linux/idr.h>
#include <most/core.h>

#define MAX_CHANNELS	64
#define STRING_SIZE	80

static struct ida mdev_id;
static int dummy_num_buffers;

static struct mostcore {
	struct device dev;
	struct device_driver drv;
	struct bus_type bus;
	struct list_head comp_list;
} mc;

#define to_driver(d) container_of(d, struct mostcore, drv)

struct pipe {
	struct core_component *comp;
	int refs;
	int num_buffers;
};

struct most_channel {
	struct device dev;
	struct completion cleanup;
	atomic_t mbo_ref;
	atomic_t mbo_nq_level;
	u16 channel_id;
	char name[STRING_SIZE];
	bool is_poisoned;
	struct mutex start_mutex;
	struct mutex nq_mutex; /* nq thread synchronization */
	int is_starving;
	struct most_interface *iface;
	struct most_channel_config cfg;
	bool keep_mbo;
	bool enqueue_halt;
	struct list_head fifo;
	spinlock_t fifo_lock;
	struct list_head halt_fifo;
	struct list_head list;
	struct pipe pipe0;
	struct pipe pipe1;
	struct list_head trash_fifo;
	struct task_struct *hdm_enqueue_task;
	wait_queue_head_t hdm_fifo_wq;
};

#define to_channel(d) container_of(d, struct most_channel, dev)

struct interface_private {
	int dev_id;
	char name[STRING_SIZE];
	struct most_channel *channel[MAX_CHANNELS];
	struct list_head channel_list;
};

static const struct {
	int most_ch_data_type;
	const char *name;
} ch_data_type[] = {
	{ MOST_CH_CONTROL, "control\n" },
	{ MOST_CH_ASYNC, "async\n" },
	{ MOST_CH_SYNC, "sync\n" },
	{ MOST_CH_ISOC, "isoc\n" },
	{ MOST_CH_ISOC, "isoc_avp\n" },
};

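/*
 * Note: the names above keep the trailing newline on purpose, so that what
 * userspace writes via sysfs (e.g. "echo sync > set_datatype", where echo
 * appends '\n') can be matched with a plain strcmp() in set_datatype_store()
 * and echoed back verbatim by set_datatype_show().
 */
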
/**
 * list_pop_mbo - retrieves the first MBO of the list and removes it
 * @ptr: the list head to grab the MBO from.
 */
#define list_pop_mbo(ptr)						\
({									\
	struct mbo *_mbo = list_first_entry(ptr, struct mbo, list);	\
	list_del(&_mbo->list);						\
	_mbo;								\
})

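/*
 * Illustrative sketch (not part of the driver): callers pop MBOs with the
 * channel's fifo_lock held, as done throughout this file:
 *
 *	spin_lock_irqsave(&c->fifo_lock, flags);
 *	if (!list_empty(&c->fifo))
 *		mbo = list_pop_mbo(&c->fifo);
 *	spin_unlock_irqrestore(&c->fifo_lock, flags);
 */
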
/**
 * most_free_mbo_coherent - free an MBO and its coherent buffer
 * @mbo: most buffer
 */
static void most_free_mbo_coherent(struct mbo *mbo)
{
	struct most_channel *c = mbo->context;
	u16 const coherent_buf_size = c->cfg.buffer_size + c->cfg.extra_len;

	if (c->iface->dma_free)
		c->iface->dma_free(mbo, coherent_buf_size);
	else
		kfree(mbo->virt_address);
	kfree(mbo);
	if (atomic_sub_and_test(1, &c->mbo_ref))
		complete(&c->cleanup);
}

/**
 * flush_channel_fifos - clear the channel fifos
 * @c: pointer to channel object
 */
static void flush_channel_fifos(struct most_channel *c)
{
	unsigned long flags, hf_flags;
	struct mbo *mbo, *tmp;

	if (list_empty(&c->fifo) && list_empty(&c->halt_fifo))
		return;

	spin_lock_irqsave(&c->fifo_lock, flags);
	list_for_each_entry_safe(mbo, tmp, &c->fifo, list) {
		list_del(&mbo->list);
		spin_unlock_irqrestore(&c->fifo_lock, flags);
		most_free_mbo_coherent(mbo);
		spin_lock_irqsave(&c->fifo_lock, flags);
	}
	spin_unlock_irqrestore(&c->fifo_lock, flags);

	spin_lock_irqsave(&c->fifo_lock, hf_flags);
	list_for_each_entry_safe(mbo, tmp, &c->halt_fifo, list) {
		list_del(&mbo->list);
		spin_unlock_irqrestore(&c->fifo_lock, hf_flags);
		most_free_mbo_coherent(mbo);
		spin_lock_irqsave(&c->fifo_lock, hf_flags);
	}
	spin_unlock_irqrestore(&c->fifo_lock, hf_flags);

	if (unlikely(!list_empty(&c->fifo) || !list_empty(&c->halt_fifo)))
		pr_info("WARN: fifo | halt fifo not empty\n");
}

/**
 * flush_trash_fifo - clear the trash fifo
 * @c: pointer to channel object
 */
static int flush_trash_fifo(struct most_channel *c)
{
	struct mbo *mbo, *tmp;
	unsigned long flags;

	spin_lock_irqsave(&c->fifo_lock, flags);
	list_for_each_entry_safe(mbo, tmp, &c->trash_fifo, list) {
		list_del(&mbo->list);
		spin_unlock_irqrestore(&c->fifo_lock, flags);
		most_free_mbo_coherent(mbo);
		spin_lock_irqsave(&c->fifo_lock, flags);
	}
	spin_unlock_irqrestore(&c->fifo_lock, flags);
	return 0;
}

static ssize_t available_directions_show(struct device *dev,
					 struct device_attribute *attr,
					 char *buf)
{
	struct most_channel *c = to_channel(dev);
	unsigned int i = c->channel_id;

	strcpy(buf, "");
	if (c->iface->channel_vector[i].direction & MOST_CH_RX)
		strcat(buf, "rx ");
	if (c->iface->channel_vector[i].direction & MOST_CH_TX)
		strcat(buf, "tx ");
	strcat(buf, "\n");
	return strlen(buf);
}

static ssize_t available_datatypes_show(struct device *dev,
					struct device_attribute *attr,
					char *buf)
{
	struct most_channel *c = to_channel(dev);
	unsigned int i = c->channel_id;

	strcpy(buf, "");
	if (c->iface->channel_vector[i].data_type & MOST_CH_CONTROL)
		strcat(buf, "control ");
	if (c->iface->channel_vector[i].data_type & MOST_CH_ASYNC)
		strcat(buf, "async ");
	if (c->iface->channel_vector[i].data_type & MOST_CH_SYNC)
		strcat(buf, "sync ");
	if (c->iface->channel_vector[i].data_type & MOST_CH_ISOC)
		strcat(buf, "isoc ");
	strcat(buf, "\n");
	return strlen(buf);
}

static ssize_t number_of_packet_buffers_show(struct device *dev,
					     struct device_attribute *attr,
					     char *buf)
{
	struct most_channel *c = to_channel(dev);
	unsigned int i = c->channel_id;

	return snprintf(buf, PAGE_SIZE, "%d\n",
			c->iface->channel_vector[i].num_buffers_packet);
}

static ssize_t number_of_stream_buffers_show(struct device *dev,
					     struct device_attribute *attr,
					     char *buf)
{
	struct most_channel *c = to_channel(dev);
	unsigned int i = c->channel_id;

	return snprintf(buf, PAGE_SIZE, "%d\n",
			c->iface->channel_vector[i].num_buffers_streaming);
}

static ssize_t size_of_packet_buffer_show(struct device *dev,
					  struct device_attribute *attr,
					  char *buf)
{
	struct most_channel *c = to_channel(dev);
	unsigned int i = c->channel_id;

	return snprintf(buf, PAGE_SIZE, "%d\n",
			c->iface->channel_vector[i].buffer_size_packet);
}

static ssize_t size_of_stream_buffer_show(struct device *dev,
					  struct device_attribute *attr,
					  char *buf)
{
	struct most_channel *c = to_channel(dev);
	unsigned int i = c->channel_id;

	return snprintf(buf, PAGE_SIZE, "%d\n",
			c->iface->channel_vector[i].buffer_size_streaming);
}

static ssize_t channel_starving_show(struct device *dev,
				     struct device_attribute *attr,
				     char *buf)
{
	struct most_channel *c = to_channel(dev);

	return snprintf(buf, PAGE_SIZE, "%d\n", c->is_starving);
}

static ssize_t set_number_of_buffers_show(struct device *dev,
					  struct device_attribute *attr,
					  char *buf)
{
	struct most_channel *c = to_channel(dev);

	return snprintf(buf, PAGE_SIZE, "%d\n", c->cfg.num_buffers);
}

static ssize_t set_number_of_buffers_store(struct device *dev,
					   struct device_attribute *attr,
					   const char *buf,
					   size_t count)
{
	struct most_channel *c = to_channel(dev);
	int ret = kstrtou16(buf, 0, &c->cfg.num_buffers);

	if (ret)
		return ret;
	return count;
}

static ssize_t set_buffer_size_show(struct device *dev,
				    struct device_attribute *attr,
				    char *buf)
{
	struct most_channel *c = to_channel(dev);

	return snprintf(buf, PAGE_SIZE, "%d\n", c->cfg.buffer_size);
}

static ssize_t set_buffer_size_store(struct device *dev,
				     struct device_attribute *attr,
				     const char *buf,
				     size_t count)
{
	struct most_channel *c = to_channel(dev);
	int ret = kstrtou16(buf, 0, &c->cfg.buffer_size);

	if (ret)
		return ret;
	return count;
}

static ssize_t set_direction_show(struct device *dev,
				  struct device_attribute *attr,
				  char *buf)
{
	struct most_channel *c = to_channel(dev);

	if (c->cfg.direction & MOST_CH_TX)
		return snprintf(buf, PAGE_SIZE, "tx\n");
	else if (c->cfg.direction & MOST_CH_RX)
		return snprintf(buf, PAGE_SIZE, "rx\n");
	return snprintf(buf, PAGE_SIZE, "unconfigured\n");
}

static ssize_t set_direction_store(struct device *dev,
				   struct device_attribute *attr,
				   const char *buf,
				   size_t count)
{
	struct most_channel *c = to_channel(dev);

	if (!strcmp(buf, "dir_rx\n")) {
		c->cfg.direction = MOST_CH_RX;
	} else if (!strcmp(buf, "rx\n")) {
		c->cfg.direction = MOST_CH_RX;
	} else if (!strcmp(buf, "dir_tx\n")) {
		c->cfg.direction = MOST_CH_TX;
	} else if (!strcmp(buf, "tx\n")) {
		c->cfg.direction = MOST_CH_TX;
	} else {
		pr_info("WARN: invalid attribute settings\n");
		return -EINVAL;
	}
	return count;
}

static ssize_t set_datatype_show(struct device *dev,
				 struct device_attribute *attr,
				 char *buf)
{
	int i;
	struct most_channel *c = to_channel(dev);

	for (i = 0; i < ARRAY_SIZE(ch_data_type); i++) {
		if (c->cfg.data_type & ch_data_type[i].most_ch_data_type)
			return snprintf(buf, PAGE_SIZE, "%s", ch_data_type[i].name);
	}
	return snprintf(buf, PAGE_SIZE, "unconfigured\n");
}

static ssize_t set_datatype_store(struct device *dev,
				  struct device_attribute *attr,
				  const char *buf,
				  size_t count)
{
	int i;
	struct most_channel *c = to_channel(dev);

	for (i = 0; i < ARRAY_SIZE(ch_data_type); i++) {
		if (!strcmp(buf, ch_data_type[i].name)) {
			c->cfg.data_type = ch_data_type[i].most_ch_data_type;
			break;
		}
	}

	if (i == ARRAY_SIZE(ch_data_type)) {
		pr_info("WARN: invalid attribute settings\n");
		return -EINVAL;
	}
	return count;
}

static ssize_t set_subbuffer_size_show(struct device *dev,
				       struct device_attribute *attr,
				       char *buf)
{
	struct most_channel *c = to_channel(dev);

	return snprintf(buf, PAGE_SIZE, "%d\n", c->cfg.subbuffer_size);
}

static ssize_t set_subbuffer_size_store(struct device *dev,
					struct device_attribute *attr,
					const char *buf,
					size_t count)
{
	struct most_channel *c = to_channel(dev);
	int ret = kstrtou16(buf, 0, &c->cfg.subbuffer_size);

	if (ret)
		return ret;
	return count;
}

static ssize_t set_packets_per_xact_show(struct device *dev,
					 struct device_attribute *attr,
					 char *buf)
{
	struct most_channel *c = to_channel(dev);

	return snprintf(buf, PAGE_SIZE, "%d\n", c->cfg.packets_per_xact);
}

static ssize_t set_packets_per_xact_store(struct device *dev,
					  struct device_attribute *attr,
					  const char *buf,
					  size_t count)
{
	struct most_channel *c = to_channel(dev);
	int ret = kstrtou16(buf, 0, &c->cfg.packets_per_xact);

	if (ret)
		return ret;
	return count;
}

static ssize_t set_dbr_size_show(struct device *dev,
				 struct device_attribute *attr, char *buf)
{
	struct most_channel *c = to_channel(dev);

	return snprintf(buf, PAGE_SIZE, "%d\n", c->cfg.dbr_size);
}

static ssize_t set_dbr_size_store(struct device *dev,
				  struct device_attribute *attr,
				  const char *buf, size_t count)
{
	struct most_channel *c = to_channel(dev);
	int ret = kstrtou16(buf, 0, &c->cfg.dbr_size);

	if (ret)
		return ret;
	return count;
}

#define to_dev_attr(a) container_of(a, struct device_attribute, attr)
static umode_t channel_attr_is_visible(struct kobject *kobj,
				       struct attribute *attr, int index)
{
	struct device_attribute *dev_attr = to_dev_attr(attr);
	struct device *dev = kobj_to_dev(kobj);
	struct most_channel *c = to_channel(dev);

	if (!strcmp(dev_attr->attr.name, "set_dbr_size") &&
	    (c->iface->interface != ITYPE_MEDIALB_DIM2))
		return 0;
	if (!strcmp(dev_attr->attr.name, "set_packets_per_xact") &&
	    (c->iface->interface != ITYPE_USB))
		return 0;

	return attr->mode;
}

#define DEV_ATTR(_name) (&dev_attr_##_name.attr)

static DEVICE_ATTR_RO(available_directions);
static DEVICE_ATTR_RO(available_datatypes);
static DEVICE_ATTR_RO(number_of_packet_buffers);
static DEVICE_ATTR_RO(number_of_stream_buffers);
static DEVICE_ATTR_RO(size_of_stream_buffer);
static DEVICE_ATTR_RO(size_of_packet_buffer);
static DEVICE_ATTR_RO(channel_starving);
static DEVICE_ATTR_RW(set_buffer_size);
static DEVICE_ATTR_RW(set_number_of_buffers);
static DEVICE_ATTR_RW(set_direction);
static DEVICE_ATTR_RW(set_datatype);
static DEVICE_ATTR_RW(set_subbuffer_size);
static DEVICE_ATTR_RW(set_packets_per_xact);
static DEVICE_ATTR_RW(set_dbr_size);

static struct attribute *channel_attrs[] = {
	DEV_ATTR(available_directions),
	DEV_ATTR(available_datatypes),
	DEV_ATTR(number_of_packet_buffers),
	DEV_ATTR(number_of_stream_buffers),
	DEV_ATTR(size_of_stream_buffer),
	DEV_ATTR(size_of_packet_buffer),
	DEV_ATTR(channel_starving),
	DEV_ATTR(set_buffer_size),
	DEV_ATTR(set_number_of_buffers),
	DEV_ATTR(set_direction),
	DEV_ATTR(set_datatype),
	DEV_ATTR(set_subbuffer_size),
	DEV_ATTR(set_packets_per_xact),
	DEV_ATTR(set_dbr_size),
	NULL,
};

static struct attribute_group channel_attr_group = {
	.attrs = channel_attrs,
	.is_visible = channel_attr_is_visible,
};

static const struct attribute_group *channel_attr_groups[] = {
	&channel_attr_group,
	NULL,
};

static ssize_t description_show(struct device *dev,
				struct device_attribute *attr,
				char *buf)
{
	struct most_interface *iface = to_most_interface(dev);

	return snprintf(buf, PAGE_SIZE, "%s\n", iface->description);
}

static ssize_t interface_show(struct device *dev,
			      struct device_attribute *attr,
			      char *buf)
{
	struct most_interface *iface = to_most_interface(dev);

	switch (iface->interface) {
	case ITYPE_LOOPBACK:
		return snprintf(buf, PAGE_SIZE, "loopback\n");
	case ITYPE_I2C:
		return snprintf(buf, PAGE_SIZE, "i2c\n");
	case ITYPE_I2S:
		return snprintf(buf, PAGE_SIZE, "i2s\n");
	case ITYPE_TSI:
		return snprintf(buf, PAGE_SIZE, "tsi\n");
	case ITYPE_HBI:
		return snprintf(buf, PAGE_SIZE, "hbi\n");
	case ITYPE_MEDIALB_DIM:
		return snprintf(buf, PAGE_SIZE, "mlb_dim\n");
	case ITYPE_MEDIALB_DIM2:
		return snprintf(buf, PAGE_SIZE, "mlb_dim2\n");
	case ITYPE_USB:
		return snprintf(buf, PAGE_SIZE, "usb\n");
	case ITYPE_PCIE:
		return snprintf(buf, PAGE_SIZE, "pcie\n");
	}
	return snprintf(buf, PAGE_SIZE, "unknown\n");
}

static DEVICE_ATTR_RO(description);
static DEVICE_ATTR_RO(interface);

static struct attribute *interface_attrs[] = {
	DEV_ATTR(description),
	DEV_ATTR(interface),
	NULL,
};

static struct attribute_group interface_attr_group = {
	.attrs = interface_attrs,
};

static const struct attribute_group *interface_attr_groups[] = {
	&interface_attr_group,
	NULL,
};

static struct core_component *match_component(char *name)
{
	struct core_component *comp;

	list_for_each_entry(comp, &mc.comp_list, list) {
		if (!strcmp(comp->name, name))
			return comp;
	}
	return NULL;
}

struct show_links_data {
	int offs;
	char *buf;
};

static int print_links(struct device *dev, void *data)
{
	struct show_links_data *d = data;
	int offs = d->offs;
	char *buf = d->buf;
	struct most_channel *c;
	struct most_interface *iface = to_most_interface(dev);

	list_for_each_entry(c, &iface->p->channel_list, list) {
		if (c->pipe0.comp) {
			offs += snprintf(buf + offs,
					 PAGE_SIZE - offs,
					 "%s:%s:%s\n",
					 c->pipe0.comp->name,
					 dev_name(&iface->dev),
					 dev_name(&c->dev));
		}
		if (c->pipe1.comp) {
			offs += snprintf(buf + offs,
					 PAGE_SIZE - offs,
					 "%s:%s:%s\n",
					 c->pipe1.comp->name,
					 dev_name(&iface->dev),
					 dev_name(&c->dev));
		}
	}
	d->offs = offs;
	return 0;
}

static ssize_t links_show(struct device_driver *drv, char *buf)
{
	struct show_links_data d = { .buf = buf };

	bus_for_each_dev(&mc.bus, NULL, &d, print_links);
	return d.offs;
}

static ssize_t components_show(struct device_driver *drv, char *buf)
{
	struct core_component *comp;
	int offs = 0;

	list_for_each_entry(comp, &mc.comp_list, list) {
		offs += snprintf(buf + offs, PAGE_SIZE - offs, "%s\n",
				 comp->name);
	}
	return offs;
}

/**
 * split_string - parses buf and extracts ':' separated substrings.
 *
 * @buf: complete string from attribute 'add_channel'
 * @a: storage for 1st substring (=interface name)
 * @b: storage for 2nd substring (=channel name)
 * @c: storage for 3rd substring (=component name)
 * @d: storage for optional 4th substring (=user defined name)
 *
 * Examples:
 *
 * Input: "mdev0:ch6:cdev:my_channel\n" or
 *        "mdev0:ch6:cdev:my_channel"
 *
 * Output: *a -> "mdev0", *b -> "ch6", *c -> "cdev", *d -> "my_channel"
 *
 * Input: "mdev1:ep81:cdev\n"
 * Output: *a -> "mdev1", *b -> "ep81", *c -> "cdev", *d -> ""
 *
 * Input: "mdev1:ep81:cdev"
 * Output: *a -> "mdev1", *b -> "ep81", *c -> "cdev", *d == NULL
 */
static int split_string(char *buf, char **a, char **b, char **c, char **d)
{
	*a = strsep(&buf, ":");
	if (!*a)
		return -EIO;

	*b = strsep(&buf, ":\n");
	if (!*b)
		return -EIO;

	*c = strsep(&buf, ":\n");
	if (!*c)
		return -EIO;

	if (d)
		*d = strsep(&buf, ":\n");

	return 0;
}

static int match_bus_dev(struct device *dev, void *data)
{
	char *mdev_name = data;

	return !strcmp(dev_name(dev), mdev_name);
}

/**
 * get_channel - get pointer to channel
 * @mdev: name of the device interface
 * @mdev_ch: name of channel
 */
static struct most_channel *get_channel(char *mdev, char *mdev_ch)
{
	struct device *dev = NULL;
	struct most_interface *iface;
	struct most_channel *c, *tmp;

	dev = bus_find_device(&mc.bus, NULL, mdev, match_bus_dev);
	if (!dev)
		return NULL;
	iface = to_most_interface(dev);
	list_for_each_entry_safe(c, tmp, &iface->p->channel_list, list) {
		if (!strcmp(dev_name(&c->dev), mdev_ch))
			return c;
	}
	return NULL;
}

static inline int link_channel_to_component(struct most_channel *c,
					    struct core_component *comp,
					    char *comp_param)
{
	int ret;
	struct core_component **comp_ptr;

	if (!c->pipe0.comp)
		comp_ptr = &c->pipe0.comp;
	else if (!c->pipe1.comp)
		comp_ptr = &c->pipe1.comp;
	else
		return -ENOSPC;

	*comp_ptr = comp;
	ret = comp->probe_channel(c->iface, c->channel_id, &c->cfg, comp_param);
	if (ret) {
		*comp_ptr = NULL;
		return ret;
	}
	return 0;
}

/**
 * add_link_store - store function for add_link attribute
 * @drv: device driver
 * @buf: buffer
 * @len: buffer length
 *
 * This parses the string given by buf and splits it into
 * four substrings. Note: the last substring is optional. In case a cdev
 * component is loaded, the optional 4th substring will make up the name of the
 * device node in the /dev directory. If omitted, the device node will
 * inherit the channel's name within sysfs.
 *
 * Searches for the (device, channel) pair and probes the component.
 *
 * Example:
 * (1) echo "mdev0:ch6:cdev:my_rxchannel" >add_link
 * (2) echo "mdev1:ep81:cdev" >add_link
 *
 * (1) would create the device node /dev/my_rxchannel
 * (2) would create the device node /dev/mdev1-ep81
 */
static ssize_t add_link_store(struct device_driver *drv,
			      const char *buf,
			      size_t len)
{
	struct most_channel *c;
	struct core_component *comp;
	char buffer[STRING_SIZE];
	char *mdev;
	char *mdev_ch;
	char *comp_name;
	char *comp_param;
	char devnod_buf[STRING_SIZE];
	int ret;
	size_t max_len = min_t(size_t, len + 1, STRING_SIZE);

	strlcpy(buffer, buf, max_len);
	ret = split_string(buffer, &mdev, &mdev_ch, &comp_name, &comp_param);
	if (ret)
		return ret;
	comp = match_component(comp_name);
	if (!comp)
		return -ENODEV;
	if (!comp_param || *comp_param == 0) {
		snprintf(devnod_buf, sizeof(devnod_buf), "%s-%s", mdev,
			 mdev_ch);
		comp_param = devnod_buf;
	}

	c = get_channel(mdev, mdev_ch);
	if (!c)
		return -ENODEV;

	ret = link_channel_to_component(c, comp, comp_param);
	if (ret)
		return ret;
	return len;
}

/**
 * remove_link_store - store function for remove_link attribute
 * @drv: device driver
 * @buf: buffer
 * @len: buffer length
 *
 * Example:
 * echo "mdev0:ep81:cdev" >remove_link
 */
static ssize_t remove_link_store(struct device_driver *drv,
				 const char *buf,
				 size_t len)
{
	struct most_channel *c;
	struct core_component *comp;
	char buffer[STRING_SIZE];
	char *mdev;
	char *mdev_ch;
	char *comp_name;
	int ret;
	size_t max_len = min_t(size_t, len + 1, STRING_SIZE);

	strlcpy(buffer, buf, max_len);
	ret = split_string(buffer, &mdev, &mdev_ch, &comp_name, NULL);
	if (ret)
		return ret;
	comp = match_component(comp_name);
	if (!comp)
		return -ENODEV;
	c = get_channel(mdev, mdev_ch);
	if (!c)
		return -ENODEV;

	if (comp->disconnect_channel(c->iface, c->channel_id))
		return -EIO;
	if (c->pipe0.comp == comp)
		c->pipe0.comp = NULL;
	if (c->pipe1.comp == comp)
		c->pipe1.comp = NULL;
	return len;
}

#define DRV_ATTR(_name) (&driver_attr_##_name.attr)

static DRIVER_ATTR_RO(links);
static DRIVER_ATTR_RO(components);
static DRIVER_ATTR_WO(add_link);
static DRIVER_ATTR_WO(remove_link);

static struct attribute *mc_attrs[] = {
	DRV_ATTR(links),
	DRV_ATTR(components),
	DRV_ATTR(add_link),
	DRV_ATTR(remove_link),
	NULL,
};

static struct attribute_group mc_attr_group = {
	.attrs = mc_attrs,
};

static const struct attribute_group *mc_attr_groups[] = {
	&mc_attr_group,
	NULL,
};

/*
 * most_match - bus match function
 *
 * Matches every device on the "most" bus to the core driver, except for a
 * device that is itself named "most".
 */
static int most_match(struct device *dev, struct device_driver *drv)
{
	if (!strcmp(dev_name(dev), "most"))
		return 0;
	else
		return 1;
}

static inline void trash_mbo(struct mbo *mbo)
{
	unsigned long flags;
	struct most_channel *c = mbo->context;

	spin_lock_irqsave(&c->fifo_lock, flags);
	list_add(&mbo->list, &c->trash_fifo);
	spin_unlock_irqrestore(&c->fifo_lock, flags);
}

static bool hdm_mbo_ready(struct most_channel *c)
{
	bool empty;

	if (c->enqueue_halt)
		return false;

	spin_lock_irq(&c->fifo_lock);
	empty = list_empty(&c->halt_fifo);
	spin_unlock_irq(&c->fifo_lock);

	return !empty;
}

static void nq_hdm_mbo(struct mbo *mbo)
{
	unsigned long flags;
	struct most_channel *c = mbo->context;

	spin_lock_irqsave(&c->fifo_lock, flags);
	list_add_tail(&mbo->list, &c->halt_fifo);
	spin_unlock_irqrestore(&c->fifo_lock, flags);
	wake_up_interruptible(&c->hdm_fifo_wq);
}

static int hdm_enqueue_thread(void *data)
{
	struct most_channel *c = data;
	struct mbo *mbo;
	int ret;
	typeof(c->iface->enqueue) enqueue = c->iface->enqueue;

	while (likely(!kthread_should_stop())) {
		wait_event_interruptible(c->hdm_fifo_wq,
					 hdm_mbo_ready(c) ||
					 kthread_should_stop());

		mutex_lock(&c->nq_mutex);
		spin_lock_irq(&c->fifo_lock);
		if (unlikely(c->enqueue_halt || list_empty(&c->halt_fifo))) {
			spin_unlock_irq(&c->fifo_lock);
			mutex_unlock(&c->nq_mutex);
			continue;
		}

		mbo = list_pop_mbo(&c->halt_fifo);
		spin_unlock_irq(&c->fifo_lock);

		if (c->cfg.direction == MOST_CH_RX)
			mbo->buffer_length = c->cfg.buffer_size;

		ret = enqueue(mbo->ifp, mbo->hdm_channel_id, mbo);
		mutex_unlock(&c->nq_mutex);

		if (unlikely(ret)) {
			pr_err("hdm enqueue failed\n");
			nq_hdm_mbo(mbo);
			c->hdm_enqueue_task = NULL;
			return 0;
		}
	}

	return 0;
}

static int run_enqueue_thread(struct most_channel *c, int channel_id)
{
	struct task_struct *task =
		kthread_run(hdm_enqueue_thread, c, "hdm_fifo_%d",
			    channel_id);

	if (IS_ERR(task))
		return PTR_ERR(task);

	c->hdm_enqueue_task = task;
	return 0;
}

/**
 * arm_mbo - recycle MBO for further usage
 * @mbo: most buffer
 *
 * This puts an MBO back to the list to have it ready for upcoming
 * tx transactions.
 *
 * In case the MBO belongs to a channel that recently has been
 * poisoned, the MBO is scheduled to be trashed.
 * Calls the completion handler of an attached component.
 */
static void arm_mbo(struct mbo *mbo)
{
	unsigned long flags;
	struct most_channel *c;

	c = mbo->context;

	if (c->is_poisoned) {
		trash_mbo(mbo);
		return;
	}

	spin_lock_irqsave(&c->fifo_lock, flags);
	++*mbo->num_buffers_ptr;
	list_add_tail(&mbo->list, &c->fifo);
	spin_unlock_irqrestore(&c->fifo_lock, flags);

	if (c->pipe0.refs && c->pipe0.comp->tx_completion)
		c->pipe0.comp->tx_completion(c->iface, c->channel_id);

	if (c->pipe1.refs && c->pipe1.comp->tx_completion)
		c->pipe1.comp->tx_completion(c->iface, c->channel_id);
}

/**
 * arm_mbo_chain - helper function that arms an MBO chain for the HDM
 * @c: pointer to interface channel
 * @dir: direction of the channel
 * @compl: pointer to completion function
 *
 * This allocates buffer objects including the containing DMA coherent
 * buffer and puts them in the fifo.
 * Buffers of Rx channels are put in the kthread fifo, hence immediately
 * submitted to the HDM.
 *
 * Returns the number of allocated and enqueued MBOs.
 */
static int arm_mbo_chain(struct most_channel *c, int dir,
			 void (*compl)(struct mbo *))
{
	unsigned int i;
	struct mbo *mbo;
	unsigned long flags;
	u32 coherent_buf_size = c->cfg.buffer_size + c->cfg.extra_len;

	atomic_set(&c->mbo_nq_level, 0);

	for (i = 0; i < c->cfg.num_buffers; i++) {
		mbo = kzalloc(sizeof(*mbo), GFP_KERNEL);
		if (!mbo)
			goto flush_fifos;

		mbo->context = c;
		mbo->ifp = c->iface;
		mbo->hdm_channel_id = c->channel_id;
		if (c->iface->dma_alloc) {
			mbo->virt_address =
				c->iface->dma_alloc(mbo, coherent_buf_size);
		} else {
			mbo->virt_address =
				kzalloc(coherent_buf_size, GFP_KERNEL);
		}
		if (!mbo->virt_address)
			goto release_mbo;

		mbo->complete = compl;
		mbo->num_buffers_ptr = &dummy_num_buffers;
		if (dir == MOST_CH_RX) {
			nq_hdm_mbo(mbo);
			atomic_inc(&c->mbo_nq_level);
		} else {
			spin_lock_irqsave(&c->fifo_lock, flags);
			list_add_tail(&mbo->list, &c->fifo);
			spin_unlock_irqrestore(&c->fifo_lock, flags);
		}
	}
	return c->cfg.num_buffers;

release_mbo:
	kfree(mbo);

flush_fifos:
	flush_channel_fifos(c);
	return 0;
}

/**
 * most_submit_mbo - submits an MBO to fifo
 * @mbo: most buffer
 */
void most_submit_mbo(struct mbo *mbo)
{
	if (WARN_ONCE(!mbo || !mbo->context,
		      "bad mbo or missing channel reference\n"))
		return;

	nq_hdm_mbo(mbo);
}
EXPORT_SYMBOL_GPL(most_submit_mbo);

/**
 * most_write_completion - write completion handler
 * @mbo: most buffer
 *
 * This recycles the MBO for further usage. In case the channel has been
 * poisoned, the MBO is scheduled to be trashed.
 */
static void most_write_completion(struct mbo *mbo)
{
	struct most_channel *c;

	c = mbo->context;
	if (mbo->status == MBO_E_INVAL)
		pr_info("WARN: Tx MBO status: invalid\n");
	if (unlikely(c->is_poisoned || (mbo->status == MBO_E_CLOSE)))
		trash_mbo(mbo);
	else
		arm_mbo(mbo);
}

/**
 * channel_has_mbo - check if a channel has an MBO available
 * @iface: pointer to interface instance
 * @id: channel ID
 * @comp: driver component
 *
 * Returns 1 if an MBO is available, 0 if not, or -EINVAL on a bad channel.
 */
int channel_has_mbo(struct most_interface *iface, int id,
		    struct core_component *comp)
{
	struct most_channel *c = iface->p->channel[id];
	unsigned long flags;
	int empty;

	if (unlikely(!c))
		return -EINVAL;

	if (c->pipe0.refs && c->pipe1.refs &&
	    ((comp == c->pipe0.comp && c->pipe0.num_buffers <= 0) ||
	     (comp == c->pipe1.comp && c->pipe1.num_buffers <= 0)))
		return 0;

	spin_lock_irqsave(&c->fifo_lock, flags);
	empty = list_empty(&c->fifo);
	spin_unlock_irqrestore(&c->fifo_lock, flags);
	return !empty;
}
EXPORT_SYMBOL_GPL(channel_has_mbo);

/**
 * most_get_mbo - get a pointer to an MBO from the pool
 * @iface: pointer to interface instance
 * @id: channel ID
 * @comp: driver component
 *
 * This attempts to get a free buffer out of the channel fifo.
 * Returns a pointer to MBO on success or NULL otherwise.
 */
struct mbo *most_get_mbo(struct most_interface *iface, int id,
			 struct core_component *comp)
{
	struct mbo *mbo;
	struct most_channel *c;
	unsigned long flags;
	int *num_buffers_ptr;

	c = iface->p->channel[id];
	if (unlikely(!c))
		return NULL;

	if (c->pipe0.refs && c->pipe1.refs &&
	    ((comp == c->pipe0.comp && c->pipe0.num_buffers <= 0) ||
	     (comp == c->pipe1.comp && c->pipe1.num_buffers <= 0)))
		return NULL;

	if (comp == c->pipe0.comp)
		num_buffers_ptr = &c->pipe0.num_buffers;
	else if (comp == c->pipe1.comp)
		num_buffers_ptr = &c->pipe1.num_buffers;
	else
		num_buffers_ptr = &dummy_num_buffers;

	spin_lock_irqsave(&c->fifo_lock, flags);
	if (list_empty(&c->fifo)) {
		spin_unlock_irqrestore(&c->fifo_lock, flags);
		return NULL;
	}
	mbo = list_pop_mbo(&c->fifo);
	--*num_buffers_ptr;
	spin_unlock_irqrestore(&c->fifo_lock, flags);

	mbo->num_buffers_ptr = num_buffers_ptr;
	mbo->buffer_length = c->cfg.buffer_size;
	return mbo;
}
EXPORT_SYMBOL_GPL(most_get_mbo);

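/*
 * Illustrative tx-path sketch for a component (assumption: "my_comp" is a
 * registered core_component and the channel has already been started):
 *
 *	struct mbo *mbo = most_get_mbo(iface, id, &my_comp);
 *
 *	if (!mbo)
 *		return -EAGAIN; (pool empty or buffer quota exhausted)
 *	memcpy(mbo->virt_address, data, len);
 *	mbo->buffer_length = len;
 *	most_submit_mbo(mbo); (hands the buffer to the enqueue thread)
 */
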
/**
 * most_put_mbo - return buffer to pool
 * @mbo: most buffer
 */
void most_put_mbo(struct mbo *mbo)
{
	struct most_channel *c = mbo->context;

	if (c->cfg.direction == MOST_CH_TX) {
		arm_mbo(mbo);
		return;
	}
	nq_hdm_mbo(mbo);
	atomic_inc(&c->mbo_nq_level);
}
EXPORT_SYMBOL_GPL(most_put_mbo);

/**
 * most_read_completion - read completion handler
 * @mbo: most buffer
 *
 * This function is called by the HDM when data has been received from the
 * hardware and copied to the buffer of the MBO.
 *
 * In case the channel has been poisoned it puts the buffer in the trash queue.
 * Otherwise, it passes the buffer to a component for further processing.
 */
static void most_read_completion(struct mbo *mbo)
{
	struct most_channel *c = mbo->context;

	if (unlikely(c->is_poisoned || (mbo->status == MBO_E_CLOSE))) {
		trash_mbo(mbo);
		return;
	}

	if (mbo->status == MBO_E_INVAL) {
		nq_hdm_mbo(mbo);
		atomic_inc(&c->mbo_nq_level);
		return;
	}

	if (atomic_sub_and_test(1, &c->mbo_nq_level))
		c->is_starving = 1;

	if (c->pipe0.refs && c->pipe0.comp->rx_completion &&
	    c->pipe0.comp->rx_completion(mbo) == 0)
		return;

	if (c->pipe1.refs && c->pipe1.comp->rx_completion &&
	    c->pipe1.comp->rx_completion(mbo) == 0)
		return;

	most_put_mbo(mbo);
}

/**
 * most_start_channel - prepares a channel for communication
 * @iface: pointer to interface instance
 * @id: channel ID
 * @comp: driver component
 *
 * This prepares the channel for usage. Cross-checks whether the
 * channel has been properly configured.
 *
 * Returns 0 on success or error code otherwise.
 */
int most_start_channel(struct most_interface *iface, int id,
		       struct core_component *comp)
{
	int num_buffer;
	int ret;
	struct most_channel *c = iface->p->channel[id];

	if (unlikely(!c))
		return -EINVAL;

	mutex_lock(&c->start_mutex);
	if (c->pipe0.refs + c->pipe1.refs > 0)
		goto out; /* already started by another component */

	if (!try_module_get(iface->mod)) {
		pr_info("failed to acquire HDM lock\n");
		mutex_unlock(&c->start_mutex);
		return -ENOLCK;
	}

	c->cfg.extra_len = 0;
	if (c->iface->configure(c->iface, c->channel_id, &c->cfg)) {
		pr_info("channel configuration failed. Go check settings...\n");
		ret = -EINVAL;
		goto err_put_module;
	}

	init_waitqueue_head(&c->hdm_fifo_wq);

	if (c->cfg.direction == MOST_CH_RX)
		num_buffer = arm_mbo_chain(c, c->cfg.direction,
					   most_read_completion);
	else
		num_buffer = arm_mbo_chain(c, c->cfg.direction,
					   most_write_completion);
	if (unlikely(!num_buffer)) {
		ret = -ENOMEM;
		goto err_put_module;
	}

	ret = run_enqueue_thread(c, id);
	if (ret)
		goto err_put_module;

	c->is_starving = 0;
	c->pipe0.num_buffers = c->cfg.num_buffers / 2;
	c->pipe1.num_buffers = c->cfg.num_buffers - c->pipe0.num_buffers;
	atomic_set(&c->mbo_ref, num_buffer);

out:
	if (comp == c->pipe0.comp)
		c->pipe0.refs++;
	if (comp == c->pipe1.comp)
		c->pipe1.refs++;
	mutex_unlock(&c->start_mutex);
	return 0;

err_put_module:
	module_put(iface->mod);
	mutex_unlock(&c->start_mutex);
	return ret;
}
EXPORT_SYMBOL_GPL(most_start_channel);

/**
 * most_stop_channel - stops a running channel
 * @iface: pointer to interface instance
 * @id: channel ID
 * @comp: driver component
 */
int most_stop_channel(struct most_interface *iface, int id,
		      struct core_component *comp)
{
	struct most_channel *c;

	if (unlikely((!iface) || (id >= iface->num_channels) || (id < 0))) {
		pr_err("Bad interface or index out of range\n");
		return -EINVAL;
	}
	c = iface->p->channel[id];
	if (unlikely(!c))
		return -EINVAL;

	mutex_lock(&c->start_mutex);
	if (c->pipe0.refs + c->pipe1.refs >= 2)
		goto out;

	if (c->hdm_enqueue_task)
		kthread_stop(c->hdm_enqueue_task);
	c->hdm_enqueue_task = NULL;

	if (iface->mod)
		module_put(iface->mod);

	c->is_poisoned = true;
	if (c->iface->poison_channel(c->iface, c->channel_id)) {
		pr_err("Cannot stop channel %d of mdev %s\n", c->channel_id,
		       c->iface->description);
		mutex_unlock(&c->start_mutex);
		return -EAGAIN;
	}
	flush_trash_fifo(c);
	flush_channel_fifos(c);

#ifdef CMPL_INTERRUPTIBLE
	if (wait_for_completion_interruptible(&c->cleanup)) {
		pr_info("Interrupted while clean up ch %d\n", c->channel_id);
		mutex_unlock(&c->start_mutex);
		return -EINTR;
	}
#else
	wait_for_completion(&c->cleanup);
#endif
	c->is_poisoned = false;

out:
	if (comp == c->pipe0.comp)
		c->pipe0.refs--;
	if (comp == c->pipe1.comp)
		c->pipe1.refs--;
	mutex_unlock(&c->start_mutex);
	return 0;
}
EXPORT_SYMBOL_GPL(most_stop_channel);

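/*
 * Illustrative sketch (assumption: a component named "my_comp") of the
 * channel lifecycle as driven by a component: start the channel once it has
 * been linked, exchange buffers, then stop it again.
 *
 *	ret = most_start_channel(iface, id, &my_comp);
 *	if (ret)
 *		return ret;
 *	...
 *	if (channel_has_mbo(iface, id, &my_comp) > 0)
 *		mbo = most_get_mbo(iface, id, &my_comp);
 *	...
 *	most_stop_channel(iface, id, &my_comp);
 */
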
/**
 * most_register_component - registers a driver component with the core
 * @comp: driver component
 */
int most_register_component(struct core_component *comp)
{
	if (!comp) {
		pr_err("Bad component\n");
		return -EINVAL;
	}
	list_add_tail(&comp->list, &mc.comp_list);
	pr_info("registered new core component %s\n", comp->name);
	return 0;
}
EXPORT_SYMBOL_GPL(most_register_component);

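/*
 * Minimal component sketch (illustrative only; the callbacks shown are the
 * ones this core invokes: probe_channel, disconnect_channel, tx_completion
 * and rx_completion):
 *
 *	static struct core_component my_comp = {
 *		.name = "my_comp",
 *		.probe_channel = my_probe_channel,
 *		.disconnect_channel = my_disconnect_channel,
 *		.rx_completion = my_rx_completion,
 *		.tx_completion = my_tx_completion,
 *	};
 *
 *	most_register_component(&my_comp);
 */
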
static int disconnect_channels(struct device *dev, void *data)
{
	struct most_interface *iface;
	struct most_channel *c, *tmp;
	struct core_component *comp = data;

	iface = to_most_interface(dev);
	list_for_each_entry_safe(c, tmp, &iface->p->channel_list, list) {
		if (c->pipe0.comp == comp || c->pipe1.comp == comp)
			comp->disconnect_channel(c->iface, c->channel_id);
		if (c->pipe0.comp == comp)
			c->pipe0.comp = NULL;
		if (c->pipe1.comp == comp)
			c->pipe1.comp = NULL;
	}
	return 0;
}

57562a72 1373/**
b7937dc4
CG
1374 * most_deregister_component - deregisters a driver component with the core
1375 * @comp: driver component
57562a72 1376 */
5a5abf02 1377int most_deregister_component(struct core_component *comp)
57562a72 1378{
5a5abf02 1379 if (!comp) {
b7937dc4 1380 pr_err("Bad component\n");
57562a72
CG
1381 return -EINVAL;
1382 }
1383
5a5abf02
CG
1384 bus_for_each_dev(&mc.bus, NULL, comp, disconnect_channels);
1385 list_del(&comp->list);
b7937dc4 1386 pr_info("deregistering component %s\n", comp->name);
57562a72
CG
1387 return 0;
1388}
ed021a0f 1389EXPORT_SYMBOL_GPL(most_deregister_component);
57562a72 1390
static void release_interface(struct device *dev)
{
	pr_info("releasing interface dev %s...\n", dev_name(dev));
}

static void release_channel(struct device *dev)
{
	pr_info("releasing channel dev %s...\n", dev_name(dev));
}

/**
 * most_register_interface - registers an interface with core
 * @iface: device interface
 *
 * Allocates and initializes a new interface instance and all of its channels.
 * Returns 0 on success or a negative error code otherwise.
 */
int most_register_interface(struct most_interface *iface)
{
	unsigned int i;
	int id;
	struct most_channel *c;

	if (!iface || !iface->enqueue || !iface->configure ||
	    !iface->poison_channel || (iface->num_channels > MAX_CHANNELS)) {
		pr_err("Bad interface or channel overflow\n");
		return -EINVAL;
	}

	id = ida_simple_get(&mdev_id, 0, 0, GFP_KERNEL);
	if (id < 0) {
		pr_info("Failed to alloc mdev ID\n");
		return id;
	}

	iface->p = kzalloc(sizeof(*iface->p), GFP_KERNEL);
	if (!iface->p) {
		ida_simple_remove(&mdev_id, id);
		return -ENOMEM;
	}

	INIT_LIST_HEAD(&iface->p->channel_list);
	iface->p->dev_id = id;
	snprintf(iface->p->name, STRING_SIZE, "mdev%d", id);
	iface->dev.init_name = iface->p->name;
	iface->dev.bus = &mc.bus;
	iface->dev.parent = &mc.dev;
	iface->dev.groups = interface_attr_groups;
	iface->dev.release = release_interface;
	if (device_register(&iface->dev)) {
		pr_err("registering iface->dev failed\n");
		kfree(iface->p);
		ida_simple_remove(&mdev_id, id);
		return -ENOMEM;
	}

	for (i = 0; i < iface->num_channels; i++) {
		const char *name_suffix = iface->channel_vector[i].name_suffix;

		c = kzalloc(sizeof(*c), GFP_KERNEL);
		if (!c)
			goto err_free_resources;
		if (!name_suffix)
			snprintf(c->name, STRING_SIZE, "ch%d", i);
		else
			snprintf(c->name, STRING_SIZE, "%s", name_suffix);
		c->dev.init_name = c->name;
		c->dev.parent = &iface->dev;
		c->dev.groups = channel_attr_groups;
		c->dev.release = release_channel;
		iface->p->channel[i] = c;
		c->is_starving = 0;
		c->iface = iface;
		c->channel_id = i;
		c->keep_mbo = false;
		c->enqueue_halt = false;
		c->is_poisoned = false;
		c->cfg.direction = 0;
		c->cfg.data_type = 0;
		c->cfg.num_buffers = 0;
		c->cfg.buffer_size = 0;
		c->cfg.subbuffer_size = 0;
		c->cfg.packets_per_xact = 0;
		spin_lock_init(&c->fifo_lock);
		INIT_LIST_HEAD(&c->fifo);
		INIT_LIST_HEAD(&c->trash_fifo);
		INIT_LIST_HEAD(&c->halt_fifo);
		init_completion(&c->cleanup);
		atomic_set(&c->mbo_ref, 0);
		mutex_init(&c->start_mutex);
		mutex_init(&c->nq_mutex);
		list_add_tail(&c->list, &iface->p->channel_list);
		if (device_register(&c->dev)) {
			pr_err("registering c->dev failed\n");
			goto err_free_most_channel;
		}
	}
	pr_info("registered new device mdev%d (%s)\n", id, iface->description);
	return 0;

err_free_most_channel:
	kfree(c);

err_free_resources:
	while (i > 0) {
		c = iface->p->channel[--i];
		device_unregister(&c->dev);
		kfree(c);
	}
	kfree(iface->p);
	device_unregister(&iface->dev);
	ida_simple_remove(&mdev_id, id);
	return -ENOMEM;
}
EXPORT_SYMBOL_GPL(most_register_interface);

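/*
 * Illustrative HDM-side sketch (the "my_*" names are placeholders) of
 * registering an interface; the callbacks shown are the ones the core
 * requires above (enqueue, configure, poison_channel):
 *
 *	static struct most_interface my_iface = {
 *		.interface = ITYPE_USB,
 *		.description = "my hdm",
 *		.num_channels = 2,
 *		.channel_vector = my_channel_caps,
 *		.enqueue = my_hdm_enqueue,
 *		.configure = my_hdm_configure,
 *		.poison_channel = my_hdm_poison_channel,
 *	};
 *
 *	most_register_interface(&my_iface);
 */
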
/**
 * most_deregister_interface - deregisters an interface with core
 * @iface: device interface
 *
 * Before removing an interface instance from the list, all running
 * channels are stopped and poisoned.
 */
void most_deregister_interface(struct most_interface *iface)
{
	int i;
	struct most_channel *c;

	pr_info("deregistering device %s (%s)\n", dev_name(&iface->dev),
		iface->description);
	for (i = 0; i < iface->num_channels; i++) {
		c = iface->p->channel[i];
		if (c->pipe0.comp)
			c->pipe0.comp->disconnect_channel(c->iface,
							  c->channel_id);
		if (c->pipe1.comp)
			c->pipe1.comp->disconnect_channel(c->iface,
							  c->channel_id);
		c->pipe0.comp = NULL;
		c->pipe1.comp = NULL;
		list_del(&c->list);
		device_unregister(&c->dev);
		kfree(c);
	}

	ida_simple_remove(&mdev_id, iface->p->dev_id);
	kfree(iface->p);
	device_unregister(&iface->dev);
}
EXPORT_SYMBOL_GPL(most_deregister_interface);

/**
 * most_stop_enqueue - prevents core from enqueueing MBOs
 * @iface: pointer to interface
 * @id: channel id
 *
 * This is called by an HDM that _cannot_ attend to its duties and is
 * about to be overrun by the core. The core will not enqueue any
 * further packets until the flagging HDM calls most_resume_enqueue().
 */
void most_stop_enqueue(struct most_interface *iface, int id)
{
	struct most_channel *c = iface->p->channel[id];

	if (!c)
		return;

	mutex_lock(&c->nq_mutex);
	c->enqueue_halt = true;
	mutex_unlock(&c->nq_mutex);
}
EXPORT_SYMBOL_GPL(most_stop_enqueue);

/**
 * most_resume_enqueue - allow core to enqueue MBOs again
 * @iface: pointer to interface
 * @id: channel id
 *
 * This clears the enqueue halt flag and enqueues all MBOs currently
 * sitting in the wait fifo.
 */
void most_resume_enqueue(struct most_interface *iface, int id)
{
	struct most_channel *c = iface->p->channel[id];

	if (!c)
		return;

	mutex_lock(&c->nq_mutex);
	c->enqueue_halt = false;
	mutex_unlock(&c->nq_mutex);

	wake_up_interruptible(&c->hdm_fifo_wq);
}
EXPORT_SYMBOL_GPL(most_resume_enqueue);

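/*
 * Illustrative flow-control sketch for an HDM whose hardware fifo runs
 * full (names are placeholders):
 *
 *	most_stop_enqueue(&my_iface, channel_id);
 *	... (drain the hardware, service the interrupt, etc.)
 *	most_resume_enqueue(&my_iface, channel_id);
 */
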
static void release_most_sub(struct device *dev)
{
	pr_info("releasing most_subsystem\n");
}

static int __init most_init(void)
{
	int err;

	pr_info("init()\n");
	INIT_LIST_HEAD(&mc.comp_list);
	ida_init(&mdev_id);

	mc.bus.name = "most";
	mc.bus.match = most_match;
	mc.drv.name = "most_core";
	mc.drv.bus = &mc.bus;
	mc.drv.groups = mc_attr_groups;

	err = bus_register(&mc.bus);
	if (err) {
		pr_info("Cannot register most bus\n");
		return err;
	}
	err = driver_register(&mc.drv);
	if (err) {
		pr_info("Cannot register core driver\n");
		goto err_unregister_bus;
	}
	mc.dev.init_name = "most_bus";
	mc.dev.release = release_most_sub;
	if (device_register(&mc.dev)) {
		err = -ENOMEM;
		goto err_unregister_driver;
	}

	return 0;

err_unregister_driver:
	driver_unregister(&mc.drv);
err_unregister_bus:
	bus_unregister(&mc.bus);
	return err;
}

static void __exit most_exit(void)
{
	pr_info("exit core module\n");
	device_unregister(&mc.dev);
	driver_unregister(&mc.drv);
	bus_unregister(&mc.bus);
	ida_destroy(&mdev_id);
}

module_init(most_init);
module_exit(most_exit);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Christian Gromm <christian.gromm@microchip.com>");
MODULE_DESCRIPTION("Core module of stacked MOST Linux driver");