staging: most: core: fix data type
[linux-2.6-block.git] / drivers / staging / most / core.c
CommitLineData
1a79f22d 1// SPDX-License-Identifier: GPL-2.0
57562a72
CG
2/*
3 * core.c - Implementation of core module of MOST Linux driver stack
4 *
5 * Copyright (C) 2013-2015 Microchip Technology Germany II GmbH & Co. KG
57562a72
CG
6 */
7
8#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
9#include <linux/module.h>
10#include <linux/fs.h>
11#include <linux/slab.h>
12#include <linux/init.h>
13#include <linux/device.h>
14#include <linux/list.h>
15#include <linux/poll.h>
16#include <linux/wait.h>
17#include <linux/kobject.h>
18#include <linux/mutex.h>
19#include <linux/completion.h>
20#include <linux/sysfs.h>
21#include <linux/kthread.h>
22#include <linux/dma-mapping.h>
23#include <linux/idr.h>
057301cd 24#include <most/core.h>
57562a72
CG
25
26#define MAX_CHANNELS 64
27#define STRING_SIZE 80
28
57562a72 29static struct ida mdev_id;
71457d48 30static int dummy_num_buffers;
57562a72 31
14ae5f03
CG
32static struct mostcore {
33 struct device dev;
34 struct device_driver drv;
35 struct bus_type bus;
36 struct class *class;
81ce26b7 37 struct list_head comp_list;
14ae5f03
CG
38} mc;
39
40#define to_driver(d) container_of(d, struct mostcore, drv);
41
/*
 * One attachment point of a driver component to a channel.  Each
 * channel carries two of these (pipe0/pipe1), so at most two
 * components can be linked to a channel at a time.
 */
struct pipe {
	struct core_component *comp;	/* NULL while the slot is free */
	int refs;
	int num_buffers;		/* per-component buffer quota */
};
47
/* Runtime state of one physical channel of a MOST interface. */
struct most_channel {
	struct device dev;
	struct completion cleanup;	/* completed when the last MBO ref drops */
	atomic_t mbo_ref;		/* MBOs still owned by this channel */
	atomic_t mbo_nq_level;		/* MBOs currently enqueued to the HDM */
	u16 channel_id;			/* index into iface->channel_vector */
	char name[STRING_SIZE];
	bool is_poisoned;		/* shutting down: recycle MBOs to trash */
	struct mutex start_mutex;
	struct mutex nq_mutex; /* nq thread synchronization */
	int is_starving;
	struct most_interface *iface;
	struct most_channel_config cfg;	/* user-configured settings via sysfs */
	bool keep_mbo;
	bool enqueue_halt;		/* enqueue thread must not submit while set */
	struct list_head fifo;		/* pool of armed MBOs */
	spinlock_t fifo_lock;		/* guards fifo, halt_fifo and trash_fifo */
	struct list_head halt_fifo;	/* MBOs waiting for the enqueue thread */
	struct list_head list;
	struct pipe pipe0;		/* up to two linked components per channel */
	struct pipe pipe1;
	struct list_head trash_fifo;	/* MBOs scheduled for disposal */
	struct task_struct *hdm_enqueue_task;
	wait_queue_head_t hdm_fifo_wq;	/* wakes hdm_enqueue_thread */

};

#define to_channel(d) container_of(d, struct most_channel, dev)
57562a72 76
/* Core-private per-interface bookkeeping, reached through iface->p. */
struct interface_private {
	int dev_id;
	char name[STRING_SIZE];
	struct most_channel *channel[MAX_CHANNELS];	/* indexed by channel id */
	struct list_head channel_list;			/* all channels of this iface */
};
9cbe5aa6 83
e7f2b70f
HPGE
/*
 * Mapping between channel data-type flags and the strings accepted or
 * produced by the 'set_datatype' sysfs attribute.  The trailing '\n' is
 * part of each name, matching the raw sysfs write format.
 * MOST_CH_ISOC appears twice so the legacy "isoc_avp" spelling is still
 * accepted on write; flag-based lookups return the first ("isoc") entry.
 */
static const struct {
	int most_ch_data_type;
	const char *name;
} ch_data_type[] = {
	{ MOST_CH_CONTROL, "control\n" },
	{ MOST_CH_ASYNC, "async\n" },
	{ MOST_CH_SYNC, "sync\n" },
	{ MOST_CH_ISOC, "isoc\n"},
	{ MOST_CH_ISOC, "isoc_avp\n"},
};
e7f2b70f 94
57562a72
CG
/**
 * list_pop_mbo - retrieves the first MBO of the list and removes it
 * @ptr: the list head to grab the MBO from.
 *
 * Caller must hold the lock protecting the list and must not pass an
 * empty list (list_first_entry() on an empty list is undefined).
 */
#define list_pop_mbo(ptr)						\
({									\
	struct mbo *_mbo = list_first_entry(ptr, struct mbo, list);	\
	list_del(&_mbo->list);						\
	_mbo;								\
})
105
57562a72
CG
106/**
107 * most_free_mbo_coherent - free an MBO and its coherent buffer
b7937dc4 108 * @mbo: most buffer
57562a72
CG
109 */
110static void most_free_mbo_coherent(struct mbo *mbo)
111{
fcb7fad8 112 struct most_channel *c = mbo->context;
57562a72
CG
113 u16 const coherent_buf_size = c->cfg.buffer_size + c->cfg.extra_len;
114
115 dma_free_coherent(NULL, coherent_buf_size, mbo->virt_address,
116 mbo->bus_address);
117 kfree(mbo);
118 if (atomic_sub_and_test(1, &c->mbo_ref))
119 complete(&c->cleanup);
120}
121
/**
 * flush_channel_fifos - clear the channel fifos
 * @c: pointer to channel object
 *
 * Frees every MBO queued on the regular and the halt fifo.  The spinlock
 * is dropped around most_free_mbo_coherent() because freeing DMA memory
 * must not happen in atomic context.
 *
 * NOTE(review): while the lock is dropped, the list may be modified
 * concurrently and the _safe iterator's lookahead entry can go stale —
 * confirm callers only invoke this after the channel is quiesced.
 */
static void flush_channel_fifos(struct most_channel *c)
{
	unsigned long flags, hf_flags;
	struct mbo *mbo, *tmp;

	/* Fast path: nothing queued anywhere. */
	if (list_empty(&c->fifo) && list_empty(&c->halt_fifo))
		return;

	spin_lock_irqsave(&c->fifo_lock, flags);
	list_for_each_entry_safe(mbo, tmp, &c->fifo, list) {
		list_del(&mbo->list);
		spin_unlock_irqrestore(&c->fifo_lock, flags);
		most_free_mbo_coherent(mbo);
		spin_lock_irqsave(&c->fifo_lock, flags);
	}
	spin_unlock_irqrestore(&c->fifo_lock, flags);

	spin_lock_irqsave(&c->fifo_lock, hf_flags);
	list_for_each_entry_safe(mbo, tmp, &c->halt_fifo, list) {
		list_del(&mbo->list);
		spin_unlock_irqrestore(&c->fifo_lock, hf_flags);
		most_free_mbo_coherent(mbo);
		spin_lock_irqsave(&c->fifo_lock, hf_flags);
	}
	spin_unlock_irqrestore(&c->fifo_lock, hf_flags);

	if (unlikely((!list_empty(&c->fifo) || !list_empty(&c->halt_fifo))))
		pr_info("WARN: fifo | trash fifo not empty\n");
}
155
156/**
157 * flush_trash_fifo - clear the trash fifo
158 * @c: pointer to channel object
159 */
fcb7fad8 160static int flush_trash_fifo(struct most_channel *c)
57562a72
CG
161{
162 struct mbo *mbo, *tmp;
163 unsigned long flags;
164
165 spin_lock_irqsave(&c->fifo_lock, flags);
166 list_for_each_entry_safe(mbo, tmp, &c->trash_fifo, list) {
167 list_del(&mbo->list);
168 spin_unlock_irqrestore(&c->fifo_lock, flags);
169 most_free_mbo_coherent(mbo);
170 spin_lock_irqsave(&c->fifo_lock, flags);
171 }
172 spin_unlock_irqrestore(&c->fifo_lock, flags);
173 return 0;
174}
175
4d5f022f
CG
176static ssize_t available_directions_show(struct device *dev,
177 struct device_attribute *attr,
edaa1e33 178 char *buf)
57562a72 179{
fcb7fad8 180 struct most_channel *c = to_channel(dev);
57562a72
CG
181 unsigned int i = c->channel_id;
182
183 strcpy(buf, "");
184 if (c->iface->channel_vector[i].direction & MOST_CH_RX)
95f73013 185 strcat(buf, "rx ");
57562a72 186 if (c->iface->channel_vector[i].direction & MOST_CH_TX)
95f73013 187 strcat(buf, "tx ");
57562a72 188 strcat(buf, "\n");
22ff195b 189 return strlen(buf);
57562a72
CG
190}
191
4d5f022f
CG
192static ssize_t available_datatypes_show(struct device *dev,
193 struct device_attribute *attr,
57562a72
CG
194 char *buf)
195{
fcb7fad8 196 struct most_channel *c = to_channel(dev);
57562a72
CG
197 unsigned int i = c->channel_id;
198
199 strcpy(buf, "");
200 if (c->iface->channel_vector[i].data_type & MOST_CH_CONTROL)
201 strcat(buf, "control ");
202 if (c->iface->channel_vector[i].data_type & MOST_CH_ASYNC)
203 strcat(buf, "async ");
204 if (c->iface->channel_vector[i].data_type & MOST_CH_SYNC)
205 strcat(buf, "sync ");
0540609f 206 if (c->iface->channel_vector[i].data_type & MOST_CH_ISOC)
95f73013 207 strcat(buf, "isoc ");
57562a72 208 strcat(buf, "\n");
22ff195b 209 return strlen(buf);
57562a72
CG
210}
211
4d5f022f
CG
212static ssize_t number_of_packet_buffers_show(struct device *dev,
213 struct device_attribute *attr,
4dd7c7c7 214 char *buf)
57562a72 215{
fcb7fad8 216 struct most_channel *c = to_channel(dev);
57562a72
CG
217 unsigned int i = c->channel_id;
218
219 return snprintf(buf, PAGE_SIZE, "%d\n",
220 c->iface->channel_vector[i].num_buffers_packet);
221}
222
4d5f022f
CG
223static ssize_t number_of_stream_buffers_show(struct device *dev,
224 struct device_attribute *attr,
4dd7c7c7 225 char *buf)
57562a72 226{
fcb7fad8 227 struct most_channel *c = to_channel(dev);
57562a72
CG
228 unsigned int i = c->channel_id;
229
230 return snprintf(buf, PAGE_SIZE, "%d\n",
231 c->iface->channel_vector[i].num_buffers_streaming);
232}
233
4d5f022f
CG
234static ssize_t size_of_packet_buffer_show(struct device *dev,
235 struct device_attribute *attr,
4dd7c7c7 236 char *buf)
57562a72 237{
fcb7fad8 238 struct most_channel *c = to_channel(dev);
57562a72
CG
239 unsigned int i = c->channel_id;
240
241 return snprintf(buf, PAGE_SIZE, "%d\n",
242 c->iface->channel_vector[i].buffer_size_packet);
243}
244
4d5f022f
CG
245static ssize_t size_of_stream_buffer_show(struct device *dev,
246 struct device_attribute *attr,
4dd7c7c7 247 char *buf)
57562a72 248{
fcb7fad8 249 struct most_channel *c = to_channel(dev);
57562a72
CG
250 unsigned int i = c->channel_id;
251
252 return snprintf(buf, PAGE_SIZE, "%d\n",
253 c->iface->channel_vector[i].buffer_size_streaming);
254}
255
4d5f022f
CG
256static ssize_t channel_starving_show(struct device *dev,
257 struct device_attribute *attr,
57562a72
CG
258 char *buf)
259{
fcb7fad8 260 struct most_channel *c = to_channel(dev);
4d5f022f 261
57562a72
CG
262 return snprintf(buf, PAGE_SIZE, "%d\n", c->is_starving);
263}
264
4d5f022f
CG
265static ssize_t set_number_of_buffers_show(struct device *dev,
266 struct device_attribute *attr,
57562a72
CG
267 char *buf)
268{
fcb7fad8 269 struct most_channel *c = to_channel(dev);
4d5f022f 270
57562a72
CG
271 return snprintf(buf, PAGE_SIZE, "%d\n", c->cfg.num_buffers);
272}
273
4d5f022f
CG
274static ssize_t set_number_of_buffers_store(struct device *dev,
275 struct device_attribute *attr,
57562a72
CG
276 const char *buf,
277 size_t count)
278{
fcb7fad8 279 struct most_channel *c = to_channel(dev);
57562a72
CG
280 int ret = kstrtou16(buf, 0, &c->cfg.num_buffers);
281
282 if (ret)
283 return ret;
284 return count;
285}
286
4d5f022f
CG
287static ssize_t set_buffer_size_show(struct device *dev,
288 struct device_attribute *attr,
57562a72
CG
289 char *buf)
290{
fcb7fad8 291 struct most_channel *c = to_channel(dev);
4d5f022f 292
57562a72
CG
293 return snprintf(buf, PAGE_SIZE, "%d\n", c->cfg.buffer_size);
294}
295
4d5f022f
CG
296static ssize_t set_buffer_size_store(struct device *dev,
297 struct device_attribute *attr,
57562a72
CG
298 const char *buf,
299 size_t count)
300{
fcb7fad8 301 struct most_channel *c = to_channel(dev);
57562a72
CG
302 int ret = kstrtou16(buf, 0, &c->cfg.buffer_size);
303
304 if (ret)
305 return ret;
306 return count;
307}
308
4d5f022f
CG
309static ssize_t set_direction_show(struct device *dev,
310 struct device_attribute *attr,
57562a72
CG
311 char *buf)
312{
fcb7fad8 313 struct most_channel *c = to_channel(dev);
4d5f022f 314
57562a72 315 if (c->cfg.direction & MOST_CH_TX)
95f73013 316 return snprintf(buf, PAGE_SIZE, "tx\n");
57562a72 317 else if (c->cfg.direction & MOST_CH_RX)
95f73013 318 return snprintf(buf, PAGE_SIZE, "rx\n");
57562a72
CG
319 return snprintf(buf, PAGE_SIZE, "unconfigured\n");
320}
321
4d5f022f
CG
322static ssize_t set_direction_store(struct device *dev,
323 struct device_attribute *attr,
57562a72
CG
324 const char *buf,
325 size_t count)
326{
fcb7fad8 327 struct most_channel *c = to_channel(dev);
4d5f022f 328
9deba73d 329 if (!strcmp(buf, "dir_rx\n")) {
57562a72 330 c->cfg.direction = MOST_CH_RX;
95f73013
CG
331 } else if (!strcmp(buf, "rx\n")) {
332 c->cfg.direction = MOST_CH_RX;
9deba73d 333 } else if (!strcmp(buf, "dir_tx\n")) {
57562a72 334 c->cfg.direction = MOST_CH_TX;
95f73013
CG
335 } else if (!strcmp(buf, "tx\n")) {
336 c->cfg.direction = MOST_CH_TX;
9deba73d 337 } else {
57562a72
CG
338 pr_info("WARN: invalid attribute settings\n");
339 return -EINVAL;
340 }
341 return count;
342}
343
4d5f022f
CG
344static ssize_t set_datatype_show(struct device *dev,
345 struct device_attribute *attr,
57562a72
CG
346 char *buf)
347{
e7f2b70f 348 int i;
fcb7fad8 349 struct most_channel *c = to_channel(dev);
e7f2b70f
HPGE
350
351 for (i = 0; i < ARRAY_SIZE(ch_data_type); i++) {
352 if (c->cfg.data_type & ch_data_type[i].most_ch_data_type)
353 return snprintf(buf, PAGE_SIZE, ch_data_type[i].name);
354 }
57562a72
CG
355 return snprintf(buf, PAGE_SIZE, "unconfigured\n");
356}
357
4d5f022f
CG
358static ssize_t set_datatype_store(struct device *dev,
359 struct device_attribute *attr,
57562a72
CG
360 const char *buf,
361 size_t count)
362{
e7f2b70f 363 int i;
fcb7fad8 364 struct most_channel *c = to_channel(dev);
e7f2b70f
HPGE
365
366 for (i = 0; i < ARRAY_SIZE(ch_data_type); i++) {
367 if (!strcmp(buf, ch_data_type[i].name)) {
368 c->cfg.data_type = ch_data_type[i].most_ch_data_type;
369 break;
370 }
371 }
372
373 if (i == ARRAY_SIZE(ch_data_type)) {
57562a72
CG
374 pr_info("WARN: invalid attribute settings\n");
375 return -EINVAL;
376 }
377 return count;
378}
379
4d5f022f
CG
380static ssize_t set_subbuffer_size_show(struct device *dev,
381 struct device_attribute *attr,
57562a72
CG
382 char *buf)
383{
fcb7fad8 384 struct most_channel *c = to_channel(dev);
4d5f022f 385
57562a72
CG
386 return snprintf(buf, PAGE_SIZE, "%d\n", c->cfg.subbuffer_size);
387}
388
4d5f022f
CG
389static ssize_t set_subbuffer_size_store(struct device *dev,
390 struct device_attribute *attr,
57562a72
CG
391 const char *buf,
392 size_t count)
393{
fcb7fad8 394 struct most_channel *c = to_channel(dev);
57562a72
CG
395 int ret = kstrtou16(buf, 0, &c->cfg.subbuffer_size);
396
397 if (ret)
398 return ret;
399 return count;
400}
401
4d5f022f
CG
402static ssize_t set_packets_per_xact_show(struct device *dev,
403 struct device_attribute *attr,
57562a72
CG
404 char *buf)
405{
fcb7fad8 406 struct most_channel *c = to_channel(dev);
4d5f022f 407
57562a72
CG
408 return snprintf(buf, PAGE_SIZE, "%d\n", c->cfg.packets_per_xact);
409}
410
4d5f022f
CG
411static ssize_t set_packets_per_xact_store(struct device *dev,
412 struct device_attribute *attr,
57562a72
CG
413 const char *buf,
414 size_t count)
415{
fcb7fad8 416 struct most_channel *c = to_channel(dev);
57562a72
CG
417 int ret = kstrtou16(buf, 0, &c->cfg.packets_per_xact);
418
419 if (ret)
420 return ret;
421 return count;
422}
423
4d5f022f
CG
/* Shorthand for referencing a DEVICE_ATTR's embedded struct attribute. */
#define DEV_ATTR(_name) (&dev_attr_##_name.attr)

static DEVICE_ATTR_RO(available_directions);
static DEVICE_ATTR_RO(available_datatypes);
static DEVICE_ATTR_RO(number_of_packet_buffers);
static DEVICE_ATTR_RO(number_of_stream_buffers);
static DEVICE_ATTR_RO(size_of_stream_buffer);
static DEVICE_ATTR_RO(size_of_packet_buffer);
static DEVICE_ATTR_RO(channel_starving);
static DEVICE_ATTR_RW(set_buffer_size);
static DEVICE_ATTR_RW(set_number_of_buffers);
static DEVICE_ATTR_RW(set_direction);
static DEVICE_ATTR_RW(set_datatype);
static DEVICE_ATTR_RW(set_subbuffer_size);
static DEVICE_ATTR_RW(set_packets_per_xact);

/* Sysfs attributes exposed by every channel device. */
static struct attribute *channel_attrs[] = {
	DEV_ATTR(available_directions),
	DEV_ATTR(available_datatypes),
	DEV_ATTR(number_of_packet_buffers),
	DEV_ATTR(number_of_stream_buffers),
	DEV_ATTR(size_of_stream_buffer),
	DEV_ATTR(size_of_packet_buffer),
	DEV_ATTR(channel_starving),
	DEV_ATTR(set_buffer_size),
	DEV_ATTR(set_number_of_buffers),
	DEV_ATTR(set_direction),
	DEV_ATTR(set_datatype),
	DEV_ATTR(set_subbuffer_size),
	DEV_ATTR(set_packets_per_xact),
	NULL,
};

static struct attribute_group channel_attr_group = {
	.attrs = channel_attrs,
};

/* NULL-terminated group list passed to the channel device on creation. */
static const struct attribute_group *channel_attr_groups[] = {
	&channel_attr_group,
	NULL,
};
57562a72 465
4d5f022f
CG
466static ssize_t description_show(struct device *dev,
467 struct device_attribute *attr,
57562a72
CG
468 char *buf)
469{
4d5f022f
CG
470 struct most_interface *iface = to_most_interface(dev);
471
472 return snprintf(buf, PAGE_SIZE, "%s\n", iface->description);
57562a72
CG
473}
474
4d5f022f
CG
475static ssize_t interface_show(struct device *dev,
476 struct device_attribute *attr,
57562a72
CG
477 char *buf)
478{
4d5f022f
CG
479 struct most_interface *iface = to_most_interface(dev);
480
481 switch (iface->interface) {
57562a72
CG
482 case ITYPE_LOOPBACK:
483 return snprintf(buf, PAGE_SIZE, "loopback\n");
484 case ITYPE_I2C:
485 return snprintf(buf, PAGE_SIZE, "i2c\n");
486 case ITYPE_I2S:
487 return snprintf(buf, PAGE_SIZE, "i2s\n");
488 case ITYPE_TSI:
489 return snprintf(buf, PAGE_SIZE, "tsi\n");
490 case ITYPE_HBI:
491 return snprintf(buf, PAGE_SIZE, "hbi\n");
492 case ITYPE_MEDIALB_DIM:
493 return snprintf(buf, PAGE_SIZE, "mlb_dim\n");
494 case ITYPE_MEDIALB_DIM2:
495 return snprintf(buf, PAGE_SIZE, "mlb_dim2\n");
496 case ITYPE_USB:
497 return snprintf(buf, PAGE_SIZE, "usb\n");
498 case ITYPE_PCIE:
499 return snprintf(buf, PAGE_SIZE, "pcie\n");
500 }
501 return snprintf(buf, PAGE_SIZE, "unknown\n");
502}
503
4d5f022f
CG
static DEVICE_ATTR_RO(description);
static DEVICE_ATTR_RO(interface);

/* Sysfs attributes exposed by every interface device. */
static struct attribute *interface_attrs[] = {
	DEV_ATTR(description),
	DEV_ATTR(interface),
	NULL,
};

static struct attribute_group interface_attr_group = {
	.attrs = interface_attrs,
};

/* NULL-terminated group list passed to the interface device on creation. */
static const struct attribute_group *interface_attr_groups[] = {
	&interface_attr_group,
	NULL,
};
57562a72 521
fdbdc0e6 522static struct core_component *match_component(char *name)
bdafb7e8 523{
5a5abf02 524 struct core_component *comp;
bdafb7e8 525
5a5abf02
CG
526 list_for_each_entry(comp, &mc.comp_list, list) {
527 if (!strcmp(comp->name, name))
528 return comp;
bdafb7e8
CG
529 }
530 return NULL;
531}
532
9136fccf 533int print_links(struct device *dev, void *data)
57562a72 534{
bc5f96a1 535 int offs = 0;
9136fccf
CG
536 char *buf = data;
537 struct most_channel *c;
538 struct most_interface *iface = to_most_interface(dev);
bc5f96a1 539
9136fccf 540 list_for_each_entry(c, &iface->p->channel_list, list) {
5a5abf02 541 if (c->pipe0.comp) {
9136fccf
CG
542 offs += snprintf(buf + offs,
543 PAGE_SIZE - offs,
544 "%s:%s:%s\n",
5a5abf02 545 c->pipe0.comp->name,
9136fccf
CG
546 dev_name(&iface->dev),
547 dev_name(&c->dev));
548 }
5a5abf02 549 if (c->pipe1.comp) {
9136fccf
CG
550 offs += snprintf(buf + offs,
551 PAGE_SIZE - offs,
552 "%s:%s:%s\n",
5a5abf02 553 c->pipe1.comp->name,
9136fccf
CG
554 dev_name(&iface->dev),
555 dev_name(&c->dev));
bc5f96a1
CG
556 }
557 }
9136fccf
CG
558 return 0;
559}
560
561static ssize_t links_show(struct device_driver *drv, char *buf)
562{
563 bus_for_each_dev(&mc.bus, NULL, buf, print_links);
564 return strlen(buf);
57562a72
CG
565}
566
fdbdc0e6 567static ssize_t components_show(struct device_driver *drv, char *buf)
bdafb7e8 568{
5a5abf02 569 struct core_component *comp;
bdafb7e8
CG
570 int offs = 0;
571
5a5abf02 572 list_for_each_entry(comp, &mc.comp_list, list) {
bdafb7e8 573 offs += snprintf(buf + offs, PAGE_SIZE - offs, "%s\n",
5a5abf02 574 comp->name);
bdafb7e8
CG
575 }
576 return offs;
577}
/**
 * split_string - parses buf and extracts ':' separated substrings.
 *
 * @buf: complete string from attribute 'add_channel'
 * @a: storage for 1st substring (=interface name)
 * @b: storage for 2nd substring (=channel name)
 * @c: storage for 3rd substring (=component name)
 * @d: storage optional 4th substring (=user defined name)
 *
 * Examples:
 *
 * Input: "mdev0:ch6:cdev:my_channel\n" or
 *        "mdev0:ch6:cdev:my_channel"
 *
 * Output: *a -> "mdev0", *b -> "ch6", *c -> "cdev" *d -> "my_channel"
 *
 * Input: "mdev1:ep81:cdev\n"
 * Output: *a -> "mdev1", *b -> "ep81", *c -> "cdev" *d -> ""
 *
 * Input: "mdev1:ep81"
 * Output: *a -> "mdev1", *b -> "ep81", *c -> "cdev" *d == NULL
 *
 * Note: strsep() mutates @buf in place, replacing separators with '\0';
 * the returned pointers alias into @buf.  Returns 0 on success, -EIO
 * when fewer than three substrings are present.
 */
static int split_string(char *buf, char **a, char **b, char **c, char **d)
{
	*a = strsep(&buf, ":");
	if (!*a)
		return -EIO;

	*b = strsep(&buf, ":\n");
	if (!*b)
		return -EIO;

	*c = strsep(&buf, ":\n");
	if (!*c)
		return -EIO;

	/* 4th substring is optional; callers pass d == NULL to skip it */
	if (d)
		*d = strsep(&buf, ":\n");

	return 0;
}
619
/* bus_find_device() callback: match a bus device by its name. */
static int match_bus_dev(struct device *dev, void *data)
{
	char *wanted = data;

	return strcmp(dev_name(dev), wanted) == 0;
}
626
57562a72 627/**
b7937dc4
CG
628 * get_channel - get pointer to channel
629 * @mdev: name of the device interface
630 * @mdev_ch: name of channel
57562a72 631 */
ec0c2f62 632static struct most_channel *get_channel(char *mdev, char *mdev_ch)
57562a72 633{
9136fccf
CG
634 struct device *dev = NULL;
635 struct most_interface *iface;
fcb7fad8 636 struct most_channel *c, *tmp;
57562a72 637
9136fccf
CG
638 dev = bus_find_device(&mc.bus, NULL, mdev, match_bus_dev);
639 if (!dev)
640 return NULL;
641 iface = to_most_interface(dev);
642 list_for_each_entry_safe(c, tmp, &iface->p->channel_list, list) {
643 if (!strcmp(dev_name(&c->dev), mdev_ch))
644 return c;
57562a72 645 }
9136fccf 646 return NULL;
57562a72
CG
647}
648
fcb7fad8 649static
db09fe0d
CG
650inline int link_channel_to_component(struct most_channel *c,
651 struct core_component *comp,
652 char *comp_param)
e6e79b44
CG
653{
654 int ret;
5a5abf02 655 struct core_component **comp_ptr;
e6e79b44 656
5a5abf02
CG
657 if (!c->pipe0.comp)
658 comp_ptr = &c->pipe0.comp;
659 else if (!c->pipe1.comp)
660 comp_ptr = &c->pipe1.comp;
e6e79b44
CG
661 else
662 return -ENOSPC;
663
5a5abf02 664 *comp_ptr = comp;
7e0d3542 665 ret = comp->probe_channel(c->iface, c->channel_id, &c->cfg, comp_param);
e6e79b44 666 if (ret) {
5a5abf02 667 *comp_ptr = NULL;
e6e79b44
CG
668 return ret;
669 }
e6e79b44
CG
670 return 0;
671}
672
57562a72 673/**
b7937dc4
CG
674 * add_link_store - store function for add_link attribute
675 * @drv: device driver
57562a72
CG
676 * @buf: buffer
677 * @len: buffer length
678 *
679 * This parses the string given by buf and splits it into
b7937dc4
CG
680 * four substrings. Note: last substring is optional. In case a cdev
681 * component is loaded the optional 4th substring will make up the name of
57562a72
CG
682 * device node in the /dev directory. If omitted, the device node will
683 * inherit the channel's name within sysfs.
684 *
b7937dc4 685 * Searches for (device, channel) pair and probes the component
57562a72
CG
686 *
687 * Example:
b7937dc4
CG
688 * (1) echo "mdev0:ch6:cdev:my_rxchannel" >add_link
689 * (2) echo "mdev1:ep81:cdev" >add_link
57562a72
CG
690 *
691 * (1) would create the device node /dev/my_rxchannel
5d7df3ae 692 * (2) would create the device node /dev/mdev1-ep81
57562a72 693 */
bdafb7e8 694static ssize_t add_link_store(struct device_driver *drv,
57562a72
CG
695 const char *buf,
696 size_t len)
697{
fcb7fad8 698 struct most_channel *c;
5a5abf02 699 struct core_component *comp;
57562a72
CG
700 char buffer[STRING_SIZE];
701 char *mdev;
702 char *mdev_ch;
5a5abf02
CG
703 char *comp_name;
704 char *comp_param;
57562a72
CG
705 char devnod_buf[STRING_SIZE];
706 int ret;
3f78f611 707 size_t max_len = min_t(size_t, len + 1, STRING_SIZE);
57562a72
CG
708
709 strlcpy(buffer, buf, max_len);
5a5abf02 710 ret = split_string(buffer, &mdev, &mdev_ch, &comp_name, &comp_param);
57562a72
CG
711 if (ret)
712 return ret;
fdbdc0e6 713 comp = match_component(comp_name);
5a5abf02 714 if (!comp_param || *comp_param == 0) {
1446ff09
CG
715 snprintf(devnod_buf, sizeof(devnod_buf), "%s-%s", mdev,
716 mdev_ch);
5a5abf02 717 comp_param = devnod_buf;
57562a72
CG
718 }
719
ec0c2f62 720 c = get_channel(mdev, mdev_ch);
9136fccf 721 if (!c)
57562a72
CG
722 return -ENODEV;
723
db09fe0d 724 ret = link_channel_to_component(c, comp, comp_param);
e6e79b44 725 if (ret)
57562a72 726 return ret;
57562a72
CG
727 return len;
728}
729
57562a72 730/**
bf676f4c 731 * remove_link_store - store function for remove_link attribute
b7937dc4 732 * @drv: device driver
57562a72
CG
733 * @buf: buffer
734 * @len: buffer length
735 *
736 * Example:
eefb2a84 737 * echo "mdev0:ep81" >remove_link
57562a72 738 */
bdafb7e8 739static ssize_t remove_link_store(struct device_driver *drv,
57562a72
CG
740 const char *buf,
741 size_t len)
742{
fcb7fad8 743 struct most_channel *c;
5a5abf02 744 struct core_component *comp;
57562a72
CG
745 char buffer[STRING_SIZE];
746 char *mdev;
747 char *mdev_ch;
5a5abf02 748 char *comp_name;
57562a72 749 int ret;
3f78f611 750 size_t max_len = min_t(size_t, len + 1, STRING_SIZE);
57562a72
CG
751
752 strlcpy(buffer, buf, max_len);
5a5abf02 753 ret = split_string(buffer, &mdev, &mdev_ch, &comp_name, NULL);
57562a72
CG
754 if (ret)
755 return ret;
fdbdc0e6 756 comp = match_component(comp_name);
ec0c2f62 757 c = get_channel(mdev, mdev_ch);
9136fccf 758 if (!c)
57562a72
CG
759 return -ENODEV;
760
5a5abf02 761 if (comp->disconnect_channel(c->iface, c->channel_id))
44fe5781 762 return -EIO;
5a5abf02
CG
763 if (c->pipe0.comp == comp)
764 c->pipe0.comp = NULL;
765 if (c->pipe1.comp == comp)
766 c->pipe1.comp = NULL;
57562a72
CG
767 return len;
768}
769
/* Shorthand for referencing a DRIVER_ATTR's embedded struct attribute. */
#define DRV_ATTR(_name) (&driver_attr_##_name.attr)

static DRIVER_ATTR_RO(links);
static DRIVER_ATTR_RO(components);
static DRIVER_ATTR_WO(add_link);
static DRIVER_ATTR_WO(remove_link);

/* Sysfs attributes attached to the core driver itself. */
static struct attribute *mc_attrs[] = {
	DRV_ATTR(links),
	DRV_ATTR(components),
	DRV_ATTR(add_link),
	DRV_ATTR(remove_link),
	NULL,
};

static struct attribute_group mc_attr_group = {
	.attrs = mc_attrs,
};

/* NULL-terminated group list assigned to the driver. */
static const struct attribute_group *mc_attr_groups[] = {
	&mc_attr_group,
	NULL,
};
57562a72 793
/*
 * most_match - bus match callback
 *
 * Binds every device on the bus except the bus's own root device,
 * which is named "most".
 */
int most_match(struct device *dev, struct device_driver *drv)
{
	return strcmp(dev_name(dev), "most") != 0;
}
801
/*
 * trash_mbo - park an MBO on the channel's trash fifo
 *
 * Used instead of freeing directly; trash entries are disposed of later
 * by flush_trash_fifo().
 */
static inline void trash_mbo(struct mbo *mbo)
{
	unsigned long flags;
	struct most_channel *c = mbo->context;

	spin_lock_irqsave(&c->fifo_lock, flags);
	list_add(&mbo->list, &c->trash_fifo);
	spin_unlock_irqrestore(&c->fifo_lock, flags);
}
811
fcb7fad8 812static bool hdm_mbo_ready(struct most_channel *c)
57562a72 813{
bf9503f1 814 bool empty;
57562a72 815
bf9503f1
CG
816 if (c->enqueue_halt)
817 return false;
818
819 spin_lock_irq(&c->fifo_lock);
820 empty = list_empty(&c->halt_fifo);
821 spin_unlock_irq(&c->fifo_lock);
822
823 return !empty;
57562a72
CG
824}
825
/*
 * nq_hdm_mbo - queue an MBO for submission to the hardware
 *
 * Appends the MBO to the channel's halt fifo and wakes the enqueue
 * thread, which hands it to the HDM.
 */
static void nq_hdm_mbo(struct mbo *mbo)
{
	unsigned long flags;
	struct most_channel *c = mbo->context;

	spin_lock_irqsave(&c->fifo_lock, flags);
	list_add_tail(&mbo->list, &c->halt_fifo);
	spin_unlock_irqrestore(&c->fifo_lock, flags);
	wake_up_interruptible(&c->hdm_fifo_wq);
}
836
/*
 * hdm_enqueue_thread - per-channel kthread feeding MBOs to the HDM
 *
 * Sleeps until hdm_mbo_ready() reports work (or the thread is stopped),
 * then pops one MBO from the halt fifo and hands it to the interface's
 * enqueue() callback.  nq_mutex serializes against code that toggles
 * enqueue_halt, so the halt flag is re-checked under the mutex before
 * each submission.
 */
static int hdm_enqueue_thread(void *data)
{
	struct most_channel *c = data;
	struct mbo *mbo;
	int ret;
	typeof(c->iface->enqueue) enqueue = c->iface->enqueue;

	while (likely(!kthread_should_stop())) {
		wait_event_interruptible(c->hdm_fifo_wq,
					 hdm_mbo_ready(c) ||
					 kthread_should_stop());

		mutex_lock(&c->nq_mutex);
		spin_lock_irq(&c->fifo_lock);
		/* state may have changed since the wakeup; re-check */
		if (unlikely(c->enqueue_halt || list_empty(&c->halt_fifo))) {
			spin_unlock_irq(&c->fifo_lock);
			mutex_unlock(&c->nq_mutex);
			continue;
		}

		mbo = list_pop_mbo(&c->halt_fifo);
		spin_unlock_irq(&c->fifo_lock);

		/* Rx buffers are always submitted full-sized */
		if (c->cfg.direction == MOST_CH_RX)
			mbo->buffer_length = c->cfg.buffer_size;

		ret = enqueue(mbo->ifp, mbo->hdm_channel_id, mbo);
		mutex_unlock(&c->nq_mutex);

		if (unlikely(ret)) {
			/* put the MBO back and stop feeding this channel */
			pr_err("hdm enqueue failed\n");
			nq_hdm_mbo(mbo);
			c->hdm_enqueue_task = NULL;
			return 0;
		}
	}

	return 0;
}
876
fcb7fad8 877static int run_enqueue_thread(struct most_channel *c, int channel_id)
57562a72
CG
878{
879 struct task_struct *task =
246ed517
SB
880 kthread_run(hdm_enqueue_thread, c, "hdm_fifo_%d",
881 channel_id);
57562a72
CG
882
883 if (IS_ERR(task))
884 return PTR_ERR(task);
885
886 c->hdm_enqueue_task = task;
887 return 0;
888}
889
/**
 * arm_mbo - recycle MBO for further usage
 * @mbo: most buffer
 *
 * This puts an MBO back to the list to have it ready for up coming
 * tx transactions.
 *
 * In case the MBO belongs to a channel that recently has been
 * poisoned, the MBO is scheduled to be trashed.
 * Calls the completion handler of an attached component.
 */
static void arm_mbo(struct mbo *mbo)
{
	unsigned long flags;
	struct most_channel *c;

	BUG_ON((!mbo) || (!mbo->context));
	c = mbo->context;

	if (c->is_poisoned) {
		trash_mbo(mbo);
		return;
	}

	spin_lock_irqsave(&c->fifo_lock, flags);
	/* credit the buffer back to whichever pipe's quota it came from */
	++*mbo->num_buffers_ptr;
	list_add_tail(&mbo->list, &c->fifo);
	spin_unlock_irqrestore(&c->fifo_lock, flags);

	if (c->pipe0.refs && c->pipe0.comp->tx_completion)
		c->pipe0.comp->tx_completion(c->iface, c->channel_id);

	if (c->pipe1.refs && c->pipe1.comp->tx_completion)
		c->pipe1.comp->tx_completion(c->iface, c->channel_id);
}
925
/**
 * arm_mbo_chain - helper function that arms an MBO chain for the HDM
 * @c: pointer to interface channel
 * @dir: direction of the channel
 * @compl: pointer to completion function
 *
 * This allocates buffer objects including the containing DMA coherent
 * buffer and puts them in the fifo.
 * Buffers of Rx channels are put in the kthread fifo, hence immediately
 * submitted to the HDM.
 *
 * Returns the number of allocated and enqueued MBOs.
 */
static int arm_mbo_chain(struct most_channel *c, int dir,
			 void (*compl)(struct mbo *))
{
	unsigned int i;
	int retval;
	struct mbo *mbo;
	u32 coherent_buf_size = c->cfg.buffer_size + c->cfg.extra_len;

	atomic_set(&c->mbo_nq_level, 0);

	for (i = 0; i < c->cfg.num_buffers; i++) {
		mbo = kzalloc(sizeof(*mbo), GFP_KERNEL);
		if (!mbo) {
			/* partial success: report how many MBOs were armed */
			retval = i;
			goto _exit;
		}
		mbo->context = c;
		mbo->ifp = c->iface;
		mbo->hdm_channel_id = c->channel_id;
		/*
		 * NOTE(review): passing a NULL struct device to
		 * dma_alloc_coherent() relies on legacy behavior — confirm
		 * whether the interface's device should be used here.
		 */
		mbo->virt_address = dma_alloc_coherent(NULL,
						       coherent_buf_size,
						       &mbo->bus_address,
						       GFP_KERNEL);
		if (!mbo->virt_address) {
			pr_info("WARN: No DMA coherent buffer.\n");
			retval = i;
			goto _error1;
		}
		mbo->complete = compl;
		mbo->num_buffers_ptr = &dummy_num_buffers;
		if (dir == MOST_CH_RX) {
			/* Rx buffers go straight to the enqueue thread */
			nq_hdm_mbo(mbo);
			atomic_inc(&c->mbo_nq_level);
		} else {
			arm_mbo(mbo);
		}
	}
	return i;

_error1:
	kfree(mbo);
_exit:
	return retval;
}
983
/**
 * most_submit_mbo - submits an MBO to fifo
 * @mbo: most buffer
 *
 * Hands a filled MBO back to the core, queuing it for submission to
 * the hardware via the channel's enqueue thread.
 */
void most_submit_mbo(struct mbo *mbo)
{
	if (WARN_ONCE(!mbo || !mbo->context,
		      "bad mbo or missing channel reference\n"))
		return;

	nq_hdm_mbo(mbo);
}
EXPORT_SYMBOL_GPL(most_submit_mbo);
997
998/**
999 * most_write_completion - write completion handler
b7937dc4 1000 * @mbo: most buffer
57562a72
CG
1001 *
1002 * This recycles the MBO for further usage. In case the channel has been
1003 * poisoned, the MBO is scheduled to be trashed.
1004 */
1005static void most_write_completion(struct mbo *mbo)
1006{
fcb7fad8 1007 struct most_channel *c;
57562a72
CG
1008
1009 BUG_ON((!mbo) || (!mbo->context));
1010
1011 c = mbo->context;
1012 if (mbo->status == MBO_E_INVAL)
1013 pr_info("WARN: Tx MBO status: invalid\n");
ec58d2a8 1014 if (unlikely(c->is_poisoned || (mbo->status == MBO_E_CLOSE)))
57562a72
CG
1015 trash_mbo(mbo);
1016 else
1017 arm_mbo(mbo);
1018}
1019
/*
 * channel_has_mbo - check if a free MBO is available for @comp
 * @iface: interface instance
 * @id: channel ID
 * @comp: driver component asking
 *
 * Returns 1 when an MBO is available, 0 when not, -EINVAL for a bad id.
 */
int channel_has_mbo(struct most_interface *iface, int id,
		    struct core_component *comp)
{
	struct most_channel *c = iface->p->channel[id];
	unsigned long flags;
	int empty;

	if (unlikely(!c))
		return -EINVAL;

	/*
	 * When both pipes are in use, enforce the caller's per-pipe
	 * buffer quota rather than just checking the shared fifo.
	 */
	if (c->pipe0.refs && c->pipe1.refs &&
	    ((comp == c->pipe0.comp && c->pipe0.num_buffers <= 0) ||
	     (comp == c->pipe1.comp && c->pipe1.num_buffers <= 0)))
		return 0;

	spin_lock_irqsave(&c->fifo_lock, flags);
	empty = list_empty(&c->fifo);
	spin_unlock_irqrestore(&c->fifo_lock, flags);
	return !empty;
}
EXPORT_SYMBOL_GPL(channel_has_mbo);
1041
57562a72
CG
1042/**
1043 * most_get_mbo - get pointer to an MBO of pool
1044 * @iface: pointer to interface instance
1045 * @id: channel ID
b7937dc4 1046 * @comp: driver component
57562a72
CG
1047 *
1048 * This attempts to get a free buffer out of the channel fifo.
1049 * Returns a pointer to MBO on success or NULL otherwise.
1050 */
71457d48 1051struct mbo *most_get_mbo(struct most_interface *iface, int id,
5a5abf02 1052 struct core_component *comp)
57562a72
CG
1053{
1054 struct mbo *mbo;
fcb7fad8 1055 struct most_channel *c;
57562a72 1056 unsigned long flags;
71457d48 1057 int *num_buffers_ptr;
57562a72 1058
9136fccf 1059 c = iface->p->channel[id];
57562a72
CG
1060 if (unlikely(!c))
1061 return NULL;
71457d48 1062
f898f989 1063 if (c->pipe0.refs && c->pipe1.refs &&
5a5abf02
CG
1064 ((comp == c->pipe0.comp && c->pipe0.num_buffers <= 0) ||
1065 (comp == c->pipe1.comp && c->pipe1.num_buffers <= 0)))
71457d48
CG
1066 return NULL;
1067
5a5abf02 1068 if (comp == c->pipe0.comp)
f898f989 1069 num_buffers_ptr = &c->pipe0.num_buffers;
5a5abf02 1070 else if (comp == c->pipe1.comp)
f898f989 1071 num_buffers_ptr = &c->pipe1.num_buffers;
71457d48
CG
1072 else
1073 num_buffers_ptr = &dummy_num_buffers;
1074
57562a72
CG
1075 spin_lock_irqsave(&c->fifo_lock, flags);
1076 if (list_empty(&c->fifo)) {
1077 spin_unlock_irqrestore(&c->fifo_lock, flags);
1078 return NULL;
1079 }
1080 mbo = list_pop_mbo(&c->fifo);
71457d48 1081 --*num_buffers_ptr;
57562a72 1082 spin_unlock_irqrestore(&c->fifo_lock, flags);
71457d48
CG
1083
1084 mbo->num_buffers_ptr = num_buffers_ptr;
57562a72
CG
1085 mbo->buffer_length = c->cfg.buffer_size;
1086 return mbo;
1087}
1088EXPORT_SYMBOL_GPL(most_get_mbo);
1089
57562a72
CG
1090/**
1091 * most_put_mbo - return buffer to pool
b7937dc4 1092 * @mbo: most buffer
57562a72
CG
1093 */
1094void most_put_mbo(struct mbo *mbo)
1095{
fcb7fad8 1096 struct most_channel *c = mbo->context;
57562a72 1097
57562a72
CG
1098 if (c->cfg.direction == MOST_CH_TX) {
1099 arm_mbo(mbo);
1100 return;
1101 }
1102 nq_hdm_mbo(mbo);
1103 atomic_inc(&c->mbo_nq_level);
1104}
1105EXPORT_SYMBOL_GPL(most_put_mbo);
1106
1107/**
1108 * most_read_completion - read completion handler
b7937dc4 1109 * @mbo: most buffer
57562a72
CG
1110 *
1111 * This function is called by the HDM when data has been received from the
1112 * hardware and copied to the buffer of the MBO.
1113 *
1114 * In case the channel has been poisoned it puts the buffer in the trash queue.
b7937dc4 1115 * Otherwise, it passes the buffer to an component for further processing.
57562a72
CG
1116 */
1117static void most_read_completion(struct mbo *mbo)
1118{
fcb7fad8 1119 struct most_channel *c = mbo->context;
57562a72 1120
f13f6981
CG
1121 if (unlikely(c->is_poisoned || (mbo->status == MBO_E_CLOSE))) {
1122 trash_mbo(mbo);
1123 return;
1124 }
57562a72
CG
1125
1126 if (mbo->status == MBO_E_INVAL) {
1127 nq_hdm_mbo(mbo);
1128 atomic_inc(&c->mbo_nq_level);
1129 return;
1130 }
1131
5a63e23a 1132 if (atomic_sub_and_test(1, &c->mbo_nq_level))
57562a72 1133 c->is_starving = 1;
57562a72 1134
5a5abf02
CG
1135 if (c->pipe0.refs && c->pipe0.comp->rx_completion &&
1136 c->pipe0.comp->rx_completion(mbo) == 0)
57562a72 1137 return;
f13f6981 1138
5a5abf02
CG
1139 if (c->pipe1.refs && c->pipe1.comp->rx_completion &&
1140 c->pipe1.comp->rx_completion(mbo) == 0)
57562a72 1141 return;
f13f6981
CG
1142
1143 most_put_mbo(mbo);
57562a72
CG
1144}
1145
1146/**
1147 * most_start_channel - prepares a channel for communication
1148 * @iface: pointer to interface instance
1149 * @id: channel ID
b7937dc4 1150 * @comp: driver component
57562a72
CG
1151 *
1152 * This prepares the channel for usage. Cross-checks whether the
1153 * channel's been properly configured.
1154 *
1155 * Returns 0 on success or error code otherwise.
1156 */
f13f6981 1157int most_start_channel(struct most_interface *iface, int id,
5a5abf02 1158 struct core_component *comp)
57562a72
CG
1159{
1160 int num_buffer;
1161 int ret;
9136fccf 1162 struct most_channel *c = iface->p->channel[id];
57562a72
CG
1163
1164 if (unlikely(!c))
1165 return -EINVAL;
1166
f13f6981 1167 mutex_lock(&c->start_mutex);
f898f989 1168 if (c->pipe0.refs + c->pipe1.refs > 0)
b7937dc4 1169 goto out; /* already started by another component */
57562a72
CG
1170
1171 if (!try_module_get(iface->mod)) {
1172 pr_info("failed to acquire HDM lock\n");
f13f6981 1173 mutex_unlock(&c->start_mutex);
57562a72
CG
1174 return -ENOLCK;
1175 }
57562a72
CG
1176
1177 c->cfg.extra_len = 0;
1178 if (c->iface->configure(c->iface, c->channel_id, &c->cfg)) {
1179 pr_info("channel configuration failed. Go check settings...\n");
1180 ret = -EINVAL;
1181 goto error;
1182 }
1183
1184 init_waitqueue_head(&c->hdm_fifo_wq);
1185
1186 if (c->cfg.direction == MOST_CH_RX)
1187 num_buffer = arm_mbo_chain(c, c->cfg.direction,
1188 most_read_completion);
1189 else
1190 num_buffer = arm_mbo_chain(c, c->cfg.direction,
1191 most_write_completion);
47af41b0 1192 if (unlikely(!num_buffer)) {
57562a72
CG
1193 pr_info("failed to allocate memory\n");
1194 ret = -ENOMEM;
1195 goto error;
1196 }
1197
1198 ret = run_enqueue_thread(c, id);
1199 if (ret)
1200 goto error;
1201
57562a72 1202 c->is_starving = 0;
f898f989
CG
1203 c->pipe0.num_buffers = c->cfg.num_buffers / 2;
1204 c->pipe1.num_buffers = c->cfg.num_buffers - c->pipe0.num_buffers;
57562a72 1205 atomic_set(&c->mbo_ref, num_buffer);
f13f6981
CG
1206
1207out:
5a5abf02 1208 if (comp == c->pipe0.comp)
f898f989 1209 c->pipe0.refs++;
5a5abf02 1210 if (comp == c->pipe1.comp)
f898f989 1211 c->pipe1.refs++;
f13f6981 1212 mutex_unlock(&c->start_mutex);
57562a72 1213 return 0;
f13f6981 1214
57562a72 1215error:
e23afff9 1216 module_put(iface->mod);
f13f6981 1217 mutex_unlock(&c->start_mutex);
57562a72
CG
1218 return ret;
1219}
1220EXPORT_SYMBOL_GPL(most_start_channel);
1221
1222/**
1223 * most_stop_channel - stops a running channel
1224 * @iface: pointer to interface instance
1225 * @id: channel ID
b7937dc4 1226 * @comp: driver component
57562a72 1227 */
f13f6981 1228int most_stop_channel(struct most_interface *iface, int id,
5a5abf02 1229 struct core_component *comp)
57562a72 1230{
fcb7fad8 1231 struct most_channel *c;
57562a72
CG
1232
1233 if (unlikely((!iface) || (id >= iface->num_channels) || (id < 0))) {
1234 pr_err("Bad interface or index out of range\n");
1235 return -EINVAL;
1236 }
9136fccf 1237 c = iface->p->channel[id];
57562a72
CG
1238 if (unlikely(!c))
1239 return -EINVAL;
1240
f13f6981 1241 mutex_lock(&c->start_mutex);
f898f989 1242 if (c->pipe0.refs + c->pipe1.refs >= 2)
f13f6981 1243 goto out;
57562a72 1244
57562a72
CG
1245 if (c->hdm_enqueue_task)
1246 kthread_stop(c->hdm_enqueue_task);
1247 c->hdm_enqueue_task = NULL;
57562a72 1248
9cda3007 1249 if (iface->mod)
57562a72 1250 module_put(iface->mod);
57562a72
CG
1251
1252 c->is_poisoned = true;
1253 if (c->iface->poison_channel(c->iface, c->channel_id)) {
1254 pr_err("Cannot stop channel %d of mdev %s\n", c->channel_id,
1255 c->iface->description);
f13f6981 1256 mutex_unlock(&c->start_mutex);
57562a72
CG
1257 return -EAGAIN;
1258 }
1259 flush_trash_fifo(c);
1260 flush_channel_fifos(c);
1261
1262#ifdef CMPL_INTERRUPTIBLE
1263 if (wait_for_completion_interruptible(&c->cleanup)) {
1264 pr_info("Interrupted while clean up ch %d\n", c->channel_id);
f13f6981 1265 mutex_unlock(&c->start_mutex);
57562a72
CG
1266 return -EINTR;
1267 }
1268#else
1269 wait_for_completion(&c->cleanup);
1270#endif
1271 c->is_poisoned = false;
f13f6981
CG
1272
1273out:
5a5abf02 1274 if (comp == c->pipe0.comp)
f898f989 1275 c->pipe0.refs--;
5a5abf02 1276 if (comp == c->pipe1.comp)
f898f989 1277 c->pipe1.refs--;
f13f6981 1278 mutex_unlock(&c->start_mutex);
57562a72
CG
1279 return 0;
1280}
1281EXPORT_SYMBOL_GPL(most_stop_channel);
1282
1283/**
b7937dc4
CG
1284 * most_register_component - registers a driver component with the core
1285 * @comp: driver component
57562a72 1286 */
5a5abf02 1287int most_register_component(struct core_component *comp)
57562a72 1288{
5a5abf02 1289 if (!comp) {
b7937dc4 1290 pr_err("Bad component\n");
57562a72
CG
1291 return -EINVAL;
1292 }
5a5abf02 1293 list_add_tail(&comp->list, &mc.comp_list);
b7937dc4 1294 pr_info("registered new core component %s\n", comp->name);
57562a72
CG
1295 return 0;
1296}
ed021a0f 1297EXPORT_SYMBOL_GPL(most_register_component);
57562a72 1298
9136fccf
CG
1299static int disconnect_channels(struct device *dev, void *data)
1300{
1301 struct most_interface *iface;
1302 struct most_channel *c, *tmp;
5a5abf02 1303 struct core_component *comp = data;
9136fccf
CG
1304
1305 iface = to_most_interface(dev);
1306 list_for_each_entry_safe(c, tmp, &iface->p->channel_list, list) {
5a5abf02
CG
1307 if (c->pipe0.comp == comp || c->pipe1.comp == comp)
1308 comp->disconnect_channel(c->iface, c->channel_id);
1309 if (c->pipe0.comp == comp)
1310 c->pipe0.comp = NULL;
1311 if (c->pipe1.comp == comp)
1312 c->pipe1.comp = NULL;
9136fccf
CG
1313 }
1314 return 0;
1315}
1316
57562a72 1317/**
b7937dc4
CG
1318 * most_deregister_component - deregisters a driver component with the core
1319 * @comp: driver component
57562a72 1320 */
5a5abf02 1321int most_deregister_component(struct core_component *comp)
57562a72 1322{
5a5abf02 1323 if (!comp) {
b7937dc4 1324 pr_err("Bad component\n");
57562a72
CG
1325 return -EINVAL;
1326 }
1327
5a5abf02
CG
1328 bus_for_each_dev(&mc.bus, NULL, comp, disconnect_channels);
1329 list_del(&comp->list);
b7937dc4 1330 pr_info("deregistering component %s\n", comp->name);
57562a72
CG
1331 return 0;
1332}
ed021a0f 1333EXPORT_SYMBOL_GPL(most_deregister_component);
57562a72 1334
/* Device-model release hook for interface devices; memory is freed by
 * the registration/deregistration paths, so this only logs.
 */
static void release_interface(struct device *dev)
{
	pr_info("releasing interface dev %s...\n", dev_name(dev));
}
1339
/* Device-model release hook for channel devices; the channel struct is
 * freed by the caller of device_unregister(), so this only logs.
 */
static void release_channel(struct device *dev)
{
	pr_info("releasing channel dev %s...\n", dev_name(dev));
}
1344
57562a72
CG
1345/**
1346 * most_register_interface - registers an interface with core
b7937dc4 1347 * @iface: device interface
57562a72
CG
1348 *
1349 * Allocates and initializes a new interface instance and all of its channels.
1350 * Returns a pointer to kobject or an error pointer.
1351 */
4d5f022f 1352int most_register_interface(struct most_interface *iface)
57562a72
CG
1353{
1354 unsigned int i;
1355 int id;
fcb7fad8 1356 struct most_channel *c;
57562a72
CG
1357
1358 if (!iface || !iface->enqueue || !iface->configure ||
1359 !iface->poison_channel || (iface->num_channels > MAX_CHANNELS)) {
1360 pr_err("Bad interface or channel overflow\n");
4d5f022f 1361 return -EINVAL;
57562a72
CG
1362 }
1363
1364 id = ida_simple_get(&mdev_id, 0, 0, GFP_KERNEL);
1365 if (id < 0) {
1366 pr_info("Failed to alloc mdev ID\n");
4d5f022f 1367 return id;
57562a72 1368 }
57562a72 1369
9136fccf
CG
1370 iface->p = kzalloc(sizeof(*iface->p), GFP_KERNEL);
1371 if (!iface->p) {
57562a72 1372 pr_info("Failed to allocate interface instance\n");
b7382d44 1373 ida_simple_remove(&mdev_id, id);
4d5f022f 1374 return -ENOMEM;
57562a72
CG
1375 }
1376
9136fccf
CG
1377 INIT_LIST_HEAD(&iface->p->channel_list);
1378 iface->p->dev_id = id;
1379 snprintf(iface->p->name, STRING_SIZE, "mdev%d", id);
1380 iface->dev.init_name = iface->p->name;
14ae5f03
CG
1381 iface->dev.bus = &mc.bus;
1382 iface->dev.parent = &mc.dev;
4d5f022f
CG
1383 iface->dev.groups = interface_attr_groups;
1384 iface->dev.release = release_interface;
1385 if (device_register(&iface->dev)) {
1386 pr_err("registering iface->dev failed\n");
9136fccf 1387 kfree(iface->p);
4d5f022f
CG
1388 ida_simple_remove(&mdev_id, id);
1389 return -ENOMEM;
1390 }
57562a72
CG
1391
1392 for (i = 0; i < iface->num_channels; i++) {
1393 const char *name_suffix = iface->channel_vector[i].name_suffix;
1394
4d5f022f 1395 c = kzalloc(sizeof(*c), GFP_KERNEL);
57562a72
CG
1396 if (!c)
1397 goto free_instance;
845101be
CG
1398 if (!name_suffix)
1399 snprintf(c->name, STRING_SIZE, "ch%d", i);
1400 else
1401 snprintf(c->name, STRING_SIZE, "%s", name_suffix);
1402 c->dev.init_name = c->name;
4d5f022f
CG
1403 c->dev.parent = &iface->dev;
1404 c->dev.groups = channel_attr_groups;
1405 c->dev.release = release_channel;
1406 if (device_register(&c->dev)) {
1407 pr_err("registering c->dev failed\n");
9136fccf 1408 goto free_instance_nodev;
4d5f022f 1409 }
9136fccf 1410 iface->p->channel[i] = c;
57562a72
CG
1411 c->is_starving = 0;
1412 c->iface = iface;
57562a72
CG
1413 c->channel_id = i;
1414 c->keep_mbo = false;
1415 c->enqueue_halt = false;
1416 c->is_poisoned = false;
57562a72
CG
1417 c->cfg.direction = 0;
1418 c->cfg.data_type = 0;
1419 c->cfg.num_buffers = 0;
1420 c->cfg.buffer_size = 0;
1421 c->cfg.subbuffer_size = 0;
1422 c->cfg.packets_per_xact = 0;
1423 spin_lock_init(&c->fifo_lock);
1424 INIT_LIST_HEAD(&c->fifo);
1425 INIT_LIST_HEAD(&c->trash_fifo);
1426 INIT_LIST_HEAD(&c->halt_fifo);
1427 init_completion(&c->cleanup);
1428 atomic_set(&c->mbo_ref, 0);
f13f6981 1429 mutex_init(&c->start_mutex);
bf9503f1 1430 mutex_init(&c->nq_mutex);
9136fccf 1431 list_add_tail(&c->list, &iface->p->channel_list);
57562a72 1432 }
b7937dc4 1433 pr_info("registered new device mdev%d (%s)\n",
4d5f022f
CG
1434 id, iface->description);
1435 return 0;
57562a72 1436
9136fccf
CG
1437free_instance_nodev:
1438 kfree(c);
1439
57562a72 1440free_instance:
9136fccf
CG
1441 while (i > 0) {
1442 c = iface->p->channel[--i];
1443 device_unregister(&c->dev);
1444 kfree(c);
1445 }
1446 kfree(iface->p);
4d5f022f 1447 device_unregister(&iface->dev);
b7382d44 1448 ida_simple_remove(&mdev_id, id);
4d5f022f 1449 return -ENOMEM;
57562a72
CG
1450}
1451EXPORT_SYMBOL_GPL(most_register_interface);
1452
1453/**
1454 * most_deregister_interface - deregisters an interface with core
b7937dc4 1455 * @iface: device interface
57562a72
CG
1456 *
1457 * Before removing an interface instance from the list, all running
1458 * channels are stopped and poisoned.
1459 */
1460void most_deregister_interface(struct most_interface *iface)
1461{
4d5f022f 1462 int i;
fcb7fad8 1463 struct most_channel *c;
57562a72 1464
b7937dc4 1465 pr_info("deregistering device %s (%s)\n", dev_name(&iface->dev), iface->description);
4d5f022f 1466 for (i = 0; i < iface->num_channels; i++) {
9136fccf 1467 c = iface->p->channel[i];
5a5abf02
CG
1468 if (c->pipe0.comp)
1469 c->pipe0.comp->disconnect_channel(c->iface,
a0fceb1f 1470 c->channel_id);
5a5abf02
CG
1471 if (c->pipe1.comp)
1472 c->pipe1.comp->disconnect_channel(c->iface,
a0fceb1f 1473 c->channel_id);
5a5abf02
CG
1474 c->pipe0.comp = NULL;
1475 c->pipe1.comp = NULL;
4d5f022f
CG
1476 list_del(&c->list);
1477 device_unregister(&c->dev);
1478 kfree(c);
a0fceb1f
CG
1479 }
1480
9136fccf
CG
1481 ida_simple_remove(&mdev_id, iface->p->dev_id);
1482 kfree(iface->p);
4d5f022f 1483 device_unregister(&iface->dev);
57562a72
CG
1484}
1485EXPORT_SYMBOL_GPL(most_deregister_interface);
1486
1487/**
1488 * most_stop_enqueue - prevents core from enqueueing MBOs
1489 * @iface: pointer to interface
1490 * @id: channel id
1491 *
1492 * This is called by an HDM that _cannot_ attend to its duties and
1493 * is imminent to get run over by the core. The core is not going to
1494 * enqueue any further packets unless the flagging HDM calls
1495 * most_resume enqueue().
1496 */
1497void most_stop_enqueue(struct most_interface *iface, int id)
1498{
9136fccf 1499 struct most_channel *c = iface->p->channel[id];
57562a72 1500
bf9503f1
CG
1501 if (!c)
1502 return;
1503
1504 mutex_lock(&c->nq_mutex);
1505 c->enqueue_halt = true;
1506 mutex_unlock(&c->nq_mutex);
57562a72
CG
1507}
1508EXPORT_SYMBOL_GPL(most_stop_enqueue);
1509
1510/**
1511 * most_resume_enqueue - allow core to enqueue MBOs again
1512 * @iface: pointer to interface
1513 * @id: channel id
1514 *
1515 * This clears the enqueue halt flag and enqueues all MBOs currently
1516 * sitting in the wait fifo.
1517 */
1518void most_resume_enqueue(struct most_interface *iface, int id)
1519{
9136fccf 1520 struct most_channel *c = iface->p->channel[id];
57562a72 1521
bf9503f1 1522 if (!c)
57562a72 1523 return;
bf9503f1
CG
1524
1525 mutex_lock(&c->nq_mutex);
57562a72 1526 c->enqueue_halt = false;
bf9503f1 1527 mutex_unlock(&c->nq_mutex);
57562a72
CG
1528
1529 wake_up_interruptible(&c->hdm_fifo_wq);
1530}
1531EXPORT_SYMBOL_GPL(most_resume_enqueue);
1532
/* Release hook for the core's pseudo bus device; nothing to free. */
static void release_most_sub(struct device *dev)
{
	pr_info("releasing most_subsystem\n");
}
1537
57562a72
CG
1538static int __init most_init(void)
1539{
cc4188b6
SM
1540 int err;
1541
57562a72 1542 pr_info("init()\n");
81ce26b7 1543 INIT_LIST_HEAD(&mc.comp_list);
57562a72
CG
1544 ida_init(&mdev_id);
1545
14ae5f03
CG
1546 mc.bus.name = "most",
1547 mc.bus.match = most_match,
1548 mc.drv.name = "most_core",
1549 mc.drv.bus = &mc.bus,
fdbdc0e6 1550 mc.drv.groups = mc_attr_groups;
14ae5f03
CG
1551
1552 err = bus_register(&mc.bus);
cc4188b6 1553 if (err) {
57562a72 1554 pr_info("Cannot register most bus\n");
cc4188b6 1555 return err;
57562a72 1556 }
14ae5f03
CG
1557 mc.class = class_create(THIS_MODULE, "most");
1558 if (IS_ERR(mc.class)) {
57562a72 1559 pr_info("No udev support.\n");
14ae5f03 1560 err = PTR_ERR(mc.class);
57562a72
CG
1561 goto exit_bus;
1562 }
cc4188b6 1563
14ae5f03 1564 err = driver_register(&mc.drv);
cc4188b6 1565 if (err) {
57562a72
CG
1566 pr_info("Cannot register core driver\n");
1567 goto exit_class;
1568 }
14ae5f03
CG
1569 mc.dev.init_name = "most_bus";
1570 mc.dev.release = release_most_sub;
1571 if (device_register(&mc.dev)) {
cc4188b6 1572 err = -ENOMEM;
4d5f022f 1573 goto exit_driver;
cc4188b6 1574 }
57562a72
CG
1575
1576 return 0;
1577
57562a72 1578exit_driver:
14ae5f03 1579 driver_unregister(&mc.drv);
57562a72 1580exit_class:
14ae5f03 1581 class_destroy(mc.class);
57562a72 1582exit_bus:
14ae5f03 1583 bus_unregister(&mc.bus);
cc4188b6 1584 return err;
57562a72
CG
1585}
1586
1587static void __exit most_exit(void)
1588{
57562a72 1589 pr_info("exit core module\n");
14ae5f03
CG
1590 device_unregister(&mc.dev);
1591 driver_unregister(&mc.drv);
1592 class_destroy(mc.class);
1593 bus_unregister(&mc.bus);
57562a72
CG
1594 ida_destroy(&mdev_id);
1595}
1596
1597module_init(most_init);
1598module_exit(most_exit);
1599MODULE_LICENSE("GPL");
1600MODULE_AUTHOR("Christian Gromm <christian.gromm@microchip.com>");
1601MODULE_DESCRIPTION("Core module of stacked MOST Linux driver");