staging: most: return error value
[linux-block.git] / drivers / staging / most / mostcore / core.c
CommitLineData
57562a72
CG
1/*
2 * core.c - Implementation of core module of MOST Linux driver stack
3 *
4 * Copyright (C) 2013-2015 Microchip Technology Germany II GmbH & Co. KG
5 *
6 * This program is distributed in the hope that it will be useful,
7 * but WITHOUT ANY WARRANTY; without even the implied warranty of
8 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
9 * GNU General Public License for more details.
10 *
11 * This file is licensed under GPLv2.
12 */
13
14#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
15#include <linux/module.h>
16#include <linux/fs.h>
17#include <linux/slab.h>
18#include <linux/init.h>
19#include <linux/device.h>
20#include <linux/list.h>
21#include <linux/poll.h>
22#include <linux/wait.h>
23#include <linux/kobject.h>
24#include <linux/mutex.h>
25#include <linux/completion.h>
26#include <linux/sysfs.h>
27#include <linux/kthread.h>
28#include <linux/dma-mapping.h>
29#include <linux/idr.h>
30#include "mostcore.h"
31
32#define MAX_CHANNELS 64
33#define STRING_SIZE 80
34
35static struct class *most_class;
36static struct device *class_glue_dir;
37static struct ida mdev_id;
71457d48 38static int dummy_num_buffers;
57562a72 39
ccfbaee0
CG
/*
 * struct most_c_aim_obj - per-channel state of one connected AIM
 * @ptr:	 driver of the AIM occupying this slot; NULL means the slot
 *		 is free (see store_add_link()/store_remove_link())
 * @refs:	 NOTE(review): appears to be a reference count taken by the
 *		 AIM — confirm at the use sites outside this chunk
 * @num_buffers: buffers assigned to this AIM; usage not visible here —
 *		 confirm against the buffer distribution code
 */
struct most_c_aim_obj {
	struct most_aim *ptr;
	int refs;
	int num_buffers;
};
45
57562a72
CG
/*
 * struct most_c_obj - run-time state of one MOST channel
 * @kobj:		embedded kobject; the channel's sysfs representation
 * @cleanup:		completed once the last MBO is freed
 *			(see most_free_mbo_coherent())
 * @mbo_ref:		number of MBOs still owned by this channel
 * @mbo_nq_level:	enqueue fill level — exact semantics set outside
 *			this chunk, confirm at the update sites
 * @channel_id:		index into iface->channel_vector[]
 * @is_poisoned:	set when the channel has been poisoned
 * @start_mutex:	serializes channel start/stop
 * @is_starving:	exported through the channel_starving attribute
 * @iface:		hardware interface this channel belongs to
 * @inst:		parent instance object
 * @cfg:		user-configurable channel parameters (sysfs set_*)
 * @keep_mbo:		presumably keeps an MBO after completion — confirm
 * @enqueue_halt:	blocks get_hdm_mbo() from handing out MBOs
 * @fifo:		regular MBO fifo, protected by @fifo_lock
 * @fifo_lock:		protects @fifo, @halt_fifo and @trash_fifo
 * @halt_fifo:		MBOs waiting to be enqueued into the HDM
 * @list:		entry in the owning instance's channel_list
 * @aim0:		first AIM slot
 * @aim1:		second AIM slot
 * @trash_fifo:		MBOs parked for deferred freeing (trash_mbo())
 * @hdm_enqueue_task:	kthread running hdm_enqueue_thread()
 * @hdm_fifo_wq:	wakes the enqueue thread when MBOs arrive
 */
struct most_c_obj {
	struct kobject kobj;
	struct completion cleanup;
	atomic_t mbo_ref;
	atomic_t mbo_nq_level;
	u16 channel_id;
	bool is_poisoned;
	struct mutex start_mutex;
	int is_starving;
	struct most_interface *iface;
	struct most_inst_obj *inst;
	struct most_channel_config cfg;
	bool keep_mbo;
	bool enqueue_halt;
	struct list_head fifo;
	spinlock_t fifo_lock;
	struct list_head halt_fifo;
	struct list_head list;
	struct most_c_aim_obj aim0;
	struct most_c_aim_obj aim1;
	struct list_head trash_fifo;
	struct task_struct *hdm_enqueue_task;
	wait_queue_head_t hdm_fifo_wq;
};
9cbe5aa6 70
57562a72
CG
71#define to_c_obj(d) container_of(d, struct most_c_obj, kobj)
72
/*
 * struct most_inst_obj - one registered MOST interface instance
 * @dev_id:	   instance id — presumably allocated from the mdev_id ida;
 *		   confirm at the registration code
 * @iface:	   the hardware interface backing this instance
 * @channel_list:  list of all most_c_obj of this instance
 * @channel:	   direct channel lookup by channel id
 * @kobj:	   embedded kobject; the instance's sysfs representation
 * @list:	   entry in the global instance_list
 */
struct most_inst_obj {
	int dev_id;
	struct most_interface *iface;
	struct list_head channel_list;
	struct most_c_obj *channel[MAX_CHANNELS];
	struct kobject kobj;
	struct list_head list;
};
9cbe5aa6 81
e7f2b70f
HPGE
/*
 * Mapping between channel data-type flags and the exact strings the sysfs
 * attributes exchange with user space.  The trailing '\n' in each name is
 * intentional: store_set_datatype() strcmp()s the raw sysfs buffer (which
 * ends in a newline) and show_set_datatype() emits the entry verbatim.
 */
static const struct {
	int most_ch_data_type;
	char *name;
} ch_data_type[] = { { MOST_CH_CONTROL, "control\n" },
		     { MOST_CH_ASYNC, "async\n" },
		     { MOST_CH_SYNC, "sync\n" },
		     { MOST_CH_ISOC_AVP, "isoc_avp\n"} };
89
57562a72
CG
90#define to_inst_obj(d) container_of(d, struct most_inst_obj, kobj)
91
92/**
93 * list_pop_mbo - retrieves the first MBO of the list and removes it
94 * @ptr: the list head to grab the MBO from.
95 */
/* Caller must hold the channel's fifo_lock and ensure the list is
 * non-empty: list_first_entry() on an empty list is invalid.
 */
#define list_pop_mbo(ptr)						\
({									\
	struct mbo *_mbo = list_first_entry(ptr, struct mbo, list);	\
	list_del(&_mbo->list);						\
	_mbo;								\
})
102
57562a72
CG
103/* ___ ___
104 * ___C H A N N E L___
105 */
106
107/**
108 * struct most_c_attr - to access the attributes of a channel object
109 * @attr: attributes of a channel
110 * @show: pointer to the show function
111 * @store: pointer to the store function
112 */
113struct most_c_attr {
114 struct attribute attr;
115 ssize_t (*show)(struct most_c_obj *d,
116 struct most_c_attr *attr,
117 char *buf);
118 ssize_t (*store)(struct most_c_obj *d,
119 struct most_c_attr *attr,
120 const char *buf,
121 size_t count);
122};
9cbe5aa6 123
57562a72
CG
124#define to_channel_attr(a) container_of(a, struct most_c_attr, attr)
125
126#define MOST_CHNL_ATTR(_name, _mode, _show, _store) \
127 struct most_c_attr most_chnl_attr_##_name = \
128 __ATTR(_name, _mode, _show, _store)
129
130/**
131 * channel_attr_show - show function of channel object
132 * @kobj: pointer to its kobject
133 * @attr: pointer to its attributes
134 * @buf: buffer
135 */
136static ssize_t channel_attr_show(struct kobject *kobj, struct attribute *attr,
137 char *buf)
138{
139 struct most_c_attr *channel_attr = to_channel_attr(attr);
140 struct most_c_obj *c_obj = to_c_obj(kobj);
141
142 if (!channel_attr->show)
143 return -EIO;
144
145 return channel_attr->show(c_obj, channel_attr, buf);
146}
147
148/**
149 * channel_attr_store - store function of channel object
150 * @kobj: pointer to its kobject
151 * @attr: pointer to its attributes
152 * @buf: buffer
153 * @len: length of buffer
154 */
155static ssize_t channel_attr_store(struct kobject *kobj,
156 struct attribute *attr,
157 const char *buf,
158 size_t len)
159{
160 struct most_c_attr *channel_attr = to_channel_attr(attr);
161 struct most_c_obj *c_obj = to_c_obj(kobj);
162
163 if (!channel_attr->store)
164 return -EIO;
165 return channel_attr->store(c_obj, channel_attr, buf, len);
166}
167
168static const struct sysfs_ops most_channel_sysfs_ops = {
169 .show = channel_attr_show,
170 .store = channel_attr_store,
171};
172
173/**
174 * most_free_mbo_coherent - free an MBO and its coherent buffer
175 * @mbo: buffer to be released
176 *
177 */
178static void most_free_mbo_coherent(struct mbo *mbo)
179{
180 struct most_c_obj *c = mbo->context;
181 u16 const coherent_buf_size = c->cfg.buffer_size + c->cfg.extra_len;
182
183 dma_free_coherent(NULL, coherent_buf_size, mbo->virt_address,
184 mbo->bus_address);
185 kfree(mbo);
186 if (atomic_sub_and_test(1, &c->mbo_ref))
187 complete(&c->cleanup);
188}
189
/**
 * flush_channel_fifos - clear the channel fifos
 * @c: pointer to channel object
 *
 * Frees every MBO queued in the regular fifo and the halt fifo.  The
 * fifo_lock is dropped around each most_free_mbo_coherent() call and
 * reacquired afterwards; the entry is unlinked via list_del() while the
 * lock is still held, so it can be freed safely outside the lock.
 */
static void flush_channel_fifos(struct most_c_obj *c)
{
	unsigned long flags, hf_flags;
	struct mbo *mbo, *tmp;

	if (list_empty(&c->fifo) && list_empty(&c->halt_fifo))
		return;

	spin_lock_irqsave(&c->fifo_lock, flags);
	list_for_each_entry_safe(mbo, tmp, &c->fifo, list) {
		list_del(&mbo->list);
		/* free outside the spinlock; the entry is already unlinked */
		spin_unlock_irqrestore(&c->fifo_lock, flags);
		most_free_mbo_coherent(mbo);
		spin_lock_irqsave(&c->fifo_lock, flags);
	}
	spin_unlock_irqrestore(&c->fifo_lock, flags);

	spin_lock_irqsave(&c->fifo_lock, hf_flags);
	list_for_each_entry_safe(mbo, tmp, &c->halt_fifo, list) {
		list_del(&mbo->list);
		spin_unlock_irqrestore(&c->fifo_lock, hf_flags);
		most_free_mbo_coherent(mbo);
		spin_lock_irqsave(&c->fifo_lock, hf_flags);
	}
	spin_unlock_irqrestore(&c->fifo_lock, hf_flags);

	/* entries re-queued while the lock was dropped would end up here */
	if (unlikely((!list_empty(&c->fifo) || !list_empty(&c->halt_fifo))))
		pr_info("WARN: fifo | trash fifo not empty\n");
}
223
/**
 * flush_trash_fifo - clear the trash fifo
 * @c: pointer to channel object
 *
 * Frees all MBOs parked in the trash fifo by trash_mbo().  Same
 * unlock-free-relock pattern as flush_channel_fifos(): the entry is
 * unlinked under the lock, then freed with the lock dropped.
 *
 * Returns 0 unconditionally.
 */
static int flush_trash_fifo(struct most_c_obj *c)
{
	struct mbo *mbo, *tmp;
	unsigned long flags;

	spin_lock_irqsave(&c->fifo_lock, flags);
	list_for_each_entry_safe(mbo, tmp, &c->trash_fifo, list) {
		list_del(&mbo->list);
		spin_unlock_irqrestore(&c->fifo_lock, flags);
		most_free_mbo_coherent(mbo);
		spin_lock_irqsave(&c->fifo_lock, flags);
	}
	spin_unlock_irqrestore(&c->fifo_lock, flags);
	return 0;
}
243
244/**
245 * most_channel_release - release function of channel object
246 * @kobj: pointer to channel's kobject
247 */
static void most_channel_release(struct kobject *kobj)
{
	/* Called by the kobject core once the last reference is dropped. */
	kfree(to_c_obj(kobj));
}
254
255static ssize_t show_available_directions(struct most_c_obj *c,
edaa1e33
CG
256 struct most_c_attr *attr,
257 char *buf)
57562a72
CG
258{
259 unsigned int i = c->channel_id;
260
261 strcpy(buf, "");
262 if (c->iface->channel_vector[i].direction & MOST_CH_RX)
263 strcat(buf, "dir_rx ");
264 if (c->iface->channel_vector[i].direction & MOST_CH_TX)
265 strcat(buf, "dir_tx ");
266 strcat(buf, "\n");
267 return strlen(buf) + 1;
268}
269
270static ssize_t show_available_datatypes(struct most_c_obj *c,
271 struct most_c_attr *attr,
272 char *buf)
273{
274 unsigned int i = c->channel_id;
275
276 strcpy(buf, "");
277 if (c->iface->channel_vector[i].data_type & MOST_CH_CONTROL)
278 strcat(buf, "control ");
279 if (c->iface->channel_vector[i].data_type & MOST_CH_ASYNC)
280 strcat(buf, "async ");
281 if (c->iface->channel_vector[i].data_type & MOST_CH_SYNC)
282 strcat(buf, "sync ");
283 if (c->iface->channel_vector[i].data_type & MOST_CH_ISOC_AVP)
284 strcat(buf, "isoc_avp ");
285 strcat(buf, "\n");
286 return strlen(buf) + 1;
287}
288
289static
290ssize_t show_number_of_packet_buffers(struct most_c_obj *c,
291 struct most_c_attr *attr,
292 char *buf)
293{
294 unsigned int i = c->channel_id;
295
296 return snprintf(buf, PAGE_SIZE, "%d\n",
297 c->iface->channel_vector[i].num_buffers_packet);
298}
299
300static
301ssize_t show_number_of_stream_buffers(struct most_c_obj *c,
302 struct most_c_attr *attr,
303 char *buf)
304{
305 unsigned int i = c->channel_id;
306
307 return snprintf(buf, PAGE_SIZE, "%d\n",
308 c->iface->channel_vector[i].num_buffers_streaming);
309}
310
311static
312ssize_t show_size_of_packet_buffer(struct most_c_obj *c,
313 struct most_c_attr *attr,
314 char *buf)
315{
316 unsigned int i = c->channel_id;
317
318 return snprintf(buf, PAGE_SIZE, "%d\n",
319 c->iface->channel_vector[i].buffer_size_packet);
320}
321
322static
323ssize_t show_size_of_stream_buffer(struct most_c_obj *c,
324 struct most_c_attr *attr,
325 char *buf)
326{
327 unsigned int i = c->channel_id;
328
329 return snprintf(buf, PAGE_SIZE, "%d\n",
330 c->iface->channel_vector[i].buffer_size_streaming);
331}
332
333static ssize_t show_channel_starving(struct most_c_obj *c,
334 struct most_c_attr *attr,
335 char *buf)
336{
337 return snprintf(buf, PAGE_SIZE, "%d\n", c->is_starving);
338}
339
57562a72 340#define create_show_channel_attribute(val) \
add04a98 341 static MOST_CHNL_ATTR(val, S_IRUGO, show_##val, NULL)
57562a72
CG
342
343create_show_channel_attribute(available_directions);
344create_show_channel_attribute(available_datatypes);
345create_show_channel_attribute(number_of_packet_buffers);
346create_show_channel_attribute(number_of_stream_buffers);
347create_show_channel_attribute(size_of_stream_buffer);
348create_show_channel_attribute(size_of_packet_buffer);
349create_show_channel_attribute(channel_starving);
350
351static ssize_t show_set_number_of_buffers(struct most_c_obj *c,
352 struct most_c_attr *attr,
353 char *buf)
354{
355 return snprintf(buf, PAGE_SIZE, "%d\n", c->cfg.num_buffers);
356}
357
358static ssize_t store_set_number_of_buffers(struct most_c_obj *c,
359 struct most_c_attr *attr,
360 const char *buf,
361 size_t count)
362{
363 int ret = kstrtou16(buf, 0, &c->cfg.num_buffers);
364
365 if (ret)
366 return ret;
367 return count;
368}
369
370static ssize_t show_set_buffer_size(struct most_c_obj *c,
371 struct most_c_attr *attr,
372 char *buf)
373{
374 return snprintf(buf, PAGE_SIZE, "%d\n", c->cfg.buffer_size);
375}
376
377static ssize_t store_set_buffer_size(struct most_c_obj *c,
378 struct most_c_attr *attr,
379 const char *buf,
380 size_t count)
381{
382 int ret = kstrtou16(buf, 0, &c->cfg.buffer_size);
383
384 if (ret)
385 return ret;
386 return count;
387}
388
389static ssize_t show_set_direction(struct most_c_obj *c,
390 struct most_c_attr *attr,
391 char *buf)
392{
393 if (c->cfg.direction & MOST_CH_TX)
394 return snprintf(buf, PAGE_SIZE, "dir_tx\n");
395 else if (c->cfg.direction & MOST_CH_RX)
396 return snprintf(buf, PAGE_SIZE, "dir_rx\n");
397 return snprintf(buf, PAGE_SIZE, "unconfigured\n");
398}
399
400static ssize_t store_set_direction(struct most_c_obj *c,
401 struct most_c_attr *attr,
402 const char *buf,
403 size_t count)
404{
9deba73d 405 if (!strcmp(buf, "dir_rx\n")) {
57562a72 406 c->cfg.direction = MOST_CH_RX;
9deba73d 407 } else if (!strcmp(buf, "dir_tx\n")) {
57562a72 408 c->cfg.direction = MOST_CH_TX;
9deba73d 409 } else {
57562a72
CG
410 pr_info("WARN: invalid attribute settings\n");
411 return -EINVAL;
412 }
413 return count;
414}
415
416static ssize_t show_set_datatype(struct most_c_obj *c,
417 struct most_c_attr *attr,
418 char *buf)
419{
e7f2b70f
HPGE
420 int i;
421
422 for (i = 0; i < ARRAY_SIZE(ch_data_type); i++) {
423 if (c->cfg.data_type & ch_data_type[i].most_ch_data_type)
424 return snprintf(buf, PAGE_SIZE, ch_data_type[i].name);
425 }
57562a72
CG
426 return snprintf(buf, PAGE_SIZE, "unconfigured\n");
427}
428
429static ssize_t store_set_datatype(struct most_c_obj *c,
430 struct most_c_attr *attr,
431 const char *buf,
432 size_t count)
433{
e7f2b70f
HPGE
434 int i;
435
436 for (i = 0; i < ARRAY_SIZE(ch_data_type); i++) {
437 if (!strcmp(buf, ch_data_type[i].name)) {
438 c->cfg.data_type = ch_data_type[i].most_ch_data_type;
439 break;
440 }
441 }
442
443 if (i == ARRAY_SIZE(ch_data_type)) {
57562a72
CG
444 pr_info("WARN: invalid attribute settings\n");
445 return -EINVAL;
446 }
447 return count;
448}
449
450static ssize_t show_set_subbuffer_size(struct most_c_obj *c,
451 struct most_c_attr *attr,
452 char *buf)
453{
454 return snprintf(buf, PAGE_SIZE, "%d\n", c->cfg.subbuffer_size);
455}
456
457static ssize_t store_set_subbuffer_size(struct most_c_obj *c,
458 struct most_c_attr *attr,
459 const char *buf,
460 size_t count)
461{
462 int ret = kstrtou16(buf, 0, &c->cfg.subbuffer_size);
463
464 if (ret)
465 return ret;
466 return count;
467}
468
469static ssize_t show_set_packets_per_xact(struct most_c_obj *c,
470 struct most_c_attr *attr,
471 char *buf)
472{
473 return snprintf(buf, PAGE_SIZE, "%d\n", c->cfg.packets_per_xact);
474}
475
476static ssize_t store_set_packets_per_xact(struct most_c_obj *c,
477 struct most_c_attr *attr,
478 const char *buf,
479 size_t count)
480{
481 int ret = kstrtou16(buf, 0, &c->cfg.packets_per_xact);
482
483 if (ret)
484 return ret;
485 return count;
486}
487
488#define create_channel_attribute(value) \
489 static MOST_CHNL_ATTR(value, S_IRUGO | S_IWUSR, \
490 show_##value, \
491 store_##value)
492
493create_channel_attribute(set_buffer_size);
494create_channel_attribute(set_number_of_buffers);
495create_channel_attribute(set_direction);
496create_channel_attribute(set_datatype);
497create_channel_attribute(set_subbuffer_size);
498create_channel_attribute(set_packets_per_xact);
499
57562a72
CG
500/**
501 * most_channel_def_attrs - array of default attributes of channel object
502 */
503static struct attribute *most_channel_def_attrs[] = {
504 &most_chnl_attr_available_directions.attr,
505 &most_chnl_attr_available_datatypes.attr,
506 &most_chnl_attr_number_of_packet_buffers.attr,
507 &most_chnl_attr_number_of_stream_buffers.attr,
508 &most_chnl_attr_size_of_packet_buffer.attr,
509 &most_chnl_attr_size_of_stream_buffer.attr,
510 &most_chnl_attr_set_number_of_buffers.attr,
511 &most_chnl_attr_set_buffer_size.attr,
512 &most_chnl_attr_set_direction.attr,
513 &most_chnl_attr_set_datatype.attr,
514 &most_chnl_attr_set_subbuffer_size.attr,
515 &most_chnl_attr_set_packets_per_xact.attr,
516 &most_chnl_attr_channel_starving.attr,
517 NULL,
518};
519
520static struct kobj_type most_channel_ktype = {
521 .sysfs_ops = &most_channel_sysfs_ops,
522 .release = most_channel_release,
523 .default_attrs = most_channel_def_attrs,
524};
525
526static struct kset *most_channel_kset;
527
528/**
529 * create_most_c_obj - allocates a channel object
530 * @name: name of the channel object
531 * @parent: parent kobject
532 *
533 * This create a channel object and registers it with sysfs.
534 * Returns a pointer to the object or NULL when something went wrong.
535 */
536static struct most_c_obj *
537create_most_c_obj(const char *name, struct kobject *parent)
538{
539 struct most_c_obj *c;
540 int retval;
541
542 c = kzalloc(sizeof(*c), GFP_KERNEL);
543 if (!c)
544 return NULL;
545 c->kobj.kset = most_channel_kset;
546 retval = kobject_init_and_add(&c->kobj, &most_channel_ktype, parent,
547 "%s", name);
548 if (retval) {
549 kobject_put(&c->kobj);
550 return NULL;
551 }
552 kobject_uevent(&c->kobj, KOBJ_ADD);
553 return c;
554}
555
57562a72
CG
556/* ___ ___
557 * ___I N S T A N C E___
558 */
559#define MOST_INST_ATTR(_name, _mode, _show, _store) \
560 struct most_inst_attribute most_inst_attr_##_name = \
561 __ATTR(_name, _mode, _show, _store)
562
563static struct list_head instance_list;
564
565/**
566 * struct most_inst_attribute - to access the attributes of instance object
567 * @attr: attributes of an instance
568 * @show: pointer to the show function
569 * @store: pointer to the store function
570 */
571struct most_inst_attribute {
572 struct attribute attr;
573 ssize_t (*show)(struct most_inst_obj *d,
574 struct most_inst_attribute *attr,
575 char *buf);
576 ssize_t (*store)(struct most_inst_obj *d,
577 struct most_inst_attribute *attr,
578 const char *buf,
579 size_t count);
580};
9cbe5aa6 581
57562a72
CG
582#define to_instance_attr(a) \
583 container_of(a, struct most_inst_attribute, attr)
584
585/**
586 * instance_attr_show - show function for an instance object
587 * @kobj: pointer to kobject
588 * @attr: pointer to attribute struct
589 * @buf: buffer
590 */
591static ssize_t instance_attr_show(struct kobject *kobj,
592 struct attribute *attr,
593 char *buf)
594{
595 struct most_inst_attribute *instance_attr;
596 struct most_inst_obj *instance_obj;
597
598 instance_attr = to_instance_attr(attr);
599 instance_obj = to_inst_obj(kobj);
600
601 if (!instance_attr->show)
602 return -EIO;
603
604 return instance_attr->show(instance_obj, instance_attr, buf);
605}
606
607/**
608 * instance_attr_store - store function for an instance object
609 * @kobj: pointer to kobject
610 * @attr: pointer to attribute struct
611 * @buf: buffer
612 * @len: length of buffer
613 */
614static ssize_t instance_attr_store(struct kobject *kobj,
615 struct attribute *attr,
616 const char *buf,
617 size_t len)
618{
619 struct most_inst_attribute *instance_attr;
620 struct most_inst_obj *instance_obj;
621
622 instance_attr = to_instance_attr(attr);
623 instance_obj = to_inst_obj(kobj);
624
625 if (!instance_attr->store)
626 return -EIO;
627
628 return instance_attr->store(instance_obj, instance_attr, buf, len);
629}
630
631static const struct sysfs_ops most_inst_sysfs_ops = {
632 .show = instance_attr_show,
633 .store = instance_attr_store,
634};
635
/**
 * most_inst_release - release function for instance object
 * @kobj: pointer to instance's kobject
 *
 * Frees the instance once the kobject core drops its last reference.
 */
static void most_inst_release(struct kobject *kobj)
{
	kfree(to_inst_obj(kobj));
}
648
649static ssize_t show_description(struct most_inst_obj *instance_obj,
650 struct most_inst_attribute *attr,
651 char *buf)
652{
653 return snprintf(buf, PAGE_SIZE, "%s\n",
654 instance_obj->iface->description);
655}
656
657static ssize_t show_interface(struct most_inst_obj *instance_obj,
658 struct most_inst_attribute *attr,
659 char *buf)
660{
661 switch (instance_obj->iface->interface) {
662 case ITYPE_LOOPBACK:
663 return snprintf(buf, PAGE_SIZE, "loopback\n");
664 case ITYPE_I2C:
665 return snprintf(buf, PAGE_SIZE, "i2c\n");
666 case ITYPE_I2S:
667 return snprintf(buf, PAGE_SIZE, "i2s\n");
668 case ITYPE_TSI:
669 return snprintf(buf, PAGE_SIZE, "tsi\n");
670 case ITYPE_HBI:
671 return snprintf(buf, PAGE_SIZE, "hbi\n");
672 case ITYPE_MEDIALB_DIM:
673 return snprintf(buf, PAGE_SIZE, "mlb_dim\n");
674 case ITYPE_MEDIALB_DIM2:
675 return snprintf(buf, PAGE_SIZE, "mlb_dim2\n");
676 case ITYPE_USB:
677 return snprintf(buf, PAGE_SIZE, "usb\n");
678 case ITYPE_PCIE:
679 return snprintf(buf, PAGE_SIZE, "pcie\n");
680 }
681 return snprintf(buf, PAGE_SIZE, "unknown\n");
682}
683
684#define create_inst_attribute(value) \
685 static MOST_INST_ATTR(value, S_IRUGO, show_##value, NULL)
686
687create_inst_attribute(description);
688create_inst_attribute(interface);
689
690static struct attribute *most_inst_def_attrs[] = {
691 &most_inst_attr_description.attr,
692 &most_inst_attr_interface.attr,
693 NULL,
694};
695
696static struct kobj_type most_inst_ktype = {
697 .sysfs_ops = &most_inst_sysfs_ops,
698 .release = most_inst_release,
699 .default_attrs = most_inst_def_attrs,
700};
701
702static struct kset *most_inst_kset;
703
57562a72
CG
704/**
705 * create_most_inst_obj - creates an instance object
706 * @name: name of the object to be created
707 *
708 * This allocates memory for an instance structure, assigns the proper kset
709 * and registers it with sysfs.
710 *
711 * Returns a pointer to the instance object or NULL when something went wrong.
712 */
713static struct most_inst_obj *create_most_inst_obj(const char *name)
714{
715 struct most_inst_obj *inst;
716 int retval;
717
718 inst = kzalloc(sizeof(*inst), GFP_KERNEL);
719 if (!inst)
720 return NULL;
721 inst->kobj.kset = most_inst_kset;
722 retval = kobject_init_and_add(&inst->kobj, &most_inst_ktype, NULL,
723 "%s", name);
724 if (retval) {
725 kobject_put(&inst->kobj);
726 return NULL;
727 }
728 kobject_uevent(&inst->kobj, KOBJ_ADD);
729 return inst;
730}
731
/**
 * destroy_most_inst_obj - MOST instance release function
 * @inst: pointer to the instance object
 *
 * Tears down every channel of the instance — first draining the trash
 * and regular fifos so all MBOs are freed — and then drops the kobject
 * references; the release callbacks free the memory once the counts
 * reach zero.
 */
static void destroy_most_inst_obj(struct most_inst_obj *inst)
{
	struct most_c_obj *c, *tmp;

	list_for_each_entry_safe(c, tmp, &inst->channel_list, list) {
		/* free all MBOs before the channel object itself goes away */
		flush_trash_fifo(c);
		flush_channel_fifos(c);
		kobject_put(&c->kobj);
	}
	kobject_put(&inst->kobj);
}
750
751/* ___ ___
752 * ___A I M___
753 */
754struct most_aim_obj {
755 struct kobject kobj;
756 struct list_head list;
757 struct most_aim *driver;
758 char add_link[STRING_SIZE];
759 char remove_link[STRING_SIZE];
760};
9cbe5aa6 761
57562a72
CG
762#define to_aim_obj(d) container_of(d, struct most_aim_obj, kobj)
763
764static struct list_head aim_list;
765
57562a72
CG
766/**
767 * struct most_aim_attribute - to access the attributes of AIM object
768 * @attr: attributes of an AIM
769 * @show: pointer to the show function
770 * @store: pointer to the store function
771 */
772struct most_aim_attribute {
773 struct attribute attr;
774 ssize_t (*show)(struct most_aim_obj *d,
775 struct most_aim_attribute *attr,
776 char *buf);
777 ssize_t (*store)(struct most_aim_obj *d,
778 struct most_aim_attribute *attr,
779 const char *buf,
780 size_t count);
781};
9cbe5aa6 782
57562a72
CG
783#define to_aim_attr(a) container_of(a, struct most_aim_attribute, attr)
784
785/**
786 * aim_attr_show - show function of an AIM object
787 * @kobj: pointer to kobject
788 * @attr: pointer to attribute struct
789 * @buf: buffer
790 */
791static ssize_t aim_attr_show(struct kobject *kobj,
792 struct attribute *attr,
793 char *buf)
794{
795 struct most_aim_attribute *aim_attr;
796 struct most_aim_obj *aim_obj;
797
798 aim_attr = to_aim_attr(attr);
799 aim_obj = to_aim_obj(kobj);
800
801 if (!aim_attr->show)
802 return -EIO;
803
804 return aim_attr->show(aim_obj, aim_attr, buf);
805}
806
807/**
808 * aim_attr_store - store function of an AIM object
809 * @kobj: pointer to kobject
810 * @attr: pointer to attribute struct
811 * @buf: buffer
812 * @len: length of buffer
813 */
814static ssize_t aim_attr_store(struct kobject *kobj,
815 struct attribute *attr,
816 const char *buf,
817 size_t len)
818{
819 struct most_aim_attribute *aim_attr;
820 struct most_aim_obj *aim_obj;
821
822 aim_attr = to_aim_attr(attr);
823 aim_obj = to_aim_obj(kobj);
824
825 if (!aim_attr->store)
826 return -EIO;
827 return aim_attr->store(aim_obj, aim_attr, buf, len);
828}
829
830static const struct sysfs_ops most_aim_sysfs_ops = {
831 .show = aim_attr_show,
832 .store = aim_attr_store,
833};
834
/**
 * most_aim_release - AIM release function
 * @kobj: pointer to AIM's kobject
 *
 * Frees the AIM object once its last kobject reference is dropped.
 */
static void most_aim_release(struct kobject *kobj)
{
	kfree(to_aim_obj(kobj));
}
845
846static ssize_t show_add_link(struct most_aim_obj *aim_obj,
847 struct most_aim_attribute *attr,
848 char *buf)
849{
850 return snprintf(buf, PAGE_SIZE, "%s\n", aim_obj->add_link);
851}
852
853/**
854 * split_string - parses and changes string in the buffer buf and
855 * splits it into two mandatory and one optional substrings.
856 *
857 * @buf: complete string from attribute 'add_channel'
858 * @a: address of pointer to 1st substring (=instance name)
859 * @b: address of pointer to 2nd substring (=channel name)
860 * @c: optional address of pointer to 3rd substring (=user defined name)
861 *
862 * Examples:
863 *
864 * Input: "mdev0:ch0@ep_81:my_channel\n" or
865 * "mdev0:ch0@ep_81:my_channel"
866 *
867 * Output: *a -> "mdev0", *b -> "ch0@ep_81", *c -> "my_channel"
868 *
869 * Input: "mdev0:ch0@ep_81\n"
870 * Output: *a -> "mdev0", *b -> "ch0@ep_81", *c -> ""
871 *
872 * Input: "mdev0:ch0@ep_81"
873 * Output: *a -> "mdev0", *b -> "ch0@ep_81", *c == NULL
874 */
c942ea7a 875static int split_string(char *buf, char **a, char **b, char **c)
57562a72
CG
876{
877 *a = strsep(&buf, ":");
878 if (!*a)
879 return -EIO;
880
881 *b = strsep(&buf, ":\n");
882 if (!*b)
883 return -EIO;
884
885 if (c)
886 *c = strsep(&buf, ":\n");
887
888 return 0;
889}
890
/**
 * get_channel_by_name - get pointer to channel object
 * @mdev: name of the device instance
 * @mdev_ch: name of the respective channel
 *
 * Walks the global instance list for an instance whose kobject name
 * matches @mdev, then walks that instance's channel list for @mdev_ch.
 * Returns the channel or ERR_PTR(-EIO) when either lookup fails.
 * NOTE(review): no lock on instance_list is visible here — confirm the
 * callers serialize against registration/removal.
 */
static struct
most_c_obj *get_channel_by_name(char *mdev, char *mdev_ch)
{
	struct most_c_obj *c, *tmp;
	struct most_inst_obj *i, *i_tmp;
	int found = 0;

	/* find the instance; the loop cursor 'i' is reused below */
	list_for_each_entry_safe(i, i_tmp, &instance_list, list) {
		if (!strcmp(kobject_name(&i->kobj), mdev)) {
			found++;
			break;
		}
	}
	if (unlikely(!found))
		return ERR_PTR(-EIO);

	/* find the channel within the matched instance */
	list_for_each_entry_safe(c, tmp, &i->channel_list, list) {
		if (!strcmp(kobject_name(&c->kobj), mdev_ch)) {
			found++;
			break;
		}
	}
	/* found < 2 means the channel loop ran to completion without a hit */
	if (unlikely(found < 2))
		return ERR_PTR(-EIO);
	return c;
}
924
925/**
926 * store_add_link - store() function for add_link attribute
927 * @aim_obj: pointer to AIM object
928 * @attr: its attributes
929 * @buf: buffer
930 * @len: buffer length
931 *
932 * This parses the string given by buf and splits it into
933 * three substrings. Note: third substring is optional. In case a cdev
934 * AIM is loaded the optional 3rd substring will make up the name of
935 * device node in the /dev directory. If omitted, the device node will
936 * inherit the channel's name within sysfs.
937 *
938 * Searches for a pair of device and channel and probes the AIM
939 *
940 * Example:
941 * (1) echo -n -e "mdev0:ch0@ep_81:my_rxchannel\n" >add_link
942 * (2) echo -n -e "mdev0:ch0@ep_81\n" >add_link
943 *
944 * (1) would create the device node /dev/my_rxchannel
945 * (2) would create the device node /dev/mdev0-ch0@ep_81
946 */
static ssize_t store_add_link(struct most_aim_obj *aim_obj,
			      struct most_aim_attribute *attr,
			      const char *buf,
			      size_t len)
{
	struct most_c_obj *c;
	struct most_aim **aim_ptr;
	char buffer[STRING_SIZE];
	char *mdev;
	char *mdev_ch;
	char *mdev_devnod;
	char devnod_buf[STRING_SIZE];
	int ret;
	/* +1 for the NUL; both copies are bounded by STRING_SIZE */
	size_t max_len = min_t(size_t, len + 1, STRING_SIZE);

	/* keep a copy for the show function, and a scratch copy to parse */
	strlcpy(buffer, buf, max_len);
	strlcpy(aim_obj->add_link, buf, max_len);

	ret = split_string(buffer, &mdev, &mdev_ch, &mdev_devnod);
	if (ret)
		return ret;

	/* no user-defined name given: default to "<instance>-<channel>" */
	if (!mdev_devnod || *mdev_devnod == 0) {
		snprintf(devnod_buf, sizeof(devnod_buf), "%s-%s", mdev,
			 mdev_ch);
		mdev_devnod = devnod_buf;
	}

	c = get_channel_by_name(mdev, mdev_ch);
	if (IS_ERR(c))
		return -ENODEV;

	/* claim the first free AIM slot; at most two AIMs per channel */
	if (!c->aim0.ptr)
		aim_ptr = &c->aim0.ptr;
	else if (!c->aim1.ptr)
		aim_ptr = &c->aim1.ptr;
	else
		return -ENOSPC;

	/* set the slot before probing so the AIM sees itself connected;
	 * roll back and propagate the error if the probe fails
	 */
	*aim_ptr = aim_obj->driver;
	ret = aim_obj->driver->probe_channel(c->iface, c->channel_id,
					     &c->cfg, &c->kobj, mdev_devnod);
	if (ret) {
		*aim_ptr = NULL;
		return ret;
	}

	return len;
}
996
c942ea7a 997static struct most_aim_attribute most_aim_attr_add_link =
57562a72
CG
998 __ATTR(add_link, S_IRUGO | S_IWUSR, show_add_link, store_add_link);
999
1000static ssize_t show_remove_link(struct most_aim_obj *aim_obj,
1001 struct most_aim_attribute *attr,
1002 char *buf)
1003{
1004 return snprintf(buf, PAGE_SIZE, "%s\n", aim_obj->remove_link);
1005}
1006
1007/**
1008 * store_remove_link - store function for remove_link attribute
1009 * @aim_obj: pointer to AIM object
1010 * @attr: its attributes
1011 * @buf: buffer
1012 * @len: buffer length
1013 *
1014 * Example:
1015 * echo -n -e "mdev0:ch0@ep_81\n" >remove_link
1016 */
static ssize_t store_remove_link(struct most_aim_obj *aim_obj,
				 struct most_aim_attribute *attr,
				 const char *buf,
				 size_t len)
{
	struct most_c_obj *c;
	char buffer[STRING_SIZE];
	char *mdev;
	char *mdev_ch;
	int ret;
	/* +1 for the NUL; both copies are bounded by STRING_SIZE */
	size_t max_len = min_t(size_t, len + 1, STRING_SIZE);

	/* keep a copy for the show function, and a scratch copy to parse */
	strlcpy(buffer, buf, max_len);
	strlcpy(aim_obj->remove_link, buf, max_len);
	ret = split_string(buffer, &mdev, &mdev_ch, NULL);
	if (ret)
		return ret;

	c = get_channel_by_name(mdev, mdev_ch);
	if (IS_ERR(c))
		return -ENODEV;

	if (aim_obj->driver->disconnect_channel(c->iface, c->channel_id))
		return -EIO;
	/* clear whichever slot this AIM occupied */
	if (c->aim0.ptr == aim_obj->driver)
		c->aim0.ptr = NULL;
	if (c->aim1.ptr == aim_obj->driver)
		c->aim1.ptr = NULL;
	return len;
}
1047
c942ea7a 1048static struct most_aim_attribute most_aim_attr_remove_link =
1446ff09
CG
1049 __ATTR(remove_link, S_IRUGO | S_IWUSR, show_remove_link,
1050 store_remove_link);
57562a72
CG
1051
1052static struct attribute *most_aim_def_attrs[] = {
1053 &most_aim_attr_add_link.attr,
1054 &most_aim_attr_remove_link.attr,
1055 NULL,
1056};
1057
1058static struct kobj_type most_aim_ktype = {
1059 .sysfs_ops = &most_aim_sysfs_ops,
1060 .release = most_aim_release,
1061 .default_attrs = most_aim_def_attrs,
1062};
1063
1064static struct kset *most_aim_kset;
1065
1066/**
1067 * create_most_aim_obj - creates an AIM object
1068 * @name: name of the AIM
1069 *
1070 * This creates an AIM object assigns the proper kset and registers
1071 * it with sysfs.
1072 * Returns a pointer to the object or NULL if something went wrong.
1073 */
1074static struct most_aim_obj *create_most_aim_obj(const char *name)
1075{
1076 struct most_aim_obj *most_aim;
1077 int retval;
1078
1079 most_aim = kzalloc(sizeof(*most_aim), GFP_KERNEL);
1080 if (!most_aim)
1081 return NULL;
1082 most_aim->kobj.kset = most_aim_kset;
1083 retval = kobject_init_and_add(&most_aim->kobj, &most_aim_ktype,
1084 NULL, "%s", name);
1085 if (retval) {
1086 kobject_put(&most_aim->kobj);
1087 return NULL;
1088 }
1089 kobject_uevent(&most_aim->kobj, KOBJ_ADD);
1090 return most_aim;
1091}
1092
1093/**
1094 * destroy_most_aim_obj - AIM release function
1095 * @p: pointer to AIM object
1096 *
1097 * This decrements the reference counter of the AIM object. If the
1098 * reference count turns zero, its release function will be called.
1099 */
1100static void destroy_most_aim_obj(struct most_aim_obj *p)
1101{
1102 kobject_put(&p->kobj);
1103}
1104
57562a72
CG
1105/* ___ ___
1106 * ___C O R E___
1107 */
1108
1109/**
1110 * Instantiation of the MOST bus
1111 */
c942ea7a 1112static struct bus_type most_bus = {
57562a72
CG
1113 .name = "most",
1114};
1115
1116/**
1117 * Instantiation of the core driver
1118 */
c942ea7a 1119static struct device_driver mostcore = {
57562a72
CG
1120 .name = "mostcore",
1121 .bus = &most_bus,
1122};
1123
1124static inline void trash_mbo(struct mbo *mbo)
1125{
1126 unsigned long flags;
1127 struct most_c_obj *c = mbo->context;
1128
1129 spin_lock_irqsave(&c->fifo_lock, flags);
1130 list_add(&mbo->list, &c->trash_fifo);
1131 spin_unlock_irqrestore(&c->fifo_lock, flags);
1132}
1133
1134static struct mbo *get_hdm_mbo(struct most_c_obj *c)
1135{
1136 unsigned long flags;
1137 struct mbo *mbo;
1138
1139 spin_lock_irqsave(&c->fifo_lock, flags);
1140 if (c->enqueue_halt || list_empty(&c->halt_fifo))
1141 mbo = NULL;
1142 else
1143 mbo = list_pop_mbo(&c->halt_fifo);
1144 spin_unlock_irqrestore(&c->fifo_lock, flags);
1145 return mbo;
1146}
1147
1148static void nq_hdm_mbo(struct mbo *mbo)
1149{
1150 unsigned long flags;
1151 struct most_c_obj *c = mbo->context;
1152
1153 spin_lock_irqsave(&c->fifo_lock, flags);
1154 list_add_tail(&mbo->list, &c->halt_fifo);
1155 spin_unlock_irqrestore(&c->fifo_lock, flags);
1156 wake_up_interruptible(&c->hdm_fifo_wq);
1157}
1158
/*
 * hdm_enqueue_thread - kthread feeding MBOs from the halt fifo to the HDM
 * @data: the channel object (struct most_c_obj *)
 *
 * Sleeps until an MBO shows up in the halt fifo (or the thread is asked
 * to stop), then hands the MBO to the interface's enqueue routine.  For
 * Rx channels the buffer length is reset to the configured size before
 * submission.  If the HDM rejects an MBO it is re-queued and the thread
 * terminates.
 */
static int hdm_enqueue_thread(void *data)
{
	struct most_c_obj *c = data;
	struct mbo *mbo;
	typeof(c->iface->enqueue) enqueue = c->iface->enqueue;

	while (likely(!kthread_should_stop())) {
		wait_event_interruptible(c->hdm_fifo_wq,
					 (mbo = get_hdm_mbo(c)) ||
					 kthread_should_stop());

		/* spurious wakeup or stop request with empty fifo */
		if (unlikely(!mbo))
			continue;

		if (c->cfg.direction == MOST_CH_RX)
			mbo->buffer_length = c->cfg.buffer_size;

		if (unlikely(enqueue(mbo->ifp, mbo->hdm_channel_id, mbo))) {
			pr_err("hdm enqueue failed\n");
			nq_hdm_mbo(mbo);
			/* clear the task pointer so a restart spawns a new thread */
			c->hdm_enqueue_task = NULL;
			return 0;
		}
	}

	return 0;
}
1186
1187static int run_enqueue_thread(struct most_c_obj *c, int channel_id)
1188{
1189 struct task_struct *task =
246ed517
SB
1190 kthread_run(hdm_enqueue_thread, c, "hdm_fifo_%d",
1191 channel_id);
57562a72
CG
1192
1193 if (IS_ERR(task))
1194 return PTR_ERR(task);
1195
1196 c->hdm_enqueue_task = task;
1197 return 0;
1198}
1199
/**
 * arm_mbo - recycle MBO for further usage
 * @mbo: buffer object
 *
 * This puts an MBO back to the list to have it ready for up coming
 * tx transactions.
 *
 * In case the MBO belongs to a channel that recently has been
 * poisoned, the MBO is scheduled to be trashed.
 * Calls the completion handler of an attached AIM.
 */
static void arm_mbo(struct mbo *mbo)
{
	unsigned long flags;
	struct most_c_obj *c;

	BUG_ON((!mbo) || (!mbo->context));
	c = mbo->context;

	if (c->is_poisoned) {
		trash_mbo(mbo);
		return;
	}

	spin_lock_irqsave(&c->fifo_lock, flags);
	/* credit the per-AIM counter this MBO was charged to (most_get_mbo) */
	++*mbo->num_buffers_ptr;
	list_add_tail(&mbo->list, &c->fifo);
	spin_unlock_irqrestore(&c->fifo_lock, flags);

	/* notify both possibly attached AIMs that a Tx buffer is free again */
	if (c->aim0.refs && c->aim0.ptr->tx_completion)
		c->aim0.ptr->tx_completion(c->iface, c->channel_id);

	if (c->aim1.refs && c->aim1.ptr->tx_completion)
		c->aim1.ptr->tx_completion(c->iface, c->channel_id);
}
1235
/**
 * arm_mbo_chain - helper function that arms an MBO chain for the HDM
 * @c: pointer to interface channel
 * @dir: direction of the channel
 * @compl: pointer to completion function
 *
 * This allocates buffer objects including the containing DMA coherent
 * buffer and puts them in the fifo.
 * Buffers of Rx channels are put in the kthread fifo, hence immediately
 * submitted to the HDM.
 *
 * Returns the number of allocated and enqueued MBOs.
 */
static int arm_mbo_chain(struct most_c_obj *c, int dir,
			 void (*compl)(struct mbo *))
{
	unsigned int i;
	int retval;
	struct mbo *mbo;
	u32 coherent_buf_size = c->cfg.buffer_size + c->cfg.extra_len;

	atomic_set(&c->mbo_nq_level, 0);

	for (i = 0; i < c->cfg.num_buffers; i++) {
		mbo = kzalloc(sizeof(*mbo), GFP_KERNEL);
		if (!mbo) {
			pr_info("WARN: Allocation of MBO failed.\n");
			/* partial count is returned; MBOs armed so far stay queued */
			retval = i;
			goto _exit;
		}
		mbo->context = c;
		mbo->ifp = c->iface;
		mbo->hdm_channel_id = c->channel_id;
		/* NOTE(review): passing a NULL device to dma_alloc_coherent()
		 * is deprecated; presumably the HDM's device should be used
		 * here -- TODO confirm
		 */
		mbo->virt_address = dma_alloc_coherent(NULL,
						       coherent_buf_size,
						       &mbo->bus_address,
						       GFP_KERNEL);
		if (!mbo->virt_address) {
			pr_info("WARN: No DMA coherent buffer.\n");
			retval = i;
			goto _error1;
		}
		mbo->complete = compl;
		/* the real per-AIM counter is attached in most_get_mbo() */
		mbo->num_buffers_ptr = &dummy_num_buffers;
		if (dir == MOST_CH_RX) {
			/* Rx buffers go straight to the HDM via the halt fifo */
			nq_hdm_mbo(mbo);
			atomic_inc(&c->mbo_nq_level);
		} else {
			/* Tx buffers are parked in the channel's free pool */
			arm_mbo(mbo);
		}
	}
	return i;

_error1:
	kfree(mbo);
_exit:
	return retval;
}
1294
1295/**
1296 * most_submit_mbo - submits an MBO to fifo
1297 * @mbo: pointer to the MBO
1298 *
1299 */
1300int most_submit_mbo(struct mbo *mbo)
1301{
57562a72
CG
1302 if (unlikely((!mbo) || (!mbo->context))) {
1303 pr_err("Bad MBO or missing channel reference\n");
1304 return -EINVAL;
1305 }
57562a72
CG
1306
1307 nq_hdm_mbo(mbo);
1308 return 0;
1309}
1310EXPORT_SYMBOL_GPL(most_submit_mbo);
1311
1312/**
1313 * most_write_completion - write completion handler
1314 * @mbo: pointer to MBO
1315 *
1316 * This recycles the MBO for further usage. In case the channel has been
1317 * poisoned, the MBO is scheduled to be trashed.
1318 */
1319static void most_write_completion(struct mbo *mbo)
1320{
1321 struct most_c_obj *c;
1322
1323 BUG_ON((!mbo) || (!mbo->context));
1324
1325 c = mbo->context;
1326 if (mbo->status == MBO_E_INVAL)
1327 pr_info("WARN: Tx MBO status: invalid\n");
ec58d2a8 1328 if (unlikely(c->is_poisoned || (mbo->status == MBO_E_CLOSE)))
57562a72
CG
1329 trash_mbo(mbo);
1330 else
1331 arm_mbo(mbo);
1332}
1333
1334/**
1335 * get_channel_by_iface - get pointer to channel object
1336 * @iface: pointer to interface instance
1337 * @id: channel ID
1338 *
1339 * This retrieves a pointer to a channel of the given interface and channel ID.
1340 */
1341static struct
1342most_c_obj *get_channel_by_iface(struct most_interface *iface, int id)
1343{
1344 struct most_inst_obj *i;
1345
1346 if (unlikely(!iface)) {
1347 pr_err("Bad interface\n");
1348 return NULL;
1349 }
1350 if (unlikely((id < 0) || (id >= iface->num_channels))) {
1351 pr_err("Channel index (%d) out of range\n", id);
1352 return NULL;
1353 }
1354 i = iface->priv;
1355 if (unlikely(!i)) {
1356 pr_err("interface is not registered\n");
1357 return NULL;
1358 }
1359 return i->channel[id];
1360}
1361
/**
 * channel_has_mbo - check whether the channel can hand out a free MBO
 * @iface: interface the channel belongs to
 * @id: channel ID
 * @aim: AIM asking for a buffer
 *
 * Returns 1 if a buffer is available to @aim, 0 if not, and -EINVAL
 * for a bad channel reference.
 */
int channel_has_mbo(struct most_interface *iface, int id, struct most_aim *aim)
{
	struct most_c_obj *c = get_channel_by_iface(iface, id);
	unsigned long flags;
	int empty;

	if (unlikely(!c))
		return -EINVAL;

	/*
	 * When both AIMs are attached, each one is limited to its own share
	 * of the channel's buffers; report starvation once the caller's
	 * quota (aimX.num_buffers) is exhausted.
	 */
	if (c->aim0.refs && c->aim1.refs &&
	    ((aim == c->aim0.ptr && c->aim0.num_buffers <= 0) ||
	     (aim == c->aim1.ptr && c->aim1.num_buffers <= 0)))
		return 0;

	spin_lock_irqsave(&c->fifo_lock, flags);
	empty = list_empty(&c->fifo);
	spin_unlock_irqrestore(&c->fifo_lock, flags);
	return !empty;
}
EXPORT_SYMBOL_GPL(channel_has_mbo);
1382
/**
 * most_get_mbo - get pointer to an MBO of pool
 * @iface: pointer to interface instance
 * @id: channel ID
 * @aim: AIM the buffer is handed out to
 *
 * This attempts to get a free buffer out of the channel fifo.
 * Returns a pointer to MBO on success or NULL otherwise.
 */
struct mbo *most_get_mbo(struct most_interface *iface, int id,
			 struct most_aim *aim)
{
	struct mbo *mbo;
	struct most_c_obj *c;
	unsigned long flags;
	int *num_buffers_ptr;

	c = get_channel_by_iface(iface, id);
	if (unlikely(!c))
		return NULL;

	/* with two AIMs attached, enforce the caller's buffer quota */
	if (c->aim0.refs && c->aim1.refs &&
	    ((aim == c->aim0.ptr && c->aim0.num_buffers <= 0) ||
	     (aim == c->aim1.ptr && c->aim1.num_buffers <= 0)))
		return NULL;

	/* pick the counter that tracks this AIM's outstanding buffers */
	if (aim == c->aim0.ptr)
		num_buffers_ptr = &c->aim0.num_buffers;
	else if (aim == c->aim1.ptr)
		num_buffers_ptr = &c->aim1.num_buffers;
	else
		num_buffers_ptr = &dummy_num_buffers;

	spin_lock_irqsave(&c->fifo_lock, flags);
	if (list_empty(&c->fifo)) {
		spin_unlock_irqrestore(&c->fifo_lock, flags);
		return NULL;
	}
	mbo = list_pop_mbo(&c->fifo);
	--*num_buffers_ptr;
	spin_unlock_irqrestore(&c->fifo_lock, flags);

	/* remember the counter so arm_mbo() can credit it on recycle */
	mbo->num_buffers_ptr = num_buffers_ptr;
	mbo->buffer_length = c->cfg.buffer_size;
	return mbo;
}
EXPORT_SYMBOL_GPL(most_get_mbo);
1429
57562a72
CG
1430/**
1431 * most_put_mbo - return buffer to pool
1432 * @mbo: buffer object
1433 */
1434void most_put_mbo(struct mbo *mbo)
1435{
6ed90e36 1436 struct most_c_obj *c = mbo->context;
57562a72 1437
57562a72
CG
1438 if (c->cfg.direction == MOST_CH_TX) {
1439 arm_mbo(mbo);
1440 return;
1441 }
1442 nq_hdm_mbo(mbo);
1443 atomic_inc(&c->mbo_nq_level);
1444}
1445EXPORT_SYMBOL_GPL(most_put_mbo);
1446
/**
 * most_read_completion - read completion handler
 * @mbo: pointer to MBO
 *
 * This function is called by the HDM when data has been received from the
 * hardware and copied to the buffer of the MBO.
 *
 * In case the channel has been poisoned it puts the buffer in the trash queue.
 * Otherwise, it passes the buffer to an AIM for further processing.
 */
static void most_read_completion(struct mbo *mbo)
{
	struct most_c_obj *c = mbo->context;

	if (unlikely(c->is_poisoned || (mbo->status == MBO_E_CLOSE))) {
		trash_mbo(mbo);
		return;
	}

	/* invalid transfers are resubmitted to the hardware untouched */
	if (mbo->status == MBO_E_INVAL) {
		nq_hdm_mbo(mbo);
		atomic_inc(&c->mbo_nq_level);
		return;
	}

	/* flag starvation when the last pending Rx buffer gets consumed */
	if (atomic_sub_and_test(1, &c->mbo_nq_level)) {
		pr_info("WARN: rx device out of buffers\n");
		c->is_starving = 1;
	}

	/* offer the buffer to the attached AIMs in order; 0 == accepted */
	if (c->aim0.refs && c->aim0.ptr->rx_completion &&
	    c->aim0.ptr->rx_completion(mbo) == 0)
		return;

	if (c->aim1.refs && c->aim1.ptr->rx_completion &&
	    c->aim1.ptr->rx_completion(mbo) == 0)
		return;

	/* nobody took it -- recycle the buffer */
	most_put_mbo(mbo);
}
1487
/**
 * most_start_channel - prepares a channel for communication
 * @iface: pointer to interface instance
 * @id: channel ID
 * @aim: AIM that wants to use the channel
 *
 * This prepares the channel for usage. Cross-checks whether the
 * channel's been properly configured.
 *
 * Returns 0 on success or error code otherwise.
 */
int most_start_channel(struct most_interface *iface, int id,
		       struct most_aim *aim)
{
	int num_buffer;
	int ret;
	struct most_c_obj *c = get_channel_by_iface(iface, id);

	if (unlikely(!c))
		return -EINVAL;

	mutex_lock(&c->start_mutex);
	if (c->aim0.refs + c->aim1.refs > 0)
		goto out; /* already started by other aim */

	/* pin the HDM module for the lifetime of the running channel */
	if (!try_module_get(iface->mod)) {
		pr_info("failed to acquire HDM lock\n");
		mutex_unlock(&c->start_mutex);
		return -ENOLCK;
	}

	c->cfg.extra_len = 0;
	if (c->iface->configure(c->iface, c->channel_id, &c->cfg)) {
		pr_info("channel configuration failed. Go check settings...\n");
		ret = -EINVAL;
		goto error;
	}

	init_waitqueue_head(&c->hdm_fifo_wq);

	if (c->cfg.direction == MOST_CH_RX)
		num_buffer = arm_mbo_chain(c, c->cfg.direction,
					   most_read_completion);
	else
		num_buffer = arm_mbo_chain(c, c->cfg.direction,
					   most_write_completion);
	if (unlikely(!num_buffer)) {
		pr_info("failed to allocate memory\n");
		ret = -ENOMEM;
		goto error;
	}

	ret = run_enqueue_thread(c, id);
	if (ret)
		goto error;

	c->is_starving = 0;
	/* split the buffer budget between the two possible AIMs */
	c->aim0.num_buffers = c->cfg.num_buffers / 2;
	c->aim1.num_buffers = c->cfg.num_buffers - c->aim0.num_buffers;
	atomic_set(&c->mbo_ref, num_buffer);

out:
	/* account the caller's reference in either case */
	if (aim == c->aim0.ptr)
		c->aim0.refs++;
	if (aim == c->aim1.ptr)
		c->aim1.refs++;
	mutex_unlock(&c->start_mutex);
	return 0;

error:
	module_put(iface->mod);
	mutex_unlock(&c->start_mutex);
	return ret;
}
EXPORT_SYMBOL_GPL(most_start_channel);
1562
/**
 * most_stop_channel - stops a running channel
 * @iface: pointer to interface instance
 * @id: channel ID
 * @aim: AIM that gives up the channel
 *
 * The channel is only torn down when the last attached AIM lets go of it;
 * otherwise just the caller's reference count is dropped.
 */
int most_stop_channel(struct most_interface *iface, int id,
		      struct most_aim *aim)
{
	struct most_c_obj *c;

	if (unlikely((!iface) || (id >= iface->num_channels) || (id < 0))) {
		pr_err("Bad interface or index out of range\n");
		return -EINVAL;
	}
	c = get_channel_by_iface(iface, id);
	if (unlikely(!c))
		return -EINVAL;

	mutex_lock(&c->start_mutex);
	if (c->aim0.refs + c->aim1.refs >= 2)
		goto out; /* another AIM still uses the channel */

	if (c->hdm_enqueue_task)
		kthread_stop(c->hdm_enqueue_task);
	c->hdm_enqueue_task = NULL;

	if (iface->mod)
		module_put(iface->mod);

	/* mark poisoned so completion handlers trash returning MBOs */
	c->is_poisoned = true;
	if (c->iface->poison_channel(c->iface, c->channel_id)) {
		pr_err("Cannot stop channel %d of mdev %s\n", c->channel_id,
		       c->iface->description);
		mutex_unlock(&c->start_mutex);
		return -EAGAIN;
	}
	flush_trash_fifo(c);
	flush_channel_fifos(c);

#ifdef CMPL_INTERRUPTIBLE
	if (wait_for_completion_interruptible(&c->cleanup)) {
		pr_info("Interrupted while clean up ch %d\n", c->channel_id);
		mutex_unlock(&c->start_mutex);
		return -EINTR;
	}
#else
	wait_for_completion(&c->cleanup);
#endif
	c->is_poisoned = false;

out:
	if (aim == c->aim0.ptr)
		c->aim0.refs--;
	if (aim == c->aim1.ptr)
		c->aim1.refs--;
	mutex_unlock(&c->start_mutex);
	return 0;
}
EXPORT_SYMBOL_GPL(most_stop_channel);
1622
1623/**
1624 * most_register_aim - registers an AIM (driver) with the core
1625 * @aim: instance of AIM to be registered
1626 */
1627int most_register_aim(struct most_aim *aim)
1628{
1629 struct most_aim_obj *aim_obj;
1630
1631 if (!aim) {
1632 pr_err("Bad driver\n");
1633 return -EINVAL;
1634 }
1635 aim_obj = create_most_aim_obj(aim->name);
1636 if (!aim_obj) {
1637 pr_info("failed to alloc driver object\n");
1638 return -ENOMEM;
1639 }
1640 aim_obj->driver = aim;
1641 aim->context = aim_obj;
1642 pr_info("registered new application interfacing module %s\n",
1643 aim->name);
1644 list_add_tail(&aim_obj->list, &aim_list);
1645 return 0;
1646}
1647EXPORT_SYMBOL_GPL(most_register_aim);
1648
/**
 * most_deregister_aim - deregisters an AIM (driver) with the core
 * @aim: AIM to be removed
 *
 * Disconnects the AIM from every channel it is linked to, removes it
 * from the global list and destroys its sysfs representation.
 */
int most_deregister_aim(struct most_aim *aim)
{
	struct most_aim_obj *aim_obj;
	struct most_c_obj *c, *tmp;
	struct most_inst_obj *i, *i_tmp;

	if (!aim) {
		pr_err("Bad driver\n");
		return -EINVAL;
	}

	aim_obj = aim->context;
	if (!aim_obj) {
		pr_info("driver not registered.\n");
		return -EINVAL;
	}
	/* detach the AIM from all channels of all registered interfaces */
	list_for_each_entry_safe(i, i_tmp, &instance_list, list) {
		list_for_each_entry_safe(c, tmp, &i->channel_list, list) {
			if (c->aim0.ptr == aim || c->aim1.ptr == aim)
				aim->disconnect_channel(
					c->iface, c->channel_id);
			if (c->aim0.ptr == aim)
				c->aim0.ptr = NULL;
			if (c->aim1.ptr == aim)
				c->aim1.ptr = NULL;
		}
	}
	list_del(&aim_obj->list);
	destroy_most_aim_obj(aim_obj);
	pr_info("deregistering application interfacing module %s\n", aim->name);
	return 0;
}
EXPORT_SYMBOL_GPL(most_deregister_aim);
1686
/**
 * most_register_interface - registers an interface with core
 * @iface: pointer to the instance of the interface description.
 *
 * Allocates and initializes a new interface instance and all of its channels.
 * Returns a pointer to kobject or an error pointer.
 */
struct kobject *most_register_interface(struct most_interface *iface)
{
	unsigned int i;
	int id;
	char name[STRING_SIZE];
	char channel_name[STRING_SIZE];
	struct most_c_obj *c;
	struct most_inst_obj *inst;

	if (!iface || !iface->enqueue || !iface->configure ||
	    !iface->poison_channel || (iface->num_channels > MAX_CHANNELS)) {
		pr_err("Bad interface or channel overflow\n");
		return ERR_PTR(-EINVAL);
	}

	/* allocate a unique device id (mdev<id>) */
	id = ida_simple_get(&mdev_id, 0, 0, GFP_KERNEL);
	if (id < 0) {
		pr_info("Failed to alloc mdev ID\n");
		return ERR_PTR(id);
	}
	snprintf(name, STRING_SIZE, "mdev%d", id);

	inst = create_most_inst_obj(name);
	if (!inst) {
		pr_info("Failed to allocate interface instance\n");
		ida_simple_remove(&mdev_id, id);
		return ERR_PTR(-ENOMEM);
	}

	iface->priv = inst;
	INIT_LIST_HEAD(&inst->channel_list);
	inst->iface = iface;
	inst->dev_id = id;
	list_add_tail(&inst->list, &instance_list);

	for (i = 0; i < iface->num_channels; i++) {
		const char *name_suffix = iface->channel_vector[i].name_suffix;

		/* derive the sysfs channel name from the HDM's suffix */
		if (!name_suffix)
			snprintf(channel_name, STRING_SIZE, "ch%d", i);
		else if (name_suffix[0] == '@')
			snprintf(channel_name, STRING_SIZE, "ch%d%s", i,
				 name_suffix);
		else
			snprintf(channel_name, STRING_SIZE, "%s", name_suffix);

		/* this increments the reference count of this instance */
		c = create_most_c_obj(channel_name, &inst->kobj);
		if (!c)
			goto free_instance;
		inst->channel[i] = c;
		c->is_starving = 0;
		c->iface = iface;
		c->inst = inst;
		c->channel_id = i;
		c->keep_mbo = false;
		c->enqueue_halt = false;
		c->is_poisoned = false;
		c->cfg.direction = 0;
		c->cfg.data_type = 0;
		c->cfg.num_buffers = 0;
		c->cfg.buffer_size = 0;
		c->cfg.subbuffer_size = 0;
		c->cfg.packets_per_xact = 0;
		spin_lock_init(&c->fifo_lock);
		INIT_LIST_HEAD(&c->fifo);
		INIT_LIST_HEAD(&c->trash_fifo);
		INIT_LIST_HEAD(&c->halt_fifo);
		init_completion(&c->cleanup);
		atomic_set(&c->mbo_ref, 0);
		mutex_init(&c->start_mutex);
		list_add_tail(&c->list, &inst->channel_list);
	}
	pr_info("registered new MOST device mdev%d (%s)\n",
		inst->dev_id, iface->description);
	return &inst->kobj;

free_instance:
	pr_info("Failed allocate channel(s)\n");
	list_del(&inst->list);
	ida_simple_remove(&mdev_id, id);
	/* drops the instance ref; channels created so far go with it */
	destroy_most_inst_obj(inst);
	return ERR_PTR(-ENOMEM);
}
EXPORT_SYMBOL_GPL(most_register_interface);
1779
/**
 * most_deregister_interface - deregisters an interface with core
 * @iface: pointer to the interface instance description.
 *
 * Before removing an interface instance from the list, all running
 * channels are stopped and poisoned.
 */
void most_deregister_interface(struct most_interface *iface)
{
	struct most_inst_obj *i = iface->priv;
	struct most_c_obj *c;

	if (unlikely(!i)) {
		pr_info("Bad Interface\n");
		return;
	}
	pr_info("deregistering MOST device %s (%s)\n", i->kobj.name,
		iface->description);

	/* disconnect any AIMs still attached to this interface's channels */
	list_for_each_entry(c, &i->channel_list, list) {
		if (c->aim0.ptr)
			c->aim0.ptr->disconnect_channel(c->iface,
							c->channel_id);
		if (c->aim1.ptr)
			c->aim1.ptr->disconnect_channel(c->iface,
							c->channel_id);
		c->aim0.ptr = NULL;
		c->aim1.ptr = NULL;
	}

	ida_simple_remove(&mdev_id, i->dev_id);
	list_del(&i->list);
	destroy_most_inst_obj(i);
}
EXPORT_SYMBOL_GPL(most_deregister_interface);
1815
1816/**
1817 * most_stop_enqueue - prevents core from enqueueing MBOs
1818 * @iface: pointer to interface
1819 * @id: channel id
1820 *
1821 * This is called by an HDM that _cannot_ attend to its duties and
1822 * is imminent to get run over by the core. The core is not going to
1823 * enqueue any further packets unless the flagging HDM calls
1824 * most_resume enqueue().
1825 */
1826void most_stop_enqueue(struct most_interface *iface, int id)
1827{
1828 struct most_c_obj *c = get_channel_by_iface(iface, id);
1829
1830 if (likely(c))
1831 c->enqueue_halt = true;
1832}
1833EXPORT_SYMBOL_GPL(most_stop_enqueue);
1834
1835/**
1836 * most_resume_enqueue - allow core to enqueue MBOs again
1837 * @iface: pointer to interface
1838 * @id: channel id
1839 *
1840 * This clears the enqueue halt flag and enqueues all MBOs currently
1841 * sitting in the wait fifo.
1842 */
1843void most_resume_enqueue(struct most_interface *iface, int id)
1844{
1845 struct most_c_obj *c = get_channel_by_iface(iface, id);
1846
1847 if (unlikely(!c))
1848 return;
1849 c->enqueue_halt = false;
1850
1851 wake_up_interruptible(&c->hdm_fifo_wq);
1852}
1853EXPORT_SYMBOL_GPL(most_resume_enqueue);
1854
1855static int __init most_init(void)
1856{
1857 pr_info("init()\n");
1858 INIT_LIST_HEAD(&instance_list);
1859 INIT_LIST_HEAD(&aim_list);
57562a72
CG
1860 ida_init(&mdev_id);
1861
1862 if (bus_register(&most_bus)) {
1863 pr_info("Cannot register most bus\n");
1864 goto exit;
1865 }
1866
1867 most_class = class_create(THIS_MODULE, "most");
1868 if (IS_ERR(most_class)) {
1869 pr_info("No udev support.\n");
1870 goto exit_bus;
1871 }
1872 if (driver_register(&mostcore)) {
1873 pr_info("Cannot register core driver\n");
1874 goto exit_class;
1875 }
1876
1877 class_glue_dir =
1878 device_create(most_class, NULL, 0, NULL, "mostcore");
17ac98ac 1879 if (IS_ERR(class_glue_dir))
57562a72
CG
1880 goto exit_driver;
1881
1882 most_aim_kset =
1883 kset_create_and_add("aims", NULL, &class_glue_dir->kobj);
1884 if (!most_aim_kset)
1885 goto exit_class_container;
1886
1887 most_inst_kset =
1888 kset_create_and_add("devices", NULL, &class_glue_dir->kobj);
1889 if (!most_inst_kset)
1890 goto exit_driver_kset;
1891
1892 return 0;
1893
1894exit_driver_kset:
1895 kset_unregister(most_aim_kset);
1896exit_class_container:
1897 device_destroy(most_class, 0);
1898exit_driver:
1899 driver_unregister(&mostcore);
1900exit_class:
1901 class_destroy(most_class);
1902exit_bus:
1903 bus_unregister(&most_bus);
1904exit:
1905 return -ENOMEM;
1906}
1907
/*
 * most_exit - module teardown
 *
 * Destroys all remaining AIM and interface instances, then unwinds
 * the sysfs/bus registrations in reverse order of most_init().
 */
static void __exit most_exit(void)
{
	struct most_inst_obj *i, *i_tmp;
	struct most_aim_obj *d, *d_tmp;

	pr_info("exit core module\n");
	list_for_each_entry_safe(d, d_tmp, &aim_list, list) {
		destroy_most_aim_obj(d);
	}

	list_for_each_entry_safe(i, i_tmp, &instance_list, list) {
		list_del(&i->list);
		destroy_most_inst_obj(i);
	}
	kset_unregister(most_inst_kset);
	kset_unregister(most_aim_kset);
	device_destroy(most_class, 0);
	driver_unregister(&mostcore);
	class_destroy(most_class);
	bus_unregister(&most_bus);
	ida_destroy(&mdev_id);
}

module_init(most_init);
module_exit(most_exit);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Christian Gromm <christian.gromm@microchip.com>");
MODULE_DESCRIPTION("Core module of stacked MOST Linux driver");