staging: most: make hdm-usb follow the coding style
[linux-2.6-block.git] drivers/staging/most/mostcore/core.c

/*
 * core.c - Implementation of core module of MOST Linux driver stack
 *
 * Copyright (C) 2013-2015 Microchip Technology Germany II GmbH & Co. KG
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * This file is licensed under GPLv2.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/module.h>
#include <linux/fs.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/device.h>
#include <linux/list.h>
#include <linux/poll.h>
#include <linux/wait.h>
#include <linux/kobject.h>
#include <linux/mutex.h>
#include <linux/completion.h>
#include <linux/sysfs.h>
#include <linux/kthread.h>
#include <linux/dma-mapping.h>
#include <linux/idr.h>
#include "mostcore.h"

#define MAX_CHANNELS	64
#define STRING_SIZE	80

static struct class *most_class;
static struct device *class_glue_dir;
static struct ida mdev_id;
static int modref;
static int dummy_num_buffers;

struct most_c_aim_obj {
	struct most_aim *ptr;
	int refs;
	int num_buffers;
};

struct most_c_obj {
	struct kobject kobj;
	struct completion cleanup;
	atomic_t mbo_ref;
	atomic_t mbo_nq_level;
	uint16_t channel_id;
	bool is_poisoned;
	struct mutex start_mutex;
	int is_starving;
	struct most_interface *iface;
	struct most_inst_obj *inst;
	struct most_channel_config cfg;
	bool keep_mbo;
	bool enqueue_halt;
	struct list_head fifo;
	spinlock_t fifo_lock;
	struct list_head halt_fifo;
	struct list_head list;
	struct most_c_aim_obj aim0;
	struct most_c_aim_obj aim1;
	struct list_head trash_fifo;
	struct task_struct *hdm_enqueue_task;
	struct mutex stop_task_mutex;
	wait_queue_head_t hdm_fifo_wq;
};
#define to_c_obj(d) container_of(d, struct most_c_obj, kobj)

struct most_inst_obj {
	int dev_id;
	atomic_t tainted;
	struct most_interface *iface;
	struct list_head channel_list;
	struct most_c_obj *channel[MAX_CHANNELS];
	struct kobject kobj;
	struct list_head list;
};
#define to_inst_obj(d) container_of(d, struct most_inst_obj, kobj)

/**
 * list_pop_mbo - retrieves the first MBO of the list and removes it
 * @ptr: the list head to grab the MBO from.
 */
#define list_pop_mbo(ptr)						\
({									\
	struct mbo *_mbo = list_first_entry(ptr, struct mbo, list);	\
	list_del(&_mbo->list);						\
	_mbo;								\
})
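
/*
 * Usage sketch (mirroring most_get_mbo() below): check the list and hold
 * the channel's fifo_lock while popping:
 *
 *	spin_lock_irqsave(&c->fifo_lock, flags);
 *	if (!list_empty(&c->fifo))
 *		mbo = list_pop_mbo(&c->fifo);
 *	spin_unlock_irqrestore(&c->fifo_lock, flags);
 */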

static struct mutex deregister_mutex;

/*		     ___	       ___
 * ___C H A N N E L___
 */

/**
 * struct most_c_attr - to access the attributes of a channel object
 * @attr: attributes of a channel
 * @show: pointer to the show function
 * @store: pointer to the store function
 */
struct most_c_attr {
	struct attribute attr;
	ssize_t (*show)(struct most_c_obj *d,
			struct most_c_attr *attr,
			char *buf);
	ssize_t (*store)(struct most_c_obj *d,
			 struct most_c_attr *attr,
			 const char *buf,
			 size_t count);
};
#define to_channel_attr(a) container_of(a, struct most_c_attr, attr)

#define MOST_CHNL_ATTR(_name, _mode, _show, _store) \
	struct most_c_attr most_chnl_attr_##_name = \
		__ATTR(_name, _mode, _show, _store)

/**
 * channel_attr_show - show function of channel object
 * @kobj: pointer to its kobject
 * @attr: pointer to its attributes
 * @buf: buffer
 */
static ssize_t channel_attr_show(struct kobject *kobj, struct attribute *attr,
				 char *buf)
{
	struct most_c_attr *channel_attr = to_channel_attr(attr);
	struct most_c_obj *c_obj = to_c_obj(kobj);

	if (!channel_attr->show)
		return -EIO;

	return channel_attr->show(c_obj, channel_attr, buf);
}

/**
 * channel_attr_store - store function of channel object
 * @kobj: pointer to its kobject
 * @attr: pointer to its attributes
 * @buf: buffer
 * @len: length of buffer
 */
static ssize_t channel_attr_store(struct kobject *kobj,
				  struct attribute *attr,
				  const char *buf,
				  size_t len)
{
	struct most_c_attr *channel_attr = to_channel_attr(attr);
	struct most_c_obj *c_obj = to_c_obj(kobj);

	if (!channel_attr->store)
		return -EIO;
	return channel_attr->store(c_obj, channel_attr, buf, len);
}

static const struct sysfs_ops most_channel_sysfs_ops = {
	.show = channel_attr_show,
	.store = channel_attr_store,
};

/**
 * most_free_mbo_coherent - free an MBO and its coherent buffer
 * @mbo: buffer to be released
 */
static void most_free_mbo_coherent(struct mbo *mbo)
{
	struct most_c_obj *c = mbo->context;
	u16 const coherent_buf_size = c->cfg.buffer_size + c->cfg.extra_len;

	dma_free_coherent(NULL, coherent_buf_size, mbo->virt_address,
			  mbo->bus_address);
	kfree(mbo);
	if (atomic_sub_and_test(1, &c->mbo_ref))
		complete(&c->cleanup);
}

/**
 * flush_channel_fifos - clear the channel fifos
 * @c: pointer to channel object
 */
static void flush_channel_fifos(struct most_c_obj *c)
{
	unsigned long flags, hf_flags;
	struct mbo *mbo, *tmp;

	if (list_empty(&c->fifo) && list_empty(&c->halt_fifo))
		return;

	spin_lock_irqsave(&c->fifo_lock, flags);
	list_for_each_entry_safe(mbo, tmp, &c->fifo, list) {
		list_del(&mbo->list);
		spin_unlock_irqrestore(&c->fifo_lock, flags);
		most_free_mbo_coherent(mbo);
		spin_lock_irqsave(&c->fifo_lock, flags);
	}
	spin_unlock_irqrestore(&c->fifo_lock, flags);

	spin_lock_irqsave(&c->fifo_lock, hf_flags);
	list_for_each_entry_safe(mbo, tmp, &c->halt_fifo, list) {
		list_del(&mbo->list);
		spin_unlock_irqrestore(&c->fifo_lock, hf_flags);
		most_free_mbo_coherent(mbo);
		spin_lock_irqsave(&c->fifo_lock, hf_flags);
	}
	spin_unlock_irqrestore(&c->fifo_lock, hf_flags);

	if (unlikely((!list_empty(&c->fifo) || !list_empty(&c->halt_fifo))))
		pr_info("WARN: fifo | halt fifo not empty\n");
}

/**
 * flush_trash_fifo - clear the trash fifo
 * @c: pointer to channel object
 */
static int flush_trash_fifo(struct most_c_obj *c)
{
	struct mbo *mbo, *tmp;
	unsigned long flags;

	spin_lock_irqsave(&c->fifo_lock, flags);
	list_for_each_entry_safe(mbo, tmp, &c->trash_fifo, list) {
		list_del(&mbo->list);
		spin_unlock_irqrestore(&c->fifo_lock, flags);
		most_free_mbo_coherent(mbo);
		spin_lock_irqsave(&c->fifo_lock, flags);
	}
	spin_unlock_irqrestore(&c->fifo_lock, flags);
	return 0;
}

/**
 * most_channel_release - release function of channel object
 * @kobj: pointer to channel's kobject
 */
static void most_channel_release(struct kobject *kobj)
{
	struct most_c_obj *c = to_c_obj(kobj);

	kfree(c);
}

static ssize_t show_available_directions(struct most_c_obj *c,
					 struct most_c_attr *attr,
					 char *buf)
{
	unsigned int i = c->channel_id;

	strcpy(buf, "");
	if (c->iface->channel_vector[i].direction & MOST_CH_RX)
		strcat(buf, "dir_rx ");
	if (c->iface->channel_vector[i].direction & MOST_CH_TX)
		strcat(buf, "dir_tx ");
	strcat(buf, "\n");
	return strlen(buf) + 1;
}

static ssize_t show_available_datatypes(struct most_c_obj *c,
					struct most_c_attr *attr,
					char *buf)
{
	unsigned int i = c->channel_id;

	strcpy(buf, "");
	if (c->iface->channel_vector[i].data_type & MOST_CH_CONTROL)
		strcat(buf, "control ");
	if (c->iface->channel_vector[i].data_type & MOST_CH_ASYNC)
		strcat(buf, "async ");
	if (c->iface->channel_vector[i].data_type & MOST_CH_SYNC)
		strcat(buf, "sync ");
	if (c->iface->channel_vector[i].data_type & MOST_CH_ISOC_AVP)
		strcat(buf, "isoc_avp ");
	strcat(buf, "\n");
	return strlen(buf) + 1;
}

static
ssize_t show_number_of_packet_buffers(struct most_c_obj *c,
				      struct most_c_attr *attr,
				      char *buf)
{
	unsigned int i = c->channel_id;

	return snprintf(buf, PAGE_SIZE, "%d\n",
			c->iface->channel_vector[i].num_buffers_packet);
}

static
ssize_t show_number_of_stream_buffers(struct most_c_obj *c,
				      struct most_c_attr *attr,
				      char *buf)
{
	unsigned int i = c->channel_id;

	return snprintf(buf, PAGE_SIZE, "%d\n",
			c->iface->channel_vector[i].num_buffers_streaming);
}

static
ssize_t show_size_of_packet_buffer(struct most_c_obj *c,
				   struct most_c_attr *attr,
				   char *buf)
{
	unsigned int i = c->channel_id;

	return snprintf(buf, PAGE_SIZE, "%d\n",
			c->iface->channel_vector[i].buffer_size_packet);
}

static
ssize_t show_size_of_stream_buffer(struct most_c_obj *c,
				   struct most_c_attr *attr,
				   char *buf)
{
	unsigned int i = c->channel_id;

	return snprintf(buf, PAGE_SIZE, "%d\n",
			c->iface->channel_vector[i].buffer_size_streaming);
}

static ssize_t show_channel_starving(struct most_c_obj *c,
				     struct most_c_attr *attr,
				     char *buf)
{
	return snprintf(buf, PAGE_SIZE, "%d\n", c->is_starving);
}

#define create_show_channel_attribute(val) \
	static MOST_CHNL_ATTR(val, S_IRUGO, show_##val, NULL)

create_show_channel_attribute(available_directions);
create_show_channel_attribute(available_datatypes);
create_show_channel_attribute(number_of_packet_buffers);
create_show_channel_attribute(number_of_stream_buffers);
create_show_channel_attribute(size_of_stream_buffer);
create_show_channel_attribute(size_of_packet_buffer);
create_show_channel_attribute(channel_starving);
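
/*
 * For reference, a sketch of what one of the lines above expands to
 * (here for channel_starving), given the two macros defined earlier:
 *
 *	static struct most_c_attr most_chnl_attr_channel_starving =
 *		__ATTR(channel_starving, S_IRUGO,
 *		       show_channel_starving, NULL);
 */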

static ssize_t show_set_number_of_buffers(struct most_c_obj *c,
					  struct most_c_attr *attr,
					  char *buf)
{
	return snprintf(buf, PAGE_SIZE, "%d\n", c->cfg.num_buffers);
}

static ssize_t store_set_number_of_buffers(struct most_c_obj *c,
					   struct most_c_attr *attr,
					   const char *buf,
					   size_t count)
{
	int ret = kstrtou16(buf, 0, &c->cfg.num_buffers);

	if (ret)
		return ret;
	return count;
}

static ssize_t show_set_buffer_size(struct most_c_obj *c,
				    struct most_c_attr *attr,
				    char *buf)
{
	return snprintf(buf, PAGE_SIZE, "%d\n", c->cfg.buffer_size);
}

static ssize_t store_set_buffer_size(struct most_c_obj *c,
				     struct most_c_attr *attr,
				     const char *buf,
				     size_t count)
{
	int ret = kstrtou16(buf, 0, &c->cfg.buffer_size);

	if (ret)
		return ret;
	return count;
}

static ssize_t show_set_direction(struct most_c_obj *c,
				  struct most_c_attr *attr,
				  char *buf)
{
	if (c->cfg.direction & MOST_CH_TX)
		return snprintf(buf, PAGE_SIZE, "dir_tx\n");
	else if (c->cfg.direction & MOST_CH_RX)
		return snprintf(buf, PAGE_SIZE, "dir_rx\n");
	return snprintf(buf, PAGE_SIZE, "unconfigured\n");
}

static ssize_t store_set_direction(struct most_c_obj *c,
				   struct most_c_attr *attr,
				   const char *buf,
				   size_t count)
{
	if (!strcmp(buf, "dir_rx\n"))
		c->cfg.direction = MOST_CH_RX;
	else if (!strcmp(buf, "dir_tx\n"))
		c->cfg.direction = MOST_CH_TX;
	else {
		pr_info("WARN: invalid attribute settings\n");
		return -EINVAL;
	}
	return count;
}

static ssize_t show_set_datatype(struct most_c_obj *c,
				 struct most_c_attr *attr,
				 char *buf)
{
	if (c->cfg.data_type & MOST_CH_CONTROL)
		return snprintf(buf, PAGE_SIZE, "control\n");
	else if (c->cfg.data_type & MOST_CH_ASYNC)
		return snprintf(buf, PAGE_SIZE, "async\n");
	else if (c->cfg.data_type & MOST_CH_SYNC)
		return snprintf(buf, PAGE_SIZE, "sync\n");
	else if (c->cfg.data_type & MOST_CH_ISOC_AVP)
		return snprintf(buf, PAGE_SIZE, "isoc_avp\n");
	return snprintf(buf, PAGE_SIZE, "unconfigured\n");
}

static ssize_t store_set_datatype(struct most_c_obj *c,
				  struct most_c_attr *attr,
				  const char *buf,
				  size_t count)
{
	if (!strcmp(buf, "control\n"))
		c->cfg.data_type = MOST_CH_CONTROL;
	else if (!strcmp(buf, "async\n"))
		c->cfg.data_type = MOST_CH_ASYNC;
	else if (!strcmp(buf, "sync\n"))
		c->cfg.data_type = MOST_CH_SYNC;
	else if (!strcmp(buf, "isoc_avp\n"))
		c->cfg.data_type = MOST_CH_ISOC_AVP;
	else {
		pr_info("WARN: invalid attribute settings\n");
		return -EINVAL;
	}
	return count;
}

static ssize_t show_set_subbuffer_size(struct most_c_obj *c,
				       struct most_c_attr *attr,
				       char *buf)
{
	return snprintf(buf, PAGE_SIZE, "%d\n", c->cfg.subbuffer_size);
}

static ssize_t store_set_subbuffer_size(struct most_c_obj *c,
					struct most_c_attr *attr,
					const char *buf,
					size_t count)
{
	int ret = kstrtou16(buf, 0, &c->cfg.subbuffer_size);

	if (ret)
		return ret;
	return count;
}

static ssize_t show_set_packets_per_xact(struct most_c_obj *c,
					 struct most_c_attr *attr,
					 char *buf)
{
	return snprintf(buf, PAGE_SIZE, "%d\n", c->cfg.packets_per_xact);
}

static ssize_t store_set_packets_per_xact(struct most_c_obj *c,
					  struct most_c_attr *attr,
					  const char *buf,
					  size_t count)
{
	int ret = kstrtou16(buf, 0, &c->cfg.packets_per_xact);

	if (ret)
		return ret;
	return count;
}

#define create_channel_attribute(value) \
	static MOST_CHNL_ATTR(value, S_IRUGO | S_IWUSR, \
			      show_##value, \
			      store_##value)

create_channel_attribute(set_buffer_size);
create_channel_attribute(set_number_of_buffers);
create_channel_attribute(set_direction);
create_channel_attribute(set_datatype);
create_channel_attribute(set_subbuffer_size);
create_channel_attribute(set_packets_per_xact);
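
/*
 * A channel is configured through these attributes before an AIM is
 * linked to it. Sketch (paths assume the "mostcore" class device set up
 * in most_init() below, a registered instance mdev0 and a channel ch0;
 * the values are only examples):
 *
 *	$ cd /sys/class/most/mostcore/devices/mdev0/ch0
 *	$ echo 16 >set_number_of_buffers
 *	$ echo 512 >set_buffer_size
 *	$ echo dir_rx >set_direction
 *	$ echo sync >set_datatype
 */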

/**
 * most_channel_def_attrs - array of default attributes of channel object
 */
static struct attribute *most_channel_def_attrs[] = {
	&most_chnl_attr_available_directions.attr,
	&most_chnl_attr_available_datatypes.attr,
	&most_chnl_attr_number_of_packet_buffers.attr,
	&most_chnl_attr_number_of_stream_buffers.attr,
	&most_chnl_attr_size_of_packet_buffer.attr,
	&most_chnl_attr_size_of_stream_buffer.attr,
	&most_chnl_attr_set_number_of_buffers.attr,
	&most_chnl_attr_set_buffer_size.attr,
	&most_chnl_attr_set_direction.attr,
	&most_chnl_attr_set_datatype.attr,
	&most_chnl_attr_set_subbuffer_size.attr,
	&most_chnl_attr_set_packets_per_xact.attr,
	&most_chnl_attr_channel_starving.attr,
	NULL,
};

static struct kobj_type most_channel_ktype = {
	.sysfs_ops = &most_channel_sysfs_ops,
	.release = most_channel_release,
	.default_attrs = most_channel_def_attrs,
};

static struct kset *most_channel_kset;

/**
 * create_most_c_obj - allocates a channel object
 * @name: name of the channel object
 * @parent: parent kobject
 *
 * This creates a channel object and registers it with sysfs.
 * Returns a pointer to the object or NULL when something went wrong.
 */
static struct most_c_obj *
create_most_c_obj(const char *name, struct kobject *parent)
{
	struct most_c_obj *c;
	int retval;

	c = kzalloc(sizeof(*c), GFP_KERNEL);
	if (!c)
		return NULL;
	c->kobj.kset = most_channel_kset;
	retval = kobject_init_and_add(&c->kobj, &most_channel_ktype, parent,
				      "%s", name);
	if (retval) {
		kobject_put(&c->kobj);
		return NULL;
	}
	kobject_uevent(&c->kobj, KOBJ_ADD);
	return c;
}

/**
 * destroy_most_c_obj - channel release function
 * @c: pointer to channel object
 *
 * This decrements the reference counter of the channel object.
 * If the reference count turns zero, its release function is called.
 */
static void destroy_most_c_obj(struct most_c_obj *c)
{
	if (c->aim0.ptr)
		c->aim0.ptr->disconnect_channel(c->iface, c->channel_id);
	if (c->aim1.ptr)
		c->aim1.ptr->disconnect_channel(c->iface, c->channel_id);
	c->aim0.ptr = NULL;
	c->aim1.ptr = NULL;

	mutex_lock(&deregister_mutex);
	flush_trash_fifo(c);
	flush_channel_fifos(c);
	mutex_unlock(&deregister_mutex);
	kobject_put(&c->kobj);
}

/*		     ___	       ___
 * ___I N S T A N C E___
 */
#define MOST_INST_ATTR(_name, _mode, _show, _store) \
	struct most_inst_attribute most_inst_attr_##_name = \
		__ATTR(_name, _mode, _show, _store)

static struct list_head instance_list;

/**
 * struct most_inst_attribute - to access the attributes of instance object
 * @attr: attributes of an instance
 * @show: pointer to the show function
 * @store: pointer to the store function
 */
struct most_inst_attribute {
	struct attribute attr;
	ssize_t (*show)(struct most_inst_obj *d,
			struct most_inst_attribute *attr,
			char *buf);
	ssize_t (*store)(struct most_inst_obj *d,
			 struct most_inst_attribute *attr,
			 const char *buf,
			 size_t count);
};
#define to_instance_attr(a) \
	container_of(a, struct most_inst_attribute, attr)

/**
 * instance_attr_show - show function for an instance object
 * @kobj: pointer to kobject
 * @attr: pointer to attribute struct
 * @buf: buffer
 */
static ssize_t instance_attr_show(struct kobject *kobj,
				  struct attribute *attr,
				  char *buf)
{
	struct most_inst_attribute *instance_attr;
	struct most_inst_obj *instance_obj;

	instance_attr = to_instance_attr(attr);
	instance_obj = to_inst_obj(kobj);

	if (!instance_attr->show)
		return -EIO;

	return instance_attr->show(instance_obj, instance_attr, buf);
}

/**
 * instance_attr_store - store function for an instance object
 * @kobj: pointer to kobject
 * @attr: pointer to attribute struct
 * @buf: buffer
 * @len: length of buffer
 */
static ssize_t instance_attr_store(struct kobject *kobj,
				   struct attribute *attr,
				   const char *buf,
				   size_t len)
{
	struct most_inst_attribute *instance_attr;
	struct most_inst_obj *instance_obj;

	instance_attr = to_instance_attr(attr);
	instance_obj = to_inst_obj(kobj);

	if (!instance_attr->store)
		return -EIO;

	return instance_attr->store(instance_obj, instance_attr, buf, len);
}

static const struct sysfs_ops most_inst_sysfs_ops = {
	.show = instance_attr_show,
	.store = instance_attr_store,
};

/**
 * most_inst_release - release function for instance object
 * @kobj: pointer to instance's kobject
 *
 * This frees the allocated memory for the instance object
 */
static void most_inst_release(struct kobject *kobj)
{
	struct most_inst_obj *inst = to_inst_obj(kobj);

	kfree(inst);
}

static ssize_t show_description(struct most_inst_obj *instance_obj,
				struct most_inst_attribute *attr,
				char *buf)
{
	return snprintf(buf, PAGE_SIZE, "%s\n",
			instance_obj->iface->description);
}

static ssize_t show_interface(struct most_inst_obj *instance_obj,
			      struct most_inst_attribute *attr,
			      char *buf)
{
	switch (instance_obj->iface->interface) {
	case ITYPE_LOOPBACK:
		return snprintf(buf, PAGE_SIZE, "loopback\n");
	case ITYPE_I2C:
		return snprintf(buf, PAGE_SIZE, "i2c\n");
	case ITYPE_I2S:
		return snprintf(buf, PAGE_SIZE, "i2s\n");
	case ITYPE_TSI:
		return snprintf(buf, PAGE_SIZE, "tsi\n");
	case ITYPE_HBI:
		return snprintf(buf, PAGE_SIZE, "hbi\n");
	case ITYPE_MEDIALB_DIM:
		return snprintf(buf, PAGE_SIZE, "mlb_dim\n");
	case ITYPE_MEDIALB_DIM2:
		return snprintf(buf, PAGE_SIZE, "mlb_dim2\n");
	case ITYPE_USB:
		return snprintf(buf, PAGE_SIZE, "usb\n");
	case ITYPE_PCIE:
		return snprintf(buf, PAGE_SIZE, "pcie\n");
	}
	return snprintf(buf, PAGE_SIZE, "unknown\n");
}

#define create_inst_attribute(value) \
	static MOST_INST_ATTR(value, S_IRUGO, show_##value, NULL)

create_inst_attribute(description);
create_inst_attribute(interface);

static struct attribute *most_inst_def_attrs[] = {
	&most_inst_attr_description.attr,
	&most_inst_attr_interface.attr,
	NULL,
};

static struct kobj_type most_inst_ktype = {
	.sysfs_ops = &most_inst_sysfs_ops,
	.release = most_inst_release,
	.default_attrs = most_inst_def_attrs,
};

static struct kset *most_inst_kset;

/**
 * create_most_inst_obj - creates an instance object
 * @name: name of the object to be created
 *
 * This allocates memory for an instance structure, assigns the proper kset
 * and registers it with sysfs.
 *
 * Returns a pointer to the instance object or NULL when something went wrong.
 */
static struct most_inst_obj *create_most_inst_obj(const char *name)
{
	struct most_inst_obj *inst;
	int retval;

	inst = kzalloc(sizeof(*inst), GFP_KERNEL);
	if (!inst)
		return NULL;
	inst->kobj.kset = most_inst_kset;
	retval = kobject_init_and_add(&inst->kobj, &most_inst_ktype, NULL,
				      "%s", name);
	if (retval) {
		kobject_put(&inst->kobj);
		return NULL;
	}
	kobject_uevent(&inst->kobj, KOBJ_ADD);
	return inst;
}

/**
 * destroy_most_inst_obj - MOST instance release function
 * @inst: pointer to the instance object
 *
 * This decrements the reference counter of the instance object.
 * If the reference count turns zero, its release function is called
 */
static void destroy_most_inst_obj(struct most_inst_obj *inst)
{
	struct most_c_obj *c, *tmp;

	/* need to destroy channels first, since
	 * each channel incremented the
	 * reference count of the inst->kobj
	 */
	list_for_each_entry_safe(c, tmp, &inst->channel_list, list) {
		destroy_most_c_obj(c);
	}
	kobject_put(&inst->kobj);
}

/*		     ___	 ___
 * ___A I M___
 */
struct most_aim_obj {
	struct kobject kobj;
	struct list_head list;
	struct most_aim *driver;
	char add_link[STRING_SIZE];
	char remove_link[STRING_SIZE];
};
#define to_aim_obj(d) container_of(d, struct most_aim_obj, kobj)

static struct list_head aim_list;

/**
 * struct most_aim_attribute - to access the attributes of AIM object
 * @attr: attributes of an AIM
 * @show: pointer to the show function
 * @store: pointer to the store function
 */
struct most_aim_attribute {
	struct attribute attr;
	ssize_t (*show)(struct most_aim_obj *d,
			struct most_aim_attribute *attr,
			char *buf);
	ssize_t (*store)(struct most_aim_obj *d,
			 struct most_aim_attribute *attr,
			 const char *buf,
			 size_t count);
};
#define to_aim_attr(a) container_of(a, struct most_aim_attribute, attr)

/**
 * aim_attr_show - show function of an AIM object
 * @kobj: pointer to kobject
 * @attr: pointer to attribute struct
 * @buf: buffer
 */
static ssize_t aim_attr_show(struct kobject *kobj,
			     struct attribute *attr,
			     char *buf)
{
	struct most_aim_attribute *aim_attr;
	struct most_aim_obj *aim_obj;

	aim_attr = to_aim_attr(attr);
	aim_obj = to_aim_obj(kobj);

	if (!aim_attr->show)
		return -EIO;

	return aim_attr->show(aim_obj, aim_attr, buf);
}

/**
 * aim_attr_store - store function of an AIM object
 * @kobj: pointer to kobject
 * @attr: pointer to attribute struct
 * @buf: buffer
 * @len: length of buffer
 */
static ssize_t aim_attr_store(struct kobject *kobj,
			      struct attribute *attr,
			      const char *buf,
			      size_t len)
{
	struct most_aim_attribute *aim_attr;
	struct most_aim_obj *aim_obj;

	aim_attr = to_aim_attr(attr);
	aim_obj = to_aim_obj(kobj);

	if (!aim_attr->store)
		return -EIO;
	return aim_attr->store(aim_obj, aim_attr, buf, len);
}

static const struct sysfs_ops most_aim_sysfs_ops = {
	.show = aim_attr_show,
	.store = aim_attr_store,
};

/**
 * most_aim_release - AIM release function
 * @kobj: pointer to AIM's kobject
 */
static void most_aim_release(struct kobject *kobj)
{
	struct most_aim_obj *aim_obj = to_aim_obj(kobj);

	kfree(aim_obj);
}

static ssize_t show_add_link(struct most_aim_obj *aim_obj,
			     struct most_aim_attribute *attr,
			     char *buf)
{
	return snprintf(buf, PAGE_SIZE, "%s\n", aim_obj->add_link);
}

/**
 * split_string - parses the string in buffer @buf (modifying it) and
 * splits it into two mandatory and one optional substrings.
 *
 * @buf: complete string from attribute 'add_channel'
 * @a: address of pointer to 1st substring (=instance name)
 * @b: address of pointer to 2nd substring (=channel name)
 * @c: optional address of pointer to 3rd substring (=user defined name)
 *
 * Examples:
 *
 * Input: "mdev0:ch0@ep_81:my_channel\n" or
 *        "mdev0:ch0@ep_81:my_channel"
 *
 * Output: *a -> "mdev0", *b -> "ch0@ep_81", *c -> "my_channel"
 *
 * Input: "mdev0:ch0@ep_81\n"
 * Output: *a -> "mdev0", *b -> "ch0@ep_81", *c -> ""
 *
 * Input: "mdev0:ch0@ep_81"
 * Output: *a -> "mdev0", *b -> "ch0@ep_81", *c == NULL
 */
static int split_string(char *buf, char **a, char **b, char **c)
{
	*a = strsep(&buf, ":");
	if (!*a)
		return -EIO;

	*b = strsep(&buf, ":\n");
	if (!*b)
		return -EIO;

	if (c)
		*c = strsep(&buf, ":\n");

	return 0;
}
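
/*
 * Usage sketch (the buffer must be writable, since strsep() modifies it):
 *
 *	char buffer[] = "mdev0:ch0@ep_81:my_channel";
 *	char *mdev, *mdev_ch, *mdev_devnod;
 *
 *	if (!split_string(buffer, &mdev, &mdev_ch, &mdev_devnod))
 *		pr_info("%s %s %s\n", mdev, mdev_ch, mdev_devnod);
 */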

/**
 * get_channel_by_name - get pointer to channel object
 * @mdev: name of the device instance
 * @mdev_ch: name of the respective channel
 *
 * This retrieves the pointer to a channel object.
 */
static struct
most_c_obj *get_channel_by_name(char *mdev, char *mdev_ch)
{
	struct most_c_obj *c, *tmp;
	struct most_inst_obj *i, *i_tmp;
	int found = 0;

	list_for_each_entry_safe(i, i_tmp, &instance_list, list) {
		if (!strcmp(kobject_name(&i->kobj), mdev)) {
			found++;
			break;
		}
	}
	if (unlikely(!found))
		return ERR_PTR(-EIO);

	list_for_each_entry_safe(c, tmp, &i->channel_list, list) {
		if (!strcmp(kobject_name(&c->kobj), mdev_ch)) {
			found++;
			break;
		}
	}
	if (unlikely(2 > found))
		return ERR_PTR(-EIO);
	return c;
}

/**
 * store_add_link - store() function for add_link attribute
 * @aim_obj: pointer to AIM object
 * @attr: its attributes
 * @buf: buffer
 * @len: buffer length
 *
 * This parses the string given by buf and splits it into
 * three substrings. Note: the third substring is optional. In case a cdev
 * AIM is loaded, the optional 3rd substring will make up the name of the
 * device node in the /dev directory. If omitted, the device node will
 * inherit the channel's name within sysfs.
 *
 * Searches for a pair of device and channel and probes the AIM.
 *
 * Example:
 * (1) echo -n -e "mdev0:ch0@ep_81:my_rxchannel\n" >add_link
 * (2) echo -n -e "mdev0:ch0@ep_81\n" >add_link
 *
 * (1) would create the device node /dev/my_rxchannel
 * (2) would create the device node /dev/mdev0-ch0@ep_81
 */
static ssize_t store_add_link(struct most_aim_obj *aim_obj,
			      struct most_aim_attribute *attr,
			      const char *buf,
			      size_t len)
{
	struct most_c_obj *c;
	struct most_aim **aim_ptr;
	char buffer[STRING_SIZE];
	char *mdev;
	char *mdev_ch;
	char *mdev_devnod;
	char devnod_buf[STRING_SIZE];
	int ret;
	size_t max_len = min_t(size_t, len + 1, STRING_SIZE);

	strlcpy(buffer, buf, max_len);
	strlcpy(aim_obj->add_link, buf, max_len);

	ret = split_string(buffer, &mdev, &mdev_ch, &mdev_devnod);
	if (ret)
		return ret;

	if (!mdev_devnod || *mdev_devnod == 0) {
		snprintf(devnod_buf, sizeof(devnod_buf), "%s-%s", mdev,
			 mdev_ch);
		mdev_devnod = devnod_buf;
	}

	c = get_channel_by_name(mdev, mdev_ch);
	if (IS_ERR(c))
		return -ENODEV;

	if (!c->aim0.ptr)
		aim_ptr = &c->aim0.ptr;
	else if (!c->aim1.ptr)
		aim_ptr = &c->aim1.ptr;
	else
		return -ENOSPC;

	ret = aim_obj->driver->probe_channel(c->iface, c->channel_id,
					     &c->cfg, &c->kobj, mdev_devnod);
	if (ret)
		return ret;
	*aim_ptr = aim_obj->driver;
	return len;
}

static struct most_aim_attribute most_aim_attr_add_link =
	__ATTR(add_link, S_IRUGO | S_IWUSR, show_add_link, store_add_link);

static ssize_t show_remove_link(struct most_aim_obj *aim_obj,
				struct most_aim_attribute *attr,
				char *buf)
{
	return snprintf(buf, PAGE_SIZE, "%s\n", aim_obj->remove_link);
}

/**
 * store_remove_link - store function for remove_link attribute
 * @aim_obj: pointer to AIM object
 * @attr: its attributes
 * @buf: buffer
 * @len: buffer length
 *
 * Example:
 * echo -n -e "mdev0:ch0@ep_81\n" >remove_link
 */
static ssize_t store_remove_link(struct most_aim_obj *aim_obj,
				 struct most_aim_attribute *attr,
				 const char *buf,
				 size_t len)
{
	struct most_c_obj *c;
	char buffer[STRING_SIZE];
	char *mdev;
	char *mdev_ch;
	int ret;
	size_t max_len = min_t(size_t, len + 1, STRING_SIZE);

	strlcpy(buffer, buf, max_len);
	strlcpy(aim_obj->remove_link, buf, max_len);
	ret = split_string(buffer, &mdev, &mdev_ch, NULL);
	if (ret)
		return ret;

	c = get_channel_by_name(mdev, mdev_ch);
	if (IS_ERR(c))
		return -ENODEV;

	if (c->aim0.ptr == aim_obj->driver)
		c->aim0.ptr = NULL;
	if (c->aim1.ptr == aim_obj->driver)
		c->aim1.ptr = NULL;
	if (aim_obj->driver->disconnect_channel(c->iface, c->channel_id))
		return -EIO;
	return len;
}

static struct most_aim_attribute most_aim_attr_remove_link =
	__ATTR(remove_link, S_IRUGO | S_IWUSR, show_remove_link,
	       store_remove_link);

static struct attribute *most_aim_def_attrs[] = {
	&most_aim_attr_add_link.attr,
	&most_aim_attr_remove_link.attr,
	NULL,
};

static struct kobj_type most_aim_ktype = {
	.sysfs_ops = &most_aim_sysfs_ops,
	.release = most_aim_release,
	.default_attrs = most_aim_def_attrs,
};

static struct kset *most_aim_kset;

/**
 * create_most_aim_obj - creates an AIM object
 * @name: name of the AIM
 *
 * This creates an AIM object, assigns the proper kset and registers
 * it with sysfs.
 * Returns a pointer to the object or NULL if something went wrong.
 */
static struct most_aim_obj *create_most_aim_obj(const char *name)
{
	struct most_aim_obj *most_aim;
	int retval;

	most_aim = kzalloc(sizeof(*most_aim), GFP_KERNEL);
	if (!most_aim)
		return NULL;
	most_aim->kobj.kset = most_aim_kset;
	retval = kobject_init_and_add(&most_aim->kobj, &most_aim_ktype,
				      NULL, "%s", name);
	if (retval) {
		kobject_put(&most_aim->kobj);
		return NULL;
	}
	kobject_uevent(&most_aim->kobj, KOBJ_ADD);
	return most_aim;
}

/**
 * destroy_most_aim_obj - AIM release function
 * @p: pointer to AIM object
 *
 * This decrements the reference counter of the AIM object. If the
 * reference count turns zero, its release function will be called.
 */
static void destroy_most_aim_obj(struct most_aim_obj *p)
{
	kobject_put(&p->kobj);
}

/*		     ___	 ___
 * ___C O R E___
 */

/**
 * Instantiation of the MOST bus
 */
static struct bus_type most_bus = {
	.name = "most",
};

/**
 * Instantiation of the core driver
 */
static struct device_driver mostcore = {
	.name = "mostcore",
	.bus = &most_bus,
};

static inline void trash_mbo(struct mbo *mbo)
{
	unsigned long flags;
	struct most_c_obj *c = mbo->context;

	spin_lock_irqsave(&c->fifo_lock, flags);
	list_add(&mbo->list, &c->trash_fifo);
	spin_unlock_irqrestore(&c->fifo_lock, flags);
}

static struct mbo *get_hdm_mbo(struct most_c_obj *c)
{
	unsigned long flags;
	struct mbo *mbo;

	spin_lock_irqsave(&c->fifo_lock, flags);
	if (c->enqueue_halt || list_empty(&c->halt_fifo))
		mbo = NULL;
	else
		mbo = list_pop_mbo(&c->halt_fifo);
	spin_unlock_irqrestore(&c->fifo_lock, flags);
	return mbo;
}

static void nq_hdm_mbo(struct mbo *mbo)
{
	unsigned long flags;
	struct most_c_obj *c = mbo->context;

	spin_lock_irqsave(&c->fifo_lock, flags);
	list_add_tail(&mbo->list, &c->halt_fifo);
	spin_unlock_irqrestore(&c->fifo_lock, flags);
	wake_up_interruptible(&c->hdm_fifo_wq);
}

static int hdm_enqueue_thread(void *data)
{
	struct most_c_obj *c = data;
	struct mbo *mbo;
	typeof(c->iface->enqueue) enqueue = c->iface->enqueue;

	while (likely(!kthread_should_stop())) {
		wait_event_interruptible(c->hdm_fifo_wq,
					 (mbo = get_hdm_mbo(c))
					 || kthread_should_stop());

		if (unlikely(!mbo))
			continue;

		if (c->cfg.direction == MOST_CH_RX)
			mbo->buffer_length = c->cfg.buffer_size;

		if (unlikely(enqueue(mbo->ifp, mbo->hdm_channel_id, mbo))) {
			pr_err("hdm enqueue failed\n");
			nq_hdm_mbo(mbo);
			c->hdm_enqueue_task = NULL;
			return 0;
		}
	}

	return 0;
}

static int run_enqueue_thread(struct most_c_obj *c, int channel_id)
{
	struct task_struct *task =
		kthread_run(hdm_enqueue_thread, c, "hdm_fifo_%d",
			    channel_id);

	if (IS_ERR(task))
		return PTR_ERR(task);

	c->hdm_enqueue_task = task;
	return 0;
}

/**
 * arm_mbo - recycle MBO for further usage
 * @mbo: buffer object
 *
 * This puts an MBO back into the list to have it ready for upcoming
 * tx transactions.
 *
 * In case the MBO belongs to a channel that recently has been
 * poisoned, the MBO is scheduled to be trashed.
 * Calls the completion handler of an attached AIM.
 */
static void arm_mbo(struct mbo *mbo)
{
	unsigned long flags;
	struct most_c_obj *c;

	BUG_ON((!mbo) || (!mbo->context));
	c = mbo->context;

	if (c->is_poisoned) {
		trash_mbo(mbo);
		return;
	}

	spin_lock_irqsave(&c->fifo_lock, flags);
	++*mbo->num_buffers_ptr;
	list_add_tail(&mbo->list, &c->fifo);
	spin_unlock_irqrestore(&c->fifo_lock, flags);

	if (c->aim0.refs && c->aim0.ptr->tx_completion)
		c->aim0.ptr->tx_completion(c->iface, c->channel_id);

	if (c->aim1.refs && c->aim1.ptr->tx_completion)
		c->aim1.ptr->tx_completion(c->iface, c->channel_id);
}

/**
 * arm_mbo_chain - helper function that arms an MBO chain for the HDM
 * @c: pointer to interface channel
 * @dir: direction of the channel
 * @compl: pointer to completion function
 *
 * This allocates buffer objects including the containing DMA coherent
 * buffer and puts them in the fifo.
 * Buffers of Rx channels are put in the kthread fifo, hence immediately
 * submitted to the HDM.
 *
 * Returns the number of allocated and enqueued MBOs.
 */
static int arm_mbo_chain(struct most_c_obj *c, int dir,
			 void (*compl)(struct mbo *))
{
	unsigned int i;
	int retval;
	struct mbo *mbo;
	u32 coherent_buf_size = c->cfg.buffer_size + c->cfg.extra_len;

	atomic_set(&c->mbo_nq_level, 0);

	for (i = 0; i < c->cfg.num_buffers; i++) {
		mbo = kzalloc(sizeof(*mbo), GFP_KERNEL);
		if (!mbo) {
			pr_info("WARN: Allocation of MBO failed.\n");
			retval = i;
			goto _exit;
		}
		mbo->context = c;
		mbo->ifp = c->iface;
		mbo->hdm_channel_id = c->channel_id;
		mbo->virt_address = dma_alloc_coherent(NULL,
						       coherent_buf_size,
						       &mbo->bus_address,
						       GFP_KERNEL);
		if (!mbo->virt_address) {
			pr_info("WARN: No DMA coherent buffer.\n");
			retval = i;
			goto _error1;
		}
		mbo->complete = compl;
		mbo->num_buffers_ptr = &dummy_num_buffers;
		if (dir == MOST_CH_RX) {
			nq_hdm_mbo(mbo);
			atomic_inc(&c->mbo_nq_level);
		} else {
			arm_mbo(mbo);
		}
	}
	return i;

_error1:
	kfree(mbo);
_exit:
	return retval;
}

/**
 * most_submit_mbo - submits an MBO to fifo
 * @mbo: pointer to the MBO
 */
int most_submit_mbo(struct mbo *mbo)
{
	struct most_c_obj *c;
	struct most_inst_obj *i;

	if (unlikely((!mbo) || (!mbo->context))) {
		pr_err("Bad MBO or missing channel reference\n");
		return -EINVAL;
	}
	c = mbo->context;
	i = c->inst;

	if (unlikely(atomic_read(&i->tainted)))
		return -ENODEV;

	nq_hdm_mbo(mbo);
	return 0;
}
EXPORT_SYMBOL_GPL(most_submit_mbo);

/**
 * most_write_completion - write completion handler
 * @mbo: pointer to MBO
 *
 * This recycles the MBO for further usage. In case the channel has been
 * poisoned, the MBO is scheduled to be trashed.
 */
static void most_write_completion(struct mbo *mbo)
{
	struct most_c_obj *c;

	BUG_ON((!mbo) || (!mbo->context));

	c = mbo->context;
	if (mbo->status == MBO_E_INVAL)
		pr_info("WARN: Tx MBO status: invalid\n");
	if (unlikely(c->is_poisoned || (mbo->status == MBO_E_CLOSE)))
		trash_mbo(mbo);
	else
		arm_mbo(mbo);
}

/**
 * get_channel_by_iface - get pointer to channel object
 * @iface: pointer to interface instance
 * @id: channel ID
 *
 * This retrieves a pointer to a channel of the given interface and channel ID.
 */
static struct
most_c_obj *get_channel_by_iface(struct most_interface *iface, int id)
{
	struct most_inst_obj *i;

	if (unlikely(!iface)) {
		pr_err("Bad interface\n");
		return NULL;
	}
	if (unlikely((id < 0) || (id >= iface->num_channels))) {
		pr_err("Channel index (%d) out of range\n", id);
		return NULL;
	}
	i = iface->priv;
	if (unlikely(!i)) {
		pr_err("interface is not registered\n");
		return NULL;
	}
	return i->channel[id];
}

int channel_has_mbo(struct most_interface *iface, int id)
{
	struct most_c_obj *c = get_channel_by_iface(iface, id);
	unsigned long flags;
	int empty;

	if (unlikely(!c))
		return -EINVAL;

	spin_lock_irqsave(&c->fifo_lock, flags);
	empty = list_empty(&c->fifo);
	spin_unlock_irqrestore(&c->fifo_lock, flags);
	return !empty;
}
EXPORT_SYMBOL_GPL(channel_has_mbo);

/**
 * most_get_mbo - get pointer to an MBO of pool
 * @iface: pointer to interface instance
 * @id: channel ID
 *
 * This attempts to get a free buffer out of the channel fifo.
 * Returns a pointer to MBO on success or NULL otherwise.
 */
struct mbo *most_get_mbo(struct most_interface *iface, int id,
			 struct most_aim *aim)
{
	struct mbo *mbo;
	struct most_c_obj *c;
	unsigned long flags;
	int *num_buffers_ptr;

	c = get_channel_by_iface(iface, id);
	if (unlikely(!c))
		return NULL;

	if (c->aim0.refs && c->aim1.refs &&
	    ((aim == c->aim0.ptr && c->aim0.num_buffers <= 0) ||
	     (aim == c->aim1.ptr && c->aim1.num_buffers <= 0)))
		return NULL;

	if (aim == c->aim0.ptr)
		num_buffers_ptr = &c->aim0.num_buffers;
	else if (aim == c->aim1.ptr)
		num_buffers_ptr = &c->aim1.num_buffers;
	else
		num_buffers_ptr = &dummy_num_buffers;

	spin_lock_irqsave(&c->fifo_lock, flags);
	if (list_empty(&c->fifo)) {
		spin_unlock_irqrestore(&c->fifo_lock, flags);
		return NULL;
	}
	mbo = list_pop_mbo(&c->fifo);
	--*num_buffers_ptr;
	spin_unlock_irqrestore(&c->fifo_lock, flags);

	mbo->num_buffers_ptr = num_buffers_ptr;
	mbo->buffer_length = c->cfg.buffer_size;
	return mbo;
}
EXPORT_SYMBOL_GPL(most_get_mbo);
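
/*
 * Tx path as seen from an AIM, as a sketch (the my_aim instance and the
 * data/len variables are hypothetical; the fields and calls follow their
 * use in this file):
 *
 *	struct mbo *mbo = most_get_mbo(iface, channel_id, &my_aim);
 *
 *	if (mbo) {
 *		memcpy(mbo->virt_address, data, len);
 *		mbo->buffer_length = len;
 *		most_submit_mbo(mbo);	 hand the buffer to the HDM
 *	}
 */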

/**
 * most_put_mbo - return buffer to pool
 * @mbo: buffer object
 */
void most_put_mbo(struct mbo *mbo)
{
	struct most_c_obj *c;
	struct most_inst_obj *i;

	c = mbo->context;
	i = c->inst;

	if (unlikely(atomic_read(&i->tainted))) {
		mbo->status = MBO_E_CLOSE;
		trash_mbo(mbo);
		return;
	}
	if (c->cfg.direction == MOST_CH_TX) {
		arm_mbo(mbo);
		return;
	}
	nq_hdm_mbo(mbo);
	atomic_inc(&c->mbo_nq_level);
}
EXPORT_SYMBOL_GPL(most_put_mbo);
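
/*
 * Rx counterpart (sketch): an AIM whose rx_completion() returned 0 keeps
 * the MBO, as most_read_completion() below shows, and recycles it once
 * the payload has been consumed (consume() is a hypothetical helper):
 *
 *	consume(mbo->virt_address, mbo->processed_length);
 *	most_put_mbo(mbo);
 */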

/**
 * most_read_completion - read completion handler
 * @mbo: pointer to MBO
 *
 * This function is called by the HDM when data has been received from the
 * hardware and copied to the buffer of the MBO.
 *
 * In case the channel has been poisoned it puts the buffer in the trash queue.
 * Otherwise, it passes the buffer to an AIM for further processing.
 */
static void most_read_completion(struct mbo *mbo)
{
	struct most_c_obj *c = mbo->context;

	if (unlikely(c->is_poisoned || (mbo->status == MBO_E_CLOSE))) {
		trash_mbo(mbo);
		return;
	}

	if (mbo->status == MBO_E_INVAL) {
		nq_hdm_mbo(mbo);
		atomic_inc(&c->mbo_nq_level);
		return;
	}

	if (atomic_sub_and_test(1, &c->mbo_nq_level)) {
		pr_info("WARN: rx device out of buffers\n");
		c->is_starving = 1;
	}

	if (c->aim0.refs && c->aim0.ptr->rx_completion &&
	    c->aim0.ptr->rx_completion(mbo) == 0)
		return;

	if (c->aim1.refs && c->aim1.ptr->rx_completion &&
	    c->aim1.ptr->rx_completion(mbo) == 0)
		return;

	most_put_mbo(mbo);
}

/**
 * most_start_channel - prepares a channel for communication
 * @iface: pointer to interface instance
 * @id: channel ID
 *
 * This prepares the channel for usage. Cross-checks whether the
 * channel's been properly configured.
 *
 * Returns 0 on success or error code otherwise.
 */
int most_start_channel(struct most_interface *iface, int id,
		       struct most_aim *aim)
{
	int num_buffer;
	int ret;
	struct most_c_obj *c = get_channel_by_iface(iface, id);

	if (unlikely(!c))
		return -EINVAL;

	mutex_lock(&c->start_mutex);
	if (c->aim0.refs + c->aim1.refs > 0)
		goto out; /* already started by other aim */

	if (!try_module_get(iface->mod)) {
		pr_info("failed to acquire HDM lock\n");
		mutex_unlock(&c->start_mutex);
		return -ENOLCK;
	}
	modref++;

	c->cfg.extra_len = 0;
	if (c->iface->configure(c->iface, c->channel_id, &c->cfg)) {
		pr_info("channel configuration failed. Go check settings...\n");
		ret = -EINVAL;
		goto error;
	}

	init_waitqueue_head(&c->hdm_fifo_wq);

	if (c->cfg.direction == MOST_CH_RX)
		num_buffer = arm_mbo_chain(c, c->cfg.direction,
					   most_read_completion);
	else
		num_buffer = arm_mbo_chain(c, c->cfg.direction,
					   most_write_completion);
	if (unlikely(0 == num_buffer)) {
		pr_info("failed to allocate memory\n");
		ret = -ENOMEM;
		goto error;
	}

	ret = run_enqueue_thread(c, id);
	if (ret)
		goto error;

	c->is_starving = 0;
	c->aim0.num_buffers = c->cfg.num_buffers / 2;
	c->aim1.num_buffers = c->cfg.num_buffers - c->aim0.num_buffers;
	atomic_set(&c->mbo_ref, num_buffer);

out:
	if (aim == c->aim0.ptr)
		c->aim0.refs++;
	if (aim == c->aim1.ptr)
		c->aim1.refs++;
	mutex_unlock(&c->start_mutex);
	return 0;

error:
	if (iface->mod)
		module_put(iface->mod);
	modref--;
	mutex_unlock(&c->start_mutex);
	return ret;
}
EXPORT_SYMBOL_GPL(most_start_channel);

/**
 * most_stop_channel - stops a running channel
 * @iface: pointer to interface instance
 * @id: channel ID
 */
int most_stop_channel(struct most_interface *iface, int id,
		      struct most_aim *aim)
{
	struct most_c_obj *c;

	if (unlikely((!iface) || (id >= iface->num_channels) || (id < 0))) {
		pr_err("Bad interface or index out of range\n");
		return -EINVAL;
	}
	c = get_channel_by_iface(iface, id);
	if (unlikely(!c))
		return -EINVAL;

	mutex_lock(&c->start_mutex);
	if (c->aim0.refs + c->aim1.refs >= 2)
		goto out;

	mutex_lock(&c->stop_task_mutex);
	if (c->hdm_enqueue_task)
		kthread_stop(c->hdm_enqueue_task);
	c->hdm_enqueue_task = NULL;
	mutex_unlock(&c->stop_task_mutex);

	mutex_lock(&deregister_mutex);
	if (atomic_read(&c->inst->tainted)) {
		mutex_unlock(&deregister_mutex);
		mutex_unlock(&c->start_mutex);
		return -ENODEV;
	}
	mutex_unlock(&deregister_mutex);

	if (iface->mod && modref) {
		module_put(iface->mod);
		modref--;
	}

	c->is_poisoned = true;
	if (c->iface->poison_channel(c->iface, c->channel_id)) {
		pr_err("Cannot stop channel %d of mdev %s\n", c->channel_id,
		       c->iface->description);
		mutex_unlock(&c->start_mutex);
		return -EAGAIN;
	}
	flush_trash_fifo(c);
	flush_channel_fifos(c);

#ifdef CMPL_INTERRUPTIBLE
	if (wait_for_completion_interruptible(&c->cleanup)) {
		pr_info("Interrupted while clean up ch %d\n", c->channel_id);
		mutex_unlock(&c->start_mutex);
		return -EINTR;
	}
#else
	wait_for_completion(&c->cleanup);
#endif
	c->is_poisoned = false;

out:
	if (aim == c->aim0.ptr)
		c->aim0.refs--;
	if (aim == c->aim1.ptr)
		c->aim1.refs--;
	mutex_unlock(&c->start_mutex);
	return 0;
}
EXPORT_SYMBOL_GPL(most_stop_channel);

/**
 * most_register_aim - registers an AIM (driver) with the core
 * @aim: instance of AIM to be registered
 */
int most_register_aim(struct most_aim *aim)
{
	struct most_aim_obj *aim_obj;

	if (!aim) {
		pr_err("Bad driver\n");
		return -EINVAL;
	}
	aim_obj = create_most_aim_obj(aim->name);
	if (!aim_obj) {
		pr_info("failed to alloc driver object\n");
		return -ENOMEM;
	}
	aim_obj->driver = aim;
	aim->context = aim_obj;
	pr_info("registered new application interfacing module %s\n",
		aim->name);
	list_add_tail(&aim_obj->list, &aim_list);
	return 0;
}
EXPORT_SYMBOL_GPL(most_register_aim);
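
/*
 * Minimal AIM skeleton, as a sketch (the my_* callbacks are hypothetical;
 * the field names follow their use in this file):
 *
 *	static struct most_aim my_aim = {
 *		.name = "my_aim",
 *		.probe_channel = my_probe_channel,
 *		.disconnect_channel = my_disconnect_channel,
 *		.rx_completion = my_rx_completion,
 *		.tx_completion = my_tx_completion,
 *	};
 *
 * The module's init and exit routines would then call
 * most_register_aim(&my_aim) and most_deregister_aim(&my_aim).
 */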

/**
 * most_deregister_aim - deregisters an AIM (driver) with the core
 * @aim: AIM to be removed
 */
int most_deregister_aim(struct most_aim *aim)
{
	struct most_aim_obj *aim_obj;
	struct most_c_obj *c, *tmp;
	struct most_inst_obj *i, *i_tmp;

	if (!aim) {
		pr_err("Bad driver\n");
		return -EINVAL;
	}

	aim_obj = aim->context;
	if (!aim_obj) {
		pr_info("driver not registered.\n");
		return -EINVAL;
	}
	list_for_each_entry_safe(i, i_tmp, &instance_list, list) {
		list_for_each_entry_safe(c, tmp, &i->channel_list, list) {
			if (c->aim0.ptr == aim || c->aim1.ptr == aim)
				aim->disconnect_channel(
					c->iface, c->channel_id);
			if (c->aim0.ptr == aim)
				c->aim0.ptr = NULL;
			if (c->aim1.ptr == aim)
				c->aim1.ptr = NULL;
		}
	}
	list_del(&aim_obj->list);
	destroy_most_aim_obj(aim_obj);
	pr_info("deregistering application interfacing module %s\n", aim->name);
	return 0;
}
EXPORT_SYMBOL_GPL(most_deregister_aim);

/**
 * most_register_interface - registers an interface with core
 * @iface: pointer to the instance of the interface description.
 *
 * Allocates and initializes a new interface instance and all of its channels.
 * Returns a pointer to kobject or an error pointer.
 */
struct kobject *most_register_interface(struct most_interface *iface)
{
	unsigned int i;
	int id;
	char name[STRING_SIZE];
	char channel_name[STRING_SIZE];
	struct most_c_obj *c;
	struct most_inst_obj *inst;

	if (!iface || !iface->enqueue || !iface->configure ||
	    !iface->poison_channel || (iface->num_channels > MAX_CHANNELS)) {
		pr_err("Bad interface or channel overflow\n");
		return ERR_PTR(-EINVAL);
	}

	id = ida_simple_get(&mdev_id, 0, 0, GFP_KERNEL);
	if (id < 0) {
		pr_info("Failed to alloc mdev ID\n");
		return ERR_PTR(id);
	}
	snprintf(name, STRING_SIZE, "mdev%d", id);

	inst = create_most_inst_obj(name);
	if (!inst) {
		pr_info("Failed to allocate interface instance\n");
		return ERR_PTR(-ENOMEM);
	}

	iface->priv = inst;
	INIT_LIST_HEAD(&inst->channel_list);
	inst->iface = iface;
	inst->dev_id = id;
	atomic_set(&inst->tainted, 0);
	list_add_tail(&inst->list, &instance_list);

	for (i = 0; i < iface->num_channels; i++) {
		const char *name_suffix = iface->channel_vector[i].name_suffix;

		if (!name_suffix)
			snprintf(channel_name, STRING_SIZE, "ch%d", i);
		else if (name_suffix[0] == '@')
			snprintf(channel_name, STRING_SIZE, "ch%d%s", i,
				 name_suffix);
		else
			snprintf(channel_name, STRING_SIZE, "%s", name_suffix);

		/* this increments the reference count of this instance */
		c = create_most_c_obj(channel_name, &inst->kobj);
		if (!c)
			goto free_instance;
		inst->channel[i] = c;
		c->is_starving = 0;
		c->iface = iface;
		c->inst = inst;
		c->channel_id = i;
		c->keep_mbo = false;
		c->enqueue_halt = false;
		c->is_poisoned = false;
		c->cfg.direction = 0;
		c->cfg.data_type = 0;
		c->cfg.num_buffers = 0;
		c->cfg.buffer_size = 0;
		c->cfg.subbuffer_size = 0;
		c->cfg.packets_per_xact = 0;
		spin_lock_init(&c->fifo_lock);
		INIT_LIST_HEAD(&c->fifo);
		INIT_LIST_HEAD(&c->trash_fifo);
		INIT_LIST_HEAD(&c->halt_fifo);
		init_completion(&c->cleanup);
		atomic_set(&c->mbo_ref, 0);
		mutex_init(&c->start_mutex);
		mutex_init(&c->stop_task_mutex);
		list_add_tail(&c->list, &inst->channel_list);
	}
	pr_info("registered new MOST device mdev%d (%s)\n",
		inst->dev_id, iface->description);
	return &inst->kobj;

free_instance:
	pr_info("Failed to allocate channel(s)\n");
	list_del(&inst->list);
	destroy_most_inst_obj(inst);
	return ERR_PTR(-ENOMEM);
}
EXPORT_SYMBOL_GPL(most_register_interface);
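
/*
 * Minimal HDM registration, as a sketch (the my_* names are hypothetical;
 * the element type of channel_vector is assumed to be the capability
 * struct from mostcore.h, and the mandatory ops are the ones checked at
 * the top of most_register_interface()):
 *
 *	static struct most_channel_capability my_caps[2];
 *	static struct most_interface my_iface = {
 *		.interface = ITYPE_USB,
 *		.description = "my-hdm",
 *		.num_channels = 2,
 *		.channel_vector = my_caps,
 *		.configure = my_configure,
 *		.enqueue = my_enqueue,
 *		.poison_channel = my_poison_channel,
 *		.mod = THIS_MODULE,
 *	};
 *
 *	kobj = most_register_interface(&my_iface);
 */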

/**
 * most_deregister_interface - deregisters an interface with core
 * @iface: pointer to the interface instance description.
 *
 * Before removing an interface instance from the list, all running
 * channels are stopped and poisoned.
 */
void most_deregister_interface(struct most_interface *iface)
{
	struct most_inst_obj *i = iface->priv;
	struct most_c_obj *c;

	mutex_lock(&deregister_mutex);
	if (unlikely(!i)) {
		pr_info("Bad Interface\n");
		mutex_unlock(&deregister_mutex);
		return;
	}
	pr_info("deregistering MOST device %s (%s)\n", i->kobj.name,
		iface->description);

	atomic_set(&i->tainted, 1);
	mutex_unlock(&deregister_mutex);

	while (modref) {
		if (iface->mod && modref)
			module_put(iface->mod);
		modref--;
	}

	list_for_each_entry(c, &i->channel_list, list) {
		if (c->aim0.refs + c->aim1.refs <= 0)
			continue;

		mutex_lock(&c->stop_task_mutex);
		if (c->hdm_enqueue_task)
			kthread_stop(c->hdm_enqueue_task);
		c->hdm_enqueue_task = NULL;
		mutex_unlock(&c->stop_task_mutex);

		if (iface->poison_channel(iface, c->channel_id))
			pr_err("Can't poison channel %d\n", c->channel_id);
	}
	ida_simple_remove(&mdev_id, i->dev_id);
	list_del(&i->list);
	destroy_most_inst_obj(i);
}
EXPORT_SYMBOL_GPL(most_deregister_interface);

/**
 * most_stop_enqueue - prevents core from enqueueing MBOs
 * @iface: pointer to interface
 * @id: channel id
 *
 * This is called by an HDM that _cannot_ attend to its duties and is
 * about to be overrun by the core. The core will not enqueue any
 * further packets unless the flagging HDM calls most_resume_enqueue().
 */
void most_stop_enqueue(struct most_interface *iface, int id)
{
	struct most_c_obj *c = get_channel_by_iface(iface, id);

	if (likely(c))
		c->enqueue_halt = true;
}
EXPORT_SYMBOL_GPL(most_stop_enqueue);

/**
 * most_resume_enqueue - allow core to enqueue MBOs again
 * @iface: pointer to interface
 * @id: channel id
 *
 * This clears the enqueue halt flag and enqueues all MBOs currently
 * sitting in the wait fifo.
 */
void most_resume_enqueue(struct most_interface *iface, int id)
{
	struct most_c_obj *c = get_channel_by_iface(iface, id);

	if (unlikely(!c))
		return;
	c->enqueue_halt = false;

	wake_up_interruptible(&c->hdm_fifo_wq);
}
EXPORT_SYMBOL_GPL(most_resume_enqueue);
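
/*
 * Flow control from an HDM's point of view, as a sketch (my_iface and the
 * my_hw_busy()/my_hw_ready() conditions are hypothetical):
 *
 *	if (my_hw_busy(ch))
 *		most_stop_enqueue(&my_iface, ch);    get_hdm_mbo() yields NULL
 *	...
 *	if (my_hw_ready(ch))
 *		most_resume_enqueue(&my_iface, ch);  halt fifo is drained again
 */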

static int __init most_init(void)
{
	pr_info("init()\n");
	INIT_LIST_HEAD(&instance_list);
	INIT_LIST_HEAD(&aim_list);
	mutex_init(&deregister_mutex);
	ida_init(&mdev_id);

	if (bus_register(&most_bus)) {
		pr_info("Cannot register most bus\n");
		goto exit;
	}

	most_class = class_create(THIS_MODULE, "most");
	if (IS_ERR(most_class)) {
		pr_info("No udev support.\n");
		goto exit_bus;
	}
	if (driver_register(&mostcore)) {
		pr_info("Cannot register core driver\n");
		goto exit_class;
	}

	class_glue_dir =
		device_create(most_class, NULL, 0, NULL, "mostcore");
	if (IS_ERR(class_glue_dir))
		goto exit_driver;

	most_aim_kset =
		kset_create_and_add("aims", NULL, &class_glue_dir->kobj);
	if (!most_aim_kset)
		goto exit_class_container;

	most_inst_kset =
		kset_create_and_add("devices", NULL, &class_glue_dir->kobj);
	if (!most_inst_kset)
		goto exit_driver_kset;

	return 0;

exit_driver_kset:
	kset_unregister(most_aim_kset);
exit_class_container:
	device_destroy(most_class, 0);
exit_driver:
	driver_unregister(&mostcore);
exit_class:
	class_destroy(most_class);
exit_bus:
	bus_unregister(&most_bus);
exit:
	return -ENOMEM;
}

static void __exit most_exit(void)
{
	struct most_inst_obj *i, *i_tmp;
	struct most_aim_obj *d, *d_tmp;

	pr_info("exit core module\n");
	list_for_each_entry_safe(d, d_tmp, &aim_list, list) {
		destroy_most_aim_obj(d);
	}

	list_for_each_entry_safe(i, i_tmp, &instance_list, list) {
		list_del(&i->list);
		destroy_most_inst_obj(i);
	}
	kset_unregister(most_inst_kset);
	kset_unregister(most_aim_kset);
	device_destroy(most_class, 0);
	driver_unregister(&mostcore);
	class_destroy(most_class);
	bus_unregister(&most_bus);
	ida_destroy(&mdev_id);
}

module_init(most_init);
module_exit(most_exit);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Christian Gromm <christian.gromm@microchip.com>");
MODULE_DESCRIPTION("Core module of stacked MOST Linux driver");