staging: most: remove function destroy_most_c_obj
drivers/staging/most/mostcore/core.c (linux-2.6-block.git)
/*
 * core.c - Implementation of core module of MOST Linux driver stack
 *
 * Copyright (C) 2013-2015 Microchip Technology Germany II GmbH & Co. KG
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * This file is licensed under GPLv2.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/module.h>
#include <linux/fs.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/device.h>
#include <linux/list.h>
#include <linux/poll.h>
#include <linux/wait.h>
#include <linux/kobject.h>
#include <linux/mutex.h>
#include <linux/completion.h>
#include <linux/sysfs.h>
#include <linux/kthread.h>
#include <linux/dma-mapping.h>
#include <linux/idr.h>
#include "mostcore.h"

#define MAX_CHANNELS	64
#define STRING_SIZE	80

static struct class *most_class;
static struct device *class_glue_dir;
static struct ida mdev_id;
static int modref;
static int dummy_num_buffers;

struct most_c_aim_obj {
	struct most_aim *ptr;
	int refs;
	int num_buffers;
};

struct most_c_obj {
	struct kobject kobj;
	struct completion cleanup;
	atomic_t mbo_ref;
	atomic_t mbo_nq_level;
	u16 channel_id;
	bool is_poisoned;
	struct mutex start_mutex;
	int is_starving;
	struct most_interface *iface;
	struct most_inst_obj *inst;
	struct most_channel_config cfg;
	bool keep_mbo;
	bool enqueue_halt;
	struct list_head fifo;
	spinlock_t fifo_lock;
	struct list_head halt_fifo;
	struct list_head list;
	struct most_c_aim_obj aim0;
	struct most_c_aim_obj aim1;
	struct list_head trash_fifo;
	struct task_struct *hdm_enqueue_task;
	struct mutex stop_task_mutex;
	wait_queue_head_t hdm_fifo_wq;
};

#define to_c_obj(d) container_of(d, struct most_c_obj, kobj)

struct most_inst_obj {
	int dev_id;
	atomic_t tainted;
	struct most_interface *iface;
	struct list_head channel_list;
	struct most_c_obj *channel[MAX_CHANNELS];
	struct kobject kobj;
	struct list_head list;
};

#define to_inst_obj(d) container_of(d, struct most_inst_obj, kobj)

/**
 * list_pop_mbo - retrieves the first MBO of the list and removes it
 * @ptr: the list head to grab the MBO from.
 */
#define list_pop_mbo(ptr)						\
({									\
	struct mbo *_mbo = list_first_entry(ptr, struct mbo, list);	\
	list_del(&_mbo->list);						\
	_mbo;								\
})
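
/*
 * NB: list_pop_mbo() does no locking of its own; every caller in this
 * file (e.g. get_hdm_mbo(), most_get_mbo()) holds the channel's
 * fifo_lock around it.
 */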

static struct mutex deregister_mutex;

/*		     ___	       ___
 *		 ___C H A N N E L___
 */

/**
 * struct most_c_attr - to access the attributes of a channel object
 * @attr: attributes of a channel
 * @show: pointer to the show function
 * @store: pointer to the store function
 */
struct most_c_attr {
	struct attribute attr;
	ssize_t (*show)(struct most_c_obj *d,
			struct most_c_attr *attr,
			char *buf);
	ssize_t (*store)(struct most_c_obj *d,
			 struct most_c_attr *attr,
			 const char *buf,
			 size_t count);
};

#define to_channel_attr(a) container_of(a, struct most_c_attr, attr)

#define MOST_CHNL_ATTR(_name, _mode, _show, _store) \
	struct most_c_attr most_chnl_attr_##_name = \
		__ATTR(_name, _mode, _show, _store)

/**
 * channel_attr_show - show function of channel object
 * @kobj: pointer to its kobject
 * @attr: pointer to its attributes
 * @buf: buffer
 */
static ssize_t channel_attr_show(struct kobject *kobj, struct attribute *attr,
				 char *buf)
{
	struct most_c_attr *channel_attr = to_channel_attr(attr);
	struct most_c_obj *c_obj = to_c_obj(kobj);

	if (!channel_attr->show)
		return -EIO;

	return channel_attr->show(c_obj, channel_attr, buf);
}

/**
 * channel_attr_store - store function of channel object
 * @kobj: pointer to its kobject
 * @attr: pointer to its attributes
 * @buf: buffer
 * @len: length of buffer
 */
static ssize_t channel_attr_store(struct kobject *kobj,
				  struct attribute *attr,
				  const char *buf,
				  size_t len)
{
	struct most_c_attr *channel_attr = to_channel_attr(attr);
	struct most_c_obj *c_obj = to_c_obj(kobj);

	if (!channel_attr->store)
		return -EIO;
	return channel_attr->store(c_obj, channel_attr, buf, len);
}

static const struct sysfs_ops most_channel_sysfs_ops = {
	.show = channel_attr_show,
	.store = channel_attr_store,
};

/**
 * most_free_mbo_coherent - free an MBO and its coherent buffer
 * @mbo: buffer to be released
 */
static void most_free_mbo_coherent(struct mbo *mbo)
{
	struct most_c_obj *c = mbo->context;
	u16 const coherent_buf_size = c->cfg.buffer_size + c->cfg.extra_len;

	dma_free_coherent(NULL, coherent_buf_size, mbo->virt_address,
			  mbo->bus_address);
	kfree(mbo);
	if (atomic_sub_and_test(1, &c->mbo_ref))
		complete(&c->cleanup);
}

/**
 * flush_channel_fifos - clear the channel fifos
 * @c: pointer to channel object
 */
static void flush_channel_fifos(struct most_c_obj *c)
{
	unsigned long flags, hf_flags;
	struct mbo *mbo, *tmp;

	if (list_empty(&c->fifo) && list_empty(&c->halt_fifo))
		return;

	spin_lock_irqsave(&c->fifo_lock, flags);
	list_for_each_entry_safe(mbo, tmp, &c->fifo, list) {
		list_del(&mbo->list);
		spin_unlock_irqrestore(&c->fifo_lock, flags);
		most_free_mbo_coherent(mbo);
		spin_lock_irqsave(&c->fifo_lock, flags);
	}
	spin_unlock_irqrestore(&c->fifo_lock, flags);

	spin_lock_irqsave(&c->fifo_lock, hf_flags);
	list_for_each_entry_safe(mbo, tmp, &c->halt_fifo, list) {
		list_del(&mbo->list);
		spin_unlock_irqrestore(&c->fifo_lock, hf_flags);
		most_free_mbo_coherent(mbo);
		spin_lock_irqsave(&c->fifo_lock, hf_flags);
	}
	spin_unlock_irqrestore(&c->fifo_lock, hf_flags);

	if (unlikely((!list_empty(&c->fifo) || !list_empty(&c->halt_fifo))))
		pr_info("WARN: fifo | trash fifo not empty\n");
}

/**
 * flush_trash_fifo - clear the trash fifo
 * @c: pointer to channel object
 */
static int flush_trash_fifo(struct most_c_obj *c)
{
	struct mbo *mbo, *tmp;
	unsigned long flags;

	spin_lock_irqsave(&c->fifo_lock, flags);
	list_for_each_entry_safe(mbo, tmp, &c->trash_fifo, list) {
		list_del(&mbo->list);
		spin_unlock_irqrestore(&c->fifo_lock, flags);
		most_free_mbo_coherent(mbo);
		spin_lock_irqsave(&c->fifo_lock, flags);
	}
	spin_unlock_irqrestore(&c->fifo_lock, flags);
	return 0;
}

/**
 * most_channel_release - release function of channel object
 * @kobj: pointer to channel's kobject
 */
static void most_channel_release(struct kobject *kobj)
{
	struct most_c_obj *c = to_c_obj(kobj);

	kfree(c);
}

static ssize_t show_available_directions(struct most_c_obj *c,
					 struct most_c_attr *attr,
					 char *buf)
{
	unsigned int i = c->channel_id;

	strcpy(buf, "");
	if (c->iface->channel_vector[i].direction & MOST_CH_RX)
		strcat(buf, "dir_rx ");
	if (c->iface->channel_vector[i].direction & MOST_CH_TX)
		strcat(buf, "dir_tx ");
	strcat(buf, "\n");
	return strlen(buf) + 1;
}

static ssize_t show_available_datatypes(struct most_c_obj *c,
					struct most_c_attr *attr,
					char *buf)
{
	unsigned int i = c->channel_id;

	strcpy(buf, "");
	if (c->iface->channel_vector[i].data_type & MOST_CH_CONTROL)
		strcat(buf, "control ");
	if (c->iface->channel_vector[i].data_type & MOST_CH_ASYNC)
		strcat(buf, "async ");
	if (c->iface->channel_vector[i].data_type & MOST_CH_SYNC)
		strcat(buf, "sync ");
	if (c->iface->channel_vector[i].data_type & MOST_CH_ISOC_AVP)
		strcat(buf, "isoc_avp ");
	strcat(buf, "\n");
	return strlen(buf) + 1;
}

static
ssize_t show_number_of_packet_buffers(struct most_c_obj *c,
				      struct most_c_attr *attr,
				      char *buf)
{
	unsigned int i = c->channel_id;

	return snprintf(buf, PAGE_SIZE, "%d\n",
			c->iface->channel_vector[i].num_buffers_packet);
}

static
ssize_t show_number_of_stream_buffers(struct most_c_obj *c,
				      struct most_c_attr *attr,
				      char *buf)
{
	unsigned int i = c->channel_id;

	return snprintf(buf, PAGE_SIZE, "%d\n",
			c->iface->channel_vector[i].num_buffers_streaming);
}

static
ssize_t show_size_of_packet_buffer(struct most_c_obj *c,
				   struct most_c_attr *attr,
				   char *buf)
{
	unsigned int i = c->channel_id;

	return snprintf(buf, PAGE_SIZE, "%d\n",
			c->iface->channel_vector[i].buffer_size_packet);
}

static
ssize_t show_size_of_stream_buffer(struct most_c_obj *c,
				   struct most_c_attr *attr,
				   char *buf)
{
	unsigned int i = c->channel_id;

	return snprintf(buf, PAGE_SIZE, "%d\n",
			c->iface->channel_vector[i].buffer_size_streaming);
}

static ssize_t show_channel_starving(struct most_c_obj *c,
				     struct most_c_attr *attr,
				     char *buf)
{
	return snprintf(buf, PAGE_SIZE, "%d\n", c->is_starving);
}

#define create_show_channel_attribute(val) \
	static MOST_CHNL_ATTR(val, S_IRUGO, show_##val, NULL)

create_show_channel_attribute(available_directions);
create_show_channel_attribute(available_datatypes);
create_show_channel_attribute(number_of_packet_buffers);
create_show_channel_attribute(number_of_stream_buffers);
create_show_channel_attribute(size_of_stream_buffer);
create_show_channel_attribute(size_of_packet_buffer);
create_show_channel_attribute(channel_starving);

static ssize_t show_set_number_of_buffers(struct most_c_obj *c,
					  struct most_c_attr *attr,
					  char *buf)
{
	return snprintf(buf, PAGE_SIZE, "%d\n", c->cfg.num_buffers);
}

static ssize_t store_set_number_of_buffers(struct most_c_obj *c,
					   struct most_c_attr *attr,
					   const char *buf,
					   size_t count)
{
	int ret = kstrtou16(buf, 0, &c->cfg.num_buffers);

	if (ret)
		return ret;
	return count;
}

static ssize_t show_set_buffer_size(struct most_c_obj *c,
				    struct most_c_attr *attr,
				    char *buf)
{
	return snprintf(buf, PAGE_SIZE, "%d\n", c->cfg.buffer_size);
}

static ssize_t store_set_buffer_size(struct most_c_obj *c,
				     struct most_c_attr *attr,
				     const char *buf,
				     size_t count)
{
	int ret = kstrtou16(buf, 0, &c->cfg.buffer_size);

	if (ret)
		return ret;
	return count;
}

static ssize_t show_set_direction(struct most_c_obj *c,
				  struct most_c_attr *attr,
				  char *buf)
{
	if (c->cfg.direction & MOST_CH_TX)
		return snprintf(buf, PAGE_SIZE, "dir_tx\n");
	else if (c->cfg.direction & MOST_CH_RX)
		return snprintf(buf, PAGE_SIZE, "dir_rx\n");
	return snprintf(buf, PAGE_SIZE, "unconfigured\n");
}

static ssize_t store_set_direction(struct most_c_obj *c,
				   struct most_c_attr *attr,
				   const char *buf,
				   size_t count)
{
	if (!strcmp(buf, "dir_rx\n")) {
		c->cfg.direction = MOST_CH_RX;
	} else if (!strcmp(buf, "dir_tx\n")) {
		c->cfg.direction = MOST_CH_TX;
	} else {
		pr_info("WARN: invalid attribute settings\n");
		return -EINVAL;
	}
	return count;
}

static ssize_t show_set_datatype(struct most_c_obj *c,
				 struct most_c_attr *attr,
				 char *buf)
{
	if (c->cfg.data_type & MOST_CH_CONTROL)
		return snprintf(buf, PAGE_SIZE, "control\n");
	else if (c->cfg.data_type & MOST_CH_ASYNC)
		return snprintf(buf, PAGE_SIZE, "async\n");
	else if (c->cfg.data_type & MOST_CH_SYNC)
		return snprintf(buf, PAGE_SIZE, "sync\n");
	else if (c->cfg.data_type & MOST_CH_ISOC_AVP)
		return snprintf(buf, PAGE_SIZE, "isoc_avp\n");
	return snprintf(buf, PAGE_SIZE, "unconfigured\n");
}

static ssize_t store_set_datatype(struct most_c_obj *c,
				  struct most_c_attr *attr,
				  const char *buf,
				  size_t count)
{
	if (!strcmp(buf, "control\n")) {
		c->cfg.data_type = MOST_CH_CONTROL;
	} else if (!strcmp(buf, "async\n")) {
		c->cfg.data_type = MOST_CH_ASYNC;
	} else if (!strcmp(buf, "sync\n")) {
		c->cfg.data_type = MOST_CH_SYNC;
	} else if (!strcmp(buf, "isoc_avp\n")) {
		c->cfg.data_type = MOST_CH_ISOC_AVP;
	} else {
		pr_info("WARN: invalid attribute settings\n");
		return -EINVAL;
	}
	return count;
}

static ssize_t show_set_subbuffer_size(struct most_c_obj *c,
				       struct most_c_attr *attr,
				       char *buf)
{
	return snprintf(buf, PAGE_SIZE, "%d\n", c->cfg.subbuffer_size);
}

static ssize_t store_set_subbuffer_size(struct most_c_obj *c,
					struct most_c_attr *attr,
					const char *buf,
					size_t count)
{
	int ret = kstrtou16(buf, 0, &c->cfg.subbuffer_size);

	if (ret)
		return ret;
	return count;
}

static ssize_t show_set_packets_per_xact(struct most_c_obj *c,
					 struct most_c_attr *attr,
					 char *buf)
{
	return snprintf(buf, PAGE_SIZE, "%d\n", c->cfg.packets_per_xact);
}

static ssize_t store_set_packets_per_xact(struct most_c_obj *c,
					  struct most_c_attr *attr,
					  const char *buf,
					  size_t count)
{
	int ret = kstrtou16(buf, 0, &c->cfg.packets_per_xact);

	if (ret)
		return ret;
	return count;
}

#define create_channel_attribute(value) \
	static MOST_CHNL_ATTR(value, S_IRUGO | S_IWUSR, \
			      show_##value, \
			      store_##value)

create_channel_attribute(set_buffer_size);
create_channel_attribute(set_number_of_buffers);
create_channel_attribute(set_direction);
create_channel_attribute(set_datatype);
create_channel_attribute(set_subbuffer_size);
create_channel_attribute(set_packets_per_xact);
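
/*
 * Illustrative usage (not part of the driver): the attributes created
 * above let user space configure a channel before it is linked to an
 * AIM. Paths assume the "devices" kset created in most_init(), which is
 * typically reachable via /sys/class/most/mostcore/devices/:
 *
 *   cd /sys/class/most/mostcore/devices/mdev0/ch0@ep_81
 *   echo 16     >set_number_of_buffers
 *   echo 512    >set_buffer_size
 *   echo dir_rx >set_direction
 *   echo sync   >set_datatype
 *
 * Numeric values are parsed by kstrtou16(); direction and datatype must
 * match the exact strings tested in store_set_direction() and
 * store_set_datatype() (echo's trailing newline is part of the match).
 */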

/**
 * most_channel_def_attrs - array of default attributes of channel object
 */
static struct attribute *most_channel_def_attrs[] = {
	&most_chnl_attr_available_directions.attr,
	&most_chnl_attr_available_datatypes.attr,
	&most_chnl_attr_number_of_packet_buffers.attr,
	&most_chnl_attr_number_of_stream_buffers.attr,
	&most_chnl_attr_size_of_packet_buffer.attr,
	&most_chnl_attr_size_of_stream_buffer.attr,
	&most_chnl_attr_set_number_of_buffers.attr,
	&most_chnl_attr_set_buffer_size.attr,
	&most_chnl_attr_set_direction.attr,
	&most_chnl_attr_set_datatype.attr,
	&most_chnl_attr_set_subbuffer_size.attr,
	&most_chnl_attr_set_packets_per_xact.attr,
	&most_chnl_attr_channel_starving.attr,
	NULL,
};

static struct kobj_type most_channel_ktype = {
	.sysfs_ops = &most_channel_sysfs_ops,
	.release = most_channel_release,
	.default_attrs = most_channel_def_attrs,
};

static struct kset *most_channel_kset;

/**
 * create_most_c_obj - allocates a channel object
 * @name: name of the channel object
 * @parent: parent kobject
 *
 * This creates a channel object and registers it with sysfs.
 * Returns a pointer to the object or NULL when something went wrong.
 */
static struct most_c_obj *
create_most_c_obj(const char *name, struct kobject *parent)
{
	struct most_c_obj *c;
	int retval;

	c = kzalloc(sizeof(*c), GFP_KERNEL);
	if (!c)
		return NULL;
	c->kobj.kset = most_channel_kset;
	retval = kobject_init_and_add(&c->kobj, &most_channel_ktype, parent,
				      "%s", name);
	if (retval) {
		kobject_put(&c->kobj);
		return NULL;
	}
	kobject_uevent(&c->kobj, KOBJ_ADD);
	return c;
}

/*		     ___	       ___
 *		 ___I N S T A N C E___
 */
#define MOST_INST_ATTR(_name, _mode, _show, _store) \
	struct most_inst_attribute most_inst_attr_##_name = \
		__ATTR(_name, _mode, _show, _store)

static struct list_head instance_list;

/**
 * struct most_inst_attribute - to access the attributes of instance object
 * @attr: attributes of an instance
 * @show: pointer to the show function
 * @store: pointer to the store function
 */
struct most_inst_attribute {
	struct attribute attr;
	ssize_t (*show)(struct most_inst_obj *d,
			struct most_inst_attribute *attr,
			char *buf);
	ssize_t (*store)(struct most_inst_obj *d,
			 struct most_inst_attribute *attr,
			 const char *buf,
			 size_t count);
};

#define to_instance_attr(a) \
	container_of(a, struct most_inst_attribute, attr)

/**
 * instance_attr_show - show function for an instance object
 * @kobj: pointer to kobject
 * @attr: pointer to attribute struct
 * @buf: buffer
 */
static ssize_t instance_attr_show(struct kobject *kobj,
				  struct attribute *attr,
				  char *buf)
{
	struct most_inst_attribute *instance_attr;
	struct most_inst_obj *instance_obj;

	instance_attr = to_instance_attr(attr);
	instance_obj = to_inst_obj(kobj);

	if (!instance_attr->show)
		return -EIO;

	return instance_attr->show(instance_obj, instance_attr, buf);
}

/**
 * instance_attr_store - store function for an instance object
 * @kobj: pointer to kobject
 * @attr: pointer to attribute struct
 * @buf: buffer
 * @len: length of buffer
 */
static ssize_t instance_attr_store(struct kobject *kobj,
				   struct attribute *attr,
				   const char *buf,
				   size_t len)
{
	struct most_inst_attribute *instance_attr;
	struct most_inst_obj *instance_obj;

	instance_attr = to_instance_attr(attr);
	instance_obj = to_inst_obj(kobj);

	if (!instance_attr->store)
		return -EIO;

	return instance_attr->store(instance_obj, instance_attr, buf, len);
}

static const struct sysfs_ops most_inst_sysfs_ops = {
	.show = instance_attr_show,
	.store = instance_attr_store,
};

/**
 * most_inst_release - release function for instance object
 * @kobj: pointer to instance's kobject
 *
 * This frees the allocated memory for the instance object
 */
static void most_inst_release(struct kobject *kobj)
{
	struct most_inst_obj *inst = to_inst_obj(kobj);

	kfree(inst);
}

static ssize_t show_description(struct most_inst_obj *instance_obj,
				struct most_inst_attribute *attr,
				char *buf)
{
	return snprintf(buf, PAGE_SIZE, "%s\n",
			instance_obj->iface->description);
}

static ssize_t show_interface(struct most_inst_obj *instance_obj,
			      struct most_inst_attribute *attr,
			      char *buf)
{
	switch (instance_obj->iface->interface) {
	case ITYPE_LOOPBACK:
		return snprintf(buf, PAGE_SIZE, "loopback\n");
	case ITYPE_I2C:
		return snprintf(buf, PAGE_SIZE, "i2c\n");
	case ITYPE_I2S:
		return snprintf(buf, PAGE_SIZE, "i2s\n");
	case ITYPE_TSI:
		return snprintf(buf, PAGE_SIZE, "tsi\n");
	case ITYPE_HBI:
		return snprintf(buf, PAGE_SIZE, "hbi\n");
	case ITYPE_MEDIALB_DIM:
		return snprintf(buf, PAGE_SIZE, "mlb_dim\n");
	case ITYPE_MEDIALB_DIM2:
		return snprintf(buf, PAGE_SIZE, "mlb_dim2\n");
	case ITYPE_USB:
		return snprintf(buf, PAGE_SIZE, "usb\n");
	case ITYPE_PCIE:
		return snprintf(buf, PAGE_SIZE, "pcie\n");
	}
	return snprintf(buf, PAGE_SIZE, "unknown\n");
}

#define create_inst_attribute(value) \
	static MOST_INST_ATTR(value, S_IRUGO, show_##value, NULL)

create_inst_attribute(description);
create_inst_attribute(interface);

static struct attribute *most_inst_def_attrs[] = {
	&most_inst_attr_description.attr,
	&most_inst_attr_interface.attr,
	NULL,
};

static struct kobj_type most_inst_ktype = {
	.sysfs_ops = &most_inst_sysfs_ops,
	.release = most_inst_release,
	.default_attrs = most_inst_def_attrs,
};

static struct kset *most_inst_kset;

/**
 * create_most_inst_obj - creates an instance object
 * @name: name of the object to be created
 *
 * This allocates memory for an instance structure, assigns the proper kset
 * and registers it with sysfs.
 *
 * Returns a pointer to the instance object or NULL when something went wrong.
 */
static struct most_inst_obj *create_most_inst_obj(const char *name)
{
	struct most_inst_obj *inst;
	int retval;

	inst = kzalloc(sizeof(*inst), GFP_KERNEL);
	if (!inst)
		return NULL;
	inst->kobj.kset = most_inst_kset;
	retval = kobject_init_and_add(&inst->kobj, &most_inst_ktype, NULL,
				      "%s", name);
	if (retval) {
		kobject_put(&inst->kobj);
		return NULL;
	}
	kobject_uevent(&inst->kobj, KOBJ_ADD);
	return inst;
}

/**
 * destroy_most_inst_obj - MOST instance release function
 * @inst: pointer to the instance object
 *
 * This decrements the reference counter of the instance object.
 * If the reference count turns zero, its release function is called.
 */
static void destroy_most_inst_obj(struct most_inst_obj *inst)
{
	struct most_c_obj *c, *tmp;

	/* need to destroy channels first, since
	 * each channel incremented the
	 * reference count of the inst->kobj
	 */
	list_for_each_entry_safe(c, tmp, &inst->channel_list, list) {
		if (c->aim0.ptr)
			c->aim0.ptr->disconnect_channel(c->iface,
							c->channel_id);
		if (c->aim1.ptr)
			c->aim1.ptr->disconnect_channel(c->iface,
							c->channel_id);
		c->aim0.ptr = NULL;
		c->aim1.ptr = NULL;

		mutex_lock(&deregister_mutex);
		flush_trash_fifo(c);
		flush_channel_fifos(c);
		mutex_unlock(&deregister_mutex);
		kobject_put(&c->kobj);
	}
	kobject_put(&inst->kobj);
}
/*		     ___	       ___
 *		 ___A I M___
 */
struct most_aim_obj {
	struct kobject kobj;
	struct list_head list;
	struct most_aim *driver;
	char add_link[STRING_SIZE];
	char remove_link[STRING_SIZE];
};

#define to_aim_obj(d) container_of(d, struct most_aim_obj, kobj)

static struct list_head aim_list;

/**
 * struct most_aim_attribute - to access the attributes of AIM object
 * @attr: attributes of an AIM
 * @show: pointer to the show function
 * @store: pointer to the store function
 */
struct most_aim_attribute {
	struct attribute attr;
	ssize_t (*show)(struct most_aim_obj *d,
			struct most_aim_attribute *attr,
			char *buf);
	ssize_t (*store)(struct most_aim_obj *d,
			 struct most_aim_attribute *attr,
			 const char *buf,
			 size_t count);
};

#define to_aim_attr(a) container_of(a, struct most_aim_attribute, attr)

/**
 * aim_attr_show - show function of an AIM object
 * @kobj: pointer to kobject
 * @attr: pointer to attribute struct
 * @buf: buffer
 */
static ssize_t aim_attr_show(struct kobject *kobj,
			     struct attribute *attr,
			     char *buf)
{
	struct most_aim_attribute *aim_attr;
	struct most_aim_obj *aim_obj;

	aim_attr = to_aim_attr(attr);
	aim_obj = to_aim_obj(kobj);

	if (!aim_attr->show)
		return -EIO;

	return aim_attr->show(aim_obj, aim_attr, buf);
}

/**
 * aim_attr_store - store function of an AIM object
 * @kobj: pointer to kobject
 * @attr: pointer to attribute struct
 * @buf: buffer
 * @len: length of buffer
 */
static ssize_t aim_attr_store(struct kobject *kobj,
			      struct attribute *attr,
			      const char *buf,
			      size_t len)
{
	struct most_aim_attribute *aim_attr;
	struct most_aim_obj *aim_obj;

	aim_attr = to_aim_attr(attr);
	aim_obj = to_aim_obj(kobj);

	if (!aim_attr->store)
		return -EIO;
	return aim_attr->store(aim_obj, aim_attr, buf, len);
}

static const struct sysfs_ops most_aim_sysfs_ops = {
	.show = aim_attr_show,
	.store = aim_attr_store,
};

/**
 * most_aim_release - AIM release function
 * @kobj: pointer to AIM's kobject
 */
static void most_aim_release(struct kobject *kobj)
{
	struct most_aim_obj *aim_obj = to_aim_obj(kobj);

	kfree(aim_obj);
}

static ssize_t show_add_link(struct most_aim_obj *aim_obj,
			     struct most_aim_attribute *attr,
			     char *buf)
{
	return snprintf(buf, PAGE_SIZE, "%s\n", aim_obj->add_link);
}

/**
 * split_string - parses the string in the buffer buf and
 * splits it into two mandatory and one optional substrings.
 *
 * @buf: complete string from attribute 'add_channel'
 * @a: address of pointer to 1st substring (=instance name)
 * @b: address of pointer to 2nd substring (=channel name)
 * @c: optional address of pointer to 3rd substring (=user defined name)
 *
 * Examples:
 *
 * Input: "mdev0:ch0@ep_81:my_channel\n" or
 *        "mdev0:ch0@ep_81:my_channel"
 *
 * Output: *a -> "mdev0", *b -> "ch0@ep_81", *c -> "my_channel"
 *
 * Input: "mdev0:ch0@ep_81\n"
 * Output: *a -> "mdev0", *b -> "ch0@ep_81", *c -> ""
 *
 * Input: "mdev0:ch0@ep_81"
 * Output: *a -> "mdev0", *b -> "ch0@ep_81", *c == NULL
 */
static int split_string(char *buf, char **a, char **b, char **c)
{
	*a = strsep(&buf, ":");
	if (!*a)
		return -EIO;

	*b = strsep(&buf, ":\n");
	if (!*b)
		return -EIO;

	if (c)
		*c = strsep(&buf, ":\n");

	return 0;
}
/**
 * get_channel_by_name - get pointer to channel object
 * @mdev: name of the device instance
 * @mdev_ch: name of the respective channel
 *
 * This retrieves the pointer to a channel object.
 */
static struct
most_c_obj *get_channel_by_name(char *mdev, char *mdev_ch)
{
	struct most_c_obj *c, *tmp;
	struct most_inst_obj *i, *i_tmp;
	int found = 0;

	list_for_each_entry_safe(i, i_tmp, &instance_list, list) {
		if (!strcmp(kobject_name(&i->kobj), mdev)) {
			found++;
			break;
		}
	}
	if (unlikely(!found))
		return ERR_PTR(-EIO);

	list_for_each_entry_safe(c, tmp, &i->channel_list, list) {
		if (!strcmp(kobject_name(&c->kobj), mdev_ch)) {
			found++;
			break;
		}
	}
	if (unlikely(found < 2))
		return ERR_PTR(-EIO);
	return c;
}
/**
 * store_add_link - store() function for add_link attribute
 * @aim_obj: pointer to AIM object
 * @attr: its attributes
 * @buf: buffer
 * @len: buffer length
 *
 * This parses the string given by buf and splits it into
 * three substrings. Note: the third substring is optional. In case a cdev
 * AIM is loaded, the optional 3rd substring will make up the name of the
 * device node in the /dev directory. If omitted, the device node will
 * inherit the channel's name within sysfs.
 *
 * Searches for a pair of device and channel and probes the AIM
 *
 * Example:
 * (1) echo -n -e "mdev0:ch0@ep_81:my_rxchannel\n" >add_link
 * (2) echo -n -e "mdev0:ch0@ep_81\n" >add_link
 *
 * (1) would create the device node /dev/my_rxchannel
 * (2) would create the device node /dev/mdev0-ch0@ep_81
 */
static ssize_t store_add_link(struct most_aim_obj *aim_obj,
			      struct most_aim_attribute *attr,
			      const char *buf,
			      size_t len)
{
	struct most_c_obj *c;
	struct most_aim **aim_ptr;
	char buffer[STRING_SIZE];
	char *mdev;
	char *mdev_ch;
	char *mdev_devnod;
	char devnod_buf[STRING_SIZE];
	int ret;
	size_t max_len = min_t(size_t, len + 1, STRING_SIZE);

	strlcpy(buffer, buf, max_len);
	strlcpy(aim_obj->add_link, buf, max_len);

	ret = split_string(buffer, &mdev, &mdev_ch, &mdev_devnod);
	if (ret)
		return ret;

	if (!mdev_devnod || *mdev_devnod == 0) {
		snprintf(devnod_buf, sizeof(devnod_buf), "%s-%s", mdev,
			 mdev_ch);
		mdev_devnod = devnod_buf;
	}

	c = get_channel_by_name(mdev, mdev_ch);
	if (IS_ERR(c))
		return -ENODEV;

	if (!c->aim0.ptr)
		aim_ptr = &c->aim0.ptr;
	else if (!c->aim1.ptr)
		aim_ptr = &c->aim1.ptr;
	else
		return -ENOSPC;

	ret = aim_obj->driver->probe_channel(c->iface, c->channel_id,
					     &c->cfg, &c->kobj, mdev_devnod);
	if (ret)
		return ret;
	*aim_ptr = aim_obj->driver;
	return len;
}

static struct most_aim_attribute most_aim_attr_add_link =
	__ATTR(add_link, S_IRUGO | S_IWUSR, show_add_link, store_add_link);

static ssize_t show_remove_link(struct most_aim_obj *aim_obj,
				struct most_aim_attribute *attr,
				char *buf)
{
	return snprintf(buf, PAGE_SIZE, "%s\n", aim_obj->remove_link);
}

/**
 * store_remove_link - store function for remove_link attribute
 * @aim_obj: pointer to AIM object
 * @attr: its attributes
 * @buf: buffer
 * @len: buffer length
 *
 * Example:
 * echo -n -e "mdev0:ch0@ep_81\n" >remove_link
 */
static ssize_t store_remove_link(struct most_aim_obj *aim_obj,
				 struct most_aim_attribute *attr,
				 const char *buf,
				 size_t len)
{
	struct most_c_obj *c;
	char buffer[STRING_SIZE];
	char *mdev;
	char *mdev_ch;
	int ret;
	size_t max_len = min_t(size_t, len + 1, STRING_SIZE);

	strlcpy(buffer, buf, max_len);
	strlcpy(aim_obj->remove_link, buf, max_len);
	ret = split_string(buffer, &mdev, &mdev_ch, NULL);
	if (ret)
		return ret;

	c = get_channel_by_name(mdev, mdev_ch);
	if (IS_ERR(c))
		return -ENODEV;

	if (c->aim0.ptr == aim_obj->driver)
		c->aim0.ptr = NULL;
	if (c->aim1.ptr == aim_obj->driver)
		c->aim1.ptr = NULL;
	if (aim_obj->driver->disconnect_channel(c->iface, c->channel_id))
		return -EIO;
	return len;
}

static struct most_aim_attribute most_aim_attr_remove_link =
	__ATTR(remove_link, S_IRUGO | S_IWUSR, show_remove_link,
	       store_remove_link);

static struct attribute *most_aim_def_attrs[] = {
	&most_aim_attr_add_link.attr,
	&most_aim_attr_remove_link.attr,
	NULL,
};

static struct kobj_type most_aim_ktype = {
	.sysfs_ops = &most_aim_sysfs_ops,
	.release = most_aim_release,
	.default_attrs = most_aim_def_attrs,
};

static struct kset *most_aim_kset;

/**
 * create_most_aim_obj - creates an AIM object
 * @name: name of the AIM
 *
 * This creates an AIM object, assigns the proper kset and registers
 * it with sysfs.
 * Returns a pointer to the object or NULL if something went wrong.
 */
static struct most_aim_obj *create_most_aim_obj(const char *name)
{
	struct most_aim_obj *most_aim;
	int retval;

	most_aim = kzalloc(sizeof(*most_aim), GFP_KERNEL);
	if (!most_aim)
		return NULL;
	most_aim->kobj.kset = most_aim_kset;
	retval = kobject_init_and_add(&most_aim->kobj, &most_aim_ktype,
				      NULL, "%s", name);
	if (retval) {
		kobject_put(&most_aim->kobj);
		return NULL;
	}
	kobject_uevent(&most_aim->kobj, KOBJ_ADD);
	return most_aim;
}

/**
 * destroy_most_aim_obj - AIM release function
 * @p: pointer to AIM object
 *
 * This decrements the reference counter of the AIM object. If the
 * reference count turns zero, its release function will be called.
 */
static void destroy_most_aim_obj(struct most_aim_obj *p)
{
	kobject_put(&p->kobj);
}

/*		     ___	       ___
 *		 ___C O R E___
 */

/**
 * Instantiation of the MOST bus
 */
static struct bus_type most_bus = {
	.name = "most",
};

/**
 * Instantiation of the core driver
 */
static struct device_driver mostcore = {
	.name = "mostcore",
	.bus = &most_bus,
};

static inline void trash_mbo(struct mbo *mbo)
{
	unsigned long flags;
	struct most_c_obj *c = mbo->context;

	spin_lock_irqsave(&c->fifo_lock, flags);
	list_add(&mbo->list, &c->trash_fifo);
	spin_unlock_irqrestore(&c->fifo_lock, flags);
}

static struct mbo *get_hdm_mbo(struct most_c_obj *c)
{
	unsigned long flags;
	struct mbo *mbo;

	spin_lock_irqsave(&c->fifo_lock, flags);
	if (c->enqueue_halt || list_empty(&c->halt_fifo))
		mbo = NULL;
	else
		mbo = list_pop_mbo(&c->halt_fifo);
	spin_unlock_irqrestore(&c->fifo_lock, flags);
	return mbo;
}

static void nq_hdm_mbo(struct mbo *mbo)
{
	unsigned long flags;
	struct most_c_obj *c = mbo->context;

	spin_lock_irqsave(&c->fifo_lock, flags);
	list_add_tail(&mbo->list, &c->halt_fifo);
	spin_unlock_irqrestore(&c->fifo_lock, flags);
	wake_up_interruptible(&c->hdm_fifo_wq);
}

static int hdm_enqueue_thread(void *data)
{
	struct most_c_obj *c = data;
	struct mbo *mbo;
	typeof(c->iface->enqueue) enqueue = c->iface->enqueue;

	while (likely(!kthread_should_stop())) {
		wait_event_interruptible(c->hdm_fifo_wq,
					 (mbo = get_hdm_mbo(c)) ||
					 kthread_should_stop());

		if (unlikely(!mbo))
			continue;

		if (c->cfg.direction == MOST_CH_RX)
			mbo->buffer_length = c->cfg.buffer_size;

		if (unlikely(enqueue(mbo->ifp, mbo->hdm_channel_id, mbo))) {
			pr_err("hdm enqueue failed\n");
			nq_hdm_mbo(mbo);
			c->hdm_enqueue_task = NULL;
			return 0;
		}
	}

	return 0;
}

static int run_enqueue_thread(struct most_c_obj *c, int channel_id)
{
	struct task_struct *task =
		kthread_run(hdm_enqueue_thread, c, "hdm_fifo_%d",
			    channel_id);

	if (IS_ERR(task))
		return PTR_ERR(task);

	c->hdm_enqueue_task = task;
	return 0;
}

/**
 * arm_mbo - recycle MBO for further usage
 * @mbo: buffer object
 *
 * This puts an MBO back to the list to have it ready for upcoming
 * tx transactions.
 *
 * In case the MBO belongs to a channel that recently has been
 * poisoned, the MBO is scheduled to be trashed.
 * Calls the completion handler of an attached AIM.
 */
static void arm_mbo(struct mbo *mbo)
{
	unsigned long flags;
	struct most_c_obj *c;

	BUG_ON((!mbo) || (!mbo->context));
	c = mbo->context;

	if (c->is_poisoned) {
		trash_mbo(mbo);
		return;
	}

	spin_lock_irqsave(&c->fifo_lock, flags);
	++*mbo->num_buffers_ptr;
	list_add_tail(&mbo->list, &c->fifo);
	spin_unlock_irqrestore(&c->fifo_lock, flags);

	if (c->aim0.refs && c->aim0.ptr->tx_completion)
		c->aim0.ptr->tx_completion(c->iface, c->channel_id);

	if (c->aim1.refs && c->aim1.ptr->tx_completion)
		c->aim1.ptr->tx_completion(c->iface, c->channel_id);
}

/**
 * arm_mbo_chain - helper function that arms an MBO chain for the HDM
 * @c: pointer to interface channel
 * @dir: direction of the channel
 * @compl: pointer to completion function
 *
 * This allocates buffer objects including the containing DMA coherent
 * buffer and puts them in the fifo.
 * Buffers of Rx channels are put in the kthread fifo, hence immediately
 * submitted to the HDM.
 *
 * Returns the number of allocated and enqueued MBOs.
 */
static int arm_mbo_chain(struct most_c_obj *c, int dir,
			 void (*compl)(struct mbo *))
{
	unsigned int i;
	int retval;
	struct mbo *mbo;
	u32 coherent_buf_size = c->cfg.buffer_size + c->cfg.extra_len;

	atomic_set(&c->mbo_nq_level, 0);

	for (i = 0; i < c->cfg.num_buffers; i++) {
		mbo = kzalloc(sizeof(*mbo), GFP_KERNEL);
		if (!mbo) {
			pr_info("WARN: Allocation of MBO failed.\n");
			retval = i;
			goto _exit;
		}
		mbo->context = c;
		mbo->ifp = c->iface;
		mbo->hdm_channel_id = c->channel_id;
		mbo->virt_address = dma_alloc_coherent(NULL,
						       coherent_buf_size,
						       &mbo->bus_address,
						       GFP_KERNEL);
		if (!mbo->virt_address) {
			pr_info("WARN: No DMA coherent buffer.\n");
			retval = i;
			goto _error1;
		}
		mbo->complete = compl;
		mbo->num_buffers_ptr = &dummy_num_buffers;
		if (dir == MOST_CH_RX) {
			nq_hdm_mbo(mbo);
			atomic_inc(&c->mbo_nq_level);
		} else {
			arm_mbo(mbo);
		}
	}
	return i;

_error1:
	kfree(mbo);
_exit:
	return retval;
}

/**
 * most_submit_mbo - submits an MBO to fifo
 * @mbo: pointer to the MBO
 */
int most_submit_mbo(struct mbo *mbo)
{
	struct most_c_obj *c;
	struct most_inst_obj *i;

	if (unlikely((!mbo) || (!mbo->context))) {
		pr_err("Bad MBO or missing channel reference\n");
		return -EINVAL;
	}
	c = mbo->context;
	i = c->inst;

	if (unlikely(atomic_read(&i->tainted)))
		return -ENODEV;

	nq_hdm_mbo(mbo);
	return 0;
}
EXPORT_SYMBOL_GPL(most_submit_mbo);

/**
 * most_write_completion - write completion handler
 * @mbo: pointer to MBO
 *
 * This recycles the MBO for further usage. In case the channel has been
 * poisoned, the MBO is scheduled to be trashed.
 */
static void most_write_completion(struct mbo *mbo)
{
	struct most_c_obj *c;

	BUG_ON((!mbo) || (!mbo->context));

	c = mbo->context;
	if (mbo->status == MBO_E_INVAL)
		pr_info("WARN: Tx MBO status: invalid\n");
	if (unlikely(c->is_poisoned || (mbo->status == MBO_E_CLOSE)))
		trash_mbo(mbo);
	else
		arm_mbo(mbo);
}

/**
 * get_channel_by_iface - get pointer to channel object
 * @iface: pointer to interface instance
 * @id: channel ID
 *
 * This retrieves a pointer to a channel of the given interface and channel ID.
 */
static struct
most_c_obj *get_channel_by_iface(struct most_interface *iface, int id)
{
	struct most_inst_obj *i;

	if (unlikely(!iface)) {
		pr_err("Bad interface\n");
		return NULL;
	}
	if (unlikely((id < 0) || (id >= iface->num_channels))) {
		pr_err("Channel index (%d) out of range\n", id);
		return NULL;
	}
	i = iface->priv;
	if (unlikely(!i)) {
		pr_err("interface is not registered\n");
		return NULL;
	}
	return i->channel[id];
}

int channel_has_mbo(struct most_interface *iface, int id)
{
	struct most_c_obj *c = get_channel_by_iface(iface, id);
	unsigned long flags;
	int empty;

	if (unlikely(!c))
		return -EINVAL;

	spin_lock_irqsave(&c->fifo_lock, flags);
	empty = list_empty(&c->fifo);
	spin_unlock_irqrestore(&c->fifo_lock, flags);
	return !empty;
}
EXPORT_SYMBOL_GPL(channel_has_mbo);

/**
 * most_get_mbo - get pointer to an MBO of pool
 * @iface: pointer to interface instance
 * @id: channel ID
 *
 * This attempts to get a free buffer out of the channel fifo.
 * Returns a pointer to MBO on success or NULL otherwise.
 */
struct mbo *most_get_mbo(struct most_interface *iface, int id,
			 struct most_aim *aim)
{
	struct mbo *mbo;
	struct most_c_obj *c;
	unsigned long flags;
	int *num_buffers_ptr;

	c = get_channel_by_iface(iface, id);
	if (unlikely(!c))
		return NULL;

	if (c->aim0.refs && c->aim1.refs &&
	    ((aim == c->aim0.ptr && c->aim0.num_buffers <= 0) ||
	     (aim == c->aim1.ptr && c->aim1.num_buffers <= 0)))
		return NULL;

	if (aim == c->aim0.ptr)
		num_buffers_ptr = &c->aim0.num_buffers;
	else if (aim == c->aim1.ptr)
		num_buffers_ptr = &c->aim1.num_buffers;
	else
		num_buffers_ptr = &dummy_num_buffers;

	spin_lock_irqsave(&c->fifo_lock, flags);
	if (list_empty(&c->fifo)) {
		spin_unlock_irqrestore(&c->fifo_lock, flags);
		return NULL;
	}
	mbo = list_pop_mbo(&c->fifo);
	--*num_buffers_ptr;
	spin_unlock_irqrestore(&c->fifo_lock, flags);

	mbo->num_buffers_ptr = num_buffers_ptr;
	mbo->buffer_length = c->cfg.buffer_size;
	return mbo;
}
EXPORT_SYMBOL_GPL(most_get_mbo);

/**
 * most_put_mbo - return buffer to pool
 * @mbo: buffer object
 */
void most_put_mbo(struct mbo *mbo)
{
	struct most_c_obj *c;
	struct most_inst_obj *i;

	c = mbo->context;
	i = c->inst;

	if (unlikely(atomic_read(&i->tainted))) {
		mbo->status = MBO_E_CLOSE;
		trash_mbo(mbo);
		return;
	}
	if (c->cfg.direction == MOST_CH_TX) {
		arm_mbo(mbo);
		return;
	}
	nq_hdm_mbo(mbo);
	atomic_inc(&c->mbo_nq_level);
}
EXPORT_SYMBOL_GPL(most_put_mbo);
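
/*
 * Illustrative sketch (not part of the driver): typical AIM-side use of
 * the buffer pool exported above. For TX, an AIM pops a buffer, fills it
 * and hands it back to the core; for RX, an AIM consumes the buffer in
 * its rx_completion() callback and returns it with most_put_mbo(). The
 * function and payload below are hypothetical.
 */
#if 0	/* example only */
static int example_tx(struct most_interface *iface, int id,
		      struct most_aim *aim, const void *data, size_t size)
{
	struct mbo *mbo = most_get_mbo(iface, id, aim);

	if (!mbo)
		return -EAGAIN;	/* pool empty or AIM quota exhausted */

	if (size > mbo->buffer_length) {
		most_put_mbo(mbo);	/* give the buffer back */
		return -EMSGSIZE;
	}
	memcpy(mbo->virt_address, data, size);
	mbo->buffer_length = size;	/* number of bytes actually used */

	return most_submit_mbo(mbo);	/* queued for the enqueue thread */
}
#endif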

/**
 * most_read_completion - read completion handler
 * @mbo: pointer to MBO
 *
 * This function is called by the HDM when data has been received from the
 * hardware and copied to the buffer of the MBO.
 *
 * In case the channel has been poisoned it puts the buffer in the trash queue.
 * Otherwise, it passes the buffer to an AIM for further processing.
 */
static void most_read_completion(struct mbo *mbo)
{
	struct most_c_obj *c = mbo->context;

	if (unlikely(c->is_poisoned || (mbo->status == MBO_E_CLOSE))) {
		trash_mbo(mbo);
		return;
	}

	if (mbo->status == MBO_E_INVAL) {
		nq_hdm_mbo(mbo);
		atomic_inc(&c->mbo_nq_level);
		return;
	}

	if (atomic_sub_and_test(1, &c->mbo_nq_level)) {
		pr_info("WARN: rx device out of buffers\n");
		c->is_starving = 1;
	}

	if (c->aim0.refs && c->aim0.ptr->rx_completion &&
	    c->aim0.ptr->rx_completion(mbo) == 0)
		return;

	if (c->aim1.refs && c->aim1.ptr->rx_completion &&
	    c->aim1.ptr->rx_completion(mbo) == 0)
		return;

	most_put_mbo(mbo);
}

/**
 * most_start_channel - prepares a channel for communication
 * @iface: pointer to interface instance
 * @id: channel ID
 *
 * This prepares the channel for usage. Cross-checks whether the
 * channel's been properly configured.
 *
 * Returns 0 on success or error code otherwise.
 */
int most_start_channel(struct most_interface *iface, int id,
		       struct most_aim *aim)
{
	int num_buffer;
	int ret;
	struct most_c_obj *c = get_channel_by_iface(iface, id);

	if (unlikely(!c))
		return -EINVAL;

	mutex_lock(&c->start_mutex);
	if (c->aim0.refs + c->aim1.refs > 0)
		goto out; /* already started by other aim */

	if (!try_module_get(iface->mod)) {
		pr_info("failed to acquire HDM lock\n");
		mutex_unlock(&c->start_mutex);
		return -ENOLCK;
	}
	modref++;

	c->cfg.extra_len = 0;
	if (c->iface->configure(c->iface, c->channel_id, &c->cfg)) {
		pr_info("channel configuration failed. Go check settings...\n");
		ret = -EINVAL;
		goto error;
	}

	init_waitqueue_head(&c->hdm_fifo_wq);

	if (c->cfg.direction == MOST_CH_RX)
		num_buffer = arm_mbo_chain(c, c->cfg.direction,
					   most_read_completion);
	else
		num_buffer = arm_mbo_chain(c, c->cfg.direction,
					   most_write_completion);
	if (unlikely(!num_buffer)) {
		pr_info("failed to allocate memory\n");
		ret = -ENOMEM;
		goto error;
	}

	ret = run_enqueue_thread(c, id);
	if (ret)
		goto error;

	c->is_starving = 0;
	c->aim0.num_buffers = c->cfg.num_buffers / 2;
	c->aim1.num_buffers = c->cfg.num_buffers - c->aim0.num_buffers;
	atomic_set(&c->mbo_ref, num_buffer);

out:
	if (aim == c->aim0.ptr)
		c->aim0.refs++;
	if (aim == c->aim1.ptr)
		c->aim1.refs++;
	mutex_unlock(&c->start_mutex);
	return 0;

error:
	module_put(iface->mod);
	modref--;
	mutex_unlock(&c->start_mutex);
	return ret;
}
EXPORT_SYMBOL_GPL(most_start_channel);

/**
 * most_stop_channel - stops a running channel
 * @iface: pointer to interface instance
 * @id: channel ID
 */
int most_stop_channel(struct most_interface *iface, int id,
		      struct most_aim *aim)
{
	struct most_c_obj *c;

	if (unlikely((!iface) || (id >= iface->num_channels) || (id < 0))) {
		pr_err("Bad interface or index out of range\n");
		return -EINVAL;
	}
	c = get_channel_by_iface(iface, id);
	if (unlikely(!c))
		return -EINVAL;

	mutex_lock(&c->start_mutex);
	if (c->aim0.refs + c->aim1.refs >= 2)
		goto out;

	mutex_lock(&c->stop_task_mutex);
	if (c->hdm_enqueue_task)
		kthread_stop(c->hdm_enqueue_task);
	c->hdm_enqueue_task = NULL;
	mutex_unlock(&c->stop_task_mutex);

	mutex_lock(&deregister_mutex);
	if (atomic_read(&c->inst->tainted)) {
		mutex_unlock(&deregister_mutex);
		mutex_unlock(&c->start_mutex);
		return -ENODEV;
	}
	mutex_unlock(&deregister_mutex);

	if (iface->mod && modref) {
		module_put(iface->mod);
		modref--;
	}

	c->is_poisoned = true;
	if (c->iface->poison_channel(c->iface, c->channel_id)) {
		pr_err("Cannot stop channel %d of mdev %s\n", c->channel_id,
		       c->iface->description);
		mutex_unlock(&c->start_mutex);
		return -EAGAIN;
	}
	flush_trash_fifo(c);
	flush_channel_fifos(c);

#ifdef CMPL_INTERRUPTIBLE
	if (wait_for_completion_interruptible(&c->cleanup)) {
		pr_info("Interrupted while clean up ch %d\n", c->channel_id);
		mutex_unlock(&c->start_mutex);
		return -EINTR;
	}
#else
	wait_for_completion(&c->cleanup);
#endif
	c->is_poisoned = false;

out:
	if (aim == c->aim0.ptr)
		c->aim0.refs--;
	if (aim == c->aim1.ptr)
		c->aim1.refs--;
	mutex_unlock(&c->start_mutex);
	return 0;
}
EXPORT_SYMBOL_GPL(most_stop_channel);

/**
 * most_register_aim - registers an AIM (driver) with the core
 * @aim: instance of AIM to be registered
 */
int most_register_aim(struct most_aim *aim)
{
	struct most_aim_obj *aim_obj;

	if (!aim) {
		pr_err("Bad driver\n");
		return -EINVAL;
	}
	aim_obj = create_most_aim_obj(aim->name);
	if (!aim_obj) {
		pr_info("failed to alloc driver object\n");
		return -ENOMEM;
	}
	aim_obj->driver = aim;
	aim->context = aim_obj;
	pr_info("registered new application interfacing module %s\n",
		aim->name);
	list_add_tail(&aim_obj->list, &aim_list);
	return 0;
}
EXPORT_SYMBOL_GPL(most_register_aim);
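
/*
 * Illustrative sketch (not part of the driver): the minimal shape of an
 * AIM module registering with the core. The callback names are
 * hypothetical; only the struct most_aim hooks this file invokes
 * (probe_channel, disconnect_channel, and optionally rx_completion/
 * tx_completion) are shown.
 */
#if 0	/* example only */
static int example_probe_channel(struct most_interface *iface, int ch_id,
				 struct most_channel_config *cfg,
				 struct kobject *parent, char *name)
{
	/* remember iface/ch_id and create the user-facing device here */
	return 0;
}

static int example_disconnect_channel(struct most_interface *iface,
				      int ch_id)
{
	return 0;
}

static struct most_aim example_aim = {
	.name = "example",
	.probe_channel = example_probe_channel,
	.disconnect_channel = example_disconnect_channel,
};

/* wired up via module_init()/module_exit() in a real AIM */
static int __init example_init(void)
{
	return most_register_aim(&example_aim);
}

static void __exit example_exit(void)
{
	most_deregister_aim(&example_aim);
}
#endif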

/**
 * most_deregister_aim - deregisters an AIM (driver) from the core
 * @aim: AIM to be removed
 */
int most_deregister_aim(struct most_aim *aim)
{
	struct most_aim_obj *aim_obj;
	struct most_c_obj *c, *tmp;
	struct most_inst_obj *i, *i_tmp;

	if (!aim) {
		pr_err("Bad driver\n");
		return -EINVAL;
	}

	aim_obj = aim->context;
	if (!aim_obj) {
		pr_info("driver not registered.\n");
		return -EINVAL;
	}
	list_for_each_entry_safe(i, i_tmp, &instance_list, list) {
		list_for_each_entry_safe(c, tmp, &i->channel_list, list) {
			if (c->aim0.ptr == aim || c->aim1.ptr == aim)
				aim->disconnect_channel(
					c->iface, c->channel_id);
			if (c->aim0.ptr == aim)
				c->aim0.ptr = NULL;
			if (c->aim1.ptr == aim)
				c->aim1.ptr = NULL;
		}
	}
	list_del(&aim_obj->list);
	destroy_most_aim_obj(aim_obj);
	pr_info("deregistering application interfacing module %s\n", aim->name);
	return 0;
}
EXPORT_SYMBOL_GPL(most_deregister_aim);

/**
 * most_register_interface - registers an interface with core
 * @iface: pointer to the instance of the interface description.
 *
 * Allocates and initializes a new interface instance and all of its channels.
 * Returns a pointer to kobject or an error pointer.
 */
struct kobject *most_register_interface(struct most_interface *iface)
{
	unsigned int i;
	int id;
	char name[STRING_SIZE];
	char channel_name[STRING_SIZE];
	struct most_c_obj *c;
	struct most_inst_obj *inst;

	if (!iface || !iface->enqueue || !iface->configure ||
	    !iface->poison_channel || (iface->num_channels > MAX_CHANNELS)) {
		pr_err("Bad interface or channel overflow\n");
		return ERR_PTR(-EINVAL);
	}

	id = ida_simple_get(&mdev_id, 0, 0, GFP_KERNEL);
	if (id < 0) {
		pr_info("Failed to alloc mdev ID\n");
		return ERR_PTR(id);
	}
	snprintf(name, STRING_SIZE, "mdev%d", id);

	inst = create_most_inst_obj(name);
	if (!inst) {
		pr_info("Failed to allocate interface instance\n");
		return ERR_PTR(-ENOMEM);
	}

	iface->priv = inst;
	INIT_LIST_HEAD(&inst->channel_list);
	inst->iface = iface;
	inst->dev_id = id;
	atomic_set(&inst->tainted, 0);
	list_add_tail(&inst->list, &instance_list);

	for (i = 0; i < iface->num_channels; i++) {
		const char *name_suffix = iface->channel_vector[i].name_suffix;

		if (!name_suffix)
			snprintf(channel_name, STRING_SIZE, "ch%d", i);
		else if (name_suffix[0] == '@')
			snprintf(channel_name, STRING_SIZE, "ch%d%s", i,
				 name_suffix);
		else
			snprintf(channel_name, STRING_SIZE, "%s", name_suffix);

		/* this increments the reference count of this instance */
		c = create_most_c_obj(channel_name, &inst->kobj);
		if (!c)
			goto free_instance;
		inst->channel[i] = c;
		c->is_starving = 0;
		c->iface = iface;
		c->inst = inst;
		c->channel_id = i;
		c->keep_mbo = false;
		c->enqueue_halt = false;
		c->is_poisoned = false;
		c->cfg.direction = 0;
		c->cfg.data_type = 0;
		c->cfg.num_buffers = 0;
		c->cfg.buffer_size = 0;
		c->cfg.subbuffer_size = 0;
		c->cfg.packets_per_xact = 0;
		spin_lock_init(&c->fifo_lock);
		INIT_LIST_HEAD(&c->fifo);
		INIT_LIST_HEAD(&c->trash_fifo);
		INIT_LIST_HEAD(&c->halt_fifo);
		init_completion(&c->cleanup);
		atomic_set(&c->mbo_ref, 0);
		mutex_init(&c->start_mutex);
		mutex_init(&c->stop_task_mutex);
		list_add_tail(&c->list, &inst->channel_list);
	}
	pr_info("registered new MOST device mdev%d (%s)\n",
		inst->dev_id, iface->description);
	return &inst->kobj;

free_instance:
	pr_info("Failed to allocate channel(s)\n");
	list_del(&inst->list);
	destroy_most_inst_obj(inst);
	return ERR_PTR(-ENOMEM);
}
EXPORT_SYMBOL_GPL(most_register_interface);
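
/*
 * Illustrative sketch (not part of the driver): the minimal shape of an
 * HDM registering one channel with the core. Callback names and the
 * descriptor values are hypothetical, and struct most_channel_capability
 * is assumed to be declared in mostcore.h with the members this file
 * reads (direction, data_type, num_buffers_packet, buffer_size_packet,
 * name_suffix).
 */
#if 0	/* example only */
static int example_configure(struct most_interface *iface, int ch_id,
			     struct most_channel_config *cfg)
{
	return 0;	/* accept the configuration */
}

static int example_enqueue(struct most_interface *iface, int ch_id,
			   struct mbo *mbo)
{
	/* hand the buffer to the hardware; complete it via mbo->complete() */
	return 0;
}

static int example_poison_channel(struct most_interface *iface, int ch_id)
{
	/* fail all pending buffers and return them to the core */
	return 0;
}

static struct most_channel_capability example_caps[] = {
	{
		.direction = MOST_CH_RX | MOST_CH_TX,
		.data_type = MOST_CH_CONTROL | MOST_CH_ASYNC,
		.num_buffers_packet = 32,
		.buffer_size_packet = 512,
		.name_suffix = "@ep_81",
	},
};

static struct most_interface example_iface = {
	.interface = ITYPE_LOOPBACK,
	.description = "example HDM",
	.num_channels = ARRAY_SIZE(example_caps),
	.channel_vector = example_caps,
	.configure = example_configure,
	.enqueue = example_enqueue,
	.poison_channel = example_poison_channel,
};

/* in the HDM's probe path */
static int example_hdm_probe(void)
{
	struct kobject *kobj = most_register_interface(&example_iface);

	if (IS_ERR(kobj))
		return PTR_ERR(kobj);
	return 0;
}
#endif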

/**
 * most_deregister_interface - deregisters an interface from the core
 * @iface: pointer to the interface instance description.
 *
 * Before removing an interface instance from the list, all running
 * channels are stopped and poisoned.
 */
void most_deregister_interface(struct most_interface *iface)
{
	struct most_inst_obj *i = iface->priv;
	struct most_c_obj *c;

	mutex_lock(&deregister_mutex);
	if (unlikely(!i)) {
		pr_info("Bad Interface\n");
		mutex_unlock(&deregister_mutex);
		return;
	}
	pr_info("deregistering MOST device %s (%s)\n", i->kobj.name,
		iface->description);

	atomic_set(&i->tainted, 1);
	mutex_unlock(&deregister_mutex);

	while (modref) {
		if (iface->mod && modref)
			module_put(iface->mod);
		modref--;
	}

	list_for_each_entry(c, &i->channel_list, list) {
		if (c->aim0.refs + c->aim1.refs <= 0)
			continue;

		mutex_lock(&c->stop_task_mutex);
		if (c->hdm_enqueue_task)
			kthread_stop(c->hdm_enqueue_task);
		c->hdm_enqueue_task = NULL;
		mutex_unlock(&c->stop_task_mutex);

		if (iface->poison_channel(iface, c->channel_id))
			pr_err("Can't poison channel %d\n", c->channel_id);
	}
	ida_simple_remove(&mdev_id, i->dev_id);
	list_del(&i->list);
	destroy_most_inst_obj(i);
}
EXPORT_SYMBOL_GPL(most_deregister_interface);

/**
 * most_stop_enqueue - prevents core from enqueueing MBOs
 * @iface: pointer to interface
 * @id: channel id
 *
 * This is called by an HDM that _cannot_ attend to its duties and
 * is about to get run over by the core. The core is not going to
 * enqueue any further packets unless the flagging HDM calls
 * most_resume_enqueue().
 */
void most_stop_enqueue(struct most_interface *iface, int id)
{
	struct most_c_obj *c = get_channel_by_iface(iface, id);

	if (likely(c))
		c->enqueue_halt = true;
}
EXPORT_SYMBOL_GPL(most_stop_enqueue);

/**
 * most_resume_enqueue - allow core to enqueue MBOs again
 * @iface: pointer to interface
 * @id: channel id
 *
 * This clears the enqueue halt flag and enqueues all MBOs currently
 * sitting in the wait fifo.
 */
void most_resume_enqueue(struct most_interface *iface, int id)
{
	struct most_c_obj *c = get_channel_by_iface(iface, id);

	if (unlikely(!c))
		return;
	c->enqueue_halt = false;

	wake_up_interruptible(&c->hdm_fifo_wq);
}
EXPORT_SYMBOL_GPL(most_resume_enqueue);
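
/*
 * Illustrative sketch (not part of the driver): an HDM that runs out of
 * hardware queue space can throttle the core and resume later, e.g. from
 * its completion handler. Function names are hypothetical.
 */
#if 0	/* example only */
/* in the HDM's enqueue path, when the hardware queue fills up */
static void example_hw_queue_full(struct most_interface *iface, int ch_id)
{
	most_stop_enqueue(iface, ch_id);
}

/* later, when completions have drained some hardware buffers */
static void example_hw_queue_drained(struct most_interface *iface, int ch_id)
{
	most_resume_enqueue(iface, ch_id);
}
#endif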
1903
1904static int __init most_init(void)
1905{
1906 pr_info("init()\n");
1907 INIT_LIST_HEAD(&instance_list);
1908 INIT_LIST_HEAD(&aim_list);
1909 mutex_init(&deregister_mutex);
1910 ida_init(&mdev_id);
1911
1912 if (bus_register(&most_bus)) {
1913 pr_info("Cannot register most bus\n");
1914 goto exit;
1915 }
1916
1917 most_class = class_create(THIS_MODULE, "most");
1918 if (IS_ERR(most_class)) {
1919 pr_info("No udev support.\n");
1920 goto exit_bus;
1921 }
1922 if (driver_register(&mostcore)) {
1923 pr_info("Cannot register core driver\n");
1924 goto exit_class;
1925 }
1926
1927 class_glue_dir =
1928 device_create(most_class, NULL, 0, NULL, "mostcore");
1929 if (!class_glue_dir)
1930 goto exit_driver;
1931
1932 most_aim_kset =
1933 kset_create_and_add("aims", NULL, &class_glue_dir->kobj);
1934 if (!most_aim_kset)
1935 goto exit_class_container;
1936
1937 most_inst_kset =
1938 kset_create_and_add("devices", NULL, &class_glue_dir->kobj);
1939 if (!most_inst_kset)
1940 goto exit_driver_kset;
1941
1942 return 0;
1943
1944exit_driver_kset:
1945 kset_unregister(most_aim_kset);
1946exit_class_container:
1947 device_destroy(most_class, 0);
1948exit_driver:
1949 driver_unregister(&mostcore);
1950exit_class:
1951 class_destroy(most_class);
1952exit_bus:
1953 bus_unregister(&most_bus);
1954exit:
1955 return -ENOMEM;
1956}
1957
1958static void __exit most_exit(void)
1959{
1960 struct most_inst_obj *i, *i_tmp;
1961 struct most_aim_obj *d, *d_tmp;
1962
1963 pr_info("exit core module\n");
1964 list_for_each_entry_safe(d, d_tmp, &aim_list, list) {
1965 destroy_most_aim_obj(d);
1966 }
1967
1968 list_for_each_entry_safe(i, i_tmp, &instance_list, list) {
1969 list_del(&i->list);
1970 destroy_most_inst_obj(i);
1971 }
1972 kset_unregister(most_inst_kset);
1973 kset_unregister(most_aim_kset);
1974 device_destroy(most_class, 0);
1975 driver_unregister(&mostcore);
1976 class_destroy(most_class);
1977 bus_unregister(&most_bus);
1978 ida_destroy(&mdev_id);
1979}
1980
1981module_init(most_init);
1982module_exit(most_exit);
1983MODULE_LICENSE("GPL");
1984MODULE_AUTHOR("Christian Gromm <christian.gromm@microchip.com>");
1985MODULE_DESCRIPTION("Core module of stacked MOST Linux driver");