/*
 * Loopback bridge driver for the Greybus loopback module.
 *
 * Copyright 2014 Google Inc.
 * Copyright 2014 Linaro Ltd.
 *
 * Released under the GPLv2 only.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/slab.h>
#include <linux/kthread.h>
#include <linux/delay.h>
#include <linux/random.h>
#include <linux/sizes.h>
#include <linux/cdev.h>
#include <linux/fs.h>
#include <linux/kfifo.h>
#include <linux/debugfs.h>
#include <linux/list_sort.h>
#include <linux/spinlock.h>
#include <linux/workqueue.h>
#include <linux/atomic.h>

#include <asm/div64.h>

#include "greybus.h"
#include "connection.h"

#define NSEC_PER_DAY 86400000000000ULL

struct gb_loopback_stats {
	u32 min;
	u32 max;
	u64 sum;
	u32 count;
};

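/*
 * Driver-wide state shared by all loopback connections: the debugfs root,
 * the sorted list of active loopback devices and the list of in-flight
 * asynchronous operations, protected by a spinlock usable in atomic context.
 */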
struct gb_loopback_device {
	struct dentry *root;
	u32 count;
	size_t size_max;

	/* We need to take a lock in atomic context */
	spinlock_t lock;
	struct list_head list;
	struct list_head list_op_async;
	wait_queue_head_t wq;
};

static struct gb_loopback_device gb_dev;

struct gb_loopback_async_operation {
	struct gb_loopback *gb;
	struct gb_operation *operation;
	struct timeval ts;
	struct timer_list timer;
	struct list_head entry;
	struct work_struct work;
	struct kref kref;
	bool pending;
	int (*completion)(struct gb_loopback_async_operation *op_async);
};

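/* Per-connection state: configuration, statistics and the worker thread. */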
struct gb_loopback {
	struct gb_connection *connection;

	struct dentry *file;
	struct kfifo kfifo_lat;
	struct kfifo kfifo_ts;
	struct mutex mutex;
	struct task_struct *task;
	struct list_head entry;
	struct device *dev;
	wait_queue_head_t wq;
	wait_queue_head_t wq_completion;
	atomic_t outstanding_operations;

	/* Per connection stats */
	struct timeval ts;
	struct gb_loopback_stats latency;
	struct gb_loopback_stats throughput;
	struct gb_loopback_stats requests_per_second;
	struct gb_loopback_stats apbridge_unipro_latency;
	struct gb_loopback_stats gpbridge_firmware_latency;

	int type;
	int async;
	int id;
	u32 size;
	u32 iteration_max;
	u32 iteration_count;
	int us_wait;
	u32 error;
	u32 requests_completed;
	u32 requests_timedout;
	u32 timeout;
	u32 jiffy_timeout;
	u32 timeout_min;
	u32 timeout_max;
	u32 outstanding_operations_max;
	u32 lbid;
	u64 elapsed_nsecs;
	u32 apbridge_latency_ts;
	u32 gpbridge_latency_ts;
};

static struct class loopback_class = {
	.name		= "gb_loopback",
	.owner		= THIS_MODULE,
};
static DEFINE_IDA(loopback_ida);

/* Min/max values in jiffies */
#define GB_LOOPBACK_TIMEOUT_MIN		1
#define GB_LOOPBACK_TIMEOUT_MAX		10000

#define GB_LOOPBACK_FIFO_DEFAULT	8192

static unsigned kfifo_depth = GB_LOOPBACK_FIFO_DEFAULT;
module_param(kfifo_depth, uint, 0444);

/* Maximum size of any one send data buffer we support */
#define MAX_PACKET_SIZE (PAGE_SIZE * 2)

#define GB_LOOPBACK_US_WAIT_MAX		1000000

/* interface sysfs attributes */
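/*
 * Statistics fields are exposed as read-only min/max/avg files; configuration
 * fields are read/write and are re-validated by gb_loopback_check_attr() on
 * every successful store.
 */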
#define gb_loopback_ro_attr(field)				\
static ssize_t field##_show(struct device *dev,		\
			    struct device_attribute *attr,	\
			    char *buf)				\
{								\
	struct gb_loopback *gb = dev_get_drvdata(dev);		\
	return sprintf(buf, "%u\n", gb->field);			\
}								\
static DEVICE_ATTR_RO(field)

#define gb_loopback_ro_stats_attr(name, field, type)		\
static ssize_t name##_##field##_show(struct device *dev,	\
			    struct device_attribute *attr,	\
			    char *buf)				\
{								\
	struct gb_loopback *gb = dev_get_drvdata(dev);		\
	return sprintf(buf, "%"#type"\n", gb->name.field);	\
}								\
static DEVICE_ATTR_RO(name##_##field)

#define gb_loopback_ro_avg_attr(name)				\
static ssize_t name##_avg_show(struct device *dev,		\
			    struct device_attribute *attr,	\
			    char *buf)				\
{								\
	struct gb_loopback_stats *stats;			\
	struct gb_loopback *gb;					\
	u64 avg, rem;						\
	u32 count;						\
	gb = dev_get_drvdata(dev);				\
	stats = &gb->name;					\
	count = stats->count ? stats->count : 1;		\
	avg = stats->sum;					\
	rem = do_div(avg, count);				\
	rem = 1000000 * rem / count;				\
	return sprintf(buf, "%llu.%06u\n", avg, (u32)rem);	\
}								\
static DEVICE_ATTR_RO(name##_avg)

#define gb_loopback_stats_attrs(field)				\
	gb_loopback_ro_stats_attr(field, min, u);		\
	gb_loopback_ro_stats_attr(field, max, u);		\
	gb_loopback_ro_avg_attr(field)

#define gb_loopback_attr(field, type)					\
static ssize_t field##_show(struct device *dev,				\
			    struct device_attribute *attr,		\
			    char *buf)					\
{									\
	struct gb_loopback *gb = dev_get_drvdata(dev);			\
	return sprintf(buf, "%"#type"\n", gb->field);			\
}									\
static ssize_t field##_store(struct device *dev,			\
			    struct device_attribute *attr,		\
			    const char *buf,				\
			    size_t len)					\
{									\
	int ret;							\
	struct gb_loopback *gb = dev_get_drvdata(dev);			\
	mutex_lock(&gb->mutex);						\
	ret = sscanf(buf, "%"#type, &gb->field);			\
	if (ret != 1)							\
		len = -EINVAL;						\
	else								\
		gb_loopback_check_attr(gb);				\
	mutex_unlock(&gb->mutex);					\
	return len;							\
}									\
static DEVICE_ATTR_RW(field)

#define gb_dev_loopback_ro_attr(field, conn)				\
static ssize_t field##_show(struct device *dev,			\
			    struct device_attribute *attr,		\
			    char *buf)					\
{									\
	struct gb_loopback *gb = dev_get_drvdata(dev);			\
	return sprintf(buf, "%u\n", gb->field);				\
}									\
static DEVICE_ATTR_RO(field)

#define gb_dev_loopback_rw_attr(field, type)				\
static ssize_t field##_show(struct device *dev,			\
			    struct device_attribute *attr,		\
			    char *buf)					\
{									\
	struct gb_loopback *gb = dev_get_drvdata(dev);			\
	return sprintf(buf, "%"#type"\n", gb->field);			\
}									\
static ssize_t field##_store(struct device *dev,			\
			    struct device_attribute *attr,		\
			    const char *buf,				\
			    size_t len)					\
{									\
	int ret;							\
	struct gb_loopback *gb = dev_get_drvdata(dev);			\
	mutex_lock(&gb->mutex);						\
	ret = sscanf(buf, "%"#type, &gb->field);			\
	if (ret != 1)							\
		len = -EINVAL;						\
	else								\
		gb_loopback_check_attr(gb);				\
	mutex_unlock(&gb->mutex);					\
	return len;							\
}									\
static DEVICE_ATTR_RW(field)

static void gb_loopback_reset_stats(struct gb_loopback *gb);
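
/*
 * Clamp the user-supplied wait time and payload size, reset the error and
 * iteration counters and, for a valid test type, compute the per-operation
 * timeout and wake the worker thread.
 */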
static void gb_loopback_check_attr(struct gb_loopback *gb)
{
	if (gb->us_wait > GB_LOOPBACK_US_WAIT_MAX)
		gb->us_wait = GB_LOOPBACK_US_WAIT_MAX;
	if (gb->size > gb_dev.size_max)
		gb->size = gb_dev.size_max;
	gb->requests_timedout = 0;
	gb->requests_completed = 0;
	gb->iteration_count = 0;
	gb->error = 0;

	if (kfifo_depth < gb->iteration_max) {
		dev_warn(gb->dev,
			 "cannot log bytes %u kfifo_depth %u\n",
			 gb->iteration_max, kfifo_depth);
	}
	kfifo_reset_out(&gb->kfifo_lat);
	kfifo_reset_out(&gb->kfifo_ts);

	switch (gb->type) {
	case GB_LOOPBACK_TYPE_PING:
	case GB_LOOPBACK_TYPE_TRANSFER:
	case GB_LOOPBACK_TYPE_SINK:
		gb->jiffy_timeout = usecs_to_jiffies(gb->timeout);
		if (!gb->jiffy_timeout)
			gb->jiffy_timeout = GB_LOOPBACK_TIMEOUT_MIN;
		else if (gb->jiffy_timeout > GB_LOOPBACK_TIMEOUT_MAX)
			gb->jiffy_timeout = GB_LOOPBACK_TIMEOUT_MAX;
		gb_loopback_reset_stats(gb);
		wake_up(&gb->wq);
		break;
	default:
		gb->type = 0;
		break;
	}
}

/* Time to send and receive one message */
gb_loopback_stats_attrs(latency);
/* Number of requests sent per second on this cport */
gb_loopback_stats_attrs(requests_per_second);
/* Quantity of data sent and received on this cport */
gb_loopback_stats_attrs(throughput);
/* Latency across the UniPro link from APBridge's perspective */
gb_loopback_stats_attrs(apbridge_unipro_latency);
/* Firmware induced overhead in the GPBridge */
gb_loopback_stats_attrs(gpbridge_firmware_latency);

/* Number of errors encountered during loop */
gb_loopback_ro_attr(error);
/* Number of requests successfully completed async */
gb_loopback_ro_attr(requests_completed);
/* Number of requests timed out async */
gb_loopback_ro_attr(requests_timedout);
/* Timeout minimum in microseconds */
gb_loopback_ro_attr(timeout_min);
/* Timeout maximum in microseconds */
gb_loopback_ro_attr(timeout_max);

/*
 * Type of loopback message to send based on protocol type definitions
 * 0 => Don't send message
 * 2 => Send ping message continuously (message without payload)
 * 3 => Send transfer message continuously (message with payload,
 *	payload returned in response)
 * 4 => Send a sink message (message with payload, no payload in response)
 */
gb_dev_loopback_rw_attr(type, d);
/* Size of transfer message payload: 0-4096 bytes */
gb_dev_loopback_rw_attr(size, u);
/* Time to wait between two messages: 0-1000 ms */
gb_dev_loopback_rw_attr(us_wait, d);
/* Maximum iterations for a given operation: 1-(2^32-1), 0 implies infinite */
gb_dev_loopback_rw_attr(iteration_max, u);
/* The current index of the for (i = 0; i < iteration_max; i++) loop */
gb_dev_loopback_ro_attr(iteration_count, false);
/* A flag to indicate synchronous or asynchronous operations */
gb_dev_loopback_rw_attr(async, u);
/* Timeout of an individual asynchronous request */
gb_dev_loopback_rw_attr(timeout, u);
/* Maximum number of in-flight operations before back-off */
gb_dev_loopback_rw_attr(outstanding_operations_max, u);

static struct attribute *loopback_attrs[] = {
	&dev_attr_latency_min.attr,
	&dev_attr_latency_max.attr,
	&dev_attr_latency_avg.attr,
	&dev_attr_requests_per_second_min.attr,
	&dev_attr_requests_per_second_max.attr,
	&dev_attr_requests_per_second_avg.attr,
	&dev_attr_throughput_min.attr,
	&dev_attr_throughput_max.attr,
	&dev_attr_throughput_avg.attr,
	&dev_attr_apbridge_unipro_latency_min.attr,
	&dev_attr_apbridge_unipro_latency_max.attr,
	&dev_attr_apbridge_unipro_latency_avg.attr,
	&dev_attr_gpbridge_firmware_latency_min.attr,
	&dev_attr_gpbridge_firmware_latency_max.attr,
	&dev_attr_gpbridge_firmware_latency_avg.attr,
	&dev_attr_type.attr,
	&dev_attr_size.attr,
	&dev_attr_us_wait.attr,
	&dev_attr_iteration_count.attr,
	&dev_attr_iteration_max.attr,
	&dev_attr_async.attr,
	&dev_attr_error.attr,
	&dev_attr_requests_completed.attr,
	&dev_attr_requests_timedout.attr,
	&dev_attr_timeout.attr,
	&dev_attr_outstanding_operations_max.attr,
	&dev_attr_timeout_min.attr,
	&dev_attr_timeout_max.attr,
	NULL,
};
ATTRIBUTE_GROUPS(loopback);

static void gb_loopback_calculate_stats(struct gb_loopback *gb, bool error);

static u32 gb_loopback_nsec_to_usec_latency(u64 elapsed_nsecs)
{
	u32 lat;

	do_div(elapsed_nsecs, NSEC_PER_USEC);
	lat = elapsed_nsecs;
	return lat;
}

static u64 __gb_loopback_calc_latency(u64 t1, u64 t2)
{
	if (t2 > t1)
		return t2 - t1;
	else
		return NSEC_PER_DAY - t2 + t1;
}

static u64 gb_loopback_calc_latency(struct timeval *ts, struct timeval *te)
{
	u64 t1, t2;

	t1 = timeval_to_ns(ts);
	t2 = timeval_to_ns(te);

	return __gb_loopback_calc_latency(t1, t2);
}

static void gb_loopback_push_latency_ts(struct gb_loopback *gb,
					struct timeval *ts, struct timeval *te)
{
	kfifo_in(&gb->kfifo_ts, (unsigned char *)ts, sizeof(*ts));
	kfifo_in(&gb->kfifo_ts, (unsigned char *)te, sizeof(*te));
}

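/*
 * Issue a single synchronous loopback operation, copy back the response
 * payload and record the request/response timestamps for latency accounting.
 */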
static int gb_loopback_operation_sync(struct gb_loopback *gb, int type,
				      void *request, int request_size,
				      void *response, int response_size)
{
	struct gb_operation *operation;
	struct timeval ts, te;
	int ret;

	do_gettimeofday(&ts);
	operation = gb_operation_create(gb->connection, type, request_size,
					response_size, GFP_KERNEL);
	if (!operation)
		return -ENOMEM;

	if (request_size)
		memcpy(operation->request->payload, request, request_size);

	ret = gb_operation_request_send_sync(operation);
	if (ret) {
		dev_err(&gb->connection->bundle->dev,
			"synchronous operation failed: %d\n", ret);
		goto out_put_operation;
	} else {
		if (response_size == operation->response->payload_size) {
			memcpy(response, operation->response->payload,
			       response_size);
		} else {
			dev_err(&gb->connection->bundle->dev,
				"response size %zu expected %d\n",
				operation->response->payload_size,
				response_size);
			ret = -EINVAL;
			goto out_put_operation;
		}
	}

	do_gettimeofday(&te);

	/* Calculate the total time the message took */
	gb_loopback_push_latency_ts(gb, &ts, &te);
	gb->elapsed_nsecs = gb_loopback_calc_latency(&ts, &te);

out_put_operation:
	gb_operation_put(operation);

	return ret;
}

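/*
 * Asynchronous operations are tracked on gb_dev.list_op_async and reference
 * counted; the submission reference is dropped by whichever of the completion
 * callback or the timeout handler clears the pending flag first.
 */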
static void __gb_loopback_async_operation_destroy(struct kref *kref)
{
	struct gb_loopback_async_operation *op_async;

	op_async = container_of(kref, struct gb_loopback_async_operation, kref);

	list_del(&op_async->entry);
	if (op_async->operation)
		gb_operation_put(op_async->operation);
	atomic_dec(&op_async->gb->outstanding_operations);
	wake_up(&op_async->gb->wq_completion);
	kfree(op_async);
}

static void gb_loopback_async_operation_get(struct gb_loopback_async_operation
					    *op_async)
{
	kref_get(&op_async->kref);
}

static void gb_loopback_async_operation_put(struct gb_loopback_async_operation
					    *op_async)
{
	unsigned long flags;

	spin_lock_irqsave(&gb_dev.lock, flags);
	kref_put(&op_async->kref, __gb_loopback_async_operation_destroy);
	spin_unlock_irqrestore(&gb_dev.lock, flags);
}

static struct gb_loopback_async_operation *
	gb_loopback_operation_find(u16 id)
{
	struct gb_loopback_async_operation *op_async;
	bool found = false;
	unsigned long flags;

	spin_lock_irqsave(&gb_dev.lock, flags);
	list_for_each_entry(op_async, &gb_dev.list_op_async, entry) {
		if (op_async->operation->id == id) {
			gb_loopback_async_operation_get(op_async);
			found = true;
			break;
		}
	}
	spin_unlock_irqrestore(&gb_dev.lock, flags);

	return found ? op_async : NULL;
}

static void gb_loopback_async_wait_all(struct gb_loopback *gb)
{
	wait_event(gb->wq_completion,
		   !atomic_read(&gb->outstanding_operations));
}

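/*
 * Completion callback for asynchronous operations: update the error count and
 * latency statistics, then drop the pending and lookup references.
 */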
static void gb_loopback_async_operation_callback(struct gb_operation *operation)
{
	struct gb_loopback_async_operation *op_async;
	struct gb_loopback *gb;
	struct timeval te;
	bool err = false;

	do_gettimeofday(&te);
	op_async = gb_loopback_operation_find(operation->id);
	if (!op_async)
		return;

	gb = op_async->gb;
	mutex_lock(&gb->mutex);

	if (!op_async->pending || gb_operation_result(operation)) {
		err = true;
	} else {
		if (op_async->completion)
			if (op_async->completion(op_async))
				err = true;
	}

	if (err) {
		gb->error++;
	} else {
		gb_loopback_push_latency_ts(gb, &op_async->ts, &te);
		gb->elapsed_nsecs = gb_loopback_calc_latency(&op_async->ts,
							     &te);
	}

	if (op_async->pending) {
		gb->iteration_count++;
		op_async->pending = false;
		del_timer_sync(&op_async->timer);
		gb_loopback_async_operation_put(op_async);
		gb_loopback_calculate_stats(gb, err);
	}
	mutex_unlock(&gb->mutex);

	dev_dbg(&gb->connection->bundle->dev, "complete operation %d\n",
		operation->id);

	gb_loopback_async_operation_put(op_async);
}

static void gb_loopback_async_operation_work(struct work_struct *work)
{
	struct gb_loopback *gb;
	struct gb_operation *operation;
	struct gb_loopback_async_operation *op_async;

	op_async = container_of(work, struct gb_loopback_async_operation, work);
	gb = op_async->gb;
	operation = op_async->operation;

	mutex_lock(&gb->mutex);
	if (op_async->pending) {
		gb->requests_timedout++;
		gb->error++;
		gb->iteration_count++;
		op_async->pending = false;
		gb_loopback_async_operation_put(op_async);
		gb_loopback_calculate_stats(gb, true);
	}
	mutex_unlock(&gb->mutex);

	dev_dbg(&gb->connection->bundle->dev, "timeout operation %d\n",
		operation->id);

	gb_operation_cancel(operation, -ETIMEDOUT);
	gb_loopback_async_operation_put(op_async);
}

static void gb_loopback_async_operation_timeout(unsigned long data)
{
	struct gb_loopback_async_operation *op_async;
	u16 id = data;

	op_async = gb_loopback_operation_find(id);
	if (!op_async) {
		pr_err("operation %d not found - time out ?\n", id);
		return;
	}
	schedule_work(&op_async->work);
}

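/*
 * Submit one asynchronous loopback operation: allocate the bookkeeping
 * structure, send the request with a completion callback and arm a timer
 * that cancels the operation if no response arrives within jiffy_timeout.
 */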
static int gb_loopback_async_operation(struct gb_loopback *gb, int type,
				       void *request, int request_size,
				       int response_size,
				       void *completion)
{
	struct gb_loopback_async_operation *op_async;
	struct gb_operation *operation;
	int ret;
	unsigned long flags;

	op_async = kzalloc(sizeof(*op_async), GFP_KERNEL);
	if (!op_async)
		return -ENOMEM;

	INIT_WORK(&op_async->work, gb_loopback_async_operation_work);
	init_timer(&op_async->timer);
	kref_init(&op_async->kref);

	operation = gb_operation_create(gb->connection, type, request_size,
					response_size, GFP_KERNEL);
	if (!operation) {
		kfree(op_async);
		return -ENOMEM;
	}

	if (request_size)
		memcpy(operation->request->payload, request, request_size);

	op_async->gb = gb;
	op_async->operation = operation;
	op_async->completion = completion;

	spin_lock_irqsave(&gb_dev.lock, flags);
	list_add_tail(&op_async->entry, &gb_dev.list_op_async);
	spin_unlock_irqrestore(&gb_dev.lock, flags);

	do_gettimeofday(&op_async->ts);
	op_async->pending = true;
	atomic_inc(&gb->outstanding_operations);
	ret = gb_operation_request_send(operation,
					gb_loopback_async_operation_callback,
					GFP_KERNEL);
	if (ret)
		goto error;

	op_async->timer.function = gb_loopback_async_operation_timeout;
	op_async->timer.expires = jiffies + gb->jiffy_timeout;
	op_async->timer.data = (unsigned long)operation->id;
	add_timer(&op_async->timer);

	return ret;
error:
	gb_loopback_async_operation_put(op_async);
	return ret;
}

static int gb_loopback_sync_sink(struct gb_loopback *gb, u32 len)
{
	struct gb_loopback_transfer_request *request;
	int retval;

	request = kmalloc(len + sizeof(*request), GFP_KERNEL);
	if (!request)
		return -ENOMEM;

	request->len = cpu_to_le32(len);
	retval = gb_loopback_operation_sync(gb, GB_LOOPBACK_TYPE_SINK,
					    request, len + sizeof(*request),
					    NULL, 0);
	kfree(request);
	return retval;
}

static int gb_loopback_sync_transfer(struct gb_loopback *gb, u32 len)
{
	struct gb_loopback_transfer_request *request;
	struct gb_loopback_transfer_response *response;
	int retval;

	gb->apbridge_latency_ts = 0;
	gb->gpbridge_latency_ts = 0;

	request = kmalloc(len + sizeof(*request), GFP_KERNEL);
	if (!request)
		return -ENOMEM;
	response = kmalloc(len + sizeof(*response), GFP_KERNEL);
	if (!response) {
		kfree(request);
		return -ENOMEM;
	}

	memset(request->data, 0x5A, len);

	request->len = cpu_to_le32(len);
	retval = gb_loopback_operation_sync(gb, GB_LOOPBACK_TYPE_TRANSFER,
					    request, len + sizeof(*request),
					    response, len + sizeof(*response));
	if (retval)
		goto gb_error;

	if (memcmp(request->data, response->data, len)) {
		dev_err(&gb->connection->bundle->dev,
			"Loopback Data doesn't match\n");
		retval = -EREMOTEIO;
	}
	gb->apbridge_latency_ts = (u32)__le32_to_cpu(response->reserved0);
	gb->gpbridge_latency_ts = (u32)__le32_to_cpu(response->reserved1);

gb_error:
	kfree(request);
	kfree(response);

	return retval;
}

static int gb_loopback_sync_ping(struct gb_loopback *gb)
{
	return gb_loopback_operation_sync(gb, GB_LOOPBACK_TYPE_PING,
					  NULL, 0, NULL, 0);
}

static int gb_loopback_async_sink(struct gb_loopback *gb, u32 len)
{
	struct gb_loopback_transfer_request *request;
	int retval;

	request = kmalloc(len + sizeof(*request), GFP_KERNEL);
	if (!request)
		return -ENOMEM;

	request->len = cpu_to_le32(len);
	retval = gb_loopback_async_operation(gb, GB_LOOPBACK_TYPE_SINK,
					     request, len + sizeof(*request),
					     0, NULL);
	kfree(request);
	return retval;
}

static int gb_loopback_async_transfer_complete(
				struct gb_loopback_async_operation *op_async)
{
	struct gb_loopback *gb;
	struct gb_operation *operation;
	struct gb_loopback_transfer_request *request;
	struct gb_loopback_transfer_response *response;
	size_t len;
	int retval = 0;

	gb = op_async->gb;
	operation = op_async->operation;
	request = operation->request->payload;
	response = operation->response->payload;
	len = le32_to_cpu(request->len);

	if (memcmp(request->data, response->data, len)) {
		dev_err(&gb->connection->bundle->dev,
			"Loopback Data doesn't match operation id %d\n",
			operation->id);
		retval = -EREMOTEIO;
	} else {
		gb->apbridge_latency_ts =
			(u32)__le32_to_cpu(response->reserved0);
		gb->gpbridge_latency_ts =
			(u32)__le32_to_cpu(response->reserved1);
	}

	return retval;
}

static int gb_loopback_async_transfer(struct gb_loopback *gb, u32 len)
{
	struct gb_loopback_transfer_request *request;
	int retval, response_len;

	request = kmalloc(len + sizeof(*request), GFP_KERNEL);
	if (!request)
		return -ENOMEM;

	memset(request->data, 0x5A, len);

	request->len = cpu_to_le32(len);
	response_len = sizeof(struct gb_loopback_transfer_response);
	retval = gb_loopback_async_operation(gb, GB_LOOPBACK_TYPE_TRANSFER,
					     request, len + sizeof(*request),
					     len + response_len,
					     gb_loopback_async_transfer_complete);
	if (retval)
		goto gb_error;

gb_error:
	kfree(request);
	return retval;
}

static int gb_loopback_async_ping(struct gb_loopback *gb)
{
	return gb_loopback_async_operation(gb, GB_LOOPBACK_TYPE_PING,
					   NULL, 0, 0, NULL);
}

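/*
 * Handler for loopback requests initiated by the remote end: ping and sink
 * requests need no response payload, transfer requests echo the payload back.
 */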
static int gb_loopback_request_handler(struct gb_operation *operation)
{
	struct gb_connection *connection = operation->connection;
	struct gb_loopback_transfer_request *request;
	struct gb_loopback_transfer_response *response;
	struct device *dev = &connection->bundle->dev;
	size_t len;

	/* By convention, the AP initiates the version operation */
	switch (operation->type) {
	case GB_REQUEST_TYPE_PROTOCOL_VERSION:
		dev_err(dev, "module-initiated version operation\n");
		return -EINVAL;
	case GB_LOOPBACK_TYPE_PING:
	case GB_LOOPBACK_TYPE_SINK:
		return 0;
	case GB_LOOPBACK_TYPE_TRANSFER:
		if (operation->request->payload_size < sizeof(*request)) {
			dev_err(dev, "transfer request too small (%zu < %zu)\n",
				operation->request->payload_size,
				sizeof(*request));
			return -EINVAL;	/* -EMSGSIZE */
		}
		request = operation->request->payload;
		len = le32_to_cpu(request->len);
		if (len > gb_dev.size_max) {
			dev_err(dev, "transfer request too large (%zu > %zu)\n",
				len, gb_dev.size_max);
			return -EINVAL;
		}

		if (!gb_operation_response_alloc(operation,
				len + sizeof(*response), GFP_KERNEL)) {
			dev_err(dev, "error allocating response\n");
			return -ENOMEM;
		}
		response = operation->response->payload;
		response->len = cpu_to_le32(len);
		if (len)
			memcpy(response->data, request->data, len);

		return 0;
	default:
		dev_err(dev, "unsupported request: %u\n", operation->type);
		return -EINVAL;
	}
}

static void gb_loopback_reset_stats(struct gb_loopback *gb)
{
	struct gb_loopback_stats reset = {
		.min = U32_MAX,
	};

	/* Reset per-connection stats */
	memcpy(&gb->latency, &reset,
	       sizeof(struct gb_loopback_stats));
	memcpy(&gb->throughput, &reset,
	       sizeof(struct gb_loopback_stats));
	memcpy(&gb->requests_per_second, &reset,
	       sizeof(struct gb_loopback_stats));
	memcpy(&gb->apbridge_unipro_latency, &reset,
	       sizeof(struct gb_loopback_stats));
	memcpy(&gb->gpbridge_firmware_latency, &reset,
	       sizeof(struct gb_loopback_stats));

	/* Should be initialized at least once per transaction set */
	gb->apbridge_latency_ts = 0;
	gb->gpbridge_latency_ts = 0;
	memset(&gb->ts, 0, sizeof(struct timeval));
}

static void gb_loopback_update_stats(struct gb_loopback_stats *stats, u32 val)
{
	if (stats->min > val)
		stats->min = val;
	if (stats->max < val)
		stats->max = val;
	stats->sum += val;
	stats->count++;
}

static void gb_loopback_update_stats_window(struct gb_loopback_stats *stats,
					    u64 val, u32 count)
{
	stats->sum += val;
	stats->count += count;

	do_div(val, count);
	if (stats->min > val)
		stats->min = val;
	if (stats->max < val)
		stats->max = val;
}

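/*
 * Requests-per-second and throughput are computed over a measurement window:
 * the totals are scaled by USEC_PER_SEC and divided by the window duration in
 * microseconds to yield per-second rates.
 */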
static void gb_loopback_requests_update(struct gb_loopback *gb, u32 latency)
{
	u64 req = gb->requests_completed * USEC_PER_SEC;

	gb_loopback_update_stats_window(&gb->requests_per_second, req, latency);
}

static void gb_loopback_throughput_update(struct gb_loopback *gb, u32 latency)
{
	u64 aggregate_size = sizeof(struct gb_operation_msg_hdr) * 2;

	switch (gb->type) {
	case GB_LOOPBACK_TYPE_PING:
		break;
	case GB_LOOPBACK_TYPE_SINK:
		aggregate_size += sizeof(struct gb_loopback_transfer_request) +
				  gb->size;
		break;
	case GB_LOOPBACK_TYPE_TRANSFER:
		aggregate_size += sizeof(struct gb_loopback_transfer_request) +
				  sizeof(struct gb_loopback_transfer_response) +
				  gb->size * 2;
		break;
	default:
		return;
	}

	aggregate_size *= gb->requests_completed;
	aggregate_size *= USEC_PER_SEC;
	gb_loopback_update_stats_window(&gb->throughput, aggregate_size,
					latency);
}

static void gb_loopback_calculate_latency_stats(struct gb_loopback *gb)
{
	u32 lat;

	/* Express latency in terms of microseconds */
	lat = gb_loopback_nsec_to_usec_latency(gb->elapsed_nsecs);

	/* Log latency statistic */
	gb_loopback_update_stats(&gb->latency, lat);

	/* Raw latency log on a per thread basis */
	kfifo_in(&gb->kfifo_lat, (unsigned char *)&lat, sizeof(lat));

	/* Log the firmware supplied latency values */
	gb_loopback_update_stats(&gb->apbridge_unipro_latency,
				 gb->apbridge_latency_ts);
	gb_loopback_update_stats(&gb->gpbridge_firmware_latency,
				 gb->gpbridge_latency_ts);
}

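/*
 * Per-iteration bookkeeping: successful iterations feed the latency stats;
 * once roughly a second has elapsed, or the iteration set completes, the
 * windowed throughput and requests-per-second stats are updated.
 */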
static void gb_loopback_calculate_stats(struct gb_loopback *gb, bool error)
{
	u64 nlat;
	u32 lat;
	struct timeval te;

	if (!error) {
		gb->requests_completed++;
		gb_loopback_calculate_latency_stats(gb);
	}

	do_gettimeofday(&te);
	nlat = gb_loopback_calc_latency(&gb->ts, &te);
	if (nlat >= NSEC_PER_SEC || gb->iteration_count == gb->iteration_max) {
		lat = gb_loopback_nsec_to_usec_latency(nlat);

		gb_loopback_throughput_update(gb, lat);
		gb_loopback_requests_update(gb, lat);

		if (gb->iteration_count != gb->iteration_max) {
			gb->ts = te;
			gb->requests_completed = 0;
		}
	}
}

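/*
 * Back-pressure for asynchronous mode: block the worker thread until the
 * number of in-flight operations drops below outstanding_operations_max.
 */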
static void gb_loopback_async_wait_to_send(struct gb_loopback *gb)
{
	if (!(gb->async && gb->outstanding_operations_max))
		return;
	wait_event_interruptible(gb->wq_completion,
				 (atomic_read(&gb->outstanding_operations) <
				  gb->outstanding_operations_max) ||
				 kthread_should_stop());
}

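/*
 * Worker thread: sleeps until a test type is selected via sysfs, then issues
 * synchronous or asynchronous operations until iteration_max is reached,
 * waiting us_wait microseconds between successive sends.
 */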
static int gb_loopback_fn(void *data)
{
	int error = 0;
	int us_wait = 0;
	int type;
	u32 size;
	u32 send_count = 0;
	struct gb_loopback *gb = data;

	while (1) {
		if (!gb->type)
			wait_event_interruptible(gb->wq, gb->type ||
						 kthread_should_stop());
		if (kthread_should_stop())
			break;

		/* Limit the maximum number of in-flight async operations */
		gb_loopback_async_wait_to_send(gb);
		if (kthread_should_stop())
			break;

		mutex_lock(&gb->mutex);
		sysfs_notify(&gb->connection->bundle->dev.kobj,
			     NULL, "iteration_count");

		/* Optionally terminate */
		if (send_count == gb->iteration_max) {
			if (gb->iteration_count == gb->iteration_max) {
				gb->type = 0;
				send_count = 0;
			}
			mutex_unlock(&gb->mutex);
			continue;
		}
		size = gb->size;
		us_wait = gb->us_wait;
		type = gb->type;
		if (gb->ts.tv_usec == 0 && gb->ts.tv_sec == 0)
			do_gettimeofday(&gb->ts);
		mutex_unlock(&gb->mutex);

		/* Else operations to perform */
		if (gb->async) {
			if (type == GB_LOOPBACK_TYPE_PING) {
				error = gb_loopback_async_ping(gb);
			} else if (type == GB_LOOPBACK_TYPE_TRANSFER) {
				error = gb_loopback_async_transfer(gb, size);
			} else if (type == GB_LOOPBACK_TYPE_SINK) {
				error = gb_loopback_async_sink(gb, size);
			}

			if (error)
				gb->error++;
		} else {
			/* We are effectively single threaded here */
			if (type == GB_LOOPBACK_TYPE_PING)
				error = gb_loopback_sync_ping(gb);
			else if (type == GB_LOOPBACK_TYPE_TRANSFER)
				error = gb_loopback_sync_transfer(gb, size);
			else if (type == GB_LOOPBACK_TYPE_SINK)
				error = gb_loopback_sync_sink(gb, size);

			if (error)
				gb->error++;
			gb->iteration_count++;
			gb_loopback_calculate_stats(gb, !!error);
		}
		send_count++;
		if (us_wait)
			udelay(us_wait);
	}
	return 0;
}

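/*
 * Reading a connection's raw_latency debugfs file pops one latency sample
 * (in microseconds) from the kfifo; -EAGAIN is returned when no samples are
 * buffered.
 */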
static int gb_loopback_dbgfs_latency_show_common(struct seq_file *s,
						 struct kfifo *kfifo,
						 struct mutex *mutex)
{
	u32 latency;
	int retval;

	if (kfifo_len(kfifo) == 0) {
		retval = -EAGAIN;
		goto done;
	}

	mutex_lock(mutex);
	retval = kfifo_out(kfifo, &latency, sizeof(latency));
	if (retval > 0) {
		seq_printf(s, "%u", latency);
		retval = 0;
	}
	mutex_unlock(mutex);
done:
	return retval;
}

static int gb_loopback_dbgfs_latency_show(struct seq_file *s, void *unused)
{
	struct gb_loopback *gb = s->private;

	return gb_loopback_dbgfs_latency_show_common(s, &gb->kfifo_lat,
						     &gb->mutex);
}

static int gb_loopback_latency_open(struct inode *inode, struct file *file)
{
	return single_open(file, gb_loopback_dbgfs_latency_show,
			   inode->i_private);
}

static const struct file_operations gb_loopback_debugfs_latency_ops = {
	.open		= gb_loopback_latency_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

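/*
 * Loopback devices are kept sorted by interface, bundle and cport id so that
 * each can be assigned a stable single-bit identifier (lbid).
 */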
static int gb_loopback_bus_id_compare(void *priv, struct list_head *lha,
				      struct list_head *lhb)
{
	struct gb_loopback *a = list_entry(lha, struct gb_loopback, entry);
	struct gb_loopback *b = list_entry(lhb, struct gb_loopback, entry);
	struct gb_connection *ca = a->connection;
	struct gb_connection *cb = b->connection;

	if (ca->bundle->intf->interface_id < cb->bundle->intf->interface_id)
		return -1;
	if (cb->bundle->intf->interface_id < ca->bundle->intf->interface_id)
		return 1;
	if (ca->bundle->id < cb->bundle->id)
		return -1;
	if (cb->bundle->id < ca->bundle->id)
		return 1;
	if (ca->intf_cport_id < cb->intf_cport_id)
		return -1;
	else if (cb->intf_cport_id < ca->intf_cport_id)
		return 1;

	return 0;
}

static void gb_loopback_insert_id(struct gb_loopback *gb)
{
	struct gb_loopback *gb_list;
	u32 new_lbid = 0;

	/* perform an insertion sort */
	list_add_tail(&gb->entry, &gb_dev.list);
	list_sort(NULL, &gb_dev.list, gb_loopback_bus_id_compare);
	list_for_each_entry(gb_list, &gb_dev.list, entry) {
		gb_list->lbid = 1 << new_lbid;
		new_lbid++;
	}
}

#define DEBUGFS_NAMELEN 32

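/*
 * Probe: validate that the bundle exposes a single loopback cport, create the
 * connection, sysfs device, debugfs raw_latency file and kfifos, then start
 * the per-connection worker thread.
 */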
static int gb_loopback_probe(struct gb_bundle *bundle,
			     const struct greybus_bundle_id *id)
{
	struct greybus_descriptor_cport *cport_desc;
	struct gb_connection *connection;
	struct gb_loopback *gb;
	struct device *dev;
	int retval;
	char name[DEBUGFS_NAMELEN];
	unsigned long flags;

	if (bundle->num_cports != 1)
		return -ENODEV;

	cport_desc = &bundle->cport_desc[0];
	if (cport_desc->protocol_id != GREYBUS_PROTOCOL_LOOPBACK)
		return -ENODEV;

	gb = kzalloc(sizeof(*gb), GFP_KERNEL);
	if (!gb)
		return -ENOMEM;

	connection = gb_connection_create(bundle, le16_to_cpu(cport_desc->id),
					  gb_loopback_request_handler);
	if (IS_ERR(connection)) {
		retval = PTR_ERR(connection);
		goto out_kzalloc;
	}

	gb->connection = connection;
	greybus_set_drvdata(bundle, gb);

	init_waitqueue_head(&gb->wq);
	init_waitqueue_head(&gb->wq_completion);
	atomic_set(&gb->outstanding_operations, 0);
	gb_loopback_reset_stats(gb);

	/* Reported values to user-space for min/max timeouts */
	gb->timeout_min = jiffies_to_usecs(GB_LOOPBACK_TIMEOUT_MIN);
	gb->timeout_max = jiffies_to_usecs(GB_LOOPBACK_TIMEOUT_MAX);

	if (!gb_dev.count) {
		/* Calculate maximum payload */
		gb_dev.size_max = gb_operation_get_payload_size_max(connection);
		if (gb_dev.size_max <=
			sizeof(struct gb_loopback_transfer_request)) {
			retval = -EINVAL;
			goto out_connection_destroy;
		}
		gb_dev.size_max -= sizeof(struct gb_loopback_transfer_request);
	}

	/* Create per-connection sysfs and debugfs data-points */
	snprintf(name, sizeof(name), "raw_latency_%s",
		 dev_name(&connection->bundle->dev));
	gb->file = debugfs_create_file(name, S_IFREG | S_IRUGO, gb_dev.root, gb,
				       &gb_loopback_debugfs_latency_ops);

	gb->id = ida_simple_get(&loopback_ida, 0, 0, GFP_KERNEL);
	if (gb->id < 0) {
		retval = gb->id;
		goto out_debugfs_remove;
	}

	retval = gb_connection_enable(connection);
	if (retval)
		goto out_ida_remove;

	dev = device_create_with_groups(&loopback_class,
					&connection->bundle->dev,
					MKDEV(0, 0), gb, loopback_groups,
					"gb_loopback%d", gb->id);
	if (IS_ERR(dev)) {
		retval = PTR_ERR(dev);
		goto out_connection_disable;
	}
	gb->dev = dev;

	/* Allocate kfifo */
	if (kfifo_alloc(&gb->kfifo_lat, kfifo_depth * sizeof(u32),
			GFP_KERNEL)) {
		retval = -ENOMEM;
		goto out_conn;
	}
	if (kfifo_alloc(&gb->kfifo_ts, kfifo_depth * sizeof(struct timeval) * 2,
			GFP_KERNEL)) {
		retval = -ENOMEM;
		goto out_kfifo0;
	}

	/* Fork worker thread */
	mutex_init(&gb->mutex);
	gb->task = kthread_run(gb_loopback_fn, gb, "gb_loopback");
	if (IS_ERR(gb->task)) {
		retval = PTR_ERR(gb->task);
		goto out_kfifo1;
	}

	spin_lock_irqsave(&gb_dev.lock, flags);
	gb_loopback_insert_id(gb);
	gb_dev.count++;
	spin_unlock_irqrestore(&gb_dev.lock, flags);

	gb_connection_latency_tag_enable(connection);
	return 0;

out_kfifo1:
	kfifo_free(&gb->kfifo_ts);
out_kfifo0:
	kfifo_free(&gb->kfifo_lat);
out_conn:
	device_unregister(dev);
out_connection_disable:
	gb_connection_disable(connection);
out_ida_remove:
	ida_simple_remove(&loopback_ida, gb->id);
out_debugfs_remove:
	debugfs_remove(gb->file);
out_connection_destroy:
	gb_connection_destroy(connection);
out_kzalloc:
	kfree(gb);

	return retval;
}

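/*
 * Disconnect: disable the connection, stop the worker thread, wait for any
 * outstanding asynchronous operations and release the per-connection
 * resources.
 */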
static void gb_loopback_disconnect(struct gb_bundle *bundle)
{
	struct gb_loopback *gb = greybus_get_drvdata(bundle);
	unsigned long flags;

	gb_connection_disable(gb->connection);

	if (!IS_ERR_OR_NULL(gb->task))
		kthread_stop(gb->task);

	kfifo_free(&gb->kfifo_lat);
	kfifo_free(&gb->kfifo_ts);
	gb_connection_latency_tag_disable(gb->connection);
	debugfs_remove(gb->file);

	/*
	 * FIXME: gb_loopback_async_wait_all() is redundant now, as connection
	 * is disabled at the beginning and so we can't have any more
	 * incoming/outgoing requests.
	 */
	gb_loopback_async_wait_all(gb);

	spin_lock_irqsave(&gb_dev.lock, flags);
	gb_dev.count--;
	list_del(&gb->entry);
	spin_unlock_irqrestore(&gb_dev.lock, flags);

	device_unregister(gb->dev);
	ida_simple_remove(&loopback_ida, gb->id);

	gb_connection_destroy(gb->connection);
	kfree(gb);
}

static const struct greybus_bundle_id gb_loopback_id_table[] = {
	{ GREYBUS_DEVICE_CLASS(GREYBUS_CLASS_LOOPBACK) },
	{ }
};
MODULE_DEVICE_TABLE(greybus, gb_loopback_id_table);

static struct greybus_driver gb_loopback_driver = {
	.name		= "loopback",
	.probe		= gb_loopback_probe,
	.disconnect	= gb_loopback_disconnect,
	.id_table	= gb_loopback_id_table,
};

static int loopback_init(void)
{
	int retval;

	INIT_LIST_HEAD(&gb_dev.list);
	INIT_LIST_HEAD(&gb_dev.list_op_async);
	spin_lock_init(&gb_dev.lock);
	gb_dev.root = debugfs_create_dir("gb_loopback", NULL);

	retval = class_register(&loopback_class);
	if (retval)
		goto err;

	retval = greybus_register(&gb_loopback_driver);
	if (retval)
		goto err_unregister;

	return 0;

err_unregister:
	class_unregister(&loopback_class);
err:
	debugfs_remove_recursive(gb_dev.root);
	return retval;
}
module_init(loopback_init);

static void __exit loopback_exit(void)
{
	debugfs_remove_recursive(gb_dev.root);
	greybus_deregister(&gb_loopback_driver);
	class_unregister(&loopback_class);
	ida_destroy(&loopback_ida);
}
module_exit(loopback_exit);

MODULE_LICENSE("GPL v2");