ACPI / debugger: Add IO interface to access debugger functionalities
drivers/acpi/acpi_dbg.c
/*
 * ACPI AML interfacing support
 *
 * Copyright (C) 2015, Intel Corporation
 * Authors: Lv Zheng <lv.zheng@intel.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

/* #define DEBUG */
#define pr_fmt(fmt) "ACPI : AML: " fmt

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/wait.h>
#include <linux/poll.h>
#include <linux/sched.h>
#include <linux/kthread.h>
#include <linux/proc_fs.h>
#include <linux/debugfs.h>
#include <linux/circ_buf.h>
#include <linux/acpi_dbg.h>
#include "internal.h"

#define ACPI_AML_BUF_ALIGN	(sizeof (acpi_size))
#define ACPI_AML_BUF_SIZE	PAGE_SIZE

#define circ_count(circ) \
	(CIRC_CNT((circ)->head, (circ)->tail, ACPI_AML_BUF_SIZE))
#define circ_count_to_end(circ) \
	(CIRC_CNT_TO_END((circ)->head, (circ)->tail, ACPI_AML_BUF_SIZE))
#define circ_space(circ) \
	(CIRC_SPACE((circ)->head, (circ)->tail, ACPI_AML_BUF_SIZE))
#define circ_space_to_end(circ) \
	(CIRC_SPACE_TO_END((circ)->head, (circ)->tail, ACPI_AML_BUF_SIZE))
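
/*
 * The CIRC_* helpers from <linux/circ_buf.h> compute the used count as
 * (head - tail) & (size - 1) and the free space as (tail - head - 1) &
 * (size - 1), so they require a power-of-two buffer size; PAGE_SIZE
 * satisfies that requirement.
 */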

#define ACPI_AML_OPENED		0x0001
#define ACPI_AML_CLOSED		0x0002
#define ACPI_AML_IN_USER	0x0004 /* user space is writing cmd */
#define ACPI_AML_IN_KERN	0x0008 /* kernel space is reading cmd */
#define ACPI_AML_OUT_USER	0x0010 /* user space is reading log */
#define ACPI_AML_OUT_KERN	0x0020 /* kernel space is writing log */
#define ACPI_AML_USER		(ACPI_AML_IN_USER | ACPI_AML_OUT_USER)
#define ACPI_AML_KERN		(ACPI_AML_IN_KERN | ACPI_AML_OUT_KERN)
#define ACPI_AML_BUSY		(ACPI_AML_USER | ACPI_AML_KERN)
#define ACPI_AML_OPEN		(ACPI_AML_OPENED | ACPI_AML_CLOSED)

struct acpi_aml_io {
	wait_queue_head_t wait;
	unsigned long flags;
	unsigned long users;
	struct mutex lock;
	struct task_struct *thread;
	char out_buf[ACPI_AML_BUF_SIZE] __aligned(ACPI_AML_BUF_ALIGN);
	struct circ_buf out_crc;
	char in_buf[ACPI_AML_BUF_SIZE] __aligned(ACPI_AML_BUF_ALIGN);
	struct circ_buf in_crc;
	acpi_osd_exec_callback function;
	void *context;
	unsigned long usages;
};

static struct acpi_aml_io acpi_aml_io;
static bool acpi_aml_initialized;
static struct file *acpi_aml_active_reader;
static struct dentry *acpi_aml_dentry;

static inline bool __acpi_aml_running(void)
{
	return acpi_aml_io.thread ? true : false;
}

static inline bool __acpi_aml_access_ok(unsigned long flag)
{
	/*
	 * When the debugger interface is in the opened state
	 * (OPENED && !CLOSED), the debugger buffers may be accessed from
	 * either user space or kernel space.
	 * In addition, in kernel space, only the debugger thread
	 * (matched by thread ID) is allowed to access them.
	 */
	if (!(acpi_aml_io.flags & ACPI_AML_OPENED) ||
	    (acpi_aml_io.flags & ACPI_AML_CLOSED) ||
	    !__acpi_aml_running())
		return false;
	if ((flag & ACPI_AML_KERN) &&
	    current != acpi_aml_io.thread)
		return false;
	return true;
}

static inline bool __acpi_aml_readable(struct circ_buf *circ, unsigned long flag)
{
	/*
	 * Another read is not in progress and there is data in buffer
	 * available for read.
	 */
	if (!(acpi_aml_io.flags & flag) && circ_count(circ))
		return true;
	return false;
}

static inline bool __acpi_aml_writable(struct circ_buf *circ, unsigned long flag)
{
	/*
	 * Another write is not in progress and there is buffer space
	 * available for write.
	 */
	if (!(acpi_aml_io.flags & flag) && circ_space(circ))
		return true;
	return false;
}

static inline bool __acpi_aml_busy(void)
{
	if (acpi_aml_io.flags & ACPI_AML_BUSY)
		return true;
	return false;
}

static inline bool __acpi_aml_opened(void)
{
	if (acpi_aml_io.flags & ACPI_AML_OPEN)
		return true;
	return false;
}

static inline bool __acpi_aml_used(void)
{
	return acpi_aml_io.usages ? true : false;
}

static inline bool acpi_aml_running(void)
{
	bool ret;

	mutex_lock(&acpi_aml_io.lock);
	ret = __acpi_aml_running();
	mutex_unlock(&acpi_aml_io.lock);
	return ret;
}

static bool acpi_aml_busy(void)
{
	bool ret;

	mutex_lock(&acpi_aml_io.lock);
	ret = __acpi_aml_busy();
	mutex_unlock(&acpi_aml_io.lock);
	return ret;
}

static bool acpi_aml_used(void)
{
	bool ret;

	/*
	 * The usage counter is used to avoid races between the start and
	 * the stop of the debugger thread.
	 */
	mutex_lock(&acpi_aml_io.lock);
	ret = __acpi_aml_used();
	mutex_unlock(&acpi_aml_io.lock);
	return ret;
}

static bool acpi_aml_kern_readable(void)
{
	bool ret;

	mutex_lock(&acpi_aml_io.lock);
	ret = !__acpi_aml_access_ok(ACPI_AML_IN_KERN) ||
	      __acpi_aml_readable(&acpi_aml_io.in_crc, ACPI_AML_IN_KERN);
	mutex_unlock(&acpi_aml_io.lock);
	return ret;
}

static bool acpi_aml_kern_writable(void)
{
	bool ret;

	mutex_lock(&acpi_aml_io.lock);
	ret = !__acpi_aml_access_ok(ACPI_AML_OUT_KERN) ||
	      __acpi_aml_writable(&acpi_aml_io.out_crc, ACPI_AML_OUT_KERN);
	mutex_unlock(&acpi_aml_io.lock);
	return ret;
}

static bool acpi_aml_user_readable(void)
{
	bool ret;

	mutex_lock(&acpi_aml_io.lock);
	ret = !__acpi_aml_access_ok(ACPI_AML_OUT_USER) ||
	      __acpi_aml_readable(&acpi_aml_io.out_crc, ACPI_AML_OUT_USER);
	mutex_unlock(&acpi_aml_io.lock);
	return ret;
}

static bool acpi_aml_user_writable(void)
{
	bool ret;

	mutex_lock(&acpi_aml_io.lock);
	ret = !__acpi_aml_access_ok(ACPI_AML_IN_USER) ||
	      __acpi_aml_writable(&acpi_aml_io.in_crc, ACPI_AML_IN_USER);
	mutex_unlock(&acpi_aml_io.lock);
	return ret;
}

static int acpi_aml_lock_write(struct circ_buf *circ, unsigned long flag)
{
	int ret = 0;

	mutex_lock(&acpi_aml_io.lock);
	if (!__acpi_aml_access_ok(flag)) {
		ret = -EFAULT;
		goto out;
	}
	if (!__acpi_aml_writable(circ, flag)) {
		ret = -EAGAIN;
		goto out;
	}
	acpi_aml_io.flags |= flag;
out:
	mutex_unlock(&acpi_aml_io.lock);
	return ret;
}

static int acpi_aml_lock_read(struct circ_buf *circ, unsigned long flag)
{
	int ret = 0;

	mutex_lock(&acpi_aml_io.lock);
	if (!__acpi_aml_access_ok(flag)) {
		ret = -EFAULT;
		goto out;
	}
	if (!__acpi_aml_readable(circ, flag)) {
		ret = -EAGAIN;
		goto out;
	}
	acpi_aml_io.flags |= flag;
out:
	mutex_unlock(&acpi_aml_io.lock);
	return ret;
}

static void acpi_aml_unlock_fifo(unsigned long flag, bool wakeup)
{
	mutex_lock(&acpi_aml_io.lock);
	acpi_aml_io.flags &= ~flag;
	if (wakeup)
		wake_up_interruptible(&acpi_aml_io.wait);
	mutex_unlock(&acpi_aml_io.lock);
}

static int acpi_aml_write_kern(const char *buf, int len)
{
	int ret;
	struct circ_buf *crc = &acpi_aml_io.out_crc;
	int n;
	char *p;

	ret = acpi_aml_lock_write(crc, ACPI_AML_OUT_KERN);
	if (IS_ERR_VALUE(ret))
		return ret;
	/* sync tail before inserting logs */
	smp_mb();
	p = &crc->buf[crc->head];
	n = min(len, circ_space_to_end(crc));
	memcpy(p, buf, n);
	/* sync head after inserting logs */
	smp_wmb();
	crc->head = (crc->head + n) & (ACPI_AML_BUF_SIZE - 1);
	acpi_aml_unlock_fifo(ACPI_AML_OUT_KERN, true);
	return n;
}

static int acpi_aml_readb_kern(void)
{
	int ret;
	struct circ_buf *crc = &acpi_aml_io.in_crc;
	char *p;

	ret = acpi_aml_lock_read(crc, ACPI_AML_IN_KERN);
	if (IS_ERR_VALUE(ret))
		return ret;
	/* sync head before removing cmds */
	smp_rmb();
	p = &crc->buf[crc->tail];
	ret = (int)*p;
	/* sync tail after removing cmds */
	smp_mb();
	crc->tail = (crc->tail + 1) & (ACPI_AML_BUF_SIZE - 1);
	acpi_aml_unlock_fifo(ACPI_AML_IN_KERN, true);
	return ret;
}

/*
 * acpi_aml_write_log() - Capture debugger output
 * @msg: the debugger output
 *
 * This function should be used to implement acpi_os_printf() to filter out
 * the debugger output and store the output into the debugger interface
 * buffer. Return the size of stored logs or errno.
 */
ssize_t acpi_aml_write_log(const char *msg)
{
	int ret = 0;
	int count = 0, size = 0;

	if (!acpi_aml_initialized)
		return -ENODEV;
	if (msg)
		count = strlen(msg);
	while (count > 0) {
again:
		ret = acpi_aml_write_kern(msg + size, count);
		if (ret == -EAGAIN) {
			ret = wait_event_interruptible(acpi_aml_io.wait,
						       acpi_aml_kern_writable());
			/*
			 * We need to retry when the condition
			 * becomes true.
			 */
			if (ret == 0)
				goto again;
			break;
		}
		if (IS_ERR_VALUE(ret))
			break;
		size += ret;
		count -= ret;
	}
	return size > 0 ? size : ret;
}
EXPORT_SYMBOL(acpi_aml_write_log);
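
/*
 * Minimal sketch (not part of this file) of how an acpi_os_printf()
 * implementation could forward debugger output into the log buffer via
 * acpi_aml_write_log(), as the comment above suggests.  The local scratch
 * buffer size and the fallback to printk() are illustrative assumptions;
 * the real OSL code in drivers/acpi/osl.c may differ.
 */
#if 0
void acpi_os_printf(const char *fmt, ...)
{
	char buf[512];		/* assumed scratch size, not from this file */
	va_list args;

	va_start(args, fmt);
	vsnprintf(buf, sizeof(buf), fmt, args);
	va_end(args);

	/* Try the debugger log buffer first; fall back to the kernel log. */
	if (acpi_aml_write_log(buf) < 0)
		printk(KERN_CONT "%s", buf);
}
#endif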

/*
 * acpi_aml_read_cmd() - Capture debugger input
 * @msg: the debugger input
 * @size: the size of the debugger input
 *
 * This function should be used to implement acpi_os_get_line() to capture
 * the debugger input commands and store the input commands into the
 * debugger interface buffer. Return the size of stored commands or errno.
 */
ssize_t acpi_aml_read_cmd(char *msg, size_t count)
{
	int ret = 0;
	int size = 0;

	/*
	 * The interface is guaranteed to be initialized here because this
	 * is only invoked from the running debugger thread; anything else
	 * is a bug.
	 */
	BUG_ON(!acpi_aml_initialized);
	while (count > 0) {
again:
		/*
		 * Check each input byte to find the end of the command.
		 */
		ret = acpi_aml_readb_kern();
		if (ret == -EAGAIN) {
			ret = wait_event_interruptible(acpi_aml_io.wait,
						       acpi_aml_kern_readable());
			/*
			 * We need to retry when the condition becomes
			 * true.
			 */
			if (ret == 0)
				goto again;
		}
		if (IS_ERR_VALUE(ret))
			break;
		*(msg + size) = (char)ret;
		size++;
		count--;
		if (ret == '\n') {
			/*
			 * acpi_os_get_line() requires a zero-terminated
			 * command string.
			 */
			*(msg + size - 1) = '\0';
			break;
		}
	}
	return size > 0 ? size : ret;
}
EXPORT_SYMBOL(acpi_aml_read_cmd);
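
/*
 * Minimal sketch (not part of this file) of an acpi_os_get_line()
 * built on top of acpi_aml_read_cmd(), as described above.  The error
 * mapping is an assumption; the real OSL implementation may differ.
 */
#if 0
acpi_status acpi_os_get_line(char *buffer, u32 buffer_length, u32 *bytes_read)
{
	int n;

	n = acpi_aml_read_cmd(buffer, buffer_length);
	if (n < 0)
		return AE_ERROR;
	if (bytes_read)
		*bytes_read = n;
	return AE_OK;
}
#endif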

static int acpi_aml_thread(void *unused)
{
	acpi_osd_exec_callback function = NULL;
	void *context;

	mutex_lock(&acpi_aml_io.lock);
	if (acpi_aml_io.function) {
		acpi_aml_io.usages++;
		function = acpi_aml_io.function;
		context = acpi_aml_io.context;
	}
	mutex_unlock(&acpi_aml_io.lock);

	if (function)
		function(context);

	mutex_lock(&acpi_aml_io.lock);
	acpi_aml_io.usages--;
	if (!__acpi_aml_used()) {
		acpi_aml_io.thread = NULL;
		wake_up(&acpi_aml_io.wait);
	}
	mutex_unlock(&acpi_aml_io.lock);

	return 0;
}

/*
 * acpi_aml_create_thread() - Create AML debugger thread
 * @function: the debugger thread callback
 * @context: the context to be passed to the debugger thread
 *
 * This function should be used to implement acpi_os_execute() which is
 * used by the ACPICA debugger to create the debugger thread.
 */
int acpi_aml_create_thread(acpi_osd_exec_callback function, void *context)
{
	struct task_struct *t;

	mutex_lock(&acpi_aml_io.lock);
	acpi_aml_io.function = function;
	acpi_aml_io.context = context;
	mutex_unlock(&acpi_aml_io.lock);

	t = kthread_create(acpi_aml_thread, NULL, "aml");
	if (IS_ERR(t)) {
		pr_err("Failed to create AML debugger thread.\n");
		return PTR_ERR(t);
	}

	mutex_lock(&acpi_aml_io.lock);
	acpi_aml_io.thread = t;
	acpi_set_debugger_thread_id((acpi_thread_id)(unsigned long)t);
	wake_up_process(t);
	mutex_unlock(&acpi_aml_io.lock);
	return 0;
}
EXPORT_SYMBOL(acpi_aml_create_thread);
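
/*
 * Minimal sketch (not part of this file) of how acpi_os_execute() could
 * route the ACPICA debugger's thread-creation request to
 * acpi_aml_create_thread().  The execute-type constant used for the
 * debugger main thread is an assumption here; the real OSL code and the
 * exact enum value may differ.
 */
#if 0
acpi_status acpi_os_execute(acpi_execute_type type,
			    acpi_osd_exec_callback function, void *context)
{
	if (type == OSL_DEBUGGER_MAIN_THREAD) {
		if (acpi_aml_create_thread(function, context))
			return AE_ERROR;
		return AE_OK;
	}

	/* ... all other request types go to the ACPI workqueues ... */
	return AE_OK;
}
#endif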

int acpi_aml_wait_command_ready(void)
{
	acpi_status status;

	if (!acpi_gbl_method_executing)
		acpi_os_printf("\n%1c ", ACPI_DEBUGGER_COMMAND_PROMPT);
	else
		acpi_os_printf("\n%1c ", ACPI_DEBUGGER_EXECUTE_PROMPT);

	status = acpi_os_get_line(acpi_gbl_db_line_buf,
				  ACPI_DB_LINE_BUFFER_SIZE, NULL);
	if (ACPI_FAILURE(status))
		return -EINVAL;
	return 0;
}
EXPORT_SYMBOL(acpi_aml_wait_command_ready);

int acpi_aml_notify_command_complete(void)
{
	return 0;
}
EXPORT_SYMBOL(acpi_aml_notify_command_complete);

static int acpi_aml_open(struct inode *inode, struct file *file)
{
	int ret = 0;
	acpi_status status;

	mutex_lock(&acpi_aml_io.lock);
	/*
	 * The debugger interface is being closed, no new user is allowed
	 * during this period.
	 */
	if (acpi_aml_io.flags & ACPI_AML_CLOSED) {
		ret = -EBUSY;
		goto err_lock;
	}
	if ((file->f_flags & O_ACCMODE) != O_WRONLY) {
		/*
		 * Only one reader is allowed to initiate the debugger
		 * thread.
		 */
		if (acpi_aml_active_reader) {
			ret = -EBUSY;
			goto err_lock;
		} else {
			pr_debug("Opening debugger reader.\n");
			acpi_aml_active_reader = file;
		}
	} else {
		/*
		 * No writer is allowed unless the debugger thread is
		 * ready.
		 */
		if (!(acpi_aml_io.flags & ACPI_AML_OPENED)) {
			ret = -ENODEV;
			goto err_lock;
		}
	}
	if (acpi_aml_active_reader == file) {
		pr_debug("Opening debugger interface.\n");
		mutex_unlock(&acpi_aml_io.lock);

		pr_debug("Initializing debugger thread.\n");
		status = acpi_initialize_debugger();
		if (ACPI_FAILURE(status)) {
			pr_err("Failed to initialize debugger.\n");
			ret = -EINVAL;
			/* re-acquire the lock dropped above before bailing out */
			mutex_lock(&acpi_aml_io.lock);
			goto err_lock;
		}
		pr_debug("Debugger thread initialized.\n");

		mutex_lock(&acpi_aml_io.lock);
		acpi_aml_io.flags |= ACPI_AML_OPENED;
		acpi_aml_io.out_crc.head = acpi_aml_io.out_crc.tail = 0;
		acpi_aml_io.in_crc.head = acpi_aml_io.in_crc.tail = 0;
		pr_debug("Debugger interface opened.\n");
	}
	acpi_aml_io.users++;
err_lock:
	if (IS_ERR_VALUE(ret)) {
		if (acpi_aml_active_reader == file)
			acpi_aml_active_reader = NULL;
	}
	mutex_unlock(&acpi_aml_io.lock);
	return ret;
}

static int acpi_aml_release(struct inode *inode, struct file *file)
{
	mutex_lock(&acpi_aml_io.lock);
	acpi_aml_io.users--;
	if (file == acpi_aml_active_reader) {
		pr_debug("Closing debugger reader.\n");
		acpi_aml_active_reader = NULL;

		pr_debug("Closing debugger interface.\n");
		acpi_aml_io.flags |= ACPI_AML_CLOSED;

		/*
		 * Wake up all blocked user space/kernel space
		 * readers/writers.
		 */
		wake_up_interruptible(&acpi_aml_io.wait);
		mutex_unlock(&acpi_aml_io.lock);
		/*
		 * Wait for all user space/kernel space readers/writers to
		 * stop so that the ACPICA command loop of the debugger
		 * thread fails all of its command line reads after this
		 * point.
		 */
		wait_event(acpi_aml_io.wait, !acpi_aml_busy());

		/*
		 * Then we try to terminate the debugger thread if it is
		 * not terminated.
		 */
		pr_debug("Terminating debugger thread.\n");
		acpi_terminate_debugger();
		wait_event(acpi_aml_io.wait, !acpi_aml_used());
		pr_debug("Debugger thread terminated.\n");

		mutex_lock(&acpi_aml_io.lock);
		acpi_aml_io.flags &= ~ACPI_AML_OPENED;
	}
	if (acpi_aml_io.users == 0) {
		pr_debug("Debugger interface closed.\n");
		acpi_aml_io.flags &= ~ACPI_AML_CLOSED;
	}
	mutex_unlock(&acpi_aml_io.lock);
	return 0;
}

static int acpi_aml_read_user(char __user *buf, int len)
{
	int ret;
	struct circ_buf *crc = &acpi_aml_io.out_crc;
	int n;
	char *p;

	ret = acpi_aml_lock_read(crc, ACPI_AML_OUT_USER);
	if (IS_ERR_VALUE(ret))
		return ret;
	/* sync head before removing logs */
	smp_rmb();
	p = &crc->buf[crc->tail];
	n = min(len, circ_count_to_end(crc));
	if (copy_to_user(buf, p, n)) {
		ret = -EFAULT;
		goto out;
	}
	/* sync tail after removing logs */
	smp_mb();
	crc->tail = (crc->tail + n) & (ACPI_AML_BUF_SIZE - 1);
	ret = n;
out:
	acpi_aml_unlock_fifo(ACPI_AML_OUT_USER, !IS_ERR_VALUE(ret));
	return ret;
}

static ssize_t acpi_aml_read(struct file *file, char __user *buf,
			     size_t count, loff_t *ppos)
{
	int ret = 0;
	int size = 0;

	if (!buf || count < 0)
		return -EINVAL;
	if (!count)
		return 0;
	if (!access_ok(VERIFY_WRITE, buf, count))
		return -EFAULT;

	while (count > 0) {
again:
		ret = acpi_aml_read_user(buf + size, count);
		if (ret == -EAGAIN) {
			if (file->f_flags & O_NONBLOCK)
				break;
			else {
				ret = wait_event_interruptible(acpi_aml_io.wait,
							       acpi_aml_user_readable());
				/*
				 * We need to retry when the condition
				 * becomes true.
				 */
				if (ret == 0)
					goto again;
			}
		}
		if (IS_ERR_VALUE(ret)) {
			if (!acpi_aml_running())
				ret = 0;
			break;
		}
		if (ret) {
			size += ret;
			count -= ret;
			*ppos += ret;
			break;
		}
	}
	return size > 0 ? size : ret;
}

static int acpi_aml_write_user(const char __user *buf, int len)
{
	int ret;
	struct circ_buf *crc = &acpi_aml_io.in_crc;
	int n;
	char *p;

	ret = acpi_aml_lock_write(crc, ACPI_AML_IN_USER);
	if (IS_ERR_VALUE(ret))
		return ret;
	/* sync tail before inserting cmds */
	smp_mb();
	p = &crc->buf[crc->head];
	n = min(len, circ_space_to_end(crc));
	if (copy_from_user(p, buf, n)) {
		ret = -EFAULT;
		goto out;
	}
	/* sync head after inserting cmds */
	smp_wmb();
	crc->head = (crc->head + n) & (ACPI_AML_BUF_SIZE - 1);
	ret = n;
out:
	acpi_aml_unlock_fifo(ACPI_AML_IN_USER, !IS_ERR_VALUE(ret));
	return ret;
}

static ssize_t acpi_aml_write(struct file *file, const char __user *buf,
			      size_t count, loff_t *ppos)
{
	int ret = 0;
	int size = 0;

	if (!buf || count < 0)
		return -EINVAL;
	if (!count)
		return 0;
	if (!access_ok(VERIFY_READ, buf, count))
		return -EFAULT;

	while (count > 0) {
again:
		ret = acpi_aml_write_user(buf + size, count);
		if (ret == -EAGAIN) {
			if (file->f_flags & O_NONBLOCK)
				break;
			else {
				ret = wait_event_interruptible(acpi_aml_io.wait,
							       acpi_aml_user_writable());
				/*
				 * We need to retry when the condition
				 * becomes true.
				 */
				if (ret == 0)
					goto again;
			}
		}
		if (IS_ERR_VALUE(ret)) {
			if (!acpi_aml_running())
				ret = 0;
			break;
		}
		if (ret) {
			size += ret;
			count -= ret;
			*ppos += ret;
		}
	}
	return size > 0 ? size : ret;
}

static unsigned int acpi_aml_poll(struct file *file, poll_table *wait)
{
	int masks = 0;

	poll_wait(file, &acpi_aml_io.wait, wait);
	if (acpi_aml_user_readable())
		masks |= POLLIN | POLLRDNORM;
	if (acpi_aml_user_writable())
		masks |= POLLOUT | POLLWRNORM;

	return masks;
}

static const struct file_operations acpi_aml_operations = {
	.read = acpi_aml_read,
	.write = acpi_aml_write,
	.poll = acpi_aml_poll,
	.open = acpi_aml_open,
	.release = acpi_aml_release,
	.llseek = generic_file_llseek,
};

int __init acpi_aml_init(void)
{
	if (!acpi_debugfs_dir)
		return -ENOENT;
	/* Initialize AML IO interface */
	mutex_init(&acpi_aml_io.lock);
	init_waitqueue_head(&acpi_aml_io.wait);
	acpi_aml_io.out_crc.buf = acpi_aml_io.out_buf;
	acpi_aml_io.in_crc.buf = acpi_aml_io.in_buf;
	acpi_aml_dentry = debugfs_create_file("acpidbg",
					      S_IFREG | S_IRUGO | S_IWUSR,
					      acpi_debugfs_dir, NULL,
					      &acpi_aml_operations);
	if (acpi_aml_dentry == NULL)
		return -ENODEV;
	acpi_aml_initialized = true;
	return 0;
}
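
/*
 * Typical consumer of the node created above (illustrative note, not part
 * of this file): a user-space debugger front end, such as the acpidbg
 * tool shipped with the kernel sources, opens the "acpidbg" debugfs node
 * read-write, writes debugger command lines to it and reads the debugger
 * output back.  The path is /sys/kernel/debug/acpi/acpidbg, assuming
 * debugfs is mounted at /sys/kernel/debug.
 */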

#if 0
void __exit acpi_aml_exit(void)
{
	/* TODO: Stop the in kernel debugger */
	if (acpi_aml_dentry)
		debugfs_remove(acpi_aml_dentry);
	acpi_aml_initialized = false;
}

module_init(acpi_aml_init);
module_exit(acpi_aml_exit);
#endif