// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2012 Intel, Inc.
 * Copyright (C) 2013 Intel, Inc.
 * Copyright (C) 2014 Linaro Limited
 * Copyright (C) 2011-2016 Google, Inc.
 *
 * This software is licensed under the terms of the GNU General Public
 * License version 2, as published by the Free Software Foundation, and
 * may be copied, distributed, and modified under those terms.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 */

/* This source file contains the implementation of a special device driver
 * that intends to provide a *very* fast communication channel between the
 * guest system and the QEMU emulator.
 *
 * Usage from the guest is simply the following (error handling simplified):
 *
 *    int fd = open("/dev/qemu_pipe", O_RDWR);
 *    .... write() or read() through the pipe.
 *
 * This driver doesn't deal with the exact protocol used during the session.
 * It is intended to be as simple as something like:
 *
 *    // do this _just_ after opening the fd to connect to a specific
 *    // emulator service.
 *    const char *msg = "<pipename>";
 *    if (write(fd, msg, strlen(msg) + 1) < 0) {
 *        ... could not connect to <pipename> service
 *        close(fd);
 *    }
 *
 *    // after this, simply read() and write() to communicate with the
 *    // service. Exact protocol details left as an exercise to the reader.
 *
 * This driver is very fast because it doesn't copy any data through
 * intermediate buffers, since the emulator is capable of translating
 * guest user addresses into host ones.
 *
 * Note that we must however ensure that each user page involved in the
 * exchange is properly mapped during a transfer.
 */
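
/*
 * For illustration only, a complete guest-side client could look like the
 * sketch below. The service name "pingpong" and the helper name are
 * hypothetical, error handling is minimal, and this code is not part of
 * the driver itself:
 *
 *    #include <fcntl.h>
 *    #include <string.h>
 *    #include <unistd.h>
 *
 *    static int open_pipe_service(const char *name)
 *    {
 *        int fd = open("/dev/qemu_pipe", O_RDWR);
 *
 *        if (fd < 0)
 *            return -1;
 *        // Handshake: send the zero-terminated service name first.
 *        if (write(fd, name, strlen(name) + 1) < 0) {
 *            close(fd);
 *            return -1;
 *        }
 *        return fd;  // read()/write() now speak the service's protocol
 *    }
 *
 *    // usage: int fd = open_pipe_service("pingpong");
 */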

#include <linux/module.h>
#include <linux/mod_devicetable.h>
#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <linux/spinlock.h>
#include <linux/miscdevice.h>
#include <linux/platform_device.h>
#include <linux/poll.h>
#include <linux/sched.h>
#include <linux/bitops.h>
#include <linux/slab.h>
#include <linux/io.h>
#include <linux/dma-mapping.h>
#include <linux/mm.h>
#include <linux/acpi.h>
#include <linux/bug.h>
#include "goldfish_pipe_qemu.h"

/*
 * Update this when something changes in the driver's behavior so the host
 * can benefit from knowing it
 */
enum {
	PIPE_DRIVER_VERSION = 2,
	PIPE_CURRENT_DEVICE_VERSION = 2
};

enum {
	MAX_BUFFERS_PER_COMMAND = 336,
	MAX_SIGNALLED_PIPES = 64,
	INITIAL_PIPES_CAPACITY = 64
};

struct goldfish_pipe_dev;

/* A per-pipe command structure, shared with the host */
struct goldfish_pipe_command {
	s32 cmd;	/* PipeCmdCode, guest -> host */
	s32 id;		/* pipe id, guest -> host */
	s32 status;	/* command execution status, host -> guest */
	s32 reserved;	/* to pad to 64-bit boundary */
	union {
		/* Parameters for PIPE_CMD_{READ,WRITE} */
		struct {
			/* number of buffers, guest -> host */
			u32 buffers_count;
			/* number of consumed bytes, host -> guest */
			s32 consumed_size;
			/* buffer pointers, guest -> host */
			u64 ptrs[MAX_BUFFERS_PER_COMMAND];
			/* buffer sizes, guest -> host */
			u32 sizes[MAX_BUFFERS_PER_COMMAND];
		} rw_params;
	};
};
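
/*
 * Example (illustrative values, not a protocol requirement): a read that
 * spans three user pages where only the last two happen to be physically
 * contiguous is sent as buffers_count == 2, with ptrs[]/sizes[] describing
 * one single-page run and one merged two-page run; see populate_rw_params()
 * below for the merging logic.
 */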

/* A single signalled pipe information */
struct signalled_pipe_buffer {
	u32 id;
	u32 flags;
};

/* Parameters for the PIPE_CMD_OPEN command */
struct open_command_param {
	u64 command_buffer_ptr;
	u32 rw_params_max_count;
};

/* Device-level set of buffers shared with the host */
struct goldfish_pipe_dev_buffers {
	struct open_command_param open_command_params;
	struct signalled_pipe_buffer
		signalled_pipe_buffers[MAX_SIGNALLED_PIPES];
};

/* This data type models a given pipe instance */
struct goldfish_pipe {
	/* pipe ID - index into goldfish_pipe_dev::pipes array */
	u32 id;

	/* The wake flags the pipe is waiting for
	 * Note: not protected with any lock, uses atomic operations
	 * and barriers to make it thread-safe.
	 */
	unsigned long flags;

	/* wake flags the host has signalled,
	 * - protected by goldfish_pipe_dev::lock
	 */
	unsigned long signalled_flags;

	/* A pointer to command buffer */
	struct goldfish_pipe_command *command_buffer;

	/* doubly linked list of signalled pipes, protected by
	 * goldfish_pipe_dev::lock
	 */
	struct goldfish_pipe *prev_signalled;
	struct goldfish_pipe *next_signalled;

	/*
	 * A pipe's own lock. Protects the following:
	 * - *command_buffer - makes sure a command can safely write its
	 *   parameters to the host and read the results back.
	 */
	struct mutex lock;

	/* A wake queue for sleeping until host signals an event */
	wait_queue_head_t wake_queue;

	/* Pointer to the parent goldfish_pipe_dev instance */
	struct goldfish_pipe_dev *dev;

	/* A buffer of pages, too large to fit into a stack frame */
	struct page *pages[MAX_BUFFERS_PER_COMMAND];
};

/* The global driver data. Holds a reference to the i/o page used to
 * communicate with the emulator, and a wake queue for blocked tasks
 * waiting to be awoken.
 */
struct goldfish_pipe_dev {
	/*
	 * Global device spinlock. Protects the following members:
	 *  - pipes, pipes_capacity
	 *  - [*pipes, *pipes + pipes_capacity) - array data
	 *  - first_signalled_pipe,
	 *      goldfish_pipe::prev_signalled,
	 *      goldfish_pipe::next_signalled,
	 *      goldfish_pipe::signalled_flags - all signalled-related fields,
	 *      in all allocated pipes
	 *  - open_command_params - PIPE_CMD_OPEN-related buffers
	 *
	 * It looks like a lot of different fields, but the trick is that
	 * the only operation that happens often is the signalled pipes array
	 * manipulation. That's why it's OK for now to keep the rest of the
	 * fields under the same lock. If we notice too much contention because
	 * of PIPE_CMD_OPEN, then we should add a separate lock there.
	 */
	spinlock_t lock;

	/*
	 * Array of the pipes of |pipes_capacity| elements,
	 * indexed by goldfish_pipe::id
	 */
	struct goldfish_pipe **pipes;
	u32 pipes_capacity;

	/* Pointers to the buffers the host uses for interaction with this driver */
	struct goldfish_pipe_dev_buffers *buffers;

	/* Head of a doubly linked list of signalled pipes */
	struct goldfish_pipe *first_signalled_pipe;

	/* ptr to platform device's device struct */
	struct device *pdev_dev;

	/* Some device-specific data */
	int irq;
	int version;
	unsigned char __iomem *base;
};

static struct goldfish_pipe_dev goldfish_pipe_dev;

static int goldfish_pipe_cmd_locked(struct goldfish_pipe *pipe,
				    enum PipeCmdCode cmd)
{
	pipe->command_buffer->cmd = cmd;
	/* failure by default */
	pipe->command_buffer->status = PIPE_ERROR_INVAL;
	writel(pipe->id, pipe->dev->base + PIPE_REG_CMD);
	return pipe->command_buffer->status;
}

static int goldfish_pipe_cmd(struct goldfish_pipe *pipe, enum PipeCmdCode cmd)
{
	int status;

	if (mutex_lock_interruptible(&pipe->lock))
		return PIPE_ERROR_IO;
	status = goldfish_pipe_cmd_locked(pipe, cmd);
	mutex_unlock(&pipe->lock);
	return status;
}

/*
 * This function converts an error code returned by the emulator through
 * the PIPE_REG_STATUS i/o register into a valid negative errno value.
 */
static int goldfish_pipe_error_convert(int status)
{
	switch (status) {
	case PIPE_ERROR_AGAIN:
		return -EAGAIN;
	case PIPE_ERROR_NOMEM:
		return -ENOMEM;
	case PIPE_ERROR_IO:
		return -EIO;
	default:
		return -EINVAL;
	}
}

static int pin_user_pages(unsigned long first_page,
			  unsigned long last_page,
			  unsigned int last_page_size,
			  int is_write,
			  struct page *pages[MAX_BUFFERS_PER_COMMAND],
			  unsigned int *iter_last_page_size)
{
	int ret;
	int requested_pages = ((last_page - first_page) >> PAGE_SHIFT) + 1;

	if (requested_pages > MAX_BUFFERS_PER_COMMAND) {
		requested_pages = MAX_BUFFERS_PER_COMMAND;
		*iter_last_page_size = PAGE_SIZE;
	} else {
		*iter_last_page_size = last_page_size;
	}

	ret = get_user_pages_fast(first_page, requested_pages, !is_write,
				  pages);
	if (ret <= 0)
		return -EFAULT;
	if (ret < requested_pages)
		*iter_last_page_size = PAGE_SIZE;

	return ret;
}

static void release_user_pages(struct page **pages, int pages_count,
			       int is_write, s32 consumed_size)
{
	int i;

	for (i = 0; i < pages_count; i++) {
		if (!is_write && consumed_size > 0)
			set_page_dirty(pages[i]);
		put_page(pages[i]);
	}
}

/* Populate the call parameters, merging adjacent pages together */
static void populate_rw_params(struct page **pages,
			       int pages_count,
			       unsigned long address,
			       unsigned long address_end,
			       unsigned long first_page,
			       unsigned long last_page,
			       unsigned int iter_last_page_size,
			       int is_write,
			       struct goldfish_pipe_command *command)
{
	/*
	 * Process the first page separately - it's the only page that
	 * needs special handling for its start address.
	 */
	unsigned long xaddr = page_to_phys(pages[0]);
	unsigned long xaddr_prev = xaddr;
	int buffer_idx = 0;
	int i = 1;
	int size_on_page = first_page == last_page
			? (int)(address_end - address)
			: (PAGE_SIZE - (address & ~PAGE_MASK));

	command->rw_params.ptrs[0] = (u64)(xaddr | (address & ~PAGE_MASK));
	command->rw_params.sizes[0] = size_on_page;
	for (; i < pages_count; ++i) {
		xaddr = page_to_phys(pages[i]);
		size_on_page = (i == pages_count - 1) ?
			iter_last_page_size : PAGE_SIZE;
		if (xaddr == xaddr_prev + PAGE_SIZE) {
			command->rw_params.sizes[buffer_idx] += size_on_page;
		} else {
			++buffer_idx;
			command->rw_params.ptrs[buffer_idx] = (u64)xaddr;
			command->rw_params.sizes[buffer_idx] = size_on_page;
		}
		xaddr_prev = xaddr;
	}
	command->rw_params.buffers_count = buffer_idx + 1;
}

static int transfer_max_buffers(struct goldfish_pipe *pipe,
				unsigned long address,
				unsigned long address_end,
				int is_write,
				unsigned long last_page,
				unsigned int last_page_size,
				s32 *consumed_size,
				int *status)
{
	unsigned long first_page = address & PAGE_MASK;
	unsigned int iter_last_page_size;
	int pages_count;

	/* Serialize access to the pipe command buffers */
	if (mutex_lock_interruptible(&pipe->lock))
		return -ERESTARTSYS;

	pages_count = pin_user_pages(first_page, last_page,
				     last_page_size, is_write,
				     pipe->pages, &iter_last_page_size);
	if (pages_count < 0) {
		mutex_unlock(&pipe->lock);
		return pages_count;
	}

	populate_rw_params(pipe->pages, pages_count, address, address_end,
			   first_page, last_page, iter_last_page_size,
			   is_write, pipe->command_buffer);

	/* Transfer the data */
	*status = goldfish_pipe_cmd_locked(pipe,
				is_write ? PIPE_CMD_WRITE : PIPE_CMD_READ);

	*consumed_size = pipe->command_buffer->rw_params.consumed_size;

	release_user_pages(pipe->pages, pages_count, is_write, *consumed_size);

	mutex_unlock(&pipe->lock);
	return 0;
}

static int wait_for_host_signal(struct goldfish_pipe *pipe, int is_write)
{
	u32 wake_bit = is_write ? BIT_WAKE_ON_WRITE : BIT_WAKE_ON_READ;

	set_bit(wake_bit, &pipe->flags);

	/* Tell the emulator we're going to wait for a wake event */
	goldfish_pipe_cmd(pipe,
		is_write ? PIPE_CMD_WAKE_ON_WRITE : PIPE_CMD_WAKE_ON_READ);

	while (test_bit(wake_bit, &pipe->flags)) {
		if (wait_event_interruptible(pipe->wake_queue,
					     !test_bit(wake_bit, &pipe->flags)))
			return -ERESTARTSYS;

		if (test_bit(BIT_CLOSED_ON_HOST, &pipe->flags))
			return -EIO;
	}

	return 0;
}

static ssize_t goldfish_pipe_read_write(struct file *filp,
					char __user *buffer,
					size_t bufflen,
					int is_write)
{
	struct goldfish_pipe *pipe = filp->private_data;
	int count = 0, ret = -EINVAL;
	unsigned long address, address_end, last_page;
	unsigned int last_page_size;

	/* If the emulator already closed the pipe, no need to go further */
	if (unlikely(test_bit(BIT_CLOSED_ON_HOST, &pipe->flags)))
		return -EIO;
	/* Null reads or writes succeed */
	if (unlikely(bufflen == 0))
		return 0;
	/* Check the buffer range for access */
	if (unlikely(!access_ok(is_write ? VERIFY_WRITE : VERIFY_READ,
				buffer, bufflen)))
		return -EFAULT;

	address = (unsigned long)buffer;
	address_end = address + bufflen;
	last_page = (address_end - 1) & PAGE_MASK;
	last_page_size = ((address_end - 1) & ~PAGE_MASK) + 1;

	while (address < address_end) {
		s32 consumed_size;
		int status;

		ret = transfer_max_buffers(pipe, address, address_end,
					   is_write, last_page, last_page_size,
					   &consumed_size, &status);
		if (ret < 0)
			break;

		if (consumed_size > 0) {
			/* No matter what the status is, we've transferred
			 * something.
			 */
			count += consumed_size;
			address += consumed_size;
		}
		if (status > 0)
			continue;
		if (status == 0) {
			/* EOF */
			ret = 0;
			break;
		}
		if (count > 0) {
			/*
			 * An error occurred, but we already transferred
			 * something on one of the previous iterations.
			 * Just return what we already copied and log this
			 * error.
			 */
			if (status != PIPE_ERROR_AGAIN)
				dev_err_ratelimited(pipe->dev->pdev_dev,
					"backend error %d on %s\n",
					status, is_write ? "write" : "read");
			break;
		}

		/*
		 * If the error is not PIPE_ERROR_AGAIN, or if we are in
		 * non-blocking mode, just return the error code.
		 */
		if (status != PIPE_ERROR_AGAIN ||
		    (filp->f_flags & O_NONBLOCK) != 0) {
			ret = goldfish_pipe_error_convert(status);
			break;
		}

		status = wait_for_host_signal(pipe, is_write);
		if (status < 0)
			return status;
	}

	if (count > 0)
		return count;
	return ret;
}

static ssize_t goldfish_pipe_read(struct file *filp, char __user *buffer,
				  size_t bufflen, loff_t *ppos)
{
	return goldfish_pipe_read_write(filp, buffer, bufflen,
					/* is_write */ 0);
}

static ssize_t goldfish_pipe_write(struct file *filp,
				   const char __user *buffer, size_t bufflen,
				   loff_t *ppos)
{
	/* cast away the const */
	char __user *no_const_buffer = (char __user *)buffer;

	return goldfish_pipe_read_write(filp, no_const_buffer, bufflen,
					/* is_write */ 1);
}

static __poll_t goldfish_pipe_poll(struct file *filp, poll_table *wait)
{
	struct goldfish_pipe *pipe = filp->private_data;
	__poll_t mask = 0;
	int status;

	poll_wait(filp, &pipe->wake_queue, wait);

	status = goldfish_pipe_cmd(pipe, PIPE_CMD_POLL);
	if (status < 0)
		return -ERESTARTSYS;

	if (status & PIPE_POLL_IN)
		mask |= EPOLLIN | EPOLLRDNORM;
	if (status & PIPE_POLL_OUT)
		mask |= EPOLLOUT | EPOLLWRNORM;
	if (status & PIPE_POLL_HUP)
		mask |= EPOLLHUP;
	if (test_bit(BIT_CLOSED_ON_HOST, &pipe->flags))
		mask |= EPOLLERR;

	return mask;
}
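
/*
 * A minimal guest-side sketch of waiting on a pipe (illustrative only,
 * not part of this driver; pipe_fd is assumed to be an open pipe fd):
 *
 *    #include <poll.h>
 *
 *    struct pollfd pfd = { .fd = pipe_fd, .events = POLLIN };
 *
 *    // Blocks until the host reports readable data (EPOLLIN) or an
 *    // error/hangup condition on the pipe.
 *    if (poll(&pfd, 1, -1) > 0 && (pfd.revents & POLLIN))
 *        ; // read() will make progress now
 */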

static void signalled_pipes_add_locked(struct goldfish_pipe_dev *dev,
				       u32 id, u32 flags)
{
	struct goldfish_pipe *pipe;

	if (WARN_ON(id >= dev->pipes_capacity))
		return;

	pipe = dev->pipes[id];
	if (!pipe)
		return;
	pipe->signalled_flags |= flags;

	if (pipe->prev_signalled || pipe->next_signalled ||
	    dev->first_signalled_pipe == pipe)
		return;	/* already in the list */
	pipe->next_signalled = dev->first_signalled_pipe;
	if (dev->first_signalled_pipe)
		dev->first_signalled_pipe->prev_signalled = pipe;
	dev->first_signalled_pipe = pipe;
}

static void signalled_pipes_remove_locked(struct goldfish_pipe_dev *dev,
					  struct goldfish_pipe *pipe)
{
	if (pipe->prev_signalled)
		pipe->prev_signalled->next_signalled = pipe->next_signalled;
	if (pipe->next_signalled)
		pipe->next_signalled->prev_signalled = pipe->prev_signalled;
	if (pipe == dev->first_signalled_pipe)
		dev->first_signalled_pipe = pipe->next_signalled;
	pipe->prev_signalled = NULL;
	pipe->next_signalled = NULL;
}

static struct goldfish_pipe *signalled_pipes_pop_front(
		struct goldfish_pipe_dev *dev, int *wakes)
{
	struct goldfish_pipe *pipe;
	unsigned long flags;

	spin_lock_irqsave(&dev->lock, flags);

	pipe = dev->first_signalled_pipe;
	if (pipe) {
		*wakes = pipe->signalled_flags;
		pipe->signalled_flags = 0;
		/*
		 * This is an optimized version of
		 * signalled_pipes_remove_locked()
		 * - we want to wake the sleeping pipe operations
		 * as fast as possible.
		 */
		dev->first_signalled_pipe = pipe->next_signalled;
		if (dev->first_signalled_pipe)
			dev->first_signalled_pipe->prev_signalled = NULL;
		pipe->next_signalled = NULL;
	}

	spin_unlock_irqrestore(&dev->lock, flags);
	return pipe;
}

static void goldfish_interrupt_task(unsigned long unused)
{
	/* Iterate over the signalled pipes and wake them one by one */
	struct goldfish_pipe *pipe;
	int wakes;

	while ((pipe = signalled_pipes_pop_front(&goldfish_pipe_dev, &wakes)) !=
			NULL) {
		if (wakes & PIPE_WAKE_CLOSED) {
			pipe->flags = 1 << BIT_CLOSED_ON_HOST;
		} else {
			if (wakes & PIPE_WAKE_READ)
				clear_bit(BIT_WAKE_ON_READ, &pipe->flags);
			if (wakes & PIPE_WAKE_WRITE)
				clear_bit(BIT_WAKE_ON_WRITE, &pipe->flags);
		}
		/*
		 * wake_up_interruptible() implies a write barrier, so don't
		 * explicitly add another one here.
		 */
		wake_up_interruptible(&pipe->wake_queue);
	}
}
static DECLARE_TASKLET(goldfish_interrupt_tasklet, goldfish_interrupt_task, 0);

/*
 * The general idea of the interrupt handling:
 *
 *  1. device raises an interrupt if there's at least one signalled pipe
 *  2. IRQ handler reads the signalled pipes and their count from the device
 *  3. device writes them into a shared buffer and returns the count
 *     it only resets the IRQ if it has returned all signalled pipes,
 *     otherwise it leaves it raised, so IRQ handler will be called
 *     again for the next chunk
 *  4. IRQ handler adds all returned pipes to the device's signalled pipes list
 *  5. IRQ handler launches a tasklet to process the signalled pipes from the
 *     list in a separate context
 */
static irqreturn_t goldfish_pipe_interrupt(int irq, void *dev_id)
{
	u32 count;
	u32 i;
	unsigned long flags;
	struct goldfish_pipe_dev *dev = dev_id;

	if (dev != &goldfish_pipe_dev)
		return IRQ_NONE;

	/* Request the signalled pipes from the device */
	spin_lock_irqsave(&dev->lock, flags);

	count = readl(dev->base + PIPE_REG_GET_SIGNALLED);
	if (count == 0) {
		spin_unlock_irqrestore(&dev->lock, flags);
		return IRQ_NONE;
	}
	if (count > MAX_SIGNALLED_PIPES)
		count = MAX_SIGNALLED_PIPES;

	for (i = 0; i < count; ++i)
		signalled_pipes_add_locked(dev,
			dev->buffers->signalled_pipe_buffers[i].id,
			dev->buffers->signalled_pipe_buffers[i].flags);

	spin_unlock_irqrestore(&dev->lock, flags);

	tasklet_schedule(&goldfish_interrupt_tasklet);
	return IRQ_HANDLED;
}

static int get_free_pipe_id_locked(struct goldfish_pipe_dev *dev)
{
	int id;

	for (id = 0; id < dev->pipes_capacity; ++id)
		if (!dev->pipes[id])
			return id;

	{
		/* Reallocate the array.
		 * Since get_free_pipe_id_locked runs with interrupts disabled,
		 * we don't want to make calls that could lead to sleep.
		 */
		u32 new_capacity = 2 * dev->pipes_capacity;
		struct goldfish_pipe **pipes =
			kcalloc(new_capacity, sizeof(*pipes), GFP_ATOMIC);

		if (!pipes)
			return -ENOMEM;
		memcpy(pipes, dev->pipes, sizeof(*pipes) * dev->pipes_capacity);
		kfree(dev->pipes);
		dev->pipes = pipes;
		id = dev->pipes_capacity;
		dev->pipes_capacity = new_capacity;
	}
	return id;
}

/**
 * goldfish_pipe_open - open a channel to the AVD
 * @inode: inode of device
 * @file: file struct of opener
 *
 * Create a new pipe link between the emulator and the user application.
 * Each new request produces a new pipe.
 *
 * Note: we use the pipe ID as a mux. All goldfish emulations are 32bit
 * right now so this is fine. A move to 64bit will need to revisit this
 * addressing.
 */
static int goldfish_pipe_open(struct inode *inode, struct file *file)
{
	struct goldfish_pipe_dev *dev = &goldfish_pipe_dev;
	unsigned long flags;
	int id;
	int status;

	/* Allocate new pipe kernel object */
	struct goldfish_pipe *pipe = kzalloc(sizeof(*pipe), GFP_KERNEL);

	if (!pipe)
		return -ENOMEM;

	pipe->dev = dev;
	mutex_init(&pipe->lock);
	init_waitqueue_head(&pipe->wake_queue);

	/*
	 * Command buffer needs to be allocated on its own page to make sure
	 * it is physically contiguous in host's address space.
	 */
	BUILD_BUG_ON(sizeof(struct goldfish_pipe_command) > PAGE_SIZE);
	pipe->command_buffer =
		(struct goldfish_pipe_command *)__get_free_page(GFP_KERNEL);
	if (!pipe->command_buffer) {
		status = -ENOMEM;
		goto err_pipe;
	}

	spin_lock_irqsave(&dev->lock, flags);

	id = get_free_pipe_id_locked(dev);
	if (id < 0) {
		status = id;
		goto err_id_locked;
	}

	dev->pipes[id] = pipe;
	pipe->id = id;
	pipe->command_buffer->id = id;

	/* Now tell the emulator we're opening a new pipe. */
	dev->buffers->open_command_params.rw_params_max_count =
			MAX_BUFFERS_PER_COMMAND;
	dev->buffers->open_command_params.command_buffer_ptr =
			(u64)(unsigned long)__pa(pipe->command_buffer);
	status = goldfish_pipe_cmd_locked(pipe, PIPE_CMD_OPEN);
	spin_unlock_irqrestore(&dev->lock, flags);
	if (status < 0)
		goto err_cmd;
	/* All is done, save the pipe into the file's private data field */
	file->private_data = pipe;
	return 0;

err_cmd:
	spin_lock_irqsave(&dev->lock, flags);
	dev->pipes[id] = NULL;
err_id_locked:
	spin_unlock_irqrestore(&dev->lock, flags);
	free_page((unsigned long)pipe->command_buffer);
err_pipe:
	kfree(pipe);
	return status;
}

static int goldfish_pipe_release(struct inode *inode, struct file *filp)
{
	unsigned long flags;
	struct goldfish_pipe *pipe = filp->private_data;
	struct goldfish_pipe_dev *dev = pipe->dev;

	/* The guest is closing the channel, so tell the emulator right now */
	goldfish_pipe_cmd(pipe, PIPE_CMD_CLOSE);

	spin_lock_irqsave(&dev->lock, flags);
	dev->pipes[pipe->id] = NULL;
	signalled_pipes_remove_locked(dev, pipe);
	spin_unlock_irqrestore(&dev->lock, flags);

	filp->private_data = NULL;
	free_page((unsigned long)pipe->command_buffer);
	kfree(pipe);
	return 0;
}

static const struct file_operations goldfish_pipe_fops = {
	.owner = THIS_MODULE,
	.read = goldfish_pipe_read,
	.write = goldfish_pipe_write,
	.poll = goldfish_pipe_poll,
	.open = goldfish_pipe_open,
	.release = goldfish_pipe_release,
};

static struct miscdevice goldfish_pipe_miscdev = {
	.minor = MISC_DYNAMIC_MINOR,
	.name = "goldfish_pipe",
	.fops = &goldfish_pipe_fops,
};

static void write_pa_addr(void *addr, void __iomem *portl, void __iomem *porth)
{
	const unsigned long paddr = __pa(addr);

	writel(upper_32_bits(paddr), porth);
	writel(lower_32_bits(paddr), portl);
}

static int goldfish_pipe_device_init(struct platform_device *pdev)
{
	struct goldfish_pipe_dev *dev = &goldfish_pipe_dev;
	int err = devm_request_irq(&pdev->dev, dev->irq,
				   goldfish_pipe_interrupt,
				   IRQF_SHARED, "goldfish_pipe", dev);
	if (err) {
		dev_err(&pdev->dev, "unable to allocate IRQ for v2\n");
		return err;
	}

	err = misc_register(&goldfish_pipe_miscdev);
	if (err) {
		dev_err(&pdev->dev, "unable to register v2 device\n");
		return err;
	}

	dev->pdev_dev = &pdev->dev;
	dev->first_signalled_pipe = NULL;
	dev->pipes_capacity = INITIAL_PIPES_CAPACITY;
	dev->pipes = kcalloc(dev->pipes_capacity, sizeof(*dev->pipes),
			     GFP_KERNEL);
	if (!dev->pipes)
		return -ENOMEM;

	/*
	 * We're going to pass two buffers, open_command_params and
	 * signalled_pipe_buffers, to the host. This means each of those
	 * buffers needs to be contained in a single physical page. The
	 * easiest choice is to just allocate a page and place the buffers
	 * in it.
	 */
	BUILD_BUG_ON(sizeof(struct goldfish_pipe_dev_buffers) > PAGE_SIZE);
	dev->buffers = (struct goldfish_pipe_dev_buffers *)
		__get_free_page(GFP_KERNEL);
	if (!dev->buffers) {
		kfree(dev->pipes);
		return -ENOMEM;
	}

	/* Send the buffer addresses to the host */
	write_pa_addr(&dev->buffers->signalled_pipe_buffers,
		      dev->base + PIPE_REG_SIGNAL_BUFFER,
		      dev->base + PIPE_REG_SIGNAL_BUFFER_HIGH);

	writel(MAX_SIGNALLED_PIPES,
	       dev->base + PIPE_REG_SIGNAL_BUFFER_COUNT);

	write_pa_addr(&dev->buffers->open_command_params,
		      dev->base + PIPE_REG_OPEN_BUFFER,
		      dev->base + PIPE_REG_OPEN_BUFFER_HIGH);

	return 0;
}

static void goldfish_pipe_device_deinit(struct platform_device *pdev)
{
	misc_deregister(&goldfish_pipe_miscdev);
	kfree(goldfish_pipe_dev.pipes);
	free_page((unsigned long)goldfish_pipe_dev.buffers);
}

static int goldfish_pipe_probe(struct platform_device *pdev)
{
	int err;
	struct resource *r;
	struct goldfish_pipe_dev *dev = &goldfish_pipe_dev;

	/* not thread safe, but this should not happen */
	WARN_ON(dev->base);

	spin_lock_init(&dev->lock);

	r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!r || resource_size(r) < PAGE_SIZE) {
		dev_err(&pdev->dev, "can't allocate i/o page\n");
		return -EINVAL;
	}
	dev->base = devm_ioremap(&pdev->dev, r->start, PAGE_SIZE);
	if (!dev->base) {
		dev_err(&pdev->dev, "ioremap failed\n");
		return -EINVAL;
	}

	r = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
	if (!r) {
		err = -EINVAL;
		goto error;
	}
	dev->irq = r->start;

	/*
	 * Exchange the versions with the host device
	 *
	 * Note: v1 driver used to not report its version, so we write it
	 * before reading device version back: this allows the host
	 * implementation to detect the old driver (if there was no version
	 * write before read).
	 */
	writel((u32)PIPE_DRIVER_VERSION, dev->base + PIPE_REG_VERSION);
	dev->version = readl(dev->base + PIPE_REG_VERSION);
	if (WARN_ON(dev->version < PIPE_CURRENT_DEVICE_VERSION))
		return -EINVAL;

	err = goldfish_pipe_device_init(pdev);
	if (!err)
		return 0;

error:
	dev->base = NULL;
	return err;
}

static int goldfish_pipe_remove(struct platform_device *pdev)
{
	struct goldfish_pipe_dev *dev = &goldfish_pipe_dev;

	goldfish_pipe_device_deinit(pdev);
	dev->base = NULL;
	return 0;
}

static const struct acpi_device_id goldfish_pipe_acpi_match[] = {
	{ "GFSH0003", 0 },
	{ },
};
MODULE_DEVICE_TABLE(acpi, goldfish_pipe_acpi_match);

static const struct of_device_id goldfish_pipe_of_match[] = {
	{ .compatible = "google,android-pipe", },
	{},
};
MODULE_DEVICE_TABLE(of, goldfish_pipe_of_match);

static struct platform_driver goldfish_pipe_driver = {
	.probe = goldfish_pipe_probe,
	.remove = goldfish_pipe_remove,
	.driver = {
		.name = "goldfish_pipe",
		.of_match_table = goldfish_pipe_of_match,
		.acpi_match_table = ACPI_PTR(goldfish_pipe_acpi_match),
	}
};

module_platform_driver(goldfish_pipe_driver);
MODULE_AUTHOR("David Turner <digit@google.com>");
MODULE_LICENSE("GPL v2");