// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2012 Intel, Inc.
 * Copyright (C) 2013 Intel, Inc.
 * Copyright (C) 2014 Linaro Limited
 * Copyright (C) 2011-2016 Google, Inc.
 *
 * This software is licensed under the terms of the GNU General Public
 * License version 2, as published by the Free Software Foundation, and
 * may be copied, distributed, and modified under those terms.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 */

/* This source file contains the implementation of a special device driver
 * that intends to provide a *very* fast communication channel between the
 * guest system and the QEMU emulator.
 *
 * Usage from the guest is simply the following (error handling simplified):
 *
 *	int fd = open("/dev/qemu_pipe", O_RDWR);
 *	.... write() or read() through the pipe.
 *
 * This driver doesn't deal with the exact protocol used during the session.
 * It is intended to be as simple as something like:
 *
 *	// do this _just_ after opening the fd to connect to a specific
 *	// emulator service.
 *	const char *msg = "<pipename>";
 *	if (write(fd, msg, strlen(msg) + 1) < 0) {
 *		... could not connect to <pipename> service
 *		close(fd);
 *	}
 *
 *	// after this, simply read() and write() to communicate with the
 *	// service. Exact protocol details left as an exercise to the reader.
 *
 * This driver is very fast because it doesn't copy any data through
 * intermediate buffers, since the emulator is capable of translating
 * guest user addresses into host ones.
 *
 * Note that we must however ensure that each user page involved in the
 * exchange is properly mapped during a transfer.
 */
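
/*
 * Purely illustrative guest-side sketch of the handshake described above.
 * The "pingpong" service name and the buffer sizes are made up, and a real
 * client would add full error handling:
 *
 *	int fd = open("/dev/qemu_pipe", O_RDWR);
 *	const char *service = "pingpong";
 *
 *	// connect to the (hypothetical) service right after open()
 *	if (fd < 0 || write(fd, service, strlen(service) + 1) < 0) {
 *		close(fd);
 *		return -1;
 *	}
 *
 *	// from now on the fd behaves like a plain byte stream to the host
 *	char buf[128] = "hello";
 *	write(fd, buf, 5);
 *	ssize_t n = read(fd, buf, sizeof(buf));
 */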

#include <linux/module.h>
#include <linux/mod_devicetable.h>
#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <linux/spinlock.h>
#include <linux/miscdevice.h>
#include <linux/platform_device.h>
#include <linux/poll.h>
#include <linux/sched.h>
#include <linux/bitops.h>
#include <linux/slab.h>
#include <linux/io.h>
#include <linux/goldfish.h>
#include <linux/dma-mapping.h>
#include <linux/mm.h>
#include <linux/acpi.h>
#include <linux/bug.h>
#include "goldfish_pipe_qemu.h"

/*
 * Update this when something changes in the driver's behavior so the host
 * can benefit from knowing it.
 */
enum {
	PIPE_DRIVER_VERSION = 2,
	PIPE_CURRENT_DEVICE_VERSION = 2
};

enum {
	MAX_BUFFERS_PER_COMMAND = 336,
	MAX_SIGNALLED_PIPES = 64,
	INITIAL_PIPES_CAPACITY = 64
};

struct goldfish_pipe_dev;
struct goldfish_pipe;
struct goldfish_pipe_command;

/* A per-pipe command structure, shared with the host */
struct goldfish_pipe_command {
	s32 cmd;	/* PipeCmdCode, guest -> host */
	s32 id;		/* pipe id, guest -> host */
	s32 status;	/* command execution status, host -> guest */
	s32 reserved;	/* to pad to 64-bit boundary */
	union {
		/* Parameters for PIPE_CMD_{READ,WRITE} */
		struct {
			/* number of buffers, guest -> host */
			u32 buffers_count;
			/* number of consumed bytes, host -> guest */
			s32 consumed_size;
			/* buffer pointers, guest -> host */
			u64 ptrs[MAX_BUFFERS_PER_COMMAND];
			/* buffer sizes, guest -> host */
			u32 sizes[MAX_BUFFERS_PER_COMMAND];
		} rw_params;
	};
};
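
/*
 * Rough sizing note (an estimate assuming 4 KiB pages and no implicit
 * padding): the four s32 header fields take 16 bytes and rw_params takes
 * 2 * 4 + 336 * (8 + 4) = 4040 bytes, so the whole command is about
 * 4056 bytes - which is why MAX_BUFFERS_PER_COMMAND is 336 and why
 * goldfish_pipe_open() asserts that the structure fits in a single page.
 */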

/* Information about a single signalled pipe */
struct signalled_pipe_buffer {
	u32 id;
	u32 flags;
};

/* Parameters for the PIPE_CMD_OPEN command */
struct open_command_param {
	u64 command_buffer_ptr;
	u32 rw_params_max_count;
};

/* Device-level set of buffers shared with the host */
struct goldfish_pipe_dev_buffers {
	struct open_command_param open_command_params;
	struct signalled_pipe_buffer
		signalled_pipe_buffers[MAX_SIGNALLED_PIPES];
};

/* This data type models a given pipe instance */
struct goldfish_pipe {
	/* pipe ID - index into goldfish_pipe_dev::pipes array */
	u32 id;

	/* The wake flags this pipe is waiting for
	 * Note: not protected with any lock, uses atomic operations
	 * and barriers to make it thread-safe.
	 */
	unsigned long flags;

	/* wake flags the host has signalled,
	 * protected by goldfish_pipe_dev::lock
	 */
	unsigned long signalled_flags;

	/* A pointer to the command buffer */
	struct goldfish_pipe_command *command_buffer;

	/* doubly linked list of signalled pipes, protected by
	 * goldfish_pipe_dev::lock
	 */
	struct goldfish_pipe *prev_signalled;
	struct goldfish_pipe *next_signalled;

	/*
	 * A pipe's own lock. Protects the following:
	 *  - *command_buffer - makes sure a command can safely write its
	 *    parameters to the host and read the results back.
	 */
	struct mutex lock;

	/* A wake queue for sleeping until the host signals an event */
	wait_queue_head_t wake_queue;

	/* Pointer to the parent goldfish_pipe_dev instance */
	struct goldfish_pipe_dev *dev;
};

/* The global driver data. Holds a reference to the i/o page used to
 * communicate with the emulator, and a wake queue for blocked tasks
 * waiting to be awoken.
 */
struct goldfish_pipe_dev {
	/*
	 * Global device spinlock. Protects the following members:
	 *  - pipes, pipes_capacity
	 *  - [*pipes, *pipes + pipes_capacity) - array data
	 *  - first_signalled_pipe,
	 *      goldfish_pipe::prev_signalled,
	 *      goldfish_pipe::next_signalled,
	 *      goldfish_pipe::signalled_flags - all signalled-related fields,
	 *      in all allocated pipes
	 *  - open_command_params - PIPE_CMD_OPEN-related buffers
	 *
	 * It looks like a lot of different fields, but the trick is that
	 * the only operation that happens often is the signalled pipes array
	 * manipulation. That's why it's OK for now to keep the rest of the
	 * fields under the same lock. If we notice too much contention because
	 * of PIPE_CMD_OPEN, then we should add a separate lock there.
	 */
	spinlock_t lock;

	/*
	 * Array of the pipes of |pipes_capacity| elements,
	 * indexed by goldfish_pipe::id
	 */
	struct goldfish_pipe **pipes;
	u32 pipes_capacity;

	/* Pointers to the buffers the host uses to interact with this driver */
	struct goldfish_pipe_dev_buffers *buffers;

	/* Head of a doubly linked list of signalled pipes */
	struct goldfish_pipe *first_signalled_pipe;

	/* ptr to platform device's device struct */
	struct device *pdev_dev;

	/* Some device-specific data */
	int irq;
	int version;
	unsigned char __iomem *base;
};

struct goldfish_pipe_dev goldfish_pipe_dev;

static int goldfish_cmd_locked(struct goldfish_pipe *pipe, enum PipeCmdCode cmd)
{
	pipe->command_buffer->cmd = cmd;
	/* failure by default */
	pipe->command_buffer->status = PIPE_ERROR_INVAL;
	writel(pipe->id, pipe->dev->base + PIPE_REG_CMD);
	return pipe->command_buffer->status;
}

static int goldfish_cmd(struct goldfish_pipe *pipe, enum PipeCmdCode cmd)
{
	int status;

	if (mutex_lock_interruptible(&pipe->lock))
		return PIPE_ERROR_IO;
	status = goldfish_cmd_locked(pipe, cmd);
	mutex_unlock(&pipe->lock);
	return status;
}

/*
 * This function converts an error code returned by the emulator through
 * the PIPE_REG_STATUS i/o register into a valid negative errno value.
 */
static int goldfish_pipe_error_convert(int status)
{
	switch (status) {
	case PIPE_ERROR_AGAIN:
		return -EAGAIN;
	case PIPE_ERROR_NOMEM:
		return -ENOMEM;
	case PIPE_ERROR_IO:
		return -EIO;
	default:
		return -EINVAL;
	}
}

static int pin_user_pages(unsigned long first_page, unsigned long last_page,
			  unsigned int last_page_size, int is_write,
			  struct page *pages[MAX_BUFFERS_PER_COMMAND],
			  unsigned int *iter_last_page_size)
{
	int ret;
	int requested_pages = ((last_page - first_page) >> PAGE_SHIFT) + 1;

	if (requested_pages > MAX_BUFFERS_PER_COMMAND) {
		requested_pages = MAX_BUFFERS_PER_COMMAND;
		*iter_last_page_size = PAGE_SIZE;
	} else {
		*iter_last_page_size = last_page_size;
	}

	ret = get_user_pages_fast(first_page, requested_pages, !is_write,
				  pages);
	if (ret <= 0)
		return -EFAULT;
	if (ret < requested_pages)
		*iter_last_page_size = PAGE_SIZE;

	return ret;
}

static void release_user_pages(struct page **pages, int pages_count,
			       int is_write, s32 consumed_size)
{
	int i;

	for (i = 0; i < pages_count; i++) {
		if (!is_write && consumed_size > 0)
			set_page_dirty(pages[i]);
		put_page(pages[i]);
	}
}

/* Populate the call parameters, merging adjacent pages together */
static void populate_rw_params(
	struct page **pages, int pages_count,
	unsigned long address, unsigned long address_end,
	unsigned long first_page, unsigned long last_page,
	unsigned int iter_last_page_size, int is_write,
	struct goldfish_pipe_command *command)
{
	/*
	 * Process the first page separately - it's the only page that
	 * needs special handling for its start address.
	 */
	unsigned long xaddr = page_to_phys(pages[0]);
	unsigned long xaddr_prev = xaddr;
	int buffer_idx = 0;
	int i = 1;
	int size_on_page = first_page == last_page
			? (int)(address_end - address)
			: (PAGE_SIZE - (address & ~PAGE_MASK));
	command->rw_params.ptrs[0] = (u64)(xaddr | (address & ~PAGE_MASK));
	command->rw_params.sizes[0] = size_on_page;
	for (; i < pages_count; ++i) {
		xaddr = page_to_phys(pages[i]);
		size_on_page = (i == pages_count - 1) ?
			iter_last_page_size : PAGE_SIZE;
		if (xaddr == xaddr_prev + PAGE_SIZE) {
			command->rw_params.sizes[buffer_idx] += size_on_page;
		} else {
			++buffer_idx;
			command->rw_params.ptrs[buffer_idx] = (u64)xaddr;
			command->rw_params.sizes[buffer_idx] = size_on_page;
		}
		xaddr_prev = xaddr;
	}
	command->rw_params.buffers_count = buffer_idx + 1;
}
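
/*
 * Worked example (illustrative numbers only): for a transfer that spans
 * three user pages where the first two happen to be physically contiguous
 * and the third is not, populate_rw_params() emits two host buffers:
 *
 *	ptrs[0]  = phys(page0) | (address & ~PAGE_MASK)
 *	sizes[0] = bytes_in_page0 + PAGE_SIZE
 *	ptrs[1]  = phys(page2)
 *	sizes[1] = iter_last_page_size
 *	buffers_count = 2
 */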

static int transfer_max_buffers(struct goldfish_pipe *pipe,
	unsigned long address, unsigned long address_end, int is_write,
	unsigned long last_page, unsigned int last_page_size,
	s32 *consumed_size, int *status)
{
	static struct page *pages[MAX_BUFFERS_PER_COMMAND];
	unsigned long first_page = address & PAGE_MASK;
	unsigned int iter_last_page_size;
	int pages_count = pin_user_pages(first_page, last_page,
					 last_page_size, is_write,
					 pages, &iter_last_page_size);

	if (pages_count < 0)
		return pages_count;

	/* Serialize access to the pipe command buffers */
	if (mutex_lock_interruptible(&pipe->lock))
		return -ERESTARTSYS;

	populate_rw_params(pages, pages_count, address, address_end,
			   first_page, last_page, iter_last_page_size,
			   is_write, pipe->command_buffer);

	/* Transfer the data */
	*status = goldfish_cmd_locked(pipe,
				is_write ? PIPE_CMD_WRITE : PIPE_CMD_READ);

	*consumed_size = pipe->command_buffer->rw_params.consumed_size;

	release_user_pages(pages, pages_count, is_write, *consumed_size);

	mutex_unlock(&pipe->lock);

	return 0;
}

static int wait_for_host_signal(struct goldfish_pipe *pipe, int is_write)
{
	u32 wakeBit = is_write ? BIT_WAKE_ON_WRITE : BIT_WAKE_ON_READ;

	set_bit(wakeBit, &pipe->flags);

	/* Tell the emulator we're going to wait for a wake event */
	(void)goldfish_cmd(pipe,
		is_write ? PIPE_CMD_WAKE_ON_WRITE : PIPE_CMD_WAKE_ON_READ);

	while (test_bit(wakeBit, &pipe->flags)) {
		if (wait_event_interruptible(pipe->wake_queue,
					     !test_bit(wakeBit, &pipe->flags)))
			return -ERESTARTSYS;

		if (test_bit(BIT_CLOSED_ON_HOST, &pipe->flags))
			return -EIO;
	}

	return 0;
}

static ssize_t goldfish_pipe_read_write(struct file *filp,
	char __user *buffer, size_t bufflen, int is_write)
{
	struct goldfish_pipe *pipe = filp->private_data;
	int count = 0, ret = -EINVAL;
	unsigned long address, address_end, last_page;
	unsigned int last_page_size;

	/* If the emulator already closed the pipe, no need to go further */
	if (unlikely(test_bit(BIT_CLOSED_ON_HOST, &pipe->flags)))
		return -EIO;
	/* Null reads or writes succeed */
	if (unlikely(bufflen == 0))
		return 0;
	/* Check the buffer range for access */
	if (unlikely(!access_ok(is_write ? VERIFY_WRITE : VERIFY_READ,
				buffer, bufflen)))
		return -EFAULT;

	address = (unsigned long)buffer;
	address_end = address + bufflen;
	last_page = (address_end - 1) & PAGE_MASK;
	last_page_size = ((address_end - 1) & ~PAGE_MASK) + 1;

	while (address < address_end) {
		s32 consumed_size;
		int status;

		ret = transfer_max_buffers(pipe, address, address_end,
					   is_write, last_page,
					   last_page_size, &consumed_size,
					   &status);
		if (ret < 0)
			break;

		if (consumed_size > 0) {
			/* No matter what the status is, we've transferred
			 * something.
			 */
			count += consumed_size;
			address += consumed_size;
		}
		if (status > 0)
			continue;
		if (status == 0) {
			/* EOF */
			ret = 0;
			break;
		}
		if (count > 0) {
			/*
			 * An error occurred, but we already transferred
			 * something on one of the previous iterations.
			 * Just return what we already copied and log this
			 * error.
			 */
			if (status != PIPE_ERROR_AGAIN)
				dev_err_ratelimited(pipe->dev->pdev_dev,
					"backend error %d on %s\n",
					status, is_write ? "write" : "read");
			break;
		}

		/*
		 * If the error is not PIPE_ERROR_AGAIN, or if we are in
		 * non-blocking mode, just return the error code.
		 */
		if (status != PIPE_ERROR_AGAIN ||
			(filp->f_flags & O_NONBLOCK) != 0) {
			ret = goldfish_pipe_error_convert(status);
			break;
		}

		status = wait_for_host_signal(pipe, is_write);
		if (status < 0)
			return status;
	}

	if (count > 0)
		return count;
	return ret;
}

static ssize_t goldfish_pipe_read(struct file *filp, char __user *buffer,
				  size_t bufflen, loff_t *ppos)
{
	return goldfish_pipe_read_write(filp, buffer, bufflen,
					/* is_write */ 0);
}

static ssize_t goldfish_pipe_write(struct file *filp,
				   const char __user *buffer, size_t bufflen,
				   loff_t *ppos)
{
	return goldfish_pipe_read_write(filp,
			/* cast away the const */(char __user *)buffer,
			bufflen, /* is_write */ 1);
}

static __poll_t goldfish_pipe_poll(struct file *filp, poll_table *wait)
{
	struct goldfish_pipe *pipe = filp->private_data;
	__poll_t mask = 0;
	int status;

	poll_wait(filp, &pipe->wake_queue, wait);

	status = goldfish_cmd(pipe, PIPE_CMD_POLL);
	if (status < 0)
		return -ERESTARTSYS;

	if (status & PIPE_POLL_IN)
		mask |= EPOLLIN | EPOLLRDNORM;
	if (status & PIPE_POLL_OUT)
		mask |= EPOLLOUT | EPOLLWRNORM;
	if (status & PIPE_POLL_HUP)
		mask |= EPOLLHUP;
	if (test_bit(BIT_CLOSED_ON_HOST, &pipe->flags))
		mask |= EPOLLERR;

	return mask;
}
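
/*
 * For illustration (not part of the driver): from the guest, a pipe fd can
 * be multiplexed with poll(2)/select(2) like any other character device:
 *
 *	struct pollfd pfd = { .fd = fd, .events = POLLIN };
 *
 *	if (poll(&pfd, 1, timeout_ms) > 0 && (pfd.revents & POLLIN))
 *		read(fd, buf, sizeof(buf));
 */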

static void signalled_pipes_add_locked(struct goldfish_pipe_dev *dev,
				       u32 id, u32 flags)
{
	struct goldfish_pipe *pipe;

	if (WARN_ON(id >= dev->pipes_capacity))
		return;

	pipe = dev->pipes[id];
	if (!pipe)
		return;
	pipe->signalled_flags |= flags;

	if (pipe->prev_signalled || pipe->next_signalled ||
		dev->first_signalled_pipe == pipe)
		return;	/* already in the list */
	pipe->next_signalled = dev->first_signalled_pipe;
	if (dev->first_signalled_pipe)
		dev->first_signalled_pipe->prev_signalled = pipe;
	dev->first_signalled_pipe = pipe;
}

static void signalled_pipes_remove_locked(struct goldfish_pipe_dev *dev,
					  struct goldfish_pipe *pipe)
{
	if (pipe->prev_signalled)
		pipe->prev_signalled->next_signalled = pipe->next_signalled;
	if (pipe->next_signalled)
		pipe->next_signalled->prev_signalled = pipe->prev_signalled;
	if (pipe == dev->first_signalled_pipe)
		dev->first_signalled_pipe = pipe->next_signalled;
	pipe->prev_signalled = NULL;
	pipe->next_signalled = NULL;
}

static struct goldfish_pipe *signalled_pipes_pop_front(
		struct goldfish_pipe_dev *dev, int *wakes)
{
	struct goldfish_pipe *pipe;
	unsigned long flags;

	spin_lock_irqsave(&dev->lock, flags);

	pipe = dev->first_signalled_pipe;
	if (pipe) {
		*wakes = pipe->signalled_flags;
		pipe->signalled_flags = 0;
		/*
		 * This is an optimized version of
		 * signalled_pipes_remove_locked() - we want it to be as
		 * fast as possible so the sleeping pipe operations are
		 * woken sooner.
		 */
		dev->first_signalled_pipe = pipe->next_signalled;
		if (dev->first_signalled_pipe)
			dev->first_signalled_pipe->prev_signalled = NULL;
		pipe->next_signalled = NULL;
	}

	spin_unlock_irqrestore(&dev->lock, flags);
	return pipe;
}

static void goldfish_interrupt_task(unsigned long unused)
{
	/* Iterate over the signalled pipes and wake them one by one */
	struct goldfish_pipe *pipe;
	int wakes;

	while ((pipe = signalled_pipes_pop_front(&goldfish_pipe_dev, &wakes)) !=
			NULL) {
		if (wakes & PIPE_WAKE_CLOSED) {
			pipe->flags = 1 << BIT_CLOSED_ON_HOST;
		} else {
			if (wakes & PIPE_WAKE_READ)
				clear_bit(BIT_WAKE_ON_READ, &pipe->flags);
			if (wakes & PIPE_WAKE_WRITE)
				clear_bit(BIT_WAKE_ON_WRITE, &pipe->flags);
		}
		/*
		 * wake_up_interruptible() implies a write barrier, so don't
		 * explicitly add another one here.
		 */
		wake_up_interruptible(&pipe->wake_queue);
	}
}
static DECLARE_TASKLET(goldfish_interrupt_tasklet, goldfish_interrupt_task, 0);

/*
 * The general idea of the interrupt handling:
 *
 * 1. device raises an interrupt if there's at least one signalled pipe
 * 2. IRQ handler reads the signalled pipes and their count from the device
 * 3. device writes them into a shared buffer and returns the count
 *    it only resets the IRQ if it has returned all signalled pipes,
 *    otherwise it leaves it raised, so IRQ handler will be called
 *    again for the next chunk
 * 4. IRQ handler adds all returned pipes to the device's signalled pipes list
 * 5. IRQ handler launches a tasklet to process the signalled pipes from the
 *    list in a separate context
 */
static irqreturn_t goldfish_pipe_interrupt(int irq, void *dev_id)
{
	u32 count;
	u32 i;
	unsigned long flags;
	struct goldfish_pipe_dev *dev = dev_id;

	if (dev != &goldfish_pipe_dev)
		return IRQ_NONE;

	/* Request the signalled pipes from the device */
	spin_lock_irqsave(&dev->lock, flags);

	count = readl(dev->base + PIPE_REG_GET_SIGNALLED);
	if (count == 0) {
		spin_unlock_irqrestore(&dev->lock, flags);
		return IRQ_NONE;
	}
	if (count > MAX_SIGNALLED_PIPES)
		count = MAX_SIGNALLED_PIPES;

	for (i = 0; i < count; ++i)
		signalled_pipes_add_locked(dev,
			dev->buffers->signalled_pipe_buffers[i].id,
			dev->buffers->signalled_pipe_buffers[i].flags);

	spin_unlock_irqrestore(&dev->lock, flags);

	tasklet_schedule(&goldfish_interrupt_tasklet);
	return IRQ_HANDLED;
}

static int get_free_pipe_id_locked(struct goldfish_pipe_dev *dev)
{
	int id;

	for (id = 0; id < dev->pipes_capacity; ++id)
		if (!dev->pipes[id])
			return id;

	{
		/* Reallocate the array.
		 * Since get_free_pipe_id_locked runs with interrupts disabled,
		 * we don't want to make calls that could lead to sleep.
		 */
		u32 new_capacity = 2 * dev->pipes_capacity;
		struct goldfish_pipe **pipes =
			kcalloc(new_capacity, sizeof(*pipes), GFP_ATOMIC);
		if (!pipes)
			return -ENOMEM;
		memcpy(pipes, dev->pipes, sizeof(*pipes) * dev->pipes_capacity);
		kfree(dev->pipes);
		dev->pipes = pipes;
		id = dev->pipes_capacity;
		dev->pipes_capacity = new_capacity;
	}
	return id;
}
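
/*
 * For example: with INITIAL_PIPES_CAPACITY of 64, opening a 65th concurrent
 * pipe doubles the array to 128 slots; the allocation uses GFP_ATOMIC because
 * the caller holds dev->lock with interrupts disabled.
 */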

/**
 * goldfish_pipe_open - open a channel to the AVD
 * @inode: inode of device
 * @file: file struct of opener
 *
 * Create a new pipe link between the emulator and the user application.
 * Each new request produces a new pipe.
 *
 * Note: we use the pipe ID as a mux. All goldfish emulations are 32bit
 * right now so this is fine. A move to 64bit will need to revisit this
 * addressing.
 */
static int goldfish_pipe_open(struct inode *inode, struct file *file)
{
	struct goldfish_pipe_dev *dev = &goldfish_pipe_dev;
	unsigned long flags;
	int id;
	int status;

	/* Allocate new pipe kernel object */
	struct goldfish_pipe *pipe = kzalloc(sizeof(*pipe), GFP_KERNEL);

	if (!pipe)
		return -ENOMEM;

	pipe->dev = dev;
	mutex_init(&pipe->lock);
	init_waitqueue_head(&pipe->wake_queue);

	/*
	 * Command buffer needs to be allocated on its own page to make sure
	 * it is physically contiguous in host's address space.
	 */
	BUILD_BUG_ON(sizeof(struct goldfish_pipe_command) > PAGE_SIZE);
	pipe->command_buffer =
		(struct goldfish_pipe_command *)__get_free_page(GFP_KERNEL);
	if (!pipe->command_buffer) {
		status = -ENOMEM;
		goto err_pipe;
	}

	spin_lock_irqsave(&dev->lock, flags);

	id = get_free_pipe_id_locked(dev);
	if (id < 0) {
		status = id;
		goto err_id_locked;
	}

	dev->pipes[id] = pipe;
	pipe->id = id;
	pipe->command_buffer->id = id;

	/* Now tell the emulator we're opening a new pipe. */
	dev->buffers->open_command_params.rw_params_max_count =
			MAX_BUFFERS_PER_COMMAND;
	dev->buffers->open_command_params.command_buffer_ptr =
			(u64)(unsigned long)__pa(pipe->command_buffer);
	status = goldfish_cmd_locked(pipe, PIPE_CMD_OPEN);
	spin_unlock_irqrestore(&dev->lock, flags);
	if (status < 0)
		goto err_cmd;
	/* All is done, save the pipe into the file's private data field */
	file->private_data = pipe;
	return 0;

err_cmd:
	spin_lock_irqsave(&dev->lock, flags);
	dev->pipes[id] = NULL;
err_id_locked:
	spin_unlock_irqrestore(&dev->lock, flags);
	free_page((unsigned long)pipe->command_buffer);
err_pipe:
	kfree(pipe);
	return status;
}

static int goldfish_pipe_release(struct inode *inode, struct file *filp)
{
	unsigned long flags;
	struct goldfish_pipe *pipe = filp->private_data;
	struct goldfish_pipe_dev *dev = pipe->dev;

	/* The guest is closing the channel, so tell the emulator right now */
	(void)goldfish_cmd(pipe, PIPE_CMD_CLOSE);

	spin_lock_irqsave(&dev->lock, flags);
	dev->pipes[pipe->id] = NULL;
	signalled_pipes_remove_locked(dev, pipe);
	spin_unlock_irqrestore(&dev->lock, flags);

	filp->private_data = NULL;
	free_page((unsigned long)pipe->command_buffer);
	kfree(pipe);
	return 0;
}

static const struct file_operations goldfish_pipe_fops = {
	.owner = THIS_MODULE,
	.read = goldfish_pipe_read,
	.write = goldfish_pipe_write,
	.poll = goldfish_pipe_poll,
	.open = goldfish_pipe_open,
	.release = goldfish_pipe_release,
};

static struct miscdevice goldfish_pipe_miscdev = {
	.minor = MISC_DYNAMIC_MINOR,
	.name = "goldfish_pipe",
	.fops = &goldfish_pipe_fops,
};

static void write_pa_addr(void *addr, void __iomem *portl, void __iomem *porth)
{
	const unsigned long paddr = __pa(addr);

	writel(upper_32_bits(paddr), porth);
	writel(lower_32_bits(paddr), portl);
}

static int goldfish_pipe_device_init(struct platform_device *pdev)
{
	struct goldfish_pipe_dev *dev = &goldfish_pipe_dev;
	int err = devm_request_irq(&pdev->dev, dev->irq,
				   goldfish_pipe_interrupt,
				   IRQF_SHARED, "goldfish_pipe", dev);
	if (err) {
		dev_err(&pdev->dev, "unable to allocate IRQ for v2\n");
		return err;
	}

	err = misc_register(&goldfish_pipe_miscdev);
	if (err) {
		dev_err(&pdev->dev, "unable to register v2 device\n");
		return err;
	}

	dev->pdev_dev = &pdev->dev;
	dev->first_signalled_pipe = NULL;
	dev->pipes_capacity = INITIAL_PIPES_CAPACITY;
	dev->pipes = kcalloc(dev->pipes_capacity, sizeof(*dev->pipes),
			     GFP_KERNEL);
	if (!dev->pipes)
		return -ENOMEM;

	/*
	 * We're going to pass two buffers, open_command_params and
	 * signalled_pipe_buffers, to the host. This means each of those
	 * buffers needs to be contained in a single physical page. The
	 * easiest choice is to just allocate a page and place the buffers
	 * in it.
	 */
	BUILD_BUG_ON(sizeof(struct goldfish_pipe_dev_buffers) > PAGE_SIZE);
	dev->buffers = (struct goldfish_pipe_dev_buffers *)
		__get_free_page(GFP_KERNEL);
	if (!dev->buffers) {
		kfree(dev->pipes);
		return -ENOMEM;
	}

	/* Send the buffer addresses to the host */
	write_pa_addr(&dev->buffers->signalled_pipe_buffers,
		      dev->base + PIPE_REG_SIGNAL_BUFFER,
		      dev->base + PIPE_REG_SIGNAL_BUFFER_HIGH);

	writel(MAX_SIGNALLED_PIPES,
	       dev->base + PIPE_REG_SIGNAL_BUFFER_COUNT);

	write_pa_addr(&dev->buffers->open_command_params,
		      dev->base + PIPE_REG_OPEN_BUFFER,
		      dev->base + PIPE_REG_OPEN_BUFFER_HIGH);

	return 0;
}

static void goldfish_pipe_device_deinit(struct platform_device *pdev)
{
	misc_deregister(&goldfish_pipe_miscdev);
	kfree(goldfish_pipe_dev.pipes);
	free_page((unsigned long)goldfish_pipe_dev.buffers);
}

static int goldfish_pipe_probe(struct platform_device *pdev)
{
	int err;
	struct resource *r;
	struct goldfish_pipe_dev *dev = &goldfish_pipe_dev;

	/* not thread safe, but this should not happen */
	WARN_ON(dev->base != NULL);

	spin_lock_init(&dev->lock);

	r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (r == NULL || resource_size(r) < PAGE_SIZE) {
		dev_err(&pdev->dev, "can't allocate i/o page\n");
		return -EINVAL;
	}
	dev->base = devm_ioremap(&pdev->dev, r->start, PAGE_SIZE);
	if (dev->base == NULL) {
		dev_err(&pdev->dev, "ioremap failed\n");
		return -EINVAL;
	}

	r = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
	if (r == NULL) {
		err = -EINVAL;
		goto error;
	}
	dev->irq = r->start;

	/*
	 * Exchange the versions with the host device
	 *
	 * Note: the v1 driver did not report its version, so we write ours
	 * before reading the device version back: this allows the host
	 * implementation to detect the old driver (if there was no version
	 * write before read).
	 */
	writel((u32)PIPE_DRIVER_VERSION, dev->base + PIPE_REG_VERSION);
	dev->version = readl(dev->base + PIPE_REG_VERSION);
	if (WARN_ON(dev->version < PIPE_CURRENT_DEVICE_VERSION))
		return -EINVAL;

	err = goldfish_pipe_device_init(pdev);
	if (!err)
		return 0;

error:
	dev->base = NULL;
	return err;
}

static int goldfish_pipe_remove(struct platform_device *pdev)
{
	struct goldfish_pipe_dev *dev = &goldfish_pipe_dev;

	goldfish_pipe_device_deinit(pdev);
	dev->base = NULL;
	return 0;
}

static const struct acpi_device_id goldfish_pipe_acpi_match[] = {
	{ "GFSH0003", 0 },
	{ },
};
MODULE_DEVICE_TABLE(acpi, goldfish_pipe_acpi_match);

static const struct of_device_id goldfish_pipe_of_match[] = {
	{ .compatible = "google,android-pipe", },
	{},
};
MODULE_DEVICE_TABLE(of, goldfish_pipe_of_match);

static struct platform_driver goldfish_pipe_driver = {
	.probe = goldfish_pipe_probe,
	.remove = goldfish_pipe_remove,
	.driver = {
		.name = "goldfish_pipe",
		.of_match_table = goldfish_pipe_of_match,
		.acpi_match_table = ACPI_PTR(goldfish_pipe_acpi_match),
	}
};

module_platform_driver(goldfish_pipe_driver);
MODULE_AUTHOR("David Turner <digit@google.com>");
MODULE_LICENSE("GPL v2");