Commit | Line | Data |
---|---|---|
c3c4e307 | 1 | // SPDX-License-Identifier: GPL-2.0 |
c89f2750 | 2 | /* |
c89f2750 DDT |
3 | * Copyright (C) 2012 Intel, Inc. |
4 | * Copyright (C) 2013 Intel, Inc. | |
2f3be882 | 5 | * Copyright (C) 2014 Linaro Limited |
726ea1a8 | 6 | * Copyright (C) 2011-2016 Google, Inc. |
c89f2750 DDT |
7 | * |
8 | * This software is licensed under the terms of the GNU General Public | |
9 | * License version 2, as published by the Free Software Foundation, and | |
10 | * may be copied, distributed, and modified under those terms. | |
11 | * | |
12 | * This program is distributed in the hope that it will be useful, | |
13 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | |
14 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | |
15 | * GNU General Public License for more details. | |
16 | * | |
17 | */ | |
18 | ||
19 | /* This source file contains the implementation of a special device driver | |
20 | * that intends to provide a *very* fast communication channel between the | |
21 | * guest system and the QEMU emulator. | |
22 | * | |
23 | * Usage from the guest is simply the following (error handling simplified): | |
24 | * | |
25 | * int fd = open("/dev/qemu_pipe",O_RDWR); | |
26 | * .... write() or read() through the pipe. | |
27 | * | |
28 | * This driver doesn't deal with the exact protocol used during the session. | |
29 | * It is intended to be as simple as something like: | |
30 | * | |
31 | * // do this _just_ after opening the fd to connect to a specific | |
32 | * // emulator service. | |
33 | * const char* msg = "<pipename>"; | |
34 | * if (write(fd, msg, strlen(msg)+1) < 0) { | |
35 | * ... could not connect to <pipename> service | |
36 | * close(fd); | |
37 | * } | |
38 | * | |
39 | * // after this, simply read() and write() to communicate with the | |
40 | * // service. Exact protocol details left as an exercise to the reader. | |
41 | * | |
42 | * This driver is very fast because it doesn't copy any data through | |
43 | * intermediate buffers, since the emulator is capable of translating | |
44 | * guest user addresses into host ones. | |
45 | * | |
46 | * Note that we must however ensure that each user page involved in the | |
47 | * exchange is properly mapped during a transfer. | |
48 | */ | |
49 | ||
726ea1a8 | 50 | |
c89f2750 | 51 | #include <linux/module.h> |
ac316725 | 52 | #include <linux/mod_devicetable.h> |
c89f2750 DDT |
53 | #include <linux/interrupt.h> |
54 | #include <linux/kernel.h> | |
55 | #include <linux/spinlock.h> | |
56 | #include <linux/miscdevice.h> | |
57 | #include <linux/platform_device.h> | |
58 | #include <linux/poll.h> | |
59 | #include <linux/sched.h> | |
60 | #include <linux/bitops.h> | |
61 | #include <linux/slab.h> | |
62 | #include <linux/io.h> | |
a99698fa | 63 | #include <linux/goldfish.h> |
1d427da1 | 64 | #include <linux/dma-mapping.h> |
2f3be882 | 65 | #include <linux/mm.h> |
d62f324b | 66 | #include <linux/acpi.h> |
95577010 | 67 | #include "goldfish_pipe_qemu.h" |
c89f2750 | 68 | |
726ea1a8 JQ |
/*
 * Update this when something changes in the driver's behavior so the host
 * can benefit from knowing it
 */
enum {
	PIPE_DRIVER_VERSION = 2,
	PIPE_CURRENT_DEVICE_VERSION = 2
};

/* Driver-internal capacity limits. */
enum {
	/* Max scatter entries one PIPE_CMD_{READ,WRITE} command can carry */
	MAX_BUFFERS_PER_COMMAND = 336,
	/* Capacity of the host-shared signalled-pipes buffer */
	MAX_SIGNALLED_PIPES = 64,
	/* Starting size of goldfish_pipe_dev::pipes; doubled on demand */
	INITIAL_PIPES_CAPACITY = 64
};
83 | ||
struct goldfish_pipe_dev;
struct goldfish_pipe;
struct goldfish_pipe_command;

/* A per-pipe command structure, shared with the host.
 * NOTE: the layout is part of the guest/host protocol - do not reorder
 * or resize fields without bumping the version constants above.
 */
struct goldfish_pipe_command {
	s32 cmd;	/* PipeCmdCode, guest -> host */
	s32 id;		/* pipe id, guest -> host */
	s32 status;	/* command execution status, host -> guest */
	s32 reserved;	/* to pad to 64-bit boundary */
	union {
		/* Parameters for PIPE_CMD_{READ,WRITE} */
		struct {
			/* number of buffers, guest -> host */
			u32 buffers_count;
			/* number of consumed bytes, host -> guest */
			s32 consumed_size;
			/* buffer pointers, guest -> host */
			u64 ptrs[MAX_BUFFERS_PER_COMMAND];
			/* buffer sizes, guest -> host */
			u32 sizes[MAX_BUFFERS_PER_COMMAND];
		} rw_params;
	};
};
108 | ||
/* A single signalled pipe information */
struct signalled_pipe_buffer {
	/* pipe id (index into goldfish_pipe_dev::pipes) */
	u32 id;
	/* PIPE_WAKE_* bits reported by the host for this pipe */
	u32 flags;
};

/* Parameters for the PIPE_CMD_OPEN command */
struct open_command_param {
	/* guest-physical address of the pipe's command buffer */
	u64 command_buffer_ptr;
	/* tells the host how many rw_params entries the guest supports */
	u32 rw_params_max_count;
};

/* Device-level set of buffers shared with the host */
struct goldfish_pipe_dev_buffers {
	struct open_command_param open_command_params;
	struct signalled_pipe_buffer signalled_pipe_buffers[
		MAX_SIGNALLED_PIPES];
};
c89f2750 DDT |
127 | |
/* This data type models a given pipe instance */
struct goldfish_pipe {
	/* pipe ID - index into goldfish_pipe_dev::pipes array */
	u32 id;

	/* The wake flags pipe is waiting for
	 * Note: not protected with any lock, uses atomic operations
	 * and barriers to make it thread-safe.
	 */
	unsigned long flags;

	/* wake flags host have signalled,
	 * - protected by goldfish_pipe_dev::lock
	 */
	unsigned long signalled_flags;

	/* A pointer to command buffer (one full page, shared with host) */
	struct goldfish_pipe_command *command_buffer;

	/* doubly linked list of signalled pipes, protected by
	 * goldfish_pipe_dev::lock
	 */
	struct goldfish_pipe *prev_signalled;
	struct goldfish_pipe *next_signalled;

	/*
	 * A pipe's own lock. Protects the following:
	 * - *command_buffer - makes sure a command can safely write its
	 *   parameters to the host and read the results back.
	 */
	struct mutex lock;

	/* A wake queue for sleeping until host signals an event */
	wait_queue_head_t wake_queue;

	/* Pointer to the parent goldfish_pipe_dev instance */
	struct goldfish_pipe_dev *dev;
};
163 | ||
726ea1a8 JQ |
/* The global driver data. Holds a reference to the i/o page used to
 * communicate with the emulator, and a wake queue for blocked tasks
 * waiting to be awoken.
 */
struct goldfish_pipe_dev {
	/*
	 * Global device spinlock. Protects the following members:
	 *  - pipes, pipes_capacity
	 *  - [*pipes, *pipes + pipes_capacity) - array data
	 *  - first_signalled_pipe,
	 *      goldfish_pipe::prev_signalled,
	 *      goldfish_pipe::next_signalled,
	 *      goldfish_pipe::signalled_flags - all signalled-related fields,
	 *      in all allocated pipes
	 *  - open_command_params - PIPE_CMD_OPEN-related buffers
	 *
	 * It looks like a lot of different fields, but the trick is that
	 * the only operation that happens often is the signalled pipes array
	 * manipulation. That's why it's OK for now to keep the rest of the
	 * fields under the same lock. If we notice too much contention because
	 * of PIPE_CMD_OPEN, then we should add a separate lock there.
	 */
	spinlock_t lock;

	/*
	 * Array of the pipes of |pipes_capacity| elements,
	 * indexed by goldfish_pipe::id
	 */
	struct goldfish_pipe **pipes;
	u32 pipes_capacity;

	/* Pointers to the buffers host uses for interaction with this driver */
	struct goldfish_pipe_dev_buffers *buffers;

	/* Head of a doubly linked list of signalled pipes */
	struct goldfish_pipe *first_signalled_pipe;

	/* Some device-specific data */
	int irq;
	int version;
	unsigned char __iomem *base;
};

/* The single device instance (the emulator exposes exactly one pipe device) */
static struct goldfish_pipe_dev pipe_dev[1] = {};
c89f2750 | 208 | |
726ea1a8 | 209 | static int goldfish_cmd_locked(struct goldfish_pipe *pipe, enum PipeCmdCode cmd) |
a99698fa | 210 | { |
726ea1a8 JQ |
211 | pipe->command_buffer->cmd = cmd; |
212 | /* failure by default */ | |
213 | pipe->command_buffer->status = PIPE_ERROR_INVAL; | |
214 | writel(pipe->id, pipe->dev->base + PIPE_REG_CMD); | |
215 | return pipe->command_buffer->status; | |
c89f2750 DDT |
216 | } |
217 | ||
726ea1a8 | 218 | static int goldfish_cmd(struct goldfish_pipe *pipe, enum PipeCmdCode cmd) |
a99698fa | 219 | { |
726ea1a8 | 220 | int status; |
c89f2750 | 221 | |
726ea1a8 JQ |
222 | if (mutex_lock_interruptible(&pipe->lock)) |
223 | return PIPE_ERROR_IO; | |
224 | status = goldfish_cmd_locked(pipe, cmd); | |
225 | mutex_unlock(&pipe->lock); | |
226 | return status; | |
c89f2750 DDT |
227 | } |
228 | ||
726ea1a8 JQ |
229 | /* |
230 | * This function converts an error code returned by the emulator through | |
c89f2750 DDT |
231 | * the PIPE_REG_STATUS i/o register into a valid negative errno value. |
232 | */ | |
233 | static int goldfish_pipe_error_convert(int status) | |
234 | { | |
235 | switch (status) { | |
236 | case PIPE_ERROR_AGAIN: | |
237 | return -EAGAIN; | |
238 | case PIPE_ERROR_NOMEM: | |
239 | return -ENOMEM; | |
240 | case PIPE_ERROR_IO: | |
241 | return -EIO; | |
242 | default: | |
243 | return -EINVAL; | |
244 | } | |
245 | } | |
246 | ||
726ea1a8 JQ |
/*
 * Pin the user pages backing [first_page, last_page] so the host can
 * access them directly.
 *
 * The request is clamped to MAX_BUFFERS_PER_COMMAND pages (the most one
 * command can describe); when clamped, or when fewer pages than requested
 * could be pinned, the transfer no longer ends on the caller's partial
 * last page, so *iter_last_page_size is forced to a full PAGE_SIZE.
 *
 * Returns the number of pages pinned (>= 1), or -EFAULT if nothing
 * could be pinned.
 */
static int pin_user_pages(unsigned long first_page, unsigned long last_page,
	unsigned int last_page_size, int is_write,
	struct page *pages[MAX_BUFFERS_PER_COMMAND],
	unsigned int *iter_last_page_size)
{
	int ret;
	int requested_pages = ((last_page - first_page) >> PAGE_SHIFT) + 1;

	if (requested_pages > MAX_BUFFERS_PER_COMMAND) {
		requested_pages = MAX_BUFFERS_PER_COMMAND;
		*iter_last_page_size = PAGE_SIZE;
	} else {
		*iter_last_page_size = last_page_size;
	}

	/* a read from the pipe means the host WRITES to our pages */
	ret = get_user_pages_fast(
		first_page, requested_pages, !is_write, pages);
	if (ret <= 0)
		return -EFAULT;
	if (ret < requested_pages)
		*iter_last_page_size = PAGE_SIZE;
	return ret;

}
271 | ||
726ea1a8 JQ |
272 | static void release_user_pages(struct page **pages, int pages_count, |
273 | int is_write, s32 consumed_size) | |
c89f2750 | 274 | { |
726ea1a8 | 275 | int i; |
c89f2750 | 276 | |
726ea1a8 JQ |
277 | for (i = 0; i < pages_count; i++) { |
278 | if (!is_write && consumed_size > 0) | |
279 | set_page_dirty(pages[i]); | |
280 | put_page(pages[i]); | |
281 | } | |
282 | } | |
283 | ||
/* Populate the call parameters, merging adjacent pages together.
 *
 * Fills command->rw_params with (guest-physical address, size) pairs
 * describing the pinned pages. Runs of physically contiguous pages are
 * coalesced into a single buffer entry so the host sees fewer, larger
 * spans.
 */
static void populate_rw_params(
	struct page **pages, int pages_count,
	unsigned long address, unsigned long address_end,
	unsigned long first_page, unsigned long last_page,
	unsigned int iter_last_page_size, int is_write,
	struct goldfish_pipe_command *command)
{
	/*
	 * Process the first page separately - it's the only page that
	 * needs special handling for its start address.
	 */
	unsigned long xaddr = page_to_phys(pages[0]);
	unsigned long xaddr_prev = xaddr;
	int buffer_idx = 0;
	int i = 1;
	/* bytes available on the first page, honoring the start offset */
	int size_on_page = first_page == last_page
			? (int)(address_end - address)
			: (PAGE_SIZE - (address & ~PAGE_MASK));
	command->rw_params.ptrs[0] = (u64)(xaddr | (address & ~PAGE_MASK));
	command->rw_params.sizes[0] = size_on_page;
	for (; i < pages_count; ++i) {
		xaddr = page_to_phys(pages[i]);
		size_on_page = (i == pages_count - 1) ?
			iter_last_page_size : PAGE_SIZE;
		if (xaddr == xaddr_prev + PAGE_SIZE) {
			/* physically contiguous: grow the current buffer */
			command->rw_params.sizes[buffer_idx] += size_on_page;
		} else {
			/* gap in physical memory: start a new buffer */
			++buffer_idx;
			command->rw_params.ptrs[buffer_idx] = (u64)xaddr;
			command->rw_params.sizes[buffer_idx] = size_on_page;
		}
		xaddr_prev = xaddr;
	}
	command->rw_params.buffers_count = buffer_idx + 1;
}
c89f2750 | 320 | |
726ea1a8 JQ |
321 | static int transfer_max_buffers(struct goldfish_pipe *pipe, |
322 | unsigned long address, unsigned long address_end, int is_write, | |
323 | unsigned long last_page, unsigned int last_page_size, | |
324 | s32 *consumed_size, int *status) | |
325 | { | |
f563dab4 | 326 | static struct page *pages[MAX_BUFFERS_PER_COMMAND]; |
726ea1a8 JQ |
327 | unsigned long first_page = address & PAGE_MASK; |
328 | unsigned int iter_last_page_size; | |
329 | int pages_count = pin_user_pages(first_page, last_page, | |
330 | last_page_size, is_write, | |
331 | pages, &iter_last_page_size); | |
c89f2750 | 332 | |
726ea1a8 JQ |
333 | if (pages_count < 0) |
334 | return pages_count; | |
335 | ||
336 | /* Serialize access to the pipe command buffers */ | |
337 | if (mutex_lock_interruptible(&pipe->lock)) | |
338 | return -ERESTARTSYS; | |
339 | ||
340 | populate_rw_params(pages, pages_count, address, address_end, | |
341 | first_page, last_page, iter_last_page_size, is_write, | |
342 | pipe->command_buffer); | |
343 | ||
344 | /* Transfer the data */ | |
345 | *status = goldfish_cmd_locked(pipe, | |
346 | is_write ? PIPE_CMD_WRITE : PIPE_CMD_READ); | |
347 | ||
348 | *consumed_size = pipe->command_buffer->rw_params.consumed_size; | |
349 | ||
726ea1a8 JQ |
350 | release_user_pages(pages, pages_count, is_write, *consumed_size); |
351 | ||
f563dab4 GKH |
352 | mutex_unlock(&pipe->lock); |
353 | ||
726ea1a8 | 354 | return 0; |
c89f2750 DDT |
355 | } |
356 | ||
726ea1a8 | 357 | static int wait_for_host_signal(struct goldfish_pipe *pipe, int is_write) |
c89f2750 | 358 | { |
726ea1a8 | 359 | u32 wakeBit = is_write ? BIT_WAKE_ON_WRITE : BIT_WAKE_ON_READ; |
c89f2750 | 360 | |
726ea1a8 JQ |
361 | set_bit(wakeBit, &pipe->flags); |
362 | ||
363 | /* Tell the emulator we're going to wait for a wake event */ | |
364 | (void)goldfish_cmd(pipe, | |
365 | is_write ? PIPE_CMD_WAKE_ON_WRITE : PIPE_CMD_WAKE_ON_READ); | |
366 | ||
367 | while (test_bit(wakeBit, &pipe->flags)) { | |
368 | if (wait_event_interruptible( | |
369 | pipe->wake_queue, | |
370 | !test_bit(wakeBit, &pipe->flags))) | |
371 | return -ERESTARTSYS; | |
372 | ||
373 | if (test_bit(BIT_CLOSED_ON_HOST, &pipe->flags)) | |
374 | return -EIO; | |
375 | } | |
c89f2750 | 376 | |
c89f2750 DDT |
377 | return 0; |
378 | } | |
379 | ||
726ea1a8 JQ |
380 | static ssize_t goldfish_pipe_read_write(struct file *filp, |
381 | char __user *buffer, size_t bufflen, int is_write) | |
c89f2750 | 382 | { |
c89f2750 | 383 | struct goldfish_pipe *pipe = filp->private_data; |
2f3be882 | 384 | int count = 0, ret = -EINVAL; |
726ea1a8 JQ |
385 | unsigned long address, address_end, last_page; |
386 | unsigned int last_page_size; | |
c89f2750 DDT |
387 | |
388 | /* If the emulator already closed the pipe, no need to go further */ | |
726ea1a8 | 389 | if (unlikely(test_bit(BIT_CLOSED_ON_HOST, &pipe->flags))) |
c89f2750 | 390 | return -EIO; |
c89f2750 | 391 | /* Null reads or writes succeeds */ |
3411d035 | 392 | if (unlikely(bufflen == 0)) |
c89f2750 | 393 | return 0; |
c89f2750 | 394 | /* Check the buffer range for access */ |
726ea1a8 JQ |
395 | if (unlikely(!access_ok(is_write ? VERIFY_WRITE : VERIFY_READ, |
396 | buffer, bufflen))) | |
c89f2750 DDT |
397 | return -EFAULT; |
398 | ||
726ea1a8 | 399 | address = (unsigned long)buffer; |
c89f2750 | 400 | address_end = address + bufflen; |
726ea1a8 JQ |
401 | last_page = (address_end - 1) & PAGE_MASK; |
402 | last_page_size = ((address_end - 1) & ~PAGE_MASK) + 1; | |
c89f2750 DDT |
403 | |
404 | while (address < address_end) { | |
726ea1a8 JQ |
405 | s32 consumed_size; |
406 | int status; | |
4f42071c | 407 | |
726ea1a8 JQ |
408 | ret = transfer_max_buffers(pipe, address, address_end, is_write, |
409 | last_page, last_page_size, &consumed_size, | |
410 | &status); | |
2f3be882 | 411 | if (ret < 0) |
5fae054c | 412 | break; |
c89f2750 | 413 | |
726ea1a8 JQ |
414 | if (consumed_size > 0) { |
415 | /* No matter what's the status, we've transferred | |
416 | * something. | |
4f42071c | 417 | */ |
726ea1a8 JQ |
418 | count += consumed_size; |
419 | address += consumed_size; | |
c89f2750 | 420 | } |
726ea1a8 | 421 | if (status > 0) |
c89f2750 | 422 | continue; |
726ea1a8 JQ |
423 | if (status == 0) { |
424 | /* EOF */ | |
2f3be882 | 425 | ret = 0; |
c89f2750 | 426 | break; |
726ea1a8 JQ |
427 | } |
428 | if (count > 0) { | |
2f3be882 | 429 | /* |
726ea1a8 JQ |
430 | * An error occurred, but we already transferred |
431 | * something on one of the previous iterations. | |
2f3be882 CD |
432 | * Just return what we already copied and log this |
433 | * err. | |
2f3be882 | 434 | */ |
25dd0f40 | 435 | if (status != PIPE_ERROR_AGAIN) |
726ea1a8 | 436 | pr_info_ratelimited("goldfish_pipe: backend error %d on %s\n", |
2f3be882 | 437 | status, is_write ? "write" : "read"); |
c89f2750 | 438 | break; |
2f3be882 | 439 | } |
c89f2750 | 440 | |
2f3be882 | 441 | /* |
726ea1a8 | 442 | * If the error is not PIPE_ERROR_AGAIN, or if we are in |
2f3be882 CD |
443 | * non-blocking mode, just return the error code. |
444 | */ | |
c89f2750 DDT |
445 | if (status != PIPE_ERROR_AGAIN || |
446 | (filp->f_flags & O_NONBLOCK) != 0) { | |
447 | ret = goldfish_pipe_error_convert(status); | |
448 | break; | |
449 | } | |
450 | ||
726ea1a8 JQ |
451 | status = wait_for_host_signal(pipe, is_write); |
452 | if (status < 0) | |
453 | return status; | |
c89f2750 | 454 | } |
2f3be882 | 455 | |
726ea1a8 | 456 | if (count > 0) |
2f3be882 | 457 | return count; |
726ea1a8 | 458 | return ret; |
c89f2750 DDT |
459 | } |
460 | ||
461 | static ssize_t goldfish_pipe_read(struct file *filp, char __user *buffer, | |
726ea1a8 | 462 | size_t bufflen, loff_t *ppos) |
c89f2750 | 463 | { |
726ea1a8 JQ |
464 | return goldfish_pipe_read_write(filp, buffer, bufflen, |
465 | /* is_write */ 0); | |
c89f2750 DDT |
466 | } |
467 | ||
468 | static ssize_t goldfish_pipe_write(struct file *filp, | |
469 | const char __user *buffer, size_t bufflen, | |
470 | loff_t *ppos) | |
471 | { | |
726ea1a8 JQ |
472 | return goldfish_pipe_read_write(filp, |
473 | /* cast away the const */(char __user *)buffer, bufflen, | |
474 | /* is_write */ 1); | |
c89f2750 DDT |
475 | } |
476 | ||
afc9a42b | 477 | static __poll_t goldfish_pipe_poll(struct file *filp, poll_table *wait) |
c89f2750 DDT |
478 | { |
479 | struct goldfish_pipe *pipe = filp->private_data; | |
afc9a42b | 480 | __poll_t mask = 0; |
c89f2750 DDT |
481 | int status; |
482 | ||
c89f2750 DDT |
483 | poll_wait(filp, &pipe->wake_queue, wait); |
484 | ||
726ea1a8 JQ |
485 | status = goldfish_cmd(pipe, PIPE_CMD_POLL); |
486 | if (status < 0) | |
487 | return -ERESTARTSYS; | |
c89f2750 DDT |
488 | |
489 | if (status & PIPE_POLL_IN) | |
a9a08845 | 490 | mask |= EPOLLIN | EPOLLRDNORM; |
c89f2750 | 491 | if (status & PIPE_POLL_OUT) |
a9a08845 | 492 | mask |= EPOLLOUT | EPOLLWRNORM; |
c89f2750 | 493 | if (status & PIPE_POLL_HUP) |
a9a08845 | 494 | mask |= EPOLLHUP; |
c89f2750 | 495 | if (test_bit(BIT_CLOSED_ON_HOST, &pipe->flags)) |
a9a08845 | 496 | mask |= EPOLLERR; |
c89f2750 DDT |
497 | |
498 | return mask; | |
499 | } | |
500 | ||
726ea1a8 JQ |
/*
 * Record host-signalled wake |flags| for pipe |id| and, if not already
 * queued, push the pipe onto the device's list of signalled pipes.
 * Caller must hold dev->lock.
 */
static void signalled_pipes_add_locked(struct goldfish_pipe_dev *dev,
	u32 id, u32 flags)
{
	struct goldfish_pipe *pipe;

	/* id comes from the host-shared buffer; sanity-check it */
	if (WARN_ON(id >= dev->pipes_capacity))
		return;

	pipe = dev->pipes[id];
	if (!pipe)
		return;
	pipe->signalled_flags |= flags;

	if (pipe->prev_signalled || pipe->next_signalled
		|| dev->first_signalled_pipe == pipe)
		return;	/* already in the list */
	/* push onto the front of the list */
	pipe->next_signalled = dev->first_signalled_pipe;
	if (dev->first_signalled_pipe)
		dev->first_signalled_pipe->prev_signalled = pipe;
	dev->first_signalled_pipe = pipe;
}
49a75c44 | 522 | |
726ea1a8 | 523 | static void signalled_pipes_remove_locked(struct goldfish_pipe_dev *dev, |
53bdf668 RK |
524 | struct goldfish_pipe *pipe) |
525 | { | |
726ea1a8 JQ |
526 | if (pipe->prev_signalled) |
527 | pipe->prev_signalled->next_signalled = pipe->next_signalled; | |
528 | if (pipe->next_signalled) | |
529 | pipe->next_signalled->prev_signalled = pipe->prev_signalled; | |
530 | if (pipe == dev->first_signalled_pipe) | |
531 | dev->first_signalled_pipe = pipe->next_signalled; | |
532 | pipe->prev_signalled = NULL; | |
533 | pipe->next_signalled = NULL; | |
534 | } | |
25c72c78 | 535 | |
726ea1a8 JQ |
/*
 * Pop the first signalled pipe off the device list, returning it (or
 * NULL if the list is empty) and handing back its accumulated wake
 * flags through |wakes|. Takes dev->lock internally.
 */
static struct goldfish_pipe *signalled_pipes_pop_front(
		struct goldfish_pipe_dev *dev, int *wakes)
{
	struct goldfish_pipe *pipe;
	unsigned long flags;

	spin_lock_irqsave(&dev->lock, flags);

	pipe = dev->first_signalled_pipe;
	if (pipe) {
		/* consume the flags under the lock */
		*wakes = pipe->signalled_flags;
		pipe->signalled_flags = 0;
		/*
		 * This is an optimized version of
		 * signalled_pipes_remove_locked()
		 * - We want to make it as fast as possible to
		 * wake the sleeping pipe operations faster.
		 */
		dev->first_signalled_pipe = pipe->next_signalled;
		if (dev->first_signalled_pipe)
			dev->first_signalled_pipe->prev_signalled = NULL;
		pipe->next_signalled = NULL;
	}

	spin_unlock_irqrestore(&dev->lock, flags);
	return pipe;
}
563 | ||
/*
 * Tasklet body: drain the signalled-pipes list built up by the IRQ
 * handler and wake every sleeper. Runs in softirq context.
 */
static void goldfish_interrupt_task(unsigned long unused)
{
	struct goldfish_pipe_dev *dev = pipe_dev;
	/* Iterate over the signalled pipes and wake them one by one */
	struct goldfish_pipe *pipe;
	int wakes;

	while ((pipe = signalled_pipes_pop_front(dev, &wakes)) != NULL) {
		if (wakes & PIPE_WAKE_CLOSED) {
			/* overwrite all flags: the pipe is gone on the host */
			pipe->flags = 1 << BIT_CLOSED_ON_HOST;
		} else {
			if (wakes & PIPE_WAKE_READ)
				clear_bit(BIT_WAKE_ON_READ, &pipe->flags);
			if (wakes & PIPE_WAKE_WRITE)
				clear_bit(BIT_WAKE_ON_WRITE, &pipe->flags);
		}
		/*
		 * wake_up_interruptible() implies a write barrier, so don't
		 * explicitly add another one here.
		 */
		wake_up_interruptible(&pipe->wake_queue);
	}
}
static DECLARE_TASKLET(goldfish_interrupt_tasklet, goldfish_interrupt_task, 0);
c89f2750 | 588 | |
726ea1a8 JQ |
/*
 * The general idea of the interrupt handling:
 *
 *  1. device raises an interrupt if there's at least one signalled pipe
 *  2. IRQ handler reads the signalled pipes and their count from the device
 *  3. device writes them into a shared buffer and returns the count
 *      it only resets the IRQ if it has returned all signalled pipes,
 *      otherwise it leaves it raised, so IRQ handler will be called
 *      again for the next chunk
 *  4. IRQ handler adds all returned pipes to the device's signalled pipes list
 *  5. IRQ handler launches a tasklet to process the signalled pipes from the
 *      list in a separate context
 */
static irqreturn_t goldfish_pipe_interrupt(int irq, void *dev_id)
{
	u32 count;
	u32 i;
	unsigned long flags;
	struct goldfish_pipe_dev *dev = dev_id;

	/* the IRQ is shared; make sure it's really ours */
	if (dev != pipe_dev)
		return IRQ_NONE;

	/* Request the signalled pipes from the device */
	spin_lock_irqsave(&dev->lock, flags);

	/* reading this register also populates signalled_pipe_buffers */
	count = readl(dev->base + PIPE_REG_GET_SIGNALLED);
	if (count == 0) {
		spin_unlock_irqrestore(&dev->lock, flags);
		return IRQ_NONE;
	}
	/* clamp to the shared buffer's capacity */
	if (count > MAX_SIGNALLED_PIPES)
		count = MAX_SIGNALLED_PIPES;

	for (i = 0; i < count; ++i)
		signalled_pipes_add_locked(dev,
			dev->buffers->signalled_pipe_buffers[i].id,
			dev->buffers->signalled_pipe_buffers[i].flags);

	spin_unlock_irqrestore(&dev->lock, flags);

	/* defer the actual wakeups to softirq context */
	tasklet_schedule(&goldfish_interrupt_tasklet);
	return IRQ_HANDLED;
}
633 | ||
634 | static int get_free_pipe_id_locked(struct goldfish_pipe_dev *dev) | |
635 | { | |
636 | int id; | |
637 | ||
638 | for (id = 0; id < dev->pipes_capacity; ++id) | |
639 | if (!dev->pipes[id]) | |
640 | return id; | |
641 | ||
642 | { | |
84ae527a RK |
643 | /* Reallocate the array. |
644 | * Since get_free_pipe_id_locked runs with interrupts disabled, | |
645 | * we don't want to make calls that could lead to sleep. | |
646 | */ | |
726ea1a8 JQ |
647 | u32 new_capacity = 2 * dev->pipes_capacity; |
648 | struct goldfish_pipe **pipes = | |
3eff8ecd | 649 | kcalloc(new_capacity, sizeof(*pipes), GFP_ATOMIC); |
726ea1a8 JQ |
650 | if (!pipes) |
651 | return -ENOMEM; | |
652 | memcpy(pipes, dev->pipes, sizeof(*pipes) * dev->pipes_capacity); | |
653 | kfree(dev->pipes); | |
654 | dev->pipes = pipes; | |
655 | id = dev->pipes_capacity; | |
656 | dev->pipes_capacity = new_capacity; | |
657 | } | |
658 | return id; | |
c89f2750 DDT |
659 | } |
660 | ||
/**
 * goldfish_pipe_open - open a channel to the AVD
 * @inode: inode of device
 * @file: file struct of opener
 *
 * Create a new pipe link between the emulator and the use application.
 * Each new request produces a new pipe.
 *
 * Note: we use the pipe ID as a mux. All goldfish emulations are 32bit
 * right now so this is fine. A move to 64bit will need this addressing
 */
static int goldfish_pipe_open(struct inode *inode, struct file *file)
{
	struct goldfish_pipe_dev *dev = pipe_dev;
	unsigned long flags;
	int id;
	int status;

	/* Allocate new pipe kernel object */
	struct goldfish_pipe *pipe = kzalloc(sizeof(*pipe), GFP_KERNEL);
	if (pipe == NULL)
		return -ENOMEM;

	pipe->dev = dev;
	mutex_init(&pipe->lock);
	init_waitqueue_head(&pipe->wake_queue);

	/*
	 * Command buffer needs to be allocated on its own page to make sure
	 * it is physically contiguous in host's address space.
	 */
	pipe->command_buffer =
		(struct goldfish_pipe_command *)__get_free_page(GFP_KERNEL);
	if (!pipe->command_buffer) {
		status = -ENOMEM;
		goto err_pipe;
	}

	spin_lock_irqsave(&dev->lock, flags);

	id = get_free_pipe_id_locked(dev);
	if (id < 0) {
		status = id;
		goto err_id_locked;
	}

	dev->pipes[id] = pipe;
	pipe->id = id;
	pipe->command_buffer->id = id;

	/* Now tell the emulator we're opening a new pipe.
	 * open_command_params lives in the device's host-shared page and is
	 * protected by dev->lock, which we still hold here.
	 */
	dev->buffers->open_command_params.rw_params_max_count =
			MAX_BUFFERS_PER_COMMAND;
	dev->buffers->open_command_params.command_buffer_ptr =
			(u64)(unsigned long)__pa(pipe->command_buffer);
	status = goldfish_cmd_locked(pipe, PIPE_CMD_OPEN);
	spin_unlock_irqrestore(&dev->lock, flags);
	if (status < 0)
		goto err_cmd;
	/* All is done, save the pipe into the file's private data field */
	file->private_data = pipe;
	return 0;

err_cmd:
	/* the lock was dropped above, so retake it to undo the assignment */
	spin_lock_irqsave(&dev->lock, flags);
	dev->pipes[id] = NULL;
err_id_locked:
	spin_unlock_irqrestore(&dev->lock, flags);
	free_page((unsigned long)pipe->command_buffer);
err_pipe:
	kfree(pipe);
	return status;
}
734 | ||
735 | static int goldfish_pipe_release(struct inode *inode, struct file *filp) | |
736 | { | |
726ea1a8 | 737 | unsigned long flags; |
c89f2750 | 738 | struct goldfish_pipe *pipe = filp->private_data; |
726ea1a8 | 739 | struct goldfish_pipe_dev *dev = pipe->dev; |
c89f2750 DDT |
740 | |
741 | /* The guest is closing the channel, so tell the emulator right now */ | |
726ea1a8 JQ |
742 | (void)goldfish_cmd(pipe, PIPE_CMD_CLOSE); |
743 | ||
744 | spin_lock_irqsave(&dev->lock, flags); | |
745 | dev->pipes[pipe->id] = NULL; | |
746 | signalled_pipes_remove_locked(dev, pipe); | |
747 | spin_unlock_irqrestore(&dev->lock, flags); | |
748 | ||
c89f2750 | 749 | filp->private_data = NULL; |
726ea1a8 JQ |
750 | free_page((unsigned long)pipe->command_buffer); |
751 | kfree(pipe); | |
c89f2750 DDT |
752 | return 0; |
753 | } | |
754 | ||
/* VFS entry points for /dev/goldfish_pipe clients. */
static const struct file_operations goldfish_pipe_fops = {
	.owner = THIS_MODULE,
	.read = goldfish_pipe_read,
	.write = goldfish_pipe_write,
	.poll = goldfish_pipe_poll,
	.open = goldfish_pipe_open,
	.release = goldfish_pipe_release,
};

/* Misc character device; the minor number is assigned dynamically. */
static struct miscdevice goldfish_pipe_dev = {
	.minor = MISC_DYNAMIC_MINOR,
	.name = "goldfish_pipe",
	.fops = &goldfish_pipe_fops,
};
769 | ||
726ea1a8 JQ |
770 | static int goldfish_pipe_device_init(struct platform_device *pdev) |
771 | { | |
772 | char *page; | |
773 | struct goldfish_pipe_dev *dev = pipe_dev; | |
774 | int err = devm_request_irq(&pdev->dev, dev->irq, | |
775 | goldfish_pipe_interrupt, | |
776 | IRQF_SHARED, "goldfish_pipe", dev); | |
777 | if (err) { | |
778 | dev_err(&pdev->dev, "unable to allocate IRQ for v2\n"); | |
779 | return err; | |
780 | } | |
781 | ||
782 | err = misc_register(&goldfish_pipe_dev); | |
783 | if (err) { | |
784 | dev_err(&pdev->dev, "unable to register v2 device\n"); | |
785 | return err; | |
786 | } | |
787 | ||
788 | dev->first_signalled_pipe = NULL; | |
789 | dev->pipes_capacity = INITIAL_PIPES_CAPACITY; | |
790 | dev->pipes = kcalloc(dev->pipes_capacity, sizeof(*dev->pipes), | |
791 | GFP_KERNEL); | |
792 | if (!dev->pipes) | |
793 | return -ENOMEM; | |
794 | ||
795 | /* | |
796 | * We're going to pass two buffers, open_command_params and | |
797 | * signalled_pipe_buffers, to the host. This means each of those buffers | |
798 | * needs to be contained in a single physical page. The easiest choice | |
799 | * is to just allocate a page and place the buffers in it. | |
800 | */ | |
801 | if (WARN_ON(sizeof(*dev->buffers) > PAGE_SIZE)) | |
802 | return -ENOMEM; | |
803 | ||
804 | page = (char *)__get_free_page(GFP_KERNEL); | |
805 | if (!page) { | |
806 | kfree(dev->pipes); | |
807 | return -ENOMEM; | |
808 | } | |
809 | dev->buffers = (struct goldfish_pipe_dev_buffers *)page; | |
810 | ||
811 | /* Send the buffer addresses to the host */ | |
812 | { | |
813 | u64 paddr = __pa(&dev->buffers->signalled_pipe_buffers); | |
814 | ||
815 | writel((u32)(unsigned long)(paddr >> 32), | |
816 | dev->base + PIPE_REG_SIGNAL_BUFFER_HIGH); | |
817 | writel((u32)(unsigned long)paddr, | |
818 | dev->base + PIPE_REG_SIGNAL_BUFFER); | |
819 | writel((u32)MAX_SIGNALLED_PIPES, | |
820 | dev->base + PIPE_REG_SIGNAL_BUFFER_COUNT); | |
821 | ||
822 | paddr = __pa(&dev->buffers->open_command_params); | |
823 | writel((u32)(unsigned long)(paddr >> 32), | |
824 | dev->base + PIPE_REG_OPEN_BUFFER_HIGH); | |
825 | writel((u32)(unsigned long)paddr, | |
826 | dev->base + PIPE_REG_OPEN_BUFFER); | |
827 | } | |
828 | return 0; | |
829 | } | |
830 | ||
831 | static void goldfish_pipe_device_deinit(struct platform_device *pdev) | |
832 | { | |
833 | struct goldfish_pipe_dev *dev = pipe_dev; | |
834 | ||
835 | misc_deregister(&goldfish_pipe_dev); | |
836 | kfree(dev->pipes); | |
837 | free_page((unsigned long)dev->buffers); | |
838 | } | |
839 | ||
c89f2750 DDT |
840 | static int goldfish_pipe_probe(struct platform_device *pdev) |
841 | { | |
842 | int err; | |
843 | struct resource *r; | |
844 | struct goldfish_pipe_dev *dev = pipe_dev; | |
845 | ||
726ea1a8 JQ |
846 | if (WARN_ON(sizeof(struct goldfish_pipe_command) > PAGE_SIZE)) |
847 | return -ENOMEM; | |
848 | ||
c89f2750 DDT |
849 | /* not thread safe, but this should not happen */ |
850 | WARN_ON(dev->base != NULL); | |
851 | ||
852 | spin_lock_init(&dev->lock); | |
853 | ||
854 | r = platform_get_resource(pdev, IORESOURCE_MEM, 0); | |
855 | if (r == NULL || resource_size(r) < PAGE_SIZE) { | |
856 | dev_err(&pdev->dev, "can't allocate i/o page\n"); | |
857 | return -EINVAL; | |
858 | } | |
859 | dev->base = devm_ioremap(&pdev->dev, r->start, PAGE_SIZE); | |
860 | if (dev->base == NULL) { | |
861 | dev_err(&pdev->dev, "ioremap failed\n"); | |
862 | return -EINVAL; | |
863 | } | |
864 | ||
865 | r = platform_get_resource(pdev, IORESOURCE_IRQ, 0); | |
866 | if (r == NULL) { | |
867 | err = -EINVAL; | |
868 | goto error; | |
869 | } | |
870 | dev->irq = r->start; | |
871 | ||
726ea1a8 JQ |
872 | /* |
873 | * Exchange the versions with the host device | |
874 | * | |
875 | * Note: v1 driver used to not report its version, so we write it before | |
876 | * reading device version back: this allows the host implementation to | |
877 | * detect the old driver (if there was no version write before read). | |
4f42071c | 878 | */ |
726ea1a8 | 879 | writel((u32)PIPE_DRIVER_VERSION, dev->base + PIPE_REG_VERSION); |
4f42071c | 880 | dev->version = readl(dev->base + PIPE_REG_VERSION); |
726ea1a8 JQ |
881 | if (WARN_ON(dev->version < PIPE_CURRENT_DEVICE_VERSION)) |
882 | return -EINVAL; | |
883 | ||
884 | err = goldfish_pipe_device_init(pdev); | |
885 | if (!err) | |
886 | return 0; | |
c89f2750 DDT |
887 | |
888 | error: | |
889 | dev->base = NULL; | |
890 | return err; | |
891 | } | |
892 | ||
893 | static int goldfish_pipe_remove(struct platform_device *pdev) | |
894 | { | |
895 | struct goldfish_pipe_dev *dev = pipe_dev; | |
726ea1a8 | 896 | goldfish_pipe_device_deinit(pdev); |
c89f2750 DDT |
897 | dev->base = NULL; |
898 | return 0; | |
899 | } | |
900 | ||
d62f324b JH |
/* ACPI _HID match table; "GFSH0003" is the pipe device's ACPI ID. */
static const struct acpi_device_id goldfish_pipe_acpi_match[] = {
	{ "GFSH0003", 0 },
	{ },
};
MODULE_DEVICE_TABLE(acpi, goldfish_pipe_acpi_match);
906 | ||
91a18a41 GH |
/* Devicetree match table for the "google,android-pipe" compatible node. */
static const struct of_device_id goldfish_pipe_of_match[] = {
	{ .compatible = "google,android-pipe", },
	{},
};
MODULE_DEVICE_TABLE(of, goldfish_pipe_of_match);
912 | ||
726ea1a8 | 913 | static struct platform_driver goldfish_pipe_driver = { |
c89f2750 DDT |
914 | .probe = goldfish_pipe_probe, |
915 | .remove = goldfish_pipe_remove, | |
916 | .driver = { | |
91a18a41 | 917 | .name = "goldfish_pipe", |
91a18a41 | 918 | .of_match_table = goldfish_pipe_of_match, |
d62f324b | 919 | .acpi_match_table = ACPI_PTR(goldfish_pipe_acpi_match), |
c89f2750 DDT |
920 | } |
921 | }; | |
922 | ||
726ea1a8 | 923 | module_platform_driver(goldfish_pipe_driver); |
c89f2750 | 924 | MODULE_AUTHOR("David Turner <digit@google.com>"); |
c3c4e307 | 925 | MODULE_LICENSE("GPL v2"); |