Commit | Line | Data |
---|---|---|
2b6a321d AD |
1 | /* |
2 | * Copyright (c) 2011-2016 Synaptics Incorporated | |
3 | * Copyright (c) 2011 Unixphere | |
4 | * | |
5 | * This driver provides the core support for a single RMI4-based device. | |
6 | * | |
7 | * The RMI4 specification can be found here (URL split for line length): | |
8 | * | |
9 | * http://www.synaptics.com/sites/default/files/ | |
10 | * 511-000136-01-Rev-E-RMI4-Interfacing-Guide.pdf | |
11 | * | |
12 | * This program is free software; you can redistribute it and/or modify it | |
13 | * under the terms of the GNU General Public License version 2 as published by | |
14 | * the Free Software Foundation. | |
15 | */ | |
16 | ||
17 | #include <linux/bitmap.h> | |
18 | #include <linux/delay.h> | |
19 | #include <linux/fs.h> | |
3aeed5b5 | 20 | #include <linux/irq.h> |
2b6a321d AD |
21 | #include <linux/pm.h> |
22 | #include <linux/slab.h> | |
d8a8b3ed | 23 | #include <linux/of.h> |
24d28e4f | 24 | #include <linux/irqdomain.h> |
2b6a321d AD |
25 | #include <uapi/linux/input.h> |
26 | #include <linux/rmi.h> | |
27 | #include "rmi_bus.h" | |
28 | #include "rmi_driver.h" | |
29 | ||
30 | #define HAS_NONSTANDARD_PDT_MASK 0x40 | |
31 | #define RMI4_MAX_PAGE 0xff | |
32 | #define RMI4_PAGE_SIZE 0x100 | |
33 | #define RMI4_PAGE_MASK 0xFF00 | |
34 | ||
35 | #define RMI_DEVICE_RESET_CMD 0x01 | |
36 | #define DEFAULT_RESET_DELAY_MS 100 | |
37 | ||
29fd0ec2 | 38 | void rmi_free_function_list(struct rmi_device *rmi_dev) |
2b6a321d AD |
39 | { |
40 | struct rmi_function *fn, *tmp; | |
41 | struct rmi_driver_data *data = dev_get_drvdata(&rmi_dev->dev); | |
42 | ||
6bd0dcfa ND |
43 | rmi_dbg(RMI_DEBUG_CORE, &rmi_dev->dev, "Freeing function list\n"); |
44 | ||
a1ab6902 DT |
45 | /* Doing it in the reverse order so F01 will be removed last */ |
46 | list_for_each_entry_safe_reverse(fn, tmp, | |
47 | &data->function_list, node) { | |
48 | list_del(&fn->node); | |
49 | rmi_unregister_function(fn); | |
50 | } | |
51 | ||
29fd0ec2 ND |
52 | devm_kfree(&rmi_dev->dev, data->irq_memory); |
53 | data->irq_memory = NULL; | |
54 | data->irq_status = NULL; | |
55 | data->fn_irq_bits = NULL; | |
56 | data->current_irq_mask = NULL; | |
57 | data->new_irq_mask = NULL; | |
58 | ||
2b6a321d | 59 | data->f01_container = NULL; |
29fd0ec2 | 60 | data->f34_container = NULL; |
2b6a321d AD |
61 | } |
62 | ||
63 | static int reset_one_function(struct rmi_function *fn) | |
64 | { | |
65 | struct rmi_function_handler *fh; | |
66 | int retval = 0; | |
67 | ||
68 | if (!fn || !fn->dev.driver) | |
69 | return 0; | |
70 | ||
71 | fh = to_rmi_function_handler(fn->dev.driver); | |
72 | if (fh->reset) { | |
73 | retval = fh->reset(fn); | |
74 | if (retval < 0) | |
75 | dev_err(&fn->dev, "Reset failed with code %d.\n", | |
76 | retval); | |
77 | } | |
78 | ||
79 | return retval; | |
80 | } | |
81 | ||
82 | static int configure_one_function(struct rmi_function *fn) | |
83 | { | |
84 | struct rmi_function_handler *fh; | |
85 | int retval = 0; | |
86 | ||
87 | if (!fn || !fn->dev.driver) | |
88 | return 0; | |
89 | ||
90 | fh = to_rmi_function_handler(fn->dev.driver); | |
91 | if (fh->config) { | |
92 | retval = fh->config(fn); | |
93 | if (retval < 0) | |
94 | dev_err(&fn->dev, "Config failed with code %d.\n", | |
95 | retval); | |
96 | } | |
97 | ||
98 | return retval; | |
99 | } | |
100 | ||
101 | static int rmi_driver_process_reset_requests(struct rmi_device *rmi_dev) | |
102 | { | |
103 | struct rmi_driver_data *data = dev_get_drvdata(&rmi_dev->dev); | |
104 | struct rmi_function *entry; | |
105 | int retval; | |
106 | ||
107 | list_for_each_entry(entry, &data->function_list, node) { | |
108 | retval = reset_one_function(entry); | |
109 | if (retval < 0) | |
110 | return retval; | |
111 | } | |
112 | ||
113 | return 0; | |
114 | } | |
115 | ||
116 | static int rmi_driver_process_config_requests(struct rmi_device *rmi_dev) | |
117 | { | |
118 | struct rmi_driver_data *data = dev_get_drvdata(&rmi_dev->dev); | |
119 | struct rmi_function *entry; | |
120 | int retval; | |
121 | ||
122 | list_for_each_entry(entry, &data->function_list, node) { | |
123 | retval = configure_one_function(entry); | |
124 | if (retval < 0) | |
125 | return retval; | |
126 | } | |
127 | ||
128 | return 0; | |
129 | } | |
130 | ||
/*
 * Fetch (if needed) and dispatch pending interrupt sources.
 *
 * If the transport already delivered attention data (attn_data.data set
 * by rmi_irq_fn()), irq_status was populated from the FIFO entry;
 * otherwise the F01 interrupt status registers are read from the device.
 * Each enabled, pending source is dispatched as a nested IRQ through the
 * driver's IRQ domain.
 */
static int rmi_process_interrupt_requests(struct rmi_device *rmi_dev)
{
	struct rmi_driver_data *data = dev_get_drvdata(&rmi_dev->dev);
	struct device *dev = &rmi_dev->dev;
	int i;
	int error;

	if (!data)
		return 0;

	/* No transport-supplied data: read F01 interrupt status directly. */
	if (!data->attn_data.data) {
		error = rmi_read_block(rmi_dev,
				data->f01_container->fd.data_base_addr + 1,
				data->irq_status, data->num_of_irq_regs);
		if (error < 0) {
			dev_err(dev, "Failed to read irqs, code=%d\n", error);
			return error;
		}
	}

	/* Mask off sources that are not currently enabled. */
	mutex_lock(&data->irq_mutex);
	bitmap_and(data->irq_status, data->irq_status, data->current_irq_mask,
	       data->irq_count);
	/*
	 * At this point, irq_status has all bits that are set in the
	 * interrupt status register and are enabled.
	 */
	mutex_unlock(&data->irq_mutex);

	/* Dispatch one nested IRQ per pending, enabled source. */
	for_each_set_bit(i, data->irq_status, data->irq_count)
		handle_nested_irq(irq_find_mapping(data->irqdomain, i));

	if (data->input)
		input_sync(data->input);

	return 0;
}
3aeed5b5 | 168 | |
b908d3cd BT |
169 | void rmi_set_attn_data(struct rmi_device *rmi_dev, unsigned long irq_status, |
170 | void *data, size_t size) | |
171 | { | |
172 | struct rmi_driver_data *drvdata = dev_get_drvdata(&rmi_dev->dev); | |
173 | struct rmi4_attn_data attn_data; | |
174 | void *fifo_data; | |
175 | ||
176 | if (!drvdata->enabled) | |
177 | return; | |
178 | ||
179 | fifo_data = kmemdup(data, size, GFP_ATOMIC); | |
180 | if (!fifo_data) | |
181 | return; | |
182 | ||
183 | attn_data.irq_status = irq_status; | |
184 | attn_data.size = size; | |
185 | attn_data.data = fifo_data; | |
186 | ||
187 | kfifo_put(&drvdata->attn_fifo, attn_data); | |
188 | } | |
189 | EXPORT_SYMBOL_GPL(rmi_set_attn_data); | |
190 | ||
3aeed5b5 BA |
191 | static irqreturn_t rmi_irq_fn(int irq, void *dev_id) |
192 | { | |
193 | struct rmi_device *rmi_dev = dev_id; | |
b908d3cd BT |
194 | struct rmi_driver_data *drvdata = dev_get_drvdata(&rmi_dev->dev); |
195 | struct rmi4_attn_data attn_data = {0}; | |
196 | int ret, count; | |
197 | ||
198 | count = kfifo_get(&drvdata->attn_fifo, &attn_data); | |
199 | if (count) { | |
200 | *(drvdata->irq_status) = attn_data.irq_status; | |
ae9979c3 | 201 | drvdata->attn_data = attn_data; |
b908d3cd | 202 | } |
3aeed5b5 BA |
203 | |
204 | ret = rmi_process_interrupt_requests(rmi_dev); | |
205 | if (ret) | |
206 | rmi_dbg(RMI_DEBUG_CORE, &rmi_dev->dev, | |
207 | "Failed to process interrupt request: %d\n", ret); | |
208 | ||
55edde9f | 209 | if (count) { |
b908d3cd | 210 | kfree(attn_data.data); |
55edde9f ND |
211 | attn_data.data = NULL; |
212 | } | |
b908d3cd BT |
213 | |
214 | if (!kfifo_is_empty(&drvdata->attn_fifo)) | |
215 | return rmi_irq_fn(irq, dev_id); | |
216 | ||
3aeed5b5 BA |
217 | return IRQ_HANDLED; |
218 | } | |
219 | ||
/*
 * Request the device interrupt as a threaded IRQ and mark the driver
 * enabled. Returns 0 on success or a negative error code.
 */
static int rmi_irq_init(struct rmi_device *rmi_dev)
{
	struct rmi_device_platform_data *pdata = rmi_get_platform_data(rmi_dev);
	struct rmi_driver_data *data = dev_get_drvdata(&rmi_dev->dev);
	int irq_flags = irq_get_trigger_type(pdata->irq);
	int ret;

	/* No trigger configured: default to active-low level triggering. */
	if (!irq_flags)
		irq_flags = IRQF_TRIGGER_LOW;

	/*
	 * Thread-only handler (hard handler is NULL) because processing
	 * requires sleeping bus transfers; IRQF_ONESHOT keeps the line
	 * masked until the thread finishes.
	 */
	ret = devm_request_threaded_irq(&rmi_dev->dev, pdata->irq, NULL,
					rmi_irq_fn, irq_flags | IRQF_ONESHOT,
					dev_driver_string(rmi_dev->xport->dev),
					rmi_dev);
	if (ret < 0) {
		dev_err(&rmi_dev->dev, "Failed to register interrupt %d\n",
			pdata->irq);

		return ret;
	}

	data->enabled = true;

	return 0;
}
2b6a321d | 245 | |
f32361b7 BT |
246 | struct rmi_function *rmi_find_function(struct rmi_device *rmi_dev, u8 number) |
247 | { | |
248 | struct rmi_driver_data *data = dev_get_drvdata(&rmi_dev->dev); | |
249 | struct rmi_function *entry; | |
250 | ||
251 | list_for_each_entry(entry, &data->function_list, node) { | |
252 | if (entry->fd.function_number == number) | |
253 | return entry; | |
254 | } | |
255 | ||
256 | return NULL; | |
257 | } | |
258 | ||
2b6a321d AD |
259 | static int suspend_one_function(struct rmi_function *fn) |
260 | { | |
261 | struct rmi_function_handler *fh; | |
262 | int retval = 0; | |
263 | ||
264 | if (!fn || !fn->dev.driver) | |
265 | return 0; | |
266 | ||
267 | fh = to_rmi_function_handler(fn->dev.driver); | |
268 | if (fh->suspend) { | |
269 | retval = fh->suspend(fn); | |
270 | if (retval < 0) | |
271 | dev_err(&fn->dev, "Suspend failed with code %d.\n", | |
272 | retval); | |
273 | } | |
274 | ||
275 | return retval; | |
276 | } | |
277 | ||
278 | static int rmi_suspend_functions(struct rmi_device *rmi_dev) | |
279 | { | |
280 | struct rmi_driver_data *data = dev_get_drvdata(&rmi_dev->dev); | |
281 | struct rmi_function *entry; | |
282 | int retval; | |
283 | ||
284 | list_for_each_entry(entry, &data->function_list, node) { | |
285 | retval = suspend_one_function(entry); | |
286 | if (retval < 0) | |
287 | return retval; | |
288 | } | |
289 | ||
290 | return 0; | |
291 | } | |
292 | ||
293 | static int resume_one_function(struct rmi_function *fn) | |
294 | { | |
295 | struct rmi_function_handler *fh; | |
296 | int retval = 0; | |
297 | ||
298 | if (!fn || !fn->dev.driver) | |
299 | return 0; | |
300 | ||
301 | fh = to_rmi_function_handler(fn->dev.driver); | |
302 | if (fh->resume) { | |
303 | retval = fh->resume(fn); | |
304 | if (retval < 0) | |
305 | dev_err(&fn->dev, "Resume failed with code %d.\n", | |
306 | retval); | |
307 | } | |
308 | ||
309 | return retval; | |
310 | } | |
311 | ||
312 | static int rmi_resume_functions(struct rmi_device *rmi_dev) | |
313 | { | |
314 | struct rmi_driver_data *data = dev_get_drvdata(&rmi_dev->dev); | |
315 | struct rmi_function *entry; | |
316 | int retval; | |
317 | ||
318 | list_for_each_entry(entry, &data->function_list, node) { | |
319 | retval = resume_one_function(entry); | |
320 | if (retval < 0) | |
321 | return retval; | |
322 | } | |
323 | ||
324 | return 0; | |
325 | } | |
326 | ||
/* Configure all functions, then process any already-pending interrupts. */
int rmi_enable_sensor(struct rmi_device *rmi_dev)
{
	int error;

	error = rmi_driver_process_config_requests(rmi_dev);
	if (error < 0)
		return error;

	return rmi_process_interrupt_requests(rmi_dev);
}
337 | ||
338 | /** | |
339 | * rmi_driver_set_input_params - set input device id and other data. | |
340 | * | |
341 | * @rmi_dev: Pointer to an RMI device | |
342 | * @input: Pointer to input device | |
343 | * | |
344 | */ | |
345 | static int rmi_driver_set_input_params(struct rmi_device *rmi_dev, | |
346 | struct input_dev *input) | |
347 | { | |
348 | input->name = SYNAPTICS_INPUT_DEVICE_NAME; | |
349 | input->id.vendor = SYNAPTICS_VENDOR_ID; | |
350 | input->id.bustype = BUS_RMI; | |
351 | return 0; | |
352 | } | |
353 | ||
354 | static void rmi_driver_set_input_name(struct rmi_device *rmi_dev, | |
355 | struct input_dev *input) | |
356 | { | |
357 | struct rmi_driver_data *data = dev_get_drvdata(&rmi_dev->dev); | |
ce363f0d | 358 | const char *device_name = rmi_f01_get_product_ID(data->f01_container); |
2b6a321d AD |
359 | char *name; |
360 | ||
361 | name = devm_kasprintf(&rmi_dev->dev, GFP_KERNEL, | |
362 | "Synaptics %s", device_name); | |
363 | if (!name) | |
364 | return; | |
365 | ||
366 | input->name = name; | |
367 | } | |
368 | ||
369 | static int rmi_driver_set_irq_bits(struct rmi_device *rmi_dev, | |
370 | unsigned long *mask) | |
371 | { | |
372 | int error = 0; | |
373 | struct rmi_driver_data *data = dev_get_drvdata(&rmi_dev->dev); | |
374 | struct device *dev = &rmi_dev->dev; | |
375 | ||
376 | mutex_lock(&data->irq_mutex); | |
377 | bitmap_or(data->new_irq_mask, | |
378 | data->current_irq_mask, mask, data->irq_count); | |
379 | ||
380 | error = rmi_write_block(rmi_dev, | |
381 | data->f01_container->fd.control_base_addr + 1, | |
382 | data->new_irq_mask, data->num_of_irq_regs); | |
383 | if (error < 0) { | |
384 | dev_err(dev, "%s: Failed to change enabled interrupts!", | |
385 | __func__); | |
386 | goto error_unlock; | |
387 | } | |
388 | bitmap_copy(data->current_irq_mask, data->new_irq_mask, | |
389 | data->num_of_irq_regs); | |
390 | ||
391 | error_unlock: | |
392 | mutex_unlock(&data->irq_mutex); | |
393 | return error; | |
394 | } | |
395 | ||
396 | static int rmi_driver_clear_irq_bits(struct rmi_device *rmi_dev, | |
397 | unsigned long *mask) | |
398 | { | |
399 | int error = 0; | |
400 | struct rmi_driver_data *data = dev_get_drvdata(&rmi_dev->dev); | |
401 | struct device *dev = &rmi_dev->dev; | |
402 | ||
403 | mutex_lock(&data->irq_mutex); | |
404 | bitmap_andnot(data->new_irq_mask, | |
405 | data->current_irq_mask, mask, data->irq_count); | |
406 | ||
407 | error = rmi_write_block(rmi_dev, | |
408 | data->f01_container->fd.control_base_addr + 1, | |
409 | data->new_irq_mask, data->num_of_irq_regs); | |
410 | if (error < 0) { | |
411 | dev_err(dev, "%s: Failed to change enabled interrupts!", | |
412 | __func__); | |
413 | goto error_unlock; | |
414 | } | |
415 | bitmap_copy(data->current_irq_mask, data->new_irq_mask, | |
416 | data->num_of_irq_regs); | |
417 | ||
418 | error_unlock: | |
419 | mutex_unlock(&data->irq_mutex); | |
420 | return error; | |
421 | } | |
422 | ||
/*
 * Re-sync driver state after a device reset: re-read the enabled
 * interrupt mask from F01, then run every function's reset and config
 * callbacks. Returns 0, including when called before init completed.
 */
static int rmi_driver_reset_handler(struct rmi_device *rmi_dev)
{
	struct rmi_driver_data *data = dev_get_drvdata(&rmi_dev->dev);
	int error;

	/*
	 * Can get called before the driver is fully ready to deal with
	 * this situation.
	 */
	if (!data || !data->f01_container) {
		dev_warn(&rmi_dev->dev,
			 "Not ready to handle reset yet!\n");
		return 0;
	}

	/* F01 control base + 1 holds the interrupt-enable registers. */
	error = rmi_read_block(rmi_dev,
			data->f01_container->fd.control_base_addr + 1,
			data->current_irq_mask, data->num_of_irq_regs);
	if (error < 0) {
		dev_err(&rmi_dev->dev, "%s: Failed to read current IRQ mask.\n",
			__func__);
		return error;
	}

	error = rmi_driver_process_reset_requests(rmi_dev);
	if (error < 0)
		return error;

	error = rmi_driver_process_config_requests(rmi_dev);
	if (error < 0)
		return error;

	return 0;
}
457 | ||
e9dade41 BT |
/*
 * Read one Page Description Table entry at @pdt_address and unpack it
 * into @entry. Returns 0 on success or a negative error code.
 */
static int rmi_read_pdt_entry(struct rmi_device *rmi_dev,
			      struct pdt_entry *entry, u16 pdt_address)
{
	u8 buf[RMI_PDT_ENTRY_SIZE];
	int error;

	error = rmi_read_block(rmi_dev, pdt_address, buf, RMI_PDT_ENTRY_SIZE);
	if (error) {
		dev_err(&rmi_dev->dev, "Read PDT entry at %#06x failed, code: %d.\n",
			pdt_address, error);
		return error;
	}

	/* Record the page this entry lives on (high byte of the address). */
	entry->page_start = pdt_address & RMI4_PAGE_MASK;
	entry->query_base_addr = buf[0];
	entry->command_base_addr = buf[1];
	entry->control_base_addr = buf[2];
	entry->data_base_addr = buf[3];
	/* buf[4] packs the IRQ source count (low bits) and version (bits 5+). */
	entry->interrupt_source_count = buf[4] & RMI_PDT_INT_SOURCE_COUNT_MASK;
	entry->function_version = (buf[4] & RMI_PDT_FUNCTION_VERSION_MASK) >> 5;
	entry->function_number = buf[5];

	return 0;
}
2b6a321d AD |
482 | |
483 | static void rmi_driver_copy_pdt_to_fd(const struct pdt_entry *pdt, | |
484 | struct rmi_function_descriptor *fd) | |
485 | { | |
486 | fd->query_base_addr = pdt->query_base_addr + pdt->page_start; | |
487 | fd->command_base_addr = pdt->command_base_addr + pdt->page_start; | |
488 | fd->control_base_addr = pdt->control_base_addr + pdt->page_start; | |
489 | fd->data_base_addr = pdt->data_base_addr + pdt->page_start; | |
490 | fd->function_number = pdt->function_number; | |
491 | fd->interrupt_source_count = pdt->interrupt_source_count; | |
492 | fd->function_version = pdt->function_version; | |
493 | } | |
494 | ||
495 | #define RMI_SCAN_CONTINUE 0 | |
496 | #define RMI_SCAN_DONE 1 | |
497 | ||
/*
 * Scan the PDT entries of one page, calling @callback for each valid
 * entry. Returns RMI_SCAN_CONTINUE, RMI_SCAN_DONE, or a negative error
 * (either a read failure or a non-continue value from the callback).
 */
static int rmi_scan_pdt_page(struct rmi_device *rmi_dev,
			     int page,
			     int *empty_pages,
			     void *ctx,
			     int (*callback)(struct rmi_device *rmi_dev,
					     void *ctx,
					     const struct pdt_entry *entry))
{
	struct rmi_driver_data *data = dev_get_drvdata(&rmi_dev->dev);
	struct pdt_entry pdt_entry;
	u16 page_start = RMI4_PAGE_SIZE * page;
	u16 pdt_start = page_start + PDT_START_SCAN_LOCATION;
	u16 pdt_end = page_start + PDT_END_SCAN_LOCATION;
	u16 addr;
	int error;
	int retval;

	/* The PDT is scanned downward from the top of the page. */
	for (addr = pdt_start; addr >= pdt_end; addr -= RMI_PDT_ENTRY_SIZE) {
		error = rmi_read_pdt_entry(rmi_dev, &pdt_entry, addr);
		if (error)
			return error;

		if (RMI4_END_OF_PDT(pdt_entry.function_number))
			break;

		retval = callback(rmi_dev, ctx, &pdt_entry);
		if (retval != RMI_SCAN_CONTINUE)
			return retval;
	}

	/*
	 * Count number of empty PDT pages. If a gap of two pages
	 * or more is found, stop scanning.
	 */
	if (addr == pdt_start)
		++*empty_pages;
	else
		*empty_pages = 0;

	/* In bootloader mode only F01/F34 matter, so stop scanning early. */
	return (data->bootloader_mode || *empty_pages >= 2) ?
					RMI_SCAN_DONE : RMI_SCAN_CONTINUE;
}
540 | ||
29fd0ec2 ND |
541 | int rmi_scan_pdt(struct rmi_device *rmi_dev, void *ctx, |
542 | int (*callback)(struct rmi_device *rmi_dev, | |
543 | void *ctx, const struct pdt_entry *entry)) | |
2b6a321d AD |
544 | { |
545 | int page; | |
ad338e8b | 546 | int empty_pages = 0; |
2b6a321d AD |
547 | int retval = RMI_SCAN_DONE; |
548 | ||
549 | for (page = 0; page <= RMI4_MAX_PAGE; page++) { | |
ad338e8b ND |
550 | retval = rmi_scan_pdt_page(rmi_dev, page, &empty_pages, |
551 | ctx, callback); | |
2b6a321d AD |
552 | if (retval != RMI_SCAN_CONTINUE) |
553 | break; | |
554 | } | |
555 | ||
556 | return retval < 0 ? retval : 0; | |
557 | } | |
558 | ||
559 | int rmi_read_register_desc(struct rmi_device *d, u16 addr, | |
560 | struct rmi_register_descriptor *rdesc) | |
561 | { | |
562 | int ret; | |
563 | u8 size_presence_reg; | |
564 | u8 buf[35]; | |
565 | int presense_offset = 1; | |
566 | u8 *struct_buf; | |
567 | int reg; | |
568 | int offset = 0; | |
569 | int map_offset = 0; | |
570 | int i; | |
571 | int b; | |
572 | ||
573 | /* | |
574 | * The first register of the register descriptor is the size of | |
575 | * the register descriptor's presense register. | |
576 | */ | |
577 | ret = rmi_read(d, addr, &size_presence_reg); | |
578 | if (ret) | |
579 | return ret; | |
580 | ++addr; | |
581 | ||
582 | if (size_presence_reg < 0 || size_presence_reg > 35) | |
583 | return -EIO; | |
584 | ||
585 | memset(buf, 0, sizeof(buf)); | |
586 | ||
587 | /* | |
588 | * The presence register contains the size of the register structure | |
589 | * and a bitmap which identified which packet registers are present | |
590 | * for this particular register type (ie query, control, or data). | |
591 | */ | |
592 | ret = rmi_read_block(d, addr, buf, size_presence_reg); | |
593 | if (ret) | |
594 | return ret; | |
595 | ++addr; | |
596 | ||
597 | if (buf[0] == 0) { | |
598 | presense_offset = 3; | |
599 | rdesc->struct_size = buf[1] | (buf[2] << 8); | |
600 | } else { | |
601 | rdesc->struct_size = buf[0]; | |
602 | } | |
603 | ||
604 | for (i = presense_offset; i < size_presence_reg; i++) { | |
605 | for (b = 0; b < 8; b++) { | |
606 | if (buf[i] & (0x1 << b)) | |
607 | bitmap_set(rdesc->presense_map, map_offset, 1); | |
608 | ++map_offset; | |
609 | } | |
610 | } | |
611 | ||
612 | rdesc->num_registers = bitmap_weight(rdesc->presense_map, | |
613 | RMI_REG_DESC_PRESENSE_BITS); | |
614 | ||
615 | rdesc->registers = devm_kzalloc(&d->dev, rdesc->num_registers * | |
616 | sizeof(struct rmi_register_desc_item), | |
617 | GFP_KERNEL); | |
618 | if (!rdesc->registers) | |
619 | return -ENOMEM; | |
620 | ||
621 | /* | |
622 | * Allocate a temporary buffer to hold the register structure. | |
623 | * I'm not using devm_kzalloc here since it will not be retained | |
624 | * after exiting this function | |
625 | */ | |
626 | struct_buf = kzalloc(rdesc->struct_size, GFP_KERNEL); | |
627 | if (!struct_buf) | |
628 | return -ENOMEM; | |
629 | ||
630 | /* | |
631 | * The register structure contains information about every packet | |
632 | * register of this type. This includes the size of the packet | |
633 | * register and a bitmap of all subpackets contained in the packet | |
634 | * register. | |
635 | */ | |
636 | ret = rmi_read_block(d, addr, struct_buf, rdesc->struct_size); | |
637 | if (ret) | |
638 | goto free_struct_buff; | |
639 | ||
640 | reg = find_first_bit(rdesc->presense_map, RMI_REG_DESC_PRESENSE_BITS); | |
2b6a321d AD |
641 | for (i = 0; i < rdesc->num_registers; i++) { |
642 | struct rmi_register_desc_item *item = &rdesc->registers[i]; | |
643 | int reg_size = struct_buf[offset]; | |
644 | ||
645 | ++offset; | |
646 | if (reg_size == 0) { | |
647 | reg_size = struct_buf[offset] | | |
648 | (struct_buf[offset + 1] << 8); | |
649 | offset += 2; | |
650 | } | |
651 | ||
652 | if (reg_size == 0) { | |
653 | reg_size = struct_buf[offset] | | |
654 | (struct_buf[offset + 1] << 8) | | |
655 | (struct_buf[offset + 2] << 16) | | |
656 | (struct_buf[offset + 3] << 24); | |
657 | offset += 4; | |
658 | } | |
659 | ||
660 | item->reg = reg; | |
661 | item->reg_size = reg_size; | |
662 | ||
3e29d6bb AD |
663 | map_offset = 0; |
664 | ||
2b6a321d AD |
665 | do { |
666 | for (b = 0; b < 7; b++) { | |
667 | if (struct_buf[offset] & (0x1 << b)) | |
668 | bitmap_set(item->subpacket_map, | |
669 | map_offset, 1); | |
670 | ++map_offset; | |
671 | } | |
672 | } while (struct_buf[offset++] & 0x80); | |
673 | ||
674 | item->num_subpackets = bitmap_weight(item->subpacket_map, | |
675 | RMI_REG_DESC_SUBPACKET_BITS); | |
676 | ||
677 | rmi_dbg(RMI_DEBUG_CORE, &d->dev, | |
678 | "%s: reg: %d reg size: %ld subpackets: %d\n", __func__, | |
679 | item->reg, item->reg_size, item->num_subpackets); | |
680 | ||
681 | reg = find_next_bit(rdesc->presense_map, | |
682 | RMI_REG_DESC_PRESENSE_BITS, reg + 1); | |
683 | } | |
684 | ||
685 | free_struct_buff: | |
686 | kfree(struct_buf); | |
687 | return ret; | |
688 | } | |
2b6a321d AD |
689 | |
690 | const struct rmi_register_desc_item *rmi_get_register_desc_item( | |
691 | struct rmi_register_descriptor *rdesc, u16 reg) | |
692 | { | |
693 | const struct rmi_register_desc_item *item; | |
694 | int i; | |
695 | ||
696 | for (i = 0; i < rdesc->num_registers; i++) { | |
697 | item = &rdesc->registers[i]; | |
698 | if (item->reg == reg) | |
699 | return item; | |
700 | } | |
701 | ||
702 | return NULL; | |
703 | } | |
2b6a321d AD |
704 | |
705 | size_t rmi_register_desc_calc_size(struct rmi_register_descriptor *rdesc) | |
706 | { | |
707 | const struct rmi_register_desc_item *item; | |
708 | int i; | |
709 | size_t size = 0; | |
710 | ||
711 | for (i = 0; i < rdesc->num_registers; i++) { | |
712 | item = &rdesc->registers[i]; | |
713 | size += item->reg_size; | |
714 | } | |
715 | return size; | |
716 | } | |
2b6a321d AD |
717 | |
718 | /* Compute the register offset relative to the base address */ | |
719 | int rmi_register_desc_calc_reg_offset( | |
720 | struct rmi_register_descriptor *rdesc, u16 reg) | |
721 | { | |
722 | const struct rmi_register_desc_item *item; | |
723 | int offset = 0; | |
724 | int i; | |
725 | ||
726 | for (i = 0; i < rdesc->num_registers; i++) { | |
727 | item = &rdesc->registers[i]; | |
728 | if (item->reg == reg) | |
729 | return offset; | |
730 | ++offset; | |
731 | } | |
732 | return -1; | |
733 | } | |
2b6a321d AD |
734 | |
/*
 * Return true when bit @subpacket is set in the item's subpacket map.
 * NOTE(review): the search bound is RMI_REG_DESC_PRESENSE_BITS rather
 * than RMI_REG_DESC_SUBPACKET_BITS (used elsewhere for subpacket_map) —
 * confirm the two constants are sized consistently with the map.
 */
bool rmi_register_desc_has_subpacket(const struct rmi_register_desc_item *item,
	u8 subpacket)
{
	return find_next_bit(item->subpacket_map, RMI_REG_DESC_PRESENSE_BITS,
				subpacket) == subpacket;
}
741 | ||
2b6a321d AD |
/*
 * Check whether the device reports bootloader (flash programming) mode.
 * F34 v2+ signals it in bit 7 of its first data register; F01 signals it
 * in bit 6 of its device-status register. Sets data->bootloader_mode
 * (never clears it). Returns 0 or a negative read error.
 */
static int rmi_check_bootloader_mode(struct rmi_device *rmi_dev,
				     const struct pdt_entry *pdt)
{
	struct rmi_driver_data *data = dev_get_drvdata(&rmi_dev->dev);
	int ret;
	u8 status;

	if (pdt->function_number == 0x34 && pdt->function_version > 1) {
		ret = rmi_read(rmi_dev, pdt->data_base_addr, &status);
		if (ret) {
			dev_err(&rmi_dev->dev,
				"Failed to read F34 status: %d.\n", ret);
			return ret;
		}

		if (status & BIT(7))
			data->bootloader_mode = true;
	} else if (pdt->function_number == 0x01) {
		ret = rmi_read(rmi_dev, pdt->data_base_addr, &status);
		if (ret) {
			dev_err(&rmi_dev->dev,
				"Failed to read F01 status: %d.\n", ret);
			return ret;
		}

		if (status & BIT(6))
			data->bootloader_mode = true;
	}

	return 0;
}
773 | ||
774 | static int rmi_count_irqs(struct rmi_device *rmi_dev, | |
775 | void *ctx, const struct pdt_entry *pdt) | |
776 | { | |
2b6a321d | 777 | int *irq_count = ctx; |
5191d88a | 778 | int ret; |
2b6a321d AD |
779 | |
780 | *irq_count += pdt->interrupt_source_count; | |
5191d88a ND |
781 | |
782 | ret = rmi_check_bootloader_mode(rmi_dev, pdt); | |
783 | if (ret < 0) | |
784 | return ret; | |
2b6a321d AD |
785 | |
786 | return RMI_SCAN_CONTINUE; | |
787 | } | |
788 | ||
29fd0ec2 ND |
/*
 * PDT-scan callback: once F01 is found, reset the device (through the
 * transport's reset hook if available, else by writing the F01 reset
 * command) and wait out the reset delay. Returns RMI_SCAN_DONE after the
 * reset, RMI_SCAN_CONTINUE while still on page 0, or a negative error.
 */
int rmi_initial_reset(struct rmi_device *rmi_dev, void *ctx,
		      const struct pdt_entry *pdt)
{
	int error;

	if (pdt->function_number == 0x01) {
		u16 cmd_addr = pdt->page_start + pdt->command_base_addr;
		u8 cmd_buf = RMI_DEVICE_RESET_CMD;
		const struct rmi_device_platform_data *pdata =
				rmi_get_platform_data(rmi_dev);

		/* Prefer a transport-specific reset when one is provided. */
		if (rmi_dev->xport->ops->reset) {
			error = rmi_dev->xport->ops->reset(rmi_dev->xport,
								cmd_addr);
			if (error)
				return error;

			return RMI_SCAN_DONE;
		}

		rmi_dbg(RMI_DEBUG_CORE, &rmi_dev->dev, "Sending reset\n");
		error = rmi_write_block(rmi_dev, cmd_addr, &cmd_buf, 1);
		if (error) {
			dev_err(&rmi_dev->dev,
				"Initial reset failed. Code = %d.\n", error);
			return error;
		}

		/* Give the firmware time to come back up after the reset. */
		mdelay(pdata->reset_delay_ms ?: DEFAULT_RESET_DELAY_MS);

		return RMI_SCAN_DONE;
	}

	/* F01 should always be on page 0. If we don't find it there, fail. */
	return pdt->page_start == 0 ? RMI_SCAN_CONTINUE : -ENODEV;
}
825 | ||
/*
 * PDT-scan callback: allocate and register an rmi_function for one PDT
 * entry, assign its IRQ slots from *ctx (running IRQ count), and record
 * the F01/F34 containers. Returns RMI_SCAN_CONTINUE or a negative error.
 */
static int rmi_create_function(struct rmi_device *rmi_dev,
			       void *ctx, const struct pdt_entry *pdt)
{
	struct device *dev = &rmi_dev->dev;
	struct rmi_driver_data *data = dev_get_drvdata(dev);
	int *current_irq_count = ctx;
	struct rmi_function *fn;
	int i;
	int error;

	rmi_dbg(RMI_DEBUG_CORE, dev, "Initializing F%02X.\n",
			pdt->function_number);

	/* The irq_mask bitmap is stored in the same allocation, after fn. */
	fn = kzalloc(sizeof(struct rmi_function) +
			BITS_TO_LONGS(data->irq_count) * sizeof(unsigned long),
		GFP_KERNEL);
	if (!fn) {
		dev_err(dev, "Failed to allocate memory for F%02X\n",
			pdt->function_number);
		return -ENOMEM;
	}

	INIT_LIST_HEAD(&fn->node);
	rmi_driver_copy_pdt_to_fd(pdt, &fn->fd);

	fn->rmi_dev = rmi_dev;

	/* Claim the next block of interrupt source bits for this function. */
	fn->num_of_irqs = pdt->interrupt_source_count;
	fn->irq_pos = *current_irq_count;
	*current_irq_count += fn->num_of_irqs;

	for (i = 0; i < fn->num_of_irqs; i++)
		set_bit(fn->irq_pos + i, fn->irq_mask);

	error = rmi_register_function(fn);
	if (error)
		goto err_put_fn;

	if (pdt->function_number == 0x01)
		data->f01_container = fn;
	else if (pdt->function_number == 0x34)
		data->f34_container = fn;

	list_add_tail(&fn->node, &data->function_list);

	return RMI_SCAN_CONTINUE;

err_put_fn:
	/* Registration failed: drop the device reference to free fn. */
	put_device(&fn->dev);
	return error;
}
877 | ||
/*
 * Re-enable the device interrupt (no-op if already enabled). When
 * @clear_wake is set and the transport device may wake the system, also
 * disable the IRQ's wakeup capability.
 */
void rmi_enable_irq(struct rmi_device *rmi_dev, bool clear_wake)
{
	struct rmi_device_platform_data *pdata = rmi_get_platform_data(rmi_dev);
	struct rmi_driver_data *data = dev_get_drvdata(&rmi_dev->dev);
	int irq = pdata->irq;
	int irq_flags;
	int retval;

	mutex_lock(&data->enabled_mutex);

	if (data->enabled)
		goto out;

	enable_irq(irq);
	data->enabled = true;
	if (clear_wake && device_may_wakeup(rmi_dev->xport->dev)) {
		retval = disable_irq_wake(irq);
		if (retval)
			dev_warn(&rmi_dev->dev,
				 "Failed to disable irq for wake: %d\n",
				 retval);
	}

	/*
	 * Call rmi_process_interrupt_requests() after enabling irq,
	 * otherwise we may lose interrupt on edge-triggered systems.
	 */
	irq_flags = irq_get_trigger_type(pdata->irq);
	if (irq_flags & IRQ_TYPE_EDGE_BOTH)
		rmi_process_interrupt_requests(rmi_dev);

out:
	mutex_unlock(&data->enabled_mutex);
}
912 | ||
/*
 * Disable the device interrupt (no-op if already disabled). When
 * @enable_wake is set and the transport device may wake the system,
 * arm the IRQ as a wakeup source instead. Any queued attention data is
 * discarded so nothing stale is processed on re-enable.
 */
void rmi_disable_irq(struct rmi_device *rmi_dev, bool enable_wake)
{
	struct rmi_device_platform_data *pdata = rmi_get_platform_data(rmi_dev);
	struct rmi_driver_data *data = dev_get_drvdata(&rmi_dev->dev);
	struct rmi4_attn_data attn_data = {0};
	int irq = pdata->irq;
	int retval, count;

	mutex_lock(&data->enabled_mutex);

	if (!data->enabled)
		goto out;

	/* Flag first so concurrent producers stop queueing attention data. */
	data->enabled = false;
	disable_irq(irq);
	if (enable_wake && device_may_wakeup(rmi_dev->xport->dev)) {
		retval = enable_irq_wake(irq);
		if (retval)
			dev_warn(&rmi_dev->dev,
				 "Failed to enable irq for wake: %d\n",
				 retval);
	}

	/* make sure the fifo is clean */
	while (!kfifo_is_empty(&data->attn_fifo)) {
		count = kfifo_get(&data->attn_fifo, &attn_data);
		if (count)
			kfree(attn_data.data);
	}

out:
	mutex_unlock(&data->enabled_mutex);
}
946 | ||
947 | int rmi_driver_suspend(struct rmi_device *rmi_dev, bool enable_wake) | |
948 | { | |
949 | int retval; | |
2b6a321d AD |
950 | |
951 | retval = rmi_suspend_functions(rmi_dev); | |
952 | if (retval) | |
953 | dev_warn(&rmi_dev->dev, "Failed to suspend functions: %d\n", | |
954 | retval); | |
955 | ||
a64ea311 | 956 | rmi_disable_irq(rmi_dev, enable_wake); |
2b6a321d AD |
957 | return retval; |
958 | } | |
959 | EXPORT_SYMBOL_GPL(rmi_driver_suspend); | |
960 | ||
3aeed5b5 | 961 | int rmi_driver_resume(struct rmi_device *rmi_dev, bool clear_wake) |
2b6a321d AD |
962 | { |
963 | int retval; | |
964 | ||
a64ea311 | 965 | rmi_enable_irq(rmi_dev, clear_wake); |
3aeed5b5 | 966 | |
2b6a321d AD |
967 | retval = rmi_resume_functions(rmi_dev); |
968 | if (retval) | |
969 | dev_warn(&rmi_dev->dev, "Failed to suspend functions: %d\n", | |
970 | retval); | |
971 | ||
972 | return retval; | |
973 | } | |
974 | EXPORT_SYMBOL_GPL(rmi_driver_resume); | |
975 | ||
/*
 * rmi_driver_remove - tear down a bound RMI physical device
 * @dev: the device being unbound
 *
 * Teardown order matters: the IRQ is disabled first so no handler can run
 * into freed state, then the irq_domain created in rmi_probe_interrupts()
 * is removed, then F34 sysfs attributes and the function list go away.
 * Always returns 0.
 */
static int rmi_driver_remove(struct device *dev)
{
	struct rmi_device *rmi_dev = to_rmi_device(dev);
	struct rmi_driver_data *data = dev_get_drvdata(&rmi_dev->dev);

	/* No wakeup arming on remove, hence enable_wake == false. */
	rmi_disable_irq(rmi_dev, false);

	irq_domain_remove(data->irqdomain);
	data->irqdomain = NULL;

	rmi_f34_remove_sysfs(rmi_dev);
	rmi_free_function_list(rmi_dev);

	return 0;
}
991 | ||
d8a8b3ed AD |
992 | #ifdef CONFIG_OF |
993 | static int rmi_driver_of_probe(struct device *dev, | |
994 | struct rmi_device_platform_data *pdata) | |
995 | { | |
996 | int retval; | |
997 | ||
998 | retval = rmi_of_property_read_u32(dev, &pdata->reset_delay_ms, | |
999 | "syna,reset-delay-ms", 1); | |
1000 | if (retval) | |
1001 | return retval; | |
1002 | ||
1003 | return 0; | |
1004 | } | |
1005 | #else | |
1006 | static inline int rmi_driver_of_probe(struct device *dev, | |
1007 | struct rmi_device_platform_data *pdata) | |
1008 | { | |
1009 | return -ENODEV; | |
1010 | } | |
1011 | #endif | |
1012 | ||
29fd0ec2 | 1013 | int rmi_probe_interrupts(struct rmi_driver_data *data) |
6bd0dcfa ND |
1014 | { |
1015 | struct rmi_device *rmi_dev = data->rmi_dev; | |
1016 | struct device *dev = &rmi_dev->dev; | |
24d28e4f ND |
1017 | struct fwnode_handle *fwnode = rmi_dev->xport->dev->fwnode; |
1018 | int irq_count = 0; | |
6bd0dcfa | 1019 | size_t size; |
6bd0dcfa ND |
1020 | int retval; |
1021 | ||
1022 | /* | |
1023 | * We need to count the IRQs and allocate their storage before scanning | |
1024 | * the PDT and creating the function entries, because adding a new | |
1025 | * function can trigger events that result in the IRQ related storage | |
1026 | * being accessed. | |
1027 | */ | |
1028 | rmi_dbg(RMI_DEBUG_CORE, dev, "%s: Counting IRQs.\n", __func__); | |
5191d88a ND |
1029 | data->bootloader_mode = false; |
1030 | ||
6bd0dcfa ND |
1031 | retval = rmi_scan_pdt(rmi_dev, &irq_count, rmi_count_irqs); |
1032 | if (retval < 0) { | |
1033 | dev_err(dev, "IRQ counting failed with code %d.\n", retval); | |
1034 | return retval; | |
1035 | } | |
29fd0ec2 | 1036 | |
5191d88a | 1037 | if (data->bootloader_mode) |
ed77bdf4 | 1038 | dev_warn(dev, "Device in bootloader mode.\n"); |
29fd0ec2 | 1039 | |
24d28e4f ND |
1040 | /* Allocate and register a linear revmap irq_domain */ |
1041 | data->irqdomain = irq_domain_create_linear(fwnode, irq_count, | |
1042 | &irq_domain_simple_ops, | |
1043 | data); | |
1044 | if (!data->irqdomain) { | |
1045 | dev_err(&rmi_dev->dev, "Failed to create IRQ domain\n"); | |
1046 | return PTR_ERR(data->irqdomain); | |
1047 | } | |
1048 | ||
6bd0dcfa ND |
1049 | data->irq_count = irq_count; |
1050 | data->num_of_irq_regs = (data->irq_count + 7) / 8; | |
1051 | ||
1052 | size = BITS_TO_LONGS(data->irq_count) * sizeof(unsigned long); | |
29fd0ec2 ND |
1053 | data->irq_memory = devm_kzalloc(dev, size * 4, GFP_KERNEL); |
1054 | if (!data->irq_memory) { | |
6bd0dcfa | 1055 | dev_err(dev, "Failed to allocate memory for irq masks.\n"); |
e7839533 | 1056 | return -ENOMEM; |
6bd0dcfa ND |
1057 | } |
1058 | ||
29fd0ec2 ND |
1059 | data->irq_status = data->irq_memory + size * 0; |
1060 | data->fn_irq_bits = data->irq_memory + size * 1; | |
1061 | data->current_irq_mask = data->irq_memory + size * 2; | |
1062 | data->new_irq_mask = data->irq_memory + size * 3; | |
6bd0dcfa ND |
1063 | |
1064 | return retval; | |
1065 | } | |
1066 | ||
29fd0ec2 | 1067 | int rmi_init_functions(struct rmi_driver_data *data) |
6bd0dcfa ND |
1068 | { |
1069 | struct rmi_device *rmi_dev = data->rmi_dev; | |
1070 | struct device *dev = &rmi_dev->dev; | |
24d28e4f | 1071 | int irq_count = 0; |
6bd0dcfa ND |
1072 | int retval; |
1073 | ||
6bd0dcfa ND |
1074 | rmi_dbg(RMI_DEBUG_CORE, dev, "%s: Creating functions.\n", __func__); |
1075 | retval = rmi_scan_pdt(rmi_dev, &irq_count, rmi_create_function); | |
1076 | if (retval < 0) { | |
1077 | dev_err(dev, "Function creation failed with code %d.\n", | |
1078 | retval); | |
1079 | goto err_destroy_functions; | |
1080 | } | |
1081 | ||
1082 | if (!data->f01_container) { | |
1083 | dev_err(dev, "Missing F01 container!\n"); | |
1084 | retval = -EINVAL; | |
1085 | goto err_destroy_functions; | |
1086 | } | |
1087 | ||
1088 | retval = rmi_read_block(rmi_dev, | |
1089 | data->f01_container->fd.control_base_addr + 1, | |
1090 | data->current_irq_mask, data->num_of_irq_regs); | |
1091 | if (retval < 0) { | |
1092 | dev_err(dev, "%s: Failed to read current IRQ mask.\n", | |
1093 | __func__); | |
1094 | goto err_destroy_functions; | |
1095 | } | |
1096 | ||
1097 | return 0; | |
1098 | ||
1099 | err_destroy_functions: | |
1100 | rmi_free_function_list(rmi_dev); | |
1101 | return retval; | |
1102 | } | |
1103 | ||
2b6a321d AD |
1104 | static int rmi_driver_probe(struct device *dev) |
1105 | { | |
1106 | struct rmi_driver *rmi_driver; | |
1107 | struct rmi_driver_data *data; | |
1108 | struct rmi_device_platform_data *pdata; | |
1109 | struct rmi_device *rmi_dev; | |
2b6a321d AD |
1110 | int retval; |
1111 | ||
1112 | rmi_dbg(RMI_DEBUG_CORE, dev, "%s: Starting probe.\n", | |
1113 | __func__); | |
1114 | ||
1115 | if (!rmi_is_physical_device(dev)) { | |
1116 | rmi_dbg(RMI_DEBUG_CORE, dev, "Not a physical device.\n"); | |
1117 | return -ENODEV; | |
1118 | } | |
1119 | ||
1120 | rmi_dev = to_rmi_device(dev); | |
1121 | rmi_driver = to_rmi_driver(dev->driver); | |
1122 | rmi_dev->driver = rmi_driver; | |
1123 | ||
1124 | pdata = rmi_get_platform_data(rmi_dev); | |
1125 | ||
d8a8b3ed AD |
1126 | if (rmi_dev->xport->dev->of_node) { |
1127 | retval = rmi_driver_of_probe(rmi_dev->xport->dev, pdata); | |
1128 | if (retval) | |
1129 | return retval; | |
1130 | } | |
1131 | ||
2b6a321d AD |
1132 | data = devm_kzalloc(dev, sizeof(struct rmi_driver_data), GFP_KERNEL); |
1133 | if (!data) | |
1134 | return -ENOMEM; | |
1135 | ||
1136 | INIT_LIST_HEAD(&data->function_list); | |
1137 | data->rmi_dev = rmi_dev; | |
1138 | dev_set_drvdata(&rmi_dev->dev, data); | |
1139 | ||
1140 | /* | |
1141 | * Right before a warm boot, the sensor might be in some unusual state, | |
1142 | * such as F54 diagnostics, or F34 bootloader mode after a firmware | |
1143 | * or configuration update. In order to clear the sensor to a known | |
1144 | * state and/or apply any updates, we issue a initial reset to clear any | |
1145 | * previous settings and force it into normal operation. | |
1146 | * | |
1147 | * We have to do this before actually building the PDT because | |
1148 | * the reflash updates (if any) might cause various registers to move | |
1149 | * around. | |
1150 | * | |
1151 | * For a number of reasons, this initial reset may fail to return | |
1152 | * within the specified time, but we'll still be able to bring up the | |
1153 | * driver normally after that failure. This occurs most commonly in | |
1154 | * a cold boot situation (where then firmware takes longer to come up | |
1155 | * than from a warm boot) and the reset_delay_ms in the platform data | |
1156 | * has been set too short to accommodate that. Since the sensor will | |
1157 | * eventually come up and be usable, we don't want to just fail here | |
1158 | * and leave the customer's device unusable. So we warn them, and | |
1159 | * continue processing. | |
1160 | */ | |
1161 | retval = rmi_scan_pdt(rmi_dev, NULL, rmi_initial_reset); | |
1162 | if (retval < 0) | |
1163 | dev_warn(dev, "RMI initial reset failed! Continuing in spite of this.\n"); | |
1164 | ||
1165 | retval = rmi_read(rmi_dev, PDT_PROPERTIES_LOCATION, &data->pdt_props); | |
1166 | if (retval < 0) { | |
1167 | /* | |
1168 | * we'll print out a warning and continue since | |
1169 | * failure to get the PDT properties is not a cause to fail | |
1170 | */ | |
1171 | dev_warn(dev, "Could not read PDT properties from %#06x (code %d). Assuming 0x00.\n", | |
1172 | PDT_PROPERTIES_LOCATION, retval); | |
1173 | } | |
1174 | ||
2b6a321d | 1175 | mutex_init(&data->irq_mutex); |
a64ea311 | 1176 | mutex_init(&data->enabled_mutex); |
2b6a321d | 1177 | |
6bd0dcfa ND |
1178 | retval = rmi_probe_interrupts(data); |
1179 | if (retval) | |
2b6a321d | 1180 | goto err; |
2b6a321d AD |
1181 | |
1182 | if (rmi_dev->xport->input) { | |
1183 | /* | |
1184 | * The transport driver already has an input device. | |
1185 | * In some cases it is preferable to reuse the transport | |
1186 | * devices input device instead of creating a new one here. | |
1187 | * One example is some HID touchpads report "pass-through" | |
1188 | * button events are not reported by rmi registers. | |
1189 | */ | |
1190 | data->input = rmi_dev->xport->input; | |
1191 | } else { | |
1192 | data->input = devm_input_allocate_device(dev); | |
1193 | if (!data->input) { | |
1194 | dev_err(dev, "%s: Failed to allocate input device.\n", | |
1195 | __func__); | |
1196 | retval = -ENOMEM; | |
6bd0dcfa | 1197 | goto err; |
2b6a321d AD |
1198 | } |
1199 | rmi_driver_set_input_params(rmi_dev, data->input); | |
1200 | data->input->phys = devm_kasprintf(dev, GFP_KERNEL, | |
1201 | "%s/input0", dev_name(dev)); | |
1202 | } | |
1203 | ||
6bd0dcfa ND |
1204 | retval = rmi_init_functions(data); |
1205 | if (retval) | |
1206 | goto err; | |
2b6a321d | 1207 | |
29fd0ec2 ND |
1208 | retval = rmi_f34_create_sysfs(rmi_dev); |
1209 | if (retval) | |
1210 | goto err; | |
2b6a321d AD |
1211 | |
1212 | if (data->input) { | |
1213 | rmi_driver_set_input_name(rmi_dev, data->input); | |
1214 | if (!rmi_dev->xport->input) { | |
1215 | if (input_register_device(data->input)) { | |
1216 | dev_err(dev, "%s: Failed to register input device.\n", | |
1217 | __func__); | |
1218 | goto err_destroy_functions; | |
1219 | } | |
1220 | } | |
1221 | } | |
1222 | ||
3aeed5b5 BA |
1223 | retval = rmi_irq_init(rmi_dev); |
1224 | if (retval < 0) | |
1225 | goto err_destroy_functions; | |
1226 | ||
8cf0adf2 | 1227 | if (data->f01_container->dev.driver) { |
2b6a321d | 1228 | /* Driver already bound, so enable ATTN now. */ |
8cf0adf2 DT |
1229 | retval = rmi_enable_sensor(rmi_dev); |
1230 | if (retval) | |
1231 | goto err_disable_irq; | |
1232 | } | |
2b6a321d AD |
1233 | |
1234 | return 0; | |
1235 | ||
8cf0adf2 DT |
1236 | err_disable_irq: |
1237 | rmi_disable_irq(rmi_dev, false); | |
2b6a321d AD |
1238 | err_destroy_functions: |
1239 | rmi_free_function_list(rmi_dev); | |
1240 | err: | |
8cf0adf2 | 1241 | return retval; |
2b6a321d AD |
1242 | } |
1243 | ||
/*
 * The one driver instance that binds to every RMI4 physical device on
 * rmi_bus_type.  The embedded device_driver handles probe/remove; the
 * rmi_driver callbacks are used by the bus core and function drivers.
 */
static struct rmi_driver rmi_physical_driver = {
	.driver = {
		.owner = THIS_MODULE,
		.name = "rmi4_physical",
		.bus = &rmi_bus_type,
		.probe = rmi_driver_probe,
		.remove = rmi_driver_remove,
	},
	.reset_handler = rmi_driver_reset_handler,
	.clear_irq_bits = rmi_driver_clear_irq_bits,
	.set_irq_bits = rmi_driver_set_irq_bits,
	.set_input_params = rmi_driver_set_input_params,
};
1257 | ||
1258 | bool rmi_is_physical_driver(struct device_driver *drv) | |
1259 | { | |
1260 | return drv == &rmi_physical_driver.driver; | |
1261 | } | |
1262 | ||
1263 | int __init rmi_register_physical_driver(void) | |
1264 | { | |
1265 | int error; | |
1266 | ||
1267 | error = driver_register(&rmi_physical_driver.driver); | |
1268 | if (error) { | |
1269 | pr_err("%s: driver register failed, code=%d.\n", __func__, | |
1270 | error); | |
1271 | return error; | |
1272 | } | |
1273 | ||
1274 | return 0; | |
1275 | } | |
1276 | ||
/* Module-exit counterpart of rmi_register_physical_driver(). */
void __exit rmi_unregister_physical_driver(void)
{
	driver_unregister(&rmi_physical_driver.driver);
}