treewide: Replace GPLv2 boilerplate/reference with SPDX - rule 157
[linux-block.git] / drivers / media / rc / ir-rx51.c
CommitLineData
c942fddf 1// SPDX-License-Identifier: GPL-2.0-or-later
c332e847
TK
2/*
3 * Copyright (C) 2008 Nokia Corporation
4 *
5 * Based on lirc_serial.c
c332e847 6 */
4406d52a 7#include <linux/clk.h>
c332e847 8#include <linux/module.h>
c332e847 9#include <linux/platform_device.h>
c332e847 10#include <linux/wait.h>
3fdd1526 11#include <linux/pwm.h>
b5406176 12#include <linux/of.h>
79cdad36 13#include <linux/hrtimer.h>
c332e847 14
a92def1b 15#include <media/rc-core.h>
c332e847 16
c332e847
TK
17#define WBUF_LEN 256
18
a92def1b
SY
/* Per-device state for the RX51 (Nokia N900) IR transmitter. */
struct ir_rx51 {
	struct rc_dev *rcdev;		/* rc-core device we register */
	struct pwm_device *pwm;		/* PWM generating the IR carrier */
	struct hrtimer timer;		/* paces the pulse/space sequence */
	struct device *dev;		/* platform device, used for logging */
	wait_queue_head_t wqueue;	/* woken when a transfer finishes */

	unsigned int freq;		/* carrier frequency */
	unsigned int duty_cycle;	/* carrier duty cycle */

	int wbuf[WBUF_LEN];		/* pulse/space durations to send */
	int wbuf_index;			/* next wbuf entry; -1 = idle */
	unsigned long device_is_open;	/* bit 1 set while the device is open */
};
32
/* Turn the IR carrier on by enabling the PWM output. */
static inline void ir_rx51_on(struct ir_rx51 *ir_rx51)
{
	pwm_enable(ir_rx51->pwm);
}
37
/* Turn the IR carrier off by disabling the PWM output. */
static inline void ir_rx51_off(struct ir_rx51 *ir_rx51)
{
	pwm_disable(ir_rx51->pwm);
}
42
a92def1b 43static int init_timing_params(struct ir_rx51 *ir_rx51)
c332e847 44{
a92def1b
SY
45 struct pwm_device *pwm = ir_rx51->pwm;
46 int duty, period = DIV_ROUND_CLOSEST(NSEC_PER_SEC, ir_rx51->freq);
3fdd1526 47
a92def1b 48 duty = DIV_ROUND_CLOSEST(ir_rx51->duty_cycle * period, 100);
3fdd1526
ID
49
50 pwm_config(pwm, duty, period);
51
c332e847
TK
52 return 0;
53}
54
/*
 * hrtimer callback driving one IR transmission.
 *
 * wbuf[] holds alternating pulse/space durations; even indices turn the
 * carrier on, odd indices turn it off.  The timer re-arms itself for
 * each entry until the buffer is exhausted or a -1 termination mark is
 * reached, then wakes up the sleeping transmitter thread.
 */
static enum hrtimer_restart ir_rx51_timer_cb(struct hrtimer *timer)
{
	struct ir_rx51 *ir_rx51 = container_of(timer, struct ir_rx51, timer);
	ktime_t now;

	/* wbuf_index < 0 means "idle"; the timer should never fire then */
	if (ir_rx51->wbuf_index < 0) {
		dev_err_ratelimited(ir_rx51->dev,
				    "BUG wbuf_index has value of %i\n",
				    ir_rx51->wbuf_index);
		goto end;
	}

	/*
	 * If we happen to hit an odd latency spike, loop through the
	 * pulses until we catch up.
	 */
	do {
		u64 ns;

		if (ir_rx51->wbuf_index >= WBUF_LEN)
			goto end;
		if (ir_rx51->wbuf[ir_rx51->wbuf_index] == -1)
			goto end;

		/* Odd index: space (carrier off); even index: pulse (on) */
		if (ir_rx51->wbuf_index % 2)
			ir_rx51_off(ir_rx51);
		else
			ir_rx51_on(ir_rx51);

		ns = US_TO_NS(ir_rx51->wbuf[ir_rx51->wbuf_index]);
		hrtimer_add_expires_ns(timer, ns);

		ir_rx51->wbuf_index++;

		now = timer->base->get_time();

	} while (hrtimer_get_expires_tv64(timer) < now);

	return HRTIMER_RESTART;
end:
	/* Stop TX here */
	ir_rx51_off(ir_rx51);
	ir_rx51->wbuf_index = -1;

	wake_up_interruptible(&ir_rx51->wqueue);

	return HRTIMER_NORESTART;
}
103
a92def1b
SY
/*
 * rc-core tx_ir callback: transmit @count pulse/space durations from
 * @buffer.  Blocks until the whole sequence has been sent by the
 * hrtimer callback.
 *
 * Returns @count on success or -EINVAL when @buffer is too large.
 */
static int ir_rx51_tx(struct rc_dev *dev, unsigned int *buffer,
		      unsigned int count)
{
	struct ir_rx51 *ir_rx51 = dev->priv;

	if (count > WBUF_LEN)
		return -EINVAL;

	/*
	 * NOTE(review): wbuf is overwritten before waiting for a pending
	 * transfer below — presumably rc-core serializes tx_ir calls so
	 * no transfer can be in flight here; confirm, otherwise this
	 * could corrupt an in-flight buffer.
	 */
	memcpy(ir_rx51->wbuf, buffer, count * sizeof(unsigned int));

	/* Wait any pending transfers to finish */
	wait_event_interruptible(ir_rx51->wqueue, ir_rx51->wbuf_index < 0);

	init_timing_params(ir_rx51);
	if (count < WBUF_LEN)
		ir_rx51->wbuf[count] = -1; /* Insert termination mark */

	/*
	 * REVISIT: Adjust latency requirements so the device doesn't go in too
	 * deep sleep states with pm_qos_add_request().
	 */

	/* Send the first pulse by hand; the hrtimer takes over from there */
	ir_rx51_on(ir_rx51);
	ir_rx51->wbuf_index = 1;
	hrtimer_start(&ir_rx51->timer,
		      ns_to_ktime(US_TO_NS(ir_rx51->wbuf[0])),
		      HRTIMER_MODE_REL);
	/*
	 * Don't return back to the userspace until the transfer has
	 * finished
	 */
	wait_event_interruptible(ir_rx51->wqueue, ir_rx51->wbuf_index < 0);

	/* REVISIT: Remove pm_qos constraint, we can sleep again */

	return count;
}
141
a92def1b 142static int ir_rx51_open(struct rc_dev *dev)
c332e847 143{
a92def1b 144 struct ir_rx51 *ir_rx51 = dev->priv;
c332e847 145
a92def1b 146 if (test_and_set_bit(1, &ir_rx51->device_is_open))
c332e847
TK
147 return -EBUSY;
148
a92def1b
SY
149 ir_rx51->pwm = pwm_get(ir_rx51->dev, NULL);
150 if (IS_ERR(ir_rx51->pwm)) {
151 int res = PTR_ERR(ir_rx51->pwm);
79cdad36 152
a92def1b 153 dev_err(ir_rx51->dev, "pwm_get failed: %d\n", res);
79cdad36
ID
154 return res;
155 }
156
157 return 0;
c332e847
TK
158}
159
/*
 * rc-core close callback: stop any transfer in flight and release the
 * PWM acquired in ir_rx51_open().
 */
static void ir_rx51_release(struct rc_dev *dev)
{
	struct ir_rx51 *ir_rx51 = dev->priv;

	/* Cancel the timer first so its callback cannot touch the PWM */
	hrtimer_cancel(&ir_rx51->timer);
	ir_rx51_off(ir_rx51);
	pwm_put(ir_rx51->pwm);

	/* Mark the device closed so it can be opened again */
	clear_bit(1, &ir_rx51->device_is_open);
}
170
/* Single device instance: 50% duty cycle default, transmitter idle. */
static struct ir_rx51 ir_rx51 = {
	.duty_cycle = 50,
	.wbuf_index = -1,
};
175
a92def1b
SY
176static int ir_rx51_set_duty_cycle(struct rc_dev *dev, u32 duty)
177{
178 struct ir_rx51 *ir_rx51 = dev->priv;
c332e847 179
a92def1b
SY
180 ir_rx51->duty_cycle = duty;
181
182 return 0;
183}
184
185static int ir_rx51_set_tx_carrier(struct rc_dev *dev, u32 carrier)
186{
187 struct ir_rx51 *ir_rx51 = dev->priv;
188
189 if (carrier > 500000 || carrier < 20000)
190 return -EINVAL;
191
192 ir_rx51->freq = carrier;
193
194 return 0;
195}
c332e847
TK
196
197#ifdef CONFIG_PM
198
/* Refuse to suspend while the device is open (transmit may be active). */
static int ir_rx51_suspend(struct platform_device *dev, pm_message_t state)
{
	/*
	 * In case the device is still open, do not suspend. Normally
	 * this should not be a problem as lircd only keeps the device
	 * open only for short periods of time. We also don't want to
	 * get involved with race conditions that might happen if we
	 * were in a middle of a transmit. Thus, we defer any suspend
	 * actions until transmit has completed.
	 */
	if (test_and_set_bit(1, &ir_rx51.device_is_open))
		return -EAGAIN;

	/* We only probed for openness; release the bit straight away */
	clear_bit(1, &ir_rx51.device_is_open);

	return 0;
}
216
/* Nothing to restore on resume; the PWM is reacquired on next open. */
static int ir_rx51_resume(struct platform_device *dev)
{
	return 0;
}
221
222#else
223
a92def1b
SY
224#define ir_rx51_suspend NULL
225#define ir_rx51_resume NULL
c332e847
TK
226
227#endif /* CONFIG_PM */
228
a92def1b 229static int ir_rx51_probe(struct platform_device *dev)
c332e847 230{
3fdd1526 231 struct pwm_device *pwm;
a92def1b 232 struct rc_dev *rcdev;
3fdd1526 233
3fdd1526
ID
234 pwm = pwm_get(&dev->dev, NULL);
235 if (IS_ERR(pwm)) {
236 int err = PTR_ERR(pwm);
237
238 if (err != -EPROBE_DEFER)
239 dev_err(&dev->dev, "pwm_get failed: %d\n", err);
240 return err;
241 }
242
243 /* Use default, in case userspace does not set the carrier */
a92def1b 244 ir_rx51.freq = DIV_ROUND_CLOSEST(pwm_get_period(pwm), NSEC_PER_SEC);
3fdd1526
ID
245 pwm_put(pwm);
246
a92def1b
SY
247 hrtimer_init(&ir_rx51.timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
248 ir_rx51.timer.function = ir_rx51_timer_cb;
79cdad36 249
a92def1b 250 ir_rx51.dev = &dev->dev;
c332e847 251
a92def1b
SY
252 rcdev = devm_rc_allocate_device(&dev->dev, RC_DRIVER_IR_RAW_TX);
253 if (!rcdev)
254 return -ENOMEM;
c332e847 255
a92def1b
SY
256 rcdev->priv = &ir_rx51;
257 rcdev->open = ir_rx51_open;
258 rcdev->close = ir_rx51_release;
259 rcdev->tx_ir = ir_rx51_tx;
260 rcdev->s_tx_duty_cycle = ir_rx51_set_duty_cycle;
261 rcdev->s_tx_carrier = ir_rx51_set_tx_carrier;
262 rcdev->driver_name = KBUILD_MODNAME;
263
264 ir_rx51.rcdev = rcdev;
265
266 return devm_rc_register_device(&dev->dev, ir_rx51.rcdev);
c332e847
TK
267}
268
/* Nothing to tear down explicitly: rc device is devm-managed. */
static int ir_rx51_remove(struct platform_device *dev)
{
	return 0;
}
273
/* Device-tree match table: Nokia N900 IR transmitter. */
static const struct of_device_id ir_rx51_match[] = {
	{
		.compatible = "nokia,n900-ir",
	},
	{},
};
MODULE_DEVICE_TABLE(of, ir_rx51_match);
b5406176 281
a92def1b
SY
/* Platform driver glue; suspend/resume are NULL without CONFIG_PM. */
static struct platform_driver ir_rx51_platform_driver = {
	.probe = ir_rx51_probe,
	.remove = ir_rx51_remove,
	.suspend = ir_rx51_suspend,
	.resume = ir_rx51_resume,
	.driver = {
		.name = KBUILD_MODNAME,
		.of_match_table = of_match_ptr(ir_rx51_match),
	},
};
module_platform_driver(ir_rx51_platform_driver);
c332e847 293
a92def1b 294MODULE_DESCRIPTION("IR TX driver for Nokia RX51");
c332e847
TK
295MODULE_AUTHOR("Nokia Corporation");
296MODULE_LICENSE("GPL");