1 // SPDX-License-Identifier: GPL-2.0-or-later
3 * PTP 1588 clock support - character device implementation.
5 * Copyright (C) 2010 OMICRON electronics GmbH
7 #include <linux/module.h>
8 #include <linux/posix-clock.h>
9 #include <linux/poll.h>
10 #include <linux/sched.h>
11 #include <linux/slab.h>
12 #include <linux/timekeeping.h>
13 #include <linux/debugfs.h>
15 #include <linux/nospec.h>
17 #include "ptp_private.h"
/*
 * ptp_disable_pinfunc - ask the driver to turn off a pin's current function.
 * @ops:  driver callbacks/capabilities for this clock
 * @func: function currently assigned to the pin (elided dispatch below)
 * @chan: channel index of that function
 *
 * Builds a zeroed disable request and forwards it to the driver's
 * ->enable() callback with on=0.
 *
 * NOTE(review): the switch statement dispatching on 'func' is elided in
 * this view; the EXTTS and PEROUT assignments below are presumably its
 * case arms — confirm against the full source.
 */
19 static int ptp_disable_pinfunc(struct ptp_clock_info *ops,
20 enum ptp_pin_function func, unsigned int chan)
22 struct ptp_clock_request rq;
/* Zero the request so unused union members don't leak stack garbage. */
25 memset(&rq, 0, sizeof(rq));
/* External-timestamp function: disable the given channel. */
31 rq.type = PTP_CLK_REQ_EXTTS;
32 rq.extts.index = chan;
33 err = ops->enable(ops, &rq, 0);
/* Periodic-output function: disable the given channel. */
36 rq.type = PTP_CLK_REQ_PEROUT;
37 rq.perout.index = chan;
38 err = ops->enable(ops, &rq, 0);
/*
 * ptp_set_pinfunc - assign a function/channel pair to a programmable pin.
 * @ptp:  the clock whose pin table is being changed
 * @pin:  index into info->pin_config (caller must have validated/bounded it)
 * @func: desired pin function
 * @chan: desired channel for that function
 *
 * If another pin already carries (func, chan), that assignment is revoked
 * first so the pair stays unique across the pin table.  The driver's
 * ->verify() callback gets the final say on whether the combination is
 * usable on this hardware.
 *
 * NOTE(review): caller is expected to hold ptp->pincfg_mux (ptp_ioctl does);
 * confirm in the full source.
 */
49 int ptp_set_pinfunc(struct ptp_clock *ptp, unsigned int pin,
50 enum ptp_pin_function func, unsigned int chan)
52 struct ptp_clock_info *info = ptp->info;
53 struct ptp_pin_desc *pin1 = NULL, *pin2 = &info->pin_config[pin];
56 /* Check to see if any other pin previously had this function. */
57 for (i = 0; i < info->n_pins; i++) {
58 if (info->pin_config[i].func == func &&
59 info->pin_config[i].chan == chan) {
60 pin1 = &info->pin_config[i];
/* Reject channels beyond what the hardware advertises. */
67 /* Check the desired function and channel. */
72 if (chan >= info->n_ext_ts)
76 if (chan >= info->n_per_out)
/* Let the driver veto combinations the generic checks can't know about. */
87 if (info->verify(info, pin, func, chan)) {
/*
 * NOTE(review): the message says "pin %u" but the second argument
 * passed is 'chan', not 'pin' — the printed pin number is wrong.
 * Likely fix: pass 'pin' (or print func, chan and pin all three).
 */
88 pr_err("driver cannot use function %u on pin %u\n", func, chan);
92 /* Disable whatever function was previously assigned. */
/* First revoke the (func, chan) pair from the pin that held it before. */
94 ptp_disable_pinfunc(info, func, chan);
95 pin1->func = PTP_PF_NONE;
/* Then turn off whatever this pin itself was doing until now. */
98 ptp_disable_pinfunc(info, pin2->func, pin2->chan);
/*
 * ptp_open - per-open() setup for the PTP character device.
 * @pccontext: posix-clock per-open context; its private_clkdata slot
 *             receives the new timestamp queue
 * @fmode:     open mode (unused in the visible code)
 *
 * Allocates a private timestamp event queue for this reader, enables all
 * channels in its mask, links it onto the clock's queue list under
 * tsevqs_lock, and exposes the mask via a per-queue debugfs directory
 * named after the queue's (kernel-hashed %p) address.
 *
 * NOTE(review): the allocation-failure error paths are elided in this view.
 */
105 int ptp_open(struct posix_clock_context *pccontext, fmode_t fmode)
107 struct ptp_clock *ptp =
108 container_of(pccontext->clk, struct ptp_clock, clock);
109 struct timestamp_event_queue *queue;
110 char debugfsname[32];
113 queue = kzalloc(sizeof(*queue), GFP_KERNEL);
116 queue->mask = bitmap_alloc(PTP_MAX_CHANNELS, GFP_KERNEL);
/* New readers start with every channel enabled. */
121 bitmap_set(queue->mask, 0, PTP_MAX_CHANNELS);
122 spin_lock_init(&queue->lock);
/* Publish the queue so event producers can find it. */
123 spin_lock_irqsave(&ptp->tsevqs_lock, flags);
124 list_add_tail(&queue->qlist, &ptp->tsevqs);
125 spin_unlock_irqrestore(&ptp->tsevqs_lock, flags);
126 pccontext->private_clkdata = queue;
128 /* Debugfs contents */
/*
 * NOTE(review): kernel %p prints a hashed pointer with no "0x" prefix,
 * so the literal "0x" here is cosmetic; the name is only an identifier.
 */
129 sprintf(debugfsname, "0x%p", queue);
130 queue->debugfs_instance =
131 debugfs_create_dir(debugfsname, ptp->debugfs_root);
/* Expose the channel mask read-only as an array of u32 words. */
132 queue->dfs_bitmap.array = (u32 *)queue->mask;
133 queue->dfs_bitmap.n_elements =
134 DIV_ROUND_UP(PTP_MAX_CHANNELS, BITS_PER_BYTE * sizeof(u32));
135 debugfs_create_u32_array("mask", 0444, queue->debugfs_instance,
/*
 * ptp_release - tear down the per-open timestamp queue created by ptp_open.
 * @pccontext: posix-clock per-open context holding the queue
 *
 * Mirrors ptp_open in reverse: remove the debugfs directory, unlink the
 * queue from the clock's list under tsevqs_lock, then free the channel
 * mask (the queue struct itself is presumably freed in an elided line).
 */
141 int ptp_release(struct posix_clock_context *pccontext)
143 struct timestamp_event_queue *queue = pccontext->private_clkdata;
145 struct ptp_clock *ptp =
146 container_of(pccontext->clk, struct ptp_clock, clock);
148 debugfs_remove(queue->debugfs_instance);
/* Clear the slot first so no other path can reach the dying queue. */
149 pccontext->private_clkdata = NULL;
150 spin_lock_irqsave(&ptp->tsevqs_lock, flags);
151 list_del(&queue->qlist);
152 spin_unlock_irqrestore(&ptp->tsevqs_lock, flags);
153 bitmap_free(queue->mask);
/*
 * ptp_ioctl - dispatch all PTP_* ioctls for the /dev/ptpN chardev.
 * @pccontext: per-open posix-clock context (carries the timestamp queue)
 * @cmd:       ioctl command (both v1 and the stricter *2 variants)
 * @arg:       user-space argument pointer (elided from the visible signature)
 *
 * Convention throughout: the *2 commands validate flags/reserved fields
 * strictly and reject bad input; the v1 commands silently mask unknown
 * flags and zero reserved fields for backward compatibility.
 *
 * NOTE(review): many error-path lines (err assignments, breaks, the final
 * return) are elided in this view.
 */
158 long ptp_ioctl(struct posix_clock_context *pccontext, unsigned int cmd,
161 struct ptp_clock *ptp =
162 container_of(pccontext->clk, struct ptp_clock, clock);
163 struct ptp_sys_offset_extended *extoff = NULL;
164 struct ptp_sys_offset_precise precise_offset;
165 struct system_device_crosststamp xtstamp;
166 struct ptp_clock_info *ops = ptp->info;
167 struct ptp_sys_offset *sysoff = NULL;
168 struct timestamp_event_queue *tsevq;
169 struct ptp_system_timestamp sts;
170 struct ptp_clock_request req;
171 struct ptp_clock_caps caps;
172 struct ptp_clock_time *pct;
173 unsigned int i, pin_index;
174 struct ptp_pin_desc pd;
175 struct timespec64 ts;
178 tsevq = pccontext->private_clkdata;
/* Report static capabilities of this clock to user space. */
182 case PTP_CLOCK_GETCAPS:
183 case PTP_CLOCK_GETCAPS2:
184 memset(&caps, 0, sizeof(caps));
186 caps.max_adj = ptp->info->max_adj;
187 caps.n_alarm = ptp->info->n_alarm;
188 caps.n_ext_ts = ptp->info->n_ext_ts;
189 caps.n_per_out = ptp->info->n_per_out;
190 caps.pps = ptp->info->pps;
191 caps.n_pins = ptp->info->n_pins;
192 caps.cross_timestamping = ptp->info->getcrosststamp != NULL;
/* Phase adjustment is usable only if the driver can also bound it. */
193 caps.adjust_phase = ptp->info->adjphase != NULL &&
194 ptp->info->getmaxphase != NULL;
195 if (caps.adjust_phase)
196 caps.max_phase_adj = ptp->info->getmaxphase(ptp->info);
197 if (copy_to_user((void __user *)arg, &caps, sizeof(caps)))
/* Enable/disable external timestamping on a channel. */
201 case PTP_EXTTS_REQUEST:
202 case PTP_EXTTS_REQUEST2:
203 memset(&req, 0, sizeof(req));
205 if (copy_from_user(&req.extts, (void __user *)arg,
206 sizeof(req.extts))) {
210 if (cmd == PTP_EXTTS_REQUEST2) {
211 /* Tell the drivers to check the flags carefully. */
212 req.extts.flags |= PTP_STRICT_FLAGS;
213 /* Make sure no reserved bit is set. */
214 if ((req.extts.flags & ~PTP_EXTTS_VALID_FLAGS) ||
215 req.extts.rsv[0] || req.extts.rsv[1]) {
219 /* Ensure one of the rising/falling edge bits is set. */
220 if ((req.extts.flags & PTP_ENABLE_FEATURE) &&
221 (req.extts.flags & PTP_EXTTS_EDGES) == 0) {
/* v1: sanitize rather than reject, for old binaries. */
225 } else if (cmd == PTP_EXTTS_REQUEST) {
226 req.extts.flags &= PTP_EXTTS_V1_VALID_FLAGS;
227 req.extts.rsv[0] = 0;
228 req.extts.rsv[1] = 0;
230 if (req.extts.index >= ops->n_ext_ts) {
234 req.type = PTP_CLK_REQ_EXTTS;
235 enable = req.extts.flags & PTP_ENABLE_FEATURE ? 1 : 0;
/* pincfg_mux serializes enable/disable against pin reconfiguration. */
236 if (mutex_lock_interruptible(&ptp->pincfg_mux))
238 err = ops->enable(ops, &req, enable);
239 mutex_unlock(&ptp->pincfg_mux);
/* Program a periodic output signal; zero period means disable. */
242 case PTP_PEROUT_REQUEST:
243 case PTP_PEROUT_REQUEST2:
244 memset(&req, 0, sizeof(req));
246 if (copy_from_user(&req.perout, (void __user *)arg,
247 sizeof(req.perout))) {
251 if (cmd == PTP_PEROUT_REQUEST2) {
252 struct ptp_perout_request *perout = &req.perout;
254 if (perout->flags & ~PTP_PEROUT_VALID_FLAGS) {
259 * The "on" field has undefined meaning if
260 * PTP_PEROUT_DUTY_CYCLE isn't set, we must still treat
261 * it as reserved, which must be set to zero.
263 if (!(perout->flags & PTP_PEROUT_DUTY_CYCLE) &&
264 (perout->rsv[0] || perout->rsv[1] ||
265 perout->rsv[2] || perout->rsv[3])) {
269 if (perout->flags & PTP_PEROUT_DUTY_CYCLE) {
270 /* The duty cycle must be subunitary. */
271 if (perout->on.sec > perout->period.sec ||
272 (perout->on.sec == perout->period.sec &&
273 perout->on.nsec > perout->period.nsec)) {
278 if (perout->flags & PTP_PEROUT_PHASE) {
280 * The phase should be specified modulo the
281 * period, therefore anything equal or larger
282 * than 1 period is invalid.
284 if (perout->phase.sec > perout->period.sec ||
285 (perout->phase.sec == perout->period.sec &&
286 perout->phase.nsec >= perout->period.nsec)) {
/* v1: sanitize rather than reject. */
291 } else if (cmd == PTP_PEROUT_REQUEST) {
292 req.perout.flags &= PTP_PEROUT_V1_VALID_FLAGS;
293 req.perout.rsv[0] = 0;
294 req.perout.rsv[1] = 0;
295 req.perout.rsv[2] = 0;
296 req.perout.rsv[3] = 0;
298 if (req.perout.index >= ops->n_per_out) {
302 req.type = PTP_CLK_REQ_PEROUT;
303 enable = req.perout.period.sec || req.perout.period.nsec;
304 if (mutex_lock_interruptible(&ptp->pincfg_mux))
306 err = ops->enable(ops, &req, enable);
307 mutex_unlock(&ptp->pincfg_mux);
/* Toggle PPS event delivery; changing system time sources needs CAP_SYS_TIME. */
311 case PTP_ENABLE_PPS2:
312 memset(&req, 0, sizeof(req));
314 if (!capable(CAP_SYS_TIME))
316 req.type = PTP_CLK_REQ_PPS;
317 enable = arg ? 1 : 0;
318 if (mutex_lock_interruptible(&ptp->pincfg_mux))
320 err = ops->enable(ops, &req, enable);
321 mutex_unlock(&ptp->pincfg_mux);
/* Hardware cross-timestamp: device time vs. system real/monotonic-raw. */
324 case PTP_SYS_OFFSET_PRECISE:
325 case PTP_SYS_OFFSET_PRECISE2:
326 if (!ptp->info->getcrosststamp) {
330 err = ptp->info->getcrosststamp(ptp->info, &xtstamp);
334 memset(&precise_offset, 0, sizeof(precise_offset));
335 ts = ktime_to_timespec64(xtstamp.device);
336 precise_offset.device.sec = ts.tv_sec;
337 precise_offset.device.nsec = ts.tv_nsec;
338 ts = ktime_to_timespec64(xtstamp.sys_realtime);
339 precise_offset.sys_realtime.sec = ts.tv_sec;
340 precise_offset.sys_realtime.nsec = ts.tv_nsec;
341 ts = ktime_to_timespec64(xtstamp.sys_monoraw);
342 precise_offset.sys_monoraw.sec = ts.tv_sec;
343 precise_offset.sys_monoraw.nsec = ts.tv_nsec;
344 if (copy_to_user((void __user *)arg, &precise_offset,
345 sizeof(precise_offset)))
/* Sandwich sampling via gettimex64: sys-pre / device / sys-post triples. */
349 case PTP_SYS_OFFSET_EXTENDED:
350 case PTP_SYS_OFFSET_EXTENDED2:
351 if (!ptp->info->gettimex64) {
355 extoff = memdup_user((void __user *)arg, sizeof(*extoff));
356 if (IS_ERR(extoff)) {
357 err = PTR_ERR(extoff);
/* Bound the sample count and insist reserved words are zero. */
361 if (extoff->n_samples > PTP_MAX_SAMPLES
362 || extoff->rsv[0] || extoff->rsv[1] || extoff->rsv[2]) {
366 for (i = 0; i < extoff->n_samples; i++) {
367 err = ptp->info->gettimex64(ptp->info, &ts, &sts);
370 extoff->ts[i][0].sec = sts.pre_ts.tv_sec;
371 extoff->ts[i][0].nsec = sts.pre_ts.tv_nsec;
372 extoff->ts[i][1].sec = ts.tv_sec;
373 extoff->ts[i][1].nsec = ts.tv_nsec;
374 extoff->ts[i][2].sec = sts.post_ts.tv_sec;
375 extoff->ts[i][2].nsec = sts.post_ts.tv_nsec;
377 if (copy_to_user((void __user *)arg, extoff, sizeof(*extoff)))
/* Legacy offset sampling: interleave system and device reads. */
382 case PTP_SYS_OFFSET2:
383 sysoff = memdup_user((void __user *)arg, sizeof(*sysoff));
384 if (IS_ERR(sysoff)) {
385 err = PTR_ERR(sysoff);
389 if (sysoff->n_samples > PTP_MAX_SAMPLES) {
393 pct = &sysoff->ts[0];
394 for (i = 0; i < sysoff->n_samples; i++) {
395 ktime_get_real_ts64(&ts);
396 pct->sec = ts.tv_sec;
397 pct->nsec = ts.tv_nsec;
/* Prefer gettimex64 when available; fall back to gettime64. */
400 err = ops->gettimex64(ops, &ts, NULL);
402 err = ops->gettime64(ops, &ts);
405 pct->sec = ts.tv_sec;
406 pct->nsec = ts.tv_nsec;
/* One trailing system timestamp closes the final sandwich. */
409 ktime_get_real_ts64(&ts);
410 pct->sec = ts.tv_sec;
411 pct->nsec = ts.tv_nsec;
412 if (copy_to_user((void __user *)arg, sysoff, sizeof(*sysoff)))
/* Read back one pin descriptor from the driver's pin table. */
416 case PTP_PIN_GETFUNC:
417 case PTP_PIN_GETFUNC2:
418 if (copy_from_user(&pd, (void __user *)arg, sizeof(pd))) {
/* *2 rejects non-zero reserved fields; v1 presumably zeroes them (elided). */
422 if ((pd.rsv[0] || pd.rsv[1] || pd.rsv[2]
423 || pd.rsv[3] || pd.rsv[4])
424 && cmd == PTP_PIN_GETFUNC2) {
427 } else if (cmd == PTP_PIN_GETFUNC) {
434 pin_index = pd.index;
435 if (pin_index >= ops->n_pins) {
/* Clamp under speculation after the bounds check (Spectre-v1 hardening). */
439 pin_index = array_index_nospec(pin_index, ops->n_pins);
440 if (mutex_lock_interruptible(&ptp->pincfg_mux))
442 pd = ops->pin_config[pin_index];
443 mutex_unlock(&ptp->pincfg_mux);
444 if (!err && copy_to_user((void __user *)arg, &pd, sizeof(pd)))
/* Reprogram one pin's function/channel via ptp_set_pinfunc. */
448 case PTP_PIN_SETFUNC:
449 case PTP_PIN_SETFUNC2:
450 if (copy_from_user(&pd, (void __user *)arg, sizeof(pd))) {
454 if ((pd.rsv[0] || pd.rsv[1] || pd.rsv[2]
455 || pd.rsv[3] || pd.rsv[4])
456 && cmd == PTP_PIN_SETFUNC2) {
459 } else if (cmd == PTP_PIN_SETFUNC) {
466 pin_index = pd.index;
467 if (pin_index >= ops->n_pins) {
471 pin_index = array_index_nospec(pin_index, ops->n_pins);
472 if (mutex_lock_interruptible(&ptp->pincfg_mux))
474 err = ptp_set_pinfunc(ptp, pin_index, pd.func, pd.chan);
475 mutex_unlock(&ptp->pincfg_mux);
/* Per-open channel mask manipulation for timestamp filtering. */
478 case PTP_MASK_CLEAR_ALL:
479 bitmap_clear(tsevq->mask, 0, PTP_MAX_CHANNELS);
482 case PTP_MASK_EN_SINGLE:
483 if (copy_from_user(&i, (void __user *)arg, sizeof(i))) {
487 if (i >= PTP_MAX_CHANNELS) {
491 set_bit(i, tsevq->mask);
/*
 * ptp_poll - poll() backend for the PTP chardev.
 * @pccontext: per-open context holding this reader's timestamp queue
 * @fp:        file being polled
 * @wait:      poll table (elided from the visible signature)
 *
 * Registers on the clock's event waitqueue and reports EPOLLIN when the
 * per-open queue holds at least one timestamp event.
 *
 * NOTE(review): the NULL check on 'queue' and any EPOLLERR path are elided
 * in this view.
 */
505 __poll_t ptp_poll(struct posix_clock_context *pccontext, struct file *fp,
508 struct ptp_clock *ptp =
509 container_of(pccontext->clk, struct ptp_clock, clock);
510 struct timestamp_event_queue *queue;
512 queue = pccontext->private_clkdata;
516 poll_wait(fp, &ptp->tsev_wq, wait);
518 return queue_cnt(queue) ? EPOLLIN : 0;
/* Upper bound, in bytes, on one read()'s worth of extts events. */
521 #define EXTTS_BUFSIZE (PTP_BUF_TIMESTAMPS * sizeof(struct ptp_extts_event))
/*
 * ptp_read - read() backend: deliver queued external timestamp events.
 * @pccontext: per-open context holding this reader's timestamp queue
 * @rdflags:   read flags (unused in the visible code)
 * @buf:       user buffer receiving struct ptp_extts_event records
 * @cnt:       requested byte count; must be a whole number of events
 *
 * Blocks (interruptibly) until events are queued or the clock goes
 * defunct, copies up to EXTTS_BUFSIZE of events out through a temporary
 * kernel buffer, and advances the ring head under queue->lock.
 *
 * NOTE(review): the function continues past the end of this view; the
 * cnt-vs-qcnt clamp, error paths, kfree and return are not visible here.
 */
523 ssize_t ptp_read(struct posix_clock_context *pccontext, uint rdflags,
524 char __user *buf, size_t cnt)
526 struct ptp_clock *ptp =
527 container_of(pccontext->clk, struct ptp_clock, clock);
528 struct timestamp_event_queue *queue;
529 struct ptp_extts_event *event;
534 queue = pccontext->private_clkdata;
/* Reject partial-record reads outright. */
540 if (cnt % sizeof(struct ptp_extts_event) != 0) {
545 if (cnt > EXTTS_BUFSIZE)
/* From here on, cnt counts events rather than bytes. */
548 cnt = cnt / sizeof(struct ptp_extts_event);
550 if (wait_event_interruptible(ptp->tsev_wq,
551 ptp->defunct || queue_cnt(queue))) {
/* Stage events in kernel memory; copy_to_user can't run under the lock. */
560 event = kmalloc(EXTTS_BUFSIZE, GFP_KERNEL);
566 spin_lock_irqsave(&queue->lock, flags);
568 qcnt = queue_cnt(queue);
573 for (i = 0; i < cnt; i++) {
574 event[i] = queue->buf[queue->head];
575 /* Paired with READ_ONCE() in queue_cnt() */
576 WRITE_ONCE(queue->head, (queue->head + 1) % PTP_MAX_TIMESTAMPS);
579 spin_unlock_irqrestore(&queue->lock, flags);
/* Back to bytes for the user copy. */
581 cnt = cnt * sizeof(struct ptp_extts_event);
584 if (copy_to_user(buf, event, cnt)) {