/*
 * Copyright (c) 2015, Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
33 #include <linux/clocksource.h>
37 MLX5_CYCLES_SHIFT = 23
/* Pin direction programmed into the MTPPS register's pin_mode field. */
enum {
	MLX5_PIN_MODE_IN		= 0x0,
	MLX5_PIN_MODE_OUT		= 0x1,
};
/* Output waveform for pins configured as MLX5_PIN_MODE_OUT. */
enum {
	MLX5_OUT_PATTERN_PULSE		= 0x0,
	MLX5_OUT_PATTERN_PERIODIC	= 0x1,
};
/* Event generation mode programmed via the MTPPSE register. */
enum {
	MLX5_EVENT_MODE_DISABLE		= 0x0,
	MLX5_EVENT_MODE_REPETETIVE	= 0x1,
	MLX5_EVENT_MODE_ONCE_TILL_ARM	= 0x2,
};
57 MLX5_MTPPS_FS_ENABLE = BIT(0x0),
58 MLX5_MTPPS_FS_PATTERN = BIT(0x2),
59 MLX5_MTPPS_FS_PIN_MODE = BIT(0x3),
60 MLX5_MTPPS_FS_TIME_STAMP = BIT(0x4),
61 MLX5_MTPPS_FS_OUT_PULSE_DURATION = BIT(0x5),
62 MLX5_MTPPS_FS_ENH_OUT_PER_ADJ = BIT(0x7),
65 static u64 read_internal_timer(const struct cyclecounter *cc)
67 struct mlx5_clock *clock = container_of(cc, struct mlx5_clock, cycles);
68 struct mlx5_core_dev *mdev = container_of(clock, struct mlx5_core_dev,
71 return mlx5_read_internal_timer(mdev) & cc->mask;
74 static void mlx5_pps_out(struct work_struct *work)
76 struct mlx5_pps *pps_info = container_of(work, struct mlx5_pps,
78 struct mlx5_clock *clock = container_of(pps_info, struct mlx5_clock,
80 struct mlx5_core_dev *mdev = container_of(clock, struct mlx5_core_dev,
82 u32 in[MLX5_ST_SZ_DW(mtpps_reg)] = {0};
86 for (i = 0; i < clock->ptp_info.n_pins; i++) {
89 write_lock_irqsave(&clock->lock, flags);
90 tstart = clock->pps_info.start[i];
91 clock->pps_info.start[i] = 0;
92 write_unlock_irqrestore(&clock->lock, flags);
96 MLX5_SET(mtpps_reg, in, pin, i);
97 MLX5_SET64(mtpps_reg, in, time_stamp, tstart);
98 MLX5_SET(mtpps_reg, in, field_select, MLX5_MTPPS_FS_TIME_STAMP);
99 mlx5_set_mtpps(mdev, in, sizeof(in));
103 static void mlx5_timestamp_overflow(struct work_struct *work)
105 struct delayed_work *dwork = to_delayed_work(work);
106 struct mlx5_clock *clock = container_of(dwork, struct mlx5_clock,
110 write_lock_irqsave(&clock->lock, flags);
111 timecounter_read(&clock->tc);
112 write_unlock_irqrestore(&clock->lock, flags);
113 schedule_delayed_work(&clock->overflow_work, clock->overflow_period);
116 static int mlx5_ptp_settime(struct ptp_clock_info *ptp,
117 const struct timespec64 *ts)
119 struct mlx5_clock *clock = container_of(ptp, struct mlx5_clock,
121 u64 ns = timespec64_to_ns(ts);
124 write_lock_irqsave(&clock->lock, flags);
125 timecounter_init(&clock->tc, &clock->cycles, ns);
126 write_unlock_irqrestore(&clock->lock, flags);
131 static int mlx5_ptp_gettime(struct ptp_clock_info *ptp, struct timespec64 *ts)
133 struct mlx5_clock *clock = container_of(ptp, struct mlx5_clock,
138 write_lock_irqsave(&clock->lock, flags);
139 ns = timecounter_read(&clock->tc);
140 write_unlock_irqrestore(&clock->lock, flags);
142 *ts = ns_to_timespec64(ns);
147 static int mlx5_ptp_adjtime(struct ptp_clock_info *ptp, s64 delta)
149 struct mlx5_clock *clock = container_of(ptp, struct mlx5_clock,
153 write_lock_irqsave(&clock->lock, flags);
154 timecounter_adjtime(&clock->tc, delta);
155 write_unlock_irqrestore(&clock->lock, flags);
160 static int mlx5_ptp_adjfreq(struct ptp_clock_info *ptp, s32 delta)
166 struct mlx5_clock *clock = container_of(ptp, struct mlx5_clock,
174 adj = clock->nominal_c_mult;
176 diff = div_u64(adj, 1000000000ULL);
178 write_lock_irqsave(&clock->lock, flags);
179 timecounter_read(&clock->tc);
180 clock->cycles.mult = neg_adj ? clock->nominal_c_mult - diff :
181 clock->nominal_c_mult + diff;
182 write_unlock_irqrestore(&clock->lock, flags);
187 static int mlx5_extts_configure(struct ptp_clock_info *ptp,
188 struct ptp_clock_request *rq,
191 struct mlx5_clock *clock =
192 container_of(ptp, struct mlx5_clock, ptp_info);
193 struct mlx5_core_dev *mdev =
194 container_of(clock, struct mlx5_core_dev, clock);
195 u32 in[MLX5_ST_SZ_DW(mtpps_reg)] = {0};
196 u32 field_select = 0;
202 if (!MLX5_PPS_CAP(mdev))
205 if (rq->extts.index >= clock->ptp_info.n_pins)
209 pin = ptp_find_pin(clock->ptp, PTP_PF_EXTTS, rq->extts.index);
212 pin_mode = MLX5_PIN_MODE_IN;
213 pattern = !!(rq->extts.flags & PTP_FALLING_EDGE);
214 field_select = MLX5_MTPPS_FS_PIN_MODE |
215 MLX5_MTPPS_FS_PATTERN |
216 MLX5_MTPPS_FS_ENABLE;
218 pin = rq->extts.index;
219 field_select = MLX5_MTPPS_FS_ENABLE;
222 MLX5_SET(mtpps_reg, in, pin, pin);
223 MLX5_SET(mtpps_reg, in, pin_mode, pin_mode);
224 MLX5_SET(mtpps_reg, in, pattern, pattern);
225 MLX5_SET(mtpps_reg, in, enable, on);
226 MLX5_SET(mtpps_reg, in, field_select, field_select);
228 err = mlx5_set_mtpps(mdev, in, sizeof(in));
232 return mlx5_set_mtppse(mdev, pin, 0,
233 MLX5_EVENT_MODE_REPETETIVE & on);
236 static int mlx5_perout_configure(struct ptp_clock_info *ptp,
237 struct ptp_clock_request *rq,
240 struct mlx5_clock *clock =
241 container_of(ptp, struct mlx5_clock, ptp_info);
242 struct mlx5_core_dev *mdev =
243 container_of(clock, struct mlx5_core_dev, clock);
244 u32 in[MLX5_ST_SZ_DW(mtpps_reg)] = {0};
245 u64 nsec_now, nsec_delta, time_stamp = 0;
246 u64 cycles_now, cycles_delta;
247 struct timespec64 ts;
249 u32 field_select = 0;
256 if (!MLX5_PPS_CAP(mdev))
259 if (rq->perout.index >= clock->ptp_info.n_pins)
263 pin = ptp_find_pin(clock->ptp, PTP_PF_PEROUT,
268 pin_mode = MLX5_PIN_MODE_OUT;
269 pattern = MLX5_OUT_PATTERN_PERIODIC;
270 ts.tv_sec = rq->perout.period.sec;
271 ts.tv_nsec = rq->perout.period.nsec;
272 ns = timespec64_to_ns(&ts);
274 if ((ns >> 1) != 500000000LL)
277 ts.tv_sec = rq->perout.start.sec;
278 ts.tv_nsec = rq->perout.start.nsec;
279 ns = timespec64_to_ns(&ts);
280 cycles_now = mlx5_read_internal_timer(mdev);
281 write_lock_irqsave(&clock->lock, flags);
282 nsec_now = timecounter_cyc2time(&clock->tc, cycles_now);
283 nsec_delta = ns - nsec_now;
284 cycles_delta = div64_u64(nsec_delta << clock->cycles.shift,
286 write_unlock_irqrestore(&clock->lock, flags);
287 time_stamp = cycles_now + cycles_delta;
288 field_select = MLX5_MTPPS_FS_PIN_MODE |
289 MLX5_MTPPS_FS_PATTERN |
290 MLX5_MTPPS_FS_ENABLE |
291 MLX5_MTPPS_FS_TIME_STAMP;
293 pin = rq->perout.index;
294 field_select = MLX5_MTPPS_FS_ENABLE;
297 MLX5_SET(mtpps_reg, in, pin, pin);
298 MLX5_SET(mtpps_reg, in, pin_mode, pin_mode);
299 MLX5_SET(mtpps_reg, in, pattern, pattern);
300 MLX5_SET(mtpps_reg, in, enable, on);
301 MLX5_SET64(mtpps_reg, in, time_stamp, time_stamp);
302 MLX5_SET(mtpps_reg, in, field_select, field_select);
304 err = mlx5_set_mtpps(mdev, in, sizeof(in));
308 return mlx5_set_mtppse(mdev, pin, 0,
309 MLX5_EVENT_MODE_REPETETIVE & on);
312 static int mlx5_pps_configure(struct ptp_clock_info *ptp,
313 struct ptp_clock_request *rq,
316 struct mlx5_clock *clock =
317 container_of(ptp, struct mlx5_clock, ptp_info);
319 clock->pps_info.enabled = !!on;
323 static int mlx5_ptp_enable(struct ptp_clock_info *ptp,
324 struct ptp_clock_request *rq,
328 case PTP_CLK_REQ_EXTTS:
329 return mlx5_extts_configure(ptp, rq, on);
330 case PTP_CLK_REQ_PEROUT:
331 return mlx5_perout_configure(ptp, rq, on);
332 case PTP_CLK_REQ_PPS:
333 return mlx5_pps_configure(ptp, rq, on);
340 static int mlx5_ptp_verify(struct ptp_clock_info *ptp, unsigned int pin,
341 enum ptp_pin_function func, unsigned int chan)
343 return (func == PTP_PF_PHYSYNC) ? -EOPNOTSUPP : 0;
346 static const struct ptp_clock_info mlx5_ptp_clock_info = {
347 .owner = THIS_MODULE,
349 .max_adj = 100000000,
355 .adjfreq = mlx5_ptp_adjfreq,
356 .adjtime = mlx5_ptp_adjtime,
357 .gettime64 = mlx5_ptp_gettime,
358 .settime64 = mlx5_ptp_settime,
363 static int mlx5_init_pin_config(struct mlx5_clock *clock)
367 clock->ptp_info.pin_config =
368 kzalloc(sizeof(*clock->ptp_info.pin_config) *
369 clock->ptp_info.n_pins, GFP_KERNEL);
370 if (!clock->ptp_info.pin_config)
372 clock->ptp_info.enable = mlx5_ptp_enable;
373 clock->ptp_info.verify = mlx5_ptp_verify;
374 clock->ptp_info.pps = 1;
376 for (i = 0; i < clock->ptp_info.n_pins; i++) {
377 snprintf(clock->ptp_info.pin_config[i].name,
378 sizeof(clock->ptp_info.pin_config[i].name),
380 clock->ptp_info.pin_config[i].index = i;
381 clock->ptp_info.pin_config[i].func = PTP_PF_NONE;
382 clock->ptp_info.pin_config[i].chan = i;
388 static void mlx5_get_pps_caps(struct mlx5_core_dev *mdev)
390 struct mlx5_clock *clock = &mdev->clock;
391 u32 out[MLX5_ST_SZ_DW(mtpps_reg)] = {0};
393 mlx5_query_mtpps(mdev, out, sizeof(out));
395 clock->ptp_info.n_pins = MLX5_GET(mtpps_reg, out,
396 cap_number_of_pps_pins);
397 clock->ptp_info.n_ext_ts = MLX5_GET(mtpps_reg, out,
398 cap_max_num_of_pps_in_pins);
399 clock->ptp_info.n_per_out = MLX5_GET(mtpps_reg, out,
400 cap_max_num_of_pps_out_pins);
402 clock->pps_info.pin_caps[0] = MLX5_GET(mtpps_reg, out, cap_pin_0_mode);
403 clock->pps_info.pin_caps[1] = MLX5_GET(mtpps_reg, out, cap_pin_1_mode);
404 clock->pps_info.pin_caps[2] = MLX5_GET(mtpps_reg, out, cap_pin_2_mode);
405 clock->pps_info.pin_caps[3] = MLX5_GET(mtpps_reg, out, cap_pin_3_mode);
406 clock->pps_info.pin_caps[4] = MLX5_GET(mtpps_reg, out, cap_pin_4_mode);
407 clock->pps_info.pin_caps[5] = MLX5_GET(mtpps_reg, out, cap_pin_5_mode);
408 clock->pps_info.pin_caps[6] = MLX5_GET(mtpps_reg, out, cap_pin_6_mode);
409 clock->pps_info.pin_caps[7] = MLX5_GET(mtpps_reg, out, cap_pin_7_mode);
412 void mlx5_pps_event(struct mlx5_core_dev *mdev,
413 struct mlx5_eqe *eqe)
415 struct mlx5_clock *clock = &mdev->clock;
416 struct ptp_clock_event ptp_event;
417 struct timespec64 ts;
418 u64 nsec_now, nsec_delta;
419 u64 cycles_now, cycles_delta;
420 int pin = eqe->data.pps.pin;
424 switch (clock->ptp_info.pin_config[pin].func) {
426 if (clock->pps_info.enabled) {
427 ptp_event.type = PTP_CLOCK_PPSUSR;
428 ptp_event.pps_times.ts_real = ns_to_timespec64(eqe->data.pps.time_stamp);
430 ptp_event.type = PTP_CLOCK_EXTTS;
432 ptp_clock_event(clock->ptp, &ptp_event);
435 mlx5_ptp_gettime(&clock->ptp_info, &ts);
436 cycles_now = mlx5_read_internal_timer(mdev);
439 ns = timespec64_to_ns(&ts);
440 write_lock_irqsave(&clock->lock, flags);
441 nsec_now = timecounter_cyc2time(&clock->tc, cycles_now);
442 nsec_delta = ns - nsec_now;
443 cycles_delta = div64_u64(nsec_delta << clock->cycles.shift,
445 clock->pps_info.start[pin] = cycles_now + cycles_delta;
446 schedule_work(&clock->pps_info.out_work);
447 write_unlock_irqrestore(&clock->lock, flags);
450 mlx5_core_err(mdev, " Unhandled event\n");
454 void mlx5_init_clock(struct mlx5_core_dev *mdev)
456 struct mlx5_clock *clock = &mdev->clock;
461 dev_freq = MLX5_CAP_GEN(mdev, device_frequency_khz);
463 mlx5_core_warn(mdev, "invalid device_frequency_khz, aborting HW clock init\n");
466 rwlock_init(&clock->lock);
467 clock->cycles.read = read_internal_timer;
468 clock->cycles.shift = MLX5_CYCLES_SHIFT;
469 clock->cycles.mult = clocksource_khz2mult(dev_freq,
470 clock->cycles.shift);
471 clock->nominal_c_mult = clock->cycles.mult;
472 clock->cycles.mask = CLOCKSOURCE_MASK(41);
474 timecounter_init(&clock->tc, &clock->cycles,
475 ktime_to_ns(ktime_get_real()));
477 /* Calculate period in seconds to call the overflow watchdog - to make
478 * sure counter is checked at least once every wrap around.
480 ns = cyclecounter_cyc2ns(&clock->cycles, clock->cycles.mask,
482 do_div(ns, NSEC_PER_SEC / 2 / HZ);
483 clock->overflow_period = ns;
485 INIT_WORK(&clock->pps_info.out_work, mlx5_pps_out);
486 INIT_DELAYED_WORK(&clock->overflow_work, mlx5_timestamp_overflow);
487 if (clock->overflow_period)
488 schedule_delayed_work(&clock->overflow_work, 0);
490 mlx5_core_warn(mdev, "invalid overflow period, overflow_work is not scheduled\n");
492 /* Configure the PHC */
493 clock->ptp_info = mlx5_ptp_clock_info;
495 /* Initialize 1PPS data structures */
496 if (MLX5_PPS_CAP(mdev))
497 mlx5_get_pps_caps(mdev);
498 if (clock->ptp_info.n_pins)
499 mlx5_init_pin_config(clock);
501 clock->ptp = ptp_clock_register(&clock->ptp_info,
503 if (IS_ERR(clock->ptp)) {
504 mlx5_core_warn(mdev, "ptp_clock_register failed %ld\n",
505 PTR_ERR(clock->ptp));
510 void mlx5_cleanup_clock(struct mlx5_core_dev *mdev)
512 struct mlx5_clock *clock = &mdev->clock;
514 if (!MLX5_CAP_GEN(mdev, device_frequency_khz))
518 ptp_clock_unregister(clock->ptp);
522 cancel_work_sync(&clock->pps_info.out_work);
523 cancel_delayed_work_sync(&clock->overflow_work);
524 kfree(clock->ptp_info.pin_config);