/*
 * Copyright (c) 2015, Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include <linux/clocksource.h>
#include <linux/highmem.h>
#include <rdma/mlx5-abi.h>
40 MLX5_CYCLES_SHIFT = 23
44 MLX5_PIN_MODE_IN = 0x0,
45 MLX5_PIN_MODE_OUT = 0x1,
49 MLX5_OUT_PATTERN_PULSE = 0x0,
50 MLX5_OUT_PATTERN_PERIODIC = 0x1,
54 MLX5_EVENT_MODE_DISABLE = 0x0,
55 MLX5_EVENT_MODE_REPETETIVE = 0x1,
56 MLX5_EVENT_MODE_ONCE_TILL_ARM = 0x2,
60 MLX5_MTPPS_FS_ENABLE = BIT(0x0),
61 MLX5_MTPPS_FS_PATTERN = BIT(0x2),
62 MLX5_MTPPS_FS_PIN_MODE = BIT(0x3),
63 MLX5_MTPPS_FS_TIME_STAMP = BIT(0x4),
64 MLX5_MTPPS_FS_OUT_PULSE_DURATION = BIT(0x5),
65 MLX5_MTPPS_FS_ENH_OUT_PER_ADJ = BIT(0x7),
68 static u64 read_internal_timer(const struct cyclecounter *cc)
70 struct mlx5_clock *clock = container_of(cc, struct mlx5_clock, cycles);
71 struct mlx5_core_dev *mdev = container_of(clock, struct mlx5_core_dev,
74 return mlx5_read_internal_timer(mdev) & cc->mask;
77 static void mlx5_update_clock_info_page(struct mlx5_core_dev *mdev)
79 struct mlx5_ib_clock_info *clock_info = mdev->clock_info;
80 struct mlx5_clock *clock = &mdev->clock;
86 sign = smp_load_acquire(&clock_info->sign);
87 smp_store_mb(clock_info->sign,
88 sign | MLX5_IB_CLOCK_INFO_KERNEL_UPDATING);
90 clock_info->cycles = clock->tc.cycle_last;
91 clock_info->mult = clock->cycles.mult;
92 clock_info->nsec = clock->tc.nsec;
93 clock_info->frac = clock->tc.frac;
95 smp_store_release(&clock_info->sign,
96 sign + MLX5_IB_CLOCK_INFO_KERNEL_UPDATING * 2);
99 static void mlx5_pps_out(struct work_struct *work)
101 struct mlx5_pps *pps_info = container_of(work, struct mlx5_pps,
103 struct mlx5_clock *clock = container_of(pps_info, struct mlx5_clock,
105 struct mlx5_core_dev *mdev = container_of(clock, struct mlx5_core_dev,
107 u32 in[MLX5_ST_SZ_DW(mtpps_reg)] = {0};
111 for (i = 0; i < clock->ptp_info.n_pins; i++) {
114 write_lock_irqsave(&clock->lock, flags);
115 tstart = clock->pps_info.start[i];
116 clock->pps_info.start[i] = 0;
117 write_unlock_irqrestore(&clock->lock, flags);
121 MLX5_SET(mtpps_reg, in, pin, i);
122 MLX5_SET64(mtpps_reg, in, time_stamp, tstart);
123 MLX5_SET(mtpps_reg, in, field_select, MLX5_MTPPS_FS_TIME_STAMP);
124 mlx5_set_mtpps(mdev, in, sizeof(in));
128 static void mlx5_timestamp_overflow(struct work_struct *work)
130 struct delayed_work *dwork = to_delayed_work(work);
131 struct mlx5_clock *clock = container_of(dwork, struct mlx5_clock,
135 write_lock_irqsave(&clock->lock, flags);
136 timecounter_read(&clock->tc);
137 mlx5_update_clock_info_page(clock->mdev);
138 write_unlock_irqrestore(&clock->lock, flags);
139 schedule_delayed_work(&clock->overflow_work, clock->overflow_period);
142 static int mlx5_ptp_settime(struct ptp_clock_info *ptp,
143 const struct timespec64 *ts)
145 struct mlx5_clock *clock = container_of(ptp, struct mlx5_clock,
147 u64 ns = timespec64_to_ns(ts);
150 write_lock_irqsave(&clock->lock, flags);
151 timecounter_init(&clock->tc, &clock->cycles, ns);
152 mlx5_update_clock_info_page(clock->mdev);
153 write_unlock_irqrestore(&clock->lock, flags);
158 static int mlx5_ptp_gettime(struct ptp_clock_info *ptp, struct timespec64 *ts)
160 struct mlx5_clock *clock = container_of(ptp, struct mlx5_clock,
165 write_lock_irqsave(&clock->lock, flags);
166 ns = timecounter_read(&clock->tc);
167 write_unlock_irqrestore(&clock->lock, flags);
169 *ts = ns_to_timespec64(ns);
174 static int mlx5_ptp_adjtime(struct ptp_clock_info *ptp, s64 delta)
176 struct mlx5_clock *clock = container_of(ptp, struct mlx5_clock,
180 write_lock_irqsave(&clock->lock, flags);
181 timecounter_adjtime(&clock->tc, delta);
182 mlx5_update_clock_info_page(clock->mdev);
183 write_unlock_irqrestore(&clock->lock, flags);
188 static int mlx5_ptp_adjfreq(struct ptp_clock_info *ptp, s32 delta)
194 struct mlx5_clock *clock = container_of(ptp, struct mlx5_clock,
202 adj = clock->nominal_c_mult;
204 diff = div_u64(adj, 1000000000ULL);
206 write_lock_irqsave(&clock->lock, flags);
207 timecounter_read(&clock->tc);
208 clock->cycles.mult = neg_adj ? clock->nominal_c_mult - diff :
209 clock->nominal_c_mult + diff;
210 mlx5_update_clock_info_page(clock->mdev);
211 write_unlock_irqrestore(&clock->lock, flags);
216 static int mlx5_extts_configure(struct ptp_clock_info *ptp,
217 struct ptp_clock_request *rq,
220 struct mlx5_clock *clock =
221 container_of(ptp, struct mlx5_clock, ptp_info);
222 struct mlx5_core_dev *mdev =
223 container_of(clock, struct mlx5_core_dev, clock);
224 u32 in[MLX5_ST_SZ_DW(mtpps_reg)] = {0};
225 u32 field_select = 0;
231 if (!MLX5_PPS_CAP(mdev))
234 if (rq->extts.index >= clock->ptp_info.n_pins)
238 pin = ptp_find_pin(clock->ptp, PTP_PF_EXTTS, rq->extts.index);
241 pin_mode = MLX5_PIN_MODE_IN;
242 pattern = !!(rq->extts.flags & PTP_FALLING_EDGE);
243 field_select = MLX5_MTPPS_FS_PIN_MODE |
244 MLX5_MTPPS_FS_PATTERN |
245 MLX5_MTPPS_FS_ENABLE;
247 pin = rq->extts.index;
248 field_select = MLX5_MTPPS_FS_ENABLE;
251 MLX5_SET(mtpps_reg, in, pin, pin);
252 MLX5_SET(mtpps_reg, in, pin_mode, pin_mode);
253 MLX5_SET(mtpps_reg, in, pattern, pattern);
254 MLX5_SET(mtpps_reg, in, enable, on);
255 MLX5_SET(mtpps_reg, in, field_select, field_select);
257 err = mlx5_set_mtpps(mdev, in, sizeof(in));
261 return mlx5_set_mtppse(mdev, pin, 0,
262 MLX5_EVENT_MODE_REPETETIVE & on);
265 static int mlx5_perout_configure(struct ptp_clock_info *ptp,
266 struct ptp_clock_request *rq,
269 struct mlx5_clock *clock =
270 container_of(ptp, struct mlx5_clock, ptp_info);
271 struct mlx5_core_dev *mdev =
272 container_of(clock, struct mlx5_core_dev, clock);
273 u32 in[MLX5_ST_SZ_DW(mtpps_reg)] = {0};
274 u64 nsec_now, nsec_delta, time_stamp = 0;
275 u64 cycles_now, cycles_delta;
276 struct timespec64 ts;
278 u32 field_select = 0;
285 if (!MLX5_PPS_CAP(mdev))
288 if (rq->perout.index >= clock->ptp_info.n_pins)
292 pin = ptp_find_pin(clock->ptp, PTP_PF_PEROUT,
297 pin_mode = MLX5_PIN_MODE_OUT;
298 pattern = MLX5_OUT_PATTERN_PERIODIC;
299 ts.tv_sec = rq->perout.period.sec;
300 ts.tv_nsec = rq->perout.period.nsec;
301 ns = timespec64_to_ns(&ts);
303 if ((ns >> 1) != 500000000LL)
306 ts.tv_sec = rq->perout.start.sec;
307 ts.tv_nsec = rq->perout.start.nsec;
308 ns = timespec64_to_ns(&ts);
309 cycles_now = mlx5_read_internal_timer(mdev);
310 write_lock_irqsave(&clock->lock, flags);
311 nsec_now = timecounter_cyc2time(&clock->tc, cycles_now);
312 nsec_delta = ns - nsec_now;
313 cycles_delta = div64_u64(nsec_delta << clock->cycles.shift,
315 write_unlock_irqrestore(&clock->lock, flags);
316 time_stamp = cycles_now + cycles_delta;
317 field_select = MLX5_MTPPS_FS_PIN_MODE |
318 MLX5_MTPPS_FS_PATTERN |
319 MLX5_MTPPS_FS_ENABLE |
320 MLX5_MTPPS_FS_TIME_STAMP;
322 pin = rq->perout.index;
323 field_select = MLX5_MTPPS_FS_ENABLE;
326 MLX5_SET(mtpps_reg, in, pin, pin);
327 MLX5_SET(mtpps_reg, in, pin_mode, pin_mode);
328 MLX5_SET(mtpps_reg, in, pattern, pattern);
329 MLX5_SET(mtpps_reg, in, enable, on);
330 MLX5_SET64(mtpps_reg, in, time_stamp, time_stamp);
331 MLX5_SET(mtpps_reg, in, field_select, field_select);
333 err = mlx5_set_mtpps(mdev, in, sizeof(in));
337 return mlx5_set_mtppse(mdev, pin, 0,
338 MLX5_EVENT_MODE_REPETETIVE & on);
341 static int mlx5_pps_configure(struct ptp_clock_info *ptp,
342 struct ptp_clock_request *rq,
345 struct mlx5_clock *clock =
346 container_of(ptp, struct mlx5_clock, ptp_info);
348 clock->pps_info.enabled = !!on;
352 static int mlx5_ptp_enable(struct ptp_clock_info *ptp,
353 struct ptp_clock_request *rq,
357 case PTP_CLK_REQ_EXTTS:
358 return mlx5_extts_configure(ptp, rq, on);
359 case PTP_CLK_REQ_PEROUT:
360 return mlx5_perout_configure(ptp, rq, on);
361 case PTP_CLK_REQ_PPS:
362 return mlx5_pps_configure(ptp, rq, on);
369 static int mlx5_ptp_verify(struct ptp_clock_info *ptp, unsigned int pin,
370 enum ptp_pin_function func, unsigned int chan)
372 return (func == PTP_PF_PHYSYNC) ? -EOPNOTSUPP : 0;
375 static const struct ptp_clock_info mlx5_ptp_clock_info = {
376 .owner = THIS_MODULE,
378 .max_adj = 100000000,
384 .adjfreq = mlx5_ptp_adjfreq,
385 .adjtime = mlx5_ptp_adjtime,
386 .gettime64 = mlx5_ptp_gettime,
387 .settime64 = mlx5_ptp_settime,
392 static int mlx5_init_pin_config(struct mlx5_clock *clock)
396 clock->ptp_info.pin_config =
397 kzalloc(sizeof(*clock->ptp_info.pin_config) *
398 clock->ptp_info.n_pins, GFP_KERNEL);
399 if (!clock->ptp_info.pin_config)
401 clock->ptp_info.enable = mlx5_ptp_enable;
402 clock->ptp_info.verify = mlx5_ptp_verify;
403 clock->ptp_info.pps = 1;
405 for (i = 0; i < clock->ptp_info.n_pins; i++) {
406 snprintf(clock->ptp_info.pin_config[i].name,
407 sizeof(clock->ptp_info.pin_config[i].name),
409 clock->ptp_info.pin_config[i].index = i;
410 clock->ptp_info.pin_config[i].func = PTP_PF_NONE;
411 clock->ptp_info.pin_config[i].chan = i;
417 static void mlx5_get_pps_caps(struct mlx5_core_dev *mdev)
419 struct mlx5_clock *clock = &mdev->clock;
420 u32 out[MLX5_ST_SZ_DW(mtpps_reg)] = {0};
422 mlx5_query_mtpps(mdev, out, sizeof(out));
424 clock->ptp_info.n_pins = MLX5_GET(mtpps_reg, out,
425 cap_number_of_pps_pins);
426 clock->ptp_info.n_ext_ts = MLX5_GET(mtpps_reg, out,
427 cap_max_num_of_pps_in_pins);
428 clock->ptp_info.n_per_out = MLX5_GET(mtpps_reg, out,
429 cap_max_num_of_pps_out_pins);
431 clock->pps_info.pin_caps[0] = MLX5_GET(mtpps_reg, out, cap_pin_0_mode);
432 clock->pps_info.pin_caps[1] = MLX5_GET(mtpps_reg, out, cap_pin_1_mode);
433 clock->pps_info.pin_caps[2] = MLX5_GET(mtpps_reg, out, cap_pin_2_mode);
434 clock->pps_info.pin_caps[3] = MLX5_GET(mtpps_reg, out, cap_pin_3_mode);
435 clock->pps_info.pin_caps[4] = MLX5_GET(mtpps_reg, out, cap_pin_4_mode);
436 clock->pps_info.pin_caps[5] = MLX5_GET(mtpps_reg, out, cap_pin_5_mode);
437 clock->pps_info.pin_caps[6] = MLX5_GET(mtpps_reg, out, cap_pin_6_mode);
438 clock->pps_info.pin_caps[7] = MLX5_GET(mtpps_reg, out, cap_pin_7_mode);
441 void mlx5_pps_event(struct mlx5_core_dev *mdev,
442 struct mlx5_eqe *eqe)
444 struct mlx5_clock *clock = &mdev->clock;
445 struct ptp_clock_event ptp_event;
446 struct timespec64 ts;
447 u64 nsec_now, nsec_delta;
448 u64 cycles_now, cycles_delta;
449 int pin = eqe->data.pps.pin;
453 switch (clock->ptp_info.pin_config[pin].func) {
455 ptp_event.index = pin;
456 ptp_event.timestamp = timecounter_cyc2time(&clock->tc,
457 be64_to_cpu(eqe->data.pps.time_stamp));
458 if (clock->pps_info.enabled) {
459 ptp_event.type = PTP_CLOCK_PPSUSR;
460 ptp_event.pps_times.ts_real =
461 ns_to_timespec64(ptp_event.timestamp);
463 ptp_event.type = PTP_CLOCK_EXTTS;
465 ptp_clock_event(clock->ptp, &ptp_event);
468 mlx5_ptp_gettime(&clock->ptp_info, &ts);
469 cycles_now = mlx5_read_internal_timer(mdev);
472 ns = timespec64_to_ns(&ts);
473 write_lock_irqsave(&clock->lock, flags);
474 nsec_now = timecounter_cyc2time(&clock->tc, cycles_now);
475 nsec_delta = ns - nsec_now;
476 cycles_delta = div64_u64(nsec_delta << clock->cycles.shift,
478 clock->pps_info.start[pin] = cycles_now + cycles_delta;
479 schedule_work(&clock->pps_info.out_work);
480 write_unlock_irqrestore(&clock->lock, flags);
483 mlx5_core_err(mdev, " Unhandled event\n");
487 void mlx5_init_clock(struct mlx5_core_dev *mdev)
489 struct mlx5_clock *clock = &mdev->clock;
494 dev_freq = MLX5_CAP_GEN(mdev, device_frequency_khz);
496 mlx5_core_warn(mdev, "invalid device_frequency_khz, aborting HW clock init\n");
499 rwlock_init(&clock->lock);
500 clock->cycles.read = read_internal_timer;
501 clock->cycles.shift = MLX5_CYCLES_SHIFT;
502 clock->cycles.mult = clocksource_khz2mult(dev_freq,
503 clock->cycles.shift);
504 clock->nominal_c_mult = clock->cycles.mult;
505 clock->cycles.mask = CLOCKSOURCE_MASK(41);
508 timecounter_init(&clock->tc, &clock->cycles,
509 ktime_to_ns(ktime_get_real()));
511 /* Calculate period in seconds to call the overflow watchdog - to make
512 * sure counter is checked at least once every wrap around.
514 ns = cyclecounter_cyc2ns(&clock->cycles, clock->cycles.mask,
516 do_div(ns, NSEC_PER_SEC / 2 / HZ);
517 clock->overflow_period = ns;
519 mdev->clock_info_page = alloc_page(GFP_KERNEL);
520 if (mdev->clock_info_page) {
521 mdev->clock_info = kmap(mdev->clock_info_page);
522 if (!mdev->clock_info) {
523 __free_page(mdev->clock_info_page);
524 mlx5_core_warn(mdev, "failed to map clock page\n");
526 mdev->clock_info->sign = 0;
527 mdev->clock_info->nsec = clock->tc.nsec;
528 mdev->clock_info->cycles = clock->tc.cycle_last;
529 mdev->clock_info->mask = clock->cycles.mask;
530 mdev->clock_info->mult = clock->nominal_c_mult;
531 mdev->clock_info->shift = clock->cycles.shift;
532 mdev->clock_info->frac = clock->tc.frac;
533 mdev->clock_info->overflow_period =
534 clock->overflow_period;
538 INIT_WORK(&clock->pps_info.out_work, mlx5_pps_out);
539 INIT_DELAYED_WORK(&clock->overflow_work, mlx5_timestamp_overflow);
540 if (clock->overflow_period)
541 schedule_delayed_work(&clock->overflow_work, 0);
543 mlx5_core_warn(mdev, "invalid overflow period, overflow_work is not scheduled\n");
545 /* Configure the PHC */
546 clock->ptp_info = mlx5_ptp_clock_info;
548 /* Initialize 1PPS data structures */
549 if (MLX5_PPS_CAP(mdev))
550 mlx5_get_pps_caps(mdev);
551 if (clock->ptp_info.n_pins)
552 mlx5_init_pin_config(clock);
554 clock->ptp = ptp_clock_register(&clock->ptp_info,
556 if (IS_ERR(clock->ptp)) {
557 mlx5_core_warn(mdev, "ptp_clock_register failed %ld\n",
558 PTR_ERR(clock->ptp));
563 void mlx5_cleanup_clock(struct mlx5_core_dev *mdev)
565 struct mlx5_clock *clock = &mdev->clock;
567 if (!MLX5_CAP_GEN(mdev, device_frequency_khz))
571 ptp_clock_unregister(clock->ptp);
575 cancel_work_sync(&clock->pps_info.out_work);
576 cancel_delayed_work_sync(&clock->overflow_work);
578 if (mdev->clock_info) {
579 kunmap(mdev->clock_info_page);
580 __free_page(mdev->clock_info_page);
581 mdev->clock_info = NULL;
584 kfree(clock->ptp_info.pin_config);