1 // SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause)
2 /* QLogic qede NIC Driver
3 * Copyright (c) 2015-2017 QLogic Corporation
4 * Copyright (c) 2019-2020 Marvell International Ltd.
8 #define QEDE_PTP_TX_TIMEOUT (2 * HZ)
11 const struct qed_eth_ptp_ops *ops;	/* qed-core PTP callbacks (adjfreq, read_*_ts, cfg_filters, ...) */
12 struct ptp_clock_info clock_info;	/* registered with the kernel PHC subsystem */
13 struct cyclecounter cc;		/* abstraction over the free-running HW cycle counter */
14 struct timecounter tc;		/* converts HW cycles to nanoseconds (protected by ptp lock) */
15 struct ptp_clock *clock;		/* handle returned by ptp_clock_register() */
16 struct work_struct work;		/* deferred Tx timestamp retrieval (qede_ptp_task) */
17 unsigned long ptp_tx_start;		/* jiffies when the Tx skb was queued, for timeout check */
18 struct qede_dev *edev;		/* back-pointer to the owning qede device */
19 struct sk_buff *tx_skb;		/* single outstanding Tx skb awaiting a HW timestamp */
21 /* ptp spinlock is used for protecting the cycle/time counter fields
22 * and, also for serializing the qed PTP API invocations.
25 bool hw_ts_ioctl_called;	/* set once SIOCSHWTSTAMP has supplied a config (see qede_ptp_hw_ts) */
31 * qede_ptp_adjfine() - Adjust the frequency of the PTP cycle counter.
33 * @info: The PTP clock info structure.
34 * @scaled_ppm: Scaled parts per million adjustment from base.
36 * Scaled parts per million is ppm with a 16-bit binary fractional field.
38 * Return: Zero on success, negative errno otherwise.
40 static int qede_ptp_adjfine(struct ptp_clock_info *info, long scaled_ppm)
42 struct qede_ptp *ptp = container_of(info, struct qede_ptp, clock_info);
43 s32 ppb = scaled_ppm_to_ppb(scaled_ppm);	/* convert scaled ppm to plain parts-per-billion for qed */
44 struct qede_dev *edev = ptp->edev;
48 if (edev->state == QEDE_STATE_OPEN) {	/* HW may only be touched while the interface is up */
49 spin_lock_bh(&ptp->lock);	/* serialize qed PTP API invocations */
50 rc = ptp->ops->adjfreq(edev->cdev, ppb);
51 spin_unlock_bh(&ptp->lock);
53 DP_ERR(edev, "PTP adjfine called while interface is down\n");
/* PHC .adjtime callback: shift the PTP time by @delta nanoseconds. */
61 static int qede_ptp_adjtime(struct ptp_clock_info *info, s64 delta)
63 struct qede_dev *edev;
66 ptp = container_of(info, struct qede_ptp, clock_info);
69 DP_VERBOSE(edev, QED_MSG_DEBUG, "PTP adjtime called, delta = %llx\n",
72 spin_lock_bh(&ptp->lock);	/* protect the timecounter state */
73 timecounter_adjtime(&ptp->tc, delta);
74 spin_unlock_bh(&ptp->lock);
/* PHC .gettime64 callback: read the current PTP time into @ts. */
79 static int qede_ptp_gettime(struct ptp_clock_info *info, struct timespec64 *ts)
81 struct qede_dev *edev;
85 ptp = container_of(info, struct qede_ptp, clock_info);
88 spin_lock_bh(&ptp->lock);	/* protect the timecounter while reading */
89 ns = timecounter_read(&ptp->tc);
90 spin_unlock_bh(&ptp->lock);
92 DP_VERBOSE(edev, QED_MSG_DEBUG, "PTP gettime called, ns = %llu\n", ns);
94 *ts = ns_to_timespec64(ns);
/* PHC .settime64 callback: set the PTP time to @ts by re-seeding the timecounter. */
99 static int qede_ptp_settime(struct ptp_clock_info *info,
100 const struct timespec64 *ts)
102 struct qede_dev *edev;
103 struct qede_ptp *ptp;
106 ptp = container_of(info, struct qede_ptp, clock_info);
109 ns = timespec64_to_ns(ts);
111 DP_VERBOSE(edev, QED_MSG_DEBUG, "PTP settime called, ns = %llu\n", ns);
113 /* Re-init the timecounter */
114 spin_lock_bh(&ptp->lock);	/* protect cycle/time counter fields */
115 timecounter_init(&ptp->tc, &ptp->cc, ns);
116 spin_unlock_bh(&ptp->lock);
121 /* Enable (or disable) ancillary features of the phc subsystem */
122 static int qede_ptp_ancillary_feature_enable(struct ptp_clock_info *info,
123 struct ptp_clock_request *rq,
126 struct qede_dev *edev;
127 struct qede_ptp *ptp;
129 ptp = container_of(info, struct qede_ptp, clock_info);
/* No ancillary pin/PPS support in this HW — always reject the request. */
132 DP_ERR(edev, "PHC ancillary features are not supported\n");
137 static void qede_ptp_task(struct work_struct *work)
139 struct skb_shared_hwtstamps shhwtstamps;
140 struct qede_dev *edev;
141 struct qede_ptp *ptp;
146 ptp = container_of(work, struct qede_ptp, work);
148 timedout = time_is_before_jiffies(ptp->ptp_tx_start +
149 QEDE_PTP_TX_TIMEOUT);
151 /* Read Tx timestamp registers */
152 spin_lock_bh(&ptp->lock);
153 rc = ptp->ops->read_tx_ts(edev->cdev, ×tamp);
154 spin_unlock_bh(&ptp->lock);
156 if (unlikely(timedout)) {
157 DP_INFO(edev, "Tx timestamp is not recorded\n");
158 dev_kfree_skb_any(ptp->tx_skb);
160 clear_bit_unlock(QEDE_FLAGS_PTP_TX_IN_PRORGESS,
162 edev->ptp_skip_txts++;
164 /* Reschedule to keep checking for a valid TS value */
165 schedule_work(&ptp->work);
170 ns = timecounter_cyc2time(&ptp->tc, timestamp);
171 memset(&shhwtstamps, 0, sizeof(shhwtstamps));
172 shhwtstamps.hwtstamp = ns_to_ktime(ns);
173 skb_tstamp_tx(ptp->tx_skb, &shhwtstamps);
174 dev_kfree_skb_any(ptp->tx_skb);
176 clear_bit_unlock(QEDE_FLAGS_PTP_TX_IN_PRORGESS, &edev->flags);
178 DP_VERBOSE(edev, QED_MSG_DEBUG,
179 "Tx timestamp, timestamp cycles = %llu, ns = %llu\n",
183 /* Read the PHC. This API is invoked with ptp_lock held. */
184 static u64 qede_ptp_read_cc(const struct cyclecounter *cc)
186 struct qede_dev *edev;
187 struct qede_ptp *ptp;
191 ptp = container_of(cc, struct qede_ptp, cc);
193 rc = ptp->ops->read_cc(edev->cdev, &phc_cycles);
/* A failed PHC read is unexpected; warn once rather than spam the log. */
195 WARN_ONCE(1, "PHC read err %d\n", rc);
197 DP_VERBOSE(edev, QED_MSG_DEBUG, "PHC read cycles = %llu\n", phc_cycles);
/* Translate the stored hwtstamp_config (ptp->tx_type / ptp->rx_filter)
 * into qed tx-type and rx-filter enums and program them into the HW via
 * ptp->ops->cfg_filters().  Unsupported Rx filters are coerced to the
 * closest supported superset and written back into ptp->rx_filter so the
 * ioctl can report what was actually enabled.
 */
202 static int qede_ptp_cfg_filters(struct qede_dev *edev)
204 enum qed_ptp_hwtstamp_tx_type tx_type = QED_PTP_HWTSTAMP_TX_ON;
205 enum qed_ptp_filter_type rx_filter = QED_PTP_FILTER_NONE;
206 struct qede_ptp *ptp = edev->ptp;
211 if (!ptp->hw_ts_ioctl_called) {
/* Nothing to program until userspace issues SIOCSHWTSTAMP. */
212 DP_INFO(edev, "TS IOCTL not called\n");
216 switch (ptp->tx_type) {
218 set_bit(QEDE_FLAGS_TX_TIMESTAMPING_EN, &edev->flags);
219 tx_type = QED_PTP_HWTSTAMP_TX_ON;
222 case HWTSTAMP_TX_OFF:
223 clear_bit(QEDE_FLAGS_TX_TIMESTAMPING_EN, &edev->flags);
224 tx_type = QED_PTP_HWTSTAMP_TX_OFF;
227 case HWTSTAMP_TX_ONESTEP_SYNC:
228 case HWTSTAMP_TX_ONESTEP_P2P:
229 DP_ERR(edev, "One-step timestamping is not supported\n");
233 spin_lock_bh(&ptp->lock);	/* serialize the qed cfg_filters call */
234 switch (ptp->rx_filter) {
235 case HWTSTAMP_FILTER_NONE:
236 rx_filter = QED_PTP_FILTER_NONE;
238 case HWTSTAMP_FILTER_ALL:
239 case HWTSTAMP_FILTER_SOME:
240 case HWTSTAMP_FILTER_NTP_ALL:
/* "timestamp everything" modes are not supported as such; report NONE */
241 ptp->rx_filter = HWTSTAMP_FILTER_NONE;
242 rx_filter = QED_PTP_FILTER_ALL;
244 case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
245 ptp->rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_EVENT;
246 rx_filter = QED_PTP_FILTER_V1_L4_EVENT;
248 case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
249 case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
250 ptp->rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_EVENT;
251 /* Initialize PTP detection for UDP/IPv4 events */
252 rx_filter = QED_PTP_FILTER_V1_L4_GEN;
254 case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
255 ptp->rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_EVENT;
256 rx_filter = QED_PTP_FILTER_V2_L4_EVENT;
258 case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
259 case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
260 ptp->rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_EVENT;
261 /* Initialize PTP detection for UDP/IPv4 or UDP/IPv6 events */
262 rx_filter = QED_PTP_FILTER_V2_L4_GEN;
264 case HWTSTAMP_FILTER_PTP_V2_L2_EVENT:
265 ptp->rx_filter = HWTSTAMP_FILTER_PTP_V2_L2_EVENT;
266 rx_filter = QED_PTP_FILTER_V2_L2_EVENT;
268 case HWTSTAMP_FILTER_PTP_V2_L2_SYNC:
269 case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ:
270 ptp->rx_filter = HWTSTAMP_FILTER_PTP_V2_L2_EVENT;
271 /* Initialize PTP detection L2 events */
272 rx_filter = QED_PTP_FILTER_V2_L2_GEN;
274 case HWTSTAMP_FILTER_PTP_V2_EVENT:
275 ptp->rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT;
276 rx_filter = QED_PTP_FILTER_V2_EVENT;
278 case HWTSTAMP_FILTER_PTP_V2_SYNC:
279 case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
280 ptp->rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT;
281 /* Initialize PTP detection L2, UDP/IPv4 or UDP/IPv6 events */
282 rx_filter = QED_PTP_FILTER_V2_GEN;
/* Program the resolved tx_type/rx_filter pair into the HW */
286 ptp->ops->cfg_filters(edev->cdev, rx_filter, tx_type);
288 spin_unlock_bh(&ptp->lock);
/* SIOCSHWTSTAMP ioctl handler: copy the hwtstamp_config from userspace,
 * record it in the ptp state, program the HW filters, and copy the
 * (possibly-coerced) config back to userspace.
 */
293 int qede_ptp_hw_ts(struct qede_dev *edev, struct ifreq *ifr)
295 struct hwtstamp_config config;
296 struct qede_ptp *ptp;
303 if (copy_from_user(&config, ifr->ifr_data, sizeof(config)))
306 DP_VERBOSE(edev, QED_MSG_DEBUG,
307 "HWTSTAMP IOCTL: Requested tx_type = %d, requested rx_filters = %d\n",
308 config.tx_type, config.rx_filter);
310 ptp->hw_ts_ioctl_called = 1;
311 ptp->tx_type = config.tx_type;
312 ptp->rx_filter = config.rx_filter;
314 rc = qede_ptp_cfg_filters(edev);
/* cfg_filters may have downgraded rx_filter; report the effective value */
318 config.rx_filter = ptp->rx_filter;
320 return copy_to_user(ifr->ifr_data, &config,
321 sizeof(config)) ? -EFAULT : 0;
/* ethtool get_ts_info handler: advertise the timestamping capabilities,
 * the PHC index, and the supported Tx types and Rx filters.
 */
324 int qede_ptp_get_ts_info(struct qede_dev *edev, struct kernel_ethtool_ts_info *info)
326 struct qede_ptp *ptp = edev->ptp;
/* Without a PTP context only SW Tx timestamping can be offered */
329 info->so_timestamping = SOF_TIMESTAMPING_TX_SOFTWARE;
334 info->so_timestamping = SOF_TIMESTAMPING_TX_SOFTWARE |
335 SOF_TIMESTAMPING_TX_HARDWARE |
336 SOF_TIMESTAMPING_RX_HARDWARE |
337 SOF_TIMESTAMPING_RAW_HARDWARE;
340 info->phc_index = ptp_clock_index(ptp->clock);
342 info->rx_filters = BIT(HWTSTAMP_FILTER_NONE) |
343 BIT(HWTSTAMP_FILTER_PTP_V1_L4_EVENT) |
344 BIT(HWTSTAMP_FILTER_PTP_V1_L4_SYNC) |
345 BIT(HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ) |
346 BIT(HWTSTAMP_FILTER_PTP_V2_L4_EVENT) |
347 BIT(HWTSTAMP_FILTER_PTP_V2_L4_SYNC) |
348 BIT(HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ) |
349 BIT(HWTSTAMP_FILTER_PTP_V2_L2_EVENT) |
350 BIT(HWTSTAMP_FILTER_PTP_V2_L2_SYNC) |
351 BIT(HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ) |
352 BIT(HWTSTAMP_FILTER_PTP_V2_EVENT) |
353 BIT(HWTSTAMP_FILTER_PTP_V2_SYNC) |
354 BIT(HWTSTAMP_FILTER_PTP_V2_DELAY_REQ);
356 info->tx_types = BIT(HWTSTAMP_TX_OFF) | BIT(HWTSTAMP_TX_ON);
/* Tear down PTP support: unregister the PHC, stop the Tx-timestamp work,
 * drop any pending Tx skb, and disable PTP in the HW.
 */
361 void qede_ptp_disable(struct qede_dev *edev)
363 struct qede_ptp *ptp;
370 ptp_clock_unregister(ptp->clock);
374 /* Cancel PTP work queue. Should be done after the Tx queues are
375 * drained to prevent additional scheduling.
377 cancel_work_sync(&ptp->work);
379 dev_kfree_skb_any(ptp->tx_skb);
381 clear_bit_unlock(QEDE_FLAGS_PTP_TX_IN_PRORGESS, &edev->flags);
384 /* Disable PTP in HW */
385 spin_lock_bh(&ptp->lock);
386 ptp->ops->disable(edev->cdev);
387 spin_unlock_bh(&ptp->lock);
/* Initialize the PTP context: enable PTP in HW, set up the Tx-timestamp
 * work item, and seed the cyclecounter/timecounter from the current
 * wall-clock time.
 */
393 static int qede_ptp_init(struct qede_dev *edev)
395 struct qede_ptp *ptp;
402 spin_lock_init(&ptp->lock);
404 /* Configure PTP in HW */
405 rc = ptp->ops->enable(edev->cdev);
407 DP_INFO(edev, "PTP HW enable failed\n");
411 /* Init work queue for Tx timestamping */
412 INIT_WORK(&ptp->work, qede_ptp_task);
414 /* Init cyclecounter and timecounter */
415 memset(&ptp->cc, 0, sizeof(ptp->cc));
416 ptp->cc.read = qede_ptp_read_cc;
417 ptp->cc.mask = CYCLECOUNTER_MASK(64);
/* Start the timecounter at the current system real time */
421 timecounter_init(&ptp->tc, &ptp->cc, ktime_to_ns(ktime_get_real()));
/* Allocate and set up the PTP context, program the HW filters, fill in
 * the ptp_clock_info callbacks, and register the PHC with the kernel.
 * On registration failure the whole PTP setup is rolled back via
 * qede_ptp_disable().
 */
426 int qede_ptp_enable(struct qede_dev *edev)
428 struct qede_ptp *ptp;
431 ptp = kzalloc(sizeof(*ptp), GFP_KERNEL);
433 DP_INFO(edev, "Failed to allocate struct for PTP\n");
438 ptp->ops = edev->ops->ptp;
440 DP_INFO(edev, "PTP enable failed\n");
447 rc = qede_ptp_init(edev);
451 qede_ptp_cfg_filters(edev);
453 /* Fill the ptp_clock_info struct and register PTP clock */
454 ptp->clock_info.owner = THIS_MODULE;
455 snprintf(ptp->clock_info.name, 16, "%s", edev->ndev->name);
456 ptp->clock_info.max_adj = QED_MAX_PHC_DRIFT_PPB;
457 ptp->clock_info.n_alarm = 0;
458 ptp->clock_info.n_ext_ts = 0;
459 ptp->clock_info.n_per_out = 0;
460 ptp->clock_info.pps = 0;
461 ptp->clock_info.adjfine = qede_ptp_adjfine;
462 ptp->clock_info.adjtime = qede_ptp_adjtime;
463 ptp->clock_info.gettime64 = qede_ptp_gettime;
464 ptp->clock_info.settime64 = qede_ptp_settime;
465 ptp->clock_info.enable = qede_ptp_ancillary_feature_enable;
467 ptp->clock = ptp_clock_register(&ptp->clock_info, &edev->pdev->dev);
468 if (IS_ERR(ptp->clock)) {
469 DP_ERR(edev, "PTP clock registration failed\n");
/* Roll back HW enable, work item and allocation */
470 qede_ptp_disable(edev);
/* Called on the Tx path for a packet requesting a HW timestamp.  The HW
 * supports a single outstanding timestamped packet, guarded by the
 * QEDE_FLAGS_PTP_TX_IN_PRORGESS bit; extra requests are counted in
 * edev->ptp_skip_txts and dropped.  On success the skb is held and
 * qede_ptp_task is scheduled to poll for the timestamp.
 */
485 void qede_ptp_tx_ts(struct qede_dev *edev, struct sk_buff *skb)
487 struct qede_ptp *ptp;
493 if (test_and_set_bit_lock(QEDE_FLAGS_PTP_TX_IN_PRORGESS,
495 DP_VERBOSE(edev, QED_MSG_DEBUG, "Timestamping in progress\n");
496 edev->ptp_skip_txts++;
500 if (unlikely(!test_bit(QEDE_FLAGS_TX_TIMESTAMPING_EN, &edev->flags))) {
501 DP_VERBOSE(edev, QED_MSG_DEBUG,
502 "Tx timestamping was not enabled, this pkt will not be timestamped\n");
503 clear_bit_unlock(QEDE_FLAGS_PTP_TX_IN_PRORGESS, &edev->flags);
504 edev->ptp_skip_txts++;
505 } else if (unlikely(ptp->tx_skb)) {
506 DP_VERBOSE(edev, QED_MSG_DEBUG,
507 "Device supports a single outstanding pkt to ts, It will not be ts\n");
508 clear_bit_unlock(QEDE_FLAGS_PTP_TX_IN_PRORGESS, &edev->flags);
509 edev->ptp_skip_txts++;
511 skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
512 /* schedule check for Tx timestamp */
513 ptp->tx_skb = skb_get(skb);
514 ptp->ptp_tx_start = jiffies;
515 schedule_work(&ptp->work);
519 void qede_ptp_rx_ts(struct qede_dev *edev, struct sk_buff *skb)
521 struct qede_ptp *ptp;
529 spin_lock_bh(&ptp->lock);
530 rc = ptp->ops->read_rx_ts(edev->cdev, ×tamp);
532 spin_unlock_bh(&ptp->lock);
533 DP_INFO(edev, "Invalid Rx timestamp\n");
537 ns = timecounter_cyc2time(&ptp->tc, timestamp);
538 spin_unlock_bh(&ptp->lock);
539 skb_hwtstamps(skb)->hwtstamp = ns_to_ktime(ns);
540 DP_VERBOSE(edev, QED_MSG_DEBUG,
541 "Rx timestamp, timestamp cycles = %llu, ns = %llu\n",