/* QLogic qede NIC Driver
 * Copyright (c) 2015-2017 QLogic Corporation
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 * Redistribution and use in source and binary forms, with or
 * without modification, are permitted provided that the following
 * conditions are met:
 *
 * - Redistributions of source code must retain the above
 *   copyright notice, this list of conditions and the following
 *   disclaimer.
 *
 * - Redistributions in binary form must reproduce the above
 *   copyright notice, this list of conditions and the following
 *   disclaimer in the documentation and /or other materials
 *   provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include "qede_ptp.h"
33
/* Per-device PTP context, hung off qede_dev::ptp. */
struct qede_ptp {
	const struct qed_eth_ptp_ops *ops;	/* qed-layer PTP HW accessors */
	struct ptp_clock_info clock_info;	/* registered with the PHC core */
	struct cyclecounter cc;			/* free-running HW cycle counter */
	struct timecounter tc;			/* cycles -> nanoseconds state */
	struct ptp_clock *clock;		/* PHC handle, NULL if unregistered */
	struct work_struct work;		/* polls HW for a Tx timestamp */
	struct qede_dev *edev;			/* back-pointer to owning device */
	struct sk_buff *tx_skb;			/* single outstanding Tx-TS skb */

	/* ptp spinlock is used for protecting the cycle/time counter fields
	 * and, also for serializing the qed PTP API invocations.
	 */
	spinlock_t lock;
	bool hw_ts_ioctl_called;	/* SIOCSHWTSTAMP seen at least once */
	u16 tx_type;			/* requested HWTSTAMP_TX_* mode */
	u16 rx_filter;			/* HWTSTAMP_FILTER_* actually applied */
};
52
/**
 * qede_ptp_adjfreq() - Adjust the frequency of the PHC cycle counter.
 * @info: the ptp clock info structure (embedded in struct qede_ptp)
 * @ppb: parts per billion adjustment from the base frequency
 *
 * The adjustment is done in HW via the qed adjfreq op, which is only
 * legal while the interface is up; the qed call is serialized with the
 * ptp spinlock.
 *
 * Return: 0 on success, -EFAULT if the interface is down, or the qed
 * op's error code.
 */
static int qede_ptp_adjfreq(struct ptp_clock_info *info, s32 ppb)
{
	struct qede_ptp *ptp = container_of(info, struct qede_ptp, clock_info);
	struct qede_dev *edev = ptp->edev;
	int rc;

	/* __qede_lock keeps edev->state stable while we program the HW */
	__qede_lock(edev);
	if (edev->state == QEDE_STATE_OPEN) {
		spin_lock_bh(&ptp->lock);
		rc = ptp->ops->adjfreq(edev->cdev, ppb);
		spin_unlock_bh(&ptp->lock);
	} else {
		DP_ERR(edev, "PTP adjfreq called while interface is down\n");
		rc = -EFAULT;
	}
	__qede_unlock(edev);

	return rc;
}
80
81static int qede_ptp_adjtime(struct ptp_clock_info *info, s64 delta)
82{
83 struct qede_dev *edev;
84 struct qede_ptp *ptp;
85
86 ptp = container_of(info, struct qede_ptp, clock_info);
87 edev = ptp->edev;
88
89 DP_VERBOSE(edev, QED_MSG_DEBUG, "PTP adjtime called, delta = %llx\n",
90 delta);
91
92 spin_lock_bh(&ptp->lock);
93 timecounter_adjtime(&ptp->tc, delta);
94 spin_unlock_bh(&ptp->lock);
95
96 return 0;
97}
98
99static int qede_ptp_gettime(struct ptp_clock_info *info, struct timespec64 *ts)
100{
101 struct qede_dev *edev;
102 struct qede_ptp *ptp;
103 u64 ns;
104
105 ptp = container_of(info, struct qede_ptp, clock_info);
106 edev = ptp->edev;
107
108 spin_lock_bh(&ptp->lock);
109 ns = timecounter_read(&ptp->tc);
110 spin_unlock_bh(&ptp->lock);
111
112 DP_VERBOSE(edev, QED_MSG_DEBUG, "PTP gettime called, ns = %llu\n", ns);
113
114 *ts = ns_to_timespec64(ns);
115
116 return 0;
117}
118
119static int qede_ptp_settime(struct ptp_clock_info *info,
120 const struct timespec64 *ts)
121{
122 struct qede_dev *edev;
123 struct qede_ptp *ptp;
124 u64 ns;
125
126 ptp = container_of(info, struct qede_ptp, clock_info);
127 edev = ptp->edev;
128
129 ns = timespec64_to_ns(ts);
130
131 DP_VERBOSE(edev, QED_MSG_DEBUG, "PTP settime called, ns = %llu\n", ns);
132
133 /* Re-init the timecounter */
134 spin_lock_bh(&ptp->lock);
135 timecounter_init(&ptp->tc, &ptp->cc, ns);
136 spin_unlock_bh(&ptp->lock);
137
138 return 0;
139}
140
141/* Enable (or disable) ancillary features of the phc subsystem */
142static int qede_ptp_ancillary_feature_enable(struct ptp_clock_info *info,
143 struct ptp_clock_request *rq,
144 int on)
145{
146 struct qede_dev *edev;
147 struct qede_ptp *ptp;
148
149 ptp = container_of(info, struct qede_ptp, clock_info);
150 edev = ptp->edev;
151
152 DP_ERR(edev, "PHC ancillary features are not supported\n");
153
154 return -ENOTSUPP;
155}
156
/* Deferred work: poll the device for the Tx timestamp of the skb saved
 * by qede_ptp_tx_ts().  Reschedules itself until the HW reports a valid
 * value, then delivers the timestamp to the socket error queue and
 * releases the single-outstanding-skb slot.
 */
static void qede_ptp_task(struct work_struct *work)
{
	struct skb_shared_hwtstamps shhwtstamps;
	struct qede_dev *edev;
	struct qede_ptp *ptp;
	u64 timestamp, ns;
	int rc;

	ptp = container_of(work, struct qede_ptp, work);
	edev = ptp->edev;

	/* Read Tx timestamp registers */
	spin_lock_bh(&ptp->lock);
	rc = ptp->ops->read_tx_ts(edev->cdev, &timestamp);
	spin_unlock_bh(&ptp->lock);
	if (rc) {
		/* Reschedule to keep checking for a valid timestamp value */
		schedule_work(&ptp->work);
		return;
	}

	/* NOTE(review): ptp->tc is read here without ptp->lock held,
	 * unlike the other timecounter users in this file -- confirm
	 * this cannot race a concurrent settime/adjtime.
	 */
	ns = timecounter_cyc2time(&ptp->tc, timestamp);
	memset(&shhwtstamps, 0, sizeof(shhwtstamps));
	shhwtstamps.hwtstamp = ns_to_ktime(ns);
	skb_tstamp_tx(ptp->tx_skb, &shhwtstamps);
	dev_kfree_skb_any(ptp->tx_skb);
	ptp->tx_skb = NULL;
	/* Release the slot only after tx_skb is cleared (bit acts as a lock,
	 * taken by qede_ptp_tx_ts())
	 */
	clear_bit_unlock(QEDE_FLAGS_PTP_TX_IN_PRORGESS, &edev->flags);

	DP_VERBOSE(edev, QED_MSG_DEBUG,
		   "Tx timestamp, timestamp cycles = %llu, ns = %llu\n",
		   timestamp, ns);
}
190
191/* Read the PHC. This API is invoked with ptp_lock held. */
192static u64 qede_ptp_read_cc(const struct cyclecounter *cc)
193{
194 struct qede_dev *edev;
195 struct qede_ptp *ptp;
196 u64 phc_cycles;
197 int rc;
198
199 ptp = container_of(cc, struct qede_ptp, cc);
200 edev = ptp->edev;
201 rc = ptp->ops->read_cc(edev->cdev, &phc_cycles);
202 if (rc)
203 WARN_ONCE(1, "PHC read err %d\n", rc);
204
205 DP_VERBOSE(edev, QED_MSG_DEBUG, "PHC read cycles = %llu\n", phc_cycles);
206
207 return phc_cycles;
208}
209
Sudarsana Reddy Kalluru4c552152017-02-15 10:24:11 +0200210static int qede_ptp_cfg_filters(struct qede_dev *edev)
211{
212 struct qede_ptp *ptp = edev->ptp;
213
214 if (!ptp)
215 return -EIO;
216
217 if (!ptp->hw_ts_ioctl_called) {
218 DP_INFO(edev, "TS IOCTL not called\n");
219 return 0;
220 }
221
222 switch (ptp->tx_type) {
223 case HWTSTAMP_TX_ON:
224 edev->flags |= QEDE_TX_TIMESTAMPING_EN;
225 ptp->ops->hwtstamp_tx_on(edev->cdev);
226 break;
227
228 case HWTSTAMP_TX_ONESTEP_SYNC:
229 DP_ERR(edev, "One-step timestamping is not supported\n");
230 return -ERANGE;
231 }
232
233 spin_lock_bh(&ptp->lock);
234 switch (ptp->rx_filter) {
235 case HWTSTAMP_FILTER_NONE:
236 break;
237 case HWTSTAMP_FILTER_ALL:
238 case HWTSTAMP_FILTER_SOME:
239 ptp->rx_filter = HWTSTAMP_FILTER_NONE;
240 break;
241 case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
242 case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
243 case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
244 ptp->rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_EVENT;
245 /* Initialize PTP detection for UDP/IPv4 events */
246 ptp->ops->cfg_rx_filters(edev->cdev, QED_PTP_FILTER_IPV4);
247 break;
248 case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
249 case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
250 case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
251 ptp->rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_EVENT;
252 /* Initialize PTP detection for UDP/IPv4 or UDP/IPv6 events */
253 ptp->ops->cfg_rx_filters(edev->cdev, QED_PTP_FILTER_IPV4_IPV6);
254 break;
255 case HWTSTAMP_FILTER_PTP_V2_L2_EVENT:
256 case HWTSTAMP_FILTER_PTP_V2_L2_SYNC:
257 case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ:
258 ptp->rx_filter = HWTSTAMP_FILTER_PTP_V2_L2_EVENT;
259 /* Initialize PTP detection L2 events */
260 ptp->ops->cfg_rx_filters(edev->cdev, QED_PTP_FILTER_L2);
261 break;
262 case HWTSTAMP_FILTER_PTP_V2_EVENT:
263 case HWTSTAMP_FILTER_PTP_V2_SYNC:
264 case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
265 ptp->rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT;
266 /* Initialize PTP detection L2, UDP/IPv4 or UDP/IPv6 events */
267 ptp->ops->cfg_rx_filters(edev->cdev,
268 QED_PTP_FILTER_L2_IPV4_IPV6);
269 break;
270 }
271
272 spin_unlock_bh(&ptp->lock);
273
274 return 0;
275}
276
/* SIOCSHWTSTAMP handler: copy the hwtstamp_config from user space,
 * apply it to HW via qede_ptp_cfg_filters() and copy the (possibly
 * downgraded) config back to the caller.
 *
 * Returns 0 on success, -EIO when PTP is not initialized, -EFAULT on a
 * bad user pointer, -EINVAL for reserved flags, or the error from
 * qede_ptp_cfg_filters().
 */
int qede_ptp_hw_ts(struct qede_dev *edev, struct ifreq *ifr)
{
	struct hwtstamp_config config;
	struct qede_ptp *ptp;
	int rc;

	ptp = edev->ptp;
	if (!ptp)
		return -EIO;

	if (copy_from_user(&config, ifr->ifr_data, sizeof(config)))
		return -EFAULT;

	DP_VERBOSE(edev, QED_MSG_DEBUG,
		   "HWTSTAMP IOCTL: Requested tx_type = %d, requested rx_filters = %d\n",
		   config.tx_type, config.rx_filter);

	if (config.flags) {
		DP_ERR(edev, "config.flags is reserved for future use\n");
		return -EINVAL;
	}

	ptp->hw_ts_ioctl_called = 1;
	ptp->tx_type = config.tx_type;
	ptp->rx_filter = config.rx_filter;

	rc = qede_ptp_cfg_filters(edev);
	if (rc)
		return rc;

	/* cfg_filters() may have downgraded the Rx filter; report back */
	config.rx_filter = ptp->rx_filter;

	return copy_to_user(ifr->ifr_data, &config,
			    sizeof(config)) ? -EFAULT : 0;
}
312
Sudarsana Reddy Kalluru4c552152017-02-15 10:24:11 +0200313int qede_ptp_get_ts_info(struct qede_dev *edev, struct ethtool_ts_info *info)
314{
315 struct qede_ptp *ptp = edev->ptp;
316
317 if (!ptp)
318 return -EIO;
319
320 info->so_timestamping = SOF_TIMESTAMPING_TX_SOFTWARE |
321 SOF_TIMESTAMPING_RX_SOFTWARE |
322 SOF_TIMESTAMPING_SOFTWARE |
323 SOF_TIMESTAMPING_TX_HARDWARE |
324 SOF_TIMESTAMPING_RX_HARDWARE |
325 SOF_TIMESTAMPING_RAW_HARDWARE;
326
327 if (ptp->clock)
328 info->phc_index = ptp_clock_index(ptp->clock);
329 else
330 info->phc_index = -1;
331
332 info->rx_filters = BIT(HWTSTAMP_FILTER_NONE) |
333 BIT(HWTSTAMP_FILTER_PTP_V1_L4_EVENT) |
334 BIT(HWTSTAMP_FILTER_PTP_V1_L4_SYNC) |
335 BIT(HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ) |
336 BIT(HWTSTAMP_FILTER_PTP_V2_L4_EVENT) |
337 BIT(HWTSTAMP_FILTER_PTP_V2_L4_SYNC) |
338 BIT(HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ) |
339 BIT(HWTSTAMP_FILTER_PTP_V2_L2_EVENT) |
340 BIT(HWTSTAMP_FILTER_PTP_V2_L2_SYNC) |
341 BIT(HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ) |
342 BIT(HWTSTAMP_FILTER_PTP_V2_EVENT) |
343 BIT(HWTSTAMP_FILTER_PTP_V2_SYNC) |
344 BIT(HWTSTAMP_FILTER_PTP_V2_DELAY_REQ);
345
346 info->tx_types = BIT(HWTSTAMP_TX_OFF) | BIT(HWTSTAMP_TX_ON);
347
348 return 0;
349}
350
/* Tear down PTP: unregister the PHC, stop the Tx-timestamp worker,
 * release any pending skb, disable the feature in HW and free the
 * context.  Safe to call when PTP was never enabled (edev->ptp NULL).
 * Note: frees edev->ptp -- callers must not touch it afterwards.
 */
void qede_ptp_disable(struct qede_dev *edev)
{
	struct qede_ptp *ptp;

	ptp = edev->ptp;
	if (!ptp)
		return;

	/* Unregister first so no new PHC callbacks race the teardown */
	if (ptp->clock) {
		ptp_clock_unregister(ptp->clock);
		ptp->clock = NULL;
	}

	/* Cancel PTP work queue. Should be done after the Tx queues are
	 * drained to prevent additional scheduling.
	 */
	cancel_work_sync(&ptp->work);
	if (ptp->tx_skb) {
		dev_kfree_skb_any(ptp->tx_skb);
		ptp->tx_skb = NULL;
	}

	/* Disable PTP in HW */
	spin_lock_bh(&ptp->lock);
	ptp->ops->disable(edev->cdev);
	spin_unlock_bh(&ptp->lock);

	kfree(ptp);
	edev->ptp = NULL;
}
381
/* Enable PTP in HW and set up the Tx-timestamp worker.
 *
 * @init_tc: when true, (re)initialize the cyclecounter/timecounter to
 *	     the current wall time.  Passing false preserves the PHC
 *	     time across an internal unload/load (e.g. MTU change) so a
 *	     running PTP application is not disturbed.
 *
 * Returns 0 on success, -EINVAL if edev->ptp was not allocated, or the
 * qed enable op's error code.
 */
static int qede_ptp_init(struct qede_dev *edev, bool init_tc)
{
	struct qede_ptp *ptp;
	int rc;

	ptp = edev->ptp;
	if (!ptp)
		return -EINVAL;

	spin_lock_init(&ptp->lock);

	/* Configure PTP in HW */
	rc = ptp->ops->enable(edev->cdev);
	if (rc) {
		DP_INFO(edev, "PTP HW enable failed\n");
		return rc;
	}

	/* Init work queue for Tx timestamping */
	INIT_WORK(&ptp->work, qede_ptp_task);

	/* Init cyclecounter and timecounter. This is done only in the first
	 * load. If done in every load, PTP application will fail when doing
	 * unload / load (e.g. MTU change) while it is running.
	 */
	if (init_tc) {
		memset(&ptp->cc, 0, sizeof(ptp->cc));
		ptp->cc.read = qede_ptp_read_cc;
		ptp->cc.mask = CYCLECOUNTER_MASK(64);
		/* shift 0 / mult 1: HW cycles are already nanoseconds */
		ptp->cc.shift = 0;
		ptp->cc.mult = 1;

		timecounter_init(&ptp->tc, &ptp->cc,
				 ktime_to_ns(ktime_get_real()));
	}

	return rc;
}
420
421int qede_ptp_enable(struct qede_dev *edev, bool init_tc)
422{
423 struct qede_ptp *ptp;
424 int rc;
Sudarsana Reddy Kalluru4c552152017-02-15 10:24:11 +0200425
426 ptp = kzalloc(sizeof(*ptp), GFP_KERNEL);
427 if (!ptp) {
428 DP_INFO(edev, "Failed to allocate struct for PTP\n");
429 return -ENOMEM;
430 }
431
432 ptp->edev = edev;
433 ptp->ops = edev->ops->ptp;
434 if (!ptp->ops) {
sudarsana.kalluru@cavium.com03574492017-04-26 09:00:51 -0700435 DP_INFO(edev, "PTP enable failed\n");
436 rc = -EIO;
437 goto err1;
Sudarsana Reddy Kalluru4c552152017-02-15 10:24:11 +0200438 }
439
440 edev->ptp = ptp;
441
sudarsana.kalluru@cavium.com03574492017-04-26 09:00:51 -0700442 rc = qede_ptp_init(edev, init_tc);
443 if (rc)
444 goto err1;
445
446 qede_ptp_cfg_filters(edev);
447
Sudarsana Reddy Kalluru4c552152017-02-15 10:24:11 +0200448 /* Fill the ptp_clock_info struct and register PTP clock */
449 ptp->clock_info.owner = THIS_MODULE;
450 snprintf(ptp->clock_info.name, 16, "%s", edev->ndev->name);
451 ptp->clock_info.max_adj = QED_MAX_PHC_DRIFT_PPB;
452 ptp->clock_info.n_alarm = 0;
453 ptp->clock_info.n_ext_ts = 0;
454 ptp->clock_info.n_per_out = 0;
455 ptp->clock_info.pps = 0;
456 ptp->clock_info.adjfreq = qede_ptp_adjfreq;
457 ptp->clock_info.adjtime = qede_ptp_adjtime;
458 ptp->clock_info.gettime64 = qede_ptp_gettime;
459 ptp->clock_info.settime64 = qede_ptp_settime;
460 ptp->clock_info.enable = qede_ptp_ancillary_feature_enable;
461
462 ptp->clock = ptp_clock_register(&ptp->clock_info, &edev->pdev->dev);
463 if (IS_ERR(ptp->clock)) {
sudarsana.kalluru@cavium.com03574492017-04-26 09:00:51 -0700464 rc = -EINVAL;
Sudarsana Reddy Kalluru4c552152017-02-15 10:24:11 +0200465 DP_ERR(edev, "PTP clock registeration failed\n");
sudarsana.kalluru@cavium.com03574492017-04-26 09:00:51 -0700466 goto err2;
Sudarsana Reddy Kalluru4c552152017-02-15 10:24:11 +0200467 }
468
469 return 0;
sudarsana.kalluru@cavium.com03574492017-04-26 09:00:51 -0700470
471err2:
472 qede_ptp_disable(edev);
473 ptp->clock = NULL;
474err1:
475 kfree(ptp);
476 edev->ptp = NULL;
477
478 return rc;
Sudarsana Reddy Kalluru4c552152017-02-15 10:24:11 +0200479}
480
481void qede_ptp_tx_ts(struct qede_dev *edev, struct sk_buff *skb)
482{
483 struct qede_ptp *ptp;
484
485 ptp = edev->ptp;
486 if (!ptp)
487 return;
488
sudarsana.kalluru@cavium.com461eec12017-05-02 01:11:02 -0700489 if (test_and_set_bit_lock(QEDE_FLAGS_PTP_TX_IN_PRORGESS, &edev->flags))
490 return;
491
Sudarsana Reddy Kalluru4c552152017-02-15 10:24:11 +0200492 if (unlikely(!(edev->flags & QEDE_TX_TIMESTAMPING_EN))) {
493 DP_NOTICE(edev,
494 "Tx timestamping was not enabled, this packet will not be timestamped\n");
495 } else if (unlikely(ptp->tx_skb)) {
496 DP_NOTICE(edev,
497 "The device supports only a single outstanding packet to timestamp, this packet will not be timestamped\n");
498 } else {
499 skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
500 /* schedule check for Tx timestamp */
501 ptp->tx_skb = skb_get(skb);
502 schedule_work(&ptp->work);
503 }
504}
505
506void qede_ptp_rx_ts(struct qede_dev *edev, struct sk_buff *skb)
507{
508 struct qede_ptp *ptp;
509 u64 timestamp, ns;
510 int rc;
511
512 ptp = edev->ptp;
513 if (!ptp)
514 return;
515
516 spin_lock_bh(&ptp->lock);
517 rc = ptp->ops->read_rx_ts(edev->cdev, &timestamp);
518 if (rc) {
519 spin_unlock_bh(&ptp->lock);
520 DP_INFO(edev, "Invalid Rx timestamp\n");
521 return;
522 }
523
524 ns = timecounter_cyc2time(&ptp->tc, timestamp);
525 spin_unlock_bh(&ptp->lock);
526 skb_hwtstamps(skb)->hwtstamp = ns_to_ktime(ns);
527 DP_VERBOSE(edev, QED_MSG_DEBUG,
528 "Rx timestamp, timestamp cycles = %llu, ns = %llu\n",
529 timestamp, ns);
530}