/* QLogic qed NIC Driver
 * Copyright (c) 2015-2017 QLogic Corporation
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include <linux/types.h>
#include "qed.h"
#include "qed_dev_api.h"
#include "qed_hw.h"
#include "qed_l2.h"
#include "qed_ptp.h"
#include "qed_reg_addr.h"

/* Number of 16 ns time quanta to wait before making a drift adjustment */
#define QED_DRIFT_CNTR_TIME_QUANTA_SHIFT        0
/* Nanoseconds to add/subtract when making a drift adjustment */
#define QED_DRIFT_CNTR_ADJUSTMENT_SHIFT         28
/* Add/subtract the adjustment value when making a drift adjustment */
#define QED_DRIFT_CNTR_DIRECTION_SHIFT          31
#define QED_TIMESTAMP_MASK                      BIT(16)
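/* The three shifts above describe the layout of the drift-counter
 * configuration word written to NIG_REG_TSGEN_DRIFT_CNTR_CONF in
 * qed_ptp_hw_adjfreq() below (layout inferred from that usage, not from a
 * hardware reference):
 *   bits  0-27 - drift period, in units of 16 ns time quanta
 *   bits 28-30 - adjustment value, in ns (0..7)
 *   bit     31 - adjustment direction (1 - add, 0 - subtract)
 */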

/* Read Rx timestamp */
static int qed_ptp_hw_read_rx_ts(struct qed_dev *cdev, u64 *timestamp)
{
        struct qed_hwfn *p_hwfn = QED_LEADING_HWFN(cdev);
        struct qed_ptt *p_ptt = p_hwfn->p_ptp_ptt;
        u32 val;

        *timestamp = 0;
        val = qed_rd(p_hwfn, p_ptt, NIG_REG_LLH_PTP_HOST_BUF_SEQID);
        if (!(val & QED_TIMESTAMP_MASK)) {
                DP_INFO(p_hwfn, "Invalid Rx timestamp, buf_seqid = %d\n", val);
                return -EINVAL;
        }

        val = qed_rd(p_hwfn, p_ptt, NIG_REG_LLH_PTP_HOST_BUF_TS_LSB);
        *timestamp = qed_rd(p_hwfn, p_ptt, NIG_REG_LLH_PTP_HOST_BUF_TS_MSB);
        *timestamp <<= 32;
        *timestamp |= val;

        /* Reset timestamp register to allow new timestamp */
        qed_wr(p_hwfn, p_ptt, NIG_REG_LLH_PTP_HOST_BUF_SEQID,
               QED_TIMESTAMP_MASK);

        return 0;
}

/* Read Tx timestamp */
static int qed_ptp_hw_read_tx_ts(struct qed_dev *cdev, u64 *timestamp)
{
        struct qed_hwfn *p_hwfn = QED_LEADING_HWFN(cdev);
        struct qed_ptt *p_ptt = p_hwfn->p_ptp_ptt;
        u32 val;

        *timestamp = 0;
        val = qed_rd(p_hwfn, p_ptt, NIG_REG_TX_LLH_PTP_BUF_SEQID);
        if (!(val & QED_TIMESTAMP_MASK)) {
                DP_INFO(p_hwfn, "Invalid Tx timestamp, buf_seqid = %d\n", val);
                return -EINVAL;
        }

        val = qed_rd(p_hwfn, p_ptt, NIG_REG_TX_LLH_PTP_BUF_TS_LSB);
        *timestamp = qed_rd(p_hwfn, p_ptt, NIG_REG_TX_LLH_PTP_BUF_TS_MSB);
        *timestamp <<= 32;
        *timestamp |= val;

        /* Reset timestamp register to allow new timestamp */
        qed_wr(p_hwfn, p_ptt, NIG_REG_TX_LLH_PTP_BUF_SEQID, QED_TIMESTAMP_MASK);

        return 0;
}

/* Read Phy Hardware Clock */
static int qed_ptp_hw_read_cc(struct qed_dev *cdev, u64 *phc_cycles)
{
        struct qed_hwfn *p_hwfn = QED_LEADING_HWFN(cdev);
        struct qed_ptt *p_ptt = p_hwfn->p_ptp_ptt;
        u32 temp = 0;

        temp = qed_rd(p_hwfn, p_ptt, NIG_REG_TSGEN_SYNC_TIME_LSB);
        *phc_cycles = qed_rd(p_hwfn, p_ptt, NIG_REG_TSGEN_SYNC_TIME_MSB);
        *phc_cycles <<= 32;
        *phc_cycles |= temp;

        return 0;
}

/* Filter PTP protocol packets that need to be timestamped */
static int qed_ptp_hw_cfg_rx_filters(struct qed_dev *cdev,
                                     enum qed_ptp_filter_type type)
{
        struct qed_hwfn *p_hwfn = QED_LEADING_HWFN(cdev);
        struct qed_ptt *p_ptt = p_hwfn->p_ptp_ptt;
        u32 rule_mask, parm_mask;

        switch (type) {
        case QED_PTP_FILTER_L2_IPV4_IPV6:
                parm_mask = 0x6AA;
                rule_mask = 0x3EEE;
                break;
        case QED_PTP_FILTER_L2:
                parm_mask = 0x6BF;
                rule_mask = 0x3EFF;
                break;
        case QED_PTP_FILTER_IPV4_IPV6:
                parm_mask = 0x7EA;
                rule_mask = 0x3FFE;
                break;
        case QED_PTP_FILTER_IPV4:
                parm_mask = 0x7EE;
                rule_mask = 0x3FFE;
                break;
        default:
                DP_INFO(p_hwfn, "Invalid PTP filter type %d\n", type);
                return -EINVAL;
        }

        qed_wr(p_hwfn, p_ptt, NIG_REG_LLH_PTP_PARAM_MASK, parm_mask);
        qed_wr(p_hwfn, p_ptt, NIG_REG_LLH_PTP_RULE_MASK, rule_mask);

        qed_wr(p_hwfn, p_ptt, NIG_REG_LLH_PTP_TO_HOST, 0x1);

        /* Reset possibly old timestamps */
        qed_wr(p_hwfn, p_ptt, NIG_REG_LLH_PTP_HOST_BUF_SEQID,
               QED_TIMESTAMP_MASK);

        return 0;
}

/* Adjust the HW clock by a rate given in parts-per-billion (ppb) units.
 * FW/HW accepts the adjustment value in terms of 3 parameters:
 *    Drift period - the adjustment happens once every given number of
 *                   nanoseconds.
 *    Drift value - time is adjusted by a certain value, for example by 5 ns.
 *    Drift direction - add or subtract the adjustment value.
 * The routine translates ppb into this adjustment triplet in an optimal manner.
 */
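/* Worked example (illustrative figures only, not taken from a hardware
 * reference): ppb = 100 means the clock must gain 100 ns every second. One
 * way to achieve that is to add the maximal 7 ns adjustment roughly every
 * 70 ms, i.e. a drift period of about 70,000,000 ns / 16 ns ~= 4,375,000
 * quanta, with the direction bit set to "add". The loop below evaluates
 * every adjustment value from 7 ns down to 1 ns and keeps the
 * (value, period) pair with the smallest relative approximation error.
 */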
static int qed_ptp_hw_adjfreq(struct qed_dev *cdev, s32 ppb)
{
        s64 best_val = 0, val, best_period = 0, period, approx_dev, dif, dif2;
        struct qed_hwfn *p_hwfn = QED_LEADING_HWFN(cdev);
        struct qed_ptt *p_ptt = p_hwfn->p_ptp_ptt;
        u32 drift_ctr_cfg = 0, drift_state;
        int drift_dir = 1;

        if (ppb < 0) {
                ppb = -ppb;
                drift_dir = 0;
        }

        if (ppb > 1) {
                s64 best_dif = ppb, best_approx_dev = 1;

                /* Adjustment value is up to +/-7ns, find an optimal value in
                 * this range.
                 */
                for (val = 7; val > 0; val--) {
                        period = div_s64(val * 1000000000, ppb);
                        period -= 8;
                        period >>= 4;
                        if (period < 1)
                                period = 1;
                        if (period > 0xFFFFFFE)
                                period = 0xFFFFFFE;

                        /* Check both rounding ends for approximate error */
                        approx_dev = period * 16 + 8;
                        dif = ppb * approx_dev - val * 1000000000;
                        dif2 = dif + 16 * ppb;

                        if (dif < 0)
                                dif = -dif;
                        if (dif2 < 0)
                                dif2 = -dif2;

                        /* Determine which end gives better approximation */
                        if (dif * (approx_dev + 16) > dif2 * approx_dev) {
                                period++;
                                approx_dev += 16;
                                dif = dif2;
                        }

                        /* Track best approximation found so far */
                        if (best_dif * approx_dev > dif * best_approx_dev) {
                                best_dif = dif;
                                best_val = val;
                                best_period = period;
                                best_approx_dev = approx_dev;
                        }
                }
        } else if (ppb == 1) {
                /* This is a special case as it's the only value which wouldn't
                 * fit in an s64 variable. In order to prevent casts, simply
                 * handle it separately.
                 */
                best_val = 4;
                best_period = 0xee6b27f;
        } else {
                best_val = 0;
                best_period = 0xFFFFFFF;
        }

        drift_ctr_cfg = (best_period << QED_DRIFT_CNTR_TIME_QUANTA_SHIFT) |
                        (((int)best_val) << QED_DRIFT_CNTR_ADJUSTMENT_SHIFT) |
                        (((int)drift_dir) << QED_DRIFT_CNTR_DIRECTION_SHIFT);

        qed_wr(p_hwfn, p_ptt, NIG_REG_TSGEN_RST_DRIFT_CNTR, 0x1);

        drift_state = qed_rd(p_hwfn, p_ptt, NIG_REG_TSGEN_RST_DRIFT_CNTR);
        if (drift_state & 1) {
                qed_wr(p_hwfn, p_ptt, NIG_REG_TSGEN_DRIFT_CNTR_CONF,
                       drift_ctr_cfg);
        } else {
                DP_INFO(p_hwfn, "Drift counter is not reset\n");
                return -EINVAL;
        }

        qed_wr(p_hwfn, p_ptt, NIG_REG_TSGEN_RST_DRIFT_CNTR, 0x0);

        return 0;
}

static int qed_ptp_hw_enable(struct qed_dev *cdev)
{
        struct qed_hwfn *p_hwfn = QED_LEADING_HWFN(cdev);
        struct qed_ptt *p_ptt = p_hwfn->p_ptp_ptt;

        /* Reset PTP event detection rules - will be configured in the IOCTL */
        qed_wr(p_hwfn, p_ptt, NIG_REG_LLH_PTP_PARAM_MASK, 0x7FF);
        qed_wr(p_hwfn, p_ptt, NIG_REG_LLH_PTP_RULE_MASK, 0x3FFF);
        qed_wr(p_hwfn, p_ptt, NIG_REG_TX_LLH_PTP_PARAM_MASK, 0x7FF);
        qed_wr(p_hwfn, p_ptt, NIG_REG_TX_LLH_PTP_RULE_MASK, 0x3FFF);

        qed_wr(p_hwfn, p_ptt, NIG_REG_TX_PTP_EN, 7);
        qed_wr(p_hwfn, p_ptt, NIG_REG_RX_PTP_EN, 7);

        qed_wr(p_hwfn, p_ptt, NIG_REG_TS_OUTPUT_ENABLE_PDA, 0x1);

        /* Pause free running counter */
        if (QED_IS_BB_B0(p_hwfn->cdev))
                qed_wr(p_hwfn, p_ptt, NIG_REG_TIMESYNC_GEN_REG_BB, 2);
        if (QED_IS_AH(p_hwfn->cdev))
                qed_wr(p_hwfn, p_ptt, NIG_REG_TSGEN_FREECNT_UPDATE_K2, 2);

        qed_wr(p_hwfn, p_ptt, NIG_REG_TSGEN_FREE_CNT_VALUE_LSB, 0);
        qed_wr(p_hwfn, p_ptt, NIG_REG_TSGEN_FREE_CNT_VALUE_MSB, 0);
        /* Resume free running counter */
        if (QED_IS_BB_B0(p_hwfn->cdev))
                qed_wr(p_hwfn, p_ptt, NIG_REG_TIMESYNC_GEN_REG_BB, 4);
        if (QED_IS_AH(p_hwfn->cdev)) {
                qed_wr(p_hwfn, p_ptt, NIG_REG_TSGEN_FREECNT_UPDATE_K2, 4);
                qed_wr(p_hwfn, p_ptt, NIG_REG_PTP_LATCH_OSTS_PKT_TIME, 1);
        }

        /* Disable drift register */
        qed_wr(p_hwfn, p_ptt, NIG_REG_TSGEN_DRIFT_CNTR_CONF, 0x0);
        qed_wr(p_hwfn, p_ptt, NIG_REG_TSGEN_RST_DRIFT_CNTR, 0x0);

        /* Reset possibly old timestamps */
        qed_wr(p_hwfn, p_ptt, NIG_REG_LLH_PTP_HOST_BUF_SEQID,
               QED_TIMESTAMP_MASK);
        qed_wr(p_hwfn, p_ptt, NIG_REG_TX_LLH_PTP_BUF_SEQID, QED_TIMESTAMP_MASK);

        return 0;
}

static int qed_ptp_hw_hwtstamp_tx_on(struct qed_dev *cdev)
{
        struct qed_hwfn *p_hwfn = QED_LEADING_HWFN(cdev);
        struct qed_ptt *p_ptt = p_hwfn->p_ptp_ptt;

        qed_wr(p_hwfn, p_ptt, NIG_REG_TX_LLH_PTP_PARAM_MASK, 0x6AA);
        qed_wr(p_hwfn, p_ptt, NIG_REG_TX_LLH_PTP_RULE_MASK, 0x3EEE);

        return 0;
}

static int qed_ptp_hw_disable(struct qed_dev *cdev)
{
        struct qed_hwfn *p_hwfn = QED_LEADING_HWFN(cdev);
        struct qed_ptt *p_ptt = p_hwfn->p_ptp_ptt;

        /* Reset PTP event detection rules */
        qed_wr(p_hwfn, p_ptt, NIG_REG_LLH_PTP_PARAM_MASK, 0x7FF);
        qed_wr(p_hwfn, p_ptt, NIG_REG_LLH_PTP_RULE_MASK, 0x3FFF);

        qed_wr(p_hwfn, p_ptt, NIG_REG_TX_LLH_PTP_PARAM_MASK, 0x7FF);
        qed_wr(p_hwfn, p_ptt, NIG_REG_TX_LLH_PTP_RULE_MASK, 0x3FFF);

        /* Disable the PTP feature */
        qed_wr(p_hwfn, p_ptt, NIG_REG_RX_PTP_EN, 0x0);
        qed_wr(p_hwfn, p_ptt, NIG_REG_TX_PTP_EN, 0x0);

        return 0;
}

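/* PTP HW-access callbacks exported to the protocol driver. A rough sketch of
 * the expected consumer-side usage; the exact wiring lives in the upper-layer
 * (qede) driver and is only assumed here, not defined by this file:
 *
 *      const struct qed_eth_ptp_ops *ops = <qed_eth_ops>->ptp;
 *      u64 ts, cycles;
 *
 *      ops->enable(cdev);
 *      ops->hwtstamp_tx_on(cdev);
 *      ops->cfg_rx_filters(cdev, QED_PTP_FILTER_IPV4_IPV6);
 *      ops->adjfreq(cdev, ppb);
 *      ops->read_cc(cdev, &cycles);
 *      ops->read_tx_ts(cdev, &ts);
 *      ops->read_rx_ts(cdev, &ts);
 *      ops->disable(cdev);
 */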
const struct qed_eth_ptp_ops qed_ptp_ops_pass = {
        .hwtstamp_tx_on = qed_ptp_hw_hwtstamp_tx_on,
        .cfg_rx_filters = qed_ptp_hw_cfg_rx_filters,
        .read_rx_ts = qed_ptp_hw_read_rx_ts,
        .read_tx_ts = qed_ptp_hw_read_tx_ts,
        .read_cc = qed_ptp_hw_read_cc,
        .adjfreq = qed_ptp_hw_adjfreq,
        .disable = qed_ptp_hw_disable,
        .enable = qed_ptp_hw_enable,
};