/*******************************************************************************

  Intel 10 Gigabit PCI Express Linux driver
  Copyright(c) 1999 - 2012 Intel Corporation.

  This program is free software; you can redistribute it and/or modify it
  under the terms and conditions of the GNU General Public License,
  version 2, as published by the Free Software Foundation.

  This program is distributed in the hope it will be useful, but WITHOUT
  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
  more details.

  You should have received a copy of the GNU General Public License along with
  this program; if not, write to the Free Software Foundation, Inc.,
  51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.

  The full GNU General Public License is included in this distribution in
  the file called "COPYING".

  Contact Information:
  e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
  Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497

*******************************************************************************/
#include "ixgbe.h"
#include <linux/export.h>
#include <linux/ptp_classify.h>

/*
 * The 82599 and the X540 do not have true 64bit nanosecond scale
 * counter registers. Instead, SYSTIME is defined by a fixed point
 * system which allows the user to define the scale counter increment
 * value at every level change of the oscillator driving the SYSTIME
 * value. For both devices the TIMINCA:IV field defines this
 * increment. The X540 provides 31 bits for it, while the 82599 only
 * provides 24 bits. The time unit is determined by the clock
 * frequency of the oscillator in combination with the TIMINCA
 * register. When these devices link at 10Gb the oscillator has a
 * period of 6.4ns. In order to convert the scale counter into
 * nanoseconds the cyclecounter and timecounter structures are
 * used. The SYSTIME registers need to be converted to ns values by use
 * of only a right shift (division by a power of 2). The following math
 * determines the largest incvalue that will fit into the available
 * bits in the TIMINCA register.
 *
 * PeriodWidth: Number of bits to store the clock period
 * MaxWidth: The maximum width value of the TIMINCA register
 * Period: The clock period for the oscillator
 * round(): discard the fractional portion of the calculation
 *
 * Period * [ 2 ^ ( MaxWidth - PeriodWidth ) ]
 *
 * For the X540, MaxWidth is 31 bits, and the base period is 6.4 ns
 * For the 82599, MaxWidth is 24 bits, and the base period is 6.4 ns
 *
 * The period also changes based on the link speed:
 * At 10Gb link or no link, the period remains the same.
 * At 1Gb link, the period is multiplied by 10. (64ns)
 * At 100Mb link, the period is multiplied by 100. (640ns)
 *
 * The calculated value allows us to right shift the SYSTIME register
 * value in order to quickly convert it into a nanosecond clock,
 * while allowing for the maximum possible adjustment value.
 *
 * These diagrams are only for the 10Gb link period
 *
 *            SYSTIMEH       SYSTIMEL
 *        +--------------+ +--------------+
 * X540   |      32      | | 1 | 3 |  28  |
 *        +--------------+ +--------------+
 *         \________ 36 bits ______/ fract
 *
 *        +--------------+ +--------------+
 * 82599  |      32      | | 8 | 3 |  21  |
 *        +--------------+ +--------------+
 *         \________ 43 bits ______/ fract
 *
 * The 36 bit X540 SYSTIME overflows every
 *   2^36 * 10^-9 / 60 = 1.14 minutes or 69 seconds
 *
 * The 43 bit 82599 SYSTIME overflows every
 *   2^43 * 10^-9 / 3600 = 2.4 hours
 */
#define IXGBE_INCVAL_10GB 0x66666666
#define IXGBE_INCVAL_1GB 0x40000000
#define IXGBE_INCVAL_100 0x50000000

#define IXGBE_INCVAL_SHIFT_10GB 28
#define IXGBE_INCVAL_SHIFT_1GB 24
#define IXGBE_INCVAL_SHIFT_100 21
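/*
 * Each incvalue above is just the link period scaled by 2^shift, so that
 * SYSTIME can be converted to nanoseconds with a plain right shift:
 *   10Gb:  round(6.4ns * 2^28) = 0x66666666
 *   1Gb:   64ns  * 2^24        = 0x40000000
 *   100Mb: 640ns * 2^21        = 0x50000000
 */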

#define IXGBE_INCVAL_SHIFT_82599 7
#define IXGBE_INCPER_SHIFT_82599 24
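/*
 * On the 82599, TIMINCA carries a 24 bit increment value plus an increment
 * period field starting at bit 24; the driver always programs a period of 1
 * and shifts the calculated incvalue down by 7 bits so that it fits.
 */
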
#define IXGBE_MAX_TIMEADJ_VALUE 0x7FFFFFFFFFFFFFFFULL

#define IXGBE_OVERFLOW_PERIOD (HZ * 30)
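/*
 * The 30 second period above keeps the timecounter read well inside the
 * ~69 second X540 SYSTIME wrap described earlier, so an overflow of the
 * system time registers cannot go unnoticed.
 */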

#ifndef NSECS_PER_SEC
#define NSECS_PER_SEC 1000000000ULL
#endif

static struct sock_filter ptp_filter[] = {
	PTP_FILTER
};

/**
 * ixgbe_ptp_enable_sdp
 * @adapter: private adapter structure
 *
 * this function enables the clock out feature on SDP0 for the
 * X540 device. It will create a 1 second periodic output that can be
 * used as the PPS (via an interrupt).
 *
 * It calculates when the systime will be on an exact second, and then
 * aligns the start of the PPS signal to that value. The clock shift is
 * taken from the cyclecounter because it can change based on the link
 * speed.
 */
static void ixgbe_ptp_enable_sdp(struct ixgbe_adapter *adapter)
{
	struct ixgbe_hw *hw = &adapter->hw;
	int shift = adapter->cc.shift;
	u32 esdp, tsauxc, clktiml, clktimh, trgttiml, trgttimh, rem;
	u64 ns = 0, clock_edge = 0;

	switch (hw->mac.type) {
	case ixgbe_mac_X540:
		esdp = IXGBE_READ_REG(hw, IXGBE_ESDP);

		/*
		 * enable the SDP0 pin as output, and connected to the native
		 * function for Timesync (ClockOut)
		 */
		esdp |= (IXGBE_ESDP_SDP0_DIR |
			 IXGBE_ESDP_SDP0_NATIVE);

		/*
		 * enable the Clock Out feature on SDP0, and allow interrupts
		 * to occur when the pin changes
		 */
		tsauxc = (IXGBE_TSAUXC_EN_CLK |
			  IXGBE_TSAUXC_SYNCLK |
			  IXGBE_TSAUXC_SDP0_INT);

		/* clock period (or pulse length) */
		clktiml = (u32)(NSECS_PER_SEC << shift);
		clktimh = (u32)((NSECS_PER_SEC << shift) >> 32);

		/*
		 * Account for the cyclecounter wrap-around value by
		 * using the converted ns value of the current time to
		 * check for when the next aligned second would occur.
		 */
		clock_edge |= (u64)IXGBE_READ_REG(hw, IXGBE_SYSTIML);
		clock_edge |= (u64)IXGBE_READ_REG(hw, IXGBE_SYSTIMH) << 32;
		ns = timecounter_cyc2time(&adapter->tc, clock_edge);

		div_u64_rem(ns, NSECS_PER_SEC, &rem);
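		/*
		 * rem is the number of ns already elapsed in the current
		 * second; (NSECS_PER_SEC - rem) is therefore the time until
		 * the next whole second, shifted back into SYSTIME cycle
		 * units before being added to the raw clock edge below.
		 */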
		clock_edge += ((NSECS_PER_SEC - (u64)rem) << shift);

		/* specify the initial clock start time */
		trgttiml = (u32)clock_edge;
		trgttimh = (u32)(clock_edge >> 32);

		IXGBE_WRITE_REG(hw, IXGBE_CLKTIML, clktiml);
		IXGBE_WRITE_REG(hw, IXGBE_CLKTIMH, clktimh);
		IXGBE_WRITE_REG(hw, IXGBE_TRGTTIML0, trgttiml);
		IXGBE_WRITE_REG(hw, IXGBE_TRGTTIMH0, trgttimh);

		IXGBE_WRITE_REG(hw, IXGBE_ESDP, esdp);
		IXGBE_WRITE_REG(hw, IXGBE_TSAUXC, tsauxc);

		IXGBE_WRITE_REG(hw, IXGBE_EIMS, IXGBE_EICR_TIMESYNC);
		IXGBE_WRITE_FLUSH(hw);
		break;
	default:
		break;
	}
}

/**
 * ixgbe_ptp_disable_sdp
 * @hw: the private hardware structure
 *
 * this function disables the auxiliary SDP clock out feature
 */
static void ixgbe_ptp_disable_sdp(struct ixgbe_hw *hw)
{
	IXGBE_WRITE_REG(hw, IXGBE_EIMC, IXGBE_EICR_TIMESYNC);
	IXGBE_WRITE_REG(hw, IXGBE_TSAUXC, 0);
	IXGBE_WRITE_FLUSH(hw);
}

/**
 * ixgbe_ptp_read - read raw cycle counter (to be used by time counter)
 * @cc: the cyclecounter structure
 *
 * this function reads the cyclecounter registers and is called by the
 * cyclecounter structure used to construct a ns counter from the
 * arbitrary fixed point registers
 */
static cycle_t ixgbe_ptp_read(const struct cyclecounter *cc)
{
	struct ixgbe_adapter *adapter =
		container_of(cc, struct ixgbe_adapter, cc);
	struct ixgbe_hw *hw = &adapter->hw;
	u64 stamp = 0;

	stamp |= (u64)IXGBE_READ_REG(hw, IXGBE_SYSTIML);
	stamp |= (u64)IXGBE_READ_REG(hw, IXGBE_SYSTIMH) << 32;

	return stamp;
}

/**
 * ixgbe_ptp_adjfreq
 * @ptp: the ptp clock structure
 * @ppb: parts per billion adjustment from base
 *
 * adjust the frequency of the ptp cycle counter by the
 * indicated ppb from the base frequency.
 */
static int ixgbe_ptp_adjfreq(struct ptp_clock_info *ptp, s32 ppb)
{
	struct ixgbe_adapter *adapter =
		container_of(ptp, struct ixgbe_adapter, ptp_caps);
	struct ixgbe_hw *hw = &adapter->hw;
	u64 freq;
	u32 diff, incval;
	int neg_adj = 0;

	if (ppb < 0) {
		neg_adj = 1;
		ppb = -ppb;
	}

	smp_mb();
	incval = ACCESS_ONCE(adapter->base_incval);

	freq = incval;
	freq *= ppb;
	diff = div_u64(freq, 1000000000ULL);
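	/*
	 * e.g. at the 10Gb base incval of 0x66666666, a +100 ppb request
	 * gives diff = 0x66666666 * 100 / 10^9 = 171, nudging the value
	 * added to SYSTIME on every oscillator tick by 171 counts.
	 */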

	incval = neg_adj ? (incval - diff) : (incval + diff);

	switch (hw->mac.type) {
	case ixgbe_mac_X540:
		IXGBE_WRITE_REG(hw, IXGBE_TIMINCA, incval);
		break;
	case ixgbe_mac_82599EB:
		IXGBE_WRITE_REG(hw, IXGBE_TIMINCA,
				(1 << IXGBE_INCPER_SHIFT_82599) |
				incval);
		break;
	default:
		break;
	}

	return 0;
}

/**
 * ixgbe_ptp_adjtime
 * @ptp: the ptp clock structure
 * @delta: offset to adjust the cycle counter by
 *
 * adjust the timer by resetting the timecounter structure.
 */
static int ixgbe_ptp_adjtime(struct ptp_clock_info *ptp, s64 delta)
{
	struct ixgbe_adapter *adapter =
		container_of(ptp, struct ixgbe_adapter, ptp_caps);
	unsigned long flags;
	u64 now;

	ixgbe_ptp_disable_sdp(&adapter->hw);
	spin_lock_irqsave(&adapter->tmreg_lock, flags);

	now = timecounter_read(&adapter->tc);
	now += delta;

	/* reset the timecounter */
	timecounter_init(&adapter->tc,
			 &adapter->cc,
			 now);

	spin_unlock_irqrestore(&adapter->tmreg_lock, flags);
	ixgbe_ptp_enable_sdp(adapter);

	return 0;
}

/**
 * ixgbe_ptp_gettime
 * @ptp: the ptp clock structure
 * @ts: timespec structure to hold the current time value
 *
 * read the timecounter and return the correct value in ns,
 * after converting it into a struct timespec.
 */
static int ixgbe_ptp_gettime(struct ptp_clock_info *ptp, struct timespec *ts)
{
	struct ixgbe_adapter *adapter =
		container_of(ptp, struct ixgbe_adapter, ptp_caps);
	u64 ns;
	u32 remainder;
	unsigned long flags;

	spin_lock_irqsave(&adapter->tmreg_lock, flags);
	ns = timecounter_read(&adapter->tc);
	spin_unlock_irqrestore(&adapter->tmreg_lock, flags);

	ts->tv_sec = div_u64_rem(ns, 1000000000ULL, &remainder);
	ts->tv_nsec = remainder;

	return 0;
}

/**
 * ixgbe_ptp_settime
 * @ptp: the ptp clock structure
 * @ts: the timespec containing the new time for the cycle counter
 *
 * reset the timecounter to use a new base value instead of the kernel
 * wall timer value.
 */
static int ixgbe_ptp_settime(struct ptp_clock_info *ptp,
			     const struct timespec *ts)
{
	struct ixgbe_adapter *adapter =
		container_of(ptp, struct ixgbe_adapter, ptp_caps);
	u64 ns;
	unsigned long flags;

	ns = ts->tv_sec * 1000000000ULL;
	ns += ts->tv_nsec;

	ixgbe_ptp_disable_sdp(&adapter->hw);

	/* reset the timecounter */
	spin_lock_irqsave(&adapter->tmreg_lock, flags);
	timecounter_init(&adapter->tc, &adapter->cc, ns);
	spin_unlock_irqrestore(&adapter->tmreg_lock, flags);

	ixgbe_ptp_enable_sdp(adapter);
	return 0;
}

/**
 * ixgbe_ptp_enable
 * @ptp: the ptp clock structure
 * @rq: the requested feature to change
 * @on: whether to enable or disable the feature
 *
 * enable (or disable) ancillary features of the phc subsystem.
 * our driver only supports the PPS feature on the X540
 */
static int ixgbe_ptp_enable(struct ptp_clock_info *ptp,
			    struct ptp_clock_request *rq, int on)
{
	struct ixgbe_adapter *adapter =
		container_of(ptp, struct ixgbe_adapter, ptp_caps);

	/**
	 * When PPS is enabled, unmask the interrupt for the ClockOut
	 * feature, so that the interrupt handler can send the PPS
	 * event when the clock SDP triggers. Clear mask when PPS is
	 * disabled
	 */
	if (rq->type == PTP_CLK_REQ_PPS) {
		switch (adapter->hw.mac.type) {
		case ixgbe_mac_X540:
			if (on)
				adapter->flags2 |= IXGBE_FLAG2_PTP_PPS_ENABLED;
			else
				adapter->flags2 &=
					~IXGBE_FLAG2_PTP_PPS_ENABLED;
			return 0;
		default:
			break;
		}
	}

	return -ENOTSUPP;
}

/**
 * ixgbe_ptp_check_pps_event
 * @adapter: the private adapter structure
 * @eicr: the interrupt cause register value
 *
 * This function is called by the interrupt routine when checking for
 * interrupts. It will check and handle a pps event.
 */
void ixgbe_ptp_check_pps_event(struct ixgbe_adapter *adapter, u32 eicr)
{
	struct ixgbe_hw *hw = &adapter->hw;
	struct ptp_clock_event event;

	event.type = PTP_CLOCK_PPS;

	/* Make sure ptp clock is valid, and PPS event enabled */
	if (!adapter->ptp_clock ||
	    !(adapter->flags2 & IXGBE_FLAG2_PTP_PPS_ENABLED))
		return;

	if (unlikely(eicr & IXGBE_EICR_TIMESYNC)) {
		switch (hw->mac.type) {
		case ixgbe_mac_X540:
			ptp_clock_event(adapter->ptp_clock, &event);
			break;
		default:
			break;
		}
	}
}

/**
 * ixgbe_ptp_overflow_check - delayed work to detect SYSTIME overflow
 * @adapter: private adapter structure
 *
 * this work function is scheduled to continue reading the timecounter
 * in order to avoid missing an overflow when the system time registers
 * wrap around. This needs to be run approximately twice a minute when
 * no PTP activity is occurring.
 */
void ixgbe_ptp_overflow_check(struct ixgbe_adapter *adapter)
{
	unsigned long elapsed_jiffies = jiffies - adapter->last_overflow_check;
	struct timespec ts;

	if ((adapter->flags2 & IXGBE_FLAG2_OVERFLOW_CHECK_ENABLED) &&
	    (elapsed_jiffies >= IXGBE_OVERFLOW_PERIOD)) {
		ixgbe_ptp_gettime(&adapter->ptp_caps, &ts);
		adapter->last_overflow_check = jiffies;
	}
}

/**
 * ixgbe_ptp_match - determine if this skb matches a ptp packet
 * @skb: pointer to the skb
 * @rx_filter: the rx filter value from the hwtstamp config
 *
 * Determine whether the skb should have been timestamped, assuming the
 * hwtstamp was set via the hwtstamp ioctl. Returns non-zero when the packet
 * should have a timestamp waiting in the registers, and 0 otherwise.
 *
 * V1 packets have to check the version type to determine whether they are
 * correct. However, we can't directly access the data because it might be
 * fragmented in the SKB, in paged memory. In order to work around this, we
 * use skb_copy_bits which will properly copy the data whether it is in the
 * paged memory fragments or not. We have to copy the IP header as well as the
 * message type.
 */
static int ixgbe_ptp_match(struct sk_buff *skb, int rx_filter)
{
	struct iphdr iph;
	u8 msgtype;
	unsigned int type, offset;

	if (rx_filter == HWTSTAMP_FILTER_NONE)
		return 0;

	type = sk_run_filter(skb, ptp_filter);
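	/*
	 * type is the PTP_CLASS_* classification (PTP version and transport)
	 * produced by running the BPF based ptp_filter over the packet.
	 */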

	if (likely(rx_filter == HWTSTAMP_FILTER_PTP_V2_EVENT))
		return type & PTP_CLASS_V2;

	/* For the remaining cases actually check message type */
	switch (type) {
	case PTP_CLASS_V1_IPV4:
		skb_copy_bits(skb, OFF_IHL, &iph, sizeof(iph));
		offset = ETH_HLEN + (iph.ihl << 2) + UDP_HLEN + OFF_PTP_CONTROL;
		break;
	case PTP_CLASS_V1_IPV6:
		offset = OFF_PTP6 + OFF_PTP_CONTROL;
		break;
	default:
		/* other cases invalid or handled above */
		return 0;
	}

	/* Make sure our buffer is long enough to hold the message type */
	if (skb->len < offset + sizeof(msgtype))
		return 0;

	skb_copy_bits(skb, offset, &msgtype, sizeof(msgtype));

	switch (rx_filter) {
	case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
		return (msgtype == IXGBE_RXMTRL_V1_SYNC_MSG);
		break;
	case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
		return (msgtype == IXGBE_RXMTRL_V1_DELAY_REQ_MSG);
		break;
	default:
		return 0;
	}
}

/**
 * ixgbe_ptp_tx_hwtstamp - utility function which checks for TX time stamp
 * @q_vector: structure containing interrupt and ring information
 * @skb: particular skb to send timestamp with
 *
 * if the timestamp is valid, we convert it into the timecounter ns
 * value, then store that result into the shhwtstamps structure which
 * is passed up the network stack
 */
void ixgbe_ptp_tx_hwtstamp(struct ixgbe_q_vector *q_vector,
			   struct sk_buff *skb)
{
	struct ixgbe_adapter *adapter;
	struct ixgbe_hw *hw;
	struct skb_shared_hwtstamps shhwtstamps;
	u64 regval = 0, ns;
	u32 tsynctxctl;
	unsigned long flags;

	/* we cannot process timestamps on a ring without a q_vector */
	if (!q_vector || !q_vector->adapter)
		return;

	adapter = q_vector->adapter;
	hw = &adapter->hw;

	tsynctxctl = IXGBE_READ_REG(hw, IXGBE_TSYNCTXCTL);
	regval |= (u64)IXGBE_READ_REG(hw, IXGBE_TXSTMPL);
	regval |= (u64)IXGBE_READ_REG(hw, IXGBE_TXSTMPH) << 32;

	/*
	 * if TX timestamp is not valid, exit after clearing the
	 * timestamp registers
	 */
	if (!(tsynctxctl & IXGBE_TSYNCTXCTL_VALID))
		return;

	spin_lock_irqsave(&adapter->tmreg_lock, flags);
	ns = timecounter_cyc2time(&adapter->tc, regval);
	spin_unlock_irqrestore(&adapter->tmreg_lock, flags);

	memset(&shhwtstamps, 0, sizeof(shhwtstamps));
	shhwtstamps.hwtstamp = ns_to_ktime(ns);
	skb_tstamp_tx(skb, &shhwtstamps);
}

/**
 * ixgbe_ptp_rx_hwtstamp - utility function which checks for RX time stamp
 * @q_vector: structure containing interrupt and ring information
 * @rx_desc: the rx descriptor
 * @skb: particular skb to send timestamp with
 *
 * if the timestamp is valid, we convert it into the timecounter ns
 * value, then store that result into the shhwtstamps structure which
 * is passed up the network stack
 */
void ixgbe_ptp_rx_hwtstamp(struct ixgbe_q_vector *q_vector,
			   union ixgbe_adv_rx_desc *rx_desc,
			   struct sk_buff *skb)
{
	struct ixgbe_adapter *adapter;
	struct ixgbe_hw *hw;
	struct skb_shared_hwtstamps *shhwtstamps;
	u64 regval = 0, ns;
	u32 tsyncrxctl;
	unsigned long flags;

	/* we cannot process timestamps on a ring without a q_vector */
	if (!q_vector || !q_vector->adapter)
		return;

	adapter = q_vector->adapter;
	hw = &adapter->hw;

	tsyncrxctl = IXGBE_READ_REG(hw, IXGBE_TSYNCRXCTL);

	/* Check if we have a valid timestamp and make sure the skb should
	 * have been timestamped */
	if (likely(!(tsyncrxctl & IXGBE_TSYNCRXCTL_VALID) ||
		   !ixgbe_ptp_match(skb, adapter->rx_hwtstamp_filter)))
		return;

	/*
	 * Always read the registers, in order to clear a possible fault
	 * because of stagnant RX timestamp values for a packet that never
	 * reached the queue.
	 */
	regval |= (u64)IXGBE_READ_REG(hw, IXGBE_RXSTMPL);
	regval |= (u64)IXGBE_READ_REG(hw, IXGBE_RXSTMPH) << 32;

	/*
	 * If the timestamp bit is set in the packet's descriptor, we know the
	 * timestamp belongs to this packet. No other packet can be
	 * timestamped until the registers for timestamping have been read.
	 * Therefore only one packet with this bit can be in the queue at a
	 * time, and the rx timestamp values that were in the registers belong
	 * to this packet.
	 *
	 * If nothing went wrong, then it should have a skb_shared_tx that we
	 * can turn into a skb_shared_hwtstamps.
	 */
	if (unlikely(!ixgbe_test_staterr(rx_desc, IXGBE_RXDADV_STAT_TS)))
		return;

	spin_lock_irqsave(&adapter->tmreg_lock, flags);
	ns = timecounter_cyc2time(&adapter->tc, regval);
	spin_unlock_irqrestore(&adapter->tmreg_lock, flags);

	shhwtstamps = skb_hwtstamps(skb);
	shhwtstamps->hwtstamp = ns_to_ktime(ns);
}

/**
 * ixgbe_ptp_hwtstamp_ioctl - control hardware time stamping
 * @adapter: pointer to adapter struct
 * @ifr: the ioctl data
 * @cmd: particular ioctl requested
 *
 * Outgoing time stamping can be enabled and disabled. Play nice and
 * disable it when requested, although it shouldn't cause any overhead
 * when no packet needs it. At most one packet in the queue may be
 * marked for time stamping, otherwise it would be impossible to tell
 * for sure to which packet the hardware time stamp belongs.
 *
 * Incoming time stamping has to be configured via the hardware
 * filters. Not all combinations are supported, in particular event
 * type has to be specified. Matching the kind of event packet is
 * not supported, with the exception of "all V2 events regardless of
 * level 2 or 4".
 *
 * Since hardware always timestamps Path delay packets when timestamping V2
 * packets, regardless of the type specified in the register, only use V2
 * Event mode. This more accurately tells the user what the hardware is going
 * to do anyways.
 */
int ixgbe_ptp_hwtstamp_ioctl(struct ixgbe_adapter *adapter,
			     struct ifreq *ifr, int cmd)
{
	struct ixgbe_hw *hw = &adapter->hw;
	struct hwtstamp_config config;
	u32 tsync_tx_ctl = IXGBE_TSYNCTXCTL_ENABLED;
	u32 tsync_rx_ctl = IXGBE_TSYNCRXCTL_ENABLED;
	u32 tsync_rx_mtrl = 0;
	bool is_l4 = false;
	bool is_l2 = false;
	u32 regval;

	if (copy_from_user(&config, ifr->ifr_data, sizeof(config)))
		return -EFAULT;

	/* reserved for future extensions */
	if (config.flags)
		return -EINVAL;

	switch (config.tx_type) {
	case HWTSTAMP_TX_OFF:
		tsync_tx_ctl = 0;
	case HWTSTAMP_TX_ON:
		break;
	default:
		return -ERANGE;
	}

	switch (config.rx_filter) {
	case HWTSTAMP_FILTER_NONE:
		tsync_rx_ctl = 0;
		break;
	case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
		tsync_rx_ctl |= IXGBE_TSYNCRXCTL_TYPE_L4_V1;
		tsync_rx_mtrl = IXGBE_RXMTRL_V1_SYNC_MSG;
		is_l4 = true;
		break;
	case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
		tsync_rx_ctl |= IXGBE_TSYNCRXCTL_TYPE_L4_V1;
		tsync_rx_mtrl = IXGBE_RXMTRL_V1_DELAY_REQ_MSG;
		is_l4 = true;
		break;
	case HWTSTAMP_FILTER_PTP_V2_EVENT:
	case HWTSTAMP_FILTER_PTP_V2_L2_EVENT:
	case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
	case HWTSTAMP_FILTER_PTP_V2_SYNC:
	case HWTSTAMP_FILTER_PTP_V2_L2_SYNC:
	case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
	case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
	case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ:
	case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
		tsync_rx_ctl |= IXGBE_TSYNCRXCTL_TYPE_EVENT_V2;
		is_l2 = true;
		is_l4 = true;
		config.rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT;
		break;
	case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
	case HWTSTAMP_FILTER_ALL:
	default:
		/*
		 * register RXMTRL must be set to a single message type for V1 packets,
		 * therefore it is not possible to time stamp both V1 Sync and
		 * Delay_Req messages and hardware does not support
		 * timestamping all packets => return error
		 */
		config.rx_filter = HWTSTAMP_FILTER_NONE;
		return -ERANGE;
	}

	if (hw->mac.type == ixgbe_mac_82598EB) {
		if (tsync_rx_ctl | tsync_tx_ctl)
			return -ERANGE;
		return 0;
	}

	/* Store filter value for later use */
	adapter->rx_hwtstamp_filter = config.rx_filter;

	/* define ethertype filter for timestamped packets */
	if (is_l2)
		IXGBE_WRITE_REG(hw, IXGBE_ETQF(3),
				(IXGBE_ETQF_FILTER_EN | /* enable filter */
				 IXGBE_ETQF_1588 | /* enable timestamping */
				 ETH_P_1588));     /* 1588 eth protocol type */
	else
		IXGBE_WRITE_REG(hw, IXGBE_ETQF(3), 0);

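/* PTP event messages (Sync, Delay_Req, ...) are sent to UDP port 319 */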
#define PTP_PORT 319
	/* L4 Queue Filter[3]: filter by destination port and protocol */
	if (is_l4) {
		u32 ftqf = (IXGBE_FTQF_PROTOCOL_UDP /* UDP */
			    | IXGBE_FTQF_POOL_MASK_EN /* Pool not compared */
			    | IXGBE_FTQF_QUEUE_ENABLE);

		ftqf |= ((IXGBE_FTQF_PROTOCOL_COMP_MASK /* protocol check */
			  & IXGBE_FTQF_DEST_PORT_MASK /* dest check */
			  & IXGBE_FTQF_SOURCE_PORT_MASK) /* source check */
			 << IXGBE_FTQF_5TUPLE_MASK_SHIFT);

		IXGBE_WRITE_REG(hw, IXGBE_L34T_IMIR(3),
				(3 << IXGBE_IMIR_RX_QUEUE_SHIFT_82599 |
				 IXGBE_IMIR_SIZE_BP_82599));

		/* enable port check */
		IXGBE_WRITE_REG(hw, IXGBE_SDPQF(3),
				(htons(PTP_PORT) |
				 htons(PTP_PORT) << 16));

		IXGBE_WRITE_REG(hw, IXGBE_FTQF(3), ftqf);

		tsync_rx_mtrl |= PTP_PORT << 16;
	} else {
		IXGBE_WRITE_REG(hw, IXGBE_FTQF(3), 0);
	}

	/* enable/disable TX */
	regval = IXGBE_READ_REG(hw, IXGBE_TSYNCTXCTL);
	regval &= ~IXGBE_TSYNCTXCTL_ENABLED;
	regval |= tsync_tx_ctl;
	IXGBE_WRITE_REG(hw, IXGBE_TSYNCTXCTL, regval);

	/* enable/disable RX */
	regval = IXGBE_READ_REG(hw, IXGBE_TSYNCRXCTL);
	regval &= ~(IXGBE_TSYNCRXCTL_ENABLED | IXGBE_TSYNCRXCTL_TYPE_MASK);
	regval |= tsync_rx_ctl;
	IXGBE_WRITE_REG(hw, IXGBE_TSYNCRXCTL, regval);

	/* define which PTP packets are time stamped */
	IXGBE_WRITE_REG(hw, IXGBE_RXMTRL, tsync_rx_mtrl);

	IXGBE_WRITE_FLUSH(hw);

	/* clear TX/RX time stamp registers, just to be sure */
	regval = IXGBE_READ_REG(hw, IXGBE_TXSTMPH);
	regval = IXGBE_READ_REG(hw, IXGBE_RXSTMPH);

	return copy_to_user(ifr->ifr_data, &config, sizeof(config)) ?
		-EFAULT : 0;
}

/**
 * ixgbe_ptp_start_cyclecounter - create the cycle counter from hw
 * @adapter: pointer to the adapter structure
 *
 * this function initializes the timecounter and cyclecounter
 * structures for use in generating a ns counter from the arbitrary
 * fixed point cycles registers in the hardware.
 *
 * A change in link speed impacts the frequency of the DMA clock on
 * the device, which is used to generate the cycle counter
 * registers. Therefore this function is called whenever the link speed
 * changes.
 *
 * This function also turns on the SDP pin for clock out feature (X540
 * only), because this is where the shift is first calculated.
 */
void ixgbe_ptp_start_cyclecounter(struct ixgbe_adapter *adapter)
{
	struct ixgbe_hw *hw = &adapter->hw;
	u32 incval = 0;
	u32 timinca = 0;
	u32 shift = 0;
	u32 cycle_speed;
	unsigned long flags;

	/**
	 * Determine what speed we need to set the cyclecounter
	 * for. It should be different for 100Mb, 1Gb, and 10Gb. Treat
	 * unknown speeds as 10Gb. (Hence we can't just copy the
	 * link_speed.)
	 */
	switch (adapter->link_speed) {
	case IXGBE_LINK_SPEED_100_FULL:
	case IXGBE_LINK_SPEED_1GB_FULL:
	case IXGBE_LINK_SPEED_10GB_FULL:
		cycle_speed = adapter->link_speed;
		break;
	default:
		/* cycle speed should be 10Gb when there is no link */
		cycle_speed = IXGBE_LINK_SPEED_10GB_FULL;
		break;
	}

	/*
	 * grab the current TIMINCA value from the register so that it can be
	 * double checked. If the register value has been cleared, it must be
	 * reset to the correct value for generating a cyclecounter. If
	 * TIMINCA is zero, the SYSTIME registers do not increment at all.
	 */
	timinca = IXGBE_READ_REG(hw, IXGBE_TIMINCA);

	/* Bail if the cycle speed didn't change and TIMINCA is non-zero */
	if (adapter->cycle_speed == cycle_speed && timinca)
		return;

	/* disable the SDP clock out */
	ixgbe_ptp_disable_sdp(hw);

	/**
	 * Scale the NIC cycle counter by a large factor so that
	 * relatively small corrections to the frequency can be added
	 * or subtracted. The drawbacks of a large factor include
	 * (a) the clock register overflows more quickly, (b) the cycle
	 * counter structure must be able to convert the systime value
	 * to nanoseconds using only a multiplier and a right-shift,
	 * and (c) the value must fit within the timinca register space
	 * => math based on internal DMA clock rate and available bits
	 */
	switch (cycle_speed) {
	case IXGBE_LINK_SPEED_100_FULL:
		incval = IXGBE_INCVAL_100;
		shift = IXGBE_INCVAL_SHIFT_100;
		break;
	case IXGBE_LINK_SPEED_1GB_FULL:
		incval = IXGBE_INCVAL_1GB;
		shift = IXGBE_INCVAL_SHIFT_1GB;
		break;
	case IXGBE_LINK_SPEED_10GB_FULL:
		incval = IXGBE_INCVAL_10GB;
		shift = IXGBE_INCVAL_SHIFT_10GB;
		break;
	}

	/**
	 * Modify the calculated values to fit within the correct
	 * number of bits specified by the hardware. The 82599 doesn't
	 * have the same space as the X540, so bitshift the calculated
	 * values to fit.
	 */
	switch (hw->mac.type) {
	case ixgbe_mac_X540:
		IXGBE_WRITE_REG(hw, IXGBE_TIMINCA, incval);
		break;
	case ixgbe_mac_82599EB:
		incval >>= IXGBE_INCVAL_SHIFT_82599;
		shift -= IXGBE_INCVAL_SHIFT_82599;
		IXGBE_WRITE_REG(hw, IXGBE_TIMINCA,
				(1 << IXGBE_INCPER_SHIFT_82599) |
				incval);
		break;
	default:
		/* other devices aren't supported */
		return;
	}

	/* reset the system time registers */
	IXGBE_WRITE_REG(hw, IXGBE_SYSTIML, 0x00000000);
	IXGBE_WRITE_REG(hw, IXGBE_SYSTIMH, 0x00000000);
	IXGBE_WRITE_FLUSH(hw);

	/* store the new cycle speed */
	adapter->cycle_speed = cycle_speed;

	ACCESS_ONCE(adapter->base_incval) = incval;
	smp_mb();

	/* grab the ptp lock */
	spin_lock_irqsave(&adapter->tmreg_lock, flags);

	memset(&adapter->cc, 0, sizeof(adapter->cc));
	adapter->cc.read = ixgbe_ptp_read;
	adapter->cc.mask = CLOCKSOURCE_MASK(64);
	adapter->cc.shift = shift;
	adapter->cc.mult = 1;
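	/*
	 * with mult fixed at 1, the cyclecounter converts SYSTIME to ns
	 * purely by the right shift chosen above, which is exactly what
	 * the incvalue scaling programmed into TIMINCA assumes.
	 */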

	/* reset the ns time counter */
	timecounter_init(&adapter->tc, &adapter->cc,
			 ktime_to_ns(ktime_get_real()));

	spin_unlock_irqrestore(&adapter->tmreg_lock, flags);

	/* Now that the shift has been calculated and the systime
	 * registers reset, (re-)enable the Clock out feature
	 */
	ixgbe_ptp_enable_sdp(adapter);
}

/**
 * ixgbe_ptp_init
 * @adapter: the ixgbe private adapter structure
 *
 * This function performs the required steps for enabling ptp
 * support. If ptp support has already been loaded it simply calls the
 * cyclecounter init routine and exits.
 */
void ixgbe_ptp_init(struct ixgbe_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;

	switch (adapter->hw.mac.type) {
	case ixgbe_mac_X540:
		snprintf(adapter->ptp_caps.name, 16, "%pm", netdev->dev_addr);
		adapter->ptp_caps.owner = THIS_MODULE;
		adapter->ptp_caps.max_adj = 250000000;
		adapter->ptp_caps.n_alarm = 0;
		adapter->ptp_caps.n_ext_ts = 0;
		adapter->ptp_caps.n_per_out = 0;
		adapter->ptp_caps.pps = 1;
		adapter->ptp_caps.adjfreq = ixgbe_ptp_adjfreq;
		adapter->ptp_caps.adjtime = ixgbe_ptp_adjtime;
		adapter->ptp_caps.gettime = ixgbe_ptp_gettime;
		adapter->ptp_caps.settime = ixgbe_ptp_settime;
		adapter->ptp_caps.enable = ixgbe_ptp_enable;
		break;
	case ixgbe_mac_82599EB:
		snprintf(adapter->ptp_caps.name, 16, "%pm", netdev->dev_addr);
		adapter->ptp_caps.owner = THIS_MODULE;
		adapter->ptp_caps.max_adj = 250000000;
		adapter->ptp_caps.n_alarm = 0;
		adapter->ptp_caps.n_ext_ts = 0;
		adapter->ptp_caps.n_per_out = 0;
		adapter->ptp_caps.pps = 0;
		adapter->ptp_caps.adjfreq = ixgbe_ptp_adjfreq;
		adapter->ptp_caps.adjtime = ixgbe_ptp_adjtime;
		adapter->ptp_caps.gettime = ixgbe_ptp_gettime;
		adapter->ptp_caps.settime = ixgbe_ptp_settime;
		adapter->ptp_caps.enable = ixgbe_ptp_enable;
		break;
	default:
		adapter->ptp_clock = NULL;
		return;
	}

	/* initialize the ptp filter */
	if (ptp_filter_init(ptp_filter, ARRAY_SIZE(ptp_filter)))
		e_dev_warn("ptp_filter_init failed\n");

	spin_lock_init(&adapter->tmreg_lock);

	ixgbe_ptp_start_cyclecounter(adapter);

	/* (Re)start the overflow check */
	adapter->flags2 |= IXGBE_FLAG2_OVERFLOW_CHECK_ENABLED;

	adapter->ptp_clock = ptp_clock_register(&adapter->ptp_caps,
						&adapter->pdev->dev);
	if (IS_ERR(adapter->ptp_clock)) {
		adapter->ptp_clock = NULL;
		e_dev_err("ptp_clock_register failed\n");
	} else
		e_dev_info("registered PHC device on %s\n", netdev->name);

	return;
}

/**
 * ixgbe_ptp_stop - disable ptp device and stop the overflow check
 * @adapter: pointer to adapter struct
 *
 * this function stops the ptp support, and cancels the delayed work.
 */
void ixgbe_ptp_stop(struct ixgbe_adapter *adapter)
{
	ixgbe_ptp_disable_sdp(&adapter->hw);

	/* stop the overflow check task */
	adapter->flags2 &= ~IXGBE_FLAG2_OVERFLOW_CHECK_ENABLED;

	if (adapter->ptp_clock) {
		ptp_clock_unregister(adapter->ptp_clock);
		adapter->ptp_clock = NULL;
		e_dev_info("removed PHC on %s\n",
			   adapter->netdev->name);
	}
}