/*
 *
 * This file is provided under a dual BSD/GPLv2 license.  When using or
 * redistributing this file, you may do so under either license.
 *
 * GPL LICENSE SUMMARY
 *
 * Copyright(c) 2015 Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * BSD LICENSE
 *
 * Copyright(c) 2015 Intel Corporation.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 *  - Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 *  - Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 *  - Neither the name of Intel Corporation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 */

#include <linux/delay.h>
#include <linux/pci.h>
#include <linux/vmalloc.h>

#include "hfi.h"
#include "twsi.h"

/*
 * QSFP support for hfi driver, using "Two Wire Serial Interface" driver
 * in twsi.c
 */
#define I2C_MAX_RETRY 4

/*
 * Unlocked i2c write. Must hold dd->qsfp_i2c_mutex.
 */
static int __i2c_write(struct hfi1_pportdata *ppd, u32 target, int i2c_addr,
		       int offset, void *bp, int len)
{
	struct hfi1_devdata *dd = ppd->dd;
	int ret, cnt;
	u8 *buff = bp;

	/* Make sure TWSI bus is in sane state. */
	ret = hfi1_twsi_reset(dd, target);
	if (ret) {
		hfi1_dev_porterr(dd, ppd->port,
				 "I2C interface Reset for write failed\n");
		return -EIO;
	}

	cnt = 0;
	while (cnt < len) {
		int wlen = len - cnt;

		ret = hfi1_twsi_blk_wr(dd, target, i2c_addr, offset,
				       buff + cnt, wlen);
		if (ret) {
			/* hfi1_twsi_blk_wr() returns 1 on error, else 0 */
			return -EIO;
		}
		offset += wlen;
		cnt += wlen;
	}

	/* Must wait min 20us between qsfp i2c transactions */
	udelay(20);

	return cnt;
}
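
/*
 * Both unlocked helpers return the number of bytes transferred on
 * success and -EIO on failure, so callers such as qsfp_write() and
 * qsfp_read() below can treat any return value other than the
 * requested length (e.g. 1 for the page-select byte) as an error.
 */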

int i2c_write(struct hfi1_pportdata *ppd, u32 target, int i2c_addr, int offset,
	      void *bp, int len)
{
	struct hfi1_devdata *dd = ppd->dd;
	int ret;

	ret = mutex_lock_interruptible(&dd->qsfp_i2c_mutex);
	if (!ret) {
		ret = __i2c_write(ppd, target, i2c_addr, offset, bp, len);
		mutex_unlock(&dd->qsfp_i2c_mutex);
	}

	return ret;
}

/*
 * Unlocked i2c read. Must hold dd->qsfp_i2c_mutex.
 */
static int __i2c_read(struct hfi1_pportdata *ppd, u32 target, int i2c_addr,
		      int offset, void *bp, int len)
{
	struct hfi1_devdata *dd = ppd->dd;
	int ret, cnt, pass = 0;
	int stuck = 0;
	u8 *buff = bp;

	/* Make sure TWSI bus is in sane state. */
	ret = hfi1_twsi_reset(dd, target);
	if (ret) {
		hfi1_dev_porterr(dd, ppd->port,
				 "I2C interface Reset for read failed\n");
		ret = -EIO;
		stuck = 1;
		goto exit;
	}

	cnt = 0;
	while (cnt < len) {
		int rlen = len - cnt;

		ret = hfi1_twsi_blk_rd(dd, target, i2c_addr, offset,
				       buff + cnt, rlen);
		/* Some QSFPs fail on the first try; retry as a workaround */
		if (ret && cnt == 0 && ++pass < I2C_MAX_RETRY)
			continue;
		if (ret) {
			/* hfi1_twsi_blk_rd() returns 1 on error, else 0 */
			ret = -EIO;
			goto exit;
		}
		offset += rlen;
		cnt += rlen;
	}

	ret = cnt;

exit:
	if (stuck)
		dd_dev_err(dd, "I2C interface bus stuck non-idle\n");

	if (pass >= I2C_MAX_RETRY && ret)
		hfi1_dev_porterr(dd, ppd->port,
				 "I2C failed even retrying\n");
	else if (pass)
		hfi1_dev_porterr(dd, ppd->port, "I2C retries: %d\n", pass);

	/* Must wait min 20us between qsfp i2c transactions */
	udelay(20);

	return ret;
}

int i2c_read(struct hfi1_pportdata *ppd, u32 target, int i2c_addr, int offset,
	     void *bp, int len)
{
	struct hfi1_devdata *dd = ppd->dd;
	int ret;

	ret = mutex_lock_interruptible(&dd->qsfp_i2c_mutex);
	if (!ret) {
		ret = __i2c_read(ppd, target, i2c_addr, offset, bp, len);
		mutex_unlock(&dd->qsfp_i2c_mutex);
	}

	return ret;
}
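
/*
 * Locking: i2c_read() and i2c_write() take dd->qsfp_i2c_mutex around a
 * single transfer.  qsfp_read() and qsfp_write() below take the mutex
 * once and then use the unlocked __i2c_read()/__i2c_write() helpers, so
 * the page-select write and the data transfer for each chunk cannot be
 * interleaved with another user of the bus.
 */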

int qsfp_write(struct hfi1_pportdata *ppd, u32 target, int addr, void *bp,
	       int len)
{
	int count = 0;
	int offset;
	int nwrite;
	int ret;
	u8 page;

	ret = mutex_lock_interruptible(&ppd->dd->qsfp_i2c_mutex);
	if (ret)
		return ret;

	while (count < len) {
		/*
		 * Set the qsfp page based on a zero-based address
		 * and a page size of QSFP_PAGESIZE bytes.
		 */
		page = (u8)(addr / QSFP_PAGESIZE);

		ret = __i2c_write(ppd, target, QSFP_DEV,
				  QSFP_PAGE_SELECT_BYTE_OFFS, &page, 1);
		if (ret != 1) {
			hfi1_dev_porterr(
				ppd->dd,
				ppd->port,
				"can't write QSFP_PAGE_SELECT_BYTE: %d\n", ret);
			ret = -EIO;
			break;
		}

		/* truncate write to end of page if crossing page boundary */
		offset = addr % QSFP_PAGESIZE;
		nwrite = len - count;
		if ((offset + nwrite) > QSFP_PAGESIZE)
			nwrite = QSFP_PAGESIZE - offset;

		ret = __i2c_write(ppd, target, QSFP_DEV, offset, bp + count,
				  nwrite);
		if (ret <= 0)	/* stop on error or nothing written */
			break;

		count += ret;
		addr += ret;
	}

	mutex_unlock(&ppd->dd->qsfp_i2c_mutex);

	if (ret < 0)
		return ret;
	return count;
}
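
/*
 * A worked example of the page/offset arithmetic used by qsfp_write()
 * and qsfp_read(), assuming QSFP_PAGESIZE is 256 as defined in qsfp.h:
 * a transfer starting at addr 384 selects upper page 01h (384 / 256 == 1)
 * and begins at I2C offset 128 (384 % 256), i.e. the first byte of that
 * upper page.  A transfer that would run past offset 255 is truncated at
 * the page boundary and resumed on the next loop iteration with the next
 * page selected.
 */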

int qsfp_read(struct hfi1_pportdata *ppd, u32 target, int addr, void *bp,
	      int len)
{
	int count = 0;
	int offset;
	int nread;
	int ret;
	u8 page;

	ret = mutex_lock_interruptible(&ppd->dd->qsfp_i2c_mutex);
	if (ret)
		return ret;

	while (count < len) {
		/*
		 * Set the qsfp page based on a zero-based address
		 * and a page size of QSFP_PAGESIZE bytes.
		 */
		page = (u8)(addr / QSFP_PAGESIZE);
		ret = __i2c_write(ppd, target, QSFP_DEV,
				  QSFP_PAGE_SELECT_BYTE_OFFS, &page, 1);
		if (ret != 1) {
			hfi1_dev_porterr(
				ppd->dd,
				ppd->port,
				"can't write QSFP_PAGE_SELECT_BYTE: %d\n", ret);
			ret = -EIO;
			break;
		}

		/* truncate read to end of page if crossing page boundary */
		offset = addr % QSFP_PAGESIZE;
		nread = len - count;
		if ((offset + nread) > QSFP_PAGESIZE)
			nread = QSFP_PAGESIZE - offset;

		ret = __i2c_read(ppd, target, QSFP_DEV, offset, bp + count,
				 nread);
		if (ret <= 0)	/* stop on error or nothing read */
			break;

		count += ret;
		addr += ret;
	}

	mutex_unlock(&ppd->dd->qsfp_i2c_mutex);

	if (ret < 0)
		return ret;
	return count;
}

/*
 * This function caches the QSFP memory range in 128 byte chunks.
 * As an example, the next byte after address 255 is byte 128 from
 * upper page 01H (if existing) rather than byte 0 from lower page 00H.
 */
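/*
 * Sketch of the resulting flat cache layout (derived from the reads
 * below and the mapping described above get_cable_info()):
 *   cache[  0..255]  page 00h (lower + upper)
 *   cache[256..383]  upper page 01h, when provided by the module
 *   cache[384..511]  upper page 02h, when provided by the module
 *   cache[512..639]  upper page 03h, when paging is supported
 * Ranges that are not read remain zeroed by the memset() below.
 */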
int refresh_qsfp_cache(struct hfi1_pportdata *ppd, struct qsfp_data *cp)
{
	u32 target = ppd->dd->hfi1_id;
	int ret;
	unsigned long flags;
	u8 *cache = &cp->cache[0];

	/* ensure sane contents on invalid reads, for cable swaps */
	memset(cache, 0, (QSFP_MAX_NUM_PAGES*128));
	dd_dev_info(ppd->dd, "%s: called\n", __func__);
	if (!qsfp_mod_present(ppd)) {
		ret = -ENODEV;
		goto bail;
	}

	ret = qsfp_read(ppd, target, 0, cache, 256);
	if (ret != 256) {
		dd_dev_info(ppd->dd,
			"%s: Read of pages 00H failed, expected 256, got %d\n",
			__func__, ret);
		goto bail;
	}

	if (cache[0] != 0x0C && cache[0] != 0x0D)
		goto bail;

	/* Is paging enabled? */
	if (!(cache[2] & 4)) {

		/* Paging enabled, page 03 required */
		if ((cache[195] & 0xC0) == 0xC0) {
			/* all */
			ret = qsfp_read(ppd, target, 384, cache + 256, 128);
			if (ret <= 0 || ret != 128) {
				dd_dev_info(ppd->dd, "%s: failed\n", __func__);
				goto bail;
			}
			ret = qsfp_read(ppd, target, 640, cache + 384, 128);
			if (ret <= 0 || ret != 128) {
				dd_dev_info(ppd->dd, "%s: failed\n", __func__);
				goto bail;
			}
			ret = qsfp_read(ppd, target, 896, cache + 512, 128);
			if (ret <= 0 || ret != 128) {
				dd_dev_info(ppd->dd, "%s: failed\n", __func__);
				goto bail;
			}
		} else if ((cache[195] & 0x80) == 0x80) {
			/* only page 2 and 3 */
			ret = qsfp_read(ppd, target, 640, cache + 384, 128);
			if (ret <= 0 || ret != 128) {
				dd_dev_info(ppd->dd, "%s: failed\n", __func__);
				goto bail;
			}
			ret = qsfp_read(ppd, target, 896, cache + 512, 128);
			if (ret <= 0 || ret != 128) {
				dd_dev_info(ppd->dd, "%s: failed\n", __func__);
				goto bail;
			}
		} else if ((cache[195] & 0x40) == 0x40) {
			/* only page 1 and 3 */
			ret = qsfp_read(ppd, target, 384, cache + 256, 128);
			if (ret <= 0 || ret != 128) {
				dd_dev_info(ppd->dd, "%s: failed\n", __func__);
				goto bail;
			}
			ret = qsfp_read(ppd, target, 896, cache + 512, 128);
			if (ret <= 0 || ret != 128) {
				dd_dev_info(ppd->dd, "%s: failed\n", __func__);
				goto bail;
			}
		} else {
			/* only page 3 */
			ret = qsfp_read(ppd, target, 896, cache + 512, 128);
			if (ret <= 0 || ret != 128) {
				dd_dev_info(ppd->dd, "%s: failed\n", __func__);
				goto bail;
			}
		}
	}

	spin_lock_irqsave(&ppd->qsfp_info.qsfp_lock, flags);
	ppd->qsfp_info.cache_valid = 1;
	ppd->qsfp_info.cache_refresh_required = 0;
	spin_unlock_irqrestore(&ppd->qsfp_info.qsfp_lock, flags);

	return 0;

bail:
	memset(cache, 0, (QSFP_MAX_NUM_PAGES*128));
	return ret;
}

const char * const hfi1_qsfp_devtech[16] = {
	"850nm VCSEL", "1310nm VCSEL", "1550nm VCSEL", "1310nm FP",
	"1310nm DFB", "1550nm DFB", "1310nm EML", "1550nm EML",
	"Cu Misc", "1490nm DFB", "Cu NoEq", "Cu Eq",
	"Undef", "Cu Active BothEq", "Cu FarEq", "Cu NearEq"
};

#define QSFP_DUMP_CHUNK 16 /* Holds longest string */
#define QSFP_DEFAULT_HDR_CNT 224

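/*
 * pwr_codes below packs the four power-class strings into fixed
 * 4-character fields; qsfp_dump() indexes it with
 * QSFP_PWR(cache[QSFP_MOD_PWR_OFFS]) * 4 and prints the first three
 * characters followed by a literal 'W'.
 */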
static const char *pwr_codes = "1.5W2.0W2.5W3.5W";

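/*
 * QSFP_HFI0_MODPRST_N in the ASIC_QSFP1_IN/ASIC_QSFP2_IN CSR is active
 * low, so a clear bit means a module is present; dd->hfi1_id selects
 * which of the two QSFP ports' CSRs to read.
 */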
int qsfp_mod_present(struct hfi1_pportdata *ppd)
{
	struct hfi1_devdata *dd = ppd->dd;
	u64 reg;

	reg = read_csr(dd, dd->hfi1_id ? ASIC_QSFP2_IN : ASIC_QSFP1_IN);
	return !(reg & QSFP_HFI0_MODPRST_N);
}

/*
 * This function maps QSFP memory addresses in 128 byte chunks in the
 * following fashion, per the CableInfo SMA query definition in the
 * IBA 1.3 spec/OPA Gen 1 spec:
 * For addr 000-127, lower page 00h
 * For addr 128-255, upper page 00h
 * For addr 256-383, upper page 01h
 * For addr 384-511, upper page 02h
 * For addr 512-639, upper page 03h
 *
 * For requests that run past this range, the bytes beyond the valid
 * range are returned set to 0 in the data buffer.
 * For optional upper pages that are not valid, the corresponding bytes
 * in the data buffer are likewise returned set to 0.
 */
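/*
 * Illustrative (hypothetical) use: an SMA CableInfo query for 64 bytes
 * starting at flat address 128 would call
 *   get_cable_info(dd, port, 128, 64, buf);
 * and receive bytes 128-191 of the cached page 00h.  "port" and "buf"
 * are placeholders for the caller's port number and response buffer.
 */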
int get_cable_info(struct hfi1_devdata *dd, u32 port_num, u32 addr, u32 len,
		   u8 *data)
{
	struct hfi1_pportdata *ppd;
	u32 excess_len = 0;
	int ret = 0;

	if (port_num > dd->num_pports || port_num < 1) {
		dd_dev_info(dd, "%s: Invalid port number %d\n",
			    __func__, port_num);
		ret = -EINVAL;
		goto set_zeroes;
	}

	ppd = dd->pport + (port_num - 1);
	if (!qsfp_mod_present(ppd)) {
		ret = -ENODEV;
		goto set_zeroes;
	}

	if (!ppd->qsfp_info.cache_valid) {
		ret = -EINVAL;
		goto set_zeroes;
	}

	if (addr >= (QSFP_MAX_NUM_PAGES * 128)) {
		ret = -ERANGE;
		goto set_zeroes;
	}

	if ((addr + len) > (QSFP_MAX_NUM_PAGES * 128)) {
		excess_len = (addr + len) - (QSFP_MAX_NUM_PAGES * 128);
		memcpy(data, &ppd->qsfp_info.cache[addr], (len - excess_len));
		data += (len - excess_len);
		goto set_zeroes;
	}

	memcpy(data, &ppd->qsfp_info.cache[addr], len);
	return 0;

set_zeroes:
	memset(data, 0, excess_len);
	return ret;
}

int qsfp_dump(struct hfi1_pportdata *ppd, char *buf, int len)
{
	u8 *cache = &ppd->qsfp_info.cache[0];
	u8 bin_buff[QSFP_DUMP_CHUNK];
	char lenstr[6];
	int sofar;
	int bidx = 0;
	u8 *atten = &cache[QSFP_ATTEN_OFFS];
	u8 *vendor_oui = &cache[QSFP_VOUI_OFFS];

	sofar = 0;
	lenstr[0] = ' ';
	lenstr[1] = '\0';

	if (ppd->qsfp_info.cache_valid) {

		if (QSFP_IS_CU(cache[QSFP_MOD_TECH_OFFS]))
			sprintf(lenstr, "%dM ", cache[QSFP_MOD_LEN_OFFS]);

		sofar += scnprintf(buf + sofar, len - sofar, "PWR:%.3sW\n",
				   pwr_codes +
				   (QSFP_PWR(cache[QSFP_MOD_PWR_OFFS]) * 4));

		sofar += scnprintf(buf + sofar, len - sofar, "TECH:%s%s\n",
				   lenstr,
			hfi1_qsfp_devtech[(cache[QSFP_MOD_TECH_OFFS]) >> 4]);

		sofar += scnprintf(buf + sofar, len - sofar, "Vendor:%.*s\n",
				   QSFP_VEND_LEN, &cache[QSFP_VEND_OFFS]);

		sofar += scnprintf(buf + sofar, len - sofar, "OUI:%06X\n",
				   QSFP_OUI(vendor_oui));

		sofar += scnprintf(buf + sofar, len - sofar, "Part#:%.*s\n",
				   QSFP_PN_LEN, &cache[QSFP_PN_OFFS]);

		sofar += scnprintf(buf + sofar, len - sofar, "Rev:%.*s\n",
				   QSFP_REV_LEN, &cache[QSFP_REV_OFFS]);

		if (QSFP_IS_CU(cache[QSFP_MOD_TECH_OFFS]))
			sofar += scnprintf(buf + sofar, len - sofar,
					   "Atten:%d, %d\n",
					   QSFP_ATTEN_SDR(atten),
					   QSFP_ATTEN_DDR(atten));

		sofar += scnprintf(buf + sofar, len - sofar, "Serial:%.*s\n",
				   QSFP_SN_LEN, &cache[QSFP_SN_OFFS]);

		sofar += scnprintf(buf + sofar, len - sofar, "Date:%.*s\n",
				   QSFP_DATE_LEN, &cache[QSFP_DATE_OFFS]);

		sofar += scnprintf(buf + sofar, len - sofar, "Lot:%.*s\n",
				   QSFP_LOT_LEN, &cache[QSFP_LOT_OFFS]);

		while (bidx < QSFP_DEFAULT_HDR_CNT) {
			int iidx;

			memcpy(bin_buff, &cache[bidx], QSFP_DUMP_CHUNK);
			for (iidx = 0; iidx < QSFP_DUMP_CHUNK; ++iidx) {
				sofar += scnprintf(buf + sofar, len - sofar,
						   " %02X", bin_buff[iidx]);
			}
			sofar += scnprintf(buf + sofar, len - sofar, "\n");
			bidx += QSFP_DUMP_CHUNK;
		}
	}
	return sofar;
}