/****************************************************************************
 * Driver for Solarflare Solarstorm network controllers and boards
 * Copyright 2008-2011 Solarflare Communications Inc.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published
 * by the Free Software Foundation, incorporated herein by reference.
 */

#include <linux/delay.h>
#include "net_driver.h"
#include "nic.h"
#include "io.h"
#include "farch_regs.h"
#include "mcdi_pcol.h"
#include "phy.h"

/**************************************************************************
 *
 * Management-Controller-to-Driver Interface
 *
 **************************************************************************
 */

#define MCDI_RPC_TIMEOUT	(10 * HZ)

/* A reboot/assertion causes the MCDI status word to be set after the
 * command word is set or a REBOOT event is sent. If we notice a reboot
 * via these mechanisms then wait 10ms for the status word to be set. */
#define MCDI_STATUS_DELAY_US		100
#define MCDI_STATUS_DELAY_COUNT		100
#define MCDI_STATUS_SLEEP_MS						\
	(MCDI_STATUS_DELAY_US * MCDI_STATUS_DELAY_COUNT / 1000)

#define SEQ_MASK							\
	EFX_MASK32(EFX_WIDTH(MCDI_HEADER_SEQ))

static inline struct efx_mcdi_iface *efx_mcdi(struct efx_nic *efx)
{
	EFX_BUG_ON_PARANOID(!efx->mcdi);
	return &efx->mcdi->iface;
}

int efx_mcdi_init(struct efx_nic *efx)
{
	struct efx_mcdi_iface *mcdi;

	efx->mcdi = kzalloc(sizeof(*efx->mcdi), GFP_KERNEL);
	if (!efx->mcdi)
		return -ENOMEM;

	mcdi = efx_mcdi(efx);
	init_waitqueue_head(&mcdi->wq);
	spin_lock_init(&mcdi->iface_lock);
	atomic_set(&mcdi->state, MCDI_STATE_QUIESCENT);
	mcdi->mode = MCDI_MODE_POLL;

	(void) efx_mcdi_poll_reboot(efx);

	/* Recover from a failed assertion before probing */
	return efx_mcdi_handle_assertion(efx);
}

void efx_mcdi_fini(struct efx_nic *efx)
{
	BUG_ON(efx->mcdi &&
	       atomic_read(&efx->mcdi->iface.state) != MCDI_STATE_QUIESCENT);
	kfree(efx->mcdi);
}

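/* Build the MCDI request header (v1 or v2 format, depending on the NIC
 * type's maximum supported MCDI version) and pass it together with the
 * request payload to the NIC type's mcdi_request hook for copy-in.
 */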
static void efx_mcdi_copyin(struct efx_nic *efx, unsigned cmd,
			    const efx_dword_t *inbuf, size_t inlen)
{
	struct efx_mcdi_iface *mcdi = efx_mcdi(efx);
	efx_dword_t hdr[2];
	size_t hdr_len;
	u32 xflags, seqno;

	BUG_ON(atomic_read(&mcdi->state) == MCDI_STATE_QUIESCENT);

	seqno = mcdi->seqno & SEQ_MASK;
	xflags = 0;
	if (mcdi->mode == MCDI_MODE_EVENTS)
		xflags |= MCDI_HEADER_XFLAGS_EVREQ;

	if (efx->type->mcdi_max_ver == 1) {
		/* MCDI v1 */
		EFX_POPULATE_DWORD_6(hdr[0],
				     MCDI_HEADER_RESPONSE, 0,
				     MCDI_HEADER_RESYNC, 1,
				     MCDI_HEADER_CODE, cmd,
				     MCDI_HEADER_DATALEN, inlen,
				     MCDI_HEADER_SEQ, seqno,
				     MCDI_HEADER_XFLAGS, xflags);
		hdr_len = 4;
	} else {
		/* MCDI v2 */
		BUG_ON(inlen > MCDI_CTL_SDU_LEN_MAX_V2);
		EFX_POPULATE_DWORD_6(hdr[0],
				     MCDI_HEADER_RESPONSE, 0,
				     MCDI_HEADER_RESYNC, 1,
				     MCDI_HEADER_CODE, MC_CMD_V2_EXTN,
				     MCDI_HEADER_DATALEN, 0,
				     MCDI_HEADER_SEQ, seqno,
				     MCDI_HEADER_XFLAGS, xflags);
		EFX_POPULATE_DWORD_2(hdr[1],
				     MC_CMD_V2_EXTN_IN_EXTENDED_CMD, cmd,
				     MC_CMD_V2_EXTN_IN_ACTUAL_LEN, inlen);
		hdr_len = 8;
	}

	efx->type->mcdi_request(efx, hdr, hdr_len, inbuf, inlen);
}

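/* Translate an MCDI error number from the MC into a negative Linux errno;
 * unrecognised values map to -EPROTO.
 */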
static int efx_mcdi_errno(unsigned int mcdi_err)
{
	switch (mcdi_err) {
	case 0:
		return 0;
#define TRANSLATE_ERROR(name)					\
	case MC_CMD_ERR_ ## name:				\
		return -name;
	TRANSLATE_ERROR(EPERM);
	TRANSLATE_ERROR(ENOENT);
	TRANSLATE_ERROR(EINTR);
	TRANSLATE_ERROR(EAGAIN);
	TRANSLATE_ERROR(EACCES);
	TRANSLATE_ERROR(EBUSY);
	TRANSLATE_ERROR(EINVAL);
	TRANSLATE_ERROR(EDEADLK);
	TRANSLATE_ERROR(ENOSYS);
	TRANSLATE_ERROR(ETIME);
	TRANSLATE_ERROR(EALREADY);
	TRANSLATE_ERROR(ENOSPC);
#undef TRANSLATE_ERROR
	case MC_CMD_ERR_ALLOC_FAIL:
		return -ENOBUFS;
	case MC_CMD_ERR_MAC_EXIST:
		return -EADDRINUSE;
	default:
		return -EPROTO;
	}
}

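/* Read back the response header from shared memory and record the header
 * length, payload length and translated error code in the MCDI state.
 * Callers hold mcdi->iface_lock.
 */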
static void efx_mcdi_read_response_header(struct efx_nic *efx)
{
	struct efx_mcdi_iface *mcdi = efx_mcdi(efx);
	unsigned int respseq, respcmd, error;
	efx_dword_t hdr;

	efx->type->mcdi_read_response(efx, &hdr, 0, 4);
	respseq = EFX_DWORD_FIELD(hdr, MCDI_HEADER_SEQ);
	respcmd = EFX_DWORD_FIELD(hdr, MCDI_HEADER_CODE);
	error = EFX_DWORD_FIELD(hdr, MCDI_HEADER_ERROR);

	if (respcmd != MC_CMD_V2_EXTN) {
		mcdi->resp_hdr_len = 4;
		mcdi->resp_data_len = EFX_DWORD_FIELD(hdr, MCDI_HEADER_DATALEN);
	} else {
		efx->type->mcdi_read_response(efx, &hdr, 4, 4);
		mcdi->resp_hdr_len = 8;
		mcdi->resp_data_len =
			EFX_DWORD_FIELD(hdr, MC_CMD_V2_EXTN_IN_ACTUAL_LEN);
	}

	if (error && mcdi->resp_data_len == 0) {
		netif_err(efx, hw, efx->net_dev, "MC rebooted\n");
		mcdi->resprc = -EIO;
	} else if ((respseq ^ mcdi->seqno) & SEQ_MASK) {
		netif_err(efx, hw, efx->net_dev,
			  "MC response mismatch tx seq 0x%x rx seq 0x%x\n",
			  respseq, mcdi->seqno);
		mcdi->resprc = -EIO;
	} else if (error) {
		efx->type->mcdi_read_response(efx, &hdr, mcdi->resp_hdr_len, 4);
		mcdi->resprc =
			efx_mcdi_errno(EFX_DWORD_FIELD(hdr, EFX_DWORD_0));
	} else {
		mcdi->resprc = 0;
	}
}

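/* Poll the MC for completion of the outstanding request, first checking
 * whether the MC has rebooted.  Returns 0 once a result has been recorded
 * in the MCDI state, or -ETIMEDOUT if the RPC timeout expires.
 */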
static int efx_mcdi_poll(struct efx_nic *efx)
{
	struct efx_mcdi_iface *mcdi = efx_mcdi(efx);
	unsigned long time, finish;
	unsigned int spins;
	int rc;

	/* Check for a reboot atomically with respect to efx_mcdi_copyout() */
	rc = efx_mcdi_poll_reboot(efx);
	if (rc) {
		spin_lock_bh(&mcdi->iface_lock);
		mcdi->resprc = rc;
		mcdi->resp_hdr_len = 0;
		mcdi->resp_data_len = 0;
		spin_unlock_bh(&mcdi->iface_lock);
		return 0;
	}

	/* Poll for completion. Poll quickly (once a us) for the 1st jiffy,
	 * because generally mcdi responses are fast. After that, back off
	 * and poll once a jiffy (approximately)
	 */
	spins = TICK_USEC;
	finish = jiffies + MCDI_RPC_TIMEOUT;

	while (1) {
		if (spins != 0) {
			--spins;
			udelay(1);
		} else {
			schedule_timeout_uninterruptible(1);
		}

		time = jiffies;

		rmb();
		if (efx->type->mcdi_poll_response(efx))
			break;

		if (time_after(time, finish))
			return -ETIMEDOUT;
	}

	spin_lock_bh(&mcdi->iface_lock);
	efx_mcdi_read_response_header(efx);
	spin_unlock_bh(&mcdi->iface_lock);

	/* Return rc=0 like wait_event_timeout() */
	return 0;
}

/* Test and clear MC-rebooted flag for this port/function; reset
 * software state as necessary.
 */
int efx_mcdi_poll_reboot(struct efx_nic *efx)
{
	if (!efx->mcdi)
		return 0;

	return efx->type->mcdi_poll_reboot(efx);
}

static void efx_mcdi_acquire(struct efx_mcdi_iface *mcdi)
{
	/* Wait until the interface becomes QUIESCENT and we win the race
	 * to mark it RUNNING. */
	wait_event(mcdi->wq,
		   atomic_cmpxchg(&mcdi->state,
				  MCDI_STATE_QUIESCENT,
				  MCDI_STATE_RUNNING)
		   == MCDI_STATE_QUIESCENT);
}

static int efx_mcdi_await_completion(struct efx_nic *efx)
{
	struct efx_mcdi_iface *mcdi = efx_mcdi(efx);

	if (wait_event_timeout(
		    mcdi->wq,
		    atomic_read(&mcdi->state) == MCDI_STATE_COMPLETED,
		    MCDI_RPC_TIMEOUT) == 0)
		return -ETIMEDOUT;

	/* Check if efx_mcdi_set_mode() switched us back to polled completions.
	 * In which case, poll for completions directly. If efx_mcdi_ev_cpl()
	 * completed the request first, then we'll just end up completing the
	 * request again, which is safe.
	 *
	 * We need an smp_rmb() to synchronise with efx_mcdi_mode_poll(), which
	 * wait_event_timeout() implicitly provides.
	 */
	if (mcdi->mode == MCDI_MODE_POLL)
		return efx_mcdi_poll(efx);

	return 0;
}

static bool efx_mcdi_complete(struct efx_mcdi_iface *mcdi)
{
	/* If the interface is RUNNING, then move to COMPLETED and wake any
	 * waiters. If the interface isn't in RUNNING then we've received a
	 * duplicate completion after we've already transitioned back to
	 * QUIESCENT. [A subsequent invocation would increment seqno, so would
	 * have failed the seqno check].
	 */
	if (atomic_cmpxchg(&mcdi->state,
			   MCDI_STATE_RUNNING,
			   MCDI_STATE_COMPLETED) == MCDI_STATE_RUNNING) {
		wake_up(&mcdi->wq);
		return true;
	}

	return false;
}

static void efx_mcdi_release(struct efx_mcdi_iface *mcdi)
{
	atomic_set(&mcdi->state, MCDI_STATE_QUIESCENT);
	wake_up(&mcdi->wq);
}

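/* Handle a CMDDONE event: check the sequence number against the outstanding
 * request, record the response (reading the response header from shared
 * memory for MCDI v2) and wake the waiter.
 */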
static void efx_mcdi_ev_cpl(struct efx_nic *efx, unsigned int seqno,
			    unsigned int datalen, unsigned int mcdi_err)
{
	struct efx_mcdi_iface *mcdi = efx_mcdi(efx);
	bool wake = false;

	spin_lock(&mcdi->iface_lock);

	if ((seqno ^ mcdi->seqno) & SEQ_MASK) {
		if (mcdi->credits)
			/* The request has been cancelled */
			--mcdi->credits;
		else
			netif_err(efx, hw, efx->net_dev,
				  "MC response mismatch tx seq 0x%x rx "
				  "seq 0x%x\n", seqno, mcdi->seqno);
	} else {
		if (efx->type->mcdi_max_ver >= 2) {
			/* MCDI v2 responses don't fit in an event */
			efx_mcdi_read_response_header(efx);
		} else {
			mcdi->resprc = efx_mcdi_errno(mcdi_err);
			mcdi->resp_hdr_len = 4;
			mcdi->resp_data_len = datalen;
		}

		wake = true;
	}

	spin_unlock(&mcdi->iface_lock);

	if (wake)
		efx_mcdi_complete(mcdi);
}

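/* Issue an MCDI request and wait for its completion.  This is the normal
 * synchronous entry point and is equivalent to efx_mcdi_rpc_start()
 * followed by efx_mcdi_rpc_finish().
 */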
int efx_mcdi_rpc(struct efx_nic *efx, unsigned cmd,
		 const efx_dword_t *inbuf, size_t inlen,
		 efx_dword_t *outbuf, size_t outlen,
		 size_t *outlen_actual)
{
	int rc;

	rc = efx_mcdi_rpc_start(efx, cmd, inbuf, inlen);
	if (rc)
		return rc;
	return efx_mcdi_rpc_finish(efx, cmd, inlen,
				   outbuf, outlen, outlen_actual);
}

int efx_mcdi_rpc_start(struct efx_nic *efx, unsigned cmd,
		       const efx_dword_t *inbuf, size_t inlen)
{
	struct efx_mcdi_iface *mcdi = efx_mcdi(efx);

	if (efx->type->mcdi_max_ver < 0 ||
	    (efx->type->mcdi_max_ver < 2 &&
	     cmd > MC_CMD_CMD_SPACE_ESCAPE_7))
		return -EINVAL;

	if (inlen > MCDI_CTL_SDU_LEN_MAX_V2 ||
	    (efx->type->mcdi_max_ver < 2 &&
	     inlen > MCDI_CTL_SDU_LEN_MAX_V1))
		return -EMSGSIZE;

	efx_mcdi_acquire(mcdi);

	/* Serialise with efx_mcdi_ev_cpl() and efx_mcdi_ev_death() */
	spin_lock_bh(&mcdi->iface_lock);
	++mcdi->seqno;
	spin_unlock_bh(&mcdi->iface_lock);

	efx_mcdi_copyin(efx, cmd, inbuf, inlen);
	return 0;
}

int efx_mcdi_rpc_finish(struct efx_nic *efx, unsigned cmd, size_t inlen,
			efx_dword_t *outbuf, size_t outlen,
			size_t *outlen_actual)
{
	struct efx_mcdi_iface *mcdi = efx_mcdi(efx);
	int rc;

	if (mcdi->mode == MCDI_MODE_POLL)
		rc = efx_mcdi_poll(efx);
	else
		rc = efx_mcdi_await_completion(efx);

	if (rc != 0) {
		/* Close the race with efx_mcdi_ev_cpl() executing just too late
		 * and completing a request we've just cancelled, by ensuring
		 * that the seqno check therein fails.
		 */
		spin_lock_bh(&mcdi->iface_lock);
		++mcdi->seqno;
		++mcdi->credits;
		spin_unlock_bh(&mcdi->iface_lock);

		netif_err(efx, hw, efx->net_dev,
			  "MC command 0x%x inlen %d mode %d timed out\n",
			  cmd, (int)inlen, mcdi->mode);
	} else {
		size_t hdr_len, data_len;

		/* At the very least we need a memory barrier here to ensure
		 * we pick up changes from efx_mcdi_ev_cpl(). Protect against
		 * a spurious efx_mcdi_ev_cpl() running concurrently by
		 * acquiring the iface_lock. */
		spin_lock_bh(&mcdi->iface_lock);
		rc = mcdi->resprc;
		hdr_len = mcdi->resp_hdr_len;
		data_len = mcdi->resp_data_len;
		spin_unlock_bh(&mcdi->iface_lock);

		BUG_ON(rc > 0);

		if (rc == 0) {
			efx->type->mcdi_read_response(efx, outbuf, hdr_len,
						      min(outlen, data_len));
			if (outlen_actual != NULL)
				*outlen_actual = data_len;
		} else if (cmd == MC_CMD_REBOOT && rc == -EIO)
			; /* Don't reset if MC_CMD_REBOOT returns EIO */
		else if (rc == -EIO || rc == -EINTR) {
			netif_err(efx, hw, efx->net_dev, "MC fatal error %d\n",
				  -rc);
			efx_schedule_reset(efx, RESET_TYPE_MC_FAILURE);
		} else
			netif_dbg(efx, hw, efx->net_dev,
				  "MC command 0x%x inlen %d failed rc=%d\n",
				  cmd, (int)inlen, -rc);

		if (rc == -EIO || rc == -EINTR) {
			msleep(MCDI_STATUS_SLEEP_MS);
			efx_mcdi_poll_reboot(efx);
		}
	}

	efx_mcdi_release(mcdi);
	return rc;
}

void efx_mcdi_mode_poll(struct efx_nic *efx)
{
	struct efx_mcdi_iface *mcdi;

	if (!efx->mcdi)
		return;

	mcdi = efx_mcdi(efx);
	if (mcdi->mode == MCDI_MODE_POLL)
		return;

	/* We can switch from event completion to polled completion, because
	 * mcdi requests are always completed in shared memory. We do this by
	 * switching the mode to POLL'd, then completing the request.
	 * efx_mcdi_await_completion() will then call efx_mcdi_poll().
	 *
	 * We need an smp_wmb() to synchronise with efx_mcdi_await_completion(),
	 * which efx_mcdi_complete() provides for us.
	 */
	mcdi->mode = MCDI_MODE_POLL;

	efx_mcdi_complete(mcdi);
}

void efx_mcdi_mode_event(struct efx_nic *efx)
{
	struct efx_mcdi_iface *mcdi;

	if (!efx->mcdi)
		return;

	mcdi = efx_mcdi(efx);

	if (mcdi->mode == MCDI_MODE_EVENTS)
		return;

	/* We can't switch from polled to event completion in the middle of a
	 * request, because the completion method is specified in the request.
	 * So acquire the interface to serialise the requestors. We don't need
	 * to acquire the iface_lock to change the mode here, but we do need a
	 * write memory barrier to ensure that efx_mcdi_rpc() sees it, which
	 * efx_mcdi_acquire() provides.
	 */
	efx_mcdi_acquire(mcdi);
	mcdi->mode = MCDI_MODE_EVENTS;
	efx_mcdi_release(mcdi);
}

static void efx_mcdi_ev_death(struct efx_nic *efx, int rc)
{
	struct efx_mcdi_iface *mcdi = efx_mcdi(efx);

	/* If there is an outstanding MCDI request, it has been terminated
	 * either by a BADASSERT or REBOOT event. If the mcdi interface is
	 * in polled mode, then do nothing because the MC reboot handler will
	 * set the header correctly. However, if the mcdi interface is waiting
	 * for a CMDDONE event it won't receive it [and since all MCDI events
	 * are sent to the same queue, we can't be racing with
	 * efx_mcdi_ev_cpl()]
	 *
	 * There's a race here with efx_mcdi_rpc(), because we might receive
	 * a REBOOT event *before* the request has been copied out. In polled
	 * mode (during startup) this is irrelevant, because efx_mcdi_complete()
	 * is ignored. In event mode, this condition is just an edge-case of
	 * receiving a REBOOT event after posting the MCDI request. Did the mc
	 * reboot before or after the copyout? The best we can always do is
	 * just return failure.
	 */
	spin_lock(&mcdi->iface_lock);
	if (efx_mcdi_complete(mcdi)) {
		if (mcdi->mode == MCDI_MODE_EVENTS) {
			mcdi->resprc = rc;
			mcdi->resp_hdr_len = 0;
			mcdi->resp_data_len = 0;
			++mcdi->credits;
		}
	} else {
		int count;

		/* Nobody was waiting for an MCDI request, so trigger a reset */
		efx_schedule_reset(efx, RESET_TYPE_MC_FAILURE);

		/* Consume the status word since efx_mcdi_rpc_finish() won't */
		for (count = 0; count < MCDI_STATUS_DELAY_COUNT; ++count) {
			if (efx_mcdi_poll_reboot(efx))
				break;
			udelay(MCDI_STATUS_DELAY_US);
		}
	}

	spin_unlock(&mcdi->iface_lock);
}

/* Called from falcon_process_eventq for MCDI events */
void efx_mcdi_process_event(struct efx_channel *channel,
			    efx_qword_t *event)
{
	struct efx_nic *efx = channel->efx;
	int code = EFX_QWORD_FIELD(*event, MCDI_EVENT_CODE);
	u32 data = EFX_QWORD_FIELD(*event, MCDI_EVENT_DATA);

	switch (code) {
	case MCDI_EVENT_CODE_BADSSERT:
		netif_err(efx, hw, efx->net_dev,
			  "MC watchdog or assertion failure at 0x%x\n", data);
		efx_mcdi_ev_death(efx, -EINTR);
		break;

	case MCDI_EVENT_CODE_PMNOTICE:
		netif_info(efx, wol, efx->net_dev, "MCDI PM event.\n");
		break;

	case MCDI_EVENT_CODE_CMDDONE:
		efx_mcdi_ev_cpl(efx,
				MCDI_EVENT_FIELD(*event, CMDDONE_SEQ),
				MCDI_EVENT_FIELD(*event, CMDDONE_DATALEN),
				MCDI_EVENT_FIELD(*event, CMDDONE_ERRNO));
		break;

	case MCDI_EVENT_CODE_LINKCHANGE:
		efx_mcdi_process_link_change(efx, event);
		break;
	case MCDI_EVENT_CODE_SENSOREVT:
		efx_mcdi_sensor_event(efx, event);
		break;
	case MCDI_EVENT_CODE_SCHEDERR:
		netif_info(efx, hw, efx->net_dev,
			   "MC Scheduler error address=0x%x\n", data);
		break;
	case MCDI_EVENT_CODE_REBOOT:
		netif_info(efx, hw, efx->net_dev, "MC Reboot\n");
		efx_mcdi_ev_death(efx, -EIO);
		break;
	case MCDI_EVENT_CODE_MAC_STATS_DMA:
		/* MAC stats are gathered lazily. We can ignore this. */
		break;
	case MCDI_EVENT_CODE_FLR:
		efx_sriov_flr(efx, MCDI_EVENT_FIELD(*event, FLR_VF));
		break;
	case MCDI_EVENT_CODE_PTP_RX:
	case MCDI_EVENT_CODE_PTP_FAULT:
	case MCDI_EVENT_CODE_PTP_PPS:
		efx_ptp_event(efx, event);
		break;

	case MCDI_EVENT_CODE_TX_ERR:
	case MCDI_EVENT_CODE_RX_ERR:
		netif_err(efx, hw, efx->net_dev,
			  "%s DMA error (event: "EFX_QWORD_FMT")\n",
			  code == MCDI_EVENT_CODE_TX_ERR ? "TX" : "RX",
			  EFX_QWORD_VAL(*event));
		efx_schedule_reset(efx, RESET_TYPE_DMA_ERROR);
		break;
	default:
		netif_err(efx, hw, efx->net_dev, "Unknown MCDI event 0x%x\n",
			  code);
	}
}

/**************************************************************************
 *
 * Specific request functions
 *
 **************************************************************************
 */

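/* Format the MC firmware version (from MC_CMD_GET_VERSION) into buf;
 * on failure, log the error and leave buf empty.
 */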
void efx_mcdi_print_fwver(struct efx_nic *efx, char *buf, size_t len)
{
	MCDI_DECLARE_BUF(outbuf, MC_CMD_GET_VERSION_OUT_LEN);
	size_t outlength;
	const __le16 *ver_words;
	int rc;

	BUILD_BUG_ON(MC_CMD_GET_VERSION_IN_LEN != 0);

	rc = efx_mcdi_rpc(efx, MC_CMD_GET_VERSION, NULL, 0,
			  outbuf, sizeof(outbuf), &outlength);
	if (rc)
		goto fail;

	if (outlength < MC_CMD_GET_VERSION_OUT_LEN) {
		rc = -EIO;
		goto fail;
	}

	ver_words = (__le16 *)MCDI_PTR(outbuf, GET_VERSION_OUT_VERSION);
	snprintf(buf, len, "%u.%u.%u.%u",
		 le16_to_cpu(ver_words[0]), le16_to_cpu(ver_words[1]),
		 le16_to_cpu(ver_words[2]), le16_to_cpu(ver_words[3]));
	return;

fail:
	netif_err(efx, probe, efx->net_dev, "%s: failed rc=%d\n", __func__, rc);
	buf[0] = 0;
}

int efx_mcdi_drv_attach(struct efx_nic *efx, bool driver_operating,
			bool *was_attached)
{
	MCDI_DECLARE_BUF(inbuf, MC_CMD_DRV_ATTACH_IN_LEN);
	MCDI_DECLARE_BUF(outbuf, MC_CMD_DRV_ATTACH_OUT_LEN);
	size_t outlen;
	int rc;

	MCDI_SET_DWORD(inbuf, DRV_ATTACH_IN_NEW_STATE,
		       driver_operating ? 1 : 0);
	MCDI_SET_DWORD(inbuf, DRV_ATTACH_IN_UPDATE, 1);
	MCDI_SET_DWORD(inbuf, DRV_ATTACH_IN_FIRMWARE_ID, MC_CMD_FW_LOW_LATENCY);

	rc = efx_mcdi_rpc(efx, MC_CMD_DRV_ATTACH, inbuf, sizeof(inbuf),
			  outbuf, sizeof(outbuf), &outlen);
	if (rc)
		goto fail;
	if (outlen < MC_CMD_DRV_ATTACH_OUT_LEN) {
		rc = -EIO;
		goto fail;
	}

	if (was_attached != NULL)
		*was_attached = MCDI_DWORD(outbuf, DRV_ATTACH_OUT_OLD_STATE);
	return 0;

fail:
	netif_err(efx, probe, efx->net_dev, "%s: failed rc=%d\n", __func__, rc);
	return rc;
}

int efx_mcdi_get_board_cfg(struct efx_nic *efx, u8 *mac_address,
			   u16 *fw_subtype_list, u32 *capabilities)
{
	MCDI_DECLARE_BUF(outbuf, MC_CMD_GET_BOARD_CFG_OUT_LENMAX);
	size_t outlen, i;
	int port_num = efx_port_num(efx);
	int rc;

	BUILD_BUG_ON(MC_CMD_GET_BOARD_CFG_IN_LEN != 0);

	rc = efx_mcdi_rpc(efx, MC_CMD_GET_BOARD_CFG, NULL, 0,
			  outbuf, sizeof(outbuf), &outlen);
	if (rc)
		goto fail;

	if (outlen < MC_CMD_GET_BOARD_CFG_OUT_LENMIN) {
		rc = -EIO;
		goto fail;
	}

	if (mac_address)
		memcpy(mac_address,
		       port_num ?
		       MCDI_PTR(outbuf, GET_BOARD_CFG_OUT_MAC_ADDR_BASE_PORT1) :
		       MCDI_PTR(outbuf, GET_BOARD_CFG_OUT_MAC_ADDR_BASE_PORT0),
		       ETH_ALEN);
	if (fw_subtype_list) {
		for (i = 0;
		     i < MCDI_VAR_ARRAY_LEN(outlen,
					    GET_BOARD_CFG_OUT_FW_SUBTYPE_LIST);
		     i++)
			fw_subtype_list[i] = MCDI_ARRAY_WORD(
				outbuf, GET_BOARD_CFG_OUT_FW_SUBTYPE_LIST, i);
		for (; i < MC_CMD_GET_BOARD_CFG_OUT_FW_SUBTYPE_LIST_MAXNUM; i++)
			fw_subtype_list[i] = 0;
	}
	if (capabilities) {
		if (port_num)
			*capabilities = MCDI_DWORD(outbuf,
					GET_BOARD_CFG_OUT_CAPABILITIES_PORT1);
		else
			*capabilities = MCDI_DWORD(outbuf,
					GET_BOARD_CFG_OUT_CAPABILITIES_PORT0);
	}

	return 0;

fail:
	netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d len=%d\n",
		  __func__, rc, (int)outlen);

	return rc;
}

int efx_mcdi_log_ctrl(struct efx_nic *efx, bool evq, bool uart, u32 dest_evq)
{
	MCDI_DECLARE_BUF(inbuf, MC_CMD_LOG_CTRL_IN_LEN);
	u32 dest = 0;
	int rc;

	if (uart)
		dest |= MC_CMD_LOG_CTRL_IN_LOG_DEST_UART;
	if (evq)
		dest |= MC_CMD_LOG_CTRL_IN_LOG_DEST_EVQ;

	MCDI_SET_DWORD(inbuf, LOG_CTRL_IN_LOG_DEST, dest);
	MCDI_SET_DWORD(inbuf, LOG_CTRL_IN_LOG_DEST_EVQ, dest_evq);

	BUILD_BUG_ON(MC_CMD_LOG_CTRL_OUT_LEN != 0);

	rc = efx_mcdi_rpc(efx, MC_CMD_LOG_CTRL, inbuf, sizeof(inbuf),
			  NULL, 0, NULL);
	if (rc)
		goto fail;

	return 0;

fail:
	netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n", __func__, rc);
	return rc;
}

int efx_mcdi_nvram_types(struct efx_nic *efx, u32 *nvram_types_out)
{
	MCDI_DECLARE_BUF(outbuf, MC_CMD_NVRAM_TYPES_OUT_LEN);
	size_t outlen;
	int rc;

	BUILD_BUG_ON(MC_CMD_NVRAM_TYPES_IN_LEN != 0);

	rc = efx_mcdi_rpc(efx, MC_CMD_NVRAM_TYPES, NULL, 0,
			  outbuf, sizeof(outbuf), &outlen);
	if (rc)
		goto fail;
	if (outlen < MC_CMD_NVRAM_TYPES_OUT_LEN) {
		rc = -EIO;
		goto fail;
	}

	*nvram_types_out = MCDI_DWORD(outbuf, NVRAM_TYPES_OUT_TYPES);
	return 0;

fail:
	netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n",
		  __func__, rc);
	return rc;
}

int efx_mcdi_nvram_info(struct efx_nic *efx, unsigned int type,
			size_t *size_out, size_t *erase_size_out,
			bool *protected_out)
{
	MCDI_DECLARE_BUF(inbuf, MC_CMD_NVRAM_INFO_IN_LEN);
	MCDI_DECLARE_BUF(outbuf, MC_CMD_NVRAM_INFO_OUT_LEN);
	size_t outlen;
	int rc;

	MCDI_SET_DWORD(inbuf, NVRAM_INFO_IN_TYPE, type);

	rc = efx_mcdi_rpc(efx, MC_CMD_NVRAM_INFO, inbuf, sizeof(inbuf),
			  outbuf, sizeof(outbuf), &outlen);
	if (rc)
		goto fail;
	if (outlen < MC_CMD_NVRAM_INFO_OUT_LEN) {
		rc = -EIO;
		goto fail;
	}

	*size_out = MCDI_DWORD(outbuf, NVRAM_INFO_OUT_SIZE);
	*erase_size_out = MCDI_DWORD(outbuf, NVRAM_INFO_OUT_ERASESIZE);
	*protected_out = !!(MCDI_DWORD(outbuf, NVRAM_INFO_OUT_FLAGS) &
			    (1 << MC_CMD_NVRAM_INFO_OUT_PROTECTED_LBN));
	return 0;

fail:
	netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n", __func__, rc);
	return rc;
}

static int efx_mcdi_nvram_test(struct efx_nic *efx, unsigned int type)
{
	MCDI_DECLARE_BUF(inbuf, MC_CMD_NVRAM_TEST_IN_LEN);
	MCDI_DECLARE_BUF(outbuf, MC_CMD_NVRAM_TEST_OUT_LEN);
	int rc;

	MCDI_SET_DWORD(inbuf, NVRAM_TEST_IN_TYPE, type);

	rc = efx_mcdi_rpc(efx, MC_CMD_NVRAM_TEST, inbuf, sizeof(inbuf),
			  outbuf, sizeof(outbuf), NULL);
	if (rc)
		return rc;

	switch (MCDI_DWORD(outbuf, NVRAM_TEST_OUT_RESULT)) {
	case MC_CMD_NVRAM_TEST_PASS:
	case MC_CMD_NVRAM_TEST_NOTSUPP:
		return 0;
	default:
		return -EIO;
	}
}

int efx_mcdi_nvram_test_all(struct efx_nic *efx)
{
	u32 nvram_types;
	unsigned int type;
	int rc;

	rc = efx_mcdi_nvram_types(efx, &nvram_types);
	if (rc)
		goto fail1;

	type = 0;
	while (nvram_types != 0) {
		if (nvram_types & 1) {
			rc = efx_mcdi_nvram_test(efx, type);
			if (rc)
				goto fail2;
		}
		type++;
		nvram_types >>= 1;
	}

	return 0;

fail2:
	netif_err(efx, hw, efx->net_dev, "%s: failed type=%u\n",
		  __func__, type);
fail1:
	netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n", __func__, rc);
	return rc;
}

static int efx_mcdi_read_assertion(struct efx_nic *efx)
{
	MCDI_DECLARE_BUF(inbuf, MC_CMD_GET_ASSERTS_IN_LEN);
	MCDI_DECLARE_BUF(outbuf, MC_CMD_GET_ASSERTS_OUT_LEN);
	unsigned int flags, index;
	const char *reason;
	size_t outlen;
	int retry;
	int rc;

	/* Attempt to read any stored assertion state before we reboot
	 * the mcfw out of the assertion handler. Retry twice, once
	 * because a boot-time assertion might cause this command to fail
	 * with EINTR. And once again because GET_ASSERTS can race with
	 * MC_CMD_REBOOT running on the other port. */
	retry = 2;
	do {
		MCDI_SET_DWORD(inbuf, GET_ASSERTS_IN_CLEAR, 1);
		rc = efx_mcdi_rpc(efx, MC_CMD_GET_ASSERTS,
				  inbuf, MC_CMD_GET_ASSERTS_IN_LEN,
				  outbuf, sizeof(outbuf), &outlen);
	} while ((rc == -EINTR || rc == -EIO) && retry-- > 0);

	if (rc)
		return rc;
	if (outlen < MC_CMD_GET_ASSERTS_OUT_LEN)
		return -EIO;

	/* Print out any recorded assertion state */
	flags = MCDI_DWORD(outbuf, GET_ASSERTS_OUT_GLOBAL_FLAGS);
	if (flags == MC_CMD_GET_ASSERTS_FLAGS_NO_FAILS)
		return 0;

	reason = (flags == MC_CMD_GET_ASSERTS_FLAGS_SYS_FAIL)
		? "system-level assertion"
		: (flags == MC_CMD_GET_ASSERTS_FLAGS_THR_FAIL)
		? "thread-level assertion"
		: (flags == MC_CMD_GET_ASSERTS_FLAGS_WDOG_FIRED)
		? "watchdog reset"
		: "unknown assertion";
	netif_err(efx, hw, efx->net_dev,
		  "MCPU %s at PC = 0x%.8x in thread 0x%.8x\n", reason,
		  MCDI_DWORD(outbuf, GET_ASSERTS_OUT_SAVED_PC_OFFS),
		  MCDI_DWORD(outbuf, GET_ASSERTS_OUT_THREAD_OFFS));

	/* Print out the registers */
	for (index = 0;
	     index < MC_CMD_GET_ASSERTS_OUT_GP_REGS_OFFS_NUM;
	     index++)
		netif_err(efx, hw, efx->net_dev, "R%.2d (?): 0x%.8x\n",
			  1 + index,
			  MCDI_ARRAY_DWORD(outbuf, GET_ASSERTS_OUT_GP_REGS_OFFS,
					   index));

	return 0;
}

static void efx_mcdi_exit_assertion(struct efx_nic *efx)
{
	MCDI_DECLARE_BUF(inbuf, MC_CMD_REBOOT_IN_LEN);

	/* If the MC is running debug firmware, it might now be
	 * waiting for a debugger to attach, but we just want it to
	 * reboot. We set a flag that makes the command a no-op if it
	 * has already done so. We don't know what return code to
	 * expect (0 or -EIO), so ignore it.
	 */
	BUILD_BUG_ON(MC_CMD_REBOOT_OUT_LEN != 0);
	MCDI_SET_DWORD(inbuf, REBOOT_IN_FLAGS,
		       MC_CMD_REBOOT_FLAGS_AFTER_ASSERTION);
	(void) efx_mcdi_rpc(efx, MC_CMD_REBOOT, inbuf, MC_CMD_REBOOT_IN_LEN,
			    NULL, 0, NULL);
}

int efx_mcdi_handle_assertion(struct efx_nic *efx)
{
	int rc;

	rc = efx_mcdi_read_assertion(efx);
	if (rc)
		return rc;

	efx_mcdi_exit_assertion(efx);

	return 0;
}

void efx_mcdi_set_id_led(struct efx_nic *efx, enum efx_led_mode mode)
{
	MCDI_DECLARE_BUF(inbuf, MC_CMD_SET_ID_LED_IN_LEN);
	int rc;

	BUILD_BUG_ON(EFX_LED_OFF != MC_CMD_LED_OFF);
	BUILD_BUG_ON(EFX_LED_ON != MC_CMD_LED_ON);
	BUILD_BUG_ON(EFX_LED_DEFAULT != MC_CMD_LED_DEFAULT);

	BUILD_BUG_ON(MC_CMD_SET_ID_LED_OUT_LEN != 0);

	MCDI_SET_DWORD(inbuf, SET_ID_LED_IN_STATE, mode);

	rc = efx_mcdi_rpc(efx, MC_CMD_SET_ID_LED, inbuf, sizeof(inbuf),
			  NULL, 0, NULL);
	if (rc)
		netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n",
			  __func__, rc);
}

static int efx_mcdi_reset_port(struct efx_nic *efx)
{
	int rc = efx_mcdi_rpc(efx, MC_CMD_ENTITY_RESET, NULL, 0, NULL, 0, NULL);
	if (rc)
		netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n",
			  __func__, rc);
	return rc;
}

static int efx_mcdi_reset_mc(struct efx_nic *efx)
{
	MCDI_DECLARE_BUF(inbuf, MC_CMD_REBOOT_IN_LEN);
	int rc;

	BUILD_BUG_ON(MC_CMD_REBOOT_OUT_LEN != 0);
	MCDI_SET_DWORD(inbuf, REBOOT_IN_FLAGS, 0);
	rc = efx_mcdi_rpc(efx, MC_CMD_REBOOT, inbuf, sizeof(inbuf),
			  NULL, 0, NULL);
	/* White is black, and up is down */
	if (rc == -EIO)
		return 0;
	if (rc == 0)
		rc = -EIO;
	netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n", __func__, rc);
	return rc;
}

enum reset_type efx_mcdi_map_reset_reason(enum reset_type reason)
{
	return RESET_TYPE_RECOVER_OR_ALL;
}

int efx_mcdi_reset(struct efx_nic *efx, enum reset_type method)
{
	int rc;

	/* Recover from a failed assertion pre-reset */
	rc = efx_mcdi_handle_assertion(efx);
	if (rc)
		return rc;

	if (method == RESET_TYPE_WORLD)
		return efx_mcdi_reset_mc(efx);
	else
		return efx_mcdi_reset_port(efx);
}

static int efx_mcdi_wol_filter_set(struct efx_nic *efx, u32 type,
				   const u8 *mac, int *id_out)
{
	MCDI_DECLARE_BUF(inbuf, MC_CMD_WOL_FILTER_SET_IN_LEN);
	MCDI_DECLARE_BUF(outbuf, MC_CMD_WOL_FILTER_SET_OUT_LEN);
	size_t outlen;
	int rc;

	MCDI_SET_DWORD(inbuf, WOL_FILTER_SET_IN_WOL_TYPE, type);
	MCDI_SET_DWORD(inbuf, WOL_FILTER_SET_IN_FILTER_MODE,
		       MC_CMD_FILTER_MODE_SIMPLE);
	memcpy(MCDI_PTR(inbuf, WOL_FILTER_SET_IN_MAGIC_MAC), mac, ETH_ALEN);

	rc = efx_mcdi_rpc(efx, MC_CMD_WOL_FILTER_SET, inbuf, sizeof(inbuf),
			  outbuf, sizeof(outbuf), &outlen);
	if (rc)
		goto fail;

	if (outlen < MC_CMD_WOL_FILTER_SET_OUT_LEN) {
		rc = -EIO;
		goto fail;
	}

	*id_out = (int)MCDI_DWORD(outbuf, WOL_FILTER_SET_OUT_FILTER_ID);

	return 0;

fail:
	*id_out = -1;
	netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n", __func__, rc);
	return rc;
}

int
efx_mcdi_wol_filter_set_magic(struct efx_nic *efx, const u8 *mac, int *id_out)
{
	return efx_mcdi_wol_filter_set(efx, MC_CMD_WOL_TYPE_MAGIC, mac, id_out);
}

int efx_mcdi_wol_filter_get_magic(struct efx_nic *efx, int *id_out)
{
	MCDI_DECLARE_BUF(outbuf, MC_CMD_WOL_FILTER_GET_OUT_LEN);
	size_t outlen;
	int rc;

	rc = efx_mcdi_rpc(efx, MC_CMD_WOL_FILTER_GET, NULL, 0,
			  outbuf, sizeof(outbuf), &outlen);
	if (rc)
		goto fail;

	if (outlen < MC_CMD_WOL_FILTER_GET_OUT_LEN) {
		rc = -EIO;
		goto fail;
	}

	*id_out = (int)MCDI_DWORD(outbuf, WOL_FILTER_GET_OUT_FILTER_ID);

	return 0;

fail:
	*id_out = -1;
	netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n", __func__, rc);
	return rc;
}

int efx_mcdi_wol_filter_remove(struct efx_nic *efx, int id)
{
	MCDI_DECLARE_BUF(inbuf, MC_CMD_WOL_FILTER_REMOVE_IN_LEN);
	int rc;

	MCDI_SET_DWORD(inbuf, WOL_FILTER_REMOVE_IN_FILTER_ID, (u32)id);

	rc = efx_mcdi_rpc(efx, MC_CMD_WOL_FILTER_REMOVE, inbuf, sizeof(inbuf),
			  NULL, 0, NULL);
	if (rc)
		goto fail;

	return 0;

fail:
	netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n", __func__, rc);
	return rc;
}

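/* Request a flush of every RX queue that has a flush pending, using a
 * single MC_CMD_FLUSH_RX_QUEUES request.
 */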
int efx_mcdi_flush_rxqs(struct efx_nic *efx)
{
	struct efx_channel *channel;
	struct efx_rx_queue *rx_queue;
	MCDI_DECLARE_BUF(inbuf,
			 MC_CMD_FLUSH_RX_QUEUES_IN_LEN(EFX_MAX_CHANNELS));
	int rc, count;

	BUILD_BUG_ON(EFX_MAX_CHANNELS >
		     MC_CMD_FLUSH_RX_QUEUES_IN_QID_OFST_MAXNUM);

	count = 0;
	efx_for_each_channel(channel, efx) {
		efx_for_each_channel_rx_queue(rx_queue, channel) {
			if (rx_queue->flush_pending) {
				rx_queue->flush_pending = false;
				atomic_dec(&efx->rxq_flush_pending);
				MCDI_SET_ARRAY_DWORD(
					inbuf, FLUSH_RX_QUEUES_IN_QID_OFST,
					count, efx_rx_queue_index(rx_queue));
				count++;
			}
		}
	}

	rc = efx_mcdi_rpc(efx, MC_CMD_FLUSH_RX_QUEUES, inbuf,
			  MC_CMD_FLUSH_RX_QUEUES_IN_LEN(count), NULL, 0, NULL);
	WARN_ON(rc < 0);

	return rc;
}

int efx_mcdi_wol_filter_reset(struct efx_nic *efx)
{
	int rc;

	rc = efx_mcdi_rpc(efx, MC_CMD_WOL_FILTER_RESET, NULL, 0, NULL, 0, NULL);
	if (rc)
		goto fail;

	return 0;

fail:
	netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n", __func__, rc);
	return rc;
}

#ifdef CONFIG_SFC_MTD

#define EFX_MCDI_NVRAM_LEN_MAX 128

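/* Tell the MC that we are about to modify the given NVRAM partition */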
static int efx_mcdi_nvram_update_start(struct efx_nic *efx, unsigned int type)
{
	MCDI_DECLARE_BUF(inbuf, MC_CMD_NVRAM_UPDATE_START_IN_LEN);
	int rc;

	MCDI_SET_DWORD(inbuf, NVRAM_UPDATE_START_IN_TYPE, type);

	BUILD_BUG_ON(MC_CMD_NVRAM_UPDATE_START_OUT_LEN != 0);

	rc = efx_mcdi_rpc(efx, MC_CMD_NVRAM_UPDATE_START, inbuf, sizeof(inbuf),
			  NULL, 0, NULL);
	if (rc)
		goto fail;

	return 0;

fail:
	netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n", __func__, rc);
	return rc;
}

static int efx_mcdi_nvram_read(struct efx_nic *efx, unsigned int type,
			       loff_t offset, u8 *buffer, size_t length)
{
	MCDI_DECLARE_BUF(inbuf, MC_CMD_NVRAM_READ_IN_LEN);
	MCDI_DECLARE_BUF(outbuf,
			 MC_CMD_NVRAM_READ_OUT_LEN(EFX_MCDI_NVRAM_LEN_MAX));
	size_t outlen;
	int rc;

	MCDI_SET_DWORD(inbuf, NVRAM_READ_IN_TYPE, type);
	MCDI_SET_DWORD(inbuf, NVRAM_READ_IN_OFFSET, offset);
	MCDI_SET_DWORD(inbuf, NVRAM_READ_IN_LENGTH, length);

	rc = efx_mcdi_rpc(efx, MC_CMD_NVRAM_READ, inbuf, sizeof(inbuf),
			  outbuf, sizeof(outbuf), &outlen);
	if (rc)
		goto fail;

	memcpy(buffer, MCDI_PTR(outbuf, NVRAM_READ_OUT_READ_BUFFER), length);
	return 0;

fail:
	netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n", __func__, rc);
	return rc;
}

static int efx_mcdi_nvram_write(struct efx_nic *efx, unsigned int type,
				loff_t offset, const u8 *buffer, size_t length)
{
	MCDI_DECLARE_BUF(inbuf,
			 MC_CMD_NVRAM_WRITE_IN_LEN(EFX_MCDI_NVRAM_LEN_MAX));
	int rc;

	MCDI_SET_DWORD(inbuf, NVRAM_WRITE_IN_TYPE, type);
	MCDI_SET_DWORD(inbuf, NVRAM_WRITE_IN_OFFSET, offset);
	MCDI_SET_DWORD(inbuf, NVRAM_WRITE_IN_LENGTH, length);
	memcpy(MCDI_PTR(inbuf, NVRAM_WRITE_IN_WRITE_BUFFER), buffer, length);

	BUILD_BUG_ON(MC_CMD_NVRAM_WRITE_OUT_LEN != 0);

	rc = efx_mcdi_rpc(efx, MC_CMD_NVRAM_WRITE, inbuf,
			  ALIGN(MC_CMD_NVRAM_WRITE_IN_LEN(length), 4),
			  NULL, 0, NULL);
	if (rc)
		goto fail;

	return 0;

fail:
	netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n", __func__, rc);
	return rc;
}

static int efx_mcdi_nvram_erase(struct efx_nic *efx, unsigned int type,
				loff_t offset, size_t length)
{
	MCDI_DECLARE_BUF(inbuf, MC_CMD_NVRAM_ERASE_IN_LEN);
	int rc;

	MCDI_SET_DWORD(inbuf, NVRAM_ERASE_IN_TYPE, type);
	MCDI_SET_DWORD(inbuf, NVRAM_ERASE_IN_OFFSET, offset);
	MCDI_SET_DWORD(inbuf, NVRAM_ERASE_IN_LENGTH, length);

	BUILD_BUG_ON(MC_CMD_NVRAM_ERASE_OUT_LEN != 0);

	rc = efx_mcdi_rpc(efx, MC_CMD_NVRAM_ERASE, inbuf, sizeof(inbuf),
			  NULL, 0, NULL);
	if (rc)
		goto fail;

	return 0;

fail:
	netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n", __func__, rc);
	return rc;
}

static int efx_mcdi_nvram_update_finish(struct efx_nic *efx, unsigned int type)
{
	MCDI_DECLARE_BUF(inbuf, MC_CMD_NVRAM_UPDATE_FINISH_IN_LEN);
	int rc;

	MCDI_SET_DWORD(inbuf, NVRAM_UPDATE_FINISH_IN_TYPE, type);

	BUILD_BUG_ON(MC_CMD_NVRAM_UPDATE_FINISH_OUT_LEN != 0);

	rc = efx_mcdi_rpc(efx, MC_CMD_NVRAM_UPDATE_FINISH, inbuf, sizeof(inbuf),
			  NULL, 0, NULL);
	if (rc)
		goto fail;

	return 0;

fail:
	netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n", __func__, rc);
	return rc;
}

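/* MTD read op: read from an NVRAM partition in chunks of at most
 * EFX_MCDI_NVRAM_LEN_MAX bytes per MCDI request.
 */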
int efx_mcdi_mtd_read(struct mtd_info *mtd, loff_t start,
		      size_t len, size_t *retlen, u8 *buffer)
{
	struct efx_mcdi_mtd_partition *part = to_efx_mcdi_mtd_partition(mtd);
	struct efx_nic *efx = mtd->priv;
	loff_t offset = start;
	loff_t end = min_t(loff_t, start + len, mtd->size);
	size_t chunk;
	int rc = 0;

	while (offset < end) {
		chunk = min_t(size_t, end - offset, EFX_MCDI_NVRAM_LEN_MAX);
		rc = efx_mcdi_nvram_read(efx, part->nvram_type, offset,
					 buffer, chunk);
		if (rc)
			goto out;
		offset += chunk;
		buffer += chunk;
	}
out:
	*retlen = offset - start;
	return rc;
}

int efx_mcdi_mtd_erase(struct mtd_info *mtd, loff_t start, size_t len)
{
	struct efx_mcdi_mtd_partition *part = to_efx_mcdi_mtd_partition(mtd);
	struct efx_nic *efx = mtd->priv;
	loff_t offset = start & ~((loff_t)(mtd->erasesize - 1));
	loff_t end = min_t(loff_t, start + len, mtd->size);
	size_t chunk = part->common.mtd.erasesize;
	int rc = 0;

	if (!part->updating) {
		rc = efx_mcdi_nvram_update_start(efx, part->nvram_type);
		if (rc)
			goto out;
		part->updating = true;
	}

	/* The MCDI interface can in fact do multiple erase blocks at once;
	 * but erasing may be slow, so we make multiple calls here to avoid
	 * tripping the MCDI RPC timeout. */
	while (offset < end) {
		rc = efx_mcdi_nvram_erase(efx, part->nvram_type, offset,
					  chunk);
		if (rc)
			goto out;
		offset += chunk;
	}
out:
	return rc;
}

int efx_mcdi_mtd_write(struct mtd_info *mtd, loff_t start,
		       size_t len, size_t *retlen, const u8 *buffer)
{
	struct efx_mcdi_mtd_partition *part = to_efx_mcdi_mtd_partition(mtd);
	struct efx_nic *efx = mtd->priv;
	loff_t offset = start;
	loff_t end = min_t(loff_t, start + len, mtd->size);
	size_t chunk;
	int rc = 0;

	if (!part->updating) {
		rc = efx_mcdi_nvram_update_start(efx, part->nvram_type);
		if (rc)
			goto out;
		part->updating = true;
	}

	while (offset < end) {
		chunk = min_t(size_t, end - offset, EFX_MCDI_NVRAM_LEN_MAX);
		rc = efx_mcdi_nvram_write(efx, part->nvram_type, offset,
					  buffer, chunk);
		if (rc)
			goto out;
		offset += chunk;
		buffer += chunk;
	}
out:
	*retlen = offset - start;
	return rc;
}

int efx_mcdi_mtd_sync(struct mtd_info *mtd)
{
	struct efx_mcdi_mtd_partition *part = to_efx_mcdi_mtd_partition(mtd);
	struct efx_nic *efx = mtd->priv;
	int rc = 0;

	if (part->updating) {
		part->updating = false;
		rc = efx_mcdi_nvram_update_finish(efx, part->nvram_type);
	}

	return rc;
}

void efx_mcdi_mtd_rename(struct efx_mtd_partition *part)
{
	struct efx_mcdi_mtd_partition *mcdi_part =
		container_of(part, struct efx_mcdi_mtd_partition, common);
	struct efx_nic *efx = part->mtd.priv;

	snprintf(part->name, sizeof(part->name), "%s %s:%02x",
		 efx->name, part->type_name, mcdi_part->fw_subtype);
}

#endif /* CONFIG_SFC_MTD */