/*******************************************************************************
 *
 * Intel Ethernet Controller XL710 Family Linux Driver
 * Copyright(c) 2013 - 2014 Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along
 * with this program.  If not, see <http://www.gnu.org/licenses/>.
 *
 * The full GNU General Public License is included in this distribution in
 * the file called "COPYING".
 *
 * Contact Information:
 * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
 *
 ******************************************************************************/

#include "i40e_prototype.h"

/**
 * i40e_init_nvm - Initialize the NVM info structure
 * @hw: pointer to the HW structure
 *
 * Sets up the NVM info structure. Should be called once per NVM
 * initialization, e.g. inside i40e_init_shared_code().
 * Note that the term NVM is used here (and in all methods covered in this
 * file) as an equivalent of the FLASH part mapped into the Shadow RAM (SR);
 * the FLASH is always accessed through the Shadow RAM.
 **/
i40e_status i40e_init_nvm(struct i40e_hw *hw)
{
	struct i40e_nvm_info *nvm = &hw->nvm;
	i40e_status ret_code = 0;
	u32 fla, gens;
	u8 sr_size;

	/* The SR size is stored regardless of the nvm programming mode
	 * as the blank mode may be used in the factory line.
	 */
	gens = rd32(hw, I40E_GLNVM_GENS);
	sr_size = ((gens & I40E_GLNVM_GENS_SR_SIZE_MASK) >>
		   I40E_GLNVM_GENS_SR_SIZE_SHIFT);
	/* Switching to words (sr_size contains power of 2KB) */
	nvm->sr_size = BIT(sr_size) * I40E_SR_WORDS_IN_1KB;
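	/* Worked example (illustrative only, assuming I40E_SR_WORDS_IN_1KB
	 * is 512, i.e. 1KB holds 512 16-bit words): a GENS SR_SIZE field of
	 * 6 gives BIT(6) * 512 = 32768 words, the 64KB Shadow RAM that
	 * i40e_calc_nvm_checksum() below assumes.
	 */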

	/* Check if we are in the normal or blank NVM programming mode */
	fla = rd32(hw, I40E_GLNVM_FLA);
	if (fla & I40E_GLNVM_FLA_LOCKED_MASK) { /* Normal programming mode */
		/* Max NVM timeout */
		nvm->timeout = I40E_MAX_NVM_TIMEOUT;
		nvm->blank_nvm_mode = false;
	} else { /* Blank programming mode */
		nvm->blank_nvm_mode = true;
		ret_code = I40E_ERR_NVM_BLANK_MODE;
		i40e_debug(hw, I40E_DEBUG_NVM, "NVM init error: unsupported blank mode.\n");
	}

	return ret_code;
}

/**
 * i40e_acquire_nvm - Generic request for acquiring the NVM ownership
 * @hw: pointer to the HW structure
 * @access: NVM access type (read or write)
 *
 * This function will request NVM ownership (read or write, as given by
 * @access) via the proper Admin Command.
 **/
i40e_status i40e_acquire_nvm(struct i40e_hw *hw,
			     enum i40e_aq_resource_access_type access)
{
	i40e_status ret_code = 0;
	u64 gtime, timeout;
	u64 time_left = 0;

	if (hw->nvm.blank_nvm_mode)
		goto i40e_i40e_acquire_nvm_exit;

	ret_code = i40e_aq_request_resource(hw, I40E_NVM_RESOURCE_ID, access,
					    0, &time_left, NULL);
	/* Reading the Global Device Timer */
	gtime = rd32(hw, I40E_GLVFGEN_TIMER);

	/* Store the timeout */
	hw->nvm.hw_semaphore_timeout = I40E_MS_TO_GTIME(time_left) + gtime;

	if (ret_code)
		i40e_debug(hw, I40E_DEBUG_NVM,
			   "NVM acquire type %d failed time_left=%llu ret=%d aq_err=%d\n",
			   access, time_left, ret_code, hw->aq.asq_last_status);

	if (ret_code && time_left) {
		/* Poll until the current NVM owner times out */
		timeout = I40E_MS_TO_GTIME(I40E_MAX_NVM_TIMEOUT) + gtime;
		while ((gtime < timeout) && time_left) {
			usleep_range(10000, 20000);
			gtime = rd32(hw, I40E_GLVFGEN_TIMER);
			ret_code = i40e_aq_request_resource(hw,
							    I40E_NVM_RESOURCE_ID,
							    access, 0, &time_left,
							    NULL);
			if (!ret_code) {
				hw->nvm.hw_semaphore_timeout =
					I40E_MS_TO_GTIME(time_left) + gtime;
				break;
			}
		}
		if (ret_code) {
			hw->nvm.hw_semaphore_timeout = 0;
			i40e_debug(hw, I40E_DEBUG_NVM,
				   "NVM acquire timed out, wait %llu ms before trying again. status=%d aq_err=%d\n",
				   time_left, ret_code, hw->aq.asq_last_status);
		}
	}

i40e_i40e_acquire_nvm_exit:
	return ret_code;
}

/**
 * i40e_release_nvm - Generic request for releasing the NVM ownership
 * @hw: pointer to the HW structure
 *
 * This function will release the NVM resource via the proper Admin Command.
 **/
void i40e_release_nvm(struct i40e_hw *hw)
{
	i40e_status ret_code = I40E_SUCCESS;
	u32 total_delay = 0;

	if (hw->nvm.blank_nvm_mode)
		return;

	ret_code = i40e_aq_release_resource(hw, I40E_NVM_RESOURCE_ID, 0, NULL);

	/* there are some rare cases when trying to release the resource
	 * results in an admin queue timeout, so handle them correctly
	 */
	while ((ret_code == I40E_ERR_ADMIN_QUEUE_TIMEOUT) &&
	       (total_delay < hw->aq.asq_cmd_timeout)) {
		usleep_range(1000, 2000);
		ret_code = i40e_aq_release_resource(hw,
						    I40E_NVM_RESOURCE_ID,
						    0, NULL);
		total_delay++;
	}
}
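
/* Illustrative usage sketch (not a function in this driver): callers that
 * need exclusive Shadow RAM access bracket their work with the two helpers
 * above, mirroring what i40e_validate_nvm_checksum() does later in this
 * file:
 *
 *	ret_code = i40e_acquire_nvm(hw, I40E_RESOURCE_READ);
 *	if (!ret_code) {
 *		... access the Shadow RAM ...
 *		i40e_release_nvm(hw);
 *	}
 */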

/**
 * i40e_poll_sr_srctl_done_bit - Polls the GLNVM_SRCTL done bit
 * @hw: pointer to the HW structure
 *
 * Polls the SRCTL Shadow RAM register done bit.
 **/
static i40e_status i40e_poll_sr_srctl_done_bit(struct i40e_hw *hw)
{
	i40e_status ret_code = I40E_ERR_TIMEOUT;
	u32 srctl, wait_cnt;

	/* Poll the I40E_GLNVM_SRCTL until the done bit is set */
	for (wait_cnt = 0; wait_cnt < I40E_SRRD_SRCTL_ATTEMPTS; wait_cnt++) {
		srctl = rd32(hw, I40E_GLNVM_SRCTL);
		if (srctl & I40E_GLNVM_SRCTL_DONE_MASK) {
			ret_code = 0;
			break;
		}
		udelay(5);
	}
	if (ret_code == I40E_ERR_TIMEOUT)
		i40e_debug(hw, I40E_DEBUG_NVM, "Done bit in GLNVM_SRCTL not set");
	return ret_code;
}

/**
 * i40e_read_nvm_word_srctl - Reads Shadow RAM via SRCTL register
 * @hw: pointer to the HW structure
 * @offset: offset of the Shadow RAM word to read (0x000000 - 0x001FFF)
 * @data: word read from the Shadow RAM
 *
 * Reads one 16 bit word from the Shadow RAM using the GLNVM_SRCTL register.
 **/
static i40e_status i40e_read_nvm_word_srctl(struct i40e_hw *hw, u16 offset,
					    u16 *data)
{
	i40e_status ret_code = I40E_ERR_TIMEOUT;
	u32 sr_reg;

	if (offset >= hw->nvm.sr_size) {
		i40e_debug(hw, I40E_DEBUG_NVM,
			   "NVM read error: offset %d beyond Shadow RAM limit %d\n",
			   offset, hw->nvm.sr_size);
		ret_code = I40E_ERR_PARAM;
		goto read_nvm_exit;
	}

	/* Poll the done bit first */
	ret_code = i40e_poll_sr_srctl_done_bit(hw);
	if (!ret_code) {
		/* Write the address and start reading */
		sr_reg = ((u32)offset << I40E_GLNVM_SRCTL_ADDR_SHIFT) |
			 BIT(I40E_GLNVM_SRCTL_START_SHIFT);
		wr32(hw, I40E_GLNVM_SRCTL, sr_reg);

		/* Poll I40E_GLNVM_SRCTL until the done bit is set */
		ret_code = i40e_poll_sr_srctl_done_bit(hw);
		if (!ret_code) {
			sr_reg = rd32(hw, I40E_GLNVM_SRDATA);
			*data = (u16)((sr_reg &
				       I40E_GLNVM_SRDATA_RDDATA_MASK)
				      >> I40E_GLNVM_SRDATA_RDDATA_SHIFT);
		}
	}
	if (ret_code)
		i40e_debug(hw, I40E_DEBUG_NVM,
			   "NVM read error: Couldn't access Shadow RAM address: 0x%x\n",
			   offset);

read_nvm_exit:
	return ret_code;
}

/**
 * i40e_read_nvm_aq - Read Shadow RAM.
 * @hw: pointer to the HW structure.
 * @module_pointer: module pointer location in words from the NVM beginning
 * @offset: offset in words from module start
 * @words: number of words to read
 * @data: buffer to store the words read from the Shadow RAM
 * @last_command: tells the AdminQ that this is the last command
 *
 * Reads a 16 bit words buffer from the Shadow RAM using the AdminQ command.
 **/
static i40e_status i40e_read_nvm_aq(struct i40e_hw *hw, u8 module_pointer,
				    u32 offset, u16 words, void *data,
				    bool last_command)
{
	i40e_status ret_code = I40E_ERR_NVM;
	struct i40e_asq_cmd_details cmd_details;

	memset(&cmd_details, 0, sizeof(cmd_details));
	cmd_details.wb_desc = &hw->nvm_wb_desc;

	/* Here we are checking the SR limit only for the flat memory model.
	 * We cannot do it for the module-based model, as we did not acquire
	 * the NVM resource yet (we cannot get the module pointer value).
	 * Firmware will check the module-based model.
	 */
	if ((offset + words) > hw->nvm.sr_size)
		i40e_debug(hw, I40E_DEBUG_NVM,
			   "NVM read error: offset %d beyond Shadow RAM limit %d\n",
			   (offset + words), hw->nvm.sr_size);
	else if (words > I40E_SR_SECTOR_SIZE_IN_WORDS)
		/* We can read only up to 4KB (one sector) in one AQ read */
		i40e_debug(hw, I40E_DEBUG_NVM,
			   "NVM read fail error: tried to read %d words, limit is %d.\n",
			   words, I40E_SR_SECTOR_SIZE_IN_WORDS);
	else if (((offset + (words - 1)) / I40E_SR_SECTOR_SIZE_IN_WORDS)
		 != (offset / I40E_SR_SECTOR_SIZE_IN_WORDS))
		/* A single read cannot spread over two sectors */
		i40e_debug(hw, I40E_DEBUG_NVM,
			   "NVM read error: cannot spread over two sectors in a single read offset=%d words=%d\n",
			   offset, words);
	else
		ret_code = i40e_aq_read_nvm(hw, module_pointer,
					    2 * offset,  /*bytes*/
					    2 * words,  /*bytes*/
					    data, last_command, &cmd_details);

	return ret_code;
}

/**
 * i40e_read_nvm_word_aq - Reads Shadow RAM via AQ
 * @hw: pointer to the HW structure
 * @offset: offset of the Shadow RAM word to read (0x000000 - 0x001FFF)
 * @data: word read from the Shadow RAM
 *
 * Reads one 16 bit word from the Shadow RAM using the AdminQ
 **/
static i40e_status i40e_read_nvm_word_aq(struct i40e_hw *hw, u16 offset,
					 u16 *data)
{
	i40e_status ret_code = I40E_ERR_TIMEOUT;

	ret_code = i40e_read_nvm_aq(hw, 0x0, offset, 1, data, true);
	*data = le16_to_cpu(*(__le16 *)data);

	return ret_code;
}

/**
 * __i40e_read_nvm_word - Reads nvm word, assumes caller does the locking
 * @hw: pointer to the HW structure
 * @offset: offset of the Shadow RAM word to read (0x000000 - 0x001FFF)
 * @data: word read from the Shadow RAM
 *
 * Reads one 16 bit word from the Shadow RAM.
 *
 * Do not use this function except in cases where the nvm lock is already
 * taken via i40e_acquire_nvm().
 **/
static i40e_status __i40e_read_nvm_word(struct i40e_hw *hw,
					u16 offset, u16 *data)
{
	if (hw->flags & I40E_HW_FLAG_AQ_SRCTL_ACCESS_ENABLE)
		return i40e_read_nvm_word_aq(hw, offset, data);

	return i40e_read_nvm_word_srctl(hw, offset, data);
}

/**
 * i40e_read_nvm_word - Reads an NVM word and acquires the lock if necessary
 * @hw: pointer to the HW structure
 * @offset: offset of the Shadow RAM word to read (0x000000 - 0x001FFF)
 * @data: word read from the Shadow RAM
 *
 * Reads one 16 bit word from the Shadow RAM.
 **/
i40e_status i40e_read_nvm_word(struct i40e_hw *hw, u16 offset,
			       u16 *data)
{
	i40e_status ret_code = 0;

	if (hw->flags & I40E_HW_FLAG_NVM_READ_REQUIRES_LOCK)
		ret_code = i40e_acquire_nvm(hw, I40E_RESOURCE_READ);
	if (ret_code)
		return ret_code;

	ret_code = __i40e_read_nvm_word(hw, offset, data);

	if (hw->flags & I40E_HW_FLAG_NVM_READ_REQUIRES_LOCK)
		i40e_release_nvm(hw);

	return ret_code;
}
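
/* Illustrative usage sketch: reading the software checksum word through the
 * public helper above (the offset macro is the one used later in this file;
 * error handling is abbreviated):
 *
 *	u16 checksum;
 *
 *	if (!i40e_read_nvm_word(hw, I40E_SR_SW_CHECKSUM_WORD, &checksum))
 *		i40e_debug(hw, I40E_DEBUG_NVM,
 *			   "SR checksum word: 0x%04x\n", checksum);
 */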

/**
 * i40e_read_nvm_buffer_srctl - Reads Shadow RAM buffer via SRCTL register
 * @hw: pointer to the HW structure
 * @offset: offset of the Shadow RAM word to read (0x000000 - 0x001FFF).
 * @words: (in) number of words to read; (out) number of words actually read
 * @data: words read from the Shadow RAM
 *
 * Reads 16 bit words (data buffer) from the Shadow RAM, one word at a time,
 * using the i40e_read_nvm_word_srctl() method.
 **/
static i40e_status i40e_read_nvm_buffer_srctl(struct i40e_hw *hw, u16 offset,
					      u16 *words, u16 *data)
{
	i40e_status ret_code = 0;
	u16 index, word;

	/* Loop through the selected region */
	for (word = 0; word < *words; word++) {
		index = offset + word;
		ret_code = i40e_read_nvm_word_srctl(hw, index, &data[word]);
		if (ret_code)
			break;
	}

	/* Update the number of words read from the Shadow RAM */
	*words = word;

	return ret_code;
}

/**
 * i40e_read_nvm_buffer_aq - Reads Shadow RAM buffer via AQ
 * @hw: pointer to the HW structure
 * @offset: offset of the Shadow RAM word to read (0x000000 - 0x001FFF).
 * @words: (in) number of words to read; (out) number of words actually read
 * @data: words read from the Shadow RAM
 *
 * Reads 16 bit words (data buffer) from the Shadow RAM using the
 * i40e_read_nvm_aq() method, splitting the request into sector-sized
 * chunks. The caller is expected to hold any required NVM ownership.
 **/
static i40e_status i40e_read_nvm_buffer_aq(struct i40e_hw *hw, u16 offset,
					   u16 *words, u16 *data)
{
	i40e_status ret_code;
	u16 read_size;
	bool last_cmd = false;
	u16 words_read = 0;
	u16 i = 0;

	do {
		/* Calculate the number of words we should read in this step.
		 * The FVL AQ does not allow reading more than one page at a
		 * time or crossing page boundaries.
		 */
		if (offset % I40E_SR_SECTOR_SIZE_IN_WORDS)
			read_size = min(*words,
					(u16)(I40E_SR_SECTOR_SIZE_IN_WORDS -
					(offset % I40E_SR_SECTOR_SIZE_IN_WORDS)));
		else
			read_size = min((*words - words_read),
					I40E_SR_SECTOR_SIZE_IN_WORDS);
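		/* Worked example (illustrative, assuming the 4KB sector is
		 * 0x800 words): for offset 0x7F0 and *words 0x40, the first
		 * pass reads 0x800 - 0x7F0 = 0x10 words up to the sector
		 * boundary and the second pass reads the remaining 0x30
		 * words starting at offset 0x800.
		 */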

		/* Check if this is last command, if so set proper flag */
		if ((words_read + read_size) >= *words)
			last_cmd = true;

		ret_code = i40e_read_nvm_aq(hw, 0x0, offset, read_size,
					    data + words_read, last_cmd);
		if (ret_code)
			goto read_nvm_buffer_aq_exit;

		/* Increment counter for words already read and move offset to
		 * new read location
		 */
		words_read += read_size;
		offset += read_size;
	} while (words_read < *words);

	for (i = 0; i < *words; i++)
		data[i] = le16_to_cpu(((__le16 *)data)[i]);

read_nvm_buffer_aq_exit:
	*words = words_read;
	return ret_code;
}

/**
 * __i40e_read_nvm_buffer - Reads nvm buffer, caller must acquire lock
 * @hw: pointer to the HW structure
 * @offset: offset of the Shadow RAM word to read (0x000000 - 0x001FFF).
 * @words: (in) number of words to read; (out) number of words actually read
 * @data: words read from the Shadow RAM
 *
 * Reads 16 bit words (data buffer) from the Shadow RAM, using either the
 * AQ or the SRCTL access method depending on the hardware flags.
 **/
static i40e_status __i40e_read_nvm_buffer(struct i40e_hw *hw,
					  u16 offset, u16 *words,
					  u16 *data)
{
	if (hw->flags & I40E_HW_FLAG_AQ_SRCTL_ACCESS_ENABLE)
		return i40e_read_nvm_buffer_aq(hw, offset, words, data);

	return i40e_read_nvm_buffer_srctl(hw, offset, words, data);
}

/**
 * i40e_write_nvm_aq - Writes Shadow RAM.
 * @hw: pointer to the HW structure.
 * @module_pointer: module pointer location in words from the NVM beginning
 * @offset: offset in words from module start
 * @words: number of words to write
 * @data: buffer with words to write to the Shadow RAM
 * @last_command: tells the AdminQ that this is the last command
 *
 * Writes a 16 bit words buffer to the Shadow RAM using the admin command.
 **/
static i40e_status i40e_write_nvm_aq(struct i40e_hw *hw, u8 module_pointer,
				     u32 offset, u16 words, void *data,
				     bool last_command)
{
	i40e_status ret_code = I40E_ERR_NVM;
	struct i40e_asq_cmd_details cmd_details;

	memset(&cmd_details, 0, sizeof(cmd_details));
	cmd_details.wb_desc = &hw->nvm_wb_desc;

	/* Here we are checking the SR limit only for the flat memory model.
	 * We cannot do it for the module-based model, as we did not acquire
	 * the NVM resource yet (we cannot get the module pointer value).
	 * Firmware will check the module-based model.
	 */
	if ((offset + words) > hw->nvm.sr_size)
		i40e_debug(hw, I40E_DEBUG_NVM,
			   "NVM write error: offset %d beyond Shadow RAM limit %d\n",
			   (offset + words), hw->nvm.sr_size);
	else if (words > I40E_SR_SECTOR_SIZE_IN_WORDS)
		/* We can write only up to 4KB (one sector), in one AQ write */
		i40e_debug(hw, I40E_DEBUG_NVM,
			   "NVM write fail error: tried to write %d words, limit is %d.\n",
			   words, I40E_SR_SECTOR_SIZE_IN_WORDS);
	else if (((offset + (words - 1)) / I40E_SR_SECTOR_SIZE_IN_WORDS)
		 != (offset / I40E_SR_SECTOR_SIZE_IN_WORDS))
		/* A single write cannot spread over two sectors */
		i40e_debug(hw, I40E_DEBUG_NVM,
			   "NVM write error: cannot spread over two sectors in a single write offset=%d words=%d\n",
			   offset, words);
	else
		ret_code = i40e_aq_update_nvm(hw, module_pointer,
					      2 * offset,  /*bytes*/
					      2 * words,  /*bytes*/
					      data, last_command, &cmd_details);

	return ret_code;
}

/**
 * i40e_calc_nvm_checksum - Calculates and returns the checksum
 * @hw: pointer to hardware structure
 * @checksum: pointer to the checksum
 *
 * This function calculates the SW checksum that covers the whole 64kB shadow
 * RAM except the VPD and PCIe ALT Auto-load modules. The structure and size
 * of the VPD area are customer specific and unknown, so this function skips
 * the maximum possible VPD size (1kB).
 **/
static i40e_status i40e_calc_nvm_checksum(struct i40e_hw *hw,
					  u16 *checksum)
{
	i40e_status ret_code;
	struct i40e_virt_mem vmem;
	u16 pcie_alt_module = 0;
	u16 checksum_local = 0;
	u16 vpd_module = 0;
	u16 *data;
	u16 i = 0;

	ret_code = i40e_allocate_virt_mem(hw, &vmem,
					  I40E_SR_SECTOR_SIZE_IN_WORDS * sizeof(u16));
	if (ret_code)
		goto i40e_calc_nvm_checksum_exit;
	data = (u16 *)vmem.va;

	/* read pointer to VPD area */
	ret_code = __i40e_read_nvm_word(hw, I40E_SR_VPD_PTR, &vpd_module);
	if (ret_code) {
		ret_code = I40E_ERR_NVM_CHECKSUM;
		goto i40e_calc_nvm_checksum_exit;
	}

	/* read pointer to PCIe Alt Auto-load module */
	ret_code = __i40e_read_nvm_word(hw, I40E_SR_PCIE_ALT_AUTO_LOAD_PTR,
					&pcie_alt_module);
	if (ret_code) {
		ret_code = I40E_ERR_NVM_CHECKSUM;
		goto i40e_calc_nvm_checksum_exit;
	}

	/* Calculate SW checksum that covers the whole 64kB shadow RAM
	 * except the VPD and PCIe ALT Auto-load modules
	 */
	for (i = 0; i < hw->nvm.sr_size; i++) {
		/* Read SR page */
		if ((i % I40E_SR_SECTOR_SIZE_IN_WORDS) == 0) {
			u16 words = I40E_SR_SECTOR_SIZE_IN_WORDS;

			ret_code = __i40e_read_nvm_buffer(hw, i, &words, data);
			if (ret_code) {
				ret_code = I40E_ERR_NVM_CHECKSUM;
				goto i40e_calc_nvm_checksum_exit;
			}
		}

		/* Skip Checksum word */
		if (i == I40E_SR_SW_CHECKSUM_WORD)
			continue;
		/* Skip VPD module (convert byte size to word count) */
		if ((i >= (u32)vpd_module) &&
		    (i < ((u32)vpd_module +
			  (I40E_SR_VPD_MODULE_MAX_SIZE / 2)))) {
			continue;
		}
		/* Skip PCIe ALT module (convert byte size to word count) */
		if ((i >= (u32)pcie_alt_module) &&
		    (i < ((u32)pcie_alt_module +
			  (I40E_SR_PCIE_ALT_MODULE_MAX_SIZE / 2)))) {
			continue;
		}

		checksum_local += data[i % I40E_SR_SECTOR_SIZE_IN_WORDS];
	}

	*checksum = (u16)I40E_SR_SW_CHECKSUM_BASE - checksum_local;
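	/* Illustrative relationship: if the summed words total S, the value
	 * computed here is (I40E_SR_SW_CHECKSUM_BASE - S) truncated to 16
	 * bits, so that the checksum word plus S equals
	 * I40E_SR_SW_CHECKSUM_BASE modulo 2^16; i40e_validate_nvm_checksum()
	 * below recomputes the same value and compares it with the word
	 * stored at I40E_SR_SW_CHECKSUM_WORD.
	 */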

i40e_calc_nvm_checksum_exit:
	i40e_free_virt_mem(hw, &vmem);
	return ret_code;
}

/**
 * i40e_update_nvm_checksum - Updates the NVM checksum
 * @hw: pointer to hardware structure
 *
 * NVM ownership must be acquired before calling this function and released
 * on ARQ completion event reception by caller.
 * This function will commit SR to NVM.
 **/
i40e_status i40e_update_nvm_checksum(struct i40e_hw *hw)
{
	i40e_status ret_code;
	u16 checksum;
	__le16 le_sum;

	ret_code = i40e_calc_nvm_checksum(hw, &checksum);
	if (!ret_code) {
		le_sum = cpu_to_le16(checksum);
		ret_code = i40e_write_nvm_aq(hw, 0x00, I40E_SR_SW_CHECKSUM_WORD,
					     1, &le_sum, true);
	}

	return ret_code;
}

/**
 * i40e_validate_nvm_checksum - Validate EEPROM checksum
 * @hw: pointer to hardware structure
 * @checksum: calculated checksum
 *
 * Performs checksum calculation and validates the NVM SW checksum. If the
 * caller does not need checksum, the value can be NULL.
 **/
i40e_status i40e_validate_nvm_checksum(struct i40e_hw *hw,
				       u16 *checksum)
{
	i40e_status ret_code = 0;
	u16 checksum_sr = 0;
	u16 checksum_local = 0;

	/* We must acquire the NVM lock in order to correctly synchronize the
	 * NVM accesses across multiple PFs. Without doing so it is possible
	 * for one of the PFs to read invalid data potentially indicating that
	 * the checksum is invalid.
	 */
	ret_code = i40e_acquire_nvm(hw, I40E_RESOURCE_READ);
	if (ret_code)
		return ret_code;
	ret_code = i40e_calc_nvm_checksum(hw, &checksum_local);
	__i40e_read_nvm_word(hw, I40E_SR_SW_CHECKSUM_WORD, &checksum_sr);
	i40e_release_nvm(hw);
	if (ret_code)
		return ret_code;

	/* Verify read checksum from EEPROM is the same as
	 * calculated checksum
	 */
	if (checksum_local != checksum_sr)
		ret_code = I40E_ERR_NVM_CHECKSUM;

	/* If the user cares, return the calculated checksum */
	if (checksum)
		*checksum = checksum_local;

	return ret_code;
}
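
/* Illustrative usage sketch: callers that only care about pass/fail can pass
 * NULL for the checksum argument:
 *
 *	if (i40e_validate_nvm_checksum(hw, NULL))
 *		i40e_debug(hw, I40E_DEBUG_NVM,
 *			   "NVM checksum verification failed\n");
 */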

static i40e_status i40e_nvmupd_state_init(struct i40e_hw *hw,
					  struct i40e_nvm_access *cmd,
					  u8 *bytes, int *perrno);
static i40e_status i40e_nvmupd_state_reading(struct i40e_hw *hw,
					     struct i40e_nvm_access *cmd,
					     u8 *bytes, int *perrno);
static i40e_status i40e_nvmupd_state_writing(struct i40e_hw *hw,
					     struct i40e_nvm_access *cmd,
					     u8 *bytes, int *perrno);
static enum i40e_nvmupd_cmd i40e_nvmupd_validate_command(struct i40e_hw *hw,
						struct i40e_nvm_access *cmd,
						int *perrno);
static i40e_status i40e_nvmupd_nvm_erase(struct i40e_hw *hw,
					 struct i40e_nvm_access *cmd,
					 int *perrno);
static i40e_status i40e_nvmupd_nvm_write(struct i40e_hw *hw,
					 struct i40e_nvm_access *cmd,
					 u8 *bytes, int *perrno);
static i40e_status i40e_nvmupd_nvm_read(struct i40e_hw *hw,
					struct i40e_nvm_access *cmd,
					u8 *bytes, int *perrno);
static i40e_status i40e_nvmupd_exec_aq(struct i40e_hw *hw,
				       struct i40e_nvm_access *cmd,
				       u8 *bytes, int *perrno);
static i40e_status i40e_nvmupd_get_aq_result(struct i40e_hw *hw,
					     struct i40e_nvm_access *cmd,
					     u8 *bytes, int *perrno);
static inline u8 i40e_nvmupd_get_module(u32 val)
{
	return (u8)(val & I40E_NVM_MOD_PNT_MASK);
}
static inline u8 i40e_nvmupd_get_transaction(u32 val)
{
	return (u8)((val & I40E_NVM_TRANS_MASK) >> I40E_NVM_TRANS_SHIFT);
}
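
/* Illustrative encoding sketch: the nvm_access config word carries the module
 * pointer in its low byte and the transaction type above it, which the two
 * helpers above unpack. A user-space tool would build, for example, a Shadow
 * RAM "read, single shot" request roughly as (field and macro names as used
 * elsewhere in this file; exact mask values live in i40e_type.h):
 *
 *	cmd.command = I40E_NVM_READ;
 *	cmd.config  = (I40E_NVM_SA << I40E_NVM_TRANS_SHIFT) | module;
 */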

static const char * const i40e_nvm_update_state_str[] = {
	"I40E_NVMUPD_INVALID",
	"I40E_NVMUPD_READ_CON",
	"I40E_NVMUPD_READ_SNT",
	"I40E_NVMUPD_READ_LCB",
	"I40E_NVMUPD_READ_SA",
	"I40E_NVMUPD_WRITE_ERA",
	"I40E_NVMUPD_WRITE_CON",
	"I40E_NVMUPD_WRITE_SNT",
	"I40E_NVMUPD_WRITE_LCB",
	"I40E_NVMUPD_WRITE_SA",
	"I40E_NVMUPD_CSUM_CON",
	"I40E_NVMUPD_CSUM_SA",
	"I40E_NVMUPD_CSUM_LCB",
	"I40E_NVMUPD_STATUS",
	"I40E_NVMUPD_EXEC_AQ",
	"I40E_NVMUPD_GET_AQ_RESULT",
};

/**
 * i40e_nvmupd_command - Process an NVM update command
 * @hw: pointer to hardware structure
 * @cmd: pointer to nvm update command
 * @bytes: pointer to the data buffer
 * @perrno: pointer to return error code
 *
 * Dispatches command depending on what update state is current
 **/
i40e_status i40e_nvmupd_command(struct i40e_hw *hw,
				struct i40e_nvm_access *cmd,
				u8 *bytes, int *perrno)
{
	i40e_status status;
	enum i40e_nvmupd_cmd upd_cmd;

	/* assume success */
	*perrno = 0;

	/* early check for status command and debug msgs */
	upd_cmd = i40e_nvmupd_validate_command(hw, cmd, perrno);

	i40e_debug(hw, I40E_DEBUG_NVM, "%s state %d nvm_release_on_hold %d opc 0x%04x cmd 0x%08x config 0x%08x offset 0x%08x data_size 0x%08x\n",
		   i40e_nvm_update_state_str[upd_cmd],
		   hw->nvmupd_state,
		   hw->nvm_release_on_done, hw->nvm_wait_opcode,
		   cmd->command, cmd->config, cmd->offset, cmd->data_size);

	if (upd_cmd == I40E_NVMUPD_INVALID) {
		*perrno = -EFAULT;
		i40e_debug(hw, I40E_DEBUG_NVM,
			   "i40e_nvmupd_validate_command returns %d errno %d\n",
			   upd_cmd, *perrno);
	}

	/* a status request returns immediately rather than
	 * going into the state machine
	 */
	if (upd_cmd == I40E_NVMUPD_STATUS) {
		if (!cmd->data_size) {
			*perrno = -EFAULT;
			return I40E_ERR_BUF_TOO_SHORT;
		}

		bytes[0] = hw->nvmupd_state;

		if (cmd->data_size >= 4) {
			bytes[1] = 0;
			*((u16 *)&bytes[2]) = hw->nvm_wait_opcode;
		}

		/* Clear error status on read */
		if (hw->nvmupd_state == I40E_NVMUPD_STATE_ERROR)
			hw->nvmupd_state = I40E_NVMUPD_STATE_INIT;

		return 0;
	}

	/* Clear the status even if it is not read, and log it */
	if (hw->nvmupd_state == I40E_NVMUPD_STATE_ERROR) {
		i40e_debug(hw, I40E_DEBUG_NVM,
			   "Clearing I40E_NVMUPD_STATE_ERROR state without reading\n");
		hw->nvmupd_state = I40E_NVMUPD_STATE_INIT;
	}

	/* Acquire lock to prevent race condition where adminq_task
	 * can execute after i40e_nvmupd_nvm_read/write but before state
	 * variables (nvm_wait_opcode, nvm_release_on_done) are updated.
	 *
	 * During NVMUpdate, it is observed that lock could be held for
	 * ~5ms for most commands. However lock is held for ~60ms for
	 * NVMUPD_CSUM_LCB command.
	 */
	mutex_lock(&hw->aq.arq_mutex);
	switch (hw->nvmupd_state) {
	case I40E_NVMUPD_STATE_INIT:
		status = i40e_nvmupd_state_init(hw, cmd, bytes, perrno);
		break;

	case I40E_NVMUPD_STATE_READING:
		status = i40e_nvmupd_state_reading(hw, cmd, bytes, perrno);
		break;

	case I40E_NVMUPD_STATE_WRITING:
		status = i40e_nvmupd_state_writing(hw, cmd, bytes, perrno);
		break;

	case I40E_NVMUPD_STATE_INIT_WAIT:
	case I40E_NVMUPD_STATE_WRITE_WAIT:
		/* if we need to stop waiting for an event, clear
		 * the wait info and return before doing anything else
		 */
		if (cmd->offset == 0xffff) {
			i40e_nvmupd_check_wait_event(hw, hw->nvm_wait_opcode);
			status = 0;
			goto exit;
		}

		status = I40E_ERR_NOT_READY;
		*perrno = -EBUSY;
		break;

	default:
		/* invalid state, should never happen */
		i40e_debug(hw, I40E_DEBUG_NVM,
			   "NVMUPD: no such state %d\n", hw->nvmupd_state);
		status = I40E_NOT_SUPPORTED;
		*perrno = -ESRCH;
		break;
	}
exit:
	mutex_unlock(&hw->aq.arq_mutex);
	return status;
}
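
/* State machine summary (illustrative, derived from the handlers below):
 *
 *	INIT --READ_SNT--> READING --READ_LCB--> INIT
 *	INIT --WRITE_SNT--> WRITE_WAIT --AQ completion--> WRITING
 *	INIT --WRITE_SA / WRITE_ERA / CSUM_SA--> INIT_WAIT --AQ completion--> INIT
 *
 * The *_WAIT states are left via i40e_nvmupd_check_wait_event() when the
 * AdminQ completion matching nvm_wait_opcode arrives; an AdminQ error moves
 * the machine to I40E_NVMUPD_STATE_ERROR instead.
 */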
822
823/**
824 * i40e_nvmupd_state_init - Handle NVM update state Init
825 * @hw: pointer to hardware structure
826 * @cmd: pointer to nvm update command buffer
827 * @bytes: pointer to the data buffer
Shannon Nelson79afe832015-07-23 16:54:33 -0400828 * @perrno: pointer to return error code
Shannon Nelsoncd552cb2014-07-09 07:46:09 +0000829 *
830 * Process legitimate commands of the Init state and conditionally set next
831 * state. Reject all other commands.
832 **/
833static i40e_status i40e_nvmupd_state_init(struct i40e_hw *hw,
834 struct i40e_nvm_access *cmd,
Shannon Nelson79afe832015-07-23 16:54:33 -0400835 u8 *bytes, int *perrno)
Shannon Nelsoncd552cb2014-07-09 07:46:09 +0000836{
837 i40e_status status = 0;
838 enum i40e_nvmupd_cmd upd_cmd;
839
Shannon Nelson79afe832015-07-23 16:54:33 -0400840 upd_cmd = i40e_nvmupd_validate_command(hw, cmd, perrno);
Shannon Nelsoncd552cb2014-07-09 07:46:09 +0000841
842 switch (upd_cmd) {
843 case I40E_NVMUPD_READ_SA:
844 status = i40e_acquire_nvm(hw, I40E_RESOURCE_READ);
845 if (status) {
Shannon Nelson79afe832015-07-23 16:54:33 -0400846 *perrno = i40e_aq_rc_to_posix(status,
Shannon Nelsonbf848f32014-11-13 08:23:22 +0000847 hw->aq.asq_last_status);
Shannon Nelsoncd552cb2014-07-09 07:46:09 +0000848 } else {
Shannon Nelson79afe832015-07-23 16:54:33 -0400849 status = i40e_nvmupd_nvm_read(hw, cmd, bytes, perrno);
Shannon Nelsoncd552cb2014-07-09 07:46:09 +0000850 i40e_release_nvm(hw);
851 }
852 break;
853
854 case I40E_NVMUPD_READ_SNT:
855 status = i40e_acquire_nvm(hw, I40E_RESOURCE_READ);
856 if (status) {
Shannon Nelson79afe832015-07-23 16:54:33 -0400857 *perrno = i40e_aq_rc_to_posix(status,
Shannon Nelsonbf848f32014-11-13 08:23:22 +0000858 hw->aq.asq_last_status);
Shannon Nelsoncd552cb2014-07-09 07:46:09 +0000859 } else {
Shannon Nelson79afe832015-07-23 16:54:33 -0400860 status = i40e_nvmupd_nvm_read(hw, cmd, bytes, perrno);
Shannon Nelson0fdd0522014-11-13 08:23:20 +0000861 if (status)
862 i40e_release_nvm(hw);
863 else
864 hw->nvmupd_state = I40E_NVMUPD_STATE_READING;
Shannon Nelsoncd552cb2014-07-09 07:46:09 +0000865 }
866 break;
867
868 case I40E_NVMUPD_WRITE_ERA:
869 status = i40e_acquire_nvm(hw, I40E_RESOURCE_WRITE);
870 if (status) {
Shannon Nelson79afe832015-07-23 16:54:33 -0400871 *perrno = i40e_aq_rc_to_posix(status,
Shannon Nelsonbf848f32014-11-13 08:23:22 +0000872 hw->aq.asq_last_status);
Shannon Nelsoncd552cb2014-07-09 07:46:09 +0000873 } else {
Shannon Nelson79afe832015-07-23 16:54:33 -0400874 status = i40e_nvmupd_nvm_erase(hw, cmd, perrno);
Shannon Nelson2f1b5bc2015-08-28 17:55:49 -0400875 if (status) {
Shannon Nelsoncd552cb2014-07-09 07:46:09 +0000876 i40e_release_nvm(hw);
Shannon Nelson2f1b5bc2015-08-28 17:55:49 -0400877 } else {
Shannon Nelson437f82a2016-04-01 03:56:09 -0700878 hw->nvm_release_on_done = true;
Shannon Nelsonfed2db92016-04-12 08:30:43 -0700879 hw->nvm_wait_opcode = i40e_aqc_opc_nvm_erase;
Shannon Nelson2f1b5bc2015-08-28 17:55:49 -0400880 hw->nvmupd_state = I40E_NVMUPD_STATE_INIT_WAIT;
881 }
Shannon Nelsoncd552cb2014-07-09 07:46:09 +0000882 }
883 break;
884
885 case I40E_NVMUPD_WRITE_SA:
886 status = i40e_acquire_nvm(hw, I40E_RESOURCE_WRITE);
887 if (status) {
Shannon Nelson79afe832015-07-23 16:54:33 -0400888 *perrno = i40e_aq_rc_to_posix(status,
Shannon Nelsonbf848f32014-11-13 08:23:22 +0000889 hw->aq.asq_last_status);
Shannon Nelsoncd552cb2014-07-09 07:46:09 +0000890 } else {
Shannon Nelson79afe832015-07-23 16:54:33 -0400891 status = i40e_nvmupd_nvm_write(hw, cmd, bytes, perrno);
Shannon Nelson2f1b5bc2015-08-28 17:55:49 -0400892 if (status) {
Shannon Nelsoncd552cb2014-07-09 07:46:09 +0000893 i40e_release_nvm(hw);
Shannon Nelson2f1b5bc2015-08-28 17:55:49 -0400894 } else {
Shannon Nelson437f82a2016-04-01 03:56:09 -0700895 hw->nvm_release_on_done = true;
Shannon Nelsonfed2db92016-04-12 08:30:43 -0700896 hw->nvm_wait_opcode = i40e_aqc_opc_nvm_update;
Shannon Nelson2f1b5bc2015-08-28 17:55:49 -0400897 hw->nvmupd_state = I40E_NVMUPD_STATE_INIT_WAIT;
898 }
Shannon Nelsoncd552cb2014-07-09 07:46:09 +0000899 }
900 break;
901
902 case I40E_NVMUPD_WRITE_SNT:
903 status = i40e_acquire_nvm(hw, I40E_RESOURCE_WRITE);
904 if (status) {
Shannon Nelson79afe832015-07-23 16:54:33 -0400905 *perrno = i40e_aq_rc_to_posix(status,
Shannon Nelsonbf848f32014-11-13 08:23:22 +0000906 hw->aq.asq_last_status);
Shannon Nelsoncd552cb2014-07-09 07:46:09 +0000907 } else {
Shannon Nelson79afe832015-07-23 16:54:33 -0400908 status = i40e_nvmupd_nvm_write(hw, cmd, bytes, perrno);
Shannon Nelsonfed2db92016-04-12 08:30:43 -0700909 if (status) {
Shannon Nelson0fdd0522014-11-13 08:23:20 +0000910 i40e_release_nvm(hw);
Shannon Nelsonfed2db92016-04-12 08:30:43 -0700911 } else {
912 hw->nvm_wait_opcode = i40e_aqc_opc_nvm_update;
Shannon Nelson2f1b5bc2015-08-28 17:55:49 -0400913 hw->nvmupd_state = I40E_NVMUPD_STATE_WRITE_WAIT;
Shannon Nelsonfed2db92016-04-12 08:30:43 -0700914 }
Shannon Nelsoncd552cb2014-07-09 07:46:09 +0000915 }
916 break;
917
918 case I40E_NVMUPD_CSUM_SA:
919 status = i40e_acquire_nvm(hw, I40E_RESOURCE_WRITE);
920 if (status) {
Shannon Nelson79afe832015-07-23 16:54:33 -0400921 *perrno = i40e_aq_rc_to_posix(status,
Shannon Nelsonbf848f32014-11-13 08:23:22 +0000922 hw->aq.asq_last_status);
Shannon Nelsoncd552cb2014-07-09 07:46:09 +0000923 } else {
924 status = i40e_update_nvm_checksum(hw);
925 if (status) {
Shannon Nelson79afe832015-07-23 16:54:33 -0400926 *perrno = hw->aq.asq_last_status ?
Shannon Nelsonbf848f32014-11-13 08:23:22 +0000927 i40e_aq_rc_to_posix(status,
928 hw->aq.asq_last_status) :
Shannon Nelsoncd552cb2014-07-09 07:46:09 +0000929 -EIO;
930 i40e_release_nvm(hw);
931 } else {
Shannon Nelson437f82a2016-04-01 03:56:09 -0700932 hw->nvm_release_on_done = true;
Shannon Nelsonfed2db92016-04-12 08:30:43 -0700933 hw->nvm_wait_opcode = i40e_aqc_opc_nvm_update;
Shannon Nelson2f1b5bc2015-08-28 17:55:49 -0400934 hw->nvmupd_state = I40E_NVMUPD_STATE_INIT_WAIT;
Shannon Nelsoncd552cb2014-07-09 07:46:09 +0000935 }
936 }
937 break;
938
Shannon Nelsone4c83c22015-08-28 17:55:50 -0400939 case I40E_NVMUPD_EXEC_AQ:
940 status = i40e_nvmupd_exec_aq(hw, cmd, bytes, perrno);
941 break;
942
Shannon Nelsonb72dc7b2015-08-28 17:55:51 -0400943 case I40E_NVMUPD_GET_AQ_RESULT:
944 status = i40e_nvmupd_get_aq_result(hw, cmd, bytes, perrno);
945 break;
946
Shannon Nelsoncd552cb2014-07-09 07:46:09 +0000947 default:
Shannon Nelson74d0d0e2014-11-13 08:23:15 +0000948 i40e_debug(hw, I40E_DEBUG_NVM,
949 "NVMUPD: bad cmd %s in init state\n",
950 i40e_nvm_update_state_str[upd_cmd]);
Shannon Nelsoncd552cb2014-07-09 07:46:09 +0000951 status = I40E_ERR_NVM;
Shannon Nelson79afe832015-07-23 16:54:33 -0400952 *perrno = -ESRCH;
Shannon Nelsoncd552cb2014-07-09 07:46:09 +0000953 break;
954 }
955 return status;
956}
957
958/**
959 * i40e_nvmupd_state_reading - Handle NVM update state Reading
960 * @hw: pointer to hardware structure
961 * @cmd: pointer to nvm update command buffer
962 * @bytes: pointer to the data buffer
Shannon Nelson79afe832015-07-23 16:54:33 -0400963 * @perrno: pointer to return error code
Shannon Nelsoncd552cb2014-07-09 07:46:09 +0000964 *
965 * NVM ownership is already held. Process legitimate commands and set any
966 * change in state; reject all other commands.
967 **/
968static i40e_status i40e_nvmupd_state_reading(struct i40e_hw *hw,
969 struct i40e_nvm_access *cmd,
Shannon Nelson79afe832015-07-23 16:54:33 -0400970 u8 *bytes, int *perrno)
Shannon Nelsoncd552cb2014-07-09 07:46:09 +0000971{
Shannon Nelson2f1b5bc2015-08-28 17:55:49 -0400972 i40e_status status = 0;
Shannon Nelsoncd552cb2014-07-09 07:46:09 +0000973 enum i40e_nvmupd_cmd upd_cmd;
974
Shannon Nelson79afe832015-07-23 16:54:33 -0400975 upd_cmd = i40e_nvmupd_validate_command(hw, cmd, perrno);
Shannon Nelsoncd552cb2014-07-09 07:46:09 +0000976
977 switch (upd_cmd) {
978 case I40E_NVMUPD_READ_SA:
979 case I40E_NVMUPD_READ_CON:
Shannon Nelson79afe832015-07-23 16:54:33 -0400980 status = i40e_nvmupd_nvm_read(hw, cmd, bytes, perrno);
Shannon Nelsoncd552cb2014-07-09 07:46:09 +0000981 break;
982
983 case I40E_NVMUPD_READ_LCB:
Shannon Nelson79afe832015-07-23 16:54:33 -0400984 status = i40e_nvmupd_nvm_read(hw, cmd, bytes, perrno);
Shannon Nelsoncd552cb2014-07-09 07:46:09 +0000985 i40e_release_nvm(hw);
986 hw->nvmupd_state = I40E_NVMUPD_STATE_INIT;
987 break;
988
989 default:
Shannon Nelson74d0d0e2014-11-13 08:23:15 +0000990 i40e_debug(hw, I40E_DEBUG_NVM,
991 "NVMUPD: bad cmd %s in reading state.\n",
992 i40e_nvm_update_state_str[upd_cmd]);
Shannon Nelsoncd552cb2014-07-09 07:46:09 +0000993 status = I40E_NOT_SUPPORTED;
Shannon Nelson79afe832015-07-23 16:54:33 -0400994 *perrno = -ESRCH;
Shannon Nelsoncd552cb2014-07-09 07:46:09 +0000995 break;
996 }
997 return status;
998}
999
1000/**
1001 * i40e_nvmupd_state_writing - Handle NVM update state Writing
1002 * @hw: pointer to hardware structure
1003 * @cmd: pointer to nvm update command buffer
1004 * @bytes: pointer to the data buffer
Shannon Nelson79afe832015-07-23 16:54:33 -04001005 * @perrno: pointer to return error code
Shannon Nelsoncd552cb2014-07-09 07:46:09 +00001006 *
1007 * NVM ownership is already held. Process legitimate commands and set any
1008 * change in state; reject all other commands
1009 **/
1010static i40e_status i40e_nvmupd_state_writing(struct i40e_hw *hw,
1011 struct i40e_nvm_access *cmd,
Shannon Nelson79afe832015-07-23 16:54:33 -04001012 u8 *bytes, int *perrno)
Shannon Nelsoncd552cb2014-07-09 07:46:09 +00001013{
Shannon Nelson2f1b5bc2015-08-28 17:55:49 -04001014 i40e_status status = 0;
Shannon Nelsoncd552cb2014-07-09 07:46:09 +00001015 enum i40e_nvmupd_cmd upd_cmd;
Shannon Nelson2c47e352015-02-21 06:45:10 +00001016 bool retry_attempt = false;
Shannon Nelsoncd552cb2014-07-09 07:46:09 +00001017
Shannon Nelson79afe832015-07-23 16:54:33 -04001018 upd_cmd = i40e_nvmupd_validate_command(hw, cmd, perrno);
Shannon Nelsoncd552cb2014-07-09 07:46:09 +00001019
Shannon Nelson2c47e352015-02-21 06:45:10 +00001020retry:
Shannon Nelsoncd552cb2014-07-09 07:46:09 +00001021 switch (upd_cmd) {
1022 case I40E_NVMUPD_WRITE_CON:
Shannon Nelson79afe832015-07-23 16:54:33 -04001023 status = i40e_nvmupd_nvm_write(hw, cmd, bytes, perrno);
Shannon Nelsonfed2db92016-04-12 08:30:43 -07001024 if (!status) {
1025 hw->nvm_wait_opcode = i40e_aqc_opc_nvm_update;
Shannon Nelson2f1b5bc2015-08-28 17:55:49 -04001026 hw->nvmupd_state = I40E_NVMUPD_STATE_WRITE_WAIT;
Shannon Nelsonfed2db92016-04-12 08:30:43 -07001027 }
Shannon Nelsoncd552cb2014-07-09 07:46:09 +00001028 break;
1029
1030 case I40E_NVMUPD_WRITE_LCB:
Shannon Nelson79afe832015-07-23 16:54:33 -04001031 status = i40e_nvmupd_nvm_write(hw, cmd, bytes, perrno);
Shannon Nelson2f1b5bc2015-08-28 17:55:49 -04001032 if (status) {
1033 *perrno = hw->aq.asq_last_status ?
1034 i40e_aq_rc_to_posix(status,
1035 hw->aq.asq_last_status) :
1036 -EIO;
1037 hw->nvmupd_state = I40E_NVMUPD_STATE_INIT;
1038 } else {
Shannon Nelson437f82a2016-04-01 03:56:09 -07001039 hw->nvm_release_on_done = true;
Shannon Nelsonfed2db92016-04-12 08:30:43 -07001040 hw->nvm_wait_opcode = i40e_aqc_opc_nvm_update;
Shannon Nelson2f1b5bc2015-08-28 17:55:49 -04001041 hw->nvmupd_state = I40E_NVMUPD_STATE_INIT_WAIT;
1042 }
Shannon Nelsoncd552cb2014-07-09 07:46:09 +00001043 break;
1044
1045 case I40E_NVMUPD_CSUM_CON:
Anjali Singhai Jain09f79fd2017-09-01 13:42:49 -07001046 /* Assumes the caller has acquired the nvm */
Shannon Nelsoncd552cb2014-07-09 07:46:09 +00001047 status = i40e_update_nvm_checksum(hw);
Shannon Nelsoncd552cb2014-07-09 07:46:09 +00001048 if (status) {
Shannon Nelson79afe832015-07-23 16:54:33 -04001049 *perrno = hw->aq.asq_last_status ?
Shannon Nelsonbf848f32014-11-13 08:23:22 +00001050 i40e_aq_rc_to_posix(status,
1051 hw->aq.asq_last_status) :
Shannon Nelsoncd552cb2014-07-09 07:46:09 +00001052 -EIO;
Shannon Nelsoncd552cb2014-07-09 07:46:09 +00001053 hw->nvmupd_state = I40E_NVMUPD_STATE_INIT;
Shannon Nelson2f1b5bc2015-08-28 17:55:49 -04001054 } else {
Shannon Nelsonfed2db92016-04-12 08:30:43 -07001055 hw->nvm_wait_opcode = i40e_aqc_opc_nvm_update;
Shannon Nelson2f1b5bc2015-08-28 17:55:49 -04001056 hw->nvmupd_state = I40E_NVMUPD_STATE_WRITE_WAIT;
Shannon Nelsoncd552cb2014-07-09 07:46:09 +00001057 }
1058 break;
1059
Shannon Nelson0fdd0522014-11-13 08:23:20 +00001060 case I40E_NVMUPD_CSUM_LCB:
Anjali Singhai Jain09f79fd2017-09-01 13:42:49 -07001061 /* Assumes the caller has acquired the nvm */
Shannon Nelson0fdd0522014-11-13 08:23:20 +00001062 status = i40e_update_nvm_checksum(hw);
Shannon Nelson2f1b5bc2015-08-28 17:55:49 -04001063 if (status) {
Shannon Nelson79afe832015-07-23 16:54:33 -04001064 *perrno = hw->aq.asq_last_status ?
Shannon Nelsonbf848f32014-11-13 08:23:22 +00001065 i40e_aq_rc_to_posix(status,
1066 hw->aq.asq_last_status) :
Shannon Nelson0fdd0522014-11-13 08:23:20 +00001067 -EIO;
Shannon Nelson2f1b5bc2015-08-28 17:55:49 -04001068 hw->nvmupd_state = I40E_NVMUPD_STATE_INIT;
1069 } else {
Shannon Nelson437f82a2016-04-01 03:56:09 -07001070 hw->nvm_release_on_done = true;
Shannon Nelsonfed2db92016-04-12 08:30:43 -07001071 hw->nvm_wait_opcode = i40e_aqc_opc_nvm_update;
Shannon Nelson2f1b5bc2015-08-28 17:55:49 -04001072 hw->nvmupd_state = I40E_NVMUPD_STATE_INIT_WAIT;
1073 }
Shannon Nelson0fdd0522014-11-13 08:23:20 +00001074 break;
1075
Shannon Nelsoncd552cb2014-07-09 07:46:09 +00001076 default:
Shannon Nelson74d0d0e2014-11-13 08:23:15 +00001077 i40e_debug(hw, I40E_DEBUG_NVM,
1078 "NVMUPD: bad cmd %s in writing state.\n",
1079 i40e_nvm_update_state_str[upd_cmd]);
Shannon Nelsoncd552cb2014-07-09 07:46:09 +00001080 status = I40E_NOT_SUPPORTED;
Shannon Nelson79afe832015-07-23 16:54:33 -04001081 *perrno = -ESRCH;
Shannon Nelsoncd552cb2014-07-09 07:46:09 +00001082 break;
1083 }
Shannon Nelson2c47e352015-02-21 06:45:10 +00001084
1085 /* In some circumstances, a multi-write transaction takes longer
1086 * than the default 3 minute timeout on the write semaphore. If
1087 * the write failed with an EBUSY status, this is likely the problem,
1088 * so here we try to reacquire the semaphore then retry the write.
1089 * We only do one retry, then give up.
1090 */
1091 if (status && (hw->aq.asq_last_status == I40E_AQ_RC_EBUSY) &&
1092 !retry_attempt) {
1093 i40e_status old_status = status;
1094 u32 old_asq_status = hw->aq.asq_last_status;
1095 u32 gtime;
1096
1097 gtime = rd32(hw, I40E_GLVFGEN_TIMER);
1098 if (gtime >= hw->nvm.hw_semaphore_timeout) {
1099 i40e_debug(hw, I40E_DEBUG_ALL,
1100 "NVMUPD: write semaphore expired (%d >= %lld), retrying\n",
1101 gtime, hw->nvm.hw_semaphore_timeout);
1102 i40e_release_nvm(hw);
1103 status = i40e_acquire_nvm(hw, I40E_RESOURCE_WRITE);
1104 if (status) {
1105 i40e_debug(hw, I40E_DEBUG_ALL,
1106 "NVMUPD: write semaphore reacquire failed aq_err = %d\n",
1107 hw->aq.asq_last_status);
1108 status = old_status;
1109 hw->aq.asq_last_status = old_asq_status;
1110 } else {
1111 retry_attempt = true;
1112 goto retry;
1113 }
1114 }
1115 }
1116
Shannon Nelsoncd552cb2014-07-09 07:46:09 +00001117 return status;
1118}
1119
1120/**
Shannon Nelsonbab2fb62016-04-01 03:56:11 -07001121 * i40e_nvmupd_check_wait_event - handle NVM update operation events
1122 * @hw: pointer to the hardware structure
1123 * @opcode: the event that just happened
1124 **/
1125void i40e_nvmupd_check_wait_event(struct i40e_hw *hw, u16 opcode)
1126{
Shannon Nelsonfed2db92016-04-12 08:30:43 -07001127 if (opcode == hw->nvm_wait_opcode) {
Shannon Nelsonbab2fb62016-04-01 03:56:11 -07001128 i40e_debug(hw, I40E_DEBUG_NVM,
1129 "NVMUPD: clearing wait on opcode 0x%04x\n", opcode);
1130 if (hw->nvm_release_on_done) {
1131 i40e_release_nvm(hw);
1132 hw->nvm_release_on_done = false;
1133 }
Shannon Nelsonfed2db92016-04-12 08:30:43 -07001134 hw->nvm_wait_opcode = 0;
Shannon Nelsonbab2fb62016-04-01 03:56:11 -07001135
Maciej Sosin81fa7c92016-10-11 15:26:57 -07001136 if (hw->aq.arq_last_status) {
1137 hw->nvmupd_state = I40E_NVMUPD_STATE_ERROR;
1138 return;
1139 }
1140
Shannon Nelsonbab2fb62016-04-01 03:56:11 -07001141 switch (hw->nvmupd_state) {
1142 case I40E_NVMUPD_STATE_INIT_WAIT:
1143 hw->nvmupd_state = I40E_NVMUPD_STATE_INIT;
1144 break;
1145
1146 case I40E_NVMUPD_STATE_WRITE_WAIT:
1147 hw->nvmupd_state = I40E_NVMUPD_STATE_WRITING;
1148 break;
1149
1150 default:
1151 break;
1152 }
1153 }
1154}
1155
/**
 * i40e_nvmupd_validate_command - Validate given command
 * @hw: pointer to hardware structure
 * @cmd: pointer to nvm update command buffer
 * @perrno: pointer to return error code
 *
 * Return one of the valid command types or I40E_NVMUPD_INVALID
 **/
static enum i40e_nvmupd_cmd i40e_nvmupd_validate_command(struct i40e_hw *hw,
						 struct i40e_nvm_access *cmd,
						 int *perrno)
{
	enum i40e_nvmupd_cmd upd_cmd;
	u8 module, transaction;

	/* anything that doesn't match a recognized case is an error */
	upd_cmd = I40E_NVMUPD_INVALID;

	transaction = i40e_nvmupd_get_transaction(cmd->config);
	module = i40e_nvmupd_get_module(cmd->config);

	/* limits on data size */
	if ((cmd->data_size < 1) ||
	    (cmd->data_size > I40E_NVMUPD_MAX_DATA)) {
		i40e_debug(hw, I40E_DEBUG_NVM,
			   "i40e_nvmupd_validate_command data_size %d\n",
			   cmd->data_size);
		*perrno = -EFAULT;
		return I40E_NVMUPD_INVALID;
	}

	switch (cmd->command) {
	case I40E_NVM_READ:
		switch (transaction) {
		case I40E_NVM_CON:
			upd_cmd = I40E_NVMUPD_READ_CON;
			break;
		case I40E_NVM_SNT:
			upd_cmd = I40E_NVMUPD_READ_SNT;
			break;
		case I40E_NVM_LCB:
			upd_cmd = I40E_NVMUPD_READ_LCB;
			break;
		case I40E_NVM_SA:
			upd_cmd = I40E_NVMUPD_READ_SA;
			break;
		case I40E_NVM_EXEC:
			if (module == 0xf)
				upd_cmd = I40E_NVMUPD_STATUS;
			else if (module == 0)
				upd_cmd = I40E_NVMUPD_GET_AQ_RESULT;
			break;
		}
		break;

	case I40E_NVM_WRITE:
		switch (transaction) {
		case I40E_NVM_CON:
			upd_cmd = I40E_NVMUPD_WRITE_CON;
			break;
		case I40E_NVM_SNT:
			upd_cmd = I40E_NVMUPD_WRITE_SNT;
			break;
		case I40E_NVM_LCB:
			upd_cmd = I40E_NVMUPD_WRITE_LCB;
			break;
		case I40E_NVM_SA:
			upd_cmd = I40E_NVMUPD_WRITE_SA;
			break;
		case I40E_NVM_ERA:
			upd_cmd = I40E_NVMUPD_WRITE_ERA;
			break;
		case I40E_NVM_CSUM:
			upd_cmd = I40E_NVMUPD_CSUM_CON;
			break;
		case (I40E_NVM_CSUM|I40E_NVM_SA):
			upd_cmd = I40E_NVMUPD_CSUM_SA;
			break;
		case (I40E_NVM_CSUM|I40E_NVM_LCB):
			upd_cmd = I40E_NVMUPD_CSUM_LCB;
			break;
		case I40E_NVM_EXEC:
			if (module == 0)
				upd_cmd = I40E_NVMUPD_EXEC_AQ;
			break;
		}
		break;
	}

	return upd_cmd;
}

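/* Decoding example, derived from the switch above: a one-shot Shadow RAM
 * read with implicit acquire/release uses command == I40E_NVM_READ and a
 * config whose transaction bits (i40e_nvmupd_get_transaction()) decode to
 * I40E_NVM_SA, yielding I40E_NVMUPD_READ_SA.  The module bits
 * (i40e_nvmupd_get_module()) only matter for the EXEC transaction, where
 * module 0 selects the AQ pass-through (EXEC_AQ / GET_AQ_RESULT) and
 * module 0xf the status query.
 */
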
/**
 * i40e_nvmupd_exec_aq - Run an AQ command
 * @hw: pointer to hardware structure
 * @cmd: pointer to nvm update command buffer
 * @bytes: pointer to the data buffer
 * @perrno: pointer to return error code
 *
 * The bytes buffer holds an admin queue descriptor followed by any indirect
 * data; cmd->offset, when non-zero, is the opcode of a follow-up ARQ event
 * to wait for before the update state machine may proceed.
 **/
static i40e_status i40e_nvmupd_exec_aq(struct i40e_hw *hw,
				       struct i40e_nvm_access *cmd,
				       u8 *bytes, int *perrno)
{
	struct i40e_asq_cmd_details cmd_details;
	i40e_status status;
	struct i40e_aq_desc *aq_desc;
	u32 buff_size = 0;
	u8 *buff = NULL;
	u32 aq_desc_len;
	u32 aq_data_len;

	i40e_debug(hw, I40E_DEBUG_NVM, "NVMUPD: %s\n", __func__);
	memset(&cmd_details, 0, sizeof(cmd_details));
	cmd_details.wb_desc = &hw->nvm_wb_desc;

	aq_desc_len = sizeof(struct i40e_aq_desc);
	memset(&hw->nvm_wb_desc, 0, aq_desc_len);

	/* get the aq descriptor */
	if (cmd->data_size < aq_desc_len) {
		i40e_debug(hw, I40E_DEBUG_NVM,
			   "NVMUPD: not enough aq desc bytes for exec, size %d < %d\n",
			   cmd->data_size, aq_desc_len);
		*perrno = -EINVAL;
		return I40E_ERR_PARAM;
	}
	aq_desc = (struct i40e_aq_desc *)bytes;

	/* if data buffer needed, make sure it's ready */
	aq_data_len = cmd->data_size - aq_desc_len;
	buff_size = max_t(u32, aq_data_len, le16_to_cpu(aq_desc->datalen));
	if (buff_size) {
		if (!hw->nvm_buff.va) {
			status = i40e_allocate_virt_mem(hw, &hw->nvm_buff,
							hw->aq.asq_buf_size);
			if (status)
				i40e_debug(hw, I40E_DEBUG_NVM,
					   "NVMUPD: i40e_allocate_virt_mem for exec buff failed, %d\n",
					   status);
		}

		if (hw->nvm_buff.va) {
			buff = hw->nvm_buff.va;
			memcpy(buff, &bytes[aq_desc_len], aq_data_len);
		}
	}

	/* and away we go! */
	status = i40e_asq_send_command(hw, aq_desc, buff,
				       buff_size, &cmd_details);
	if (status) {
		i40e_debug(hw, I40E_DEBUG_NVM,
			   "i40e_nvmupd_exec_aq err %s aq_err %s\n",
			   i40e_stat_str(hw, status),
			   i40e_aq_str(hw, hw->aq.asq_last_status));
		*perrno = i40e_aq_rc_to_posix(status, hw->aq.asq_last_status);
	}

	/* should we wait for a followup event? */
	if (cmd->offset) {
		hw->nvm_wait_opcode = cmd->offset;
		hw->nvmupd_state = I40E_NVMUPD_STATE_INIT_WAIT;
	}

	return status;
}

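/* Buffer layout assumed by i40e_nvmupd_exec_aq(), as implied by the size
 * checks above:
 *
 *	bytes[0 .. sizeof(struct i40e_aq_desc) - 1]	descriptor to send
 *	bytes[sizeof(struct i40e_aq_desc) .. ]		optional indirect data
 *
 * cmd->data_size must cover at least the descriptor; anything beyond it is
 * copied into hw->nvm_buff and sent as the indirect buffer.
 */
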
/**
 * i40e_nvmupd_get_aq_result - Get the results from the previous exec_aq
 * @hw: pointer to hardware structure
 * @cmd: pointer to nvm update command buffer
 * @bytes: pointer to the data buffer
 * @perrno: pointer to return error code
 *
 * cmd structure contains identifiers and data buffer
 **/
static i40e_status i40e_nvmupd_get_aq_result(struct i40e_hw *hw,
					     struct i40e_nvm_access *cmd,
					     u8 *bytes, int *perrno)
{
	u32 aq_total_len;
	u32 aq_desc_len;
	int remainder;
	u8 *buff;

	i40e_debug(hw, I40E_DEBUG_NVM, "NVMUPD: %s\n", __func__);

	aq_desc_len = sizeof(struct i40e_aq_desc);
	aq_total_len = aq_desc_len + le16_to_cpu(hw->nvm_wb_desc.datalen);

	/* check offset range */
	if (cmd->offset > aq_total_len) {
		i40e_debug(hw, I40E_DEBUG_NVM, "%s: offset too big %d > %d\n",
			   __func__, cmd->offset, aq_total_len);
		*perrno = -EINVAL;
		return I40E_ERR_PARAM;
	}

	/* check copylength range */
	if (cmd->data_size > (aq_total_len - cmd->offset)) {
		int new_len = aq_total_len - cmd->offset;

		i40e_debug(hw, I40E_DEBUG_NVM, "%s: copy length %d too big, trimming to %d\n",
			   __func__, cmd->data_size, new_len);
		cmd->data_size = new_len;
	}

	remainder = cmd->data_size;
	if (cmd->offset < aq_desc_len) {
		u32 len = aq_desc_len - cmd->offset;

		len = min(len, cmd->data_size);
		i40e_debug(hw, I40E_DEBUG_NVM, "%s: aq_desc bytes %d to %d\n",
			   __func__, cmd->offset, cmd->offset + len);

		buff = ((u8 *)&hw->nvm_wb_desc) + cmd->offset;
		memcpy(bytes, buff, len);

		bytes += len;
		remainder -= len;
		buff = hw->nvm_buff.va;
	} else {
		buff = hw->nvm_buff.va + (cmd->offset - aq_desc_len);
	}

	if (remainder > 0) {
		int start_byte = buff - (u8 *)hw->nvm_buff.va;

		i40e_debug(hw, I40E_DEBUG_NVM, "%s: databuf bytes %d to %d\n",
			   __func__, start_byte, start_byte + remainder);
		memcpy(bytes, buff, remainder);
	}

	return 0;
}

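/* Illustrative read-back sequence (an assumption about how a user tool
 * drives I40E_NVMUPD_GET_AQ_RESULT): first ask for offset 0 with data_size
 * sizeof(struct i40e_aq_desc) to fetch the writeback descriptor, then, if
 * its datalen is non-zero, ask again with offset sizeof(struct i40e_aq_desc)
 * to copy the indirect data out of hw->nvm_buff.
 */
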
/**
 * i40e_nvmupd_nvm_read - Read NVM
 * @hw: pointer to hardware structure
 * @cmd: pointer to nvm update command buffer
 * @bytes: pointer to the data buffer
 * @perrno: pointer to return error code
 *
 * cmd structure contains identifiers and data buffer
 **/
static i40e_status i40e_nvmupd_nvm_read(struct i40e_hw *hw,
					struct i40e_nvm_access *cmd,
					u8 *bytes, int *perrno)
{
	struct i40e_asq_cmd_details cmd_details;
	i40e_status status;
	u8 module, transaction;
	bool last;

	transaction = i40e_nvmupd_get_transaction(cmd->config);
	module = i40e_nvmupd_get_module(cmd->config);
	last = (transaction == I40E_NVM_LCB) || (transaction == I40E_NVM_SA);

	memset(&cmd_details, 0, sizeof(cmd_details));
	cmd_details.wb_desc = &hw->nvm_wb_desc;

	status = i40e_aq_read_nvm(hw, module, cmd->offset, (u16)cmd->data_size,
				  bytes, last, &cmd_details);
	if (status) {
		i40e_debug(hw, I40E_DEBUG_NVM,
			   "i40e_nvmupd_nvm_read mod 0x%x off 0x%x len 0x%x\n",
			   module, cmd->offset, cmd->data_size);
		i40e_debug(hw, I40E_DEBUG_NVM,
			   "i40e_nvmupd_nvm_read status %d aq %d\n",
			   status, hw->aq.asq_last_status);
		*perrno = i40e_aq_rc_to_posix(status, hw->aq.asq_last_status);
	}

	return status;
}

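/* Multi-chunk read sketch (assumption about the driving tool): the first
 * chunk is sent with the I40E_NVM_SNT transaction, middle chunks with
 * I40E_NVM_CON and the final chunk with I40E_NVM_LCB, so 'last' above is
 * true only for the closing (or single-shot SA) request and firmware knows
 * when the sequence is complete.
 */
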
/**
 * i40e_nvmupd_nvm_erase - Erase an NVM module
 * @hw: pointer to hardware structure
 * @cmd: pointer to nvm update command buffer
 * @perrno: pointer to return error code
 *
 * module, offset and data_size are in the cmd structure; an erase carries
 * no data buffer
 **/
static i40e_status i40e_nvmupd_nvm_erase(struct i40e_hw *hw,
					 struct i40e_nvm_access *cmd,
					 int *perrno)
{
	i40e_status status = 0;
	struct i40e_asq_cmd_details cmd_details;
	u8 module, transaction;
	bool last;

	transaction = i40e_nvmupd_get_transaction(cmd->config);
	module = i40e_nvmupd_get_module(cmd->config);
	last = (transaction & I40E_NVM_LCB);

	memset(&cmd_details, 0, sizeof(cmd_details));
	cmd_details.wb_desc = &hw->nvm_wb_desc;

	status = i40e_aq_erase_nvm(hw, module, cmd->offset, (u16)cmd->data_size,
				   last, &cmd_details);
	if (status) {
		i40e_debug(hw, I40E_DEBUG_NVM,
			   "i40e_nvmupd_nvm_erase mod 0x%x off 0x%x len 0x%x\n",
			   module, cmd->offset, cmd->data_size);
		i40e_debug(hw, I40E_DEBUG_NVM,
			   "i40e_nvmupd_nvm_erase status %d aq %d\n",
			   status, hw->aq.asq_last_status);
		*perrno = i40e_aq_rc_to_posix(status, hw->aq.asq_last_status);
	}

	return status;
}

/**
 * i40e_nvmupd_nvm_write - Write NVM
 * @hw: pointer to hardware structure
 * @cmd: pointer to nvm update command buffer
 * @bytes: pointer to the data buffer
 * @perrno: pointer to return error code
 *
 * module, offset, data_size and data are in cmd structure
 **/
static i40e_status i40e_nvmupd_nvm_write(struct i40e_hw *hw,
					 struct i40e_nvm_access *cmd,
					 u8 *bytes, int *perrno)
{
	i40e_status status = 0;
	struct i40e_asq_cmd_details cmd_details;
	u8 module, transaction;
	bool last;

	transaction = i40e_nvmupd_get_transaction(cmd->config);
	module = i40e_nvmupd_get_module(cmd->config);
	last = (transaction & I40E_NVM_LCB);

	memset(&cmd_details, 0, sizeof(cmd_details));
	cmd_details.wb_desc = &hw->nvm_wb_desc;

	status = i40e_aq_update_nvm(hw, module, cmd->offset,
				    (u16)cmd->data_size, bytes, last,
				    &cmd_details);
	if (status) {
		i40e_debug(hw, I40E_DEBUG_NVM,
			   "i40e_nvmupd_nvm_write mod 0x%x off 0x%x len 0x%x\n",
			   module, cmd->offset, cmd->data_size);
		i40e_debug(hw, I40E_DEBUG_NVM,
			   "i40e_nvmupd_nvm_write status %d aq %d\n",
			   status, hw->aq.asq_last_status);
		*perrno = i40e_aq_rc_to_posix(status, hw->aq.asq_last_status);
	}

	return status;
}