/*******************************************************************************
 *
 * Intel Ethernet Controller XL710 Family Linux Driver
 * Copyright(c) 2013 Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc.,
 * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * The full GNU General Public License is included in this distribution in
 * the file called "COPYING".
 *
 * Contact Information:
 * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
 *
 ******************************************************************************/

#include "i40e_status.h"
#include "i40e_type.h"
#include "i40e_register.h"
#include "i40e_adminq.h"
#include "i40e_prototype.h"

/**
 * i40e_adminq_init_regs - Initialize AdminQ registers
 * @hw: pointer to the hardware structure
 *
 * This assumes the alloc_asq and alloc_arq functions have already been called
 **/
static void i40e_adminq_init_regs(struct i40e_hw *hw)
{
	/* set head and tail registers in our local struct */
	if (hw->mac.type == I40E_MAC_VF) {
		hw->aq.asq.tail = I40E_VF_ATQT1;
		hw->aq.asq.head = I40E_VF_ATQH1;
		hw->aq.arq.tail = I40E_VF_ARQT1;
		hw->aq.arq.head = I40E_VF_ARQH1;
	} else {
		hw->aq.asq.tail = I40E_PF_ATQT;
		hw->aq.asq.head = I40E_PF_ATQH;
		hw->aq.arq.tail = I40E_PF_ARQT;
		hw->aq.arq.head = I40E_PF_ARQH;
	}
}

/**
 * i40e_alloc_adminq_asq_ring - Allocate Admin Queue send rings
 * @hw: pointer to the hardware structure
 **/
static i40e_status i40e_alloc_adminq_asq_ring(struct i40e_hw *hw)
{
	i40e_status ret_code;
	struct i40e_virt_mem mem;

	ret_code = i40e_allocate_dma_mem(hw, &hw->aq.asq_mem,
					 i40e_mem_atq_ring,
					 (hw->aq.num_asq_entries *
					 sizeof(struct i40e_aq_desc)),
					 I40E_ADMINQ_DESC_ALIGNMENT);
	if (ret_code)
		return ret_code;

	hw->aq.asq.desc = hw->aq.asq_mem.va;
	hw->aq.asq.dma_addr = hw->aq.asq_mem.pa;

	ret_code = i40e_allocate_virt_mem(hw, &mem,
					  (hw->aq.num_asq_entries *
					  sizeof(struct i40e_asq_cmd_details)));
	if (ret_code) {
		i40e_free_dma_mem(hw, &hw->aq.asq_mem);
		hw->aq.asq_mem.va = NULL;
		hw->aq.asq_mem.pa = 0;
		return ret_code;
	}

	hw->aq.asq.details = mem.va;

	return ret_code;
}

/**
 * i40e_alloc_adminq_arq_ring - Allocate Admin Queue receive rings
 * @hw: pointer to the hardware structure
 **/
static i40e_status i40e_alloc_adminq_arq_ring(struct i40e_hw *hw)
{
	i40e_status ret_code;

	ret_code = i40e_allocate_dma_mem(hw, &hw->aq.arq_mem,
					 i40e_mem_arq_ring,
					 (hw->aq.num_arq_entries *
					 sizeof(struct i40e_aq_desc)),
					 I40E_ADMINQ_DESC_ALIGNMENT);
	if (ret_code)
		return ret_code;

	hw->aq.arq.desc = hw->aq.arq_mem.va;
	hw->aq.arq.dma_addr = hw->aq.arq_mem.pa;

	return ret_code;
}

/**
 * i40e_free_adminq_asq - Free Admin Queue send rings
 * @hw: pointer to the hardware structure
 *
 * This assumes the posted send buffers have already been cleaned
 * and de-allocated
 **/
static void i40e_free_adminq_asq(struct i40e_hw *hw)
{
	struct i40e_virt_mem mem;

	i40e_free_dma_mem(hw, &hw->aq.asq_mem);
	hw->aq.asq_mem.va = NULL;
	hw->aq.asq_mem.pa = 0;
	mem.va = hw->aq.asq.details;
	i40e_free_virt_mem(hw, &mem);
	hw->aq.asq.details = NULL;
}

/**
 * i40e_free_adminq_arq - Free Admin Queue receive rings
 * @hw: pointer to the hardware structure
 *
 * This assumes the posted receive buffers have already been cleaned
 * and de-allocated
 **/
static void i40e_free_adminq_arq(struct i40e_hw *hw)
{
	i40e_free_dma_mem(hw, &hw->aq.arq_mem);
	hw->aq.arq_mem.va = NULL;
	hw->aq.arq_mem.pa = 0;
}

/**
 * i40e_alloc_arq_bufs - Allocate pre-posted buffers for the receive queue
 * @hw: pointer to the hardware structure
 **/
static i40e_status i40e_alloc_arq_bufs(struct i40e_hw *hw)
{
	i40e_status ret_code;
	struct i40e_aq_desc *desc;
	struct i40e_virt_mem mem;
	struct i40e_dma_mem *bi;
	int i;

	/* We'll be allocating the buffer info memory first, then we can
	 * allocate the mapped buffers for the event processing
	 */

	/* buffer_info structures do not need alignment */
	ret_code = i40e_allocate_virt_mem(hw, &mem, (hw->aq.num_arq_entries *
					  sizeof(struct i40e_dma_mem)));
	if (ret_code)
		goto alloc_arq_bufs;
	hw->aq.arq.r.arq_bi = (struct i40e_dma_mem *)mem.va;

	/* allocate the mapped buffers */
	for (i = 0; i < hw->aq.num_arq_entries; i++) {
		bi = &hw->aq.arq.r.arq_bi[i];
		ret_code = i40e_allocate_dma_mem(hw, bi,
						 i40e_mem_arq_buf,
						 hw->aq.arq_buf_size,
						 I40E_ADMINQ_DESC_ALIGNMENT);
		if (ret_code)
			goto unwind_alloc_arq_bufs;

		/* now configure the descriptors for use */
		desc = I40E_ADMINQ_DESC(hw->aq.arq, i);

		desc->flags = cpu_to_le16(I40E_AQ_FLAG_BUF);
		if (hw->aq.arq_buf_size > I40E_AQ_LARGE_BUF)
			desc->flags |= cpu_to_le16(I40E_AQ_FLAG_LB);
		desc->opcode = 0;
		/* This is in accordance with the Admin queue design; there is
		 * no register for buffer size configuration
		 */
		desc->datalen = cpu_to_le16((u16)bi->size);
		desc->retval = 0;
		desc->cookie_high = 0;
		desc->cookie_low = 0;
		desc->params.external.addr_high =
			cpu_to_le32(upper_32_bits(bi->pa));
		desc->params.external.addr_low =
			cpu_to_le32(lower_32_bits(bi->pa));
		desc->params.external.param0 = 0;
		desc->params.external.param1 = 0;
	}

alloc_arq_bufs:
	return ret_code;

unwind_alloc_arq_bufs:
	/* don't try to free the one that failed... */
	i--;
	for (; i >= 0; i--)
		i40e_free_dma_mem(hw, &hw->aq.arq.r.arq_bi[i]);
	mem.va = hw->aq.arq.r.arq_bi;
	i40e_free_virt_mem(hw, &mem);

	return ret_code;
}
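
/* Note on the loop above: each ARQ descriptor is pre-programmed with the DMA
 * address of its buffer, so firmware can write events without further driver
 * involvement; none of these descriptors are handed to firmware until the
 * tail bump in i40e_config_arq_regs() below.
 */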

/**
 * i40e_alloc_asq_bufs - Allocate empty buffer structs for the send queue
 * @hw: pointer to the hardware structure
 **/
static i40e_status i40e_alloc_asq_bufs(struct i40e_hw *hw)
{
	i40e_status ret_code;
	struct i40e_virt_mem mem;
	struct i40e_dma_mem *bi;
	int i;

	/* No mapped memory needed yet, just the buffer info structures */
	ret_code = i40e_allocate_virt_mem(hw, &mem, (hw->aq.num_asq_entries *
					  sizeof(struct i40e_dma_mem)));
	if (ret_code)
		goto alloc_asq_bufs;
	hw->aq.asq.r.asq_bi = (struct i40e_dma_mem *)mem.va;

	/* allocate the mapped buffers */
	for (i = 0; i < hw->aq.num_asq_entries; i++) {
		bi = &hw->aq.asq.r.asq_bi[i];
		ret_code = i40e_allocate_dma_mem(hw, bi,
						 i40e_mem_asq_buf,
						 hw->aq.asq_buf_size,
						 I40E_ADMINQ_DESC_ALIGNMENT);
		if (ret_code)
			goto unwind_alloc_asq_bufs;
	}
alloc_asq_bufs:
	return ret_code;

unwind_alloc_asq_bufs:
	/* don't try to free the one that failed... */
	i--;
	for (; i >= 0; i--)
		i40e_free_dma_mem(hw, &hw->aq.asq.r.asq_bi[i]);
	mem.va = hw->aq.asq.r.asq_bi;
	i40e_free_virt_mem(hw, &mem);

	return ret_code;
}

/**
 * i40e_free_arq_bufs - Free receive queue buffer info elements
 * @hw: pointer to the hardware structure
 **/
static void i40e_free_arq_bufs(struct i40e_hw *hw)
{
	struct i40e_virt_mem mem;
	int i;

	for (i = 0; i < hw->aq.num_arq_entries; i++)
		i40e_free_dma_mem(hw, &hw->aq.arq.r.arq_bi[i]);

	mem.va = hw->aq.arq.r.arq_bi;
	i40e_free_virt_mem(hw, &mem);
}

/**
 * i40e_free_asq_bufs - Free send queue buffer info elements
 * @hw: pointer to the hardware structure
 **/
static void i40e_free_asq_bufs(struct i40e_hw *hw)
{
	struct i40e_virt_mem mem;
	int i;

	/* only unmap if the address is non-NULL */
	for (i = 0; i < hw->aq.num_asq_entries; i++)
		if (hw->aq.asq.r.asq_bi[i].pa)
			i40e_free_dma_mem(hw, &hw->aq.asq.r.asq_bi[i]);

	/* now free the buffer info list */
	mem.va = hw->aq.asq.r.asq_bi;
	i40e_free_virt_mem(hw, &mem);
}

/**
 * i40e_config_asq_regs - configure ASQ registers
 * @hw: pointer to the hardware structure
 *
 * Configure base address and length registers for the transmit queue
 **/
static void i40e_config_asq_regs(struct i40e_hw *hw)
{
	if (hw->mac.type == I40E_MAC_VF) {
		/* configure the transmit queue */
		wr32(hw, I40E_VF_ATQBAH1, upper_32_bits(hw->aq.asq.dma_addr));
		wr32(hw, I40E_VF_ATQBAL1, lower_32_bits(hw->aq.asq.dma_addr));
		wr32(hw, I40E_VF_ATQLEN1, (hw->aq.num_asq_entries |
					   I40E_VF_ATQLEN1_ATQENABLE_MASK));
	} else {
		/* configure the transmit queue */
		wr32(hw, I40E_PF_ATQBAH, upper_32_bits(hw->aq.asq.dma_addr));
		wr32(hw, I40E_PF_ATQBAL, lower_32_bits(hw->aq.asq.dma_addr));
		wr32(hw, I40E_PF_ATQLEN, (hw->aq.num_asq_entries |
					  I40E_PF_ATQLEN_ATQENABLE_MASK));
	}
}

/**
 * i40e_config_arq_regs - ARQ register configuration
 * @hw: pointer to the hardware structure
 *
 * Configure base address and length registers for the receive (event) queue
 **/
static void i40e_config_arq_regs(struct i40e_hw *hw)
{
	if (hw->mac.type == I40E_MAC_VF) {
		/* configure the receive queue */
		wr32(hw, I40E_VF_ARQBAH1, upper_32_bits(hw->aq.arq.dma_addr));
		wr32(hw, I40E_VF_ARQBAL1, lower_32_bits(hw->aq.arq.dma_addr));
		wr32(hw, I40E_VF_ARQLEN1, (hw->aq.num_arq_entries |
					   I40E_VF_ARQLEN1_ARQENABLE_MASK));
	} else {
		/* configure the receive queue */
		wr32(hw, I40E_PF_ARQBAH, upper_32_bits(hw->aq.arq.dma_addr));
		wr32(hw, I40E_PF_ARQBAL, lower_32_bits(hw->aq.arq.dma_addr));
		wr32(hw, I40E_PF_ARQLEN, (hw->aq.num_arq_entries |
					  I40E_PF_ARQLEN_ARQENABLE_MASK));
	}

	/* Update tail in the HW to post pre-allocated buffers */
	wr32(hw, hw->aq.arq.tail, hw->aq.num_arq_entries - 1);
}
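
/* The tail write of num_arq_entries - 1 above leaves one descriptor slot
 * unposted; the usual ring-buffer reason for reserving one slot is so that
 * head == tail can unambiguously mean "empty" rather than "full".
 */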

/**
 * i40e_init_asq - main initialization routine for ASQ
 * @hw: pointer to the hardware structure
 *
 * This is the main initialization routine for the Admin Send Queue.
 * Prior to calling this function, drivers *MUST* set the following fields
 * in the hw->aq structure:
 *  - hw->aq.num_asq_entries
 *  - hw->aq.asq_buf_size
 *
 * Do *NOT* hold the lock when calling this as the memory allocation routines
 * called are not going to be atomic context safe
 **/
static i40e_status i40e_init_asq(struct i40e_hw *hw)
{
	i40e_status ret_code = 0;

	if (hw->aq.asq.count > 0) {
		/* queue already initialized */
		ret_code = I40E_ERR_NOT_READY;
		goto init_adminq_exit;
	}

	/* verify input for valid configuration */
	if ((hw->aq.num_asq_entries == 0) ||
	    (hw->aq.asq_buf_size == 0)) {
		ret_code = I40E_ERR_CONFIG;
		goto init_adminq_exit;
	}

	hw->aq.asq.next_to_use = 0;
	hw->aq.asq.next_to_clean = 0;
	hw->aq.asq.count = hw->aq.num_asq_entries;

	/* allocate the ring memory */
	ret_code = i40e_alloc_adminq_asq_ring(hw);
	if (ret_code)
		goto init_adminq_exit;

	/* allocate buffers in the rings */
	ret_code = i40e_alloc_asq_bufs(hw);
	if (ret_code)
		goto init_adminq_free_rings;

	/* initialize base registers */
	i40e_config_asq_regs(hw);

	/* success! */
	goto init_adminq_exit;

init_adminq_free_rings:
	i40e_free_adminq_asq(hw);

init_adminq_exit:
	return ret_code;
}

/**
 * i40e_init_arq - initialize ARQ
 * @hw: pointer to the hardware structure
 *
 * The main initialization routine for the Admin Receive (Event) Queue.
 * Prior to calling this function, drivers *MUST* set the following fields
 * in the hw->aq structure:
 *  - hw->aq.num_arq_entries
 *  - hw->aq.arq_buf_size
 *
 * Do *NOT* hold the lock when calling this as the memory allocation routines
 * called are not going to be atomic context safe
 **/
static i40e_status i40e_init_arq(struct i40e_hw *hw)
{
	i40e_status ret_code = 0;

	if (hw->aq.arq.count > 0) {
		/* queue already initialized */
		ret_code = I40E_ERR_NOT_READY;
		goto init_adminq_exit;
	}

	/* verify input for valid configuration */
	if ((hw->aq.num_arq_entries == 0) ||
	    (hw->aq.arq_buf_size == 0)) {
		ret_code = I40E_ERR_CONFIG;
		goto init_adminq_exit;
	}

	hw->aq.arq.next_to_use = 0;
	hw->aq.arq.next_to_clean = 0;
	hw->aq.arq.count = hw->aq.num_arq_entries;

	/* allocate the ring memory */
	ret_code = i40e_alloc_adminq_arq_ring(hw);
	if (ret_code)
		goto init_adminq_exit;

	/* allocate buffers in the rings */
	ret_code = i40e_alloc_arq_bufs(hw);
	if (ret_code)
		goto init_adminq_free_rings;

	/* initialize base registers */
	i40e_config_arq_regs(hw);

	/* success! */
	goto init_adminq_exit;

init_adminq_free_rings:
	i40e_free_adminq_arq(hw);

init_adminq_exit:
	return ret_code;
}

/**
 * i40e_shutdown_asq - shutdown the ASQ
 * @hw: pointer to the hardware structure
 *
 * The main shutdown routine for the Admin Send Queue
 **/
static i40e_status i40e_shutdown_asq(struct i40e_hw *hw)
{
	i40e_status ret_code = 0;

	if (hw->aq.asq.count == 0)
		return I40E_ERR_NOT_READY;

	/* Stop firmware AdminQ processing */
	if (hw->mac.type == I40E_MAC_VF)
		wr32(hw, I40E_VF_ATQLEN1, 0);
	else
		wr32(hw, I40E_PF_ATQLEN, 0);

	/* make sure lock is available */
	mutex_lock(&hw->aq.asq_mutex);

	hw->aq.asq.count = 0; /* to indicate uninitialized queue */

	/* free ring buffers */
	i40e_free_asq_bufs(hw);
	/* free the ring descriptors */
	i40e_free_adminq_asq(hw);

	mutex_unlock(&hw->aq.asq_mutex);

	return ret_code;
}

/**
 * i40e_shutdown_arq - shutdown ARQ
 * @hw: pointer to the hardware structure
 *
 * The main shutdown routine for the Admin Receive Queue
 **/
static i40e_status i40e_shutdown_arq(struct i40e_hw *hw)
{
	i40e_status ret_code = 0;

	if (hw->aq.arq.count == 0)
		return I40E_ERR_NOT_READY;

	/* Stop firmware AdminQ processing */
	if (hw->mac.type == I40E_MAC_VF)
		wr32(hw, I40E_VF_ARQLEN1, 0);
	else
		wr32(hw, I40E_PF_ARQLEN, 0);

	/* make sure lock is available */
	mutex_lock(&hw->aq.arq_mutex);

	hw->aq.arq.count = 0; /* to indicate uninitialized queue */

	/* free ring buffers */
	i40e_free_arq_bufs(hw);
	/* free the ring descriptors */
	i40e_free_adminq_arq(hw);

	mutex_unlock(&hw->aq.arq_mutex);

	return ret_code;
}

/**
 * i40e_init_adminq - main initialization routine for Admin Queue
 * @hw: pointer to the hardware structure
 *
 * Prior to calling this function, drivers *MUST* set the following fields
 * in the hw->aq structure:
 *  - hw->aq.num_asq_entries
 *  - hw->aq.num_arq_entries
 *  - hw->aq.arq_buf_size
 *  - hw->aq.asq_buf_size
 **/
i40e_status i40e_init_adminq(struct i40e_hw *hw)
{
	u16 eetrack_lo, eetrack_hi;
	i40e_status ret_code;

	/* verify input for valid configuration */
	if ((hw->aq.num_arq_entries == 0) ||
	    (hw->aq.num_asq_entries == 0) ||
	    (hw->aq.arq_buf_size == 0) ||
	    (hw->aq.asq_buf_size == 0)) {
		ret_code = I40E_ERR_CONFIG;
		goto init_adminq_exit;
	}

	/* initialize locks */
	mutex_init(&hw->aq.asq_mutex);
	mutex_init(&hw->aq.arq_mutex);

	/* Set up register offsets */
	i40e_adminq_init_regs(hw);

	/* allocate the ASQ */
	ret_code = i40e_init_asq(hw);
	if (ret_code)
		goto init_adminq_destroy_locks;

	/* allocate the ARQ */
	ret_code = i40e_init_arq(hw);
	if (ret_code)
		goto init_adminq_free_asq;

	ret_code = i40e_aq_get_firmware_version(hw,
				&hw->aq.fw_maj_ver, &hw->aq.fw_min_ver,
				&hw->aq.api_maj_ver, &hw->aq.api_min_ver,
				NULL);
	if (ret_code)
		goto init_adminq_free_arq;

	if (hw->aq.api_maj_ver != I40E_FW_API_VERSION_MAJOR ||
	    hw->aq.api_min_ver != I40E_FW_API_VERSION_MINOR) {
		ret_code = I40E_ERR_FIRMWARE_API_VERSION;
		goto init_adminq_free_arq;
	}
	i40e_read_nvm_word(hw, I40E_SR_NVM_IMAGE_VERSION, &hw->nvm.version);
	i40e_read_nvm_word(hw, I40E_SR_NVM_EETRACK_LO, &eetrack_lo);
	i40e_read_nvm_word(hw, I40E_SR_NVM_EETRACK_HI, &eetrack_hi);
	hw->nvm.eetrack = (eetrack_hi << 16) | eetrack_lo;
	ret_code = i40e_aq_set_hmc_resource_profile(hw,
						    I40E_HMC_PROFILE_DEFAULT,
						    0,
						    NULL);
	/* the HMC profile request is best effort; ignore any failure */
	ret_code = 0;

	/* success! */
	goto init_adminq_exit;

init_adminq_free_arq:
	i40e_shutdown_arq(hw);
init_adminq_free_asq:
	i40e_shutdown_asq(hw);
init_adminq_destroy_locks:
	mutex_destroy(&hw->aq.asq_mutex);
	mutex_destroy(&hw->aq.arq_mutex);

init_adminq_exit:
	return ret_code;
}
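
/* A minimal (hypothetical) caller sketch; the sizing values are driver
 * policy, not something this file mandates, and I40E_MAX_AQ_BUF_SIZE is
 * assumed from the adminq headers:
 *
 *	hw->aq.num_asq_entries = 128;
 *	hw->aq.num_arq_entries = 128;
 *	hw->aq.asq_buf_size = I40E_MAX_AQ_BUF_SIZE;
 *	hw->aq.arq_buf_size = I40E_MAX_AQ_BUF_SIZE;
 *	if (i40e_init_adminq(hw))
 *		goto err_out;
 */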

/**
 * i40e_shutdown_adminq - shutdown routine for the Admin Queue
 * @hw: pointer to the hardware structure
 **/
i40e_status i40e_shutdown_adminq(struct i40e_hw *hw)
{
	i40e_status ret_code = 0;

	i40e_shutdown_asq(hw);
	i40e_shutdown_arq(hw);
	/* destroy the locks */
	mutex_destroy(&hw->aq.asq_mutex);
	mutex_destroy(&hw->aq.arq_mutex);

	return ret_code;
}

/**
 * i40e_clean_asq - cleans Admin send queue
 * @hw: pointer to the hardware structure
 *
 * returns the number of free desc
 **/
static u16 i40e_clean_asq(struct i40e_hw *hw)
{
	struct i40e_adminq_ring *asq = &(hw->aq.asq);
	struct i40e_asq_cmd_details *details;
	u16 ntc = asq->next_to_clean;
	struct i40e_aq_desc desc_cb;
	struct i40e_aq_desc *desc;

	desc = I40E_ADMINQ_DESC(*asq, ntc);
	details = I40E_ADMINQ_DETAILS(*asq, ntc);
	while (rd32(hw, hw->aq.asq.head) != ntc) {
		if (details->callback) {
			I40E_ADMINQ_CALLBACK cb_func =
				(I40E_ADMINQ_CALLBACK)details->callback;
			desc_cb = *desc;
			cb_func(hw, &desc_cb);
		}
		memset((void *)desc, 0, sizeof(struct i40e_aq_desc));
		memset((void *)details, 0,
		       sizeof(struct i40e_asq_cmd_details));
		ntc++;
		if (ntc == asq->count)
			ntc = 0;
		desc = I40E_ADMINQ_DESC(*asq, ntc);
		details = I40E_ADMINQ_DETAILS(*asq, ntc);
	}

	asq->next_to_clean = ntc;

	return I40E_DESC_UNUSED(asq);
}
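
/* I40E_DESC_UNUSED() (defined in i40e_adminq.h) reports count - 1 minus the
 * number of descriptors still owned by firmware; one slot is always held
 * back so next_to_use never wraps onto next_to_clean on a full ring.
 */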

/**
 * i40e_asq_done - check if FW has processed the Admin Send Queue
 * @hw: pointer to the hw struct
 *
 * Returns true if the firmware has processed all descriptors on the
 * admin send queue.  Returns false if there are still requests pending.
 **/
bool i40e_asq_done(struct i40e_hw *hw)
{
	/* AQ designers suggest use of head for better
	 * timing reliability than DD bit
	 */
	return (rd32(hw, hw->aq.asq.head) == hw->aq.asq.next_to_use);
}

/**
 * i40e_asq_send_command - send command to Admin Queue
 * @hw: pointer to the hw struct
 * @desc: prefilled descriptor describing the command (non DMA mem)
 * @buff: buffer to use for indirect commands
 * @buff_size: size of buffer for indirect commands
 * @cmd_details: pointer to command details structure
 *
 * This is the main send command driver routine for the Admin Queue send
 * queue.  It runs the queue, cleans the queue, etc
 **/
i40e_status i40e_asq_send_command(struct i40e_hw *hw,
				  struct i40e_aq_desc *desc,
				  void *buff, /* can be NULL */
				  u16 buff_size,
				  struct i40e_asq_cmd_details *cmd_details)
{
	i40e_status status = 0;
	struct i40e_dma_mem *dma_buff = NULL;
	struct i40e_asq_cmd_details *details;
	struct i40e_aq_desc *desc_on_ring;
	bool cmd_completed = false;
	u16 retval = 0;

	if (hw->aq.asq.count == 0) {
		i40e_debug(hw, I40E_DEBUG_AQ_MESSAGE,
			   "AQTX: Admin queue not initialized.\n");
		status = I40E_ERR_QUEUE_EMPTY;
		goto asq_send_command_exit;
	}

	details = I40E_ADMINQ_DETAILS(hw->aq.asq, hw->aq.asq.next_to_use);
	if (cmd_details) {
		memcpy(details, cmd_details,
		       sizeof(struct i40e_asq_cmd_details));

		/* If the cmd_details are defined copy the cookie.  The
		 * cpu_to_le32 is not needed here because the data is ignored
		 * by the FW, only used by the driver
		 */
		if (details->cookie) {
			desc->cookie_high =
				cpu_to_le32(upper_32_bits(details->cookie));
			desc->cookie_low =
				cpu_to_le32(lower_32_bits(details->cookie));
		}
	} else {
		memset(details, 0, sizeof(struct i40e_asq_cmd_details));
	}

	/* clear requested flags and then set additional flags if defined */
	desc->flags &= ~cpu_to_le16(details->flags_dis);
	desc->flags |= cpu_to_le16(details->flags_ena);

	mutex_lock(&hw->aq.asq_mutex);

	if (buff_size > hw->aq.asq_buf_size) {
		i40e_debug(hw,
			   I40E_DEBUG_AQ_MESSAGE,
			   "AQTX: Invalid buffer size: %d.\n",
			   buff_size);
		status = I40E_ERR_INVALID_SIZE;
		goto asq_send_command_error;
	}

	if (details->postpone && !details->async) {
		i40e_debug(hw,
			   I40E_DEBUG_AQ_MESSAGE,
739 "AQTX: Async flag not set along with postpone flag");
		status = I40E_ERR_PARAM;
		goto asq_send_command_error;
	}

	/* call clean and check queue available function to reclaim the
	 * descriptors that were processed by FW, the function returns the
	 * number of desc available
	 */
	/* the clean function called here could be called in a separate thread
	 * in case of asynchronous completions
	 */
	if (i40e_clean_asq(hw) == 0) {
		i40e_debug(hw,
			   I40E_DEBUG_AQ_MESSAGE,
			   "AQTX: Error queue is full.\n");
		status = I40E_ERR_ADMIN_QUEUE_FULL;
		goto asq_send_command_error;
	}

	/* initialize the temp desc pointer with the right desc */
	desc_on_ring = I40E_ADMINQ_DESC(hw->aq.asq, hw->aq.asq.next_to_use);

	/* if the desc is available copy the temp desc to the right place */
	memcpy(desc_on_ring, desc, sizeof(struct i40e_aq_desc));

	/* if buff is not NULL assume indirect command */
	if (buff != NULL) {
		dma_buff = &(hw->aq.asq.r.asq_bi[hw->aq.asq.next_to_use]);
		/* copy the user buff into the respective DMA buff */
		memcpy(dma_buff->va, buff, buff_size);
		desc_on_ring->datalen = cpu_to_le16(buff_size);

		/* Update the address values in the desc with the pa value
		 * for respective buffer
		 */
		desc_on_ring->params.external.addr_high =
			cpu_to_le32(upper_32_bits(dma_buff->pa));
		desc_on_ring->params.external.addr_low =
			cpu_to_le32(lower_32_bits(dma_buff->pa));
	}

	/* bump the tail */
	i40e_debug_aq(hw, I40E_DEBUG_AQ_COMMAND, (void *)desc_on_ring, buff);
	(hw->aq.asq.next_to_use)++;
	if (hw->aq.asq.next_to_use == hw->aq.asq.count)
		hw->aq.asq.next_to_use = 0;
	if (!details->postpone)
		wr32(hw, hw->aq.asq.tail, hw->aq.asq.next_to_use);

	/* if cmd_details are not defined or async flag is not set,
	 * we need to wait for desc write back
	 */
	if (!details->async && !details->postpone) {
		u32 total_delay = 0;
		u32 delay_len = 10;

		do {
			/* AQ designers suggest use of head for better
			 * timing reliability than DD bit
			 */
			if (i40e_asq_done(hw))
				break;
			/* ugh! delay while spin_lock */
			udelay(delay_len);
			total_delay += delay_len;
		} while (total_delay < I40E_ASQ_CMD_TIMEOUT);
	}

	/* if ready, copy the desc back to temp */
	if (i40e_asq_done(hw)) {
		memcpy(desc, desc_on_ring, sizeof(struct i40e_aq_desc));
		if (buff != NULL)
			memcpy(buff, dma_buff->va, buff_size);
		retval = le16_to_cpu(desc->retval);
		if (retval != 0) {
			i40e_debug(hw,
				   I40E_DEBUG_AQ_MESSAGE,
				   "AQTX: Command completed with error 0x%X.\n",
				   retval);
			/* strip off FW internal code */
			retval &= 0xff;
		}
		cmd_completed = true;
		if ((enum i40e_admin_queue_err)retval == I40E_AQ_RC_OK)
			status = 0;
		else
			status = I40E_ERR_ADMIN_QUEUE_ERROR;
		hw->aq.asq_last_status = (enum i40e_admin_queue_err)retval;
	}

	/* update the error if time out occurred */
	if ((!cmd_completed) &&
	    (!details->async && !details->postpone)) {
		i40e_debug(hw,
			   I40E_DEBUG_AQ_MESSAGE,
			   "AQTX: Writeback timeout.\n");
		status = I40E_ERR_ADMIN_QUEUE_TIMEOUT;
	}

asq_send_command_error:
	mutex_unlock(&hw->aq.asq_mutex);
asq_send_command_exit:
	return status;
}
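
/* Sketch of a direct (bufferless) command as callers elsewhere in the driver
 * issue them; the opcode here is illustrative and assumed from
 * i40e_adminq_cmd.h:
 *
 *	struct i40e_aq_desc desc;
 *	i40e_status status;
 *
 *	i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_queue_shutdown);
 *	status = i40e_asq_send_command(hw, &desc, NULL, 0, NULL);
 */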

/**
 * i40e_fill_default_direct_cmd_desc - AQ descriptor helper function
 * @desc: pointer to the temp descriptor (non DMA mem)
 * @opcode: the opcode can be used to decide which flags to turn off or on
 *
 * Fill the desc with default values
 **/
void i40e_fill_default_direct_cmd_desc(struct i40e_aq_desc *desc,
				       u16 opcode)
{
	/* zero out the desc */
	memset((void *)desc, 0, sizeof(struct i40e_aq_desc));
	desc->opcode = cpu_to_le16(opcode);
	desc->flags = cpu_to_le16(I40E_AQ_FLAG_EI | I40E_AQ_FLAG_SI);
}

/**
 * i40e_clean_arq_element
 * @hw: pointer to the hw struct
 * @e: event info from the receive descriptor, includes any buffers
 * @pending: number of events that could be left to process
 *
 * This function cleans one Admin Receive Queue element and returns
 * the contents through e.  It can also return how many events are
 * left to process through 'pending'
 **/
i40e_status i40e_clean_arq_element(struct i40e_hw *hw,
				   struct i40e_arq_event_info *e,
				   u16 *pending)
{
	i40e_status ret_code = 0;
	u16 ntc = hw->aq.arq.next_to_clean;
	struct i40e_aq_desc *desc;
	struct i40e_dma_mem *bi;
	u16 desc_idx;
	u16 datalen;
	u16 flags;
	u16 ntu;

	/* take the lock before we start messing with the ring */
	mutex_lock(&hw->aq.arq_mutex);

	/* set next_to_use to head */
	ntu = (rd32(hw, hw->aq.arq.head) & I40E_PF_ARQH_ARQH_MASK);
	if (ntu == ntc) {
		/* nothing to do - shouldn't need to update ring's values */
		i40e_debug(hw,
			   I40E_DEBUG_AQ_MESSAGE,
			   "AQRX: Queue is empty.\n");
		ret_code = I40E_ERR_ADMIN_QUEUE_NO_WORK;
		goto clean_arq_element_out;
	}

	/* now clean the next descriptor */
	desc = I40E_ADMINQ_DESC(hw->aq.arq, ntc);
	desc_idx = ntc;
	i40e_debug_aq(hw,
		      I40E_DEBUG_AQ_COMMAND,
		      (void *)desc,
		      hw->aq.arq.r.arq_bi[desc_idx].va);

	flags = le16_to_cpu(desc->flags);
	if (flags & I40E_AQ_FLAG_ERR) {
		ret_code = I40E_ERR_ADMIN_QUEUE_ERROR;
		hw->aq.arq_last_status =
			(enum i40e_admin_queue_err)le16_to_cpu(desc->retval);
		i40e_debug(hw,
			   I40E_DEBUG_AQ_MESSAGE,
			   "AQRX: Event received with error 0x%X.\n",
			   hw->aq.arq_last_status);
	} else {
		memcpy(&e->desc, desc, sizeof(struct i40e_aq_desc));
		datalen = le16_to_cpu(desc->datalen);
		e->msg_size = min(datalen, e->msg_size);
		if (e->msg_buf != NULL && (e->msg_size != 0))
			memcpy(e->msg_buf, hw->aq.arq.r.arq_bi[desc_idx].va,
			       e->msg_size);
	}

	/* Restore the original datalen and buffer address in the desc,
	 * FW updates datalen to indicate the event message
	 * size
	 */
	bi = &hw->aq.arq.r.arq_bi[ntc];
	desc->datalen = cpu_to_le16((u16)bi->size);
	desc->params.external.addr_high = cpu_to_le32(upper_32_bits(bi->pa));
	desc->params.external.addr_low = cpu_to_le32(lower_32_bits(bi->pa));

	/* set tail = the last cleaned desc index. */
	wr32(hw, hw->aq.arq.tail, ntc);
	/* ntc is updated to tail + 1 */
	ntc++;
	if (ntc == hw->aq.num_arq_entries)
		ntc = 0;
	hw->aq.arq.next_to_clean = ntc;
	hw->aq.arq.next_to_use = ntu;

clean_arq_element_out:
	/* Set pending if needed, unlock and return */
	if (pending != NULL)
		*pending = (ntc > ntu ? hw->aq.arq.count : 0) + (ntu - ntc);
	mutex_unlock(&hw->aq.arq_mutex);

	return ret_code;
}
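
/* Sketch of an event drain loop (the buffer allocation scheme is the
 * caller's choice, not something this file prescribes); note msg_size is
 * clobbered on each call, so it must be reset every iteration:
 *
 *	struct i40e_arq_event_info event;
 *	u16 pending = 0;
 *	i40e_status ret;
 *
 *	event.msg_buf = kzalloc(hw->aq.arq_buf_size, GFP_KERNEL);
 *	do {
 *		event.msg_size = hw->aq.arq_buf_size;
 *		ret = i40e_clean_arq_element(hw, &event, &pending);
 *	} while (!ret && pending);
 */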

/**
 * i40e_resume_aq - resume AQ processing from 0
 * @hw: pointer to the hardware structure
 **/
void i40e_resume_aq(struct i40e_hw *hw)
{
	u32 reg = 0;

	/* Registers are reset after PF reset */
	hw->aq.asq.next_to_use = 0;
	hw->aq.asq.next_to_clean = 0;

	i40e_config_asq_regs(hw);
	reg = hw->aq.num_asq_entries;

	if (hw->mac.type == I40E_MAC_VF) {
		reg |= I40E_VF_ATQLEN1_ATQENABLE_MASK;
		wr32(hw, I40E_VF_ATQLEN1, reg);
	} else {
		reg |= I40E_PF_ATQLEN_ATQENABLE_MASK;
		wr32(hw, I40E_PF_ATQLEN, reg);
	}

	hw->aq.arq.next_to_use = 0;
	hw->aq.arq.next_to_clean = 0;

	i40e_config_arq_regs(hw);
	reg = hw->aq.num_arq_entries;

	if (hw->mac.type == I40E_MAC_VF) {
		reg |= I40E_VF_ARQLEN1_ARQENABLE_MASK;
		wr32(hw, I40E_VF_ARQLEN1, reg);
	} else {
		reg |= I40E_PF_ARQLEN_ARQENABLE_MASK;
		wr32(hw, I40E_PF_ARQLEN, reg);
	}
}
983}