/*******************************************************************************
 *
 * Intel Ethernet Controller XL710 Family Linux Virtual Function Driver
 * Copyright(c) 2013 Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * The full GNU General Public License is included in this distribution in
 * the file called "COPYING".
 *
 * Contact Information:
 * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
 *
 ******************************************************************************/

#include "i40e_status.h"
#include "i40e_type.h"
#include "i40e_register.h"
#include "i40e_adminq.h"
#include "i40e_prototype.h"

/**
 * i40e_is_nvm_update_op - return true if this is an NVM update operation
 * @desc: API request descriptor
 **/
static inline bool i40e_is_nvm_update_op(struct i40e_aq_desc *desc)
{
	return (desc->opcode == i40e_aqc_opc_nvm_erase) ||
	       (desc->opcode == i40e_aqc_opc_nvm_update);
}

/**
 * i40e_adminq_init_regs - Initialize AdminQ registers
 * @hw: pointer to the hardware structure
 *
 * This assumes the alloc_asq and alloc_arq functions have already been called
 **/
static void i40e_adminq_init_regs(struct i40e_hw *hw)
{
	/* set head and tail registers in our local struct */
	if (hw->mac.type == I40E_MAC_VF) {
		hw->aq.asq.tail = I40E_VF_ATQT1;
		hw->aq.asq.head = I40E_VF_ATQH1;
		hw->aq.asq.len = I40E_VF_ATQLEN1;
		hw->aq.arq.tail = I40E_VF_ARQT1;
		hw->aq.arq.head = I40E_VF_ARQH1;
		hw->aq.arq.len = I40E_VF_ARQLEN1;
	} else {
		hw->aq.asq.tail = I40E_PF_ATQT;
		hw->aq.asq.head = I40E_PF_ATQH;
		hw->aq.asq.len = I40E_PF_ATQLEN;
		hw->aq.arq.tail = I40E_PF_ARQT;
		hw->aq.arq.head = I40E_PF_ARQH;
		hw->aq.arq.len = I40E_PF_ARQLEN;
	}
}

/**
 * i40e_alloc_adminq_asq_ring - Allocate Admin Queue send rings
 * @hw: pointer to the hardware structure
 **/
static i40e_status i40e_alloc_adminq_asq_ring(struct i40e_hw *hw)
{
	i40e_status ret_code;

	ret_code = i40e_allocate_dma_mem(hw, &hw->aq.asq.desc_buf,
					 i40e_mem_atq_ring,
					 (hw->aq.num_asq_entries *
					 sizeof(struct i40e_aq_desc)),
					 I40E_ADMINQ_DESC_ALIGNMENT);
	if (ret_code)
		return ret_code;

	ret_code = i40e_allocate_virt_mem(hw, &hw->aq.asq.cmd_buf,
					  (hw->aq.num_asq_entries *
					  sizeof(struct i40e_asq_cmd_details)));
	if (ret_code) {
		i40e_free_dma_mem(hw, &hw->aq.asq.desc_buf);
		return ret_code;
	}

	return ret_code;
}

/**
 * i40e_alloc_adminq_arq_ring - Allocate Admin Queue receive rings
 * @hw: pointer to the hardware structure
 **/
static i40e_status i40e_alloc_adminq_arq_ring(struct i40e_hw *hw)
{
	i40e_status ret_code;

	ret_code = i40e_allocate_dma_mem(hw, &hw->aq.arq.desc_buf,
					 i40e_mem_arq_ring,
					 (hw->aq.num_arq_entries *
					 sizeof(struct i40e_aq_desc)),
					 I40E_ADMINQ_DESC_ALIGNMENT);

	return ret_code;
}

/**
 * i40e_free_adminq_asq - Free Admin Queue send rings
 * @hw: pointer to the hardware structure
 *
 * This assumes the posted send buffers have already been cleaned
 * and de-allocated
 **/
static void i40e_free_adminq_asq(struct i40e_hw *hw)
{
	i40e_free_dma_mem(hw, &hw->aq.asq.desc_buf);
}

/**
 * i40e_free_adminq_arq - Free Admin Queue receive rings
 * @hw: pointer to the hardware structure
 *
 * This assumes the posted receive buffers have already been cleaned
 * and de-allocated
 **/
static void i40e_free_adminq_arq(struct i40e_hw *hw)
{
	i40e_free_dma_mem(hw, &hw->aq.arq.desc_buf);
}

/**
 * i40e_alloc_arq_bufs - Allocate pre-posted buffers for the receive queue
 * @hw: pointer to the hardware structure
 **/
static i40e_status i40e_alloc_arq_bufs(struct i40e_hw *hw)
{
	i40e_status ret_code;
	struct i40e_aq_desc *desc;
	struct i40e_dma_mem *bi;
	int i;

	/* We'll be allocating the buffer info memory first, then we can
	 * allocate the mapped buffers for the event processing
	 */

	/* buffer_info structures do not need alignment */
	ret_code = i40e_allocate_virt_mem(hw, &hw->aq.arq.dma_head,
		(hw->aq.num_arq_entries * sizeof(struct i40e_dma_mem)));
	if (ret_code)
		goto alloc_arq_bufs;
	hw->aq.arq.r.arq_bi = (struct i40e_dma_mem *)hw->aq.arq.dma_head.va;

	/* allocate the mapped buffers */
	for (i = 0; i < hw->aq.num_arq_entries; i++) {
		bi = &hw->aq.arq.r.arq_bi[i];
		ret_code = i40e_allocate_dma_mem(hw, bi,
						 i40e_mem_arq_buf,
						 hw->aq.arq_buf_size,
						 I40E_ADMINQ_DESC_ALIGNMENT);
		if (ret_code)
			goto unwind_alloc_arq_bufs;

		/* now configure the descriptors for use */
		desc = I40E_ADMINQ_DESC(hw->aq.arq, i);

		desc->flags = cpu_to_le16(I40E_AQ_FLAG_BUF);
		if (hw->aq.arq_buf_size > I40E_AQ_LARGE_BUF)
			desc->flags |= cpu_to_le16(I40E_AQ_FLAG_LB);
		desc->opcode = 0;
		/* This is in accordance with Admin queue design, there is no
		 * register for buffer size configuration
		 */
		desc->datalen = cpu_to_le16((u16)bi->size);
		desc->retval = 0;
		desc->cookie_high = 0;
		desc->cookie_low = 0;
		desc->params.external.addr_high =
			cpu_to_le32(upper_32_bits(bi->pa));
		desc->params.external.addr_low =
			cpu_to_le32(lower_32_bits(bi->pa));
		desc->params.external.param0 = 0;
		desc->params.external.param1 = 0;
	}

alloc_arq_bufs:
	return ret_code;

unwind_alloc_arq_bufs:
	/* don't try to free the one that failed... */
	i--;
	for (; i >= 0; i--)
		i40e_free_dma_mem(hw, &hw->aq.arq.r.arq_bi[i]);
	i40e_free_virt_mem(hw, &hw->aq.arq.dma_head);

	return ret_code;
}

/**
 * i40e_alloc_asq_bufs - Allocate empty buffer structs for the send queue
 * @hw: pointer to the hardware structure
 **/
static i40e_status i40e_alloc_asq_bufs(struct i40e_hw *hw)
{
	i40e_status ret_code;
	struct i40e_dma_mem *bi;
	int i;

	/* No mapped memory needed yet, just the buffer info structures */
	ret_code = i40e_allocate_virt_mem(hw, &hw->aq.asq.dma_head,
		(hw->aq.num_asq_entries * sizeof(struct i40e_dma_mem)));
	if (ret_code)
		goto alloc_asq_bufs;
	hw->aq.asq.r.asq_bi = (struct i40e_dma_mem *)hw->aq.asq.dma_head.va;

	/* allocate the mapped buffers */
	for (i = 0; i < hw->aq.num_asq_entries; i++) {
		bi = &hw->aq.asq.r.asq_bi[i];
		ret_code = i40e_allocate_dma_mem(hw, bi,
						 i40e_mem_asq_buf,
						 hw->aq.asq_buf_size,
						 I40E_ADMINQ_DESC_ALIGNMENT);
		if (ret_code)
			goto unwind_alloc_asq_bufs;
	}
alloc_asq_bufs:
	return ret_code;

unwind_alloc_asq_bufs:
	/* don't try to free the one that failed... */
	i--;
	for (; i >= 0; i--)
		i40e_free_dma_mem(hw, &hw->aq.asq.r.asq_bi[i]);
	i40e_free_virt_mem(hw, &hw->aq.asq.dma_head);

	return ret_code;
}

/**
 * i40e_free_arq_bufs - Free receive queue buffer info elements
 * @hw: pointer to the hardware structure
 **/
static void i40e_free_arq_bufs(struct i40e_hw *hw)
{
	int i;

	/* free descriptors */
	for (i = 0; i < hw->aq.num_arq_entries; i++)
		i40e_free_dma_mem(hw, &hw->aq.arq.r.arq_bi[i]);

	/* free the descriptor memory */
	i40e_free_dma_mem(hw, &hw->aq.arq.desc_buf);

	/* free the dma header */
	i40e_free_virt_mem(hw, &hw->aq.arq.dma_head);
}

/**
 * i40e_free_asq_bufs - Free send queue buffer info elements
 * @hw: pointer to the hardware structure
 **/
static void i40e_free_asq_bufs(struct i40e_hw *hw)
{
	int i;

	/* only unmap if the address is non-NULL */
	for (i = 0; i < hw->aq.num_asq_entries; i++)
		if (hw->aq.asq.r.asq_bi[i].pa)
			i40e_free_dma_mem(hw, &hw->aq.asq.r.asq_bi[i]);

	/* free the buffer info list */
	i40e_free_virt_mem(hw, &hw->aq.asq.cmd_buf);

	/* free the descriptor memory */
	i40e_free_dma_mem(hw, &hw->aq.asq.desc_buf);

	/* free the dma header */
	i40e_free_virt_mem(hw, &hw->aq.asq.dma_head);
}

/**
 * i40e_config_asq_regs - configure ASQ registers
 * @hw: pointer to the hardware structure
 *
 * Configure base address and length registers for the transmit queue
 **/
static void i40e_config_asq_regs(struct i40e_hw *hw)
{
	if (hw->mac.type == I40E_MAC_VF) {
		/* configure the transmit queue */
		wr32(hw, I40E_VF_ATQBAH1,
		     upper_32_bits(hw->aq.asq.desc_buf.pa));
		wr32(hw, I40E_VF_ATQBAL1,
		     lower_32_bits(hw->aq.asq.desc_buf.pa));
		wr32(hw, I40E_VF_ATQLEN1, (hw->aq.num_asq_entries |
					   I40E_VF_ATQLEN1_ATQENABLE_MASK));
	} else {
		/* configure the transmit queue */
		wr32(hw, I40E_PF_ATQBAH,
		     upper_32_bits(hw->aq.asq.desc_buf.pa));
		wr32(hw, I40E_PF_ATQBAL,
		     lower_32_bits(hw->aq.asq.desc_buf.pa));
		wr32(hw, I40E_PF_ATQLEN, (hw->aq.num_asq_entries |
					  I40E_PF_ATQLEN_ATQENABLE_MASK));
	}
}

/**
 * i40e_config_arq_regs - ARQ register configuration
 * @hw: pointer to the hardware structure
 *
 * Configure base address and length registers for the receive (event queue)
 **/
static void i40e_config_arq_regs(struct i40e_hw *hw)
{
	if (hw->mac.type == I40E_MAC_VF) {
		/* configure the receive queue */
		wr32(hw, I40E_VF_ARQBAH1,
		     upper_32_bits(hw->aq.arq.desc_buf.pa));
		wr32(hw, I40E_VF_ARQBAL1,
		     lower_32_bits(hw->aq.arq.desc_buf.pa));
		wr32(hw, I40E_VF_ARQLEN1, (hw->aq.num_arq_entries |
					   I40E_VF_ARQLEN1_ARQENABLE_MASK));
	} else {
		/* configure the receive queue */
		wr32(hw, I40E_PF_ARQBAH,
		     upper_32_bits(hw->aq.arq.desc_buf.pa));
		wr32(hw, I40E_PF_ARQBAL,
		     lower_32_bits(hw->aq.arq.desc_buf.pa));
		wr32(hw, I40E_PF_ARQLEN, (hw->aq.num_arq_entries |
					  I40E_PF_ARQLEN_ARQENABLE_MASK));
	}

	/* Update tail in the HW to post pre-allocated buffers */
	wr32(hw, hw->aq.arq.tail, hw->aq.num_arq_entries - 1);
}

/**
 * i40e_init_asq - main initialization routine for ASQ
 * @hw: pointer to the hardware structure
 *
 * This is the main initialization routine for the Admin Send Queue.
 * Prior to calling this function, drivers *MUST* set the following fields
 * in the hw->aq structure:
 *  - hw->aq.num_asq_entries
 *  - hw->aq.asq_buf_size
 *
 * Do *NOT* hold the lock when calling this as the memory allocation routines
 * called are not going to be atomic context safe
 **/
static i40e_status i40e_init_asq(struct i40e_hw *hw)
{
	i40e_status ret_code = 0;

	if (hw->aq.asq.count > 0) {
		/* queue already initialized */
		ret_code = I40E_ERR_NOT_READY;
		goto init_adminq_exit;
	}

	/* verify input for valid configuration */
	if ((hw->aq.num_asq_entries == 0) ||
	    (hw->aq.asq_buf_size == 0)) {
		ret_code = I40E_ERR_CONFIG;
		goto init_adminq_exit;
	}

	hw->aq.asq.next_to_use = 0;
	hw->aq.asq.next_to_clean = 0;
	hw->aq.asq.count = hw->aq.num_asq_entries;

	/* allocate the ring memory */
	ret_code = i40e_alloc_adminq_asq_ring(hw);
	if (ret_code)
		goto init_adminq_exit;

	/* allocate buffers in the rings */
	ret_code = i40e_alloc_asq_bufs(hw);
	if (ret_code)
		goto init_adminq_free_rings;

	/* initialize base registers */
	i40e_config_asq_regs(hw);

	/* success! */
	goto init_adminq_exit;

init_adminq_free_rings:
	i40e_free_adminq_asq(hw);

init_adminq_exit:
	return ret_code;
}

/**
 * i40e_init_arq - initialize ARQ
 * @hw: pointer to the hardware structure
 *
 * The main initialization routine for the Admin Receive (Event) Queue.
 * Prior to calling this function, drivers *MUST* set the following fields
 * in the hw->aq structure:
 *  - hw->aq.num_arq_entries
 *  - hw->aq.arq_buf_size
 *
 * Do *NOT* hold the lock when calling this as the memory allocation routines
 * called are not going to be atomic context safe
 **/
static i40e_status i40e_init_arq(struct i40e_hw *hw)
{
	i40e_status ret_code = 0;

	if (hw->aq.arq.count > 0) {
		/* queue already initialized */
		ret_code = I40E_ERR_NOT_READY;
		goto init_adminq_exit;
	}

	/* verify input for valid configuration */
	if ((hw->aq.num_arq_entries == 0) ||
	    (hw->aq.arq_buf_size == 0)) {
		ret_code = I40E_ERR_CONFIG;
		goto init_adminq_exit;
	}

	hw->aq.arq.next_to_use = 0;
	hw->aq.arq.next_to_clean = 0;
	hw->aq.arq.count = hw->aq.num_arq_entries;

	/* allocate the ring memory */
	ret_code = i40e_alloc_adminq_arq_ring(hw);
	if (ret_code)
		goto init_adminq_exit;

	/* allocate buffers in the rings */
	ret_code = i40e_alloc_arq_bufs(hw);
	if (ret_code)
		goto init_adminq_free_rings;

	/* initialize base registers */
	i40e_config_arq_regs(hw);

	/* success! */
	goto init_adminq_exit;

init_adminq_free_rings:
	i40e_free_adminq_arq(hw);

init_adminq_exit:
	return ret_code;
}

/**
 * i40e_shutdown_asq - shutdown the ASQ
 * @hw: pointer to the hardware structure
 *
 * The main shutdown routine for the Admin Send Queue
 **/
static i40e_status i40e_shutdown_asq(struct i40e_hw *hw)
{
	i40e_status ret_code = 0;

	if (hw->aq.asq.count == 0)
		return I40E_ERR_NOT_READY;

	/* Stop firmware AdminQ processing */
	wr32(hw, hw->aq.asq.head, 0);
	wr32(hw, hw->aq.asq.tail, 0);
	wr32(hw, hw->aq.asq.len, 0);

	/* make sure lock is available */
	mutex_lock(&hw->aq.asq_mutex);

	hw->aq.asq.count = 0; /* to indicate uninitialized queue */

	/* free ring buffers */
	i40e_free_asq_bufs(hw);

	mutex_unlock(&hw->aq.asq_mutex);

	return ret_code;
}

/**
 * i40e_shutdown_arq - shutdown ARQ
 * @hw: pointer to the hardware structure
 *
 * The main shutdown routine for the Admin Receive Queue
 **/
static i40e_status i40e_shutdown_arq(struct i40e_hw *hw)
{
	i40e_status ret_code = 0;

	if (hw->aq.arq.count == 0)
		return I40E_ERR_NOT_READY;

	/* Stop firmware AdminQ processing */
	wr32(hw, hw->aq.arq.head, 0);
	wr32(hw, hw->aq.arq.tail, 0);
	wr32(hw, hw->aq.arq.len, 0);

	/* make sure lock is available */
	mutex_lock(&hw->aq.arq_mutex);

	hw->aq.arq.count = 0; /* to indicate uninitialized queue */

	/* free ring buffers */
	i40e_free_arq_bufs(hw);

	mutex_unlock(&hw->aq.arq_mutex);

	return ret_code;
}

/**
 * i40evf_init_adminq - main initialization routine for Admin Queue
 * @hw: pointer to the hardware structure
 *
 * Prior to calling this function, drivers *MUST* set the following fields
 * in the hw->aq structure:
 *  - hw->aq.num_asq_entries
 *  - hw->aq.num_arq_entries
 *  - hw->aq.arq_buf_size
 *  - hw->aq.asq_buf_size
 **/
i40e_status i40evf_init_adminq(struct i40e_hw *hw)
{
	i40e_status ret_code;

	/* verify input for valid configuration */
	if ((hw->aq.num_arq_entries == 0) ||
	    (hw->aq.num_asq_entries == 0) ||
	    (hw->aq.arq_buf_size == 0) ||
	    (hw->aq.asq_buf_size == 0)) {
		ret_code = I40E_ERR_CONFIG;
		goto init_adminq_exit;
	}

	/* initialize locks */
	mutex_init(&hw->aq.asq_mutex);
	mutex_init(&hw->aq.arq_mutex);

	/* Set up register offsets */
	i40e_adminq_init_regs(hw);

	/* allocate the ASQ */
	ret_code = i40e_init_asq(hw);
	if (ret_code)
		goto init_adminq_destroy_locks;

	/* allocate the ARQ */
	ret_code = i40e_init_arq(hw);
	if (ret_code)
		goto init_adminq_free_asq;

	/* success! */
	goto init_adminq_exit;

init_adminq_free_asq:
	i40e_shutdown_asq(hw);
init_adminq_destroy_locks:

init_adminq_exit:
	return ret_code;
}

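/* A minimal sketch of the setup a caller is expected to perform before
 * i40evf_init_adminq(); the entry counts and buffer sizes below are
 * illustrative values only, not requirements imposed by this file:
 *
 *	hw->aq.num_asq_entries = 32;
 *	hw->aq.num_arq_entries = 32;
 *	hw->aq.asq_buf_size = 4096;
 *	hw->aq.arq_buf_size = 4096;
 *	if (i40evf_init_adminq(hw))
 *		goto err_init_adminq;
 */
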
/**
 * i40evf_shutdown_adminq - shutdown routine for the Admin Queue
 * @hw: pointer to the hardware structure
 **/
i40e_status i40evf_shutdown_adminq(struct i40e_hw *hw)
{
	i40e_status ret_code = 0;

	if (i40evf_check_asq_alive(hw))
		i40evf_aq_queue_shutdown(hw, true);

	i40e_shutdown_asq(hw);
	i40e_shutdown_arq(hw);

	/* destroy the locks */

	return ret_code;
}

/**
 * i40e_clean_asq - cleans Admin send queue
 * @hw: pointer to the hardware structure
 *
 * returns the number of free desc
 **/
static u16 i40e_clean_asq(struct i40e_hw *hw)
{
	struct i40e_adminq_ring *asq = &(hw->aq.asq);
	struct i40e_asq_cmd_details *details;
	u16 ntc = asq->next_to_clean;
	struct i40e_aq_desc desc_cb;
	struct i40e_aq_desc *desc;

	desc = I40E_ADMINQ_DESC(*asq, ntc);
	details = I40E_ADMINQ_DETAILS(*asq, ntc);
	while (rd32(hw, hw->aq.asq.head) != ntc) {
		if (details->callback) {
			I40E_ADMINQ_CALLBACK cb_func =
				(I40E_ADMINQ_CALLBACK)details->callback;
			desc_cb = *desc;
			cb_func(hw, &desc_cb);
		}
		memset((void *)desc, 0, sizeof(struct i40e_aq_desc));
		memset((void *)details, 0,
		       sizeof(struct i40e_asq_cmd_details));
		ntc++;
		if (ntc == asq->count)
			ntc = 0;
		desc = I40E_ADMINQ_DESC(*asq, ntc);
		details = I40E_ADMINQ_DETAILS(*asq, ntc);
	}

	asq->next_to_clean = ntc;

	return I40E_DESC_UNUSED(asq);
}

/**
 * i40evf_asq_done - check if FW has processed the Admin Send Queue
 * @hw: pointer to the hw struct
 *
 * Returns true if the firmware has processed all descriptors on the
 * admin send queue. Returns false if there are still requests pending.
 **/
bool i40evf_asq_done(struct i40e_hw *hw)
{
	/* AQ designers suggest use of head for better
	 * timing reliability than DD bit
	 */
	return rd32(hw, hw->aq.asq.head) == hw->aq.asq.next_to_use;
}

/**
 * i40evf_asq_send_command - send command to Admin Queue
 * @hw: pointer to the hw struct
 * @desc: prefilled descriptor describing the command (non DMA mem)
 * @buff: buffer to use for indirect commands
 * @buff_size: size of buffer for indirect commands
 * @cmd_details: pointer to command details structure
 *
 * This is the main send command driver routine for the Admin Queue send
 * queue. It runs the queue, cleans the queue, etc.
 **/
i40e_status i40evf_asq_send_command(struct i40e_hw *hw,
				    struct i40e_aq_desc *desc,
				    void *buff, /* can be NULL */
				    u16 buff_size,
				    struct i40e_asq_cmd_details *cmd_details)
{
	i40e_status status = 0;
	struct i40e_dma_mem *dma_buff = NULL;
	struct i40e_asq_cmd_details *details;
	struct i40e_aq_desc *desc_on_ring;
	bool cmd_completed = false;
	u16 retval = 0;

	if (hw->aq.asq.count == 0) {
		i40e_debug(hw, I40E_DEBUG_AQ_MESSAGE,
			   "AQTX: Admin queue not initialized.\n");
		status = I40E_ERR_QUEUE_EMPTY;
		goto asq_send_command_exit;
	}

	if (i40e_is_nvm_update_op(desc) && hw->aq.nvm_busy) {
		i40e_debug(hw, I40E_DEBUG_AQ_MESSAGE, "AQTX: NVM busy.\n");
		status = I40E_ERR_NVM;
		goto asq_send_command_exit;
	}

	details = I40E_ADMINQ_DETAILS(hw->aq.asq, hw->aq.asq.next_to_use);
	if (cmd_details) {
		*details = *cmd_details;

		/* If the cmd_details are defined copy the cookie. The
		 * cpu_to_le32 is not needed here because the data is ignored
		 * by the FW, only used by the driver
		 */
		if (details->cookie) {
			desc->cookie_high =
				cpu_to_le32(upper_32_bits(details->cookie));
			desc->cookie_low =
				cpu_to_le32(lower_32_bits(details->cookie));
		}
	} else {
		memset(details, 0, sizeof(struct i40e_asq_cmd_details));
	}

	/* clear requested flags and then set additional flags if defined */
	desc->flags &= ~cpu_to_le16(details->flags_dis);
	desc->flags |= cpu_to_le16(details->flags_ena);

	mutex_lock(&hw->aq.asq_mutex);

	if (buff_size > hw->aq.asq_buf_size) {
		i40e_debug(hw,
			   I40E_DEBUG_AQ_MESSAGE,
			   "AQTX: Invalid buffer size: %d.\n",
			   buff_size);
		status = I40E_ERR_INVALID_SIZE;
		goto asq_send_command_error;
	}

	if (details->postpone && !details->async) {
		i40e_debug(hw,
			   I40E_DEBUG_AQ_MESSAGE,
			   "AQTX: Async flag not set along with postpone flag\n");
		status = I40E_ERR_PARAM;
		goto asq_send_command_error;
	}

	/* call clean and check queue available function to reclaim the
	 * descriptors that were processed by FW, the function returns the
	 * number of desc available
	 */
	/* the clean function called here could be called in a separate thread
	 * in case of asynchronous completions
	 */
	if (i40e_clean_asq(hw) == 0) {
		i40e_debug(hw,
			   I40E_DEBUG_AQ_MESSAGE,
			   "AQTX: Error queue is full.\n");
		status = I40E_ERR_ADMIN_QUEUE_FULL;
		goto asq_send_command_error;
	}

	/* initialize the temp desc pointer with the right desc */
	desc_on_ring = I40E_ADMINQ_DESC(hw->aq.asq, hw->aq.asq.next_to_use);

	/* if the desc is available copy the temp desc to the right place */
	*desc_on_ring = *desc;

	/* if buff is not NULL assume indirect command */
	if (buff != NULL) {
		dma_buff = &(hw->aq.asq.r.asq_bi[hw->aq.asq.next_to_use]);
		/* copy the user buff into the respective DMA buff */
		memcpy(dma_buff->va, buff, buff_size);
		desc_on_ring->datalen = cpu_to_le16(buff_size);

		/* Update the address values in the desc with the pa value
		 * for respective buffer
		 */
		desc_on_ring->params.external.addr_high =
			cpu_to_le32(upper_32_bits(dma_buff->pa));
		desc_on_ring->params.external.addr_low =
			cpu_to_le32(lower_32_bits(dma_buff->pa));
	}

	/* bump the tail */
	i40evf_debug_aq(hw, I40E_DEBUG_AQ_COMMAND, (void *)desc_on_ring, buff);
	(hw->aq.asq.next_to_use)++;
	if (hw->aq.asq.next_to_use == hw->aq.asq.count)
		hw->aq.asq.next_to_use = 0;
	if (!details->postpone)
		wr32(hw, hw->aq.asq.tail, hw->aq.asq.next_to_use);

	/* if cmd_details are not defined or async flag is not set,
	 * we need to wait for desc write back
	 */
	if (!details->async && !details->postpone) {
		u32 total_delay = 0;
		u32 delay_len = 10;

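		/* busy-poll in steps of delay_len microseconds (udelay)
		 * until the firmware write-back is seen or total_delay
		 * reaches the I40E_ASQ_CMD_TIMEOUT budget
		 */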
		do {
			/* AQ designers suggest use of head for better
			 * timing reliability than DD bit
			 */
			if (i40evf_asq_done(hw))
				break;
			/* ugh! delay while spin_lock */
			udelay(delay_len);
			total_delay += delay_len;
		} while (total_delay < I40E_ASQ_CMD_TIMEOUT);
	}

	/* if ready, copy the desc back to temp */
	if (i40evf_asq_done(hw)) {
		*desc = *desc_on_ring;
		if (buff != NULL)
			memcpy(buff, dma_buff->va, buff_size);
		retval = le16_to_cpu(desc->retval);
		if (retval != 0) {
			i40e_debug(hw,
				   I40E_DEBUG_AQ_MESSAGE,
				   "AQTX: Command completed with error 0x%X.\n",
				   retval);
			/* strip off FW internal code */
			retval &= 0xff;
		}
		cmd_completed = true;
		if ((enum i40e_admin_queue_err)retval == I40E_AQ_RC_OK)
			status = 0;
		else
			status = I40E_ERR_ADMIN_QUEUE_ERROR;
		hw->aq.asq_last_status = (enum i40e_admin_queue_err)retval;
	}

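	/* NVM update commands are serialized: mark the NVM busy here and let
	 * i40evf_clean_arq_element() clear the flag when the completion
	 * event for the update arrives on the ARQ
	 */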
	if (i40e_is_nvm_update_op(desc))
		hw->aq.nvm_busy = true;

	/* update the error if a timeout occurred */
	if ((!cmd_completed) &&
	    (!details->async && !details->postpone)) {
		i40e_debug(hw,
			   I40E_DEBUG_AQ_MESSAGE,
			   "AQTX: Writeback timeout.\n");
		status = I40E_ERR_ADMIN_QUEUE_TIMEOUT;
	}

asq_send_command_error:
	mutex_unlock(&hw->aq.asq_mutex);
asq_send_command_exit:
	return status;
}

/**
 * i40evf_fill_default_direct_cmd_desc - AQ descriptor helper function
 * @desc: pointer to the temp descriptor (non DMA mem)
 * @opcode: the opcode can be used to decide which flags to turn off or on
 *
 * Fill the desc with default values
 **/
void i40evf_fill_default_direct_cmd_desc(struct i40e_aq_desc *desc,
					 u16 opcode)
{
	/* zero out the desc */
	memset((void *)desc, 0, sizeof(struct i40e_aq_desc));
	desc->opcode = cpu_to_le16(opcode);
	desc->flags = cpu_to_le16(I40E_AQ_FLAG_SI);
}

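/* A minimal sketch of the usual fill-then-send pattern for a direct
 * (no-buffer) command; the opcode and error handling shown are purely
 * illustrative and not something this file defines:
 *
 *	struct i40e_aq_desc desc;
 *
 *	i40evf_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_queue_shutdown);
 *	if (i40evf_asq_send_command(hw, &desc, NULL, 0, NULL))
 *		return I40E_ERR_ADMIN_QUEUE_ERROR;
 */
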
/**
 * i40evf_clean_arq_element
 * @hw: pointer to the hw struct
 * @e: event info from the receive descriptor, includes any buffers
 * @pending: number of events that could be left to process
 *
 * This function cleans one Admin Receive Queue element and returns
 * the contents through e. It can also return how many events are
 * left to process through 'pending'
 **/
i40e_status i40evf_clean_arq_element(struct i40e_hw *hw,
				     struct i40e_arq_event_info *e,
				     u16 *pending)
{
	i40e_status ret_code = 0;
	u16 ntc = hw->aq.arq.next_to_clean;
	struct i40e_aq_desc *desc;
	struct i40e_dma_mem *bi;
	u16 desc_idx;
	u16 datalen;
	u16 flags;
	u16 ntu;

	/* take the lock before we start messing with the ring */
	mutex_lock(&hw->aq.arq_mutex);

	/* set next_to_use to head */
	ntu = (rd32(hw, hw->aq.arq.head) & I40E_PF_ARQH_ARQH_MASK);
	if (ntu == ntc) {
		/* nothing to do - shouldn't need to update ring's values */
		i40e_debug(hw,
			   I40E_DEBUG_AQ_MESSAGE,
			   "AQRX: Queue is empty.\n");
		ret_code = I40E_ERR_ADMIN_QUEUE_NO_WORK;
		goto clean_arq_element_out;
	}

	/* now clean the next descriptor */
	desc = I40E_ADMINQ_DESC(hw->aq.arq, ntc);
	desc_idx = ntc;
	i40evf_debug_aq(hw,
			I40E_DEBUG_AQ_COMMAND,
			(void *)desc,
			hw->aq.arq.r.arq_bi[desc_idx].va);

	flags = le16_to_cpu(desc->flags);
	if (flags & I40E_AQ_FLAG_ERR) {
		ret_code = I40E_ERR_ADMIN_QUEUE_ERROR;
		hw->aq.arq_last_status =
			(enum i40e_admin_queue_err)le16_to_cpu(desc->retval);
		i40e_debug(hw,
			   I40E_DEBUG_AQ_MESSAGE,
			   "AQRX: Event received with error 0x%X.\n",
			   hw->aq.arq_last_status);
	} else {
		e->desc = *desc;
		datalen = le16_to_cpu(desc->datalen);
		e->msg_size = min(datalen, e->msg_size);
		if (e->msg_buf != NULL && (e->msg_size != 0))
			memcpy(e->msg_buf, hw->aq.arq.r.arq_bi[desc_idx].va,
			       e->msg_size);
	}

	if (i40e_is_nvm_update_op(&e->desc))
		hw->aq.nvm_busy = false;

	/* Restore the original datalen and buffer address in the desc,
	 * FW updates datalen to indicate the event message
	 * size
	 */
	bi = &hw->aq.arq.r.arq_bi[ntc];
	memset((void *)desc, 0, sizeof(struct i40e_aq_desc));

	desc->flags = cpu_to_le16(I40E_AQ_FLAG_BUF);
	if (hw->aq.arq_buf_size > I40E_AQ_LARGE_BUF)
		desc->flags |= cpu_to_le16(I40E_AQ_FLAG_LB);
	desc->datalen = cpu_to_le16((u16)bi->size);
	desc->params.external.addr_high = cpu_to_le32(upper_32_bits(bi->pa));
	desc->params.external.addr_low = cpu_to_le32(lower_32_bits(bi->pa));

	/* set tail = the last cleaned desc index. */
	wr32(hw, hw->aq.arq.tail, ntc);
	/* ntc is updated to tail + 1 */
	ntc++;
	if (ntc == hw->aq.num_arq_entries)
		ntc = 0;
	hw->aq.arq.next_to_clean = ntc;
	hw->aq.arq.next_to_use = ntu;

clean_arq_element_out:
	/* Set pending if needed, unlock and return */
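	/* pending is the number of descriptors between ntc (next entry to
	 * clean) and ntu (hardware head); the arq.count term handles the
	 * ring wrap-around when ntc has moved past ntu
	 */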
	if (pending != NULL)
		*pending = (ntc > ntu ? hw->aq.arq.count : 0) + (ntu - ntc);
	mutex_unlock(&hw->aq.arq_mutex);

	return ret_code;
}

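/**
 * i40evf_resume_aq - resume Admin Queue processing after a PF reset
 * @hw: pointer to the hardware structure
 *
 * Re-programs the ASQ/ARQ base address and length registers (and re-posts
 * the receive buffers via the ARQ tail) using the already-allocated rings;
 * the ring memory itself is left untouched.
 **/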
void i40evf_resume_aq(struct i40e_hw *hw)
{
	/* Registers are reset after PF reset */
	hw->aq.asq.next_to_use = 0;
	hw->aq.asq.next_to_clean = 0;

	i40e_config_asq_regs(hw);

	hw->aq.arq.next_to_use = 0;
	hw->aq.arq.next_to_clean = 0;

	i40e_config_arq_regs(hw);
949}