/*******************************************************************************
 *
 * Intel Ethernet Controller XL710 Family Linux Virtual Function Driver
 * Copyright(c) 2013 Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * The full GNU General Public License is included in this distribution in
 * the file called "COPYING".
 *
 * Contact Information:
 * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
 *
 ******************************************************************************/

#include "i40e_status.h"
#include "i40e_type.h"
#include "i40e_register.h"
#include "i40e_adminq.h"
#include "i40e_prototype.h"

/**
 * i40e_adminq_init_regs - Initialize AdminQ registers
 * @hw: pointer to the hardware structure
 *
 * This assumes the alloc_asq and alloc_arq functions have already been called
 **/
static void i40e_adminq_init_regs(struct i40e_hw *hw)
{
	/* set head and tail registers in our local struct */
	if (hw->mac.type == I40E_MAC_VF) {
		hw->aq.asq.tail = I40E_VF_ATQT1;
		hw->aq.asq.head = I40E_VF_ATQH1;
		hw->aq.asq.len = I40E_VF_ATQLEN1;
		hw->aq.arq.tail = I40E_VF_ARQT1;
		hw->aq.arq.head = I40E_VF_ARQH1;
		hw->aq.arq.len = I40E_VF_ARQLEN1;
	} else {
		hw->aq.asq.tail = I40E_PF_ATQT;
		hw->aq.asq.head = I40E_PF_ATQH;
		hw->aq.asq.len = I40E_PF_ATQLEN;
		hw->aq.arq.tail = I40E_PF_ARQT;
		hw->aq.arq.head = I40E_PF_ARQH;
		hw->aq.arq.len = I40E_PF_ARQLEN;
	}
}

/**
 * i40e_alloc_adminq_asq_ring - Allocate Admin Queue send rings
 * @hw: pointer to the hardware structure
 **/
static i40e_status i40e_alloc_adminq_asq_ring(struct i40e_hw *hw)
{
	i40e_status ret_code;

	ret_code = i40e_allocate_dma_mem(hw, &hw->aq.asq.desc_buf,
					 i40e_mem_atq_ring,
					 (hw->aq.num_asq_entries *
					 sizeof(struct i40e_aq_desc)),
					 I40E_ADMINQ_DESC_ALIGNMENT);
	if (ret_code)
		return ret_code;

	ret_code = i40e_allocate_virt_mem(hw, &hw->aq.asq.cmd_buf,
					  (hw->aq.num_asq_entries *
					  sizeof(struct i40e_asq_cmd_details)));
	if (ret_code) {
		i40e_free_dma_mem(hw, &hw->aq.asq.desc_buf);
		return ret_code;
	}

	return ret_code;
}

/**
 * i40e_alloc_adminq_arq_ring - Allocate Admin Queue receive rings
 * @hw: pointer to the hardware structure
 **/
static i40e_status i40e_alloc_adminq_arq_ring(struct i40e_hw *hw)
{
	i40e_status ret_code;

	ret_code = i40e_allocate_dma_mem(hw, &hw->aq.arq.desc_buf,
					 i40e_mem_arq_ring,
					 (hw->aq.num_arq_entries *
					 sizeof(struct i40e_aq_desc)),
					 I40E_ADMINQ_DESC_ALIGNMENT);

	return ret_code;
}

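/* Note: unlike the send ring above, the receive ring allocates no parallel
 * array of i40e_asq_cmd_details; received events carry no driver-side
 * completion callbacks or cookies, so only descriptor memory is needed.
 */
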
/**
 * i40e_free_adminq_asq - Free Admin Queue send rings
 * @hw: pointer to the hardware structure
 *
 * This assumes the posted send buffers have already been cleaned
 * and de-allocated
 **/
static void i40e_free_adminq_asq(struct i40e_hw *hw)
{
	i40e_free_dma_mem(hw, &hw->aq.asq.desc_buf);
}

/**
 * i40e_free_adminq_arq - Free Admin Queue receive rings
 * @hw: pointer to the hardware structure
 *
 * This assumes the posted receive buffers have already been cleaned
 * and de-allocated
 **/
static void i40e_free_adminq_arq(struct i40e_hw *hw)
{
	i40e_free_dma_mem(hw, &hw->aq.arq.desc_buf);
}

/**
 * i40e_alloc_arq_bufs - Allocate pre-posted buffers for the receive queue
 * @hw: pointer to the hardware structure
 **/
static i40e_status i40e_alloc_arq_bufs(struct i40e_hw *hw)
{
	i40e_status ret_code;
	struct i40e_aq_desc *desc;
	struct i40e_dma_mem *bi;
	int i;

	/* We'll be allocating the buffer info memory first, then we can
	 * allocate the mapped buffers for the event processing
	 */

	/* buffer_info structures do not need alignment */
	ret_code = i40e_allocate_virt_mem(hw, &hw->aq.arq.dma_head,
		(hw->aq.num_arq_entries * sizeof(struct i40e_dma_mem)));
	if (ret_code)
		goto alloc_arq_bufs;
	hw->aq.arq.r.arq_bi = (struct i40e_dma_mem *)hw->aq.arq.dma_head.va;

	/* allocate the mapped buffers */
	for (i = 0; i < hw->aq.num_arq_entries; i++) {
		bi = &hw->aq.arq.r.arq_bi[i];
		ret_code = i40e_allocate_dma_mem(hw, bi,
						 i40e_mem_arq_buf,
						 hw->aq.arq_buf_size,
						 I40E_ADMINQ_DESC_ALIGNMENT);
		if (ret_code)
			goto unwind_alloc_arq_bufs;

		/* now configure the descriptors for use */
		desc = I40E_ADMINQ_DESC(hw->aq.arq, i);

		desc->flags = cpu_to_le16(I40E_AQ_FLAG_BUF);
		if (hw->aq.arq_buf_size > I40E_AQ_LARGE_BUF)
			desc->flags |= cpu_to_le16(I40E_AQ_FLAG_LB);
		desc->opcode = 0;
		/* This is in accordance with Admin queue design, there is no
		 * register for buffer size configuration
		 */
		desc->datalen = cpu_to_le16((u16)bi->size);
		desc->retval = 0;
		desc->cookie_high = 0;
		desc->cookie_low = 0;
		desc->params.external.addr_high =
			cpu_to_le32(upper_32_bits(bi->pa));
		desc->params.external.addr_low =
			cpu_to_le32(lower_32_bits(bi->pa));
		desc->params.external.param0 = 0;
		desc->params.external.param1 = 0;
	}

alloc_arq_bufs:
	return ret_code;

unwind_alloc_arq_bufs:
	/* don't try to free the one that failed... */
	i--;
	for (; i >= 0; i--)
		i40e_free_dma_mem(hw, &hw->aq.arq.r.arq_bi[i]);
	i40e_free_virt_mem(hw, &hw->aq.arq.dma_head);

	return ret_code;
}

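/* Note on i40e_alloc_arq_bufs(): the receive descriptors are fully
 * initialized here, at allocation time, because the firmware owns the
 * ARQ and DMA-writes event messages straight into these pre-posted
 * buffers. The send queue below leaves its descriptors untouched until
 * a command is actually submitted.
 */
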
/**
 * i40e_alloc_asq_bufs - Allocate empty buffer structs for the send queue
 * @hw: pointer to the hardware structure
 **/
static i40e_status i40e_alloc_asq_bufs(struct i40e_hw *hw)
{
	i40e_status ret_code;
	struct i40e_dma_mem *bi;
	int i;

	/* No mapped memory needed yet, just the buffer info structures */
	ret_code = i40e_allocate_virt_mem(hw, &hw->aq.asq.dma_head,
		(hw->aq.num_asq_entries * sizeof(struct i40e_dma_mem)));
	if (ret_code)
		goto alloc_asq_bufs;
	hw->aq.asq.r.asq_bi = (struct i40e_dma_mem *)hw->aq.asq.dma_head.va;

	/* allocate the mapped buffers */
	for (i = 0; i < hw->aq.num_asq_entries; i++) {
		bi = &hw->aq.asq.r.asq_bi[i];
		ret_code = i40e_allocate_dma_mem(hw, bi,
						 i40e_mem_asq_buf,
						 hw->aq.asq_buf_size,
						 I40E_ADMINQ_DESC_ALIGNMENT);
		if (ret_code)
			goto unwind_alloc_asq_bufs;
	}
alloc_asq_bufs:
	return ret_code;

unwind_alloc_asq_bufs:
	/* don't try to free the one that failed... */
	i--;
	for (; i >= 0; i--)
		i40e_free_dma_mem(hw, &hw->aq.asq.r.asq_bi[i]);
	i40e_free_virt_mem(hw, &hw->aq.asq.dma_head);

	return ret_code;
}

/**
 * i40e_free_arq_bufs - Free receive queue buffer info elements
 * @hw: pointer to the hardware structure
 **/
static void i40e_free_arq_bufs(struct i40e_hw *hw)
{
	int i;

	/* free descriptors */
	for (i = 0; i < hw->aq.num_arq_entries; i++)
		i40e_free_dma_mem(hw, &hw->aq.arq.r.arq_bi[i]);

	/* free the descriptor memory */
	i40e_free_dma_mem(hw, &hw->aq.arq.desc_buf);

	/* free the dma header */
	i40e_free_virt_mem(hw, &hw->aq.arq.dma_head);
}

/**
 * i40e_free_asq_bufs - Free send queue buffer info elements
 * @hw: pointer to the hardware structure
 **/
static void i40e_free_asq_bufs(struct i40e_hw *hw)
{
	int i;

	/* only unmap if the address is non-NULL */
	for (i = 0; i < hw->aq.num_asq_entries; i++)
		if (hw->aq.asq.r.asq_bi[i].pa)
			i40e_free_dma_mem(hw, &hw->aq.asq.r.asq_bi[i]);

	/* free the buffer info list */
	i40e_free_virt_mem(hw, &hw->aq.asq.cmd_buf);

	/* free the descriptor memory */
	i40e_free_dma_mem(hw, &hw->aq.asq.desc_buf);

	/* free the dma header */
	i40e_free_virt_mem(hw, &hw->aq.asq.dma_head);
}

/**
 * i40e_config_asq_regs - configure ASQ registers
 * @hw: pointer to the hardware structure
 *
 * Configure base address and length registers for the transmit queue
 **/
static void i40e_config_asq_regs(struct i40e_hw *hw)
{
	if (hw->mac.type == I40E_MAC_VF) {
		/* configure the transmit queue */
		wr32(hw, I40E_VF_ATQBAH1,
		     upper_32_bits(hw->aq.asq.desc_buf.pa));
		wr32(hw, I40E_VF_ATQBAL1,
		     lower_32_bits(hw->aq.asq.desc_buf.pa));
		wr32(hw, I40E_VF_ATQLEN1, (hw->aq.num_asq_entries |
					   I40E_VF_ATQLEN1_ATQENABLE_MASK));
	} else {
		/* configure the transmit queue */
		wr32(hw, I40E_PF_ATQBAH,
		     upper_32_bits(hw->aq.asq.desc_buf.pa));
		wr32(hw, I40E_PF_ATQBAL,
		     lower_32_bits(hw->aq.asq.desc_buf.pa));
		wr32(hw, I40E_PF_ATQLEN, (hw->aq.num_asq_entries |
					  I40E_PF_ATQLEN_ATQENABLE_MASK));
	}
}

/**
 * i40e_config_arq_regs - ARQ register configuration
 * @hw: pointer to the hardware structure
 *
 * Configure base address and length registers for the receive (event queue)
 **/
static void i40e_config_arq_regs(struct i40e_hw *hw)
{
	if (hw->mac.type == I40E_MAC_VF) {
		/* configure the receive queue */
		wr32(hw, I40E_VF_ARQBAH1,
		     upper_32_bits(hw->aq.arq.desc_buf.pa));
		wr32(hw, I40E_VF_ARQBAL1,
		     lower_32_bits(hw->aq.arq.desc_buf.pa));
		wr32(hw, I40E_VF_ARQLEN1, (hw->aq.num_arq_entries |
					   I40E_VF_ARQLEN1_ARQENABLE_MASK));
	} else {
		/* configure the receive queue */
		wr32(hw, I40E_PF_ARQBAH,
		     upper_32_bits(hw->aq.arq.desc_buf.pa));
		wr32(hw, I40E_PF_ARQBAL,
		     lower_32_bits(hw->aq.arq.desc_buf.pa));
		wr32(hw, I40E_PF_ARQLEN, (hw->aq.num_arq_entries |
					  I40E_PF_ARQLEN_ARQENABLE_MASK));
	}

	/* Update tail in the HW to post pre-allocated buffers */
	wr32(hw, hw->aq.arq.tail, hw->aq.num_arq_entries - 1);
}

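/* Note on the two config routines above: writing num_*_entries together
 * with the *QLEN enable mask both sizes the ring and turns it on in a
 * single register write, and bumping the ARQ tail to count - 1 then
 * hands every pre-posted receive buffer to the firmware.
 */
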
/**
 * i40e_init_asq - main initialization routine for ASQ
 * @hw: pointer to the hardware structure
 *
 * This is the main initialization routine for the Admin Send Queue.
 * Prior to calling this function, drivers *MUST* set the following fields
 * in the hw->aq structure:
 * - hw->aq.num_asq_entries
 * - hw->aq.asq_buf_size
 *
 * Do *NOT* hold the lock when calling this as the memory allocation routines
 * called are not going to be atomic context safe
 **/
static i40e_status i40e_init_asq(struct i40e_hw *hw)
{
	i40e_status ret_code = 0;

	if (hw->aq.asq.count > 0) {
		/* queue already initialized */
		ret_code = I40E_ERR_NOT_READY;
		goto init_adminq_exit;
	}

	/* verify input for valid configuration */
	if ((hw->aq.num_asq_entries == 0) ||
	    (hw->aq.asq_buf_size == 0)) {
		ret_code = I40E_ERR_CONFIG;
		goto init_adminq_exit;
	}

	hw->aq.asq.next_to_use = 0;
	hw->aq.asq.next_to_clean = 0;
	hw->aq.asq.count = hw->aq.num_asq_entries;

	/* allocate the ring memory */
	ret_code = i40e_alloc_adminq_asq_ring(hw);
	if (ret_code)
		goto init_adminq_exit;

	/* allocate buffers in the rings */
	ret_code = i40e_alloc_asq_bufs(hw);
	if (ret_code)
		goto init_adminq_free_rings;

	/* initialize base registers */
	i40e_config_asq_regs(hw);

	/* success! */
	goto init_adminq_exit;

init_adminq_free_rings:
	i40e_free_adminq_asq(hw);

init_adminq_exit:
	return ret_code;
}

/**
 * i40e_init_arq - initialize ARQ
 * @hw: pointer to the hardware structure
 *
 * The main initialization routine for the Admin Receive (Event) Queue.
 * Prior to calling this function, drivers *MUST* set the following fields
 * in the hw->aq structure:
 * - hw->aq.num_arq_entries
 * - hw->aq.arq_buf_size
 *
 * Do *NOT* hold the lock when calling this as the memory allocation routines
 * called are not going to be atomic context safe
 **/
static i40e_status i40e_init_arq(struct i40e_hw *hw)
{
	i40e_status ret_code = 0;

	if (hw->aq.arq.count > 0) {
		/* queue already initialized */
		ret_code = I40E_ERR_NOT_READY;
		goto init_adminq_exit;
	}

	/* verify input for valid configuration */
	if ((hw->aq.num_arq_entries == 0) ||
	    (hw->aq.arq_buf_size == 0)) {
		ret_code = I40E_ERR_CONFIG;
		goto init_adminq_exit;
	}

	hw->aq.arq.next_to_use = 0;
	hw->aq.arq.next_to_clean = 0;
	hw->aq.arq.count = hw->aq.num_arq_entries;

	/* allocate the ring memory */
	ret_code = i40e_alloc_adminq_arq_ring(hw);
	if (ret_code)
		goto init_adminq_exit;

	/* allocate buffers in the rings */
	ret_code = i40e_alloc_arq_bufs(hw);
	if (ret_code)
		goto init_adminq_free_rings;

	/* initialize base registers */
	i40e_config_arq_regs(hw);

	/* success! */
	goto init_adminq_exit;

init_adminq_free_rings:
	i40e_free_adminq_arq(hw);

init_adminq_exit:
	return ret_code;
}

/**
 * i40e_shutdown_asq - shutdown the ASQ
 * @hw: pointer to the hardware structure
 *
 * The main shutdown routine for the Admin Send Queue
 **/
static i40e_status i40e_shutdown_asq(struct i40e_hw *hw)
{
	i40e_status ret_code = 0;

	/* take the lock before touching the queue state */
	mutex_lock(&hw->aq.asq_mutex);

	if (hw->aq.asq.count == 0) {
		ret_code = I40E_ERR_NOT_READY;
		goto shutdown_asq_out;
	}

	/* Stop firmware AdminQ processing */
	wr32(hw, hw->aq.asq.head, 0);
	wr32(hw, hw->aq.asq.tail, 0);
	wr32(hw, hw->aq.asq.len, 0);

	hw->aq.asq.count = 0; /* to indicate uninitialized queue */

	/* free ring buffers */
	i40e_free_asq_bufs(hw);

shutdown_asq_out:
	mutex_unlock(&hw->aq.asq_mutex);

	return ret_code;
}

/**
 * i40e_shutdown_arq - shutdown ARQ
 * @hw: pointer to the hardware structure
 *
 * The main shutdown routine for the Admin Receive Queue
 **/
static i40e_status i40e_shutdown_arq(struct i40e_hw *hw)
{
	i40e_status ret_code = 0;

	/* take the lock before touching the queue state */
	mutex_lock(&hw->aq.arq_mutex);

	if (hw->aq.arq.count == 0) {
		ret_code = I40E_ERR_NOT_READY;
		goto shutdown_arq_out;
	}

	/* Stop firmware AdminQ processing */
	wr32(hw, hw->aq.arq.head, 0);
	wr32(hw, hw->aq.arq.tail, 0);
	wr32(hw, hw->aq.arq.len, 0);

	hw->aq.arq.count = 0; /* to indicate uninitialized queue */

	/* free ring buffers */
	i40e_free_arq_bufs(hw);

shutdown_arq_out:
	mutex_unlock(&hw->aq.arq_mutex);

	return ret_code;
}

/**
 * i40evf_init_adminq - main initialization routine for Admin Queue
 * @hw: pointer to the hardware structure
 *
 * Prior to calling this function, drivers *MUST* set the following fields
 * in the hw->aq structure:
 * - hw->aq.num_asq_entries
 * - hw->aq.num_arq_entries
 * - hw->aq.arq_buf_size
 * - hw->aq.asq_buf_size
 **/
i40e_status i40evf_init_adminq(struct i40e_hw *hw)
{
	i40e_status ret_code;

	/* verify input for valid configuration */
	if ((hw->aq.num_arq_entries == 0) ||
	    (hw->aq.num_asq_entries == 0) ||
	    (hw->aq.arq_buf_size == 0) ||
	    (hw->aq.asq_buf_size == 0)) {
		ret_code = I40E_ERR_CONFIG;
		goto init_adminq_exit;
	}

	/* initialize locks */
	mutex_init(&hw->aq.asq_mutex);
	mutex_init(&hw->aq.arq_mutex);

	/* Set up register offsets */
	i40e_adminq_init_regs(hw);

	/* allocate the ASQ */
	ret_code = i40e_init_asq(hw);
	if (ret_code)
		goto init_adminq_destroy_locks;

	/* allocate the ARQ */
	ret_code = i40e_init_arq(hw);
	if (ret_code)
		goto init_adminq_free_asq;

	/* success! */
	goto init_adminq_exit;

init_adminq_free_asq:
	i40e_shutdown_asq(hw);
init_adminq_destroy_locks:
	mutex_destroy(&hw->aq.asq_mutex);
	mutex_destroy(&hw->aq.arq_mutex);
init_adminq_exit:
	return ret_code;
}
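
/* Illustrative caller setup (the values below are placeholders, not
 * taken from this file): the owning driver sizes both queues before
 * calling the init routine.
 *
 *	hw->aq.num_asq_entries = 32;
 *	hw->aq.num_arq_entries = 32;
 *	hw->aq.asq_buf_size = 4096;
 *	hw->aq.arq_buf_size = 4096;
 *	err = i40evf_init_adminq(hw);
 */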

/**
 * i40evf_shutdown_adminq - shutdown routine for the Admin Queue
 * @hw: pointer to the hardware structure
 **/
i40e_status i40evf_shutdown_adminq(struct i40e_hw *hw)
{
	i40e_status ret_code = 0;

	if (i40evf_check_asq_alive(hw))
		i40evf_aq_queue_shutdown(hw, true);

	i40e_shutdown_asq(hw);
	i40e_shutdown_arq(hw);

	/* destroy the locks */
	mutex_destroy(&hw->aq.asq_mutex);
	mutex_destroy(&hw->aq.arq_mutex);

	return ret_code;
}

/**
 * i40e_clean_asq - cleans Admin send queue
 * @hw: pointer to the hardware structure
 *
 * returns the number of free desc
 **/
static u16 i40e_clean_asq(struct i40e_hw *hw)
{
	struct i40e_adminq_ring *asq = &(hw->aq.asq);
	struct i40e_asq_cmd_details *details;
	u16 ntc = asq->next_to_clean;
	struct i40e_aq_desc desc_cb;
	struct i40e_aq_desc *desc;

	desc = I40E_ADMINQ_DESC(*asq, ntc);
	details = I40E_ADMINQ_DETAILS(*asq, ntc);
	while (rd32(hw, hw->aq.asq.head) != ntc) {
		if (details->callback) {
			I40E_ADMINQ_CALLBACK cb_func =
				(I40E_ADMINQ_CALLBACK)details->callback;
			desc_cb = *desc;
			cb_func(hw, &desc_cb);
		}
		memset((void *)desc, 0, sizeof(struct i40e_aq_desc));
		memset((void *)details, 0,
		       sizeof(struct i40e_asq_cmd_details));
		ntc++;
		if (ntc == asq->count)
			ntc = 0;
		desc = I40E_ADMINQ_DESC(*asq, ntc);
		details = I40E_ADMINQ_DETAILS(*asq, ntc);
	}

	asq->next_to_clean = ntc;

	return I40E_DESC_UNUSED(asq);
}

/**
 * i40evf_asq_done - check if FW has processed the Admin Send Queue
 * @hw: pointer to the hw struct
 *
 * Returns true if the firmware has processed all descriptors on the
 * admin send queue. Returns false if there are still requests pending.
 **/
bool i40evf_asq_done(struct i40e_hw *hw)
{
	/* AQ designers suggest use of head for better
	 * timing reliability than DD bit
	 */
	return rd32(hw, hw->aq.asq.head) == hw->aq.asq.next_to_use;
}

/**
 * i40evf_asq_send_command - send command to Admin Queue
 * @hw: pointer to the hw struct
 * @desc: prefilled descriptor describing the command (non DMA mem)
 * @buff: buffer to use for indirect commands
 * @buff_size: size of buffer for indirect commands
 * @cmd_details: pointer to command details structure
 *
 * This is the main send command driver routine for the Admin Queue send
 * queue. It runs the queue, cleans the queue, etc
 **/
i40e_status i40evf_asq_send_command(struct i40e_hw *hw,
				    struct i40e_aq_desc *desc,
				    void *buff, /* can be NULL */
				    u16 buff_size,
				    struct i40e_asq_cmd_details *cmd_details)
{
	i40e_status status = 0;
	struct i40e_dma_mem *dma_buff = NULL;
	struct i40e_asq_cmd_details *details;
	struct i40e_aq_desc *desc_on_ring;
	bool cmd_completed = false;
	u16 retval = 0;

	if (hw->aq.asq.count == 0) {
		i40e_debug(hw, I40E_DEBUG_AQ_MESSAGE,
			   "AQTX: Admin queue not initialized.\n");
		status = I40E_ERR_QUEUE_EMPTY;
		goto asq_send_command_exit;
	}

	details = I40E_ADMINQ_DETAILS(hw->aq.asq, hw->aq.asq.next_to_use);
	if (cmd_details) {
		*details = *cmd_details;

		/* If the cmd_details are defined copy the cookie. The
		 * cpu_to_le32 is not needed here because the data is ignored
		 * by the FW, only used by the driver
		 */
		if (details->cookie) {
			desc->cookie_high =
				cpu_to_le32(upper_32_bits(details->cookie));
			desc->cookie_low =
				cpu_to_le32(lower_32_bits(details->cookie));
		}
	} else {
		memset(details, 0, sizeof(struct i40e_asq_cmd_details));
	}

	/* clear requested flags and then set additional flags if defined */
	desc->flags &= ~cpu_to_le16(details->flags_dis);
	desc->flags |= cpu_to_le16(details->flags_ena);

	mutex_lock(&hw->aq.asq_mutex);

	if (buff_size > hw->aq.asq_buf_size) {
		i40e_debug(hw,
			   I40E_DEBUG_AQ_MESSAGE,
			   "AQTX: Invalid buffer size: %d.\n",
			   buff_size);
		status = I40E_ERR_INVALID_SIZE;
		goto asq_send_command_error;
	}

	if (details->postpone && !details->async) {
		i40e_debug(hw,
			   I40E_DEBUG_AQ_MESSAGE,
			   "AQTX: Async flag not set along with postpone flag\n");
		status = I40E_ERR_PARAM;
		goto asq_send_command_error;
	}

702
703 /* call clean and check queue available function to reclaim the
704 * descriptors that were processed by FW, the function returns the
705 * number of desc available
706 */
707 /* the clean function called here could be called in a separate thread
708 * in case of asynchronous completions
709 */
710 if (i40e_clean_asq(hw) == 0) {
711 i40e_debug(hw,
712 I40E_DEBUG_AQ_MESSAGE,
713 "AQTX: Error queue is full.\n");
714 status = I40E_ERR_ADMIN_QUEUE_FULL;
715 goto asq_send_command_error;
716 }
717
718 /* initialize the temp desc pointer with the right desc */
719 desc_on_ring = I40E_ADMINQ_DESC(hw->aq.asq, hw->aq.asq.next_to_use);
720
721 /* if the desc is available copy the temp desc to the right place */
722 *desc_on_ring = *desc;
723
724 /* if buff is not NULL assume indirect command */
725 if (buff != NULL) {
726 dma_buff = &(hw->aq.asq.r.asq_bi[hw->aq.asq.next_to_use]);
727 /* copy the user buff into the respective DMA buff */
728 memcpy(dma_buff->va, buff, buff_size);
729 desc_on_ring->datalen = cpu_to_le16(buff_size);
730
731 /* Update the address values in the desc with the pa value
732 * for respective buffer
733 */
734 desc_on_ring->params.external.addr_high =
735 cpu_to_le32(upper_32_bits(dma_buff->pa));
736 desc_on_ring->params.external.addr_low =
737 cpu_to_le32(lower_32_bits(dma_buff->pa));
738 }
739
740 /* bump the tail */
741 i40evf_debug_aq(hw, I40E_DEBUG_AQ_COMMAND, (void *)desc_on_ring, buff);
742 (hw->aq.asq.next_to_use)++;
743 if (hw->aq.asq.next_to_use == hw->aq.asq.count)
744 hw->aq.asq.next_to_use = 0;
745 if (!details->postpone)
746 wr32(hw, hw->aq.asq.tail, hw->aq.asq.next_to_use);
747
748 /* if cmd_details are not defined or async flag is not set,
749 * we need to wait for desc write back
750 */
751 if (!details->async && !details->postpone) {
752 u32 total_delay = 0;
753 u32 delay_len = 10;
754
755 do {
756 /* AQ designers suggest use of head for better
757 * timing reliability than DD bit
758 */
759 if (i40evf_asq_done(hw))
760 break;
761 /* ugh! delay while spin_lock */
762 udelay(delay_len);
763 total_delay += delay_len;
764 } while (total_delay < I40E_ASQ_CMD_TIMEOUT);
765 }
766
767 /* if ready, copy the desc back to temp */
768 if (i40evf_asq_done(hw)) {
769 *desc = *desc_on_ring;
770 if (buff != NULL)
771 memcpy(buff, dma_buff->va, buff_size);
772 retval = le16_to_cpu(desc->retval);
773 if (retval != 0) {
774 i40e_debug(hw,
775 I40E_DEBUG_AQ_MESSAGE,
776 "AQTX: Command completed with error 0x%X.\n",
777 retval);
778 /* strip off FW internal code */
779 retval &= 0xff;
780 }
781 cmd_completed = true;
782 if ((enum i40e_admin_queue_err)retval == I40E_AQ_RC_OK)
783 status = 0;
784 else
785 status = I40E_ERR_ADMIN_QUEUE_ERROR;
786 hw->aq.asq_last_status = (enum i40e_admin_queue_err)retval;
787 }
788
789 /* update the error if time out occurred */
790 if ((!cmd_completed) &&
791 (!details->async && !details->postpone)) {
792 i40e_debug(hw,
793 I40E_DEBUG_AQ_MESSAGE,
794 "AQTX: Writeback timeout.\n");
795 status = I40E_ERR_ADMIN_QUEUE_TIMEOUT;
796 }
797
798asq_send_command_error:
799 mutex_unlock(&hw->aq.asq_mutex);
800asq_send_command_exit:
801 return status;
802}
803
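/* Illustrative direct-command submission (identifiers such as opcode
 * are placeholders, not defined in this file):
 *
 *	struct i40e_aq_desc desc;
 *	i40e_status status;
 *
 *	i40evf_fill_default_direct_cmd_desc(&desc, opcode);
 *	status = i40evf_asq_send_command(hw, &desc, NULL, 0, NULL);
 *
 * Passing a non-NULL buffer and its length instead makes the command
 * indirect; the routine copies the buffer into the ring's DMA memory.
 */
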
/**
 * i40evf_fill_default_direct_cmd_desc - AQ descriptor helper function
 * @desc: pointer to the temp descriptor (non DMA mem)
 * @opcode: the opcode can be used to decide which flags to turn off or on
 *
 * Fill the desc with default values
 **/
void i40evf_fill_default_direct_cmd_desc(struct i40e_aq_desc *desc,
					 u16 opcode)
{
	/* zero out the desc */
	memset((void *)desc, 0, sizeof(struct i40e_aq_desc));
	desc->opcode = cpu_to_le16(opcode);
	desc->flags = cpu_to_le16(I40E_AQ_FLAG_SI);
}

/**
 * i40evf_clean_arq_element
 * @hw: pointer to the hw struct
 * @e: event info from the receive descriptor, includes any buffers
 * @pending: number of events that could be left to process
 *
 * This function cleans one Admin Receive Queue element and returns
 * the contents through e. It can also return how many events are
 * left to process through 'pending'
 **/
i40e_status i40evf_clean_arq_element(struct i40e_hw *hw,
				     struct i40e_arq_event_info *e,
				     u16 *pending)
{
	i40e_status ret_code = 0;
	u16 ntc = hw->aq.arq.next_to_clean;
	struct i40e_aq_desc *desc;
	struct i40e_dma_mem *bi;
	u16 desc_idx;
	u16 datalen;
	u16 flags;
	u16 ntu;

	/* take the lock before we start messing with the ring */
	mutex_lock(&hw->aq.arq_mutex);

	/* set next_to_use to head */
	ntu = (rd32(hw, hw->aq.arq.head) & I40E_PF_ARQH_ARQH_MASK);
	if (ntu == ntc) {
		/* nothing to do - shouldn't need to update ring's values */
		i40e_debug(hw,
			   I40E_DEBUG_AQ_MESSAGE,
			   "AQRX: Queue is empty.\n");
		ret_code = I40E_ERR_ADMIN_QUEUE_NO_WORK;
		goto clean_arq_element_out;
	}

	/* now clean the next descriptor */
	desc = I40E_ADMINQ_DESC(hw->aq.arq, ntc);
	desc_idx = ntc;
	i40evf_debug_aq(hw,
			I40E_DEBUG_AQ_COMMAND,
			(void *)desc,
			hw->aq.arq.r.arq_bi[desc_idx].va);

	flags = le16_to_cpu(desc->flags);
	if (flags & I40E_AQ_FLAG_ERR) {
		ret_code = I40E_ERR_ADMIN_QUEUE_ERROR;
		hw->aq.arq_last_status =
			(enum i40e_admin_queue_err)le16_to_cpu(desc->retval);
		i40e_debug(hw,
			   I40E_DEBUG_AQ_MESSAGE,
			   "AQRX: Event received with error 0x%X.\n",
			   hw->aq.arq_last_status);
	} else {
		e->desc = *desc;
		datalen = le16_to_cpu(desc->datalen);
		e->msg_size = min(datalen, e->msg_size);
		if (e->msg_buf != NULL && (e->msg_size != 0))
			memcpy(e->msg_buf, hw->aq.arq.r.arq_bi[desc_idx].va,
			       e->msg_size);
	}

	/* Restore the original datalen and buffer address in the desc,
	 * FW updates datalen to indicate the event message
	 * size
	 */
	bi = &hw->aq.arq.r.arq_bi[ntc];
	memset((void *)desc, 0, sizeof(struct i40e_aq_desc));

	desc->flags = cpu_to_le16(I40E_AQ_FLAG_BUF);
	if (hw->aq.arq_buf_size > I40E_AQ_LARGE_BUF)
		desc->flags |= cpu_to_le16(I40E_AQ_FLAG_LB);
	desc->datalen = cpu_to_le16((u16)bi->size);
	desc->params.external.addr_high = cpu_to_le32(upper_32_bits(bi->pa));
	desc->params.external.addr_low = cpu_to_le32(lower_32_bits(bi->pa));

	/* set tail = the last cleaned desc index. */
	wr32(hw, hw->aq.arq.tail, ntc);
	/* ntc is updated to tail + 1 */
	ntc++;
	if (ntc == hw->aq.num_arq_entries)
		ntc = 0;
	hw->aq.arq.next_to_clean = ntc;
	hw->aq.arq.next_to_use = ntu;

clean_arq_element_out:
	/* Set pending if needed, unlock and return */
	if (pending != NULL)
		*pending = (ntc > ntu ? hw->aq.arq.count : 0) + (ntu - ntc);
	mutex_unlock(&hw->aq.arq_mutex);

	return ret_code;
}
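
/* Illustrative event-drain loop (buf and buf_len are placeholders, not
 * defined in this file): callers repeatedly clean one element until no
 * work remains, resetting msg_size each pass because the routine
 * overwrites it with the received message length.
 *
 *	struct i40e_arq_event_info event;
 *	u16 pending = 0;
 *
 *	do {
 *		event.msg_size = buf_len;
 *		event.msg_buf = buf;
 *		if (i40evf_clean_arq_element(hw, &event, &pending))
 *			break;
 *		...process event.desc and event.msg_buf...
 *	} while (pending);
 */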
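/**
 * i40evf_resume_aq - restart AdminQ processing after a PF reset
 * @hw: pointer to the hardware structure
 *
 * A PF reset clears the AQ registers, so reset the ring indices and
 * reprogram the base/length registers for both queues.
 **/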
void i40evf_resume_aq(struct i40e_hw *hw)
{
	/* Registers are reset after PF reset */
	hw->aq.asq.next_to_use = 0;
	hw->aq.asq.next_to_clean = 0;

	i40e_config_asq_regs(hw);

	hw->aq.arq.next_to_use = 0;
	hw->aq.arq.next_to_clean = 0;

	i40e_config_arq_regs(hw);
}