/* bnx2x_sp.c: Broadcom Everest network driver.
 *
 * Copyright (c) 2011-2012 Broadcom Corporation
 *
 * Unless you and Broadcom execute a separate written software license
 * agreement governing use of this software, this software is licensed to you
 * under the terms of the GNU General Public License version 2, available
 * at http://www.gnu.org/licenses/old-licenses/gpl-2.0.html (the "GPL").
 *
 * Notwithstanding the above, under no circumstances may you combine this
 * software in any way with any other Broadcom software provided under a
 * license other than the GPL, without Broadcom's express prior written
 * consent.
 *
 * Maintained by: Eilon Greenstein <eilong@broadcom.com>
 * Written by: Vladislav Zolotarov
 *
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/module.h>
#include <linux/crc32.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/crc32c.h>
#include "bnx2x.h"
#include "bnx2x_cmn.h"
#include "bnx2x_sp.h"

#define BNX2X_MAX_EMUL_MULTI		16

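/*
 * MACs are reported in u32-aligned slots (see bnx2x_get_n_elements()), so
 * each 6-byte address is preceded by ALIGN(6, 4) - 6 = 2 zero bytes.
 */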
#define MAC_LEADING_ZERO_CNT (ALIGN(ETH_ALEN, sizeof(u32)) - ETH_ALEN)

/**** Exe Queue interfaces ****/

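/*
 * The execution queue implements a two-stage pipeline for classification
 * commands: new elements are optimized (an ADD may cancel a pending DEL and
 * vice versa), validated and queued on exe_queue; execution then moves a
 * chunk of up to exe_chunk_len command slots to pending_comp, where the
 * elements stay until the FW completion (or an immediate completion for
 * driver-only flows) lets the next chunk run.
 */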
/**
 * bnx2x_exe_queue_init - init the Exe Queue object
 *
 * @o: pointer to the object
 * @exe_len: length
 * @owner: pointer to the owner
 * @validate: validate function pointer
 * @remove: remove function pointer
 * @optimize: optimize function pointer
 * @exec: execute function pointer
 * @get: get function pointer
 */
static inline void bnx2x_exe_queue_init(struct bnx2x *bp,
					struct bnx2x_exe_queue_obj *o,
					int exe_len,
					union bnx2x_qable_obj *owner,
					exe_q_validate validate,
					exe_q_remove remove,
					exe_q_optimize optimize,
					exe_q_execute exec,
					exe_q_get get)
{
	memset(o, 0, sizeof(*o));

	INIT_LIST_HEAD(&o->exe_queue);
	INIT_LIST_HEAD(&o->pending_comp);

	spin_lock_init(&o->lock);

	o->exe_chunk_len = exe_len;
	o->owner	 = owner;

	/* Owner specific callbacks */
	o->validate = validate;
	o->remove   = remove;
	o->optimize = optimize;
	o->execute  = exec;
	o->get      = get;

	DP(BNX2X_MSG_SP, "Setup the execution queue with the chunk length of %d\n",
	   exe_len);
}

static inline void bnx2x_exe_queue_free_elem(struct bnx2x *bp,
					     struct bnx2x_exeq_elem *elem)
{
	DP(BNX2X_MSG_SP, "Deleting an exe_queue element\n");
	kfree(elem);
}

static inline int bnx2x_exe_queue_length(struct bnx2x_exe_queue_obj *o)
{
	struct bnx2x_exeq_elem *elem;
	int cnt = 0;

	spin_lock_bh(&o->lock);

	list_for_each_entry(elem, &o->exe_queue, link)
		cnt++;

	spin_unlock_bh(&o->lock);

	return cnt;
}

/**
 * bnx2x_exe_queue_add - add a new element to the execution queue
 *
 * @bp: driver handle
 * @o: queue
 * @elem: new command to add
 * @restore: true - do not optimize the command
 *
 * If the element is optimized or is illegal, frees it.
 */
static inline int bnx2x_exe_queue_add(struct bnx2x *bp,
				      struct bnx2x_exe_queue_obj *o,
				      struct bnx2x_exeq_elem *elem,
				      bool restore)
{
	int rc;

	spin_lock_bh(&o->lock);

	if (!restore) {
		/* Try to cancel this element's command by optimizing it away */
		rc = o->optimize(bp, o->owner, elem);
		if (rc)
			goto free_and_exit;

		/* Check if this request is ok */
		rc = o->validate(bp, o->owner, elem);
		if (rc) {
			BNX2X_ERR("Preamble failed: %d\n", rc);
			goto free_and_exit;
		}
	}

	/* If so, add it to the execution queue */
	list_add_tail(&elem->link, &o->exe_queue);

	spin_unlock_bh(&o->lock);

	return 0;

free_and_exit:
	bnx2x_exe_queue_free_elem(bp, elem);

	spin_unlock_bh(&o->lock);

	return rc;
}

static inline void __bnx2x_exe_queue_reset_pending(
	struct bnx2x *bp,
	struct bnx2x_exe_queue_obj *o)
{
	struct bnx2x_exeq_elem *elem;

	while (!list_empty(&o->pending_comp)) {
		elem = list_first_entry(&o->pending_comp,
					struct bnx2x_exeq_elem, link);

		list_del(&elem->link);
		bnx2x_exe_queue_free_elem(bp, elem);
	}
}

static inline void bnx2x_exe_queue_reset_pending(struct bnx2x *bp,
						 struct bnx2x_exe_queue_obj *o)
{
	spin_lock_bh(&o->lock);

	__bnx2x_exe_queue_reset_pending(bp, o);

	spin_unlock_bh(&o->lock);
}

/**
 * bnx2x_exe_queue_step - execute one execution chunk atomically
 *
 * @bp: driver handle
 * @o: queue
 * @ramrod_flags: flags
 *
 * (Atomicity is ensured using the exe_queue->lock).
 */
static inline int bnx2x_exe_queue_step(struct bnx2x *bp,
				       struct bnx2x_exe_queue_obj *o,
				       unsigned long *ramrod_flags)
{
	struct bnx2x_exeq_elem *elem, spacer;
	int cur_len = 0, rc;

	memset(&spacer, 0, sizeof(spacer));

	spin_lock_bh(&o->lock);

	/*
	 * Next step should not be performed until the current is finished,
	 * unless a DRV_CLEAR_ONLY bit is set. In this case we just want to
	 * properly clear object internals without sending any command to the FW
	 * which also implies there won't be any completion to clear the
	 * 'pending' list.
	 */
	if (!list_empty(&o->pending_comp)) {
		if (test_bit(RAMROD_DRV_CLR_ONLY, ramrod_flags)) {
			DP(BNX2X_MSG_SP, "RAMROD_DRV_CLR_ONLY requested: resetting pending_comp\n");
			__bnx2x_exe_queue_reset_pending(bp, o);
		} else {
			spin_unlock_bh(&o->lock);
			return 1;
		}
	}

	/*
	 * Run through the pending commands list and create a next
	 * execution chunk.
	 */
	while (!list_empty(&o->exe_queue)) {
		elem = list_first_entry(&o->exe_queue, struct bnx2x_exeq_elem,
					link);
		WARN_ON(!elem->cmd_len);

		if (cur_len + elem->cmd_len <= o->exe_chunk_len) {
			cur_len += elem->cmd_len;
			/*
			 * Prevent from both lists being empty when moving an
			 * element. This will allow the call of
			 * bnx2x_exe_queue_empty() without locking.
			 */
			list_add_tail(&spacer.link, &o->pending_comp);
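			/*
			 * The barrier makes the spacer visible on
			 * pending_comp before the element leaves exe_queue,
			 * so a lockless bnx2x_exe_queue_empty() (which has a
			 * matching mb()) never sees both lists empty.
			 */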
			mb();
			list_del(&elem->link);
			list_add_tail(&elem->link, &o->pending_comp);
			list_del(&spacer.link);
		} else
			break;
	}

	/* Sanity check */
	if (!cur_len) {
		spin_unlock_bh(&o->lock);
		return 0;
	}

	rc = o->execute(bp, o->owner, &o->pending_comp, ramrod_flags);
	if (rc < 0)
		/*
		 * In case of an error return the commands back to the queue
		 * and reset the pending_comp.
		 */
		list_splice_init(&o->pending_comp, &o->exe_queue);
	else if (!rc)
		/*
		 * If zero is returned, it means there are no outstanding
		 * pending completions and we may dismiss the pending list.
		 */
		__bnx2x_exe_queue_reset_pending(bp, o);

	spin_unlock_bh(&o->lock);
	return rc;
}

static inline bool bnx2x_exe_queue_empty(struct bnx2x_exe_queue_obj *o)
{
	bool empty = list_empty(&o->exe_queue);

	/* Don't reorder!!! */
	mb();

	return empty && list_empty(&o->pending_comp);
}

static inline struct bnx2x_exeq_elem *bnx2x_exe_queue_alloc_elem(
	struct bnx2x *bp)
{
	DP(BNX2X_MSG_SP, "Allocating a new exe_queue element\n");
	return kzalloc(sizeof(struct bnx2x_exeq_elem), GFP_ATOMIC);
}

/************************ raw_obj functions ***********************************/
static bool bnx2x_raw_check_pending(struct bnx2x_raw_obj *o)
{
	return !!test_bit(o->state, o->pstate);
}

static void bnx2x_raw_clear_pending(struct bnx2x_raw_obj *o)
{
	smp_mb__before_clear_bit();
	clear_bit(o->state, o->pstate);
	smp_mb__after_clear_bit();
}

static void bnx2x_raw_set_pending(struct bnx2x_raw_obj *o)
{
	smp_mb__before_clear_bit();
	set_bit(o->state, o->pstate);
	smp_mb__after_clear_bit();
}

/**
 * bnx2x_state_wait - wait until the given bit(state) is cleared
 *
 * @bp: device handle
 * @state: state which is to be cleared
 * @pstate: state buffer
 *
 */
static inline int bnx2x_state_wait(struct bnx2x *bp, int state,
				   unsigned long *pstate)
{
	/* can take a while if any port is running */
	int cnt = 5000;

	if (CHIP_REV_IS_EMUL(bp))
		cnt *= 20;

	DP(BNX2X_MSG_SP, "waiting for state to become %d\n", state);

	might_sleep();
	while (cnt--) {
		if (!test_bit(state, pstate)) {
#ifdef BNX2X_STOP_ON_ERROR
			DP(BNX2X_MSG_SP, "exit (cnt %d)\n", 5000 - cnt);
#endif
			return 0;
		}

		usleep_range(1000, 1000);

		if (bp->panic)
			return -EIO;
	}

	/* timeout! */
	BNX2X_ERR("timeout waiting for state %d\n", state);
#ifdef BNX2X_STOP_ON_ERROR
	bnx2x_panic();
#endif

	return -EBUSY;
}

static int bnx2x_raw_wait(struct bnx2x *bp, struct bnx2x_raw_obj *raw)
{
	return bnx2x_state_wait(bp, raw->state, raw->pstate);
}

/***************** Classification verbs: Set/Del MAC/VLAN/VLAN-MAC ************/
/* credit handling callbacks */
static bool bnx2x_get_cam_offset_mac(struct bnx2x_vlan_mac_obj *o, int *offset)
{
	struct bnx2x_credit_pool_obj *mp = o->macs_pool;

	WARN_ON(!mp);

	return mp->get_entry(mp, offset);
}

static bool bnx2x_get_credit_mac(struct bnx2x_vlan_mac_obj *o)
{
	struct bnx2x_credit_pool_obj *mp = o->macs_pool;

	WARN_ON(!mp);

	return mp->get(mp, 1);
}

static bool bnx2x_get_cam_offset_vlan(struct bnx2x_vlan_mac_obj *o, int *offset)
{
	struct bnx2x_credit_pool_obj *vp = o->vlans_pool;

	WARN_ON(!vp);

	return vp->get_entry(vp, offset);
}

static bool bnx2x_get_credit_vlan(struct bnx2x_vlan_mac_obj *o)
{
	struct bnx2x_credit_pool_obj *vp = o->vlans_pool;

	WARN_ON(!vp);

	return vp->get(vp, 1);
}

static bool bnx2x_get_credit_vlan_mac(struct bnx2x_vlan_mac_obj *o)
{
	struct bnx2x_credit_pool_obj *mp = o->macs_pool;
	struct bnx2x_credit_pool_obj *vp = o->vlans_pool;

	if (!mp->get(mp, 1))
		return false;

	if (!vp->get(vp, 1)) {
		mp->put(mp, 1);
		return false;
	}

	return true;
}

static bool bnx2x_put_cam_offset_mac(struct bnx2x_vlan_mac_obj *o, int offset)
{
	struct bnx2x_credit_pool_obj *mp = o->macs_pool;

	return mp->put_entry(mp, offset);
}

static bool bnx2x_put_credit_mac(struct bnx2x_vlan_mac_obj *o)
{
	struct bnx2x_credit_pool_obj *mp = o->macs_pool;

	return mp->put(mp, 1);
}

static bool bnx2x_put_cam_offset_vlan(struct bnx2x_vlan_mac_obj *o, int offset)
{
	struct bnx2x_credit_pool_obj *vp = o->vlans_pool;

	return vp->put_entry(vp, offset);
}

static bool bnx2x_put_credit_vlan(struct bnx2x_vlan_mac_obj *o)
{
	struct bnx2x_credit_pool_obj *vp = o->vlans_pool;

	return vp->put(vp, 1);
}

static bool bnx2x_put_credit_vlan_mac(struct bnx2x_vlan_mac_obj *o)
{
	struct bnx2x_credit_pool_obj *mp = o->macs_pool;
	struct bnx2x_credit_pool_obj *vp = o->vlans_pool;

	if (!mp->put(mp, 1))
		return false;

	if (!vp->put(vp, 1)) {
		mp->get(mp, 1);
		return false;
	}

	return true;
}

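/*
 * Copy at most n MACs from the registry into buf. Each MAC occupies an
 * ALIGN(ETH_ALEN, sizeof(u32)) == 8 byte slot: MAC_LEADING_ZERO_CNT zero
 * bytes followed by the 6 address bytes. Returns the number of meaningful
 * MAC bytes written (a multiple of ETH_ALEN).
 */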
static int bnx2x_get_n_elements(struct bnx2x *bp, struct bnx2x_vlan_mac_obj *o,
				int n, u8 *buf)
{
	struct bnx2x_vlan_mac_registry_elem *pos;
	u8 *next = buf;
	int counter = 0;

	/* traverse list */
	list_for_each_entry(pos, &o->head, link) {
		if (counter < n) {
			/* place leading zeroes in buffer */
			memset(next, 0, MAC_LEADING_ZERO_CNT);

			/* place mac after leading zeroes */
			memcpy(next + MAC_LEADING_ZERO_CNT, pos->u.mac.mac,
			       ETH_ALEN);

			/* calculate address of next element and
			 * advance counter
			 */
			counter++;
			next = buf + counter * ALIGN(ETH_ALEN, sizeof(u32));

			DP(BNX2X_MSG_SP, "copied element number %d to address %p element was %pM\n",
			   counter, next, pos->u.mac.mac);
		}
	}
	return counter * ETH_ALEN;
}

/* check_add() callbacks */
static int bnx2x_check_mac_add(struct bnx2x_vlan_mac_obj *o,
			       union bnx2x_classification_ramrod_data *data)
{
	struct bnx2x_vlan_mac_registry_elem *pos;

	if (!is_valid_ether_addr(data->mac.mac))
		return -EINVAL;

	/* Check if a requested MAC already exists */
	list_for_each_entry(pos, &o->head, link)
		if (!memcmp(data->mac.mac, pos->u.mac.mac, ETH_ALEN))
			return -EEXIST;

	return 0;
}

static int bnx2x_check_vlan_add(struct bnx2x_vlan_mac_obj *o,
				union bnx2x_classification_ramrod_data *data)
{
	struct bnx2x_vlan_mac_registry_elem *pos;

	list_for_each_entry(pos, &o->head, link)
		if (data->vlan.vlan == pos->u.vlan.vlan)
			return -EEXIST;

	return 0;
}

static int bnx2x_check_vlan_mac_add(struct bnx2x_vlan_mac_obj *o,
				    union bnx2x_classification_ramrod_data *data)
{
	struct bnx2x_vlan_mac_registry_elem *pos;

	list_for_each_entry(pos, &o->head, link)
		if ((data->vlan_mac.vlan == pos->u.vlan_mac.vlan) &&
		    (!memcmp(data->vlan_mac.mac, pos->u.vlan_mac.mac,
			     ETH_ALEN)))
			return -EEXIST;

	return 0;
}

/* check_del() callbacks */
static struct bnx2x_vlan_mac_registry_elem *
	bnx2x_check_mac_del(struct bnx2x_vlan_mac_obj *o,
			    union bnx2x_classification_ramrod_data *data)
{
	struct bnx2x_vlan_mac_registry_elem *pos;

	list_for_each_entry(pos, &o->head, link)
		if (!memcmp(data->mac.mac, pos->u.mac.mac, ETH_ALEN))
			return pos;

	return NULL;
}

static struct bnx2x_vlan_mac_registry_elem *
	bnx2x_check_vlan_del(struct bnx2x_vlan_mac_obj *o,
			     union bnx2x_classification_ramrod_data *data)
{
	struct bnx2x_vlan_mac_registry_elem *pos;

	list_for_each_entry(pos, &o->head, link)
		if (data->vlan.vlan == pos->u.vlan.vlan)
			return pos;

	return NULL;
}

static struct bnx2x_vlan_mac_registry_elem *
	bnx2x_check_vlan_mac_del(struct bnx2x_vlan_mac_obj *o,
				 union bnx2x_classification_ramrod_data *data)
{
	struct bnx2x_vlan_mac_registry_elem *pos;

	list_for_each_entry(pos, &o->head, link)
		if ((data->vlan_mac.vlan == pos->u.vlan_mac.vlan) &&
		    (!memcmp(data->vlan_mac.mac, pos->u.vlan_mac.mac,
			     ETH_ALEN)))
			return pos;

	return NULL;
}

/* check_move() callback */
static bool bnx2x_check_move(struct bnx2x_vlan_mac_obj *src_o,
			     struct bnx2x_vlan_mac_obj *dst_o,
			     union bnx2x_classification_ramrod_data *data)
{
	struct bnx2x_vlan_mac_registry_elem *pos;
	int rc;

	/* Check if we can delete the requested configuration from the first
	 * object.
	 */
	pos = src_o->check_del(src_o, data);

	/* check if configuration can be added */
	rc = dst_o->check_add(dst_o, data);

	/* If this classification can not be added (is already set)
	 * or can't be deleted - return an error.
	 */
	if (rc || !pos)
		return false;

	return true;
}

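/*
 * Used on 57710/57711 (E1/E1H), which lack a MOVE ramrod: moving a
 * classification entry between objects is always rejected there (see
 * bnx2x_init_mac_obj()).
 */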
static bool bnx2x_check_move_always_err(
	struct bnx2x_vlan_mac_obj *src_o,
	struct bnx2x_vlan_mac_obj *dst_o,
	union bnx2x_classification_ramrod_data *data)
{
	return false;
}

static inline u8 bnx2x_vlan_mac_get_rx_tx_flag(struct bnx2x_vlan_mac_obj *o)
{
	struct bnx2x_raw_obj *raw = &o->raw;
	u8 rx_tx_flag = 0;

	if ((raw->obj_type == BNX2X_OBJ_TYPE_TX) ||
	    (raw->obj_type == BNX2X_OBJ_TYPE_RX_TX))
		rx_tx_flag |= ETH_CLASSIFY_CMD_HEADER_TX_CMD;

	if ((raw->obj_type == BNX2X_OBJ_TYPE_RX) ||
	    (raw->obj_type == BNX2X_OBJ_TYPE_RX_TX))
		rx_tx_flag |= ETH_CLASSIFY_CMD_HEADER_RX_CMD;

	return rx_tx_flag;
}

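/*
 * Program (add == true) or disable an LLH CAM entry in the NIG for the
 * given MAC. Relevant only in switch-independent (MF_SI) mode and for
 * indices within the per-function CAM range; otherwise this is a no-op.
 */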
static inline void bnx2x_set_mac_in_nig(struct bnx2x *bp,
					bool add, unsigned char *dev_addr, int index)
{
	u32 wb_data[2];
	u32 reg_offset = BP_PORT(bp) ? NIG_REG_LLH1_FUNC_MEM :
			 NIG_REG_LLH0_FUNC_MEM;

	if (!IS_MF_SI(bp) || index > BNX2X_LLH_CAM_MAX_PF_LINE)
		return;

	DP(BNX2X_MSG_SP, "Going to %s LLH configuration at entry %d\n",
	   (add ? "ADD" : "DELETE"), index);

	if (add) {
		/* LLH_FUNC_MEM is a u64 WB register */
		reg_offset += 8*index;

		wb_data[0] = ((dev_addr[2] << 24) | (dev_addr[3] << 16) |
			      (dev_addr[4] << 8) |  dev_addr[5]);
		wb_data[1] = ((dev_addr[0] << 8) |  dev_addr[1]);

		REG_WR_DMAE(bp, reg_offset, wb_data, 2);
	}

	REG_WR(bp, (BP_PORT(bp) ? NIG_REG_LLH1_FUNC_MEM_ENABLE :
		    NIG_REG_LLH0_FUNC_MEM_ENABLE) + 4*index, add);
}

/**
 * bnx2x_vlan_mac_set_cmd_hdr_e2 - set a header in a single classify ramrod
 *
 * @bp: device handle
 * @o: queue for which we want to configure this rule
 * @add: if true the command is an ADD command, DEL otherwise
 * @opcode: CLASSIFY_RULE_OPCODE_XXX
 * @hdr: pointer to a header to setup
 *
 */
static inline void bnx2x_vlan_mac_set_cmd_hdr_e2(struct bnx2x *bp,
	struct bnx2x_vlan_mac_obj *o, bool add, int opcode,
	struct eth_classify_cmd_header *hdr)
{
	struct bnx2x_raw_obj *raw = &o->raw;

	hdr->client_id = raw->cl_id;
	hdr->func_id = raw->func_id;

	/* Rx or/and Tx (internal switching) configuration ? */
	hdr->cmd_general_data |=
		bnx2x_vlan_mac_get_rx_tx_flag(o);

	if (add)
		hdr->cmd_general_data |= ETH_CLASSIFY_CMD_HEADER_IS_ADD;

	hdr->cmd_general_data |=
		(opcode << ETH_CLASSIFY_CMD_HEADER_OPCODE_SHIFT);
}

/**
 * bnx2x_vlan_mac_set_rdata_hdr_e2 - set the classify ramrod data header
 *
 * @cid: connection id
 * @type: BNX2X_FILTER_XXX_PENDING
 * @hdr: pointer to header to setup
 * @rule_cnt: number of rules in the ramrod data
 *
 * currently we always configure one rule, and the echo field contains a CID
 * and an opcode type.
 */
static inline void bnx2x_vlan_mac_set_rdata_hdr_e2(u32 cid, int type,
				struct eth_classify_header *hdr, int rule_cnt)
{
	hdr->echo = (cid & BNX2X_SWCID_MASK) | (type << BNX2X_SWCID_SHIFT);
	hdr->rule_cnt = (u8)rule_cnt;
}

/* hw_config() callbacks */
static void bnx2x_set_one_mac_e2(struct bnx2x *bp,
				 struct bnx2x_vlan_mac_obj *o,
				 struct bnx2x_exeq_elem *elem, int rule_idx,
				 int cam_offset)
{
	struct bnx2x_raw_obj *raw = &o->raw;
	struct eth_classify_rules_ramrod_data *data =
		(struct eth_classify_rules_ramrod_data *)(raw->rdata);
	int rule_cnt = rule_idx + 1, cmd = elem->cmd_data.vlan_mac.cmd;
	union eth_classify_rule_cmd *rule_entry = &data->rules[rule_idx];
	bool add = (cmd == BNX2X_VLAN_MAC_ADD) ? true : false;
	unsigned long *vlan_mac_flags = &elem->cmd_data.vlan_mac.vlan_mac_flags;
	u8 *mac = elem->cmd_data.vlan_mac.u.mac.mac;

	/*
	 * Set LLH CAM entry: currently only iSCSI and ETH macs are
	 * relevant. In addition, current implementation is tuned for a
	 * single ETH MAC.
	 *
	 * When a PF configuration with multiple unicast ETH MACs in switch
	 * independent mode is required (NetQ, multiple netdev MACs,
	 * etc.), consider better utilisation of the 8 per function MAC
	 * entries in the LLH register. There are also
	 * NIG_REG_P[01]_LLH_FUNC_MEM2 registers that complete the
	 * total number of CAM entries to 16.
	 *
	 * Currently we won't configure NIG for MACs other than a primary ETH
	 * MAC and iSCSI L2 MAC.
	 *
	 * If this MAC is moving from one Queue to another, no need to change
	 * NIG configuration.
	 */
	if (cmd != BNX2X_VLAN_MAC_MOVE) {
		if (test_bit(BNX2X_ISCSI_ETH_MAC, vlan_mac_flags))
			bnx2x_set_mac_in_nig(bp, add, mac,
					     BNX2X_LLH_CAM_ISCSI_ETH_LINE);
		else if (test_bit(BNX2X_ETH_MAC, vlan_mac_flags))
			bnx2x_set_mac_in_nig(bp, add, mac,
					     BNX2X_LLH_CAM_ETH_LINE);
	}

	/* Reset the ramrod data buffer for the first rule */
	if (rule_idx == 0)
		memset(data, 0, sizeof(*data));

	/* Setup a command header */
	bnx2x_vlan_mac_set_cmd_hdr_e2(bp, o, add, CLASSIFY_RULE_OPCODE_MAC,
				      &rule_entry->mac.header);

	DP(BNX2X_MSG_SP, "About to %s MAC %pM for Queue %d\n",
	   add ? "add" : "delete", mac, raw->cl_id);

	/* Set a MAC itself */
	bnx2x_set_fw_mac_addr(&rule_entry->mac.mac_msb,
			      &rule_entry->mac.mac_mid,
			      &rule_entry->mac.mac_lsb, mac);

	/* MOVE: Add a rule that will add this MAC to the target Queue */
	if (cmd == BNX2X_VLAN_MAC_MOVE) {
		rule_entry++;
		rule_cnt++;

		/* Setup ramrod data */
		bnx2x_vlan_mac_set_cmd_hdr_e2(bp,
					elem->cmd_data.vlan_mac.target_obj,
					true, CLASSIFY_RULE_OPCODE_MAC,
					&rule_entry->mac.header);

		/* Set a MAC itself */
		bnx2x_set_fw_mac_addr(&rule_entry->mac.mac_msb,
				      &rule_entry->mac.mac_mid,
				      &rule_entry->mac.mac_lsb, mac);
	}

	/* Set the ramrod data header */
	/* TODO: take this to the higher level in order to prevent multiple
	   writing */
	bnx2x_vlan_mac_set_rdata_hdr_e2(raw->cid, raw->state, &data->header,
					rule_cnt);
}

/**
 * bnx2x_vlan_mac_set_rdata_hdr_e1x - set a header in a single classify ramrod
 *
 * @bp: device handle
 * @o: queue
 * @type: BNX2X_FILTER_XXX_PENDING
 * @cam_offset: offset in cam memory
 * @hdr: pointer to a header to setup
 *
 * E1/E1H
 */
static inline void bnx2x_vlan_mac_set_rdata_hdr_e1x(struct bnx2x *bp,
	struct bnx2x_vlan_mac_obj *o, int type, int cam_offset,
	struct mac_configuration_hdr *hdr)
{
	struct bnx2x_raw_obj *r = &o->raw;

	hdr->length = 1;
	hdr->offset = (u8)cam_offset;
	hdr->client_id = 0xff;
	hdr->echo = ((r->cid & BNX2X_SWCID_MASK) | (type << BNX2X_SWCID_SHIFT));
}

static inline void bnx2x_vlan_mac_set_cfg_entry_e1x(struct bnx2x *bp,
	struct bnx2x_vlan_mac_obj *o, bool add, int opcode, u8 *mac,
	u16 vlan_id, struct mac_configuration_entry *cfg_entry)
{
	struct bnx2x_raw_obj *r = &o->raw;
	u32 cl_bit_vec = (1 << r->cl_id);

	cfg_entry->clients_bit_vector = cpu_to_le32(cl_bit_vec);
	cfg_entry->pf_id = r->func_id;
	cfg_entry->vlan_id = cpu_to_le16(vlan_id);

	if (add) {
		SET_FLAG(cfg_entry->flags, MAC_CONFIGURATION_ENTRY_ACTION_TYPE,
			 T_ETH_MAC_COMMAND_SET);
		SET_FLAG(cfg_entry->flags,
			 MAC_CONFIGURATION_ENTRY_VLAN_FILTERING_MODE, opcode);

		/* Set a MAC in a ramrod data */
		bnx2x_set_fw_mac_addr(&cfg_entry->msb_mac_addr,
				      &cfg_entry->middle_mac_addr,
				      &cfg_entry->lsb_mac_addr, mac);
	} else
		SET_FLAG(cfg_entry->flags, MAC_CONFIGURATION_ENTRY_ACTION_TYPE,
			 T_ETH_MAC_COMMAND_INVALIDATE);
}

static inline void bnx2x_vlan_mac_set_rdata_e1x(struct bnx2x *bp,
	struct bnx2x_vlan_mac_obj *o, int type, int cam_offset, bool add,
	u8 *mac, u16 vlan_id, int opcode, struct mac_configuration_cmd *config)
{
	struct mac_configuration_entry *cfg_entry = &config->config_table[0];
	struct bnx2x_raw_obj *raw = &o->raw;

	bnx2x_vlan_mac_set_rdata_hdr_e1x(bp, o, type, cam_offset,
					 &config->hdr);
	bnx2x_vlan_mac_set_cfg_entry_e1x(bp, o, add, opcode, mac, vlan_id,
					 cfg_entry);

	DP(BNX2X_MSG_SP, "%s MAC %pM CLID %d CAM offset %d\n",
	   add ? "setting" : "clearing",
	   mac, raw->cl_id, cam_offset);
}

/**
 * bnx2x_set_one_mac_e1x - fill a single MAC rule ramrod data
 *
 * @bp: device handle
 * @o: bnx2x_vlan_mac_obj
 * @elem: bnx2x_exeq_elem
 * @rule_idx: rule_idx
 * @cam_offset: cam_offset
 */
static void bnx2x_set_one_mac_e1x(struct bnx2x *bp,
				  struct bnx2x_vlan_mac_obj *o,
				  struct bnx2x_exeq_elem *elem, int rule_idx,
				  int cam_offset)
{
	struct bnx2x_raw_obj *raw = &o->raw;
	struct mac_configuration_cmd *config =
		(struct mac_configuration_cmd *)(raw->rdata);
	/*
	 * 57710 and 57711 do not support MOVE command,
	 * so it's either ADD or DEL
	 */
	bool add = (elem->cmd_data.vlan_mac.cmd == BNX2X_VLAN_MAC_ADD) ?
		true : false;

	/* Reset the ramrod data buffer */
	memset(config, 0, sizeof(*config));

	bnx2x_vlan_mac_set_rdata_e1x(bp, o, BNX2X_FILTER_MAC_PENDING,
				     cam_offset, add,
				     elem->cmd_data.vlan_mac.u.mac.mac, 0,
				     ETH_VLAN_FILTER_ANY_VLAN, config);
}

static void bnx2x_set_one_vlan_e2(struct bnx2x *bp,
				  struct bnx2x_vlan_mac_obj *o,
				  struct bnx2x_exeq_elem *elem, int rule_idx,
				  int cam_offset)
{
	struct bnx2x_raw_obj *raw = &o->raw;
	struct eth_classify_rules_ramrod_data *data =
		(struct eth_classify_rules_ramrod_data *)(raw->rdata);
	int rule_cnt = rule_idx + 1;
	union eth_classify_rule_cmd *rule_entry = &data->rules[rule_idx];
	int cmd = elem->cmd_data.vlan_mac.cmd;
	bool add = (cmd == BNX2X_VLAN_MAC_ADD) ? true : false;
	u16 vlan = elem->cmd_data.vlan_mac.u.vlan.vlan;

	/* Reset the ramrod data buffer for the first rule */
	if (rule_idx == 0)
		memset(data, 0, sizeof(*data));

	/* Set a rule header */
	bnx2x_vlan_mac_set_cmd_hdr_e2(bp, o, add, CLASSIFY_RULE_OPCODE_VLAN,
				      &rule_entry->vlan.header);

	DP(BNX2X_MSG_SP, "About to %s VLAN %d\n", (add ? "add" : "delete"),
	   vlan);

	/* Set a VLAN itself */
	rule_entry->vlan.vlan = cpu_to_le16(vlan);

	/* MOVE: Add a rule that will add this VLAN to the target Queue */
	if (cmd == BNX2X_VLAN_MAC_MOVE) {
		rule_entry++;
		rule_cnt++;

		/* Setup ramrod data */
		bnx2x_vlan_mac_set_cmd_hdr_e2(bp,
					elem->cmd_data.vlan_mac.target_obj,
					true, CLASSIFY_RULE_OPCODE_VLAN,
					&rule_entry->vlan.header);

		/* Set a VLAN itself */
		rule_entry->vlan.vlan = cpu_to_le16(vlan);
	}

	/* Set the ramrod data header */
	/* TODO: take this to the higher level in order to prevent multiple
	   writing */
	bnx2x_vlan_mac_set_rdata_hdr_e2(raw->cid, raw->state, &data->header,
					rule_cnt);
}

static void bnx2x_set_one_vlan_mac_e2(struct bnx2x *bp,
				      struct bnx2x_vlan_mac_obj *o,
				      struct bnx2x_exeq_elem *elem,
				      int rule_idx, int cam_offset)
{
	struct bnx2x_raw_obj *raw = &o->raw;
	struct eth_classify_rules_ramrod_data *data =
		(struct eth_classify_rules_ramrod_data *)(raw->rdata);
	int rule_cnt = rule_idx + 1;
	union eth_classify_rule_cmd *rule_entry = &data->rules[rule_idx];
	int cmd = elem->cmd_data.vlan_mac.cmd;
	bool add = (cmd == BNX2X_VLAN_MAC_ADD) ? true : false;
	u16 vlan = elem->cmd_data.vlan_mac.u.vlan_mac.vlan;
	u8 *mac = elem->cmd_data.vlan_mac.u.vlan_mac.mac;

	/* Reset the ramrod data buffer for the first rule */
	if (rule_idx == 0)
		memset(data, 0, sizeof(*data));

	/* Set a rule header */
	bnx2x_vlan_mac_set_cmd_hdr_e2(bp, o, add, CLASSIFY_RULE_OPCODE_PAIR,
				      &rule_entry->pair.header);

	/* Set VLAN and MAC themselves */
	rule_entry->pair.vlan = cpu_to_le16(vlan);
	bnx2x_set_fw_mac_addr(&rule_entry->pair.mac_msb,
			      &rule_entry->pair.mac_mid,
			      &rule_entry->pair.mac_lsb, mac);

	/* MOVE: Add a rule that will add this pair to the target Queue */
	if (cmd == BNX2X_VLAN_MAC_MOVE) {
		rule_entry++;
		rule_cnt++;

		/* Setup ramrod data */
		bnx2x_vlan_mac_set_cmd_hdr_e2(bp,
					elem->cmd_data.vlan_mac.target_obj,
					true, CLASSIFY_RULE_OPCODE_PAIR,
					&rule_entry->pair.header);

		/* Set a VLAN itself */
		rule_entry->pair.vlan = cpu_to_le16(vlan);
		bnx2x_set_fw_mac_addr(&rule_entry->pair.mac_msb,
				      &rule_entry->pair.mac_mid,
				      &rule_entry->pair.mac_lsb, mac);
	}

	/* Set the ramrod data header */
	/* TODO: take this to the higher level in order to prevent multiple
	   writing */
	bnx2x_vlan_mac_set_rdata_hdr_e2(raw->cid, raw->state, &data->header,
					rule_cnt);
}

/**
 * bnx2x_set_one_vlan_mac_e1h - fill a single VLAN-MAC rule ramrod data
 *
 * @bp: device handle
 * @o: bnx2x_vlan_mac_obj
 * @elem: bnx2x_exeq_elem
 * @rule_idx: rule_idx
 * @cam_offset: cam_offset
 */
static void bnx2x_set_one_vlan_mac_e1h(struct bnx2x *bp,
				       struct bnx2x_vlan_mac_obj *o,
				       struct bnx2x_exeq_elem *elem,
				       int rule_idx, int cam_offset)
{
	struct bnx2x_raw_obj *raw = &o->raw;
	struct mac_configuration_cmd *config =
		(struct mac_configuration_cmd *)(raw->rdata);
	/*
	 * 57710 and 57711 do not support MOVE command,
	 * so it's either ADD or DEL
	 */
	bool add = (elem->cmd_data.vlan_mac.cmd == BNX2X_VLAN_MAC_ADD) ?
		true : false;

	/* Reset the ramrod data buffer */
	memset(config, 0, sizeof(*config));

	bnx2x_vlan_mac_set_rdata_e1x(bp, o, BNX2X_FILTER_VLAN_MAC_PENDING,
				     cam_offset, add,
				     elem->cmd_data.vlan_mac.u.vlan_mac.mac,
				     elem->cmd_data.vlan_mac.u.vlan_mac.vlan,
				     ETH_VLAN_FILTER_CLASSIFY, config);
}

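/*
 * Local helper: an equivalent list_next_entry() was added to <linux/list.h>
 * in later kernels.
 */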
#define list_next_entry(pos, member) \
	list_entry((pos)->member.next, typeof(*(pos)), member)

/**
 * bnx2x_vlan_mac_restore - reconfigure next MAC/VLAN/VLAN-MAC element
 *
 * @bp: device handle
 * @p: command parameters
 * @ppos: pointer to the cookie
 *
 * reconfigure next MAC/VLAN/VLAN-MAC element from the
 * previously configured elements list.
 *
 * from command parameters only RAMROD_COMP_WAIT bit in ramrod_flags is taken
 * into account
 *
 * pointer to the cookie - that should be given back in the next call to make
 * function handle the next element. If *ppos is set to NULL it will restart the
 * iterator. If returned *ppos == NULL this means that the last element has been
 * handled.
 *
 */
static int bnx2x_vlan_mac_restore(struct bnx2x *bp,
				  struct bnx2x_vlan_mac_ramrod_params *p,
				  struct bnx2x_vlan_mac_registry_elem **ppos)
{
	struct bnx2x_vlan_mac_registry_elem *pos;
	struct bnx2x_vlan_mac_obj *o = p->vlan_mac_obj;

	/* If list is empty - there is nothing to do here */
	if (list_empty(&o->head)) {
		*ppos = NULL;
		return 0;
	}

	/* make a step... */
	if (*ppos == NULL)
		*ppos = list_first_entry(&o->head,
					 struct bnx2x_vlan_mac_registry_elem,
					 link);
	else
		*ppos = list_next_entry(*ppos, link);

	pos = *ppos;

	/* If it's the last step - return NULL */
	if (list_is_last(&pos->link, &o->head))
		*ppos = NULL;

	/* Prepare a 'user_req' */
	memcpy(&p->user_req.u, &pos->u, sizeof(pos->u));

	/* Set the command */
	p->user_req.cmd = BNX2X_VLAN_MAC_ADD;

	/* Set vlan_mac_flags */
	p->user_req.vlan_mac_flags = pos->vlan_mac_flags;

	/* Set a restore bit */
	__set_bit(RAMROD_RESTORE, &p->ramrod_flags);

	return bnx2x_config_vlan_mac(bp, p);
}

/*
 * bnx2x_exeq_get_mac/bnx2x_exeq_get_vlan/bnx2x_exeq_get_vlan_mac return a
 * pointer to an element with a specific criteria and NULL if such an element
 * hasn't been found.
 */
static struct bnx2x_exeq_elem *bnx2x_exeq_get_mac(
	struct bnx2x_exe_queue_obj *o,
	struct bnx2x_exeq_elem *elem)
{
	struct bnx2x_exeq_elem *pos;
	struct bnx2x_mac_ramrod_data *data = &elem->cmd_data.vlan_mac.u.mac;

	/* Check pending for execution commands */
	list_for_each_entry(pos, &o->exe_queue, link)
		if (!memcmp(&pos->cmd_data.vlan_mac.u.mac, data,
			    sizeof(*data)) &&
		    (pos->cmd_data.vlan_mac.cmd == elem->cmd_data.vlan_mac.cmd))
			return pos;

	return NULL;
}

static struct bnx2x_exeq_elem *bnx2x_exeq_get_vlan(
	struct bnx2x_exe_queue_obj *o,
	struct bnx2x_exeq_elem *elem)
{
	struct bnx2x_exeq_elem *pos;
	struct bnx2x_vlan_ramrod_data *data = &elem->cmd_data.vlan_mac.u.vlan;

	/* Check pending for execution commands */
	list_for_each_entry(pos, &o->exe_queue, link)
		if (!memcmp(&pos->cmd_data.vlan_mac.u.vlan, data,
			    sizeof(*data)) &&
		    (pos->cmd_data.vlan_mac.cmd == elem->cmd_data.vlan_mac.cmd))
			return pos;

	return NULL;
}

static struct bnx2x_exeq_elem *bnx2x_exeq_get_vlan_mac(
	struct bnx2x_exe_queue_obj *o,
	struct bnx2x_exeq_elem *elem)
{
	struct bnx2x_exeq_elem *pos;
	struct bnx2x_vlan_mac_ramrod_data *data =
		&elem->cmd_data.vlan_mac.u.vlan_mac;

	/* Check pending for execution commands */
	list_for_each_entry(pos, &o->exe_queue, link)
		if (!memcmp(&pos->cmd_data.vlan_mac.u.vlan_mac, data,
			    sizeof(*data)) &&
		    (pos->cmd_data.vlan_mac.cmd == elem->cmd_data.vlan_mac.cmd))
			return pos;

	return NULL;
}

/**
 * bnx2x_validate_vlan_mac_add - check if an ADD command can be executed
 *
 * @bp: device handle
 * @qo: bnx2x_qable_obj
 * @elem: bnx2x_exeq_elem
 *
 * Checks that the requested configuration can be added. If yes and if
 * requested, consume CAM credit.
 *
 * The 'validate' is run after the 'optimize'.
 *
 */
static inline int bnx2x_validate_vlan_mac_add(struct bnx2x *bp,
					      union bnx2x_qable_obj *qo,
					      struct bnx2x_exeq_elem *elem)
{
	struct bnx2x_vlan_mac_obj *o = &qo->vlan_mac;
	struct bnx2x_exe_queue_obj *exeq = &o->exe_queue;
	int rc;

	/* Check the registry */
	rc = o->check_add(o, &elem->cmd_data.vlan_mac.u);
	if (rc) {
		DP(BNX2X_MSG_SP, "ADD command is not allowed considering current registry state\n");
		return rc;
	}

	/*
	 * Check if there is a pending ADD command for this
	 * MAC/VLAN/VLAN-MAC. Return an error if there is.
	 */
	if (exeq->get(exeq, elem)) {
		DP(BNX2X_MSG_SP, "There is a pending ADD command already\n");
		return -EEXIST;
	}

	/*
	 * TODO: Check the pending MOVE from other objects where this
	 * object is a destination object.
	 */

	/* Consume the credit if not requested not to */
	if (!(test_bit(BNX2X_DONT_CONSUME_CAM_CREDIT,
		       &elem->cmd_data.vlan_mac.vlan_mac_flags) ||
	      o->get_credit(o)))
		return -EINVAL;

	return 0;
}

/**
 * bnx2x_validate_vlan_mac_del - check if the DEL command can be executed
 *
 * @bp: device handle
 * @qo: qable object to check
 * @elem: element that needs to be deleted
 *
 * Checks that the requested configuration can be deleted. If yes and if
 * requested, returns a CAM credit.
 *
 * The 'validate' is run after the 'optimize'.
 */
static inline int bnx2x_validate_vlan_mac_del(struct bnx2x *bp,
					      union bnx2x_qable_obj *qo,
					      struct bnx2x_exeq_elem *elem)
{
	struct bnx2x_vlan_mac_obj *o = &qo->vlan_mac;
	struct bnx2x_vlan_mac_registry_elem *pos;
	struct bnx2x_exe_queue_obj *exeq = &o->exe_queue;
	struct bnx2x_exeq_elem query_elem;

	/* If this classification can not be deleted (doesn't exist)
	 * - return -EEXIST.
	 */
	pos = o->check_del(o, &elem->cmd_data.vlan_mac.u);
	if (!pos) {
		DP(BNX2X_MSG_SP, "DEL command is not allowed considering current registry state\n");
		return -EEXIST;
	}

	/*
	 * Check if there are pending DEL or MOVE commands for this
	 * MAC/VLAN/VLAN-MAC. Return an error if so.
	 */
	memcpy(&query_elem, elem, sizeof(query_elem));

	/* Check for MOVE commands */
	query_elem.cmd_data.vlan_mac.cmd = BNX2X_VLAN_MAC_MOVE;
	if (exeq->get(exeq, &query_elem)) {
		BNX2X_ERR("There is a pending MOVE command already\n");
		return -EINVAL;
	}

	/* Check for DEL commands */
	if (exeq->get(exeq, elem)) {
		DP(BNX2X_MSG_SP, "There is a pending DEL command already\n");
		return -EEXIST;
	}

	/* Return the credit to the credit pool if not requested not to */
	if (!(test_bit(BNX2X_DONT_CONSUME_CAM_CREDIT,
		       &elem->cmd_data.vlan_mac.vlan_mac_flags) ||
	      o->put_credit(o))) {
		BNX2X_ERR("Failed to return a credit\n");
		return -EINVAL;
	}

	return 0;
}

/**
 * bnx2x_validate_vlan_mac_move - check if the MOVE command can be executed
 *
 * @bp: device handle
 * @qo: qable object to check (source)
 * @elem: element that needs to be moved
 *
 * Checks that the requested configuration can be moved. If yes and if
 * requested, returns a CAM credit.
 *
 * The 'validate' is run after the 'optimize'.
 */
static inline int bnx2x_validate_vlan_mac_move(struct bnx2x *bp,
					       union bnx2x_qable_obj *qo,
					       struct bnx2x_exeq_elem *elem)
{
	struct bnx2x_vlan_mac_obj *src_o = &qo->vlan_mac;
	struct bnx2x_vlan_mac_obj *dest_o = elem->cmd_data.vlan_mac.target_obj;
	struct bnx2x_exeq_elem query_elem;
	struct bnx2x_exe_queue_obj *src_exeq = &src_o->exe_queue;
	struct bnx2x_exe_queue_obj *dest_exeq = &dest_o->exe_queue;

	/*
	 * Check if we can perform this operation based on the current registry
	 * state.
	 */
	if (!src_o->check_move(src_o, dest_o, &elem->cmd_data.vlan_mac.u)) {
		DP(BNX2X_MSG_SP, "MOVE command is not allowed considering current registry state\n");
		return -EINVAL;
	}

	/*
	 * Check if there is an already pending DEL or MOVE command for the
	 * source object or ADD command for a destination object. Return an
	 * error if so.
	 */
	memcpy(&query_elem, elem, sizeof(query_elem));

	/* Check DEL on source */
	query_elem.cmd_data.vlan_mac.cmd = BNX2X_VLAN_MAC_DEL;
	if (src_exeq->get(src_exeq, &query_elem)) {
		BNX2X_ERR("There is a pending DEL command on the source queue already\n");
		return -EINVAL;
	}

	/* Check MOVE on source */
	if (src_exeq->get(src_exeq, elem)) {
		DP(BNX2X_MSG_SP, "There is a pending MOVE command already\n");
		return -EEXIST;
	}

	/* Check ADD on destination */
	query_elem.cmd_data.vlan_mac.cmd = BNX2X_VLAN_MAC_ADD;
	if (dest_exeq->get(dest_exeq, &query_elem)) {
		BNX2X_ERR("There is a pending ADD command on the destination queue already\n");
		return -EINVAL;
	}

	/* Consume the credit if not requested not to */
	if (!(test_bit(BNX2X_DONT_CONSUME_CAM_CREDIT_DEST,
		       &elem->cmd_data.vlan_mac.vlan_mac_flags) ||
	      dest_o->get_credit(dest_o)))
		return -EINVAL;

	if (!(test_bit(BNX2X_DONT_CONSUME_CAM_CREDIT,
		       &elem->cmd_data.vlan_mac.vlan_mac_flags) ||
	      src_o->put_credit(src_o))) {
		/* return the credit taken from dest... */
		dest_o->put_credit(dest_o);
		return -EINVAL;
	}

	return 0;
}

static int bnx2x_validate_vlan_mac(struct bnx2x *bp,
				   union bnx2x_qable_obj *qo,
				   struct bnx2x_exeq_elem *elem)
{
	switch (elem->cmd_data.vlan_mac.cmd) {
	case BNX2X_VLAN_MAC_ADD:
		return bnx2x_validate_vlan_mac_add(bp, qo, elem);
	case BNX2X_VLAN_MAC_DEL:
		return bnx2x_validate_vlan_mac_del(bp, qo, elem);
	case BNX2X_VLAN_MAC_MOVE:
		return bnx2x_validate_vlan_mac_move(bp, qo, elem);
	default:
		return -EINVAL;
	}
}

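/*
 * Exe-queue 'remove' callback: undo the credit accounting done by 'validate'
 * when a still-pending command is dropped from the queue (see
 * bnx2x_vlan_mac_del_all()).
 */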
static int bnx2x_remove_vlan_mac(struct bnx2x *bp,
				 union bnx2x_qable_obj *qo,
				 struct bnx2x_exeq_elem *elem)
{
	int rc = 0;

	/* If consumption wasn't required, nothing to do */
	if (test_bit(BNX2X_DONT_CONSUME_CAM_CREDIT,
		     &elem->cmd_data.vlan_mac.vlan_mac_flags))
		return 0;

	switch (elem->cmd_data.vlan_mac.cmd) {
	case BNX2X_VLAN_MAC_ADD:
	case BNX2X_VLAN_MAC_MOVE:
		rc = qo->vlan_mac.put_credit(&qo->vlan_mac);
		break;
	case BNX2X_VLAN_MAC_DEL:
		rc = qo->vlan_mac.get_credit(&qo->vlan_mac);
		break;
	default:
		return -EINVAL;
	}

	if (rc != true)
		return -EINVAL;

	return 0;
}

/**
 * bnx2x_wait_vlan_mac - passively wait for 5 seconds until all work completes.
 *
 * @bp: device handle
 * @o: bnx2x_vlan_mac_obj
 *
 */
static int bnx2x_wait_vlan_mac(struct bnx2x *bp,
			       struct bnx2x_vlan_mac_obj *o)
{
	int cnt = 5000, rc;
	struct bnx2x_exe_queue_obj *exeq = &o->exe_queue;
	struct bnx2x_raw_obj *raw = &o->raw;

	while (cnt--) {
		/* Wait for the current command to complete */
		rc = raw->wait_comp(bp, raw);
		if (rc)
			return rc;

		/* Wait until there are no pending commands */
		if (!bnx2x_exe_queue_empty(exeq))
			usleep_range(1000, 1000);
		else
			return 0;
	}

	return -EBUSY;
}

/**
 * bnx2x_complete_vlan_mac - complete one VLAN-MAC ramrod
 *
 * @bp: device handle
 * @o: bnx2x_vlan_mac_obj
 * @cqe: completion element we are handling
 * @ramrod_flags: if RAMROD_CONT is set the next bulk of pending commands
 *                will be executed
 *
 */
static int bnx2x_complete_vlan_mac(struct bnx2x *bp,
				   struct bnx2x_vlan_mac_obj *o,
				   union event_ring_elem *cqe,
				   unsigned long *ramrod_flags)
{
	struct bnx2x_raw_obj *r = &o->raw;
	int rc;

	/* Reset pending list */
	bnx2x_exe_queue_reset_pending(bp, &o->exe_queue);

	/* Clear pending */
	r->clear_pending(r);

	/* If ramrod failed this is most likely a SW bug */
	if (cqe->message.error)
		return -EINVAL;

	/* Run the next bulk of pending commands if requested */
	if (test_bit(RAMROD_CONT, ramrod_flags)) {
		rc = bnx2x_exe_queue_step(bp, &o->exe_queue, ramrod_flags);
		if (rc < 0)
			return rc;
	}

	/* If there is more work to do return PENDING */
	if (!bnx2x_exe_queue_empty(&o->exe_queue))
		return 1;

	return 0;
}

/**
 * bnx2x_optimize_vlan_mac - optimize ADD and DEL commands.
 *
 * @bp: device handle
 * @qo: bnx2x_qable_obj
 * @elem: bnx2x_exeq_elem
 *
 * An ADD cancels a not-yet-executed DEL of the same entry (and vice versa)
 * instead of sending both to the FW; the credit taken by the cancelled
 * command is returned.
 */
static int bnx2x_optimize_vlan_mac(struct bnx2x *bp,
				   union bnx2x_qable_obj *qo,
				   struct bnx2x_exeq_elem *elem)
{
	struct bnx2x_exeq_elem query, *pos;
	struct bnx2x_vlan_mac_obj *o = &qo->vlan_mac;
	struct bnx2x_exe_queue_obj *exeq = &o->exe_queue;

	memcpy(&query, elem, sizeof(query));

	switch (elem->cmd_data.vlan_mac.cmd) {
	case BNX2X_VLAN_MAC_ADD:
		query.cmd_data.vlan_mac.cmd = BNX2X_VLAN_MAC_DEL;
		break;
	case BNX2X_VLAN_MAC_DEL:
		query.cmd_data.vlan_mac.cmd = BNX2X_VLAN_MAC_ADD;
		break;
	default:
		/* Don't handle anything other than ADD or DEL */
		return 0;
	}

	/* If we found the appropriate element - delete it */
	pos = exeq->get(exeq, &query);
	if (pos) {
		/* Return the credit of the optimized command */
		if (!test_bit(BNX2X_DONT_CONSUME_CAM_CREDIT,
			      &pos->cmd_data.vlan_mac.vlan_mac_flags)) {
			if ((query.cmd_data.vlan_mac.cmd ==
			     BNX2X_VLAN_MAC_ADD) && !o->put_credit(o)) {
				BNX2X_ERR("Failed to return the credit for the optimized ADD command\n");
				return -EINVAL;
			} else if (!o->get_credit(o)) { /* VLAN_MAC_DEL */
				BNX2X_ERR("Failed to recover the credit from the optimized DEL command\n");
				return -EINVAL;
			}
		}

		DP(BNX2X_MSG_SP, "Optimizing %s command\n",
		   (elem->cmd_data.vlan_mac.cmd == BNX2X_VLAN_MAC_ADD) ?
		   "ADD" : "DEL");

		list_del(&pos->link);
		bnx2x_exe_queue_free_elem(bp, pos);
		return 1;
	}

	return 0;
}

/**
 * bnx2x_vlan_mac_get_registry_elem - prepare a registry element
 *
 * @bp: device handle
 * @o: vlan_mac object the element belongs to
 * @elem: execution queue element describing the command
 * @restore: true if this is a restore flow
 * @re: the prepared registry element is returned here
 *
 * prepare a registry element according to the current command request.
 */
static inline int bnx2x_vlan_mac_get_registry_elem(
	struct bnx2x *bp,
	struct bnx2x_vlan_mac_obj *o,
	struct bnx2x_exeq_elem *elem,
	bool restore,
	struct bnx2x_vlan_mac_registry_elem **re)
{
	int cmd = elem->cmd_data.vlan_mac.cmd;
	struct bnx2x_vlan_mac_registry_elem *reg_elem;

	/* Allocate a new registry element if needed. */
	if (!restore &&
	    ((cmd == BNX2X_VLAN_MAC_ADD) || (cmd == BNX2X_VLAN_MAC_MOVE))) {
		reg_elem = kzalloc(sizeof(*reg_elem), GFP_ATOMIC);
		if (!reg_elem)
			return -ENOMEM;

		/* Get a new CAM offset */
		if (!o->get_cam_offset(o, &reg_elem->cam_offset)) {
			/*
			 * This should never happen, because we have checked
			 * the CAM availability in the 'validate'.
			 */
			WARN_ON(1);
			kfree(reg_elem);
			return -EINVAL;
		}

		DP(BNX2X_MSG_SP, "Got cam offset %d\n", reg_elem->cam_offset);

		/* Set a VLAN-MAC data */
		memcpy(&reg_elem->u, &elem->cmd_data.vlan_mac.u,
			  sizeof(reg_elem->u));

		/* Copy the flags (needed for DEL and RESTORE flows) */
		reg_elem->vlan_mac_flags =
			elem->cmd_data.vlan_mac.vlan_mac_flags;
	} else /* DEL, RESTORE */
		reg_elem = o->check_del(o, &elem->cmd_data.vlan_mac.u);

	*re = reg_elem;
	return 0;
}

/**
 * bnx2x_execute_vlan_mac - execute vlan mac command
 *
 * @bp: device handle
 * @qo: qable object (the vlan_mac object)
 * @exe_chunk: chunk of pending commands to execute
 * @ramrod_flags: execution flags
 *
 * go and send a ramrod!
 */
static int bnx2x_execute_vlan_mac(struct bnx2x *bp,
				  union bnx2x_qable_obj *qo,
				  struct list_head *exe_chunk,
				  unsigned long *ramrod_flags)
{
	struct bnx2x_exeq_elem *elem;
	struct bnx2x_vlan_mac_obj *o = &qo->vlan_mac, *cam_obj;
	struct bnx2x_raw_obj *r = &o->raw;
	int rc, idx = 0;
	bool restore = test_bit(RAMROD_RESTORE, ramrod_flags);
	bool drv_only = test_bit(RAMROD_DRV_CLR_ONLY, ramrod_flags);
	struct bnx2x_vlan_mac_registry_elem *reg_elem;
	int cmd;

	/*
	 * If DRIVER_ONLY execution is requested, cleanup a registry
	 * and exit. Otherwise send a ramrod to FW.
	 */
	if (!drv_only) {
		WARN_ON(r->check_pending(r));

		/* Set pending */
		r->set_pending(r);

		/* Fill the ramrod data */
		list_for_each_entry(elem, exe_chunk, link) {
			cmd = elem->cmd_data.vlan_mac.cmd;
			/*
			 * We will add to the target object in MOVE command, so
			 * change the object for a CAM search.
			 */
			if (cmd == BNX2X_VLAN_MAC_MOVE)
				cam_obj = elem->cmd_data.vlan_mac.target_obj;
			else
				cam_obj = o;

			rc = bnx2x_vlan_mac_get_registry_elem(bp, cam_obj,
							      elem, restore,
							      &reg_elem);
			if (rc)
				goto error_exit;

			WARN_ON(!reg_elem);

			/* Push a new entry into the registry */
			if (!restore &&
			    ((cmd == BNX2X_VLAN_MAC_ADD) ||
			    (cmd == BNX2X_VLAN_MAC_MOVE)))
				list_add(&reg_elem->link, &cam_obj->head);

			/* Configure a single command in a ramrod data buffer */
			o->set_one_rule(bp, o, elem, idx,
					reg_elem->cam_offset);

			/* MOVE command consumes 2 entries in the ramrod data */
			if (cmd == BNX2X_VLAN_MAC_MOVE)
				idx += 2;
			else
				idx++;
		}

		/*
		 * No need for an explicit memory barrier here as long as we
		 * ensure the ordering of writing to the SPQ element
		 * and updating of the SPQ producer which involves a memory
		 * read and we will have to put a full memory barrier there
		 * (inside bnx2x_sp_post()).
		 */

		rc = bnx2x_sp_post(bp, o->ramrod_cmd, r->cid,
				   U64_HI(r->rdata_mapping),
				   U64_LO(r->rdata_mapping),
				   ETH_CONNECTION_TYPE);
		if (rc)
			goto error_exit;
	}

	/* Now, when we are done with the ramrod - clean up the registry */
	list_for_each_entry(elem, exe_chunk, link) {
		cmd = elem->cmd_data.vlan_mac.cmd;
		if ((cmd == BNX2X_VLAN_MAC_DEL) ||
		    (cmd == BNX2X_VLAN_MAC_MOVE)) {
			reg_elem = o->check_del(o, &elem->cmd_data.vlan_mac.u);

			WARN_ON(!reg_elem);

			o->put_cam_offset(o, reg_elem->cam_offset);
			list_del(&reg_elem->link);
			kfree(reg_elem);
		}
	}

	if (!drv_only)
		return 1;
	else
		return 0;

error_exit:
	r->clear_pending(r);

	/* Cleanup a registry in case of a failure */
	list_for_each_entry(elem, exe_chunk, link) {
		cmd = elem->cmd_data.vlan_mac.cmd;

		if (cmd == BNX2X_VLAN_MAC_MOVE)
			cam_obj = elem->cmd_data.vlan_mac.target_obj;
		else
			cam_obj = o;

		/* Delete all newly added above entries */
		if (!restore &&
		    ((cmd == BNX2X_VLAN_MAC_ADD) ||
		    (cmd == BNX2X_VLAN_MAC_MOVE))) {
			reg_elem = o->check_del(cam_obj,
						&elem->cmd_data.vlan_mac.u);
			if (reg_elem) {
				list_del(&reg_elem->link);
				kfree(reg_elem);
			}
		}
	}

	return rc;
}

static inline int bnx2x_vlan_mac_push_new_cmd(
	struct bnx2x *bp,
	struct bnx2x_vlan_mac_ramrod_params *p)
{
	struct bnx2x_exeq_elem *elem;
	struct bnx2x_vlan_mac_obj *o = p->vlan_mac_obj;
	bool restore = test_bit(RAMROD_RESTORE, &p->ramrod_flags);

	/* Allocate the execution queue element */
	elem = bnx2x_exe_queue_alloc_elem(bp);
	if (!elem)
		return -ENOMEM;

	/* Set the command 'length' */
	switch (p->user_req.cmd) {
	case BNX2X_VLAN_MAC_MOVE:
		elem->cmd_len = 2;
		break;
	default:
		elem->cmd_len = 1;
	}

	/* Fill the object specific info */
	memcpy(&elem->cmd_data.vlan_mac, &p->user_req, sizeof(p->user_req));

	/* Try to add a new command to the pending list */
	return bnx2x_exe_queue_add(bp, &o->exe_queue, elem, restore);
}

/**
 * bnx2x_config_vlan_mac - configure VLAN/MAC/VLAN_MAC filtering rules.
 *
 * @bp: device handle
 * @p: command parameters
 *
 */
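/*
 * Typical usage (sketch): fill p->vlan_mac_obj, p->user_req.u and
 * p->user_req.cmd for an object set up by bnx2x_init_mac_obj() and friends,
 * set RAMROD_COMP_WAIT in p->ramrod_flags for a fully synchronous call, and
 * invoke this function; a positive return value means commands are still
 * pending.
 */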
int bnx2x_config_vlan_mac(
	struct bnx2x *bp,
	struct bnx2x_vlan_mac_ramrod_params *p)
{
	int rc = 0;
	struct bnx2x_vlan_mac_obj *o = p->vlan_mac_obj;
	unsigned long *ramrod_flags = &p->ramrod_flags;
	bool cont = test_bit(RAMROD_CONT, ramrod_flags);
	struct bnx2x_raw_obj *raw = &o->raw;

	/*
	 * Add new elements to the execution list for commands that require it.
	 */
	if (!cont) {
		rc = bnx2x_vlan_mac_push_new_cmd(bp, p);
		if (rc)
			return rc;
	}

	/*
	 * If nothing will be executed further in this iteration we want to
	 * return PENDING if there are pending commands
	 */
	if (!bnx2x_exe_queue_empty(&o->exe_queue))
		rc = 1;

	if (test_bit(RAMROD_DRV_CLR_ONLY, ramrod_flags)) {
		DP(BNX2X_MSG_SP, "RAMROD_DRV_CLR_ONLY requested: clearing a pending bit.\n");
		raw->clear_pending(raw);
	}

	/* Execute commands if required */
	if (cont || test_bit(RAMROD_EXEC, ramrod_flags) ||
	    test_bit(RAMROD_COMP_WAIT, ramrod_flags)) {
		rc = bnx2x_exe_queue_step(bp, &o->exe_queue, ramrod_flags);
		if (rc < 0)
			return rc;
	}

	/*
	 * RAMROD_COMP_WAIT is a superset of RAMROD_EXEC. If it was set
	 * then the user wants to wait until the last command is done.
	 */
	if (test_bit(RAMROD_COMP_WAIT, &p->ramrod_flags)) {
		/*
		 * Wait maximum for the current exe_queue length iterations plus
		 * one (for the current pending command).
		 */
		int max_iterations = bnx2x_exe_queue_length(&o->exe_queue) + 1;

		while (!bnx2x_exe_queue_empty(&o->exe_queue) &&
		       max_iterations--) {

			/* Wait for the current command to complete */
			rc = raw->wait_comp(bp, raw);
			if (rc)
				return rc;

			/* Make a next step */
			rc = bnx2x_exe_queue_step(bp, &o->exe_queue,
						  ramrod_flags);
			if (rc < 0)
				return rc;
		}

		return 0;
	}

	return rc;
}

/**
 * bnx2x_vlan_mac_del_all - delete elements with given vlan_mac_flags spec
 *
 * @bp: device handle
 * @o: vlan_mac object to delete from
 * @vlan_mac_flags: flags an element must match to be deleted
 * @ramrod_flags: execution flags to be used for this deletion
 *
 * Returns 0 if the last operation has completed successfully and there are no
 * more elements left, positive value if the last operation has completed
 * successfully and there are more previously configured elements, negative
 * value if the current operation has failed.
 */
1813static int bnx2x_vlan_mac_del_all(struct bnx2x *bp,
1814 struct bnx2x_vlan_mac_obj *o,
1815 unsigned long *vlan_mac_flags,
1816 unsigned long *ramrod_flags)
1817{
1818 struct bnx2x_vlan_mac_registry_elem *pos = NULL;
1819 int rc = 0;
1820 struct bnx2x_vlan_mac_ramrod_params p;
1821 struct bnx2x_exe_queue_obj *exeq = &o->exe_queue;
1822 struct bnx2x_exeq_elem *exeq_pos, *exeq_pos_n;
1823
1824 /* Clear pending commands first */
1825
1826 spin_lock_bh(&exeq->lock);
1827
1828 list_for_each_entry_safe(exeq_pos, exeq_pos_n, &exeq->exe_queue, link) {
1829 if (exeq_pos->cmd_data.vlan_mac.vlan_mac_flags ==
Yuval Mintz460a25c2012-01-23 07:31:51 +00001830 *vlan_mac_flags) {
1831 rc = exeq->remove(bp, exeq->owner, exeq_pos);
1832 if (rc) {
1833 BNX2X_ERR("Failed to remove command\n");
Dan Carpentera44acd52012-01-24 21:59:31 +00001834 spin_unlock_bh(&exeq->lock);
Yuval Mintz460a25c2012-01-23 07:31:51 +00001835 return rc;
1836 }
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03001837 list_del(&exeq_pos->link);
Yuval Mintz460a25c2012-01-23 07:31:51 +00001838 }
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03001839 }
1840
1841 spin_unlock_bh(&exeq->lock);
1842
1843 /* Prepare a command request */
1844 memset(&p, 0, sizeof(p));
1845 p.vlan_mac_obj = o;
1846 p.ramrod_flags = *ramrod_flags;
1847 p.user_req.cmd = BNX2X_VLAN_MAC_DEL;
1848
1849 /*
1850 * Add all but the last VLAN-MAC to the execution queue without actually
1851 * execution anything.
1852 */
1853 __clear_bit(RAMROD_COMP_WAIT, &p.ramrod_flags);
1854 __clear_bit(RAMROD_EXEC, &p.ramrod_flags);
1855 __clear_bit(RAMROD_CONT, &p.ramrod_flags);
1856
1857 list_for_each_entry(pos, &o->head, link) {
1858 if (pos->vlan_mac_flags == *vlan_mac_flags) {
1859 p.user_req.vlan_mac_flags = pos->vlan_mac_flags;
1860 memcpy(&p.user_req.u, &pos->u, sizeof(pos->u));
1861 rc = bnx2x_config_vlan_mac(bp, &p);
1862 if (rc < 0) {
1863 BNX2X_ERR("Failed to add a new DEL command\n");
1864 return rc;
1865 }
1866 }
1867 }
1868
1869 p.ramrod_flags = *ramrod_flags;
1870 __set_bit(RAMROD_CONT, &p.ramrod_flags);
1871
1872 return bnx2x_config_vlan_mac(bp, &p);
1873}
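/* A minimal usage sketch (illustration only, hypothetical caller): flushing
 * all ETH MACs through the delete_all() callback wired up below, assuming a
 * MAC object initialized by bnx2x_init_mac_obj():
 *
 *	unsigned long vlan_mac_flags = 0, ramrod_flags = 0;
 *	int rc;
 *
 *	__set_bit(BNX2X_ETH_MAC, &vlan_mac_flags);
 *	__set_bit(RAMROD_COMP_WAIT, &ramrod_flags);
 *	rc = mac_obj->delete_all(bp, mac_obj, &vlan_mac_flags, &ramrod_flags);
 *
 * Matching pending commands are removed first, one DEL command is queued per
 * matching registry entry, and a single RAMROD_CONT pass then executes them,
 * waiting for completion because of RAMROD_COMP_WAIT.
 */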
1874
1875static inline void bnx2x_init_raw_obj(struct bnx2x_raw_obj *raw, u8 cl_id,
1876 u32 cid, u8 func_id, void *rdata, dma_addr_t rdata_mapping, int state,
1877 unsigned long *pstate, bnx2x_obj_type type)
1878{
1879 raw->func_id = func_id;
1880 raw->cid = cid;
1881 raw->cl_id = cl_id;
1882 raw->rdata = rdata;
1883 raw->rdata_mapping = rdata_mapping;
1884 raw->state = state;
1885 raw->pstate = pstate;
1886 raw->obj_type = type;
1887 raw->check_pending = bnx2x_raw_check_pending;
1888 raw->clear_pending = bnx2x_raw_clear_pending;
1889 raw->set_pending = bnx2x_raw_set_pending;
1890 raw->wait_comp = bnx2x_raw_wait;
1891}
1892
1893static inline void bnx2x_init_vlan_mac_common(struct bnx2x_vlan_mac_obj *o,
1894 u8 cl_id, u32 cid, u8 func_id, void *rdata, dma_addr_t rdata_mapping,
1895 int state, unsigned long *pstate, bnx2x_obj_type type,
1896 struct bnx2x_credit_pool_obj *macs_pool,
1897 struct bnx2x_credit_pool_obj *vlans_pool)
1898{
1899 INIT_LIST_HEAD(&o->head);
1900
1901 o->macs_pool = macs_pool;
1902 o->vlans_pool = vlans_pool;
1903
1904 o->delete_all = bnx2x_vlan_mac_del_all;
1905 o->restore = bnx2x_vlan_mac_restore;
1906 o->complete = bnx2x_complete_vlan_mac;
1907 o->wait = bnx2x_wait_vlan_mac;
1908
1909 bnx2x_init_raw_obj(&o->raw, cl_id, cid, func_id, rdata, rdata_mapping,
1910 state, pstate, type);
1911}
1912
1913
1914void bnx2x_init_mac_obj(struct bnx2x *bp,
1915 struct bnx2x_vlan_mac_obj *mac_obj,
1916 u8 cl_id, u32 cid, u8 func_id, void *rdata,
1917 dma_addr_t rdata_mapping, int state,
1918 unsigned long *pstate, bnx2x_obj_type type,
1919 struct bnx2x_credit_pool_obj *macs_pool)
1920{
1921 union bnx2x_qable_obj *qable_obj = (union bnx2x_qable_obj *)mac_obj;
1922
1923 bnx2x_init_vlan_mac_common(mac_obj, cl_id, cid, func_id, rdata,
1924 rdata_mapping, state, pstate, type,
1925 macs_pool, NULL);
1926
1927 /* CAM credit pool handling */
1928 mac_obj->get_credit = bnx2x_get_credit_mac;
1929 mac_obj->put_credit = bnx2x_put_credit_mac;
1930 mac_obj->get_cam_offset = bnx2x_get_cam_offset_mac;
1931 mac_obj->put_cam_offset = bnx2x_put_cam_offset_mac;
1932
1933 if (CHIP_IS_E1x(bp)) {
1934 mac_obj->set_one_rule = bnx2x_set_one_mac_e1x;
1935 mac_obj->check_del = bnx2x_check_mac_del;
1936 mac_obj->check_add = bnx2x_check_mac_add;
1937 mac_obj->check_move = bnx2x_check_move_always_err;
1938 mac_obj->ramrod_cmd = RAMROD_CMD_ID_ETH_SET_MAC;
1939
1940 /* Exe Queue */
1941 bnx2x_exe_queue_init(bp,
1942 &mac_obj->exe_queue, 1, qable_obj,
1943 bnx2x_validate_vlan_mac,
Yuval Mintz460a25c2012-01-23 07:31:51 +00001944 bnx2x_remove_vlan_mac,
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03001945 bnx2x_optimize_vlan_mac,
1946 bnx2x_execute_vlan_mac,
1947 bnx2x_exeq_get_mac);
1948 } else {
1949 mac_obj->set_one_rule = bnx2x_set_one_mac_e2;
1950 mac_obj->check_del = bnx2x_check_mac_del;
1951 mac_obj->check_add = bnx2x_check_mac_add;
1952 mac_obj->check_move = bnx2x_check_move;
1953 mac_obj->ramrod_cmd =
1954 RAMROD_CMD_ID_ETH_CLASSIFICATION_RULES;
Ariel Eliored5162a2011-12-05 21:52:24 +00001955 mac_obj->get_n_elements = bnx2x_get_n_elements;
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03001956
1957 /* Exe Queue */
1958 bnx2x_exe_queue_init(bp,
1959 &mac_obj->exe_queue, CLASSIFY_RULES_COUNT,
1960 qable_obj, bnx2x_validate_vlan_mac,
Yuval Mintz460a25c2012-01-23 07:31:51 +00001961 bnx2x_remove_vlan_mac,
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03001962 bnx2x_optimize_vlan_mac,
1963 bnx2x_execute_vlan_mac,
1964 bnx2x_exeq_get_mac);
1965 }
1966}
1967
1968void bnx2x_init_vlan_obj(struct bnx2x *bp,
1969 struct bnx2x_vlan_mac_obj *vlan_obj,
1970 u8 cl_id, u32 cid, u8 func_id, void *rdata,
1971 dma_addr_t rdata_mapping, int state,
1972 unsigned long *pstate, bnx2x_obj_type type,
1973 struct bnx2x_credit_pool_obj *vlans_pool)
1974{
1975 union bnx2x_qable_obj *qable_obj = (union bnx2x_qable_obj *)vlan_obj;
1976
1977 bnx2x_init_vlan_mac_common(vlan_obj, cl_id, cid, func_id, rdata,
1978 rdata_mapping, state, pstate, type, NULL,
1979 vlans_pool);
1980
1981 vlan_obj->get_credit = bnx2x_get_credit_vlan;
1982 vlan_obj->put_credit = bnx2x_put_credit_vlan;
1983 vlan_obj->get_cam_offset = bnx2x_get_cam_offset_vlan;
1984 vlan_obj->put_cam_offset = bnx2x_put_cam_offset_vlan;
1985
1986 if (CHIP_IS_E1x(bp)) {
1987		BNX2X_ERR("Do not support chips other than E2 and newer\n");
1988 BUG();
1989 } else {
1990 vlan_obj->set_one_rule = bnx2x_set_one_vlan_e2;
1991 vlan_obj->check_del = bnx2x_check_vlan_del;
1992 vlan_obj->check_add = bnx2x_check_vlan_add;
1993 vlan_obj->check_move = bnx2x_check_move;
1994 vlan_obj->ramrod_cmd =
1995 RAMROD_CMD_ID_ETH_CLASSIFICATION_RULES;
1996
1997 /* Exe Queue */
1998 bnx2x_exe_queue_init(bp,
1999 &vlan_obj->exe_queue, CLASSIFY_RULES_COUNT,
2000 qable_obj, bnx2x_validate_vlan_mac,
Yuval Mintz460a25c2012-01-23 07:31:51 +00002001 bnx2x_remove_vlan_mac,
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03002002 bnx2x_optimize_vlan_mac,
2003 bnx2x_execute_vlan_mac,
2004 bnx2x_exeq_get_vlan);
2005 }
2006}
2007
2008void bnx2x_init_vlan_mac_obj(struct bnx2x *bp,
2009 struct bnx2x_vlan_mac_obj *vlan_mac_obj,
2010 u8 cl_id, u32 cid, u8 func_id, void *rdata,
2011 dma_addr_t rdata_mapping, int state,
2012 unsigned long *pstate, bnx2x_obj_type type,
2013 struct bnx2x_credit_pool_obj *macs_pool,
2014 struct bnx2x_credit_pool_obj *vlans_pool)
2015{
2016 union bnx2x_qable_obj *qable_obj =
2017 (union bnx2x_qable_obj *)vlan_mac_obj;
2018
2019 bnx2x_init_vlan_mac_common(vlan_mac_obj, cl_id, cid, func_id, rdata,
2020 rdata_mapping, state, pstate, type,
2021 macs_pool, vlans_pool);
2022
2023 /* CAM pool handling */
2024 vlan_mac_obj->get_credit = bnx2x_get_credit_vlan_mac;
2025 vlan_mac_obj->put_credit = bnx2x_put_credit_vlan_mac;
2026 /*
2027 * CAM offset is relevant for 57710 and 57711 chips only which have a
2028 * single CAM for both MACs and VLAN-MAC pairs. So the offset
2029 * will be taken from MACs' pool object only.
2030 */
2031 vlan_mac_obj->get_cam_offset = bnx2x_get_cam_offset_mac;
2032 vlan_mac_obj->put_cam_offset = bnx2x_put_cam_offset_mac;
2033
2034 if (CHIP_IS_E1(bp)) {
2035		BNX2X_ERR("Do not support chips other than E1H and newer\n");
2036 BUG();
2037 } else if (CHIP_IS_E1H(bp)) {
2038 vlan_mac_obj->set_one_rule = bnx2x_set_one_vlan_mac_e1h;
2039 vlan_mac_obj->check_del = bnx2x_check_vlan_mac_del;
2040 vlan_mac_obj->check_add = bnx2x_check_vlan_mac_add;
2041 vlan_mac_obj->check_move = bnx2x_check_move_always_err;
2042 vlan_mac_obj->ramrod_cmd = RAMROD_CMD_ID_ETH_SET_MAC;
2043
2044 /* Exe Queue */
2045 bnx2x_exe_queue_init(bp,
2046 &vlan_mac_obj->exe_queue, 1, qable_obj,
2047 bnx2x_validate_vlan_mac,
Yuval Mintz460a25c2012-01-23 07:31:51 +00002048 bnx2x_remove_vlan_mac,
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03002049 bnx2x_optimize_vlan_mac,
2050 bnx2x_execute_vlan_mac,
2051 bnx2x_exeq_get_vlan_mac);
2052 } else {
2053 vlan_mac_obj->set_one_rule = bnx2x_set_one_vlan_mac_e2;
2054 vlan_mac_obj->check_del = bnx2x_check_vlan_mac_del;
2055 vlan_mac_obj->check_add = bnx2x_check_vlan_mac_add;
2056 vlan_mac_obj->check_move = bnx2x_check_move;
2057 vlan_mac_obj->ramrod_cmd =
2058 RAMROD_CMD_ID_ETH_CLASSIFICATION_RULES;
2059
2060 /* Exe Queue */
2061 bnx2x_exe_queue_init(bp,
2062 &vlan_mac_obj->exe_queue,
2063 CLASSIFY_RULES_COUNT,
2064 qable_obj, bnx2x_validate_vlan_mac,
Yuval Mintz460a25c2012-01-23 07:31:51 +00002065 bnx2x_remove_vlan_mac,
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03002066 bnx2x_optimize_vlan_mac,
2067 bnx2x_execute_vlan_mac,
2068 bnx2x_exeq_get_vlan_mac);
2069 }
2070
2071}
2072
2073/* RX_MODE verbs: DROP_ALL/ACCEPT_ALL/ACCEPT_ALL_MULTI/ACCEPT_ALL_VLAN/NORMAL */
2074static inline void __storm_memset_mac_filters(struct bnx2x *bp,
2075 struct tstorm_eth_mac_filter_config *mac_filters,
2076 u16 pf_id)
2077{
2078 size_t size = sizeof(struct tstorm_eth_mac_filter_config);
2079
2080 u32 addr = BAR_TSTRORM_INTMEM +
2081 TSTORM_MAC_FILTER_CONFIG_OFFSET(pf_id);
2082
2083 __storm_memset_struct(bp, addr, size, (u32 *)mac_filters);
2084}
2085
2086static int bnx2x_set_rx_mode_e1x(struct bnx2x *bp,
2087 struct bnx2x_rx_mode_ramrod_params *p)
2088{
2089 /* update the bp MAC filter structure */
2090 u32 mask = (1 << p->cl_id);
2091
2092 struct tstorm_eth_mac_filter_config *mac_filters =
2093 (struct tstorm_eth_mac_filter_config *)p->rdata;
2094
2095	/* initial setting is drop-all */
2096 u8 drop_all_ucast = 1, drop_all_mcast = 1;
Vladislav Zolotarov042181f2011-06-14 01:33:39 +00002097 u8 accp_all_ucast = 0, accp_all_bcast = 0, accp_all_mcast = 0;
2098 u8 unmatched_unicast = 0;
2099
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03002100	/* In E1x we only take the Rx accept flags into account since Tx switching
2101	 * isn't enabled. */
2102 if (test_bit(BNX2X_ACCEPT_UNICAST, &p->rx_accept_flags))
Vladislav Zolotarov042181f2011-06-14 01:33:39 +00002103 /* accept matched ucast */
2104 drop_all_ucast = 0;
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03002105
2106 if (test_bit(BNX2X_ACCEPT_MULTICAST, &p->rx_accept_flags))
Vladislav Zolotarov042181f2011-06-14 01:33:39 +00002107 /* accept matched mcast */
2108 drop_all_mcast = 0;
2109
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03002110 if (test_bit(BNX2X_ACCEPT_ALL_UNICAST, &p->rx_accept_flags)) {
Vladislav Zolotarov042181f2011-06-14 01:33:39 +00002111		/* accept all ucast */
2112 drop_all_ucast = 0;
2113 accp_all_ucast = 1;
2114 }
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03002115 if (test_bit(BNX2X_ACCEPT_ALL_MULTICAST, &p->rx_accept_flags)) {
Vladislav Zolotarov042181f2011-06-14 01:33:39 +00002116 /* accept all mcast */
2117 drop_all_mcast = 0;
2118 accp_all_mcast = 1;
2119 }
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03002120 if (test_bit(BNX2X_ACCEPT_BROADCAST, &p->rx_accept_flags))
Vladislav Zolotarov042181f2011-06-14 01:33:39 +00002121 /* accept (all) bcast */
Vladislav Zolotarov042181f2011-06-14 01:33:39 +00002122 accp_all_bcast = 1;
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03002123 if (test_bit(BNX2X_ACCEPT_UNMATCHED, &p->rx_accept_flags))
2124 /* accept unmatched unicasts */
2125 unmatched_unicast = 1;
Vladislav Zolotarov042181f2011-06-14 01:33:39 +00002126
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03002127 mac_filters->ucast_drop_all = drop_all_ucast ?
2128 mac_filters->ucast_drop_all | mask :
2129 mac_filters->ucast_drop_all & ~mask;
Vladislav Zolotarov042181f2011-06-14 01:33:39 +00002130
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03002131 mac_filters->mcast_drop_all = drop_all_mcast ?
2132 mac_filters->mcast_drop_all | mask :
2133 mac_filters->mcast_drop_all & ~mask;
Vladislav Zolotarov042181f2011-06-14 01:33:39 +00002134
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03002135 mac_filters->ucast_accept_all = accp_all_ucast ?
2136 mac_filters->ucast_accept_all | mask :
2137 mac_filters->ucast_accept_all & ~mask;
Vladislav Zolotarov042181f2011-06-14 01:33:39 +00002138
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03002139 mac_filters->mcast_accept_all = accp_all_mcast ?
2140 mac_filters->mcast_accept_all | mask :
2141 mac_filters->mcast_accept_all & ~mask;
Vladislav Zolotarov042181f2011-06-14 01:33:39 +00002142
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03002143 mac_filters->bcast_accept_all = accp_all_bcast ?
2144 mac_filters->bcast_accept_all | mask :
2145 mac_filters->bcast_accept_all & ~mask;
Vladislav Zolotarov042181f2011-06-14 01:33:39 +00002146
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03002147 mac_filters->unmatched_unicast = unmatched_unicast ?
2148 mac_filters->unmatched_unicast | mask :
2149 mac_filters->unmatched_unicast & ~mask;
Vladislav Zolotarov042181f2011-06-14 01:33:39 +00002150
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03002151	DP(BNX2X_MSG_SP, "drop_ucast 0x%x\ndrop_mcast 0x%x\naccp_ucast 0x%x\n"
2152 "accp_mcast 0x%x\naccp_bcast 0x%x\n",
2153 mac_filters->ucast_drop_all,
2154 mac_filters->mcast_drop_all,
2155 mac_filters->ucast_accept_all,
2156 mac_filters->mcast_accept_all,
2157 mac_filters->bcast_accept_all);
2158
2159	/* write the MAC filter structure */
2160 __storm_memset_mac_filters(bp, mac_filters, p->func_id);
2161
2162 /* The operation is completed */
2163 clear_bit(p->state, p->pstate);
2164 smp_mb__after_clear_bit();
2165
2166 return 0;
Vladislav Zolotarov042181f2011-06-14 01:33:39 +00002167}
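/* Note on the update pattern above: each tstorm_eth_mac_filter_config field
 * is a bitmask over client IDs, so every "x = flag ? x | mask : x & ~mask"
 * statement sets or clears only this client's bit (1 << p->cl_id) and
 * preserves the filtering state of all other clients sharing the structure.
 */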
2168
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03002169/* Setup ramrod data */
2170static inline void bnx2x_rx_mode_set_rdata_hdr_e2(u32 cid,
2171 struct eth_classify_header *hdr,
2172 u8 rule_cnt)
Vladislav Zolotarov042181f2011-06-14 01:33:39 +00002173{
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03002174 hdr->echo = cid;
2175 hdr->rule_cnt = rule_cnt;
2176}
Vladislav Zolotarov042181f2011-06-14 01:33:39 +00002177
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03002178static inline void bnx2x_rx_mode_set_cmd_state_e2(struct bnx2x *bp,
2179 unsigned long accept_flags,
2180 struct eth_filter_rules_cmd *cmd,
2181 bool clear_accept_all)
2182{
2183 u16 state;
Vladislav Zolotarov042181f2011-06-14 01:33:39 +00002184
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03002185 /* start with 'drop-all' */
2186 state = ETH_FILTER_RULES_CMD_UCAST_DROP_ALL |
2187 ETH_FILTER_RULES_CMD_MCAST_DROP_ALL;
2188
2189 if (accept_flags) {
2190 if (test_bit(BNX2X_ACCEPT_UNICAST, &accept_flags))
2191 state &= ~ETH_FILTER_RULES_CMD_UCAST_DROP_ALL;
2192
2193 if (test_bit(BNX2X_ACCEPT_MULTICAST, &accept_flags))
2194 state &= ~ETH_FILTER_RULES_CMD_MCAST_DROP_ALL;
2195
2196 if (test_bit(BNX2X_ACCEPT_ALL_UNICAST, &accept_flags)) {
2197 state &= ~ETH_FILTER_RULES_CMD_UCAST_DROP_ALL;
2198 state |= ETH_FILTER_RULES_CMD_UCAST_ACCEPT_ALL;
Vladislav Zolotarov042181f2011-06-14 01:33:39 +00002199 }
Vladislav Zolotarov042181f2011-06-14 01:33:39 +00002200
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03002201 if (test_bit(BNX2X_ACCEPT_ALL_MULTICAST, &accept_flags)) {
2202 state |= ETH_FILTER_RULES_CMD_MCAST_ACCEPT_ALL;
2203 state &= ~ETH_FILTER_RULES_CMD_MCAST_DROP_ALL;
Vladislav Zolotarov042181f2011-06-14 01:33:39 +00002204 }
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03002205 if (test_bit(BNX2X_ACCEPT_BROADCAST, &accept_flags))
2206 state |= ETH_FILTER_RULES_CMD_BCAST_ACCEPT_ALL;
Vladislav Zolotarov042181f2011-06-14 01:33:39 +00002207
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03002208 if (test_bit(BNX2X_ACCEPT_UNMATCHED, &accept_flags)) {
2209 state &= ~ETH_FILTER_RULES_CMD_UCAST_DROP_ALL;
2210 state |= ETH_FILTER_RULES_CMD_UCAST_ACCEPT_UNMATCHED;
2211 }
2212 if (test_bit(BNX2X_ACCEPT_ANY_VLAN, &accept_flags))
2213 state |= ETH_FILTER_RULES_CMD_ACCEPT_ANY_VLAN;
2214 }
2215
2216 /* Clear ACCEPT_ALL_XXX flags for FCoE L2 Queue */
2217 if (clear_accept_all) {
2218 state &= ~ETH_FILTER_RULES_CMD_MCAST_ACCEPT_ALL;
2219 state &= ~ETH_FILTER_RULES_CMD_BCAST_ACCEPT_ALL;
2220 state &= ~ETH_FILTER_RULES_CMD_UCAST_ACCEPT_ALL;
2221 state &= ~ETH_FILTER_RULES_CMD_UCAST_ACCEPT_UNMATCHED;
2222 }
2223
2224 cmd->state = cpu_to_le16(state);
2226}
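/* Worked example for the translation above (clear_accept_all == false): an
 * accept_flags value with only BNX2X_ACCEPT_UNICAST and BNX2X_ACCEPT_BROADCAST
 * set starts from UCAST_DROP_ALL | MCAST_DROP_ALL, clears UCAST_DROP_ALL and
 * adds BCAST_ACCEPT_ALL, so the resulting state is
 * ETH_FILTER_RULES_CMD_MCAST_DROP_ALL | ETH_FILTER_RULES_CMD_BCAST_ACCEPT_ALL,
 * i.e. the usual "normal" Rx mode.
 */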
2227
2228static int bnx2x_set_rx_mode_e2(struct bnx2x *bp,
2229 struct bnx2x_rx_mode_ramrod_params *p)
2230{
2231 struct eth_filter_rules_ramrod_data *data = p->rdata;
2232 int rc;
2233 u8 rule_idx = 0;
2234
2235 /* Reset the ramrod data buffer */
2236 memset(data, 0, sizeof(*data));
2237
2238 /* Setup ramrod data */
2239
2240 /* Tx (internal switching) */
2241 if (test_bit(RAMROD_TX, &p->ramrod_flags)) {
2242 data->rules[rule_idx].client_id = p->cl_id;
2243 data->rules[rule_idx].func_id = p->func_id;
2244
2245 data->rules[rule_idx].cmd_general_data =
2246 ETH_FILTER_RULES_CMD_TX_CMD;
2247
2248 bnx2x_rx_mode_set_cmd_state_e2(bp, p->tx_accept_flags,
2249 &(data->rules[rule_idx++]), false);
2250 }
2251
2252 /* Rx */
2253 if (test_bit(RAMROD_RX, &p->ramrod_flags)) {
2254 data->rules[rule_idx].client_id = p->cl_id;
2255 data->rules[rule_idx].func_id = p->func_id;
2256
2257 data->rules[rule_idx].cmd_general_data =
2258 ETH_FILTER_RULES_CMD_RX_CMD;
2259
2260 bnx2x_rx_mode_set_cmd_state_e2(bp, p->rx_accept_flags,
2261 &(data->rules[rule_idx++]), false);
2262 }
2263
2264
2265 /*
2266	 * If FCoE Queue configuration has been requested, configure the Rx and
2267	 * internal switching modes for this queue in separate rules.
2268	 *
2269	 * The FCoE queue shall never be set to ACCEPT_ALL packets of any sort:
2270 * MCAST_ALL, UCAST_ALL, BCAST_ALL and UNMATCHED.
2271 */
2272 if (test_bit(BNX2X_RX_MODE_FCOE_ETH, &p->rx_mode_flags)) {
2273 /* Tx (internal switching) */
2274 if (test_bit(RAMROD_TX, &p->ramrod_flags)) {
2275 data->rules[rule_idx].client_id = bnx2x_fcoe(bp, cl_id);
2276 data->rules[rule_idx].func_id = p->func_id;
2277
2278 data->rules[rule_idx].cmd_general_data =
2279 ETH_FILTER_RULES_CMD_TX_CMD;
2280
2281 bnx2x_rx_mode_set_cmd_state_e2(bp, p->tx_accept_flags,
2282 &(data->rules[rule_idx++]),
2283 true);
2284 }
2285
2286 /* Rx */
2287 if (test_bit(RAMROD_RX, &p->ramrod_flags)) {
2288 data->rules[rule_idx].client_id = bnx2x_fcoe(bp, cl_id);
2289 data->rules[rule_idx].func_id = p->func_id;
2290
2291 data->rules[rule_idx].cmd_general_data =
2292 ETH_FILTER_RULES_CMD_RX_CMD;
2293
2294 bnx2x_rx_mode_set_cmd_state_e2(bp, p->rx_accept_flags,
2295 &(data->rules[rule_idx++]),
2296 true);
2297 }
2298 }
2299
2300 /*
2301 * Set the ramrod header (most importantly - number of rules to
2302 * configure).
2303 */
2304 bnx2x_rx_mode_set_rdata_hdr_e2(p->cid, &data->header, rule_idx);
2305
2306 DP(BNX2X_MSG_SP, "About to configure %d rules, rx_accept_flags 0x%lx, "
2307 "tx_accept_flags 0x%lx\n",
2308 data->header.rule_cnt, p->rx_accept_flags,
2309 p->tx_accept_flags);
2310
Vladislav Zolotarov53e51e22011-07-19 01:45:02 +00002311 /*
2312	 * No need for an explicit memory barrier here: we must order the
2313	 * write to the SPQ element before the update of the SPQ producer,
2314	 * and since the latter involves a memory read, the full memory
2315	 * barrier inside bnx2x_sp_post() already provides the required
2316	 * ordering.
2317 */
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03002318
2319 /* Send a ramrod */
2320 rc = bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_FILTER_RULES, p->cid,
2321 U64_HI(p->rdata_mapping),
2322 U64_LO(p->rdata_mapping),
2323 ETH_CONNECTION_TYPE);
2324 if (rc)
2325 return rc;
2326
2327 /* Ramrod completion is pending */
2328 return 1;
2329}
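/* With both RAMROD_TX and RAMROD_RX set and an FCoE queue requested, the
 * function above fills up to four rules into a single ramrod (Tx, Rx,
 * FCoE Tx, FCoE Rx), and data->header.rule_cnt reflects exactly the number
 * of rules that were actually filled in.
 */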
2330
2331static int bnx2x_wait_rx_mode_comp_e2(struct bnx2x *bp,
2332 struct bnx2x_rx_mode_ramrod_params *p)
2333{
2334 return bnx2x_state_wait(bp, p->state, p->pstate);
2335}
2336
2337static int bnx2x_empty_rx_mode_wait(struct bnx2x *bp,
2338 struct bnx2x_rx_mode_ramrod_params *p)
2339{
2340 /* Do nothing */
2341 return 0;
2342}
2343
2344int bnx2x_config_rx_mode(struct bnx2x *bp,
2345 struct bnx2x_rx_mode_ramrod_params *p)
2346{
2347 int rc;
2348
2349 /* Configure the new classification in the chip */
2350 rc = p->rx_mode_obj->config_rx_mode(bp, p);
2351 if (rc < 0)
2352 return rc;
2353
2354 /* Wait for a ramrod completion if was requested */
2355 if (test_bit(RAMROD_COMP_WAIT, &p->ramrod_flags)) {
2356 rc = p->rx_mode_obj->wait_comp(bp, p);
2357 if (rc)
2358 return rc;
2359 }
2360
2361 return rc;
2362}
2363
2364void bnx2x_init_rx_mode_obj(struct bnx2x *bp,
2365 struct bnx2x_rx_mode_obj *o)
2366{
2367 if (CHIP_IS_E1x(bp)) {
2368 o->wait_comp = bnx2x_empty_rx_mode_wait;
2369 o->config_rx_mode = bnx2x_set_rx_mode_e1x;
2370 } else {
2371 o->wait_comp = bnx2x_wait_rx_mode_comp_e2;
2372 o->config_rx_mode = bnx2x_set_rx_mode_e2;
2373 }
2374}
2375
2376/********************* Multicast verbs: SET, CLEAR ****************************/
2377static inline u8 bnx2x_mcast_bin_from_mac(u8 *mac)
2378{
2379 return (crc32c_le(0, mac, ETH_ALEN) >> 24) & 0xff;
2380}
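/* The bin index above is the top byte of the little-endian CRC32C of the
 * MAC address, i.e. a value in [0, 255] selecting one of the 256
 * approximate-match bins kept in the BNX2X_MCAST_VEC_SZ u64 words of the
 * registry vector.
 */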
2381
2382struct bnx2x_mcast_mac_elem {
2383 struct list_head link;
2384 u8 mac[ETH_ALEN];
2385 u8 pad[2]; /* For a natural alignment of the following buffer */
2386};
2387
2388struct bnx2x_pending_mcast_cmd {
2389 struct list_head link;
2390 int type; /* BNX2X_MCAST_CMD_X */
2391 union {
2392 struct list_head macs_head;
2393 u32 macs_num; /* Needed for DEL command */
2394 int next_bin; /* Needed for RESTORE flow with aprox match */
2395 } data;
2396
2397	bool done; /* set to true when the command has been handled; it is
2398		    * practically relevant for 57712 handling only, where one
2399		    * pending command may be handled in a few operations. Since
2400		    * for other chips every operation is completed in a single
2401		    * ramrod, there is no need to utilize this field.
2402 */
2403};
2404
2405static int bnx2x_mcast_wait(struct bnx2x *bp,
2406 struct bnx2x_mcast_obj *o)
2407{
2408 if (bnx2x_state_wait(bp, o->sched_state, o->raw.pstate) ||
2409 o->raw.wait_comp(bp, &o->raw))
2410 return -EBUSY;
2411
2412 return 0;
2413}
2414
2415static int bnx2x_mcast_enqueue_cmd(struct bnx2x *bp,
2416 struct bnx2x_mcast_obj *o,
2417 struct bnx2x_mcast_ramrod_params *p,
2418 int cmd)
2419{
2420 int total_sz;
2421 struct bnx2x_pending_mcast_cmd *new_cmd;
2422 struct bnx2x_mcast_mac_elem *cur_mac = NULL;
2423 struct bnx2x_mcast_list_elem *pos;
2424 int macs_list_len = ((cmd == BNX2X_MCAST_CMD_ADD) ?
2425 p->mcast_list_len : 0);
2426
2427 /* If the command is empty ("handle pending commands only"), break */
2428 if (!p->mcast_list_len)
2429 return 0;
2430
2431 total_sz = sizeof(*new_cmd) +
2432 macs_list_len * sizeof(struct bnx2x_mcast_mac_elem);
2433
2434 /* Add mcast is called under spin_lock, thus calling with GFP_ATOMIC */
2435 new_cmd = kzalloc(total_sz, GFP_ATOMIC);
2436
2437 if (!new_cmd)
2438 return -ENOMEM;
2439
2440 DP(BNX2X_MSG_SP, "About to enqueue a new %d command. "
2441 "macs_list_len=%d\n", cmd, macs_list_len);
2442
2443 INIT_LIST_HEAD(&new_cmd->data.macs_head);
2444
2445 new_cmd->type = cmd;
2446 new_cmd->done = false;
2447
2448 switch (cmd) {
2449 case BNX2X_MCAST_CMD_ADD:
2450 cur_mac = (struct bnx2x_mcast_mac_elem *)
2451 ((u8 *)new_cmd + sizeof(*new_cmd));
2452
2453		/* Push the MACs of the current command into the pending command
2454 * MACs list: FIFO
Vladislav Zolotarov042181f2011-06-14 01:33:39 +00002455 */
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03002456 list_for_each_entry(pos, &p->mcast_list, link) {
2457 memcpy(cur_mac->mac, pos->mac, ETH_ALEN);
2458 list_add_tail(&cur_mac->link, &new_cmd->data.macs_head);
2459 cur_mac++;
Vladislav Zolotarov042181f2011-06-14 01:33:39 +00002460 }
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03002461
Vladislav Zolotarov042181f2011-06-14 01:33:39 +00002462 break;
2463
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03002464 case BNX2X_MCAST_CMD_DEL:
2465 new_cmd->data.macs_num = p->mcast_list_len;
2466 break;
2467
2468 case BNX2X_MCAST_CMD_RESTORE:
2469 new_cmd->data.next_bin = 0;
Vladislav Zolotarov042181f2011-06-14 01:33:39 +00002470 break;
2471
2472 default:
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03002473 BNX2X_ERR("Unknown command: %d\n", cmd);
2474 return -EINVAL;
2475 }
2476
2477 /* Push the new pending command to the tail of the pending list: FIFO */
2478 list_add_tail(&new_cmd->link, &o->pending_cmds_head);
2479
2480 o->set_sched(o);
2481
2482 return 1;
2483}
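/* Memory layout of an ADD command as allocated above (a single kzalloc):
 *
 *	+-----------------------------------+  <- new_cmd
 *	| struct bnx2x_pending_mcast_cmd    |
 *	+-----------------------------------+  <- cur_mac
 *	| macs_list_len instances of        |
 *	| struct bnx2x_mcast_mac_elem       |
 *	+-----------------------------------+
 *
 * The trailing elements are linked into new_cmd->data.macs_head, so freeing
 * new_cmd releases the MAC elements together with the command itself.
 */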
2484
2485/**
2486 * bnx2x_mcast_get_next_bin - get the next set bin (index)
2487 *
2488 * @o: multicast object
2489 * @last: index to start looking from (including)
2490 *
2491 * returns the next found (set) bin or a negative value if none is found.
2492 */
2493static inline int bnx2x_mcast_get_next_bin(struct bnx2x_mcast_obj *o, int last)
2494{
2495 int i, j, inner_start = last % BIT_VEC64_ELEM_SZ;
2496
2497 for (i = last / BIT_VEC64_ELEM_SZ; i < BNX2X_MCAST_VEC_SZ; i++) {
2498 if (o->registry.aprox_match.vec[i])
2499 for (j = inner_start; j < BIT_VEC64_ELEM_SZ; j++) {
2500 int cur_bit = j + BIT_VEC64_ELEM_SZ * i;
2501 if (BIT_VEC64_TEST_BIT(o->registry.aprox_match.
2502 vec, cur_bit)) {
2503 return cur_bit;
2504 }
2505 }
2506 inner_start = 0;
2507 }
2508
2509 /* None found */
2510 return -1;
2511}
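/* Example for the scan above: with BIT_VEC64_ELEM_SZ == 64, a call with
 * last == 70 starts at vec[1] (70 / 64), inner bit 6 (70 % 64); once that
 * word is exhausted, inner_start drops to 0 for all following words.
 */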
2512
2513/**
2514 * bnx2x_mcast_clear_first_bin - find the first set bin and clear it
2515 *
2516 * @o:
2517 *
2518 * returns the index of the found bin or -1 if none is found
2519 */
2520static inline int bnx2x_mcast_clear_first_bin(struct bnx2x_mcast_obj *o)
2521{
2522 int cur_bit = bnx2x_mcast_get_next_bin(o, 0);
2523
2524 if (cur_bit >= 0)
2525 BIT_VEC64_CLEAR_BIT(o->registry.aprox_match.vec, cur_bit);
2526
2527 return cur_bit;
2528}
2529
2530static inline u8 bnx2x_mcast_get_rx_tx_flag(struct bnx2x_mcast_obj *o)
2531{
2532 struct bnx2x_raw_obj *raw = &o->raw;
2533 u8 rx_tx_flag = 0;
2534
2535 if ((raw->obj_type == BNX2X_OBJ_TYPE_TX) ||
2536 (raw->obj_type == BNX2X_OBJ_TYPE_RX_TX))
2537 rx_tx_flag |= ETH_MULTICAST_RULES_CMD_TX_CMD;
2538
2539 if ((raw->obj_type == BNX2X_OBJ_TYPE_RX) ||
2540 (raw->obj_type == BNX2X_OBJ_TYPE_RX_TX))
2541 rx_tx_flag |= ETH_MULTICAST_RULES_CMD_RX_CMD;
2542
2543 return rx_tx_flag;
2544}
2545
2546static void bnx2x_mcast_set_one_rule_e2(struct bnx2x *bp,
2547 struct bnx2x_mcast_obj *o, int idx,
2548 union bnx2x_mcast_config_data *cfg_data,
2549 int cmd)
2550{
2551 struct bnx2x_raw_obj *r = &o->raw;
2552 struct eth_multicast_rules_ramrod_data *data =
2553 (struct eth_multicast_rules_ramrod_data *)(r->rdata);
2554 u8 func_id = r->func_id;
2555 u8 rx_tx_add_flag = bnx2x_mcast_get_rx_tx_flag(o);
2556 int bin;
2557
2558 if ((cmd == BNX2X_MCAST_CMD_ADD) || (cmd == BNX2X_MCAST_CMD_RESTORE))
2559 rx_tx_add_flag |= ETH_MULTICAST_RULES_CMD_IS_ADD;
2560
2561 data->rules[idx].cmd_general_data |= rx_tx_add_flag;
2562
2563	/* Get a bin and update the bins' vector */
2564 switch (cmd) {
2565 case BNX2X_MCAST_CMD_ADD:
2566 bin = bnx2x_mcast_bin_from_mac(cfg_data->mac);
2567 BIT_VEC64_SET_BIT(o->registry.aprox_match.vec, bin);
Vladislav Zolotarov042181f2011-06-14 01:33:39 +00002568 break;
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03002569
2570 case BNX2X_MCAST_CMD_DEL:
2571 /* If there were no more bins to clear
2572 * (bnx2x_mcast_clear_first_bin() returns -1) then we would
2573 * clear any (0xff) bin.
2574		 * See bnx2x_mcast_validate_e2() for an explanation of when it may
2575 * happen.
2576 */
2577 bin = bnx2x_mcast_clear_first_bin(o);
2578 break;
2579
2580 case BNX2X_MCAST_CMD_RESTORE:
2581 bin = cfg_data->bin;
2582 break;
2583
2584 default:
2585 BNX2X_ERR("Unknown command: %d\n", cmd);
2586 return;
Vladislav Zolotarov042181f2011-06-14 01:33:39 +00002587 }
2588
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03002589 DP(BNX2X_MSG_SP, "%s bin %d\n",
2590 ((rx_tx_add_flag & ETH_MULTICAST_RULES_CMD_IS_ADD) ?
2591 "Setting" : "Clearing"), bin);
Vladislav Zolotarov042181f2011-06-14 01:33:39 +00002592
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03002593 data->rules[idx].bin_id = (u8)bin;
2594 data->rules[idx].func_id = func_id;
2595 data->rules[idx].engine_id = o->engine_id;
Vladislav Zolotarov042181f2011-06-14 01:33:39 +00002596}
2597
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03002598/**
2599 * bnx2x_mcast_handle_restore_cmd_e2 - restore configuration from the registry
2600 *
2601 * @bp: device handle
2602 * @o:
2603 * @start_bin: index in the registry to start from (including)
2604 * @rdata_idx: index in the ramrod data to start from
2605 *
2606 * returns last handled bin index or -1 if all bins have been handled
2607 */
2608static inline int bnx2x_mcast_handle_restore_cmd_e2(
2609	struct bnx2x *bp, struct bnx2x_mcast_obj *o, int start_bin,
2610 int *rdata_idx)
Vladislav Zolotarov042181f2011-06-14 01:33:39 +00002611{
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03002612 int cur_bin, cnt = *rdata_idx;
2613 union bnx2x_mcast_config_data cfg_data = {0};
Vladislav Zolotarov042181f2011-06-14 01:33:39 +00002614
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03002615 /* go through the registry and configure the bins from it */
2616 for (cur_bin = bnx2x_mcast_get_next_bin(o, start_bin); cur_bin >= 0;
2617 cur_bin = bnx2x_mcast_get_next_bin(o, cur_bin + 1)) {
Vladislav Zolotarov042181f2011-06-14 01:33:39 +00002618
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03002619 cfg_data.bin = (u8)cur_bin;
2620 o->set_one_rule(bp, o, cnt, &cfg_data,
2621 BNX2X_MCAST_CMD_RESTORE);
Vladislav Zolotarov042181f2011-06-14 01:33:39 +00002622
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03002623 cnt++;
Vladislav Zolotarov042181f2011-06-14 01:33:39 +00002624
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03002625 DP(BNX2X_MSG_SP, "About to configure a bin %d\n", cur_bin);
Vladislav Zolotarov042181f2011-06-14 01:33:39 +00002626
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03002627 /* Break if we reached the maximum number
2628 * of rules.
2629 */
2630 if (cnt >= o->max_cmd_len)
2631 break;
Vladislav Zolotarov042181f2011-06-14 01:33:39 +00002632 }
2633
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03002634 *rdata_idx = cnt;
2635
2636 return cur_bin;
Vladislav Zolotarov042181f2011-06-14 01:33:39 +00002637}
2638
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03002639static inline void bnx2x_mcast_hdl_pending_add_e2(struct bnx2x *bp,
2640 struct bnx2x_mcast_obj *o, struct bnx2x_pending_mcast_cmd *cmd_pos,
2641 int *line_idx)
Vladislav Zolotarov042181f2011-06-14 01:33:39 +00002642{
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03002643 struct bnx2x_mcast_mac_elem *pmac_pos, *pmac_pos_n;
2644 int cnt = *line_idx;
2645 union bnx2x_mcast_config_data cfg_data = {0};
2646
2647 list_for_each_entry_safe(pmac_pos, pmac_pos_n, &cmd_pos->data.macs_head,
2648 link) {
2649
2650 cfg_data.mac = &pmac_pos->mac[0];
2651 o->set_one_rule(bp, o, cnt, &cfg_data, cmd_pos->type);
2652
2653 cnt++;
2654
Joe Perches0f9dad12011-08-14 12:16:19 +00002655 DP(BNX2X_MSG_SP, "About to configure %pM mcast MAC\n",
2656 pmac_pos->mac);
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03002657
2658 list_del(&pmac_pos->link);
2659
2660 /* Break if we reached the maximum number
2661 * of rules.
2662 */
2663 if (cnt >= o->max_cmd_len)
2664 break;
2665 }
2666
2667 *line_idx = cnt;
2668
2669 /* if no more MACs to configure - we are done */
2670 if (list_empty(&cmd_pos->data.macs_head))
2671 cmd_pos->done = true;
2672}
2673
2674static inline void bnx2x_mcast_hdl_pending_del_e2(struct bnx2x *bp,
2675 struct bnx2x_mcast_obj *o, struct bnx2x_pending_mcast_cmd *cmd_pos,
2676 int *line_idx)
2677{
2678 int cnt = *line_idx;
2679
2680 while (cmd_pos->data.macs_num) {
2681 o->set_one_rule(bp, o, cnt, NULL, cmd_pos->type);
2682
2683 cnt++;
2684
2685 cmd_pos->data.macs_num--;
2686
2687		DP(BNX2X_MSG_SP, "Deleting MAC. %d left, cnt is %d\n",
2688 cmd_pos->data.macs_num, cnt);
2689
2690 /* Break if we reached the maximum
2691 * number of rules.
2692 */
2693 if (cnt >= o->max_cmd_len)
2694 break;
2695 }
2696
2697 *line_idx = cnt;
2698
2699 /* If we cleared all bins - we are done */
2700 if (!cmd_pos->data.macs_num)
2701 cmd_pos->done = true;
2702}
2703
2704static inline void bnx2x_mcast_hdl_pending_restore_e2(struct bnx2x *bp,
2705 struct bnx2x_mcast_obj *o, struct bnx2x_pending_mcast_cmd *cmd_pos,
2706 int *line_idx)
2707{
2708 cmd_pos->data.next_bin = o->hdl_restore(bp, o, cmd_pos->data.next_bin,
2709 line_idx);
2710
2711 if (cmd_pos->data.next_bin < 0)
2712 /* If o->set_restore returned -1 we are done */
2713 cmd_pos->done = true;
2714 else
2715 /* Start from the next bin next time */
2716 cmd_pos->data.next_bin++;
2717}
2718
2719static inline int bnx2x_mcast_handle_pending_cmds_e2(struct bnx2x *bp,
2720 struct bnx2x_mcast_ramrod_params *p)
2721{
2722 struct bnx2x_pending_mcast_cmd *cmd_pos, *cmd_pos_n;
2723 int cnt = 0;
2724 struct bnx2x_mcast_obj *o = p->mcast_obj;
2725
2726 list_for_each_entry_safe(cmd_pos, cmd_pos_n, &o->pending_cmds_head,
2727 link) {
2728 switch (cmd_pos->type) {
2729 case BNX2X_MCAST_CMD_ADD:
2730 bnx2x_mcast_hdl_pending_add_e2(bp, o, cmd_pos, &cnt);
2731 break;
2732
2733 case BNX2X_MCAST_CMD_DEL:
2734 bnx2x_mcast_hdl_pending_del_e2(bp, o, cmd_pos, &cnt);
2735 break;
2736
2737 case BNX2X_MCAST_CMD_RESTORE:
2738 bnx2x_mcast_hdl_pending_restore_e2(bp, o, cmd_pos,
2739 &cnt);
2740 break;
2741
2742 default:
2743 BNX2X_ERR("Unknown command: %d\n", cmd_pos->type);
2744 return -EINVAL;
2745 }
2746
2747 /* If the command has been completed - remove it from the list
2748 * and free the memory
2749 */
2750 if (cmd_pos->done) {
2751 list_del(&cmd_pos->link);
2752 kfree(cmd_pos);
2753 }
2754
2755 /* Break if we reached the maximum number of rules */
2756 if (cnt >= o->max_cmd_len)
2757 break;
2758 }
2759
2760 return cnt;
2761}
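/* Note: a pending command that does not fit into the current ramrod (cnt
 * reaches o->max_cmd_len) keeps done == false and stays on the list; it is
 * resumed from the point it stopped at when the next ramrod completion
 * re-enters this function.
 */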
2762
2763static inline void bnx2x_mcast_hdl_add(struct bnx2x *bp,
2764 struct bnx2x_mcast_obj *o, struct bnx2x_mcast_ramrod_params *p,
2765 int *line_idx)
2766{
2767 struct bnx2x_mcast_list_elem *mlist_pos;
2768 union bnx2x_mcast_config_data cfg_data = {0};
2769 int cnt = *line_idx;
2770
2771 list_for_each_entry(mlist_pos, &p->mcast_list, link) {
2772 cfg_data.mac = mlist_pos->mac;
2773 o->set_one_rule(bp, o, cnt, &cfg_data, BNX2X_MCAST_CMD_ADD);
2774
2775 cnt++;
2776
Joe Perches0f9dad12011-08-14 12:16:19 +00002777 DP(BNX2X_MSG_SP, "About to configure %pM mcast MAC\n",
2778 mlist_pos->mac);
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03002779 }
2780
2781 *line_idx = cnt;
2782}
2783
2784static inline void bnx2x_mcast_hdl_del(struct bnx2x *bp,
2785 struct bnx2x_mcast_obj *o, struct bnx2x_mcast_ramrod_params *p,
2786 int *line_idx)
2787{
2788 int cnt = *line_idx, i;
2789
2790 for (i = 0; i < p->mcast_list_len; i++) {
2791 o->set_one_rule(bp, o, cnt, NULL, BNX2X_MCAST_CMD_DEL);
2792
2793 cnt++;
2794
2795 DP(BNX2X_MSG_SP, "Deleting MAC. %d left\n",
2796 p->mcast_list_len - i - 1);
2797 }
2798
2799 *line_idx = cnt;
2800}
2801
2802/**
2803 * bnx2x_mcast_handle_current_cmd - handle the current (non-pending) command
2804 *
2805 * @bp: device handle
2806 * @p: multicast ramrod parameters
2807 * @cmd: command to handle (BNX2X_MCAST_CMD_X)
2808 * @start_cnt: first line in the ramrod data that may be used
2809 *
2810 * This function is called iff there is enough room for the current command in
2811 * the ramrod data.
2812 * Returns number of lines filled in the ramrod data in total.
2813 */
2814static inline int bnx2x_mcast_handle_current_cmd(struct bnx2x *bp,
2815 struct bnx2x_mcast_ramrod_params *p, int cmd,
2816 int start_cnt)
2817{
2818 struct bnx2x_mcast_obj *o = p->mcast_obj;
2819 int cnt = start_cnt;
2820
2821 DP(BNX2X_MSG_SP, "p->mcast_list_len=%d\n", p->mcast_list_len);
2822
2823 switch (cmd) {
2824 case BNX2X_MCAST_CMD_ADD:
2825 bnx2x_mcast_hdl_add(bp, o, p, &cnt);
2826 break;
2827
2828 case BNX2X_MCAST_CMD_DEL:
2829 bnx2x_mcast_hdl_del(bp, o, p, &cnt);
2830 break;
2831
2832 case BNX2X_MCAST_CMD_RESTORE:
2833 o->hdl_restore(bp, o, 0, &cnt);
2834 break;
2835
2836 default:
2837 BNX2X_ERR("Unknown command: %d\n", cmd);
2838 return -EINVAL;
2839 }
2840
2841 /* The current command has been handled */
2842 p->mcast_list_len = 0;
2843
2844 return cnt;
2845}
2846
2847static int bnx2x_mcast_validate_e2(struct bnx2x *bp,
2848 struct bnx2x_mcast_ramrod_params *p,
2849 int cmd)
2850{
2851 struct bnx2x_mcast_obj *o = p->mcast_obj;
2852 int reg_sz = o->get_registry_size(o);
2853
2854 switch (cmd) {
2855 /* DEL command deletes all currently configured MACs */
2856 case BNX2X_MCAST_CMD_DEL:
2857 o->set_registry_size(o, 0);
2858 /* Don't break */
2859
2860 /* RESTORE command will restore the entire multicast configuration */
2861 case BNX2X_MCAST_CMD_RESTORE:
2862 /* Here we set the approximate amount of work to do, which in
2863		 * fact may be less, as some MACs in postponed ADD
2864 * command(s) scheduled before this command may fall into
2865 * the same bin and the actual number of bins set in the
2866 * registry would be less than we estimated here. See
2867 * bnx2x_mcast_set_one_rule_e2() for further details.
2868 */
2869 p->mcast_list_len = reg_sz;
2870 break;
2871
2872 case BNX2X_MCAST_CMD_ADD:
2873 case BNX2X_MCAST_CMD_CONT:
2874 /* Here we assume that all new MACs will fall into new bins.
2875 * However we will correct the real registry size after we
2876 * handle all pending commands.
2877 */
2878 o->set_registry_size(o, reg_sz + p->mcast_list_len);
2879 break;
2880
2881 default:
2882 BNX2X_ERR("Unknown command: %d\n", cmd);
2883 return -EINVAL;
2884
2885 }
2886
2887 /* Increase the total number of MACs pending to be configured */
2888 o->total_pending_num += p->mcast_list_len;
2889
2890 return 0;
2891}
2892
2893static void bnx2x_mcast_revert_e2(struct bnx2x *bp,
2894 struct bnx2x_mcast_ramrod_params *p,
2895 int old_num_bins)
2896{
2897 struct bnx2x_mcast_obj *o = p->mcast_obj;
2898
2899 o->set_registry_size(o, old_num_bins);
2900 o->total_pending_num -= p->mcast_list_len;
2901}
2902
2903/**
2904 * bnx2x_mcast_set_rdata_hdr_e2 - sets the ramrod data header values
2905 *
2906 * @bp: device handle
2907 * @p: multicast ramrod parameters
2908 * @len: number of rules to handle
2909 */
2910static inline void bnx2x_mcast_set_rdata_hdr_e2(struct bnx2x *bp,
2911 struct bnx2x_mcast_ramrod_params *p,
2912 u8 len)
2913{
2914 struct bnx2x_raw_obj *r = &p->mcast_obj->raw;
2915 struct eth_multicast_rules_ramrod_data *data =
2916 (struct eth_multicast_rules_ramrod_data *)(r->rdata);
2917
2918 data->header.echo = ((r->cid & BNX2X_SWCID_MASK) |
2919 (BNX2X_FILTER_MCAST_PENDING << BNX2X_SWCID_SHIFT));
2920 data->header.rule_cnt = len;
2921}
2922
2923/**
2924 * bnx2x_mcast_refresh_registry_e2 - recalculate the actual number of set bins
2925 *
2926 * @bp: device handle
2927 * @o: multicast object
2928 *
2929 * Recalculate the actual number of set bins in the registry using Brian
2930 * Kernighan's algorithm: its complexity is linear in the number of set bins.
2931 *
2932 * returns 0 for the compliance with bnx2x_mcast_refresh_registry_e1().
2933 */
2934static inline int bnx2x_mcast_refresh_registry_e2(struct bnx2x *bp,
2935 struct bnx2x_mcast_obj *o)
2936{
2937 int i, cnt = 0;
2938 u64 elem;
2939
2940 for (i = 0; i < BNX2X_MCAST_VEC_SZ; i++) {
2941 elem = o->registry.aprox_match.vec[i];
2942 for (; elem; cnt++)
2943 elem &= elem - 1;
2944 }
2945
2946 o->set_registry_size(o, cnt);
2947
2948 return 0;
2949}
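/* Worked example of the "elem &= elem - 1" step above: elem == 0xb0
 * (binary 1011 0000) goes 0xb0 -> 0xa0 -> 0x80 -> 0, so the inner loop
 * runs exactly three times, once per set bin in that word.
 */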
2950
2951static int bnx2x_mcast_setup_e2(struct bnx2x *bp,
2952 struct bnx2x_mcast_ramrod_params *p,
2953 int cmd)
2954{
2955 struct bnx2x_raw_obj *raw = &p->mcast_obj->raw;
2956 struct bnx2x_mcast_obj *o = p->mcast_obj;
2957 struct eth_multicast_rules_ramrod_data *data =
2958 (struct eth_multicast_rules_ramrod_data *)(raw->rdata);
2959 int cnt = 0, rc;
2960
2961 /* Reset the ramrod data buffer */
Vladislav Zolotarov042181f2011-06-14 01:33:39 +00002962 memset(data, 0, sizeof(*data));
2963
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03002964 cnt = bnx2x_mcast_handle_pending_cmds_e2(bp, p);
2965
2966 /* If there are no more pending commands - clear SCHEDULED state */
2967 if (list_empty(&o->pending_cmds_head))
2968 o->clear_sched(o);
2969
2970 /* The below may be true iff there was enough room in ramrod
2971 * data for all pending commands and for the current
2972 * command. Otherwise the current command would have been added
2973 * to the pending commands and p->mcast_list_len would have been
2974 * zeroed.
2975 */
2976 if (p->mcast_list_len > 0)
2977 cnt = bnx2x_mcast_handle_current_cmd(bp, p, cmd, cnt);
2978
2979 /* We've pulled out some MACs - update the total number of
2980 * outstanding.
2981 */
2982 o->total_pending_num -= cnt;
2983
2984 /* send a ramrod */
2985 WARN_ON(o->total_pending_num < 0);
2986 WARN_ON(cnt > o->max_cmd_len);
2987
2988 bnx2x_mcast_set_rdata_hdr_e2(bp, p, (u8)cnt);
2989
2990	/* Update the registry size if there are no more pending operations.
2991 *
2992 * We don't want to change the value of the registry size if there are
2993 * pending operations because we want it to always be equal to the
2994 * exact or the approximate number (see bnx2x_mcast_validate_e2()) of
2995 * set bins after the last requested operation in order to properly
2996 * evaluate the size of the next DEL/RESTORE operation.
2997 *
2998 * Note that we update the registry itself during command(s) handling
2999 * - see bnx2x_mcast_set_one_rule_e2(). That's because for 57712 we
3000 * aggregate multiple commands (ADD/DEL/RESTORE) into one ramrod but
3001 * with a limited amount of update commands (per MAC/bin) and we don't
3002 * know in this scope what the actual state of bins configuration is
3003 * going to be after this ramrod.
3004 */
3005 if (!o->total_pending_num)
3006 bnx2x_mcast_refresh_registry_e2(bp, o);
3007
Vladislav Zolotarov53e51e22011-07-19 01:45:02 +00003008 /*
3009 * If CLEAR_ONLY was requested - don't send a ramrod and clear
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03003010 * RAMROD_PENDING status immediately.
3011 */
3012 if (test_bit(RAMROD_DRV_CLR_ONLY, &p->ramrod_flags)) {
3013 raw->clear_pending(raw);
3014 return 0;
3015 } else {
Vladislav Zolotarov53e51e22011-07-19 01:45:02 +00003016 /*
3017		 * No need for an explicit memory barrier here: we must order the
3018		 * write to the SPQ element before the update of the SPQ producer,
3019		 * and since the latter involves a memory read, the full memory
3020		 * barrier inside bnx2x_sp_post() already provides the required
3021		 * ordering.
3022 */
3023
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03003024 /* Send a ramrod */
3025 rc = bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_MULTICAST_RULES,
3026 raw->cid, U64_HI(raw->rdata_mapping),
3027 U64_LO(raw->rdata_mapping),
3028 ETH_CONNECTION_TYPE);
3029 if (rc)
3030 return rc;
3031
3032 /* Ramrod completion is pending */
3033 return 1;
3034 }
3035}
3036
3037static int bnx2x_mcast_validate_e1h(struct bnx2x *bp,
3038 struct bnx2x_mcast_ramrod_params *p,
3039 int cmd)
3040{
3041	/* Mark that there is work to do */
3042 if ((cmd == BNX2X_MCAST_CMD_DEL) || (cmd == BNX2X_MCAST_CMD_RESTORE))
3043 p->mcast_list_len = 1;
3044
3045 return 0;
3046}
3047
3048static void bnx2x_mcast_revert_e1h(struct bnx2x *bp,
3049 struct bnx2x_mcast_ramrod_params *p,
3050 int old_num_bins)
3051{
3052 /* Do nothing */
3053}
3054
3055#define BNX2X_57711_SET_MC_FILTER(filter, bit) \
3056do { \
3057 (filter)[(bit) >> 5] |= (1 << ((bit) & 0x1f)); \
3058} while (0)
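/* Example for the macro above: bit 70 maps to mc_filter[2] (70 >> 5) with
 * mask 1 << 6 (70 & 0x1f); the 256 bins thus span the MC_HASH_SIZE u32
 * words that are later written into the TSTORM internal RAM.
 */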
3059
3060static inline void bnx2x_mcast_hdl_add_e1h(struct bnx2x *bp,
3061 struct bnx2x_mcast_obj *o,
3062 struct bnx2x_mcast_ramrod_params *p,
3063 u32 *mc_filter)
3064{
3065 struct bnx2x_mcast_list_elem *mlist_pos;
3066 int bit;
3067
3068 list_for_each_entry(mlist_pos, &p->mcast_list, link) {
3069 bit = bnx2x_mcast_bin_from_mac(mlist_pos->mac);
3070 BNX2X_57711_SET_MC_FILTER(mc_filter, bit);
3071
Joe Perches0f9dad12011-08-14 12:16:19 +00003072 DP(BNX2X_MSG_SP, "About to configure %pM mcast MAC, bin %d\n",
3073 mlist_pos->mac, bit);
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03003074
3075 /* bookkeeping... */
3076 BIT_VEC64_SET_BIT(o->registry.aprox_match.vec,
3077 bit);
3078 }
3079}
3080
3081static inline void bnx2x_mcast_hdl_restore_e1h(struct bnx2x *bp,
3082 struct bnx2x_mcast_obj *o, struct bnx2x_mcast_ramrod_params *p,
3083 u32 *mc_filter)
3084{
3085 int bit;
3086
3087 for (bit = bnx2x_mcast_get_next_bin(o, 0);
3088 bit >= 0;
3089 bit = bnx2x_mcast_get_next_bin(o, bit + 1)) {
3090 BNX2X_57711_SET_MC_FILTER(mc_filter, bit);
3091 DP(BNX2X_MSG_SP, "About to set bin %d\n", bit);
3092 }
3093}
3094
3095/* On 57711 we write the multicast MACs' approximate match
3096 * table directly into the TSTORM's internal RAM. So we don't
3097 * really need to handle any tricks to make it work.
3098 */
3099static int bnx2x_mcast_setup_e1h(struct bnx2x *bp,
3100 struct bnx2x_mcast_ramrod_params *p,
3101 int cmd)
3102{
3103 int i;
3104 struct bnx2x_mcast_obj *o = p->mcast_obj;
3105 struct bnx2x_raw_obj *r = &o->raw;
3106
3107	/* If CLEAR_ONLY has been requested - only clear the registry and the
3108	 * pending bit; otherwise build and write the new multicast filter.
3109	 */
3110 if (!test_bit(RAMROD_DRV_CLR_ONLY, &p->ramrod_flags)) {
3111 u32 mc_filter[MC_HASH_SIZE] = {0};
3112
3113 /* Set the multicast filter bits before writing it into
3114 * the internal memory.
3115 */
3116 switch (cmd) {
3117 case BNX2X_MCAST_CMD_ADD:
3118 bnx2x_mcast_hdl_add_e1h(bp, o, p, mc_filter);
3119 break;
3120
3121 case BNX2X_MCAST_CMD_DEL:
Joe Perches94f05b02011-08-14 12:16:20 +00003122 DP(BNX2X_MSG_SP,
3123 "Invalidating multicast MACs configuration\n");
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03003124
3125 /* clear the registry */
3126 memset(o->registry.aprox_match.vec, 0,
3127 sizeof(o->registry.aprox_match.vec));
3128 break;
3129
3130 case BNX2X_MCAST_CMD_RESTORE:
3131 bnx2x_mcast_hdl_restore_e1h(bp, o, p, mc_filter);
3132 break;
3133
3134 default:
3135 BNX2X_ERR("Unknown command: %d\n", cmd);
3136 return -EINVAL;
3137 }
3138
3139 /* Set the mcast filter in the internal memory */
3140 for (i = 0; i < MC_HASH_SIZE; i++)
3141 REG_WR(bp, MC_HASH_OFFSET(bp, i), mc_filter[i]);
3142 } else
3143 /* clear the registry */
3144 memset(o->registry.aprox_match.vec, 0,
3145 sizeof(o->registry.aprox_match.vec));
3146
3147 /* We are done */
3148 r->clear_pending(r);
3149
3150 return 0;
3151}
3152
3153static int bnx2x_mcast_validate_e1(struct bnx2x *bp,
3154 struct bnx2x_mcast_ramrod_params *p,
3155 int cmd)
3156{
3157 struct bnx2x_mcast_obj *o = p->mcast_obj;
3158 int reg_sz = o->get_registry_size(o);
3159
3160 switch (cmd) {
3161 /* DEL command deletes all currently configured MACs */
3162 case BNX2X_MCAST_CMD_DEL:
3163 o->set_registry_size(o, 0);
3164 /* Don't break */
3165
3166 /* RESTORE command will restore the entire multicast configuration */
3167 case BNX2X_MCAST_CMD_RESTORE:
3168 p->mcast_list_len = reg_sz;
3169 DP(BNX2X_MSG_SP, "Command %d, p->mcast_list_len=%d\n",
3170 cmd, p->mcast_list_len);
3171 break;
3172
3173 case BNX2X_MCAST_CMD_ADD:
3174 case BNX2X_MCAST_CMD_CONT:
3175 /* Multicast MACs on 57710 are configured as unicast MACs and
3176 * there is only a limited number of CAM entries for that
3177 * matter.
3178 */
3179 if (p->mcast_list_len > o->max_cmd_len) {
3180			BNX2X_ERR("Can't configure more than %d multicast MACs "
3181				  "on 57710\n", o->max_cmd_len);
3182 return -EINVAL;
3183 }
3184 /* Every configured MAC should be cleared if DEL command is
3185 * called. Only the last ADD command is relevant as long as
3186 * every ADD commands overrides the previous configuration.
3187 */
3188 DP(BNX2X_MSG_SP, "p->mcast_list_len=%d\n", p->mcast_list_len);
3189 if (p->mcast_list_len > 0)
3190 o->set_registry_size(o, p->mcast_list_len);
3191
3192 break;
3193
3194 default:
3195 BNX2X_ERR("Unknown command: %d\n", cmd);
3196 return -EINVAL;
3197
3198 }
3199
3200 /* We want to ensure that commands are executed one by one for 57710.
3201	 * Therefore each non-empty command will consume o->max_cmd_len.
3202 */
3203 if (p->mcast_list_len)
3204 o->total_pending_num += o->max_cmd_len;
3205
3206 return 0;
3207}
3208
3209static void bnx2x_mcast_revert_e1(struct bnx2x *bp,
3210 struct bnx2x_mcast_ramrod_params *p,
3211 int old_num_macs)
3212{
3213 struct bnx2x_mcast_obj *o = p->mcast_obj;
3214
3215 o->set_registry_size(o, old_num_macs);
3216
3217	/* If the current command hasn't been handled yet and we are
3218	 * here, it means that it's meant to be dropped and we have to
3219	 * update the number of outstanding MACs accordingly.
3220 */
3221 if (p->mcast_list_len)
3222 o->total_pending_num -= o->max_cmd_len;
3223}
3224
3225static void bnx2x_mcast_set_one_rule_e1(struct bnx2x *bp,
3226 struct bnx2x_mcast_obj *o, int idx,
3227 union bnx2x_mcast_config_data *cfg_data,
3228 int cmd)
3229{
3230 struct bnx2x_raw_obj *r = &o->raw;
3231 struct mac_configuration_cmd *data =
3232 (struct mac_configuration_cmd *)(r->rdata);
3233
3234 /* copy mac */
3235 if ((cmd == BNX2X_MCAST_CMD_ADD) || (cmd == BNX2X_MCAST_CMD_RESTORE)) {
3236 bnx2x_set_fw_mac_addr(&data->config_table[idx].msb_mac_addr,
3237 &data->config_table[idx].middle_mac_addr,
3238 &data->config_table[idx].lsb_mac_addr,
3239 cfg_data->mac);
3240
3241 data->config_table[idx].vlan_id = 0;
3242 data->config_table[idx].pf_id = r->func_id;
3243 data->config_table[idx].clients_bit_vector =
3244 cpu_to_le32(1 << r->cl_id);
3245
3246 SET_FLAG(data->config_table[idx].flags,
3247 MAC_CONFIGURATION_ENTRY_ACTION_TYPE,
3248 T_ETH_MAC_COMMAND_SET);
3249 }
3250}
3251
3252/**
3253 * bnx2x_mcast_set_rdata_hdr_e1 - set header values in mac_configuration_cmd
3254 *
3255 * @bp: device handle
3256 * @p:
3257 * @len: number of rules to handle
3258 */
3259static inline void bnx2x_mcast_set_rdata_hdr_e1(struct bnx2x *bp,
3260 struct bnx2x_mcast_ramrod_params *p,
3261 u8 len)
3262{
3263 struct bnx2x_raw_obj *r = &p->mcast_obj->raw;
3264 struct mac_configuration_cmd *data =
3265 (struct mac_configuration_cmd *)(r->rdata);
3266
3267 u8 offset = (CHIP_REV_IS_SLOW(bp) ?
3268 BNX2X_MAX_EMUL_MULTI*(1 + r->func_id) :
3269 BNX2X_MAX_MULTICAST*(1 + r->func_id));
3270
3271 data->hdr.offset = offset;
3272 data->hdr.client_id = 0xff;
3273 data->hdr.echo = ((r->cid & BNX2X_SWCID_MASK) |
3274 (BNX2X_FILTER_MCAST_PENDING << BNX2X_SWCID_SHIFT));
3275 data->hdr.length = len;
3276}
3277
3278/**
3279 * bnx2x_mcast_handle_restore_cmd_e1 - restore command for 57710
3280 *
3281 * @bp: device handle
3282 * @o:
3283 * @start_idx: index in the registry to start from
3284 * @rdata_idx: index in the ramrod data to start from
3285 *
3286 * The restore command for 57710 is like all other commands - always a
3287 * standalone command - so start_idx and rdata_idx will always be 0. This
3288 * function will always succeed.
3289 * Returns -1 to comply with the 57712 variant.
3290 */
3291static inline int bnx2x_mcast_handle_restore_cmd_e1(
3292	struct bnx2x *bp, struct bnx2x_mcast_obj *o, int start_idx,
3293 int *rdata_idx)
3294{
3295 struct bnx2x_mcast_mac_elem *elem;
3296 int i = 0;
3297 union bnx2x_mcast_config_data cfg_data = {0};
3298
3299 /* go through the registry and configure the MACs from it. */
3300 list_for_each_entry(elem, &o->registry.exact_match.macs, link) {
3301 cfg_data.mac = &elem->mac[0];
3302 o->set_one_rule(bp, o, i, &cfg_data, BNX2X_MCAST_CMD_RESTORE);
3303
3304 i++;
3305
Joe Perches0f9dad12011-08-14 12:16:19 +00003306 DP(BNX2X_MSG_SP, "About to configure %pM mcast MAC\n",
3307 cfg_data.mac);
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03003308 }
3309
3310 *rdata_idx = i;
3311
3312 return -1;
3313}
3314
3315
3316static inline int bnx2x_mcast_handle_pending_cmds_e1(
3317 struct bnx2x *bp, struct bnx2x_mcast_ramrod_params *p)
3318{
3319 struct bnx2x_pending_mcast_cmd *cmd_pos;
3320 struct bnx2x_mcast_mac_elem *pmac_pos;
3321 struct bnx2x_mcast_obj *o = p->mcast_obj;
3322 union bnx2x_mcast_config_data cfg_data = {0};
3323 int cnt = 0;
3324
3325
3326 /* If nothing to be done - return */
3327 if (list_empty(&o->pending_cmds_head))
3328 return 0;
3329
3330 /* Handle the first command */
3331 cmd_pos = list_first_entry(&o->pending_cmds_head,
3332 struct bnx2x_pending_mcast_cmd, link);
3333
3334 switch (cmd_pos->type) {
3335 case BNX2X_MCAST_CMD_ADD:
3336 list_for_each_entry(pmac_pos, &cmd_pos->data.macs_head, link) {
3337 cfg_data.mac = &pmac_pos->mac[0];
3338 o->set_one_rule(bp, o, cnt, &cfg_data, cmd_pos->type);
3339
3340 cnt++;
3341
Joe Perches0f9dad12011-08-14 12:16:19 +00003342 DP(BNX2X_MSG_SP, "About to configure %pM mcast MAC\n",
3343 pmac_pos->mac);
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03003344 }
3345 break;
3346
3347 case BNX2X_MCAST_CMD_DEL:
3348 cnt = cmd_pos->data.macs_num;
3349 DP(BNX2X_MSG_SP, "About to delete %d multicast MACs\n", cnt);
3350 break;
3351
3352 case BNX2X_MCAST_CMD_RESTORE:
3353 o->hdl_restore(bp, o, 0, &cnt);
3354 break;
3355
3356 default:
3357 BNX2X_ERR("Unknown command: %d\n", cmd_pos->type);
3358 return -EINVAL;
3359 }
3360
3361 list_del(&cmd_pos->link);
3362 kfree(cmd_pos);
3363
3364 return cnt;
3365}
3366
3367/**
3368 * bnx2x_get_fw_mac_addr - revert the bnx2x_set_fw_mac_addr().
3369 *
3370 * @fw_hi: MAC high word in firmware format
3371 * @fw_mid: MAC middle word in firmware format
3372 * @fw_lo: MAC low word in firmware format
3373 * @mac: buffer (ETH_ALEN bytes) for the MAC in natural byte order
3374 */
3375static inline void bnx2x_get_fw_mac_addr(__le16 *fw_hi, __le16 *fw_mid,
3376 __le16 *fw_lo, u8 *mac)
3377{
3378 mac[1] = ((u8 *)fw_hi)[0];
3379 mac[0] = ((u8 *)fw_hi)[1];
3380 mac[3] = ((u8 *)fw_mid)[0];
3381 mac[2] = ((u8 *)fw_mid)[1];
3382 mac[5] = ((u8 *)fw_lo)[0];
3383 mac[4] = ((u8 *)fw_lo)[1];
3384}
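/* Each 16-bit firmware word holds a byte-swapped MAC pair: for the MAC
 * 00:11:22:33:44:55 the fw_hi bytes are {0x11, 0x00}, fw_mid {0x33, 0x22}
 * and fw_lo {0x55, 0x44}, which the helper above swaps back into natural
 * order.
 */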
3385
3386/**
3387 * bnx2x_mcast_refresh_registry_e1 - update the exact-match mcast registry
3388 *
3389 * @bp: device handle
3390 * @o: multicast object
3391 *
3392 * Check the ramrod data first entry flag to see if it's a DELETE or ADD command
3393 * and update the registry correspondingly: if ADD - allocate memory and add
3394 * the entries to the registry (list), if DELETE - clear the registry and free
3395 * the memory.
3396 */
3397static inline int bnx2x_mcast_refresh_registry_e1(struct bnx2x *bp,
3398 struct bnx2x_mcast_obj *o)
3399{
3400 struct bnx2x_raw_obj *raw = &o->raw;
3401 struct bnx2x_mcast_mac_elem *elem;
3402 struct mac_configuration_cmd *data =
3403 (struct mac_configuration_cmd *)(raw->rdata);
3404
3405 /* If first entry contains a SET bit - the command was ADD,
3406 * otherwise - DEL_ALL
3407 */
3408 if (GET_FLAG(data->config_table[0].flags,
3409 MAC_CONFIGURATION_ENTRY_ACTION_TYPE)) {
3410 int i, len = data->hdr.length;
3411
3412 /* Break if it was a RESTORE command */
3413 if (!list_empty(&o->registry.exact_match.macs))
3414 return 0;
3415
Thomas Meyer01e23742011-11-29 11:08:00 +00003416 elem = kcalloc(len, sizeof(*elem), GFP_ATOMIC);
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03003417 if (!elem) {
3418 BNX2X_ERR("Failed to allocate registry memory\n");
3419 return -ENOMEM;
3420 }
3421
3422 for (i = 0; i < len; i++, elem++) {
3423 bnx2x_get_fw_mac_addr(
3424 &data->config_table[i].msb_mac_addr,
3425 &data->config_table[i].middle_mac_addr,
3426 &data->config_table[i].lsb_mac_addr,
3427 elem->mac);
Joe Perches0f9dad12011-08-14 12:16:19 +00003428 DP(BNX2X_MSG_SP, "Adding registry entry for [%pM]\n",
3429 elem->mac);
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03003430 list_add_tail(&elem->link,
3431 &o->registry.exact_match.macs);
3432 }
3433 } else {
3434 elem = list_first_entry(&o->registry.exact_match.macs,
3435 struct bnx2x_mcast_mac_elem, link);
3436 DP(BNX2X_MSG_SP, "Deleting a registry\n");
3437 kfree(elem);
3438 INIT_LIST_HEAD(&o->registry.exact_match.macs);
3439 }
3440
3441 return 0;
3442}
3443
3444static int bnx2x_mcast_setup_e1(struct bnx2x *bp,
3445 struct bnx2x_mcast_ramrod_params *p,
3446 int cmd)
3447{
3448 struct bnx2x_mcast_obj *o = p->mcast_obj;
3449 struct bnx2x_raw_obj *raw = &o->raw;
3450 struct mac_configuration_cmd *data =
3451 (struct mac_configuration_cmd *)(raw->rdata);
3452 int cnt = 0, i, rc;
3453
3454 /* Reset the ramrod data buffer */
3455 memset(data, 0, sizeof(*data));
3456
3457 /* First set all entries as invalid */
3458	for (i = 0; i < o->max_cmd_len; i++)
3459 SET_FLAG(data->config_table[i].flags,
3460 MAC_CONFIGURATION_ENTRY_ACTION_TYPE,
3461 T_ETH_MAC_COMMAND_INVALIDATE);
3462
3463 /* Handle pending commands first */
3464 cnt = bnx2x_mcast_handle_pending_cmds_e1(bp, p);
3465
3466 /* If there are no more pending commands - clear SCHEDULED state */
3467 if (list_empty(&o->pending_cmds_head))
3468 o->clear_sched(o);
3469
3470 /* The below may be true iff there were no pending commands */
3471 if (!cnt)
3472 cnt = bnx2x_mcast_handle_current_cmd(bp, p, cmd, 0);
3473
3474 /* For 57710 every command has o->max_cmd_len length to ensure that
3475 * commands are done one at a time.
3476 */
3477 o->total_pending_num -= o->max_cmd_len;
3478
3479 /* send a ramrod */
3480
3481 WARN_ON(cnt > o->max_cmd_len);
3482
3483 /* Set ramrod header (in particular, a number of entries to update) */
3484 bnx2x_mcast_set_rdata_hdr_e1(bp, p, (u8)cnt);
3485
3486	/* update the registry: we need the registry contents to be always up
3487	 * to date in order to be able to execute a RESTORE opcode. Here
3488	 * we use the fact that for 57710 we send one command at a time
3489 * hence we may take the registry update out of the command handling
3490 * and do it in a simpler way here.
3491 */
3492 rc = bnx2x_mcast_refresh_registry_e1(bp, o);
3493 if (rc)
3494 return rc;
3495
Vladislav Zolotarov53e51e22011-07-19 01:45:02 +00003496 /*
3497 * If CLEAR_ONLY was requested - don't send a ramrod and clear
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03003498 * RAMROD_PENDING status immediately.
3499 */
3500 if (test_bit(RAMROD_DRV_CLR_ONLY, &p->ramrod_flags)) {
3501 raw->clear_pending(raw);
3502 return 0;
3503 } else {
Vladislav Zolotarov53e51e22011-07-19 01:45:02 +00003504 /*
3505		 * No need for an explicit memory barrier here: we must order the
3506		 * write to the SPQ element before the update of the SPQ producer,
3507		 * and since the latter involves a memory read, the full memory
3508		 * barrier inside bnx2x_sp_post() already provides the required
3509		 * ordering.
3510 */
3511
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03003512 /* Send a ramrod */
3513 rc = bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, raw->cid,
3514 U64_HI(raw->rdata_mapping),
3515 U64_LO(raw->rdata_mapping),
3516 ETH_CONNECTION_TYPE);
3517 if (rc)
3518 return rc;
3519
3520 /* Ramrod completion is pending */
3521 return 1;
3522 }
3523
3524}
3525
3526static int bnx2x_mcast_get_registry_size_exact(struct bnx2x_mcast_obj *o)
3527{
3528 return o->registry.exact_match.num_macs_set;
3529}
3530
3531static int bnx2x_mcast_get_registry_size_aprox(struct bnx2x_mcast_obj *o)
3532{
3533 return o->registry.aprox_match.num_bins_set;
3534}
3535
3536static void bnx2x_mcast_set_registry_size_exact(struct bnx2x_mcast_obj *o,
3537 int n)
3538{
3539 o->registry.exact_match.num_macs_set = n;
3540}
3541
3542static void bnx2x_mcast_set_registry_size_aprox(struct bnx2x_mcast_obj *o,
3543 int n)
3544{
3545 o->registry.aprox_match.num_bins_set = n;
3546}
3547
3548int bnx2x_config_mcast(struct bnx2x *bp,
3549 struct bnx2x_mcast_ramrod_params *p,
3550 int cmd)
3551{
3552 struct bnx2x_mcast_obj *o = p->mcast_obj;
3553 struct bnx2x_raw_obj *r = &o->raw;
3554 int rc = 0, old_reg_size;
3555
3556 /* This is needed to recover number of currently configured mcast macs
3557 * in case of failure.
3558 */
3559 old_reg_size = o->get_registry_size(o);
3560
3561 /* Do some calculations and checks */
3562 rc = o->validate(bp, p, cmd);
3563 if (rc)
3564 return rc;
3565
3566 /* Return if there is no work to do */
3567 if ((!p->mcast_list_len) && (!o->check_sched(o)))
3568 return 0;
3569
3570 DP(BNX2X_MSG_SP, "o->total_pending_num=%d p->mcast_list_len=%d "
3571 "o->max_cmd_len=%d\n", o->total_pending_num,
3572 p->mcast_list_len, o->max_cmd_len);
3573
3574 /* Enqueue the current command to the pending list if we can't complete
3575 * it in the current iteration
3576 */
3577 if (r->check_pending(r) ||
3578 ((o->max_cmd_len > 0) && (o->total_pending_num > o->max_cmd_len))) {
3579 rc = o->enqueue_cmd(bp, p->mcast_obj, p, cmd);
3580 if (rc < 0)
3581 goto error_exit1;
3582
3583 /* As long as the current command is in a command list we
3584 * don't need to handle it separately.
3585 */
3586 p->mcast_list_len = 0;
3587 }
3588
3589 if (!r->check_pending(r)) {
3590
3591 /* Set 'pending' state */
3592 r->set_pending(r);
3593
3594 /* Configure the new classification in the chip */
3595 rc = o->config_mcast(bp, p, cmd);
3596 if (rc < 0)
3597 goto error_exit2;
3598
3599 /* Wait for a ramrod completion if was requested */
3600 if (test_bit(RAMROD_COMP_WAIT, &p->ramrod_flags))
3601 rc = o->wait_comp(bp, o);
3602 }
3603
3604 return rc;
3605
3606error_exit2:
3607 r->clear_pending(r);
3608
3609error_exit1:
3610 o->revert(bp, p, old_reg_size);
3611
3612 return rc;
3613}
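
/*
 * Illustrative sketch only, not part of the driver flow: a minimal caller
 * of bnx2x_config_mcast(). The helper name is hypothetical; it mirrors the
 * driver's cleanup path. An empty mcast_list is fine for a DEL command,
 * since the validate() callback derives the list length from the registry.
 */
static int __bnx2x_mcast_del_example(struct bnx2x *bp,
				     struct bnx2x_mcast_obj *o)
{
	struct bnx2x_mcast_ramrod_params rparam;
	int rc;

	memset(&rparam, 0, sizeof(rparam));
	rparam.mcast_obj = o;
	INIT_LIST_HEAD(&rparam.mcast_list);

	/* Sleep until the ramrod completion arrives */
	__set_bit(RAMROD_COMP_WAIT, &rparam.ramrod_flags);

	/* DEL drops the whole currently configured multicast set */
	rc = bnx2x_config_mcast(bp, &rparam, BNX2X_MCAST_CMD_DEL);
	if (rc < 0)
		BNX2X_ERR("Failed to clean multicast configuration: %d\n", rc);

	return rc;
}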
3614
3615static void bnx2x_mcast_clear_sched(struct bnx2x_mcast_obj *o)
3616{
3617 smp_mb__before_clear_bit();
3618 clear_bit(o->sched_state, o->raw.pstate);
3619 smp_mb__after_clear_bit();
3620}
3621
3622static void bnx2x_mcast_set_sched(struct bnx2x_mcast_obj *o)
3623{
3624 smp_mb__before_clear_bit();
3625 set_bit(o->sched_state, o->raw.pstate);
3626 smp_mb__after_clear_bit();
3627}
3628
3629static bool bnx2x_mcast_check_sched(struct bnx2x_mcast_obj *o)
3630{
3631 return !!test_bit(o->sched_state, o->raw.pstate);
3632}
3633
3634static bool bnx2x_mcast_check_pending(struct bnx2x_mcast_obj *o)
3635{
3636 return o->raw.check_pending(&o->raw) || o->check_sched(o);
3637}
3638
3639void bnx2x_init_mcast_obj(struct bnx2x *bp,
3640 struct bnx2x_mcast_obj *mcast_obj,
3641 u8 mcast_cl_id, u32 mcast_cid, u8 func_id,
3642 u8 engine_id, void *rdata, dma_addr_t rdata_mapping,
3643 int state, unsigned long *pstate, bnx2x_obj_type type)
3644{
3645 memset(mcast_obj, 0, sizeof(*mcast_obj));
3646
3647 bnx2x_init_raw_obj(&mcast_obj->raw, mcast_cl_id, mcast_cid, func_id,
3648 rdata, rdata_mapping, state, pstate, type);
3649
3650 mcast_obj->engine_id = engine_id;
3651
3652 INIT_LIST_HEAD(&mcast_obj->pending_cmds_head);
3653
3654 mcast_obj->sched_state = BNX2X_FILTER_MCAST_SCHED;
3655 mcast_obj->check_sched = bnx2x_mcast_check_sched;
3656 mcast_obj->set_sched = bnx2x_mcast_set_sched;
3657 mcast_obj->clear_sched = bnx2x_mcast_clear_sched;
3658
3659 if (CHIP_IS_E1(bp)) {
3660 mcast_obj->config_mcast = bnx2x_mcast_setup_e1;
3661 mcast_obj->enqueue_cmd = bnx2x_mcast_enqueue_cmd;
3662 mcast_obj->hdl_restore =
3663 bnx2x_mcast_handle_restore_cmd_e1;
3664 mcast_obj->check_pending = bnx2x_mcast_check_pending;
3665
3666 if (CHIP_REV_IS_SLOW(bp))
3667 mcast_obj->max_cmd_len = BNX2X_MAX_EMUL_MULTI;
3668 else
3669 mcast_obj->max_cmd_len = BNX2X_MAX_MULTICAST;
3670
3671 mcast_obj->wait_comp = bnx2x_mcast_wait;
3672 mcast_obj->set_one_rule = bnx2x_mcast_set_one_rule_e1;
3673 mcast_obj->validate = bnx2x_mcast_validate_e1;
3674 mcast_obj->revert = bnx2x_mcast_revert_e1;
3675 mcast_obj->get_registry_size =
3676 bnx2x_mcast_get_registry_size_exact;
3677 mcast_obj->set_registry_size =
3678 bnx2x_mcast_set_registry_size_exact;
3679
3680 /* 57710 is the only chip that uses the exact match for mcast
3681 * at the moment.
3682 */
3683 INIT_LIST_HEAD(&mcast_obj->registry.exact_match.macs);
3684
3685 } else if (CHIP_IS_E1H(bp)) {
3686 mcast_obj->config_mcast = bnx2x_mcast_setup_e1h;
3687 mcast_obj->enqueue_cmd = NULL;
3688 mcast_obj->hdl_restore = NULL;
3689 mcast_obj->check_pending = bnx2x_mcast_check_pending;
3690
3691 /* 57711 doesn't send a ramrod, so it has unlimited credit
3692 * for one command.
3693 */
3694 mcast_obj->max_cmd_len = -1;
3695 mcast_obj->wait_comp = bnx2x_mcast_wait;
3696 mcast_obj->set_one_rule = NULL;
3697 mcast_obj->validate = bnx2x_mcast_validate_e1h;
3698 mcast_obj->revert = bnx2x_mcast_revert_e1h;
3699 mcast_obj->get_registry_size =
3700 bnx2x_mcast_get_registry_size_aprox;
3701 mcast_obj->set_registry_size =
3702 bnx2x_mcast_set_registry_size_aprox;
3703 } else {
3704 mcast_obj->config_mcast = bnx2x_mcast_setup_e2;
3705 mcast_obj->enqueue_cmd = bnx2x_mcast_enqueue_cmd;
3706 mcast_obj->hdl_restore =
3707 bnx2x_mcast_handle_restore_cmd_e2;
3708 mcast_obj->check_pending = bnx2x_mcast_check_pending;
3709 /* TODO: There should be a proper HSI define for this number!!!
3710 */
3711 mcast_obj->max_cmd_len = 16;
3712 mcast_obj->wait_comp = bnx2x_mcast_wait;
3713 mcast_obj->set_one_rule = bnx2x_mcast_set_one_rule_e2;
3714 mcast_obj->validate = bnx2x_mcast_validate_e2;
3715 mcast_obj->revert = bnx2x_mcast_revert_e2;
3716 mcast_obj->get_registry_size =
3717 bnx2x_mcast_get_registry_size_aprox;
3718 mcast_obj->set_registry_size =
3719 bnx2x_mcast_set_registry_size_aprox;
3720 }
3721}
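
/*
 * Summary of the per-chip multicast strategies wired up above (derived
 * from the callbacks, for reference only):
 *
 *  57710 (E1):  exact-match CAM entries; one command at a time, so the
 *               registry keeps the full MAC list for RESTORE.
 *  57711 (E1H): approximate-match multicast hash; no ramrod is sent per
 *               command, hence the unlimited (-1) credit.
 *  57712+ (E2): rule-based ramrods with up to 16 rules per command (see
 *               the TODO above); approximate-match registry.
 */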
3722
3723/*************************** Credit handling **********************************/
3724
3725/**
3726 * __atomic_add_ifless - add if the result is less than a given value.
3727 *
3728 * @v: pointer of type atomic_t
3729 * @a: the amount to add to v...
3730 * @u: ...if (v + a) is less than u.
3731 *
3732 * returns true if (v + a) was less than u, and false otherwise.
3733 *
3734 */
3735static inline bool __atomic_add_ifless(atomic_t *v, int a, int u)
3736{
3737 int c, old;
3738
3739 c = atomic_read(v);
3740 for (;;) {
3741 if (unlikely(c + a >= u))
3742 return false;
3743
3744 old = atomic_cmpxchg((v), c, c + a);
3745 if (likely(old == c))
3746 break;
3747 c = old;
3748 }
3749
3750 return true;
3751}
3752
3753/**
3754 * __atomic_dec_ifmoe - dec if the result is greater than or equal to a given value.
3755 *
3756 * @v: pointer of type atomic_t
3757 * @a: the amount to dec from v...
3758 * @u: ...if (v - a) is greater than or equal to u.
3759 *
3760 * returns true if (v - a) was greater than or equal to u, and false
3761 * otherwise.
3762 */
3763static inline bool __atomic_dec_ifmoe(atomic_t *v, int a, int u)
3764{
3765 int c, old;
3766
3767 c = atomic_read(v);
3768 for (;;) {
3769 if (unlikely(c - a < u))
3770 return false;
3771
3772 old = atomic_cmpxchg((v), c, c - a);
3773 if (likely(old == c))
3774 break;
3775 c = old;
3776 }
3777
3778 return true;
3779}
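
/*
 * Illustrative sketch (hypothetical helper, not used by the driver): how
 * the two primitives above combine into a bounded counter. Taking 2 units
 * succeeds only while at least 2 remain; returning them succeeds only
 * while the total stays at or below a pool size of 8.
 */
static inline bool __bnx2x_bounded_counter_demo(atomic_t *v)
{
	/* Reserve 2 units, refusing to drop below 0 */
	if (!__atomic_dec_ifmoe(v, 2, 0))
		return false;

	/* Release them, refusing to grow past 8 (hence the "8 + 1" bound) */
	return __atomic_add_ifless(v, 2, 8 + 1);
}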
3780
3781static bool bnx2x_credit_pool_get(struct bnx2x_credit_pool_obj *o, int cnt)
3782{
3783 bool rc;
3784
3785 smp_mb();
3786 rc = __atomic_dec_ifmoe(&o->credit, cnt, 0);
3787 smp_mb();
3788
3789 return rc;
3790}
3791
3792static bool bnx2x_credit_pool_put(struct bnx2x_credit_pool_obj *o, int cnt)
3793{
3794 bool rc;
3795
3796 smp_mb();
3797
3798	/* Don't refill if credit + cnt > pool_sz */
3799 rc = __atomic_add_ifless(&o->credit, cnt, o->pool_sz + 1);
3800
3801 smp_mb();
3802
3803 return rc;
3804}
3805
3806static int bnx2x_credit_pool_check(struct bnx2x_credit_pool_obj *o)
3807{
3808 int cur_credit;
3809
3810 smp_mb();
3811 cur_credit = atomic_read(&o->credit);
3812
3813 return cur_credit;
3814}
3815
3816static bool bnx2x_credit_pool_always_true(struct bnx2x_credit_pool_obj *o,
3817 int cnt)
3818{
3819 return true;
3820}
3821
3822
3823static bool bnx2x_credit_pool_get_entry(
3824 struct bnx2x_credit_pool_obj *o,
3825 int *offset)
3826{
3827 int idx, vec, i;
3828
3829 *offset = -1;
3830
3831 /* Find "internal cam-offset" then add to base for this object... */
3832 for (vec = 0; vec < BNX2X_POOL_VEC_SIZE; vec++) {
3833
3834 /* Skip the current vector if there are no free entries in it */
3835 if (!o->pool_mirror[vec])
3836 continue;
3837
3838 /* If we've got here we are going to find a free entry */
3839		for (idx = vec * BIT_VEC64_ELEM_SZ, i = 0;
3840 i < BIT_VEC64_ELEM_SZ; idx++, i++)
3841
3842 if (BIT_VEC64_TEST_BIT(o->pool_mirror, idx)) {
3843 /* Got one!! */
3844 BIT_VEC64_CLEAR_BIT(o->pool_mirror, idx);
3845 *offset = o->base_pool_offset + idx;
3846 return true;
3847 }
3848 }
3849
3850 return false;
3851}
3852
3853static bool bnx2x_credit_pool_put_entry(
3854 struct bnx2x_credit_pool_obj *o,
3855 int offset)
3856{
3857 if (offset < o->base_pool_offset)
3858 return false;
3859
3860 offset -= o->base_pool_offset;
3861
3862 if (offset >= o->pool_sz)
3863 return false;
3864
3865 /* Return the entry to the pool */
3866 BIT_VEC64_SET_BIT(o->pool_mirror, offset);
3867
3868 return true;
3869}
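
/*
 * Illustrative sketch (hypothetical helper): pairing get()/get_entry()
 * the way the classification code does - reserve a credit first, then a
 * concrete CAM slot, and roll the credit back if no slot is free.
 */
static bool __bnx2x_reserve_cam_slot_demo(struct bnx2x_credit_pool_obj *o,
					  int *cam_offset)
{
	/* Take one unit of credit... */
	if (!o->get(o, 1))
		return false;

	/* ...then claim an actual CAM entry to go with it */
	if (!o->get_entry(o, cam_offset)) {
		o->put(o, 1);
		return false;
	}

	return true;
}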
3870
3871static bool bnx2x_credit_pool_put_entry_always_true(
3872 struct bnx2x_credit_pool_obj *o,
3873 int offset)
3874{
3875 return true;
3876}
3877
3878static bool bnx2x_credit_pool_get_entry_always_true(
3879 struct bnx2x_credit_pool_obj *o,
3880 int *offset)
3881{
3882 *offset = -1;
3883 return true;
3884}
3885/**
3886 * bnx2x_init_credit_pool - initialize credit pool internals.
3887 *
3888 * @p:		credit pool object
3889 * @base: Base entry in the CAM to use.
3890 * @credit: pool size.
3891 *
3892 * If base is negative no CAM entries handling will be performed.
3893 * If credit is negative pool operations will always succeed (unlimited pool).
3894 *
3895 */
3896static inline void bnx2x_init_credit_pool(struct bnx2x_credit_pool_obj *p,
3897 int base, int credit)
3898{
3899 /* Zero the object first */
3900 memset(p, 0, sizeof(*p));
3901
3902 /* Set the table to all 1s */
3903 memset(&p->pool_mirror, 0xff, sizeof(p->pool_mirror));
3904
3905 /* Init a pool as full */
3906 atomic_set(&p->credit, credit);
3907
3908	/* The total pool size */
3909 p->pool_sz = credit;
3910
3911 p->base_pool_offset = base;
3912
3913 /* Commit the change */
3914 smp_mb();
3915
3916 p->check = bnx2x_credit_pool_check;
3917
3918 /* if pool credit is negative - disable the checks */
3919 if (credit >= 0) {
3920 p->put = bnx2x_credit_pool_put;
3921 p->get = bnx2x_credit_pool_get;
3922 p->put_entry = bnx2x_credit_pool_put_entry;
3923 p->get_entry = bnx2x_credit_pool_get_entry;
3924 } else {
3925 p->put = bnx2x_credit_pool_always_true;
3926 p->get = bnx2x_credit_pool_always_true;
3927 p->put_entry = bnx2x_credit_pool_put_entry_always_true;
3928 p->get_entry = bnx2x_credit_pool_get_entry_always_true;
3929 }
3930
3931 /* If base is negative - disable entries handling */
3932 if (base < 0) {
3933 p->put_entry = bnx2x_credit_pool_put_entry_always_true;
3934 p->get_entry = bnx2x_credit_pool_get_entry_always_true;
3935 }
3936}
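
/*
 * Illustrative sketch: the three pool flavours produced by the negative
 * argument conventions above. Values are hypothetical.
 */
static inline void __bnx2x_credit_pool_demo(void)
{
	struct bnx2x_credit_pool_obj pool;

	/* Regular pool: 8 credits, CAM entries based at offset 16 */
	bnx2x_init_credit_pool(&pool, 16, 8);

	/* Unlimited pool: get()/put() always succeed */
	bnx2x_init_credit_pool(&pool, 0, -1);

	/* Credit-only pool: no CAM entry handling (57712 and newer) */
	bnx2x_init_credit_pool(&pool, -1, 8);
}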
3937
3938void bnx2x_init_mac_credit_pool(struct bnx2x *bp,
3939 struct bnx2x_credit_pool_obj *p, u8 func_id,
3940 u8 func_num)
3941{
3942/* TODO: this will be defined in consts as well... */
3943#define BNX2X_CAM_SIZE_EMUL 5
3944
3945 int cam_sz;
3946
3947 if (CHIP_IS_E1(bp)) {
3948 /* In E1, Multicast is saved in cam... */
3949 if (!CHIP_REV_IS_SLOW(bp))
3950 cam_sz = (MAX_MAC_CREDIT_E1 / 2) - BNX2X_MAX_MULTICAST;
3951 else
3952 cam_sz = BNX2X_CAM_SIZE_EMUL - BNX2X_MAX_EMUL_MULTI;
3953
3954 bnx2x_init_credit_pool(p, func_id * cam_sz, cam_sz);
3955
3956 } else if (CHIP_IS_E1H(bp)) {
3957		/* CAM credit is equally divided between all active functions
3958		 * on the PORT.
3959		 */
3960		if (func_num > 0) {
3961 if (!CHIP_REV_IS_SLOW(bp))
3962 cam_sz = (MAX_MAC_CREDIT_E1H / (2*func_num));
3963 else
3964 cam_sz = BNX2X_CAM_SIZE_EMUL;
3965 bnx2x_init_credit_pool(p, func_id * cam_sz, cam_sz);
3966 } else {
3967 /* this should never happen! Block MAC operations. */
3968 bnx2x_init_credit_pool(p, 0, 0);
3969 }
3970
3971 } else {
3972
3973 /*
3974		 * CAM credit is equally divided between all active functions
3975		 * on the PATH.
3976		 */
3977		if (func_num > 0) {
3978 if (!CHIP_REV_IS_SLOW(bp))
3979 cam_sz = (MAX_MAC_CREDIT_E2 / func_num);
3980 else
3981 cam_sz = BNX2X_CAM_SIZE_EMUL;
3982
3983 /*
3984 * No need for CAM entries handling for 57712 and
3985 * newer.
3986 */
3987 bnx2x_init_credit_pool(p, -1, cam_sz);
3988 } else {
3989 /* this should never happen! Block MAC operations. */
3990 bnx2x_init_credit_pool(p, 0, 0);
3991 }
3992
3993 }
3994}
3995
3996void bnx2x_init_vlan_credit_pool(struct bnx2x *bp,
3997 struct bnx2x_credit_pool_obj *p,
3998 u8 func_id,
3999 u8 func_num)
4000{
4001 if (CHIP_IS_E1x(bp)) {
4002 /*
4003		 * There is no VLAN credit in HW on 57710 and 57711; only
4004		 * MAC / MAC-VLAN can be set.
4005 */
4006 bnx2x_init_credit_pool(p, 0, -1);
4007 } else {
4008 /*
4009		 * CAM credit is equally divided between all active functions
4010 * on the PATH.
4011 */
4012 if (func_num > 0) {
4013 int credit = MAX_VLAN_CREDIT_E2 / func_num;
4014 bnx2x_init_credit_pool(p, func_id * credit, credit);
4015 } else
4016 /* this should never happen! Block VLAN operations. */
4017 bnx2x_init_credit_pool(p, 0, 0);
4018 }
4019}
4020
4021/****************** RSS Configuration ******************/
4022/**
4023 * bnx2x_debug_print_ind_table - prints the indirection table configuration.
4024 *
4025 * @bp:		driver handle
4026 * @p: pointer to rss configuration
4027 *
4028 * Prints it when NETIF_MSG_IFUP debug level is configured.
4029 */
4030static inline void bnx2x_debug_print_ind_table(struct bnx2x *bp,
4031 struct bnx2x_config_rss_params *p)
4032{
4033 int i;
4034
4035 DP(BNX2X_MSG_SP, "Setting indirection table to:\n");
4036 DP(BNX2X_MSG_SP, "0x0000: ");
4037 for (i = 0; i < T_ETH_INDIRECTION_TABLE_SIZE; i++) {
4038 DP_CONT(BNX2X_MSG_SP, "0x%02x ", p->ind_table[i]);
4039
4040 /* Print 4 bytes in a line */
4041 if ((i + 1 < T_ETH_INDIRECTION_TABLE_SIZE) &&
4042 (((i + 1) & 0x3) == 0)) {
4043 DP_CONT(BNX2X_MSG_SP, "\n");
4044 DP(BNX2X_MSG_SP, "0x%04x: ", i + 1);
4045 }
4046 }
4047
4048 DP_CONT(BNX2X_MSG_SP, "\n");
4049}
4050
4051/**
4052 * bnx2x_setup_rss - configure RSS
4053 *
4054 * @bp: device handle
4055 * @p: rss configuration
4056 *
4057 * Sends an RSS UPDATE ramrod.
4058 */
4059static int bnx2x_setup_rss(struct bnx2x *bp,
4060 struct bnx2x_config_rss_params *p)
4061{
4062 struct bnx2x_rss_config_obj *o = p->rss_obj;
4063 struct bnx2x_raw_obj *r = &o->raw;
4064 struct eth_rss_update_ramrod_data *data =
4065 (struct eth_rss_update_ramrod_data *)(r->rdata);
4066 u8 rss_mode = 0;
4067 int rc;
4068
4069 memset(data, 0, sizeof(*data));
4070
4071 DP(BNX2X_MSG_SP, "Configuring RSS\n");
4072
4073 /* Set an echo field */
4074 data->echo = (r->cid & BNX2X_SWCID_MASK) |
4075 (r->state << BNX2X_SWCID_SHIFT);
4076
4077 /* RSS mode */
4078 if (test_bit(BNX2X_RSS_MODE_DISABLED, &p->rss_flags))
4079 rss_mode = ETH_RSS_MODE_DISABLED;
4080 else if (test_bit(BNX2X_RSS_MODE_REGULAR, &p->rss_flags))
4081 rss_mode = ETH_RSS_MODE_REGULAR;
4082 else if (test_bit(BNX2X_RSS_MODE_VLAN_PRI, &p->rss_flags))
4083 rss_mode = ETH_RSS_MODE_VLAN_PRI;
4084 else if (test_bit(BNX2X_RSS_MODE_E1HOV_PRI, &p->rss_flags))
4085 rss_mode = ETH_RSS_MODE_E1HOV_PRI;
4086 else if (test_bit(BNX2X_RSS_MODE_IP_DSCP, &p->rss_flags))
4087 rss_mode = ETH_RSS_MODE_IP_DSCP;
4088
4089 data->rss_mode = rss_mode;
4090
4091 DP(BNX2X_MSG_SP, "rss_mode=%d\n", rss_mode);
4092
4093 /* RSS capabilities */
4094 if (test_bit(BNX2X_RSS_IPV4, &p->rss_flags))
4095 data->capabilities |=
4096 ETH_RSS_UPDATE_RAMROD_DATA_IPV4_CAPABILITY;
4097
4098 if (test_bit(BNX2X_RSS_IPV4_TCP, &p->rss_flags))
4099 data->capabilities |=
4100 ETH_RSS_UPDATE_RAMROD_DATA_IPV4_TCP_CAPABILITY;
4101
4102 if (test_bit(BNX2X_RSS_IPV6, &p->rss_flags))
4103 data->capabilities |=
4104 ETH_RSS_UPDATE_RAMROD_DATA_IPV6_CAPABILITY;
4105
4106 if (test_bit(BNX2X_RSS_IPV6_TCP, &p->rss_flags))
4107 data->capabilities |=
4108 ETH_RSS_UPDATE_RAMROD_DATA_IPV6_TCP_CAPABILITY;
4109
4110 /* Hashing mask */
4111 data->rss_result_mask = p->rss_result_mask;
4112
4113 /* RSS engine ID */
4114 data->rss_engine_id = o->engine_id;
4115
4116 DP(BNX2X_MSG_SP, "rss_engine_id=%d\n", data->rss_engine_id);
4117
4118 /* Indirection table */
4119 memcpy(data->indirection_table, p->ind_table,
4120 T_ETH_INDIRECTION_TABLE_SIZE);
4121
4122 /* Remember the last configuration */
4123 memcpy(o->ind_table, p->ind_table, T_ETH_INDIRECTION_TABLE_SIZE);
4124
4125 /* Print the indirection table */
4126 if (netif_msg_ifup(bp))
4127 bnx2x_debug_print_ind_table(bp, p);
4128
4129 /* RSS keys */
4130 if (test_bit(BNX2X_RSS_SET_SRCH, &p->rss_flags)) {
4131 memcpy(&data->rss_key[0], &p->rss_key[0],
4132 sizeof(data->rss_key));
4133 data->capabilities |= ETH_RSS_UPDATE_RAMROD_DATA_UPDATE_RSS_KEY;
4134 }
4135
Vladislav Zolotarov53e51e22011-07-19 01:45:02 +00004136	/*
4137	 * No need for an explicit memory barrier here: we only need to
4138	 * order the write of the SPQ element against the update of the
4139	 * SPQ producer, and the producer update involves a memory read,
4140	 * so the full memory barrier inside bnx2x_sp_post() already
4141	 * provides the required ordering.
4142	 */
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03004143
4144 /* Send a ramrod */
4145 rc = bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_RSS_UPDATE, r->cid,
4146 U64_HI(r->rdata_mapping),
4147 U64_LO(r->rdata_mapping),
4148 ETH_CONNECTION_TYPE);
4149
4150 if (rc < 0)
4151 return rc;
4152
4153 return 1;
4154}
4155
4156void bnx2x_get_rss_ind_table(struct bnx2x_rss_config_obj *rss_obj,
4157 u8 *ind_table)
4158{
4159 memcpy(ind_table, rss_obj->ind_table, sizeof(rss_obj->ind_table));
4160}
4161
4162int bnx2x_config_rss(struct bnx2x *bp,
4163 struct bnx2x_config_rss_params *p)
4164{
4165 int rc;
4166 struct bnx2x_rss_config_obj *o = p->rss_obj;
4167 struct bnx2x_raw_obj *r = &o->raw;
4168
4169 /* Do nothing if only driver cleanup was requested */
4170 if (test_bit(RAMROD_DRV_CLR_ONLY, &p->ramrod_flags))
4171 return 0;
4172
4173 r->set_pending(r);
4174
4175 rc = o->config_rss(bp, p);
4176 if (rc < 0) {
4177 r->clear_pending(r);
4178 return rc;
4179 }
4180
4181 if (test_bit(RAMROD_COMP_WAIT, &p->ramrod_flags))
4182 rc = r->wait_comp(bp, r);
4183
4184 return rc;
4185}
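
/*
 * Illustrative sketch (hypothetical helper): a minimal bnx2x_config_rss()
 * caller enabling regular RSS with IPv4/IPv6 and TCP hashing. The real
 * setup lives in the driver's Rx init flow; MULTI_MASK is assumed to be
 * the 7-bit hash result mask used elsewhere in the driver.
 */
static int __bnx2x_rss_example(struct bnx2x *bp,
			       struct bnx2x_rss_config_obj *rss_obj,
			       const u8 *ind_table)
{
	struct bnx2x_config_rss_params params;

	memset(&params, 0, sizeof(params));
	params.rss_obj = rss_obj;

	/* Sleep until the RSS_UPDATE ramrod completes */
	__set_bit(RAMROD_COMP_WAIT, &params.ramrod_flags);

	/* Hash on IPv4/IPv6 headers and on TCP over both */
	__set_bit(BNX2X_RSS_MODE_REGULAR, &params.rss_flags);
	__set_bit(BNX2X_RSS_IPV4, &params.rss_flags);
	__set_bit(BNX2X_RSS_IPV4_TCP, &params.rss_flags);
	__set_bit(BNX2X_RSS_IPV6, &params.rss_flags);
	__set_bit(BNX2X_RSS_IPV6_TCP, &params.rss_flags);

	params.rss_result_mask = MULTI_MASK;
	memcpy(params.ind_table, ind_table, T_ETH_INDIRECTION_TABLE_SIZE);

	return bnx2x_config_rss(bp, &params);
}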
4186
4187
4188void bnx2x_init_rss_config_obj(struct bnx2x *bp,
4189 struct bnx2x_rss_config_obj *rss_obj,
4190 u8 cl_id, u32 cid, u8 func_id, u8 engine_id,
4191 void *rdata, dma_addr_t rdata_mapping,
4192 int state, unsigned long *pstate,
4193 bnx2x_obj_type type)
4194{
4195 bnx2x_init_raw_obj(&rss_obj->raw, cl_id, cid, func_id, rdata,
4196 rdata_mapping, state, pstate, type);
4197
4198 rss_obj->engine_id = engine_id;
4199 rss_obj->config_rss = bnx2x_setup_rss;
4200}
4201
4202/********************** Queue state object ***********************************/
4203
4204/**
4205 * bnx2x_queue_state_change - perform Queue state change transition
4206 *
4207 * @bp: device handle
4208 * @params: parameters to perform the transition
4209 *
4210 * returns 0 in case of successfully completed transition, negative error
4211 * code in case of failure, positive (EBUSY) value if there is a
4212 * completion that is still pending (possible only if RAMROD_COMP_WAIT is
4213 * not set in params->ramrod_flags for asynchronous commands).
4214 *
4215 */
4216int bnx2x_queue_state_change(struct bnx2x *bp,
4217 struct bnx2x_queue_state_params *params)
4218{
4219 struct bnx2x_queue_sp_obj *o = params->q_obj;
4220 int rc, pending_bit;
4221 unsigned long *pending = &o->pending;
4222
4223 /* Check that the requested transition is legal */
4224 if (o->check_transition(bp, o, params))
4225 return -EINVAL;
4226
4227 /* Set "pending" bit */
4228 pending_bit = o->set_pending(o, params);
4229
4230 /* Don't send a command if only driver cleanup was requested */
4231 if (test_bit(RAMROD_DRV_CLR_ONLY, &params->ramrod_flags))
4232 o->complete_cmd(bp, o, pending_bit);
4233 else {
4234 /* Send a ramrod */
4235 rc = o->send_cmd(bp, params);
4236 if (rc) {
4237 o->next_state = BNX2X_Q_STATE_MAX;
4238 clear_bit(pending_bit, pending);
4239 smp_mb__after_clear_bit();
4240 return rc;
4241 }
4242
4243 if (test_bit(RAMROD_COMP_WAIT, &params->ramrod_flags)) {
4244 rc = o->wait_comp(bp, o, pending_bit);
4245 if (rc)
4246 return rc;
4247
4248 return 0;
4249 }
4250 }
4251
4252 return !!test_bit(pending_bit, pending);
4253}
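
/*
 * Illustrative sketch (hypothetical helper): driving the state machine
 * above to ACTIVATE a queue and wait for the ramrod completion.
 */
static int __bnx2x_queue_activate_demo(struct bnx2x *bp,
				       struct bnx2x_queue_sp_obj *q_obj)
{
	struct bnx2x_queue_state_params params;

	memset(&params, 0, sizeof(params));
	params.q_obj = q_obj;
	params.cmd = BNX2X_Q_CMD_ACTIVATE;

	/* Without this flag a positive return means the completion is
	 * still pending.
	 */
	__set_bit(RAMROD_COMP_WAIT, &params.ramrod_flags);

	return bnx2x_queue_state_change(bp, &params);
}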
4254
4255
4256static int bnx2x_queue_set_pending(struct bnx2x_queue_sp_obj *obj,
4257 struct bnx2x_queue_state_params *params)
4258{
4259 enum bnx2x_queue_cmd cmd = params->cmd, bit;
4260
4261 /* ACTIVATE and DEACTIVATE commands are implemented on top of
4262	 * the UPDATE command.
4263 */
4264 if ((cmd == BNX2X_Q_CMD_ACTIVATE) ||
4265 (cmd == BNX2X_Q_CMD_DEACTIVATE))
4266 bit = BNX2X_Q_CMD_UPDATE;
4267 else
4268 bit = cmd;
4269
4270 set_bit(bit, &obj->pending);
4271 return bit;
4272}
4273
4274static int bnx2x_queue_wait_comp(struct bnx2x *bp,
4275 struct bnx2x_queue_sp_obj *o,
4276 enum bnx2x_queue_cmd cmd)
4277{
4278 return bnx2x_state_wait(bp, cmd, &o->pending);
4279}
4280
4281/**
4282 * bnx2x_queue_comp_cmd - complete the state change command.
4283 *
4284 * @bp: device handle
4285 * @o:		queue state object
4286 * @cmd:	command to complete
4287 *
4288 * Checks that the arrived completion is expected.
4289 */
4290static int bnx2x_queue_comp_cmd(struct bnx2x *bp,
4291 struct bnx2x_queue_sp_obj *o,
4292 enum bnx2x_queue_cmd cmd)
4293{
4294 unsigned long cur_pending = o->pending;
4295
4296 if (!test_and_clear_bit(cmd, &cur_pending)) {
4297 BNX2X_ERR("Bad MC reply %d for queue %d in state %d "
Ariel Elior6383c0b2011-07-14 08:31:57 +00004298 "pending 0x%lx, next_state %d\n", cmd,
4299 o->cids[BNX2X_PRIMARY_CID_INDEX],
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03004300 o->state, cur_pending, o->next_state);
4301 return -EINVAL;
4302 }
4303
Ariel Elior6383c0b2011-07-14 08:31:57 +00004304 if (o->next_tx_only >= o->max_cos)
4305		/* >= because tx-only must always be smaller than max_cos since
4306		 * the primary connection supports COS 0
4307		 */
4308		BNX2X_ERR("illegal value for next tx_only: %d. max cos was %d\n",
4309 o->next_tx_only, o->max_cos);
4310
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03004311 DP(BNX2X_MSG_SP, "Completing command %d for queue %d, "
Ariel Elior6383c0b2011-07-14 08:31:57 +00004312 "setting state to %d\n", cmd,
4313 o->cids[BNX2X_PRIMARY_CID_INDEX], o->next_state);
4314
4315 if (o->next_tx_only) /* print num tx-only if any exist */
Joe Perches94f05b02011-08-14 12:16:20 +00004316 DP(BNX2X_MSG_SP, "primary cid %d: num tx-only cons %d\n",
Ariel Elior6383c0b2011-07-14 08:31:57 +00004317 o->cids[BNX2X_PRIMARY_CID_INDEX], o->next_tx_only);
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03004318
4319 o->state = o->next_state;
Ariel Elior6383c0b2011-07-14 08:31:57 +00004320 o->num_tx_only = o->next_tx_only;
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03004321 o->next_state = BNX2X_Q_STATE_MAX;
4322
4323 /* It's important that o->state and o->next_state are
4324 * updated before o->pending.
4325 */
4326 wmb();
4327
4328 clear_bit(cmd, &o->pending);
4329 smp_mb__after_clear_bit();
4330
4331 return 0;
4332}
4333
4334static void bnx2x_q_fill_setup_data_e2(struct bnx2x *bp,
4335 struct bnx2x_queue_state_params *cmd_params,
4336 struct client_init_ramrod_data *data)
4337{
4338 struct bnx2x_queue_setup_params *params = &cmd_params->params.setup;
Vladislav Zolotarov042181f2011-06-14 01:33:39 +00004339
4340 /* Rx data */
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03004341
4342 /* IPv6 TPA supported for E2 and above only */
Vladislav Zolotarovf5219d82011-07-19 01:44:11 +00004343 data->rx.tpa_en |= test_bit(BNX2X_Q_FLG_TPA_IPV6, &params->flags) *
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03004344 CLIENT_INIT_RX_DATA_TPA_EN_IPV6;
4345}
4346
Ariel Elior6383c0b2011-07-14 08:31:57 +00004347static void bnx2x_q_fill_init_general_data(struct bnx2x *bp,
4348 struct bnx2x_queue_sp_obj *o,
4349 struct bnx2x_general_setup_params *params,
4350 struct client_init_general_data *gen_data,
4351 unsigned long *flags)
4352{
4353 gen_data->client_id = o->cl_id;
4354
4355 if (test_bit(BNX2X_Q_FLG_STATS, flags)) {
4356 gen_data->statistics_counter_id =
4357 params->stat_id;
4358 gen_data->statistics_en_flg = 1;
4359 gen_data->statistics_zero_flg =
4360 test_bit(BNX2X_Q_FLG_ZERO_STATS, flags);
4361 } else
4362 gen_data->statistics_counter_id =
4363 DISABLE_STATISTIC_COUNTER_ID_VALUE;
4364
4365 gen_data->is_fcoe_flg = test_bit(BNX2X_Q_FLG_FCOE, flags);
4366 gen_data->activate_flg = test_bit(BNX2X_Q_FLG_ACTIVE, flags);
4367 gen_data->sp_client_id = params->spcl_id;
4368 gen_data->mtu = cpu_to_le16(params->mtu);
4369 gen_data->func_id = o->func_id;
4370
4371
4372 gen_data->cos = params->cos;
4373
4374 gen_data->traffic_type =
4375 test_bit(BNX2X_Q_FLG_FCOE, flags) ?
4376 LLFC_TRAFFIC_TYPE_FCOE : LLFC_TRAFFIC_TYPE_NW;
4377
Joe Perches94f05b02011-08-14 12:16:20 +00004378 DP(BNX2X_MSG_SP, "flags: active %d, cos %d, stats en %d\n",
Ariel Elior6383c0b2011-07-14 08:31:57 +00004379 gen_data->activate_flg, gen_data->cos, gen_data->statistics_en_flg);
4380}
4381
4382static void bnx2x_q_fill_init_tx_data(struct bnx2x_queue_sp_obj *o,
4383 struct bnx2x_txq_setup_params *params,
4384 struct client_init_tx_data *tx_data,
4385 unsigned long *flags)
4386{
4387 tx_data->enforce_security_flg =
4388 test_bit(BNX2X_Q_FLG_TX_SEC, flags);
4389 tx_data->default_vlan =
4390 cpu_to_le16(params->default_vlan);
4391 tx_data->default_vlan_flg =
4392 test_bit(BNX2X_Q_FLG_DEF_VLAN, flags);
4393 tx_data->tx_switching_flg =
4394 test_bit(BNX2X_Q_FLG_TX_SWITCH, flags);
4395 tx_data->anti_spoofing_flg =
4396 test_bit(BNX2X_Q_FLG_ANTI_SPOOF, flags);
4397 tx_data->tx_status_block_id = params->fw_sb_id;
4398 tx_data->tx_sb_index_number = params->sb_cq_index;
4399 tx_data->tss_leading_client_id = params->tss_leading_cl_id;
4400
4401 tx_data->tx_bd_page_base.lo =
4402 cpu_to_le32(U64_LO(params->dscr_map));
4403 tx_data->tx_bd_page_base.hi =
4404 cpu_to_le32(U64_HI(params->dscr_map));
4405
4406 /* Don't configure any Tx switching mode during queue SETUP */
4407 tx_data->state = 0;
4408}
4409
4410static void bnx2x_q_fill_init_pause_data(struct bnx2x_queue_sp_obj *o,
4411 struct rxq_pause_params *params,
4412 struct client_init_rx_data *rx_data)
4413{
4414 /* flow control data */
4415 rx_data->cqe_pause_thr_low = cpu_to_le16(params->rcq_th_lo);
4416 rx_data->cqe_pause_thr_high = cpu_to_le16(params->rcq_th_hi);
4417 rx_data->bd_pause_thr_low = cpu_to_le16(params->bd_th_lo);
4418 rx_data->bd_pause_thr_high = cpu_to_le16(params->bd_th_hi);
4419 rx_data->sge_pause_thr_low = cpu_to_le16(params->sge_th_lo);
4420 rx_data->sge_pause_thr_high = cpu_to_le16(params->sge_th_hi);
4421 rx_data->rx_cos_mask = cpu_to_le16(params->pri_map);
4422}
4423
4424static void bnx2x_q_fill_init_rx_data(struct bnx2x_queue_sp_obj *o,
4425 struct bnx2x_rxq_setup_params *params,
4426 struct client_init_rx_data *rx_data,
4427 unsigned long *flags)
4428{
Ariel Elior6383c0b2011-07-14 08:31:57 +00004429 rx_data->tpa_en = test_bit(BNX2X_Q_FLG_TPA, flags) *
4430 CLIENT_INIT_RX_DATA_TPA_EN_IPV4;
Dmitry Kravkov621b4d62012-02-20 09:59:08 +00004431 rx_data->tpa_en |= test_bit(BNX2X_Q_FLG_TPA_GRO, flags) *
4432 CLIENT_INIT_RX_DATA_TPA_MODE;
Ariel Elior6383c0b2011-07-14 08:31:57 +00004433 rx_data->vmqueue_mode_en_flg = 0;
4434
4435 rx_data->cache_line_alignment_log_size =
4436 params->cache_line_log;
4437 rx_data->enable_dynamic_hc =
4438 test_bit(BNX2X_Q_FLG_DHC, flags);
4439 rx_data->max_sges_for_packet = params->max_sges_pkt;
4440 rx_data->client_qzone_id = params->cl_qzone_id;
4441 rx_data->max_agg_size = cpu_to_le16(params->tpa_agg_sz);
4442
4443 /* Always start in DROP_ALL mode */
4444 rx_data->state = cpu_to_le16(CLIENT_INIT_RX_DATA_UCAST_DROP_ALL |
4445 CLIENT_INIT_RX_DATA_MCAST_DROP_ALL);
4446
4447 /* We don't set drop flags */
4448 rx_data->drop_ip_cs_err_flg = 0;
4449 rx_data->drop_tcp_cs_err_flg = 0;
4450 rx_data->drop_ttl0_flg = 0;
4451 rx_data->drop_udp_cs_err_flg = 0;
4452 rx_data->inner_vlan_removal_enable_flg =
4453 test_bit(BNX2X_Q_FLG_VLAN, flags);
4454 rx_data->outer_vlan_removal_enable_flg =
4455 test_bit(BNX2X_Q_FLG_OV, flags);
4456 rx_data->status_block_id = params->fw_sb_id;
4457 rx_data->rx_sb_index_number = params->sb_cq_index;
4458 rx_data->max_tpa_queues = params->max_tpa_queues;
4459 rx_data->max_bytes_on_bd = cpu_to_le16(params->buf_sz);
4460 rx_data->sge_buff_size = cpu_to_le16(params->sge_buf_sz);
4461 rx_data->bd_page_base.lo =
4462 cpu_to_le32(U64_LO(params->dscr_map));
4463 rx_data->bd_page_base.hi =
4464 cpu_to_le32(U64_HI(params->dscr_map));
4465 rx_data->sge_page_base.lo =
4466 cpu_to_le32(U64_LO(params->sge_map));
4467 rx_data->sge_page_base.hi =
4468 cpu_to_le32(U64_HI(params->sge_map));
4469 rx_data->cqe_page_base.lo =
4470 cpu_to_le32(U64_LO(params->rcq_map));
4471 rx_data->cqe_page_base.hi =
4472 cpu_to_le32(U64_HI(params->rcq_map));
4473 rx_data->is_leading_rss = test_bit(BNX2X_Q_FLG_LEADING_RSS, flags);
4474
4475 if (test_bit(BNX2X_Q_FLG_MCAST, flags)) {
4476 rx_data->approx_mcast_engine_id = o->func_id;
4477 rx_data->is_approx_mcast = 1;
4478 }
4479
4480 rx_data->rss_engine_id = params->rss_engine_id;
4481
4482 /* silent vlan removal */
4483 rx_data->silent_vlan_removal_flg =
4484 test_bit(BNX2X_Q_FLG_SILENT_VLAN_REM, flags);
4485 rx_data->silent_vlan_value =
4486 cpu_to_le16(params->silent_removal_value);
4487 rx_data->silent_vlan_mask =
4488 cpu_to_le16(params->silent_removal_mask);
4489
4490}
4491
4492/* initialize the general, tx and rx parts of a queue object */
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03004493static void bnx2x_q_fill_setup_data_cmn(struct bnx2x *bp,
4494 struct bnx2x_queue_state_params *cmd_params,
4495 struct client_init_ramrod_data *data)
4496{
Ariel Elior6383c0b2011-07-14 08:31:57 +00004497 bnx2x_q_fill_init_general_data(bp, cmd_params->q_obj,
4498 &cmd_params->params.setup.gen_params,
4499 &data->general,
4500 &cmd_params->params.setup.flags);
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03004501
Ariel Elior6383c0b2011-07-14 08:31:57 +00004502 bnx2x_q_fill_init_tx_data(cmd_params->q_obj,
4503 &cmd_params->params.setup.txq_params,
4504 &data->tx,
4505 &cmd_params->params.setup.flags);
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03004506
Ariel Elior6383c0b2011-07-14 08:31:57 +00004507 bnx2x_q_fill_init_rx_data(cmd_params->q_obj,
4508 &cmd_params->params.setup.rxq_params,
4509 &data->rx,
4510 &cmd_params->params.setup.flags);
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03004511
Ariel Elior6383c0b2011-07-14 08:31:57 +00004512 bnx2x_q_fill_init_pause_data(cmd_params->q_obj,
4513 &cmd_params->params.setup.pause_params,
4514 &data->rx);
Vladislav Zolotarov042181f2011-06-14 01:33:39 +00004515}
4516
Ariel Elior6383c0b2011-07-14 08:31:57 +00004517/* initialize the general and tx parts of a tx-only queue object */
4518static void bnx2x_q_fill_setup_tx_only(struct bnx2x *bp,
4519 struct bnx2x_queue_state_params *cmd_params,
4520 struct tx_queue_init_ramrod_data *data)
4521{
4522 bnx2x_q_fill_init_general_data(bp, cmd_params->q_obj,
4523 &cmd_params->params.tx_only.gen_params,
4524 &data->general,
4525 &cmd_params->params.tx_only.flags);
4526
4527 bnx2x_q_fill_init_tx_data(cmd_params->q_obj,
4528 &cmd_params->params.tx_only.txq_params,
4529 &data->tx,
4530 &cmd_params->params.tx_only.flags);
4531
Joe Perches94f05b02011-08-14 12:16:20 +00004532	DP(BNX2X_MSG_SP, "cid %d, tx bd page lo %x hi %x\n", cmd_params->q_obj->cids[0],
Ariel Elior6383c0b2011-07-14 08:31:57 +00004533 data->tx.tx_bd_page_base.lo, data->tx.tx_bd_page_base.hi);
4534}
Vladislav Zolotarov042181f2011-06-14 01:33:39 +00004535
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03004536/**
4537 * bnx2x_q_init - init HW/FW queue
4538 *
4539 * @bp: device handle
4540 * @params:	queue state parameters
4541 *
4542 * HW/FW initial Queue configuration:
4543 * - HC: Rx and Tx
4544 * - CDU context validation
4545 *
4546 */
4547static inline int bnx2x_q_init(struct bnx2x *bp,
4548 struct bnx2x_queue_state_params *params)
Vladislav Zolotarov042181f2011-06-14 01:33:39 +00004549{
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03004550 struct bnx2x_queue_sp_obj *o = params->q_obj;
4551 struct bnx2x_queue_init_params *init = &params->params.init;
Vladislav Zolotarov042181f2011-06-14 01:33:39 +00004552 u16 hc_usec;
Ariel Elior6383c0b2011-07-14 08:31:57 +00004553 u8 cos;
Vladislav Zolotarov042181f2011-06-14 01:33:39 +00004554
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03004555 /* Tx HC configuration */
4556 if (test_bit(BNX2X_Q_TYPE_HAS_TX, &o->type) &&
4557 test_bit(BNX2X_Q_FLG_HC, &init->tx.flags)) {
4558 hc_usec = init->tx.hc_rate ? 1000000 / init->tx.hc_rate : 0;
4559
4560 bnx2x_update_coalesce_sb_index(bp, init->tx.fw_sb_id,
4561 init->tx.sb_cq_index,
4562 !test_bit(BNX2X_Q_FLG_HC_EN, &init->tx.flags),
Vladislav Zolotarov042181f2011-06-14 01:33:39 +00004563 hc_usec);
Vladislav Zolotarov042181f2011-06-14 01:33:39 +00004564 }
4565
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03004566 /* Rx HC configuration */
4567 if (test_bit(BNX2X_Q_TYPE_HAS_RX, &o->type) &&
4568 test_bit(BNX2X_Q_FLG_HC, &init->rx.flags)) {
4569 hc_usec = init->rx.hc_rate ? 1000000 / init->rx.hc_rate : 0;
Vladislav Zolotarov042181f2011-06-14 01:33:39 +00004570
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03004571 bnx2x_update_coalesce_sb_index(bp, init->rx.fw_sb_id,
4572 init->rx.sb_cq_index,
4573 !test_bit(BNX2X_Q_FLG_HC_EN, &init->rx.flags),
4574 hc_usec);
4575 }
4576
4577 /* Set CDU context validation values */
Ariel Elior6383c0b2011-07-14 08:31:57 +00004578 for (cos = 0; cos < o->max_cos; cos++) {
Joe Perches94f05b02011-08-14 12:16:20 +00004579 DP(BNX2X_MSG_SP, "setting context validation. cid %d, cos %d\n",
Ariel Elior6383c0b2011-07-14 08:31:57 +00004580 o->cids[cos], cos);
Joe Perches94f05b02011-08-14 12:16:20 +00004581 DP(BNX2X_MSG_SP, "context pointer %p\n", init->cxts[cos]);
Ariel Elior6383c0b2011-07-14 08:31:57 +00004582 bnx2x_set_ctx_validation(bp, init->cxts[cos], o->cids[cos]);
4583 }
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03004584
4585 /* As no ramrod is sent, complete the command immediately */
4586 o->complete_cmd(bp, o, BNX2X_Q_CMD_INIT);
4587
Vladislav Zolotarov042181f2011-06-14 01:33:39 +00004588 mmiowb();
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03004589 smp_mb();
Vladislav Zolotarov042181f2011-06-14 01:33:39 +00004590
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03004591 return 0;
4592}
Vladislav Zolotarov042181f2011-06-14 01:33:39 +00004593
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03004594static inline int bnx2x_q_send_setup_e1x(struct bnx2x *bp,
4595 struct bnx2x_queue_state_params *params)
4596{
4597 struct bnx2x_queue_sp_obj *o = params->q_obj;
4598 struct client_init_ramrod_data *rdata =
4599 (struct client_init_ramrod_data *)o->rdata;
4600 dma_addr_t data_mapping = o->rdata_mapping;
4601 int ramrod = RAMROD_CMD_ID_ETH_CLIENT_SETUP;
Vladislav Zolotarov042181f2011-06-14 01:33:39 +00004602
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03004603 /* Clear the ramrod data */
4604 memset(rdata, 0, sizeof(*rdata));
4605
4606 /* Fill the ramrod data */
4607 bnx2x_q_fill_setup_data_cmn(bp, params, rdata);
4608
Vladislav Zolotarov53e51e22011-07-19 01:45:02 +00004609	/*
4610	 * No need for an explicit memory barrier here: we only need to
4611	 * order the write of the SPQ element against the update of the
4612	 * SPQ producer, and the producer update involves a memory read,
4613	 * so the full memory barrier inside bnx2x_sp_post() already
4614	 * provides the required ordering.
4615	 */
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03004616
Ariel Elior6383c0b2011-07-14 08:31:57 +00004617 return bnx2x_sp_post(bp, ramrod, o->cids[BNX2X_PRIMARY_CID_INDEX],
4618 U64_HI(data_mapping),
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03004619 U64_LO(data_mapping), ETH_CONNECTION_TYPE);
4620}
4621
4622static inline int bnx2x_q_send_setup_e2(struct bnx2x *bp,
4623 struct bnx2x_queue_state_params *params)
4624{
4625 struct bnx2x_queue_sp_obj *o = params->q_obj;
4626 struct client_init_ramrod_data *rdata =
4627 (struct client_init_ramrod_data *)o->rdata;
4628 dma_addr_t data_mapping = o->rdata_mapping;
4629 int ramrod = RAMROD_CMD_ID_ETH_CLIENT_SETUP;
4630
4631 /* Clear the ramrod data */
4632 memset(rdata, 0, sizeof(*rdata));
4633
4634 /* Fill the ramrod data */
4635 bnx2x_q_fill_setup_data_cmn(bp, params, rdata);
4636 bnx2x_q_fill_setup_data_e2(bp, params, rdata);
4637
Vladislav Zolotarov53e51e22011-07-19 01:45:02 +00004638	/*
4639	 * No need for an explicit memory barrier here: we only need to
4640	 * order the write of the SPQ element against the update of the
4641	 * SPQ producer, and the producer update involves a memory read,
4642	 * so the full memory barrier inside bnx2x_sp_post() already
4643	 * provides the required ordering.
4644	 */
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03004645
Ariel Elior6383c0b2011-07-14 08:31:57 +00004646 return bnx2x_sp_post(bp, ramrod, o->cids[BNX2X_PRIMARY_CID_INDEX],
4647 U64_HI(data_mapping),
4648 U64_LO(data_mapping), ETH_CONNECTION_TYPE);
4649}
4650
4651static inline int bnx2x_q_send_setup_tx_only(struct bnx2x *bp,
4652 struct bnx2x_queue_state_params *params)
4653{
4654 struct bnx2x_queue_sp_obj *o = params->q_obj;
4655 struct tx_queue_init_ramrod_data *rdata =
4656 (struct tx_queue_init_ramrod_data *)o->rdata;
4657 dma_addr_t data_mapping = o->rdata_mapping;
4658 int ramrod = RAMROD_CMD_ID_ETH_TX_QUEUE_SETUP;
4659 struct bnx2x_queue_setup_tx_only_params *tx_only_params =
4660 &params->params.tx_only;
4661 u8 cid_index = tx_only_params->cid_index;
4662
4663
4664 if (cid_index >= o->max_cos) {
4665 BNX2X_ERR("queue[%d]: cid_index (%d) is out of range\n",
4666 o->cl_id, cid_index);
4667 return -EINVAL;
4668 }
4669
Joe Perches94f05b02011-08-14 12:16:20 +00004670 DP(BNX2X_MSG_SP, "parameters received: cos: %d sp-id: %d\n",
Ariel Elior6383c0b2011-07-14 08:31:57 +00004671 tx_only_params->gen_params.cos,
4672 tx_only_params->gen_params.spcl_id);
4673
4674 /* Clear the ramrod data */
4675 memset(rdata, 0, sizeof(*rdata));
4676
4677 /* Fill the ramrod data */
4678 bnx2x_q_fill_setup_tx_only(bp, params, rdata);
4679
4680	DP(BNX2X_MSG_SP, "sending tx-only ramrod: cid %d, client-id %d, "
Joe Perches94f05b02011-08-14 12:16:20 +00004681 "sp-client id %d, cos %d\n",
Ariel Elior6383c0b2011-07-14 08:31:57 +00004682 o->cids[cid_index],
4683 rdata->general.client_id,
4684 rdata->general.sp_client_id, rdata->general.cos);
4685
4686	/*
4687	 * No need for an explicit memory barrier here: we only need to
4688	 * order the write of the SPQ element against the update of the
4689	 * SPQ producer, and the producer update involves a memory read,
4690	 * so the full memory barrier inside bnx2x_sp_post() already
4691	 * provides the required ordering.
4692	 */
4693
4694 return bnx2x_sp_post(bp, ramrod, o->cids[cid_index],
4695 U64_HI(data_mapping),
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03004696 U64_LO(data_mapping), ETH_CONNECTION_TYPE);
4697}
4698
4699static void bnx2x_q_fill_update_data(struct bnx2x *bp,
4700 struct bnx2x_queue_sp_obj *obj,
4701 struct bnx2x_queue_update_params *params,
4702 struct client_update_ramrod_data *data)
4703{
4704 /* Client ID of the client to update */
4705 data->client_id = obj->cl_id;
4706
4707 /* Function ID of the client to update */
4708 data->func_id = obj->func_id;
4709
4710 /* Default VLAN value */
4711 data->default_vlan = cpu_to_le16(params->def_vlan);
4712
4713 /* Inner VLAN stripping */
4714 data->inner_vlan_removal_enable_flg =
4715 test_bit(BNX2X_Q_UPDATE_IN_VLAN_REM, &params->update_flags);
4716 data->inner_vlan_removal_change_flg =
4717 test_bit(BNX2X_Q_UPDATE_IN_VLAN_REM_CHNG,
4718 &params->update_flags);
4719
4720	/* Outer VLAN stripping */
4721 data->outer_vlan_removal_enable_flg =
4722 test_bit(BNX2X_Q_UPDATE_OUT_VLAN_REM, &params->update_flags);
4723 data->outer_vlan_removal_change_flg =
4724 test_bit(BNX2X_Q_UPDATE_OUT_VLAN_REM_CHNG,
4725 &params->update_flags);
4726
4727	/* Drop packets that have a source MAC that doesn't belong to this
4728 * Queue.
4729 */
4730 data->anti_spoofing_enable_flg =
4731 test_bit(BNX2X_Q_UPDATE_ANTI_SPOOF, &params->update_flags);
4732 data->anti_spoofing_change_flg =
4733 test_bit(BNX2X_Q_UPDATE_ANTI_SPOOF_CHNG, &params->update_flags);
4734
4735 /* Activate/Deactivate */
4736 data->activate_flg =
4737 test_bit(BNX2X_Q_UPDATE_ACTIVATE, &params->update_flags);
4738 data->activate_change_flg =
4739 test_bit(BNX2X_Q_UPDATE_ACTIVATE_CHNG, &params->update_flags);
4740
4741 /* Enable default VLAN */
4742 data->default_vlan_enable_flg =
4743 test_bit(BNX2X_Q_UPDATE_DEF_VLAN_EN, &params->update_flags);
4744 data->default_vlan_change_flg =
4745 test_bit(BNX2X_Q_UPDATE_DEF_VLAN_EN_CHNG,
4746 &params->update_flags);
4747
4748 /* silent vlan removal */
4749 data->silent_vlan_change_flg =
4750 test_bit(BNX2X_Q_UPDATE_SILENT_VLAN_REM_CHNG,
4751 &params->update_flags);
4752 data->silent_vlan_removal_flg =
4753 test_bit(BNX2X_Q_UPDATE_SILENT_VLAN_REM, &params->update_flags);
4754 data->silent_vlan_value = cpu_to_le16(params->silent_removal_value);
4755 data->silent_vlan_mask = cpu_to_le16(params->silent_removal_mask);
4756}
4757
4758static inline int bnx2x_q_send_update(struct bnx2x *bp,
4759 struct bnx2x_queue_state_params *params)
4760{
4761 struct bnx2x_queue_sp_obj *o = params->q_obj;
4762 struct client_update_ramrod_data *rdata =
4763 (struct client_update_ramrod_data *)o->rdata;
4764 dma_addr_t data_mapping = o->rdata_mapping;
Ariel Elior6383c0b2011-07-14 08:31:57 +00004765 struct bnx2x_queue_update_params *update_params =
4766 &params->params.update;
4767 u8 cid_index = update_params->cid_index;
4768
4769 if (cid_index >= o->max_cos) {
4770 BNX2X_ERR("queue[%d]: cid_index (%d) is out of range\n",
4771 o->cl_id, cid_index);
4772 return -EINVAL;
4773 }
4774
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03004775
4776 /* Clear the ramrod data */
4777 memset(rdata, 0, sizeof(*rdata));
4778
4779 /* Fill the ramrod data */
Ariel Elior6383c0b2011-07-14 08:31:57 +00004780 bnx2x_q_fill_update_data(bp, o, update_params, rdata);
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03004781
Vladislav Zolotarov53e51e22011-07-19 01:45:02 +00004782	/*
4783	 * No need for an explicit memory barrier here: we only need to
4784	 * order the write of the SPQ element against the update of the
4785	 * SPQ producer, and the producer update involves a memory read,
4786	 * so the full memory barrier inside bnx2x_sp_post() already
4787	 * provides the required ordering.
4788	 */
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03004789
Ariel Elior6383c0b2011-07-14 08:31:57 +00004790 return bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_CLIENT_UPDATE,
4791 o->cids[cid_index], U64_HI(data_mapping),
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03004792 U64_LO(data_mapping), ETH_CONNECTION_TYPE);
4793}
4794
4795/**
4796 * bnx2x_q_send_deactivate - send DEACTIVATE command
4797 *
4798 * @bp: device handle
4799 * @params:	queue state parameters
4800 *
4801 * implemented using the UPDATE command.
4802 */
4803static inline int bnx2x_q_send_deactivate(struct bnx2x *bp,
4804 struct bnx2x_queue_state_params *params)
4805{
4806 struct bnx2x_queue_update_params *update = &params->params.update;
4807
4808 memset(update, 0, sizeof(*update));
4809
4810 __set_bit(BNX2X_Q_UPDATE_ACTIVATE_CHNG, &update->update_flags);
4811
4812 return bnx2x_q_send_update(bp, params);
4813}
4814
4815/**
4816 * bnx2x_q_send_activate - send ACTIVATE command
4817 *
4818 * @bp: device handle
4819 * @params:	queue state parameters
4820 *
4821 * implemented using the UPDATE command.
4822 */
4823static inline int bnx2x_q_send_activate(struct bnx2x *bp,
4824 struct bnx2x_queue_state_params *params)
4825{
4826 struct bnx2x_queue_update_params *update = &params->params.update;
4827
4828 memset(update, 0, sizeof(*update));
4829
4830 __set_bit(BNX2X_Q_UPDATE_ACTIVATE, &update->update_flags);
4831 __set_bit(BNX2X_Q_UPDATE_ACTIVATE_CHNG, &update->update_flags);
4832
4833 return bnx2x_q_send_update(bp, params);
4834}
4835
4836static inline int bnx2x_q_send_update_tpa(struct bnx2x *bp,
4837 struct bnx2x_queue_state_params *params)
4838{
4839 /* TODO: Not implemented yet. */
4840 return -1;
4841}
4842
4843static inline int bnx2x_q_send_halt(struct bnx2x *bp,
4844 struct bnx2x_queue_state_params *params)
4845{
4846 struct bnx2x_queue_sp_obj *o = params->q_obj;
4847
Ariel Elior6383c0b2011-07-14 08:31:57 +00004848 return bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_HALT,
4849 o->cids[BNX2X_PRIMARY_CID_INDEX], 0, o->cl_id,
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03004850 ETH_CONNECTION_TYPE);
4851}
4852
4853static inline int bnx2x_q_send_cfc_del(struct bnx2x *bp,
4854 struct bnx2x_queue_state_params *params)
4855{
4856 struct bnx2x_queue_sp_obj *o = params->q_obj;
Ariel Elior6383c0b2011-07-14 08:31:57 +00004857 u8 cid_idx = params->params.cfc_del.cid_index;
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03004858
Ariel Elior6383c0b2011-07-14 08:31:57 +00004859 if (cid_idx >= o->max_cos) {
4860 BNX2X_ERR("queue[%d]: cid_index (%d) is out of range\n",
4861 o->cl_id, cid_idx);
4862 return -EINVAL;
4863 }
4864
4865 return bnx2x_sp_post(bp, RAMROD_CMD_ID_COMMON_CFC_DEL,
4866 o->cids[cid_idx], 0, 0, NONE_CONNECTION_TYPE);
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03004867}
4868
4869static inline int bnx2x_q_send_terminate(struct bnx2x *bp,
4870 struct bnx2x_queue_state_params *params)
4871{
4872 struct bnx2x_queue_sp_obj *o = params->q_obj;
Ariel Elior6383c0b2011-07-14 08:31:57 +00004873 u8 cid_index = params->params.terminate.cid_index;
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03004874
Ariel Elior6383c0b2011-07-14 08:31:57 +00004875 if (cid_index >= o->max_cos) {
4876 BNX2X_ERR("queue[%d]: cid_index (%d) is out of range\n",
4877 o->cl_id, cid_index);
4878 return -EINVAL;
4879 }
4880
4881 return bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_TERMINATE,
4882 o->cids[cid_index], 0, 0, ETH_CONNECTION_TYPE);
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03004883}
4884
4885static inline int bnx2x_q_send_empty(struct bnx2x *bp,
4886 struct bnx2x_queue_state_params *params)
4887{
4888 struct bnx2x_queue_sp_obj *o = params->q_obj;
4889
Ariel Elior6383c0b2011-07-14 08:31:57 +00004890 return bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_EMPTY,
4891 o->cids[BNX2X_PRIMARY_CID_INDEX], 0, 0,
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03004892 ETH_CONNECTION_TYPE);
4893}
4894
4895static inline int bnx2x_queue_send_cmd_cmn(struct bnx2x *bp,
4896 struct bnx2x_queue_state_params *params)
4897{
4898 switch (params->cmd) {
4899 case BNX2X_Q_CMD_INIT:
4900 return bnx2x_q_init(bp, params);
Ariel Elior6383c0b2011-07-14 08:31:57 +00004901 case BNX2X_Q_CMD_SETUP_TX_ONLY:
4902 return bnx2x_q_send_setup_tx_only(bp, params);
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03004903 case BNX2X_Q_CMD_DEACTIVATE:
4904 return bnx2x_q_send_deactivate(bp, params);
4905 case BNX2X_Q_CMD_ACTIVATE:
4906 return bnx2x_q_send_activate(bp, params);
4907 case BNX2X_Q_CMD_UPDATE:
4908 return bnx2x_q_send_update(bp, params);
4909 case BNX2X_Q_CMD_UPDATE_TPA:
4910 return bnx2x_q_send_update_tpa(bp, params);
4911 case BNX2X_Q_CMD_HALT:
4912 return bnx2x_q_send_halt(bp, params);
4913 case BNX2X_Q_CMD_CFC_DEL:
4914 return bnx2x_q_send_cfc_del(bp, params);
4915 case BNX2X_Q_CMD_TERMINATE:
4916 return bnx2x_q_send_terminate(bp, params);
4917 case BNX2X_Q_CMD_EMPTY:
4918 return bnx2x_q_send_empty(bp, params);
4919 default:
4920 BNX2X_ERR("Unknown command: %d\n", params->cmd);
4921 return -EINVAL;
4922 }
4923}
4924
4925static int bnx2x_queue_send_cmd_e1x(struct bnx2x *bp,
4926 struct bnx2x_queue_state_params *params)
4927{
4928 switch (params->cmd) {
4929 case BNX2X_Q_CMD_SETUP:
4930 return bnx2x_q_send_setup_e1x(bp, params);
4931 case BNX2X_Q_CMD_INIT:
Ariel Elior6383c0b2011-07-14 08:31:57 +00004932 case BNX2X_Q_CMD_SETUP_TX_ONLY:
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03004933 case BNX2X_Q_CMD_DEACTIVATE:
4934 case BNX2X_Q_CMD_ACTIVATE:
4935 case BNX2X_Q_CMD_UPDATE:
4936 case BNX2X_Q_CMD_UPDATE_TPA:
4937 case BNX2X_Q_CMD_HALT:
4938 case BNX2X_Q_CMD_CFC_DEL:
4939 case BNX2X_Q_CMD_TERMINATE:
4940 case BNX2X_Q_CMD_EMPTY:
4941 return bnx2x_queue_send_cmd_cmn(bp, params);
4942 default:
4943 BNX2X_ERR("Unknown command: %d\n", params->cmd);
4944 return -EINVAL;
4945 }
4946}
4947
4948static int bnx2x_queue_send_cmd_e2(struct bnx2x *bp,
4949 struct bnx2x_queue_state_params *params)
4950{
4951 switch (params->cmd) {
4952 case BNX2X_Q_CMD_SETUP:
4953 return bnx2x_q_send_setup_e2(bp, params);
4954 case BNX2X_Q_CMD_INIT:
Ariel Elior6383c0b2011-07-14 08:31:57 +00004955 case BNX2X_Q_CMD_SETUP_TX_ONLY:
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03004956 case BNX2X_Q_CMD_DEACTIVATE:
4957 case BNX2X_Q_CMD_ACTIVATE:
4958 case BNX2X_Q_CMD_UPDATE:
4959 case BNX2X_Q_CMD_UPDATE_TPA:
4960 case BNX2X_Q_CMD_HALT:
4961 case BNX2X_Q_CMD_CFC_DEL:
4962 case BNX2X_Q_CMD_TERMINATE:
4963 case BNX2X_Q_CMD_EMPTY:
4964 return bnx2x_queue_send_cmd_cmn(bp, params);
4965 default:
4966 BNX2X_ERR("Unknown command: %d\n", params->cmd);
4967 return -EINVAL;
4968 }
4969}
4970
4971/**
4972 * bnx2x_queue_chk_transition - check state machine of a regular Queue
4973 *
4974 * @bp: device handle
4975 * @o:		queue state object
4976 * @params:	queue state parameters
4977 *
4978 * (not Forwarding)
4979 * It both checks if the requested command is legal in a current
4980 * state and, if it's legal, sets a `next_state' in the object
4981 * that will be used in the completion flow to set the `state'
4982 * of the object.
4983 *
4984 * returns 0 if a requested command is a legal transition,
4985 * -EINVAL otherwise.
4986 */
4987static int bnx2x_queue_chk_transition(struct bnx2x *bp,
4988 struct bnx2x_queue_sp_obj *o,
4989 struct bnx2x_queue_state_params *params)
4990{
4991 enum bnx2x_q_state state = o->state, next_state = BNX2X_Q_STATE_MAX;
4992 enum bnx2x_queue_cmd cmd = params->cmd;
Ariel Elior6383c0b2011-07-14 08:31:57 +00004993 struct bnx2x_queue_update_params *update_params =
4994 &params->params.update;
4995 u8 next_tx_only = o->num_tx_only;
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03004996
Dmitry Kravkov6debea82011-07-19 01:42:04 +00004997 /*
4998 * Forget all pending for completion commands if a driver only state
4999 * transition has been requested.
5000 */
5001 if (test_bit(RAMROD_DRV_CLR_ONLY, &params->ramrod_flags)) {
5002 o->pending = 0;
5003 o->next_state = BNX2X_Q_STATE_MAX;
5004 }
5005
5006 /*
5007 * Don't allow a next state transition if we are in the middle of
5008 * the previous one.
5009 */
5010 if (o->pending)
5011 return -EBUSY;
5012
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03005013 switch (state) {
5014 case BNX2X_Q_STATE_RESET:
5015 if (cmd == BNX2X_Q_CMD_INIT)
5016 next_state = BNX2X_Q_STATE_INITIALIZED;
5017
5018 break;
5019 case BNX2X_Q_STATE_INITIALIZED:
5020 if (cmd == BNX2X_Q_CMD_SETUP) {
5021 if (test_bit(BNX2X_Q_FLG_ACTIVE,
5022 &params->params.setup.flags))
5023 next_state = BNX2X_Q_STATE_ACTIVE;
5024 else
5025 next_state = BNX2X_Q_STATE_INACTIVE;
5026 }
5027
5028 break;
5029 case BNX2X_Q_STATE_ACTIVE:
5030 if (cmd == BNX2X_Q_CMD_DEACTIVATE)
5031 next_state = BNX2X_Q_STATE_INACTIVE;
5032
5033 else if ((cmd == BNX2X_Q_CMD_EMPTY) ||
5034 (cmd == BNX2X_Q_CMD_UPDATE_TPA))
5035 next_state = BNX2X_Q_STATE_ACTIVE;
5036
Ariel Elior6383c0b2011-07-14 08:31:57 +00005037 else if (cmd == BNX2X_Q_CMD_SETUP_TX_ONLY) {
5038 next_state = BNX2X_Q_STATE_MULTI_COS;
5039 next_tx_only = 1;
5040 }
5041
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03005042 else if (cmd == BNX2X_Q_CMD_HALT)
5043 next_state = BNX2X_Q_STATE_STOPPED;
5044
5045 else if (cmd == BNX2X_Q_CMD_UPDATE) {
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03005046 /* If "active" state change is requested, update the
5047 * state accordingly.
5048 */
5049 if (test_bit(BNX2X_Q_UPDATE_ACTIVATE_CHNG,
5050 &update_params->update_flags) &&
5051 !test_bit(BNX2X_Q_UPDATE_ACTIVATE,
5052 &update_params->update_flags))
5053 next_state = BNX2X_Q_STATE_INACTIVE;
5054 else
5055 next_state = BNX2X_Q_STATE_ACTIVE;
5056 }
5057
5058 break;
Ariel Elior6383c0b2011-07-14 08:31:57 +00005059 case BNX2X_Q_STATE_MULTI_COS:
5060 if (cmd == BNX2X_Q_CMD_TERMINATE)
5061 next_state = BNX2X_Q_STATE_MCOS_TERMINATED;
5062
5063 else if (cmd == BNX2X_Q_CMD_SETUP_TX_ONLY) {
5064 next_state = BNX2X_Q_STATE_MULTI_COS;
5065 next_tx_only = o->num_tx_only + 1;
5066 }
5067
5068 else if ((cmd == BNX2X_Q_CMD_EMPTY) ||
5069 (cmd == BNX2X_Q_CMD_UPDATE_TPA))
5070 next_state = BNX2X_Q_STATE_MULTI_COS;
5071
5072 else if (cmd == BNX2X_Q_CMD_UPDATE) {
5073 /* If "active" state change is requested, update the
5074 * state accordingly.
5075 */
5076 if (test_bit(BNX2X_Q_UPDATE_ACTIVATE_CHNG,
5077 &update_params->update_flags) &&
5078 !test_bit(BNX2X_Q_UPDATE_ACTIVATE,
5079 &update_params->update_flags))
5080 next_state = BNX2X_Q_STATE_INACTIVE;
5081 else
5082 next_state = BNX2X_Q_STATE_MULTI_COS;
5083 }
5084
5085 break;
5086 case BNX2X_Q_STATE_MCOS_TERMINATED:
5087 if (cmd == BNX2X_Q_CMD_CFC_DEL) {
5088 next_tx_only = o->num_tx_only - 1;
5089 if (next_tx_only == 0)
5090 next_state = BNX2X_Q_STATE_ACTIVE;
5091 else
5092 next_state = BNX2X_Q_STATE_MULTI_COS;
5093 }
5094
5095 break;
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03005096 case BNX2X_Q_STATE_INACTIVE:
5097 if (cmd == BNX2X_Q_CMD_ACTIVATE)
5098 next_state = BNX2X_Q_STATE_ACTIVE;
5099
5100 else if ((cmd == BNX2X_Q_CMD_EMPTY) ||
5101 (cmd == BNX2X_Q_CMD_UPDATE_TPA))
5102 next_state = BNX2X_Q_STATE_INACTIVE;
5103
5104 else if (cmd == BNX2X_Q_CMD_HALT)
5105 next_state = BNX2X_Q_STATE_STOPPED;
5106
5107 else if (cmd == BNX2X_Q_CMD_UPDATE) {
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03005108 /* If "active" state change is requested, update the
5109 * state accordingly.
5110 */
5111 if (test_bit(BNX2X_Q_UPDATE_ACTIVATE_CHNG,
5112 &update_params->update_flags) &&
5113 test_bit(BNX2X_Q_UPDATE_ACTIVATE,
Ariel Elior6383c0b2011-07-14 08:31:57 +00005114			      &update_params->update_flags)) {
5115 if (o->num_tx_only == 0)
5116 next_state = BNX2X_Q_STATE_ACTIVE;
5117 else /* tx only queues exist for this queue */
5118 next_state = BNX2X_Q_STATE_MULTI_COS;
5119 } else
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03005120 next_state = BNX2X_Q_STATE_INACTIVE;
5121 }
5122
5123 break;
5124 case BNX2X_Q_STATE_STOPPED:
5125 if (cmd == BNX2X_Q_CMD_TERMINATE)
5126 next_state = BNX2X_Q_STATE_TERMINATED;
5127
5128 break;
5129 case BNX2X_Q_STATE_TERMINATED:
5130 if (cmd == BNX2X_Q_CMD_CFC_DEL)
5131 next_state = BNX2X_Q_STATE_RESET;
5132
5133 break;
5134 default:
5135 BNX2X_ERR("Illegal state: %d\n", state);
5136 }
5137
5138 /* Transition is assured */
5139 if (next_state != BNX2X_Q_STATE_MAX) {
5140 DP(BNX2X_MSG_SP, "Good state transition: %d(%d)->%d\n",
5141 state, cmd, next_state);
5142 o->next_state = next_state;
Ariel Elior6383c0b2011-07-14 08:31:57 +00005143 o->next_tx_only = next_tx_only;
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03005144 return 0;
5145 }
5146
5147 DP(BNX2X_MSG_SP, "Bad state transition request: %d %d\n", state, cmd);
5148
5149 return -EINVAL;
5150}
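
/*
 * Summary of the legal transitions checked above (derived from the
 * switch statement, for reference):
 *
 *   RESET --INIT--> INITIALIZED --SETUP--> ACTIVE or INACTIVE
 *   ACTIVE --SETUP_TX_ONLY--> MULTI_COS (num_tx_only grows per command)
 *   MULTI_COS --TERMINATE--> MCOS_TERMINATED --CFC_DEL--> ACTIVE or
 *       MULTI_COS, depending on the remaining tx-only count
 *   ACTIVE/INACTIVE --HALT--> STOPPED --TERMINATE--> TERMINATED
 *   TERMINATED --CFC_DEL--> RESET
 *
 * EMPTY, UPDATE_TPA and state-preserving UPDATEs keep the current state.
 */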
5151
5152void bnx2x_init_queue_obj(struct bnx2x *bp,
5153 struct bnx2x_queue_sp_obj *obj,
Ariel Elior6383c0b2011-07-14 08:31:57 +00005154 u8 cl_id, u32 *cids, u8 cid_cnt, u8 func_id,
5155 void *rdata,
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03005156 dma_addr_t rdata_mapping, unsigned long type)
5157{
5158 memset(obj, 0, sizeof(*obj));
5159
Ariel Elior6383c0b2011-07-14 08:31:57 +00005160 /* We support only BNX2X_MULTI_TX_COS Tx CoS at the moment */
5161 BUG_ON(BNX2X_MULTI_TX_COS < cid_cnt);
5162
5163 memcpy(obj->cids, cids, sizeof(obj->cids[0]) * cid_cnt);
5164 obj->max_cos = cid_cnt;
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03005165 obj->cl_id = cl_id;
5166 obj->func_id = func_id;
5167 obj->rdata = rdata;
5168 obj->rdata_mapping = rdata_mapping;
5169 obj->type = type;
5170 obj->next_state = BNX2X_Q_STATE_MAX;
5171
5172 if (CHIP_IS_E1x(bp))
5173 obj->send_cmd = bnx2x_queue_send_cmd_e1x;
5174 else
5175 obj->send_cmd = bnx2x_queue_send_cmd_e2;
5176
5177 obj->check_transition = bnx2x_queue_chk_transition;
5178
5179 obj->complete_cmd = bnx2x_queue_comp_cmd;
5180 obj->wait_comp = bnx2x_queue_wait_comp;
5181 obj->set_pending = bnx2x_queue_set_pending;
5182}
5183
Ariel Elior6383c0b2011-07-14 08:31:57 +00005184void bnx2x_queue_set_cos_cid(struct bnx2x *bp,
5185 struct bnx2x_queue_sp_obj *obj,
5186 u32 cid, u8 index)
5187{
5188 obj->cids[index] = cid;
5189}
5190
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03005191/********************** Function state object *********************************/
Dmitry Kravkov6debea82011-07-19 01:42:04 +00005192enum bnx2x_func_state bnx2x_func_get_state(struct bnx2x *bp,
5193 struct bnx2x_func_sp_obj *o)
5194{
5195	/* in the middle of a transaction - return INVALID state */
5196 if (o->pending)
5197 return BNX2X_F_STATE_MAX;
5198
5199 /*
5200	 * Ensure the order of reading of o->pending and o->state:
5201	 * o->pending should be read first.
5202 */
5203 rmb();
5204
5205 return o->state;
5206}
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03005207
5208static int bnx2x_func_wait_comp(struct bnx2x *bp,
5209 struct bnx2x_func_sp_obj *o,
5210 enum bnx2x_func_cmd cmd)
5211{
5212 return bnx2x_state_wait(bp, cmd, &o->pending);
5213}
5214
/**
 * bnx2x_func_state_change_comp - complete the state machine transition
 *
 * @bp: device handle
 * @o: function state object
 * @cmd: command which is being completed
 *
 * Called on state change transition. Completes the state
 * machine transition only - no HW interaction.
 */
static inline int bnx2x_func_state_change_comp(struct bnx2x *bp,
					       struct bnx2x_func_sp_obj *o,
					       enum bnx2x_func_cmd cmd)
{
	unsigned long cur_pending = o->pending;

	if (!test_and_clear_bit(cmd, &cur_pending)) {
		BNX2X_ERR("Bad MC reply %d for func %d in state %d "
			  "pending 0x%lx, next_state %d\n", cmd, BP_FUNC(bp),
			  o->state, cur_pending, o->next_state);
		return -EINVAL;
	}

	DP(BNX2X_MSG_SP,
	   "Completing command %d for func %d, setting state to %d\n",
	   cmd, BP_FUNC(bp), o->next_state);

	o->state = o->next_state;
	o->next_state = BNX2X_F_STATE_MAX;

	/* It's important that o->state and o->next_state are
	 * updated before o->pending: this wmb() pairs with the rmb()
	 * in bnx2x_func_get_state().
	 */
	wmb();

	clear_bit(cmd, &o->pending);
	smp_mb__after_clear_bit();

	return 0;
}

/**
 * bnx2x_func_comp_cmd - complete the state change command
 *
 * @bp: device handle
 * @o: function state object
 * @cmd: command which is being completed
 *
 * Checks that the arrived completion is expected.
 */
static int bnx2x_func_comp_cmd(struct bnx2x *bp,
			       struct bnx2x_func_sp_obj *o,
			       enum bnx2x_func_cmd cmd)
{
	/* Complete the state machine part first, check if it's a
	 * legal completion.
	 */
	int rc = bnx2x_func_state_change_comp(bp, o, cmd);
	return rc;
}

/**
 * bnx2x_func_chk_transition - perform function state machine transition
 *
 * @bp: device handle
 * @o: function state object
 * @params: state change request parameters
 *
 * It both checks if the requested command is legal in the current
 * state and, if it's legal, sets a `next_state' in the object
 * that will be used in the completion flow to set the `state'
 * of the object.
 *
 * returns 0 if a requested command is a legal transition,
 * -EINVAL otherwise.
 */
static int bnx2x_func_chk_transition(struct bnx2x *bp,
				     struct bnx2x_func_sp_obj *o,
				     struct bnx2x_func_state_params *params)
{
	enum bnx2x_func_state state = o->state, next_state = BNX2X_F_STATE_MAX;
	enum bnx2x_func_cmd cmd = params->cmd;

	/*
	 * Forget all pending completion commands if a driver-only state
	 * transition has been requested.
	 */
	if (test_bit(RAMROD_DRV_CLR_ONLY, &params->ramrod_flags)) {
		o->pending = 0;
		o->next_state = BNX2X_F_STATE_MAX;
	}

	/*
	 * Don't allow a next state transition if we are in the middle of
	 * the previous one.
	 */
	if (o->pending)
		return -EBUSY;

	switch (state) {
	case BNX2X_F_STATE_RESET:
		if (cmd == BNX2X_F_CMD_HW_INIT)
			next_state = BNX2X_F_STATE_INITIALIZED;

		break;
	case BNX2X_F_STATE_INITIALIZED:
		if (cmd == BNX2X_F_CMD_START)
			next_state = BNX2X_F_STATE_STARTED;

		else if (cmd == BNX2X_F_CMD_HW_RESET)
			next_state = BNX2X_F_STATE_RESET;

		break;
	case BNX2X_F_STATE_STARTED:
		if (cmd == BNX2X_F_CMD_STOP)
			next_state = BNX2X_F_STATE_INITIALIZED;
		else if (cmd == BNX2X_F_CMD_TX_STOP)
			next_state = BNX2X_F_STATE_TX_STOPPED;

		break;
	case BNX2X_F_STATE_TX_STOPPED:
		if (cmd == BNX2X_F_CMD_TX_START)
			next_state = BNX2X_F_STATE_STARTED;

		break;
	default:
		BNX2X_ERR("Unknown state: %d\n", state);
	}

	/* Transition is assured */
	if (next_state != BNX2X_F_STATE_MAX) {
		DP(BNX2X_MSG_SP, "Good function state transition: %d(%d)->%d\n",
		   state, cmd, next_state);
		o->next_state = next_state;
		return 0;
	}

	DP(BNX2X_MSG_SP, "Bad function state transition request: %d %d\n",
	   state, cmd);

	return -EINVAL;
}
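
/*
 * For reference, the function state transitions accepted by
 * bnx2x_func_chk_transition() above are (derived directly from the switch
 * statement; anything else is rejected with -EINVAL):
 *
 *	RESET        --HW_INIT-->   INITIALIZED
 *	INITIALIZED  --START---->   STARTED
 *	INITIALIZED  --HW_RESET->   RESET
 *	STARTED      --STOP----->   INITIALIZED
 *	STARTED      --TX_STOP-->   TX_STOPPED
 *	TX_STOPPED   --TX_START->   STARTED
 */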

/**
 * bnx2x_func_init_func - performs HW init at function stage
 *
 * @bp: device handle
 * @drv: driver specific HW init ops
 *
 * Init HW when the current phase is
 * FW_MSG_CODE_DRV_LOAD_FUNCTION: initialize FUNCTION-only
 * HW blocks.
 */
static inline int bnx2x_func_init_func(struct bnx2x *bp,
				       const struct bnx2x_func_sp_drv_ops *drv)
{
	return drv->init_hw_func(bp);
}

/**
 * bnx2x_func_init_port - performs HW init at port stage
 *
 * @bp: device handle
 * @drv: driver specific HW init ops
 *
 * Init HW when the current phase is
 * FW_MSG_CODE_DRV_LOAD_PORT: initialize PORT-only and
 * FUNCTION-only HW blocks.
 */
static inline int bnx2x_func_init_port(struct bnx2x *bp,
				       const struct bnx2x_func_sp_drv_ops *drv)
{
	int rc = drv->init_hw_port(bp);
	if (rc)
		return rc;

	return bnx2x_func_init_func(bp, drv);
}

/**
 * bnx2x_func_init_cmn_chip - performs HW init at chip-common stage
 *
 * @bp: device handle
 * @drv: driver specific HW init ops
 *
 * Init HW when the current phase is
 * FW_MSG_CODE_DRV_LOAD_COMMON_CHIP: initialize COMMON_CHIP,
 * PORT-only and FUNCTION-only HW blocks.
 */
static inline int bnx2x_func_init_cmn_chip(struct bnx2x *bp,
					   const struct bnx2x_func_sp_drv_ops *drv)
{
	int rc = drv->init_hw_cmn_chip(bp);
	if (rc)
		return rc;

	return bnx2x_func_init_port(bp, drv);
}

/**
 * bnx2x_func_init_cmn - performs HW init at common stage
 *
 * @bp: device handle
 * @drv: driver specific HW init ops
 *
 * Init HW when the current phase is
 * FW_MSG_CODE_DRV_LOAD_COMMON: initialize COMMON,
 * PORT-only and FUNCTION-only HW blocks.
 */
static inline int bnx2x_func_init_cmn(struct bnx2x *bp,
				      const struct bnx2x_func_sp_drv_ops *drv)
{
	int rc = drv->init_hw_cmn(bp);
	if (rc)
		return rc;

	return bnx2x_func_init_port(bp, drv);
}
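
/*
 * Note how the init helpers above nest: each load phase initializes its
 * own HW blocks and then falls through to the narrower stages, i.e.
 *
 *	init_cmn_chip / init_cmn  ->  init_port  ->  init_func
 *
 * so a COMMON(_CHIP) load also performs the PORT and FUNCTION stages.
 */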

static int bnx2x_func_hw_init(struct bnx2x *bp,
			      struct bnx2x_func_state_params *params)
{
	u32 load_code = params->params.hw_init.load_phase;
	struct bnx2x_func_sp_obj *o = params->f_obj;
	const struct bnx2x_func_sp_drv_ops *drv = o->drv;
	int rc = 0;

	DP(BNX2X_MSG_SP, "function %d load_code %x\n",
	   BP_ABS_FUNC(bp), load_code);

	/* Prepare buffers for unzipping the FW */
	rc = drv->gunzip_init(bp);
	if (rc)
		return rc;

	/* Prepare FW */
	rc = drv->init_fw(bp);
	if (rc) {
		BNX2X_ERR("Error loading firmware\n");
		goto init_err;
	}

	/* Handle the beginning of COMMON_XXX phases separately... */
	switch (load_code) {
	case FW_MSG_CODE_DRV_LOAD_COMMON_CHIP:
		rc = bnx2x_func_init_cmn_chip(bp, drv);
		if (rc)
			goto init_err;

		break;
	case FW_MSG_CODE_DRV_LOAD_COMMON:
		rc = bnx2x_func_init_cmn(bp, drv);
		if (rc)
			goto init_err;

		break;
	case FW_MSG_CODE_DRV_LOAD_PORT:
		rc = bnx2x_func_init_port(bp, drv);
		if (rc)
			goto init_err;

		break;
	case FW_MSG_CODE_DRV_LOAD_FUNCTION:
		rc = bnx2x_func_init_func(bp, drv);
		if (rc)
			goto init_err;

		break;
	default:
		BNX2X_ERR("Unknown load_code (0x%x) from MCP\n", load_code);
		rc = -EINVAL;
	}

init_err:
	drv->gunzip_end(bp);

	/* In case of success, complete the command immediately: no ramrods
	 * have been sent.
	 */
	if (!rc)
		o->complete_cmd(bp, o, BNX2X_F_CMD_HW_INIT);

	return rc;
}

/**
 * bnx2x_func_reset_func - reset HW at function stage
 *
 * @bp: device handle
 * @drv: driver specific HW reset ops
 *
 * Reset HW at FW_MSG_CODE_DRV_UNLOAD_FUNCTION stage: reset only
 * FUNCTION-only HW blocks.
 */
static inline void bnx2x_func_reset_func(struct bnx2x *bp,
					 const struct bnx2x_func_sp_drv_ops *drv)
{
	drv->reset_hw_func(bp);
}

/**
 * bnx2x_func_reset_port - reset HW at port stage
 *
 * @bp: device handle
 * @drv: driver specific HW reset ops
 *
 * Reset HW at FW_MSG_CODE_DRV_UNLOAD_PORT stage: reset
 * FUNCTION-only and PORT-only HW blocks.
 *
 * !!!IMPORTANT!!!
 *
 * It's important to call reset_port before reset_func() as the last thing
 * reset_func does is pf_disable(), which disables PGLUE_B and thus
 * makes any further DMAE transactions impossible.
 */
static inline void bnx2x_func_reset_port(struct bnx2x *bp,
					 const struct bnx2x_func_sp_drv_ops *drv)
{
	drv->reset_hw_port(bp);
	bnx2x_func_reset_func(bp, drv);
}

/**
 * bnx2x_func_reset_cmn - reset HW at common stage
 *
 * @bp: device handle
 * @drv: driver specific HW reset ops
 *
 * Reset HW at FW_MSG_CODE_DRV_UNLOAD_COMMON and
 * FW_MSG_CODE_DRV_UNLOAD_COMMON_CHIP stages: reset COMMON,
 * COMMON_CHIP, FUNCTION-only and PORT-only HW blocks.
 */
static inline void bnx2x_func_reset_cmn(struct bnx2x *bp,
					const struct bnx2x_func_sp_drv_ops *drv)
{
	bnx2x_func_reset_port(bp, drv);
	drv->reset_hw_cmn(bp);
}

static inline int bnx2x_func_hw_reset(struct bnx2x *bp,
				      struct bnx2x_func_state_params *params)
{
	u32 reset_phase = params->params.hw_reset.reset_phase;
	struct bnx2x_func_sp_obj *o = params->f_obj;
	const struct bnx2x_func_sp_drv_ops *drv = o->drv;

	DP(BNX2X_MSG_SP, "function %d reset_phase %x\n", BP_ABS_FUNC(bp),
	   reset_phase);

	switch (reset_phase) {
	case FW_MSG_CODE_DRV_UNLOAD_COMMON:
		bnx2x_func_reset_cmn(bp, drv);
		break;
	case FW_MSG_CODE_DRV_UNLOAD_PORT:
		bnx2x_func_reset_port(bp, drv);
		break;
	case FW_MSG_CODE_DRV_UNLOAD_FUNCTION:
		bnx2x_func_reset_func(bp, drv);
		break;
	default:
		BNX2X_ERR("Unknown reset_phase (0x%x) from MCP\n",
			  reset_phase);
		break;
	}

	/* Complete the command immediately: no ramrods have been sent. */
	o->complete_cmd(bp, o, BNX2X_F_CMD_HW_RESET);

	return 0;
}

static inline int bnx2x_func_send_start(struct bnx2x *bp,
					struct bnx2x_func_state_params *params)
{
	struct bnx2x_func_sp_obj *o = params->f_obj;
	struct function_start_data *rdata =
		(struct function_start_data *)o->rdata;
	dma_addr_t data_mapping = o->rdata_mapping;
	struct bnx2x_func_start_params *start_params = &params->params.start;

	memset(rdata, 0, sizeof(*rdata));

	/* Fill the ramrod data with provided parameters */
	rdata->function_mode = cpu_to_le16(start_params->mf_mode);
	rdata->sd_vlan_tag = start_params->sd_vlan_tag;
	rdata->path_id = BP_PATH(bp);
	rdata->network_cos_mode = start_params->network_cos_mode;

	/*
	 * No need for an explicit memory barrier here: ordering the writes
	 * to the SPQ element against the update of the SPQ producer involves
	 * a memory read, so a full memory barrier is required anyway and is
	 * already placed inside bnx2x_sp_post().
	 */

	return bnx2x_sp_post(bp, RAMROD_CMD_ID_COMMON_FUNCTION_START, 0,
			     U64_HI(data_mapping),
			     U64_LO(data_mapping), NONE_CONNECTION_TYPE);
}

static inline int bnx2x_func_send_stop(struct bnx2x *bp,
				       struct bnx2x_func_state_params *params)
{
	return bnx2x_sp_post(bp, RAMROD_CMD_ID_COMMON_FUNCTION_STOP, 0, 0, 0,
			     NONE_CONNECTION_TYPE);
}

static inline int bnx2x_func_send_tx_stop(struct bnx2x *bp,
					  struct bnx2x_func_state_params *params)
{
	return bnx2x_sp_post(bp, RAMROD_CMD_ID_COMMON_STOP_TRAFFIC, 0, 0, 0,
			     NONE_CONNECTION_TYPE);
}

static inline int bnx2x_func_send_tx_start(struct bnx2x *bp,
					   struct bnx2x_func_state_params *params)
{
	struct bnx2x_func_sp_obj *o = params->f_obj;
	struct flow_control_configuration *rdata =
		(struct flow_control_configuration *)o->rdata;
	dma_addr_t data_mapping = o->rdata_mapping;
	struct bnx2x_func_tx_start_params *tx_start_params =
		&params->params.tx_start;
	int i;

	memset(rdata, 0, sizeof(*rdata));

	rdata->dcb_enabled = tx_start_params->dcb_enabled;
	rdata->dcb_version = tx_start_params->dcb_version;
	rdata->dont_add_pri_0_en = tx_start_params->dont_add_pri_0_en;

	for (i = 0; i < ARRAY_SIZE(rdata->traffic_type_to_priority_cos); i++)
		rdata->traffic_type_to_priority_cos[i] =
			tx_start_params->traffic_type_to_priority_cos[i];

	return bnx2x_sp_post(bp, RAMROD_CMD_ID_COMMON_START_TRAFFIC, 0,
			     U64_HI(data_mapping),
			     U64_LO(data_mapping), NONE_CONNECTION_TYPE);
}

static int bnx2x_func_send_cmd(struct bnx2x *bp,
			       struct bnx2x_func_state_params *params)
{
	switch (params->cmd) {
	case BNX2X_F_CMD_HW_INIT:
		return bnx2x_func_hw_init(bp, params);
	case BNX2X_F_CMD_START:
		return bnx2x_func_send_start(bp, params);
	case BNX2X_F_CMD_STOP:
		return bnx2x_func_send_stop(bp, params);
	case BNX2X_F_CMD_HW_RESET:
		return bnx2x_func_hw_reset(bp, params);
	case BNX2X_F_CMD_TX_STOP:
		return bnx2x_func_send_tx_stop(bp, params);
	case BNX2X_F_CMD_TX_START:
		return bnx2x_func_send_tx_start(bp, params);
	default:
		BNX2X_ERR("Unknown command: %d\n", params->cmd);
		return -EINVAL;
	}
}

void bnx2x_init_func_obj(struct bnx2x *bp,
			 struct bnx2x_func_sp_obj *obj,
			 void *rdata, dma_addr_t rdata_mapping,
			 struct bnx2x_func_sp_drv_ops *drv_iface)
{
	memset(obj, 0, sizeof(*obj));

	mutex_init(&obj->one_pending_mutex);

	obj->rdata = rdata;
	obj->rdata_mapping = rdata_mapping;

	obj->send_cmd = bnx2x_func_send_cmd;
	obj->check_transition = bnx2x_func_chk_transition;
	obj->complete_cmd = bnx2x_func_comp_cmd;
	obj->wait_comp = bnx2x_func_wait_comp;

	obj->drv = drv_iface;
}
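
/*
 * Illustrative sketch only (not part of this file's flow): the function
 * object is normally set up once at driver load time with a
 * driver-supplied ops table. The bnx2x_sp()/bnx2x_sp_mapping() helpers
 * and the bnx2x_func_sp_drv ops table below are assumed to come from the
 * rest of the driver:
 *
 *	bnx2x_init_func_obj(bp, &bp->func_obj,
 *			    bnx2x_sp(bp, func_rdata),
 *			    bnx2x_sp_mapping(bp, func_rdata),
 *			    &bnx2x_func_sp_drv);
 */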

/**
 * bnx2x_func_state_change - perform Function state change transition
 *
 * @bp: device handle
 * @params: parameters to perform the transaction
 *
 * returns 0 in case of a successfully completed transition,
 * a negative error code in case of failure, or a positive
 * (EBUSY) value if there is a completion that is
 * still pending (possible only if RAMROD_COMP_WAIT is
 * not set in params->ramrod_flags for asynchronous
 * commands).
 */
int bnx2x_func_state_change(struct bnx2x *bp,
			    struct bnx2x_func_state_params *params)
{
	struct bnx2x_func_sp_obj *o = params->f_obj;
	int rc;
	enum bnx2x_func_cmd cmd = params->cmd;
	unsigned long *pending = &o->pending;

	mutex_lock(&o->one_pending_mutex);

	/* Check that the requested transition is legal */
	if (o->check_transition(bp, o, params)) {
		mutex_unlock(&o->one_pending_mutex);
		return -EINVAL;
	}

	/* Set "pending" bit */
	set_bit(cmd, pending);

	/* Don't send a command if only driver cleanup was requested */
	if (test_bit(RAMROD_DRV_CLR_ONLY, &params->ramrod_flags)) {
		bnx2x_func_state_change_comp(bp, o, cmd);
		mutex_unlock(&o->one_pending_mutex);
	} else {
		/* Send a ramrod */
		rc = o->send_cmd(bp, params);

		mutex_unlock(&o->one_pending_mutex);

		if (rc) {
			o->next_state = BNX2X_F_STATE_MAX;
			clear_bit(cmd, pending);
			smp_mb__after_clear_bit();
			return rc;
		}

		if (test_bit(RAMROD_COMP_WAIT, &params->ramrod_flags)) {
			rc = o->wait_comp(bp, o, cmd);
			if (rc)
				return rc;

			return 0;
		}
	}

	return !!test_bit(cmd, pending);
}
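
/*
 * Illustrative sketch of driving a function state change from elsewhere
 * in the driver (stopping Tx traffic here); a synchronous caller sets
 * RAMROD_COMP_WAIT so the call returns only once the ramrod completion
 * has arrived. The bp->func_obj member is assumed to hold the function
 * object initialized above:
 *
 *	struct bnx2x_func_state_params func_params = {NULL};
 *	int rc;
 *
 *	func_params.f_obj = &bp->func_obj;
 *	func_params.cmd = BNX2X_F_CMD_TX_STOP;
 *	__set_bit(RAMROD_COMP_WAIT, &func_params.ramrod_flags);
 *
 *	rc = bnx2x_func_state_change(bp, &func_params);
 */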