blob: 9f2637c295c8d3421e92a175578995296da2a5be [file] [log] [blame]
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03001/* bnx2x_sp.c: Broadcom Everest network driver.
2 *
Yuval Mintz247fa822013-01-14 05:11:50 +00003 * Copyright (c) 2011-2013 Broadcom Corporation
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03004 *
5 * Unless you and Broadcom execute a separate written software license
6 * agreement governing use of this software, this software is licensed to you
7 * under the terms of the GNU General Public License version 2, available
8 * at http://www.gnu.org/licenses/old-licenses/gpl-2.0.html (the "GPL").
9 *
10 * Notwithstanding the above, under no circumstances may you combine this
11 * software in any way with any other Broadcom software provided under a
12 * license other than the GPL, without Broadcom's express prior written
13 * consent.
14 *
15 * Maintained by: Eilon Greenstein <eilong@broadcom.com>
16 * Written by: Vladislav Zolotarov
17 *
18 */
Joe Perchesf1deab52011-08-14 12:16:21 +000019
20#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
21
Vladislav Zolotarov042181f2011-06-14 01:33:39 +000022#include <linux/module.h>
23#include <linux/crc32.h>
24#include <linux/netdevice.h>
25#include <linux/etherdevice.h>
26#include <linux/crc32c.h>
27#include "bnx2x.h"
28#include "bnx2x_cmn.h"
29#include "bnx2x_sp.h"
30
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +030031#define BNX2X_MAX_EMUL_MULTI 16
32
Ariel Eliored5162a2011-12-05 21:52:24 +000033#define MAC_LEADING_ZERO_CNT (ALIGN(ETH_ALEN, sizeof(u32)) - ETH_ALEN)
34
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +030035/**** Exe Queue interfaces ****/
Vladislav Zolotarov042181f2011-06-14 01:33:39 +000036
37/**
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +030038 * bnx2x_exe_queue_init - init the Exe Queue object
39 *
40 * @o: poiter to the object
41 * @exe_len: length
42 * @owner: poiter to the owner
43 * @validate: validate function pointer
44 * @optimize: optimize function pointer
45 * @exec: execute function pointer
46 * @get: get function pointer
47 */
48static inline void bnx2x_exe_queue_init(struct bnx2x *bp,
49 struct bnx2x_exe_queue_obj *o,
50 int exe_len,
51 union bnx2x_qable_obj *owner,
52 exe_q_validate validate,
Yuval Mintz460a25c2012-01-23 07:31:51 +000053 exe_q_remove remove,
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +030054 exe_q_optimize optimize,
55 exe_q_execute exec,
56 exe_q_get get)
57{
58 memset(o, 0, sizeof(*o));
59
60 INIT_LIST_HEAD(&o->exe_queue);
61 INIT_LIST_HEAD(&o->pending_comp);
62
63 spin_lock_init(&o->lock);
64
65 o->exe_chunk_len = exe_len;
66 o->owner = owner;
67
68 /* Owner specific callbacks */
69 o->validate = validate;
Yuval Mintz460a25c2012-01-23 07:31:51 +000070 o->remove = remove;
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +030071 o->optimize = optimize;
72 o->execute = exec;
73 o->get = get;
74
Merav Sicron51c1a582012-03-18 10:33:38 +000075 DP(BNX2X_MSG_SP, "Setup the execution queue with the chunk length of %d\n",
76 exe_len);
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +030077}
78
79static inline void bnx2x_exe_queue_free_elem(struct bnx2x *bp,
80 struct bnx2x_exeq_elem *elem)
81{
82 DP(BNX2X_MSG_SP, "Deleting an exe_queue element\n");
83 kfree(elem);
84}
85
86static inline int bnx2x_exe_queue_length(struct bnx2x_exe_queue_obj *o)
87{
88 struct bnx2x_exeq_elem *elem;
89 int cnt = 0;
90
91 spin_lock_bh(&o->lock);
92
93 list_for_each_entry(elem, &o->exe_queue, link)
94 cnt++;
95
96 spin_unlock_bh(&o->lock);
97
98 return cnt;
99}
100
101/**
102 * bnx2x_exe_queue_add - add a new element to the execution queue
Vladislav Zolotarov042181f2011-06-14 01:33:39 +0000103 *
104 * @bp: driver handle
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +0300105 * @o: queue
106 * @cmd: new command to add
107 * @restore: true - do not optimize the command
Vladislav Zolotarov042181f2011-06-14 01:33:39 +0000108 *
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +0300109 * If the element is optimized or is illegal, frees it.
Vladislav Zolotarov042181f2011-06-14 01:33:39 +0000110 */
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +0300111static inline int bnx2x_exe_queue_add(struct bnx2x *bp,
112 struct bnx2x_exe_queue_obj *o,
113 struct bnx2x_exeq_elem *elem,
114 bool restore)
Vladislav Zolotarov042181f2011-06-14 01:33:39 +0000115{
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +0300116 int rc;
Vladislav Zolotarov042181f2011-06-14 01:33:39 +0000117
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +0300118 spin_lock_bh(&o->lock);
Vladislav Zolotarov042181f2011-06-14 01:33:39 +0000119
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +0300120 if (!restore) {
121 /* Try to cancel this element queue */
122 rc = o->optimize(bp, o->owner, elem);
123 if (rc)
124 goto free_and_exit;
Vladislav Zolotarov042181f2011-06-14 01:33:39 +0000125
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +0300126 /* Check if this request is ok */
127 rc = o->validate(bp, o->owner, elem);
128 if (rc) {
Dmitry Kravkov2384d6a2012-10-16 01:28:27 +0000129 DP(BNX2X_MSG_SP, "Preamble failed: %d\n", rc);
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +0300130 goto free_and_exit;
Vladislav Zolotarov042181f2011-06-14 01:33:39 +0000131 }
132 }
133
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +0300134 /* If so, add it to the execution queue */
135 list_add_tail(&elem->link, &o->exe_queue);
Vladislav Zolotarov042181f2011-06-14 01:33:39 +0000136
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +0300137 spin_unlock_bh(&o->lock);
138
139 return 0;
140
141free_and_exit:
142 bnx2x_exe_queue_free_elem(bp, elem);
143
144 spin_unlock_bh(&o->lock);
145
146 return rc;
147
148}
149
150static inline void __bnx2x_exe_queue_reset_pending(
151 struct bnx2x *bp,
152 struct bnx2x_exe_queue_obj *o)
153{
154 struct bnx2x_exeq_elem *elem;
155
156 while (!list_empty(&o->pending_comp)) {
157 elem = list_first_entry(&o->pending_comp,
158 struct bnx2x_exeq_elem, link);
159
160 list_del(&elem->link);
161 bnx2x_exe_queue_free_elem(bp, elem);
162 }
163}
164
165static inline void bnx2x_exe_queue_reset_pending(struct bnx2x *bp,
166 struct bnx2x_exe_queue_obj *o)
167{
168
169 spin_lock_bh(&o->lock);
170
171 __bnx2x_exe_queue_reset_pending(bp, o);
172
173 spin_unlock_bh(&o->lock);
174
175}
176
177/**
178 * bnx2x_exe_queue_step - execute one execution chunk atomically
179 *
180 * @bp: driver handle
181 * @o: queue
182 * @ramrod_flags: flags
183 *
184 * (Atomicy is ensured using the exe_queue->lock).
185 */
186static inline int bnx2x_exe_queue_step(struct bnx2x *bp,
187 struct bnx2x_exe_queue_obj *o,
188 unsigned long *ramrod_flags)
189{
190 struct bnx2x_exeq_elem *elem, spacer;
191 int cur_len = 0, rc;
192
193 memset(&spacer, 0, sizeof(spacer));
194
195 spin_lock_bh(&o->lock);
196
197 /*
198 * Next step should not be performed until the current is finished,
199 * unless a DRV_CLEAR_ONLY bit is set. In this case we just want to
200 * properly clear object internals without sending any command to the FW
201 * which also implies there won't be any completion to clear the
202 * 'pending' list.
Vladislav Zolotarov042181f2011-06-14 01:33:39 +0000203 */
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +0300204 if (!list_empty(&o->pending_comp)) {
205 if (test_bit(RAMROD_DRV_CLR_ONLY, ramrod_flags)) {
Merav Sicron51c1a582012-03-18 10:33:38 +0000206 DP(BNX2X_MSG_SP, "RAMROD_DRV_CLR_ONLY requested: resetting a pending_comp list\n");
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +0300207 __bnx2x_exe_queue_reset_pending(bp, o);
208 } else {
209 spin_unlock_bh(&o->lock);
210 return 1;
211 }
Vladislav Zolotarov042181f2011-06-14 01:33:39 +0000212 }
213
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +0300214 /*
215 * Run through the pending commands list and create a next
216 * execution chunk.
217 */
218 while (!list_empty(&o->exe_queue)) {
219 elem = list_first_entry(&o->exe_queue, struct bnx2x_exeq_elem,
220 link);
221 WARN_ON(!elem->cmd_len);
222
223 if (cur_len + elem->cmd_len <= o->exe_chunk_len) {
224 cur_len += elem->cmd_len;
225 /*
226 * Prevent from both lists being empty when moving an
227 * element. This will allow the call of
228 * bnx2x_exe_queue_empty() without locking.
229 */
230 list_add_tail(&spacer.link, &o->pending_comp);
231 mb();
Wei Yongjun7933aa52012-09-04 21:06:55 +0000232 list_move_tail(&elem->link, &o->pending_comp);
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +0300233 list_del(&spacer.link);
234 } else
235 break;
236 }
237
238 /* Sanity check */
239 if (!cur_len) {
240 spin_unlock_bh(&o->lock);
241 return 0;
242 }
243
244 rc = o->execute(bp, o->owner, &o->pending_comp, ramrod_flags);
245 if (rc < 0)
246 /*
247 * In case of an error return the commands back to the queue
248 * and reset the pending_comp.
249 */
250 list_splice_init(&o->pending_comp, &o->exe_queue);
251 else if (!rc)
252 /*
253 * If zero is returned, means there are no outstanding pending
254 * completions and we may dismiss the pending list.
255 */
256 __bnx2x_exe_queue_reset_pending(bp, o);
257
258 spin_unlock_bh(&o->lock);
259 return rc;
260}
261
262static inline bool bnx2x_exe_queue_empty(struct bnx2x_exe_queue_obj *o)
263{
264 bool empty = list_empty(&o->exe_queue);
265
266 /* Don't reorder!!! */
267 mb();
268
269 return empty && list_empty(&o->pending_comp);
270}
271
272static inline struct bnx2x_exeq_elem *bnx2x_exe_queue_alloc_elem(
273 struct bnx2x *bp)
274{
275 DP(BNX2X_MSG_SP, "Allocating a new exe_queue element\n");
276 return kzalloc(sizeof(struct bnx2x_exeq_elem), GFP_ATOMIC);
277}
278
279/************************ raw_obj functions ***********************************/
280static bool bnx2x_raw_check_pending(struct bnx2x_raw_obj *o)
281{
282 return !!test_bit(o->state, o->pstate);
283}
284
285static void bnx2x_raw_clear_pending(struct bnx2x_raw_obj *o)
286{
287 smp_mb__before_clear_bit();
288 clear_bit(o->state, o->pstate);
289 smp_mb__after_clear_bit();
290}
291
292static void bnx2x_raw_set_pending(struct bnx2x_raw_obj *o)
293{
294 smp_mb__before_clear_bit();
295 set_bit(o->state, o->pstate);
296 smp_mb__after_clear_bit();
297}
298
299/**
300 * bnx2x_state_wait - wait until the given bit(state) is cleared
301 *
302 * @bp: device handle
303 * @state: state which is to be cleared
304 * @state_p: state buffer
305 *
306 */
307static inline int bnx2x_state_wait(struct bnx2x *bp, int state,
308 unsigned long *pstate)
309{
310 /* can take a while if any port is running */
311 int cnt = 5000;
312
313
314 if (CHIP_REV_IS_EMUL(bp))
315 cnt *= 20;
316
317 DP(BNX2X_MSG_SP, "waiting for state to become %d\n", state);
318
319 might_sleep();
320 while (cnt--) {
321 if (!test_bit(state, pstate)) {
322#ifdef BNX2X_STOP_ON_ERROR
323 DP(BNX2X_MSG_SP, "exit (cnt %d)\n", 5000 - cnt);
324#endif
325 return 0;
326 }
327
Yuval Mintz0926d492013-01-23 03:21:45 +0000328 usleep_range(1000, 2000);
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +0300329
330 if (bp->panic)
331 return -EIO;
332 }
333
334 /* timeout! */
335 BNX2X_ERR("timeout waiting for state %d\n", state);
336#ifdef BNX2X_STOP_ON_ERROR
337 bnx2x_panic();
338#endif
339
340 return -EBUSY;
341}
342
343static int bnx2x_raw_wait(struct bnx2x *bp, struct bnx2x_raw_obj *raw)
344{
345 return bnx2x_state_wait(bp, raw->state, raw->pstate);
346}
347
348/***************** Classification verbs: Set/Del MAC/VLAN/VLAN-MAC ************/
349/* credit handling callbacks */
350static bool bnx2x_get_cam_offset_mac(struct bnx2x_vlan_mac_obj *o, int *offset)
351{
352 struct bnx2x_credit_pool_obj *mp = o->macs_pool;
353
354 WARN_ON(!mp);
355
356 return mp->get_entry(mp, offset);
357}
358
359static bool bnx2x_get_credit_mac(struct bnx2x_vlan_mac_obj *o)
360{
361 struct bnx2x_credit_pool_obj *mp = o->macs_pool;
362
363 WARN_ON(!mp);
364
365 return mp->get(mp, 1);
366}
367
368static bool bnx2x_get_cam_offset_vlan(struct bnx2x_vlan_mac_obj *o, int *offset)
369{
370 struct bnx2x_credit_pool_obj *vp = o->vlans_pool;
371
372 WARN_ON(!vp);
373
374 return vp->get_entry(vp, offset);
375}
376
377static bool bnx2x_get_credit_vlan(struct bnx2x_vlan_mac_obj *o)
378{
379 struct bnx2x_credit_pool_obj *vp = o->vlans_pool;
380
381 WARN_ON(!vp);
382
383 return vp->get(vp, 1);
384}
385
386static bool bnx2x_get_credit_vlan_mac(struct bnx2x_vlan_mac_obj *o)
387{
388 struct bnx2x_credit_pool_obj *mp = o->macs_pool;
389 struct bnx2x_credit_pool_obj *vp = o->vlans_pool;
390
391 if (!mp->get(mp, 1))
392 return false;
393
394 if (!vp->get(vp, 1)) {
395 mp->put(mp, 1);
396 return false;
397 }
398
399 return true;
400}
401
402static bool bnx2x_put_cam_offset_mac(struct bnx2x_vlan_mac_obj *o, int offset)
403{
404 struct bnx2x_credit_pool_obj *mp = o->macs_pool;
405
406 return mp->put_entry(mp, offset);
407}
408
409static bool bnx2x_put_credit_mac(struct bnx2x_vlan_mac_obj *o)
410{
411 struct bnx2x_credit_pool_obj *mp = o->macs_pool;
412
413 return mp->put(mp, 1);
414}
415
416static bool bnx2x_put_cam_offset_vlan(struct bnx2x_vlan_mac_obj *o, int offset)
417{
418 struct bnx2x_credit_pool_obj *vp = o->vlans_pool;
419
420 return vp->put_entry(vp, offset);
421}
422
423static bool bnx2x_put_credit_vlan(struct bnx2x_vlan_mac_obj *o)
424{
425 struct bnx2x_credit_pool_obj *vp = o->vlans_pool;
426
427 return vp->put(vp, 1);
428}
429
430static bool bnx2x_put_credit_vlan_mac(struct bnx2x_vlan_mac_obj *o)
431{
432 struct bnx2x_credit_pool_obj *mp = o->macs_pool;
433 struct bnx2x_credit_pool_obj *vp = o->vlans_pool;
434
435 if (!mp->put(mp, 1))
436 return false;
437
438 if (!vp->put(vp, 1)) {
439 mp->get(mp, 1);
440 return false;
441 }
442
443 return true;
444}
445
Ariel Eliored5162a2011-12-05 21:52:24 +0000446static int bnx2x_get_n_elements(struct bnx2x *bp, struct bnx2x_vlan_mac_obj *o,
447 int n, u8 *buf)
448{
449 struct bnx2x_vlan_mac_registry_elem *pos;
450 u8 *next = buf;
451 int counter = 0;
452
453 /* traverse list */
454 list_for_each_entry(pos, &o->head, link) {
455 if (counter < n) {
456 /* place leading zeroes in buffer */
457 memset(next, 0, MAC_LEADING_ZERO_CNT);
458
459 /* place mac after leading zeroes*/
460 memcpy(next + MAC_LEADING_ZERO_CNT, pos->u.mac.mac,
461 ETH_ALEN);
462
463 /* calculate address of next element and
464 * advance counter
465 */
466 counter++;
467 next = buf + counter * ALIGN(ETH_ALEN, sizeof(u32));
468
469 DP(BNX2X_MSG_SP, "copied element number %d to address %p element was %pM\n",
470 counter, next, pos->u.mac.mac);
471 }
472 }
473 return counter * ETH_ALEN;
474}
475
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +0300476/* check_add() callbacks */
Merav Sicron51c1a582012-03-18 10:33:38 +0000477static int bnx2x_check_mac_add(struct bnx2x *bp,
478 struct bnx2x_vlan_mac_obj *o,
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +0300479 union bnx2x_classification_ramrod_data *data)
480{
481 struct bnx2x_vlan_mac_registry_elem *pos;
482
Merav Sicron51c1a582012-03-18 10:33:38 +0000483 DP(BNX2X_MSG_SP, "Checking MAC %pM for ADD command\n", data->mac.mac);
484
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +0300485 if (!is_valid_ether_addr(data->mac.mac))
486 return -EINVAL;
487
488 /* Check if a requested MAC already exists */
489 list_for_each_entry(pos, &o->head, link)
490 if (!memcmp(data->mac.mac, pos->u.mac.mac, ETH_ALEN))
491 return -EEXIST;
Vladislav Zolotarov042181f2011-06-14 01:33:39 +0000492
493 return 0;
494}
495
Merav Sicron51c1a582012-03-18 10:33:38 +0000496static int bnx2x_check_vlan_add(struct bnx2x *bp,
497 struct bnx2x_vlan_mac_obj *o,
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +0300498 union bnx2x_classification_ramrod_data *data)
Vladislav Zolotarov042181f2011-06-14 01:33:39 +0000499{
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +0300500 struct bnx2x_vlan_mac_registry_elem *pos;
Vladislav Zolotarov042181f2011-06-14 01:33:39 +0000501
Merav Sicron51c1a582012-03-18 10:33:38 +0000502 DP(BNX2X_MSG_SP, "Checking VLAN %d for ADD command\n", data->vlan.vlan);
503
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +0300504 list_for_each_entry(pos, &o->head, link)
505 if (data->vlan.vlan == pos->u.vlan.vlan)
506 return -EEXIST;
507
508 return 0;
Vladislav Zolotarov042181f2011-06-14 01:33:39 +0000509}
510
Merav Sicron51c1a582012-03-18 10:33:38 +0000511static int bnx2x_check_vlan_mac_add(struct bnx2x *bp,
512 struct bnx2x_vlan_mac_obj *o,
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +0300513 union bnx2x_classification_ramrod_data *data)
Vladislav Zolotarov042181f2011-06-14 01:33:39 +0000514{
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +0300515 struct bnx2x_vlan_mac_registry_elem *pos;
Vladislav Zolotarov042181f2011-06-14 01:33:39 +0000516
Merav Sicron51c1a582012-03-18 10:33:38 +0000517 DP(BNX2X_MSG_SP, "Checking VLAN_MAC (%pM, %d) for ADD command\n",
518 data->vlan_mac.mac, data->vlan_mac.vlan);
519
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +0300520 list_for_each_entry(pos, &o->head, link)
521 if ((data->vlan_mac.vlan == pos->u.vlan_mac.vlan) &&
522 (!memcmp(data->vlan_mac.mac, pos->u.vlan_mac.mac,
523 ETH_ALEN)))
524 return -EEXIST;
525
526 return 0;
527}
528
529
530/* check_del() callbacks */
531static struct bnx2x_vlan_mac_registry_elem *
Merav Sicron51c1a582012-03-18 10:33:38 +0000532 bnx2x_check_mac_del(struct bnx2x *bp,
533 struct bnx2x_vlan_mac_obj *o,
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +0300534 union bnx2x_classification_ramrod_data *data)
535{
536 struct bnx2x_vlan_mac_registry_elem *pos;
537
Merav Sicron51c1a582012-03-18 10:33:38 +0000538 DP(BNX2X_MSG_SP, "Checking MAC %pM for DEL command\n", data->mac.mac);
539
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +0300540 list_for_each_entry(pos, &o->head, link)
541 if (!memcmp(data->mac.mac, pos->u.mac.mac, ETH_ALEN))
542 return pos;
543
544 return NULL;
545}
546
547static struct bnx2x_vlan_mac_registry_elem *
Merav Sicron51c1a582012-03-18 10:33:38 +0000548 bnx2x_check_vlan_del(struct bnx2x *bp,
549 struct bnx2x_vlan_mac_obj *o,
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +0300550 union bnx2x_classification_ramrod_data *data)
551{
552 struct bnx2x_vlan_mac_registry_elem *pos;
553
Merav Sicron51c1a582012-03-18 10:33:38 +0000554 DP(BNX2X_MSG_SP, "Checking VLAN %d for DEL command\n", data->vlan.vlan);
555
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +0300556 list_for_each_entry(pos, &o->head, link)
557 if (data->vlan.vlan == pos->u.vlan.vlan)
558 return pos;
559
560 return NULL;
561}
562
563static struct bnx2x_vlan_mac_registry_elem *
Merav Sicron51c1a582012-03-18 10:33:38 +0000564 bnx2x_check_vlan_mac_del(struct bnx2x *bp,
565 struct bnx2x_vlan_mac_obj *o,
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +0300566 union bnx2x_classification_ramrod_data *data)
567{
568 struct bnx2x_vlan_mac_registry_elem *pos;
569
Merav Sicron51c1a582012-03-18 10:33:38 +0000570 DP(BNX2X_MSG_SP, "Checking VLAN_MAC (%pM, %d) for DEL command\n",
571 data->vlan_mac.mac, data->vlan_mac.vlan);
572
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +0300573 list_for_each_entry(pos, &o->head, link)
574 if ((data->vlan_mac.vlan == pos->u.vlan_mac.vlan) &&
575 (!memcmp(data->vlan_mac.mac, pos->u.vlan_mac.mac,
576 ETH_ALEN)))
577 return pos;
578
579 return NULL;
580}
581
582/* check_move() callback */
Merav Sicron51c1a582012-03-18 10:33:38 +0000583static bool bnx2x_check_move(struct bnx2x *bp,
584 struct bnx2x_vlan_mac_obj *src_o,
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +0300585 struct bnx2x_vlan_mac_obj *dst_o,
586 union bnx2x_classification_ramrod_data *data)
587{
588 struct bnx2x_vlan_mac_registry_elem *pos;
589 int rc;
590
591 /* Check if we can delete the requested configuration from the first
592 * object.
593 */
Merav Sicron51c1a582012-03-18 10:33:38 +0000594 pos = src_o->check_del(bp, src_o, data);
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +0300595
596 /* check if configuration can be added */
Merav Sicron51c1a582012-03-18 10:33:38 +0000597 rc = dst_o->check_add(bp, dst_o, data);
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +0300598
599 /* If this classification can not be added (is already set)
600 * or can't be deleted - return an error.
601 */
602 if (rc || !pos)
603 return false;
604
605 return true;
606}
607
608static bool bnx2x_check_move_always_err(
Merav Sicron51c1a582012-03-18 10:33:38 +0000609 struct bnx2x *bp,
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +0300610 struct bnx2x_vlan_mac_obj *src_o,
611 struct bnx2x_vlan_mac_obj *dst_o,
612 union bnx2x_classification_ramrod_data *data)
613{
614 return false;
615}
616
617
618static inline u8 bnx2x_vlan_mac_get_rx_tx_flag(struct bnx2x_vlan_mac_obj *o)
619{
620 struct bnx2x_raw_obj *raw = &o->raw;
621 u8 rx_tx_flag = 0;
622
623 if ((raw->obj_type == BNX2X_OBJ_TYPE_TX) ||
624 (raw->obj_type == BNX2X_OBJ_TYPE_RX_TX))
625 rx_tx_flag |= ETH_CLASSIFY_CMD_HEADER_TX_CMD;
626
627 if ((raw->obj_type == BNX2X_OBJ_TYPE_RX) ||
628 (raw->obj_type == BNX2X_OBJ_TYPE_RX_TX))
629 rx_tx_flag |= ETH_CLASSIFY_CMD_HEADER_RX_CMD;
630
631 return rx_tx_flag;
632}
633
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +0300634
Barak Witkowskia3348722012-04-23 03:04:46 +0000635void bnx2x_set_mac_in_nig(struct bnx2x *bp,
636 bool add, unsigned char *dev_addr, int index)
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +0300637{
638 u32 wb_data[2];
639 u32 reg_offset = BP_PORT(bp) ? NIG_REG_LLH1_FUNC_MEM :
640 NIG_REG_LLH0_FUNC_MEM;
641
Barak Witkowskia3348722012-04-23 03:04:46 +0000642 if (!IS_MF_SI(bp) && !IS_MF_AFEX(bp))
643 return;
644
645 if (index > BNX2X_LLH_CAM_MAX_PF_LINE)
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +0300646 return;
647
648 DP(BNX2X_MSG_SP, "Going to %s LLH configuration at entry %d\n",
649 (add ? "ADD" : "DELETE"), index);
650
651 if (add) {
652 /* LLH_FUNC_MEM is a u64 WB register */
653 reg_offset += 8*index;
654
655 wb_data[0] = ((dev_addr[2] << 24) | (dev_addr[3] << 16) |
656 (dev_addr[4] << 8) | dev_addr[5]);
657 wb_data[1] = ((dev_addr[0] << 8) | dev_addr[1]);
658
659 REG_WR_DMAE(bp, reg_offset, wb_data, 2);
660 }
661
662 REG_WR(bp, (BP_PORT(bp) ? NIG_REG_LLH1_FUNC_MEM_ENABLE :
663 NIG_REG_LLH0_FUNC_MEM_ENABLE) + 4*index, add);
664}
665
666/**
667 * bnx2x_vlan_mac_set_cmd_hdr_e2 - set a header in a single classify ramrod
668 *
669 * @bp: device handle
670 * @o: queue for which we want to configure this rule
671 * @add: if true the command is an ADD command, DEL otherwise
672 * @opcode: CLASSIFY_RULE_OPCODE_XXX
673 * @hdr: pointer to a header to setup
674 *
675 */
676static inline void bnx2x_vlan_mac_set_cmd_hdr_e2(struct bnx2x *bp,
677 struct bnx2x_vlan_mac_obj *o, bool add, int opcode,
678 struct eth_classify_cmd_header *hdr)
679{
680 struct bnx2x_raw_obj *raw = &o->raw;
681
682 hdr->client_id = raw->cl_id;
683 hdr->func_id = raw->func_id;
684
685 /* Rx or/and Tx (internal switching) configuration ? */
686 hdr->cmd_general_data |=
687 bnx2x_vlan_mac_get_rx_tx_flag(o);
688
689 if (add)
690 hdr->cmd_general_data |= ETH_CLASSIFY_CMD_HEADER_IS_ADD;
691
692 hdr->cmd_general_data |=
693 (opcode << ETH_CLASSIFY_CMD_HEADER_OPCODE_SHIFT);
694}
695
696/**
697 * bnx2x_vlan_mac_set_rdata_hdr_e2 - set the classify ramrod data header
698 *
699 * @cid: connection id
700 * @type: BNX2X_FILTER_XXX_PENDING
701 * @hdr: poiter to header to setup
702 * @rule_cnt:
703 *
704 * currently we always configure one rule and echo field to contain a CID and an
705 * opcode type.
706 */
707static inline void bnx2x_vlan_mac_set_rdata_hdr_e2(u32 cid, int type,
708 struct eth_classify_header *hdr, int rule_cnt)
709{
Yuval Mintz86564c32013-01-23 03:21:50 +0000710 hdr->echo = cpu_to_le32((cid & BNX2X_SWCID_MASK) |
711 (type << BNX2X_SWCID_SHIFT));
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +0300712 hdr->rule_cnt = (u8)rule_cnt;
713}
714
715
716/* hw_config() callbacks */
717static void bnx2x_set_one_mac_e2(struct bnx2x *bp,
718 struct bnx2x_vlan_mac_obj *o,
719 struct bnx2x_exeq_elem *elem, int rule_idx,
720 int cam_offset)
721{
722 struct bnx2x_raw_obj *raw = &o->raw;
723 struct eth_classify_rules_ramrod_data *data =
724 (struct eth_classify_rules_ramrod_data *)(raw->rdata);
725 int rule_cnt = rule_idx + 1, cmd = elem->cmd_data.vlan_mac.cmd;
726 union eth_classify_rule_cmd *rule_entry = &data->rules[rule_idx];
727 bool add = (cmd == BNX2X_VLAN_MAC_ADD) ? true : false;
728 unsigned long *vlan_mac_flags = &elem->cmd_data.vlan_mac.vlan_mac_flags;
729 u8 *mac = elem->cmd_data.vlan_mac.u.mac.mac;
730
731 /*
732 * Set LLH CAM entry: currently only iSCSI and ETH macs are
733 * relevant. In addition, current implementation is tuned for a
734 * single ETH MAC.
735 *
736 * When multiple unicast ETH MACs PF configuration in switch
737 * independent mode is required (NetQ, multiple netdev MACs,
738 * etc.), consider better utilisation of 8 per function MAC
739 * entries in the LLH register. There is also
740 * NIG_REG_P[01]_LLH_FUNC_MEM2 registers that complete the
741 * total number of CAM entries to 16.
742 *
743 * Currently we won't configure NIG for MACs other than a primary ETH
744 * MAC and iSCSI L2 MAC.
745 *
746 * If this MAC is moving from one Queue to another, no need to change
747 * NIG configuration.
748 */
749 if (cmd != BNX2X_VLAN_MAC_MOVE) {
750 if (test_bit(BNX2X_ISCSI_ETH_MAC, vlan_mac_flags))
751 bnx2x_set_mac_in_nig(bp, add, mac,
Yuval Mintz0a52fd02012-03-12 08:53:07 +0000752 BNX2X_LLH_CAM_ISCSI_ETH_LINE);
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +0300753 else if (test_bit(BNX2X_ETH_MAC, vlan_mac_flags))
Yuval Mintz0a52fd02012-03-12 08:53:07 +0000754 bnx2x_set_mac_in_nig(bp, add, mac,
755 BNX2X_LLH_CAM_ETH_LINE);
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +0300756 }
757
758 /* Reset the ramrod data buffer for the first rule */
759 if (rule_idx == 0)
760 memset(data, 0, sizeof(*data));
761
762 /* Setup a command header */
763 bnx2x_vlan_mac_set_cmd_hdr_e2(bp, o, add, CLASSIFY_RULE_OPCODE_MAC,
764 &rule_entry->mac.header);
765
Joe Perches0f9dad12011-08-14 12:16:19 +0000766 DP(BNX2X_MSG_SP, "About to %s MAC %pM for Queue %d\n",
Merav Sicron51c1a582012-03-18 10:33:38 +0000767 (add ? "add" : "delete"), mac, raw->cl_id);
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +0300768
769 /* Set a MAC itself */
770 bnx2x_set_fw_mac_addr(&rule_entry->mac.mac_msb,
771 &rule_entry->mac.mac_mid,
772 &rule_entry->mac.mac_lsb, mac);
773
774 /* MOVE: Add a rule that will add this MAC to the target Queue */
775 if (cmd == BNX2X_VLAN_MAC_MOVE) {
776 rule_entry++;
777 rule_cnt++;
778
779 /* Setup ramrod data */
780 bnx2x_vlan_mac_set_cmd_hdr_e2(bp,
781 elem->cmd_data.vlan_mac.target_obj,
782 true, CLASSIFY_RULE_OPCODE_MAC,
783 &rule_entry->mac.header);
784
785 /* Set a MAC itself */
786 bnx2x_set_fw_mac_addr(&rule_entry->mac.mac_msb,
787 &rule_entry->mac.mac_mid,
788 &rule_entry->mac.mac_lsb, mac);
789 }
790
791 /* Set the ramrod data header */
792 /* TODO: take this to the higher level in order to prevent multiple
793 writing */
794 bnx2x_vlan_mac_set_rdata_hdr_e2(raw->cid, raw->state, &data->header,
795 rule_cnt);
796}
797
798/**
799 * bnx2x_vlan_mac_set_rdata_hdr_e1x - set a header in a single classify ramrod
800 *
801 * @bp: device handle
802 * @o: queue
803 * @type:
804 * @cam_offset: offset in cam memory
805 * @hdr: pointer to a header to setup
806 *
807 * E1/E1H
808 */
809static inline void bnx2x_vlan_mac_set_rdata_hdr_e1x(struct bnx2x *bp,
810 struct bnx2x_vlan_mac_obj *o, int type, int cam_offset,
811 struct mac_configuration_hdr *hdr)
812{
813 struct bnx2x_raw_obj *r = &o->raw;
814
815 hdr->length = 1;
816 hdr->offset = (u8)cam_offset;
Yuval Mintz86564c32013-01-23 03:21:50 +0000817 hdr->client_id = cpu_to_le16(0xff);
818 hdr->echo = cpu_to_le32((r->cid & BNX2X_SWCID_MASK) |
819 (type << BNX2X_SWCID_SHIFT));
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +0300820}
821
822static inline void bnx2x_vlan_mac_set_cfg_entry_e1x(struct bnx2x *bp,
823 struct bnx2x_vlan_mac_obj *o, bool add, int opcode, u8 *mac,
824 u16 vlan_id, struct mac_configuration_entry *cfg_entry)
825{
826 struct bnx2x_raw_obj *r = &o->raw;
827 u32 cl_bit_vec = (1 << r->cl_id);
828
829 cfg_entry->clients_bit_vector = cpu_to_le32(cl_bit_vec);
830 cfg_entry->pf_id = r->func_id;
831 cfg_entry->vlan_id = cpu_to_le16(vlan_id);
832
833 if (add) {
834 SET_FLAG(cfg_entry->flags, MAC_CONFIGURATION_ENTRY_ACTION_TYPE,
835 T_ETH_MAC_COMMAND_SET);
836 SET_FLAG(cfg_entry->flags,
837 MAC_CONFIGURATION_ENTRY_VLAN_FILTERING_MODE, opcode);
838
839 /* Set a MAC in a ramrod data */
840 bnx2x_set_fw_mac_addr(&cfg_entry->msb_mac_addr,
841 &cfg_entry->middle_mac_addr,
842 &cfg_entry->lsb_mac_addr, mac);
843 } else
844 SET_FLAG(cfg_entry->flags, MAC_CONFIGURATION_ENTRY_ACTION_TYPE,
845 T_ETH_MAC_COMMAND_INVALIDATE);
846}
847
848static inline void bnx2x_vlan_mac_set_rdata_e1x(struct bnx2x *bp,
849 struct bnx2x_vlan_mac_obj *o, int type, int cam_offset, bool add,
850 u8 *mac, u16 vlan_id, int opcode, struct mac_configuration_cmd *config)
851{
852 struct mac_configuration_entry *cfg_entry = &config->config_table[0];
853 struct bnx2x_raw_obj *raw = &o->raw;
854
855 bnx2x_vlan_mac_set_rdata_hdr_e1x(bp, o, type, cam_offset,
856 &config->hdr);
857 bnx2x_vlan_mac_set_cfg_entry_e1x(bp, o, add, opcode, mac, vlan_id,
858 cfg_entry);
859
Joe Perches0f9dad12011-08-14 12:16:19 +0000860 DP(BNX2X_MSG_SP, "%s MAC %pM CLID %d CAM offset %d\n",
Merav Sicron51c1a582012-03-18 10:33:38 +0000861 (add ? "setting" : "clearing"),
Joe Perches0f9dad12011-08-14 12:16:19 +0000862 mac, raw->cl_id, cam_offset);
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +0300863}
864
865/**
866 * bnx2x_set_one_mac_e1x - fill a single MAC rule ramrod data
867 *
868 * @bp: device handle
869 * @o: bnx2x_vlan_mac_obj
870 * @elem: bnx2x_exeq_elem
871 * @rule_idx: rule_idx
872 * @cam_offset: cam_offset
873 */
874static void bnx2x_set_one_mac_e1x(struct bnx2x *bp,
875 struct bnx2x_vlan_mac_obj *o,
876 struct bnx2x_exeq_elem *elem, int rule_idx,
877 int cam_offset)
878{
879 struct bnx2x_raw_obj *raw = &o->raw;
880 struct mac_configuration_cmd *config =
881 (struct mac_configuration_cmd *)(raw->rdata);
882 /*
883 * 57710 and 57711 do not support MOVE command,
884 * so it's either ADD or DEL
885 */
886 bool add = (elem->cmd_data.vlan_mac.cmd == BNX2X_VLAN_MAC_ADD) ?
887 true : false;
888
889 /* Reset the ramrod data buffer */
890 memset(config, 0, sizeof(*config));
891
Yuval Mintz33ac3382012-03-12 08:53:09 +0000892 bnx2x_vlan_mac_set_rdata_e1x(bp, o, raw->state,
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +0300893 cam_offset, add,
894 elem->cmd_data.vlan_mac.u.mac.mac, 0,
895 ETH_VLAN_FILTER_ANY_VLAN, config);
896}
897
898static void bnx2x_set_one_vlan_e2(struct bnx2x *bp,
899 struct bnx2x_vlan_mac_obj *o,
900 struct bnx2x_exeq_elem *elem, int rule_idx,
901 int cam_offset)
902{
903 struct bnx2x_raw_obj *raw = &o->raw;
904 struct eth_classify_rules_ramrod_data *data =
905 (struct eth_classify_rules_ramrod_data *)(raw->rdata);
906 int rule_cnt = rule_idx + 1;
907 union eth_classify_rule_cmd *rule_entry = &data->rules[rule_idx];
Yuval Mintz86564c32013-01-23 03:21:50 +0000908 enum bnx2x_vlan_mac_cmd cmd = elem->cmd_data.vlan_mac.cmd;
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +0300909 bool add = (cmd == BNX2X_VLAN_MAC_ADD) ? true : false;
910 u16 vlan = elem->cmd_data.vlan_mac.u.vlan.vlan;
911
912 /* Reset the ramrod data buffer for the first rule */
913 if (rule_idx == 0)
914 memset(data, 0, sizeof(*data));
915
916 /* Set a rule header */
917 bnx2x_vlan_mac_set_cmd_hdr_e2(bp, o, add, CLASSIFY_RULE_OPCODE_VLAN,
918 &rule_entry->vlan.header);
919
920 DP(BNX2X_MSG_SP, "About to %s VLAN %d\n", (add ? "add" : "delete"),
921 vlan);
922
923 /* Set a VLAN itself */
924 rule_entry->vlan.vlan = cpu_to_le16(vlan);
925
926 /* MOVE: Add a rule that will add this MAC to the target Queue */
927 if (cmd == BNX2X_VLAN_MAC_MOVE) {
928 rule_entry++;
929 rule_cnt++;
930
931 /* Setup ramrod data */
932 bnx2x_vlan_mac_set_cmd_hdr_e2(bp,
933 elem->cmd_data.vlan_mac.target_obj,
934 true, CLASSIFY_RULE_OPCODE_VLAN,
935 &rule_entry->vlan.header);
936
937 /* Set a VLAN itself */
938 rule_entry->vlan.vlan = cpu_to_le16(vlan);
939 }
940
941 /* Set the ramrod data header */
942 /* TODO: take this to the higher level in order to prevent multiple
943 writing */
944 bnx2x_vlan_mac_set_rdata_hdr_e2(raw->cid, raw->state, &data->header,
945 rule_cnt);
946}
947
948static void bnx2x_set_one_vlan_mac_e2(struct bnx2x *bp,
949 struct bnx2x_vlan_mac_obj *o,
950 struct bnx2x_exeq_elem *elem,
951 int rule_idx, int cam_offset)
952{
953 struct bnx2x_raw_obj *raw = &o->raw;
954 struct eth_classify_rules_ramrod_data *data =
955 (struct eth_classify_rules_ramrod_data *)(raw->rdata);
956 int rule_cnt = rule_idx + 1;
957 union eth_classify_rule_cmd *rule_entry = &data->rules[rule_idx];
Yuval Mintz86564c32013-01-23 03:21:50 +0000958 enum bnx2x_vlan_mac_cmd cmd = elem->cmd_data.vlan_mac.cmd;
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +0300959 bool add = (cmd == BNX2X_VLAN_MAC_ADD) ? true : false;
960 u16 vlan = elem->cmd_data.vlan_mac.u.vlan_mac.vlan;
961 u8 *mac = elem->cmd_data.vlan_mac.u.vlan_mac.mac;
962
963
964 /* Reset the ramrod data buffer for the first rule */
965 if (rule_idx == 0)
966 memset(data, 0, sizeof(*data));
967
968 /* Set a rule header */
969 bnx2x_vlan_mac_set_cmd_hdr_e2(bp, o, add, CLASSIFY_RULE_OPCODE_PAIR,
970 &rule_entry->pair.header);
971
972 /* Set VLAN and MAC themselvs */
973 rule_entry->pair.vlan = cpu_to_le16(vlan);
974 bnx2x_set_fw_mac_addr(&rule_entry->pair.mac_msb,
975 &rule_entry->pair.mac_mid,
976 &rule_entry->pair.mac_lsb, mac);
977
978 /* MOVE: Add a rule that will add this MAC to the target Queue */
979 if (cmd == BNX2X_VLAN_MAC_MOVE) {
980 rule_entry++;
981 rule_cnt++;
982
983 /* Setup ramrod data */
984 bnx2x_vlan_mac_set_cmd_hdr_e2(bp,
985 elem->cmd_data.vlan_mac.target_obj,
986 true, CLASSIFY_RULE_OPCODE_PAIR,
987 &rule_entry->pair.header);
988
989 /* Set a VLAN itself */
990 rule_entry->pair.vlan = cpu_to_le16(vlan);
991 bnx2x_set_fw_mac_addr(&rule_entry->pair.mac_msb,
992 &rule_entry->pair.mac_mid,
993 &rule_entry->pair.mac_lsb, mac);
994 }
995
996 /* Set the ramrod data header */
997 /* TODO: take this to the higher level in order to prevent multiple
998 writing */
999 bnx2x_vlan_mac_set_rdata_hdr_e2(raw->cid, raw->state, &data->header,
1000 rule_cnt);
1001}
1002
1003/**
1004 * bnx2x_set_one_vlan_mac_e1h -
1005 *
1006 * @bp: device handle
1007 * @o: bnx2x_vlan_mac_obj
1008 * @elem: bnx2x_exeq_elem
1009 * @rule_idx: rule_idx
1010 * @cam_offset: cam_offset
1011 */
1012static void bnx2x_set_one_vlan_mac_e1h(struct bnx2x *bp,
1013 struct bnx2x_vlan_mac_obj *o,
1014 struct bnx2x_exeq_elem *elem,
1015 int rule_idx, int cam_offset)
1016{
1017 struct bnx2x_raw_obj *raw = &o->raw;
1018 struct mac_configuration_cmd *config =
1019 (struct mac_configuration_cmd *)(raw->rdata);
1020 /*
1021 * 57710 and 57711 do not support MOVE command,
1022 * so it's either ADD or DEL
1023 */
1024 bool add = (elem->cmd_data.vlan_mac.cmd == BNX2X_VLAN_MAC_ADD) ?
1025 true : false;
1026
1027 /* Reset the ramrod data buffer */
1028 memset(config, 0, sizeof(*config));
1029
1030 bnx2x_vlan_mac_set_rdata_e1x(bp, o, BNX2X_FILTER_VLAN_MAC_PENDING,
1031 cam_offset, add,
1032 elem->cmd_data.vlan_mac.u.vlan_mac.mac,
1033 elem->cmd_data.vlan_mac.u.vlan_mac.vlan,
1034 ETH_VLAN_FILTER_CLASSIFY, config);
1035}
1036
1037#define list_next_entry(pos, member) \
1038 list_entry((pos)->member.next, typeof(*(pos)), member)
1039
1040/**
1041 * bnx2x_vlan_mac_restore - reconfigure next MAC/VLAN/VLAN-MAC element
1042 *
1043 * @bp: device handle
1044 * @p: command parameters
1045 * @ppos: pointer to the cooky
1046 *
1047 * reconfigure next MAC/VLAN/VLAN-MAC element from the
1048 * previously configured elements list.
1049 *
1050 * from command parameters only RAMROD_COMP_WAIT bit in ramrod_flags is taken
1051 * into an account
1052 *
1053 * pointer to the cooky - that should be given back in the next call to make
1054 * function handle the next element. If *ppos is set to NULL it will restart the
1055 * iterator. If returned *ppos == NULL this means that the last element has been
1056 * handled.
1057 *
1058 */
1059static int bnx2x_vlan_mac_restore(struct bnx2x *bp,
1060 struct bnx2x_vlan_mac_ramrod_params *p,
1061 struct bnx2x_vlan_mac_registry_elem **ppos)
1062{
1063 struct bnx2x_vlan_mac_registry_elem *pos;
1064 struct bnx2x_vlan_mac_obj *o = p->vlan_mac_obj;
1065
1066 /* If list is empty - there is nothing to do here */
1067 if (list_empty(&o->head)) {
1068 *ppos = NULL;
1069 return 0;
1070 }
1071
1072 /* make a step... */
1073 if (*ppos == NULL)
1074 *ppos = list_first_entry(&o->head,
1075 struct bnx2x_vlan_mac_registry_elem,
1076 link);
1077 else
1078 *ppos = list_next_entry(*ppos, link);
1079
1080 pos = *ppos;
1081
1082 /* If it's the last step - return NULL */
1083 if (list_is_last(&pos->link, &o->head))
1084 *ppos = NULL;
1085
1086 /* Prepare a 'user_req' */
1087 memcpy(&p->user_req.u, &pos->u, sizeof(pos->u));
1088
1089 /* Set the command */
1090 p->user_req.cmd = BNX2X_VLAN_MAC_ADD;
1091
1092 /* Set vlan_mac_flags */
1093 p->user_req.vlan_mac_flags = pos->vlan_mac_flags;
1094
1095 /* Set a restore bit */
1096 __set_bit(RAMROD_RESTORE, &p->ramrod_flags);
1097
1098 return bnx2x_config_vlan_mac(bp, p);
1099}
1100
1101/*
1102 * bnx2x_exeq_get_mac/bnx2x_exeq_get_vlan/bnx2x_exeq_get_vlan_mac return a
1103 * pointer to an element with a specific criteria and NULL if such an element
1104 * hasn't been found.
1105 */
1106static struct bnx2x_exeq_elem *bnx2x_exeq_get_mac(
1107 struct bnx2x_exe_queue_obj *o,
1108 struct bnx2x_exeq_elem *elem)
1109{
1110 struct bnx2x_exeq_elem *pos;
1111 struct bnx2x_mac_ramrod_data *data = &elem->cmd_data.vlan_mac.u.mac;
1112
1113 /* Check pending for execution commands */
1114 list_for_each_entry(pos, &o->exe_queue, link)
1115 if (!memcmp(&pos->cmd_data.vlan_mac.u.mac, data,
1116 sizeof(*data)) &&
1117 (pos->cmd_data.vlan_mac.cmd == elem->cmd_data.vlan_mac.cmd))
1118 return pos;
1119
1120 return NULL;
1121}
1122
1123static struct bnx2x_exeq_elem *bnx2x_exeq_get_vlan(
1124 struct bnx2x_exe_queue_obj *o,
1125 struct bnx2x_exeq_elem *elem)
1126{
1127 struct bnx2x_exeq_elem *pos;
1128 struct bnx2x_vlan_ramrod_data *data = &elem->cmd_data.vlan_mac.u.vlan;
1129
1130 /* Check pending for execution commands */
1131 list_for_each_entry(pos, &o->exe_queue, link)
1132 if (!memcmp(&pos->cmd_data.vlan_mac.u.vlan, data,
1133 sizeof(*data)) &&
1134 (pos->cmd_data.vlan_mac.cmd == elem->cmd_data.vlan_mac.cmd))
1135 return pos;
1136
1137 return NULL;
1138}
1139
1140static struct bnx2x_exeq_elem *bnx2x_exeq_get_vlan_mac(
1141 struct bnx2x_exe_queue_obj *o,
1142 struct bnx2x_exeq_elem *elem)
1143{
1144 struct bnx2x_exeq_elem *pos;
1145 struct bnx2x_vlan_mac_ramrod_data *data =
1146 &elem->cmd_data.vlan_mac.u.vlan_mac;
1147
1148 /* Check pending for execution commands */
1149 list_for_each_entry(pos, &o->exe_queue, link)
1150 if (!memcmp(&pos->cmd_data.vlan_mac.u.vlan_mac, data,
1151 sizeof(*data)) &&
1152 (pos->cmd_data.vlan_mac.cmd == elem->cmd_data.vlan_mac.cmd))
1153 return pos;
1154
1155 return NULL;
1156}
1157
1158/**
1159 * bnx2x_validate_vlan_mac_add - check if an ADD command can be executed
1160 *
1161 * @bp: device handle
1162 * @qo: bnx2x_qable_obj
1163 * @elem: bnx2x_exeq_elem
1164 *
1165 * Checks that the requested configuration can be added. If yes and if
1166 * requested, consume CAM credit.
1167 *
1168 * The 'validate' is run after the 'optimize'.
1169 *
1170 */
1171static inline int bnx2x_validate_vlan_mac_add(struct bnx2x *bp,
1172 union bnx2x_qable_obj *qo,
1173 struct bnx2x_exeq_elem *elem)
1174{
1175 struct bnx2x_vlan_mac_obj *o = &qo->vlan_mac;
1176 struct bnx2x_exe_queue_obj *exeq = &o->exe_queue;
1177 int rc;
1178
1179 /* Check the registry */
Merav Sicron51c1a582012-03-18 10:33:38 +00001180 rc = o->check_add(bp, o, &elem->cmd_data.vlan_mac.u);
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03001181 if (rc) {
Merav Sicron51c1a582012-03-18 10:33:38 +00001182 DP(BNX2X_MSG_SP, "ADD command is not allowed considering current registry state.\n");
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03001183 return rc;
1184 }
1185
1186 /*
1187 * Check if there is a pending ADD command for this
1188 * MAC/VLAN/VLAN-MAC. Return an error if there is.
1189 */
1190 if (exeq->get(exeq, elem)) {
1191 DP(BNX2X_MSG_SP, "There is a pending ADD command already\n");
1192 return -EEXIST;
1193 }
1194
1195 /*
1196 * TODO: Check the pending MOVE from other objects where this
1197 * object is a destination object.
1198 */
1199
1200 /* Consume the credit if not requested not to */
1201 if (!(test_bit(BNX2X_DONT_CONSUME_CAM_CREDIT,
1202 &elem->cmd_data.vlan_mac.vlan_mac_flags) ||
1203 o->get_credit(o)))
1204 return -EINVAL;
1205
1206 return 0;
1207}
1208
1209/**
1210 * bnx2x_validate_vlan_mac_del - check if the DEL command can be executed
1211 *
1212 * @bp: device handle
1213 * @qo: quable object to check
1214 * @elem: element that needs to be deleted
1215 *
1216 * Checks that the requested configuration can be deleted. If yes and if
1217 * requested, returns a CAM credit.
1218 *
1219 * The 'validate' is run after the 'optimize'.
1220 */
1221static inline int bnx2x_validate_vlan_mac_del(struct bnx2x *bp,
1222 union bnx2x_qable_obj *qo,
1223 struct bnx2x_exeq_elem *elem)
1224{
1225 struct bnx2x_vlan_mac_obj *o = &qo->vlan_mac;
1226 struct bnx2x_vlan_mac_registry_elem *pos;
1227 struct bnx2x_exe_queue_obj *exeq = &o->exe_queue;
1228 struct bnx2x_exeq_elem query_elem;
1229
1230 /* If this classification can not be deleted (doesn't exist)
1231 * - return a BNX2X_EXIST.
1232 */
Merav Sicron51c1a582012-03-18 10:33:38 +00001233 pos = o->check_del(bp, o, &elem->cmd_data.vlan_mac.u);
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03001234 if (!pos) {
Merav Sicron51c1a582012-03-18 10:33:38 +00001235 DP(BNX2X_MSG_SP, "DEL command is not allowed considering current registry state\n");
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03001236 return -EEXIST;
1237 }
1238
1239 /*
1240 * Check if there are pending DEL or MOVE commands for this
1241 * MAC/VLAN/VLAN-MAC. Return an error if so.
1242 */
1243 memcpy(&query_elem, elem, sizeof(query_elem));
1244
1245 /* Check for MOVE commands */
1246 query_elem.cmd_data.vlan_mac.cmd = BNX2X_VLAN_MAC_MOVE;
1247 if (exeq->get(exeq, &query_elem)) {
1248 BNX2X_ERR("There is a pending MOVE command already\n");
1249 return -EINVAL;
1250 }
1251
1252 /* Check for DEL commands */
1253 if (exeq->get(exeq, elem)) {
1254 DP(BNX2X_MSG_SP, "There is a pending DEL command already\n");
1255 return -EEXIST;
1256 }
1257
1258 /* Return the credit to the credit pool if not requested not to */
1259 if (!(test_bit(BNX2X_DONT_CONSUME_CAM_CREDIT,
1260 &elem->cmd_data.vlan_mac.vlan_mac_flags) ||
1261 o->put_credit(o))) {
1262 BNX2X_ERR("Failed to return a credit\n");
1263 return -EINVAL;
1264 }
1265
1266 return 0;
1267}
1268
1269/**
1270 * bnx2x_validate_vlan_mac_move - check if the MOVE command can be executed
1271 *
1272 * @bp: device handle
1273 * @qo: quable object to check (source)
1274 * @elem: element that needs to be moved
1275 *
1276 * Checks that the requested configuration can be moved. If yes and if
1277 * requested, returns a CAM credit.
1278 *
1279 * The 'validate' is run after the 'optimize'.
1280 */
1281static inline int bnx2x_validate_vlan_mac_move(struct bnx2x *bp,
1282 union bnx2x_qable_obj *qo,
1283 struct bnx2x_exeq_elem *elem)
1284{
1285 struct bnx2x_vlan_mac_obj *src_o = &qo->vlan_mac;
1286 struct bnx2x_vlan_mac_obj *dest_o = elem->cmd_data.vlan_mac.target_obj;
1287 struct bnx2x_exeq_elem query_elem;
1288 struct bnx2x_exe_queue_obj *src_exeq = &src_o->exe_queue;
1289 struct bnx2x_exe_queue_obj *dest_exeq = &dest_o->exe_queue;
1290
1291 /*
1292 * Check if we can perform this operation based on the current registry
1293 * state.
1294 */
Merav Sicron51c1a582012-03-18 10:33:38 +00001295 if (!src_o->check_move(bp, src_o, dest_o,
1296 &elem->cmd_data.vlan_mac.u)) {
1297 DP(BNX2X_MSG_SP, "MOVE command is not allowed considering current registry state\n");
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03001298 return -EINVAL;
1299 }
1300
1301 /*
1302 * Check if there is an already pending DEL or MOVE command for the
1303 * source object or ADD command for a destination object. Return an
1304 * error if so.
1305 */
1306 memcpy(&query_elem, elem, sizeof(query_elem));
1307
1308 /* Check DEL on source */
1309 query_elem.cmd_data.vlan_mac.cmd = BNX2X_VLAN_MAC_DEL;
1310 if (src_exeq->get(src_exeq, &query_elem)) {
Merav Sicron51c1a582012-03-18 10:33:38 +00001311 BNX2X_ERR("There is a pending DEL command on the source queue already\n");
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03001312 return -EINVAL;
1313 }
1314
1315 /* Check MOVE on source */
1316 if (src_exeq->get(src_exeq, elem)) {
1317 DP(BNX2X_MSG_SP, "There is a pending MOVE command already\n");
1318 return -EEXIST;
1319 }
1320
1321 /* Check ADD on destination */
1322 query_elem.cmd_data.vlan_mac.cmd = BNX2X_VLAN_MAC_ADD;
1323 if (dest_exeq->get(dest_exeq, &query_elem)) {
Merav Sicron51c1a582012-03-18 10:33:38 +00001324 BNX2X_ERR("There is a pending ADD command on the destination queue already\n");
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03001325 return -EINVAL;
1326 }
1327
1328 /* Consume the credit if not requested not to */
1329 if (!(test_bit(BNX2X_DONT_CONSUME_CAM_CREDIT_DEST,
1330 &elem->cmd_data.vlan_mac.vlan_mac_flags) ||
1331 dest_o->get_credit(dest_o)))
1332 return -EINVAL;
1333
1334 if (!(test_bit(BNX2X_DONT_CONSUME_CAM_CREDIT,
1335 &elem->cmd_data.vlan_mac.vlan_mac_flags) ||
1336 src_o->put_credit(src_o))) {
1337 /* return the credit taken from dest... */
1338 dest_o->put_credit(dest_o);
1339 return -EINVAL;
1340 }
1341
1342 return 0;
1343}
1344
1345static int bnx2x_validate_vlan_mac(struct bnx2x *bp,
1346 union bnx2x_qable_obj *qo,
1347 struct bnx2x_exeq_elem *elem)
1348{
1349 switch (elem->cmd_data.vlan_mac.cmd) {
1350 case BNX2X_VLAN_MAC_ADD:
1351 return bnx2x_validate_vlan_mac_add(bp, qo, elem);
1352 case BNX2X_VLAN_MAC_DEL:
1353 return bnx2x_validate_vlan_mac_del(bp, qo, elem);
1354 case BNX2X_VLAN_MAC_MOVE:
1355 return bnx2x_validate_vlan_mac_move(bp, qo, elem);
1356 default:
1357 return -EINVAL;
1358 }
1359}
1360
Yuval Mintz460a25c2012-01-23 07:31:51 +00001361static int bnx2x_remove_vlan_mac(struct bnx2x *bp,
1362 union bnx2x_qable_obj *qo,
1363 struct bnx2x_exeq_elem *elem)
1364{
1365 int rc = 0;
1366
1367 /* If consumption wasn't required, nothing to do */
1368 if (test_bit(BNX2X_DONT_CONSUME_CAM_CREDIT,
1369 &elem->cmd_data.vlan_mac.vlan_mac_flags))
1370 return 0;
1371
1372 switch (elem->cmd_data.vlan_mac.cmd) {
1373 case BNX2X_VLAN_MAC_ADD:
1374 case BNX2X_VLAN_MAC_MOVE:
1375 rc = qo->vlan_mac.put_credit(&qo->vlan_mac);
1376 break;
1377 case BNX2X_VLAN_MAC_DEL:
1378 rc = qo->vlan_mac.get_credit(&qo->vlan_mac);
1379 break;
1380 default:
1381 return -EINVAL;
1382 }
1383
1384 if (rc != true)
1385 return -EINVAL;
1386
1387 return 0;
1388}
1389
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03001390/**
1391 * bnx2x_wait_vlan_mac - passivly wait for 5 seconds until all work completes.
1392 *
1393 * @bp: device handle
1394 * @o: bnx2x_vlan_mac_obj
1395 *
1396 */
1397static int bnx2x_wait_vlan_mac(struct bnx2x *bp,
1398 struct bnx2x_vlan_mac_obj *o)
1399{
1400 int cnt = 5000, rc;
1401 struct bnx2x_exe_queue_obj *exeq = &o->exe_queue;
1402 struct bnx2x_raw_obj *raw = &o->raw;
1403
1404 while (cnt--) {
1405 /* Wait for the current command to complete */
1406 rc = raw->wait_comp(bp, raw);
1407 if (rc)
1408 return rc;
1409
1410 /* Wait until there are no pending commands */
1411 if (!bnx2x_exe_queue_empty(exeq))
Yuval Mintz0926d492013-01-23 03:21:45 +00001412 usleep_range(1000, 2000);
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03001413 else
1414 return 0;
1415 }
1416
1417 return -EBUSY;
1418}
1419
1420/**
1421 * bnx2x_complete_vlan_mac - complete one VLAN-MAC ramrod
1422 *
1423 * @bp: device handle
1424 * @o: bnx2x_vlan_mac_obj
1425 * @cqe:
1426 * @cont: if true schedule next execution chunk
1427 *
1428 */
1429static int bnx2x_complete_vlan_mac(struct bnx2x *bp,
1430 struct bnx2x_vlan_mac_obj *o,
1431 union event_ring_elem *cqe,
1432 unsigned long *ramrod_flags)
1433{
1434 struct bnx2x_raw_obj *r = &o->raw;
1435 int rc;
1436
1437 /* Reset pending list */
1438 bnx2x_exe_queue_reset_pending(bp, &o->exe_queue);
1439
1440 /* Clear pending */
1441 r->clear_pending(r);
1442
1443 /* If ramrod failed this is most likely a SW bug */
1444 if (cqe->message.error)
1445 return -EINVAL;
1446
Yuval Mintz2de67432013-01-23 03:21:43 +00001447 /* Run the next bulk of pending commands if requested */
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03001448 if (test_bit(RAMROD_CONT, ramrod_flags)) {
1449 rc = bnx2x_exe_queue_step(bp, &o->exe_queue, ramrod_flags);
1450 if (rc < 0)
1451 return rc;
1452 }
1453
1454 /* If there is more work to do return PENDING */
1455 if (!bnx2x_exe_queue_empty(&o->exe_queue))
1456 return 1;
1457
1458 return 0;
1459}
1460
1461/**
1462 * bnx2x_optimize_vlan_mac - optimize ADD and DEL commands.
1463 *
1464 * @bp: device handle
1465 * @o: bnx2x_qable_obj
1466 * @elem: bnx2x_exeq_elem
1467 */
1468static int bnx2x_optimize_vlan_mac(struct bnx2x *bp,
1469 union bnx2x_qable_obj *qo,
1470 struct bnx2x_exeq_elem *elem)
1471{
1472 struct bnx2x_exeq_elem query, *pos;
1473 struct bnx2x_vlan_mac_obj *o = &qo->vlan_mac;
1474 struct bnx2x_exe_queue_obj *exeq = &o->exe_queue;
1475
1476 memcpy(&query, elem, sizeof(query));
1477
1478 switch (elem->cmd_data.vlan_mac.cmd) {
1479 case BNX2X_VLAN_MAC_ADD:
1480 query.cmd_data.vlan_mac.cmd = BNX2X_VLAN_MAC_DEL;
1481 break;
1482 case BNX2X_VLAN_MAC_DEL:
1483 query.cmd_data.vlan_mac.cmd = BNX2X_VLAN_MAC_ADD;
1484 break;
1485 default:
1486 /* Don't handle anything other than ADD or DEL */
1487 return 0;
1488 }
1489
1490 /* If we found the appropriate element - delete it */
1491 pos = exeq->get(exeq, &query);
1492 if (pos) {
1493
1494 /* Return the credit of the optimized command */
1495 if (!test_bit(BNX2X_DONT_CONSUME_CAM_CREDIT,
1496 &pos->cmd_data.vlan_mac.vlan_mac_flags)) {
1497 if ((query.cmd_data.vlan_mac.cmd ==
1498 BNX2X_VLAN_MAC_ADD) && !o->put_credit(o)) {
Merav Sicron51c1a582012-03-18 10:33:38 +00001499 BNX2X_ERR("Failed to return the credit for the optimized ADD command\n");
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03001500 return -EINVAL;
1501 } else if (!o->get_credit(o)) { /* VLAN_MAC_DEL */
Merav Sicron51c1a582012-03-18 10:33:38 +00001502 BNX2X_ERR("Failed to recover the credit from the optimized DEL command\n");
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03001503 return -EINVAL;
1504 }
1505 }
1506
1507 DP(BNX2X_MSG_SP, "Optimizing %s command\n",
1508 (elem->cmd_data.vlan_mac.cmd == BNX2X_VLAN_MAC_ADD) ?
1509 "ADD" : "DEL");
1510
1511 list_del(&pos->link);
1512 bnx2x_exe_queue_free_elem(bp, pos);
1513 return 1;
1514 }
1515
1516 return 0;
1517}
1518
1519/**
1520 * bnx2x_vlan_mac_get_registry_elem - prepare a registry element
1521 *
1522 * @bp: device handle
1523 * @o:
1524 * @elem:
1525 * @restore:
1526 * @re:
1527 *
1528 * prepare a registry element according to the current command request.
1529 */
1530static inline int bnx2x_vlan_mac_get_registry_elem(
1531 struct bnx2x *bp,
1532 struct bnx2x_vlan_mac_obj *o,
1533 struct bnx2x_exeq_elem *elem,
1534 bool restore,
1535 struct bnx2x_vlan_mac_registry_elem **re)
1536{
Yuval Mintz86564c32013-01-23 03:21:50 +00001537 enum bnx2x_vlan_mac_cmd cmd = elem->cmd_data.vlan_mac.cmd;
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03001538 struct bnx2x_vlan_mac_registry_elem *reg_elem;
1539
1540 /* Allocate a new registry element if needed. */
1541 if (!restore &&
1542 ((cmd == BNX2X_VLAN_MAC_ADD) || (cmd == BNX2X_VLAN_MAC_MOVE))) {
1543 reg_elem = kzalloc(sizeof(*reg_elem), GFP_ATOMIC);
1544 if (!reg_elem)
1545 return -ENOMEM;
1546
1547 /* Get a new CAM offset */
1548 if (!o->get_cam_offset(o, &reg_elem->cam_offset)) {
1549 /*
1550 * This shell never happen, because we have checked the
1551 * CAM availiability in the 'validate'.
1552 */
1553 WARN_ON(1);
1554 kfree(reg_elem);
1555 return -EINVAL;
1556 }
1557
1558 DP(BNX2X_MSG_SP, "Got cam offset %d\n", reg_elem->cam_offset);
1559
1560 /* Set a VLAN-MAC data */
1561 memcpy(&reg_elem->u, &elem->cmd_data.vlan_mac.u,
1562 sizeof(reg_elem->u));
1563
1564 /* Copy the flags (needed for DEL and RESTORE flows) */
1565 reg_elem->vlan_mac_flags =
1566 elem->cmd_data.vlan_mac.vlan_mac_flags;
1567 } else /* DEL, RESTORE */
Merav Sicron51c1a582012-03-18 10:33:38 +00001568 reg_elem = o->check_del(bp, o, &elem->cmd_data.vlan_mac.u);
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03001569
1570 *re = reg_elem;
1571 return 0;
1572}
1573
1574/**
1575 * bnx2x_execute_vlan_mac - execute vlan mac command
1576 *
1577 * @bp: device handle
1578 * @qo:
1579 * @exe_chunk:
1580 * @ramrod_flags:
1581 *
1582 * go and send a ramrod!
1583 */
1584static int bnx2x_execute_vlan_mac(struct bnx2x *bp,
1585 union bnx2x_qable_obj *qo,
1586 struct list_head *exe_chunk,
1587 unsigned long *ramrod_flags)
1588{
1589 struct bnx2x_exeq_elem *elem;
1590 struct bnx2x_vlan_mac_obj *o = &qo->vlan_mac, *cam_obj;
1591 struct bnx2x_raw_obj *r = &o->raw;
1592 int rc, idx = 0;
1593 bool restore = test_bit(RAMROD_RESTORE, ramrod_flags);
1594 bool drv_only = test_bit(RAMROD_DRV_CLR_ONLY, ramrod_flags);
1595 struct bnx2x_vlan_mac_registry_elem *reg_elem;
Yuval Mintz86564c32013-01-23 03:21:50 +00001596 enum bnx2x_vlan_mac_cmd cmd;
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03001597
1598 /*
1599 * If DRIVER_ONLY execution is requested, cleanup a registry
1600 * and exit. Otherwise send a ramrod to FW.
1601 */
1602 if (!drv_only) {
1603 WARN_ON(r->check_pending(r));
1604
1605 /* Set pending */
1606 r->set_pending(r);
1607
1608 /* Fill tha ramrod data */
1609 list_for_each_entry(elem, exe_chunk, link) {
1610 cmd = elem->cmd_data.vlan_mac.cmd;
1611 /*
1612 * We will add to the target object in MOVE command, so
1613 * change the object for a CAM search.
1614 */
1615 if (cmd == BNX2X_VLAN_MAC_MOVE)
1616 cam_obj = elem->cmd_data.vlan_mac.target_obj;
1617 else
1618 cam_obj = o;
1619
1620 rc = bnx2x_vlan_mac_get_registry_elem(bp, cam_obj,
1621 elem, restore,
1622 &reg_elem);
1623 if (rc)
1624 goto error_exit;
1625
1626 WARN_ON(!reg_elem);
1627
1628 /* Push a new entry into the registry */
1629 if (!restore &&
1630 ((cmd == BNX2X_VLAN_MAC_ADD) ||
1631 (cmd == BNX2X_VLAN_MAC_MOVE)))
1632 list_add(&reg_elem->link, &cam_obj->head);
1633
1634 /* Configure a single command in a ramrod data buffer */
1635 o->set_one_rule(bp, o, elem, idx,
1636 reg_elem->cam_offset);
1637
1638 /* MOVE command consumes 2 entries in the ramrod data */
1639 if (cmd == BNX2X_VLAN_MAC_MOVE)
1640 idx += 2;
1641 else
1642 idx++;
1643 }
1644
Vladislav Zolotarov53e51e22011-07-19 01:45:02 +00001645 /*
1646 * No need for an explicit memory barrier here as long we would
1647 * need to ensure the ordering of writing to the SPQ element
1648 * and updating of the SPQ producer which involves a memory
1649 * read and we will have to put a full memory barrier there
1650 * (inside bnx2x_sp_post()).
1651 */
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03001652
1653 rc = bnx2x_sp_post(bp, o->ramrod_cmd, r->cid,
1654 U64_HI(r->rdata_mapping),
1655 U64_LO(r->rdata_mapping),
1656 ETH_CONNECTION_TYPE);
1657 if (rc)
1658 goto error_exit;
1659 }
1660
1661 /* Now, when we are done with the ramrod - clean up the registry */
1662 list_for_each_entry(elem, exe_chunk, link) {
1663 cmd = elem->cmd_data.vlan_mac.cmd;
1664 if ((cmd == BNX2X_VLAN_MAC_DEL) ||
1665 (cmd == BNX2X_VLAN_MAC_MOVE)) {
Merav Sicron51c1a582012-03-18 10:33:38 +00001666 reg_elem = o->check_del(bp, o,
1667 &elem->cmd_data.vlan_mac.u);
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03001668
1669 WARN_ON(!reg_elem);
1670
1671 o->put_cam_offset(o, reg_elem->cam_offset);
1672 list_del(&reg_elem->link);
1673 kfree(reg_elem);
1674 }
1675 }
1676
1677 if (!drv_only)
1678 return 1;
1679 else
1680 return 0;
1681
1682error_exit:
1683 r->clear_pending(r);
1684
1685 /* Cleanup a registry in case of a failure */
1686 list_for_each_entry(elem, exe_chunk, link) {
1687 cmd = elem->cmd_data.vlan_mac.cmd;
1688
1689 if (cmd == BNX2X_VLAN_MAC_MOVE)
1690 cam_obj = elem->cmd_data.vlan_mac.target_obj;
1691 else
1692 cam_obj = o;
1693
1694 /* Delete all newly added above entries */
1695 if (!restore &&
1696 ((cmd == BNX2X_VLAN_MAC_ADD) ||
1697 (cmd == BNX2X_VLAN_MAC_MOVE))) {
Merav Sicron51c1a582012-03-18 10:33:38 +00001698 reg_elem = o->check_del(bp, cam_obj,
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03001699 &elem->cmd_data.vlan_mac.u);
1700 if (reg_elem) {
1701 list_del(&reg_elem->link);
1702 kfree(reg_elem);
1703 }
1704 }
1705 }
1706
1707 return rc;
1708}
1709
1710static inline int bnx2x_vlan_mac_push_new_cmd(
1711 struct bnx2x *bp,
1712 struct bnx2x_vlan_mac_ramrod_params *p)
1713{
1714 struct bnx2x_exeq_elem *elem;
1715 struct bnx2x_vlan_mac_obj *o = p->vlan_mac_obj;
1716 bool restore = test_bit(RAMROD_RESTORE, &p->ramrod_flags);
1717
1718 /* Allocate the execution queue element */
1719 elem = bnx2x_exe_queue_alloc_elem(bp);
1720 if (!elem)
1721 return -ENOMEM;
1722
1723 /* Set the command 'length' */
1724 switch (p->user_req.cmd) {
1725 case BNX2X_VLAN_MAC_MOVE:
1726 elem->cmd_len = 2;
1727 break;
1728 default:
1729 elem->cmd_len = 1;
1730 }
1731
1732 /* Fill the object specific info */
1733 memcpy(&elem->cmd_data.vlan_mac, &p->user_req, sizeof(p->user_req));
1734
1735 /* Try to add a new command to the pending list */
1736 return bnx2x_exe_queue_add(bp, &o->exe_queue, elem, restore);
1737}
1738
1739/**
1740 * bnx2x_config_vlan_mac - configure VLAN/MAC/VLAN_MAC filtering rules.
1741 *
1742 * @bp: device handle
1743 * @p:
1744 *
1745 */
1746int bnx2x_config_vlan_mac(
1747 struct bnx2x *bp,
1748 struct bnx2x_vlan_mac_ramrod_params *p)
1749{
1750 int rc = 0;
1751 struct bnx2x_vlan_mac_obj *o = p->vlan_mac_obj;
1752 unsigned long *ramrod_flags = &p->ramrod_flags;
1753 bool cont = test_bit(RAMROD_CONT, ramrod_flags);
1754 struct bnx2x_raw_obj *raw = &o->raw;
1755
1756 /*
1757 * Add new elements to the execution list for commands that require it.
1758 */
1759 if (!cont) {
1760 rc = bnx2x_vlan_mac_push_new_cmd(bp, p);
1761 if (rc)
1762 return rc;
1763 }
1764
1765 /*
1766 * If nothing will be executed further in this iteration we want to
1767 * return PENDING if there are pending commands
1768 */
1769 if (!bnx2x_exe_queue_empty(&o->exe_queue))
1770 rc = 1;
1771
Vladislav Zolotarov79616892011-07-21 07:58:54 +00001772 if (test_bit(RAMROD_DRV_CLR_ONLY, ramrod_flags)) {
Merav Sicron51c1a582012-03-18 10:33:38 +00001773 DP(BNX2X_MSG_SP, "RAMROD_DRV_CLR_ONLY requested: clearing a pending bit.\n");
Vladislav Zolotarov79616892011-07-21 07:58:54 +00001774 raw->clear_pending(raw);
1775 }
1776
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03001777 /* Execute commands if required */
1778 if (cont || test_bit(RAMROD_EXEC, ramrod_flags) ||
1779 test_bit(RAMROD_COMP_WAIT, ramrod_flags)) {
1780 rc = bnx2x_exe_queue_step(bp, &o->exe_queue, ramrod_flags);
1781 if (rc < 0)
1782 return rc;
1783 }
1784
1785 /*
1786 * RAMROD_COMP_WAIT is a superset of RAMROD_EXEC. If it was set
1787 * then user want to wait until the last command is done.
1788 */
1789 if (test_bit(RAMROD_COMP_WAIT, &p->ramrod_flags)) {
1790 /*
1791 * Wait maximum for the current exe_queue length iterations plus
1792 * one (for the current pending command).
1793 */
1794 int max_iterations = bnx2x_exe_queue_length(&o->exe_queue) + 1;
1795
1796 while (!bnx2x_exe_queue_empty(&o->exe_queue) &&
1797 max_iterations--) {
1798
1799 /* Wait for the current command to complete */
1800 rc = raw->wait_comp(bp, raw);
1801 if (rc)
1802 return rc;
1803
1804 /* Make a next step */
1805 rc = bnx2x_exe_queue_step(bp, &o->exe_queue,
1806 ramrod_flags);
1807 if (rc < 0)
1808 return rc;
1809 }
1810
1811 return 0;
1812 }
1813
1814 return rc;
1815}
1816
1817
1818
1819/**
1820 * bnx2x_vlan_mac_del_all - delete elements with given vlan_mac_flags spec
1821 *
1822 * @bp: device handle
1823 * @o:
1824 * @vlan_mac_flags:
1825 * @ramrod_flags: execution flags to be used for this deletion
1826 *
1827 * if the last operation has completed successfully and there are no
1828 * moreelements left, positive value if the last operation has completed
1829 * successfully and there are more previously configured elements, negative
1830 * value is current operation has failed.
1831 */
1832static int bnx2x_vlan_mac_del_all(struct bnx2x *bp,
1833 struct bnx2x_vlan_mac_obj *o,
1834 unsigned long *vlan_mac_flags,
1835 unsigned long *ramrod_flags)
1836{
1837 struct bnx2x_vlan_mac_registry_elem *pos = NULL;
1838 int rc = 0;
1839 struct bnx2x_vlan_mac_ramrod_params p;
1840 struct bnx2x_exe_queue_obj *exeq = &o->exe_queue;
1841 struct bnx2x_exeq_elem *exeq_pos, *exeq_pos_n;
1842
1843 /* Clear pending commands first */
1844
1845 spin_lock_bh(&exeq->lock);
1846
1847 list_for_each_entry_safe(exeq_pos, exeq_pos_n, &exeq->exe_queue, link) {
1848 if (exeq_pos->cmd_data.vlan_mac.vlan_mac_flags ==
Yuval Mintz460a25c2012-01-23 07:31:51 +00001849 *vlan_mac_flags) {
1850 rc = exeq->remove(bp, exeq->owner, exeq_pos);
1851 if (rc) {
1852 BNX2X_ERR("Failed to remove command\n");
Dan Carpentera44acd52012-01-24 21:59:31 +00001853 spin_unlock_bh(&exeq->lock);
Yuval Mintz460a25c2012-01-23 07:31:51 +00001854 return rc;
1855 }
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03001856 list_del(&exeq_pos->link);
Yuval Mintz07ef7be2013-03-11 05:17:41 +00001857 bnx2x_exe_queue_free_elem(bp, exeq_pos);
Yuval Mintz460a25c2012-01-23 07:31:51 +00001858 }
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03001859 }
1860
1861 spin_unlock_bh(&exeq->lock);
1862
1863 /* Prepare a command request */
1864 memset(&p, 0, sizeof(p));
1865 p.vlan_mac_obj = o;
1866 p.ramrod_flags = *ramrod_flags;
1867 p.user_req.cmd = BNX2X_VLAN_MAC_DEL;
1868
1869 /*
1870 * Add all but the last VLAN-MAC to the execution queue without actually
1871 * execution anything.
1872 */
1873 __clear_bit(RAMROD_COMP_WAIT, &p.ramrod_flags);
1874 __clear_bit(RAMROD_EXEC, &p.ramrod_flags);
1875 __clear_bit(RAMROD_CONT, &p.ramrod_flags);
1876
1877 list_for_each_entry(pos, &o->head, link) {
1878 if (pos->vlan_mac_flags == *vlan_mac_flags) {
1879 p.user_req.vlan_mac_flags = pos->vlan_mac_flags;
1880 memcpy(&p.user_req.u, &pos->u, sizeof(pos->u));
1881 rc = bnx2x_config_vlan_mac(bp, &p);
1882 if (rc < 0) {
1883 BNX2X_ERR("Failed to add a new DEL command\n");
1884 return rc;
1885 }
1886 }
1887 }
1888
1889 p.ramrod_flags = *ramrod_flags;
1890 __set_bit(RAMROD_CONT, &p.ramrod_flags);
1891
1892 return bnx2x_config_vlan_mac(bp, &p);
1893}
1894
1895static inline void bnx2x_init_raw_obj(struct bnx2x_raw_obj *raw, u8 cl_id,
1896 u32 cid, u8 func_id, void *rdata, dma_addr_t rdata_mapping, int state,
1897 unsigned long *pstate, bnx2x_obj_type type)
1898{
1899 raw->func_id = func_id;
1900 raw->cid = cid;
1901 raw->cl_id = cl_id;
1902 raw->rdata = rdata;
1903 raw->rdata_mapping = rdata_mapping;
1904 raw->state = state;
1905 raw->pstate = pstate;
1906 raw->obj_type = type;
1907 raw->check_pending = bnx2x_raw_check_pending;
1908 raw->clear_pending = bnx2x_raw_clear_pending;
1909 raw->set_pending = bnx2x_raw_set_pending;
1910 raw->wait_comp = bnx2x_raw_wait;
1911}
1912
1913static inline void bnx2x_init_vlan_mac_common(struct bnx2x_vlan_mac_obj *o,
1914 u8 cl_id, u32 cid, u8 func_id, void *rdata, dma_addr_t rdata_mapping,
1915 int state, unsigned long *pstate, bnx2x_obj_type type,
1916 struct bnx2x_credit_pool_obj *macs_pool,
1917 struct bnx2x_credit_pool_obj *vlans_pool)
1918{
1919 INIT_LIST_HEAD(&o->head);
1920
1921 o->macs_pool = macs_pool;
1922 o->vlans_pool = vlans_pool;
1923
1924 o->delete_all = bnx2x_vlan_mac_del_all;
1925 o->restore = bnx2x_vlan_mac_restore;
1926 o->complete = bnx2x_complete_vlan_mac;
1927 o->wait = bnx2x_wait_vlan_mac;
1928
1929 bnx2x_init_raw_obj(&o->raw, cl_id, cid, func_id, rdata, rdata_mapping,
1930 state, pstate, type);
1931}
1932
1933
1934void bnx2x_init_mac_obj(struct bnx2x *bp,
1935 struct bnx2x_vlan_mac_obj *mac_obj,
1936 u8 cl_id, u32 cid, u8 func_id, void *rdata,
1937 dma_addr_t rdata_mapping, int state,
1938 unsigned long *pstate, bnx2x_obj_type type,
1939 struct bnx2x_credit_pool_obj *macs_pool)
1940{
1941 union bnx2x_qable_obj *qable_obj = (union bnx2x_qable_obj *)mac_obj;
1942
1943 bnx2x_init_vlan_mac_common(mac_obj, cl_id, cid, func_id, rdata,
1944 rdata_mapping, state, pstate, type,
1945 macs_pool, NULL);
1946
1947 /* CAM credit pool handling */
1948 mac_obj->get_credit = bnx2x_get_credit_mac;
1949 mac_obj->put_credit = bnx2x_put_credit_mac;
1950 mac_obj->get_cam_offset = bnx2x_get_cam_offset_mac;
1951 mac_obj->put_cam_offset = bnx2x_put_cam_offset_mac;
1952
1953 if (CHIP_IS_E1x(bp)) {
1954 mac_obj->set_one_rule = bnx2x_set_one_mac_e1x;
1955 mac_obj->check_del = bnx2x_check_mac_del;
1956 mac_obj->check_add = bnx2x_check_mac_add;
1957 mac_obj->check_move = bnx2x_check_move_always_err;
1958 mac_obj->ramrod_cmd = RAMROD_CMD_ID_ETH_SET_MAC;
1959
1960 /* Exe Queue */
1961 bnx2x_exe_queue_init(bp,
1962 &mac_obj->exe_queue, 1, qable_obj,
1963 bnx2x_validate_vlan_mac,
Yuval Mintz460a25c2012-01-23 07:31:51 +00001964 bnx2x_remove_vlan_mac,
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03001965 bnx2x_optimize_vlan_mac,
1966 bnx2x_execute_vlan_mac,
1967 bnx2x_exeq_get_mac);
1968 } else {
1969 mac_obj->set_one_rule = bnx2x_set_one_mac_e2;
1970 mac_obj->check_del = bnx2x_check_mac_del;
1971 mac_obj->check_add = bnx2x_check_mac_add;
1972 mac_obj->check_move = bnx2x_check_move;
1973 mac_obj->ramrod_cmd =
1974 RAMROD_CMD_ID_ETH_CLASSIFICATION_RULES;
Ariel Eliored5162a2011-12-05 21:52:24 +00001975 mac_obj->get_n_elements = bnx2x_get_n_elements;
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03001976
1977 /* Exe Queue */
1978 bnx2x_exe_queue_init(bp,
1979 &mac_obj->exe_queue, CLASSIFY_RULES_COUNT,
1980 qable_obj, bnx2x_validate_vlan_mac,
Yuval Mintz460a25c2012-01-23 07:31:51 +00001981 bnx2x_remove_vlan_mac,
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03001982 bnx2x_optimize_vlan_mac,
1983 bnx2x_execute_vlan_mac,
1984 bnx2x_exeq_get_mac);
1985 }
1986}
1987
1988void bnx2x_init_vlan_obj(struct bnx2x *bp,
1989 struct bnx2x_vlan_mac_obj *vlan_obj,
1990 u8 cl_id, u32 cid, u8 func_id, void *rdata,
1991 dma_addr_t rdata_mapping, int state,
1992 unsigned long *pstate, bnx2x_obj_type type,
1993 struct bnx2x_credit_pool_obj *vlans_pool)
1994{
1995 union bnx2x_qable_obj *qable_obj = (union bnx2x_qable_obj *)vlan_obj;
1996
1997 bnx2x_init_vlan_mac_common(vlan_obj, cl_id, cid, func_id, rdata,
1998 rdata_mapping, state, pstate, type, NULL,
1999 vlans_pool);
2000
2001 vlan_obj->get_credit = bnx2x_get_credit_vlan;
2002 vlan_obj->put_credit = bnx2x_put_credit_vlan;
2003 vlan_obj->get_cam_offset = bnx2x_get_cam_offset_vlan;
2004 vlan_obj->put_cam_offset = bnx2x_put_cam_offset_vlan;
2005
2006 if (CHIP_IS_E1x(bp)) {
2007 BNX2X_ERR("Do not support chips others than E2 and newer\n");
2008 BUG();
2009 } else {
2010 vlan_obj->set_one_rule = bnx2x_set_one_vlan_e2;
2011 vlan_obj->check_del = bnx2x_check_vlan_del;
2012 vlan_obj->check_add = bnx2x_check_vlan_add;
2013 vlan_obj->check_move = bnx2x_check_move;
2014 vlan_obj->ramrod_cmd =
2015 RAMROD_CMD_ID_ETH_CLASSIFICATION_RULES;
2016
2017 /* Exe Queue */
2018 bnx2x_exe_queue_init(bp,
2019 &vlan_obj->exe_queue, CLASSIFY_RULES_COUNT,
2020 qable_obj, bnx2x_validate_vlan_mac,
Yuval Mintz460a25c2012-01-23 07:31:51 +00002021 bnx2x_remove_vlan_mac,
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03002022 bnx2x_optimize_vlan_mac,
2023 bnx2x_execute_vlan_mac,
2024 bnx2x_exeq_get_vlan);
2025 }
2026}
2027
2028void bnx2x_init_vlan_mac_obj(struct bnx2x *bp,
2029 struct bnx2x_vlan_mac_obj *vlan_mac_obj,
2030 u8 cl_id, u32 cid, u8 func_id, void *rdata,
2031 dma_addr_t rdata_mapping, int state,
2032 unsigned long *pstate, bnx2x_obj_type type,
2033 struct bnx2x_credit_pool_obj *macs_pool,
2034 struct bnx2x_credit_pool_obj *vlans_pool)
2035{
2036 union bnx2x_qable_obj *qable_obj =
2037 (union bnx2x_qable_obj *)vlan_mac_obj;
2038
2039 bnx2x_init_vlan_mac_common(vlan_mac_obj, cl_id, cid, func_id, rdata,
2040 rdata_mapping, state, pstate, type,
2041 macs_pool, vlans_pool);
2042
2043 /* CAM pool handling */
2044 vlan_mac_obj->get_credit = bnx2x_get_credit_vlan_mac;
2045 vlan_mac_obj->put_credit = bnx2x_put_credit_vlan_mac;
2046 /*
2047 * CAM offset is relevant for 57710 and 57711 chips only which have a
2048 * single CAM for both MACs and VLAN-MAC pairs. So the offset
2049 * will be taken from MACs' pool object only.
2050 */
2051 vlan_mac_obj->get_cam_offset = bnx2x_get_cam_offset_mac;
2052 vlan_mac_obj->put_cam_offset = bnx2x_put_cam_offset_mac;
2053
2054 if (CHIP_IS_E1(bp)) {
2055 BNX2X_ERR("Do not support chips others than E2\n");
2056 BUG();
2057 } else if (CHIP_IS_E1H(bp)) {
2058 vlan_mac_obj->set_one_rule = bnx2x_set_one_vlan_mac_e1h;
2059 vlan_mac_obj->check_del = bnx2x_check_vlan_mac_del;
2060 vlan_mac_obj->check_add = bnx2x_check_vlan_mac_add;
2061 vlan_mac_obj->check_move = bnx2x_check_move_always_err;
2062 vlan_mac_obj->ramrod_cmd = RAMROD_CMD_ID_ETH_SET_MAC;
2063
2064 /* Exe Queue */
2065 bnx2x_exe_queue_init(bp,
2066 &vlan_mac_obj->exe_queue, 1, qable_obj,
2067 bnx2x_validate_vlan_mac,
Yuval Mintz460a25c2012-01-23 07:31:51 +00002068 bnx2x_remove_vlan_mac,
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03002069 bnx2x_optimize_vlan_mac,
2070 bnx2x_execute_vlan_mac,
2071 bnx2x_exeq_get_vlan_mac);
2072 } else {
2073 vlan_mac_obj->set_one_rule = bnx2x_set_one_vlan_mac_e2;
2074 vlan_mac_obj->check_del = bnx2x_check_vlan_mac_del;
2075 vlan_mac_obj->check_add = bnx2x_check_vlan_mac_add;
2076 vlan_mac_obj->check_move = bnx2x_check_move;
2077 vlan_mac_obj->ramrod_cmd =
2078 RAMROD_CMD_ID_ETH_CLASSIFICATION_RULES;
2079
2080 /* Exe Queue */
2081 bnx2x_exe_queue_init(bp,
2082 &vlan_mac_obj->exe_queue,
2083 CLASSIFY_RULES_COUNT,
2084 qable_obj, bnx2x_validate_vlan_mac,
Yuval Mintz460a25c2012-01-23 07:31:51 +00002085 bnx2x_remove_vlan_mac,
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03002086 bnx2x_optimize_vlan_mac,
2087 bnx2x_execute_vlan_mac,
2088 bnx2x_exeq_get_vlan_mac);
2089 }
2090
2091}
2092
2093/* RX_MODE verbs: DROP_ALL/ACCEPT_ALL/ACCEPT_ALL_MULTI/ACCEPT_ALL_VLAN/NORMAL */
2094static inline void __storm_memset_mac_filters(struct bnx2x *bp,
2095 struct tstorm_eth_mac_filter_config *mac_filters,
2096 u16 pf_id)
2097{
2098 size_t size = sizeof(struct tstorm_eth_mac_filter_config);
2099
2100 u32 addr = BAR_TSTRORM_INTMEM +
2101 TSTORM_MAC_FILTER_CONFIG_OFFSET(pf_id);
2102
2103 __storm_memset_struct(bp, addr, size, (u32 *)mac_filters);
2104}
2105
2106static int bnx2x_set_rx_mode_e1x(struct bnx2x *bp,
2107 struct bnx2x_rx_mode_ramrod_params *p)
2108{
Yuval Mintz2de67432013-01-23 03:21:43 +00002109 /* update the bp MAC filter structure */
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03002110 u32 mask = (1 << p->cl_id);
2111
2112 struct tstorm_eth_mac_filter_config *mac_filters =
2113 (struct tstorm_eth_mac_filter_config *)p->rdata;
2114
2115 /* initial seeting is drop-all */
2116 u8 drop_all_ucast = 1, drop_all_mcast = 1;
Vladislav Zolotarov042181f2011-06-14 01:33:39 +00002117 u8 accp_all_ucast = 0, accp_all_bcast = 0, accp_all_mcast = 0;
2118 u8 unmatched_unicast = 0;
2119
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03002120 /* In e1x there we only take into account rx acceot flag since tx switching
2121 * isn't enabled. */
2122 if (test_bit(BNX2X_ACCEPT_UNICAST, &p->rx_accept_flags))
Vladislav Zolotarov042181f2011-06-14 01:33:39 +00002123 /* accept matched ucast */
2124 drop_all_ucast = 0;
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03002125
2126 if (test_bit(BNX2X_ACCEPT_MULTICAST, &p->rx_accept_flags))
Vladislav Zolotarov042181f2011-06-14 01:33:39 +00002127 /* accept matched mcast */
2128 drop_all_mcast = 0;
2129
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03002130 if (test_bit(BNX2X_ACCEPT_ALL_UNICAST, &p->rx_accept_flags)) {
Vladislav Zolotarov042181f2011-06-14 01:33:39 +00002131 /* accept all mcast */
2132 drop_all_ucast = 0;
2133 accp_all_ucast = 1;
2134 }
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03002135 if (test_bit(BNX2X_ACCEPT_ALL_MULTICAST, &p->rx_accept_flags)) {
Vladislav Zolotarov042181f2011-06-14 01:33:39 +00002136 /* accept all mcast */
2137 drop_all_mcast = 0;
2138 accp_all_mcast = 1;
2139 }
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03002140 if (test_bit(BNX2X_ACCEPT_BROADCAST, &p->rx_accept_flags))
Vladislav Zolotarov042181f2011-06-14 01:33:39 +00002141 /* accept (all) bcast */
Vladislav Zolotarov042181f2011-06-14 01:33:39 +00002142 accp_all_bcast = 1;
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03002143 if (test_bit(BNX2X_ACCEPT_UNMATCHED, &p->rx_accept_flags))
2144 /* accept unmatched unicasts */
2145 unmatched_unicast = 1;
Vladislav Zolotarov042181f2011-06-14 01:33:39 +00002146
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03002147 mac_filters->ucast_drop_all = drop_all_ucast ?
2148 mac_filters->ucast_drop_all | mask :
2149 mac_filters->ucast_drop_all & ~mask;
Vladislav Zolotarov042181f2011-06-14 01:33:39 +00002150
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03002151 mac_filters->mcast_drop_all = drop_all_mcast ?
2152 mac_filters->mcast_drop_all | mask :
2153 mac_filters->mcast_drop_all & ~mask;
Vladislav Zolotarov042181f2011-06-14 01:33:39 +00002154
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03002155 mac_filters->ucast_accept_all = accp_all_ucast ?
2156 mac_filters->ucast_accept_all | mask :
2157 mac_filters->ucast_accept_all & ~mask;
Vladislav Zolotarov042181f2011-06-14 01:33:39 +00002158
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03002159 mac_filters->mcast_accept_all = accp_all_mcast ?
2160 mac_filters->mcast_accept_all | mask :
2161 mac_filters->mcast_accept_all & ~mask;
Vladislav Zolotarov042181f2011-06-14 01:33:39 +00002162
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03002163 mac_filters->bcast_accept_all = accp_all_bcast ?
2164 mac_filters->bcast_accept_all | mask :
2165 mac_filters->bcast_accept_all & ~mask;
Vladislav Zolotarov042181f2011-06-14 01:33:39 +00002166
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03002167 mac_filters->unmatched_unicast = unmatched_unicast ?
2168 mac_filters->unmatched_unicast | mask :
2169 mac_filters->unmatched_unicast & ~mask;
Vladislav Zolotarov042181f2011-06-14 01:33:39 +00002170
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03002171 DP(BNX2X_MSG_SP, "drop_ucast 0x%x\ndrop_mcast 0x%x\n accp_ucast 0x%x\n"
Yuval Mintz2de67432013-01-23 03:21:43 +00002172 "accp_mcast 0x%x\naccp_bcast 0x%x\n",
Merav Sicron51c1a582012-03-18 10:33:38 +00002173 mac_filters->ucast_drop_all, mac_filters->mcast_drop_all,
2174 mac_filters->ucast_accept_all, mac_filters->mcast_accept_all,
2175 mac_filters->bcast_accept_all);
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03002176
2177 /* write the MAC filter structure*/
2178 __storm_memset_mac_filters(bp, mac_filters, p->func_id);
2179
2180 /* The operation is completed */
2181 clear_bit(p->state, p->pstate);
2182 smp_mb__after_clear_bit();
2183
2184 return 0;
Vladislav Zolotarov042181f2011-06-14 01:33:39 +00002185}
2186
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03002187/* Setup ramrod data */
2188static inline void bnx2x_rx_mode_set_rdata_hdr_e2(u32 cid,
2189 struct eth_classify_header *hdr,
2190 u8 rule_cnt)
Vladislav Zolotarov042181f2011-06-14 01:33:39 +00002191{
Yuval Mintz86564c32013-01-23 03:21:50 +00002192 hdr->echo = cpu_to_le32(cid);
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03002193 hdr->rule_cnt = rule_cnt;
2194}
Vladislav Zolotarov042181f2011-06-14 01:33:39 +00002195
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03002196static inline void bnx2x_rx_mode_set_cmd_state_e2(struct bnx2x *bp,
Yuval Mintz924d75a2013-01-23 03:21:44 +00002197 unsigned long *accept_flags,
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03002198 struct eth_filter_rules_cmd *cmd,
2199 bool clear_accept_all)
2200{
2201 u16 state;
Vladislav Zolotarov042181f2011-06-14 01:33:39 +00002202
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03002203 /* start with 'drop-all' */
2204 state = ETH_FILTER_RULES_CMD_UCAST_DROP_ALL |
2205 ETH_FILTER_RULES_CMD_MCAST_DROP_ALL;
2206
Yuval Mintz924d75a2013-01-23 03:21:44 +00002207 if (test_bit(BNX2X_ACCEPT_UNICAST, accept_flags))
2208 state &= ~ETH_FILTER_RULES_CMD_UCAST_DROP_ALL;
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03002209
Yuval Mintz924d75a2013-01-23 03:21:44 +00002210 if (test_bit(BNX2X_ACCEPT_MULTICAST, accept_flags))
2211 state &= ~ETH_FILTER_RULES_CMD_MCAST_DROP_ALL;
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03002212
Yuval Mintz924d75a2013-01-23 03:21:44 +00002213 if (test_bit(BNX2X_ACCEPT_ALL_UNICAST, accept_flags)) {
2214 state &= ~ETH_FILTER_RULES_CMD_UCAST_DROP_ALL;
2215 state |= ETH_FILTER_RULES_CMD_UCAST_ACCEPT_ALL;
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03002216 }
2217
Yuval Mintz924d75a2013-01-23 03:21:44 +00002218 if (test_bit(BNX2X_ACCEPT_ALL_MULTICAST, accept_flags)) {
2219 state |= ETH_FILTER_RULES_CMD_MCAST_ACCEPT_ALL;
2220 state &= ~ETH_FILTER_RULES_CMD_MCAST_DROP_ALL;
2221 }
2222
2223 if (test_bit(BNX2X_ACCEPT_BROADCAST, accept_flags))
2224 state |= ETH_FILTER_RULES_CMD_BCAST_ACCEPT_ALL;
2225
2226 if (test_bit(BNX2X_ACCEPT_UNMATCHED, accept_flags)) {
2227 state &= ~ETH_FILTER_RULES_CMD_UCAST_DROP_ALL;
2228 state |= ETH_FILTER_RULES_CMD_UCAST_ACCEPT_UNMATCHED;
2229 }
2230
2231 if (test_bit(BNX2X_ACCEPT_ANY_VLAN, accept_flags))
2232 state |= ETH_FILTER_RULES_CMD_ACCEPT_ANY_VLAN;
2233
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03002234 /* Clear ACCEPT_ALL_XXX flags for FCoE L2 Queue */
2235 if (clear_accept_all) {
2236 state &= ~ETH_FILTER_RULES_CMD_MCAST_ACCEPT_ALL;
2237 state &= ~ETH_FILTER_RULES_CMD_BCAST_ACCEPT_ALL;
2238 state &= ~ETH_FILTER_RULES_CMD_UCAST_ACCEPT_ALL;
2239 state &= ~ETH_FILTER_RULES_CMD_UCAST_ACCEPT_UNMATCHED;
2240 }
2241
2242 cmd->state = cpu_to_le16(state);
2243
2244}
2245
2246static int bnx2x_set_rx_mode_e2(struct bnx2x *bp,
2247 struct bnx2x_rx_mode_ramrod_params *p)
2248{
2249 struct eth_filter_rules_ramrod_data *data = p->rdata;
2250 int rc;
2251 u8 rule_idx = 0;
2252
2253 /* Reset the ramrod data buffer */
2254 memset(data, 0, sizeof(*data));
2255
2256 /* Setup ramrod data */
2257
2258 /* Tx (internal switching) */
2259 if (test_bit(RAMROD_TX, &p->ramrod_flags)) {
2260 data->rules[rule_idx].client_id = p->cl_id;
2261 data->rules[rule_idx].func_id = p->func_id;
2262
2263 data->rules[rule_idx].cmd_general_data =
2264 ETH_FILTER_RULES_CMD_TX_CMD;
2265
Yuval Mintz924d75a2013-01-23 03:21:44 +00002266 bnx2x_rx_mode_set_cmd_state_e2(bp, &p->tx_accept_flags,
2267 &(data->rules[rule_idx++]),
2268 false);
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03002269 }
2270
2271 /* Rx */
2272 if (test_bit(RAMROD_RX, &p->ramrod_flags)) {
2273 data->rules[rule_idx].client_id = p->cl_id;
2274 data->rules[rule_idx].func_id = p->func_id;
2275
2276 data->rules[rule_idx].cmd_general_data =
2277 ETH_FILTER_RULES_CMD_RX_CMD;
2278
Yuval Mintz924d75a2013-01-23 03:21:44 +00002279 bnx2x_rx_mode_set_cmd_state_e2(bp, &p->rx_accept_flags,
2280 &(data->rules[rule_idx++]),
2281 false);
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03002282 }
2283
2284
2285 /*
2286 * If FCoE Queue configuration has been requested configure the Rx and
2287 * internal switching modes for this queue in separate rules.
2288 *
2289 * FCoE queue shell never be set to ACCEPT_ALL packets of any sort:
2290 * MCAST_ALL, UCAST_ALL, BCAST_ALL and UNMATCHED.
2291 */
2292 if (test_bit(BNX2X_RX_MODE_FCOE_ETH, &p->rx_mode_flags)) {
2293 /* Tx (internal switching) */
2294 if (test_bit(RAMROD_TX, &p->ramrod_flags)) {
2295 data->rules[rule_idx].client_id = bnx2x_fcoe(bp, cl_id);
2296 data->rules[rule_idx].func_id = p->func_id;
2297
2298 data->rules[rule_idx].cmd_general_data =
2299 ETH_FILTER_RULES_CMD_TX_CMD;
2300
Yuval Mintz924d75a2013-01-23 03:21:44 +00002301 bnx2x_rx_mode_set_cmd_state_e2(bp, &p->tx_accept_flags,
2302 &(data->rules[rule_idx]),
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03002303 true);
Yuval Mintz924d75a2013-01-23 03:21:44 +00002304 rule_idx++;
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03002305 }
2306
2307 /* Rx */
2308 if (test_bit(RAMROD_RX, &p->ramrod_flags)) {
2309 data->rules[rule_idx].client_id = bnx2x_fcoe(bp, cl_id);
2310 data->rules[rule_idx].func_id = p->func_id;
2311
2312 data->rules[rule_idx].cmd_general_data =
2313 ETH_FILTER_RULES_CMD_RX_CMD;
2314
Yuval Mintz924d75a2013-01-23 03:21:44 +00002315 bnx2x_rx_mode_set_cmd_state_e2(bp, &p->rx_accept_flags,
2316 &(data->rules[rule_idx]),
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03002317 true);
Yuval Mintz924d75a2013-01-23 03:21:44 +00002318 rule_idx++;
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03002319 }
2320 }
2321
2322 /*
2323 * Set the ramrod header (most importantly - number of rules to
2324 * configure).
2325 */
2326 bnx2x_rx_mode_set_rdata_hdr_e2(p->cid, &data->header, rule_idx);
2327
Merav Sicron51c1a582012-03-18 10:33:38 +00002328 DP(BNX2X_MSG_SP, "About to configure %d rules, rx_accept_flags 0x%lx, tx_accept_flags 0x%lx\n",
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03002329 data->header.rule_cnt, p->rx_accept_flags,
2330 p->tx_accept_flags);
2331
Vladislav Zolotarov53e51e22011-07-19 01:45:02 +00002332 /*
2333 * No need for an explicit memory barrier here as long we would
2334 * need to ensure the ordering of writing to the SPQ element
2335 * and updating of the SPQ producer which involves a memory
2336 * read and we will have to put a full memory barrier there
2337 * (inside bnx2x_sp_post()).
2338 */
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03002339
2340 /* Send a ramrod */
2341 rc = bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_FILTER_RULES, p->cid,
2342 U64_HI(p->rdata_mapping),
2343 U64_LO(p->rdata_mapping),
2344 ETH_CONNECTION_TYPE);
2345 if (rc)
2346 return rc;
2347
2348 /* Ramrod completion is pending */
2349 return 1;
2350}
2351
2352static int bnx2x_wait_rx_mode_comp_e2(struct bnx2x *bp,
2353 struct bnx2x_rx_mode_ramrod_params *p)
2354{
2355 return bnx2x_state_wait(bp, p->state, p->pstate);
2356}
2357
2358static int bnx2x_empty_rx_mode_wait(struct bnx2x *bp,
2359 struct bnx2x_rx_mode_ramrod_params *p)
2360{
2361 /* Do nothing */
2362 return 0;
2363}
2364
2365int bnx2x_config_rx_mode(struct bnx2x *bp,
2366 struct bnx2x_rx_mode_ramrod_params *p)
2367{
2368 int rc;
2369
2370 /* Configure the new classification in the chip */
2371 rc = p->rx_mode_obj->config_rx_mode(bp, p);
2372 if (rc < 0)
2373 return rc;
2374
2375 /* Wait for a ramrod completion if was requested */
2376 if (test_bit(RAMROD_COMP_WAIT, &p->ramrod_flags)) {
2377 rc = p->rx_mode_obj->wait_comp(bp, p);
2378 if (rc)
2379 return rc;
2380 }
2381
2382 return rc;
2383}
2384
2385void bnx2x_init_rx_mode_obj(struct bnx2x *bp,
2386 struct bnx2x_rx_mode_obj *o)
2387{
2388 if (CHIP_IS_E1x(bp)) {
2389 o->wait_comp = bnx2x_empty_rx_mode_wait;
2390 o->config_rx_mode = bnx2x_set_rx_mode_e1x;
2391 } else {
2392 o->wait_comp = bnx2x_wait_rx_mode_comp_e2;
2393 o->config_rx_mode = bnx2x_set_rx_mode_e2;
2394 }
2395}
2396
2397/********************* Multicast verbs: SET, CLEAR ****************************/
2398static inline u8 bnx2x_mcast_bin_from_mac(u8 *mac)
2399{
2400 return (crc32c_le(0, mac, ETH_ALEN) >> 24) & 0xff;
2401}
2402
2403struct bnx2x_mcast_mac_elem {
2404 struct list_head link;
2405 u8 mac[ETH_ALEN];
2406 u8 pad[2]; /* For a natural alignment of the following buffer */
2407};
2408
2409struct bnx2x_pending_mcast_cmd {
2410 struct list_head link;
2411 int type; /* BNX2X_MCAST_CMD_X */
2412 union {
2413 struct list_head macs_head;
2414 u32 macs_num; /* Needed for DEL command */
2415 int next_bin; /* Needed for RESTORE flow with aprox match */
2416 } data;
2417
2418 bool done; /* set to true, when the command has been handled,
2419 * practically used in 57712 handling only, where one pending
2420 * command may be handled in a few operations. As long as for
2421 * other chips every operation handling is completed in a
2422 * single ramrod, there is no need to utilize this field.
2423 */
2424};
2425
2426static int bnx2x_mcast_wait(struct bnx2x *bp,
2427 struct bnx2x_mcast_obj *o)
2428{
2429 if (bnx2x_state_wait(bp, o->sched_state, o->raw.pstate) ||
2430 o->raw.wait_comp(bp, &o->raw))
2431 return -EBUSY;
2432
2433 return 0;
2434}
2435
2436static int bnx2x_mcast_enqueue_cmd(struct bnx2x *bp,
2437 struct bnx2x_mcast_obj *o,
2438 struct bnx2x_mcast_ramrod_params *p,
Yuval Mintz86564c32013-01-23 03:21:50 +00002439 enum bnx2x_mcast_cmd cmd)
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03002440{
2441 int total_sz;
2442 struct bnx2x_pending_mcast_cmd *new_cmd;
2443 struct bnx2x_mcast_mac_elem *cur_mac = NULL;
2444 struct bnx2x_mcast_list_elem *pos;
2445 int macs_list_len = ((cmd == BNX2X_MCAST_CMD_ADD) ?
2446 p->mcast_list_len : 0);
2447
2448 /* If the command is empty ("handle pending commands only"), break */
2449 if (!p->mcast_list_len)
2450 return 0;
2451
2452 total_sz = sizeof(*new_cmd) +
2453 macs_list_len * sizeof(struct bnx2x_mcast_mac_elem);
2454
2455 /* Add mcast is called under spin_lock, thus calling with GFP_ATOMIC */
2456 new_cmd = kzalloc(total_sz, GFP_ATOMIC);
2457
2458 if (!new_cmd)
2459 return -ENOMEM;
2460
Merav Sicron51c1a582012-03-18 10:33:38 +00002461 DP(BNX2X_MSG_SP, "About to enqueue a new %d command. macs_list_len=%d\n",
2462 cmd, macs_list_len);
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03002463
2464 INIT_LIST_HEAD(&new_cmd->data.macs_head);
2465
2466 new_cmd->type = cmd;
2467 new_cmd->done = false;
2468
2469 switch (cmd) {
2470 case BNX2X_MCAST_CMD_ADD:
2471 cur_mac = (struct bnx2x_mcast_mac_elem *)
2472 ((u8 *)new_cmd + sizeof(*new_cmd));
2473
2474 /* Push the MACs of the current command into the pendig command
2475 * MACs list: FIFO
Vladislav Zolotarov042181f2011-06-14 01:33:39 +00002476 */
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03002477 list_for_each_entry(pos, &p->mcast_list, link) {
2478 memcpy(cur_mac->mac, pos->mac, ETH_ALEN);
2479 list_add_tail(&cur_mac->link, &new_cmd->data.macs_head);
2480 cur_mac++;
Vladislav Zolotarov042181f2011-06-14 01:33:39 +00002481 }
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03002482
Vladislav Zolotarov042181f2011-06-14 01:33:39 +00002483 break;
2484
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03002485 case BNX2X_MCAST_CMD_DEL:
2486 new_cmd->data.macs_num = p->mcast_list_len;
2487 break;
2488
2489 case BNX2X_MCAST_CMD_RESTORE:
2490 new_cmd->data.next_bin = 0;
Vladislav Zolotarov042181f2011-06-14 01:33:39 +00002491 break;
2492
2493 default:
Jesper Juhl8b6d5c02012-07-31 11:39:37 +00002494 kfree(new_cmd);
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03002495 BNX2X_ERR("Unknown command: %d\n", cmd);
2496 return -EINVAL;
2497 }
2498
2499 /* Push the new pending command to the tail of the pending list: FIFO */
2500 list_add_tail(&new_cmd->link, &o->pending_cmds_head);
2501
2502 o->set_sched(o);
2503
2504 return 1;
2505}
2506
2507/**
2508 * bnx2x_mcast_get_next_bin - get the next set bin (index)
2509 *
2510 * @o:
2511 * @last: index to start looking from (including)
2512 *
2513 * returns the next found (set) bin or a negative value if none is found.
2514 */
2515static inline int bnx2x_mcast_get_next_bin(struct bnx2x_mcast_obj *o, int last)
2516{
2517 int i, j, inner_start = last % BIT_VEC64_ELEM_SZ;
2518
2519 for (i = last / BIT_VEC64_ELEM_SZ; i < BNX2X_MCAST_VEC_SZ; i++) {
2520 if (o->registry.aprox_match.vec[i])
2521 for (j = inner_start; j < BIT_VEC64_ELEM_SZ; j++) {
2522 int cur_bit = j + BIT_VEC64_ELEM_SZ * i;
2523 if (BIT_VEC64_TEST_BIT(o->registry.aprox_match.
2524 vec, cur_bit)) {
2525 return cur_bit;
2526 }
2527 }
2528 inner_start = 0;
2529 }
2530
2531 /* None found */
2532 return -1;
2533}
2534
2535/**
2536 * bnx2x_mcast_clear_first_bin - find the first set bin and clear it
2537 *
2538 * @o:
2539 *
2540 * returns the index of the found bin or -1 if none is found
2541 */
2542static inline int bnx2x_mcast_clear_first_bin(struct bnx2x_mcast_obj *o)
2543{
2544 int cur_bit = bnx2x_mcast_get_next_bin(o, 0);
2545
2546 if (cur_bit >= 0)
2547 BIT_VEC64_CLEAR_BIT(o->registry.aprox_match.vec, cur_bit);
2548
2549 return cur_bit;
2550}
2551
2552static inline u8 bnx2x_mcast_get_rx_tx_flag(struct bnx2x_mcast_obj *o)
2553{
2554 struct bnx2x_raw_obj *raw = &o->raw;
2555 u8 rx_tx_flag = 0;
2556
2557 if ((raw->obj_type == BNX2X_OBJ_TYPE_TX) ||
2558 (raw->obj_type == BNX2X_OBJ_TYPE_RX_TX))
2559 rx_tx_flag |= ETH_MULTICAST_RULES_CMD_TX_CMD;
2560
2561 if ((raw->obj_type == BNX2X_OBJ_TYPE_RX) ||
2562 (raw->obj_type == BNX2X_OBJ_TYPE_RX_TX))
2563 rx_tx_flag |= ETH_MULTICAST_RULES_CMD_RX_CMD;
2564
2565 return rx_tx_flag;
2566}
2567
2568static void bnx2x_mcast_set_one_rule_e2(struct bnx2x *bp,
2569 struct bnx2x_mcast_obj *o, int idx,
2570 union bnx2x_mcast_config_data *cfg_data,
Yuval Mintz86564c32013-01-23 03:21:50 +00002571 enum bnx2x_mcast_cmd cmd)
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03002572{
2573 struct bnx2x_raw_obj *r = &o->raw;
2574 struct eth_multicast_rules_ramrod_data *data =
2575 (struct eth_multicast_rules_ramrod_data *)(r->rdata);
2576 u8 func_id = r->func_id;
2577 u8 rx_tx_add_flag = bnx2x_mcast_get_rx_tx_flag(o);
2578 int bin;
2579
2580 if ((cmd == BNX2X_MCAST_CMD_ADD) || (cmd == BNX2X_MCAST_CMD_RESTORE))
2581 rx_tx_add_flag |= ETH_MULTICAST_RULES_CMD_IS_ADD;
2582
2583 data->rules[idx].cmd_general_data |= rx_tx_add_flag;
2584
2585 /* Get a bin and update a bins' vector */
2586 switch (cmd) {
2587 case BNX2X_MCAST_CMD_ADD:
2588 bin = bnx2x_mcast_bin_from_mac(cfg_data->mac);
2589 BIT_VEC64_SET_BIT(o->registry.aprox_match.vec, bin);
Vladislav Zolotarov042181f2011-06-14 01:33:39 +00002590 break;
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03002591
2592 case BNX2X_MCAST_CMD_DEL:
2593 /* If there were no more bins to clear
2594 * (bnx2x_mcast_clear_first_bin() returns -1) then we would
2595 * clear any (0xff) bin.
2596 * See bnx2x_mcast_validate_e2() for explanation when it may
2597 * happen.
2598 */
2599 bin = bnx2x_mcast_clear_first_bin(o);
2600 break;
2601
2602 case BNX2X_MCAST_CMD_RESTORE:
2603 bin = cfg_data->bin;
2604 break;
2605
2606 default:
2607 BNX2X_ERR("Unknown command: %d\n", cmd);
2608 return;
Vladislav Zolotarov042181f2011-06-14 01:33:39 +00002609 }
2610
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03002611 DP(BNX2X_MSG_SP, "%s bin %d\n",
2612 ((rx_tx_add_flag & ETH_MULTICAST_RULES_CMD_IS_ADD) ?
2613 "Setting" : "Clearing"), bin);
Vladislav Zolotarov042181f2011-06-14 01:33:39 +00002614
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03002615 data->rules[idx].bin_id = (u8)bin;
2616 data->rules[idx].func_id = func_id;
2617 data->rules[idx].engine_id = o->engine_id;
Vladislav Zolotarov042181f2011-06-14 01:33:39 +00002618}
2619
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03002620/**
2621 * bnx2x_mcast_handle_restore_cmd_e2 - restore configuration from the registry
2622 *
2623 * @bp: device handle
2624 * @o:
2625 * @start_bin: index in the registry to start from (including)
2626 * @rdata_idx: index in the ramrod data to start from
2627 *
2628 * returns last handled bin index or -1 if all bins have been handled
2629 */
2630static inline int bnx2x_mcast_handle_restore_cmd_e2(
2631 struct bnx2x *bp, struct bnx2x_mcast_obj *o , int start_bin,
2632 int *rdata_idx)
Vladislav Zolotarov042181f2011-06-14 01:33:39 +00002633{
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03002634 int cur_bin, cnt = *rdata_idx;
Yuval Mintz86564c32013-01-23 03:21:50 +00002635 union bnx2x_mcast_config_data cfg_data = {NULL};
Vladislav Zolotarov042181f2011-06-14 01:33:39 +00002636
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03002637 /* go through the registry and configure the bins from it */
2638 for (cur_bin = bnx2x_mcast_get_next_bin(o, start_bin); cur_bin >= 0;
2639 cur_bin = bnx2x_mcast_get_next_bin(o, cur_bin + 1)) {
Vladislav Zolotarov042181f2011-06-14 01:33:39 +00002640
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03002641 cfg_data.bin = (u8)cur_bin;
2642 o->set_one_rule(bp, o, cnt, &cfg_data,
2643 BNX2X_MCAST_CMD_RESTORE);
Vladislav Zolotarov042181f2011-06-14 01:33:39 +00002644
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03002645 cnt++;
Vladislav Zolotarov042181f2011-06-14 01:33:39 +00002646
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03002647 DP(BNX2X_MSG_SP, "About to configure a bin %d\n", cur_bin);
Vladislav Zolotarov042181f2011-06-14 01:33:39 +00002648
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03002649 /* Break if we reached the maximum number
2650 * of rules.
2651 */
2652 if (cnt >= o->max_cmd_len)
2653 break;
Vladislav Zolotarov042181f2011-06-14 01:33:39 +00002654 }
2655
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03002656 *rdata_idx = cnt;
2657
2658 return cur_bin;
Vladislav Zolotarov042181f2011-06-14 01:33:39 +00002659}
2660
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03002661static inline void bnx2x_mcast_hdl_pending_add_e2(struct bnx2x *bp,
2662 struct bnx2x_mcast_obj *o, struct bnx2x_pending_mcast_cmd *cmd_pos,
2663 int *line_idx)
Vladislav Zolotarov042181f2011-06-14 01:33:39 +00002664{
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03002665 struct bnx2x_mcast_mac_elem *pmac_pos, *pmac_pos_n;
2666 int cnt = *line_idx;
Yuval Mintz86564c32013-01-23 03:21:50 +00002667 union bnx2x_mcast_config_data cfg_data = {NULL};
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03002668
2669 list_for_each_entry_safe(pmac_pos, pmac_pos_n, &cmd_pos->data.macs_head,
2670 link) {
2671
2672 cfg_data.mac = &pmac_pos->mac[0];
2673 o->set_one_rule(bp, o, cnt, &cfg_data, cmd_pos->type);
2674
2675 cnt++;
2676
Joe Perches0f9dad12011-08-14 12:16:19 +00002677 DP(BNX2X_MSG_SP, "About to configure %pM mcast MAC\n",
Merav Sicron51c1a582012-03-18 10:33:38 +00002678 pmac_pos->mac);
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03002679
2680 list_del(&pmac_pos->link);
2681
2682 /* Break if we reached the maximum number
2683 * of rules.
2684 */
2685 if (cnt >= o->max_cmd_len)
2686 break;
2687 }
2688
2689 *line_idx = cnt;
2690
2691 /* if no more MACs to configure - we are done */
2692 if (list_empty(&cmd_pos->data.macs_head))
2693 cmd_pos->done = true;
2694}
2695
2696static inline void bnx2x_mcast_hdl_pending_del_e2(struct bnx2x *bp,
2697 struct bnx2x_mcast_obj *o, struct bnx2x_pending_mcast_cmd *cmd_pos,
2698 int *line_idx)
2699{
2700 int cnt = *line_idx;
2701
2702 while (cmd_pos->data.macs_num) {
2703 o->set_one_rule(bp, o, cnt, NULL, cmd_pos->type);
2704
2705 cnt++;
2706
2707 cmd_pos->data.macs_num--;
2708
2709 DP(BNX2X_MSG_SP, "Deleting MAC. %d left,cnt is %d\n",
2710 cmd_pos->data.macs_num, cnt);
2711
2712 /* Break if we reached the maximum
2713 * number of rules.
2714 */
2715 if (cnt >= o->max_cmd_len)
2716 break;
2717 }
2718
2719 *line_idx = cnt;
2720
2721 /* If we cleared all bins - we are done */
2722 if (!cmd_pos->data.macs_num)
2723 cmd_pos->done = true;
2724}
2725
2726static inline void bnx2x_mcast_hdl_pending_restore_e2(struct bnx2x *bp,
2727 struct bnx2x_mcast_obj *o, struct bnx2x_pending_mcast_cmd *cmd_pos,
2728 int *line_idx)
2729{
2730 cmd_pos->data.next_bin = o->hdl_restore(bp, o, cmd_pos->data.next_bin,
2731 line_idx);
2732
2733 if (cmd_pos->data.next_bin < 0)
2734 /* If o->set_restore returned -1 we are done */
2735 cmd_pos->done = true;
2736 else
2737 /* Start from the next bin next time */
2738 cmd_pos->data.next_bin++;
2739}
2740
2741static inline int bnx2x_mcast_handle_pending_cmds_e2(struct bnx2x *bp,
2742 struct bnx2x_mcast_ramrod_params *p)
2743{
2744 struct bnx2x_pending_mcast_cmd *cmd_pos, *cmd_pos_n;
2745 int cnt = 0;
2746 struct bnx2x_mcast_obj *o = p->mcast_obj;
2747
2748 list_for_each_entry_safe(cmd_pos, cmd_pos_n, &o->pending_cmds_head,
2749 link) {
2750 switch (cmd_pos->type) {
2751 case BNX2X_MCAST_CMD_ADD:
2752 bnx2x_mcast_hdl_pending_add_e2(bp, o, cmd_pos, &cnt);
2753 break;
2754
2755 case BNX2X_MCAST_CMD_DEL:
2756 bnx2x_mcast_hdl_pending_del_e2(bp, o, cmd_pos, &cnt);
2757 break;
2758
2759 case BNX2X_MCAST_CMD_RESTORE:
2760 bnx2x_mcast_hdl_pending_restore_e2(bp, o, cmd_pos,
2761 &cnt);
2762 break;
2763
2764 default:
2765 BNX2X_ERR("Unknown command: %d\n", cmd_pos->type);
2766 return -EINVAL;
2767 }
2768
2769 /* If the command has been completed - remove it from the list
2770 * and free the memory
2771 */
2772 if (cmd_pos->done) {
2773 list_del(&cmd_pos->link);
2774 kfree(cmd_pos);
2775 }
2776
2777 /* Break if we reached the maximum number of rules */
2778 if (cnt >= o->max_cmd_len)
2779 break;
2780 }
2781
2782 return cnt;
2783}
2784
2785static inline void bnx2x_mcast_hdl_add(struct bnx2x *bp,
2786 struct bnx2x_mcast_obj *o, struct bnx2x_mcast_ramrod_params *p,
2787 int *line_idx)
2788{
2789 struct bnx2x_mcast_list_elem *mlist_pos;
Yuval Mintz86564c32013-01-23 03:21:50 +00002790 union bnx2x_mcast_config_data cfg_data = {NULL};
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03002791 int cnt = *line_idx;
2792
2793 list_for_each_entry(mlist_pos, &p->mcast_list, link) {
2794 cfg_data.mac = mlist_pos->mac;
2795 o->set_one_rule(bp, o, cnt, &cfg_data, BNX2X_MCAST_CMD_ADD);
2796
2797 cnt++;
2798
Joe Perches0f9dad12011-08-14 12:16:19 +00002799 DP(BNX2X_MSG_SP, "About to configure %pM mcast MAC\n",
Yuval Mintz2de67432013-01-23 03:21:43 +00002800 mlist_pos->mac);
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03002801 }
2802
2803 *line_idx = cnt;
2804}
2805
2806static inline void bnx2x_mcast_hdl_del(struct bnx2x *bp,
2807 struct bnx2x_mcast_obj *o, struct bnx2x_mcast_ramrod_params *p,
2808 int *line_idx)
2809{
2810 int cnt = *line_idx, i;
2811
2812 for (i = 0; i < p->mcast_list_len; i++) {
2813 o->set_one_rule(bp, o, cnt, NULL, BNX2X_MCAST_CMD_DEL);
2814
2815 cnt++;
2816
2817 DP(BNX2X_MSG_SP, "Deleting MAC. %d left\n",
2818 p->mcast_list_len - i - 1);
2819 }
2820
2821 *line_idx = cnt;
2822}
2823
2824/**
2825 * bnx2x_mcast_handle_current_cmd -
2826 *
2827 * @bp: device handle
2828 * @p:
2829 * @cmd:
2830 * @start_cnt: first line in the ramrod data that may be used
2831 *
2832 * This function is called iff there is enough place for the current command in
2833 * the ramrod data.
2834 * Returns number of lines filled in the ramrod data in total.
2835 */
2836static inline int bnx2x_mcast_handle_current_cmd(struct bnx2x *bp,
Yuval Mintz86564c32013-01-23 03:21:50 +00002837 struct bnx2x_mcast_ramrod_params *p,
2838 enum bnx2x_mcast_cmd cmd,
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03002839 int start_cnt)
2840{
2841 struct bnx2x_mcast_obj *o = p->mcast_obj;
2842 int cnt = start_cnt;
2843
2844 DP(BNX2X_MSG_SP, "p->mcast_list_len=%d\n", p->mcast_list_len);
2845
2846 switch (cmd) {
2847 case BNX2X_MCAST_CMD_ADD:
2848 bnx2x_mcast_hdl_add(bp, o, p, &cnt);
2849 break;
2850
2851 case BNX2X_MCAST_CMD_DEL:
2852 bnx2x_mcast_hdl_del(bp, o, p, &cnt);
2853 break;
2854
2855 case BNX2X_MCAST_CMD_RESTORE:
2856 o->hdl_restore(bp, o, 0, &cnt);
2857 break;
2858
2859 default:
2860 BNX2X_ERR("Unknown command: %d\n", cmd);
2861 return -EINVAL;
2862 }
2863
2864 /* The current command has been handled */
2865 p->mcast_list_len = 0;
2866
2867 return cnt;
2868}
2869
2870static int bnx2x_mcast_validate_e2(struct bnx2x *bp,
2871 struct bnx2x_mcast_ramrod_params *p,
Yuval Mintz86564c32013-01-23 03:21:50 +00002872 enum bnx2x_mcast_cmd cmd)
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03002873{
2874 struct bnx2x_mcast_obj *o = p->mcast_obj;
2875 int reg_sz = o->get_registry_size(o);
2876
2877 switch (cmd) {
2878 /* DEL command deletes all currently configured MACs */
2879 case BNX2X_MCAST_CMD_DEL:
2880 o->set_registry_size(o, 0);
2881 /* Don't break */
2882
2883 /* RESTORE command will restore the entire multicast configuration */
2884 case BNX2X_MCAST_CMD_RESTORE:
2885 /* Here we set the approximate amount of work to do, which in
2886 * fact may be only less as some MACs in postponed ADD
2887 * command(s) scheduled before this command may fall into
2888 * the same bin and the actual number of bins set in the
2889 * registry would be less than we estimated here. See
2890 * bnx2x_mcast_set_one_rule_e2() for further details.
2891 */
2892 p->mcast_list_len = reg_sz;
2893 break;
2894
2895 case BNX2X_MCAST_CMD_ADD:
2896 case BNX2X_MCAST_CMD_CONT:
2897 /* Here we assume that all new MACs will fall into new bins.
2898 * However we will correct the real registry size after we
2899 * handle all pending commands.
2900 */
2901 o->set_registry_size(o, reg_sz + p->mcast_list_len);
2902 break;
2903
2904 default:
2905 BNX2X_ERR("Unknown command: %d\n", cmd);
2906 return -EINVAL;
2907
2908 }
2909
2910 /* Increase the total number of MACs pending to be configured */
2911 o->total_pending_num += p->mcast_list_len;
2912
2913 return 0;
2914}
2915
2916static void bnx2x_mcast_revert_e2(struct bnx2x *bp,
2917 struct bnx2x_mcast_ramrod_params *p,
2918 int old_num_bins)
2919{
2920 struct bnx2x_mcast_obj *o = p->mcast_obj;
2921
2922 o->set_registry_size(o, old_num_bins);
2923 o->total_pending_num -= p->mcast_list_len;
2924}
2925
2926/**
2927 * bnx2x_mcast_set_rdata_hdr_e2 - sets a header values
2928 *
2929 * @bp: device handle
2930 * @p:
2931 * @len: number of rules to handle
2932 */
2933static inline void bnx2x_mcast_set_rdata_hdr_e2(struct bnx2x *bp,
2934 struct bnx2x_mcast_ramrod_params *p,
2935 u8 len)
2936{
2937 struct bnx2x_raw_obj *r = &p->mcast_obj->raw;
2938 struct eth_multicast_rules_ramrod_data *data =
2939 (struct eth_multicast_rules_ramrod_data *)(r->rdata);
2940
Yuval Mintz86564c32013-01-23 03:21:50 +00002941 data->header.echo = cpu_to_le32((r->cid & BNX2X_SWCID_MASK) |
2942 (BNX2X_FILTER_MCAST_PENDING <<
2943 BNX2X_SWCID_SHIFT));
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03002944 data->header.rule_cnt = len;
2945}
2946
2947/**
2948 * bnx2x_mcast_refresh_registry_e2 - recalculate the actual number of set bins
2949 *
2950 * @bp: device handle
2951 * @o:
2952 *
2953 * Recalculate the actual number of set bins in the registry using Brian
2954 * Kernighan's algorithm: it's execution complexity is as a number of set bins.
2955 *
2956 * returns 0 for the compliance with bnx2x_mcast_refresh_registry_e1().
2957 */
2958static inline int bnx2x_mcast_refresh_registry_e2(struct bnx2x *bp,
2959 struct bnx2x_mcast_obj *o)
2960{
2961 int i, cnt = 0;
2962 u64 elem;
2963
2964 for (i = 0; i < BNX2X_MCAST_VEC_SZ; i++) {
2965 elem = o->registry.aprox_match.vec[i];
2966 for (; elem; cnt++)
2967 elem &= elem - 1;
2968 }
2969
2970 o->set_registry_size(o, cnt);
2971
2972 return 0;
2973}
2974
2975static int bnx2x_mcast_setup_e2(struct bnx2x *bp,
2976 struct bnx2x_mcast_ramrod_params *p,
Yuval Mintz86564c32013-01-23 03:21:50 +00002977 enum bnx2x_mcast_cmd cmd)
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03002978{
2979 struct bnx2x_raw_obj *raw = &p->mcast_obj->raw;
2980 struct bnx2x_mcast_obj *o = p->mcast_obj;
2981 struct eth_multicast_rules_ramrod_data *data =
2982 (struct eth_multicast_rules_ramrod_data *)(raw->rdata);
2983 int cnt = 0, rc;
2984
2985 /* Reset the ramrod data buffer */
Vladislav Zolotarov042181f2011-06-14 01:33:39 +00002986 memset(data, 0, sizeof(*data));
2987
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03002988 cnt = bnx2x_mcast_handle_pending_cmds_e2(bp, p);
2989
2990 /* If there are no more pending commands - clear SCHEDULED state */
2991 if (list_empty(&o->pending_cmds_head))
2992 o->clear_sched(o);
2993
2994 /* The below may be true iff there was enough room in ramrod
2995 * data for all pending commands and for the current
2996 * command. Otherwise the current command would have been added
2997 * to the pending commands and p->mcast_list_len would have been
2998 * zeroed.
2999 */
3000 if (p->mcast_list_len > 0)
3001 cnt = bnx2x_mcast_handle_current_cmd(bp, p, cmd, cnt);
3002
3003 /* We've pulled out some MACs - update the total number of
3004 * outstanding.
3005 */
3006 o->total_pending_num -= cnt;
3007
3008 /* send a ramrod */
3009 WARN_ON(o->total_pending_num < 0);
3010 WARN_ON(cnt > o->max_cmd_len);
3011
3012 bnx2x_mcast_set_rdata_hdr_e2(bp, p, (u8)cnt);
3013
3014 /* Update a registry size if there are no more pending operations.
3015 *
3016 * We don't want to change the value of the registry size if there are
3017 * pending operations because we want it to always be equal to the
3018 * exact or the approximate number (see bnx2x_mcast_validate_e2()) of
3019 * set bins after the last requested operation in order to properly
3020 * evaluate the size of the next DEL/RESTORE operation.
3021 *
3022 * Note that we update the registry itself during command(s) handling
3023 * - see bnx2x_mcast_set_one_rule_e2(). That's because for 57712 we
3024 * aggregate multiple commands (ADD/DEL/RESTORE) into one ramrod but
3025 * with a limited amount of update commands (per MAC/bin) and we don't
3026 * know in this scope what the actual state of bins configuration is
3027 * going to be after this ramrod.
3028 */
3029 if (!o->total_pending_num)
3030 bnx2x_mcast_refresh_registry_e2(bp, o);
3031
Vladislav Zolotarov53e51e22011-07-19 01:45:02 +00003032 /*
3033 * If CLEAR_ONLY was requested - don't send a ramrod and clear
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03003034 * RAMROD_PENDING status immediately.
3035 */
3036 if (test_bit(RAMROD_DRV_CLR_ONLY, &p->ramrod_flags)) {
3037 raw->clear_pending(raw);
3038 return 0;
3039 } else {
Vladislav Zolotarov53e51e22011-07-19 01:45:02 +00003040 /*
3041 * No need for an explicit memory barrier here as long we would
3042 * need to ensure the ordering of writing to the SPQ element
3043 * and updating of the SPQ producer which involves a memory
3044 * read and we will have to put a full memory barrier there
3045 * (inside bnx2x_sp_post()).
3046 */
3047
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03003048 /* Send a ramrod */
3049 rc = bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_MULTICAST_RULES,
3050 raw->cid, U64_HI(raw->rdata_mapping),
3051 U64_LO(raw->rdata_mapping),
3052 ETH_CONNECTION_TYPE);
3053 if (rc)
3054 return rc;
3055
3056 /* Ramrod completion is pending */
3057 return 1;
3058 }
3059}
3060
3061static int bnx2x_mcast_validate_e1h(struct bnx2x *bp,
3062 struct bnx2x_mcast_ramrod_params *p,
Yuval Mintz86564c32013-01-23 03:21:50 +00003063 enum bnx2x_mcast_cmd cmd)
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03003064{
3065 /* Mark, that there is a work to do */
3066 if ((cmd == BNX2X_MCAST_CMD_DEL) || (cmd == BNX2X_MCAST_CMD_RESTORE))
3067 p->mcast_list_len = 1;
3068
3069 return 0;
3070}
3071
3072static void bnx2x_mcast_revert_e1h(struct bnx2x *bp,
3073 struct bnx2x_mcast_ramrod_params *p,
3074 int old_num_bins)
3075{
3076 /* Do nothing */
3077}
3078
3079#define BNX2X_57711_SET_MC_FILTER(filter, bit) \
3080do { \
3081 (filter)[(bit) >> 5] |= (1 << ((bit) & 0x1f)); \
3082} while (0)
3083
3084static inline void bnx2x_mcast_hdl_add_e1h(struct bnx2x *bp,
3085 struct bnx2x_mcast_obj *o,
3086 struct bnx2x_mcast_ramrod_params *p,
3087 u32 *mc_filter)
3088{
3089 struct bnx2x_mcast_list_elem *mlist_pos;
3090 int bit;
3091
3092 list_for_each_entry(mlist_pos, &p->mcast_list, link) {
3093 bit = bnx2x_mcast_bin_from_mac(mlist_pos->mac);
3094 BNX2X_57711_SET_MC_FILTER(mc_filter, bit);
3095
Joe Perches0f9dad12011-08-14 12:16:19 +00003096 DP(BNX2X_MSG_SP, "About to configure %pM mcast MAC, bin %d\n",
Yuval Mintz2de67432013-01-23 03:21:43 +00003097 mlist_pos->mac, bit);
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03003098
3099 /* bookkeeping... */
3100 BIT_VEC64_SET_BIT(o->registry.aprox_match.vec,
3101 bit);
3102 }
3103}
3104
3105static inline void bnx2x_mcast_hdl_restore_e1h(struct bnx2x *bp,
3106 struct bnx2x_mcast_obj *o, struct bnx2x_mcast_ramrod_params *p,
3107 u32 *mc_filter)
3108{
3109 int bit;
3110
3111 for (bit = bnx2x_mcast_get_next_bin(o, 0);
3112 bit >= 0;
3113 bit = bnx2x_mcast_get_next_bin(o, bit + 1)) {
3114 BNX2X_57711_SET_MC_FILTER(mc_filter, bit);
3115 DP(BNX2X_MSG_SP, "About to set bin %d\n", bit);
3116 }
3117}
3118
3119/* On 57711 we write the multicast MACs' aproximate match
3120 * table by directly into the TSTORM's internal RAM. So we don't
3121 * really need to handle any tricks to make it work.
3122 */
3123static int bnx2x_mcast_setup_e1h(struct bnx2x *bp,
3124 struct bnx2x_mcast_ramrod_params *p,
Yuval Mintz86564c32013-01-23 03:21:50 +00003125 enum bnx2x_mcast_cmd cmd)
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03003126{
3127 int i;
3128 struct bnx2x_mcast_obj *o = p->mcast_obj;
3129 struct bnx2x_raw_obj *r = &o->raw;
3130
3131 /* If CLEAR_ONLY has been requested - clear the registry
3132 * and clear a pending bit.
3133 */
3134 if (!test_bit(RAMROD_DRV_CLR_ONLY, &p->ramrod_flags)) {
3135 u32 mc_filter[MC_HASH_SIZE] = {0};
3136
3137 /* Set the multicast filter bits before writing it into
3138 * the internal memory.
3139 */
3140 switch (cmd) {
3141 case BNX2X_MCAST_CMD_ADD:
3142 bnx2x_mcast_hdl_add_e1h(bp, o, p, mc_filter);
3143 break;
3144
3145 case BNX2X_MCAST_CMD_DEL:
Joe Perches94f05b02011-08-14 12:16:20 +00003146 DP(BNX2X_MSG_SP,
3147 "Invalidating multicast MACs configuration\n");
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03003148
3149 /* clear the registry */
3150 memset(o->registry.aprox_match.vec, 0,
3151 sizeof(o->registry.aprox_match.vec));
3152 break;
3153
3154 case BNX2X_MCAST_CMD_RESTORE:
3155 bnx2x_mcast_hdl_restore_e1h(bp, o, p, mc_filter);
3156 break;
3157
3158 default:
3159 BNX2X_ERR("Unknown command: %d\n", cmd);
3160 return -EINVAL;
3161 }
3162
3163 /* Set the mcast filter in the internal memory */
3164 for (i = 0; i < MC_HASH_SIZE; i++)
3165 REG_WR(bp, MC_HASH_OFFSET(bp, i), mc_filter[i]);
3166 } else
3167 /* clear the registry */
3168 memset(o->registry.aprox_match.vec, 0,
3169 sizeof(o->registry.aprox_match.vec));
3170
3171 /* We are done */
3172 r->clear_pending(r);
3173
3174 return 0;
3175}
3176
3177static int bnx2x_mcast_validate_e1(struct bnx2x *bp,
3178 struct bnx2x_mcast_ramrod_params *p,
Yuval Mintz86564c32013-01-23 03:21:50 +00003179 enum bnx2x_mcast_cmd cmd)
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03003180{
3181 struct bnx2x_mcast_obj *o = p->mcast_obj;
3182 int reg_sz = o->get_registry_size(o);
3183
3184 switch (cmd) {
3185 /* DEL command deletes all currently configured MACs */
3186 case BNX2X_MCAST_CMD_DEL:
3187 o->set_registry_size(o, 0);
3188 /* Don't break */
3189
3190 /* RESTORE command will restore the entire multicast configuration */
3191 case BNX2X_MCAST_CMD_RESTORE:
3192 p->mcast_list_len = reg_sz;
3193 DP(BNX2X_MSG_SP, "Command %d, p->mcast_list_len=%d\n",
3194 cmd, p->mcast_list_len);
3195 break;
3196
3197 case BNX2X_MCAST_CMD_ADD:
3198 case BNX2X_MCAST_CMD_CONT:
3199 /* Multicast MACs on 57710 are configured as unicast MACs and
3200 * there is only a limited number of CAM entries for that
3201 * matter.
3202 */
3203 if (p->mcast_list_len > o->max_cmd_len) {
Merav Sicron51c1a582012-03-18 10:33:38 +00003204 BNX2X_ERR("Can't configure more than %d multicast MACs on 57710\n",
3205 o->max_cmd_len);
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03003206 return -EINVAL;
3207 }
3208 /* Every configured MAC should be cleared if DEL command is
3209 * called. Only the last ADD command is relevant as long as
3210 * every ADD commands overrides the previous configuration.
3211 */
3212 DP(BNX2X_MSG_SP, "p->mcast_list_len=%d\n", p->mcast_list_len);
3213 if (p->mcast_list_len > 0)
3214 o->set_registry_size(o, p->mcast_list_len);
3215
3216 break;
3217
3218 default:
3219 BNX2X_ERR("Unknown command: %d\n", cmd);
3220 return -EINVAL;
3221
3222 }
3223
3224 /* We want to ensure that commands are executed one by one for 57710.
3225 * Therefore each none-empty command will consume o->max_cmd_len.
3226 */
3227 if (p->mcast_list_len)
3228 o->total_pending_num += o->max_cmd_len;
3229
3230 return 0;
3231}
3232
3233static void bnx2x_mcast_revert_e1(struct bnx2x *bp,
3234 struct bnx2x_mcast_ramrod_params *p,
3235 int old_num_macs)
3236{
3237 struct bnx2x_mcast_obj *o = p->mcast_obj;
3238
3239 o->set_registry_size(o, old_num_macs);
3240
3241 /* If current command hasn't been handled yet and we are
3242 * here means that it's meant to be dropped and we have to
3243 * update the number of outstandling MACs accordingly.
3244 */
3245 if (p->mcast_list_len)
3246 o->total_pending_num -= o->max_cmd_len;
3247}
3248
3249static void bnx2x_mcast_set_one_rule_e1(struct bnx2x *bp,
3250 struct bnx2x_mcast_obj *o, int idx,
3251 union bnx2x_mcast_config_data *cfg_data,
Yuval Mintz86564c32013-01-23 03:21:50 +00003252 enum bnx2x_mcast_cmd cmd)
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03003253{
3254 struct bnx2x_raw_obj *r = &o->raw;
3255 struct mac_configuration_cmd *data =
3256 (struct mac_configuration_cmd *)(r->rdata);
3257
3258 /* copy mac */
3259 if ((cmd == BNX2X_MCAST_CMD_ADD) || (cmd == BNX2X_MCAST_CMD_RESTORE)) {
3260 bnx2x_set_fw_mac_addr(&data->config_table[idx].msb_mac_addr,
3261 &data->config_table[idx].middle_mac_addr,
3262 &data->config_table[idx].lsb_mac_addr,
3263 cfg_data->mac);
3264
3265 data->config_table[idx].vlan_id = 0;
3266 data->config_table[idx].pf_id = r->func_id;
3267 data->config_table[idx].clients_bit_vector =
3268 cpu_to_le32(1 << r->cl_id);
3269
3270 SET_FLAG(data->config_table[idx].flags,
3271 MAC_CONFIGURATION_ENTRY_ACTION_TYPE,
3272 T_ETH_MAC_COMMAND_SET);
3273 }
3274}
3275
3276/**
3277 * bnx2x_mcast_set_rdata_hdr_e1 - set header values in mac_configuration_cmd
3278 *
3279 * @bp: device handle
3280 * @p:
3281 * @len: number of rules to handle
3282 */
3283static inline void bnx2x_mcast_set_rdata_hdr_e1(struct bnx2x *bp,
3284 struct bnx2x_mcast_ramrod_params *p,
3285 u8 len)
3286{
3287 struct bnx2x_raw_obj *r = &p->mcast_obj->raw;
3288 struct mac_configuration_cmd *data =
3289 (struct mac_configuration_cmd *)(r->rdata);
3290
3291 u8 offset = (CHIP_REV_IS_SLOW(bp) ?
3292 BNX2X_MAX_EMUL_MULTI*(1 + r->func_id) :
3293 BNX2X_MAX_MULTICAST*(1 + r->func_id));
3294
3295 data->hdr.offset = offset;
Yuval Mintz86564c32013-01-23 03:21:50 +00003296 data->hdr.client_id = cpu_to_le16(0xff);
3297 data->hdr.echo = cpu_to_le32((r->cid & BNX2X_SWCID_MASK) |
3298 (BNX2X_FILTER_MCAST_PENDING <<
3299 BNX2X_SWCID_SHIFT));
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03003300 data->hdr.length = len;
3301}
3302
3303/**
3304 * bnx2x_mcast_handle_restore_cmd_e1 - restore command for 57710
3305 *
3306 * @bp: device handle
3307 * @o:
3308 * @start_idx: index in the registry to start from
3309 * @rdata_idx: index in the ramrod data to start from
3310 *
3311 * restore command for 57710 is like all other commands - always a stand alone
3312 * command - start_idx and rdata_idx will always be 0. This function will always
3313 * succeed.
3314 * returns -1 to comply with 57712 variant.
3315 */
3316static inline int bnx2x_mcast_handle_restore_cmd_e1(
3317 struct bnx2x *bp, struct bnx2x_mcast_obj *o , int start_idx,
3318 int *rdata_idx)
3319{
3320 struct bnx2x_mcast_mac_elem *elem;
3321 int i = 0;
Yuval Mintz86564c32013-01-23 03:21:50 +00003322 union bnx2x_mcast_config_data cfg_data = {NULL};
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03003323
3324 /* go through the registry and configure the MACs from it. */
3325 list_for_each_entry(elem, &o->registry.exact_match.macs, link) {
3326 cfg_data.mac = &elem->mac[0];
3327 o->set_one_rule(bp, o, i, &cfg_data, BNX2X_MCAST_CMD_RESTORE);
3328
3329 i++;
3330
Joe Perches0f9dad12011-08-14 12:16:19 +00003331 DP(BNX2X_MSG_SP, "About to configure %pM mcast MAC\n",
Yuval Mintz2de67432013-01-23 03:21:43 +00003332 cfg_data.mac);
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03003333 }
3334
3335 *rdata_idx = i;
3336
3337 return -1;
3338}
3339
3340
3341static inline int bnx2x_mcast_handle_pending_cmds_e1(
3342 struct bnx2x *bp, struct bnx2x_mcast_ramrod_params *p)
3343{
3344 struct bnx2x_pending_mcast_cmd *cmd_pos;
3345 struct bnx2x_mcast_mac_elem *pmac_pos;
3346 struct bnx2x_mcast_obj *o = p->mcast_obj;
Yuval Mintz86564c32013-01-23 03:21:50 +00003347 union bnx2x_mcast_config_data cfg_data = {NULL};
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03003348 int cnt = 0;
3349
3350
3351 /* If nothing to be done - return */
3352 if (list_empty(&o->pending_cmds_head))
3353 return 0;
3354
3355 /* Handle the first command */
3356 cmd_pos = list_first_entry(&o->pending_cmds_head,
3357 struct bnx2x_pending_mcast_cmd, link);
3358
3359 switch (cmd_pos->type) {
3360 case BNX2X_MCAST_CMD_ADD:
3361 list_for_each_entry(pmac_pos, &cmd_pos->data.macs_head, link) {
3362 cfg_data.mac = &pmac_pos->mac[0];
3363 o->set_one_rule(bp, o, cnt, &cfg_data, cmd_pos->type);
3364
3365 cnt++;
3366
Joe Perches0f9dad12011-08-14 12:16:19 +00003367 DP(BNX2X_MSG_SP, "About to configure %pM mcast MAC\n",
Yuval Mintz2de67432013-01-23 03:21:43 +00003368 pmac_pos->mac);
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03003369 }
3370 break;
3371
3372 case BNX2X_MCAST_CMD_DEL:
3373 cnt = cmd_pos->data.macs_num;
3374 DP(BNX2X_MSG_SP, "About to delete %d multicast MACs\n", cnt);
3375 break;
3376
3377 case BNX2X_MCAST_CMD_RESTORE:
3378 o->hdl_restore(bp, o, 0, &cnt);
3379 break;
3380
3381 default:
3382 BNX2X_ERR("Unknown command: %d\n", cmd_pos->type);
3383 return -EINVAL;
3384 }
3385
3386 list_del(&cmd_pos->link);
3387 kfree(cmd_pos);
3388
3389 return cnt;
3390}
3391
3392/**
3393 * bnx2x_get_fw_mac_addr - revert the bnx2x_set_fw_mac_addr().
3394 *
3395 * @fw_hi:
3396 * @fw_mid:
3397 * @fw_lo:
3398 * @mac:
3399 */
3400static inline void bnx2x_get_fw_mac_addr(__le16 *fw_hi, __le16 *fw_mid,
3401 __le16 *fw_lo, u8 *mac)
3402{
3403 mac[1] = ((u8 *)fw_hi)[0];
3404 mac[0] = ((u8 *)fw_hi)[1];
3405 mac[3] = ((u8 *)fw_mid)[0];
3406 mac[2] = ((u8 *)fw_mid)[1];
3407 mac[5] = ((u8 *)fw_lo)[0];
3408 mac[4] = ((u8 *)fw_lo)[1];
3409}
3410
3411/**
3412 * bnx2x_mcast_refresh_registry_e1 -
3413 *
3414 * @bp: device handle
3415 * @cnt:
3416 *
3417 * Check the ramrod data first entry flag to see if it's a DELETE or ADD command
3418 * and update the registry correspondingly: if ADD - allocate a memory and add
3419 * the entries to the registry (list), if DELETE - clear the registry and free
3420 * the memory.
3421 */
3422static inline int bnx2x_mcast_refresh_registry_e1(struct bnx2x *bp,
3423 struct bnx2x_mcast_obj *o)
3424{
3425 struct bnx2x_raw_obj *raw = &o->raw;
3426 struct bnx2x_mcast_mac_elem *elem;
3427 struct mac_configuration_cmd *data =
3428 (struct mac_configuration_cmd *)(raw->rdata);
3429
3430 /* If first entry contains a SET bit - the command was ADD,
3431 * otherwise - DEL_ALL
3432 */
3433 if (GET_FLAG(data->config_table[0].flags,
3434 MAC_CONFIGURATION_ENTRY_ACTION_TYPE)) {
3435 int i, len = data->hdr.length;
3436
3437 /* Break if it was a RESTORE command */
3438 if (!list_empty(&o->registry.exact_match.macs))
3439 return 0;
3440
Thomas Meyer01e23742011-11-29 11:08:00 +00003441 elem = kcalloc(len, sizeof(*elem), GFP_ATOMIC);
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03003442 if (!elem) {
3443 BNX2X_ERR("Failed to allocate registry memory\n");
3444 return -ENOMEM;
3445 }
3446
3447 for (i = 0; i < len; i++, elem++) {
3448 bnx2x_get_fw_mac_addr(
3449 &data->config_table[i].msb_mac_addr,
3450 &data->config_table[i].middle_mac_addr,
3451 &data->config_table[i].lsb_mac_addr,
3452 elem->mac);
Joe Perches0f9dad12011-08-14 12:16:19 +00003453 DP(BNX2X_MSG_SP, "Adding registry entry for [%pM]\n",
Merav Sicron51c1a582012-03-18 10:33:38 +00003454 elem->mac);
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03003455 list_add_tail(&elem->link,
3456 &o->registry.exact_match.macs);
3457 }
3458 } else {
3459 elem = list_first_entry(&o->registry.exact_match.macs,
3460 struct bnx2x_mcast_mac_elem, link);
3461 DP(BNX2X_MSG_SP, "Deleting a registry\n");
3462 kfree(elem);
3463 INIT_LIST_HEAD(&o->registry.exact_match.macs);
3464 }
3465
3466 return 0;
3467}
3468
3469static int bnx2x_mcast_setup_e1(struct bnx2x *bp,
3470 struct bnx2x_mcast_ramrod_params *p,
Yuval Mintz86564c32013-01-23 03:21:50 +00003471 enum bnx2x_mcast_cmd cmd)
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03003472{
3473 struct bnx2x_mcast_obj *o = p->mcast_obj;
3474 struct bnx2x_raw_obj *raw = &o->raw;
3475 struct mac_configuration_cmd *data =
3476 (struct mac_configuration_cmd *)(raw->rdata);
3477 int cnt = 0, i, rc;
3478
3479 /* Reset the ramrod data buffer */
3480 memset(data, 0, sizeof(*data));
3481
3482 /* First set all entries as invalid */
3483 for (i = 0; i < o->max_cmd_len ; i++)
3484 SET_FLAG(data->config_table[i].flags,
3485 MAC_CONFIGURATION_ENTRY_ACTION_TYPE,
3486 T_ETH_MAC_COMMAND_INVALIDATE);
3487
3488 /* Handle pending commands first */
3489 cnt = bnx2x_mcast_handle_pending_cmds_e1(bp, p);
3490
3491 /* If there are no more pending commands - clear SCHEDULED state */
3492 if (list_empty(&o->pending_cmds_head))
3493 o->clear_sched(o);
3494
3495 /* The below may be true iff there were no pending commands */
3496 if (!cnt)
3497 cnt = bnx2x_mcast_handle_current_cmd(bp, p, cmd, 0);
3498
3499 /* For 57710 every command has o->max_cmd_len length to ensure that
3500 * commands are done one at a time.
3501 */
3502 o->total_pending_num -= o->max_cmd_len;
3503
3504 /* send a ramrod */
3505
3506 WARN_ON(cnt > o->max_cmd_len);
3507
3508 /* Set ramrod header (in particular, a number of entries to update) */
3509 bnx2x_mcast_set_rdata_hdr_e1(bp, p, (u8)cnt);
3510
3511 /* update a registry: we need the registry contents to be always up
3512 * to date in order to be able to execute a RESTORE opcode. Here
3513 * we use the fact that for 57710 we sent one command at a time
3514 * hence we may take the registry update out of the command handling
3515 * and do it in a simpler way here.
3516 */
3517 rc = bnx2x_mcast_refresh_registry_e1(bp, o);
3518 if (rc)
3519 return rc;
3520
Vladislav Zolotarov53e51e22011-07-19 01:45:02 +00003521 /*
3522 * If CLEAR_ONLY was requested - don't send a ramrod and clear
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03003523 * RAMROD_PENDING status immediately.
3524 */
3525 if (test_bit(RAMROD_DRV_CLR_ONLY, &p->ramrod_flags)) {
3526 raw->clear_pending(raw);
3527 return 0;
3528 } else {
Vladislav Zolotarov53e51e22011-07-19 01:45:02 +00003529 /*
3530 * No need for an explicit memory barrier here as long we would
3531 * need to ensure the ordering of writing to the SPQ element
3532 * and updating of the SPQ producer which involves a memory
3533 * read and we will have to put a full memory barrier there
3534 * (inside bnx2x_sp_post()).
3535 */
3536
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03003537 /* Send a ramrod */
3538 rc = bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, raw->cid,
3539 U64_HI(raw->rdata_mapping),
3540 U64_LO(raw->rdata_mapping),
3541 ETH_CONNECTION_TYPE);
3542 if (rc)
3543 return rc;
3544
3545 /* Ramrod completion is pending */
3546 return 1;
3547 }
3548
3549}
3550
3551static int bnx2x_mcast_get_registry_size_exact(struct bnx2x_mcast_obj *o)
3552{
3553 return o->registry.exact_match.num_macs_set;
3554}
3555
3556static int bnx2x_mcast_get_registry_size_aprox(struct bnx2x_mcast_obj *o)
3557{
3558 return o->registry.aprox_match.num_bins_set;
3559}
3560
3561static void bnx2x_mcast_set_registry_size_exact(struct bnx2x_mcast_obj *o,
3562 int n)
3563{
3564 o->registry.exact_match.num_macs_set = n;
3565}
3566
3567static void bnx2x_mcast_set_registry_size_aprox(struct bnx2x_mcast_obj *o,
3568 int n)
3569{
3570 o->registry.aprox_match.num_bins_set = n;
3571}
3572
3573int bnx2x_config_mcast(struct bnx2x *bp,
3574 struct bnx2x_mcast_ramrod_params *p,
Yuval Mintz86564c32013-01-23 03:21:50 +00003575 enum bnx2x_mcast_cmd cmd)
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03003576{
3577 struct bnx2x_mcast_obj *o = p->mcast_obj;
3578 struct bnx2x_raw_obj *r = &o->raw;
3579 int rc = 0, old_reg_size;
3580
3581 /* This is needed to recover number of currently configured mcast macs
3582 * in case of failure.
3583 */
3584 old_reg_size = o->get_registry_size(o);
3585
3586 /* Do some calculations and checks */
3587 rc = o->validate(bp, p, cmd);
3588 if (rc)
3589 return rc;
3590
3591 /* Return if there is no work to do */
3592 if ((!p->mcast_list_len) && (!o->check_sched(o)))
3593 return 0;
3594
Merav Sicron51c1a582012-03-18 10:33:38 +00003595 DP(BNX2X_MSG_SP, "o->total_pending_num=%d p->mcast_list_len=%d o->max_cmd_len=%d\n",
3596 o->total_pending_num, p->mcast_list_len, o->max_cmd_len);
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03003597
3598 /* Enqueue the current command to the pending list if we can't complete
3599 * it in the current iteration
3600 */
3601 if (r->check_pending(r) ||
3602 ((o->max_cmd_len > 0) && (o->total_pending_num > o->max_cmd_len))) {
3603 rc = o->enqueue_cmd(bp, p->mcast_obj, p, cmd);
3604 if (rc < 0)
3605 goto error_exit1;
3606
3607 /* As long as the current command is in a command list we
3608 * don't need to handle it separately.
3609 */
3610 p->mcast_list_len = 0;
3611 }
3612
3613 if (!r->check_pending(r)) {
3614
3615 /* Set 'pending' state */
3616 r->set_pending(r);
3617
3618 /* Configure the new classification in the chip */
3619 rc = o->config_mcast(bp, p, cmd);
3620 if (rc < 0)
3621 goto error_exit2;
3622
3623 /* Wait for a ramrod completion if was requested */
3624 if (test_bit(RAMROD_COMP_WAIT, &p->ramrod_flags))
3625 rc = o->wait_comp(bp, o);
3626 }
3627
3628 return rc;
3629
3630error_exit2:
3631 r->clear_pending(r);
3632
3633error_exit1:
3634 o->revert(bp, p, old_reg_size);
3635
3636 return rc;
3637}
3638
3639static void bnx2x_mcast_clear_sched(struct bnx2x_mcast_obj *o)
3640{
3641 smp_mb__before_clear_bit();
3642 clear_bit(o->sched_state, o->raw.pstate);
3643 smp_mb__after_clear_bit();
3644}
3645
3646static void bnx2x_mcast_set_sched(struct bnx2x_mcast_obj *o)
3647{
3648 smp_mb__before_clear_bit();
3649 set_bit(o->sched_state, o->raw.pstate);
3650 smp_mb__after_clear_bit();
3651}
3652
3653static bool bnx2x_mcast_check_sched(struct bnx2x_mcast_obj *o)
3654{
3655 return !!test_bit(o->sched_state, o->raw.pstate);
3656}
3657
3658static bool bnx2x_mcast_check_pending(struct bnx2x_mcast_obj *o)
3659{
3660 return o->raw.check_pending(&o->raw) || o->check_sched(o);
3661}
3662
3663void bnx2x_init_mcast_obj(struct bnx2x *bp,
3664 struct bnx2x_mcast_obj *mcast_obj,
3665 u8 mcast_cl_id, u32 mcast_cid, u8 func_id,
3666 u8 engine_id, void *rdata, dma_addr_t rdata_mapping,
3667 int state, unsigned long *pstate, bnx2x_obj_type type)
3668{
3669 memset(mcast_obj, 0, sizeof(*mcast_obj));
3670
3671 bnx2x_init_raw_obj(&mcast_obj->raw, mcast_cl_id, mcast_cid, func_id,
3672 rdata, rdata_mapping, state, pstate, type);
3673
3674 mcast_obj->engine_id = engine_id;
3675
3676 INIT_LIST_HEAD(&mcast_obj->pending_cmds_head);
3677
3678 mcast_obj->sched_state = BNX2X_FILTER_MCAST_SCHED;
3679 mcast_obj->check_sched = bnx2x_mcast_check_sched;
3680 mcast_obj->set_sched = bnx2x_mcast_set_sched;
3681 mcast_obj->clear_sched = bnx2x_mcast_clear_sched;
3682
3683 if (CHIP_IS_E1(bp)) {
3684 mcast_obj->config_mcast = bnx2x_mcast_setup_e1;
3685 mcast_obj->enqueue_cmd = bnx2x_mcast_enqueue_cmd;
3686 mcast_obj->hdl_restore =
3687 bnx2x_mcast_handle_restore_cmd_e1;
3688 mcast_obj->check_pending = bnx2x_mcast_check_pending;
3689
3690 if (CHIP_REV_IS_SLOW(bp))
3691 mcast_obj->max_cmd_len = BNX2X_MAX_EMUL_MULTI;
3692 else
3693 mcast_obj->max_cmd_len = BNX2X_MAX_MULTICAST;
3694
3695 mcast_obj->wait_comp = bnx2x_mcast_wait;
3696 mcast_obj->set_one_rule = bnx2x_mcast_set_one_rule_e1;
3697 mcast_obj->validate = bnx2x_mcast_validate_e1;
3698 mcast_obj->revert = bnx2x_mcast_revert_e1;
3699 mcast_obj->get_registry_size =
3700 bnx2x_mcast_get_registry_size_exact;
3701 mcast_obj->set_registry_size =
3702 bnx2x_mcast_set_registry_size_exact;
3703
3704 /* 57710 is the only chip that uses the exact match for mcast
3705 * at the moment.
3706 */
3707 INIT_LIST_HEAD(&mcast_obj->registry.exact_match.macs);
3708
3709 } else if (CHIP_IS_E1H(bp)) {
3710 mcast_obj->config_mcast = bnx2x_mcast_setup_e1h;
3711 mcast_obj->enqueue_cmd = NULL;
3712 mcast_obj->hdl_restore = NULL;
3713 mcast_obj->check_pending = bnx2x_mcast_check_pending;
3714
3715 /* 57711 doesn't send a ramrod, so it has unlimited credit
3716 * for one command.
3717 */
3718 mcast_obj->max_cmd_len = -1;
3719 mcast_obj->wait_comp = bnx2x_mcast_wait;
3720 mcast_obj->set_one_rule = NULL;
3721 mcast_obj->validate = bnx2x_mcast_validate_e1h;
3722 mcast_obj->revert = bnx2x_mcast_revert_e1h;
3723 mcast_obj->get_registry_size =
3724 bnx2x_mcast_get_registry_size_aprox;
3725 mcast_obj->set_registry_size =
3726 bnx2x_mcast_set_registry_size_aprox;
3727 } else {
3728 mcast_obj->config_mcast = bnx2x_mcast_setup_e2;
3729 mcast_obj->enqueue_cmd = bnx2x_mcast_enqueue_cmd;
3730 mcast_obj->hdl_restore =
3731 bnx2x_mcast_handle_restore_cmd_e2;
3732 mcast_obj->check_pending = bnx2x_mcast_check_pending;
3733 /* TODO: There should be a proper HSI define for this number!!!
3734 */
3735 mcast_obj->max_cmd_len = 16;
3736 mcast_obj->wait_comp = bnx2x_mcast_wait;
3737 mcast_obj->set_one_rule = bnx2x_mcast_set_one_rule_e2;
3738 mcast_obj->validate = bnx2x_mcast_validate_e2;
3739 mcast_obj->revert = bnx2x_mcast_revert_e2;
3740 mcast_obj->get_registry_size =
3741 bnx2x_mcast_get_registry_size_aprox;
3742 mcast_obj->set_registry_size =
3743 bnx2x_mcast_set_registry_size_aprox;
3744 }
3745}
3746
3747/*************************** Credit handling **********************************/
3748
3749/**
3750 * atomic_add_ifless - add if the result is less than a given value.
3751 *
3752 * @v: pointer of type atomic_t
3753 * @a: the amount to add to v...
3754 * @u: ...if (v + a) is less than u.
3755 *
3756 * returns true if (v + a) was less than u, and false otherwise.
3757 *
3758 */
3759static inline bool __atomic_add_ifless(atomic_t *v, int a, int u)
3760{
3761 int c, old;
3762
3763 c = atomic_read(v);
3764 for (;;) {
3765 if (unlikely(c + a >= u))
3766 return false;
3767
3768 old = atomic_cmpxchg((v), c, c + a);
3769 if (likely(old == c))
3770 break;
3771 c = old;
3772 }
3773
3774 return true;
3775}
3776
3777/**
3778 * atomic_dec_ifmoe - dec if the result is more or equal than a given value.
3779 *
3780 * @v: pointer of type atomic_t
3781 * @a: the amount to dec from v...
3782 * @u: ...if (v - a) is more or equal than u.
3783 *
3784 * returns true if (v - a) was more or equal than u, and false
3785 * otherwise.
3786 */
3787static inline bool __atomic_dec_ifmoe(atomic_t *v, int a, int u)
3788{
3789 int c, old;
3790
3791 c = atomic_read(v);
3792 for (;;) {
3793 if (unlikely(c - a < u))
3794 return false;
3795
3796 old = atomic_cmpxchg((v), c, c - a);
3797 if (likely(old == c))
3798 break;
3799 c = old;
3800 }
3801
3802 return true;
3803}
3804
3805static bool bnx2x_credit_pool_get(struct bnx2x_credit_pool_obj *o, int cnt)
3806{
3807 bool rc;
3808
3809 smp_mb();
3810 rc = __atomic_dec_ifmoe(&o->credit, cnt, 0);
3811 smp_mb();
3812
3813 return rc;
3814}
3815
3816static bool bnx2x_credit_pool_put(struct bnx2x_credit_pool_obj *o, int cnt)
3817{
3818 bool rc;
3819
3820 smp_mb();
3821
3822 /* Don't let to refill if credit + cnt > pool_sz */
3823 rc = __atomic_add_ifless(&o->credit, cnt, o->pool_sz + 1);
3824
3825 smp_mb();
3826
3827 return rc;
3828}
3829
3830static int bnx2x_credit_pool_check(struct bnx2x_credit_pool_obj *o)
3831{
3832 int cur_credit;
3833
3834 smp_mb();
3835 cur_credit = atomic_read(&o->credit);
3836
3837 return cur_credit;
3838}
3839
3840static bool bnx2x_credit_pool_always_true(struct bnx2x_credit_pool_obj *o,
3841 int cnt)
3842{
3843 return true;
3844}
3845
3846
3847static bool bnx2x_credit_pool_get_entry(
3848 struct bnx2x_credit_pool_obj *o,
3849 int *offset)
3850{
3851 int idx, vec, i;
3852
3853 *offset = -1;
3854
3855 /* Find "internal cam-offset" then add to base for this object... */
3856 for (vec = 0; vec < BNX2X_POOL_VEC_SIZE; vec++) {
3857
3858 /* Skip the current vector if there are no free entries in it */
3859 if (!o->pool_mirror[vec])
3860 continue;
3861
3862 /* If we've got here we are going to find a free entry */
Dmitry Kravkovc54e9bd2012-03-26 21:08:55 +00003863 for (idx = vec * BIT_VEC64_ELEM_SZ, i = 0;
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03003864 i < BIT_VEC64_ELEM_SZ; idx++, i++)
3865
3866 if (BIT_VEC64_TEST_BIT(o->pool_mirror, idx)) {
3867 /* Got one!! */
3868 BIT_VEC64_CLEAR_BIT(o->pool_mirror, idx);
3869 *offset = o->base_pool_offset + idx;
3870 return true;
3871 }
3872 }
3873
3874 return false;
3875}
3876
3877static bool bnx2x_credit_pool_put_entry(
3878 struct bnx2x_credit_pool_obj *o,
3879 int offset)
3880{
3881 if (offset < o->base_pool_offset)
3882 return false;
3883
3884 offset -= o->base_pool_offset;
3885
3886 if (offset >= o->pool_sz)
3887 return false;
3888
3889 /* Return the entry to the pool */
3890 BIT_VEC64_SET_BIT(o->pool_mirror, offset);
3891
3892 return true;
3893}
3894
3895static bool bnx2x_credit_pool_put_entry_always_true(
3896 struct bnx2x_credit_pool_obj *o,
3897 int offset)
3898{
3899 return true;
3900}
3901
3902static bool bnx2x_credit_pool_get_entry_always_true(
3903 struct bnx2x_credit_pool_obj *o,
3904 int *offset)
3905{
3906 *offset = -1;
3907 return true;
3908}
3909/**
3910 * bnx2x_init_credit_pool - initialize credit pool internals.
3911 *
3912 * @p:
3913 * @base: Base entry in the CAM to use.
3914 * @credit: pool size.
3915 *
3916 * If base is negative no CAM entries handling will be performed.
3917 * If credit is negative pool operations will always succeed (unlimited pool).
3918 *
3919 */
3920static inline void bnx2x_init_credit_pool(struct bnx2x_credit_pool_obj *p,
3921 int base, int credit)
3922{
3923 /* Zero the object first */
3924 memset(p, 0, sizeof(*p));
3925
3926 /* Set the table to all 1s */
3927 memset(&p->pool_mirror, 0xff, sizeof(p->pool_mirror));
3928
3929 /* Init a pool as full */
3930 atomic_set(&p->credit, credit);
3931
3932 /* The total poll size */
3933 p->pool_sz = credit;
3934
3935 p->base_pool_offset = base;
3936
3937 /* Commit the change */
3938 smp_mb();
3939
3940 p->check = bnx2x_credit_pool_check;
3941
3942 /* if pool credit is negative - disable the checks */
3943 if (credit >= 0) {
3944 p->put = bnx2x_credit_pool_put;
3945 p->get = bnx2x_credit_pool_get;
3946 p->put_entry = bnx2x_credit_pool_put_entry;
3947 p->get_entry = bnx2x_credit_pool_get_entry;
3948 } else {
3949 p->put = bnx2x_credit_pool_always_true;
3950 p->get = bnx2x_credit_pool_always_true;
3951 p->put_entry = bnx2x_credit_pool_put_entry_always_true;
3952 p->get_entry = bnx2x_credit_pool_get_entry_always_true;
3953 }
3954
3955 /* If base is negative - disable entries handling */
3956 if (base < 0) {
3957 p->put_entry = bnx2x_credit_pool_put_entry_always_true;
3958 p->get_entry = bnx2x_credit_pool_get_entry_always_true;
3959 }
3960}
3961
3962void bnx2x_init_mac_credit_pool(struct bnx2x *bp,
3963 struct bnx2x_credit_pool_obj *p, u8 func_id,
3964 u8 func_num)
3965{
3966/* TODO: this will be defined in consts as well... */
3967#define BNX2X_CAM_SIZE_EMUL 5
3968
3969 int cam_sz;
3970
3971 if (CHIP_IS_E1(bp)) {
3972 /* In E1, Multicast is saved in cam... */
3973 if (!CHIP_REV_IS_SLOW(bp))
3974 cam_sz = (MAX_MAC_CREDIT_E1 / 2) - BNX2X_MAX_MULTICAST;
3975 else
3976 cam_sz = BNX2X_CAM_SIZE_EMUL - BNX2X_MAX_EMUL_MULTI;
3977
3978 bnx2x_init_credit_pool(p, func_id * cam_sz, cam_sz);
3979
3980 } else if (CHIP_IS_E1H(bp)) {
3981 /* CAM credit is equaly divided between all active functions
3982 * on the PORT!.
3983 */
3984 if ((func_num > 0)) {
3985 if (!CHIP_REV_IS_SLOW(bp))
3986 cam_sz = (MAX_MAC_CREDIT_E1H / (2*func_num));
3987 else
3988 cam_sz = BNX2X_CAM_SIZE_EMUL;
3989 bnx2x_init_credit_pool(p, func_id * cam_sz, cam_sz);
3990 } else {
3991 /* this should never happen! Block MAC operations. */
3992 bnx2x_init_credit_pool(p, 0, 0);
3993 }
3994
3995 } else {
3996
3997 /*
3998 * CAM credit is equaly divided between all active functions
3999 * on the PATH.
4000 */
4001 if ((func_num > 0)) {
4002 if (!CHIP_REV_IS_SLOW(bp))
4003 cam_sz = (MAX_MAC_CREDIT_E2 / func_num);
4004 else
4005 cam_sz = BNX2X_CAM_SIZE_EMUL;
4006
4007 /*
4008 * No need for CAM entries handling for 57712 and
4009 * newer.
4010 */
4011 bnx2x_init_credit_pool(p, -1, cam_sz);
4012 } else {
4013 /* this should never happen! Block MAC operations. */
4014 bnx2x_init_credit_pool(p, 0, 0);
4015 }
4016
4017 }
4018}
4019
4020void bnx2x_init_vlan_credit_pool(struct bnx2x *bp,
4021 struct bnx2x_credit_pool_obj *p,
4022 u8 func_id,
4023 u8 func_num)
4024{
4025 if (CHIP_IS_E1x(bp)) {
4026 /*
4027 * There is no VLAN credit in HW on 57710 and 57711 only
4028 * MAC / MAC-VLAN can be set
4029 */
4030 bnx2x_init_credit_pool(p, 0, -1);
4031 } else {
4032 /*
4033 * CAM credit is equaly divided between all active functions
4034 * on the PATH.
4035 */
4036 if (func_num > 0) {
4037 int credit = MAX_VLAN_CREDIT_E2 / func_num;
4038 bnx2x_init_credit_pool(p, func_id * credit, credit);
4039 } else
4040 /* this should never happen! Block VLAN operations. */
4041 bnx2x_init_credit_pool(p, 0, 0);
4042 }
4043}
4044
4045/****************** RSS Configuration ******************/
4046/**
4047 * bnx2x_debug_print_ind_table - prints the indirection table configuration.
4048 *
4049 * @bp: driver hanlde
4050 * @p: pointer to rss configuration
4051 *
4052 * Prints it when NETIF_MSG_IFUP debug level is configured.
4053 */
4054static inline void bnx2x_debug_print_ind_table(struct bnx2x *bp,
4055 struct bnx2x_config_rss_params *p)
4056{
4057 int i;
4058
4059 DP(BNX2X_MSG_SP, "Setting indirection table to:\n");
4060 DP(BNX2X_MSG_SP, "0x0000: ");
4061 for (i = 0; i < T_ETH_INDIRECTION_TABLE_SIZE; i++) {
4062 DP_CONT(BNX2X_MSG_SP, "0x%02x ", p->ind_table[i]);
4063
4064 /* Print 4 bytes in a line */
4065 if ((i + 1 < T_ETH_INDIRECTION_TABLE_SIZE) &&
4066 (((i + 1) & 0x3) == 0)) {
4067 DP_CONT(BNX2X_MSG_SP, "\n");
4068 DP(BNX2X_MSG_SP, "0x%04x: ", i + 1);
4069 }
4070 }
4071
4072 DP_CONT(BNX2X_MSG_SP, "\n");
4073}
4074
4075/**
4076 * bnx2x_setup_rss - configure RSS
4077 *
4078 * @bp: device handle
4079 * @p: rss configuration
4080 *
4081 * sends on UPDATE ramrod for that matter.
4082 */
4083static int bnx2x_setup_rss(struct bnx2x *bp,
4084 struct bnx2x_config_rss_params *p)
4085{
4086 struct bnx2x_rss_config_obj *o = p->rss_obj;
4087 struct bnx2x_raw_obj *r = &o->raw;
4088 struct eth_rss_update_ramrod_data *data =
4089 (struct eth_rss_update_ramrod_data *)(r->rdata);
4090 u8 rss_mode = 0;
4091 int rc;
4092
4093 memset(data, 0, sizeof(*data));
4094
4095 DP(BNX2X_MSG_SP, "Configuring RSS\n");
4096
4097 /* Set an echo field */
Yuval Mintz86564c32013-01-23 03:21:50 +00004098 data->echo = cpu_to_le32((r->cid & BNX2X_SWCID_MASK) |
4099 (r->state << BNX2X_SWCID_SHIFT));
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03004100
4101 /* RSS mode */
4102 if (test_bit(BNX2X_RSS_MODE_DISABLED, &p->rss_flags))
4103 rss_mode = ETH_RSS_MODE_DISABLED;
4104 else if (test_bit(BNX2X_RSS_MODE_REGULAR, &p->rss_flags))
4105 rss_mode = ETH_RSS_MODE_REGULAR;
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03004106
4107 data->rss_mode = rss_mode;
4108
4109 DP(BNX2X_MSG_SP, "rss_mode=%d\n", rss_mode);
4110
4111 /* RSS capabilities */
4112 if (test_bit(BNX2X_RSS_IPV4, &p->rss_flags))
4113 data->capabilities |=
4114 ETH_RSS_UPDATE_RAMROD_DATA_IPV4_CAPABILITY;
4115
4116 if (test_bit(BNX2X_RSS_IPV4_TCP, &p->rss_flags))
4117 data->capabilities |=
4118 ETH_RSS_UPDATE_RAMROD_DATA_IPV4_TCP_CAPABILITY;
4119
Merav Sicron5d317c6a2012-06-19 07:48:24 +00004120 if (test_bit(BNX2X_RSS_IPV4_UDP, &p->rss_flags))
4121 data->capabilities |=
4122 ETH_RSS_UPDATE_RAMROD_DATA_IPV4_UDP_CAPABILITY;
4123
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03004124 if (test_bit(BNX2X_RSS_IPV6, &p->rss_flags))
4125 data->capabilities |=
4126 ETH_RSS_UPDATE_RAMROD_DATA_IPV6_CAPABILITY;
4127
4128 if (test_bit(BNX2X_RSS_IPV6_TCP, &p->rss_flags))
4129 data->capabilities |=
4130 ETH_RSS_UPDATE_RAMROD_DATA_IPV6_TCP_CAPABILITY;
4131
Merav Sicron5d317c6a2012-06-19 07:48:24 +00004132 if (test_bit(BNX2X_RSS_IPV6_UDP, &p->rss_flags))
4133 data->capabilities |=
4134 ETH_RSS_UPDATE_RAMROD_DATA_IPV6_UDP_CAPABILITY;
4135
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03004136 /* Hashing mask */
4137 data->rss_result_mask = p->rss_result_mask;
4138
4139 /* RSS engine ID */
4140 data->rss_engine_id = o->engine_id;
4141
4142 DP(BNX2X_MSG_SP, "rss_engine_id=%d\n", data->rss_engine_id);
4143
4144 /* Indirection table */
4145 memcpy(data->indirection_table, p->ind_table,
4146 T_ETH_INDIRECTION_TABLE_SIZE);
4147
4148 /* Remember the last configuration */
4149 memcpy(o->ind_table, p->ind_table, T_ETH_INDIRECTION_TABLE_SIZE);
4150
4151 /* Print the indirection table */
4152 if (netif_msg_ifup(bp))
4153 bnx2x_debug_print_ind_table(bp, p);
4154
4155 /* RSS keys */
4156 if (test_bit(BNX2X_RSS_SET_SRCH, &p->rss_flags)) {
4157 memcpy(&data->rss_key[0], &p->rss_key[0],
4158 sizeof(data->rss_key));
4159 data->capabilities |= ETH_RSS_UPDATE_RAMROD_DATA_UPDATE_RSS_KEY;
4160 }
4161
Vladislav Zolotarov53e51e22011-07-19 01:45:02 +00004162 /*
4163 * No need for an explicit memory barrier here as long we would
4164 * need to ensure the ordering of writing to the SPQ element
4165 * and updating of the SPQ producer which involves a memory
4166 * read and we will have to put a full memory barrier there
4167 * (inside bnx2x_sp_post()).
4168 */
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03004169
4170 /* Send a ramrod */
4171 rc = bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_RSS_UPDATE, r->cid,
4172 U64_HI(r->rdata_mapping),
4173 U64_LO(r->rdata_mapping),
4174 ETH_CONNECTION_TYPE);
4175
4176 if (rc < 0)
4177 return rc;
4178
4179 return 1;
4180}
4181
4182void bnx2x_get_rss_ind_table(struct bnx2x_rss_config_obj *rss_obj,
4183 u8 *ind_table)
4184{
4185 memcpy(ind_table, rss_obj->ind_table, sizeof(rss_obj->ind_table));
4186}
4187
4188int bnx2x_config_rss(struct bnx2x *bp,
4189 struct bnx2x_config_rss_params *p)
4190{
4191 int rc;
4192 struct bnx2x_rss_config_obj *o = p->rss_obj;
4193 struct bnx2x_raw_obj *r = &o->raw;
4194
4195 /* Do nothing if only driver cleanup was requested */
4196 if (test_bit(RAMROD_DRV_CLR_ONLY, &p->ramrod_flags))
4197 return 0;
4198
4199 r->set_pending(r);
4200
4201 rc = o->config_rss(bp, p);
4202 if (rc < 0) {
4203 r->clear_pending(r);
4204 return rc;
4205 }
4206
4207 if (test_bit(RAMROD_COMP_WAIT, &p->ramrod_flags))
4208 rc = r->wait_comp(bp, r);
4209
4210 return rc;
4211}
4212
4213
4214void bnx2x_init_rss_config_obj(struct bnx2x *bp,
4215 struct bnx2x_rss_config_obj *rss_obj,
4216 u8 cl_id, u32 cid, u8 func_id, u8 engine_id,
4217 void *rdata, dma_addr_t rdata_mapping,
4218 int state, unsigned long *pstate,
4219 bnx2x_obj_type type)
4220{
4221 bnx2x_init_raw_obj(&rss_obj->raw, cl_id, cid, func_id, rdata,
4222 rdata_mapping, state, pstate, type);
4223
4224 rss_obj->engine_id = engine_id;
4225 rss_obj->config_rss = bnx2x_setup_rss;
4226}
4227
4228/********************** Queue state object ***********************************/
4229
4230/**
4231 * bnx2x_queue_state_change - perform Queue state change transition
4232 *
4233 * @bp: device handle
4234 * @params: parameters to perform the transition
4235 *
4236 * returns 0 in case of successfully completed transition, negative error
4237 * code in case of failure, positive (EBUSY) value if there is a completion
4238 * to that is still pending (possible only if RAMROD_COMP_WAIT is
4239 * not set in params->ramrod_flags for asynchronous commands).
4240 *
4241 */
4242int bnx2x_queue_state_change(struct bnx2x *bp,
4243 struct bnx2x_queue_state_params *params)
4244{
4245 struct bnx2x_queue_sp_obj *o = params->q_obj;
4246 int rc, pending_bit;
4247 unsigned long *pending = &o->pending;
4248
4249 /* Check that the requested transition is legal */
Yuval Mintz04c46732013-01-23 03:21:46 +00004250 rc = o->check_transition(bp, o, params);
4251 if (rc) {
4252 BNX2X_ERR("check transition returned an error. rc %d\n", rc);
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03004253 return -EINVAL;
Yuval Mintz04c46732013-01-23 03:21:46 +00004254 }
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03004255
4256 /* Set "pending" bit */
Yuval Mintz04c46732013-01-23 03:21:46 +00004257 DP(BNX2X_MSG_SP, "pending bit was=%lx\n", o->pending);
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03004258 pending_bit = o->set_pending(o, params);
Yuval Mintz04c46732013-01-23 03:21:46 +00004259 DP(BNX2X_MSG_SP, "pending bit now=%lx\n", o->pending);
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03004260
4261 /* Don't send a command if only driver cleanup was requested */
4262 if (test_bit(RAMROD_DRV_CLR_ONLY, &params->ramrod_flags))
4263 o->complete_cmd(bp, o, pending_bit);
4264 else {
4265 /* Send a ramrod */
4266 rc = o->send_cmd(bp, params);
4267 if (rc) {
4268 o->next_state = BNX2X_Q_STATE_MAX;
4269 clear_bit(pending_bit, pending);
4270 smp_mb__after_clear_bit();
4271 return rc;
4272 }
4273
4274 if (test_bit(RAMROD_COMP_WAIT, &params->ramrod_flags)) {
4275 rc = o->wait_comp(bp, o, pending_bit);
4276 if (rc)
4277 return rc;
4278
4279 return 0;
4280 }
4281 }
4282
4283 return !!test_bit(pending_bit, pending);
4284}
4285
4286
4287static int bnx2x_queue_set_pending(struct bnx2x_queue_sp_obj *obj,
4288 struct bnx2x_queue_state_params *params)
4289{
4290 enum bnx2x_queue_cmd cmd = params->cmd, bit;
4291
4292 /* ACTIVATE and DEACTIVATE commands are implemented on top of
4293 * UPDATE command.
4294 */
4295 if ((cmd == BNX2X_Q_CMD_ACTIVATE) ||
4296 (cmd == BNX2X_Q_CMD_DEACTIVATE))
4297 bit = BNX2X_Q_CMD_UPDATE;
4298 else
4299 bit = cmd;
4300
4301 set_bit(bit, &obj->pending);
4302 return bit;
4303}
4304
4305static int bnx2x_queue_wait_comp(struct bnx2x *bp,
4306 struct bnx2x_queue_sp_obj *o,
4307 enum bnx2x_queue_cmd cmd)
4308{
4309 return bnx2x_state_wait(bp, cmd, &o->pending);
4310}
4311
4312/**
4313 * bnx2x_queue_comp_cmd - complete the state change command.
4314 *
4315 * @bp: device handle
4316 * @o:
4317 * @cmd:
4318 *
4319 * Checks that the arrived completion is expected.
4320 */
4321static int bnx2x_queue_comp_cmd(struct bnx2x *bp,
4322 struct bnx2x_queue_sp_obj *o,
4323 enum bnx2x_queue_cmd cmd)
4324{
4325 unsigned long cur_pending = o->pending;
4326
4327 if (!test_and_clear_bit(cmd, &cur_pending)) {
Merav Sicron51c1a582012-03-18 10:33:38 +00004328 BNX2X_ERR("Bad MC reply %d for queue %d in state %d pending 0x%lx, next_state %d\n",
4329 cmd, o->cids[BNX2X_PRIMARY_CID_INDEX],
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03004330 o->state, cur_pending, o->next_state);
4331 return -EINVAL;
4332 }
4333
Ariel Elior6383c0b2011-07-14 08:31:57 +00004334 if (o->next_tx_only >= o->max_cos)
4335 /* >= becuase tx only must always be smaller than cos since the
Masanari Iida02582e92012-08-22 19:11:26 +09004336 * primary connection supports COS 0
Ariel Elior6383c0b2011-07-14 08:31:57 +00004337 */
4338 BNX2X_ERR("illegal value for next tx_only: %d. max cos was %d",
4339 o->next_tx_only, o->max_cos);
4340
Merav Sicron51c1a582012-03-18 10:33:38 +00004341 DP(BNX2X_MSG_SP,
4342 "Completing command %d for queue %d, setting state to %d\n",
4343 cmd, o->cids[BNX2X_PRIMARY_CID_INDEX], o->next_state);
Ariel Elior6383c0b2011-07-14 08:31:57 +00004344
4345 if (o->next_tx_only) /* print num tx-only if any exist */
Joe Perches94f05b02011-08-14 12:16:20 +00004346 DP(BNX2X_MSG_SP, "primary cid %d: num tx-only cons %d\n",
Merav Sicron51c1a582012-03-18 10:33:38 +00004347 o->cids[BNX2X_PRIMARY_CID_INDEX], o->next_tx_only);
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03004348
4349 o->state = o->next_state;
Ariel Elior6383c0b2011-07-14 08:31:57 +00004350 o->num_tx_only = o->next_tx_only;
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03004351 o->next_state = BNX2X_Q_STATE_MAX;
4352
4353 /* It's important that o->state and o->next_state are
4354 * updated before o->pending.
4355 */
4356 wmb();
4357
4358 clear_bit(cmd, &o->pending);
4359 smp_mb__after_clear_bit();
4360
4361 return 0;
4362}
4363
4364static void bnx2x_q_fill_setup_data_e2(struct bnx2x *bp,
4365 struct bnx2x_queue_state_params *cmd_params,
4366 struct client_init_ramrod_data *data)
4367{
4368 struct bnx2x_queue_setup_params *params = &cmd_params->params.setup;
Vladislav Zolotarov042181f2011-06-14 01:33:39 +00004369
4370 /* Rx data */
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03004371
4372 /* IPv6 TPA supported for E2 and above only */
Vladislav Zolotarovf5219d82011-07-19 01:44:11 +00004373 data->rx.tpa_en |= test_bit(BNX2X_Q_FLG_TPA_IPV6, &params->flags) *
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03004374 CLIENT_INIT_RX_DATA_TPA_EN_IPV6;
4375}
4376
Ariel Elior6383c0b2011-07-14 08:31:57 +00004377static void bnx2x_q_fill_init_general_data(struct bnx2x *bp,
4378 struct bnx2x_queue_sp_obj *o,
4379 struct bnx2x_general_setup_params *params,
4380 struct client_init_general_data *gen_data,
4381 unsigned long *flags)
4382{
4383 gen_data->client_id = o->cl_id;
4384
4385 if (test_bit(BNX2X_Q_FLG_STATS, flags)) {
4386 gen_data->statistics_counter_id =
4387 params->stat_id;
4388 gen_data->statistics_en_flg = 1;
4389 gen_data->statistics_zero_flg =
4390 test_bit(BNX2X_Q_FLG_ZERO_STATS, flags);
4391 } else
4392 gen_data->statistics_counter_id =
4393 DISABLE_STATISTIC_COUNTER_ID_VALUE;
4394
4395 gen_data->is_fcoe_flg = test_bit(BNX2X_Q_FLG_FCOE, flags);
4396 gen_data->activate_flg = test_bit(BNX2X_Q_FLG_ACTIVE, flags);
4397 gen_data->sp_client_id = params->spcl_id;
4398 gen_data->mtu = cpu_to_le16(params->mtu);
4399 gen_data->func_id = o->func_id;
4400
4401
4402 gen_data->cos = params->cos;
4403
4404 gen_data->traffic_type =
4405 test_bit(BNX2X_Q_FLG_FCOE, flags) ?
4406 LLFC_TRAFFIC_TYPE_FCOE : LLFC_TRAFFIC_TYPE_NW;
4407
Joe Perches94f05b02011-08-14 12:16:20 +00004408 DP(BNX2X_MSG_SP, "flags: active %d, cos %d, stats en %d\n",
Ariel Elior6383c0b2011-07-14 08:31:57 +00004409 gen_data->activate_flg, gen_data->cos, gen_data->statistics_en_flg);
4410}
4411
4412static void bnx2x_q_fill_init_tx_data(struct bnx2x_queue_sp_obj *o,
4413 struct bnx2x_txq_setup_params *params,
4414 struct client_init_tx_data *tx_data,
4415 unsigned long *flags)
4416{
4417 tx_data->enforce_security_flg =
4418 test_bit(BNX2X_Q_FLG_TX_SEC, flags);
4419 tx_data->default_vlan =
4420 cpu_to_le16(params->default_vlan);
4421 tx_data->default_vlan_flg =
4422 test_bit(BNX2X_Q_FLG_DEF_VLAN, flags);
4423 tx_data->tx_switching_flg =
4424 test_bit(BNX2X_Q_FLG_TX_SWITCH, flags);
4425 tx_data->anti_spoofing_flg =
4426 test_bit(BNX2X_Q_FLG_ANTI_SPOOF, flags);
Barak Witkowskia3348722012-04-23 03:04:46 +00004427 tx_data->force_default_pri_flg =
4428 test_bit(BNX2X_Q_FLG_FORCE_DEFAULT_PRI, flags);
4429
Ariel Elior6383c0b2011-07-14 08:31:57 +00004430 tx_data->tx_status_block_id = params->fw_sb_id;
4431 tx_data->tx_sb_index_number = params->sb_cq_index;
4432 tx_data->tss_leading_client_id = params->tss_leading_cl_id;
4433
4434 tx_data->tx_bd_page_base.lo =
4435 cpu_to_le32(U64_LO(params->dscr_map));
4436 tx_data->tx_bd_page_base.hi =
4437 cpu_to_le32(U64_HI(params->dscr_map));
4438
4439 /* Don't configure any Tx switching mode during queue SETUP */
4440 tx_data->state = 0;
4441}
4442
4443static void bnx2x_q_fill_init_pause_data(struct bnx2x_queue_sp_obj *o,
4444 struct rxq_pause_params *params,
4445 struct client_init_rx_data *rx_data)
4446{
4447 /* flow control data */
4448 rx_data->cqe_pause_thr_low = cpu_to_le16(params->rcq_th_lo);
4449 rx_data->cqe_pause_thr_high = cpu_to_le16(params->rcq_th_hi);
4450 rx_data->bd_pause_thr_low = cpu_to_le16(params->bd_th_lo);
4451 rx_data->bd_pause_thr_high = cpu_to_le16(params->bd_th_hi);
4452 rx_data->sge_pause_thr_low = cpu_to_le16(params->sge_th_lo);
4453 rx_data->sge_pause_thr_high = cpu_to_le16(params->sge_th_hi);
4454 rx_data->rx_cos_mask = cpu_to_le16(params->pri_map);
4455}
4456
4457static void bnx2x_q_fill_init_rx_data(struct bnx2x_queue_sp_obj *o,
4458 struct bnx2x_rxq_setup_params *params,
4459 struct client_init_rx_data *rx_data,
4460 unsigned long *flags)
4461{
Ariel Elior6383c0b2011-07-14 08:31:57 +00004462 rx_data->tpa_en = test_bit(BNX2X_Q_FLG_TPA, flags) *
4463 CLIENT_INIT_RX_DATA_TPA_EN_IPV4;
Dmitry Kravkov621b4d62012-02-20 09:59:08 +00004464 rx_data->tpa_en |= test_bit(BNX2X_Q_FLG_TPA_GRO, flags) *
4465 CLIENT_INIT_RX_DATA_TPA_MODE;
Ariel Elior6383c0b2011-07-14 08:31:57 +00004466 rx_data->vmqueue_mode_en_flg = 0;
4467
4468 rx_data->cache_line_alignment_log_size =
4469 params->cache_line_log;
4470 rx_data->enable_dynamic_hc =
4471 test_bit(BNX2X_Q_FLG_DHC, flags);
4472 rx_data->max_sges_for_packet = params->max_sges_pkt;
4473 rx_data->client_qzone_id = params->cl_qzone_id;
4474 rx_data->max_agg_size = cpu_to_le16(params->tpa_agg_sz);
4475
4476 /* Always start in DROP_ALL mode */
4477 rx_data->state = cpu_to_le16(CLIENT_INIT_RX_DATA_UCAST_DROP_ALL |
4478 CLIENT_INIT_RX_DATA_MCAST_DROP_ALL);
4479
4480 /* We don't set drop flags */
4481 rx_data->drop_ip_cs_err_flg = 0;
4482 rx_data->drop_tcp_cs_err_flg = 0;
4483 rx_data->drop_ttl0_flg = 0;
4484 rx_data->drop_udp_cs_err_flg = 0;
4485 rx_data->inner_vlan_removal_enable_flg =
4486 test_bit(BNX2X_Q_FLG_VLAN, flags);
4487 rx_data->outer_vlan_removal_enable_flg =
4488 test_bit(BNX2X_Q_FLG_OV, flags);
4489 rx_data->status_block_id = params->fw_sb_id;
4490 rx_data->rx_sb_index_number = params->sb_cq_index;
4491 rx_data->max_tpa_queues = params->max_tpa_queues;
4492 rx_data->max_bytes_on_bd = cpu_to_le16(params->buf_sz);
4493 rx_data->sge_buff_size = cpu_to_le16(params->sge_buf_sz);
4494 rx_data->bd_page_base.lo =
4495 cpu_to_le32(U64_LO(params->dscr_map));
4496 rx_data->bd_page_base.hi =
4497 cpu_to_le32(U64_HI(params->dscr_map));
4498 rx_data->sge_page_base.lo =
4499 cpu_to_le32(U64_LO(params->sge_map));
4500 rx_data->sge_page_base.hi =
4501 cpu_to_le32(U64_HI(params->sge_map));
4502 rx_data->cqe_page_base.lo =
4503 cpu_to_le32(U64_LO(params->rcq_map));
4504 rx_data->cqe_page_base.hi =
4505 cpu_to_le32(U64_HI(params->rcq_map));
4506 rx_data->is_leading_rss = test_bit(BNX2X_Q_FLG_LEADING_RSS, flags);
4507
4508 if (test_bit(BNX2X_Q_FLG_MCAST, flags)) {
Yuval Mintz259afa12012-03-12 08:53:10 +00004509 rx_data->approx_mcast_engine_id = params->mcast_engine_id;
Ariel Elior6383c0b2011-07-14 08:31:57 +00004510 rx_data->is_approx_mcast = 1;
4511 }
4512
4513 rx_data->rss_engine_id = params->rss_engine_id;
4514
4515 /* silent vlan removal */
4516 rx_data->silent_vlan_removal_flg =
4517 test_bit(BNX2X_Q_FLG_SILENT_VLAN_REM, flags);
4518 rx_data->silent_vlan_value =
4519 cpu_to_le16(params->silent_removal_value);
4520 rx_data->silent_vlan_mask =
4521 cpu_to_le16(params->silent_removal_mask);
4522
4523}
4524
4525/* initialize the general, tx and rx parts of a queue object */
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03004526static void bnx2x_q_fill_setup_data_cmn(struct bnx2x *bp,
4527 struct bnx2x_queue_state_params *cmd_params,
4528 struct client_init_ramrod_data *data)
4529{
Ariel Elior6383c0b2011-07-14 08:31:57 +00004530 bnx2x_q_fill_init_general_data(bp, cmd_params->q_obj,
4531 &cmd_params->params.setup.gen_params,
4532 &data->general,
4533 &cmd_params->params.setup.flags);
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03004534
Ariel Elior6383c0b2011-07-14 08:31:57 +00004535 bnx2x_q_fill_init_tx_data(cmd_params->q_obj,
4536 &cmd_params->params.setup.txq_params,
4537 &data->tx,
4538 &cmd_params->params.setup.flags);
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03004539
Ariel Elior6383c0b2011-07-14 08:31:57 +00004540 bnx2x_q_fill_init_rx_data(cmd_params->q_obj,
4541 &cmd_params->params.setup.rxq_params,
4542 &data->rx,
4543 &cmd_params->params.setup.flags);
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03004544
Ariel Elior6383c0b2011-07-14 08:31:57 +00004545 bnx2x_q_fill_init_pause_data(cmd_params->q_obj,
4546 &cmd_params->params.setup.pause_params,
4547 &data->rx);
Vladislav Zolotarov042181f2011-06-14 01:33:39 +00004548}
4549
Ariel Elior6383c0b2011-07-14 08:31:57 +00004550/* initialize the general and tx parts of a tx-only queue object */
4551static void bnx2x_q_fill_setup_tx_only(struct bnx2x *bp,
4552 struct bnx2x_queue_state_params *cmd_params,
4553 struct tx_queue_init_ramrod_data *data)
4554{
4555 bnx2x_q_fill_init_general_data(bp, cmd_params->q_obj,
4556 &cmd_params->params.tx_only.gen_params,
4557 &data->general,
4558 &cmd_params->params.tx_only.flags);
4559
4560 bnx2x_q_fill_init_tx_data(cmd_params->q_obj,
4561 &cmd_params->params.tx_only.txq_params,
4562 &data->tx,
4563 &cmd_params->params.tx_only.flags);
4564
Merav Sicron51c1a582012-03-18 10:33:38 +00004565 DP(BNX2X_MSG_SP, "cid %d, tx bd page lo %x hi %x",
4566 cmd_params->q_obj->cids[0],
4567 data->tx.tx_bd_page_base.lo,
4568 data->tx.tx_bd_page_base.hi);
Ariel Elior6383c0b2011-07-14 08:31:57 +00004569}
Vladislav Zolotarov042181f2011-06-14 01:33:39 +00004570
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03004571/**
4572 * bnx2x_q_init - init HW/FW queue
4573 *
4574 * @bp: device handle
4575 * @params:
4576 *
4577 * HW/FW initial Queue configuration:
4578 * - HC: Rx and Tx
4579 * - CDU context validation
4580 *
4581 */
4582static inline int bnx2x_q_init(struct bnx2x *bp,
4583 struct bnx2x_queue_state_params *params)
Vladislav Zolotarov042181f2011-06-14 01:33:39 +00004584{
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03004585 struct bnx2x_queue_sp_obj *o = params->q_obj;
4586 struct bnx2x_queue_init_params *init = &params->params.init;
Vladislav Zolotarov042181f2011-06-14 01:33:39 +00004587 u16 hc_usec;
Ariel Elior6383c0b2011-07-14 08:31:57 +00004588 u8 cos;
Vladislav Zolotarov042181f2011-06-14 01:33:39 +00004589
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03004590 /* Tx HC configuration */
4591 if (test_bit(BNX2X_Q_TYPE_HAS_TX, &o->type) &&
4592 test_bit(BNX2X_Q_FLG_HC, &init->tx.flags)) {
4593 hc_usec = init->tx.hc_rate ? 1000000 / init->tx.hc_rate : 0;
4594
4595 bnx2x_update_coalesce_sb_index(bp, init->tx.fw_sb_id,
4596 init->tx.sb_cq_index,
4597 !test_bit(BNX2X_Q_FLG_HC_EN, &init->tx.flags),
Vladislav Zolotarov042181f2011-06-14 01:33:39 +00004598 hc_usec);
Vladislav Zolotarov042181f2011-06-14 01:33:39 +00004599 }
4600
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03004601 /* Rx HC configuration */
4602 if (test_bit(BNX2X_Q_TYPE_HAS_RX, &o->type) &&
4603 test_bit(BNX2X_Q_FLG_HC, &init->rx.flags)) {
4604 hc_usec = init->rx.hc_rate ? 1000000 / init->rx.hc_rate : 0;
Vladislav Zolotarov042181f2011-06-14 01:33:39 +00004605
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03004606 bnx2x_update_coalesce_sb_index(bp, init->rx.fw_sb_id,
4607 init->rx.sb_cq_index,
4608 !test_bit(BNX2X_Q_FLG_HC_EN, &init->rx.flags),
4609 hc_usec);
4610 }
4611
4612 /* Set CDU context validation values */
Ariel Elior6383c0b2011-07-14 08:31:57 +00004613 for (cos = 0; cos < o->max_cos; cos++) {
Joe Perches94f05b02011-08-14 12:16:20 +00004614 DP(BNX2X_MSG_SP, "setting context validation. cid %d, cos %d\n",
Ariel Elior6383c0b2011-07-14 08:31:57 +00004615 o->cids[cos], cos);
Joe Perches94f05b02011-08-14 12:16:20 +00004616 DP(BNX2X_MSG_SP, "context pointer %p\n", init->cxts[cos]);
Ariel Elior6383c0b2011-07-14 08:31:57 +00004617 bnx2x_set_ctx_validation(bp, init->cxts[cos], o->cids[cos]);
4618 }
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03004619
4620 /* As no ramrod is sent, complete the command immediately */
4621 o->complete_cmd(bp, o, BNX2X_Q_CMD_INIT);
4622
Vladislav Zolotarov042181f2011-06-14 01:33:39 +00004623 mmiowb();
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03004624 smp_mb();
Vladislav Zolotarov042181f2011-06-14 01:33:39 +00004625
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03004626 return 0;
4627}
Vladislav Zolotarov042181f2011-06-14 01:33:39 +00004628
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03004629static inline int bnx2x_q_send_setup_e1x(struct bnx2x *bp,
4630 struct bnx2x_queue_state_params *params)
4631{
4632 struct bnx2x_queue_sp_obj *o = params->q_obj;
4633 struct client_init_ramrod_data *rdata =
4634 (struct client_init_ramrod_data *)o->rdata;
4635 dma_addr_t data_mapping = o->rdata_mapping;
4636 int ramrod = RAMROD_CMD_ID_ETH_CLIENT_SETUP;
Vladislav Zolotarov042181f2011-06-14 01:33:39 +00004637
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03004638 /* Clear the ramrod data */
4639 memset(rdata, 0, sizeof(*rdata));
4640
4641 /* Fill the ramrod data */
4642 bnx2x_q_fill_setup_data_cmn(bp, params, rdata);
4643
Vladislav Zolotarov53e51e22011-07-19 01:45:02 +00004644 /*
4645 * No need for an explicit memory barrier here as long we would
4646 * need to ensure the ordering of writing to the SPQ element
4647 * and updating of the SPQ producer which involves a memory
4648 * read and we will have to put a full memory barrier there
4649 * (inside bnx2x_sp_post()).
4650 */
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03004651
Ariel Elior6383c0b2011-07-14 08:31:57 +00004652 return bnx2x_sp_post(bp, ramrod, o->cids[BNX2X_PRIMARY_CID_INDEX],
4653 U64_HI(data_mapping),
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03004654 U64_LO(data_mapping), ETH_CONNECTION_TYPE);
4655}
4656
4657static inline int bnx2x_q_send_setup_e2(struct bnx2x *bp,
4658 struct bnx2x_queue_state_params *params)
4659{
4660 struct bnx2x_queue_sp_obj *o = params->q_obj;
4661 struct client_init_ramrod_data *rdata =
4662 (struct client_init_ramrod_data *)o->rdata;
4663 dma_addr_t data_mapping = o->rdata_mapping;
4664 int ramrod = RAMROD_CMD_ID_ETH_CLIENT_SETUP;
4665
4666 /* Clear the ramrod data */
4667 memset(rdata, 0, sizeof(*rdata));
4668
4669 /* Fill the ramrod data */
4670 bnx2x_q_fill_setup_data_cmn(bp, params, rdata);
4671 bnx2x_q_fill_setup_data_e2(bp, params, rdata);
4672
Vladislav Zolotarov53e51e22011-07-19 01:45:02 +00004673 /*
4674 * No need for an explicit memory barrier here as long we would
4675 * need to ensure the ordering of writing to the SPQ element
4676 * and updating of the SPQ producer which involves a memory
4677 * read and we will have to put a full memory barrier there
4678 * (inside bnx2x_sp_post()).
4679 */
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03004680
Ariel Elior6383c0b2011-07-14 08:31:57 +00004681 return bnx2x_sp_post(bp, ramrod, o->cids[BNX2X_PRIMARY_CID_INDEX],
4682 U64_HI(data_mapping),
4683 U64_LO(data_mapping), ETH_CONNECTION_TYPE);
4684}
4685
4686static inline int bnx2x_q_send_setup_tx_only(struct bnx2x *bp,
4687 struct bnx2x_queue_state_params *params)
4688{
4689 struct bnx2x_queue_sp_obj *o = params->q_obj;
4690 struct tx_queue_init_ramrod_data *rdata =
4691 (struct tx_queue_init_ramrod_data *)o->rdata;
4692 dma_addr_t data_mapping = o->rdata_mapping;
4693 int ramrod = RAMROD_CMD_ID_ETH_TX_QUEUE_SETUP;
4694 struct bnx2x_queue_setup_tx_only_params *tx_only_params =
4695 &params->params.tx_only;
4696 u8 cid_index = tx_only_params->cid_index;
4697
4698
4699 if (cid_index >= o->max_cos) {
4700 BNX2X_ERR("queue[%d]: cid_index (%d) is out of range\n",
4701 o->cl_id, cid_index);
4702 return -EINVAL;
4703 }
4704
Joe Perches94f05b02011-08-14 12:16:20 +00004705 DP(BNX2X_MSG_SP, "parameters received: cos: %d sp-id: %d\n",
Ariel Elior6383c0b2011-07-14 08:31:57 +00004706 tx_only_params->gen_params.cos,
4707 tx_only_params->gen_params.spcl_id);
4708
4709 /* Clear the ramrod data */
4710 memset(rdata, 0, sizeof(*rdata));
4711
4712 /* Fill the ramrod data */
4713 bnx2x_q_fill_setup_tx_only(bp, params, rdata);
4714
Merav Sicron51c1a582012-03-18 10:33:38 +00004715 DP(BNX2X_MSG_SP, "sending tx-only ramrod: cid %d, client-id %d, sp-client id %d, cos %d\n",
4716 o->cids[cid_index], rdata->general.client_id,
Ariel Elior6383c0b2011-07-14 08:31:57 +00004717 rdata->general.sp_client_id, rdata->general.cos);
4718
4719 /*
4720 * No need for an explicit memory barrier here as long we would
4721 * need to ensure the ordering of writing to the SPQ element
4722 * and updating of the SPQ producer which involves a memory
4723 * read and we will have to put a full memory barrier there
4724 * (inside bnx2x_sp_post()).
4725 */
4726
4727 return bnx2x_sp_post(bp, ramrod, o->cids[cid_index],
4728 U64_HI(data_mapping),
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03004729 U64_LO(data_mapping), ETH_CONNECTION_TYPE);
4730}
4731
4732static void bnx2x_q_fill_update_data(struct bnx2x *bp,
4733 struct bnx2x_queue_sp_obj *obj,
4734 struct bnx2x_queue_update_params *params,
4735 struct client_update_ramrod_data *data)
4736{
4737 /* Client ID of the client to update */
4738 data->client_id = obj->cl_id;
4739
4740 /* Function ID of the client to update */
4741 data->func_id = obj->func_id;
4742
4743 /* Default VLAN value */
4744 data->default_vlan = cpu_to_le16(params->def_vlan);
4745
4746 /* Inner VLAN stripping */
4747 data->inner_vlan_removal_enable_flg =
4748 test_bit(BNX2X_Q_UPDATE_IN_VLAN_REM, &params->update_flags);
4749 data->inner_vlan_removal_change_flg =
4750 test_bit(BNX2X_Q_UPDATE_IN_VLAN_REM_CHNG,
4751 &params->update_flags);
4752
4753 /* Outer VLAN sripping */
4754 data->outer_vlan_removal_enable_flg =
4755 test_bit(BNX2X_Q_UPDATE_OUT_VLAN_REM, &params->update_flags);
4756 data->outer_vlan_removal_change_flg =
4757 test_bit(BNX2X_Q_UPDATE_OUT_VLAN_REM_CHNG,
4758 &params->update_flags);
4759
4760 /* Drop packets that have source MAC that doesn't belong to this
4761 * Queue.
4762 */
4763 data->anti_spoofing_enable_flg =
4764 test_bit(BNX2X_Q_UPDATE_ANTI_SPOOF, &params->update_flags);
4765 data->anti_spoofing_change_flg =
4766 test_bit(BNX2X_Q_UPDATE_ANTI_SPOOF_CHNG, &params->update_flags);
4767
4768 /* Activate/Deactivate */
4769 data->activate_flg =
4770 test_bit(BNX2X_Q_UPDATE_ACTIVATE, &params->update_flags);
4771 data->activate_change_flg =
4772 test_bit(BNX2X_Q_UPDATE_ACTIVATE_CHNG, &params->update_flags);
4773
4774 /* Enable default VLAN */
4775 data->default_vlan_enable_flg =
4776 test_bit(BNX2X_Q_UPDATE_DEF_VLAN_EN, &params->update_flags);
4777 data->default_vlan_change_flg =
4778 test_bit(BNX2X_Q_UPDATE_DEF_VLAN_EN_CHNG,
4779 &params->update_flags);
4780
4781 /* silent vlan removal */
4782 data->silent_vlan_change_flg =
4783 test_bit(BNX2X_Q_UPDATE_SILENT_VLAN_REM_CHNG,
4784 &params->update_flags);
4785 data->silent_vlan_removal_flg =
4786 test_bit(BNX2X_Q_UPDATE_SILENT_VLAN_REM, &params->update_flags);
4787 data->silent_vlan_value = cpu_to_le16(params->silent_removal_value);
4788 data->silent_vlan_mask = cpu_to_le16(params->silent_removal_mask);
4789}
4790
4791static inline int bnx2x_q_send_update(struct bnx2x *bp,
4792 struct bnx2x_queue_state_params *params)
4793{
4794 struct bnx2x_queue_sp_obj *o = params->q_obj;
4795 struct client_update_ramrod_data *rdata =
4796 (struct client_update_ramrod_data *)o->rdata;
4797 dma_addr_t data_mapping = o->rdata_mapping;
Ariel Elior6383c0b2011-07-14 08:31:57 +00004798 struct bnx2x_queue_update_params *update_params =
4799 &params->params.update;
4800 u8 cid_index = update_params->cid_index;
4801
4802 if (cid_index >= o->max_cos) {
4803 BNX2X_ERR("queue[%d]: cid_index (%d) is out of range\n",
4804 o->cl_id, cid_index);
4805 return -EINVAL;
4806 }
4807
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03004808
4809 /* Clear the ramrod data */
4810 memset(rdata, 0, sizeof(*rdata));
4811
4812 /* Fill the ramrod data */
Ariel Elior6383c0b2011-07-14 08:31:57 +00004813 bnx2x_q_fill_update_data(bp, o, update_params, rdata);
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03004814
Vladislav Zolotarov53e51e22011-07-19 01:45:02 +00004815 /*
4816 * No need for an explicit memory barrier here as long we would
4817 * need to ensure the ordering of writing to the SPQ element
4818 * and updating of the SPQ producer which involves a memory
4819 * read and we will have to put a full memory barrier there
4820 * (inside bnx2x_sp_post()).
4821 */
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03004822
Ariel Elior6383c0b2011-07-14 08:31:57 +00004823 return bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_CLIENT_UPDATE,
4824 o->cids[cid_index], U64_HI(data_mapping),
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03004825 U64_LO(data_mapping), ETH_CONNECTION_TYPE);
4826}
4827
4828/**
4829 * bnx2x_q_send_deactivate - send DEACTIVATE command
4830 *
4831 * @bp: device handle
4832 * @params:
4833 *
4834 * implemented using the UPDATE command.
4835 */
4836static inline int bnx2x_q_send_deactivate(struct bnx2x *bp,
4837 struct bnx2x_queue_state_params *params)
4838{
4839 struct bnx2x_queue_update_params *update = &params->params.update;
4840
4841 memset(update, 0, sizeof(*update));
4842
4843 __set_bit(BNX2X_Q_UPDATE_ACTIVATE_CHNG, &update->update_flags);
4844
4845 return bnx2x_q_send_update(bp, params);
4846}
4847
4848/**
4849 * bnx2x_q_send_activate - send ACTIVATE command
4850 *
4851 * @bp: device handle
4852 * @params:
4853 *
4854 * implemented using the UPDATE command.
4855 */
4856static inline int bnx2x_q_send_activate(struct bnx2x *bp,
4857 struct bnx2x_queue_state_params *params)
4858{
4859 struct bnx2x_queue_update_params *update = &params->params.update;
4860
4861 memset(update, 0, sizeof(*update));
4862
4863 __set_bit(BNX2X_Q_UPDATE_ACTIVATE, &update->update_flags);
4864 __set_bit(BNX2X_Q_UPDATE_ACTIVATE_CHNG, &update->update_flags);
4865
4866 return bnx2x_q_send_update(bp, params);
4867}
4868
4869static inline int bnx2x_q_send_update_tpa(struct bnx2x *bp,
4870 struct bnx2x_queue_state_params *params)
4871{
4872 /* TODO: Not implemented yet. */
4873 return -1;
4874}
4875
4876static inline int bnx2x_q_send_halt(struct bnx2x *bp,
4877 struct bnx2x_queue_state_params *params)
4878{
4879 struct bnx2x_queue_sp_obj *o = params->q_obj;
4880
Ariel Elior6383c0b2011-07-14 08:31:57 +00004881 return bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_HALT,
4882 o->cids[BNX2X_PRIMARY_CID_INDEX], 0, o->cl_id,
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03004883 ETH_CONNECTION_TYPE);
4884}
4885
4886static inline int bnx2x_q_send_cfc_del(struct bnx2x *bp,
4887 struct bnx2x_queue_state_params *params)
4888{
4889 struct bnx2x_queue_sp_obj *o = params->q_obj;
Ariel Elior6383c0b2011-07-14 08:31:57 +00004890 u8 cid_idx = params->params.cfc_del.cid_index;
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03004891
Ariel Elior6383c0b2011-07-14 08:31:57 +00004892 if (cid_idx >= o->max_cos) {
4893 BNX2X_ERR("queue[%d]: cid_index (%d) is out of range\n",
4894 o->cl_id, cid_idx);
4895 return -EINVAL;
4896 }
4897
4898 return bnx2x_sp_post(bp, RAMROD_CMD_ID_COMMON_CFC_DEL,
4899 o->cids[cid_idx], 0, 0, NONE_CONNECTION_TYPE);
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03004900}
4901
4902static inline int bnx2x_q_send_terminate(struct bnx2x *bp,
4903 struct bnx2x_queue_state_params *params)
4904{
4905 struct bnx2x_queue_sp_obj *o = params->q_obj;
Ariel Elior6383c0b2011-07-14 08:31:57 +00004906 u8 cid_index = params->params.terminate.cid_index;
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03004907
Ariel Elior6383c0b2011-07-14 08:31:57 +00004908 if (cid_index >= o->max_cos) {
4909 BNX2X_ERR("queue[%d]: cid_index (%d) is out of range\n",
4910 o->cl_id, cid_index);
4911 return -EINVAL;
4912 }
4913
4914 return bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_TERMINATE,
4915 o->cids[cid_index], 0, 0, ETH_CONNECTION_TYPE);
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03004916}
4917
4918static inline int bnx2x_q_send_empty(struct bnx2x *bp,
4919 struct bnx2x_queue_state_params *params)
4920{
4921 struct bnx2x_queue_sp_obj *o = params->q_obj;
4922
Ariel Elior6383c0b2011-07-14 08:31:57 +00004923 return bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_EMPTY,
4924 o->cids[BNX2X_PRIMARY_CID_INDEX], 0, 0,
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03004925 ETH_CONNECTION_TYPE);
4926}
4927
4928static inline int bnx2x_queue_send_cmd_cmn(struct bnx2x *bp,
4929 struct bnx2x_queue_state_params *params)
4930{
4931 switch (params->cmd) {
4932 case BNX2X_Q_CMD_INIT:
4933 return bnx2x_q_init(bp, params);
Ariel Elior6383c0b2011-07-14 08:31:57 +00004934 case BNX2X_Q_CMD_SETUP_TX_ONLY:
4935 return bnx2x_q_send_setup_tx_only(bp, params);
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03004936 case BNX2X_Q_CMD_DEACTIVATE:
4937 return bnx2x_q_send_deactivate(bp, params);
4938 case BNX2X_Q_CMD_ACTIVATE:
4939 return bnx2x_q_send_activate(bp, params);
4940 case BNX2X_Q_CMD_UPDATE:
4941 return bnx2x_q_send_update(bp, params);
4942 case BNX2X_Q_CMD_UPDATE_TPA:
4943 return bnx2x_q_send_update_tpa(bp, params);
4944 case BNX2X_Q_CMD_HALT:
4945 return bnx2x_q_send_halt(bp, params);
4946 case BNX2X_Q_CMD_CFC_DEL:
4947 return bnx2x_q_send_cfc_del(bp, params);
4948 case BNX2X_Q_CMD_TERMINATE:
4949 return bnx2x_q_send_terminate(bp, params);
4950 case BNX2X_Q_CMD_EMPTY:
4951 return bnx2x_q_send_empty(bp, params);
4952 default:
4953 BNX2X_ERR("Unknown command: %d\n", params->cmd);
4954 return -EINVAL;
4955 }
4956}
4957
4958static int bnx2x_queue_send_cmd_e1x(struct bnx2x *bp,
4959 struct bnx2x_queue_state_params *params)
4960{
4961 switch (params->cmd) {
4962 case BNX2X_Q_CMD_SETUP:
4963 return bnx2x_q_send_setup_e1x(bp, params);
4964 case BNX2X_Q_CMD_INIT:
Ariel Elior6383c0b2011-07-14 08:31:57 +00004965 case BNX2X_Q_CMD_SETUP_TX_ONLY:
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03004966 case BNX2X_Q_CMD_DEACTIVATE:
4967 case BNX2X_Q_CMD_ACTIVATE:
4968 case BNX2X_Q_CMD_UPDATE:
4969 case BNX2X_Q_CMD_UPDATE_TPA:
4970 case BNX2X_Q_CMD_HALT:
4971 case BNX2X_Q_CMD_CFC_DEL:
4972 case BNX2X_Q_CMD_TERMINATE:
4973 case BNX2X_Q_CMD_EMPTY:
4974 return bnx2x_queue_send_cmd_cmn(bp, params);
4975 default:
4976 BNX2X_ERR("Unknown command: %d\n", params->cmd);
4977 return -EINVAL;
4978 }
4979}
4980
4981static int bnx2x_queue_send_cmd_e2(struct bnx2x *bp,
4982 struct bnx2x_queue_state_params *params)
4983{
4984 switch (params->cmd) {
4985 case BNX2X_Q_CMD_SETUP:
4986 return bnx2x_q_send_setup_e2(bp, params);
4987 case BNX2X_Q_CMD_INIT:
Ariel Elior6383c0b2011-07-14 08:31:57 +00004988 case BNX2X_Q_CMD_SETUP_TX_ONLY:
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03004989 case BNX2X_Q_CMD_DEACTIVATE:
4990 case BNX2X_Q_CMD_ACTIVATE:
4991 case BNX2X_Q_CMD_UPDATE:
4992 case BNX2X_Q_CMD_UPDATE_TPA:
4993 case BNX2X_Q_CMD_HALT:
4994 case BNX2X_Q_CMD_CFC_DEL:
4995 case BNX2X_Q_CMD_TERMINATE:
4996 case BNX2X_Q_CMD_EMPTY:
4997 return bnx2x_queue_send_cmd_cmn(bp, params);
4998 default:
4999 BNX2X_ERR("Unknown command: %d\n", params->cmd);
5000 return -EINVAL;
5001 }
5002}
5003
5004/**
5005 * bnx2x_queue_chk_transition - check state machine of a regular Queue
5006 *
5007 * @bp: device handle
5008 * @o:
5009 * @params:
5010 *
5011 * (not Forwarding)
5012 * It both checks if the requested command is legal in a current
5013 * state and, if it's legal, sets a `next_state' in the object
5014 * that will be used in the completion flow to set the `state'
5015 * of the object.
5016 *
5017 * returns 0 if a requested command is a legal transition,
5018 * -EINVAL otherwise.
5019 */
5020static int bnx2x_queue_chk_transition(struct bnx2x *bp,
5021 struct bnx2x_queue_sp_obj *o,
5022 struct bnx2x_queue_state_params *params)
5023{
5024 enum bnx2x_q_state state = o->state, next_state = BNX2X_Q_STATE_MAX;
5025 enum bnx2x_queue_cmd cmd = params->cmd;
Ariel Elior6383c0b2011-07-14 08:31:57 +00005026 struct bnx2x_queue_update_params *update_params =
5027 &params->params.update;
5028 u8 next_tx_only = o->num_tx_only;
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03005029
Dmitry Kravkov6debea82011-07-19 01:42:04 +00005030 /*
5031 * Forget all pending for completion commands if a driver only state
5032 * transition has been requested.
5033 */
5034 if (test_bit(RAMROD_DRV_CLR_ONLY, &params->ramrod_flags)) {
5035 o->pending = 0;
5036 o->next_state = BNX2X_Q_STATE_MAX;
5037 }
5038
5039 /*
5040 * Don't allow a next state transition if we are in the middle of
5041 * the previous one.
5042 */
Yuval Mintz04c46732013-01-23 03:21:46 +00005043 if (o->pending) {
5044 BNX2X_ERR("Blocking transition since pending was %lx\n",
5045 o->pending);
Dmitry Kravkov6debea82011-07-19 01:42:04 +00005046 return -EBUSY;
Yuval Mintz04c46732013-01-23 03:21:46 +00005047 }
Dmitry Kravkov6debea82011-07-19 01:42:04 +00005048
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03005049 switch (state) {
5050 case BNX2X_Q_STATE_RESET:
5051 if (cmd == BNX2X_Q_CMD_INIT)
5052 next_state = BNX2X_Q_STATE_INITIALIZED;
5053
5054 break;
5055 case BNX2X_Q_STATE_INITIALIZED:
5056 if (cmd == BNX2X_Q_CMD_SETUP) {
5057 if (test_bit(BNX2X_Q_FLG_ACTIVE,
5058 &params->params.setup.flags))
5059 next_state = BNX2X_Q_STATE_ACTIVE;
5060 else
5061 next_state = BNX2X_Q_STATE_INACTIVE;
5062 }
5063
5064 break;
5065 case BNX2X_Q_STATE_ACTIVE:
5066 if (cmd == BNX2X_Q_CMD_DEACTIVATE)
5067 next_state = BNX2X_Q_STATE_INACTIVE;
5068
5069 else if ((cmd == BNX2X_Q_CMD_EMPTY) ||
5070 (cmd == BNX2X_Q_CMD_UPDATE_TPA))
5071 next_state = BNX2X_Q_STATE_ACTIVE;
5072
Ariel Elior6383c0b2011-07-14 08:31:57 +00005073 else if (cmd == BNX2X_Q_CMD_SETUP_TX_ONLY) {
5074 next_state = BNX2X_Q_STATE_MULTI_COS;
5075 next_tx_only = 1;
5076 }
5077
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03005078 else if (cmd == BNX2X_Q_CMD_HALT)
5079 next_state = BNX2X_Q_STATE_STOPPED;
5080
5081 else if (cmd == BNX2X_Q_CMD_UPDATE) {
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03005082 /* If "active" state change is requested, update the
5083 * state accordingly.
5084 */
5085 if (test_bit(BNX2X_Q_UPDATE_ACTIVATE_CHNG,
5086 &update_params->update_flags) &&
5087 !test_bit(BNX2X_Q_UPDATE_ACTIVATE,
5088 &update_params->update_flags))
5089 next_state = BNX2X_Q_STATE_INACTIVE;
5090 else
5091 next_state = BNX2X_Q_STATE_ACTIVE;
5092 }
5093
5094 break;
Ariel Elior6383c0b2011-07-14 08:31:57 +00005095 case BNX2X_Q_STATE_MULTI_COS:
5096 if (cmd == BNX2X_Q_CMD_TERMINATE)
5097 next_state = BNX2X_Q_STATE_MCOS_TERMINATED;
5098
5099 else if (cmd == BNX2X_Q_CMD_SETUP_TX_ONLY) {
5100 next_state = BNX2X_Q_STATE_MULTI_COS;
5101 next_tx_only = o->num_tx_only + 1;
5102 }
5103
5104 else if ((cmd == BNX2X_Q_CMD_EMPTY) ||
5105 (cmd == BNX2X_Q_CMD_UPDATE_TPA))
5106 next_state = BNX2X_Q_STATE_MULTI_COS;
5107
5108 else if (cmd == BNX2X_Q_CMD_UPDATE) {
5109 /* If "active" state change is requested, update the
5110 * state accordingly.
5111 */
5112 if (test_bit(BNX2X_Q_UPDATE_ACTIVATE_CHNG,
5113 &update_params->update_flags) &&
5114 !test_bit(BNX2X_Q_UPDATE_ACTIVATE,
5115 &update_params->update_flags))
5116 next_state = BNX2X_Q_STATE_INACTIVE;
5117 else
5118 next_state = BNX2X_Q_STATE_MULTI_COS;
5119 }
5120
5121 break;
5122 case BNX2X_Q_STATE_MCOS_TERMINATED:
5123 if (cmd == BNX2X_Q_CMD_CFC_DEL) {
5124 next_tx_only = o->num_tx_only - 1;
5125 if (next_tx_only == 0)
5126 next_state = BNX2X_Q_STATE_ACTIVE;
5127 else
5128 next_state = BNX2X_Q_STATE_MULTI_COS;
5129 }
5130
5131 break;
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03005132 case BNX2X_Q_STATE_INACTIVE:
5133 if (cmd == BNX2X_Q_CMD_ACTIVATE)
5134 next_state = BNX2X_Q_STATE_ACTIVE;
5135
5136 else if ((cmd == BNX2X_Q_CMD_EMPTY) ||
5137 (cmd == BNX2X_Q_CMD_UPDATE_TPA))
5138 next_state = BNX2X_Q_STATE_INACTIVE;
5139
5140 else if (cmd == BNX2X_Q_CMD_HALT)
5141 next_state = BNX2X_Q_STATE_STOPPED;
5142
5143 else if (cmd == BNX2X_Q_CMD_UPDATE) {
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03005144 /* If "active" state change is requested, update the
5145 * state accordingly.
5146 */
5147 if (test_bit(BNX2X_Q_UPDATE_ACTIVATE_CHNG,
5148 &update_params->update_flags) &&
5149 test_bit(BNX2X_Q_UPDATE_ACTIVATE,
Ariel Elior6383c0b2011-07-14 08:31:57 +00005150 &update_params->update_flags)){
5151 if (o->num_tx_only == 0)
5152 next_state = BNX2X_Q_STATE_ACTIVE;
5153 else /* tx only queues exist for this queue */
5154 next_state = BNX2X_Q_STATE_MULTI_COS;
5155 } else
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03005156 next_state = BNX2X_Q_STATE_INACTIVE;
5157 }
5158
5159 break;
5160 case BNX2X_Q_STATE_STOPPED:
5161 if (cmd == BNX2X_Q_CMD_TERMINATE)
5162 next_state = BNX2X_Q_STATE_TERMINATED;
5163
5164 break;
5165 case BNX2X_Q_STATE_TERMINATED:
5166 if (cmd == BNX2X_Q_CMD_CFC_DEL)
5167 next_state = BNX2X_Q_STATE_RESET;
5168
5169 break;
5170 default:
5171 BNX2X_ERR("Illegal state: %d\n", state);
5172 }
5173
5174 /* Transition is assured */
5175 if (next_state != BNX2X_Q_STATE_MAX) {
5176 DP(BNX2X_MSG_SP, "Good state transition: %d(%d)->%d\n",
5177 state, cmd, next_state);
5178 o->next_state = next_state;
Ariel Elior6383c0b2011-07-14 08:31:57 +00005179 o->next_tx_only = next_tx_only;
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03005180 return 0;
5181 }
5182
5183 DP(BNX2X_MSG_SP, "Bad state transition request: %d %d\n", state, cmd);
5184
5185 return -EINVAL;
5186}
5187
5188void bnx2x_init_queue_obj(struct bnx2x *bp,
5189 struct bnx2x_queue_sp_obj *obj,
Ariel Elior6383c0b2011-07-14 08:31:57 +00005190 u8 cl_id, u32 *cids, u8 cid_cnt, u8 func_id,
5191 void *rdata,
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03005192 dma_addr_t rdata_mapping, unsigned long type)
5193{
5194 memset(obj, 0, sizeof(*obj));
5195
Ariel Elior6383c0b2011-07-14 08:31:57 +00005196 /* We support only BNX2X_MULTI_TX_COS Tx CoS at the moment */
5197 BUG_ON(BNX2X_MULTI_TX_COS < cid_cnt);
5198
5199 memcpy(obj->cids, cids, sizeof(obj->cids[0]) * cid_cnt);
5200 obj->max_cos = cid_cnt;
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03005201 obj->cl_id = cl_id;
5202 obj->func_id = func_id;
5203 obj->rdata = rdata;
5204 obj->rdata_mapping = rdata_mapping;
5205 obj->type = type;
5206 obj->next_state = BNX2X_Q_STATE_MAX;
5207
5208 if (CHIP_IS_E1x(bp))
5209 obj->send_cmd = bnx2x_queue_send_cmd_e1x;
5210 else
5211 obj->send_cmd = bnx2x_queue_send_cmd_e2;
5212
5213 obj->check_transition = bnx2x_queue_chk_transition;
5214
5215 obj->complete_cmd = bnx2x_queue_comp_cmd;
5216 obj->wait_comp = bnx2x_queue_wait_comp;
5217 obj->set_pending = bnx2x_queue_set_pending;
5218}
5219
Ariel Elior67c431a2013-01-01 05:22:36 +00005220/* return a queue object's logical state*/
5221int bnx2x_get_q_logical_state(struct bnx2x *bp,
5222 struct bnx2x_queue_sp_obj *obj)
5223{
5224 switch (obj->state) {
5225 case BNX2X_Q_STATE_ACTIVE:
5226 case BNX2X_Q_STATE_MULTI_COS:
5227 return BNX2X_Q_LOGICAL_STATE_ACTIVE;
5228 case BNX2X_Q_STATE_RESET:
5229 case BNX2X_Q_STATE_INITIALIZED:
5230 case BNX2X_Q_STATE_MCOS_TERMINATED:
5231 case BNX2X_Q_STATE_INACTIVE:
5232 case BNX2X_Q_STATE_STOPPED:
5233 case BNX2X_Q_STATE_TERMINATED:
5234 case BNX2X_Q_STATE_FLRED:
5235 return BNX2X_Q_LOGICAL_STATE_STOPPED;
5236 default:
5237 return -EINVAL;
5238 }
5239}
5240
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03005241/********************** Function state object *********************************/
Dmitry Kravkov6debea82011-07-19 01:42:04 +00005242enum bnx2x_func_state bnx2x_func_get_state(struct bnx2x *bp,
5243 struct bnx2x_func_sp_obj *o)
5244{
5245 /* in the middle of transaction - return INVALID state */
5246 if (o->pending)
5247 return BNX2X_F_STATE_MAX;
5248
5249 /*
5250 * unsure the order of reading of o->pending and o->state
5251 * o->pending should be read first
5252 */
5253 rmb();
5254
5255 return o->state;
5256}
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03005257
5258static int bnx2x_func_wait_comp(struct bnx2x *bp,
5259 struct bnx2x_func_sp_obj *o,
5260 enum bnx2x_func_cmd cmd)
5261{
5262 return bnx2x_state_wait(bp, cmd, &o->pending);
5263}
5264
5265/**
5266 * bnx2x_func_state_change_comp - complete the state machine transition
5267 *
5268 * @bp: device handle
5269 * @o:
5270 * @cmd:
5271 *
5272 * Called on state change transition. Completes the state
5273 * machine transition only - no HW interaction.
5274 */
5275static inline int bnx2x_func_state_change_comp(struct bnx2x *bp,
5276 struct bnx2x_func_sp_obj *o,
5277 enum bnx2x_func_cmd cmd)
5278{
5279 unsigned long cur_pending = o->pending;
5280
5281 if (!test_and_clear_bit(cmd, &cur_pending)) {
Merav Sicron51c1a582012-03-18 10:33:38 +00005282 BNX2X_ERR("Bad MC reply %d for func %d in state %d pending 0x%lx, next_state %d\n",
5283 cmd, BP_FUNC(bp), o->state,
5284 cur_pending, o->next_state);
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03005285 return -EINVAL;
5286 }
5287
Joe Perches94f05b02011-08-14 12:16:20 +00005288 DP(BNX2X_MSG_SP,
5289 "Completing command %d for func %d, setting state to %d\n",
5290 cmd, BP_FUNC(bp), o->next_state);
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03005291
5292 o->state = o->next_state;
5293 o->next_state = BNX2X_F_STATE_MAX;
5294
5295 /* It's important that o->state and o->next_state are
5296 * updated before o->pending.
5297 */
5298 wmb();
5299
5300 clear_bit(cmd, &o->pending);
5301 smp_mb__after_clear_bit();
5302
5303 return 0;
5304}
5305
5306/**
5307 * bnx2x_func_comp_cmd - complete the state change command
5308 *
5309 * @bp: device handle
5310 * @o:
5311 * @cmd:
5312 *
5313 * Checks that the arrived completion is expected.
5314 */
5315static int bnx2x_func_comp_cmd(struct bnx2x *bp,
5316 struct bnx2x_func_sp_obj *o,
5317 enum bnx2x_func_cmd cmd)
5318{
5319 /* Complete the state machine part first, check if it's a
5320 * legal completion.
5321 */
5322 int rc = bnx2x_func_state_change_comp(bp, o, cmd);
Vladislav Zolotarov042181f2011-06-14 01:33:39 +00005323 return rc;
5324}
5325
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03005326/**
5327 * bnx2x_func_chk_transition - perform function state machine transition
5328 *
5329 * @bp: device handle
5330 * @o:
5331 * @params:
5332 *
5333 * It both checks if the requested command is legal in a current
5334 * state and, if it's legal, sets a `next_state' in the object
5335 * that will be used in the completion flow to set the `state'
5336 * of the object.
5337 *
5338 * returns 0 if a requested command is a legal transition,
5339 * -EINVAL otherwise.
5340 */
5341static int bnx2x_func_chk_transition(struct bnx2x *bp,
5342 struct bnx2x_func_sp_obj *o,
5343 struct bnx2x_func_state_params *params)
Vladislav Zolotarov042181f2011-06-14 01:33:39 +00005344{
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03005345 enum bnx2x_func_state state = o->state, next_state = BNX2X_F_STATE_MAX;
5346 enum bnx2x_func_cmd cmd = params->cmd;
Vladislav Zolotarov042181f2011-06-14 01:33:39 +00005347
Dmitry Kravkov6debea82011-07-19 01:42:04 +00005348 /*
5349 * Forget all pending for completion commands if a driver only state
5350 * transition has been requested.
5351 */
5352 if (test_bit(RAMROD_DRV_CLR_ONLY, &params->ramrod_flags)) {
5353 o->pending = 0;
5354 o->next_state = BNX2X_F_STATE_MAX;
5355 }
5356
5357 /*
5358 * Don't allow a next state transition if we are in the middle of
5359 * the previous one.
5360 */
5361 if (o->pending)
5362 return -EBUSY;
5363
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03005364 switch (state) {
5365 case BNX2X_F_STATE_RESET:
5366 if (cmd == BNX2X_F_CMD_HW_INIT)
5367 next_state = BNX2X_F_STATE_INITIALIZED;
Vladislav Zolotarov042181f2011-06-14 01:33:39 +00005368
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03005369 break;
5370 case BNX2X_F_STATE_INITIALIZED:
5371 if (cmd == BNX2X_F_CMD_START)
5372 next_state = BNX2X_F_STATE_STARTED;
5373
5374 else if (cmd == BNX2X_F_CMD_HW_RESET)
5375 next_state = BNX2X_F_STATE_RESET;
5376
5377 break;
5378 case BNX2X_F_STATE_STARTED:
5379 if (cmd == BNX2X_F_CMD_STOP)
5380 next_state = BNX2X_F_STATE_INITIALIZED;
Barak Witkowskia3348722012-04-23 03:04:46 +00005381 /* afex ramrods can be sent only in started mode, and only
5382 * if not pending for function_stop ramrod completion
5383 * for these events - next state remained STARTED.
5384 */
5385 else if ((cmd == BNX2X_F_CMD_AFEX_UPDATE) &&
5386 (!test_bit(BNX2X_F_CMD_STOP, &o->pending)))
5387 next_state = BNX2X_F_STATE_STARTED;
5388
5389 else if ((cmd == BNX2X_F_CMD_AFEX_VIFLISTS) &&
5390 (!test_bit(BNX2X_F_CMD_STOP, &o->pending)))
5391 next_state = BNX2X_F_STATE_STARTED;
Merav Sicron55c11942012-11-07 00:45:48 +00005392
5393 /* Switch_update ramrod can be sent in either started or
5394 * tx_stopped state, and it doesn't change the state.
5395 */
5396 else if ((cmd == BNX2X_F_CMD_SWITCH_UPDATE) &&
5397 (!test_bit(BNX2X_F_CMD_STOP, &o->pending)))
5398 next_state = BNX2X_F_STATE_STARTED;
5399
Dmitry Kravkov6debea82011-07-19 01:42:04 +00005400 else if (cmd == BNX2X_F_CMD_TX_STOP)
5401 next_state = BNX2X_F_STATE_TX_STOPPED;
5402
5403 break;
5404 case BNX2X_F_STATE_TX_STOPPED:
Merav Sicron55c11942012-11-07 00:45:48 +00005405 if ((cmd == BNX2X_F_CMD_SWITCH_UPDATE) &&
5406 (!test_bit(BNX2X_F_CMD_STOP, &o->pending)))
5407 next_state = BNX2X_F_STATE_TX_STOPPED;
5408
5409 else if (cmd == BNX2X_F_CMD_TX_START)
Dmitry Kravkov6debea82011-07-19 01:42:04 +00005410 next_state = BNX2X_F_STATE_STARTED;
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03005411
5412 break;
5413 default:
5414 BNX2X_ERR("Unknown state: %d\n", state);
5415 }
5416
5417 /* Transition is assured */
5418 if (next_state != BNX2X_F_STATE_MAX) {
5419 DP(BNX2X_MSG_SP, "Good function state transition: %d(%d)->%d\n",
5420 state, cmd, next_state);
5421 o->next_state = next_state;
5422 return 0;
5423 }
5424
5425 DP(BNX2X_MSG_SP, "Bad function state transition request: %d %d\n",
5426 state, cmd);
5427
5428 return -EINVAL;
5429}
5430
5431/**
5432 * bnx2x_func_init_func - performs HW init at function stage
5433 *
5434 * @bp: device handle
5435 * @drv:
5436 *
5437 * Init HW when the current phase is
5438 * FW_MSG_CODE_DRV_LOAD_FUNCTION: initialize only FUNCTION-only
5439 * HW blocks.
5440 */
5441static inline int bnx2x_func_init_func(struct bnx2x *bp,
5442 const struct bnx2x_func_sp_drv_ops *drv)
5443{
5444 return drv->init_hw_func(bp);
5445}
5446
5447/**
5448 * bnx2x_func_init_port - performs HW init at port stage
5449 *
5450 * @bp: device handle
5451 * @drv:
5452 *
5453 * Init HW when the current phase is
5454 * FW_MSG_CODE_DRV_LOAD_PORT: initialize PORT-only and
5455 * FUNCTION-only HW blocks.
5456 *
5457 */
5458static inline int bnx2x_func_init_port(struct bnx2x *bp,
5459 const struct bnx2x_func_sp_drv_ops *drv)
5460{
5461 int rc = drv->init_hw_port(bp);
5462 if (rc)
5463 return rc;
5464
5465 return bnx2x_func_init_func(bp, drv);
5466}
5467
5468/**
5469 * bnx2x_func_init_cmn_chip - performs HW init at chip-common stage
5470 *
5471 * @bp: device handle
5472 * @drv:
5473 *
5474 * Init HW when the current phase is
5475 * FW_MSG_CODE_DRV_LOAD_COMMON_CHIP: initialize COMMON_CHIP,
5476 * PORT-only and FUNCTION-only HW blocks.
5477 */
5478static inline int bnx2x_func_init_cmn_chip(struct bnx2x *bp,
5479 const struct bnx2x_func_sp_drv_ops *drv)
5480{
5481 int rc = drv->init_hw_cmn_chip(bp);
5482 if (rc)
5483 return rc;
5484
5485 return bnx2x_func_init_port(bp, drv);
5486}
5487
5488/**
5489 * bnx2x_func_init_cmn - performs HW init at common stage
5490 *
5491 * @bp: device handle
5492 * @drv:
5493 *
5494 * Init HW when the current phase is
5495 * FW_MSG_CODE_DRV_LOAD_COMMON_CHIP: initialize COMMON,
5496 * PORT-only and FUNCTION-only HW blocks.
5497 */
5498static inline int bnx2x_func_init_cmn(struct bnx2x *bp,
5499 const struct bnx2x_func_sp_drv_ops *drv)
5500{
5501 int rc = drv->init_hw_cmn(bp);
5502 if (rc)
5503 return rc;
5504
5505 return bnx2x_func_init_port(bp, drv);
5506}
5507
5508static int bnx2x_func_hw_init(struct bnx2x *bp,
5509 struct bnx2x_func_state_params *params)
5510{
5511 u32 load_code = params->params.hw_init.load_phase;
5512 struct bnx2x_func_sp_obj *o = params->f_obj;
5513 const struct bnx2x_func_sp_drv_ops *drv = o->drv;
5514 int rc = 0;
5515
5516 DP(BNX2X_MSG_SP, "function %d load_code %x\n",
5517 BP_ABS_FUNC(bp), load_code);
5518
5519 /* Prepare buffers for unzipping the FW */
5520 rc = drv->gunzip_init(bp);
5521 if (rc)
5522 return rc;
5523
5524 /* Prepare FW */
5525 rc = drv->init_fw(bp);
5526 if (rc) {
5527 BNX2X_ERR("Error loading firmware\n");
Dmitry Kravkoveb2afd42011-11-15 12:07:33 +00005528 goto init_err;
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03005529 }
5530
5531 /* Handle the beginning of COMMON_XXX pases separatelly... */
5532 switch (load_code) {
5533 case FW_MSG_CODE_DRV_LOAD_COMMON_CHIP:
5534 rc = bnx2x_func_init_cmn_chip(bp, drv);
5535 if (rc)
Dmitry Kravkoveb2afd42011-11-15 12:07:33 +00005536 goto init_err;
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03005537
5538 break;
5539 case FW_MSG_CODE_DRV_LOAD_COMMON:
5540 rc = bnx2x_func_init_cmn(bp, drv);
5541 if (rc)
Dmitry Kravkoveb2afd42011-11-15 12:07:33 +00005542 goto init_err;
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03005543
5544 break;
5545 case FW_MSG_CODE_DRV_LOAD_PORT:
5546 rc = bnx2x_func_init_port(bp, drv);
5547 if (rc)
Dmitry Kravkoveb2afd42011-11-15 12:07:33 +00005548 goto init_err;
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03005549
5550 break;
5551 case FW_MSG_CODE_DRV_LOAD_FUNCTION:
5552 rc = bnx2x_func_init_func(bp, drv);
5553 if (rc)
Dmitry Kravkoveb2afd42011-11-15 12:07:33 +00005554 goto init_err;
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03005555
5556 break;
5557 default:
5558 BNX2X_ERR("Unknown load_code (0x%x) from MCP\n", load_code);
5559 rc = -EINVAL;
5560 }
5561
Dmitry Kravkoveb2afd42011-11-15 12:07:33 +00005562init_err:
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03005563 drv->gunzip_end(bp);
5564
5565 /* In case of success, complete the comand immediatelly: no ramrods
5566 * have been sent.
5567 */
5568 if (!rc)
5569 o->complete_cmd(bp, o, BNX2X_F_CMD_HW_INIT);
5570
5571 return rc;
5572}
5573
5574/**
5575 * bnx2x_func_reset_func - reset HW at function stage
5576 *
5577 * @bp: device handle
5578 * @drv:
5579 *
5580 * Reset HW at FW_MSG_CODE_DRV_UNLOAD_FUNCTION stage: reset only
5581 * FUNCTION-only HW blocks.
5582 */
5583static inline void bnx2x_func_reset_func(struct bnx2x *bp,
5584 const struct bnx2x_func_sp_drv_ops *drv)
5585{
5586 drv->reset_hw_func(bp);
5587}
5588
5589/**
5590 * bnx2x_func_reset_port - reser HW at port stage
5591 *
5592 * @bp: device handle
5593 * @drv:
5594 *
5595 * Reset HW at FW_MSG_CODE_DRV_UNLOAD_PORT stage: reset
5596 * FUNCTION-only and PORT-only HW blocks.
5597 *
5598 * !!!IMPORTANT!!!
5599 *
5600 * It's important to call reset_port before reset_func() as the last thing
5601 * reset_func does is pf_disable() thus disabling PGLUE_B, which
5602 * makes impossible any DMAE transactions.
5603 */
5604static inline void bnx2x_func_reset_port(struct bnx2x *bp,
5605 const struct bnx2x_func_sp_drv_ops *drv)
5606{
5607 drv->reset_hw_port(bp);
5608 bnx2x_func_reset_func(bp, drv);
5609}
5610
5611/**
5612 * bnx2x_func_reset_cmn - reser HW at common stage
5613 *
5614 * @bp: device handle
5615 * @drv:
5616 *
5617 * Reset HW at FW_MSG_CODE_DRV_UNLOAD_COMMON and
5618 * FW_MSG_CODE_DRV_UNLOAD_COMMON_CHIP stages: reset COMMON,
5619 * COMMON_CHIP, FUNCTION-only and PORT-only HW blocks.
5620 */
5621static inline void bnx2x_func_reset_cmn(struct bnx2x *bp,
5622 const struct bnx2x_func_sp_drv_ops *drv)
5623{
5624 bnx2x_func_reset_port(bp, drv);
5625 drv->reset_hw_cmn(bp);
5626}
5627
5628
5629static inline int bnx2x_func_hw_reset(struct bnx2x *bp,
5630 struct bnx2x_func_state_params *params)
5631{
5632 u32 reset_phase = params->params.hw_reset.reset_phase;
5633 struct bnx2x_func_sp_obj *o = params->f_obj;
5634 const struct bnx2x_func_sp_drv_ops *drv = o->drv;
5635
5636 DP(BNX2X_MSG_SP, "function %d reset_phase %x\n", BP_ABS_FUNC(bp),
5637 reset_phase);
5638
5639 switch (reset_phase) {
5640 case FW_MSG_CODE_DRV_UNLOAD_COMMON:
5641 bnx2x_func_reset_cmn(bp, drv);
5642 break;
5643 case FW_MSG_CODE_DRV_UNLOAD_PORT:
5644 bnx2x_func_reset_port(bp, drv);
5645 break;
5646 case FW_MSG_CODE_DRV_UNLOAD_FUNCTION:
5647 bnx2x_func_reset_func(bp, drv);
5648 break;
5649 default:
5650 BNX2X_ERR("Unknown reset_phase (0x%x) from MCP\n",
5651 reset_phase);
5652 break;
5653 }
5654
5655 /* Complete the comand immediatelly: no ramrods have been sent. */
5656 o->complete_cmd(bp, o, BNX2X_F_CMD_HW_RESET);
5657
5658 return 0;
5659}
5660
5661static inline int bnx2x_func_send_start(struct bnx2x *bp,
5662 struct bnx2x_func_state_params *params)
5663{
5664 struct bnx2x_func_sp_obj *o = params->f_obj;
5665 struct function_start_data *rdata =
5666 (struct function_start_data *)o->rdata;
5667 dma_addr_t data_mapping = o->rdata_mapping;
5668 struct bnx2x_func_start_params *start_params = &params->params.start;
5669
5670 memset(rdata, 0, sizeof(*rdata));
5671
5672 /* Fill the ramrod data with provided parameters */
Yuval Mintz2de67432013-01-23 03:21:43 +00005673 rdata->function_mode = (u8)start_params->mf_mode;
5674 rdata->sd_vlan_tag = cpu_to_le16(start_params->sd_vlan_tag);
5675 rdata->path_id = BP_PATH(bp);
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03005676 rdata->network_cos_mode = start_params->network_cos_mode;
5677
Vladislav Zolotarov53e51e22011-07-19 01:45:02 +00005678 /*
5679 * No need for an explicit memory barrier here as long we would
5680 * need to ensure the ordering of writing to the SPQ element
5681 * and updating of the SPQ producer which involves a memory
5682 * read and we will have to put a full memory barrier there
5683 * (inside bnx2x_sp_post()).
5684 */
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03005685
5686 return bnx2x_sp_post(bp, RAMROD_CMD_ID_COMMON_FUNCTION_START, 0,
5687 U64_HI(data_mapping),
5688 U64_LO(data_mapping), NONE_CONNECTION_TYPE);
5689}
5690
Merav Sicron55c11942012-11-07 00:45:48 +00005691static inline int bnx2x_func_send_switch_update(struct bnx2x *bp,
5692 struct bnx2x_func_state_params *params)
5693{
5694 struct bnx2x_func_sp_obj *o = params->f_obj;
5695 struct function_update_data *rdata =
5696 (struct function_update_data *)o->rdata;
5697 dma_addr_t data_mapping = o->rdata_mapping;
5698 struct bnx2x_func_switch_update_params *switch_update_params =
5699 &params->params.switch_update;
5700
5701 memset(rdata, 0, sizeof(*rdata));
5702
5703 /* Fill the ramrod data with provided parameters */
5704 rdata->tx_switch_suspend_change_flg = 1;
5705 rdata->tx_switch_suspend = switch_update_params->suspend;
5706 rdata->echo = SWITCH_UPDATE;
5707
5708 return bnx2x_sp_post(bp, RAMROD_CMD_ID_COMMON_FUNCTION_UPDATE, 0,
5709 U64_HI(data_mapping),
5710 U64_LO(data_mapping), NONE_CONNECTION_TYPE);
5711}
5712
Barak Witkowskia3348722012-04-23 03:04:46 +00005713static inline int bnx2x_func_send_afex_update(struct bnx2x *bp,
5714 struct bnx2x_func_state_params *params)
5715{
5716 struct bnx2x_func_sp_obj *o = params->f_obj;
5717 struct function_update_data *rdata =
5718 (struct function_update_data *)o->afex_rdata;
5719 dma_addr_t data_mapping = o->afex_rdata_mapping;
5720 struct bnx2x_func_afex_update_params *afex_update_params =
5721 &params->params.afex_update;
5722
5723 memset(rdata, 0, sizeof(*rdata));
5724
5725 /* Fill the ramrod data with provided parameters */
5726 rdata->vif_id_change_flg = 1;
5727 rdata->vif_id = cpu_to_le16(afex_update_params->vif_id);
5728 rdata->afex_default_vlan_change_flg = 1;
5729 rdata->afex_default_vlan =
5730 cpu_to_le16(afex_update_params->afex_default_vlan);
5731 rdata->allowed_priorities_change_flg = 1;
5732 rdata->allowed_priorities = afex_update_params->allowed_priorities;
Merav Sicron55c11942012-11-07 00:45:48 +00005733 rdata->echo = AFEX_UPDATE;
Barak Witkowskia3348722012-04-23 03:04:46 +00005734
5735 /* No need for an explicit memory barrier here as long we would
5736 * need to ensure the ordering of writing to the SPQ element
5737 * and updating of the SPQ producer which involves a memory
5738 * read and we will have to put a full memory barrier there
5739 * (inside bnx2x_sp_post()).
5740 */
5741 DP(BNX2X_MSG_SP,
5742 "afex: sending func_update vif_id 0x%x dvlan 0x%x prio 0x%x\n",
5743 rdata->vif_id,
5744 rdata->afex_default_vlan, rdata->allowed_priorities);
5745
5746 return bnx2x_sp_post(bp, RAMROD_CMD_ID_COMMON_FUNCTION_UPDATE, 0,
5747 U64_HI(data_mapping),
5748 U64_LO(data_mapping), NONE_CONNECTION_TYPE);
5749}
5750
5751static
5752inline int bnx2x_func_send_afex_viflists(struct bnx2x *bp,
5753 struct bnx2x_func_state_params *params)
5754{
5755 struct bnx2x_func_sp_obj *o = params->f_obj;
5756 struct afex_vif_list_ramrod_data *rdata =
5757 (struct afex_vif_list_ramrod_data *)o->afex_rdata;
Yuval Mintz86564c32013-01-23 03:21:50 +00005758 struct bnx2x_func_afex_viflists_params *afex_vif_params =
Barak Witkowskia3348722012-04-23 03:04:46 +00005759 &params->params.afex_viflists;
5760 u64 *p_rdata = (u64 *)rdata;
5761
5762 memset(rdata, 0, sizeof(*rdata));
5763
5764 /* Fill the ramrod data with provided parameters */
Yuval Mintz86564c32013-01-23 03:21:50 +00005765 rdata->vif_list_index = cpu_to_le16(afex_vif_params->vif_list_index);
5766 rdata->func_bit_map = afex_vif_params->func_bit_map;
5767 rdata->afex_vif_list_command = afex_vif_params->afex_vif_list_command;
5768 rdata->func_to_clear = afex_vif_params->func_to_clear;
Barak Witkowskia3348722012-04-23 03:04:46 +00005769
5770 /* send in echo type of sub command */
Yuval Mintz86564c32013-01-23 03:21:50 +00005771 rdata->echo = afex_vif_params->afex_vif_list_command;
Barak Witkowskia3348722012-04-23 03:04:46 +00005772
5773 /* No need for an explicit memory barrier here as long we would
5774 * need to ensure the ordering of writing to the SPQ element
5775 * and updating of the SPQ producer which involves a memory
5776 * read and we will have to put a full memory barrier there
5777 * (inside bnx2x_sp_post()).
5778 */
5779
5780 DP(BNX2X_MSG_SP, "afex: ramrod lists, cmd 0x%x index 0x%x func_bit_map 0x%x func_to_clr 0x%x\n",
5781 rdata->afex_vif_list_command, rdata->vif_list_index,
5782 rdata->func_bit_map, rdata->func_to_clear);
5783
5784 /* this ramrod sends data directly and not through DMA mapping */
5785 return bnx2x_sp_post(bp, RAMROD_CMD_ID_COMMON_AFEX_VIF_LISTS, 0,
5786 U64_HI(*p_rdata), U64_LO(*p_rdata),
5787 NONE_CONNECTION_TYPE);
5788}
5789
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03005790static inline int bnx2x_func_send_stop(struct bnx2x *bp,
5791 struct bnx2x_func_state_params *params)
5792{
5793 return bnx2x_sp_post(bp, RAMROD_CMD_ID_COMMON_FUNCTION_STOP, 0, 0, 0,
5794 NONE_CONNECTION_TYPE);
5795}
5796
Dmitry Kravkov6debea82011-07-19 01:42:04 +00005797static inline int bnx2x_func_send_tx_stop(struct bnx2x *bp,
5798 struct bnx2x_func_state_params *params)
5799{
5800 return bnx2x_sp_post(bp, RAMROD_CMD_ID_COMMON_STOP_TRAFFIC, 0, 0, 0,
5801 NONE_CONNECTION_TYPE);
5802}
5803static inline int bnx2x_func_send_tx_start(struct bnx2x *bp,
5804 struct bnx2x_func_state_params *params)
5805{
5806 struct bnx2x_func_sp_obj *o = params->f_obj;
5807 struct flow_control_configuration *rdata =
5808 (struct flow_control_configuration *)o->rdata;
5809 dma_addr_t data_mapping = o->rdata_mapping;
5810 struct bnx2x_func_tx_start_params *tx_start_params =
5811 &params->params.tx_start;
5812 int i;
5813
5814 memset(rdata, 0, sizeof(*rdata));
5815
5816 rdata->dcb_enabled = tx_start_params->dcb_enabled;
5817 rdata->dcb_version = tx_start_params->dcb_version;
5818 rdata->dont_add_pri_0_en = tx_start_params->dont_add_pri_0_en;
5819
5820 for (i = 0; i < ARRAY_SIZE(rdata->traffic_type_to_priority_cos); i++)
5821 rdata->traffic_type_to_priority_cos[i] =
5822 tx_start_params->traffic_type_to_priority_cos[i];
5823
5824 return bnx2x_sp_post(bp, RAMROD_CMD_ID_COMMON_START_TRAFFIC, 0,
5825 U64_HI(data_mapping),
5826 U64_LO(data_mapping), NONE_CONNECTION_TYPE);
5827}
5828
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03005829static int bnx2x_func_send_cmd(struct bnx2x *bp,
5830 struct bnx2x_func_state_params *params)
5831{
5832 switch (params->cmd) {
5833 case BNX2X_F_CMD_HW_INIT:
5834 return bnx2x_func_hw_init(bp, params);
5835 case BNX2X_F_CMD_START:
5836 return bnx2x_func_send_start(bp, params);
5837 case BNX2X_F_CMD_STOP:
5838 return bnx2x_func_send_stop(bp, params);
5839 case BNX2X_F_CMD_HW_RESET:
5840 return bnx2x_func_hw_reset(bp, params);
Barak Witkowskia3348722012-04-23 03:04:46 +00005841 case BNX2X_F_CMD_AFEX_UPDATE:
5842 return bnx2x_func_send_afex_update(bp, params);
5843 case BNX2X_F_CMD_AFEX_VIFLISTS:
5844 return bnx2x_func_send_afex_viflists(bp, params);
Dmitry Kravkov6debea82011-07-19 01:42:04 +00005845 case BNX2X_F_CMD_TX_STOP:
5846 return bnx2x_func_send_tx_stop(bp, params);
5847 case BNX2X_F_CMD_TX_START:
5848 return bnx2x_func_send_tx_start(bp, params);
Merav Sicron55c11942012-11-07 00:45:48 +00005849 case BNX2X_F_CMD_SWITCH_UPDATE:
5850 return bnx2x_func_send_switch_update(bp, params);
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03005851 default:
5852 BNX2X_ERR("Unknown command: %d\n", params->cmd);
5853 return -EINVAL;
5854 }
5855}
5856
5857void bnx2x_init_func_obj(struct bnx2x *bp,
5858 struct bnx2x_func_sp_obj *obj,
5859 void *rdata, dma_addr_t rdata_mapping,
Barak Witkowskia3348722012-04-23 03:04:46 +00005860 void *afex_rdata, dma_addr_t afex_rdata_mapping,
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03005861 struct bnx2x_func_sp_drv_ops *drv_iface)
5862{
5863 memset(obj, 0, sizeof(*obj));
5864
5865 mutex_init(&obj->one_pending_mutex);
5866
5867 obj->rdata = rdata;
5868 obj->rdata_mapping = rdata_mapping;
Barak Witkowskia3348722012-04-23 03:04:46 +00005869 obj->afex_rdata = afex_rdata;
5870 obj->afex_rdata_mapping = afex_rdata_mapping;
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03005871 obj->send_cmd = bnx2x_func_send_cmd;
5872 obj->check_transition = bnx2x_func_chk_transition;
5873 obj->complete_cmd = bnx2x_func_comp_cmd;
5874 obj->wait_comp = bnx2x_func_wait_comp;
5875
5876 obj->drv = drv_iface;
5877}
5878
5879/**
5880 * bnx2x_func_state_change - perform Function state change transition
5881 *
5882 * @bp: device handle
5883 * @params: parameters to perform the transaction
5884 *
5885 * returns 0 in case of successfully completed transition,
5886 * negative error code in case of failure, positive
5887 * (EBUSY) value if there is a completion to that is
5888 * still pending (possible only if RAMROD_COMP_WAIT is
5889 * not set in params->ramrod_flags for asynchronous
5890 * commands).
5891 */
5892int bnx2x_func_state_change(struct bnx2x *bp,
5893 struct bnx2x_func_state_params *params)
5894{
5895 struct bnx2x_func_sp_obj *o = params->f_obj;
Merav Sicron55c11942012-11-07 00:45:48 +00005896 int rc, cnt = 300;
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03005897 enum bnx2x_func_cmd cmd = params->cmd;
5898 unsigned long *pending = &o->pending;
5899
5900 mutex_lock(&o->one_pending_mutex);
5901
5902 /* Check that the requested transition is legal */
Merav Sicron55c11942012-11-07 00:45:48 +00005903 rc = o->check_transition(bp, o, params);
5904 if ((rc == -EBUSY) &&
5905 (test_bit(RAMROD_RETRY, &params->ramrod_flags))) {
5906 while ((rc == -EBUSY) && (--cnt > 0)) {
5907 mutex_unlock(&o->one_pending_mutex);
5908 msleep(10);
5909 mutex_lock(&o->one_pending_mutex);
5910 rc = o->check_transition(bp, o, params);
5911 }
5912 if (rc == -EBUSY) {
5913 mutex_unlock(&o->one_pending_mutex);
5914 BNX2X_ERR("timeout waiting for previous ramrod completion\n");
5915 return rc;
5916 }
5917 } else if (rc) {
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03005918 mutex_unlock(&o->one_pending_mutex);
Merav Sicron55c11942012-11-07 00:45:48 +00005919 return rc;
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03005920 }
5921
5922 /* Set "pending" bit */
5923 set_bit(cmd, pending);
5924
5925 /* Don't send a command if only driver cleanup was requested */
5926 if (test_bit(RAMROD_DRV_CLR_ONLY, &params->ramrod_flags)) {
5927 bnx2x_func_state_change_comp(bp, o, cmd);
5928 mutex_unlock(&o->one_pending_mutex);
5929 } else {
5930 /* Send a ramrod */
5931 rc = o->send_cmd(bp, params);
5932
5933 mutex_unlock(&o->one_pending_mutex);
5934
5935 if (rc) {
5936 o->next_state = BNX2X_F_STATE_MAX;
5937 clear_bit(cmd, pending);
5938 smp_mb__after_clear_bit();
5939 return rc;
5940 }
5941
5942 if (test_bit(RAMROD_COMP_WAIT, &params->ramrod_flags)) {
5943 rc = o->wait_comp(bp, o, cmd);
5944 if (rc)
5945 return rc;
5946
5947 return 0;
5948 }
5949 }
5950
5951 return !!test_bit(cmd, pending);
Vladislav Zolotarov042181f2011-06-14 01:33:39 +00005952}