/* bnx2x_sp.c: Broadcom Everest network driver.
 *
 * Copyright (c) 2011-2013 Broadcom Corporation
 *
 * Unless you and Broadcom execute a separate written software license
 * agreement governing use of this software, this software is licensed to you
 * under the terms of the GNU General Public License version 2, available
 * at http://www.gnu.org/licenses/old-licenses/gpl-2.0.html (the "GPL").
 *
 * Notwithstanding the above, under no circumstances may you combine this
 * software in any way with any other Broadcom software provided under a
 * license other than the GPL, without Broadcom's express prior written
 * consent.
 *
 * Maintained by: Eilon Greenstein <eilong@broadcom.com>
 * Written by: Vladislav Zolotarov
 *
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/module.h>
#include <linux/crc32.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/crc32c.h>
#include "bnx2x.h"
#include "bnx2x_cmn.h"
#include "bnx2x_sp.h"

#define BNX2X_MAX_EMUL_MULTI		16

/**** Exe Queue interfaces ****/

/**
 * bnx2x_exe_queue_init - init the Exe Queue object
 *
 * @bp: driver handle
 * @o: pointer to the object
 * @exe_len: length
 * @owner: pointer to the owner
 * @validate: validate function pointer
 * @remove: remove function pointer
 * @optimize: optimize function pointer
 * @exec: execute function pointer
 * @get: get function pointer
 */
static inline void bnx2x_exe_queue_init(struct bnx2x *bp,
					struct bnx2x_exe_queue_obj *o,
					int exe_len,
					union bnx2x_qable_obj *owner,
					exe_q_validate validate,
					exe_q_remove remove,
					exe_q_optimize optimize,
					exe_q_execute exec,
					exe_q_get get)
{
	memset(o, 0, sizeof(*o));

	INIT_LIST_HEAD(&o->exe_queue);
	INIT_LIST_HEAD(&o->pending_comp);

	spin_lock_init(&o->lock);

	o->exe_chunk_len = exe_len;
	o->owner = owner;

	/* Owner specific callbacks */
	o->validate = validate;
	o->remove = remove;
	o->optimize = optimize;
	o->execute = exec;
	o->get = get;

	DP(BNX2X_MSG_SP, "Setup the execution queue with the chunk length of %d\n",
	   exe_len);
}
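
/*
 * Illustrative sketch (not part of the driver): a hypothetical owner object
 * would wire its callbacks into the queue roughly like this, assuming
 * my_validate/my_remove/my_optimize/my_execute/my_get are functions matching
 * the exe_q_* typedefs and MY_EXE_CHUNK_LEN is whatever chunk length the
 * owner wants a single ramrod to carry:
 *
 *	bnx2x_exe_queue_init(bp, &o->exe_queue, MY_EXE_CHUNK_LEN,
 *			     (union bnx2x_qable_obj *)o,
 *			     my_validate, my_remove,
 *			     my_optimize, my_execute, my_get);
 */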

static inline void bnx2x_exe_queue_free_elem(struct bnx2x *bp,
					     struct bnx2x_exeq_elem *elem)
{
	DP(BNX2X_MSG_SP, "Deleting an exe_queue element\n");
	kfree(elem);
}

static inline int bnx2x_exe_queue_length(struct bnx2x_exe_queue_obj *o)
{
	struct bnx2x_exeq_elem *elem;
	int cnt = 0;

	spin_lock_bh(&o->lock);

	list_for_each_entry(elem, &o->exe_queue, link)
		cnt++;

	spin_unlock_bh(&o->lock);

	return cnt;
}

/**
 * bnx2x_exe_queue_add - add a new element to the execution queue
 *
 * @bp: driver handle
 * @o: queue
 * @elem: new element to add
 * @restore: true - do not optimize the command
 *
 * If the element is optimized or is illegal, frees it.
 */
static inline int bnx2x_exe_queue_add(struct bnx2x *bp,
				      struct bnx2x_exe_queue_obj *o,
				      struct bnx2x_exeq_elem *elem,
				      bool restore)
{
	int rc;

	spin_lock_bh(&o->lock);

	if (!restore) {
		/* Try to cancel this element's command by optimizing it away */
		rc = o->optimize(bp, o->owner, elem);
		if (rc)
			goto free_and_exit;

		/* Check if this request is ok */
		rc = o->validate(bp, o->owner, elem);
		if (rc) {
			DP(BNX2X_MSG_SP, "Preamble failed: %d\n", rc);
			goto free_and_exit;
		}
	}

	/* If so, add it to the execution queue */
	list_add_tail(&elem->link, &o->exe_queue);

	spin_unlock_bh(&o->lock);

	return 0;

free_and_exit:
	bnx2x_exe_queue_free_elem(bp, elem);

	spin_unlock_bh(&o->lock);

	return rc;
}

static inline void __bnx2x_exe_queue_reset_pending(
	struct bnx2x *bp,
	struct bnx2x_exe_queue_obj *o)
{
	struct bnx2x_exeq_elem *elem;

	while (!list_empty(&o->pending_comp)) {
		elem = list_first_entry(&o->pending_comp,
					struct bnx2x_exeq_elem, link);

		list_del(&elem->link);
		bnx2x_exe_queue_free_elem(bp, elem);
	}
}

static inline void bnx2x_exe_queue_reset_pending(struct bnx2x *bp,
						 struct bnx2x_exe_queue_obj *o)
{
	spin_lock_bh(&o->lock);

	__bnx2x_exe_queue_reset_pending(bp, o);

	spin_unlock_bh(&o->lock);
}

/**
 * bnx2x_exe_queue_step - execute one execution chunk atomically
 *
 * @bp: driver handle
 * @o: queue
 * @ramrod_flags: flags
 *
 * (Atomicity is ensured using the exe_queue->lock).
 */
static inline int bnx2x_exe_queue_step(struct bnx2x *bp,
				       struct bnx2x_exe_queue_obj *o,
				       unsigned long *ramrod_flags)
{
	struct bnx2x_exeq_elem *elem, spacer;
	int cur_len = 0, rc;

	memset(&spacer, 0, sizeof(spacer));

	spin_lock_bh(&o->lock);

	/*
	 * The next step should not be performed until the current one is
	 * finished, unless the DRV_CLEAR_ONLY bit is set. In that case we
	 * just want to properly clear object internals without sending any
	 * command to the FW, which also implies there won't be any
	 * completion to clear the 'pending' list.
	 */
	if (!list_empty(&o->pending_comp)) {
		if (test_bit(RAMROD_DRV_CLR_ONLY, ramrod_flags)) {
			DP(BNX2X_MSG_SP, "RAMROD_DRV_CLR_ONLY requested: resetting a pending_comp list\n");
			__bnx2x_exe_queue_reset_pending(bp, o);
		} else {
			spin_unlock_bh(&o->lock);
			return 1;
		}
	}

	/*
	 * Run through the pending commands list and create the next
	 * execution chunk.
	 */
	while (!list_empty(&o->exe_queue)) {
		elem = list_first_entry(&o->exe_queue, struct bnx2x_exeq_elem,
					link);
		WARN_ON(!elem->cmd_len);

		if (cur_len + elem->cmd_len <= o->exe_chunk_len) {
			cur_len += elem->cmd_len;
			/*
			 * Prevent both lists from being empty while moving an
			 * element. This will allow the call of
			 * bnx2x_exe_queue_empty() without locking.
			 */
			list_add_tail(&spacer.link, &o->pending_comp);
			mb();
			list_move_tail(&elem->link, &o->pending_comp);
			list_del(&spacer.link);
		} else
			break;
	}

	/* Sanity check */
	if (!cur_len) {
		spin_unlock_bh(&o->lock);
		return 0;
	}

	rc = o->execute(bp, o->owner, &o->pending_comp, ramrod_flags);
	if (rc < 0)
		/*
		 * In case of an error return the commands back to the queue
		 * and reset the pending_comp.
		 */
		list_splice_init(&o->pending_comp, &o->exe_queue);
	else if (!rc)
		/*
		 * If zero is returned, it means there are no outstanding
		 * pending completions and we may dismiss the pending list.
		 */
		__bnx2x_exe_queue_reset_pending(bp, o);

	spin_unlock_bh(&o->lock);
	return rc;
}
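
/*
 * Note on the spacer above (illustrative, follows from the existing code
 * comment): while an element is moved from exe_queue to pending_comp there is
 * always at least one node on one of the two lists, so the lockless
 * bnx2x_exe_queue_empty() below can never observe both lists empty and
 * wrongly report an idle queue mid-move. A hypothetical interleaving without
 * the spacer:
 *
 *	CPU0: list_del(&elem->link);		- exe_queue now empty
 *	CPU1: bnx2x_exe_queue_empty() -> true	- spurious "empty"
 *	CPU0: list_add_tail(&elem->link, &o->pending_comp);
 *
 * The spacer entry plus the memory barrier close exactly this window.
 */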

static inline bool bnx2x_exe_queue_empty(struct bnx2x_exe_queue_obj *o)
{
	bool empty = list_empty(&o->exe_queue);

	/* Don't reorder!!! */
	mb();

	return empty && list_empty(&o->pending_comp);
}

static inline struct bnx2x_exeq_elem *bnx2x_exe_queue_alloc_elem(
	struct bnx2x *bp)
{
	DP(BNX2X_MSG_SP, "Allocating a new exe_queue element\n");
	return kzalloc(sizeof(struct bnx2x_exeq_elem), GFP_ATOMIC);
}

/************************ raw_obj functions ***********************************/
static bool bnx2x_raw_check_pending(struct bnx2x_raw_obj *o)
{
	return !!test_bit(o->state, o->pstate);
}

static void bnx2x_raw_clear_pending(struct bnx2x_raw_obj *o)
{
	smp_mb__before_clear_bit();
	clear_bit(o->state, o->pstate);
	smp_mb__after_clear_bit();
}

static void bnx2x_raw_set_pending(struct bnx2x_raw_obj *o)
{
	smp_mb__before_clear_bit();
	set_bit(o->state, o->pstate);
	smp_mb__after_clear_bit();
}
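
/*
 * Illustrative sketch of the pending-bit handshake (an assumption drawn from
 * how these helpers are used later in this file, not new driver code):
 *
 *	raw->set_pending(raw);		- mark a command as in flight
 *	bnx2x_sp_post(bp, ...);		- send the ramrod to FW
 *	(completion handler runs)
 *	raw->clear_pending(raw);	- FW answered
 *
 * while a waiter blocks in bnx2x_state_wait()/bnx2x_raw_wait() below until
 * the bit is observed clear.
 */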

/**
 * bnx2x_state_wait - wait until the given bit (state) is cleared
 *
 * @bp: device handle
 * @state: state which is to be cleared
 * @pstate: state buffer
 *
 */
static inline int bnx2x_state_wait(struct bnx2x *bp, int state,
				   unsigned long *pstate)
{
	/* can take a while if any port is running */
	int cnt = 5000;

	if (CHIP_REV_IS_EMUL(bp))
		cnt *= 20;

	DP(BNX2X_MSG_SP, "waiting for state to become %d\n", state);

	might_sleep();
	while (cnt--) {
		if (!test_bit(state, pstate)) {
#ifdef BNX2X_STOP_ON_ERROR
			DP(BNX2X_MSG_SP, "exit (cnt %d)\n", 5000 - cnt);
#endif
			return 0;
		}

		usleep_range(1000, 2000);

		if (bp->panic)
			return -EIO;
	}

	/* timeout! */
	BNX2X_ERR("timeout waiting for state %d\n", state);
#ifdef BNX2X_STOP_ON_ERROR
	bnx2x_panic();
#endif

	return -EBUSY;
}

static int bnx2x_raw_wait(struct bnx2x *bp, struct bnx2x_raw_obj *raw)
{
	return bnx2x_state_wait(bp, raw->state, raw->pstate);
}

/***************** Classification verbs: Set/Del MAC/VLAN/VLAN-MAC ************/
/* credit handling callbacks */
static bool bnx2x_get_cam_offset_mac(struct bnx2x_vlan_mac_obj *o, int *offset)
{
	struct bnx2x_credit_pool_obj *mp = o->macs_pool;

	WARN_ON(!mp);

	return mp->get_entry(mp, offset);
}

static bool bnx2x_get_credit_mac(struct bnx2x_vlan_mac_obj *o)
{
	struct bnx2x_credit_pool_obj *mp = o->macs_pool;

	WARN_ON(!mp);

	return mp->get(mp, 1);
}

static bool bnx2x_get_cam_offset_vlan(struct bnx2x_vlan_mac_obj *o, int *offset)
{
	struct bnx2x_credit_pool_obj *vp = o->vlans_pool;

	WARN_ON(!vp);

	return vp->get_entry(vp, offset);
}

static bool bnx2x_get_credit_vlan(struct bnx2x_vlan_mac_obj *o)
{
	struct bnx2x_credit_pool_obj *vp = o->vlans_pool;

	WARN_ON(!vp);

	return vp->get(vp, 1);
}

static bool bnx2x_get_credit_vlan_mac(struct bnx2x_vlan_mac_obj *o)
{
	struct bnx2x_credit_pool_obj *mp = o->macs_pool;
	struct bnx2x_credit_pool_obj *vp = o->vlans_pool;

	if (!mp->get(mp, 1))
		return false;

	if (!vp->get(vp, 1)) {
		mp->put(mp, 1);
		return false;
	}

	return true;
}

static bool bnx2x_put_cam_offset_mac(struct bnx2x_vlan_mac_obj *o, int offset)
{
	struct bnx2x_credit_pool_obj *mp = o->macs_pool;

	return mp->put_entry(mp, offset);
}

static bool bnx2x_put_credit_mac(struct bnx2x_vlan_mac_obj *o)
{
	struct bnx2x_credit_pool_obj *mp = o->macs_pool;

	return mp->put(mp, 1);
}

static bool bnx2x_put_cam_offset_vlan(struct bnx2x_vlan_mac_obj *o, int offset)
{
	struct bnx2x_credit_pool_obj *vp = o->vlans_pool;

	return vp->put_entry(vp, offset);
}

static bool bnx2x_put_credit_vlan(struct bnx2x_vlan_mac_obj *o)
{
	struct bnx2x_credit_pool_obj *vp = o->vlans_pool;

	return vp->put(vp, 1);
}

static bool bnx2x_put_credit_vlan_mac(struct bnx2x_vlan_mac_obj *o)
{
	struct bnx2x_credit_pool_obj *mp = o->macs_pool;
	struct bnx2x_credit_pool_obj *vp = o->vlans_pool;

	if (!mp->put(mp, 1))
		return false;

	if (!vp->put(vp, 1)) {
		mp->get(mp, 1);
		return false;
	}

	return true;
}
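
/*
 * Note (illustrative, follows from the callbacks above): a VLAN-MAC pair
 * consumes one credit from *each* pool, and if the second pool runs dry the
 * first credit is rolled back, so a caller never observes a half-taken pair:
 *
 *	if (!o->get_credit(o))		- both pools or neither
 *		return -EINVAL;
 *	...
 *	o->put_credit(o);		- returns one credit to each pool
 *
 * get_credit/put_credit here are the object function pointers these
 * callbacks are installed into (an assumption based on their use later in
 * this file).
 */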

static int bnx2x_get_n_elements(struct bnx2x *bp, struct bnx2x_vlan_mac_obj *o,
				int n, u8 *base, u8 stride, u8 size)
{
	struct bnx2x_vlan_mac_registry_elem *pos;
	u8 *next = base;
	int counter = 0;

	/* traverse list */
	list_for_each_entry(pos, &o->head, link) {
		if (counter < n) {
			memcpy(next, &pos->u, size);
			counter++;
			DP(BNX2X_MSG_SP, "copied element number %d to address %p element was:\n",
			   counter, next);
			next += stride + size;
		}
	}
	return counter * ETH_ALEN;
}
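
/*
 * Illustrative layout note (derived from the loop above; the numbers are
 * hypothetical): element i is written at base + i * (stride + size), i.e.
 * callers can leave a per-entry gap of 'stride' bytes between copied
 * records. For example, with size = ETH_ALEN (6) and stride = 2, the MACs
 * land at offsets 0, 8, 16, ... of the caller's buffer.
 */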
464
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +0300465/* check_add() callbacks */
Merav Sicron51c1a582012-03-18 10:33:38 +0000466static int bnx2x_check_mac_add(struct bnx2x *bp,
467 struct bnx2x_vlan_mac_obj *o,
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +0300468 union bnx2x_classification_ramrod_data *data)
469{
470 struct bnx2x_vlan_mac_registry_elem *pos;
471
Merav Sicron51c1a582012-03-18 10:33:38 +0000472 DP(BNX2X_MSG_SP, "Checking MAC %pM for ADD command\n", data->mac.mac);
473
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +0300474 if (!is_valid_ether_addr(data->mac.mac))
475 return -EINVAL;
476
477 /* Check if a requested MAC already exists */
478 list_for_each_entry(pos, &o->head, link)
479 if (!memcmp(data->mac.mac, pos->u.mac.mac, ETH_ALEN))
480 return -EEXIST;
Vladislav Zolotarov042181f2011-06-14 01:33:39 +0000481
482 return 0;
483}
484
Merav Sicron51c1a582012-03-18 10:33:38 +0000485static int bnx2x_check_vlan_add(struct bnx2x *bp,
486 struct bnx2x_vlan_mac_obj *o,
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +0300487 union bnx2x_classification_ramrod_data *data)
Vladislav Zolotarov042181f2011-06-14 01:33:39 +0000488{
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +0300489 struct bnx2x_vlan_mac_registry_elem *pos;
Vladislav Zolotarov042181f2011-06-14 01:33:39 +0000490
Merav Sicron51c1a582012-03-18 10:33:38 +0000491 DP(BNX2X_MSG_SP, "Checking VLAN %d for ADD command\n", data->vlan.vlan);
492
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +0300493 list_for_each_entry(pos, &o->head, link)
494 if (data->vlan.vlan == pos->u.vlan.vlan)
495 return -EEXIST;
496
497 return 0;
Vladislav Zolotarov042181f2011-06-14 01:33:39 +0000498}
499
Merav Sicron51c1a582012-03-18 10:33:38 +0000500static int bnx2x_check_vlan_mac_add(struct bnx2x *bp,
501 struct bnx2x_vlan_mac_obj *o,
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +0300502 union bnx2x_classification_ramrod_data *data)
Vladislav Zolotarov042181f2011-06-14 01:33:39 +0000503{
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +0300504 struct bnx2x_vlan_mac_registry_elem *pos;
Vladislav Zolotarov042181f2011-06-14 01:33:39 +0000505
Merav Sicron51c1a582012-03-18 10:33:38 +0000506 DP(BNX2X_MSG_SP, "Checking VLAN_MAC (%pM, %d) for ADD command\n",
507 data->vlan_mac.mac, data->vlan_mac.vlan);
508
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +0300509 list_for_each_entry(pos, &o->head, link)
510 if ((data->vlan_mac.vlan == pos->u.vlan_mac.vlan) &&
511 (!memcmp(data->vlan_mac.mac, pos->u.vlan_mac.mac,
512 ETH_ALEN)))
513 return -EEXIST;
514
515 return 0;
516}
517
518
519/* check_del() callbacks */
520static struct bnx2x_vlan_mac_registry_elem *
Merav Sicron51c1a582012-03-18 10:33:38 +0000521 bnx2x_check_mac_del(struct bnx2x *bp,
522 struct bnx2x_vlan_mac_obj *o,
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +0300523 union bnx2x_classification_ramrod_data *data)
524{
525 struct bnx2x_vlan_mac_registry_elem *pos;
526
Merav Sicron51c1a582012-03-18 10:33:38 +0000527 DP(BNX2X_MSG_SP, "Checking MAC %pM for DEL command\n", data->mac.mac);
528
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +0300529 list_for_each_entry(pos, &o->head, link)
530 if (!memcmp(data->mac.mac, pos->u.mac.mac, ETH_ALEN))
531 return pos;
532
533 return NULL;
534}
535
536static struct bnx2x_vlan_mac_registry_elem *
Merav Sicron51c1a582012-03-18 10:33:38 +0000537 bnx2x_check_vlan_del(struct bnx2x *bp,
538 struct bnx2x_vlan_mac_obj *o,
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +0300539 union bnx2x_classification_ramrod_data *data)
540{
541 struct bnx2x_vlan_mac_registry_elem *pos;
542
Merav Sicron51c1a582012-03-18 10:33:38 +0000543 DP(BNX2X_MSG_SP, "Checking VLAN %d for DEL command\n", data->vlan.vlan);
544
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +0300545 list_for_each_entry(pos, &o->head, link)
546 if (data->vlan.vlan == pos->u.vlan.vlan)
547 return pos;
548
549 return NULL;
550}
551
552static struct bnx2x_vlan_mac_registry_elem *
Merav Sicron51c1a582012-03-18 10:33:38 +0000553 bnx2x_check_vlan_mac_del(struct bnx2x *bp,
554 struct bnx2x_vlan_mac_obj *o,
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +0300555 union bnx2x_classification_ramrod_data *data)
556{
557 struct bnx2x_vlan_mac_registry_elem *pos;
558
Merav Sicron51c1a582012-03-18 10:33:38 +0000559 DP(BNX2X_MSG_SP, "Checking VLAN_MAC (%pM, %d) for DEL command\n",
560 data->vlan_mac.mac, data->vlan_mac.vlan);
561
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +0300562 list_for_each_entry(pos, &o->head, link)
563 if ((data->vlan_mac.vlan == pos->u.vlan_mac.vlan) &&
564 (!memcmp(data->vlan_mac.mac, pos->u.vlan_mac.mac,
565 ETH_ALEN)))
566 return pos;
567
568 return NULL;
569}
570
571/* check_move() callback */
Merav Sicron51c1a582012-03-18 10:33:38 +0000572static bool bnx2x_check_move(struct bnx2x *bp,
573 struct bnx2x_vlan_mac_obj *src_o,
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +0300574 struct bnx2x_vlan_mac_obj *dst_o,
575 union bnx2x_classification_ramrod_data *data)
576{
577 struct bnx2x_vlan_mac_registry_elem *pos;
578 int rc;
579
580 /* Check if we can delete the requested configuration from the first
581 * object.
582 */
Merav Sicron51c1a582012-03-18 10:33:38 +0000583 pos = src_o->check_del(bp, src_o, data);
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +0300584
585 /* check if configuration can be added */
Merav Sicron51c1a582012-03-18 10:33:38 +0000586 rc = dst_o->check_add(bp, dst_o, data);
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +0300587
588 /* If this classification can not be added (is already set)
589 * or can't be deleted - return an error.
590 */
591 if (rc || !pos)
592 return false;
593
594 return true;
595}
596
597static bool bnx2x_check_move_always_err(
Merav Sicron51c1a582012-03-18 10:33:38 +0000598 struct bnx2x *bp,
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +0300599 struct bnx2x_vlan_mac_obj *src_o,
600 struct bnx2x_vlan_mac_obj *dst_o,
601 union bnx2x_classification_ramrod_data *data)
602{
603 return false;
604}
605
606
607static inline u8 bnx2x_vlan_mac_get_rx_tx_flag(struct bnx2x_vlan_mac_obj *o)
608{
609 struct bnx2x_raw_obj *raw = &o->raw;
610 u8 rx_tx_flag = 0;
611
612 if ((raw->obj_type == BNX2X_OBJ_TYPE_TX) ||
613 (raw->obj_type == BNX2X_OBJ_TYPE_RX_TX))
614 rx_tx_flag |= ETH_CLASSIFY_CMD_HEADER_TX_CMD;
615
616 if ((raw->obj_type == BNX2X_OBJ_TYPE_RX) ||
617 (raw->obj_type == BNX2X_OBJ_TYPE_RX_TX))
618 rx_tx_flag |= ETH_CLASSIFY_CMD_HEADER_RX_CMD;
619
620 return rx_tx_flag;
621}
622
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +0300623
Barak Witkowskia3348722012-04-23 03:04:46 +0000624void bnx2x_set_mac_in_nig(struct bnx2x *bp,
625 bool add, unsigned char *dev_addr, int index)
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +0300626{
627 u32 wb_data[2];
628 u32 reg_offset = BP_PORT(bp) ? NIG_REG_LLH1_FUNC_MEM :
629 NIG_REG_LLH0_FUNC_MEM;
630
Barak Witkowskia3348722012-04-23 03:04:46 +0000631 if (!IS_MF_SI(bp) && !IS_MF_AFEX(bp))
632 return;
633
634 if (index > BNX2X_LLH_CAM_MAX_PF_LINE)
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +0300635 return;
636
637 DP(BNX2X_MSG_SP, "Going to %s LLH configuration at entry %d\n",
638 (add ? "ADD" : "DELETE"), index);
639
640 if (add) {
641 /* LLH_FUNC_MEM is a u64 WB register */
642 reg_offset += 8*index;
643
644 wb_data[0] = ((dev_addr[2] << 24) | (dev_addr[3] << 16) |
645 (dev_addr[4] << 8) | dev_addr[5]);
646 wb_data[1] = ((dev_addr[0] << 8) | dev_addr[1]);
647
648 REG_WR_DMAE(bp, reg_offset, wb_data, 2);
649 }
650
651 REG_WR(bp, (BP_PORT(bp) ? NIG_REG_LLH1_FUNC_MEM_ENABLE :
652 NIG_REG_LLH0_FUNC_MEM_ENABLE) + 4*index, add);
653}
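
/*
 * Illustrative packing example (hypothetical address, derived from the code
 * above): for dev_addr = 00:11:22:33:44:55 the write-back pair is
 *
 *	wb_data[0] = 0x22334455		- bytes 2..5 (low half of the u64)
 *	wb_data[1] = 0x00000011		- bytes 0..1 (high half)
 *
 * i.e. the MAC is stored across a single u64 LLH CAM entry with the
 * most-significant bytes in the high word.
 */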

/**
 * bnx2x_vlan_mac_set_cmd_hdr_e2 - set a header in a single classify ramrod
 *
 * @bp: device handle
 * @o: queue for which we want to configure this rule
 * @add: if true the command is an ADD command, DEL otherwise
 * @opcode: CLASSIFY_RULE_OPCODE_XXX
 * @hdr: pointer to a header to setup
 *
 */
static inline void bnx2x_vlan_mac_set_cmd_hdr_e2(struct bnx2x *bp,
	struct bnx2x_vlan_mac_obj *o, bool add, int opcode,
	struct eth_classify_cmd_header *hdr)
{
	struct bnx2x_raw_obj *raw = &o->raw;

	hdr->client_id = raw->cl_id;
	hdr->func_id = raw->func_id;

	/* Rx or/and Tx (internal switching) configuration ? */
	hdr->cmd_general_data |=
		bnx2x_vlan_mac_get_rx_tx_flag(o);

	if (add)
		hdr->cmd_general_data |= ETH_CLASSIFY_CMD_HEADER_IS_ADD;

	hdr->cmd_general_data |=
		(opcode << ETH_CLASSIFY_CMD_HEADER_OPCODE_SHIFT);
}

/**
 * bnx2x_vlan_mac_set_rdata_hdr_e2 - set the classify ramrod data header
 *
 * @cid: connection id
 * @type: BNX2X_FILTER_XXX_PENDING
 * @hdr: pointer to header to setup
 * @rule_cnt: number of rules in this ramrod data
 *
 * Currently we always configure one rule, and the echo field is set to
 * contain both a CID and an opcode type.
 */
static inline void bnx2x_vlan_mac_set_rdata_hdr_e2(u32 cid, int type,
				struct eth_classify_header *hdr, int rule_cnt)
{
	hdr->echo = cpu_to_le32((cid & BNX2X_SWCID_MASK) |
				(type << BNX2X_SWCID_SHIFT));
	hdr->rule_cnt = (u8)rule_cnt;
}
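
/*
 * Illustrative echo-field layout (hypothetical values; the masks and shifts
 * are the ones used above): with BNX2X_SWCID_MASK covering the low bits and
 * the opcode type shifted above them, e.g. cid = 0x30 and
 * type = BNX2X_FILTER_MAC_PENDING would yield
 *
 *	echo = (0x30 & BNX2X_SWCID_MASK) |
 *	       (BNX2X_FILTER_MAC_PENDING << BNX2X_SWCID_SHIFT);
 *
 * so the completion handler can recover both the connection and the command
 * type from a single 32-bit cookie.
 */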

/* hw_config() callbacks */
static void bnx2x_set_one_mac_e2(struct bnx2x *bp,
				 struct bnx2x_vlan_mac_obj *o,
				 struct bnx2x_exeq_elem *elem, int rule_idx,
				 int cam_offset)
{
	struct bnx2x_raw_obj *raw = &o->raw;
	struct eth_classify_rules_ramrod_data *data =
		(struct eth_classify_rules_ramrod_data *)(raw->rdata);
	int rule_cnt = rule_idx + 1, cmd = elem->cmd_data.vlan_mac.cmd;
	union eth_classify_rule_cmd *rule_entry = &data->rules[rule_idx];
	bool add = (cmd == BNX2X_VLAN_MAC_ADD) ? true : false;
	unsigned long *vlan_mac_flags = &elem->cmd_data.vlan_mac.vlan_mac_flags;
	u8 *mac = elem->cmd_data.vlan_mac.u.mac.mac;

	/*
	 * Set LLH CAM entry: currently only iSCSI and ETH macs are
	 * relevant. In addition, the current implementation is tuned for a
	 * single ETH MAC.
	 *
	 * When a multiple unicast ETH MACs PF configuration in switch
	 * independent mode is required (NetQ, multiple netdev MACs,
	 * etc.), consider better utilisation of the 8 per function MAC
	 * entries in the LLH register. There are also
	 * NIG_REG_P[01]_LLH_FUNC_MEM2 registers that complete the
	 * total number of CAM entries to 16.
	 *
	 * Currently we won't configure NIG for MACs other than a primary ETH
	 * MAC and iSCSI L2 MAC.
	 *
	 * If this MAC is moving from one Queue to another, no need to change
	 * NIG configuration.
	 */
	if (cmd != BNX2X_VLAN_MAC_MOVE) {
		if (test_bit(BNX2X_ISCSI_ETH_MAC, vlan_mac_flags))
			bnx2x_set_mac_in_nig(bp, add, mac,
					     BNX2X_LLH_CAM_ISCSI_ETH_LINE);
		else if (test_bit(BNX2X_ETH_MAC, vlan_mac_flags))
			bnx2x_set_mac_in_nig(bp, add, mac,
					     BNX2X_LLH_CAM_ETH_LINE);
	}

	/* Reset the ramrod data buffer for the first rule */
	if (rule_idx == 0)
		memset(data, 0, sizeof(*data));

	/* Setup a command header */
	bnx2x_vlan_mac_set_cmd_hdr_e2(bp, o, add, CLASSIFY_RULE_OPCODE_MAC,
				      &rule_entry->mac.header);

	DP(BNX2X_MSG_SP, "About to %s MAC %pM for Queue %d\n",
	   (add ? "add" : "delete"), mac, raw->cl_id);

	/* Set a MAC itself */
	bnx2x_set_fw_mac_addr(&rule_entry->mac.mac_msb,
			      &rule_entry->mac.mac_mid,
			      &rule_entry->mac.mac_lsb, mac);

	/* MOVE: Add a rule that will add this MAC to the target Queue */
	if (cmd == BNX2X_VLAN_MAC_MOVE) {
		rule_entry++;
		rule_cnt++;

		/* Setup ramrod data */
		bnx2x_vlan_mac_set_cmd_hdr_e2(bp,
					elem->cmd_data.vlan_mac.target_obj,
					true, CLASSIFY_RULE_OPCODE_MAC,
					&rule_entry->mac.header);

		/* Set a MAC itself */
		bnx2x_set_fw_mac_addr(&rule_entry->mac.mac_msb,
				      &rule_entry->mac.mac_mid,
				      &rule_entry->mac.mac_lsb, mac);
	}

	/* Set the ramrod data header */
	/* TODO: take this to the higher level in order to prevent multiple
	   writing */
	bnx2x_vlan_mac_set_rdata_hdr_e2(raw->cid, raw->state, &data->header,
					rule_cnt);
}

/**
 * bnx2x_vlan_mac_set_rdata_hdr_e1x - set a header in a single classify ramrod
 *
 * @bp: device handle
 * @o: queue
 * @type: BNX2X_FILTER_XXX_PENDING
 * @cam_offset: offset in cam memory
 * @hdr: pointer to a header to setup
 *
 * E1/E1H
 */
static inline void bnx2x_vlan_mac_set_rdata_hdr_e1x(struct bnx2x *bp,
	struct bnx2x_vlan_mac_obj *o, int type, int cam_offset,
	struct mac_configuration_hdr *hdr)
{
	struct bnx2x_raw_obj *r = &o->raw;

	hdr->length = 1;
	hdr->offset = (u8)cam_offset;
	hdr->client_id = cpu_to_le16(0xff);
	hdr->echo = cpu_to_le32((r->cid & BNX2X_SWCID_MASK) |
				(type << BNX2X_SWCID_SHIFT));
}

static inline void bnx2x_vlan_mac_set_cfg_entry_e1x(struct bnx2x *bp,
	struct bnx2x_vlan_mac_obj *o, bool add, int opcode, u8 *mac,
	u16 vlan_id, struct mac_configuration_entry *cfg_entry)
{
	struct bnx2x_raw_obj *r = &o->raw;
	u32 cl_bit_vec = (1 << r->cl_id);

	cfg_entry->clients_bit_vector = cpu_to_le32(cl_bit_vec);
	cfg_entry->pf_id = r->func_id;
	cfg_entry->vlan_id = cpu_to_le16(vlan_id);

	if (add) {
		SET_FLAG(cfg_entry->flags, MAC_CONFIGURATION_ENTRY_ACTION_TYPE,
			 T_ETH_MAC_COMMAND_SET);
		SET_FLAG(cfg_entry->flags,
			 MAC_CONFIGURATION_ENTRY_VLAN_FILTERING_MODE, opcode);

		/* Set a MAC in a ramrod data */
		bnx2x_set_fw_mac_addr(&cfg_entry->msb_mac_addr,
				      &cfg_entry->middle_mac_addr,
				      &cfg_entry->lsb_mac_addr, mac);
	} else
		SET_FLAG(cfg_entry->flags, MAC_CONFIGURATION_ENTRY_ACTION_TYPE,
			 T_ETH_MAC_COMMAND_INVALIDATE);
}

static inline void bnx2x_vlan_mac_set_rdata_e1x(struct bnx2x *bp,
	struct bnx2x_vlan_mac_obj *o, int type, int cam_offset, bool add,
	u8 *mac, u16 vlan_id, int opcode, struct mac_configuration_cmd *config)
{
	struct mac_configuration_entry *cfg_entry = &config->config_table[0];
	struct bnx2x_raw_obj *raw = &o->raw;

	bnx2x_vlan_mac_set_rdata_hdr_e1x(bp, o, type, cam_offset,
					 &config->hdr);
	bnx2x_vlan_mac_set_cfg_entry_e1x(bp, o, add, opcode, mac, vlan_id,
					 cfg_entry);

	DP(BNX2X_MSG_SP, "%s MAC %pM CLID %d CAM offset %d\n",
	   (add ? "setting" : "clearing"),
	   mac, raw->cl_id, cam_offset);
}

/**
 * bnx2x_set_one_mac_e1x - fill a single MAC rule ramrod data
 *
 * @bp: device handle
 * @o: bnx2x_vlan_mac_obj
 * @elem: bnx2x_exeq_elem
 * @rule_idx: rule_idx
 * @cam_offset: cam_offset
 */
static void bnx2x_set_one_mac_e1x(struct bnx2x *bp,
				  struct bnx2x_vlan_mac_obj *o,
				  struct bnx2x_exeq_elem *elem, int rule_idx,
				  int cam_offset)
{
	struct bnx2x_raw_obj *raw = &o->raw;
	struct mac_configuration_cmd *config =
		(struct mac_configuration_cmd *)(raw->rdata);
	/*
	 * 57710 and 57711 do not support MOVE command,
	 * so it's either ADD or DEL
	 */
	bool add = (elem->cmd_data.vlan_mac.cmd == BNX2X_VLAN_MAC_ADD) ?
		true : false;

	/* Reset the ramrod data buffer */
	memset(config, 0, sizeof(*config));

	bnx2x_vlan_mac_set_rdata_e1x(bp, o, raw->state,
				     cam_offset, add,
				     elem->cmd_data.vlan_mac.u.mac.mac, 0,
				     ETH_VLAN_FILTER_ANY_VLAN, config);
}

static void bnx2x_set_one_vlan_e2(struct bnx2x *bp,
				  struct bnx2x_vlan_mac_obj *o,
				  struct bnx2x_exeq_elem *elem, int rule_idx,
				  int cam_offset)
{
	struct bnx2x_raw_obj *raw = &o->raw;
	struct eth_classify_rules_ramrod_data *data =
		(struct eth_classify_rules_ramrod_data *)(raw->rdata);
	int rule_cnt = rule_idx + 1;
	union eth_classify_rule_cmd *rule_entry = &data->rules[rule_idx];
	enum bnx2x_vlan_mac_cmd cmd = elem->cmd_data.vlan_mac.cmd;
	bool add = (cmd == BNX2X_VLAN_MAC_ADD) ? true : false;
	u16 vlan = elem->cmd_data.vlan_mac.u.vlan.vlan;

	/* Reset the ramrod data buffer for the first rule */
	if (rule_idx == 0)
		memset(data, 0, sizeof(*data));

	/* Set a rule header */
	bnx2x_vlan_mac_set_cmd_hdr_e2(bp, o, add, CLASSIFY_RULE_OPCODE_VLAN,
				      &rule_entry->vlan.header);

	DP(BNX2X_MSG_SP, "About to %s VLAN %d\n", (add ? "add" : "delete"),
	   vlan);

	/* Set a VLAN itself */
	rule_entry->vlan.vlan = cpu_to_le16(vlan);

	/* MOVE: Add a rule that will add this VLAN to the target Queue */
	if (cmd == BNX2X_VLAN_MAC_MOVE) {
		rule_entry++;
		rule_cnt++;

		/* Setup ramrod data */
		bnx2x_vlan_mac_set_cmd_hdr_e2(bp,
					elem->cmd_data.vlan_mac.target_obj,
					true, CLASSIFY_RULE_OPCODE_VLAN,
					&rule_entry->vlan.header);

		/* Set a VLAN itself */
		rule_entry->vlan.vlan = cpu_to_le16(vlan);
	}

	/* Set the ramrod data header */
	/* TODO: take this to the higher level in order to prevent multiple
	   writing */
	bnx2x_vlan_mac_set_rdata_hdr_e2(raw->cid, raw->state, &data->header,
					rule_cnt);
}

static void bnx2x_set_one_vlan_mac_e2(struct bnx2x *bp,
				      struct bnx2x_vlan_mac_obj *o,
				      struct bnx2x_exeq_elem *elem,
				      int rule_idx, int cam_offset)
{
	struct bnx2x_raw_obj *raw = &o->raw;
	struct eth_classify_rules_ramrod_data *data =
		(struct eth_classify_rules_ramrod_data *)(raw->rdata);
	int rule_cnt = rule_idx + 1;
	union eth_classify_rule_cmd *rule_entry = &data->rules[rule_idx];
	enum bnx2x_vlan_mac_cmd cmd = elem->cmd_data.vlan_mac.cmd;
	bool add = (cmd == BNX2X_VLAN_MAC_ADD) ? true : false;
	u16 vlan = elem->cmd_data.vlan_mac.u.vlan_mac.vlan;
	u8 *mac = elem->cmd_data.vlan_mac.u.vlan_mac.mac;

	/* Reset the ramrod data buffer for the first rule */
	if (rule_idx == 0)
		memset(data, 0, sizeof(*data));

	/* Set a rule header */
	bnx2x_vlan_mac_set_cmd_hdr_e2(bp, o, add, CLASSIFY_RULE_OPCODE_PAIR,
				      &rule_entry->pair.header);

	/* Set VLAN and MAC themselves */
	rule_entry->pair.vlan = cpu_to_le16(vlan);
	bnx2x_set_fw_mac_addr(&rule_entry->pair.mac_msb,
			      &rule_entry->pair.mac_mid,
			      &rule_entry->pair.mac_lsb, mac);

	/* MOVE: Add a rule that will add this pair to the target Queue */
	if (cmd == BNX2X_VLAN_MAC_MOVE) {
		rule_entry++;
		rule_cnt++;

		/* Setup ramrod data */
		bnx2x_vlan_mac_set_cmd_hdr_e2(bp,
					elem->cmd_data.vlan_mac.target_obj,
					true, CLASSIFY_RULE_OPCODE_PAIR,
					&rule_entry->pair.header);

		/* Set a VLAN itself */
		rule_entry->pair.vlan = cpu_to_le16(vlan);
		bnx2x_set_fw_mac_addr(&rule_entry->pair.mac_msb,
				      &rule_entry->pair.mac_mid,
				      &rule_entry->pair.mac_lsb, mac);
	}

	/* Set the ramrod data header */
	/* TODO: take this to the higher level in order to prevent multiple
	   writing */
	bnx2x_vlan_mac_set_rdata_hdr_e2(raw->cid, raw->state, &data->header,
					rule_cnt);
}

/**
 * bnx2x_set_one_vlan_mac_e1h - fill a single VLAN-MAC rule ramrod data
 *
 * @bp: device handle
 * @o: bnx2x_vlan_mac_obj
 * @elem: bnx2x_exeq_elem
 * @rule_idx: rule_idx
 * @cam_offset: cam_offset
 */
static void bnx2x_set_one_vlan_mac_e1h(struct bnx2x *bp,
				       struct bnx2x_vlan_mac_obj *o,
				       struct bnx2x_exeq_elem *elem,
				       int rule_idx, int cam_offset)
{
	struct bnx2x_raw_obj *raw = &o->raw;
	struct mac_configuration_cmd *config =
		(struct mac_configuration_cmd *)(raw->rdata);
	/*
	 * 57710 and 57711 do not support MOVE command,
	 * so it's either ADD or DEL
	 */
	bool add = (elem->cmd_data.vlan_mac.cmd == BNX2X_VLAN_MAC_ADD) ?
		true : false;

	/* Reset the ramrod data buffer */
	memset(config, 0, sizeof(*config));

	bnx2x_vlan_mac_set_rdata_e1x(bp, o, BNX2X_FILTER_VLAN_MAC_PENDING,
				     cam_offset, add,
				     elem->cmd_data.vlan_mac.u.vlan_mac.mac,
				     elem->cmd_data.vlan_mac.u.vlan_mac.vlan,
				     ETH_VLAN_FILTER_CLASSIFY, config);
}

#define list_next_entry(pos, member) \
	list_entry((pos)->member.next, typeof(*(pos)), member)

/**
 * bnx2x_vlan_mac_restore - reconfigure the next MAC/VLAN/VLAN-MAC element
 *
 * @bp: device handle
 * @p: command parameters
 * @ppos: pointer to the cookie
 *
 * reconfigure the next MAC/VLAN/VLAN-MAC element from the
 * previously configured elements list.
 *
 * from the command parameters only the RAMROD_COMP_WAIT bit in ramrod_flags
 * is taken into account
 *
 * pointer to the cookie - should be given back in the next call to make
 * the function handle the next element. If *ppos is set to NULL it will
 * restart the iterator. If the returned *ppos == NULL this means that the
 * last element has been handled.
 *
 */
static int bnx2x_vlan_mac_restore(struct bnx2x *bp,
				  struct bnx2x_vlan_mac_ramrod_params *p,
				  struct bnx2x_vlan_mac_registry_elem **ppos)
{
	struct bnx2x_vlan_mac_registry_elem *pos;
	struct bnx2x_vlan_mac_obj *o = p->vlan_mac_obj;

	/* If list is empty - there is nothing to do here */
	if (list_empty(&o->head)) {
		*ppos = NULL;
		return 0;
	}

	/* make a step... */
	if (*ppos == NULL)
		*ppos = list_first_entry(&o->head,
					 struct bnx2x_vlan_mac_registry_elem,
					 link);
	else
		*ppos = list_next_entry(*ppos, link);

	pos = *ppos;

	/* If it's the last step - return NULL */
	if (list_is_last(&pos->link, &o->head))
		*ppos = NULL;

	/* Prepare a 'user_req' */
	memcpy(&p->user_req.u, &pos->u, sizeof(pos->u));

	/* Set the command */
	p->user_req.cmd = BNX2X_VLAN_MAC_ADD;

	/* Set vlan_mac_flags */
	p->user_req.vlan_mac_flags = pos->vlan_mac_flags;

	/* Set a restore bit */
	__set_bit(RAMROD_RESTORE, &p->ramrod_flags);

	return bnx2x_config_vlan_mac(bp, p);
}
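
/*
 * Illustrative caller sketch (not part of the driver): replaying the whole
 * registry with the cookie-style iterator above would look roughly like
 * this, assuming 'p' was prepared by the caller:
 *
 *	struct bnx2x_vlan_mac_registry_elem *pos = NULL;
 *	int rc;
 *
 *	do {
 *		rc = bnx2x_vlan_mac_restore(bp, &p, &pos);
 *		if (rc < 0)
 *			break;
 *	} while (pos);
 *
 * Each call re-adds one previously configured element and advances the
 * cookie; a returned NULL cookie means the last element has been handled.
 */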

/*
 * bnx2x_exeq_get_mac/bnx2x_exeq_get_vlan/bnx2x_exeq_get_vlan_mac return a
 * pointer to an element with a specific criteria and NULL if such an element
 * hasn't been found.
 */
static struct bnx2x_exeq_elem *bnx2x_exeq_get_mac(
	struct bnx2x_exe_queue_obj *o,
	struct bnx2x_exeq_elem *elem)
{
	struct bnx2x_exeq_elem *pos;
	struct bnx2x_mac_ramrod_data *data = &elem->cmd_data.vlan_mac.u.mac;

	/* Check pending for execution commands */
	list_for_each_entry(pos, &o->exe_queue, link)
		if (!memcmp(&pos->cmd_data.vlan_mac.u.mac, data,
			    sizeof(*data)) &&
		    (pos->cmd_data.vlan_mac.cmd == elem->cmd_data.vlan_mac.cmd))
			return pos;

	return NULL;
}

static struct bnx2x_exeq_elem *bnx2x_exeq_get_vlan(
	struct bnx2x_exe_queue_obj *o,
	struct bnx2x_exeq_elem *elem)
{
	struct bnx2x_exeq_elem *pos;
	struct bnx2x_vlan_ramrod_data *data = &elem->cmd_data.vlan_mac.u.vlan;

	/* Check pending for execution commands */
	list_for_each_entry(pos, &o->exe_queue, link)
		if (!memcmp(&pos->cmd_data.vlan_mac.u.vlan, data,
			    sizeof(*data)) &&
		    (pos->cmd_data.vlan_mac.cmd == elem->cmd_data.vlan_mac.cmd))
			return pos;

	return NULL;
}

static struct bnx2x_exeq_elem *bnx2x_exeq_get_vlan_mac(
	struct bnx2x_exe_queue_obj *o,
	struct bnx2x_exeq_elem *elem)
{
	struct bnx2x_exeq_elem *pos;
	struct bnx2x_vlan_mac_ramrod_data *data =
		&elem->cmd_data.vlan_mac.u.vlan_mac;

	/* Check pending for execution commands */
	list_for_each_entry(pos, &o->exe_queue, link)
		if (!memcmp(&pos->cmd_data.vlan_mac.u.vlan_mac, data,
			    sizeof(*data)) &&
		    (pos->cmd_data.vlan_mac.cmd == elem->cmd_data.vlan_mac.cmd))
			return pos;

	return NULL;
}

/**
 * bnx2x_validate_vlan_mac_add - check if an ADD command can be executed
 *
 * @bp: device handle
 * @qo: bnx2x_qable_obj
 * @elem: bnx2x_exeq_elem
 *
 * Checks that the requested configuration can be added. If yes and if
 * requested, consume CAM credit.
 *
 * The 'validate' is run after the 'optimize'.
 *
 */
static inline int bnx2x_validate_vlan_mac_add(struct bnx2x *bp,
					      union bnx2x_qable_obj *qo,
					      struct bnx2x_exeq_elem *elem)
{
	struct bnx2x_vlan_mac_obj *o = &qo->vlan_mac;
	struct bnx2x_exe_queue_obj *exeq = &o->exe_queue;
	int rc;

	/* Check the registry */
	rc = o->check_add(bp, o, &elem->cmd_data.vlan_mac.u);
	if (rc) {
		DP(BNX2X_MSG_SP, "ADD command is not allowed considering current registry state.\n");
		return rc;
	}

	/*
	 * Check if there is a pending ADD command for this
	 * MAC/VLAN/VLAN-MAC. Return an error if there is.
	 */
	if (exeq->get(exeq, elem)) {
		DP(BNX2X_MSG_SP, "There is a pending ADD command already\n");
		return -EEXIST;
	}

	/*
	 * TODO: Check the pending MOVE from other objects where this
	 * object is a destination object.
	 */

	/* Consume the credit if not requested not to */
	if (!(test_bit(BNX2X_DONT_CONSUME_CAM_CREDIT,
		       &elem->cmd_data.vlan_mac.vlan_mac_flags) ||
	      o->get_credit(o)))
		return -EINVAL;

	return 0;
}

/**
 * bnx2x_validate_vlan_mac_del - check if the DEL command can be executed
 *
 * @bp: device handle
 * @qo: queueable object to check
 * @elem: element that needs to be deleted
 *
 * Checks that the requested configuration can be deleted. If yes and if
 * requested, returns a CAM credit.
 *
 * The 'validate' is run after the 'optimize'.
 */
static inline int bnx2x_validate_vlan_mac_del(struct bnx2x *bp,
					      union bnx2x_qable_obj *qo,
					      struct bnx2x_exeq_elem *elem)
{
	struct bnx2x_vlan_mac_obj *o = &qo->vlan_mac;
	struct bnx2x_vlan_mac_registry_elem *pos;
	struct bnx2x_exe_queue_obj *exeq = &o->exe_queue;
	struct bnx2x_exeq_elem query_elem;

	/* If this classification can not be deleted (doesn't exist)
	 * - return -EEXIST.
	 */
	pos = o->check_del(bp, o, &elem->cmd_data.vlan_mac.u);
	if (!pos) {
		DP(BNX2X_MSG_SP, "DEL command is not allowed considering current registry state\n");
		return -EEXIST;
	}

	/*
	 * Check if there are pending DEL or MOVE commands for this
	 * MAC/VLAN/VLAN-MAC. Return an error if so.
	 */
	memcpy(&query_elem, elem, sizeof(query_elem));

	/* Check for MOVE commands */
	query_elem.cmd_data.vlan_mac.cmd = BNX2X_VLAN_MAC_MOVE;
	if (exeq->get(exeq, &query_elem)) {
		BNX2X_ERR("There is a pending MOVE command already\n");
		return -EINVAL;
	}

	/* Check for DEL commands */
	if (exeq->get(exeq, elem)) {
		DP(BNX2X_MSG_SP, "There is a pending DEL command already\n");
		return -EEXIST;
	}

	/* Return the credit to the credit pool if not requested not to */
	if (!(test_bit(BNX2X_DONT_CONSUME_CAM_CREDIT,
		       &elem->cmd_data.vlan_mac.vlan_mac_flags) ||
	      o->put_credit(o))) {
		BNX2X_ERR("Failed to return a credit\n");
		return -EINVAL;
	}

	return 0;
}

/**
 * bnx2x_validate_vlan_mac_move - check if the MOVE command can be executed
 *
 * @bp: device handle
 * @qo: queueable object to check (source)
 * @elem: element that needs to be moved
 *
 * Checks that the requested configuration can be moved. If yes and if
 * requested, returns a CAM credit.
 *
 * The 'validate' is run after the 'optimize'.
 */
static inline int bnx2x_validate_vlan_mac_move(struct bnx2x *bp,
					       union bnx2x_qable_obj *qo,
					       struct bnx2x_exeq_elem *elem)
{
	struct bnx2x_vlan_mac_obj *src_o = &qo->vlan_mac;
	struct bnx2x_vlan_mac_obj *dest_o = elem->cmd_data.vlan_mac.target_obj;
	struct bnx2x_exeq_elem query_elem;
	struct bnx2x_exe_queue_obj *src_exeq = &src_o->exe_queue;
	struct bnx2x_exe_queue_obj *dest_exeq = &dest_o->exe_queue;

	/*
	 * Check if we can perform this operation based on the current registry
	 * state.
	 */
	if (!src_o->check_move(bp, src_o, dest_o,
			       &elem->cmd_data.vlan_mac.u)) {
		DP(BNX2X_MSG_SP, "MOVE command is not allowed considering current registry state\n");
		return -EINVAL;
	}

	/*
	 * Check if there is an already pending DEL or MOVE command for the
	 * source object or an ADD command for the destination object. Return
	 * an error if so.
	 */
	memcpy(&query_elem, elem, sizeof(query_elem));

	/* Check DEL on source */
	query_elem.cmd_data.vlan_mac.cmd = BNX2X_VLAN_MAC_DEL;
	if (src_exeq->get(src_exeq, &query_elem)) {
		BNX2X_ERR("There is a pending DEL command on the source queue already\n");
		return -EINVAL;
	}

	/* Check MOVE on source */
	if (src_exeq->get(src_exeq, elem)) {
		DP(BNX2X_MSG_SP, "There is a pending MOVE command already\n");
		return -EEXIST;
	}

	/* Check ADD on destination */
	query_elem.cmd_data.vlan_mac.cmd = BNX2X_VLAN_MAC_ADD;
	if (dest_exeq->get(dest_exeq, &query_elem)) {
		BNX2X_ERR("There is a pending ADD command on the destination queue already\n");
		return -EINVAL;
	}

	/* Consume the credit if not requested not to */
	if (!(test_bit(BNX2X_DONT_CONSUME_CAM_CREDIT_DEST,
		       &elem->cmd_data.vlan_mac.vlan_mac_flags) ||
	      dest_o->get_credit(dest_o)))
		return -EINVAL;

	if (!(test_bit(BNX2X_DONT_CONSUME_CAM_CREDIT,
		       &elem->cmd_data.vlan_mac.vlan_mac_flags) ||
	      src_o->put_credit(src_o))) {
		/* return the credit taken from dest... */
		dest_o->put_credit(dest_o);
		return -EINVAL;
	}

	return 0;
}

static int bnx2x_validate_vlan_mac(struct bnx2x *bp,
				   union bnx2x_qable_obj *qo,
				   struct bnx2x_exeq_elem *elem)
{
	switch (elem->cmd_data.vlan_mac.cmd) {
	case BNX2X_VLAN_MAC_ADD:
		return bnx2x_validate_vlan_mac_add(bp, qo, elem);
	case BNX2X_VLAN_MAC_DEL:
		return bnx2x_validate_vlan_mac_del(bp, qo, elem);
	case BNX2X_VLAN_MAC_MOVE:
		return bnx2x_validate_vlan_mac_move(bp, qo, elem);
	default:
		return -EINVAL;
	}
}

static int bnx2x_remove_vlan_mac(struct bnx2x *bp,
				 union bnx2x_qable_obj *qo,
				 struct bnx2x_exeq_elem *elem)
{
	int rc = 0;

	/* If consumption wasn't required, nothing to do */
	if (test_bit(BNX2X_DONT_CONSUME_CAM_CREDIT,
		     &elem->cmd_data.vlan_mac.vlan_mac_flags))
		return 0;

	switch (elem->cmd_data.vlan_mac.cmd) {
	case BNX2X_VLAN_MAC_ADD:
	case BNX2X_VLAN_MAC_MOVE:
		rc = qo->vlan_mac.put_credit(&qo->vlan_mac);
		break;
	case BNX2X_VLAN_MAC_DEL:
		rc = qo->vlan_mac.get_credit(&qo->vlan_mac);
		break;
	default:
		return -EINVAL;
	}

	if (rc != true)
		return -EINVAL;

	return 0;
}

/**
 * bnx2x_wait_vlan_mac - passively wait for 5 seconds until all work completes.
 *
 * @bp: device handle
 * @o: bnx2x_vlan_mac_obj
 *
 */
static int bnx2x_wait_vlan_mac(struct bnx2x *bp,
			       struct bnx2x_vlan_mac_obj *o)
{
	int cnt = 5000, rc;
	struct bnx2x_exe_queue_obj *exeq = &o->exe_queue;
	struct bnx2x_raw_obj *raw = &o->raw;

	while (cnt--) {
		/* Wait for the current command to complete */
		rc = raw->wait_comp(bp, raw);
		if (rc)
			return rc;

		/* Wait until there are no pending commands */
		if (!bnx2x_exe_queue_empty(exeq))
			usleep_range(1000, 2000);
		else
			return 0;
	}

	return -EBUSY;
}

/**
 * bnx2x_complete_vlan_mac - complete one VLAN-MAC ramrod
 *
 * @bp: device handle
 * @o: bnx2x_vlan_mac_obj
 * @cqe: completion element
 * @ramrod_flags: if RAMROD_CONT is set, schedule the next execution chunk
 *
 */
static int bnx2x_complete_vlan_mac(struct bnx2x *bp,
				   struct bnx2x_vlan_mac_obj *o,
				   union event_ring_elem *cqe,
				   unsigned long *ramrod_flags)
{
	struct bnx2x_raw_obj *r = &o->raw;
	int rc;

	/* Reset pending list */
	bnx2x_exe_queue_reset_pending(bp, &o->exe_queue);

	/* Clear pending */
	r->clear_pending(r);

	/* If ramrod failed this is most likely a SW bug */
	if (cqe->message.error)
		return -EINVAL;

	/* Run the next bulk of pending commands if requested */
	if (test_bit(RAMROD_CONT, ramrod_flags)) {
		rc = bnx2x_exe_queue_step(bp, &o->exe_queue, ramrod_flags);
		if (rc < 0)
			return rc;
	}

	/* If there is more work to do return PENDING */
	if (!bnx2x_exe_queue_empty(&o->exe_queue))
		return 1;

	return 0;
}

/**
 * bnx2x_optimize_vlan_mac - optimize ADD and DEL commands.
 *
 * @bp: device handle
 * @qo: bnx2x_qable_obj
 * @elem: bnx2x_exeq_elem
 */
static int bnx2x_optimize_vlan_mac(struct bnx2x *bp,
				   union bnx2x_qable_obj *qo,
				   struct bnx2x_exeq_elem *elem)
{
	struct bnx2x_exeq_elem query, *pos;
	struct bnx2x_vlan_mac_obj *o = &qo->vlan_mac;
	struct bnx2x_exe_queue_obj *exeq = &o->exe_queue;

	memcpy(&query, elem, sizeof(query));

	switch (elem->cmd_data.vlan_mac.cmd) {
	case BNX2X_VLAN_MAC_ADD:
		query.cmd_data.vlan_mac.cmd = BNX2X_VLAN_MAC_DEL;
		break;
	case BNX2X_VLAN_MAC_DEL:
		query.cmd_data.vlan_mac.cmd = BNX2X_VLAN_MAC_ADD;
		break;
	default:
		/* Don't handle anything other than ADD or DEL */
		return 0;
	}

	/* If we found the appropriate element - delete it */
	pos = exeq->get(exeq, &query);
	if (pos) {

		/* Return the credit of the optimized command */
		if (!test_bit(BNX2X_DONT_CONSUME_CAM_CREDIT,
			      &pos->cmd_data.vlan_mac.vlan_mac_flags)) {
			if ((query.cmd_data.vlan_mac.cmd ==
			     BNX2X_VLAN_MAC_ADD) && !o->put_credit(o)) {
				BNX2X_ERR("Failed to return the credit for the optimized ADD command\n");
				return -EINVAL;
			} else if (!o->get_credit(o)) { /* VLAN_MAC_DEL */
				BNX2X_ERR("Failed to recover the credit from the optimized DEL command\n");
				return -EINVAL;
			}
		}

		DP(BNX2X_MSG_SP, "Optimizing %s command\n",
		   (elem->cmd_data.vlan_mac.cmd == BNX2X_VLAN_MAC_ADD) ?
		   "ADD" : "DEL");

		list_del(&pos->link);
		bnx2x_exe_queue_free_elem(bp, pos);
		return 1;
	}

	return 0;
}
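
/*
 * Illustrative note (follows from the function above): "optimization" means
 * a new command cancels a complementary one still sitting in the queue
 * instead of both being sent to FW. E.g. if a DEL for MAC 00:11:22:33:44:55
 * (a hypothetical address) is queued but not yet executed, a subsequent ADD
 * for the same MAC simply removes the pending DEL, restores the CAM credit
 * accounting, and neither ramrod is posted.
 */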

/**
 * bnx2x_vlan_mac_get_registry_elem - prepare a registry element
 *
 * @bp: device handle
 * @o: vlan_mac object the element belongs to
 * @elem: execution queue element describing the command
 * @restore: true if this is a restore flow
 * @re: the prepared registry element is returned here
 *
 * prepare a registry element according to the current command request.
 */
static inline int bnx2x_vlan_mac_get_registry_elem(
	struct bnx2x *bp,
	struct bnx2x_vlan_mac_obj *o,
	struct bnx2x_exeq_elem *elem,
	bool restore,
	struct bnx2x_vlan_mac_registry_elem **re)
{
	enum bnx2x_vlan_mac_cmd cmd = elem->cmd_data.vlan_mac.cmd;
	struct bnx2x_vlan_mac_registry_elem *reg_elem;

	/* Allocate a new registry element if needed. */
	if (!restore &&
	    ((cmd == BNX2X_VLAN_MAC_ADD) || (cmd == BNX2X_VLAN_MAC_MOVE))) {
		reg_elem = kzalloc(sizeof(*reg_elem), GFP_ATOMIC);
		if (!reg_elem)
			return -ENOMEM;

		/* Get a new CAM offset */
		if (!o->get_cam_offset(o, &reg_elem->cam_offset)) {
			/*
			 * This should never happen, because we have checked
			 * the CAM availability in the 'validate'.
			 */
			WARN_ON(1);
			kfree(reg_elem);
			return -EINVAL;
		}

		DP(BNX2X_MSG_SP, "Got cam offset %d\n", reg_elem->cam_offset);

		/* Set a VLAN-MAC data */
		memcpy(&reg_elem->u, &elem->cmd_data.vlan_mac.u,
		       sizeof(reg_elem->u));

		/* Copy the flags (needed for DEL and RESTORE flows) */
		reg_elem->vlan_mac_flags =
			elem->cmd_data.vlan_mac.vlan_mac_flags;
	} else /* DEL, RESTORE */
		reg_elem = o->check_del(bp, o, &elem->cmd_data.vlan_mac.u);

	*re = reg_elem;
	return 0;
}

/**
 * bnx2x_execute_vlan_mac - execute vlan mac command
 *
 * @bp: device handle
 * @qo: queueable object
 * @exe_chunk: chunk of commands to execute
 * @ramrod_flags: execution flags
 *
 * go and send a ramrod!
 */
static int bnx2x_execute_vlan_mac(struct bnx2x *bp,
				  union bnx2x_qable_obj *qo,
				  struct list_head *exe_chunk,
				  unsigned long *ramrod_flags)
{
	struct bnx2x_exeq_elem *elem;
	struct bnx2x_vlan_mac_obj *o = &qo->vlan_mac, *cam_obj;
	struct bnx2x_raw_obj *r = &o->raw;
	int rc, idx = 0;
	bool restore = test_bit(RAMROD_RESTORE, ramrod_flags);
	bool drv_only = test_bit(RAMROD_DRV_CLR_ONLY, ramrod_flags);
	struct bnx2x_vlan_mac_registry_elem *reg_elem;
	enum bnx2x_vlan_mac_cmd cmd;

	/*
	 * If DRIVER_ONLY execution is requested, cleanup a registry
	 * and exit. Otherwise send a ramrod to FW.
	 */
	if (!drv_only) {
		WARN_ON(r->check_pending(r));

		/* Set pending */
		r->set_pending(r);

		/* Fill the ramrod data */
		list_for_each_entry(elem, exe_chunk, link) {
			cmd = elem->cmd_data.vlan_mac.cmd;
			/*
			 * We will add to the target object in MOVE command, so
			 * change the object for a CAM search.
			 */
			if (cmd == BNX2X_VLAN_MAC_MOVE)
				cam_obj = elem->cmd_data.vlan_mac.target_obj;
			else
				cam_obj = o;

			rc = bnx2x_vlan_mac_get_registry_elem(bp, cam_obj,
							      elem, restore,
							      &reg_elem);
			if (rc)
				goto error_exit;

			WARN_ON(!reg_elem);

			/* Push a new entry into the registry */
			if (!restore &&
			    ((cmd == BNX2X_VLAN_MAC_ADD) ||
			    (cmd == BNX2X_VLAN_MAC_MOVE)))
				list_add(&reg_elem->link, &cam_obj->head);

			/* Configure a single command in a ramrod data buffer */
			o->set_one_rule(bp, o, elem, idx,
					reg_elem->cam_offset);

			/* MOVE command consumes 2 entries in the ramrod data */
			if (cmd == BNX2X_VLAN_MAC_MOVE)
				idx += 2;
			else
				idx++;
		}

		/*
		 * No need for an explicit memory barrier here as long as we
		 * would need to ensure the ordering of writing to the SPQ
		 * element and updating of the SPQ producer, which involves a
		 * memory read, and we will have to put a full memory barrier
		 * there (inside bnx2x_sp_post()).
		 */

		rc = bnx2x_sp_post(bp, o->ramrod_cmd, r->cid,
				   U64_HI(r->rdata_mapping),
				   U64_LO(r->rdata_mapping),
				   ETH_CONNECTION_TYPE);
		if (rc)
			goto error_exit;
	}

	/* Now, when we are done with the ramrod - clean up the registry */
	list_for_each_entry(elem, exe_chunk, link) {
		cmd = elem->cmd_data.vlan_mac.cmd;
		if ((cmd == BNX2X_VLAN_MAC_DEL) ||
		    (cmd == BNX2X_VLAN_MAC_MOVE)) {
			reg_elem = o->check_del(bp, o,
						&elem->cmd_data.vlan_mac.u);

			WARN_ON(!reg_elem);

			o->put_cam_offset(o, reg_elem->cam_offset);
			list_del(&reg_elem->link);
			kfree(reg_elem);
		}
	}

	if (!drv_only)
		return 1;
	else
		return 0;

error_exit:
	r->clear_pending(r);

	/* Cleanup a registry in case of a failure */
	list_for_each_entry(elem, exe_chunk, link) {
		cmd = elem->cmd_data.vlan_mac.cmd;

		if (cmd == BNX2X_VLAN_MAC_MOVE)
			cam_obj = elem->cmd_data.vlan_mac.target_obj;
		else
			cam_obj = o;

		/* Delete all newly added above entries */
		if (!restore &&
		    ((cmd == BNX2X_VLAN_MAC_ADD) ||
		    (cmd == BNX2X_VLAN_MAC_MOVE))) {
			reg_elem = o->check_del(bp, cam_obj,
						&elem->cmd_data.vlan_mac.u);
			if (reg_elem) {
				list_del(&reg_elem->link);
				kfree(reg_elem);
			}
		}
	}

	return rc;
}

static inline int bnx2x_vlan_mac_push_new_cmd(
	struct bnx2x *bp,
	struct bnx2x_vlan_mac_ramrod_params *p)
{
	struct bnx2x_exeq_elem *elem;
	struct bnx2x_vlan_mac_obj *o = p->vlan_mac_obj;
	bool restore = test_bit(RAMROD_RESTORE, &p->ramrod_flags);

	/* Allocate the execution queue element */
	elem = bnx2x_exe_queue_alloc_elem(bp);
	if (!elem)
		return -ENOMEM;

	/* Set the command 'length' */
	switch (p->user_req.cmd) {
	case BNX2X_VLAN_MAC_MOVE:
		elem->cmd_len = 2;
		break;
	default:
		elem->cmd_len = 1;
	}

	/* Fill the object specific info */
	memcpy(&elem->cmd_data.vlan_mac, &p->user_req, sizeof(p->user_req));

	/* Try to add a new command to the pending list */
	return bnx2x_exe_queue_add(bp, &o->exe_queue, elem, restore);
}
1727
1728/**
1729 * bnx2x_config_vlan_mac - configure VLAN/MAC/VLAN_MAC filtering rules.
1730 *
1731 * @bp: device handle
1732 * @p: command parameters (vlan/mac object, user request and ramrod flags)
1733 *
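 * A minimal usage sketch (illustrative only; the exact union member used
 * for the address depends on the classification type):
 *
 *	struct bnx2x_vlan_mac_ramrod_params p = { .vlan_mac_obj = mac_obj };
 *
 *	memcpy(p.user_req.u.mac.mac, addr, ETH_ALEN);
 *	p.user_req.cmd = BNX2X_VLAN_MAC_ADD;
 *	__set_bit(RAMROD_COMP_WAIT, &p.ramrod_flags);
 *	rc = bnx2x_config_vlan_mac(bp, &p);
 *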
1734 */
1735int bnx2x_config_vlan_mac(
1736 struct bnx2x *bp,
1737 struct bnx2x_vlan_mac_ramrod_params *p)
1738{
1739 int rc = 0;
1740 struct bnx2x_vlan_mac_obj *o = p->vlan_mac_obj;
1741 unsigned long *ramrod_flags = &p->ramrod_flags;
1742 bool cont = test_bit(RAMROD_CONT, ramrod_flags);
1743 struct bnx2x_raw_obj *raw = &o->raw;
1744
1745 /*
1746 * Add new elements to the execution list for commands that require it.
1747 */
1748 if (!cont) {
1749 rc = bnx2x_vlan_mac_push_new_cmd(bp, p);
1750 if (rc)
1751 return rc;
1752 }
1753
1754 /*
1755	 * If nothing more will be executed in this iteration we want to
1756	 * return a positive ("pending") value if there are pending commands
1757 */
1758 if (!bnx2x_exe_queue_empty(&o->exe_queue))
1759 rc = 1;
1760
Vladislav Zolotarov79616892011-07-21 07:58:54 +00001761 if (test_bit(RAMROD_DRV_CLR_ONLY, ramrod_flags)) {
Merav Sicron51c1a582012-03-18 10:33:38 +00001762 DP(BNX2X_MSG_SP, "RAMROD_DRV_CLR_ONLY requested: clearing a pending bit.\n");
Vladislav Zolotarov79616892011-07-21 07:58:54 +00001763 raw->clear_pending(raw);
1764 }
1765
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03001766 /* Execute commands if required */
1767 if (cont || test_bit(RAMROD_EXEC, ramrod_flags) ||
1768 test_bit(RAMROD_COMP_WAIT, ramrod_flags)) {
1769 rc = bnx2x_exe_queue_step(bp, &o->exe_queue, ramrod_flags);
1770 if (rc < 0)
1771 return rc;
1772 }
1773
1774 /*
1775 * RAMROD_COMP_WAIT is a superset of RAMROD_EXEC. If it was set
1776	 * then the user wants to wait until the last command is done.
1777 */
1778 if (test_bit(RAMROD_COMP_WAIT, &p->ramrod_flags)) {
1779 /*
1780 * Wait maximum for the current exe_queue length iterations plus
1781 * one (for the current pending command).
1782 */
1783 int max_iterations = bnx2x_exe_queue_length(&o->exe_queue) + 1;
1784
1785 while (!bnx2x_exe_queue_empty(&o->exe_queue) &&
1786 max_iterations--) {
1787
1788 /* Wait for the current command to complete */
1789 rc = raw->wait_comp(bp, raw);
1790 if (rc)
1791 return rc;
1792
1793 /* Make a next step */
1794 rc = bnx2x_exe_queue_step(bp, &o->exe_queue,
1795 ramrod_flags);
1796 if (rc < 0)
1797 return rc;
1798 }
1799
1800 return 0;
1801 }
1802
1803 return rc;
1804}
1805
1806
1807
1808/**
1809 * bnx2x_vlan_mac_del_all - delete elements with given vlan_mac_flags spec
1810 *
1811 * @bp: device handle
1812 * @o: vlan/mac object to delete the entries from
1813 * @vlan_mac_flags: entries with these flags will be deleted
1814 * @ramrod_flags: execution flags to be used for this deletion
1815 *
1816 * Returns 0 if the last operation has completed successfully and there are
1817 * no more elements left, positive value if the last operation has completed
1818 * successfully and there are more previously configured elements, negative
1819 * value if the current operation has failed.
1820 */
1821static int bnx2x_vlan_mac_del_all(struct bnx2x *bp,
1822 struct bnx2x_vlan_mac_obj *o,
1823 unsigned long *vlan_mac_flags,
1824 unsigned long *ramrod_flags)
1825{
1826 struct bnx2x_vlan_mac_registry_elem *pos = NULL;
1827 int rc = 0;
1828 struct bnx2x_vlan_mac_ramrod_params p;
1829 struct bnx2x_exe_queue_obj *exeq = &o->exe_queue;
1830 struct bnx2x_exeq_elem *exeq_pos, *exeq_pos_n;
1831
1832 /* Clear pending commands first */
1833
1834 spin_lock_bh(&exeq->lock);
1835
1836 list_for_each_entry_safe(exeq_pos, exeq_pos_n, &exeq->exe_queue, link) {
1837 if (exeq_pos->cmd_data.vlan_mac.vlan_mac_flags ==
Yuval Mintz460a25c2012-01-23 07:31:51 +00001838 *vlan_mac_flags) {
1839 rc = exeq->remove(bp, exeq->owner, exeq_pos);
1840 if (rc) {
1841 BNX2X_ERR("Failed to remove command\n");
Dan Carpentera44acd52012-01-24 21:59:31 +00001842 spin_unlock_bh(&exeq->lock);
Yuval Mintz460a25c2012-01-23 07:31:51 +00001843 return rc;
1844 }
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03001845 list_del(&exeq_pos->link);
Yuval Mintz07ef7be2013-03-11 05:17:41 +00001846 bnx2x_exe_queue_free_elem(bp, exeq_pos);
Yuval Mintz460a25c2012-01-23 07:31:51 +00001847 }
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03001848 }
1849
1850 spin_unlock_bh(&exeq->lock);
1851
1852 /* Prepare a command request */
1853 memset(&p, 0, sizeof(p));
1854 p.vlan_mac_obj = o;
1855 p.ramrod_flags = *ramrod_flags;
1856 p.user_req.cmd = BNX2X_VLAN_MAC_DEL;
1857
1858 /*
1859 * Add all but the last VLAN-MAC to the execution queue without actually
1860	 * executing anything.
1861 */
1862 __clear_bit(RAMROD_COMP_WAIT, &p.ramrod_flags);
1863 __clear_bit(RAMROD_EXEC, &p.ramrod_flags);
1864 __clear_bit(RAMROD_CONT, &p.ramrod_flags);
1865
1866 list_for_each_entry(pos, &o->head, link) {
1867 if (pos->vlan_mac_flags == *vlan_mac_flags) {
1868 p.user_req.vlan_mac_flags = pos->vlan_mac_flags;
1869 memcpy(&p.user_req.u, &pos->u, sizeof(pos->u));
1870 rc = bnx2x_config_vlan_mac(bp, &p);
1871 if (rc < 0) {
1872 BNX2X_ERR("Failed to add a new DEL command\n");
1873 return rc;
1874 }
1875 }
1876 }
1877
1878 p.ramrod_flags = *ramrod_flags;
1879 __set_bit(RAMROD_CONT, &p.ramrod_flags);
1880
1881 return bnx2x_config_vlan_mac(bp, &p);
1882}
1883
1884static inline void bnx2x_init_raw_obj(struct bnx2x_raw_obj *raw, u8 cl_id,
1885 u32 cid, u8 func_id, void *rdata, dma_addr_t rdata_mapping, int state,
1886 unsigned long *pstate, bnx2x_obj_type type)
1887{
1888 raw->func_id = func_id;
1889 raw->cid = cid;
1890 raw->cl_id = cl_id;
1891 raw->rdata = rdata;
1892 raw->rdata_mapping = rdata_mapping;
1893 raw->state = state;
1894 raw->pstate = pstate;
1895 raw->obj_type = type;
1896 raw->check_pending = bnx2x_raw_check_pending;
1897 raw->clear_pending = bnx2x_raw_clear_pending;
1898 raw->set_pending = bnx2x_raw_set_pending;
1899 raw->wait_comp = bnx2x_raw_wait;
1900}
1901
1902static inline void bnx2x_init_vlan_mac_common(struct bnx2x_vlan_mac_obj *o,
1903 u8 cl_id, u32 cid, u8 func_id, void *rdata, dma_addr_t rdata_mapping,
1904 int state, unsigned long *pstate, bnx2x_obj_type type,
1905 struct bnx2x_credit_pool_obj *macs_pool,
1906 struct bnx2x_credit_pool_obj *vlans_pool)
1907{
1908 INIT_LIST_HEAD(&o->head);
1909
1910 o->macs_pool = macs_pool;
1911 o->vlans_pool = vlans_pool;
1912
1913 o->delete_all = bnx2x_vlan_mac_del_all;
1914 o->restore = bnx2x_vlan_mac_restore;
1915 o->complete = bnx2x_complete_vlan_mac;
1916 o->wait = bnx2x_wait_vlan_mac;
1917
1918 bnx2x_init_raw_obj(&o->raw, cl_id, cid, func_id, rdata, rdata_mapping,
1919 state, pstate, type);
1920}
1921
1922
1923void bnx2x_init_mac_obj(struct bnx2x *bp,
1924 struct bnx2x_vlan_mac_obj *mac_obj,
1925 u8 cl_id, u32 cid, u8 func_id, void *rdata,
1926 dma_addr_t rdata_mapping, int state,
1927 unsigned long *pstate, bnx2x_obj_type type,
1928 struct bnx2x_credit_pool_obj *macs_pool)
1929{
1930 union bnx2x_qable_obj *qable_obj = (union bnx2x_qable_obj *)mac_obj;
1931
1932 bnx2x_init_vlan_mac_common(mac_obj, cl_id, cid, func_id, rdata,
1933 rdata_mapping, state, pstate, type,
1934 macs_pool, NULL);
1935
1936 /* CAM credit pool handling */
1937 mac_obj->get_credit = bnx2x_get_credit_mac;
1938 mac_obj->put_credit = bnx2x_put_credit_mac;
1939 mac_obj->get_cam_offset = bnx2x_get_cam_offset_mac;
1940 mac_obj->put_cam_offset = bnx2x_put_cam_offset_mac;
1941
1942 if (CHIP_IS_E1x(bp)) {
1943 mac_obj->set_one_rule = bnx2x_set_one_mac_e1x;
1944 mac_obj->check_del = bnx2x_check_mac_del;
1945 mac_obj->check_add = bnx2x_check_mac_add;
1946 mac_obj->check_move = bnx2x_check_move_always_err;
1947 mac_obj->ramrod_cmd = RAMROD_CMD_ID_ETH_SET_MAC;
1948
1949 /* Exe Queue */
1950 bnx2x_exe_queue_init(bp,
1951 &mac_obj->exe_queue, 1, qable_obj,
1952 bnx2x_validate_vlan_mac,
Yuval Mintz460a25c2012-01-23 07:31:51 +00001953 bnx2x_remove_vlan_mac,
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03001954 bnx2x_optimize_vlan_mac,
1955 bnx2x_execute_vlan_mac,
1956 bnx2x_exeq_get_mac);
1957 } else {
1958 mac_obj->set_one_rule = bnx2x_set_one_mac_e2;
1959 mac_obj->check_del = bnx2x_check_mac_del;
1960 mac_obj->check_add = bnx2x_check_mac_add;
1961 mac_obj->check_move = bnx2x_check_move;
1962 mac_obj->ramrod_cmd =
1963 RAMROD_CMD_ID_ETH_CLASSIFICATION_RULES;
Ariel Eliored5162a2011-12-05 21:52:24 +00001964 mac_obj->get_n_elements = bnx2x_get_n_elements;
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03001965
1966 /* Exe Queue */
1967 bnx2x_exe_queue_init(bp,
1968 &mac_obj->exe_queue, CLASSIFY_RULES_COUNT,
1969 qable_obj, bnx2x_validate_vlan_mac,
Yuval Mintz460a25c2012-01-23 07:31:51 +00001970 bnx2x_remove_vlan_mac,
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03001971 bnx2x_optimize_vlan_mac,
1972 bnx2x_execute_vlan_mac,
1973 bnx2x_exeq_get_mac);
1974 }
1975}
1976
1977void bnx2x_init_vlan_obj(struct bnx2x *bp,
1978 struct bnx2x_vlan_mac_obj *vlan_obj,
1979 u8 cl_id, u32 cid, u8 func_id, void *rdata,
1980 dma_addr_t rdata_mapping, int state,
1981 unsigned long *pstate, bnx2x_obj_type type,
1982 struct bnx2x_credit_pool_obj *vlans_pool)
1983{
1984 union bnx2x_qable_obj *qable_obj = (union bnx2x_qable_obj *)vlan_obj;
1985
1986 bnx2x_init_vlan_mac_common(vlan_obj, cl_id, cid, func_id, rdata,
1987 rdata_mapping, state, pstate, type, NULL,
1988 vlans_pool);
1989
1990 vlan_obj->get_credit = bnx2x_get_credit_vlan;
1991 vlan_obj->put_credit = bnx2x_put_credit_vlan;
1992 vlan_obj->get_cam_offset = bnx2x_get_cam_offset_vlan;
1993 vlan_obj->put_cam_offset = bnx2x_put_cam_offset_vlan;
1994
1995 if (CHIP_IS_E1x(bp)) {
1996		BNX2X_ERR("Chips older than E2 are not supported\n");
1997 BUG();
1998 } else {
1999 vlan_obj->set_one_rule = bnx2x_set_one_vlan_e2;
2000 vlan_obj->check_del = bnx2x_check_vlan_del;
2001 vlan_obj->check_add = bnx2x_check_vlan_add;
2002 vlan_obj->check_move = bnx2x_check_move;
2003 vlan_obj->ramrod_cmd =
2004 RAMROD_CMD_ID_ETH_CLASSIFICATION_RULES;
Ariel Elior3ec9f9c2013-03-11 05:17:45 +00002005 vlan_obj->get_n_elements = bnx2x_get_n_elements;
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03002006
2007 /* Exe Queue */
2008 bnx2x_exe_queue_init(bp,
2009 &vlan_obj->exe_queue, CLASSIFY_RULES_COUNT,
2010 qable_obj, bnx2x_validate_vlan_mac,
Yuval Mintz460a25c2012-01-23 07:31:51 +00002011 bnx2x_remove_vlan_mac,
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03002012 bnx2x_optimize_vlan_mac,
2013 bnx2x_execute_vlan_mac,
2014 bnx2x_exeq_get_vlan);
2015 }
2016}
2017
2018void bnx2x_init_vlan_mac_obj(struct bnx2x *bp,
2019 struct bnx2x_vlan_mac_obj *vlan_mac_obj,
2020 u8 cl_id, u32 cid, u8 func_id, void *rdata,
2021 dma_addr_t rdata_mapping, int state,
2022 unsigned long *pstate, bnx2x_obj_type type,
2023 struct bnx2x_credit_pool_obj *macs_pool,
2024 struct bnx2x_credit_pool_obj *vlans_pool)
2025{
2026 union bnx2x_qable_obj *qable_obj =
2027 (union bnx2x_qable_obj *)vlan_mac_obj;
2028
2029 bnx2x_init_vlan_mac_common(vlan_mac_obj, cl_id, cid, func_id, rdata,
2030 rdata_mapping, state, pstate, type,
2031 macs_pool, vlans_pool);
2032
2033 /* CAM pool handling */
2034 vlan_mac_obj->get_credit = bnx2x_get_credit_vlan_mac;
2035 vlan_mac_obj->put_credit = bnx2x_put_credit_vlan_mac;
2036 /*
2037	 * CAM offset is relevant only for 57710 and 57711 chips, which have a
2038	 * single CAM for both MACs and VLAN-MAC pairs. So the offset
2039	 * will be taken from the MACs' pool object only.
2040 */
2041 vlan_mac_obj->get_cam_offset = bnx2x_get_cam_offset_mac;
2042 vlan_mac_obj->put_cam_offset = bnx2x_put_cam_offset_mac;
2043
2044 if (CHIP_IS_E1(bp)) {
2045		BNX2X_ERR("Chips older than E1H are not supported\n");
2046 BUG();
2047 } else if (CHIP_IS_E1H(bp)) {
2048 vlan_mac_obj->set_one_rule = bnx2x_set_one_vlan_mac_e1h;
2049 vlan_mac_obj->check_del = bnx2x_check_vlan_mac_del;
2050 vlan_mac_obj->check_add = bnx2x_check_vlan_mac_add;
2051 vlan_mac_obj->check_move = bnx2x_check_move_always_err;
2052 vlan_mac_obj->ramrod_cmd = RAMROD_CMD_ID_ETH_SET_MAC;
2053
2054 /* Exe Queue */
2055 bnx2x_exe_queue_init(bp,
2056 &vlan_mac_obj->exe_queue, 1, qable_obj,
2057 bnx2x_validate_vlan_mac,
Yuval Mintz460a25c2012-01-23 07:31:51 +00002058 bnx2x_remove_vlan_mac,
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03002059 bnx2x_optimize_vlan_mac,
2060 bnx2x_execute_vlan_mac,
2061 bnx2x_exeq_get_vlan_mac);
2062 } else {
2063 vlan_mac_obj->set_one_rule = bnx2x_set_one_vlan_mac_e2;
2064 vlan_mac_obj->check_del = bnx2x_check_vlan_mac_del;
2065 vlan_mac_obj->check_add = bnx2x_check_vlan_mac_add;
2066 vlan_mac_obj->check_move = bnx2x_check_move;
2067 vlan_mac_obj->ramrod_cmd =
2068 RAMROD_CMD_ID_ETH_CLASSIFICATION_RULES;
2069
2070 /* Exe Queue */
2071 bnx2x_exe_queue_init(bp,
2072 &vlan_mac_obj->exe_queue,
2073 CLASSIFY_RULES_COUNT,
2074 qable_obj, bnx2x_validate_vlan_mac,
Yuval Mintz460a25c2012-01-23 07:31:51 +00002075 bnx2x_remove_vlan_mac,
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03002076 bnx2x_optimize_vlan_mac,
2077 bnx2x_execute_vlan_mac,
2078 bnx2x_exeq_get_vlan_mac);
2079 }
2080
2081}
2082
2083/* RX_MODE verbs: DROP_ALL/ACCEPT_ALL/ACCEPT_ALL_MULTI/ACCEPT_ALL_VLAN/NORMAL */
2084static inline void __storm_memset_mac_filters(struct bnx2x *bp,
2085 struct tstorm_eth_mac_filter_config *mac_filters,
2086 u16 pf_id)
2087{
2088 size_t size = sizeof(struct tstorm_eth_mac_filter_config);
2089
2090 u32 addr = BAR_TSTRORM_INTMEM +
2091 TSTORM_MAC_FILTER_CONFIG_OFFSET(pf_id);
2092
2093 __storm_memset_struct(bp, addr, size, (u32 *)mac_filters);
2094}
2095
2096static int bnx2x_set_rx_mode_e1x(struct bnx2x *bp,
2097 struct bnx2x_rx_mode_ramrod_params *p)
2098{
Yuval Mintz2de67432013-01-23 03:21:43 +00002099 /* update the bp MAC filter structure */
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03002100 u32 mask = (1 << p->cl_id);
2101
2102 struct tstorm_eth_mac_filter_config *mac_filters =
2103 (struct tstorm_eth_mac_filter_config *)p->rdata;
2104
2105	/* initial setting is drop-all */
2106 u8 drop_all_ucast = 1, drop_all_mcast = 1;
Vladislav Zolotarov042181f2011-06-14 01:33:39 +00002107 u8 accp_all_ucast = 0, accp_all_bcast = 0, accp_all_mcast = 0;
2108 u8 unmatched_unicast = 0;
2109
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03002110	/* In e1x we only take the rx accept flags into account since tx
2111	 * switching isn't enabled. */
2112 if (test_bit(BNX2X_ACCEPT_UNICAST, &p->rx_accept_flags))
Vladislav Zolotarov042181f2011-06-14 01:33:39 +00002113 /* accept matched ucast */
2114 drop_all_ucast = 0;
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03002115
2116 if (test_bit(BNX2X_ACCEPT_MULTICAST, &p->rx_accept_flags))
Vladislav Zolotarov042181f2011-06-14 01:33:39 +00002117 /* accept matched mcast */
2118 drop_all_mcast = 0;
2119
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03002120 if (test_bit(BNX2X_ACCEPT_ALL_UNICAST, &p->rx_accept_flags)) {
Vladislav Zolotarov042181f2011-06-14 01:33:39 +00002121		/* accept all ucast */
2122 drop_all_ucast = 0;
2123 accp_all_ucast = 1;
2124 }
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03002125 if (test_bit(BNX2X_ACCEPT_ALL_MULTICAST, &p->rx_accept_flags)) {
Vladislav Zolotarov042181f2011-06-14 01:33:39 +00002126 /* accept all mcast */
2127 drop_all_mcast = 0;
2128 accp_all_mcast = 1;
2129 }
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03002130 if (test_bit(BNX2X_ACCEPT_BROADCAST, &p->rx_accept_flags))
Vladislav Zolotarov042181f2011-06-14 01:33:39 +00002131 /* accept (all) bcast */
Vladislav Zolotarov042181f2011-06-14 01:33:39 +00002132 accp_all_bcast = 1;
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03002133 if (test_bit(BNX2X_ACCEPT_UNMATCHED, &p->rx_accept_flags))
2134 /* accept unmatched unicasts */
2135 unmatched_unicast = 1;
Vladislav Zolotarov042181f2011-06-14 01:33:39 +00002136
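	/* Each mac_filters field below is a per-client bitmask; the ternaries
	 * set or clear this client's bit in the corresponding mode mask.
	 * E.g. (illustrative) for cl_id == 3, mask == 0x8: drop-all-unicast is
	 * enabled for this client via "ucast_drop_all | mask" and disabled
	 * via "ucast_drop_all & ~mask".
	 */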
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03002137 mac_filters->ucast_drop_all = drop_all_ucast ?
2138 mac_filters->ucast_drop_all | mask :
2139 mac_filters->ucast_drop_all & ~mask;
Vladislav Zolotarov042181f2011-06-14 01:33:39 +00002140
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03002141 mac_filters->mcast_drop_all = drop_all_mcast ?
2142 mac_filters->mcast_drop_all | mask :
2143 mac_filters->mcast_drop_all & ~mask;
Vladislav Zolotarov042181f2011-06-14 01:33:39 +00002144
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03002145 mac_filters->ucast_accept_all = accp_all_ucast ?
2146 mac_filters->ucast_accept_all | mask :
2147 mac_filters->ucast_accept_all & ~mask;
Vladislav Zolotarov042181f2011-06-14 01:33:39 +00002148
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03002149 mac_filters->mcast_accept_all = accp_all_mcast ?
2150 mac_filters->mcast_accept_all | mask :
2151 mac_filters->mcast_accept_all & ~mask;
Vladislav Zolotarov042181f2011-06-14 01:33:39 +00002152
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03002153 mac_filters->bcast_accept_all = accp_all_bcast ?
2154 mac_filters->bcast_accept_all | mask :
2155 mac_filters->bcast_accept_all & ~mask;
Vladislav Zolotarov042181f2011-06-14 01:33:39 +00002156
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03002157 mac_filters->unmatched_unicast = unmatched_unicast ?
2158 mac_filters->unmatched_unicast | mask :
2159 mac_filters->unmatched_unicast & ~mask;
Vladislav Zolotarov042181f2011-06-14 01:33:39 +00002160
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03002161	DP(BNX2X_MSG_SP, "drop_ucast 0x%x\ndrop_mcast 0x%x\naccp_ucast 0x%x\n"
Yuval Mintz2de67432013-01-23 03:21:43 +00002162 "accp_mcast 0x%x\naccp_bcast 0x%x\n",
Merav Sicron51c1a582012-03-18 10:33:38 +00002163 mac_filters->ucast_drop_all, mac_filters->mcast_drop_all,
2164 mac_filters->ucast_accept_all, mac_filters->mcast_accept_all,
2165 mac_filters->bcast_accept_all);
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03002166
2167	/* write the MAC filter structure */
2168 __storm_memset_mac_filters(bp, mac_filters, p->func_id);
2169
2170 /* The operation is completed */
2171 clear_bit(p->state, p->pstate);
2172 smp_mb__after_clear_bit();
2173
2174 return 0;
Vladislav Zolotarov042181f2011-06-14 01:33:39 +00002175}
2176
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03002177/* Setup ramrod data */
2178static inline void bnx2x_rx_mode_set_rdata_hdr_e2(u32 cid,
2179 struct eth_classify_header *hdr,
2180 u8 rule_cnt)
Vladislav Zolotarov042181f2011-06-14 01:33:39 +00002181{
Yuval Mintz86564c32013-01-23 03:21:50 +00002182 hdr->echo = cpu_to_le32(cid);
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03002183 hdr->rule_cnt = rule_cnt;
2184}
Vladislav Zolotarov042181f2011-06-14 01:33:39 +00002185
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03002186static inline void bnx2x_rx_mode_set_cmd_state_e2(struct bnx2x *bp,
Yuval Mintz924d75a2013-01-23 03:21:44 +00002187 unsigned long *accept_flags,
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03002188 struct eth_filter_rules_cmd *cmd,
2189 bool clear_accept_all)
2190{
2191 u16 state;
Vladislav Zolotarov042181f2011-06-14 01:33:39 +00002192
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03002193 /* start with 'drop-all' */
2194 state = ETH_FILTER_RULES_CMD_UCAST_DROP_ALL |
2195 ETH_FILTER_RULES_CMD_MCAST_DROP_ALL;
2196
Yuval Mintz924d75a2013-01-23 03:21:44 +00002197 if (test_bit(BNX2X_ACCEPT_UNICAST, accept_flags))
2198 state &= ~ETH_FILTER_RULES_CMD_UCAST_DROP_ALL;
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03002199
Yuval Mintz924d75a2013-01-23 03:21:44 +00002200 if (test_bit(BNX2X_ACCEPT_MULTICAST, accept_flags))
2201 state &= ~ETH_FILTER_RULES_CMD_MCAST_DROP_ALL;
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03002202
Yuval Mintz924d75a2013-01-23 03:21:44 +00002203 if (test_bit(BNX2X_ACCEPT_ALL_UNICAST, accept_flags)) {
2204 state &= ~ETH_FILTER_RULES_CMD_UCAST_DROP_ALL;
2205 state |= ETH_FILTER_RULES_CMD_UCAST_ACCEPT_ALL;
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03002206 }
2207
Yuval Mintz924d75a2013-01-23 03:21:44 +00002208 if (test_bit(BNX2X_ACCEPT_ALL_MULTICAST, accept_flags)) {
2209 state |= ETH_FILTER_RULES_CMD_MCAST_ACCEPT_ALL;
2210 state &= ~ETH_FILTER_RULES_CMD_MCAST_DROP_ALL;
2211 }
2212
2213 if (test_bit(BNX2X_ACCEPT_BROADCAST, accept_flags))
2214 state |= ETH_FILTER_RULES_CMD_BCAST_ACCEPT_ALL;
2215
2216 if (test_bit(BNX2X_ACCEPT_UNMATCHED, accept_flags)) {
2217 state &= ~ETH_FILTER_RULES_CMD_UCAST_DROP_ALL;
2218 state |= ETH_FILTER_RULES_CMD_UCAST_ACCEPT_UNMATCHED;
2219 }
2220
2221 if (test_bit(BNX2X_ACCEPT_ANY_VLAN, accept_flags))
2222 state |= ETH_FILTER_RULES_CMD_ACCEPT_ANY_VLAN;
2223
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03002224 /* Clear ACCEPT_ALL_XXX flags for FCoE L2 Queue */
2225 if (clear_accept_all) {
2226 state &= ~ETH_FILTER_RULES_CMD_MCAST_ACCEPT_ALL;
2227 state &= ~ETH_FILTER_RULES_CMD_BCAST_ACCEPT_ALL;
2228 state &= ~ETH_FILTER_RULES_CMD_UCAST_ACCEPT_ALL;
2229 state &= ~ETH_FILTER_RULES_CMD_UCAST_ACCEPT_UNMATCHED;
2230 }
2231
2232 cmd->state = cpu_to_le16(state);
2233
2234}
2235
2236static int bnx2x_set_rx_mode_e2(struct bnx2x *bp,
2237 struct bnx2x_rx_mode_ramrod_params *p)
2238{
2239 struct eth_filter_rules_ramrod_data *data = p->rdata;
2240 int rc;
2241 u8 rule_idx = 0;
2242
2243 /* Reset the ramrod data buffer */
2244 memset(data, 0, sizeof(*data));
2245
2246 /* Setup ramrod data */
2247
2248 /* Tx (internal switching) */
2249 if (test_bit(RAMROD_TX, &p->ramrod_flags)) {
2250 data->rules[rule_idx].client_id = p->cl_id;
2251 data->rules[rule_idx].func_id = p->func_id;
2252
2253 data->rules[rule_idx].cmd_general_data =
2254 ETH_FILTER_RULES_CMD_TX_CMD;
2255
Yuval Mintz924d75a2013-01-23 03:21:44 +00002256 bnx2x_rx_mode_set_cmd_state_e2(bp, &p->tx_accept_flags,
2257 &(data->rules[rule_idx++]),
2258 false);
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03002259 }
2260
2261 /* Rx */
2262 if (test_bit(RAMROD_RX, &p->ramrod_flags)) {
2263 data->rules[rule_idx].client_id = p->cl_id;
2264 data->rules[rule_idx].func_id = p->func_id;
2265
2266 data->rules[rule_idx].cmd_general_data =
2267 ETH_FILTER_RULES_CMD_RX_CMD;
2268
Yuval Mintz924d75a2013-01-23 03:21:44 +00002269 bnx2x_rx_mode_set_cmd_state_e2(bp, &p->rx_accept_flags,
2270 &(data->rules[rule_idx++]),
2271 false);
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03002272 }
2273
2274
2275 /*
2276	 * If FCoE Queue configuration has been requested, configure the Rx and
2277	 * internal switching modes for this queue in separate rules.
2278	 *
2279	 * The FCoE queue shall never be set to ACCEPT_ALL packets of any sort:
2280 * MCAST_ALL, UCAST_ALL, BCAST_ALL and UNMATCHED.
2281 */
2282 if (test_bit(BNX2X_RX_MODE_FCOE_ETH, &p->rx_mode_flags)) {
2283 /* Tx (internal switching) */
2284 if (test_bit(RAMROD_TX, &p->ramrod_flags)) {
2285 data->rules[rule_idx].client_id = bnx2x_fcoe(bp, cl_id);
2286 data->rules[rule_idx].func_id = p->func_id;
2287
2288 data->rules[rule_idx].cmd_general_data =
2289 ETH_FILTER_RULES_CMD_TX_CMD;
2290
Yuval Mintz924d75a2013-01-23 03:21:44 +00002291 bnx2x_rx_mode_set_cmd_state_e2(bp, &p->tx_accept_flags,
2292 &(data->rules[rule_idx]),
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03002293 true);
Yuval Mintz924d75a2013-01-23 03:21:44 +00002294 rule_idx++;
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03002295 }
2296
2297 /* Rx */
2298 if (test_bit(RAMROD_RX, &p->ramrod_flags)) {
2299 data->rules[rule_idx].client_id = bnx2x_fcoe(bp, cl_id);
2300 data->rules[rule_idx].func_id = p->func_id;
2301
2302 data->rules[rule_idx].cmd_general_data =
2303 ETH_FILTER_RULES_CMD_RX_CMD;
2304
Yuval Mintz924d75a2013-01-23 03:21:44 +00002305 bnx2x_rx_mode_set_cmd_state_e2(bp, &p->rx_accept_flags,
2306 &(data->rules[rule_idx]),
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03002307 true);
Yuval Mintz924d75a2013-01-23 03:21:44 +00002308 rule_idx++;
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03002309 }
2310 }
2311
2312 /*
2313 * Set the ramrod header (most importantly - number of rules to
2314 * configure).
2315 */
2316 bnx2x_rx_mode_set_rdata_hdr_e2(p->cid, &data->header, rule_idx);
2317
Merav Sicron51c1a582012-03-18 10:33:38 +00002318 DP(BNX2X_MSG_SP, "About to configure %d rules, rx_accept_flags 0x%lx, tx_accept_flags 0x%lx\n",
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03002319 data->header.rule_cnt, p->rx_accept_flags,
2320 p->tx_accept_flags);
2321
Vladislav Zolotarov53e51e22011-07-19 01:45:02 +00002322 /*
2323	 * No need for an explicit memory barrier here: ordering the
2324	 * writes to the SPQ element before the update of the SPQ
2325	 * producer (which involves a memory read) requires a full
2326	 * memory barrier anyway, and that barrier is already placed
2327	 * inside bnx2x_sp_post().
2328 */
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03002329
2330 /* Send a ramrod */
2331 rc = bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_FILTER_RULES, p->cid,
2332 U64_HI(p->rdata_mapping),
2333 U64_LO(p->rdata_mapping),
2334 ETH_CONNECTION_TYPE);
2335 if (rc)
2336 return rc;
2337
2338 /* Ramrod completion is pending */
2339 return 1;
2340}
2341
2342static int bnx2x_wait_rx_mode_comp_e2(struct bnx2x *bp,
2343 struct bnx2x_rx_mode_ramrod_params *p)
2344{
2345 return bnx2x_state_wait(bp, p->state, p->pstate);
2346}
2347
2348static int bnx2x_empty_rx_mode_wait(struct bnx2x *bp,
2349 struct bnx2x_rx_mode_ramrod_params *p)
2350{
2351 /* Do nothing */
2352 return 0;
2353}
2354
2355int bnx2x_config_rx_mode(struct bnx2x *bp,
2356 struct bnx2x_rx_mode_ramrod_params *p)
2357{
2358 int rc;
2359
2360 /* Configure the new classification in the chip */
2361 rc = p->rx_mode_obj->config_rx_mode(bp, p);
2362 if (rc < 0)
2363 return rc;
2364
2365	/* Wait for a ramrod completion if it was requested */
2366 if (test_bit(RAMROD_COMP_WAIT, &p->ramrod_flags)) {
2367 rc = p->rx_mode_obj->wait_comp(bp, p);
2368 if (rc)
2369 return rc;
2370 }
2371
2372 return rc;
2373}
2374
2375void bnx2x_init_rx_mode_obj(struct bnx2x *bp,
2376 struct bnx2x_rx_mode_obj *o)
2377{
2378 if (CHIP_IS_E1x(bp)) {
2379 o->wait_comp = bnx2x_empty_rx_mode_wait;
2380 o->config_rx_mode = bnx2x_set_rx_mode_e1x;
2381 } else {
2382 o->wait_comp = bnx2x_wait_rx_mode_comp_e2;
2383 o->config_rx_mode = bnx2x_set_rx_mode_e2;
2384 }
2385}
2386
2387/********************* Multicast verbs: SET, CLEAR ****************************/
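/* Each multicast MAC is mapped to one of 256 approximate-match "bins" by
 * taking the top byte of its CRC32c. E.g. (illustrative CRC value): if
 * crc32c_le(0, mac, ETH_ALEN) == 0xdeadbeef, the MAC lands in bin
 * (0xdeadbeef >> 24) & 0xff == 0xde == 222.
 */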
2388static inline u8 bnx2x_mcast_bin_from_mac(u8 *mac)
2389{
2390 return (crc32c_le(0, mac, ETH_ALEN) >> 24) & 0xff;
2391}
2392
2393struct bnx2x_mcast_mac_elem {
2394 struct list_head link;
2395 u8 mac[ETH_ALEN];
2396 u8 pad[2]; /* For a natural alignment of the following buffer */
2397};
2398
2399struct bnx2x_pending_mcast_cmd {
2400 struct list_head link;
2401 int type; /* BNX2X_MCAST_CMD_X */
2402 union {
2403 struct list_head macs_head;
2404 u32 macs_num; /* Needed for DEL command */
2405 int next_bin; /* Needed for RESTORE flow with aprox match */
2406 } data;
2407
2408 bool done; /* set to true, when the command has been handled,
2409 * practically used in 57712 handling only, where one pending
2410	 * command may be handled in a few operations. Since on the other
2411	 * chips every operation is completed in a single ramrod, there
2412	 * is no need to utilize this field for them.
2413 */
2414};
2415
2416static int bnx2x_mcast_wait(struct bnx2x *bp,
2417 struct bnx2x_mcast_obj *o)
2418{
2419 if (bnx2x_state_wait(bp, o->sched_state, o->raw.pstate) ||
2420 o->raw.wait_comp(bp, &o->raw))
2421 return -EBUSY;
2422
2423 return 0;
2424}
2425
2426static int bnx2x_mcast_enqueue_cmd(struct bnx2x *bp,
2427 struct bnx2x_mcast_obj *o,
2428 struct bnx2x_mcast_ramrod_params *p,
Yuval Mintz86564c32013-01-23 03:21:50 +00002429 enum bnx2x_mcast_cmd cmd)
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03002430{
2431 int total_sz;
2432 struct bnx2x_pending_mcast_cmd *new_cmd;
2433 struct bnx2x_mcast_mac_elem *cur_mac = NULL;
2434 struct bnx2x_mcast_list_elem *pos;
2435 int macs_list_len = ((cmd == BNX2X_MCAST_CMD_ADD) ?
2436 p->mcast_list_len : 0);
2437
2438	/* If the command is empty ("handle pending commands only"), return */
2439 if (!p->mcast_list_len)
2440 return 0;
2441
2442 total_sz = sizeof(*new_cmd) +
2443 macs_list_len * sizeof(struct bnx2x_mcast_mac_elem);
2444
2445 /* Add mcast is called under spin_lock, thus calling with GFP_ATOMIC */
2446 new_cmd = kzalloc(total_sz, GFP_ATOMIC);
2447
2448 if (!new_cmd)
2449 return -ENOMEM;
2450
Merav Sicron51c1a582012-03-18 10:33:38 +00002451 DP(BNX2X_MSG_SP, "About to enqueue a new %d command. macs_list_len=%d\n",
2452 cmd, macs_list_len);
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03002453
2454 INIT_LIST_HEAD(&new_cmd->data.macs_head);
2455
2456 new_cmd->type = cmd;
2457 new_cmd->done = false;
2458
2459 switch (cmd) {
2460 case BNX2X_MCAST_CMD_ADD:
2461 cur_mac = (struct bnx2x_mcast_mac_elem *)
2462 ((u8 *)new_cmd + sizeof(*new_cmd));
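		/* The MAC elements live in the same kzalloc()ed buffer,
		 * immediately after the command header (see total_sz above).
		 */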
2463
2464		/* Push the MACs of the current command into the pending command
2465 * MACs list: FIFO
Vladislav Zolotarov042181f2011-06-14 01:33:39 +00002466 */
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03002467 list_for_each_entry(pos, &p->mcast_list, link) {
2468 memcpy(cur_mac->mac, pos->mac, ETH_ALEN);
2469 list_add_tail(&cur_mac->link, &new_cmd->data.macs_head);
2470 cur_mac++;
Vladislav Zolotarov042181f2011-06-14 01:33:39 +00002471 }
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03002472
Vladislav Zolotarov042181f2011-06-14 01:33:39 +00002473 break;
2474
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03002475 case BNX2X_MCAST_CMD_DEL:
2476 new_cmd->data.macs_num = p->mcast_list_len;
2477 break;
2478
2479 case BNX2X_MCAST_CMD_RESTORE:
2480 new_cmd->data.next_bin = 0;
Vladislav Zolotarov042181f2011-06-14 01:33:39 +00002481 break;
2482
2483 default:
Jesper Juhl8b6d5c02012-07-31 11:39:37 +00002484 kfree(new_cmd);
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03002485 BNX2X_ERR("Unknown command: %d\n", cmd);
2486 return -EINVAL;
2487 }
2488
2489 /* Push the new pending command to the tail of the pending list: FIFO */
2490 list_add_tail(&new_cmd->link, &o->pending_cmds_head);
2491
2492 o->set_sched(o);
2493
2494 return 1;
2495}
2496
2497/**
2498 * bnx2x_mcast_get_next_bin - get the next set bin (index)
2499 *
2500 * @o: multicast object to scan
2501 * @last: index to start looking from (including)
2502 *
2503 * returns the next found (set) bin or a negative value if none is found.
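 *
 * The registry is a vector of 64-bit elements: bin N is bit (N % 64) of
 * element (N / 64). E.g. if only bin 70 is set, vec[1] == (1ULL << 6) and
 * bnx2x_mcast_get_next_bin(o, 0) returns 70.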
2504 */
2505static inline int bnx2x_mcast_get_next_bin(struct bnx2x_mcast_obj *o, int last)
2506{
2507 int i, j, inner_start = last % BIT_VEC64_ELEM_SZ;
2508
2509 for (i = last / BIT_VEC64_ELEM_SZ; i < BNX2X_MCAST_VEC_SZ; i++) {
2510 if (o->registry.aprox_match.vec[i])
2511 for (j = inner_start; j < BIT_VEC64_ELEM_SZ; j++) {
2512 int cur_bit = j + BIT_VEC64_ELEM_SZ * i;
2513 if (BIT_VEC64_TEST_BIT(o->registry.aprox_match.
2514 vec, cur_bit)) {
2515 return cur_bit;
2516 }
2517 }
2518 inner_start = 0;
2519 }
2520
2521 /* None found */
2522 return -1;
2523}
2524
2525/**
2526 * bnx2x_mcast_clear_first_bin - find the first set bin and clear it
2527 *
2528 * @o: multicast object
2529 *
2530 * returns the index of the found bin or -1 if none is found
2531 */
2532static inline int bnx2x_mcast_clear_first_bin(struct bnx2x_mcast_obj *o)
2533{
2534 int cur_bit = bnx2x_mcast_get_next_bin(o, 0);
2535
2536 if (cur_bit >= 0)
2537 BIT_VEC64_CLEAR_BIT(o->registry.aprox_match.vec, cur_bit);
2538
2539 return cur_bit;
2540}
2541
2542static inline u8 bnx2x_mcast_get_rx_tx_flag(struct bnx2x_mcast_obj *o)
2543{
2544 struct bnx2x_raw_obj *raw = &o->raw;
2545 u8 rx_tx_flag = 0;
2546
2547 if ((raw->obj_type == BNX2X_OBJ_TYPE_TX) ||
2548 (raw->obj_type == BNX2X_OBJ_TYPE_RX_TX))
2549 rx_tx_flag |= ETH_MULTICAST_RULES_CMD_TX_CMD;
2550
2551 if ((raw->obj_type == BNX2X_OBJ_TYPE_RX) ||
2552 (raw->obj_type == BNX2X_OBJ_TYPE_RX_TX))
2553 rx_tx_flag |= ETH_MULTICAST_RULES_CMD_RX_CMD;
2554
2555 return rx_tx_flag;
2556}
2557
2558static void bnx2x_mcast_set_one_rule_e2(struct bnx2x *bp,
2559 struct bnx2x_mcast_obj *o, int idx,
2560 union bnx2x_mcast_config_data *cfg_data,
Yuval Mintz86564c32013-01-23 03:21:50 +00002561 enum bnx2x_mcast_cmd cmd)
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03002562{
2563 struct bnx2x_raw_obj *r = &o->raw;
2564 struct eth_multicast_rules_ramrod_data *data =
2565 (struct eth_multicast_rules_ramrod_data *)(r->rdata);
2566 u8 func_id = r->func_id;
2567 u8 rx_tx_add_flag = bnx2x_mcast_get_rx_tx_flag(o);
2568 int bin;
2569
2570 if ((cmd == BNX2X_MCAST_CMD_ADD) || (cmd == BNX2X_MCAST_CMD_RESTORE))
2571 rx_tx_add_flag |= ETH_MULTICAST_RULES_CMD_IS_ADD;
2572
2573 data->rules[idx].cmd_general_data |= rx_tx_add_flag;
2574
2575 /* Get a bin and update a bins' vector */
2576 switch (cmd) {
2577 case BNX2X_MCAST_CMD_ADD:
2578 bin = bnx2x_mcast_bin_from_mac(cfg_data->mac);
2579 BIT_VEC64_SET_BIT(o->registry.aprox_match.vec, bin);
Vladislav Zolotarov042181f2011-06-14 01:33:39 +00002580 break;
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03002581
2582 case BNX2X_MCAST_CMD_DEL:
2583 /* If there were no more bins to clear
2584 * (bnx2x_mcast_clear_first_bin() returns -1) then we would
2585 * clear any (0xff) bin.
2586 * See bnx2x_mcast_validate_e2() for explanation when it may
2587 * happen.
2588 */
2589 bin = bnx2x_mcast_clear_first_bin(o);
2590 break;
2591
2592 case BNX2X_MCAST_CMD_RESTORE:
2593 bin = cfg_data->bin;
2594 break;
2595
2596 default:
2597 BNX2X_ERR("Unknown command: %d\n", cmd);
2598 return;
Vladislav Zolotarov042181f2011-06-14 01:33:39 +00002599 }
2600
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03002601 DP(BNX2X_MSG_SP, "%s bin %d\n",
2602 ((rx_tx_add_flag & ETH_MULTICAST_RULES_CMD_IS_ADD) ?
2603 "Setting" : "Clearing"), bin);
Vladislav Zolotarov042181f2011-06-14 01:33:39 +00002604
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03002605 data->rules[idx].bin_id = (u8)bin;
2606 data->rules[idx].func_id = func_id;
2607 data->rules[idx].engine_id = o->engine_id;
Vladislav Zolotarov042181f2011-06-14 01:33:39 +00002608}
2609
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03002610/**
2611 * bnx2x_mcast_handle_restore_cmd_e2 - restore configuration from the registry
2612 *
2613 * @bp: device handle
2614 * @o: multicast object
2615 * @start_bin: index in the registry to start from (including)
2616 * @rdata_idx: index in the ramrod data to start from
2617 *
2618 * returns last handled bin index or -1 if all bins have been handled
2619 */
2620static inline int bnx2x_mcast_handle_restore_cmd_e2(
2621 struct bnx2x *bp, struct bnx2x_mcast_obj *o , int start_bin,
2622 int *rdata_idx)
Vladislav Zolotarov042181f2011-06-14 01:33:39 +00002623{
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03002624 int cur_bin, cnt = *rdata_idx;
Yuval Mintz86564c32013-01-23 03:21:50 +00002625 union bnx2x_mcast_config_data cfg_data = {NULL};
Vladislav Zolotarov042181f2011-06-14 01:33:39 +00002626
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03002627 /* go through the registry and configure the bins from it */
2628 for (cur_bin = bnx2x_mcast_get_next_bin(o, start_bin); cur_bin >= 0;
2629 cur_bin = bnx2x_mcast_get_next_bin(o, cur_bin + 1)) {
Vladislav Zolotarov042181f2011-06-14 01:33:39 +00002630
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03002631 cfg_data.bin = (u8)cur_bin;
2632 o->set_one_rule(bp, o, cnt, &cfg_data,
2633 BNX2X_MCAST_CMD_RESTORE);
Vladislav Zolotarov042181f2011-06-14 01:33:39 +00002634
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03002635 cnt++;
Vladislav Zolotarov042181f2011-06-14 01:33:39 +00002636
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03002637 DP(BNX2X_MSG_SP, "About to configure a bin %d\n", cur_bin);
Vladislav Zolotarov042181f2011-06-14 01:33:39 +00002638
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03002639 /* Break if we reached the maximum number
2640 * of rules.
2641 */
2642 if (cnt >= o->max_cmd_len)
2643 break;
Vladislav Zolotarov042181f2011-06-14 01:33:39 +00002644 }
2645
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03002646 *rdata_idx = cnt;
2647
2648 return cur_bin;
Vladislav Zolotarov042181f2011-06-14 01:33:39 +00002649}
2650
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03002651static inline void bnx2x_mcast_hdl_pending_add_e2(struct bnx2x *bp,
2652 struct bnx2x_mcast_obj *o, struct bnx2x_pending_mcast_cmd *cmd_pos,
2653 int *line_idx)
Vladislav Zolotarov042181f2011-06-14 01:33:39 +00002654{
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03002655 struct bnx2x_mcast_mac_elem *pmac_pos, *pmac_pos_n;
2656 int cnt = *line_idx;
Yuval Mintz86564c32013-01-23 03:21:50 +00002657 union bnx2x_mcast_config_data cfg_data = {NULL};
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03002658
2659 list_for_each_entry_safe(pmac_pos, pmac_pos_n, &cmd_pos->data.macs_head,
2660 link) {
2661
2662 cfg_data.mac = &pmac_pos->mac[0];
2663 o->set_one_rule(bp, o, cnt, &cfg_data, cmd_pos->type);
2664
2665 cnt++;
2666
Joe Perches0f9dad12011-08-14 12:16:19 +00002667 DP(BNX2X_MSG_SP, "About to configure %pM mcast MAC\n",
Merav Sicron51c1a582012-03-18 10:33:38 +00002668 pmac_pos->mac);
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03002669
2670 list_del(&pmac_pos->link);
2671
2672 /* Break if we reached the maximum number
2673 * of rules.
2674 */
2675 if (cnt >= o->max_cmd_len)
2676 break;
2677 }
2678
2679 *line_idx = cnt;
2680
2681 /* if no more MACs to configure - we are done */
2682 if (list_empty(&cmd_pos->data.macs_head))
2683 cmd_pos->done = true;
2684}
2685
2686static inline void bnx2x_mcast_hdl_pending_del_e2(struct bnx2x *bp,
2687 struct bnx2x_mcast_obj *o, struct bnx2x_pending_mcast_cmd *cmd_pos,
2688 int *line_idx)
2689{
2690 int cnt = *line_idx;
2691
2692 while (cmd_pos->data.macs_num) {
2693 o->set_one_rule(bp, o, cnt, NULL, cmd_pos->type);
2694
2695 cnt++;
2696
2697 cmd_pos->data.macs_num--;
2698
2699		DP(BNX2X_MSG_SP, "Deleting MAC. %d left, cnt is %d\n",
2700 cmd_pos->data.macs_num, cnt);
2701
2702 /* Break if we reached the maximum
2703 * number of rules.
2704 */
2705 if (cnt >= o->max_cmd_len)
2706 break;
2707 }
2708
2709 *line_idx = cnt;
2710
2711 /* If we cleared all bins - we are done */
2712 if (!cmd_pos->data.macs_num)
2713 cmd_pos->done = true;
2714}
2715
2716static inline void bnx2x_mcast_hdl_pending_restore_e2(struct bnx2x *bp,
2717 struct bnx2x_mcast_obj *o, struct bnx2x_pending_mcast_cmd *cmd_pos,
2718 int *line_idx)
2719{
2720 cmd_pos->data.next_bin = o->hdl_restore(bp, o, cmd_pos->data.next_bin,
2721 line_idx);
2722
2723 if (cmd_pos->data.next_bin < 0)
2724 /* If o->set_restore returned -1 we are done */
2725 cmd_pos->done = true;
2726 else
2727 /* Start from the next bin next time */
2728 cmd_pos->data.next_bin++;
2729}
2730
2731static inline int bnx2x_mcast_handle_pending_cmds_e2(struct bnx2x *bp,
2732 struct bnx2x_mcast_ramrod_params *p)
2733{
2734 struct bnx2x_pending_mcast_cmd *cmd_pos, *cmd_pos_n;
2735 int cnt = 0;
2736 struct bnx2x_mcast_obj *o = p->mcast_obj;
2737
2738 list_for_each_entry_safe(cmd_pos, cmd_pos_n, &o->pending_cmds_head,
2739 link) {
2740 switch (cmd_pos->type) {
2741 case BNX2X_MCAST_CMD_ADD:
2742 bnx2x_mcast_hdl_pending_add_e2(bp, o, cmd_pos, &cnt);
2743 break;
2744
2745 case BNX2X_MCAST_CMD_DEL:
2746 bnx2x_mcast_hdl_pending_del_e2(bp, o, cmd_pos, &cnt);
2747 break;
2748
2749 case BNX2X_MCAST_CMD_RESTORE:
2750 bnx2x_mcast_hdl_pending_restore_e2(bp, o, cmd_pos,
2751 &cnt);
2752 break;
2753
2754 default:
2755 BNX2X_ERR("Unknown command: %d\n", cmd_pos->type);
2756 return -EINVAL;
2757 }
2758
2759 /* If the command has been completed - remove it from the list
2760 * and free the memory
2761 */
2762 if (cmd_pos->done) {
2763 list_del(&cmd_pos->link);
2764 kfree(cmd_pos);
2765 }
2766
2767 /* Break if we reached the maximum number of rules */
2768 if (cnt >= o->max_cmd_len)
2769 break;
2770 }
2771
2772 return cnt;
2773}
2774
2775static inline void bnx2x_mcast_hdl_add(struct bnx2x *bp,
2776 struct bnx2x_mcast_obj *o, struct bnx2x_mcast_ramrod_params *p,
2777 int *line_idx)
2778{
2779 struct bnx2x_mcast_list_elem *mlist_pos;
Yuval Mintz86564c32013-01-23 03:21:50 +00002780 union bnx2x_mcast_config_data cfg_data = {NULL};
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03002781 int cnt = *line_idx;
2782
2783 list_for_each_entry(mlist_pos, &p->mcast_list, link) {
2784 cfg_data.mac = mlist_pos->mac;
2785 o->set_one_rule(bp, o, cnt, &cfg_data, BNX2X_MCAST_CMD_ADD);
2786
2787 cnt++;
2788
Joe Perches0f9dad12011-08-14 12:16:19 +00002789 DP(BNX2X_MSG_SP, "About to configure %pM mcast MAC\n",
Yuval Mintz2de67432013-01-23 03:21:43 +00002790 mlist_pos->mac);
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03002791 }
2792
2793 *line_idx = cnt;
2794}
2795
2796static inline void bnx2x_mcast_hdl_del(struct bnx2x *bp,
2797 struct bnx2x_mcast_obj *o, struct bnx2x_mcast_ramrod_params *p,
2798 int *line_idx)
2799{
2800 int cnt = *line_idx, i;
2801
2802 for (i = 0; i < p->mcast_list_len; i++) {
2803 o->set_one_rule(bp, o, cnt, NULL, BNX2X_MCAST_CMD_DEL);
2804
2805 cnt++;
2806
2807 DP(BNX2X_MSG_SP, "Deleting MAC. %d left\n",
2808 p->mcast_list_len - i - 1);
2809 }
2810
2811 *line_idx = cnt;
2812}
2813
2814/**
2815 * bnx2x_mcast_handle_current_cmd - handle the current command
2816 *
2817 * @bp: device handle
2818 * @p: ramrod parameters
2819 * @cmd: command to handle
2820 * @start_cnt: first line in the ramrod data that may be used
2821 *
2822 * This function is called iff there is enough room for the current command in
2823 * the ramrod data.
2824 * Returns number of lines filled in the ramrod data in total.
2825 */
2826static inline int bnx2x_mcast_handle_current_cmd(struct bnx2x *bp,
Yuval Mintz86564c32013-01-23 03:21:50 +00002827 struct bnx2x_mcast_ramrod_params *p,
2828 enum bnx2x_mcast_cmd cmd,
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03002829 int start_cnt)
2830{
2831 struct bnx2x_mcast_obj *o = p->mcast_obj;
2832 int cnt = start_cnt;
2833
2834 DP(BNX2X_MSG_SP, "p->mcast_list_len=%d\n", p->mcast_list_len);
2835
2836 switch (cmd) {
2837 case BNX2X_MCAST_CMD_ADD:
2838 bnx2x_mcast_hdl_add(bp, o, p, &cnt);
2839 break;
2840
2841 case BNX2X_MCAST_CMD_DEL:
2842 bnx2x_mcast_hdl_del(bp, o, p, &cnt);
2843 break;
2844
2845 case BNX2X_MCAST_CMD_RESTORE:
2846 o->hdl_restore(bp, o, 0, &cnt);
2847 break;
2848
2849 default:
2850 BNX2X_ERR("Unknown command: %d\n", cmd);
2851 return -EINVAL;
2852 }
2853
2854 /* The current command has been handled */
2855 p->mcast_list_len = 0;
2856
2857 return cnt;
2858}
2859
2860static int bnx2x_mcast_validate_e2(struct bnx2x *bp,
2861 struct bnx2x_mcast_ramrod_params *p,
Yuval Mintz86564c32013-01-23 03:21:50 +00002862 enum bnx2x_mcast_cmd cmd)
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03002863{
2864 struct bnx2x_mcast_obj *o = p->mcast_obj;
2865 int reg_sz = o->get_registry_size(o);
2866
2867 switch (cmd) {
2868 /* DEL command deletes all currently configured MACs */
2869 case BNX2X_MCAST_CMD_DEL:
2870 o->set_registry_size(o, 0);
2871 /* Don't break */
2872
2873 /* RESTORE command will restore the entire multicast configuration */
2874 case BNX2X_MCAST_CMD_RESTORE:
2875 /* Here we set the approximate amount of work to do, which in
2876	 * fact may turn out to be less, as some MACs in postponed ADD
2877 * command(s) scheduled before this command may fall into
2878 * the same bin and the actual number of bins set in the
2879 * registry would be less than we estimated here. See
2880 * bnx2x_mcast_set_one_rule_e2() for further details.
2881 */
2882 p->mcast_list_len = reg_sz;
2883 break;
2884
2885 case BNX2X_MCAST_CMD_ADD:
2886 case BNX2X_MCAST_CMD_CONT:
2887 /* Here we assume that all new MACs will fall into new bins.
2888 * However we will correct the real registry size after we
2889 * handle all pending commands.
2890 */
2891 o->set_registry_size(o, reg_sz + p->mcast_list_len);
2892 break;
2893
2894 default:
2895 BNX2X_ERR("Unknown command: %d\n", cmd);
2896 return -EINVAL;
2897
2898 }
2899
2900 /* Increase the total number of MACs pending to be configured */
2901 o->total_pending_num += p->mcast_list_len;
2902
2903 return 0;
2904}
2905
2906static void bnx2x_mcast_revert_e2(struct bnx2x *bp,
2907 struct bnx2x_mcast_ramrod_params *p,
2908 int old_num_bins)
2909{
2910 struct bnx2x_mcast_obj *o = p->mcast_obj;
2911
2912 o->set_registry_size(o, old_num_bins);
2913 o->total_pending_num -= p->mcast_list_len;
2914}
2915
2916/**
2917 * bnx2x_mcast_set_rdata_hdr_e2 - sets the header values
2918 *
2919 * @bp: device handle
2920 * @p: ramrod parameters
2921 * @len: number of rules to handle
2922 */
2923static inline void bnx2x_mcast_set_rdata_hdr_e2(struct bnx2x *bp,
2924 struct bnx2x_mcast_ramrod_params *p,
2925 u8 len)
2926{
2927 struct bnx2x_raw_obj *r = &p->mcast_obj->raw;
2928 struct eth_multicast_rules_ramrod_data *data =
2929 (struct eth_multicast_rules_ramrod_data *)(r->rdata);
2930
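	/* Pack the connection id and the pending-command state into the echo
	 * field; the completion handler uses this to identify which command
	 * the ramrod carried.
	 */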
Yuval Mintz86564c32013-01-23 03:21:50 +00002931 data->header.echo = cpu_to_le32((r->cid & BNX2X_SWCID_MASK) |
2932 (BNX2X_FILTER_MCAST_PENDING <<
2933 BNX2X_SWCID_SHIFT));
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03002934 data->header.rule_cnt = len;
2935}
2936
2937/**
2938 * bnx2x_mcast_refresh_registry_e2 - recalculate the actual number of set bins
2939 *
2940 * @bp: device handle
2941 * @o:
2942 * @o: multicast object
2943 * Recalculate the actual number of set bins in the registry using Brian
2944 * Kernighan's algorithm: its complexity is linear in the number of set bins.
2945 *
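 * E.g. elem == 0xB (binary 1011) is reduced 0xB -> 0xA -> 0x8 -> 0x0,
 * i.e. exactly three loop iterations for three set bits.
 *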
2946 * returns 0 for the compliance with bnx2x_mcast_refresh_registry_e1().
2947 */
2948static inline int bnx2x_mcast_refresh_registry_e2(struct bnx2x *bp,
2949 struct bnx2x_mcast_obj *o)
2950{
2951 int i, cnt = 0;
2952 u64 elem;
2953
2954 for (i = 0; i < BNX2X_MCAST_VEC_SZ; i++) {
2955 elem = o->registry.aprox_match.vec[i];
2956 for (; elem; cnt++)
2957 elem &= elem - 1;
2958 }
2959
2960 o->set_registry_size(o, cnt);
2961
2962 return 0;
2963}
2964
2965static int bnx2x_mcast_setup_e2(struct bnx2x *bp,
2966 struct bnx2x_mcast_ramrod_params *p,
Yuval Mintz86564c32013-01-23 03:21:50 +00002967 enum bnx2x_mcast_cmd cmd)
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03002968{
2969 struct bnx2x_raw_obj *raw = &p->mcast_obj->raw;
2970 struct bnx2x_mcast_obj *o = p->mcast_obj;
2971 struct eth_multicast_rules_ramrod_data *data =
2972 (struct eth_multicast_rules_ramrod_data *)(raw->rdata);
2973 int cnt = 0, rc;
2974
2975 /* Reset the ramrod data buffer */
Vladislav Zolotarov042181f2011-06-14 01:33:39 +00002976 memset(data, 0, sizeof(*data));
2977
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03002978 cnt = bnx2x_mcast_handle_pending_cmds_e2(bp, p);
2979
2980 /* If there are no more pending commands - clear SCHEDULED state */
2981 if (list_empty(&o->pending_cmds_head))
2982 o->clear_sched(o);
2983
2984 /* The below may be true iff there was enough room in ramrod
2985 * data for all pending commands and for the current
2986 * command. Otherwise the current command would have been added
2987 * to the pending commands and p->mcast_list_len would have been
2988 * zeroed.
2989 */
2990 if (p->mcast_list_len > 0)
2991 cnt = bnx2x_mcast_handle_current_cmd(bp, p, cmd, cnt);
2992
2993 /* We've pulled out some MACs - update the total number of
2994 * outstanding.
2995 */
2996 o->total_pending_num -= cnt;
2997
2998 /* send a ramrod */
2999 WARN_ON(o->total_pending_num < 0);
3000 WARN_ON(cnt > o->max_cmd_len);
3001
3002 bnx2x_mcast_set_rdata_hdr_e2(bp, p, (u8)cnt);
3003
3004 /* Update a registry size if there are no more pending operations.
3005 *
3006 * We don't want to change the value of the registry size if there are
3007 * pending operations because we want it to always be equal to the
3008 * exact or the approximate number (see bnx2x_mcast_validate_e2()) of
3009 * set bins after the last requested operation in order to properly
3010 * evaluate the size of the next DEL/RESTORE operation.
3011 *
3012 * Note that we update the registry itself during command(s) handling
3013 * - see bnx2x_mcast_set_one_rule_e2(). That's because for 57712 we
3014 * aggregate multiple commands (ADD/DEL/RESTORE) into one ramrod but
3015 * with a limited amount of update commands (per MAC/bin) and we don't
3016 * know in this scope what the actual state of bins configuration is
3017 * going to be after this ramrod.
3018 */
3019 if (!o->total_pending_num)
3020 bnx2x_mcast_refresh_registry_e2(bp, o);
3021
Vladislav Zolotarov53e51e22011-07-19 01:45:02 +00003022 /*
3023 * If CLEAR_ONLY was requested - don't send a ramrod and clear
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03003024 * RAMROD_PENDING status immediately.
3025 */
3026 if (test_bit(RAMROD_DRV_CLR_ONLY, &p->ramrod_flags)) {
3027 raw->clear_pending(raw);
3028 return 0;
3029 } else {
Vladislav Zolotarov53e51e22011-07-19 01:45:02 +00003030 /*
3031	 * No need for an explicit memory barrier here: ordering the
3032	 * writes to the SPQ element before the update of the SPQ
3033	 * producer (which involves a memory read) requires a full
3034	 * memory barrier anyway, and that barrier is already placed
3035	 * inside bnx2x_sp_post().
3036 */
3037
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03003038 /* Send a ramrod */
3039 rc = bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_MULTICAST_RULES,
3040 raw->cid, U64_HI(raw->rdata_mapping),
3041 U64_LO(raw->rdata_mapping),
3042 ETH_CONNECTION_TYPE);
3043 if (rc)
3044 return rc;
3045
3046 /* Ramrod completion is pending */
3047 return 1;
3048 }
3049}
3050
3051static int bnx2x_mcast_validate_e1h(struct bnx2x *bp,
3052 struct bnx2x_mcast_ramrod_params *p,
Yuval Mintz86564c32013-01-23 03:21:50 +00003053 enum bnx2x_mcast_cmd cmd)
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03003054{
3055	/* Mark that there is work to do */
3056 if ((cmd == BNX2X_MCAST_CMD_DEL) || (cmd == BNX2X_MCAST_CMD_RESTORE))
3057 p->mcast_list_len = 1;
3058
3059 return 0;
3060}
3061
3062static void bnx2x_mcast_revert_e1h(struct bnx2x *bp,
3063 struct bnx2x_mcast_ramrod_params *p,
3064 int old_num_bins)
3065{
3066 /* Do nothing */
3067}
3068
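/* Set bit 'bit' in an array of u32 filter words: the word index is
 * bit / 32 and the position within the word is bit % 32. E.g. bit 70
 * lands in filter[2] as (1 << 6), since 70 >> 5 == 2 and 70 & 0x1f == 6.
 */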
3069#define BNX2X_57711_SET_MC_FILTER(filter, bit) \
3070do { \
3071 (filter)[(bit) >> 5] |= (1 << ((bit) & 0x1f)); \
3072} while (0)
3073
3074static inline void bnx2x_mcast_hdl_add_e1h(struct bnx2x *bp,
3075 struct bnx2x_mcast_obj *o,
3076 struct bnx2x_mcast_ramrod_params *p,
3077 u32 *mc_filter)
3078{
3079 struct bnx2x_mcast_list_elem *mlist_pos;
3080 int bit;
3081
3082 list_for_each_entry(mlist_pos, &p->mcast_list, link) {
3083 bit = bnx2x_mcast_bin_from_mac(mlist_pos->mac);
3084 BNX2X_57711_SET_MC_FILTER(mc_filter, bit);
3085
Joe Perches0f9dad12011-08-14 12:16:19 +00003086 DP(BNX2X_MSG_SP, "About to configure %pM mcast MAC, bin %d\n",
Yuval Mintz2de67432013-01-23 03:21:43 +00003087 mlist_pos->mac, bit);
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03003088
3089 /* bookkeeping... */
3090 BIT_VEC64_SET_BIT(o->registry.aprox_match.vec,
3091 bit);
3092 }
3093}
3094
3095static inline void bnx2x_mcast_hdl_restore_e1h(struct bnx2x *bp,
3096 struct bnx2x_mcast_obj *o, struct bnx2x_mcast_ramrod_params *p,
3097 u32 *mc_filter)
3098{
3099 int bit;
3100
3101 for (bit = bnx2x_mcast_get_next_bin(o, 0);
3102 bit >= 0;
3103 bit = bnx2x_mcast_get_next_bin(o, bit + 1)) {
3104 BNX2X_57711_SET_MC_FILTER(mc_filter, bit);
3105 DP(BNX2X_MSG_SP, "About to set bin %d\n", bit);
3106 }
3107}
3108
3109/* On 57711 we write the multicast MACs' approximate match
3110 * table directly into the TSTORM's internal RAM, so we don't
3111 * really need any tricks to make it work.
3112 */
3113static int bnx2x_mcast_setup_e1h(struct bnx2x *bp,
3114 struct bnx2x_mcast_ramrod_params *p,
Yuval Mintz86564c32013-01-23 03:21:50 +00003115 enum bnx2x_mcast_cmd cmd)
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03003116{
3117 int i;
3118 struct bnx2x_mcast_obj *o = p->mcast_obj;
3119 struct bnx2x_raw_obj *r = &o->raw;
3120
3121 /* If CLEAR_ONLY has been requested - clear the registry
3122 * and clear a pending bit.
3123 */
3124 if (!test_bit(RAMROD_DRV_CLR_ONLY, &p->ramrod_flags)) {
3125 u32 mc_filter[MC_HASH_SIZE] = {0};
3126
3127 /* Set the multicast filter bits before writing it into
3128 * the internal memory.
3129 */
3130 switch (cmd) {
3131 case BNX2X_MCAST_CMD_ADD:
3132 bnx2x_mcast_hdl_add_e1h(bp, o, p, mc_filter);
3133 break;
3134
3135 case BNX2X_MCAST_CMD_DEL:
Joe Perches94f05b02011-08-14 12:16:20 +00003136 DP(BNX2X_MSG_SP,
3137 "Invalidating multicast MACs configuration\n");
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03003138
3139 /* clear the registry */
3140 memset(o->registry.aprox_match.vec, 0,
3141 sizeof(o->registry.aprox_match.vec));
3142 break;
3143
3144 case BNX2X_MCAST_CMD_RESTORE:
3145 bnx2x_mcast_hdl_restore_e1h(bp, o, p, mc_filter);
3146 break;
3147
3148 default:
3149 BNX2X_ERR("Unknown command: %d\n", cmd);
3150 return -EINVAL;
3151 }
3152
3153 /* Set the mcast filter in the internal memory */
3154 for (i = 0; i < MC_HASH_SIZE; i++)
3155 REG_WR(bp, MC_HASH_OFFSET(bp, i), mc_filter[i]);
3156 } else
3157 /* clear the registry */
3158 memset(o->registry.aprox_match.vec, 0,
3159 sizeof(o->registry.aprox_match.vec));
3160
3161 /* We are done */
3162 r->clear_pending(r);
3163
3164 return 0;
3165}
3166
3167static int bnx2x_mcast_validate_e1(struct bnx2x *bp,
3168 struct bnx2x_mcast_ramrod_params *p,
Yuval Mintz86564c32013-01-23 03:21:50 +00003169 enum bnx2x_mcast_cmd cmd)
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03003170{
3171 struct bnx2x_mcast_obj *o = p->mcast_obj;
3172 int reg_sz = o->get_registry_size(o);
3173
3174 switch (cmd) {
3175 /* DEL command deletes all currently configured MACs */
3176 case BNX2X_MCAST_CMD_DEL:
3177 o->set_registry_size(o, 0);
3178 /* Don't break */
3179
3180 /* RESTORE command will restore the entire multicast configuration */
3181 case BNX2X_MCAST_CMD_RESTORE:
3182 p->mcast_list_len = reg_sz;
3183 DP(BNX2X_MSG_SP, "Command %d, p->mcast_list_len=%d\n",
3184 cmd, p->mcast_list_len);
3185 break;
3186
3187 case BNX2X_MCAST_CMD_ADD:
3188 case BNX2X_MCAST_CMD_CONT:
3189 /* Multicast MACs on 57710 are configured as unicast MACs and
3190 * there is only a limited number of CAM entries for that
3191 * matter.
3192 */
3193 if (p->mcast_list_len > o->max_cmd_len) {
Merav Sicron51c1a582012-03-18 10:33:38 +00003194 BNX2X_ERR("Can't configure more than %d multicast MACs on 57710\n",
3195 o->max_cmd_len);
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03003196 return -EINVAL;
3197 }
3198 /* Every configured MAC should be cleared if DEL command is
3199	 * called. Only the last ADD command is relevant, since
3200	 * every ADD command overrides the previous configuration.
3201 */
3202 DP(BNX2X_MSG_SP, "p->mcast_list_len=%d\n", p->mcast_list_len);
3203 if (p->mcast_list_len > 0)
3204 o->set_registry_size(o, p->mcast_list_len);
3205
3206 break;
3207
3208 default:
3209 BNX2X_ERR("Unknown command: %d\n", cmd);
3210 return -EINVAL;
3211
3212 }
3213
3214 /* We want to ensure that commands are executed one by one for 57710.
3215	 * Therefore each non-empty command will consume o->max_cmd_len.
3216 */
3217 if (p->mcast_list_len)
3218 o->total_pending_num += o->max_cmd_len;
3219
3220 return 0;
3221}
3222
3223static void bnx2x_mcast_revert_e1(struct bnx2x *bp,
3224 struct bnx2x_mcast_ramrod_params *p,
3225 int old_num_macs)
3226{
3227 struct bnx2x_mcast_obj *o = p->mcast_obj;
3228
3229 o->set_registry_size(o, old_num_macs);
3230
3231	/* If the current command hasn't been handled yet, being here
3232	 * means that it's meant to be dropped and we have to
3233	 * update the number of outstanding MACs accordingly.
3234 */
3235 if (p->mcast_list_len)
3236 o->total_pending_num -= o->max_cmd_len;
3237}
3238
3239static void bnx2x_mcast_set_one_rule_e1(struct bnx2x *bp,
3240 struct bnx2x_mcast_obj *o, int idx,
3241 union bnx2x_mcast_config_data *cfg_data,
Yuval Mintz86564c32013-01-23 03:21:50 +00003242 enum bnx2x_mcast_cmd cmd)
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03003243{
3244 struct bnx2x_raw_obj *r = &o->raw;
3245 struct mac_configuration_cmd *data =
3246 (struct mac_configuration_cmd *)(r->rdata);
3247
3248 /* copy mac */
3249 if ((cmd == BNX2X_MCAST_CMD_ADD) || (cmd == BNX2X_MCAST_CMD_RESTORE)) {
3250 bnx2x_set_fw_mac_addr(&data->config_table[idx].msb_mac_addr,
3251 &data->config_table[idx].middle_mac_addr,
3252 &data->config_table[idx].lsb_mac_addr,
3253 cfg_data->mac);
3254
3255 data->config_table[idx].vlan_id = 0;
3256 data->config_table[idx].pf_id = r->func_id;
3257 data->config_table[idx].clients_bit_vector =
3258 cpu_to_le32(1 << r->cl_id);
3259
3260 SET_FLAG(data->config_table[idx].flags,
3261 MAC_CONFIGURATION_ENTRY_ACTION_TYPE,
3262 T_ETH_MAC_COMMAND_SET);
3263 }
3264}
3265
3266/**
3267 * bnx2x_mcast_set_rdata_hdr_e1 - set header values in mac_configuration_cmd
3268 *
3269 * @bp: device handle
3270 * @p: ramrod parameters
3271 * @len: number of rules to handle
3272 */
3273static inline void bnx2x_mcast_set_rdata_hdr_e1(struct bnx2x *bp,
3274 struct bnx2x_mcast_ramrod_params *p,
3275 u8 len)
3276{
3277 struct bnx2x_raw_obj *r = &p->mcast_obj->raw;
3278 struct mac_configuration_cmd *data =
3279 (struct mac_configuration_cmd *)(r->rdata);
3280
3281 u8 offset = (CHIP_REV_IS_SLOW(bp) ?
3282 BNX2X_MAX_EMUL_MULTI*(1 + r->func_id) :
3283 BNX2X_MAX_MULTICAST*(1 + r->func_id));
3284
3285 data->hdr.offset = offset;
Yuval Mintz86564c32013-01-23 03:21:50 +00003286 data->hdr.client_id = cpu_to_le16(0xff);
3287 data->hdr.echo = cpu_to_le32((r->cid & BNX2X_SWCID_MASK) |
3288 (BNX2X_FILTER_MCAST_PENDING <<
3289 BNX2X_SWCID_SHIFT));
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03003290 data->hdr.length = len;
3291}
3292
3293/**
3294 * bnx2x_mcast_handle_restore_cmd_e1 - restore command for 57710
3295 *
3296 * @bp: device handle
3297 * @o: multicast object
3298 * @start_idx: index in the registry to start from
3299 * @rdata_idx: index in the ramrod data to start from
3300 *
3301 * The restore command for 57710 is like all other commands - always a
3302 * standalone command, with start_idx and rdata_idx always 0. This
3303 * function will always succeed.
3304 * Returns -1 to comply with the 57712 variant.
3305 */
3306static inline int bnx2x_mcast_handle_restore_cmd_e1(
3307 struct bnx2x *bp, struct bnx2x_mcast_obj *o , int start_idx,
3308 int *rdata_idx)
3309{
3310 struct bnx2x_mcast_mac_elem *elem;
3311 int i = 0;
Yuval Mintz86564c32013-01-23 03:21:50 +00003312 union bnx2x_mcast_config_data cfg_data = {NULL};
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03003313
3314 /* go through the registry and configure the MACs from it. */
3315 list_for_each_entry(elem, &o->registry.exact_match.macs, link) {
3316 cfg_data.mac = &elem->mac[0];
3317 o->set_one_rule(bp, o, i, &cfg_data, BNX2X_MCAST_CMD_RESTORE);
3318
3319 i++;
3320
Joe Perches0f9dad12011-08-14 12:16:19 +00003321 DP(BNX2X_MSG_SP, "About to configure %pM mcast MAC\n",
Yuval Mintz2de67432013-01-23 03:21:43 +00003322 cfg_data.mac);
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03003323 }
3324
3325 *rdata_idx = i;
3326
3327 return -1;
3328}

static inline int bnx2x_mcast_handle_pending_cmds_e1(
	struct bnx2x *bp, struct bnx2x_mcast_ramrod_params *p)
{
	struct bnx2x_pending_mcast_cmd *cmd_pos;
	struct bnx2x_mcast_mac_elem *pmac_pos;
	struct bnx2x_mcast_obj *o = p->mcast_obj;
	union bnx2x_mcast_config_data cfg_data = {NULL};
	int cnt = 0;

	/* If nothing to be done - return */
	if (list_empty(&o->pending_cmds_head))
		return 0;

	/* Handle the first command */
	cmd_pos = list_first_entry(&o->pending_cmds_head,
				   struct bnx2x_pending_mcast_cmd, link);

	switch (cmd_pos->type) {
	case BNX2X_MCAST_CMD_ADD:
		list_for_each_entry(pmac_pos, &cmd_pos->data.macs_head, link) {
			cfg_data.mac = &pmac_pos->mac[0];
			o->set_one_rule(bp, o, cnt, &cfg_data, cmd_pos->type);

			cnt++;

			DP(BNX2X_MSG_SP, "About to configure %pM mcast MAC\n",
			   pmac_pos->mac);
		}
		break;

	case BNX2X_MCAST_CMD_DEL:
		cnt = cmd_pos->data.macs_num;
		DP(BNX2X_MSG_SP, "About to delete %d multicast MACs\n", cnt);
		break;

	case BNX2X_MCAST_CMD_RESTORE:
		o->hdl_restore(bp, o, 0, &cnt);
		break;

	default:
		BNX2X_ERR("Unknown command: %d\n", cmd_pos->type);
		return -EINVAL;
	}

	list_del(&cmd_pos->link);
	kfree(cmd_pos);

	return cnt;
}

/**
 * bnx2x_get_fw_mac_addr - revert the bnx2x_set_fw_mac_addr().
 *
 * @fw_hi:	MSB 16 bits of the MAC in FW format
 * @fw_mid:	middle 16 bits of the MAC in FW format
 * @fw_lo:	LSB 16 bits of the MAC in FW format
 * @mac:	MAC address buffer to fill
 */
static inline void bnx2x_get_fw_mac_addr(__le16 *fw_hi, __le16 *fw_mid,
					 __le16 *fw_lo, u8 *mac)
{
	mac[1] = ((u8 *)fw_hi)[0];
	mac[0] = ((u8 *)fw_hi)[1];
	mac[3] = ((u8 *)fw_mid)[0];
	mac[2] = ((u8 *)fw_mid)[1];
	mac[5] = ((u8 *)fw_lo)[0];
	mac[4] = ((u8 *)fw_lo)[1];
}
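
/* Worked example (illustrative only): for MAC 00:1b:21:aa:bb:cc,
 * bnx2x_set_fw_mac_addr() stores each byte pair swapped, so the words read
 * back as little-endian values fw_hi == 0x001b, fw_mid == 0x21aa and
 * fw_lo == 0xbbcc; swapping each pair again reconstructs
 * mac[] = {0x00, 0x1b, 0x21, 0xaa, 0xbb, 0xcc}.
 */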

/**
 * bnx2x_mcast_refresh_registry_e1 - refresh the exact-match mcast registry
 *
 * @bp:	device handle
 * @o:	mcast object
 *
 * Check the ramrod data first entry flag to see if it's a DELETE or ADD
 * command and update the registry correspondingly: if ADD - allocate the
 * memory and add the entries to the registry (list), if DELETE - clear the
 * registry and free the memory.
 */
static inline int bnx2x_mcast_refresh_registry_e1(struct bnx2x *bp,
						  struct bnx2x_mcast_obj *o)
{
	struct bnx2x_raw_obj *raw = &o->raw;
	struct bnx2x_mcast_mac_elem *elem;
	struct mac_configuration_cmd *data =
		(struct mac_configuration_cmd *)(raw->rdata);

	/* If first entry contains a SET bit - the command was ADD,
	 * otherwise - DEL_ALL
	 */
	if (GET_FLAG(data->config_table[0].flags,
		     MAC_CONFIGURATION_ENTRY_ACTION_TYPE)) {
		int i, len = data->hdr.length;

		/* Break if it was a RESTORE command */
		if (!list_empty(&o->registry.exact_match.macs))
			return 0;

		elem = kcalloc(len, sizeof(*elem), GFP_ATOMIC);
		if (!elem) {
			BNX2X_ERR("Failed to allocate registry memory\n");
			return -ENOMEM;
		}

		for (i = 0; i < len; i++, elem++) {
			bnx2x_get_fw_mac_addr(
				&data->config_table[i].msb_mac_addr,
				&data->config_table[i].middle_mac_addr,
				&data->config_table[i].lsb_mac_addr,
				elem->mac);
			DP(BNX2X_MSG_SP, "Adding registry entry for [%pM]\n",
			   elem->mac);
			list_add_tail(&elem->link,
				      &o->registry.exact_match.macs);
		}
	} else {
		elem = list_first_entry(&o->registry.exact_match.macs,
					struct bnx2x_mcast_mac_elem, link);
		DP(BNX2X_MSG_SP, "Deleting a registry\n");
		kfree(elem);
		INIT_LIST_HEAD(&o->registry.exact_match.macs);
	}

	return 0;
}

static int bnx2x_mcast_setup_e1(struct bnx2x *bp,
				struct bnx2x_mcast_ramrod_params *p,
				enum bnx2x_mcast_cmd cmd)
{
	struct bnx2x_mcast_obj *o = p->mcast_obj;
	struct bnx2x_raw_obj *raw = &o->raw;
	struct mac_configuration_cmd *data =
		(struct mac_configuration_cmd *)(raw->rdata);
	int cnt = 0, i, rc;

	/* Reset the ramrod data buffer */
	memset(data, 0, sizeof(*data));

	/* First set all entries as invalid */
	for (i = 0; i < o->max_cmd_len; i++)
		SET_FLAG(data->config_table[i].flags,
			 MAC_CONFIGURATION_ENTRY_ACTION_TYPE,
			 T_ETH_MAC_COMMAND_INVALIDATE);

	/* Handle pending commands first */
	cnt = bnx2x_mcast_handle_pending_cmds_e1(bp, p);

	/* If there are no more pending commands - clear SCHEDULED state */
	if (list_empty(&o->pending_cmds_head))
		o->clear_sched(o);

	/* The below may be true iff there were no pending commands */
	if (!cnt)
		cnt = bnx2x_mcast_handle_current_cmd(bp, p, cmd, 0);

	/* For 57710 every command has o->max_cmd_len length to ensure that
	 * commands are done one at a time.
	 */
	o->total_pending_num -= o->max_cmd_len;

	/* send a ramrod */

	WARN_ON(cnt > o->max_cmd_len);

	/* Set ramrod header (in particular, a number of entries to update) */
	bnx2x_mcast_set_rdata_hdr_e1(bp, p, (u8)cnt);

	/* update a registry: we need the registry contents to be always up
	 * to date in order to be able to execute a RESTORE opcode. Here
	 * we use the fact that for 57710 we send one command at a time,
	 * hence we may take the registry update out of the command handling
	 * and do it in a simpler way here.
	 */
	rc = bnx2x_mcast_refresh_registry_e1(bp, o);
	if (rc)
		return rc;

	/* If CLEAR_ONLY was requested - don't send a ramrod and clear
	 * RAMROD_PENDING status immediately.
	 */
	if (test_bit(RAMROD_DRV_CLR_ONLY, &p->ramrod_flags)) {
		raw->clear_pending(raw);
		return 0;
	} else {
		/* No need for an explicit memory barrier here: the ordering
		 * of writing to the SPQ element and updating the SPQ
		 * producer (which involves a memory read) is guaranteed by
		 * the full memory barrier inside bnx2x_sp_post().
		 */

		/* Send a ramrod */
		rc = bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, raw->cid,
				   U64_HI(raw->rdata_mapping),
				   U64_LO(raw->rdata_mapping),
				   ETH_CONNECTION_TYPE);
		if (rc)
			return rc;

		/* Ramrod completion is pending */
		return 1;
	}
}

static int bnx2x_mcast_get_registry_size_exact(struct bnx2x_mcast_obj *o)
{
	return o->registry.exact_match.num_macs_set;
}

static int bnx2x_mcast_get_registry_size_aprox(struct bnx2x_mcast_obj *o)
{
	return o->registry.aprox_match.num_bins_set;
}

static void bnx2x_mcast_set_registry_size_exact(struct bnx2x_mcast_obj *o,
						int n)
{
	o->registry.exact_match.num_macs_set = n;
}

static void bnx2x_mcast_set_registry_size_aprox(struct bnx2x_mcast_obj *o,
						int n)
{
	o->registry.aprox_match.num_bins_set = n;
}

int bnx2x_config_mcast(struct bnx2x *bp,
		       struct bnx2x_mcast_ramrod_params *p,
		       enum bnx2x_mcast_cmd cmd)
{
	struct bnx2x_mcast_obj *o = p->mcast_obj;
	struct bnx2x_raw_obj *r = &o->raw;
	int rc = 0, old_reg_size;

	/* This is needed to recover the number of currently configured
	 * mcast macs in case of failure.
	 */
	old_reg_size = o->get_registry_size(o);

	/* Do some calculations and checks */
	rc = o->validate(bp, p, cmd);
	if (rc)
		return rc;

	/* Return if there is no work to do */
	if ((!p->mcast_list_len) && (!o->check_sched(o)))
		return 0;

	DP(BNX2X_MSG_SP, "o->total_pending_num=%d p->mcast_list_len=%d o->max_cmd_len=%d\n",
	   o->total_pending_num, p->mcast_list_len, o->max_cmd_len);

	/* Enqueue the current command to the pending list if we can't
	 * complete it in the current iteration.
	 */
	if (r->check_pending(r) ||
	    ((o->max_cmd_len > 0) && (o->total_pending_num > o->max_cmd_len))) {
		rc = o->enqueue_cmd(bp, p->mcast_obj, p, cmd);
		if (rc < 0)
			goto error_exit1;

		/* As long as the current command is in a command list we
		 * don't need to handle it separately.
		 */
		p->mcast_list_len = 0;
	}

	if (!r->check_pending(r)) {
		/* Set 'pending' state */
		r->set_pending(r);

		/* Configure the new classification in the chip */
		rc = o->config_mcast(bp, p, cmd);
		if (rc < 0)
			goto error_exit2;

		/* Wait for a ramrod completion if it was requested */
		if (test_bit(RAMROD_COMP_WAIT, &p->ramrod_flags))
			rc = o->wait_comp(bp, o);
	}

	return rc;

error_exit2:
	r->clear_pending(r);

error_exit1:
	o->revert(bp, p, old_reg_size);

	return rc;
}
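
/* Usage sketch (illustrative only, not lifted from the callers):
 *
 *	struct bnx2x_mcast_ramrod_params rparam = {NULL};
 *
 *	rparam.mcast_obj = &bp->mcast_obj;
 *	__set_bit(RAMROD_COMP_WAIT, &rparam.ramrod_flags);
 *	rc = bnx2x_config_mcast(bp, &rparam, BNX2X_MCAST_CMD_DEL);
 *	if (rc < 0)
 *		BNX2X_ERR("Failed to clear multicast config: %d\n", rc);
 */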

static void bnx2x_mcast_clear_sched(struct bnx2x_mcast_obj *o)
{
	smp_mb__before_clear_bit();
	clear_bit(o->sched_state, o->raw.pstate);
	smp_mb__after_clear_bit();
}

static void bnx2x_mcast_set_sched(struct bnx2x_mcast_obj *o)
{
	smp_mb__before_clear_bit();
	set_bit(o->sched_state, o->raw.pstate);
	smp_mb__after_clear_bit();
}

static bool bnx2x_mcast_check_sched(struct bnx2x_mcast_obj *o)
{
	return !!test_bit(o->sched_state, o->raw.pstate);
}

static bool bnx2x_mcast_check_pending(struct bnx2x_mcast_obj *o)
{
	return o->raw.check_pending(&o->raw) || o->check_sched(o);
}

void bnx2x_init_mcast_obj(struct bnx2x *bp,
			  struct bnx2x_mcast_obj *mcast_obj,
			  u8 mcast_cl_id, u32 mcast_cid, u8 func_id,
			  u8 engine_id, void *rdata, dma_addr_t rdata_mapping,
			  int state, unsigned long *pstate, bnx2x_obj_type type)
{
	memset(mcast_obj, 0, sizeof(*mcast_obj));

	bnx2x_init_raw_obj(&mcast_obj->raw, mcast_cl_id, mcast_cid, func_id,
			   rdata, rdata_mapping, state, pstate, type);

	mcast_obj->engine_id = engine_id;

	INIT_LIST_HEAD(&mcast_obj->pending_cmds_head);

	mcast_obj->sched_state = BNX2X_FILTER_MCAST_SCHED;
	mcast_obj->check_sched = bnx2x_mcast_check_sched;
	mcast_obj->set_sched = bnx2x_mcast_set_sched;
	mcast_obj->clear_sched = bnx2x_mcast_clear_sched;

	if (CHIP_IS_E1(bp)) {
		mcast_obj->config_mcast = bnx2x_mcast_setup_e1;
		mcast_obj->enqueue_cmd = bnx2x_mcast_enqueue_cmd;
		mcast_obj->hdl_restore = bnx2x_mcast_handle_restore_cmd_e1;
		mcast_obj->check_pending = bnx2x_mcast_check_pending;

		if (CHIP_REV_IS_SLOW(bp))
			mcast_obj->max_cmd_len = BNX2X_MAX_EMUL_MULTI;
		else
			mcast_obj->max_cmd_len = BNX2X_MAX_MULTICAST;

		mcast_obj->wait_comp = bnx2x_mcast_wait;
		mcast_obj->set_one_rule = bnx2x_mcast_set_one_rule_e1;
		mcast_obj->validate = bnx2x_mcast_validate_e1;
		mcast_obj->revert = bnx2x_mcast_revert_e1;
		mcast_obj->get_registry_size =
			bnx2x_mcast_get_registry_size_exact;
		mcast_obj->set_registry_size =
			bnx2x_mcast_set_registry_size_exact;

		/* 57710 is the only chip that uses the exact match for mcast
		 * at the moment.
		 */
		INIT_LIST_HEAD(&mcast_obj->registry.exact_match.macs);

	} else if (CHIP_IS_E1H(bp)) {
		mcast_obj->config_mcast = bnx2x_mcast_setup_e1h;
		mcast_obj->enqueue_cmd = NULL;
		mcast_obj->hdl_restore = NULL;
		mcast_obj->check_pending = bnx2x_mcast_check_pending;

		/* 57711 doesn't send a ramrod, so it has unlimited credit
		 * for one command.
		 */
		mcast_obj->max_cmd_len = -1;
		mcast_obj->wait_comp = bnx2x_mcast_wait;
		mcast_obj->set_one_rule = NULL;
		mcast_obj->validate = bnx2x_mcast_validate_e1h;
		mcast_obj->revert = bnx2x_mcast_revert_e1h;
		mcast_obj->get_registry_size =
			bnx2x_mcast_get_registry_size_aprox;
		mcast_obj->set_registry_size =
			bnx2x_mcast_set_registry_size_aprox;
	} else {
		mcast_obj->config_mcast = bnx2x_mcast_setup_e2;
		mcast_obj->enqueue_cmd = bnx2x_mcast_enqueue_cmd;
		mcast_obj->hdl_restore = bnx2x_mcast_handle_restore_cmd_e2;
		mcast_obj->check_pending = bnx2x_mcast_check_pending;
		/* TODO: There should be a proper HSI define for this number!!!
		 */
		mcast_obj->max_cmd_len = 16;
		mcast_obj->wait_comp = bnx2x_mcast_wait;
		mcast_obj->set_one_rule = bnx2x_mcast_set_one_rule_e2;
		mcast_obj->validate = bnx2x_mcast_validate_e2;
		mcast_obj->revert = bnx2x_mcast_revert_e2;
		mcast_obj->get_registry_size =
			bnx2x_mcast_get_registry_size_aprox;
		mcast_obj->set_registry_size =
			bnx2x_mcast_set_registry_size_aprox;
	}
}

/*************************** Credit handling **********************************/

/**
 * __atomic_add_ifless - add if the result is less than a given value.
 *
 * @v:	pointer of type atomic_t
 * @a:	the amount to add to v...
 * @u:	...if (v + a) is less than u.
 *
 * returns true if (v + a) was less than u, and false otherwise.
 */
static inline bool __atomic_add_ifless(atomic_t *v, int a, int u)
{
	int c, old;

	c = atomic_read(v);
	for (;;) {
		if (unlikely(c + a >= u))
			return false;

		old = atomic_cmpxchg((v), c, c + a);
		if (likely(old == c))
			break;
		c = old;
	}

	return true;
}

/**
 * __atomic_dec_ifmoe - dec if the result is greater than or equal to a
 * given value.
 *
 * @v:	pointer of type atomic_t
 * @a:	the amount to dec from v...
 * @u:	...if (v - a) is greater than or equal to u.
 *
 * returns true if (v - a) was greater than or equal to u, and false
 * otherwise.
 */
static inline bool __atomic_dec_ifmoe(atomic_t *v, int a, int u)
{
	int c, old;

	c = atomic_read(v);
	for (;;) {
		if (unlikely(c - a < u))
			return false;

		old = atomic_cmpxchg((v), c, c - a);
		if (likely(old == c))
			break;
		c = old;
	}

	return true;
}
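
/* Worked example (illustrative only): with atomic_read(v) == 5,
 * __atomic_dec_ifmoe(v, 3, 0) succeeds and leaves v == 2, while a second
 * __atomic_dec_ifmoe(v, 3, 0) would drop v below 0 and therefore fails
 * without touching v. The cmpxchg loop in both helpers simply retries
 * whenever another CPU changed v between the read and the compare-and-swap.
 */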

static bool bnx2x_credit_pool_get(struct bnx2x_credit_pool_obj *o, int cnt)
{
	bool rc;

	smp_mb();
	rc = __atomic_dec_ifmoe(&o->credit, cnt, 0);
	smp_mb();

	return rc;
}

static bool bnx2x_credit_pool_put(struct bnx2x_credit_pool_obj *o, int cnt)
{
	bool rc;

	smp_mb();

	/* Don't allow a refill if credit + cnt > pool_sz */
	rc = __atomic_add_ifless(&o->credit, cnt, o->pool_sz + 1);

	smp_mb();

	return rc;
}

static int bnx2x_credit_pool_check(struct bnx2x_credit_pool_obj *o)
{
	int cur_credit;

	smp_mb();
	cur_credit = atomic_read(&o->credit);

	return cur_credit;
}

static bool bnx2x_credit_pool_always_true(struct bnx2x_credit_pool_obj *o,
					  int cnt)
{
	return true;
}

static bool bnx2x_credit_pool_get_entry(
	struct bnx2x_credit_pool_obj *o,
	int *offset)
{
	int idx, vec, i;

	*offset = -1;

	/* Find "internal cam-offset" then add to base for this object... */
	for (vec = 0; vec < BNX2X_POOL_VEC_SIZE; vec++) {

		/* Skip the current vector if there are no free entries in it */
		if (!o->pool_mirror[vec])
			continue;

		/* If we've got here we are going to find a free entry */
		for (idx = vec * BIT_VEC64_ELEM_SZ, i = 0;
		      i < BIT_VEC64_ELEM_SZ; idx++, i++)

			if (BIT_VEC64_TEST_BIT(o->pool_mirror, idx)) {
				/* Got one!! */
				BIT_VEC64_CLEAR_BIT(o->pool_mirror, idx);
				*offset = o->base_pool_offset + idx;
				return true;
			}
	}

	return false;
}
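
/* Illustrative note (added commentary): pool_mirror is a bitmap of free
 * CAM entries, one u64 per vector, where a set bit means "free". E.g.
 * pool_mirror[0] == 0x5 offers entries base_pool_offset + 0 and
 * base_pool_offset + 2; taking the first one clears bit 0 and leaves 0x4.
 */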

static bool bnx2x_credit_pool_put_entry(
	struct bnx2x_credit_pool_obj *o,
	int offset)
{
	if (offset < o->base_pool_offset)
		return false;

	offset -= o->base_pool_offset;

	if (offset >= o->pool_sz)
		return false;

	/* Return the entry to the pool */
	BIT_VEC64_SET_BIT(o->pool_mirror, offset);

	return true;
}

static bool bnx2x_credit_pool_put_entry_always_true(
	struct bnx2x_credit_pool_obj *o,
	int offset)
{
	return true;
}

static bool bnx2x_credit_pool_get_entry_always_true(
	struct bnx2x_credit_pool_obj *o,
	int *offset)
{
	*offset = -1;
	return true;
}

/**
 * bnx2x_init_credit_pool - initialize credit pool internals.
 *
 * @p:	credit pool object
 * @base:	Base entry in the CAM to use.
 * @credit:	pool size.
 *
 * If base is negative no CAM entries handling will be performed.
 * If credit is negative pool operations will always succeed (unlimited pool).
 */
static inline void bnx2x_init_credit_pool(struct bnx2x_credit_pool_obj *p,
					  int base, int credit)
{
	/* Zero the object first */
	memset(p, 0, sizeof(*p));

	/* Set the table to all 1s */
	memset(&p->pool_mirror, 0xff, sizeof(p->pool_mirror));

	/* Init a pool as full */
	atomic_set(&p->credit, credit);

	/* The total pool size */
	p->pool_sz = credit;

	p->base_pool_offset = base;

	/* Commit the change */
	smp_mb();

	p->check = bnx2x_credit_pool_check;

	/* if pool credit is negative - disable the checks */
	if (credit >= 0) {
		p->put = bnx2x_credit_pool_put;
		p->get = bnx2x_credit_pool_get;
		p->put_entry = bnx2x_credit_pool_put_entry;
		p->get_entry = bnx2x_credit_pool_get_entry;
	} else {
		p->put = bnx2x_credit_pool_always_true;
		p->get = bnx2x_credit_pool_always_true;
		p->put_entry = bnx2x_credit_pool_put_entry_always_true;
		p->get_entry = bnx2x_credit_pool_get_entry_always_true;
	}

	/* If base is negative - disable entries handling */
	if (base < 0) {
		p->put_entry = bnx2x_credit_pool_put_entry_always_true;
		p->get_entry = bnx2x_credit_pool_get_entry_always_true;
	}
}
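
/* Usage sketch (illustrative only): bnx2x_init_credit_pool(p, -1, 8)
 * disables CAM entry handling but keeps credit accounting, so p->get(p, 2)
 * succeeds four times and then fails, while bnx2x_init_credit_pool(p, 0, -1)
 * makes every pool operation succeed unconditionally.
 */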

void bnx2x_init_mac_credit_pool(struct bnx2x *bp,
				struct bnx2x_credit_pool_obj *p, u8 func_id,
				u8 func_num)
{
/* TODO: this will be defined in consts as well... */
#define BNX2X_CAM_SIZE_EMUL 5

	int cam_sz;

	if (CHIP_IS_E1(bp)) {
		/* In E1, Multicast is saved in cam... */
		if (!CHIP_REV_IS_SLOW(bp))
			cam_sz = (MAX_MAC_CREDIT_E1 / 2) - BNX2X_MAX_MULTICAST;
		else
			cam_sz = BNX2X_CAM_SIZE_EMUL - BNX2X_MAX_EMUL_MULTI;

		bnx2x_init_credit_pool(p, func_id * cam_sz, cam_sz);

	} else if (CHIP_IS_E1H(bp)) {
		/* CAM credit is equally divided between all active functions
		 * on the PORT.
		 */
		if (func_num > 0) {
			if (!CHIP_REV_IS_SLOW(bp))
				cam_sz = (MAX_MAC_CREDIT_E1H / (2*func_num));
			else
				cam_sz = BNX2X_CAM_SIZE_EMUL;
			bnx2x_init_credit_pool(p, func_id * cam_sz, cam_sz);
		} else {
			/* this should never happen! Block MAC operations. */
			bnx2x_init_credit_pool(p, 0, 0);
		}
	} else {
		/* CAM credit is equally divided between all active functions
		 * on the PATH.
		 */
		if (func_num > 0) {
			if (!CHIP_REV_IS_SLOW(bp))
				cam_sz = (MAX_MAC_CREDIT_E2 / func_num);
			else
				cam_sz = BNX2X_CAM_SIZE_EMUL;

			/* No need for CAM entries handling for 57712 and
			 * newer.
			 */
			bnx2x_init_credit_pool(p, -1, cam_sz);
		} else {
			/* this should never happen! Block MAC operations. */
			bnx2x_init_credit_pool(p, 0, 0);
		}
	}
}

void bnx2x_init_vlan_credit_pool(struct bnx2x *bp,
				 struct bnx2x_credit_pool_obj *p,
				 u8 func_id,
				 u8 func_num)
{
	if (CHIP_IS_E1x(bp)) {
		/* There is no VLAN credit in HW on 57710 and 57711; only
		 * MAC / MAC-VLAN can be set.
		 */
		bnx2x_init_credit_pool(p, 0, -1);
	} else {
		/* CAM credit is equally divided between all active functions
		 * on the PATH.
		 */
		if (func_num > 0) {
			int credit = MAX_VLAN_CREDIT_E2 / func_num;
			bnx2x_init_credit_pool(p, func_id * credit, credit);
		} else
			/* this should never happen! Block VLAN operations. */
			bnx2x_init_credit_pool(p, 0, 0);
	}
}

/****************** RSS Configuration ******************/
/**
 * bnx2x_debug_print_ind_table - prints the indirection table configuration.
 *
 * @bp:	driver handle
 * @p:	pointer to rss configuration
 *
 * Prints it when NETIF_MSG_IFUP debug level is configured.
 */
static inline void bnx2x_debug_print_ind_table(struct bnx2x *bp,
					struct bnx2x_config_rss_params *p)
{
	int i;

	DP(BNX2X_MSG_SP, "Setting indirection table to:\n");
	DP(BNX2X_MSG_SP, "0x0000: ");
	for (i = 0; i < T_ETH_INDIRECTION_TABLE_SIZE; i++) {
		DP_CONT(BNX2X_MSG_SP, "0x%02x ", p->ind_table[i]);

		/* Print 4 bytes in a line */
		if ((i + 1 < T_ETH_INDIRECTION_TABLE_SIZE) &&
		    (((i + 1) & 0x3) == 0)) {
			DP_CONT(BNX2X_MSG_SP, "\n");
			DP(BNX2X_MSG_SP, "0x%04x: ", i + 1);
		}
	}

	DP_CONT(BNX2X_MSG_SP, "\n");
}

/**
 * bnx2x_setup_rss - configure RSS
 *
 * @bp:	device handle
 * @p:	rss configuration
 *
 * Sends an RSS UPDATE ramrod.
 */
static int bnx2x_setup_rss(struct bnx2x *bp,
			   struct bnx2x_config_rss_params *p)
{
	struct bnx2x_rss_config_obj *o = p->rss_obj;
	struct bnx2x_raw_obj *r = &o->raw;
	struct eth_rss_update_ramrod_data *data =
		(struct eth_rss_update_ramrod_data *)(r->rdata);
	u8 rss_mode = 0;
	int rc;

	memset(data, 0, sizeof(*data));

	DP(BNX2X_MSG_SP, "Configuring RSS\n");

	/* Set an echo field */
	data->echo = cpu_to_le32((r->cid & BNX2X_SWCID_MASK) |
				 (r->state << BNX2X_SWCID_SHIFT));

	/* RSS mode */
	if (test_bit(BNX2X_RSS_MODE_DISABLED, &p->rss_flags))
		rss_mode = ETH_RSS_MODE_DISABLED;
	else if (test_bit(BNX2X_RSS_MODE_REGULAR, &p->rss_flags))
		rss_mode = ETH_RSS_MODE_REGULAR;

	data->rss_mode = rss_mode;

	DP(BNX2X_MSG_SP, "rss_mode=%d\n", rss_mode);

	/* RSS capabilities */
	if (test_bit(BNX2X_RSS_IPV4, &p->rss_flags))
		data->capabilities |=
			ETH_RSS_UPDATE_RAMROD_DATA_IPV4_CAPABILITY;

	if (test_bit(BNX2X_RSS_IPV4_TCP, &p->rss_flags))
		data->capabilities |=
			ETH_RSS_UPDATE_RAMROD_DATA_IPV4_TCP_CAPABILITY;

	if (test_bit(BNX2X_RSS_IPV4_UDP, &p->rss_flags))
		data->capabilities |=
			ETH_RSS_UPDATE_RAMROD_DATA_IPV4_UDP_CAPABILITY;

	if (test_bit(BNX2X_RSS_IPV6, &p->rss_flags))
		data->capabilities |=
			ETH_RSS_UPDATE_RAMROD_DATA_IPV6_CAPABILITY;

	if (test_bit(BNX2X_RSS_IPV6_TCP, &p->rss_flags))
		data->capabilities |=
			ETH_RSS_UPDATE_RAMROD_DATA_IPV6_TCP_CAPABILITY;

	if (test_bit(BNX2X_RSS_IPV6_UDP, &p->rss_flags))
		data->capabilities |=
			ETH_RSS_UPDATE_RAMROD_DATA_IPV6_UDP_CAPABILITY;

	/* Hashing mask */
	data->rss_result_mask = p->rss_result_mask;

	/* RSS engine ID */
	data->rss_engine_id = o->engine_id;

	DP(BNX2X_MSG_SP, "rss_engine_id=%d\n", data->rss_engine_id);

	/* Indirection table */
	memcpy(data->indirection_table, p->ind_table,
	       T_ETH_INDIRECTION_TABLE_SIZE);

	/* Remember the last configuration */
	memcpy(o->ind_table, p->ind_table, T_ETH_INDIRECTION_TABLE_SIZE);

	/* Print the indirection table */
	if (netif_msg_ifup(bp))
		bnx2x_debug_print_ind_table(bp, p);

	/* RSS keys */
	if (test_bit(BNX2X_RSS_SET_SRCH, &p->rss_flags)) {
		memcpy(&data->rss_key[0], &p->rss_key[0],
		       sizeof(data->rss_key));
		data->capabilities |= ETH_RSS_UPDATE_RAMROD_DATA_UPDATE_RSS_KEY;
	}

	/* No need for an explicit memory barrier here: the ordering of
	 * writing to the SPQ element and updating the SPQ producer (which
	 * involves a memory read) is guaranteed by the full memory barrier
	 * inside bnx2x_sp_post().
	 */

	/* Send a ramrod */
	rc = bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_RSS_UPDATE, r->cid,
			   U64_HI(r->rdata_mapping),
			   U64_LO(r->rdata_mapping),
			   ETH_CONNECTION_TYPE);

	if (rc < 0)
		return rc;

	return 1;
}

void bnx2x_get_rss_ind_table(struct bnx2x_rss_config_obj *rss_obj,
			     u8 *ind_table)
{
	memcpy(ind_table, rss_obj->ind_table, sizeof(rss_obj->ind_table));
}

int bnx2x_config_rss(struct bnx2x *bp,
		     struct bnx2x_config_rss_params *p)
{
	int rc;
	struct bnx2x_rss_config_obj *o = p->rss_obj;
	struct bnx2x_raw_obj *r = &o->raw;

	/* Do nothing if only driver cleanup was requested */
	if (test_bit(RAMROD_DRV_CLR_ONLY, &p->ramrod_flags))
		return 0;

	r->set_pending(r);

	rc = o->config_rss(bp, p);
	if (rc < 0) {
		r->clear_pending(r);
		return rc;
	}

	if (test_bit(RAMROD_COMP_WAIT, &p->ramrod_flags))
		rc = r->wait_comp(bp, r);

	return rc;
}
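
/* Usage sketch (illustrative only; the exact flag set is an assumption):
 *
 *	struct bnx2x_config_rss_params params = {NULL};
 *
 *	params.rss_obj = &bp->rss_conf_obj;
 *	__set_bit(RAMROD_COMP_WAIT, &params.ramrod_flags);
 *	__set_bit(BNX2X_RSS_MODE_REGULAR, &params.rss_flags);
 *	__set_bit(BNX2X_RSS_IPV4, &params.rss_flags);
 *	__set_bit(BNX2X_RSS_IPV4_TCP, &params.rss_flags);
 *	params.rss_result_mask = MULTI_MASK;
 *	memcpy(params.ind_table, ind_table, T_ETH_INDIRECTION_TABLE_SIZE);
 *	rc = bnx2x_config_rss(bp, &params);
 */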

void bnx2x_init_rss_config_obj(struct bnx2x *bp,
			       struct bnx2x_rss_config_obj *rss_obj,
			       u8 cl_id, u32 cid, u8 func_id, u8 engine_id,
			       void *rdata, dma_addr_t rdata_mapping,
			       int state, unsigned long *pstate,
			       bnx2x_obj_type type)
{
	bnx2x_init_raw_obj(&rss_obj->raw, cl_id, cid, func_id, rdata,
			   rdata_mapping, state, pstate, type);

	rss_obj->engine_id = engine_id;
	rss_obj->config_rss = bnx2x_setup_rss;
}

/********************** Queue state object ***********************************/

/**
 * bnx2x_queue_state_change - perform Queue state change transition
 *
 * @bp:	device handle
 * @params:	parameters to perform the transition
 *
 * returns 0 in case of a successfully completed transition, negative error
 * code in case of failure, positive (EBUSY) value if there is a completion
 * that is still pending (possible only if RAMROD_COMP_WAIT is
 * not set in params->ramrod_flags for asynchronous commands).
 */
int bnx2x_queue_state_change(struct bnx2x *bp,
			     struct bnx2x_queue_state_params *params)
{
	struct bnx2x_queue_sp_obj *o = params->q_obj;
	int rc, pending_bit;
	unsigned long *pending = &o->pending;

	/* Check that the requested transition is legal */
	rc = o->check_transition(bp, o, params);
	if (rc) {
		BNX2X_ERR("check transition returned an error. rc %d\n", rc);
		return -EINVAL;
	}

	/* Set "pending" bit */
	DP(BNX2X_MSG_SP, "pending bit was=%lx\n", o->pending);
	pending_bit = o->set_pending(o, params);
	DP(BNX2X_MSG_SP, "pending bit now=%lx\n", o->pending);

	/* Don't send a command if only driver cleanup was requested */
	if (test_bit(RAMROD_DRV_CLR_ONLY, &params->ramrod_flags))
		o->complete_cmd(bp, o, pending_bit);
	else {
		/* Send a ramrod */
		rc = o->send_cmd(bp, params);
		if (rc) {
			o->next_state = BNX2X_Q_STATE_MAX;
			clear_bit(pending_bit, pending);
			smp_mb__after_clear_bit();
			return rc;
		}

		if (test_bit(RAMROD_COMP_WAIT, &params->ramrod_flags)) {
			rc = o->wait_comp(bp, o, pending_bit);
			if (rc)
				return rc;

			return 0;
		}
	}

	return !!test_bit(pending_bit, pending);
}
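
/* Usage sketch (illustrative only; obtaining the queue object this way is
 * an assumption about the caller):
 *
 *	struct bnx2x_queue_state_params q_params = {NULL};
 *
 *	q_params.q_obj = &bnx2x_sp_obj(bp, fp).q_obj;
 *	q_params.cmd = BNX2X_Q_CMD_HALT;
 *	__set_bit(RAMROD_COMP_WAIT, &q_params.ramrod_flags);
 *	rc = bnx2x_queue_state_change(bp, &q_params);
 */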

static int bnx2x_queue_set_pending(struct bnx2x_queue_sp_obj *obj,
				   struct bnx2x_queue_state_params *params)
{
	enum bnx2x_queue_cmd cmd = params->cmd, bit;

	/* ACTIVATE and DEACTIVATE commands are implemented on top of
	 * UPDATE command.
	 */
	if ((cmd == BNX2X_Q_CMD_ACTIVATE) ||
	    (cmd == BNX2X_Q_CMD_DEACTIVATE))
		bit = BNX2X_Q_CMD_UPDATE;
	else
		bit = cmd;

	set_bit(bit, &obj->pending);
	return bit;
}

static int bnx2x_queue_wait_comp(struct bnx2x *bp,
				 struct bnx2x_queue_sp_obj *o,
				 enum bnx2x_queue_cmd cmd)
{
	return bnx2x_state_wait(bp, cmd, &o->pending);
}

/**
 * bnx2x_queue_comp_cmd - complete the state change command.
 *
 * @bp:	device handle
 * @o:	queue object
 * @cmd:	command to complete
 *
 * Checks that the arrived completion is expected.
 */
static int bnx2x_queue_comp_cmd(struct bnx2x *bp,
				struct bnx2x_queue_sp_obj *o,
				enum bnx2x_queue_cmd cmd)
{
	unsigned long cur_pending = o->pending;

	if (!test_and_clear_bit(cmd, &cur_pending)) {
		BNX2X_ERR("Bad MC reply %d for queue %d in state %d pending 0x%lx, next_state %d\n",
			  cmd, o->cids[BNX2X_PRIMARY_CID_INDEX],
			  o->state, cur_pending, o->next_state);
		return -EINVAL;
	}

	if (o->next_tx_only >= o->max_cos)
		/* >= because the number of tx-only connections must always
		 * be smaller than the number of COSes, since the primary
		 * connection supports COS 0
		 */
		BNX2X_ERR("illegal value for next tx_only: %d. max cos was %d",
			  o->next_tx_only, o->max_cos);

	DP(BNX2X_MSG_SP,
	   "Completing command %d for queue %d, setting state to %d\n",
	   cmd, o->cids[BNX2X_PRIMARY_CID_INDEX], o->next_state);

	if (o->next_tx_only) /* print num tx-only if any exist */
		DP(BNX2X_MSG_SP, "primary cid %d: num tx-only cons %d\n",
		   o->cids[BNX2X_PRIMARY_CID_INDEX], o->next_tx_only);

	o->state = o->next_state;
	o->num_tx_only = o->next_tx_only;
	o->next_state = BNX2X_Q_STATE_MAX;

	/* It's important that o->state and o->next_state are
	 * updated before o->pending.
	 */
	wmb();

	clear_bit(cmd, &o->pending);
	smp_mb__after_clear_bit();

	return 0;
}

static void bnx2x_q_fill_setup_data_e2(struct bnx2x *bp,
				struct bnx2x_queue_state_params *cmd_params,
				struct client_init_ramrod_data *data)
{
	struct bnx2x_queue_setup_params *params = &cmd_params->params.setup;

	/* Rx data */

	/* IPv6 TPA supported for E2 and above only */
	data->rx.tpa_en |= test_bit(BNX2X_Q_FLG_TPA_IPV6, &params->flags) *
				CLIENT_INIT_RX_DATA_TPA_EN_IPV6;
}

static void bnx2x_q_fill_init_general_data(struct bnx2x *bp,
				struct bnx2x_queue_sp_obj *o,
				struct bnx2x_general_setup_params *params,
				struct client_init_general_data *gen_data,
				unsigned long *flags)
{
	gen_data->client_id = o->cl_id;

	if (test_bit(BNX2X_Q_FLG_STATS, flags)) {
		gen_data->statistics_counter_id = params->stat_id;
		gen_data->statistics_en_flg = 1;
		gen_data->statistics_zero_flg =
			test_bit(BNX2X_Q_FLG_ZERO_STATS, flags);
	} else
		gen_data->statistics_counter_id =
			DISABLE_STATISTIC_COUNTER_ID_VALUE;

	gen_data->is_fcoe_flg = test_bit(BNX2X_Q_FLG_FCOE, flags);
	gen_data->activate_flg = test_bit(BNX2X_Q_FLG_ACTIVE, flags);
	gen_data->sp_client_id = params->spcl_id;
	gen_data->mtu = cpu_to_le16(params->mtu);
	gen_data->func_id = o->func_id;

	gen_data->cos = params->cos;

	gen_data->traffic_type =
		test_bit(BNX2X_Q_FLG_FCOE, flags) ?
		LLFC_TRAFFIC_TYPE_FCOE : LLFC_TRAFFIC_TYPE_NW;

	DP(BNX2X_MSG_SP, "flags: active %d, cos %d, stats en %d\n",
	   gen_data->activate_flg, gen_data->cos, gen_data->statistics_en_flg);
}

static void bnx2x_q_fill_init_tx_data(struct bnx2x_queue_sp_obj *o,
				struct bnx2x_txq_setup_params *params,
				struct client_init_tx_data *tx_data,
				unsigned long *flags)
{
	tx_data->enforce_security_flg =
		test_bit(BNX2X_Q_FLG_TX_SEC, flags);
	tx_data->default_vlan =
		cpu_to_le16(params->default_vlan);
	tx_data->default_vlan_flg =
		test_bit(BNX2X_Q_FLG_DEF_VLAN, flags);
	tx_data->tx_switching_flg =
		test_bit(BNX2X_Q_FLG_TX_SWITCH, flags);
	tx_data->anti_spoofing_flg =
		test_bit(BNX2X_Q_FLG_ANTI_SPOOF, flags);
	tx_data->force_default_pri_flg =
		test_bit(BNX2X_Q_FLG_FORCE_DEFAULT_PRI, flags);

	tx_data->tx_status_block_id = params->fw_sb_id;
	tx_data->tx_sb_index_number = params->sb_cq_index;
	tx_data->tss_leading_client_id = params->tss_leading_cl_id;

	tx_data->tx_bd_page_base.lo =
		cpu_to_le32(U64_LO(params->dscr_map));
	tx_data->tx_bd_page_base.hi =
		cpu_to_le32(U64_HI(params->dscr_map));

	/* Don't configure any Tx switching mode during queue SETUP */
	tx_data->state = 0;
}

static void bnx2x_q_fill_init_pause_data(struct bnx2x_queue_sp_obj *o,
				struct rxq_pause_params *params,
				struct client_init_rx_data *rx_data)
{
	/* flow control data */
	rx_data->cqe_pause_thr_low = cpu_to_le16(params->rcq_th_lo);
	rx_data->cqe_pause_thr_high = cpu_to_le16(params->rcq_th_hi);
	rx_data->bd_pause_thr_low = cpu_to_le16(params->bd_th_lo);
	rx_data->bd_pause_thr_high = cpu_to_le16(params->bd_th_hi);
	rx_data->sge_pause_thr_low = cpu_to_le16(params->sge_th_lo);
	rx_data->sge_pause_thr_high = cpu_to_le16(params->sge_th_hi);
	rx_data->rx_cos_mask = cpu_to_le16(params->pri_map);
}

static void bnx2x_q_fill_init_rx_data(struct bnx2x_queue_sp_obj *o,
				struct bnx2x_rxq_setup_params *params,
				struct client_init_rx_data *rx_data,
				unsigned long *flags)
{
	rx_data->tpa_en = test_bit(BNX2X_Q_FLG_TPA, flags) *
				CLIENT_INIT_RX_DATA_TPA_EN_IPV4;
	rx_data->tpa_en |= test_bit(BNX2X_Q_FLG_TPA_GRO, flags) *
				CLIENT_INIT_RX_DATA_TPA_MODE;
	rx_data->vmqueue_mode_en_flg = 0;

	rx_data->cache_line_alignment_log_size =
		params->cache_line_log;
	rx_data->enable_dynamic_hc =
		test_bit(BNX2X_Q_FLG_DHC, flags);
	rx_data->max_sges_for_packet = params->max_sges_pkt;
	rx_data->client_qzone_id = params->cl_qzone_id;
	rx_data->max_agg_size = cpu_to_le16(params->tpa_agg_sz);

	/* Always start in DROP_ALL mode */
	rx_data->state = cpu_to_le16(CLIENT_INIT_RX_DATA_UCAST_DROP_ALL |
				     CLIENT_INIT_RX_DATA_MCAST_DROP_ALL);

	/* We don't set drop flags */
	rx_data->drop_ip_cs_err_flg = 0;
	rx_data->drop_tcp_cs_err_flg = 0;
	rx_data->drop_ttl0_flg = 0;
	rx_data->drop_udp_cs_err_flg = 0;
	rx_data->inner_vlan_removal_enable_flg =
		test_bit(BNX2X_Q_FLG_VLAN, flags);
	rx_data->outer_vlan_removal_enable_flg =
		test_bit(BNX2X_Q_FLG_OV, flags);
	rx_data->status_block_id = params->fw_sb_id;
	rx_data->rx_sb_index_number = params->sb_cq_index;
	rx_data->max_tpa_queues = params->max_tpa_queues;
	rx_data->max_bytes_on_bd = cpu_to_le16(params->buf_sz);
	rx_data->sge_buff_size = cpu_to_le16(params->sge_buf_sz);
	rx_data->bd_page_base.lo =
		cpu_to_le32(U64_LO(params->dscr_map));
	rx_data->bd_page_base.hi =
		cpu_to_le32(U64_HI(params->dscr_map));
	rx_data->sge_page_base.lo =
		cpu_to_le32(U64_LO(params->sge_map));
	rx_data->sge_page_base.hi =
		cpu_to_le32(U64_HI(params->sge_map));
	rx_data->cqe_page_base.lo =
		cpu_to_le32(U64_LO(params->rcq_map));
	rx_data->cqe_page_base.hi =
		cpu_to_le32(U64_HI(params->rcq_map));
	rx_data->is_leading_rss = test_bit(BNX2X_Q_FLG_LEADING_RSS, flags);

	if (test_bit(BNX2X_Q_FLG_MCAST, flags)) {
		rx_data->approx_mcast_engine_id = params->mcast_engine_id;
		rx_data->is_approx_mcast = 1;
	}

	rx_data->rss_engine_id = params->rss_engine_id;

	/* silent vlan removal */
	rx_data->silent_vlan_removal_flg =
		test_bit(BNX2X_Q_FLG_SILENT_VLAN_REM, flags);
	rx_data->silent_vlan_value =
		cpu_to_le16(params->silent_removal_value);
	rx_data->silent_vlan_mask =
		cpu_to_le16(params->silent_removal_mask);
}

/* initialize the general, tx and rx parts of a queue object */
static void bnx2x_q_fill_setup_data_cmn(struct bnx2x *bp,
				struct bnx2x_queue_state_params *cmd_params,
				struct client_init_ramrod_data *data)
{
	bnx2x_q_fill_init_general_data(bp, cmd_params->q_obj,
				       &cmd_params->params.setup.gen_params,
				       &data->general,
				       &cmd_params->params.setup.flags);

	bnx2x_q_fill_init_tx_data(cmd_params->q_obj,
				  &cmd_params->params.setup.txq_params,
				  &data->tx,
				  &cmd_params->params.setup.flags);

	bnx2x_q_fill_init_rx_data(cmd_params->q_obj,
				  &cmd_params->params.setup.rxq_params,
				  &data->rx,
				  &cmd_params->params.setup.flags);

	bnx2x_q_fill_init_pause_data(cmd_params->q_obj,
				     &cmd_params->params.setup.pause_params,
				     &data->rx);
}

/* initialize the general and tx parts of a tx-only queue object */
static void bnx2x_q_fill_setup_tx_only(struct bnx2x *bp,
				struct bnx2x_queue_state_params *cmd_params,
				struct tx_queue_init_ramrod_data *data)
{
	bnx2x_q_fill_init_general_data(bp, cmd_params->q_obj,
				       &cmd_params->params.tx_only.gen_params,
				       &data->general,
				       &cmd_params->params.tx_only.flags);

	bnx2x_q_fill_init_tx_data(cmd_params->q_obj,
				  &cmd_params->params.tx_only.txq_params,
				  &data->tx,
				  &cmd_params->params.tx_only.flags);

	DP(BNX2X_MSG_SP, "cid %d, tx bd page lo %x hi %x",
	   cmd_params->q_obj->cids[0],
	   data->tx.tx_bd_page_base.lo,
	   data->tx.tx_bd_page_base.hi);
}

/**
 * bnx2x_q_init - init HW/FW queue
 *
 * @bp:	device handle
 * @params:	queue state parameters
 *
 * HW/FW initial Queue configuration:
 *      - HC: Rx and Tx
 *      - CDU context validation
 */
static inline int bnx2x_q_init(struct bnx2x *bp,
			       struct bnx2x_queue_state_params *params)
{
	struct bnx2x_queue_sp_obj *o = params->q_obj;
	struct bnx2x_queue_init_params *init = &params->params.init;
	u16 hc_usec;
	u8 cos;

	/* Tx HC configuration */
	if (test_bit(BNX2X_Q_TYPE_HAS_TX, &o->type) &&
	    test_bit(BNX2X_Q_FLG_HC, &init->tx.flags)) {
		hc_usec = init->tx.hc_rate ? 1000000 / init->tx.hc_rate : 0;

		bnx2x_update_coalesce_sb_index(bp, init->tx.fw_sb_id,
			init->tx.sb_cq_index,
			!test_bit(BNX2X_Q_FLG_HC_EN, &init->tx.flags),
			hc_usec);
	}

	/* Rx HC configuration */
	if (test_bit(BNX2X_Q_TYPE_HAS_RX, &o->type) &&
	    test_bit(BNX2X_Q_FLG_HC, &init->rx.flags)) {
		hc_usec = init->rx.hc_rate ? 1000000 / init->rx.hc_rate : 0;

		bnx2x_update_coalesce_sb_index(bp, init->rx.fw_sb_id,
			init->rx.sb_cq_index,
			!test_bit(BNX2X_Q_FLG_HC_EN, &init->rx.flags),
			hc_usec);
	}

	/* Set CDU context validation values */
	for (cos = 0; cos < o->max_cos; cos++) {
		DP(BNX2X_MSG_SP, "setting context validation. cid %d, cos %d\n",
		   o->cids[cos], cos);
		DP(BNX2X_MSG_SP, "context pointer %p\n", init->cxts[cos]);
		bnx2x_set_ctx_validation(bp, init->cxts[cos], o->cids[cos]);
	}

	/* As no ramrod is sent, complete the command immediately */
	o->complete_cmd(bp, o, BNX2X_Q_CMD_INIT);

	mmiowb();
	smp_mb();

	return 0;
}
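
/* Note (illustrative, added commentary): hc_rate above is an interrupt
 * rate in interrupts/sec, so e.g. hc_rate == 50000 yields a host
 * coalescing period of 1000000 / 50000 == 20 usec, while hc_rate == 0
 * simply maps to hc_usec == 0.
 */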

static inline int bnx2x_q_send_setup_e1x(struct bnx2x *bp,
					struct bnx2x_queue_state_params *params)
{
	struct bnx2x_queue_sp_obj *o = params->q_obj;
	struct client_init_ramrod_data *rdata =
		(struct client_init_ramrod_data *)o->rdata;
	dma_addr_t data_mapping = o->rdata_mapping;
	int ramrod = RAMROD_CMD_ID_ETH_CLIENT_SETUP;

	/* Clear the ramrod data */
	memset(rdata, 0, sizeof(*rdata));

	/* Fill the ramrod data */
	bnx2x_q_fill_setup_data_cmn(bp, params, rdata);

	/* No need for an explicit memory barrier here: the ordering of
	 * writing to the SPQ element and updating the SPQ producer (which
	 * involves a memory read) is guaranteed by the full memory barrier
	 * inside bnx2x_sp_post().
	 */

	return bnx2x_sp_post(bp, ramrod, o->cids[BNX2X_PRIMARY_CID_INDEX],
			     U64_HI(data_mapping),
			     U64_LO(data_mapping), ETH_CONNECTION_TYPE);
}

static inline int bnx2x_q_send_setup_e2(struct bnx2x *bp,
					struct bnx2x_queue_state_params *params)
{
	struct bnx2x_queue_sp_obj *o = params->q_obj;
	struct client_init_ramrod_data *rdata =
		(struct client_init_ramrod_data *)o->rdata;
	dma_addr_t data_mapping = o->rdata_mapping;
	int ramrod = RAMROD_CMD_ID_ETH_CLIENT_SETUP;

	/* Clear the ramrod data */
	memset(rdata, 0, sizeof(*rdata));

	/* Fill the ramrod data */
	bnx2x_q_fill_setup_data_cmn(bp, params, rdata);
	bnx2x_q_fill_setup_data_e2(bp, params, rdata);

	/* No need for an explicit memory barrier here: the ordering of
	 * writing to the SPQ element and updating the SPQ producer (which
	 * involves a memory read) is guaranteed by the full memory barrier
	 * inside bnx2x_sp_post().
	 */

	return bnx2x_sp_post(bp, ramrod, o->cids[BNX2X_PRIMARY_CID_INDEX],
			     U64_HI(data_mapping),
			     U64_LO(data_mapping), ETH_CONNECTION_TYPE);
}

static inline int bnx2x_q_send_setup_tx_only(struct bnx2x *bp,
					struct bnx2x_queue_state_params *params)
{
	struct bnx2x_queue_sp_obj *o = params->q_obj;
	struct tx_queue_init_ramrod_data *rdata =
		(struct tx_queue_init_ramrod_data *)o->rdata;
	dma_addr_t data_mapping = o->rdata_mapping;
	int ramrod = RAMROD_CMD_ID_ETH_TX_QUEUE_SETUP;
	struct bnx2x_queue_setup_tx_only_params *tx_only_params =
		&params->params.tx_only;
	u8 cid_index = tx_only_params->cid_index;

	if (cid_index >= o->max_cos) {
		BNX2X_ERR("queue[%d]: cid_index (%d) is out of range\n",
			  o->cl_id, cid_index);
		return -EINVAL;
	}

	DP(BNX2X_MSG_SP, "parameters received: cos: %d sp-id: %d\n",
	   tx_only_params->gen_params.cos,
	   tx_only_params->gen_params.spcl_id);

	/* Clear the ramrod data */
	memset(rdata, 0, sizeof(*rdata));

	/* Fill the ramrod data */
	bnx2x_q_fill_setup_tx_only(bp, params, rdata);

	DP(BNX2X_MSG_SP, "sending tx-only ramrod: cid %d, client-id %d, sp-client id %d, cos %d\n",
	   o->cids[cid_index], rdata->general.client_id,
	   rdata->general.sp_client_id, rdata->general.cos);

	/* No need for an explicit memory barrier here: the ordering of
	 * writing to the SPQ element and updating the SPQ producer (which
	 * involves a memory read) is guaranteed by the full memory barrier
	 * inside bnx2x_sp_post().
	 */

	return bnx2x_sp_post(bp, ramrod, o->cids[cid_index],
			     U64_HI(data_mapping),
			     U64_LO(data_mapping), ETH_CONNECTION_TYPE);
}

static void bnx2x_q_fill_update_data(struct bnx2x *bp,
				     struct bnx2x_queue_sp_obj *obj,
				     struct bnx2x_queue_update_params *params,
				     struct client_update_ramrod_data *data)
{
	/* Client ID of the client to update */
	data->client_id = obj->cl_id;

	/* Function ID of the client to update */
	data->func_id = obj->func_id;

	/* Default VLAN value */
	data->default_vlan = cpu_to_le16(params->def_vlan);

	/* Inner VLAN stripping */
	data->inner_vlan_removal_enable_flg =
		test_bit(BNX2X_Q_UPDATE_IN_VLAN_REM, &params->update_flags);
	data->inner_vlan_removal_change_flg =
		test_bit(BNX2X_Q_UPDATE_IN_VLAN_REM_CHNG,
			 &params->update_flags);

	/* Outer VLAN stripping */
	data->outer_vlan_removal_enable_flg =
		test_bit(BNX2X_Q_UPDATE_OUT_VLAN_REM, &params->update_flags);
	data->outer_vlan_removal_change_flg =
		test_bit(BNX2X_Q_UPDATE_OUT_VLAN_REM_CHNG,
			 &params->update_flags);

	/* Drop packets that have source MAC that doesn't belong to this
	 * Queue.
	 */
	data->anti_spoofing_enable_flg =
		test_bit(BNX2X_Q_UPDATE_ANTI_SPOOF, &params->update_flags);
	data->anti_spoofing_change_flg =
		test_bit(BNX2X_Q_UPDATE_ANTI_SPOOF_CHNG, &params->update_flags);

	/* Activate/Deactivate */
	data->activate_flg =
		test_bit(BNX2X_Q_UPDATE_ACTIVATE, &params->update_flags);
	data->activate_change_flg =
		test_bit(BNX2X_Q_UPDATE_ACTIVATE_CHNG, &params->update_flags);

	/* Enable default VLAN */
	data->default_vlan_enable_flg =
		test_bit(BNX2X_Q_UPDATE_DEF_VLAN_EN, &params->update_flags);
	data->default_vlan_change_flg =
		test_bit(BNX2X_Q_UPDATE_DEF_VLAN_EN_CHNG,
			 &params->update_flags);

	/* silent vlan removal */
	data->silent_vlan_change_flg =
		test_bit(BNX2X_Q_UPDATE_SILENT_VLAN_REM_CHNG,
			 &params->update_flags);
	data->silent_vlan_removal_flg =
		test_bit(BNX2X_Q_UPDATE_SILENT_VLAN_REM, &params->update_flags);
	data->silent_vlan_value = cpu_to_le16(params->silent_removal_value);
	data->silent_vlan_mask = cpu_to_le16(params->silent_removal_mask);
}

static inline int bnx2x_q_send_update(struct bnx2x *bp,
				      struct bnx2x_queue_state_params *params)
{
	struct bnx2x_queue_sp_obj *o = params->q_obj;
	struct client_update_ramrod_data *rdata =
		(struct client_update_ramrod_data *)o->rdata;
	dma_addr_t data_mapping = o->rdata_mapping;
	struct bnx2x_queue_update_params *update_params =
		&params->params.update;
	u8 cid_index = update_params->cid_index;

	if (cid_index >= o->max_cos) {
		BNX2X_ERR("queue[%d]: cid_index (%d) is out of range\n",
			  o->cl_id, cid_index);
		return -EINVAL;
	}

	/* Clear the ramrod data */
	memset(rdata, 0, sizeof(*rdata));

	/* Fill the ramrod data */
	bnx2x_q_fill_update_data(bp, o, update_params, rdata);

	/* No need for an explicit memory barrier here: the ordering of
	 * writing to the SPQ element and updating the SPQ producer (which
	 * involves a memory read) is guaranteed by the full memory barrier
	 * inside bnx2x_sp_post().
	 */

	return bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_CLIENT_UPDATE,
			     o->cids[cid_index], U64_HI(data_mapping),
			     U64_LO(data_mapping), ETH_CONNECTION_TYPE);
}

/**
 * bnx2x_q_send_deactivate - send DEACTIVATE command
 *
 * @bp:	device handle
 * @params:	queue state parameters
 *
 * implemented using the UPDATE command.
 */
static inline int bnx2x_q_send_deactivate(struct bnx2x *bp,
					struct bnx2x_queue_state_params *params)
{
	struct bnx2x_queue_update_params *update = &params->params.update;

	memset(update, 0, sizeof(*update));

	__set_bit(BNX2X_Q_UPDATE_ACTIVATE_CHNG, &update->update_flags);

	return bnx2x_q_send_update(bp, params);
}

/**
 * bnx2x_q_send_activate - send ACTIVATE command
 *
 * @bp:	device handle
 * @params:	queue state parameters
 *
 * implemented using the UPDATE command.
 */
static inline int bnx2x_q_send_activate(struct bnx2x *bp,
					struct bnx2x_queue_state_params *params)
{
	struct bnx2x_queue_update_params *update = &params->params.update;

	memset(update, 0, sizeof(*update));

	__set_bit(BNX2X_Q_UPDATE_ACTIVATE, &update->update_flags);
	__set_bit(BNX2X_Q_UPDATE_ACTIVATE_CHNG, &update->update_flags);

	return bnx2x_q_send_update(bp, params);
}

static inline int bnx2x_q_send_update_tpa(struct bnx2x *bp,
					struct bnx2x_queue_state_params *params)
{
	/* TODO: Not implemented yet. */
	return -1;
}

static inline int bnx2x_q_send_halt(struct bnx2x *bp,
				    struct bnx2x_queue_state_params *params)
{
	struct bnx2x_queue_sp_obj *o = params->q_obj;

	return bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_HALT,
			     o->cids[BNX2X_PRIMARY_CID_INDEX], 0, o->cl_id,
			     ETH_CONNECTION_TYPE);
}

static inline int bnx2x_q_send_cfc_del(struct bnx2x *bp,
				       struct bnx2x_queue_state_params *params)
{
	struct bnx2x_queue_sp_obj *o = params->q_obj;
	u8 cid_idx = params->params.cfc_del.cid_index;

	if (cid_idx >= o->max_cos) {
		BNX2X_ERR("queue[%d]: cid_index (%d) is out of range\n",
			  o->cl_id, cid_idx);
		return -EINVAL;
	}

	return bnx2x_sp_post(bp, RAMROD_CMD_ID_COMMON_CFC_DEL,
			     o->cids[cid_idx], 0, 0, NONE_CONNECTION_TYPE);
}

static inline int bnx2x_q_send_terminate(struct bnx2x *bp,
					struct bnx2x_queue_state_params *params)
{
	struct bnx2x_queue_sp_obj *o = params->q_obj;
	u8 cid_index = params->params.terminate.cid_index;

	if (cid_index >= o->max_cos) {
		BNX2X_ERR("queue[%d]: cid_index (%d) is out of range\n",
			  o->cl_id, cid_index);
		return -EINVAL;
	}

	return bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_TERMINATE,
			     o->cids[cid_index], 0, 0, ETH_CONNECTION_TYPE);
}

static inline int bnx2x_q_send_empty(struct bnx2x *bp,
				     struct bnx2x_queue_state_params *params)
{
	struct bnx2x_queue_sp_obj *o = params->q_obj;

	return bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_EMPTY,
			     o->cids[BNX2X_PRIMARY_CID_INDEX], 0, 0,
			     ETH_CONNECTION_TYPE);
}

static inline int bnx2x_queue_send_cmd_cmn(struct bnx2x *bp,
					struct bnx2x_queue_state_params *params)
{
	switch (params->cmd) {
	case BNX2X_Q_CMD_INIT:
		return bnx2x_q_init(bp, params);
	case BNX2X_Q_CMD_SETUP_TX_ONLY:
		return bnx2x_q_send_setup_tx_only(bp, params);
	case BNX2X_Q_CMD_DEACTIVATE:
		return bnx2x_q_send_deactivate(bp, params);
	case BNX2X_Q_CMD_ACTIVATE:
		return bnx2x_q_send_activate(bp, params);
	case BNX2X_Q_CMD_UPDATE:
		return bnx2x_q_send_update(bp, params);
	case BNX2X_Q_CMD_UPDATE_TPA:
		return bnx2x_q_send_update_tpa(bp, params);
	case BNX2X_Q_CMD_HALT:
		return bnx2x_q_send_halt(bp, params);
	case BNX2X_Q_CMD_CFC_DEL:
		return bnx2x_q_send_cfc_del(bp, params);
	case BNX2X_Q_CMD_TERMINATE:
		return bnx2x_q_send_terminate(bp, params);
	case BNX2X_Q_CMD_EMPTY:
		return bnx2x_q_send_empty(bp, params);
	default:
		BNX2X_ERR("Unknown command: %d\n", params->cmd);
		return -EINVAL;
	}
}

static int bnx2x_queue_send_cmd_e1x(struct bnx2x *bp,
				    struct bnx2x_queue_state_params *params)
{
	switch (params->cmd) {
	case BNX2X_Q_CMD_SETUP:
		return bnx2x_q_send_setup_e1x(bp, params);
	case BNX2X_Q_CMD_INIT:
	case BNX2X_Q_CMD_SETUP_TX_ONLY:
	case BNX2X_Q_CMD_DEACTIVATE:
	case BNX2X_Q_CMD_ACTIVATE:
	case BNX2X_Q_CMD_UPDATE:
	case BNX2X_Q_CMD_UPDATE_TPA:
	case BNX2X_Q_CMD_HALT:
	case BNX2X_Q_CMD_CFC_DEL:
	case BNX2X_Q_CMD_TERMINATE:
	case BNX2X_Q_CMD_EMPTY:
		return bnx2x_queue_send_cmd_cmn(bp, params);
	default:
		BNX2X_ERR("Unknown command: %d\n", params->cmd);
		return -EINVAL;
	}
}

static int bnx2x_queue_send_cmd_e2(struct bnx2x *bp,
				   struct bnx2x_queue_state_params *params)
{
	switch (params->cmd) {
	case BNX2X_Q_CMD_SETUP:
		return bnx2x_q_send_setup_e2(bp, params);
	case BNX2X_Q_CMD_INIT:
	case BNX2X_Q_CMD_SETUP_TX_ONLY:
	case BNX2X_Q_CMD_DEACTIVATE:
	case BNX2X_Q_CMD_ACTIVATE:
	case BNX2X_Q_CMD_UPDATE:
	case BNX2X_Q_CMD_UPDATE_TPA:
	case BNX2X_Q_CMD_HALT:
	case BNX2X_Q_CMD_CFC_DEL:
	case BNX2X_Q_CMD_TERMINATE:
	case BNX2X_Q_CMD_EMPTY:
		return bnx2x_queue_send_cmd_cmn(bp, params);
	default:
		BNX2X_ERR("Unknown command: %d\n", params->cmd);
		return -EINVAL;
	}
}
4993
/**
 * bnx2x_queue_chk_transition - check state machine of a regular Queue
 *
 * @bp: device handle
 * @o: queue state object
 * @params: queue state parameters, including the requested command
 *
 * Used for a regular (i.e. not a Forwarding) queue. Checks both
 * whether the requested command is legal in the current state and,
 * if it is, sets a `next_state' in the object that the completion
 * flow will later use to set the `state' of the object.
 *
 * Returns 0 if the requested command is a legal transition,
 * -EINVAL otherwise.
 */
static int bnx2x_queue_chk_transition(struct bnx2x *bp,
				      struct bnx2x_queue_sp_obj *o,
				      struct bnx2x_queue_state_params *params)
{
	enum bnx2x_q_state state = o->state, next_state = BNX2X_Q_STATE_MAX;
	enum bnx2x_queue_cmd cmd = params->cmd;
	struct bnx2x_queue_update_params *update_params =
		&params->params.update;
	u8 next_tx_only = o->num_tx_only;

	/*
	 * Forget all pending for completion commands if a driver only state
	 * transition has been requested.
	 */
	if (test_bit(RAMROD_DRV_CLR_ONLY, &params->ramrod_flags)) {
		o->pending = 0;
		o->next_state = BNX2X_Q_STATE_MAX;
	}

	/*
	 * Don't allow a next state transition if we are in the middle of
	 * the previous one.
	 */
	if (o->pending) {
		BNX2X_ERR("Blocking transition since pending was %lx\n",
			  o->pending);
		return -EBUSY;
	}

	switch (state) {
	case BNX2X_Q_STATE_RESET:
		if (cmd == BNX2X_Q_CMD_INIT)
			next_state = BNX2X_Q_STATE_INITIALIZED;

		break;
	case BNX2X_Q_STATE_INITIALIZED:
		if (cmd == BNX2X_Q_CMD_SETUP) {
			if (test_bit(BNX2X_Q_FLG_ACTIVE,
				     &params->params.setup.flags))
				next_state = BNX2X_Q_STATE_ACTIVE;
			else
				next_state = BNX2X_Q_STATE_INACTIVE;
		}

		break;
	case BNX2X_Q_STATE_ACTIVE:
		if (cmd == BNX2X_Q_CMD_DEACTIVATE)
			next_state = BNX2X_Q_STATE_INACTIVE;

		else if ((cmd == BNX2X_Q_CMD_EMPTY) ||
			 (cmd == BNX2X_Q_CMD_UPDATE_TPA))
			next_state = BNX2X_Q_STATE_ACTIVE;

		else if (cmd == BNX2X_Q_CMD_SETUP_TX_ONLY) {
			next_state = BNX2X_Q_STATE_MULTI_COS;
			next_tx_only = 1;
		}

		else if (cmd == BNX2X_Q_CMD_HALT)
			next_state = BNX2X_Q_STATE_STOPPED;

		else if (cmd == BNX2X_Q_CMD_UPDATE) {
			/* If "active" state change is requested, update the
			 * state accordingly.
			 */
			if (test_bit(BNX2X_Q_UPDATE_ACTIVATE_CHNG,
				     &update_params->update_flags) &&
			    !test_bit(BNX2X_Q_UPDATE_ACTIVATE,
				      &update_params->update_flags))
				next_state = BNX2X_Q_STATE_INACTIVE;
			else
				next_state = BNX2X_Q_STATE_ACTIVE;
		}

		break;
	case BNX2X_Q_STATE_MULTI_COS:
		if (cmd == BNX2X_Q_CMD_TERMINATE)
			next_state = BNX2X_Q_STATE_MCOS_TERMINATED;

		else if (cmd == BNX2X_Q_CMD_SETUP_TX_ONLY) {
			next_state = BNX2X_Q_STATE_MULTI_COS;
			next_tx_only = o->num_tx_only + 1;
		}

		else if ((cmd == BNX2X_Q_CMD_EMPTY) ||
			 (cmd == BNX2X_Q_CMD_UPDATE_TPA))
			next_state = BNX2X_Q_STATE_MULTI_COS;

		else if (cmd == BNX2X_Q_CMD_UPDATE) {
			/* If "active" state change is requested, update the
			 * state accordingly.
			 */
			if (test_bit(BNX2X_Q_UPDATE_ACTIVATE_CHNG,
				     &update_params->update_flags) &&
			    !test_bit(BNX2X_Q_UPDATE_ACTIVATE,
				      &update_params->update_flags))
				next_state = BNX2X_Q_STATE_INACTIVE;
			else
				next_state = BNX2X_Q_STATE_MULTI_COS;
		}

		break;
	case BNX2X_Q_STATE_MCOS_TERMINATED:
		if (cmd == BNX2X_Q_CMD_CFC_DEL) {
			next_tx_only = o->num_tx_only - 1;
			if (next_tx_only == 0)
				next_state = BNX2X_Q_STATE_ACTIVE;
			else
				next_state = BNX2X_Q_STATE_MULTI_COS;
		}

		break;
	case BNX2X_Q_STATE_INACTIVE:
		if (cmd == BNX2X_Q_CMD_ACTIVATE)
			next_state = BNX2X_Q_STATE_ACTIVE;

		else if ((cmd == BNX2X_Q_CMD_EMPTY) ||
			 (cmd == BNX2X_Q_CMD_UPDATE_TPA))
			next_state = BNX2X_Q_STATE_INACTIVE;

		else if (cmd == BNX2X_Q_CMD_HALT)
			next_state = BNX2X_Q_STATE_STOPPED;

		else if (cmd == BNX2X_Q_CMD_UPDATE) {
			/* If "active" state change is requested, update the
			 * state accordingly.
			 */
			if (test_bit(BNX2X_Q_UPDATE_ACTIVATE_CHNG,
				     &update_params->update_flags) &&
			    test_bit(BNX2X_Q_UPDATE_ACTIVATE,
				     &update_params->update_flags)) {
				if (o->num_tx_only == 0)
					next_state = BNX2X_Q_STATE_ACTIVE;
				else /* tx only queues exist for this queue */
					next_state = BNX2X_Q_STATE_MULTI_COS;
			} else
				next_state = BNX2X_Q_STATE_INACTIVE;
		}

		break;
	case BNX2X_Q_STATE_STOPPED:
		if (cmd == BNX2X_Q_CMD_TERMINATE)
			next_state = BNX2X_Q_STATE_TERMINATED;

		break;
	case BNX2X_Q_STATE_TERMINATED:
		if (cmd == BNX2X_Q_CMD_CFC_DEL)
			next_state = BNX2X_Q_STATE_RESET;

		break;
	default:
		BNX2X_ERR("Illegal state: %d\n", state);
	}

	/* Transition is assured */
	if (next_state != BNX2X_Q_STATE_MAX) {
		DP(BNX2X_MSG_SP, "Good state transition: %d(%d)->%d\n",
		   state, cmd, next_state);
		o->next_state = next_state;
		o->next_tx_only = next_tx_only;
		return 0;
	}

	DP(BNX2X_MSG_SP, "Bad state transition request: %d %d\n", state, cmd);

	return -EINVAL;
}

void bnx2x_init_queue_obj(struct bnx2x *bp,
			  struct bnx2x_queue_sp_obj *obj,
			  u8 cl_id, u32 *cids, u8 cid_cnt, u8 func_id,
			  void *rdata,
			  dma_addr_t rdata_mapping, unsigned long type)
{
	memset(obj, 0, sizeof(*obj));

	/* We support only BNX2X_MULTI_TX_COS Tx CoS at the moment */
	BUG_ON(BNX2X_MULTI_TX_COS < cid_cnt);

	memcpy(obj->cids, cids, sizeof(obj->cids[0]) * cid_cnt);
	obj->max_cos = cid_cnt;
	obj->cl_id = cl_id;
	obj->func_id = func_id;
	obj->rdata = rdata;
	obj->rdata_mapping = rdata_mapping;
	obj->type = type;
	obj->next_state = BNX2X_Q_STATE_MAX;

	if (CHIP_IS_E1x(bp))
		obj->send_cmd = bnx2x_queue_send_cmd_e1x;
	else
		obj->send_cmd = bnx2x_queue_send_cmd_e2;

	obj->check_transition = bnx2x_queue_chk_transition;

	obj->complete_cmd = bnx2x_queue_comp_cmd;
	obj->wait_comp = bnx2x_queue_wait_comp;
	obj->set_pending = bnx2x_queue_set_pending;
}

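/* Illustrative sketch only (not an actual call site in this file): how a
 * client would typically drive a queue object through the public entry
 * point declared in bnx2x_sp.h. The bnx2x_queue_state_change() call and
 * the {NULL} initializer follow the patterns used elsewhere in the driver;
 * where the caller obtains q_obj from is an assumption here.
 */
static int __maybe_unused bnx2x_example_queue_halt(struct bnx2x *bp,
						   struct bnx2x_queue_sp_obj *q_obj)
{
	struct bnx2x_queue_state_params q_params = {NULL};

	q_params.q_obj = q_obj;
	q_params.cmd = BNX2X_Q_CMD_HALT;

	/* Block until the HALT ramrod completion arrives */
	__set_bit(RAMROD_COMP_WAIT, &q_params.ramrod_flags);

	/* Legality of the ACTIVE/INACTIVE -> STOPPED transition is
	 * validated by bnx2x_queue_chk_transition() inside the flow.
	 */
	return bnx2x_queue_state_change(bp, &q_params);
}
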
/* Return a queue object's logical state */
int bnx2x_get_q_logical_state(struct bnx2x *bp,
			      struct bnx2x_queue_sp_obj *obj)
{
	switch (obj->state) {
	case BNX2X_Q_STATE_ACTIVE:
	case BNX2X_Q_STATE_MULTI_COS:
		return BNX2X_Q_LOGICAL_STATE_ACTIVE;
	case BNX2X_Q_STATE_RESET:
	case BNX2X_Q_STATE_INITIALIZED:
	case BNX2X_Q_STATE_MCOS_TERMINATED:
	case BNX2X_Q_STATE_INACTIVE:
	case BNX2X_Q_STATE_STOPPED:
	case BNX2X_Q_STATE_TERMINATED:
	case BNX2X_Q_STATE_FLRED:
		return BNX2X_Q_LOGICAL_STATE_STOPPED;
	default:
		return -EINVAL;
	}
}

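/* A minimal usage sketch (an assumption about typical callers, modeled on
 * how configuration paths gate ramrods on queue activity rather than
 * copied from a real call site):
 */
static bool __maybe_unused bnx2x_example_q_is_active(struct bnx2x *bp,
						     struct bnx2x_queue_sp_obj *q_obj)
{
	return bnx2x_get_q_logical_state(bp, q_obj) ==
	       BNX2X_Q_LOGICAL_STATE_ACTIVE;
}
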
/********************** Function state object *********************************/
enum bnx2x_func_state bnx2x_func_get_state(struct bnx2x *bp,
					   struct bnx2x_func_sp_obj *o)
{
	/* in the middle of a transaction - return INVALID state */
	if (o->pending)
		return BNX2X_F_STATE_MAX;

	/*
	 * Ensure the ordering of reading o->pending and o->state:
	 * o->pending should be read first.
	 */
	rmb();

	return o->state;
}

static int bnx2x_func_wait_comp(struct bnx2x *bp,
				struct bnx2x_func_sp_obj *o,
				enum bnx2x_func_cmd cmd)
{
	return bnx2x_state_wait(bp, cmd, &o->pending);
}

/**
 * bnx2x_func_state_change_comp - complete the state machine transition
 *
 * @bp: device handle
 * @o: function state object
 * @cmd: command whose completion has arrived
 *
 * Called on state change transition. Completes the state
 * machine transition only - no HW interaction.
 */
static inline int bnx2x_func_state_change_comp(struct bnx2x *bp,
					       struct bnx2x_func_sp_obj *o,
					       enum bnx2x_func_cmd cmd)
{
	unsigned long cur_pending = o->pending;

	if (!test_and_clear_bit(cmd, &cur_pending)) {
		BNX2X_ERR("Bad MC reply %d for func %d in state %d pending 0x%lx, next_state %d\n",
			  cmd, BP_FUNC(bp), o->state,
			  cur_pending, o->next_state);
		return -EINVAL;
	}

	DP(BNX2X_MSG_SP,
	   "Completing command %d for func %d, setting state to %d\n",
	   cmd, BP_FUNC(bp), o->next_state);

	o->state = o->next_state;
	o->next_state = BNX2X_F_STATE_MAX;

	/* It's important that o->state and o->next_state are
	 * updated before o->pending.
	 */
	wmb();

	clear_bit(cmd, &o->pending);
	smp_mb__after_clear_bit();

	return 0;
}

/**
 * bnx2x_func_comp_cmd - complete the state change command
 *
 * @bp: device handle
 * @o: function state object
 * @cmd: command whose completion has arrived
 *
 * Checks that the arrived completion is expected.
 */
static int bnx2x_func_comp_cmd(struct bnx2x *bp,
			       struct bnx2x_func_sp_obj *o,
			       enum bnx2x_func_cmd cmd)
{
	/* Complete the state machine part first, check if it's a
	 * legal completion.
	 */
	int rc = bnx2x_func_state_change_comp(bp, o, cmd);
	return rc;
}

/**
 * bnx2x_func_chk_transition - perform function state machine transition
 *
 * @bp: device handle
 * @o: function state object
 * @params: function state parameters, including the requested command
 *
 * Checks both whether the requested command is legal in the current
 * state and, if it is, sets a `next_state' in the object that the
 * completion flow will later use to set the `state' of the object.
 *
 * Returns 0 if the requested command is a legal transition,
 * -EINVAL otherwise.
 */
static int bnx2x_func_chk_transition(struct bnx2x *bp,
				     struct bnx2x_func_sp_obj *o,
				     struct bnx2x_func_state_params *params)
{
	enum bnx2x_func_state state = o->state, next_state = BNX2X_F_STATE_MAX;
	enum bnx2x_func_cmd cmd = params->cmd;

	/*
	 * Forget all pending for completion commands if a driver only state
	 * transition has been requested.
	 */
	if (test_bit(RAMROD_DRV_CLR_ONLY, &params->ramrod_flags)) {
		o->pending = 0;
		o->next_state = BNX2X_F_STATE_MAX;
	}

	/*
	 * Don't allow a next state transition if we are in the middle of
	 * the previous one.
	 */
	if (o->pending)
		return -EBUSY;

	switch (state) {
	case BNX2X_F_STATE_RESET:
		if (cmd == BNX2X_F_CMD_HW_INIT)
			next_state = BNX2X_F_STATE_INITIALIZED;

		break;
	case BNX2X_F_STATE_INITIALIZED:
		if (cmd == BNX2X_F_CMD_START)
			next_state = BNX2X_F_STATE_STARTED;

		else if (cmd == BNX2X_F_CMD_HW_RESET)
			next_state = BNX2X_F_STATE_RESET;

		break;
	case BNX2X_F_STATE_STARTED:
		if (cmd == BNX2X_F_CMD_STOP)
			next_state = BNX2X_F_STATE_INITIALIZED;
		/* AFEX ramrods can be sent only in the started state, and
		 * only if a function_stop ramrod completion is not pending;
		 * for these events the next state remains STARTED.
		 */
		else if ((cmd == BNX2X_F_CMD_AFEX_UPDATE) &&
			 (!test_bit(BNX2X_F_CMD_STOP, &o->pending)))
			next_state = BNX2X_F_STATE_STARTED;

		else if ((cmd == BNX2X_F_CMD_AFEX_VIFLISTS) &&
			 (!test_bit(BNX2X_F_CMD_STOP, &o->pending)))
			next_state = BNX2X_F_STATE_STARTED;

		/* Switch_update ramrod can be sent in either started or
		 * tx_stopped state, and it doesn't change the state.
		 */
		else if ((cmd == BNX2X_F_CMD_SWITCH_UPDATE) &&
			 (!test_bit(BNX2X_F_CMD_STOP, &o->pending)))
			next_state = BNX2X_F_STATE_STARTED;

		else if (cmd == BNX2X_F_CMD_TX_STOP)
			next_state = BNX2X_F_STATE_TX_STOPPED;

		break;
	case BNX2X_F_STATE_TX_STOPPED:
		if ((cmd == BNX2X_F_CMD_SWITCH_UPDATE) &&
		    (!test_bit(BNX2X_F_CMD_STOP, &o->pending)))
			next_state = BNX2X_F_STATE_TX_STOPPED;

		else if (cmd == BNX2X_F_CMD_TX_START)
			next_state = BNX2X_F_STATE_STARTED;

		break;
	default:
		BNX2X_ERR("Unknown state: %d\n", state);
	}

	/* Transition is assured */
	if (next_state != BNX2X_F_STATE_MAX) {
		DP(BNX2X_MSG_SP, "Good function state transition: %d(%d)->%d\n",
		   state, cmd, next_state);
		o->next_state = next_state;
		return 0;
	}

	DP(BNX2X_MSG_SP, "Bad function state transition request: %d %d\n",
	   state, cmd);

	return -EINVAL;
}

/**
 * bnx2x_func_init_func - performs HW init at function stage
 *
 * @bp: device handle
 * @drv: driver-specific HW init callbacks
 *
 * Init HW when the current phase is
 * FW_MSG_CODE_DRV_LOAD_FUNCTION: initialize only FUNCTION-only
 * HW blocks.
 */
static inline int bnx2x_func_init_func(struct bnx2x *bp,
				       const struct bnx2x_func_sp_drv_ops *drv)
{
	return drv->init_hw_func(bp);
}

/**
 * bnx2x_func_init_port - performs HW init at port stage
 *
 * @bp: device handle
 * @drv: driver-specific HW init callbacks
 *
 * Init HW when the current phase is
 * FW_MSG_CODE_DRV_LOAD_PORT: initialize PORT-only and
 * FUNCTION-only HW blocks.
 *
 */
static inline int bnx2x_func_init_port(struct bnx2x *bp,
				       const struct bnx2x_func_sp_drv_ops *drv)
{
	int rc = drv->init_hw_port(bp);
	if (rc)
		return rc;

	return bnx2x_func_init_func(bp, drv);
}

/**
 * bnx2x_func_init_cmn_chip - performs HW init at chip-common stage
 *
 * @bp: device handle
 * @drv: driver-specific HW init callbacks
 *
 * Init HW when the current phase is
 * FW_MSG_CODE_DRV_LOAD_COMMON_CHIP: initialize COMMON_CHIP,
 * PORT-only and FUNCTION-only HW blocks.
 */
static inline int bnx2x_func_init_cmn_chip(struct bnx2x *bp,
					   const struct bnx2x_func_sp_drv_ops *drv)
{
	int rc = drv->init_hw_cmn_chip(bp);
	if (rc)
		return rc;

	return bnx2x_func_init_port(bp, drv);
}

/**
 * bnx2x_func_init_cmn - performs HW init at common stage
 *
 * @bp: device handle
 * @drv: driver-specific HW init callbacks
 *
 * Init HW when the current phase is
 * FW_MSG_CODE_DRV_LOAD_COMMON: initialize COMMON,
 * PORT-only and FUNCTION-only HW blocks.
 */
static inline int bnx2x_func_init_cmn(struct bnx2x *bp,
				      const struct bnx2x_func_sp_drv_ops *drv)
{
	int rc = drv->init_hw_cmn(bp);
	if (rc)
		return rc;

	return bnx2x_func_init_port(bp, drv);
}

static int bnx2x_func_hw_init(struct bnx2x *bp,
			      struct bnx2x_func_state_params *params)
{
	u32 load_code = params->params.hw_init.load_phase;
	struct bnx2x_func_sp_obj *o = params->f_obj;
	const struct bnx2x_func_sp_drv_ops *drv = o->drv;
	int rc = 0;

	DP(BNX2X_MSG_SP, "function %d load_code %x\n",
	   BP_ABS_FUNC(bp), load_code);

	/* Prepare buffers for unzipping the FW */
	rc = drv->gunzip_init(bp);
	if (rc)
		return rc;

	/* Prepare FW */
	rc = drv->init_fw(bp);
	if (rc) {
		BNX2X_ERR("Error loading firmware\n");
		goto init_err;
	}

	/* Handle the beginning of COMMON_XXX phases separately... */
	switch (load_code) {
	case FW_MSG_CODE_DRV_LOAD_COMMON_CHIP:
		rc = bnx2x_func_init_cmn_chip(bp, drv);
		if (rc)
			goto init_err;

		break;
	case FW_MSG_CODE_DRV_LOAD_COMMON:
		rc = bnx2x_func_init_cmn(bp, drv);
		if (rc)
			goto init_err;

		break;
	case FW_MSG_CODE_DRV_LOAD_PORT:
		rc = bnx2x_func_init_port(bp, drv);
		if (rc)
			goto init_err;

		break;
	case FW_MSG_CODE_DRV_LOAD_FUNCTION:
		rc = bnx2x_func_init_func(bp, drv);
		if (rc)
			goto init_err;

		break;
	default:
		BNX2X_ERR("Unknown load_code (0x%x) from MCP\n", load_code);
		rc = -EINVAL;
	}

init_err:
	drv->gunzip_end(bp);

	/* In case of success, complete the command immediately: no ramrods
	 * have been sent.
	 */
	if (!rc)
		o->complete_cmd(bp, o, BNX2X_F_CMD_HW_INIT);

	return rc;
}

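/* A minimal sketch (not an actual call site) of how a load flow would
 * request the HW_INIT transition; it assumes bp->func_obj has already
 * been set up via bnx2x_init_func_obj() and that load_code came back
 * from the MCP load request.
 */
static int __maybe_unused bnx2x_example_hw_init(struct bnx2x *bp,
						u32 load_code)
{
	struct bnx2x_func_state_params func_params = {NULL};

	func_params.f_obj = &bp->func_obj;
	func_params.cmd = BNX2X_F_CMD_HW_INIT;
	func_params.params.hw_init.load_phase = load_code;

	/* HW_INIT completes synchronously (no ramrod is sent), but
	 * waiting is harmless and matches the driver's usual pattern.
	 */
	__set_bit(RAMROD_COMP_WAIT, &func_params.ramrod_flags);

	return bnx2x_func_state_change(bp, &func_params);
}
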
/**
 * bnx2x_func_reset_func - reset HW at function stage
 *
 * @bp: device handle
 * @drv: driver-specific HW reset callbacks
 *
 * Reset HW at FW_MSG_CODE_DRV_UNLOAD_FUNCTION stage: reset only
 * FUNCTION-only HW blocks.
 */
static inline void bnx2x_func_reset_func(struct bnx2x *bp,
					 const struct bnx2x_func_sp_drv_ops *drv)
{
	drv->reset_hw_func(bp);
}

/**
 * bnx2x_func_reset_port - reset HW at port stage
 *
 * @bp: device handle
 * @drv: driver-specific HW reset callbacks
 *
 * Reset HW at FW_MSG_CODE_DRV_UNLOAD_PORT stage: reset
 * FUNCTION-only and PORT-only HW blocks.
 *
 * !!!IMPORTANT!!!
 *
 * It's important to call reset_port before reset_func() as the last thing
 * reset_func does is pf_disable() thus disabling PGLUE_B, which
 * makes any DMAE transactions impossible.
 */
static inline void bnx2x_func_reset_port(struct bnx2x *bp,
					 const struct bnx2x_func_sp_drv_ops *drv)
{
	drv->reset_hw_port(bp);
	bnx2x_func_reset_func(bp, drv);
}

/**
 * bnx2x_func_reset_cmn - reset HW at common stage
 *
 * @bp: device handle
 * @drv: driver-specific HW reset callbacks
 *
 * Reset HW at FW_MSG_CODE_DRV_UNLOAD_COMMON and
 * FW_MSG_CODE_DRV_UNLOAD_COMMON_CHIP stages: reset COMMON,
 * COMMON_CHIP, FUNCTION-only and PORT-only HW blocks.
 */
static inline void bnx2x_func_reset_cmn(struct bnx2x *bp,
					const struct bnx2x_func_sp_drv_ops *drv)
{
	bnx2x_func_reset_port(bp, drv);
	drv->reset_hw_cmn(bp);
}

static inline int bnx2x_func_hw_reset(struct bnx2x *bp,
				      struct bnx2x_func_state_params *params)
{
	u32 reset_phase = params->params.hw_reset.reset_phase;
	struct bnx2x_func_sp_obj *o = params->f_obj;
	const struct bnx2x_func_sp_drv_ops *drv = o->drv;

	DP(BNX2X_MSG_SP, "function %d reset_phase %x\n", BP_ABS_FUNC(bp),
	   reset_phase);

	switch (reset_phase) {
	case FW_MSG_CODE_DRV_UNLOAD_COMMON:
		bnx2x_func_reset_cmn(bp, drv);
		break;
	case FW_MSG_CODE_DRV_UNLOAD_PORT:
		bnx2x_func_reset_port(bp, drv);
		break;
	case FW_MSG_CODE_DRV_UNLOAD_FUNCTION:
		bnx2x_func_reset_func(bp, drv);
		break;
	default:
		BNX2X_ERR("Unknown reset_phase (0x%x) from MCP\n",
			  reset_phase);
		break;
	}

	/* Complete the command immediately: no ramrods have been sent. */
	o->complete_cmd(bp, o, BNX2X_F_CMD_HW_RESET);

	return 0;
}

static inline int bnx2x_func_send_start(struct bnx2x *bp,
					struct bnx2x_func_state_params *params)
{
	struct bnx2x_func_sp_obj *o = params->f_obj;
	struct function_start_data *rdata =
		(struct function_start_data *)o->rdata;
	dma_addr_t data_mapping = o->rdata_mapping;
	struct bnx2x_func_start_params *start_params = &params->params.start;

	memset(rdata, 0, sizeof(*rdata));

	/* Fill the ramrod data with provided parameters */
	rdata->function_mode = (u8)start_params->mf_mode;
	rdata->sd_vlan_tag = cpu_to_le16(start_params->sd_vlan_tag);
	rdata->path_id = BP_PATH(bp);
	rdata->network_cos_mode = start_params->network_cos_mode;

	/*
	 * No need for an explicit memory barrier here: the ordering of
	 * writing to the SPQ element and updating the SPQ producer
	 * (which involves a memory read) is guaranteed by the full
	 * memory barrier inside bnx2x_sp_post().
	 */

	return bnx2x_sp_post(bp, RAMROD_CMD_ID_COMMON_FUNCTION_START, 0,
			     U64_HI(data_mapping),
			     U64_LO(data_mapping), NONE_CONNECTION_TYPE);
}

static inline int bnx2x_func_send_switch_update(struct bnx2x *bp,
						struct bnx2x_func_state_params *params)
{
	struct bnx2x_func_sp_obj *o = params->f_obj;
	struct function_update_data *rdata =
		(struct function_update_data *)o->rdata;
	dma_addr_t data_mapping = o->rdata_mapping;
	struct bnx2x_func_switch_update_params *switch_update_params =
		&params->params.switch_update;

	memset(rdata, 0, sizeof(*rdata));

	/* Fill the ramrod data with provided parameters */
	rdata->tx_switch_suspend_change_flg = 1;
	rdata->tx_switch_suspend = switch_update_params->suspend;
	rdata->echo = SWITCH_UPDATE;

	return bnx2x_sp_post(bp, RAMROD_CMD_ID_COMMON_FUNCTION_UPDATE, 0,
			     U64_HI(data_mapping),
			     U64_LO(data_mapping), NONE_CONNECTION_TYPE);
}

static inline int bnx2x_func_send_afex_update(struct bnx2x *bp,
					      struct bnx2x_func_state_params *params)
{
	struct bnx2x_func_sp_obj *o = params->f_obj;
	struct function_update_data *rdata =
		(struct function_update_data *)o->afex_rdata;
	dma_addr_t data_mapping = o->afex_rdata_mapping;
	struct bnx2x_func_afex_update_params *afex_update_params =
		&params->params.afex_update;

	memset(rdata, 0, sizeof(*rdata));

	/* Fill the ramrod data with provided parameters */
	rdata->vif_id_change_flg = 1;
	rdata->vif_id = cpu_to_le16(afex_update_params->vif_id);
	rdata->afex_default_vlan_change_flg = 1;
	rdata->afex_default_vlan =
		cpu_to_le16(afex_update_params->afex_default_vlan);
	rdata->allowed_priorities_change_flg = 1;
	rdata->allowed_priorities = afex_update_params->allowed_priorities;
	rdata->echo = AFEX_UPDATE;

	/* No need for an explicit memory barrier here: the ordering of
	 * writing to the SPQ element and updating the SPQ producer
	 * (which involves a memory read) is guaranteed by the full
	 * memory barrier inside bnx2x_sp_post().
	 */
	DP(BNX2X_MSG_SP,
	   "afex: sending func_update vif_id 0x%x dvlan 0x%x prio 0x%x\n",
	   rdata->vif_id,
	   rdata->afex_default_vlan, rdata->allowed_priorities);

	return bnx2x_sp_post(bp, RAMROD_CMD_ID_COMMON_FUNCTION_UPDATE, 0,
			     U64_HI(data_mapping),
			     U64_LO(data_mapping), NONE_CONNECTION_TYPE);
}

static inline int bnx2x_func_send_afex_viflists(struct bnx2x *bp,
						struct bnx2x_func_state_params *params)
{
	struct bnx2x_func_sp_obj *o = params->f_obj;
	struct afex_vif_list_ramrod_data *rdata =
		(struct afex_vif_list_ramrod_data *)o->afex_rdata;
	struct bnx2x_func_afex_viflists_params *afex_vif_params =
		&params->params.afex_viflists;
	u64 *p_rdata = (u64 *)rdata;

	memset(rdata, 0, sizeof(*rdata));

	/* Fill the ramrod data with provided parameters */
	rdata->vif_list_index = cpu_to_le16(afex_vif_params->vif_list_index);
	rdata->func_bit_map = afex_vif_params->func_bit_map;
	rdata->afex_vif_list_command = afex_vif_params->afex_vif_list_command;
	rdata->func_to_clear = afex_vif_params->func_to_clear;

	/* send in echo type of sub command */
	rdata->echo = afex_vif_params->afex_vif_list_command;

	/* No need for an explicit memory barrier here: the ordering of
	 * writing to the SPQ element and updating the SPQ producer
	 * (which involves a memory read) is guaranteed by the full
	 * memory barrier inside bnx2x_sp_post().
	 */

	DP(BNX2X_MSG_SP, "afex: ramrod lists, cmd 0x%x index 0x%x func_bit_map 0x%x func_to_clr 0x%x\n",
	   rdata->afex_vif_list_command, rdata->vif_list_index,
	   rdata->func_bit_map, rdata->func_to_clear);

	/* this ramrod sends data directly and not through DMA mapping */
	return bnx2x_sp_post(bp, RAMROD_CMD_ID_COMMON_AFEX_VIF_LISTS, 0,
			     U64_HI(*p_rdata), U64_LO(*p_rdata),
			     NONE_CONNECTION_TYPE);
}

static inline int bnx2x_func_send_stop(struct bnx2x *bp,
				       struct bnx2x_func_state_params *params)
{
	return bnx2x_sp_post(bp, RAMROD_CMD_ID_COMMON_FUNCTION_STOP, 0, 0, 0,
			     NONE_CONNECTION_TYPE);
}

static inline int bnx2x_func_send_tx_stop(struct bnx2x *bp,
					  struct bnx2x_func_state_params *params)
{
	return bnx2x_sp_post(bp, RAMROD_CMD_ID_COMMON_STOP_TRAFFIC, 0, 0, 0,
			     NONE_CONNECTION_TYPE);
}

static inline int bnx2x_func_send_tx_start(struct bnx2x *bp,
					   struct bnx2x_func_state_params *params)
{
	struct bnx2x_func_sp_obj *o = params->f_obj;
	struct flow_control_configuration *rdata =
		(struct flow_control_configuration *)o->rdata;
	dma_addr_t data_mapping = o->rdata_mapping;
	struct bnx2x_func_tx_start_params *tx_start_params =
		&params->params.tx_start;
	int i;

	memset(rdata, 0, sizeof(*rdata));

	rdata->dcb_enabled = tx_start_params->dcb_enabled;
	rdata->dcb_version = tx_start_params->dcb_version;
	rdata->dont_add_pri_0_en = tx_start_params->dont_add_pri_0_en;

	for (i = 0; i < ARRAY_SIZE(rdata->traffic_type_to_priority_cos); i++)
		rdata->traffic_type_to_priority_cos[i] =
			tx_start_params->traffic_type_to_priority_cos[i];

	return bnx2x_sp_post(bp, RAMROD_CMD_ID_COMMON_START_TRAFFIC, 0,
			     U64_HI(data_mapping),
			     U64_LO(data_mapping), NONE_CONNECTION_TYPE);
}

static int bnx2x_func_send_cmd(struct bnx2x *bp,
			       struct bnx2x_func_state_params *params)
{
	switch (params->cmd) {
	case BNX2X_F_CMD_HW_INIT:
		return bnx2x_func_hw_init(bp, params);
	case BNX2X_F_CMD_START:
		return bnx2x_func_send_start(bp, params);
	case BNX2X_F_CMD_STOP:
		return bnx2x_func_send_stop(bp, params);
	case BNX2X_F_CMD_HW_RESET:
		return bnx2x_func_hw_reset(bp, params);
	case BNX2X_F_CMD_AFEX_UPDATE:
		return bnx2x_func_send_afex_update(bp, params);
	case BNX2X_F_CMD_AFEX_VIFLISTS:
		return bnx2x_func_send_afex_viflists(bp, params);
	case BNX2X_F_CMD_TX_STOP:
		return bnx2x_func_send_tx_stop(bp, params);
	case BNX2X_F_CMD_TX_START:
		return bnx2x_func_send_tx_start(bp, params);
	case BNX2X_F_CMD_SWITCH_UPDATE:
		return bnx2x_func_send_switch_update(bp, params);
	default:
		BNX2X_ERR("Unknown command: %d\n", params->cmd);
		return -EINVAL;
	}
}

void bnx2x_init_func_obj(struct bnx2x *bp,
			 struct bnx2x_func_sp_obj *obj,
			 void *rdata, dma_addr_t rdata_mapping,
			 void *afex_rdata, dma_addr_t afex_rdata_mapping,
			 struct bnx2x_func_sp_drv_ops *drv_iface)
{
	memset(obj, 0, sizeof(*obj));

	mutex_init(&obj->one_pending_mutex);

	obj->rdata = rdata;
	obj->rdata_mapping = rdata_mapping;
	obj->afex_rdata = afex_rdata;
	obj->afex_rdata_mapping = afex_rdata_mapping;
	obj->send_cmd = bnx2x_func_send_cmd;
	obj->check_transition = bnx2x_func_chk_transition;
	obj->complete_cmd = bnx2x_func_comp_cmd;
	obj->wait_comp = bnx2x_func_wait_comp;

	obj->drv = drv_iface;
}

/**
 * bnx2x_func_state_change - perform Function state change transition
 *
 * @bp: device handle
 * @params: parameters to perform the transaction
 *
 * Returns 0 in case of a successfully completed transition,
 * negative error code in case of failure, or a positive
 * (EBUSY) value if there is a completion that is still
 * pending (possible only if RAMROD_COMP_WAIT is not set
 * in params->ramrod_flags for asynchronous commands).
 */
int bnx2x_func_state_change(struct bnx2x *bp,
			    struct bnx2x_func_state_params *params)
{
	struct bnx2x_func_sp_obj *o = params->f_obj;
	int rc, cnt = 300;
	enum bnx2x_func_cmd cmd = params->cmd;
	unsigned long *pending = &o->pending;

	mutex_lock(&o->one_pending_mutex);

	/* Check that the requested transition is legal */
	rc = o->check_transition(bp, o, params);
	if ((rc == -EBUSY) &&
	    (test_bit(RAMROD_RETRY, &params->ramrod_flags))) {
		while ((rc == -EBUSY) && (--cnt > 0)) {
			mutex_unlock(&o->one_pending_mutex);
			msleep(10);
			mutex_lock(&o->one_pending_mutex);
			rc = o->check_transition(bp, o, params);
		}
		if (rc == -EBUSY) {
			mutex_unlock(&o->one_pending_mutex);
			BNX2X_ERR("timeout waiting for previous ramrod completion\n");
			return rc;
		}
	} else if (rc) {
		mutex_unlock(&o->one_pending_mutex);
		return rc;
	}

	/* Set "pending" bit */
	set_bit(cmd, pending);

	/* Don't send a command if only driver cleanup was requested */
	if (test_bit(RAMROD_DRV_CLR_ONLY, &params->ramrod_flags)) {
		bnx2x_func_state_change_comp(bp, o, cmd);
		mutex_unlock(&o->one_pending_mutex);
	} else {
		/* Send a ramrod */
		rc = o->send_cmd(bp, params);

		mutex_unlock(&o->one_pending_mutex);

		if (rc) {
			o->next_state = BNX2X_F_STATE_MAX;
			clear_bit(cmd, pending);
			smp_mb__after_clear_bit();
			return rc;
		}

		if (test_bit(RAMROD_COMP_WAIT, &params->ramrod_flags)) {
			rc = o->wait_comp(bp, o, cmd);
			if (rc)
				return rc;

			return 0;
		}
	}

	return !!test_bit(cmd, pending);
}
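
/* A minimal sketch of a START transition (illustrative only; the real
 * caller lives in the load path of bnx2x_main.c, and the mf_mode,
 * sd_vlan_tag and network_cos_mode values below are placeholders rather
 * than values taken from this file).
 */
static int __maybe_unused bnx2x_example_func_start(struct bnx2x *bp)
{
	struct bnx2x_func_state_params func_params = {NULL};
	struct bnx2x_func_start_params *start = &func_params.params.start;

	func_params.f_obj = &bp->func_obj;
	func_params.cmd = BNX2X_F_CMD_START;
	__set_bit(RAMROD_COMP_WAIT, &func_params.ramrod_flags);

	start->mf_mode = 0;		/* single-function mode, placeholder */
	start->sd_vlan_tag = 0;		/* unused outside SD mode */
	start->network_cos_mode = 0;	/* placeholder CoS mode */

	/* bnx2x_func_chk_transition() only allows this from the
	 * INITIALIZED state; the completion moves the object to STARTED.
	 */
	return bnx2x_func_state_change(bp, &func_params);
}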