/* bnx2x_sp.c: Broadcom Everest network driver.
 *
 * Copyright (c) 2011-2013 Broadcom Corporation
 *
 * Unless you and Broadcom execute a separate written software license
 * agreement governing use of this software, this software is licensed to you
 * under the terms of the GNU General Public License version 2, available
 * at http://www.gnu.org/licenses/old-licenses/gpl-2.0.html (the "GPL").
 *
 * Notwithstanding the above, under no circumstances may you combine this
 * software in any way with any other Broadcom software provided under a
 * license other than the GPL, without Broadcom's express prior written
 * consent.
 *
 * Maintained by: Eilon Greenstein <eilong@broadcom.com>
 * Written by: Vladislav Zolotarov
 *
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/module.h>
#include <linux/crc32.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/crc32c.h>
#include "bnx2x.h"
#include "bnx2x_cmn.h"
#include "bnx2x_sp.h"

#define BNX2X_MAX_EMUL_MULTI		16

/**** Exe Queue interfaces ****/

/**
 * bnx2x_exe_queue_init - init the Exe Queue object
 *
 * @bp:		driver handle
 * @o:		pointer to the object
 * @exe_len:	length
 * @owner:	pointer to the owner
 * @validate:	validate function pointer
 * @remove:	remove function pointer
 * @optimize:	optimize function pointer
 * @exec:	execute function pointer
 * @get:	get function pointer
 */
static inline void bnx2x_exe_queue_init(struct bnx2x *bp,
					struct bnx2x_exe_queue_obj *o,
					int exe_len,
					union bnx2x_qable_obj *owner,
					exe_q_validate validate,
					exe_q_remove remove,
					exe_q_optimize optimize,
					exe_q_execute exec,
					exe_q_get get)
{
	memset(o, 0, sizeof(*o));

	INIT_LIST_HEAD(&o->exe_queue);
	INIT_LIST_HEAD(&o->pending_comp);

	spin_lock_init(&o->lock);

	o->exe_chunk_len = exe_len;
	o->owner	 = owner;

	/* Owner specific callbacks */
	o->validate	 = validate;
	o->remove	 = remove;
	o->optimize	 = optimize;
	o->execute	 = exec;
	o->get		 = get;

	DP(BNX2X_MSG_SP, "Setup the execution queue with the chunk length of %d\n",
	   exe_len);
}
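
/*
 * Illustrative only (not part of the original driver): an owner object such
 * as a vlan_mac object would typically wire up its execution queue with its
 * own handlers. The callback names and the chunk length below are
 * placeholders, not real driver symbols:
 *
 *	bnx2x_exe_queue_init(bp, &o->exe_queue, 1,
 *			     (union bnx2x_qable_obj *)o,
 *			     my_validate, my_remove, my_optimize,
 *			     my_execute, my_get);
 */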

static inline void bnx2x_exe_queue_free_elem(struct bnx2x *bp,
					     struct bnx2x_exeq_elem *elem)
{
	DP(BNX2X_MSG_SP, "Deleting an exe_queue element\n");
	kfree(elem);
}

static inline int bnx2x_exe_queue_length(struct bnx2x_exe_queue_obj *o)
{
	struct bnx2x_exeq_elem *elem;
	int cnt = 0;

	spin_lock_bh(&o->lock);

	list_for_each_entry(elem, &o->exe_queue, link)
		cnt++;

	spin_unlock_bh(&o->lock);

	return cnt;
}

/**
 * bnx2x_exe_queue_add - add a new element to the execution queue
 *
 * @bp:		driver handle
 * @o:		queue
 * @elem:	new command to add
 * @restore:	true - do not optimize the command
 *
 * If the element is optimized or is illegal, frees it.
 */
static inline int bnx2x_exe_queue_add(struct bnx2x *bp,
				      struct bnx2x_exe_queue_obj *o,
				      struct bnx2x_exeq_elem *elem,
				      bool restore)
{
	int rc;

	spin_lock_bh(&o->lock);

	if (!restore) {
		/* Try to cancel this element (it may be optimized away) */
		rc = o->optimize(bp, o->owner, elem);
		if (rc)
			goto free_and_exit;

		/* Check if this request is ok */
		rc = o->validate(bp, o->owner, elem);
		if (rc) {
			DP(BNX2X_MSG_SP, "Preamble failed: %d\n", rc);
			goto free_and_exit;
		}
	}

	/* If it passed validation, add it to the execution queue */
	list_add_tail(&elem->link, &o->exe_queue);

	spin_unlock_bh(&o->lock);

	return 0;

free_and_exit:
	bnx2x_exe_queue_free_elem(bp, elem);

	spin_unlock_bh(&o->lock);

	return rc;
}

static inline void __bnx2x_exe_queue_reset_pending(
	struct bnx2x *bp,
	struct bnx2x_exe_queue_obj *o)
{
	struct bnx2x_exeq_elem *elem;

	while (!list_empty(&o->pending_comp)) {
		elem = list_first_entry(&o->pending_comp,
					struct bnx2x_exeq_elem, link);

		list_del(&elem->link);
		bnx2x_exe_queue_free_elem(bp, elem);
	}
}

static inline void bnx2x_exe_queue_reset_pending(struct bnx2x *bp,
						 struct bnx2x_exe_queue_obj *o)
{
	spin_lock_bh(&o->lock);

	__bnx2x_exe_queue_reset_pending(bp, o);

	spin_unlock_bh(&o->lock);
}

/**
 * bnx2x_exe_queue_step - execute one execution chunk atomically
 *
 * @bp:			driver handle
 * @o:			queue
 * @ramrod_flags:	flags
 *
 * (Atomicity is ensured using the exe_queue->lock).
 */
static inline int bnx2x_exe_queue_step(struct bnx2x *bp,
				       struct bnx2x_exe_queue_obj *o,
				       unsigned long *ramrod_flags)
{
	struct bnx2x_exeq_elem *elem, spacer;
	int cur_len = 0, rc;

	memset(&spacer, 0, sizeof(spacer));

	spin_lock_bh(&o->lock);

	/*
	 * Next step should not be performed until the current is finished,
	 * unless a DRV_CLEAR_ONLY bit is set. In this case we just want to
	 * properly clear object internals without sending any command to the
	 * FW, which also implies there won't be any completion to clear the
	 * 'pending' list.
	 */
	if (!list_empty(&o->pending_comp)) {
		if (test_bit(RAMROD_DRV_CLR_ONLY, ramrod_flags)) {
			DP(BNX2X_MSG_SP, "RAMROD_DRV_CLR_ONLY requested: resetting a pending_comp list\n");
			__bnx2x_exe_queue_reset_pending(bp, o);
		} else {
			spin_unlock_bh(&o->lock);
			return 1;
		}
	}

	/*
	 * Run through the pending commands list and create a next
	 * execution chunk.
	 */
	while (!list_empty(&o->exe_queue)) {
		elem = list_first_entry(&o->exe_queue, struct bnx2x_exeq_elem,
					link);
		WARN_ON(!elem->cmd_len);

		if (cur_len + elem->cmd_len <= o->exe_chunk_len) {
			cur_len += elem->cmd_len;
			/*
			 * Prevent both lists from being empty when moving an
			 * element. This will allow the call of
			 * bnx2x_exe_queue_empty() without locking.
			 */
			list_add_tail(&spacer.link, &o->pending_comp);
			mb();
			list_move_tail(&elem->link, &o->pending_comp);
			list_del(&spacer.link);
		} else
			break;
	}

	/* Sanity check */
	if (!cur_len) {
		spin_unlock_bh(&o->lock);
		return 0;
	}

	rc = o->execute(bp, o->owner, &o->pending_comp, ramrod_flags);
	if (rc < 0)
		/*
		 * In case of an error return the commands back to the queue
		 * and reset the pending_comp.
		 */
		list_splice_init(&o->pending_comp, &o->exe_queue);
	else if (!rc)
		/*
		 * If zero is returned, it means there are no outstanding
		 * pending completions and we may dismiss the pending list.
		 */
		__bnx2x_exe_queue_reset_pending(bp, o);

	spin_unlock_bh(&o->lock);
	return rc;
}

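/*
 * Illustrative only: callers drive the queue until it drains; a negative
 * return is an execution error and a positive return means completions are
 * still pending. A rough sketch of the pattern (the real flow lives in
 * bnx2x_config_vlan_mac() below):
 *
 *	rc = bnx2x_exe_queue_step(bp, &o->exe_queue, &ramrod_flags);
 *	if (rc < 0)
 *		return rc;
 *	while (!bnx2x_exe_queue_empty(&o->exe_queue))
 *		... wait for completions, then step again ...
 */
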
static inline bool bnx2x_exe_queue_empty(struct bnx2x_exe_queue_obj *o)
{
	bool empty = list_empty(&o->exe_queue);

	/* Don't reorder!!! */
	mb();

	return empty && list_empty(&o->pending_comp);
}

static inline struct bnx2x_exeq_elem *bnx2x_exe_queue_alloc_elem(
	struct bnx2x *bp)
{
	DP(BNX2X_MSG_SP, "Allocating a new exe_queue element\n");
	return kzalloc(sizeof(struct bnx2x_exeq_elem), GFP_ATOMIC);
}

/************************ raw_obj functions ***********************************/
static bool bnx2x_raw_check_pending(struct bnx2x_raw_obj *o)
{
	return !!test_bit(o->state, o->pstate);
}

static void bnx2x_raw_clear_pending(struct bnx2x_raw_obj *o)
{
	smp_mb__before_clear_bit();
	clear_bit(o->state, o->pstate);
	smp_mb__after_clear_bit();
}

static void bnx2x_raw_set_pending(struct bnx2x_raw_obj *o)
{
	smp_mb__before_clear_bit();
	set_bit(o->state, o->pstate);
	smp_mb__after_clear_bit();
}

/**
 * bnx2x_state_wait - wait until the given bit (state) is cleared
 *
 * @bp:		device handle
 * @state:	state which is to be cleared
 * @pstate:	state buffer
 *
 */
static inline int bnx2x_state_wait(struct bnx2x *bp, int state,
				   unsigned long *pstate)
{
	/* can take a while if any port is running */
	int cnt = 5000;

	if (CHIP_REV_IS_EMUL(bp))
		cnt *= 20;

	DP(BNX2X_MSG_SP, "waiting for state to become %d\n", state);

	might_sleep();
	while (cnt--) {
		if (!test_bit(state, pstate)) {
#ifdef BNX2X_STOP_ON_ERROR
			DP(BNX2X_MSG_SP, "exit (cnt %d)\n", 5000 - cnt);
#endif
			return 0;
		}

		usleep_range(1000, 2000);

		if (bp->panic)
			return -EIO;
	}

	/* timeout! */
	BNX2X_ERR("timeout waiting for state %d\n", state);
#ifdef BNX2X_STOP_ON_ERROR
	bnx2x_panic();
#endif

	return -EBUSY;
}

static int bnx2x_raw_wait(struct bnx2x *bp, struct bnx2x_raw_obj *raw)
{
	return bnx2x_state_wait(bp, raw->state, raw->pstate);
}

/***************** Classification verbs: Set/Del MAC/VLAN/VLAN-MAC ************/
/* credit handling callbacks */
static bool bnx2x_get_cam_offset_mac(struct bnx2x_vlan_mac_obj *o, int *offset)
{
	struct bnx2x_credit_pool_obj *mp = o->macs_pool;

	WARN_ON(!mp);

	return mp->get_entry(mp, offset);
}

static bool bnx2x_get_credit_mac(struct bnx2x_vlan_mac_obj *o)
{
	struct bnx2x_credit_pool_obj *mp = o->macs_pool;

	WARN_ON(!mp);

	return mp->get(mp, 1);
}

static bool bnx2x_get_cam_offset_vlan(struct bnx2x_vlan_mac_obj *o, int *offset)
{
	struct bnx2x_credit_pool_obj *vp = o->vlans_pool;

	WARN_ON(!vp);

	return vp->get_entry(vp, offset);
}

static bool bnx2x_get_credit_vlan(struct bnx2x_vlan_mac_obj *o)
{
	struct bnx2x_credit_pool_obj *vp = o->vlans_pool;

	WARN_ON(!vp);

	return vp->get(vp, 1);
}

static bool bnx2x_get_credit_vlan_mac(struct bnx2x_vlan_mac_obj *o)
{
	struct bnx2x_credit_pool_obj *mp = o->macs_pool;
	struct bnx2x_credit_pool_obj *vp = o->vlans_pool;

	if (!mp->get(mp, 1))
		return false;

	if (!vp->get(vp, 1)) {
		mp->put(mp, 1);
		return false;
	}

	return true;
}

static bool bnx2x_put_cam_offset_mac(struct bnx2x_vlan_mac_obj *o, int offset)
{
	struct bnx2x_credit_pool_obj *mp = o->macs_pool;

	return mp->put_entry(mp, offset);
}

static bool bnx2x_put_credit_mac(struct bnx2x_vlan_mac_obj *o)
{
	struct bnx2x_credit_pool_obj *mp = o->macs_pool;

	return mp->put(mp, 1);
}

static bool bnx2x_put_cam_offset_vlan(struct bnx2x_vlan_mac_obj *o, int offset)
{
	struct bnx2x_credit_pool_obj *vp = o->vlans_pool;

	return vp->put_entry(vp, offset);
}

static bool bnx2x_put_credit_vlan(struct bnx2x_vlan_mac_obj *o)
{
	struct bnx2x_credit_pool_obj *vp = o->vlans_pool;

	return vp->put(vp, 1);
}

static bool bnx2x_put_credit_vlan_mac(struct bnx2x_vlan_mac_obj *o)
{
	struct bnx2x_credit_pool_obj *mp = o->macs_pool;
	struct bnx2x_credit_pool_obj *vp = o->vlans_pool;

	if (!mp->put(mp, 1))
		return false;

	if (!vp->put(vp, 1)) {
		mp->get(mp, 1);
		return false;
	}

	return true;
}

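/*
 * Illustrative only: the VLAN-MAC pair helpers above must stay balanced.
 * A pair consumes one MAC credit and one VLAN credit on get and returns
 * both on put; on a partial failure the helper rolls back the credit it
 * already took, so a caller sketch is simply:
 *
 *	if (!bnx2x_get_credit_vlan_mac(o))
 *		return -EINVAL;
 *	...
 *	bnx2x_put_credit_vlan_mac(o);
 */
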
static int bnx2x_get_n_elements(struct bnx2x *bp, struct bnx2x_vlan_mac_obj *o,
				int n, u8 *base, u8 stride, u8 size)
{
	struct bnx2x_vlan_mac_registry_elem *pos;
	u8 *next = base;
	int counter = 0;

	/* traverse list */
	list_for_each_entry(pos, &o->head, link) {
		if (counter < n) {
			memcpy(next, &pos->u, size);
			counter++;
			DP(BNX2X_MSG_SP, "copied element number %d to address %p element was:\n",
			   counter, next);
			next += stride + size;
		}
	}
	return counter * ETH_ALEN;
}

/* check_add() callbacks */
static int bnx2x_check_mac_add(struct bnx2x *bp,
			       struct bnx2x_vlan_mac_obj *o,
			       union bnx2x_classification_ramrod_data *data)
{
	struct bnx2x_vlan_mac_registry_elem *pos;

	DP(BNX2X_MSG_SP, "Checking MAC %pM for ADD command\n", data->mac.mac);

	if (!is_valid_ether_addr(data->mac.mac))
		return -EINVAL;

	/* Check if a requested MAC already exists */
	list_for_each_entry(pos, &o->head, link)
		if (!memcmp(data->mac.mac, pos->u.mac.mac, ETH_ALEN) &&
		    (data->mac.is_inner_mac == pos->u.mac.is_inner_mac))
			return -EEXIST;

	return 0;
}

static int bnx2x_check_vlan_add(struct bnx2x *bp,
				struct bnx2x_vlan_mac_obj *o,
				union bnx2x_classification_ramrod_data *data)
{
	struct bnx2x_vlan_mac_registry_elem *pos;

	DP(BNX2X_MSG_SP, "Checking VLAN %d for ADD command\n", data->vlan.vlan);

	list_for_each_entry(pos, &o->head, link)
		if (data->vlan.vlan == pos->u.vlan.vlan)
			return -EEXIST;

	return 0;
}

static int bnx2x_check_vlan_mac_add(struct bnx2x *bp,
				    struct bnx2x_vlan_mac_obj *o,
				    union bnx2x_classification_ramrod_data *data)
{
	struct bnx2x_vlan_mac_registry_elem *pos;

	DP(BNX2X_MSG_SP, "Checking VLAN_MAC (%pM, %d) for ADD command\n",
	   data->vlan_mac.mac, data->vlan_mac.vlan);

	list_for_each_entry(pos, &o->head, link)
		if ((data->vlan_mac.vlan == pos->u.vlan_mac.vlan) &&
		    (!memcmp(data->vlan_mac.mac, pos->u.vlan_mac.mac,
			     ETH_ALEN)) &&
		    (data->vlan_mac.is_inner_mac ==
		     pos->u.vlan_mac.is_inner_mac))
			return -EEXIST;

	return 0;
}

/* check_del() callbacks */
static struct bnx2x_vlan_mac_registry_elem *
	bnx2x_check_mac_del(struct bnx2x *bp,
			    struct bnx2x_vlan_mac_obj *o,
			    union bnx2x_classification_ramrod_data *data)
{
	struct bnx2x_vlan_mac_registry_elem *pos;

	DP(BNX2X_MSG_SP, "Checking MAC %pM for DEL command\n", data->mac.mac);

	list_for_each_entry(pos, &o->head, link)
		if ((!memcmp(data->mac.mac, pos->u.mac.mac, ETH_ALEN)) &&
		    (data->mac.is_inner_mac == pos->u.mac.is_inner_mac))
			return pos;

	return NULL;
}

static struct bnx2x_vlan_mac_registry_elem *
	bnx2x_check_vlan_del(struct bnx2x *bp,
			     struct bnx2x_vlan_mac_obj *o,
			     union bnx2x_classification_ramrod_data *data)
{
	struct bnx2x_vlan_mac_registry_elem *pos;

	DP(BNX2X_MSG_SP, "Checking VLAN %d for DEL command\n", data->vlan.vlan);

	list_for_each_entry(pos, &o->head, link)
		if (data->vlan.vlan == pos->u.vlan.vlan)
			return pos;

	return NULL;
}

static struct bnx2x_vlan_mac_registry_elem *
	bnx2x_check_vlan_mac_del(struct bnx2x *bp,
				 struct bnx2x_vlan_mac_obj *o,
				 union bnx2x_classification_ramrod_data *data)
{
	struct bnx2x_vlan_mac_registry_elem *pos;

	DP(BNX2X_MSG_SP, "Checking VLAN_MAC (%pM, %d) for DEL command\n",
	   data->vlan_mac.mac, data->vlan_mac.vlan);

	list_for_each_entry(pos, &o->head, link)
		if ((data->vlan_mac.vlan == pos->u.vlan_mac.vlan) &&
		    (!memcmp(data->vlan_mac.mac, pos->u.vlan_mac.mac,
			     ETH_ALEN)) &&
		    (data->vlan_mac.is_inner_mac ==
		     pos->u.vlan_mac.is_inner_mac))
			return pos;

	return NULL;
}

/* check_move() callback */
static bool bnx2x_check_move(struct bnx2x *bp,
			     struct bnx2x_vlan_mac_obj *src_o,
			     struct bnx2x_vlan_mac_obj *dst_o,
			     union bnx2x_classification_ramrod_data *data)
{
	struct bnx2x_vlan_mac_registry_elem *pos;
	int rc;

	/* Check if we can delete the requested configuration from the first
	 * object.
	 */
	pos = src_o->check_del(bp, src_o, data);

	/* check if configuration can be added */
	rc = dst_o->check_add(bp, dst_o, data);

	/* If this classification can not be added (is already set)
	 * or can't be deleted - return an error.
	 */
	if (rc || !pos)
		return false;

	return true;
}

static bool bnx2x_check_move_always_err(
	struct bnx2x *bp,
	struct bnx2x_vlan_mac_obj *src_o,
	struct bnx2x_vlan_mac_obj *dst_o,
	union bnx2x_classification_ramrod_data *data)
{
	return false;
}

static inline u8 bnx2x_vlan_mac_get_rx_tx_flag(struct bnx2x_vlan_mac_obj *o)
{
	struct bnx2x_raw_obj *raw = &o->raw;
	u8 rx_tx_flag = 0;

	if ((raw->obj_type == BNX2X_OBJ_TYPE_TX) ||
	    (raw->obj_type == BNX2X_OBJ_TYPE_RX_TX))
		rx_tx_flag |= ETH_CLASSIFY_CMD_HEADER_TX_CMD;

	if ((raw->obj_type == BNX2X_OBJ_TYPE_RX) ||
	    (raw->obj_type == BNX2X_OBJ_TYPE_RX_TX))
		rx_tx_flag |= ETH_CLASSIFY_CMD_HEADER_RX_CMD;

	return rx_tx_flag;
}

void bnx2x_set_mac_in_nig(struct bnx2x *bp,
			  bool add, unsigned char *dev_addr, int index)
{
	u32 wb_data[2];
	u32 reg_offset = BP_PORT(bp) ? NIG_REG_LLH1_FUNC_MEM :
			 NIG_REG_LLH0_FUNC_MEM;

	if (!IS_MF_SI(bp) && !IS_MF_AFEX(bp))
		return;

	if (index > BNX2X_LLH_CAM_MAX_PF_LINE)
		return;

	DP(BNX2X_MSG_SP, "Going to %s LLH configuration at entry %d\n",
	   (add ? "ADD" : "DELETE"), index);

	if (add) {
		/* LLH_FUNC_MEM is a u64 WB register */
		reg_offset += 8*index;

		wb_data[0] = ((dev_addr[2] << 24) | (dev_addr[3] << 16) |
			      (dev_addr[4] << 8) |  dev_addr[5]);
		wb_data[1] = ((dev_addr[0] << 8) |  dev_addr[1]);

		REG_WR_DMAE(bp, reg_offset, wb_data, 2);
	}

	REG_WR(bp, (BP_PORT(bp) ? NIG_REG_LLH1_FUNC_MEM_ENABLE :
				  NIG_REG_LLH0_FUNC_MEM_ENABLE) + 4*index, add);
}

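/*
 * Worked example (illustrative only): for dev_addr 00:11:22:33:44:55 the
 * packing above yields wb_data[0] = 0x22334455 (the four low bytes) and
 * wb_data[1] = 0x0011 (the two high bytes), matching the u64 wide-bus
 * layout of the LLH_FUNC_MEM entry.
 */
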
/**
 * bnx2x_vlan_mac_set_cmd_hdr_e2 - set a header in a single classify ramrod
 *
 * @bp:		device handle
 * @o:		queue for which we want to configure this rule
 * @add:	if true the command is an ADD command, DEL otherwise
 * @opcode:	CLASSIFY_RULE_OPCODE_XXX
 * @hdr:	pointer to a header to setup
 *
 */
static inline void bnx2x_vlan_mac_set_cmd_hdr_e2(struct bnx2x *bp,
	struct bnx2x_vlan_mac_obj *o, bool add, int opcode,
	struct eth_classify_cmd_header *hdr)
{
	struct bnx2x_raw_obj *raw = &o->raw;

	hdr->client_id = raw->cl_id;
	hdr->func_id = raw->func_id;

	/* Rx or/and Tx (internal switching) configuration ? */
	hdr->cmd_general_data |=
		bnx2x_vlan_mac_get_rx_tx_flag(o);

	if (add)
		hdr->cmd_general_data |= ETH_CLASSIFY_CMD_HEADER_IS_ADD;

	hdr->cmd_general_data |=
		(opcode << ETH_CLASSIFY_CMD_HEADER_OPCODE_SHIFT);
}

/**
 * bnx2x_vlan_mac_set_rdata_hdr_e2 - set the classify ramrod data header
 *
 * @cid:	connection id
 * @type:	BNX2X_FILTER_XXX_PENDING
 * @hdr:	pointer to header to setup
 * @rule_cnt:
 *
 * currently we always configure one rule, and the echo field is set to
 * contain a CID and an opcode type.
 */
static inline void bnx2x_vlan_mac_set_rdata_hdr_e2(u32 cid, int type,
				struct eth_classify_header *hdr, int rule_cnt)
{
	hdr->echo = cpu_to_le32((cid & BNX2X_SWCID_MASK) |
				(type << BNX2X_SWCID_SHIFT));
	hdr->rule_cnt = (u8)rule_cnt;
}

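/*
 * Worked example (illustrative only): the echo word packs the software CID
 * into the low bits and the pending-filter type above it, so both can be
 * recovered from the completion later; a sketch of the unpacking:
 *
 *	cid  = le32_to_cpu(hdr->echo) & BNX2X_SWCID_MASK;
 *	type = le32_to_cpu(hdr->echo) >> BNX2X_SWCID_SHIFT;
 */
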
/* hw_config() callbacks */
static void bnx2x_set_one_mac_e2(struct bnx2x *bp,
				 struct bnx2x_vlan_mac_obj *o,
				 struct bnx2x_exeq_elem *elem, int rule_idx,
				 int cam_offset)
{
	struct bnx2x_raw_obj *raw = &o->raw;
	struct eth_classify_rules_ramrod_data *data =
		(struct eth_classify_rules_ramrod_data *)(raw->rdata);
	int rule_cnt = rule_idx + 1, cmd = elem->cmd_data.vlan_mac.cmd;
	union eth_classify_rule_cmd *rule_entry = &data->rules[rule_idx];
	bool add = (cmd == BNX2X_VLAN_MAC_ADD) ? true : false;
	unsigned long *vlan_mac_flags = &elem->cmd_data.vlan_mac.vlan_mac_flags;
	u8 *mac = elem->cmd_data.vlan_mac.u.mac.mac;

	/*
	 * Set LLH CAM entry: currently only iSCSI and ETH macs are
	 * relevant. In addition, current implementation is tuned for a
	 * single ETH MAC.
	 *
	 * When multiple unicast ETH MACs PF configuration in switch
	 * independent mode is required (NetQ, multiple netdev MACs,
	 * etc.), consider better utilisation of 8 per function MAC
	 * entries in the LLH register. There is also
	 * NIG_REG_P[01]_LLH_FUNC_MEM2 registers that complete the
	 * total number of CAM entries to 16.
	 *
	 * Currently we won't configure NIG for MACs other than a primary ETH
	 * MAC and iSCSI L2 MAC.
	 *
	 * If this MAC is moving from one Queue to another, no need to change
	 * NIG configuration.
	 */
	if (cmd != BNX2X_VLAN_MAC_MOVE) {
		if (test_bit(BNX2X_ISCSI_ETH_MAC, vlan_mac_flags))
			bnx2x_set_mac_in_nig(bp, add, mac,
					     BNX2X_LLH_CAM_ISCSI_ETH_LINE);
		else if (test_bit(BNX2X_ETH_MAC, vlan_mac_flags))
			bnx2x_set_mac_in_nig(bp, add, mac,
					     BNX2X_LLH_CAM_ETH_LINE);
	}

	/* Reset the ramrod data buffer for the first rule */
	if (rule_idx == 0)
		memset(data, 0, sizeof(*data));

	/* Setup a command header */
	bnx2x_vlan_mac_set_cmd_hdr_e2(bp, o, add, CLASSIFY_RULE_OPCODE_MAC,
				      &rule_entry->mac.header);

	DP(BNX2X_MSG_SP, "About to %s MAC %pM for Queue %d\n",
	   (add ? "add" : "delete"), mac, raw->cl_id);

	/* Set a MAC itself */
	bnx2x_set_fw_mac_addr(&rule_entry->mac.mac_msb,
			      &rule_entry->mac.mac_mid,
			      &rule_entry->mac.mac_lsb, mac);
	rule_entry->mac.inner_mac =
		cpu_to_le16(elem->cmd_data.vlan_mac.u.mac.is_inner_mac);

	/* MOVE: Add a rule that will add this MAC to the target Queue */
	if (cmd == BNX2X_VLAN_MAC_MOVE) {
		rule_entry++;
		rule_cnt++;

		/* Setup ramrod data */
		bnx2x_vlan_mac_set_cmd_hdr_e2(bp,
					elem->cmd_data.vlan_mac.target_obj,
					true, CLASSIFY_RULE_OPCODE_MAC,
					&rule_entry->mac.header);

		/* Set a MAC itself */
		bnx2x_set_fw_mac_addr(&rule_entry->mac.mac_msb,
				      &rule_entry->mac.mac_mid,
				      &rule_entry->mac.mac_lsb, mac);
		rule_entry->mac.inner_mac =
			cpu_to_le16(elem->cmd_data.vlan_mac.
				    u.mac.is_inner_mac);
	}

	/* Set the ramrod data header */
	/* TODO: take this to the higher level in order to prevent multiple
	   writing */
	bnx2x_vlan_mac_set_rdata_hdr_e2(raw->cid, raw->state, &data->header,
					rule_cnt);
}

/**
 * bnx2x_vlan_mac_set_rdata_hdr_e1x - set a header in a single classify ramrod
 *
 * @bp:		device handle
 * @o:		queue
 * @type:
 * @cam_offset:	offset in cam memory
 * @hdr:	pointer to a header to setup
 *
 * E1/E1H
 */
static inline void bnx2x_vlan_mac_set_rdata_hdr_e1x(struct bnx2x *bp,
	struct bnx2x_vlan_mac_obj *o, int type, int cam_offset,
	struct mac_configuration_hdr *hdr)
{
	struct bnx2x_raw_obj *r = &o->raw;

	hdr->length = 1;
	hdr->offset = (u8)cam_offset;
	hdr->client_id = cpu_to_le16(0xff);
	hdr->echo = cpu_to_le32((r->cid & BNX2X_SWCID_MASK) |
				(type << BNX2X_SWCID_SHIFT));
}

static inline void bnx2x_vlan_mac_set_cfg_entry_e1x(struct bnx2x *bp,
	struct bnx2x_vlan_mac_obj *o, bool add, int opcode, u8 *mac,
	u16 vlan_id, struct mac_configuration_entry *cfg_entry)
{
	struct bnx2x_raw_obj *r = &o->raw;
	u32 cl_bit_vec = (1 << r->cl_id);

	cfg_entry->clients_bit_vector = cpu_to_le32(cl_bit_vec);
	cfg_entry->pf_id = r->func_id;
	cfg_entry->vlan_id = cpu_to_le16(vlan_id);

	if (add) {
		SET_FLAG(cfg_entry->flags, MAC_CONFIGURATION_ENTRY_ACTION_TYPE,
			 T_ETH_MAC_COMMAND_SET);
		SET_FLAG(cfg_entry->flags,
			 MAC_CONFIGURATION_ENTRY_VLAN_FILTERING_MODE, opcode);

		/* Set a MAC in a ramrod data */
		bnx2x_set_fw_mac_addr(&cfg_entry->msb_mac_addr,
				      &cfg_entry->middle_mac_addr,
				      &cfg_entry->lsb_mac_addr, mac);
	} else
		SET_FLAG(cfg_entry->flags, MAC_CONFIGURATION_ENTRY_ACTION_TYPE,
			 T_ETH_MAC_COMMAND_INVALIDATE);
}

static inline void bnx2x_vlan_mac_set_rdata_e1x(struct bnx2x *bp,
	struct bnx2x_vlan_mac_obj *o, int type, int cam_offset, bool add,
	u8 *mac, u16 vlan_id, int opcode, struct mac_configuration_cmd *config)
{
	struct mac_configuration_entry *cfg_entry = &config->config_table[0];
	struct bnx2x_raw_obj *raw = &o->raw;

	bnx2x_vlan_mac_set_rdata_hdr_e1x(bp, o, type, cam_offset,
					 &config->hdr);
	bnx2x_vlan_mac_set_cfg_entry_e1x(bp, o, add, opcode, mac, vlan_id,
					 cfg_entry);

	DP(BNX2X_MSG_SP, "%s MAC %pM CLID %d CAM offset %d\n",
	   (add ? "setting" : "clearing"),
	   mac, raw->cl_id, cam_offset);
}

/**
 * bnx2x_set_one_mac_e1x - fill a single MAC rule ramrod data
 *
 * @bp:		device handle
 * @o:		bnx2x_vlan_mac_obj
 * @elem:	bnx2x_exeq_elem
 * @rule_idx:	rule_idx
 * @cam_offset: cam_offset
 */
static void bnx2x_set_one_mac_e1x(struct bnx2x *bp,
				  struct bnx2x_vlan_mac_obj *o,
				  struct bnx2x_exeq_elem *elem, int rule_idx,
				  int cam_offset)
{
	struct bnx2x_raw_obj *raw = &o->raw;
	struct mac_configuration_cmd *config =
		(struct mac_configuration_cmd *)(raw->rdata);
	/*
	 * 57710 and 57711 do not support MOVE command,
	 * so it's either ADD or DEL
	 */
	bool add = (elem->cmd_data.vlan_mac.cmd == BNX2X_VLAN_MAC_ADD) ?
		true : false;

	/* Reset the ramrod data buffer */
	memset(config, 0, sizeof(*config));

	bnx2x_vlan_mac_set_rdata_e1x(bp, o, raw->state,
				     cam_offset, add,
				     elem->cmd_data.vlan_mac.u.mac.mac, 0,
				     ETH_VLAN_FILTER_ANY_VLAN, config);
}

static void bnx2x_set_one_vlan_e2(struct bnx2x *bp,
				  struct bnx2x_vlan_mac_obj *o,
				  struct bnx2x_exeq_elem *elem, int rule_idx,
				  int cam_offset)
{
	struct bnx2x_raw_obj *raw = &o->raw;
	struct eth_classify_rules_ramrod_data *data =
		(struct eth_classify_rules_ramrod_data *)(raw->rdata);
	int rule_cnt = rule_idx + 1;
	union eth_classify_rule_cmd *rule_entry = &data->rules[rule_idx];
	enum bnx2x_vlan_mac_cmd cmd = elem->cmd_data.vlan_mac.cmd;
	bool add = (cmd == BNX2X_VLAN_MAC_ADD) ? true : false;
	u16 vlan = elem->cmd_data.vlan_mac.u.vlan.vlan;

	/* Reset the ramrod data buffer for the first rule */
	if (rule_idx == 0)
		memset(data, 0, sizeof(*data));

	/* Set a rule header */
	bnx2x_vlan_mac_set_cmd_hdr_e2(bp, o, add, CLASSIFY_RULE_OPCODE_VLAN,
				      &rule_entry->vlan.header);

	DP(BNX2X_MSG_SP, "About to %s VLAN %d\n", (add ? "add" : "delete"),
	   vlan);

	/* Set a VLAN itself */
	rule_entry->vlan.vlan = cpu_to_le16(vlan);

	/* MOVE: Add a rule that will add this VLAN to the target Queue */
	if (cmd == BNX2X_VLAN_MAC_MOVE) {
		rule_entry++;
		rule_cnt++;

		/* Setup ramrod data */
		bnx2x_vlan_mac_set_cmd_hdr_e2(bp,
					elem->cmd_data.vlan_mac.target_obj,
					true, CLASSIFY_RULE_OPCODE_VLAN,
					&rule_entry->vlan.header);

		/* Set a VLAN itself */
		rule_entry->vlan.vlan = cpu_to_le16(vlan);
	}

	/* Set the ramrod data header */
	/* TODO: take this to the higher level in order to prevent multiple
	   writing */
	bnx2x_vlan_mac_set_rdata_hdr_e2(raw->cid, raw->state, &data->header,
					rule_cnt);
}

static void bnx2x_set_one_vlan_mac_e2(struct bnx2x *bp,
				      struct bnx2x_vlan_mac_obj *o,
				      struct bnx2x_exeq_elem *elem,
				      int rule_idx, int cam_offset)
{
	struct bnx2x_raw_obj *raw = &o->raw;
	struct eth_classify_rules_ramrod_data *data =
		(struct eth_classify_rules_ramrod_data *)(raw->rdata);
	int rule_cnt = rule_idx + 1;
	union eth_classify_rule_cmd *rule_entry = &data->rules[rule_idx];
	enum bnx2x_vlan_mac_cmd cmd = elem->cmd_data.vlan_mac.cmd;
	bool add = (cmd == BNX2X_VLAN_MAC_ADD) ? true : false;
	u16 vlan = elem->cmd_data.vlan_mac.u.vlan_mac.vlan;
	u8 *mac = elem->cmd_data.vlan_mac.u.vlan_mac.mac;

	/* Reset the ramrod data buffer for the first rule */
	if (rule_idx == 0)
		memset(data, 0, sizeof(*data));

	/* Set a rule header */
	bnx2x_vlan_mac_set_cmd_hdr_e2(bp, o, add, CLASSIFY_RULE_OPCODE_PAIR,
				      &rule_entry->pair.header);

	/* Set VLAN and MAC themselves */
	rule_entry->pair.vlan = cpu_to_le16(vlan);
	bnx2x_set_fw_mac_addr(&rule_entry->pair.mac_msb,
			      &rule_entry->pair.mac_mid,
			      &rule_entry->pair.mac_lsb, mac);
	rule_entry->pair.inner_mac =
		cpu_to_le16(elem->cmd_data.vlan_mac.u.vlan_mac.is_inner_mac);
	/* MOVE: Add a rule that will add this MAC to the target Queue */
	if (cmd == BNX2X_VLAN_MAC_MOVE) {
		rule_entry++;
		rule_cnt++;

		/* Setup ramrod data */
		bnx2x_vlan_mac_set_cmd_hdr_e2(bp,
					elem->cmd_data.vlan_mac.target_obj,
					true, CLASSIFY_RULE_OPCODE_PAIR,
					&rule_entry->pair.header);

		/* Set a VLAN itself */
		rule_entry->pair.vlan = cpu_to_le16(vlan);
		bnx2x_set_fw_mac_addr(&rule_entry->pair.mac_msb,
				      &rule_entry->pair.mac_mid,
				      &rule_entry->pair.mac_lsb, mac);
		rule_entry->pair.inner_mac =
			cpu_to_le16(elem->cmd_data.vlan_mac.u.
				    vlan_mac.is_inner_mac);
	}

	/* Set the ramrod data header */
	/* TODO: take this to the higher level in order to prevent multiple
	   writing */
	bnx2x_vlan_mac_set_rdata_hdr_e2(raw->cid, raw->state, &data->header,
					rule_cnt);
}

/**
 * bnx2x_set_one_vlan_mac_e1h -
 *
 * @bp:		device handle
 * @o:		bnx2x_vlan_mac_obj
 * @elem:	bnx2x_exeq_elem
 * @rule_idx:	rule_idx
 * @cam_offset:	cam_offset
 */
static void bnx2x_set_one_vlan_mac_e1h(struct bnx2x *bp,
				       struct bnx2x_vlan_mac_obj *o,
				       struct bnx2x_exeq_elem *elem,
				       int rule_idx, int cam_offset)
{
	struct bnx2x_raw_obj *raw = &o->raw;
	struct mac_configuration_cmd *config =
		(struct mac_configuration_cmd *)(raw->rdata);
	/*
	 * 57710 and 57711 do not support MOVE command,
	 * so it's either ADD or DEL
	 */
	bool add = (elem->cmd_data.vlan_mac.cmd == BNX2X_VLAN_MAC_ADD) ?
		true : false;

	/* Reset the ramrod data buffer */
	memset(config, 0, sizeof(*config));

	bnx2x_vlan_mac_set_rdata_e1x(bp, o, BNX2X_FILTER_VLAN_MAC_PENDING,
				     cam_offset, add,
				     elem->cmd_data.vlan_mac.u.vlan_mac.mac,
				     elem->cmd_data.vlan_mac.u.vlan_mac.vlan,
				     ETH_VLAN_FILTER_CLASSIFY, config);
}

#define list_next_entry(pos, member) \
	list_entry((pos)->member.next, typeof(*(pos)), member)

/**
 * bnx2x_vlan_mac_restore - reconfigure next MAC/VLAN/VLAN-MAC element
 *
 * @bp:		device handle
 * @p:		command parameters
 * @ppos:	pointer to the cookie
 *
 * reconfigure next MAC/VLAN/VLAN-MAC element from the
 * previously configured elements list.
 *
 * from command parameters only RAMROD_COMP_WAIT bit in ramrod_flags is taken
 * into account
 *
 * pointer to the cookie - that should be given back in the next call to make
 * function handle the next element. If *ppos is set to NULL it will restart
 * the iterator. If returned *ppos == NULL this means that the last element
 * has been handled.
 *
 */
static int bnx2x_vlan_mac_restore(struct bnx2x *bp,
				  struct bnx2x_vlan_mac_ramrod_params *p,
				  struct bnx2x_vlan_mac_registry_elem **ppos)
{
	struct bnx2x_vlan_mac_registry_elem *pos;
	struct bnx2x_vlan_mac_obj *o = p->vlan_mac_obj;

	/* If list is empty - there is nothing to do here */
	if (list_empty(&o->head)) {
		*ppos = NULL;
		return 0;
	}

	/* make a step... */
	if (*ppos == NULL)
		*ppos = list_first_entry(&o->head,
					 struct bnx2x_vlan_mac_registry_elem,
					 link);
	else
		*ppos = list_next_entry(*ppos, link);

	pos = *ppos;

	/* If it's the last step - return NULL */
	if (list_is_last(&pos->link, &o->head))
		*ppos = NULL;

	/* Prepare a 'user_req' */
	memcpy(&p->user_req.u, &pos->u, sizeof(pos->u));

	/* Set the command */
	p->user_req.cmd = BNX2X_VLAN_MAC_ADD;

	/* Set vlan_mac_flags */
	p->user_req.vlan_mac_flags = pos->vlan_mac_flags;

	/* Set a restore bit */
	__set_bit(RAMROD_RESTORE, &p->ramrod_flags);

	return bnx2x_config_vlan_mac(bp, p);
}

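/*
 * Illustrative only: a full restore pass over the registry would drive the
 * cookie roughly like this, assuming RAMROD_COMP_WAIT is set in
 * p.ramrod_flags so each call completes before the next (error handling
 * elided):
 *
 *	struct bnx2x_vlan_mac_registry_elem *pos = NULL;
 *	int rc;
 *
 *	do {
 *		rc = bnx2x_vlan_mac_restore(bp, &p, &pos);
 *	} while (rc >= 0 && pos);
 */
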
/*
 * bnx2x_exeq_get_mac/bnx2x_exeq_get_vlan/bnx2x_exeq_get_vlan_mac return a
 * pointer to an element with a specific criteria and NULL if such an element
 * hasn't been found.
 */
static struct bnx2x_exeq_elem *bnx2x_exeq_get_mac(
	struct bnx2x_exe_queue_obj *o,
	struct bnx2x_exeq_elem *elem)
{
	struct bnx2x_exeq_elem *pos;
	struct bnx2x_mac_ramrod_data *data = &elem->cmd_data.vlan_mac.u.mac;

	/* Check pending for execution commands */
	list_for_each_entry(pos, &o->exe_queue, link)
		if (!memcmp(&pos->cmd_data.vlan_mac.u.mac, data,
			    sizeof(*data)) &&
		    (pos->cmd_data.vlan_mac.cmd == elem->cmd_data.vlan_mac.cmd))
			return pos;

	return NULL;
}

static struct bnx2x_exeq_elem *bnx2x_exeq_get_vlan(
	struct bnx2x_exe_queue_obj *o,
	struct bnx2x_exeq_elem *elem)
{
	struct bnx2x_exeq_elem *pos;
	struct bnx2x_vlan_ramrod_data *data = &elem->cmd_data.vlan_mac.u.vlan;

	/* Check pending for execution commands */
	list_for_each_entry(pos, &o->exe_queue, link)
		if (!memcmp(&pos->cmd_data.vlan_mac.u.vlan, data,
			    sizeof(*data)) &&
		    (pos->cmd_data.vlan_mac.cmd == elem->cmd_data.vlan_mac.cmd))
			return pos;

	return NULL;
}

static struct bnx2x_exeq_elem *bnx2x_exeq_get_vlan_mac(
	struct bnx2x_exe_queue_obj *o,
	struct bnx2x_exeq_elem *elem)
{
	struct bnx2x_exeq_elem *pos;
	struct bnx2x_vlan_mac_ramrod_data *data =
		&elem->cmd_data.vlan_mac.u.vlan_mac;

	/* Check pending for execution commands */
	list_for_each_entry(pos, &o->exe_queue, link)
		if (!memcmp(&pos->cmd_data.vlan_mac.u.vlan_mac, data,
			    sizeof(*data)) &&
		    (pos->cmd_data.vlan_mac.cmd == elem->cmd_data.vlan_mac.cmd))
			return pos;

	return NULL;
}

/**
 * bnx2x_validate_vlan_mac_add - check if an ADD command can be executed
 *
 * @bp:		device handle
 * @qo:		bnx2x_qable_obj
 * @elem:	bnx2x_exeq_elem
 *
 * Checks that the requested configuration can be added. If yes and if
 * requested, consume CAM credit.
 *
 * The 'validate' is run after the 'optimize'.
 *
 */
static inline int bnx2x_validate_vlan_mac_add(struct bnx2x *bp,
					      union bnx2x_qable_obj *qo,
					      struct bnx2x_exeq_elem *elem)
{
	struct bnx2x_vlan_mac_obj *o = &qo->vlan_mac;
	struct bnx2x_exe_queue_obj *exeq = &o->exe_queue;
	int rc;

	/* Check the registry */
	rc = o->check_add(bp, o, &elem->cmd_data.vlan_mac.u);
	if (rc) {
		DP(BNX2X_MSG_SP, "ADD command is not allowed considering current registry state.\n");
		return rc;
	}

	/*
	 * Check if there is a pending ADD command for this
	 * MAC/VLAN/VLAN-MAC. Return an error if there is.
	 */
	if (exeq->get(exeq, elem)) {
		DP(BNX2X_MSG_SP, "There is a pending ADD command already\n");
		return -EEXIST;
	}

	/*
	 * TODO: Check the pending MOVE from other objects where this
	 * object is a destination object.
	 */

	/* Consume the credit if not requested not to */
	if (!(test_bit(BNX2X_DONT_CONSUME_CAM_CREDIT,
		       &elem->cmd_data.vlan_mac.vlan_mac_flags) ||
	      o->get_credit(o)))
		return -EINVAL;

	return 0;
}

/**
 * bnx2x_validate_vlan_mac_del - check if the DEL command can be executed
 *
 * @bp:		device handle
 * @qo:		queueable object to check
 * @elem:	element that needs to be deleted
 *
 * Checks that the requested configuration can be deleted. If yes and if
 * requested, returns a CAM credit.
 *
 * The 'validate' is run after the 'optimize'.
 */
static inline int bnx2x_validate_vlan_mac_del(struct bnx2x *bp,
					      union bnx2x_qable_obj *qo,
					      struct bnx2x_exeq_elem *elem)
{
	struct bnx2x_vlan_mac_obj *o = &qo->vlan_mac;
	struct bnx2x_vlan_mac_registry_elem *pos;
	struct bnx2x_exe_queue_obj *exeq = &o->exe_queue;
	struct bnx2x_exeq_elem query_elem;

	/* If this classification can not be deleted (doesn't exist)
	 * - return -EEXIST.
	 */
	pos = o->check_del(bp, o, &elem->cmd_data.vlan_mac.u);
	if (!pos) {
		DP(BNX2X_MSG_SP, "DEL command is not allowed considering current registry state\n");
		return -EEXIST;
	}

	/*
	 * Check if there are pending DEL or MOVE commands for this
	 * MAC/VLAN/VLAN-MAC. Return an error if so.
	 */
	memcpy(&query_elem, elem, sizeof(query_elem));

	/* Check for MOVE commands */
	query_elem.cmd_data.vlan_mac.cmd = BNX2X_VLAN_MAC_MOVE;
	if (exeq->get(exeq, &query_elem)) {
		BNX2X_ERR("There is a pending MOVE command already\n");
		return -EINVAL;
	}

	/* Check for DEL commands */
	if (exeq->get(exeq, elem)) {
		DP(BNX2X_MSG_SP, "There is a pending DEL command already\n");
		return -EEXIST;
	}

	/* Return the credit to the credit pool if not requested not to */
	if (!(test_bit(BNX2X_DONT_CONSUME_CAM_CREDIT,
		       &elem->cmd_data.vlan_mac.vlan_mac_flags) ||
	      o->put_credit(o))) {
		BNX2X_ERR("Failed to return a credit\n");
		return -EINVAL;
	}

	return 0;
}

/**
 * bnx2x_validate_vlan_mac_move - check if the MOVE command can be executed
 *
 * @bp:		device handle
 * @qo:		queueable object to check (source)
 * @elem:	element that needs to be moved
 *
 * Checks that the requested configuration can be moved. If yes and if
 * requested, returns a CAM credit.
 *
 * The 'validate' is run after the 'optimize'.
 */
static inline int bnx2x_validate_vlan_mac_move(struct bnx2x *bp,
					       union bnx2x_qable_obj *qo,
					       struct bnx2x_exeq_elem *elem)
{
	struct bnx2x_vlan_mac_obj *src_o = &qo->vlan_mac;
	struct bnx2x_vlan_mac_obj *dest_o = elem->cmd_data.vlan_mac.target_obj;
	struct bnx2x_exeq_elem query_elem;
	struct bnx2x_exe_queue_obj *src_exeq = &src_o->exe_queue;
	struct bnx2x_exe_queue_obj *dest_exeq = &dest_o->exe_queue;

	/*
	 * Check if we can perform this operation based on the current registry
	 * state.
	 */
	if (!src_o->check_move(bp, src_o, dest_o,
			       &elem->cmd_data.vlan_mac.u)) {
		DP(BNX2X_MSG_SP, "MOVE command is not allowed considering current registry state\n");
		return -EINVAL;
	}

	/*
	 * Check if there is an already pending DEL or MOVE command for the
	 * source object or ADD command for a destination object. Return an
	 * error if so.
	 */
	memcpy(&query_elem, elem, sizeof(query_elem));

	/* Check DEL on source */
	query_elem.cmd_data.vlan_mac.cmd = BNX2X_VLAN_MAC_DEL;
	if (src_exeq->get(src_exeq, &query_elem)) {
		BNX2X_ERR("There is a pending DEL command on the source queue already\n");
		return -EINVAL;
	}

	/* Check MOVE on source */
	if (src_exeq->get(src_exeq, elem)) {
		DP(BNX2X_MSG_SP, "There is a pending MOVE command already\n");
		return -EEXIST;
	}

	/* Check ADD on destination */
	query_elem.cmd_data.vlan_mac.cmd = BNX2X_VLAN_MAC_ADD;
	if (dest_exeq->get(dest_exeq, &query_elem)) {
		BNX2X_ERR("There is a pending ADD command on the destination queue already\n");
		return -EINVAL;
	}

	/* Consume the credit if not requested not to */
	if (!(test_bit(BNX2X_DONT_CONSUME_CAM_CREDIT_DEST,
		       &elem->cmd_data.vlan_mac.vlan_mac_flags) ||
	      dest_o->get_credit(dest_o)))
		return -EINVAL;

	if (!(test_bit(BNX2X_DONT_CONSUME_CAM_CREDIT,
		       &elem->cmd_data.vlan_mac.vlan_mac_flags) ||
	      src_o->put_credit(src_o))) {
		/* return the credit taken from dest... */
		dest_o->put_credit(dest_o);
		return -EINVAL;
	}

	return 0;
}

static int bnx2x_validate_vlan_mac(struct bnx2x *bp,
				   union bnx2x_qable_obj *qo,
				   struct bnx2x_exeq_elem *elem)
{
	switch (elem->cmd_data.vlan_mac.cmd) {
	case BNX2X_VLAN_MAC_ADD:
		return bnx2x_validate_vlan_mac_add(bp, qo, elem);
	case BNX2X_VLAN_MAC_DEL:
		return bnx2x_validate_vlan_mac_del(bp, qo, elem);
	case BNX2X_VLAN_MAC_MOVE:
		return bnx2x_validate_vlan_mac_move(bp, qo, elem);
	default:
		return -EINVAL;
	}
}

static int bnx2x_remove_vlan_mac(struct bnx2x *bp,
				 union bnx2x_qable_obj *qo,
				 struct bnx2x_exeq_elem *elem)
{
	int rc = 0;

	/* If consumption wasn't required, nothing to do */
	if (test_bit(BNX2X_DONT_CONSUME_CAM_CREDIT,
		     &elem->cmd_data.vlan_mac.vlan_mac_flags))
		return 0;

	switch (elem->cmd_data.vlan_mac.cmd) {
	case BNX2X_VLAN_MAC_ADD:
	case BNX2X_VLAN_MAC_MOVE:
		rc = qo->vlan_mac.put_credit(&qo->vlan_mac);
		break;
	case BNX2X_VLAN_MAC_DEL:
		rc = qo->vlan_mac.get_credit(&qo->vlan_mac);
		break;
	default:
		return -EINVAL;
	}

	if (rc != true)
		return -EINVAL;

	return 0;
}

/**
 * bnx2x_wait_vlan_mac - passively wait for 5 seconds until all work completes.
 *
 * @bp:		device handle
 * @o:		bnx2x_vlan_mac_obj
 *
 */
static int bnx2x_wait_vlan_mac(struct bnx2x *bp,
			       struct bnx2x_vlan_mac_obj *o)
{
	int cnt = 5000, rc;
	struct bnx2x_exe_queue_obj *exeq = &o->exe_queue;
	struct bnx2x_raw_obj *raw = &o->raw;

	while (cnt--) {
		/* Wait for the current command to complete */
		rc = raw->wait_comp(bp, raw);
		if (rc)
			return rc;

		/* Wait until there are no pending commands */
		if (!bnx2x_exe_queue_empty(exeq))
			usleep_range(1000, 2000);
		else
			return 0;
	}

	return -EBUSY;
}

/**
 * bnx2x_complete_vlan_mac - complete one VLAN-MAC ramrod
 *
 * @bp:			device handle
 * @o:			bnx2x_vlan_mac_obj
 * @cqe:
 * @ramrod_flags:	if RAMROD_CONT is set, schedule next execution chunk
 *
 */
static int bnx2x_complete_vlan_mac(struct bnx2x *bp,
				   struct bnx2x_vlan_mac_obj *o,
				   union event_ring_elem *cqe,
				   unsigned long *ramrod_flags)
{
	struct bnx2x_raw_obj *r = &o->raw;
	int rc;

	/* Reset pending list */
	bnx2x_exe_queue_reset_pending(bp, &o->exe_queue);

	/* Clear pending */
	r->clear_pending(r);

	/* If ramrod failed this is most likely a SW bug */
	if (cqe->message.error)
		return -EINVAL;

	/* Run the next bulk of pending commands if requested */
	if (test_bit(RAMROD_CONT, ramrod_flags)) {
		rc = bnx2x_exe_queue_step(bp, &o->exe_queue, ramrod_flags);
		if (rc < 0)
			return rc;
	}

	/* If there is more work to do return PENDING */
	if (!bnx2x_exe_queue_empty(&o->exe_queue))
		return 1;

	return 0;
}

/**
 * bnx2x_optimize_vlan_mac - optimize ADD and DEL commands.
 *
 * @bp:		device handle
 * @qo:		bnx2x_qable_obj
 * @elem:	bnx2x_exeq_elem
 */
static int bnx2x_optimize_vlan_mac(struct bnx2x *bp,
				   union bnx2x_qable_obj *qo,
				   struct bnx2x_exeq_elem *elem)
{
	struct bnx2x_exeq_elem query, *pos;
	struct bnx2x_vlan_mac_obj *o = &qo->vlan_mac;
	struct bnx2x_exe_queue_obj *exeq = &o->exe_queue;

	memcpy(&query, elem, sizeof(query));

	switch (elem->cmd_data.vlan_mac.cmd) {
	case BNX2X_VLAN_MAC_ADD:
		query.cmd_data.vlan_mac.cmd = BNX2X_VLAN_MAC_DEL;
		break;
	case BNX2X_VLAN_MAC_DEL:
		query.cmd_data.vlan_mac.cmd = BNX2X_VLAN_MAC_ADD;
		break;
	default:
		/* Don't handle anything other than ADD or DEL */
		return 0;
	}

	/* If we found the appropriate element - delete it */
	pos = exeq->get(exeq, &query);
	if (pos) {
		/* Return the credit of the optimized command */
		if (!test_bit(BNX2X_DONT_CONSUME_CAM_CREDIT,
			      &pos->cmd_data.vlan_mac.vlan_mac_flags)) {
			if ((query.cmd_data.vlan_mac.cmd ==
			     BNX2X_VLAN_MAC_ADD) && !o->put_credit(o)) {
				BNX2X_ERR("Failed to return the credit for the optimized ADD command\n");
				return -EINVAL;
			} else if (!o->get_credit(o)) { /* VLAN_MAC_DEL */
				BNX2X_ERR("Failed to recover the credit from the optimized DEL command\n");
				return -EINVAL;
			}
		}

		DP(BNX2X_MSG_SP, "Optimizing %s command\n",
		   (elem->cmd_data.vlan_mac.cmd == BNX2X_VLAN_MAC_ADD) ?
		   "ADD" : "DEL");

		list_del(&pos->link);
		bnx2x_exe_queue_free_elem(bp, pos);
		return 1;
	}

	return 0;
}

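/*
 * Illustrative only: a worked scenario for the optimization above. If an
 * ADD for 00:11:22:33:44:55 is still queued (its CAM credit already taken)
 * when a DEL for the same MAC arrives, the pair cancels out here: the
 * queued ADD is removed, its credit is returned via put_credit(), the new
 * DEL is freed by the caller, and no ramrod is ever sent for either one.
 */
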
/**
 * bnx2x_vlan_mac_get_registry_elem - prepare a registry element
 *
 * @bp:		device handle
 * @o:
 * @elem:
 * @restore:
 * @re:
 *
 * prepare a registry element according to the current command request.
 */
static inline int bnx2x_vlan_mac_get_registry_elem(
	struct bnx2x *bp,
	struct bnx2x_vlan_mac_obj *o,
	struct bnx2x_exeq_elem *elem,
	bool restore,
	struct bnx2x_vlan_mac_registry_elem **re)
{
	enum bnx2x_vlan_mac_cmd cmd = elem->cmd_data.vlan_mac.cmd;
	struct bnx2x_vlan_mac_registry_elem *reg_elem;

	/* Allocate a new registry element if needed. */
	if (!restore &&
	    ((cmd == BNX2X_VLAN_MAC_ADD) || (cmd == BNX2X_VLAN_MAC_MOVE))) {
		reg_elem = kzalloc(sizeof(*reg_elem), GFP_ATOMIC);
		if (!reg_elem)
			return -ENOMEM;

		/* Get a new CAM offset */
		if (!o->get_cam_offset(o, &reg_elem->cam_offset)) {
			/*
			 * This should never happen, because we have checked
			 * the CAM availability in the 'validate'.
			 */
			WARN_ON(1);
			kfree(reg_elem);
			return -EINVAL;
		}

		DP(BNX2X_MSG_SP, "Got cam offset %d\n", reg_elem->cam_offset);

		/* Set a VLAN-MAC data */
		memcpy(&reg_elem->u, &elem->cmd_data.vlan_mac.u,
		       sizeof(reg_elem->u));

		/* Copy the flags (needed for DEL and RESTORE flows) */
		reg_elem->vlan_mac_flags =
			elem->cmd_data.vlan_mac.vlan_mac_flags;
	} else /* DEL, RESTORE */
		reg_elem = o->check_del(bp, o, &elem->cmd_data.vlan_mac.u);

	*re = reg_elem;
	return 0;
}

1578/**
1579 * bnx2x_execute_vlan_mac - execute vlan mac command
1580 *
1581 * @bp: device handle
1582 * @qo:
1583 * @exe_chunk:
1584 * @ramrod_flags:
1585 *
1586 * go and send a ramrod!
1587 */
1588static int bnx2x_execute_vlan_mac(struct bnx2x *bp,
1589 union bnx2x_qable_obj *qo,
1590 struct list_head *exe_chunk,
1591 unsigned long *ramrod_flags)
1592{
1593 struct bnx2x_exeq_elem *elem;
1594 struct bnx2x_vlan_mac_obj *o = &qo->vlan_mac, *cam_obj;
1595 struct bnx2x_raw_obj *r = &o->raw;
1596 int rc, idx = 0;
1597 bool restore = test_bit(RAMROD_RESTORE, ramrod_flags);
1598 bool drv_only = test_bit(RAMROD_DRV_CLR_ONLY, ramrod_flags);
1599 struct bnx2x_vlan_mac_registry_elem *reg_elem;
Yuval Mintz86564c32013-01-23 03:21:50 +00001600 enum bnx2x_vlan_mac_cmd cmd;
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03001601
1602 /*
1603 * If DRIVER_ONLY execution is requested, cleanup a registry
1604 * and exit. Otherwise send a ramrod to FW.
1605 */
1606 if (!drv_only) {
1607 WARN_ON(r->check_pending(r));
1608
1609 /* Set pending */
1610 r->set_pending(r);
1611
1612 /* Fill tha ramrod data */
1613 list_for_each_entry(elem, exe_chunk, link) {
1614 cmd = elem->cmd_data.vlan_mac.cmd;
1615 /*
1616 * We will add to the target object in MOVE command, so
1617 * change the object for a CAM search.
1618 */
1619 if (cmd == BNX2X_VLAN_MAC_MOVE)
1620 cam_obj = elem->cmd_data.vlan_mac.target_obj;
1621 else
1622 cam_obj = o;
1623
1624 rc = bnx2x_vlan_mac_get_registry_elem(bp, cam_obj,
1625 elem, restore,
1626 &reg_elem);
1627 if (rc)
1628 goto error_exit;
1629
1630 WARN_ON(!reg_elem);
1631
1632 /* Push a new entry into the registry */
1633 if (!restore &&
1634 ((cmd == BNX2X_VLAN_MAC_ADD) ||
1635 (cmd == BNX2X_VLAN_MAC_MOVE)))
1636 list_add(&reg_elem->link, &cam_obj->head);
1637
1638 /* Configure a single command in a ramrod data buffer */
1639 o->set_one_rule(bp, o, elem, idx,
1640 reg_elem->cam_offset);
1641
1642 /* MOVE command consumes 2 entries in the ramrod data */
1643 if (cmd == BNX2X_VLAN_MAC_MOVE)
1644 idx += 2;
1645 else
1646 idx++;
1647 }
1648
Vladislav Zolotarov53e51e22011-07-19 01:45:02 +00001649 /*
1650 * No need for an explicit memory barrier here as long we would
1651 * need to ensure the ordering of writing to the SPQ element
1652 * and updating of the SPQ producer which involves a memory
1653 * read and we will have to put a full memory barrier there
1654 * (inside bnx2x_sp_post()).
1655 */
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03001656
1657 rc = bnx2x_sp_post(bp, o->ramrod_cmd, r->cid,
1658 U64_HI(r->rdata_mapping),
1659 U64_LO(r->rdata_mapping),
1660 ETH_CONNECTION_TYPE);
1661 if (rc)
1662 goto error_exit;
1663 }
1664
1665 /* Now, when we are done with the ramrod - clean up the registry */
1666 list_for_each_entry(elem, exe_chunk, link) {
1667 cmd = elem->cmd_data.vlan_mac.cmd;
1668 if ((cmd == BNX2X_VLAN_MAC_DEL) ||
1669 (cmd == BNX2X_VLAN_MAC_MOVE)) {
Merav Sicron51c1a582012-03-18 10:33:38 +00001670 reg_elem = o->check_del(bp, o,
1671 &elem->cmd_data.vlan_mac.u);
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03001672
1673 WARN_ON(!reg_elem);
1674
1675 o->put_cam_offset(o, reg_elem->cam_offset);
1676 list_del(&reg_elem->link);
1677 kfree(reg_elem);
1678 }
1679 }
1680
1681 if (!drv_only)
1682 return 1;
1683 else
1684 return 0;
1685
1686error_exit:
1687 r->clear_pending(r);
1688
1689 /* Cleanup a registry in case of a failure */
1690 list_for_each_entry(elem, exe_chunk, link) {
1691 cmd = elem->cmd_data.vlan_mac.cmd;
1692
1693 if (cmd == BNX2X_VLAN_MAC_MOVE)
1694 cam_obj = elem->cmd_data.vlan_mac.target_obj;
1695 else
1696 cam_obj = o;
1697
1698 /* Delete all newly added above entries */
1699 if (!restore &&
1700 ((cmd == BNX2X_VLAN_MAC_ADD) ||
1701 (cmd == BNX2X_VLAN_MAC_MOVE))) {
1702			reg_elem = o->check_del(bp, cam_obj,
1703						&elem->cmd_data.vlan_mac.u);
1704 if (reg_elem) {
1705 list_del(&reg_elem->link);
1706 kfree(reg_elem);
1707 }
1708 }
1709 }
1710
1711 return rc;
1712}
1713
1714static inline int bnx2x_vlan_mac_push_new_cmd(
1715 struct bnx2x *bp,
1716 struct bnx2x_vlan_mac_ramrod_params *p)
1717{
1718 struct bnx2x_exeq_elem *elem;
1719 struct bnx2x_vlan_mac_obj *o = p->vlan_mac_obj;
1720 bool restore = test_bit(RAMROD_RESTORE, &p->ramrod_flags);
1721
1722 /* Allocate the execution queue element */
1723 elem = bnx2x_exe_queue_alloc_elem(bp);
1724 if (!elem)
1725 return -ENOMEM;
1726
1727 /* Set the command 'length' */
1728 switch (p->user_req.cmd) {
1729 case BNX2X_VLAN_MAC_MOVE:
1730 elem->cmd_len = 2;
1731 break;
1732 default:
1733 elem->cmd_len = 1;
1734 }
1735
1736 /* Fill the object specific info */
1737 memcpy(&elem->cmd_data.vlan_mac, &p->user_req, sizeof(p->user_req));
1738
1739 /* Try to add a new command to the pending list */
1740 return bnx2x_exe_queue_add(bp, &o->exe_queue, elem, restore);
1741}
1742
1743/**
1744 * bnx2x_config_vlan_mac - configure VLAN/MAC/VLAN_MAC filtering rules.
1745 *
1746 * @bp: device handle
1747 * @p:		vlan_mac ramrod parameters
1748 *
1749 */
1750int bnx2x_config_vlan_mac(
1751 struct bnx2x *bp,
1752 struct bnx2x_vlan_mac_ramrod_params *p)
1753{
1754 int rc = 0;
1755 struct bnx2x_vlan_mac_obj *o = p->vlan_mac_obj;
1756 unsigned long *ramrod_flags = &p->ramrod_flags;
1757 bool cont = test_bit(RAMROD_CONT, ramrod_flags);
1758 struct bnx2x_raw_obj *raw = &o->raw;
1759
1760 /*
1761 * Add new elements to the execution list for commands that require it.
1762 */
1763 if (!cont) {
1764 rc = bnx2x_vlan_mac_push_new_cmd(bp, p);
1765 if (rc)
1766 return rc;
1767 }
1768
1769 /*
1770 * If nothing will be executed further in this iteration we want to
1771 * return PENDING if there are pending commands
1772 */
1773 if (!bnx2x_exe_queue_empty(&o->exe_queue))
1774 rc = 1;
1775
1776	if (test_bit(RAMROD_DRV_CLR_ONLY, ramrod_flags)) {
1777		DP(BNX2X_MSG_SP, "RAMROD_DRV_CLR_ONLY requested: clearing a pending bit.\n");
1778		raw->clear_pending(raw);
1779	}
1780
1781	/* Execute commands if required */
1782 if (cont || test_bit(RAMROD_EXEC, ramrod_flags) ||
1783 test_bit(RAMROD_COMP_WAIT, ramrod_flags)) {
1784 rc = bnx2x_exe_queue_step(bp, &o->exe_queue, ramrod_flags);
1785 if (rc < 0)
1786 return rc;
1787 }
1788
1789 /*
1790	 * RAMROD_COMP_WAIT is a superset of RAMROD_EXEC. If it was set,
1791	 * the user wants to wait until the last command is done.
1792 */
1793 if (test_bit(RAMROD_COMP_WAIT, &p->ramrod_flags)) {
1794 /*
1795 * Wait maximum for the current exe_queue length iterations plus
1796 * one (for the current pending command).
1797 */
1798 int max_iterations = bnx2x_exe_queue_length(&o->exe_queue) + 1;
1799
1800 while (!bnx2x_exe_queue_empty(&o->exe_queue) &&
1801 max_iterations--) {
1802
1803 /* Wait for the current command to complete */
1804 rc = raw->wait_comp(bp, raw);
1805 if (rc)
1806 return rc;
1807
1808 /* Make a next step */
1809 rc = bnx2x_exe_queue_step(bp, &o->exe_queue,
1810 ramrod_flags);
1811 if (rc < 0)
1812 return rc;
1813 }
1814
1815 return 0;
1816 }
1817
1818 return rc;
1819}
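/* Usage sketch (illustrative, not part of the driver): queue a single
 * ETH MAC ADD and synchronously wait for its completion. The helper name
 * is hypothetical; the fields and flags are the ones handled above.
 */
static int __maybe_unused bnx2x_example_add_mac_sync(struct bnx2x *bp,
					struct bnx2x_vlan_mac_obj *mac_obj,
					const u8 *mac)
{
	struct bnx2x_vlan_mac_ramrod_params p;

	memset(&p, 0, sizeof(p));
	p.vlan_mac_obj = mac_obj;
	p.user_req.cmd = BNX2X_VLAN_MAC_ADD;
	memcpy(p.user_req.u.mac.mac, mac, ETH_ALEN);
	__set_bit(BNX2X_ETH_MAC, &p.user_req.vlan_mac_flags);

	/* Execute immediately and block until the ramrod completes */
	__set_bit(RAMROD_COMP_WAIT, &p.ramrod_flags);

	return bnx2x_config_vlan_mac(bp, &p);
}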
1820
1821
1822
1823/**
1824 * bnx2x_vlan_mac_del_all - delete elements with given vlan_mac_flags spec
1825 *
1826 * @bp: device handle
1827 * @o:		vlan_mac object to delete the elements from
1828 * @vlan_mac_flags: spec of the elements to delete
1829 * @ramrod_flags: execution flags to be used for this deletion
1830 *
1831 * Returns 0 if the last operation has completed successfully and there are
1832 * no more elements left, a positive value if the last operation has
1833 * completed successfully and there are more previously configured elements,
1834 * and a negative value if the current operation has failed.
1835 */
1836static int bnx2x_vlan_mac_del_all(struct bnx2x *bp,
1837 struct bnx2x_vlan_mac_obj *o,
1838 unsigned long *vlan_mac_flags,
1839 unsigned long *ramrod_flags)
1840{
1841 struct bnx2x_vlan_mac_registry_elem *pos = NULL;
1842 int rc = 0;
1843 struct bnx2x_vlan_mac_ramrod_params p;
1844 struct bnx2x_exe_queue_obj *exeq = &o->exe_queue;
1845 struct bnx2x_exeq_elem *exeq_pos, *exeq_pos_n;
1846
1847 /* Clear pending commands first */
1848
1849 spin_lock_bh(&exeq->lock);
1850
1851 list_for_each_entry_safe(exeq_pos, exeq_pos_n, &exeq->exe_queue, link) {
1852 if (exeq_pos->cmd_data.vlan_mac.vlan_mac_flags ==
1853		    *vlan_mac_flags) {
1854			rc = exeq->remove(bp, exeq->owner, exeq_pos);
1855			if (rc) {
1856				BNX2X_ERR("Failed to remove command\n");
1857				spin_unlock_bh(&exeq->lock);
1858				return rc;
1859			}
1860			list_del(&exeq_pos->link);
1861			bnx2x_exe_queue_free_elem(bp, exeq_pos);
1862		}
1863	}
1864
1865 spin_unlock_bh(&exeq->lock);
1866
1867 /* Prepare a command request */
1868 memset(&p, 0, sizeof(p));
1869 p.vlan_mac_obj = o;
1870 p.ramrod_flags = *ramrod_flags;
1871 p.user_req.cmd = BNX2X_VLAN_MAC_DEL;
1872
1873 /*
1874	 * Add all matching entries to the execution queue without actually
1875	 * executing anything.
1876 */
1877 __clear_bit(RAMROD_COMP_WAIT, &p.ramrod_flags);
1878 __clear_bit(RAMROD_EXEC, &p.ramrod_flags);
1879 __clear_bit(RAMROD_CONT, &p.ramrod_flags);
1880
1881 list_for_each_entry(pos, &o->head, link) {
1882 if (pos->vlan_mac_flags == *vlan_mac_flags) {
1883 p.user_req.vlan_mac_flags = pos->vlan_mac_flags;
1884 memcpy(&p.user_req.u, &pos->u, sizeof(pos->u));
1885 rc = bnx2x_config_vlan_mac(bp, &p);
1886 if (rc < 0) {
1887 BNX2X_ERR("Failed to add a new DEL command\n");
1888 return rc;
1889 }
1890 }
1891 }
1892
1893 p.ramrod_flags = *ramrod_flags;
1894 __set_bit(RAMROD_CONT, &p.ramrod_flags);
1895
1896 return bnx2x_config_vlan_mac(bp, &p);
1897}
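/* Illustrative caller (hypothetical helper): flush every ETH MAC configured
 * on an object and wait for the HW to finish, via the delete_all() callback
 * bound below.
 */
static int __maybe_unused bnx2x_example_flush_eth_macs(struct bnx2x *bp,
					struct bnx2x_vlan_mac_obj *mac_obj)
{
	unsigned long vlan_mac_flags = 0;
	unsigned long ramrod_flags = 0;

	__set_bit(BNX2X_ETH_MAC, &vlan_mac_flags);
	__set_bit(RAMROD_COMP_WAIT, &ramrod_flags);

	return mac_obj->delete_all(bp, mac_obj, &vlan_mac_flags,
				   &ramrod_flags);
}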
1898
1899static inline void bnx2x_init_raw_obj(struct bnx2x_raw_obj *raw, u8 cl_id,
1900 u32 cid, u8 func_id, void *rdata, dma_addr_t rdata_mapping, int state,
1901 unsigned long *pstate, bnx2x_obj_type type)
1902{
1903 raw->func_id = func_id;
1904 raw->cid = cid;
1905 raw->cl_id = cl_id;
1906 raw->rdata = rdata;
1907 raw->rdata_mapping = rdata_mapping;
1908 raw->state = state;
1909 raw->pstate = pstate;
1910 raw->obj_type = type;
1911 raw->check_pending = bnx2x_raw_check_pending;
1912 raw->clear_pending = bnx2x_raw_clear_pending;
1913 raw->set_pending = bnx2x_raw_set_pending;
1914 raw->wait_comp = bnx2x_raw_wait;
1915}
1916
1917static inline void bnx2x_init_vlan_mac_common(struct bnx2x_vlan_mac_obj *o,
1918 u8 cl_id, u32 cid, u8 func_id, void *rdata, dma_addr_t rdata_mapping,
1919 int state, unsigned long *pstate, bnx2x_obj_type type,
1920 struct bnx2x_credit_pool_obj *macs_pool,
1921 struct bnx2x_credit_pool_obj *vlans_pool)
1922{
1923 INIT_LIST_HEAD(&o->head);
1924
1925 o->macs_pool = macs_pool;
1926 o->vlans_pool = vlans_pool;
1927
1928 o->delete_all = bnx2x_vlan_mac_del_all;
1929 o->restore = bnx2x_vlan_mac_restore;
1930 o->complete = bnx2x_complete_vlan_mac;
1931 o->wait = bnx2x_wait_vlan_mac;
1932
1933 bnx2x_init_raw_obj(&o->raw, cl_id, cid, func_id, rdata, rdata_mapping,
1934 state, pstate, type);
1935}
1936
1937
1938void bnx2x_init_mac_obj(struct bnx2x *bp,
1939 struct bnx2x_vlan_mac_obj *mac_obj,
1940 u8 cl_id, u32 cid, u8 func_id, void *rdata,
1941 dma_addr_t rdata_mapping, int state,
1942 unsigned long *pstate, bnx2x_obj_type type,
1943 struct bnx2x_credit_pool_obj *macs_pool)
1944{
1945 union bnx2x_qable_obj *qable_obj = (union bnx2x_qable_obj *)mac_obj;
1946
1947 bnx2x_init_vlan_mac_common(mac_obj, cl_id, cid, func_id, rdata,
1948 rdata_mapping, state, pstate, type,
1949 macs_pool, NULL);
1950
1951 /* CAM credit pool handling */
1952 mac_obj->get_credit = bnx2x_get_credit_mac;
1953 mac_obj->put_credit = bnx2x_put_credit_mac;
1954 mac_obj->get_cam_offset = bnx2x_get_cam_offset_mac;
1955 mac_obj->put_cam_offset = bnx2x_put_cam_offset_mac;
1956
1957 if (CHIP_IS_E1x(bp)) {
1958 mac_obj->set_one_rule = bnx2x_set_one_mac_e1x;
1959 mac_obj->check_del = bnx2x_check_mac_del;
1960 mac_obj->check_add = bnx2x_check_mac_add;
1961 mac_obj->check_move = bnx2x_check_move_always_err;
1962 mac_obj->ramrod_cmd = RAMROD_CMD_ID_ETH_SET_MAC;
1963
1964 /* Exe Queue */
1965 bnx2x_exe_queue_init(bp,
1966 &mac_obj->exe_queue, 1, qable_obj,
1967 bnx2x_validate_vlan_mac,
1968				     bnx2x_remove_vlan_mac,
1969				     bnx2x_optimize_vlan_mac,
1970 bnx2x_execute_vlan_mac,
1971 bnx2x_exeq_get_mac);
1972 } else {
1973 mac_obj->set_one_rule = bnx2x_set_one_mac_e2;
1974 mac_obj->check_del = bnx2x_check_mac_del;
1975 mac_obj->check_add = bnx2x_check_mac_add;
1976 mac_obj->check_move = bnx2x_check_move;
1977 mac_obj->ramrod_cmd =
1978 RAMROD_CMD_ID_ETH_CLASSIFICATION_RULES;
1979		mac_obj->get_n_elements = bnx2x_get_n_elements;
1980
1981 /* Exe Queue */
1982 bnx2x_exe_queue_init(bp,
1983 &mac_obj->exe_queue, CLASSIFY_RULES_COUNT,
1984 qable_obj, bnx2x_validate_vlan_mac,
1985				     bnx2x_remove_vlan_mac,
1986				     bnx2x_optimize_vlan_mac,
1987 bnx2x_execute_vlan_mac,
1988 bnx2x_exeq_get_mac);
1989 }
1990}
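/*
 * Example binding (sketch): roughly how the driver wires up a per-queue MAC
 * object elsewhere; the exact macros and fields here are assumptions made
 * for illustration only:
 *
 *	bnx2x_init_mac_obj(bp, &bnx2x_sp_obj(bp, fp).mac_obj, fp->cl_id,
 *			   fp->cid, BP_FUNC(bp), bnx2x_sp(bp, mac_rdata),
 *			   bnx2x_sp_mapping(bp, mac_rdata),
 *			   BNX2X_FILTER_MAC_PENDING, &bp->sp_state,
 *			   BNX2X_OBJ_TYPE_RX_TX, &bp->macs_pool);
 */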
1991
1992void bnx2x_init_vlan_obj(struct bnx2x *bp,
1993 struct bnx2x_vlan_mac_obj *vlan_obj,
1994 u8 cl_id, u32 cid, u8 func_id, void *rdata,
1995 dma_addr_t rdata_mapping, int state,
1996 unsigned long *pstate, bnx2x_obj_type type,
1997 struct bnx2x_credit_pool_obj *vlans_pool)
1998{
1999 union bnx2x_qable_obj *qable_obj = (union bnx2x_qable_obj *)vlan_obj;
2000
2001 bnx2x_init_vlan_mac_common(vlan_obj, cl_id, cid, func_id, rdata,
2002 rdata_mapping, state, pstate, type, NULL,
2003 vlans_pool);
2004
2005 vlan_obj->get_credit = bnx2x_get_credit_vlan;
2006 vlan_obj->put_credit = bnx2x_put_credit_vlan;
2007 vlan_obj->get_cam_offset = bnx2x_get_cam_offset_vlan;
2008 vlan_obj->put_cam_offset = bnx2x_put_cam_offset_vlan;
2009
2010 if (CHIP_IS_E1x(bp)) {
2011		BNX2X_ERR("Do not support chips other than E2 and newer\n");
2012 BUG();
2013 } else {
2014 vlan_obj->set_one_rule = bnx2x_set_one_vlan_e2;
2015 vlan_obj->check_del = bnx2x_check_vlan_del;
2016 vlan_obj->check_add = bnx2x_check_vlan_add;
2017 vlan_obj->check_move = bnx2x_check_move;
2018 vlan_obj->ramrod_cmd =
2019 RAMROD_CMD_ID_ETH_CLASSIFICATION_RULES;
2020		vlan_obj->get_n_elements = bnx2x_get_n_elements;
2021
2022 /* Exe Queue */
2023 bnx2x_exe_queue_init(bp,
2024 &vlan_obj->exe_queue, CLASSIFY_RULES_COUNT,
2025 qable_obj, bnx2x_validate_vlan_mac,
2026				     bnx2x_remove_vlan_mac,
2027				     bnx2x_optimize_vlan_mac,
2028 bnx2x_execute_vlan_mac,
2029 bnx2x_exeq_get_vlan);
2030 }
2031}
2032
2033void bnx2x_init_vlan_mac_obj(struct bnx2x *bp,
2034 struct bnx2x_vlan_mac_obj *vlan_mac_obj,
2035 u8 cl_id, u32 cid, u8 func_id, void *rdata,
2036 dma_addr_t rdata_mapping, int state,
2037 unsigned long *pstate, bnx2x_obj_type type,
2038 struct bnx2x_credit_pool_obj *macs_pool,
2039 struct bnx2x_credit_pool_obj *vlans_pool)
2040{
2041 union bnx2x_qable_obj *qable_obj =
2042 (union bnx2x_qable_obj *)vlan_mac_obj;
2043
2044 bnx2x_init_vlan_mac_common(vlan_mac_obj, cl_id, cid, func_id, rdata,
2045 rdata_mapping, state, pstate, type,
2046 macs_pool, vlans_pool);
2047
2048 /* CAM pool handling */
2049 vlan_mac_obj->get_credit = bnx2x_get_credit_vlan_mac;
2050 vlan_mac_obj->put_credit = bnx2x_put_credit_vlan_mac;
2051 /*
2052 * CAM offset is relevant for 57710 and 57711 chips only which have a
2053 * single CAM for both MACs and VLAN-MAC pairs. So the offset
2054 * will be taken from MACs' pool object only.
2055 */
2056 vlan_mac_obj->get_cam_offset = bnx2x_get_cam_offset_mac;
2057 vlan_mac_obj->put_cam_offset = bnx2x_put_cam_offset_mac;
2058
2059 if (CHIP_IS_E1(bp)) {
2060		BNX2X_ERR("Do not support chips other than E2\n");
2061 BUG();
2062 } else if (CHIP_IS_E1H(bp)) {
2063 vlan_mac_obj->set_one_rule = bnx2x_set_one_vlan_mac_e1h;
2064 vlan_mac_obj->check_del = bnx2x_check_vlan_mac_del;
2065 vlan_mac_obj->check_add = bnx2x_check_vlan_mac_add;
2066 vlan_mac_obj->check_move = bnx2x_check_move_always_err;
2067 vlan_mac_obj->ramrod_cmd = RAMROD_CMD_ID_ETH_SET_MAC;
2068
2069 /* Exe Queue */
2070 bnx2x_exe_queue_init(bp,
2071 &vlan_mac_obj->exe_queue, 1, qable_obj,
2072 bnx2x_validate_vlan_mac,
2073				     bnx2x_remove_vlan_mac,
2074				     bnx2x_optimize_vlan_mac,
2075 bnx2x_execute_vlan_mac,
2076 bnx2x_exeq_get_vlan_mac);
2077 } else {
2078 vlan_mac_obj->set_one_rule = bnx2x_set_one_vlan_mac_e2;
2079 vlan_mac_obj->check_del = bnx2x_check_vlan_mac_del;
2080 vlan_mac_obj->check_add = bnx2x_check_vlan_mac_add;
2081 vlan_mac_obj->check_move = bnx2x_check_move;
2082 vlan_mac_obj->ramrod_cmd =
2083 RAMROD_CMD_ID_ETH_CLASSIFICATION_RULES;
2084
2085 /* Exe Queue */
2086 bnx2x_exe_queue_init(bp,
2087 &vlan_mac_obj->exe_queue,
2088 CLASSIFY_RULES_COUNT,
2089 qable_obj, bnx2x_validate_vlan_mac,
2090				     bnx2x_remove_vlan_mac,
2091				     bnx2x_optimize_vlan_mac,
2092 bnx2x_execute_vlan_mac,
2093 bnx2x_exeq_get_vlan_mac);
2094 }
2095
2096}
2097
2098/* RX_MODE verbs: DROP_ALL/ACCEPT_ALL/ACCEPT_ALL_MULTI/ACCEPT_ALL_VLAN/NORMAL */
2099static inline void __storm_memset_mac_filters(struct bnx2x *bp,
2100 struct tstorm_eth_mac_filter_config *mac_filters,
2101 u16 pf_id)
2102{
2103 size_t size = sizeof(struct tstorm_eth_mac_filter_config);
2104
2105 u32 addr = BAR_TSTRORM_INTMEM +
2106 TSTORM_MAC_FILTER_CONFIG_OFFSET(pf_id);
2107
2108 __storm_memset_struct(bp, addr, size, (u32 *)mac_filters);
2109}
2110
2111static int bnx2x_set_rx_mode_e1x(struct bnx2x *bp,
2112 struct bnx2x_rx_mode_ramrod_params *p)
2113{
2114	/* update the bp MAC filter structure */
2115	u32 mask = (1 << p->cl_id);
2116
2117	struct tstorm_eth_mac_filter_config *mac_filters =
2118		(struct tstorm_eth_mac_filter_config *)p->rdata;
2119
2120	/* initial setting is drop-all */
2121	u8 drop_all_ucast = 1, drop_all_mcast = 1;
2122	u8 accp_all_ucast = 0, accp_all_bcast = 0, accp_all_mcast = 0;
2123	u8 unmatched_unicast = 0;
2124
2125	/* In e1x we only take the Rx accept flags into account since tx
2126	 * switching isn't enabled. */
2127	if (test_bit(BNX2X_ACCEPT_UNICAST, &p->rx_accept_flags))
2128		/* accept matched ucast */
2129		drop_all_ucast = 0;
2130
2131	if (test_bit(BNX2X_ACCEPT_MULTICAST, &p->rx_accept_flags))
2132		/* accept matched mcast */
2133		drop_all_mcast = 0;
2134
2135	if (test_bit(BNX2X_ACCEPT_ALL_UNICAST, &p->rx_accept_flags)) {
2136		/* accept all ucast */
2137		drop_all_ucast = 0;
2138		accp_all_ucast = 1;
2139	}
2140	if (test_bit(BNX2X_ACCEPT_ALL_MULTICAST, &p->rx_accept_flags)) {
2141		/* accept all mcast */
2142		drop_all_mcast = 0;
2143		accp_all_mcast = 1;
2144	}
2145	if (test_bit(BNX2X_ACCEPT_BROADCAST, &p->rx_accept_flags))
2146		/* accept (all) bcast */
2147		accp_all_bcast = 1;
2148	if (test_bit(BNX2X_ACCEPT_UNMATCHED, &p->rx_accept_flags))
2149		/* accept unmatched unicasts */
2150		unmatched_unicast = 1;
2151
2152	mac_filters->ucast_drop_all = drop_all_ucast ?
2153		mac_filters->ucast_drop_all | mask :
2154		mac_filters->ucast_drop_all & ~mask;
2155
2156	mac_filters->mcast_drop_all = drop_all_mcast ?
2157		mac_filters->mcast_drop_all | mask :
2158		mac_filters->mcast_drop_all & ~mask;
2159
2160	mac_filters->ucast_accept_all = accp_all_ucast ?
2161		mac_filters->ucast_accept_all | mask :
2162		mac_filters->ucast_accept_all & ~mask;
2163
2164	mac_filters->mcast_accept_all = accp_all_mcast ?
2165		mac_filters->mcast_accept_all | mask :
2166		mac_filters->mcast_accept_all & ~mask;
2167
2168	mac_filters->bcast_accept_all = accp_all_bcast ?
2169		mac_filters->bcast_accept_all | mask :
2170		mac_filters->bcast_accept_all & ~mask;
2171
2172	mac_filters->unmatched_unicast = unmatched_unicast ?
2173		mac_filters->unmatched_unicast | mask :
2174		mac_filters->unmatched_unicast & ~mask;
2175
2176	DP(BNX2X_MSG_SP, "drop_ucast 0x%x\ndrop_mcast 0x%x\naccp_ucast 0x%x\n"
2177			 "accp_mcast 0x%x\naccp_bcast 0x%x\n",
2178	   mac_filters->ucast_drop_all, mac_filters->mcast_drop_all,
2179	   mac_filters->ucast_accept_all, mac_filters->mcast_accept_all,
2180	   mac_filters->bcast_accept_all);
2181
2182	/* write the MAC filter structure */
2183	__storm_memset_mac_filters(bp, mac_filters, p->func_id);
2184
2185	/* The operation is completed */
2186	clear_bit(p->state, p->pstate);
2187	smp_mb__after_clear_bit();
2188
2189	return 0;
2190}
2191
2192/* Setup ramrod data */
2193static inline void bnx2x_rx_mode_set_rdata_hdr_e2(u32 cid,
2194				struct eth_classify_header *hdr,
2195				u8 rule_cnt)
2196{
2197	hdr->echo = cpu_to_le32(cid);
2198	hdr->rule_cnt = rule_cnt;
2199}
2200
2201static inline void bnx2x_rx_mode_set_cmd_state_e2(struct bnx2x *bp,
2202				unsigned long *accept_flags,
2203				struct eth_filter_rules_cmd *cmd,
2204 bool clear_accept_all)
2205{
2206 u16 state;
2207
2208	/* start with 'drop-all' */
2209	state = ETH_FILTER_RULES_CMD_UCAST_DROP_ALL |
2210		ETH_FILTER_RULES_CMD_MCAST_DROP_ALL;
2211
2212	if (test_bit(BNX2X_ACCEPT_UNICAST, accept_flags))
2213		state &= ~ETH_FILTER_RULES_CMD_UCAST_DROP_ALL;
2214
2215	if (test_bit(BNX2X_ACCEPT_MULTICAST, accept_flags))
2216		state &= ~ETH_FILTER_RULES_CMD_MCAST_DROP_ALL;
2217
2218	if (test_bit(BNX2X_ACCEPT_ALL_UNICAST, accept_flags)) {
2219		state &= ~ETH_FILTER_RULES_CMD_UCAST_DROP_ALL;
2220		state |= ETH_FILTER_RULES_CMD_UCAST_ACCEPT_ALL;
2221	}
2222
2223	if (test_bit(BNX2X_ACCEPT_ALL_MULTICAST, accept_flags)) {
2224		state |= ETH_FILTER_RULES_CMD_MCAST_ACCEPT_ALL;
2225		state &= ~ETH_FILTER_RULES_CMD_MCAST_DROP_ALL;
2226	}
2227
2228	if (test_bit(BNX2X_ACCEPT_BROADCAST, accept_flags))
2229		state |= ETH_FILTER_RULES_CMD_BCAST_ACCEPT_ALL;
2230
2231	if (test_bit(BNX2X_ACCEPT_UNMATCHED, accept_flags)) {
2232		state &= ~ETH_FILTER_RULES_CMD_UCAST_DROP_ALL;
2233		state |= ETH_FILTER_RULES_CMD_UCAST_ACCEPT_UNMATCHED;
2234	}
2235
2236	if (test_bit(BNX2X_ACCEPT_ANY_VLAN, accept_flags))
2237		state |= ETH_FILTER_RULES_CMD_ACCEPT_ANY_VLAN;
2238
2239	/* Clear ACCEPT_ALL_XXX flags for FCoE L2 Queue */
2240 if (clear_accept_all) {
2241 state &= ~ETH_FILTER_RULES_CMD_MCAST_ACCEPT_ALL;
2242 state &= ~ETH_FILTER_RULES_CMD_BCAST_ACCEPT_ALL;
2243 state &= ~ETH_FILTER_RULES_CMD_UCAST_ACCEPT_ALL;
2244 state &= ~ETH_FILTER_RULES_CMD_UCAST_ACCEPT_UNMATCHED;
2245 }
2246
2247 cmd->state = cpu_to_le16(state);
2248
2249}
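/*
 * Worked example (illustrative): for promiscuous Rx the caller sets
 * BNX2X_ACCEPT_UNICAST, BNX2X_ACCEPT_ALL_UNICAST, BNX2X_ACCEPT_ALL_MULTICAST
 * and BNX2X_ACCEPT_BROADCAST, so the computed state ends up as
 * UCAST_ACCEPT_ALL | MCAST_ACCEPT_ALL | BCAST_ACCEPT_ALL with both
 * DROP_ALL bits cleared.
 */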
2250
2251static int bnx2x_set_rx_mode_e2(struct bnx2x *bp,
2252 struct bnx2x_rx_mode_ramrod_params *p)
2253{
2254 struct eth_filter_rules_ramrod_data *data = p->rdata;
2255 int rc;
2256 u8 rule_idx = 0;
2257
2258 /* Reset the ramrod data buffer */
2259 memset(data, 0, sizeof(*data));
2260
2261 /* Setup ramrod data */
2262
2263 /* Tx (internal switching) */
2264 if (test_bit(RAMROD_TX, &p->ramrod_flags)) {
2265 data->rules[rule_idx].client_id = p->cl_id;
2266 data->rules[rule_idx].func_id = p->func_id;
2267
2268 data->rules[rule_idx].cmd_general_data =
2269 ETH_FILTER_RULES_CMD_TX_CMD;
2270
2271		bnx2x_rx_mode_set_cmd_state_e2(bp, &p->tx_accept_flags,
2272					       &(data->rules[rule_idx++]),
2273					       false);
2274	}
2275
2276 /* Rx */
2277 if (test_bit(RAMROD_RX, &p->ramrod_flags)) {
2278 data->rules[rule_idx].client_id = p->cl_id;
2279 data->rules[rule_idx].func_id = p->func_id;
2280
2281 data->rules[rule_idx].cmd_general_data =
2282 ETH_FILTER_RULES_CMD_RX_CMD;
2283
2284		bnx2x_rx_mode_set_cmd_state_e2(bp, &p->rx_accept_flags,
2285					       &(data->rules[rule_idx++]),
2286					       false);
2287	}
2288
2289
2290 /*
2291	 * If FCoE Queue configuration has been requested, configure the Rx and
2292	 * internal switching modes for this queue in separate rules.
2293	 *
2294	 * The FCoE queue shall never be set to ACCEPT_ALL packets of any sort:
2295	 * MCAST_ALL, UCAST_ALL, BCAST_ALL and UNMATCHED.
2296 */
2297 if (test_bit(BNX2X_RX_MODE_FCOE_ETH, &p->rx_mode_flags)) {
2298 /* Tx (internal switching) */
2299 if (test_bit(RAMROD_TX, &p->ramrod_flags)) {
2300 data->rules[rule_idx].client_id = bnx2x_fcoe(bp, cl_id);
2301 data->rules[rule_idx].func_id = p->func_id;
2302
2303 data->rules[rule_idx].cmd_general_data =
2304 ETH_FILTER_RULES_CMD_TX_CMD;
2305
2306			bnx2x_rx_mode_set_cmd_state_e2(bp, &p->tx_accept_flags,
2307						       &(data->rules[rule_idx]),
2308						       true);
2309			rule_idx++;
2310		}
2311
2312 /* Rx */
2313 if (test_bit(RAMROD_RX, &p->ramrod_flags)) {
2314 data->rules[rule_idx].client_id = bnx2x_fcoe(bp, cl_id);
2315 data->rules[rule_idx].func_id = p->func_id;
2316
2317 data->rules[rule_idx].cmd_general_data =
2318 ETH_FILTER_RULES_CMD_RX_CMD;
2319
2320			bnx2x_rx_mode_set_cmd_state_e2(bp, &p->rx_accept_flags,
2321						       &(data->rules[rule_idx]),
2322						       true);
2323			rule_idx++;
2324		}
2325 }
2326
2327 /*
2328 * Set the ramrod header (most importantly - number of rules to
2329 * configure).
2330 */
2331 bnx2x_rx_mode_set_rdata_hdr_e2(p->cid, &data->header, rule_idx);
2332
2333	DP(BNX2X_MSG_SP, "About to configure %d rules, rx_accept_flags 0x%lx, tx_accept_flags 0x%lx\n",
2334	   data->header.rule_cnt, p->rx_accept_flags,
2335 p->tx_accept_flags);
2336
2337	/*
2338	 * No need for an explicit memory barrier here: we need to
2339	 * ensure the ordering of writing to the SPQ element and
2340	 * updating of the SPQ producer, which involves a memory
2341	 * read, but a full memory barrier is already placed for
2342	 * that inside bnx2x_sp_post().
2343	 */
2344
2345 /* Send a ramrod */
2346 rc = bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_FILTER_RULES, p->cid,
2347 U64_HI(p->rdata_mapping),
2348 U64_LO(p->rdata_mapping),
2349 ETH_CONNECTION_TYPE);
2350 if (rc)
2351 return rc;
2352
2353 /* Ramrod completion is pending */
2354 return 1;
2355}
2356
2357static int bnx2x_wait_rx_mode_comp_e2(struct bnx2x *bp,
2358 struct bnx2x_rx_mode_ramrod_params *p)
2359{
2360 return bnx2x_state_wait(bp, p->state, p->pstate);
2361}
2362
2363static int bnx2x_empty_rx_mode_wait(struct bnx2x *bp,
2364 struct bnx2x_rx_mode_ramrod_params *p)
2365{
2366 /* Do nothing */
2367 return 0;
2368}
2369
2370int bnx2x_config_rx_mode(struct bnx2x *bp,
2371 struct bnx2x_rx_mode_ramrod_params *p)
2372{
2373 int rc;
2374
2375 /* Configure the new classification in the chip */
2376 rc = p->rx_mode_obj->config_rx_mode(bp, p);
2377 if (rc < 0)
2378 return rc;
2379
2380 /* Wait for a ramrod completion if was requested */
2381 if (test_bit(RAMROD_COMP_WAIT, &p->ramrod_flags)) {
2382 rc = p->rx_mode_obj->wait_comp(bp, p);
2383 if (rc)
2384 return rc;
2385 }
2386
2387 return rc;
2388}
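/* Usage sketch (hypothetical helper): request a "normal" Rx mode and wait
 * for completion. It assumes the caller has already filled the rest of *p
 * (rx_mode_obj, cl_id, cid, rdata, state and so on).
 */
static int __maybe_unused bnx2x_example_set_normal_rx_mode(struct bnx2x *bp,
				struct bnx2x_rx_mode_ramrod_params *p)
{
	/* Accept matched unicast/multicast plus broadcast */
	__set_bit(BNX2X_ACCEPT_UNICAST, &p->rx_accept_flags);
	__set_bit(BNX2X_ACCEPT_MULTICAST, &p->rx_accept_flags);
	__set_bit(BNX2X_ACCEPT_BROADCAST, &p->rx_accept_flags);
	__set_bit(BNX2X_ACCEPT_UNICAST, &p->tx_accept_flags);
	__set_bit(BNX2X_ACCEPT_MULTICAST, &p->tx_accept_flags);
	__set_bit(BNX2X_ACCEPT_BROADCAST, &p->tx_accept_flags);

	/* Configure both Rx and Tx (internal switching) rules and wait */
	__set_bit(RAMROD_RX, &p->ramrod_flags);
	__set_bit(RAMROD_TX, &p->ramrod_flags);
	__set_bit(RAMROD_COMP_WAIT, &p->ramrod_flags);

	return bnx2x_config_rx_mode(bp, p);
}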
2389
2390void bnx2x_init_rx_mode_obj(struct bnx2x *bp,
2391 struct bnx2x_rx_mode_obj *o)
2392{
2393 if (CHIP_IS_E1x(bp)) {
2394 o->wait_comp = bnx2x_empty_rx_mode_wait;
2395 o->config_rx_mode = bnx2x_set_rx_mode_e1x;
2396 } else {
2397 o->wait_comp = bnx2x_wait_rx_mode_comp_e2;
2398 o->config_rx_mode = bnx2x_set_rx_mode_e2;
2399 }
2400}
2401
2402/********************* Multicast verbs: SET, CLEAR ****************************/
2403static inline u8 bnx2x_mcast_bin_from_mac(u8 *mac)
2404{
2405 return (crc32c_le(0, mac, ETH_ALEN) >> 24) & 0xff;
2406}
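/*
 * Example (illustrative): the CRC32c of the 6-byte MAC is computed and the
 * most significant byte of the 32-bit result selects one of 256 bins; e.g.
 * a CRC of 0x5a3b90c4 maps the MAC to bin 0x5a (90).
 */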
2407
2408struct bnx2x_mcast_mac_elem {
2409 struct list_head link;
2410 u8 mac[ETH_ALEN];
2411 u8 pad[2]; /* For a natural alignment of the following buffer */
2412};
2413
2414struct bnx2x_pending_mcast_cmd {
2415 struct list_head link;
2416 int type; /* BNX2X_MCAST_CMD_X */
2417 union {
2418 struct list_head macs_head;
2419 u32 macs_num; /* Needed for DEL command */
2420 int next_bin; /* Needed for RESTORE flow with aprox match */
2421 } data;
2422
2423	bool done; /* set to true when the command has been handled;
2424		    * practically used in 57712 handling only, where one pending
2425		    * command may be handled in a few operations. Since for
2426		    * other chips every operation is completed in a
2427		    * single ramrod, there is no need to utilize this field.
2428 */
2429};
2430
2431static int bnx2x_mcast_wait(struct bnx2x *bp,
2432 struct bnx2x_mcast_obj *o)
2433{
2434 if (bnx2x_state_wait(bp, o->sched_state, o->raw.pstate) ||
2435 o->raw.wait_comp(bp, &o->raw))
2436 return -EBUSY;
2437
2438 return 0;
2439}
2440
2441static int bnx2x_mcast_enqueue_cmd(struct bnx2x *bp,
2442 struct bnx2x_mcast_obj *o,
2443 struct bnx2x_mcast_ramrod_params *p,
2444				   enum bnx2x_mcast_cmd cmd)
2445{
2446 int total_sz;
2447 struct bnx2x_pending_mcast_cmd *new_cmd;
2448 struct bnx2x_mcast_mac_elem *cur_mac = NULL;
2449 struct bnx2x_mcast_list_elem *pos;
2450 int macs_list_len = ((cmd == BNX2X_MCAST_CMD_ADD) ?
2451 p->mcast_list_len : 0);
2452
2453	/* If the command is empty ("handle pending commands only"), return */
2454 if (!p->mcast_list_len)
2455 return 0;
2456
2457 total_sz = sizeof(*new_cmd) +
2458 macs_list_len * sizeof(struct bnx2x_mcast_mac_elem);
2459
2460 /* Add mcast is called under spin_lock, thus calling with GFP_ATOMIC */
2461 new_cmd = kzalloc(total_sz, GFP_ATOMIC);
2462
2463 if (!new_cmd)
2464 return -ENOMEM;
2465
2466	DP(BNX2X_MSG_SP, "About to enqueue a new %d command. macs_list_len=%d\n",
2467	   cmd, macs_list_len);
2468
2469 INIT_LIST_HEAD(&new_cmd->data.macs_head);
2470
2471 new_cmd->type = cmd;
2472 new_cmd->done = false;
2473
2474 switch (cmd) {
2475 case BNX2X_MCAST_CMD_ADD:
2476 cur_mac = (struct bnx2x_mcast_mac_elem *)
2477 ((u8 *)new_cmd + sizeof(*new_cmd));
2478
2479		/* Push the MACs of the current command into the pending
2480		 * command's MACs list: FIFO
2481		 */
2482		list_for_each_entry(pos, &p->mcast_list, link) {
2483			memcpy(cur_mac->mac, pos->mac, ETH_ALEN);
2484			list_add_tail(&cur_mac->link, &new_cmd->data.macs_head);
2485			cur_mac++;
2486		}
2487
2488		break;
2489
2490	case BNX2X_MCAST_CMD_DEL:
2491 new_cmd->data.macs_num = p->mcast_list_len;
2492 break;
2493
2494 case BNX2X_MCAST_CMD_RESTORE:
2495 new_cmd->data.next_bin = 0;
2496		break;
2497
2498 default:
2499		kfree(new_cmd);
2500		BNX2X_ERR("Unknown command: %d\n", cmd);
2501 return -EINVAL;
2502 }
2503
2504 /* Push the new pending command to the tail of the pending list: FIFO */
2505 list_add_tail(&new_cmd->link, &o->pending_cmds_head);
2506
2507 o->set_sched(o);
2508
2509 return 1;
2510}
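/*
 * Layout note (illustrative): for an ADD of N MACs a single allocation
 * holds the command header immediately followed by N element slots:
 *
 *	total_sz = sizeof(*new_cmd) + N * sizeof(struct bnx2x_mcast_mac_elem)
 *
 * cur_mac above walks those trailing slots while linking them into
 * new_cmd->data.macs_head.
 */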
2511
2512/**
2513 * bnx2x_mcast_get_next_bin - get the next set bin (index)
2514 *
2515 * @o:		multicast object to search in
2516 * @last: index to start looking from (including)
2517 *
2518 * returns the next found (set) bin or a negative value if none is found.
2519 */
2520static inline int bnx2x_mcast_get_next_bin(struct bnx2x_mcast_obj *o, int last)
2521{
2522 int i, j, inner_start = last % BIT_VEC64_ELEM_SZ;
2523
2524 for (i = last / BIT_VEC64_ELEM_SZ; i < BNX2X_MCAST_VEC_SZ; i++) {
2525 if (o->registry.aprox_match.vec[i])
2526 for (j = inner_start; j < BIT_VEC64_ELEM_SZ; j++) {
2527 int cur_bit = j + BIT_VEC64_ELEM_SZ * i;
2528 if (BIT_VEC64_TEST_BIT(o->registry.aprox_match.
2529 vec, cur_bit)) {
2530 return cur_bit;
2531 }
2532 }
2533 inner_start = 0;
2534 }
2535
2536 /* None found */
2537 return -1;
2538}
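/*
 * Iteration idiom (illustrative): visiting every set bin, as the restore
 * handlers below do:
 *
 *	for (bin = bnx2x_mcast_get_next_bin(o, 0); bin >= 0;
 *	     bin = bnx2x_mcast_get_next_bin(o, bin + 1))
 *		handle(bin);
 */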
2539
2540/**
2541 * bnx2x_mcast_clear_first_bin - find the first set bin and clear it
2542 *
2543 * @o:		multicast object to clear the bin in
2544 *
2545 * returns the index of the found bin or -1 if none is found
2546 */
2547static inline int bnx2x_mcast_clear_first_bin(struct bnx2x_mcast_obj *o)
2548{
2549 int cur_bit = bnx2x_mcast_get_next_bin(o, 0);
2550
2551 if (cur_bit >= 0)
2552 BIT_VEC64_CLEAR_BIT(o->registry.aprox_match.vec, cur_bit);
2553
2554 return cur_bit;
2555}
2556
2557static inline u8 bnx2x_mcast_get_rx_tx_flag(struct bnx2x_mcast_obj *o)
2558{
2559 struct bnx2x_raw_obj *raw = &o->raw;
2560 u8 rx_tx_flag = 0;
2561
2562 if ((raw->obj_type == BNX2X_OBJ_TYPE_TX) ||
2563 (raw->obj_type == BNX2X_OBJ_TYPE_RX_TX))
2564 rx_tx_flag |= ETH_MULTICAST_RULES_CMD_TX_CMD;
2565
2566 if ((raw->obj_type == BNX2X_OBJ_TYPE_RX) ||
2567 (raw->obj_type == BNX2X_OBJ_TYPE_RX_TX))
2568 rx_tx_flag |= ETH_MULTICAST_RULES_CMD_RX_CMD;
2569
2570 return rx_tx_flag;
2571}
2572
2573static void bnx2x_mcast_set_one_rule_e2(struct bnx2x *bp,
2574 struct bnx2x_mcast_obj *o, int idx,
2575 union bnx2x_mcast_config_data *cfg_data,
2576					enum bnx2x_mcast_cmd cmd)
2577{
2578 struct bnx2x_raw_obj *r = &o->raw;
2579 struct eth_multicast_rules_ramrod_data *data =
2580 (struct eth_multicast_rules_ramrod_data *)(r->rdata);
2581 u8 func_id = r->func_id;
2582 u8 rx_tx_add_flag = bnx2x_mcast_get_rx_tx_flag(o);
2583 int bin;
2584
2585 if ((cmd == BNX2X_MCAST_CMD_ADD) || (cmd == BNX2X_MCAST_CMD_RESTORE))
2586 rx_tx_add_flag |= ETH_MULTICAST_RULES_CMD_IS_ADD;
2587
2588 data->rules[idx].cmd_general_data |= rx_tx_add_flag;
2589
2590 /* Get a bin and update a bins' vector */
2591 switch (cmd) {
2592 case BNX2X_MCAST_CMD_ADD:
2593 bin = bnx2x_mcast_bin_from_mac(cfg_data->mac);
2594 BIT_VEC64_SET_BIT(o->registry.aprox_match.vec, bin);
2595		break;
2596
2597 case BNX2X_MCAST_CMD_DEL:
2598 /* If there were no more bins to clear
2599 * (bnx2x_mcast_clear_first_bin() returns -1) then we would
2600 * clear any (0xff) bin.
2601		 * See bnx2x_mcast_validate_e2() for an explanation of when
2602		 * this may happen.
2603 */
2604 bin = bnx2x_mcast_clear_first_bin(o);
2605 break;
2606
2607 case BNX2X_MCAST_CMD_RESTORE:
2608 bin = cfg_data->bin;
2609 break;
2610
2611 default:
2612 BNX2X_ERR("Unknown command: %d\n", cmd);
2613 return;
2614	}
2615
2616	DP(BNX2X_MSG_SP, "%s bin %d\n",
2617	   ((rx_tx_add_flag & ETH_MULTICAST_RULES_CMD_IS_ADD) ?
2618	    "Setting" : "Clearing"), bin);
2619
2620	data->rules[idx].bin_id = (u8)bin;
2621	data->rules[idx].func_id = func_id;
2622	data->rules[idx].engine_id = o->engine_id;
2623}
2624
2625/**
2626 * bnx2x_mcast_handle_restore_cmd_e2 - restore configuration from the registry
2627 *
2628 * @bp: device handle
2629 * @o: multicast object to restore from
2630 * @start_bin: index in the registry to start from (including)
2631 * @rdata_idx: index in the ramrod data to start from
2632 *
2633 * returns last handled bin index or -1 if all bins have been handled
2634 */
2635static inline int bnx2x_mcast_handle_restore_cmd_e2(
2636 struct bnx2x *bp, struct bnx2x_mcast_obj *o , int start_bin,
2637 int *rdata_idx)
2638{
2639	int cur_bin, cnt = *rdata_idx;
2640	union bnx2x_mcast_config_data cfg_data = {NULL};
2641
2642	/* go through the registry and configure the bins from it */
2643 for (cur_bin = bnx2x_mcast_get_next_bin(o, start_bin); cur_bin >= 0;
2644 cur_bin = bnx2x_mcast_get_next_bin(o, cur_bin + 1)) {
2645
2646		cfg_data.bin = (u8)cur_bin;
2647 o->set_one_rule(bp, o, cnt, &cfg_data,
2648 BNX2X_MCAST_CMD_RESTORE);
2649
2650		cnt++;
2651
2652		DP(BNX2X_MSG_SP, "About to configure a bin %d\n", cur_bin);
2653
2654		/* Break if we reached the maximum number
2655 * of rules.
2656 */
2657 if (cnt >= o->max_cmd_len)
2658 break;
2659	}
2660
2661	*rdata_idx = cnt;
2662
2663	return cur_bin;
2664}
2665
2666static inline void bnx2x_mcast_hdl_pending_add_e2(struct bnx2x *bp,
2667 struct bnx2x_mcast_obj *o, struct bnx2x_pending_mcast_cmd *cmd_pos,
2668 int *line_idx)
2669{
2670	struct bnx2x_mcast_mac_elem *pmac_pos, *pmac_pos_n;
2671	int cnt = *line_idx;
2672	union bnx2x_mcast_config_data cfg_data = {NULL};
2673
2674 list_for_each_entry_safe(pmac_pos, pmac_pos_n, &cmd_pos->data.macs_head,
2675 link) {
2676
2677 cfg_data.mac = &pmac_pos->mac[0];
2678 o->set_one_rule(bp, o, cnt, &cfg_data, cmd_pos->type);
2679
2680 cnt++;
2681
2682		DP(BNX2X_MSG_SP, "About to configure %pM mcast MAC\n",
2683		   pmac_pos->mac);
2684
2685 list_del(&pmac_pos->link);
2686
2687 /* Break if we reached the maximum number
2688 * of rules.
2689 */
2690 if (cnt >= o->max_cmd_len)
2691 break;
2692 }
2693
2694 *line_idx = cnt;
2695
2696 /* if no more MACs to configure - we are done */
2697 if (list_empty(&cmd_pos->data.macs_head))
2698 cmd_pos->done = true;
2699}
2700
2701static inline void bnx2x_mcast_hdl_pending_del_e2(struct bnx2x *bp,
2702 struct bnx2x_mcast_obj *o, struct bnx2x_pending_mcast_cmd *cmd_pos,
2703 int *line_idx)
2704{
2705 int cnt = *line_idx;
2706
2707 while (cmd_pos->data.macs_num) {
2708 o->set_one_rule(bp, o, cnt, NULL, cmd_pos->type);
2709
2710 cnt++;
2711
2712 cmd_pos->data.macs_num--;
2713
2714		DP(BNX2X_MSG_SP, "Deleting MAC. %d left, cnt is %d\n",
2715 cmd_pos->data.macs_num, cnt);
2716
2717 /* Break if we reached the maximum
2718 * number of rules.
2719 */
2720 if (cnt >= o->max_cmd_len)
2721 break;
2722 }
2723
2724 *line_idx = cnt;
2725
2726 /* If we cleared all bins - we are done */
2727 if (!cmd_pos->data.macs_num)
2728 cmd_pos->done = true;
2729}
2730
2731static inline void bnx2x_mcast_hdl_pending_restore_e2(struct bnx2x *bp,
2732 struct bnx2x_mcast_obj *o, struct bnx2x_pending_mcast_cmd *cmd_pos,
2733 int *line_idx)
2734{
2735 cmd_pos->data.next_bin = o->hdl_restore(bp, o, cmd_pos->data.next_bin,
2736 line_idx);
2737
2738 if (cmd_pos->data.next_bin < 0)
2739 /* If o->set_restore returned -1 we are done */
2740 cmd_pos->done = true;
2741 else
2742 /* Start from the next bin next time */
2743 cmd_pos->data.next_bin++;
2744}
2745
2746static inline int bnx2x_mcast_handle_pending_cmds_e2(struct bnx2x *bp,
2747 struct bnx2x_mcast_ramrod_params *p)
2748{
2749 struct bnx2x_pending_mcast_cmd *cmd_pos, *cmd_pos_n;
2750 int cnt = 0;
2751 struct bnx2x_mcast_obj *o = p->mcast_obj;
2752
2753 list_for_each_entry_safe(cmd_pos, cmd_pos_n, &o->pending_cmds_head,
2754 link) {
2755 switch (cmd_pos->type) {
2756 case BNX2X_MCAST_CMD_ADD:
2757 bnx2x_mcast_hdl_pending_add_e2(bp, o, cmd_pos, &cnt);
2758 break;
2759
2760 case BNX2X_MCAST_CMD_DEL:
2761 bnx2x_mcast_hdl_pending_del_e2(bp, o, cmd_pos, &cnt);
2762 break;
2763
2764 case BNX2X_MCAST_CMD_RESTORE:
2765 bnx2x_mcast_hdl_pending_restore_e2(bp, o, cmd_pos,
2766 &cnt);
2767 break;
2768
2769 default:
2770 BNX2X_ERR("Unknown command: %d\n", cmd_pos->type);
2771 return -EINVAL;
2772 }
2773
2774 /* If the command has been completed - remove it from the list
2775 * and free the memory
2776 */
2777 if (cmd_pos->done) {
2778 list_del(&cmd_pos->link);
2779 kfree(cmd_pos);
2780 }
2781
2782 /* Break if we reached the maximum number of rules */
2783 if (cnt >= o->max_cmd_len)
2784 break;
2785 }
2786
2787 return cnt;
2788}
2789
2790static inline void bnx2x_mcast_hdl_add(struct bnx2x *bp,
2791 struct bnx2x_mcast_obj *o, struct bnx2x_mcast_ramrod_params *p,
2792 int *line_idx)
2793{
2794 struct bnx2x_mcast_list_elem *mlist_pos;
2795	union bnx2x_mcast_config_data cfg_data = {NULL};
2796	int cnt = *line_idx;
2797
2798 list_for_each_entry(mlist_pos, &p->mcast_list, link) {
2799 cfg_data.mac = mlist_pos->mac;
2800 o->set_one_rule(bp, o, cnt, &cfg_data, BNX2X_MCAST_CMD_ADD);
2801
2802 cnt++;
2803
2804		DP(BNX2X_MSG_SP, "About to configure %pM mcast MAC\n",
2805		   mlist_pos->mac);
2806	}
2807
2808 *line_idx = cnt;
2809}
2810
2811static inline void bnx2x_mcast_hdl_del(struct bnx2x *bp,
2812 struct bnx2x_mcast_obj *o, struct bnx2x_mcast_ramrod_params *p,
2813 int *line_idx)
2814{
2815 int cnt = *line_idx, i;
2816
2817 for (i = 0; i < p->mcast_list_len; i++) {
2818 o->set_one_rule(bp, o, cnt, NULL, BNX2X_MCAST_CMD_DEL);
2819
2820 cnt++;
2821
2822 DP(BNX2X_MSG_SP, "Deleting MAC. %d left\n",
2823 p->mcast_list_len - i - 1);
2824 }
2825
2826 *line_idx = cnt;
2827}
2828
2829/**
2830 * bnx2x_mcast_handle_current_cmd - fill ramrod data with the current command
2831 *
2832 * @bp: device handle
2833 * @p: multicast ramrod parameters
2834 * @cmd: command to handle (ADD/DEL/RESTORE)
2835 * @start_cnt: first line in the ramrod data that may be used
2836 *
2837 * This function is called iff there is enough room for the current command in
2838 * the ramrod data.
2839 * Returns number of lines filled in the ramrod data in total.
2840 */
2841static inline int bnx2x_mcast_handle_current_cmd(struct bnx2x *bp,
2842					struct bnx2x_mcast_ramrod_params *p,
2843					enum bnx2x_mcast_cmd cmd,
2844					int start_cnt)
2845{
2846 struct bnx2x_mcast_obj *o = p->mcast_obj;
2847 int cnt = start_cnt;
2848
2849 DP(BNX2X_MSG_SP, "p->mcast_list_len=%d\n", p->mcast_list_len);
2850
2851 switch (cmd) {
2852 case BNX2X_MCAST_CMD_ADD:
2853 bnx2x_mcast_hdl_add(bp, o, p, &cnt);
2854 break;
2855
2856 case BNX2X_MCAST_CMD_DEL:
2857 bnx2x_mcast_hdl_del(bp, o, p, &cnt);
2858 break;
2859
2860 case BNX2X_MCAST_CMD_RESTORE:
2861 o->hdl_restore(bp, o, 0, &cnt);
2862 break;
2863
2864 default:
2865 BNX2X_ERR("Unknown command: %d\n", cmd);
2866 return -EINVAL;
2867 }
2868
2869 /* The current command has been handled */
2870 p->mcast_list_len = 0;
2871
2872 return cnt;
2873}
2874
2875static int bnx2x_mcast_validate_e2(struct bnx2x *bp,
2876 struct bnx2x_mcast_ramrod_params *p,
2877				   enum bnx2x_mcast_cmd cmd)
2878{
2879 struct bnx2x_mcast_obj *o = p->mcast_obj;
2880 int reg_sz = o->get_registry_size(o);
2881
2882 switch (cmd) {
2883 /* DEL command deletes all currently configured MACs */
2884 case BNX2X_MCAST_CMD_DEL:
2885 o->set_registry_size(o, 0);
2886 /* Don't break */
2887
2888 /* RESTORE command will restore the entire multicast configuration */
2889 case BNX2X_MCAST_CMD_RESTORE:
2890 /* Here we set the approximate amount of work to do, which in
2891		 * fact may turn out to be less, as some MACs in postponed ADD
2892 * command(s) scheduled before this command may fall into
2893 * the same bin and the actual number of bins set in the
2894 * registry would be less than we estimated here. See
2895 * bnx2x_mcast_set_one_rule_e2() for further details.
2896 */
2897 p->mcast_list_len = reg_sz;
2898 break;
2899
2900 case BNX2X_MCAST_CMD_ADD:
2901 case BNX2X_MCAST_CMD_CONT:
2902 /* Here we assume that all new MACs will fall into new bins.
2903 * However we will correct the real registry size after we
2904 * handle all pending commands.
2905 */
2906 o->set_registry_size(o, reg_sz + p->mcast_list_len);
2907 break;
2908
2909 default:
2910 BNX2X_ERR("Unknown command: %d\n", cmd);
2911 return -EINVAL;
2912
2913 }
2914
2915 /* Increase the total number of MACs pending to be configured */
2916 o->total_pending_num += p->mcast_list_len;
2917
2918 return 0;
2919}
2920
2921static void bnx2x_mcast_revert_e2(struct bnx2x *bp,
2922 struct bnx2x_mcast_ramrod_params *p,
2923 int old_num_bins)
2924{
2925 struct bnx2x_mcast_obj *o = p->mcast_obj;
2926
2927 o->set_registry_size(o, old_num_bins);
2928 o->total_pending_num -= p->mcast_list_len;
2929}
2930
2931/**
2932 * bnx2x_mcast_set_rdata_hdr_e2 - sets header values in the ramrod data
2933 *
2934 * @bp: device handle
2935 * @p: multicast ramrod parameters
2936 * @len: number of rules to handle
2937 */
2938static inline void bnx2x_mcast_set_rdata_hdr_e2(struct bnx2x *bp,
2939 struct bnx2x_mcast_ramrod_params *p,
2940 u8 len)
2941{
2942 struct bnx2x_raw_obj *r = &p->mcast_obj->raw;
2943 struct eth_multicast_rules_ramrod_data *data =
2944 (struct eth_multicast_rules_ramrod_data *)(r->rdata);
2945
2946	data->header.echo = cpu_to_le32((r->cid & BNX2X_SWCID_MASK) |
2947					(BNX2X_FILTER_MCAST_PENDING <<
2948					 BNX2X_SWCID_SHIFT));
2949	data->header.rule_cnt = len;
2950}
2951
2952/**
2953 * bnx2x_mcast_refresh_registry_e2 - recalculate the actual number of set bins
2954 *
2955 * @bp: device handle
2956 * @o: multicast object whose registry to refresh
2957 *
2958 * Recalculate the actual number of set bins in the registry using Brian
2959 * Kernighan's algorithm: its execution complexity is linear in the number of set bins.
2960 *
2961 * returns 0 for the compliance with bnx2x_mcast_refresh_registry_e1().
2962 */
2963static inline int bnx2x_mcast_refresh_registry_e2(struct bnx2x *bp,
2964 struct bnx2x_mcast_obj *o)
2965{
2966 int i, cnt = 0;
2967 u64 elem;
2968
2969 for (i = 0; i < BNX2X_MCAST_VEC_SZ; i++) {
2970 elem = o->registry.aprox_match.vec[i];
2971 for (; elem; cnt++)
2972 elem &= elem - 1;
2973 }
2974
2975 o->set_registry_size(o, cnt);
2976
2977 return 0;
2978}
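/*
 * Worked example (illustrative): for elem = 0xb (binary 1011) the inner
 * loop runs three times: 1011 -> 1010 -> 1000 -> 0, i.e. cnt grows by
 * exactly the number of set bits, regardless of their positions.
 */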
2979
2980static int bnx2x_mcast_setup_e2(struct bnx2x *bp,
2981 struct bnx2x_mcast_ramrod_params *p,
2982			       enum bnx2x_mcast_cmd cmd)
2983{
2984 struct bnx2x_raw_obj *raw = &p->mcast_obj->raw;
2985 struct bnx2x_mcast_obj *o = p->mcast_obj;
2986 struct eth_multicast_rules_ramrod_data *data =
2987 (struct eth_multicast_rules_ramrod_data *)(raw->rdata);
2988 int cnt = 0, rc;
2989
2990 /* Reset the ramrod data buffer */
2991	memset(data, 0, sizeof(*data));
2992
2993	cnt = bnx2x_mcast_handle_pending_cmds_e2(bp, p);
2994
2995 /* If there are no more pending commands - clear SCHEDULED state */
2996 if (list_empty(&o->pending_cmds_head))
2997 o->clear_sched(o);
2998
2999 /* The below may be true iff there was enough room in ramrod
3000 * data for all pending commands and for the current
3001 * command. Otherwise the current command would have been added
3002 * to the pending commands and p->mcast_list_len would have been
3003 * zeroed.
3004 */
3005 if (p->mcast_list_len > 0)
3006 cnt = bnx2x_mcast_handle_current_cmd(bp, p, cmd, cnt);
3007
3008 /* We've pulled out some MACs - update the total number of
3009 * outstanding.
3010 */
3011 o->total_pending_num -= cnt;
3012
3013 /* send a ramrod */
3014 WARN_ON(o->total_pending_num < 0);
3015 WARN_ON(cnt > o->max_cmd_len);
3016
3017 bnx2x_mcast_set_rdata_hdr_e2(bp, p, (u8)cnt);
3018
3019 /* Update a registry size if there are no more pending operations.
3020 *
3021 * We don't want to change the value of the registry size if there are
3022 * pending operations because we want it to always be equal to the
3023 * exact or the approximate number (see bnx2x_mcast_validate_e2()) of
3024 * set bins after the last requested operation in order to properly
3025 * evaluate the size of the next DEL/RESTORE operation.
3026 *
3027 * Note that we update the registry itself during command(s) handling
3028 * - see bnx2x_mcast_set_one_rule_e2(). That's because for 57712 we
3029 * aggregate multiple commands (ADD/DEL/RESTORE) into one ramrod but
3030 * with a limited amount of update commands (per MAC/bin) and we don't
3031 * know in this scope what the actual state of bins configuration is
3032 * going to be after this ramrod.
3033 */
3034 if (!o->total_pending_num)
3035 bnx2x_mcast_refresh_registry_e2(bp, o);
3036
3037	/*
3038	 * If CLEAR_ONLY was requested - don't send a ramrod and clear
3039	 * RAMROD_PENDING status immediately.
3040	 */
3041 if (test_bit(RAMROD_DRV_CLR_ONLY, &p->ramrod_flags)) {
3042 raw->clear_pending(raw);
3043 return 0;
3044 } else {
3045		/*
3046		 * No need for an explicit memory barrier here: we need to
3047		 * ensure the ordering of writing to the SPQ element and
3048		 * updating of the SPQ producer, which involves a memory
3049		 * read, but a full memory barrier is already placed for
3050		 * that inside bnx2x_sp_post().
3051		 */
3052
3053		/* Send a ramrod */
3054 rc = bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_MULTICAST_RULES,
3055 raw->cid, U64_HI(raw->rdata_mapping),
3056 U64_LO(raw->rdata_mapping),
3057 ETH_CONNECTION_TYPE);
3058 if (rc)
3059 return rc;
3060
3061 /* Ramrod completion is pending */
3062 return 1;
3063 }
3064}
3065
3066static int bnx2x_mcast_validate_e1h(struct bnx2x *bp,
3067 struct bnx2x_mcast_ramrod_params *p,
3068				    enum bnx2x_mcast_cmd cmd)
3069{
3070 /* Mark, that there is a work to do */
3071 if ((cmd == BNX2X_MCAST_CMD_DEL) || (cmd == BNX2X_MCAST_CMD_RESTORE))
3072 p->mcast_list_len = 1;
3073
3074 return 0;
3075}
3076
3077static void bnx2x_mcast_revert_e1h(struct bnx2x *bp,
3078 struct bnx2x_mcast_ramrod_params *p,
3079 int old_num_bins)
3080{
3081 /* Do nothing */
3082}
3083
3084#define BNX2X_57711_SET_MC_FILTER(filter, bit) \
3085do { \
3086 (filter)[(bit) >> 5] |= (1 << ((bit) & 0x1f)); \
3087} while (0)
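/*
 * Example expansion (illustrative): bin 37 lands in word 37 >> 5 = 1 at bit
 * position 37 & 0x1f = 5, i.e. mc_filter[1] |= (1 << 5).
 */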
3088
3089static inline void bnx2x_mcast_hdl_add_e1h(struct bnx2x *bp,
3090 struct bnx2x_mcast_obj *o,
3091 struct bnx2x_mcast_ramrod_params *p,
3092 u32 *mc_filter)
3093{
3094 struct bnx2x_mcast_list_elem *mlist_pos;
3095 int bit;
3096
3097 list_for_each_entry(mlist_pos, &p->mcast_list, link) {
3098 bit = bnx2x_mcast_bin_from_mac(mlist_pos->mac);
3099 BNX2X_57711_SET_MC_FILTER(mc_filter, bit);
3100
3101		DP(BNX2X_MSG_SP, "About to configure %pM mcast MAC, bin %d\n",
3102		   mlist_pos->mac, bit);
3103
3104 /* bookkeeping... */
3105 BIT_VEC64_SET_BIT(o->registry.aprox_match.vec,
3106 bit);
3107 }
3108}
3109
3110static inline void bnx2x_mcast_hdl_restore_e1h(struct bnx2x *bp,
3111 struct bnx2x_mcast_obj *o, struct bnx2x_mcast_ramrod_params *p,
3112 u32 *mc_filter)
3113{
3114 int bit;
3115
3116 for (bit = bnx2x_mcast_get_next_bin(o, 0);
3117 bit >= 0;
3118 bit = bnx2x_mcast_get_next_bin(o, bit + 1)) {
3119 BNX2X_57711_SET_MC_FILTER(mc_filter, bit);
3120 DP(BNX2X_MSG_SP, "About to set bin %d\n", bit);
3121 }
3122}
3123
3124/* On 57711 we write the multicast MACs' approximate match
3125 * table directly into the TSTORM's internal RAM, so we don't
3126 * need any tricks to make it work.
3127 */
3128static int bnx2x_mcast_setup_e1h(struct bnx2x *bp,
3129 struct bnx2x_mcast_ramrod_params *p,
3130				 enum bnx2x_mcast_cmd cmd)
3131{
3132 int i;
3133 struct bnx2x_mcast_obj *o = p->mcast_obj;
3134 struct bnx2x_raw_obj *r = &o->raw;
3135
3136 /* If CLEAR_ONLY has been requested - clear the registry
3137 * and clear a pending bit.
3138 */
3139 if (!test_bit(RAMROD_DRV_CLR_ONLY, &p->ramrod_flags)) {
3140 u32 mc_filter[MC_HASH_SIZE] = {0};
3141
3142 /* Set the multicast filter bits before writing it into
3143 * the internal memory.
3144 */
3145 switch (cmd) {
3146 case BNX2X_MCAST_CMD_ADD:
3147 bnx2x_mcast_hdl_add_e1h(bp, o, p, mc_filter);
3148 break;
3149
3150 case BNX2X_MCAST_CMD_DEL:
3151			DP(BNX2X_MSG_SP,
3152			   "Invalidating multicast MACs configuration\n");
3153
3154 /* clear the registry */
3155 memset(o->registry.aprox_match.vec, 0,
3156 sizeof(o->registry.aprox_match.vec));
3157 break;
3158
3159 case BNX2X_MCAST_CMD_RESTORE:
3160 bnx2x_mcast_hdl_restore_e1h(bp, o, p, mc_filter);
3161 break;
3162
3163 default:
3164 BNX2X_ERR("Unknown command: %d\n", cmd);
3165 return -EINVAL;
3166 }
3167
3168 /* Set the mcast filter in the internal memory */
3169 for (i = 0; i < MC_HASH_SIZE; i++)
3170 REG_WR(bp, MC_HASH_OFFSET(bp, i), mc_filter[i]);
3171 } else
3172 /* clear the registry */
3173 memset(o->registry.aprox_match.vec, 0,
3174 sizeof(o->registry.aprox_match.vec));
3175
3176 /* We are done */
3177 r->clear_pending(r);
3178
3179 return 0;
3180}
3181
3182static int bnx2x_mcast_validate_e1(struct bnx2x *bp,
3183 struct bnx2x_mcast_ramrod_params *p,
3184				   enum bnx2x_mcast_cmd cmd)
3185{
3186 struct bnx2x_mcast_obj *o = p->mcast_obj;
3187 int reg_sz = o->get_registry_size(o);
3188
3189 switch (cmd) {
3190 /* DEL command deletes all currently configured MACs */
3191 case BNX2X_MCAST_CMD_DEL:
3192 o->set_registry_size(o, 0);
3193 /* Don't break */
3194
3195 /* RESTORE command will restore the entire multicast configuration */
3196 case BNX2X_MCAST_CMD_RESTORE:
3197 p->mcast_list_len = reg_sz;
3198 DP(BNX2X_MSG_SP, "Command %d, p->mcast_list_len=%d\n",
3199 cmd, p->mcast_list_len);
3200 break;
3201
3202 case BNX2X_MCAST_CMD_ADD:
3203 case BNX2X_MCAST_CMD_CONT:
3204 /* Multicast MACs on 57710 are configured as unicast MACs and
3205 * there is only a limited number of CAM entries for that
3206 * matter.
3207 */
3208 if (p->mcast_list_len > o->max_cmd_len) {
3209			BNX2X_ERR("Can't configure more than %d multicast MACs on 57710\n",
3210				  o->max_cmd_len);
3211			return -EINVAL;
3212 }
3213 /* Every configured MAC should be cleared if DEL command is
3214		 * called. Only the last ADD command is relevant since
3215		 * every ADD command overrides the previous configuration.
3216 */
3217 DP(BNX2X_MSG_SP, "p->mcast_list_len=%d\n", p->mcast_list_len);
3218 if (p->mcast_list_len > 0)
3219 o->set_registry_size(o, p->mcast_list_len);
3220
3221 break;
3222
3223 default:
3224 BNX2X_ERR("Unknown command: %d\n", cmd);
3225 return -EINVAL;
3226
3227 }
3228
3229 /* We want to ensure that commands are executed one by one for 57710.
3230	 * Therefore each non-empty command will consume o->max_cmd_len.
3231 */
3232 if (p->mcast_list_len)
3233 o->total_pending_num += o->max_cmd_len;
3234
3235 return 0;
3236}
3237
3238static void bnx2x_mcast_revert_e1(struct bnx2x *bp,
3239 struct bnx2x_mcast_ramrod_params *p,
3240 int old_num_macs)
3241{
3242 struct bnx2x_mcast_obj *o = p->mcast_obj;
3243
3244 o->set_registry_size(o, old_num_macs);
3245
3246	/* If the current command hasn't been handled yet, being here
3247	 * means that it is meant to be dropped and we have to
3248	 * update the number of outstanding MACs accordingly.
3249 */
3250 if (p->mcast_list_len)
3251 o->total_pending_num -= o->max_cmd_len;
3252}
3253
3254static void bnx2x_mcast_set_one_rule_e1(struct bnx2x *bp,
3255 struct bnx2x_mcast_obj *o, int idx,
3256 union bnx2x_mcast_config_data *cfg_data,
3257					enum bnx2x_mcast_cmd cmd)
3258{
3259 struct bnx2x_raw_obj *r = &o->raw;
3260 struct mac_configuration_cmd *data =
3261 (struct mac_configuration_cmd *)(r->rdata);
3262
3263 /* copy mac */
3264 if ((cmd == BNX2X_MCAST_CMD_ADD) || (cmd == BNX2X_MCAST_CMD_RESTORE)) {
3265 bnx2x_set_fw_mac_addr(&data->config_table[idx].msb_mac_addr,
3266 &data->config_table[idx].middle_mac_addr,
3267 &data->config_table[idx].lsb_mac_addr,
3268 cfg_data->mac);
3269
3270 data->config_table[idx].vlan_id = 0;
3271 data->config_table[idx].pf_id = r->func_id;
3272 data->config_table[idx].clients_bit_vector =
3273 cpu_to_le32(1 << r->cl_id);
3274
3275 SET_FLAG(data->config_table[idx].flags,
3276 MAC_CONFIGURATION_ENTRY_ACTION_TYPE,
3277 T_ETH_MAC_COMMAND_SET);
3278 }
3279}
3280
3281/**
3282 * bnx2x_mcast_set_rdata_hdr_e1 - set header values in mac_configuration_cmd
3283 *
3284 * @bp: device handle
3285 * @p: multicast ramrod parameters
3286 * @len: number of rules to handle
3287 */
3288static inline void bnx2x_mcast_set_rdata_hdr_e1(struct bnx2x *bp,
3289 struct bnx2x_mcast_ramrod_params *p,
3290 u8 len)
3291{
3292 struct bnx2x_raw_obj *r = &p->mcast_obj->raw;
3293 struct mac_configuration_cmd *data =
3294 (struct mac_configuration_cmd *)(r->rdata);
3295
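/* Each function gets its own disjoint window of CAM entries
 * (BNX2X_MAX_MULTICAST wide on ASIC, BNX2X_MAX_EMUL_MULTI on emulation),
 * so multicast rules of different functions never overwrite each other.
 */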
3296 u8 offset = (CHIP_REV_IS_SLOW(bp) ?
3297 BNX2X_MAX_EMUL_MULTI*(1 + r->func_id) :
3298 BNX2X_MAX_MULTICAST*(1 + r->func_id));
3299
3300 data->hdr.offset = offset;
3301 data->hdr.client_id = cpu_to_le16(0xff);
3302 data->hdr.echo = cpu_to_le32((r->cid & BNX2X_SWCID_MASK) |
3303 (BNX2X_FILTER_MCAST_PENDING <<
3304 BNX2X_SWCID_SHIFT));
3305 data->hdr.length = len;
3306}
3307
3308/**
3309 * bnx2x_mcast_handle_restore_cmd_e1 - restore command for 57710
3310 *
3311 * @bp: device handle
3312 * @o: multicast object
3313 * @start_idx: index in the registry to start from
3314 * @rdata_idx: index in the ramrod data to start from
3315 *
3316 * The restore command for 57710 is, like all other commands, always a
3317 * standalone command - start_idx and rdata_idx will always be 0. This
3318 * function always succeeds, returning -1 to comply with the 57712 variant.
3320 */
3321static inline int bnx2x_mcast_handle_restore_cmd_e1(
3322 struct bnx2x *bp, struct bnx2x_mcast_obj *o, int start_idx,
3323 int *rdata_idx)
3324{
3325 struct bnx2x_mcast_mac_elem *elem;
3326 int i = 0;
3327 union bnx2x_mcast_config_data cfg_data = {NULL};
3328
3329 /* go through the registry and configure the MACs from it. */
3330 list_for_each_entry(elem, &o->registry.exact_match.macs, link) {
3331 cfg_data.mac = &elem->mac[0];
3332 o->set_one_rule(bp, o, i, &cfg_data, BNX2X_MCAST_CMD_RESTORE);
3333
3334 i++;
3335
3336 DP(BNX2X_MSG_SP, "About to configure %pM mcast MAC\n",
3337 cfg_data.mac);
3338 }
3339
3340 *rdata_idx = i;
3341
3342 return -1;
3343}
3344
3346static inline int bnx2x_mcast_handle_pending_cmds_e1(
3347 struct bnx2x *bp, struct bnx2x_mcast_ramrod_params *p)
3348{
3349 struct bnx2x_pending_mcast_cmd *cmd_pos;
3350 struct bnx2x_mcast_mac_elem *pmac_pos;
3351 struct bnx2x_mcast_obj *o = p->mcast_obj;
3352 union bnx2x_mcast_config_data cfg_data = {NULL};
3353 int cnt = 0;
3354
3356 /* If nothing to be done - return */
3357 if (list_empty(&o->pending_cmds_head))
3358 return 0;
3359
3360 /* Handle the first command only: 57710 issues one command per ramrod */
3361 cmd_pos = list_first_entry(&o->pending_cmds_head,
3362 struct bnx2x_pending_mcast_cmd, link);
3363
3364 switch (cmd_pos->type) {
3365 case BNX2X_MCAST_CMD_ADD:
3366 list_for_each_entry(pmac_pos, &cmd_pos->data.macs_head, link) {
3367 cfg_data.mac = &pmac_pos->mac[0];
3368 o->set_one_rule(bp, o, cnt, &cfg_data, cmd_pos->type);
3369
3370 cnt++;
3371
3372 DP(BNX2X_MSG_SP, "About to configure %pM mcast MAC\n",
3373 pmac_pos->mac);
3374 }
3375 break;
3376
3377 case BNX2X_MCAST_CMD_DEL:
3378 cnt = cmd_pos->data.macs_num;
3379 DP(BNX2X_MSG_SP, "About to delete %d multicast MACs\n", cnt);
3380 break;
3381
3382 case BNX2X_MCAST_CMD_RESTORE:
3383 o->hdl_restore(bp, o, 0, &cnt);
3384 break;
3385
3386 default:
3387 BNX2X_ERR("Unknown command: %d\n", cmd_pos->type);
3388 return -EINVAL;
3389 }
3390
3391 list_del(&cmd_pos->link);
3392 kfree(cmd_pos);
3393
3394 return cnt;
3395}
3396
3397/**
3398 * bnx2x_get_fw_mac_addr - reverse bnx2x_set_fw_mac_addr().
3399 *
3400 * @fw_hi: most significant 16 bits of the MAC, firmware format
3401 * @fw_mid: middle 16 bits of the MAC, firmware format
3402 * @fw_lo: least significant 16 bits of the MAC, firmware format
3403 * @mac: output buffer for the MAC in network byte order
3404 */
3405static inline void bnx2x_get_fw_mac_addr(__le16 *fw_hi, __le16 *fw_mid,
3406 __le16 *fw_lo, u8 *mac)
3407{
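/* The firmware keeps each 16-bit half of the MAC with its two bytes
 * swapped relative to network byte order; undo that swap here.
 */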
3408 mac[1] = ((u8 *)fw_hi)[0];
3409 mac[0] = ((u8 *)fw_hi)[1];
3410 mac[3] = ((u8 *)fw_mid)[0];
3411 mac[2] = ((u8 *)fw_mid)[1];
3412 mac[5] = ((u8 *)fw_lo)[0];
3413 mac[4] = ((u8 *)fw_lo)[1];
3414}
3415
3416/**
3417 * bnx2x_mcast_refresh_registry_e1 - update the registry based on the last ramrod
3418 *
3419 * @bp: device handle
3420 * @o: multicast object
3421 *
3422 * Check the ramrod data first entry flag to see if it's a DELETE or ADD command
3423 * and update the registry correspondingly: if ADD - allocate memory and add
3424 * the entries to the registry (list), if DELETE - clear the registry and free
3425 * the memory.
3426 */
3427static inline int bnx2x_mcast_refresh_registry_e1(struct bnx2x *bp,
3428 struct bnx2x_mcast_obj *o)
3429{
3430 struct bnx2x_raw_obj *raw = &o->raw;
3431 struct bnx2x_mcast_mac_elem *elem;
3432 struct mac_configuration_cmd *data =
3433 (struct mac_configuration_cmd *)(raw->rdata);
3434
3435 /* If first entry contains a SET bit - the command was ADD,
3436 * otherwise - DEL_ALL
3437 */
3438 if (GET_FLAG(data->config_table[0].flags,
3439 MAC_CONFIGURATION_ENTRY_ACTION_TYPE)) {
3440 int i, len = data->hdr.length;
3441
3442 /* Nothing to do for a RESTORE command - the registry is already populated */
3443 if (!list_empty(&o->registry.exact_match.macs))
3444 return 0;
3445
3446 elem = kcalloc(len, sizeof(*elem), GFP_ATOMIC);
3447 if (!elem) {
3448 BNX2X_ERR("Failed to allocate registry memory\n");
3449 return -ENOMEM;
3450 }
3451
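/* The registry is one kcalloc() block: the list links simply thread
 * through it, which is why the DEL path below can release everything
 * with a single kfree().
 */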
3452 for (i = 0; i < len; i++, elem++) {
3453 bnx2x_get_fw_mac_addr(
3454 &data->config_table[i].msb_mac_addr,
3455 &data->config_table[i].middle_mac_addr,
3456 &data->config_table[i].lsb_mac_addr,
3457 elem->mac);
3458 DP(BNX2X_MSG_SP, "Adding registry entry for [%pM]\n",
3459 elem->mac);
3460 list_add_tail(&elem->link,
3461 &o->registry.exact_match.macs);
3462 }
3463 } else {
3464 elem = list_first_entry(&o->registry.exact_match.macs,
3465 struct bnx2x_mcast_mac_elem, link);
3466 DP(BNX2X_MSG_SP, "Deleting a registry\n");
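/* The registry was allocated as a single kcalloc() block (see the ADD
 * path above), so freeing its first element releases the whole array.
 */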
3467 kfree(elem);
3468 INIT_LIST_HEAD(&o->registry.exact_match.macs);
3469 }
3470
3471 return 0;
3472}
3473
3474static int bnx2x_mcast_setup_e1(struct bnx2x *bp,
3475 struct bnx2x_mcast_ramrod_params *p,
3476 enum bnx2x_mcast_cmd cmd)
3477{
3478 struct bnx2x_mcast_obj *o = p->mcast_obj;
3479 struct bnx2x_raw_obj *raw = &o->raw;
3480 struct mac_configuration_cmd *data =
3481 (struct mac_configuration_cmd *)(raw->rdata);
3482 int cnt = 0, i, rc;
3483
3484 /* Reset the ramrod data buffer */
3485 memset(data, 0, sizeof(*data));
3486
3487 /* First set all entries as invalid */
3488 for (i = 0; i < o->max_cmd_len; i++)
3489 SET_FLAG(data->config_table[i].flags,
3490 MAC_CONFIGURATION_ENTRY_ACTION_TYPE,
3491 T_ETH_MAC_COMMAND_INVALIDATE);
3492
3493 /* Handle pending commands first */
3494 cnt = bnx2x_mcast_handle_pending_cmds_e1(bp, p);
3495
3496 /* If there are no more pending commands - clear SCHEDULED state */
3497 if (list_empty(&o->pending_cmds_head))
3498 o->clear_sched(o);
3499
3500 /* The below may be true iff there were no pending commands */
3501 if (!cnt)
3502 cnt = bnx2x_mcast_handle_current_cmd(bp, p, cmd, 0);
3503
3504 /* For 57710 every command has o->max_cmd_len length to ensure that
3505 * commands are done one at a time.
3506 */
3507 o->total_pending_num -= o->max_cmd_len;
3508
3509 /* send a ramrod */
3510
3511 WARN_ON(cnt > o->max_cmd_len);
3512
3513 /* Set ramrod header (in particular, a number of entries to update) */
3514 bnx2x_mcast_set_rdata_hdr_e1(bp, p, (u8)cnt);
3515
3516 /* Update the registry: the registry contents must always be up to
3517 * date in order to be able to execute a RESTORE opcode. Here
3518 * we use the fact that for 57710 we send one command at a time,
3519 * hence we may take the registry update out of the command handling
3520 * and do it in a simpler way here.
3521 */
3522 rc = bnx2x_mcast_refresh_registry_e1(bp, o);
3523 if (rc)
3524 return rc;
3525
3526 /* If CLEAR_ONLY was requested - don't send a ramrod and clear
3527 * RAMROD_PENDING status immediately.
3528 */
3530 if (test_bit(RAMROD_DRV_CLR_ONLY, &p->ramrod_flags)) {
3531 raw->clear_pending(raw);
3532 return 0;
3533 } else {
3534 /* No need for an explicit memory barrier here: ordering between
3535 * writing the SPQ element and updating the SPQ producer is
3536 * enforced by the full memory barrier inside bnx2x_sp_post().
3537 */
3541
3542 /* Send a ramrod */
3543 rc = bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, raw->cid,
3544 U64_HI(raw->rdata_mapping),
3545 U64_LO(raw->rdata_mapping),
3546 ETH_CONNECTION_TYPE);
3547 if (rc)
3548 return rc;
3549
3550 /* Ramrod completion is pending */
3551 return 1;
3552 }
3554}
3555
3556static int bnx2x_mcast_get_registry_size_exact(struct bnx2x_mcast_obj *o)
3557{
3558 return o->registry.exact_match.num_macs_set;
3559}
3560
3561static int bnx2x_mcast_get_registry_size_aprox(struct bnx2x_mcast_obj *o)
3562{
3563 return o->registry.aprox_match.num_bins_set;
3564}
3565
3566static void bnx2x_mcast_set_registry_size_exact(struct bnx2x_mcast_obj *o,
3567 int n)
3568{
3569 o->registry.exact_match.num_macs_set = n;
3570}
3571
3572static void bnx2x_mcast_set_registry_size_aprox(struct bnx2x_mcast_obj *o,
3573 int n)
3574{
3575 o->registry.aprox_match.num_bins_set = n;
3576}
3577
3578int bnx2x_config_mcast(struct bnx2x *bp,
3579 struct bnx2x_mcast_ramrod_params *p,
3580 enum bnx2x_mcast_cmd cmd)
3581{
3582 struct bnx2x_mcast_obj *o = p->mcast_obj;
3583 struct bnx2x_raw_obj *r = &o->raw;
3584 int rc = 0, old_reg_size;
3585
3586 /* This is needed to recover number of currently configured mcast macs
3587 * in case of failure.
3588 */
3589 old_reg_size = o->get_registry_size(o);
3590
3591 /* Do some calculations and checks */
3592 rc = o->validate(bp, p, cmd);
3593 if (rc)
3594 return rc;
3595
3596 /* Return if there is no work to do */
3597 if ((!p->mcast_list_len) && (!o->check_sched(o)))
3598 return 0;
3599
3600 DP(BNX2X_MSG_SP, "o->total_pending_num=%d p->mcast_list_len=%d o->max_cmd_len=%d\n",
3601 o->total_pending_num, p->mcast_list_len, o->max_cmd_len);
3602
3603 /* Enqueue the current command to the pending list if we can't complete
3604 * it in the current iteration
3605 */
3606 if (r->check_pending(r) ||
3607 ((o->max_cmd_len > 0) && (o->total_pending_num > o->max_cmd_len))) {
3608 rc = o->enqueue_cmd(bp, p->mcast_obj, p, cmd);
3609 if (rc < 0)
3610 goto error_exit1;
3611
3612 /* As long as the current command is in a command list we
3613 * don't need to handle it separately.
3614 */
3615 p->mcast_list_len = 0;
3616 }
3617
3618 if (!r->check_pending(r)) {
3620 /* Set 'pending' state */
3621 r->set_pending(r);
3622
3623 /* Configure the new classification in the chip */
3624 rc = o->config_mcast(bp, p, cmd);
3625 if (rc < 0)
3626 goto error_exit2;
3627
3628 /* Wait for a ramrod completion if was requested */
3629 if (test_bit(RAMROD_COMP_WAIT, &p->ramrod_flags))
3630 rc = o->wait_comp(bp, o);
3631 }
3632
3633 return rc;
3634
3635error_exit2:
3636 r->clear_pending(r);
3637
3638error_exit1:
3639 o->revert(bp, p, old_reg_size);
3640
3641 return rc;
3642}
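
/* Illustrative usage (sketch, not lifted verbatim from the driver): clearing
 * all configured multicast MACs with a completion wait -
 *
 *	struct bnx2x_mcast_ramrod_params rparam = {NULL};
 *
 *	rparam.mcast_obj = &bp->mcast_obj;
 *	__set_bit(RAMROD_COMP_WAIT, &rparam.ramrod_flags);
 *	rc = bnx2x_config_mcast(bp, &rparam, BNX2X_MCAST_CMD_DEL);
 *
 * rc == 0 means the command completed, rc < 0 is an error, and rc > 0 would
 * indicate a completion that is still pending.
 */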
3643
3644static void bnx2x_mcast_clear_sched(struct bnx2x_mcast_obj *o)
3645{
3646 smp_mb__before_clear_bit();
3647 clear_bit(o->sched_state, o->raw.pstate);
3648 smp_mb__after_clear_bit();
3649}
3650
3651static void bnx2x_mcast_set_sched(struct bnx2x_mcast_obj *o)
3652{
3653 smp_mb__before_clear_bit();
3654 set_bit(o->sched_state, o->raw.pstate);
3655 smp_mb__after_clear_bit();
3656}
3657
3658static bool bnx2x_mcast_check_sched(struct bnx2x_mcast_obj *o)
3659{
3660 return !!test_bit(o->sched_state, o->raw.pstate);
3661}
3662
3663static bool bnx2x_mcast_check_pending(struct bnx2x_mcast_obj *o)
3664{
3665 return o->raw.check_pending(&o->raw) || o->check_sched(o);
3666}
3667
3668void bnx2x_init_mcast_obj(struct bnx2x *bp,
3669 struct bnx2x_mcast_obj *mcast_obj,
3670 u8 mcast_cl_id, u32 mcast_cid, u8 func_id,
3671 u8 engine_id, void *rdata, dma_addr_t rdata_mapping,
3672 int state, unsigned long *pstate, bnx2x_obj_type type)
3673{
3674 memset(mcast_obj, 0, sizeof(*mcast_obj));
3675
3676 bnx2x_init_raw_obj(&mcast_obj->raw, mcast_cl_id, mcast_cid, func_id,
3677 rdata, rdata_mapping, state, pstate, type);
3678
3679 mcast_obj->engine_id = engine_id;
3680
3681 INIT_LIST_HEAD(&mcast_obj->pending_cmds_head);
3682
3683 mcast_obj->sched_state = BNX2X_FILTER_MCAST_SCHED;
3684 mcast_obj->check_sched = bnx2x_mcast_check_sched;
3685 mcast_obj->set_sched = bnx2x_mcast_set_sched;
3686 mcast_obj->clear_sched = bnx2x_mcast_clear_sched;
3687
3688 if (CHIP_IS_E1(bp)) {
3689 mcast_obj->config_mcast = bnx2x_mcast_setup_e1;
3690 mcast_obj->enqueue_cmd = bnx2x_mcast_enqueue_cmd;
3691 mcast_obj->hdl_restore =
3692 bnx2x_mcast_handle_restore_cmd_e1;
3693 mcast_obj->check_pending = bnx2x_mcast_check_pending;
3694
3695 if (CHIP_REV_IS_SLOW(bp))
3696 mcast_obj->max_cmd_len = BNX2X_MAX_EMUL_MULTI;
3697 else
3698 mcast_obj->max_cmd_len = BNX2X_MAX_MULTICAST;
3699
3700 mcast_obj->wait_comp = bnx2x_mcast_wait;
3701 mcast_obj->set_one_rule = bnx2x_mcast_set_one_rule_e1;
3702 mcast_obj->validate = bnx2x_mcast_validate_e1;
3703 mcast_obj->revert = bnx2x_mcast_revert_e1;
3704 mcast_obj->get_registry_size =
3705 bnx2x_mcast_get_registry_size_exact;
3706 mcast_obj->set_registry_size =
3707 bnx2x_mcast_set_registry_size_exact;
3708
3709 /* 57710 is the only chip that uses the exact match for mcast
3710 * at the moment.
3711 */
3712 INIT_LIST_HEAD(&mcast_obj->registry.exact_match.macs);
3713
3714 } else if (CHIP_IS_E1H(bp)) {
3715 mcast_obj->config_mcast = bnx2x_mcast_setup_e1h;
3716 mcast_obj->enqueue_cmd = NULL;
3717 mcast_obj->hdl_restore = NULL;
3718 mcast_obj->check_pending = bnx2x_mcast_check_pending;
3719
3720 /* 57711 doesn't send a ramrod, so it has unlimited credit
3721 * for one command.
3722 */
3723 mcast_obj->max_cmd_len = -1;
3724 mcast_obj->wait_comp = bnx2x_mcast_wait;
3725 mcast_obj->set_one_rule = NULL;
3726 mcast_obj->validate = bnx2x_mcast_validate_e1h;
3727 mcast_obj->revert = bnx2x_mcast_revert_e1h;
3728 mcast_obj->get_registry_size =
3729 bnx2x_mcast_get_registry_size_aprox;
3730 mcast_obj->set_registry_size =
3731 bnx2x_mcast_set_registry_size_aprox;
3732 } else {
3733 mcast_obj->config_mcast = bnx2x_mcast_setup_e2;
3734 mcast_obj->enqueue_cmd = bnx2x_mcast_enqueue_cmd;
3735 mcast_obj->hdl_restore =
3736 bnx2x_mcast_handle_restore_cmd_e2;
3737 mcast_obj->check_pending = bnx2x_mcast_check_pending;
3738 /* TODO: There should be a proper HSI define for this number!!!
3739 */
3740 mcast_obj->max_cmd_len = 16;
3741 mcast_obj->wait_comp = bnx2x_mcast_wait;
3742 mcast_obj->set_one_rule = bnx2x_mcast_set_one_rule_e2;
3743 mcast_obj->validate = bnx2x_mcast_validate_e2;
3744 mcast_obj->revert = bnx2x_mcast_revert_e2;
3745 mcast_obj->get_registry_size =
3746 bnx2x_mcast_get_registry_size_aprox;
3747 mcast_obj->set_registry_size =
3748 bnx2x_mcast_set_registry_size_aprox;
3749 }
3750}
3751
3752/*************************** Credit handling **********************************/
3753
3754/**
3755 * __atomic_add_ifless - add if the result is less than a given value.
3756 *
3757 * @v: pointer of type atomic_t
3758 * @a: the amount to add to v...
3759 * @u: ...if (v + a) is less than u.
3760 *
3761 * returns true if (v + a) was less than u, and false otherwise.
3762 *
3763 */
3764static inline bool __atomic_add_ifless(atomic_t *v, int a, int u)
3765{
3766 int c, old;
3767
3768 c = atomic_read(v);
3769 for (;;) {
3770 if (unlikely(c + a >= u))
3771 return false;
3772
3773 old = atomic_cmpxchg((v), c, c + a);
3774 if (likely(old == c))
3775 break;
3776 c = old;
3777 }
3778
3779 return true;
3780}
3781
3782/**
3783 * __atomic_dec_ifmoe - decrement if the result is greater than or equal to
3784 * a given value.
3785 *
3786 * @v: pointer of type atomic_t
3787 * @a: the amount to dec from v...
3788 * @u: ...if (v - a) is greater than or equal to u.
3789 *
3790 * returns true if (v - a) was greater than or equal to u, and false
3791 * otherwise.
3791 */
3792static inline bool __atomic_dec_ifmoe(atomic_t *v, int a, int u)
3793{
3794 int c, old;
3795
3796 c = atomic_read(v);
3797 for (;;) {
3798 if (unlikely(c - a < u))
3799 return false;
3800
3801 old = atomic_cmpxchg((v), c, c - a);
3802 if (likely(old == c))
3803 break;
3804 c = old;
3805 }
3806
3807 return true;
3808}
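
/* Worked example (illustrative): with o->credit == 5,
 * __atomic_dec_ifmoe(&o->credit, 3, 0) succeeds and leaves 2 credits, while a
 * second __atomic_dec_ifmoe(&o->credit, 3, 0) fails and leaves the counter
 * untouched; the cmpxchg() loop retries only when another CPU races.
 */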
3809
3810static bool bnx2x_credit_pool_get(struct bnx2x_credit_pool_obj *o, int cnt)
3811{
3812 bool rc;
3813
3814 smp_mb();
3815 rc = __atomic_dec_ifmoe(&o->credit, cnt, 0);
3816 smp_mb();
3817
3818 return rc;
3819}
3820
3821static bool bnx2x_credit_pool_put(struct bnx2x_credit_pool_obj *o, int cnt)
3822{
3823 bool rc;
3824
3825 smp_mb();
3826
3827 /* Don't allow a refill if credit + cnt > pool_sz */
3828 rc = __atomic_add_ifless(&o->credit, cnt, o->pool_sz + 1);
3829
3830 smp_mb();
3831
3832 return rc;
3833}
3834
3835static int bnx2x_credit_pool_check(struct bnx2x_credit_pool_obj *o)
3836{
3837 int cur_credit;
3838
3839 smp_mb();
3840 cur_credit = atomic_read(&o->credit);
3841
3842 return cur_credit;
3843}
3844
3845static bool bnx2x_credit_pool_always_true(struct bnx2x_credit_pool_obj *o,
3846 int cnt)
3847{
3848 return true;
3849}
3850
3852static bool bnx2x_credit_pool_get_entry(
3853 struct bnx2x_credit_pool_obj *o,
3854 int *offset)
3855{
3856 int idx, vec, i;
3857
3858 *offset = -1;
3859
3860 /* Find a free "internal cam-offset", then add this object's base to it */
3861 for (vec = 0; vec < BNX2X_POOL_VEC_SIZE; vec++) {
3862
3863 /* Skip the current vector if there are no free entries in it */
3864 if (!o->pool_mirror[vec])
3865 continue;
3866
3867 /* If we got here, we are going to find a free entry */
3868 for (idx = vec * BIT_VEC64_ELEM_SZ, i = 0;
3869 i < BIT_VEC64_ELEM_SZ; idx++, i++)
3870
3871 if (BIT_VEC64_TEST_BIT(o->pool_mirror, idx)) {
3872 /* Got one!! */
3873 BIT_VEC64_CLEAR_BIT(o->pool_mirror, idx);
3874 *offset = o->base_pool_offset + idx;
3875 return true;
3876 }
3877 }
3878
3879 return false;
3880}
3881
3882static bool bnx2x_credit_pool_put_entry(
3883 struct bnx2x_credit_pool_obj *o,
3884 int offset)
3885{
3886 if (offset < o->base_pool_offset)
3887 return false;
3888
3889 offset -= o->base_pool_offset;
3890
3891 if (offset >= o->pool_sz)
3892 return false;
3893
3894 /* Return the entry to the pool */
3895 BIT_VEC64_SET_BIT(o->pool_mirror, offset);
3896
3897 return true;
3898}
3899
3900static bool bnx2x_credit_pool_put_entry_always_true(
3901 struct bnx2x_credit_pool_obj *o,
3902 int offset)
3903{
3904 return true;
3905}
3906
3907static bool bnx2x_credit_pool_get_entry_always_true(
3908 struct bnx2x_credit_pool_obj *o,
3909 int *offset)
3910{
3911 *offset = -1;
3912 return true;
3913}

3914/**
3915 * bnx2x_init_credit_pool - initialize credit pool internals.
3916 *
3917 * @p: credit pool object
3918 * @base: Base entry in the CAM to use.
3919 * @credit: pool size.
3920 *
3921 * If base is negative no CAM entries handling will be performed.
3922 * If credit is negative pool operations will always succeed (unlimited pool).
3923 *
3924 */
3925static inline void bnx2x_init_credit_pool(struct bnx2x_credit_pool_obj *p,
3926 int base, int credit)
3927{
3928 /* Zero the object first */
3929 memset(p, 0, sizeof(*p));
3930
3931 /* Set the table to all 1s */
3932 memset(&p->pool_mirror, 0xff, sizeof(p->pool_mirror));
3933
3934 /* Init a pool as full */
3935 atomic_set(&p->credit, credit);
3936
3937 /* The total pool size */
3938 p->pool_sz = credit;
3939
3940 p->base_pool_offset = base;
3941
3942 /* Commit the change */
3943 smp_mb();
3944
3945 p->check = bnx2x_credit_pool_check;
3946
3947 /* if pool credit is negative - disable the checks */
3948 if (credit >= 0) {
3949 p->put = bnx2x_credit_pool_put;
3950 p->get = bnx2x_credit_pool_get;
3951 p->put_entry = bnx2x_credit_pool_put_entry;
3952 p->get_entry = bnx2x_credit_pool_get_entry;
3953 } else {
3954 p->put = bnx2x_credit_pool_always_true;
3955 p->get = bnx2x_credit_pool_always_true;
3956 p->put_entry = bnx2x_credit_pool_put_entry_always_true;
3957 p->get_entry = bnx2x_credit_pool_get_entry_always_true;
3958 }
3959
3960 /* If base is negative - disable entries handling */
3961 if (base < 0) {
3962 p->put_entry = bnx2x_credit_pool_put_entry_always_true;
3963 p->get_entry = bnx2x_credit_pool_get_entry_always_true;
3964 }
3965}
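
/* Worked example (illustrative): bnx2x_init_credit_pool(p, 32, 16) manages
 * CAM entries 32..47 - get()/put() move the credit counter while
 * get_entry()/put_entry() track which specific entries are in use;
 * bnx2x_init_credit_pool(p, -1, 16) keeps the credit accounting but skips the
 * per-entry bookkeeping, and bnx2x_init_credit_pool(p, 0, -1) makes every
 * operation succeed.
 */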
3966
3967void bnx2x_init_mac_credit_pool(struct bnx2x *bp,
3968 struct bnx2x_credit_pool_obj *p, u8 func_id,
3969 u8 func_num)
3970{
3971/* TODO: this will be defined in consts as well... */
3972#define BNX2X_CAM_SIZE_EMUL 5
3973
3974 int cam_sz;
3975
3976 if (CHIP_IS_E1(bp)) {
3977 /* In E1, multicast MACs are stored in the CAM as well */
3978 if (!CHIP_REV_IS_SLOW(bp))
3979 cam_sz = (MAX_MAC_CREDIT_E1 / 2) - BNX2X_MAX_MULTICAST;
3980 else
3981 cam_sz = BNX2X_CAM_SIZE_EMUL - BNX2X_MAX_EMUL_MULTI;
3982
3983 bnx2x_init_credit_pool(p, func_id * cam_sz, cam_sz);
3984
3985 } else if (CHIP_IS_E1H(bp)) {
3986 /* CAM credit is equally divided between all active functions
3987 * on the PORT.
3988 */
3989 if (func_num > 0) {
3990 if (!CHIP_REV_IS_SLOW(bp))
3991 cam_sz = (MAX_MAC_CREDIT_E1H / (2*func_num));
3992 else
3993 cam_sz = BNX2X_CAM_SIZE_EMUL;
3994 bnx2x_init_credit_pool(p, func_id * cam_sz, cam_sz);
3995 } else {
3996 /* this should never happen! Block MAC operations. */
3997 bnx2x_init_credit_pool(p, 0, 0);
3998 }
3999
4000 } else {
4001
4002 /* CAM credit is equally divided between all active functions
4003 * on the PATH.
4004 */
4005 if (func_num > 0) {
4007 if (!CHIP_REV_IS_SLOW(bp))
4008 cam_sz = (MAX_MAC_CREDIT_E2 / func_num);
4009 else
4010 cam_sz = BNX2X_CAM_SIZE_EMUL;
4011
4012 /*
4013 * No need for CAM entries handling for 57712 and
4014 * newer.
4015 */
4016 bnx2x_init_credit_pool(p, -1, cam_sz);
4017 } else {
4018 /* this should never happen! Block MAC operations. */
4019 bnx2x_init_credit_pool(p, 0, 0);
4020 }
4021
4022 }
4023}
4024
4025void bnx2x_init_vlan_credit_pool(struct bnx2x *bp,
4026 struct bnx2x_credit_pool_obj *p,
4027 u8 func_id,
4028 u8 func_num)
4029{
4030 if (CHIP_IS_E1x(bp)) {
4031 /* There is no VLAN credit in HW on 57710 and 57711; only
4032 * MAC / MAC-VLAN pairs can be set.
4033 */
4035 bnx2x_init_credit_pool(p, 0, -1);
4036 } else {
4037 /* CAM credit is equally divided between all active functions
4038 * on the PATH.
4039 */
4041 if (func_num > 0) {
4042 int credit = MAX_VLAN_CREDIT_E2 / func_num;
4043 bnx2x_init_credit_pool(p, func_id * credit, credit);
4044 } else
4045 /* this should never happen! Block VLAN operations. */
4046 bnx2x_init_credit_pool(p, 0, 0);
4047 }
4048}
4049
4050/****************** RSS Configuration ******************/
4051/**
4052 * bnx2x_debug_print_ind_table - prints the indirection table configuration.
4053 *
4054 * @bp: driver handle
4055 * @p: pointer to rss configuration
4056 *
4057 * Prints it when NETIF_MSG_IFUP debug level is configured.
4058 */
4059static inline void bnx2x_debug_print_ind_table(struct bnx2x *bp,
4060 struct bnx2x_config_rss_params *p)
4061{
4062 int i;
4063
4064 DP(BNX2X_MSG_SP, "Setting indirection table to:\n");
4065 DP(BNX2X_MSG_SP, "0x0000: ");
4066 for (i = 0; i < T_ETH_INDIRECTION_TABLE_SIZE; i++) {
4067 DP_CONT(BNX2X_MSG_SP, "0x%02x ", p->ind_table[i]);
4068
4069 /* Print 4 bytes in a line */
4070 if ((i + 1 < T_ETH_INDIRECTION_TABLE_SIZE) &&
4071 (((i + 1) & 0x3) == 0)) {
4072 DP_CONT(BNX2X_MSG_SP, "\n");
4073 DP(BNX2X_MSG_SP, "0x%04x: ", i + 1);
4074 }
4075 }
4076
4077 DP_CONT(BNX2X_MSG_SP, "\n");
4078}
4079
4080/**
4081 * bnx2x_setup_rss - configure RSS
4082 *
4083 * @bp: device handle
4084 * @p: rss configuration
4085 *
4086 * Sends an RSS_UPDATE ramrod for that purpose.
4087 */
4088static int bnx2x_setup_rss(struct bnx2x *bp,
4089 struct bnx2x_config_rss_params *p)
4090{
4091 struct bnx2x_rss_config_obj *o = p->rss_obj;
4092 struct bnx2x_raw_obj *r = &o->raw;
4093 struct eth_rss_update_ramrod_data *data =
4094 (struct eth_rss_update_ramrod_data *)(r->rdata);
4095 u8 rss_mode = 0;
4096 int rc;
4097
4098 memset(data, 0, sizeof(*data));
4099
4100 DP(BNX2X_MSG_SP, "Configuring RSS\n");
4101
4102 /* Set the echo field: the SW CID and state are echoed back in the
4103 * ramrod completion so it can be matched to this object.
4104 */
4105 data->echo = cpu_to_le32((r->cid & BNX2X_SWCID_MASK) |
4106 (r->state << BNX2X_SWCID_SHIFT));
4107
4106 /* RSS mode */
4107 if (test_bit(BNX2X_RSS_MODE_DISABLED, &p->rss_flags))
4108 rss_mode = ETH_RSS_MODE_DISABLED;
4109 else if (test_bit(BNX2X_RSS_MODE_REGULAR, &p->rss_flags))
4110 rss_mode = ETH_RSS_MODE_REGULAR;
4111
4112 data->rss_mode = rss_mode;
4113
4114 DP(BNX2X_MSG_SP, "rss_mode=%d\n", rss_mode);
4115
4116 /* RSS capabilities */
4117 if (test_bit(BNX2X_RSS_IPV4, &p->rss_flags))
4118 data->capabilities |=
4119 ETH_RSS_UPDATE_RAMROD_DATA_IPV4_CAPABILITY;
4120
4121 if (test_bit(BNX2X_RSS_IPV4_TCP, &p->rss_flags))
4122 data->capabilities |=
4123 ETH_RSS_UPDATE_RAMROD_DATA_IPV4_TCP_CAPABILITY;
4124
4125 if (test_bit(BNX2X_RSS_IPV4_UDP, &p->rss_flags))
4126 data->capabilities |=
4127 ETH_RSS_UPDATE_RAMROD_DATA_IPV4_UDP_CAPABILITY;
4128
4129 if (test_bit(BNX2X_RSS_IPV6, &p->rss_flags))
4130 data->capabilities |=
4131 ETH_RSS_UPDATE_RAMROD_DATA_IPV6_CAPABILITY;
4132
4133 if (test_bit(BNX2X_RSS_IPV6_TCP, &p->rss_flags))
4134 data->capabilities |=
4135 ETH_RSS_UPDATE_RAMROD_DATA_IPV6_TCP_CAPABILITY;
4136
4137 if (test_bit(BNX2X_RSS_IPV6_UDP, &p->rss_flags))
4138 data->capabilities |=
4139 ETH_RSS_UPDATE_RAMROD_DATA_IPV6_UDP_CAPABILITY;
4140
4141 /* Hashing mask */
4142 data->rss_result_mask = p->rss_result_mask;
4143
4144 /* RSS engine ID */
4145 data->rss_engine_id = o->engine_id;
4146
4147 DP(BNX2X_MSG_SP, "rss_engine_id=%d\n", data->rss_engine_id);
4148
4149 /* Indirection table */
4150 memcpy(data->indirection_table, p->ind_table,
4151 T_ETH_INDIRECTION_TABLE_SIZE);
4152
4153 /* Remember the last configuration */
4154 memcpy(o->ind_table, p->ind_table, T_ETH_INDIRECTION_TABLE_SIZE);
4155
4156 /* Print the indirection table */
4157 if (netif_msg_ifup(bp))
4158 bnx2x_debug_print_ind_table(bp, p);
4159
4160 /* RSS keys */
4161 if (test_bit(BNX2X_RSS_SET_SRCH, &p->rss_flags)) {
4162 memcpy(&data->rss_key[0], &p->rss_key[0],
4163 sizeof(data->rss_key));
4164 data->capabilities |= ETH_RSS_UPDATE_RAMROD_DATA_UPDATE_RSS_KEY;
4165 }
4166
4167 /* No need for an explicit memory barrier here: ordering between
4168 * writing the SPQ element and updating the SPQ producer is
4169 * enforced by the full memory barrier inside bnx2x_sp_post().
4170 */
4171
4175 /* Send a ramrod */
4176 rc = bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_RSS_UPDATE, r->cid,
4177 U64_HI(r->rdata_mapping),
4178 U64_LO(r->rdata_mapping),
4179 ETH_CONNECTION_TYPE);
4180
4181 if (rc < 0)
4182 return rc;
4183
4184 return 1;
4185}
4186
4187void bnx2x_get_rss_ind_table(struct bnx2x_rss_config_obj *rss_obj,
4188 u8 *ind_table)
4189{
4190 memcpy(ind_table, rss_obj->ind_table, sizeof(rss_obj->ind_table));
4191}
4192
4193int bnx2x_config_rss(struct bnx2x *bp,
4194 struct bnx2x_config_rss_params *p)
4195{
4196 int rc;
4197 struct bnx2x_rss_config_obj *o = p->rss_obj;
4198 struct bnx2x_raw_obj *r = &o->raw;
4199
4200 /* Do nothing if only driver cleanup was requested */
4201 if (test_bit(RAMROD_DRV_CLR_ONLY, &p->ramrod_flags))
4202 return 0;
4203
4204 r->set_pending(r);
4205
4206 rc = o->config_rss(bp, p);
4207 if (rc < 0) {
4208 r->clear_pending(r);
4209 return rc;
4210 }
4211
4212 if (test_bit(RAMROD_COMP_WAIT, &p->ramrod_flags))
4213 rc = r->wait_comp(bp, r);
4214
4215 return rc;
4216}
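
/* Illustrative usage (sketch; the mask value is an example assumption, not
 * taken from the driver):
 *
 *	struct bnx2x_config_rss_params params = {NULL};
 *
 *	params.rss_obj = &bp->rss_conf_obj;
 *	__set_bit(BNX2X_RSS_MODE_REGULAR, &params.rss_flags);
 *	__set_bit(RAMROD_COMP_WAIT, &params.ramrod_flags);
 *	params.rss_result_mask = 0x7;	 (example mask for 8 RSS queues)
 *	memcpy(params.ind_table, table, T_ETH_INDIRECTION_TABLE_SIZE);
 *	rc = bnx2x_config_rss(bp, &params);
 */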
4217
4219void bnx2x_init_rss_config_obj(struct bnx2x *bp,
4220 struct bnx2x_rss_config_obj *rss_obj,
4221 u8 cl_id, u32 cid, u8 func_id, u8 engine_id,
4222 void *rdata, dma_addr_t rdata_mapping,
4223 int state, unsigned long *pstate,
4224 bnx2x_obj_type type)
4225{
4226 bnx2x_init_raw_obj(&rss_obj->raw, cl_id, cid, func_id, rdata,
4227 rdata_mapping, state, pstate, type);
4228
4229 rss_obj->engine_id = engine_id;
4230 rss_obj->config_rss = bnx2x_setup_rss;
4231}
4232
4233/********************** Queue state object ***********************************/
4234
4235/**
4236 * bnx2x_queue_state_change - perform Queue state change transition
4237 *
4238 * @bp: device handle
4239 * @params: parameters to perform the transition
4240 *
4241 * returns 0 in case of successfully completed transition, negative error
4242 * code in case of failure, or a positive (EBUSY) value if a completion
4243 * is still pending (possible only if RAMROD_COMP_WAIT is
4244 * not set in params->ramrod_flags for asynchronous commands).
4245 *
4246 */
4247int bnx2x_queue_state_change(struct bnx2x *bp,
4248 struct bnx2x_queue_state_params *params)
4249{
4250 struct bnx2x_queue_sp_obj *o = params->q_obj;
4251 int rc, pending_bit;
4252 unsigned long *pending = &o->pending;
4253
4254 /* Check that the requested transition is legal */
4255 rc = o->check_transition(bp, o, params);
4256 if (rc) {
4257 BNX2X_ERR("check transition returned an error. rc %d\n", rc);
4258 return -EINVAL;
4259 }
4260
4261 /* Set "pending" bit */
4262 DP(BNX2X_MSG_SP, "pending bit was=%lx\n", o->pending);
4263 pending_bit = o->set_pending(o, params);
4264 DP(BNX2X_MSG_SP, "pending bit now=%lx\n", o->pending);
4265
4266 /* Don't send a command if only driver cleanup was requested */
4267 if (test_bit(RAMROD_DRV_CLR_ONLY, &params->ramrod_flags))
4268 o->complete_cmd(bp, o, pending_bit);
4269 else {
4270 /* Send a ramrod */
4271 rc = o->send_cmd(bp, params);
4272 if (rc) {
4273 o->next_state = BNX2X_Q_STATE_MAX;
4274 clear_bit(pending_bit, pending);
4275 smp_mb__after_clear_bit();
4276 return rc;
4277 }
4278
4279 if (test_bit(RAMROD_COMP_WAIT, &params->ramrod_flags)) {
4280 rc = o->wait_comp(bp, o, pending_bit);
4281 if (rc)
4282 return rc;
4283
4284 return 0;
4285 }
4286 }
4287
4288 return !!test_bit(pending_bit, pending);
4289}
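
/* Illustrative usage (sketch; bnx2x_sp_obj() follows the driver's naming but
 * is assumed here for the example):
 *
 *	struct bnx2x_queue_state_params q_params = {NULL};
 *
 *	q_params.q_obj = &bnx2x_sp_obj(bp, fp).q_obj;
 *	q_params.cmd = BNX2X_Q_CMD_EMPTY;
 *	__set_bit(RAMROD_COMP_WAIT, &q_params.ramrod_flags);
 *	rc = bnx2x_queue_state_change(bp, &q_params);
 *
 * With RAMROD_COMP_WAIT set, the call returns only once the EMPTY ramrod has
 * completed (or an error occurred).
 */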
4290
4292static int bnx2x_queue_set_pending(struct bnx2x_queue_sp_obj *obj,
4293 struct bnx2x_queue_state_params *params)
4294{
4295 enum bnx2x_queue_cmd cmd = params->cmd, bit;
4296
4297 /* ACTIVATE and DEACTIVATE commands are implemented on top of
4298 * UPDATE command.
4299 */
4300 if ((cmd == BNX2X_Q_CMD_ACTIVATE) ||
4301 (cmd == BNX2X_Q_CMD_DEACTIVATE))
4302 bit = BNX2X_Q_CMD_UPDATE;
4303 else
4304 bit = cmd;
4305
4306 set_bit(bit, &obj->pending);
4307 return bit;
4308}
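
/* The bit returned above is what bnx2x_queue_state_change() waits on when
 * RAMROD_COMP_WAIT is set, and what bnx2x_queue_comp_cmd() clears when the
 * matching completion arrives.
 */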
4309
4310static int bnx2x_queue_wait_comp(struct bnx2x *bp,
4311 struct bnx2x_queue_sp_obj *o,
4312 enum bnx2x_queue_cmd cmd)
4313{
4314 return bnx2x_state_wait(bp, cmd, &o->pending);
4315}
4316
4317/**
4318 * bnx2x_queue_comp_cmd - complete the state change command.
4319 *
4320 * @bp: device handle
4321 * @o: queue state object
4322 * @cmd: command that completed
4323 *
4324 * Checks that the arrived completion is expected.
4325 */
4326static int bnx2x_queue_comp_cmd(struct bnx2x *bp,
4327 struct bnx2x_queue_sp_obj *o,
4328 enum bnx2x_queue_cmd cmd)
4329{
4330 unsigned long cur_pending = o->pending;
4331
4332 if (!test_and_clear_bit(cmd, &cur_pending)) {
4333 BNX2X_ERR("Bad MC reply %d for queue %d in state %d pending 0x%lx, next_state %d\n",
4334 cmd, o->cids[BNX2X_PRIMARY_CID_INDEX],
4335 o->state, cur_pending, o->next_state);
4336 return -EINVAL;
4337 }
4338
4339 if (o->next_tx_only >= o->max_cos)
4340 /* >= because tx_only must always be smaller than max_cos,
4341 * since the primary connection supports COS 0
4342 */
4343 BNX2X_ERR("illegal value for next tx_only: %d. max cos was %d\n",
4344 o->next_tx_only, o->max_cos);
4345
4346 DP(BNX2X_MSG_SP,
4347 "Completing command %d for queue %d, setting state to %d\n",
4348 cmd, o->cids[BNX2X_PRIMARY_CID_INDEX], o->next_state);
4349
4350 if (o->next_tx_only) /* print num tx-only if any exist */
4351 DP(BNX2X_MSG_SP, "primary cid %d: num tx-only cons %d\n",
4352 o->cids[BNX2X_PRIMARY_CID_INDEX], o->next_tx_only);
4353
4354 o->state = o->next_state;
4355 o->num_tx_only = o->next_tx_only;
4356 o->next_state = BNX2X_Q_STATE_MAX;
4357
4358 /* It's important that o->state and o->next_state are
4359 * updated before o->pending.
4360 */
4361 wmb();
4362
4363 clear_bit(cmd, &o->pending);
4364 smp_mb__after_clear_bit();
4365
4366 return 0;
4367}
4368
4369static void bnx2x_q_fill_setup_data_e2(struct bnx2x *bp,
4370 struct bnx2x_queue_state_params *cmd_params,
4371 struct client_init_ramrod_data *data)
4372{
4373 struct bnx2x_queue_setup_params *params = &cmd_params->params.setup;
4374
4375 /* Rx data */
4376
4377 /* IPv6 TPA supported for E2 and above only */
4378 data->rx.tpa_en |= test_bit(BNX2X_Q_FLG_TPA_IPV6, &params->flags) *
4379 CLIENT_INIT_RX_DATA_TPA_EN_IPV6;
4380}
4381
4382static void bnx2x_q_fill_init_general_data(struct bnx2x *bp,
4383 struct bnx2x_queue_sp_obj *o,
4384 struct bnx2x_general_setup_params *params,
4385 struct client_init_general_data *gen_data,
4386 unsigned long *flags)
4387{
4388 gen_data->client_id = o->cl_id;
4389
4390 if (test_bit(BNX2X_Q_FLG_STATS, flags)) {
4391 gen_data->statistics_counter_id =
4392 params->stat_id;
4393 gen_data->statistics_en_flg = 1;
4394 gen_data->statistics_zero_flg =
4395 test_bit(BNX2X_Q_FLG_ZERO_STATS, flags);
4396 } else
4397 gen_data->statistics_counter_id =
4398 DISABLE_STATISTIC_COUNTER_ID_VALUE;
4399
4400 gen_data->is_fcoe_flg = test_bit(BNX2X_Q_FLG_FCOE, flags);
4401 gen_data->activate_flg = test_bit(BNX2X_Q_FLG_ACTIVE, flags);
4402 gen_data->sp_client_id = params->spcl_id;
4403 gen_data->mtu = cpu_to_le16(params->mtu);
4404 gen_data->func_id = o->func_id;
4405
4407 gen_data->cos = params->cos;
4408
4409 gen_data->traffic_type =
4410 test_bit(BNX2X_Q_FLG_FCOE, flags) ?
4411 LLFC_TRAFFIC_TYPE_FCOE : LLFC_TRAFFIC_TYPE_NW;
4412
4413 DP(BNX2X_MSG_SP, "flags: active %d, cos %d, stats en %d\n",
4414 gen_data->activate_flg, gen_data->cos, gen_data->statistics_en_flg);
4415}
4416
4417static void bnx2x_q_fill_init_tx_data(struct bnx2x_queue_sp_obj *o,
4418 struct bnx2x_txq_setup_params *params,
4419 struct client_init_tx_data *tx_data,
4420 unsigned long *flags)
4421{
4422 tx_data->enforce_security_flg =
4423 test_bit(BNX2X_Q_FLG_TX_SEC, flags);
4424 tx_data->default_vlan =
4425 cpu_to_le16(params->default_vlan);
4426 tx_data->default_vlan_flg =
4427 test_bit(BNX2X_Q_FLG_DEF_VLAN, flags);
4428 tx_data->tx_switching_flg =
4429 test_bit(BNX2X_Q_FLG_TX_SWITCH, flags);
4430 tx_data->anti_spoofing_flg =
4431 test_bit(BNX2X_Q_FLG_ANTI_SPOOF, flags);
4432 tx_data->force_default_pri_flg =
4433 test_bit(BNX2X_Q_FLG_FORCE_DEFAULT_PRI, flags);
4434
4435 tx_data->tunnel_non_lso_pcsum_location =
4436 test_bit(BNX2X_Q_FLG_PCSUM_ON_PKT, flags) ? PCSUM_ON_PKT :
4437 PCSUM_ON_BD;
4438
4439 tx_data->tx_status_block_id = params->fw_sb_id;
4440 tx_data->tx_sb_index_number = params->sb_cq_index;
4441 tx_data->tss_leading_client_id = params->tss_leading_cl_id;
4442
4443 tx_data->tx_bd_page_base.lo =
4444 cpu_to_le32(U64_LO(params->dscr_map));
4445 tx_data->tx_bd_page_base.hi =
4446 cpu_to_le32(U64_HI(params->dscr_map));
4447
4448 /* Don't configure any Tx switching mode during queue SETUP */
4449 tx_data->state = 0;
4450}
4451
4452static void bnx2x_q_fill_init_pause_data(struct bnx2x_queue_sp_obj *o,
4453 struct rxq_pause_params *params,
4454 struct client_init_rx_data *rx_data)
4455{
4456 /* flow control data */
4457 rx_data->cqe_pause_thr_low = cpu_to_le16(params->rcq_th_lo);
4458 rx_data->cqe_pause_thr_high = cpu_to_le16(params->rcq_th_hi);
4459 rx_data->bd_pause_thr_low = cpu_to_le16(params->bd_th_lo);
4460 rx_data->bd_pause_thr_high = cpu_to_le16(params->bd_th_hi);
4461 rx_data->sge_pause_thr_low = cpu_to_le16(params->sge_th_lo);
4462 rx_data->sge_pause_thr_high = cpu_to_le16(params->sge_th_hi);
4463 rx_data->rx_cos_mask = cpu_to_le16(params->pri_map);
4464}
4465
4466static void bnx2x_q_fill_init_rx_data(struct bnx2x_queue_sp_obj *o,
4467 struct bnx2x_rxq_setup_params *params,
4468 struct client_init_rx_data *rx_data,
4469 unsigned long *flags)
4470{
4471 rx_data->tpa_en = test_bit(BNX2X_Q_FLG_TPA, flags) *
4472 CLIENT_INIT_RX_DATA_TPA_EN_IPV4;
4473 rx_data->tpa_en |= test_bit(BNX2X_Q_FLG_TPA_GRO, flags) *
4474 CLIENT_INIT_RX_DATA_TPA_MODE;
4475 rx_data->vmqueue_mode_en_flg = 0;
4476
4477 rx_data->cache_line_alignment_log_size =
4478 params->cache_line_log;
4479 rx_data->enable_dynamic_hc =
4480 test_bit(BNX2X_Q_FLG_DHC, flags);
4481 rx_data->max_sges_for_packet = params->max_sges_pkt;
4482 rx_data->client_qzone_id = params->cl_qzone_id;
4483 rx_data->max_agg_size = cpu_to_le16(params->tpa_agg_sz);
4484
4485 /* Always start in DROP_ALL mode */
4486 rx_data->state = cpu_to_le16(CLIENT_INIT_RX_DATA_UCAST_DROP_ALL |
4487 CLIENT_INIT_RX_DATA_MCAST_DROP_ALL);
4488
4489 /* We don't set drop flags */
4490 rx_data->drop_ip_cs_err_flg = 0;
4491 rx_data->drop_tcp_cs_err_flg = 0;
4492 rx_data->drop_ttl0_flg = 0;
4493 rx_data->drop_udp_cs_err_flg = 0;
4494 rx_data->inner_vlan_removal_enable_flg =
4495 test_bit(BNX2X_Q_FLG_VLAN, flags);
4496 rx_data->outer_vlan_removal_enable_flg =
4497 test_bit(BNX2X_Q_FLG_OV, flags);
4498 rx_data->status_block_id = params->fw_sb_id;
4499 rx_data->rx_sb_index_number = params->sb_cq_index;
4500 rx_data->max_tpa_queues = params->max_tpa_queues;
4501 rx_data->max_bytes_on_bd = cpu_to_le16(params->buf_sz);
4502 rx_data->sge_buff_size = cpu_to_le16(params->sge_buf_sz);
4503 rx_data->bd_page_base.lo =
4504 cpu_to_le32(U64_LO(params->dscr_map));
4505 rx_data->bd_page_base.hi =
4506 cpu_to_le32(U64_HI(params->dscr_map));
4507 rx_data->sge_page_base.lo =
4508 cpu_to_le32(U64_LO(params->sge_map));
4509 rx_data->sge_page_base.hi =
4510 cpu_to_le32(U64_HI(params->sge_map));
4511 rx_data->cqe_page_base.lo =
4512 cpu_to_le32(U64_LO(params->rcq_map));
4513 rx_data->cqe_page_base.hi =
4514 cpu_to_le32(U64_HI(params->rcq_map));
4515 rx_data->is_leading_rss = test_bit(BNX2X_Q_FLG_LEADING_RSS, flags);
4516
4517 if (test_bit(BNX2X_Q_FLG_MCAST, flags)) {
4518 rx_data->approx_mcast_engine_id = params->mcast_engine_id;
4519 rx_data->is_approx_mcast = 1;
4520 }
4521
4522 rx_data->rss_engine_id = params->rss_engine_id;
4523
4524 /* silent vlan removal */
4525 rx_data->silent_vlan_removal_flg =
4526 test_bit(BNX2X_Q_FLG_SILENT_VLAN_REM, flags);
4527 rx_data->silent_vlan_value =
4528 cpu_to_le16(params->silent_removal_value);
4529 rx_data->silent_vlan_mask =
4530 cpu_to_le16(params->silent_removal_mask);
4532}
4533
4534/* initialize the general, tx and rx parts of a queue object */
4535static void bnx2x_q_fill_setup_data_cmn(struct bnx2x *bp,
4536 struct bnx2x_queue_state_params *cmd_params,
4537 struct client_init_ramrod_data *data)
4538{
4539 bnx2x_q_fill_init_general_data(bp, cmd_params->q_obj,
4540 &cmd_params->params.setup.gen_params,
4541 &data->general,
4542 &cmd_params->params.setup.flags);
4543
4544 bnx2x_q_fill_init_tx_data(cmd_params->q_obj,
4545 &cmd_params->params.setup.txq_params,
4546 &data->tx,
4547 &cmd_params->params.setup.flags);
4548
4549 bnx2x_q_fill_init_rx_data(cmd_params->q_obj,
4550 &cmd_params->params.setup.rxq_params,
4551 &data->rx,
4552 &cmd_params->params.setup.flags);
4553
4554 bnx2x_q_fill_init_pause_data(cmd_params->q_obj,
4555 &cmd_params->params.setup.pause_params,
4556 &data->rx);
4557}
4558
4559/* initialize the general and tx parts of a tx-only queue object */
4560static void bnx2x_q_fill_setup_tx_only(struct bnx2x *bp,
4561 struct bnx2x_queue_state_params *cmd_params,
4562 struct tx_queue_init_ramrod_data *data)
4563{
4564 bnx2x_q_fill_init_general_data(bp, cmd_params->q_obj,
4565 &cmd_params->params.tx_only.gen_params,
4566 &data->general,
4567 &cmd_params->params.tx_only.flags);
4568
4569 bnx2x_q_fill_init_tx_data(cmd_params->q_obj,
4570 &cmd_params->params.tx_only.txq_params,
4571 &data->tx,
4572 &cmd_params->params.tx_only.flags);
4573
4574 DP(BNX2X_MSG_SP, "cid %d, tx bd page lo %x hi %x\n",
4575 cmd_params->q_obj->cids[0],
4576 data->tx.tx_bd_page_base.lo,
4577 data->tx.tx_bd_page_base.hi);
4578}
4579
4580/**
4581 * bnx2x_q_init - init HW/FW queue
4582 *
4583 * @bp: device handle
4584 * @params: queue state parameters, including the init parameters
4585 *
4586 * HW/FW initial Queue configuration:
4587 * - HC: Rx and Tx
4588 * - CDU context validation
4589 *
4590 */
4591static inline int bnx2x_q_init(struct bnx2x *bp,
4592 struct bnx2x_queue_state_params *params)
4593{
4594 struct bnx2x_queue_sp_obj *o = params->q_obj;
4595 struct bnx2x_queue_init_params *init = &params->params.init;
4596 u16 hc_usec;
4597 u8 cos;
4598
4599 /* Tx HC configuration */
4600 if (test_bit(BNX2X_Q_TYPE_HAS_TX, &o->type) &&
4601 test_bit(BNX2X_Q_FLG_HC, &init->tx.flags)) {
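/* Convert an interrupt rate (interrupts/sec) into a coalescing interval in
 * usec: e.g. hc_rate == 50000 yields 20 usec; hc_rate == 0 disables
 * coalescing.
 */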
4602 hc_usec = init->tx.hc_rate ? 1000000 / init->tx.hc_rate : 0;
4603
4604 bnx2x_update_coalesce_sb_index(bp, init->tx.fw_sb_id,
4605 init->tx.sb_cq_index,
4606 !test_bit(BNX2X_Q_FLG_HC_EN, &init->tx.flags),
Vladislav Zolotarov042181f2011-06-14 01:33:39 +00004607 hc_usec);
4608 }
4609
4610 /* Rx HC configuration */
4611 if (test_bit(BNX2X_Q_TYPE_HAS_RX, &o->type) &&
4612 test_bit(BNX2X_Q_FLG_HC, &init->rx.flags)) {
4613 hc_usec = init->rx.hc_rate ? 1000000 / init->rx.hc_rate : 0;
4614
4615 bnx2x_update_coalesce_sb_index(bp, init->rx.fw_sb_id,
4616 init->rx.sb_cq_index,
4617 !test_bit(BNX2X_Q_FLG_HC_EN, &init->rx.flags),
4618 hc_usec);
4619 }
4620
4621 /* Set CDU context validation values */
4622 for (cos = 0; cos < o->max_cos; cos++) {
4623 DP(BNX2X_MSG_SP, "setting context validation. cid %d, cos %d\n",
4624 o->cids[cos], cos);
4625 DP(BNX2X_MSG_SP, "context pointer %p\n", init->cxts[cos]);
4626 bnx2x_set_ctx_validation(bp, init->cxts[cos], o->cids[cos]);
4627 }
4628
4629 /* As no ramrod is sent, complete the command immediately */
4630 o->complete_cmd(bp, o, BNX2X_Q_CMD_INIT);
4631
4632 mmiowb();
4633 smp_mb();
4634
4635 return 0;
4636}
4637
4638static inline int bnx2x_q_send_setup_e1x(struct bnx2x *bp,
4639 struct bnx2x_queue_state_params *params)
4640{
4641 struct bnx2x_queue_sp_obj *o = params->q_obj;
4642 struct client_init_ramrod_data *rdata =
4643 (struct client_init_ramrod_data *)o->rdata;
4644 dma_addr_t data_mapping = o->rdata_mapping;
4645 int ramrod = RAMROD_CMD_ID_ETH_CLIENT_SETUP;
4646
4647 /* Clear the ramrod data */
4648 memset(rdata, 0, sizeof(*rdata));
4649
4650 /* Fill the ramrod data */
4651 bnx2x_q_fill_setup_data_cmn(bp, params, rdata);
4652
4653 /* No need for an explicit memory barrier here: ordering between
4654 * writing the SPQ element and updating the SPQ producer is
4655 * enforced by the full memory barrier inside bnx2x_sp_post().
4656 */
4657
4661 return bnx2x_sp_post(bp, ramrod, o->cids[BNX2X_PRIMARY_CID_INDEX],
4662 U64_HI(data_mapping),
4663 U64_LO(data_mapping), ETH_CONNECTION_TYPE);
4664}
4665
4666static inline int bnx2x_q_send_setup_e2(struct bnx2x *bp,
4667 struct bnx2x_queue_state_params *params)
4668{
4669 struct bnx2x_queue_sp_obj *o = params->q_obj;
4670 struct client_init_ramrod_data *rdata =
4671 (struct client_init_ramrod_data *)o->rdata;
4672 dma_addr_t data_mapping = o->rdata_mapping;
4673 int ramrod = RAMROD_CMD_ID_ETH_CLIENT_SETUP;
4674
4675 /* Clear the ramrod data */
4676 memset(rdata, 0, sizeof(*rdata));
4677
4678 /* Fill the ramrod data */
4679 bnx2x_q_fill_setup_data_cmn(bp, params, rdata);
4680 bnx2x_q_fill_setup_data_e2(bp, params, rdata);
4681
4682 /* No need for an explicit memory barrier here: ordering between
4683 * writing the SPQ element and updating the SPQ producer is
4684 * enforced by the full memory barrier inside bnx2x_sp_post().
4685 */
4686
4690 return bnx2x_sp_post(bp, ramrod, o->cids[BNX2X_PRIMARY_CID_INDEX],
4691 U64_HI(data_mapping),
4692 U64_LO(data_mapping), ETH_CONNECTION_TYPE);
4693}
4694
4695static inline int bnx2x_q_send_setup_tx_only(struct bnx2x *bp,
4696 struct bnx2x_queue_state_params *params)
4697{
4698 struct bnx2x_queue_sp_obj *o = params->q_obj;
4699 struct tx_queue_init_ramrod_data *rdata =
4700 (struct tx_queue_init_ramrod_data *)o->rdata;
4701 dma_addr_t data_mapping = o->rdata_mapping;
4702 int ramrod = RAMROD_CMD_ID_ETH_TX_QUEUE_SETUP;
4703 struct bnx2x_queue_setup_tx_only_params *tx_only_params =
4704 &params->params.tx_only;
4705 u8 cid_index = tx_only_params->cid_index;
4706
4708 if (cid_index >= o->max_cos) {
4709 BNX2X_ERR("queue[%d]: cid_index (%d) is out of range\n",
4710 o->cl_id, cid_index);
4711 return -EINVAL;
4712 }
4713
4714 DP(BNX2X_MSG_SP, "parameters received: cos: %d sp-id: %d\n",
4715 tx_only_params->gen_params.cos,
4716 tx_only_params->gen_params.spcl_id);
4717
4718 /* Clear the ramrod data */
4719 memset(rdata, 0, sizeof(*rdata));
4720
4721 /* Fill the ramrod data */
4722 bnx2x_q_fill_setup_tx_only(bp, params, rdata);
4723
4724 DP(BNX2X_MSG_SP, "sending tx-only ramrod: cid %d, client-id %d, sp-client id %d, cos %d\n",
4725 o->cids[cid_index], rdata->general.client_id,
4726 rdata->general.sp_client_id, rdata->general.cos);
4727
4728 /* No need for an explicit memory barrier here: ordering between
4729 * writing the SPQ element and updating the SPQ producer is
4730 * enforced by the full memory barrier inside bnx2x_sp_post().
4731 */
4735
4736 return bnx2x_sp_post(bp, ramrod, o->cids[cid_index],
4737 U64_HI(data_mapping),
4738 U64_LO(data_mapping), ETH_CONNECTION_TYPE);
4739}
4740
4741static void bnx2x_q_fill_update_data(struct bnx2x *bp,
4742 struct bnx2x_queue_sp_obj *obj,
4743 struct bnx2x_queue_update_params *params,
4744 struct client_update_ramrod_data *data)
4745{
4746 /* Client ID of the client to update */
4747 data->client_id = obj->cl_id;
4748
4749 /* Function ID of the client to update */
4750 data->func_id = obj->func_id;
4751
4752 /* Default VLAN value */
4753 data->default_vlan = cpu_to_le16(params->def_vlan);
4754
4755 /* Inner VLAN stripping */
4756 data->inner_vlan_removal_enable_flg =
4757 test_bit(BNX2X_Q_UPDATE_IN_VLAN_REM, &params->update_flags);
4758 data->inner_vlan_removal_change_flg =
4759 test_bit(BNX2X_Q_UPDATE_IN_VLAN_REM_CHNG,
4760 &params->update_flags);
4761
4762 /* Outer VLAN stripping */
4763 data->outer_vlan_removal_enable_flg =
4764 test_bit(BNX2X_Q_UPDATE_OUT_VLAN_REM, &params->update_flags);
4765 data->outer_vlan_removal_change_flg =
4766 test_bit(BNX2X_Q_UPDATE_OUT_VLAN_REM_CHNG,
4767 &params->update_flags);
4768
4769 /* Drop packets that have source MAC that doesn't belong to this
4770 * Queue.
4771 */
4772 data->anti_spoofing_enable_flg =
4773 test_bit(BNX2X_Q_UPDATE_ANTI_SPOOF, &params->update_flags);
4774 data->anti_spoofing_change_flg =
4775 test_bit(BNX2X_Q_UPDATE_ANTI_SPOOF_CHNG, &params->update_flags);
4776
4777 /* Activate/Deactivate */
4778 data->activate_flg =
4779 test_bit(BNX2X_Q_UPDATE_ACTIVATE, &params->update_flags);
4780 data->activate_change_flg =
4781 test_bit(BNX2X_Q_UPDATE_ACTIVATE_CHNG, &params->update_flags);
4782
4783 /* Enable default VLAN */
4784 data->default_vlan_enable_flg =
4785 test_bit(BNX2X_Q_UPDATE_DEF_VLAN_EN, &params->update_flags);
4786 data->default_vlan_change_flg =
4787 test_bit(BNX2X_Q_UPDATE_DEF_VLAN_EN_CHNG,
4788 &params->update_flags);
4789
4790 /* silent vlan removal */
4791 data->silent_vlan_change_flg =
4792 test_bit(BNX2X_Q_UPDATE_SILENT_VLAN_REM_CHNG,
4793 &params->update_flags);
4794 data->silent_vlan_removal_flg =
4795 test_bit(BNX2X_Q_UPDATE_SILENT_VLAN_REM, &params->update_flags);
4796 data->silent_vlan_value = cpu_to_le16(params->silent_removal_value);
4797 data->silent_vlan_mask = cpu_to_le16(params->silent_removal_mask);
4798}
4799
4800static inline int bnx2x_q_send_update(struct bnx2x *bp,
4801 struct bnx2x_queue_state_params *params)
4802{
4803 struct bnx2x_queue_sp_obj *o = params->q_obj;
4804 struct client_update_ramrod_data *rdata =
4805 (struct client_update_ramrod_data *)o->rdata;
4806 dma_addr_t data_mapping = o->rdata_mapping;
4807 struct bnx2x_queue_update_params *update_params =
4808 &params->params.update;
4809 u8 cid_index = update_params->cid_index;
4810
4811 if (cid_index >= o->max_cos) {
4812 BNX2X_ERR("queue[%d]: cid_index (%d) is out of range\n",
4813 o->cl_id, cid_index);
4814 return -EINVAL;
4815 }
4816
4818 /* Clear the ramrod data */
4819 memset(rdata, 0, sizeof(*rdata));
4820
4821 /* Fill the ramrod data */
4822 bnx2x_q_fill_update_data(bp, o, update_params, rdata);
4823
4824 /* No need for an explicit memory barrier here: ordering between
4825 * writing the SPQ element and updating the SPQ producer is
4826 * enforced by the full memory barrier inside bnx2x_sp_post().
4827 */
4828
4832 return bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_CLIENT_UPDATE,
4833 o->cids[cid_index], U64_HI(data_mapping),
4834 U64_LO(data_mapping), ETH_CONNECTION_TYPE);
4835}
4836
4837/**
4838 * bnx2x_q_send_deactivate - send DEACTIVATE command
4839 *
4840 * @bp: device handle
4841 * @params: queue state parameters
4842 *
4843 * implemented using the UPDATE command.
4844 */
4845static inline int bnx2x_q_send_deactivate(struct bnx2x *bp,
4846 struct bnx2x_queue_state_params *params)
4847{
4848 struct bnx2x_queue_update_params *update = &params->params.update;
4849
4850 memset(update, 0, sizeof(*update));
4851
4852 __set_bit(BNX2X_Q_UPDATE_ACTIVATE_CHNG, &update->update_flags);
4853
4854 return bnx2x_q_send_update(bp, params);
4855}
4856
4857/**
4858 * bnx2x_q_send_activate - send ACTIVATE command
4859 *
4860 * @bp: device handle
4861 * @params: queue state parameters
4862 *
4863 * implemented using the UPDATE command.
4864 */
4865static inline int bnx2x_q_send_activate(struct bnx2x *bp,
4866 struct bnx2x_queue_state_params *params)
4867{
4868 struct bnx2x_queue_update_params *update = &params->params.update;
4869
4870 memset(update, 0, sizeof(*update));
4871
4872 __set_bit(BNX2X_Q_UPDATE_ACTIVATE, &update->update_flags);
4873 __set_bit(BNX2X_Q_UPDATE_ACTIVATE_CHNG, &update->update_flags);
4874
4875 return bnx2x_q_send_update(bp, params);
4876}
4877
4878static inline int bnx2x_q_send_update_tpa(struct bnx2x *bp,
4879 struct bnx2x_queue_state_params *params)
4880{
4881 /* TODO: Not implemented yet. */
4882 return -1;
4883}
4884
4885static inline int bnx2x_q_send_halt(struct bnx2x *bp,
4886 struct bnx2x_queue_state_params *params)
4887{
4888 struct bnx2x_queue_sp_obj *o = params->q_obj;
4889
4890 return bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_HALT,
4891 o->cids[BNX2X_PRIMARY_CID_INDEX], 0, o->cl_id,
4892 ETH_CONNECTION_TYPE);
4893}
4894
4895static inline int bnx2x_q_send_cfc_del(struct bnx2x *bp,
4896 struct bnx2x_queue_state_params *params)
4897{
4898 struct bnx2x_queue_sp_obj *o = params->q_obj;
4899 u8 cid_idx = params->params.cfc_del.cid_index;
4900
4901 if (cid_idx >= o->max_cos) {
4902 BNX2X_ERR("queue[%d]: cid_index (%d) is out of range\n",
4903 o->cl_id, cid_idx);
4904 return -EINVAL;
4905 }
4906
4907 return bnx2x_sp_post(bp, RAMROD_CMD_ID_COMMON_CFC_DEL,
4908 o->cids[cid_idx], 0, 0, NONE_CONNECTION_TYPE);
4909}
4910
4911static inline int bnx2x_q_send_terminate(struct bnx2x *bp,
4912 struct bnx2x_queue_state_params *params)
4913{
4914 struct bnx2x_queue_sp_obj *o = params->q_obj;
4915 u8 cid_index = params->params.terminate.cid_index;
4916
4917 if (cid_index >= o->max_cos) {
4918 BNX2X_ERR("queue[%d]: cid_index (%d) is out of range\n",
4919 o->cl_id, cid_index);
4920 return -EINVAL;
4921 }
4922
4923 return bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_TERMINATE,
4924 o->cids[cid_index], 0, 0, ETH_CONNECTION_TYPE);
4925}
4926
4927static inline int bnx2x_q_send_empty(struct bnx2x *bp,
4928 struct bnx2x_queue_state_params *params)
4929{
4930 struct bnx2x_queue_sp_obj *o = params->q_obj;
4931
Ariel Elior6383c0b2011-07-14 08:31:57 +00004932 return bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_EMPTY,
4933 o->cids[BNX2X_PRIMARY_CID_INDEX], 0, 0,
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03004934 ETH_CONNECTION_TYPE);
4935}
4936
4937static inline int bnx2x_queue_send_cmd_cmn(struct bnx2x *bp,
4938 struct bnx2x_queue_state_params *params)
4939{
4940 switch (params->cmd) {
4941 case BNX2X_Q_CMD_INIT:
4942 return bnx2x_q_init(bp, params);
Ariel Elior6383c0b2011-07-14 08:31:57 +00004943 case BNX2X_Q_CMD_SETUP_TX_ONLY:
4944 return bnx2x_q_send_setup_tx_only(bp, params);
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03004945 case BNX2X_Q_CMD_DEACTIVATE:
4946 return bnx2x_q_send_deactivate(bp, params);
4947 case BNX2X_Q_CMD_ACTIVATE:
4948 return bnx2x_q_send_activate(bp, params);
4949 case BNX2X_Q_CMD_UPDATE:
4950 return bnx2x_q_send_update(bp, params);
4951 case BNX2X_Q_CMD_UPDATE_TPA:
4952 return bnx2x_q_send_update_tpa(bp, params);
4953 case BNX2X_Q_CMD_HALT:
4954 return bnx2x_q_send_halt(bp, params);
4955 case BNX2X_Q_CMD_CFC_DEL:
4956 return bnx2x_q_send_cfc_del(bp, params);
4957 case BNX2X_Q_CMD_TERMINATE:
4958 return bnx2x_q_send_terminate(bp, params);
4959 case BNX2X_Q_CMD_EMPTY:
4960 return bnx2x_q_send_empty(bp, params);
4961 default:
4962 BNX2X_ERR("Unknown command: %d\n", params->cmd);
4963 return -EINVAL;
4964 }
4965}
4966
4967static int bnx2x_queue_send_cmd_e1x(struct bnx2x *bp,
4968 struct bnx2x_queue_state_params *params)
4969{
4970 switch (params->cmd) {
4971 case BNX2X_Q_CMD_SETUP:
4972 return bnx2x_q_send_setup_e1x(bp, params);
4973 case BNX2X_Q_CMD_INIT:
Ariel Elior6383c0b2011-07-14 08:31:57 +00004974 case BNX2X_Q_CMD_SETUP_TX_ONLY:
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03004975 case BNX2X_Q_CMD_DEACTIVATE:
4976 case BNX2X_Q_CMD_ACTIVATE:
4977 case BNX2X_Q_CMD_UPDATE:
4978 case BNX2X_Q_CMD_UPDATE_TPA:
4979 case BNX2X_Q_CMD_HALT:
4980 case BNX2X_Q_CMD_CFC_DEL:
4981 case BNX2X_Q_CMD_TERMINATE:
4982 case BNX2X_Q_CMD_EMPTY:
4983 return bnx2x_queue_send_cmd_cmn(bp, params);
4984 default:
4985 BNX2X_ERR("Unknown command: %d\n", params->cmd);
4986 return -EINVAL;
4987 }
4988}
4989
4990static int bnx2x_queue_send_cmd_e2(struct bnx2x *bp,
4991 struct bnx2x_queue_state_params *params)
4992{
4993 switch (params->cmd) {
4994 case BNX2X_Q_CMD_SETUP:
4995 return bnx2x_q_send_setup_e2(bp, params);
4996 case BNX2X_Q_CMD_INIT:
Ariel Elior6383c0b2011-07-14 08:31:57 +00004997 case BNX2X_Q_CMD_SETUP_TX_ONLY:
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03004998 case BNX2X_Q_CMD_DEACTIVATE:
4999 case BNX2X_Q_CMD_ACTIVATE:
5000 case BNX2X_Q_CMD_UPDATE:
5001 case BNX2X_Q_CMD_UPDATE_TPA:
5002 case BNX2X_Q_CMD_HALT:
5003 case BNX2X_Q_CMD_CFC_DEL:
5004 case BNX2X_Q_CMD_TERMINATE:
5005 case BNX2X_Q_CMD_EMPTY:
5006 return bnx2x_queue_send_cmd_cmn(bp, params);
5007 default:
5008 BNX2X_ERR("Unknown command: %d\n", params->cmd);
5009 return -EINVAL;
5010 }
5011}
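/* Design note: the E1x and E2 dispatchers above differ only in how SETUP
 * is handled (bnx2x_q_send_setup_e1x() vs. bnx2x_q_send_setup_e2()); every
 * other command funnels through bnx2x_queue_send_cmd_cmn().
 */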
5012
5013/**
5014 * bnx2x_queue_chk_transition - check state machine of a regular Queue
5015 *
5016 * @bp: device handle
5017 * @o:		queue state object
5018 * @params:	queue state parameters, including the requested command
5019 *
5020 * Applies to a regular (i.e. not Forwarding) queue.
5021 * It both checks if the requested command is legal in a current
5022 * state and, if it's legal, sets a `next_state' in the object
5023 * that will be used in the completion flow to set the `state'
5024 * of the object.
5025 *
5026 * returns 0 if a requested command is a legal transition,
5027 * -EINVAL otherwise.
5028 */
5029static int bnx2x_queue_chk_transition(struct bnx2x *bp,
5030 struct bnx2x_queue_sp_obj *o,
5031 struct bnx2x_queue_state_params *params)
5032{
5033 enum bnx2x_q_state state = o->state, next_state = BNX2X_Q_STATE_MAX;
5034 enum bnx2x_queue_cmd cmd = params->cmd;
Ariel Elior6383c0b2011-07-14 08:31:57 +00005035 struct bnx2x_queue_update_params *update_params =
5036 &params->params.update;
5037 u8 next_tx_only = o->num_tx_only;
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03005038
Dmitry Kravkov6debea82011-07-19 01:42:04 +00005039 /*
5040	 * Forget all commands pending completion if a driver-only state
5041 * transition has been requested.
5042 */
5043 if (test_bit(RAMROD_DRV_CLR_ONLY, &params->ramrod_flags)) {
5044 o->pending = 0;
5045 o->next_state = BNX2X_Q_STATE_MAX;
5046 }
5047
5048 /*
5049 * Don't allow a next state transition if we are in the middle of
5050 * the previous one.
5051 */
Yuval Mintz04c46732013-01-23 03:21:46 +00005052 if (o->pending) {
5053 BNX2X_ERR("Blocking transition since pending was %lx\n",
5054 o->pending);
Dmitry Kravkov6debea82011-07-19 01:42:04 +00005055 return -EBUSY;
Yuval Mintz04c46732013-01-23 03:21:46 +00005056 }
Dmitry Kravkov6debea82011-07-19 01:42:04 +00005057
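	/* For reference, the transition map encoded by the switch below
	 * (ACTIVATE/DEACTIVATE/UPDATE move between ACTIVE and INACTIVE;
	 * EMPTY and UPDATE_TPA keep the current state):
	 *
	 *   RESET --INIT--> INITIALIZED --SETUP--> ACTIVE or INACTIVE
	 *   ACTIVE --SETUP_TX_ONLY--> MULTI_COS --TERMINATE--> MCOS_TERMINATED
	 *   MCOS_TERMINATED --CFC_DEL--> MULTI_COS (or ACTIVE once the
	 *   last tx-only connection is deleted)
	 *   ACTIVE or INACTIVE --HALT--> STOPPED --TERMINATE--> TERMINATED
	 *   TERMINATED --CFC_DEL--> RESET
	 */
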
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03005058 switch (state) {
5059 case BNX2X_Q_STATE_RESET:
5060 if (cmd == BNX2X_Q_CMD_INIT)
5061 next_state = BNX2X_Q_STATE_INITIALIZED;
5062
5063 break;
5064 case BNX2X_Q_STATE_INITIALIZED:
5065 if (cmd == BNX2X_Q_CMD_SETUP) {
5066 if (test_bit(BNX2X_Q_FLG_ACTIVE,
5067 &params->params.setup.flags))
5068 next_state = BNX2X_Q_STATE_ACTIVE;
5069 else
5070 next_state = BNX2X_Q_STATE_INACTIVE;
5071 }
5072
5073 break;
5074 case BNX2X_Q_STATE_ACTIVE:
5075 if (cmd == BNX2X_Q_CMD_DEACTIVATE)
5076 next_state = BNX2X_Q_STATE_INACTIVE;
5077
5078 else if ((cmd == BNX2X_Q_CMD_EMPTY) ||
5079 (cmd == BNX2X_Q_CMD_UPDATE_TPA))
5080 next_state = BNX2X_Q_STATE_ACTIVE;
5081
Ariel Elior6383c0b2011-07-14 08:31:57 +00005082 else if (cmd == BNX2X_Q_CMD_SETUP_TX_ONLY) {
5083 next_state = BNX2X_Q_STATE_MULTI_COS;
5084 next_tx_only = 1;
5085 }
5086
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03005087 else if (cmd == BNX2X_Q_CMD_HALT)
5088 next_state = BNX2X_Q_STATE_STOPPED;
5089
5090 else if (cmd == BNX2X_Q_CMD_UPDATE) {
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03005091 /* If "active" state change is requested, update the
5092 * state accordingly.
5093 */
5094 if (test_bit(BNX2X_Q_UPDATE_ACTIVATE_CHNG,
5095 &update_params->update_flags) &&
5096 !test_bit(BNX2X_Q_UPDATE_ACTIVATE,
5097 &update_params->update_flags))
5098 next_state = BNX2X_Q_STATE_INACTIVE;
5099 else
5100 next_state = BNX2X_Q_STATE_ACTIVE;
5101 }
5102
5103 break;
Ariel Elior6383c0b2011-07-14 08:31:57 +00005104 case BNX2X_Q_STATE_MULTI_COS:
5105 if (cmd == BNX2X_Q_CMD_TERMINATE)
5106 next_state = BNX2X_Q_STATE_MCOS_TERMINATED;
5107
5108 else if (cmd == BNX2X_Q_CMD_SETUP_TX_ONLY) {
5109 next_state = BNX2X_Q_STATE_MULTI_COS;
5110 next_tx_only = o->num_tx_only + 1;
5111 }
5112
5113 else if ((cmd == BNX2X_Q_CMD_EMPTY) ||
5114 (cmd == BNX2X_Q_CMD_UPDATE_TPA))
5115 next_state = BNX2X_Q_STATE_MULTI_COS;
5116
5117 else if (cmd == BNX2X_Q_CMD_UPDATE) {
5118 /* If "active" state change is requested, update the
5119 * state accordingly.
5120 */
5121 if (test_bit(BNX2X_Q_UPDATE_ACTIVATE_CHNG,
5122 &update_params->update_flags) &&
5123 !test_bit(BNX2X_Q_UPDATE_ACTIVATE,
5124 &update_params->update_flags))
5125 next_state = BNX2X_Q_STATE_INACTIVE;
5126 else
5127 next_state = BNX2X_Q_STATE_MULTI_COS;
5128 }
5129
5130 break;
5131 case BNX2X_Q_STATE_MCOS_TERMINATED:
5132 if (cmd == BNX2X_Q_CMD_CFC_DEL) {
5133 next_tx_only = o->num_tx_only - 1;
5134 if (next_tx_only == 0)
5135 next_state = BNX2X_Q_STATE_ACTIVE;
5136 else
5137 next_state = BNX2X_Q_STATE_MULTI_COS;
5138 }
5139
5140 break;
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03005141 case BNX2X_Q_STATE_INACTIVE:
5142 if (cmd == BNX2X_Q_CMD_ACTIVATE)
5143 next_state = BNX2X_Q_STATE_ACTIVE;
5144
5145 else if ((cmd == BNX2X_Q_CMD_EMPTY) ||
5146 (cmd == BNX2X_Q_CMD_UPDATE_TPA))
5147 next_state = BNX2X_Q_STATE_INACTIVE;
5148
5149 else if (cmd == BNX2X_Q_CMD_HALT)
5150 next_state = BNX2X_Q_STATE_STOPPED;
5151
5152 else if (cmd == BNX2X_Q_CMD_UPDATE) {
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03005153 /* If "active" state change is requested, update the
5154 * state accordingly.
5155 */
5156 if (test_bit(BNX2X_Q_UPDATE_ACTIVATE_CHNG,
5157 &update_params->update_flags) &&
5158 test_bit(BNX2X_Q_UPDATE_ACTIVATE,
Ariel Elior6383c0b2011-07-14 08:31:57 +00005159 &update_params->update_flags)){
5160 if (o->num_tx_only == 0)
5161 next_state = BNX2X_Q_STATE_ACTIVE;
5162 else /* tx only queues exist for this queue */
5163 next_state = BNX2X_Q_STATE_MULTI_COS;
5164 } else
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03005165 next_state = BNX2X_Q_STATE_INACTIVE;
5166 }
5167
5168 break;
5169 case BNX2X_Q_STATE_STOPPED:
5170 if (cmd == BNX2X_Q_CMD_TERMINATE)
5171 next_state = BNX2X_Q_STATE_TERMINATED;
5172
5173 break;
5174 case BNX2X_Q_STATE_TERMINATED:
5175 if (cmd == BNX2X_Q_CMD_CFC_DEL)
5176 next_state = BNX2X_Q_STATE_RESET;
5177
5178 break;
5179 default:
5180 BNX2X_ERR("Illegal state: %d\n", state);
5181 }
5182
5183 /* Transition is assured */
5184 if (next_state != BNX2X_Q_STATE_MAX) {
5185 DP(BNX2X_MSG_SP, "Good state transition: %d(%d)->%d\n",
5186 state, cmd, next_state);
5187 o->next_state = next_state;
Ariel Elior6383c0b2011-07-14 08:31:57 +00005188 o->next_tx_only = next_tx_only;
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03005189 return 0;
5190 }
5191
5192 DP(BNX2X_MSG_SP, "Bad state transition request: %d %d\n", state, cmd);
5193
5194 return -EINVAL;
5195}
5196
5197void bnx2x_init_queue_obj(struct bnx2x *bp,
5198 struct bnx2x_queue_sp_obj *obj,
Ariel Elior6383c0b2011-07-14 08:31:57 +00005199 u8 cl_id, u32 *cids, u8 cid_cnt, u8 func_id,
5200 void *rdata,
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03005201 dma_addr_t rdata_mapping, unsigned long type)
5202{
5203 memset(obj, 0, sizeof(*obj));
5204
Ariel Elior6383c0b2011-07-14 08:31:57 +00005205	/* We support up to BNX2X_MULTI_TX_COS Tx CoS values at the moment */
5206 BUG_ON(BNX2X_MULTI_TX_COS < cid_cnt);
5207
5208 memcpy(obj->cids, cids, sizeof(obj->cids[0]) * cid_cnt);
5209 obj->max_cos = cid_cnt;
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03005210 obj->cl_id = cl_id;
5211 obj->func_id = func_id;
5212 obj->rdata = rdata;
5213 obj->rdata_mapping = rdata_mapping;
5214 obj->type = type;
5215 obj->next_state = BNX2X_Q_STATE_MAX;
5216
5217 if (CHIP_IS_E1x(bp))
5218 obj->send_cmd = bnx2x_queue_send_cmd_e1x;
5219 else
5220 obj->send_cmd = bnx2x_queue_send_cmd_e2;
5221
5222 obj->check_transition = bnx2x_queue_chk_transition;
5223
5224 obj->complete_cmd = bnx2x_queue_comp_cmd;
5225 obj->wait_comp = bnx2x_queue_wait_comp;
5226 obj->set_pending = bnx2x_queue_set_pending;
5227}
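/* Illustrative initialization sketch (assumed caller-side code, not taken
 * verbatim from the driver): a queue object is typically set up once per
 * fastpath queue with its CID list and slow-path ramrod buffer:
 *
 *	u32 cids[BNX2X_MULTI_TX_COS];
 *
 *	cids[0] = first_cid;
 *	bnx2x_init_queue_obj(bp, &q_obj, cl_id, cids, 1, func_id,
 *			     rdata_virt, rdata_phys, BNX2X_OBJ_TYPE_RX_TX);
 *
 * After init the object is implicitly in BNX2X_Q_STATE_RESET (the first
 * enumerator, i.e. 0 after the memset), so only BNX2X_Q_CMD_INIT is
 * accepted as the first command.
 */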
5228
Ariel Elior67c431a2013-01-01 05:22:36 +00005229/* Return a queue object's logical state */
5230int bnx2x_get_q_logical_state(struct bnx2x *bp,
5231 struct bnx2x_queue_sp_obj *obj)
5232{
5233 switch (obj->state) {
5234 case BNX2X_Q_STATE_ACTIVE:
5235 case BNX2X_Q_STATE_MULTI_COS:
5236 return BNX2X_Q_LOGICAL_STATE_ACTIVE;
5237 case BNX2X_Q_STATE_RESET:
5238 case BNX2X_Q_STATE_INITIALIZED:
5239 case BNX2X_Q_STATE_MCOS_TERMINATED:
5240 case BNX2X_Q_STATE_INACTIVE:
5241 case BNX2X_Q_STATE_STOPPED:
5242 case BNX2X_Q_STATE_TERMINATED:
5243 case BNX2X_Q_STATE_FLRED:
5244 return BNX2X_Q_LOGICAL_STATE_STOPPED;
5245 default:
5246 return -EINVAL;
5247 }
5248}
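/* The logical state answers a simpler question than the full state machine
 * does: "can this queue currently pass traffic?" Both ACTIVE and MULTI_COS
 * map to a logically-active queue; every other state, including the FLR-ed
 * one, is logically stopped.
 */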
5249
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03005250/********************** Function state object *********************************/
Dmitry Kravkov6debea82011-07-19 01:42:04 +00005251enum bnx2x_func_state bnx2x_func_get_state(struct bnx2x *bp,
5252 struct bnx2x_func_sp_obj *o)
5253{
5254	/* In the middle of a transaction - return INVALID state */
5255 if (o->pending)
5256 return BNX2X_F_STATE_MAX;
5257
5258 /*
5259	 * Ensure that o->pending is read before o->state (this rmb()
5260	 * pairs with the wmb() in bnx2x_func_state_change_comp()).
5261 */
5262 rmb();
5263
5264 return o->state;
5265}
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03005266
5267static int bnx2x_func_wait_comp(struct bnx2x *bp,
5268 struct bnx2x_func_sp_obj *o,
5269 enum bnx2x_func_cmd cmd)
5270{
5271 return bnx2x_state_wait(bp, cmd, &o->pending);
5272}
5273
5274/**
5275 * bnx2x_func_state_change_comp - complete the state machine transition
5276 *
5277 * @bp: device handle
5278 * @o:		function state object
5279 * @cmd:	command that has completed
5280 *
5281 * Called on state change transition. Completes the state
5282 * machine transition only - no HW interaction.
5283 */
5284static inline int bnx2x_func_state_change_comp(struct bnx2x *bp,
5285 struct bnx2x_func_sp_obj *o,
5286 enum bnx2x_func_cmd cmd)
5287{
5288 unsigned long cur_pending = o->pending;
5289
5290 if (!test_and_clear_bit(cmd, &cur_pending)) {
Merav Sicron51c1a582012-03-18 10:33:38 +00005291 BNX2X_ERR("Bad MC reply %d for func %d in state %d pending 0x%lx, next_state %d\n",
5292 cmd, BP_FUNC(bp), o->state,
5293 cur_pending, o->next_state);
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03005294 return -EINVAL;
5295 }
5296
Joe Perches94f05b02011-08-14 12:16:20 +00005297 DP(BNX2X_MSG_SP,
5298 "Completing command %d for func %d, setting state to %d\n",
5299 cmd, BP_FUNC(bp), o->next_state);
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03005300
5301 o->state = o->next_state;
5302 o->next_state = BNX2X_F_STATE_MAX;
5303
5304 /* It's important that o->state and o->next_state are
5305 * updated before o->pending.
5306 */
5307 wmb();
5308
5309 clear_bit(cmd, &o->pending);
5310 smp_mb__after_clear_bit();
5311
5312 return 0;
5313}
5314
5315/**
5316 * bnx2x_func_comp_cmd - complete the state change command
5317 *
5318 * @bp: device handle
5319 * @o:		function state object
5320 * @cmd:	command to complete
5321 *
5322 * Checks that the arrived completion is expected.
5323 */
5324static int bnx2x_func_comp_cmd(struct bnx2x *bp,
5325 struct bnx2x_func_sp_obj *o,
5326 enum bnx2x_func_cmd cmd)
5327{
5328 /* Complete the state machine part first, check if it's a
5329 * legal completion.
5330 */
5331 int rc = bnx2x_func_state_change_comp(bp, o, cmd);
Vladislav Zolotarov042181f2011-06-14 01:33:39 +00005332 return rc;
5333}
5334
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03005335/**
5336 * bnx2x_func_chk_transition - perform function state machine transition
5337 *
5338 * @bp: device handle
5339 * @o:		function state object
5340 * @params:	function state parameters, including the requested command
5341 *
5342 * It both checks if the requested command is legal in a current
5343 * state and, if it's legal, sets a `next_state' in the object
5344 * that will be used in the completion flow to set the `state'
5345 * of the object.
5346 *
5347 * returns 0 if a requested command is a legal transition,
5348 * -EINVAL otherwise.
5349 */
5350static int bnx2x_func_chk_transition(struct bnx2x *bp,
5351 struct bnx2x_func_sp_obj *o,
5352 struct bnx2x_func_state_params *params)
Vladislav Zolotarov042181f2011-06-14 01:33:39 +00005353{
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03005354 enum bnx2x_func_state state = o->state, next_state = BNX2X_F_STATE_MAX;
5355 enum bnx2x_func_cmd cmd = params->cmd;
Vladislav Zolotarov042181f2011-06-14 01:33:39 +00005356
Dmitry Kravkov6debea82011-07-19 01:42:04 +00005357 /*
5358	 * Forget all commands pending completion if a driver-only state
5359 * transition has been requested.
5360 */
5361 if (test_bit(RAMROD_DRV_CLR_ONLY, &params->ramrod_flags)) {
5362 o->pending = 0;
5363 o->next_state = BNX2X_F_STATE_MAX;
5364 }
5365
5366 /*
5367 * Don't allow a next state transition if we are in the middle of
5368 * the previous one.
5369 */
5370 if (o->pending)
5371 return -EBUSY;
5372
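	/* For reference, the transition map encoded by the switch below
	 * (AFEX_UPDATE/AFEX_VIFLISTS/SWITCH_UPDATE keep the current state):
	 *
	 *   RESET --HW_INIT--> INITIALIZED --START--> STARTED
	 *   STARTED --TX_STOP--> TX_STOPPED --TX_START--> STARTED
	 *   STARTED --STOP--> INITIALIZED --HW_RESET--> RESET
	 */
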
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03005373 switch (state) {
5374 case BNX2X_F_STATE_RESET:
5375 if (cmd == BNX2X_F_CMD_HW_INIT)
5376 next_state = BNX2X_F_STATE_INITIALIZED;
Vladislav Zolotarov042181f2011-06-14 01:33:39 +00005377
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03005378 break;
5379 case BNX2X_F_STATE_INITIALIZED:
5380 if (cmd == BNX2X_F_CMD_START)
5381 next_state = BNX2X_F_STATE_STARTED;
5382
5383 else if (cmd == BNX2X_F_CMD_HW_RESET)
5384 next_state = BNX2X_F_STATE_RESET;
5385
5386 break;
5387 case BNX2X_F_STATE_STARTED:
5388 if (cmd == BNX2X_F_CMD_STOP)
5389 next_state = BNX2X_F_STATE_INITIALIZED;
Barak Witkowskia3348722012-04-23 03:04:46 +00005390		/* AFEX ramrods can be sent only in the STARTED state, and
5391		 * only if a FUNCTION_STOP ramrod completion is not pending;
5392		 * for these events the next state remains STARTED.
5393 */
5394 else if ((cmd == BNX2X_F_CMD_AFEX_UPDATE) &&
5395 (!test_bit(BNX2X_F_CMD_STOP, &o->pending)))
5396 next_state = BNX2X_F_STATE_STARTED;
5397
5398 else if ((cmd == BNX2X_F_CMD_AFEX_VIFLISTS) &&
5399 (!test_bit(BNX2X_F_CMD_STOP, &o->pending)))
5400 next_state = BNX2X_F_STATE_STARTED;
Merav Sicron55c11942012-11-07 00:45:48 +00005401
5402 /* Switch_update ramrod can be sent in either started or
5403 * tx_stopped state, and it doesn't change the state.
5404 */
5405 else if ((cmd == BNX2X_F_CMD_SWITCH_UPDATE) &&
5406 (!test_bit(BNX2X_F_CMD_STOP, &o->pending)))
5407 next_state = BNX2X_F_STATE_STARTED;
5408
Dmitry Kravkov6debea82011-07-19 01:42:04 +00005409 else if (cmd == BNX2X_F_CMD_TX_STOP)
5410 next_state = BNX2X_F_STATE_TX_STOPPED;
5411
5412 break;
5413 case BNX2X_F_STATE_TX_STOPPED:
Merav Sicron55c11942012-11-07 00:45:48 +00005414 if ((cmd == BNX2X_F_CMD_SWITCH_UPDATE) &&
5415 (!test_bit(BNX2X_F_CMD_STOP, &o->pending)))
5416 next_state = BNX2X_F_STATE_TX_STOPPED;
5417
5418 else if (cmd == BNX2X_F_CMD_TX_START)
Dmitry Kravkov6debea82011-07-19 01:42:04 +00005419 next_state = BNX2X_F_STATE_STARTED;
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03005420
5421 break;
5422 default:
5423 BNX2X_ERR("Unknown state: %d\n", state);
5424 }
5425
5426 /* Transition is assured */
5427 if (next_state != BNX2X_F_STATE_MAX) {
5428 DP(BNX2X_MSG_SP, "Good function state transition: %d(%d)->%d\n",
5429 state, cmd, next_state);
5430 o->next_state = next_state;
5431 return 0;
5432 }
5433
5434 DP(BNX2X_MSG_SP, "Bad function state transition request: %d %d\n",
5435 state, cmd);
5436
5437 return -EINVAL;
5438}
5439
5440/**
5441 * bnx2x_func_init_func - performs HW init at function stage
5442 *
5443 * @bp: device handle
5444 * @drv:	driver-specific HW init callbacks
5445 *
5446 * Init HW when the current phase is
5447 * FW_MSG_CODE_DRV_LOAD_FUNCTION: initialize only FUNCTION-only
5448 * HW blocks.
5449 */
5450static inline int bnx2x_func_init_func(struct bnx2x *bp,
5451 const struct bnx2x_func_sp_drv_ops *drv)
5452{
5453 return drv->init_hw_func(bp);
5454}
5455
5456/**
5457 * bnx2x_func_init_port - performs HW init at port stage
5458 *
5459 * @bp: device handle
5460 * @drv:	driver-specific HW init callbacks
5461 *
5462 * Init HW when the current phase is
5463 * FW_MSG_CODE_DRV_LOAD_PORT: initialize PORT-only and
5464 * FUNCTION-only HW blocks.
5465 *
5466 */
5467static inline int bnx2x_func_init_port(struct bnx2x *bp,
5468 const struct bnx2x_func_sp_drv_ops *drv)
5469{
5470 int rc = drv->init_hw_port(bp);
5471 if (rc)
5472 return rc;
5473
5474 return bnx2x_func_init_func(bp, drv);
5475}
5476
5477/**
5478 * bnx2x_func_init_cmn_chip - performs HW init at chip-common stage
5479 *
5480 * @bp: device handle
5481 * @drv:	driver-specific HW init callbacks
5482 *
5483 * Init HW when the current phase is
5484 * FW_MSG_CODE_DRV_LOAD_COMMON_CHIP: initialize COMMON_CHIP,
5485 * PORT-only and FUNCTION-only HW blocks.
5486 */
5487static inline int bnx2x_func_init_cmn_chip(struct bnx2x *bp,
5488 const struct bnx2x_func_sp_drv_ops *drv)
5489{
5490 int rc = drv->init_hw_cmn_chip(bp);
5491 if (rc)
5492 return rc;
5493
5494 return bnx2x_func_init_port(bp, drv);
5495}
5496
5497/**
5498 * bnx2x_func_init_cmn - performs HW init at common stage
5499 *
5500 * @bp: device handle
5501 * @drv:	driver-specific HW init callbacks
5502 *
5503 * Init HW when the current phase is
5504 * FW_MSG_CODE_DRV_LOAD_COMMON: initialize COMMON,
5505 * PORT-only and FUNCTION-only HW blocks.
5506 */
5507static inline int bnx2x_func_init_cmn(struct bnx2x *bp,
5508 const struct bnx2x_func_sp_drv_ops *drv)
5509{
5510 int rc = drv->init_hw_cmn(bp);
5511 if (rc)
5512 return rc;
5513
5514 return bnx2x_func_init_port(bp, drv);
5515}
5516
5517static int bnx2x_func_hw_init(struct bnx2x *bp,
5518 struct bnx2x_func_state_params *params)
5519{
5520 u32 load_code = params->params.hw_init.load_phase;
5521 struct bnx2x_func_sp_obj *o = params->f_obj;
5522 const struct bnx2x_func_sp_drv_ops *drv = o->drv;
5523 int rc = 0;
5524
5525 DP(BNX2X_MSG_SP, "function %d load_code %x\n",
5526 BP_ABS_FUNC(bp), load_code);
5527
5528 /* Prepare buffers for unzipping the FW */
5529 rc = drv->gunzip_init(bp);
5530 if (rc)
5531 return rc;
5532
5533 /* Prepare FW */
5534 rc = drv->init_fw(bp);
5535 if (rc) {
5536 BNX2X_ERR("Error loading firmware\n");
Dmitry Kravkoveb2afd42011-11-15 12:07:33 +00005537 goto init_err;
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03005538 }
5539
5540 /* Handle the beginning of COMMON_XXX pases separatelly... */
5541 switch (load_code) {
5542 case FW_MSG_CODE_DRV_LOAD_COMMON_CHIP:
5543 rc = bnx2x_func_init_cmn_chip(bp, drv);
5544 if (rc)
Dmitry Kravkoveb2afd42011-11-15 12:07:33 +00005545 goto init_err;
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03005546
5547 break;
5548 case FW_MSG_CODE_DRV_LOAD_COMMON:
5549 rc = bnx2x_func_init_cmn(bp, drv);
5550 if (rc)
Dmitry Kravkoveb2afd42011-11-15 12:07:33 +00005551 goto init_err;
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03005552
5553 break;
5554 case FW_MSG_CODE_DRV_LOAD_PORT:
5555 rc = bnx2x_func_init_port(bp, drv);
5556 if (rc)
Dmitry Kravkoveb2afd42011-11-15 12:07:33 +00005557 goto init_err;
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03005558
5559 break;
5560 case FW_MSG_CODE_DRV_LOAD_FUNCTION:
5561 rc = bnx2x_func_init_func(bp, drv);
5562 if (rc)
Dmitry Kravkoveb2afd42011-11-15 12:07:33 +00005563 goto init_err;
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03005564
5565 break;
5566 default:
5567 BNX2X_ERR("Unknown load_code (0x%x) from MCP\n", load_code);
5568 rc = -EINVAL;
5569 }
5570
Dmitry Kravkoveb2afd42011-11-15 12:07:33 +00005571init_err:
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03005572 drv->gunzip_end(bp);
5573
5574 /* In case of success, complete the comand immediatelly: no ramrods
5575 * have been sent.
5576 */
5577 if (!rc)
5578 o->complete_cmd(bp, o, BNX2X_F_CMD_HW_INIT);
5579
5580 return rc;
5581}
5582
5583/**
5584 * bnx2x_func_reset_func - reset HW at function stage
5585 *
5586 * @bp: device handle
5587 * @drv:	driver-specific HW reset callbacks
5588 *
5589 * Reset HW at FW_MSG_CODE_DRV_UNLOAD_FUNCTION stage: reset only
5590 * FUNCTION-only HW blocks.
5591 */
5592static inline void bnx2x_func_reset_func(struct bnx2x *bp,
5593 const struct bnx2x_func_sp_drv_ops *drv)
5594{
5595 drv->reset_hw_func(bp);
5596}
5597
5598/**
5599 * bnx2x_func_reset_port - reset HW at port stage
5600 *
5601 * @bp: device handle
5602 * @drv:	driver-specific HW reset callbacks
5603 *
5604 * Reset HW at FW_MSG_CODE_DRV_UNLOAD_PORT stage: reset
5605 * FUNCTION-only and PORT-only HW blocks.
5606 *
5607 * !!!IMPORTANT!!!
5608 *
5609 * It's important to call reset_port before reset_func() as the last thing
5610 * reset_func() does is pf_disable(), thus disabling PGLUE_B, which
5611 * makes any DMAE transaction impossible.
5612 */
5613static inline void bnx2x_func_reset_port(struct bnx2x *bp,
5614 const struct bnx2x_func_sp_drv_ops *drv)
5615{
5616 drv->reset_hw_port(bp);
5617 bnx2x_func_reset_func(bp, drv);
5618}
5619
5620/**
5621 * bnx2x_func_reset_cmn - reset HW at common stage
5622 *
5623 * @bp: device handle
5624 * @drv:	driver-specific HW reset callbacks
5625 *
5626 * Reset HW at FW_MSG_CODE_DRV_UNLOAD_COMMON and
5627 * FW_MSG_CODE_DRV_UNLOAD_COMMON_CHIP stages: reset COMMON,
5628 * COMMON_CHIP, FUNCTION-only and PORT-only HW blocks.
5629 */
5630static inline void bnx2x_func_reset_cmn(struct bnx2x *bp,
5631 const struct bnx2x_func_sp_drv_ops *drv)
5632{
5633 bnx2x_func_reset_port(bp, drv);
5634 drv->reset_hw_cmn(bp);
5635}
5636
5638static inline int bnx2x_func_hw_reset(struct bnx2x *bp,
5639 struct bnx2x_func_state_params *params)
5640{
5641 u32 reset_phase = params->params.hw_reset.reset_phase;
5642 struct bnx2x_func_sp_obj *o = params->f_obj;
5643 const struct bnx2x_func_sp_drv_ops *drv = o->drv;
5644
5645 DP(BNX2X_MSG_SP, "function %d reset_phase %x\n", BP_ABS_FUNC(bp),
5646 reset_phase);
5647
5648 switch (reset_phase) {
5649 case FW_MSG_CODE_DRV_UNLOAD_COMMON:
5650 bnx2x_func_reset_cmn(bp, drv);
5651 break;
5652 case FW_MSG_CODE_DRV_UNLOAD_PORT:
5653 bnx2x_func_reset_port(bp, drv);
5654 break;
5655 case FW_MSG_CODE_DRV_UNLOAD_FUNCTION:
5656 bnx2x_func_reset_func(bp, drv);
5657 break;
5658 default:
5659 BNX2X_ERR("Unknown reset_phase (0x%x) from MCP\n",
5660 reset_phase);
5661 break;
5662 }
5663
5664	/* Complete the command immediately: no ramrods have been sent. */
5665 o->complete_cmd(bp, o, BNX2X_F_CMD_HW_RESET);
5666
5667 return 0;
5668}
5669
5670static inline int bnx2x_func_send_start(struct bnx2x *bp,
5671 struct bnx2x_func_state_params *params)
5672{
5673 struct bnx2x_func_sp_obj *o = params->f_obj;
5674 struct function_start_data *rdata =
5675 (struct function_start_data *)o->rdata;
5676 dma_addr_t data_mapping = o->rdata_mapping;
5677 struct bnx2x_func_start_params *start_params = &params->params.start;
5678
5679 memset(rdata, 0, sizeof(*rdata));
5680
5681 /* Fill the ramrod data with provided parameters */
Dmitry Kravkov1bc277f2013-03-18 06:51:04 +00005682 rdata->function_mode = (u8)start_params->mf_mode;
5683 rdata->sd_vlan_tag = cpu_to_le16(start_params->sd_vlan_tag);
5684 rdata->path_id = BP_PATH(bp);
5685 rdata->network_cos_mode = start_params->network_cos_mode;
5686 rdata->gre_tunnel_mode = start_params->gre_tunnel_mode;
5687 rdata->gre_tunnel_rss = start_params->gre_tunnel_rss;
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03005688
Dmitry Kravkov1bc277f2013-03-18 06:51:04 +00005689	/* No need for an explicit memory barrier here as long as we
5690	 * ensure the ordering of writing to the SPQ element and updating
5691	 * of the SPQ producer, which involves a memory read. If that read
5692	 * is ever removed, a full memory barrier will be needed there
5693	 * (inside bnx2x_sp_post()).
Vladislav Zolotarov53e51e22011-07-19 01:45:02 +00005694 */
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03005695
5696 return bnx2x_sp_post(bp, RAMROD_CMD_ID_COMMON_FUNCTION_START, 0,
5697 U64_HI(data_mapping),
5698 U64_LO(data_mapping), NONE_CONNECTION_TYPE);
5699}
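/* Illustrative caller-side sketch (assumed, not taken verbatim from the
 * driver): the START ramrod is normally issued through the generic
 * function state-change flow with the multi-function parameters filled in:
 *
 *	struct bnx2x_func_state_params func_params = {NULL};
 *	struct bnx2x_func_start_params *start = &func_params.params.start;
 *
 *	func_params.f_obj = &bp->func_obj;
 *	func_params.cmd = BNX2X_F_CMD_START;
 *	start->mf_mode = bp->mf_mode;
 *	start->sd_vlan_tag = bp->mf_ov;
 *	rc = bnx2x_func_state_change(bp, &func_params);
 */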
5700
Merav Sicron55c11942012-11-07 00:45:48 +00005701static inline int bnx2x_func_send_switch_update(struct bnx2x *bp,
5702 struct bnx2x_func_state_params *params)
5703{
5704 struct bnx2x_func_sp_obj *o = params->f_obj;
5705 struct function_update_data *rdata =
5706 (struct function_update_data *)o->rdata;
5707 dma_addr_t data_mapping = o->rdata_mapping;
5708 struct bnx2x_func_switch_update_params *switch_update_params =
5709 &params->params.switch_update;
5710
5711 memset(rdata, 0, sizeof(*rdata));
5712
5713 /* Fill the ramrod data with provided parameters */
5714 rdata->tx_switch_suspend_change_flg = 1;
5715 rdata->tx_switch_suspend = switch_update_params->suspend;
5716 rdata->echo = SWITCH_UPDATE;
5717
5718 return bnx2x_sp_post(bp, RAMROD_CMD_ID_COMMON_FUNCTION_UPDATE, 0,
5719 U64_HI(data_mapping),
5720 U64_LO(data_mapping), NONE_CONNECTION_TYPE);
5721}
5722
Barak Witkowskia3348722012-04-23 03:04:46 +00005723static inline int bnx2x_func_send_afex_update(struct bnx2x *bp,
5724 struct bnx2x_func_state_params *params)
5725{
5726 struct bnx2x_func_sp_obj *o = params->f_obj;
5727 struct function_update_data *rdata =
5728 (struct function_update_data *)o->afex_rdata;
5729 dma_addr_t data_mapping = o->afex_rdata_mapping;
5730 struct bnx2x_func_afex_update_params *afex_update_params =
5731 &params->params.afex_update;
5732
5733 memset(rdata, 0, sizeof(*rdata));
5734
5735 /* Fill the ramrod data with provided parameters */
5736 rdata->vif_id_change_flg = 1;
5737 rdata->vif_id = cpu_to_le16(afex_update_params->vif_id);
5738 rdata->afex_default_vlan_change_flg = 1;
5739 rdata->afex_default_vlan =
5740 cpu_to_le16(afex_update_params->afex_default_vlan);
5741 rdata->allowed_priorities_change_flg = 1;
5742 rdata->allowed_priorities = afex_update_params->allowed_priorities;
Merav Sicron55c11942012-11-07 00:45:48 +00005743 rdata->echo = AFEX_UPDATE;
Barak Witkowskia3348722012-04-23 03:04:46 +00005744
5745	/* No need for an explicit memory barrier here as long as we
5746	 * ensure the ordering of writing to the SPQ element and updating
5747	 * of the SPQ producer, which involves a memory read. If that read
5748	 * is ever removed, a full memory barrier will be needed there
5749	 * (inside bnx2x_sp_post()).
5750 */
5751 DP(BNX2X_MSG_SP,
5752 "afex: sending func_update vif_id 0x%x dvlan 0x%x prio 0x%x\n",
5753 rdata->vif_id,
5754 rdata->afex_default_vlan, rdata->allowed_priorities);
5755
5756 return bnx2x_sp_post(bp, RAMROD_CMD_ID_COMMON_FUNCTION_UPDATE, 0,
5757 U64_HI(data_mapping),
5758 U64_LO(data_mapping), NONE_CONNECTION_TYPE);
5759}
5760
5761static
5762inline int bnx2x_func_send_afex_viflists(struct bnx2x *bp,
5763 struct bnx2x_func_state_params *params)
5764{
5765 struct bnx2x_func_sp_obj *o = params->f_obj;
5766 struct afex_vif_list_ramrod_data *rdata =
5767 (struct afex_vif_list_ramrod_data *)o->afex_rdata;
Yuval Mintz86564c32013-01-23 03:21:50 +00005768 struct bnx2x_func_afex_viflists_params *afex_vif_params =
Barak Witkowskia3348722012-04-23 03:04:46 +00005769 &params->params.afex_viflists;
5770 u64 *p_rdata = (u64 *)rdata;
5771
5772 memset(rdata, 0, sizeof(*rdata));
5773
5774 /* Fill the ramrod data with provided parameters */
Yuval Mintz86564c32013-01-23 03:21:50 +00005775 rdata->vif_list_index = cpu_to_le16(afex_vif_params->vif_list_index);
5776 rdata->func_bit_map = afex_vif_params->func_bit_map;
5777 rdata->afex_vif_list_command = afex_vif_params->afex_vif_list_command;
5778 rdata->func_to_clear = afex_vif_params->func_to_clear;
Barak Witkowskia3348722012-04-23 03:04:46 +00005779
5780 /* send in echo type of sub command */
Yuval Mintz86564c32013-01-23 03:21:50 +00005781 rdata->echo = afex_vif_params->afex_vif_list_command;
Barak Witkowskia3348722012-04-23 03:04:46 +00005782
5783	/* No need for an explicit memory barrier here as long as we
5784	 * ensure the ordering of writing to the SPQ element and updating
5785	 * of the SPQ producer, which involves a memory read. If that read
5786	 * is ever removed, a full memory barrier will be needed there
5787	 * (inside bnx2x_sp_post()).
5788 */
5789
5790 DP(BNX2X_MSG_SP, "afex: ramrod lists, cmd 0x%x index 0x%x func_bit_map 0x%x func_to_clr 0x%x\n",
5791 rdata->afex_vif_list_command, rdata->vif_list_index,
5792 rdata->func_bit_map, rdata->func_to_clear);
5793
5794 /* this ramrod sends data directly and not through DMA mapping */
5795 return bnx2x_sp_post(bp, RAMROD_CMD_ID_COMMON_AFEX_VIF_LISTS, 0,
5796 U64_HI(*p_rdata), U64_LO(*p_rdata),
5797 NONE_CONNECTION_TYPE);
5798}
5799
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03005800static inline int bnx2x_func_send_stop(struct bnx2x *bp,
5801 struct bnx2x_func_state_params *params)
5802{
5803 return bnx2x_sp_post(bp, RAMROD_CMD_ID_COMMON_FUNCTION_STOP, 0, 0, 0,
5804 NONE_CONNECTION_TYPE);
5805}
5806
Dmitry Kravkov6debea82011-07-19 01:42:04 +00005807static inline int bnx2x_func_send_tx_stop(struct bnx2x *bp,
5808 struct bnx2x_func_state_params *params)
5809{
5810 return bnx2x_sp_post(bp, RAMROD_CMD_ID_COMMON_STOP_TRAFFIC, 0, 0, 0,
5811 NONE_CONNECTION_TYPE);
5812}

5813static inline int bnx2x_func_send_tx_start(struct bnx2x *bp,
5814 struct bnx2x_func_state_params *params)
5815{
5816 struct bnx2x_func_sp_obj *o = params->f_obj;
5817 struct flow_control_configuration *rdata =
5818 (struct flow_control_configuration *)o->rdata;
5819 dma_addr_t data_mapping = o->rdata_mapping;
5820 struct bnx2x_func_tx_start_params *tx_start_params =
5821 &params->params.tx_start;
5822 int i;
5823
5824 memset(rdata, 0, sizeof(*rdata));
5825
5826 rdata->dcb_enabled = tx_start_params->dcb_enabled;
5827 rdata->dcb_version = tx_start_params->dcb_version;
5828 rdata->dont_add_pri_0_en = tx_start_params->dont_add_pri_0_en;
5829
5830 for (i = 0; i < ARRAY_SIZE(rdata->traffic_type_to_priority_cos); i++)
5831 rdata->traffic_type_to_priority_cos[i] =
5832 tx_start_params->traffic_type_to_priority_cos[i];
5833
5834 return bnx2x_sp_post(bp, RAMROD_CMD_ID_COMMON_START_TRAFFIC, 0,
5835 U64_HI(data_mapping),
5836 U64_LO(data_mapping), NONE_CONNECTION_TYPE);
5837}
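/* Design note: TX_STOP/TX_START do not touch the queue objects at all;
 * they pause and resume HW Tx traffic at the function level, carrying the
 * DCB priority-to-CoS mapping in the flow_control_configuration ramrod
 * data filled above (e.g. while that mapping is being reconfigured).
 */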
5838
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03005839static int bnx2x_func_send_cmd(struct bnx2x *bp,
5840 struct bnx2x_func_state_params *params)
5841{
5842 switch (params->cmd) {
5843 case BNX2X_F_CMD_HW_INIT:
5844 return bnx2x_func_hw_init(bp, params);
5845 case BNX2X_F_CMD_START:
5846 return bnx2x_func_send_start(bp, params);
5847 case BNX2X_F_CMD_STOP:
5848 return bnx2x_func_send_stop(bp, params);
5849 case BNX2X_F_CMD_HW_RESET:
5850 return bnx2x_func_hw_reset(bp, params);
Barak Witkowskia3348722012-04-23 03:04:46 +00005851 case BNX2X_F_CMD_AFEX_UPDATE:
5852 return bnx2x_func_send_afex_update(bp, params);
5853 case BNX2X_F_CMD_AFEX_VIFLISTS:
5854 return bnx2x_func_send_afex_viflists(bp, params);
Dmitry Kravkov6debea82011-07-19 01:42:04 +00005855 case BNX2X_F_CMD_TX_STOP:
5856 return bnx2x_func_send_tx_stop(bp, params);
5857 case BNX2X_F_CMD_TX_START:
5858 return bnx2x_func_send_tx_start(bp, params);
Merav Sicron55c11942012-11-07 00:45:48 +00005859 case BNX2X_F_CMD_SWITCH_UPDATE:
5860 return bnx2x_func_send_switch_update(bp, params);
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03005861 default:
5862 BNX2X_ERR("Unknown command: %d\n", params->cmd);
5863 return -EINVAL;
5864 }
5865}
5866
5867void bnx2x_init_func_obj(struct bnx2x *bp,
5868 struct bnx2x_func_sp_obj *obj,
5869 void *rdata, dma_addr_t rdata_mapping,
Barak Witkowskia3348722012-04-23 03:04:46 +00005870 void *afex_rdata, dma_addr_t afex_rdata_mapping,
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03005871 struct bnx2x_func_sp_drv_ops *drv_iface)
5872{
5873 memset(obj, 0, sizeof(*obj));
5874
5875 mutex_init(&obj->one_pending_mutex);
5876
5877 obj->rdata = rdata;
5878 obj->rdata_mapping = rdata_mapping;
Barak Witkowskia3348722012-04-23 03:04:46 +00005879 obj->afex_rdata = afex_rdata;
5880 obj->afex_rdata_mapping = afex_rdata_mapping;
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03005881 obj->send_cmd = bnx2x_func_send_cmd;
5882 obj->check_transition = bnx2x_func_chk_transition;
5883 obj->complete_cmd = bnx2x_func_comp_cmd;
5884 obj->wait_comp = bnx2x_func_wait_comp;
5885
5886 obj->drv = drv_iface;
5887}
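/* Illustrative initialization sketch (assumed caller-side code): the
 * single per-function object is typically wired to the driver's slow-path
 * buffers and HW init/reset callbacks once, during driver init:
 *
 *	bnx2x_init_func_obj(bp, &bp->func_obj,
 *			    func_rdata_virt, func_rdata_phys,
 *			    afex_rdata_virt, afex_rdata_phys,
 *			    &drv_ops);
 *
 * where drv_ops is a struct bnx2x_func_sp_drv_ops providing the
 * init_hw_*, reset_hw_*, gunzip_* and init_fw callbacks used by the
 * HW_INIT/HW_RESET commands above.
 */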
5888
5889/**
5890 * bnx2x_func_state_change - perform Function state change transition
5891 *
5892 * @bp: device handle
5893 * @params: parameters to perform the transaction
5894 *
5895 * returns 0 in case of successfully completed transition,
5896 * negative error code in case of failure, positive
5897 * (EBUSY) value if there is a completion that is
5898 * still pending (possible only if RAMROD_COMP_WAIT is
5899 * not set in params->ramrod_flags for asynchronous
5900 * commands).
5901 */
5902int bnx2x_func_state_change(struct bnx2x *bp,
5903 struct bnx2x_func_state_params *params)
5904{
5905 struct bnx2x_func_sp_obj *o = params->f_obj;
Merav Sicron55c11942012-11-07 00:45:48 +00005906 int rc, cnt = 300;
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03005907 enum bnx2x_func_cmd cmd = params->cmd;
5908 unsigned long *pending = &o->pending;
5909
5910 mutex_lock(&o->one_pending_mutex);
5911
5912 /* Check that the requested transition is legal */
Merav Sicron55c11942012-11-07 00:45:48 +00005913 rc = o->check_transition(bp, o, params);
5914 if ((rc == -EBUSY) &&
5915 (test_bit(RAMROD_RETRY, &params->ramrod_flags))) {
5916 while ((rc == -EBUSY) && (--cnt > 0)) {
5917 mutex_unlock(&o->one_pending_mutex);
5918 msleep(10);
5919 mutex_lock(&o->one_pending_mutex);
5920 rc = o->check_transition(bp, o, params);
5921 }
5922 if (rc == -EBUSY) {
5923 mutex_unlock(&o->one_pending_mutex);
5924 BNX2X_ERR("timeout waiting for previous ramrod completion\n");
5925 return rc;
5926 }
5927 } else if (rc) {
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03005928 mutex_unlock(&o->one_pending_mutex);
Merav Sicron55c11942012-11-07 00:45:48 +00005929 return rc;
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03005930 }
5931
5932 /* Set "pending" bit */
5933 set_bit(cmd, pending);
5934
5935 /* Don't send a command if only driver cleanup was requested */
5936 if (test_bit(RAMROD_DRV_CLR_ONLY, &params->ramrod_flags)) {
5937 bnx2x_func_state_change_comp(bp, o, cmd);
5938 mutex_unlock(&o->one_pending_mutex);
5939 } else {
5940 /* Send a ramrod */
5941 rc = o->send_cmd(bp, params);
5942
5943 mutex_unlock(&o->one_pending_mutex);
5944
5945 if (rc) {
5946 o->next_state = BNX2X_F_STATE_MAX;
5947 clear_bit(cmd, pending);
5948 smp_mb__after_clear_bit();
5949 return rc;
5950 }
5951
5952 if (test_bit(RAMROD_COMP_WAIT, &params->ramrod_flags)) {
5953 rc = o->wait_comp(bp, o, cmd);
5954 if (rc)
5955 return rc;
5956
5957 return 0;
5958 }
5959 }
5960
5961 return !!test_bit(cmd, pending);
Vladislav Zolotarov042181f2011-06-14 01:33:39 +00005962}
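
/* Illustrative end-to-end usage sketch (assumed caller-side code):
 * stopping the function synchronously, retrying while a previous ramrod
 * completes:
 *
 *	struct bnx2x_func_state_params func_params = {NULL};
 *
 *	func_params.f_obj = &bp->func_obj;
 *	func_params.cmd = BNX2X_F_CMD_STOP;
 *	__set_bit(RAMROD_COMP_WAIT, &func_params.ramrod_flags);
 *	__set_bit(RAMROD_RETRY, &func_params.ramrod_flags);
 *	rc = bnx2x_func_state_change(bp, &func_params);
 *
 * With RAMROD_COMP_WAIT set, the call returns 0 only after the STOP
 * ramrod completion has moved the object back to
 * BNX2X_F_STATE_INITIALIZED.
 */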