/* bnx2x_sp.c: Broadcom Everest network driver.
 *
 * Copyright (c) 2011-2013 Broadcom Corporation
 *
 * Unless you and Broadcom execute a separate written software license
 * agreement governing use of this software, this software is licensed to you
 * under the terms of the GNU General Public License version 2, available
 * at http://www.gnu.org/licenses/old-licenses/gpl-2.0.html (the "GPL").
 *
 * Notwithstanding the above, under no circumstances may you combine this
 * software in any way with any other Broadcom software provided under a
 * license other than the GPL, without Broadcom's express prior written
 * consent.
 *
 * Maintained by: Eilon Greenstein <eilong@broadcom.com>
 * Written by: Vladislav Zolotarov
 *
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/module.h>
#include <linux/crc32.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/crc32c.h>
#include "bnx2x.h"
#include "bnx2x_cmn.h"
#include "bnx2x_sp.h"

#define BNX2X_MAX_EMUL_MULTI		16

/**** Exe Queue interfaces ****/

/**
 * bnx2x_exe_queue_init - init the Exe Queue object
 *
 * @bp:		driver handle
 * @o:		pointer to the object
 * @exe_len:	length
 * @owner:	pointer to the owner
 * @validate:	validate function pointer
 * @remove:	remove function pointer
 * @optimize:	optimize function pointer
 * @exec:	execute function pointer
 * @get:	get function pointer
 */
static inline void bnx2x_exe_queue_init(struct bnx2x *bp,
					struct bnx2x_exe_queue_obj *o,
					int exe_len,
					union bnx2x_qable_obj *owner,
					exe_q_validate validate,
					exe_q_remove remove,
					exe_q_optimize optimize,
					exe_q_execute exec,
					exe_q_get get)
{
	memset(o, 0, sizeof(*o));

	INIT_LIST_HEAD(&o->exe_queue);
	INIT_LIST_HEAD(&o->pending_comp);

	spin_lock_init(&o->lock);

	o->exe_chunk_len = exe_len;
	o->owner	 = owner;

	/* Owner specific callbacks */
	o->validate	 = validate;
	o->remove	 = remove;
	o->optimize	 = optimize;
	o->execute	 = exec;
	o->get		 = get;

	DP(BNX2X_MSG_SP, "Setup the execution queue with the chunk length of %d\n",
	   exe_len);
}
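
/* Usage sketch (illustrative only, not driver code): a queueable object
 * wires its own callbacks into the execution queue at init time. The
 * my_*() names below are hypothetical placeholders for whatever the owner
 * object implements:
 *
 *	bnx2x_exe_queue_init(bp, &o->exe_queue, 1,
 *			     (union bnx2x_qable_obj *)o,
 *			     my_validate, my_remove,
 *			     my_optimize, my_execute, my_get);
 *
 * From then on the queue calls back into the owner for every element it
 * validates, optimizes, executes or removes.
 */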

static inline void bnx2x_exe_queue_free_elem(struct bnx2x *bp,
					     struct bnx2x_exeq_elem *elem)
{
	DP(BNX2X_MSG_SP, "Deleting an exe_queue element\n");
	kfree(elem);
}

static inline int bnx2x_exe_queue_length(struct bnx2x_exe_queue_obj *o)
{
	struct bnx2x_exeq_elem *elem;
	int cnt = 0;

	spin_lock_bh(&o->lock);

	list_for_each_entry(elem, &o->exe_queue, link)
		cnt++;

	spin_unlock_bh(&o->lock);

	return cnt;
}

/**
 * bnx2x_exe_queue_add - add a new element to the execution queue
 *
 * @bp:		driver handle
 * @o:		queue
 * @elem:	new command to add
 * @restore:	true - do not optimize the command
 *
 * If the element is optimized or is illegal, frees it.
 */
static inline int bnx2x_exe_queue_add(struct bnx2x *bp,
				      struct bnx2x_exe_queue_obj *o,
				      struct bnx2x_exeq_elem *elem,
				      bool restore)
{
	int rc;

	spin_lock_bh(&o->lock);

	if (!restore) {
		/* Try to cancel this element queue */
		rc = o->optimize(bp, o->owner, elem);
		if (rc)
			goto free_and_exit;

		/* Check if this request is ok */
		rc = o->validate(bp, o->owner, elem);
		if (rc) {
			DP(BNX2X_MSG_SP, "Preamble failed: %d\n", rc);
			goto free_and_exit;
		}
	}

	/* If so, add it to the execution queue */
	list_add_tail(&elem->link, &o->exe_queue);

	spin_unlock_bh(&o->lock);

	return 0;

free_and_exit:
	bnx2x_exe_queue_free_elem(bp, elem);

	spin_unlock_bh(&o->lock);

	return rc;
}

static inline void __bnx2x_exe_queue_reset_pending(
	struct bnx2x *bp,
	struct bnx2x_exe_queue_obj *o)
{
	struct bnx2x_exeq_elem *elem;

	while (!list_empty(&o->pending_comp)) {
		elem = list_first_entry(&o->pending_comp,
					struct bnx2x_exeq_elem, link);

		list_del(&elem->link);
		bnx2x_exe_queue_free_elem(bp, elem);
	}
}

/**
 * bnx2x_exe_queue_step - execute one execution chunk atomically
 *
 * @bp:			driver handle
 * @o:			queue
 * @ramrod_flags:	flags
 *
 * (Should be called while holding the exe_queue->lock).
 */
static inline int bnx2x_exe_queue_step(struct bnx2x *bp,
				       struct bnx2x_exe_queue_obj *o,
				       unsigned long *ramrod_flags)
{
	struct bnx2x_exeq_elem *elem, spacer;
	int cur_len = 0, rc;

	memset(&spacer, 0, sizeof(spacer));

	/* Next step should not be performed until the current is finished,
	 * unless a DRV_CLEAR_ONLY bit is set. In this case we just want to
	 * properly clear object internals without sending any command to the FW
	 * which also implies there won't be any completion to clear the
	 * 'pending' list.
	 */
	if (!list_empty(&o->pending_comp)) {
		if (test_bit(RAMROD_DRV_CLR_ONLY, ramrod_flags)) {
			DP(BNX2X_MSG_SP, "RAMROD_DRV_CLR_ONLY requested: resetting a pending_comp list\n");
			__bnx2x_exe_queue_reset_pending(bp, o);
		} else {
			return 1;
		}
	}

	/* Run through the pending commands list and create a next
	 * execution chunk.
	 */
	while (!list_empty(&o->exe_queue)) {
		elem = list_first_entry(&o->exe_queue, struct bnx2x_exeq_elem,
					link);
		WARN_ON(!elem->cmd_len);

		if (cur_len + elem->cmd_len <= o->exe_chunk_len) {
			cur_len += elem->cmd_len;
			/* Prevent from both lists being empty when moving an
			 * element. This will allow the call of
			 * bnx2x_exe_queue_empty() without locking.
			 */
			list_add_tail(&spacer.link, &o->pending_comp);
			mb();
			list_move_tail(&elem->link, &o->pending_comp);
			list_del(&spacer.link);
		} else
			break;
	}

	/* Sanity check */
	if (!cur_len)
		return 0;

	rc = o->execute(bp, o->owner, &o->pending_comp, ramrod_flags);
	if (rc < 0)
		/* In case of an error return the commands back to the queue
		 * and reset the pending_comp.
		 */
		list_splice_init(&o->pending_comp, &o->exe_queue);
	else if (!rc)
		/* If zero is returned, means there are no outstanding pending
		 * completions and we may dismiss the pending list.
		 */
		__bnx2x_exe_queue_reset_pending(bp, o);

	return rc;
}

static inline bool bnx2x_exe_queue_empty(struct bnx2x_exe_queue_obj *o)
{
	bool empty = list_empty(&o->exe_queue);

	/* Don't reorder!!! */
	mb();

	return empty && list_empty(&o->pending_comp);
}

static inline struct bnx2x_exeq_elem *bnx2x_exe_queue_alloc_elem(
	struct bnx2x *bp)
{
	DP(BNX2X_MSG_SP, "Allocating a new exe_queue element\n");
	return kzalloc(sizeof(struct bnx2x_exeq_elem), GFP_ATOMIC);
}
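
/* Typical element life cycle (a sketch under the above API, assuming a
 * caller that holds no queue locks yet; the cmd_data setup is schematic):
 *
 *	struct bnx2x_exeq_elem *elem = bnx2x_exe_queue_alloc_elem(bp);
 *
 *	if (!elem)
 *		return -ENOMEM;
 *	elem->cmd_len = 1;
 *	... fill elem->cmd_data ...
 *	rc = bnx2x_exe_queue_add(bp, &o->exe_queue, elem, false);
 *
 * A later call to bnx2x_exe_queue_step() (made under the queue lock) picks
 * the element up as part of the next execution chunk.
 */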

/************************ raw_obj functions ***********************************/
static bool bnx2x_raw_check_pending(struct bnx2x_raw_obj *o)
{
	return !!test_bit(o->state, o->pstate);
}

static void bnx2x_raw_clear_pending(struct bnx2x_raw_obj *o)
{
	smp_mb__before_clear_bit();
	clear_bit(o->state, o->pstate);
	smp_mb__after_clear_bit();
}

static void bnx2x_raw_set_pending(struct bnx2x_raw_obj *o)
{
	smp_mb__before_clear_bit();
	set_bit(o->state, o->pstate);
	smp_mb__after_clear_bit();
}

/**
 * bnx2x_state_wait - wait until the given bit (state) is cleared
 *
 * @bp:		device handle
 * @state:	state which is to be cleared
 * @pstate:	state buffer
 *
 */
static inline int bnx2x_state_wait(struct bnx2x *bp, int state,
				   unsigned long *pstate)
{
	/* can take a while if any port is running */
	int cnt = 5000;

	if (CHIP_REV_IS_EMUL(bp))
		cnt *= 20;

	DP(BNX2X_MSG_SP, "waiting for state to become %d\n", state);

	might_sleep();
	while (cnt--) {
		if (!test_bit(state, pstate)) {
#ifdef BNX2X_STOP_ON_ERROR
			DP(BNX2X_MSG_SP, "exit  (cnt %d)\n", 5000 - cnt);
#endif
			return 0;
		}

		usleep_range(1000, 2000);

		if (bp->panic)
			return -EIO;
	}

	/* timeout! */
	BNX2X_ERR("timeout waiting for state %d\n", state);
#ifdef BNX2X_STOP_ON_ERROR
	bnx2x_panic();
#endif

	return -EBUSY;
}

static int bnx2x_raw_wait(struct bnx2x *bp, struct bnx2x_raw_obj *raw)
{
	return bnx2x_state_wait(bp, raw->state, raw->pstate);
}

/***************** Classification verbs: Set/Del MAC/VLAN/VLAN-MAC ************/
/* credit handling callbacks */
static bool bnx2x_get_cam_offset_mac(struct bnx2x_vlan_mac_obj *o, int *offset)
{
	struct bnx2x_credit_pool_obj *mp = o->macs_pool;

	WARN_ON(!mp);

	return mp->get_entry(mp, offset);
}

static bool bnx2x_get_credit_mac(struct bnx2x_vlan_mac_obj *o)
{
	struct bnx2x_credit_pool_obj *mp = o->macs_pool;

	WARN_ON(!mp);

	return mp->get(mp, 1);
}

static bool bnx2x_get_cam_offset_vlan(struct bnx2x_vlan_mac_obj *o, int *offset)
{
	struct bnx2x_credit_pool_obj *vp = o->vlans_pool;

	WARN_ON(!vp);

	return vp->get_entry(vp, offset);
}

static bool bnx2x_get_credit_vlan(struct bnx2x_vlan_mac_obj *o)
{
	struct bnx2x_credit_pool_obj *vp = o->vlans_pool;

	WARN_ON(!vp);

	return vp->get(vp, 1);
}

static bool bnx2x_get_credit_vlan_mac(struct bnx2x_vlan_mac_obj *o)
{
	struct bnx2x_credit_pool_obj *mp = o->macs_pool;
	struct bnx2x_credit_pool_obj *vp = o->vlans_pool;

	if (!mp->get(mp, 1))
		return false;

	if (!vp->get(vp, 1)) {
		mp->put(mp, 1);
		return false;
	}

	return true;
}

static bool bnx2x_put_cam_offset_mac(struct bnx2x_vlan_mac_obj *o, int offset)
{
	struct bnx2x_credit_pool_obj *mp = o->macs_pool;

	return mp->put_entry(mp, offset);
}

static bool bnx2x_put_credit_mac(struct bnx2x_vlan_mac_obj *o)
{
	struct bnx2x_credit_pool_obj *mp = o->macs_pool;

	return mp->put(mp, 1);
}

static bool bnx2x_put_cam_offset_vlan(struct bnx2x_vlan_mac_obj *o, int offset)
{
	struct bnx2x_credit_pool_obj *vp = o->vlans_pool;

	return vp->put_entry(vp, offset);
}

static bool bnx2x_put_credit_vlan(struct bnx2x_vlan_mac_obj *o)
{
	struct bnx2x_credit_pool_obj *vp = o->vlans_pool;

	return vp->put(vp, 1);
}

static bool bnx2x_put_credit_vlan_mac(struct bnx2x_vlan_mac_obj *o)
{
	struct bnx2x_credit_pool_obj *mp = o->macs_pool;
	struct bnx2x_credit_pool_obj *vp = o->vlans_pool;

	if (!mp->put(mp, 1))
		return false;

	if (!vp->put(vp, 1)) {
		mp->get(mp, 1);
		return false;
	}

	return true;
}

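/* The get/put callbacks above must stay balanced: a successful ADD
 * consumes one CAM credit and the matching DEL returns it. As a sketch,
 * a client reserves and releases an entry like this:
 *
 *	if (!o->get_credit(o))
 *		return -EINVAL;
 *	...
 *	o->put_credit(o);
 *
 * bnx2x_get_credit_vlan_mac() and bnx2x_put_credit_vlan_mac() keep that
 * invariant across both pools by rolling back the MAC pool operation
 * whenever the VLAN pool operation fails.
 */
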
/**
 * __bnx2x_vlan_mac_h_write_trylock - try getting the vlan mac writer lock
 *
 * @bp:		device handle
 * @o:		vlan_mac object
 *
 * @details Non-blocking implementation; should be called under execution
 *          queue lock.
 */
static int __bnx2x_vlan_mac_h_write_trylock(struct bnx2x *bp,
					    struct bnx2x_vlan_mac_obj *o)
{
	if (o->head_reader) {
		DP(BNX2X_MSG_SP, "vlan_mac_lock writer - There are readers; Busy\n");
		return -EBUSY;
	}

	DP(BNX2X_MSG_SP, "vlan_mac_lock writer - Taken\n");
	return 0;
}

/**
 * __bnx2x_vlan_mac_h_exec_pending - execute step instead of a previous step
 *
 * @bp:		device handle
 * @o:		vlan_mac object
 *
 * @details Should be called under execution queue lock; notice it might release
 *          and reclaim it during its run.
 */
static void __bnx2x_vlan_mac_h_exec_pending(struct bnx2x *bp,
					    struct bnx2x_vlan_mac_obj *o)
{
	int rc;
	unsigned long ramrod_flags = o->saved_ramrod_flags;

	DP(BNX2X_MSG_SP, "vlan_mac_lock execute pending command with ramrod flags %lu\n",
	   ramrod_flags);
	o->head_exe_request = false;
	o->saved_ramrod_flags = 0;
	rc = bnx2x_exe_queue_step(bp, &o->exe_queue, &ramrod_flags);
	if (rc != 0) {
		BNX2X_ERR("execution of pending commands failed with rc %d\n",
			  rc);
#ifdef BNX2X_STOP_ON_ERROR
		bnx2x_panic();
#endif
	}
}

/**
 * __bnx2x_vlan_mac_h_pend - Pend an execution step which couldn't run
 *
 * @bp:			device handle
 * @o:			vlan_mac object
 * @ramrod_flags:	ramrod flags of missed execution
 *
 * @details Should be called under execution queue lock.
 */
static void __bnx2x_vlan_mac_h_pend(struct bnx2x *bp,
				    struct bnx2x_vlan_mac_obj *o,
				    unsigned long ramrod_flags)
{
	o->head_exe_request = true;
	o->saved_ramrod_flags = ramrod_flags;
	DP(BNX2X_MSG_SP, "Placing pending execution with ramrod flags %lu\n",
	   ramrod_flags);
}

/**
 * __bnx2x_vlan_mac_h_write_unlock - unlock the vlan mac head list writer lock
 *
 * @bp:		device handle
 * @o:		vlan_mac object
 *
 * @details Should be called under execution queue lock. Notice if a pending
 *          execution exists, it would perform it - possibly releasing and
 *          reclaiming the execution queue lock.
 */
static void __bnx2x_vlan_mac_h_write_unlock(struct bnx2x *bp,
					    struct bnx2x_vlan_mac_obj *o)
{
	/* It's possible a new pending execution was added since this writer
	 * executed. If so, execute again. [Ad infinitum]
	 */
	while (o->head_exe_request) {
		DP(BNX2X_MSG_SP, "vlan_mac_lock - writer release encountered a pending request\n");
		__bnx2x_vlan_mac_h_exec_pending(bp, o);
	}
}

/**
 * bnx2x_vlan_mac_h_write_unlock - unlock the vlan mac head list writer lock
 *
 * @bp:		device handle
 * @o:		vlan_mac object
 *
 * @details Notice if a pending execution exists, it would perform it -
 *          possibly releasing and reclaiming the execution queue lock.
 */
void bnx2x_vlan_mac_h_write_unlock(struct bnx2x *bp,
				   struct bnx2x_vlan_mac_obj *o)
{
	spin_lock_bh(&o->exe_queue.lock);
	__bnx2x_vlan_mac_h_write_unlock(bp, o);
	spin_unlock_bh(&o->exe_queue.lock);
}

/**
 * __bnx2x_vlan_mac_h_read_lock - lock the vlan mac head list reader lock
 *
 * @bp:		device handle
 * @o:		vlan_mac object
 *
 * @details Should be called under the execution queue lock. May sleep. May
 *          release and reclaim execution queue lock during its run.
 */
static int __bnx2x_vlan_mac_h_read_lock(struct bnx2x *bp,
					struct bnx2x_vlan_mac_obj *o)
{
	/* If we got here, we're holding lock --> no WRITER exists */
	o->head_reader++;
	DP(BNX2X_MSG_SP, "vlan_mac_lock - locked reader - number %d\n",
	   o->head_reader);

	return 0;
}

/**
 * bnx2x_vlan_mac_h_read_lock - lock the vlan mac head list reader lock
 *
 * @bp:		device handle
 * @o:		vlan_mac object
 *
 * @details May sleep. Claims and releases execution queue lock during its run.
 */
int bnx2x_vlan_mac_h_read_lock(struct bnx2x *bp,
			       struct bnx2x_vlan_mac_obj *o)
{
	int rc;

	spin_lock_bh(&o->exe_queue.lock);
	rc = __bnx2x_vlan_mac_h_read_lock(bp, o);
	spin_unlock_bh(&o->exe_queue.lock);

	return rc;
}

/**
 * __bnx2x_vlan_mac_h_read_unlock - unlock the vlan mac head list reader lock
 *
 * @bp:		device handle
 * @o:		vlan_mac object
 *
 * @details Should be called under execution queue lock. Notice if a pending
 *          execution exists, it would be performed if this was the last
 *          reader, possibly releasing and reclaiming the execution queue lock.
 */
static void __bnx2x_vlan_mac_h_read_unlock(struct bnx2x *bp,
					   struct bnx2x_vlan_mac_obj *o)
{
	if (!o->head_reader) {
		BNX2X_ERR("Need to release vlan mac reader lock, but lock isn't taken\n");
#ifdef BNX2X_STOP_ON_ERROR
		bnx2x_panic();
#endif
	} else {
		o->head_reader--;
		DP(BNX2X_MSG_SP, "vlan_mac_lock - decreased readers to %d\n",
		   o->head_reader);
	}

	/* It's possible a new pending execution was added, and that this reader
	 * was last - if so we need to execute the command.
	 */
	if (!o->head_reader && o->head_exe_request) {
		DP(BNX2X_MSG_SP, "vlan_mac_lock - reader release encountered a pending request\n");

		/* Writer release will do the trick */
		__bnx2x_vlan_mac_h_write_unlock(bp, o);
	}
}

/**
 * bnx2x_vlan_mac_h_read_unlock - unlock the vlan mac head list reader lock
 *
 * @bp:		device handle
 * @o:		vlan_mac object
 *
 * @details Notice if a pending execution exists, it would be performed if this
 *          was the last reader. Claims and releases the execution queue lock
 *          during its run.
 */
void bnx2x_vlan_mac_h_read_unlock(struct bnx2x *bp,
				  struct bnx2x_vlan_mac_obj *o)
{
	spin_lock_bh(&o->exe_queue.lock);
	__bnx2x_vlan_mac_h_read_unlock(bp, o);
	spin_unlock_bh(&o->exe_queue.lock);
}

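/* A sketch of the intended reader-side pattern (bnx2x_get_n_elements()
 * below is the real in-driver user): take the lock, walk the o->head
 * registry, then release it so that any execution step pended by a
 * writer in the meantime can finally run:
 *
 *	if (!bnx2x_vlan_mac_h_read_lock(bp, o)) {
 *		list_for_each_entry(pos, &o->head, link)
 *			... inspect pos->u ...
 *		bnx2x_vlan_mac_h_read_unlock(bp, o);
 *	}
 */
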
static int bnx2x_get_n_elements(struct bnx2x *bp, struct bnx2x_vlan_mac_obj *o,
				int n, u8 *base, u8 stride, u8 size)
{
	struct bnx2x_vlan_mac_registry_elem *pos;
	u8 *next = base;
	int counter = 0;
	int read_lock;

	DP(BNX2X_MSG_SP, "get_n_elements - taking vlan_mac_lock (reader)\n");
	read_lock = bnx2x_vlan_mac_h_read_lock(bp, o);
	if (read_lock != 0)
		BNX2X_ERR("get_n_elements failed to get vlan mac reader lock; Access without lock\n");

	/* traverse list */
	list_for_each_entry(pos, &o->head, link) {
		if (counter < n) {
			memcpy(next, &pos->u, size);
			counter++;
			DP(BNX2X_MSG_SP, "copied element number %d to address %p element was:\n",
			   counter, next);
			next += stride + size;
		}
	}

	if (read_lock == 0) {
		DP(BNX2X_MSG_SP, "get_n_elements - releasing vlan_mac_lock (reader)\n");
		bnx2x_vlan_mac_h_read_unlock(bp, o);
	}

	return counter * ETH_ALEN;
}

/* check_add() callbacks */
static int bnx2x_check_mac_add(struct bnx2x *bp,
			       struct bnx2x_vlan_mac_obj *o,
			       union bnx2x_classification_ramrod_data *data)
{
	struct bnx2x_vlan_mac_registry_elem *pos;

	DP(BNX2X_MSG_SP, "Checking MAC %pM for ADD command\n", data->mac.mac);

	if (!is_valid_ether_addr(data->mac.mac))
		return -EINVAL;

	/* Check if a requested MAC already exists */
	list_for_each_entry(pos, &o->head, link)
		if (!memcmp(data->mac.mac, pos->u.mac.mac, ETH_ALEN) &&
		    (data->mac.is_inner_mac == pos->u.mac.is_inner_mac))
			return -EEXIST;

	return 0;
}

static int bnx2x_check_vlan_add(struct bnx2x *bp,
				struct bnx2x_vlan_mac_obj *o,
				union bnx2x_classification_ramrod_data *data)
{
	struct bnx2x_vlan_mac_registry_elem *pos;

	DP(BNX2X_MSG_SP, "Checking VLAN %d for ADD command\n", data->vlan.vlan);

	list_for_each_entry(pos, &o->head, link)
		if (data->vlan.vlan == pos->u.vlan.vlan)
			return -EEXIST;

	return 0;
}

static int bnx2x_check_vlan_mac_add(struct bnx2x *bp,
				    struct bnx2x_vlan_mac_obj *o,
				    union bnx2x_classification_ramrod_data *data)
{
	struct bnx2x_vlan_mac_registry_elem *pos;

	DP(BNX2X_MSG_SP, "Checking VLAN_MAC (%pM, %d) for ADD command\n",
	   data->vlan_mac.mac, data->vlan_mac.vlan);

	list_for_each_entry(pos, &o->head, link)
		if ((data->vlan_mac.vlan == pos->u.vlan_mac.vlan) &&
		    (!memcmp(data->vlan_mac.mac, pos->u.vlan_mac.mac,
			     ETH_ALEN)) &&
		    (data->vlan_mac.is_inner_mac ==
		     pos->u.vlan_mac.is_inner_mac))
			return -EEXIST;

	return 0;
}

/* check_del() callbacks */
static struct bnx2x_vlan_mac_registry_elem *
	bnx2x_check_mac_del(struct bnx2x *bp,
			    struct bnx2x_vlan_mac_obj *o,
			    union bnx2x_classification_ramrod_data *data)
{
	struct bnx2x_vlan_mac_registry_elem *pos;

	DP(BNX2X_MSG_SP, "Checking MAC %pM for DEL command\n", data->mac.mac);

	list_for_each_entry(pos, &o->head, link)
		if ((!memcmp(data->mac.mac, pos->u.mac.mac, ETH_ALEN)) &&
		    (data->mac.is_inner_mac == pos->u.mac.is_inner_mac))
			return pos;

	return NULL;
}

static struct bnx2x_vlan_mac_registry_elem *
	bnx2x_check_vlan_del(struct bnx2x *bp,
			     struct bnx2x_vlan_mac_obj *o,
			     union bnx2x_classification_ramrod_data *data)
{
	struct bnx2x_vlan_mac_registry_elem *pos;

	DP(BNX2X_MSG_SP, "Checking VLAN %d for DEL command\n", data->vlan.vlan);

	list_for_each_entry(pos, &o->head, link)
		if (data->vlan.vlan == pos->u.vlan.vlan)
			return pos;

	return NULL;
}

static struct bnx2x_vlan_mac_registry_elem *
	bnx2x_check_vlan_mac_del(struct bnx2x *bp,
				 struct bnx2x_vlan_mac_obj *o,
				 union bnx2x_classification_ramrod_data *data)
{
	struct bnx2x_vlan_mac_registry_elem *pos;

	DP(BNX2X_MSG_SP, "Checking VLAN_MAC (%pM, %d) for DEL command\n",
	   data->vlan_mac.mac, data->vlan_mac.vlan);

	list_for_each_entry(pos, &o->head, link)
		if ((data->vlan_mac.vlan == pos->u.vlan_mac.vlan) &&
		    (!memcmp(data->vlan_mac.mac, pos->u.vlan_mac.mac,
			     ETH_ALEN)) &&
		    (data->vlan_mac.is_inner_mac ==
		     pos->u.vlan_mac.is_inner_mac))
			return pos;

	return NULL;
}

/* check_move() callback */
static bool bnx2x_check_move(struct bnx2x *bp,
			     struct bnx2x_vlan_mac_obj *src_o,
			     struct bnx2x_vlan_mac_obj *dst_o,
			     union bnx2x_classification_ramrod_data *data)
{
	struct bnx2x_vlan_mac_registry_elem *pos;
	int rc;

	/* Check if we can delete the requested configuration from the first
	 * object.
	 */
	pos = src_o->check_del(bp, src_o, data);

	/* check if configuration can be added */
	rc = dst_o->check_add(bp, dst_o, data);

	/* If this classification can not be added (is already set)
	 * or can't be deleted - return an error.
	 */
	if (rc || !pos)
		return false;

	return true;
}

static bool bnx2x_check_move_always_err(
	struct bnx2x *bp,
	struct bnx2x_vlan_mac_obj *src_o,
	struct bnx2x_vlan_mac_obj *dst_o,
	union bnx2x_classification_ramrod_data *data)
{
	return false;
}

static inline u8 bnx2x_vlan_mac_get_rx_tx_flag(struct bnx2x_vlan_mac_obj *o)
{
	struct bnx2x_raw_obj *raw = &o->raw;
	u8 rx_tx_flag = 0;

	if ((raw->obj_type == BNX2X_OBJ_TYPE_TX) ||
	    (raw->obj_type == BNX2X_OBJ_TYPE_RX_TX))
		rx_tx_flag |= ETH_CLASSIFY_CMD_HEADER_TX_CMD;

	if ((raw->obj_type == BNX2X_OBJ_TYPE_RX) ||
	    (raw->obj_type == BNX2X_OBJ_TYPE_RX_TX))
		rx_tx_flag |= ETH_CLASSIFY_CMD_HEADER_RX_CMD;

	return rx_tx_flag;
}

void bnx2x_set_mac_in_nig(struct bnx2x *bp,
			  bool add, unsigned char *dev_addr, int index)
{
	u32 wb_data[2];
	u32 reg_offset = BP_PORT(bp) ? NIG_REG_LLH1_FUNC_MEM :
			 NIG_REG_LLH0_FUNC_MEM;

	if (!IS_MF_SI(bp) && !IS_MF_AFEX(bp))
		return;

	if (index > BNX2X_LLH_CAM_MAX_PF_LINE)
		return;

	DP(BNX2X_MSG_SP, "Going to %s LLH configuration at entry %d\n",
	   (add ? "ADD" : "DELETE"), index);

	if (add) {
		/* LLH_FUNC_MEM is a u64 WB register */
		reg_offset += 8*index;

		wb_data[0] = ((dev_addr[2] << 24) | (dev_addr[3] << 16) |
			      (dev_addr[4] << 8) |  dev_addr[5]);
		wb_data[1] = ((dev_addr[0] << 8) |  dev_addr[1]);

		REG_WR_DMAE(bp, reg_offset, wb_data, 2);
	}

	REG_WR(bp, (BP_PORT(bp) ? NIG_REG_LLH1_FUNC_MEM_ENABLE :
				  NIG_REG_LLH0_FUNC_MEM_ENABLE) + 4*index, add);
}

/**
 * bnx2x_vlan_mac_set_cmd_hdr_e2 - set a header in a single classify ramrod
 *
 * @bp:		device handle
 * @o:		queue for which we want to configure this rule
 * @add:	if true the command is an ADD command, DEL otherwise
 * @opcode:	CLASSIFY_RULE_OPCODE_XXX
 * @hdr:	pointer to a header to setup
 *
 */
static inline void bnx2x_vlan_mac_set_cmd_hdr_e2(struct bnx2x *bp,
	struct bnx2x_vlan_mac_obj *o, bool add, int opcode,
	struct eth_classify_cmd_header *hdr)
{
	struct bnx2x_raw_obj *raw = &o->raw;

	hdr->client_id = raw->cl_id;
	hdr->func_id = raw->func_id;

	/* Rx or/and Tx (internal switching) configuration ? */
	hdr->cmd_general_data |=
		bnx2x_vlan_mac_get_rx_tx_flag(o);

	if (add)
		hdr->cmd_general_data |= ETH_CLASSIFY_CMD_HEADER_IS_ADD;

	hdr->cmd_general_data |=
		(opcode << ETH_CLASSIFY_CMD_HEADER_OPCODE_SHIFT);
}

/**
 * bnx2x_vlan_mac_set_rdata_hdr_e2 - set the classify ramrod data header
 *
 * @cid:	connection id
 * @type:	BNX2X_FILTER_XXX_PENDING
 * @hdr:	pointer to header to setup
 * @rule_cnt:	number of rules in the ramrod data buffer
 *
 * Currently we always configure one rule; the echo field is set to contain
 * a CID and an opcode type.
 */
static inline void bnx2x_vlan_mac_set_rdata_hdr_e2(u32 cid, int type,
				struct eth_classify_header *hdr, int rule_cnt)
{
	hdr->echo = cpu_to_le32((cid & BNX2X_SWCID_MASK) |
				(type << BNX2X_SWCID_SHIFT));
	hdr->rule_cnt = (u8)rule_cnt;
}

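/* The echo field set up above travels to the FW and back unchanged, so
 * the completion path can recover both the connection and the pending
 * command type from it. A decoding sketch (mirroring the encoding in
 * bnx2x_vlan_mac_set_rdata_hdr_e2(); the event element field layout is
 * assumed from the rest of the driver):
 *
 *	u32 echo = elem->message.data.eth_event.echo;
 *	u32 cid  = echo & BNX2X_SWCID_MASK;
 *	int type = echo >> BNX2X_SWCID_SHIFT;
 */
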
/* hw_config() callbacks */
static void bnx2x_set_one_mac_e2(struct bnx2x *bp,
				 struct bnx2x_vlan_mac_obj *o,
				 struct bnx2x_exeq_elem *elem, int rule_idx,
				 int cam_offset)
{
	struct bnx2x_raw_obj *raw = &o->raw;
	struct eth_classify_rules_ramrod_data *data =
		(struct eth_classify_rules_ramrod_data *)(raw->rdata);
	int rule_cnt = rule_idx + 1, cmd = elem->cmd_data.vlan_mac.cmd;
	union eth_classify_rule_cmd *rule_entry = &data->rules[rule_idx];
	bool add = (cmd == BNX2X_VLAN_MAC_ADD) ? true : false;
	unsigned long *vlan_mac_flags = &elem->cmd_data.vlan_mac.vlan_mac_flags;
	u8 *mac = elem->cmd_data.vlan_mac.u.mac.mac;

	/* Set LLH CAM entry: currently only iSCSI and ETH macs are
	 * relevant. In addition, current implementation is tuned for a
	 * single ETH MAC.
	 *
	 * When multiple unicast ETH MACs PF configuration in switch
	 * independent mode is required (NetQ, multiple netdev MACs,
	 * etc.), consider better utilisation of 8 per function MAC
	 * entries in the LLH register. There is also
	 * NIG_REG_P[01]_LLH_FUNC_MEM2 registers that complete the
	 * total number of CAM entries to 16.
	 *
	 * Currently we won't configure NIG for MACs other than a primary ETH
	 * MAC and iSCSI L2 MAC.
	 *
	 * If this MAC is moving from one Queue to another, no need to change
	 * NIG configuration.
	 */
	if (cmd != BNX2X_VLAN_MAC_MOVE) {
		if (test_bit(BNX2X_ISCSI_ETH_MAC, vlan_mac_flags))
			bnx2x_set_mac_in_nig(bp, add, mac,
					     BNX2X_LLH_CAM_ISCSI_ETH_LINE);
		else if (test_bit(BNX2X_ETH_MAC, vlan_mac_flags))
			bnx2x_set_mac_in_nig(bp, add, mac,
					     BNX2X_LLH_CAM_ETH_LINE);
	}

	/* Reset the ramrod data buffer for the first rule */
	if (rule_idx == 0)
		memset(data, 0, sizeof(*data));

	/* Setup a command header */
	bnx2x_vlan_mac_set_cmd_hdr_e2(bp, o, add, CLASSIFY_RULE_OPCODE_MAC,
				      &rule_entry->mac.header);

	DP(BNX2X_MSG_SP, "About to %s MAC %pM for Queue %d\n",
	   (add ? "add" : "delete"), mac, raw->cl_id);

	/* Set a MAC itself */
	bnx2x_set_fw_mac_addr(&rule_entry->mac.mac_msb,
			      &rule_entry->mac.mac_mid,
			      &rule_entry->mac.mac_lsb, mac);
	rule_entry->mac.inner_mac =
		cpu_to_le16(elem->cmd_data.vlan_mac.u.mac.is_inner_mac);

	/* MOVE: Add a rule that will add this MAC to the target Queue */
	if (cmd == BNX2X_VLAN_MAC_MOVE) {
		rule_entry++;
		rule_cnt++;

		/* Setup ramrod data */
		bnx2x_vlan_mac_set_cmd_hdr_e2(bp,
					      elem->cmd_data.vlan_mac.target_obj,
					      true, CLASSIFY_RULE_OPCODE_MAC,
					      &rule_entry->mac.header);

		/* Set a MAC itself */
		bnx2x_set_fw_mac_addr(&rule_entry->mac.mac_msb,
				      &rule_entry->mac.mac_mid,
				      &rule_entry->mac.mac_lsb, mac);
		rule_entry->mac.inner_mac =
			cpu_to_le16(elem->cmd_data.vlan_mac.u.mac.is_inner_mac);
	}

	/* Set the ramrod data header */
	/* TODO: take this to the higher level in order to prevent multiple
	   writing */
	bnx2x_vlan_mac_set_rdata_hdr_e2(raw->cid, raw->state, &data->header,
					rule_cnt);
}

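/* Resulting ramrod data layout (schematic): a plain ADD/DEL consumes a
 * single rule entry, while MOVE consumes two - a DEL-like rule on the
 * source queue followed by an ADD rule on the target queue:
 *
 *	data->rules[rule_idx]     = <DEL from source client>
 *	data->rules[rule_idx + 1] = <ADD to target client>   (MOVE only)
 *
 * which is why rule_cnt is bumped by one extra for MOVE above.
 */
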
/**
 * bnx2x_vlan_mac_set_rdata_hdr_e1x - set a header in a single classify ramrod
 *
 * @bp:		device handle
 * @o:		queue
 * @type:	BNX2X_FILTER_XXX_PENDING
 * @cam_offset:	offset in cam memory
 * @hdr:	pointer to a header to setup
 *
 * E1/E1H
 */
static inline void bnx2x_vlan_mac_set_rdata_hdr_e1x(struct bnx2x *bp,
	struct bnx2x_vlan_mac_obj *o, int type, int cam_offset,
	struct mac_configuration_hdr *hdr)
{
	struct bnx2x_raw_obj *r = &o->raw;

	hdr->length = 1;
	hdr->offset = (u8)cam_offset;
	hdr->client_id = cpu_to_le16(0xff);
	hdr->echo = cpu_to_le32((r->cid & BNX2X_SWCID_MASK) |
				(type << BNX2X_SWCID_SHIFT));
}

static inline void bnx2x_vlan_mac_set_cfg_entry_e1x(struct bnx2x *bp,
	struct bnx2x_vlan_mac_obj *o, bool add, int opcode, u8 *mac,
	u16 vlan_id, struct mac_configuration_entry *cfg_entry)
{
	struct bnx2x_raw_obj *r = &o->raw;
	u32 cl_bit_vec = (1 << r->cl_id);

	cfg_entry->clients_bit_vector = cpu_to_le32(cl_bit_vec);
	cfg_entry->pf_id = r->func_id;
	cfg_entry->vlan_id = cpu_to_le16(vlan_id);

	if (add) {
		SET_FLAG(cfg_entry->flags, MAC_CONFIGURATION_ENTRY_ACTION_TYPE,
			 T_ETH_MAC_COMMAND_SET);
		SET_FLAG(cfg_entry->flags,
			 MAC_CONFIGURATION_ENTRY_VLAN_FILTERING_MODE, opcode);

		/* Set a MAC in a ramrod data */
		bnx2x_set_fw_mac_addr(&cfg_entry->msb_mac_addr,
				      &cfg_entry->middle_mac_addr,
				      &cfg_entry->lsb_mac_addr, mac);
	} else
		SET_FLAG(cfg_entry->flags, MAC_CONFIGURATION_ENTRY_ACTION_TYPE,
			 T_ETH_MAC_COMMAND_INVALIDATE);
}

static inline void bnx2x_vlan_mac_set_rdata_e1x(struct bnx2x *bp,
	struct bnx2x_vlan_mac_obj *o, int type, int cam_offset, bool add,
	u8 *mac, u16 vlan_id, int opcode, struct mac_configuration_cmd *config)
{
	struct mac_configuration_entry *cfg_entry = &config->config_table[0];
	struct bnx2x_raw_obj *raw = &o->raw;

	bnx2x_vlan_mac_set_rdata_hdr_e1x(bp, o, type, cam_offset,
					 &config->hdr);
	bnx2x_vlan_mac_set_cfg_entry_e1x(bp, o, add, opcode, mac, vlan_id,
					 cfg_entry);

	DP(BNX2X_MSG_SP, "%s MAC %pM CLID %d CAM offset %d\n",
	   (add ? "setting" : "clearing"),
	   mac, raw->cl_id, cam_offset);
}

/**
 * bnx2x_set_one_mac_e1x - fill a single MAC rule ramrod data
 *
 * @bp:		device handle
 * @o:		bnx2x_vlan_mac_obj
 * @elem:	bnx2x_exeq_elem
 * @rule_idx:	rule_idx
 * @cam_offset:	cam_offset
 */
static void bnx2x_set_one_mac_e1x(struct bnx2x *bp,
				  struct bnx2x_vlan_mac_obj *o,
				  struct bnx2x_exeq_elem *elem, int rule_idx,
				  int cam_offset)
{
	struct bnx2x_raw_obj *raw = &o->raw;
	struct mac_configuration_cmd *config =
		(struct mac_configuration_cmd *)(raw->rdata);
	/* 57710 and 57711 do not support MOVE command,
	 * so it's either ADD or DEL
	 */
	bool add = (elem->cmd_data.vlan_mac.cmd == BNX2X_VLAN_MAC_ADD) ?
		true : false;

	/* Reset the ramrod data buffer */
	memset(config, 0, sizeof(*config));

	bnx2x_vlan_mac_set_rdata_e1x(bp, o, raw->state,
				     cam_offset, add,
				     elem->cmd_data.vlan_mac.u.mac.mac, 0,
				     ETH_VLAN_FILTER_ANY_VLAN, config);
}

static void bnx2x_set_one_vlan_e2(struct bnx2x *bp,
				  struct bnx2x_vlan_mac_obj *o,
				  struct bnx2x_exeq_elem *elem, int rule_idx,
				  int cam_offset)
{
	struct bnx2x_raw_obj *raw = &o->raw;
	struct eth_classify_rules_ramrod_data *data =
		(struct eth_classify_rules_ramrod_data *)(raw->rdata);
	int rule_cnt = rule_idx + 1;
	union eth_classify_rule_cmd *rule_entry = &data->rules[rule_idx];
	enum bnx2x_vlan_mac_cmd cmd = elem->cmd_data.vlan_mac.cmd;
	bool add = (cmd == BNX2X_VLAN_MAC_ADD) ? true : false;
	u16 vlan = elem->cmd_data.vlan_mac.u.vlan.vlan;

	/* Reset the ramrod data buffer for the first rule */
	if (rule_idx == 0)
		memset(data, 0, sizeof(*data));

	/* Set a rule header */
	bnx2x_vlan_mac_set_cmd_hdr_e2(bp, o, add, CLASSIFY_RULE_OPCODE_VLAN,
				      &rule_entry->vlan.header);

	DP(BNX2X_MSG_SP, "About to %s VLAN %d\n", (add ? "add" : "delete"),
	   vlan);

	/* Set a VLAN itself */
	rule_entry->vlan.vlan = cpu_to_le16(vlan);

	/* MOVE: Add a rule that will add this VLAN to the target Queue */
	if (cmd == BNX2X_VLAN_MAC_MOVE) {
		rule_entry++;
		rule_cnt++;

		/* Setup ramrod data */
		bnx2x_vlan_mac_set_cmd_hdr_e2(bp,
					      elem->cmd_data.vlan_mac.target_obj,
					      true, CLASSIFY_RULE_OPCODE_VLAN,
					      &rule_entry->vlan.header);

		/* Set a VLAN itself */
		rule_entry->vlan.vlan = cpu_to_le16(vlan);
	}

	/* Set the ramrod data header */
	/* TODO: take this to the higher level in order to prevent multiple
	   writing */
	bnx2x_vlan_mac_set_rdata_hdr_e2(raw->cid, raw->state, &data->header,
					rule_cnt);
}

static void bnx2x_set_one_vlan_mac_e2(struct bnx2x *bp,
				      struct bnx2x_vlan_mac_obj *o,
				      struct bnx2x_exeq_elem *elem,
				      int rule_idx, int cam_offset)
{
	struct bnx2x_raw_obj *raw = &o->raw;
	struct eth_classify_rules_ramrod_data *data =
		(struct eth_classify_rules_ramrod_data *)(raw->rdata);
	int rule_cnt = rule_idx + 1;
	union eth_classify_rule_cmd *rule_entry = &data->rules[rule_idx];
	enum bnx2x_vlan_mac_cmd cmd = elem->cmd_data.vlan_mac.cmd;
	bool add = (cmd == BNX2X_VLAN_MAC_ADD) ? true : false;
	u16 vlan = elem->cmd_data.vlan_mac.u.vlan_mac.vlan;
	u8 *mac = elem->cmd_data.vlan_mac.u.vlan_mac.mac;

	/* Reset the ramrod data buffer for the first rule */
	if (rule_idx == 0)
		memset(data, 0, sizeof(*data));

	/* Set a rule header */
	bnx2x_vlan_mac_set_cmd_hdr_e2(bp, o, add, CLASSIFY_RULE_OPCODE_PAIR,
				      &rule_entry->pair.header);

	/* Set VLAN and MAC themselves */
	rule_entry->pair.vlan = cpu_to_le16(vlan);
	bnx2x_set_fw_mac_addr(&rule_entry->pair.mac_msb,
			      &rule_entry->pair.mac_mid,
			      &rule_entry->pair.mac_lsb, mac);
	rule_entry->pair.inner_mac =
		cpu_to_le16(elem->cmd_data.vlan_mac.u.vlan_mac.is_inner_mac);
	/* MOVE: Add a rule that will add this MAC to the target Queue */
	if (cmd == BNX2X_VLAN_MAC_MOVE) {
		rule_entry++;
		rule_cnt++;

		/* Setup ramrod data */
		bnx2x_vlan_mac_set_cmd_hdr_e2(bp,
					      elem->cmd_data.vlan_mac.target_obj,
					      true, CLASSIFY_RULE_OPCODE_PAIR,
					      &rule_entry->pair.header);

		/* Set a VLAN itself */
		rule_entry->pair.vlan = cpu_to_le16(vlan);
		bnx2x_set_fw_mac_addr(&rule_entry->pair.mac_msb,
				      &rule_entry->pair.mac_mid,
				      &rule_entry->pair.mac_lsb, mac);
		rule_entry->pair.inner_mac =
			cpu_to_le16(elem->cmd_data.vlan_mac.u.vlan_mac.is_inner_mac);
	}

	/* Set the ramrod data header */
	/* TODO: take this to the higher level in order to prevent multiple
	   writing */
	bnx2x_vlan_mac_set_rdata_hdr_e2(raw->cid, raw->state, &data->header,
					rule_cnt);
}

/**
 * bnx2x_set_one_vlan_mac_e1h - fill a single VLAN-MAC rule ramrod data
 *
 * @bp:		device handle
 * @o:		bnx2x_vlan_mac_obj
 * @elem:	bnx2x_exeq_elem
 * @rule_idx:	rule_idx
 * @cam_offset:	cam_offset
 */
static void bnx2x_set_one_vlan_mac_e1h(struct bnx2x *bp,
				       struct bnx2x_vlan_mac_obj *o,
				       struct bnx2x_exeq_elem *elem,
				       int rule_idx, int cam_offset)
{
	struct bnx2x_raw_obj *raw = &o->raw;
	struct mac_configuration_cmd *config =
		(struct mac_configuration_cmd *)(raw->rdata);
	/* 57710 and 57711 do not support MOVE command,
	 * so it's either ADD or DEL
	 */
	bool add = (elem->cmd_data.vlan_mac.cmd == BNX2X_VLAN_MAC_ADD) ?
		true : false;

	/* Reset the ramrod data buffer */
	memset(config, 0, sizeof(*config));

	bnx2x_vlan_mac_set_rdata_e1x(bp, o, BNX2X_FILTER_VLAN_MAC_PENDING,
				     cam_offset, add,
				     elem->cmd_data.vlan_mac.u.vlan_mac.mac,
				     elem->cmd_data.vlan_mac.u.vlan_mac.vlan,
				     ETH_VLAN_FILTER_CLASSIFY, config);
}

/**
 * bnx2x_vlan_mac_restore - reconfigure the next MAC/VLAN/VLAN-MAC element
 *
 * @bp:		device handle
 * @p:		command parameters
 * @ppos:	pointer to the cookie
 *
 * Reconfigures the next MAC/VLAN/VLAN-MAC element from the list of
 * previously configured elements.
 *
 * From the command parameters only the RAMROD_COMP_WAIT bit in ramrod_flags
 * is taken into account.
 *
 * The cookie should be given back in the next call to make the function
 * handle the next element. If *ppos is set to NULL it will restart the
 * iterator. If the returned *ppos == NULL, the last element has been
 * handled.
 */
static int bnx2x_vlan_mac_restore(struct bnx2x *bp,
				  struct bnx2x_vlan_mac_ramrod_params *p,
				  struct bnx2x_vlan_mac_registry_elem **ppos)
{
	struct bnx2x_vlan_mac_registry_elem *pos;
	struct bnx2x_vlan_mac_obj *o = p->vlan_mac_obj;

	/* If list is empty - there is nothing to do here */
	if (list_empty(&o->head)) {
		*ppos = NULL;
		return 0;
	}

	/* make a step... */
	if (*ppos == NULL)
		*ppos = list_first_entry(&o->head,
					 struct bnx2x_vlan_mac_registry_elem,
					 link);
	else
		*ppos = list_next_entry(*ppos, link);

	pos = *ppos;

	/* If it's the last step - return NULL */
	if (list_is_last(&pos->link, &o->head))
		*ppos = NULL;

	/* Prepare a 'user_req' */
	memcpy(&p->user_req.u, &pos->u, sizeof(pos->u));

	/* Set the command */
	p->user_req.cmd = BNX2X_VLAN_MAC_ADD;

	/* Set vlan_mac_flags */
	p->user_req.vlan_mac_flags = pos->vlan_mac_flags;

	/* Set a restore bit */
	__set_bit(RAMROD_RESTORE, &p->ramrod_flags);

	return bnx2x_config_vlan_mac(bp, p);
}

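/* Restore-iteration sketch: *ppos acts as a cookie, so a caller keeps
 * feeding the last returned position back in until it comes back NULL
 * (schematic, assuming 'p' was prepared by the caller):
 *
 *	struct bnx2x_vlan_mac_registry_elem *pos = NULL;
 *
 *	do {
 *		rc = bnx2x_vlan_mac_restore(bp, &p, &pos);
 *		if (rc)
 *			return rc;
 *	} while (pos);
 */
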
/* bnx2x_exeq_get_mac/bnx2x_exeq_get_vlan/bnx2x_exeq_get_vlan_mac return a
 * pointer to an element with a specific criteria and NULL if such an element
 * hasn't been found.
 */
static struct bnx2x_exeq_elem *bnx2x_exeq_get_mac(
	struct bnx2x_exe_queue_obj *o,
	struct bnx2x_exeq_elem *elem)
{
	struct bnx2x_exeq_elem *pos;
	struct bnx2x_mac_ramrod_data *data = &elem->cmd_data.vlan_mac.u.mac;

	/* Check pending for execution commands */
	list_for_each_entry(pos, &o->exe_queue, link)
		if (!memcmp(&pos->cmd_data.vlan_mac.u.mac, data,
			    sizeof(*data)) &&
		    (pos->cmd_data.vlan_mac.cmd == elem->cmd_data.vlan_mac.cmd))
			return pos;

	return NULL;
}

static struct bnx2x_exeq_elem *bnx2x_exeq_get_vlan(
	struct bnx2x_exe_queue_obj *o,
	struct bnx2x_exeq_elem *elem)
{
	struct bnx2x_exeq_elem *pos;
	struct bnx2x_vlan_ramrod_data *data = &elem->cmd_data.vlan_mac.u.vlan;

	/* Check pending for execution commands */
	list_for_each_entry(pos, &o->exe_queue, link)
		if (!memcmp(&pos->cmd_data.vlan_mac.u.vlan, data,
			    sizeof(*data)) &&
		    (pos->cmd_data.vlan_mac.cmd == elem->cmd_data.vlan_mac.cmd))
			return pos;

	return NULL;
}

static struct bnx2x_exeq_elem *bnx2x_exeq_get_vlan_mac(
	struct bnx2x_exe_queue_obj *o,
	struct bnx2x_exeq_elem *elem)
{
	struct bnx2x_exeq_elem *pos;
	struct bnx2x_vlan_mac_ramrod_data *data =
		&elem->cmd_data.vlan_mac.u.vlan_mac;

	/* Check pending for execution commands */
	list_for_each_entry(pos, &o->exe_queue, link)
		if (!memcmp(&pos->cmd_data.vlan_mac.u.vlan_mac, data,
			    sizeof(*data)) &&
		    (pos->cmd_data.vlan_mac.cmd == elem->cmd_data.vlan_mac.cmd))
			return pos;

	return NULL;
}

/**
 * bnx2x_validate_vlan_mac_add - check if an ADD command can be executed
 *
 * @bp:		device handle
 * @qo:		bnx2x_qable_obj
 * @elem:	bnx2x_exeq_elem
 *
 * Checks that the requested configuration can be added. If yes and if
 * requested, consume CAM credit.
 *
 * The 'validate' is run after the 'optimize'.
 *
 */
static inline int bnx2x_validate_vlan_mac_add(struct bnx2x *bp,
					      union bnx2x_qable_obj *qo,
					      struct bnx2x_exeq_elem *elem)
{
	struct bnx2x_vlan_mac_obj *o = &qo->vlan_mac;
	struct bnx2x_exe_queue_obj *exeq = &o->exe_queue;
	int rc;

	/* Check the registry */
	rc = o->check_add(bp, o, &elem->cmd_data.vlan_mac.u);
	if (rc) {
		DP(BNX2X_MSG_SP, "ADD command is not allowed considering current registry state.\n");
		return rc;
	}

	/* Check if there is a pending ADD command for this
	 * MAC/VLAN/VLAN-MAC. Return an error if there is.
	 */
	if (exeq->get(exeq, elem)) {
		DP(BNX2X_MSG_SP, "There is a pending ADD command already\n");
		return -EEXIST;
	}

	/* TODO: Check the pending MOVE from other objects where this
	 * object is a destination object.
	 */

	/* Consume the credit if not requested not to */
	if (!(test_bit(BNX2X_DONT_CONSUME_CAM_CREDIT,
		       &elem->cmd_data.vlan_mac.vlan_mac_flags) ||
	      o->get_credit(o)))
		return -EINVAL;

	return 0;
}

/**
 * bnx2x_validate_vlan_mac_del - check if the DEL command can be executed
 *
 * @bp:		device handle
 * @qo:		qable object to check
 * @elem:	element that needs to be deleted
 *
 * Checks that the requested configuration can be deleted. If yes and if
 * requested, returns a CAM credit.
 *
 * The 'validate' is run after the 'optimize'.
 */
static inline int bnx2x_validate_vlan_mac_del(struct bnx2x *bp,
					      union bnx2x_qable_obj *qo,
					      struct bnx2x_exeq_elem *elem)
{
	struct bnx2x_vlan_mac_obj *o = &qo->vlan_mac;
	struct bnx2x_vlan_mac_registry_elem *pos;
	struct bnx2x_exe_queue_obj *exeq = &o->exe_queue;
	struct bnx2x_exeq_elem query_elem;

	/* If this classification can not be deleted (doesn't exist)
	 * - return -EEXIST.
	 */
	pos = o->check_del(bp, o, &elem->cmd_data.vlan_mac.u);
	if (!pos) {
		DP(BNX2X_MSG_SP, "DEL command is not allowed considering current registry state\n");
		return -EEXIST;
	}

	/* Check if there are pending DEL or MOVE commands for this
	 * MAC/VLAN/VLAN-MAC. Return an error if so.
	 */
	memcpy(&query_elem, elem, sizeof(query_elem));

	/* Check for MOVE commands */
	query_elem.cmd_data.vlan_mac.cmd = BNX2X_VLAN_MAC_MOVE;
	if (exeq->get(exeq, &query_elem)) {
		BNX2X_ERR("There is a pending MOVE command already\n");
		return -EINVAL;
	}

	/* Check for DEL commands */
	if (exeq->get(exeq, elem)) {
		DP(BNX2X_MSG_SP, "There is a pending DEL command already\n");
		return -EEXIST;
	}

	/* Return the credit to the credit pool if not requested not to */
	if (!(test_bit(BNX2X_DONT_CONSUME_CAM_CREDIT,
		       &elem->cmd_data.vlan_mac.vlan_mac_flags) ||
	      o->put_credit(o))) {
		BNX2X_ERR("Failed to return a credit\n");
		return -EINVAL;
	}

	return 0;
}

/**
 * bnx2x_validate_vlan_mac_move - check if the MOVE command can be executed
 *
 * @bp:		device handle
 * @qo:		qable object to check (source)
 * @elem:	element that needs to be moved
 *
 * Checks that the requested configuration can be moved. If yes and if
 * requested, returns a CAM credit.
 *
 * The 'validate' is run after the 'optimize'.
 */
static inline int bnx2x_validate_vlan_mac_move(struct bnx2x *bp,
					       union bnx2x_qable_obj *qo,
					       struct bnx2x_exeq_elem *elem)
{
	struct bnx2x_vlan_mac_obj *src_o = &qo->vlan_mac;
	struct bnx2x_vlan_mac_obj *dest_o = elem->cmd_data.vlan_mac.target_obj;
	struct bnx2x_exeq_elem query_elem;
	struct bnx2x_exe_queue_obj *src_exeq = &src_o->exe_queue;
	struct bnx2x_exe_queue_obj *dest_exeq = &dest_o->exe_queue;

	/* Check if we can perform this operation based on the current registry
	 * state.
	 */
	if (!src_o->check_move(bp, src_o, dest_o,
			       &elem->cmd_data.vlan_mac.u)) {
		DP(BNX2X_MSG_SP, "MOVE command is not allowed considering current registry state\n");
		return -EINVAL;
	}

	/* Check if there is an already pending DEL or MOVE command for the
	 * source object or ADD command for a destination object. Return an
	 * error if so.
	 */
	memcpy(&query_elem, elem, sizeof(query_elem));

	/* Check DEL on source */
	query_elem.cmd_data.vlan_mac.cmd = BNX2X_VLAN_MAC_DEL;
	if (src_exeq->get(src_exeq, &query_elem)) {
		BNX2X_ERR("There is a pending DEL command on the source queue already\n");
		return -EINVAL;
	}

	/* Check MOVE on source */
	if (src_exeq->get(src_exeq, elem)) {
		DP(BNX2X_MSG_SP, "There is a pending MOVE command already\n");
		return -EEXIST;
	}

	/* Check ADD on destination */
	query_elem.cmd_data.vlan_mac.cmd = BNX2X_VLAN_MAC_ADD;
	if (dest_exeq->get(dest_exeq, &query_elem)) {
		BNX2X_ERR("There is a pending ADD command on the destination queue already\n");
		return -EINVAL;
	}

	/* Consume the credit if not requested not to */
	if (!(test_bit(BNX2X_DONT_CONSUME_CAM_CREDIT_DEST,
		       &elem->cmd_data.vlan_mac.vlan_mac_flags) ||
	      dest_o->get_credit(dest_o)))
		return -EINVAL;

	if (!(test_bit(BNX2X_DONT_CONSUME_CAM_CREDIT,
		       &elem->cmd_data.vlan_mac.vlan_mac_flags) ||
	      src_o->put_credit(src_o))) {
		/* return the credit taken from dest... */
		dest_o->put_credit(dest_o);
		return -EINVAL;
	}

	return 0;
}

static int bnx2x_validate_vlan_mac(struct bnx2x *bp,
				   union bnx2x_qable_obj *qo,
				   struct bnx2x_exeq_elem *elem)
{
	switch (elem->cmd_data.vlan_mac.cmd) {
	case BNX2X_VLAN_MAC_ADD:
		return bnx2x_validate_vlan_mac_add(bp, qo, elem);
	case BNX2X_VLAN_MAC_DEL:
		return bnx2x_validate_vlan_mac_del(bp, qo, elem);
	case BNX2X_VLAN_MAC_MOVE:
		return bnx2x_validate_vlan_mac_move(bp, qo, elem);
	default:
		return -EINVAL;
	}
}

static int bnx2x_remove_vlan_mac(struct bnx2x *bp,
				 union bnx2x_qable_obj *qo,
				 struct bnx2x_exeq_elem *elem)
{
	int rc = 0;

	/* If consumption wasn't required, nothing to do */
	if (test_bit(BNX2X_DONT_CONSUME_CAM_CREDIT,
		     &elem->cmd_data.vlan_mac.vlan_mac_flags))
		return 0;

	switch (elem->cmd_data.vlan_mac.cmd) {
	case BNX2X_VLAN_MAC_ADD:
	case BNX2X_VLAN_MAC_MOVE:
		rc = qo->vlan_mac.put_credit(&qo->vlan_mac);
		break;
	case BNX2X_VLAN_MAC_DEL:
		rc = qo->vlan_mac.get_credit(&qo->vlan_mac);
		break;
	default:
		return -EINVAL;
	}

	if (rc != true)
		return -EINVAL;

	return 0;
}

/**
 * bnx2x_wait_vlan_mac - passively wait for 5 seconds until all work completes.
 *
 * @bp:		device handle
 * @o:		bnx2x_vlan_mac_obj
 *
 */
static int bnx2x_wait_vlan_mac(struct bnx2x *bp,
			       struct bnx2x_vlan_mac_obj *o)
{
	int cnt = 5000, rc;
	struct bnx2x_exe_queue_obj *exeq = &o->exe_queue;
	struct bnx2x_raw_obj *raw = &o->raw;

	while (cnt--) {
		/* Wait for the current command to complete */
		rc = raw->wait_comp(bp, raw);
		if (rc)
			return rc;

		/* Wait until there are no pending commands */
		if (!bnx2x_exe_queue_empty(exeq))
			usleep_range(1000, 2000);
		else
			return 0;
	}

	return -EBUSY;
}

Yuval Mintz8b09be52013-08-01 17:30:59 +03001594static int __bnx2x_vlan_mac_execute_step(struct bnx2x *bp,
1595 struct bnx2x_vlan_mac_obj *o,
1596 unsigned long *ramrod_flags)
1597{
1598 int rc = 0;
1599
1600 spin_lock_bh(&o->exe_queue.lock);
1601
1602 DP(BNX2X_MSG_SP, "vlan_mac_execute_step - trying to take writer lock\n");
1603 rc = __bnx2x_vlan_mac_h_write_trylock(bp, o);
1604
1605 if (rc != 0) {
1606 __bnx2x_vlan_mac_h_pend(bp, o, *ramrod_flags);
1607
 1608 /* The calling function should not differentiate between this case
1609 * and the case in which there is already a pending ramrod
1610 */
1611 rc = 1;
1612 } else {
1613 rc = bnx2x_exe_queue_step(bp, &o->exe_queue, ramrod_flags);
1614 }
1615 spin_unlock_bh(&o->exe_queue.lock);
1616
1617 return rc;
1618}
1619
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03001620/**
1621 * bnx2x_complete_vlan_mac - complete one VLAN-MAC ramrod
1622 *
1623 * @bp: device handle
1624 * @o: bnx2x_vlan_mac_obj
 1625 * @cqe: completion element
 1626 * @ramrod_flags: if RAMROD_CONT is set, schedule the next execution chunk
1627 *
1628 */
1629static int bnx2x_complete_vlan_mac(struct bnx2x *bp,
1630 struct bnx2x_vlan_mac_obj *o,
1631 union event_ring_elem *cqe,
1632 unsigned long *ramrod_flags)
1633{
1634 struct bnx2x_raw_obj *r = &o->raw;
1635 int rc;
1636
Yuval Mintz8b09be52013-08-01 17:30:59 +03001637 /* Clearing the pending list & raw state should be done
 1638 * atomically, as the execution flow assumes they represent the same state.
1639 */
1640 spin_lock_bh(&o->exe_queue.lock);
1641
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03001642 /* Reset pending list */
Yuval Mintz8b09be52013-08-01 17:30:59 +03001643 __bnx2x_exe_queue_reset_pending(bp, &o->exe_queue);
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03001644
1645 /* Clear pending */
1646 r->clear_pending(r);
1647
Yuval Mintz8b09be52013-08-01 17:30:59 +03001648 spin_unlock_bh(&o->exe_queue.lock);
1649
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03001650 /* If ramrod failed this is most likely a SW bug */
1651 if (cqe->message.error)
1652 return -EINVAL;
1653
Yuval Mintz2de67432013-01-23 03:21:43 +00001654 /* Run the next bulk of pending commands if requested */
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03001655 if (test_bit(RAMROD_CONT, ramrod_flags)) {
Yuval Mintz8b09be52013-08-01 17:30:59 +03001656 rc = __bnx2x_vlan_mac_execute_step(bp, o, ramrod_flags);
1657
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03001658 if (rc < 0)
1659 return rc;
1660 }
1661
1662 /* If there is more work to do return PENDING */
1663 if (!bnx2x_exe_queue_empty(&o->exe_queue))
1664 return 1;
1665
1666 return 0;
1667}
1668
1669/**
1670 * bnx2x_optimize_vlan_mac - optimize ADD and DEL commands.
1671 *
1672 * @bp: device handle
 1673 * @qo: bnx2x_qable_obj
1674 * @elem: bnx2x_exeq_elem
1675 */
1676static int bnx2x_optimize_vlan_mac(struct bnx2x *bp,
1677 union bnx2x_qable_obj *qo,
1678 struct bnx2x_exeq_elem *elem)
1679{
1680 struct bnx2x_exeq_elem query, *pos;
1681 struct bnx2x_vlan_mac_obj *o = &qo->vlan_mac;
1682 struct bnx2x_exe_queue_obj *exeq = &o->exe_queue;
1683
1684 memcpy(&query, elem, sizeof(query));
1685
1686 switch (elem->cmd_data.vlan_mac.cmd) {
1687 case BNX2X_VLAN_MAC_ADD:
1688 query.cmd_data.vlan_mac.cmd = BNX2X_VLAN_MAC_DEL;
1689 break;
1690 case BNX2X_VLAN_MAC_DEL:
1691 query.cmd_data.vlan_mac.cmd = BNX2X_VLAN_MAC_ADD;
1692 break;
1693 default:
1694 /* Don't handle anything other than ADD or DEL */
1695 return 0;
1696 }
1697
1698 /* If we found the appropriate element - delete it */
1699 pos = exeq->get(exeq, &query);
1700 if (pos) {
1701
1702 /* Return the credit of the optimized command */
1703 if (!test_bit(BNX2X_DONT_CONSUME_CAM_CREDIT,
1704 &pos->cmd_data.vlan_mac.vlan_mac_flags)) {
1705 if ((query.cmd_data.vlan_mac.cmd ==
1706 BNX2X_VLAN_MAC_ADD) && !o->put_credit(o)) {
Merav Sicron51c1a582012-03-18 10:33:38 +00001707 BNX2X_ERR("Failed to return the credit for the optimized ADD command\n");
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03001708 return -EINVAL;
1709 } else if (!o->get_credit(o)) { /* VLAN_MAC_DEL */
Merav Sicron51c1a582012-03-18 10:33:38 +00001710 BNX2X_ERR("Failed to recover the credit from the optimized DEL command\n");
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03001711 return -EINVAL;
1712 }
1713 }
1714
1715 DP(BNX2X_MSG_SP, "Optimizing %s command\n",
1716 (elem->cmd_data.vlan_mac.cmd == BNX2X_VLAN_MAC_ADD) ?
1717 "ADD" : "DEL");
1718
1719 list_del(&pos->link);
1720 bnx2x_exe_queue_free_elem(bp, pos);
1721 return 1;
1722 }
1723
1724 return 0;
1725}
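/* Worked example (hypothetical): if an ADD for MAC X is still queued when a
 * DEL for MAC X arrives, the query above is rewritten to ADD, the queued ADD
 * is found and freed, its CAM credit is returned via put_credit(), and 1 is
 * returned so the DEL itself is treated as already handled. No ramrod is
 * sent for either command.
 */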
1726
1727/**
1728 * bnx2x_vlan_mac_get_registry_elem - prepare a registry element
1729 *
1730 * @bp: device handle
1731 * @o:
1732 * @elem:
1733 * @restore:
1734 * @re:
1735 *
1736 * prepare a registry element according to the current command request.
1737 */
1738static inline int bnx2x_vlan_mac_get_registry_elem(
1739 struct bnx2x *bp,
1740 struct bnx2x_vlan_mac_obj *o,
1741 struct bnx2x_exeq_elem *elem,
1742 bool restore,
1743 struct bnx2x_vlan_mac_registry_elem **re)
1744{
Yuval Mintz86564c32013-01-23 03:21:50 +00001745 enum bnx2x_vlan_mac_cmd cmd = elem->cmd_data.vlan_mac.cmd;
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03001746 struct bnx2x_vlan_mac_registry_elem *reg_elem;
1747
1748 /* Allocate a new registry element if needed. */
1749 if (!restore &&
1750 ((cmd == BNX2X_VLAN_MAC_ADD) || (cmd == BNX2X_VLAN_MAC_MOVE))) {
1751 reg_elem = kzalloc(sizeof(*reg_elem), GFP_ATOMIC);
1752 if (!reg_elem)
1753 return -ENOMEM;
1754
1755 /* Get a new CAM offset */
1756 if (!o->get_cam_offset(o, &reg_elem->cam_offset)) {
Yuval Mintz16a5fd92013-06-02 00:06:18 +00001757 /* This shall never happen, because we have checked the
1758 * CAM availability in the 'validate'.
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03001759 */
1760 WARN_ON(1);
1761 kfree(reg_elem);
1762 return -EINVAL;
1763 }
1764
1765 DP(BNX2X_MSG_SP, "Got cam offset %d\n", reg_elem->cam_offset);
1766
1767 /* Set a VLAN-MAC data */
1768 memcpy(&reg_elem->u, &elem->cmd_data.vlan_mac.u,
1769 sizeof(reg_elem->u));
1770
1771 /* Copy the flags (needed for DEL and RESTORE flows) */
1772 reg_elem->vlan_mac_flags =
1773 elem->cmd_data.vlan_mac.vlan_mac_flags;
1774 } else /* DEL, RESTORE */
Merav Sicron51c1a582012-03-18 10:33:38 +00001775 reg_elem = o->check_del(bp, o, &elem->cmd_data.vlan_mac.u);
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03001776
1777 *re = reg_elem;
1778 return 0;
1779}
1780
1781/**
1782 * bnx2x_execute_vlan_mac - execute vlan mac command
1783 *
1784 * @bp: device handle
 1785 * @qo: queueable object (its vlan_mac member is used)
 1786 * @exe_chunk: list of commands to execute in this ramrod
 1787 * @ramrod_flags: execution flags (RAMROD_RESTORE, RAMROD_DRV_CLR_ONLY, ...)
1788 *
1789 * go and send a ramrod!
1790 */
1791static int bnx2x_execute_vlan_mac(struct bnx2x *bp,
1792 union bnx2x_qable_obj *qo,
1793 struct list_head *exe_chunk,
1794 unsigned long *ramrod_flags)
1795{
1796 struct bnx2x_exeq_elem *elem;
1797 struct bnx2x_vlan_mac_obj *o = &qo->vlan_mac, *cam_obj;
1798 struct bnx2x_raw_obj *r = &o->raw;
1799 int rc, idx = 0;
1800 bool restore = test_bit(RAMROD_RESTORE, ramrod_flags);
1801 bool drv_only = test_bit(RAMROD_DRV_CLR_ONLY, ramrod_flags);
1802 struct bnx2x_vlan_mac_registry_elem *reg_elem;
Yuval Mintz86564c32013-01-23 03:21:50 +00001803 enum bnx2x_vlan_mac_cmd cmd;
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03001804
Yuval Mintz16a5fd92013-06-02 00:06:18 +00001805 /* If DRIVER_ONLY execution is requested, only clean up the registry
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03001806 * and exit. Otherwise send a ramrod to the FW.
1807 */
1808 if (!drv_only) {
1809 WARN_ON(r->check_pending(r));
1810
1811 /* Set pending */
1812 r->set_pending(r);
1813
Yuval Mintz16a5fd92013-06-02 00:06:18 +00001814 /* Fill the ramrod data */
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03001815 list_for_each_entry(elem, exe_chunk, link) {
1816 cmd = elem->cmd_data.vlan_mac.cmd;
Yuval Mintz16a5fd92013-06-02 00:06:18 +00001817 /* A MOVE command will add to the target object, so
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03001818 * change the object used for the CAM search.
1819 */
1820 if (cmd == BNX2X_VLAN_MAC_MOVE)
1821 cam_obj = elem->cmd_data.vlan_mac.target_obj;
1822 else
1823 cam_obj = o;
1824
1825 rc = bnx2x_vlan_mac_get_registry_elem(bp, cam_obj,
1826 elem, restore,
1827 &reg_elem);
1828 if (rc)
1829 goto error_exit;
1830
1831 WARN_ON(!reg_elem);
1832
1833 /* Push a new entry into the registry */
1834 if (!restore &&
1835 ((cmd == BNX2X_VLAN_MAC_ADD) ||
1836 (cmd == BNX2X_VLAN_MAC_MOVE)))
1837 list_add(&reg_elem->link, &cam_obj->head);
1838
1839 /* Configure a single command in a ramrod data buffer */
1840 o->set_one_rule(bp, o, elem, idx,
1841 reg_elem->cam_offset);
1842
1843 /* MOVE command consumes 2 entries in the ramrod data */
1844 if (cmd == BNX2X_VLAN_MAC_MOVE)
1845 idx += 2;
1846 else
1847 idx++;
1848 }
1849
Yuval Mintz16a5fd92013-06-02 00:06:18 +00001850 /* No need for an explicit memory barrier here: all we must
 1851 * ensure is the ordering between writing to the SPQ element
 1852 * and updating the SPQ producer, and the latter involves a
 1853 * memory read, so a full memory barrier is already placed
 1854 * there (inside bnx2x_sp_post()).
Vladislav Zolotarov53e51e22011-07-19 01:45:02 +00001855 */
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03001856
1857 rc = bnx2x_sp_post(bp, o->ramrod_cmd, r->cid,
1858 U64_HI(r->rdata_mapping),
1859 U64_LO(r->rdata_mapping),
1860 ETH_CONNECTION_TYPE);
1861 if (rc)
1862 goto error_exit;
1863 }
1864
 1865 /* Now that we are done with the ramrod, clean up the registry */
1866 list_for_each_entry(elem, exe_chunk, link) {
1867 cmd = elem->cmd_data.vlan_mac.cmd;
1868 if ((cmd == BNX2X_VLAN_MAC_DEL) ||
1869 (cmd == BNX2X_VLAN_MAC_MOVE)) {
Merav Sicron51c1a582012-03-18 10:33:38 +00001870 reg_elem = o->check_del(bp, o,
1871 &elem->cmd_data.vlan_mac.u);
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03001872
1873 WARN_ON(!reg_elem);
1874
1875 o->put_cam_offset(o, reg_elem->cam_offset);
1876 list_del(&reg_elem->link);
1877 kfree(reg_elem);
1878 }
1879 }
1880
1881 if (!drv_only)
1882 return 1;
1883 else
1884 return 0;
1885
1886error_exit:
1887 r->clear_pending(r);
1888
1889 /* Cleanup a registry in case of a failure */
1890 list_for_each_entry(elem, exe_chunk, link) {
1891 cmd = elem->cmd_data.vlan_mac.cmd;
1892
1893 if (cmd == BNX2X_VLAN_MAC_MOVE)
1894 cam_obj = elem->cmd_data.vlan_mac.target_obj;
1895 else
1896 cam_obj = o;
1897
 1898 /* Delete all entries newly added above */
1899 if (!restore &&
1900 ((cmd == BNX2X_VLAN_MAC_ADD) ||
1901 (cmd == BNX2X_VLAN_MAC_MOVE))) {
Merav Sicron51c1a582012-03-18 10:33:38 +00001902 reg_elem = o->check_del(bp, cam_obj,
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03001903 &elem->cmd_data.vlan_mac.u);
1904 if (reg_elem) {
1905 list_del(&reg_elem->link);
1906 kfree(reg_elem);
1907 }
1908 }
1909 }
1910
1911 return rc;
1912}
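/* Sizing note (illustrative): a MOVE occupies two rules in the ramrod data
 * while ADD and DEL occupy one, which is why idx advances by 2 for MOVE
 * above. A chunk of exe_chunk_len == CLASSIFY_RULES_COUNT can therefore
 * hold at most CLASSIFY_RULES_COUNT / 2 MOVE commands.
 */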
1913
1914static inline int bnx2x_vlan_mac_push_new_cmd(
1915 struct bnx2x *bp,
1916 struct bnx2x_vlan_mac_ramrod_params *p)
1917{
1918 struct bnx2x_exeq_elem *elem;
1919 struct bnx2x_vlan_mac_obj *o = p->vlan_mac_obj;
1920 bool restore = test_bit(RAMROD_RESTORE, &p->ramrod_flags);
1921
1922 /* Allocate the execution queue element */
1923 elem = bnx2x_exe_queue_alloc_elem(bp);
1924 if (!elem)
1925 return -ENOMEM;
1926
1927 /* Set the command 'length' */
1928 switch (p->user_req.cmd) {
1929 case BNX2X_VLAN_MAC_MOVE:
1930 elem->cmd_len = 2;
1931 break;
1932 default:
1933 elem->cmd_len = 1;
1934 }
1935
1936 /* Fill the object specific info */
1937 memcpy(&elem->cmd_data.vlan_mac, &p->user_req, sizeof(p->user_req));
1938
1939 /* Try to add a new command to the pending list */
1940 return bnx2x_exe_queue_add(bp, &o->exe_queue, elem, restore);
1941}
1942
1943/**
1944 * bnx2x_config_vlan_mac - configure VLAN/MAC/VLAN_MAC filtering rules.
1945 *
1946 * @bp: device handle
 1947 * @p: vlan/mac ramrod parameters
1948 *
1949 */
Yuval Mintz8b09be52013-08-01 17:30:59 +03001950int bnx2x_config_vlan_mac(struct bnx2x *bp,
1951 struct bnx2x_vlan_mac_ramrod_params *p)
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03001952{
1953 int rc = 0;
1954 struct bnx2x_vlan_mac_obj *o = p->vlan_mac_obj;
1955 unsigned long *ramrod_flags = &p->ramrod_flags;
1956 bool cont = test_bit(RAMROD_CONT, ramrod_flags);
1957 struct bnx2x_raw_obj *raw = &o->raw;
1958
1959 /*
1960 * Add new elements to the execution list for commands that require it.
1961 */
1962 if (!cont) {
1963 rc = bnx2x_vlan_mac_push_new_cmd(bp, p);
1964 if (rc)
1965 return rc;
1966 }
1967
Yuval Mintz16a5fd92013-06-02 00:06:18 +00001968 /* If nothing further will be executed in this iteration we want to
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03001969 * return PENDING while there are pending commands
1970 */
1971 if (!bnx2x_exe_queue_empty(&o->exe_queue))
1972 rc = 1;
1973
Vladislav Zolotarov79616892011-07-21 07:58:54 +00001974 if (test_bit(RAMROD_DRV_CLR_ONLY, ramrod_flags)) {
Merav Sicron51c1a582012-03-18 10:33:38 +00001975 DP(BNX2X_MSG_SP, "RAMROD_DRV_CLR_ONLY requested: clearing a pending bit.\n");
Vladislav Zolotarov79616892011-07-21 07:58:54 +00001976 raw->clear_pending(raw);
1977 }
1978
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03001979 /* Execute commands if required */
1980 if (cont || test_bit(RAMROD_EXEC, ramrod_flags) ||
1981 test_bit(RAMROD_COMP_WAIT, ramrod_flags)) {
Yuval Mintz8b09be52013-08-01 17:30:59 +03001982 rc = __bnx2x_vlan_mac_execute_step(bp, p->vlan_mac_obj,
1983 &p->ramrod_flags);
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03001984 if (rc < 0)
1985 return rc;
1986 }
1987
Yuval Mintz16a5fd92013-06-02 00:06:18 +00001988 /* RAMROD_COMP_WAIT is a superset of RAMROD_EXEC. If it was set
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03001989 * then the user wants to wait until the last command is done.
1990 */
1991 if (test_bit(RAMROD_COMP_WAIT, &p->ramrod_flags)) {
Yuval Mintz16a5fd92013-06-02 00:06:18 +00001992 /* Wait at most for the current exe_queue length in iterations, plus
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03001993 * one for the currently pending command.
1994 */
1995 int max_iterations = bnx2x_exe_queue_length(&o->exe_queue) + 1;
1996
1997 while (!bnx2x_exe_queue_empty(&o->exe_queue) &&
1998 max_iterations--) {
1999
2000 /* Wait for the current command to complete */
2001 rc = raw->wait_comp(bp, raw);
2002 if (rc)
2003 return rc;
2004
2005 /* Make a next step */
Yuval Mintz8b09be52013-08-01 17:30:59 +03002006 rc = __bnx2x_vlan_mac_execute_step(bp,
2007 p->vlan_mac_obj,
2008 &p->ramrod_flags);
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03002009 if (rc < 0)
2010 return rc;
2011 }
2012
2013 return 0;
2014 }
2015
2016 return rc;
2017}
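/* Usage sketch (illustrative, variable names are made up): adding a MAC and
 * waiting synchronously for its completion could look roughly like this:
 *
 *	struct bnx2x_vlan_mac_ramrod_params p;
 *
 *	memset(&p, 0, sizeof(p));
 *	p.vlan_mac_obj = mac_obj;
 *	p.user_req.cmd = BNX2X_VLAN_MAC_ADD;
 *	memcpy(p.user_req.u.mac.mac, mac, ETH_ALEN);
 *	__set_bit(RAMROD_COMP_WAIT, &p.ramrod_flags);
 *	rc = bnx2x_config_vlan_mac(bp, &p);
 *
 * RAMROD_COMP_WAIT implies execution, so the call returns only after the
 * last queued command has completed (or an error occurred).
 */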
2018
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03002019/**
2020 * bnx2x_vlan_mac_del_all - delete elements with given vlan_mac_flags spec
2021 *
2022 * @bp: device handle
 2023 * @o: vlan_mac object to delete the elements from
 2024 * @vlan_mac_flags: flags selecting which elements to delete
 2025 * @ramrod_flags: execution flags to be used for this deletion
 2026 *
 2027 * Returns 0 if the last operation has completed successfully and there are no
Yuval Mintz16a5fd92013-06-02 00:06:18 +00002028 * more elements left, positive value if the last operation has completed
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03002029 * successfully and there are more previously configured elements, negative
 2030 * value if the current operation has failed.
2031 */
2032static int bnx2x_vlan_mac_del_all(struct bnx2x *bp,
2033 struct bnx2x_vlan_mac_obj *o,
2034 unsigned long *vlan_mac_flags,
2035 unsigned long *ramrod_flags)
2036{
2037 struct bnx2x_vlan_mac_registry_elem *pos = NULL;
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03002038 struct bnx2x_vlan_mac_ramrod_params p;
2039 struct bnx2x_exe_queue_obj *exeq = &o->exe_queue;
2040 struct bnx2x_exeq_elem *exeq_pos, *exeq_pos_n;
Yuval Mintze8379c72014-01-05 18:33:54 +02002041 unsigned long flags;
Yuval Mintz8b09be52013-08-01 17:30:59 +03002042 int read_lock;
2043 int rc = 0;
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03002044
2045 /* Clear pending commands first */
2046
2047 spin_lock_bh(&exeq->lock);
2048
2049 list_for_each_entry_safe(exeq_pos, exeq_pos_n, &exeq->exe_queue, link) {
Yuval Mintze8379c72014-01-05 18:33:54 +02002050 flags = exeq_pos->cmd_data.vlan_mac.vlan_mac_flags;
2051 if (BNX2X_VLAN_MAC_CMP_FLAGS(flags) ==
2052 BNX2X_VLAN_MAC_CMP_FLAGS(*vlan_mac_flags)) {
Yuval Mintz460a25c2012-01-23 07:31:51 +00002053 rc = exeq->remove(bp, exeq->owner, exeq_pos);
2054 if (rc) {
2055 BNX2X_ERR("Failed to remove command\n");
Dan Carpentera44acd52012-01-24 21:59:31 +00002056 spin_unlock_bh(&exeq->lock);
Yuval Mintz460a25c2012-01-23 07:31:51 +00002057 return rc;
2058 }
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03002059 list_del(&exeq_pos->link);
Yuval Mintz07ef7be2013-03-11 05:17:41 +00002060 bnx2x_exe_queue_free_elem(bp, exeq_pos);
Yuval Mintz460a25c2012-01-23 07:31:51 +00002061 }
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03002062 }
2063
2064 spin_unlock_bh(&exeq->lock);
2065
2066 /* Prepare a command request */
2067 memset(&p, 0, sizeof(p));
2068 p.vlan_mac_obj = o;
2069 p.ramrod_flags = *ramrod_flags;
2070 p.user_req.cmd = BNX2X_VLAN_MAC_DEL;
2071
Yuval Mintz16a5fd92013-06-02 00:06:18 +00002072 /* Add all but the last VLAN-MAC to the execution queue without actually
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03002073 * executing anything.
2074 */
2075 __clear_bit(RAMROD_COMP_WAIT, &p.ramrod_flags);
2076 __clear_bit(RAMROD_EXEC, &p.ramrod_flags);
2077 __clear_bit(RAMROD_CONT, &p.ramrod_flags);
2078
Yuval Mintz8b09be52013-08-01 17:30:59 +03002079 DP(BNX2X_MSG_SP, "vlan_mac_del_all -- taking vlan_mac_lock (reader)\n");
2080 read_lock = bnx2x_vlan_mac_h_read_lock(bp, o);
2081 if (read_lock != 0)
2082 return read_lock;
2083
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03002084 list_for_each_entry(pos, &o->head, link) {
Yuval Mintze8379c72014-01-05 18:33:54 +02002085 flags = pos->vlan_mac_flags;
2086 if (BNX2X_VLAN_MAC_CMP_FLAGS(flags) ==
2087 BNX2X_VLAN_MAC_CMP_FLAGS(*vlan_mac_flags)) {
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03002088 p.user_req.vlan_mac_flags = pos->vlan_mac_flags;
2089 memcpy(&p.user_req.u, &pos->u, sizeof(pos->u));
2090 rc = bnx2x_config_vlan_mac(bp, &p);
2091 if (rc < 0) {
2092 BNX2X_ERR("Failed to add a new DEL command\n");
Yuval Mintz8b09be52013-08-01 17:30:59 +03002093 bnx2x_vlan_mac_h_read_unlock(bp, o);
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03002094 return rc;
2095 }
2096 }
2097 }
2098
Yuval Mintz8b09be52013-08-01 17:30:59 +03002099 DP(BNX2X_MSG_SP, "vlan_mac_del_all -- releasing vlan_mac_lock (reader)\n");
2100 bnx2x_vlan_mac_h_read_unlock(bp, o);
2101
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03002102 p.ramrod_flags = *ramrod_flags;
2103 __set_bit(RAMROD_CONT, &p.ramrod_flags);
2104
2105 return bnx2x_config_vlan_mac(bp, &p);
2106}
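/* Usage sketch (illustrative): this routine is exported through
 * o->delete_all (see bnx2x_init_vlan_mac_common() below), so flushing all
 * elements configured with a given flags spec might look roughly like:
 *
 *	unsigned long ramrod_flags = 0, vlan_mac_flags = 0;
 *
 *	__set_bit(RAMROD_COMP_WAIT, &ramrod_flags);
 *	rc = mac_obj->delete_all(bp, mac_obj, &vlan_mac_flags,
 *				 &ramrod_flags);
 */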
2107
2108static inline void bnx2x_init_raw_obj(struct bnx2x_raw_obj *raw, u8 cl_id,
2109 u32 cid, u8 func_id, void *rdata, dma_addr_t rdata_mapping, int state,
2110 unsigned long *pstate, bnx2x_obj_type type)
2111{
2112 raw->func_id = func_id;
2113 raw->cid = cid;
2114 raw->cl_id = cl_id;
2115 raw->rdata = rdata;
2116 raw->rdata_mapping = rdata_mapping;
2117 raw->state = state;
2118 raw->pstate = pstate;
2119 raw->obj_type = type;
2120 raw->check_pending = bnx2x_raw_check_pending;
2121 raw->clear_pending = bnx2x_raw_clear_pending;
2122 raw->set_pending = bnx2x_raw_set_pending;
2123 raw->wait_comp = bnx2x_raw_wait;
2124}
2125
2126static inline void bnx2x_init_vlan_mac_common(struct bnx2x_vlan_mac_obj *o,
2127 u8 cl_id, u32 cid, u8 func_id, void *rdata, dma_addr_t rdata_mapping,
2128 int state, unsigned long *pstate, bnx2x_obj_type type,
2129 struct bnx2x_credit_pool_obj *macs_pool,
2130 struct bnx2x_credit_pool_obj *vlans_pool)
2131{
2132 INIT_LIST_HEAD(&o->head);
Yuval Mintz8b09be52013-08-01 17:30:59 +03002133 o->head_reader = 0;
2134 o->head_exe_request = false;
2135 o->saved_ramrod_flags = 0;
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03002136
2137 o->macs_pool = macs_pool;
2138 o->vlans_pool = vlans_pool;
2139
2140 o->delete_all = bnx2x_vlan_mac_del_all;
2141 o->restore = bnx2x_vlan_mac_restore;
2142 o->complete = bnx2x_complete_vlan_mac;
2143 o->wait = bnx2x_wait_vlan_mac;
2144
2145 bnx2x_init_raw_obj(&o->raw, cl_id, cid, func_id, rdata, rdata_mapping,
2146 state, pstate, type);
2147}
2148
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03002149void bnx2x_init_mac_obj(struct bnx2x *bp,
2150 struct bnx2x_vlan_mac_obj *mac_obj,
2151 u8 cl_id, u32 cid, u8 func_id, void *rdata,
2152 dma_addr_t rdata_mapping, int state,
2153 unsigned long *pstate, bnx2x_obj_type type,
2154 struct bnx2x_credit_pool_obj *macs_pool)
2155{
2156 union bnx2x_qable_obj *qable_obj = (union bnx2x_qable_obj *)mac_obj;
2157
2158 bnx2x_init_vlan_mac_common(mac_obj, cl_id, cid, func_id, rdata,
2159 rdata_mapping, state, pstate, type,
2160 macs_pool, NULL);
2161
2162 /* CAM credit pool handling */
2163 mac_obj->get_credit = bnx2x_get_credit_mac;
2164 mac_obj->put_credit = bnx2x_put_credit_mac;
2165 mac_obj->get_cam_offset = bnx2x_get_cam_offset_mac;
2166 mac_obj->put_cam_offset = bnx2x_put_cam_offset_mac;
2167
2168 if (CHIP_IS_E1x(bp)) {
2169 mac_obj->set_one_rule = bnx2x_set_one_mac_e1x;
2170 mac_obj->check_del = bnx2x_check_mac_del;
2171 mac_obj->check_add = bnx2x_check_mac_add;
2172 mac_obj->check_move = bnx2x_check_move_always_err;
2173 mac_obj->ramrod_cmd = RAMROD_CMD_ID_ETH_SET_MAC;
2174
2175 /* Exe Queue */
2176 bnx2x_exe_queue_init(bp,
2177 &mac_obj->exe_queue, 1, qable_obj,
2178 bnx2x_validate_vlan_mac,
Yuval Mintz460a25c2012-01-23 07:31:51 +00002179 bnx2x_remove_vlan_mac,
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03002180 bnx2x_optimize_vlan_mac,
2181 bnx2x_execute_vlan_mac,
2182 bnx2x_exeq_get_mac);
2183 } else {
2184 mac_obj->set_one_rule = bnx2x_set_one_mac_e2;
2185 mac_obj->check_del = bnx2x_check_mac_del;
2186 mac_obj->check_add = bnx2x_check_mac_add;
2187 mac_obj->check_move = bnx2x_check_move;
2188 mac_obj->ramrod_cmd =
2189 RAMROD_CMD_ID_ETH_CLASSIFICATION_RULES;
Ariel Eliored5162a2011-12-05 21:52:24 +00002190 mac_obj->get_n_elements = bnx2x_get_n_elements;
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03002191
2192 /* Exe Queue */
2193 bnx2x_exe_queue_init(bp,
2194 &mac_obj->exe_queue, CLASSIFY_RULES_COUNT,
2195 qable_obj, bnx2x_validate_vlan_mac,
Yuval Mintz460a25c2012-01-23 07:31:51 +00002196 bnx2x_remove_vlan_mac,
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03002197 bnx2x_optimize_vlan_mac,
2198 bnx2x_execute_vlan_mac,
2199 bnx2x_exeq_get_mac);
2200 }
2201}
2202
2203void bnx2x_init_vlan_obj(struct bnx2x *bp,
2204 struct bnx2x_vlan_mac_obj *vlan_obj,
2205 u8 cl_id, u32 cid, u8 func_id, void *rdata,
2206 dma_addr_t rdata_mapping, int state,
2207 unsigned long *pstate, bnx2x_obj_type type,
2208 struct bnx2x_credit_pool_obj *vlans_pool)
2209{
2210 union bnx2x_qable_obj *qable_obj = (union bnx2x_qable_obj *)vlan_obj;
2211
2212 bnx2x_init_vlan_mac_common(vlan_obj, cl_id, cid, func_id, rdata,
2213 rdata_mapping, state, pstate, type, NULL,
2214 vlans_pool);
2215
2216 vlan_obj->get_credit = bnx2x_get_credit_vlan;
2217 vlan_obj->put_credit = bnx2x_put_credit_vlan;
2218 vlan_obj->get_cam_offset = bnx2x_get_cam_offset_vlan;
2219 vlan_obj->put_cam_offset = bnx2x_put_cam_offset_vlan;
2220
2221 if (CHIP_IS_E1x(bp)) {
 2222 BNX2X_ERR("Do not support chips other than E2 and newer\n");
2223 BUG();
2224 } else {
2225 vlan_obj->set_one_rule = bnx2x_set_one_vlan_e2;
2226 vlan_obj->check_del = bnx2x_check_vlan_del;
2227 vlan_obj->check_add = bnx2x_check_vlan_add;
2228 vlan_obj->check_move = bnx2x_check_move;
2229 vlan_obj->ramrod_cmd =
2230 RAMROD_CMD_ID_ETH_CLASSIFICATION_RULES;
Ariel Elior3ec9f9c2013-03-11 05:17:45 +00002231 vlan_obj->get_n_elements = bnx2x_get_n_elements;
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03002232
2233 /* Exe Queue */
2234 bnx2x_exe_queue_init(bp,
2235 &vlan_obj->exe_queue, CLASSIFY_RULES_COUNT,
2236 qable_obj, bnx2x_validate_vlan_mac,
Yuval Mintz460a25c2012-01-23 07:31:51 +00002237 bnx2x_remove_vlan_mac,
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03002238 bnx2x_optimize_vlan_mac,
2239 bnx2x_execute_vlan_mac,
2240 bnx2x_exeq_get_vlan);
2241 }
2242}
2243
2244void bnx2x_init_vlan_mac_obj(struct bnx2x *bp,
2245 struct bnx2x_vlan_mac_obj *vlan_mac_obj,
2246 u8 cl_id, u32 cid, u8 func_id, void *rdata,
2247 dma_addr_t rdata_mapping, int state,
2248 unsigned long *pstate, bnx2x_obj_type type,
2249 struct bnx2x_credit_pool_obj *macs_pool,
2250 struct bnx2x_credit_pool_obj *vlans_pool)
2251{
2252 union bnx2x_qable_obj *qable_obj =
2253 (union bnx2x_qable_obj *)vlan_mac_obj;
2254
2255 bnx2x_init_vlan_mac_common(vlan_mac_obj, cl_id, cid, func_id, rdata,
2256 rdata_mapping, state, pstate, type,
2257 macs_pool, vlans_pool);
2258
2259 /* CAM pool handling */
2260 vlan_mac_obj->get_credit = bnx2x_get_credit_vlan_mac;
2261 vlan_mac_obj->put_credit = bnx2x_put_credit_vlan_mac;
Yuval Mintz16a5fd92013-06-02 00:06:18 +00002262 /* CAM offset is relevant for 57710 and 57711 chips only which have a
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03002263 * single CAM for both MACs and VLAN-MAC pairs. So the offset
2264 * will be taken from MACs' pool object only.
2265 */
2266 vlan_mac_obj->get_cam_offset = bnx2x_get_cam_offset_mac;
2267 vlan_mac_obj->put_cam_offset = bnx2x_put_cam_offset_mac;
2268
2269 if (CHIP_IS_E1(bp)) {
 2270 BNX2X_ERR("Do not support chips other than E2\n");
2271 BUG();
2272 } else if (CHIP_IS_E1H(bp)) {
2273 vlan_mac_obj->set_one_rule = bnx2x_set_one_vlan_mac_e1h;
2274 vlan_mac_obj->check_del = bnx2x_check_vlan_mac_del;
2275 vlan_mac_obj->check_add = bnx2x_check_vlan_mac_add;
2276 vlan_mac_obj->check_move = bnx2x_check_move_always_err;
2277 vlan_mac_obj->ramrod_cmd = RAMROD_CMD_ID_ETH_SET_MAC;
2278
2279 /* Exe Queue */
2280 bnx2x_exe_queue_init(bp,
2281 &vlan_mac_obj->exe_queue, 1, qable_obj,
2282 bnx2x_validate_vlan_mac,
Yuval Mintz460a25c2012-01-23 07:31:51 +00002283 bnx2x_remove_vlan_mac,
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03002284 bnx2x_optimize_vlan_mac,
2285 bnx2x_execute_vlan_mac,
2286 bnx2x_exeq_get_vlan_mac);
2287 } else {
2288 vlan_mac_obj->set_one_rule = bnx2x_set_one_vlan_mac_e2;
2289 vlan_mac_obj->check_del = bnx2x_check_vlan_mac_del;
2290 vlan_mac_obj->check_add = bnx2x_check_vlan_mac_add;
2291 vlan_mac_obj->check_move = bnx2x_check_move;
2292 vlan_mac_obj->ramrod_cmd =
2293 RAMROD_CMD_ID_ETH_CLASSIFICATION_RULES;
2294
2295 /* Exe Queue */
2296 bnx2x_exe_queue_init(bp,
2297 &vlan_mac_obj->exe_queue,
2298 CLASSIFY_RULES_COUNT,
2299 qable_obj, bnx2x_validate_vlan_mac,
Yuval Mintz460a25c2012-01-23 07:31:51 +00002300 bnx2x_remove_vlan_mac,
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03002301 bnx2x_optimize_vlan_mac,
2302 bnx2x_execute_vlan_mac,
2303 bnx2x_exeq_get_vlan_mac);
2304 }
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03002305}
2306
2307/* RX_MODE verbs: DROP_ALL/ACCEPT_ALL/ACCEPT_ALL_MULTI/ACCEPT_ALL_VLAN/NORMAL */
2308static inline void __storm_memset_mac_filters(struct bnx2x *bp,
2309 struct tstorm_eth_mac_filter_config *mac_filters,
2310 u16 pf_id)
2311{
2312 size_t size = sizeof(struct tstorm_eth_mac_filter_config);
2313
2314 u32 addr = BAR_TSTRORM_INTMEM +
2315 TSTORM_MAC_FILTER_CONFIG_OFFSET(pf_id);
2316
2317 __storm_memset_struct(bp, addr, size, (u32 *)mac_filters);
2318}
2319
2320static int bnx2x_set_rx_mode_e1x(struct bnx2x *bp,
2321 struct bnx2x_rx_mode_ramrod_params *p)
2322{
Yuval Mintz2de67432013-01-23 03:21:43 +00002323 /* update the bp MAC filter structure */
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03002324 u32 mask = (1 << p->cl_id);
2325
2326 struct tstorm_eth_mac_filter_config *mac_filters =
2327 (struct tstorm_eth_mac_filter_config *)p->rdata;
2328
Yuval Mintz16a5fd92013-06-02 00:06:18 +00002329 /* initial setting is drop-all */
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03002330 u8 drop_all_ucast = 1, drop_all_mcast = 1;
Vladislav Zolotarov042181f2011-06-14 01:33:39 +00002331 u8 accp_all_ucast = 0, accp_all_bcast = 0, accp_all_mcast = 0;
2332 u8 unmatched_unicast = 0;
2333
Yuval Mintz16a5fd92013-06-02 00:06:18 +00002334 /* In e1x we only take the rx accept flags into account since tx switching
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03002335 * isn't enabled. */
2336 if (test_bit(BNX2X_ACCEPT_UNICAST, &p->rx_accept_flags))
Vladislav Zolotarov042181f2011-06-14 01:33:39 +00002337 /* accept matched ucast */
2338 drop_all_ucast = 0;
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03002339
2340 if (test_bit(BNX2X_ACCEPT_MULTICAST, &p->rx_accept_flags))
Vladislav Zolotarov042181f2011-06-14 01:33:39 +00002341 /* accept matched mcast */
2342 drop_all_mcast = 0;
2343
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03002344 if (test_bit(BNX2X_ACCEPT_ALL_UNICAST, &p->rx_accept_flags)) {
Vladislav Zolotarov042181f2011-06-14 01:33:39 +00002345 /* accept all ucast */
2346 drop_all_ucast = 0;
2347 accp_all_ucast = 1;
2348 }
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03002349 if (test_bit(BNX2X_ACCEPT_ALL_MULTICAST, &p->rx_accept_flags)) {
Vladislav Zolotarov042181f2011-06-14 01:33:39 +00002350 /* accept all mcast */
2351 drop_all_mcast = 0;
2352 accp_all_mcast = 1;
2353 }
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03002354 if (test_bit(BNX2X_ACCEPT_BROADCAST, &p->rx_accept_flags))
Vladislav Zolotarov042181f2011-06-14 01:33:39 +00002355 /* accept (all) bcast */
Vladislav Zolotarov042181f2011-06-14 01:33:39 +00002356 accp_all_bcast = 1;
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03002357 if (test_bit(BNX2X_ACCEPT_UNMATCHED, &p->rx_accept_flags))
2358 /* accept unmatched unicasts */
2359 unmatched_unicast = 1;
Vladislav Zolotarov042181f2011-06-14 01:33:39 +00002360
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03002361 mac_filters->ucast_drop_all = drop_all_ucast ?
2362 mac_filters->ucast_drop_all | mask :
2363 mac_filters->ucast_drop_all & ~mask;
Vladislav Zolotarov042181f2011-06-14 01:33:39 +00002364
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03002365 mac_filters->mcast_drop_all = drop_all_mcast ?
2366 mac_filters->mcast_drop_all | mask :
2367 mac_filters->mcast_drop_all & ~mask;
Vladislav Zolotarov042181f2011-06-14 01:33:39 +00002368
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03002369 mac_filters->ucast_accept_all = accp_all_ucast ?
2370 mac_filters->ucast_accept_all | mask :
2371 mac_filters->ucast_accept_all & ~mask;
Vladislav Zolotarov042181f2011-06-14 01:33:39 +00002372
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03002373 mac_filters->mcast_accept_all = accp_all_mcast ?
2374 mac_filters->mcast_accept_all | mask :
2375 mac_filters->mcast_accept_all & ~mask;
Vladislav Zolotarov042181f2011-06-14 01:33:39 +00002376
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03002377 mac_filters->bcast_accept_all = accp_all_bcast ?
2378 mac_filters->bcast_accept_all | mask :
2379 mac_filters->bcast_accept_all & ~mask;
Vladislav Zolotarov042181f2011-06-14 01:33:39 +00002380
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03002381 mac_filters->unmatched_unicast = unmatched_unicast ?
2382 mac_filters->unmatched_unicast | mask :
2383 mac_filters->unmatched_unicast & ~mask;
Vladislav Zolotarov042181f2011-06-14 01:33:39 +00002384
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03002385 DP(BNX2X_MSG_SP, "drop_ucast 0x%x\ndrop_mcast 0x%x\naccp_ucast 0x%x\n"
Yuval Mintz2de67432013-01-23 03:21:43 +00002386 "accp_mcast 0x%x\naccp_bcast 0x%x\n",
Merav Sicron51c1a582012-03-18 10:33:38 +00002387 mac_filters->ucast_drop_all, mac_filters->mcast_drop_all,
2388 mac_filters->ucast_accept_all, mac_filters->mcast_accept_all,
2389 mac_filters->bcast_accept_all);
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03002390
2391 /* write the MAC filter structure*/
2392 __storm_memset_mac_filters(bp, mac_filters, p->func_id);
2393
2394 /* The operation is completed */
2395 clear_bit(p->state, p->pstate);
2396 smp_mb__after_clear_bit();
2397
2398 return 0;
Vladislav Zolotarov042181f2011-06-14 01:33:39 +00002399}
2400
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03002401/* Setup ramrod data */
2402static inline void bnx2x_rx_mode_set_rdata_hdr_e2(u32 cid,
2403 struct eth_classify_header *hdr,
2404 u8 rule_cnt)
Vladislav Zolotarov042181f2011-06-14 01:33:39 +00002405{
Yuval Mintz86564c32013-01-23 03:21:50 +00002406 hdr->echo = cpu_to_le32(cid);
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03002407 hdr->rule_cnt = rule_cnt;
2408}
Vladislav Zolotarov042181f2011-06-14 01:33:39 +00002409
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03002410static inline void bnx2x_rx_mode_set_cmd_state_e2(struct bnx2x *bp,
Yuval Mintz924d75a2013-01-23 03:21:44 +00002411 unsigned long *accept_flags,
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03002412 struct eth_filter_rules_cmd *cmd,
2413 bool clear_accept_all)
2414{
2415 u16 state;
Vladislav Zolotarov042181f2011-06-14 01:33:39 +00002416
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03002417 /* start with 'drop-all' */
2418 state = ETH_FILTER_RULES_CMD_UCAST_DROP_ALL |
2419 ETH_FILTER_RULES_CMD_MCAST_DROP_ALL;
2420
Yuval Mintz924d75a2013-01-23 03:21:44 +00002421 if (test_bit(BNX2X_ACCEPT_UNICAST, accept_flags))
2422 state &= ~ETH_FILTER_RULES_CMD_UCAST_DROP_ALL;
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03002423
Yuval Mintz924d75a2013-01-23 03:21:44 +00002424 if (test_bit(BNX2X_ACCEPT_MULTICAST, accept_flags))
2425 state &= ~ETH_FILTER_RULES_CMD_MCAST_DROP_ALL;
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03002426
Yuval Mintz924d75a2013-01-23 03:21:44 +00002427 if (test_bit(BNX2X_ACCEPT_ALL_UNICAST, accept_flags)) {
2428 state &= ~ETH_FILTER_RULES_CMD_UCAST_DROP_ALL;
2429 state |= ETH_FILTER_RULES_CMD_UCAST_ACCEPT_ALL;
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03002430 }
2431
Yuval Mintz924d75a2013-01-23 03:21:44 +00002432 if (test_bit(BNX2X_ACCEPT_ALL_MULTICAST, accept_flags)) {
2433 state |= ETH_FILTER_RULES_CMD_MCAST_ACCEPT_ALL;
2434 state &= ~ETH_FILTER_RULES_CMD_MCAST_DROP_ALL;
2435 }
2436
2437 if (test_bit(BNX2X_ACCEPT_BROADCAST, accept_flags))
2438 state |= ETH_FILTER_RULES_CMD_BCAST_ACCEPT_ALL;
2439
2440 if (test_bit(BNX2X_ACCEPT_UNMATCHED, accept_flags)) {
2441 state &= ~ETH_FILTER_RULES_CMD_UCAST_DROP_ALL;
2442 state |= ETH_FILTER_RULES_CMD_UCAST_ACCEPT_UNMATCHED;
2443 }
2444
2445 if (test_bit(BNX2X_ACCEPT_ANY_VLAN, accept_flags))
2446 state |= ETH_FILTER_RULES_CMD_ACCEPT_ANY_VLAN;
2447
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03002448 /* Clear ACCEPT_ALL_XXX flags for FCoE L2 Queue */
2449 if (clear_accept_all) {
2450 state &= ~ETH_FILTER_RULES_CMD_MCAST_ACCEPT_ALL;
2451 state &= ~ETH_FILTER_RULES_CMD_BCAST_ACCEPT_ALL;
2452 state &= ~ETH_FILTER_RULES_CMD_UCAST_ACCEPT_ALL;
2453 state &= ~ETH_FILTER_RULES_CMD_UCAST_ACCEPT_UNMATCHED;
2454 }
2455
2456 cmd->state = cpu_to_le16(state);
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03002457}
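/* Worked example (hypothetical flags): with only BNX2X_ACCEPT_UNICAST and
 * BNX2X_ACCEPT_BROADCAST set, the state starts as UCAST_DROP_ALL |
 * MCAST_DROP_ALL, UCAST_DROP_ALL is then cleared and BCAST_ACCEPT_ALL is
 * ORed in: matched unicast and all broadcast frames pass while multicast
 * is still dropped.
 */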
2458
2459static int bnx2x_set_rx_mode_e2(struct bnx2x *bp,
2460 struct bnx2x_rx_mode_ramrod_params *p)
2461{
2462 struct eth_filter_rules_ramrod_data *data = p->rdata;
2463 int rc;
2464 u8 rule_idx = 0;
2465
2466 /* Reset the ramrod data buffer */
2467 memset(data, 0, sizeof(*data));
2468
2469 /* Setup ramrod data */
2470
2471 /* Tx (internal switching) */
2472 if (test_bit(RAMROD_TX, &p->ramrod_flags)) {
2473 data->rules[rule_idx].client_id = p->cl_id;
2474 data->rules[rule_idx].func_id = p->func_id;
2475
2476 data->rules[rule_idx].cmd_general_data =
2477 ETH_FILTER_RULES_CMD_TX_CMD;
2478
Yuval Mintz924d75a2013-01-23 03:21:44 +00002479 bnx2x_rx_mode_set_cmd_state_e2(bp, &p->tx_accept_flags,
2480 &(data->rules[rule_idx++]),
2481 false);
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03002482 }
2483
2484 /* Rx */
2485 if (test_bit(RAMROD_RX, &p->ramrod_flags)) {
2486 data->rules[rule_idx].client_id = p->cl_id;
2487 data->rules[rule_idx].func_id = p->func_id;
2488
2489 data->rules[rule_idx].cmd_general_data =
2490 ETH_FILTER_RULES_CMD_RX_CMD;
2491
Yuval Mintz924d75a2013-01-23 03:21:44 +00002492 bnx2x_rx_mode_set_cmd_state_e2(bp, &p->rx_accept_flags,
2493 &(data->rules[rule_idx++]),
2494 false);
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03002495 }
2496
Yuval Mintz16a5fd92013-06-02 00:06:18 +00002497 /* If FCoE Queue configuration has been requested, configure the Rx and
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03002498 * internal switching modes for this queue in separate rules.
 2499 *
 2500 * The FCoE queue shall never be set to ACCEPT_ALL packets of any sort:
 2501 * MCAST_ALL, UCAST_ALL, BCAST_ALL and UNMATCHED.
2502 */
2503 if (test_bit(BNX2X_RX_MODE_FCOE_ETH, &p->rx_mode_flags)) {
2504 /* Tx (internal switching) */
2505 if (test_bit(RAMROD_TX, &p->ramrod_flags)) {
2506 data->rules[rule_idx].client_id = bnx2x_fcoe(bp, cl_id);
2507 data->rules[rule_idx].func_id = p->func_id;
2508
2509 data->rules[rule_idx].cmd_general_data =
2510 ETH_FILTER_RULES_CMD_TX_CMD;
2511
Yuval Mintz924d75a2013-01-23 03:21:44 +00002512 bnx2x_rx_mode_set_cmd_state_e2(bp, &p->tx_accept_flags,
2513 &(data->rules[rule_idx]),
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03002514 true);
Yuval Mintz924d75a2013-01-23 03:21:44 +00002515 rule_idx++;
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03002516 }
2517
2518 /* Rx */
2519 if (test_bit(RAMROD_RX, &p->ramrod_flags)) {
2520 data->rules[rule_idx].client_id = bnx2x_fcoe(bp, cl_id);
2521 data->rules[rule_idx].func_id = p->func_id;
2522
2523 data->rules[rule_idx].cmd_general_data =
2524 ETH_FILTER_RULES_CMD_RX_CMD;
2525
Yuval Mintz924d75a2013-01-23 03:21:44 +00002526 bnx2x_rx_mode_set_cmd_state_e2(bp, &p->rx_accept_flags,
2527 &(data->rules[rule_idx]),
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03002528 true);
Yuval Mintz924d75a2013-01-23 03:21:44 +00002529 rule_idx++;
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03002530 }
2531 }
2532
Yuval Mintz16a5fd92013-06-02 00:06:18 +00002533 /* Set the ramrod header (most importantly - number of rules to
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03002534 * configure).
2535 */
2536 bnx2x_rx_mode_set_rdata_hdr_e2(p->cid, &data->header, rule_idx);
2537
Merav Sicron51c1a582012-03-18 10:33:38 +00002538 DP(BNX2X_MSG_SP, "About to configure %d rules, rx_accept_flags 0x%lx, tx_accept_flags 0x%lx\n",
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03002539 data->header.rule_cnt, p->rx_accept_flags,
2540 p->tx_accept_flags);
2541
Yuval Mintz16a5fd92013-06-02 00:06:18 +00002542 /* No need for an explicit memory barrier here: all we must
 2543 * ensure is the ordering between writing to the SPQ element
 2544 * and updating the SPQ producer, and the latter involves a
 2545 * memory read, so a full memory barrier is already placed
 2546 * there (inside bnx2x_sp_post()).
Vladislav Zolotarov53e51e22011-07-19 01:45:02 +00002547 */
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03002548
2549 /* Send a ramrod */
2550 rc = bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_FILTER_RULES, p->cid,
2551 U64_HI(p->rdata_mapping),
2552 U64_LO(p->rdata_mapping),
2553 ETH_CONNECTION_TYPE);
2554 if (rc)
2555 return rc;
2556
2557 /* Ramrod completion is pending */
2558 return 1;
2559}
2560
2561static int bnx2x_wait_rx_mode_comp_e2(struct bnx2x *bp,
2562 struct bnx2x_rx_mode_ramrod_params *p)
2563{
2564 return bnx2x_state_wait(bp, p->state, p->pstate);
2565}
2566
2567static int bnx2x_empty_rx_mode_wait(struct bnx2x *bp,
2568 struct bnx2x_rx_mode_ramrod_params *p)
2569{
2570 /* Do nothing */
2571 return 0;
2572}
2573
2574int bnx2x_config_rx_mode(struct bnx2x *bp,
2575 struct bnx2x_rx_mode_ramrod_params *p)
2576{
2577 int rc;
2578
2579 /* Configure the new classification in the chip */
2580 rc = p->rx_mode_obj->config_rx_mode(bp, p);
2581 if (rc < 0)
2582 return rc;
2583
2584 /* Wait for a ramrod completion if was requested */
2585 if (test_bit(RAMROD_COMP_WAIT, &p->ramrod_flags)) {
2586 rc = p->rx_mode_obj->wait_comp(bp, p);
2587 if (rc)
2588 return rc;
2589 }
2590
2591 return rc;
2592}
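/* Usage sketch (illustrative, fields abridged): a typical caller fills the
 * ramrod parameters and requests a synchronous completion:
 *
 *	struct bnx2x_rx_mode_ramrod_params p;
 *
 *	memset(&p, 0, sizeof(p));
 *	p.rx_mode_obj = &bp->rx_mode_obj;
 *	__set_bit(RAMROD_RX, &p.ramrod_flags);
 *	__set_bit(RAMROD_TX, &p.ramrod_flags);
 *	__set_bit(RAMROD_COMP_WAIT, &p.ramrod_flags);
 *	__set_bit(BNX2X_ACCEPT_UNICAST, &p.rx_accept_flags);
 *	rc = bnx2x_config_rx_mode(bp, &p);
 *
 * cl_id, func_id, cid, rdata and the state/pstate fields must be set up as
 * well; they are omitted here for brevity.
 */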
2593
2594void bnx2x_init_rx_mode_obj(struct bnx2x *bp,
2595 struct bnx2x_rx_mode_obj *o)
2596{
2597 if (CHIP_IS_E1x(bp)) {
2598 o->wait_comp = bnx2x_empty_rx_mode_wait;
2599 o->config_rx_mode = bnx2x_set_rx_mode_e1x;
2600 } else {
2601 o->wait_comp = bnx2x_wait_rx_mode_comp_e2;
2602 o->config_rx_mode = bnx2x_set_rx_mode_e2;
2603 }
2604}
2605
2606/********************* Multicast verbs: SET, CLEAR ****************************/
2607static inline u8 bnx2x_mcast_bin_from_mac(u8 *mac)
2608{
2609 return (crc32c_le(0, mac, ETH_ALEN) >> 24) & 0xff;
2610}
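/* Worked example (made-up CRC value): if crc32c_le(0, mac, ETH_ALEN)
 * returned 0xab1234cd for some MAC, the bin would be
 * (0xab1234cd >> 24) & 0xff == 0xab == 171, i.e. bit 171 of the 256-bit
 * approximate-match vector.
 */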
2611
2612struct bnx2x_mcast_mac_elem {
2613 struct list_head link;
2614 u8 mac[ETH_ALEN];
2615 u8 pad[2]; /* For a natural alignment of the following buffer */
2616};
2617
2618struct bnx2x_pending_mcast_cmd {
2619 struct list_head link;
2620 int type; /* BNX2X_MCAST_CMD_X */
2621 union {
2622 struct list_head macs_head;
2623 u32 macs_num; /* Needed for DEL command */
2624 int next_bin; /* Needed for RESTORE flow with aprox match */
2625 } data;
2626
2627 bool done; /* set to true, when the command has been handled,
2628 * practically used in 57712 handling only, where one pending
2629 * command may be handled in a few operations. As long as for
2630 * other chips every operation handling is completed in a
2631 * single ramrod, there is no need to utilize this field.
2632 */
2633};
2634
2635static int bnx2x_mcast_wait(struct bnx2x *bp,
2636 struct bnx2x_mcast_obj *o)
2637{
2638 if (bnx2x_state_wait(bp, o->sched_state, o->raw.pstate) ||
2639 o->raw.wait_comp(bp, &o->raw))
2640 return -EBUSY;
2641
2642 return 0;
2643}
2644
2645static int bnx2x_mcast_enqueue_cmd(struct bnx2x *bp,
2646 struct bnx2x_mcast_obj *o,
2647 struct bnx2x_mcast_ramrod_params *p,
Yuval Mintz86564c32013-01-23 03:21:50 +00002648 enum bnx2x_mcast_cmd cmd)
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03002649{
2650 int total_sz;
2651 struct bnx2x_pending_mcast_cmd *new_cmd;
2652 struct bnx2x_mcast_mac_elem *cur_mac = NULL;
2653 struct bnx2x_mcast_list_elem *pos;
2654 int macs_list_len = ((cmd == BNX2X_MCAST_CMD_ADD) ?
2655 p->mcast_list_len : 0);
2656
 2657 /* If the command is empty ("handle pending commands only"), we are done */
2658 if (!p->mcast_list_len)
2659 return 0;
2660
2661 total_sz = sizeof(*new_cmd) +
2662 macs_list_len * sizeof(struct bnx2x_mcast_mac_elem);
2663
2664 /* Add mcast is called under spin_lock, thus calling with GFP_ATOMIC */
2665 new_cmd = kzalloc(total_sz, GFP_ATOMIC);
2666
2667 if (!new_cmd)
2668 return -ENOMEM;
2669
Merav Sicron51c1a582012-03-18 10:33:38 +00002670 DP(BNX2X_MSG_SP, "About to enqueue a new %d command. macs_list_len=%d\n",
2671 cmd, macs_list_len);
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03002672
2673 INIT_LIST_HEAD(&new_cmd->data.macs_head);
2674
2675 new_cmd->type = cmd;
2676 new_cmd->done = false;
2677
2678 switch (cmd) {
2679 case BNX2X_MCAST_CMD_ADD:
2680 cur_mac = (struct bnx2x_mcast_mac_elem *)
2681 ((u8 *)new_cmd + sizeof(*new_cmd));
2682
Yuval Mintz16a5fd92013-06-02 00:06:18 +00002683 /* Push the MACs of the current command into the pending command
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03002684 * MACs list: FIFO
Vladislav Zolotarov042181f2011-06-14 01:33:39 +00002685 */
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03002686 list_for_each_entry(pos, &p->mcast_list, link) {
2687 memcpy(cur_mac->mac, pos->mac, ETH_ALEN);
2688 list_add_tail(&cur_mac->link, &new_cmd->data.macs_head);
2689 cur_mac++;
Vladislav Zolotarov042181f2011-06-14 01:33:39 +00002690 }
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03002691
Vladislav Zolotarov042181f2011-06-14 01:33:39 +00002692 break;
2693
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03002694 case BNX2X_MCAST_CMD_DEL:
2695 new_cmd->data.macs_num = p->mcast_list_len;
2696 break;
2697
2698 case BNX2X_MCAST_CMD_RESTORE:
2699 new_cmd->data.next_bin = 0;
Vladislav Zolotarov042181f2011-06-14 01:33:39 +00002700 break;
2701
2702 default:
Jesper Juhl8b6d5c02012-07-31 11:39:37 +00002703 kfree(new_cmd);
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03002704 BNX2X_ERR("Unknown command: %d\n", cmd);
2705 return -EINVAL;
2706 }
2707
2708 /* Push the new pending command to the tail of the pending list: FIFO */
2709 list_add_tail(&new_cmd->link, &o->pending_cmds_head);
2710
2711 o->set_sched(o);
2712
2713 return 1;
2714}
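/* Layout sketch (illustrative): the single kzalloc() above packs the command
 * header and its MAC array into one buffer,
 *
 *	| struct bnx2x_pending_mcast_cmd | macs_list_len * mcast_mac_elem |
 *
 * cur_mac starts right past the header and every element is linked into
 * new_cmd->data.macs_head, so one kfree() of the command releases the MACs
 * as well.
 */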
2715
2716/**
2717 * bnx2x_mcast_get_next_bin - get the next set bin (index)
2718 *
 2719 * @o: multicast object
 2720 * @last: index to start looking from (inclusive)
2721 *
2722 * returns the next found (set) bin or a negative value if none is found.
2723 */
2724static inline int bnx2x_mcast_get_next_bin(struct bnx2x_mcast_obj *o, int last)
2725{
2726 int i, j, inner_start = last % BIT_VEC64_ELEM_SZ;
2727
2728 for (i = last / BIT_VEC64_ELEM_SZ; i < BNX2X_MCAST_VEC_SZ; i++) {
2729 if (o->registry.aprox_match.vec[i])
2730 for (j = inner_start; j < BIT_VEC64_ELEM_SZ; j++) {
2731 int cur_bit = j + BIT_VEC64_ELEM_SZ * i;
2732 if (BIT_VEC64_TEST_BIT(o->registry.aprox_match.
2733 vec, cur_bit)) {
2734 return cur_bit;
2735 }
2736 }
2737 inner_start = 0;
2738 }
2739
2740 /* None found */
2741 return -1;
2742}
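/* Worked example (hypothetical vector): with vec[0] == 0 and only bit 5 of
 * vec[1] set, a call with last == 0 skips the empty first word and returns
 * 5 + BIT_VEC64_ELEM_SZ * 1 == 69, i.e. bin 69 is the first set bin.
 */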
2743
2744/**
2745 * bnx2x_mcast_clear_first_bin - find the first set bin and clear it
2746 *
 2747 * @o: multicast object
2748 *
2749 * returns the index of the found bin or -1 if none is found
2750 */
2751static inline int bnx2x_mcast_clear_first_bin(struct bnx2x_mcast_obj *o)
2752{
2753 int cur_bit = bnx2x_mcast_get_next_bin(o, 0);
2754
2755 if (cur_bit >= 0)
2756 BIT_VEC64_CLEAR_BIT(o->registry.aprox_match.vec, cur_bit);
2757
2758 return cur_bit;
2759}
2760
2761static inline u8 bnx2x_mcast_get_rx_tx_flag(struct bnx2x_mcast_obj *o)
2762{
2763 struct bnx2x_raw_obj *raw = &o->raw;
2764 u8 rx_tx_flag = 0;
2765
2766 if ((raw->obj_type == BNX2X_OBJ_TYPE_TX) ||
2767 (raw->obj_type == BNX2X_OBJ_TYPE_RX_TX))
2768 rx_tx_flag |= ETH_MULTICAST_RULES_CMD_TX_CMD;
2769
2770 if ((raw->obj_type == BNX2X_OBJ_TYPE_RX) ||
2771 (raw->obj_type == BNX2X_OBJ_TYPE_RX_TX))
2772 rx_tx_flag |= ETH_MULTICAST_RULES_CMD_RX_CMD;
2773
2774 return rx_tx_flag;
2775}
2776
2777static void bnx2x_mcast_set_one_rule_e2(struct bnx2x *bp,
2778 struct bnx2x_mcast_obj *o, int idx,
2779 union bnx2x_mcast_config_data *cfg_data,
Yuval Mintz86564c32013-01-23 03:21:50 +00002780 enum bnx2x_mcast_cmd cmd)
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03002781{
2782 struct bnx2x_raw_obj *r = &o->raw;
2783 struct eth_multicast_rules_ramrod_data *data =
2784 (struct eth_multicast_rules_ramrod_data *)(r->rdata);
2785 u8 func_id = r->func_id;
2786 u8 rx_tx_add_flag = bnx2x_mcast_get_rx_tx_flag(o);
2787 int bin;
2788
2789 if ((cmd == BNX2X_MCAST_CMD_ADD) || (cmd == BNX2X_MCAST_CMD_RESTORE))
2790 rx_tx_add_flag |= ETH_MULTICAST_RULES_CMD_IS_ADD;
2791
2792 data->rules[idx].cmd_general_data |= rx_tx_add_flag;
2793
2794 /* Get a bin and update a bins' vector */
2795 switch (cmd) {
2796 case BNX2X_MCAST_CMD_ADD:
2797 bin = bnx2x_mcast_bin_from_mac(cfg_data->mac);
2798 BIT_VEC64_SET_BIT(o->registry.aprox_match.vec, bin);
Vladislav Zolotarov042181f2011-06-14 01:33:39 +00002799 break;
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03002800
2801 case BNX2X_MCAST_CMD_DEL:
2802 /* If there were no more bins to clear
2803 * (bnx2x_mcast_clear_first_bin() returns -1) then we would
2804 * clear any (0xff) bin.
 2805 * See bnx2x_mcast_validate_e2() for an explanation of when this
 2806 * may happen.
2807 */
2808 bin = bnx2x_mcast_clear_first_bin(o);
2809 break;
2810
2811 case BNX2X_MCAST_CMD_RESTORE:
2812 bin = cfg_data->bin;
2813 break;
2814
2815 default:
2816 BNX2X_ERR("Unknown command: %d\n", cmd);
2817 return;
Vladislav Zolotarov042181f2011-06-14 01:33:39 +00002818 }
2819
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03002820 DP(BNX2X_MSG_SP, "%s bin %d\n",
2821 ((rx_tx_add_flag & ETH_MULTICAST_RULES_CMD_IS_ADD) ?
2822 "Setting" : "Clearing"), bin);
Vladislav Zolotarov042181f2011-06-14 01:33:39 +00002823
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03002824 data->rules[idx].bin_id = (u8)bin;
2825 data->rules[idx].func_id = func_id;
2826 data->rules[idx].engine_id = o->engine_id;
Vladislav Zolotarov042181f2011-06-14 01:33:39 +00002827}
2828
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03002829/**
2830 * bnx2x_mcast_handle_restore_cmd_e2 - restore configuration from the registry
2831 *
2832 * @bp: device handle
 2833 * @o: multicast object
 2834 * @start_bin: index in the registry to start from (inclusive)
2835 * @rdata_idx: index in the ramrod data to start from
2836 *
2837 * returns last handled bin index or -1 if all bins have been handled
2838 */
2839static inline int bnx2x_mcast_handle_restore_cmd_e2(
2840 struct bnx2x *bp, struct bnx2x_mcast_obj *o , int start_bin,
2841 int *rdata_idx)
Vladislav Zolotarov042181f2011-06-14 01:33:39 +00002842{
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03002843 int cur_bin, cnt = *rdata_idx;
Yuval Mintz86564c32013-01-23 03:21:50 +00002844 union bnx2x_mcast_config_data cfg_data = {NULL};
Vladislav Zolotarov042181f2011-06-14 01:33:39 +00002845
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03002846 /* go through the registry and configure the bins from it */
2847 for (cur_bin = bnx2x_mcast_get_next_bin(o, start_bin); cur_bin >= 0;
2848 cur_bin = bnx2x_mcast_get_next_bin(o, cur_bin + 1)) {
Vladislav Zolotarov042181f2011-06-14 01:33:39 +00002849
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03002850 cfg_data.bin = (u8)cur_bin;
2851 o->set_one_rule(bp, o, cnt, &cfg_data,
2852 BNX2X_MCAST_CMD_RESTORE);
Vladislav Zolotarov042181f2011-06-14 01:33:39 +00002853
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03002854 cnt++;
Vladislav Zolotarov042181f2011-06-14 01:33:39 +00002855
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03002856 DP(BNX2X_MSG_SP, "About to configure a bin %d\n", cur_bin);
Vladislav Zolotarov042181f2011-06-14 01:33:39 +00002857
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03002858 /* Break if we reached the maximum number
2859 * of rules.
2860 */
2861 if (cnt >= o->max_cmd_len)
2862 break;
Vladislav Zolotarov042181f2011-06-14 01:33:39 +00002863 }
2864
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03002865 *rdata_idx = cnt;
2866
2867 return cur_bin;
Vladislav Zolotarov042181f2011-06-14 01:33:39 +00002868}
2869
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03002870static inline void bnx2x_mcast_hdl_pending_add_e2(struct bnx2x *bp,
2871 struct bnx2x_mcast_obj *o, struct bnx2x_pending_mcast_cmd *cmd_pos,
2872 int *line_idx)
Vladislav Zolotarov042181f2011-06-14 01:33:39 +00002873{
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03002874 struct bnx2x_mcast_mac_elem *pmac_pos, *pmac_pos_n;
2875 int cnt = *line_idx;
Yuval Mintz86564c32013-01-23 03:21:50 +00002876 union bnx2x_mcast_config_data cfg_data = {NULL};
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03002877
2878 list_for_each_entry_safe(pmac_pos, pmac_pos_n, &cmd_pos->data.macs_head,
2879 link) {
2880
2881 cfg_data.mac = &pmac_pos->mac[0];
2882 o->set_one_rule(bp, o, cnt, &cfg_data, cmd_pos->type);
2883
2884 cnt++;
2885
Joe Perches0f9dad12011-08-14 12:16:19 +00002886 DP(BNX2X_MSG_SP, "About to configure %pM mcast MAC\n",
Merav Sicron51c1a582012-03-18 10:33:38 +00002887 pmac_pos->mac);
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03002888
2889 list_del(&pmac_pos->link);
2890
2891 /* Break if we reached the maximum number
2892 * of rules.
2893 */
2894 if (cnt >= o->max_cmd_len)
2895 break;
2896 }
2897
2898 *line_idx = cnt;
2899
2900 /* if no more MACs to configure - we are done */
2901 if (list_empty(&cmd_pos->data.macs_head))
2902 cmd_pos->done = true;
2903}
2904
2905static inline void bnx2x_mcast_hdl_pending_del_e2(struct bnx2x *bp,
2906 struct bnx2x_mcast_obj *o, struct bnx2x_pending_mcast_cmd *cmd_pos,
2907 int *line_idx)
2908{
2909 int cnt = *line_idx;
2910
2911 while (cmd_pos->data.macs_num) {
2912 o->set_one_rule(bp, o, cnt, NULL, cmd_pos->type);
2913
2914 cnt++;
2915
2916 cmd_pos->data.macs_num--;
2917
 2918 DP(BNX2X_MSG_SP, "Deleting MAC. %d left, cnt is %d\n",
2919 cmd_pos->data.macs_num, cnt);
2920
2921 /* Break if we reached the maximum
2922 * number of rules.
2923 */
2924 if (cnt >= o->max_cmd_len)
2925 break;
2926 }
2927
2928 *line_idx = cnt;
2929
2930 /* If we cleared all bins - we are done */
2931 if (!cmd_pos->data.macs_num)
2932 cmd_pos->done = true;
2933}
2934
2935static inline void bnx2x_mcast_hdl_pending_restore_e2(struct bnx2x *bp,
2936 struct bnx2x_mcast_obj *o, struct bnx2x_pending_mcast_cmd *cmd_pos,
2937 int *line_idx)
2938{
2939 cmd_pos->data.next_bin = o->hdl_restore(bp, o, cmd_pos->data.next_bin,
2940 line_idx);
2941
2942 if (cmd_pos->data.next_bin < 0)
2943 /* If o->set_restore returned -1 we are done */
2944 cmd_pos->done = true;
2945 else
2946 /* Start from the next bin next time */
2947 cmd_pos->data.next_bin++;
2948}
2949
2950static inline int bnx2x_mcast_handle_pending_cmds_e2(struct bnx2x *bp,
2951 struct bnx2x_mcast_ramrod_params *p)
2952{
2953 struct bnx2x_pending_mcast_cmd *cmd_pos, *cmd_pos_n;
2954 int cnt = 0;
2955 struct bnx2x_mcast_obj *o = p->mcast_obj;
2956
2957 list_for_each_entry_safe(cmd_pos, cmd_pos_n, &o->pending_cmds_head,
2958 link) {
2959 switch (cmd_pos->type) {
2960 case BNX2X_MCAST_CMD_ADD:
2961 bnx2x_mcast_hdl_pending_add_e2(bp, o, cmd_pos, &cnt);
2962 break;
2963
2964 case BNX2X_MCAST_CMD_DEL:
2965 bnx2x_mcast_hdl_pending_del_e2(bp, o, cmd_pos, &cnt);
2966 break;
2967
2968 case BNX2X_MCAST_CMD_RESTORE:
2969 bnx2x_mcast_hdl_pending_restore_e2(bp, o, cmd_pos,
2970 &cnt);
2971 break;
2972
2973 default:
2974 BNX2X_ERR("Unknown command: %d\n", cmd_pos->type);
2975 return -EINVAL;
2976 }
2977
2978 /* If the command has been completed - remove it from the list
2979 * and free the memory
2980 */
2981 if (cmd_pos->done) {
2982 list_del(&cmd_pos->link);
2983 kfree(cmd_pos);
2984 }
2985
2986 /* Break if we reached the maximum number of rules */
2987 if (cnt >= o->max_cmd_len)
2988 break;
2989 }
2990
2991 return cnt;
2992}
2993
2994static inline void bnx2x_mcast_hdl_add(struct bnx2x *bp,
2995 struct bnx2x_mcast_obj *o, struct bnx2x_mcast_ramrod_params *p,
2996 int *line_idx)
2997{
2998 struct bnx2x_mcast_list_elem *mlist_pos;
Yuval Mintz86564c32013-01-23 03:21:50 +00002999 union bnx2x_mcast_config_data cfg_data = {NULL};
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03003000 int cnt = *line_idx;
3001
3002 list_for_each_entry(mlist_pos, &p->mcast_list, link) {
3003 cfg_data.mac = mlist_pos->mac;
3004 o->set_one_rule(bp, o, cnt, &cfg_data, BNX2X_MCAST_CMD_ADD);
3005
3006 cnt++;
3007
Joe Perches0f9dad12011-08-14 12:16:19 +00003008 DP(BNX2X_MSG_SP, "About to configure %pM mcast MAC\n",
Yuval Mintz2de67432013-01-23 03:21:43 +00003009 mlist_pos->mac);
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03003010 }
3011
3012 *line_idx = cnt;
3013}
3014
3015static inline void bnx2x_mcast_hdl_del(struct bnx2x *bp,
3016 struct bnx2x_mcast_obj *o, struct bnx2x_mcast_ramrod_params *p,
3017 int *line_idx)
3018{
3019 int cnt = *line_idx, i;
3020
3021 for (i = 0; i < p->mcast_list_len; i++) {
3022 o->set_one_rule(bp, o, cnt, NULL, BNX2X_MCAST_CMD_DEL);
3023
3024 cnt++;
3025
3026 DP(BNX2X_MSG_SP, "Deleting MAC. %d left\n",
3027 p->mcast_list_len - i - 1);
3028 }
3029
3030 *line_idx = cnt;
3031}
3032
3033/**
 3034 * bnx2x_mcast_handle_current_cmd - fill ramrod data with the current command
 3035 *
 3036 * @bp: device handle
 3037 * @p: multicast ramrod parameters
 3038 * @cmd: command to handle (ADD/DEL/RESTORE)
3039 * @start_cnt: first line in the ramrod data that may be used
3040 *
 3041 * This function is called iff there is enough room for the current command in
3042 * the ramrod data.
3043 * Returns number of lines filled in the ramrod data in total.
3044 */
3045static inline int bnx2x_mcast_handle_current_cmd(struct bnx2x *bp,
Yuval Mintz86564c32013-01-23 03:21:50 +00003046 struct bnx2x_mcast_ramrod_params *p,
3047 enum bnx2x_mcast_cmd cmd,
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03003048 int start_cnt)
3049{
3050 struct bnx2x_mcast_obj *o = p->mcast_obj;
3051 int cnt = start_cnt;
3052
3053 DP(BNX2X_MSG_SP, "p->mcast_list_len=%d\n", p->mcast_list_len);
3054
3055 switch (cmd) {
3056 case BNX2X_MCAST_CMD_ADD:
3057 bnx2x_mcast_hdl_add(bp, o, p, &cnt);
3058 break;
3059
3060 case BNX2X_MCAST_CMD_DEL:
3061 bnx2x_mcast_hdl_del(bp, o, p, &cnt);
3062 break;
3063
3064 case BNX2X_MCAST_CMD_RESTORE:
3065 o->hdl_restore(bp, o, 0, &cnt);
3066 break;
3067
3068 default:
3069 BNX2X_ERR("Unknown command: %d\n", cmd);
3070 return -EINVAL;
3071 }
3072
3073 /* The current command has been handled */
3074 p->mcast_list_len = 0;
3075
3076 return cnt;
3077}
3078
3079static int bnx2x_mcast_validate_e2(struct bnx2x *bp,
3080 struct bnx2x_mcast_ramrod_params *p,
Yuval Mintz86564c32013-01-23 03:21:50 +00003081 enum bnx2x_mcast_cmd cmd)
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03003082{
3083 struct bnx2x_mcast_obj *o = p->mcast_obj;
3084 int reg_sz = o->get_registry_size(o);
3085
3086 switch (cmd) {
3087 /* DEL command deletes all currently configured MACs */
3088 case BNX2X_MCAST_CMD_DEL:
3089 o->set_registry_size(o, 0);
3090 /* Don't break */
3091
3092 /* RESTORE command will restore the entire multicast configuration */
3093 case BNX2X_MCAST_CMD_RESTORE:
 3094 /* Here we set the approximate amount of work to do, which may in
 3095 * fact turn out to be less: some MACs in postponed ADD
 3096 * command(s) scheduled before this command may fall into
 3097 * the same bin, so the actual number of bins set in the
 3098 * registry would be less than estimated here. See
3099 * bnx2x_mcast_set_one_rule_e2() for further details.
3100 */
3101 p->mcast_list_len = reg_sz;
3102 break;
3103
3104 case BNX2X_MCAST_CMD_ADD:
3105 case BNX2X_MCAST_CMD_CONT:
3106 /* Here we assume that all new MACs will fall into new bins.
3107 * However we will correct the real registry size after we
3108 * handle all pending commands.
3109 */
3110 o->set_registry_size(o, reg_sz + p->mcast_list_len);
3111 break;
3112
3113 default:
3114 BNX2X_ERR("Unknown command: %d\n", cmd);
3115 return -EINVAL;
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03003116 }
3117
3118 /* Increase the total number of MACs pending to be configured */
3119 o->total_pending_num += p->mcast_list_len;
3120
3121 return 0;
3122}
3123
3124static void bnx2x_mcast_revert_e2(struct bnx2x *bp,
3125 struct bnx2x_mcast_ramrod_params *p,
3126 int old_num_bins)
3127{
3128 struct bnx2x_mcast_obj *o = p->mcast_obj;
3129
3130 o->set_registry_size(o, old_num_bins);
3131 o->total_pending_num -= p->mcast_list_len;
3132}
3133
3134/**
 3135 * bnx2x_mcast_set_rdata_hdr_e2 - sets the ramrod data header values
 3136 *
 3137 * @bp: device handle
 3138 * @p: multicast ramrod parameters
3139 * @len: number of rules to handle
3140 */
3141static inline void bnx2x_mcast_set_rdata_hdr_e2(struct bnx2x *bp,
3142 struct bnx2x_mcast_ramrod_params *p,
3143 u8 len)
3144{
3145 struct bnx2x_raw_obj *r = &p->mcast_obj->raw;
3146 struct eth_multicast_rules_ramrod_data *data =
3147 (struct eth_multicast_rules_ramrod_data *)(r->rdata);
3148
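 /* The echo field packs the SW CID together with the MCAST_PENDING
 * state so the completion handler can match the ramrod completion
 * back to the object that issued it.
 */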
Yuval Mintz86564c32013-01-23 03:21:50 +00003149 data->header.echo = cpu_to_le32((r->cid & BNX2X_SWCID_MASK) |
3150 (BNX2X_FILTER_MCAST_PENDING <<
3151 BNX2X_SWCID_SHIFT));
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03003152 data->header.rule_cnt = len;
3153}
3154
3155/**
3156 * bnx2x_mcast_refresh_registry_e2 - recalculate the actual number of set bins
3157 *
3158 * @bp: device handle
 3159 * @o: multicast object
 3160 *
 3161 * Recalculate the actual number of set bins in the registry using Brian
 3162 * Kernighan's algorithm: its execution complexity scales with the number of set bins.
 3163 *
 3164 * Returns 0 for compliance with bnx2x_mcast_refresh_registry_e1().
3165 */
3166static inline int bnx2x_mcast_refresh_registry_e2(struct bnx2x *bp,
3167 struct bnx2x_mcast_obj *o)
3168{
3169 int i, cnt = 0;
3170 u64 elem;
3171
3172 for (i = 0; i < BNX2X_MCAST_VEC_SZ; i++) {
3173 elem = o->registry.aprox_match.vec[i];
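 /* Kernighan trick: clearing the lowest set bit on every iteration
 * makes the inner loop run once for each set bit in elem.
 */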
3174 for (; elem; cnt++)
3175 elem &= elem - 1;
3176 }
3177
3178 o->set_registry_size(o, cnt);
3179
3180 return 0;
3181}
3182
3183static int bnx2x_mcast_setup_e2(struct bnx2x *bp,
3184 struct bnx2x_mcast_ramrod_params *p,
Yuval Mintz86564c32013-01-23 03:21:50 +00003185 enum bnx2x_mcast_cmd cmd)
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03003186{
3187 struct bnx2x_raw_obj *raw = &p->mcast_obj->raw;
3188 struct bnx2x_mcast_obj *o = p->mcast_obj;
3189 struct eth_multicast_rules_ramrod_data *data =
3190 (struct eth_multicast_rules_ramrod_data *)(raw->rdata);
3191 int cnt = 0, rc;
3192
3193 /* Reset the ramrod data buffer */
Vladislav Zolotarov042181f2011-06-14 01:33:39 +00003194 memset(data, 0, sizeof(*data));
3195
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03003196 cnt = bnx2x_mcast_handle_pending_cmds_e2(bp, p);
3197
3198 /* If there are no more pending commands - clear SCHEDULED state */
3199 if (list_empty(&o->pending_cmds_head))
3200 o->clear_sched(o);
3201
3202 /* The below may be true iff there was enough room in ramrod
3203 * data for all pending commands and for the current
3204 * command. Otherwise the current command would have been added
3205 * to the pending commands and p->mcast_list_len would have been
3206 * zeroed.
3207 */
3208 if (p->mcast_list_len > 0)
3209 cnt = bnx2x_mcast_handle_current_cmd(bp, p, cmd, cnt);
3210
3211 /* We've pulled out some MACs - update the total number of
3212 * outstanding.
3213 */
3214 o->total_pending_num -= cnt;
3215
3216 /* send a ramrod */
3217 WARN_ON(o->total_pending_num < 0);
3218 WARN_ON(cnt > o->max_cmd_len);
3219
3220 bnx2x_mcast_set_rdata_hdr_e2(bp, p, (u8)cnt);
3221
3222 /* Update a registry size if there are no more pending operations.
3223 *
3224 * We don't want to change the value of the registry size if there are
3225 * pending operations because we want it to always be equal to the
3226 * exact or the approximate number (see bnx2x_mcast_validate_e2()) of
3227 * set bins after the last requested operation in order to properly
3228 * evaluate the size of the next DEL/RESTORE operation.
3229 *
3230 * Note that we update the registry itself during command(s) handling
3231 * - see bnx2x_mcast_set_one_rule_e2(). That's because for 57712 we
3232 * aggregate multiple commands (ADD/DEL/RESTORE) into one ramrod but
3233 * with a limited amount of update commands (per MAC/bin) and we don't
3234 * know in this scope what the actual state of bins configuration is
3235 * going to be after this ramrod.
3236 */
3237 if (!o->total_pending_num)
3238 bnx2x_mcast_refresh_registry_e2(bp, o);
3239
Yuval Mintz16a5fd92013-06-02 00:06:18 +00003240 /* If CLEAR_ONLY was requested - don't send a ramrod and clear
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03003241 * RAMROD_PENDING status immediately.
3242 */
3243 if (test_bit(RAMROD_DRV_CLR_ONLY, &p->ramrod_flags)) {
3244 raw->clear_pending(raw);
3245 return 0;
3246 } else {
Yuval Mintz16a5fd92013-06-02 00:06:18 +00003247 /* No need for an explicit memory barrier here: we only need
 3248 * to order the write of the SPQ element against the update of
 3249 * the SPQ producer, and since the latter involves a memory
 3250 * read, the full memory barrier inside bnx2x_sp_post() already
 3251 * provides that ordering.
Vladislav Zolotarov53e51e22011-07-19 01:45:02 +00003252 */
3253
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03003254 /* Send a ramrod */
3255 rc = bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_MULTICAST_RULES,
3256 raw->cid, U64_HI(raw->rdata_mapping),
3257 U64_LO(raw->rdata_mapping),
3258 ETH_CONNECTION_TYPE);
3259 if (rc)
3260 return rc;
3261
3262 /* Ramrod completion is pending */
3263 return 1;
3264 }
3265}
3266
3267static int bnx2x_mcast_validate_e1h(struct bnx2x *bp,
3268 struct bnx2x_mcast_ramrod_params *p,
Yuval Mintz86564c32013-01-23 03:21:50 +00003269 enum bnx2x_mcast_cmd cmd)
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03003270{
 3271 /* Mark that there is work to do */
3272 if ((cmd == BNX2X_MCAST_CMD_DEL) || (cmd == BNX2X_MCAST_CMD_RESTORE))
3273 p->mcast_list_len = 1;
3274
3275 return 0;
3276}
3277
3278static void bnx2x_mcast_revert_e1h(struct bnx2x *bp,
3279 struct bnx2x_mcast_ramrod_params *p,
3280 int old_num_bins)
3281{
3282 /* Do nothing */
3283}
3284
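/* Set bit 'bit' in an array of u32 filter words: the word index is
 * bit >> 5 and the bit position within the word is bit & 0x1f.
 */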
3285#define BNX2X_57711_SET_MC_FILTER(filter, bit) \
3286do { \
3287 (filter)[(bit) >> 5] |= (1 << ((bit) & 0x1f)); \
3288} while (0)
3289
3290static inline void bnx2x_mcast_hdl_add_e1h(struct bnx2x *bp,
3291 struct bnx2x_mcast_obj *o,
3292 struct bnx2x_mcast_ramrod_params *p,
3293 u32 *mc_filter)
3294{
3295 struct bnx2x_mcast_list_elem *mlist_pos;
3296 int bit;
3297
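 /* Hash each MAC into a bin and set the corresponding bit both in
 * the HW filter image and in the SW approximate-match registry.
 */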
3298 list_for_each_entry(mlist_pos, &p->mcast_list, link) {
3299 bit = bnx2x_mcast_bin_from_mac(mlist_pos->mac);
3300 BNX2X_57711_SET_MC_FILTER(mc_filter, bit);
3301
Joe Perches0f9dad12011-08-14 12:16:19 +00003302 DP(BNX2X_MSG_SP, "About to configure %pM mcast MAC, bin %d\n",
Yuval Mintz2de67432013-01-23 03:21:43 +00003303 mlist_pos->mac, bit);
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03003304
3305 /* bookkeeping... */
3306 BIT_VEC64_SET_BIT(o->registry.aprox_match.vec,
3307 bit);
3308 }
3309}
3310
3311static inline void bnx2x_mcast_hdl_restore_e1h(struct bnx2x *bp,
3312 struct bnx2x_mcast_obj *o, struct bnx2x_mcast_ramrod_params *p,
3313 u32 *mc_filter)
3314{
3315 int bit;
3316
3317 for (bit = bnx2x_mcast_get_next_bin(o, 0);
3318 bit >= 0;
3319 bit = bnx2x_mcast_get_next_bin(o, bit + 1)) {
3320 BNX2X_57711_SET_MC_FILTER(mc_filter, bit);
3321 DP(BNX2X_MSG_SP, "About to set bin %d\n", bit);
3322 }
3323}
3324
Yuval Mintz16a5fd92013-06-02 00:06:18 +00003325/* On 57711 we write the multicast MACs' approximate match
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03003326 * table directly into the TSTORM's internal RAM, so we don't
 3327 * really need any tricks to make it work.
3328 */
3329static int bnx2x_mcast_setup_e1h(struct bnx2x *bp,
3330 struct bnx2x_mcast_ramrod_params *p,
Yuval Mintz86564c32013-01-23 03:21:50 +00003331 enum bnx2x_mcast_cmd cmd)
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03003332{
3333 int i;
3334 struct bnx2x_mcast_obj *o = p->mcast_obj;
3335 struct bnx2x_raw_obj *r = &o->raw;
3336
3337 /* If CLEAR_ONLY has been requested - clear the registry
3338 * and clear a pending bit.
3339 */
3340 if (!test_bit(RAMROD_DRV_CLR_ONLY, &p->ramrod_flags)) {
3341 u32 mc_filter[MC_HASH_SIZE] = {0};
3342
3343 /* Set the multicast filter bits before writing it into
3344 * the internal memory.
3345 */
3346 switch (cmd) {
3347 case BNX2X_MCAST_CMD_ADD:
3348 bnx2x_mcast_hdl_add_e1h(bp, o, p, mc_filter);
3349 break;
3350
3351 case BNX2X_MCAST_CMD_DEL:
Joe Perches94f05b02011-08-14 12:16:20 +00003352 DP(BNX2X_MSG_SP,
3353 "Invalidating multicast MACs configuration\n");
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03003354
3355 /* clear the registry */
3356 memset(o->registry.aprox_match.vec, 0,
3357 sizeof(o->registry.aprox_match.vec));
3358 break;
3359
3360 case BNX2X_MCAST_CMD_RESTORE:
3361 bnx2x_mcast_hdl_restore_e1h(bp, o, p, mc_filter);
3362 break;
3363
3364 default:
3365 BNX2X_ERR("Unknown command: %d\n", cmd);
3366 return -EINVAL;
3367 }
3368
3369 /* Set the mcast filter in the internal memory */
3370 for (i = 0; i < MC_HASH_SIZE; i++)
3371 REG_WR(bp, MC_HASH_OFFSET(bp, i), mc_filter[i]);
3372 } else
3373 /* clear the registry */
3374 memset(o->registry.aprox_match.vec, 0,
3375 sizeof(o->registry.aprox_match.vec));
3376
3377 /* We are done */
3378 r->clear_pending(r);
3379
3380 return 0;
3381}
3382
3383static int bnx2x_mcast_validate_e1(struct bnx2x *bp,
3384 struct bnx2x_mcast_ramrod_params *p,
Yuval Mintz86564c32013-01-23 03:21:50 +00003385 enum bnx2x_mcast_cmd cmd)
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03003386{
3387 struct bnx2x_mcast_obj *o = p->mcast_obj;
3388 int reg_sz = o->get_registry_size(o);
3389
3390 switch (cmd) {
3391 /* DEL command deletes all currently configured MACs */
3392 case BNX2X_MCAST_CMD_DEL:
3393 o->set_registry_size(o, 0);
 3394 /* Fall through */
3395
3396 /* RESTORE command will restore the entire multicast configuration */
3397 case BNX2X_MCAST_CMD_RESTORE:
3398 p->mcast_list_len = reg_sz;
3399 DP(BNX2X_MSG_SP, "Command %d, p->mcast_list_len=%d\n",
3400 cmd, p->mcast_list_len);
3401 break;
3402
3403 case BNX2X_MCAST_CMD_ADD:
3404 case BNX2X_MCAST_CMD_CONT:
3405 /* Multicast MACs on 57710 are configured as unicast MACs and
 3406 * there is only a limited number of CAM entries available
 3407 * for them.
3408 */
3409 if (p->mcast_list_len > o->max_cmd_len) {
Merav Sicron51c1a582012-03-18 10:33:38 +00003410 BNX2X_ERR("Can't configure more than %d multicast MACs on 57710\n",
3411 o->max_cmd_len);
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03003412 return -EINVAL;
3413 }
3414 /* Every configured MAC should be cleared if DEL command is
 3415 * called. Only the last ADD command is relevant, since
 3416 * every ADD command overrides the previous configuration.
3417 */
3418 DP(BNX2X_MSG_SP, "p->mcast_list_len=%d\n", p->mcast_list_len);
3419 if (p->mcast_list_len > 0)
3420 o->set_registry_size(o, p->mcast_list_len);
3421
3422 break;
3423
3424 default:
3425 BNX2X_ERR("Unknown command: %d\n", cmd);
3426 return -EINVAL;
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03003427 }
3428
3429 /* We want to ensure that commands are executed one by one for 57710.
 3430 * Therefore each non-empty command will consume o->max_cmd_len.
3431 */
3432 if (p->mcast_list_len)
3433 o->total_pending_num += o->max_cmd_len;
3434
3435 return 0;
3436}
3437
3438static void bnx2x_mcast_revert_e1(struct bnx2x *bp,
3439 struct bnx2x_mcast_ramrod_params *p,
3440 int old_num_macs)
3441{
3442 struct bnx2x_mcast_obj *o = p->mcast_obj;
3443
3444 o->set_registry_size(o, old_num_macs);
3445
 3446 /* If the current command hasn't been handled yet and we are
 3447 * here, it means it's meant to be dropped and we have to
Yuval Mintz16a5fd92013-06-02 00:06:18 +00003448 * update the number of outstanding MACs accordingly.
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03003449 */
3450 if (p->mcast_list_len)
3451 o->total_pending_num -= o->max_cmd_len;
3452}
3453
3454static void bnx2x_mcast_set_one_rule_e1(struct bnx2x *bp,
3455 struct bnx2x_mcast_obj *o, int idx,
3456 union bnx2x_mcast_config_data *cfg_data,
Yuval Mintz86564c32013-01-23 03:21:50 +00003457 enum bnx2x_mcast_cmd cmd)
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03003458{
3459 struct bnx2x_raw_obj *r = &o->raw;
3460 struct mac_configuration_cmd *data =
3461 (struct mac_configuration_cmd *)(r->rdata);
3462
3463 /* copy mac */
3464 if ((cmd == BNX2X_MCAST_CMD_ADD) || (cmd == BNX2X_MCAST_CMD_RESTORE)) {
3465 bnx2x_set_fw_mac_addr(&data->config_table[idx].msb_mac_addr,
3466 &data->config_table[idx].middle_mac_addr,
3467 &data->config_table[idx].lsb_mac_addr,
3468 cfg_data->mac);
3469
3470 data->config_table[idx].vlan_id = 0;
3471 data->config_table[idx].pf_id = r->func_id;
3472 data->config_table[idx].clients_bit_vector =
3473 cpu_to_le32(1 << r->cl_id);
3474
3475 SET_FLAG(data->config_table[idx].flags,
3476 MAC_CONFIGURATION_ENTRY_ACTION_TYPE,
3477 T_ETH_MAC_COMMAND_SET);
3478 }
3479}
3480
3481/**
3482 * bnx2x_mcast_set_rdata_hdr_e1 - set header values in mac_configuration_cmd
3483 *
3484 * @bp: device handle
 3485 * @p: multicast ramrod parameters
3486 * @len: number of rules to handle
3487 */
3488static inline void bnx2x_mcast_set_rdata_hdr_e1(struct bnx2x *bp,
3489 struct bnx2x_mcast_ramrod_params *p,
3490 u8 len)
3491{
3492 struct bnx2x_raw_obj *r = &p->mcast_obj->raw;
3493 struct mac_configuration_cmd *data =
3494 (struct mac_configuration_cmd *)(r->rdata);
3495
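 /* Each function writes to its own region of the MAC configuration
 * table; emulation (slow) chips use a smaller per-function region.
 */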
3496 u8 offset = (CHIP_REV_IS_SLOW(bp) ?
3497 BNX2X_MAX_EMUL_MULTI*(1 + r->func_id) :
3498 BNX2X_MAX_MULTICAST*(1 + r->func_id));
3499
3500 data->hdr.offset = offset;
Yuval Mintz86564c32013-01-23 03:21:50 +00003501 data->hdr.client_id = cpu_to_le16(0xff);
3502 data->hdr.echo = cpu_to_le32((r->cid & BNX2X_SWCID_MASK) |
3503 (BNX2X_FILTER_MCAST_PENDING <<
3504 BNX2X_SWCID_SHIFT));
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03003505 data->hdr.length = len;
3506}
3507
3508/**
3509 * bnx2x_mcast_handle_restore_cmd_e1 - restore command for 57710
3510 *
3511 * @bp: device handle
 3512 * @o: multicast object
 3513 * @start_idx: index in the registry to start from
 3514 * @rdata_idx: index in the ramrod data to start from
 3515 *
 3516 * The restore command for 57710 is, like all other commands, always a
 3517 * stand-alone command - start_idx and rdata_idx will always be 0. This
 3518 * function always succeeds.
 3519 * Returns -1 to comply with the 57712 variant.
3520 */
3521static inline int bnx2x_mcast_handle_restore_cmd_e1(
3522 struct bnx2x *bp, struct bnx2x_mcast_obj *o , int start_idx,
3523 int *rdata_idx)
3524{
3525 struct bnx2x_mcast_mac_elem *elem;
3526 int i = 0;
Yuval Mintz86564c32013-01-23 03:21:50 +00003527 union bnx2x_mcast_config_data cfg_data = {NULL};
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03003528
3529 /* go through the registry and configure the MACs from it. */
3530 list_for_each_entry(elem, &o->registry.exact_match.macs, link) {
3531 cfg_data.mac = &elem->mac[0];
3532 o->set_one_rule(bp, o, i, &cfg_data, BNX2X_MCAST_CMD_RESTORE);
3533
3534 i++;
3535
Joe Perches0f9dad12011-08-14 12:16:19 +00003536 DP(BNX2X_MSG_SP, "About to configure %pM mcast MAC\n",
Yuval Mintz2de67432013-01-23 03:21:43 +00003537 cfg_data.mac);
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03003538 }
3539
3540 *rdata_idx = i;
3541
3542 return -1;
3543}
3544
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03003545static inline int bnx2x_mcast_handle_pending_cmds_e1(
3546 struct bnx2x *bp, struct bnx2x_mcast_ramrod_params *p)
3547{
3548 struct bnx2x_pending_mcast_cmd *cmd_pos;
3549 struct bnx2x_mcast_mac_elem *pmac_pos;
3550 struct bnx2x_mcast_obj *o = p->mcast_obj;
Yuval Mintz86564c32013-01-23 03:21:50 +00003551 union bnx2x_mcast_config_data cfg_data = {NULL};
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03003552 int cnt = 0;
3553
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03003554 /* If nothing to be done - return */
3555 if (list_empty(&o->pending_cmds_head))
3556 return 0;
3557
3558 /* Handle the first command */
3559 cmd_pos = list_first_entry(&o->pending_cmds_head,
3560 struct bnx2x_pending_mcast_cmd, link);
3561
3562 switch (cmd_pos->type) {
3563 case BNX2X_MCAST_CMD_ADD:
3564 list_for_each_entry(pmac_pos, &cmd_pos->data.macs_head, link) {
3565 cfg_data.mac = &pmac_pos->mac[0];
3566 o->set_one_rule(bp, o, cnt, &cfg_data, cmd_pos->type);
3567
3568 cnt++;
3569
Joe Perches0f9dad12011-08-14 12:16:19 +00003570 DP(BNX2X_MSG_SP, "About to configure %pM mcast MAC\n",
Yuval Mintz2de67432013-01-23 03:21:43 +00003571 pmac_pos->mac);
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03003572 }
3573 break;
3574
3575 case BNX2X_MCAST_CMD_DEL:
3576 cnt = cmd_pos->data.macs_num;
3577 DP(BNX2X_MSG_SP, "About to delete %d multicast MACs\n", cnt);
3578 break;
3579
3580 case BNX2X_MCAST_CMD_RESTORE:
3581 o->hdl_restore(bp, o, 0, &cnt);
3582 break;
3583
3584 default:
3585 BNX2X_ERR("Unknown command: %d\n", cmd_pos->type);
3586 return -EINVAL;
3587 }
3588
3589 list_del(&cmd_pos->link);
3590 kfree(cmd_pos);
3591
3592 return cnt;
3593}
3594
3595/**
 3596 * bnx2x_get_fw_mac_addr - revert bnx2x_set_fw_mac_addr().
 3597 *
 3598 * @fw_hi: high 16 bits of the MAC in FW format
 3599 * @fw_mid: middle 16 bits of the MAC in FW format
 3600 * @fw_lo: low 16 bits of the MAC in FW format
 3601 * @mac: buffer for the restored MAC address
3602 */
3603static inline void bnx2x_get_fw_mac_addr(__le16 *fw_hi, __le16 *fw_mid,
3604 __le16 *fw_lo, u8 *mac)
3605{
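 /* bnx2x_set_fw_mac_addr() stores the MAC bytes pairwise swapped
 * within each __le16; swap them back here.
 */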
3606 mac[1] = ((u8 *)fw_hi)[0];
3607 mac[0] = ((u8 *)fw_hi)[1];
3608 mac[3] = ((u8 *)fw_mid)[0];
3609 mac[2] = ((u8 *)fw_mid)[1];
3610 mac[5] = ((u8 *)fw_lo)[0];
3611 mac[4] = ((u8 *)fw_lo)[1];
3612}
3613
3614/**
 3615 * bnx2x_mcast_refresh_registry_e1 - update the exact-match mcast registry
 3616 *
 3617 * @bp: device handle
 3618 * @o: multicast object
 3619 *
 3620 * Check the first entry flag in the ramrod data to see if it's a DELETE or ADD
 3621 * command and update the registry correspondingly: if ADD - allocate memory and
 3622 * add the entries to the registry (list), if DELETE - clear the registry and free
3623 * the memory.
3624 */
3625static inline int bnx2x_mcast_refresh_registry_e1(struct bnx2x *bp,
3626 struct bnx2x_mcast_obj *o)
3627{
3628 struct bnx2x_raw_obj *raw = &o->raw;
3629 struct bnx2x_mcast_mac_elem *elem;
3630 struct mac_configuration_cmd *data =
3631 (struct mac_configuration_cmd *)(raw->rdata);
3632
3633 /* If first entry contains a SET bit - the command was ADD,
3634 * otherwise - DEL_ALL
3635 */
3636 if (GET_FLAG(data->config_table[0].flags,
3637 MAC_CONFIGURATION_ENTRY_ACTION_TYPE)) {
3638 int i, len = data->hdr.length;
3639
 3640 /* Return if it was a RESTORE command */
3641 if (!list_empty(&o->registry.exact_match.macs))
3642 return 0;
3643
Thomas Meyer01e23742011-11-29 11:08:00 +00003644 elem = kcalloc(len, sizeof(*elem), GFP_ATOMIC);
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03003645 if (!elem) {
3646 BNX2X_ERR("Failed to allocate registry memory\n");
3647 return -ENOMEM;
3648 }
3649
3650 for (i = 0; i < len; i++, elem++) {
3651 bnx2x_get_fw_mac_addr(
3652 &data->config_table[i].msb_mac_addr,
3653 &data->config_table[i].middle_mac_addr,
3654 &data->config_table[i].lsb_mac_addr,
3655 elem->mac);
Joe Perches0f9dad12011-08-14 12:16:19 +00003656 DP(BNX2X_MSG_SP, "Adding registry entry for [%pM]\n",
Merav Sicron51c1a582012-03-18 10:33:38 +00003657 elem->mac);
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03003658 list_add_tail(&elem->link,
3659 &o->registry.exact_match.macs);
3660 }
3661 } else {
3662 elem = list_first_entry(&o->registry.exact_match.macs,
3663 struct bnx2x_mcast_mac_elem, link);
3664 DP(BNX2X_MSG_SP, "Deleting a registry\n");
3665 kfree(elem);
3666 INIT_LIST_HEAD(&o->registry.exact_match.macs);
3667 }
3668
3669 return 0;
3670}
3671
3672static int bnx2x_mcast_setup_e1(struct bnx2x *bp,
3673 struct bnx2x_mcast_ramrod_params *p,
Yuval Mintz86564c32013-01-23 03:21:50 +00003674 enum bnx2x_mcast_cmd cmd)
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03003675{
3676 struct bnx2x_mcast_obj *o = p->mcast_obj;
3677 struct bnx2x_raw_obj *raw = &o->raw;
3678 struct mac_configuration_cmd *data =
3679 (struct mac_configuration_cmd *)(raw->rdata);
3680 int cnt = 0, i, rc;
3681
3682 /* Reset the ramrod data buffer */
3683 memset(data, 0, sizeof(*data));
3684
3685 /* First set all entries as invalid */
3686 for (i = 0; i < o->max_cmd_len ; i++)
3687 SET_FLAG(data->config_table[i].flags,
3688 MAC_CONFIGURATION_ENTRY_ACTION_TYPE,
3689 T_ETH_MAC_COMMAND_INVALIDATE);
3690
3691 /* Handle pending commands first */
3692 cnt = bnx2x_mcast_handle_pending_cmds_e1(bp, p);
3693
3694 /* If there are no more pending commands - clear SCHEDULED state */
3695 if (list_empty(&o->pending_cmds_head))
3696 o->clear_sched(o);
3697
3698 /* The below may be true iff there were no pending commands */
3699 if (!cnt)
3700 cnt = bnx2x_mcast_handle_current_cmd(bp, p, cmd, 0);
3701
3702 /* For 57710 every command has o->max_cmd_len length to ensure that
3703 * commands are done one at a time.
3704 */
3705 o->total_pending_num -= o->max_cmd_len;
3706
3707 /* send a ramrod */
3708
3709 WARN_ON(cnt > o->max_cmd_len);
3710
3711 /* Set ramrod header (in particular, a number of entries to update) */
3712 bnx2x_mcast_set_rdata_hdr_e1(bp, p, (u8)cnt);
3713
3714 /* update a registry: we need the registry contents to be always up
3715 * to date in order to be able to execute a RESTORE opcode. Here
 3716 * we use the fact that for 57710 we send one command at a time,
3717 * hence we may take the registry update out of the command handling
3718 * and do it in a simpler way here.
3719 */
3720 rc = bnx2x_mcast_refresh_registry_e1(bp, o);
3721 if (rc)
3722 return rc;
3723
Yuval Mintz16a5fd92013-06-02 00:06:18 +00003724 /* If CLEAR_ONLY was requested - don't send a ramrod and clear
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03003725 * RAMROD_PENDING status immediately.
3726 */
3727 if (test_bit(RAMROD_DRV_CLR_ONLY, &p->ramrod_flags)) {
3728 raw->clear_pending(raw);
3729 return 0;
3730 } else {
Yuval Mintz16a5fd92013-06-02 00:06:18 +00003731 /* No need for an explicit memory barrier here: we only need
 3732 * to order the write of the SPQ element against the update of
 3733 * the SPQ producer, and since the latter involves a memory
 3734 * read, the full memory barrier inside bnx2x_sp_post() already
 3735 * provides that ordering.
Vladislav Zolotarov53e51e22011-07-19 01:45:02 +00003736 */
3737
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03003738 /* Send a ramrod */
3739 rc = bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, raw->cid,
3740 U64_HI(raw->rdata_mapping),
3741 U64_LO(raw->rdata_mapping),
3742 ETH_CONNECTION_TYPE);
3743 if (rc)
3744 return rc;
3745
3746 /* Ramrod completion is pending */
3747 return 1;
3748 }
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03003749}
3750
3751static int bnx2x_mcast_get_registry_size_exact(struct bnx2x_mcast_obj *o)
3752{
3753 return o->registry.exact_match.num_macs_set;
3754}
3755
3756static int bnx2x_mcast_get_registry_size_aprox(struct bnx2x_mcast_obj *o)
3757{
3758 return o->registry.aprox_match.num_bins_set;
3759}
3760
3761static void bnx2x_mcast_set_registry_size_exact(struct bnx2x_mcast_obj *o,
3762 int n)
3763{
3764 o->registry.exact_match.num_macs_set = n;
3765}
3766
3767static void bnx2x_mcast_set_registry_size_aprox(struct bnx2x_mcast_obj *o,
3768 int n)
3769{
3770 o->registry.aprox_match.num_bins_set = n;
3771}
3772
3773int bnx2x_config_mcast(struct bnx2x *bp,
3774 struct bnx2x_mcast_ramrod_params *p,
Yuval Mintz86564c32013-01-23 03:21:50 +00003775 enum bnx2x_mcast_cmd cmd)
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03003776{
3777 struct bnx2x_mcast_obj *o = p->mcast_obj;
3778 struct bnx2x_raw_obj *r = &o->raw;
3779 int rc = 0, old_reg_size;
3780
3781 /* This is needed to recover number of currently configured mcast macs
3782 * in case of failure.
3783 */
3784 old_reg_size = o->get_registry_size(o);
3785
3786 /* Do some calculations and checks */
3787 rc = o->validate(bp, p, cmd);
3788 if (rc)
3789 return rc;
3790
3791 /* Return if there is no work to do */
3792 if ((!p->mcast_list_len) && (!o->check_sched(o)))
3793 return 0;
3794
Merav Sicron51c1a582012-03-18 10:33:38 +00003795 DP(BNX2X_MSG_SP, "o->total_pending_num=%d p->mcast_list_len=%d o->max_cmd_len=%d\n",
3796 o->total_pending_num, p->mcast_list_len, o->max_cmd_len);
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03003797
3798 /* Enqueue the current command to the pending list if we can't complete
3799 * it in the current iteration
3800 */
3801 if (r->check_pending(r) ||
3802 ((o->max_cmd_len > 0) && (o->total_pending_num > o->max_cmd_len))) {
3803 rc = o->enqueue_cmd(bp, p->mcast_obj, p, cmd);
3804 if (rc < 0)
3805 goto error_exit1;
3806
3807 /* As long as the current command is in a command list we
3808 * don't need to handle it separately.
3809 */
3810 p->mcast_list_len = 0;
3811 }
3812
3813 if (!r->check_pending(r)) {
3814
3815 /* Set 'pending' state */
3816 r->set_pending(r);
3817
3818 /* Configure the new classification in the chip */
3819 rc = o->config_mcast(bp, p, cmd);
3820 if (rc < 0)
3821 goto error_exit2;
3822
3823 /* Wait for a ramrod completion if was requested */
3824 if (test_bit(RAMROD_COMP_WAIT, &p->ramrod_flags))
3825 rc = o->wait_comp(bp, o);
3826 }
3827
3828 return rc;
3829
3830error_exit2:
3831 r->clear_pending(r);
3832
3833error_exit1:
3834 o->revert(bp, p, old_reg_size);
3835
3836 return rc;
3837}
3838
3839static void bnx2x_mcast_clear_sched(struct bnx2x_mcast_obj *o)
3840{
3841 smp_mb__before_clear_bit();
3842 clear_bit(o->sched_state, o->raw.pstate);
3843 smp_mb__after_clear_bit();
3844}
3845
3846static void bnx2x_mcast_set_sched(struct bnx2x_mcast_obj *o)
3847{
3848 smp_mb__before_clear_bit();
3849 set_bit(o->sched_state, o->raw.pstate);
3850 smp_mb__after_clear_bit();
3851}
3852
3853static bool bnx2x_mcast_check_sched(struct bnx2x_mcast_obj *o)
3854{
3855 return !!test_bit(o->sched_state, o->raw.pstate);
3856}
3857
3858static bool bnx2x_mcast_check_pending(struct bnx2x_mcast_obj *o)
3859{
3860 return o->raw.check_pending(&o->raw) || o->check_sched(o);
3861}
3862
3863void bnx2x_init_mcast_obj(struct bnx2x *bp,
3864 struct bnx2x_mcast_obj *mcast_obj,
3865 u8 mcast_cl_id, u32 mcast_cid, u8 func_id,
3866 u8 engine_id, void *rdata, dma_addr_t rdata_mapping,
3867 int state, unsigned long *pstate, bnx2x_obj_type type)
3868{
3869 memset(mcast_obj, 0, sizeof(*mcast_obj));
3870
3871 bnx2x_init_raw_obj(&mcast_obj->raw, mcast_cl_id, mcast_cid, func_id,
3872 rdata, rdata_mapping, state, pstate, type);
3873
3874 mcast_obj->engine_id = engine_id;
3875
3876 INIT_LIST_HEAD(&mcast_obj->pending_cmds_head);
3877
3878 mcast_obj->sched_state = BNX2X_FILTER_MCAST_SCHED;
3879 mcast_obj->check_sched = bnx2x_mcast_check_sched;
3880 mcast_obj->set_sched = bnx2x_mcast_set_sched;
3881 mcast_obj->clear_sched = bnx2x_mcast_clear_sched;
3882
3883 if (CHIP_IS_E1(bp)) {
3884 mcast_obj->config_mcast = bnx2x_mcast_setup_e1;
3885 mcast_obj->enqueue_cmd = bnx2x_mcast_enqueue_cmd;
3886 mcast_obj->hdl_restore =
3887 bnx2x_mcast_handle_restore_cmd_e1;
3888 mcast_obj->check_pending = bnx2x_mcast_check_pending;
3889
3890 if (CHIP_REV_IS_SLOW(bp))
3891 mcast_obj->max_cmd_len = BNX2X_MAX_EMUL_MULTI;
3892 else
3893 mcast_obj->max_cmd_len = BNX2X_MAX_MULTICAST;
3894
3895 mcast_obj->wait_comp = bnx2x_mcast_wait;
3896 mcast_obj->set_one_rule = bnx2x_mcast_set_one_rule_e1;
3897 mcast_obj->validate = bnx2x_mcast_validate_e1;
3898 mcast_obj->revert = bnx2x_mcast_revert_e1;
3899 mcast_obj->get_registry_size =
3900 bnx2x_mcast_get_registry_size_exact;
3901 mcast_obj->set_registry_size =
3902 bnx2x_mcast_set_registry_size_exact;
3903
3904 /* 57710 is the only chip that uses the exact match for mcast
3905 * at the moment.
3906 */
3907 INIT_LIST_HEAD(&mcast_obj->registry.exact_match.macs);
3908
3909 } else if (CHIP_IS_E1H(bp)) {
3910 mcast_obj->config_mcast = bnx2x_mcast_setup_e1h;
3911 mcast_obj->enqueue_cmd = NULL;
3912 mcast_obj->hdl_restore = NULL;
3913 mcast_obj->check_pending = bnx2x_mcast_check_pending;
3914
3915 /* 57711 doesn't send a ramrod, so it has unlimited credit
3916 * for one command.
3917 */
3918 mcast_obj->max_cmd_len = -1;
3919 mcast_obj->wait_comp = bnx2x_mcast_wait;
3920 mcast_obj->set_one_rule = NULL;
3921 mcast_obj->validate = bnx2x_mcast_validate_e1h;
3922 mcast_obj->revert = bnx2x_mcast_revert_e1h;
3923 mcast_obj->get_registry_size =
3924 bnx2x_mcast_get_registry_size_aprox;
3925 mcast_obj->set_registry_size =
3926 bnx2x_mcast_set_registry_size_aprox;
3927 } else {
3928 mcast_obj->config_mcast = bnx2x_mcast_setup_e2;
3929 mcast_obj->enqueue_cmd = bnx2x_mcast_enqueue_cmd;
3930 mcast_obj->hdl_restore =
3931 bnx2x_mcast_handle_restore_cmd_e2;
3932 mcast_obj->check_pending = bnx2x_mcast_check_pending;
3933 /* TODO: There should be a proper HSI define for this number!!!
3934 */
3935 mcast_obj->max_cmd_len = 16;
3936 mcast_obj->wait_comp = bnx2x_mcast_wait;
3937 mcast_obj->set_one_rule = bnx2x_mcast_set_one_rule_e2;
3938 mcast_obj->validate = bnx2x_mcast_validate_e2;
3939 mcast_obj->revert = bnx2x_mcast_revert_e2;
3940 mcast_obj->get_registry_size =
3941 bnx2x_mcast_get_registry_size_aprox;
3942 mcast_obj->set_registry_size =
3943 bnx2x_mcast_set_registry_size_aprox;
3944 }
3945}
3946
3947/*************************** Credit handling **********************************/
3948
3949/**
 3950 * __atomic_add_ifless - add if the result is less than a given value.
3951 *
3952 * @v: pointer of type atomic_t
3953 * @a: the amount to add to v...
3954 * @u: ...if (v + a) is less than u.
3955 *
3956 * returns true if (v + a) was less than u, and false otherwise.
3957 *
3958 */
3959static inline bool __atomic_add_ifless(atomic_t *v, int a, int u)
3960{
3961 int c, old;
3962
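 /* Classic cmpxchg retry loop: if another CPU modified the counter
 * between the read and the cmpxchg, retry with the updated value.
 */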
3963 c = atomic_read(v);
3964 for (;;) {
3965 if (unlikely(c + a >= u))
3966 return false;
3967
3968 old = atomic_cmpxchg((v), c, c + a);
3969 if (likely(old == c))
3970 break;
3971 c = old;
3972 }
3973
3974 return true;
3975}
3976
3977/**
 3978 * __atomic_dec_ifmoe - dec if the result is greater than or equal to a given value.
 3979 *
 3980 * @v: pointer of type atomic_t
 3981 * @a: the amount to dec from v...
 3982 * @u: ...if (v - a) is greater than or equal to u.
 3983 *
 3984 * returns true if (v - a) was greater than or equal to u, and false
 3985 * otherwise.
3986 */
3987static inline bool __atomic_dec_ifmoe(atomic_t *v, int a, int u)
3988{
3989 int c, old;
3990
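 /* Same cmpxchg retry pattern as __atomic_add_ifless(), but for a
 * lower-bounded decrement.
 */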
3991 c = atomic_read(v);
3992 for (;;) {
3993 if (unlikely(c - a < u))
3994 return false;
3995
3996 old = atomic_cmpxchg((v), c, c - a);
3997 if (likely(old == c))
3998 break;
3999 c = old;
4000 }
4001
4002 return true;
4003}
4004
4005static bool bnx2x_credit_pool_get(struct bnx2x_credit_pool_obj *o, int cnt)
4006{
4007 bool rc;
4008
4009 smp_mb();
4010 rc = __atomic_dec_ifmoe(&o->credit, cnt, 0);
4011 smp_mb();
4012
4013 return rc;
4014}
4015
4016static bool bnx2x_credit_pool_put(struct bnx2x_credit_pool_obj *o, int cnt)
4017{
4018 bool rc;
4019
4020 smp_mb();
4021
4022 /* Don't let to refill if credit + cnt > pool_sz */
4023 rc = __atomic_add_ifless(&o->credit, cnt, o->pool_sz + 1);
4024
4025 smp_mb();
4026
4027 return rc;
4028}
4029
4030static int bnx2x_credit_pool_check(struct bnx2x_credit_pool_obj *o)
4031{
4032 int cur_credit;
4033
4034 smp_mb();
4035 cur_credit = atomic_read(&o->credit);
4036
4037 return cur_credit;
4038}
4039
4040static bool bnx2x_credit_pool_always_true(struct bnx2x_credit_pool_obj *o,
4041 int cnt)
4042{
4043 return true;
4044}
4045
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03004046static bool bnx2x_credit_pool_get_entry(
4047 struct bnx2x_credit_pool_obj *o,
4048 int *offset)
4049{
4050 int idx, vec, i;
4051
4052 *offset = -1;
4053
4054 /* Find "internal cam-offset" then add to base for this object... */
4055 for (vec = 0; vec < BNX2X_POOL_VEC_SIZE; vec++) {
4056
4057 /* Skip the current vector if there are no free entries in it */
4058 if (!o->pool_mirror[vec])
4059 continue;
4060
4061 /* If we've got here we are going to find a free entry */
Dmitry Kravkovc54e9bd2012-03-26 21:08:55 +00004062 for (idx = vec * BIT_VEC64_ELEM_SZ, i = 0;
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03004063 i < BIT_VEC64_ELEM_SZ; idx++, i++)
4064
4065 if (BIT_VEC64_TEST_BIT(o->pool_mirror, idx)) {
4066 /* Got one!! */
4067 BIT_VEC64_CLEAR_BIT(o->pool_mirror, idx);
4068 *offset = o->base_pool_offset + idx;
4069 return true;
4070 }
4071 }
4072
4073 return false;
4074}
4075
4076static bool bnx2x_credit_pool_put_entry(
4077 struct bnx2x_credit_pool_obj *o,
4078 int offset)
4079{
4080 if (offset < o->base_pool_offset)
4081 return false;
4082
4083 offset -= o->base_pool_offset;
4084
4085 if (offset >= o->pool_sz)
4086 return false;
4087
4088 /* Return the entry to the pool */
4089 BIT_VEC64_SET_BIT(o->pool_mirror, offset);
4090
4091 return true;
4092}
4093
4094static bool bnx2x_credit_pool_put_entry_always_true(
4095 struct bnx2x_credit_pool_obj *o,
4096 int offset)
4097{
4098 return true;
4099}
4100
4101static bool bnx2x_credit_pool_get_entry_always_true(
4102 struct bnx2x_credit_pool_obj *o,
4103 int *offset)
4104{
4105 *offset = -1;
4106 return true;
4107}
4108/**
4109 * bnx2x_init_credit_pool - initialize credit pool internals.
4110 *
 4111 * @p: credit pool object
4112 * @base: Base entry in the CAM to use.
4113 * @credit: pool size.
4114 *
4115 * If base is negative no CAM entries handling will be performed.
4116 * If credit is negative pool operations will always succeed (unlimited pool).
4117 *
4118 */
4119static inline void bnx2x_init_credit_pool(struct bnx2x_credit_pool_obj *p,
4120 int base, int credit)
4121{
4122 /* Zero the object first */
4123 memset(p, 0, sizeof(*p));
4124
4125 /* Set the table to all 1s */
4126 memset(&p->pool_mirror, 0xff, sizeof(p->pool_mirror));
4127
4128 /* Init a pool as full */
4129 atomic_set(&p->credit, credit);
4130
 4131 /* The total pool size */
4132 p->pool_sz = credit;
4133
4134 p->base_pool_offset = base;
4135
4136 /* Commit the change */
4137 smp_mb();
4138
4139 p->check = bnx2x_credit_pool_check;
4140
4141 /* if pool credit is negative - disable the checks */
4142 if (credit >= 0) {
4143 p->put = bnx2x_credit_pool_put;
4144 p->get = bnx2x_credit_pool_get;
4145 p->put_entry = bnx2x_credit_pool_put_entry;
4146 p->get_entry = bnx2x_credit_pool_get_entry;
4147 } else {
4148 p->put = bnx2x_credit_pool_always_true;
4149 p->get = bnx2x_credit_pool_always_true;
4150 p->put_entry = bnx2x_credit_pool_put_entry_always_true;
4151 p->get_entry = bnx2x_credit_pool_get_entry_always_true;
4152 }
4153
4154 /* If base is negative - disable entries handling */
4155 if (base < 0) {
4156 p->put_entry = bnx2x_credit_pool_put_entry_always_true;
4157 p->get_entry = bnx2x_credit_pool_get_entry_always_true;
4158 }
4159}
4160
4161void bnx2x_init_mac_credit_pool(struct bnx2x *bp,
4162 struct bnx2x_credit_pool_obj *p, u8 func_id,
4163 u8 func_num)
4164{
4165/* TODO: this will be defined in consts as well... */
4166#define BNX2X_CAM_SIZE_EMUL 5
4167
4168 int cam_sz;
4169
4170 if (CHIP_IS_E1(bp)) {
4171 /* In E1, Multicast is saved in cam... */
4172 if (!CHIP_REV_IS_SLOW(bp))
4173 cam_sz = (MAX_MAC_CREDIT_E1 / 2) - BNX2X_MAX_MULTICAST;
4174 else
4175 cam_sz = BNX2X_CAM_SIZE_EMUL - BNX2X_MAX_EMUL_MULTI;
4176
4177 bnx2x_init_credit_pool(p, func_id * cam_sz, cam_sz);
4178
4179 } else if (CHIP_IS_E1H(bp)) {
 4180 /* CAM credit is equally divided between all active functions
 4181 * on the PORT.
4182 */
4183 if ((func_num > 0)) {
4184 if (!CHIP_REV_IS_SLOW(bp))
4185 cam_sz = (MAX_MAC_CREDIT_E1H / (2*func_num));
4186 else
4187 cam_sz = BNX2X_CAM_SIZE_EMUL;
4188 bnx2x_init_credit_pool(p, func_id * cam_sz, cam_sz);
4189 } else {
4190 /* this should never happen! Block MAC operations. */
4191 bnx2x_init_credit_pool(p, 0, 0);
4192 }
4193
4194 } else {
4195
Yuval Mintz16a5fd92013-06-02 00:06:18 +00004196 /* CAM credit is equally divided between all active functions
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03004197 * on the PATH.
4198 */
4199 if ((func_num > 0)) {
4200 if (!CHIP_REV_IS_SLOW(bp))
4201 cam_sz = (MAX_MAC_CREDIT_E2 / func_num);
4202 else
4203 cam_sz = BNX2X_CAM_SIZE_EMUL;
4204
Yuval Mintz16a5fd92013-06-02 00:06:18 +00004205 /* No need for CAM entries handling for 57712 and
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03004206 * newer.
4207 */
4208 bnx2x_init_credit_pool(p, -1, cam_sz);
4209 } else {
4210 /* this should never happen! Block MAC operations. */
4211 bnx2x_init_credit_pool(p, 0, 0);
4212 }
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03004213 }
4214}
4215
4216void bnx2x_init_vlan_credit_pool(struct bnx2x *bp,
4217 struct bnx2x_credit_pool_obj *p,
4218 u8 func_id,
4219 u8 func_num)
4220{
4221 if (CHIP_IS_E1x(bp)) {
Yuval Mintz16a5fd92013-06-02 00:06:18 +00004222 /* There is no VLAN credit in HW on 57710 and 57711; only
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03004223 * MAC / MAC-VLAN can be set
4224 */
4225 bnx2x_init_credit_pool(p, 0, -1);
4226 } else {
Yuval Mintz16a5fd92013-06-02 00:06:18 +00004227 /* CAM credit is equally divided between all active functions
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03004228 * on the PATH.
4229 */
4230 if (func_num > 0) {
4231 int credit = MAX_VLAN_CREDIT_E2 / func_num;
4232 bnx2x_init_credit_pool(p, func_id * credit, credit);
4233 } else
4234 /* this should never happen! Block VLAN operations. */
4235 bnx2x_init_credit_pool(p, 0, 0);
4236 }
4237}
4238
4239/****************** RSS Configuration ******************/
4240/**
4241 * bnx2x_debug_print_ind_table - prints the indirection table configuration.
4242 *
Yuval Mintz16a5fd92013-06-02 00:06:18 +00004243 * @bp: driver handle
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03004244 * @p: pointer to rss configuration
4245 *
4246 * Prints it when NETIF_MSG_IFUP debug level is configured.
4247 */
4248static inline void bnx2x_debug_print_ind_table(struct bnx2x *bp,
4249 struct bnx2x_config_rss_params *p)
4250{
4251 int i;
4252
4253 DP(BNX2X_MSG_SP, "Setting indirection table to:\n");
4254 DP(BNX2X_MSG_SP, "0x0000: ");
4255 for (i = 0; i < T_ETH_INDIRECTION_TABLE_SIZE; i++) {
4256 DP_CONT(BNX2X_MSG_SP, "0x%02x ", p->ind_table[i]);
4257
4258 /* Print 4 bytes in a line */
4259 if ((i + 1 < T_ETH_INDIRECTION_TABLE_SIZE) &&
4260 (((i + 1) & 0x3) == 0)) {
4261 DP_CONT(BNX2X_MSG_SP, "\n");
4262 DP(BNX2X_MSG_SP, "0x%04x: ", i + 1);
4263 }
4264 }
4265
4266 DP_CONT(BNX2X_MSG_SP, "\n");
4267}
4268
4269/**
4270 * bnx2x_setup_rss - configure RSS
4271 *
4272 * @bp: device handle
4273 * @p: rss configuration
4274 *
 4275 * Sends an RSS UPDATE ramrod for that matter.
4276 */
4277static int bnx2x_setup_rss(struct bnx2x *bp,
4278 struct bnx2x_config_rss_params *p)
4279{
4280 struct bnx2x_rss_config_obj *o = p->rss_obj;
4281 struct bnx2x_raw_obj *r = &o->raw;
4282 struct eth_rss_update_ramrod_data *data =
4283 (struct eth_rss_update_ramrod_data *)(r->rdata);
4284 u8 rss_mode = 0;
4285 int rc;
4286
4287 memset(data, 0, sizeof(*data));
4288
4289 DP(BNX2X_MSG_SP, "Configuring RSS\n");
4290
4291 /* Set an echo field */
Yuval Mintz86564c32013-01-23 03:21:50 +00004292 data->echo = cpu_to_le32((r->cid & BNX2X_SWCID_MASK) |
4293 (r->state << BNX2X_SWCID_SHIFT));
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03004294
4295 /* RSS mode */
4296 if (test_bit(BNX2X_RSS_MODE_DISABLED, &p->rss_flags))
4297 rss_mode = ETH_RSS_MODE_DISABLED;
4298 else if (test_bit(BNX2X_RSS_MODE_REGULAR, &p->rss_flags))
4299 rss_mode = ETH_RSS_MODE_REGULAR;
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03004300
4301 data->rss_mode = rss_mode;
4302
4303 DP(BNX2X_MSG_SP, "rss_mode=%d\n", rss_mode);
4304
4305 /* RSS capabilities */
4306 if (test_bit(BNX2X_RSS_IPV4, &p->rss_flags))
4307 data->capabilities |=
4308 ETH_RSS_UPDATE_RAMROD_DATA_IPV4_CAPABILITY;
4309
4310 if (test_bit(BNX2X_RSS_IPV4_TCP, &p->rss_flags))
4311 data->capabilities |=
4312 ETH_RSS_UPDATE_RAMROD_DATA_IPV4_TCP_CAPABILITY;
4313
Merav Sicron5d317c6a2012-06-19 07:48:24 +00004314 if (test_bit(BNX2X_RSS_IPV4_UDP, &p->rss_flags))
4315 data->capabilities |=
4316 ETH_RSS_UPDATE_RAMROD_DATA_IPV4_UDP_CAPABILITY;
4317
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03004318 if (test_bit(BNX2X_RSS_IPV6, &p->rss_flags))
4319 data->capabilities |=
4320 ETH_RSS_UPDATE_RAMROD_DATA_IPV6_CAPABILITY;
4321
4322 if (test_bit(BNX2X_RSS_IPV6_TCP, &p->rss_flags))
4323 data->capabilities |=
4324 ETH_RSS_UPDATE_RAMROD_DATA_IPV6_TCP_CAPABILITY;
4325
Merav Sicron5d317c6a2012-06-19 07:48:24 +00004326 if (test_bit(BNX2X_RSS_IPV6_UDP, &p->rss_flags))
4327 data->capabilities |=
4328 ETH_RSS_UPDATE_RAMROD_DATA_IPV6_UDP_CAPABILITY;
4329
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03004330 /* Hashing mask */
4331 data->rss_result_mask = p->rss_result_mask;
4332
4333 /* RSS engine ID */
4334 data->rss_engine_id = o->engine_id;
4335
4336 DP(BNX2X_MSG_SP, "rss_engine_id=%d\n", data->rss_engine_id);
4337
4338 /* Indirection table */
4339 memcpy(data->indirection_table, p->ind_table,
4340 T_ETH_INDIRECTION_TABLE_SIZE);
4341
4342 /* Remember the last configuration */
4343 memcpy(o->ind_table, p->ind_table, T_ETH_INDIRECTION_TABLE_SIZE);
4344
4345 /* Print the indirection table */
4346 if (netif_msg_ifup(bp))
4347 bnx2x_debug_print_ind_table(bp, p);
4348
4349 /* RSS keys */
4350 if (test_bit(BNX2X_RSS_SET_SRCH, &p->rss_flags)) {
4351 memcpy(&data->rss_key[0], &p->rss_key[0],
4352 sizeof(data->rss_key));
4353 data->capabilities |= ETH_RSS_UPDATE_RAMROD_DATA_UPDATE_RSS_KEY;
4354 }
4355
Yuval Mintz16a5fd92013-06-02 00:06:18 +00004356 /* No need for an explicit memory barrier here: we only need
 4357 * to order the write of the SPQ element against the update of
 4358 * the SPQ producer, and since the latter involves a memory
 4359 * read, the full memory barrier inside bnx2x_sp_post() already
 4360 * provides that ordering.
Vladislav Zolotarov53e51e22011-07-19 01:45:02 +00004361 */
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03004362
4363 /* Send a ramrod */
4364 rc = bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_RSS_UPDATE, r->cid,
4365 U64_HI(r->rdata_mapping),
4366 U64_LO(r->rdata_mapping),
4367 ETH_CONNECTION_TYPE);
4368
4369 if (rc < 0)
4370 return rc;
4371
4372 return 1;
4373}
4374
4375void bnx2x_get_rss_ind_table(struct bnx2x_rss_config_obj *rss_obj,
4376 u8 *ind_table)
4377{
4378 memcpy(ind_table, rss_obj->ind_table, sizeof(rss_obj->ind_table));
4379}
4380
4381int bnx2x_config_rss(struct bnx2x *bp,
4382 struct bnx2x_config_rss_params *p)
4383{
4384 int rc;
4385 struct bnx2x_rss_config_obj *o = p->rss_obj;
4386 struct bnx2x_raw_obj *r = &o->raw;
4387
4388 /* Do nothing if only driver cleanup was requested */
Michal Kalderon5b622912014-01-05 18:33:52 +02004389 if (test_bit(RAMROD_DRV_CLR_ONLY, &p->ramrod_flags)) {
4390 DP(BNX2X_MSG_SP, "Not configuring RSS ramrod_flags=%lx\n",
4391 p->ramrod_flags);
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03004392 return 0;
Michal Kalderon5b622912014-01-05 18:33:52 +02004393 }
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03004394
4395 r->set_pending(r);
4396
4397 rc = o->config_rss(bp, p);
4398 if (rc < 0) {
4399 r->clear_pending(r);
4400 return rc;
4401 }
4402
4403 if (test_bit(RAMROD_COMP_WAIT, &p->ramrod_flags))
4404 rc = r->wait_comp(bp, r);
4405
4406 return rc;
4407}
4408
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03004409void bnx2x_init_rss_config_obj(struct bnx2x *bp,
4410 struct bnx2x_rss_config_obj *rss_obj,
4411 u8 cl_id, u32 cid, u8 func_id, u8 engine_id,
4412 void *rdata, dma_addr_t rdata_mapping,
4413 int state, unsigned long *pstate,
4414 bnx2x_obj_type type)
4415{
4416 bnx2x_init_raw_obj(&rss_obj->raw, cl_id, cid, func_id, rdata,
4417 rdata_mapping, state, pstate, type);
4418
4419 rss_obj->engine_id = engine_id;
4420 rss_obj->config_rss = bnx2x_setup_rss;
4421}
4422
Ariel Eliorb9871bc2013-09-04 14:09:21 +03004423int validate_vlan_mac(struct bnx2x *bp,
4424 struct bnx2x_vlan_mac_obj *vlan_mac)
4425{
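 /* get_n_elements is assigned when the object is set up, so a NULL
 * pointer here indicates an uninitialized object.
 */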
4426 if (!vlan_mac->get_n_elements) {
 4427 BNX2X_ERR("vlan mac object was not initialized\n");
4428 return -EINVAL;
4429 }
4430 return 0;
4431}
4432
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03004433/********************** Queue state object ***********************************/
4434
4435/**
4436 * bnx2x_queue_state_change - perform Queue state change transition
4437 *
4438 * @bp: device handle
4439 * @params: parameters to perform the transition
4440 *
4441 * returns 0 in case of successfully completed transition, negative error
4442 * code in case of failure, positive (EBUSY) value if there is a completion
 4443 * that is still pending (possible only if RAMROD_COMP_WAIT is
4444 * not set in params->ramrod_flags for asynchronous commands).
4445 *
4446 */
4447int bnx2x_queue_state_change(struct bnx2x *bp,
4448 struct bnx2x_queue_state_params *params)
4449{
4450 struct bnx2x_queue_sp_obj *o = params->q_obj;
4451 int rc, pending_bit;
4452 unsigned long *pending = &o->pending;
4453
4454 /* Check that the requested transition is legal */
Yuval Mintz04c46732013-01-23 03:21:46 +00004455 rc = o->check_transition(bp, o, params);
4456 if (rc) {
4457 BNX2X_ERR("check transition returned an error. rc %d\n", rc);
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03004458 return -EINVAL;
Yuval Mintz04c46732013-01-23 03:21:46 +00004459 }
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03004460
4461 /* Set "pending" bit */
Yuval Mintz04c46732013-01-23 03:21:46 +00004462 DP(BNX2X_MSG_SP, "pending bit was=%lx\n", o->pending);
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03004463 pending_bit = o->set_pending(o, params);
Yuval Mintz04c46732013-01-23 03:21:46 +00004464 DP(BNX2X_MSG_SP, "pending bit now=%lx\n", o->pending);
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03004465
4466 /* Don't send a command if only driver cleanup was requested */
4467 if (test_bit(RAMROD_DRV_CLR_ONLY, &params->ramrod_flags))
4468 o->complete_cmd(bp, o, pending_bit);
4469 else {
4470 /* Send a ramrod */
4471 rc = o->send_cmd(bp, params);
4472 if (rc) {
4473 o->next_state = BNX2X_Q_STATE_MAX;
4474 clear_bit(pending_bit, pending);
4475 smp_mb__after_clear_bit();
4476 return rc;
4477 }
4478
4479 if (test_bit(RAMROD_COMP_WAIT, &params->ramrod_flags)) {
4480 rc = o->wait_comp(bp, o, pending_bit);
4481 if (rc)
4482 return rc;
4483
4484 return 0;
4485 }
4486 }
4487
4488 return !!test_bit(pending_bit, pending);
4489}
4490
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03004491static int bnx2x_queue_set_pending(struct bnx2x_queue_sp_obj *obj,
4492 struct bnx2x_queue_state_params *params)
4493{
4494 enum bnx2x_queue_cmd cmd = params->cmd, bit;
4495
4496 /* ACTIVATE and DEACTIVATE commands are implemented on top of
4497 * UPDATE command.
4498 */
4499 if ((cmd == BNX2X_Q_CMD_ACTIVATE) ||
4500 (cmd == BNX2X_Q_CMD_DEACTIVATE))
4501 bit = BNX2X_Q_CMD_UPDATE;
4502 else
4503 bit = cmd;
4504
4505 set_bit(bit, &obj->pending);
4506 return bit;
4507}
4508
4509static int bnx2x_queue_wait_comp(struct bnx2x *bp,
4510 struct bnx2x_queue_sp_obj *o,
4511 enum bnx2x_queue_cmd cmd)
4512{
4513 return bnx2x_state_wait(bp, cmd, &o->pending);
4514}
4515
4516/**
4517 * bnx2x_queue_comp_cmd - complete the state change command.
4518 *
4519 * @bp: device handle
 4520 * @o: queue state object
 4521 * @cmd: command being completed
4522 *
4523 * Checks that the arrived completion is expected.
4524 */
4525static int bnx2x_queue_comp_cmd(struct bnx2x *bp,
4526 struct bnx2x_queue_sp_obj *o,
4527 enum bnx2x_queue_cmd cmd)
4528{
4529 unsigned long cur_pending = o->pending;
4530
4531 if (!test_and_clear_bit(cmd, &cur_pending)) {
Merav Sicron51c1a582012-03-18 10:33:38 +00004532 BNX2X_ERR("Bad MC reply %d for queue %d in state %d pending 0x%lx, next_state %d\n",
4533 cmd, o->cids[BNX2X_PRIMARY_CID_INDEX],
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03004534 o->state, cur_pending, o->next_state);
4535 return -EINVAL;
4536 }
4537
Ariel Elior6383c0b2011-07-14 08:31:57 +00004538 if (o->next_tx_only >= o->max_cos)
Yuval Mintz16a5fd92013-06-02 00:06:18 +00004539 /* >= because the number of tx-only connections must always be
Masanari Iida02582e92012-08-22 19:11:26 +09004540 * smaller than max_cos, since the primary connection supports COS 0
Ariel Elior6383c0b2011-07-14 08:31:57 +00004541 */
4542 BNX2X_ERR("illegal value for next tx_only: %d. max cos was %d",
4543 o->next_tx_only, o->max_cos);
4544
Merav Sicron51c1a582012-03-18 10:33:38 +00004545 DP(BNX2X_MSG_SP,
4546 "Completing command %d for queue %d, setting state to %d\n",
4547 cmd, o->cids[BNX2X_PRIMARY_CID_INDEX], o->next_state);
Ariel Elior6383c0b2011-07-14 08:31:57 +00004548
4549 if (o->next_tx_only) /* print num tx-only if any exist */
Joe Perches94f05b02011-08-14 12:16:20 +00004550 DP(BNX2X_MSG_SP, "primary cid %d: num tx-only cons %d\n",
Merav Sicron51c1a582012-03-18 10:33:38 +00004551 o->cids[BNX2X_PRIMARY_CID_INDEX], o->next_tx_only);
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03004552
4553 o->state = o->next_state;
Ariel Elior6383c0b2011-07-14 08:31:57 +00004554 o->num_tx_only = o->next_tx_only;
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03004555 o->next_state = BNX2X_Q_STATE_MAX;
4556
4557 /* It's important that o->state and o->next_state are
4558 * updated before o->pending.
4559 */
4560 wmb();
4561
4562 clear_bit(cmd, &o->pending);
4563 smp_mb__after_clear_bit();
4564
4565 return 0;
4566}
4567
4568static void bnx2x_q_fill_setup_data_e2(struct bnx2x *bp,
4569 struct bnx2x_queue_state_params *cmd_params,
4570 struct client_init_ramrod_data *data)
4571{
4572 struct bnx2x_queue_setup_params *params = &cmd_params->params.setup;
Vladislav Zolotarov042181f2011-06-14 01:33:39 +00004573
4574 /* Rx data */
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03004575
4576 /* IPv6 TPA supported for E2 and above only */
Vladislav Zolotarovf5219d82011-07-19 01:44:11 +00004577 data->rx.tpa_en |= test_bit(BNX2X_Q_FLG_TPA_IPV6, &params->flags) *
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03004578 CLIENT_INIT_RX_DATA_TPA_EN_IPV6;
4579}
4580
Ariel Elior6383c0b2011-07-14 08:31:57 +00004581static void bnx2x_q_fill_init_general_data(struct bnx2x *bp,
4582 struct bnx2x_queue_sp_obj *o,
4583 struct bnx2x_general_setup_params *params,
4584 struct client_init_general_data *gen_data,
4585 unsigned long *flags)
4586{
4587 gen_data->client_id = o->cl_id;
4588
4589 if (test_bit(BNX2X_Q_FLG_STATS, flags)) {
4590 gen_data->statistics_counter_id =
4591 params->stat_id;
4592 gen_data->statistics_en_flg = 1;
4593 gen_data->statistics_zero_flg =
4594 test_bit(BNX2X_Q_FLG_ZERO_STATS, flags);
4595 } else
4596 gen_data->statistics_counter_id =
4597 DISABLE_STATISTIC_COUNTER_ID_VALUE;
4598
4599 gen_data->is_fcoe_flg = test_bit(BNX2X_Q_FLG_FCOE, flags);
4600 gen_data->activate_flg = test_bit(BNX2X_Q_FLG_ACTIVE, flags);
4601 gen_data->sp_client_id = params->spcl_id;
4602 gen_data->mtu = cpu_to_le16(params->mtu);
4603 gen_data->func_id = o->func_id;
4604
Ariel Elior6383c0b2011-07-14 08:31:57 +00004605 gen_data->cos = params->cos;
4606
4607 gen_data->traffic_type =
4608 test_bit(BNX2X_Q_FLG_FCOE, flags) ?
4609 LLFC_TRAFFIC_TYPE_FCOE : LLFC_TRAFFIC_TYPE_NW;
4610
Joe Perches94f05b02011-08-14 12:16:20 +00004611 DP(BNX2X_MSG_SP, "flags: active %d, cos %d, stats en %d\n",
Ariel Elior6383c0b2011-07-14 08:31:57 +00004612 gen_data->activate_flg, gen_data->cos, gen_data->statistics_en_flg);
4613}
4614
4615static void bnx2x_q_fill_init_tx_data(struct bnx2x_queue_sp_obj *o,
4616 struct bnx2x_txq_setup_params *params,
4617 struct client_init_tx_data *tx_data,
4618 unsigned long *flags)
4619{
4620 tx_data->enforce_security_flg =
4621 test_bit(BNX2X_Q_FLG_TX_SEC, flags);
4622 tx_data->default_vlan =
4623 cpu_to_le16(params->default_vlan);
4624 tx_data->default_vlan_flg =
4625 test_bit(BNX2X_Q_FLG_DEF_VLAN, flags);
4626 tx_data->tx_switching_flg =
4627 test_bit(BNX2X_Q_FLG_TX_SWITCH, flags);
4628 tx_data->anti_spoofing_flg =
4629 test_bit(BNX2X_Q_FLG_ANTI_SPOOF, flags);
Barak Witkowskia3348722012-04-23 03:04:46 +00004630 tx_data->force_default_pri_flg =
4631 test_bit(BNX2X_Q_FLG_FORCE_DEFAULT_PRI, flags);
4632
Dmitry Kravkove287a752013-03-21 15:38:24 +00004633 tx_data->tunnel_lso_inc_ip_id =
4634 test_bit(BNX2X_Q_FLG_TUN_INC_INNER_IP_ID, flags);
Dmitry Kravkov91226792013-03-11 05:17:52 +00004635 tx_data->tunnel_non_lso_pcsum_location =
4636 test_bit(BNX2X_Q_FLG_PCSUM_ON_PKT, flags) ? PCSUM_ON_PKT :
4637 PCSUM_ON_BD;
4638
Ariel Elior6383c0b2011-07-14 08:31:57 +00004639 tx_data->tx_status_block_id = params->fw_sb_id;
4640 tx_data->tx_sb_index_number = params->sb_cq_index;
4641 tx_data->tss_leading_client_id = params->tss_leading_cl_id;
4642
4643 tx_data->tx_bd_page_base.lo =
4644 cpu_to_le32(U64_LO(params->dscr_map));
4645 tx_data->tx_bd_page_base.hi =
4646 cpu_to_le32(U64_HI(params->dscr_map));
4647
4648 /* Don't configure any Tx switching mode during queue SETUP */
4649 tx_data->state = 0;
4650}
4651
4652static void bnx2x_q_fill_init_pause_data(struct bnx2x_queue_sp_obj *o,
4653 struct rxq_pause_params *params,
4654 struct client_init_rx_data *rx_data)
4655{
4656 /* flow control data */
4657 rx_data->cqe_pause_thr_low = cpu_to_le16(params->rcq_th_lo);
4658 rx_data->cqe_pause_thr_high = cpu_to_le16(params->rcq_th_hi);
4659 rx_data->bd_pause_thr_low = cpu_to_le16(params->bd_th_lo);
4660 rx_data->bd_pause_thr_high = cpu_to_le16(params->bd_th_hi);
4661 rx_data->sge_pause_thr_low = cpu_to_le16(params->sge_th_lo);
4662 rx_data->sge_pause_thr_high = cpu_to_le16(params->sge_th_hi);
4663 rx_data->rx_cos_mask = cpu_to_le16(params->pri_map);
4664}
4665
4666static void bnx2x_q_fill_init_rx_data(struct bnx2x_queue_sp_obj *o,
4667 struct bnx2x_rxq_setup_params *params,
4668 struct client_init_rx_data *rx_data,
4669 unsigned long *flags)
4670{
Ariel Elior6383c0b2011-07-14 08:31:57 +00004671 rx_data->tpa_en = test_bit(BNX2X_Q_FLG_TPA, flags) *
4672 CLIENT_INIT_RX_DATA_TPA_EN_IPV4;
Dmitry Kravkov621b4d62012-02-20 09:59:08 +00004673 rx_data->tpa_en |= test_bit(BNX2X_Q_FLG_TPA_GRO, flags) *
4674 CLIENT_INIT_RX_DATA_TPA_MODE;
Ariel Elior6383c0b2011-07-14 08:31:57 +00004675 rx_data->vmqueue_mode_en_flg = 0;
4676
4677 rx_data->cache_line_alignment_log_size =
4678 params->cache_line_log;
4679 rx_data->enable_dynamic_hc =
4680 test_bit(BNX2X_Q_FLG_DHC, flags);
4681 rx_data->max_sges_for_packet = params->max_sges_pkt;
4682 rx_data->client_qzone_id = params->cl_qzone_id;
4683 rx_data->max_agg_size = cpu_to_le16(params->tpa_agg_sz);
4684
4685 /* Always start in DROP_ALL mode */
4686 rx_data->state = cpu_to_le16(CLIENT_INIT_RX_DATA_UCAST_DROP_ALL |
4687 CLIENT_INIT_RX_DATA_MCAST_DROP_ALL);
4688
4689 /* We don't set drop flags */
4690 rx_data->drop_ip_cs_err_flg = 0;
4691 rx_data->drop_tcp_cs_err_flg = 0;
4692 rx_data->drop_ttl0_flg = 0;
4693 rx_data->drop_udp_cs_err_flg = 0;
4694 rx_data->inner_vlan_removal_enable_flg =
4695 test_bit(BNX2X_Q_FLG_VLAN, flags);
4696 rx_data->outer_vlan_removal_enable_flg =
4697 test_bit(BNX2X_Q_FLG_OV, flags);
4698 rx_data->status_block_id = params->fw_sb_id;
4699 rx_data->rx_sb_index_number = params->sb_cq_index;
4700 rx_data->max_tpa_queues = params->max_tpa_queues;
4701 rx_data->max_bytes_on_bd = cpu_to_le16(params->buf_sz);
4702 rx_data->sge_buff_size = cpu_to_le16(params->sge_buf_sz);
4703 rx_data->bd_page_base.lo =
4704 cpu_to_le32(U64_LO(params->dscr_map));
4705 rx_data->bd_page_base.hi =
4706 cpu_to_le32(U64_HI(params->dscr_map));
4707 rx_data->sge_page_base.lo =
4708 cpu_to_le32(U64_LO(params->sge_map));
4709 rx_data->sge_page_base.hi =
4710 cpu_to_le32(U64_HI(params->sge_map));
4711 rx_data->cqe_page_base.lo =
4712 cpu_to_le32(U64_LO(params->rcq_map));
4713 rx_data->cqe_page_base.hi =
4714 cpu_to_le32(U64_HI(params->rcq_map));
4715 rx_data->is_leading_rss = test_bit(BNX2X_Q_FLG_LEADING_RSS, flags);
4716
4717 if (test_bit(BNX2X_Q_FLG_MCAST, flags)) {
Yuval Mintz259afa12012-03-12 08:53:10 +00004718 rx_data->approx_mcast_engine_id = params->mcast_engine_id;
Ariel Elior6383c0b2011-07-14 08:31:57 +00004719 rx_data->is_approx_mcast = 1;
4720 }
4721
4722 rx_data->rss_engine_id = params->rss_engine_id;
4723
4724 /* silent vlan removal */
4725 rx_data->silent_vlan_removal_flg =
4726 test_bit(BNX2X_Q_FLG_SILENT_VLAN_REM, flags);
4727 rx_data->silent_vlan_value =
4728 cpu_to_le16(params->silent_removal_value);
4729 rx_data->silent_vlan_mask =
4730 cpu_to_le16(params->silent_removal_mask);
Ariel Elior6383c0b2011-07-14 08:31:57 +00004731}
4732
4733/* initialize the general, tx and rx parts of a queue object */
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03004734static void bnx2x_q_fill_setup_data_cmn(struct bnx2x *bp,
4735 struct bnx2x_queue_state_params *cmd_params,
4736 struct client_init_ramrod_data *data)
4737{
Ariel Elior6383c0b2011-07-14 08:31:57 +00004738 bnx2x_q_fill_init_general_data(bp, cmd_params->q_obj,
4739 &cmd_params->params.setup.gen_params,
4740 &data->general,
4741 &cmd_params->params.setup.flags);
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03004742
Ariel Elior6383c0b2011-07-14 08:31:57 +00004743 bnx2x_q_fill_init_tx_data(cmd_params->q_obj,
4744 &cmd_params->params.setup.txq_params,
4745 &data->tx,
4746 &cmd_params->params.setup.flags);
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03004747
Ariel Elior6383c0b2011-07-14 08:31:57 +00004748 bnx2x_q_fill_init_rx_data(cmd_params->q_obj,
4749 &cmd_params->params.setup.rxq_params,
4750 &data->rx,
4751 &cmd_params->params.setup.flags);
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03004752
Ariel Elior6383c0b2011-07-14 08:31:57 +00004753 bnx2x_q_fill_init_pause_data(cmd_params->q_obj,
4754 &cmd_params->params.setup.pause_params,
4755 &data->rx);
Vladislav Zolotarov042181f2011-06-14 01:33:39 +00004756}
4757
Ariel Elior6383c0b2011-07-14 08:31:57 +00004758/* initialize the general and tx parts of a tx-only queue object */
4759static void bnx2x_q_fill_setup_tx_only(struct bnx2x *bp,
4760 struct bnx2x_queue_state_params *cmd_params,
4761 struct tx_queue_init_ramrod_data *data)
4762{
4763 bnx2x_q_fill_init_general_data(bp, cmd_params->q_obj,
4764 &cmd_params->params.tx_only.gen_params,
4765 &data->general,
4766 &cmd_params->params.tx_only.flags);
4767
4768 bnx2x_q_fill_init_tx_data(cmd_params->q_obj,
4769 &cmd_params->params.tx_only.txq_params,
4770 &data->tx,
4771 &cmd_params->params.tx_only.flags);
4772
Merav Sicron51c1a582012-03-18 10:33:38 +00004773	DP(BNX2X_MSG_SP, "cid %d, tx bd page lo %x hi %x\n",
4774 cmd_params->q_obj->cids[0],
4775 data->tx.tx_bd_page_base.lo,
4776 data->tx.tx_bd_page_base.hi);
Ariel Elior6383c0b2011-07-14 08:31:57 +00004777}
Vladislav Zolotarov042181f2011-06-14 01:33:39 +00004778
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03004779/**
4780 * bnx2x_q_init - init HW/FW queue
4781 *
4782 * @bp: device handle
4783 * @params:	queue state parameters
4784 *
4785 * HW/FW initial Queue configuration:
4786 * - HC: Rx and Tx
4787 * - CDU context validation
4788 *
4789 */
4790static inline int bnx2x_q_init(struct bnx2x *bp,
4791 struct bnx2x_queue_state_params *params)
Vladislav Zolotarov042181f2011-06-14 01:33:39 +00004792{
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03004793 struct bnx2x_queue_sp_obj *o = params->q_obj;
4794 struct bnx2x_queue_init_params *init = &params->params.init;
Vladislav Zolotarov042181f2011-06-14 01:33:39 +00004795 u16 hc_usec;
Ariel Elior6383c0b2011-07-14 08:31:57 +00004796 u8 cos;
Vladislav Zolotarov042181f2011-06-14 01:33:39 +00004797
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03004798 /* Tx HC configuration */
4799 if (test_bit(BNX2X_Q_TYPE_HAS_TX, &o->type) &&
4800 test_bit(BNX2X_Q_FLG_HC, &init->tx.flags)) {
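		/* hc_rate is the requested interrupt rate in interrupts/sec;
		 * convert it into a coalescing interval in usec (e.g. a rate
		 * of 50000 int/s yields a 20us interval); a zero rate yields
		 * a zero interval.
		 */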
4801 hc_usec = init->tx.hc_rate ? 1000000 / init->tx.hc_rate : 0;
4802
4803 bnx2x_update_coalesce_sb_index(bp, init->tx.fw_sb_id,
4804 init->tx.sb_cq_index,
4805 !test_bit(BNX2X_Q_FLG_HC_EN, &init->tx.flags),
Vladislav Zolotarov042181f2011-06-14 01:33:39 +00004806 hc_usec);
Vladislav Zolotarov042181f2011-06-14 01:33:39 +00004807 }
4808
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03004809 /* Rx HC configuration */
4810 if (test_bit(BNX2X_Q_TYPE_HAS_RX, &o->type) &&
4811 test_bit(BNX2X_Q_FLG_HC, &init->rx.flags)) {
4812 hc_usec = init->rx.hc_rate ? 1000000 / init->rx.hc_rate : 0;
Vladislav Zolotarov042181f2011-06-14 01:33:39 +00004813
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03004814 bnx2x_update_coalesce_sb_index(bp, init->rx.fw_sb_id,
4815 init->rx.sb_cq_index,
4816 !test_bit(BNX2X_Q_FLG_HC_EN, &init->rx.flags),
4817 hc_usec);
4818 }
4819
4820 /* Set CDU context validation values */
Ariel Elior6383c0b2011-07-14 08:31:57 +00004821 for (cos = 0; cos < o->max_cos; cos++) {
Joe Perches94f05b02011-08-14 12:16:20 +00004822 DP(BNX2X_MSG_SP, "setting context validation. cid %d, cos %d\n",
Ariel Elior6383c0b2011-07-14 08:31:57 +00004823 o->cids[cos], cos);
Joe Perches94f05b02011-08-14 12:16:20 +00004824 DP(BNX2X_MSG_SP, "context pointer %p\n", init->cxts[cos]);
Ariel Elior6383c0b2011-07-14 08:31:57 +00004825 bnx2x_set_ctx_validation(bp, init->cxts[cos], o->cids[cos]);
4826 }
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03004827
4828 /* As no ramrod is sent, complete the command immediately */
4829 o->complete_cmd(bp, o, BNX2X_Q_CMD_INIT);
4830
Vladislav Zolotarov042181f2011-06-14 01:33:39 +00004831 mmiowb();
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03004832 smp_mb();
Vladislav Zolotarov042181f2011-06-14 01:33:39 +00004833
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03004834 return 0;
4835}
Vladislav Zolotarov042181f2011-06-14 01:33:39 +00004836
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03004837static inline int bnx2x_q_send_setup_e1x(struct bnx2x *bp,
4838 struct bnx2x_queue_state_params *params)
4839{
4840 struct bnx2x_queue_sp_obj *o = params->q_obj;
4841 struct client_init_ramrod_data *rdata =
4842 (struct client_init_ramrod_data *)o->rdata;
4843 dma_addr_t data_mapping = o->rdata_mapping;
4844 int ramrod = RAMROD_CMD_ID_ETH_CLIENT_SETUP;
Vladislav Zolotarov042181f2011-06-14 01:33:39 +00004845
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03004846 /* Clear the ramrod data */
4847 memset(rdata, 0, sizeof(*rdata));
4848
4849 /* Fill the ramrod data */
4850 bnx2x_q_fill_setup_data_cmn(bp, params, rdata);
4851
Yuval Mintz16a5fd92013-06-02 00:06:18 +00004852	/* No need for an explicit memory barrier here as long as we
4853	 * ensure the ordering of writing to the SPQ element
4854	 * and updating of the SPQ producer, which involves a memory
4855	 * read; bnx2x_sp_post() already puts the required full
4856	 * memory barrier there.
Vladislav Zolotarov53e51e22011-07-19 01:45:02 +00004857	 */
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03004858
Ariel Elior6383c0b2011-07-14 08:31:57 +00004859 return bnx2x_sp_post(bp, ramrod, o->cids[BNX2X_PRIMARY_CID_INDEX],
4860 U64_HI(data_mapping),
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03004861 U64_LO(data_mapping), ETH_CONNECTION_TYPE);
4862}
4863
4864static inline int bnx2x_q_send_setup_e2(struct bnx2x *bp,
4865 struct bnx2x_queue_state_params *params)
4866{
4867 struct bnx2x_queue_sp_obj *o = params->q_obj;
4868 struct client_init_ramrod_data *rdata =
4869 (struct client_init_ramrod_data *)o->rdata;
4870 dma_addr_t data_mapping = o->rdata_mapping;
4871 int ramrod = RAMROD_CMD_ID_ETH_CLIENT_SETUP;
4872
4873 /* Clear the ramrod data */
4874 memset(rdata, 0, sizeof(*rdata));
4875
4876 /* Fill the ramrod data */
4877 bnx2x_q_fill_setup_data_cmn(bp, params, rdata);
4878 bnx2x_q_fill_setup_data_e2(bp, params, rdata);
4879
Yuval Mintz16a5fd92013-06-02 00:06:18 +00004880	/* No need for an explicit memory barrier here as long as we
4881	 * ensure the ordering of writing to the SPQ element
4882	 * and updating of the SPQ producer, which involves a memory
4883	 * read; bnx2x_sp_post() already puts the required full
4884	 * memory barrier there.
Vladislav Zolotarov53e51e22011-07-19 01:45:02 +00004885	 */
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03004886
Ariel Elior6383c0b2011-07-14 08:31:57 +00004887 return bnx2x_sp_post(bp, ramrod, o->cids[BNX2X_PRIMARY_CID_INDEX],
4888 U64_HI(data_mapping),
4889 U64_LO(data_mapping), ETH_CONNECTION_TYPE);
4890}
4891
4892static inline int bnx2x_q_send_setup_tx_only(struct bnx2x *bp,
4893 struct bnx2x_queue_state_params *params)
4894{
4895 struct bnx2x_queue_sp_obj *o = params->q_obj;
4896 struct tx_queue_init_ramrod_data *rdata =
4897 (struct tx_queue_init_ramrod_data *)o->rdata;
4898 dma_addr_t data_mapping = o->rdata_mapping;
4899 int ramrod = RAMROD_CMD_ID_ETH_TX_QUEUE_SETUP;
4900 struct bnx2x_queue_setup_tx_only_params *tx_only_params =
4901 &params->params.tx_only;
4902 u8 cid_index = tx_only_params->cid_index;
4903
Ariel Elior6383c0b2011-07-14 08:31:57 +00004904 if (cid_index >= o->max_cos) {
4905 BNX2X_ERR("queue[%d]: cid_index (%d) is out of range\n",
4906 o->cl_id, cid_index);
4907 return -EINVAL;
4908 }
4909
Joe Perches94f05b02011-08-14 12:16:20 +00004910 DP(BNX2X_MSG_SP, "parameters received: cos: %d sp-id: %d\n",
Ariel Elior6383c0b2011-07-14 08:31:57 +00004911 tx_only_params->gen_params.cos,
4912 tx_only_params->gen_params.spcl_id);
4913
4914 /* Clear the ramrod data */
4915 memset(rdata, 0, sizeof(*rdata));
4916
4917 /* Fill the ramrod data */
4918 bnx2x_q_fill_setup_tx_only(bp, params, rdata);
4919
Merav Sicron51c1a582012-03-18 10:33:38 +00004920 DP(BNX2X_MSG_SP, "sending tx-only ramrod: cid %d, client-id %d, sp-client id %d, cos %d\n",
4921 o->cids[cid_index], rdata->general.client_id,
Ariel Elior6383c0b2011-07-14 08:31:57 +00004922 rdata->general.sp_client_id, rdata->general.cos);
4923
Yuval Mintz16a5fd92013-06-02 00:06:18 +00004924	/* No need for an explicit memory barrier here as long as we
4925	 * ensure the ordering of writing to the SPQ element
4926	 * and updating of the SPQ producer, which involves a memory
4927	 * read; bnx2x_sp_post() already puts the required full
4928	 * memory barrier there.
Ariel Elior6383c0b2011-07-14 08:31:57 +00004929	 */
4930
4931 return bnx2x_sp_post(bp, ramrod, o->cids[cid_index],
4932 U64_HI(data_mapping),
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03004933 U64_LO(data_mapping), ETH_CONNECTION_TYPE);
4934}
4935
4936static void bnx2x_q_fill_update_data(struct bnx2x *bp,
4937 struct bnx2x_queue_sp_obj *obj,
4938 struct bnx2x_queue_update_params *params,
4939 struct client_update_ramrod_data *data)
4940{
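	/* Each updatable property below is passed as a <value, change_flg>
	 * pair; a value is only meant to be applied when its matching
	 * change flag is set, so a caller can update an arbitrary subset
	 * of the properties with a single UPDATE ramrod.
	 */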
4941 /* Client ID of the client to update */
4942 data->client_id = obj->cl_id;
4943
4944 /* Function ID of the client to update */
4945 data->func_id = obj->func_id;
4946
4947 /* Default VLAN value */
4948 data->default_vlan = cpu_to_le16(params->def_vlan);
4949
4950 /* Inner VLAN stripping */
4951 data->inner_vlan_removal_enable_flg =
4952 test_bit(BNX2X_Q_UPDATE_IN_VLAN_REM, &params->update_flags);
4953 data->inner_vlan_removal_change_flg =
4954 test_bit(BNX2X_Q_UPDATE_IN_VLAN_REM_CHNG,
4955 &params->update_flags);
4956
Yuval Mintz16a5fd92013-06-02 00:06:18 +00004957 /* Outer VLAN stripping */
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03004958 data->outer_vlan_removal_enable_flg =
4959 test_bit(BNX2X_Q_UPDATE_OUT_VLAN_REM, &params->update_flags);
4960 data->outer_vlan_removal_change_flg =
4961 test_bit(BNX2X_Q_UPDATE_OUT_VLAN_REM_CHNG,
4962 &params->update_flags);
4963
4964 /* Drop packets that have source MAC that doesn't belong to this
4965 * Queue.
4966 */
4967 data->anti_spoofing_enable_flg =
4968 test_bit(BNX2X_Q_UPDATE_ANTI_SPOOF, &params->update_flags);
4969 data->anti_spoofing_change_flg =
4970 test_bit(BNX2X_Q_UPDATE_ANTI_SPOOF_CHNG, &params->update_flags);
4971
4972 /* Activate/Deactivate */
4973 data->activate_flg =
4974 test_bit(BNX2X_Q_UPDATE_ACTIVATE, &params->update_flags);
4975 data->activate_change_flg =
4976 test_bit(BNX2X_Q_UPDATE_ACTIVATE_CHNG, &params->update_flags);
4977
4978 /* Enable default VLAN */
4979 data->default_vlan_enable_flg =
4980 test_bit(BNX2X_Q_UPDATE_DEF_VLAN_EN, &params->update_flags);
4981 data->default_vlan_change_flg =
4982 test_bit(BNX2X_Q_UPDATE_DEF_VLAN_EN_CHNG,
4983 &params->update_flags);
4984
4985 /* silent vlan removal */
4986 data->silent_vlan_change_flg =
4987 test_bit(BNX2X_Q_UPDATE_SILENT_VLAN_REM_CHNG,
4988 &params->update_flags);
4989 data->silent_vlan_removal_flg =
4990 test_bit(BNX2X_Q_UPDATE_SILENT_VLAN_REM, &params->update_flags);
4991 data->silent_vlan_value = cpu_to_le16(params->silent_removal_value);
4992 data->silent_vlan_mask = cpu_to_le16(params->silent_removal_mask);
4993}
4994
4995static inline int bnx2x_q_send_update(struct bnx2x *bp,
4996 struct bnx2x_queue_state_params *params)
4997{
4998 struct bnx2x_queue_sp_obj *o = params->q_obj;
4999 struct client_update_ramrod_data *rdata =
5000 (struct client_update_ramrod_data *)o->rdata;
5001 dma_addr_t data_mapping = o->rdata_mapping;
Ariel Elior6383c0b2011-07-14 08:31:57 +00005002 struct bnx2x_queue_update_params *update_params =
5003 &params->params.update;
5004 u8 cid_index = update_params->cid_index;
5005
5006 if (cid_index >= o->max_cos) {
5007 BNX2X_ERR("queue[%d]: cid_index (%d) is out of range\n",
5008 o->cl_id, cid_index);
5009 return -EINVAL;
5010 }
5011
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03005012 /* Clear the ramrod data */
5013 memset(rdata, 0, sizeof(*rdata));
5014
5015 /* Fill the ramrod data */
Ariel Elior6383c0b2011-07-14 08:31:57 +00005016 bnx2x_q_fill_update_data(bp, o, update_params, rdata);
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03005017
Yuval Mintz16a5fd92013-06-02 00:06:18 +00005018	/* No need for an explicit memory barrier here as long as we
5019	 * ensure the ordering of writing to the SPQ element
5020	 * and updating of the SPQ producer, which involves a memory
5021	 * read; bnx2x_sp_post() already puts the required full
5022	 * memory barrier there.
Vladislav Zolotarov53e51e22011-07-19 01:45:02 +00005023	 */
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03005024
Ariel Elior6383c0b2011-07-14 08:31:57 +00005025 return bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_CLIENT_UPDATE,
5026 o->cids[cid_index], U64_HI(data_mapping),
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03005027 U64_LO(data_mapping), ETH_CONNECTION_TYPE);
5028}
5029
5030/**
5031 * bnx2x_q_send_deactivate - send DEACTIVATE command
5032 *
5033 * @bp: device handle
5034 * @params:	queue state parameters
5035 *
5036 * implemented using the UPDATE command.
5037 */
5038static inline int bnx2x_q_send_deactivate(struct bnx2x *bp,
5039 struct bnx2x_queue_state_params *params)
5040{
5041 struct bnx2x_queue_update_params *update = &params->params.update;
5042
5043 memset(update, 0, sizeof(*update));
5044
5045 __set_bit(BNX2X_Q_UPDATE_ACTIVATE_CHNG, &update->update_flags);
5046
5047 return bnx2x_q_send_update(bp, params);
5048}
5049
5050/**
5051 * bnx2x_q_send_activate - send ACTIVATE command
5052 *
5053 * @bp: device handle
5054 * @params:	queue state parameters
5055 *
5056 * implemented using the UPDATE command.
5057 */
5058static inline int bnx2x_q_send_activate(struct bnx2x *bp,
5059 struct bnx2x_queue_state_params *params)
5060{
5061 struct bnx2x_queue_update_params *update = &params->params.update;
5062
5063 memset(update, 0, sizeof(*update));
5064
5065 __set_bit(BNX2X_Q_UPDATE_ACTIVATE, &update->update_flags);
5066 __set_bit(BNX2X_Q_UPDATE_ACTIVATE_CHNG, &update->update_flags);
5067
5068 return bnx2x_q_send_update(bp, params);
5069}
5070
5071static inline int bnx2x_q_send_update_tpa(struct bnx2x *bp,
5072 struct bnx2x_queue_state_params *params)
5073{
5074 /* TODO: Not implemented yet. */
5075	return -EINVAL;
5076}
5077
5078static inline int bnx2x_q_send_halt(struct bnx2x *bp,
5079 struct bnx2x_queue_state_params *params)
5080{
5081 struct bnx2x_queue_sp_obj *o = params->q_obj;
5082
Ariel Elior6383c0b2011-07-14 08:31:57 +00005083 return bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_HALT,
5084 o->cids[BNX2X_PRIMARY_CID_INDEX], 0, o->cl_id,
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03005085 ETH_CONNECTION_TYPE);
5086}
5087
5088static inline int bnx2x_q_send_cfc_del(struct bnx2x *bp,
5089 struct bnx2x_queue_state_params *params)
5090{
5091 struct bnx2x_queue_sp_obj *o = params->q_obj;
Ariel Elior6383c0b2011-07-14 08:31:57 +00005092 u8 cid_idx = params->params.cfc_del.cid_index;
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03005093
Ariel Elior6383c0b2011-07-14 08:31:57 +00005094 if (cid_idx >= o->max_cos) {
5095 BNX2X_ERR("queue[%d]: cid_index (%d) is out of range\n",
5096 o->cl_id, cid_idx);
5097 return -EINVAL;
5098 }
5099
5100 return bnx2x_sp_post(bp, RAMROD_CMD_ID_COMMON_CFC_DEL,
5101 o->cids[cid_idx], 0, 0, NONE_CONNECTION_TYPE);
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03005102}
5103
5104static inline int bnx2x_q_send_terminate(struct bnx2x *bp,
5105 struct bnx2x_queue_state_params *params)
5106{
5107 struct bnx2x_queue_sp_obj *o = params->q_obj;
Ariel Elior6383c0b2011-07-14 08:31:57 +00005108 u8 cid_index = params->params.terminate.cid_index;
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03005109
Ariel Elior6383c0b2011-07-14 08:31:57 +00005110 if (cid_index >= o->max_cos) {
5111 BNX2X_ERR("queue[%d]: cid_index (%d) is out of range\n",
5112 o->cl_id, cid_index);
5113 return -EINVAL;
5114 }
5115
5116 return bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_TERMINATE,
5117 o->cids[cid_index], 0, 0, ETH_CONNECTION_TYPE);
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03005118}
5119
5120static inline int bnx2x_q_send_empty(struct bnx2x *bp,
5121 struct bnx2x_queue_state_params *params)
5122{
5123 struct bnx2x_queue_sp_obj *o = params->q_obj;
5124
Ariel Elior6383c0b2011-07-14 08:31:57 +00005125 return bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_EMPTY,
5126 o->cids[BNX2X_PRIMARY_CID_INDEX], 0, 0,
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03005127 ETH_CONNECTION_TYPE);
5128}
5129
5130static inline int bnx2x_queue_send_cmd_cmn(struct bnx2x *bp,
5131 struct bnx2x_queue_state_params *params)
5132{
5133 switch (params->cmd) {
5134 case BNX2X_Q_CMD_INIT:
5135 return bnx2x_q_init(bp, params);
Ariel Elior6383c0b2011-07-14 08:31:57 +00005136 case BNX2X_Q_CMD_SETUP_TX_ONLY:
5137 return bnx2x_q_send_setup_tx_only(bp, params);
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03005138 case BNX2X_Q_CMD_DEACTIVATE:
5139 return bnx2x_q_send_deactivate(bp, params);
5140 case BNX2X_Q_CMD_ACTIVATE:
5141 return bnx2x_q_send_activate(bp, params);
5142 case BNX2X_Q_CMD_UPDATE:
5143 return bnx2x_q_send_update(bp, params);
5144 case BNX2X_Q_CMD_UPDATE_TPA:
5145 return bnx2x_q_send_update_tpa(bp, params);
5146 case BNX2X_Q_CMD_HALT:
5147 return bnx2x_q_send_halt(bp, params);
5148 case BNX2X_Q_CMD_CFC_DEL:
5149 return bnx2x_q_send_cfc_del(bp, params);
5150 case BNX2X_Q_CMD_TERMINATE:
5151 return bnx2x_q_send_terminate(bp, params);
5152 case BNX2X_Q_CMD_EMPTY:
5153 return bnx2x_q_send_empty(bp, params);
5154 default:
5155 BNX2X_ERR("Unknown command: %d\n", params->cmd);
5156 return -EINVAL;
5157 }
5158}
5159
5160static int bnx2x_queue_send_cmd_e1x(struct bnx2x *bp,
5161 struct bnx2x_queue_state_params *params)
5162{
5163 switch (params->cmd) {
5164 case BNX2X_Q_CMD_SETUP:
5165 return bnx2x_q_send_setup_e1x(bp, params);
5166 case BNX2X_Q_CMD_INIT:
Ariel Elior6383c0b2011-07-14 08:31:57 +00005167 case BNX2X_Q_CMD_SETUP_TX_ONLY:
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03005168 case BNX2X_Q_CMD_DEACTIVATE:
5169 case BNX2X_Q_CMD_ACTIVATE:
5170 case BNX2X_Q_CMD_UPDATE:
5171 case BNX2X_Q_CMD_UPDATE_TPA:
5172 case BNX2X_Q_CMD_HALT:
5173 case BNX2X_Q_CMD_CFC_DEL:
5174 case BNX2X_Q_CMD_TERMINATE:
5175 case BNX2X_Q_CMD_EMPTY:
5176 return bnx2x_queue_send_cmd_cmn(bp, params);
5177 default:
5178 BNX2X_ERR("Unknown command: %d\n", params->cmd);
5179 return -EINVAL;
5180 }
5181}
5182
5183static int bnx2x_queue_send_cmd_e2(struct bnx2x *bp,
5184 struct bnx2x_queue_state_params *params)
5185{
5186 switch (params->cmd) {
5187 case BNX2X_Q_CMD_SETUP:
5188 return bnx2x_q_send_setup_e2(bp, params);
5189 case BNX2X_Q_CMD_INIT:
Ariel Elior6383c0b2011-07-14 08:31:57 +00005190 case BNX2X_Q_CMD_SETUP_TX_ONLY:
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03005191 case BNX2X_Q_CMD_DEACTIVATE:
5192 case BNX2X_Q_CMD_ACTIVATE:
5193 case BNX2X_Q_CMD_UPDATE:
5194 case BNX2X_Q_CMD_UPDATE_TPA:
5195 case BNX2X_Q_CMD_HALT:
5196 case BNX2X_Q_CMD_CFC_DEL:
5197 case BNX2X_Q_CMD_TERMINATE:
5198 case BNX2X_Q_CMD_EMPTY:
5199 return bnx2x_queue_send_cmd_cmn(bp, params);
5200 default:
5201 BNX2X_ERR("Unknown command: %d\n", params->cmd);
5202 return -EINVAL;
5203 }
5204}
5205
5206/**
5207 * bnx2x_queue_chk_transition - check state machine of a regular (not Forwarding) Queue
5208 *
5209 * @bp: device handle
5210 * @o:		queue state object
5211 * @params:	queue state parameters
5212 *
5214 * It both checks if the requested command is legal in a current
5215 * state and, if it's legal, sets a `next_state' in the object
5216 * that will be used in the completion flow to set the `state'
5217 * of the object.
5218 *
5219 * returns 0 if a requested command is a legal transition,
5220 * -EINVAL otherwise.
5221 */
5222static int bnx2x_queue_chk_transition(struct bnx2x *bp,
5223 struct bnx2x_queue_sp_obj *o,
5224 struct bnx2x_queue_state_params *params)
5225{
5226 enum bnx2x_q_state state = o->state, next_state = BNX2X_Q_STATE_MAX;
5227 enum bnx2x_queue_cmd cmd = params->cmd;
Ariel Elior6383c0b2011-07-14 08:31:57 +00005228 struct bnx2x_queue_update_params *update_params =
5229 &params->params.update;
5230 u8 next_tx_only = o->num_tx_only;
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03005231
Yuval Mintz16a5fd92013-06-02 00:06:18 +00005232	/* Forget all commands pending for completion if a driver-only state
Dmitry Kravkov6debea82011-07-19 01:42:04 +00005233 * transition has been requested.
5234 */
5235 if (test_bit(RAMROD_DRV_CLR_ONLY, &params->ramrod_flags)) {
5236 o->pending = 0;
5237 o->next_state = BNX2X_Q_STATE_MAX;
5238 }
5239
Yuval Mintz16a5fd92013-06-02 00:06:18 +00005240 /* Don't allow a next state transition if we are in the middle of
Dmitry Kravkov6debea82011-07-19 01:42:04 +00005241 * the previous one.
5242 */
Yuval Mintz04c46732013-01-23 03:21:46 +00005243 if (o->pending) {
5244 BNX2X_ERR("Blocking transition since pending was %lx\n",
5245 o->pending);
Dmitry Kravkov6debea82011-07-19 01:42:04 +00005246 return -EBUSY;
Yuval Mintz04c46732013-01-23 03:21:46 +00005247 }
Dmitry Kravkov6debea82011-07-19 01:42:04 +00005248
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03005249 switch (state) {
5250 case BNX2X_Q_STATE_RESET:
5251 if (cmd == BNX2X_Q_CMD_INIT)
5252 next_state = BNX2X_Q_STATE_INITIALIZED;
5253
5254 break;
5255 case BNX2X_Q_STATE_INITIALIZED:
5256 if (cmd == BNX2X_Q_CMD_SETUP) {
5257 if (test_bit(BNX2X_Q_FLG_ACTIVE,
5258 &params->params.setup.flags))
5259 next_state = BNX2X_Q_STATE_ACTIVE;
5260 else
5261 next_state = BNX2X_Q_STATE_INACTIVE;
5262 }
5263
5264 break;
5265 case BNX2X_Q_STATE_ACTIVE:
5266 if (cmd == BNX2X_Q_CMD_DEACTIVATE)
5267 next_state = BNX2X_Q_STATE_INACTIVE;
5268
5269 else if ((cmd == BNX2X_Q_CMD_EMPTY) ||
5270 (cmd == BNX2X_Q_CMD_UPDATE_TPA))
5271 next_state = BNX2X_Q_STATE_ACTIVE;
5272
Ariel Elior6383c0b2011-07-14 08:31:57 +00005273 else if (cmd == BNX2X_Q_CMD_SETUP_TX_ONLY) {
5274 next_state = BNX2X_Q_STATE_MULTI_COS;
5275 next_tx_only = 1;
5276 }
5277
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03005278 else if (cmd == BNX2X_Q_CMD_HALT)
5279 next_state = BNX2X_Q_STATE_STOPPED;
5280
5281 else if (cmd == BNX2X_Q_CMD_UPDATE) {
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03005282 /* If "active" state change is requested, update the
5283 * state accordingly.
5284 */
5285 if (test_bit(BNX2X_Q_UPDATE_ACTIVATE_CHNG,
5286 &update_params->update_flags) &&
5287 !test_bit(BNX2X_Q_UPDATE_ACTIVATE,
5288 &update_params->update_flags))
5289 next_state = BNX2X_Q_STATE_INACTIVE;
5290 else
5291 next_state = BNX2X_Q_STATE_ACTIVE;
5292 }
5293
5294 break;
Ariel Elior6383c0b2011-07-14 08:31:57 +00005295 case BNX2X_Q_STATE_MULTI_COS:
5296 if (cmd == BNX2X_Q_CMD_TERMINATE)
5297 next_state = BNX2X_Q_STATE_MCOS_TERMINATED;
5298
5299 else if (cmd == BNX2X_Q_CMD_SETUP_TX_ONLY) {
5300 next_state = BNX2X_Q_STATE_MULTI_COS;
5301 next_tx_only = o->num_tx_only + 1;
5302 }
5303
5304 else if ((cmd == BNX2X_Q_CMD_EMPTY) ||
5305 (cmd == BNX2X_Q_CMD_UPDATE_TPA))
5306 next_state = BNX2X_Q_STATE_MULTI_COS;
5307
5308 else if (cmd == BNX2X_Q_CMD_UPDATE) {
5309 /* If "active" state change is requested, update the
5310 * state accordingly.
5311 */
5312 if (test_bit(BNX2X_Q_UPDATE_ACTIVATE_CHNG,
5313 &update_params->update_flags) &&
5314 !test_bit(BNX2X_Q_UPDATE_ACTIVATE,
5315 &update_params->update_flags))
5316 next_state = BNX2X_Q_STATE_INACTIVE;
5317 else
5318 next_state = BNX2X_Q_STATE_MULTI_COS;
5319 }
5320
5321 break;
5322 case BNX2X_Q_STATE_MCOS_TERMINATED:
5323 if (cmd == BNX2X_Q_CMD_CFC_DEL) {
5324 next_tx_only = o->num_tx_only - 1;
5325 if (next_tx_only == 0)
5326 next_state = BNX2X_Q_STATE_ACTIVE;
5327 else
5328 next_state = BNX2X_Q_STATE_MULTI_COS;
5329 }
5330
5331 break;
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03005332 case BNX2X_Q_STATE_INACTIVE:
5333 if (cmd == BNX2X_Q_CMD_ACTIVATE)
5334 next_state = BNX2X_Q_STATE_ACTIVE;
5335
5336 else if ((cmd == BNX2X_Q_CMD_EMPTY) ||
5337 (cmd == BNX2X_Q_CMD_UPDATE_TPA))
5338 next_state = BNX2X_Q_STATE_INACTIVE;
5339
5340 else if (cmd == BNX2X_Q_CMD_HALT)
5341 next_state = BNX2X_Q_STATE_STOPPED;
5342
5343 else if (cmd == BNX2X_Q_CMD_UPDATE) {
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03005344 /* If "active" state change is requested, update the
5345 * state accordingly.
5346 */
5347 if (test_bit(BNX2X_Q_UPDATE_ACTIVATE_CHNG,
5348 &update_params->update_flags) &&
5349 test_bit(BNX2X_Q_UPDATE_ACTIVATE,
Ariel Elior6383c0b2011-07-14 08:31:57 +00005350			 &update_params->update_flags)) {
5351 if (o->num_tx_only == 0)
5352 next_state = BNX2X_Q_STATE_ACTIVE;
5353 else /* tx only queues exist for this queue */
5354 next_state = BNX2X_Q_STATE_MULTI_COS;
5355 } else
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03005356 next_state = BNX2X_Q_STATE_INACTIVE;
5357 }
5358
5359 break;
5360 case BNX2X_Q_STATE_STOPPED:
5361 if (cmd == BNX2X_Q_CMD_TERMINATE)
5362 next_state = BNX2X_Q_STATE_TERMINATED;
5363
5364 break;
5365 case BNX2X_Q_STATE_TERMINATED:
5366 if (cmd == BNX2X_Q_CMD_CFC_DEL)
5367 next_state = BNX2X_Q_STATE_RESET;
5368
5369 break;
5370 default:
5371 BNX2X_ERR("Illegal state: %d\n", state);
5372 }
5373
5374 /* Transition is assured */
5375 if (next_state != BNX2X_Q_STATE_MAX) {
5376 DP(BNX2X_MSG_SP, "Good state transition: %d(%d)->%d\n",
5377 state, cmd, next_state);
5378 o->next_state = next_state;
Ariel Elior6383c0b2011-07-14 08:31:57 +00005379 o->next_tx_only = next_tx_only;
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03005380 return 0;
5381 }
5382
5383 DP(BNX2X_MSG_SP, "Bad state transition request: %d %d\n", state, cmd);
5384
5385 return -EINVAL;
5386}
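
/* A compact summary of the regular Queue state machine encoded above:
 *
 *   RESET --INIT--> INITIALIZED --SETUP--> ACTIVE (or INACTIVE)
 *   ACTIVE <--ACTIVATE/DEACTIVATE (via UPDATE)--> INACTIVE
 *   ACTIVE --SETUP_TX_ONLY--> MULTI_COS --TERMINATE--> MCOS_TERMINATED
 *   MCOS_TERMINATED --CFC_DEL--> MULTI_COS (or ACTIVE once the last
 *   tx-only connection is deleted)
 *   ACTIVE/INACTIVE --HALT--> STOPPED --TERMINATE--> TERMINATED
 *   TERMINATED --CFC_DEL--> RESET
 *
 * EMPTY and UPDATE_TPA leave the current state unchanged.
 */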
5387
5388void bnx2x_init_queue_obj(struct bnx2x *bp,
5389 struct bnx2x_queue_sp_obj *obj,
Ariel Elior6383c0b2011-07-14 08:31:57 +00005390 u8 cl_id, u32 *cids, u8 cid_cnt, u8 func_id,
5391 void *rdata,
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03005392 dma_addr_t rdata_mapping, unsigned long type)
5393{
5394 memset(obj, 0, sizeof(*obj));
5395
Ariel Elior6383c0b2011-07-14 08:31:57 +00005396	/* We support at most BNX2X_MULTI_TX_COS Tx CoS configurations at the moment */
5397 BUG_ON(BNX2X_MULTI_TX_COS < cid_cnt);
5398
5399 memcpy(obj->cids, cids, sizeof(obj->cids[0]) * cid_cnt);
5400 obj->max_cos = cid_cnt;
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03005401 obj->cl_id = cl_id;
5402 obj->func_id = func_id;
5403 obj->rdata = rdata;
5404 obj->rdata_mapping = rdata_mapping;
5405 obj->type = type;
5406 obj->next_state = BNX2X_Q_STATE_MAX;
5407
5408 if (CHIP_IS_E1x(bp))
5409 obj->send_cmd = bnx2x_queue_send_cmd_e1x;
5410 else
5411 obj->send_cmd = bnx2x_queue_send_cmd_e2;
5412
5413 obj->check_transition = bnx2x_queue_chk_transition;
5414
5415 obj->complete_cmd = bnx2x_queue_comp_cmd;
5416 obj->wait_comp = bnx2x_queue_wait_comp;
5417 obj->set_pending = bnx2x_queue_set_pending;
5418}
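
/* Illustrative initialization sketch -- the cl_id/cid/rdata names below
 * are placeholders, not taken from this file. A single-CoS queue with
 * both Rx and Tx rings would be set up roughly as:
 *
 *	unsigned long q_type = 0;
 *	u32 cids[BNX2X_MULTI_TX_COS];
 *
 *	cids[BNX2X_PRIMARY_CID_INDEX] = cid;
 *	__set_bit(BNX2X_Q_TYPE_HAS_RX, &q_type);
 *	__set_bit(BNX2X_Q_TYPE_HAS_TX, &q_type);
 *	bnx2x_init_queue_obj(bp, &q_obj, cl_id, cids, 1, BP_FUNC(bp),
 *			     rdata, rdata_mapping, q_type);
 */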
5419
Ariel Elior67c431a2013-01-01 05:22:36 +00005420/* Return a queue object's logical state */
5421int bnx2x_get_q_logical_state(struct bnx2x *bp,
5422 struct bnx2x_queue_sp_obj *obj)
5423{
5424 switch (obj->state) {
5425 case BNX2X_Q_STATE_ACTIVE:
5426 case BNX2X_Q_STATE_MULTI_COS:
5427 return BNX2X_Q_LOGICAL_STATE_ACTIVE;
5428 case BNX2X_Q_STATE_RESET:
5429 case BNX2X_Q_STATE_INITIALIZED:
5430 case BNX2X_Q_STATE_MCOS_TERMINATED:
5431 case BNX2X_Q_STATE_INACTIVE:
5432 case BNX2X_Q_STATE_STOPPED:
5433 case BNX2X_Q_STATE_TERMINATED:
5434 case BNX2X_Q_STATE_FLRED:
5435 return BNX2X_Q_LOGICAL_STATE_STOPPED;
5436 default:
5437 return -EINVAL;
5438 }
5439}
5440
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03005441/********************** Function state object *********************************/
Dmitry Kravkov6debea82011-07-19 01:42:04 +00005442enum bnx2x_func_state bnx2x_func_get_state(struct bnx2x *bp,
5443 struct bnx2x_func_sp_obj *o)
5444{
5445 /* in the middle of transaction - return INVALID state */
5446 if (o->pending)
5447 return BNX2X_F_STATE_MAX;
5448
Yuval Mintz16a5fd92013-06-02 00:06:18 +00005449	/* Ensure the order of reading of o->pending and o->state:
Dmitry Kravkov6debea82011-07-19 01:42:04 +00005450 * o->pending should be read first
5451 */
5452 rmb();
5453
5454 return o->state;
5455}
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03005456
5457static int bnx2x_func_wait_comp(struct bnx2x *bp,
5458 struct bnx2x_func_sp_obj *o,
5459 enum bnx2x_func_cmd cmd)
5460{
5461 return bnx2x_state_wait(bp, cmd, &o->pending);
5462}
5463
5464/**
5465 * bnx2x_func_state_change_comp - complete the state machine transition
5466 *
5467 * @bp: device handle
5468 * @o:	function state object
5469 * @cmd:	command that is completing
5470 *
5471 * Called on state change transition. Completes the state
5472 * machine transition only - no HW interaction.
5473 */
5474static inline int bnx2x_func_state_change_comp(struct bnx2x *bp,
5475 struct bnx2x_func_sp_obj *o,
5476 enum bnx2x_func_cmd cmd)
5477{
5478 unsigned long cur_pending = o->pending;
5479
5480 if (!test_and_clear_bit(cmd, &cur_pending)) {
Merav Sicron51c1a582012-03-18 10:33:38 +00005481 BNX2X_ERR("Bad MC reply %d for func %d in state %d pending 0x%lx, next_state %d\n",
5482 cmd, BP_FUNC(bp), o->state,
5483 cur_pending, o->next_state);
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03005484 return -EINVAL;
5485 }
5486
Joe Perches94f05b02011-08-14 12:16:20 +00005487 DP(BNX2X_MSG_SP,
5488 "Completing command %d for func %d, setting state to %d\n",
5489 cmd, BP_FUNC(bp), o->next_state);
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03005490
5491 o->state = o->next_state;
5492 o->next_state = BNX2X_F_STATE_MAX;
5493
5494 /* It's important that o->state and o->next_state are
5495 * updated before o->pending.
5496 */
5497 wmb();
5498
5499 clear_bit(cmd, &o->pending);
5500 smp_mb__after_clear_bit();
5501
5502 return 0;
5503}
5504
5505/**
5506 * bnx2x_func_comp_cmd - complete the state change command
5507 *
5508 * @bp: device handle
5509 * @o:	function state object
5510 * @cmd:	command whose completion arrived
5511 *
5512 * Checks that the arrived completion is expected.
5513 */
5514static int bnx2x_func_comp_cmd(struct bnx2x *bp,
5515 struct bnx2x_func_sp_obj *o,
5516 enum bnx2x_func_cmd cmd)
5517{
5518 /* Complete the state machine part first, check if it's a
5519 * legal completion.
5520 */
5521 int rc = bnx2x_func_state_change_comp(bp, o, cmd);
Vladislav Zolotarov042181f2011-06-14 01:33:39 +00005522 return rc;
5523}
5524
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03005525/**
5526 * bnx2x_func_chk_transition - perform function state machine transition
5527 *
5528 * @bp: device handle
5529 * @o:
5530 * @o:	function state object
5531 * @params:	function state parameters
5532 * It both checks if the requested command is legal in a current
5533 * state and, if it's legal, sets a `next_state' in the object
5534 * that will be used in the completion flow to set the `state'
5535 * of the object.
5536 *
5537 * returns 0 if a requested command is a legal transition,
5538 * -EINVAL otherwise.
5539 */
5540static int bnx2x_func_chk_transition(struct bnx2x *bp,
5541 struct bnx2x_func_sp_obj *o,
5542 struct bnx2x_func_state_params *params)
Vladislav Zolotarov042181f2011-06-14 01:33:39 +00005543{
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03005544 enum bnx2x_func_state state = o->state, next_state = BNX2X_F_STATE_MAX;
5545 enum bnx2x_func_cmd cmd = params->cmd;
Vladislav Zolotarov042181f2011-06-14 01:33:39 +00005546
Yuval Mintz16a5fd92013-06-02 00:06:18 +00005547	/* Forget all commands pending for completion if a driver-only state
Dmitry Kravkov6debea82011-07-19 01:42:04 +00005548 * transition has been requested.
5549 */
5550 if (test_bit(RAMROD_DRV_CLR_ONLY, &params->ramrod_flags)) {
5551 o->pending = 0;
5552 o->next_state = BNX2X_F_STATE_MAX;
5553 }
5554
Yuval Mintz16a5fd92013-06-02 00:06:18 +00005555 /* Don't allow a next state transition if we are in the middle of
Dmitry Kravkov6debea82011-07-19 01:42:04 +00005556 * the previous one.
5557 */
5558 if (o->pending)
5559 return -EBUSY;
5560
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03005561 switch (state) {
5562 case BNX2X_F_STATE_RESET:
5563 if (cmd == BNX2X_F_CMD_HW_INIT)
5564 next_state = BNX2X_F_STATE_INITIALIZED;
Vladislav Zolotarov042181f2011-06-14 01:33:39 +00005565
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03005566 break;
5567 case BNX2X_F_STATE_INITIALIZED:
5568 if (cmd == BNX2X_F_CMD_START)
5569 next_state = BNX2X_F_STATE_STARTED;
5570
5571 else if (cmd == BNX2X_F_CMD_HW_RESET)
5572 next_state = BNX2X_F_STATE_RESET;
5573
5574 break;
5575 case BNX2X_F_STATE_STARTED:
5576 if (cmd == BNX2X_F_CMD_STOP)
5577 next_state = BNX2X_F_STATE_INITIALIZED;
Barak Witkowskia3348722012-04-23 03:04:46 +00005578 /* afex ramrods can be sent only in started mode, and only
5579	 * if not pending for function_stop ramrod completion;
5580	 * for these events the next state remains STARTED.
5581 */
5582 else if ((cmd == BNX2X_F_CMD_AFEX_UPDATE) &&
5583 (!test_bit(BNX2X_F_CMD_STOP, &o->pending)))
5584 next_state = BNX2X_F_STATE_STARTED;
5585
5586 else if ((cmd == BNX2X_F_CMD_AFEX_VIFLISTS) &&
5587 (!test_bit(BNX2X_F_CMD_STOP, &o->pending)))
5588 next_state = BNX2X_F_STATE_STARTED;
Merav Sicron55c11942012-11-07 00:45:48 +00005589
5590 /* Switch_update ramrod can be sent in either started or
5591 * tx_stopped state, and it doesn't change the state.
5592 */
5593 else if ((cmd == BNX2X_F_CMD_SWITCH_UPDATE) &&
5594 (!test_bit(BNX2X_F_CMD_STOP, &o->pending)))
5595 next_state = BNX2X_F_STATE_STARTED;
5596
Dmitry Kravkov6debea82011-07-19 01:42:04 +00005597 else if (cmd == BNX2X_F_CMD_TX_STOP)
5598 next_state = BNX2X_F_STATE_TX_STOPPED;
5599
5600 break;
5601 case BNX2X_F_STATE_TX_STOPPED:
Merav Sicron55c11942012-11-07 00:45:48 +00005602 if ((cmd == BNX2X_F_CMD_SWITCH_UPDATE) &&
5603 (!test_bit(BNX2X_F_CMD_STOP, &o->pending)))
5604 next_state = BNX2X_F_STATE_TX_STOPPED;
5605
5606 else if (cmd == BNX2X_F_CMD_TX_START)
Dmitry Kravkov6debea82011-07-19 01:42:04 +00005607 next_state = BNX2X_F_STATE_STARTED;
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03005608
5609 break;
5610 default:
5611 BNX2X_ERR("Unknown state: %d\n", state);
5612 }
5613
5614 /* Transition is assured */
5615 if (next_state != BNX2X_F_STATE_MAX) {
5616 DP(BNX2X_MSG_SP, "Good function state transition: %d(%d)->%d\n",
5617 state, cmd, next_state);
5618 o->next_state = next_state;
5619 return 0;
5620 }
5621
5622 DP(BNX2X_MSG_SP, "Bad function state transition request: %d %d\n",
5623 state, cmd);
5624
5625 return -EINVAL;
5626}
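
/* A compact summary of the function state machine encoded above:
 *
 *   RESET --HW_INIT--> INITIALIZED --START--> STARTED --STOP--> INITIALIZED
 *   INITIALIZED --HW_RESET--> RESET
 *   STARTED --TX_STOP--> TX_STOPPED --TX_START--> STARTED
 *
 * AFEX_UPDATE, AFEX_VIFLISTS and SWITCH_UPDATE keep the STARTED state
 * (SWITCH_UPDATE is also legal in TX_STOPPED), and all of them are
 * rejected while a STOP completion is still pending.
 */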
5627
5628/**
5629 * bnx2x_func_init_func - performs HW init at function stage
5630 *
5631 * @bp: device handle
5632 * @drv:	driver-specific HW callbacks
5633 *
5634 * Init HW when the current phase is
5635 * FW_MSG_CODE_DRV_LOAD_FUNCTION: initialize only FUNCTION-only
5636 * HW blocks.
5637 */
5638static inline int bnx2x_func_init_func(struct bnx2x *bp,
5639 const struct bnx2x_func_sp_drv_ops *drv)
5640{
5641 return drv->init_hw_func(bp);
5642}
5643
5644/**
5645 * bnx2x_func_init_port - performs HW init at port stage
5646 *
5647 * @bp: device handle
5648 * @drv:	driver-specific HW callbacks
5649 *
5650 * Init HW when the current phase is
5651 * FW_MSG_CODE_DRV_LOAD_PORT: initialize PORT-only and
5652 * FUNCTION-only HW blocks.
5653 *
5654 */
5655static inline int bnx2x_func_init_port(struct bnx2x *bp,
5656 const struct bnx2x_func_sp_drv_ops *drv)
5657{
5658 int rc = drv->init_hw_port(bp);
5659 if (rc)
5660 return rc;
5661
5662 return bnx2x_func_init_func(bp, drv);
5663}
5664
5665/**
5666 * bnx2x_func_init_cmn_chip - performs HW init at chip-common stage
5667 *
5668 * @bp: device handle
5669 * @drv:	driver-specific HW callbacks
5670 *
5671 * Init HW when the current phase is
5672 * FW_MSG_CODE_DRV_LOAD_COMMON_CHIP: initialize COMMON_CHIP,
5673 * PORT-only and FUNCTION-only HW blocks.
5674 */
5675static inline int bnx2x_func_init_cmn_chip(struct bnx2x *bp,
5676 const struct bnx2x_func_sp_drv_ops *drv)
5677{
5678 int rc = drv->init_hw_cmn_chip(bp);
5679 if (rc)
5680 return rc;
5681
5682 return bnx2x_func_init_port(bp, drv);
5683}
5684
5685/**
5686 * bnx2x_func_init_cmn - performs HW init at common stage
5687 *
5688 * @bp: device handle
5689 * @drv:	driver-specific HW callbacks
5690 *
5691 * Init HW when the current phase is
5692 * FW_MSG_CODE_DRV_LOAD_COMMON: initialize COMMON,
5693 * PORT-only and FUNCTION-only HW blocks.
5694 */
5695static inline int bnx2x_func_init_cmn(struct bnx2x *bp,
5696 const struct bnx2x_func_sp_drv_ops *drv)
5697{
5698 int rc = drv->init_hw_cmn(bp);
5699 if (rc)
5700 return rc;
5701
5702 return bnx2x_func_init_port(bp, drv);
5703}
5704
5705static int bnx2x_func_hw_init(struct bnx2x *bp,
5706 struct bnx2x_func_state_params *params)
5707{
5708 u32 load_code = params->params.hw_init.load_phase;
5709 struct bnx2x_func_sp_obj *o = params->f_obj;
5710 const struct bnx2x_func_sp_drv_ops *drv = o->drv;
5711 int rc = 0;
5712
5713 DP(BNX2X_MSG_SP, "function %d load_code %x\n",
5714 BP_ABS_FUNC(bp), load_code);
5715
5716 /* Prepare buffers for unzipping the FW */
5717 rc = drv->gunzip_init(bp);
5718 if (rc)
5719 return rc;
5720
5721 /* Prepare FW */
5722 rc = drv->init_fw(bp);
5723 if (rc) {
5724 BNX2X_ERR("Error loading firmware\n");
Dmitry Kravkoveb2afd42011-11-15 12:07:33 +00005725 goto init_err;
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03005726 }
5727
Yuval Mintz16a5fd92013-06-02 00:06:18 +00005728	/* Handle the beginning of COMMON_XXX phases separately... */
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03005729 switch (load_code) {
5730 case FW_MSG_CODE_DRV_LOAD_COMMON_CHIP:
5731 rc = bnx2x_func_init_cmn_chip(bp, drv);
5732 if (rc)
Dmitry Kravkoveb2afd42011-11-15 12:07:33 +00005733 goto init_err;
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03005734
5735 break;
5736 case FW_MSG_CODE_DRV_LOAD_COMMON:
5737 rc = bnx2x_func_init_cmn(bp, drv);
5738 if (rc)
Dmitry Kravkoveb2afd42011-11-15 12:07:33 +00005739 goto init_err;
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03005740
5741 break;
5742 case FW_MSG_CODE_DRV_LOAD_PORT:
5743 rc = bnx2x_func_init_port(bp, drv);
5744 if (rc)
Dmitry Kravkoveb2afd42011-11-15 12:07:33 +00005745 goto init_err;
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03005746
5747 break;
5748 case FW_MSG_CODE_DRV_LOAD_FUNCTION:
5749 rc = bnx2x_func_init_func(bp, drv);
5750 if (rc)
Dmitry Kravkoveb2afd42011-11-15 12:07:33 +00005751 goto init_err;
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03005752
5753 break;
5754 default:
5755 BNX2X_ERR("Unknown load_code (0x%x) from MCP\n", load_code);
5756 rc = -EINVAL;
5757 }
5758
Dmitry Kravkoveb2afd42011-11-15 12:07:33 +00005759init_err:
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03005760 drv->gunzip_end(bp);
5761
Yuval Mintz16a5fd92013-06-02 00:06:18 +00005762 /* In case of success, complete the command immediately: no ramrods
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03005763 * have been sent.
5764 */
5765 if (!rc)
5766 o->complete_cmd(bp, o, BNX2X_F_CMD_HW_INIT);
5767
5768 return rc;
5769}
5770
5771/**
5772 * bnx2x_func_reset_func - reset HW at function stage
5773 *
5774 * @bp: device handle
5775 * @drv:	driver-specific HW callbacks
5776 *
5777 * Reset HW at FW_MSG_CODE_DRV_UNLOAD_FUNCTION stage: reset only
5778 * FUNCTION-only HW blocks.
5779 */
5780static inline void bnx2x_func_reset_func(struct bnx2x *bp,
5781 const struct bnx2x_func_sp_drv_ops *drv)
5782{
5783 drv->reset_hw_func(bp);
5784}
5785
5786/**
Yuval Mintz16a5fd92013-06-02 00:06:18 +00005787 * bnx2x_func_reset_port - reset HW at port stage
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03005788 *
5789 * @bp: device handle
5790 * @drv:	driver-specific HW callbacks
5791 *
5792 * Reset HW at FW_MSG_CODE_DRV_UNLOAD_PORT stage: reset
5793 * FUNCTION-only and PORT-only HW blocks.
5794 *
5795 * !!!IMPORTANT!!!
5796 *
5797 * It's important to call reset_port before reset_func() as the last thing
5798 * reset_func does is pf_disable() thus disabling PGLUE_B, which
5799 * makes impossible any DMAE transactions.
5800 */
5801static inline void bnx2x_func_reset_port(struct bnx2x *bp,
5802 const struct bnx2x_func_sp_drv_ops *drv)
5803{
5804 drv->reset_hw_port(bp);
5805 bnx2x_func_reset_func(bp, drv);
5806}
5807
5808/**
Yuval Mintz16a5fd92013-06-02 00:06:18 +00005809 * bnx2x_func_reset_cmn - reset HW at common stage
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03005810 *
5811 * @bp: device handle
5812 * @drv:	driver-specific HW callbacks
5813 *
5814 * Reset HW at FW_MSG_CODE_DRV_UNLOAD_COMMON and
5815 * FW_MSG_CODE_DRV_UNLOAD_COMMON_CHIP stages: reset COMMON,
5816 * COMMON_CHIP, FUNCTION-only and PORT-only HW blocks.
5817 */
5818static inline void bnx2x_func_reset_cmn(struct bnx2x *bp,
5819 const struct bnx2x_func_sp_drv_ops *drv)
5820{
5821 bnx2x_func_reset_port(bp, drv);
5822 drv->reset_hw_cmn(bp);
5823}
5824
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03005825static inline int bnx2x_func_hw_reset(struct bnx2x *bp,
5826 struct bnx2x_func_state_params *params)
5827{
5828 u32 reset_phase = params->params.hw_reset.reset_phase;
5829 struct bnx2x_func_sp_obj *o = params->f_obj;
5830 const struct bnx2x_func_sp_drv_ops *drv = o->drv;
5831
5832 DP(BNX2X_MSG_SP, "function %d reset_phase %x\n", BP_ABS_FUNC(bp),
5833 reset_phase);
5834
5835 switch (reset_phase) {
5836 case FW_MSG_CODE_DRV_UNLOAD_COMMON:
5837 bnx2x_func_reset_cmn(bp, drv);
5838 break;
5839 case FW_MSG_CODE_DRV_UNLOAD_PORT:
5840 bnx2x_func_reset_port(bp, drv);
5841 break;
5842 case FW_MSG_CODE_DRV_UNLOAD_FUNCTION:
5843 bnx2x_func_reset_func(bp, drv);
5844 break;
5845 default:
5846 BNX2X_ERR("Unknown reset_phase (0x%x) from MCP\n",
5847 reset_phase);
5848 break;
5849 }
5850
Yuval Mintz16a5fd92013-06-02 00:06:18 +00005851 /* Complete the command immediately: no ramrods have been sent. */
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03005852 o->complete_cmd(bp, o, BNX2X_F_CMD_HW_RESET);
5853
5854 return 0;
5855}
5856
5857static inline int bnx2x_func_send_start(struct bnx2x *bp,
5858 struct bnx2x_func_state_params *params)
5859{
5860 struct bnx2x_func_sp_obj *o = params->f_obj;
5861 struct function_start_data *rdata =
5862 (struct function_start_data *)o->rdata;
5863 dma_addr_t data_mapping = o->rdata_mapping;
5864 struct bnx2x_func_start_params *start_params = &params->params.start;
5865
5866 memset(rdata, 0, sizeof(*rdata));
5867
5868 /* Fill the ramrod data with provided parameters */
Dmitry Kravkov1bc277f2013-03-18 06:51:04 +00005869 rdata->function_mode = (u8)start_params->mf_mode;
5870 rdata->sd_vlan_tag = cpu_to_le16(start_params->sd_vlan_tag);
5871 rdata->path_id = BP_PATH(bp);
5872 rdata->network_cos_mode = start_params->network_cos_mode;
5873 rdata->gre_tunnel_mode = start_params->gre_tunnel_mode;
5874 rdata->gre_tunnel_rss = start_params->gre_tunnel_rss;
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03005875
Dmitry Kravkov1bc277f2013-03-18 06:51:04 +00005876	/* No need for an explicit memory barrier here as long as we
5877	 * ensure the ordering of writing to the SPQ element
5878	 * and updating of the SPQ producer, which involves a memory
5879	 * read; bnx2x_sp_post() already puts the required full
5880	 * memory barrier there.
Vladislav Zolotarov53e51e22011-07-19 01:45:02 +00005881	 */
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03005882
5883 return bnx2x_sp_post(bp, RAMROD_CMD_ID_COMMON_FUNCTION_START, 0,
5884 U64_HI(data_mapping),
5885 U64_LO(data_mapping), NONE_CONNECTION_TYPE);
5886}
5887
Merav Sicron55c11942012-11-07 00:45:48 +00005888static inline int bnx2x_func_send_switch_update(struct bnx2x *bp,
5889 struct bnx2x_func_state_params *params)
5890{
5891 struct bnx2x_func_sp_obj *o = params->f_obj;
5892 struct function_update_data *rdata =
5893 (struct function_update_data *)o->rdata;
5894 dma_addr_t data_mapping = o->rdata_mapping;
5895 struct bnx2x_func_switch_update_params *switch_update_params =
5896 &params->params.switch_update;
5897
5898 memset(rdata, 0, sizeof(*rdata));
5899
5900 /* Fill the ramrod data with provided parameters */
5901 rdata->tx_switch_suspend_change_flg = 1;
5902 rdata->tx_switch_suspend = switch_update_params->suspend;
5903 rdata->echo = SWITCH_UPDATE;
5904
5905 return bnx2x_sp_post(bp, RAMROD_CMD_ID_COMMON_FUNCTION_UPDATE, 0,
5906 U64_HI(data_mapping),
5907 U64_LO(data_mapping), NONE_CONNECTION_TYPE);
5908}
5909
Barak Witkowskia3348722012-04-23 03:04:46 +00005910static inline int bnx2x_func_send_afex_update(struct bnx2x *bp,
5911 struct bnx2x_func_state_params *params)
5912{
5913 struct bnx2x_func_sp_obj *o = params->f_obj;
5914 struct function_update_data *rdata =
5915 (struct function_update_data *)o->afex_rdata;
5916 dma_addr_t data_mapping = o->afex_rdata_mapping;
5917 struct bnx2x_func_afex_update_params *afex_update_params =
5918 &params->params.afex_update;
5919
5920 memset(rdata, 0, sizeof(*rdata));
5921
5922 /* Fill the ramrod data with provided parameters */
5923 rdata->vif_id_change_flg = 1;
5924 rdata->vif_id = cpu_to_le16(afex_update_params->vif_id);
5925 rdata->afex_default_vlan_change_flg = 1;
5926 rdata->afex_default_vlan =
5927 cpu_to_le16(afex_update_params->afex_default_vlan);
5928 rdata->allowed_priorities_change_flg = 1;
5929 rdata->allowed_priorities = afex_update_params->allowed_priorities;
Merav Sicron55c11942012-11-07 00:45:48 +00005930 rdata->echo = AFEX_UPDATE;
Barak Witkowskia3348722012-04-23 03:04:46 +00005931
5932 /* No need for an explicit memory barrier here as long we would
5933 * need to ensure the ordering of writing to the SPQ element
5934 * and updating of the SPQ producer which involves a memory
5935 * read and we will have to put a full memory barrier there
5936 * (inside bnx2x_sp_post()).
5937 */
5938 DP(BNX2X_MSG_SP,
5939 "afex: sending func_update vif_id 0x%x dvlan 0x%x prio 0x%x\n",
5940 rdata->vif_id,
5941 rdata->afex_default_vlan, rdata->allowed_priorities);
5942
5943 return bnx2x_sp_post(bp, RAMROD_CMD_ID_COMMON_FUNCTION_UPDATE, 0,
5944 U64_HI(data_mapping),
5945 U64_LO(data_mapping), NONE_CONNECTION_TYPE);
5946}
5947
5948static
5949inline int bnx2x_func_send_afex_viflists(struct bnx2x *bp,
5950 struct bnx2x_func_state_params *params)
5951{
5952 struct bnx2x_func_sp_obj *o = params->f_obj;
5953 struct afex_vif_list_ramrod_data *rdata =
5954 (struct afex_vif_list_ramrod_data *)o->afex_rdata;
Yuval Mintz86564c32013-01-23 03:21:50 +00005955 struct bnx2x_func_afex_viflists_params *afex_vif_params =
Barak Witkowskia3348722012-04-23 03:04:46 +00005956 &params->params.afex_viflists;
5957 u64 *p_rdata = (u64 *)rdata;
5958
5959 memset(rdata, 0, sizeof(*rdata));
5960
5961 /* Fill the ramrod data with provided parameters */
Yuval Mintz86564c32013-01-23 03:21:50 +00005962 rdata->vif_list_index = cpu_to_le16(afex_vif_params->vif_list_index);
5963 rdata->func_bit_map = afex_vif_params->func_bit_map;
5964 rdata->afex_vif_list_command = afex_vif_params->afex_vif_list_command;
5965 rdata->func_to_clear = afex_vif_params->func_to_clear;
Barak Witkowskia3348722012-04-23 03:04:46 +00005966
5967 /* send in echo type of sub command */
Yuval Mintz86564c32013-01-23 03:21:50 +00005968 rdata->echo = afex_vif_params->afex_vif_list_command;
Barak Witkowskia3348722012-04-23 03:04:46 +00005969
5970	/* No need for an explicit memory barrier here as long as we
5971	 * ensure the ordering of writing to the SPQ element
5972	 * and updating of the SPQ producer, which involves a memory
5973	 * read; bnx2x_sp_post() already puts the required full
5974	 * memory barrier there.
5975	 */
5976
5977 DP(BNX2X_MSG_SP, "afex: ramrod lists, cmd 0x%x index 0x%x func_bit_map 0x%x func_to_clr 0x%x\n",
5978 rdata->afex_vif_list_command, rdata->vif_list_index,
5979 rdata->func_bit_map, rdata->func_to_clear);
5980
5981 /* this ramrod sends data directly and not through DMA mapping */
5982 return bnx2x_sp_post(bp, RAMROD_CMD_ID_COMMON_AFEX_VIF_LISTS, 0,
5983 U64_HI(*p_rdata), U64_LO(*p_rdata),
5984 NONE_CONNECTION_TYPE);
5985}
5986
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03005987static inline int bnx2x_func_send_stop(struct bnx2x *bp,
5988 struct bnx2x_func_state_params *params)
5989{
5990 return bnx2x_sp_post(bp, RAMROD_CMD_ID_COMMON_FUNCTION_STOP, 0, 0, 0,
5991 NONE_CONNECTION_TYPE);
5992}
5993
Dmitry Kravkov6debea82011-07-19 01:42:04 +00005994static inline int bnx2x_func_send_tx_stop(struct bnx2x *bp,
5995 struct bnx2x_func_state_params *params)
5996{
5997 return bnx2x_sp_post(bp, RAMROD_CMD_ID_COMMON_STOP_TRAFFIC, 0, 0, 0,
5998 NONE_CONNECTION_TYPE);
5999}

6000static inline int bnx2x_func_send_tx_start(struct bnx2x *bp,
6001 struct bnx2x_func_state_params *params)
6002{
6003 struct bnx2x_func_sp_obj *o = params->f_obj;
6004 struct flow_control_configuration *rdata =
6005 (struct flow_control_configuration *)o->rdata;
6006 dma_addr_t data_mapping = o->rdata_mapping;
6007 struct bnx2x_func_tx_start_params *tx_start_params =
6008 &params->params.tx_start;
6009 int i;
6010
6011 memset(rdata, 0, sizeof(*rdata));
6012
6013 rdata->dcb_enabled = tx_start_params->dcb_enabled;
6014 rdata->dcb_version = tx_start_params->dcb_version;
6015 rdata->dont_add_pri_0_en = tx_start_params->dont_add_pri_0_en;
6016
6017 for (i = 0; i < ARRAY_SIZE(rdata->traffic_type_to_priority_cos); i++)
6018 rdata->traffic_type_to_priority_cos[i] =
6019 tx_start_params->traffic_type_to_priority_cos[i];
6020
6021 return bnx2x_sp_post(bp, RAMROD_CMD_ID_COMMON_START_TRAFFIC, 0,
6022 U64_HI(data_mapping),
6023 U64_LO(data_mapping), NONE_CONNECTION_TYPE);
6024}
6025
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03006026static int bnx2x_func_send_cmd(struct bnx2x *bp,
6027 struct bnx2x_func_state_params *params)
6028{
6029 switch (params->cmd) {
6030 case BNX2X_F_CMD_HW_INIT:
6031 return bnx2x_func_hw_init(bp, params);
6032 case BNX2X_F_CMD_START:
6033 return bnx2x_func_send_start(bp, params);
6034 case BNX2X_F_CMD_STOP:
6035 return bnx2x_func_send_stop(bp, params);
6036 case BNX2X_F_CMD_HW_RESET:
6037 return bnx2x_func_hw_reset(bp, params);
Barak Witkowskia3348722012-04-23 03:04:46 +00006038 case BNX2X_F_CMD_AFEX_UPDATE:
6039 return bnx2x_func_send_afex_update(bp, params);
6040 case BNX2X_F_CMD_AFEX_VIFLISTS:
6041 return bnx2x_func_send_afex_viflists(bp, params);
Dmitry Kravkov6debea82011-07-19 01:42:04 +00006042 case BNX2X_F_CMD_TX_STOP:
6043 return bnx2x_func_send_tx_stop(bp, params);
6044 case BNX2X_F_CMD_TX_START:
6045 return bnx2x_func_send_tx_start(bp, params);
Merav Sicron55c11942012-11-07 00:45:48 +00006046 case BNX2X_F_CMD_SWITCH_UPDATE:
6047 return bnx2x_func_send_switch_update(bp, params);
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03006048 default:
6049 BNX2X_ERR("Unknown command: %d\n", params->cmd);
6050 return -EINVAL;
6051 }
6052}
6053
6054void bnx2x_init_func_obj(struct bnx2x *bp,
6055 struct bnx2x_func_sp_obj *obj,
6056 void *rdata, dma_addr_t rdata_mapping,
Barak Witkowskia3348722012-04-23 03:04:46 +00006057 void *afex_rdata, dma_addr_t afex_rdata_mapping,
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03006058 struct bnx2x_func_sp_drv_ops *drv_iface)
6059{
6060 memset(obj, 0, sizeof(*obj));
6061
6062 mutex_init(&obj->one_pending_mutex);
6063
6064 obj->rdata = rdata;
6065 obj->rdata_mapping = rdata_mapping;
Barak Witkowskia3348722012-04-23 03:04:46 +00006066 obj->afex_rdata = afex_rdata;
6067 obj->afex_rdata_mapping = afex_rdata_mapping;
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03006068 obj->send_cmd = bnx2x_func_send_cmd;
6069 obj->check_transition = bnx2x_func_chk_transition;
6070 obj->complete_cmd = bnx2x_func_comp_cmd;
6071 obj->wait_comp = bnx2x_func_wait_comp;
6072
6073 obj->drv = drv_iface;
6074}
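
/* Illustrative sketch of a caller -- func_rdata/func_afex_rdata and
 * bnx2x_func_sp_drv are assumed to come from the main driver; they are
 * not defined in this file:
 *
 *	bnx2x_init_func_obj(bp, &bp->func_obj,
 *			    bnx2x_sp(bp, func_rdata),
 *			    bnx2x_sp_mapping(bp, func_rdata),
 *			    bnx2x_sp(bp, func_afex_rdata),
 *			    bnx2x_sp_mapping(bp, func_afex_rdata),
 *			    &bnx2x_func_sp_drv);
 */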
6075
6076/**
6077 * bnx2x_func_state_change - perform Function state change transition
6078 *
6079 * @bp: device handle
6080 * @params: parameters to perform the transaction
6081 *
6082 * returns 0 in case of successfully completed transition,
6083 * negative error code in case of failure, positive
6084 * (EBUSY) value if there is a completion to that is
6085 * (EBUSY) value if there is a completion that is
6086 * not set in params->ramrod_flags for asynchronous
6087 * commands).
6088 */
6089int bnx2x_func_state_change(struct bnx2x *bp,
6090 struct bnx2x_func_state_params *params)
6091{
6092 struct bnx2x_func_sp_obj *o = params->f_obj;
Merav Sicron55c11942012-11-07 00:45:48 +00006093 int rc, cnt = 300;
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03006094 enum bnx2x_func_cmd cmd = params->cmd;
6095 unsigned long *pending = &o->pending;
6096
6097 mutex_lock(&o->one_pending_mutex);
6098
6099 /* Check that the requested transition is legal */
Merav Sicron55c11942012-11-07 00:45:48 +00006100 rc = o->check_transition(bp, o, params);
6101 if ((rc == -EBUSY) &&
6102 (test_bit(RAMROD_RETRY, &params->ramrod_flags))) {
6103 while ((rc == -EBUSY) && (--cnt > 0)) {
6104 mutex_unlock(&o->one_pending_mutex);
6105 msleep(10);
6106 mutex_lock(&o->one_pending_mutex);
6107 rc = o->check_transition(bp, o, params);
6108 }
6109 if (rc == -EBUSY) {
6110 mutex_unlock(&o->one_pending_mutex);
6111 BNX2X_ERR("timeout waiting for previous ramrod completion\n");
6112 return rc;
6113 }
6114 } else if (rc) {
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03006115 mutex_unlock(&o->one_pending_mutex);
Merav Sicron55c11942012-11-07 00:45:48 +00006116 return rc;
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03006117 }
6118
6119 /* Set "pending" bit */
6120 set_bit(cmd, pending);
6121
6122 /* Don't send a command if only driver cleanup was requested */
6123 if (test_bit(RAMROD_DRV_CLR_ONLY, &params->ramrod_flags)) {
6124 bnx2x_func_state_change_comp(bp, o, cmd);
6125 mutex_unlock(&o->one_pending_mutex);
6126 } else {
6127 /* Send a ramrod */
6128 rc = o->send_cmd(bp, params);
6129
6130 mutex_unlock(&o->one_pending_mutex);
6131
6132 if (rc) {
6133 o->next_state = BNX2X_F_STATE_MAX;
6134 clear_bit(cmd, pending);
6135 smp_mb__after_clear_bit();
6136 return rc;
6137 }
6138
6139 if (test_bit(RAMROD_COMP_WAIT, &params->ramrod_flags)) {
6140 rc = o->wait_comp(bp, o, cmd);
6141 if (rc)
6142 return rc;
6143
6144 return 0;
6145 }
6146 }
6147
6148 return !!test_bit(cmd, pending);
Vladislav Zolotarov042181f2011-06-14 01:33:39 +00006149}
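
/* Illustrative caller sketch -- a minimal synchronous TX_STOP, waiting
 * for its completion (variable names are placeholders):
 *
 *	struct bnx2x_func_state_params func_params = {NULL};
 *	int rc;
 *
 *	func_params.f_obj = &bp->func_obj;
 *	func_params.cmd = BNX2X_F_CMD_TX_STOP;
 *	__set_bit(RAMROD_COMP_WAIT, &func_params.ramrod_flags);
 *	rc = bnx2x_func_state_change(bp, &func_params);
 */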