/* bnx2x_sp.c: Broadcom Everest network driver.
 *
 * Copyright (c) 2011-2013 Broadcom Corporation
 *
 * Unless you and Broadcom execute a separate written software license
 * agreement governing use of this software, this software is licensed to you
 * under the terms of the GNU General Public License version 2, available
 * at http://www.gnu.org/licenses/old-licenses/gpl-2.0.html (the "GPL").
 *
 * Notwithstanding the above, under no circumstances may you combine this
 * software in any way with any other Broadcom software provided under a
 * license other than the GPL, without Broadcom's express prior written
 * consent.
 *
 * Maintained by: Eilon Greenstein <eilong@broadcom.com>
 * Written by: Vladislav Zolotarov
 *
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/module.h>
#include <linux/crc32.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/crc32c.h>
#include "bnx2x.h"
#include "bnx2x_cmn.h"
#include "bnx2x_sp.h"

#define BNX2X_MAX_EMUL_MULTI 16

/**** Exe Queue interfaces ****/

/**
 * bnx2x_exe_queue_init - init the Exe Queue object
 *
 * @bp: driver handle
 * @o: pointer to the object
 * @exe_len: length
 * @owner: pointer to the owner
 * @validate: validate function pointer
 * @remove: remove function pointer
 * @optimize: optimize function pointer
 * @exec: execute function pointer
 * @get: get function pointer
 */
static inline void bnx2x_exe_queue_init(struct bnx2x *bp,
                                        struct bnx2x_exe_queue_obj *o,
                                        int exe_len,
                                        union bnx2x_qable_obj *owner,
                                        exe_q_validate validate,
                                        exe_q_remove remove,
                                        exe_q_optimize optimize,
                                        exe_q_execute exec,
                                        exe_q_get get)
{
        memset(o, 0, sizeof(*o));

        INIT_LIST_HEAD(&o->exe_queue);
        INIT_LIST_HEAD(&o->pending_comp);

        spin_lock_init(&o->lock);

        o->exe_chunk_len = exe_len;
        o->owner         = owner;

        /* Owner specific callbacks */
        o->validate      = validate;
        o->remove        = remove;
        o->optimize      = optimize;
        o->execute       = exec;
        o->get           = get;

        DP(BNX2X_MSG_SP, "Setup the execution queue with the chunk length of %d\n",
           exe_len);
}
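
/*
 * Usage sketch (illustrative only, not compiled): an owner object - e.g. a
 * vlan_mac object - wires its callbacks into its embedded queue at setup
 * time; here 1 is a placeholder chunk length and the callbacks are whatever
 * the owner actually implements (bnx2x_execute_vlan_mac is assumed here):
 *
 *      bnx2x_exe_queue_init(bp, &o->exe_queue, 1,
 *                           (union bnx2x_qable_obj *)o,
 *                           bnx2x_validate_vlan_mac,
 *                           bnx2x_remove_vlan_mac,
 *                           bnx2x_optimize_vlan_mac,
 *                           bnx2x_execute_vlan_mac,
 *                           bnx2x_exeq_get_mac);
 */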

static inline void bnx2x_exe_queue_free_elem(struct bnx2x *bp,
                                             struct bnx2x_exeq_elem *elem)
{
        DP(BNX2X_MSG_SP, "Deleting an exe_queue element\n");
        kfree(elem);
}

static inline int bnx2x_exe_queue_length(struct bnx2x_exe_queue_obj *o)
{
        struct bnx2x_exeq_elem *elem;
        int cnt = 0;

        spin_lock_bh(&o->lock);

        list_for_each_entry(elem, &o->exe_queue, link)
                cnt++;

        spin_unlock_bh(&o->lock);

        return cnt;
}

/**
 * bnx2x_exe_queue_add - add a new element to the execution queue
 *
 * @bp: driver handle
 * @o: queue
 * @elem: new command element to add
 * @restore: true - do not optimize the command
 *
 * If the element is optimized away or is illegal, frees it.
 */
static inline int bnx2x_exe_queue_add(struct bnx2x *bp,
                                      struct bnx2x_exe_queue_obj *o,
                                      struct bnx2x_exeq_elem *elem,
                                      bool restore)
{
        int rc;

        spin_lock_bh(&o->lock);

        if (!restore) {
                /* Try to optimize this element away against pending commands */
                rc = o->optimize(bp, o->owner, elem);
                if (rc)
                        goto free_and_exit;

                /* Check if this request is ok */
                rc = o->validate(bp, o->owner, elem);
                if (rc) {
                        DP(BNX2X_MSG_SP, "Preamble failed: %d\n", rc);
                        goto free_and_exit;
                }
        }

        /* If so, add it to the execution queue */
        list_add_tail(&elem->link, &o->exe_queue);

        spin_unlock_bh(&o->lock);

        return 0;

free_and_exit:
        bnx2x_exe_queue_free_elem(bp, elem);

        spin_unlock_bh(&o->lock);

        return rc;
}

static inline void __bnx2x_exe_queue_reset_pending(
        struct bnx2x *bp,
        struct bnx2x_exe_queue_obj *o)
{
        struct bnx2x_exeq_elem *elem;

        while (!list_empty(&o->pending_comp)) {
                elem = list_first_entry(&o->pending_comp,
                                        struct bnx2x_exeq_elem, link);

                list_del(&elem->link);
                bnx2x_exe_queue_free_elem(bp, elem);
        }
}

/**
 * bnx2x_exe_queue_step - execute one execution chunk atomically
 *
 * @bp: driver handle
 * @o: queue
 * @ramrod_flags: flags
 *
 * (Should be called while holding the exe_queue->lock).
 */
static inline int bnx2x_exe_queue_step(struct bnx2x *bp,
                                       struct bnx2x_exe_queue_obj *o,
                                       unsigned long *ramrod_flags)
{
        struct bnx2x_exeq_elem *elem, spacer;
        int cur_len = 0, rc;

        memset(&spacer, 0, sizeof(spacer));

        /* Next step should not be performed until the current is finished,
         * unless a DRV_CLEAR_ONLY bit is set. In this case we just want to
         * properly clear object internals without sending any command to the FW
         * which also implies there won't be any completion to clear the
         * 'pending' list.
         */
        if (!list_empty(&o->pending_comp)) {
                if (test_bit(RAMROD_DRV_CLR_ONLY, ramrod_flags)) {
                        DP(BNX2X_MSG_SP, "RAMROD_DRV_CLR_ONLY requested: resetting a pending_comp list\n");
                        __bnx2x_exe_queue_reset_pending(bp, o);
                } else {
                        return 1;
                }
        }

        /* Run through the pending commands list and create a next
         * execution chunk.
         */
        while (!list_empty(&o->exe_queue)) {
                elem = list_first_entry(&o->exe_queue, struct bnx2x_exeq_elem,
                                        link);
                WARN_ON(!elem->cmd_len);

                if (cur_len + elem->cmd_len <= o->exe_chunk_len) {
                        cur_len += elem->cmd_len;
                        /* Prevent from both lists being empty when moving an
                         * element. This will allow the call of
                         * bnx2x_exe_queue_empty() without locking.
                         */
                        list_add_tail(&spacer.link, &o->pending_comp);
                        mb();
                        list_move_tail(&elem->link, &o->pending_comp);
                        list_del(&spacer.link);
                } else
                        break;
        }

        /* Sanity check */
        if (!cur_len)
                return 0;

        rc = o->execute(bp, o->owner, &o->pending_comp, ramrod_flags);
        if (rc < 0)
                /* In case of an error return the commands back to the queue
                 * and reset the pending_comp.
                 */
                list_splice_init(&o->pending_comp, &o->exe_queue);
        else if (!rc)
                /* If zero is returned, means there are no outstanding pending
                 * completions and we may dismiss the pending list.
                 */
                __bnx2x_exe_queue_reset_pending(bp, o);

        return rc;
}
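
/*
 * Typical flow (illustrative sketch, not compiled): a caller queues an
 * element and then kicks one chunk of execution under the queue lock; a
 * return value of 1 means a previous chunk is still pending completion:
 *
 *      rc = bnx2x_exe_queue_add(bp, &o->exe_queue, elem, false);
 *      if (rc)
 *              return rc;
 *      spin_lock_bh(&o->exe_queue.lock);
 *      rc = bnx2x_exe_queue_step(bp, &o->exe_queue, &ramrod_flags);
 *      spin_unlock_bh(&o->exe_queue.lock);
 */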

static inline bool bnx2x_exe_queue_empty(struct bnx2x_exe_queue_obj *o)
{
        bool empty = list_empty(&o->exe_queue);

        /* Don't reorder!!! */
        mb();

        return empty && list_empty(&o->pending_comp);
}

static inline struct bnx2x_exeq_elem *bnx2x_exe_queue_alloc_elem(
        struct bnx2x *bp)
{
        DP(BNX2X_MSG_SP, "Allocating a new exe_queue element\n");
        return kzalloc(sizeof(struct bnx2x_exeq_elem), GFP_ATOMIC);
}
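
/*
 * Element lifecycle sketch (illustrative, not compiled): elements are
 * allocated atomically and, once handed to bnx2x_exe_queue_add(), are owned
 * by the queue - on optimization or validation failure the queue frees them
 * itself, so the caller must not reuse the element after a failed add:
 *
 *      struct bnx2x_exeq_elem *elem = bnx2x_exe_queue_alloc_elem(bp);
 *
 *      if (!elem)
 *              return -ENOMEM;
 *      elem->cmd_data.vlan_mac.cmd = BNX2X_VLAN_MAC_ADD;
 *      rc = bnx2x_exe_queue_add(bp, &o->exe_queue, elem, false);
 *      (no bnx2x_exe_queue_free_elem() here - the add path already did it
 *       on failure)
 */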

/************************ raw_obj functions ***********************************/
static bool bnx2x_raw_check_pending(struct bnx2x_raw_obj *o)
{
        return !!test_bit(o->state, o->pstate);
}

static void bnx2x_raw_clear_pending(struct bnx2x_raw_obj *o)
{
        smp_mb__before_clear_bit();
        clear_bit(o->state, o->pstate);
        smp_mb__after_clear_bit();
}

static void bnx2x_raw_set_pending(struct bnx2x_raw_obj *o)
{
        smp_mb__before_clear_bit();
        set_bit(o->state, o->pstate);
        smp_mb__after_clear_bit();
}

/**
 * bnx2x_state_wait - wait until the given bit (state) is cleared
 *
 * @bp: device handle
 * @state: state which is to be cleared
 * @pstate: state buffer
 *
 */
static inline int bnx2x_state_wait(struct bnx2x *bp, int state,
                                   unsigned long *pstate)
{
        /* can take a while if any port is running */
        int cnt = 5000;

        if (CHIP_REV_IS_EMUL(bp))
                cnt *= 20;

        DP(BNX2X_MSG_SP, "waiting for state to become %d\n", state);

        might_sleep();
        while (cnt--) {
                if (!test_bit(state, pstate)) {
#ifdef BNX2X_STOP_ON_ERROR
                        DP(BNX2X_MSG_SP, "exit (cnt %d)\n", 5000 - cnt);
#endif
                        return 0;
                }

                usleep_range(1000, 2000);

                if (bp->panic)
                        return -EIO;
        }

        /* timeout! */
        BNX2X_ERR("timeout waiting for state %d\n", state);
#ifdef BNX2X_STOP_ON_ERROR
        bnx2x_panic();
#endif

        return -EBUSY;
}

static int bnx2x_raw_wait(struct bnx2x *bp, struct bnx2x_raw_obj *raw)
{
        return bnx2x_state_wait(bp, raw->state, raw->pstate);
}
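
/*
 * Pending-bit protocol sketch (illustrative, not compiled): a submitter sets
 * the pending bit before firing a ramrod, the completion path clears it, and
 * bnx2x_state_wait() polls for the transition:
 *
 *      r->set_pending(r);
 *      ... send the ramrod to the FW ...
 *      rc = r->wait_comp(bp, r);   (sleeps until r->clear_pending(r)
 *                                   runs from the completion handler)
 */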

/***************** Classification verbs: Set/Del MAC/VLAN/VLAN-MAC ************/
/* credit handling callbacks */
static bool bnx2x_get_cam_offset_mac(struct bnx2x_vlan_mac_obj *o, int *offset)
{
        struct bnx2x_credit_pool_obj *mp = o->macs_pool;

        WARN_ON(!mp);

        return mp->get_entry(mp, offset);
}

static bool bnx2x_get_credit_mac(struct bnx2x_vlan_mac_obj *o)
{
        struct bnx2x_credit_pool_obj *mp = o->macs_pool;

        WARN_ON(!mp);

        return mp->get(mp, 1);
}

static bool bnx2x_get_cam_offset_vlan(struct bnx2x_vlan_mac_obj *o, int *offset)
{
        struct bnx2x_credit_pool_obj *vp = o->vlans_pool;

        WARN_ON(!vp);

        return vp->get_entry(vp, offset);
}

static bool bnx2x_get_credit_vlan(struct bnx2x_vlan_mac_obj *o)
{
        struct bnx2x_credit_pool_obj *vp = o->vlans_pool;

        WARN_ON(!vp);

        return vp->get(vp, 1);
}

static bool bnx2x_get_credit_vlan_mac(struct bnx2x_vlan_mac_obj *o)
{
        struct bnx2x_credit_pool_obj *mp = o->macs_pool;
        struct bnx2x_credit_pool_obj *vp = o->vlans_pool;

        if (!mp->get(mp, 1))
                return false;

        if (!vp->get(vp, 1)) {
                mp->put(mp, 1);
                return false;
        }

        return true;
}

static bool bnx2x_put_cam_offset_mac(struct bnx2x_vlan_mac_obj *o, int offset)
{
        struct bnx2x_credit_pool_obj *mp = o->macs_pool;

        return mp->put_entry(mp, offset);
}

static bool bnx2x_put_credit_mac(struct bnx2x_vlan_mac_obj *o)
{
        struct bnx2x_credit_pool_obj *mp = o->macs_pool;

        return mp->put(mp, 1);
}

static bool bnx2x_put_cam_offset_vlan(struct bnx2x_vlan_mac_obj *o, int offset)
{
        struct bnx2x_credit_pool_obj *vp = o->vlans_pool;

        return vp->put_entry(vp, offset);
}

static bool bnx2x_put_credit_vlan(struct bnx2x_vlan_mac_obj *o)
{
        struct bnx2x_credit_pool_obj *vp = o->vlans_pool;

        return vp->put(vp, 1);
}

static bool bnx2x_put_credit_vlan_mac(struct bnx2x_vlan_mac_obj *o)
{
        struct bnx2x_credit_pool_obj *mp = o->macs_pool;
        struct bnx2x_credit_pool_obj *vp = o->vlans_pool;

        if (!mp->put(mp, 1))
                return false;

        if (!vp->put(vp, 1)) {
                mp->get(mp, 1);
                return false;
        }

        return true;
}
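
/*
 * Design note (illustrative sketch, not compiled): a VLAN-MAC entry needs a
 * credit from *both* pools, so the combined helpers above roll back their
 * first pool operation when the second one fails, giving callers an
 * all-or-nothing credit transaction in both directions:
 *
 *      if (!o->get_credit(o))          (ADD path, both pools or neither)
 *              return -EINVAL;
 *      ...
 *      if (!o->put_credit(o))          (DEL path mirrors the get)
 *              return -EINVAL;
 */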

/**
 * __bnx2x_vlan_mac_h_write_trylock - try getting the vlan mac writer lock
 *
 * @bp: device handle
 * @o: vlan_mac object
 *
 * @details: Non-blocking implementation; should be called under execution
 *           queue lock.
 */
static int __bnx2x_vlan_mac_h_write_trylock(struct bnx2x *bp,
                                            struct bnx2x_vlan_mac_obj *o)
{
        if (o->head_reader) {
                DP(BNX2X_MSG_SP, "vlan_mac_lock writer - There are readers; Busy\n");
                return -EBUSY;
        }

        DP(BNX2X_MSG_SP, "vlan_mac_lock writer - Taken\n");
        return 0;
}

/**
 * __bnx2x_vlan_mac_h_exec_pending - execute step instead of a previous step
 *
 * @bp: device handle
 * @o: vlan_mac object
 *
 * @details Should be called under execution queue lock; notice it might release
 *          and reclaim it during its run.
 */
static void __bnx2x_vlan_mac_h_exec_pending(struct bnx2x *bp,
                                            struct bnx2x_vlan_mac_obj *o)
{
        int rc;
        unsigned long ramrod_flags = o->saved_ramrod_flags;

        DP(BNX2X_MSG_SP, "vlan_mac_lock execute pending command with ramrod flags %lu\n",
           ramrod_flags);
        o->head_exe_request = false;
        o->saved_ramrod_flags = 0;
        rc = bnx2x_exe_queue_step(bp, &o->exe_queue, &ramrod_flags);
        if (rc != 0) {
                BNX2X_ERR("execution of pending commands failed with rc %d\n",
                          rc);
#ifdef BNX2X_STOP_ON_ERROR
                bnx2x_panic();
#endif
        }
}

/**
 * __bnx2x_vlan_mac_h_pend - Pend an execution step which couldn't run
 *
 * @bp: device handle
 * @o: vlan_mac object
 * @ramrod_flags: ramrod flags of missed execution
 *
 * @details Should be called under execution queue lock.
 */
static void __bnx2x_vlan_mac_h_pend(struct bnx2x *bp,
                                    struct bnx2x_vlan_mac_obj *o,
                                    unsigned long ramrod_flags)
{
        o->head_exe_request = true;
        o->saved_ramrod_flags = ramrod_flags;
        DP(BNX2X_MSG_SP, "Placing pending execution with ramrod flags %lu\n",
           ramrod_flags);
}

/**
 * __bnx2x_vlan_mac_h_write_unlock - unlock the vlan mac head list writer lock
 *
 * @bp: device handle
 * @o: vlan_mac object
 *
 * @details Should be called under execution queue lock. Notice if a pending
 *          execution exists, it would perform it - possibly releasing and
 *          reclaiming the execution queue lock.
 */
static void __bnx2x_vlan_mac_h_write_unlock(struct bnx2x *bp,
                                            struct bnx2x_vlan_mac_obj *o)
{
        /* It's possible a new pending execution was added since this writer
         * executed. If so, execute again. [Ad infinitum]
         */
        while (o->head_exe_request) {
                DP(BNX2X_MSG_SP, "vlan_mac_lock - writer release encountered a pending request\n");
                __bnx2x_vlan_mac_h_exec_pending(bp, o);
        }
}

/**
 * bnx2x_vlan_mac_h_write_unlock - unlock the vlan mac head list writer lock
 *
 * @bp: device handle
 * @o: vlan_mac object
 *
 * @details Notice if a pending execution exists, it would perform it -
 *          possibly releasing and reclaiming the execution queue lock.
 */
void bnx2x_vlan_mac_h_write_unlock(struct bnx2x *bp,
                                   struct bnx2x_vlan_mac_obj *o)
{
        spin_lock_bh(&o->exe_queue.lock);
        __bnx2x_vlan_mac_h_write_unlock(bp, o);
        spin_unlock_bh(&o->exe_queue.lock);
}

/**
 * __bnx2x_vlan_mac_h_read_lock - lock the vlan mac head list reader lock
 *
 * @bp: device handle
 * @o: vlan_mac object
 *
 * @details Should be called under the execution queue lock. May sleep. May
 *          release and reclaim execution queue lock during its run.
 */
static int __bnx2x_vlan_mac_h_read_lock(struct bnx2x *bp,
                                        struct bnx2x_vlan_mac_obj *o)
{
        /* If we got here, we're holding lock --> no WRITER exists */
        o->head_reader++;
        DP(BNX2X_MSG_SP, "vlan_mac_lock - locked reader - number %d\n",
           o->head_reader);

        return 0;
}

/**
 * bnx2x_vlan_mac_h_read_lock - lock the vlan mac head list reader lock
 *
 * @bp: device handle
 * @o: vlan_mac object
 *
 * @details May sleep. Claims and releases execution queue lock during its run.
 */
int bnx2x_vlan_mac_h_read_lock(struct bnx2x *bp,
                               struct bnx2x_vlan_mac_obj *o)
{
        int rc;

        spin_lock_bh(&o->exe_queue.lock);
        rc = __bnx2x_vlan_mac_h_read_lock(bp, o);
        spin_unlock_bh(&o->exe_queue.lock);

        return rc;
}

/**
 * __bnx2x_vlan_mac_h_read_unlock - unlock the vlan mac head list reader lock
 *
 * @bp: device handle
 * @o: vlan_mac object
 *
 * @details Should be called under execution queue lock. Notice if a pending
 *          execution exists, it would be performed if this was the last
 *          reader, possibly releasing and reclaiming the execution queue lock.
 */
static void __bnx2x_vlan_mac_h_read_unlock(struct bnx2x *bp,
                                           struct bnx2x_vlan_mac_obj *o)
{
        if (!o->head_reader) {
                BNX2X_ERR("Need to release vlan mac reader lock, but lock isn't taken\n");
#ifdef BNX2X_STOP_ON_ERROR
                bnx2x_panic();
#endif
        } else {
                o->head_reader--;
                DP(BNX2X_MSG_SP, "vlan_mac_lock - decreased readers to %d\n",
                   o->head_reader);
        }

        /* It's possible a new pending execution was added, and that this reader
         * was last - if so we need to execute the command.
         */
        if (!o->head_reader && o->head_exe_request) {
                DP(BNX2X_MSG_SP, "vlan_mac_lock - reader release encountered a pending request\n");

                /* Writer release will do the trick */
                __bnx2x_vlan_mac_h_write_unlock(bp, o);
        }
}

/**
 * bnx2x_vlan_mac_h_read_unlock - unlock the vlan mac head list reader lock
 *
 * @bp: device handle
 * @o: vlan_mac object
 *
 * @details Notice if a pending execution exists, it would be performed if this
 *          was the last reader. Claims and releases the execution queue lock
 *          during its run.
 */
void bnx2x_vlan_mac_h_read_unlock(struct bnx2x *bp,
                                  struct bnx2x_vlan_mac_obj *o)
{
        spin_lock_bh(&o->exe_queue.lock);
        __bnx2x_vlan_mac_h_read_unlock(bp, o);
        spin_unlock_bh(&o->exe_queue.lock);
}
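
/*
 * Usage sketch for the head-list reader lock (illustrative, not compiled):
 * a traversal of o->head takes the reader lock so no ramrod can reshuffle
 * the registry underneath it; the last reader to leave kicks any execution
 * step that a blocked writer had to pend:
 *
 *      rc = bnx2x_vlan_mac_h_read_lock(bp, o);
 *      if (rc)
 *              return rc;
 *      list_for_each_entry(pos, &o->head, link)
 *              ...inspect registry entries...
 *      bnx2x_vlan_mac_h_read_unlock(bp, o);
 */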

static int bnx2x_get_n_elements(struct bnx2x *bp, struct bnx2x_vlan_mac_obj *o,
                                int n, u8 *base, u8 stride, u8 size)
{
        struct bnx2x_vlan_mac_registry_elem *pos;
        u8 *next = base;
        int counter = 0;
        int read_lock;

        DP(BNX2X_MSG_SP, "get_n_elements - taking vlan_mac_lock (reader)\n");
        read_lock = bnx2x_vlan_mac_h_read_lock(bp, o);
        if (read_lock != 0)
                BNX2X_ERR("get_n_elements failed to get vlan mac reader lock; Access without lock\n");

        /* traverse list */
        list_for_each_entry(pos, &o->head, link) {
                if (counter < n) {
                        memcpy(next, &pos->u, size);
                        counter++;
                        DP(BNX2X_MSG_SP, "copied element number %d to address %p element was:\n",
                           counter, next);
                        next += stride + size;
                }
        }

        if (read_lock == 0) {
                DP(BNX2X_MSG_SP, "get_n_elements - releasing vlan_mac_lock (reader)\n");
                bnx2x_vlan_mac_h_read_unlock(bp, o);
        }

        return counter * ETH_ALEN;
}
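
/*
 * Buffer layout sketch (illustrative, not compiled; N is a placeholder):
 * the caller provides a flat buffer and each copied entry occupies 'size'
 * bytes of payload followed by 'stride' bytes of caller-defined padding,
 * so fetching up to N packed MACs looks roughly like:
 *
 *      u8 buf[N * ETH_ALEN];
 *
 *      bytes = bnx2x_get_n_elements(bp, mac_obj, N, buf, 0, ETH_ALEN);
 */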

/* check_add() callbacks */
static int bnx2x_check_mac_add(struct bnx2x *bp,
                               struct bnx2x_vlan_mac_obj *o,
                               union bnx2x_classification_ramrod_data *data)
{
        struct bnx2x_vlan_mac_registry_elem *pos;

        DP(BNX2X_MSG_SP, "Checking MAC %pM for ADD command\n", data->mac.mac);

        if (!is_valid_ether_addr(data->mac.mac))
                return -EINVAL;

        /* Check if a requested MAC already exists */
        list_for_each_entry(pos, &o->head, link)
                if (ether_addr_equal(data->mac.mac, pos->u.mac.mac) &&
                    (data->mac.is_inner_mac == pos->u.mac.is_inner_mac))
                        return -EEXIST;

        return 0;
}

static int bnx2x_check_vlan_add(struct bnx2x *bp,
                                struct bnx2x_vlan_mac_obj *o,
                                union bnx2x_classification_ramrod_data *data)
{
        struct bnx2x_vlan_mac_registry_elem *pos;

        DP(BNX2X_MSG_SP, "Checking VLAN %d for ADD command\n", data->vlan.vlan);

        list_for_each_entry(pos, &o->head, link)
                if (data->vlan.vlan == pos->u.vlan.vlan)
                        return -EEXIST;

        return 0;
}

static int bnx2x_check_vlan_mac_add(struct bnx2x *bp,
                                    struct bnx2x_vlan_mac_obj *o,
                                    union bnx2x_classification_ramrod_data *data)
{
        struct bnx2x_vlan_mac_registry_elem *pos;

        DP(BNX2X_MSG_SP, "Checking VLAN_MAC (%pM, %d) for ADD command\n",
           data->vlan_mac.mac, data->vlan_mac.vlan);

        list_for_each_entry(pos, &o->head, link)
                if ((data->vlan_mac.vlan == pos->u.vlan_mac.vlan) &&
                    ether_addr_equal_unaligned(data->vlan_mac.mac,
                                               pos->u.vlan_mac.mac) &&
                    (data->vlan_mac.is_inner_mac ==
                     pos->u.vlan_mac.is_inner_mac))
                        return -EEXIST;

        return 0;
}

/* check_del() callbacks */
static struct bnx2x_vlan_mac_registry_elem *
        bnx2x_check_mac_del(struct bnx2x *bp,
                            struct bnx2x_vlan_mac_obj *o,
                            union bnx2x_classification_ramrod_data *data)
{
        struct bnx2x_vlan_mac_registry_elem *pos;

        DP(BNX2X_MSG_SP, "Checking MAC %pM for DEL command\n", data->mac.mac);

        list_for_each_entry(pos, &o->head, link)
                if (ether_addr_equal(data->mac.mac, pos->u.mac.mac) &&
                    (data->mac.is_inner_mac == pos->u.mac.is_inner_mac))
                        return pos;

        return NULL;
}

static struct bnx2x_vlan_mac_registry_elem *
        bnx2x_check_vlan_del(struct bnx2x *bp,
                             struct bnx2x_vlan_mac_obj *o,
                             union bnx2x_classification_ramrod_data *data)
{
        struct bnx2x_vlan_mac_registry_elem *pos;

        DP(BNX2X_MSG_SP, "Checking VLAN %d for DEL command\n", data->vlan.vlan);

        list_for_each_entry(pos, &o->head, link)
                if (data->vlan.vlan == pos->u.vlan.vlan)
                        return pos;

        return NULL;
}

static struct bnx2x_vlan_mac_registry_elem *
        bnx2x_check_vlan_mac_del(struct bnx2x *bp,
                                 struct bnx2x_vlan_mac_obj *o,
                                 union bnx2x_classification_ramrod_data *data)
{
        struct bnx2x_vlan_mac_registry_elem *pos;

        DP(BNX2X_MSG_SP, "Checking VLAN_MAC (%pM, %d) for DEL command\n",
           data->vlan_mac.mac, data->vlan_mac.vlan);

        list_for_each_entry(pos, &o->head, link)
                if ((data->vlan_mac.vlan == pos->u.vlan_mac.vlan) &&
                    ether_addr_equal_unaligned(data->vlan_mac.mac,
                                               pos->u.vlan_mac.mac) &&
                    (data->vlan_mac.is_inner_mac ==
                     pos->u.vlan_mac.is_inner_mac))
                        return pos;

        return NULL;
}

/* check_move() callback */
static bool bnx2x_check_move(struct bnx2x *bp,
                             struct bnx2x_vlan_mac_obj *src_o,
                             struct bnx2x_vlan_mac_obj *dst_o,
                             union bnx2x_classification_ramrod_data *data)
{
        struct bnx2x_vlan_mac_registry_elem *pos;
        int rc;

        /* Check if we can delete the requested configuration from the first
         * object.
         */
        pos = src_o->check_del(bp, src_o, data);

        /* check if configuration can be added */
        rc = dst_o->check_add(bp, dst_o, data);

        /* If this classification can not be added (is already set)
         * or can't be deleted - return an error.
         */
        if (rc || !pos)
                return false;

        return true;
}

static bool bnx2x_check_move_always_err(
        struct bnx2x *bp,
        struct bnx2x_vlan_mac_obj *src_o,
        struct bnx2x_vlan_mac_obj *dst_o,
        union bnx2x_classification_ramrod_data *data)
{
        return false;
}

static inline u8 bnx2x_vlan_mac_get_rx_tx_flag(struct bnx2x_vlan_mac_obj *o)
{
        struct bnx2x_raw_obj *raw = &o->raw;
        u8 rx_tx_flag = 0;

        if ((raw->obj_type == BNX2X_OBJ_TYPE_TX) ||
            (raw->obj_type == BNX2X_OBJ_TYPE_RX_TX))
                rx_tx_flag |= ETH_CLASSIFY_CMD_HEADER_TX_CMD;

        if ((raw->obj_type == BNX2X_OBJ_TYPE_RX) ||
            (raw->obj_type == BNX2X_OBJ_TYPE_RX_TX))
                rx_tx_flag |= ETH_CLASSIFY_CMD_HEADER_RX_CMD;

        return rx_tx_flag;
}

void bnx2x_set_mac_in_nig(struct bnx2x *bp,
                          bool add, unsigned char *dev_addr, int index)
{
        u32 wb_data[2];
        u32 reg_offset = BP_PORT(bp) ? NIG_REG_LLH1_FUNC_MEM :
                         NIG_REG_LLH0_FUNC_MEM;

        if (!IS_MF_SI(bp) && !IS_MF_AFEX(bp))
                return;

        if (index > BNX2X_LLH_CAM_MAX_PF_LINE)
                return;

        DP(BNX2X_MSG_SP, "Going to %s LLH configuration at entry %d\n",
           (add ? "ADD" : "DELETE"), index);

        if (add) {
                /* LLH_FUNC_MEM is a u64 WB register */
                reg_offset += 8*index;

                wb_data[0] = ((dev_addr[2] << 24) | (dev_addr[3] << 16) |
                              (dev_addr[4] << 8) |  dev_addr[5]);
                wb_data[1] = ((dev_addr[0] << 8) |  dev_addr[1]);

                REG_WR_DMAE(bp, reg_offset, wb_data, 2);
        }

        REG_WR(bp, (BP_PORT(bp) ? NIG_REG_LLH1_FUNC_MEM_ENABLE :
                    NIG_REG_LLH0_FUNC_MEM_ENABLE) + 4*index, add);
}
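
/*
 * Worked example (illustrative): for dev_addr = 00:11:22:33:44:55 the two
 * wide-bus words written above come out as
 *
 *      wb_data[0] = 0x22334455;   (bytes 2..5 of the address)
 *      wb_data[1] = 0x00000011;   (bytes 0..1 of the address)
 *
 * i.e. the 48-bit MAC is split 16/32 across the u64 LLH_FUNC_MEM entry.
 */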

/**
 * bnx2x_vlan_mac_set_cmd_hdr_e2 - set a header in a single classify ramrod
 *
 * @bp: device handle
 * @o: queue for which we want to configure this rule
 * @add: if true the command is an ADD command, DEL otherwise
 * @opcode: CLASSIFY_RULE_OPCODE_XXX
 * @hdr: pointer to a header to setup
 *
 */
static inline void bnx2x_vlan_mac_set_cmd_hdr_e2(struct bnx2x *bp,
        struct bnx2x_vlan_mac_obj *o, bool add, int opcode,
        struct eth_classify_cmd_header *hdr)
{
        struct bnx2x_raw_obj *raw = &o->raw;

        hdr->client_id = raw->cl_id;
        hdr->func_id = raw->func_id;

        /* Rx or/and Tx (internal switching) configuration ? */
        hdr->cmd_general_data |=
                bnx2x_vlan_mac_get_rx_tx_flag(o);

        if (add)
                hdr->cmd_general_data |= ETH_CLASSIFY_CMD_HEADER_IS_ADD;

        hdr->cmd_general_data |=
                (opcode << ETH_CLASSIFY_CMD_HEADER_OPCODE_SHIFT);
}

/**
 * bnx2x_vlan_mac_set_rdata_hdr_e2 - set the classify ramrod data header
 *
 * @cid: connection id
 * @type: BNX2X_FILTER_XXX_PENDING
 * @hdr: pointer to header to setup
 * @rule_cnt: number of rules configured in this ramrod
 *
 * Currently we always configure one rule, and the echo field is set to
 * contain a CID and an opcode type.
 */
static inline void bnx2x_vlan_mac_set_rdata_hdr_e2(u32 cid, int type,
        struct eth_classify_header *hdr, int rule_cnt)
{
        hdr->echo = cpu_to_le32((cid & BNX2X_SWCID_MASK) |
                                (type << BNX2X_SWCID_SHIFT));
        hdr->rule_cnt = (u8)rule_cnt;
}
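
/*
 * Echo-field encoding sketch (illustrative, not compiled): the low bits
 * carry the connection id and the bits above BNX2X_SWCID_SHIFT carry the
 * pending-command type, so the completion handler can recover both from a
 * single 32-bit field:
 *
 *      echo = (cid & BNX2X_SWCID_MASK) | (type << BNX2X_SWCID_SHIFT);
 *      ...
 *      cid  = echo & BNX2X_SWCID_MASK;
 *      type = echo >> BNX2X_SWCID_SHIFT;
 */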

/* hw_config() callbacks */
static void bnx2x_set_one_mac_e2(struct bnx2x *bp,
                                 struct bnx2x_vlan_mac_obj *o,
                                 struct bnx2x_exeq_elem *elem, int rule_idx,
                                 int cam_offset)
{
        struct bnx2x_raw_obj *raw = &o->raw;
        struct eth_classify_rules_ramrod_data *data =
                (struct eth_classify_rules_ramrod_data *)(raw->rdata);
        int rule_cnt = rule_idx + 1, cmd = elem->cmd_data.vlan_mac.cmd;
        union eth_classify_rule_cmd *rule_entry = &data->rules[rule_idx];
        bool add = (cmd == BNX2X_VLAN_MAC_ADD) ? true : false;
        unsigned long *vlan_mac_flags = &elem->cmd_data.vlan_mac.vlan_mac_flags;
        u8 *mac = elem->cmd_data.vlan_mac.u.mac.mac;

        /* Set LLH CAM entry: currently only iSCSI and ETH macs are
         * relevant. In addition, current implementation is tuned for a
         * single ETH MAC.
         *
         * When multiple unicast ETH MACs PF configuration in switch
         * independent mode is required (NetQ, multiple netdev MACs,
         * etc.), consider better utilisation of 8 per function MAC
         * entries in the LLH register. There is also
         * NIG_REG_P[01]_LLH_FUNC_MEM2 registers that complete the
         * total number of CAM entries to 16.
         *
         * Currently we won't configure NIG for MACs other than a primary ETH
         * MAC and iSCSI L2 MAC.
         *
         * If this MAC is moving from one Queue to another, no need to change
         * NIG configuration.
         */
        if (cmd != BNX2X_VLAN_MAC_MOVE) {
                if (test_bit(BNX2X_ISCSI_ETH_MAC, vlan_mac_flags))
                        bnx2x_set_mac_in_nig(bp, add, mac,
                                             BNX2X_LLH_CAM_ISCSI_ETH_LINE);
                else if (test_bit(BNX2X_ETH_MAC, vlan_mac_flags))
                        bnx2x_set_mac_in_nig(bp, add, mac,
                                             BNX2X_LLH_CAM_ETH_LINE);
        }

        /* Reset the ramrod data buffer for the first rule */
        if (rule_idx == 0)
                memset(data, 0, sizeof(*data));

        /* Setup a command header */
        bnx2x_vlan_mac_set_cmd_hdr_e2(bp, o, add, CLASSIFY_RULE_OPCODE_MAC,
                                      &rule_entry->mac.header);

        DP(BNX2X_MSG_SP, "About to %s MAC %pM for Queue %d\n",
           (add ? "add" : "delete"), mac, raw->cl_id);

        /* Set a MAC itself */
        bnx2x_set_fw_mac_addr(&rule_entry->mac.mac_msb,
                              &rule_entry->mac.mac_mid,
                              &rule_entry->mac.mac_lsb, mac);
        rule_entry->mac.inner_mac =
                cpu_to_le16(elem->cmd_data.vlan_mac.u.mac.is_inner_mac);

        /* MOVE: Add a rule that will add this MAC to the target Queue */
        if (cmd == BNX2X_VLAN_MAC_MOVE) {
                rule_entry++;
                rule_cnt++;

                /* Setup ramrod data */
                bnx2x_vlan_mac_set_cmd_hdr_e2(bp,
                                              elem->cmd_data.vlan_mac.target_obj,
                                              true, CLASSIFY_RULE_OPCODE_MAC,
                                              &rule_entry->mac.header);

                /* Set a MAC itself */
                bnx2x_set_fw_mac_addr(&rule_entry->mac.mac_msb,
                                      &rule_entry->mac.mac_mid,
                                      &rule_entry->mac.mac_lsb, mac);
                rule_entry->mac.inner_mac =
                        cpu_to_le16(elem->cmd_data.vlan_mac.
                                    u.mac.is_inner_mac);
        }

        /* Set the ramrod data header */
        /* TODO: take this to the higher level in order to prevent multiple
         * writing
         */
        bnx2x_vlan_mac_set_rdata_hdr_e2(raw->cid, raw->state, &data->header,
                                        rule_cnt);
}

/**
 * bnx2x_vlan_mac_set_rdata_hdr_e1x - set a header in a single classify ramrod
 *
 * @bp: device handle
 * @o: queue
 * @type: BNX2X_FILTER_XXX_PENDING
 * @cam_offset: offset in cam memory
 * @hdr: pointer to a header to setup
 *
 * E1/E1H
 */
static inline void bnx2x_vlan_mac_set_rdata_hdr_e1x(struct bnx2x *bp,
        struct bnx2x_vlan_mac_obj *o, int type, int cam_offset,
        struct mac_configuration_hdr *hdr)
{
        struct bnx2x_raw_obj *r = &o->raw;

        hdr->length = 1;
        hdr->offset = (u8)cam_offset;
        hdr->client_id = cpu_to_le16(0xff);
        hdr->echo = cpu_to_le32((r->cid & BNX2X_SWCID_MASK) |
                                (type << BNX2X_SWCID_SHIFT));
}

static inline void bnx2x_vlan_mac_set_cfg_entry_e1x(struct bnx2x *bp,
        struct bnx2x_vlan_mac_obj *o, bool add, int opcode, u8 *mac,
        u16 vlan_id, struct mac_configuration_entry *cfg_entry)
{
        struct bnx2x_raw_obj *r = &o->raw;
        u32 cl_bit_vec = (1 << r->cl_id);

        cfg_entry->clients_bit_vector = cpu_to_le32(cl_bit_vec);
        cfg_entry->pf_id = r->func_id;
        cfg_entry->vlan_id = cpu_to_le16(vlan_id);

        if (add) {
                SET_FLAG(cfg_entry->flags, MAC_CONFIGURATION_ENTRY_ACTION_TYPE,
                         T_ETH_MAC_COMMAND_SET);
                SET_FLAG(cfg_entry->flags,
                         MAC_CONFIGURATION_ENTRY_VLAN_FILTERING_MODE, opcode);

                /* Set a MAC in a ramrod data */
                bnx2x_set_fw_mac_addr(&cfg_entry->msb_mac_addr,
                                      &cfg_entry->middle_mac_addr,
                                      &cfg_entry->lsb_mac_addr, mac);
        } else
                SET_FLAG(cfg_entry->flags, MAC_CONFIGURATION_ENTRY_ACTION_TYPE,
                         T_ETH_MAC_COMMAND_INVALIDATE);
}

static inline void bnx2x_vlan_mac_set_rdata_e1x(struct bnx2x *bp,
        struct bnx2x_vlan_mac_obj *o, int type, int cam_offset, bool add,
        u8 *mac, u16 vlan_id, int opcode, struct mac_configuration_cmd *config)
{
        struct mac_configuration_entry *cfg_entry = &config->config_table[0];
        struct bnx2x_raw_obj *raw = &o->raw;

        bnx2x_vlan_mac_set_rdata_hdr_e1x(bp, o, type, cam_offset,
                                         &config->hdr);
        bnx2x_vlan_mac_set_cfg_entry_e1x(bp, o, add, opcode, mac, vlan_id,
                                         cfg_entry);

        DP(BNX2X_MSG_SP, "%s MAC %pM CLID %d CAM offset %d\n",
           (add ? "setting" : "clearing"),
           mac, raw->cl_id, cam_offset);
}

/**
 * bnx2x_set_one_mac_e1x - fill a single MAC rule ramrod data
 *
 * @bp: device handle
 * @o: bnx2x_vlan_mac_obj
 * @elem: bnx2x_exeq_elem
 * @rule_idx: rule_idx
 * @cam_offset: cam_offset
 */
static void bnx2x_set_one_mac_e1x(struct bnx2x *bp,
                                  struct bnx2x_vlan_mac_obj *o,
                                  struct bnx2x_exeq_elem *elem, int rule_idx,
                                  int cam_offset)
{
        struct bnx2x_raw_obj *raw = &o->raw;
        struct mac_configuration_cmd *config =
                (struct mac_configuration_cmd *)(raw->rdata);
        /* 57710 and 57711 do not support MOVE command,
         * so it's either ADD or DEL
         */
        bool add = (elem->cmd_data.vlan_mac.cmd == BNX2X_VLAN_MAC_ADD) ?
                true : false;

        /* Reset the ramrod data buffer */
        memset(config, 0, sizeof(*config));

        bnx2x_vlan_mac_set_rdata_e1x(bp, o, raw->state,
                                     cam_offset, add,
                                     elem->cmd_data.vlan_mac.u.mac.mac, 0,
                                     ETH_VLAN_FILTER_ANY_VLAN, config);
}

static void bnx2x_set_one_vlan_e2(struct bnx2x *bp,
                                  struct bnx2x_vlan_mac_obj *o,
                                  struct bnx2x_exeq_elem *elem, int rule_idx,
                                  int cam_offset)
{
        struct bnx2x_raw_obj *raw = &o->raw;
        struct eth_classify_rules_ramrod_data *data =
                (struct eth_classify_rules_ramrod_data *)(raw->rdata);
        int rule_cnt = rule_idx + 1;
        union eth_classify_rule_cmd *rule_entry = &data->rules[rule_idx];
        enum bnx2x_vlan_mac_cmd cmd = elem->cmd_data.vlan_mac.cmd;
        bool add = (cmd == BNX2X_VLAN_MAC_ADD) ? true : false;
        u16 vlan = elem->cmd_data.vlan_mac.u.vlan.vlan;

        /* Reset the ramrod data buffer for the first rule */
        if (rule_idx == 0)
                memset(data, 0, sizeof(*data));

        /* Set a rule header */
        bnx2x_vlan_mac_set_cmd_hdr_e2(bp, o, add, CLASSIFY_RULE_OPCODE_VLAN,
                                      &rule_entry->vlan.header);

        DP(BNX2X_MSG_SP, "About to %s VLAN %d\n", (add ? "add" : "delete"),
           vlan);

        /* Set a VLAN itself */
        rule_entry->vlan.vlan = cpu_to_le16(vlan);

        /* MOVE: Add a rule that will add this VLAN to the target Queue */
        if (cmd == BNX2X_VLAN_MAC_MOVE) {
                rule_entry++;
                rule_cnt++;

                /* Setup ramrod data */
                bnx2x_vlan_mac_set_cmd_hdr_e2(bp,
                                              elem->cmd_data.vlan_mac.target_obj,
                                              true, CLASSIFY_RULE_OPCODE_VLAN,
                                              &rule_entry->vlan.header);

                /* Set a VLAN itself */
                rule_entry->vlan.vlan = cpu_to_le16(vlan);
        }

        /* Set the ramrod data header */
        /* TODO: take this to the higher level in order to prevent multiple
         * writing
         */
        bnx2x_vlan_mac_set_rdata_hdr_e2(raw->cid, raw->state, &data->header,
                                        rule_cnt);
}

static void bnx2x_set_one_vlan_mac_e2(struct bnx2x *bp,
                                      struct bnx2x_vlan_mac_obj *o,
                                      struct bnx2x_exeq_elem *elem,
                                      int rule_idx, int cam_offset)
{
        struct bnx2x_raw_obj *raw = &o->raw;
        struct eth_classify_rules_ramrod_data *data =
                (struct eth_classify_rules_ramrod_data *)(raw->rdata);
        int rule_cnt = rule_idx + 1;
        union eth_classify_rule_cmd *rule_entry = &data->rules[rule_idx];
        enum bnx2x_vlan_mac_cmd cmd = elem->cmd_data.vlan_mac.cmd;
        bool add = (cmd == BNX2X_VLAN_MAC_ADD) ? true : false;
        u16 vlan = elem->cmd_data.vlan_mac.u.vlan_mac.vlan;
        u8 *mac = elem->cmd_data.vlan_mac.u.vlan_mac.mac;

        /* Reset the ramrod data buffer for the first rule */
        if (rule_idx == 0)
                memset(data, 0, sizeof(*data));

        /* Set a rule header */
        bnx2x_vlan_mac_set_cmd_hdr_e2(bp, o, add, CLASSIFY_RULE_OPCODE_PAIR,
                                      &rule_entry->pair.header);

        /* Set VLAN and MAC themselves */
        rule_entry->pair.vlan = cpu_to_le16(vlan);
        bnx2x_set_fw_mac_addr(&rule_entry->pair.mac_msb,
                              &rule_entry->pair.mac_mid,
                              &rule_entry->pair.mac_lsb, mac);
        rule_entry->pair.inner_mac =
                cpu_to_le16(elem->cmd_data.vlan_mac.u.vlan_mac.is_inner_mac);
        /* MOVE: Add a rule that will add this pair to the target Queue */
        if (cmd == BNX2X_VLAN_MAC_MOVE) {
                rule_entry++;
                rule_cnt++;

                /* Setup ramrod data */
                bnx2x_vlan_mac_set_cmd_hdr_e2(bp,
                                              elem->cmd_data.vlan_mac.target_obj,
                                              true, CLASSIFY_RULE_OPCODE_PAIR,
                                              &rule_entry->pair.header);

                /* Set the VLAN/MAC pair itself */
                rule_entry->pair.vlan = cpu_to_le16(vlan);
                bnx2x_set_fw_mac_addr(&rule_entry->pair.mac_msb,
                                      &rule_entry->pair.mac_mid,
                                      &rule_entry->pair.mac_lsb, mac);
                rule_entry->pair.inner_mac =
                        cpu_to_le16(elem->cmd_data.vlan_mac.u.
                                    vlan_mac.is_inner_mac);
        }

        /* Set the ramrod data header */
        /* TODO: take this to the higher level in order to prevent multiple
         * writing
         */
        bnx2x_vlan_mac_set_rdata_hdr_e2(raw->cid, raw->state, &data->header,
                                        rule_cnt);
}

/**
 * bnx2x_set_one_vlan_mac_e1h - fill a single VLAN-MAC rule ramrod data
 *
 * @bp: device handle
 * @o: bnx2x_vlan_mac_obj
 * @elem: bnx2x_exeq_elem
 * @rule_idx: rule_idx
 * @cam_offset: cam_offset
 */
static void bnx2x_set_one_vlan_mac_e1h(struct bnx2x *bp,
                                       struct bnx2x_vlan_mac_obj *o,
                                       struct bnx2x_exeq_elem *elem,
                                       int rule_idx, int cam_offset)
{
        struct bnx2x_raw_obj *raw = &o->raw;
        struct mac_configuration_cmd *config =
                (struct mac_configuration_cmd *)(raw->rdata);
        /* 57710 and 57711 do not support MOVE command,
         * so it's either ADD or DEL
         */
        bool add = (elem->cmd_data.vlan_mac.cmd == BNX2X_VLAN_MAC_ADD) ?
                true : false;

        /* Reset the ramrod data buffer */
        memset(config, 0, sizeof(*config));

        bnx2x_vlan_mac_set_rdata_e1x(bp, o, BNX2X_FILTER_VLAN_MAC_PENDING,
                                     cam_offset, add,
                                     elem->cmd_data.vlan_mac.u.vlan_mac.mac,
                                     elem->cmd_data.vlan_mac.u.vlan_mac.vlan,
                                     ETH_VLAN_FILTER_CLASSIFY, config);
}

/**
 * bnx2x_vlan_mac_restore - reconfigure next MAC/VLAN/VLAN-MAC element
 *
 * @bp: device handle
 * @p: command parameters
 * @ppos: pointer to the cookie
 *
 * Reconfigure the next MAC/VLAN/VLAN-MAC element from the
 * previously configured elements list.
 *
 * From the command parameters only the RAMROD_COMP_WAIT bit in ramrod_flags
 * is taken into account.
 *
 * @ppos is a cookie that should be given back in the next call to make the
 * function handle the next element. If *ppos is set to NULL it will restart
 * the iterator. If returned *ppos == NULL this means that the last element
 * has been handled.
 *
 */
static int bnx2x_vlan_mac_restore(struct bnx2x *bp,
                                  struct bnx2x_vlan_mac_ramrod_params *p,
                                  struct bnx2x_vlan_mac_registry_elem **ppos)
{
        struct bnx2x_vlan_mac_registry_elem *pos;
        struct bnx2x_vlan_mac_obj *o = p->vlan_mac_obj;

        /* If list is empty - there is nothing to do here */
        if (list_empty(&o->head)) {
                *ppos = NULL;
                return 0;
        }

        /* make a step... */
        if (*ppos == NULL)
                *ppos = list_first_entry(&o->head,
                                         struct bnx2x_vlan_mac_registry_elem,
                                         link);
        else
                *ppos = list_next_entry(*ppos, link);

        pos = *ppos;

        /* If it's the last step - return NULL */
        if (list_is_last(&pos->link, &o->head))
                *ppos = NULL;

        /* Prepare a 'user_req' */
        memcpy(&p->user_req.u, &pos->u, sizeof(pos->u));

        /* Set the command */
        p->user_req.cmd = BNX2X_VLAN_MAC_ADD;

        /* Set vlan_mac_flags */
        p->user_req.vlan_mac_flags = pos->vlan_mac_flags;

        /* Set a restore bit */
        __set_bit(RAMROD_RESTORE, &p->ramrod_flags);

        return bnx2x_config_vlan_mac(bp, p);
}
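
/*
 * Iterator usage sketch (illustrative, not compiled): *ppos is an opaque
 * cookie; start with NULL and keep calling until it comes back NULL, at
 * which point every registry element has been re-added:
 *
 *      struct bnx2x_vlan_mac_registry_elem *pos = NULL;
 *
 *      do {
 *              rc = bnx2x_vlan_mac_restore(bp, p, &pos);
 *              if (rc)
 *                      return rc;
 *      } while (pos);
 */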
1278
Yuval Mintz16a5fd92013-06-02 00:06:18 +00001279/* bnx2x_exeq_get_mac/bnx2x_exeq_get_vlan/bnx2x_exeq_get_vlan_mac return a
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03001280 * pointer to an element with a specific criteria and NULL if such an element
1281 * hasn't been found.
1282 */
1283static struct bnx2x_exeq_elem *bnx2x_exeq_get_mac(
1284 struct bnx2x_exe_queue_obj *o,
1285 struct bnx2x_exeq_elem *elem)
1286{
1287 struct bnx2x_exeq_elem *pos;
1288 struct bnx2x_mac_ramrod_data *data = &elem->cmd_data.vlan_mac.u.mac;
1289
1290 /* Check pending for execution commands */
1291 list_for_each_entry(pos, &o->exe_queue, link)
1292 if (!memcmp(&pos->cmd_data.vlan_mac.u.mac, data,
1293 sizeof(*data)) &&
1294 (pos->cmd_data.vlan_mac.cmd == elem->cmd_data.vlan_mac.cmd))
1295 return pos;
1296
1297 return NULL;
1298}
1299
1300static struct bnx2x_exeq_elem *bnx2x_exeq_get_vlan(
1301 struct bnx2x_exe_queue_obj *o,
1302 struct bnx2x_exeq_elem *elem)
1303{
1304 struct bnx2x_exeq_elem *pos;
1305 struct bnx2x_vlan_ramrod_data *data = &elem->cmd_data.vlan_mac.u.vlan;
1306
1307 /* Check pending for execution commands */
1308 list_for_each_entry(pos, &o->exe_queue, link)
1309 if (!memcmp(&pos->cmd_data.vlan_mac.u.vlan, data,
1310 sizeof(*data)) &&
1311 (pos->cmd_data.vlan_mac.cmd == elem->cmd_data.vlan_mac.cmd))
1312 return pos;
1313
1314 return NULL;
1315}
1316
1317static struct bnx2x_exeq_elem *bnx2x_exeq_get_vlan_mac(
1318 struct bnx2x_exe_queue_obj *o,
1319 struct bnx2x_exeq_elem *elem)
1320{
1321 struct bnx2x_exeq_elem *pos;
1322 struct bnx2x_vlan_mac_ramrod_data *data =
1323 &elem->cmd_data.vlan_mac.u.vlan_mac;
1324
1325 /* Check pending for execution commands */
1326 list_for_each_entry(pos, &o->exe_queue, link)
1327 if (!memcmp(&pos->cmd_data.vlan_mac.u.vlan_mac, data,
1328 sizeof(*data)) &&
1329 (pos->cmd_data.vlan_mac.cmd == elem->cmd_data.vlan_mac.cmd))
1330 return pos;
1331
1332 return NULL;
1333}
1334
/**
 * bnx2x_validate_vlan_mac_add - check if an ADD command can be executed
 *
 * @bp: device handle
 * @qo: bnx2x_qable_obj
 * @elem: bnx2x_exeq_elem
 *
 * Checks that the requested configuration can be added. If yes and if
 * requested, consume CAM credit.
 *
 * The 'validate' is run after the 'optimize'.
 *
 */
static inline int bnx2x_validate_vlan_mac_add(struct bnx2x *bp,
                                              union bnx2x_qable_obj *qo,
                                              struct bnx2x_exeq_elem *elem)
{
        struct bnx2x_vlan_mac_obj *o = &qo->vlan_mac;
        struct bnx2x_exe_queue_obj *exeq = &o->exe_queue;
        int rc;

        /* Check the registry */
        rc = o->check_add(bp, o, &elem->cmd_data.vlan_mac.u);
        if (rc) {
                DP(BNX2X_MSG_SP, "ADD command is not allowed considering current registry state.\n");
                return rc;
        }

        /* Check if there is a pending ADD command for this
         * MAC/VLAN/VLAN-MAC. Return an error if there is.
         */
        if (exeq->get(exeq, elem)) {
                DP(BNX2X_MSG_SP, "There is a pending ADD command already\n");
                return -EEXIST;
        }

        /* TODO: Check the pending MOVE from other objects where this
         * object is a destination object.
         */

        /* Consume the credit if not requested not to */
        if (!(test_bit(BNX2X_DONT_CONSUME_CAM_CREDIT,
                       &elem->cmd_data.vlan_mac.vlan_mac_flags) ||
              o->get_credit(o)))
                return -EINVAL;

        return 0;
}

/**
 * bnx2x_validate_vlan_mac_del - check if the DEL command can be executed
 *
 * @bp: device handle
 * @qo: queueable object to check
 * @elem: element that needs to be deleted
 *
 * Checks that the requested configuration can be deleted. If yes and if
 * requested, returns a CAM credit.
 *
 * The 'validate' is run after the 'optimize'.
 */
static inline int bnx2x_validate_vlan_mac_del(struct bnx2x *bp,
                                              union bnx2x_qable_obj *qo,
                                              struct bnx2x_exeq_elem *elem)
{
        struct bnx2x_vlan_mac_obj *o = &qo->vlan_mac;
        struct bnx2x_vlan_mac_registry_elem *pos;
        struct bnx2x_exe_queue_obj *exeq = &o->exe_queue;
        struct bnx2x_exeq_elem query_elem;

        /* If this classification can not be deleted (doesn't exist)
         * - return -EEXIST.
         */
        pos = o->check_del(bp, o, &elem->cmd_data.vlan_mac.u);
        if (!pos) {
                DP(BNX2X_MSG_SP, "DEL command is not allowed considering current registry state\n");
                return -EEXIST;
        }

        /* Check if there are pending DEL or MOVE commands for this
         * MAC/VLAN/VLAN-MAC. Return an error if so.
         */
        memcpy(&query_elem, elem, sizeof(query_elem));

        /* Check for MOVE commands */
        query_elem.cmd_data.vlan_mac.cmd = BNX2X_VLAN_MAC_MOVE;
        if (exeq->get(exeq, &query_elem)) {
                BNX2X_ERR("There is a pending MOVE command already\n");
                return -EINVAL;
        }

        /* Check for DEL commands */
        if (exeq->get(exeq, elem)) {
                DP(BNX2X_MSG_SP, "There is a pending DEL command already\n");
                return -EEXIST;
        }

        /* Return the credit to the credit pool if not requested not to */
        if (!(test_bit(BNX2X_DONT_CONSUME_CAM_CREDIT,
                       &elem->cmd_data.vlan_mac.vlan_mac_flags) ||
              o->put_credit(o))) {
                BNX2X_ERR("Failed to return a credit\n");
                return -EINVAL;
        }

        return 0;
}

/**
 * bnx2x_validate_vlan_mac_move - check if the MOVE command can be executed
 *
 * @bp: device handle
 * @qo: queueable object to check (source)
 * @elem: element that needs to be moved
 *
 * Checks that the requested configuration can be moved. If yes and if
 * requested, returns a CAM credit.
 *
 * The 'validate' is run after the 'optimize'.
 */
static inline int bnx2x_validate_vlan_mac_move(struct bnx2x *bp,
                                               union bnx2x_qable_obj *qo,
                                               struct bnx2x_exeq_elem *elem)
{
        struct bnx2x_vlan_mac_obj *src_o = &qo->vlan_mac;
        struct bnx2x_vlan_mac_obj *dest_o = elem->cmd_data.vlan_mac.target_obj;
        struct bnx2x_exeq_elem query_elem;
        struct bnx2x_exe_queue_obj *src_exeq = &src_o->exe_queue;
        struct bnx2x_exe_queue_obj *dest_exeq = &dest_o->exe_queue;

        /* Check if we can perform this operation based on the current registry
         * state.
         */
        if (!src_o->check_move(bp, src_o, dest_o,
                               &elem->cmd_data.vlan_mac.u)) {
                DP(BNX2X_MSG_SP, "MOVE command is not allowed considering current registry state\n");
                return -EINVAL;
        }

        /* Check if there is an already pending DEL or MOVE command for the
         * source object or ADD command for a destination object. Return an
         * error if so.
         */
        memcpy(&query_elem, elem, sizeof(query_elem));

        /* Check DEL on source */
        query_elem.cmd_data.vlan_mac.cmd = BNX2X_VLAN_MAC_DEL;
        if (src_exeq->get(src_exeq, &query_elem)) {
                BNX2X_ERR("There is a pending DEL command on the source queue already\n");
                return -EINVAL;
        }

        /* Check MOVE on source */
        if (src_exeq->get(src_exeq, elem)) {
                DP(BNX2X_MSG_SP, "There is a pending MOVE command already\n");
                return -EEXIST;
        }

        /* Check ADD on destination */
        query_elem.cmd_data.vlan_mac.cmd = BNX2X_VLAN_MAC_ADD;
        if (dest_exeq->get(dest_exeq, &query_elem)) {
                BNX2X_ERR("There is a pending ADD command on the destination queue already\n");
                return -EINVAL;
        }

        /* Consume the credit if not requested not to */
        if (!(test_bit(BNX2X_DONT_CONSUME_CAM_CREDIT_DEST,
                       &elem->cmd_data.vlan_mac.vlan_mac_flags) ||
              dest_o->get_credit(dest_o)))
                return -EINVAL;

        if (!(test_bit(BNX2X_DONT_CONSUME_CAM_CREDIT,
                       &elem->cmd_data.vlan_mac.vlan_mac_flags) ||
              src_o->put_credit(src_o))) {
                /* return the credit taken from dest... */
                dest_o->put_credit(dest_o);
                return -EINVAL;
        }

        return 0;
}

static int bnx2x_validate_vlan_mac(struct bnx2x *bp,
                                   union bnx2x_qable_obj *qo,
                                   struct bnx2x_exeq_elem *elem)
{
        switch (elem->cmd_data.vlan_mac.cmd) {
        case BNX2X_VLAN_MAC_ADD:
                return bnx2x_validate_vlan_mac_add(bp, qo, elem);
        case BNX2X_VLAN_MAC_DEL:
                return bnx2x_validate_vlan_mac_del(bp, qo, elem);
        case BNX2X_VLAN_MAC_MOVE:
                return bnx2x_validate_vlan_mac_move(bp, qo, elem);
        default:
                return -EINVAL;
        }
}

static int bnx2x_remove_vlan_mac(struct bnx2x *bp,
                                 union bnx2x_qable_obj *qo,
                                 struct bnx2x_exeq_elem *elem)
{
        int rc = 0;

        /* If consumption wasn't required, nothing to do */
        if (test_bit(BNX2X_DONT_CONSUME_CAM_CREDIT,
                     &elem->cmd_data.vlan_mac.vlan_mac_flags))
                return 0;

        switch (elem->cmd_data.vlan_mac.cmd) {
        case BNX2X_VLAN_MAC_ADD:
        case BNX2X_VLAN_MAC_MOVE:
                rc = qo->vlan_mac.put_credit(&qo->vlan_mac);
                break;
        case BNX2X_VLAN_MAC_DEL:
                rc = qo->vlan_mac.get_credit(&qo->vlan_mac);
                break;
        default:
                return -EINVAL;
        }

        if (rc != true)
                return -EINVAL;

        return 0;
}
1561
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03001562/**
Yuval Mintz16a5fd92013-06-02 00:06:18 +00001563 * bnx2x_wait_vlan_mac - passively wait for 5 seconds until all work completes.
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03001564 *
1565 * @bp: device handle
1566 * @o: bnx2x_vlan_mac_obj
1567 *
1568 */
1569static int bnx2x_wait_vlan_mac(struct bnx2x *bp,
1570 struct bnx2x_vlan_mac_obj *o)
1571{
1572 int cnt = 5000, rc;
1573 struct bnx2x_exe_queue_obj *exeq = &o->exe_queue;
1574 struct bnx2x_raw_obj *raw = &o->raw;
1575
1576 while (cnt--) {
1577 /* Wait for the current command to complete */
1578 rc = raw->wait_comp(bp, raw);
1579 if (rc)
1580 return rc;
1581
1582 /* Wait until there are no pending commands */
1583 if (!bnx2x_exe_queue_empty(exeq))
Yuval Mintz0926d492013-01-23 03:21:45 +00001584 usleep_range(1000, 2000);
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03001585 else
1586 return 0;
1587 }
1588
1589 return -EBUSY;
1590}
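
/* A standalone sketch (not part of the driver) of the bounded-poll idiom
 * used above: with cnt = 5000 and a 1-2ms usleep_range() per iteration the
 * total wait is capped at roughly the 5 seconds mentioned in the kernel-doc.
 * The poll_until() name and the 'done' predicate are hypothetical.
 */
static int poll_until(struct bnx2x *bp, bool (*done)(struct bnx2x *bp))
{
	int cnt = 5000;

	while (cnt--) {
		if (done(bp))
			return 0;		/* all work completed */
		usleep_range(1000, 2000);	/* sleep ~1-2ms and re-check */
	}

	return -EBUSY;				/* still busy after ~5s */
}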
1591
Yuval Mintz8b09be52013-08-01 17:30:59 +03001592static int __bnx2x_vlan_mac_execute_step(struct bnx2x *bp,
1593 struct bnx2x_vlan_mac_obj *o,
1594 unsigned long *ramrod_flags)
1595{
1596 int rc = 0;
1597
1598 spin_lock_bh(&o->exe_queue.lock);
1599
1600 DP(BNX2X_MSG_SP, "vlan_mac_execute_step - trying to take writer lock\n");
1601 rc = __bnx2x_vlan_mac_h_write_trylock(bp, o);
1602
1603 if (rc != 0) {
1604 __bnx2x_vlan_mac_h_pend(bp, o, *ramrod_flags);
1605
 1606 /* Calling function should not differentiate between this case
1607 * and the case in which there is already a pending ramrod
1608 */
1609 rc = 1;
1610 } else {
1611 rc = bnx2x_exe_queue_step(bp, &o->exe_queue, ramrod_flags);
1612 }
1613 spin_unlock_bh(&o->exe_queue.lock);
1614
1615 return rc;
1616}
1617
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03001618/**
1619 * bnx2x_complete_vlan_mac - complete one VLAN-MAC ramrod
1620 *
1621 * @bp: device handle
1622 * @o: bnx2x_vlan_mac_obj
 1623 * @cqe: completion element (event ring entry)
 1624 * @ramrod_flags: if RAMROD_CONT is set, schedule the next execution chunk
1625 *
1626 */
1627static int bnx2x_complete_vlan_mac(struct bnx2x *bp,
1628 struct bnx2x_vlan_mac_obj *o,
1629 union event_ring_elem *cqe,
1630 unsigned long *ramrod_flags)
1631{
1632 struct bnx2x_raw_obj *r = &o->raw;
1633 int rc;
1634
Yuval Mintz8b09be52013-08-01 17:30:59 +03001635 /* Clearing the pending list & raw state must be done
 1636 * atomically (the execution flow assumes they represent the same state).
1637 */
1638 spin_lock_bh(&o->exe_queue.lock);
1639
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03001640 /* Reset pending list */
Yuval Mintz8b09be52013-08-01 17:30:59 +03001641 __bnx2x_exe_queue_reset_pending(bp, &o->exe_queue);
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03001642
1643 /* Clear pending */
1644 r->clear_pending(r);
1645
Yuval Mintz8b09be52013-08-01 17:30:59 +03001646 spin_unlock_bh(&o->exe_queue.lock);
1647
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03001648 /* If ramrod failed this is most likely a SW bug */
1649 if (cqe->message.error)
1650 return -EINVAL;
1651
Yuval Mintz2de67432013-01-23 03:21:43 +00001652 /* Run the next bulk of pending commands if requested */
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03001653 if (test_bit(RAMROD_CONT, ramrod_flags)) {
Yuval Mintz8b09be52013-08-01 17:30:59 +03001654 rc = __bnx2x_vlan_mac_execute_step(bp, o, ramrod_flags);
1655
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03001656 if (rc < 0)
1657 return rc;
1658 }
1659
1660 /* If there is more work to do return PENDING */
1661 if (!bnx2x_exe_queue_empty(&o->exe_queue))
1662 return 1;
1663
1664 return 0;
1665}
1666
1667/**
1668 * bnx2x_optimize_vlan_mac - optimize ADD and DEL commands.
1669 *
1670 * @bp: device handle
 1671 * @qo: bnx2x_qable_obj
1672 * @elem: bnx2x_exeq_elem
1673 */
1674static int bnx2x_optimize_vlan_mac(struct bnx2x *bp,
1675 union bnx2x_qable_obj *qo,
1676 struct bnx2x_exeq_elem *elem)
1677{
1678 struct bnx2x_exeq_elem query, *pos;
1679 struct bnx2x_vlan_mac_obj *o = &qo->vlan_mac;
1680 struct bnx2x_exe_queue_obj *exeq = &o->exe_queue;
1681
1682 memcpy(&query, elem, sizeof(query));
1683
1684 switch (elem->cmd_data.vlan_mac.cmd) {
1685 case BNX2X_VLAN_MAC_ADD:
1686 query.cmd_data.vlan_mac.cmd = BNX2X_VLAN_MAC_DEL;
1687 break;
1688 case BNX2X_VLAN_MAC_DEL:
1689 query.cmd_data.vlan_mac.cmd = BNX2X_VLAN_MAC_ADD;
1690 break;
1691 default:
1692 /* Don't handle anything other than ADD or DEL */
1693 return 0;
1694 }
1695
1696 /* If we found the appropriate element - delete it */
1697 pos = exeq->get(exeq, &query);
1698 if (pos) {
1699
1700 /* Return the credit of the optimized command */
1701 if (!test_bit(BNX2X_DONT_CONSUME_CAM_CREDIT,
1702 &pos->cmd_data.vlan_mac.vlan_mac_flags)) {
1703 if ((query.cmd_data.vlan_mac.cmd ==
1704 BNX2X_VLAN_MAC_ADD) && !o->put_credit(o)) {
Merav Sicron51c1a582012-03-18 10:33:38 +00001705 BNX2X_ERR("Failed to return the credit for the optimized ADD command\n");
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03001706 return -EINVAL;
1707 } else if (!o->get_credit(o)) { /* VLAN_MAC_DEL */
Merav Sicron51c1a582012-03-18 10:33:38 +00001708 BNX2X_ERR("Failed to recover the credit from the optimized DEL command\n");
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03001709 return -EINVAL;
1710 }
1711 }
1712
1713 DP(BNX2X_MSG_SP, "Optimizing %s command\n",
1714 (elem->cmd_data.vlan_mac.cmd == BNX2X_VLAN_MAC_ADD) ?
1715 "ADD" : "DEL");
1716
1717 list_del(&pos->link);
1718 bnx2x_exe_queue_free_elem(bp, pos);
1719 return 1;
1720 }
1721
1722 return 0;
1723}
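
/* Worked example (hypothetical address) of the optimization above: if an ADD
 * for a MAC is still pending in the execution queue and a DEL for the same
 * MAC arrives, the two cancel out and no ramrod is sent for either:
 *
 *   exe_queue: [ ADD 00:11:22:33:44:55 ]   <- not yet sent to FW
 *   new cmd:     DEL 00:11:22:33:44:55
 *   result:    exe_queue empty, the ADD's credit returned to the pool, rc = 1
 *
 * The mirror case (pending DEL, incoming ADD) consumes a credit back from
 * the pool instead, via get_credit().
 */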
1724
1725/**
1726 * bnx2x_vlan_mac_get_registry_elem - prepare a registry element
1727 *
1728 * @bp: device handle
 1729 * @o: vlan_mac object the registry element belongs to
 1730 * @elem: execution queue element holding the command
 1731 * @restore: true if this is a RESTORE flow
 1732 * @re: output parameter for the resulting registry element
1733 *
1734 * prepare a registry element according to the current command request.
1735 */
1736static inline int bnx2x_vlan_mac_get_registry_elem(
1737 struct bnx2x *bp,
1738 struct bnx2x_vlan_mac_obj *o,
1739 struct bnx2x_exeq_elem *elem,
1740 bool restore,
1741 struct bnx2x_vlan_mac_registry_elem **re)
1742{
Yuval Mintz86564c32013-01-23 03:21:50 +00001743 enum bnx2x_vlan_mac_cmd cmd = elem->cmd_data.vlan_mac.cmd;
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03001744 struct bnx2x_vlan_mac_registry_elem *reg_elem;
1745
1746 /* Allocate a new registry element if needed. */
1747 if (!restore &&
1748 ((cmd == BNX2X_VLAN_MAC_ADD) || (cmd == BNX2X_VLAN_MAC_MOVE))) {
1749 reg_elem = kzalloc(sizeof(*reg_elem), GFP_ATOMIC);
1750 if (!reg_elem)
1751 return -ENOMEM;
1752
1753 /* Get a new CAM offset */
1754 if (!o->get_cam_offset(o, &reg_elem->cam_offset)) {
Yuval Mintz16a5fd92013-06-02 00:06:18 +00001755 /* This shall never happen, because we have checked the
 1756 * CAM availability in the 'validate' step.
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03001757 */
1758 WARN_ON(1);
1759 kfree(reg_elem);
1760 return -EINVAL;
1761 }
1762
1763 DP(BNX2X_MSG_SP, "Got cam offset %d\n", reg_elem->cam_offset);
1764
1765 /* Set a VLAN-MAC data */
1766 memcpy(&reg_elem->u, &elem->cmd_data.vlan_mac.u,
1767 sizeof(reg_elem->u));
1768
1769 /* Copy the flags (needed for DEL and RESTORE flows) */
1770 reg_elem->vlan_mac_flags =
1771 elem->cmd_data.vlan_mac.vlan_mac_flags;
1772 } else /* DEL, RESTORE */
Merav Sicron51c1a582012-03-18 10:33:38 +00001773 reg_elem = o->check_del(bp, o, &elem->cmd_data.vlan_mac.u);
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03001774
1775 *re = reg_elem;
1776 return 0;
1777}
1778
1779/**
1780 * bnx2x_execute_vlan_mac - execute vlan mac command
1781 *
1782 * @bp: device handle
 1783 * @qo: queueable object (its vlan_mac member is used)
 1784 * @exe_chunk: list of commands to execute in this chunk
 1785 * @ramrod_flags: execution flags (e.g. RAMROD_RESTORE, RAMROD_DRV_CLR_ONLY)
1786 *
1787 * go and send a ramrod!
1788 */
1789static int bnx2x_execute_vlan_mac(struct bnx2x *bp,
1790 union bnx2x_qable_obj *qo,
1791 struct list_head *exe_chunk,
1792 unsigned long *ramrod_flags)
1793{
1794 struct bnx2x_exeq_elem *elem;
1795 struct bnx2x_vlan_mac_obj *o = &qo->vlan_mac, *cam_obj;
1796 struct bnx2x_raw_obj *r = &o->raw;
1797 int rc, idx = 0;
1798 bool restore = test_bit(RAMROD_RESTORE, ramrod_flags);
1799 bool drv_only = test_bit(RAMROD_DRV_CLR_ONLY, ramrod_flags);
1800 struct bnx2x_vlan_mac_registry_elem *reg_elem;
Yuval Mintz86564c32013-01-23 03:21:50 +00001801 enum bnx2x_vlan_mac_cmd cmd;
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03001802
Yuval Mintz16a5fd92013-06-02 00:06:18 +00001803 /* If DRIVER_ONLY execution is requested, clean up the registry
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03001804 * and exit. Otherwise send a ramrod to FW.
1805 */
1806 if (!drv_only) {
1807 WARN_ON(r->check_pending(r));
1808
1809 /* Set pending */
1810 r->set_pending(r);
1811
Yuval Mintz16a5fd92013-06-02 00:06:18 +00001812 /* Fill the ramrod data */
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03001813 list_for_each_entry(elem, exe_chunk, link) {
1814 cmd = elem->cmd_data.vlan_mac.cmd;
Yuval Mintz16a5fd92013-06-02 00:06:18 +00001815 /* In a MOVE command we add to the target object, so
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03001816 * change the object used for the CAM search.
1817 */
1818 if (cmd == BNX2X_VLAN_MAC_MOVE)
1819 cam_obj = elem->cmd_data.vlan_mac.target_obj;
1820 else
1821 cam_obj = o;
1822
1823 rc = bnx2x_vlan_mac_get_registry_elem(bp, cam_obj,
1824 elem, restore,
1825 &reg_elem);
1826 if (rc)
1827 goto error_exit;
1828
1829 WARN_ON(!reg_elem);
1830
1831 /* Push a new entry into the registry */
1832 if (!restore &&
1833 ((cmd == BNX2X_VLAN_MAC_ADD) ||
1834 (cmd == BNX2X_VLAN_MAC_MOVE)))
1835 list_add(&reg_elem->link, &cam_obj->head);
1836
1837 /* Configure a single command in a ramrod data buffer */
1838 o->set_one_rule(bp, o, elem, idx,
1839 reg_elem->cam_offset);
1840
1841 /* MOVE command consumes 2 entries in the ramrod data */
1842 if (cmd == BNX2X_VLAN_MAC_MOVE)
1843 idx += 2;
1844 else
1845 idx++;
1846 }
1847
Yuval Mintz16a5fd92013-06-02 00:06:18 +00001848 /* No need for an explicit memory barrier here: we only need
 1849 * to ensure the ordering of writing to the SPQ element
 1850 * and updating of the SPQ producer, which involves a memory
 1851 * read, and a full memory barrier is already put there
 1852 * (inside bnx2x_sp_post()).
Vladislav Zolotarov53e51e22011-07-19 01:45:02 +00001853 */
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03001854
1855 rc = bnx2x_sp_post(bp, o->ramrod_cmd, r->cid,
1856 U64_HI(r->rdata_mapping),
1857 U64_LO(r->rdata_mapping),
1858 ETH_CONNECTION_TYPE);
1859 if (rc)
1860 goto error_exit;
1861 }
1862
1863 /* Now, when we are done with the ramrod - clean up the registry */
1864 list_for_each_entry(elem, exe_chunk, link) {
1865 cmd = elem->cmd_data.vlan_mac.cmd;
1866 if ((cmd == BNX2X_VLAN_MAC_DEL) ||
1867 (cmd == BNX2X_VLAN_MAC_MOVE)) {
Merav Sicron51c1a582012-03-18 10:33:38 +00001868 reg_elem = o->check_del(bp, o,
1869 &elem->cmd_data.vlan_mac.u);
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03001870
1871 WARN_ON(!reg_elem);
1872
1873 o->put_cam_offset(o, reg_elem->cam_offset);
1874 list_del(&reg_elem->link);
1875 kfree(reg_elem);
1876 }
1877 }
1878
1879 if (!drv_only)
1880 return 1;
1881 else
1882 return 0;
1883
1884error_exit:
1885 r->clear_pending(r);
1886
1887 /* Cleanup a registry in case of a failure */
1888 list_for_each_entry(elem, exe_chunk, link) {
1889 cmd = elem->cmd_data.vlan_mac.cmd;
1890
1891 if (cmd == BNX2X_VLAN_MAC_MOVE)
1892 cam_obj = elem->cmd_data.vlan_mac.target_obj;
1893 else
1894 cam_obj = o;
1895
1896 /* Delete all newly added above entries */
1897 if (!restore &&
1898 ((cmd == BNX2X_VLAN_MAC_ADD) ||
1899 (cmd == BNX2X_VLAN_MAC_MOVE))) {
Merav Sicron51c1a582012-03-18 10:33:38 +00001900 reg_elem = o->check_del(bp, cam_obj,
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03001901 &elem->cmd_data.vlan_mac.u);
1902 if (reg_elem) {
1903 list_del(&reg_elem->link);
1904 kfree(reg_elem);
1905 }
1906 }
1907 }
1908
1909 return rc;
1910}
1911
1912static inline int bnx2x_vlan_mac_push_new_cmd(
1913 struct bnx2x *bp,
1914 struct bnx2x_vlan_mac_ramrod_params *p)
1915{
1916 struct bnx2x_exeq_elem *elem;
1917 struct bnx2x_vlan_mac_obj *o = p->vlan_mac_obj;
1918 bool restore = test_bit(RAMROD_RESTORE, &p->ramrod_flags);
1919
1920 /* Allocate the execution queue element */
1921 elem = bnx2x_exe_queue_alloc_elem(bp);
1922 if (!elem)
1923 return -ENOMEM;
1924
1925 /* Set the command 'length' */
1926 switch (p->user_req.cmd) {
1927 case BNX2X_VLAN_MAC_MOVE:
1928 elem->cmd_len = 2;
1929 break;
1930 default:
1931 elem->cmd_len = 1;
1932 }
1933
1934 /* Fill the object specific info */
1935 memcpy(&elem->cmd_data.vlan_mac, &p->user_req, sizeof(p->user_req));
1936
1937 /* Try to add a new command to the pending list */
1938 return bnx2x_exe_queue_add(bp, &o->exe_queue, elem, restore);
1939}
1940
1941/**
1942 * bnx2x_config_vlan_mac - configure VLAN/MAC/VLAN_MAC filtering rules.
1943 *
1944 * @bp: device handle
 1945 * @p: ramrod parameters (object, flags and the user request)
1946 *
1947 */
Yuval Mintz8b09be52013-08-01 17:30:59 +03001948int bnx2x_config_vlan_mac(struct bnx2x *bp,
1949 struct bnx2x_vlan_mac_ramrod_params *p)
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03001950{
1951 int rc = 0;
1952 struct bnx2x_vlan_mac_obj *o = p->vlan_mac_obj;
1953 unsigned long *ramrod_flags = &p->ramrod_flags;
1954 bool cont = test_bit(RAMROD_CONT, ramrod_flags);
1955 struct bnx2x_raw_obj *raw = &o->raw;
1956
1957 /*
1958 * Add new elements to the execution list for commands that require it.
1959 */
1960 if (!cont) {
1961 rc = bnx2x_vlan_mac_push_new_cmd(bp, p);
1962 if (rc)
1963 return rc;
1964 }
1965
Yuval Mintz16a5fd92013-06-02 00:06:18 +00001966 /* If nothing more will be executed in this iteration, we want to
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03001967 * return PENDING if there are still pending commands
1968 */
1969 if (!bnx2x_exe_queue_empty(&o->exe_queue))
1970 rc = 1;
1971
Vladislav Zolotarov79616892011-07-21 07:58:54 +00001972 if (test_bit(RAMROD_DRV_CLR_ONLY, ramrod_flags)) {
Merav Sicron51c1a582012-03-18 10:33:38 +00001973 DP(BNX2X_MSG_SP, "RAMROD_DRV_CLR_ONLY requested: clearing a pending bit.\n");
Vladislav Zolotarov79616892011-07-21 07:58:54 +00001974 raw->clear_pending(raw);
1975 }
1976
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03001977 /* Execute commands if required */
1978 if (cont || test_bit(RAMROD_EXEC, ramrod_flags) ||
1979 test_bit(RAMROD_COMP_WAIT, ramrod_flags)) {
Yuval Mintz8b09be52013-08-01 17:30:59 +03001980 rc = __bnx2x_vlan_mac_execute_step(bp, p->vlan_mac_obj,
1981 &p->ramrod_flags);
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03001982 if (rc < 0)
1983 return rc;
1984 }
1985
Yuval Mintz16a5fd92013-06-02 00:06:18 +00001986 /* RAMROD_COMP_WAIT is a superset of RAMROD_EXEC. If it was set
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03001987 * then the user wants to wait until the last command is done.
1988 */
1989 if (test_bit(RAMROD_COMP_WAIT, &p->ramrod_flags)) {
Yuval Mintz16a5fd92013-06-02 00:06:18 +00001990 /* Wait at most for the current exe_queue length of iterations plus
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03001991 * one (for the current pending command).
1992 */
1993 int max_iterations = bnx2x_exe_queue_length(&o->exe_queue) + 1;
1994
1995 while (!bnx2x_exe_queue_empty(&o->exe_queue) &&
1996 max_iterations--) {
1997
1998 /* Wait for the current command to complete */
1999 rc = raw->wait_comp(bp, raw);
2000 if (rc)
2001 return rc;
2002
2003 /* Make a next step */
Yuval Mintz8b09be52013-08-01 17:30:59 +03002004 rc = __bnx2x_vlan_mac_execute_step(bp,
2005 p->vlan_mac_obj,
2006 &p->ramrod_flags);
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03002007 if (rc < 0)
2008 return rc;
2009 }
2010
2011 return 0;
2012 }
2013
2014 return rc;
2015}
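
/* Illustrative usage sketch (not part of the driver): a typical caller
 * prepares the ramrod parameters and asks for synchronous execution. The
 * helper name and MAC value are made up; the field layout is assumed from
 * the parameter structure this function operates on.
 */
static int example_add_mac(struct bnx2x *bp, struct bnx2x_vlan_mac_obj *obj)
{
	struct bnx2x_vlan_mac_ramrod_params p;
	static const u8 mac[ETH_ALEN] = {0x00, 0x11, 0x22, 0x33, 0x44, 0x55};

	memset(&p, 0, sizeof(p));
	p.vlan_mac_obj = obj;
	p.user_req.cmd = BNX2X_VLAN_MAC_ADD;
	memcpy(p.user_req.u.mac.mac, mac, ETH_ALEN);

	/* Execute immediately and wait for the last command to complete */
	__set_bit(RAMROD_COMP_WAIT, &p.ramrod_flags);

	return bnx2x_config_vlan_mac(bp, &p);
}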
2016
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03002017/**
2018 * bnx2x_vlan_mac_del_all - delete elements with given vlan_mac_flags spec
2019 *
2020 * @bp: device handle
 2021 * @o: vlan_mac object to delete entries from
 2022 * @vlan_mac_flags: flags the entries must match to be deleted
 2023 * @ramrod_flags: execution flags to be used for this deletion
 2024 *
 2025 * Returns 0 if the last operation has completed successfully and there are
 2026 * no more elements left, positive value if the last operation has completed
 2027 * successfully and there are more previously configured elements, negative
 2028 * value if the current operation has failed.
2029 */
2030static int bnx2x_vlan_mac_del_all(struct bnx2x *bp,
2031 struct bnx2x_vlan_mac_obj *o,
2032 unsigned long *vlan_mac_flags,
2033 unsigned long *ramrod_flags)
2034{
2035 struct bnx2x_vlan_mac_registry_elem *pos = NULL;
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03002036 struct bnx2x_vlan_mac_ramrod_params p;
2037 struct bnx2x_exe_queue_obj *exeq = &o->exe_queue;
2038 struct bnx2x_exeq_elem *exeq_pos, *exeq_pos_n;
Yuval Mintze8379c72014-01-05 18:33:54 +02002039 unsigned long flags;
Yuval Mintz8b09be52013-08-01 17:30:59 +03002040 int read_lock;
2041 int rc = 0;
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03002042
2043 /* Clear pending commands first */
2044
2045 spin_lock_bh(&exeq->lock);
2046
2047 list_for_each_entry_safe(exeq_pos, exeq_pos_n, &exeq->exe_queue, link) {
Yuval Mintze8379c72014-01-05 18:33:54 +02002048 flags = exeq_pos->cmd_data.vlan_mac.vlan_mac_flags;
2049 if (BNX2X_VLAN_MAC_CMP_FLAGS(flags) ==
2050 BNX2X_VLAN_MAC_CMP_FLAGS(*vlan_mac_flags)) {
Yuval Mintz460a25c2012-01-23 07:31:51 +00002051 rc = exeq->remove(bp, exeq->owner, exeq_pos);
2052 if (rc) {
2053 BNX2X_ERR("Failed to remove command\n");
Dan Carpentera44acd52012-01-24 21:59:31 +00002054 spin_unlock_bh(&exeq->lock);
Yuval Mintz460a25c2012-01-23 07:31:51 +00002055 return rc;
2056 }
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03002057 list_del(&exeq_pos->link);
Yuval Mintz07ef7be2013-03-11 05:17:41 +00002058 bnx2x_exe_queue_free_elem(bp, exeq_pos);
Yuval Mintz460a25c2012-01-23 07:31:51 +00002059 }
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03002060 }
2061
2062 spin_unlock_bh(&exeq->lock);
2063
2064 /* Prepare a command request */
2065 memset(&p, 0, sizeof(p));
2066 p.vlan_mac_obj = o;
2067 p.ramrod_flags = *ramrod_flags;
2068 p.user_req.cmd = BNX2X_VLAN_MAC_DEL;
2069
Yuval Mintz16a5fd92013-06-02 00:06:18 +00002070 /* Add all but the last VLAN-MAC to the execution queue without actually
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03002071 * executing anything.
2072 */
2073 __clear_bit(RAMROD_COMP_WAIT, &p.ramrod_flags);
2074 __clear_bit(RAMROD_EXEC, &p.ramrod_flags);
2075 __clear_bit(RAMROD_CONT, &p.ramrod_flags);
2076
Yuval Mintz8b09be52013-08-01 17:30:59 +03002077 DP(BNX2X_MSG_SP, "vlan_mac_del_all -- taking vlan_mac_lock (reader)\n");
2078 read_lock = bnx2x_vlan_mac_h_read_lock(bp, o);
2079 if (read_lock != 0)
2080 return read_lock;
2081
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03002082 list_for_each_entry(pos, &o->head, link) {
Yuval Mintze8379c72014-01-05 18:33:54 +02002083 flags = pos->vlan_mac_flags;
2084 if (BNX2X_VLAN_MAC_CMP_FLAGS(flags) ==
2085 BNX2X_VLAN_MAC_CMP_FLAGS(*vlan_mac_flags)) {
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03002086 p.user_req.vlan_mac_flags = pos->vlan_mac_flags;
2087 memcpy(&p.user_req.u, &pos->u, sizeof(pos->u));
2088 rc = bnx2x_config_vlan_mac(bp, &p);
2089 if (rc < 0) {
2090 BNX2X_ERR("Failed to add a new DEL command\n");
Yuval Mintz8b09be52013-08-01 17:30:59 +03002091 bnx2x_vlan_mac_h_read_unlock(bp, o);
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03002092 return rc;
2093 }
2094 }
2095 }
2096
Yuval Mintz8b09be52013-08-01 17:30:59 +03002097 DP(BNX2X_MSG_SP, "vlan_mac_del_all -- releasing vlan_mac_lock (reader)\n");
2098 bnx2x_vlan_mac_h_read_unlock(bp, o);
2099
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03002100 p.ramrod_flags = *ramrod_flags;
2101 __set_bit(RAMROD_CONT, &p.ramrod_flags);
2102
2103 return bnx2x_config_vlan_mac(bp, &p);
2104}
2105
2106static inline void bnx2x_init_raw_obj(struct bnx2x_raw_obj *raw, u8 cl_id,
2107 u32 cid, u8 func_id, void *rdata, dma_addr_t rdata_mapping, int state,
2108 unsigned long *pstate, bnx2x_obj_type type)
2109{
2110 raw->func_id = func_id;
2111 raw->cid = cid;
2112 raw->cl_id = cl_id;
2113 raw->rdata = rdata;
2114 raw->rdata_mapping = rdata_mapping;
2115 raw->state = state;
2116 raw->pstate = pstate;
2117 raw->obj_type = type;
2118 raw->check_pending = bnx2x_raw_check_pending;
2119 raw->clear_pending = bnx2x_raw_clear_pending;
2120 raw->set_pending = bnx2x_raw_set_pending;
2121 raw->wait_comp = bnx2x_raw_wait;
2122}
2123
2124static inline void bnx2x_init_vlan_mac_common(struct bnx2x_vlan_mac_obj *o,
2125 u8 cl_id, u32 cid, u8 func_id, void *rdata, dma_addr_t rdata_mapping,
2126 int state, unsigned long *pstate, bnx2x_obj_type type,
2127 struct bnx2x_credit_pool_obj *macs_pool,
2128 struct bnx2x_credit_pool_obj *vlans_pool)
2129{
2130 INIT_LIST_HEAD(&o->head);
Yuval Mintz8b09be52013-08-01 17:30:59 +03002131 o->head_reader = 0;
2132 o->head_exe_request = false;
2133 o->saved_ramrod_flags = 0;
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03002134
2135 o->macs_pool = macs_pool;
2136 o->vlans_pool = vlans_pool;
2137
2138 o->delete_all = bnx2x_vlan_mac_del_all;
2139 o->restore = bnx2x_vlan_mac_restore;
2140 o->complete = bnx2x_complete_vlan_mac;
2141 o->wait = bnx2x_wait_vlan_mac;
2142
2143 bnx2x_init_raw_obj(&o->raw, cl_id, cid, func_id, rdata, rdata_mapping,
2144 state, pstate, type);
2145}
2146
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03002147void bnx2x_init_mac_obj(struct bnx2x *bp,
2148 struct bnx2x_vlan_mac_obj *mac_obj,
2149 u8 cl_id, u32 cid, u8 func_id, void *rdata,
2150 dma_addr_t rdata_mapping, int state,
2151 unsigned long *pstate, bnx2x_obj_type type,
2152 struct bnx2x_credit_pool_obj *macs_pool)
2153{
2154 union bnx2x_qable_obj *qable_obj = (union bnx2x_qable_obj *)mac_obj;
2155
2156 bnx2x_init_vlan_mac_common(mac_obj, cl_id, cid, func_id, rdata,
2157 rdata_mapping, state, pstate, type,
2158 macs_pool, NULL);
2159
2160 /* CAM credit pool handling */
2161 mac_obj->get_credit = bnx2x_get_credit_mac;
2162 mac_obj->put_credit = bnx2x_put_credit_mac;
2163 mac_obj->get_cam_offset = bnx2x_get_cam_offset_mac;
2164 mac_obj->put_cam_offset = bnx2x_put_cam_offset_mac;
2165
2166 if (CHIP_IS_E1x(bp)) {
2167 mac_obj->set_one_rule = bnx2x_set_one_mac_e1x;
2168 mac_obj->check_del = bnx2x_check_mac_del;
2169 mac_obj->check_add = bnx2x_check_mac_add;
2170 mac_obj->check_move = bnx2x_check_move_always_err;
2171 mac_obj->ramrod_cmd = RAMROD_CMD_ID_ETH_SET_MAC;
2172
2173 /* Exe Queue */
2174 bnx2x_exe_queue_init(bp,
2175 &mac_obj->exe_queue, 1, qable_obj,
2176 bnx2x_validate_vlan_mac,
Yuval Mintz460a25c2012-01-23 07:31:51 +00002177 bnx2x_remove_vlan_mac,
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03002178 bnx2x_optimize_vlan_mac,
2179 bnx2x_execute_vlan_mac,
2180 bnx2x_exeq_get_mac);
2181 } else {
2182 mac_obj->set_one_rule = bnx2x_set_one_mac_e2;
2183 mac_obj->check_del = bnx2x_check_mac_del;
2184 mac_obj->check_add = bnx2x_check_mac_add;
2185 mac_obj->check_move = bnx2x_check_move;
2186 mac_obj->ramrod_cmd =
2187 RAMROD_CMD_ID_ETH_CLASSIFICATION_RULES;
Ariel Eliored5162a2011-12-05 21:52:24 +00002188 mac_obj->get_n_elements = bnx2x_get_n_elements;
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03002189
2190 /* Exe Queue */
2191 bnx2x_exe_queue_init(bp,
2192 &mac_obj->exe_queue, CLASSIFY_RULES_COUNT,
2193 qable_obj, bnx2x_validate_vlan_mac,
Yuval Mintz460a25c2012-01-23 07:31:51 +00002194 bnx2x_remove_vlan_mac,
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03002195 bnx2x_optimize_vlan_mac,
2196 bnx2x_execute_vlan_mac,
2197 bnx2x_exeq_get_mac);
2198 }
2199}
2200
2201void bnx2x_init_vlan_obj(struct bnx2x *bp,
2202 struct bnx2x_vlan_mac_obj *vlan_obj,
2203 u8 cl_id, u32 cid, u8 func_id, void *rdata,
2204 dma_addr_t rdata_mapping, int state,
2205 unsigned long *pstate, bnx2x_obj_type type,
2206 struct bnx2x_credit_pool_obj *vlans_pool)
2207{
2208 union bnx2x_qable_obj *qable_obj = (union bnx2x_qable_obj *)vlan_obj;
2209
2210 bnx2x_init_vlan_mac_common(vlan_obj, cl_id, cid, func_id, rdata,
2211 rdata_mapping, state, pstate, type, NULL,
2212 vlans_pool);
2213
2214 vlan_obj->get_credit = bnx2x_get_credit_vlan;
2215 vlan_obj->put_credit = bnx2x_put_credit_vlan;
2216 vlan_obj->get_cam_offset = bnx2x_get_cam_offset_vlan;
2217 vlan_obj->put_cam_offset = bnx2x_put_cam_offset_vlan;
2218
2219 if (CHIP_IS_E1x(bp)) {
 2220 BNX2X_ERR("Do not support chips other than E2 and newer\n");
2221 BUG();
2222 } else {
2223 vlan_obj->set_one_rule = bnx2x_set_one_vlan_e2;
2224 vlan_obj->check_del = bnx2x_check_vlan_del;
2225 vlan_obj->check_add = bnx2x_check_vlan_add;
2226 vlan_obj->check_move = bnx2x_check_move;
2227 vlan_obj->ramrod_cmd =
2228 RAMROD_CMD_ID_ETH_CLASSIFICATION_RULES;
Ariel Elior3ec9f9c2013-03-11 05:17:45 +00002229 vlan_obj->get_n_elements = bnx2x_get_n_elements;
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03002230
2231 /* Exe Queue */
2232 bnx2x_exe_queue_init(bp,
2233 &vlan_obj->exe_queue, CLASSIFY_RULES_COUNT,
2234 qable_obj, bnx2x_validate_vlan_mac,
Yuval Mintz460a25c2012-01-23 07:31:51 +00002235 bnx2x_remove_vlan_mac,
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03002236 bnx2x_optimize_vlan_mac,
2237 bnx2x_execute_vlan_mac,
2238 bnx2x_exeq_get_vlan);
2239 }
2240}
2241
2242void bnx2x_init_vlan_mac_obj(struct bnx2x *bp,
2243 struct bnx2x_vlan_mac_obj *vlan_mac_obj,
2244 u8 cl_id, u32 cid, u8 func_id, void *rdata,
2245 dma_addr_t rdata_mapping, int state,
2246 unsigned long *pstate, bnx2x_obj_type type,
2247 struct bnx2x_credit_pool_obj *macs_pool,
2248 struct bnx2x_credit_pool_obj *vlans_pool)
2249{
2250 union bnx2x_qable_obj *qable_obj =
2251 (union bnx2x_qable_obj *)vlan_mac_obj;
2252
2253 bnx2x_init_vlan_mac_common(vlan_mac_obj, cl_id, cid, func_id, rdata,
2254 rdata_mapping, state, pstate, type,
2255 macs_pool, vlans_pool);
2256
2257 /* CAM pool handling */
2258 vlan_mac_obj->get_credit = bnx2x_get_credit_vlan_mac;
2259 vlan_mac_obj->put_credit = bnx2x_put_credit_vlan_mac;
Yuval Mintz16a5fd92013-06-02 00:06:18 +00002260 /* CAM offset is relevant for 57710 and 57711 chips only which have a
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03002261 * single CAM for both MACs and VLAN-MAC pairs. So the offset
2262 * will be taken from MACs' pool object only.
2263 */
2264 vlan_mac_obj->get_cam_offset = bnx2x_get_cam_offset_mac;
2265 vlan_mac_obj->put_cam_offset = bnx2x_put_cam_offset_mac;
2266
2267 if (CHIP_IS_E1(bp)) {
 2268 BNX2X_ERR("Do not support chips other than E2\n");
2269 BUG();
2270 } else if (CHIP_IS_E1H(bp)) {
2271 vlan_mac_obj->set_one_rule = bnx2x_set_one_vlan_mac_e1h;
2272 vlan_mac_obj->check_del = bnx2x_check_vlan_mac_del;
2273 vlan_mac_obj->check_add = bnx2x_check_vlan_mac_add;
2274 vlan_mac_obj->check_move = bnx2x_check_move_always_err;
2275 vlan_mac_obj->ramrod_cmd = RAMROD_CMD_ID_ETH_SET_MAC;
2276
2277 /* Exe Queue */
2278 bnx2x_exe_queue_init(bp,
2279 &vlan_mac_obj->exe_queue, 1, qable_obj,
2280 bnx2x_validate_vlan_mac,
Yuval Mintz460a25c2012-01-23 07:31:51 +00002281 bnx2x_remove_vlan_mac,
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03002282 bnx2x_optimize_vlan_mac,
2283 bnx2x_execute_vlan_mac,
2284 bnx2x_exeq_get_vlan_mac);
2285 } else {
2286 vlan_mac_obj->set_one_rule = bnx2x_set_one_vlan_mac_e2;
2287 vlan_mac_obj->check_del = bnx2x_check_vlan_mac_del;
2288 vlan_mac_obj->check_add = bnx2x_check_vlan_mac_add;
2289 vlan_mac_obj->check_move = bnx2x_check_move;
2290 vlan_mac_obj->ramrod_cmd =
2291 RAMROD_CMD_ID_ETH_CLASSIFICATION_RULES;
2292
2293 /* Exe Queue */
2294 bnx2x_exe_queue_init(bp,
2295 &vlan_mac_obj->exe_queue,
2296 CLASSIFY_RULES_COUNT,
2297 qable_obj, bnx2x_validate_vlan_mac,
Yuval Mintz460a25c2012-01-23 07:31:51 +00002298 bnx2x_remove_vlan_mac,
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03002299 bnx2x_optimize_vlan_mac,
2300 bnx2x_execute_vlan_mac,
2301 bnx2x_exeq_get_vlan_mac);
2302 }
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03002303}
2304
2305/* RX_MODE verbs: DROP_ALL/ACCEPT_ALL/ACCEPT_ALL_MULTI/ACCEPT_ALL_VLAN/NORMAL */
2306static inline void __storm_memset_mac_filters(struct bnx2x *bp,
2307 struct tstorm_eth_mac_filter_config *mac_filters,
2308 u16 pf_id)
2309{
2310 size_t size = sizeof(struct tstorm_eth_mac_filter_config);
2311
2312 u32 addr = BAR_TSTRORM_INTMEM +
2313 TSTORM_MAC_FILTER_CONFIG_OFFSET(pf_id);
2314
2315 __storm_memset_struct(bp, addr, size, (u32 *)mac_filters);
2316}
2317
2318static int bnx2x_set_rx_mode_e1x(struct bnx2x *bp,
2319 struct bnx2x_rx_mode_ramrod_params *p)
2320{
Yuval Mintz2de67432013-01-23 03:21:43 +00002321 /* update the bp MAC filter structure */
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03002322 u32 mask = (1 << p->cl_id);
2323
2324 struct tstorm_eth_mac_filter_config *mac_filters =
2325 (struct tstorm_eth_mac_filter_config *)p->rdata;
2326
Yuval Mintz16a5fd92013-06-02 00:06:18 +00002327 /* initial setting is drop-all */
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03002328 u8 drop_all_ucast = 1, drop_all_mcast = 1;
Vladislav Zolotarov042181f2011-06-14 01:33:39 +00002329 u8 accp_all_ucast = 0, accp_all_bcast = 0, accp_all_mcast = 0;
2330 u8 unmatched_unicast = 0;
2331
Yuval Mintz16a5fd92013-06-02 00:06:18 +00002332 /* In e1x we only take the rx accept flags into account since tx switching
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03002333 * isn't enabled. */
2334 if (test_bit(BNX2X_ACCEPT_UNICAST, &p->rx_accept_flags))
Vladislav Zolotarov042181f2011-06-14 01:33:39 +00002335 /* accept matched ucast */
2336 drop_all_ucast = 0;
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03002337
2338 if (test_bit(BNX2X_ACCEPT_MULTICAST, &p->rx_accept_flags))
Vladislav Zolotarov042181f2011-06-14 01:33:39 +00002339 /* accept matched mcast */
2340 drop_all_mcast = 0;
2341
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03002342 if (test_bit(BNX2X_ACCEPT_ALL_UNICAST, &p->rx_accept_flags)) {
Vladislav Zolotarov042181f2011-06-14 01:33:39 +00002343 /* accept all mcast */
2344 drop_all_ucast = 0;
2345 accp_all_ucast = 1;
2346 }
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03002347 if (test_bit(BNX2X_ACCEPT_ALL_MULTICAST, &p->rx_accept_flags)) {
Vladislav Zolotarov042181f2011-06-14 01:33:39 +00002348 /* accept all mcast */
2349 drop_all_mcast = 0;
2350 accp_all_mcast = 1;
2351 }
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03002352 if (test_bit(BNX2X_ACCEPT_BROADCAST, &p->rx_accept_flags))
Vladislav Zolotarov042181f2011-06-14 01:33:39 +00002353 /* accept (all) bcast */
Vladislav Zolotarov042181f2011-06-14 01:33:39 +00002354 accp_all_bcast = 1;
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03002355 if (test_bit(BNX2X_ACCEPT_UNMATCHED, &p->rx_accept_flags))
2356 /* accept unmatched unicasts */
2357 unmatched_unicast = 1;
Vladislav Zolotarov042181f2011-06-14 01:33:39 +00002358
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03002359 mac_filters->ucast_drop_all = drop_all_ucast ?
2360 mac_filters->ucast_drop_all | mask :
2361 mac_filters->ucast_drop_all & ~mask;
Vladislav Zolotarov042181f2011-06-14 01:33:39 +00002362
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03002363 mac_filters->mcast_drop_all = drop_all_mcast ?
2364 mac_filters->mcast_drop_all | mask :
2365 mac_filters->mcast_drop_all & ~mask;
Vladislav Zolotarov042181f2011-06-14 01:33:39 +00002366
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03002367 mac_filters->ucast_accept_all = accp_all_ucast ?
2368 mac_filters->ucast_accept_all | mask :
2369 mac_filters->ucast_accept_all & ~mask;
Vladislav Zolotarov042181f2011-06-14 01:33:39 +00002370
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03002371 mac_filters->mcast_accept_all = accp_all_mcast ?
2372 mac_filters->mcast_accept_all | mask :
2373 mac_filters->mcast_accept_all & ~mask;
Vladislav Zolotarov042181f2011-06-14 01:33:39 +00002374
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03002375 mac_filters->bcast_accept_all = accp_all_bcast ?
2376 mac_filters->bcast_accept_all | mask :
2377 mac_filters->bcast_accept_all & ~mask;
Vladislav Zolotarov042181f2011-06-14 01:33:39 +00002378
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03002379 mac_filters->unmatched_unicast = unmatched_unicast ?
2380 mac_filters->unmatched_unicast | mask :
2381 mac_filters->unmatched_unicast & ~mask;
Vladislav Zolotarov042181f2011-06-14 01:33:39 +00002382
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03002383 DP(BNX2X_MSG_SP, "drop_ucast 0x%x\ndrop_mcast 0x%x\naccp_ucast 0x%x\n"
Yuval Mintz2de67432013-01-23 03:21:43 +00002384 "accp_mcast 0x%x\naccp_bcast 0x%x\n",
Merav Sicron51c1a582012-03-18 10:33:38 +00002385 mac_filters->ucast_drop_all, mac_filters->mcast_drop_all,
2386 mac_filters->ucast_accept_all, mac_filters->mcast_accept_all,
2387 mac_filters->bcast_accept_all);
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03002388
2389 /* write the MAC filter structure*/
2390 __storm_memset_mac_filters(bp, mac_filters, p->func_id);
2391
2392 /* The operation is completed */
2393 clear_bit(p->state, p->pstate);
2394 smp_mb__after_clear_bit();
2395
2396 return 0;
Vladislav Zolotarov042181f2011-06-14 01:33:39 +00002397}
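
/* The ternary updates above all follow one idiom: set the client's bit in a
 * filter word when the condition holds, clear it otherwise. A hypothetical
 * helper making the idiom explicit:
 */
static inline u32 example_update_filter_bit(u32 word, u32 mask, bool set)
{
	return set ? (word | mask) : (word & ~mask);
}

/* e.g. mac_filters->ucast_drop_all =
 *		example_update_filter_bit(mac_filters->ucast_drop_all,
 *					  mask, drop_all_ucast);
 */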
2398
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03002399/* Setup ramrod data */
2400static inline void bnx2x_rx_mode_set_rdata_hdr_e2(u32 cid,
2401 struct eth_classify_header *hdr,
2402 u8 rule_cnt)
Vladislav Zolotarov042181f2011-06-14 01:33:39 +00002403{
Yuval Mintz86564c32013-01-23 03:21:50 +00002404 hdr->echo = cpu_to_le32(cid);
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03002405 hdr->rule_cnt = rule_cnt;
2406}
Vladislav Zolotarov042181f2011-06-14 01:33:39 +00002407
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03002408static inline void bnx2x_rx_mode_set_cmd_state_e2(struct bnx2x *bp,
Yuval Mintz924d75a2013-01-23 03:21:44 +00002409 unsigned long *accept_flags,
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03002410 struct eth_filter_rules_cmd *cmd,
2411 bool clear_accept_all)
2412{
2413 u16 state;
Vladislav Zolotarov042181f2011-06-14 01:33:39 +00002414
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03002415 /* start with 'drop-all' */
2416 state = ETH_FILTER_RULES_CMD_UCAST_DROP_ALL |
2417 ETH_FILTER_RULES_CMD_MCAST_DROP_ALL;
2418
Yuval Mintz924d75a2013-01-23 03:21:44 +00002419 if (test_bit(BNX2X_ACCEPT_UNICAST, accept_flags))
2420 state &= ~ETH_FILTER_RULES_CMD_UCAST_DROP_ALL;
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03002421
Yuval Mintz924d75a2013-01-23 03:21:44 +00002422 if (test_bit(BNX2X_ACCEPT_MULTICAST, accept_flags))
2423 state &= ~ETH_FILTER_RULES_CMD_MCAST_DROP_ALL;
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03002424
Yuval Mintz924d75a2013-01-23 03:21:44 +00002425 if (test_bit(BNX2X_ACCEPT_ALL_UNICAST, accept_flags)) {
2426 state &= ~ETH_FILTER_RULES_CMD_UCAST_DROP_ALL;
2427 state |= ETH_FILTER_RULES_CMD_UCAST_ACCEPT_ALL;
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03002428 }
2429
Yuval Mintz924d75a2013-01-23 03:21:44 +00002430 if (test_bit(BNX2X_ACCEPT_ALL_MULTICAST, accept_flags)) {
2431 state |= ETH_FILTER_RULES_CMD_MCAST_ACCEPT_ALL;
2432 state &= ~ETH_FILTER_RULES_CMD_MCAST_DROP_ALL;
2433 }
2434
2435 if (test_bit(BNX2X_ACCEPT_BROADCAST, accept_flags))
2436 state |= ETH_FILTER_RULES_CMD_BCAST_ACCEPT_ALL;
2437
2438 if (test_bit(BNX2X_ACCEPT_UNMATCHED, accept_flags)) {
2439 state &= ~ETH_FILTER_RULES_CMD_UCAST_DROP_ALL;
2440 state |= ETH_FILTER_RULES_CMD_UCAST_ACCEPT_UNMATCHED;
2441 }
2442
2443 if (test_bit(BNX2X_ACCEPT_ANY_VLAN, accept_flags))
2444 state |= ETH_FILTER_RULES_CMD_ACCEPT_ANY_VLAN;
2445
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03002446 /* Clear ACCEPT_ALL_XXX flags for FCoE L2 Queue */
2447 if (clear_accept_all) {
2448 state &= ~ETH_FILTER_RULES_CMD_MCAST_ACCEPT_ALL;
2449 state &= ~ETH_FILTER_RULES_CMD_BCAST_ACCEPT_ALL;
2450 state &= ~ETH_FILTER_RULES_CMD_UCAST_ACCEPT_ALL;
2451 state &= ~ETH_FILTER_RULES_CMD_UCAST_ACCEPT_UNMATCHED;
2452 }
2453
2454 cmd->state = cpu_to_le16(state);
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03002455}
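
/* Worked example of the mapping above (assuming clear_accept_all == false):
 * with the BNX2X_ACCEPT_UNICAST, BNX2X_ACCEPT_MULTICAST and
 * BNX2X_ACCEPT_BROADCAST bits set in *accept_flags, both initial DROP_ALL
 * bits get cleared and only ETH_FILTER_RULES_CMD_BCAST_ACCEPT_ALL remains,
 * i.e. the rule accepts matched unicast/multicast plus all broadcast frames.
 */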
2456
2457static int bnx2x_set_rx_mode_e2(struct bnx2x *bp,
2458 struct bnx2x_rx_mode_ramrod_params *p)
2459{
2460 struct eth_filter_rules_ramrod_data *data = p->rdata;
2461 int rc;
2462 u8 rule_idx = 0;
2463
2464 /* Reset the ramrod data buffer */
2465 memset(data, 0, sizeof(*data));
2466
2467 /* Setup ramrod data */
2468
2469 /* Tx (internal switching) */
2470 if (test_bit(RAMROD_TX, &p->ramrod_flags)) {
2471 data->rules[rule_idx].client_id = p->cl_id;
2472 data->rules[rule_idx].func_id = p->func_id;
2473
2474 data->rules[rule_idx].cmd_general_data =
2475 ETH_FILTER_RULES_CMD_TX_CMD;
2476
Yuval Mintz924d75a2013-01-23 03:21:44 +00002477 bnx2x_rx_mode_set_cmd_state_e2(bp, &p->tx_accept_flags,
2478 &(data->rules[rule_idx++]),
2479 false);
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03002480 }
2481
2482 /* Rx */
2483 if (test_bit(RAMROD_RX, &p->ramrod_flags)) {
2484 data->rules[rule_idx].client_id = p->cl_id;
2485 data->rules[rule_idx].func_id = p->func_id;
2486
2487 data->rules[rule_idx].cmd_general_data =
2488 ETH_FILTER_RULES_CMD_RX_CMD;
2489
Yuval Mintz924d75a2013-01-23 03:21:44 +00002490 bnx2x_rx_mode_set_cmd_state_e2(bp, &p->rx_accept_flags,
2491 &(data->rules[rule_idx++]),
2492 false);
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03002493 }
2494
Yuval Mintz16a5fd92013-06-02 00:06:18 +00002495 /* If FCoE Queue configuration has been requested, configure the Rx and
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03002496 * internal switching modes for this queue in separate rules.
 2497 *
 2498 * The FCoE queue shall never be set to ACCEPT_ALL packets of any sort:
2499 * MCAST_ALL, UCAST_ALL, BCAST_ALL and UNMATCHED.
2500 */
2501 if (test_bit(BNX2X_RX_MODE_FCOE_ETH, &p->rx_mode_flags)) {
2502 /* Tx (internal switching) */
2503 if (test_bit(RAMROD_TX, &p->ramrod_flags)) {
2504 data->rules[rule_idx].client_id = bnx2x_fcoe(bp, cl_id);
2505 data->rules[rule_idx].func_id = p->func_id;
2506
2507 data->rules[rule_idx].cmd_general_data =
2508 ETH_FILTER_RULES_CMD_TX_CMD;
2509
Yuval Mintz924d75a2013-01-23 03:21:44 +00002510 bnx2x_rx_mode_set_cmd_state_e2(bp, &p->tx_accept_flags,
2511 &(data->rules[rule_idx]),
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03002512 true);
Yuval Mintz924d75a2013-01-23 03:21:44 +00002513 rule_idx++;
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03002514 }
2515
2516 /* Rx */
2517 if (test_bit(RAMROD_RX, &p->ramrod_flags)) {
2518 data->rules[rule_idx].client_id = bnx2x_fcoe(bp, cl_id);
2519 data->rules[rule_idx].func_id = p->func_id;
2520
2521 data->rules[rule_idx].cmd_general_data =
2522 ETH_FILTER_RULES_CMD_RX_CMD;
2523
Yuval Mintz924d75a2013-01-23 03:21:44 +00002524 bnx2x_rx_mode_set_cmd_state_e2(bp, &p->rx_accept_flags,
2525 &(data->rules[rule_idx]),
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03002526 true);
Yuval Mintz924d75a2013-01-23 03:21:44 +00002527 rule_idx++;
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03002528 }
2529 }
2530
Yuval Mintz16a5fd92013-06-02 00:06:18 +00002531 /* Set the ramrod header (most importantly - number of rules to
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03002532 * configure).
2533 */
2534 bnx2x_rx_mode_set_rdata_hdr_e2(p->cid, &data->header, rule_idx);
2535
Merav Sicron51c1a582012-03-18 10:33:38 +00002536 DP(BNX2X_MSG_SP, "About to configure %d rules, rx_accept_flags 0x%lx, tx_accept_flags 0x%lx\n",
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03002537 data->header.rule_cnt, p->rx_accept_flags,
2538 p->tx_accept_flags);
2539
Yuval Mintz16a5fd92013-06-02 00:06:18 +00002540 /* No need for an explicit memory barrier here: we only need
 2541 * to ensure the ordering of writing to the SPQ element
 2542 * and updating of the SPQ producer, which involves a memory
 2543 * read, and a full memory barrier is already put there
 2544 * (inside bnx2x_sp_post()).
Vladislav Zolotarov53e51e22011-07-19 01:45:02 +00002545 */
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03002546
2547 /* Send a ramrod */
2548 rc = bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_FILTER_RULES, p->cid,
2549 U64_HI(p->rdata_mapping),
2550 U64_LO(p->rdata_mapping),
2551 ETH_CONNECTION_TYPE);
2552 if (rc)
2553 return rc;
2554
2555 /* Ramrod completion is pending */
2556 return 1;
2557}
2558
2559static int bnx2x_wait_rx_mode_comp_e2(struct bnx2x *bp,
2560 struct bnx2x_rx_mode_ramrod_params *p)
2561{
2562 return bnx2x_state_wait(bp, p->state, p->pstate);
2563}
2564
2565static int bnx2x_empty_rx_mode_wait(struct bnx2x *bp,
2566 struct bnx2x_rx_mode_ramrod_params *p)
2567{
2568 /* Do nothing */
2569 return 0;
2570}
2571
2572int bnx2x_config_rx_mode(struct bnx2x *bp,
2573 struct bnx2x_rx_mode_ramrod_params *p)
2574{
2575 int rc;
2576
2577 /* Configure the new classification in the chip */
2578 rc = p->rx_mode_obj->config_rx_mode(bp, p);
2579 if (rc < 0)
2580 return rc;
2581
2582 /* Wait for a ramrod completion if was requested */
2583 if (test_bit(RAMROD_COMP_WAIT, &p->ramrod_flags)) {
2584 rc = p->rx_mode_obj->wait_comp(bp, p);
2585 if (rc)
2586 return rc;
2587 }
2588
2589 return rc;
2590}
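
/* Illustrative usage sketch (not part of the driver): putting a client into
 * Rx promiscuous mode. The helper name is made up, and 'p' is assumed to be
 * otherwise fully prepared (object, cl_id, cid, rdata, state, ...) by the
 * caller.
 */
static int example_set_rx_promisc(struct bnx2x *bp,
				  struct bnx2x_rx_mode_ramrod_params *p)
{
	/* Accept everything on the Rx path */
	__set_bit(BNX2X_ACCEPT_ALL_UNICAST, &p->rx_accept_flags);
	__set_bit(BNX2X_ACCEPT_ALL_MULTICAST, &p->rx_accept_flags);
	__set_bit(BNX2X_ACCEPT_BROADCAST, &p->rx_accept_flags);

	/* Configure the Rx rule and wait for the ramrod completion */
	__set_bit(RAMROD_RX, &p->ramrod_flags);
	__set_bit(RAMROD_COMP_WAIT, &p->ramrod_flags);

	return bnx2x_config_rx_mode(bp, p);
}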
2591
2592void bnx2x_init_rx_mode_obj(struct bnx2x *bp,
2593 struct bnx2x_rx_mode_obj *o)
2594{
2595 if (CHIP_IS_E1x(bp)) {
2596 o->wait_comp = bnx2x_empty_rx_mode_wait;
2597 o->config_rx_mode = bnx2x_set_rx_mode_e1x;
2598 } else {
2599 o->wait_comp = bnx2x_wait_rx_mode_comp_e2;
2600 o->config_rx_mode = bnx2x_set_rx_mode_e2;
2601 }
2602}
2603
2604/********************* Multicast verbs: SET, CLEAR ****************************/
2605static inline u8 bnx2x_mcast_bin_from_mac(u8 *mac)
2606{
2607 return (crc32c_le(0, mac, ETH_ALEN) >> 24) & 0xff;
2608}
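
/* Standalone restatement of the mapping above: the bin is the top byte of
 * the little-endian CRC32C of the 6-byte MAC, so each multicast address
 * falls into one of 256 approximate-match bins, and two addresses whose
 * CRC32C values share the top byte become indistinguishable to the filter.
 * The helper name is hypothetical.
 */
static inline u8 example_mcast_bin(const u8 mac[ETH_ALEN])
{
	return (crc32c_le(0, mac, ETH_ALEN) >> 24) & 0xff;
}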
2609
2610struct bnx2x_mcast_mac_elem {
2611 struct list_head link;
2612 u8 mac[ETH_ALEN];
2613 u8 pad[2]; /* For a natural alignment of the following buffer */
2614};
2615
2616struct bnx2x_pending_mcast_cmd {
2617 struct list_head link;
2618 int type; /* BNX2X_MCAST_CMD_X */
2619 union {
2620 struct list_head macs_head;
2621 u32 macs_num; /* Needed for DEL command */
2622 int next_bin; /* Needed for RESTORE flow with aprox match */
2623 } data;
2624
 2625 bool done; /* set to true when the command has been handled. It is
 2626 * practically used in 57712 handling only, where one pending
 2627 * command may be handled in a few operations. Since for
 2628 * other chips every operation is completed in a
 2629 * single ramrod, there is no need to utilize this field.
2630 */
2631};
2632
2633static int bnx2x_mcast_wait(struct bnx2x *bp,
2634 struct bnx2x_mcast_obj *o)
2635{
2636 if (bnx2x_state_wait(bp, o->sched_state, o->raw.pstate) ||
2637 o->raw.wait_comp(bp, &o->raw))
2638 return -EBUSY;
2639
2640 return 0;
2641}
2642
2643static int bnx2x_mcast_enqueue_cmd(struct bnx2x *bp,
2644 struct bnx2x_mcast_obj *o,
2645 struct bnx2x_mcast_ramrod_params *p,
Yuval Mintz86564c32013-01-23 03:21:50 +00002646 enum bnx2x_mcast_cmd cmd)
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03002647{
2648 int total_sz;
2649 struct bnx2x_pending_mcast_cmd *new_cmd;
2650 struct bnx2x_mcast_mac_elem *cur_mac = NULL;
2651 struct bnx2x_mcast_list_elem *pos;
2652 int macs_list_len = ((cmd == BNX2X_MCAST_CMD_ADD) ?
2653 p->mcast_list_len : 0);
2654
 2655 /* If the command is empty ("handle pending commands only"), return */
2656 if (!p->mcast_list_len)
2657 return 0;
2658
2659 total_sz = sizeof(*new_cmd) +
2660 macs_list_len * sizeof(struct bnx2x_mcast_mac_elem);
2661
 2662 /* Add mcast is called under spin_lock, thus allocate with GFP_ATOMIC */
2663 new_cmd = kzalloc(total_sz, GFP_ATOMIC);
2664
2665 if (!new_cmd)
2666 return -ENOMEM;
2667
Merav Sicron51c1a582012-03-18 10:33:38 +00002668 DP(BNX2X_MSG_SP, "About to enqueue a new %d command. macs_list_len=%d\n",
2669 cmd, macs_list_len);
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03002670
2671 INIT_LIST_HEAD(&new_cmd->data.macs_head);
2672
2673 new_cmd->type = cmd;
2674 new_cmd->done = false;
2675
2676 switch (cmd) {
2677 case BNX2X_MCAST_CMD_ADD:
2678 cur_mac = (struct bnx2x_mcast_mac_elem *)
2679 ((u8 *)new_cmd + sizeof(*new_cmd));
2680
Yuval Mintz16a5fd92013-06-02 00:06:18 +00002681 /* Push the MACs of the current command into the pending command
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03002682 * MACs list: FIFO
Vladislav Zolotarov042181f2011-06-14 01:33:39 +00002683 */
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03002684 list_for_each_entry(pos, &p->mcast_list, link) {
2685 memcpy(cur_mac->mac, pos->mac, ETH_ALEN);
2686 list_add_tail(&cur_mac->link, &new_cmd->data.macs_head);
2687 cur_mac++;
Vladislav Zolotarov042181f2011-06-14 01:33:39 +00002688 }
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03002689
Vladislav Zolotarov042181f2011-06-14 01:33:39 +00002690 break;
2691
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03002692 case BNX2X_MCAST_CMD_DEL:
2693 new_cmd->data.macs_num = p->mcast_list_len;
2694 break;
2695
2696 case BNX2X_MCAST_CMD_RESTORE:
2697 new_cmd->data.next_bin = 0;
Vladislav Zolotarov042181f2011-06-14 01:33:39 +00002698 break;
2699
2700 default:
Jesper Juhl8b6d5c02012-07-31 11:39:37 +00002701 kfree(new_cmd);
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03002702 BNX2X_ERR("Unknown command: %d\n", cmd);
2703 return -EINVAL;
2704 }
2705
2706 /* Push the new pending command to the tail of the pending list: FIFO */
2707 list_add_tail(&new_cmd->link, &o->pending_cmds_head);
2708
2709 o->set_sched(o);
2710
2711 return 1;
2712}
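
/* Memory layout of the single allocation above (illustrative): the command
 * header and its MAC array are co-allocated, and cur_mac starts right past
 * the header at (u8 *)new_cmd + sizeof(*new_cmd):
 *
 *   +--------------------------+-------------+-------------+-----+
 *   | bnx2x_pending_mcast_cmd  | mac_elem[0] | mac_elem[1] | ... |
 *   +--------------------------+-------------+-------------+-----+
 *
 * This keeps the enqueue path at one GFP_ATOMIC allocation instead of
 * 1 + macs_list_len of them.
 */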
2713
2714/**
2715 * bnx2x_mcast_get_next_bin - get the next set bin (index)
2716 *
 2717 * @o: multicast object
 2718 * @last: index to start looking from (inclusive)
2719 *
2720 * returns the next found (set) bin or a negative value if none is found.
2721 */
2722static inline int bnx2x_mcast_get_next_bin(struct bnx2x_mcast_obj *o, int last)
2723{
2724 int i, j, inner_start = last % BIT_VEC64_ELEM_SZ;
2725
2726 for (i = last / BIT_VEC64_ELEM_SZ; i < BNX2X_MCAST_VEC_SZ; i++) {
2727 if (o->registry.aprox_match.vec[i])
2728 for (j = inner_start; j < BIT_VEC64_ELEM_SZ; j++) {
2729 int cur_bit = j + BIT_VEC64_ELEM_SZ * i;
2730 if (BIT_VEC64_TEST_BIT(o->registry.aprox_match.
2731 vec, cur_bit)) {
2732 return cur_bit;
2733 }
2734 }
2735 inner_start = 0;
2736 }
2737
2738 /* None found */
2739 return -1;
2740}
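
/* A standalone sketch of the same scan (assuming BIT_VEC64_ELEM_SZ == 64,
 * matching the macro family used above); the names are hypothetical:
 */
static int example_next_set_bit(const u64 *vec, int nwords, int last)
{
	int i, j, inner_start = last % 64;

	for (i = last / 64; i < nwords; i++) {
		if (vec[i])
			for (j = inner_start; j < 64; j++)
				if (vec[i] & (1ULL << j))
					return i * 64 + j;	/* found */
		inner_start = 0;	/* later words scan from bit 0 */
	}

	return -1;	/* no set bin at or after 'last' */
}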
2741
2742/**
2743 * bnx2x_mcast_clear_first_bin - find the first set bin and clear it
2744 *
 2745 * @o: multicast object
2746 *
2747 * returns the index of the found bin or -1 if none is found
2748 */
2749static inline int bnx2x_mcast_clear_first_bin(struct bnx2x_mcast_obj *o)
2750{
2751 int cur_bit = bnx2x_mcast_get_next_bin(o, 0);
2752
2753 if (cur_bit >= 0)
2754 BIT_VEC64_CLEAR_BIT(o->registry.aprox_match.vec, cur_bit);
2755
2756 return cur_bit;
2757}
2758
2759static inline u8 bnx2x_mcast_get_rx_tx_flag(struct bnx2x_mcast_obj *o)
2760{
2761 struct bnx2x_raw_obj *raw = &o->raw;
2762 u8 rx_tx_flag = 0;
2763
2764 if ((raw->obj_type == BNX2X_OBJ_TYPE_TX) ||
2765 (raw->obj_type == BNX2X_OBJ_TYPE_RX_TX))
2766 rx_tx_flag |= ETH_MULTICAST_RULES_CMD_TX_CMD;
2767
2768 if ((raw->obj_type == BNX2X_OBJ_TYPE_RX) ||
2769 (raw->obj_type == BNX2X_OBJ_TYPE_RX_TX))
2770 rx_tx_flag |= ETH_MULTICAST_RULES_CMD_RX_CMD;
2771
2772 return rx_tx_flag;
2773}
2774
2775static void bnx2x_mcast_set_one_rule_e2(struct bnx2x *bp,
2776 struct bnx2x_mcast_obj *o, int idx,
2777 union bnx2x_mcast_config_data *cfg_data,
Yuval Mintz86564c32013-01-23 03:21:50 +00002778 enum bnx2x_mcast_cmd cmd)
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03002779{
2780 struct bnx2x_raw_obj *r = &o->raw;
2781 struct eth_multicast_rules_ramrod_data *data =
2782 (struct eth_multicast_rules_ramrod_data *)(r->rdata);
2783 u8 func_id = r->func_id;
2784 u8 rx_tx_add_flag = bnx2x_mcast_get_rx_tx_flag(o);
2785 int bin;
2786
2787 if ((cmd == BNX2X_MCAST_CMD_ADD) || (cmd == BNX2X_MCAST_CMD_RESTORE))
2788 rx_tx_add_flag |= ETH_MULTICAST_RULES_CMD_IS_ADD;
2789
2790 data->rules[idx].cmd_general_data |= rx_tx_add_flag;
2791
2792 /* Get a bin and update a bins' vector */
2793 switch (cmd) {
2794 case BNX2X_MCAST_CMD_ADD:
2795 bin = bnx2x_mcast_bin_from_mac(cfg_data->mac);
2796 BIT_VEC64_SET_BIT(o->registry.aprox_match.vec, bin);
Vladislav Zolotarov042181f2011-06-14 01:33:39 +00002797 break;
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03002798
2799 case BNX2X_MCAST_CMD_DEL:
2800 /* If there were no more bins to clear
2801 * (bnx2x_mcast_clear_first_bin() returns -1) then we would
2802 * clear any (0xff) bin.
 2803 * See bnx2x_mcast_validate_e2() for an explanation of when it may
2804 * happen.
2805 */
2806 bin = bnx2x_mcast_clear_first_bin(o);
2807 break;
2808
2809 case BNX2X_MCAST_CMD_RESTORE:
2810 bin = cfg_data->bin;
2811 break;
2812
2813 default:
2814 BNX2X_ERR("Unknown command: %d\n", cmd);
2815 return;
Vladislav Zolotarov042181f2011-06-14 01:33:39 +00002816 }
2817
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03002818 DP(BNX2X_MSG_SP, "%s bin %d\n",
2819 ((rx_tx_add_flag & ETH_MULTICAST_RULES_CMD_IS_ADD) ?
2820 "Setting" : "Clearing"), bin);
Vladislav Zolotarov042181f2011-06-14 01:33:39 +00002821
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03002822 data->rules[idx].bin_id = (u8)bin;
2823 data->rules[idx].func_id = func_id;
2824 data->rules[idx].engine_id = o->engine_id;
Vladislav Zolotarov042181f2011-06-14 01:33:39 +00002825}
2826
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03002827/**
2828 * bnx2x_mcast_handle_restore_cmd_e2 - restore configuration from the registry
2829 *
2830 * @bp: device handle
 2831 * @o: multicast object
2832 * @start_bin: index in the registry to start from (including)
2833 * @rdata_idx: index in the ramrod data to start from
2834 *
2835 * returns last handled bin index or -1 if all bins have been handled
2836 */
2837static inline int bnx2x_mcast_handle_restore_cmd_e2(
2838 struct bnx2x *bp, struct bnx2x_mcast_obj *o , int start_bin,
2839 int *rdata_idx)
Vladislav Zolotarov042181f2011-06-14 01:33:39 +00002840{
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03002841 int cur_bin, cnt = *rdata_idx;
Yuval Mintz86564c32013-01-23 03:21:50 +00002842 union bnx2x_mcast_config_data cfg_data = {NULL};
Vladislav Zolotarov042181f2011-06-14 01:33:39 +00002843
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03002844 /* go through the registry and configure the bins from it */
2845 for (cur_bin = bnx2x_mcast_get_next_bin(o, start_bin); cur_bin >= 0;
2846 cur_bin = bnx2x_mcast_get_next_bin(o, cur_bin + 1)) {
Vladislav Zolotarov042181f2011-06-14 01:33:39 +00002847
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03002848 cfg_data.bin = (u8)cur_bin;
2849 o->set_one_rule(bp, o, cnt, &cfg_data,
2850 BNX2X_MCAST_CMD_RESTORE);
Vladislav Zolotarov042181f2011-06-14 01:33:39 +00002851
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03002852 cnt++;
Vladislav Zolotarov042181f2011-06-14 01:33:39 +00002853
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03002854 DP(BNX2X_MSG_SP, "About to configure a bin %d\n", cur_bin);
Vladislav Zolotarov042181f2011-06-14 01:33:39 +00002855
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03002856 /* Break if we reached the maximum number
2857 * of rules.
2858 */
2859 if (cnt >= o->max_cmd_len)
2860 break;
Vladislav Zolotarov042181f2011-06-14 01:33:39 +00002861 }
2862
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03002863 *rdata_idx = cnt;
2864
2865 return cur_bin;
Vladislav Zolotarov042181f2011-06-14 01:33:39 +00002866}
2867
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03002868static inline void bnx2x_mcast_hdl_pending_add_e2(struct bnx2x *bp,
2869 struct bnx2x_mcast_obj *o, struct bnx2x_pending_mcast_cmd *cmd_pos,
2870 int *line_idx)
Vladislav Zolotarov042181f2011-06-14 01:33:39 +00002871{
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03002872 struct bnx2x_mcast_mac_elem *pmac_pos, *pmac_pos_n;
2873 int cnt = *line_idx;
Yuval Mintz86564c32013-01-23 03:21:50 +00002874 union bnx2x_mcast_config_data cfg_data = {NULL};
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03002875
2876 list_for_each_entry_safe(pmac_pos, pmac_pos_n, &cmd_pos->data.macs_head,
2877 link) {
2878
2879 cfg_data.mac = &pmac_pos->mac[0];
2880 o->set_one_rule(bp, o, cnt, &cfg_data, cmd_pos->type);
2881
2882 cnt++;
2883
Joe Perches0f9dad12011-08-14 12:16:19 +00002884 DP(BNX2X_MSG_SP, "About to configure %pM mcast MAC\n",
Merav Sicron51c1a582012-03-18 10:33:38 +00002885 pmac_pos->mac);
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03002886
2887 list_del(&pmac_pos->link);
2888
2889 /* Break if we reached the maximum number
2890 * of rules.
2891 */
2892 if (cnt >= o->max_cmd_len)
2893 break;
2894 }
2895
2896 *line_idx = cnt;
2897
2898 /* if no more MACs to configure - we are done */
2899 if (list_empty(&cmd_pos->data.macs_head))
2900 cmd_pos->done = true;
2901}
2902
2903static inline void bnx2x_mcast_hdl_pending_del_e2(struct bnx2x *bp,
2904 struct bnx2x_mcast_obj *o, struct bnx2x_pending_mcast_cmd *cmd_pos,
2905 int *line_idx)
2906{
2907 int cnt = *line_idx;
2908
2909 while (cmd_pos->data.macs_num) {
2910 o->set_one_rule(bp, o, cnt, NULL, cmd_pos->type);
2911
2912 cnt++;
2913
2914 cmd_pos->data.macs_num--;
2915
 2916 DP(BNX2X_MSG_SP, "Deleting MAC. %d left, cnt is %d\n",
2917 cmd_pos->data.macs_num, cnt);
2918
2919 /* Break if we reached the maximum
2920 * number of rules.
2921 */
2922 if (cnt >= o->max_cmd_len)
2923 break;
2924 }
2925
2926 *line_idx = cnt;
2927
2928 /* If we cleared all bins - we are done */
2929 if (!cmd_pos->data.macs_num)
2930 cmd_pos->done = true;
2931}
2932
2933static inline void bnx2x_mcast_hdl_pending_restore_e2(struct bnx2x *bp,
2934 struct bnx2x_mcast_obj *o, struct bnx2x_pending_mcast_cmd *cmd_pos,
2935 int *line_idx)
2936{
2937 cmd_pos->data.next_bin = o->hdl_restore(bp, o, cmd_pos->data.next_bin,
2938 line_idx);
2939
2940 if (cmd_pos->data.next_bin < 0)
2941 /* If o->set_restore returned -1 we are done */
2942 cmd_pos->done = true;
2943 else
2944 /* Start from the next bin next time */
2945 cmd_pos->data.next_bin++;
2946}
2947
2948static inline int bnx2x_mcast_handle_pending_cmds_e2(struct bnx2x *bp,
2949 struct bnx2x_mcast_ramrod_params *p)
2950{
2951 struct bnx2x_pending_mcast_cmd *cmd_pos, *cmd_pos_n;
2952 int cnt = 0;
2953 struct bnx2x_mcast_obj *o = p->mcast_obj;
2954
2955 list_for_each_entry_safe(cmd_pos, cmd_pos_n, &o->pending_cmds_head,
2956 link) {
2957 switch (cmd_pos->type) {
2958 case BNX2X_MCAST_CMD_ADD:
2959 bnx2x_mcast_hdl_pending_add_e2(bp, o, cmd_pos, &cnt);
2960 break;
2961
2962 case BNX2X_MCAST_CMD_DEL:
2963 bnx2x_mcast_hdl_pending_del_e2(bp, o, cmd_pos, &cnt);
2964 break;
2965
2966 case BNX2X_MCAST_CMD_RESTORE:
2967 bnx2x_mcast_hdl_pending_restore_e2(bp, o, cmd_pos,
2968 &cnt);
2969 break;
2970
2971 default:
2972 BNX2X_ERR("Unknown command: %d\n", cmd_pos->type);
2973 return -EINVAL;
2974 }
2975
2976 /* If the command has been completed - remove it from the list
2977 * and free the memory
2978 */
2979 if (cmd_pos->done) {
2980 list_del(&cmd_pos->link);
2981 kfree(cmd_pos);
2982 }
2983
2984 /* Break if we reached the maximum number of rules */
2985 if (cnt >= o->max_cmd_len)
2986 break;
2987 }
2988
2989 return cnt;
2990}
2991
2992static inline void bnx2x_mcast_hdl_add(struct bnx2x *bp,
2993 struct bnx2x_mcast_obj *o, struct bnx2x_mcast_ramrod_params *p,
2994 int *line_idx)
2995{
2996 struct bnx2x_mcast_list_elem *mlist_pos;
Yuval Mintz86564c32013-01-23 03:21:50 +00002997 union bnx2x_mcast_config_data cfg_data = {NULL};
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03002998 int cnt = *line_idx;
2999
3000 list_for_each_entry(mlist_pos, &p->mcast_list, link) {
3001 cfg_data.mac = mlist_pos->mac;
3002 o->set_one_rule(bp, o, cnt, &cfg_data, BNX2X_MCAST_CMD_ADD);
3003
3004 cnt++;
3005
Joe Perches0f9dad12011-08-14 12:16:19 +00003006 DP(BNX2X_MSG_SP, "About to configure %pM mcast MAC\n",
Yuval Mintz2de67432013-01-23 03:21:43 +00003007 mlist_pos->mac);
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03003008 }
3009
3010 *line_idx = cnt;
3011}
3012
3013static inline void bnx2x_mcast_hdl_del(struct bnx2x *bp,
3014 struct bnx2x_mcast_obj *o, struct bnx2x_mcast_ramrod_params *p,
3015 int *line_idx)
3016{
3017 int cnt = *line_idx, i;
3018
3019 for (i = 0; i < p->mcast_list_len; i++) {
3020 o->set_one_rule(bp, o, cnt, NULL, BNX2X_MCAST_CMD_DEL);
3021
3022 cnt++;
3023
3024 DP(BNX2X_MSG_SP, "Deleting MAC. %d left\n",
3025 p->mcast_list_len - i - 1);
3026 }
3027
3028 *line_idx = cnt;
3029}
3030
3031/**
 3032 * bnx2x_mcast_handle_current_cmd - fill ramrod data with the current command
 3033 *
 3034 * @bp: device handle
 3035 * @p: ramrod parameters of the current command
 3036 * @cmd: command type
3037 * @start_cnt: first line in the ramrod data that may be used
3038 *
 3039 * This function is called iff there is enough room for the current command in
3040 * the ramrod data.
3041 * Returns number of lines filled in the ramrod data in total.
3042 */
3043static inline int bnx2x_mcast_handle_current_cmd(struct bnx2x *bp,
Yuval Mintz86564c32013-01-23 03:21:50 +00003044 struct bnx2x_mcast_ramrod_params *p,
3045 enum bnx2x_mcast_cmd cmd,
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03003046 int start_cnt)
3047{
3048 struct bnx2x_mcast_obj *o = p->mcast_obj;
3049 int cnt = start_cnt;
3050
3051 DP(BNX2X_MSG_SP, "p->mcast_list_len=%d\n", p->mcast_list_len);
3052
3053 switch (cmd) {
3054 case BNX2X_MCAST_CMD_ADD:
3055 bnx2x_mcast_hdl_add(bp, o, p, &cnt);
3056 break;
3057
3058 case BNX2X_MCAST_CMD_DEL:
3059 bnx2x_mcast_hdl_del(bp, o, p, &cnt);
3060 break;
3061
3062 case BNX2X_MCAST_CMD_RESTORE:
3063 o->hdl_restore(bp, o, 0, &cnt);
3064 break;
3065
3066 default:
3067 BNX2X_ERR("Unknown command: %d\n", cmd);
3068 return -EINVAL;
3069 }
3070
3071 /* The current command has been handled */
3072 p->mcast_list_len = 0;
3073
3074 return cnt;
3075}
3076
3077static int bnx2x_mcast_validate_e2(struct bnx2x *bp,
3078 struct bnx2x_mcast_ramrod_params *p,
Yuval Mintz86564c32013-01-23 03:21:50 +00003079 enum bnx2x_mcast_cmd cmd)
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03003080{
3081 struct bnx2x_mcast_obj *o = p->mcast_obj;
3082 int reg_sz = o->get_registry_size(o);
3083
3084 switch (cmd) {
3085 /* DEL command deletes all currently configured MACs */
3086 case BNX2X_MCAST_CMD_DEL:
3087 o->set_registry_size(o, 0);
3088 /* Don't break */
3089
3090 /* RESTORE command will restore the entire multicast configuration */
3091 case BNX2X_MCAST_CMD_RESTORE:
3092 /* Here we set the approximate amount of work to do, which in
 3093 * fact may turn out to be less, as some MACs in postponed ADD
3094 * command(s) scheduled before this command may fall into
3095 * the same bin and the actual number of bins set in the
3096 * registry would be less than we estimated here. See
3097 * bnx2x_mcast_set_one_rule_e2() for further details.
3098 */
3099 p->mcast_list_len = reg_sz;
3100 break;
3101
3102 case BNX2X_MCAST_CMD_ADD:
3103 case BNX2X_MCAST_CMD_CONT:
3104 /* Here we assume that all new MACs will fall into new bins.
3105 * However we will correct the real registry size after we
3106 * handle all pending commands.
3107 */
3108 o->set_registry_size(o, reg_sz + p->mcast_list_len);
3109 break;
3110
3111 default:
3112 BNX2X_ERR("Unknown command: %d\n", cmd);
3113 return -EINVAL;
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03003114 }
3115
3116 /* Increase the total number of MACs pending to be configured */
3117 o->total_pending_num += p->mcast_list_len;
3118
3119 return 0;
3120}
3121
3122static void bnx2x_mcast_revert_e2(struct bnx2x *bp,
3123 struct bnx2x_mcast_ramrod_params *p,
3124 int old_num_bins)
3125{
3126 struct bnx2x_mcast_obj *o = p->mcast_obj;
3127
3128 o->set_registry_size(o, old_num_bins);
3129 o->total_pending_num -= p->mcast_list_len;
3130}
3131
3132/**
 3133 * bnx2x_mcast_set_rdata_hdr_e2 - set the ramrod data header values
 3134 *
 3135 * @bp: device handle
 3136 * @p: multicast ramrod parameters
3137 * @len: number of rules to handle
3138 */
3139static inline void bnx2x_mcast_set_rdata_hdr_e2(struct bnx2x *bp,
3140 struct bnx2x_mcast_ramrod_params *p,
3141 u8 len)
3142{
3143 struct bnx2x_raw_obj *r = &p->mcast_obj->raw;
3144 struct eth_multicast_rules_ramrod_data *data =
3145 (struct eth_multicast_rules_ramrod_data *)(r->rdata);
3146
Yuval Mintz86564c32013-01-23 03:21:50 +00003147 data->header.echo = cpu_to_le32((r->cid & BNX2X_SWCID_MASK) |
3148 (BNX2X_FILTER_MCAST_PENDING <<
3149 BNX2X_SWCID_SHIFT));
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03003150 data->header.rule_cnt = len;
3151}
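/* Illustrative decode of the echo field set above (a hypothetical helper,
 * not part of the driver): the low bits carry the SW CID and the bits from
 * BNX2X_SWCID_SHIFT up carry the pending command type. The driver itself
 * performs this unpacking on the completion path; the echo value here is
 * assumed to already be in CPU byte order.
 */
static inline void bnx2x_mcast_decode_echo_sketch(u32 echo, u32 *cid,
						  u32 *cmd_type)
{
	*cid = echo & BNX2X_SWCID_MASK;
	*cmd_type = echo >> BNX2X_SWCID_SHIFT;
}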
3152
3153/**
3154 * bnx2x_mcast_refresh_registry_e2 - recalculate the actual number of set bins
3155 *
3156 * @bp: device handle
 3157 * @o: multicast object
 3158 *
 3159 * Recalculate the actual number of set bins in the registry using Brian
 3160 * Kernighan's algorithm: its run time is proportional to the number of set bins.
3161 *
3162 * returns 0 for the compliance with bnx2x_mcast_refresh_registry_e1().
3163 */
3164static inline int bnx2x_mcast_refresh_registry_e2(struct bnx2x *bp,
3165 struct bnx2x_mcast_obj *o)
3166{
3167 int i, cnt = 0;
3168 u64 elem;
3169
3170 for (i = 0; i < BNX2X_MCAST_VEC_SZ; i++) {
3171 elem = o->registry.aprox_match.vec[i];
3172 for (; elem; cnt++)
3173 elem &= elem - 1;
3174 }
3175
3176 o->set_registry_size(o, cnt);
3177
3178 return 0;
3179}
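/* A minimal standalone sketch (illustrative, not part of the driver) of the
 * bit-counting trick used above: "elem &= elem - 1" clears the lowest set
 * bit, so the loop iterates exactly once per set bit.
 */
static inline int bnx2x_popcount64_sketch(u64 v)
{
	int cnt = 0;

	while (v) {
		v &= v - 1;	/* clear the lowest set bit */
		cnt++;
	}

	return cnt;
}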
3180
3181static int bnx2x_mcast_setup_e2(struct bnx2x *bp,
3182 struct bnx2x_mcast_ramrod_params *p,
Yuval Mintz86564c32013-01-23 03:21:50 +00003183 enum bnx2x_mcast_cmd cmd)
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03003184{
3185 struct bnx2x_raw_obj *raw = &p->mcast_obj->raw;
3186 struct bnx2x_mcast_obj *o = p->mcast_obj;
3187 struct eth_multicast_rules_ramrod_data *data =
3188 (struct eth_multicast_rules_ramrod_data *)(raw->rdata);
3189 int cnt = 0, rc;
3190
3191 /* Reset the ramrod data buffer */
Vladislav Zolotarov042181f2011-06-14 01:33:39 +00003192 memset(data, 0, sizeof(*data));
3193
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03003194 cnt = bnx2x_mcast_handle_pending_cmds_e2(bp, p);
3195
3196 /* If there are no more pending commands - clear SCHEDULED state */
3197 if (list_empty(&o->pending_cmds_head))
3198 o->clear_sched(o);
3199
 3200	/* The below may be true iff there was enough room in the ramrod
3201 * data for all pending commands and for the current
3202 * command. Otherwise the current command would have been added
3203 * to the pending commands and p->mcast_list_len would have been
3204 * zeroed.
3205 */
3206 if (p->mcast_list_len > 0)
3207 cnt = bnx2x_mcast_handle_current_cmd(bp, p, cmd, cnt);
3208
3209 /* We've pulled out some MACs - update the total number of
3210 * outstanding.
3211 */
3212 o->total_pending_num -= cnt;
3213
3214 /* send a ramrod */
3215 WARN_ON(o->total_pending_num < 0);
3216 WARN_ON(cnt > o->max_cmd_len);
3217
3218 bnx2x_mcast_set_rdata_hdr_e2(bp, p, (u8)cnt);
3219
3220 /* Update a registry size if there are no more pending operations.
3221 *
3222 * We don't want to change the value of the registry size if there are
3223 * pending operations because we want it to always be equal to the
3224 * exact or the approximate number (see bnx2x_mcast_validate_e2()) of
3225 * set bins after the last requested operation in order to properly
3226 * evaluate the size of the next DEL/RESTORE operation.
3227 *
3228 * Note that we update the registry itself during command(s) handling
3229 * - see bnx2x_mcast_set_one_rule_e2(). That's because for 57712 we
3230 * aggregate multiple commands (ADD/DEL/RESTORE) into one ramrod but
3231 * with a limited amount of update commands (per MAC/bin) and we don't
3232 * know in this scope what the actual state of bins configuration is
3233 * going to be after this ramrod.
3234 */
3235 if (!o->total_pending_num)
3236 bnx2x_mcast_refresh_registry_e2(bp, o);
3237
Yuval Mintz16a5fd92013-06-02 00:06:18 +00003238 /* If CLEAR_ONLY was requested - don't send a ramrod and clear
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03003239 * RAMROD_PENDING status immediately.
3240 */
3241 if (test_bit(RAMROD_DRV_CLR_ONLY, &p->ramrod_flags)) {
3242 raw->clear_pending(raw);
3243 return 0;
3244 } else {
Yuval Mintz16a5fd92013-06-02 00:06:18 +00003245		/* No need for an explicit memory barrier here as long as we
 3246		 * ensure the ordering of writing to the SPQ element
 3247		 * and updating of the SPQ producer, which involves a memory
 3248		 * read. If the memory read is removed we will have to put a
 3249		 * full memory barrier there (inside bnx2x_sp_post()).
Vladislav Zolotarov53e51e22011-07-19 01:45:02 +00003250 */
3251
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03003252 /* Send a ramrod */
3253 rc = bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_MULTICAST_RULES,
3254 raw->cid, U64_HI(raw->rdata_mapping),
3255 U64_LO(raw->rdata_mapping),
3256 ETH_CONNECTION_TYPE);
3257 if (rc)
3258 return rc;
3259
3260 /* Ramrod completion is pending */
3261 return 1;
3262 }
3263}
3264
3265static int bnx2x_mcast_validate_e1h(struct bnx2x *bp,
3266 struct bnx2x_mcast_ramrod_params *p,
Yuval Mintz86564c32013-01-23 03:21:50 +00003267 enum bnx2x_mcast_cmd cmd)
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03003268{
 3269	/* Mark that there is work to do */
3270 if ((cmd == BNX2X_MCAST_CMD_DEL) || (cmd == BNX2X_MCAST_CMD_RESTORE))
3271 p->mcast_list_len = 1;
3272
3273 return 0;
3274}
3275
3276static void bnx2x_mcast_revert_e1h(struct bnx2x *bp,
3277 struct bnx2x_mcast_ramrod_params *p,
3278 int old_num_bins)
3279{
3280 /* Do nothing */
3281}
3282
3283#define BNX2X_57711_SET_MC_FILTER(filter, bit) \
3284do { \
3285 (filter)[(bit) >> 5] |= (1 << ((bit) & 0x1f)); \
3286} while (0)
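/* Illustrative counterpart of the macro above (a hypothetical helper, not
 * used by the driver): bin N of the filter lives in 32-bit word N / 32 at
 * bit position N % 32, so e.g. bin 37 maps to mc_filter[1], bit 5.
 */
static inline bool bnx2x_57711_test_mc_filter_sketch(const u32 *mc_filter,
						     int bit)
{
	return (mc_filter[bit >> 5] >> (bit & 0x1f)) & 1;
}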
3287
3288static inline void bnx2x_mcast_hdl_add_e1h(struct bnx2x *bp,
3289 struct bnx2x_mcast_obj *o,
3290 struct bnx2x_mcast_ramrod_params *p,
3291 u32 *mc_filter)
3292{
3293 struct bnx2x_mcast_list_elem *mlist_pos;
3294 int bit;
3295
3296 list_for_each_entry(mlist_pos, &p->mcast_list, link) {
3297 bit = bnx2x_mcast_bin_from_mac(mlist_pos->mac);
3298 BNX2X_57711_SET_MC_FILTER(mc_filter, bit);
3299
Joe Perches0f9dad12011-08-14 12:16:19 +00003300 DP(BNX2X_MSG_SP, "About to configure %pM mcast MAC, bin %d\n",
Yuval Mintz2de67432013-01-23 03:21:43 +00003301 mlist_pos->mac, bit);
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03003302
3303 /* bookkeeping... */
3304 BIT_VEC64_SET_BIT(o->registry.aprox_match.vec,
3305 bit);
3306 }
3307}
3308
3309static inline void bnx2x_mcast_hdl_restore_e1h(struct bnx2x *bp,
3310 struct bnx2x_mcast_obj *o, struct bnx2x_mcast_ramrod_params *p,
3311 u32 *mc_filter)
3312{
3313 int bit;
3314
3315 for (bit = bnx2x_mcast_get_next_bin(o, 0);
3316 bit >= 0;
3317 bit = bnx2x_mcast_get_next_bin(o, bit + 1)) {
3318 BNX2X_57711_SET_MC_FILTER(mc_filter, bit);
3319 DP(BNX2X_MSG_SP, "About to set bin %d\n", bit);
3320 }
3321}
3322
Yuval Mintz16a5fd92013-06-02 00:06:18 +00003323/* On 57711 we write the multicast MACs' approximate match
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03003324 * table directly into the TSTORM's internal RAM, so we don't
 3325 * really need any tricks to make it work.
3326 */
3327static int bnx2x_mcast_setup_e1h(struct bnx2x *bp,
3328 struct bnx2x_mcast_ramrod_params *p,
Yuval Mintz86564c32013-01-23 03:21:50 +00003329 enum bnx2x_mcast_cmd cmd)
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03003330{
3331 int i;
3332 struct bnx2x_mcast_obj *o = p->mcast_obj;
3333 struct bnx2x_raw_obj *r = &o->raw;
3334
3335 /* If CLEAR_ONLY has been requested - clear the registry
3336 * and clear a pending bit.
3337 */
3338 if (!test_bit(RAMROD_DRV_CLR_ONLY, &p->ramrod_flags)) {
3339 u32 mc_filter[MC_HASH_SIZE] = {0};
3340
3341 /* Set the multicast filter bits before writing it into
3342 * the internal memory.
3343 */
3344 switch (cmd) {
3345 case BNX2X_MCAST_CMD_ADD:
3346 bnx2x_mcast_hdl_add_e1h(bp, o, p, mc_filter);
3347 break;
3348
3349 case BNX2X_MCAST_CMD_DEL:
Joe Perches94f05b02011-08-14 12:16:20 +00003350 DP(BNX2X_MSG_SP,
3351 "Invalidating multicast MACs configuration\n");
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03003352
3353 /* clear the registry */
3354 memset(o->registry.aprox_match.vec, 0,
3355 sizeof(o->registry.aprox_match.vec));
3356 break;
3357
3358 case BNX2X_MCAST_CMD_RESTORE:
3359 bnx2x_mcast_hdl_restore_e1h(bp, o, p, mc_filter);
3360 break;
3361
3362 default:
3363 BNX2X_ERR("Unknown command: %d\n", cmd);
3364 return -EINVAL;
3365 }
3366
3367 /* Set the mcast filter in the internal memory */
3368 for (i = 0; i < MC_HASH_SIZE; i++)
3369 REG_WR(bp, MC_HASH_OFFSET(bp, i), mc_filter[i]);
3370 } else
3371 /* clear the registry */
3372 memset(o->registry.aprox_match.vec, 0,
3373 sizeof(o->registry.aprox_match.vec));
3374
3375 /* We are done */
3376 r->clear_pending(r);
3377
3378 return 0;
3379}
3380
3381static int bnx2x_mcast_validate_e1(struct bnx2x *bp,
3382 struct bnx2x_mcast_ramrod_params *p,
Yuval Mintz86564c32013-01-23 03:21:50 +00003383 enum bnx2x_mcast_cmd cmd)
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03003384{
3385 struct bnx2x_mcast_obj *o = p->mcast_obj;
3386 int reg_sz = o->get_registry_size(o);
3387
3388 switch (cmd) {
3389 /* DEL command deletes all currently configured MACs */
3390 case BNX2X_MCAST_CMD_DEL:
3391 o->set_registry_size(o, 0);
3392 /* Don't break */
3393
3394 /* RESTORE command will restore the entire multicast configuration */
3395 case BNX2X_MCAST_CMD_RESTORE:
3396 p->mcast_list_len = reg_sz;
3397 DP(BNX2X_MSG_SP, "Command %d, p->mcast_list_len=%d\n",
3398 cmd, p->mcast_list_len);
3399 break;
3400
3401 case BNX2X_MCAST_CMD_ADD:
3402 case BNX2X_MCAST_CMD_CONT:
3403 /* Multicast MACs on 57710 are configured as unicast MACs and
 3404		 * there is only a limited number of CAM entries available
 3405		 * for them.
3406 */
3407 if (p->mcast_list_len > o->max_cmd_len) {
Merav Sicron51c1a582012-03-18 10:33:38 +00003408 BNX2X_ERR("Can't configure more than %d multicast MACs on 57710\n",
3409 o->max_cmd_len);
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03003410 return -EINVAL;
3411 }
3412 /* Every configured MAC should be cleared if DEL command is
 3413		 * called. Only the last ADD command is relevant, since
 3414		 * every ADD command overrides the previous configuration.
3415 */
3416 DP(BNX2X_MSG_SP, "p->mcast_list_len=%d\n", p->mcast_list_len);
3417 if (p->mcast_list_len > 0)
3418 o->set_registry_size(o, p->mcast_list_len);
3419
3420 break;
3421
3422 default:
3423 BNX2X_ERR("Unknown command: %d\n", cmd);
3424 return -EINVAL;
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03003425 }
3426
3427 /* We want to ensure that commands are executed one by one for 57710.
 3428	 * Therefore each non-empty command will consume o->max_cmd_len.
3429 */
3430 if (p->mcast_list_len)
3431 o->total_pending_num += o->max_cmd_len;
3432
3433 return 0;
3434}
3435
3436static void bnx2x_mcast_revert_e1(struct bnx2x *bp,
3437 struct bnx2x_mcast_ramrod_params *p,
3438 int old_num_macs)
3439{
3440 struct bnx2x_mcast_obj *o = p->mcast_obj;
3441
3442 o->set_registry_size(o, old_num_macs);
3443
 3444	/* If the current command hasn't been handled yet and we are
 3445	 * here, it means that it's meant to be dropped and we have to
Yuval Mintz16a5fd92013-06-02 00:06:18 +00003446 * update the number of outstanding MACs accordingly.
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03003447 */
3448 if (p->mcast_list_len)
3449 o->total_pending_num -= o->max_cmd_len;
3450}
3451
3452static void bnx2x_mcast_set_one_rule_e1(struct bnx2x *bp,
3453 struct bnx2x_mcast_obj *o, int idx,
3454 union bnx2x_mcast_config_data *cfg_data,
Yuval Mintz86564c32013-01-23 03:21:50 +00003455 enum bnx2x_mcast_cmd cmd)
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03003456{
3457 struct bnx2x_raw_obj *r = &o->raw;
3458 struct mac_configuration_cmd *data =
3459 (struct mac_configuration_cmd *)(r->rdata);
3460
3461 /* copy mac */
3462 if ((cmd == BNX2X_MCAST_CMD_ADD) || (cmd == BNX2X_MCAST_CMD_RESTORE)) {
3463 bnx2x_set_fw_mac_addr(&data->config_table[idx].msb_mac_addr,
3464 &data->config_table[idx].middle_mac_addr,
3465 &data->config_table[idx].lsb_mac_addr,
3466 cfg_data->mac);
3467
3468 data->config_table[idx].vlan_id = 0;
3469 data->config_table[idx].pf_id = r->func_id;
3470 data->config_table[idx].clients_bit_vector =
3471 cpu_to_le32(1 << r->cl_id);
3472
3473 SET_FLAG(data->config_table[idx].flags,
3474 MAC_CONFIGURATION_ENTRY_ACTION_TYPE,
3475 T_ETH_MAC_COMMAND_SET);
3476 }
3477}
3478
3479/**
3480 * bnx2x_mcast_set_rdata_hdr_e1 - set header values in mac_configuration_cmd
3481 *
3482 * @bp: device handle
 3483 * @p: multicast ramrod parameters
3484 * @len: number of rules to handle
3485 */
3486static inline void bnx2x_mcast_set_rdata_hdr_e1(struct bnx2x *bp,
3487 struct bnx2x_mcast_ramrod_params *p,
3488 u8 len)
3489{
3490 struct bnx2x_raw_obj *r = &p->mcast_obj->raw;
3491 struct mac_configuration_cmd *data =
3492 (struct mac_configuration_cmd *)(r->rdata);
3493
3494 u8 offset = (CHIP_REV_IS_SLOW(bp) ?
3495 BNX2X_MAX_EMUL_MULTI*(1 + r->func_id) :
3496 BNX2X_MAX_MULTICAST*(1 + r->func_id));
3497
3498 data->hdr.offset = offset;
Yuval Mintz86564c32013-01-23 03:21:50 +00003499 data->hdr.client_id = cpu_to_le16(0xff);
3500 data->hdr.echo = cpu_to_le32((r->cid & BNX2X_SWCID_MASK) |
3501 (BNX2X_FILTER_MCAST_PENDING <<
3502 BNX2X_SWCID_SHIFT));
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03003503 data->hdr.length = len;
3504}
3505
3506/**
3507 * bnx2x_mcast_handle_restore_cmd_e1 - restore command for 57710
3508 *
3509 * @bp: device handle
 3510 * @o: multicast object
 3511 * @start_idx: index in the registry to start from
 3512 * @rdata_idx: index in the ramrod data to start from
 3513 *
 3514 * The restore command for 57710 is, like all other commands, always a
 3515 * stand-alone command - start_idx and rdata_idx will always be 0. This
 3516 * function will always succeed.
 3517 * Returns -1 to comply with the 57712 variant.
3518 */
3519static inline int bnx2x_mcast_handle_restore_cmd_e1(
3520 struct bnx2x *bp, struct bnx2x_mcast_obj *o , int start_idx,
3521 int *rdata_idx)
3522{
3523 struct bnx2x_mcast_mac_elem *elem;
3524 int i = 0;
Yuval Mintz86564c32013-01-23 03:21:50 +00003525 union bnx2x_mcast_config_data cfg_data = {NULL};
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03003526
3527 /* go through the registry and configure the MACs from it. */
3528 list_for_each_entry(elem, &o->registry.exact_match.macs, link) {
3529 cfg_data.mac = &elem->mac[0];
3530 o->set_one_rule(bp, o, i, &cfg_data, BNX2X_MCAST_CMD_RESTORE);
3531
3532 i++;
3533
Joe Perches0f9dad12011-08-14 12:16:19 +00003534 DP(BNX2X_MSG_SP, "About to configure %pM mcast MAC\n",
Yuval Mintz2de67432013-01-23 03:21:43 +00003535 cfg_data.mac);
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03003536 }
3537
3538 *rdata_idx = i;
3539
3540 return -1;
3541}
3542
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03003543static inline int bnx2x_mcast_handle_pending_cmds_e1(
3544 struct bnx2x *bp, struct bnx2x_mcast_ramrod_params *p)
3545{
3546 struct bnx2x_pending_mcast_cmd *cmd_pos;
3547 struct bnx2x_mcast_mac_elem *pmac_pos;
3548 struct bnx2x_mcast_obj *o = p->mcast_obj;
Yuval Mintz86564c32013-01-23 03:21:50 +00003549 union bnx2x_mcast_config_data cfg_data = {NULL};
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03003550 int cnt = 0;
3551
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03003552 /* If nothing to be done - return */
3553 if (list_empty(&o->pending_cmds_head))
3554 return 0;
3555
3556 /* Handle the first command */
3557 cmd_pos = list_first_entry(&o->pending_cmds_head,
3558 struct bnx2x_pending_mcast_cmd, link);
3559
3560 switch (cmd_pos->type) {
3561 case BNX2X_MCAST_CMD_ADD:
3562 list_for_each_entry(pmac_pos, &cmd_pos->data.macs_head, link) {
3563 cfg_data.mac = &pmac_pos->mac[0];
3564 o->set_one_rule(bp, o, cnt, &cfg_data, cmd_pos->type);
3565
3566 cnt++;
3567
Joe Perches0f9dad12011-08-14 12:16:19 +00003568 DP(BNX2X_MSG_SP, "About to configure %pM mcast MAC\n",
Yuval Mintz2de67432013-01-23 03:21:43 +00003569 pmac_pos->mac);
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03003570 }
3571 break;
3572
3573 case BNX2X_MCAST_CMD_DEL:
3574 cnt = cmd_pos->data.macs_num;
3575 DP(BNX2X_MSG_SP, "About to delete %d multicast MACs\n", cnt);
3576 break;
3577
3578 case BNX2X_MCAST_CMD_RESTORE:
3579 o->hdl_restore(bp, o, 0, &cnt);
3580 break;
3581
3582 default:
3583 BNX2X_ERR("Unknown command: %d\n", cmd_pos->type);
3584 return -EINVAL;
3585 }
3586
3587 list_del(&cmd_pos->link);
3588 kfree(cmd_pos);
3589
3590 return cnt;
3591}
3592
3593/**
 3594 * bnx2x_get_fw_mac_addr - reverse bnx2x_set_fw_mac_addr().
 3595 *
 3596 * @fw_hi: top two bytes of the MAC in firmware format
 3597 * @fw_mid: middle two bytes of the MAC in firmware format
 3598 * @fw_lo: bottom two bytes of the MAC in firmware format
 3599 * @mac: buffer to store the extracted MAC address in
3600 */
3601static inline void bnx2x_get_fw_mac_addr(__le16 *fw_hi, __le16 *fw_mid,
3602 __le16 *fw_lo, u8 *mac)
3603{
3604 mac[1] = ((u8 *)fw_hi)[0];
3605 mac[0] = ((u8 *)fw_hi)[1];
3606 mac[3] = ((u8 *)fw_mid)[0];
3607 mac[2] = ((u8 *)fw_mid)[1];
3608 mac[5] = ((u8 *)fw_lo)[0];
3609 mac[4] = ((u8 *)fw_lo)[1];
3610}
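/* A short usage sketch (illustrative, not part of the driver): the firmware
 * keeps the MAC as three 16-bit words holding the address bytes in network
 * order, so 00:10:18:ab:cd:ef corresponds to 0x0010, 0x18ab and 0xcdef.
 */
static inline void bnx2x_get_fw_mac_addr_example(u8 *mac)
{
	__le16 fw_hi = cpu_to_le16(0x0010);
	__le16 fw_mid = cpu_to_le16(0x18ab);
	__le16 fw_lo = cpu_to_le16(0xcdef);

	/* yields mac[] = {0x00, 0x10, 0x18, 0xab, 0xcd, 0xef} */
	bnx2x_get_fw_mac_addr(&fw_hi, &fw_mid, &fw_lo, mac);
}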
3611
3612/**
 3613 * bnx2x_mcast_refresh_registry_e1 - update the exact-match mcast registry
 3614 *
 3615 * @bp: device handle
 3616 * @o: multicast object
 3617 *
 3618 * Check the first ramrod data entry flag to see if it's a DELETE or ADD command
 3619 * and update the registry correspondingly: if ADD - allocate memory and add
 3620 * the entries to the registry (list), if DELETE - clear the registry and free
 3621 * the memory.
3622 */
3623static inline int bnx2x_mcast_refresh_registry_e1(struct bnx2x *bp,
3624 struct bnx2x_mcast_obj *o)
3625{
3626 struct bnx2x_raw_obj *raw = &o->raw;
3627 struct bnx2x_mcast_mac_elem *elem;
3628 struct mac_configuration_cmd *data =
3629 (struct mac_configuration_cmd *)(raw->rdata);
3630
3631 /* If first entry contains a SET bit - the command was ADD,
3632 * otherwise - DEL_ALL
3633 */
3634 if (GET_FLAG(data->config_table[0].flags,
3635 MAC_CONFIGURATION_ENTRY_ACTION_TYPE)) {
3636 int i, len = data->hdr.length;
3637
 3638		/* Bail out if it was a RESTORE command */
3639 if (!list_empty(&o->registry.exact_match.macs))
3640 return 0;
3641
Thomas Meyer01e23742011-11-29 11:08:00 +00003642 elem = kcalloc(len, sizeof(*elem), GFP_ATOMIC);
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03003643 if (!elem) {
3644 BNX2X_ERR("Failed to allocate registry memory\n");
3645 return -ENOMEM;
3646 }
3647
3648 for (i = 0; i < len; i++, elem++) {
3649 bnx2x_get_fw_mac_addr(
3650 &data->config_table[i].msb_mac_addr,
3651 &data->config_table[i].middle_mac_addr,
3652 &data->config_table[i].lsb_mac_addr,
3653 elem->mac);
Joe Perches0f9dad12011-08-14 12:16:19 +00003654 DP(BNX2X_MSG_SP, "Adding registry entry for [%pM]\n",
Merav Sicron51c1a582012-03-18 10:33:38 +00003655 elem->mac);
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03003656 list_add_tail(&elem->link,
3657 &o->registry.exact_match.macs);
3658 }
3659 } else {
3660 elem = list_first_entry(&o->registry.exact_match.macs,
3661 struct bnx2x_mcast_mac_elem, link);
3662 DP(BNX2X_MSG_SP, "Deleting a registry\n");
3663 kfree(elem);
3664 INIT_LIST_HEAD(&o->registry.exact_match.macs);
3665 }
3666
3667 return 0;
3668}
3669
3670static int bnx2x_mcast_setup_e1(struct bnx2x *bp,
3671 struct bnx2x_mcast_ramrod_params *p,
Yuval Mintz86564c32013-01-23 03:21:50 +00003672 enum bnx2x_mcast_cmd cmd)
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03003673{
3674 struct bnx2x_mcast_obj *o = p->mcast_obj;
3675 struct bnx2x_raw_obj *raw = &o->raw;
3676 struct mac_configuration_cmd *data =
3677 (struct mac_configuration_cmd *)(raw->rdata);
3678 int cnt = 0, i, rc;
3679
3680 /* Reset the ramrod data buffer */
3681 memset(data, 0, sizeof(*data));
3682
3683 /* First set all entries as invalid */
3684 for (i = 0; i < o->max_cmd_len ; i++)
3685 SET_FLAG(data->config_table[i].flags,
3686 MAC_CONFIGURATION_ENTRY_ACTION_TYPE,
3687 T_ETH_MAC_COMMAND_INVALIDATE);
3688
3689 /* Handle pending commands first */
3690 cnt = bnx2x_mcast_handle_pending_cmds_e1(bp, p);
3691
3692 /* If there are no more pending commands - clear SCHEDULED state */
3693 if (list_empty(&o->pending_cmds_head))
3694 o->clear_sched(o);
3695
3696 /* The below may be true iff there were no pending commands */
3697 if (!cnt)
3698 cnt = bnx2x_mcast_handle_current_cmd(bp, p, cmd, 0);
3699
3700 /* For 57710 every command has o->max_cmd_len length to ensure that
3701 * commands are done one at a time.
3702 */
3703 o->total_pending_num -= o->max_cmd_len;
3704
3705 /* send a ramrod */
3706
3707 WARN_ON(cnt > o->max_cmd_len);
3708
3709 /* Set ramrod header (in particular, a number of entries to update) */
3710 bnx2x_mcast_set_rdata_hdr_e1(bp, p, (u8)cnt);
3711
3712 /* update a registry: we need the registry contents to be always up
3713 * to date in order to be able to execute a RESTORE opcode. Here
 3714	 * we use the fact that for 57710 we send one command at a time,
3715 * hence we may take the registry update out of the command handling
3716 * and do it in a simpler way here.
3717 */
3718 rc = bnx2x_mcast_refresh_registry_e1(bp, o);
3719 if (rc)
3720 return rc;
3721
Yuval Mintz16a5fd92013-06-02 00:06:18 +00003722 /* If CLEAR_ONLY was requested - don't send a ramrod and clear
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03003723 * RAMROD_PENDING status immediately.
3724 */
3725 if (test_bit(RAMROD_DRV_CLR_ONLY, &p->ramrod_flags)) {
3726 raw->clear_pending(raw);
3727 return 0;
3728 } else {
Yuval Mintz16a5fd92013-06-02 00:06:18 +00003729		/* No need for an explicit memory barrier here as long as we
 3730		 * ensure the ordering of writing to the SPQ element
 3731		 * and updating of the SPQ producer, which involves a memory
 3732		 * read. If the memory read is removed we will have to put a
 3733		 * full memory barrier there (inside bnx2x_sp_post()).
Vladislav Zolotarov53e51e22011-07-19 01:45:02 +00003734 */
3735
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03003736 /* Send a ramrod */
3737 rc = bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, raw->cid,
3738 U64_HI(raw->rdata_mapping),
3739 U64_LO(raw->rdata_mapping),
3740 ETH_CONNECTION_TYPE);
3741 if (rc)
3742 return rc;
3743
3744 /* Ramrod completion is pending */
3745 return 1;
3746 }
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03003747}
3748
3749static int bnx2x_mcast_get_registry_size_exact(struct bnx2x_mcast_obj *o)
3750{
3751 return o->registry.exact_match.num_macs_set;
3752}
3753
3754static int bnx2x_mcast_get_registry_size_aprox(struct bnx2x_mcast_obj *o)
3755{
3756 return o->registry.aprox_match.num_bins_set;
3757}
3758
3759static void bnx2x_mcast_set_registry_size_exact(struct bnx2x_mcast_obj *o,
3760 int n)
3761{
3762 o->registry.exact_match.num_macs_set = n;
3763}
3764
3765static void bnx2x_mcast_set_registry_size_aprox(struct bnx2x_mcast_obj *o,
3766 int n)
3767{
3768 o->registry.aprox_match.num_bins_set = n;
3769}
3770
3771int bnx2x_config_mcast(struct bnx2x *bp,
3772 struct bnx2x_mcast_ramrod_params *p,
Yuval Mintz86564c32013-01-23 03:21:50 +00003773 enum bnx2x_mcast_cmd cmd)
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03003774{
3775 struct bnx2x_mcast_obj *o = p->mcast_obj;
3776 struct bnx2x_raw_obj *r = &o->raw;
3777 int rc = 0, old_reg_size;
3778
3779 /* This is needed to recover number of currently configured mcast macs
3780 * in case of failure.
3781 */
3782 old_reg_size = o->get_registry_size(o);
3783
3784 /* Do some calculations and checks */
3785 rc = o->validate(bp, p, cmd);
3786 if (rc)
3787 return rc;
3788
3789 /* Return if there is no work to do */
3790 if ((!p->mcast_list_len) && (!o->check_sched(o)))
3791 return 0;
3792
Merav Sicron51c1a582012-03-18 10:33:38 +00003793 DP(BNX2X_MSG_SP, "o->total_pending_num=%d p->mcast_list_len=%d o->max_cmd_len=%d\n",
3794 o->total_pending_num, p->mcast_list_len, o->max_cmd_len);
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03003795
3796 /* Enqueue the current command to the pending list if we can't complete
3797 * it in the current iteration
3798 */
3799 if (r->check_pending(r) ||
3800 ((o->max_cmd_len > 0) && (o->total_pending_num > o->max_cmd_len))) {
3801 rc = o->enqueue_cmd(bp, p->mcast_obj, p, cmd);
3802 if (rc < 0)
3803 goto error_exit1;
3804
3805 /* As long as the current command is in a command list we
3806 * don't need to handle it separately.
3807 */
3808 p->mcast_list_len = 0;
3809 }
3810
3811 if (!r->check_pending(r)) {
3812
3813 /* Set 'pending' state */
3814 r->set_pending(r);
3815
3816 /* Configure the new classification in the chip */
3817 rc = o->config_mcast(bp, p, cmd);
3818 if (rc < 0)
3819 goto error_exit2;
3820
3821 /* Wait for a ramrod completion if was requested */
3822 if (test_bit(RAMROD_COMP_WAIT, &p->ramrod_flags))
3823 rc = o->wait_comp(bp, o);
3824 }
3825
3826 return rc;
3827
3828error_exit2:
3829 r->clear_pending(r);
3830
3831error_exit1:
3832 o->revert(bp, p, old_reg_size);
3833
3834 return rc;
3835}
3836
3837static void bnx2x_mcast_clear_sched(struct bnx2x_mcast_obj *o)
3838{
3839 smp_mb__before_clear_bit();
3840 clear_bit(o->sched_state, o->raw.pstate);
3841 smp_mb__after_clear_bit();
3842}
3843
3844static void bnx2x_mcast_set_sched(struct bnx2x_mcast_obj *o)
3845{
3846 smp_mb__before_clear_bit();
3847 set_bit(o->sched_state, o->raw.pstate);
3848 smp_mb__after_clear_bit();
3849}
3850
3851static bool bnx2x_mcast_check_sched(struct bnx2x_mcast_obj *o)
3852{
3853 return !!test_bit(o->sched_state, o->raw.pstate);
3854}
3855
3856static bool bnx2x_mcast_check_pending(struct bnx2x_mcast_obj *o)
3857{
3858 return o->raw.check_pending(&o->raw) || o->check_sched(o);
3859}
3860
3861void bnx2x_init_mcast_obj(struct bnx2x *bp,
3862 struct bnx2x_mcast_obj *mcast_obj,
3863 u8 mcast_cl_id, u32 mcast_cid, u8 func_id,
3864 u8 engine_id, void *rdata, dma_addr_t rdata_mapping,
3865 int state, unsigned long *pstate, bnx2x_obj_type type)
3866{
3867 memset(mcast_obj, 0, sizeof(*mcast_obj));
3868
3869 bnx2x_init_raw_obj(&mcast_obj->raw, mcast_cl_id, mcast_cid, func_id,
3870 rdata, rdata_mapping, state, pstate, type);
3871
3872 mcast_obj->engine_id = engine_id;
3873
3874 INIT_LIST_HEAD(&mcast_obj->pending_cmds_head);
3875
3876 mcast_obj->sched_state = BNX2X_FILTER_MCAST_SCHED;
3877 mcast_obj->check_sched = bnx2x_mcast_check_sched;
3878 mcast_obj->set_sched = bnx2x_mcast_set_sched;
3879 mcast_obj->clear_sched = bnx2x_mcast_clear_sched;
3880
3881 if (CHIP_IS_E1(bp)) {
3882 mcast_obj->config_mcast = bnx2x_mcast_setup_e1;
3883 mcast_obj->enqueue_cmd = bnx2x_mcast_enqueue_cmd;
3884 mcast_obj->hdl_restore =
3885 bnx2x_mcast_handle_restore_cmd_e1;
3886 mcast_obj->check_pending = bnx2x_mcast_check_pending;
3887
3888 if (CHIP_REV_IS_SLOW(bp))
3889 mcast_obj->max_cmd_len = BNX2X_MAX_EMUL_MULTI;
3890 else
3891 mcast_obj->max_cmd_len = BNX2X_MAX_MULTICAST;
3892
3893 mcast_obj->wait_comp = bnx2x_mcast_wait;
3894 mcast_obj->set_one_rule = bnx2x_mcast_set_one_rule_e1;
3895 mcast_obj->validate = bnx2x_mcast_validate_e1;
3896 mcast_obj->revert = bnx2x_mcast_revert_e1;
3897 mcast_obj->get_registry_size =
3898 bnx2x_mcast_get_registry_size_exact;
3899 mcast_obj->set_registry_size =
3900 bnx2x_mcast_set_registry_size_exact;
3901
3902 /* 57710 is the only chip that uses the exact match for mcast
3903 * at the moment.
3904 */
3905 INIT_LIST_HEAD(&mcast_obj->registry.exact_match.macs);
3906
3907 } else if (CHIP_IS_E1H(bp)) {
3908 mcast_obj->config_mcast = bnx2x_mcast_setup_e1h;
3909 mcast_obj->enqueue_cmd = NULL;
3910 mcast_obj->hdl_restore = NULL;
3911 mcast_obj->check_pending = bnx2x_mcast_check_pending;
3912
3913 /* 57711 doesn't send a ramrod, so it has unlimited credit
3914 * for one command.
3915 */
3916 mcast_obj->max_cmd_len = -1;
3917 mcast_obj->wait_comp = bnx2x_mcast_wait;
3918 mcast_obj->set_one_rule = NULL;
3919 mcast_obj->validate = bnx2x_mcast_validate_e1h;
3920 mcast_obj->revert = bnx2x_mcast_revert_e1h;
3921 mcast_obj->get_registry_size =
3922 bnx2x_mcast_get_registry_size_aprox;
3923 mcast_obj->set_registry_size =
3924 bnx2x_mcast_set_registry_size_aprox;
3925 } else {
3926 mcast_obj->config_mcast = bnx2x_mcast_setup_e2;
3927 mcast_obj->enqueue_cmd = bnx2x_mcast_enqueue_cmd;
3928 mcast_obj->hdl_restore =
3929 bnx2x_mcast_handle_restore_cmd_e2;
3930 mcast_obj->check_pending = bnx2x_mcast_check_pending;
3931 /* TODO: There should be a proper HSI define for this number!!!
3932 */
3933 mcast_obj->max_cmd_len = 16;
3934 mcast_obj->wait_comp = bnx2x_mcast_wait;
3935 mcast_obj->set_one_rule = bnx2x_mcast_set_one_rule_e2;
3936 mcast_obj->validate = bnx2x_mcast_validate_e2;
3937 mcast_obj->revert = bnx2x_mcast_revert_e2;
3938 mcast_obj->get_registry_size =
3939 bnx2x_mcast_get_registry_size_aprox;
3940 mcast_obj->set_registry_size =
3941 bnx2x_mcast_set_registry_size_aprox;
3942 }
3943}
3944
3945/*************************** Credit handling **********************************/
3946
3947/**
3948 * atomic_add_ifless - add if the result is less than a given value.
3949 *
3950 * @v: pointer of type atomic_t
3951 * @a: the amount to add to v...
3952 * @u: ...if (v + a) is less than u.
3953 *
3954 * returns true if (v + a) was less than u, and false otherwise.
3955 *
3956 */
3957static inline bool __atomic_add_ifless(atomic_t *v, int a, int u)
3958{
3959 int c, old;
3960
3961 c = atomic_read(v);
3962 for (;;) {
3963 if (unlikely(c + a >= u))
3964 return false;
3965
3966 old = atomic_cmpxchg((v), c, c + a);
3967 if (likely(old == c))
3968 break;
3969 c = old;
3970 }
3971
3972 return true;
3973}
3974
3975/**
 3976 * atomic_dec_ifmoe - dec if the result is greater than or equal to a given value.
 3977 *
 3978 * @v: pointer of type atomic_t
 3979 * @a: the amount to dec from v...
 3980 * @u: ...if (v - a) is greater than or equal to u.
 3981 *
 3982 * returns true if (v - a) was greater than or equal to u, and false
 3983 * otherwise.
3984 */
3985static inline bool __atomic_dec_ifmoe(atomic_t *v, int a, int u)
3986{
3987 int c, old;
3988
3989 c = atomic_read(v);
3990 for (;;) {
3991 if (unlikely(c - a < u))
3992 return false;
3993
3994 old = atomic_cmpxchg((v), c, c - a);
3995 if (likely(old == c))
3996 break;
3997 c = old;
3998 }
3999
4000 return true;
4001}
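/* Worked example (illustrative, not part of the driver) of how the two
 * helpers above bound a counter from below and from above:
 */
static inline void __atomic_ifops_example(void)
{
	atomic_t credit = ATOMIC_INIT(5);

	WARN_ON(!__atomic_dec_ifmoe(&credit, 2, 0));	/* 5 - 2 >= 0: taken, now 3 */
	WARN_ON(__atomic_dec_ifmoe(&credit, 10, 0));	/* 3 - 10 < 0: refused */
	WARN_ON(!__atomic_add_ifless(&credit, 2, 6));	/* 3 + 2 < 6: added, now 5 */
}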
4002
4003static bool bnx2x_credit_pool_get(struct bnx2x_credit_pool_obj *o, int cnt)
4004{
4005 bool rc;
4006
4007 smp_mb();
4008 rc = __atomic_dec_ifmoe(&o->credit, cnt, 0);
4009 smp_mb();
4010
4011 return rc;
4012}
4013
4014static bool bnx2x_credit_pool_put(struct bnx2x_credit_pool_obj *o, int cnt)
4015{
4016 bool rc;
4017
4018 smp_mb();
4019
 4020	/* Don't allow a refill if credit + cnt > pool_sz */
4021 rc = __atomic_add_ifless(&o->credit, cnt, o->pool_sz + 1);
4022
4023 smp_mb();
4024
4025 return rc;
4026}
4027
4028static int bnx2x_credit_pool_check(struct bnx2x_credit_pool_obj *o)
4029{
4030 int cur_credit;
4031
4032 smp_mb();
4033 cur_credit = atomic_read(&o->credit);
4034
4035 return cur_credit;
4036}
4037
4038static bool bnx2x_credit_pool_always_true(struct bnx2x_credit_pool_obj *o,
4039 int cnt)
4040{
4041 return true;
4042}
4043
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03004044static bool bnx2x_credit_pool_get_entry(
4045 struct bnx2x_credit_pool_obj *o,
4046 int *offset)
4047{
4048 int idx, vec, i;
4049
4050 *offset = -1;
4051
4052 /* Find "internal cam-offset" then add to base for this object... */
4053 for (vec = 0; vec < BNX2X_POOL_VEC_SIZE; vec++) {
4054
4055 /* Skip the current vector if there are no free entries in it */
4056 if (!o->pool_mirror[vec])
4057 continue;
4058
4059 /* If we've got here we are going to find a free entry */
Dmitry Kravkovc54e9bd2012-03-26 21:08:55 +00004060 for (idx = vec * BIT_VEC64_ELEM_SZ, i = 0;
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03004061 i < BIT_VEC64_ELEM_SZ; idx++, i++)
4062
4063 if (BIT_VEC64_TEST_BIT(o->pool_mirror, idx)) {
4064 /* Got one!! */
4065 BIT_VEC64_CLEAR_BIT(o->pool_mirror, idx);
4066 *offset = o->base_pool_offset + idx;
4067 return true;
4068 }
4069 }
4070
4071 return false;
4072}
4073
4074static bool bnx2x_credit_pool_put_entry(
4075 struct bnx2x_credit_pool_obj *o,
4076 int offset)
4077{
4078 if (offset < o->base_pool_offset)
4079 return false;
4080
4081 offset -= o->base_pool_offset;
4082
4083 if (offset >= o->pool_sz)
4084 return false;
4085
4086 /* Return the entry to the pool */
4087 BIT_VEC64_SET_BIT(o->pool_mirror, offset);
4088
4089 return true;
4090}
4091
4092static bool bnx2x_credit_pool_put_entry_always_true(
4093 struct bnx2x_credit_pool_obj *o,
4094 int offset)
4095{
4096 return true;
4097}
4098
4099static bool bnx2x_credit_pool_get_entry_always_true(
4100 struct bnx2x_credit_pool_obj *o,
4101 int *offset)
4102{
4103 *offset = -1;
4104 return true;
4105}
4106/**
4107 * bnx2x_init_credit_pool - initialize credit pool internals.
4108 *
 4109 * @p: credit pool object
4110 * @base: Base entry in the CAM to use.
4111 * @credit: pool size.
4112 *
4113 * If base is negative no CAM entries handling will be performed.
4114 * If credit is negative pool operations will always succeed (unlimited pool).
4115 *
4116 */
4117static inline void bnx2x_init_credit_pool(struct bnx2x_credit_pool_obj *p,
4118 int base, int credit)
4119{
4120 /* Zero the object first */
4121 memset(p, 0, sizeof(*p));
4122
4123 /* Set the table to all 1s */
4124 memset(&p->pool_mirror, 0xff, sizeof(p->pool_mirror));
4125
4126 /* Init a pool as full */
4127 atomic_set(&p->credit, credit);
4128
 4129	/* The total pool size */
4130 p->pool_sz = credit;
4131
4132 p->base_pool_offset = base;
4133
4134 /* Commit the change */
4135 smp_mb();
4136
4137 p->check = bnx2x_credit_pool_check;
4138
4139 /* if pool credit is negative - disable the checks */
4140 if (credit >= 0) {
4141 p->put = bnx2x_credit_pool_put;
4142 p->get = bnx2x_credit_pool_get;
4143 p->put_entry = bnx2x_credit_pool_put_entry;
4144 p->get_entry = bnx2x_credit_pool_get_entry;
4145 } else {
4146 p->put = bnx2x_credit_pool_always_true;
4147 p->get = bnx2x_credit_pool_always_true;
4148 p->put_entry = bnx2x_credit_pool_put_entry_always_true;
4149 p->get_entry = bnx2x_credit_pool_get_entry_always_true;
4150 }
4151
4152 /* If base is negative - disable entries handling */
4153 if (base < 0) {
4154 p->put_entry = bnx2x_credit_pool_put_entry_always_true;
4155 p->get_entry = bnx2x_credit_pool_get_entry_always_true;
4156 }
4157}
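/* Illustrative usage sketch (hypothetical values, not part of the driver):
 * reserve one credit and one CAM entry from a small pool, then return both.
 */
static inline void bnx2x_credit_pool_usage_sketch(struct bnx2x_credit_pool_obj *p)
{
	int cam_offset;

	bnx2x_init_credit_pool(p, 32, 8);	/* 8 entries based at CAM offset 32 */

	if (p->get(p, 1) && p->get_entry(p, &cam_offset)) {
		/* cam_offset is in [32, 39]; program the CAM entry here */
		p->put_entry(p, cam_offset);
		p->put(p, 1);
	}
}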
4158
4159void bnx2x_init_mac_credit_pool(struct bnx2x *bp,
4160 struct bnx2x_credit_pool_obj *p, u8 func_id,
4161 u8 func_num)
4162{
4163/* TODO: this will be defined in consts as well... */
4164#define BNX2X_CAM_SIZE_EMUL 5
4165
4166 int cam_sz;
4167
4168 if (CHIP_IS_E1(bp)) {
4169 /* In E1, Multicast is saved in cam... */
4170 if (!CHIP_REV_IS_SLOW(bp))
4171 cam_sz = (MAX_MAC_CREDIT_E1 / 2) - BNX2X_MAX_MULTICAST;
4172 else
4173 cam_sz = BNX2X_CAM_SIZE_EMUL - BNX2X_MAX_EMUL_MULTI;
4174
4175 bnx2x_init_credit_pool(p, func_id * cam_sz, cam_sz);
4176
4177 } else if (CHIP_IS_E1H(bp)) {
 4178		/* CAM credit is equally divided between all active functions
 4179		 * on the PORT.
4180 */
4181 if ((func_num > 0)) {
4182 if (!CHIP_REV_IS_SLOW(bp))
4183 cam_sz = (MAX_MAC_CREDIT_E1H / (2*func_num));
4184 else
4185 cam_sz = BNX2X_CAM_SIZE_EMUL;
4186 bnx2x_init_credit_pool(p, func_id * cam_sz, cam_sz);
4187 } else {
4188 /* this should never happen! Block MAC operations. */
4189 bnx2x_init_credit_pool(p, 0, 0);
4190 }
4191
4192 } else {
4193
Yuval Mintz16a5fd92013-06-02 00:06:18 +00004194		/* CAM credit is equally divided between all active functions
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03004195 * on the PATH.
4196 */
4197 if ((func_num > 0)) {
4198 if (!CHIP_REV_IS_SLOW(bp))
4199 cam_sz = (MAX_MAC_CREDIT_E2 / func_num);
4200 else
4201 cam_sz = BNX2X_CAM_SIZE_EMUL;
4202
Yuval Mintz16a5fd92013-06-02 00:06:18 +00004203 /* No need for CAM entries handling for 57712 and
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03004204 * newer.
4205 */
4206 bnx2x_init_credit_pool(p, -1, cam_sz);
4207 } else {
4208 /* this should never happen! Block MAC operations. */
4209 bnx2x_init_credit_pool(p, 0, 0);
4210 }
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03004211 }
4212}
4213
4214void bnx2x_init_vlan_credit_pool(struct bnx2x *bp,
4215 struct bnx2x_credit_pool_obj *p,
4216 u8 func_id,
4217 u8 func_num)
4218{
4219 if (CHIP_IS_E1x(bp)) {
Yuval Mintz16a5fd92013-06-02 00:06:18 +00004220		/* There is no VLAN credit in HW on 57710 and 57711; only
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03004221 * MAC / MAC-VLAN can be set
4222 */
4223 bnx2x_init_credit_pool(p, 0, -1);
4224 } else {
Yuval Mintz16a5fd92013-06-02 00:06:18 +00004225 /* CAM credit is equally divided between all active functions
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03004226 * on the PATH.
4227 */
4228 if (func_num > 0) {
4229 int credit = MAX_VLAN_CREDIT_E2 / func_num;
4230 bnx2x_init_credit_pool(p, func_id * credit, credit);
4231 } else
4232 /* this should never happen! Block VLAN operations. */
4233 bnx2x_init_credit_pool(p, 0, 0);
4234 }
4235}
4236
4237/****************** RSS Configuration ******************/
4238/**
4239 * bnx2x_debug_print_ind_table - prints the indirection table configuration.
4240 *
Yuval Mintz16a5fd92013-06-02 00:06:18 +00004241 * @bp: driver handle
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03004242 * @p: pointer to rss configuration
4243 *
4244 * Prints it when NETIF_MSG_IFUP debug level is configured.
4245 */
4246static inline void bnx2x_debug_print_ind_table(struct bnx2x *bp,
4247 struct bnx2x_config_rss_params *p)
4248{
4249 int i;
4250
4251 DP(BNX2X_MSG_SP, "Setting indirection table to:\n");
4252 DP(BNX2X_MSG_SP, "0x0000: ");
4253 for (i = 0; i < T_ETH_INDIRECTION_TABLE_SIZE; i++) {
4254 DP_CONT(BNX2X_MSG_SP, "0x%02x ", p->ind_table[i]);
4255
4256 /* Print 4 bytes in a line */
4257 if ((i + 1 < T_ETH_INDIRECTION_TABLE_SIZE) &&
4258 (((i + 1) & 0x3) == 0)) {
4259 DP_CONT(BNX2X_MSG_SP, "\n");
4260 DP(BNX2X_MSG_SP, "0x%04x: ", i + 1);
4261 }
4262 }
4263
4264 DP_CONT(BNX2X_MSG_SP, "\n");
4265}
4266
4267/**
4268 * bnx2x_setup_rss - configure RSS
4269 *
4270 * @bp: device handle
4271 * @p: rss configuration
4272 *
 4273 * Sends an RSS_UPDATE ramrod to perform the configuration.
4274 */
4275static int bnx2x_setup_rss(struct bnx2x *bp,
4276 struct bnx2x_config_rss_params *p)
4277{
4278 struct bnx2x_rss_config_obj *o = p->rss_obj;
4279 struct bnx2x_raw_obj *r = &o->raw;
4280 struct eth_rss_update_ramrod_data *data =
4281 (struct eth_rss_update_ramrod_data *)(r->rdata);
4282 u8 rss_mode = 0;
4283 int rc;
4284
4285 memset(data, 0, sizeof(*data));
4286
4287 DP(BNX2X_MSG_SP, "Configuring RSS\n");
4288
4289 /* Set an echo field */
Yuval Mintz86564c32013-01-23 03:21:50 +00004290 data->echo = cpu_to_le32((r->cid & BNX2X_SWCID_MASK) |
4291 (r->state << BNX2X_SWCID_SHIFT));
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03004292
4293 /* RSS mode */
4294 if (test_bit(BNX2X_RSS_MODE_DISABLED, &p->rss_flags))
4295 rss_mode = ETH_RSS_MODE_DISABLED;
4296 else if (test_bit(BNX2X_RSS_MODE_REGULAR, &p->rss_flags))
4297 rss_mode = ETH_RSS_MODE_REGULAR;
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03004298
4299 data->rss_mode = rss_mode;
4300
4301 DP(BNX2X_MSG_SP, "rss_mode=%d\n", rss_mode);
4302
4303 /* RSS capabilities */
4304 if (test_bit(BNX2X_RSS_IPV4, &p->rss_flags))
4305 data->capabilities |=
4306 ETH_RSS_UPDATE_RAMROD_DATA_IPV4_CAPABILITY;
4307
4308 if (test_bit(BNX2X_RSS_IPV4_TCP, &p->rss_flags))
4309 data->capabilities |=
4310 ETH_RSS_UPDATE_RAMROD_DATA_IPV4_TCP_CAPABILITY;
4311
Merav Sicron5d317c6a2012-06-19 07:48:24 +00004312 if (test_bit(BNX2X_RSS_IPV4_UDP, &p->rss_flags))
4313 data->capabilities |=
4314 ETH_RSS_UPDATE_RAMROD_DATA_IPV4_UDP_CAPABILITY;
4315
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03004316 if (test_bit(BNX2X_RSS_IPV6, &p->rss_flags))
4317 data->capabilities |=
4318 ETH_RSS_UPDATE_RAMROD_DATA_IPV6_CAPABILITY;
4319
4320 if (test_bit(BNX2X_RSS_IPV6_TCP, &p->rss_flags))
4321 data->capabilities |=
4322 ETH_RSS_UPDATE_RAMROD_DATA_IPV6_TCP_CAPABILITY;
4323
Merav Sicron5d317c6a2012-06-19 07:48:24 +00004324 if (test_bit(BNX2X_RSS_IPV6_UDP, &p->rss_flags))
4325 data->capabilities |=
4326 ETH_RSS_UPDATE_RAMROD_DATA_IPV6_UDP_CAPABILITY;
4327
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03004328 /* Hashing mask */
4329 data->rss_result_mask = p->rss_result_mask;
4330
4331 /* RSS engine ID */
4332 data->rss_engine_id = o->engine_id;
4333
4334 DP(BNX2X_MSG_SP, "rss_engine_id=%d\n", data->rss_engine_id);
4335
4336 /* Indirection table */
4337 memcpy(data->indirection_table, p->ind_table,
4338 T_ETH_INDIRECTION_TABLE_SIZE);
4339
4340 /* Remember the last configuration */
4341 memcpy(o->ind_table, p->ind_table, T_ETH_INDIRECTION_TABLE_SIZE);
4342
4343 /* Print the indirection table */
4344 if (netif_msg_ifup(bp))
4345 bnx2x_debug_print_ind_table(bp, p);
4346
4347 /* RSS keys */
4348 if (test_bit(BNX2X_RSS_SET_SRCH, &p->rss_flags)) {
4349 memcpy(&data->rss_key[0], &p->rss_key[0],
4350 sizeof(data->rss_key));
4351 data->capabilities |= ETH_RSS_UPDATE_RAMROD_DATA_UPDATE_RSS_KEY;
4352 }
4353
Yuval Mintz16a5fd92013-06-02 00:06:18 +00004354	/* No need for an explicit memory barrier here as long as we
 4355	 * ensure the ordering of writing to the SPQ element
 4356	 * and updating of the SPQ producer, which involves a memory
 4357	 * read. If the memory read is removed we will have to put a
 4358	 * full memory barrier there (inside bnx2x_sp_post()).
Vladislav Zolotarov53e51e22011-07-19 01:45:02 +00004359 */
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03004360
4361 /* Send a ramrod */
4362 rc = bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_RSS_UPDATE, r->cid,
4363 U64_HI(r->rdata_mapping),
4364 U64_LO(r->rdata_mapping),
4365 ETH_CONNECTION_TYPE);
4366
4367 if (rc < 0)
4368 return rc;
4369
4370 return 1;
4371}
4372
4373void bnx2x_get_rss_ind_table(struct bnx2x_rss_config_obj *rss_obj,
4374 u8 *ind_table)
4375{
4376 memcpy(ind_table, rss_obj->ind_table, sizeof(rss_obj->ind_table));
4377}
4378
4379int bnx2x_config_rss(struct bnx2x *bp,
4380 struct bnx2x_config_rss_params *p)
4381{
4382 int rc;
4383 struct bnx2x_rss_config_obj *o = p->rss_obj;
4384 struct bnx2x_raw_obj *r = &o->raw;
4385
4386 /* Do nothing if only driver cleanup was requested */
Michal Kalderon5b622912014-01-05 18:33:52 +02004387 if (test_bit(RAMROD_DRV_CLR_ONLY, &p->ramrod_flags)) {
4388 DP(BNX2X_MSG_SP, "Not configuring RSS ramrod_flags=%lx\n",
4389 p->ramrod_flags);
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03004390 return 0;
Michal Kalderon5b622912014-01-05 18:33:52 +02004391 }
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03004392
4393 r->set_pending(r);
4394
4395 rc = o->config_rss(bp, p);
4396 if (rc < 0) {
4397 r->clear_pending(r);
4398 return rc;
4399 }
4400
4401 if (test_bit(RAMROD_COMP_WAIT, &p->ramrod_flags))
4402 rc = r->wait_comp(bp, r);
4403
4404 return rc;
4405}
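/* Illustrative caller sketch (hypothetical, not part of the driver): enable
 * regular RSS over TCP/IPv4 and TCP/IPv6, spread the indirection table
 * round-robin over num_queues (assumed > 0) and wait for the ramrod
 * completion. The real driver fills the indirection table with client IDs
 * rather than bare queue indices.
 */
static inline int bnx2x_config_rss_sketch(struct bnx2x *bp,
					  struct bnx2x_rss_config_obj *rss_obj,
					  u8 num_queues)
{
	struct bnx2x_config_rss_params params = {NULL};
	int i;

	params.rss_obj = rss_obj;
	params.rss_result_mask = 0x7f;	/* 7-bit hash result mask */

	__set_bit(BNX2X_RSS_MODE_REGULAR, &params.rss_flags);
	__set_bit(BNX2X_RSS_IPV4, &params.rss_flags);
	__set_bit(BNX2X_RSS_IPV4_TCP, &params.rss_flags);
	__set_bit(BNX2X_RSS_IPV6, &params.rss_flags);
	__set_bit(BNX2X_RSS_IPV6_TCP, &params.rss_flags);
	__set_bit(RAMROD_COMP_WAIT, &params.ramrod_flags);

	for (i = 0; i < T_ETH_INDIRECTION_TABLE_SIZE; i++)
		params.ind_table[i] = i % num_queues;

	return bnx2x_config_rss(bp, &params);
}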
4406
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03004407void bnx2x_init_rss_config_obj(struct bnx2x *bp,
4408 struct bnx2x_rss_config_obj *rss_obj,
4409 u8 cl_id, u32 cid, u8 func_id, u8 engine_id,
4410 void *rdata, dma_addr_t rdata_mapping,
4411 int state, unsigned long *pstate,
4412 bnx2x_obj_type type)
4413{
4414 bnx2x_init_raw_obj(&rss_obj->raw, cl_id, cid, func_id, rdata,
4415 rdata_mapping, state, pstate, type);
4416
4417 rss_obj->engine_id = engine_id;
4418 rss_obj->config_rss = bnx2x_setup_rss;
4419}
4420
Ariel Eliorb9871bc2013-09-04 14:09:21 +03004421int validate_vlan_mac(struct bnx2x *bp,
4422 struct bnx2x_vlan_mac_obj *vlan_mac)
4423{
4424 if (!vlan_mac->get_n_elements) {
 4425		BNX2X_ERR("vlan mac object was not initialized\n");
4426 return -EINVAL;
4427 }
4428 return 0;
4429}
4430
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03004431/********************** Queue state object ***********************************/
4432
4433/**
4434 * bnx2x_queue_state_change - perform Queue state change transition
4435 *
4436 * @bp: device handle
4437 * @params: parameters to perform the transition
4438 *
4439 * returns 0 in case of successfully completed transition, negative error
 4440 * code in case of failure, positive (EBUSY) value if there is a completion
 4441 * that is still pending (possible only if RAMROD_COMP_WAIT is
4442 * not set in params->ramrod_flags for asynchronous commands).
4443 *
4444 */
4445int bnx2x_queue_state_change(struct bnx2x *bp,
4446 struct bnx2x_queue_state_params *params)
4447{
4448 struct bnx2x_queue_sp_obj *o = params->q_obj;
4449 int rc, pending_bit;
4450 unsigned long *pending = &o->pending;
4451
4452 /* Check that the requested transition is legal */
Yuval Mintz04c46732013-01-23 03:21:46 +00004453 rc = o->check_transition(bp, o, params);
4454 if (rc) {
4455 BNX2X_ERR("check transition returned an error. rc %d\n", rc);
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03004456 return -EINVAL;
Yuval Mintz04c46732013-01-23 03:21:46 +00004457 }
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03004458
4459 /* Set "pending" bit */
Yuval Mintz04c46732013-01-23 03:21:46 +00004460 DP(BNX2X_MSG_SP, "pending bit was=%lx\n", o->pending);
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03004461 pending_bit = o->set_pending(o, params);
Yuval Mintz04c46732013-01-23 03:21:46 +00004462 DP(BNX2X_MSG_SP, "pending bit now=%lx\n", o->pending);
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03004463
4464 /* Don't send a command if only driver cleanup was requested */
4465 if (test_bit(RAMROD_DRV_CLR_ONLY, &params->ramrod_flags))
4466 o->complete_cmd(bp, o, pending_bit);
4467 else {
4468 /* Send a ramrod */
4469 rc = o->send_cmd(bp, params);
4470 if (rc) {
4471 o->next_state = BNX2X_Q_STATE_MAX;
4472 clear_bit(pending_bit, pending);
4473 smp_mb__after_clear_bit();
4474 return rc;
4475 }
4476
4477 if (test_bit(RAMROD_COMP_WAIT, &params->ramrod_flags)) {
4478 rc = o->wait_comp(bp, o, pending_bit);
4479 if (rc)
4480 return rc;
4481
4482 return 0;
4483 }
4484 }
4485
4486 return !!test_bit(pending_bit, pending);
4487}
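/* Illustrative caller sketch (hypothetical, not part of the driver): request
 * a synchronous EMPTY transition and wait for its completion.
 */
static inline int bnx2x_queue_empty_sketch(struct bnx2x *bp,
					   struct bnx2x_queue_sp_obj *q)
{
	struct bnx2x_queue_state_params params = {NULL};

	params.q_obj = q;
	params.cmd = BNX2X_Q_CMD_EMPTY;
	__set_bit(RAMROD_COMP_WAIT, &params.ramrod_flags);

	return bnx2x_queue_state_change(bp, &params);
}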
4488
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03004489static int bnx2x_queue_set_pending(struct bnx2x_queue_sp_obj *obj,
4490 struct bnx2x_queue_state_params *params)
4491{
4492 enum bnx2x_queue_cmd cmd = params->cmd, bit;
4493
4494 /* ACTIVATE and DEACTIVATE commands are implemented on top of
4495 * UPDATE command.
4496 */
4497 if ((cmd == BNX2X_Q_CMD_ACTIVATE) ||
4498 (cmd == BNX2X_Q_CMD_DEACTIVATE))
4499 bit = BNX2X_Q_CMD_UPDATE;
4500 else
4501 bit = cmd;
4502
4503 set_bit(bit, &obj->pending);
4504 return bit;
4505}
4506
4507static int bnx2x_queue_wait_comp(struct bnx2x *bp,
4508 struct bnx2x_queue_sp_obj *o,
4509 enum bnx2x_queue_cmd cmd)
4510{
4511 return bnx2x_state_wait(bp, cmd, &o->pending);
4512}
4513
4514/**
4515 * bnx2x_queue_comp_cmd - complete the state change command.
4516 *
4517 * @bp: device handle
 4518 * @o: queue state object
 4519 * @cmd: command being completed
4520 *
4521 * Checks that the arrived completion is expected.
4522 */
4523static int bnx2x_queue_comp_cmd(struct bnx2x *bp,
4524 struct bnx2x_queue_sp_obj *o,
4525 enum bnx2x_queue_cmd cmd)
4526{
4527 unsigned long cur_pending = o->pending;
4528
4529 if (!test_and_clear_bit(cmd, &cur_pending)) {
Merav Sicron51c1a582012-03-18 10:33:38 +00004530 BNX2X_ERR("Bad MC reply %d for queue %d in state %d pending 0x%lx, next_state %d\n",
4531 cmd, o->cids[BNX2X_PRIMARY_CID_INDEX],
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03004532 o->state, cur_pending, o->next_state);
4533 return -EINVAL;
4534 }
4535
Ariel Elior6383c0b2011-07-14 08:31:57 +00004536 if (o->next_tx_only >= o->max_cos)
Yuval Mintz16a5fd92013-06-02 00:06:18 +00004537		/* >= because the tx-only count must always be smaller than max_cos,
Masanari Iida02582e92012-08-22 19:11:26 +09004538		 * since the primary connection supports COS 0
Ariel Elior6383c0b2011-07-14 08:31:57 +00004539 */
4540 BNX2X_ERR("illegal value for next tx_only: %d. max cos was %d",
4541 o->next_tx_only, o->max_cos);
4542
Merav Sicron51c1a582012-03-18 10:33:38 +00004543 DP(BNX2X_MSG_SP,
4544 "Completing command %d for queue %d, setting state to %d\n",
4545 cmd, o->cids[BNX2X_PRIMARY_CID_INDEX], o->next_state);
Ariel Elior6383c0b2011-07-14 08:31:57 +00004546
4547 if (o->next_tx_only) /* print num tx-only if any exist */
Joe Perches94f05b02011-08-14 12:16:20 +00004548 DP(BNX2X_MSG_SP, "primary cid %d: num tx-only cons %d\n",
Merav Sicron51c1a582012-03-18 10:33:38 +00004549 o->cids[BNX2X_PRIMARY_CID_INDEX], o->next_tx_only);
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03004550
4551 o->state = o->next_state;
Ariel Elior6383c0b2011-07-14 08:31:57 +00004552 o->num_tx_only = o->next_tx_only;
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03004553 o->next_state = BNX2X_Q_STATE_MAX;
4554
4555 /* It's important that o->state and o->next_state are
4556 * updated before o->pending.
4557 */
4558 wmb();
4559
4560 clear_bit(cmd, &o->pending);
4561 smp_mb__after_clear_bit();
4562
4563 return 0;
4564}
4565
4566static void bnx2x_q_fill_setup_data_e2(struct bnx2x *bp,
4567 struct bnx2x_queue_state_params *cmd_params,
4568 struct client_init_ramrod_data *data)
4569{
4570 struct bnx2x_queue_setup_params *params = &cmd_params->params.setup;
Vladislav Zolotarov042181f2011-06-14 01:33:39 +00004571
4572 /* Rx data */
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03004573
4574 /* IPv6 TPA supported for E2 and above only */
Vladislav Zolotarovf5219d82011-07-19 01:44:11 +00004575 data->rx.tpa_en |= test_bit(BNX2X_Q_FLG_TPA_IPV6, &params->flags) *
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03004576 CLIENT_INIT_RX_DATA_TPA_EN_IPV6;
4577}
4578
Ariel Elior6383c0b2011-07-14 08:31:57 +00004579static void bnx2x_q_fill_init_general_data(struct bnx2x *bp,
4580 struct bnx2x_queue_sp_obj *o,
4581 struct bnx2x_general_setup_params *params,
4582 struct client_init_general_data *gen_data,
4583 unsigned long *flags)
4584{
4585 gen_data->client_id = o->cl_id;
4586
4587 if (test_bit(BNX2X_Q_FLG_STATS, flags)) {
4588 gen_data->statistics_counter_id =
4589 params->stat_id;
4590 gen_data->statistics_en_flg = 1;
4591 gen_data->statistics_zero_flg =
4592 test_bit(BNX2X_Q_FLG_ZERO_STATS, flags);
4593 } else
4594 gen_data->statistics_counter_id =
4595 DISABLE_STATISTIC_COUNTER_ID_VALUE;
4596
4597 gen_data->is_fcoe_flg = test_bit(BNX2X_Q_FLG_FCOE, flags);
4598 gen_data->activate_flg = test_bit(BNX2X_Q_FLG_ACTIVE, flags);
4599 gen_data->sp_client_id = params->spcl_id;
4600 gen_data->mtu = cpu_to_le16(params->mtu);
4601 gen_data->func_id = o->func_id;
4602
Ariel Elior6383c0b2011-07-14 08:31:57 +00004603 gen_data->cos = params->cos;
4604
4605 gen_data->traffic_type =
4606 test_bit(BNX2X_Q_FLG_FCOE, flags) ?
4607 LLFC_TRAFFIC_TYPE_FCOE : LLFC_TRAFFIC_TYPE_NW;
4608
Joe Perches94f05b02011-08-14 12:16:20 +00004609 DP(BNX2X_MSG_SP, "flags: active %d, cos %d, stats en %d\n",
Ariel Elior6383c0b2011-07-14 08:31:57 +00004610 gen_data->activate_flg, gen_data->cos, gen_data->statistics_en_flg);
4611}
4612
4613static void bnx2x_q_fill_init_tx_data(struct bnx2x_queue_sp_obj *o,
4614 struct bnx2x_txq_setup_params *params,
4615 struct client_init_tx_data *tx_data,
4616 unsigned long *flags)
4617{
4618 tx_data->enforce_security_flg =
4619 test_bit(BNX2X_Q_FLG_TX_SEC, flags);
4620 tx_data->default_vlan =
4621 cpu_to_le16(params->default_vlan);
4622 tx_data->default_vlan_flg =
4623 test_bit(BNX2X_Q_FLG_DEF_VLAN, flags);
4624 tx_data->tx_switching_flg =
4625 test_bit(BNX2X_Q_FLG_TX_SWITCH, flags);
4626 tx_data->anti_spoofing_flg =
4627 test_bit(BNX2X_Q_FLG_ANTI_SPOOF, flags);
Barak Witkowskia3348722012-04-23 03:04:46 +00004628 tx_data->force_default_pri_flg =
4629 test_bit(BNX2X_Q_FLG_FORCE_DEFAULT_PRI, flags);
4630
Dmitry Kravkove287a752013-03-21 15:38:24 +00004631 tx_data->tunnel_lso_inc_ip_id =
4632 test_bit(BNX2X_Q_FLG_TUN_INC_INNER_IP_ID, flags);
Dmitry Kravkov91226792013-03-11 05:17:52 +00004633 tx_data->tunnel_non_lso_pcsum_location =
4634 test_bit(BNX2X_Q_FLG_PCSUM_ON_PKT, flags) ? PCSUM_ON_PKT :
4635 PCSUM_ON_BD;
4636
Ariel Elior6383c0b2011-07-14 08:31:57 +00004637 tx_data->tx_status_block_id = params->fw_sb_id;
4638 tx_data->tx_sb_index_number = params->sb_cq_index;
4639 tx_data->tss_leading_client_id = params->tss_leading_cl_id;
4640
4641 tx_data->tx_bd_page_base.lo =
4642 cpu_to_le32(U64_LO(params->dscr_map));
4643 tx_data->tx_bd_page_base.hi =
4644 cpu_to_le32(U64_HI(params->dscr_map));
4645
4646 /* Don't configure any Tx switching mode during queue SETUP */
4647 tx_data->state = 0;
4648}
4649
4650static void bnx2x_q_fill_init_pause_data(struct bnx2x_queue_sp_obj *o,
4651 struct rxq_pause_params *params,
4652 struct client_init_rx_data *rx_data)
4653{
4654 /* flow control data */
4655 rx_data->cqe_pause_thr_low = cpu_to_le16(params->rcq_th_lo);
4656 rx_data->cqe_pause_thr_high = cpu_to_le16(params->rcq_th_hi);
4657 rx_data->bd_pause_thr_low = cpu_to_le16(params->bd_th_lo);
4658 rx_data->bd_pause_thr_high = cpu_to_le16(params->bd_th_hi);
4659 rx_data->sge_pause_thr_low = cpu_to_le16(params->sge_th_lo);
4660 rx_data->sge_pause_thr_high = cpu_to_le16(params->sge_th_hi);
4661 rx_data->rx_cos_mask = cpu_to_le16(params->pri_map);
4662}
4663
4664static void bnx2x_q_fill_init_rx_data(struct bnx2x_queue_sp_obj *o,
4665 struct bnx2x_rxq_setup_params *params,
4666 struct client_init_rx_data *rx_data,
4667 unsigned long *flags)
4668{
Ariel Elior6383c0b2011-07-14 08:31:57 +00004669 rx_data->tpa_en = test_bit(BNX2X_Q_FLG_TPA, flags) *
4670 CLIENT_INIT_RX_DATA_TPA_EN_IPV4;
Dmitry Kravkov621b4d62012-02-20 09:59:08 +00004671 rx_data->tpa_en |= test_bit(BNX2X_Q_FLG_TPA_GRO, flags) *
4672 CLIENT_INIT_RX_DATA_TPA_MODE;
Ariel Elior6383c0b2011-07-14 08:31:57 +00004673 rx_data->vmqueue_mode_en_flg = 0;
4674
4675 rx_data->cache_line_alignment_log_size =
4676 params->cache_line_log;
4677 rx_data->enable_dynamic_hc =
4678 test_bit(BNX2X_Q_FLG_DHC, flags);
4679 rx_data->max_sges_for_packet = params->max_sges_pkt;
4680 rx_data->client_qzone_id = params->cl_qzone_id;
4681 rx_data->max_agg_size = cpu_to_le16(params->tpa_agg_sz);
4682
4683 /* Always start in DROP_ALL mode */
4684 rx_data->state = cpu_to_le16(CLIENT_INIT_RX_DATA_UCAST_DROP_ALL |
4685 CLIENT_INIT_RX_DATA_MCAST_DROP_ALL);
4686
4687 /* We don't set drop flags */
4688 rx_data->drop_ip_cs_err_flg = 0;
4689 rx_data->drop_tcp_cs_err_flg = 0;
4690 rx_data->drop_ttl0_flg = 0;
4691 rx_data->drop_udp_cs_err_flg = 0;
4692 rx_data->inner_vlan_removal_enable_flg =
4693 test_bit(BNX2X_Q_FLG_VLAN, flags);
4694 rx_data->outer_vlan_removal_enable_flg =
4695 test_bit(BNX2X_Q_FLG_OV, flags);
4696 rx_data->status_block_id = params->fw_sb_id;
4697 rx_data->rx_sb_index_number = params->sb_cq_index;
4698 rx_data->max_tpa_queues = params->max_tpa_queues;
4699 rx_data->max_bytes_on_bd = cpu_to_le16(params->buf_sz);
4700 rx_data->sge_buff_size = cpu_to_le16(params->sge_buf_sz);
4701 rx_data->bd_page_base.lo =
4702 cpu_to_le32(U64_LO(params->dscr_map));
4703 rx_data->bd_page_base.hi =
4704 cpu_to_le32(U64_HI(params->dscr_map));
4705 rx_data->sge_page_base.lo =
4706 cpu_to_le32(U64_LO(params->sge_map));
4707 rx_data->sge_page_base.hi =
4708 cpu_to_le32(U64_HI(params->sge_map));
4709 rx_data->cqe_page_base.lo =
4710 cpu_to_le32(U64_LO(params->rcq_map));
4711 rx_data->cqe_page_base.hi =
4712 cpu_to_le32(U64_HI(params->rcq_map));
4713 rx_data->is_leading_rss = test_bit(BNX2X_Q_FLG_LEADING_RSS, flags);
4714
4715 if (test_bit(BNX2X_Q_FLG_MCAST, flags)) {
Yuval Mintz259afa12012-03-12 08:53:10 +00004716 rx_data->approx_mcast_engine_id = params->mcast_engine_id;
Ariel Elior6383c0b2011-07-14 08:31:57 +00004717 rx_data->is_approx_mcast = 1;
4718 }
4719
4720 rx_data->rss_engine_id = params->rss_engine_id;
4721
4722 /* silent vlan removal */
4723 rx_data->silent_vlan_removal_flg =
4724 test_bit(BNX2X_Q_FLG_SILENT_VLAN_REM, flags);
4725 rx_data->silent_vlan_value =
4726 cpu_to_le16(params->silent_removal_value);
4727 rx_data->silent_vlan_mask =
4728 cpu_to_le16(params->silent_removal_mask);
Ariel Elior6383c0b2011-07-14 08:31:57 +00004729}
4730
4731/* initialize the general, tx and rx parts of a queue object */
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03004732static void bnx2x_q_fill_setup_data_cmn(struct bnx2x *bp,
4733 struct bnx2x_queue_state_params *cmd_params,
4734 struct client_init_ramrod_data *data)
4735{
Ariel Elior6383c0b2011-07-14 08:31:57 +00004736 bnx2x_q_fill_init_general_data(bp, cmd_params->q_obj,
4737 &cmd_params->params.setup.gen_params,
4738 &data->general,
4739 &cmd_params->params.setup.flags);
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03004740
Ariel Elior6383c0b2011-07-14 08:31:57 +00004741 bnx2x_q_fill_init_tx_data(cmd_params->q_obj,
4742 &cmd_params->params.setup.txq_params,
4743 &data->tx,
4744 &cmd_params->params.setup.flags);
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03004745
Ariel Elior6383c0b2011-07-14 08:31:57 +00004746 bnx2x_q_fill_init_rx_data(cmd_params->q_obj,
4747 &cmd_params->params.setup.rxq_params,
4748 &data->rx,
4749 &cmd_params->params.setup.flags);
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03004750
Ariel Elior6383c0b2011-07-14 08:31:57 +00004751 bnx2x_q_fill_init_pause_data(cmd_params->q_obj,
4752 &cmd_params->params.setup.pause_params,
4753 &data->rx);
Vladislav Zolotarov042181f2011-06-14 01:33:39 +00004754}
4755
Ariel Elior6383c0b2011-07-14 08:31:57 +00004756/* initialize the general and tx parts of a tx-only queue object */
4757static void bnx2x_q_fill_setup_tx_only(struct bnx2x *bp,
4758 struct bnx2x_queue_state_params *cmd_params,
4759 struct tx_queue_init_ramrod_data *data)
4760{
4761 bnx2x_q_fill_init_general_data(bp, cmd_params->q_obj,
4762 &cmd_params->params.tx_only.gen_params,
4763 &data->general,
4764 &cmd_params->params.tx_only.flags);
4765
4766 bnx2x_q_fill_init_tx_data(cmd_params->q_obj,
4767 &cmd_params->params.tx_only.txq_params,
4768 &data->tx,
4769 &cmd_params->params.tx_only.flags);
4770
Merav Sicron51c1a582012-03-18 10:33:38 +00004771	DP(BNX2X_MSG_SP, "cid %d, tx bd page lo %x hi %x\n",
4772 cmd_params->q_obj->cids[0],
4773 data->tx.tx_bd_page_base.lo,
4774 data->tx.tx_bd_page_base.hi);
Ariel Elior6383c0b2011-07-14 08:31:57 +00004775}
Vladislav Zolotarov042181f2011-06-14 01:33:39 +00004776
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03004777/**
4778 * bnx2x_q_init - init HW/FW queue
4779 *
4780 * @bp: device handle
4781 * @params:	queue state parameters
4782 *
4783 * HW/FW initial Queue configuration:
4784 * - HC: Rx and Tx
4785 * - CDU context validation
4786 *
4787 */
4788static inline int bnx2x_q_init(struct bnx2x *bp,
4789 struct bnx2x_queue_state_params *params)
Vladislav Zolotarov042181f2011-06-14 01:33:39 +00004790{
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03004791 struct bnx2x_queue_sp_obj *o = params->q_obj;
4792 struct bnx2x_queue_init_params *init = &params->params.init;
Vladislav Zolotarov042181f2011-06-14 01:33:39 +00004793 u16 hc_usec;
Ariel Elior6383c0b2011-07-14 08:31:57 +00004794 u8 cos;
Vladislav Zolotarov042181f2011-06-14 01:33:39 +00004795
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03004796 /* Tx HC configuration */
4797 if (test_bit(BNX2X_Q_TYPE_HAS_TX, &o->type) &&
4798 test_bit(BNX2X_Q_FLG_HC, &init->tx.flags)) {
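		/* hc_rate is in interrupts/sec; convert it to the usec
		 * period the HC block expects, e.g. 50000 int/s -> 20 us.
		 */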
4799 hc_usec = init->tx.hc_rate ? 1000000 / init->tx.hc_rate : 0;
4800
4801 bnx2x_update_coalesce_sb_index(bp, init->tx.fw_sb_id,
4802 init->tx.sb_cq_index,
4803 !test_bit(BNX2X_Q_FLG_HC_EN, &init->tx.flags),
Vladislav Zolotarov042181f2011-06-14 01:33:39 +00004804 hc_usec);
Vladislav Zolotarov042181f2011-06-14 01:33:39 +00004805 }
4806
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03004807 /* Rx HC configuration */
4808 if (test_bit(BNX2X_Q_TYPE_HAS_RX, &o->type) &&
4809 test_bit(BNX2X_Q_FLG_HC, &init->rx.flags)) {
4810 hc_usec = init->rx.hc_rate ? 1000000 / init->rx.hc_rate : 0;
Vladislav Zolotarov042181f2011-06-14 01:33:39 +00004811
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03004812 bnx2x_update_coalesce_sb_index(bp, init->rx.fw_sb_id,
4813 init->rx.sb_cq_index,
4814 !test_bit(BNX2X_Q_FLG_HC_EN, &init->rx.flags),
4815 hc_usec);
4816 }
4817
4818 /* Set CDU context validation values */
Ariel Elior6383c0b2011-07-14 08:31:57 +00004819 for (cos = 0; cos < o->max_cos; cos++) {
Joe Perches94f05b02011-08-14 12:16:20 +00004820 DP(BNX2X_MSG_SP, "setting context validation. cid %d, cos %d\n",
Ariel Elior6383c0b2011-07-14 08:31:57 +00004821 o->cids[cos], cos);
Joe Perches94f05b02011-08-14 12:16:20 +00004822 DP(BNX2X_MSG_SP, "context pointer %p\n", init->cxts[cos]);
Ariel Elior6383c0b2011-07-14 08:31:57 +00004823 bnx2x_set_ctx_validation(bp, init->cxts[cos], o->cids[cos]);
4824 }
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03004825
4826 /* As no ramrod is sent, complete the command immediately */
4827 o->complete_cmd(bp, o, BNX2X_Q_CMD_INIT);
4828
Vladislav Zolotarov042181f2011-06-14 01:33:39 +00004829 mmiowb();
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03004830 smp_mb();
Vladislav Zolotarov042181f2011-06-14 01:33:39 +00004831
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03004832 return 0;
4833}
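
/* Illustrative usage sketch (not part of the driver): the INIT command
 * is normally driven through the generic queue state machine
 * (bnx2x_queue_state_change()) rather than by calling bnx2x_q_init()
 * directly.  The q_obj/cxt names below are hypothetical:
 *
 *	struct bnx2x_queue_state_params q_params = {NULL};
 *
 *	q_params.q_obj = &q_obj;
 *	q_params.cmd = BNX2X_Q_CMD_INIT;
 *	__set_bit(RAMROD_COMP_WAIT, &q_params.ramrod_flags);
 *	q_params.params.init.tx.hc_rate = 50000;	// 20 us period
 *	q_params.params.init.cxts[0] = cxt;		// CDU context
 *	rc = bnx2x_queue_state_change(bp, &q_params);
 */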
Vladislav Zolotarov042181f2011-06-14 01:33:39 +00004834
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03004835static inline int bnx2x_q_send_setup_e1x(struct bnx2x *bp,
4836 struct bnx2x_queue_state_params *params)
4837{
4838 struct bnx2x_queue_sp_obj *o = params->q_obj;
4839 struct client_init_ramrod_data *rdata =
4840 (struct client_init_ramrod_data *)o->rdata;
4841 dma_addr_t data_mapping = o->rdata_mapping;
4842 int ramrod = RAMROD_CMD_ID_ETH_CLIENT_SETUP;
Vladislav Zolotarov042181f2011-06-14 01:33:39 +00004843
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03004844 /* Clear the ramrod data */
4845 memset(rdata, 0, sizeof(*rdata));
4846
4847 /* Fill the ramrod data */
4848 bnx2x_q_fill_setup_data_cmn(bp, params, rdata);
4849
Yuval Mintz16a5fd92013-06-02 00:06:18 +00004850	/* No need for an explicit memory barrier here as long as we
4851	 * ensure the ordering of writing to the SPQ element
4852	 * and updating of the SPQ producer, which involves a memory
4853	 * read. If the memory read is removed we will have to put a
4854	 * full memory barrier there (inside bnx2x_sp_post()).
Vladislav Zolotarov53e51e22011-07-19 01:45:02 +00004855	 */
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03004856
Ariel Elior6383c0b2011-07-14 08:31:57 +00004857 return bnx2x_sp_post(bp, ramrod, o->cids[BNX2X_PRIMARY_CID_INDEX],
4858 U64_HI(data_mapping),
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03004859 U64_LO(data_mapping), ETH_CONNECTION_TYPE);
4860}
4861
4862static inline int bnx2x_q_send_setup_e2(struct bnx2x *bp,
4863 struct bnx2x_queue_state_params *params)
4864{
4865 struct bnx2x_queue_sp_obj *o = params->q_obj;
4866 struct client_init_ramrod_data *rdata =
4867 (struct client_init_ramrod_data *)o->rdata;
4868 dma_addr_t data_mapping = o->rdata_mapping;
4869 int ramrod = RAMROD_CMD_ID_ETH_CLIENT_SETUP;
4870
4871 /* Clear the ramrod data */
4872 memset(rdata, 0, sizeof(*rdata));
4873
4874 /* Fill the ramrod data */
4875 bnx2x_q_fill_setup_data_cmn(bp, params, rdata);
4876 bnx2x_q_fill_setup_data_e2(bp, params, rdata);
4877
Yuval Mintz16a5fd92013-06-02 00:06:18 +00004878	/* No need for an explicit memory barrier here as long as we
4879	 * ensure the ordering of writing to the SPQ element
4880	 * and updating of the SPQ producer, which involves a memory
4881	 * read. If the memory read is removed we will have to put a
4882	 * full memory barrier there (inside bnx2x_sp_post()).
Vladislav Zolotarov53e51e22011-07-19 01:45:02 +00004883	 */
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03004884
Ariel Elior6383c0b2011-07-14 08:31:57 +00004885 return bnx2x_sp_post(bp, ramrod, o->cids[BNX2X_PRIMARY_CID_INDEX],
4886 U64_HI(data_mapping),
4887 U64_LO(data_mapping), ETH_CONNECTION_TYPE);
4888}
4889
4890static inline int bnx2x_q_send_setup_tx_only(struct bnx2x *bp,
4891 struct bnx2x_queue_state_params *params)
4892{
4893 struct bnx2x_queue_sp_obj *o = params->q_obj;
4894 struct tx_queue_init_ramrod_data *rdata =
4895 (struct tx_queue_init_ramrod_data *)o->rdata;
4896 dma_addr_t data_mapping = o->rdata_mapping;
4897 int ramrod = RAMROD_CMD_ID_ETH_TX_QUEUE_SETUP;
4898 struct bnx2x_queue_setup_tx_only_params *tx_only_params =
4899 &params->params.tx_only;
4900 u8 cid_index = tx_only_params->cid_index;
4901
Ariel Elior6383c0b2011-07-14 08:31:57 +00004902 if (cid_index >= o->max_cos) {
4903 BNX2X_ERR("queue[%d]: cid_index (%d) is out of range\n",
4904 o->cl_id, cid_index);
4905 return -EINVAL;
4906 }
4907
Joe Perches94f05b02011-08-14 12:16:20 +00004908 DP(BNX2X_MSG_SP, "parameters received: cos: %d sp-id: %d\n",
Ariel Elior6383c0b2011-07-14 08:31:57 +00004909 tx_only_params->gen_params.cos,
4910 tx_only_params->gen_params.spcl_id);
4911
4912 /* Clear the ramrod data */
4913 memset(rdata, 0, sizeof(*rdata));
4914
4915 /* Fill the ramrod data */
4916 bnx2x_q_fill_setup_tx_only(bp, params, rdata);
4917
Merav Sicron51c1a582012-03-18 10:33:38 +00004918 DP(BNX2X_MSG_SP, "sending tx-only ramrod: cid %d, client-id %d, sp-client id %d, cos %d\n",
4919 o->cids[cid_index], rdata->general.client_id,
Ariel Elior6383c0b2011-07-14 08:31:57 +00004920 rdata->general.sp_client_id, rdata->general.cos);
4921
Yuval Mintz16a5fd92013-06-02 00:06:18 +00004922	/* No need for an explicit memory barrier here as long as we
4923	 * ensure the ordering of writing to the SPQ element
4924	 * and updating of the SPQ producer, which involves a memory
4925	 * read. If the memory read is removed we will have to put a
4926	 * full memory barrier there (inside bnx2x_sp_post()).
Ariel Elior6383c0b2011-07-14 08:31:57 +00004927	 */
4928
4929 return bnx2x_sp_post(bp, ramrod, o->cids[cid_index],
4930 U64_HI(data_mapping),
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03004931 U64_LO(data_mapping), ETH_CONNECTION_TYPE);
4932}
4933
4934static void bnx2x_q_fill_update_data(struct bnx2x *bp,
4935 struct bnx2x_queue_sp_obj *obj,
4936 struct bnx2x_queue_update_params *params,
4937 struct client_update_ramrod_data *data)
4938{
4939 /* Client ID of the client to update */
4940 data->client_id = obj->cl_id;
4941
4942 /* Function ID of the client to update */
4943 data->func_id = obj->func_id;
4944
4945 /* Default VLAN value */
4946 data->default_vlan = cpu_to_le16(params->def_vlan);
4947
4948 /* Inner VLAN stripping */
4949 data->inner_vlan_removal_enable_flg =
4950 test_bit(BNX2X_Q_UPDATE_IN_VLAN_REM, &params->update_flags);
4951 data->inner_vlan_removal_change_flg =
4952 test_bit(BNX2X_Q_UPDATE_IN_VLAN_REM_CHNG,
4953 &params->update_flags);
4954
Yuval Mintz16a5fd92013-06-02 00:06:18 +00004955 /* Outer VLAN stripping */
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03004956 data->outer_vlan_removal_enable_flg =
4957 test_bit(BNX2X_Q_UPDATE_OUT_VLAN_REM, &params->update_flags);
4958 data->outer_vlan_removal_change_flg =
4959 test_bit(BNX2X_Q_UPDATE_OUT_VLAN_REM_CHNG,
4960 &params->update_flags);
4961
4962	/* Drop packets whose source MAC doesn't belong to this
4963 * Queue.
4964 */
4965 data->anti_spoofing_enable_flg =
4966 test_bit(BNX2X_Q_UPDATE_ANTI_SPOOF, &params->update_flags);
4967 data->anti_spoofing_change_flg =
4968 test_bit(BNX2X_Q_UPDATE_ANTI_SPOOF_CHNG, &params->update_flags);
4969
4970 /* Activate/Deactivate */
4971 data->activate_flg =
4972 test_bit(BNX2X_Q_UPDATE_ACTIVATE, &params->update_flags);
4973 data->activate_change_flg =
4974 test_bit(BNX2X_Q_UPDATE_ACTIVATE_CHNG, &params->update_flags);
4975
4976 /* Enable default VLAN */
4977 data->default_vlan_enable_flg =
4978 test_bit(BNX2X_Q_UPDATE_DEF_VLAN_EN, &params->update_flags);
4979 data->default_vlan_change_flg =
4980 test_bit(BNX2X_Q_UPDATE_DEF_VLAN_EN_CHNG,
4981 &params->update_flags);
4982
4983 /* silent vlan removal */
4984 data->silent_vlan_change_flg =
4985 test_bit(BNX2X_Q_UPDATE_SILENT_VLAN_REM_CHNG,
4986 &params->update_flags);
4987 data->silent_vlan_removal_flg =
4988 test_bit(BNX2X_Q_UPDATE_SILENT_VLAN_REM, &params->update_flags);
4989 data->silent_vlan_value = cpu_to_le16(params->silent_removal_value);
4990 data->silent_vlan_mask = cpu_to_le16(params->silent_removal_mask);
4991}
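
/* Sketch of the flag convention used above (illustrative values): each
 * UPDATE property travels as a {value, *_CHNG} pair, and the FW only
 * applies a value whose *_CHNG bit is also set.  E.g. requesting
 * silent removal of VLAN 100:
 *
 *	struct bnx2x_queue_update_params *up = &q_params.params.update;
 *
 *	__set_bit(BNX2X_Q_UPDATE_SILENT_VLAN_REM_CHNG, &up->update_flags);
 *	__set_bit(BNX2X_Q_UPDATE_SILENT_VLAN_REM, &up->update_flags);
 *	up->silent_removal_value = 100;
 *	up->silent_removal_mask = 0xfff;	// VLAN VID mask
 */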
4992
4993static inline int bnx2x_q_send_update(struct bnx2x *bp,
4994 struct bnx2x_queue_state_params *params)
4995{
4996 struct bnx2x_queue_sp_obj *o = params->q_obj;
4997 struct client_update_ramrod_data *rdata =
4998 (struct client_update_ramrod_data *)o->rdata;
4999 dma_addr_t data_mapping = o->rdata_mapping;
Ariel Elior6383c0b2011-07-14 08:31:57 +00005000 struct bnx2x_queue_update_params *update_params =
5001 &params->params.update;
5002 u8 cid_index = update_params->cid_index;
5003
5004 if (cid_index >= o->max_cos) {
5005 BNX2X_ERR("queue[%d]: cid_index (%d) is out of range\n",
5006 o->cl_id, cid_index);
5007 return -EINVAL;
5008 }
5009
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03005010 /* Clear the ramrod data */
5011 memset(rdata, 0, sizeof(*rdata));
5012
5013 /* Fill the ramrod data */
Ariel Elior6383c0b2011-07-14 08:31:57 +00005014 bnx2x_q_fill_update_data(bp, o, update_params, rdata);
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03005015
Yuval Mintz16a5fd92013-06-02 00:06:18 +00005016	/* No need for an explicit memory barrier here as long as we
5017	 * ensure the ordering of writing to the SPQ element
5018	 * and updating of the SPQ producer, which involves a memory
5019	 * read. If the memory read is removed we will have to put a
5020	 * full memory barrier there (inside bnx2x_sp_post()).
Vladislav Zolotarov53e51e22011-07-19 01:45:02 +00005021	 */
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03005022
Ariel Elior6383c0b2011-07-14 08:31:57 +00005023 return bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_CLIENT_UPDATE,
5024 o->cids[cid_index], U64_HI(data_mapping),
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03005025 U64_LO(data_mapping), ETH_CONNECTION_TYPE);
5026}
5027
5028/**
5029 * bnx2x_q_send_deactivate - send DEACTIVATE command
5030 *
5031 * @bp: device handle
5032 * @params:	queue state parameters
5033 *
5034 * implemented using the UPDATE command.
5035 */
5036static inline int bnx2x_q_send_deactivate(struct bnx2x *bp,
5037 struct bnx2x_queue_state_params *params)
5038{
5039 struct bnx2x_queue_update_params *update = &params->params.update;
5040
5041 memset(update, 0, sizeof(*update));
5042
5043 __set_bit(BNX2X_Q_UPDATE_ACTIVATE_CHNG, &update->update_flags);
5044
5045 return bnx2x_q_send_update(bp, params);
5046}
5047
5048/**
5049 * bnx2x_q_send_activate - send ACTIVATE command
5050 *
5051 * @bp: device handle
5052 * @params:	queue state parameters
5053 *
5054 * implemented using the UPDATE command.
5055 */
5056static inline int bnx2x_q_send_activate(struct bnx2x *bp,
5057 struct bnx2x_queue_state_params *params)
5058{
5059 struct bnx2x_queue_update_params *update = &params->params.update;
5060
5061 memset(update, 0, sizeof(*update));
5062
5063 __set_bit(BNX2X_Q_UPDATE_ACTIVATE, &update->update_flags);
5064 __set_bit(BNX2X_Q_UPDATE_ACTIVATE_CHNG, &update->update_flags);
5065
5066 return bnx2x_q_send_update(bp, params);
5067}
5068
5069static inline int bnx2x_q_send_update_tpa(struct bnx2x *bp,
5070 struct bnx2x_queue_state_params *params)
5071{
5072 /* TODO: Not implemented yet. */
5073	return -EINVAL;
5074}
5075
5076static inline int bnx2x_q_send_halt(struct bnx2x *bp,
5077 struct bnx2x_queue_state_params *params)
5078{
5079 struct bnx2x_queue_sp_obj *o = params->q_obj;
5080
Ariel Elior6383c0b2011-07-14 08:31:57 +00005081 return bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_HALT,
5082 o->cids[BNX2X_PRIMARY_CID_INDEX], 0, o->cl_id,
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03005083 ETH_CONNECTION_TYPE);
5084}
5085
5086static inline int bnx2x_q_send_cfc_del(struct bnx2x *bp,
5087 struct bnx2x_queue_state_params *params)
5088{
5089 struct bnx2x_queue_sp_obj *o = params->q_obj;
Ariel Elior6383c0b2011-07-14 08:31:57 +00005090 u8 cid_idx = params->params.cfc_del.cid_index;
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03005091
Ariel Elior6383c0b2011-07-14 08:31:57 +00005092 if (cid_idx >= o->max_cos) {
5093 BNX2X_ERR("queue[%d]: cid_index (%d) is out of range\n",
5094 o->cl_id, cid_idx);
5095 return -EINVAL;
5096 }
5097
5098 return bnx2x_sp_post(bp, RAMROD_CMD_ID_COMMON_CFC_DEL,
5099 o->cids[cid_idx], 0, 0, NONE_CONNECTION_TYPE);
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03005100}
5101
5102static inline int bnx2x_q_send_terminate(struct bnx2x *bp,
5103 struct bnx2x_queue_state_params *params)
5104{
5105 struct bnx2x_queue_sp_obj *o = params->q_obj;
Ariel Elior6383c0b2011-07-14 08:31:57 +00005106 u8 cid_index = params->params.terminate.cid_index;
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03005107
Ariel Elior6383c0b2011-07-14 08:31:57 +00005108 if (cid_index >= o->max_cos) {
5109 BNX2X_ERR("queue[%d]: cid_index (%d) is out of range\n",
5110 o->cl_id, cid_index);
5111 return -EINVAL;
5112 }
5113
5114 return bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_TERMINATE,
5115 o->cids[cid_index], 0, 0, ETH_CONNECTION_TYPE);
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03005116}
5117
5118static inline int bnx2x_q_send_empty(struct bnx2x *bp,
5119 struct bnx2x_queue_state_params *params)
5120{
5121 struct bnx2x_queue_sp_obj *o = params->q_obj;
5122
Ariel Elior6383c0b2011-07-14 08:31:57 +00005123 return bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_EMPTY,
5124 o->cids[BNX2X_PRIMARY_CID_INDEX], 0, 0,
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03005125 ETH_CONNECTION_TYPE);
5126}
5127
5128static inline int bnx2x_queue_send_cmd_cmn(struct bnx2x *bp,
5129 struct bnx2x_queue_state_params *params)
5130{
5131 switch (params->cmd) {
5132 case BNX2X_Q_CMD_INIT:
5133 return bnx2x_q_init(bp, params);
Ariel Elior6383c0b2011-07-14 08:31:57 +00005134 case BNX2X_Q_CMD_SETUP_TX_ONLY:
5135 return bnx2x_q_send_setup_tx_only(bp, params);
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03005136 case BNX2X_Q_CMD_DEACTIVATE:
5137 return bnx2x_q_send_deactivate(bp, params);
5138 case BNX2X_Q_CMD_ACTIVATE:
5139 return bnx2x_q_send_activate(bp, params);
5140 case BNX2X_Q_CMD_UPDATE:
5141 return bnx2x_q_send_update(bp, params);
5142 case BNX2X_Q_CMD_UPDATE_TPA:
5143 return bnx2x_q_send_update_tpa(bp, params);
5144 case BNX2X_Q_CMD_HALT:
5145 return bnx2x_q_send_halt(bp, params);
5146 case BNX2X_Q_CMD_CFC_DEL:
5147 return bnx2x_q_send_cfc_del(bp, params);
5148 case BNX2X_Q_CMD_TERMINATE:
5149 return bnx2x_q_send_terminate(bp, params);
5150 case BNX2X_Q_CMD_EMPTY:
5151 return bnx2x_q_send_empty(bp, params);
5152 default:
5153 BNX2X_ERR("Unknown command: %d\n", params->cmd);
5154 return -EINVAL;
5155 }
5156}
5157
5158static int bnx2x_queue_send_cmd_e1x(struct bnx2x *bp,
5159 struct bnx2x_queue_state_params *params)
5160{
5161 switch (params->cmd) {
5162 case BNX2X_Q_CMD_SETUP:
5163 return bnx2x_q_send_setup_e1x(bp, params);
5164 case BNX2X_Q_CMD_INIT:
Ariel Elior6383c0b2011-07-14 08:31:57 +00005165 case BNX2X_Q_CMD_SETUP_TX_ONLY:
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03005166 case BNX2X_Q_CMD_DEACTIVATE:
5167 case BNX2X_Q_CMD_ACTIVATE:
5168 case BNX2X_Q_CMD_UPDATE:
5169 case BNX2X_Q_CMD_UPDATE_TPA:
5170 case BNX2X_Q_CMD_HALT:
5171 case BNX2X_Q_CMD_CFC_DEL:
5172 case BNX2X_Q_CMD_TERMINATE:
5173 case BNX2X_Q_CMD_EMPTY:
5174 return bnx2x_queue_send_cmd_cmn(bp, params);
5175 default:
5176 BNX2X_ERR("Unknown command: %d\n", params->cmd);
5177 return -EINVAL;
5178 }
5179}
5180
5181static int bnx2x_queue_send_cmd_e2(struct bnx2x *bp,
5182 struct bnx2x_queue_state_params *params)
5183{
5184 switch (params->cmd) {
5185 case BNX2X_Q_CMD_SETUP:
5186 return bnx2x_q_send_setup_e2(bp, params);
5187 case BNX2X_Q_CMD_INIT:
Ariel Elior6383c0b2011-07-14 08:31:57 +00005188 case BNX2X_Q_CMD_SETUP_TX_ONLY:
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03005189 case BNX2X_Q_CMD_DEACTIVATE:
5190 case BNX2X_Q_CMD_ACTIVATE:
5191 case BNX2X_Q_CMD_UPDATE:
5192 case BNX2X_Q_CMD_UPDATE_TPA:
5193 case BNX2X_Q_CMD_HALT:
5194 case BNX2X_Q_CMD_CFC_DEL:
5195 case BNX2X_Q_CMD_TERMINATE:
5196 case BNX2X_Q_CMD_EMPTY:
5197 return bnx2x_queue_send_cmd_cmn(bp, params);
5198 default:
5199 BNX2X_ERR("Unknown command: %d\n", params->cmd);
5200 return -EINVAL;
5201 }
5202}
5203
5204/**
5205 * bnx2x_queue_chk_transition - check state machine of a regular Queue
5206 *
5207 * @bp: device handle
5208 * @o:		queue state object
5209 * @params:	queue state parameters
5210 *
5211 * (regular queues only, not the Forwarding queue)
5212 * It both checks if the requested command is legal in the current
5213 * state and, if it's legal, sets a `next_state' in the object
5214 * that will be used in the completion flow to set the `state'
5215 * of the object.
5216 *
5217 * returns 0 if a requested command is a legal transition,
5218 * -EINVAL otherwise.
5219 */
5220static int bnx2x_queue_chk_transition(struct bnx2x *bp,
5221 struct bnx2x_queue_sp_obj *o,
5222 struct bnx2x_queue_state_params *params)
5223{
5224 enum bnx2x_q_state state = o->state, next_state = BNX2X_Q_STATE_MAX;
5225 enum bnx2x_queue_cmd cmd = params->cmd;
Ariel Elior6383c0b2011-07-14 08:31:57 +00005226 struct bnx2x_queue_update_params *update_params =
5227 &params->params.update;
5228 u8 next_tx_only = o->num_tx_only;
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03005229
Yuval Mintz16a5fd92013-06-02 00:06:18 +00005230	/* Forget all commands pending completion if a driver-only state
Dmitry Kravkov6debea82011-07-19 01:42:04 +00005231 * transition has been requested.
5232 */
5233 if (test_bit(RAMROD_DRV_CLR_ONLY, &params->ramrod_flags)) {
5234 o->pending = 0;
5235 o->next_state = BNX2X_Q_STATE_MAX;
5236 }
5237
Yuval Mintz16a5fd92013-06-02 00:06:18 +00005238 /* Don't allow a next state transition if we are in the middle of
Dmitry Kravkov6debea82011-07-19 01:42:04 +00005239 * the previous one.
5240 */
Yuval Mintz04c46732013-01-23 03:21:46 +00005241 if (o->pending) {
5242 BNX2X_ERR("Blocking transition since pending was %lx\n",
5243 o->pending);
Dmitry Kravkov6debea82011-07-19 01:42:04 +00005244 return -EBUSY;
Yuval Mintz04c46732013-01-23 03:21:46 +00005245 }
Dmitry Kravkov6debea82011-07-19 01:42:04 +00005246
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03005247 switch (state) {
5248 case BNX2X_Q_STATE_RESET:
5249 if (cmd == BNX2X_Q_CMD_INIT)
5250 next_state = BNX2X_Q_STATE_INITIALIZED;
5251
5252 break;
5253 case BNX2X_Q_STATE_INITIALIZED:
5254 if (cmd == BNX2X_Q_CMD_SETUP) {
5255 if (test_bit(BNX2X_Q_FLG_ACTIVE,
5256 &params->params.setup.flags))
5257 next_state = BNX2X_Q_STATE_ACTIVE;
5258 else
5259 next_state = BNX2X_Q_STATE_INACTIVE;
5260 }
5261
5262 break;
5263 case BNX2X_Q_STATE_ACTIVE:
5264 if (cmd == BNX2X_Q_CMD_DEACTIVATE)
5265 next_state = BNX2X_Q_STATE_INACTIVE;
5266
5267 else if ((cmd == BNX2X_Q_CMD_EMPTY) ||
5268 (cmd == BNX2X_Q_CMD_UPDATE_TPA))
5269 next_state = BNX2X_Q_STATE_ACTIVE;
5270
Ariel Elior6383c0b2011-07-14 08:31:57 +00005271 else if (cmd == BNX2X_Q_CMD_SETUP_TX_ONLY) {
5272 next_state = BNX2X_Q_STATE_MULTI_COS;
5273 next_tx_only = 1;
5274 }
5275
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03005276 else if (cmd == BNX2X_Q_CMD_HALT)
5277 next_state = BNX2X_Q_STATE_STOPPED;
5278
5279 else if (cmd == BNX2X_Q_CMD_UPDATE) {
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03005280 /* If "active" state change is requested, update the
5281 * state accordingly.
5282 */
5283 if (test_bit(BNX2X_Q_UPDATE_ACTIVATE_CHNG,
5284 &update_params->update_flags) &&
5285 !test_bit(BNX2X_Q_UPDATE_ACTIVATE,
5286 &update_params->update_flags))
5287 next_state = BNX2X_Q_STATE_INACTIVE;
5288 else
5289 next_state = BNX2X_Q_STATE_ACTIVE;
5290 }
5291
5292 break;
Ariel Elior6383c0b2011-07-14 08:31:57 +00005293 case BNX2X_Q_STATE_MULTI_COS:
5294 if (cmd == BNX2X_Q_CMD_TERMINATE)
5295 next_state = BNX2X_Q_STATE_MCOS_TERMINATED;
5296
5297 else if (cmd == BNX2X_Q_CMD_SETUP_TX_ONLY) {
5298 next_state = BNX2X_Q_STATE_MULTI_COS;
5299 next_tx_only = o->num_tx_only + 1;
5300 }
5301
5302 else if ((cmd == BNX2X_Q_CMD_EMPTY) ||
5303 (cmd == BNX2X_Q_CMD_UPDATE_TPA))
5304 next_state = BNX2X_Q_STATE_MULTI_COS;
5305
5306 else if (cmd == BNX2X_Q_CMD_UPDATE) {
5307 /* If "active" state change is requested, update the
5308 * state accordingly.
5309 */
5310 if (test_bit(BNX2X_Q_UPDATE_ACTIVATE_CHNG,
5311 &update_params->update_flags) &&
5312 !test_bit(BNX2X_Q_UPDATE_ACTIVATE,
5313 &update_params->update_flags))
5314 next_state = BNX2X_Q_STATE_INACTIVE;
5315 else
5316 next_state = BNX2X_Q_STATE_MULTI_COS;
5317 }
5318
5319 break;
5320 case BNX2X_Q_STATE_MCOS_TERMINATED:
5321 if (cmd == BNX2X_Q_CMD_CFC_DEL) {
5322 next_tx_only = o->num_tx_only - 1;
5323 if (next_tx_only == 0)
5324 next_state = BNX2X_Q_STATE_ACTIVE;
5325 else
5326 next_state = BNX2X_Q_STATE_MULTI_COS;
5327 }
5328
5329 break;
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03005330 case BNX2X_Q_STATE_INACTIVE:
5331 if (cmd == BNX2X_Q_CMD_ACTIVATE)
5332 next_state = BNX2X_Q_STATE_ACTIVE;
5333
5334 else if ((cmd == BNX2X_Q_CMD_EMPTY) ||
5335 (cmd == BNX2X_Q_CMD_UPDATE_TPA))
5336 next_state = BNX2X_Q_STATE_INACTIVE;
5337
5338 else if (cmd == BNX2X_Q_CMD_HALT)
5339 next_state = BNX2X_Q_STATE_STOPPED;
5340
5341 else if (cmd == BNX2X_Q_CMD_UPDATE) {
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03005342 /* If "active" state change is requested, update the
5343 * state accordingly.
5344 */
5345 if (test_bit(BNX2X_Q_UPDATE_ACTIVATE_CHNG,
5346 &update_params->update_flags) &&
5347 test_bit(BNX2X_Q_UPDATE_ACTIVATE,
Ariel Elior6383c0b2011-07-14 08:31:57 +00005348		    &update_params->update_flags)) {
5349 if (o->num_tx_only == 0)
5350 next_state = BNX2X_Q_STATE_ACTIVE;
5351 else /* tx only queues exist for this queue */
5352 next_state = BNX2X_Q_STATE_MULTI_COS;
5353 } else
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03005354 next_state = BNX2X_Q_STATE_INACTIVE;
5355 }
5356
5357 break;
5358 case BNX2X_Q_STATE_STOPPED:
5359 if (cmd == BNX2X_Q_CMD_TERMINATE)
5360 next_state = BNX2X_Q_STATE_TERMINATED;
5361
5362 break;
5363 case BNX2X_Q_STATE_TERMINATED:
5364 if (cmd == BNX2X_Q_CMD_CFC_DEL)
5365 next_state = BNX2X_Q_STATE_RESET;
5366
5367 break;
5368 default:
5369 BNX2X_ERR("Illegal state: %d\n", state);
5370 }
5371
5372 /* Transition is assured */
5373 if (next_state != BNX2X_Q_STATE_MAX) {
5374 DP(BNX2X_MSG_SP, "Good state transition: %d(%d)->%d\n",
5375 state, cmd, next_state);
5376 o->next_state = next_state;
Ariel Elior6383c0b2011-07-14 08:31:57 +00005377 o->next_tx_only = next_tx_only;
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03005378 return 0;
5379 }
5380
5381 DP(BNX2X_MSG_SP, "Bad state transition request: %d %d\n", state, cmd);
5382
5383 return -EINVAL;
5384}
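
/* Summary of the regular-queue state machine implemented above
 * (informational sketch derived from bnx2x_queue_chk_transition()):
 *
 *	RESET --INIT--> INITIALIZED --SETUP--> ACTIVE or INACTIVE
 *	ACTIVE --SETUP_TX_ONLY--> MULTI_COS --TERMINATE--> MCOS_TERMINATED
 *	MCOS_TERMINATED --CFC_DEL--> ACTIVE (last tx-only) or MULTI_COS
 *	ACTIVE/INACTIVE --HALT--> STOPPED --TERMINATE--> TERMINATED
 *	TERMINATED --CFC_DEL--> RESET
 */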
5385
5386void bnx2x_init_queue_obj(struct bnx2x *bp,
5387 struct bnx2x_queue_sp_obj *obj,
Ariel Elior6383c0b2011-07-14 08:31:57 +00005388 u8 cl_id, u32 *cids, u8 cid_cnt, u8 func_id,
5389 void *rdata,
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03005390 dma_addr_t rdata_mapping, unsigned long type)
5391{
5392 memset(obj, 0, sizeof(*obj));
5393
Ariel Elior6383c0b2011-07-14 08:31:57 +00005394	/* We currently support at most BNX2X_MULTI_TX_COS Tx CoS values */
5395 BUG_ON(BNX2X_MULTI_TX_COS < cid_cnt);
5396
5397 memcpy(obj->cids, cids, sizeof(obj->cids[0]) * cid_cnt);
5398 obj->max_cos = cid_cnt;
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03005399 obj->cl_id = cl_id;
5400 obj->func_id = func_id;
5401 obj->rdata = rdata;
5402 obj->rdata_mapping = rdata_mapping;
5403 obj->type = type;
5404 obj->next_state = BNX2X_Q_STATE_MAX;
5405
5406 if (CHIP_IS_E1x(bp))
5407 obj->send_cmd = bnx2x_queue_send_cmd_e1x;
5408 else
5409 obj->send_cmd = bnx2x_queue_send_cmd_e2;
5410
5411 obj->check_transition = bnx2x_queue_chk_transition;
5412
5413 obj->complete_cmd = bnx2x_queue_comp_cmd;
5414 obj->wait_comp = bnx2x_queue_wait_comp;
5415 obj->set_pending = bnx2x_queue_set_pending;
5416}
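
/* Illustrative initialization sketch (locals are hypothetical): the
 * type argument is a bitmap probed with test_bit(), so BNX2X_Q_TYPE_*
 * values are bit numbers, not masks:
 *
 *	unsigned long q_type = 0;
 *	u32 cids[BNX2X_MULTI_TX_COS];
 *
 *	cids[BNX2X_PRIMARY_CID_INDEX] = cid;
 *	__set_bit(BNX2X_Q_TYPE_HAS_RX, &q_type);
 *	__set_bit(BNX2X_Q_TYPE_HAS_TX, &q_type);
 *	bnx2x_init_queue_obj(bp, &q_obj, cl_id, cids, 1, BP_FUNC(bp),
 *			     rdata, rdata_mapping, q_type);
 */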
5417
Ariel Elior67c431a2013-01-01 05:22:36 +00005418/* Return a queue object's logical state */
5419int bnx2x_get_q_logical_state(struct bnx2x *bp,
5420 struct bnx2x_queue_sp_obj *obj)
5421{
5422 switch (obj->state) {
5423 case BNX2X_Q_STATE_ACTIVE:
5424 case BNX2X_Q_STATE_MULTI_COS:
5425 return BNX2X_Q_LOGICAL_STATE_ACTIVE;
5426 case BNX2X_Q_STATE_RESET:
5427 case BNX2X_Q_STATE_INITIALIZED:
5428 case BNX2X_Q_STATE_MCOS_TERMINATED:
5429 case BNX2X_Q_STATE_INACTIVE:
5430 case BNX2X_Q_STATE_STOPPED:
5431 case BNX2X_Q_STATE_TERMINATED:
5432 case BNX2X_Q_STATE_FLRED:
5433 return BNX2X_Q_LOGICAL_STATE_STOPPED;
5434 default:
5435 return -EINVAL;
5436 }
5437}
5438
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03005439/********************** Function state object *********************************/
Dmitry Kravkov6debea82011-07-19 01:42:04 +00005440enum bnx2x_func_state bnx2x_func_get_state(struct bnx2x *bp,
5441 struct bnx2x_func_sp_obj *o)
5442{
5443 /* in the middle of transaction - return INVALID state */
5444 if (o->pending)
5445 return BNX2X_F_STATE_MAX;
5446
Yuval Mintz16a5fd92013-06-02 00:06:18 +00005447	/* Ensure the ordering of reading o->pending and o->state:
Dmitry Kravkov6debea82011-07-19 01:42:04 +00005448	 * o->pending should be read first.
5449 */
5450 rmb();
5451
5452 return o->state;
5453}
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03005454
5455static int bnx2x_func_wait_comp(struct bnx2x *bp,
5456 struct bnx2x_func_sp_obj *o,
5457 enum bnx2x_func_cmd cmd)
5458{
5459 return bnx2x_state_wait(bp, cmd, &o->pending);
5460}
5461
5462/**
5463 * bnx2x_func_state_change_comp - complete the state machine transition
5464 *
5465 * @bp: device handle
5466 * @o:		function state object
5467 * @cmd:	command that has completed
5468 *
5469 * Called on state change transition. Completes the state
5470 * machine transition only - no HW interaction.
5471 */
5472static inline int bnx2x_func_state_change_comp(struct bnx2x *bp,
5473 struct bnx2x_func_sp_obj *o,
5474 enum bnx2x_func_cmd cmd)
5475{
5476 unsigned long cur_pending = o->pending;
5477
5478 if (!test_and_clear_bit(cmd, &cur_pending)) {
Merav Sicron51c1a582012-03-18 10:33:38 +00005479 BNX2X_ERR("Bad MC reply %d for func %d in state %d pending 0x%lx, next_state %d\n",
5480 cmd, BP_FUNC(bp), o->state,
5481 cur_pending, o->next_state);
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03005482 return -EINVAL;
5483 }
5484
Joe Perches94f05b02011-08-14 12:16:20 +00005485 DP(BNX2X_MSG_SP,
5486 "Completing command %d for func %d, setting state to %d\n",
5487 cmd, BP_FUNC(bp), o->next_state);
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03005488
5489 o->state = o->next_state;
5490 o->next_state = BNX2X_F_STATE_MAX;
5491
5492 /* It's important that o->state and o->next_state are
5493 * updated before o->pending.
5494 */
5495 wmb();
5496
5497 clear_bit(cmd, &o->pending);
5498 smp_mb__after_clear_bit();
5499
5500 return 0;
5501}
5502
5503/**
5504 * bnx2x_func_comp_cmd - complete the state change command
5505 *
5506 * @bp: device handle
5507 * @o:		function state object
5508 * @cmd:	command to check the completion for
5509 *
5510 * Checks that the arrived completion is expected.
5511 */
5512static int bnx2x_func_comp_cmd(struct bnx2x *bp,
5513 struct bnx2x_func_sp_obj *o,
5514 enum bnx2x_func_cmd cmd)
5515{
5516 /* Complete the state machine part first, check if it's a
5517 * legal completion.
5518 */
5519 int rc = bnx2x_func_state_change_comp(bp, o, cmd);
Vladislav Zolotarov042181f2011-06-14 01:33:39 +00005520 return rc;
5521}
5522
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03005523/**
5524 * bnx2x_func_chk_transition - perform function state machine transition
5525 *
5526 * @bp: device handle
5527 * @o:		function state object
5528 * @params:	function state parameters
5529 *
5530 * It both checks if the requested command is legal in the current
5531 * state and, if it's legal, sets a `next_state' in the object
5532 * that will be used in the completion flow to set the `state'
5533 * of the object.
5534 *
5535 * returns 0 if a requested command is a legal transition,
5536 * -EINVAL otherwise.
5537 */
5538static int bnx2x_func_chk_transition(struct bnx2x *bp,
5539 struct bnx2x_func_sp_obj *o,
5540 struct bnx2x_func_state_params *params)
Vladislav Zolotarov042181f2011-06-14 01:33:39 +00005541{
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03005542 enum bnx2x_func_state state = o->state, next_state = BNX2X_F_STATE_MAX;
5543 enum bnx2x_func_cmd cmd = params->cmd;
Vladislav Zolotarov042181f2011-06-14 01:33:39 +00005544
Yuval Mintz16a5fd92013-06-02 00:06:18 +00005545	/* Forget all commands pending completion if a driver-only state
Dmitry Kravkov6debea82011-07-19 01:42:04 +00005546 * transition has been requested.
5547 */
5548 if (test_bit(RAMROD_DRV_CLR_ONLY, &params->ramrod_flags)) {
5549 o->pending = 0;
5550 o->next_state = BNX2X_F_STATE_MAX;
5551 }
5552
Yuval Mintz16a5fd92013-06-02 00:06:18 +00005553 /* Don't allow a next state transition if we are in the middle of
Dmitry Kravkov6debea82011-07-19 01:42:04 +00005554 * the previous one.
5555 */
5556 if (o->pending)
5557 return -EBUSY;
5558
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03005559 switch (state) {
5560 case BNX2X_F_STATE_RESET:
5561 if (cmd == BNX2X_F_CMD_HW_INIT)
5562 next_state = BNX2X_F_STATE_INITIALIZED;
Vladislav Zolotarov042181f2011-06-14 01:33:39 +00005563
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03005564 break;
5565 case BNX2X_F_STATE_INITIALIZED:
5566 if (cmd == BNX2X_F_CMD_START)
5567 next_state = BNX2X_F_STATE_STARTED;
5568
5569 else if (cmd == BNX2X_F_CMD_HW_RESET)
5570 next_state = BNX2X_F_STATE_RESET;
5571
5572 break;
5573 case BNX2X_F_STATE_STARTED:
5574 if (cmd == BNX2X_F_CMD_STOP)
5575 next_state = BNX2X_F_STATE_INITIALIZED;
Barak Witkowskia3348722012-04-23 03:04:46 +00005576		/* afex ramrods can be sent only in started mode, and only
5577		 * if a function_stop ramrod completion is not pending;
5578		 * for these events the next state remains STARTED.
5579 */
5580 else if ((cmd == BNX2X_F_CMD_AFEX_UPDATE) &&
5581 (!test_bit(BNX2X_F_CMD_STOP, &o->pending)))
5582 next_state = BNX2X_F_STATE_STARTED;
5583
5584 else if ((cmd == BNX2X_F_CMD_AFEX_VIFLISTS) &&
5585 (!test_bit(BNX2X_F_CMD_STOP, &o->pending)))
5586 next_state = BNX2X_F_STATE_STARTED;
Merav Sicron55c11942012-11-07 00:45:48 +00005587
5588 /* Switch_update ramrod can be sent in either started or
5589 * tx_stopped state, and it doesn't change the state.
5590 */
5591 else if ((cmd == BNX2X_F_CMD_SWITCH_UPDATE) &&
5592 (!test_bit(BNX2X_F_CMD_STOP, &o->pending)))
5593 next_state = BNX2X_F_STATE_STARTED;
5594
Dmitry Kravkov6debea82011-07-19 01:42:04 +00005595 else if (cmd == BNX2X_F_CMD_TX_STOP)
5596 next_state = BNX2X_F_STATE_TX_STOPPED;
5597
5598 break;
5599 case BNX2X_F_STATE_TX_STOPPED:
Merav Sicron55c11942012-11-07 00:45:48 +00005600 if ((cmd == BNX2X_F_CMD_SWITCH_UPDATE) &&
5601 (!test_bit(BNX2X_F_CMD_STOP, &o->pending)))
5602 next_state = BNX2X_F_STATE_TX_STOPPED;
5603
5604 else if (cmd == BNX2X_F_CMD_TX_START)
Dmitry Kravkov6debea82011-07-19 01:42:04 +00005605 next_state = BNX2X_F_STATE_STARTED;
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03005606
5607 break;
5608 default:
5609 BNX2X_ERR("Unknown state: %d\n", state);
5610 }
5611
5612 /* Transition is assured */
5613 if (next_state != BNX2X_F_STATE_MAX) {
5614 DP(BNX2X_MSG_SP, "Good function state transition: %d(%d)->%d\n",
5615 state, cmd, next_state);
5616 o->next_state = next_state;
5617 return 0;
5618 }
5619
5620 DP(BNX2X_MSG_SP, "Bad function state transition request: %d %d\n",
5621 state, cmd);
5622
5623 return -EINVAL;
5624}
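
/* Summary of the function state machine implemented above
 * (informational sketch derived from bnx2x_func_chk_transition()):
 *
 *	RESET --HW_INIT--> INITIALIZED --START--> STARTED
 *	INITIALIZED --HW_RESET--> RESET
 *	STARTED --STOP--> INITIALIZED
 *	STARTED --TX_STOP--> TX_STOPPED --TX_START--> STARTED
 *
 * AFEX_UPDATE, AFEX_VIFLISTS and SWITCH_UPDATE leave the state
 * unchanged and are refused while a STOP completion is pending.
 */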
5625
5626/**
5627 * bnx2x_func_init_func - performs HW init at function stage
5628 *
5629 * @bp: device handle
5630 * @drv:	driver-specific HW init callbacks
5631 *
5632 * Init HW when the current phase is
5633 * FW_MSG_CODE_DRV_LOAD_FUNCTION: initialize only FUNCTION-only
5634 * HW blocks.
5635 */
5636static inline int bnx2x_func_init_func(struct bnx2x *bp,
5637 const struct bnx2x_func_sp_drv_ops *drv)
5638{
5639 return drv->init_hw_func(bp);
5640}
5641
5642/**
5643 * bnx2x_func_init_port - performs HW init at port stage
5644 *
5645 * @bp: device handle
5646 * @drv:	driver-specific HW init callbacks
5647 *
5648 * Init HW when the current phase is
5649 * FW_MSG_CODE_DRV_LOAD_PORT: initialize PORT-only and
5650 * FUNCTION-only HW blocks.
5651 *
5652 */
5653static inline int bnx2x_func_init_port(struct bnx2x *bp,
5654 const struct bnx2x_func_sp_drv_ops *drv)
5655{
5656 int rc = drv->init_hw_port(bp);
5657 if (rc)
5658 return rc;
5659
5660 return bnx2x_func_init_func(bp, drv);
5661}
5662
5663/**
5664 * bnx2x_func_init_cmn_chip - performs HW init at chip-common stage
5665 *
5666 * @bp: device handle
5667 * @drv:	driver-specific HW init callbacks
5668 *
5669 * Init HW when the current phase is
5670 * FW_MSG_CODE_DRV_LOAD_COMMON_CHIP: initialize COMMON_CHIP,
5671 * PORT-only and FUNCTION-only HW blocks.
5672 */
5673static inline int bnx2x_func_init_cmn_chip(struct bnx2x *bp,
5674 const struct bnx2x_func_sp_drv_ops *drv)
5675{
5676 int rc = drv->init_hw_cmn_chip(bp);
5677 if (rc)
5678 return rc;
5679
5680 return bnx2x_func_init_port(bp, drv);
5681}
5682
5683/**
5684 * bnx2x_func_init_cmn - performs HW init at common stage
5685 *
5686 * @bp: device handle
5687 * @drv:	driver-specific HW init callbacks
5688 *
5689 * Init HW when the current phase is
5690 * FW_MSG_CODE_DRV_LOAD_COMMON_CHIP: initialize COMMON,
5691 * PORT-only and FUNCTION-only HW blocks.
5692 */
5693static inline int bnx2x_func_init_cmn(struct bnx2x *bp,
5694 const struct bnx2x_func_sp_drv_ops *drv)
5695{
5696 int rc = drv->init_hw_cmn(bp);
5697 if (rc)
5698 return rc;
5699
5700 return bnx2x_func_init_port(bp, drv);
5701}
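
/* The init helpers above nest, so each load phase also runs the more
 * specific phases below it (informational sketch of the call chain):
 *
 *	bnx2x_func_init_cmn_chip() / bnx2x_func_init_cmn()
 *	    -> drv->init_hw_cmn_chip() / drv->init_hw_cmn()
 *	    -> bnx2x_func_init_port()
 *	           -> drv->init_hw_port()
 *	           -> bnx2x_func_init_func()
 *	                  -> drv->init_hw_func()
 */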
5702
5703static int bnx2x_func_hw_init(struct bnx2x *bp,
5704 struct bnx2x_func_state_params *params)
5705{
5706 u32 load_code = params->params.hw_init.load_phase;
5707 struct bnx2x_func_sp_obj *o = params->f_obj;
5708 const struct bnx2x_func_sp_drv_ops *drv = o->drv;
5709 int rc = 0;
5710
5711 DP(BNX2X_MSG_SP, "function %d load_code %x\n",
5712 BP_ABS_FUNC(bp), load_code);
5713
5714 /* Prepare buffers for unzipping the FW */
5715 rc = drv->gunzip_init(bp);
5716 if (rc)
5717 return rc;
5718
5719 /* Prepare FW */
5720 rc = drv->init_fw(bp);
5721 if (rc) {
5722 BNX2X_ERR("Error loading firmware\n");
Dmitry Kravkoveb2afd42011-11-15 12:07:33 +00005723 goto init_err;
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03005724 }
5725
Yuval Mintz16a5fd92013-06-02 00:06:18 +00005726	/* Handle the beginning of COMMON_XXX phases separately... */
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03005727 switch (load_code) {
5728 case FW_MSG_CODE_DRV_LOAD_COMMON_CHIP:
5729 rc = bnx2x_func_init_cmn_chip(bp, drv);
5730 if (rc)
Dmitry Kravkoveb2afd42011-11-15 12:07:33 +00005731 goto init_err;
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03005732
5733 break;
5734 case FW_MSG_CODE_DRV_LOAD_COMMON:
5735 rc = bnx2x_func_init_cmn(bp, drv);
5736 if (rc)
Dmitry Kravkoveb2afd42011-11-15 12:07:33 +00005737 goto init_err;
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03005738
5739 break;
5740 case FW_MSG_CODE_DRV_LOAD_PORT:
5741 rc = bnx2x_func_init_port(bp, drv);
5742 if (rc)
Dmitry Kravkoveb2afd42011-11-15 12:07:33 +00005743 goto init_err;
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03005744
5745 break;
5746 case FW_MSG_CODE_DRV_LOAD_FUNCTION:
5747 rc = bnx2x_func_init_func(bp, drv);
5748 if (rc)
Dmitry Kravkoveb2afd42011-11-15 12:07:33 +00005749 goto init_err;
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03005750
5751 break;
5752 default:
5753 BNX2X_ERR("Unknown load_code (0x%x) from MCP\n", load_code);
5754 rc = -EINVAL;
5755 }
5756
Dmitry Kravkoveb2afd42011-11-15 12:07:33 +00005757init_err:
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03005758 drv->gunzip_end(bp);
5759
Yuval Mintz16a5fd92013-06-02 00:06:18 +00005760 /* In case of success, complete the command immediately: no ramrods
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03005761 * have been sent.
5762 */
5763 if (!rc)
5764 o->complete_cmd(bp, o, BNX2X_F_CMD_HW_INIT);
5765
5766 return rc;
5767}
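
/* Illustrative caller sketch (not part of the driver): HW init is
 * requested through the function state machine with the load phase
 * returned by the MCP:
 *
 *	struct bnx2x_func_state_params func_params = {NULL};
 *
 *	func_params.f_obj = &bp->func_obj;
 *	func_params.cmd = BNX2X_F_CMD_HW_INIT;
 *	func_params.params.hw_init.load_phase = load_code;
 *	__set_bit(RAMROD_COMP_WAIT, &func_params.ramrod_flags);
 *	rc = bnx2x_func_state_change(bp, &func_params);
 */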
5768
5769/**
5770 * bnx2x_func_reset_func - reset HW at function stage
5771 *
5772 * @bp: device handle
5773 * @drv:	driver-specific HW reset callbacks
5774 *
5775 * Reset HW at FW_MSG_CODE_DRV_UNLOAD_FUNCTION stage: reset only
5776 * FUNCTION-only HW blocks.
5777 */
5778static inline void bnx2x_func_reset_func(struct bnx2x *bp,
5779 const struct bnx2x_func_sp_drv_ops *drv)
5780{
5781 drv->reset_hw_func(bp);
5782}
5783
5784/**
Yuval Mintz16a5fd92013-06-02 00:06:18 +00005785 * bnx2x_func_reset_port - reset HW at port stage
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03005786 *
5787 * @bp: device handle
5788 * @drv:	driver-specific HW reset callbacks
5789 *
5790 * Reset HW at FW_MSG_CODE_DRV_UNLOAD_PORT stage: reset
5791 * FUNCTION-only and PORT-only HW blocks.
5792 *
5793 * !!!IMPORTANT!!!
5794 *
5795 * It's important to call reset_port before reset_func() as the last thing
5796 * reset_func does is pf_disable() thus disabling PGLUE_B, which
5797 * makes impossible any DMAE transactions.
5798 */
5799static inline void bnx2x_func_reset_port(struct bnx2x *bp,
5800 const struct bnx2x_func_sp_drv_ops *drv)
5801{
5802 drv->reset_hw_port(bp);
5803 bnx2x_func_reset_func(bp, drv);
5804}
5805
5806/**
Yuval Mintz16a5fd92013-06-02 00:06:18 +00005807 * bnx2x_func_reset_cmn - reset HW at common stage
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03005808 *
5809 * @bp: device handle
5810 * @drv:	driver-specific HW reset callbacks
5811 *
5812 * Reset HW at FW_MSG_CODE_DRV_UNLOAD_COMMON and
5813 * FW_MSG_CODE_DRV_UNLOAD_COMMON_CHIP stages: reset COMMON,
5814 * COMMON_CHIP, FUNCTION-only and PORT-only HW blocks.
5815 */
5816static inline void bnx2x_func_reset_cmn(struct bnx2x *bp,
5817 const struct bnx2x_func_sp_drv_ops *drv)
5818{
5819 bnx2x_func_reset_port(bp, drv);
5820 drv->reset_hw_cmn(bp);
5821}
5822
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03005823static inline int bnx2x_func_hw_reset(struct bnx2x *bp,
5824 struct bnx2x_func_state_params *params)
5825{
5826 u32 reset_phase = params->params.hw_reset.reset_phase;
5827 struct bnx2x_func_sp_obj *o = params->f_obj;
5828 const struct bnx2x_func_sp_drv_ops *drv = o->drv;
5829
5830 DP(BNX2X_MSG_SP, "function %d reset_phase %x\n", BP_ABS_FUNC(bp),
5831 reset_phase);
5832
5833 switch (reset_phase) {
5834 case FW_MSG_CODE_DRV_UNLOAD_COMMON:
5835 bnx2x_func_reset_cmn(bp, drv);
5836 break;
5837 case FW_MSG_CODE_DRV_UNLOAD_PORT:
5838 bnx2x_func_reset_port(bp, drv);
5839 break;
5840 case FW_MSG_CODE_DRV_UNLOAD_FUNCTION:
5841 bnx2x_func_reset_func(bp, drv);
5842 break;
5843 default:
5844 BNX2X_ERR("Unknown reset_phase (0x%x) from MCP\n",
5845 reset_phase);
5846 break;
5847 }
5848
Yuval Mintz16a5fd92013-06-02 00:06:18 +00005849 /* Complete the command immediately: no ramrods have been sent. */
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03005850 o->complete_cmd(bp, o, BNX2X_F_CMD_HW_RESET);
5851
5852 return 0;
5853}
5854
5855static inline int bnx2x_func_send_start(struct bnx2x *bp,
5856 struct bnx2x_func_state_params *params)
5857{
5858 struct bnx2x_func_sp_obj *o = params->f_obj;
5859 struct function_start_data *rdata =
5860 (struct function_start_data *)o->rdata;
5861 dma_addr_t data_mapping = o->rdata_mapping;
5862 struct bnx2x_func_start_params *start_params = &params->params.start;
5863
5864 memset(rdata, 0, sizeof(*rdata));
5865
5866 /* Fill the ramrod data with provided parameters */
Dmitry Kravkov1bc277f2013-03-18 06:51:04 +00005867 rdata->function_mode = (u8)start_params->mf_mode;
5868 rdata->sd_vlan_tag = cpu_to_le16(start_params->sd_vlan_tag);
5869 rdata->path_id = BP_PATH(bp);
5870 rdata->network_cos_mode = start_params->network_cos_mode;
5871 rdata->gre_tunnel_mode = start_params->gre_tunnel_mode;
5872 rdata->gre_tunnel_rss = start_params->gre_tunnel_rss;
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03005873
Dmitry Kravkov1bc277f2013-03-18 06:51:04 +00005874	/* No need for an explicit memory barrier here as long as we
5875	 * ensure the ordering of writing to the SPQ element
5876	 * and updating of the SPQ producer, which involves a memory
5877	 * read. If the memory read is removed we will have to put a
5878	 * full memory barrier there (inside bnx2x_sp_post()).
Vladislav Zolotarov53e51e22011-07-19 01:45:02 +00005879	 */
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03005880
5881 return bnx2x_sp_post(bp, RAMROD_CMD_ID_COMMON_FUNCTION_START, 0,
5882 U64_HI(data_mapping),
5883 U64_LO(data_mapping), NONE_CONNECTION_TYPE);
5884}
5885
Merav Sicron55c11942012-11-07 00:45:48 +00005886static inline int bnx2x_func_send_switch_update(struct bnx2x *bp,
5887 struct bnx2x_func_state_params *params)
5888{
5889 struct bnx2x_func_sp_obj *o = params->f_obj;
5890 struct function_update_data *rdata =
5891 (struct function_update_data *)o->rdata;
5892 dma_addr_t data_mapping = o->rdata_mapping;
5893 struct bnx2x_func_switch_update_params *switch_update_params =
5894 &params->params.switch_update;
5895
5896 memset(rdata, 0, sizeof(*rdata));
5897
5898 /* Fill the ramrod data with provided parameters */
5899 rdata->tx_switch_suspend_change_flg = 1;
5900 rdata->tx_switch_suspend = switch_update_params->suspend;
5901 rdata->echo = SWITCH_UPDATE;
5902
5903 return bnx2x_sp_post(bp, RAMROD_CMD_ID_COMMON_FUNCTION_UPDATE, 0,
5904 U64_HI(data_mapping),
5905 U64_LO(data_mapping), NONE_CONNECTION_TYPE);
5906}
5907
Barak Witkowskia3348722012-04-23 03:04:46 +00005908static inline int bnx2x_func_send_afex_update(struct bnx2x *bp,
5909 struct bnx2x_func_state_params *params)
5910{
5911 struct bnx2x_func_sp_obj *o = params->f_obj;
5912 struct function_update_data *rdata =
5913 (struct function_update_data *)o->afex_rdata;
5914 dma_addr_t data_mapping = o->afex_rdata_mapping;
5915 struct bnx2x_func_afex_update_params *afex_update_params =
5916 &params->params.afex_update;
5917
5918 memset(rdata, 0, sizeof(*rdata));
5919
5920 /* Fill the ramrod data with provided parameters */
5921 rdata->vif_id_change_flg = 1;
5922 rdata->vif_id = cpu_to_le16(afex_update_params->vif_id);
5923 rdata->afex_default_vlan_change_flg = 1;
5924 rdata->afex_default_vlan =
5925 cpu_to_le16(afex_update_params->afex_default_vlan);
5926 rdata->allowed_priorities_change_flg = 1;
5927 rdata->allowed_priorities = afex_update_params->allowed_priorities;
Merav Sicron55c11942012-11-07 00:45:48 +00005928 rdata->echo = AFEX_UPDATE;
Barak Witkowskia3348722012-04-23 03:04:46 +00005929
5930	/* No need for an explicit memory barrier here as long as we
5931	 * ensure the ordering of writing to the SPQ element
5932	 * and updating of the SPQ producer, which involves a memory
5933	 * read. If the memory read is removed we will have to put a
5934	 * full memory barrier there (inside bnx2x_sp_post()).
5935	 */
5936 DP(BNX2X_MSG_SP,
5937 "afex: sending func_update vif_id 0x%x dvlan 0x%x prio 0x%x\n",
5938 rdata->vif_id,
5939 rdata->afex_default_vlan, rdata->allowed_priorities);
5940
5941 return bnx2x_sp_post(bp, RAMROD_CMD_ID_COMMON_FUNCTION_UPDATE, 0,
5942 U64_HI(data_mapping),
5943 U64_LO(data_mapping), NONE_CONNECTION_TYPE);
5944}
5945
5946static inline int bnx2x_func_send_afex_viflists(struct bnx2x *bp,
5948 struct bnx2x_func_state_params *params)
5949{
5950 struct bnx2x_func_sp_obj *o = params->f_obj;
5951 struct afex_vif_list_ramrod_data *rdata =
5952 (struct afex_vif_list_ramrod_data *)o->afex_rdata;
Yuval Mintz86564c32013-01-23 03:21:50 +00005953 struct bnx2x_func_afex_viflists_params *afex_vif_params =
Barak Witkowskia3348722012-04-23 03:04:46 +00005954 &params->params.afex_viflists;
5955 u64 *p_rdata = (u64 *)rdata;
5956
5957 memset(rdata, 0, sizeof(*rdata));
5958
5959 /* Fill the ramrod data with provided parameters */
Yuval Mintz86564c32013-01-23 03:21:50 +00005960 rdata->vif_list_index = cpu_to_le16(afex_vif_params->vif_list_index);
5961 rdata->func_bit_map = afex_vif_params->func_bit_map;
5962 rdata->afex_vif_list_command = afex_vif_params->afex_vif_list_command;
5963 rdata->func_to_clear = afex_vif_params->func_to_clear;
Barak Witkowskia3348722012-04-23 03:04:46 +00005964
5965	/* send the sub-command type in the echo field */
Yuval Mintz86564c32013-01-23 03:21:50 +00005966 rdata->echo = afex_vif_params->afex_vif_list_command;
Barak Witkowskia3348722012-04-23 03:04:46 +00005967
5968 /* No need for an explicit memory barrier here as long we would
5969 * need to ensure the ordering of writing to the SPQ element
5970 * and updating of the SPQ producer which involves a memory
5971 * read and we will have to put a full memory barrier there
5972 * (inside bnx2x_sp_post()).
5973 */
5974
5975 DP(BNX2X_MSG_SP, "afex: ramrod lists, cmd 0x%x index 0x%x func_bit_map 0x%x func_to_clr 0x%x\n",
5976 rdata->afex_vif_list_command, rdata->vif_list_index,
5977 rdata->func_bit_map, rdata->func_to_clear);
5978
5979 /* this ramrod sends data directly and not through DMA mapping */
5980 return bnx2x_sp_post(bp, RAMROD_CMD_ID_COMMON_AFEX_VIF_LISTS, 0,
5981 U64_HI(*p_rdata), U64_LO(*p_rdata),
5982 NONE_CONNECTION_TYPE);
5983}
5984
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03005985static inline int bnx2x_func_send_stop(struct bnx2x *bp,
5986 struct bnx2x_func_state_params *params)
5987{
5988 return bnx2x_sp_post(bp, RAMROD_CMD_ID_COMMON_FUNCTION_STOP, 0, 0, 0,
5989 NONE_CONNECTION_TYPE);
5990}
5991
Dmitry Kravkov6debea82011-07-19 01:42:04 +00005992static inline int bnx2x_func_send_tx_stop(struct bnx2x *bp,
5993 struct bnx2x_func_state_params *params)
5994{
5995 return bnx2x_sp_post(bp, RAMROD_CMD_ID_COMMON_STOP_TRAFFIC, 0, 0, 0,
5996 NONE_CONNECTION_TYPE);
5997}
5998static inline int bnx2x_func_send_tx_start(struct bnx2x *bp,
5999 struct bnx2x_func_state_params *params)
6000{
6001 struct bnx2x_func_sp_obj *o = params->f_obj;
6002 struct flow_control_configuration *rdata =
6003 (struct flow_control_configuration *)o->rdata;
6004 dma_addr_t data_mapping = o->rdata_mapping;
6005 struct bnx2x_func_tx_start_params *tx_start_params =
6006 &params->params.tx_start;
6007 int i;
6008
6009 memset(rdata, 0, sizeof(*rdata));
6010
6011 rdata->dcb_enabled = tx_start_params->dcb_enabled;
6012 rdata->dcb_version = tx_start_params->dcb_version;
6013 rdata->dont_add_pri_0_en = tx_start_params->dont_add_pri_0_en;
6014
6015 for (i = 0; i < ARRAY_SIZE(rdata->traffic_type_to_priority_cos); i++)
6016 rdata->traffic_type_to_priority_cos[i] =
6017 tx_start_params->traffic_type_to_priority_cos[i];
6018
6019 return bnx2x_sp_post(bp, RAMROD_CMD_ID_COMMON_START_TRAFFIC, 0,
6020 U64_HI(data_mapping),
6021 U64_LO(data_mapping), NONE_CONNECTION_TYPE);
6022}
6023
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03006024static int bnx2x_func_send_cmd(struct bnx2x *bp,
6025 struct bnx2x_func_state_params *params)
6026{
6027 switch (params->cmd) {
6028 case BNX2X_F_CMD_HW_INIT:
6029 return bnx2x_func_hw_init(bp, params);
6030 case BNX2X_F_CMD_START:
6031 return bnx2x_func_send_start(bp, params);
6032 case BNX2X_F_CMD_STOP:
6033 return bnx2x_func_send_stop(bp, params);
6034 case BNX2X_F_CMD_HW_RESET:
6035 return bnx2x_func_hw_reset(bp, params);
Barak Witkowskia3348722012-04-23 03:04:46 +00006036 case BNX2X_F_CMD_AFEX_UPDATE:
6037 return bnx2x_func_send_afex_update(bp, params);
6038 case BNX2X_F_CMD_AFEX_VIFLISTS:
6039 return bnx2x_func_send_afex_viflists(bp, params);
Dmitry Kravkov6debea82011-07-19 01:42:04 +00006040 case BNX2X_F_CMD_TX_STOP:
6041 return bnx2x_func_send_tx_stop(bp, params);
6042 case BNX2X_F_CMD_TX_START:
6043 return bnx2x_func_send_tx_start(bp, params);
Merav Sicron55c11942012-11-07 00:45:48 +00006044 case BNX2X_F_CMD_SWITCH_UPDATE:
6045 return bnx2x_func_send_switch_update(bp, params);
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03006046 default:
6047 BNX2X_ERR("Unknown command: %d\n", params->cmd);
6048 return -EINVAL;
6049 }
6050}
6051
6052void bnx2x_init_func_obj(struct bnx2x *bp,
6053 struct bnx2x_func_sp_obj *obj,
6054 void *rdata, dma_addr_t rdata_mapping,
Barak Witkowskia3348722012-04-23 03:04:46 +00006055 void *afex_rdata, dma_addr_t afex_rdata_mapping,
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03006056 struct bnx2x_func_sp_drv_ops *drv_iface)
6057{
6058 memset(obj, 0, sizeof(*obj));
6059
6060 mutex_init(&obj->one_pending_mutex);
6061
6062 obj->rdata = rdata;
6063 obj->rdata_mapping = rdata_mapping;
Barak Witkowskia3348722012-04-23 03:04:46 +00006064 obj->afex_rdata = afex_rdata;
6065 obj->afex_rdata_mapping = afex_rdata_mapping;
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03006066 obj->send_cmd = bnx2x_func_send_cmd;
6067 obj->check_transition = bnx2x_func_chk_transition;
6068 obj->complete_cmd = bnx2x_func_comp_cmd;
6069 obj->wait_comp = bnx2x_func_wait_comp;
6070
6071 obj->drv = drv_iface;
6072}
6073
6074/**
6075 * bnx2x_func_state_change - perform Function state change transition
6076 *
6077 * @bp: device handle
6078 * @params: parameters to perform the transaction
6079 *
6080 * returns 0 in case of successfully completed transition,
6081 * negative error code in case of failure, positive
6082 * (EBUSY) value if there is a completion to that is
6083 * still pending (possible only if RAMROD_COMP_WAIT is
6084 * not set in params->ramrod_flags for asynchronous
6085 * commands).
6086 */
6087int bnx2x_func_state_change(struct bnx2x *bp,
6088 struct bnx2x_func_state_params *params)
6089{
6090 struct bnx2x_func_sp_obj *o = params->f_obj;
Merav Sicron55c11942012-11-07 00:45:48 +00006091 int rc, cnt = 300;
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03006092 enum bnx2x_func_cmd cmd = params->cmd;
6093 unsigned long *pending = &o->pending;
6094
6095 mutex_lock(&o->one_pending_mutex);
6096
6097 /* Check that the requested transition is legal */
Merav Sicron55c11942012-11-07 00:45:48 +00006098 rc = o->check_transition(bp, o, params);
6099 if ((rc == -EBUSY) &&
6100 (test_bit(RAMROD_RETRY, &params->ramrod_flags))) {
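		/* Retry while the previous transition completes: up to
		 * cnt iterations of 10 ms each (~3 seconds in total).
		 */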
6101 while ((rc == -EBUSY) && (--cnt > 0)) {
6102 mutex_unlock(&o->one_pending_mutex);
6103 msleep(10);
6104 mutex_lock(&o->one_pending_mutex);
6105 rc = o->check_transition(bp, o, params);
6106 }
6107 if (rc == -EBUSY) {
6108 mutex_unlock(&o->one_pending_mutex);
6109 BNX2X_ERR("timeout waiting for previous ramrod completion\n");
6110 return rc;
6111 }
6112 } else if (rc) {
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03006113 mutex_unlock(&o->one_pending_mutex);
Merav Sicron55c11942012-11-07 00:45:48 +00006114 return rc;
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03006115 }
6116
6117 /* Set "pending" bit */
6118 set_bit(cmd, pending);
6119
6120 /* Don't send a command if only driver cleanup was requested */
6121 if (test_bit(RAMROD_DRV_CLR_ONLY, &params->ramrod_flags)) {
6122 bnx2x_func_state_change_comp(bp, o, cmd);
6123 mutex_unlock(&o->one_pending_mutex);
6124 } else {
6125 /* Send a ramrod */
6126 rc = o->send_cmd(bp, params);
6127
6128 mutex_unlock(&o->one_pending_mutex);
6129
6130 if (rc) {
6131 o->next_state = BNX2X_F_STATE_MAX;
6132 clear_bit(cmd, pending);
6133 smp_mb__after_clear_bit();
6134 return rc;
6135 }
6136
6137 if (test_bit(RAMROD_COMP_WAIT, &params->ramrod_flags)) {
6138 rc = o->wait_comp(bp, o, cmd);
6139 if (rc)
6140 return rc;
6141
6142 return 0;
6143 }
6144 }
6145
6146 return !!test_bit(cmd, pending);
Vladislav Zolotarov042181f2011-06-14 01:33:39 +00006147}
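
/* Illustrative asynchronous usage (sketch): without RAMROD_COMP_WAIT
 * the call may legitimately return a positive value, meaning the
 * ramrod was posted but its completion is still pending:
 *
 *	rc = bnx2x_func_state_change(bp, &func_params);
 *	if (rc < 0)
 *		return rc;	// refused transition or post failure
 *	// rc > 0: completion pending, delivered via SP events
 */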