/* QLogic qed NIC Driver
 * Copyright (c) 2015-2017 QLogic Corporation
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 * Redistribution and use in source and binary forms, with or
 * without modification, are permitted provided that the following
 * conditions are met:
 *
 * - Redistributions of source code must retain the above
 *   copyright notice, this list of conditions and the following
 *   disclaimer.
 *
 * - Redistributions in binary form must reproduce the above
 *   copyright notice, this list of conditions and the following
 *   disclaimer in the documentation and/or other materials
 *   provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/types.h>
#include <asm/byteorder.h>
#include <asm/param.h>
#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/etherdevice.h>
#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/slab.h>
#include <linux/stddef.h>
#include <linux/string.h>
#include <linux/workqueue.h>
#include <linux/bitops.h>
#include <linux/bug.h>
#include <linux/vmalloc.h>
#include "qed.h"
#include <linux/qed/qed_chain.h>
#include "qed_cxt.h"
#include "qed_dev_api.h"
#include <linux/qed/qed_eth_if.h>
#include "qed_hsi.h"
#include "qed_hw.h"
#include "qed_int.h"
#include "qed_l2.h"
#include "qed_mcp.h"
#include "qed_reg_addr.h"
#include "qed_sp.h"
#include "qed_sriov.h"

#define QED_MAX_SGES_NUM 16
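/* CRC-32C (Castagnoli) polynomial; used when hashing multicast MAC
 * addresses into the approximate-match bins.
 */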
#define CRC32_POLY 0x1edc6f41

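/* Per-hwfn L2 bookkeeping - @queues is the number of queue-zones available
 * to this function, and @pp_qid_usage holds one bitmap per queue-zone,
 * tracking which queue-ids inside that zone are currently taken.
 */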
struct qed_l2_info {
	u32 queues;
	unsigned long **pp_qid_usage;

	/* The lock is meant to synchronize access to the qid usage */
	struct mutex lock;
};

int qed_l2_alloc(struct qed_hwfn *p_hwfn)
{
	struct qed_l2_info *p_l2_info;
	unsigned long **pp_qids;
	u32 i;

	if (!QED_IS_L2_PERSONALITY(p_hwfn))
		return 0;

	p_l2_info = kzalloc(sizeof(*p_l2_info), GFP_KERNEL);
	if (!p_l2_info)
		return -ENOMEM;
	p_hwfn->p_l2_info = p_l2_info;

	if (IS_PF(p_hwfn->cdev)) {
		p_l2_info->queues = RESC_NUM(p_hwfn, QED_L2_QUEUE);
	} else {
		u8 rx = 0, tx = 0;

		qed_vf_get_num_rxqs(p_hwfn, &rx);
		qed_vf_get_num_txqs(p_hwfn, &tx);

		p_l2_info->queues = max_t(u8, rx, tx);
	}

	pp_qids = kcalloc(p_l2_info->queues, sizeof(unsigned long *),
			  GFP_KERNEL);
	if (!pp_qids)
		return -ENOMEM;
	p_l2_info->pp_qid_usage = pp_qids;

	for (i = 0; i < p_l2_info->queues; i++) {
		/* One bit per potential queue-id inside the queue-zone */
		pp_qids[i] = kzalloc(MAX_QUEUES_PER_QZONE / 8, GFP_KERNEL);
		if (!pp_qids[i])
			return -ENOMEM;
	}

	return 0;
}

void qed_l2_setup(struct qed_hwfn *p_hwfn)
{
	if (!QED_IS_L2_PERSONALITY(p_hwfn))
		return;

	mutex_init(&p_hwfn->p_l2_info->lock);
}

void qed_l2_free(struct qed_hwfn *p_hwfn)
{
	u32 i;

	if (!QED_IS_L2_PERSONALITY(p_hwfn))
		return;

	if (!p_hwfn->p_l2_info)
		return;

	if (!p_hwfn->p_l2_info->pp_qid_usage)
		goto out_l2_info;

	/* Free until hit first uninitialized entry */
	for (i = 0; i < p_hwfn->p_l2_info->queues; i++) {
		if (!p_hwfn->p_l2_info->pp_qid_usage[i])
			break;
		kfree(p_hwfn->p_l2_info->pp_qid_usage[i]);
	}

	kfree(p_hwfn->p_l2_info->pp_qid_usage);

out_l2_info:
	kfree(p_hwfn->p_l2_info);
	p_hwfn->p_l2_info = NULL;
}

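/* Reserve the first free queue-id inside the queue-zone's usage bitmap.
 * Several queue-cids may map onto a single queue-zone [e.g., Tx queues of
 * different traffic classes]; the bitmap hands out a unique index within
 * the zone. Returns false if the zone is already fully used.
 */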
static bool qed_eth_queue_qid_usage_add(struct qed_hwfn *p_hwfn,
					struct qed_queue_cid *p_cid)
{
	struct qed_l2_info *p_l2_info = p_hwfn->p_l2_info;
	u16 queue_id = p_cid->rel.queue_id;
	bool b_rc = true;
	u8 first;

	mutex_lock(&p_l2_info->lock);

	if (queue_id >= p_l2_info->queues) {
		DP_NOTICE(p_hwfn,
			  "Requested to increase usage for qzone %04x out of %08x\n",
			  queue_id, p_l2_info->queues);
		b_rc = false;
		goto out;
	}

	first = (u8)find_first_zero_bit(p_l2_info->pp_qid_usage[queue_id],
					MAX_QUEUES_PER_QZONE);
	if (first >= MAX_QUEUES_PER_QZONE) {
		b_rc = false;
		goto out;
	}

	__set_bit(first, p_l2_info->pp_qid_usage[queue_id]);
	p_cid->qid_usage_idx = first;

out:
	mutex_unlock(&p_l2_info->lock);
	return b_rc;
}

static void qed_eth_queue_qid_usage_del(struct qed_hwfn *p_hwfn,
					struct qed_queue_cid *p_cid)
{
	mutex_lock(&p_hwfn->p_l2_info->lock);

	clear_bit(p_cid->qid_usage_idx,
		  p_hwfn->p_l2_info->pp_qid_usage[p_cid->rel.queue_id]);

	mutex_unlock(&p_hwfn->p_l2_info->lock);
}

void qed_eth_queue_cid_release(struct qed_hwfn *p_hwfn,
			       struct qed_queue_cid *p_cid)
{
	bool b_legacy_vf = !!(p_cid->vf_legacy & QED_QCID_LEGACY_VF_CID);

	if (IS_PF(p_hwfn->cdev) && !b_legacy_vf)
		_qed_cxt_release_cid(p_hwfn, p_cid->cid, p_cid->vfid);

	/* For PF's VFs we maintain the index inside queue-zone in IOV */
	if (p_cid->vfid == QED_QUEUE_CID_SELF)
		qed_eth_queue_qid_usage_del(p_hwfn, p_cid);

	vfree(p_cid);
}

/* This internal version is only meant to be called directly by PFs
 * initializing CIDs for their VFs.
 */
static struct qed_queue_cid *
_qed_eth_queue_to_cid(struct qed_hwfn *p_hwfn,
		      u16 opaque_fid,
		      u32 cid,
		      struct qed_queue_start_common_params *p_params,
		      bool b_is_rx,
		      struct qed_queue_cid_vf_params *p_vf_params)
{
	struct qed_queue_cid *p_cid;
	int rc;

	p_cid = vzalloc(sizeof(*p_cid));
	if (!p_cid)
		return NULL;

	p_cid->opaque_fid = opaque_fid;
	p_cid->cid = cid;
	p_cid->p_owner = p_hwfn;

	/* Fill in parameters */
	p_cid->rel.vport_id = p_params->vport_id;
	p_cid->rel.queue_id = p_params->queue_id;
	p_cid->rel.stats_id = p_params->stats_id;
	p_cid->sb_igu_id = p_params->p_sb->igu_sb_id;
	p_cid->b_is_rx = b_is_rx;
	p_cid->sb_idx = p_params->sb_idx;

	/* Fill-in bits related to VFs' queues if information was provided */
	if (p_vf_params) {
		p_cid->vfid = p_vf_params->vfid;
		p_cid->vf_qid = p_vf_params->vf_qid;
		p_cid->vf_legacy = p_vf_params->vf_legacy;
	} else {
		p_cid->vfid = QED_QUEUE_CID_SELF;
	}

	/* Don't try calculating the absolute indices for VFs */
	if (IS_VF(p_hwfn->cdev)) {
		p_cid->abs = p_cid->rel;
		goto out;
	}

	/* Calculate the engine-absolute indices of the resources.
	 * This would guarantee they're valid later on.
	 * In some cases [SBs] we already have the right values.
	 */
	rc = qed_fw_vport(p_hwfn, p_cid->rel.vport_id, &p_cid->abs.vport_id);
	if (rc)
		goto fail;

	rc = qed_fw_l2_queue(p_hwfn, p_cid->rel.queue_id, &p_cid->abs.queue_id);
	if (rc)
		goto fail;

	/* In case of a PF configuring its VF's queues, the stats-id is already
	 * absolute [since there's a single index that's suitable per-VF].
	 */
	if (p_cid->vfid == QED_QUEUE_CID_SELF) {
		rc = qed_fw_vport(p_hwfn, p_cid->rel.stats_id,
				  &p_cid->abs.stats_id);
		if (rc)
			goto fail;
	} else {
		p_cid->abs.stats_id = p_cid->rel.stats_id;
	}

out:
	/* VF-images have provided the qid_usage_idx on their own.
	 * Otherwise, we need to allocate a unique one.
	 */
	if (!p_vf_params) {
		if (!qed_eth_queue_qid_usage_add(p_hwfn, p_cid))
			goto fail;
	} else {
		p_cid->qid_usage_idx = p_vf_params->qid_usage_idx;
	}

	DP_VERBOSE(p_hwfn,
		   QED_MSG_SP,
		   "opaque_fid: %04x CID %08x vport %02x [%02x] qzone %04x.%02x [%04x] stats %02x [%02x] SB %04x PI %02x\n",
		   p_cid->opaque_fid,
		   p_cid->cid,
		   p_cid->rel.vport_id,
		   p_cid->abs.vport_id,
		   p_cid->rel.queue_id,
		   p_cid->qid_usage_idx,
		   p_cid->abs.queue_id,
		   p_cid->rel.stats_id,
		   p_cid->abs.stats_id, p_cid->sb_igu_id, p_cid->sb_idx);

	return p_cid;

fail:
	vfree(p_cid);
	return NULL;
}

struct qed_queue_cid *
qed_eth_queue_to_cid(struct qed_hwfn *p_hwfn,
		     u16 opaque_fid,
		     struct qed_queue_start_common_params *p_params,
		     bool b_is_rx,
		     struct qed_queue_cid_vf_params *p_vf_params)
{
	struct qed_queue_cid *p_cid;
	u8 vfid = QED_CXT_PF_CID;
	bool b_legacy_vf = false;
	u32 cid = 0;

	/* In case of legacy VFs, the CID can be derived from the additional
	 * VF parameters - the VF assumes queue X uses CID X, so we can simply
	 * use the vf_qid for this purpose as well.
	 */
	if (p_vf_params) {
		vfid = p_vf_params->vfid;

		if (p_vf_params->vf_legacy & QED_QCID_LEGACY_VF_CID) {
			b_legacy_vf = true;
			cid = p_vf_params->vf_qid;
		}
	}

	/* Get a unique firmware CID for this queue, in case it's a PF.
	 * VFs don't need a CID as the queue configuration will be done
	 * by PF.
	 */
	if (IS_PF(p_hwfn->cdev) && !b_legacy_vf) {
		if (_qed_cxt_acquire_cid(p_hwfn, PROTOCOLID_ETH,
					 &cid, vfid)) {
			DP_NOTICE(p_hwfn, "Failed to acquire cid\n");
			return NULL;
		}
	}

	p_cid = _qed_eth_queue_to_cid(p_hwfn, opaque_fid, cid,
				      p_params, b_is_rx, p_vf_params);
	if (!p_cid && IS_PF(p_hwfn->cdev) && !b_legacy_vf)
		_qed_cxt_release_cid(p_hwfn, cid, vfid);

	return p_cid;
}

static struct qed_queue_cid *
qed_eth_queue_to_cid_pf(struct qed_hwfn *p_hwfn,
			u16 opaque_fid,
			bool b_is_rx,
			struct qed_queue_start_common_params *p_params)
{
	return qed_eth_queue_to_cid(p_hwfn, opaque_fid, p_params, b_is_rx,
				    NULL);
}

int qed_sp_eth_vport_start(struct qed_hwfn *p_hwfn,
			   struct qed_sp_vport_start_params *p_params)
{
	struct vport_start_ramrod_data *p_ramrod = NULL;
	struct qed_spq_entry *p_ent = NULL;
	struct qed_sp_init_data init_data;
	u8 abs_vport_id = 0;
	int rc = -EINVAL;
	u16 rx_mode = 0;

	rc = qed_fw_vport(p_hwfn, p_params->vport_id, &abs_vport_id);
	if (rc)
		return rc;

	memset(&init_data, 0, sizeof(init_data));
	init_data.cid = qed_spq_get_cid(p_hwfn);
	init_data.opaque_fid = p_params->opaque_fid;
	init_data.comp_mode = QED_SPQ_MODE_EBLOCK;

	rc = qed_sp_init_request(p_hwfn, &p_ent,
				 ETH_RAMROD_VPORT_START,
				 PROTOCOLID_ETH, &init_data);
	if (rc)
		return rc;

	p_ramrod = &p_ent->ramrod.vport_start;
	p_ramrod->vport_id = abs_vport_id;

	p_ramrod->mtu = cpu_to_le16(p_params->mtu);
	p_ramrod->handle_ptp_pkts = p_params->handle_ptp_pkts;
	p_ramrod->inner_vlan_removal_en = p_params->remove_inner_vlan;
	p_ramrod->drop_ttl0_en = p_params->drop_ttl0;
	p_ramrod->untagged = p_params->only_untagged;

	SET_FIELD(rx_mode, ETH_VPORT_RX_MODE_UCAST_DROP_ALL, 1);
	SET_FIELD(rx_mode, ETH_VPORT_RX_MODE_MCAST_DROP_ALL, 1);

	p_ramrod->rx_mode.state = cpu_to_le16(rx_mode);

	/* TPA related fields */
	memset(&p_ramrod->tpa_param, 0, sizeof(struct eth_vport_tpa_param));

	p_ramrod->tpa_param.max_buff_num = p_params->max_buffers_per_cqe;

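	/* In GRO mode, ask the FW to split headers from payload, keep
	 * aggregations GRO-consistent and start/continue an aggregation
	 * once payloads reach half the MTU.
	 */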
	switch (p_params->tpa_mode) {
	case QED_TPA_MODE_GRO:
		p_ramrod->tpa_param.tpa_max_aggs_num = ETH_TPA_MAX_AGGS_NUM;
		p_ramrod->tpa_param.tpa_max_size = (u16)-1;
		p_ramrod->tpa_param.tpa_min_size_to_cont = p_params->mtu / 2;
		p_ramrod->tpa_param.tpa_min_size_to_start = p_params->mtu / 2;
		p_ramrod->tpa_param.tpa_ipv4_en_flg = 1;
		p_ramrod->tpa_param.tpa_ipv6_en_flg = 1;
		p_ramrod->tpa_param.tpa_pkt_split_flg = 1;
		p_ramrod->tpa_param.tpa_gro_consistent_flg = 1;
		break;
	default:
		break;
	}

	p_ramrod->tx_switching_en = p_params->tx_switching;

	p_ramrod->ctl_frame_mac_check_en = !!p_params->check_mac;
	p_ramrod->ctl_frame_ethtype_check_en = !!p_params->check_ethtype;

	/* Software Function ID in hwfn (PFs are 0 - 15, VFs are 16 - 135) */
	p_ramrod->sw_fid = qed_concrete_to_sw_fid(p_hwfn->cdev,
						  p_params->concrete_fid);

	return qed_spq_post(p_hwfn, p_ent, NULL);
}

static int qed_sp_vport_start(struct qed_hwfn *p_hwfn,
			      struct qed_sp_vport_start_params *p_params)
{
	if (IS_VF(p_hwfn->cdev)) {
		return qed_vf_pf_vport_start(p_hwfn, p_params->vport_id,
					     p_params->mtu,
					     p_params->remove_inner_vlan,
					     p_params->tpa_mode,
					     p_params->max_buffers_per_cqe,
					     p_params->only_untagged);
	}

	return qed_sp_eth_vport_start(p_hwfn, p_params);
}

static int
qed_sp_vport_update_rss(struct qed_hwfn *p_hwfn,
			struct vport_update_ramrod_data *p_ramrod,
			struct qed_rss_params *p_rss)
{
	struct eth_vport_rss_config *p_config;
	u16 capabilities = 0;
	int i, table_size;
	int rc = 0;

	if (!p_rss) {
		p_ramrod->common.update_rss_flg = 0;
		return rc;
	}
	p_config = &p_ramrod->rss_config;

	BUILD_BUG_ON(QED_RSS_IND_TABLE_SIZE != ETH_RSS_IND_TABLE_ENTRIES_NUM);

	rc = qed_fw_rss_eng(p_hwfn, p_rss->rss_eng_id, &p_config->rss_id);
	if (rc)
		return rc;

	p_ramrod->common.update_rss_flg = p_rss->update_rss_config;
	p_config->update_rss_capabilities = p_rss->update_rss_capabilities;
	p_config->update_rss_ind_table = p_rss->update_rss_ind_table;
	p_config->update_rss_key = p_rss->update_rss_key;

	p_config->rss_mode = p_rss->rss_enable ?
			     ETH_VPORT_RSS_MODE_REGULAR :
			     ETH_VPORT_RSS_MODE_DISABLED;

	SET_FIELD(capabilities,
		  ETH_VPORT_RSS_CONFIG_IPV4_CAPABILITY,
		  !!(p_rss->rss_caps & QED_RSS_IPV4));
	SET_FIELD(capabilities,
		  ETH_VPORT_RSS_CONFIG_IPV6_CAPABILITY,
		  !!(p_rss->rss_caps & QED_RSS_IPV6));
	SET_FIELD(capabilities,
		  ETH_VPORT_RSS_CONFIG_IPV4_TCP_CAPABILITY,
		  !!(p_rss->rss_caps & QED_RSS_IPV4_TCP));
	SET_FIELD(capabilities,
		  ETH_VPORT_RSS_CONFIG_IPV6_TCP_CAPABILITY,
		  !!(p_rss->rss_caps & QED_RSS_IPV6_TCP));
	SET_FIELD(capabilities,
		  ETH_VPORT_RSS_CONFIG_IPV4_UDP_CAPABILITY,
		  !!(p_rss->rss_caps & QED_RSS_IPV4_UDP));
	SET_FIELD(capabilities,
		  ETH_VPORT_RSS_CONFIG_IPV6_UDP_CAPABILITY,
		  !!(p_rss->rss_caps & QED_RSS_IPV6_UDP));
	p_config->tbl_size = p_rss->rss_table_size_log;

	p_config->capabilities = cpu_to_le16(capabilities);

	DP_VERBOSE(p_hwfn, NETIF_MSG_IFUP,
		   "update rss flag %d, rss_mode = %d, update_caps = %d, capabilities = %d, update_ind = %d, update_rss_key = %d\n",
		   p_ramrod->common.update_rss_flg,
		   p_config->rss_mode,
		   p_config->update_rss_capabilities,
		   p_config->capabilities,
		   p_config->update_rss_ind_table, p_config->update_rss_key);

	table_size = min_t(int, QED_RSS_IND_TABLE_SIZE,
			   1 << p_config->tbl_size);
	for (i = 0; i < table_size; i++) {
		struct qed_queue_cid *p_queue = p_rss->rss_ind_table[i];

		if (!p_queue)
			return -EINVAL;

		p_config->indirection_table[i] =
		    cpu_to_le16(p_queue->abs.queue_id);
	}

	DP_VERBOSE(p_hwfn, NETIF_MSG_IFUP,
		   "Configured RSS indirection table [%d entries]:\n",
		   table_size);
	for (i = 0; i < QED_RSS_IND_TABLE_SIZE; i += 0x10) {
		DP_VERBOSE(p_hwfn,
			   NETIF_MSG_IFUP,
			   "%04x %04x %04x %04x %04x %04x %04x %04x %04x %04x %04x %04x %04x %04x %04x %04x\n",
			   le16_to_cpu(p_config->indirection_table[i]),
			   le16_to_cpu(p_config->indirection_table[i + 1]),
			   le16_to_cpu(p_config->indirection_table[i + 2]),
			   le16_to_cpu(p_config->indirection_table[i + 3]),
			   le16_to_cpu(p_config->indirection_table[i + 4]),
			   le16_to_cpu(p_config->indirection_table[i + 5]),
			   le16_to_cpu(p_config->indirection_table[i + 6]),
			   le16_to_cpu(p_config->indirection_table[i + 7]),
			   le16_to_cpu(p_config->indirection_table[i + 8]),
			   le16_to_cpu(p_config->indirection_table[i + 9]),
			   le16_to_cpu(p_config->indirection_table[i + 10]),
			   le16_to_cpu(p_config->indirection_table[i + 11]),
			   le16_to_cpu(p_config->indirection_table[i + 12]),
			   le16_to_cpu(p_config->indirection_table[i + 13]),
			   le16_to_cpu(p_config->indirection_table[i + 14]),
			   le16_to_cpu(p_config->indirection_table[i + 15]));
	}

	/* RSS key is made of 10 32-bit words */
	for (i = 0; i < 10; i++)
		p_config->rss_key[i] = cpu_to_le32(p_rss->rss_key[i]);

	return rc;
}

static void
qed_sp_update_accept_mode(struct qed_hwfn *p_hwfn,
			  struct vport_update_ramrod_data *p_ramrod,
			  struct qed_filter_accept_flags accept_flags)
{
	p_ramrod->common.update_rx_mode_flg =
	    accept_flags.update_rx_mode_config;

	p_ramrod->common.update_tx_mode_flg =
	    accept_flags.update_tx_mode_config;

	/* Set Rx mode accept flags */
	if (p_ramrod->common.update_rx_mode_flg) {
		u8 accept_filter = accept_flags.rx_accept_filter;
		u16 state = 0;

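		/* Drop-all is set only when neither the matched nor the
		 * unmatched accept mode was requested for that traffic type.
		 */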
		SET_FIELD(state, ETH_VPORT_RX_MODE_UCAST_DROP_ALL,
			  !(!!(accept_filter & QED_ACCEPT_UCAST_MATCHED) ||
			    !!(accept_filter & QED_ACCEPT_UCAST_UNMATCHED)));

		SET_FIELD(state, ETH_VPORT_RX_MODE_UCAST_ACCEPT_UNMATCHED,
			  !!(accept_filter & QED_ACCEPT_UCAST_UNMATCHED));

		SET_FIELD(state, ETH_VPORT_RX_MODE_MCAST_DROP_ALL,
			  !(!!(accept_filter & QED_ACCEPT_MCAST_MATCHED) ||
			    !!(accept_filter & QED_ACCEPT_MCAST_UNMATCHED)));

		SET_FIELD(state, ETH_VPORT_RX_MODE_MCAST_ACCEPT_ALL,
			  (!!(accept_filter & QED_ACCEPT_MCAST_MATCHED) &&
			   !!(accept_filter & QED_ACCEPT_MCAST_UNMATCHED)));

		SET_FIELD(state, ETH_VPORT_RX_MODE_BCAST_ACCEPT_ALL,
			  !!(accept_filter & QED_ACCEPT_BCAST));

		p_ramrod->rx_mode.state = cpu_to_le16(state);
		DP_VERBOSE(p_hwfn, QED_MSG_SP,
			   "p_ramrod->rx_mode.state = 0x%x\n", state);
	}

	/* Set Tx mode accept flags */
	if (p_ramrod->common.update_tx_mode_flg) {
		u8 accept_filter = accept_flags.tx_accept_filter;
		u16 state = 0;

		SET_FIELD(state, ETH_VPORT_TX_MODE_UCAST_DROP_ALL,
			  !!(accept_filter & QED_ACCEPT_NONE));

		SET_FIELD(state, ETH_VPORT_TX_MODE_MCAST_DROP_ALL,
			  !!(accept_filter & QED_ACCEPT_NONE));

		SET_FIELD(state, ETH_VPORT_TX_MODE_MCAST_ACCEPT_ALL,
			  (!!(accept_filter & QED_ACCEPT_MCAST_MATCHED) &&
			   !!(accept_filter & QED_ACCEPT_MCAST_UNMATCHED)));

		SET_FIELD(state, ETH_VPORT_TX_MODE_BCAST_ACCEPT_ALL,
			  !!(accept_filter & QED_ACCEPT_BCAST));

		p_ramrod->tx_mode.state = cpu_to_le16(state);
		DP_VERBOSE(p_hwfn, QED_MSG_SP,
			   "p_ramrod->tx_mode.state = 0x%x\n", state);
	}
}

static void
qed_sp_vport_update_sge_tpa(struct qed_hwfn *p_hwfn,
			    struct vport_update_ramrod_data *p_ramrod,
			    struct qed_sge_tpa_params *p_params)
{
	struct eth_vport_tpa_param *p_tpa;

	if (!p_params) {
		p_ramrod->common.update_tpa_param_flg = 0;
		p_ramrod->common.update_tpa_en_flg = 0;
		return;
	}

	p_ramrod->common.update_tpa_en_flg = p_params->update_tpa_en_flg;
	p_tpa = &p_ramrod->tpa_param;
	p_tpa->tpa_ipv4_en_flg = p_params->tpa_ipv4_en_flg;
	p_tpa->tpa_ipv6_en_flg = p_params->tpa_ipv6_en_flg;
	p_tpa->tpa_ipv4_tunn_en_flg = p_params->tpa_ipv4_tunn_en_flg;
	p_tpa->tpa_ipv6_tunn_en_flg = p_params->tpa_ipv6_tunn_en_flg;

	p_ramrod->common.update_tpa_param_flg = p_params->update_tpa_param_flg;
	p_tpa->max_buff_num = p_params->max_buffers_per_cqe;
	p_tpa->tpa_pkt_split_flg = p_params->tpa_pkt_split_flg;
	p_tpa->tpa_hdr_data_split_flg = p_params->tpa_hdr_data_split_flg;
	p_tpa->tpa_gro_consistent_flg = p_params->tpa_gro_consistent_flg;
	p_tpa->tpa_max_aggs_num = p_params->tpa_max_aggs_num;
	p_tpa->tpa_max_size = p_params->tpa_max_size;
	p_tpa->tpa_min_size_to_start = p_params->tpa_min_size_to_start;
	p_tpa->tpa_min_size_to_cont = p_params->tpa_min_size_to_cont;
}

static void
qed_sp_update_mcast_bin(struct qed_hwfn *p_hwfn,
			struct vport_update_ramrod_data *p_ramrod,
			struct qed_sp_vport_update_params *p_params)
{
	int i;

	memset(&p_ramrod->approx_mcast.bins, 0,
	       sizeof(p_ramrod->approx_mcast.bins));

	if (!p_params->update_approx_mcast_flg)
		return;

	p_ramrod->common.update_approx_mcast_flg = 1;
	for (i = 0; i < ETH_MULTICAST_MAC_BINS_IN_REGS; i++) {
		u32 *p_bins = (u32 *)p_params->bins;

		p_ramrod->approx_mcast.bins[i] = cpu_to_le32(p_bins[i]);
	}
}

int qed_sp_vport_update(struct qed_hwfn *p_hwfn,
			struct qed_sp_vport_update_params *p_params,
			enum spq_mode comp_mode,
			struct qed_spq_comp_cb *p_comp_data)
{
	struct qed_rss_params *p_rss_params = p_params->rss_params;
	struct vport_update_ramrod_data_cmn *p_cmn;
	struct qed_sp_init_data init_data;
	struct vport_update_ramrod_data *p_ramrod = NULL;
	struct qed_spq_entry *p_ent = NULL;
	u8 abs_vport_id = 0, val;
	int rc = -EINVAL;

	if (IS_VF(p_hwfn->cdev)) {
		rc = qed_vf_pf_vport_update(p_hwfn, p_params);
		return rc;
	}

	rc = qed_fw_vport(p_hwfn, p_params->vport_id, &abs_vport_id);
	if (rc)
		return rc;

	memset(&init_data, 0, sizeof(init_data));
	init_data.cid = qed_spq_get_cid(p_hwfn);
	init_data.opaque_fid = p_params->opaque_fid;
	init_data.comp_mode = comp_mode;
	init_data.p_comp_data = p_comp_data;

	rc = qed_sp_init_request(p_hwfn, &p_ent,
				 ETH_RAMROD_VPORT_UPDATE,
				 PROTOCOLID_ETH, &init_data);
	if (rc)
		return rc;

	/* Copy input params to ramrod according to FW struct */
	p_ramrod = &p_ent->ramrod.vport_update;
	p_cmn = &p_ramrod->common;

	p_cmn->vport_id = abs_vport_id;
	p_cmn->rx_active_flg = p_params->vport_active_rx_flg;
	p_cmn->update_rx_active_flg = p_params->update_vport_active_rx_flg;
	p_cmn->tx_active_flg = p_params->vport_active_tx_flg;
	p_cmn->update_tx_active_flg = p_params->update_vport_active_tx_flg;
	p_cmn->accept_any_vlan = p_params->accept_any_vlan;
	val = p_params->update_accept_any_vlan_flg;
	p_cmn->update_accept_any_vlan_flg = val;

	p_cmn->inner_vlan_removal_en = p_params->inner_vlan_removal_flg;
	val = p_params->update_inner_vlan_removal_flg;
	p_cmn->update_inner_vlan_removal_en_flg = val;

	p_cmn->default_vlan_en = p_params->default_vlan_enable_flg;
	val = p_params->update_default_vlan_enable_flg;
	p_cmn->update_default_vlan_en_flg = val;

	p_cmn->default_vlan = cpu_to_le16(p_params->default_vlan);
	p_cmn->update_default_vlan_flg = p_params->update_default_vlan_flg;

	p_cmn->silent_vlan_removal_en = p_params->silent_vlan_removal_flg;

	p_ramrod->common.tx_switching_en = p_params->tx_switching_flg;
	p_cmn->update_tx_switching_en_flg = p_params->update_tx_switching_flg;

	p_cmn->anti_spoofing_en = p_params->anti_spoofing_en;
	val = p_params->update_anti_spoofing_en_flg;
	p_ramrod->common.update_anti_spoofing_en_flg = val;

	rc = qed_sp_vport_update_rss(p_hwfn, p_ramrod, p_rss_params);
	if (rc) {
		/* Return spq entry which is taken in qed_sp_init_request() */
		qed_spq_return_entry(p_hwfn, p_ent);
		return rc;
	}

	/* Update mcast bins for VFs, PF doesn't use this functionality */
	qed_sp_update_mcast_bin(p_hwfn, p_ramrod, p_params);

	qed_sp_update_accept_mode(p_hwfn, p_ramrod, p_params->accept_flags);
	qed_sp_vport_update_sge_tpa(p_hwfn, p_ramrod, p_params->sge_tpa_params);
	return qed_spq_post(p_hwfn, p_ent, NULL);
}

int qed_sp_vport_stop(struct qed_hwfn *p_hwfn, u16 opaque_fid, u8 vport_id)
{
	struct vport_stop_ramrod_data *p_ramrod;
	struct qed_sp_init_data init_data;
	struct qed_spq_entry *p_ent;
	u8 abs_vport_id = 0;
	int rc;

	if (IS_VF(p_hwfn->cdev))
		return qed_vf_pf_vport_stop(p_hwfn);

	rc = qed_fw_vport(p_hwfn, vport_id, &abs_vport_id);
	if (rc)
		return rc;

	memset(&init_data, 0, sizeof(init_data));
	init_data.cid = qed_spq_get_cid(p_hwfn);
	init_data.opaque_fid = opaque_fid;
	init_data.comp_mode = QED_SPQ_MODE_EBLOCK;

	rc = qed_sp_init_request(p_hwfn, &p_ent,
				 ETH_RAMROD_VPORT_STOP,
				 PROTOCOLID_ETH, &init_data);
	if (rc)
		return rc;

	p_ramrod = &p_ent->ramrod.vport_stop;
	p_ramrod->vport_id = abs_vport_id;

	return qed_spq_post(p_hwfn, p_ent, NULL);
}

static int
qed_vf_pf_accept_flags(struct qed_hwfn *p_hwfn,
		       struct qed_filter_accept_flags *p_accept_flags)
{
	struct qed_sp_vport_update_params s_params;

	memset(&s_params, 0, sizeof(s_params));
	memcpy(&s_params.accept_flags, p_accept_flags,
	       sizeof(struct qed_filter_accept_flags));

	return qed_vf_pf_vport_update(p_hwfn, &s_params);
}

static int qed_filter_accept_cmd(struct qed_dev *cdev,
				 u8 vport,
				 struct qed_filter_accept_flags accept_flags,
				 u8 update_accept_any_vlan,
				 u8 accept_any_vlan,
				 enum spq_mode comp_mode,
				 struct qed_spq_comp_cb *p_comp_data)
{
	struct qed_sp_vport_update_params vport_update_params;
	int i, rc;

	/* Prepare and send the vport rx_mode change */
	memset(&vport_update_params, 0, sizeof(vport_update_params));
	vport_update_params.vport_id = vport;
	vport_update_params.accept_flags = accept_flags;
	vport_update_params.update_accept_any_vlan_flg = update_accept_any_vlan;
	vport_update_params.accept_any_vlan = accept_any_vlan;

	for_each_hwfn(cdev, i) {
		struct qed_hwfn *p_hwfn = &cdev->hwfns[i];

		vport_update_params.opaque_fid = p_hwfn->hw_info.opaque_fid;

		if (IS_VF(cdev)) {
			rc = qed_vf_pf_accept_flags(p_hwfn, &accept_flags);
			if (rc)
				return rc;
			continue;
		}

		rc = qed_sp_vport_update(p_hwfn, &vport_update_params,
					 comp_mode, p_comp_data);
		if (rc) {
			DP_ERR(cdev, "Update rx_mode failed %d\n", rc);
			return rc;
		}

		DP_VERBOSE(p_hwfn, QED_MSG_SP,
			   "Accept filter configured, flags = [Rx]%x [Tx]%x\n",
			   accept_flags.rx_accept_filter,
			   accept_flags.tx_accept_filter);
		if (update_accept_any_vlan)
			DP_VERBOSE(p_hwfn, QED_MSG_SP,
				   "accept_any_vlan=%d configured\n",
				   accept_any_vlan);
	}

	return 0;
}

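/* Post the RX_QUEUE_START ramrod for an already-allocated queue-cid;
 * bd_chain_phys_addr points at the Rx BD ring, while the completion
 * queue is described by its PBL [cqe_pbl_addr/cqe_pbl_size].
 */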
int qed_eth_rxq_start_ramrod(struct qed_hwfn *p_hwfn,
			     struct qed_queue_cid *p_cid,
			     u16 bd_max_bytes,
			     dma_addr_t bd_chain_phys_addr,
			     dma_addr_t cqe_pbl_addr, u16 cqe_pbl_size)
{
	struct rx_queue_start_ramrod_data *p_ramrod = NULL;
	struct qed_spq_entry *p_ent = NULL;
	struct qed_sp_init_data init_data;
	int rc = -EINVAL;

	DP_VERBOSE(p_hwfn, QED_MSG_SP,
		   "opaque_fid=0x%x, cid=0x%x, rx_qzone=0x%x, vport_id=0x%x, sb_id=0x%x\n",
		   p_cid->opaque_fid, p_cid->cid,
		   p_cid->abs.queue_id, p_cid->abs.vport_id, p_cid->sb_igu_id);

	/* Get SPQ entry */
	memset(&init_data, 0, sizeof(init_data));
	init_data.cid = p_cid->cid;
	init_data.opaque_fid = p_cid->opaque_fid;
	init_data.comp_mode = QED_SPQ_MODE_EBLOCK;

	rc = qed_sp_init_request(p_hwfn, &p_ent,
				 ETH_RAMROD_RX_QUEUE_START,
				 PROTOCOLID_ETH, &init_data);
	if (rc)
		return rc;

	p_ramrod = &p_ent->ramrod.rx_queue_start;

	p_ramrod->sb_id = cpu_to_le16(p_cid->sb_igu_id);
	p_ramrod->sb_index = p_cid->sb_idx;
	p_ramrod->vport_id = p_cid->abs.vport_id;
	p_ramrod->stats_counter_id = p_cid->abs.stats_id;
	p_ramrod->rx_queue_id = cpu_to_le16(p_cid->abs.queue_id);
	p_ramrod->complete_cqe_flg = 0;
	p_ramrod->complete_event_flg = 1;

	p_ramrod->bd_max_bytes = cpu_to_le16(bd_max_bytes);
	DMA_REGPAIR_LE(p_ramrod->bd_base, bd_chain_phys_addr);

	p_ramrod->num_of_pbl_pages = cpu_to_le16(cqe_pbl_size);
	DMA_REGPAIR_LE(p_ramrod->cqe_pbl_addr, cqe_pbl_addr);

	if (p_cid->vfid != QED_QUEUE_CID_SELF) {
		bool b_legacy_vf = !!(p_cid->vf_legacy &
				      QED_QCID_LEGACY_VF_RX_PROD);

		p_ramrod->vf_rx_prod_index = p_cid->vf_qid;
		DP_VERBOSE(p_hwfn, QED_MSG_SP,
			   "Queue%s is meant for VF rxq[%02x]\n",
			   b_legacy_vf ? " [legacy]" : "", p_cid->vf_qid);
		p_ramrod->vf_rx_prod_use_zone_a = b_legacy_vf;
	}

	return qed_spq_post(p_hwfn, p_ent, NULL);
}

static int
qed_eth_pf_rx_queue_start(struct qed_hwfn *p_hwfn,
			  struct qed_queue_cid *p_cid,
			  u16 bd_max_bytes,
			  dma_addr_t bd_chain_phys_addr,
			  dma_addr_t cqe_pbl_addr,
			  u16 cqe_pbl_size, void __iomem **pp_prod)
{
	u32 init_prod_val = 0;

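	/* The Rx producer updated by the driver lives in MSTORM RAM,
	 * reachable through the GTT-mapped window of BAR0.
	 */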
	*pp_prod = p_hwfn->regview +
		   GTT_BAR0_MAP_REG_MSDM_RAM +
		   MSTORM_ETH_PF_PRODS_OFFSET(p_cid->abs.queue_id);

	/* Init the rcq, rx bd and rx sge (if valid) producers to 0 */
	__internal_ram_wr(p_hwfn, *pp_prod, sizeof(u32),
			  (u32 *)(&init_prod_val));

	return qed_eth_rxq_start_ramrod(p_hwfn, p_cid,
					bd_max_bytes,
					bd_chain_phys_addr,
					cqe_pbl_addr, cqe_pbl_size);
}

static int
qed_eth_rx_queue_start(struct qed_hwfn *p_hwfn,
		       u16 opaque_fid,
		       struct qed_queue_start_common_params *p_params,
		       u16 bd_max_bytes,
		       dma_addr_t bd_chain_phys_addr,
		       dma_addr_t cqe_pbl_addr,
		       u16 cqe_pbl_size,
		       struct qed_rxq_start_ret_params *p_ret_params)
{
	struct qed_queue_cid *p_cid;
	int rc;

	/* Allocate a CID for the queue */
	p_cid = qed_eth_queue_to_cid_pf(p_hwfn, opaque_fid, true, p_params);
	if (!p_cid)
		return -ENOMEM;

	if (IS_PF(p_hwfn->cdev)) {
		rc = qed_eth_pf_rx_queue_start(p_hwfn, p_cid,
					       bd_max_bytes,
					       bd_chain_phys_addr,
					       cqe_pbl_addr, cqe_pbl_size,
					       &p_ret_params->p_prod);
	} else {
		rc = qed_vf_pf_rxq_start(p_hwfn, p_cid,
					 bd_max_bytes,
					 bd_chain_phys_addr,
					 cqe_pbl_addr,
					 cqe_pbl_size, &p_ret_params->p_prod);
	}

	/* Provide the caller with a reference to use as a handle */
	if (rc)
		qed_eth_queue_cid_release(p_hwfn, p_cid);
	else
		p_ret_params->p_handle = (void *)p_cid;

	return rc;
}

int qed_sp_eth_rx_queues_update(struct qed_hwfn *p_hwfn,
				void **pp_rxq_handles,
				u8 num_rxqs,
				u8 complete_cqe_flg,
				u8 complete_event_flg,
				enum spq_mode comp_mode,
				struct qed_spq_comp_cb *p_comp_data)
{
	struct rx_queue_update_ramrod_data *p_ramrod = NULL;
	struct qed_spq_entry *p_ent = NULL;
	struct qed_sp_init_data init_data;
	struct qed_queue_cid *p_cid;
	int rc = -EINVAL;
	u8 i;

	memset(&init_data, 0, sizeof(init_data));
	init_data.comp_mode = comp_mode;
	init_data.p_comp_data = p_comp_data;

	for (i = 0; i < num_rxqs; i++) {
		p_cid = ((struct qed_queue_cid **)pp_rxq_handles)[i];

		/* Get SPQ entry */
		init_data.cid = p_cid->cid;
		init_data.opaque_fid = p_cid->opaque_fid;

		rc = qed_sp_init_request(p_hwfn, &p_ent,
					 ETH_RAMROD_RX_QUEUE_UPDATE,
					 PROTOCOLID_ETH, &init_data);
		if (rc)
			return rc;

		p_ramrod = &p_ent->ramrod.rx_queue_update;
		p_ramrod->vport_id = p_cid->abs.vport_id;

		p_ramrod->rx_queue_id = cpu_to_le16(p_cid->abs.queue_id);
		p_ramrod->complete_cqe_flg = complete_cqe_flg;
		p_ramrod->complete_event_flg = complete_event_flg;

		rc = qed_spq_post(p_hwfn, p_ent, NULL);
		if (rc)
			return rc;
	}

	return rc;
}

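/* PF-side Rx queue stop: posts an RX_QUEUE_STOP ramrod and selects where
 * the firmware should signal completion. A queue owned by the PF itself
 * normally completes on the CQE ring, while a queue stopped on behalf of
 * a VF must complete via the event queue so the PF can forward the answer.
 */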
static int
qed_eth_pf_rx_queue_stop(struct qed_hwfn *p_hwfn,
			 struct qed_queue_cid *p_cid,
			 bool b_eq_completion_only, bool b_cqe_completion)
{
	struct rx_queue_stop_ramrod_data *p_ramrod = NULL;
	struct qed_spq_entry *p_ent = NULL;
	struct qed_sp_init_data init_data;
	int rc;

	memset(&init_data, 0, sizeof(init_data));
	init_data.cid = p_cid->cid;
	init_data.opaque_fid = p_cid->opaque_fid;
	init_data.comp_mode = QED_SPQ_MODE_EBLOCK;

	rc = qed_sp_init_request(p_hwfn, &p_ent,
				 ETH_RAMROD_RX_QUEUE_STOP,
				 PROTOCOLID_ETH, &init_data);
	if (rc)
		return rc;

	p_ramrod = &p_ent->ramrod.rx_queue_stop;
	p_ramrod->vport_id = p_cid->abs.vport_id;
	p_ramrod->rx_queue_id = cpu_to_le16(p_cid->abs.queue_id);

	/* Cleaning the queue requires the completion to arrive there.
	 * In addition, VFs require the answer to arrive as an EQE on the PF.
	 */
	p_ramrod->complete_cqe_flg = ((p_cid->vfid == QED_QUEUE_CID_SELF) &&
				      !b_eq_completion_only) ||
				     b_cqe_completion;
	p_ramrod->complete_event_flg = (p_cid->vfid != QED_QUEUE_CID_SELF) ||
				       b_eq_completion_only;

	return qed_spq_post(p_hwfn, p_ent, NULL);
}

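/* Stops an Rx queue previously started with qed_eth_rx_queue_start() and,
 * on success, releases the queue-cid referenced by @p_rxq; the handle must
 * not be reused afterwards. Illustrative caller flow (a sketch only --
 * real callers live in qede):
 *
 *	struct qed_rxq_start_ret_params ret;
 *
 *	rc = qed_eth_rx_queue_start(p_hwfn, opaque_fid, &params,
 *				    bd_max_bytes, bd_chain_phys_addr,
 *				    cqe_pbl_addr, cqe_pbl_size, &ret);
 *	...
 *	rc = qed_eth_rx_queue_stop(p_hwfn, ret.p_handle, false, false);
 */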
int qed_eth_rx_queue_stop(struct qed_hwfn *p_hwfn,
			  void *p_rxq,
			  bool eq_completion_only, bool cqe_completion)
{
	struct qed_queue_cid *p_cid = (struct qed_queue_cid *)p_rxq;
	int rc = -EINVAL;

	if (IS_PF(p_hwfn->cdev))
		rc = qed_eth_pf_rx_queue_stop(p_hwfn, p_cid,
					      eq_completion_only,
					      cqe_completion);
	else
		rc = qed_vf_pf_rxq_stop(p_hwfn, p_cid, cqe_completion);

	if (!rc)
		qed_eth_queue_cid_release(p_hwfn, p_cid);
	return rc;
}

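/* Builds and posts the TX_QUEUE_START ramrod for an already-allocated
 * queue-cid: status-block binding, stats counter, queue zone, the PBL
 * describing the Tx chain, and the QM physical queue the Tx queue is
 * serviced from.
 */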
int
qed_eth_txq_start_ramrod(struct qed_hwfn *p_hwfn,
			 struct qed_queue_cid *p_cid,
			 dma_addr_t pbl_addr, u16 pbl_size, u16 pq_id)
{
	struct tx_queue_start_ramrod_data *p_ramrod = NULL;
	struct qed_spq_entry *p_ent = NULL;
	struct qed_sp_init_data init_data;
	int rc = -EINVAL;

	/* Get SPQ entry */
	memset(&init_data, 0, sizeof(init_data));
	init_data.cid = p_cid->cid;
	init_data.opaque_fid = p_cid->opaque_fid;
	init_data.comp_mode = QED_SPQ_MODE_EBLOCK;

	rc = qed_sp_init_request(p_hwfn, &p_ent,
				 ETH_RAMROD_TX_QUEUE_START,
				 PROTOCOLID_ETH, &init_data);
	if (rc)
		return rc;

	p_ramrod = &p_ent->ramrod.tx_queue_start;
	p_ramrod->vport_id = p_cid->abs.vport_id;

	p_ramrod->sb_id = cpu_to_le16(p_cid->sb_igu_id);
	p_ramrod->sb_index = p_cid->sb_idx;
	p_ramrod->stats_counter_id = p_cid->abs.stats_id;

	p_ramrod->queue_zone_id = cpu_to_le16(p_cid->abs.queue_id);
	p_ramrod->same_as_last_id = cpu_to_le16(p_cid->abs.queue_id);

	p_ramrod->pbl_size = cpu_to_le16(pbl_size);
	DMA_REGPAIR_LE(p_ramrod->pbl_base_addr, pbl_addr);

	p_ramrod->qm_pq_id = cpu_to_le16(pq_id);

	return qed_spq_post(p_hwfn, p_ent, NULL);
}

static int
qed_eth_pf_tx_queue_start(struct qed_hwfn *p_hwfn,
			  struct qed_queue_cid *p_cid,
			  u8 tc,
			  dma_addr_t pbl_addr,
			  u16 pbl_size, void __iomem **pp_doorbell)
{
	int rc;

	rc = qed_eth_txq_start_ramrod(p_hwfn, p_cid,
				      pbl_addr, pbl_size,
				      qed_get_cm_pq_idx_mcos(p_hwfn, tc));
	if (rc)
		return rc;

	/* Provide the caller with the necessary return values */
	*pp_doorbell = p_hwfn->doorbells +
		       qed_db_addr(p_cid->cid, DQ_DEMS_LEGACY);

	return 0;
}

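/* Allocates a queue-cid for the Tx queue and starts it, either directly
 * (PF) or through the VF->PF channel. On success the opaque cid is handed
 * back in @p_ret_params->p_handle together with the doorbell address; on
 * failure the cid is released here.
 */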
static int
qed_eth_tx_queue_start(struct qed_hwfn *p_hwfn,
		       u16 opaque_fid,
		       struct qed_queue_start_common_params *p_params,
		       u8 tc,
		       dma_addr_t pbl_addr,
		       u16 pbl_size,
		       struct qed_txq_start_ret_params *p_ret_params)
{
	struct qed_queue_cid *p_cid;
	int rc;

	p_cid = qed_eth_queue_to_cid_pf(p_hwfn, opaque_fid, false, p_params);
	if (!p_cid)
		return -EINVAL;

	if (IS_PF(p_hwfn->cdev))
		rc = qed_eth_pf_tx_queue_start(p_hwfn, p_cid, tc,
					       pbl_addr, pbl_size,
					       &p_ret_params->p_doorbell);
	else
		rc = qed_vf_pf_txq_start(p_hwfn, p_cid,
					 pbl_addr, pbl_size,
					 &p_ret_params->p_doorbell);

	if (rc)
		qed_eth_queue_cid_release(p_hwfn, p_cid);
	else
		p_ret_params->p_handle = (void *)p_cid;

	return rc;
}

static int
qed_eth_pf_tx_queue_stop(struct qed_hwfn *p_hwfn, struct qed_queue_cid *p_cid)
{
	struct qed_spq_entry *p_ent = NULL;
	struct qed_sp_init_data init_data;
	int rc;

	memset(&init_data, 0, sizeof(init_data));
	init_data.cid = p_cid->cid;
	init_data.opaque_fid = p_cid->opaque_fid;
	init_data.comp_mode = QED_SPQ_MODE_EBLOCK;

	rc = qed_sp_init_request(p_hwfn, &p_ent,
				 ETH_RAMROD_TX_QUEUE_STOP,
				 PROTOCOLID_ETH, &init_data);
	if (rc)
		return rc;

	return qed_spq_post(p_hwfn, p_ent, NULL);
}

int qed_eth_tx_queue_stop(struct qed_hwfn *p_hwfn, void *p_handle)
{
	struct qed_queue_cid *p_cid = (struct qed_queue_cid *)p_handle;
	int rc;

	if (IS_PF(p_hwfn->cdev))
		rc = qed_eth_pf_tx_queue_stop(p_hwfn, p_cid);
	else
		rc = qed_vf_pf_txq_stop(p_hwfn, p_cid);

	if (!rc)
		qed_eth_queue_cid_release(p_hwfn, p_cid);
	return rc;
}

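/* Maps the driver-level unicast filter opcode onto the firmware HSI
 * action. MOVE and REPLACE are not listed here because they expand into
 * two filter commands and are handled separately by
 * qed_filter_ucast_common().
 */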
static enum eth_filter_action qed_filter_action(enum qed_filter_opcode opcode)
{
	enum eth_filter_action action = MAX_ETH_FILTER_ACTION;

	switch (opcode) {
	case QED_FILTER_ADD:
		action = ETH_FILTER_ACTION_ADD;
		break;
	case QED_FILTER_REMOVE:
		action = ETH_FILTER_ACTION_REMOVE;
		break;
	case QED_FILTER_FLUSH:
		action = ETH_FILTER_ACTION_REMOVE_ALL;
		break;
	default:
		action = MAX_ETH_FILTER_ACTION;
	}

	return action;
}

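/* Common preparation of a VPORT_FILTERS_UPDATE ramrod for unicast filters.
 * Translates the relative vport indices to absolute ones, fills in the
 * filter type/MAC/VLAN/VNI fields, and expands MOVE into a REMOVE + ADD
 * pair and REPLACE into a REMOVE_ALL + ADD pair (hence cmd_cnt = 2 for
 * those opcodes).
 */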
static int
qed_filter_ucast_common(struct qed_hwfn *p_hwfn,
			u16 opaque_fid,
			struct qed_filter_ucast *p_filter_cmd,
			struct vport_filter_update_ramrod_data **pp_ramrod,
			struct qed_spq_entry **pp_ent,
			enum spq_mode comp_mode,
			struct qed_spq_comp_cb *p_comp_data)
{
	u8 vport_to_add_to = 0, vport_to_remove_from = 0;
	struct vport_filter_update_ramrod_data *p_ramrod;
	struct eth_filter_cmd *p_first_filter;
	struct eth_filter_cmd *p_second_filter;
	struct qed_sp_init_data init_data;
	enum eth_filter_action action;
	int rc;

	rc = qed_fw_vport(p_hwfn, p_filter_cmd->vport_to_remove_from,
			  &vport_to_remove_from);
	if (rc)
		return rc;

	rc = qed_fw_vport(p_hwfn, p_filter_cmd->vport_to_add_to,
			  &vport_to_add_to);
	if (rc)
		return rc;

	/* Get SPQ entry */
	memset(&init_data, 0, sizeof(init_data));
	init_data.cid = qed_spq_get_cid(p_hwfn);
	init_data.opaque_fid = opaque_fid;
	init_data.comp_mode = comp_mode;
	init_data.p_comp_data = p_comp_data;

	rc = qed_sp_init_request(p_hwfn, pp_ent,
				 ETH_RAMROD_FILTERS_UPDATE,
				 PROTOCOLID_ETH, &init_data);
	if (rc)
		return rc;

	*pp_ramrod = &(*pp_ent)->ramrod.vport_filter_update;
	p_ramrod = *pp_ramrod;
	p_ramrod->filter_cmd_hdr.rx = p_filter_cmd->is_rx_filter ? 1 : 0;
	p_ramrod->filter_cmd_hdr.tx = p_filter_cmd->is_tx_filter ? 1 : 0;

	switch (p_filter_cmd->opcode) {
	case QED_FILTER_REPLACE:
	case QED_FILTER_MOVE:
		p_ramrod->filter_cmd_hdr.cmd_cnt = 2; break;
	default:
		p_ramrod->filter_cmd_hdr.cmd_cnt = 1; break;
	}

	p_first_filter = &p_ramrod->filter_cmds[0];
	p_second_filter = &p_ramrod->filter_cmds[1];

	switch (p_filter_cmd->type) {
	case QED_FILTER_MAC:
		p_first_filter->type = ETH_FILTER_TYPE_MAC; break;
	case QED_FILTER_VLAN:
		p_first_filter->type = ETH_FILTER_TYPE_VLAN; break;
	case QED_FILTER_MAC_VLAN:
		p_first_filter->type = ETH_FILTER_TYPE_PAIR; break;
	case QED_FILTER_INNER_MAC:
		p_first_filter->type = ETH_FILTER_TYPE_INNER_MAC; break;
	case QED_FILTER_INNER_VLAN:
		p_first_filter->type = ETH_FILTER_TYPE_INNER_VLAN; break;
	case QED_FILTER_INNER_PAIR:
		p_first_filter->type = ETH_FILTER_TYPE_INNER_PAIR; break;
	case QED_FILTER_INNER_MAC_VNI_PAIR:
		p_first_filter->type = ETH_FILTER_TYPE_INNER_MAC_VNI_PAIR;
		break;
	case QED_FILTER_MAC_VNI_PAIR:
		p_first_filter->type = ETH_FILTER_TYPE_MAC_VNI_PAIR; break;
	case QED_FILTER_VNI:
		p_first_filter->type = ETH_FILTER_TYPE_VNI; break;
	}

	if ((p_first_filter->type == ETH_FILTER_TYPE_MAC) ||
	    (p_first_filter->type == ETH_FILTER_TYPE_PAIR) ||
	    (p_first_filter->type == ETH_FILTER_TYPE_INNER_MAC) ||
	    (p_first_filter->type == ETH_FILTER_TYPE_INNER_PAIR) ||
	    (p_first_filter->type == ETH_FILTER_TYPE_INNER_MAC_VNI_PAIR) ||
	    (p_first_filter->type == ETH_FILTER_TYPE_MAC_VNI_PAIR)) {
		qed_set_fw_mac_addr(&p_first_filter->mac_msb,
				    &p_first_filter->mac_mid,
				    &p_first_filter->mac_lsb,
				    (u8 *)p_filter_cmd->mac);
	}

	if ((p_first_filter->type == ETH_FILTER_TYPE_VLAN) ||
	    (p_first_filter->type == ETH_FILTER_TYPE_PAIR) ||
	    (p_first_filter->type == ETH_FILTER_TYPE_INNER_VLAN) ||
	    (p_first_filter->type == ETH_FILTER_TYPE_INNER_PAIR))
		p_first_filter->vlan_id = cpu_to_le16(p_filter_cmd->vlan);

	if ((p_first_filter->type == ETH_FILTER_TYPE_INNER_MAC_VNI_PAIR) ||
	    (p_first_filter->type == ETH_FILTER_TYPE_MAC_VNI_PAIR) ||
	    (p_first_filter->type == ETH_FILTER_TYPE_VNI))
		p_first_filter->vni = cpu_to_le32(p_filter_cmd->vni);

	if (p_filter_cmd->opcode == QED_FILTER_MOVE) {
		p_second_filter->type = p_first_filter->type;
		p_second_filter->mac_msb = p_first_filter->mac_msb;
		p_second_filter->mac_mid = p_first_filter->mac_mid;
		p_second_filter->mac_lsb = p_first_filter->mac_lsb;
		p_second_filter->vlan_id = p_first_filter->vlan_id;
		p_second_filter->vni = p_first_filter->vni;

		p_first_filter->action = ETH_FILTER_ACTION_REMOVE;

		p_first_filter->vport_id = vport_to_remove_from;

		p_second_filter->action = ETH_FILTER_ACTION_ADD;
		p_second_filter->vport_id = vport_to_add_to;
	} else if (p_filter_cmd->opcode == QED_FILTER_REPLACE) {
		p_first_filter->vport_id = vport_to_add_to;
		memcpy(p_second_filter, p_first_filter,
		       sizeof(*p_second_filter));
		p_first_filter->action = ETH_FILTER_ACTION_REMOVE_ALL;
		p_second_filter->action = ETH_FILTER_ACTION_ADD;
	} else {
		action = qed_filter_action(p_filter_cmd->opcode);

		if (action == MAX_ETH_FILTER_ACTION) {
			DP_NOTICE(p_hwfn,
				  "%d is not supported yet\n",
				  p_filter_cmd->opcode);
			return -EINVAL;
		}

		p_first_filter->action = action;
		p_first_filter->vport_id = (p_filter_cmd->opcode ==
					    QED_FILTER_REMOVE) ?
					   vport_to_remove_from :
					   vport_to_add_to;
	}

	return 0;
}

int qed_sp_eth_filter_ucast(struct qed_hwfn *p_hwfn,
			    u16 opaque_fid,
			    struct qed_filter_ucast *p_filter_cmd,
			    enum spq_mode comp_mode,
			    struct qed_spq_comp_cb *p_comp_data)
{
	struct vport_filter_update_ramrod_data *p_ramrod = NULL;
	struct qed_spq_entry *p_ent = NULL;
	struct eth_filter_cmd_header *p_header;
	int rc;

	rc = qed_filter_ucast_common(p_hwfn, opaque_fid, p_filter_cmd,
				     &p_ramrod, &p_ent,
				     comp_mode, p_comp_data);
	if (rc) {
		DP_ERR(p_hwfn,
		       "Unicast filter command preparation failed %d\n", rc);
		return rc;
	}
	p_header = &p_ramrod->filter_cmd_hdr;
	p_header->assert_on_error = p_filter_cmd->assert_on_error;

	rc = qed_spq_post(p_hwfn, p_ent, NULL);
	if (rc) {
		DP_ERR(p_hwfn, "Unicast filter command failed %d\n", rc);
		return rc;
	}

	DP_VERBOSE(p_hwfn, QED_MSG_SP,
		   "Unicast filter configured, opcode = %s, type = %s, cmd_cnt = %d, is_rx_filter = %d, is_tx_filter = %d\n",
		   (p_filter_cmd->opcode == QED_FILTER_ADD) ? "ADD" :
		   ((p_filter_cmd->opcode == QED_FILTER_REMOVE) ?
		    "REMOVE" :
		    ((p_filter_cmd->opcode == QED_FILTER_MOVE) ?
		     "MOVE" : "REPLACE")),
		   (p_filter_cmd->type == QED_FILTER_MAC) ? "MAC" :
		   ((p_filter_cmd->type == QED_FILTER_VLAN) ?
		    "VLAN" : "MAC & VLAN"),
		   p_ramrod->filter_cmd_hdr.cmd_cnt,
		   p_filter_cmd->is_rx_filter,
		   p_filter_cmd->is_tx_filter);
	DP_VERBOSE(p_hwfn, QED_MSG_SP,
		   "vport_to_add_to = %d, vport_to_remove_from = %d, mac = %02x:%02x:%02x:%02x:%02x:%02x, vlan = %d\n",
		   p_filter_cmd->vport_to_add_to,
		   p_filter_cmd->vport_to_remove_from,
		   p_filter_cmd->mac[0],
		   p_filter_cmd->mac[1],
		   p_filter_cmd->mac[2],
		   p_filter_cmd->mac[3],
		   p_filter_cmd->mac[4],
		   p_filter_cmd->mac[5],
		   p_filter_cmd->vlan);

	return 0;
}

/*******************************************************************************
 * Description:
 *	Calculates a bitwise CRC-32C over a buffer.
 *	Note: crc32_length is in bytes and must be a multiple of 8.
 * Return:
 *	The resulting CRC; on invalid input the seed is returned unchanged.
 ******************************************************************************/
static u32 qed_calc_crc32c(u8 *crc32_packet,
			   u32 crc32_length, u32 crc32_seed, u8 complement)
{
	u32 byte = 0, bit = 0, crc32_result = crc32_seed;
	u8 msb = 0, current_byte = 0;

	if ((!crc32_packet) ||
	    (crc32_length == 0) ||
	    ((crc32_length % 8) != 0))
		return crc32_result;
	for (byte = 0; byte < crc32_length; byte++) {
		current_byte = crc32_packet[byte];
		for (bit = 0; bit < 8; bit++) {
			msb = (u8)(crc32_result >> 31);
			crc32_result = crc32_result << 1;
			if (msb != (0x1 & (current_byte >> bit))) {
				crc32_result = crc32_result ^ CRC32_POLY;
				crc32_result |= 1; /*crc32_result[0] = 1;*/
			}
		}
	}
	return crc32_result;
}

static u32 qed_crc32c_le(u32 seed, u8 *mac, u32 len)
{
	u32 packet_buf[2] = { 0 };

	memcpy((u8 *)(&packet_buf[0]), &mac[0], 6);
	return qed_calc_crc32c((u8 *)packet_buf, 8, seed, 0);
}

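/* Hashes a multicast MAC into one of 256 approximate-match bins: the
 * 6-byte address is zero-padded to 8 bytes, run through the CRC-32C
 * above with a fixed seed, and the low 8 bits of the result select the
 * bin. The firmware matches received multicast frames against the same
 * bin vector, so a set bit may also pass other addresses that hash alike.
 */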
u8 qed_mcast_bin_from_mac(u8 *mac)
{
	u32 crc = qed_crc32c_le(ETH_MULTICAST_BIN_FROM_MAC_SEED,
				mac, ETH_ALEN);

	return crc & 0xff;
}

static int
qed_sp_eth_filter_mcast(struct qed_hwfn *p_hwfn,
			u16 opaque_fid,
			struct qed_filter_mcast *p_filter_cmd,
			enum spq_mode comp_mode,
			struct qed_spq_comp_cb *p_comp_data)
{
	unsigned long bins[ETH_MULTICAST_MAC_BINS_IN_REGS];
	struct vport_update_ramrod_data *p_ramrod = NULL;
	struct qed_spq_entry *p_ent = NULL;
	struct qed_sp_init_data init_data;
	u8 abs_vport_id = 0;
	int rc, i;

	if (p_filter_cmd->opcode == QED_FILTER_ADD)
		rc = qed_fw_vport(p_hwfn, p_filter_cmd->vport_to_add_to,
				  &abs_vport_id);
	else
		rc = qed_fw_vport(p_hwfn, p_filter_cmd->vport_to_remove_from,
				  &abs_vport_id);
	if (rc)
		return rc;

	/* Get SPQ entry */
	memset(&init_data, 0, sizeof(init_data));
	init_data.cid = qed_spq_get_cid(p_hwfn);
	init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
	init_data.comp_mode = comp_mode;
	init_data.p_comp_data = p_comp_data;

	rc = qed_sp_init_request(p_hwfn, &p_ent,
				 ETH_RAMROD_VPORT_UPDATE,
				 PROTOCOLID_ETH, &init_data);
	if (rc) {
		DP_ERR(p_hwfn, "Multi-cast command failed %d\n", rc);
		return rc;
	}

	p_ramrod = &p_ent->ramrod.vport_update;
	p_ramrod->common.update_approx_mcast_flg = 1;

	/* explicitly clear out the entire vector */
	memset(&p_ramrod->approx_mcast.bins, 0,
	       sizeof(p_ramrod->approx_mcast.bins));
	memset(bins, 0, sizeof(unsigned long) *
	       ETH_MULTICAST_MAC_BINS_IN_REGS);
	/* the filter ADD op is an explicit set op and removes
	 * any existing filters for the vport
	 */
	if (p_filter_cmd->opcode == QED_FILTER_ADD) {
		for (i = 0; i < p_filter_cmd->num_mc_addrs; i++) {
			u32 bit;

			bit = qed_mcast_bin_from_mac(p_filter_cmd->mac[i]);
			__set_bit(bit, bins);
		}

		/* Convert to the correct endianness */
		for (i = 0; i < ETH_MULTICAST_MAC_BINS_IN_REGS; i++) {
			struct vport_update_ramrod_mcast *p_ramrod_bins;
			u32 *p_bins = (u32 *)bins;

			p_ramrod_bins = &p_ramrod->approx_mcast;
			p_ramrod_bins->bins[i] = cpu_to_le32(p_bins[i]);
		}
	}

	p_ramrod->common.vport_id = abs_vport_id;

	return qed_spq_post(p_hwfn, p_ent, NULL);
}

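/* Device-level multicast configuration: validates the opcode and address
 * count, then applies the filter on every hw-function -- through the
 * VF->PF channel when running as a VF, or via a VPORT_UPDATE ramrod on
 * the PF.
 */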
static int qed_filter_mcast_cmd(struct qed_dev *cdev,
				struct qed_filter_mcast *p_filter_cmd,
				enum spq_mode comp_mode,
				struct qed_spq_comp_cb *p_comp_data)
{
	int rc = 0;
	int i;

	/* only ADD and REMOVE operations are supported for multi-cast */
	if ((p_filter_cmd->opcode != QED_FILTER_ADD &&
	     (p_filter_cmd->opcode != QED_FILTER_REMOVE)) ||
	    (p_filter_cmd->num_mc_addrs > QED_MAX_MC_ADDRS))
		return -EINVAL;

	for_each_hwfn(cdev, i) {
		struct qed_hwfn *p_hwfn = &cdev->hwfns[i];
		u16 opaque_fid;

		if (IS_VF(cdev)) {
			qed_vf_pf_filter_mcast(p_hwfn, p_filter_cmd);
			continue;
		}

		opaque_fid = p_hwfn->hw_info.opaque_fid;

		rc = qed_sp_eth_filter_mcast(p_hwfn,
					     opaque_fid,
					     p_filter_cmd,
					     comp_mode, p_comp_data);
	}
	return rc;
}

static int qed_filter_ucast_cmd(struct qed_dev *cdev,
				struct qed_filter_ucast *p_filter_cmd,
				enum spq_mode comp_mode,
				struct qed_spq_comp_cb *p_comp_data)
{
	int rc = 0;
	int i;

	for_each_hwfn(cdev, i) {
		struct qed_hwfn *p_hwfn = &cdev->hwfns[i];
		u16 opaque_fid;

		if (IS_VF(cdev)) {
			rc = qed_vf_pf_filter_ucast(p_hwfn, p_filter_cmd);
			continue;
		}

		opaque_fid = p_hwfn->hw_info.opaque_fid;

		rc = qed_sp_eth_filter_ucast(p_hwfn,
					     opaque_fid,
					     p_filter_cmd,
					     comp_mode, p_comp_data);
		if (rc)
			break;
	}

	return rc;
}

/* Statistics related code */
static void __qed_get_vport_pstats_addrlen(struct qed_hwfn *p_hwfn,
					   u32 *p_addr,
					   u32 *p_len, u16 statistics_bin)
{
	if (IS_PF(p_hwfn->cdev)) {
		*p_addr = BAR0_MAP_REG_PSDM_RAM +
		    PSTORM_QUEUE_STAT_OFFSET(statistics_bin);
		*p_len = sizeof(struct eth_pstorm_per_queue_stat);
	} else {
		struct qed_vf_iov *p_iov = p_hwfn->vf_iov_info;
		struct pfvf_acquire_resp_tlv *p_resp = &p_iov->acquire_resp;

		*p_addr = p_resp->pfdev_info.stats_info.pstats.address;
		*p_len = p_resp->pfdev_info.stats_info.pstats.len;
	}
}

static void __qed_get_vport_pstats(struct qed_hwfn *p_hwfn,
				   struct qed_ptt *p_ptt,
				   struct qed_eth_stats *p_stats,
				   u16 statistics_bin)
{
	struct eth_pstorm_per_queue_stat pstats;
	u32 pstats_addr = 0, pstats_len = 0;

	__qed_get_vport_pstats_addrlen(p_hwfn, &pstats_addr, &pstats_len,
				       statistics_bin);

	memset(&pstats, 0, sizeof(pstats));
	qed_memcpy_from(p_hwfn, p_ptt, &pstats, pstats_addr, pstats_len);

	p_stats->common.tx_ucast_bytes +=
	    HILO_64_REGPAIR(pstats.sent_ucast_bytes);
	p_stats->common.tx_mcast_bytes +=
	    HILO_64_REGPAIR(pstats.sent_mcast_bytes);
	p_stats->common.tx_bcast_bytes +=
	    HILO_64_REGPAIR(pstats.sent_bcast_bytes);
	p_stats->common.tx_ucast_pkts +=
	    HILO_64_REGPAIR(pstats.sent_ucast_pkts);
	p_stats->common.tx_mcast_pkts +=
	    HILO_64_REGPAIR(pstats.sent_mcast_pkts);
	p_stats->common.tx_bcast_pkts +=
	    HILO_64_REGPAIR(pstats.sent_bcast_pkts);
	p_stats->common.tx_err_drop_pkts +=
	    HILO_64_REGPAIR(pstats.error_drop_pkts);
}

static void __qed_get_vport_tstats(struct qed_hwfn *p_hwfn,
				   struct qed_ptt *p_ptt,
				   struct qed_eth_stats *p_stats,
				   u16 statistics_bin)
{
	struct tstorm_per_port_stat tstats;
	u32 tstats_addr, tstats_len;

	if (IS_PF(p_hwfn->cdev)) {
		tstats_addr = BAR0_MAP_REG_TSDM_RAM +
		    TSTORM_PORT_STAT_OFFSET(MFW_PORT(p_hwfn));
		tstats_len = sizeof(struct tstorm_per_port_stat);
	} else {
		struct qed_vf_iov *p_iov = p_hwfn->vf_iov_info;
		struct pfvf_acquire_resp_tlv *p_resp = &p_iov->acquire_resp;

		tstats_addr = p_resp->pfdev_info.stats_info.tstats.address;
		tstats_len = p_resp->pfdev_info.stats_info.tstats.len;
	}

	memset(&tstats, 0, sizeof(tstats));
	qed_memcpy_from(p_hwfn, p_ptt, &tstats, tstats_addr, tstats_len);

	p_stats->common.mftag_filter_discards +=
	    HILO_64_REGPAIR(tstats.mftag_filter_discard);
	p_stats->common.mac_filter_discards +=
	    HILO_64_REGPAIR(tstats.eth_mac_filter_discard);
}

static void __qed_get_vport_ustats_addrlen(struct qed_hwfn *p_hwfn,
					   u32 *p_addr,
					   u32 *p_len, u16 statistics_bin)
{
	if (IS_PF(p_hwfn->cdev)) {
		*p_addr = BAR0_MAP_REG_USDM_RAM +
		    USTORM_QUEUE_STAT_OFFSET(statistics_bin);
		*p_len = sizeof(struct eth_ustorm_per_queue_stat);
	} else {
		struct qed_vf_iov *p_iov = p_hwfn->vf_iov_info;
		struct pfvf_acquire_resp_tlv *p_resp = &p_iov->acquire_resp;

		*p_addr = p_resp->pfdev_info.stats_info.ustats.address;
		*p_len = p_resp->pfdev_info.stats_info.ustats.len;
	}
}

static void __qed_get_vport_ustats(struct qed_hwfn *p_hwfn,
				   struct qed_ptt *p_ptt,
				   struct qed_eth_stats *p_stats,
				   u16 statistics_bin)
{
	struct eth_ustorm_per_queue_stat ustats;
	u32 ustats_addr = 0, ustats_len = 0;

	__qed_get_vport_ustats_addrlen(p_hwfn, &ustats_addr, &ustats_len,
				       statistics_bin);

	memset(&ustats, 0, sizeof(ustats));
	qed_memcpy_from(p_hwfn, p_ptt, &ustats, ustats_addr, ustats_len);

	p_stats->common.rx_ucast_bytes +=
	    HILO_64_REGPAIR(ustats.rcv_ucast_bytes);
	p_stats->common.rx_mcast_bytes +=
	    HILO_64_REGPAIR(ustats.rcv_mcast_bytes);
	p_stats->common.rx_bcast_bytes +=
	    HILO_64_REGPAIR(ustats.rcv_bcast_bytes);
	p_stats->common.rx_ucast_pkts += HILO_64_REGPAIR(ustats.rcv_ucast_pkts);
	p_stats->common.rx_mcast_pkts += HILO_64_REGPAIR(ustats.rcv_mcast_pkts);
	p_stats->common.rx_bcast_pkts += HILO_64_REGPAIR(ustats.rcv_bcast_pkts);
}

static void __qed_get_vport_mstats_addrlen(struct qed_hwfn *p_hwfn,
					   u32 *p_addr,
					   u32 *p_len, u16 statistics_bin)
{
	if (IS_PF(p_hwfn->cdev)) {
		*p_addr = BAR0_MAP_REG_MSDM_RAM +
		    MSTORM_QUEUE_STAT_OFFSET(statistics_bin);
		*p_len = sizeof(struct eth_mstorm_per_queue_stat);
	} else {
		struct qed_vf_iov *p_iov = p_hwfn->vf_iov_info;
		struct pfvf_acquire_resp_tlv *p_resp = &p_iov->acquire_resp;

		*p_addr = p_resp->pfdev_info.stats_info.mstats.address;
		*p_len = p_resp->pfdev_info.stats_info.mstats.len;
	}
}

static void __qed_get_vport_mstats(struct qed_hwfn *p_hwfn,
				   struct qed_ptt *p_ptt,
				   struct qed_eth_stats *p_stats,
				   u16 statistics_bin)
{
	struct eth_mstorm_per_queue_stat mstats;
	u32 mstats_addr = 0, mstats_len = 0;

	__qed_get_vport_mstats_addrlen(p_hwfn, &mstats_addr, &mstats_len,
				       statistics_bin);

	memset(&mstats, 0, sizeof(mstats));
	qed_memcpy_from(p_hwfn, p_ptt, &mstats, mstats_addr, mstats_len);

	p_stats->common.no_buff_discards +=
	    HILO_64_REGPAIR(mstats.no_buff_discard);
	p_stats->common.packet_too_big_discard +=
	    HILO_64_REGPAIR(mstats.packet_too_big_discard);
	p_stats->common.ttl0_discard += HILO_64_REGPAIR(mstats.ttl0_discard);
	p_stats->common.tpa_coalesced_pkts +=
	    HILO_64_REGPAIR(mstats.tpa_coalesced_pkts);
	p_stats->common.tpa_coalesced_events +=
	    HILO_64_REGPAIR(mstats.tpa_coalesced_events);
	p_stats->common.tpa_aborts_num +=
	    HILO_64_REGPAIR(mstats.tpa_aborts_num);
	p_stats->common.tpa_coalesced_bytes +=
	    HILO_64_REGPAIR(mstats.tpa_coalesced_bytes);
}

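/* MAC/port-level statistics are read from the management firmware's
 * per-port scratchpad (struct public_port) rather than from the storm
 * RAMs, and include the BB/AH-specific packet-size histograms.
 */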
static void __qed_get_vport_port_stats(struct qed_hwfn *p_hwfn,
				       struct qed_ptt *p_ptt,
				       struct qed_eth_stats *p_stats)
{
	struct qed_eth_stats_common *p_common = &p_stats->common;
	struct port_stats port_stats;
	int j;

	memset(&port_stats, 0, sizeof(port_stats));

	qed_memcpy_from(p_hwfn, p_ptt, &port_stats,
			p_hwfn->mcp_info->port_addr +
			offsetof(struct public_port, stats),
			sizeof(port_stats));

	p_common->rx_64_byte_packets += port_stats.eth.r64;
	p_common->rx_65_to_127_byte_packets += port_stats.eth.r127;
	p_common->rx_128_to_255_byte_packets += port_stats.eth.r255;
	p_common->rx_256_to_511_byte_packets += port_stats.eth.r511;
	p_common->rx_512_to_1023_byte_packets += port_stats.eth.r1023;
	p_common->rx_1024_to_1518_byte_packets += port_stats.eth.r1518;
	p_common->rx_crc_errors += port_stats.eth.rfcs;
	p_common->rx_mac_crtl_frames += port_stats.eth.rxcf;
	p_common->rx_pause_frames += port_stats.eth.rxpf;
	p_common->rx_pfc_frames += port_stats.eth.rxpp;
	p_common->rx_align_errors += port_stats.eth.raln;
	p_common->rx_carrier_errors += port_stats.eth.rfcr;
	p_common->rx_oversize_packets += port_stats.eth.rovr;
	p_common->rx_jabbers += port_stats.eth.rjbr;
	p_common->rx_undersize_packets += port_stats.eth.rund;
	p_common->rx_fragments += port_stats.eth.rfrg;
	p_common->tx_64_byte_packets += port_stats.eth.t64;
	p_common->tx_65_to_127_byte_packets += port_stats.eth.t127;
	p_common->tx_128_to_255_byte_packets += port_stats.eth.t255;
	p_common->tx_256_to_511_byte_packets += port_stats.eth.t511;
	p_common->tx_512_to_1023_byte_packets += port_stats.eth.t1023;
	p_common->tx_1024_to_1518_byte_packets += port_stats.eth.t1518;
	p_common->tx_pause_frames += port_stats.eth.txpf;
	p_common->tx_pfc_frames += port_stats.eth.txpp;
	p_common->rx_mac_bytes += port_stats.eth.rbyte;
	p_common->rx_mac_uc_packets += port_stats.eth.rxuca;
	p_common->rx_mac_mc_packets += port_stats.eth.rxmca;
	p_common->rx_mac_bc_packets += port_stats.eth.rxbca;
	p_common->rx_mac_frames_ok += port_stats.eth.rxpok;
	p_common->tx_mac_bytes += port_stats.eth.tbyte;
	p_common->tx_mac_uc_packets += port_stats.eth.txuca;
	p_common->tx_mac_mc_packets += port_stats.eth.txmca;
	p_common->tx_mac_bc_packets += port_stats.eth.txbca;
	p_common->tx_mac_ctrl_frames += port_stats.eth.txcf;
	for (j = 0; j < 8; j++) {
		p_common->brb_truncates += port_stats.brb.brb_truncate[j];
		p_common->brb_discards += port_stats.brb.brb_discard[j];
	}

	if (QED_IS_BB(p_hwfn->cdev)) {
		struct qed_eth_stats_bb *p_bb = &p_stats->bb;

		p_bb->rx_1519_to_1522_byte_packets +=
		    port_stats.eth.u0.bb0.r1522;
		p_bb->rx_1519_to_2047_byte_packets +=
		    port_stats.eth.u0.bb0.r2047;
		p_bb->rx_2048_to_4095_byte_packets +=
		    port_stats.eth.u0.bb0.r4095;
		p_bb->rx_4096_to_9216_byte_packets +=
		    port_stats.eth.u0.bb0.r9216;
		p_bb->rx_9217_to_16383_byte_packets +=
		    port_stats.eth.u0.bb0.r16383;
		p_bb->tx_1519_to_2047_byte_packets +=
		    port_stats.eth.u1.bb1.t2047;
		p_bb->tx_2048_to_4095_byte_packets +=
		    port_stats.eth.u1.bb1.t4095;
		p_bb->tx_4096_to_9216_byte_packets +=
		    port_stats.eth.u1.bb1.t9216;
		p_bb->tx_9217_to_16383_byte_packets +=
		    port_stats.eth.u1.bb1.t16383;
		p_bb->tx_lpi_entry_count += port_stats.eth.u2.bb2.tlpiec;
		p_bb->tx_total_collisions += port_stats.eth.u2.bb2.tncl;
	} else {
		struct qed_eth_stats_ah *p_ah = &p_stats->ah;

		p_ah->rx_1519_to_max_byte_packets +=
		    port_stats.eth.u0.ah0.r1519_to_max;
		p_ah->tx_1519_to_max_byte_packets =
		    port_stats.eth.u1.ah1.t1519_to_max;
	}
}

static void __qed_get_vport_stats(struct qed_hwfn *p_hwfn,
				  struct qed_ptt *p_ptt,
				  struct qed_eth_stats *stats,
				  u16 statistics_bin, bool b_get_port_stats)
{
	__qed_get_vport_mstats(p_hwfn, p_ptt, stats, statistics_bin);
	__qed_get_vport_ustats(p_hwfn, p_ptt, stats, statistics_bin);
	__qed_get_vport_tstats(p_hwfn, p_ptt, stats, statistics_bin);
	__qed_get_vport_pstats(p_hwfn, p_ptt, stats, statistics_bin);

	if (b_get_port_stats && p_hwfn->mcp_info)
		__qed_get_vport_port_stats(p_hwfn, p_ptt, stats);
}

static void _qed_get_vport_stats(struct qed_dev *cdev,
				 struct qed_eth_stats *stats)
{
	u8 fw_vport = 0;
	int i;

	memset(stats, 0, sizeof(*stats));

	for_each_hwfn(cdev, i) {
		struct qed_hwfn *p_hwfn = &cdev->hwfns[i];
		struct qed_ptt *p_ptt = IS_PF(cdev) ? qed_ptt_acquire(p_hwfn)
						    : NULL;

		if (IS_PF(cdev)) {
			/* The main vport is the first one, in relative terms */
			if (qed_fw_vport(p_hwfn, 0, &fw_vport)) {
				DP_ERR(p_hwfn, "No vport available!\n");
				goto out;
			}
		}

		if (IS_PF(cdev) && !p_ptt) {
			DP_ERR(p_hwfn, "Failed to acquire ptt\n");
			continue;
		}

		__qed_get_vport_stats(p_hwfn, p_ptt, stats, fw_vport,
				      IS_PF(cdev));

out:
		if (IS_PF(cdev) && p_ptt)
			qed_ptt_release(p_hwfn, p_ptt);
	}
}

void qed_get_vport_stats(struct qed_dev *cdev, struct qed_eth_stats *stats)
{
	u32 i;

	if (!cdev) {
		memset(stats, 0, sizeof(*stats));
		return;
	}

	_qed_get_vport_stats(cdev, stats);

	if (!cdev->reset_stats)
		return;

	/* Reduce the statistics baseline */
	for (i = 0; i < sizeof(struct qed_eth_stats) / sizeof(u64); i++)
		((u64 *)stats)[i] -= ((u64 *)cdev->reset_stats)[i];
}

/* zeroes the V-PORT specific portion of the stats (port stats remain untouched) */
void qed_reset_vport_stats(struct qed_dev *cdev)
{
	int i;

	for_each_hwfn(cdev, i) {
		struct qed_hwfn *p_hwfn = &cdev->hwfns[i];
		struct eth_mstorm_per_queue_stat mstats;
		struct eth_ustorm_per_queue_stat ustats;
		struct eth_pstorm_per_queue_stat pstats;
		struct qed_ptt *p_ptt = IS_PF(cdev) ? qed_ptt_acquire(p_hwfn)
						    : NULL;
		u32 addr = 0, len = 0;

		if (IS_PF(cdev) && !p_ptt) {
			DP_ERR(p_hwfn, "Failed to acquire ptt\n");
			continue;
		}

		memset(&mstats, 0, sizeof(mstats));
		__qed_get_vport_mstats_addrlen(p_hwfn, &addr, &len, 0);
		qed_memcpy_to(p_hwfn, p_ptt, addr, &mstats, len);

		memset(&ustats, 0, sizeof(ustats));
		__qed_get_vport_ustats_addrlen(p_hwfn, &addr, &len, 0);
		qed_memcpy_to(p_hwfn, p_ptt, addr, &ustats, len);

		memset(&pstats, 0, sizeof(pstats));
		__qed_get_vport_pstats_addrlen(p_hwfn, &addr, &len, 0);
		qed_memcpy_to(p_hwfn, p_ptt, addr, &pstats, len);

		if (IS_PF(cdev))
			qed_ptt_release(p_hwfn, p_ptt);
	}

	/* PORT statistics are not necessarily reset, so we need to
	 * read and create a baseline for future statistics.
	 */
	if (!cdev->reset_stats)
		DP_INFO(cdev, "Reset stats not allocated\n");
	else
		_qed_get_vport_stats(cdev, cdev->reset_stats);
}

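/* Maps the driver-level aRFS/GFT configuration mode onto the firmware
 * profile type. Note the naming mismatch: the driver's 5-tuple mode is
 * expressed by the HSI as a "4-tuple" profile (presumably counting the
 * address/port pairs without the protocol field).
 */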
static enum gft_profile_type
qed_arfs_mode_to_hsi(enum qed_filter_config_mode mode)
{
	if (mode == QED_FILTER_CONFIG_MODE_5_TUPLE)
		return GFT_PROFILE_TYPE_4_TUPLE;
	if (mode == QED_FILTER_CONFIG_MODE_IP_DEST)
		return GFT_PROFILE_TYPE_IP_DST_ADDR;
	return GFT_PROFILE_TYPE_L4_DST_PORT;
}

void qed_arfs_mode_configure(struct qed_hwfn *p_hwfn,
			     struct qed_ptt *p_ptt,
			     struct qed_arfs_config_params *p_cfg_params)
{
	if (p_cfg_params->mode != QED_FILTER_CONFIG_MODE_DISABLE) {
		qed_gft_config(p_hwfn, p_ptt, p_hwfn->rel_pf_id,
			       p_cfg_params->tcp,
			       p_cfg_params->udp,
			       p_cfg_params->ipv4,
			       p_cfg_params->ipv6,
			       qed_arfs_mode_to_hsi(p_cfg_params->mode));
		DP_VERBOSE(p_hwfn,
			   QED_MSG_SP,
			   "Configured Filtering: tcp = %s, udp = %s, ipv4 = %s, ipv6 = %s, mode = %08x\n",
			   p_cfg_params->tcp ? "Enable" : "Disable",
			   p_cfg_params->udp ? "Enable" : "Disable",
			   p_cfg_params->ipv4 ? "Enable" : "Disable",
			   p_cfg_params->ipv6 ? "Enable" : "Disable",
			   (u32)p_cfg_params->mode);
	} else {
		DP_VERBOSE(p_hwfn, QED_MSG_SP, "Disabled Filtering\n");
		qed_gft_disable(p_hwfn, p_ptt, p_hwfn->rel_pf_id);
	}
}

Tomer Tayar | da09091 | 2017-12-27 19:30:07 +0200 | [diff] [blame] | 2004 | int |
| 2005 | qed_configure_rfs_ntuple_filter(struct qed_hwfn *p_hwfn, |
Chopra, Manish | d51e4af | 2017-04-13 04:54:44 -0700 | [diff] [blame] | 2006 | struct qed_spq_comp_cb *p_cb, |
Tomer Tayar | da09091 | 2017-12-27 19:30:07 +0200 | [diff] [blame] | 2007 | struct qed_ntuple_filter_params *p_params) |
Chopra, Manish | d51e4af | 2017-04-13 04:54:44 -0700 | [diff] [blame] | 2008 | { |
| 2009 | struct rx_update_gft_filter_data *p_ramrod = NULL; |
| 2010 | struct qed_spq_entry *p_ent = NULL; |
| 2011 | struct qed_sp_init_data init_data; |
| 2012 | u16 abs_rx_q_id = 0; |
| 2013 | u8 abs_vport_id = 0; |
| 2014 | int rc = -EINVAL; |
| 2015 | |
Tomer Tayar | da09091 | 2017-12-27 19:30:07 +0200 | [diff] [blame] | 2016 | rc = qed_fw_vport(p_hwfn, p_params->vport_id, &abs_vport_id); |
Chopra, Manish | d51e4af | 2017-04-13 04:54:44 -0700 | [diff] [blame] | 2017 | if (rc) |
| 2018 | return rc; |
| 2019 | |
Tomer Tayar | da09091 | 2017-12-27 19:30:07 +0200 | [diff] [blame] | 2020 | if (p_params->qid != QED_RFS_NTUPLE_QID_RSS) { |
| 2021 | rc = qed_fw_l2_queue(p_hwfn, p_params->qid, &abs_rx_q_id); |
| 2022 | if (rc) |
| 2023 | return rc; |
| 2024 | } |
Chopra, Manish | d51e4af | 2017-04-13 04:54:44 -0700 | [diff] [blame] | 2025 | |
| 2026 | /* Get SPQ entry */ |
| 2027 | memset(&init_data, 0, sizeof(init_data)); |
| 2028 | init_data.cid = qed_spq_get_cid(p_hwfn); |
| 2029 | |
| 2030 | init_data.opaque_fid = p_hwfn->hw_info.opaque_fid; |
| 2031 | |
| 2032 | if (p_cb) { |
| 2033 | init_data.comp_mode = QED_SPQ_MODE_CB; |
| 2034 | init_data.p_comp_data = p_cb; |
| 2035 | } else { |
| 2036 | init_data.comp_mode = QED_SPQ_MODE_EBLOCK; |
| 2037 | } |
| 2038 | |
| 2039 | rc = qed_sp_init_request(p_hwfn, &p_ent, |
| 2040 | ETH_RAMROD_GFT_UPDATE_FILTER, |
| 2041 | PROTOCOLID_ETH, &init_data); |
| 2042 | if (rc) |
| 2043 | return rc; |
| 2044 | |
| 2045 | p_ramrod = &p_ent->ramrod.rx_update_gft; |
Tomer Tayar | da09091 | 2017-12-27 19:30:07 +0200 | [diff] [blame] | 2046 | |
| 2047 | DMA_REGPAIR_LE(p_ramrod->pkt_hdr_addr, p_params->addr); |
| 2048 | p_ramrod->pkt_hdr_length = cpu_to_le16(p_params->length); |
| 2049 | |
| 2050 | if (p_params->qid != QED_RFS_NTUPLE_QID_RSS) { |
| 2051 | p_ramrod->rx_qid_valid = 1; |
| 2052 | p_ramrod->rx_qid = cpu_to_le16(abs_rx_q_id); |
| 2053 | } |
| 2054 | |
| 2055 | p_ramrod->flow_id_valid = 0; |
| 2056 | p_ramrod->flow_id = 0; |
| 2057 | |
| 2058 | p_ramrod->vport_id = cpu_to_le16((u16)abs_vport_id); |
| 2059 | p_ramrod->filter_action = p_params->b_is_add ? GFT_ADD_FILTER |
| 2060 | : GFT_DELETE_FILTER; |
Chopra, Manish | d51e4af | 2017-04-13 04:54:44 -0700 | [diff] [blame] | 2061 | |
| 2062 | DP_VERBOSE(p_hwfn, QED_MSG_SP, |
| 2063 | "V[%0x], Q[%04x] - %s filter from 0x%llx [length %04xb]\n", |
| 2064 | abs_vport_id, abs_rx_q_id, |
Tomer Tayar | da09091 | 2017-12-27 19:30:07 +0200 | [diff] [blame] | 2065 | p_params->b_is_add ? "Adding" : "Removing", |
| 2066 | (u64)p_params->addr, p_params->length); |
Chopra, Manish | d51e4af | 2017-04-13 04:54:44 -0700 | [diff] [blame] | 2067 | |
| 2068 | return qed_spq_post(p_hwfn, p_ent, NULL); |
| 2069 | } |
| 2070 | |
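 | | /* Read back the Rx interrupt coalescing value of a queue: DMA the CAU
 | |  * status-block entry out first to recover the timer resolution, then
 | |  * read the timeset from the USTORM queue zone and shift it back up by
 | |  * that resolution to reconstruct the configured value.
 | |  */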
Rahul Verma | bf5a94b | 2017-07-26 06:07:14 -0700 | [diff] [blame] | 2071 | int qed_get_rxq_coalesce(struct qed_hwfn *p_hwfn, |
| 2072 | struct qed_ptt *p_ptt, |
| 2073 | struct qed_queue_cid *p_cid, u16 *p_rx_coal) |
| 2074 | { |
| 2075 | u32 coalesce, address, is_valid; |
| 2076 | struct cau_sb_entry sb_entry; |
| 2077 | u8 timer_res; |
| 2078 | int rc; |
| 2079 | |
| 2080 | rc = qed_dmae_grc2host(p_hwfn, p_ptt, CAU_REG_SB_VAR_MEMORY + |
| 2081 | p_cid->sb_igu_id * sizeof(u64), |
| 2082 | (u64)(uintptr_t)&sb_entry, 2, 0); |
| 2083 | if (rc) { |
| 2084 | DP_ERR(p_hwfn, "dmae_grc2host failed %d\n", rc); |
| 2085 | return rc; |
| 2086 | } |
| 2087 | |
| 2088 | timer_res = GET_FIELD(sb_entry.params, CAU_SB_ENTRY_TIMER_RES0); |
| 2089 | |
| 2090 | address = BAR0_MAP_REG_USDM_RAM + |
| 2091 | USTORM_ETH_QUEUE_ZONE_OFFSET(p_cid->abs.queue_id); |
| 2092 | coalesce = qed_rd(p_hwfn, p_ptt, address); |
| 2093 | |
| 2094 | is_valid = GET_FIELD(coalesce, COALESCING_TIMESET_VALID); |
| 2095 | if (!is_valid) |
| 2096 | return -EINVAL; |
| 2097 | |
| 2098 | coalesce = GET_FIELD(coalesce, COALESCING_TIMESET_TIMESET); |
| 2099 | *p_rx_coal = (u16)(coalesce << timer_res); |
| 2100 | |
| 2101 | return 0; |
| 2102 | } |
| 2103 | |
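 | | /* Tx mirror of the Rx read-back above; the only differences are the
 | |  * timer-resolution field (TIMER_RES1) and the XSTORM queue zone that
 | |  * holds the Tx timeset.
 | |  */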
| 2104 | int qed_get_txq_coalesce(struct qed_hwfn *p_hwfn, |
| 2105 | struct qed_ptt *p_ptt, |
| 2106 | struct qed_queue_cid *p_cid, u16 *p_tx_coal) |
| 2107 | { |
| 2108 | u32 coalesce, address, is_valid; |
| 2109 | struct cau_sb_entry sb_entry; |
| 2110 | u8 timer_res; |
| 2111 | int rc; |
| 2112 | |
| 2113 | rc = qed_dmae_grc2host(p_hwfn, p_ptt, CAU_REG_SB_VAR_MEMORY + |
| 2114 | p_cid->sb_igu_id * sizeof(u64), |
| 2115 | (u64)(uintptr_t)&sb_entry, 2, 0); |
| 2116 | if (rc) { |
| 2117 | DP_ERR(p_hwfn, "dmae_grc2host failed %d\n", rc); |
| 2118 | return rc; |
| 2119 | } |
| 2120 | |
| 2121 | timer_res = GET_FIELD(sb_entry.params, CAU_SB_ENTRY_TIMER_RES1); |
| 2122 | |
| 2123 | address = BAR0_MAP_REG_XSDM_RAM + |
| 2124 | XSTORM_ETH_QUEUE_ZONE_OFFSET(p_cid->abs.queue_id); |
| 2125 | coalesce = qed_rd(p_hwfn, p_ptt, address); |
| 2126 | |
| 2127 | is_valid = GET_FIELD(coalesce, COALESCING_TIMESET_VALID); |
| 2128 | if (!is_valid) |
| 2129 | return -EINVAL; |
| 2130 | |
| 2131 | coalesce = GET_FIELD(coalesce, COALESCING_TIMESET_TIMESET); |
| 2132 | *p_tx_coal = (u16)(coalesce << timer_res); |
| 2133 | |
| 2134 | return 0; |
| 2135 | } |
| 2136 | |
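 | | /* Generic entry point for reading a queue's coalescing value. A VF has
 | |  * no direct register access, so it goes through the VF->PF channel; a PF
 | |  * acquires a PTT window and reads the hardware directly.
 | |  *
 | |  * A minimal usage sketch (illustrative only; the queue handle is the one
 | |  * returned when the queue was started, 'coal' receives the value):
 | |  *
 | |  *	u16 coal;
 | |  *
 | |  *	if (!qed_get_queue_coalesce(p_hwfn, &coal, handle))
 | |  *		DP_INFO(p_hwfn, "coalescing: %u\n", coal);
 | |  */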
| 2137 | int qed_get_queue_coalesce(struct qed_hwfn *p_hwfn, u16 *p_coal, void *handle) |
| 2138 | { |
| 2139 | struct qed_queue_cid *p_cid = handle; |
| 2140 | struct qed_ptt *p_ptt; |
| 2141 | int rc = 0; |
| 2142 | |
| 2143 | if (IS_VF(p_hwfn->cdev)) { |
| 2144 | rc = qed_vf_pf_get_coalesce(p_hwfn, p_coal, p_cid); |
| 2145 | if (rc) |
| 2146 | DP_NOTICE(p_hwfn, "Unable to read queue coalescing\n"); |
| 2147 | |
| 2148 | return rc; |
| 2149 | } |
| 2150 | |
| 2151 | p_ptt = qed_ptt_acquire(p_hwfn); |
| 2152 | if (!p_ptt) |
| 2153 | return -EAGAIN; |
| 2154 | |
 | 2155 | 	if (p_cid->b_is_rx)
 | 2156 | 		rc = qed_get_rxq_coalesce(p_hwfn, p_ptt, p_cid, p_coal);
 | 2159 | 	else
 | 2160 | 		rc = qed_get_txq_coalesce(p_hwfn, p_ptt, p_cid, p_coal);
 | 2164 | 
 | 2166 | 	qed_ptt_release(p_hwfn, p_ptt);
| 2167 | |
| 2168 | return rc; |
| 2169 | } |
| 2170 | |
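 | | /* Fill the L2-specific capabilities reported to the protocol driver.
 | |  * For a PF the queue count is bounded by three limits: the number of
 | |  * L2 queue-zones, the available connections (three cids - rx/tx/xdp -
 | |  * per queue) and the fastpath MSI-X vectors. For a VF the numbers are
 | |  * whatever the PF granted over the VF-PF channel.
 | |  */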
Yuval Mintz | 25c089d | 2015-10-26 11:02:26 +0200 | [diff] [blame] | 2171 | static int qed_fill_eth_dev_info(struct qed_dev *cdev, |
| 2172 | struct qed_dev_eth_info *info) |
| 2173 | { |
| 2174 | int i; |
| 2175 | |
| 2176 | memset(info, 0, sizeof(*info)); |
| 2177 | |
| 2178 | info->num_tc = 1; |
| 2179 | |
Yuval Mintz | 1408cc1f | 2016-05-11 16:36:14 +0300 | [diff] [blame] | 2180 | if (IS_PF(cdev)) { |
Yuval Mintz | 25eb8d4 | 2016-07-27 14:45:24 +0300 | [diff] [blame] | 2181 | int max_vf_vlan_filters = 0; |
Yuval Mintz | 7b7e70f | 2016-10-14 05:19:20 -0400 | [diff] [blame] | 2182 | int max_vf_mac_filters = 0; |
Yuval Mintz | 25eb8d4 | 2016-07-27 14:45:24 +0300 | [diff] [blame] | 2183 | |
Yuval Mintz | 1408cc1f | 2016-05-11 16:36:14 +0300 | [diff] [blame] | 2184 | if (cdev->int_params.out.int_mode == QED_INT_MODE_MSIX) { |
Mintz, Yuval | e1d32ac | 2017-01-01 13:57:03 +0200 | [diff] [blame] | 2185 | u16 num_queues = 0; |
| 2186 | |
| 2187 | /* Since the feature controls only queue-zones, |
| 2188 | * make sure we have the contexts [rx, tx, xdp] to |
| 2189 | * match. |
| 2190 | */ |
| 2191 | for_each_hwfn(cdev, i) { |
| 2192 | struct qed_hwfn *hwfn = &cdev->hwfns[i]; |
| 2193 | u16 l2_queues = (u16)FEAT_NUM(hwfn, |
| 2194 | QED_PF_L2_QUE); |
| 2195 | u16 cids; |
| 2196 | |
| 2197 | cids = hwfn->pf_params.eth_pf_params.num_cons; |
| 2198 | num_queues += min_t(u16, l2_queues, cids / 3); |
| 2199 | } |
| 2200 | |
 | 2201 | 			/* Queues might theoretically be >256, but the
 | 2202 | 			 * interrupts' upper-limit guarantees that the value
 | 2203 | 			 * fits in a u8.
 | 2203 | 			 */
| 2204 | if (cdev->int_params.fp_msix_cnt) { |
| 2205 | u8 irqs = cdev->int_params.fp_msix_cnt; |
| 2206 | |
| 2207 | info->num_queues = (u8)min_t(u16, |
| 2208 | num_queues, irqs); |
| 2209 | } |
Yuval Mintz | 1408cc1f | 2016-05-11 16:36:14 +0300 | [diff] [blame] | 2210 | } else { |
| 2211 | info->num_queues = cdev->num_hwfns; |
| 2212 | } |
| 2213 | |
Yuval Mintz | 7b7e70f | 2016-10-14 05:19:20 -0400 | [diff] [blame] | 2214 | if (IS_QED_SRIOV(cdev)) { |
Yuval Mintz | 25eb8d4 | 2016-07-27 14:45:24 +0300 | [diff] [blame] | 2215 | max_vf_vlan_filters = cdev->p_iov_info->total_vfs * |
| 2216 | QED_ETH_VF_NUM_VLAN_FILTERS; |
Yuval Mintz | 7b7e70f | 2016-10-14 05:19:20 -0400 | [diff] [blame] | 2217 | max_vf_mac_filters = cdev->p_iov_info->total_vfs * |
| 2218 | QED_ETH_VF_NUM_MAC_FILTERS; |
| 2219 | } |
| 2220 | info->num_vlan_filters = RESC_NUM(QED_LEADING_HWFN(cdev), |
| 2221 | QED_VLAN) - |
Yuval Mintz | 25eb8d4 | 2016-07-27 14:45:24 +0300 | [diff] [blame] | 2222 | max_vf_vlan_filters; |
Yuval Mintz | 7b7e70f | 2016-10-14 05:19:20 -0400 | [diff] [blame] | 2223 | info->num_mac_filters = RESC_NUM(QED_LEADING_HWFN(cdev), |
| 2224 | QED_MAC) - |
| 2225 | max_vf_mac_filters; |
Yuval Mintz | 25eb8d4 | 2016-07-27 14:45:24 +0300 | [diff] [blame] | 2226 | |
Yuval Mintz | 1408cc1f | 2016-05-11 16:36:14 +0300 | [diff] [blame] | 2227 | ether_addr_copy(info->port_mac, |
| 2228 | cdev->hwfns[0].hw_info.hw_mac_addr); |
Yuval Mintz | 1408cc1f | 2016-05-11 16:36:14 +0300 | [diff] [blame] | 2229 | |
Mintz, Yuval | cbb8a12 | 2017-06-04 13:31:08 +0300 | [diff] [blame] | 2230 | info->xdp_supported = true; |
| 2231 | } else { |
| 2232 | u16 total_cids = 0; |
| 2233 | |
| 2234 | /* Determine queues & XDP support */ |
| 2235 | for_each_hwfn(cdev, i) { |
| 2236 | struct qed_hwfn *p_hwfn = &cdev->hwfns[i]; |
| 2237 | u8 queues, cids; |
| 2238 | |
| 2239 | qed_vf_get_num_cids(p_hwfn, &cids); |
| 2240 | qed_vf_get_num_rxqs(p_hwfn, &queues); |
Yuval Mintz | 1408cc1f | 2016-05-11 16:36:14 +0300 | [diff] [blame] | 2241 | info->num_queues += queues; |
Mintz, Yuval | cbb8a12 | 2017-06-04 13:31:08 +0300 | [diff] [blame] | 2242 | total_cids += cids; |
Yuval Mintz | 1408cc1f | 2016-05-11 16:36:14 +0300 | [diff] [blame] | 2243 | } |
| 2244 | |
Mintz, Yuval | cbb8a12 | 2017-06-04 13:31:08 +0300 | [diff] [blame] | 2245 | 		/* Enable VF XDP in case the PF guarantees sufficient connections */
| 2246 | if (total_cids >= info->num_queues * 3) |
| 2247 | info->xdp_supported = true; |
| 2248 | |
Yuval Mintz | 1408cc1f | 2016-05-11 16:36:14 +0300 | [diff] [blame] | 2249 | qed_vf_get_num_vlan_filters(&cdev->hwfns[0], |
Tomer Tayar | 2edbff8 | 2016-10-31 07:14:27 +0200 | [diff] [blame] | 2250 | (u8 *)&info->num_vlan_filters); |
Mintz, Yuval | b0fca31 | 2016-10-31 22:26:54 +0200 | [diff] [blame] | 2251 | qed_vf_get_num_mac_filters(&cdev->hwfns[0], |
| 2252 | (u8 *)&info->num_mac_filters); |
Yuval Mintz | 1408cc1f | 2016-05-11 16:36:14 +0300 | [diff] [blame] | 2253 | qed_vf_get_port_mac(&cdev->hwfns[0], info->port_mac); |
Yuval Mintz | d8c2c7e | 2016-08-22 13:25:11 +0300 | [diff] [blame] | 2254 | |
| 2255 | info->is_legacy = !!cdev->hwfns[0].vf_iov_info->b_pre_fp_hsi; |
Yuval Mintz | 25c089d | 2015-10-26 11:02:26 +0200 | [diff] [blame] | 2256 | } |
| 2257 | |
Yuval Mintz | 25c089d | 2015-10-26 11:02:26 +0200 | [diff] [blame] | 2258 | qed_fill_dev_info(cdev, &info->common); |
| 2259 | |
Yuval Mintz | 1408cc1f | 2016-05-11 16:36:14 +0300 | [diff] [blame] | 2260 | if (IS_VF(cdev)) |
Shyam Saini | 0ee28e3 | 2017-01-17 07:35:04 +0530 | [diff] [blame] | 2261 | eth_zero_addr(info->common.hw_mac); |
Yuval Mintz | 1408cc1f | 2016-05-11 16:36:14 +0300 | [diff] [blame] | 2262 | |
Yuval Mintz | 25c089d | 2015-10-26 11:02:26 +0200 | [diff] [blame] | 2263 | return 0; |
| 2264 | } |
| 2265 | |
Yuval Mintz | cc875c2 | 2015-10-26 11:02:31 +0200 | [diff] [blame] | 2266 | static void qed_register_eth_ops(struct qed_dev *cdev, |
Yuval Mintz | 1408cc1f | 2016-05-11 16:36:14 +0300 | [diff] [blame] | 2267 | struct qed_eth_cb_ops *ops, void *cookie) |
Yuval Mintz | cc875c2 | 2015-10-26 11:02:31 +0200 | [diff] [blame] | 2268 | { |
Yuval Mintz | 1408cc1f | 2016-05-11 16:36:14 +0300 | [diff] [blame] | 2269 | cdev->protocol_ops.eth = ops; |
| 2270 | cdev->ops_cookie = cookie; |
| 2271 | |
| 2272 | /* For VF, we start bulletin reading */ |
| 2273 | if (IS_VF(cdev)) |
| 2274 | qed_vf_start_iov_wq(cdev); |
Yuval Mintz | cc875c2 | 2015-10-26 11:02:31 +0200 | [diff] [blame] | 2275 | } |
| 2276 | |
Yuval Mintz | eff1696 | 2016-05-11 16:36:21 +0300 | [diff] [blame] | 2277 | static bool qed_check_mac(struct qed_dev *cdev, u8 *mac) |
| 2278 | { |
| 2279 | if (IS_PF(cdev)) |
| 2280 | return true; |
| 2281 | |
| 2282 | return qed_vf_check_mac(&cdev->hwfns[0], mac); |
| 2283 | } |
| 2284 | |
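 | | /* Start an L2 vport. In CMT (two hwfns per device) the same vport is
 | |  * instantiated on each engine, so the loop below issues a vport-start
 | |  * ramrod and opens the fastpath per hwfn.
 | |  */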
Manish Chopra | cee4d26 | 2015-10-26 11:02:28 +0200 | [diff] [blame] | 2285 | static int qed_start_vport(struct qed_dev *cdev, |
Manish Chopra | 088c861 | 2016-03-04 12:35:05 -0500 | [diff] [blame] | 2286 | struct qed_start_vport_params *params) |
Manish Chopra | cee4d26 | 2015-10-26 11:02:28 +0200 | [diff] [blame] | 2287 | { |
| 2288 | int rc, i; |
| 2289 | |
| 2290 | for_each_hwfn(cdev, i) { |
Manish Chopra | 088c861 | 2016-03-04 12:35:05 -0500 | [diff] [blame] | 2291 | struct qed_sp_vport_start_params start = { 0 }; |
Manish Chopra | cee4d26 | 2015-10-26 11:02:28 +0200 | [diff] [blame] | 2292 | struct qed_hwfn *p_hwfn = &cdev->hwfns[i]; |
| 2293 | |
Manish Chopra | 088c861 | 2016-03-04 12:35:05 -0500 | [diff] [blame] | 2294 | start.tpa_mode = params->gro_enable ? QED_TPA_MODE_GRO : |
| 2295 | QED_TPA_MODE_NONE; |
| 2296 | start.remove_inner_vlan = params->remove_inner_vlan; |
Yuval Mintz | 08feecd | 2016-05-11 16:36:20 +0300 | [diff] [blame] | 2297 | start.only_untagged = true; /* untagged only */ |
Manish Chopra | 088c861 | 2016-03-04 12:35:05 -0500 | [diff] [blame] | 2298 | start.drop_ttl0 = params->drop_ttl0; |
| 2299 | start.opaque_fid = p_hwfn->hw_info.opaque_fid; |
| 2300 | start.concrete_fid = p_hwfn->hw_info.concrete_fid; |
Sudarsana Reddy Kalluru | c78c70f | 2017-02-15 10:24:10 +0200 | [diff] [blame] | 2301 | start.handle_ptp_pkts = params->handle_ptp_pkts; |
Manish Chopra | 088c861 | 2016-03-04 12:35:05 -0500 | [diff] [blame] | 2302 | start.vport_id = params->vport_id; |
| 2303 | start.max_buffers_per_cqe = 16; |
| 2304 | start.mtu = params->mtu; |
Manish Chopra | cee4d26 | 2015-10-26 11:02:28 +0200 | [diff] [blame] | 2305 | |
Manish Chopra | 088c861 | 2016-03-04 12:35:05 -0500 | [diff] [blame] | 2306 | rc = qed_sp_vport_start(p_hwfn, &start); |
Manish Chopra | cee4d26 | 2015-10-26 11:02:28 +0200 | [diff] [blame] | 2307 | if (rc) { |
| 2308 | DP_ERR(cdev, "Failed to start VPORT\n"); |
| 2309 | return rc; |
| 2310 | } |
| 2311 | |
Rahul Verma | 1558296 | 2017-04-06 15:58:29 +0300 | [diff] [blame] | 2312 | rc = qed_hw_start_fastpath(p_hwfn); |
| 2313 | if (rc) { |
| 2314 | DP_ERR(cdev, "Failed to start VPORT fastpath\n"); |
| 2315 | return rc; |
| 2316 | } |
Manish Chopra | cee4d26 | 2015-10-26 11:02:28 +0200 | [diff] [blame] | 2317 | |
| 2318 | DP_VERBOSE(cdev, (QED_MSG_SPQ | NETIF_MSG_IFUP), |
| 2319 | "Started V-PORT %d with MTU %d\n", |
Manish Chopra | 088c861 | 2016-03-04 12:35:05 -0500 | [diff] [blame] | 2320 | start.vport_id, start.mtu); |
Manish Chopra | cee4d26 | 2015-10-26 11:02:28 +0200 | [diff] [blame] | 2321 | } |
| 2322 | |
Yuval Mintz | a0d26d5 | 2016-06-19 15:18:13 +0300 | [diff] [blame] | 2323 | if (params->clear_stats) |
| 2324 | qed_reset_vport_stats(cdev); |
Manish Chopra | 9df2ed0 | 2015-10-26 11:02:33 +0200 | [diff] [blame] | 2325 | |
Manish Chopra | cee4d26 | 2015-10-26 11:02:28 +0200 | [diff] [blame] | 2326 | return 0; |
| 2327 | } |
| 2328 | |
Yuval Mintz | 1a635e4 | 2016-08-15 10:42:43 +0300 | [diff] [blame] | 2329 | static int qed_stop_vport(struct qed_dev *cdev, u8 vport_id) |
Manish Chopra | cee4d26 | 2015-10-26 11:02:28 +0200 | [diff] [blame] | 2330 | { |
| 2331 | int rc, i; |
| 2332 | |
| 2333 | for_each_hwfn(cdev, i) { |
| 2334 | struct qed_hwfn *p_hwfn = &cdev->hwfns[i]; |
| 2335 | |
| 2336 | rc = qed_sp_vport_stop(p_hwfn, |
Yuval Mintz | 1a635e4 | 2016-08-15 10:42:43 +0300 | [diff] [blame] | 2337 | p_hwfn->hw_info.opaque_fid, vport_id); |
Manish Chopra | cee4d26 | 2015-10-26 11:02:28 +0200 | [diff] [blame] | 2338 | |
| 2339 | if (rc) { |
| 2340 | DP_ERR(cdev, "Failed to stop VPORT\n"); |
| 2341 | return rc; |
| 2342 | } |
| 2343 | } |
| 2344 | return 0; |
| 2345 | } |
| 2346 | |
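 | | /* Translate the protocol driver's RSS configuration into per-hwfn
 | |  * qed_rss_params. On a single-engine device this is a plain copy. In
 | |  * CMT queues are spread round-robin between the engines, so each engine
 | |  * receives every other indirection-table entry, e.g. with 2 hwfns entry
 | |  * i lands at index i / 2 of the table belonging to the cid's owning
 | |  * hwfn, and the per-engine table size log drops from 7 to 6.
 | |  */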
Mintz, Yuval | f29ffdb | 2017-01-01 13:57:07 +0200 | [diff] [blame] | 2347 | static int qed_update_vport_rss(struct qed_dev *cdev, |
| 2348 | struct qed_update_vport_rss_params *input, |
| 2349 | struct qed_rss_params *rss) |
| 2350 | { |
| 2351 | int i, fn; |
| 2352 | |
| 2353 | /* Update configuration with what's correct regardless of CMT */ |
| 2354 | rss->update_rss_config = 1; |
| 2355 | rss->rss_enable = 1; |
| 2356 | rss->update_rss_capabilities = 1; |
| 2357 | rss->update_rss_ind_table = 1; |
| 2358 | rss->update_rss_key = 1; |
| 2359 | rss->rss_caps = input->rss_caps; |
| 2360 | memcpy(rss->rss_key, input->rss_key, QED_RSS_KEY_SIZE * sizeof(u32)); |
| 2361 | |
 | 2362 | 	/* In the regular scenario we'd simply take the input handlers.
 | 2363 | 	 * But in CMT the handlers must be split according to the engine
 | 2364 | 	 * they were configured on, and we then need to determine whether
 | 2365 | 	 * RSS is really required, since 2 queues on CMT don't require
 | 2366 | 	 * RSS.
 | 2367 | 	 */
| 2368 | if (cdev->num_hwfns == 1) { |
| 2369 | memcpy(rss->rss_ind_table, |
| 2370 | input->rss_ind_table, |
| 2371 | QED_RSS_IND_TABLE_SIZE * sizeof(void *)); |
| 2372 | rss->rss_table_size_log = 7; |
| 2373 | return 0; |
| 2374 | } |
| 2375 | |
 | 2376 | 	/* Start by copying the non-specific information to the 2nd copy */
| 2377 | memcpy(&rss[1], &rss[0], sizeof(struct qed_rss_params)); |
| 2378 | |
| 2379 | /* CMT should be round-robin */ |
| 2380 | for (i = 0; i < QED_RSS_IND_TABLE_SIZE; i++) { |
| 2381 | struct qed_queue_cid *cid = input->rss_ind_table[i]; |
| 2382 | struct qed_rss_params *t_rss; |
| 2383 | |
| 2384 | if (cid->p_owner == QED_LEADING_HWFN(cdev)) |
| 2385 | t_rss = &rss[0]; |
| 2386 | else |
| 2387 | t_rss = &rss[1]; |
| 2388 | |
| 2389 | t_rss->rss_ind_table[i / cdev->num_hwfns] = cid; |
| 2390 | } |
| 2391 | |
| 2392 | /* Make sure RSS is actually required */ |
| 2393 | for_each_hwfn(cdev, fn) { |
| 2394 | for (i = 1; i < QED_RSS_IND_TABLE_SIZE / cdev->num_hwfns; i++) { |
| 2395 | if (rss[fn].rss_ind_table[i] != |
| 2396 | rss[fn].rss_ind_table[0]) |
| 2397 | break; |
| 2398 | } |
| 2399 | if (i == QED_RSS_IND_TABLE_SIZE / cdev->num_hwfns) { |
| 2400 | DP_VERBOSE(cdev, NETIF_MSG_IFUP, |
| 2401 | "CMT - 1 queue per-hwfn; Disabling RSS\n"); |
| 2402 | return -EINVAL; |
| 2403 | } |
| 2404 | rss[fn].rss_table_size_log = 6; |
| 2405 | } |
| 2406 | |
| 2407 | return 0; |
| 2408 | } |
| 2409 | |
Manish Chopra | cee4d26 | 2015-10-26 11:02:28 +0200 | [diff] [blame] | 2410 | static int qed_update_vport(struct qed_dev *cdev, |
| 2411 | struct qed_update_vport_params *params) |
| 2412 | { |
| 2413 | struct qed_sp_vport_update_params sp_params; |
Mintz, Yuval | f29ffdb | 2017-01-01 13:57:07 +0200 | [diff] [blame] | 2414 | struct qed_rss_params *rss; |
| 2415 | int rc = 0, i; |
Manish Chopra | cee4d26 | 2015-10-26 11:02:28 +0200 | [diff] [blame] | 2416 | |
| 2417 | if (!cdev) |
| 2418 | return -ENODEV; |
| 2419 | |
Mintz, Yuval | f29ffdb | 2017-01-01 13:57:07 +0200 | [diff] [blame] | 2420 | rss = vzalloc(sizeof(*rss) * cdev->num_hwfns); |
| 2421 | if (!rss) |
| 2422 | return -ENOMEM; |
| 2423 | |
Manish Chopra | cee4d26 | 2015-10-26 11:02:28 +0200 | [diff] [blame] | 2424 | memset(&sp_params, 0, sizeof(sp_params)); |
Manish Chopra | cee4d26 | 2015-10-26 11:02:28 +0200 | [diff] [blame] | 2425 | |
| 2426 | /* Translate protocol params into sp params */ |
| 2427 | sp_params.vport_id = params->vport_id; |
Yuval Mintz | 1a635e4 | 2016-08-15 10:42:43 +0300 | [diff] [blame] | 2428 | sp_params.update_vport_active_rx_flg = params->update_vport_active_flg; |
| 2429 | sp_params.update_vport_active_tx_flg = params->update_vport_active_flg; |
Manish Chopra | cee4d26 | 2015-10-26 11:02:28 +0200 | [diff] [blame] | 2430 | sp_params.vport_active_rx_flg = params->vport_active_flg; |
| 2431 | sp_params.vport_active_tx_flg = params->vport_active_flg; |
Yuval Mintz | 831bfb0e | 2016-05-11 16:36:25 +0300 | [diff] [blame] | 2432 | sp_params.update_tx_switching_flg = params->update_tx_switching_flg; |
| 2433 | sp_params.tx_switching_flg = params->tx_switching_flg; |
Yuval Mintz | 3f9b4a6 | 2016-02-18 17:00:39 +0200 | [diff] [blame] | 2434 | sp_params.accept_any_vlan = params->accept_any_vlan; |
| 2435 | sp_params.update_accept_any_vlan_flg = |
| 2436 | params->update_accept_any_vlan_flg; |
Manish Chopra | cee4d26 | 2015-10-26 11:02:28 +0200 | [diff] [blame] | 2437 | |
Mintz, Yuval | f29ffdb | 2017-01-01 13:57:07 +0200 | [diff] [blame] | 2438 | /* Prepare the RSS configuration */ |
| 2439 | if (params->update_rss_flg) |
| 2440 | if (qed_update_vport_rss(cdev, ¶ms->rss_params, rss)) |
Manish Chopra | cee4d26 | 2015-10-26 11:02:28 +0200 | [diff] [blame] | 2441 | params->update_rss_flg = 0; |
Manish Chopra | cee4d26 | 2015-10-26 11:02:28 +0200 | [diff] [blame] | 2442 | |
| 2443 | for_each_hwfn(cdev, i) { |
| 2444 | struct qed_hwfn *p_hwfn = &cdev->hwfns[i]; |
| 2445 | |
Mintz, Yuval | f29ffdb | 2017-01-01 13:57:07 +0200 | [diff] [blame] | 2446 | if (params->update_rss_flg) |
| 2447 | sp_params.rss_params = &rss[i]; |
| 2448 | |
Manish Chopra | cee4d26 | 2015-10-26 11:02:28 +0200 | [diff] [blame] | 2449 | sp_params.opaque_fid = p_hwfn->hw_info.opaque_fid; |
| 2450 | rc = qed_sp_vport_update(p_hwfn, &sp_params, |
| 2451 | QED_SPQ_MODE_EBLOCK, |
| 2452 | NULL); |
| 2453 | if (rc) { |
| 2454 | DP_ERR(cdev, "Failed to update VPORT\n"); |
Mintz, Yuval | f29ffdb | 2017-01-01 13:57:07 +0200 | [diff] [blame] | 2455 | goto out; |
Manish Chopra | cee4d26 | 2015-10-26 11:02:28 +0200 | [diff] [blame] | 2456 | } |
| 2457 | |
| 2458 | DP_VERBOSE(cdev, (QED_MSG_SPQ | NETIF_MSG_IFUP), |
| 2459 | "Updated V-PORT %d: active_flag %d [update %d]\n", |
| 2460 | params->vport_id, params->vport_active_flg, |
| 2461 | params->update_vport_active_flg); |
| 2462 | } |
| 2463 | |
Mintz, Yuval | f29ffdb | 2017-01-01 13:57:07 +0200 | [diff] [blame] | 2464 | out: |
| 2465 | vfree(rss); |
| 2466 | return rc; |
Manish Chopra | cee4d26 | 2015-10-26 11:02:28 +0200 | [diff] [blame] | 2467 | } |
| 2468 | |
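 | | /* Start an Rx queue for the protocol driver. 'rss_num' is the driver's
 | |  * global queue index; in CMT it selects the owning engine
 | |  * (rss_num % num_hwfns), and the queue id is scaled down into the
 | |  * per-hwfn namespace while statistics are bound to the vport.
 | |  */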
| 2469 | static int qed_start_rxq(struct qed_dev *cdev, |
Mintz, Yuval | 3da7a37 | 2016-11-29 16:47:06 +0200 | [diff] [blame] | 2470 | u8 rss_num, |
| 2471 | struct qed_queue_start_common_params *p_params, |
Manish Chopra | cee4d26 | 2015-10-26 11:02:28 +0200 | [diff] [blame] | 2472 | u16 bd_max_bytes, |
| 2473 | dma_addr_t bd_chain_phys_addr, |
| 2474 | dma_addr_t cqe_pbl_addr, |
| 2475 | u16 cqe_pbl_size, |
Mintz, Yuval | 3da7a37 | 2016-11-29 16:47:06 +0200 | [diff] [blame] | 2476 | struct qed_rxq_start_ret_params *ret_params) |
Manish Chopra | cee4d26 | 2015-10-26 11:02:28 +0200 | [diff] [blame] | 2477 | { |
Manish Chopra | cee4d26 | 2015-10-26 11:02:28 +0200 | [diff] [blame] | 2478 | struct qed_hwfn *p_hwfn; |
Yuval Mintz | 1a635e4 | 2016-08-15 10:42:43 +0300 | [diff] [blame] | 2479 | int rc, hwfn_index; |
Manish Chopra | cee4d26 | 2015-10-26 11:02:28 +0200 | [diff] [blame] | 2480 | |
Mintz, Yuval | 3da7a37 | 2016-11-29 16:47:06 +0200 | [diff] [blame] | 2481 | hwfn_index = rss_num % cdev->num_hwfns; |
Manish Chopra | cee4d26 | 2015-10-26 11:02:28 +0200 | [diff] [blame] | 2482 | p_hwfn = &cdev->hwfns[hwfn_index]; |
| 2483 | |
Mintz, Yuval | 3da7a37 | 2016-11-29 16:47:06 +0200 | [diff] [blame] | 2484 | p_params->queue_id = p_params->queue_id / cdev->num_hwfns; |
| 2485 | p_params->stats_id = p_params->vport_id; |
Manish Chopra | cee4d26 | 2015-10-26 11:02:28 +0200 | [diff] [blame] | 2486 | |
Mintz, Yuval | 3da7a37 | 2016-11-29 16:47:06 +0200 | [diff] [blame] | 2487 | rc = qed_eth_rx_queue_start(p_hwfn, |
| 2488 | p_hwfn->hw_info.opaque_fid, |
| 2489 | p_params, |
| 2490 | bd_max_bytes, |
| 2491 | bd_chain_phys_addr, |
| 2492 | cqe_pbl_addr, cqe_pbl_size, ret_params); |
Manish Chopra | cee4d26 | 2015-10-26 11:02:28 +0200 | [diff] [blame] | 2493 | if (rc) { |
Mintz, Yuval | 3da7a37 | 2016-11-29 16:47:06 +0200 | [diff] [blame] | 2494 | DP_ERR(cdev, "Failed to start RXQ#%d\n", p_params->queue_id); |
Manish Chopra | cee4d26 | 2015-10-26 11:02:28 +0200 | [diff] [blame] | 2495 | return rc; |
| 2496 | } |
| 2497 | |
| 2498 | DP_VERBOSE(cdev, (QED_MSG_SPQ | NETIF_MSG_IFUP), |
Mintz, Yuval | f604b17 | 2017-06-04 13:31:01 +0300 | [diff] [blame] | 2499 | "Started RX-Q %d [rss_num %d] on V-PORT %d and SB igu %d\n", |
Mintz, Yuval | 3da7a37 | 2016-11-29 16:47:06 +0200 | [diff] [blame] | 2500 | p_params->queue_id, rss_num, p_params->vport_id, |
Mintz, Yuval | f604b17 | 2017-06-04 13:31:01 +0300 | [diff] [blame] | 2501 | p_params->p_sb->igu_sb_id); |
Manish Chopra | cee4d26 | 2015-10-26 11:02:28 +0200 | [diff] [blame] | 2502 | |
| 2503 | return 0; |
| 2504 | } |
| 2505 | |
Mintz, Yuval | 3da7a37 | 2016-11-29 16:47:06 +0200 | [diff] [blame] | 2506 | static int qed_stop_rxq(struct qed_dev *cdev, u8 rss_id, void *handle) |
Manish Chopra | cee4d26 | 2015-10-26 11:02:28 +0200 | [diff] [blame] | 2507 | { |
| 2508 | int rc, hwfn_index; |
| 2509 | struct qed_hwfn *p_hwfn; |
| 2510 | |
Mintz, Yuval | 3da7a37 | 2016-11-29 16:47:06 +0200 | [diff] [blame] | 2511 | hwfn_index = rss_id % cdev->num_hwfns; |
| 2512 | p_hwfn = &cdev->hwfns[hwfn_index]; |
Manish Chopra | cee4d26 | 2015-10-26 11:02:28 +0200 | [diff] [blame] | 2513 | |
Mintz, Yuval | 3da7a37 | 2016-11-29 16:47:06 +0200 | [diff] [blame] | 2514 | rc = qed_eth_rx_queue_stop(p_hwfn, handle, false, false); |
Manish Chopra | cee4d26 | 2015-10-26 11:02:28 +0200 | [diff] [blame] | 2515 | if (rc) { |
Mintz, Yuval | 3da7a37 | 2016-11-29 16:47:06 +0200 | [diff] [blame] | 2516 | DP_ERR(cdev, "Failed to stop RXQ#%02x\n", rss_id); |
Manish Chopra | cee4d26 | 2015-10-26 11:02:28 +0200 | [diff] [blame] | 2517 | return rc; |
| 2518 | } |
| 2519 | |
| 2520 | return 0; |
| 2521 | } |
| 2522 | |
| 2523 | static int qed_start_txq(struct qed_dev *cdev, |
Mintz, Yuval | 3da7a37 | 2016-11-29 16:47:06 +0200 | [diff] [blame] | 2524 | u8 rss_num, |
Manish Chopra | cee4d26 | 2015-10-26 11:02:28 +0200 | [diff] [blame] | 2525 | struct qed_queue_start_common_params *p_params, |
| 2526 | dma_addr_t pbl_addr, |
| 2527 | u16 pbl_size, |
Mintz, Yuval | 3da7a37 | 2016-11-29 16:47:06 +0200 | [diff] [blame] | 2528 | struct qed_txq_start_ret_params *ret_params) |
Manish Chopra | cee4d26 | 2015-10-26 11:02:28 +0200 | [diff] [blame] | 2529 | { |
| 2530 | struct qed_hwfn *p_hwfn; |
| 2531 | int rc, hwfn_index; |
| 2532 | |
Mintz, Yuval | 3da7a37 | 2016-11-29 16:47:06 +0200 | [diff] [blame] | 2533 | hwfn_index = rss_num % cdev->num_hwfns; |
| 2534 | p_hwfn = &cdev->hwfns[hwfn_index]; |
| 2535 | p_params->queue_id = p_params->queue_id / cdev->num_hwfns; |
| 2536 | p_params->stats_id = p_params->vport_id; |
Manish Chopra | cee4d26 | 2015-10-26 11:02:28 +0200 | [diff] [blame] | 2537 | |
Mintz, Yuval | 3da7a37 | 2016-11-29 16:47:06 +0200 | [diff] [blame] | 2538 | rc = qed_eth_tx_queue_start(p_hwfn, |
| 2539 | p_hwfn->hw_info.opaque_fid, |
| 2540 | p_params, 0, |
| 2541 | pbl_addr, pbl_size, ret_params); |
Manish Chopra | cee4d26 | 2015-10-26 11:02:28 +0200 | [diff] [blame] | 2542 | |
| 2543 | if (rc) { |
| 2544 | DP_ERR(cdev, "Failed to start TXQ#%d\n", p_params->queue_id); |
| 2545 | return rc; |
| 2546 | } |
| 2547 | |
| 2548 | DP_VERBOSE(cdev, (QED_MSG_SPQ | NETIF_MSG_IFUP), |
Mintz, Yuval | f604b17 | 2017-06-04 13:31:01 +0300 | [diff] [blame] | 2549 | "Started TX-Q %d [rss_num %d] on V-PORT %d and SB igu %d\n", |
Mintz, Yuval | 3da7a37 | 2016-11-29 16:47:06 +0200 | [diff] [blame] | 2550 | p_params->queue_id, rss_num, p_params->vport_id, |
Mintz, Yuval | f604b17 | 2017-06-04 13:31:01 +0300 | [diff] [blame] | 2551 | p_params->p_sb->igu_sb_id); |
Manish Chopra | cee4d26 | 2015-10-26 11:02:28 +0200 | [diff] [blame] | 2552 | |
| 2553 | return 0; |
| 2554 | } |
| 2555 | |
| 2556 | #define QED_HW_STOP_RETRY_LIMIT (10) |
| 2557 | static int qed_fastpath_stop(struct qed_dev *cdev) |
| 2558 | { |
Rahul Verma | 1558296 | 2017-04-06 15:58:29 +0300 | [diff] [blame] | 2559 | int rc; |
| 2560 | |
| 2561 | rc = qed_hw_stop_fastpath(cdev); |
| 2562 | if (rc) { |
| 2563 | DP_ERR(cdev, "Failed to stop Fastpath\n"); |
| 2564 | return rc; |
| 2565 | } |
Manish Chopra | cee4d26 | 2015-10-26 11:02:28 +0200 | [diff] [blame] | 2566 | |
| 2567 | return 0; |
| 2568 | } |
| 2569 | |
Mintz, Yuval | 3da7a37 | 2016-11-29 16:47:06 +0200 | [diff] [blame] | 2570 | static int qed_stop_txq(struct qed_dev *cdev, u8 rss_id, void *handle) |
Manish Chopra | cee4d26 | 2015-10-26 11:02:28 +0200 | [diff] [blame] | 2571 | { |
| 2572 | struct qed_hwfn *p_hwfn; |
| 2573 | int rc, hwfn_index; |
| 2574 | |
Mintz, Yuval | 3da7a37 | 2016-11-29 16:47:06 +0200 | [diff] [blame] | 2575 | hwfn_index = rss_id % cdev->num_hwfns; |
| 2576 | p_hwfn = &cdev->hwfns[hwfn_index]; |
Manish Chopra | cee4d26 | 2015-10-26 11:02:28 +0200 | [diff] [blame] | 2577 | |
Mintz, Yuval | 3da7a37 | 2016-11-29 16:47:06 +0200 | [diff] [blame] | 2578 | rc = qed_eth_tx_queue_stop(p_hwfn, handle); |
Manish Chopra | cee4d26 | 2015-10-26 11:02:28 +0200 | [diff] [blame] | 2579 | if (rc) { |
Mintz, Yuval | 3da7a37 | 2016-11-29 16:47:06 +0200 | [diff] [blame] | 2580 | DP_ERR(cdev, "Failed to stop TXQ#%02x\n", rss_id); |
Manish Chopra | cee4d26 | 2015-10-26 11:02:28 +0200 | [diff] [blame] | 2581 | return rc; |
| 2582 | } |
| 2583 | |
| 2584 | return 0; |
| 2585 | } |
| 2586 | |
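 | | /* Propagate UDP tunnel (VXLAN/GENEVE) port updates to the firmware. A
 | |  * PF needs a PTT window for the update ramrod and, when SR-IOV is
 | |  * active, also publishes the new ports to its VFs through their
 | |  * bulletin boards; a VF sends the request over the VF->PF channel, so
 | |  * it passes a NULL PTT.
 | |  */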
Manish Chopra | 464f664 | 2016-04-14 01:38:29 -0400 | [diff] [blame] | 2587 | static int qed_tunn_configure(struct qed_dev *cdev, |
| 2588 | struct qed_tunn_params *tunn_params) |
| 2589 | { |
Chopra, Manish | 19968430 | 2017-04-24 10:00:44 -0700 | [diff] [blame] | 2590 | struct qed_tunnel_info tunn_info; |
Manish Chopra | 464f664 | 2016-04-14 01:38:29 -0400 | [diff] [blame] | 2591 | int i, rc; |
| 2592 | |
| 2593 | memset(&tunn_info, 0, sizeof(tunn_info)); |
Chopra, Manish | 19968430 | 2017-04-24 10:00:44 -0700 | [diff] [blame] | 2594 | if (tunn_params->update_vxlan_port) { |
| 2595 | tunn_info.vxlan_port.b_update_port = true; |
| 2596 | tunn_info.vxlan_port.port = tunn_params->vxlan_port; |
Manish Chopra | 464f664 | 2016-04-14 01:38:29 -0400 | [diff] [blame] | 2597 | } |
| 2598 | |
Chopra, Manish | 19968430 | 2017-04-24 10:00:44 -0700 | [diff] [blame] | 2599 | if (tunn_params->update_geneve_port) { |
| 2600 | tunn_info.geneve_port.b_update_port = true; |
| 2601 | tunn_info.geneve_port.port = tunn_params->geneve_port; |
Manish Chopra | 464f664 | 2016-04-14 01:38:29 -0400 | [diff] [blame] | 2602 | } |
| 2603 | |
| 2604 | for_each_hwfn(cdev, i) { |
| 2605 | struct qed_hwfn *hwfn = &cdev->hwfns[i]; |
Manish Chopra | 4f64675 | 2017-05-23 09:41:20 +0300 | [diff] [blame] | 2606 | struct qed_ptt *p_ptt; |
Chopra, Manish | 97379f1 | 2017-04-24 10:00:48 -0700 | [diff] [blame] | 2607 | struct qed_tunnel_info *tun; |
| 2608 | |
| 2609 | tun = &hwfn->cdev->tunnel; |
Manish Chopra | 4f64675 | 2017-05-23 09:41:20 +0300 | [diff] [blame] | 2610 | if (IS_PF(cdev)) { |
| 2611 | p_ptt = qed_ptt_acquire(hwfn); |
| 2612 | if (!p_ptt) |
| 2613 | return -EAGAIN; |
| 2614 | } else { |
| 2615 | p_ptt = NULL; |
| 2616 | } |
Manish Chopra | 464f664 | 2016-04-14 01:38:29 -0400 | [diff] [blame] | 2617 | |
Manish Chopra | 4f64675 | 2017-05-23 09:41:20 +0300 | [diff] [blame] | 2618 | rc = qed_sp_pf_update_tunn_cfg(hwfn, p_ptt, &tunn_info, |
Manish Chopra | 464f664 | 2016-04-14 01:38:29 -0400 | [diff] [blame] | 2619 | QED_SPQ_MODE_EBLOCK, NULL); |
Manish Chopra | 4f64675 | 2017-05-23 09:41:20 +0300 | [diff] [blame] | 2620 | if (rc) { |
| 2621 | if (IS_PF(cdev)) |
| 2622 | qed_ptt_release(hwfn, p_ptt); |
Manish Chopra | 464f664 | 2016-04-14 01:38:29 -0400 | [diff] [blame] | 2623 | return rc; |
Manish Chopra | 4f64675 | 2017-05-23 09:41:20 +0300 | [diff] [blame] | 2624 | } |
Chopra, Manish | 97379f1 | 2017-04-24 10:00:48 -0700 | [diff] [blame] | 2625 | |
| 2626 | if (IS_PF_SRIOV(hwfn)) { |
| 2627 | u16 vxlan_port, geneve_port; |
| 2628 | int j; |
| 2629 | |
| 2630 | vxlan_port = tun->vxlan_port.port; |
| 2631 | geneve_port = tun->geneve_port.port; |
| 2632 | |
| 2633 | qed_for_each_vf(hwfn, j) { |
| 2634 | qed_iov_bulletin_set_udp_ports(hwfn, j, |
| 2635 | vxlan_port, |
| 2636 | geneve_port); |
| 2637 | } |
| 2638 | |
| 2639 | qed_schedule_iov(hwfn, QED_IOV_WQ_BULLETIN_UPDATE_FLAG); |
| 2640 | } |
Manish Chopra | 4f64675 | 2017-05-23 09:41:20 +0300 | [diff] [blame] | 2641 | if (IS_PF(cdev)) |
| 2642 | qed_ptt_release(hwfn, p_ptt); |
Manish Chopra | 464f664 | 2016-04-14 01:38:29 -0400 | [diff] [blame] | 2643 | } |
| 2644 | |
| 2645 | return 0; |
| 2646 | } |
| 2647 | |
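 | | /* Map the protocol driver's coarse rx-mode (regular, multicast
 | |  * promiscuous, fully promiscuous) onto the accept-flags vocabulary of
 | |  * the vport-update ramrod: matched unicast/multicast and broadcast
 | |  * always pass, and the promiscuous modes additionally accept unmatched
 | |  * traffic.
 | |  */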
Manish Chopra | cee4d26 | 2015-10-26 11:02:28 +0200 | [diff] [blame] | 2648 | static int qed_configure_filter_rx_mode(struct qed_dev *cdev, |
| 2649 | enum qed_filter_rx_mode_type type) |
| 2650 | { |
| 2651 | struct qed_filter_accept_flags accept_flags; |
| 2652 | |
| 2653 | memset(&accept_flags, 0, sizeof(accept_flags)); |
| 2654 | |
Yuval Mintz | 1a635e4 | 2016-08-15 10:42:43 +0300 | [diff] [blame] | 2655 | accept_flags.update_rx_mode_config = 1; |
| 2656 | accept_flags.update_tx_mode_config = 1; |
| 2657 | accept_flags.rx_accept_filter = QED_ACCEPT_UCAST_MATCHED | |
| 2658 | QED_ACCEPT_MCAST_MATCHED | |
| 2659 | QED_ACCEPT_BCAST; |
Manish Chopra | cee4d26 | 2015-10-26 11:02:28 +0200 | [diff] [blame] | 2660 | accept_flags.tx_accept_filter = QED_ACCEPT_UCAST_MATCHED | |
| 2661 | QED_ACCEPT_MCAST_MATCHED | |
| 2662 | QED_ACCEPT_BCAST; |
| 2663 | |
Mintz, Yuval | 8806787 | 2017-01-01 13:57:09 +0200 | [diff] [blame] | 2664 | if (type == QED_FILTER_RX_MODE_TYPE_PROMISC) { |
Manish Chopra | cee4d26 | 2015-10-26 11:02:28 +0200 | [diff] [blame] | 2665 | accept_flags.rx_accept_filter |= QED_ACCEPT_UCAST_UNMATCHED | |
| 2666 | QED_ACCEPT_MCAST_UNMATCHED; |
Mintz, Yuval | 8806787 | 2017-01-01 13:57:09 +0200 | [diff] [blame] | 2667 | accept_flags.tx_accept_filter |= QED_ACCEPT_MCAST_UNMATCHED; |
| 2668 | } else if (type == QED_FILTER_RX_MODE_TYPE_MULTI_PROMISC) { |
Manish Chopra | cee4d26 | 2015-10-26 11:02:28 +0200 | [diff] [blame] | 2669 | accept_flags.rx_accept_filter |= QED_ACCEPT_MCAST_UNMATCHED; |
Mintz, Yuval | 8806787 | 2017-01-01 13:57:09 +0200 | [diff] [blame] | 2670 | accept_flags.tx_accept_filter |= QED_ACCEPT_MCAST_UNMATCHED; |
| 2671 | } |
Manish Chopra | cee4d26 | 2015-10-26 11:02:28 +0200 | [diff] [blame] | 2672 | |
Yuval Mintz | 3f9b4a6 | 2016-02-18 17:00:39 +0200 | [diff] [blame] | 2673 | return qed_filter_accept_cmd(cdev, 0, accept_flags, false, false, |
Manish Chopra | cee4d26 | 2015-10-26 11:02:28 +0200 | [diff] [blame] | 2674 | QED_SPQ_MODE_CB, NULL); |
| 2675 | } |
| 2676 | |
| 2677 | static int qed_configure_filter_ucast(struct qed_dev *cdev, |
| 2678 | struct qed_filter_ucast_params *params) |
| 2679 | { |
| 2680 | struct qed_filter_ucast ucast; |
| 2681 | |
| 2682 | if (!params->vlan_valid && !params->mac_valid) { |
Yuval Mintz | 1a635e4 | 2016-08-15 10:42:43 +0300 | [diff] [blame] | 2683 | DP_NOTICE(cdev, |
| 2684 | "Tried configuring a unicast filter, but both MAC and VLAN are not set\n"); |
Manish Chopra | cee4d26 | 2015-10-26 11:02:28 +0200 | [diff] [blame] | 2685 | return -EINVAL; |
| 2686 | } |
| 2687 | |
| 2688 | memset(&ucast, 0, sizeof(ucast)); |
| 2689 | switch (params->type) { |
| 2690 | case QED_FILTER_XCAST_TYPE_ADD: |
| 2691 | ucast.opcode = QED_FILTER_ADD; |
| 2692 | break; |
| 2693 | case QED_FILTER_XCAST_TYPE_DEL: |
| 2694 | ucast.opcode = QED_FILTER_REMOVE; |
| 2695 | break; |
| 2696 | case QED_FILTER_XCAST_TYPE_REPLACE: |
| 2697 | ucast.opcode = QED_FILTER_REPLACE; |
| 2698 | break; |
 | 2699 | 	default:
 | 2700 | 		DP_NOTICE(cdev, "Unknown unicast filter type %d\n",
 | 2701 | 			  params->type);
 | | 		return -EINVAL;
 | 2702 | 	}
| 2703 | |
| 2704 | if (params->vlan_valid && params->mac_valid) { |
| 2705 | ucast.type = QED_FILTER_MAC_VLAN; |
| 2706 | ether_addr_copy(ucast.mac, params->mac); |
| 2707 | ucast.vlan = params->vlan; |
| 2708 | } else if (params->mac_valid) { |
| 2709 | ucast.type = QED_FILTER_MAC; |
| 2710 | ether_addr_copy(ucast.mac, params->mac); |
| 2711 | } else { |
| 2712 | ucast.type = QED_FILTER_VLAN; |
| 2713 | ucast.vlan = params->vlan; |
| 2714 | } |
| 2715 | |
| 2716 | ucast.is_rx_filter = true; |
| 2717 | ucast.is_tx_filter = true; |
| 2718 | |
| 2719 | return qed_filter_ucast_cmd(cdev, &ucast, QED_SPQ_MODE_CB, NULL); |
| 2720 | } |
| 2721 | |
| 2722 | static int qed_configure_filter_mcast(struct qed_dev *cdev, |
| 2723 | struct qed_filter_mcast_params *params) |
| 2724 | { |
| 2725 | struct qed_filter_mcast mcast; |
| 2726 | int i; |
| 2727 | |
| 2728 | memset(&mcast, 0, sizeof(mcast)); |
| 2729 | switch (params->type) { |
| 2730 | case QED_FILTER_XCAST_TYPE_ADD: |
| 2731 | mcast.opcode = QED_FILTER_ADD; |
| 2732 | break; |
| 2733 | case QED_FILTER_XCAST_TYPE_DEL: |
| 2734 | mcast.opcode = QED_FILTER_REMOVE; |
| 2735 | break; |
 | 2736 | 	default:
 | 2737 | 		DP_NOTICE(cdev, "Unknown multicast filter type %d\n",
 | 2738 | 			  params->type);
 | | 		return -EINVAL;
 | 2739 | 	}
| 2740 | |
| 2741 | mcast.num_mc_addrs = params->num; |
| 2742 | for (i = 0; i < mcast.num_mc_addrs; i++) |
| 2743 | ether_addr_copy(mcast.mac[i], params->mac[i]); |
| 2744 | |
Yuval Mintz | 1a635e4 | 2016-08-15 10:42:43 +0300 | [diff] [blame] | 2745 | return qed_filter_mcast_cmd(cdev, &mcast, QED_SPQ_MODE_CB, NULL); |
Manish Chopra | cee4d26 | 2015-10-26 11:02:28 +0200 | [diff] [blame] | 2746 | } |
| 2747 | |
| 2748 | static int qed_configure_filter(struct qed_dev *cdev, |
| 2749 | struct qed_filter_params *params) |
| 2750 | { |
| 2751 | enum qed_filter_rx_mode_type accept_flags; |
| 2752 | |
| 2753 | switch (params->type) { |
| 2754 | case QED_FILTER_TYPE_UCAST: |
| 2755 | return qed_configure_filter_ucast(cdev, ¶ms->filter.ucast); |
| 2756 | case QED_FILTER_TYPE_MCAST: |
| 2757 | return qed_configure_filter_mcast(cdev, ¶ms->filter.mcast); |
| 2758 | case QED_FILTER_TYPE_RX_MODE: |
| 2759 | accept_flags = params->filter.accept_flags; |
| 2760 | return qed_configure_filter_rx_mode(cdev, accept_flags); |
| 2761 | default: |
Yuval Mintz | 1a635e4 | 2016-08-15 10:42:43 +0300 | [diff] [blame] | 2762 | DP_NOTICE(cdev, "Unknown filter type %d\n", (int)params->type); |
Manish Chopra | cee4d26 | 2015-10-26 11:02:28 +0200 | [diff] [blame] | 2763 | return -EINVAL; |
| 2764 | } |
| 2765 | } |
| 2766 | |
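 | | /* Enable or disable the GFT searcher used by aRFS on the leading hwfn.
 | |  * All protocols (TCP/UDP over IPv4/IPv6) are opened up; 'mode' selects
 | |  * how the searcher classifies flows, or disables it entirely.
 | |  */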
Tomer Tayar | da09091 | 2017-12-27 19:30:07 +0200 | [diff] [blame] | 2767 | static int qed_configure_arfs_searcher(struct qed_dev *cdev, |
| 2768 | enum qed_filter_config_mode mode) |
Chopra, Manish | d51e4af | 2017-04-13 04:54:44 -0700 | [diff] [blame] | 2769 | { |
| 2770 | struct qed_hwfn *p_hwfn = QED_LEADING_HWFN(cdev); |
| 2771 | struct qed_arfs_config_params arfs_config_params; |
| 2772 | |
| 2773 | memset(&arfs_config_params, 0, sizeof(arfs_config_params)); |
| 2774 | arfs_config_params.tcp = true; |
| 2775 | arfs_config_params.udp = true; |
| 2776 | arfs_config_params.ipv4 = true; |
| 2777 | arfs_config_params.ipv6 = true; |
Tomer Tayar | da09091 | 2017-12-27 19:30:07 +0200 | [diff] [blame] | 2778 | arfs_config_params.mode = mode; |
Chopra, Manish | d51e4af | 2017-04-13 04:54:44 -0700 | [diff] [blame] | 2779 | qed_arfs_mode_configure(p_hwfn, p_hwfn->p_arfs_ptt, |
| 2780 | &arfs_config_params); |
| 2781 | return 0; |
| 2782 | } |
| 2783 | |
| 2784 | static void |
| 2785 | qed_arfs_sp_response_handler(struct qed_hwfn *p_hwfn, |
Tomer Tayar | da09091 | 2017-12-27 19:30:07 +0200 | [diff] [blame] | 2786 | void *cookie, |
| 2787 | union event_ring_data *data, u8 fw_return_code) |
Chopra, Manish | d51e4af | 2017-04-13 04:54:44 -0700 | [diff] [blame] | 2788 | { |
| 2789 | struct qed_common_cb_ops *op = p_hwfn->cdev->protocol_ops.common; |
| 2790 | void *dev = p_hwfn->cdev->ops_cookie; |
| 2791 | |
| 2792 | op->arfs_filter_op(dev, cookie, fw_return_code); |
| 2793 | } |
| 2794 | |
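 | | /* External entry point for adding/removing an n-tuple filter. For a
 | |  * VF-owned filter the vport is derived from the VF id (vf_id + 1,
 | |  * skipping the PF's vport) and the queue choice is left to the VF's
 | |  * own RSS; completion is reported back through the common
 | |  * arfs_filter_op callback via qed_arfs_sp_response_handler().
 | |  */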
Tomer Tayar | da09091 | 2017-12-27 19:30:07 +0200 | [diff] [blame] | 2795 | static int |
| 2796 | qed_ntuple_arfs_filter_config(struct qed_dev *cdev, |
| 2797 | void *cookie, |
| 2798 | struct qed_ntuple_filter_params *params) |
Chopra, Manish | d51e4af | 2017-04-13 04:54:44 -0700 | [diff] [blame] | 2799 | { |
| 2800 | struct qed_hwfn *p_hwfn = QED_LEADING_HWFN(cdev); |
| 2801 | struct qed_spq_comp_cb cb; |
| 2802 | int rc = -EINVAL; |
| 2803 | |
| 2804 | cb.function = qed_arfs_sp_response_handler; |
| 2805 | cb.cookie = cookie; |
| 2806 | |
Tomer Tayar | da09091 | 2017-12-27 19:30:07 +0200 | [diff] [blame] | 2807 | if (params->b_is_vf) { |
| 2808 | if (!qed_iov_is_valid_vfid(p_hwfn, params->vf_id, false, |
| 2809 | false)) { |
| 2810 | DP_INFO(p_hwfn, "vfid 0x%02x is out of bounds\n", |
| 2811 | params->vf_id); |
| 2812 | return rc; |
| 2813 | } |
| 2814 | |
| 2815 | params->vport_id = params->vf_id + 1; |
| 2816 | params->qid = QED_RFS_NTUPLE_QID_RSS; |
| 2817 | } |
| 2818 | |
| 2819 | rc = qed_configure_rfs_ntuple_filter(p_hwfn, &cb, params); |
Chopra, Manish | d51e4af | 2017-04-13 04:54:44 -0700 | [diff] [blame] | 2820 | if (rc) |
| 2821 | DP_NOTICE(p_hwfn, |
| 2822 | "Failed to issue a-RFS filter configuration\n"); |
| 2823 | else |
| 2824 | DP_VERBOSE(p_hwfn, NETIF_MSG_DRV, |
| 2825 | "Successfully issued a-RFS filter configuration\n"); |
| 2826 | |
| 2827 | return rc; |
| 2828 | } |
| 2829 | |
Rahul Verma | bf5a94b | 2017-07-26 06:07:14 -0700 | [diff] [blame] | 2830 | static int qed_get_coalesce(struct qed_dev *cdev, u16 *coal, void *handle) |
| 2831 | { |
| 2832 | struct qed_queue_cid *p_cid = handle; |
| 2833 | struct qed_hwfn *p_hwfn; |
| 2834 | int rc; |
| 2835 | |
| 2836 | p_hwfn = p_cid->p_owner; |
| 2837 | rc = qed_get_queue_coalesce(p_hwfn, coal, handle); |
| 2838 | if (rc) |
Colin Ian King | 9e4a561 | 2017-08-30 12:40:12 +0100 | [diff] [blame] | 2839 | DP_NOTICE(p_hwfn, "Unable to read queue coalescing\n"); |
Rahul Verma | bf5a94b | 2017-07-26 06:07:14 -0700 | [diff] [blame] | 2840 | |
| 2841 | return rc; |
| 2842 | } |
| 2843 | |
Manish Chopra | cee4d26 | 2015-10-26 11:02:28 +0200 | [diff] [blame] | 2844 | static int qed_fp_cqe_completion(struct qed_dev *dev, |
Yuval Mintz | 1a635e4 | 2016-08-15 10:42:43 +0300 | [diff] [blame] | 2845 | u8 rss_id, struct eth_slow_path_rx_cqe *cqe) |
Manish Chopra | cee4d26 | 2015-10-26 11:02:28 +0200 | [diff] [blame] | 2846 | { |
| 2847 | return qed_eth_cqe_completion(&dev->hwfns[rss_id % dev->num_hwfns], |
| 2848 | cqe); |
| 2849 | } |
| 2850 | |
Yuval Mintz | 0b55e27 | 2016-05-11 16:36:15 +0300 | [diff] [blame] | 2851 | #ifdef CONFIG_QED_SRIOV |
| 2852 | extern const struct qed_iov_hv_ops qed_iov_ops_pass; |
| 2853 | #endif |
| 2854 | |
Sudarsana Reddy Kalluru | a1d8d8a | 2016-06-08 06:22:11 -0400 | [diff] [blame] | 2855 | #ifdef CONFIG_DCB |
| 2856 | extern const struct qed_eth_dcbnl_ops qed_dcbnl_ops_pass; |
| 2857 | #endif |
| 2858 | |
Sudarsana Reddy Kalluru | c78c70f | 2017-02-15 10:24:10 +0200 | [diff] [blame] | 2859 | extern const struct qed_eth_ptp_ops qed_ptp_ops_pass; |
| 2860 | |
Yuval Mintz | 25c089d | 2015-10-26 11:02:26 +0200 | [diff] [blame] | 2861 | static const struct qed_eth_ops qed_eth_ops_pass = { |
| 2862 | .common = &qed_common_ops_pass, |
Yuval Mintz | 0b55e27 | 2016-05-11 16:36:15 +0300 | [diff] [blame] | 2863 | #ifdef CONFIG_QED_SRIOV |
| 2864 | .iov = &qed_iov_ops_pass, |
| 2865 | #endif |
Sudarsana Reddy Kalluru | a1d8d8a | 2016-06-08 06:22:11 -0400 | [diff] [blame] | 2866 | #ifdef CONFIG_DCB |
| 2867 | .dcb = &qed_dcbnl_ops_pass, |
| 2868 | #endif |
Sudarsana Reddy Kalluru | c78c70f | 2017-02-15 10:24:10 +0200 | [diff] [blame] | 2869 | .ptp = &qed_ptp_ops_pass, |
Yuval Mintz | 25c089d | 2015-10-26 11:02:26 +0200 | [diff] [blame] | 2870 | .fill_dev_info = &qed_fill_eth_dev_info, |
Yuval Mintz | cc875c2 | 2015-10-26 11:02:31 +0200 | [diff] [blame] | 2871 | .register_ops = &qed_register_eth_ops, |
Yuval Mintz | eff1696 | 2016-05-11 16:36:21 +0300 | [diff] [blame] | 2872 | .check_mac = &qed_check_mac, |
Manish Chopra | cee4d26 | 2015-10-26 11:02:28 +0200 | [diff] [blame] | 2873 | .vport_start = &qed_start_vport, |
| 2874 | .vport_stop = &qed_stop_vport, |
| 2875 | .vport_update = &qed_update_vport, |
| 2876 | .q_rx_start = &qed_start_rxq, |
| 2877 | .q_rx_stop = &qed_stop_rxq, |
| 2878 | .q_tx_start = &qed_start_txq, |
| 2879 | .q_tx_stop = &qed_stop_txq, |
| 2880 | .filter_config = &qed_configure_filter, |
| 2881 | .fastpath_stop = &qed_fastpath_stop, |
| 2882 | .eth_cqe_completion = &qed_fp_cqe_completion, |
Manish Chopra | 9df2ed0 | 2015-10-26 11:02:33 +0200 | [diff] [blame] | 2883 | .get_vport_stats = &qed_get_vport_stats, |
Manish Chopra | 464f664 | 2016-04-14 01:38:29 -0400 | [diff] [blame] | 2884 | .tunn_config = &qed_tunn_configure, |
Chopra, Manish | d51e4af | 2017-04-13 04:54:44 -0700 | [diff] [blame] | 2885 | .ntuple_filter_config = &qed_ntuple_arfs_filter_config, |
| 2886 | .configure_arfs_searcher = &qed_configure_arfs_searcher, |
Rahul Verma | bf5a94b | 2017-07-26 06:07:14 -0700 | [diff] [blame] | 2887 | .get_coalesce = &qed_get_coalesce, |
Yuval Mintz | 25c089d | 2015-10-26 11:02:26 +0200 | [diff] [blame] | 2888 | }; |
| 2889 | |
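 | | /* A minimal consumer-side sketch (illustrative only - the qede driver
 | |  * is the real consumer of these ops; 'cdev' is assumed to be a probed
 | |  * qed device):
 | |  *
 | |  *	const struct qed_eth_ops *ops = qed_get_eth_ops();
 | |  *	struct qed_dev_eth_info info;
 | |  *
 | |  *	if (ops && !ops->fill_dev_info(cdev, &info))
 | |  *		pr_info("qed: %u L2 queues, %u TCs\n",
 | |  *			info.num_queues, info.num_tc);
 | |  */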
Rahul Verma | 9511434 | 2016-04-10 12:42:59 +0300 | [diff] [blame] | 2890 | const struct qed_eth_ops *qed_get_eth_ops(void) |
Yuval Mintz | 25c089d | 2015-10-26 11:02:26 +0200 | [diff] [blame] | 2891 | { |
Yuval Mintz | 25c089d | 2015-10-26 11:02:26 +0200 | [diff] [blame] | 2892 | return &qed_eth_ops_pass; |
| 2893 | } |
| 2894 | EXPORT_SYMBOL(qed_get_eth_ops); |
| 2895 | |
| 2896 | void qed_put_eth_ops(void) |
| 2897 | { |
| 2898 | /* TODO - reference count for module? */ |
| 2899 | } |
| 2900 | EXPORT_SYMBOL(qed_put_eth_ops); |