blob: bf8fca7d874f12b897ac36c159fbdb4905ec8de3 [file] [log] [blame]
Rajesh Borundia02feda12013-03-29 05:46:33 +00001/*
2 * QLogic qlcnic NIC Driver
3 * Copyright (c) 2009-2013 QLogic Corporation
4 *
5 * See LICENSE.qlcnic for copyright and licensing details.
6 */
7
8#include "qlcnic_sriov.h"
9#include "qlcnic.h"
Rajesh Borundiaf8468332013-03-29 05:46:34 +000010#include "qlcnic_83xx_hw.h"
Rajesh Borundia02feda12013-03-29 05:46:33 +000011#include <linux/types.h>
12
Rajesh Borundiaf197a7a2013-03-29 05:46:36 +000013#define QLC_BC_COMMAND 0
14#define QLC_BC_RESPONSE 1
15
16#define QLC_MBOX_RESP_TIMEOUT (10 * HZ)
17#define QLC_MBOX_CH_FREE_TIMEOUT (10 * HZ)
18
19#define QLC_BC_MSG 0
20#define QLC_BC_CFREE 1
Rajesh Borundia97d81052013-04-19 07:01:09 +000021#define QLC_BC_FLR 2
Rajesh Borundiaf197a7a2013-03-29 05:46:36 +000022#define QLC_BC_HDR_SZ 16
23#define QLC_BC_PAYLOAD_SZ (1024 - QLC_BC_HDR_SZ)
24
Rajesh Borundia7cb03b22013-03-29 05:46:37 +000025#define QLC_DEFAULT_RCV_DESCRIPTORS_SRIOV_VF 2048
26#define QLC_DEFAULT_JUMBO_RCV_DESCRIPTORS_SRIOV_VF 512
27
Rajesh Borundiaf036e4f2013-04-19 07:01:10 +000028#define QLC_83XX_VF_RESET_FAIL_THRESH 8
29#define QLC_BC_CMD_MAX_RETRY_CNT 5
30
Rajesh Borundia91b72822013-04-19 07:01:12 +000031static void qlcnic_sriov_vf_free_mac_list(struct qlcnic_adapter *);
32static int qlcnic_sriov_alloc_bc_mbx_args(struct qlcnic_cmd_args *, u32);
Rajesh Borundiaf036e4f2013-04-19 07:01:10 +000033static void qlcnic_sriov_vf_poll_dev_state(struct work_struct *);
34static void qlcnic_sriov_vf_cancel_fw_work(struct qlcnic_adapter *);
Rajesh Borundia97d81052013-04-19 07:01:09 +000035static void qlcnic_sriov_cleanup_transaction(struct qlcnic_bc_trans *);
Manish Choprae5c4e6c2013-08-02 00:57:40 -040036static int qlcnic_sriov_issue_cmd(struct qlcnic_adapter *,
Rajesh Borundiaf197a7a2013-03-29 05:46:36 +000037 struct qlcnic_cmd_args *);
Sucheta Chakraborty1267ff92013-05-23 21:04:29 +000038static void qlcnic_sriov_process_bc_cmd(struct work_struct *);
Rajesh Borundiaf197a7a2013-03-29 05:46:36 +000039
Rajesh Borundiaf8468332013-03-29 05:46:34 +000040static struct qlcnic_hardware_ops qlcnic_sriov_vf_hw_ops = {
41 .read_crb = qlcnic_83xx_read_crb,
42 .write_crb = qlcnic_83xx_write_crb,
43 .read_reg = qlcnic_83xx_rd_reg_indirect,
44 .write_reg = qlcnic_83xx_wrt_reg_indirect,
45 .get_mac_address = qlcnic_83xx_get_mac_address,
46 .setup_intr = qlcnic_83xx_setup_intr,
47 .alloc_mbx_args = qlcnic_83xx_alloc_mbx_args,
Manish Choprae5c4e6c2013-08-02 00:57:40 -040048 .mbx_cmd = qlcnic_sriov_issue_cmd,
Rajesh Borundiaf8468332013-03-29 05:46:34 +000049 .get_func_no = qlcnic_83xx_get_func_no,
50 .api_lock = qlcnic_83xx_cam_lock,
51 .api_unlock = qlcnic_83xx_cam_unlock,
52 .process_lb_rcv_ring_diag = qlcnic_83xx_process_rcv_ring_diag,
53 .create_rx_ctx = qlcnic_83xx_create_rx_ctx,
54 .create_tx_ctx = qlcnic_83xx_create_tx_ctx,
Rajesh Borundia7cb03b22013-03-29 05:46:37 +000055 .del_rx_ctx = qlcnic_83xx_del_rx_ctx,
56 .del_tx_ctx = qlcnic_83xx_del_tx_ctx,
Rajesh Borundiaf8468332013-03-29 05:46:34 +000057 .setup_link_event = qlcnic_83xx_setup_link_event,
58 .get_nic_info = qlcnic_83xx_get_nic_info,
59 .get_pci_info = qlcnic_83xx_get_pci_info,
60 .set_nic_info = qlcnic_83xx_set_nic_info,
61 .change_macvlan = qlcnic_83xx_sre_macaddr_change,
62 .napi_enable = qlcnic_83xx_napi_enable,
63 .napi_disable = qlcnic_83xx_napi_disable,
64 .config_intr_coal = qlcnic_83xx_config_intr_coal,
65 .config_rss = qlcnic_83xx_config_rss,
66 .config_hw_lro = qlcnic_83xx_config_hw_lro,
67 .config_promisc_mode = qlcnic_83xx_nic_set_promisc,
68 .change_l2_filter = qlcnic_83xx_change_l2_filter,
69 .get_board_info = qlcnic_83xx_get_port_info,
Rajesh Borundia91b72822013-04-19 07:01:12 +000070 .free_mac_list = qlcnic_sriov_vf_free_mac_list,
Rajesh Borundiaf8468332013-03-29 05:46:34 +000071};
72
/* Driver-level (nic template) callbacks for the SR-IOV VF personality.
 * IDC/firmware polling work is cancelled through the VF-specific
 * qlcnic_sriov_vf_cancel_fw_work; shutdown/resume also have VF variants.
 */
static struct qlcnic_nic_template qlcnic_sriov_vf_ops = {
	.config_bridged_mode	= qlcnic_config_bridged_mode,
	.config_led		= qlcnic_config_led,
	.cancel_idc_work	= qlcnic_sriov_vf_cancel_fw_work,
	.napi_add		= qlcnic_83xx_napi_add,
	.napi_del		= qlcnic_83xx_napi_del,
	.shutdown		= qlcnic_sriov_vf_shutdown,
	.resume			= qlcnic_sriov_vf_resume,
	.config_ipaddr		= qlcnic_83xx_config_ipaddr,
	.clear_legacy_intr	= qlcnic_83xx_clear_legacy_intr,
};
84
Rajesh Borundiaf197a7a2013-03-29 05:46:36 +000085static const struct qlcnic_mailbox_metadata qlcnic_sriov_bc_mbx_tbl[] = {
86 {QLCNIC_BC_CMD_CHANNEL_INIT, 2, 2},
87 {QLCNIC_BC_CMD_CHANNEL_TERM, 2, 2},
Rajesh Borundia91b72822013-04-19 07:01:12 +000088 {QLCNIC_BC_CMD_GET_ACL, 3, 14},
89 {QLCNIC_BC_CMD_CFG_GUEST_VLAN, 2, 2},
Rajesh Borundiaf197a7a2013-03-29 05:46:36 +000090};
91
92static inline bool qlcnic_sriov_bc_msg_check(u32 val)
93{
94 return (val & (1 << QLC_BC_MSG)) ? true : false;
95}
96
97static inline bool qlcnic_sriov_channel_free_check(u32 val)
98{
99 return (val & (1 << QLC_BC_CFREE)) ? true : false;
100}
101
Rajesh Borundia97d81052013-04-19 07:01:09 +0000102static inline bool qlcnic_sriov_flr_check(u32 val)
103{
104 return (val & (1 << QLC_BC_FLR)) ? true : false;
105}
106
Rajesh Borundiaf197a7a2013-03-29 05:46:36 +0000107static inline u8 qlcnic_sriov_target_func_id(u32 val)
108{
109 return (val >> 4) & 0xff;
110}
111
/* Compute the PCI devfn of virtual function @vf_id from the PF's SR-IOV
 * extended capability (VF offset + stride), per the PCIe SR-IOV spec.
 * Returns 0 when called on a VF itself (a VF has no sub-VFs).
 *
 * NOTE(review): pci_find_ext_capability() can return 0 when the capability
 * is absent; the code assumes a PF always exposes SR-IOV here — confirm.
 */
static int qlcnic_sriov_virtid_fn(struct qlcnic_adapter *adapter, int vf_id)
{
	struct pci_dev *dev = adapter->pdev;
	int pos;
	u16 stride, offset;

	if (qlcnic_sriov_vf_check(adapter))
		return 0;

	pos = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_SRIOV);
	pci_read_config_word(dev, pos + PCI_SRIOV_VF_OFFSET, &offset);
	pci_read_config_word(dev, pos + PCI_SRIOV_VF_STRIDE, &stride);

	/* devfn arithmetic wraps within a bus's 8-bit devfn space */
	return (dev->devfn + offset + stride * vf_id) & 0xff;
}
127
Rajesh Borundia02feda12013-03-29 05:46:33 +0000128int qlcnic_sriov_init(struct qlcnic_adapter *adapter, int num_vfs)
129{
130 struct qlcnic_sriov *sriov;
Rajesh Borundiaf197a7a2013-03-29 05:46:36 +0000131 struct qlcnic_back_channel *bc;
132 struct workqueue_struct *wq;
133 struct qlcnic_vport *vp;
134 struct qlcnic_vf_info *vf;
135 int err, i;
Rajesh Borundia02feda12013-03-29 05:46:33 +0000136
137 if (!qlcnic_sriov_enable_check(adapter))
138 return -EIO;
139
140 sriov = kzalloc(sizeof(struct qlcnic_sriov), GFP_KERNEL);
141 if (!sriov)
142 return -ENOMEM;
143
144 adapter->ahw->sriov = sriov;
145 sriov->num_vfs = num_vfs;
Rajesh Borundiaf197a7a2013-03-29 05:46:36 +0000146 bc = &sriov->bc;
147 sriov->vf_info = kzalloc(sizeof(struct qlcnic_vf_info) *
148 num_vfs, GFP_KERNEL);
149 if (!sriov->vf_info) {
150 err = -ENOMEM;
151 goto qlcnic_free_sriov;
152 }
153
154 wq = create_singlethread_workqueue("bc-trans");
155 if (wq == NULL) {
156 err = -ENOMEM;
157 dev_err(&adapter->pdev->dev,
158 "Cannot create bc-trans workqueue\n");
159 goto qlcnic_free_vf_info;
160 }
161
162 bc->bc_trans_wq = wq;
163
Rajesh Borundiae8b508e2013-03-29 05:46:38 +0000164 wq = create_singlethread_workqueue("async");
165 if (wq == NULL) {
166 err = -ENOMEM;
167 dev_err(&adapter->pdev->dev, "Cannot create async workqueue\n");
168 goto qlcnic_destroy_trans_wq;
169 }
170
171 bc->bc_async_wq = wq;
172 INIT_LIST_HEAD(&bc->async_list);
173
Rajesh Borundiaf197a7a2013-03-29 05:46:36 +0000174 for (i = 0; i < num_vfs; i++) {
175 vf = &sriov->vf_info[i];
176 vf->adapter = adapter;
177 vf->pci_func = qlcnic_sriov_virtid_fn(adapter, i);
178 mutex_init(&vf->send_cmd_lock);
Manish Chopra154d0c82013-12-17 09:01:53 -0500179 mutex_init(&vf->vlan_list_lock);
Rajesh Borundiaf197a7a2013-03-29 05:46:36 +0000180 INIT_LIST_HEAD(&vf->rcv_act.wait_list);
181 INIT_LIST_HEAD(&vf->rcv_pend.wait_list);
182 spin_lock_init(&vf->rcv_act.lock);
183 spin_lock_init(&vf->rcv_pend.lock);
184 init_completion(&vf->ch_free_cmpl);
185
Sucheta Chakraborty1267ff92013-05-23 21:04:29 +0000186 INIT_WORK(&vf->trans_work, qlcnic_sriov_process_bc_cmd);
187
Rajesh Borundiaf197a7a2013-03-29 05:46:36 +0000188 if (qlcnic_sriov_pf_check(adapter)) {
189 vp = kzalloc(sizeof(struct qlcnic_vport), GFP_KERNEL);
190 if (!vp) {
191 err = -ENOMEM;
Rajesh Borundiae8b508e2013-03-29 05:46:38 +0000192 goto qlcnic_destroy_async_wq;
Rajesh Borundiaf197a7a2013-03-29 05:46:36 +0000193 }
194 sriov->vf_info[i].vp = vp;
Rajesh Borundia4000e7a2013-04-19 07:01:11 +0000195 vp->max_tx_bw = MAX_BW;
Rajesh Borundiaa80be5a2013-05-23 21:04:25 +0000196 vp->spoofchk = true;
Rajesh Borundiaf197a7a2013-03-29 05:46:36 +0000197 random_ether_addr(vp->mac);
198 dev_info(&adapter->pdev->dev,
199 "MAC Address %pM is configured for VF %d\n",
200 vp->mac, i);
201 }
202 }
203
Rajesh Borundia02feda12013-03-29 05:46:33 +0000204 return 0;
Rajesh Borundiaf197a7a2013-03-29 05:46:36 +0000205
Rajesh Borundiae8b508e2013-03-29 05:46:38 +0000206qlcnic_destroy_async_wq:
207 destroy_workqueue(bc->bc_async_wq);
208
Rajesh Borundiaf197a7a2013-03-29 05:46:36 +0000209qlcnic_destroy_trans_wq:
210 destroy_workqueue(bc->bc_trans_wq);
211
212qlcnic_free_vf_info:
213 kfree(sriov->vf_info);
214
215qlcnic_free_sriov:
216 kfree(adapter->ahw->sriov);
217 return err;
Rajesh Borundia02feda12013-03-29 05:46:33 +0000218}
219
/* Drain a back-channel transaction list, freeing each transaction's
 * mailbox arg buffers (via a temporary cmd_args wrapper) and the
 * transaction itself.  The list lock is held with IRQs off for the whole
 * drain since producers may run from interrupt context.
 */
void qlcnic_sriov_cleanup_list(struct qlcnic_trans_list *t_list)
{
	struct qlcnic_bc_trans *trans;
	struct qlcnic_cmd_args cmd;
	unsigned long flags;

	spin_lock_irqsave(&t_list->lock, flags);

	while (!list_empty(&t_list->wait_list)) {
		trans = list_first_entry(&t_list->wait_list,
					 struct qlcnic_bc_trans, list);
		list_del(&trans->list);
		t_list->count--;
		/* req/rsp payloads double as the cmd arg arrays; wrap them
		 * so the standard free helper can release them.
		 */
		cmd.req.arg = (u32 *)trans->req_pay;
		cmd.rsp.arg = (u32 *)trans->rsp_pay;
		qlcnic_free_mbx_args(&cmd);
		qlcnic_sriov_cleanup_transaction(trans);
	}

	spin_unlock_irqrestore(&t_list->lock, flags);
}
241
/* Tear down all SR-IOV state allocated by qlcnic_sriov_init().
 * Ordering matters: async work is flushed first, then per-VF pending and
 * active transaction lists are drained around cancelling the trans work
 * item, and only then are the workqueues and memory released.
 */
void __qlcnic_sriov_cleanup(struct qlcnic_adapter *adapter)
{
	struct qlcnic_sriov *sriov = adapter->ahw->sriov;
	struct qlcnic_back_channel *bc = &sriov->bc;
	struct qlcnic_vf_info *vf;
	int i;

	if (!qlcnic_sriov_enable_check(adapter))
		return;

	qlcnic_sriov_cleanup_async_list(bc);
	destroy_workqueue(bc->bc_async_wq);

	for (i = 0; i < sriov->num_vfs; i++) {
		vf = &sriov->vf_info[i];
		/* Drain pending before cancelling the worker, active after:
		 * the worker moves entries from pend to act.
		 */
		qlcnic_sriov_cleanup_list(&vf->rcv_pend);
		cancel_work_sync(&vf->trans_work);
		qlcnic_sriov_cleanup_list(&vf->rcv_act);
	}

	destroy_workqueue(bc->bc_trans_wq);

	for (i = 0; i < sriov->num_vfs; i++)
		kfree(sriov->vf_info[i].vp);

	kfree(sriov->vf_info);
	kfree(adapter->ahw->sriov);
}
270
/* VF-side cleanup: notify the PF that the channel is going away, disable
 * back-channel interrupts, then free all common SR-IOV state.  The TERM
 * command must go out while the channel still works, so order is fixed.
 */
static void qlcnic_sriov_vf_cleanup(struct qlcnic_adapter *adapter)
{
	qlcnic_sriov_channel_cfg_cmd(adapter, QLCNIC_BC_CMD_CHANNEL_TERM);
	qlcnic_sriov_cfg_bc_intr(adapter, 0);
	__qlcnic_sriov_cleanup(adapter);
}
277
/* Common SR-IOV teardown entry point; dispatches to the PF or VF variant.
 * Safe to call when SR-IOV was never set up (sriov == NULL).
 */
void qlcnic_sriov_cleanup(struct qlcnic_adapter *adapter)
{
	struct qlcnic_sriov *sriov = adapter->ahw->sriov;

	if (!sriov)
		return;

	qlcnic_sriov_free_vlans(adapter);

	if (qlcnic_sriov_pf_check(adapter))
		qlcnic_sriov_pf_cleanup(adapter);

	if (qlcnic_sriov_vf_check(adapter))
		qlcnic_sriov_vf_cleanup(adapter);
}
293
/* Post one back-channel message fragment (@hdr + @pay, @size u32 words of
 * payload) to the mailbox, addressed to PCI function @pci_func.  The
 * command is enqueued on the shared mailbox worker and we wait (bounded)
 * for its completion; on timeout the mailbox work queue is flushed so the
 * stale entry is consumed.  Returns the mailbox response opcode.
 */
static int qlcnic_sriov_post_bc_msg(struct qlcnic_adapter *adapter, u32 *hdr,
				    u32 *pay, u8 pci_func, u8 size)
{
	struct qlcnic_hardware_context *ahw = adapter->ahw;
	struct qlcnic_mailbox *mbx = ahw->mailbox;
	struct qlcnic_cmd_args cmd;
	unsigned long timeout;
	int err;

	memset(&cmd, 0, sizeof(struct qlcnic_cmd_args));
	cmd.hdr = hdr;
	cmd.pay = pay;
	cmd.pay_size = size;
	cmd.func_num = pci_func;
	cmd.op_type = QLC_83XX_MBX_POST_BC_OP;
	cmd.cmd_op = ((struct qlcnic_bc_hdr *)hdr)->cmd_op;

	err = mbx->ops->enqueue_cmd(adapter, &cmd, &timeout);
	if (err) {
		dev_err(&adapter->pdev->dev,
			"%s: Mailbox not available, cmd_op=0x%x, cmd_type=0x%x, pci_func=0x%x, op_mode=0x%x\n",
			__func__, cmd.cmd_op, cmd.type, ahw->pci_func,
			ahw->op_mode);
		return err;
	}

	if (!wait_for_completion_timeout(&cmd.completion, timeout)) {
		dev_err(&adapter->pdev->dev,
			"%s: Mailbox command timed out, cmd_op=0x%x, cmd_type=0x%x, pci_func=0x%x, op_mode=0x%x\n",
			__func__, cmd.cmd_op, cmd.type, ahw->pci_func,
			ahw->op_mode);
		flush_workqueue(mbx->work_q);
	}

	return cmd.rsp_opcode;
}
330
/* Program the VF's default and maximum ring descriptor counts.  VFs use
 * smaller default receive rings than the PF (2048 normal / 512 jumbo).
 */
static void qlcnic_sriov_vf_cfg_buff_desc(struct qlcnic_adapter *adapter)
{
	adapter->num_rxd = QLC_DEFAULT_RCV_DESCRIPTORS_SRIOV_VF;
	adapter->max_rxd = MAX_RCV_DESCRIPTORS_10G;
	adapter->num_jumbo_rxd = QLC_DEFAULT_JUMBO_RCV_DESCRIPTORS_SRIOV_VF;
	adapter->max_jumbo_rxd = MAX_JUMBO_RCV_DESCRIPTORS_10G;
	adapter->num_txd = MAX_CMD_DESCRIPTORS;
	adapter->max_rds_rings = MAX_RDS_RINGS;
}
340
/* Query the firmware for vport @vport_id's resource limits and fill
 * @npar_info.  rsp.arg[2]'s low 16 bits are a validity bitmap: each BIT_n
 * gates one field of the response, so only fields the firmware marked
 * valid are copied.  The trailing four fields are always reported.
 * Returns 0 on success or a negative errno.
 */
int qlcnic_sriov_get_vf_vport_info(struct qlcnic_adapter *adapter,
				   struct qlcnic_info *npar_info, u16 vport_id)
{
	struct device *dev = &adapter->pdev->dev;
	struct qlcnic_cmd_args cmd;
	int err;
	u32 status;

	err = qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_GET_NIC_INFO);
	if (err)
		return err;

	/* arg[1]: vport id in the high word, query-type 0x1 in the low */
	cmd.req.arg[1] = vport_id << 16 | 0x1;
	err = qlcnic_issue_cmd(adapter, &cmd);
	if (err) {
		dev_err(&adapter->pdev->dev,
			"Failed to get vport info, err=%d\n", err);
		qlcnic_free_mbx_args(&cmd);
		return err;
	}

	status = cmd.rsp.arg[2] & 0xffff;
	if (status & BIT_0)
		npar_info->min_tx_bw = MSW(cmd.rsp.arg[2]);
	if (status & BIT_1)
		npar_info->max_tx_bw = LSW(cmd.rsp.arg[3]);
	if (status & BIT_2)
		npar_info->max_tx_ques = MSW(cmd.rsp.arg[3]);
	if (status & BIT_3)
		npar_info->max_tx_mac_filters = LSW(cmd.rsp.arg[4]);
	if (status & BIT_4)
		npar_info->max_rx_mcast_mac_filters = MSW(cmd.rsp.arg[4]);
	if (status & BIT_5)
		npar_info->max_rx_ucast_mac_filters = LSW(cmd.rsp.arg[5]);
	if (status & BIT_6)
		npar_info->max_rx_ip_addr = MSW(cmd.rsp.arg[5]);
	if (status & BIT_7)
		npar_info->max_rx_lro_flow = LSW(cmd.rsp.arg[6]);
	if (status & BIT_8)
		npar_info->max_rx_status_rings = MSW(cmd.rsp.arg[6]);
	if (status & BIT_9)
		npar_info->max_rx_buf_rings = LSW(cmd.rsp.arg[7]);

	npar_info->max_rx_ques = MSW(cmd.rsp.arg[7]);
	npar_info->max_tx_vlan_keys = LSW(cmd.rsp.arg[8]);
	npar_info->max_local_ipv6_addrs = MSW(cmd.rsp.arg[8]);
	npar_info->max_remote_ipv6_addrs = LSW(cmd.rsp.arg[9]);

	dev_info(dev, "\n\tmin_tx_bw: %d, max_tx_bw: %d max_tx_ques: %d,\n"
		 "\tmax_tx_mac_filters: %d max_rx_mcast_mac_filters: %d,\n"
		 "\tmax_rx_ucast_mac_filters: 0x%x, max_rx_ip_addr: %d,\n"
		 "\tmax_rx_lro_flow: %d max_rx_status_rings: %d,\n"
		 "\tmax_rx_buf_rings: %d, max_rx_ques: %d, max_tx_vlan_keys %d\n"
		 "\tlocal_ipv6_addr: %d, remote_ipv6_addr: %d\n",
		 npar_info->min_tx_bw, npar_info->max_tx_bw,
		 npar_info->max_tx_ques, npar_info->max_tx_mac_filters,
		 npar_info->max_rx_mcast_mac_filters,
		 npar_info->max_rx_ucast_mac_filters, npar_info->max_rx_ip_addr,
		 npar_info->max_rx_lro_flow, npar_info->max_rx_status_rings,
		 npar_info->max_rx_buf_rings, npar_info->max_rx_ques,
		 npar_info->max_tx_vlan_keys, npar_info->max_local_ipv6_addrs,
		 npar_info->max_remote_ipv6_addrs);

	qlcnic_free_mbx_args(&cmd);
	return err;
}
407
/* Enter PVID mode: record the port VLAN id the PF assigned (high word of
 * rsp.arg[1]) and clear the tagging-enabled flag, since the PF tags for us.
 * Always succeeds.
 */
static int qlcnic_sriov_set_pvid_mode(struct qlcnic_adapter *adapter,
				      struct qlcnic_cmd_args *cmd)
{
	adapter->rx_pvid = MSW(cmd->rsp.arg[1]) & 0xffff;
	adapter->flags &= ~QLCNIC_TAGGING_ENABLED;
	return 0;
}
415
416static int qlcnic_sriov_set_guest_vlan_mode(struct qlcnic_adapter *adapter,
417 struct qlcnic_cmd_args *cmd)
418{
419 struct qlcnic_sriov *sriov = adapter->ahw->sriov;
420 int i, num_vlans;
421 u16 *vlans;
422
423 if (sriov->allowed_vlans)
424 return 0;
425
426 sriov->any_vlan = cmd->rsp.arg[2] & 0xf;
Manish Chopra154d0c82013-12-17 09:01:53 -0500427 sriov->num_allowed_vlans = cmd->rsp.arg[2] >> 16;
428 dev_info(&adapter->pdev->dev, "Number of allowed Guest VLANs = %d\n",
429 sriov->num_allowed_vlans);
430
431 qlcnic_sriov_alloc_vlans(adapter);
432
Rajesh Borundia91b72822013-04-19 07:01:12 +0000433 if (!sriov->any_vlan)
434 return 0;
435
Rajesh Borundia91b72822013-04-19 07:01:12 +0000436 num_vlans = sriov->num_allowed_vlans;
437 sriov->allowed_vlans = kzalloc(sizeof(u16) * num_vlans, GFP_KERNEL);
438 if (!sriov->allowed_vlans)
439 return -ENOMEM;
440
441 vlans = (u16 *)&cmd->rsp.arg[3];
442 for (i = 0; i < num_vlans; i++)
443 sriov->allowed_vlans[i] = vlans[i];
444
445 return 0;
446}
447
/* Fetch the VF's access-control configuration from the PF over the back
 * channel and apply the advertised VLAN mode (guest-VLAN or PVID).
 * @info is currently unused here — kept for signature compatibility with
 * the caller.  Returns 0 or a negative errno.
 */
static int qlcnic_sriov_get_vf_acl(struct qlcnic_adapter *adapter,
				   struct qlcnic_info *info)
{
	struct qlcnic_sriov *sriov = adapter->ahw->sriov;
	struct qlcnic_cmd_args cmd;
	int ret = 0;

	ret = qlcnic_sriov_alloc_bc_mbx_args(&cmd, QLCNIC_BC_CMD_GET_ACL);
	if (ret)
		return ret;

	ret = qlcnic_issue_cmd(adapter, &cmd);
	if (ret) {
		dev_err(&adapter->pdev->dev, "Failed to get ACL, err=%d\n",
			ret);
	} else {
		/* VLAN mode is in the low two bits of rsp.arg[1] */
		sriov->vlan_mode = cmd.rsp.arg[1] & 0x3;
		switch (sriov->vlan_mode) {
		case QLC_GUEST_VLAN_MODE:
			ret = qlcnic_sriov_set_guest_vlan_mode(adapter, &cmd);
			break;
		case QLC_PVID_MODE:
			ret = qlcnic_sriov_set_pvid_mode(adapter, &cmd);
			break;
		}
	}

	qlcnic_free_mbx_args(&cmd);
	return ret;
}
478
/* VF driver bring-up after the back channel is alive: query vport limits,
 * per-function NIC info, the ACL/VLAN policy and port info, then program
 * ring sizes and cache the firmware-reported hardware parameters.
 * Returns 0 or a negative errno.
 */
static int qlcnic_sriov_vf_init_driver(struct qlcnic_adapter *adapter)
{
	struct qlcnic_hardware_context *ahw = adapter->ahw;
	struct qlcnic_info nic_info;
	int err;

	err = qlcnic_sriov_get_vf_vport_info(adapter, &nic_info, 0);
	if (err)
		return err;

	ahw->max_mc_count = nic_info.max_rx_mcast_mac_filters;

	err = qlcnic_get_nic_info(adapter, &nic_info, ahw->pci_func);
	if (err)
		return -EIO;

	err = qlcnic_sriov_get_vf_acl(adapter, &nic_info);
	if (err)
		return err;

	if (qlcnic_83xx_get_port_info(adapter))
		return -EIO;

	qlcnic_sriov_vf_cfg_buff_desc(adapter);
	adapter->flags |= QLCNIC_ADAPTER_INITIALIZED;
	dev_info(&adapter->pdev->dev, "HAL Version: %d\n",
		 adapter->ahw->fw_hal_version);

	/* Cache firmware-reported hardware characteristics */
	ahw->physical_port = (u8) nic_info.phys_port;
	ahw->switch_mode = nic_info.switch_mode;
	ahw->max_mtu = nic_info.max_mtu;
	ahw->op_mode = nic_info.op_mode;
	ahw->capabilities = nic_info.capabilities;
	return 0;
}
514
/* Full VF probe path: interrupts, mailbox interrupt, SR-IOV context (one
 * VF entry — the PF peer), back-channel interrupt, CHANNEL_INIT handshake,
 * driver init, then netdev registration and the periodic IDC poll.
 * Each failure unwinds exactly the steps completed so far via the goto
 * chain (in reverse order of setup).  Returns 0 or a negative errno.
 */
static int qlcnic_sriov_setup_vf(struct qlcnic_adapter *adapter,
				 int pci_using_dac)
{
	int err;

	INIT_LIST_HEAD(&adapter->vf_mc_list);
	if (!qlcnic_use_msi_x && !!qlcnic_use_msi)
		dev_warn(&adapter->pdev->dev,
			 "Device does not support MSI interrupts\n");

	/* compute and set default and max tx/sds rings */
	qlcnic_set_tx_ring_count(adapter, QLCNIC_SINGLE_RING);
	qlcnic_set_sds_ring_count(adapter, QLCNIC_SINGLE_RING);

	err = qlcnic_setup_intr(adapter);
	if (err) {
		dev_err(&adapter->pdev->dev, "Failed to setup interrupt\n");
		goto err_out_disable_msi;
	}

	err = qlcnic_83xx_setup_mbx_intr(adapter);
	if (err)
		goto err_out_disable_msi;

	/* A VF tracks exactly one peer (the PF) in its sriov context */
	err = qlcnic_sriov_init(adapter, 1);
	if (err)
		goto err_out_disable_mbx_intr;

	err = qlcnic_sriov_cfg_bc_intr(adapter, 1);
	if (err)
		goto err_out_cleanup_sriov;

	err = qlcnic_sriov_channel_cfg_cmd(adapter, QLCNIC_BC_CMD_CHANNEL_INIT);
	if (err)
		goto err_out_disable_bc_intr;

	err = qlcnic_sriov_vf_init_driver(adapter);
	if (err)
		goto err_out_send_channel_term;

	err = qlcnic_setup_netdev(adapter, adapter->netdev, pci_using_dac);
	if (err)
		goto err_out_send_channel_term;

	pci_set_drvdata(adapter->pdev, adapter);
	dev_info(&adapter->pdev->dev, "%s: XGbE port initialized\n",
		 adapter->netdev->name);

	qlcnic_schedule_work(adapter, qlcnic_sriov_vf_poll_dev_state,
			     adapter->ahw->idc.delay);
	return 0;

err_out_send_channel_term:
	qlcnic_sriov_channel_cfg_cmd(adapter, QLCNIC_BC_CMD_CHANNEL_TERM);

err_out_disable_bc_intr:
	qlcnic_sriov_cfg_bc_intr(adapter, 0);

err_out_cleanup_sriov:
	__qlcnic_sriov_cleanup(adapter);

err_out_disable_mbx_intr:
	qlcnic_83xx_free_mbx_intr(adapter);

err_out_disable_msi:
	qlcnic_teardown_intr(adapter);
	return err;
}
583
/* Poll the IDC device-state register (20 ms per attempt) until the device
 * reports READY.  Gives up with -EIO after QLC_BC_CMD_MAX_RETRY_CNT
 * failed polls, counted via adapter->fw_fail_cnt (reset by the caller).
 */
static int qlcnic_sriov_check_dev_ready(struct qlcnic_adapter *adapter)
{
	u32 state;

	do {
		msleep(20);
		if (++adapter->fw_fail_cnt > QLC_BC_CMD_MAX_RETRY_CNT)
			return -EIO;
		state = QLCRDX(adapter->ahw, QLC_83XX_IDC_DEV_STATE);
	} while (state != QLC_83XX_IDC_DEV_READY);

	return 0;
}
597
/* Top-level VF initialization: seed IDC/reset bookkeeping, wait for the
 * firmware to reach READY, run the full VF setup path, then read the MAC
 * (warn-only on failure) and arm the AEN work.  Returns 0 or a negative
 * errno from the readiness check or setup.
 */
int qlcnic_sriov_vf_init(struct qlcnic_adapter *adapter, int pci_using_dac)
{
	struct qlcnic_hardware_context *ahw = adapter->ahw;
	int err;

	set_bit(QLC_83XX_MODULE_LOADED, &ahw->idc.status);
	ahw->idc.delay = QLC_83XX_IDC_FW_POLL_DELAY;
	ahw->reset_context = 0;
	adapter->fw_fail_cnt = 0;
	ahw->msix_supported = 1;
	adapter->need_fw_reset = 0;
	adapter->flags |= QLCNIC_TX_INTR_SHARED;

	err = qlcnic_sriov_check_dev_ready(adapter);
	if (err)
		return err;

	err = qlcnic_sriov_setup_vf(adapter, pci_using_dac);
	if (err)
		return err;

	/* MAC read failure is non-fatal; a random MAC may be in use */
	if (qlcnic_read_mac_addr(adapter))
		dev_warn(&adapter->pdev->dev, "failed to read mac addr\n");

	INIT_DELAYED_WORK(&adapter->idc_aen_work, qlcnic_83xx_idc_aen_work);

	clear_bit(__QLCNIC_RESETTING, &adapter->state);
	return 0;
}
627
628void qlcnic_sriov_vf_set_ops(struct qlcnic_adapter *adapter)
629{
630 struct qlcnic_hardware_context *ahw = adapter->ahw;
631
632 ahw->op_mode = QLCNIC_SRIOV_VF_FUNC;
633 dev_info(&adapter->pdev->dev,
634 "HAL Version: %d Non Privileged SRIOV function\n",
635 ahw->fw_hal_version);
636 adapter->nic_ops = &qlcnic_sriov_vf_ops;
637 set_bit(__QLCNIC_SRIOV_ENABLE, &adapter->state);
638 return;
639}
640
/* Install the VF hardware-ops table and the 83xx register maps into the
 * hardware context; called during early probe before any register access.
 */
void qlcnic_sriov_vf_register_map(struct qlcnic_hardware_context *ahw)
{
	ahw->hw_ops = &qlcnic_sriov_vf_hw_ops;
	ahw->reg_tbl = (u32 *)qlcnic_83xx_reg_tbl;
	ahw->ext_reg_tbl = (u32 *)qlcnic_83xx_ext_reg_tbl;
}
Rajesh Borundiaf197a7a2013-03-29 05:46:36 +0000647
648static u32 qlcnic_sriov_get_bc_paysize(u32 real_pay_size, u8 curr_frag)
649{
650 u32 pay_size;
651
652 pay_size = real_pay_size / ((curr_frag + 1) * QLC_BC_PAYLOAD_SZ);
653
654 if (pay_size)
655 pay_size = QLC_BC_PAYLOAD_SZ;
656 else
657 pay_size = real_pay_size % QLC_BC_PAYLOAD_SZ;
658
659 return pay_size;
660}
661
662int qlcnic_sriov_func_to_index(struct qlcnic_adapter *adapter, u8 pci_func)
663{
664 struct qlcnic_vf_info *vf_info = adapter->ahw->sriov->vf_info;
665 u8 i;
666
667 if (qlcnic_sriov_vf_check(adapter))
668 return 0;
669
670 for (i = 0; i < adapter->ahw->sriov->num_vfs; i++) {
671 if (vf_info[i].pci_func == pci_func)
672 return i;
673 }
674
675 return -EINVAL;
676}
677
678static inline int qlcnic_sriov_alloc_bc_trans(struct qlcnic_bc_trans **trans)
679{
680 *trans = kzalloc(sizeof(struct qlcnic_bc_trans), GFP_ATOMIC);
681 if (!*trans)
682 return -ENOMEM;
683
684 init_completion(&(*trans)->resp_cmpl);
685 return 0;
686}
687
688static inline int qlcnic_sriov_alloc_bc_msg(struct qlcnic_bc_hdr **hdr,
689 u32 size)
690{
691 *hdr = kzalloc(sizeof(struct qlcnic_bc_hdr) * size, GFP_ATOMIC);
692 if (!*hdr)
693 return -ENOMEM;
694
695 return 0;
696}
697
698static int qlcnic_sriov_alloc_bc_mbx_args(struct qlcnic_cmd_args *mbx, u32 type)
699{
700 const struct qlcnic_mailbox_metadata *mbx_tbl;
701 int i, size;
702
703 mbx_tbl = qlcnic_sriov_bc_mbx_tbl;
704 size = ARRAY_SIZE(qlcnic_sriov_bc_mbx_tbl);
705
706 for (i = 0; i < size; i++) {
707 if (type == mbx_tbl[i].cmd) {
708 mbx->op_type = QLC_BC_CMD;
709 mbx->req.num = mbx_tbl[i].in_args;
710 mbx->rsp.num = mbx_tbl[i].out_args;
711 mbx->req.arg = kcalloc(mbx->req.num, sizeof(u32),
712 GFP_ATOMIC);
713 if (!mbx->req.arg)
714 return -ENOMEM;
715 mbx->rsp.arg = kcalloc(mbx->rsp.num, sizeof(u32),
716 GFP_ATOMIC);
717 if (!mbx->rsp.arg) {
718 kfree(mbx->req.arg);
719 mbx->req.arg = NULL;
720 return -ENOMEM;
721 }
722 memset(mbx->req.arg, 0, sizeof(u32) * mbx->req.num);
723 memset(mbx->rsp.arg, 0, sizeof(u32) * mbx->rsp.num);
724 mbx->req.arg[0] = (type | (mbx->req.num << 16) |
725 (3 << 29));
Pratik Pujar62262042013-07-26 16:24:01 -0400726 mbx->rsp.arg[0] = (type & 0xffff) | mbx->rsp.num << 16;
Rajesh Borundiaf197a7a2013-03-29 05:46:36 +0000727 return 0;
728 }
729 }
730 return -EINVAL;
731}
732
/* Prepare the fragment header array for one back-channel transaction.
 *
 * For a COMMAND (@msg_type == QLC_BC_COMMAND), the transaction adopts the
 * cmd's req/rsp arg arrays as payloads and allocates header arrays sized
 * by how many QLC_BC_PAYLOAD_SZ fragments each direction needs; the
 * headers filled below are the request's.  For a RESPONSE, the cmd
 * instead adopts the transaction's payloads (the roles are reversed on
 * the receiving side) and the response headers are filled.
 *
 * Returns 0 or -ENOMEM.  On the COMMAND path a partially allocated
 * req_hdr is left for qlcnic_sriov_cleanup_transaction() to free.
 */
static int qlcnic_sriov_prepare_bc_hdr(struct qlcnic_bc_trans *trans,
				       struct qlcnic_cmd_args *cmd,
				       u16 seq, u8 msg_type)
{
	struct qlcnic_bc_hdr *hdr;
	int i;
	u32 num_regs, bc_pay_sz;
	u16 remainder;
	u8 cmd_op, num_frags, t_num_frags;

	bc_pay_sz = QLC_BC_PAYLOAD_SZ;
	if (msg_type == QLC_BC_COMMAND) {
		trans->req_pay = (struct qlcnic_bc_payload *)cmd->req.arg;
		trans->rsp_pay = (struct qlcnic_bc_payload *)cmd->rsp.arg;
		num_regs = cmd->req.num;
		trans->req_pay_size = (num_regs * 4);
		num_regs = cmd->rsp.num;
		trans->rsp_pay_size = (num_regs * 4);
		cmd_op = cmd->req.arg[0] & 0xff;
		remainder = (trans->req_pay_size) % (bc_pay_sz);
		num_frags = (trans->req_pay_size) / (bc_pay_sz);
		if (remainder)
			num_frags++;
		/* Remember the request frag count; num_frags is reused for
		 * sizing the response header array below.
		 */
		t_num_frags = num_frags;
		if (qlcnic_sriov_alloc_bc_msg(&trans->req_hdr, num_frags))
			return -ENOMEM;
		remainder = (trans->rsp_pay_size) % (bc_pay_sz);
		num_frags = (trans->rsp_pay_size) / (bc_pay_sz);
		if (remainder)
			num_frags++;
		if (qlcnic_sriov_alloc_bc_msg(&trans->rsp_hdr, num_frags))
			return -ENOMEM;
		num_frags = t_num_frags;
		hdr = trans->req_hdr;
	} else {
		cmd->req.arg = (u32 *)trans->req_pay;
		cmd->rsp.arg = (u32 *)trans->rsp_pay;
		cmd_op = cmd->req.arg[0] & 0xff;
		remainder = (trans->rsp_pay_size) % (bc_pay_sz);
		num_frags = (trans->rsp_pay_size) / (bc_pay_sz);
		if (remainder)
			num_frags++;
		cmd->req.num = trans->req_pay_size / 4;
		cmd->rsp.num = trans->rsp_pay_size / 4;
		hdr = trans->rsp_hdr;
		cmd->op_type = trans->req_hdr->op_type;
	}

	trans->trans_id = seq;
	trans->cmd_id = cmd_op;
	/* Every fragment header carries the full transaction identity;
	 * frag_num is 1-based.
	 */
	for (i = 0; i < num_frags; i++) {
		hdr[i].version = 2;
		hdr[i].msg_type = msg_type;
		hdr[i].op_type = cmd->op_type;
		hdr[i].num_cmds = 1;
		hdr[i].num_frags = num_frags;
		hdr[i].frag_num = i + 1;
		hdr[i].cmd_op = cmd_op;
		hdr[i].seq_id = seq;
	}
	return 0;
}
795
796static void qlcnic_sriov_cleanup_transaction(struct qlcnic_bc_trans *trans)
797{
798 if (!trans)
799 return;
800 kfree(trans->req_hdr);
801 kfree(trans->rsp_hdr);
802 kfree(trans);
803}
804
/* Detach a finished transaction from the VF's bookkeeping.
 * RESPONSE: remove it from the active receive list; returns 1 when more
 * transactions remain queued (caller should keep processing), else 0.
 * COMMAND: spin (with sleeps) until the SEND slot can be claimed, clear
 * the in-flight command pointer, and release the slot; returns 0.
 */
static int qlcnic_sriov_clear_trans(struct qlcnic_vf_info *vf,
				    struct qlcnic_bc_trans *trans, u8 type)
{
	struct qlcnic_trans_list *t_list;
	unsigned long flags;
	int ret = 0;

	if (type == QLC_BC_RESPONSE) {
		t_list = &vf->rcv_act;
		spin_lock_irqsave(&t_list->lock, flags);
		t_list->count--;
		list_del(&trans->list);
		if (t_list->count > 0)
			ret = 1;
		spin_unlock_irqrestore(&t_list->lock, flags);
	}
	if (type == QLC_BC_COMMAND) {
		while (test_and_set_bit(QLC_BC_VF_SEND, &vf->state))
			msleep(100);
		vf->send_cmd = NULL;
		clear_bit(QLC_BC_VF_SEND, &vf->state);
	}
	return ret;
}
829
/* Queue the VF's back-channel transaction worker, unless the VF is in
 * FLR or the adapter is awaiting a firmware reset (in which case new
 * back-channel work must not be started).  @func is unused here; the
 * work item was bound to qlcnic_sriov_process_bc_cmd at init time.
 */
static void qlcnic_sriov_schedule_bc_cmd(struct qlcnic_sriov *sriov,
					 struct qlcnic_vf_info *vf,
					 work_func_t func)
{
	if (test_bit(QLC_BC_VF_FLR, &vf->state) ||
	    vf->adapter->need_fw_reset)
		return;

	queue_work(sriov->bc.bc_trans_wq, &vf->trans_work);
}
840
841static inline void qlcnic_sriov_wait_for_resp(struct qlcnic_bc_trans *trans)
842{
843 struct completion *cmpl = &trans->resp_cmpl;
844
845 if (wait_for_completion_timeout(cmpl, QLC_MBOX_RESP_TIMEOUT))
846 trans->trans_state = QLC_END;
847 else
848 trans->trans_state = QLC_ABORT;
849
850 return;
851}
852
853static void qlcnic_sriov_handle_multi_frags(struct qlcnic_bc_trans *trans,
854 u8 type)
855{
856 if (type == QLC_BC_RESPONSE) {
857 trans->curr_rsp_frag++;
858 if (trans->curr_rsp_frag < trans->rsp_hdr->num_frags)
859 trans->trans_state = QLC_INIT;
860 else
861 trans->trans_state = QLC_END;
862 } else {
863 trans->curr_req_frag++;
864 if (trans->curr_req_frag < trans->req_hdr->num_frags)
865 trans->trans_state = QLC_INIT;
866 else
867 trans->trans_state = QLC_WAIT_FOR_RESP;
868 }
869}
870
871static void qlcnic_sriov_wait_for_channel_free(struct qlcnic_bc_trans *trans,
872 u8 type)
873{
874 struct qlcnic_vf_info *vf = trans->vf;
875 struct completion *cmpl = &vf->ch_free_cmpl;
876
877 if (!wait_for_completion_timeout(cmpl, QLC_MBOX_CH_FREE_TIMEOUT)) {
878 trans->trans_state = QLC_ABORT;
879 return;
880 }
881
882 clear_bit(QLC_BC_VF_CHANNEL, &vf->state);
883 qlcnic_sriov_handle_multi_frags(trans, type);
884}
885
/* Copy one back-channel message fragment out of the firmware mailbox
 * registers: the BC header goes to @hdr, the payload to @pay.
 * @size is the payload size in bytes; message words start at mailbox
 * index 2.
 */
static void qlcnic_sriov_pull_bc_msg(struct qlcnic_adapter *adapter,
				     u32 *hdr, u32 *pay, u32 size)
{
	struct qlcnic_hardware_context *ahw = adapter->ahw;
	u32 fw_mbx;
	u8 i, max = 2, hdr_size, j;

	hdr_size = (sizeof(struct qlcnic_bc_hdr) / sizeof(u32));
	/* total number of 32-bit words to pull (header + payload) */
	max = (size / sizeof(u32)) + hdr_size;

	/* NOTE(review): mailbox word 0 is read and discarded here —
	 * presumably the read itself is required to consume/ack the slot;
	 * confirm against the 83xx mailbox programming spec.
	 */
	fw_mbx = readl(QLCNIC_MBX_FW(ahw, 0));
	for (i = 2, j = 0; j < hdr_size; i++, j++)
		*(hdr++) = readl(QLCNIC_MBX_FW(ahw, i));
	for (; j < max; i++, j++)
		*(pay++) = readl(QLCNIC_MBX_FW(ahw, i));
}
902
903static int __qlcnic_sriov_issue_bc_post(struct qlcnic_vf_info *vf)
904{
905 int ret = -EBUSY;
906 u32 timeout = 10000;
907
908 do {
909 if (!test_and_set_bit(QLC_BC_VF_CHANNEL, &vf->state)) {
910 ret = 0;
911 break;
912 }
913 mdelay(1);
914 } while (--timeout);
915
916 return ret;
917}
918
919static int qlcnic_sriov_issue_bc_post(struct qlcnic_bc_trans *trans, u8 type)
920{
921 struct qlcnic_vf_info *vf = trans->vf;
922 u32 pay_size, hdr_size;
923 u32 *hdr, *pay;
924 int ret;
925 u8 pci_func = trans->func_id;
926
927 if (__qlcnic_sriov_issue_bc_post(vf))
928 return -EBUSY;
929
930 if (type == QLC_BC_COMMAND) {
931 hdr = (u32 *)(trans->req_hdr + trans->curr_req_frag);
932 pay = (u32 *)(trans->req_pay + trans->curr_req_frag);
933 hdr_size = (sizeof(struct qlcnic_bc_hdr) / sizeof(u32));
934 pay_size = qlcnic_sriov_get_bc_paysize(trans->req_pay_size,
935 trans->curr_req_frag);
936 pay_size = (pay_size / sizeof(u32));
937 } else {
938 hdr = (u32 *)(trans->rsp_hdr + trans->curr_rsp_frag);
939 pay = (u32 *)(trans->rsp_pay + trans->curr_rsp_frag);
940 hdr_size = (sizeof(struct qlcnic_bc_hdr) / sizeof(u32));
941 pay_size = qlcnic_sriov_get_bc_paysize(trans->rsp_pay_size,
942 trans->curr_rsp_frag);
943 pay_size = (pay_size / sizeof(u32));
944 }
945
946 ret = qlcnic_sriov_post_bc_msg(vf->adapter, hdr, pay,
947 pci_func, pay_size);
948 return ret;
949}
950
/* Drive the back-channel transaction state machine until the message
 * (all fragments) is sent, and — for commands — the response arrives.
 * Returns 0 when the machine reaches QLC_END, -EIO on abort or an
 * unexpected state.
 */
static int __qlcnic_sriov_send_bc_msg(struct qlcnic_bc_trans *trans,
				      struct qlcnic_vf_info *vf, u8 type)
{
	bool flag = true;
	int err = -EIO;

	while (flag) {
		/* An FLR in progress or a pending FW reset aborts the
		 * transaction regardless of its current state.
		 */
		if (test_bit(QLC_BC_VF_FLR, &vf->state) ||
		    vf->adapter->need_fw_reset)
			trans->trans_state = QLC_ABORT;

		switch (trans->trans_state) {
		case QLC_INIT:
			/* Post the current fragment, then wait for the
			 * channel-free ack before the next one.
			 */
			trans->trans_state = QLC_WAIT_FOR_CHANNEL_FREE;
			if (qlcnic_sriov_issue_bc_post(trans, type))
				trans->trans_state = QLC_ABORT;
			break;
		case QLC_WAIT_FOR_CHANNEL_FREE:
			qlcnic_sriov_wait_for_channel_free(trans, type);
			break;
		case QLC_WAIT_FOR_RESP:
			qlcnic_sriov_wait_for_resp(trans);
			break;
		case QLC_END:
			err = 0;
			flag = false;
			break;
		case QLC_ABORT:
			err = -EIO;
			flag = false;
			/* Drop channel ownership that may still be held */
			clear_bit(QLC_BC_VF_CHANNEL, &vf->state);
			break;
		default:
			err = -EIO;
			flag = false;
		}
	}
	return err;
}
990
991static int qlcnic_sriov_send_bc_cmd(struct qlcnic_adapter *adapter,
992 struct qlcnic_bc_trans *trans, int pci_func)
993{
994 struct qlcnic_vf_info *vf;
995 int err, index = qlcnic_sriov_func_to_index(adapter, pci_func);
996
997 if (index < 0)
998 return -EIO;
999
1000 vf = &adapter->ahw->sriov->vf_info[index];
1001 trans->vf = vf;
1002 trans->func_id = pci_func;
1003
1004 if (!test_bit(QLC_BC_VF_STATE, &vf->state)) {
1005 if (qlcnic_sriov_pf_check(adapter))
1006 return -EIO;
1007 if (qlcnic_sriov_vf_check(adapter) &&
1008 trans->cmd_id != QLCNIC_BC_CMD_CHANNEL_INIT)
1009 return -EIO;
1010 }
1011
1012 mutex_lock(&vf->send_cmd_lock);
1013 vf->send_cmd = trans;
1014 err = __qlcnic_sriov_send_bc_msg(trans, vf, QLC_BC_COMMAND);
1015 qlcnic_sriov_clear_trans(vf, trans, QLC_BC_COMMAND);
1016 mutex_unlock(&vf->send_cmd_lock);
1017 return err;
1018}
1019
1020static void __qlcnic_sriov_process_bc_cmd(struct qlcnic_adapter *adapter,
1021 struct qlcnic_bc_trans *trans,
1022 struct qlcnic_cmd_args *cmd)
1023{
1024#ifdef CONFIG_QLCNIC_SRIOV
1025 if (qlcnic_sriov_pf_check(adapter)) {
1026 qlcnic_sriov_pf_process_bc_cmd(adapter, trans, cmd);
1027 return;
1028 }
1029#endif
1030 cmd->rsp.arg[0] |= (0x9 << 25);
1031 return;
1032}
1033
/* Worklet servicing the head of a VF's active-transaction list:
 * prepares the response header, lets the command be processed (PF
 * handler or VF error stub), sends the response, then tears the
 * transaction down and reschedules itself if more work is queued.
 */
static void qlcnic_sriov_process_bc_cmd(struct work_struct *work)
{
	struct qlcnic_vf_info *vf = container_of(work, struct qlcnic_vf_info,
						 trans_work);
	struct qlcnic_bc_trans *trans = NULL;
	struct qlcnic_adapter *adapter = vf->adapter;
	struct qlcnic_cmd_args cmd;
	u8 req;

	/* No new processing during FW reset or while this VF is in FLR */
	if (adapter->need_fw_reset)
		return;

	if (test_bit(QLC_BC_VF_FLR, &vf->state))
		return;

	memset(&cmd, 0, sizeof(struct qlcnic_cmd_args));
	/* NOTE(review): wait_list is read here without taking
	 * rcv_act.lock — appears to rely on this worklet being the sole
	 * consumer and on count-based scheduling; confirm.
	 */
	trans = list_first_entry(&vf->rcv_act.wait_list,
				 struct qlcnic_bc_trans, list);
	adapter = vf->adapter;

	if (qlcnic_sriov_prepare_bc_hdr(trans, &cmd, trans->req_hdr->seq_id,
					QLC_BC_RESPONSE))
		goto cleanup_trans;

	__qlcnic_sriov_process_bc_cmd(adapter, trans, &cmd);
	trans->trans_state = QLC_INIT;
	__qlcnic_sriov_send_bc_msg(trans, vf, QLC_BC_RESPONSE);

cleanup_trans:
	qlcnic_free_mbx_args(&cmd);
	/* clear_trans returns nonzero if more transactions are queued */
	req = qlcnic_sriov_clear_trans(vf, trans, QLC_BC_RESPONSE);
	qlcnic_sriov_cleanup_transaction(trans);
	if (req)
		qlcnic_sriov_schedule_bc_cmd(adapter->ahw->sriov, vf,
					     qlcnic_sriov_process_bc_cmd);
}
1070
/* Handle one response fragment arriving for the VF's in-flight command.
 * Pulls the fragment out of the mailbox into the transaction's response
 * buffers and completes resp_cmpl once the final fragment is in.
 */
static void qlcnic_sriov_handle_bc_resp(struct qlcnic_bc_hdr *hdr,
					struct qlcnic_vf_info *vf)
{
	struct qlcnic_bc_trans *trans;
	u32 pay_size;

	/* If the send slot is already owned (sender tearing down), drop
	 * the fragment rather than racing with qlcnic_sriov_clear_trans().
	 */
	if (test_and_set_bit(QLC_BC_VF_SEND, &vf->state))
		return;

	trans = vf->send_cmd;

	if (trans == NULL)
		goto clear_send;

	/* Ignore stale responses that don't match the pending sequence */
	if (trans->trans_id != hdr->seq_id)
		goto clear_send;

	pay_size = qlcnic_sriov_get_bc_paysize(trans->rsp_pay_size,
					       trans->curr_rsp_frag);
	qlcnic_sriov_pull_bc_msg(vf->adapter,
				 (u32 *)(trans->rsp_hdr + trans->curr_rsp_frag),
				 (u32 *)(trans->rsp_pay + trans->curr_rsp_frag),
				 pay_size);
	if (++trans->curr_rsp_frag < trans->rsp_hdr->num_frags)
		goto clear_send;

	/* Final fragment received: wake the waiting sender */
	complete(&trans->resp_cmpl);

clear_send:
	clear_bit(QLC_BC_VF_SEND, &vf->state);
}
1102
Rajesh Borundia97d81052013-04-19 07:01:09 +00001103int __qlcnic_sriov_add_act_list(struct qlcnic_sriov *sriov,
1104 struct qlcnic_vf_info *vf,
1105 struct qlcnic_bc_trans *trans)
1106{
1107 struct qlcnic_trans_list *t_list = &vf->rcv_act;
1108
1109 t_list->count++;
1110 list_add_tail(&trans->list, &t_list->wait_list);
1111 if (t_list->count == 1)
1112 qlcnic_sriov_schedule_bc_cmd(sriov, vf,
1113 qlcnic_sriov_process_bc_cmd);
1114 return 0;
1115}
1116
Rajesh Borundiaf197a7a2013-03-29 05:46:36 +00001117static int qlcnic_sriov_add_act_list(struct qlcnic_sriov *sriov,
1118 struct qlcnic_vf_info *vf,
1119 struct qlcnic_bc_trans *trans)
1120{
1121 struct qlcnic_trans_list *t_list = &vf->rcv_act;
1122
1123 spin_lock(&t_list->lock);
Rajesh Borundia97d81052013-04-19 07:01:09 +00001124
1125 __qlcnic_sriov_add_act_list(sriov, vf, trans);
1126
Rajesh Borundiaf197a7a2013-03-29 05:46:36 +00001127 spin_unlock(&t_list->lock);
1128 return 0;
1129}
1130
1131static void qlcnic_sriov_handle_pending_trans(struct qlcnic_sriov *sriov,
1132 struct qlcnic_vf_info *vf,
1133 struct qlcnic_bc_hdr *hdr)
1134{
1135 struct qlcnic_bc_trans *trans = NULL;
1136 struct list_head *node;
1137 u32 pay_size, curr_frag;
1138 u8 found = 0, active = 0;
1139
1140 spin_lock(&vf->rcv_pend.lock);
1141 if (vf->rcv_pend.count > 0) {
1142 list_for_each(node, &vf->rcv_pend.wait_list) {
1143 trans = list_entry(node, struct qlcnic_bc_trans, list);
1144 if (trans->trans_id == hdr->seq_id) {
1145 found = 1;
1146 break;
1147 }
1148 }
1149 }
1150
1151 if (found) {
1152 curr_frag = trans->curr_req_frag;
1153 pay_size = qlcnic_sriov_get_bc_paysize(trans->req_pay_size,
1154 curr_frag);
1155 qlcnic_sriov_pull_bc_msg(vf->adapter,
1156 (u32 *)(trans->req_hdr + curr_frag),
1157 (u32 *)(trans->req_pay + curr_frag),
1158 pay_size);
1159 trans->curr_req_frag++;
1160 if (trans->curr_req_frag >= hdr->num_frags) {
1161 vf->rcv_pend.count--;
1162 list_del(&trans->list);
1163 active = 1;
1164 }
1165 }
1166 spin_unlock(&vf->rcv_pend.lock);
1167
1168 if (active)
1169 if (qlcnic_sriov_add_act_list(sriov, vf, trans))
1170 qlcnic_sriov_cleanup_transaction(trans);
1171
1172 return;
1173}
1174
/* Entry point for a back-channel COMMAND arriving for @vf: allocate a
 * transaction, pull the first fragment out of the mailbox, and either
 * activate the transaction (single-fragment message) or park it on the
 * pending list until the remaining fragments arrive.
 */
static void qlcnic_sriov_handle_bc_cmd(struct qlcnic_sriov *sriov,
				       struct qlcnic_bc_hdr *hdr,
				       struct qlcnic_vf_info *vf)
{
	struct qlcnic_bc_trans *trans;
	struct qlcnic_adapter *adapter = vf->adapter;
	struct qlcnic_cmd_args cmd;
	u32 pay_size;
	int err;
	u8 cmd_op;

	if (adapter->need_fw_reset)
		return;

	/* Before the channel is up, only a BC-type CHANNEL_INIT command
	 * is acceptable; drop everything else.
	 */
	if (!test_bit(QLC_BC_VF_STATE, &vf->state) &&
	    hdr->op_type != QLC_BC_CMD &&
	    hdr->cmd_op != QLCNIC_BC_CMD_CHANNEL_INIT)
		return;

	/* Continuation fragment of an already-started message */
	if (hdr->frag_num > 1) {
		qlcnic_sriov_handle_pending_trans(sriov, vf, hdr);
		return;
	}

	memset(&cmd, 0, sizeof(struct qlcnic_cmd_args));
	cmd_op = hdr->cmd_op;
	if (qlcnic_sriov_alloc_bc_trans(&trans))
		return;

	/* BC-internal commands and regular mailbox commands use
	 * different argument tables.
	 */
	if (hdr->op_type == QLC_BC_CMD)
		err = qlcnic_sriov_alloc_bc_mbx_args(&cmd, cmd_op);
	else
		err = qlcnic_alloc_mbx_args(&cmd, adapter, cmd_op);

	if (err) {
		qlcnic_sriov_cleanup_transaction(trans);
		return;
	}

	cmd.op_type = hdr->op_type;
	if (qlcnic_sriov_prepare_bc_hdr(trans, &cmd, hdr->seq_id,
					QLC_BC_COMMAND)) {
		qlcnic_free_mbx_args(&cmd);
		qlcnic_sriov_cleanup_transaction(trans);
		return;
	}

	pay_size = qlcnic_sriov_get_bc_paysize(trans->req_pay_size,
					       trans->curr_req_frag);
	qlcnic_sriov_pull_bc_msg(vf->adapter,
				 (u32 *)(trans->req_hdr + trans->curr_req_frag),
				 (u32 *)(trans->req_pay + trans->curr_req_frag),
				 pay_size);
	trans->func_id = vf->pci_func;
	trans->vf = vf;
	trans->trans_id = hdr->seq_id;
	trans->curr_req_frag++;

	/* NOTE(review): on a soft-FLR hit, ownership of trans appears to
	 * pass to the FLR path — confirm it frees trans/cmd there.
	 */
	if (qlcnic_sriov_soft_flr_check(adapter, trans, vf))
		return;

	if (trans->curr_req_frag == trans->req_hdr->num_frags) {
		/* Single-fragment message: ready to process now */
		if (qlcnic_sriov_add_act_list(sriov, vf, trans)) {
			qlcnic_free_mbx_args(&cmd);
			qlcnic_sriov_cleanup_transaction(trans);
		}
	} else {
		/* More fragments to come: park on the pending list */
		spin_lock(&vf->rcv_pend.lock);
		list_add_tail(&trans->list, &vf->rcv_pend.wait_list);
		vf->rcv_pend.count++;
		spin_unlock(&vf->rcv_pend.lock);
	}
}
1248
1249static void qlcnic_sriov_handle_msg_event(struct qlcnic_sriov *sriov,
1250 struct qlcnic_vf_info *vf)
1251{
1252 struct qlcnic_bc_hdr hdr;
1253 u32 *ptr = (u32 *)&hdr;
1254 u8 msg_type, i;
1255
1256 for (i = 2; i < 6; i++)
1257 ptr[i - 2] = readl(QLCNIC_MBX_FW(vf->adapter->ahw, i));
1258 msg_type = hdr.msg_type;
1259
1260 switch (msg_type) {
1261 case QLC_BC_COMMAND:
1262 qlcnic_sriov_handle_bc_cmd(sriov, &hdr, vf);
1263 break;
1264 case QLC_BC_RESPONSE:
1265 qlcnic_sriov_handle_bc_resp(&hdr, vf);
1266 break;
1267 }
1268}
1269
Rajesh Borundia97d81052013-04-19 07:01:09 +00001270static void qlcnic_sriov_handle_flr_event(struct qlcnic_sriov *sriov,
1271 struct qlcnic_vf_info *vf)
1272{
1273 struct qlcnic_adapter *adapter = vf->adapter;
1274
1275 if (qlcnic_sriov_pf_check(adapter))
1276 qlcnic_sriov_pf_handle_flr(sriov, vf);
1277 else
1278 dev_err(&adapter->pdev->dev,
1279 "Invalid event to VF. VF should not get FLR event\n");
1280}
1281
Rajesh Borundiaf197a7a2013-03-29 05:46:36 +00001282void qlcnic_sriov_handle_bc_event(struct qlcnic_adapter *adapter, u32 event)
1283{
1284 struct qlcnic_vf_info *vf;
1285 struct qlcnic_sriov *sriov;
1286 int index;
1287 u8 pci_func;
1288
1289 sriov = adapter->ahw->sriov;
1290 pci_func = qlcnic_sriov_target_func_id(event);
1291 index = qlcnic_sriov_func_to_index(adapter, pci_func);
1292
1293 if (index < 0)
1294 return;
1295
1296 vf = &sriov->vf_info[index];
1297 vf->pci_func = pci_func;
1298
1299 if (qlcnic_sriov_channel_free_check(event))
1300 complete(&vf->ch_free_cmpl);
1301
Rajesh Borundia97d81052013-04-19 07:01:09 +00001302 if (qlcnic_sriov_flr_check(event)) {
1303 qlcnic_sriov_handle_flr_event(sriov, vf);
1304 return;
1305 }
1306
Rajesh Borundiaf197a7a2013-03-29 05:46:36 +00001307 if (qlcnic_sriov_bc_msg_check(event))
1308 qlcnic_sriov_handle_msg_event(sriov, vf);
1309}
1310
1311int qlcnic_sriov_cfg_bc_intr(struct qlcnic_adapter *adapter, u8 enable)
1312{
1313 struct qlcnic_cmd_args cmd;
1314 int err;
1315
1316 if (!test_bit(__QLCNIC_SRIOV_ENABLE, &adapter->state))
1317 return 0;
1318
1319 if (qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_BC_EVENT_SETUP))
1320 return -ENOMEM;
1321
1322 if (enable)
1323 cmd.req.arg[1] = (1 << 4) | (1 << 5) | (1 << 6) | (1 << 7);
1324
Manish Choprae5c4e6c2013-08-02 00:57:40 -04001325 err = qlcnic_83xx_issue_cmd(adapter, &cmd);
Rajesh Borundiaf197a7a2013-03-29 05:46:36 +00001326
1327 if (err != QLCNIC_RCODE_SUCCESS) {
1328 dev_err(&adapter->pdev->dev,
1329 "Failed to %s bc events, err=%d\n",
1330 (enable ? "enable" : "disable"), err);
1331 }
1332
1333 qlcnic_free_mbx_args(&cmd);
1334 return err;
1335}
1336
Rajesh Borundiaf036e4f2013-04-19 07:01:10 +00001337static int qlcnic_sriov_retry_bc_cmd(struct qlcnic_adapter *adapter,
1338 struct qlcnic_bc_trans *trans)
1339{
1340 u8 max = QLC_BC_CMD_MAX_RETRY_CNT;
1341 u32 state;
1342
1343 state = QLCRDX(adapter->ahw, QLC_83XX_IDC_DEV_STATE);
1344 if (state == QLC_83XX_IDC_DEV_READY) {
1345 msleep(20);
1346 clear_bit(QLC_BC_VF_CHANNEL, &trans->vf->state);
1347 trans->trans_state = QLC_INIT;
1348 if (++adapter->fw_fail_cnt > max)
1349 return -EIO;
1350 else
1351 return 0;
1352 }
1353
1354 return -EIO;
1355}
1356
Manish Choprae5c4e6c2013-08-02 00:57:40 -04001357static int qlcnic_sriov_issue_cmd(struct qlcnic_adapter *adapter,
Rajesh Borundiaf197a7a2013-03-29 05:46:36 +00001358 struct qlcnic_cmd_args *cmd)
1359{
Rajesh Borundiaf036e4f2013-04-19 07:01:10 +00001360 struct qlcnic_hardware_context *ahw = adapter->ahw;
Manish Chopra068a8d12013-08-02 00:57:41 -04001361 struct qlcnic_mailbox *mbx = ahw->mailbox;
Rajesh Borundiaf036e4f2013-04-19 07:01:10 +00001362 struct device *dev = &adapter->pdev->dev;
Rajesh Borundiaf197a7a2013-03-29 05:46:36 +00001363 struct qlcnic_bc_trans *trans;
1364 int err;
1365 u32 rsp_data, opcode, mbx_err_code, rsp;
1366 u16 seq = ++adapter->ahw->sriov->bc.trans_counter;
Rajesh Borundiaf036e4f2013-04-19 07:01:10 +00001367 u8 func = ahw->pci_func;
Rajesh Borundiaf197a7a2013-03-29 05:46:36 +00001368
Rajesh Borundiaf036e4f2013-04-19 07:01:10 +00001369 rsp = qlcnic_sriov_alloc_bc_trans(&trans);
1370 if (rsp)
1371 return rsp;
Rajesh Borundiaf197a7a2013-03-29 05:46:36 +00001372
Rajesh Borundiaf036e4f2013-04-19 07:01:10 +00001373 rsp = qlcnic_sriov_prepare_bc_hdr(trans, cmd, seq, QLC_BC_COMMAND);
1374 if (rsp)
1375 goto cleanup_transaction;
Rajesh Borundiaf197a7a2013-03-29 05:46:36 +00001376
Rajesh Borundiaf036e4f2013-04-19 07:01:10 +00001377retry:
Manish Chopra068a8d12013-08-02 00:57:41 -04001378 if (!test_bit(QLC_83XX_MBX_READY, &mbx->status)) {
Rajesh Borundiaf197a7a2013-03-29 05:46:36 +00001379 rsp = -EIO;
1380 QLCDB(adapter, DRV, "MBX not Ready!(cmd 0x%x) for VF 0x%x\n",
Rajesh Borundiaf036e4f2013-04-19 07:01:10 +00001381 QLCNIC_MBX_RSP(cmd->req.arg[0]), func);
Rajesh Borundiaf197a7a2013-03-29 05:46:36 +00001382 goto err_out;
1383 }
1384
Rajesh Borundiaf036e4f2013-04-19 07:01:10 +00001385 err = qlcnic_sriov_send_bc_cmd(adapter, trans, func);
Rajesh Borundiaf197a7a2013-03-29 05:46:36 +00001386 if (err) {
Rajesh Borundiaf036e4f2013-04-19 07:01:10 +00001387 dev_err(dev, "MBX command 0x%x timed out for VF %d\n",
1388 (cmd->req.arg[0] & 0xffff), func);
Rajesh Borundiaf197a7a2013-03-29 05:46:36 +00001389 rsp = QLCNIC_RCODE_TIMEOUT;
Rajesh Borundiaf036e4f2013-04-19 07:01:10 +00001390
1391 /* After adapter reset PF driver may take some time to
1392 * respond to VF's request. Retry request till maximum retries.
1393 */
1394 if ((trans->req_hdr->cmd_op == QLCNIC_BC_CMD_CHANNEL_INIT) &&
1395 !qlcnic_sriov_retry_bc_cmd(adapter, trans))
1396 goto retry;
1397
Rajesh Borundiaf197a7a2013-03-29 05:46:36 +00001398 goto err_out;
1399 }
1400
1401 rsp_data = cmd->rsp.arg[0];
1402 mbx_err_code = QLCNIC_MBX_STATUS(rsp_data);
1403 opcode = QLCNIC_MBX_RSP(cmd->req.arg[0]);
1404
1405 if ((mbx_err_code == QLCNIC_MBX_RSP_OK) ||
1406 (mbx_err_code == QLCNIC_MBX_PORT_RSP_OK)) {
1407 rsp = QLCNIC_RCODE_SUCCESS;
1408 } else {
1409 rsp = mbx_err_code;
1410 if (!rsp)
1411 rsp = 1;
Rajesh Borundiaf036e4f2013-04-19 07:01:10 +00001412 dev_err(dev,
Rajesh Borundiaf197a7a2013-03-29 05:46:36 +00001413 "MBX command 0x%x failed with err:0x%x for VF %d\n",
Rajesh Borundiaf036e4f2013-04-19 07:01:10 +00001414 opcode, mbx_err_code, func);
Rajesh Borundiaf197a7a2013-03-29 05:46:36 +00001415 }
1416
1417err_out:
Rajesh Borundiaf036e4f2013-04-19 07:01:10 +00001418 if (rsp == QLCNIC_RCODE_TIMEOUT) {
1419 ahw->reset_context = 1;
1420 adapter->need_fw_reset = 1;
Manish Chopra068a8d12013-08-02 00:57:41 -04001421 clear_bit(QLC_83XX_MBX_READY, &mbx->status);
Rajesh Borundiaf036e4f2013-04-19 07:01:10 +00001422 }
1423
1424cleanup_transaction:
Rajesh Borundiaf197a7a2013-03-29 05:46:36 +00001425 qlcnic_sriov_cleanup_transaction(trans);
1426 return rsp;
1427}
1428
1429int qlcnic_sriov_channel_cfg_cmd(struct qlcnic_adapter *adapter, u8 cmd_op)
1430{
1431 struct qlcnic_cmd_args cmd;
1432 struct qlcnic_vf_info *vf = &adapter->ahw->sriov->vf_info[0];
1433 int ret;
1434
1435 if (qlcnic_sriov_alloc_bc_mbx_args(&cmd, cmd_op))
1436 return -ENOMEM;
1437
1438 ret = qlcnic_issue_cmd(adapter, &cmd);
1439 if (ret) {
1440 dev_err(&adapter->pdev->dev,
1441 "Failed bc channel %s %d\n", cmd_op ? "term" : "init",
1442 ret);
1443 goto out;
1444 }
1445
1446 cmd_op = (cmd.rsp.arg[0] & 0xff);
1447 if (cmd.rsp.arg[0] >> 25 == 2)
1448 return 2;
1449 if (cmd_op == QLCNIC_BC_CMD_CHANNEL_INIT)
1450 set_bit(QLC_BC_VF_STATE, &vf->state);
1451 else
1452 clear_bit(QLC_BC_VF_STATE, &vf->state);
1453
1454out:
1455 qlcnic_free_mbx_args(&cmd);
1456 return ret;
1457}
Rajesh Borundiae8b508e2013-03-29 05:46:38 +00001458
Manish Chopra154d0c82013-12-17 09:01:53 -05001459static void qlcnic_vf_add_mc_list(struct net_device *netdev)
Rajesh Borundiae8b508e2013-03-29 05:46:38 +00001460{
1461 struct qlcnic_adapter *adapter = netdev_priv(netdev);
Manish Chopra154d0c82013-12-17 09:01:53 -05001462 struct qlcnic_sriov *sriov = adapter->ahw->sriov;
1463 struct qlcnic_mac_vlan_list *cur;
Rajesh Borundiae8b508e2013-03-29 05:46:38 +00001464 struct list_head *head, tmp_list;
Manish Chopra154d0c82013-12-17 09:01:53 -05001465 struct qlcnic_vf_info *vf;
1466 u16 vlan_id;
1467 int i;
Rajesh Borundiae8b508e2013-03-29 05:46:38 +00001468
Manish Chopra154d0c82013-12-17 09:01:53 -05001469 static const u8 bcast_addr[ETH_ALEN] = {
1470 0xff, 0xff, 0xff, 0xff, 0xff, 0xff
1471 };
1472
1473 vf = &adapter->ahw->sriov->vf_info[0];
Rajesh Borundiae8b508e2013-03-29 05:46:38 +00001474 INIT_LIST_HEAD(&tmp_list);
1475 head = &adapter->vf_mc_list;
1476 netif_addr_lock_bh(netdev);
1477
1478 while (!list_empty(head)) {
Manish Chopra154d0c82013-12-17 09:01:53 -05001479 cur = list_entry(head->next, struct qlcnic_mac_vlan_list, list);
Rajesh Borundiae8b508e2013-03-29 05:46:38 +00001480 list_move(&cur->list, &tmp_list);
1481 }
1482
1483 netif_addr_unlock_bh(netdev);
1484
1485 while (!list_empty(&tmp_list)) {
1486 cur = list_entry((&tmp_list)->next,
Manish Chopra154d0c82013-12-17 09:01:53 -05001487 struct qlcnic_mac_vlan_list, list);
1488 if (!qlcnic_sriov_check_any_vlan(vf)) {
1489 qlcnic_nic_add_mac(adapter, bcast_addr, 0);
1490 qlcnic_nic_add_mac(adapter, cur->mac_addr, 0);
1491 } else {
1492 mutex_lock(&vf->vlan_list_lock);
1493 for (i = 0; i < sriov->num_allowed_vlans; i++) {
1494 vlan_id = vf->sriov_vlans[i];
1495 if (vlan_id) {
1496 qlcnic_nic_add_mac(adapter, bcast_addr,
1497 vlan_id);
1498 qlcnic_nic_add_mac(adapter,
1499 cur->mac_addr,
1500 vlan_id);
1501 }
1502 }
1503 mutex_unlock(&vf->vlan_list_lock);
1504 if (qlcnic_84xx_check(adapter)) {
1505 qlcnic_nic_add_mac(adapter, bcast_addr, 0);
1506 qlcnic_nic_add_mac(adapter, cur->mac_addr, 0);
1507 }
1508 }
Rajesh Borundiae8b508e2013-03-29 05:46:38 +00001509 list_del(&cur->list);
1510 kfree(cur);
1511 }
1512}
1513
1514void qlcnic_sriov_cleanup_async_list(struct qlcnic_back_channel *bc)
1515{
1516 struct list_head *head = &bc->async_list;
1517 struct qlcnic_async_work_list *entry;
1518
1519 while (!list_empty(head)) {
1520 entry = list_entry(head->next, struct qlcnic_async_work_list,
1521 list);
1522 cancel_work_sync(&entry->work);
1523 list_del(&entry->list);
1524 kfree(entry);
1525 }
1526}
1527
1528static void qlcnic_sriov_vf_set_multi(struct net_device *netdev)
1529{
1530 struct qlcnic_adapter *adapter = netdev_priv(netdev);
Manish Chopra154d0c82013-12-17 09:01:53 -05001531 struct qlcnic_hardware_context *ahw = adapter->ahw;
1532 u32 mode = VPORT_MISS_MODE_DROP;
Rajesh Borundiae8b508e2013-03-29 05:46:38 +00001533
1534 if (!test_bit(__QLCNIC_FW_ATTACHED, &adapter->state))
1535 return;
1536
Manish Chopra154d0c82013-12-17 09:01:53 -05001537 if (netdev->flags & IFF_PROMISC) {
1538 if (!(adapter->flags & QLCNIC_PROMISC_DISABLED))
1539 mode = VPORT_MISS_MODE_ACCEPT_ALL;
1540 } else if ((netdev->flags & IFF_ALLMULTI) ||
1541 (netdev_mc_count(netdev) > ahw->max_mc_count)) {
1542 mode = VPORT_MISS_MODE_ACCEPT_MULTI;
1543 }
1544
1545 if (qlcnic_sriov_vf_check(adapter))
1546 qlcnic_vf_add_mc_list(netdev);
1547
1548 qlcnic_nic_set_promisc(adapter, mode);
Rajesh Borundiae8b508e2013-03-29 05:46:38 +00001549}
1550
1551static void qlcnic_sriov_handle_async_multi(struct work_struct *work)
1552{
1553 struct qlcnic_async_work_list *entry;
1554 struct net_device *netdev;
1555
1556 entry = container_of(work, struct qlcnic_async_work_list, work);
1557 netdev = (struct net_device *)entry->ptr;
1558
1559 qlcnic_sriov_vf_set_multi(netdev);
1560 return;
1561}
1562
1563static struct qlcnic_async_work_list *
1564qlcnic_sriov_get_free_node_async_work(struct qlcnic_back_channel *bc)
1565{
1566 struct list_head *node;
1567 struct qlcnic_async_work_list *entry = NULL;
1568 u8 empty = 0;
1569
1570 list_for_each(node, &bc->async_list) {
1571 entry = list_entry(node, struct qlcnic_async_work_list, list);
1572 if (!work_pending(&entry->work)) {
1573 empty = 1;
1574 break;
1575 }
1576 }
1577
1578 if (!empty) {
1579 entry = kzalloc(sizeof(struct qlcnic_async_work_list),
1580 GFP_ATOMIC);
1581 if (entry == NULL)
1582 return NULL;
1583 list_add_tail(&entry->list, &bc->async_list);
1584 }
1585
1586 return entry;
1587}
1588
1589static void qlcnic_sriov_schedule_bc_async_work(struct qlcnic_back_channel *bc,
1590 work_func_t func, void *data)
1591{
1592 struct qlcnic_async_work_list *entry = NULL;
1593
1594 entry = qlcnic_sriov_get_free_node_async_work(bc);
1595 if (!entry)
1596 return;
1597
1598 entry->ptr = data;
1599 INIT_WORK(&entry->work, func);
1600 queue_work(bc->bc_async_wq, &entry->work);
1601}
1602
1603void qlcnic_sriov_vf_schedule_multi(struct net_device *netdev)
1604{
1605
1606 struct qlcnic_adapter *adapter = netdev_priv(netdev);
1607 struct qlcnic_back_channel *bc = &adapter->ahw->sriov->bc;
1608
Rajesh Borundiaf036e4f2013-04-19 07:01:10 +00001609 if (adapter->need_fw_reset)
1610 return;
1611
Rajesh Borundiae8b508e2013-03-29 05:46:38 +00001612 qlcnic_sriov_schedule_bc_async_work(bc, qlcnic_sriov_handle_async_multi,
1613 netdev);
1614}
Rajesh Borundiaf036e4f2013-04-19 07:01:10 +00001615
1616static int qlcnic_sriov_vf_reinit_driver(struct qlcnic_adapter *adapter)
1617{
1618 int err;
1619
Manish Chopra5c44bbd2013-09-13 06:13:47 -04001620 adapter->need_fw_reset = 0;
Manish Chopra91b86e32013-08-15 08:27:25 -04001621 qlcnic_83xx_reinit_mbx_work(adapter->ahw->mailbox);
Manish Choprae5c4e6c2013-08-02 00:57:40 -04001622 qlcnic_83xx_enable_mbx_interrupt(adapter);
Rajesh Borundiaf036e4f2013-04-19 07:01:10 +00001623
1624 err = qlcnic_sriov_cfg_bc_intr(adapter, 1);
1625 if (err)
1626 return err;
1627
1628 err = qlcnic_sriov_channel_cfg_cmd(adapter, QLCNIC_BC_CMD_CHANNEL_INIT);
1629 if (err)
1630 goto err_out_cleanup_bc_intr;
1631
1632 err = qlcnic_sriov_vf_init_driver(adapter);
1633 if (err)
1634 goto err_out_term_channel;
1635
1636 return 0;
1637
1638err_out_term_channel:
1639 qlcnic_sriov_channel_cfg_cmd(adapter, QLCNIC_BC_CMD_CHANNEL_TERM);
1640
1641err_out_cleanup_bc_intr:
1642 qlcnic_sriov_cfg_bc_intr(adapter, 0);
1643 return err;
1644}
1645
1646static void qlcnic_sriov_vf_attach(struct qlcnic_adapter *adapter)
1647{
1648 struct net_device *netdev = adapter->netdev;
1649
1650 if (netif_running(netdev)) {
1651 if (!qlcnic_up(adapter, netdev))
1652 qlcnic_restore_indev_addr(netdev, NETDEV_UP);
1653 }
1654
1655 netif_device_attach(netdev);
1656}
1657
1658static void qlcnic_sriov_vf_detach(struct qlcnic_adapter *adapter)
1659{
1660 struct qlcnic_hardware_context *ahw = adapter->ahw;
1661 struct qlcnic_intrpt_config *intr_tbl = ahw->intr_tbl;
1662 struct net_device *netdev = adapter->netdev;
1663 u8 i, max_ints = ahw->num_msix - 1;
1664
Rajesh Borundiaf036e4f2013-04-19 07:01:10 +00001665 netif_device_detach(netdev);
Manish Chopra068a8d12013-08-02 00:57:41 -04001666 qlcnic_83xx_detach_mailbox_work(adapter);
1667 qlcnic_83xx_disable_mbx_intr(adapter);
1668
Rajesh Borundiaf036e4f2013-04-19 07:01:10 +00001669 if (netif_running(netdev))
1670 qlcnic_down(adapter, netdev);
1671
1672 for (i = 0; i < max_ints; i++) {
1673 intr_tbl[i].id = i;
1674 intr_tbl[i].enabled = 0;
1675 intr_tbl[i].src = 0;
1676 }
1677 ahw->reset_context = 0;
1678}
1679
1680static int qlcnic_sriov_vf_handle_dev_ready(struct qlcnic_adapter *adapter)
1681{
1682 struct qlcnic_hardware_context *ahw = adapter->ahw;
1683 struct device *dev = &adapter->pdev->dev;
1684 struct qlc_83xx_idc *idc = &ahw->idc;
1685 u8 func = ahw->pci_func;
1686 u32 state;
1687
1688 if ((idc->prev_state == QLC_83XX_IDC_DEV_NEED_RESET) ||
1689 (idc->prev_state == QLC_83XX_IDC_DEV_INIT)) {
1690 if (!qlcnic_sriov_vf_reinit_driver(adapter)) {
1691 qlcnic_sriov_vf_attach(adapter);
1692 adapter->fw_fail_cnt = 0;
1693 dev_info(dev,
Masanari Iida8b513d02013-05-21 23:13:12 +09001694 "%s: Reinitialization of VF 0x%x done after FW reset\n",
Rajesh Borundiaf036e4f2013-04-19 07:01:10 +00001695 __func__, func);
1696 } else {
1697 dev_err(dev,
1698 "%s: Reinitialization of VF 0x%x failed after FW reset\n",
1699 __func__, func);
1700 state = QLCRDX(ahw, QLC_83XX_IDC_DEV_STATE);
1701 dev_info(dev, "Current state 0x%x after FW reset\n",
1702 state);
1703 }
1704 }
1705
1706 return 0;
1707}
1708
/* Handle a requested context reset while the device stays READY.
 * Escalation policy:
 *  - first two attempts: defer, flag a FW reset and wait to see whether
 *    firmware is actually hung;
 *  - beyond QLC_83XX_VF_RESET_FAIL_THRESH attempts: give up and shut
 *    the interface down (-EIO);
 *  - otherwise: detach, reinitialize the VF driver and re-attach.
 */
static int qlcnic_sriov_vf_handle_context_reset(struct qlcnic_adapter *adapter)
{
	struct qlcnic_hardware_context *ahw = adapter->ahw;
	struct qlcnic_mailbox *mbx = ahw->mailbox;
	struct device *dev = &adapter->pdev->dev;
	struct qlc_83xx_idc *idc = &ahw->idc;
	u8 func = ahw->pci_func;
	u32 state;

	adapter->reset_ctx_cnt++;

	/* Skip the context reset and check if FW is hung */
	if (adapter->reset_ctx_cnt < 3) {
		adapter->need_fw_reset = 1;
		clear_bit(QLC_83XX_MBX_READY, &mbx->status);
		dev_info(dev,
			 "Resetting context, wait here to check if FW is in failed state\n");
		return 0;
	}

	/* Check if number of resets exceed the threshold.
	 * If it exceeds the threshold just fail the VF.
	 */
	if (adapter->reset_ctx_cnt > QLC_83XX_VF_RESET_FAIL_THRESH) {
		clear_bit(QLC_83XX_MODULE_LOADED, &idc->status);
		adapter->tx_timeo_cnt = 0;
		adapter->fw_fail_cnt = 0;
		adapter->reset_ctx_cnt = 0;
		qlcnic_sriov_vf_detach(adapter);
		dev_err(dev,
			"Device context resets have exceeded the threshold, device interface will be shutdown\n");
		return -EIO;
	}

	dev_info(dev, "Resetting context of VF 0x%x\n", func);
	dev_info(dev, "%s: Context reset count %d for VF 0x%x\n",
		 __func__, adapter->reset_ctx_cnt, func);
	set_bit(__QLCNIC_RESETTING, &adapter->state);
	/* need_fw_reset is raised only across the detach so in-flight BC
	 * work bails out, then lowered before reinit.
	 */
	adapter->need_fw_reset = 1;
	clear_bit(QLC_83XX_MBX_READY, &mbx->status);
	qlcnic_sriov_vf_detach(adapter);
	adapter->need_fw_reset = 0;

	if (!qlcnic_sriov_vf_reinit_driver(adapter)) {
		qlcnic_sriov_vf_attach(adapter);
		adapter->tx_timeo_cnt = 0;
		adapter->reset_ctx_cnt = 0;
		adapter->fw_fail_cnt = 0;
		dev_info(dev, "Done resetting context for VF 0x%x\n", func);
	} else {
		dev_err(dev, "%s: Reinitialization of VF 0x%x failed\n",
			__func__, func);
		state = QLCRDX(ahw, QLC_83XX_IDC_DEV_STATE);
		dev_info(dev, "%s: Current state 0x%x\n", __func__, state);
	}

	return 0;
}
1767
1768static int qlcnic_sriov_vf_idc_ready_state(struct qlcnic_adapter *adapter)
1769{
1770 struct qlcnic_hardware_context *ahw = adapter->ahw;
1771 int ret = 0;
1772
1773 if (ahw->idc.prev_state != QLC_83XX_IDC_DEV_READY)
1774 ret = qlcnic_sriov_vf_handle_dev_ready(adapter);
1775 else if (ahw->reset_context)
1776 ret = qlcnic_sriov_vf_handle_context_reset(adapter);
1777
1778 clear_bit(__QLCNIC_RESETTING, &adapter->state);
1779 return ret;
1780}
1781
1782static int qlcnic_sriov_vf_idc_failed_state(struct qlcnic_adapter *adapter)
1783{
1784 struct qlc_83xx_idc *idc = &adapter->ahw->idc;
1785
1786 dev_err(&adapter->pdev->dev, "Device is in failed state\n");
1787 if (idc->prev_state == QLC_83XX_IDC_DEV_READY)
1788 qlcnic_sriov_vf_detach(adapter);
1789
1790 clear_bit(QLC_83XX_MODULE_LOADED, &idc->status);
1791 clear_bit(__QLCNIC_RESETTING, &adapter->state);
1792 return -EIO;
1793}
1794
1795static int
1796qlcnic_sriov_vf_idc_need_quiescent_state(struct qlcnic_adapter *adapter)
1797{
Manish Chopra068a8d12013-08-02 00:57:41 -04001798 struct qlcnic_mailbox *mbx = adapter->ahw->mailbox;
Rajesh Borundiaf036e4f2013-04-19 07:01:10 +00001799 struct qlc_83xx_idc *idc = &adapter->ahw->idc;
1800
1801 dev_info(&adapter->pdev->dev, "Device is in quiescent state\n");
1802 if (idc->prev_state == QLC_83XX_IDC_DEV_READY) {
1803 set_bit(__QLCNIC_RESETTING, &adapter->state);
1804 adapter->tx_timeo_cnt = 0;
1805 adapter->reset_ctx_cnt = 0;
Manish Chopra068a8d12013-08-02 00:57:41 -04001806 clear_bit(QLC_83XX_MBX_READY, &mbx->status);
Rajesh Borundiaf036e4f2013-04-19 07:01:10 +00001807 qlcnic_sriov_vf_detach(adapter);
1808 }
1809
1810 return 0;
1811}
1812
1813static int qlcnic_sriov_vf_idc_init_reset_state(struct qlcnic_adapter *adapter)
1814{
Manish Chopra068a8d12013-08-02 00:57:41 -04001815 struct qlcnic_mailbox *mbx = adapter->ahw->mailbox;
Rajesh Borundiaf036e4f2013-04-19 07:01:10 +00001816 struct qlc_83xx_idc *idc = &adapter->ahw->idc;
1817 u8 func = adapter->ahw->pci_func;
1818
1819 if (idc->prev_state == QLC_83XX_IDC_DEV_READY) {
1820 dev_err(&adapter->pdev->dev,
1821 "Firmware hang detected by VF 0x%x\n", func);
1822 set_bit(__QLCNIC_RESETTING, &adapter->state);
1823 adapter->tx_timeo_cnt = 0;
1824 adapter->reset_ctx_cnt = 0;
Manish Chopra068a8d12013-08-02 00:57:41 -04001825 clear_bit(QLC_83XX_MBX_READY, &mbx->status);
Rajesh Borundiaf036e4f2013-04-19 07:01:10 +00001826 qlcnic_sriov_vf_detach(adapter);
1827 }
1828 return 0;
1829}
1830
1831static int qlcnic_sriov_vf_idc_unknown_state(struct qlcnic_adapter *adapter)
1832{
1833 dev_err(&adapter->pdev->dev, "%s: Device in unknown state\n", __func__);
1834 return 0;
1835}
1836
1837static void qlcnic_sriov_vf_poll_dev_state(struct work_struct *work)
1838{
1839 struct qlcnic_adapter *adapter;
1840 struct qlc_83xx_idc *idc;
1841 int ret = 0;
1842
1843 adapter = container_of(work, struct qlcnic_adapter, fw_work.work);
1844 idc = &adapter->ahw->idc;
1845 idc->curr_state = QLCRDX(adapter->ahw, QLC_83XX_IDC_DEV_STATE);
1846
1847 switch (idc->curr_state) {
1848 case QLC_83XX_IDC_DEV_READY:
1849 ret = qlcnic_sriov_vf_idc_ready_state(adapter);
1850 break;
1851 case QLC_83XX_IDC_DEV_NEED_RESET:
1852 case QLC_83XX_IDC_DEV_INIT:
1853 ret = qlcnic_sriov_vf_idc_init_reset_state(adapter);
1854 break;
1855 case QLC_83XX_IDC_DEV_NEED_QUISCENT:
1856 ret = qlcnic_sriov_vf_idc_need_quiescent_state(adapter);
1857 break;
1858 case QLC_83XX_IDC_DEV_FAILED:
1859 ret = qlcnic_sriov_vf_idc_failed_state(adapter);
1860 break;
1861 case QLC_83XX_IDC_DEV_QUISCENT:
1862 break;
1863 default:
1864 ret = qlcnic_sriov_vf_idc_unknown_state(adapter);
1865 }
1866
1867 idc->prev_state = idc->curr_state;
1868 if (!ret && test_bit(QLC_83XX_MODULE_LOADED, &idc->status))
1869 qlcnic_schedule_work(adapter, qlcnic_sriov_vf_poll_dev_state,
1870 idc->delay);
1871}
1872
1873static void qlcnic_sriov_vf_cancel_fw_work(struct qlcnic_adapter *adapter)
1874{
1875 while (test_and_set_bit(__QLCNIC_RESETTING, &adapter->state))
1876 msleep(20);
1877
1878 clear_bit(QLC_83XX_MODULE_LOADED, &adapter->ahw->idc.status);
1879 clear_bit(__QLCNIC_RESETTING, &adapter->state);
1880 cancel_delayed_work_sync(&adapter->fw_work);
1881}
Rajesh Borundia91b72822013-04-19 07:01:12 +00001882
Manish Chopra154d0c82013-12-17 09:01:53 -05001883static int qlcnic_sriov_check_vlan_id(struct qlcnic_sriov *sriov,
1884 struct qlcnic_vf_info *vf, u16 vlan_id)
1885{
1886 int i, err = -EINVAL;
1887
1888 if (!vf->sriov_vlans)
1889 return err;
1890
1891 mutex_lock(&vf->vlan_list_lock);
1892
1893 for (i = 0; i < sriov->num_allowed_vlans; i++) {
1894 if (vf->sriov_vlans[i] == vlan_id) {
1895 err = 0;
1896 break;
1897 }
1898 }
1899
1900 mutex_unlock(&vf->vlan_list_lock);
1901 return err;
1902}
1903
1904static int qlcnic_sriov_validate_num_vlans(struct qlcnic_sriov *sriov,
1905 struct qlcnic_vf_info *vf)
1906{
1907 int err = 0;
1908
1909 mutex_lock(&vf->vlan_list_lock);
1910
1911 if (vf->num_vlan >= sriov->num_allowed_vlans)
1912 err = -EINVAL;
1913
1914 mutex_unlock(&vf->vlan_list_lock);
1915 return err;
1916}
1917
1918static int qlcnic_sriov_validate_vlan_cfg(struct qlcnic_adapter *adapter,
Rajesh Borundia91b72822013-04-19 07:01:12 +00001919 u16 vid, u8 enable)
1920{
Manish Chopra154d0c82013-12-17 09:01:53 -05001921 struct qlcnic_sriov *sriov = adapter->ahw->sriov;
1922 struct qlcnic_vf_info *vf;
1923 bool vlan_exist;
Rajesh Borundia91b72822013-04-19 07:01:12 +00001924 u8 allowed = 0;
1925 int i;
1926
Manish Chopra154d0c82013-12-17 09:01:53 -05001927 vf = &adapter->ahw->sriov->vf_info[0];
1928 vlan_exist = qlcnic_sriov_check_any_vlan(vf);
Rajesh Borundia91b72822013-04-19 07:01:12 +00001929 if (sriov->vlan_mode != QLC_GUEST_VLAN_MODE)
1930 return -EINVAL;
1931
1932 if (enable) {
Manish Chopra154d0c82013-12-17 09:01:53 -05001933 if (qlcnic_83xx_vf_check(adapter) && vlan_exist)
1934 return -EINVAL;
1935
1936 if (qlcnic_sriov_validate_num_vlans(sriov, vf))
Rajesh Borundia91b72822013-04-19 07:01:12 +00001937 return -EINVAL;
1938
1939 if (sriov->any_vlan) {
1940 for (i = 0; i < sriov->num_allowed_vlans; i++) {
1941 if (sriov->allowed_vlans[i] == vid)
1942 allowed = 1;
1943 }
1944
1945 if (!allowed)
1946 return -EINVAL;
1947 }
1948 } else {
Manish Chopra154d0c82013-12-17 09:01:53 -05001949 if (!vlan_exist || qlcnic_sriov_check_vlan_id(sriov, vf, vid))
Rajesh Borundia91b72822013-04-19 07:01:12 +00001950 return -EINVAL;
1951 }
1952
1953 return 0;
1954}
1955
Manish Chopra154d0c82013-12-17 09:01:53 -05001956static void qlcnic_sriov_vlan_operation(struct qlcnic_vf_info *vf, u16 vlan_id,
1957 enum qlcnic_vlan_operations opcode)
1958{
1959 struct qlcnic_adapter *adapter = vf->adapter;
1960 struct qlcnic_sriov *sriov;
1961
1962 sriov = adapter->ahw->sriov;
1963
1964 if (!vf->sriov_vlans)
1965 return;
1966
1967 mutex_lock(&vf->vlan_list_lock);
1968
1969 switch (opcode) {
1970 case QLC_VLAN_ADD:
1971 qlcnic_sriov_add_vlan_id(sriov, vf, vlan_id);
1972 break;
1973 case QLC_VLAN_DELETE:
1974 qlcnic_sriov_del_vlan_id(sriov, vf, vlan_id);
1975 break;
1976 default:
1977 netdev_err(adapter->netdev, "Invalid VLAN operation\n");
1978 }
1979
1980 mutex_unlock(&vf->vlan_list_lock);
1981 return;
1982}
1983
Rajesh Borundia91b72822013-04-19 07:01:12 +00001984int qlcnic_sriov_cfg_vf_guest_vlan(struct qlcnic_adapter *adapter,
1985 u16 vid, u8 enable)
1986{
1987 struct qlcnic_sriov *sriov = adapter->ahw->sriov;
Manish Chopra154d0c82013-12-17 09:01:53 -05001988 struct qlcnic_vf_info *vf;
Rajesh Borundia91b72822013-04-19 07:01:12 +00001989 struct qlcnic_cmd_args cmd;
1990 int ret;
1991
1992 if (vid == 0)
1993 return 0;
1994
Manish Chopra154d0c82013-12-17 09:01:53 -05001995 vf = &adapter->ahw->sriov->vf_info[0];
1996 ret = qlcnic_sriov_validate_vlan_cfg(adapter, vid, enable);
Rajesh Borundia91b72822013-04-19 07:01:12 +00001997 if (ret)
1998 return ret;
1999
2000 ret = qlcnic_sriov_alloc_bc_mbx_args(&cmd,
2001 QLCNIC_BC_CMD_CFG_GUEST_VLAN);
2002 if (ret)
2003 return ret;
2004
2005 cmd.req.arg[1] = (enable & 1) | vid << 16;
2006
2007 qlcnic_sriov_cleanup_async_list(&sriov->bc);
2008 ret = qlcnic_issue_cmd(adapter, &cmd);
2009 if (ret) {
2010 dev_err(&adapter->pdev->dev,
2011 "Failed to configure guest VLAN, err=%d\n", ret);
2012 } else {
2013 qlcnic_free_mac_list(adapter);
2014
2015 if (enable)
Manish Chopra154d0c82013-12-17 09:01:53 -05002016 qlcnic_sriov_vlan_operation(vf, vid, QLC_VLAN_ADD);
Rajesh Borundia91b72822013-04-19 07:01:12 +00002017 else
Manish Chopra154d0c82013-12-17 09:01:53 -05002018 qlcnic_sriov_vlan_operation(vf, vid, QLC_VLAN_DELETE);
Rajesh Borundia91b72822013-04-19 07:01:12 +00002019
Manish Chopra154d0c82013-12-17 09:01:53 -05002020 qlcnic_set_multi(adapter->netdev);
Rajesh Borundia91b72822013-04-19 07:01:12 +00002021 }
2022
2023 qlcnic_free_mbx_args(&cmd);
2024 return ret;
2025}
2026
2027static void qlcnic_sriov_vf_free_mac_list(struct qlcnic_adapter *adapter)
2028{
2029 struct list_head *head = &adapter->mac_list;
Manish Chopra154d0c82013-12-17 09:01:53 -05002030 struct qlcnic_mac_vlan_list *cur;
Rajesh Borundia91b72822013-04-19 07:01:12 +00002031
2032 while (!list_empty(head)) {
Manish Chopra154d0c82013-12-17 09:01:53 -05002033 cur = list_entry(head->next, struct qlcnic_mac_vlan_list, list);
2034 qlcnic_sre_macaddr_change(adapter, cur->mac_addr, cur->vlan_id,
2035 QLCNIC_MAC_DEL);
Rajesh Borundia91b72822013-04-19 07:01:12 +00002036 list_del(&cur->list);
2037 kfree(cur);
2038 }
2039}
Rajesh Borundia486a5bc2013-06-22 04:12:06 -04002040
Manish Chopra154d0c82013-12-17 09:01:53 -05002041
Rajesh Borundia486a5bc2013-06-22 04:12:06 -04002042int qlcnic_sriov_vf_shutdown(struct pci_dev *pdev)
2043{
2044 struct qlcnic_adapter *adapter = pci_get_drvdata(pdev);
2045 struct net_device *netdev = adapter->netdev;
2046 int retval;
2047
2048 netif_device_detach(netdev);
2049 qlcnic_cancel_idc_work(adapter);
2050
2051 if (netif_running(netdev))
2052 qlcnic_down(adapter, netdev);
2053
2054 qlcnic_sriov_channel_cfg_cmd(adapter, QLCNIC_BC_CMD_CHANNEL_TERM);
2055 qlcnic_sriov_cfg_bc_intr(adapter, 0);
2056 qlcnic_83xx_disable_mbx_intr(adapter);
2057 cancel_delayed_work_sync(&adapter->idc_aen_work);
2058
2059 retval = pci_save_state(pdev);
2060 if (retval)
2061 return retval;
2062
2063 return 0;
2064}
2065
2066int qlcnic_sriov_vf_resume(struct qlcnic_adapter *adapter)
2067{
2068 struct qlc_83xx_idc *idc = &adapter->ahw->idc;
2069 struct net_device *netdev = adapter->netdev;
2070 int err;
2071
2072 set_bit(QLC_83XX_MODULE_LOADED, &idc->status);
Manish Choprae5c4e6c2013-08-02 00:57:40 -04002073 qlcnic_83xx_enable_mbx_interrupt(adapter);
Rajesh Borundia486a5bc2013-06-22 04:12:06 -04002074 err = qlcnic_sriov_cfg_bc_intr(adapter, 1);
2075 if (err)
2076 return err;
2077
2078 err = qlcnic_sriov_channel_cfg_cmd(adapter, QLCNIC_BC_CMD_CHANNEL_INIT);
2079 if (!err) {
2080 if (netif_running(netdev)) {
2081 err = qlcnic_up(adapter, netdev);
2082 if (!err)
2083 qlcnic_restore_indev_addr(netdev, NETDEV_UP);
2084 }
2085 }
2086
2087 netif_device_attach(netdev);
2088 qlcnic_schedule_work(adapter, qlcnic_sriov_vf_poll_dev_state,
2089 idc->delay);
2090 return err;
2091}
Manish Chopra154d0c82013-12-17 09:01:53 -05002092
2093void qlcnic_sriov_alloc_vlans(struct qlcnic_adapter *adapter)
2094{
2095 struct qlcnic_sriov *sriov = adapter->ahw->sriov;
2096 struct qlcnic_vf_info *vf;
2097 int i;
2098
2099 for (i = 0; i < sriov->num_vfs; i++) {
2100 vf = &sriov->vf_info[i];
2101 vf->sriov_vlans = kcalloc(sriov->num_allowed_vlans,
2102 sizeof(*vf->sriov_vlans), GFP_KERNEL);
2103 }
2104}
2105
2106void qlcnic_sriov_free_vlans(struct qlcnic_adapter *adapter)
2107{
2108 struct qlcnic_sriov *sriov = adapter->ahw->sriov;
2109 struct qlcnic_vf_info *vf;
2110 int i;
2111
2112 for (i = 0; i < sriov->num_vfs; i++) {
2113 vf = &sriov->vf_info[i];
2114 kfree(vf->sriov_vlans);
2115 vf->sriov_vlans = NULL;
2116 }
2117}
2118
2119void qlcnic_sriov_add_vlan_id(struct qlcnic_sriov *sriov,
2120 struct qlcnic_vf_info *vf, u16 vlan_id)
2121{
2122 int i;
2123
2124 for (i = 0; i < sriov->num_allowed_vlans; i++) {
2125 if (!vf->sriov_vlans[i]) {
2126 vf->sriov_vlans[i] = vlan_id;
2127 vf->num_vlan++;
2128 return;
2129 }
2130 }
2131}
2132
2133void qlcnic_sriov_del_vlan_id(struct qlcnic_sriov *sriov,
2134 struct qlcnic_vf_info *vf, u16 vlan_id)
2135{
2136 int i;
2137
2138 for (i = 0; i < sriov->num_allowed_vlans; i++) {
2139 if (vf->sriov_vlans[i] == vlan_id) {
2140 vf->sriov_vlans[i] = 0;
2141 vf->num_vlan--;
2142 return;
2143 }
2144 }
2145}
2146
2147bool qlcnic_sriov_check_any_vlan(struct qlcnic_vf_info *vf)
2148{
2149 bool err = false;
2150
2151 mutex_lock(&vf->vlan_list_lock);
2152
2153 if (vf->num_vlan)
2154 err = true;
2155
2156 mutex_unlock(&vf->vlan_list_lock);
2157 return err;
2158}