/******************************************************************************
 *
 * This file is provided under a dual BSD/GPLv2 license. When using or
 * redistributing this file, you may do so under either license.
 *
 * GPL LICENSE SUMMARY
 *
 * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
 * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH
 * Copyright(c) 2015 Intel Deutschland GmbH
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
 * USA
 *
 * The full GNU General Public License is included in this distribution
 * in the file called COPYING.
 *
 * Contact Information:
 *  Intel Linux Wireless <linuxwifi@intel.com>
 *  Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
 *
 * BSD LICENSE
 *
 * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
 * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH
 * Copyright(c) 2015 Intel Deutschland GmbH
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 *  * Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 *  * Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 *  * Neither the name Intel Corporation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 *****************************************************************************/
#include <net/ipv6.h>
#include <net/addrconf.h>
#include <linux/bitops.h>
#include "mvm.h"

void iwl_mvm_set_wowlan_qos_seq(struct iwl_mvm_sta *mvm_ap_sta,
				struct iwl_wowlan_config_cmd *cmd)
{
	int i;

	/*
	 * For QoS counters, we store the one to use next, so subtract 0x10
	 * since the uCode will add 0x10 *before* using the value while we
	 * increment after using the value (i.e. store the next value to use).
	 */
	for (i = 0; i < IWL_MAX_TID_COUNT; i++) {
		u16 seq = mvm_ap_sta->tid_data[i].seq_number;
		seq -= 0x10;
		cmd->qos_seq[i] = cpu_to_le16(seq);
	}
}

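/*
 * Build and send the PROT_OFFLOAD_CONFIG_CMD that tells the firmware
 * which ARP and IPv6 NS offloads to perform while the host sleeps.
 * The exact command layout depends on the firmware capability flags.
 */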
int iwl_mvm_send_proto_offload(struct iwl_mvm *mvm,
			       struct ieee80211_vif *vif,
			       bool disable_offloading,
			       bool offload_ns,
			       u32 cmd_flags)
{
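	/*
	 * The firmware accepts one of several command layouts; only the
	 * union member matching its capability flags is filled in and sent.
	 */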
	union {
		struct iwl_proto_offload_cmd_v1 v1;
		struct iwl_proto_offload_cmd_v2 v2;
		struct iwl_proto_offload_cmd_v3_small v3s;
		struct iwl_proto_offload_cmd_v3_large v3l;
	} cmd = {};
	struct iwl_host_cmd hcmd = {
		.id = PROT_OFFLOAD_CONFIG_CMD,
		.flags = cmd_flags,
		.data[0] = &cmd,
		.dataflags[0] = IWL_HCMD_DFL_DUP,
	};
	struct iwl_proto_offload_cmd_common *common;
	u32 enabled = 0, size;
	u32 capa_flags = mvm->fw->ucode_capa.flags;
#if IS_ENABLED(CONFIG_IPV6)
	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
	int i;
	/*
	 * Skip tentative addresses when NS offload is enabled to avoid
	 * violating RFC 4862.
	 * Keep tentative addresses when NS offload is disabled so the NS
	 * packets will not be filtered out and will wake up the host.
	 */
	bool skip_tentative = offload_ns;

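	/*
	 * New-style NS offload (v3 small/large layouts): build a list of
	 * target addresses plus de-duplicated NS configs (solicited-node
	 * address and MAC) for the firmware to answer on the host's behalf.
	 */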
	if (capa_flags & IWL_UCODE_TLV_FLAGS_NEW_NSOFFL_SMALL ||
	    capa_flags & IWL_UCODE_TLV_FLAGS_NEW_NSOFFL_LARGE) {
		struct iwl_ns_config *nsc;
		struct iwl_targ_addr *addrs;
		int n_nsc, n_addrs;
		int c;
		int num_skipped = 0;

		if (capa_flags & IWL_UCODE_TLV_FLAGS_NEW_NSOFFL_SMALL) {
			nsc = cmd.v3s.ns_config;
			n_nsc = IWL_PROTO_OFFLOAD_NUM_NS_CONFIG_V3S;
			addrs = cmd.v3s.targ_addrs;
			n_addrs = IWL_PROTO_OFFLOAD_NUM_IPV6_ADDRS_V3S;
		} else {
			nsc = cmd.v3l.ns_config;
			n_nsc = IWL_PROTO_OFFLOAD_NUM_NS_CONFIG_V3L;
			addrs = cmd.v3l.targ_addrs;
			n_addrs = IWL_PROTO_OFFLOAD_NUM_IPV6_ADDRS_V3L;
		}

		/*
		 * For each address we have (and that will fit), fill a
		 * target address struct, and group addresses that share a
		 * solicited-node address into a single NS offload config.
		 */
		for (i = 0, c = 0;
		     i < mvmvif->num_target_ipv6_addrs &&
		     i < n_addrs && c < n_nsc; i++) {
			struct in6_addr solicited_addr;
			int j;

			if (skip_tentative &&
			    test_bit(i, mvmvif->tentative_addrs)) {
				num_skipped++;
				continue;
			}

			addrconf_addr_solict_mult(&mvmvif->target_ipv6_addrs[i],
						  &solicited_addr);
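			/*
			 * Several unicast addresses can map to the same
			 * solicited-node address; reuse an existing NS config
			 * entry if one already matches, otherwise allocate a
			 * new one.
			 */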
			for (j = 0; j < c; j++)
				if (ipv6_addr_cmp(&nsc[j].dest_ipv6_addr,
						  &solicited_addr) == 0)
					break;
			if (j == c)
				c++;
			addrs[i].addr = mvmvif->target_ipv6_addrs[i];
			addrs[i].config_num = cpu_to_le32(j);
			nsc[j].dest_ipv6_addr = solicited_addr;
			memcpy(nsc[j].target_mac_addr, vif->addr, ETH_ALEN);
		}

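		/*
		 * i counts the address slots that were walked and num_skipped
		 * the tentative ones left out, so i - num_skipped is the
		 * number of addresses actually programmed.
		 */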
		if (mvmvif->num_target_ipv6_addrs - num_skipped)
			enabled |= IWL_D3_PROTO_IPV6_VALID;

		if (capa_flags & IWL_UCODE_TLV_FLAGS_NEW_NSOFFL_SMALL)
			cmd.v3s.num_valid_ipv6_addrs =
				cpu_to_le32(i - num_skipped);
		else
			cmd.v3l.num_valid_ipv6_addrs =
				cpu_to_le32(i - num_skipped);
	} else if (capa_flags & IWL_UCODE_TLV_FLAGS_D3_6_IPV6_ADDRS) {
		bool found = false;

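		/*
		 * v2 layout: copy the usable target addresses straight into
		 * the command (at most IWL_PROTO_OFFLOAD_NUM_IPV6_ADDRS_V2)
		 * and answer NS with a single NDP MAC address.
		 */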
		BUILD_BUG_ON(sizeof(cmd.v2.target_ipv6_addr[0]) !=
			     sizeof(mvmvif->target_ipv6_addrs[0]));

		for (i = 0; i < min(mvmvif->num_target_ipv6_addrs,
				    IWL_PROTO_OFFLOAD_NUM_IPV6_ADDRS_V2); i++) {
			if (skip_tentative &&
			    test_bit(i, mvmvif->tentative_addrs))
				continue;

			memcpy(cmd.v2.target_ipv6_addr[i],
			       &mvmvif->target_ipv6_addrs[i],
			       sizeof(cmd.v2.target_ipv6_addr[i]));

			found = true;
		}
		if (found) {
			enabled |= IWL_D3_PROTO_IPV6_VALID;
			memcpy(cmd.v2.ndp_mac_addr, vif->addr, ETH_ALEN);
		}
	} else {
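		/*
		 * Oldest (v1) layout: same copy loop, limited to
		 * IWL_PROTO_OFFLOAD_NUM_IPV6_ADDRS_V1 slots.
		 */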
		bool found = false;
		BUILD_BUG_ON(sizeof(cmd.v1.target_ipv6_addr[0]) !=
			     sizeof(mvmvif->target_ipv6_addrs[0]));

		for (i = 0; i < min(mvmvif->num_target_ipv6_addrs,
				    IWL_PROTO_OFFLOAD_NUM_IPV6_ADDRS_V1); i++) {
			if (skip_tentative &&
			    test_bit(i, mvmvif->tentative_addrs))
				continue;

			memcpy(cmd.v1.target_ipv6_addr[i],
			       &mvmvif->target_ipv6_addrs[i],
			       sizeof(cmd.v1.target_ipv6_addr[i]));

			found = true;
		}

		if (found) {
			enabled |= IWL_D3_PROTO_IPV6_VALID;
			memcpy(cmd.v1.ndp_mac_addr, vif->addr, ETH_ALEN);
		}
	}

	if (offload_ns && (enabled & IWL_D3_PROTO_IPV6_VALID))
		enabled |= IWL_D3_PROTO_OFFLOAD_NS;
#endif
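	/*
	 * Pick the command layout matching the firmware capabilities; the
	 * fields below are common to all versions.
	 */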
	if (capa_flags & IWL_UCODE_TLV_FLAGS_NEW_NSOFFL_SMALL) {
		common = &cmd.v3s.common;
		size = sizeof(cmd.v3s);
	} else if (capa_flags & IWL_UCODE_TLV_FLAGS_NEW_NSOFFL_LARGE) {
		common = &cmd.v3l.common;
		size = sizeof(cmd.v3l);
	} else if (capa_flags & IWL_UCODE_TLV_FLAGS_D3_6_IPV6_ADDRS) {
		common = &cmd.v2.common;
		size = sizeof(cmd.v2);
	} else {
		common = &cmd.v1.common;
		size = sizeof(cmd.v1);
	}

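	/* Program ARP offload for the first IPv4 address on the interface. */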
	if (vif->bss_conf.arp_addr_cnt) {
		enabled |= IWL_D3_PROTO_OFFLOAD_ARP | IWL_D3_PROTO_IPV4_VALID;
		common->host_ipv4_addr = vif->bss_conf.arp_addr_list[0];
		memcpy(common->arp_mac_addr, vif->addr, ETH_ALEN);
	}

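	/*
	 * When offloading is disabled, the command is still sent, but with
	 * the enabled bitmap left at zero.
	 */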
	if (!disable_offloading)
		common->enabled = cpu_to_le32(enabled);

	hcmd.len[0] = size;
	return iwl_mvm_send_cmd(mvm, &hcmd);
}