Ilan Peer | 3a6490c | 2012-06-03 13:36:51 +0300 | [diff] [blame] | 1 | /****************************************************************************** |
| 2 | * |
| 3 | * This file is provided under a dual BSD/GPLv2 license. When using or |
| 4 | * redistributing this file, you may do so under either license. |
| 5 | * |
| 6 | * GPL LICENSE SUMMARY |
| 7 | * |
Johannes Berg | 128e63e | 2013-01-21 21:39:26 +0100 | [diff] [blame] | 8 | * Copyright(c) 2010 - 2013 Intel Corporation. All rights reserved. |
Ilan Peer | 3a6490c | 2012-06-03 13:36:51 +0300 | [diff] [blame] | 9 | * |
| 10 | * This program is free software; you can redistribute it and/or modify |
| 11 | * it under the terms of version 2 of the GNU General Public License as |
| 12 | * published by the Free Software Foundation. |
| 13 | * |
| 14 | * This program is distributed in the hope that it will be useful, but |
| 15 | * WITHOUT ANY WARRANTY; without even the implied warranty of |
| 16 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU |
| 17 | * General Public License for more details. |
| 18 | * |
| 19 | * You should have received a copy of the GNU General Public License |
| 20 | * along with this program; if not, write to the Free Software |
| 21 | * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110, |
| 22 | * USA |
| 23 | * |
| 24 | * The full GNU General Public License is included in this distribution |
Emmanuel Grumbach | 410dc5a | 2013-02-18 09:22:28 +0200 | [diff] [blame] | 25 | * in the file called COPYING. |
Ilan Peer | 3a6490c | 2012-06-03 13:36:51 +0300 | [diff] [blame] | 26 | * |
| 27 | * Contact Information: |
| 28 | * Intel Linux Wireless <ilw@linux.intel.com> |
| 29 | * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 |
| 30 | * |
| 31 | * BSD LICENSE |
| 32 | * |
Johannes Berg | 128e63e | 2013-01-21 21:39:26 +0100 | [diff] [blame] | 33 | * Copyright(c) 2010 - 2013 Intel Corporation. All rights reserved. |
Ilan Peer | 3a6490c | 2012-06-03 13:36:51 +0300 | [diff] [blame] | 34 | * All rights reserved. |
| 35 | * |
| 36 | * Redistribution and use in source and binary forms, with or without |
| 37 | * modification, are permitted provided that the following conditions |
| 38 | * are met: |
| 39 | * |
| 40 | * * Redistributions of source code must retain the above copyright |
| 41 | * notice, this list of conditions and the following disclaimer. |
| 42 | * * Redistributions in binary form must reproduce the above copyright |
| 43 | * notice, this list of conditions and the following disclaimer in |
| 44 | * the documentation and/or other materials provided with the |
| 45 | * distribution. |
| 46 | * * Neither the name Intel Corporation nor the names of its |
| 47 | * contributors may be used to endorse or promote products derived |
| 48 | * from this software without specific prior written permission. |
| 49 | * |
| 50 | * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS |
| 51 | * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT |
| 52 | * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR |
| 53 | * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT |
| 54 | * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, |
| 55 | * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT |
| 56 | * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, |
| 57 | * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY |
| 58 | * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT |
| 59 | * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE |
| 60 | * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. |
| 61 | * |
| 62 | *****************************************************************************/ |
| 63 | |
Ilan Peer | c76fe6d | 2012-06-04 19:39:30 +0300 | [diff] [blame] | 64 | #include <linux/export.h> |
Ilan Peer | 3a6490c | 2012-06-03 13:36:51 +0300 | [diff] [blame] | 65 | #include <net/netlink.h> |
Ilan Peer | c76fe6d | 2012-06-04 19:39:30 +0300 | [diff] [blame] | 66 | |
Johannes Berg | 48e2934 | 2013-03-01 00:13:33 +0100 | [diff] [blame] | 67 | #include "iwl-drv.h" |
Ilan Peer | 3a6490c | 2012-06-03 13:36:51 +0300 | [diff] [blame] | 68 | #include "iwl-io.h" |
| 69 | #include "iwl-fh.h" |
| 70 | #include "iwl-prph.h" |
| 71 | #include "iwl-trans.h" |
| 72 | #include "iwl-test.h" |
| 73 | #include "iwl-csr.h" |
| 74 | #include "iwl-testmode.h" |
| 75 | |
| 76 | /* |
| 77 | * Periphery registers absolute lower bound. This is used in order to |
 * differentiate registry access through HBUS_TARG_PRPH_* and
| 79 | * HBUS_TARG_MEM_* accesses. |
| 80 | */ |
| 81 | #define IWL_ABS_PRPH_START (0xA00000) |
| 82 | |
| 83 | /* |
| 84 | * The TLVs used in the gnl message policy between the kernel module and |
| 85 | * user space application. iwl_testmode_gnl_msg_policy is to be carried |
| 86 | * through the NL80211_CMD_TESTMODE channel regulated by nl80211. |
| 87 | * See iwl-testmode.h |
| 88 | */ |
| 89 | static |
| 90 | struct nla_policy iwl_testmode_gnl_msg_policy[IWL_TM_ATTR_MAX] = { |
| 91 | [IWL_TM_ATTR_COMMAND] = { .type = NLA_U32, }, |
| 92 | |
| 93 | [IWL_TM_ATTR_UCODE_CMD_ID] = { .type = NLA_U8, }, |
| 94 | [IWL_TM_ATTR_UCODE_CMD_DATA] = { .type = NLA_UNSPEC, }, |
| 95 | |
| 96 | [IWL_TM_ATTR_REG_OFFSET] = { .type = NLA_U32, }, |
| 97 | [IWL_TM_ATTR_REG_VALUE8] = { .type = NLA_U8, }, |
| 98 | [IWL_TM_ATTR_REG_VALUE32] = { .type = NLA_U32, }, |
| 99 | |
| 100 | [IWL_TM_ATTR_SYNC_RSP] = { .type = NLA_UNSPEC, }, |
| 101 | [IWL_TM_ATTR_UCODE_RX_PKT] = { .type = NLA_UNSPEC, }, |
| 102 | |
| 103 | [IWL_TM_ATTR_EEPROM] = { .type = NLA_UNSPEC, }, |
| 104 | |
| 105 | [IWL_TM_ATTR_TRACE_ADDR] = { .type = NLA_UNSPEC, }, |
| 106 | [IWL_TM_ATTR_TRACE_DUMP] = { .type = NLA_UNSPEC, }, |
| 107 | [IWL_TM_ATTR_TRACE_SIZE] = { .type = NLA_U32, }, |
| 108 | |
| 109 | [IWL_TM_ATTR_FIXRATE] = { .type = NLA_U32, }, |
| 110 | |
| 111 | [IWL_TM_ATTR_UCODE_OWNER] = { .type = NLA_U8, }, |
| 112 | |
| 113 | [IWL_TM_ATTR_MEM_ADDR] = { .type = NLA_U32, }, |
| 114 | [IWL_TM_ATTR_BUFFER_SIZE] = { .type = NLA_U32, }, |
| 115 | [IWL_TM_ATTR_BUFFER_DUMP] = { .type = NLA_UNSPEC, }, |
| 116 | |
| 117 | [IWL_TM_ATTR_FW_VERSION] = { .type = NLA_U32, }, |
| 118 | [IWL_TM_ATTR_DEVICE_ID] = { .type = NLA_U32, }, |
| 119 | [IWL_TM_ATTR_FW_TYPE] = { .type = NLA_U32, }, |
| 120 | [IWL_TM_ATTR_FW_INST_SIZE] = { .type = NLA_U32, }, |
| 121 | [IWL_TM_ATTR_FW_DATA_SIZE] = { .type = NLA_U32, }, |
| 122 | |
| 123 | [IWL_TM_ATTR_ENABLE_NOTIFICATION] = {.type = NLA_FLAG, }, |
| 124 | }; |
| 125 | |
| 126 | static inline void iwl_test_trace_clear(struct iwl_test *tst) |
| 127 | { |
| 128 | memset(&tst->trace, 0, sizeof(struct iwl_test_trace)); |
| 129 | } |
| 130 | |
| 131 | static void iwl_test_trace_stop(struct iwl_test *tst) |
| 132 | { |
| 133 | if (!tst->trace.enabled) |
| 134 | return; |
| 135 | |
| 136 | if (tst->trace.cpu_addr && tst->trace.dma_addr) |
| 137 | dma_free_coherent(tst->trans->dev, |
| 138 | tst->trace.tsize, |
| 139 | tst->trace.cpu_addr, |
| 140 | tst->trace.dma_addr); |
| 141 | |
| 142 | iwl_test_trace_clear(tst); |
| 143 | } |
| 144 | |
| 145 | static inline void iwl_test_mem_clear(struct iwl_test *tst) |
| 146 | { |
| 147 | memset(&tst->mem, 0, sizeof(struct iwl_test_mem)); |
| 148 | } |
| 149 | |
| 150 | static inline void iwl_test_mem_stop(struct iwl_test *tst) |
| 151 | { |
| 152 | if (!tst->mem.in_read) |
| 153 | return; |
| 154 | |
| 155 | iwl_test_mem_clear(tst); |
| 156 | } |
| 157 | |
| 158 | /* |
| 159 | * Initializes the test object |
| 160 | * During the lifetime of the test object it is assumed that the transport is |
| 161 | * started. The test object should be stopped before the transport is stopped. |
| 162 | */ |
| 163 | void iwl_test_init(struct iwl_test *tst, struct iwl_trans *trans, |
| 164 | struct iwl_test_ops *ops) |
| 165 | { |
| 166 | tst->trans = trans; |
| 167 | tst->ops = ops; |
| 168 | |
| 169 | iwl_test_trace_clear(tst); |
| 170 | iwl_test_mem_clear(tst); |
| 171 | } |
| 172 | EXPORT_SYMBOL_GPL(iwl_test_init); |
| 173 | |
/*
 * Stop the test object
 *
 * Aborts any in-progress indirect memory dump and frees the DMA trace
 * buffer if one was allocated.  Must be called before the transport
 * passed to iwl_test_init() is stopped.
 */
void iwl_test_free(struct iwl_test *tst)
{
	iwl_test_mem_stop(tst);
	iwl_test_trace_stop(tst);
}
EXPORT_SYMBOL_GPL(iwl_test_free);
| 183 | |
/* Forward a host command to the op-mode's send_cmd hook; returns its result. */
static inline int iwl_test_send_cmd(struct iwl_test *tst,
				    struct iwl_host_cmd *cmd)
{
	return tst->ops->send_cmd(tst->trans->op_mode, cmd);
}
| 189 | |
/* Ask the op-mode whether @addr is a valid HW (SRAM) address to write. */
static inline bool iwl_test_valid_hw_addr(struct iwl_test *tst, u32 addr)
{
	return tst->ops->valid_hw_addr(addr);
}
| 194 | |
/* Fetch the raw firmware version number from the op-mode. */
static inline u32 iwl_test_fw_ver(struct iwl_test *tst)
{
	return tst->ops->get_fw_ver(tst->trans->op_mode);
}
| 199 | |
/*
 * Allocate a reply skb of @len bytes via the op-mode hook.
 * May return NULL on allocation failure; callers check for that.
 */
static inline struct sk_buff*
iwl_test_alloc_reply(struct iwl_test *tst, int len)
{
	return tst->ops->alloc_reply(tst->trans->op_mode, len);
}
| 205 | |
/*
 * Send a previously-allocated reply skb to user space via the op-mode.
 * Ownership of @skb passes to the callee; returns its status code.
 */
static inline int iwl_test_reply(struct iwl_test *tst, struct sk_buff *skb)
{
	return tst->ops->reply(tst->trans->op_mode, skb);
}
| 210 | |
/*
 * Allocate an event skb of @len bytes via the op-mode hook.
 * May return NULL on allocation failure.
 */
static inline struct sk_buff*
iwl_test_alloc_event(struct iwl_test *tst, int len)
{
	return tst->ops->alloc_event(tst->trans->op_mode, len);
}
| 216 | |
| 217 | static inline void |
| 218 | iwl_test_event(struct iwl_test *tst, struct sk_buff *skb) |
| 219 | { |
| 220 | return tst->ops->event(tst->trans->op_mode, skb); |
| 221 | } |
| 222 | |
Ilan Peer | 3a6490c | 2012-06-03 13:36:51 +0300 | [diff] [blame] | 223 | /* |
| 224 | * This function handles the user application commands to the fw. The fw |
| 225 | * commands are sent in a synchronuous manner. In case that the user requested |
| 226 | * to get commands response, it is send to the user. |
| 227 | */ |
Ilan Peer | c76fe6d | 2012-06-04 19:39:30 +0300 | [diff] [blame] | 228 | static int iwl_test_fw_cmd(struct iwl_test *tst, struct nlattr **tb) |
Ilan Peer | 3a6490c | 2012-06-03 13:36:51 +0300 | [diff] [blame] | 229 | { |
| 230 | struct iwl_host_cmd cmd; |
| 231 | struct iwl_rx_packet *pkt; |
| 232 | struct sk_buff *skb; |
| 233 | void *reply_buf; |
| 234 | u32 reply_len; |
| 235 | int ret; |
| 236 | bool cmd_want_skb; |
| 237 | |
| 238 | memset(&cmd, 0, sizeof(struct iwl_host_cmd)); |
| 239 | |
| 240 | if (!tb[IWL_TM_ATTR_UCODE_CMD_ID] || |
| 241 | !tb[IWL_TM_ATTR_UCODE_CMD_DATA]) { |
| 242 | IWL_ERR(tst->trans, "Missing fw command mandatory fields\n"); |
| 243 | return -ENOMSG; |
| 244 | } |
| 245 | |
| 246 | cmd.flags = CMD_ON_DEMAND | CMD_SYNC; |
| 247 | cmd_want_skb = nla_get_flag(tb[IWL_TM_ATTR_UCODE_CMD_SKB]); |
| 248 | if (cmd_want_skb) |
| 249 | cmd.flags |= CMD_WANT_SKB; |
| 250 | |
| 251 | cmd.id = nla_get_u8(tb[IWL_TM_ATTR_UCODE_CMD_ID]); |
| 252 | cmd.data[0] = nla_data(tb[IWL_TM_ATTR_UCODE_CMD_DATA]); |
| 253 | cmd.len[0] = nla_len(tb[IWL_TM_ATTR_UCODE_CMD_DATA]); |
| 254 | cmd.dataflags[0] = IWL_HCMD_DFL_NOCOPY; |
| 255 | IWL_DEBUG_INFO(tst->trans, "test fw cmd=0x%x, flags 0x%x, len %d\n", |
| 256 | cmd.id, cmd.flags, cmd.len[0]); |
| 257 | |
Ilan Peer | c76fe6d | 2012-06-04 19:39:30 +0300 | [diff] [blame] | 258 | ret = iwl_test_send_cmd(tst, &cmd); |
Ilan Peer | 3a6490c | 2012-06-03 13:36:51 +0300 | [diff] [blame] | 259 | if (ret) { |
| 260 | IWL_ERR(tst->trans, "Failed to send hcmd\n"); |
| 261 | return ret; |
| 262 | } |
| 263 | if (!cmd_want_skb) |
| 264 | return ret; |
| 265 | |
| 266 | /* Handling return of SKB to the user */ |
| 267 | pkt = cmd.resp_pkt; |
| 268 | if (!pkt) { |
| 269 | IWL_ERR(tst->trans, "HCMD received a null response packet\n"); |
| 270 | return ret; |
| 271 | } |
| 272 | |
| 273 | reply_len = le32_to_cpu(pkt->len_n_flags) & FH_RSCSR_FRAME_SIZE_MSK; |
Ilan Peer | c76fe6d | 2012-06-04 19:39:30 +0300 | [diff] [blame] | 274 | skb = iwl_test_alloc_reply(tst, reply_len + 20); |
Andrei Epure | 5649ce4 | 2013-03-10 15:22:33 +0200 | [diff] [blame] | 275 | reply_buf = kmemdup(&pkt->hdr, reply_len, GFP_KERNEL); |
Ilan Peer | 3a6490c | 2012-06-03 13:36:51 +0300 | [diff] [blame] | 276 | if (!skb || !reply_buf) { |
| 277 | kfree_skb(skb); |
| 278 | kfree(reply_buf); |
| 279 | return -ENOMEM; |
| 280 | } |
| 281 | |
| 282 | /* The reply is in a page, that we cannot send to user space. */ |
Ilan Peer | 3a6490c | 2012-06-03 13:36:51 +0300 | [diff] [blame] | 283 | iwl_free_resp(&cmd); |
| 284 | |
| 285 | if (nla_put_u32(skb, IWL_TM_ATTR_COMMAND, |
| 286 | IWL_TM_CMD_DEV2APP_UCODE_RX_PKT) || |
| 287 | nla_put(skb, IWL_TM_ATTR_UCODE_RX_PKT, reply_len, reply_buf)) |
| 288 | goto nla_put_failure; |
Ilan Peer | c76fe6d | 2012-06-04 19:39:30 +0300 | [diff] [blame] | 289 | return iwl_test_reply(tst, skb); |
Ilan Peer | 3a6490c | 2012-06-03 13:36:51 +0300 | [diff] [blame] | 290 | |
| 291 | nla_put_failure: |
| 292 | IWL_DEBUG_INFO(tst->trans, "Failed creating NL attributes\n"); |
| 293 | kfree(reply_buf); |
| 294 | kfree_skb(skb); |
| 295 | return -ENOMSG; |
| 296 | } |
| 297 | |
/*
 * Handles the user application commands for register access.
 *
 * Dispatches on IWL_TM_ATTR_COMMAND: direct 32-bit read (result is sent
 * back as IWL_TM_ATTR_REG_VALUE32), direct 32-bit write, or direct 8-bit
 * write.  Returns 0 / the reply status on success, -ENOMSG on missing
 * attributes or unknown commands, -EINVAL on an out-of-range offset,
 * -ENOMEM or -EMSGSIZE on reply-construction failures.
 */
static int iwl_test_reg(struct iwl_test *tst, struct nlattr **tb)
{
	u32 ofs, val32, cmd;
	u8 val8;
	struct sk_buff *skb;
	int status = 0;
	struct iwl_trans *trans = tst->trans;

	if (!tb[IWL_TM_ATTR_REG_OFFSET]) {
		IWL_ERR(trans, "Missing reg offset\n");
		return -ENOMSG;
	}

	ofs = nla_get_u32(tb[IWL_TM_ATTR_REG_OFFSET]);
	IWL_DEBUG_INFO(trans, "test reg access cmd offset=0x%x\n", ofs);

	cmd = nla_get_u32(tb[IWL_TM_ATTR_COMMAND]);

	/*
	 * Allow access only to FH/CSR/HBUS in direct mode.
	 * Since we don't have the upper bounds for the CSR and HBUS segments,
	 * we will use only the upper bound of FH for sanity check.
	 */
	if (ofs >= FH_MEM_UPPER_BOUND) {
		IWL_ERR(trans, "offset out of segment (0x0 - 0x%x)\n",
			FH_MEM_UPPER_BOUND);
		return -EINVAL;
	}

	switch (cmd) {
	case IWL_TM_CMD_APP2DEV_DIRECT_REG_READ32:
		val32 = iwl_read_direct32(tst->trans, ofs);
		IWL_DEBUG_INFO(trans, "32 value to read 0x%x\n", val32);

		/* build a reply skb carrying the value read */
		skb = iwl_test_alloc_reply(tst, 20);
		if (!skb) {
			IWL_ERR(trans, "Memory allocation fail\n");
			return -ENOMEM;
		}
		if (nla_put_u32(skb, IWL_TM_ATTR_REG_VALUE32, val32))
			goto nla_put_failure;
		status = iwl_test_reply(tst, skb);
		if (status < 0)
			IWL_ERR(trans, "Error sending msg : %d\n", status);
		break;

	case IWL_TM_CMD_APP2DEV_DIRECT_REG_WRITE32:
		if (!tb[IWL_TM_ATTR_REG_VALUE32]) {
			IWL_ERR(trans, "Missing value to write\n");
			return -ENOMSG;
		} else {
			val32 = nla_get_u32(tb[IWL_TM_ATTR_REG_VALUE32]);
			IWL_DEBUG_INFO(trans, "32b write val=0x%x\n", val32);
			iwl_write_direct32(tst->trans, ofs, val32);
		}
		break;

	case IWL_TM_CMD_APP2DEV_DIRECT_REG_WRITE8:
		if (!tb[IWL_TM_ATTR_REG_VALUE8]) {
			IWL_ERR(trans, "Missing value to write\n");
			return -ENOMSG;
		} else {
			val8 = nla_get_u8(tb[IWL_TM_ATTR_REG_VALUE8]);
			IWL_DEBUG_INFO(trans, "8b write val=0x%x\n", val8);
			iwl_write8(tst->trans, ofs, val8);
		}
		break;

	default:
		IWL_ERR(trans, "Unknown test register cmd ID\n");
		return -ENOMSG;
	}

	return status;

nla_put_failure:
	kfree_skb(skb);
	return -EMSGSIZE;
}
| 380 | |
/*
 * Handles the request to start FW tracing. Allocates of the trace buffer
 * and sends a reply to user space with the address of the allocated buffer.
 *
 * Returns -EBUSY when tracing is already enabled, -EINVAL for an
 * out-of-range requested size, -ENOMEM / -EMSGSIZE on allocation or
 * reply-construction failure, otherwise the reply status.
 */
static int iwl_test_trace_begin(struct iwl_test *tst, struct nlattr **tb)
{
	struct sk_buff *skb;
	int status = 0;

	if (tst->trace.enabled)
		return -EBUSY;

	/* size is optional; fall back to the default buffer size */
	if (!tb[IWL_TM_ATTR_TRACE_SIZE])
		tst->trace.size = TRACE_BUFF_SIZE_DEF;
	else
		tst->trace.size =
			nla_get_u32(tb[IWL_TM_ATTR_TRACE_SIZE]);

	if (!tst->trace.size)
		return -EINVAL;

	if (tst->trace.size < TRACE_BUFF_SIZE_MIN ||
	    tst->trace.size > TRACE_BUFF_SIZE_MAX)
		return -EINVAL;

	/* over-allocate so the usable region can be aligned below */
	tst->trace.tsize = tst->trace.size + TRACE_BUFF_PADD;
	tst->trace.cpu_addr = dma_alloc_coherent(tst->trans->dev,
						 tst->trace.tsize,
						 &tst->trace.dma_addr,
						 GFP_KERNEL);
	if (!tst->trace.cpu_addr)
		return -ENOMEM;

	tst->trace.enabled = true;
	/* align the usable trace region to a 0x100 boundary */
	tst->trace.trace_addr = (u8 *)PTR_ALIGN(tst->trace.cpu_addr, 0x100);

	/* pre-fill with 0x3B bytes (memset truncates 0x03B to one byte) */
	memset(tst->trace.trace_addr, 0x03B, tst->trace.size);

	skb = iwl_test_alloc_reply(tst, sizeof(tst->trace.dma_addr) + 20);
	if (!skb) {
		IWL_ERR(tst->trans, "Memory allocation fail\n");
		iwl_test_trace_stop(tst);
		return -ENOMEM;
	}

	if (nla_put(skb, IWL_TM_ATTR_TRACE_ADDR,
		    sizeof(tst->trace.dma_addr),
		    (u64 *)&tst->trace.dma_addr))
		goto nla_put_failure;

	status = iwl_test_reply(tst, skb);
	if (status < 0)
		IWL_ERR(tst->trans, "Error sending msg : %d\n", status);

	/* number of chunks the dump callback will emit later */
	tst->trace.nchunks = DIV_ROUND_UP(tst->trace.size,
					  DUMP_CHUNK_SIZE);

	return status;

nla_put_failure:
	kfree_skb(skb);
	if (nla_get_u32(tb[IWL_TM_ATTR_COMMAND]) ==
	    IWL_TM_CMD_APP2DEV_BEGIN_TRACE)
		iwl_test_trace_stop(tst);
	return -EMSGSIZE;
}
| 447 | |
| 448 | /* |
| 449 | * Handles indirect read from the periphery or the SRAM. The read is performed |
| 450 | * to a temporary buffer. The user space application should later issue a dump |
| 451 | */ |
| 452 | static int iwl_test_indirect_read(struct iwl_test *tst, u32 addr, u32 size) |
| 453 | { |
| 454 | struct iwl_trans *trans = tst->trans; |
| 455 | unsigned long flags; |
| 456 | int i; |
| 457 | |
| 458 | if (size & 0x3) |
| 459 | return -EINVAL; |
| 460 | |
| 461 | tst->mem.size = size; |
| 462 | tst->mem.addr = kmalloc(tst->mem.size, GFP_KERNEL); |
| 463 | if (tst->mem.addr == NULL) |
| 464 | return -ENOMEM; |
| 465 | |
| 466 | /* Hard-coded periphery absolute address */ |
| 467 | if (IWL_ABS_PRPH_START <= addr && |
| 468 | addr < IWL_ABS_PRPH_START + PRPH_END) { |
Lilach Edelstein | e56b04e | 2013-01-16 11:34:49 +0200 | [diff] [blame] | 469 | if (!iwl_trans_grab_nic_access(trans, false, &flags)) { |
Emmanuel Grumbach | abae238 | 2012-12-31 13:46:42 +0200 | [diff] [blame] | 470 | return -EIO; |
| 471 | } |
Ilan Peer | 3a6490c | 2012-06-03 13:36:51 +0300 | [diff] [blame] | 472 | iwl_write32(trans, HBUS_TARG_PRPH_RADDR, |
| 473 | addr | (3 << 24)); |
| 474 | for (i = 0; i < size; i += 4) |
| 475 | *(u32 *)(tst->mem.addr + i) = |
| 476 | iwl_read32(trans, HBUS_TARG_PRPH_RDAT); |
Lilach Edelstein | e56b04e | 2013-01-16 11:34:49 +0200 | [diff] [blame] | 477 | iwl_trans_release_nic_access(trans, &flags); |
Ilan Peer | 3a6490c | 2012-06-03 13:36:51 +0300 | [diff] [blame] | 478 | } else { /* target memory (SRAM) */ |
Emmanuel Grumbach | 4fd442d | 2012-12-24 14:27:11 +0200 | [diff] [blame] | 479 | iwl_trans_read_mem(trans, addr, tst->mem.addr, |
| 480 | tst->mem.size / 4); |
Ilan Peer | 3a6490c | 2012-06-03 13:36:51 +0300 | [diff] [blame] | 481 | } |
| 482 | |
| 483 | tst->mem.nchunks = |
| 484 | DIV_ROUND_UP(tst->mem.size, DUMP_CHUNK_SIZE); |
| 485 | tst->mem.in_read = true; |
| 486 | return 0; |
| 487 | |
| 488 | } |
| 489 | |
/*
 * Handles indirect write to the periphery or SRAM from the caller's
 * buffer @buf of @size bytes.
 *
 * Periphery addresses accept 1-3 byte writes (via HBUS_TARG_PRPH_W*) or
 * DWORD-multiple writes (via iwl_write_prph).  Other addresses must pass
 * the op-mode's valid_hw_addr() check and are written as SRAM.
 * Returns 0 on success, -EIO if NIC access cannot be grabbed, -EINVAL
 * for bad sizes or invalid addresses.
 */
static int iwl_test_indirect_write(struct iwl_test *tst, u32 addr,
				   u32 size, unsigned char *buf)
{
	struct iwl_trans *trans = tst->trans;
	u32 val, i;
	unsigned long flags;

	if (IWL_ABS_PRPH_START <= addr &&
	    addr < IWL_ABS_PRPH_START + PRPH_END) {
		/* Periphery writes can be 1-3 bytes long, or DWORDs */
		if (size < 4) {
			memcpy(&val, buf, size);
			if (!iwl_trans_grab_nic_access(trans, false, &flags))
				return -EIO;
			/* byte count is encoded in bits 24+ of the address */
			iwl_write32(trans, HBUS_TARG_PRPH_WADDR,
				    (addr & 0x0000FFFF) |
				    ((size - 1) << 24));
			iwl_write32(trans, HBUS_TARG_PRPH_WDAT, val);
			iwl_trans_release_nic_access(trans, &flags);
		} else {
			if (size % 4)
				return -EINVAL;
			for (i = 0; i < size; i += 4)
				iwl_write_prph(trans, addr+i,
					       *(u32 *)(buf+i));
		}
	} else if (iwl_test_valid_hw_addr(tst, addr)) {
		iwl_trans_write_mem(trans, addr, buf, size / 4);
	} else {
		return -EINVAL;
	}
	return 0;
}
| 527 | |
| 528 | /* |
| 529 | * Handles the user application commands for indirect read/write |
| 530 | * to/from the periphery or the SRAM. |
| 531 | */ |
| 532 | static int iwl_test_indirect_mem(struct iwl_test *tst, struct nlattr **tb) |
| 533 | { |
| 534 | u32 addr, size, cmd; |
| 535 | unsigned char *buf; |
| 536 | |
| 537 | /* Both read and write should be blocked, for atomicity */ |
| 538 | if (tst->mem.in_read) |
| 539 | return -EBUSY; |
| 540 | |
| 541 | cmd = nla_get_u32(tb[IWL_TM_ATTR_COMMAND]); |
| 542 | if (!tb[IWL_TM_ATTR_MEM_ADDR]) { |
| 543 | IWL_ERR(tst->trans, "Error finding memory offset address\n"); |
| 544 | return -ENOMSG; |
| 545 | } |
| 546 | addr = nla_get_u32(tb[IWL_TM_ATTR_MEM_ADDR]); |
| 547 | if (!tb[IWL_TM_ATTR_BUFFER_SIZE]) { |
| 548 | IWL_ERR(tst->trans, "Error finding size for memory reading\n"); |
| 549 | return -ENOMSG; |
| 550 | } |
| 551 | size = nla_get_u32(tb[IWL_TM_ATTR_BUFFER_SIZE]); |
| 552 | |
| 553 | if (cmd == IWL_TM_CMD_APP2DEV_INDIRECT_BUFFER_READ) { |
| 554 | return iwl_test_indirect_read(tst, addr, size); |
| 555 | } else { |
| 556 | if (!tb[IWL_TM_ATTR_BUFFER_DUMP]) |
| 557 | return -EINVAL; |
| 558 | buf = (unsigned char *)nla_data(tb[IWL_TM_ATTR_BUFFER_DUMP]); |
| 559 | return iwl_test_indirect_write(tst, addr, size, buf); |
| 560 | } |
| 561 | } |
| 562 | |
| 563 | /* |
| 564 | * Enable notifications to user space |
| 565 | */ |
| 566 | static int iwl_test_notifications(struct iwl_test *tst, |
| 567 | struct nlattr **tb) |
| 568 | { |
| 569 | tst->notify = nla_get_flag(tb[IWL_TM_ATTR_ENABLE_NOTIFICATION]); |
| 570 | return 0; |
| 571 | } |
| 572 | |
| 573 | /* |
| 574 | * Handles the request to get the device id |
| 575 | */ |
Ilan Peer | c76fe6d | 2012-06-04 19:39:30 +0300 | [diff] [blame] | 576 | static int iwl_test_get_dev_id(struct iwl_test *tst, struct nlattr **tb) |
Ilan Peer | 3a6490c | 2012-06-03 13:36:51 +0300 | [diff] [blame] | 577 | { |
| 578 | u32 devid = tst->trans->hw_id; |
| 579 | struct sk_buff *skb; |
| 580 | int status; |
| 581 | |
| 582 | IWL_DEBUG_INFO(tst->trans, "hw version: 0x%x\n", devid); |
| 583 | |
Ilan Peer | c76fe6d | 2012-06-04 19:39:30 +0300 | [diff] [blame] | 584 | skb = iwl_test_alloc_reply(tst, 20); |
Ilan Peer | 3a6490c | 2012-06-03 13:36:51 +0300 | [diff] [blame] | 585 | if (!skb) { |
| 586 | IWL_ERR(tst->trans, "Memory allocation fail\n"); |
| 587 | return -ENOMEM; |
| 588 | } |
| 589 | |
| 590 | if (nla_put_u32(skb, IWL_TM_ATTR_DEVICE_ID, devid)) |
| 591 | goto nla_put_failure; |
Ilan Peer | c76fe6d | 2012-06-04 19:39:30 +0300 | [diff] [blame] | 592 | status = iwl_test_reply(tst, skb); |
Ilan Peer | 3a6490c | 2012-06-03 13:36:51 +0300 | [diff] [blame] | 593 | if (status < 0) |
| 594 | IWL_ERR(tst->trans, "Error sending msg : %d\n", status); |
| 595 | |
| 596 | return 0; |
| 597 | |
| 598 | nla_put_failure: |
| 599 | kfree_skb(skb); |
| 600 | return -EMSGSIZE; |
| 601 | } |
| 602 | |
/*
 * Handles the request to get the FW version
 *
 * Sends the op-mode's raw firmware version back to user space as
 * IWL_TM_ATTR_FW_VERSION.  A failed send is only logged; 0 is returned
 * unless allocation or attribute construction fails.
 */
static int iwl_test_get_fw_ver(struct iwl_test *tst, struct nlattr **tb)
{
	struct sk_buff *skb;
	int status;
	u32 ver = iwl_test_fw_ver(tst);

	IWL_DEBUG_INFO(tst->trans, "uCode version raw: 0x%x\n", ver);

	skb = iwl_test_alloc_reply(tst, 20);
	if (!skb) {
		IWL_ERR(tst->trans, "Memory allocation fail\n");
		return -ENOMEM;
	}

	if (nla_put_u32(skb, IWL_TM_ATTR_FW_VERSION, ver))
		goto nla_put_failure;

	status = iwl_test_reply(tst, skb);
	if (status < 0)
		IWL_ERR(tst->trans, "Error sending msg : %d\n", status);

	return 0;

nla_put_failure:
	kfree_skb(skb);
	return -EMSGSIZE;
}
| 633 | |
/*
 * Parse the netlink message and validate that the IWL_TM_ATTR_CMD exists
 *
 * Fills @tb (indexed by IWL_TM_ATTR_*) from the raw testmode payload
 * @data of @len bytes, validating attribute types against
 * iwl_testmode_gnl_msg_policy.  Returns 0 on success, the negative
 * nla_parse() error on malformed input, or -ENOMSG when the mandatory
 * IWL_TM_ATTR_COMMAND attribute is missing.
 */
int iwl_test_parse(struct iwl_test *tst, struct nlattr **tb,
		   void *data, int len)
{
	int result;

	result = nla_parse(tb, IWL_TM_ATTR_MAX - 1, data, len,
			   iwl_testmode_gnl_msg_policy);
	if (result) {
		IWL_ERR(tst->trans, "Fail parse gnl msg: %d\n", result);
		return result;
	}

	/* IWL_TM_ATTR_COMMAND is absolutely mandatory */
	if (!tb[IWL_TM_ATTR_COMMAND]) {
		IWL_ERR(tst->trans, "Missing testmode command type\n");
		return -ENOMSG;
	}
	return 0;
}
IWL_EXPORT_SYMBOL(iwl_test_parse);
Ilan Peer | 3a6490c | 2012-06-03 13:36:51 +0300 | [diff] [blame] | 657 | |
/*
 * Handle test commands.
 * Returns 1 for unknown commands (not handled by the test object); negative
 * value in case of error.
 *
 * Dispatches on the (already validated) IWL_TM_ATTR_COMMAND attribute
 * to the per-command handlers above; callers are expected to have run
 * iwl_test_parse() on @tb first.
 */
int iwl_test_handle_cmd(struct iwl_test *tst, struct nlattr **tb)
{
	int result;

	switch (nla_get_u32(tb[IWL_TM_ATTR_COMMAND])) {
	case IWL_TM_CMD_APP2DEV_UCODE:
		IWL_DEBUG_INFO(tst->trans, "test cmd to uCode\n");
		result = iwl_test_fw_cmd(tst, tb);
		break;

	case IWL_TM_CMD_APP2DEV_DIRECT_REG_READ32:
	case IWL_TM_CMD_APP2DEV_DIRECT_REG_WRITE32:
	case IWL_TM_CMD_APP2DEV_DIRECT_REG_WRITE8:
		IWL_DEBUG_INFO(tst->trans, "test cmd to register\n");
		result = iwl_test_reg(tst, tb);
		break;

	case IWL_TM_CMD_APP2DEV_BEGIN_TRACE:
		IWL_DEBUG_INFO(tst->trans, "test uCode trace cmd to driver\n");
		result = iwl_test_trace_begin(tst, tb);
		break;

	case IWL_TM_CMD_APP2DEV_END_TRACE:
		iwl_test_trace_stop(tst);
		result = 0;
		break;

	case IWL_TM_CMD_APP2DEV_INDIRECT_BUFFER_READ:
	case IWL_TM_CMD_APP2DEV_INDIRECT_BUFFER_WRITE:
		IWL_DEBUG_INFO(tst->trans, "test indirect memory cmd\n");
		result = iwl_test_indirect_mem(tst, tb);
		break;

	case IWL_TM_CMD_APP2DEV_NOTIFICATIONS:
		IWL_DEBUG_INFO(tst->trans, "test notifications cmd\n");
		result = iwl_test_notifications(tst, tb);
		break;

	case IWL_TM_CMD_APP2DEV_GET_FW_VERSION:
		IWL_DEBUG_INFO(tst->trans, "test get FW ver cmd\n");
		result = iwl_test_get_fw_ver(tst, tb);
		break;

	case IWL_TM_CMD_APP2DEV_GET_DEVICE_ID:
		IWL_DEBUG_INFO(tst->trans, "test Get device ID cmd\n");
		result = iwl_test_get_dev_id(tst, tb);
		break;

	default:
		IWL_DEBUG_INFO(tst->trans, "Unknown test command\n");
		result = 1;
		break;
	}
	return result;
}
IWL_EXPORT_SYMBOL(iwl_test_handle_cmd);
Ilan Peer | 3a6490c | 2012-06-03 13:36:51 +0300 | [diff] [blame] | 719 | |
/*
 * Emit one DUMP_CHUNK_SIZE-sized chunk of the FW trace buffer into the
 * netlink dump skb.  cb->args[4] is the chunk cursor, advanced on each
 * call; -ENOENT signals the end of the dump, -EFAULT that no trace is
 * active, -ENOBUFS that the skb is full (retry with a fresh skb).
 */
static int iwl_test_trace_dump(struct iwl_test *tst, struct sk_buff *skb,
			       struct netlink_callback *cb)
{
	int idx, length;

	if (!tst->trace.enabled || !tst->trace.trace_addr)
		return -EFAULT;

	idx = cb->args[4];
	if (idx >= tst->trace.nchunks)
		return -ENOENT;

	/* last chunk may be shorter than DUMP_CHUNK_SIZE */
	length = DUMP_CHUNK_SIZE;
	if (((idx + 1) == tst->trace.nchunks) &&
	    (tst->trace.size % DUMP_CHUNK_SIZE))
		length = tst->trace.size %
			DUMP_CHUNK_SIZE;

	if (nla_put(skb, IWL_TM_ATTR_TRACE_DUMP, length,
		    tst->trace.trace_addr + (DUMP_CHUNK_SIZE * idx)))
		goto nla_put_failure;

	cb->args[4] = ++idx;
	return 0;

 nla_put_failure:
	return -ENOBUFS;
}
| 748 | |
| 749 | static int iwl_test_buffer_dump(struct iwl_test *tst, struct sk_buff *skb, |
| 750 | struct netlink_callback *cb) |
| 751 | { |
| 752 | int idx, length; |
| 753 | |
| 754 | if (!tst->mem.in_read) |
| 755 | return -EFAULT; |
| 756 | |
| 757 | idx = cb->args[4]; |
| 758 | if (idx >= tst->mem.nchunks) { |
| 759 | iwl_test_mem_stop(tst); |
| 760 | return -ENOENT; |
| 761 | } |
| 762 | |
| 763 | length = DUMP_CHUNK_SIZE; |
| 764 | if (((idx + 1) == tst->mem.nchunks) && |
| 765 | (tst->mem.size % DUMP_CHUNK_SIZE)) |
| 766 | length = tst->mem.size % DUMP_CHUNK_SIZE; |
| 767 | |
| 768 | if (nla_put(skb, IWL_TM_ATTR_BUFFER_DUMP, length, |
| 769 | tst->mem.addr + (DUMP_CHUNK_SIZE * idx))) |
| 770 | goto nla_put_failure; |
| 771 | |
| 772 | cb->args[4] = ++idx; |
| 773 | return 0; |
| 774 | |
| 775 | nla_put_failure: |
| 776 | return -ENOBUFS; |
| 777 | } |
| 778 | |
| 779 | /* |
| 780 | * Handle dump commands. |
| 781 | * Returns 1 for unknown commands (not handled by the test object); negative |
| 782 | * value in case of error. |
| 783 | */ |
| 784 | int iwl_test_dump(struct iwl_test *tst, u32 cmd, struct sk_buff *skb, |
| 785 | struct netlink_callback *cb) |
| 786 | { |
| 787 | int result; |
| 788 | |
| 789 | switch (cmd) { |
| 790 | case IWL_TM_CMD_APP2DEV_READ_TRACE: |
| 791 | IWL_DEBUG_INFO(tst->trans, "uCode trace cmd\n"); |
| 792 | result = iwl_test_trace_dump(tst, skb, cb); |
| 793 | break; |
| 794 | |
| 795 | case IWL_TM_CMD_APP2DEV_INDIRECT_BUFFER_DUMP: |
| 796 | IWL_DEBUG_INFO(tst->trans, "testmode sram dump cmd\n"); |
| 797 | result = iwl_test_buffer_dump(tst, skb, cb); |
| 798 | break; |
| 799 | |
| 800 | default: |
| 801 | result = 1; |
| 802 | break; |
| 803 | } |
| 804 | return result; |
| 805 | } |
Johannes Berg | 48e2934 | 2013-03-01 00:13:33 +0100 | [diff] [blame] | 806 | IWL_EXPORT_SYMBOL(iwl_test_dump); |
Ilan Peer | 3a6490c | 2012-06-03 13:36:51 +0300 | [diff] [blame] | 807 | |
| 808 | /* |
| 809 | * Multicast a spontaneous messages from the device to the user space. |
| 810 | */ |
Ilan Peer | c76fe6d | 2012-06-04 19:39:30 +0300 | [diff] [blame] | 811 | static void iwl_test_send_rx(struct iwl_test *tst, |
Ilan Peer | 3a6490c | 2012-06-03 13:36:51 +0300 | [diff] [blame] | 812 | struct iwl_rx_cmd_buffer *rxb) |
| 813 | { |
| 814 | struct sk_buff *skb; |
| 815 | struct iwl_rx_packet *data; |
| 816 | int length; |
| 817 | |
| 818 | data = rxb_addr(rxb); |
| 819 | length = le32_to_cpu(data->len_n_flags) & FH_RSCSR_FRAME_SIZE_MSK; |
| 820 | |
| 821 | /* the length doesn't include len_n_flags field, so add it manually */ |
| 822 | length += sizeof(__le32); |
| 823 | |
Ilan Peer | c76fe6d | 2012-06-04 19:39:30 +0300 | [diff] [blame] | 824 | skb = iwl_test_alloc_event(tst, length + 20); |
Ilan Peer | 3a6490c | 2012-06-03 13:36:51 +0300 | [diff] [blame] | 825 | if (skb == NULL) { |
| 826 | IWL_ERR(tst->trans, "Out of memory for message to user\n"); |
| 827 | return; |
| 828 | } |
| 829 | |
| 830 | if (nla_put_u32(skb, IWL_TM_ATTR_COMMAND, |
| 831 | IWL_TM_CMD_DEV2APP_UCODE_RX_PKT) || |
| 832 | nla_put(skb, IWL_TM_ATTR_UCODE_RX_PKT, length, data)) |
| 833 | goto nla_put_failure; |
| 834 | |
Ilan Peer | c76fe6d | 2012-06-04 19:39:30 +0300 | [diff] [blame] | 835 | iwl_test_event(tst, skb); |
Ilan Peer | 3a6490c | 2012-06-03 13:36:51 +0300 | [diff] [blame] | 836 | return; |
| 837 | |
| 838 | nla_put_failure: |
| 839 | kfree_skb(skb); |
| 840 | IWL_ERR(tst->trans, "Ouch, overran buffer, check allocation!\n"); |
| 841 | } |
| 842 | |
| 843 | /* |
| 844 | * Called whenever a Rx frames is recevied from the device. If notifications to |
| 845 | * the user space are requested, sends the frames to the user. |
| 846 | */ |
Ilan Peer | c76fe6d | 2012-06-04 19:39:30 +0300 | [diff] [blame] | 847 | void iwl_test_rx(struct iwl_test *tst, struct iwl_rx_cmd_buffer *rxb) |
Ilan Peer | 3a6490c | 2012-06-03 13:36:51 +0300 | [diff] [blame] | 848 | { |
| 849 | if (tst->notify) |
Ilan Peer | c76fe6d | 2012-06-04 19:39:30 +0300 | [diff] [blame] | 850 | iwl_test_send_rx(tst, rxb); |
Ilan Peer | 3a6490c | 2012-06-03 13:36:51 +0300 | [diff] [blame] | 851 | } |
Johannes Berg | 48e2934 | 2013-03-01 00:13:33 +0100 | [diff] [blame] | 852 | IWL_EXPORT_SYMBOL(iwl_test_rx); |