Ilan Peer | 3a6490c | 2012-06-03 13:36:51 +0300 | [diff] [blame] | 1 | /****************************************************************************** |
| 2 | * |
| 3 | * This file is provided under a dual BSD/GPLv2 license. When using or |
| 4 | * redistributing this file, you may do so under either license. |
| 5 | * |
| 6 | * GPL LICENSE SUMMARY |
| 7 | * |
Johannes Berg | 128e63e | 2013-01-21 21:39:26 +0100 | [diff] [blame] | 8 | * Copyright(c) 2010 - 2013 Intel Corporation. All rights reserved. |
Ilan Peer | 3a6490c | 2012-06-03 13:36:51 +0300 | [diff] [blame] | 9 | * |
| 10 | * This program is free software; you can redistribute it and/or modify |
| 11 | * it under the terms of version 2 of the GNU General Public License as |
| 12 | * published by the Free Software Foundation. |
| 13 | * |
| 14 | * This program is distributed in the hope that it will be useful, but |
| 15 | * WITHOUT ANY WARRANTY; without even the implied warranty of |
| 16 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU |
| 17 | * General Public License for more details. |
| 18 | * |
| 19 | * You should have received a copy of the GNU General Public License |
| 20 | * along with this program; if not, write to the Free Software |
| 21 | * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110, |
| 22 | * USA |
| 23 | * |
| 24 | * The full GNU General Public License is included in this distribution |
Emmanuel Grumbach | 410dc5a | 2013-02-18 09:22:28 +0200 | [diff] [blame] | 25 | * in the file called COPYING. |
Ilan Peer | 3a6490c | 2012-06-03 13:36:51 +0300 | [diff] [blame] | 26 | * |
| 27 | * Contact Information: |
| 28 | * Intel Linux Wireless <ilw@linux.intel.com> |
| 29 | * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 |
| 30 | * |
| 31 | * BSD LICENSE |
| 32 | * |
Johannes Berg | 128e63e | 2013-01-21 21:39:26 +0100 | [diff] [blame] | 33 | * Copyright(c) 2010 - 2013 Intel Corporation. All rights reserved. |
Ilan Peer | 3a6490c | 2012-06-03 13:36:51 +0300 | [diff] [blame] | 34 | * All rights reserved. |
| 35 | * |
| 36 | * Redistribution and use in source and binary forms, with or without |
| 37 | * modification, are permitted provided that the following conditions |
| 38 | * are met: |
| 39 | * |
| 40 | * * Redistributions of source code must retain the above copyright |
| 41 | * notice, this list of conditions and the following disclaimer. |
| 42 | * * Redistributions in binary form must reproduce the above copyright |
| 43 | * notice, this list of conditions and the following disclaimer in |
| 44 | * the documentation and/or other materials provided with the |
| 45 | * distribution. |
| 46 | * * Neither the name Intel Corporation nor the names of its |
| 47 | * contributors may be used to endorse or promote products derived |
| 48 | * from this software without specific prior written permission. |
| 49 | * |
| 50 | * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS |
| 51 | * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT |
| 52 | * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR |
| 53 | * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT |
| 54 | * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, |
| 55 | * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT |
| 56 | * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, |
| 57 | * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY |
| 58 | * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT |
| 59 | * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE |
| 60 | * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. |
| 61 | * |
| 62 | *****************************************************************************/ |
| 63 | |
Ilan Peer | c76fe6d | 2012-06-04 19:39:30 +0300 | [diff] [blame] | 64 | #include <linux/export.h> |
Ilan Peer | 3a6490c | 2012-06-03 13:36:51 +0300 | [diff] [blame] | 65 | #include <net/netlink.h> |
Ilan Peer | c76fe6d | 2012-06-04 19:39:30 +0300 | [diff] [blame] | 66 | |
Johannes Berg | 48e2934 | 2013-03-01 00:13:33 +0100 | [diff] [blame] | 67 | #include "iwl-drv.h" |
Ilan Peer | 3a6490c | 2012-06-03 13:36:51 +0300 | [diff] [blame] | 68 | #include "iwl-io.h" |
| 69 | #include "iwl-fh.h" |
| 70 | #include "iwl-prph.h" |
| 71 | #include "iwl-trans.h" |
| 72 | #include "iwl-test.h" |
| 73 | #include "iwl-csr.h" |
| 74 | #include "iwl-testmode.h" |
| 75 | |
| 76 | /* |
| 77 | * Periphery registers absolute lower bound. This is used in order to |
| 78 | * differentiate registery access through HBUS_TARG_PRPH_* and |
| 79 | * HBUS_TARG_MEM_* accesses. |
| 80 | */ |
| 81 | #define IWL_ABS_PRPH_START (0xA00000) |
| 82 | |
| 83 | /* |
| 84 | * The TLVs used in the gnl message policy between the kernel module and |
| 85 | * user space application. iwl_testmode_gnl_msg_policy is to be carried |
| 86 | * through the NL80211_CMD_TESTMODE channel regulated by nl80211. |
| 87 | * See iwl-testmode.h |
| 88 | */ |
| 89 | static |
| 90 | struct nla_policy iwl_testmode_gnl_msg_policy[IWL_TM_ATTR_MAX] = { |
| 91 | [IWL_TM_ATTR_COMMAND] = { .type = NLA_U32, }, |
| 92 | |
| 93 | [IWL_TM_ATTR_UCODE_CMD_ID] = { .type = NLA_U8, }, |
| 94 | [IWL_TM_ATTR_UCODE_CMD_DATA] = { .type = NLA_UNSPEC, }, |
| 95 | |
| 96 | [IWL_TM_ATTR_REG_OFFSET] = { .type = NLA_U32, }, |
| 97 | [IWL_TM_ATTR_REG_VALUE8] = { .type = NLA_U8, }, |
| 98 | [IWL_TM_ATTR_REG_VALUE32] = { .type = NLA_U32, }, |
| 99 | |
| 100 | [IWL_TM_ATTR_SYNC_RSP] = { .type = NLA_UNSPEC, }, |
| 101 | [IWL_TM_ATTR_UCODE_RX_PKT] = { .type = NLA_UNSPEC, }, |
| 102 | |
| 103 | [IWL_TM_ATTR_EEPROM] = { .type = NLA_UNSPEC, }, |
| 104 | |
| 105 | [IWL_TM_ATTR_TRACE_ADDR] = { .type = NLA_UNSPEC, }, |
| 106 | [IWL_TM_ATTR_TRACE_DUMP] = { .type = NLA_UNSPEC, }, |
| 107 | [IWL_TM_ATTR_TRACE_SIZE] = { .type = NLA_U32, }, |
| 108 | |
| 109 | [IWL_TM_ATTR_FIXRATE] = { .type = NLA_U32, }, |
| 110 | |
| 111 | [IWL_TM_ATTR_UCODE_OWNER] = { .type = NLA_U8, }, |
| 112 | |
| 113 | [IWL_TM_ATTR_MEM_ADDR] = { .type = NLA_U32, }, |
| 114 | [IWL_TM_ATTR_BUFFER_SIZE] = { .type = NLA_U32, }, |
| 115 | [IWL_TM_ATTR_BUFFER_DUMP] = { .type = NLA_UNSPEC, }, |
| 116 | |
| 117 | [IWL_TM_ATTR_FW_VERSION] = { .type = NLA_U32, }, |
| 118 | [IWL_TM_ATTR_DEVICE_ID] = { .type = NLA_U32, }, |
| 119 | [IWL_TM_ATTR_FW_TYPE] = { .type = NLA_U32, }, |
| 120 | [IWL_TM_ATTR_FW_INST_SIZE] = { .type = NLA_U32, }, |
| 121 | [IWL_TM_ATTR_FW_DATA_SIZE] = { .type = NLA_U32, }, |
| 122 | |
| 123 | [IWL_TM_ATTR_ENABLE_NOTIFICATION] = {.type = NLA_FLAG, }, |
| 124 | }; |
| 125 | |
| 126 | static inline void iwl_test_trace_clear(struct iwl_test *tst) |
| 127 | { |
| 128 | memset(&tst->trace, 0, sizeof(struct iwl_test_trace)); |
| 129 | } |
| 130 | |
| 131 | static void iwl_test_trace_stop(struct iwl_test *tst) |
| 132 | { |
| 133 | if (!tst->trace.enabled) |
| 134 | return; |
| 135 | |
| 136 | if (tst->trace.cpu_addr && tst->trace.dma_addr) |
| 137 | dma_free_coherent(tst->trans->dev, |
| 138 | tst->trace.tsize, |
| 139 | tst->trace.cpu_addr, |
| 140 | tst->trace.dma_addr); |
| 141 | |
| 142 | iwl_test_trace_clear(tst); |
| 143 | } |
| 144 | |
| 145 | static inline void iwl_test_mem_clear(struct iwl_test *tst) |
| 146 | { |
| 147 | memset(&tst->mem, 0, sizeof(struct iwl_test_mem)); |
| 148 | } |
| 149 | |
| 150 | static inline void iwl_test_mem_stop(struct iwl_test *tst) |
| 151 | { |
| 152 | if (!tst->mem.in_read) |
| 153 | return; |
| 154 | |
| 155 | iwl_test_mem_clear(tst); |
| 156 | } |
| 157 | |
| 158 | /* |
| 159 | * Initializes the test object |
| 160 | * During the lifetime of the test object it is assumed that the transport is |
| 161 | * started. The test object should be stopped before the transport is stopped. |
| 162 | */ |
| 163 | void iwl_test_init(struct iwl_test *tst, struct iwl_trans *trans, |
| 164 | struct iwl_test_ops *ops) |
| 165 | { |
| 166 | tst->trans = trans; |
| 167 | tst->ops = ops; |
| 168 | |
| 169 | iwl_test_trace_clear(tst); |
| 170 | iwl_test_mem_clear(tst); |
| 171 | } |
| 172 | EXPORT_SYMBOL_GPL(iwl_test_init); |
| 173 | |
/*
 * Stop the test object: abort any in-progress memory read and release
 * the trace DMA buffer. Safe to call when neither is active.
 */
void iwl_test_free(struct iwl_test *tst)
{
	iwl_test_mem_stop(tst);
	iwl_test_trace_stop(tst);
}
EXPORT_SYMBOL_GPL(iwl_test_free);
| 183 | |
Ilan Peer | c76fe6d | 2012-06-04 19:39:30 +0300 | [diff] [blame] | 184 | static inline int iwl_test_send_cmd(struct iwl_test *tst, |
| 185 | struct iwl_host_cmd *cmd) |
| 186 | { |
| 187 | return tst->ops->send_cmd(tst->trans->op_mode, cmd); |
| 188 | } |
| 189 | |
| 190 | static inline bool iwl_test_valid_hw_addr(struct iwl_test *tst, u32 addr) |
| 191 | { |
| 192 | return tst->ops->valid_hw_addr(addr); |
| 193 | } |
| 194 | |
| 195 | static inline u32 iwl_test_fw_ver(struct iwl_test *tst) |
| 196 | { |
| 197 | return tst->ops->get_fw_ver(tst->trans->op_mode); |
| 198 | } |
| 199 | |
| 200 | static inline struct sk_buff* |
| 201 | iwl_test_alloc_reply(struct iwl_test *tst, int len) |
| 202 | { |
| 203 | return tst->ops->alloc_reply(tst->trans->op_mode, len); |
| 204 | } |
| 205 | |
| 206 | static inline int iwl_test_reply(struct iwl_test *tst, struct sk_buff *skb) |
| 207 | { |
| 208 | return tst->ops->reply(tst->trans->op_mode, skb); |
| 209 | } |
| 210 | |
| 211 | static inline struct sk_buff* |
| 212 | iwl_test_alloc_event(struct iwl_test *tst, int len) |
| 213 | { |
| 214 | return tst->ops->alloc_event(tst->trans->op_mode, len); |
| 215 | } |
| 216 | |
| 217 | static inline void |
| 218 | iwl_test_event(struct iwl_test *tst, struct sk_buff *skb) |
| 219 | { |
| 220 | return tst->ops->event(tst->trans->op_mode, skb); |
| 221 | } |
| 222 | |
Ilan Peer | 3a6490c | 2012-06-03 13:36:51 +0300 | [diff] [blame] | 223 | /* |
| 224 | * This function handles the user application commands to the fw. The fw |
| 225 | * commands are sent in a synchronuous manner. In case that the user requested |
| 226 | * to get commands response, it is send to the user. |
| 227 | */ |
Ilan Peer | c76fe6d | 2012-06-04 19:39:30 +0300 | [diff] [blame] | 228 | static int iwl_test_fw_cmd(struct iwl_test *tst, struct nlattr **tb) |
Ilan Peer | 3a6490c | 2012-06-03 13:36:51 +0300 | [diff] [blame] | 229 | { |
| 230 | struct iwl_host_cmd cmd; |
| 231 | struct iwl_rx_packet *pkt; |
| 232 | struct sk_buff *skb; |
| 233 | void *reply_buf; |
| 234 | u32 reply_len; |
| 235 | int ret; |
| 236 | bool cmd_want_skb; |
| 237 | |
| 238 | memset(&cmd, 0, sizeof(struct iwl_host_cmd)); |
| 239 | |
| 240 | if (!tb[IWL_TM_ATTR_UCODE_CMD_ID] || |
| 241 | !tb[IWL_TM_ATTR_UCODE_CMD_DATA]) { |
| 242 | IWL_ERR(tst->trans, "Missing fw command mandatory fields\n"); |
| 243 | return -ENOMSG; |
| 244 | } |
| 245 | |
| 246 | cmd.flags = CMD_ON_DEMAND | CMD_SYNC; |
| 247 | cmd_want_skb = nla_get_flag(tb[IWL_TM_ATTR_UCODE_CMD_SKB]); |
| 248 | if (cmd_want_skb) |
| 249 | cmd.flags |= CMD_WANT_SKB; |
| 250 | |
| 251 | cmd.id = nla_get_u8(tb[IWL_TM_ATTR_UCODE_CMD_ID]); |
| 252 | cmd.data[0] = nla_data(tb[IWL_TM_ATTR_UCODE_CMD_DATA]); |
| 253 | cmd.len[0] = nla_len(tb[IWL_TM_ATTR_UCODE_CMD_DATA]); |
| 254 | cmd.dataflags[0] = IWL_HCMD_DFL_NOCOPY; |
| 255 | IWL_DEBUG_INFO(tst->trans, "test fw cmd=0x%x, flags 0x%x, len %d\n", |
| 256 | cmd.id, cmd.flags, cmd.len[0]); |
| 257 | |
Ilan Peer | c76fe6d | 2012-06-04 19:39:30 +0300 | [diff] [blame] | 258 | ret = iwl_test_send_cmd(tst, &cmd); |
Ilan Peer | 3a6490c | 2012-06-03 13:36:51 +0300 | [diff] [blame] | 259 | if (ret) { |
| 260 | IWL_ERR(tst->trans, "Failed to send hcmd\n"); |
| 261 | return ret; |
| 262 | } |
| 263 | if (!cmd_want_skb) |
| 264 | return ret; |
| 265 | |
| 266 | /* Handling return of SKB to the user */ |
| 267 | pkt = cmd.resp_pkt; |
| 268 | if (!pkt) { |
| 269 | IWL_ERR(tst->trans, "HCMD received a null response packet\n"); |
| 270 | return ret; |
| 271 | } |
| 272 | |
| 273 | reply_len = le32_to_cpu(pkt->len_n_flags) & FH_RSCSR_FRAME_SIZE_MSK; |
Ilan Peer | c76fe6d | 2012-06-04 19:39:30 +0300 | [diff] [blame] | 274 | skb = iwl_test_alloc_reply(tst, reply_len + 20); |
Ilan Peer | 3a6490c | 2012-06-03 13:36:51 +0300 | [diff] [blame] | 275 | reply_buf = kmalloc(reply_len, GFP_KERNEL); |
| 276 | if (!skb || !reply_buf) { |
| 277 | kfree_skb(skb); |
| 278 | kfree(reply_buf); |
| 279 | return -ENOMEM; |
| 280 | } |
| 281 | |
| 282 | /* The reply is in a page, that we cannot send to user space. */ |
| 283 | memcpy(reply_buf, &(pkt->hdr), reply_len); |
| 284 | iwl_free_resp(&cmd); |
| 285 | |
| 286 | if (nla_put_u32(skb, IWL_TM_ATTR_COMMAND, |
| 287 | IWL_TM_CMD_DEV2APP_UCODE_RX_PKT) || |
| 288 | nla_put(skb, IWL_TM_ATTR_UCODE_RX_PKT, reply_len, reply_buf)) |
| 289 | goto nla_put_failure; |
Ilan Peer | c76fe6d | 2012-06-04 19:39:30 +0300 | [diff] [blame] | 290 | return iwl_test_reply(tst, skb); |
Ilan Peer | 3a6490c | 2012-06-03 13:36:51 +0300 | [diff] [blame] | 291 | |
| 292 | nla_put_failure: |
| 293 | IWL_DEBUG_INFO(tst->trans, "Failed creating NL attributes\n"); |
| 294 | kfree(reply_buf); |
| 295 | kfree_skb(skb); |
| 296 | return -ENOMSG; |
| 297 | } |
| 298 | |
/*
 * Handles the user application commands for register access.
 *
 * Dispatches on IWL_TM_ATTR_COMMAND:
 *  - DIRECT_REG_READ32:  read a 32-bit register, reply with REG_VALUE32
 *  - DIRECT_REG_WRITE32: write a 32-bit value from REG_VALUE32
 *  - DIRECT_REG_WRITE8:  write an 8-bit value from REG_VALUE8
 *
 * Returns 0 (or the reply status) on success, -ENOMSG on missing/unknown
 * attributes, -EINVAL for an out-of-range offset, -ENOMEM / -EMSGSIZE on
 * reply allocation/build failure.
 */
static int iwl_test_reg(struct iwl_test *tst, struct nlattr **tb)
{
	u32 ofs, val32, cmd;
	u8 val8;
	struct sk_buff *skb;
	int status = 0;
	struct iwl_trans *trans = tst->trans;

	if (!tb[IWL_TM_ATTR_REG_OFFSET]) {
		IWL_ERR(trans, "Missing reg offset\n");
		return -ENOMSG;
	}

	ofs = nla_get_u32(tb[IWL_TM_ATTR_REG_OFFSET]);
	IWL_DEBUG_INFO(trans, "test reg access cmd offset=0x%x\n", ofs);

	cmd = nla_get_u32(tb[IWL_TM_ATTR_COMMAND]);

	/*
	 * Allow access only to FH/CSR/HBUS in direct mode.
	 * Since we don't have the upper bounds for the CSR and HBUS segments,
	 * we will use only the upper bound of FH for sanity check.
	 */
	if (ofs >= FH_MEM_UPPER_BOUND) {
		IWL_ERR(trans, "offset out of segment (0x0 - 0x%x)\n",
			FH_MEM_UPPER_BOUND);
		return -EINVAL;
	}

	switch (cmd) {
	case IWL_TM_CMD_APP2DEV_DIRECT_REG_READ32:
		val32 = iwl_read_direct32(tst->trans, ofs);
		IWL_DEBUG_INFO(trans, "32 value to read 0x%x\n", val32);

		/* 20 bytes: headroom for the nl attribute + u32 payload */
		skb = iwl_test_alloc_reply(tst, 20);
		if (!skb) {
			IWL_ERR(trans, "Memory allocation fail\n");
			return -ENOMEM;
		}
		if (nla_put_u32(skb, IWL_TM_ATTR_REG_VALUE32, val32))
			goto nla_put_failure;
		/* a failed reply is logged but not treated as fatal */
		status = iwl_test_reply(tst, skb);
		if (status < 0)
			IWL_ERR(trans, "Error sending msg : %d\n", status);
		break;

	case IWL_TM_CMD_APP2DEV_DIRECT_REG_WRITE32:
		if (!tb[IWL_TM_ATTR_REG_VALUE32]) {
			IWL_ERR(trans, "Missing value to write\n");
			return -ENOMSG;
		} else {
			val32 = nla_get_u32(tb[IWL_TM_ATTR_REG_VALUE32]);
			IWL_DEBUG_INFO(trans, "32b write val=0x%x\n", val32);
			iwl_write_direct32(tst->trans, ofs, val32);
		}
		break;

	case IWL_TM_CMD_APP2DEV_DIRECT_REG_WRITE8:
		if (!tb[IWL_TM_ATTR_REG_VALUE8]) {
			IWL_ERR(trans, "Missing value to write\n");
			return -ENOMSG;
		} else {
			val8 = nla_get_u8(tb[IWL_TM_ATTR_REG_VALUE8]);
			IWL_DEBUG_INFO(trans, "8b write val=0x%x\n", val8);
			iwl_write8(tst->trans, ofs, val8);
		}
		break;

	default:
		IWL_ERR(trans, "Unknown test register cmd ID\n");
		return -ENOMSG;
	}

	return status;

nla_put_failure:
	kfree_skb(skb);
	return -EMSGSIZE;
}
| 381 | |
/*
 * Handles the request to start FW tracing. Allocates the trace buffer
 * and sends a reply to user space with the (DMA) address of the allocated
 * buffer, so the FW can be pointed at it.
 *
 * Returns the reply status (>= 0) on success, -EBUSY if tracing is
 * already active, -EINVAL on a bad size, -ENOMEM / -EMSGSIZE on
 * allocation or netlink build failure.
 */
static int iwl_test_trace_begin(struct iwl_test *tst, struct nlattr **tb)
{
	struct sk_buff *skb;
	int status = 0;

	if (tst->trace.enabled)
		return -EBUSY;

	/* Size is optional; fall back to the driver default */
	if (!tb[IWL_TM_ATTR_TRACE_SIZE])
		tst->trace.size = TRACE_BUFF_SIZE_DEF;
	else
		tst->trace.size =
			nla_get_u32(tb[IWL_TM_ATTR_TRACE_SIZE]);

	if (!tst->trace.size)
		return -EINVAL;

	if (tst->trace.size < TRACE_BUFF_SIZE_MIN ||
	    tst->trace.size > TRACE_BUFF_SIZE_MAX)
		return -EINVAL;

	/* Over-allocate so the usable region can be aligned below */
	tst->trace.tsize = tst->trace.size + TRACE_BUFF_PADD;
	tst->trace.cpu_addr = dma_alloc_coherent(tst->trans->dev,
						 tst->trace.tsize,
						 &tst->trace.dma_addr,
						 GFP_KERNEL);
	if (!tst->trace.cpu_addr)
		return -ENOMEM;

	tst->trace.enabled = true;
	/* usable trace region starts at the first 0x100-aligned address */
	tst->trace.trace_addr = (u8 *)PTR_ALIGN(tst->trace.cpu_addr, 0x100);

	/* pre-fill with a recognizable pattern (0x3B bytes) */
	memset(tst->trace.trace_addr, 0x03B, tst->trace.size);

	skb = iwl_test_alloc_reply(tst, sizeof(tst->trace.dma_addr) + 20);
	if (!skb) {
		IWL_ERR(tst->trans, "Memory allocation fail\n");
		iwl_test_trace_stop(tst);
		return -ENOMEM;
	}

	if (nla_put(skb, IWL_TM_ATTR_TRACE_ADDR,
		    sizeof(tst->trace.dma_addr),
		    (u64 *)&tst->trace.dma_addr))
		goto nla_put_failure;

	status = iwl_test_reply(tst, skb);
	if (status < 0)
		IWL_ERR(tst->trans, "Error sending msg : %d\n", status);

	tst->trace.nchunks = DIV_ROUND_UP(tst->trace.size,
					  DUMP_CHUNK_SIZE);

	return status;

nla_put_failure:
	kfree_skb(skb);
	/* only undo the allocation if we own it (BEGIN_TRACE path) */
	if (nla_get_u32(tb[IWL_TM_ATTR_COMMAND]) ==
	    IWL_TM_CMD_APP2DEV_BEGIN_TRACE)
		iwl_test_trace_stop(tst);
	return -EMSGSIZE;
}
| 448 | |
| 449 | /* |
| 450 | * Handles indirect read from the periphery or the SRAM. The read is performed |
| 451 | * to a temporary buffer. The user space application should later issue a dump |
| 452 | */ |
| 453 | static int iwl_test_indirect_read(struct iwl_test *tst, u32 addr, u32 size) |
| 454 | { |
| 455 | struct iwl_trans *trans = tst->trans; |
| 456 | unsigned long flags; |
| 457 | int i; |
| 458 | |
| 459 | if (size & 0x3) |
| 460 | return -EINVAL; |
| 461 | |
| 462 | tst->mem.size = size; |
| 463 | tst->mem.addr = kmalloc(tst->mem.size, GFP_KERNEL); |
| 464 | if (tst->mem.addr == NULL) |
| 465 | return -ENOMEM; |
| 466 | |
| 467 | /* Hard-coded periphery absolute address */ |
| 468 | if (IWL_ABS_PRPH_START <= addr && |
| 469 | addr < IWL_ABS_PRPH_START + PRPH_END) { |
Lilach Edelstein | e56b04e | 2013-01-16 11:34:49 +0200 | [diff] [blame] | 470 | if (!iwl_trans_grab_nic_access(trans, false, &flags)) { |
Emmanuel Grumbach | abae238 | 2012-12-31 13:46:42 +0200 | [diff] [blame] | 471 | return -EIO; |
| 472 | } |
Ilan Peer | 3a6490c | 2012-06-03 13:36:51 +0300 | [diff] [blame] | 473 | iwl_write32(trans, HBUS_TARG_PRPH_RADDR, |
| 474 | addr | (3 << 24)); |
| 475 | for (i = 0; i < size; i += 4) |
| 476 | *(u32 *)(tst->mem.addr + i) = |
| 477 | iwl_read32(trans, HBUS_TARG_PRPH_RDAT); |
Lilach Edelstein | e56b04e | 2013-01-16 11:34:49 +0200 | [diff] [blame] | 478 | iwl_trans_release_nic_access(trans, &flags); |
Ilan Peer | 3a6490c | 2012-06-03 13:36:51 +0300 | [diff] [blame] | 479 | } else { /* target memory (SRAM) */ |
Emmanuel Grumbach | 4fd442d | 2012-12-24 14:27:11 +0200 | [diff] [blame] | 480 | iwl_trans_read_mem(trans, addr, tst->mem.addr, |
| 481 | tst->mem.size / 4); |
Ilan Peer | 3a6490c | 2012-06-03 13:36:51 +0300 | [diff] [blame] | 482 | } |
| 483 | |
| 484 | tst->mem.nchunks = |
| 485 | DIV_ROUND_UP(tst->mem.size, DUMP_CHUNK_SIZE); |
| 486 | tst->mem.in_read = true; |
| 487 | return 0; |
| 488 | |
| 489 | } |
| 490 | |
| 491 | /* |
| 492 | * Handles indirect write to the periphery or SRAM. The is performed to a |
| 493 | * temporary buffer. |
| 494 | */ |
| 495 | static int iwl_test_indirect_write(struct iwl_test *tst, u32 addr, |
| 496 | u32 size, unsigned char *buf) |
| 497 | { |
| 498 | struct iwl_trans *trans = tst->trans; |
| 499 | u32 val, i; |
| 500 | unsigned long flags; |
| 501 | |
| 502 | if (IWL_ABS_PRPH_START <= addr && |
| 503 | addr < IWL_ABS_PRPH_START + PRPH_END) { |
Emmanuel Grumbach | abae238 | 2012-12-31 13:46:42 +0200 | [diff] [blame] | 504 | /* Periphery writes can be 1-3 bytes long, or DWORDs */ |
| 505 | if (size < 4) { |
| 506 | memcpy(&val, buf, size); |
Lilach Edelstein | e56b04e | 2013-01-16 11:34:49 +0200 | [diff] [blame] | 507 | if (!iwl_trans_grab_nic_access(trans, false, &flags)) |
Emmanuel Grumbach | abae238 | 2012-12-31 13:46:42 +0200 | [diff] [blame] | 508 | return -EIO; |
Emmanuel Grumbach | abae238 | 2012-12-31 13:46:42 +0200 | [diff] [blame] | 509 | iwl_write32(trans, HBUS_TARG_PRPH_WADDR, |
| 510 | (addr & 0x0000FFFF) | |
| 511 | ((size - 1) << 24)); |
| 512 | iwl_write32(trans, HBUS_TARG_PRPH_WDAT, val); |
Lilach Edelstein | e56b04e | 2013-01-16 11:34:49 +0200 | [diff] [blame] | 513 | iwl_trans_release_nic_access(trans, &flags); |
Emmanuel Grumbach | abae238 | 2012-12-31 13:46:42 +0200 | [diff] [blame] | 514 | } else { |
| 515 | if (size % 4) |
| 516 | return -EINVAL; |
| 517 | for (i = 0; i < size; i += 4) |
| 518 | iwl_write_prph(trans, addr+i, |
| 519 | *(u32 *)(buf+i)); |
| 520 | } |
Ilan Peer | c76fe6d | 2012-06-04 19:39:30 +0300 | [diff] [blame] | 521 | } else if (iwl_test_valid_hw_addr(tst, addr)) { |
Emmanuel Grumbach | 4fd442d | 2012-12-24 14:27:11 +0200 | [diff] [blame] | 522 | iwl_trans_write_mem(trans, addr, buf, size / 4); |
Ilan Peer | 3a6490c | 2012-06-03 13:36:51 +0300 | [diff] [blame] | 523 | } else { |
| 524 | return -EINVAL; |
| 525 | } |
| 526 | return 0; |
| 527 | } |
| 528 | |
| 529 | /* |
| 530 | * Handles the user application commands for indirect read/write |
| 531 | * to/from the periphery or the SRAM. |
| 532 | */ |
| 533 | static int iwl_test_indirect_mem(struct iwl_test *tst, struct nlattr **tb) |
| 534 | { |
| 535 | u32 addr, size, cmd; |
| 536 | unsigned char *buf; |
| 537 | |
| 538 | /* Both read and write should be blocked, for atomicity */ |
| 539 | if (tst->mem.in_read) |
| 540 | return -EBUSY; |
| 541 | |
| 542 | cmd = nla_get_u32(tb[IWL_TM_ATTR_COMMAND]); |
| 543 | if (!tb[IWL_TM_ATTR_MEM_ADDR]) { |
| 544 | IWL_ERR(tst->trans, "Error finding memory offset address\n"); |
| 545 | return -ENOMSG; |
| 546 | } |
| 547 | addr = nla_get_u32(tb[IWL_TM_ATTR_MEM_ADDR]); |
| 548 | if (!tb[IWL_TM_ATTR_BUFFER_SIZE]) { |
| 549 | IWL_ERR(tst->trans, "Error finding size for memory reading\n"); |
| 550 | return -ENOMSG; |
| 551 | } |
| 552 | size = nla_get_u32(tb[IWL_TM_ATTR_BUFFER_SIZE]); |
| 553 | |
| 554 | if (cmd == IWL_TM_CMD_APP2DEV_INDIRECT_BUFFER_READ) { |
| 555 | return iwl_test_indirect_read(tst, addr, size); |
| 556 | } else { |
| 557 | if (!tb[IWL_TM_ATTR_BUFFER_DUMP]) |
| 558 | return -EINVAL; |
| 559 | buf = (unsigned char *)nla_data(tb[IWL_TM_ATTR_BUFFER_DUMP]); |
| 560 | return iwl_test_indirect_write(tst, addr, size, buf); |
| 561 | } |
| 562 | } |
| 563 | |
| 564 | /* |
| 565 | * Enable notifications to user space |
| 566 | */ |
| 567 | static int iwl_test_notifications(struct iwl_test *tst, |
| 568 | struct nlattr **tb) |
| 569 | { |
| 570 | tst->notify = nla_get_flag(tb[IWL_TM_ATTR_ENABLE_NOTIFICATION]); |
| 571 | return 0; |
| 572 | } |
| 573 | |
| 574 | /* |
| 575 | * Handles the request to get the device id |
| 576 | */ |
Ilan Peer | c76fe6d | 2012-06-04 19:39:30 +0300 | [diff] [blame] | 577 | static int iwl_test_get_dev_id(struct iwl_test *tst, struct nlattr **tb) |
Ilan Peer | 3a6490c | 2012-06-03 13:36:51 +0300 | [diff] [blame] | 578 | { |
| 579 | u32 devid = tst->trans->hw_id; |
| 580 | struct sk_buff *skb; |
| 581 | int status; |
| 582 | |
| 583 | IWL_DEBUG_INFO(tst->trans, "hw version: 0x%x\n", devid); |
| 584 | |
Ilan Peer | c76fe6d | 2012-06-04 19:39:30 +0300 | [diff] [blame] | 585 | skb = iwl_test_alloc_reply(tst, 20); |
Ilan Peer | 3a6490c | 2012-06-03 13:36:51 +0300 | [diff] [blame] | 586 | if (!skb) { |
| 587 | IWL_ERR(tst->trans, "Memory allocation fail\n"); |
| 588 | return -ENOMEM; |
| 589 | } |
| 590 | |
| 591 | if (nla_put_u32(skb, IWL_TM_ATTR_DEVICE_ID, devid)) |
| 592 | goto nla_put_failure; |
Ilan Peer | c76fe6d | 2012-06-04 19:39:30 +0300 | [diff] [blame] | 593 | status = iwl_test_reply(tst, skb); |
Ilan Peer | 3a6490c | 2012-06-03 13:36:51 +0300 | [diff] [blame] | 594 | if (status < 0) |
| 595 | IWL_ERR(tst->trans, "Error sending msg : %d\n", status); |
| 596 | |
| 597 | return 0; |
| 598 | |
| 599 | nla_put_failure: |
| 600 | kfree_skb(skb); |
| 601 | return -EMSGSIZE; |
| 602 | } |
| 603 | |
| 604 | /* |
| 605 | * Handles the request to get the FW version |
| 606 | */ |
Ilan Peer | c76fe6d | 2012-06-04 19:39:30 +0300 | [diff] [blame] | 607 | static int iwl_test_get_fw_ver(struct iwl_test *tst, struct nlattr **tb) |
Ilan Peer | 3a6490c | 2012-06-03 13:36:51 +0300 | [diff] [blame] | 608 | { |
| 609 | struct sk_buff *skb; |
| 610 | int status; |
Ilan Peer | c76fe6d | 2012-06-04 19:39:30 +0300 | [diff] [blame] | 611 | u32 ver = iwl_test_fw_ver(tst); |
Ilan Peer | 3a6490c | 2012-06-03 13:36:51 +0300 | [diff] [blame] | 612 | |
| 613 | IWL_DEBUG_INFO(tst->trans, "uCode version raw: 0x%x\n", ver); |
| 614 | |
Ilan Peer | c76fe6d | 2012-06-04 19:39:30 +0300 | [diff] [blame] | 615 | skb = iwl_test_alloc_reply(tst, 20); |
Ilan Peer | 3a6490c | 2012-06-03 13:36:51 +0300 | [diff] [blame] | 616 | if (!skb) { |
| 617 | IWL_ERR(tst->trans, "Memory allocation fail\n"); |
| 618 | return -ENOMEM; |
| 619 | } |
| 620 | |
| 621 | if (nla_put_u32(skb, IWL_TM_ATTR_FW_VERSION, ver)) |
| 622 | goto nla_put_failure; |
| 623 | |
Ilan Peer | c76fe6d | 2012-06-04 19:39:30 +0300 | [diff] [blame] | 624 | status = iwl_test_reply(tst, skb); |
Ilan Peer | 3a6490c | 2012-06-03 13:36:51 +0300 | [diff] [blame] | 625 | if (status < 0) |
| 626 | IWL_ERR(tst->trans, "Error sending msg : %d\n", status); |
| 627 | |
| 628 | return 0; |
| 629 | |
| 630 | nla_put_failure: |
| 631 | kfree_skb(skb); |
| 632 | return -EMSGSIZE; |
| 633 | } |
| 634 | |
| 635 | /* |
| 636 | * Parse the netlink message and validate that the IWL_TM_ATTR_CMD exists |
| 637 | */ |
| 638 | int iwl_test_parse(struct iwl_test *tst, struct nlattr **tb, |
| 639 | void *data, int len) |
| 640 | { |
| 641 | int result; |
| 642 | |
| 643 | result = nla_parse(tb, IWL_TM_ATTR_MAX - 1, data, len, |
| 644 | iwl_testmode_gnl_msg_policy); |
| 645 | if (result) { |
| 646 | IWL_ERR(tst->trans, "Fail parse gnl msg: %d\n", result); |
| 647 | return result; |
| 648 | } |
| 649 | |
| 650 | /* IWL_TM_ATTR_COMMAND is absolutely mandatory */ |
| 651 | if (!tb[IWL_TM_ATTR_COMMAND]) { |
| 652 | IWL_ERR(tst->trans, "Missing testmode command type\n"); |
| 653 | return -ENOMSG; |
| 654 | } |
| 655 | return 0; |
| 656 | } |
Johannes Berg | 48e2934 | 2013-03-01 00:13:33 +0100 | [diff] [blame] | 657 | IWL_EXPORT_SYMBOL(iwl_test_parse); |
Ilan Peer | 3a6490c | 2012-06-03 13:36:51 +0300 | [diff] [blame] | 658 | |
| 659 | /* |
| 660 | * Handle test commands. |
| 661 | * Returns 1 for unknown commands (not handled by the test object); negative |
| 662 | * value in case of error. |
| 663 | */ |
Ilan Peer | c76fe6d | 2012-06-04 19:39:30 +0300 | [diff] [blame] | 664 | int iwl_test_handle_cmd(struct iwl_test *tst, struct nlattr **tb) |
Ilan Peer | 3a6490c | 2012-06-03 13:36:51 +0300 | [diff] [blame] | 665 | { |
| 666 | int result; |
| 667 | |
| 668 | switch (nla_get_u32(tb[IWL_TM_ATTR_COMMAND])) { |
| 669 | case IWL_TM_CMD_APP2DEV_UCODE: |
| 670 | IWL_DEBUG_INFO(tst->trans, "test cmd to uCode\n"); |
Ilan Peer | c76fe6d | 2012-06-04 19:39:30 +0300 | [diff] [blame] | 671 | result = iwl_test_fw_cmd(tst, tb); |
Ilan Peer | 3a6490c | 2012-06-03 13:36:51 +0300 | [diff] [blame] | 672 | break; |
| 673 | |
| 674 | case IWL_TM_CMD_APP2DEV_DIRECT_REG_READ32: |
| 675 | case IWL_TM_CMD_APP2DEV_DIRECT_REG_WRITE32: |
| 676 | case IWL_TM_CMD_APP2DEV_DIRECT_REG_WRITE8: |
| 677 | IWL_DEBUG_INFO(tst->trans, "test cmd to register\n"); |
Ilan Peer | c76fe6d | 2012-06-04 19:39:30 +0300 | [diff] [blame] | 678 | result = iwl_test_reg(tst, tb); |
Ilan Peer | 3a6490c | 2012-06-03 13:36:51 +0300 | [diff] [blame] | 679 | break; |
| 680 | |
| 681 | case IWL_TM_CMD_APP2DEV_BEGIN_TRACE: |
| 682 | IWL_DEBUG_INFO(tst->trans, "test uCode trace cmd to driver\n"); |
Ilan Peer | c76fe6d | 2012-06-04 19:39:30 +0300 | [diff] [blame] | 683 | result = iwl_test_trace_begin(tst, tb); |
Ilan Peer | 3a6490c | 2012-06-03 13:36:51 +0300 | [diff] [blame] | 684 | break; |
| 685 | |
| 686 | case IWL_TM_CMD_APP2DEV_END_TRACE: |
| 687 | iwl_test_trace_stop(tst); |
| 688 | result = 0; |
| 689 | break; |
| 690 | |
| 691 | case IWL_TM_CMD_APP2DEV_INDIRECT_BUFFER_READ: |
| 692 | case IWL_TM_CMD_APP2DEV_INDIRECT_BUFFER_WRITE: |
| 693 | IWL_DEBUG_INFO(tst->trans, "test indirect memory cmd\n"); |
| 694 | result = iwl_test_indirect_mem(tst, tb); |
| 695 | break; |
| 696 | |
| 697 | case IWL_TM_CMD_APP2DEV_NOTIFICATIONS: |
| 698 | IWL_DEBUG_INFO(tst->trans, "test notifications cmd\n"); |
| 699 | result = iwl_test_notifications(tst, tb); |
| 700 | break; |
| 701 | |
| 702 | case IWL_TM_CMD_APP2DEV_GET_FW_VERSION: |
| 703 | IWL_DEBUG_INFO(tst->trans, "test get FW ver cmd\n"); |
Ilan Peer | c76fe6d | 2012-06-04 19:39:30 +0300 | [diff] [blame] | 704 | result = iwl_test_get_fw_ver(tst, tb); |
Ilan Peer | 3a6490c | 2012-06-03 13:36:51 +0300 | [diff] [blame] | 705 | break; |
| 706 | |
| 707 | case IWL_TM_CMD_APP2DEV_GET_DEVICE_ID: |
| 708 | IWL_DEBUG_INFO(tst->trans, "test Get device ID cmd\n"); |
Ilan Peer | c76fe6d | 2012-06-04 19:39:30 +0300 | [diff] [blame] | 709 | result = iwl_test_get_dev_id(tst, tb); |
Ilan Peer | 3a6490c | 2012-06-03 13:36:51 +0300 | [diff] [blame] | 710 | break; |
| 711 | |
| 712 | default: |
| 713 | IWL_DEBUG_INFO(tst->trans, "Unknown test command\n"); |
| 714 | result = 1; |
| 715 | break; |
| 716 | } |
| 717 | return result; |
| 718 | } |
Johannes Berg | 48e2934 | 2013-03-01 00:13:33 +0100 | [diff] [blame] | 719 | IWL_EXPORT_SYMBOL(iwl_test_handle_cmd); |
Ilan Peer | 3a6490c | 2012-06-03 13:36:51 +0300 | [diff] [blame] | 720 | |
| 721 | static int iwl_test_trace_dump(struct iwl_test *tst, struct sk_buff *skb, |
| 722 | struct netlink_callback *cb) |
| 723 | { |
| 724 | int idx, length; |
| 725 | |
| 726 | if (!tst->trace.enabled || !tst->trace.trace_addr) |
| 727 | return -EFAULT; |
| 728 | |
| 729 | idx = cb->args[4]; |
| 730 | if (idx >= tst->trace.nchunks) |
| 731 | return -ENOENT; |
| 732 | |
| 733 | length = DUMP_CHUNK_SIZE; |
| 734 | if (((idx + 1) == tst->trace.nchunks) && |
| 735 | (tst->trace.size % DUMP_CHUNK_SIZE)) |
| 736 | length = tst->trace.size % |
| 737 | DUMP_CHUNK_SIZE; |
| 738 | |
| 739 | if (nla_put(skb, IWL_TM_ATTR_TRACE_DUMP, length, |
| 740 | tst->trace.trace_addr + (DUMP_CHUNK_SIZE * idx))) |
| 741 | goto nla_put_failure; |
| 742 | |
| 743 | cb->args[4] = ++idx; |
| 744 | return 0; |
| 745 | |
| 746 | nla_put_failure: |
| 747 | return -ENOBUFS; |
| 748 | } |
| 749 | |
| 750 | static int iwl_test_buffer_dump(struct iwl_test *tst, struct sk_buff *skb, |
| 751 | struct netlink_callback *cb) |
| 752 | { |
| 753 | int idx, length; |
| 754 | |
| 755 | if (!tst->mem.in_read) |
| 756 | return -EFAULT; |
| 757 | |
| 758 | idx = cb->args[4]; |
| 759 | if (idx >= tst->mem.nchunks) { |
| 760 | iwl_test_mem_stop(tst); |
| 761 | return -ENOENT; |
| 762 | } |
| 763 | |
| 764 | length = DUMP_CHUNK_SIZE; |
| 765 | if (((idx + 1) == tst->mem.nchunks) && |
| 766 | (tst->mem.size % DUMP_CHUNK_SIZE)) |
| 767 | length = tst->mem.size % DUMP_CHUNK_SIZE; |
| 768 | |
| 769 | if (nla_put(skb, IWL_TM_ATTR_BUFFER_DUMP, length, |
| 770 | tst->mem.addr + (DUMP_CHUNK_SIZE * idx))) |
| 771 | goto nla_put_failure; |
| 772 | |
| 773 | cb->args[4] = ++idx; |
| 774 | return 0; |
| 775 | |
| 776 | nla_put_failure: |
| 777 | return -ENOBUFS; |
| 778 | } |
| 779 | |
| 780 | /* |
| 781 | * Handle dump commands. |
| 782 | * Returns 1 for unknown commands (not handled by the test object); negative |
| 783 | * value in case of error. |
| 784 | */ |
| 785 | int iwl_test_dump(struct iwl_test *tst, u32 cmd, struct sk_buff *skb, |
| 786 | struct netlink_callback *cb) |
| 787 | { |
| 788 | int result; |
| 789 | |
| 790 | switch (cmd) { |
| 791 | case IWL_TM_CMD_APP2DEV_READ_TRACE: |
| 792 | IWL_DEBUG_INFO(tst->trans, "uCode trace cmd\n"); |
| 793 | result = iwl_test_trace_dump(tst, skb, cb); |
| 794 | break; |
| 795 | |
| 796 | case IWL_TM_CMD_APP2DEV_INDIRECT_BUFFER_DUMP: |
| 797 | IWL_DEBUG_INFO(tst->trans, "testmode sram dump cmd\n"); |
| 798 | result = iwl_test_buffer_dump(tst, skb, cb); |
| 799 | break; |
| 800 | |
| 801 | default: |
| 802 | result = 1; |
| 803 | break; |
| 804 | } |
| 805 | return result; |
| 806 | } |
Johannes Berg | 48e2934 | 2013-03-01 00:13:33 +0100 | [diff] [blame] | 807 | IWL_EXPORT_SYMBOL(iwl_test_dump); |
Ilan Peer | 3a6490c | 2012-06-03 13:36:51 +0300 | [diff] [blame] | 808 | |
| 809 | /* |
| 810 | * Multicast a spontaneous messages from the device to the user space. |
| 811 | */ |
Ilan Peer | c76fe6d | 2012-06-04 19:39:30 +0300 | [diff] [blame] | 812 | static void iwl_test_send_rx(struct iwl_test *tst, |
Ilan Peer | 3a6490c | 2012-06-03 13:36:51 +0300 | [diff] [blame] | 813 | struct iwl_rx_cmd_buffer *rxb) |
| 814 | { |
| 815 | struct sk_buff *skb; |
| 816 | struct iwl_rx_packet *data; |
| 817 | int length; |
| 818 | |
| 819 | data = rxb_addr(rxb); |
| 820 | length = le32_to_cpu(data->len_n_flags) & FH_RSCSR_FRAME_SIZE_MSK; |
| 821 | |
| 822 | /* the length doesn't include len_n_flags field, so add it manually */ |
| 823 | length += sizeof(__le32); |
| 824 | |
Ilan Peer | c76fe6d | 2012-06-04 19:39:30 +0300 | [diff] [blame] | 825 | skb = iwl_test_alloc_event(tst, length + 20); |
Ilan Peer | 3a6490c | 2012-06-03 13:36:51 +0300 | [diff] [blame] | 826 | if (skb == NULL) { |
| 827 | IWL_ERR(tst->trans, "Out of memory for message to user\n"); |
| 828 | return; |
| 829 | } |
| 830 | |
| 831 | if (nla_put_u32(skb, IWL_TM_ATTR_COMMAND, |
| 832 | IWL_TM_CMD_DEV2APP_UCODE_RX_PKT) || |
| 833 | nla_put(skb, IWL_TM_ATTR_UCODE_RX_PKT, length, data)) |
| 834 | goto nla_put_failure; |
| 835 | |
Ilan Peer | c76fe6d | 2012-06-04 19:39:30 +0300 | [diff] [blame] | 836 | iwl_test_event(tst, skb); |
Ilan Peer | 3a6490c | 2012-06-03 13:36:51 +0300 | [diff] [blame] | 837 | return; |
| 838 | |
| 839 | nla_put_failure: |
| 840 | kfree_skb(skb); |
| 841 | IWL_ERR(tst->trans, "Ouch, overran buffer, check allocation!\n"); |
| 842 | } |
| 843 | |
| 844 | /* |
| 845 | * Called whenever a Rx frames is recevied from the device. If notifications to |
| 846 | * the user space are requested, sends the frames to the user. |
| 847 | */ |
Ilan Peer | c76fe6d | 2012-06-04 19:39:30 +0300 | [diff] [blame] | 848 | void iwl_test_rx(struct iwl_test *tst, struct iwl_rx_cmd_buffer *rxb) |
Ilan Peer | 3a6490c | 2012-06-03 13:36:51 +0300 | [diff] [blame] | 849 | { |
| 850 | if (tst->notify) |
Ilan Peer | c76fe6d | 2012-06-04 19:39:30 +0300 | [diff] [blame] | 851 | iwl_test_send_rx(tst, rxb); |
Ilan Peer | 3a6490c | 2012-06-03 13:36:51 +0300 | [diff] [blame] | 852 | } |
Johannes Berg | 48e2934 | 2013-03-01 00:13:33 +0100 | [diff] [blame] | 853 | IWL_EXPORT_SYMBOL(iwl_test_rx); |