blob: f8d8df8f96c379de3a62f9c47db6b9a8a919c79e [file] [log] [blame]
Ilan Peer3a6490c2012-06-03 13:36:51 +03001/******************************************************************************
2 *
3 * This file is provided under a dual BSD/GPLv2 license. When using or
4 * redistributing this file, you may do so under either license.
5 *
6 * GPL LICENSE SUMMARY
7 *
8 * Copyright(c) 2010 - 2012 Intel Corporation. All rights reserved.
9 *
10 * This program is free software; you can redistribute it and/or modify
11 * it under the terms of version 2 of the GNU General Public License as
12 * published by the Free Software Foundation.
13 *
14 * This program is distributed in the hope that it will be useful, but
15 * WITHOUT ANY WARRANTY; without even the implied warranty of
16 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
17 * General Public License for more details.
18 *
19 * You should have received a copy of the GNU General Public License
20 * along with this program; if not, write to the Free Software
21 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
22 * USA
23 *
24 * The full GNU General Public License is included in this distribution
25 * in the file called LICENSE.GPL.
26 *
27 * Contact Information:
28 * Intel Linux Wireless <ilw@linux.intel.com>
29 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
30 *
31 * BSD LICENSE
32 *
33 * Copyright(c) 2010 - 2012 Intel Corporation. All rights reserved.
34 * All rights reserved.
35 *
36 * Redistribution and use in source and binary forms, with or without
37 * modification, are permitted provided that the following conditions
38 * are met:
39 *
40 * * Redistributions of source code must retain the above copyright
41 * notice, this list of conditions and the following disclaimer.
42 * * Redistributions in binary form must reproduce the above copyright
43 * notice, this list of conditions and the following disclaimer in
44 * the documentation and/or other materials provided with the
45 * distribution.
46 * * Neither the name Intel Corporation nor the names of its
47 * contributors may be used to endorse or promote products derived
48 * from this software without specific prior written permission.
49 *
50 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
51 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
52 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
53 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
54 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
55 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
56 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
57 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
58 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
59 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
60 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
61 *
62 *****************************************************************************/
63
Ilan Peerc76fe6d2012-06-04 19:39:30 +030064#include <linux/export.h>
Ilan Peer3a6490c2012-06-03 13:36:51 +030065#include <net/netlink.h>
Ilan Peerc76fe6d2012-06-04 19:39:30 +030066
Ilan Peer3a6490c2012-06-03 13:36:51 +030067#include "iwl-io.h"
68#include "iwl-fh.h"
69#include "iwl-prph.h"
70#include "iwl-trans.h"
71#include "iwl-test.h"
72#include "iwl-csr.h"
73#include "iwl-testmode.h"
74
/*
 * Periphery registers absolute lower bound. This is used in order to
 * differentiate register access through HBUS_TARG_PRPH_* and
 * HBUS_TARG_MEM_* accesses.
 */
80#define IWL_ABS_PRPH_START (0xA00000)
81
/*
 * Netlink attribute policy for the testmode messages exchanged between the
 * kernel module and the user space application. Carried through the
 * NL80211_CMD_TESTMODE channel regulated by nl80211; the IWL_TM_ATTR_*
 * IDs are defined in iwl-testmode.h. Attributes not listed here default
 * to NLA_UNSPEC (no validation).
 */
static
struct nla_policy iwl_testmode_gnl_msg_policy[IWL_TM_ATTR_MAX] = {
	[IWL_TM_ATTR_COMMAND] = { .type = NLA_U32, },

	/* host command pass-through */
	[IWL_TM_ATTR_UCODE_CMD_ID] = { .type = NLA_U8, },
	[IWL_TM_ATTR_UCODE_CMD_DATA] = { .type = NLA_UNSPEC, },

	/* direct register access */
	[IWL_TM_ATTR_REG_OFFSET] = { .type = NLA_U32, },
	[IWL_TM_ATTR_REG_VALUE8] = { .type = NLA_U8, },
	[IWL_TM_ATTR_REG_VALUE32] = { .type = NLA_U32, },

	[IWL_TM_ATTR_SYNC_RSP] = { .type = NLA_UNSPEC, },
	[IWL_TM_ATTR_UCODE_RX_PKT] = { .type = NLA_UNSPEC, },

	[IWL_TM_ATTR_EEPROM] = { .type = NLA_UNSPEC, },

	/* FW trace buffer control */
	[IWL_TM_ATTR_TRACE_ADDR] = { .type = NLA_UNSPEC, },
	[IWL_TM_ATTR_TRACE_DUMP] = { .type = NLA_UNSPEC, },
	[IWL_TM_ATTR_TRACE_SIZE] = { .type = NLA_U32, },

	[IWL_TM_ATTR_FIXRATE] = { .type = NLA_U32, },

	[IWL_TM_ATTR_UCODE_OWNER] = { .type = NLA_U8, },

	/* indirect SRAM/periphery access */
	[IWL_TM_ATTR_MEM_ADDR] = { .type = NLA_U32, },
	[IWL_TM_ATTR_BUFFER_SIZE] = { .type = NLA_U32, },
	[IWL_TM_ATTR_BUFFER_DUMP] = { .type = NLA_UNSPEC, },

	/* device / firmware information queries */
	[IWL_TM_ATTR_FW_VERSION] = { .type = NLA_U32, },
	[IWL_TM_ATTR_DEVICE_ID] = { .type = NLA_U32, },
	[IWL_TM_ATTR_FW_TYPE] = { .type = NLA_U32, },
	[IWL_TM_ATTR_FW_INST_SIZE] = { .type = NLA_U32, },
	[IWL_TM_ATTR_FW_DATA_SIZE] = { .type = NLA_U32, },

	[IWL_TM_ATTR_ENABLE_NOTIFICATION] = {.type = NLA_FLAG, },
};
124
125static inline void iwl_test_trace_clear(struct iwl_test *tst)
126{
127 memset(&tst->trace, 0, sizeof(struct iwl_test_trace));
128}
129
130static void iwl_test_trace_stop(struct iwl_test *tst)
131{
132 if (!tst->trace.enabled)
133 return;
134
135 if (tst->trace.cpu_addr && tst->trace.dma_addr)
136 dma_free_coherent(tst->trans->dev,
137 tst->trace.tsize,
138 tst->trace.cpu_addr,
139 tst->trace.dma_addr);
140
141 iwl_test_trace_clear(tst);
142}
143
144static inline void iwl_test_mem_clear(struct iwl_test *tst)
145{
146 memset(&tst->mem, 0, sizeof(struct iwl_test_mem));
147}
148
149static inline void iwl_test_mem_stop(struct iwl_test *tst)
150{
151 if (!tst->mem.in_read)
152 return;
153
154 iwl_test_mem_clear(tst);
155}
156
157/*
158 * Initializes the test object
159 * During the lifetime of the test object it is assumed that the transport is
160 * started. The test object should be stopped before the transport is stopped.
161 */
162void iwl_test_init(struct iwl_test *tst, struct iwl_trans *trans,
163 struct iwl_test_ops *ops)
164{
165 tst->trans = trans;
166 tst->ops = ops;
167
168 iwl_test_trace_clear(tst);
169 iwl_test_mem_clear(tst);
170}
171EXPORT_SYMBOL_GPL(iwl_test_init);
172
/*
 * Tear down the test object: abort any in-progress indirect memory read
 * and stop/free the FW trace buffer.
 */
void iwl_test_free(struct iwl_test *tst)
{
	iwl_test_mem_stop(tst);
	iwl_test_trace_stop(tst);
}
EXPORT_SYMBOL_GPL(iwl_test_free);
182
/* Thin wrappers forwarding to the op-mode-supplied test callbacks. */

/* Send a host command synchronously via the op-mode. */
static inline int iwl_test_send_cmd(struct iwl_test *tst,
				    struct iwl_host_cmd *cmd)
{
	return tst->ops->send_cmd(tst->trans->op_mode, cmd);
}

/* Ask the op-mode whether addr is a valid HW (SRAM) address. */
static inline bool iwl_test_valid_hw_addr(struct iwl_test *tst, u32 addr)
{
	return tst->ops->valid_hw_addr(addr);
}

/* Fetch the raw firmware version from the op-mode. */
static inline u32 iwl_test_fw_ver(struct iwl_test *tst)
{
	return tst->ops->get_fw_ver(tst->trans->op_mode);
}

/* Allocate a reply skb of the given length for a synchronous response. */
static inline struct sk_buff*
iwl_test_alloc_reply(struct iwl_test *tst, int len)
{
	return tst->ops->alloc_reply(tst->trans->op_mode, len);
}

/* Hand a filled reply skb back to user space. */
static inline int iwl_test_reply(struct iwl_test *tst, struct sk_buff *skb)
{
	return tst->ops->reply(tst->trans->op_mode, skb);
}

/* Allocate an event skb of the given length for a spontaneous message. */
static inline struct sk_buff*
iwl_test_alloc_event(struct iwl_test *tst, int len)
{
	return tst->ops->alloc_event(tst->trans->op_mode, len);
}
215
216static inline void
217iwl_test_event(struct iwl_test *tst, struct sk_buff *skb)
218{
219 return tst->ops->event(tst->trans->op_mode, skb);
220}
221
Ilan Peer3a6490c2012-06-03 13:36:51 +0300222/*
223 * This function handles the user application commands to the fw. The fw
224 * commands are sent in a synchronuous manner. In case that the user requested
225 * to get commands response, it is send to the user.
226 */
Ilan Peerc76fe6d2012-06-04 19:39:30 +0300227static int iwl_test_fw_cmd(struct iwl_test *tst, struct nlattr **tb)
Ilan Peer3a6490c2012-06-03 13:36:51 +0300228{
229 struct iwl_host_cmd cmd;
230 struct iwl_rx_packet *pkt;
231 struct sk_buff *skb;
232 void *reply_buf;
233 u32 reply_len;
234 int ret;
235 bool cmd_want_skb;
236
237 memset(&cmd, 0, sizeof(struct iwl_host_cmd));
238
239 if (!tb[IWL_TM_ATTR_UCODE_CMD_ID] ||
240 !tb[IWL_TM_ATTR_UCODE_CMD_DATA]) {
241 IWL_ERR(tst->trans, "Missing fw command mandatory fields\n");
242 return -ENOMSG;
243 }
244
245 cmd.flags = CMD_ON_DEMAND | CMD_SYNC;
246 cmd_want_skb = nla_get_flag(tb[IWL_TM_ATTR_UCODE_CMD_SKB]);
247 if (cmd_want_skb)
248 cmd.flags |= CMD_WANT_SKB;
249
250 cmd.id = nla_get_u8(tb[IWL_TM_ATTR_UCODE_CMD_ID]);
251 cmd.data[0] = nla_data(tb[IWL_TM_ATTR_UCODE_CMD_DATA]);
252 cmd.len[0] = nla_len(tb[IWL_TM_ATTR_UCODE_CMD_DATA]);
253 cmd.dataflags[0] = IWL_HCMD_DFL_NOCOPY;
254 IWL_DEBUG_INFO(tst->trans, "test fw cmd=0x%x, flags 0x%x, len %d\n",
255 cmd.id, cmd.flags, cmd.len[0]);
256
Ilan Peerc76fe6d2012-06-04 19:39:30 +0300257 ret = iwl_test_send_cmd(tst, &cmd);
Ilan Peer3a6490c2012-06-03 13:36:51 +0300258 if (ret) {
259 IWL_ERR(tst->trans, "Failed to send hcmd\n");
260 return ret;
261 }
262 if (!cmd_want_skb)
263 return ret;
264
265 /* Handling return of SKB to the user */
266 pkt = cmd.resp_pkt;
267 if (!pkt) {
268 IWL_ERR(tst->trans, "HCMD received a null response packet\n");
269 return ret;
270 }
271
272 reply_len = le32_to_cpu(pkt->len_n_flags) & FH_RSCSR_FRAME_SIZE_MSK;
Ilan Peerc76fe6d2012-06-04 19:39:30 +0300273 skb = iwl_test_alloc_reply(tst, reply_len + 20);
Ilan Peer3a6490c2012-06-03 13:36:51 +0300274 reply_buf = kmalloc(reply_len, GFP_KERNEL);
275 if (!skb || !reply_buf) {
276 kfree_skb(skb);
277 kfree(reply_buf);
278 return -ENOMEM;
279 }
280
281 /* The reply is in a page, that we cannot send to user space. */
282 memcpy(reply_buf, &(pkt->hdr), reply_len);
283 iwl_free_resp(&cmd);
284
285 if (nla_put_u32(skb, IWL_TM_ATTR_COMMAND,
286 IWL_TM_CMD_DEV2APP_UCODE_RX_PKT) ||
287 nla_put(skb, IWL_TM_ATTR_UCODE_RX_PKT, reply_len, reply_buf))
288 goto nla_put_failure;
Ilan Peerc76fe6d2012-06-04 19:39:30 +0300289 return iwl_test_reply(tst, skb);
Ilan Peer3a6490c2012-06-03 13:36:51 +0300290
291nla_put_failure:
292 IWL_DEBUG_INFO(tst->trans, "Failed creating NL attributes\n");
293 kfree(reply_buf);
294 kfree_skb(skb);
295 return -ENOMSG;
296}
297
/*
 * Handles the user application commands for direct register access:
 * 32-bit read, 32-bit write and 8-bit write. Returns 0 (or the positive
 * reply status) on success, negative errno on failure.
 */
static int iwl_test_reg(struct iwl_test *tst, struct nlattr **tb)
{
	u32 ofs, val32, cmd;
	u8 val8;
	struct sk_buff *skb;
	int status = 0;
	struct iwl_trans *trans = tst->trans;

	if (!tb[IWL_TM_ATTR_REG_OFFSET]) {
		IWL_ERR(trans, "Missing reg offset\n");
		return -ENOMSG;
	}

	ofs = nla_get_u32(tb[IWL_TM_ATTR_REG_OFFSET]);
	IWL_DEBUG_INFO(trans, "test reg access cmd offset=0x%x\n", ofs);

	cmd = nla_get_u32(tb[IWL_TM_ATTR_COMMAND]);

	/*
	 * Allow access only to FH/CSR/HBUS in direct mode.
	 * Since we don't have the upper bounds for the CSR and HBUS segments,
	 * we will use only the upper bound of FH for sanity check.
	 */
	if (ofs >= FH_MEM_UPPER_BOUND) {
		IWL_ERR(trans, "offset out of segment (0x0 - 0x%x)\n",
			FH_MEM_UPPER_BOUND);
		return -EINVAL;
	}

	switch (cmd) {
	case IWL_TM_CMD_APP2DEV_DIRECT_REG_READ32:
		val32 = iwl_read_direct32(tst->trans, ofs);
		IWL_DEBUG_INFO(trans, "32 value to read 0x%x\n", val32);

		/* 20 bytes: presumably netlink attribute headroom - confirm */
		skb = iwl_test_alloc_reply(tst, 20);
		if (!skb) {
			IWL_ERR(trans, "Memory allocation fail\n");
			return -ENOMEM;
		}
		if (nla_put_u32(skb, IWL_TM_ATTR_REG_VALUE32, val32))
			goto nla_put_failure;
		status = iwl_test_reply(tst, skb);
		if (status < 0)
			IWL_ERR(trans, "Error sending msg : %d\n", status);
		break;

	case IWL_TM_CMD_APP2DEV_DIRECT_REG_WRITE32:
		if (!tb[IWL_TM_ATTR_REG_VALUE32]) {
			IWL_ERR(trans, "Missing value to write\n");
			return -ENOMSG;
		} else {
			val32 = nla_get_u32(tb[IWL_TM_ATTR_REG_VALUE32]);
			IWL_DEBUG_INFO(trans, "32b write val=0x%x\n", val32);
			iwl_write_direct32(tst->trans, ofs, val32);
		}
		break;

	case IWL_TM_CMD_APP2DEV_DIRECT_REG_WRITE8:
		if (!tb[IWL_TM_ATTR_REG_VALUE8]) {
			IWL_ERR(trans, "Missing value to write\n");
			return -ENOMSG;
		} else {
			val8 = nla_get_u8(tb[IWL_TM_ATTR_REG_VALUE8]);
			IWL_DEBUG_INFO(trans, "8b write val=0x%x\n", val8);
			iwl_write8(tst->trans, ofs, val8);
		}
		break;

	default:
		IWL_ERR(trans, "Unknown test register cmd ID\n");
		return -ENOMSG;
	}

	return status;

nla_put_failure:
	kfree_skb(skb);
	return -EMSGSIZE;
}
380
/*
 * Handles the request to start FW tracing. Allocates a DMA-coherent trace
 * buffer and sends a reply to user space with the DMA address of the
 * allocated buffer.
 */
static int iwl_test_trace_begin(struct iwl_test *tst, struct nlattr **tb)
{
	struct sk_buff *skb;
	int status = 0;

	/* only a single trace session at a time */
	if (tst->trace.enabled)
		return -EBUSY;

	if (!tb[IWL_TM_ATTR_TRACE_SIZE])
		tst->trace.size = TRACE_BUFF_SIZE_DEF;
	else
		tst->trace.size =
			nla_get_u32(tb[IWL_TM_ATTR_TRACE_SIZE]);

	if (!tst->trace.size)
		return -EINVAL;

	if (tst->trace.size < TRACE_BUFF_SIZE_MIN ||
	    tst->trace.size > TRACE_BUFF_SIZE_MAX)
		return -EINVAL;

	/* extra padding so the trace pointer can be 0x100-aligned below */
	tst->trace.tsize = tst->trace.size + TRACE_BUFF_PADD;
	tst->trace.cpu_addr = dma_alloc_coherent(tst->trans->dev,
						 tst->trace.tsize,
						 &tst->trace.dma_addr,
						 GFP_KERNEL);
	if (!tst->trace.cpu_addr)
		return -ENOMEM;

	tst->trace.enabled = true;
	tst->trace.trace_addr = (u8 *)PTR_ALIGN(tst->trace.cpu_addr, 0x100);

	/* 0x3B fill - presumably a recognizable marker pattern; confirm */
	memset(tst->trace.trace_addr, 0x03B, tst->trace.size);

	skb = iwl_test_alloc_reply(tst, sizeof(tst->trace.dma_addr) + 20);
	if (!skb) {
		IWL_ERR(tst->trans, "Memory allocation fail\n");
		iwl_test_trace_stop(tst);
		return -ENOMEM;
	}

	if (nla_put(skb, IWL_TM_ATTR_TRACE_ADDR,
		    sizeof(tst->trace.dma_addr),
		    (u64 *)&tst->trace.dma_addr))
		goto nla_put_failure;

	status = iwl_test_reply(tst, skb);
	if (status < 0)
		IWL_ERR(tst->trans, "Error sending msg : %d\n", status);

	tst->trace.nchunks = DIV_ROUND_UP(tst->trace.size,
					  DUMP_CHUNK_SIZE);

	return status;

nla_put_failure:
	kfree_skb(skb);
	/* NOTE(review): this condition looks always-true when reached from
	 * IWL_TM_CMD_APP2DEV_BEGIN_TRACE handling - verify the intent */
	if (nla_get_u32(tb[IWL_TM_ATTR_COMMAND]) ==
	    IWL_TM_CMD_APP2DEV_BEGIN_TRACE)
		iwl_test_trace_stop(tst);
	return -EMSGSIZE;
}
447
/*
 * Handles indirect read from the periphery or the SRAM. The data is read
 * into a temporary kernel buffer; the user space application should later
 * issue a dump command to retrieve it in chunks.
 */
static int iwl_test_indirect_read(struct iwl_test *tst, u32 addr, u32 size)
{
	struct iwl_trans *trans = tst->trans;
	unsigned long flags;
	int i;

	/* reads are performed in 32-bit units */
	if (size & 0x3)
		return -EINVAL;

	tst->mem.size = size;
	tst->mem.addr = kmalloc(tst->mem.size, GFP_KERNEL);
	if (tst->mem.addr == NULL)
		return -ENOMEM;

	/* Hard-coded periphery absolute address */
	if (IWL_ABS_PRPH_START <= addr &&
	    addr < IWL_ABS_PRPH_START + PRPH_END) {
		spin_lock_irqsave(&trans->reg_lock, flags);
		/* NOTE(review): the return value of grab_nic_access is
		 * ignored; a failed grab would read garbage - confirm */
		iwl_trans_grab_nic_access(trans, false);
		/* 3 << 24: presumably enables auto-increment reads - TODO
		 * confirm against the PRPH access documentation */
		iwl_write32(trans, HBUS_TARG_PRPH_RADDR,
			    addr | (3 << 24));
		for (i = 0; i < size; i += 4)
			*(u32 *)(tst->mem.addr + i) =
				iwl_read32(trans, HBUS_TARG_PRPH_RDAT);
		iwl_trans_release_nic_access(trans);
		spin_unlock_irqrestore(&trans->reg_lock, flags);
	} else { /* target memory (SRAM) */
		iwl_trans_read_mem(trans, addr, tst->mem.addr,
				   tst->mem.size / 4);
	}

	tst->mem.nchunks =
		DIV_ROUND_UP(tst->mem.size, DUMP_CHUNK_SIZE);
	tst->mem.in_read = true;
	return 0;

}
489
/*
 * Handles indirect write to the periphery or SRAM. The write is performed
 * from a temporary buffer.
 */
static int iwl_test_indirect_write(struct iwl_test *tst, u32 addr,
				   u32 size, unsigned char *buf)
{
	struct iwl_trans *trans = tst->trans;
	u32 val, i;
	unsigned long flags;

	if (IWL_ABS_PRPH_START <= addr &&
	    addr < IWL_ABS_PRPH_START + PRPH_END) {
		/* Periphery writes can be 1-3 bytes long, or DWORDs */
		if (size < 4) {
			memcpy(&val, buf, size);
			spin_lock_irqsave(&trans->reg_lock, flags);
			/* NOTE(review): grab_nic_access result ignored here
			 * as well - confirm this is intentional */
			iwl_trans_grab_nic_access(trans, false);
			/* (size - 1) << 24 encodes the byte count of the
			 * sub-DWORD write - TODO confirm */
			iwl_write32(trans, HBUS_TARG_PRPH_WADDR,
				    (addr & 0x0000FFFF) |
				    ((size - 1) << 24));
			iwl_write32(trans, HBUS_TARG_PRPH_WDAT, val);
			iwl_trans_release_nic_access(trans);
			/* needed after consecutive writes w/o read */
			mmiowb();
			spin_unlock_irqrestore(&trans->reg_lock, flags);
		} else {
			/* DWORD-granularity periphery writes */
			if (size % 4)
				return -EINVAL;
			for (i = 0; i < size; i += 4)
				iwl_write_prph(trans, addr+i,
					       *(u32 *)(buf+i));
		}
	} else if (iwl_test_valid_hw_addr(tst, addr)) {
		iwl_trans_write_mem(trans, addr, buf, size / 4);
	} else {
		return -EINVAL;
	}
	return 0;
}
530
531/*
532 * Handles the user application commands for indirect read/write
533 * to/from the periphery or the SRAM.
534 */
535static int iwl_test_indirect_mem(struct iwl_test *tst, struct nlattr **tb)
536{
537 u32 addr, size, cmd;
538 unsigned char *buf;
539
540 /* Both read and write should be blocked, for atomicity */
541 if (tst->mem.in_read)
542 return -EBUSY;
543
544 cmd = nla_get_u32(tb[IWL_TM_ATTR_COMMAND]);
545 if (!tb[IWL_TM_ATTR_MEM_ADDR]) {
546 IWL_ERR(tst->trans, "Error finding memory offset address\n");
547 return -ENOMSG;
548 }
549 addr = nla_get_u32(tb[IWL_TM_ATTR_MEM_ADDR]);
550 if (!tb[IWL_TM_ATTR_BUFFER_SIZE]) {
551 IWL_ERR(tst->trans, "Error finding size for memory reading\n");
552 return -ENOMSG;
553 }
554 size = nla_get_u32(tb[IWL_TM_ATTR_BUFFER_SIZE]);
555
556 if (cmd == IWL_TM_CMD_APP2DEV_INDIRECT_BUFFER_READ) {
557 return iwl_test_indirect_read(tst, addr, size);
558 } else {
559 if (!tb[IWL_TM_ATTR_BUFFER_DUMP])
560 return -EINVAL;
561 buf = (unsigned char *)nla_data(tb[IWL_TM_ATTR_BUFFER_DUMP]);
562 return iwl_test_indirect_write(tst, addr, size, buf);
563 }
564}
565
/*
 * Enable or disable forwarding of spontaneous FW notifications to user
 * space, based on the presence of the IWL_TM_ATTR_ENABLE_NOTIFICATION flag.
 */
static int iwl_test_notifications(struct iwl_test *tst,
				  struct nlattr **tb)
{
	tst->notify = nla_get_flag(tb[IWL_TM_ATTR_ENABLE_NOTIFICATION]);
	return 0;
}
575
576/*
577 * Handles the request to get the device id
578 */
Ilan Peerc76fe6d2012-06-04 19:39:30 +0300579static int iwl_test_get_dev_id(struct iwl_test *tst, struct nlattr **tb)
Ilan Peer3a6490c2012-06-03 13:36:51 +0300580{
581 u32 devid = tst->trans->hw_id;
582 struct sk_buff *skb;
583 int status;
584
585 IWL_DEBUG_INFO(tst->trans, "hw version: 0x%x\n", devid);
586
Ilan Peerc76fe6d2012-06-04 19:39:30 +0300587 skb = iwl_test_alloc_reply(tst, 20);
Ilan Peer3a6490c2012-06-03 13:36:51 +0300588 if (!skb) {
589 IWL_ERR(tst->trans, "Memory allocation fail\n");
590 return -ENOMEM;
591 }
592
593 if (nla_put_u32(skb, IWL_TM_ATTR_DEVICE_ID, devid))
594 goto nla_put_failure;
Ilan Peerc76fe6d2012-06-04 19:39:30 +0300595 status = iwl_test_reply(tst, skb);
Ilan Peer3a6490c2012-06-03 13:36:51 +0300596 if (status < 0)
597 IWL_ERR(tst->trans, "Error sending msg : %d\n", status);
598
599 return 0;
600
601nla_put_failure:
602 kfree_skb(skb);
603 return -EMSGSIZE;
604}
605
606/*
607 * Handles the request to get the FW version
608 */
Ilan Peerc76fe6d2012-06-04 19:39:30 +0300609static int iwl_test_get_fw_ver(struct iwl_test *tst, struct nlattr **tb)
Ilan Peer3a6490c2012-06-03 13:36:51 +0300610{
611 struct sk_buff *skb;
612 int status;
Ilan Peerc76fe6d2012-06-04 19:39:30 +0300613 u32 ver = iwl_test_fw_ver(tst);
Ilan Peer3a6490c2012-06-03 13:36:51 +0300614
615 IWL_DEBUG_INFO(tst->trans, "uCode version raw: 0x%x\n", ver);
616
Ilan Peerc76fe6d2012-06-04 19:39:30 +0300617 skb = iwl_test_alloc_reply(tst, 20);
Ilan Peer3a6490c2012-06-03 13:36:51 +0300618 if (!skb) {
619 IWL_ERR(tst->trans, "Memory allocation fail\n");
620 return -ENOMEM;
621 }
622
623 if (nla_put_u32(skb, IWL_TM_ATTR_FW_VERSION, ver))
624 goto nla_put_failure;
625
Ilan Peerc76fe6d2012-06-04 19:39:30 +0300626 status = iwl_test_reply(tst, skb);
Ilan Peer3a6490c2012-06-03 13:36:51 +0300627 if (status < 0)
628 IWL_ERR(tst->trans, "Error sending msg : %d\n", status);
629
630 return 0;
631
632nla_put_failure:
633 kfree_skb(skb);
634 return -EMSGSIZE;
635}
636
/*
 * Parse the netlink message attributes into tb[] and validate that the
 * mandatory IWL_TM_ATTR_COMMAND attribute exists. Returns 0 on success,
 * a negative error from nla_parse or -ENOMSG when the command is missing.
 */
int iwl_test_parse(struct iwl_test *tst, struct nlattr **tb,
		   void *data, int len)
{
	int result;

	result = nla_parse(tb, IWL_TM_ATTR_MAX - 1, data, len,
			   iwl_testmode_gnl_msg_policy);
	if (result) {
		IWL_ERR(tst->trans, "Fail parse gnl msg: %d\n", result);
		return result;
	}

	/* IWL_TM_ATTR_COMMAND is absolutely mandatory */
	if (!tb[IWL_TM_ATTR_COMMAND]) {
		IWL_ERR(tst->trans, "Missing testmode command type\n");
		return -ENOMSG;
	}
	return 0;
}
EXPORT_SYMBOL_GPL(iwl_test_parse);
660
/*
 * Handle test commands by dispatching on IWL_TM_ATTR_COMMAND (tb must
 * already be parsed/validated by iwl_test_parse).
 * Returns 1 for unknown commands (not handled by the test object);
 * negative value in case of error.
 */
int iwl_test_handle_cmd(struct iwl_test *tst, struct nlattr **tb)
{
	int result;

	switch (nla_get_u32(tb[IWL_TM_ATTR_COMMAND])) {
	case IWL_TM_CMD_APP2DEV_UCODE:
		IWL_DEBUG_INFO(tst->trans, "test cmd to uCode\n");
		result = iwl_test_fw_cmd(tst, tb);
		break;

	case IWL_TM_CMD_APP2DEV_DIRECT_REG_READ32:
	case IWL_TM_CMD_APP2DEV_DIRECT_REG_WRITE32:
	case IWL_TM_CMD_APP2DEV_DIRECT_REG_WRITE8:
		IWL_DEBUG_INFO(tst->trans, "test cmd to register\n");
		result = iwl_test_reg(tst, tb);
		break;

	case IWL_TM_CMD_APP2DEV_BEGIN_TRACE:
		IWL_DEBUG_INFO(tst->trans, "test uCode trace cmd to driver\n");
		result = iwl_test_trace_begin(tst, tb);
		break;

	case IWL_TM_CMD_APP2DEV_END_TRACE:
		iwl_test_trace_stop(tst);
		result = 0;
		break;

	case IWL_TM_CMD_APP2DEV_INDIRECT_BUFFER_READ:
	case IWL_TM_CMD_APP2DEV_INDIRECT_BUFFER_WRITE:
		IWL_DEBUG_INFO(tst->trans, "test indirect memory cmd\n");
		result = iwl_test_indirect_mem(tst, tb);
		break;

	case IWL_TM_CMD_APP2DEV_NOTIFICATIONS:
		IWL_DEBUG_INFO(tst->trans, "test notifications cmd\n");
		result = iwl_test_notifications(tst, tb);
		break;

	case IWL_TM_CMD_APP2DEV_GET_FW_VERSION:
		IWL_DEBUG_INFO(tst->trans, "test get FW ver cmd\n");
		result = iwl_test_get_fw_ver(tst, tb);
		break;

	case IWL_TM_CMD_APP2DEV_GET_DEVICE_ID:
		IWL_DEBUG_INFO(tst->trans, "test Get device ID cmd\n");
		result = iwl_test_get_dev_id(tst, tb);
		break;

	default:
		IWL_DEBUG_INFO(tst->trans, "Unknown test command\n");
		result = 1;
		break;
	}
	return result;
}
EXPORT_SYMBOL_GPL(iwl_test_handle_cmd);
722
/*
 * Dump one chunk of the FW trace buffer into the netlink skb.
 * cb->args[4] holds the index of the next chunk to dump; returns -ENOENT
 * once all chunks have been delivered.
 */
static int iwl_test_trace_dump(struct iwl_test *tst, struct sk_buff *skb,
			       struct netlink_callback *cb)
{
	int idx, length;

	if (!tst->trace.enabled || !tst->trace.trace_addr)
		return -EFAULT;

	idx = cb->args[4];
	if (idx >= tst->trace.nchunks)
		return -ENOENT;

	/* the last chunk may be shorter than DUMP_CHUNK_SIZE */
	length = DUMP_CHUNK_SIZE;
	if (((idx + 1) == tst->trace.nchunks) &&
	    (tst->trace.size % DUMP_CHUNK_SIZE))
		length = tst->trace.size %
			DUMP_CHUNK_SIZE;

	if (nla_put(skb, IWL_TM_ATTR_TRACE_DUMP, length,
		    tst->trace.trace_addr + (DUMP_CHUNK_SIZE * idx)))
		goto nla_put_failure;

	cb->args[4] = ++idx;
	return 0;

 nla_put_failure:
	return -ENOBUFS;
}
751
/*
 * Dump one chunk of the indirect-read buffer into the netlink skb.
 * cb->args[4] holds the index of the next chunk to dump; once all chunks
 * have been delivered, the read state is torn down and -ENOENT returned.
 */
static int iwl_test_buffer_dump(struct iwl_test *tst, struct sk_buff *skb,
				struct netlink_callback *cb)
{
	int idx, length;

	if (!tst->mem.in_read)
		return -EFAULT;

	idx = cb->args[4];
	if (idx >= tst->mem.nchunks) {
		iwl_test_mem_stop(tst);
		return -ENOENT;
	}

	/* the last chunk may be shorter than DUMP_CHUNK_SIZE */
	length = DUMP_CHUNK_SIZE;
	if (((idx + 1) == tst->mem.nchunks) &&
	    (tst->mem.size % DUMP_CHUNK_SIZE))
		length = tst->mem.size % DUMP_CHUNK_SIZE;

	if (nla_put(skb, IWL_TM_ATTR_BUFFER_DUMP, length,
		    tst->mem.addr + (DUMP_CHUNK_SIZE * idx)))
		goto nla_put_failure;

	cb->args[4] = ++idx;
	return 0;

 nla_put_failure:
	return -ENOBUFS;
}
781
782/*
783 * Handle dump commands.
784 * Returns 1 for unknown commands (not handled by the test object); negative
785 * value in case of error.
786 */
787int iwl_test_dump(struct iwl_test *tst, u32 cmd, struct sk_buff *skb,
788 struct netlink_callback *cb)
789{
790 int result;
791
792 switch (cmd) {
793 case IWL_TM_CMD_APP2DEV_READ_TRACE:
794 IWL_DEBUG_INFO(tst->trans, "uCode trace cmd\n");
795 result = iwl_test_trace_dump(tst, skb, cb);
796 break;
797
798 case IWL_TM_CMD_APP2DEV_INDIRECT_BUFFER_DUMP:
799 IWL_DEBUG_INFO(tst->trans, "testmode sram dump cmd\n");
800 result = iwl_test_buffer_dump(tst, skb, cb);
801 break;
802
803 default:
804 result = 1;
805 break;
806 }
807 return result;
808}
809EXPORT_SYMBOL_GPL(iwl_test_dump);
810
/*
 * Multicast a spontaneous message from the device to the user space.
 */
static void iwl_test_send_rx(struct iwl_test *tst,
			     struct iwl_rx_cmd_buffer *rxb)
{
	struct sk_buff *skb;
	struct iwl_rx_packet *data;
	int length;

	data = rxb_addr(rxb);
	length = le32_to_cpu(data->len_n_flags) & FH_RSCSR_FRAME_SIZE_MSK;

	/* the length doesn't include len_n_flags field, so add it manually */
	length += sizeof(__le32);

	/* +20: presumably netlink attribute headroom - TODO confirm */
	skb = iwl_test_alloc_event(tst, length + 20);
	if (skb == NULL) {
		IWL_ERR(tst->trans, "Out of memory for message to user\n");
		return;
	}

	if (nla_put_u32(skb, IWL_TM_ATTR_COMMAND,
			IWL_TM_CMD_DEV2APP_UCODE_RX_PKT) ||
	    nla_put(skb, IWL_TM_ATTR_UCODE_RX_PKT, length, data))
		goto nla_put_failure;

	iwl_test_event(tst, skb);
	return;

nla_put_failure:
	kfree_skb(skb);
	IWL_ERR(tst->trans, "Ouch, overran buffer, check allocation!\n");
}
845
/*
 * Called whenever an Rx frame is received from the device. If notifications
 * to user space are requested, forwards the frame to the user.
 */
Ilan Peerc76fe6d2012-06-04 19:39:30 +0300850void iwl_test_rx(struct iwl_test *tst, struct iwl_rx_cmd_buffer *rxb)
Ilan Peer3a6490c2012-06-03 13:36:51 +0300851{
852 if (tst->notify)
Ilan Peerc76fe6d2012-06-04 19:39:30 +0300853 iwl_test_send_rx(tst, rxb);
Ilan Peer3a6490c2012-06-03 13:36:51 +0300854}
855EXPORT_SYMBOL_GPL(iwl_test_rx);