1 /******************************************************************************
3 * This file is provided under a dual BSD/GPLv2 license. When using or
4 * redistributing this file, you may do so under either license.
8 * Copyright(c) 2010 - 2013 Intel Corporation. All rights reserved.
10 * This program is free software; you can redistribute it and/or modify
11 * it under the terms of version 2 of the GNU General Public License as
12 * published by the Free Software Foundation.
14 * This program is distributed in the hope that it will be useful, but
15 * WITHOUT ANY WARRANTY; without even the implied warranty of
16 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
17 * General Public License for more details.
19 * You should have received a copy of the GNU General Public License
20 * along with this program; if not, write to the Free Software
21 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
24 * The full GNU General Public License is included in this distribution
25 * in the file called COPYING.
27 * Contact Information:
28 * Intel Linux Wireless <ilw@linux.intel.com>
29 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
33 * Copyright(c) 2010 - 2013 Intel Corporation. All rights reserved.
34 * All rights reserved.
36 * Redistribution and use in source and binary forms, with or without
37 * modification, are permitted provided that the following conditions
40 * * Redistributions of source code must retain the above copyright
41 * notice, this list of conditions and the following disclaimer.
42 * * Redistributions in binary form must reproduce the above copyright
43 * notice, this list of conditions and the following disclaimer in
44 * the documentation and/or other materials provided with the
46 * * Neither the name Intel Corporation nor the names of its
47 * contributors may be used to endorse or promote products derived
48 * from this software without specific prior written permission.
50 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
51 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
52 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
53 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
54 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
55 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
56 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
57 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
58 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
59 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
60 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
62 *****************************************************************************/
64 #include <linux/export.h>
65 #include <net/netlink.h>
71 #include "iwl-trans.h"
74 #include "iwl-testmode.h"
/*
 * Periphery registers absolute lower bound. This is used in order to
 * differentiate register access through HBUS_TARG_PRPH_* and
 * HBUS_TARG_MEM_* accesses.
 */
#define IWL_ABS_PRPH_START (0xA00000)
84 * The TLVs used in the gnl message policy between the kernel module and
85 * user space application. iwl_testmode_gnl_msg_policy is to be carried
86 * through the NL80211_CMD_TESTMODE channel regulated by nl80211.
90 struct nla_policy iwl_testmode_gnl_msg_policy[IWL_TM_ATTR_MAX] = {
91 [IWL_TM_ATTR_COMMAND] = { .type = NLA_U32, },
93 [IWL_TM_ATTR_UCODE_CMD_ID] = { .type = NLA_U8, },
94 [IWL_TM_ATTR_UCODE_CMD_DATA] = { .type = NLA_UNSPEC, },
96 [IWL_TM_ATTR_REG_OFFSET] = { .type = NLA_U32, },
97 [IWL_TM_ATTR_REG_VALUE8] = { .type = NLA_U8, },
98 [IWL_TM_ATTR_REG_VALUE32] = { .type = NLA_U32, },
100 [IWL_TM_ATTR_SYNC_RSP] = { .type = NLA_UNSPEC, },
101 [IWL_TM_ATTR_UCODE_RX_PKT] = { .type = NLA_UNSPEC, },
103 [IWL_TM_ATTR_EEPROM] = { .type = NLA_UNSPEC, },
105 [IWL_TM_ATTR_TRACE_ADDR] = { .type = NLA_UNSPEC, },
106 [IWL_TM_ATTR_TRACE_DUMP] = { .type = NLA_UNSPEC, },
107 [IWL_TM_ATTR_TRACE_SIZE] = { .type = NLA_U32, },
109 [IWL_TM_ATTR_FIXRATE] = { .type = NLA_U32, },
111 [IWL_TM_ATTR_UCODE_OWNER] = { .type = NLA_U8, },
113 [IWL_TM_ATTR_MEM_ADDR] = { .type = NLA_U32, },
114 [IWL_TM_ATTR_BUFFER_SIZE] = { .type = NLA_U32, },
115 [IWL_TM_ATTR_BUFFER_DUMP] = { .type = NLA_UNSPEC, },
117 [IWL_TM_ATTR_FW_VERSION] = { .type = NLA_U32, },
118 [IWL_TM_ATTR_DEVICE_ID] = { .type = NLA_U32, },
119 [IWL_TM_ATTR_FW_TYPE] = { .type = NLA_U32, },
120 [IWL_TM_ATTR_FW_INST_SIZE] = { .type = NLA_U32, },
121 [IWL_TM_ATTR_FW_DATA_SIZE] = { .type = NLA_U32, },
123 [IWL_TM_ATTR_ENABLE_NOTIFICATION] = {.type = NLA_FLAG, },
126 static inline void iwl_test_trace_clear(struct iwl_test *tst)
128 memset(&tst->trace, 0, sizeof(struct iwl_test_trace));
131 static void iwl_test_trace_stop(struct iwl_test *tst)
133 if (!tst->trace.enabled)
136 if (tst->trace.cpu_addr && tst->trace.dma_addr)
137 dma_free_coherent(tst->trans->dev,
140 tst->trace.dma_addr);
142 iwl_test_trace_clear(tst);
145 static inline void iwl_test_mem_clear(struct iwl_test *tst)
147 memset(&tst->mem, 0, sizeof(struct iwl_test_mem));
150 static inline void iwl_test_mem_stop(struct iwl_test *tst)
152 if (!tst->mem.in_read)
155 iwl_test_mem_clear(tst);
159 * Initializes the test object
160 * During the lifetime of the test object it is assumed that the transport is
161 * started. The test object should be stopped before the transport is stopped.
163 void iwl_test_init(struct iwl_test *tst, struct iwl_trans *trans,
164 struct iwl_test_ops *ops)
169 iwl_test_trace_clear(tst);
170 iwl_test_mem_clear(tst);
172 EXPORT_SYMBOL_GPL(iwl_test_init);
/*
 * Stop the test object: releases any buffered memory read and any
 * active trace buffer.
 */
void iwl_test_free(struct iwl_test *tst)
{
	iwl_test_mem_stop(tst);
	iwl_test_trace_stop(tst);
}
EXPORT_SYMBOL_GPL(iwl_test_free);
184 static inline int iwl_test_send_cmd(struct iwl_test *tst,
185 struct iwl_host_cmd *cmd)
187 return tst->ops->send_cmd(tst->trans->op_mode, cmd);
190 static inline bool iwl_test_valid_hw_addr(struct iwl_test *tst, u32 addr)
192 return tst->ops->valid_hw_addr(addr);
195 static inline u32 iwl_test_fw_ver(struct iwl_test *tst)
197 return tst->ops->get_fw_ver(tst->trans->op_mode);
200 static inline struct sk_buff*
201 iwl_test_alloc_reply(struct iwl_test *tst, int len)
203 return tst->ops->alloc_reply(tst->trans->op_mode, len);
206 static inline int iwl_test_reply(struct iwl_test *tst, struct sk_buff *skb)
208 return tst->ops->reply(tst->trans->op_mode, skb);
211 static inline struct sk_buff*
212 iwl_test_alloc_event(struct iwl_test *tst, int len)
214 return tst->ops->alloc_event(tst->trans->op_mode, len);
218 iwl_test_event(struct iwl_test *tst, struct sk_buff *skb)
220 return tst->ops->event(tst->trans->op_mode, skb);
224 * This function handles the user application commands to the fw. The fw
225 * commands are sent in a synchronuous manner. In case that the user requested
226 * to get commands response, it is send to the user.
228 static int iwl_test_fw_cmd(struct iwl_test *tst, struct nlattr **tb)
230 struct iwl_host_cmd cmd;
231 struct iwl_rx_packet *pkt;
238 memset(&cmd, 0, sizeof(struct iwl_host_cmd));
240 if (!tb[IWL_TM_ATTR_UCODE_CMD_ID] ||
241 !tb[IWL_TM_ATTR_UCODE_CMD_DATA]) {
242 IWL_ERR(tst->trans, "Missing fw command mandatory fields\n");
246 cmd.flags = CMD_ON_DEMAND | CMD_SYNC;
247 cmd_want_skb = nla_get_flag(tb[IWL_TM_ATTR_UCODE_CMD_SKB]);
249 cmd.flags |= CMD_WANT_SKB;
251 cmd.id = nla_get_u8(tb[IWL_TM_ATTR_UCODE_CMD_ID]);
252 cmd.data[0] = nla_data(tb[IWL_TM_ATTR_UCODE_CMD_DATA]);
253 cmd.len[0] = nla_len(tb[IWL_TM_ATTR_UCODE_CMD_DATA]);
254 cmd.dataflags[0] = IWL_HCMD_DFL_NOCOPY;
255 IWL_DEBUG_INFO(tst->trans, "test fw cmd=0x%x, flags 0x%x, len %d\n",
256 cmd.id, cmd.flags, cmd.len[0]);
258 ret = iwl_test_send_cmd(tst, &cmd);
260 IWL_ERR(tst->trans, "Failed to send hcmd\n");
266 /* Handling return of SKB to the user */
269 IWL_ERR(tst->trans, "HCMD received a null response packet\n");
273 reply_len = le32_to_cpu(pkt->len_n_flags) & FH_RSCSR_FRAME_SIZE_MSK;
274 skb = iwl_test_alloc_reply(tst, reply_len + 20);
275 reply_buf = kmalloc(reply_len, GFP_KERNEL);
276 if (!skb || !reply_buf) {
282 /* The reply is in a page, that we cannot send to user space. */
283 memcpy(reply_buf, &(pkt->hdr), reply_len);
286 if (nla_put_u32(skb, IWL_TM_ATTR_COMMAND,
287 IWL_TM_CMD_DEV2APP_UCODE_RX_PKT) ||
288 nla_put(skb, IWL_TM_ATTR_UCODE_RX_PKT, reply_len, reply_buf))
289 goto nla_put_failure;
290 return iwl_test_reply(tst, skb);
293 IWL_DEBUG_INFO(tst->trans, "Failed creating NL attributes\n");
300 * Handles the user application commands for register access.
302 static int iwl_test_reg(struct iwl_test *tst, struct nlattr **tb)
308 struct iwl_trans *trans = tst->trans;
310 if (!tb[IWL_TM_ATTR_REG_OFFSET]) {
311 IWL_ERR(trans, "Missing reg offset\n");
315 ofs = nla_get_u32(tb[IWL_TM_ATTR_REG_OFFSET]);
316 IWL_DEBUG_INFO(trans, "test reg access cmd offset=0x%x\n", ofs);
318 cmd = nla_get_u32(tb[IWL_TM_ATTR_COMMAND]);
321 * Allow access only to FH/CSR/HBUS in direct mode.
322 * Since we don't have the upper bounds for the CSR and HBUS segments,
323 * we will use only the upper bound of FH for sanity check.
325 if (ofs >= FH_MEM_UPPER_BOUND) {
326 IWL_ERR(trans, "offset out of segment (0x0 - 0x%x)\n",
332 case IWL_TM_CMD_APP2DEV_DIRECT_REG_READ32:
333 val32 = iwl_read_direct32(tst->trans, ofs);
334 IWL_DEBUG_INFO(trans, "32 value to read 0x%x\n", val32);
336 skb = iwl_test_alloc_reply(tst, 20);
338 IWL_ERR(trans, "Memory allocation fail\n");
341 if (nla_put_u32(skb, IWL_TM_ATTR_REG_VALUE32, val32))
342 goto nla_put_failure;
343 status = iwl_test_reply(tst, skb);
345 IWL_ERR(trans, "Error sending msg : %d\n", status);
348 case IWL_TM_CMD_APP2DEV_DIRECT_REG_WRITE32:
349 if (!tb[IWL_TM_ATTR_REG_VALUE32]) {
350 IWL_ERR(trans, "Missing value to write\n");
353 val32 = nla_get_u32(tb[IWL_TM_ATTR_REG_VALUE32]);
354 IWL_DEBUG_INFO(trans, "32b write val=0x%x\n", val32);
355 iwl_write_direct32(tst->trans, ofs, val32);
359 case IWL_TM_CMD_APP2DEV_DIRECT_REG_WRITE8:
360 if (!tb[IWL_TM_ATTR_REG_VALUE8]) {
361 IWL_ERR(trans, "Missing value to write\n");
364 val8 = nla_get_u8(tb[IWL_TM_ATTR_REG_VALUE8]);
365 IWL_DEBUG_INFO(trans, "8b write val=0x%x\n", val8);
366 iwl_write8(tst->trans, ofs, val8);
371 IWL_ERR(trans, "Unknown test register cmd ID\n");
383 * Handles the request to start FW tracing. Allocates of the trace buffer
384 * and sends a reply to user space with the address of the allocated buffer.
386 static int iwl_test_trace_begin(struct iwl_test *tst, struct nlattr **tb)
391 if (tst->trace.enabled)
394 if (!tb[IWL_TM_ATTR_TRACE_SIZE])
395 tst->trace.size = TRACE_BUFF_SIZE_DEF;
398 nla_get_u32(tb[IWL_TM_ATTR_TRACE_SIZE]);
400 if (!tst->trace.size)
403 if (tst->trace.size < TRACE_BUFF_SIZE_MIN ||
404 tst->trace.size > TRACE_BUFF_SIZE_MAX)
407 tst->trace.tsize = tst->trace.size + TRACE_BUFF_PADD;
408 tst->trace.cpu_addr = dma_alloc_coherent(tst->trans->dev,
410 &tst->trace.dma_addr,
412 if (!tst->trace.cpu_addr)
415 tst->trace.enabled = true;
416 tst->trace.trace_addr = (u8 *)PTR_ALIGN(tst->trace.cpu_addr, 0x100);
418 memset(tst->trace.trace_addr, 0x03B, tst->trace.size);
420 skb = iwl_test_alloc_reply(tst, sizeof(tst->trace.dma_addr) + 20);
422 IWL_ERR(tst->trans, "Memory allocation fail\n");
423 iwl_test_trace_stop(tst);
427 if (nla_put(skb, IWL_TM_ATTR_TRACE_ADDR,
428 sizeof(tst->trace.dma_addr),
429 (u64 *)&tst->trace.dma_addr))
430 goto nla_put_failure;
432 status = iwl_test_reply(tst, skb);
434 IWL_ERR(tst->trans, "Error sending msg : %d\n", status);
436 tst->trace.nchunks = DIV_ROUND_UP(tst->trace.size,
443 if (nla_get_u32(tb[IWL_TM_ATTR_COMMAND]) ==
444 IWL_TM_CMD_APP2DEV_BEGIN_TRACE)
445 iwl_test_trace_stop(tst);
450 * Handles indirect read from the periphery or the SRAM. The read is performed
451 * to a temporary buffer. The user space application should later issue a dump
453 static int iwl_test_indirect_read(struct iwl_test *tst, u32 addr, u32 size)
455 struct iwl_trans *trans = tst->trans;
462 tst->mem.size = size;
463 tst->mem.addr = kmalloc(tst->mem.size, GFP_KERNEL);
464 if (tst->mem.addr == NULL)
467 /* Hard-coded periphery absolute address */
468 if (IWL_ABS_PRPH_START <= addr &&
469 addr < IWL_ABS_PRPH_START + PRPH_END) {
470 if (!iwl_trans_grab_nic_access(trans, false, &flags)) {
473 iwl_write32(trans, HBUS_TARG_PRPH_RADDR,
475 for (i = 0; i < size; i += 4)
476 *(u32 *)(tst->mem.addr + i) =
477 iwl_read32(trans, HBUS_TARG_PRPH_RDAT);
478 iwl_trans_release_nic_access(trans, &flags);
479 } else { /* target memory (SRAM) */
480 iwl_trans_read_mem(trans, addr, tst->mem.addr,
485 DIV_ROUND_UP(tst->mem.size, DUMP_CHUNK_SIZE);
486 tst->mem.in_read = true;
492 * Handles indirect write to the periphery or SRAM. The is performed to a
495 static int iwl_test_indirect_write(struct iwl_test *tst, u32 addr,
496 u32 size, unsigned char *buf)
498 struct iwl_trans *trans = tst->trans;
502 if (IWL_ABS_PRPH_START <= addr &&
503 addr < IWL_ABS_PRPH_START + PRPH_END) {
504 /* Periphery writes can be 1-3 bytes long, or DWORDs */
506 memcpy(&val, buf, size);
507 if (!iwl_trans_grab_nic_access(trans, false, &flags))
509 iwl_write32(trans, HBUS_TARG_PRPH_WADDR,
510 (addr & 0x0000FFFF) |
512 iwl_write32(trans, HBUS_TARG_PRPH_WDAT, val);
513 iwl_trans_release_nic_access(trans, &flags);
517 for (i = 0; i < size; i += 4)
518 iwl_write_prph(trans, addr+i,
521 } else if (iwl_test_valid_hw_addr(tst, addr)) {
522 iwl_trans_write_mem(trans, addr, buf, size / 4);
530 * Handles the user application commands for indirect read/write
531 * to/from the periphery or the SRAM.
533 static int iwl_test_indirect_mem(struct iwl_test *tst, struct nlattr **tb)
538 /* Both read and write should be blocked, for atomicity */
539 if (tst->mem.in_read)
542 cmd = nla_get_u32(tb[IWL_TM_ATTR_COMMAND]);
543 if (!tb[IWL_TM_ATTR_MEM_ADDR]) {
544 IWL_ERR(tst->trans, "Error finding memory offset address\n");
547 addr = nla_get_u32(tb[IWL_TM_ATTR_MEM_ADDR]);
548 if (!tb[IWL_TM_ATTR_BUFFER_SIZE]) {
549 IWL_ERR(tst->trans, "Error finding size for memory reading\n");
552 size = nla_get_u32(tb[IWL_TM_ATTR_BUFFER_SIZE]);
554 if (cmd == IWL_TM_CMD_APP2DEV_INDIRECT_BUFFER_READ) {
555 return iwl_test_indirect_read(tst, addr, size);
557 if (!tb[IWL_TM_ATTR_BUFFER_DUMP])
559 buf = (unsigned char *)nla_data(tb[IWL_TM_ATTR_BUFFER_DUMP]);
560 return iwl_test_indirect_write(tst, addr, size, buf);
565 * Enable notifications to user space
567 static int iwl_test_notifications(struct iwl_test *tst,
570 tst->notify = nla_get_flag(tb[IWL_TM_ATTR_ENABLE_NOTIFICATION]);
575 * Handles the request to get the device id
577 static int iwl_test_get_dev_id(struct iwl_test *tst, struct nlattr **tb)
579 u32 devid = tst->trans->hw_id;
583 IWL_DEBUG_INFO(tst->trans, "hw version: 0x%x\n", devid);
585 skb = iwl_test_alloc_reply(tst, 20);
587 IWL_ERR(tst->trans, "Memory allocation fail\n");
591 if (nla_put_u32(skb, IWL_TM_ATTR_DEVICE_ID, devid))
592 goto nla_put_failure;
593 status = iwl_test_reply(tst, skb);
595 IWL_ERR(tst->trans, "Error sending msg : %d\n", status);
605 * Handles the request to get the FW version
607 static int iwl_test_get_fw_ver(struct iwl_test *tst, struct nlattr **tb)
611 u32 ver = iwl_test_fw_ver(tst);
613 IWL_DEBUG_INFO(tst->trans, "uCode version raw: 0x%x\n", ver);
615 skb = iwl_test_alloc_reply(tst, 20);
617 IWL_ERR(tst->trans, "Memory allocation fail\n");
621 if (nla_put_u32(skb, IWL_TM_ATTR_FW_VERSION, ver))
622 goto nla_put_failure;
624 status = iwl_test_reply(tst, skb);
626 IWL_ERR(tst->trans, "Error sending msg : %d\n", status);
636 * Parse the netlink message and validate that the IWL_TM_ATTR_CMD exists
638 int iwl_test_parse(struct iwl_test *tst, struct nlattr **tb,
643 result = nla_parse(tb, IWL_TM_ATTR_MAX - 1, data, len,
644 iwl_testmode_gnl_msg_policy);
646 IWL_ERR(tst->trans, "Fail parse gnl msg: %d\n", result);
650 /* IWL_TM_ATTR_COMMAND is absolutely mandatory */
651 if (!tb[IWL_TM_ATTR_COMMAND]) {
652 IWL_ERR(tst->trans, "Missing testmode command type\n");
657 IWL_EXPORT_SYMBOL(iwl_test_parse);
660 * Handle test commands.
661 * Returns 1 for unknown commands (not handled by the test object); negative
662 * value in case of error.
664 int iwl_test_handle_cmd(struct iwl_test *tst, struct nlattr **tb)
668 switch (nla_get_u32(tb[IWL_TM_ATTR_COMMAND])) {
669 case IWL_TM_CMD_APP2DEV_UCODE:
670 IWL_DEBUG_INFO(tst->trans, "test cmd to uCode\n");
671 result = iwl_test_fw_cmd(tst, tb);
674 case IWL_TM_CMD_APP2DEV_DIRECT_REG_READ32:
675 case IWL_TM_CMD_APP2DEV_DIRECT_REG_WRITE32:
676 case IWL_TM_CMD_APP2DEV_DIRECT_REG_WRITE8:
677 IWL_DEBUG_INFO(tst->trans, "test cmd to register\n");
678 result = iwl_test_reg(tst, tb);
681 case IWL_TM_CMD_APP2DEV_BEGIN_TRACE:
682 IWL_DEBUG_INFO(tst->trans, "test uCode trace cmd to driver\n");
683 result = iwl_test_trace_begin(tst, tb);
686 case IWL_TM_CMD_APP2DEV_END_TRACE:
687 iwl_test_trace_stop(tst);
691 case IWL_TM_CMD_APP2DEV_INDIRECT_BUFFER_READ:
692 case IWL_TM_CMD_APP2DEV_INDIRECT_BUFFER_WRITE:
693 IWL_DEBUG_INFO(tst->trans, "test indirect memory cmd\n");
694 result = iwl_test_indirect_mem(tst, tb);
697 case IWL_TM_CMD_APP2DEV_NOTIFICATIONS:
698 IWL_DEBUG_INFO(tst->trans, "test notifications cmd\n");
699 result = iwl_test_notifications(tst, tb);
702 case IWL_TM_CMD_APP2DEV_GET_FW_VERSION:
703 IWL_DEBUG_INFO(tst->trans, "test get FW ver cmd\n");
704 result = iwl_test_get_fw_ver(tst, tb);
707 case IWL_TM_CMD_APP2DEV_GET_DEVICE_ID:
708 IWL_DEBUG_INFO(tst->trans, "test Get device ID cmd\n");
709 result = iwl_test_get_dev_id(tst, tb);
713 IWL_DEBUG_INFO(tst->trans, "Unknown test command\n");
719 IWL_EXPORT_SYMBOL(iwl_test_handle_cmd);
721 static int iwl_test_trace_dump(struct iwl_test *tst, struct sk_buff *skb,
722 struct netlink_callback *cb)
726 if (!tst->trace.enabled || !tst->trace.trace_addr)
730 if (idx >= tst->trace.nchunks)
733 length = DUMP_CHUNK_SIZE;
734 if (((idx + 1) == tst->trace.nchunks) &&
735 (tst->trace.size % DUMP_CHUNK_SIZE))
736 length = tst->trace.size %
739 if (nla_put(skb, IWL_TM_ATTR_TRACE_DUMP, length,
740 tst->trace.trace_addr + (DUMP_CHUNK_SIZE * idx)))
741 goto nla_put_failure;
750 static int iwl_test_buffer_dump(struct iwl_test *tst, struct sk_buff *skb,
751 struct netlink_callback *cb)
755 if (!tst->mem.in_read)
759 if (idx >= tst->mem.nchunks) {
760 iwl_test_mem_stop(tst);
764 length = DUMP_CHUNK_SIZE;
765 if (((idx + 1) == tst->mem.nchunks) &&
766 (tst->mem.size % DUMP_CHUNK_SIZE))
767 length = tst->mem.size % DUMP_CHUNK_SIZE;
769 if (nla_put(skb, IWL_TM_ATTR_BUFFER_DUMP, length,
770 tst->mem.addr + (DUMP_CHUNK_SIZE * idx)))
771 goto nla_put_failure;
781 * Handle dump commands.
782 * Returns 1 for unknown commands (not handled by the test object); negative
783 * value in case of error.
785 int iwl_test_dump(struct iwl_test *tst, u32 cmd, struct sk_buff *skb,
786 struct netlink_callback *cb)
791 case IWL_TM_CMD_APP2DEV_READ_TRACE:
792 IWL_DEBUG_INFO(tst->trans, "uCode trace cmd\n");
793 result = iwl_test_trace_dump(tst, skb, cb);
796 case IWL_TM_CMD_APP2DEV_INDIRECT_BUFFER_DUMP:
797 IWL_DEBUG_INFO(tst->trans, "testmode sram dump cmd\n");
798 result = iwl_test_buffer_dump(tst, skb, cb);
807 IWL_EXPORT_SYMBOL(iwl_test_dump);
810 * Multicast a spontaneous messages from the device to the user space.
812 static void iwl_test_send_rx(struct iwl_test *tst,
813 struct iwl_rx_cmd_buffer *rxb)
816 struct iwl_rx_packet *data;
819 data = rxb_addr(rxb);
820 length = le32_to_cpu(data->len_n_flags) & FH_RSCSR_FRAME_SIZE_MSK;
822 /* the length doesn't include len_n_flags field, so add it manually */
823 length += sizeof(__le32);
825 skb = iwl_test_alloc_event(tst, length + 20);
827 IWL_ERR(tst->trans, "Out of memory for message to user\n");
831 if (nla_put_u32(skb, IWL_TM_ATTR_COMMAND,
832 IWL_TM_CMD_DEV2APP_UCODE_RX_PKT) ||
833 nla_put(skb, IWL_TM_ATTR_UCODE_RX_PKT, length, data))
834 goto nla_put_failure;
836 iwl_test_event(tst, skb);
841 IWL_ERR(tst->trans, "Ouch, overran buffer, check allocation!\n");
845 * Called whenever a Rx frames is recevied from the device. If notifications to
846 * the user space are requested, sends the frames to the user.
848 void iwl_test_rx(struct iwl_test *tst, struct iwl_rx_cmd_buffer *rxb)
851 iwl_test_send_rx(tst, rxb);
853 IWL_EXPORT_SYMBOL(iwl_test_rx);