/******************************************************************************
 *
 * Copyright(c) 2003 - 2015 Intel Corporation. All rights reserved.
 * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
 *
 * Portions of this file are derived from the ipw3945 project, as well
 * as portions of the ieee80211 subsystem header files.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc.,
 * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
 *
 * The full GNU General Public License is included in this distribution in the
 * file called LICENSE.
 *
 * Contact Information:
 *  Intel Linux Wireless <linuxwifi@intel.com>
 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
 *
 *****************************************************************************/
#ifndef __iwl_trans_int_pcie_h__
#define __iwl_trans_int_pcie_h__

#include <linux/spinlock.h>
#include <linux/interrupt.h>
#include <linux/skbuff.h>
#include <linux/wait.h>
#include <linux/pci.h>
#include <linux/timer.h>

#include "iwl-fh.h"
#include "iwl-csr.h"
#include "iwl-trans.h"
#include "iwl-debug.h"
#include "iwl-io.h"
#include "iwl-op-mode.h"

/* We need 2 entries for the TX command and header, and another one might
 * be needed for potential data in the SKB's head. The remaining ones can
 * be used for frags.
 */
#define IWL_PCIE_MAX_FRAGS (IWL_NUM_OF_TBS - 3)
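
/*
 * Illustrative arithmetic (not in the original header): iwl-fh.h defines
 * IWL_NUM_OF_TBS as 20, so a single TFD can carry up to 20 - 3 = 17 page
 * fragments once two TBs are reserved for the TX command/header and one
 * for data in the skb's head.
 */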

/*
 * RX related structures and functions
 */
#define RX_NUM_QUEUES 1
#define RX_POST_REQ_ALLOC 2
#define RX_CLAIM_REQ_ALLOC 8
#define RX_POOL_SIZE ((RX_CLAIM_REQ_ALLOC - RX_POST_REQ_ALLOC) * RX_NUM_QUEUES)
#define RX_LOW_WATERMARK 8
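
/*
 * Worked example (illustrative): with the values above, the allocator's
 * pool[] holds (RX_CLAIM_REQ_ALLOC - RX_POST_REQ_ALLOC) * RX_NUM_QUEUES
 * = (8 - 2) * 1 = 6 iwl_rx_mem_buffer entries.
 */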

struct iwl_host_cmd;

/* This file includes the declarations that are internal to the
 * trans_pcie layer */

struct iwl_rx_mem_buffer {
	dma_addr_t page_dma;
	struct page *page;
	struct list_head list;
};

/**
 * struct isr_statistics - interrupt statistics
 */
struct isr_statistics {
	u32 hw;
	u32 sw;
	u32 err_code;
	u32 sch;
	u32 alive;
	u32 rfkill;
	u32 ctkill;
	u32 wakeup;
	u32 rx;
	u32 tx;
	u32 unhandled;
};

/**
 * struct iwl_rxq - Rx queue
 * @bd: driver's pointer to buffer of receive buffer descriptors (rbd)
 * @bd_dma: bus address of buffer of receive buffer descriptors (rbd)
 * @read: Shared index to newest available Rx buffer
 * @write: Shared index to oldest written Rx packet
 * @free_count: Number of pre-allocated buffers in rx_free
 * @used_count: Number of RBDs handed to the allocator for allocation
 * @write_actual: last write index actually written to the device
 * @rx_free: list of RBDs with allocated RB ready for use
 * @rx_used: list of RBDs with no RB attached
 * @need_update: flag to indicate we need to update read/write index
 * @rb_stts: driver's pointer to receive buffer status
 * @rb_stts_dma: bus address of receive buffer status
 * @lock: protects the queue's data
 * @pool: initial pool of iwl_rx_mem_buffer for the queue
 * @queue: actual rx queue
 *
 * NOTE: rx_free and rx_used are used as a FIFO for iwl_rx_mem_buffers
 */
struct iwl_rxq {
	__le32 *bd;
	dma_addr_t bd_dma;
	u32 read;
	u32 write;
	u32 free_count;
	u32 used_count;
	u32 write_actual;
	struct list_head rx_free;
	struct list_head rx_used;
	bool need_update;
	struct iwl_rb_status *rb_stts;
	dma_addr_t rb_stts_dma;
	spinlock_t lock;
	struct iwl_rx_mem_buffer pool[RX_QUEUE_SIZE];
	struct iwl_rx_mem_buffer *queue[RX_QUEUE_SIZE];
};

/**
 * struct iwl_rb_allocator - Rx allocator
 * @pool: initial pool of allocator
 * @req_pending: number of requests the allocator has not processed yet
 * @req_ready: number of requests honored and ready for claiming
 * @rbd_allocated: RBDs with pages allocated and ready to be handed to
 *	the queue. This is a list of &struct iwl_rx_mem_buffer
 * @rbd_empty: RBDs with no page attached for allocator use. This is a list
 *	of &struct iwl_rx_mem_buffer
 * @lock: protects the rbd_allocated and rbd_empty lists
 * @alloc_wq: work queue for background calls
 * @rx_alloc: work struct for background calls
 */
struct iwl_rb_allocator {
	struct iwl_rx_mem_buffer pool[RX_POOL_SIZE];
	atomic_t req_pending;
	atomic_t req_ready;
	struct list_head rbd_allocated;
	struct list_head rbd_empty;
	spinlock_t lock;
	struct workqueue_struct *alloc_wq;
	struct work_struct rx_alloc;
};

struct iwl_dma_ptr {
	dma_addr_t dma;
	void *addr;
	size_t size;
};

/**
 * iwl_queue_inc_wrap - increment queue index, wrap back to beginning
 * @index: current index
 */
static inline int iwl_queue_inc_wrap(int index)
{
	return ++index & (TFD_QUEUE_SIZE_MAX - 1);
}

/**
 * iwl_queue_dec_wrap - decrement queue index, wrap back to end
 * @index: current index
 */
static inline int iwl_queue_dec_wrap(int index)
{
	return --index & (TFD_QUEUE_SIZE_MAX - 1);
}
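
/*
 * Usage sketch (illustrative, not part of the driver): iterating the
 * in-flight entries of a queue relies on TFD_QUEUE_SIZE_MAX being a
 * power of two (256), which makes the masking above a cheap modulo:
 *
 *	for (i = q->read_ptr; i != q->write_ptr;
 *	     i = iwl_queue_inc_wrap(i))
 *		process_entry(txq, i);	// hypothetical helper
 */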

struct iwl_cmd_meta {
	/* only for SYNC commands, iff the reply skb is wanted */
	struct iwl_host_cmd *source;
	u32 flags;
};

/*
 * Generic queue structure
 *
 * Contains common data for Rx and Tx queues.
 *
 * Note the difference between TFD_QUEUE_SIZE_MAX and n_window: the hardware
 * always assumes 256 descriptors, so TFD_QUEUE_SIZE_MAX is always 256
 * (barring HW changes in the future). For the normal TX queues, n_window,
 * which is the size of the software queue data, is also 256; however,
 * for the command queue, n_window is only 32 since we don't need so many
 * commands pending. Since the HW still uses 256 BDs for DMA though,
 * TFD_QUEUE_SIZE_MAX stays 256. As a result, the software buffers (in the
 * variables @meta, @txb in struct iwl_txq) only have 32 entries, while
 * the HW buffers (@tfds in the same struct) have 256.
 * This means that we end up with the following:
 * HW entries: | 0 | ... | N * 32 | ... | N * 32 + 31 | ... | 255 |
 * SW entries: | 0 | ... | 31 |
 * where N is a number between 0 and 7. This means that the SW
 * data is a window overlaid on the HW queue.
 */
struct iwl_queue {
	int write_ptr;		/* 1st empty entry (index), host write ptr */
	int read_ptr;		/* last used entry (index), host read ptr */
	/* use for monitoring and recovering the stuck queue */
	dma_addr_t dma_addr;	/* physical addr for BD's */
	int n_window;		/* safe queue window */
	u32 id;
	int low_mark;		/* low watermark, resume queue if free
				 * space more than this */
	int high_mark;		/* high watermark, stop queue if free
				 * space less than this */
};
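
/*
 * Window-mapping example (illustrative): on the command queue
 * (n_window == 32) a HW index of, say, 37 lands in SW entry
 * 37 & (32 - 1) == 5 -- exactly what get_cmd_index() below computes.
 */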

#define TFD_TX_CMD_SLOTS 256
#define TFD_CMD_SLOTS 32

/*
 * The FH will write back to the first TB only, so we need
 * to copy some data into the buffer regardless of whether
 * it should be mapped or not. This indicates how big the
 * first TB must be to include the scratch buffer. Since
 * the scratch is 4 bytes at offset 12, it's 16 now. If we
 * make it bigger then allocations will be bigger and copy
 * slower, so that's probably not useful.
 */
#define IWL_HCMD_SCRATCHBUF_SIZE 16

struct iwl_pcie_txq_entry {
	struct iwl_device_cmd *cmd;
	struct sk_buff *skb;
	/* buffer to free after command completes */
	const void *free_buf;
	struct iwl_cmd_meta meta;
};

struct iwl_pcie_txq_scratch_buf {
	struct iwl_cmd_header hdr;
	u8 buf[8];
	__le32 scratch;
};
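
/*
 * Layout sketch (illustrative): struct iwl_cmd_header is 4 bytes, so
 * hdr + buf[8] put the scratch word at offset 12 and the whole struct
 * at 16 bytes, matching IWL_HCMD_SCRATCHBUF_SIZE above. A build-time
 * check of that assumption could read:
 *
 *	BUILD_BUG_ON(sizeof(struct iwl_pcie_txq_scratch_buf) !=
 *		     IWL_HCMD_SCRATCHBUF_SIZE);
 */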

/**
 * struct iwl_txq - Tx Queue for DMA
 * @q: generic Rx/Tx queue descriptor
 * @tfds: transmit frame descriptors (DMA memory)
 * @scratchbufs: start of command headers, including scratch buffers, for
 *	the writeback -- this is DMA memory and an array holding one buffer
 *	for each command on the queue
 * @scratchbufs_dma: DMA address for the scratchbufs start
 * @entries: transmit entries (driver state)
 * @lock: queue lock
 * @stuck_timer: timer that fires if queue gets stuck
 * @trans_pcie: pointer back to transport (for timer)
 * @need_update: indicates need to update read/write index
 * @active: stores if queue is active
 * @ampdu: true if this queue is an ampdu queue for a specific RA/TID
 * @wd_timeout: queue watchdog timeout (jiffies) - per queue
 * @frozen: tx stuck queue timer is frozen
 * @frozen_expiry_remainder: remember how long until the timer fires
 *
 * A Tx queue consists of a circular buffer of BDs (a.k.a. TFDs, transmit
 * frame descriptors) and required locking structures.
 */
struct iwl_txq {
	struct iwl_queue q;
	struct iwl_tfd *tfds;
	struct iwl_pcie_txq_scratch_buf *scratchbufs;
	dma_addr_t scratchbufs_dma;
	struct iwl_pcie_txq_entry *entries;
	spinlock_t lock;
	unsigned long frozen_expiry_remainder;
	struct timer_list stuck_timer;
	struct iwl_trans_pcie *trans_pcie;
	bool need_update;
	bool frozen;
	u8 active;
	bool ampdu;
	bool block;
	unsigned long wd_timeout;
};

static inline dma_addr_t
iwl_pcie_get_scratchbuf_dma(struct iwl_txq *txq, int idx)
{
	return txq->scratchbufs_dma +
	       sizeof(struct iwl_pcie_txq_scratch_buf) * idx;
}

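/*
 * Per-CPU scratch page used when building TSO/A-MSDU subframe headers;
 * @pos is understood to track the next free byte within @page. (This
 * description is inferred from usage, not from the original header.)
 */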
struct iwl_tso_hdr_page {
	struct page *page;
	u8 *pos;
};

/**
 * struct iwl_trans_pcie - PCIe transport specific data
 * @rxq: all the RX queue data
 * @rba: allocator for RX replenishing
 * @drv: pointer to iwl_drv
 * @trans: pointer to the generic transport area
 * @scd_base_addr: scheduler base address in SRAM
 * @scd_bc_tbls: pointer to the byte count table of the scheduler
 * @kw: keep warm address
 * @pci_dev: basic pci-network driver stuff
 * @hw_base: pci hardware address support
 * @ucode_write_complete: indicates that the ucode has been copied.
 * @ucode_write_waitq: wait queue for uCode load
 * @cmd_queue: command queue number
 * @rx_buf_size: Rx buffer size
 * @bc_table_dword: true if the BC table expects DWORD (as opposed to bytes)
 * @scd_set_active: should the transport configure the SCD for HCMD queue
 * @wide_cmd_header: true when ucode supports wide command header format
 * @sw_csum_tx: if true, then the transport will compute the csum of the TXed
 *	frame.
 * @rx_page_order: page order for receive buffer size
 * @reg_lock: protect hw register access
 * @mutex: to protect stop_device / start_fw / start_hw
 * @ref_cmd_in_flight: true when we have a host command in flight
 * @fw_mon_phys: physical address of the buffer for the firmware monitor
 * @fw_mon_page: points to the first page of the buffer for the firmware monitor
 * @fw_mon_size: size of the buffer for the firmware monitor
 */
struct iwl_trans_pcie {
	struct iwl_rxq rxq;
	struct iwl_rb_allocator rba;
	struct iwl_trans *trans;
	struct iwl_drv *drv;

	struct net_device napi_dev;
	struct napi_struct napi;

	struct iwl_tso_hdr_page __percpu *tso_hdr_page;

	/* INT ICT Table */
	__le32 *ict_tbl;
	dma_addr_t ict_tbl_dma;
	int ict_index;
	bool use_ict;
	bool is_down;
	struct isr_statistics isr_stats;

	spinlock_t irq_lock;
	struct mutex mutex;
	u32 inta_mask;
	u32 scd_base_addr;
	struct iwl_dma_ptr scd_bc_tbls;
	struct iwl_dma_ptr kw;

	struct iwl_txq *txq;
	unsigned long queue_used[BITS_TO_LONGS(IWL_MAX_HW_QUEUES)];
	unsigned long queue_stopped[BITS_TO_LONGS(IWL_MAX_HW_QUEUES)];

	/* PCI bus related data */
	struct pci_dev *pci_dev;
	void __iomem *hw_base;

	bool ucode_write_complete;
	wait_queue_head_t ucode_write_waitq;
	wait_queue_head_t wait_command_queue;

	u8 cmd_queue;
	u8 cmd_fifo;
	unsigned int cmd_q_wdg_timeout;
	u8 n_no_reclaim_cmds;
	u8 no_reclaim_cmds[MAX_NO_RECLAIM_CMDS];

	enum iwl_amsdu_size rx_buf_size;
	bool bc_table_dword;
	bool scd_set_active;
	bool wide_cmd_header;
	bool sw_csum_tx;
	u32 rx_page_order;

	/* protect hw register access */
	spinlock_t reg_lock;
	bool cmd_hold_nic_awake;
	bool ref_cmd_in_flight;

	/* protect ref counter */
	spinlock_t ref_lock;
	u32 ref_count;

	dma_addr_t fw_mon_phys;
	struct page *fw_mon_page;
	u32 fw_mon_size;
};

static inline struct iwl_trans_pcie *
IWL_TRANS_GET_PCIE_TRANS(struct iwl_trans *trans)
{
	return (void *)trans->trans_specific;
}

static inline struct iwl_trans *
iwl_trans_pcie_get_trans(struct iwl_trans_pcie *trans_pcie)
{
	return container_of((void *)trans_pcie, struct iwl_trans,
			    trans_specific);
}

/*
 * Convention: trans API functions: iwl_trans_pcie_XXX
 *	Other functions: iwl_pcie_XXX
 */
struct iwl_trans *iwl_trans_pcie_alloc(struct pci_dev *pdev,
				       const struct pci_device_id *ent,
				       const struct iwl_cfg *cfg);
void iwl_trans_pcie_free(struct iwl_trans *trans);

/*****************************************************
* RX
******************************************************/
int iwl_pcie_rx_init(struct iwl_trans *trans);
irqreturn_t iwl_pcie_irq_handler(int irq, void *dev_id);
int iwl_pcie_rx_stop(struct iwl_trans *trans);
void iwl_pcie_rx_free(struct iwl_trans *trans);

/*****************************************************
* ICT - interrupt handling
******************************************************/
irqreturn_t iwl_pcie_isr(int irq, void *data);
int iwl_pcie_alloc_ict(struct iwl_trans *trans);
void iwl_pcie_free_ict(struct iwl_trans *trans);
void iwl_pcie_reset_ict(struct iwl_trans *trans);
void iwl_pcie_disable_ict(struct iwl_trans *trans);

/*****************************************************
* TX / HCMD
******************************************************/
int iwl_pcie_tx_init(struct iwl_trans *trans);
void iwl_pcie_tx_start(struct iwl_trans *trans, u32 scd_base_addr);
int iwl_pcie_tx_stop(struct iwl_trans *trans);
void iwl_pcie_tx_free(struct iwl_trans *trans);
void iwl_trans_pcie_txq_enable(struct iwl_trans *trans, int queue, u16 ssn,
			       const struct iwl_trans_txq_scd_cfg *cfg,
			       unsigned int wdg_timeout);
void iwl_trans_pcie_txq_disable(struct iwl_trans *trans, int queue,
				bool configure_scd);
int iwl_trans_pcie_tx(struct iwl_trans *trans, struct sk_buff *skb,
		      struct iwl_device_cmd *dev_cmd, int txq_id);
void iwl_pcie_txq_check_wrptrs(struct iwl_trans *trans);
int iwl_trans_pcie_send_hcmd(struct iwl_trans *trans, struct iwl_host_cmd *cmd);
void iwl_pcie_hcmd_complete(struct iwl_trans *trans,
			    struct iwl_rx_cmd_buffer *rxb);
void iwl_trans_pcie_reclaim(struct iwl_trans *trans, int txq_id, int ssn,
			    struct sk_buff_head *skbs);
void iwl_trans_pcie_tx_reset(struct iwl_trans *trans);

void iwl_trans_pcie_ref(struct iwl_trans *trans);
void iwl_trans_pcie_unref(struct iwl_trans *trans);

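/*
 * In the TFD, each transfer buffer (TB) descriptor packs the buffer's
 * length together with the high bits of its DMA address: bits 0..3 of
 * hi_n_len carry address bits 32..35 and bits 4..15 carry the length,
 * hence the shift by 4 below.
 */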
static inline u16 iwl_pcie_tfd_tb_get_len(struct iwl_tfd *tfd, u8 idx)
{
	struct iwl_tfd_tb *tb = &tfd->tbs[idx];

	return le16_to_cpu(tb->hi_n_len) >> 4;
}

/*****************************************************
* Error handling
******************************************************/
void iwl_pcie_dump_csr(struct iwl_trans *trans);

/*****************************************************
* Helpers
******************************************************/
static inline void iwl_disable_interrupts(struct iwl_trans *trans)
{
	clear_bit(STATUS_INT_ENABLED, &trans->status);

	/* disable interrupts from uCode/NIC to host */
	iwl_write32(trans, CSR_INT_MASK, 0x00000000);

	/* acknowledge/clear/reset any interrupts still pending
	 * from uCode or flow handler (Rx/Tx DMA) */
	iwl_write32(trans, CSR_INT, 0xffffffff);
	iwl_write32(trans, CSR_FH_INT_STATUS, 0xffffffff);
	IWL_DEBUG_ISR(trans, "Disabled interrupts\n");
}

static inline void iwl_enable_interrupts(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);

	IWL_DEBUG_ISR(trans, "Enabling interrupts\n");
	set_bit(STATUS_INT_ENABLED, &trans->status);
	trans_pcie->inta_mask = CSR_INI_SET_MASK;
	iwl_write32(trans, CSR_INT_MASK, trans_pcie->inta_mask);
}

static inline void iwl_enable_rfkill_int(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);

	IWL_DEBUG_ISR(trans, "Enabling rfkill interrupt\n");
	trans_pcie->inta_mask = CSR_INT_BIT_RF_KILL;
	iwl_write32(trans, CSR_INT_MASK, trans_pcie->inta_mask);
}

static inline void iwl_wake_queue(struct iwl_trans *trans,
				  struct iwl_txq *txq)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);

	if (test_and_clear_bit(txq->q.id, trans_pcie->queue_stopped)) {
		IWL_DEBUG_TX_QUEUES(trans, "Wake hwq %d\n", txq->q.id);
		iwl_op_mode_queue_not_full(trans->op_mode, txq->q.id);
	}
}

static inline void iwl_stop_queue(struct iwl_trans *trans,
				  struct iwl_txq *txq)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);

	if (!test_and_set_bit(txq->q.id, trans_pcie->queue_stopped)) {
		iwl_op_mode_queue_full(trans->op_mode, txq->q.id);
		IWL_DEBUG_TX_QUEUES(trans, "Stop hwq %d\n", txq->q.id);
	} else {
		IWL_DEBUG_TX_QUEUES(trans, "hwq %d already stopped\n",
				    txq->q.id);
	}
}

static inline bool iwl_queue_used(const struct iwl_queue *q, int i)
{
	return q->write_ptr >= q->read_ptr ?
		(i >= q->read_ptr && i < q->write_ptr) :
		!(i < q->read_ptr && i >= q->write_ptr);
}
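
/*
 * Example (illustrative): with read_ptr == 250 and write_ptr == 4 the
 * queue has wrapped, so indices 250..255 and 0..3 count as used while
 * 4..249 are free; the negated branch above covers this wrapped case.
 */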

static inline u8 get_cmd_index(struct iwl_queue *q, u32 index)
{
	return index & (q->n_window - 1);
}

static inline bool iwl_is_rfkill_set(struct iwl_trans *trans)
{
	return !(iwl_read32(trans, CSR_GP_CNTRL) &
		 CSR_GP_CNTRL_REG_FLAG_HW_RF_KILL_SW);
}

static inline void __iwl_trans_pcie_set_bits_mask(struct iwl_trans *trans,
						  u32 reg, u32 mask, u32 value)
{
	u32 v;

#ifdef CONFIG_IWLWIFI_DEBUG
	WARN_ON_ONCE(value & ~mask);
#endif

	v = iwl_read32(trans, reg);
	v &= ~mask;
	v |= value;
	iwl_write32(trans, reg, v);
}

static inline void __iwl_trans_pcie_clear_bit(struct iwl_trans *trans,
					      u32 reg, u32 mask)
{
	__iwl_trans_pcie_set_bits_mask(trans, reg, mask, 0);
}

static inline void __iwl_trans_pcie_set_bit(struct iwl_trans *trans,
					    u32 reg, u32 mask)
{
	__iwl_trans_pcie_set_bits_mask(trans, reg, mask, mask);
}
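
/*
 * Usage sketch (illustrative): these wrappers perform a read-modify-write
 * of a CSR, e.g. asserting the MAC-access-request bit while holding
 * reg_lock:
 *
 *	__iwl_trans_pcie_set_bit(trans, CSR_GP_CNTRL,
 *				 CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
 */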

void iwl_trans_pcie_rf_kill(struct iwl_trans *trans, bool state);

#ifdef CONFIG_IWLWIFI_DEBUGFS
int iwl_trans_pcie_dbgfs_register(struct iwl_trans *trans);
#else
static inline int iwl_trans_pcie_dbgfs_register(struct iwl_trans *trans)
{
	return 0;
}
#endif

#endif /* __iwl_trans_int_pcie_h__ */