/******************************************************************************
*
* GPL LICENSE SUMMARY
*
* Copyright(c) 2008 - 2010 Intel Corporation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
* published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
* USA
*
* The full GNU General Public License is included in this distribution
* in the file called LICENSE.GPL.
*
* Contact Information:
* Intel Linux Wireless <ilw@linux.intel.com>
* Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
*
*****************************************************************************/
#include <linux/etherdevice.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/sched.h>
#include "iwl-dev.h"
#include "iwl-core.h"
#include "iwl-io.h"
#include "iwl-helpers.h"
#include "iwl-agn-hw.h"
#include "iwl-agn.h"
#include "iwl-sta.h"
static inline u32 iwlagn_get_scd_ssn(struct iwl5000_tx_resp *tx_resp)
{
return le32_to_cpup((__le32 *)&tx_resp->status +
tx_resp->frame_count) & MAX_SN;
}
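/*
 * Process the per-frame status of an aggregated Tx response: record the
 * rate and frame count for this aggregation and, for multi-frame batches,
 * build a bitmap (relative to the lowest frame index sent) of the frames
 * transmitted, then mark that a block-ack is expected (wait_for_ba).
 */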
static int iwlagn_tx_status_reply_tx(struct iwl_priv *priv,
struct iwl_ht_agg *agg,
struct iwl5000_tx_resp *tx_resp,
int txq_id, u16 start_idx)
{
u16 status;
struct agg_tx_status *frame_status = &tx_resp->status;
struct ieee80211_tx_info *info = NULL;
struct ieee80211_hdr *hdr = NULL;
u32 rate_n_flags = le32_to_cpu(tx_resp->rate_n_flags);
int i, sh, idx;
u16 seq;
if (agg->wait_for_ba)
IWL_DEBUG_TX_REPLY(priv, "got tx response w/o block-ack\n");
agg->frame_count = tx_resp->frame_count;
agg->start_idx = start_idx;
agg->rate_n_flags = rate_n_flags;
agg->bitmap = 0;
/* # frames attempted by Tx command */
if (agg->frame_count == 1) {
/* Only one frame was attempted; no block-ack will arrive */
status = le16_to_cpu(frame_status[0].status);
idx = start_idx;
/* FIXME: code repetition */
IWL_DEBUG_TX_REPLY(priv, "FrameCnt = %d, StartIdx=%d idx=%d\n",
agg->frame_count, agg->start_idx, idx);
info = IEEE80211_SKB_CB(priv->txq[txq_id].txb[idx].skb);
info->status.rates[0].count = tx_resp->failure_frame + 1;
info->flags &= ~IEEE80211_TX_CTL_AMPDU;
info->flags |= iwl_tx_status_to_mac80211(status);
iwlagn_hwrate_to_tx_control(priv, rate_n_flags, info);
/* FIXME: code repetition end */
IWL_DEBUG_TX_REPLY(priv, "1 Frame 0x%x failure :%d\n",
status & 0xff, tx_resp->failure_frame);
IWL_DEBUG_TX_REPLY(priv, "Rate Info rate_n_flags=%x\n", rate_n_flags);
agg->wait_for_ba = 0;
} else {
/* Two or more frames were attempted; expect block-ack */
u64 bitmap = 0;
/*
* Start is the lowest frame sent. It may not be the first
* frame in the batch; we figure this out dynamically during
* the following loop.
*/
int start = agg->start_idx;
/* Construct bit-map of pending frames within Tx window */
for (i = 0; i < agg->frame_count; i++) {
u16 sc;
status = le16_to_cpu(frame_status[i].status);
seq = le16_to_cpu(frame_status[i].sequence);
idx = SEQ_TO_INDEX(seq);
txq_id = SEQ_TO_QUEUE(seq);
if (status & (AGG_TX_STATE_FEW_BYTES_MSK |
AGG_TX_STATE_ABORT_MSK))
continue;
IWL_DEBUG_TX_REPLY(priv, "FrameCnt = %d, txq_id=%d idx=%d\n",
agg->frame_count, txq_id, idx);
hdr = iwl_tx_queue_get_hdr(priv, txq_id, idx);
if (!hdr) {
IWL_ERR(priv,
"BUG_ON idx doesn't point to valid skb"
" idx=%d, txq_id=%d\n", idx, txq_id);
return -1;
}
sc = le16_to_cpu(hdr->seq_ctrl);
if (idx != (SEQ_TO_SN(sc) & 0xff)) {
IWL_ERR(priv,
"BUG_ON idx doesn't match seq control"
" idx=%d, seq_idx=%d, seq=%d\n",
idx, SEQ_TO_SN(sc),
hdr->seq_ctrl);
return -1;
}
IWL_DEBUG_TX_REPLY(priv, "AGG Frame i=%d idx %d seq=%d\n",
i, idx, SEQ_TO_SN(sc));
/*
* sh -> how many frames ahead of the starting frame is
* the current one?
*
* Note that all frames sent in the batch must be in a
* 64-frame window, so this number should be in [0,63].
* If outside of this window, then we've found a new
* "first" frame in the batch and need to change start.
*/
sh = idx - start;
/*
* If >= 64, out of window. start must be at the front
* of the circular buffer, idx must be near the end of
* the buffer, and idx is the new "first" frame. Shift
* the indices around.
*/
if (sh >= 64) {
/* Shift bitmap by start - idx, wrapped */
sh = 0x100 - idx + start;
bitmap = bitmap << sh;
/* Now idx is the new start so sh = 0 */
sh = 0;
start = idx;
/*
* If <= -64 then wraps the 256-pkt circular buffer
* (e.g., start = 255 and idx = 0, sh should be 1)
*/
} else if (sh <= -64) {
sh = 0x100 - start + idx;
/*
* If < 0 but > -64, out of window. idx is before start
* but not wrapped. Shift the indices around.
*/
} else if (sh < 0) {
/* Shift by how far start is ahead of idx */
sh = start - idx;
bitmap = bitmap << sh;
/* Now idx is the new start so sh = 0 */
start = idx;
sh = 0;
}
/* Sequence number start + sh was sent in this batch */
bitmap |= 1ULL << sh;
IWL_DEBUG_TX_REPLY(priv, "start=%d bitmap=0x%llx\n",
start, (unsigned long long)bitmap);
}
/*
* Store the bitmap and possibly the new start, if we wrapped
* the buffer above
*/
agg->bitmap = bitmap;
agg->start_idx = start;
IWL_DEBUG_TX_REPLY(priv, "Frames %d start_idx=%d bitmap=0x%llx\n",
agg->frame_count, agg->start_idx,
(unsigned long long)agg->bitmap);
if (bitmap)
agg->wait_for_ba = 1;
}
return 0;
}
void iwl_check_abort_status(struct iwl_priv *priv,
u8 frame_count, u32 status)
{
if (frame_count == 1 && status == TX_STATUS_FAIL_RFKILL_FLUSH) {
IWL_ERR(priv, "TODO: Implement Tx flush command!!!\n");
}
}
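/*
 * Handle the REPLY_TX notification from the uCode: look up the Tx queue
 * entry the response refers to, fill in the mac80211 Tx status, reclaim
 * completed TFDs and wake the queue once enough space is available.
 */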
static void iwlagn_rx_reply_tx(struct iwl_priv *priv,
struct iwl_rx_mem_buffer *rxb)
{
struct iwl_rx_packet *pkt = rxb_addr(rxb);
u16 sequence = le16_to_cpu(pkt->hdr.sequence);
int txq_id = SEQ_TO_QUEUE(sequence);
int index = SEQ_TO_INDEX(sequence);
struct iwl_tx_queue *txq = &priv->txq[txq_id];
struct ieee80211_tx_info *info;
struct iwl5000_tx_resp *tx_resp = (void *)&pkt->u.raw[0];
u32 status = le16_to_cpu(tx_resp->status.status);
int tid;
int sta_id;
int freed;
unsigned long flags;
if ((index >= txq->q.n_bd) || (iwl_queue_used(&txq->q, index) == 0)) {
IWL_ERR(priv, "Read index for DMA queue txq_id (%d) index %d "
"is out of range [0-%d] %d %d\n", txq_id,
index, txq->q.n_bd, txq->q.write_ptr,
txq->q.read_ptr);
return;
}
info = IEEE80211_SKB_CB(txq->txb[txq->q.read_ptr].skb);
memset(&info->status, 0, sizeof(info->status));
tid = (tx_resp->ra_tid & IWL50_TX_RES_TID_MSK) >> IWL50_TX_RES_TID_POS;
sta_id = (tx_resp->ra_tid & IWL50_TX_RES_RA_MSK) >> IWL50_TX_RES_RA_POS;
spin_lock_irqsave(&priv->sta_lock, flags);
if (txq->sched_retry) {
const u32 scd_ssn = iwlagn_get_scd_ssn(tx_resp);
struct iwl_ht_agg *agg;
agg = &priv->stations[sta_id].tid[tid].agg;
iwlagn_tx_status_reply_tx(priv, agg, tx_resp, txq_id, index);
/* check if BAR is needed */
if ((tx_resp->frame_count == 1) && !iwl_is_tx_success(status))
info->flags |= IEEE80211_TX_STAT_AMPDU_NO_BACK;
if (txq->q.read_ptr != (scd_ssn & 0xff)) {
index = iwl_queue_dec_wrap(scd_ssn & 0xff, txq->q.n_bd);
IWL_DEBUG_TX_REPLY(priv, "Retry scheduler reclaim "
"scd_ssn=%d idx=%d txq=%d swq=%d\n",
scd_ssn , index, txq_id, txq->swq_id);
freed = iwlagn_tx_queue_reclaim(priv, txq_id, index);
iwl_free_tfds_in_queue(priv, sta_id, tid, freed);
if (priv->mac80211_registered &&
(iwl_queue_space(&txq->q) > txq->q.low_mark) &&
(agg->state != IWL_EMPTYING_HW_QUEUE_DELBA)) {
if (agg->state == IWL_AGG_OFF)
iwl_wake_queue(priv, txq_id);
else
iwl_wake_queue(priv, txq->swq_id);
}
}
} else {
BUG_ON(txq_id != txq->swq_id);
info->status.rates[0].count = tx_resp->failure_frame + 1;
info->flags |= iwl_tx_status_to_mac80211(status);
iwlagn_hwrate_to_tx_control(priv,
le32_to_cpu(tx_resp->rate_n_flags),
info);
IWL_DEBUG_TX_REPLY(priv, "TXQ %d status %s (0x%08x) rate_n_flags "
"0x%x retries %d\n",
txq_id,
iwl_get_tx_fail_reason(status), status,
le32_to_cpu(tx_resp->rate_n_flags),
tx_resp->failure_frame);
freed = iwlagn_tx_queue_reclaim(priv, txq_id, index);
iwl_free_tfds_in_queue(priv, sta_id, tid, freed);
if (priv->mac80211_registered &&
(iwl_queue_space(&txq->q) > txq->q.low_mark))
iwl_wake_queue(priv, txq_id);
}
iwlagn_txq_check_empty(priv, sta_id, tid, txq_id);
iwl_check_abort_status(priv, tx_resp->frame_count, status);
spin_unlock_irqrestore(&priv->sta_lock, flags);
}
void iwlagn_rx_handler_setup(struct iwl_priv *priv)
{
/* init calibration handlers */
priv->rx_handlers[CALIBRATION_RES_NOTIFICATION] =
iwlagn_rx_calib_result;
priv->rx_handlers[CALIBRATION_COMPLETE_NOTIFICATION] =
iwlagn_rx_calib_complete;
priv->rx_handlers[REPLY_TX] = iwlagn_rx_reply_tx;
}
void iwlagn_setup_deferred_work(struct iwl_priv *priv)
{
/* in agn, the tx power calibration is done in uCode */
priv->disable_tx_power_cal = 1;
}
int iwlagn_hw_valid_rtc_data_addr(u32 addr)
{
return (addr >= IWLAGN_RTC_DATA_LOWER_BOUND) &&
(addr < IWLAGN_RTC_DATA_UPPER_BOUND);
}
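/*
 * Build and send the Tx power command, clamping the user limit to the
 * EEPROM-provided limit (in half-dBm) on devices that supply one, and
 * selecting the command version based on the uCode API.
 */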
int iwlagn_send_tx_power(struct iwl_priv *priv)
{
struct iwl5000_tx_power_dbm_cmd tx_power_cmd;
u8 tx_ant_cfg_cmd;
/* the command takes half-dBm units, so multiply the dBm limit by 2 */
tx_power_cmd.global_lmt = (s8)(2 * priv->tx_power_user_lmt);
if (priv->tx_power_lmt_in_half_dbm &&
priv->tx_power_lmt_in_half_dbm < tx_power_cmd.global_lmt) {
/*
 * Newer devices that use the enhanced/extended tx power table
 * in EEPROM store the limit in half-dBm units. The driver
 * converts it to dBm before reporting to mac80211, which can
 * lose 1/2 dBm of resolution; the round-up performed when
 * reporting could then allow 1/2 dBm of tx power above the
 * regulatory limit. Check here: if tx_power_user_lmt is higher
 * than the EEPROM value (in half-dBm), lower the tx power to
 * the EEPROM limit.
 */
tx_power_cmd.global_lmt = priv->tx_power_lmt_in_half_dbm;
}
tx_power_cmd.flags = IWL50_TX_POWER_NO_CLOSED;
tx_power_cmd.srv_chan_lmt = IWL50_TX_POWER_AUTO;
if (IWL_UCODE_API(priv->ucode_ver) == 1)
tx_ant_cfg_cmd = REPLY_TX_POWER_DBM_CMD_V1;
else
tx_ant_cfg_cmd = REPLY_TX_POWER_DBM_CMD;
return iwl_send_cmd_pdu_async(priv, tx_ant_cfg_cmd,
sizeof(tx_power_cmd), &tx_power_cmd,
NULL);
}
void iwlagn_temperature(struct iwl_priv *priv)
{
/* store temperature from statistics (in Celsius) */
priv->temperature =
le32_to_cpu(priv->_agn.statistics.general.temperature);
iwl_tt_handler(priv);
}
u16 iwlagn_eeprom_calib_version(struct iwl_priv *priv)
{
struct iwl_eeprom_calib_hdr {
u8 version;
u8 pa_type;
u16 voltage;
} *hdr;
hdr = (struct iwl_eeprom_calib_hdr *)iwl_eeprom_query_addr(priv,
EEPROM_CALIB_ALL);
return hdr->version;
}
/*
* EEPROM
*/
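/*
 * Resolve an indirect EEPROM address: the upper bits select one of the
 * link tables (host, general, regulatory, ...), whose 16-bit entry is a
 * word offset that is added, converted to bytes, to the lower address
 * bits.
 */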
static u32 eeprom_indirect_address(const struct iwl_priv *priv, u32 address)
{
u16 offset = 0;
if ((address & INDIRECT_ADDRESS) == 0)
return address;
switch (address & INDIRECT_TYPE_MSK) {
case INDIRECT_HOST:
offset = iwl_eeprom_query16(priv, EEPROM_LINK_HOST);
break;
case INDIRECT_GENERAL:
offset = iwl_eeprom_query16(priv, EEPROM_LINK_GENERAL);
break;
case INDIRECT_REGULATORY:
offset = iwl_eeprom_query16(priv, EEPROM_LINK_REGULATORY);
break;
case INDIRECT_CALIBRATION:
offset = iwl_eeprom_query16(priv, EEPROM_LINK_CALIBRATION);
break;
case INDIRECT_PROCESS_ADJST:
offset = iwl_eeprom_query16(priv, EEPROM_LINK_PROCESS_ADJST);
break;
case INDIRECT_OTHERS:
offset = iwl_eeprom_query16(priv, EEPROM_LINK_OTHERS);
break;
default:
IWL_ERR(priv, "illegal indirect type: 0x%X\n",
address & INDIRECT_TYPE_MSK);
break;
}
/* translate the offset from words to bytes */
return (address & ADDRESS_MSK) + (offset << 1);
}
const u8 *iwlagn_eeprom_query_addr(const struct iwl_priv *priv,
size_t offset)
{
u32 address = eeprom_indirect_address(priv, offset);
BUG_ON(address >= priv->cfg->eeprom_size);
return &priv->eeprom[address];
}
struct iwl_mod_params iwlagn_mod_params = {
.amsdu_size_8K = 1,
.restart_fw = 1,
/* the rest are 0 by default */
};
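/*
 * Return every Rx buffer in the pool to the rx_used list, unmapping and
 * freeing any page still attached, and reset the queue pointers so the
 * queue can be restocked from scratch.
 */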
void iwlagn_rx_queue_reset(struct iwl_priv *priv, struct iwl_rx_queue *rxq)
{
unsigned long flags;
int i;
spin_lock_irqsave(&rxq->lock, flags);
INIT_LIST_HEAD(&rxq->rx_free);
INIT_LIST_HEAD(&rxq->rx_used);
/* Fill the rx_used queue with _all_ of the Rx buffers */
for (i = 0; i < RX_FREE_BUFFERS + RX_QUEUE_SIZE; i++) {
/* In the reset function, these buffers may have been allocated
* to an SKB, so we need to unmap and free potential storage */
if (rxq->pool[i].page != NULL) {
pci_unmap_page(priv->pci_dev, rxq->pool[i].page_dma,
PAGE_SIZE << priv->hw_params.rx_page_order,
PCI_DMA_FROMDEVICE);
__iwl_free_pages(priv, rxq->pool[i].page);
rxq->pool[i].page = NULL;
}
list_add_tail(&rxq->pool[i].list, &rxq->rx_used);
}
for (i = 0; i < RX_QUEUE_SIZE; i++)
rxq->queue[i] = NULL;
/* Set us so that we have processed and used all buffers, but have
* not restocked the Rx queue with fresh buffers */
rxq->read = rxq->write = 0;
rxq->write_actual = 0;
rxq->free_count = 0;
spin_unlock_irqrestore(&rxq->lock, flags);
}
int iwlagn_rx_init(struct iwl_priv *priv, struct iwl_rx_queue *rxq)
{
u32 rb_size;
const u32 rfdnlog = RX_QUEUE_SIZE_LOG; /* 256 RBDs */
u32 rb_timeout = 0; /* FIXME: RX_RB_TIMEOUT for all devices? */
if (!priv->cfg->use_isr_legacy)
rb_timeout = RX_RB_TIMEOUT;
if (priv->cfg->mod_params->amsdu_size_8K)
rb_size = FH_RCSR_RX_CONFIG_REG_VAL_RB_SIZE_8K;
else
rb_size = FH_RCSR_RX_CONFIG_REG_VAL_RB_SIZE_4K;
/* Stop Rx DMA */
iwl_write_direct32(priv, FH_MEM_RCSR_CHNL0_CONFIG_REG, 0);
/* Reset driver's Rx queue write index */
iwl_write_direct32(priv, FH_RSCSR_CHNL0_RBDCB_WPTR_REG, 0);
/* Tell device where to find RBD circular buffer in DRAM */
iwl_write_direct32(priv, FH_RSCSR_CHNL0_RBDCB_BASE_REG,
(u32)(rxq->bd_dma >> 8));
/* Tell device where in DRAM to update its Rx status */
iwl_write_direct32(priv, FH_RSCSR_CHNL0_STTS_WPTR_REG,
rxq->rb_stts_dma >> 4);
/* Enable Rx DMA
* FH_RCSR_CHNL0_RX_IGNORE_RXF_EMPTY is set because of HW bug in
* the credit mechanism in 5000 HW RX FIFO
* Direct rx interrupts to host
* Rx buffer size 4 or 8k
* RB timeout 0x10
* 256 RBDs
*/
iwl_write_direct32(priv, FH_MEM_RCSR_CHNL0_CONFIG_REG,
FH_RCSR_RX_CONFIG_CHNL_EN_ENABLE_VAL |
FH_RCSR_CHNL0_RX_IGNORE_RXF_EMPTY |
FH_RCSR_CHNL0_RX_CONFIG_IRQ_DEST_INT_HOST_VAL |
FH_RCSR_CHNL0_RX_CONFIG_SINGLE_FRAME_MSK |
rb_size|
(rb_timeout << FH_RCSR_RX_CONFIG_REG_IRQ_RBTH_POS)|
(rfdnlog << FH_RCSR_RX_CONFIG_RBDCB_SIZE_POS));
/* Set interrupt coalescing timer to default (2048 usecs) */
iwl_write8(priv, CSR_INT_COALESCING, IWL_HOST_INT_TIMEOUT_DEF);
return 0;
}
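/*
 * Basic NIC bring-up: initialize and configure the APM, select the power
 * source, then allocate (or reset) and initialize the Rx queue and the
 * Tx/command queues.
 */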
int iwlagn_hw_nic_init(struct iwl_priv *priv)
{
unsigned long flags;
struct iwl_rx_queue *rxq = &priv->rxq;
int ret;
/* nic_init */
spin_lock_irqsave(&priv->lock, flags);
priv->cfg->ops->lib->apm_ops.init(priv);
/* Set interrupt coalescing calibration timer to default (512 usecs) */
iwl_write8(priv, CSR_INT_COALESCING, IWL_HOST_INT_CALIB_TIMEOUT_DEF);
spin_unlock_irqrestore(&priv->lock, flags);
ret = priv->cfg->ops->lib->apm_ops.set_pwr_src(priv, IWL_PWR_SRC_VMAIN);
priv->cfg->ops->lib->apm_ops.config(priv);
/* Allocate the RX queue, or reset if it is already allocated */
if (!rxq->bd) {
ret = iwl_rx_queue_alloc(priv);
if (ret) {
IWL_ERR(priv, "Unable to initialize Rx queue\n");
return -ENOMEM;
}
} else
iwlagn_rx_queue_reset(priv, rxq);
iwlagn_rx_replenish(priv);
iwlagn_rx_init(priv, rxq);
spin_lock_irqsave(&priv->lock, flags);
rxq->need_update = 1;
iwl_rx_queue_update_write_ptr(priv, rxq);
spin_unlock_irqrestore(&priv->lock, flags);
/* Allocate or reset and init all Tx and Command queues */
if (!priv->txq) {
ret = iwlagn_txq_ctx_alloc(priv);
if (ret)
return ret;
} else
iwlagn_txq_ctx_reset(priv);
set_bit(STATUS_INIT, &priv->status);
return 0;
}
/**
* iwlagn_dma_addr2rbd_ptr - convert a DMA address to a uCode read buffer ptr
*/
static inline __le32 iwlagn_dma_addr2rbd_ptr(struct iwl_priv *priv,
dma_addr_t dma_addr)
{
return cpu_to_le32((u32)(dma_addr >> 8));
}
/**
* iwlagn_rx_queue_restock - refill RX queue from pre-allocated pool
*
* If there are slots in the RX queue that need to be restocked,
* and we have free pre-allocated buffers, fill the ranks as much
* as we can, pulling from rx_free.
*
* This moves the 'write' index forward to catch up with 'processed', and
* also updates the memory address in the firmware to reference the new
* target buffer.
*/
void iwlagn_rx_queue_restock(struct iwl_priv *priv)
{
struct iwl_rx_queue *rxq = &priv->rxq;
struct list_head *element;
struct iwl_rx_mem_buffer *rxb;
unsigned long flags;
spin_lock_irqsave(&rxq->lock, flags);
while ((iwl_rx_queue_space(rxq) > 0) && (rxq->free_count)) {
/* The overwritten rxb must be a used one */
rxb = rxq->queue[rxq->write];
BUG_ON(rxb && rxb->page);
/* Get next free Rx buffer, remove from free list */
element = rxq->rx_free.next;
rxb = list_entry(element, struct iwl_rx_mem_buffer, list);
list_del(element);
/* Point to Rx buffer via next RBD in circular buffer */
rxq->bd[rxq->write] = iwlagn_dma_addr2rbd_ptr(priv,
rxb->page_dma);
rxq->queue[rxq->write] = rxb;
rxq->write = (rxq->write + 1) & RX_QUEUE_MASK;
rxq->free_count--;
}
spin_unlock_irqrestore(&rxq->lock, flags);
/* If the pre-allocated buffer pool is dropping low, schedule to
* refill it */
if (rxq->free_count <= RX_LOW_WATERMARK)
queue_work(priv->workqueue, &priv->rx_replenish);
/* If we've added more space for the firmware to place data, tell it.
* Increment device's write pointer in multiples of 8. */
if (rxq->write_actual != (rxq->write & ~0x7)) {
spin_lock_irqsave(&rxq->lock, flags);
rxq->need_update = 1;
spin_unlock_irqrestore(&rxq->lock, flags);
iwl_rx_queue_update_write_ptr(priv, rxq);
}
}
/**
 * iwlagn_rx_allocate - Move all used buffers from rx_used to rx_free
 *
 * When moving a buffer to rx_free, a page is allocated for the slot.
 *
 * Callers then restock the Rx queue via iwlagn_rx_queue_restock.
 */
void iwlagn_rx_allocate(struct iwl_priv *priv, gfp_t priority)
{
struct iwl_rx_queue *rxq = &priv->rxq;
struct list_head *element;
struct iwl_rx_mem_buffer *rxb;
struct page *page;
unsigned long flags;
gfp_t gfp_mask = priority;
while (1) {
spin_lock_irqsave(&rxq->lock, flags);
if (list_empty(&rxq->rx_used)) {
spin_unlock_irqrestore(&rxq->lock, flags);
return;
}
spin_unlock_irqrestore(&rxq->lock, flags);
if (rxq->free_count > RX_LOW_WATERMARK)
gfp_mask |= __GFP_NOWARN;
if (priv->hw_params.rx_page_order > 0)
gfp_mask |= __GFP_COMP;
/* Alloc a new receive buffer */
page = alloc_pages(gfp_mask, priv->hw_params.rx_page_order);
if (!page) {
if (net_ratelimit())
IWL_DEBUG_INFO(priv, "alloc_pages failed, "
"order: %d\n",
priv->hw_params.rx_page_order);
if ((rxq->free_count <= RX_LOW_WATERMARK) &&
net_ratelimit())
IWL_CRIT(priv, "Failed to alloc_pages with %s. Only %u free buffers remaining.\n",
priority == GFP_ATOMIC ? "GFP_ATOMIC" : "GFP_KERNEL",
rxq->free_count);
/* We don't reschedule replenish work here -- we will
* call the restock method and if it still needs
* more buffers it will schedule replenish */
return;
}
spin_lock_irqsave(&rxq->lock, flags);
if (list_empty(&rxq->rx_used)) {
spin_unlock_irqrestore(&rxq->lock, flags);
__free_pages(page, priv->hw_params.rx_page_order);
return;
}
element = rxq->rx_used.next;
rxb = list_entry(element, struct iwl_rx_mem_buffer, list);
list_del(element);
spin_unlock_irqrestore(&rxq->lock, flags);
BUG_ON(rxb->page);
rxb->page = page;
/* Get physical address of the RB */
rxb->page_dma = pci_map_page(priv->pci_dev, page, 0,
PAGE_SIZE << priv->hw_params.rx_page_order,
PCI_DMA_FROMDEVICE);
/* dma address must be no more than 36 bits */
BUG_ON(rxb->page_dma & ~DMA_BIT_MASK(36));
/* and also 256 byte aligned! */
BUG_ON(rxb->page_dma & DMA_BIT_MASK(8));
spin_lock_irqsave(&rxq->lock, flags);
list_add_tail(&rxb->list, &rxq->rx_free);
rxq->free_count++;
priv->alloc_rxb_page++;
spin_unlock_irqrestore(&rxq->lock, flags);
}
}
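/*
 * Allocate fresh Rx buffers (may sleep, GFP_KERNEL) and restock the Rx
 * queue. Called as a scheduled work item, and directly during
 * initialization.
 */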
void iwlagn_rx_replenish(struct iwl_priv *priv)
{
unsigned long flags;
iwlagn_rx_allocate(priv, GFP_KERNEL);
spin_lock_irqsave(&priv->lock, flags);
iwlagn_rx_queue_restock(priv);
spin_unlock_irqrestore(&priv->lock, flags);
}
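/* Atomic variant for contexts that must not sleep (GFP_ATOMIC) */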
void iwlagn_rx_replenish_now(struct iwl_priv *priv)
{
iwlagn_rx_allocate(priv, GFP_ATOMIC);
iwlagn_rx_queue_restock(priv);
}
/* Assumes that the page field of the buffers in 'pool' is kept accurate.
 * If a page has been detached, the pool entry must have its page set to
 * NULL. This free routine walks the list of pool entries and, if the page
 * is non-NULL, unmaps and frees it.
 */
void iwlagn_rx_queue_free(struct iwl_priv *priv, struct iwl_rx_queue *rxq)
{
int i;
for (i = 0; i < RX_QUEUE_SIZE + RX_FREE_BUFFERS; i++) {
if (rxq->pool[i].page != NULL) {
pci_unmap_page(priv->pci_dev, rxq->pool[i].page_dma,
PAGE_SIZE << priv->hw_params.rx_page_order,
PCI_DMA_FROMDEVICE);
__iwl_free_pages(priv, rxq->pool[i].page);
rxq->pool[i].page = NULL;
}
}
dma_free_coherent(&priv->pci_dev->dev, 4 * RX_QUEUE_SIZE, rxq->bd,
rxq->bd_dma);
dma_free_coherent(&priv->pci_dev->dev, sizeof(struct iwl_rb_status),
rxq->rb_stts, rxq->rb_stts_dma);
rxq->bd = NULL;
rxq->rb_stts = NULL;
}
int iwlagn_rxq_stop(struct iwl_priv *priv)
{
/* stop Rx DMA */
iwl_write_direct32(priv, FH_MEM_RCSR_CHNL0_CONFIG_REG, 0);
iwl_poll_direct_bit(priv, FH_MEM_RSSR_RX_STATUS_REG,
FH_RSSR_CHNL0_RX_STATUS_CHNL_IDLE, 1000);
return 0;
}
int iwlagn_hwrate_to_mac80211_idx(u32 rate_n_flags, enum ieee80211_band band)
{
int idx = 0;
int band_offset = 0;
/* HT rate format: mac80211 wants an MCS number, which is just LSB */
if (rate_n_flags & RATE_MCS_HT_MSK) {
idx = (rate_n_flags & 0xff);
return idx;
/* Legacy rate format, search for match in table */
} else {
if (band == IEEE80211_BAND_5GHZ)
band_offset = IWL_FIRST_OFDM_RATE;
for (idx = band_offset; idx < IWL_RATE_COUNT_LEGACY; idx++)
if (iwl_rates[idx].plcp == (rate_n_flags & 0xFF))
return idx - band_offset;
}
return -1;
}
/* Calc max signal level (dBm) among 3 possible receivers */
static inline int iwlagn_calc_rssi(struct iwl_priv *priv,
struct iwl_rx_phy_res *rx_resp)
{
return priv->cfg->ops->utils->calc_rssi(priv, rx_resp);
}
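/*
 * Translate the MPDU Rx decryption status bits (RX_MPDU_RES_STATUS_*)
 * into the RX_RES_STATUS_* format consumed by the common decryption
 * flag handling.
 */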
static u32 iwlagn_translate_rx_status(struct iwl_priv *priv, u32 decrypt_in)
{
u32 decrypt_out = 0;
if ((decrypt_in & RX_RES_STATUS_STATION_FOUND) ==
RX_RES_STATUS_STATION_FOUND)
decrypt_out |= (RX_RES_STATUS_STATION_FOUND |
RX_RES_STATUS_NO_STATION_INFO_MISMATCH);
decrypt_out |= (decrypt_in & RX_RES_STATUS_SEC_TYPE_MSK);
/* packet was not encrypted */
if ((decrypt_in & RX_RES_STATUS_SEC_TYPE_MSK) ==
RX_RES_STATUS_SEC_TYPE_NONE)
return decrypt_out;
/* packet was encrypted with unknown alg */
if ((decrypt_in & RX_RES_STATUS_SEC_TYPE_MSK) ==
RX_RES_STATUS_SEC_TYPE_ERR)
return decrypt_out;
/* decryption was not done in HW */
if ((decrypt_in & RX_MPDU_RES_STATUS_DEC_DONE_MSK) !=
RX_MPDU_RES_STATUS_DEC_DONE_MSK)
return decrypt_out;
switch (decrypt_in & RX_RES_STATUS_SEC_TYPE_MSK) {
case RX_RES_STATUS_SEC_TYPE_CCMP:
/* alg is CCM: check MIC only */
if (!(decrypt_in & RX_MPDU_RES_STATUS_MIC_OK))
/* Bad MIC */
decrypt_out |= RX_RES_STATUS_BAD_ICV_MIC;
else
decrypt_out |= RX_RES_STATUS_DECRYPT_OK;
break;
case RX_RES_STATUS_SEC_TYPE_TKIP:
if (!(decrypt_in & RX_MPDU_RES_STATUS_TTAK_OK)) {
/* Bad TTAK */
decrypt_out |= RX_RES_STATUS_BAD_KEY_TTAK;
break;
}
/* fall through if TTAK OK */
default:
if (!(decrypt_in & RX_MPDU_RES_STATUS_ICV_OK))
decrypt_out |= RX_RES_STATUS_BAD_ICV_MIC;
else
decrypt_out |= RX_RES_STATUS_DECRYPT_OK;
break;
}
IWL_DEBUG_RX(priv, "decrypt_in:0x%x decrypt_out = 0x%x\n",
decrypt_in, decrypt_out);
return decrypt_out;
}
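/*
 * Hand a received frame to mac80211: attach the Rx page as a fragment of
 * a freshly allocated skb, copy the Rx status into the skb control block
 * and pass ownership of the page to the stack.
 */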
static void iwlagn_pass_packet_to_mac80211(struct iwl_priv *priv,
struct ieee80211_hdr *hdr,
u16 len,
u32 ampdu_status,
struct iwl_rx_mem_buffer *rxb,
struct ieee80211_rx_status *stats)
{
struct sk_buff *skb;
__le16 fc = hdr->frame_control;
/* We only process data packets if the interface is open */
if (unlikely(!priv->is_open)) {
IWL_DEBUG_DROP_LIMIT(priv,
"Dropping packet while interface is not open.\n");
return;
}
/* In case of HW accelerated crypto and bad decryption, drop */
if (!priv->cfg->mod_params->sw_crypto &&
iwl_set_decrypted_flag(priv, hdr, ampdu_status, stats))
return;
skb = dev_alloc_skb(128);
if (!skb) {
IWL_ERR(priv, "dev_alloc_skb failed\n");
return;
}
skb_add_rx_frag(skb, 0, rxb->page, (void *)hdr - rxb_addr(rxb), len);
iwl_update_stats(priv, false, fc, len);
memcpy(IEEE80211_SKB_RXCB(skb), stats, sizeof(*stats));
ieee80211_rx(priv->hw, skb);
priv->alloc_rxb_page--;
rxb->page = NULL;
}
/* Called for REPLY_RX (legacy ABG frames), or
* REPLY_RX_MPDU_CMD (HT high-throughput N frames). */
void iwlagn_rx_reply_rx(struct iwl_priv *priv,
struct iwl_rx_mem_buffer *rxb)
{
struct ieee80211_hdr *header;
struct ieee80211_rx_status rx_status;
struct iwl_rx_packet *pkt = rxb_addr(rxb);
struct iwl_rx_phy_res *phy_res;
__le32 rx_pkt_status;
struct iwl_rx_mpdu_res_start *amsdu;
u32 len;
u32 ampdu_status;
u32 rate_n_flags;
/**
* REPLY_RX and REPLY_RX_MPDU_CMD are handled differently.
* REPLY_RX: physical layer info is in this buffer
* REPLY_RX_MPDU_CMD: physical layer info was sent in separate
* command and cached in priv->last_phy_res
*
* Here we set up local variables depending on which command is
* received.
*/
if (pkt->hdr.cmd == REPLY_RX) {
phy_res = (struct iwl_rx_phy_res *)pkt->u.raw;
header = (struct ieee80211_hdr *)(pkt->u.raw + sizeof(*phy_res)
+ phy_res->cfg_phy_cnt);
len = le16_to_cpu(phy_res->byte_count);
rx_pkt_status = *(__le32 *)(pkt->u.raw + sizeof(*phy_res) +
phy_res->cfg_phy_cnt + len);
ampdu_status = le32_to_cpu(rx_pkt_status);
} else {
if (!priv->_agn.last_phy_res_valid) {
IWL_ERR(priv, "MPDU frame without cached PHY data\n");
return;
}
phy_res = &priv->_agn.last_phy_res;
amsdu = (struct iwl_rx_mpdu_res_start *)pkt->u.raw;
header = (struct ieee80211_hdr *)(pkt->u.raw + sizeof(*amsdu));
len = le16_to_cpu(amsdu->byte_count);
rx_pkt_status = *(__le32 *)(pkt->u.raw + sizeof(*amsdu) + len);
ampdu_status = iwlagn_translate_rx_status(priv,
le32_to_cpu(rx_pkt_status));
}
if ((unlikely(phy_res->cfg_phy_cnt > 20))) {
IWL_DEBUG_DROP(priv, "dsp size out of range [0,20]: %d/n",
phy_res->cfg_phy_cnt);
return;
}
if (!(rx_pkt_status & RX_RES_STATUS_NO_CRC32_ERROR) ||
!(rx_pkt_status & RX_RES_STATUS_NO_RXE_OVERFLOW)) {
IWL_DEBUG_RX(priv, "Bad CRC or FIFO: 0x%08X.\n",
le32_to_cpu(rx_pkt_status));
return;
}
/* This will be used in several places later */
rate_n_flags = le32_to_cpu(phy_res->rate_n_flags);
/* rx_status carries information about the packet to mac80211 */
rx_status.mactime = le64_to_cpu(phy_res->timestamp);
rx_status.freq =
ieee80211_channel_to_frequency(le16_to_cpu(phy_res->channel));
rx_status.band = (phy_res->phy_flags & RX_RES_PHY_FLAGS_BAND_24_MSK) ?
IEEE80211_BAND_2GHZ : IEEE80211_BAND_5GHZ;
rx_status.rate_idx =
iwlagn_hwrate_to_mac80211_idx(rate_n_flags, rx_status.band);
rx_status.flag = 0;
/* The TSF isn't reliable; as a workaround for a smoother user
 * experience, don't propagate it to mac80211 */
/*rx_status.flag |= RX_FLAG_TSFT;*/
priv->ucode_beacon_time = le32_to_cpu(phy_res->beacon_time_stamp);
/* Find max signal strength (dBm) among 3 antenna/receiver chains */
rx_status.signal = iwlagn_calc_rssi(priv, phy_res);
iwl_dbg_log_rx_data_frame(priv, len, header);
IWL_DEBUG_STATS_LIMIT(priv, "Rssi %d, TSF %llu\n",
rx_status.signal, (unsigned long long)rx_status.mactime);
/*
* "antenna number"
*
* It seems that the antenna field in the phy flags value
* is actually a bit field. This is undefined by radiotap,
* it wants an actual antenna number but I always get "7"
* for most legacy frames I receive indicating that the
* same frame was received on all three RX chains.
*
* I think this field should be removed in favor of a
* new 802.11n radiotap field "RX chains" that is defined
* as a bitmask.
*/
rx_status.antenna =
(le16_to_cpu(phy_res->phy_flags) & RX_RES_PHY_FLAGS_ANTENNA_MSK)
>> RX_RES_PHY_FLAGS_ANTENNA_POS;
/* set the preamble flag if appropriate */
if (phy_res->phy_flags & RX_RES_PHY_FLAGS_SHORT_PREAMBLE_MSK)
rx_status.flag |= RX_FLAG_SHORTPRE;
/* Set up the HT phy flags */
if (rate_n_flags & RATE_MCS_HT_MSK)
rx_status.flag |= RX_FLAG_HT;
if (rate_n_flags & RATE_MCS_HT40_MSK)
rx_status.flag |= RX_FLAG_40MHZ;
if (rate_n_flags & RATE_MCS_SGI_MSK)
rx_status.flag |= RX_FLAG_SHORT_GI;
iwlagn_pass_packet_to_mac80211(priv, header, len, ampdu_status,
rxb, &rx_status);
}
/* Cache phy data (Rx signal strength, etc) for HT frame (REPLY_RX_PHY_CMD).
* This will be used later in iwl_rx_reply_rx() for REPLY_RX_MPDU_CMD. */
void iwlagn_rx_reply_rx_phy(struct iwl_priv *priv,
struct iwl_rx_mem_buffer *rxb)
{
struct iwl_rx_packet *pkt = rxb_addr(rxb);
priv->_agn.last_phy_res_valid = true;
memcpy(&priv->_agn.last_phy_res, pkt->u.raw,
sizeof(struct iwl_rx_phy_res));
}
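/*
 * Fill a single scan channel entry for an internal passive scan: pick one
 * valid channel in the given band and use default dwell times and Tx
 * power settings. Returns the number of channels added (0 or 1).
 */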
static int iwl_get_single_channel_for_scan(struct iwl_priv *priv,
struct ieee80211_vif *vif,
enum ieee80211_band band,
struct iwl_scan_channel *scan_ch)
{
const struct ieee80211_supported_band *sband;
u16 passive_dwell = 0;
u16 active_dwell = 0;
int added = 0;
u16 channel = 0;
sband = iwl_get_hw_mode(priv, band);
if (!sband) {
IWL_ERR(priv, "invalid band\n");
return added;
}
active_dwell = iwl_get_active_dwell_time(priv, band, 0);
passive_dwell = iwl_get_passive_dwell_time(priv, band, vif);
if (passive_dwell <= active_dwell)
passive_dwell = active_dwell + 1;
channel = iwl_get_single_channel_number(priv, band);
if (channel) {
scan_ch->channel = cpu_to_le16(channel);
scan_ch->type = SCAN_CHANNEL_TYPE_PASSIVE;
scan_ch->active_dwell = cpu_to_le16(active_dwell);
scan_ch->passive_dwell = cpu_to_le16(passive_dwell);
/* Set txpower levels to defaults */
scan_ch->dsp_atten = 110;
if (band == IEEE80211_BAND_5GHZ)
scan_ch->tx_gain = ((1 << 5) | (3 << 3)) | 3;
else
scan_ch->tx_gain = ((1 << 5) | (5 << 3));
added++;
} else
IWL_ERR(priv, "no valid channel found\n");
return added;
}
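/*
 * Fill the scan channel list from the mac80211 scan request for the given
 * band, marking each channel active or passive and setting dwell times
 * and default Tx power. Returns the number of channels added.
 */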
static int iwl_get_channels_for_scan(struct iwl_priv *priv,
struct ieee80211_vif *vif,
enum ieee80211_band band,
u8 is_active, u8 n_probes,
struct iwl_scan_channel *scan_ch)
{
struct ieee80211_channel *chan;
const struct ieee80211_supported_band *sband;
const struct iwl_channel_info *ch_info;
u16 passive_dwell = 0;
u16 active_dwell = 0;
int added, i;
u16 channel;
sband = iwl_get_hw_mode(priv, band);
if (!sband)
return 0;
active_dwell = iwl_get_active_dwell_time(priv, band, n_probes);
passive_dwell = iwl_get_passive_dwell_time(priv, band, vif);
if (passive_dwell <= active_dwell)
passive_dwell = active_dwell + 1;
for (i = 0, added = 0; i < priv->scan_request->n_channels; i++) {
chan = priv->scan_request->channels[i];
if (chan->band != band)
continue;
channel = ieee80211_frequency_to_channel(chan->center_freq);
scan_ch->channel = cpu_to_le16(channel);
ch_info = iwl_get_channel_info(priv, band, channel);
if (!is_channel_valid(ch_info)) {
IWL_DEBUG_SCAN(priv, "Channel %d is INVALID for this band.\n",
channel);
continue;
}
if (!is_active || is_channel_passive(ch_info) ||
(chan->flags & IEEE80211_CHAN_PASSIVE_SCAN))
scan_ch->type = SCAN_CHANNEL_TYPE_PASSIVE;
else
scan_ch->type = SCAN_CHANNEL_TYPE_ACTIVE;
if (n_probes)
scan_ch->type |= IWL_SCAN_PROBE_MASK(n_probes);
scan_ch->active_dwell = cpu_to_le16(active_dwell);
scan_ch->passive_dwell = cpu_to_le16(passive_dwell);
/* Set txpower levels to defaults */
scan_ch->dsp_atten = 110;
/* NOTE: if we were doing 6Mb OFDM for scans we'd use
* power level:
* scan_ch->tx_gain = ((1 << 5) | (2 << 3)) | 3;
*/
if (band == IEEE80211_BAND_5GHZ)
scan_ch->tx_gain = ((1 << 5) | (3 << 3)) | 3;
else
scan_ch->tx_gain = ((1 << 5) | (5 << 3));
IWL_DEBUG_SCAN(priv, "Scanning ch=%d prob=0x%X [%s %d]\n",
channel, le32_to_cpu(scan_ch->type),
(scan_ch->type & SCAN_CHANNEL_TYPE_ACTIVE) ?
"ACTIVE" : "PASSIVE",
(scan_ch->type & SCAN_CHANNEL_TYPE_ACTIVE) ?
active_dwell : passive_dwell);
scan_ch++;
added++;
}
IWL_DEBUG_SCAN(priv, "total channels to scan %d\n", added);
return added;
}
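/*
 * Build and send the REPLY_SCAN_CMD host command: verify that a scan may
 * run in the current state, fill in dwell/suspend times, direct SSIDs,
 * Tx/Rx antenna configuration, the probe request and the channel list,
 * then issue the command and arm the scan watchdog.
 */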
void iwlagn_request_scan(struct iwl_priv *priv, struct ieee80211_vif *vif)
{
struct iwl_host_cmd cmd = {
.id = REPLY_SCAN_CMD,
.len = sizeof(struct iwl_scan_cmd),
.flags = CMD_SIZE_HUGE,
};
struct iwl_scan_cmd *scan;
struct ieee80211_conf *conf = NULL;
u32 rate_flags = 0;
u16 cmd_len;
u16 rx_chain = 0;
enum ieee80211_band band;
u8 n_probes = 0;
u8 rx_ant = priv->hw_params.valid_rx_ant;
u8 rate;
bool is_active = false;
int chan_mod;
u8 active_chains;
u8 scan_tx_antennas = priv->hw_params.valid_tx_ant;
conf = ieee80211_get_hw_conf(priv->hw);
cancel_delayed_work(&priv->scan_check);
if (!iwl_is_ready(priv)) {
IWL_WARN(priv, "request scan called when driver not ready.\n");
goto done;
}
/* Make sure the scan wasn't canceled before this queued work
* was given the chance to run... */
if (!test_bit(STATUS_SCANNING, &priv->status))
goto done;
/* This should never be called or scheduled if there is currently
* a scan active in the hardware. */
if (test_bit(STATUS_SCAN_HW, &priv->status)) {
IWL_DEBUG_INFO(priv, "Multiple concurrent scan requests. "
"Ignoring second request.\n");
goto done;
}
if (test_bit(STATUS_EXIT_PENDING, &priv->status)) {
IWL_DEBUG_SCAN(priv, "Aborting scan due to device shutdown\n");
goto done;
}
if (test_bit(STATUS_SCAN_ABORTING, &priv->status)) {
IWL_DEBUG_HC(priv, "Scan request while abort pending. Queuing.\n");
goto done;
}
if (iwl_is_rfkill(priv)) {
IWL_DEBUG_HC(priv, "Aborting scan due to RF Kill activation\n");
goto done;
}
if (!test_bit(STATUS_READY, &priv->status)) {
IWL_DEBUG_HC(priv, "Scan request while uninitialized. Queuing.\n");
goto done;
}
if (!priv->scan_cmd) {
priv->scan_cmd = kmalloc(sizeof(struct iwl_scan_cmd) +
IWL_MAX_SCAN_SIZE, GFP_KERNEL);
if (!priv->scan_cmd) {
IWL_DEBUG_SCAN(priv,
"fail to allocate memory for scan\n");
goto done;
}
}
scan = priv->scan_cmd;
memset(scan, 0, sizeof(struct iwl_scan_cmd) + IWL_MAX_SCAN_SIZE);
scan->quiet_plcp_th = IWL_PLCP_QUIET_THRESH;
scan->quiet_time = IWL_ACTIVE_QUIET_TIME;
if (iwl_is_associated(priv)) {
u16 interval = 0;
u32 extra;
u32 suspend_time = 100;
u32 scan_suspend_time = 100;
unsigned long flags;
IWL_DEBUG_INFO(priv, "Scanning while associated...\n");
spin_lock_irqsave(&priv->lock, flags);
interval = vif ? vif->bss_conf.beacon_int : 0;
spin_unlock_irqrestore(&priv->lock, flags);
scan->suspend_time = 0;
scan->max_out_time = cpu_to_le32(200 * 1024);
if (!interval)
interval = suspend_time;
extra = (suspend_time / interval) << 22;
scan_suspend_time = (extra |
((suspend_time % interval) * 1024));
scan->suspend_time = cpu_to_le32(scan_suspend_time);
IWL_DEBUG_SCAN(priv, "suspend_time 0x%X beacon interval %d\n",
scan_suspend_time, interval);
}
if (priv->is_internal_short_scan) {
IWL_DEBUG_SCAN(priv, "Start internal passive scan.\n");
} else if (priv->scan_request->n_ssids) {
int i, p = 0;
IWL_DEBUG_SCAN(priv, "Kicking off active scan\n");
for (i = 0; i < priv->scan_request->n_ssids; i++) {
/* always does wildcard anyway */
if (!priv->scan_request->ssids[i].ssid_len)
continue;
scan->direct_scan[p].id = WLAN_EID_SSID;
scan->direct_scan[p].len =
priv->scan_request->ssids[i].ssid_len;
memcpy(scan->direct_scan[p].ssid,
priv->scan_request->ssids[i].ssid,
priv->scan_request->ssids[i].ssid_len);
n_probes++;
p++;
}
is_active = true;
} else
IWL_DEBUG_SCAN(priv, "Start passive scan.\n");
scan->tx_cmd.tx_flags = TX_CMD_FLG_SEQ_CTL_MSK;
scan->tx_cmd.sta_id = priv->hw_params.bcast_sta_id;
scan->tx_cmd.stop_time.life_time = TX_CMD_LIFE_TIME_INFINITE;
switch (priv->scan_band) {
case IEEE80211_BAND_2GHZ:
scan->flags = RXON_FLG_BAND_24G_MSK | RXON_FLG_AUTO_DETECT_MSK;
chan_mod = le32_to_cpu(priv->active_rxon.flags & RXON_FLG_CHANNEL_MODE_MSK)
>> RXON_FLG_CHANNEL_MODE_POS;
if (chan_mod == CHANNEL_MODE_PURE_40) {
rate = IWL_RATE_6M_PLCP;
} else {
rate = IWL_RATE_1M_PLCP;
rate_flags = RATE_MCS_CCK_MSK;
}
scan->good_CRC_th = IWL_GOOD_CRC_TH_DISABLED;
break;
case IEEE80211_BAND_5GHZ:
rate = IWL_RATE_6M_PLCP;
/*
* If active scanning is requested but a certain channel is
* marked passive, we can do active scanning if we detect
* transmissions.
*
* There is an issue with some firmware versions that triggers
* a sysassert on a "good CRC threshold" of zero (== disabled),
* on a radar channel even though this means that we should NOT
* send probes.
*
* The "good CRC threshold" is the number of frames that we
* need to receive during our dwell time on a channel before
* sending out probes -- setting this to a huge value will
* mean we never reach it, but at the same time work around
* the aforementioned issue. Thus use IWL_GOOD_CRC_TH_NEVER
* here instead of IWL_GOOD_CRC_TH_DISABLED.
*/
scan->good_CRC_th = is_active ? IWL_GOOD_CRC_TH_DEFAULT :
IWL_GOOD_CRC_TH_NEVER;
break;
default:
IWL_WARN(priv, "Invalid scan band count\n");
goto done;
}
band = priv->scan_band;
if (priv->cfg->scan_rx_antennas[band])
rx_ant = priv->cfg->scan_rx_antennas[band];
if (priv->cfg->scan_tx_antennas[band])
scan_tx_antennas = priv->cfg->scan_tx_antennas[band];
priv->scan_tx_ant[band] = iwl_toggle_tx_ant(priv, priv->scan_tx_ant[band],
scan_tx_antennas);
rate_flags |= iwl_ant_idx_to_flags(priv->scan_tx_ant[band]);
scan->tx_cmd.rate_n_flags = iwl_hw_set_rate_n_flags(rate, rate_flags);
/* In power save mode use one chain, otherwise use all chains */
if (test_bit(STATUS_POWER_PMI, &priv->status)) {
/* rx_ant has been set to all valid chains previously */
active_chains = rx_ant &
((u8)(priv->chain_noise_data.active_chains));
if (!active_chains)
active_chains = rx_ant;
IWL_DEBUG_SCAN(priv, "chain_noise_data.active_chains: %u\n",
priv->chain_noise_data.active_chains);
rx_ant = first_antenna(active_chains);
}
/* MIMO is not used here, but value is required */
rx_chain |= priv->hw_params.valid_rx_ant << RXON_RX_CHAIN_VALID_POS;
rx_chain |= rx_ant << RXON_RX_CHAIN_FORCE_MIMO_SEL_POS;
rx_chain |= rx_ant << RXON_RX_CHAIN_FORCE_SEL_POS;
rx_chain |= 0x1 << RXON_RX_CHAIN_DRIVER_FORCE_POS;
scan->rx_chain = cpu_to_le16(rx_chain);
if (!priv->is_internal_short_scan) {
cmd_len = iwl_fill_probe_req(priv,
(struct ieee80211_mgmt *)scan->data,
vif->addr,
priv->scan_request->ie,
priv->scan_request->ie_len,
IWL_MAX_SCAN_SIZE - sizeof(*scan));
} else {
/* use bcast addr, will not be transmitted but must be valid */
cmd_len = iwl_fill_probe_req(priv,
(struct ieee80211_mgmt *)scan->data,
iwl_bcast_addr, NULL, 0,
IWL_MAX_SCAN_SIZE - sizeof(*scan));
}
scan->tx_cmd.len = cpu_to_le16(cmd_len);
scan->filter_flags |= (RXON_FILTER_ACCEPT_GRP_MSK |
RXON_FILTER_BCON_AWARE_MSK);
if (priv->is_internal_short_scan) {
scan->channel_count =
iwl_get_single_channel_for_scan(priv, vif, band,
(void *)&scan->data[le16_to_cpu(
scan->tx_cmd.len)]);
} else {
scan->channel_count =
iwl_get_channels_for_scan(priv, vif, band,
is_active, n_probes,
(void *)&scan->data[le16_to_cpu(
scan->tx_cmd.len)]);
}
if (scan->channel_count == 0) {
IWL_DEBUG_SCAN(priv, "channel count %d\n", scan->channel_count);
goto done;
}
cmd.len += le16_to_cpu(scan->tx_cmd.len) +
scan->channel_count * sizeof(struct iwl_scan_channel);
cmd.data = scan;
scan->len = cpu_to_le16(cmd.len);
set_bit(STATUS_SCAN_HW, &priv->status);
if (iwl_send_cmd_sync(priv, &cmd))
goto done;
queue_delayed_work(priv->workqueue, &priv->scan_check,
IWL_SCAN_CHECK_WATCHDOG);
return;
done:
/* Cannot perform scan. Clear the scanning bits from status so the
 * next scan request can proceed; if they are left set, every
 * subsequent scan will fail.
 */
clear_bit(STATUS_SCAN_HW, &priv->status);
clear_bit(STATUS_SCANNING, &priv->status);
/* inform mac80211 scan aborted */
queue_work(priv->workqueue, &priv->scan_completed);
}
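/* Add or remove the IBSS BSSID station entry for the given interface */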
int iwlagn_manage_ibss_station(struct iwl_priv *priv,
struct ieee80211_vif *vif, bool add)
{
struct iwl_vif_priv *vif_priv = (void *)vif->drv_priv;
if (add)
return iwl_add_bssid_station(priv, vif->bss_conf.bssid, true,
&vif_priv->ibss_bssid_sta_id);
return iwl_remove_station(priv, vif_priv->ibss_bssid_sta_id,
vif->bss_conf.bssid);
}
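/*
 * Account for reclaimed TFDs on a station/TID, clamping at zero if more
 * frames were freed than were recorded in flight. The caller must hold
 * sta_lock (see the WARN_ON below).
 */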
void iwl_free_tfds_in_queue(struct iwl_priv *priv,
int sta_id, int tid, int freed)
{
WARN_ON(!spin_is_locked(&priv->sta_lock));
if (priv->stations[sta_id].tid[tid].tfds_in_queue >= freed)
priv->stations[sta_id].tid[tid].tfds_in_queue -= freed;
else {
IWL_DEBUG_TX(priv, "free more than tfds_in_queue (%u:%d)\n",
priv->stations[sta_id].tid[tid].tfds_in_queue,
freed);
priv->stations[sta_id].tid[tid].tfds_in_queue = 0;
}
}