iwlwifi: move PCIe into subdirectory

Structure the code a bit more and move all PCIe code,
including the hardware configuration files, into a
PCIe-specific subdirectory.

Reviewed-by: Emmanuel Grumbach <emmanuel.grumbach@intel.com>
Signed-off-by: Johannes Berg <johannes.berg@intel.com>
diff --git a/drivers/net/wireless/iwlwifi/pcie/1000.c b/drivers/net/wireless/iwlwifi/pcie/1000.c
new file mode 100644
index 0000000..0cfbf9f
--- /dev/null
+++ b/drivers/net/wireless/iwlwifi/pcie/1000.c
@@ -0,0 +1,126 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2008 - 2012 Intel Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
+ *
+ * The full GNU General Public License is included in this distribution in the
+ * file called LICENSE.
+ *
+ * Contact Information:
+ *  Intel Linux Wireless <ilw@linux.intel.com>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ *
+ *****************************************************************************/
+
+#include <linux/module.h>
+#include <linux/stringify.h>
+#include "iwl-config.h"
+#include "iwl-csr.h"
+#include "iwl-agn-hw.h"
+#include "cfg.h"
+
+/* Highest firmware API version supported */
+#define IWL1000_UCODE_API_MAX 5
+#define IWL100_UCODE_API_MAX 5
+
+/* Oldest version we won't warn about */
+#define IWL1000_UCODE_API_OK 5
+#define IWL100_UCODE_API_OK 5
+
+/* Lowest firmware API version supported */
+#define IWL1000_UCODE_API_MIN 1
+#define IWL100_UCODE_API_MIN 5
+
+/* EEPROM version */
+#define EEPROM_1000_TX_POWER_VERSION	(4)
+#define EEPROM_1000_EEPROM_VERSION	(0x15C)
+
+#define IWL1000_FW_PRE "iwlwifi-1000-"
+#define IWL1000_MODULE_FIRMWARE(api) IWL1000_FW_PRE __stringify(api) ".ucode"
+
+#define IWL100_FW_PRE "iwlwifi-100-"
+#define IWL100_MODULE_FIRMWARE(api) IWL100_FW_PRE __stringify(api) ".ucode"
+
+
+static const struct iwl_base_params iwl1000_base_params = {
+	.num_of_queues = IWLAGN_NUM_QUEUES,
+	.eeprom_size = OTP_LOW_IMAGE_SIZE,
+	.pll_cfg_val = CSR50_ANA_PLL_CFG_VAL,
+	.max_ll_items = OTP_MAX_LL_ITEMS_1000,
+	.shadow_ram_support = false,
+	.led_compensation = 51,
+	.support_ct_kill_exit = true,
+	.plcp_delta_threshold = IWL_MAX_PLCP_ERR_EXT_LONG_THRESHOLD_DEF,
+	.chain_noise_scale = 1000,
+	.wd_timeout = IWL_WATCHHDOG_DISABLED,
+	.max_event_log_size = 128,
+};
+
+static const struct iwl_ht_params iwl1000_ht_params = {
+	.ht_greenfield_support = true,
+	.use_rts_for_aggregation = true, /* use rts/cts protection */
+};
+
+#define IWL_DEVICE_1000						\
+	.fw_name_pre = IWL1000_FW_PRE,				\
+	.ucode_api_max = IWL1000_UCODE_API_MAX,			\
+	.ucode_api_ok = IWL1000_UCODE_API_OK,			\
+	.ucode_api_min = IWL1000_UCODE_API_MIN,			\
+	.device_family = IWL_DEVICE_FAMILY_1000,		\
+	.max_inst_size = IWLAGN_RTC_INST_SIZE,			\
+	.max_data_size = IWLAGN_RTC_DATA_SIZE,			\
+	.eeprom_ver = EEPROM_1000_EEPROM_VERSION,		\
+	.eeprom_calib_ver = EEPROM_1000_TX_POWER_VERSION,	\
+	.base_params = &iwl1000_base_params,			\
+	.led_mode = IWL_LED_BLINK
+
+const struct iwl_cfg iwl1000_bgn_cfg = {
+	.name = "Intel(R) Centrino(R) Wireless-N 1000 BGN",
+	IWL_DEVICE_1000,
+	.ht_params = &iwl1000_ht_params,
+};
+
+const struct iwl_cfg iwl1000_bg_cfg = {
+	.name = "Intel(R) Centrino(R) Wireless-N 1000 BG",
+	IWL_DEVICE_1000,
+};
+
+#define IWL_DEVICE_100						\
+	.fw_name_pre = IWL100_FW_PRE,				\
+	.ucode_api_max = IWL100_UCODE_API_MAX,			\
+	.ucode_api_ok = IWL100_UCODE_API_OK,			\
+	.ucode_api_min = IWL100_UCODE_API_MIN,			\
+	.device_family = IWL_DEVICE_FAMILY_100,			\
+	.max_inst_size = IWLAGN_RTC_INST_SIZE,			\
+	.max_data_size = IWLAGN_RTC_DATA_SIZE,			\
+	.eeprom_ver = EEPROM_1000_EEPROM_VERSION,		\
+	.eeprom_calib_ver = EEPROM_1000_TX_POWER_VERSION,	\
+	.base_params = &iwl1000_base_params,			\
+	.led_mode = IWL_LED_RF_STATE,				\
+	.rx_with_siso_diversity = true
+
+const struct iwl_cfg iwl100_bgn_cfg = {
+	.name = "Intel(R) Centrino(R) Wireless-N 100 BGN",
+	IWL_DEVICE_100,
+	.ht_params = &iwl1000_ht_params,
+};
+
+const struct iwl_cfg iwl100_bg_cfg = {
+	.name = "Intel(R) Centrino(R) Wireless-N 100 BG",
+	IWL_DEVICE_100,
+};
+
+MODULE_FIRMWARE(IWL1000_MODULE_FIRMWARE(IWL1000_UCODE_API_OK));
+MODULE_FIRMWARE(IWL100_MODULE_FIRMWARE(IWL100_UCODE_API_OK));
diff --git a/drivers/net/wireless/iwlwifi/pcie/2000.c b/drivers/net/wireless/iwlwifi/pcie/2000.c
new file mode 100644
index 0000000..f6efa12
--- /dev/null
+++ b/drivers/net/wireless/iwlwifi/pcie/2000.c
@@ -0,0 +1,225 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2008 - 2012 Intel Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
+ *
+ * The full GNU General Public License is included in this distribution in the
+ * file called LICENSE.
+ *
+ * Contact Information:
+ *  Intel Linux Wireless <ilw@linux.intel.com>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ *
+ *****************************************************************************/
+
+#include <linux/module.h>
+#include <linux/stringify.h>
+#include "iwl-config.h"
+#include "iwl-agn-hw.h"
+#include "cfg.h"
+#include "iwl-commands.h" /* needed for BT for now */
+
+/* Highest firmware API version supported */
+#define IWL2030_UCODE_API_MAX 6
+#define IWL2000_UCODE_API_MAX 6
+#define IWL105_UCODE_API_MAX 6
+#define IWL135_UCODE_API_MAX 6
+
+/* Oldest version we won't warn about */
+#define IWL2030_UCODE_API_OK 6
+#define IWL2000_UCODE_API_OK 6
+#define IWL105_UCODE_API_OK 6
+#define IWL135_UCODE_API_OK 6
+
+/* Lowest firmware API version supported */
+#define IWL2030_UCODE_API_MIN 5
+#define IWL2000_UCODE_API_MIN 5
+#define IWL105_UCODE_API_MIN 5
+#define IWL135_UCODE_API_MIN 5
+
+/* EEPROM version */
+#define EEPROM_2000_TX_POWER_VERSION	(6)
+#define EEPROM_2000_EEPROM_VERSION	(0x805)
+
+
+#define IWL2030_FW_PRE "iwlwifi-2030-"
+#define IWL2030_MODULE_FIRMWARE(api) IWL2030_FW_PRE __stringify(api) ".ucode"
+
+#define IWL2000_FW_PRE "iwlwifi-2000-"
+#define IWL2000_MODULE_FIRMWARE(api) IWL2000_FW_PRE __stringify(api) ".ucode"
+
+#define IWL105_FW_PRE "iwlwifi-105-"
+#define IWL105_MODULE_FIRMWARE(api) IWL105_FW_PRE __stringify(api) ".ucode"
+
+#define IWL135_FW_PRE "iwlwifi-135-"
+#define IWL135_MODULE_FIRMWARE(api) IWL135_FW_PRE __stringify(api) ".ucode"
+
+static const struct iwl_base_params iwl2000_base_params = {
+	.eeprom_size = OTP_LOW_IMAGE_SIZE,
+	.num_of_queues = IWLAGN_NUM_QUEUES,
+	.pll_cfg_val = 0,
+	.max_ll_items = OTP_MAX_LL_ITEMS_2x00,
+	.shadow_ram_support = true,
+	.led_compensation = 51,
+	.adv_thermal_throttle = true,
+	.support_ct_kill_exit = true,
+	.plcp_delta_threshold = IWL_MAX_PLCP_ERR_THRESHOLD_DEF,
+	.chain_noise_scale = 1000,
+	.wd_timeout = IWL_DEF_WD_TIMEOUT,
+	.max_event_log_size = 512,
+	.shadow_reg_enable = false, /* TODO: fix bugs using this feature */
+	.hd_v2 = true,
+};
+
+
+static const struct iwl_base_params iwl2030_base_params = {
+	.eeprom_size = OTP_LOW_IMAGE_SIZE,
+	.num_of_queues = IWLAGN_NUM_QUEUES,
+	.pll_cfg_val = 0,
+	.max_ll_items = OTP_MAX_LL_ITEMS_2x00,
+	.shadow_ram_support = true,
+	.led_compensation = 57,
+	.adv_thermal_throttle = true,
+	.support_ct_kill_exit = true,
+	.plcp_delta_threshold = IWL_MAX_PLCP_ERR_THRESHOLD_DEF,
+	.chain_noise_scale = 1000,
+	.wd_timeout = IWL_LONG_WD_TIMEOUT,
+	.max_event_log_size = 512,
+	.shadow_reg_enable = false, /* TODO: fix bugs using this feature */
+	.hd_v2 = true,
+};
+
+static const struct iwl_ht_params iwl2000_ht_params = {
+	.ht_greenfield_support = true,
+	.use_rts_for_aggregation = true, /* use rts/cts protection */
+};
+
+static const struct iwl_bt_params iwl2030_bt_params = {
+	/* Due to bluetooth, we transmit 2.4 GHz probes only on antenna A */
+	.advanced_bt_coexist = true,
+	.agg_time_limit = BT_AGG_THRESHOLD_DEF,
+	.bt_init_traffic_load = IWL_BT_COEX_TRAFFIC_LOAD_NONE,
+	.bt_prio_boost = IWLAGN_BT_PRIO_BOOST_DEFAULT,
+	.bt_sco_disable = true,
+	.bt_session_2 = true,
+};
+
+#define IWL_DEVICE_2000						\
+	.fw_name_pre = IWL2000_FW_PRE,				\
+	.ucode_api_max = IWL2000_UCODE_API_MAX,			\
+	.ucode_api_ok = IWL2000_UCODE_API_OK,			\
+	.ucode_api_min = IWL2000_UCODE_API_MIN,			\
+	.device_family = IWL_DEVICE_FAMILY_2000,		\
+	.max_inst_size = IWL60_RTC_INST_SIZE,			\
+	.max_data_size = IWL60_RTC_DATA_SIZE,			\
+	.eeprom_ver = EEPROM_2000_EEPROM_VERSION,		\
+	.eeprom_calib_ver = EEPROM_2000_TX_POWER_VERSION,	\
+	.base_params = &iwl2000_base_params,			\
+	.need_temp_offset_calib = true,				\
+	.temp_offset_v2 = true,					\
+	.led_mode = IWL_LED_RF_STATE
+
+const struct iwl_cfg iwl2000_2bgn_cfg = {
+	.name = "Intel(R) Centrino(R) Wireless-N 2200 BGN",
+	IWL_DEVICE_2000,
+	.ht_params = &iwl2000_ht_params,
+};
+
+const struct iwl_cfg iwl2000_2bgn_d_cfg = {
+	.name = "Intel(R) Centrino(R) Wireless-N 2200D BGN",
+	IWL_DEVICE_2000,
+	.ht_params = &iwl2000_ht_params,
+};
+
+#define IWL_DEVICE_2030						\
+	.fw_name_pre = IWL2030_FW_PRE,				\
+	.ucode_api_max = IWL2030_UCODE_API_MAX,			\
+	.ucode_api_ok = IWL2030_UCODE_API_OK,			\
+	.ucode_api_min = IWL2030_UCODE_API_MIN,			\
+	.device_family = IWL_DEVICE_FAMILY_2030,		\
+	.max_inst_size = IWL60_RTC_INST_SIZE,			\
+	.max_data_size = IWL60_RTC_DATA_SIZE,			\
+	.eeprom_ver = EEPROM_2000_EEPROM_VERSION,		\
+	.eeprom_calib_ver = EEPROM_2000_TX_POWER_VERSION,	\
+	.base_params = &iwl2030_base_params,			\
+	.bt_params = &iwl2030_bt_params,			\
+	.need_temp_offset_calib = true,				\
+	.temp_offset_v2 = true,					\
+	.led_mode = IWL_LED_RF_STATE,				\
+	.adv_pm = true
+
+const struct iwl_cfg iwl2030_2bgn_cfg = {
+	.name = "Intel(R) Centrino(R) Wireless-N 2230 BGN",
+	IWL_DEVICE_2030,
+	.ht_params = &iwl2000_ht_params,
+};
+
+#define IWL_DEVICE_105						\
+	.fw_name_pre = IWL105_FW_PRE,				\
+	.ucode_api_max = IWL105_UCODE_API_MAX,			\
+	.ucode_api_ok = IWL105_UCODE_API_OK,			\
+	.ucode_api_min = IWL105_UCODE_API_MIN,			\
+	.device_family = IWL_DEVICE_FAMILY_105,			\
+	.max_inst_size = IWL60_RTC_INST_SIZE,			\
+	.max_data_size = IWL60_RTC_DATA_SIZE,			\
+	.eeprom_ver = EEPROM_2000_EEPROM_VERSION,		\
+	.eeprom_calib_ver = EEPROM_2000_TX_POWER_VERSION,	\
+	.base_params = &iwl2000_base_params,			\
+	.need_temp_offset_calib = true,				\
+	.temp_offset_v2 = true,					\
+	.led_mode = IWL_LED_RF_STATE,				\
+	.adv_pm = true,						\
+	.rx_with_siso_diversity = true
+
+const struct iwl_cfg iwl105_bgn_cfg = {
+	.name = "Intel(R) Centrino(R) Wireless-N 105 BGN",
+	IWL_DEVICE_105,
+	.ht_params = &iwl2000_ht_params,
+};
+
+const struct iwl_cfg iwl105_bgn_d_cfg = {
+	.name = "Intel(R) Centrino(R) Wireless-N 105D BGN",
+	IWL_DEVICE_105,
+	.ht_params = &iwl2000_ht_params,
+};
+
+#define IWL_DEVICE_135						\
+	.fw_name_pre = IWL135_FW_PRE,				\
+	.ucode_api_max = IWL135_UCODE_API_MAX,			\
+	.ucode_api_ok = IWL135_UCODE_API_OK,			\
+	.ucode_api_min = IWL135_UCODE_API_MIN,			\
+	.device_family = IWL_DEVICE_FAMILY_135,			\
+	.max_inst_size = IWL60_RTC_INST_SIZE,			\
+	.max_data_size = IWL60_RTC_DATA_SIZE,			\
+	.eeprom_ver = EEPROM_2000_EEPROM_VERSION,		\
+	.eeprom_calib_ver = EEPROM_2000_TX_POWER_VERSION,	\
+	.base_params = &iwl2030_base_params,			\
+	.bt_params = &iwl2030_bt_params,			\
+	.need_temp_offset_calib = true,				\
+	.temp_offset_v2 = true,					\
+	.led_mode = IWL_LED_RF_STATE,				\
+	.adv_pm = true,						\
+	.rx_with_siso_diversity = true
+
+const struct iwl_cfg iwl135_bgn_cfg = {
+	.name = "Intel(R) Centrino(R) Wireless-N 135 BGN",
+	IWL_DEVICE_135,
+	.ht_params = &iwl2000_ht_params,
+};
+
+MODULE_FIRMWARE(IWL2000_MODULE_FIRMWARE(IWL2000_UCODE_API_OK));
+MODULE_FIRMWARE(IWL2030_MODULE_FIRMWARE(IWL2030_UCODE_API_OK));
+MODULE_FIRMWARE(IWL105_MODULE_FIRMWARE(IWL105_UCODE_API_OK));
+MODULE_FIRMWARE(IWL135_MODULE_FIRMWARE(IWL135_UCODE_API_OK));
diff --git a/drivers/net/wireless/iwlwifi/pcie/5000.c b/drivers/net/wireless/iwlwifi/pcie/5000.c
new file mode 100644
index 0000000..92e502e
--- /dev/null
+++ b/drivers/net/wireless/iwlwifi/pcie/5000.c
@@ -0,0 +1,164 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2007 - 2012 Intel Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
+ *
+ * The full GNU General Public License is included in this distribution in the
+ * file called LICENSE.
+ *
+ * Contact Information:
+ *  Intel Linux Wireless <ilw@linux.intel.com>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ *
+ *****************************************************************************/
+
+#include <linux/module.h>
+#include <linux/stringify.h>
+#include "iwl-config.h"
+#include "iwl-agn-hw.h"
+#include "iwl-csr.h"
+#include "cfg.h"
+
+/* Highest firmware API version supported */
+#define IWL5000_UCODE_API_MAX 5
+#define IWL5150_UCODE_API_MAX 2
+
+/* Oldest version we won't warn about */
+#define IWL5000_UCODE_API_OK 5
+#define IWL5150_UCODE_API_OK 2
+
+/* Lowest firmware API version supported */
+#define IWL5000_UCODE_API_MIN 1
+#define IWL5150_UCODE_API_MIN 1
+
+/* EEPROM versions */
+#define EEPROM_5000_TX_POWER_VERSION	(4)
+#define EEPROM_5000_EEPROM_VERSION	(0x11A)
+#define EEPROM_5050_TX_POWER_VERSION	(4)
+#define EEPROM_5050_EEPROM_VERSION	(0x21E)
+
+#define IWL5000_FW_PRE "iwlwifi-5000-"
+#define IWL5000_MODULE_FIRMWARE(api) IWL5000_FW_PRE __stringify(api) ".ucode"
+
+#define IWL5150_FW_PRE "iwlwifi-5150-"
+#define IWL5150_MODULE_FIRMWARE(api) IWL5150_FW_PRE __stringify(api) ".ucode"
+
+static const struct iwl_base_params iwl5000_base_params = {
+	.eeprom_size = IWLAGN_EEPROM_IMG_SIZE,
+	.num_of_queues = IWLAGN_NUM_QUEUES,
+	.pll_cfg_val = CSR50_ANA_PLL_CFG_VAL,
+	.led_compensation = 51,
+	.plcp_delta_threshold = IWL_MAX_PLCP_ERR_LONG_THRESHOLD_DEF,
+	.chain_noise_scale = 1000,
+	.wd_timeout = IWL_WATCHHDOG_DISABLED,
+	.max_event_log_size = 512,
+	.no_idle_support = true,
+};
+
+static const struct iwl_ht_params iwl5000_ht_params = {
+	.ht_greenfield_support = true,
+};
+
+#define IWL_DEVICE_5000						\
+	.fw_name_pre = IWL5000_FW_PRE,				\
+	.ucode_api_max = IWL5000_UCODE_API_MAX,			\
+	.ucode_api_ok = IWL5000_UCODE_API_OK,			\
+	.ucode_api_min = IWL5000_UCODE_API_MIN,			\
+	.device_family = IWL_DEVICE_FAMILY_5000,		\
+	.max_inst_size = IWLAGN_RTC_INST_SIZE,			\
+	.max_data_size = IWLAGN_RTC_DATA_SIZE,			\
+	.eeprom_ver = EEPROM_5000_EEPROM_VERSION,		\
+	.eeprom_calib_ver = EEPROM_5000_TX_POWER_VERSION,	\
+	.base_params = &iwl5000_base_params,			\
+	.led_mode = IWL_LED_BLINK
+
+const struct iwl_cfg iwl5300_agn_cfg = {
+	.name = "Intel(R) Ultimate N WiFi Link 5300 AGN",
+	IWL_DEVICE_5000,
+	/* at least EEPROM 0x11A has wrong info */
+	.valid_tx_ant = ANT_ABC,	/* .cfg overwrite */
+	.valid_rx_ant = ANT_ABC,	/* .cfg overwrite */
+	.ht_params = &iwl5000_ht_params,
+};
+
+const struct iwl_cfg iwl5100_bgn_cfg = {
+	.name = "Intel(R) WiFi Link 5100 BGN",
+	IWL_DEVICE_5000,
+	.valid_tx_ant = ANT_B,		/* .cfg overwrite */
+	.valid_rx_ant = ANT_AB,		/* .cfg overwrite */
+	.ht_params = &iwl5000_ht_params,
+};
+
+const struct iwl_cfg iwl5100_abg_cfg = {
+	.name = "Intel(R) WiFi Link 5100 ABG",
+	IWL_DEVICE_5000,
+	.valid_tx_ant = ANT_B,		/* .cfg overwrite */
+	.valid_rx_ant = ANT_AB,		/* .cfg overwrite */
+};
+
+const struct iwl_cfg iwl5100_agn_cfg = {
+	.name = "Intel(R) WiFi Link 5100 AGN",
+	IWL_DEVICE_5000,
+	.valid_tx_ant = ANT_B,		/* .cfg overwrite */
+	.valid_rx_ant = ANT_AB,		/* .cfg overwrite */
+	.ht_params = &iwl5000_ht_params,
+};
+
+const struct iwl_cfg iwl5350_agn_cfg = {
+	.name = "Intel(R) WiMAX/WiFi Link 5350 AGN",
+	.fw_name_pre = IWL5000_FW_PRE,
+	.ucode_api_max = IWL5000_UCODE_API_MAX,
+	.ucode_api_ok = IWL5000_UCODE_API_OK,
+	.ucode_api_min = IWL5000_UCODE_API_MIN,
+	.device_family = IWL_DEVICE_FAMILY_5000,
+	.max_inst_size = IWLAGN_RTC_INST_SIZE,
+	.max_data_size = IWLAGN_RTC_DATA_SIZE,
+	.eeprom_ver = EEPROM_5050_EEPROM_VERSION,
+	.eeprom_calib_ver = EEPROM_5050_TX_POWER_VERSION,
+	.base_params = &iwl5000_base_params,
+	.ht_params = &iwl5000_ht_params,
+	.led_mode = IWL_LED_BLINK,
+	.internal_wimax_coex = true,
+};
+
+#define IWL_DEVICE_5150						\
+	.fw_name_pre = IWL5150_FW_PRE,				\
+	.ucode_api_max = IWL5150_UCODE_API_MAX,			\
+	.ucode_api_ok = IWL5150_UCODE_API_OK,			\
+	.ucode_api_min = IWL5150_UCODE_API_MIN,			\
+	.device_family = IWL_DEVICE_FAMILY_5150,		\
+	.max_inst_size = IWLAGN_RTC_INST_SIZE,			\
+	.max_data_size = IWLAGN_RTC_DATA_SIZE,			\
+	.eeprom_ver = EEPROM_5050_EEPROM_VERSION,		\
+	.eeprom_calib_ver = EEPROM_5050_TX_POWER_VERSION,	\
+	.base_params = &iwl5000_base_params,			\
+	.no_xtal_calib = true,					\
+	.led_mode = IWL_LED_BLINK,				\
+	.internal_wimax_coex = true
+
+const struct iwl_cfg iwl5150_agn_cfg = {
+	.name = "Intel(R) WiMAX/WiFi Link 5150 AGN",
+	IWL_DEVICE_5150,
+	.ht_params = &iwl5000_ht_params,
+};
+
+const struct iwl_cfg iwl5150_abg_cfg = {
+	.name = "Intel(R) WiMAX/WiFi Link 5150 ABG",
+	IWL_DEVICE_5150,
+};
+
+MODULE_FIRMWARE(IWL5000_MODULE_FIRMWARE(IWL5000_UCODE_API_OK));
+MODULE_FIRMWARE(IWL5150_MODULE_FIRMWARE(IWL5150_UCODE_API_OK));
diff --git a/drivers/net/wireless/iwlwifi/pcie/6000.c b/drivers/net/wireless/iwlwifi/pcie/6000.c
new file mode 100644
index 0000000..7360cd9
--- /dev/null
+++ b/drivers/net/wireless/iwlwifi/pcie/6000.c
@@ -0,0 +1,363 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2008 - 2012 Intel Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
+ *
+ * The full GNU General Public License is included in this distribution in the
+ * file called LICENSE.
+ *
+ * Contact Information:
+ *  Intel Linux Wireless <ilw@linux.intel.com>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ *
+ *****************************************************************************/
+
+#include <linux/module.h>
+#include <linux/stringify.h>
+#include "iwl-config.h"
+#include "iwl-agn-hw.h"
+#include "cfg.h"
+#include "iwl-commands.h" /* needed for BT for now */
+
+/* Highest firmware API version supported */
+#define IWL6000_UCODE_API_MAX 6
+#define IWL6050_UCODE_API_MAX 5
+#define IWL6000G2_UCODE_API_MAX 6
+
+/* Oldest version we won't warn about */
+#define IWL6000_UCODE_API_OK 4
+#define IWL6000G2_UCODE_API_OK 5
+#define IWL6050_UCODE_API_OK 5
+#define IWL6000G2B_UCODE_API_OK 6
+
+/* Lowest firmware API version supported */
+#define IWL6000_UCODE_API_MIN 4
+#define IWL6050_UCODE_API_MIN 4
+#define IWL6000G2_UCODE_API_MIN 4
+
+/* EEPROM versions */
+#define EEPROM_6000_TX_POWER_VERSION	(4)
+#define EEPROM_6000_EEPROM_VERSION	(0x423)
+#define EEPROM_6050_TX_POWER_VERSION	(4)
+#define EEPROM_6050_EEPROM_VERSION	(0x532)
+#define EEPROM_6150_TX_POWER_VERSION	(6)
+#define EEPROM_6150_EEPROM_VERSION	(0x553)
+#define EEPROM_6005_TX_POWER_VERSION	(6)
+#define EEPROM_6005_EEPROM_VERSION	(0x709)
+#define EEPROM_6030_TX_POWER_VERSION	(6)
+#define EEPROM_6030_EEPROM_VERSION	(0x709)
+#define EEPROM_6035_TX_POWER_VERSION	(6)
+#define EEPROM_6035_EEPROM_VERSION	(0x753)
+
+#define IWL6000_FW_PRE "iwlwifi-6000-"
+#define IWL6000_MODULE_FIRMWARE(api) IWL6000_FW_PRE __stringify(api) ".ucode"
+
+#define IWL6050_FW_PRE "iwlwifi-6050-"
+#define IWL6050_MODULE_FIRMWARE(api) IWL6050_FW_PRE __stringify(api) ".ucode"
+
+#define IWL6005_FW_PRE "iwlwifi-6000g2a-"
+#define IWL6005_MODULE_FIRMWARE(api) IWL6005_FW_PRE __stringify(api) ".ucode"
+
+#define IWL6030_FW_PRE "iwlwifi-6000g2b-"
+#define IWL6030_MODULE_FIRMWARE(api) IWL6030_FW_PRE __stringify(api) ".ucode"
+
+static const struct iwl_base_params iwl6000_base_params = {
+	.eeprom_size = OTP_LOW_IMAGE_SIZE,
+	.num_of_queues = IWLAGN_NUM_QUEUES,
+	.pll_cfg_val = 0,
+	.max_ll_items = OTP_MAX_LL_ITEMS_6x00,
+	.shadow_ram_support = true,
+	.led_compensation = 51,
+	.adv_thermal_throttle = true,
+	.support_ct_kill_exit = true,
+	.plcp_delta_threshold = IWL_MAX_PLCP_ERR_THRESHOLD_DEF,
+	.chain_noise_scale = 1000,
+	.wd_timeout = IWL_DEF_WD_TIMEOUT,
+	.max_event_log_size = 512,
+	.shadow_reg_enable = false, /* TODO: fix bugs using this feature */
+};
+
+static const struct iwl_base_params iwl6050_base_params = {
+	.eeprom_size = OTP_LOW_IMAGE_SIZE,
+	.num_of_queues = IWLAGN_NUM_QUEUES,
+	.pll_cfg_val = 0,
+	.max_ll_items = OTP_MAX_LL_ITEMS_6x50,
+	.shadow_ram_support = true,
+	.led_compensation = 51,
+	.adv_thermal_throttle = true,
+	.support_ct_kill_exit = true,
+	.plcp_delta_threshold = IWL_MAX_PLCP_ERR_THRESHOLD_DEF,
+	.chain_noise_scale = 1500,
+	.wd_timeout = IWL_DEF_WD_TIMEOUT,
+	.max_event_log_size = 1024,
+	.shadow_reg_enable = false, /* TODO: fix bugs using this feature */
+};
+
+static const struct iwl_base_params iwl6000_g2_base_params = {
+	.eeprom_size = OTP_LOW_IMAGE_SIZE,
+	.num_of_queues = IWLAGN_NUM_QUEUES,
+	.pll_cfg_val = 0,
+	.max_ll_items = OTP_MAX_LL_ITEMS_6x00,
+	.shadow_ram_support = true,
+	.led_compensation = 57,
+	.adv_thermal_throttle = true,
+	.support_ct_kill_exit = true,
+	.plcp_delta_threshold = IWL_MAX_PLCP_ERR_THRESHOLD_DEF,
+	.chain_noise_scale = 1000,
+	.wd_timeout = IWL_LONG_WD_TIMEOUT,
+	.max_event_log_size = 512,
+	.shadow_reg_enable = false, /* TODO: fix bugs using this feature */
+};
+
+static const struct iwl_ht_params iwl6000_ht_params = {
+	.ht_greenfield_support = true,
+	.use_rts_for_aggregation = true, /* use rts/cts protection */
+};
+
+static const struct iwl_bt_params iwl6000_bt_params = {
+	/* Due to bluetooth, we transmit 2.4 GHz probes only on antenna A */
+	.advanced_bt_coexist = true,
+	.agg_time_limit = BT_AGG_THRESHOLD_DEF,
+	.bt_init_traffic_load = IWL_BT_COEX_TRAFFIC_LOAD_NONE,
+	.bt_prio_boost = IWLAGN_BT_PRIO_BOOST_DEFAULT,
+	.bt_sco_disable = true,
+};
+
+#define IWL_DEVICE_6005						\
+	.fw_name_pre = IWL6005_FW_PRE,				\
+	.ucode_api_max = IWL6000G2_UCODE_API_MAX,		\
+	.ucode_api_ok = IWL6000G2_UCODE_API_OK,			\
+	.ucode_api_min = IWL6000G2_UCODE_API_MIN,		\
+	.device_family = IWL_DEVICE_FAMILY_6005,		\
+	.max_inst_size = IWL60_RTC_INST_SIZE,			\
+	.max_data_size = IWL60_RTC_DATA_SIZE,			\
+	.eeprom_ver = EEPROM_6005_EEPROM_VERSION,		\
+	.eeprom_calib_ver = EEPROM_6005_TX_POWER_VERSION,	\
+	.base_params = &iwl6000_g2_base_params,			\
+	.need_temp_offset_calib = true,				\
+	.led_mode = IWL_LED_RF_STATE
+
+const struct iwl_cfg iwl6005_2agn_cfg = {
+	.name = "Intel(R) Centrino(R) Advanced-N 6205 AGN",
+	IWL_DEVICE_6005,
+	.ht_params = &iwl6000_ht_params,
+};
+
+const struct iwl_cfg iwl6005_2abg_cfg = {
+	.name = "Intel(R) Centrino(R) Advanced-N 6205 ABG",
+	IWL_DEVICE_6005,
+};
+
+const struct iwl_cfg iwl6005_2bg_cfg = {
+	.name = "Intel(R) Centrino(R) Advanced-N 6205 BG",
+	IWL_DEVICE_6005,
+};
+
+const struct iwl_cfg iwl6005_2agn_sff_cfg = {
+	.name = "Intel(R) Centrino(R) Advanced-N 6205S AGN",
+	IWL_DEVICE_6005,
+	.ht_params = &iwl6000_ht_params,
+};
+
+const struct iwl_cfg iwl6005_2agn_d_cfg = {
+	.name = "Intel(R) Centrino(R) Advanced-N 6205D AGN",
+	IWL_DEVICE_6005,
+	.ht_params = &iwl6000_ht_params,
+};
+
+const struct iwl_cfg iwl6005_2agn_mow1_cfg = {
+	.name = "Intel(R) Centrino(R) Advanced-N 6206 AGN",
+	IWL_DEVICE_6005,
+	.ht_params = &iwl6000_ht_params,
+};
+
+const struct iwl_cfg iwl6005_2agn_mow2_cfg = {
+	.name = "Intel(R) Centrino(R) Advanced-N 6207 AGN",
+	IWL_DEVICE_6005,
+	.ht_params = &iwl6000_ht_params,
+};
+
+#define IWL_DEVICE_6030						\
+	.fw_name_pre = IWL6030_FW_PRE,				\
+	.ucode_api_max = IWL6000G2_UCODE_API_MAX,		\
+	.ucode_api_ok = IWL6000G2B_UCODE_API_OK,		\
+	.ucode_api_min = IWL6000G2_UCODE_API_MIN,		\
+	.device_family = IWL_DEVICE_FAMILY_6030,		\
+	.max_inst_size = IWL60_RTC_INST_SIZE,			\
+	.max_data_size = IWL60_RTC_DATA_SIZE,			\
+	.eeprom_ver = EEPROM_6030_EEPROM_VERSION,		\
+	.eeprom_calib_ver = EEPROM_6030_TX_POWER_VERSION,	\
+	.base_params = &iwl6000_g2_base_params,			\
+	.bt_params = &iwl6000_bt_params,			\
+	.need_temp_offset_calib = true,				\
+	.led_mode = IWL_LED_RF_STATE,				\
+	.adv_pm = true						\
+
+const struct iwl_cfg iwl6030_2agn_cfg = {
+	.name = "Intel(R) Centrino(R) Advanced-N 6230 AGN",
+	IWL_DEVICE_6030,
+	.ht_params = &iwl6000_ht_params,
+};
+
+const struct iwl_cfg iwl6030_2abg_cfg = {
+	.name = "Intel(R) Centrino(R) Advanced-N 6230 ABG",
+	IWL_DEVICE_6030,
+};
+
+const struct iwl_cfg iwl6030_2bgn_cfg = {
+	.name = "Intel(R) Centrino(R) Advanced-N 6230 BGN",
+	IWL_DEVICE_6030,
+	.ht_params = &iwl6000_ht_params,
+};
+
+const struct iwl_cfg iwl6030_2bg_cfg = {
+	.name = "Intel(R) Centrino(R) Advanced-N 6230 BG",
+	IWL_DEVICE_6030,
+};
+
+const struct iwl_cfg iwl6035_2agn_cfg = {
+	.name = "Intel(R) Centrino(R) Advanced-N 6235 AGN",
+	IWL_DEVICE_6030,
+	.ht_params = &iwl6000_ht_params,
+};
+
+const struct iwl_cfg iwl1030_bgn_cfg = {
+	.name = "Intel(R) Centrino(R) Wireless-N 1030 BGN",
+	IWL_DEVICE_6030,
+	.ht_params = &iwl6000_ht_params,
+};
+
+const struct iwl_cfg iwl1030_bg_cfg = {
+	.name = "Intel(R) Centrino(R) Wireless-N 1030 BG",
+	IWL_DEVICE_6030,
+};
+
+const struct iwl_cfg iwl130_bgn_cfg = {
+	.name = "Intel(R) Centrino(R) Wireless-N 130 BGN",
+	IWL_DEVICE_6030,
+	.ht_params = &iwl6000_ht_params,
+	.rx_with_siso_diversity = true,
+};
+
+const struct iwl_cfg iwl130_bg_cfg = {
+	.name = "Intel(R) Centrino(R) Wireless-N 130 BG",
+	IWL_DEVICE_6030,
+	.rx_with_siso_diversity = true,
+};
+
+/*
+ * "i": Internal configuration, use internal Power Amplifier
+ */
+#define IWL_DEVICE_6000i					\
+	.fw_name_pre = IWL6000_FW_PRE,				\
+	.ucode_api_max = IWL6000_UCODE_API_MAX,			\
+	.ucode_api_ok = IWL6000_UCODE_API_OK,			\
+	.ucode_api_min = IWL6000_UCODE_API_MIN,			\
+	.device_family = IWL_DEVICE_FAMILY_6000i,		\
+	.max_inst_size = IWL60_RTC_INST_SIZE,			\
+	.max_data_size = IWL60_RTC_DATA_SIZE,			\
+	.valid_tx_ant = ANT_BC,		/* .cfg overwrite */	\
+	.valid_rx_ant = ANT_BC,		/* .cfg overwrite */	\
+	.eeprom_ver = EEPROM_6000_EEPROM_VERSION,		\
+	.eeprom_calib_ver = EEPROM_6000_TX_POWER_VERSION,	\
+	.base_params = &iwl6000_base_params,			\
+	.led_mode = IWL_LED_BLINK
+
+const struct iwl_cfg iwl6000i_2agn_cfg = {
+	.name = "Intel(R) Centrino(R) Advanced-N 6200 AGN",
+	IWL_DEVICE_6000i,
+	.ht_params = &iwl6000_ht_params,
+};
+
+const struct iwl_cfg iwl6000i_2abg_cfg = {
+	.name = "Intel(R) Centrino(R) Advanced-N 6200 ABG",
+	IWL_DEVICE_6000i,
+};
+
+const struct iwl_cfg iwl6000i_2bg_cfg = {
+	.name = "Intel(R) Centrino(R) Advanced-N 6200 BG",
+	IWL_DEVICE_6000i,
+};
+
+#define IWL_DEVICE_6050						\
+	.fw_name_pre = IWL6050_FW_PRE,				\
+	.ucode_api_max = IWL6050_UCODE_API_MAX,			\
+	.ucode_api_min = IWL6050_UCODE_API_MIN,			\
+	.device_family = IWL_DEVICE_FAMILY_6050,		\
+	.max_inst_size = IWL60_RTC_INST_SIZE,			\
+	.max_data_size = IWL60_RTC_DATA_SIZE,			\
+	.valid_tx_ant = ANT_AB,		/* .cfg overwrite */	\
+	.valid_rx_ant = ANT_AB,		/* .cfg overwrite */	\
+	.eeprom_ver = EEPROM_6050_EEPROM_VERSION,		\
+	.eeprom_calib_ver = EEPROM_6050_TX_POWER_VERSION,	\
+	.base_params = &iwl6050_base_params,			\
+	.led_mode = IWL_LED_BLINK,				\
+	.internal_wimax_coex = true
+
+const struct iwl_cfg iwl6050_2agn_cfg = {
+	.name = "Intel(R) Centrino(R) Advanced-N + WiMAX 6250 AGN",
+	IWL_DEVICE_6050,
+	.ht_params = &iwl6000_ht_params,
+};
+
+const struct iwl_cfg iwl6050_2abg_cfg = {
+	.name = "Intel(R) Centrino(R) Advanced-N + WiMAX 6250 ABG",
+	IWL_DEVICE_6050,
+};
+
+#define IWL_DEVICE_6150						\
+	.fw_name_pre = IWL6050_FW_PRE,				\
+	.ucode_api_max = IWL6050_UCODE_API_MAX,			\
+	.ucode_api_min = IWL6050_UCODE_API_MIN,			\
+	.device_family = IWL_DEVICE_FAMILY_6150,		\
+	.max_inst_size = IWL60_RTC_INST_SIZE,			\
+	.max_data_size = IWL60_RTC_DATA_SIZE,			\
+	.eeprom_ver = EEPROM_6150_EEPROM_VERSION,		\
+	.eeprom_calib_ver = EEPROM_6150_TX_POWER_VERSION,	\
+	.base_params = &iwl6050_base_params,			\
+	.led_mode = IWL_LED_BLINK,				\
+	.internal_wimax_coex = true
+
+const struct iwl_cfg iwl6150_bgn_cfg = {
+	.name = "Intel(R) Centrino(R) Wireless-N + WiMAX 6150 BGN",
+	IWL_DEVICE_6150,
+	.ht_params = &iwl6000_ht_params,
+};
+
+const struct iwl_cfg iwl6150_bg_cfg = {
+	.name = "Intel(R) Centrino(R) Wireless-N + WiMAX 6150 BG",
+	IWL_DEVICE_6150,
+};
+
+const struct iwl_cfg iwl6000_3agn_cfg = {
+	.name = "Intel(R) Centrino(R) Ultimate-N 6300 AGN",
+	.fw_name_pre = IWL6000_FW_PRE,
+	.ucode_api_max = IWL6000_UCODE_API_MAX,
+	.ucode_api_ok = IWL6000_UCODE_API_OK,
+	.ucode_api_min = IWL6000_UCODE_API_MIN,
+	.device_family = IWL_DEVICE_FAMILY_6000,
+	.max_inst_size = IWL60_RTC_INST_SIZE,
+	.max_data_size = IWL60_RTC_DATA_SIZE,
+	.eeprom_ver = EEPROM_6000_EEPROM_VERSION,
+	.eeprom_calib_ver = EEPROM_6000_TX_POWER_VERSION,
+	.base_params = &iwl6000_base_params,
+	.ht_params = &iwl6000_ht_params,
+	.led_mode = IWL_LED_BLINK,
+};
+
+MODULE_FIRMWARE(IWL6000_MODULE_FIRMWARE(IWL6000_UCODE_API_OK));
+MODULE_FIRMWARE(IWL6050_MODULE_FIRMWARE(IWL6050_UCODE_API_OK));
+MODULE_FIRMWARE(IWL6005_MODULE_FIRMWARE(IWL6000G2_UCODE_API_OK));
+MODULE_FIRMWARE(IWL6030_MODULE_FIRMWARE(IWL6000G2B_UCODE_API_OK));
diff --git a/drivers/net/wireless/iwlwifi/pcie/cfg.h b/drivers/net/wireless/iwlwifi/pcie/cfg.h
new file mode 100644
index 0000000..82152311
--- /dev/null
+++ b/drivers/net/wireless/iwlwifi/pcie/cfg.h
@@ -0,0 +1,113 @@
+/******************************************************************************
+ *
+ * This file is provided under a dual BSD/GPLv2 license.  When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * Copyright(c) 2007 - 2012 Intel Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
+ * USA
+ *
+ * The full GNU General Public License is included in this distribution
+ * in the file called LICENSE.GPL.
+ *
+ * Contact Information:
+ *  Intel Linux Wireless <ilw@linux.intel.com>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ *
+ * BSD LICENSE
+ *
+ * Copyright(c) 2005 - 2012 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ *  * Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ *  * Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in
+ *    the documentation and/or other materials provided with the
+ *    distribution.
+ *  * Neither the name Intel Corporation nor the names of its
+ *    contributors may be used to endorse or promote products derived
+ *    from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ *****************************************************************************/
+#ifndef __iwl_pci_h__
+#define __iwl_pci_h__
+
+
+/*
+ * This file declares the config structures for all devices.
+ */
+
+extern const struct iwl_cfg iwl5300_agn_cfg;
+extern const struct iwl_cfg iwl5100_agn_cfg;
+extern const struct iwl_cfg iwl5350_agn_cfg;
+extern const struct iwl_cfg iwl5100_bgn_cfg;
+extern const struct iwl_cfg iwl5100_abg_cfg;
+extern const struct iwl_cfg iwl5150_agn_cfg;
+extern const struct iwl_cfg iwl5150_abg_cfg;
+extern const struct iwl_cfg iwl6005_2agn_cfg;
+extern const struct iwl_cfg iwl6005_2abg_cfg;
+extern const struct iwl_cfg iwl6005_2bg_cfg;
+extern const struct iwl_cfg iwl6005_2agn_sff_cfg;
+extern const struct iwl_cfg iwl6005_2agn_d_cfg;
+extern const struct iwl_cfg iwl6005_2agn_mow1_cfg;
+extern const struct iwl_cfg iwl6005_2agn_mow2_cfg;
+extern const struct iwl_cfg iwl1030_bgn_cfg;
+extern const struct iwl_cfg iwl1030_bg_cfg;
+extern const struct iwl_cfg iwl6030_2agn_cfg;
+extern const struct iwl_cfg iwl6030_2abg_cfg;
+extern const struct iwl_cfg iwl6030_2bgn_cfg;
+extern const struct iwl_cfg iwl6030_2bg_cfg;
+extern const struct iwl_cfg iwl6000i_2agn_cfg;
+extern const struct iwl_cfg iwl6000i_2abg_cfg;
+extern const struct iwl_cfg iwl6000i_2bg_cfg;
+extern const struct iwl_cfg iwl6000_3agn_cfg;
+extern const struct iwl_cfg iwl6050_2agn_cfg;
+extern const struct iwl_cfg iwl6050_2abg_cfg;
+extern const struct iwl_cfg iwl6150_bgn_cfg;
+extern const struct iwl_cfg iwl6150_bg_cfg;
+extern const struct iwl_cfg iwl1000_bgn_cfg;
+extern const struct iwl_cfg iwl1000_bg_cfg;
+extern const struct iwl_cfg iwl100_bgn_cfg;
+extern const struct iwl_cfg iwl100_bg_cfg;
+extern const struct iwl_cfg iwl130_bgn_cfg;
+extern const struct iwl_cfg iwl130_bg_cfg;
+extern const struct iwl_cfg iwl2000_2bgn_cfg;
+extern const struct iwl_cfg iwl2000_2bgn_d_cfg;
+extern const struct iwl_cfg iwl2030_2bgn_cfg;
+extern const struct iwl_cfg iwl6035_2agn_cfg;
+extern const struct iwl_cfg iwl105_bgn_cfg;
+extern const struct iwl_cfg iwl105_bgn_d_cfg;
+extern const struct iwl_cfg iwl135_bgn_cfg;
+
+#endif /* __iwl_pci_h__ */
diff --git a/drivers/net/wireless/iwlwifi/pcie/drv.c b/drivers/net/wireless/iwlwifi/pcie/drv.c
new file mode 100644
index 0000000..f4c3500
--- /dev/null
+++ b/drivers/net/wireless/iwlwifi/pcie/drv.c
@@ -0,0 +1,380 @@
+/******************************************************************************
+ *
+ * This file is provided under a dual BSD/GPLv2 license.  When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * Copyright(c) 2007 - 2012 Intel Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
+ * USA
+ *
+ * The full GNU General Public License is included in this distribution
+ * in the file called LICENSE.GPL.
+ *
+ * Contact Information:
+ *  Intel Linux Wireless <ilw@linux.intel.com>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ *
+ * BSD LICENSE
+ *
+ * Copyright(c) 2005 - 2012 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ *  * Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ *  * Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in
+ *    the documentation and/or other materials provided with the
+ *    distribution.
+ *  * Neither the name Intel Corporation nor the names of its
+ *    contributors may be used to endorse or promote products derived
+ *    from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ *****************************************************************************/
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/pci-aspm.h>
+
+#include "iwl-trans.h"
+#include "iwl-drv.h"
+#include "iwl-trans.h"
+
+#include "cfg.h"
+#include "internal.h"
+
+#define IWL_PCI_DEVICE(dev, subdev, cfg) \
+	.vendor = PCI_VENDOR_ID_INTEL,  .device = (dev), \
+	.subvendor = PCI_ANY_ID, .subdevice = (subdev), \
+	.driver_data = (kernel_ulong_t)&(cfg)
+
+/* Hardware specific file defines the PCI IDs table for that hardware module */
+static DEFINE_PCI_DEVICE_TABLE(iwl_hw_card_ids) = {
+	{IWL_PCI_DEVICE(0x4232, 0x1201, iwl5100_agn_cfg)}, /* Mini Card */
+	{IWL_PCI_DEVICE(0x4232, 0x1301, iwl5100_agn_cfg)}, /* Half Mini Card */
+	{IWL_PCI_DEVICE(0x4232, 0x1204, iwl5100_agn_cfg)}, /* Mini Card */
+	{IWL_PCI_DEVICE(0x4232, 0x1304, iwl5100_agn_cfg)}, /* Half Mini Card */
+	{IWL_PCI_DEVICE(0x4232, 0x1205, iwl5100_bgn_cfg)}, /* Mini Card */
+	{IWL_PCI_DEVICE(0x4232, 0x1305, iwl5100_bgn_cfg)}, /* Half Mini Card */
+	{IWL_PCI_DEVICE(0x4232, 0x1206, iwl5100_abg_cfg)}, /* Mini Card */
+	{IWL_PCI_DEVICE(0x4232, 0x1306, iwl5100_abg_cfg)}, /* Half Mini Card */
+	{IWL_PCI_DEVICE(0x4232, 0x1221, iwl5100_agn_cfg)}, /* Mini Card */
+	{IWL_PCI_DEVICE(0x4232, 0x1321, iwl5100_agn_cfg)}, /* Half Mini Card */
+	{IWL_PCI_DEVICE(0x4232, 0x1224, iwl5100_agn_cfg)}, /* Mini Card */
+	{IWL_PCI_DEVICE(0x4232, 0x1324, iwl5100_agn_cfg)}, /* Half Mini Card */
+	{IWL_PCI_DEVICE(0x4232, 0x1225, iwl5100_bgn_cfg)}, /* Mini Card */
+	{IWL_PCI_DEVICE(0x4232, 0x1325, iwl5100_bgn_cfg)}, /* Half Mini Card */
+	{IWL_PCI_DEVICE(0x4232, 0x1226, iwl5100_abg_cfg)}, /* Mini Card */
+	{IWL_PCI_DEVICE(0x4232, 0x1326, iwl5100_abg_cfg)}, /* Half Mini Card */
+	{IWL_PCI_DEVICE(0x4237, 0x1211, iwl5100_agn_cfg)}, /* Mini Card */
+	{IWL_PCI_DEVICE(0x4237, 0x1311, iwl5100_agn_cfg)}, /* Half Mini Card */
+	{IWL_PCI_DEVICE(0x4237, 0x1214, iwl5100_agn_cfg)}, /* Mini Card */
+	{IWL_PCI_DEVICE(0x4237, 0x1314, iwl5100_agn_cfg)}, /* Half Mini Card */
+	{IWL_PCI_DEVICE(0x4237, 0x1215, iwl5100_bgn_cfg)}, /* Mini Card */
+	{IWL_PCI_DEVICE(0x4237, 0x1315, iwl5100_bgn_cfg)}, /* Half Mini Card */
+	{IWL_PCI_DEVICE(0x4237, 0x1216, iwl5100_abg_cfg)}, /* Mini Card */
+	{IWL_PCI_DEVICE(0x4237, 0x1316, iwl5100_abg_cfg)}, /* Half Mini Card */
+
+/* 5300 Series WiFi */
+	{IWL_PCI_DEVICE(0x4235, 0x1021, iwl5300_agn_cfg)}, /* Mini Card */
+	{IWL_PCI_DEVICE(0x4235, 0x1121, iwl5300_agn_cfg)}, /* Half Mini Card */
+	{IWL_PCI_DEVICE(0x4235, 0x1024, iwl5300_agn_cfg)}, /* Mini Card */
+	{IWL_PCI_DEVICE(0x4235, 0x1124, iwl5300_agn_cfg)}, /* Half Mini Card */
+	{IWL_PCI_DEVICE(0x4235, 0x1001, iwl5300_agn_cfg)}, /* Mini Card */
+	{IWL_PCI_DEVICE(0x4235, 0x1101, iwl5300_agn_cfg)}, /* Half Mini Card */
+	{IWL_PCI_DEVICE(0x4235, 0x1004, iwl5300_agn_cfg)}, /* Mini Card */
+	{IWL_PCI_DEVICE(0x4235, 0x1104, iwl5300_agn_cfg)}, /* Half Mini Card */
+	{IWL_PCI_DEVICE(0x4236, 0x1011, iwl5300_agn_cfg)}, /* Mini Card */
+	{IWL_PCI_DEVICE(0x4236, 0x1111, iwl5300_agn_cfg)}, /* Half Mini Card */
+	{IWL_PCI_DEVICE(0x4236, 0x1014, iwl5300_agn_cfg)}, /* Mini Card */
+	{IWL_PCI_DEVICE(0x4236, 0x1114, iwl5300_agn_cfg)}, /* Half Mini Card */
+
+/* 5350 Series WiFi/WiMax */
+	{IWL_PCI_DEVICE(0x423A, 0x1001, iwl5350_agn_cfg)}, /* Mini Card */
+	{IWL_PCI_DEVICE(0x423A, 0x1021, iwl5350_agn_cfg)}, /* Mini Card */
+	{IWL_PCI_DEVICE(0x423B, 0x1011, iwl5350_agn_cfg)}, /* Mini Card */
+
+/* 5150 Series WiFi/WiMax */
+	{IWL_PCI_DEVICE(0x423C, 0x1201, iwl5150_agn_cfg)}, /* Mini Card */
+	{IWL_PCI_DEVICE(0x423C, 0x1301, iwl5150_agn_cfg)}, /* Half Mini Card */
+	{IWL_PCI_DEVICE(0x423C, 0x1206, iwl5150_abg_cfg)}, /* Mini Card */
+	{IWL_PCI_DEVICE(0x423C, 0x1306, iwl5150_abg_cfg)}, /* Half Mini Card */
+	{IWL_PCI_DEVICE(0x423C, 0x1221, iwl5150_agn_cfg)}, /* Mini Card */
+	{IWL_PCI_DEVICE(0x423C, 0x1321, iwl5150_agn_cfg)}, /* Half Mini Card */
+
+	{IWL_PCI_DEVICE(0x423D, 0x1211, iwl5150_agn_cfg)}, /* Mini Card */
+	{IWL_PCI_DEVICE(0x423D, 0x1311, iwl5150_agn_cfg)}, /* Half Mini Card */
+	{IWL_PCI_DEVICE(0x423D, 0x1216, iwl5150_abg_cfg)}, /* Mini Card */
+	{IWL_PCI_DEVICE(0x423D, 0x1316, iwl5150_abg_cfg)}, /* Half Mini Card */
+
+/* 6x00 Series */
+	{IWL_PCI_DEVICE(0x422B, 0x1101, iwl6000_3agn_cfg)},
+	{IWL_PCI_DEVICE(0x422B, 0x1121, iwl6000_3agn_cfg)},
+	{IWL_PCI_DEVICE(0x422C, 0x1301, iwl6000i_2agn_cfg)},
+	{IWL_PCI_DEVICE(0x422C, 0x1306, iwl6000i_2abg_cfg)},
+	{IWL_PCI_DEVICE(0x422C, 0x1307, iwl6000i_2bg_cfg)},
+	{IWL_PCI_DEVICE(0x422C, 0x1321, iwl6000i_2agn_cfg)},
+	{IWL_PCI_DEVICE(0x422C, 0x1326, iwl6000i_2abg_cfg)},
+	{IWL_PCI_DEVICE(0x4238, 0x1111, iwl6000_3agn_cfg)},
+	{IWL_PCI_DEVICE(0x4239, 0x1311, iwl6000i_2agn_cfg)},
+	{IWL_PCI_DEVICE(0x4239, 0x1316, iwl6000i_2abg_cfg)},
+
+/* 6x05 Series */
+	{IWL_PCI_DEVICE(0x0082, 0x1301, iwl6005_2agn_cfg)},
+	{IWL_PCI_DEVICE(0x0082, 0x1306, iwl6005_2abg_cfg)},
+	{IWL_PCI_DEVICE(0x0082, 0x1307, iwl6005_2bg_cfg)},
+	{IWL_PCI_DEVICE(0x0082, 0x1321, iwl6005_2agn_cfg)},
+	{IWL_PCI_DEVICE(0x0082, 0x1326, iwl6005_2abg_cfg)},
+	{IWL_PCI_DEVICE(0x0085, 0x1311, iwl6005_2agn_cfg)},
+	{IWL_PCI_DEVICE(0x0085, 0x1316, iwl6005_2abg_cfg)},
+	{IWL_PCI_DEVICE(0x0082, 0xC020, iwl6005_2agn_sff_cfg)},
+	{IWL_PCI_DEVICE(0x0085, 0xC220, iwl6005_2agn_sff_cfg)},
+	{IWL_PCI_DEVICE(0x0082, 0x4820, iwl6005_2agn_d_cfg)},
+	{IWL_PCI_DEVICE(0x0082, 0x1304, iwl6005_2agn_mow1_cfg)},/* low 5GHz active */
+	{IWL_PCI_DEVICE(0x0082, 0x1305, iwl6005_2agn_mow2_cfg)},/* high 5GHz active */
+
+/* 6x30 Series */
+	{IWL_PCI_DEVICE(0x008A, 0x5305, iwl1030_bgn_cfg)},
+	{IWL_PCI_DEVICE(0x008A, 0x5307, iwl1030_bg_cfg)},
+	{IWL_PCI_DEVICE(0x008A, 0x5325, iwl1030_bgn_cfg)},
+	{IWL_PCI_DEVICE(0x008A, 0x5327, iwl1030_bg_cfg)},
+	{IWL_PCI_DEVICE(0x008B, 0x5315, iwl1030_bgn_cfg)},
+	{IWL_PCI_DEVICE(0x008B, 0x5317, iwl1030_bg_cfg)},
+	{IWL_PCI_DEVICE(0x0090, 0x5211, iwl6030_2agn_cfg)},
+	{IWL_PCI_DEVICE(0x0090, 0x5215, iwl6030_2bgn_cfg)},
+	{IWL_PCI_DEVICE(0x0090, 0x5216, iwl6030_2abg_cfg)},
+	{IWL_PCI_DEVICE(0x0091, 0x5201, iwl6030_2agn_cfg)},
+	{IWL_PCI_DEVICE(0x0091, 0x5205, iwl6030_2bgn_cfg)},
+	{IWL_PCI_DEVICE(0x0091, 0x5206, iwl6030_2abg_cfg)},
+	{IWL_PCI_DEVICE(0x0091, 0x5207, iwl6030_2bg_cfg)},
+	{IWL_PCI_DEVICE(0x0091, 0x5221, iwl6030_2agn_cfg)},
+	{IWL_PCI_DEVICE(0x0091, 0x5225, iwl6030_2bgn_cfg)},
+	{IWL_PCI_DEVICE(0x0091, 0x5226, iwl6030_2abg_cfg)},
+
+/* 6x50 WiFi/WiMax Series */
+	{IWL_PCI_DEVICE(0x0087, 0x1301, iwl6050_2agn_cfg)},
+	{IWL_PCI_DEVICE(0x0087, 0x1306, iwl6050_2abg_cfg)},
+	{IWL_PCI_DEVICE(0x0087, 0x1321, iwl6050_2agn_cfg)},
+	{IWL_PCI_DEVICE(0x0087, 0x1326, iwl6050_2abg_cfg)},
+	{IWL_PCI_DEVICE(0x0089, 0x1311, iwl6050_2agn_cfg)},
+	{IWL_PCI_DEVICE(0x0089, 0x1316, iwl6050_2abg_cfg)},
+
+/* 6150 WiFi/WiMax Series */
+	{IWL_PCI_DEVICE(0x0885, 0x1305, iwl6150_bgn_cfg)},
+	{IWL_PCI_DEVICE(0x0885, 0x1307, iwl6150_bg_cfg)},
+	{IWL_PCI_DEVICE(0x0885, 0x1325, iwl6150_bgn_cfg)},
+	{IWL_PCI_DEVICE(0x0885, 0x1327, iwl6150_bg_cfg)},
+	{IWL_PCI_DEVICE(0x0886, 0x1315, iwl6150_bgn_cfg)},
+	{IWL_PCI_DEVICE(0x0886, 0x1317, iwl6150_bg_cfg)},
+
+/* 1000 Series WiFi */
+	{IWL_PCI_DEVICE(0x0083, 0x1205, iwl1000_bgn_cfg)},
+	{IWL_PCI_DEVICE(0x0083, 0x1305, iwl1000_bgn_cfg)},
+	{IWL_PCI_DEVICE(0x0083, 0x1225, iwl1000_bgn_cfg)},
+	{IWL_PCI_DEVICE(0x0083, 0x1325, iwl1000_bgn_cfg)},
+	{IWL_PCI_DEVICE(0x0084, 0x1215, iwl1000_bgn_cfg)},
+	{IWL_PCI_DEVICE(0x0084, 0x1315, iwl1000_bgn_cfg)},
+	{IWL_PCI_DEVICE(0x0083, 0x1206, iwl1000_bg_cfg)},
+	{IWL_PCI_DEVICE(0x0083, 0x1306, iwl1000_bg_cfg)},
+	{IWL_PCI_DEVICE(0x0083, 0x1226, iwl1000_bg_cfg)},
+	{IWL_PCI_DEVICE(0x0083, 0x1326, iwl1000_bg_cfg)},
+	{IWL_PCI_DEVICE(0x0084, 0x1216, iwl1000_bg_cfg)},
+	{IWL_PCI_DEVICE(0x0084, 0x1316, iwl1000_bg_cfg)},
+
+/* 100 Series WiFi */
+	{IWL_PCI_DEVICE(0x08AE, 0x1005, iwl100_bgn_cfg)},
+	{IWL_PCI_DEVICE(0x08AE, 0x1007, iwl100_bg_cfg)},
+	{IWL_PCI_DEVICE(0x08AF, 0x1015, iwl100_bgn_cfg)},
+	{IWL_PCI_DEVICE(0x08AF, 0x1017, iwl100_bg_cfg)},
+	{IWL_PCI_DEVICE(0x08AE, 0x1025, iwl100_bgn_cfg)},
+	{IWL_PCI_DEVICE(0x08AE, 0x1027, iwl100_bg_cfg)},
+
+/* 130 Series WiFi */
+	{IWL_PCI_DEVICE(0x0896, 0x5005, iwl130_bgn_cfg)},
+	{IWL_PCI_DEVICE(0x0896, 0x5007, iwl130_bg_cfg)},
+	{IWL_PCI_DEVICE(0x0897, 0x5015, iwl130_bgn_cfg)},
+	{IWL_PCI_DEVICE(0x0897, 0x5017, iwl130_bg_cfg)},
+	{IWL_PCI_DEVICE(0x0896, 0x5025, iwl130_bgn_cfg)},
+	{IWL_PCI_DEVICE(0x0896, 0x5027, iwl130_bg_cfg)},
+
+/* 2x00 Series */
+	{IWL_PCI_DEVICE(0x0890, 0x4022, iwl2000_2bgn_cfg)},
+	{IWL_PCI_DEVICE(0x0891, 0x4222, iwl2000_2bgn_cfg)},
+	{IWL_PCI_DEVICE(0x0890, 0x4422, iwl2000_2bgn_cfg)},
+	{IWL_PCI_DEVICE(0x0890, 0x4822, iwl2000_2bgn_d_cfg)},
+
+/* 2x30 Series */
+	{IWL_PCI_DEVICE(0x0887, 0x4062, iwl2030_2bgn_cfg)},
+	{IWL_PCI_DEVICE(0x0888, 0x4262, iwl2030_2bgn_cfg)},
+	{IWL_PCI_DEVICE(0x0887, 0x4462, iwl2030_2bgn_cfg)},
+
+/* 6x35 Series */
+	{IWL_PCI_DEVICE(0x088E, 0x4060, iwl6035_2agn_cfg)},
+	{IWL_PCI_DEVICE(0x088F, 0x4260, iwl6035_2agn_cfg)},
+	{IWL_PCI_DEVICE(0x088E, 0x4460, iwl6035_2agn_cfg)},
+	{IWL_PCI_DEVICE(0x088E, 0x4860, iwl6035_2agn_cfg)},
+
+/* 105 Series */
+	{IWL_PCI_DEVICE(0x0894, 0x0022, iwl105_bgn_cfg)},
+	{IWL_PCI_DEVICE(0x0895, 0x0222, iwl105_bgn_cfg)},
+	{IWL_PCI_DEVICE(0x0894, 0x0422, iwl105_bgn_cfg)},
+	{IWL_PCI_DEVICE(0x0894, 0x0822, iwl105_bgn_d_cfg)},
+
+/* 135 Series */
+	{IWL_PCI_DEVICE(0x0892, 0x0062, iwl135_bgn_cfg)},
+	{IWL_PCI_DEVICE(0x0893, 0x0262, iwl135_bgn_cfg)},
+	{IWL_PCI_DEVICE(0x0892, 0x0462, iwl135_bgn_cfg)},
+
+	{0}
+};
+MODULE_DEVICE_TABLE(pci, iwl_hw_card_ids);
+
+/* PCI registers */
+#define PCI_CFG_RETRY_TIMEOUT	0x041
+
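+/*
+ * When CONFIG_IWLWIFI_IDI is set, iwl_pci_probe/iwl_pci_remove are
+ * provided by iwl-idi.c instead (see the declarations further down).
+ */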
+#ifndef CONFIG_IWLWIFI_IDI
+
+static int iwl_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
+{
+	const struct iwl_cfg *cfg = (struct iwl_cfg *)(ent->driver_data);
+	struct iwl_trans *iwl_trans;
+	struct iwl_trans_pcie *trans_pcie;
+
+	iwl_trans = iwl_trans_pcie_alloc(pdev, ent, cfg);
+	if (iwl_trans == NULL)
+		return -ENOMEM;
+
+	pci_set_drvdata(pdev, iwl_trans);
+
+	trans_pcie = IWL_TRANS_GET_PCIE_TRANS(iwl_trans);
+	trans_pcie->drv = iwl_drv_start(iwl_trans, cfg);
+	if (!trans_pcie->drv)
+		goto out_free_trans;
+
+	return 0;
+
+out_free_trans:
+	iwl_trans_pcie_free(iwl_trans);
+	pci_set_drvdata(pdev, NULL);
+	return -EFAULT;
+}
+
+static void __devexit iwl_pci_remove(struct pci_dev *pdev)
+{
+	struct iwl_trans *trans = pci_get_drvdata(pdev);
+	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+
+	iwl_drv_stop(trans_pcie->drv);
+	iwl_trans_pcie_free(trans);
+
+	pci_set_drvdata(pdev, NULL);
+}
+
+#endif /* CONFIG_IWLWIFI_IDI */
+
+#ifdef CONFIG_PM_SLEEP
+
+static int iwl_pci_suspend(struct device *device)
+{
+	struct pci_dev *pdev = to_pci_dev(device);
+	struct iwl_trans *iwl_trans = pci_get_drvdata(pdev);
+
+	/* Before you put code here, think about WoWLAN. You cannot check here
+	 * whether WoWLAN is enabled or not, and your code will run even if
+	 * WoWLAN is enabled - don't kill the NIC, someone may need it in Sx.
+	 */
+
+	return iwl_trans_suspend(iwl_trans);
+}
+
+static int iwl_pci_resume(struct device *device)
+{
+	struct pci_dev *pdev = to_pci_dev(device);
+	struct iwl_trans *iwl_trans = pci_get_drvdata(pdev);
+
+	/* Before you put code here, think about WoWLAN. You cannot check here
+	 * whether WoWLAN is enabled or not, and your code will run even if
+	 * WoWLAN is enabled - the NIC may be alive.
+	 */
+
+	/*
+	 * We disable the RETRY_TIMEOUT register (0x41) to keep
+	 * PCI Tx retries from interfering with C3 CPU state.
+	 */
+	pci_write_config_byte(pdev, PCI_CFG_RETRY_TIMEOUT, 0x00);
+
+	return iwl_trans_resume(iwl_trans);
+}
+
+static SIMPLE_DEV_PM_OPS(iwl_dev_pm_ops, iwl_pci_suspend, iwl_pci_resume);
+
+#define IWL_PM_OPS	(&iwl_dev_pm_ops)
+
+#else
+
+#define IWL_PM_OPS	NULL
+
+#endif
+
+#ifdef CONFIG_IWLWIFI_IDI
+/*
+ * Defined externally in iwl-idi.c
+ */
+int iwl_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent);
+void __devexit iwl_pci_remove(struct pci_dev *pdev);
+
+#endif /* CONFIG_IWLWIFI_IDI */
+
+static struct pci_driver iwl_pci_driver = {
+	.name = DRV_NAME,
+	.id_table = iwl_hw_card_ids,
+	.probe = iwl_pci_probe,
+	.remove = __devexit_p(iwl_pci_remove),
+	.driver.pm = IWL_PM_OPS,
+};
+
+int __must_check iwl_pci_register_driver(void)
+{
+	int ret;
+	ret = pci_register_driver(&iwl_pci_driver);
+	if (ret)
+		pr_err("Unable to initialize PCI module\n");
+
+	return ret;
+}
+
+void iwl_pci_unregister_driver(void)
+{
+	pci_unregister_driver(&iwl_pci_driver);
+}
diff --git a/drivers/net/wireless/iwlwifi/pcie/internal.h b/drivers/net/wireless/iwlwifi/pcie/internal.h
new file mode 100644
index 0000000..f027769
--- /dev/null
+++ b/drivers/net/wireless/iwlwifi/pcie/internal.h
@@ -0,0 +1,444 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2003 - 2012 Intel Corporation. All rights reserved.
+ *
+ * Portions of this file are derived from the ipw3945 project, as well
+ * as portions of the ieee80211 subsystem header files.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
+ *
+ * The full GNU General Public License is included in this distribution in the
+ * file called LICENSE.
+ *
+ * Contact Information:
+ *  Intel Linux Wireless <ilw@linux.intel.com>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ *
+ *****************************************************************************/
+#ifndef __iwl_trans_int_pcie_h__
+#define __iwl_trans_int_pcie_h__
+
+#include <linux/spinlock.h>
+#include <linux/interrupt.h>
+#include <linux/skbuff.h>
+#include <linux/wait.h>
+#include <linux/pci.h>
+#include <linux/timer.h>
+
+#include "iwl-fh.h"
+#include "iwl-csr.h"
+#include "iwl-trans.h"
+#include "iwl-debug.h"
+#include "iwl-io.h"
+#include "iwl-op-mode.h"
+
+struct iwl_host_cmd;
+
+/* This file includes the declarations that are internal to the
+ * trans_pcie layer */
+
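+/*
+ * struct iwl_rx_mem_buffer - one receive buffer: a page mapped for DMA
+ * plus the list entry used to move it between the rx_free and rx_used
+ * lists of the RX queue.
+ */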
+struct iwl_rx_mem_buffer {
+	dma_addr_t page_dma;
+	struct page *page;
+	struct list_head list;
+};
+
+/**
+ * struct isr_statistics - interrupt statistics
+ *
+ */
+struct isr_statistics {
+	u32 hw;
+	u32 sw;
+	u32 err_code;
+	u32 sch;
+	u32 alive;
+	u32 rfkill;
+	u32 ctkill;
+	u32 wakeup;
+	u32 rx;
+	u32 tx;
+	u32 unhandled;
+};
+
+/**
+ * struct iwl_rx_queue - Rx queue
+ * @bd: driver's pointer to buffer of receive buffer descriptors (rbd)
+ * @bd_dma: bus address of buffer of receive buffer descriptors (rbd)
+ * @pool: initial pool of iwl_rx_mem_buffer structures for the queue
+ * @queue: pointers into @pool, one per receive buffer descriptor
+ * @read: Shared index to newest available Rx buffer
+ * @write: Shared index to oldest written Rx packet
+ * @free_count: Number of pre-allocated buffers in rx_free
+ * @write_actual: last write index written to the device (multiple of 8)
+ * @rx_free: list of free SKBs for use
+ * @rx_used: List of Rx buffers with no SKB
+ * @need_update: flag to indicate we need to update read/write index
+ * @rb_stts: driver's pointer to receive buffer status
+ * @rb_stts_dma: bus address of receive buffer status
+ * @lock: protects the queue and the read/write indices
+ *
+ * NOTE:  rx_free and rx_used are used as a FIFO for iwl_rx_mem_buffers
+ */
+struct iwl_rx_queue {
+	__le32 *bd;
+	dma_addr_t bd_dma;
+	struct iwl_rx_mem_buffer pool[RX_QUEUE_SIZE + RX_FREE_BUFFERS];
+	struct iwl_rx_mem_buffer *queue[RX_QUEUE_SIZE];
+	u32 read;
+	u32 write;
+	u32 free_count;
+	u32 write_actual;
+	struct list_head rx_free;
+	struct list_head rx_used;
+	int need_update;
+	struct iwl_rb_status *rb_stts;
+	dma_addr_t rb_stts_dma;
+	spinlock_t lock;
+};
+
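+/* a coherent DMA allocation: CPU address, bus address and size */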
+struct iwl_dma_ptr {
+	dma_addr_t dma;
+	void *addr;
+	size_t size;
+};
+
+/**
+ * iwl_queue_inc_wrap - increment queue index, wrap back to beginning
+ * @index -- current index
+ * @n_bd -- total number of entries in queue (must be power of 2)
+ */
+static inline int iwl_queue_inc_wrap(int index, int n_bd)
+{
+	return ++index & (n_bd - 1);
+}
+
+/**
+ * iwl_queue_dec_wrap - decrement queue index, wrap back to end
+ * @index -- current index
+ * @n_bd -- total number of entries in queue (must be power of 2)
+ */
+static inline int iwl_queue_dec_wrap(int index, int n_bd)
+{
+	return --index & (n_bd - 1);
+}
+
+struct iwl_cmd_meta {
+	/* only for SYNC commands, iff the reply skb is wanted */
+	struct iwl_host_cmd *source;
+
+	DEFINE_DMA_UNMAP_ADDR(mapping);
+	DEFINE_DMA_UNMAP_LEN(len);
+
+	u32 flags;
+};
+
+/*
+ * Generic queue structure
+ *
+ * Contains common data for Rx and Tx queues.
+ *
+ * Note the difference between n_bd and n_window: the hardware
+ * always assumes 256 descriptors, so n_bd is always 256 (unless
+ * there are HW changes in the future). For the normal TX
+ * queues, n_window, which is the size of the software queue data,
+ * is also 256; however, for the command queue, n_window is only
+ * 32 since we don't need so many commands pending. Since the HW
+ * still uses 256 BDs for DMA though, n_bd stays 256. As a result,
+ * the software buffers (in the variables @meta, @txb in struct
+ * iwl_tx_queue) only have 32 entries, while the HW buffers (@tfds
+ * in the same struct) have 256.
+ * This means that we end up with the following:
+ *  HW entries: | 0 | ... | N * 32 | ... | N * 32 + 31 | ... | 255 |
+ *  SW entries:           | 0      | ... | 31          |
+ * where N is a number between 0 and 7. This means that the SW
+ * data is a window overlayed over the HW queue.
+ */
+struct iwl_queue {
+	int n_bd;              /* number of BDs in this queue */
+	int write_ptr;       /* first empty entry (index), host write ptr */
+	int read_ptr;         /* last used entry (index), host read ptr */
+	/* use for monitoring and recovering the stuck queue */
+	dma_addr_t dma_addr;   /* physical addr for BD's */
+	int n_window;	       /* safe queue window */
+	u32 id;
+	int low_mark;	       /* low watermark, resume queue if free
+				* space more than this */
+	int high_mark;         /* high watermark, stop queue if free
+				* space less than this */
+};
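+
+/*
+ * Example of the window overlay described above: for the command queue,
+ * n_window is TFD_CMD_SLOTS (32, defined just below), so a hardware index
+ * of 130 maps to software entry 130 & (32 - 1) == 2 (see get_cmd_index()).
+ */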
+
+#define TFD_TX_CMD_SLOTS 256
+#define TFD_CMD_SLOTS 32
+
+struct iwl_pcie_tx_queue_entry {
+	struct iwl_device_cmd *cmd;
+	struct sk_buff *skb;
+	struct iwl_cmd_meta meta;
+};
+
+/**
+ * struct iwl_tx_queue - Tx Queue for DMA
+ * @q: generic Rx/Tx queue descriptor
+ * @tfds: transmit frame descriptors (DMA memory)
+ * @entries: transmit entries (driver state)
+ * @lock: queue lock
+ * @stuck_timer: timer that fires if queue gets stuck
+ * @trans_pcie: pointer back to transport (for timer)
+ * @need_update: indicates need to update read/write index
+ * @active: stores if queue is active
+ *
+ * A Tx queue consists of circular buffer of BDs (a.k.a. TFDs, transmit frame
+ * descriptors) and required locking structures.
+ */
+struct iwl_tx_queue {
+	struct iwl_queue q;
+	struct iwl_tfd *tfds;
+	struct iwl_pcie_tx_queue_entry *entries;
+	spinlock_t lock;
+	struct timer_list stuck_timer;
+	struct iwl_trans_pcie *trans_pcie;
+	u8 need_update;
+	u8 active;
+};
+
+/**
+ * struct iwl_trans_pcie - PCIe transport specific data
+ * @rxq: all the RX queue data
+ * @rx_replenish: work that will be called when buffers need to be allocated
+ * @drv: pointer to iwl_drv
+ * @trans: pointer to the generic transport area
+ * @irq: the irq number for the device
+ * @irq_requested: true when the irq has been requested
+ * @scd_base_addr: scheduler base address in SRAM
+ * @scd_bc_tbls: pointer to the byte count table of the scheduler
+ * @kw: keep warm address
+ * @pci_dev: basic pci-network driver stuff
+ * @hw_base: pci hardware address support
+ * @ucode_write_complete: indicates that the ucode has been copied.
+ * @ucode_write_waitq: wait queue for uCode load
+ * @status: transport specific status flags
+ * @cmd_queue: command queue number
+ * @rx_buf_size_8k: 8 kB RX buffer size
+ * @rx_page_order: page order for receive buffer size
+ * @wd_timeout: queue watchdog timeout (jiffies)
+ */
+struct iwl_trans_pcie {
+	struct iwl_rx_queue rxq;
+	struct work_struct rx_replenish;
+	struct iwl_trans *trans;
+	struct iwl_drv *drv;
+
+	/* INT ICT Table */
+	__le32 *ict_tbl;
+	dma_addr_t ict_tbl_dma;
+	int ict_index;
+	u32 inta;
+	bool use_ict;
+	bool irq_requested;
+	struct tasklet_struct irq_tasklet;
+	struct isr_statistics isr_stats;
+
+	unsigned int irq;
+	spinlock_t irq_lock;
+	u32 inta_mask;
+	u32 scd_base_addr;
+	struct iwl_dma_ptr scd_bc_tbls;
+	struct iwl_dma_ptr kw;
+
+	struct iwl_tx_queue *txq;
+	unsigned long queue_used[BITS_TO_LONGS(IWL_MAX_HW_QUEUES)];
+	unsigned long queue_stopped[BITS_TO_LONGS(IWL_MAX_HW_QUEUES)];
+
+	/* PCI bus related data */
+	struct pci_dev *pci_dev;
+	void __iomem *hw_base;
+
+	bool ucode_write_complete;
+	wait_queue_head_t ucode_write_waitq;
+	unsigned long status;
+	u8 cmd_queue;
+	u8 n_no_reclaim_cmds;
+	u8 no_reclaim_cmds[MAX_NO_RECLAIM_CMDS];
+	u8 setup_q_to_fifo[IWL_MAX_HW_QUEUES];
+	u8 n_q_to_fifo;
+
+	bool rx_buf_size_8k;
+	u32 rx_page_order;
+
+	const char **command_names;
+
+	/* queue watchdog */
+	unsigned long wd_timeout;
+};
+
+/*****************************************************
+* DRIVER STATUS FUNCTIONS
+******************************************************/
+#define STATUS_HCMD_ACTIVE	0
+#define STATUS_DEVICE_ENABLED	1
+#define STATUS_TPOWER_PMI	2
+#define STATUS_INT_ENABLED	3
+
+#define IWL_TRANS_GET_PCIE_TRANS(_iwl_trans) \
+	((struct iwl_trans_pcie *) ((_iwl_trans)->trans_specific))
+
+static inline struct iwl_trans *
+iwl_trans_pcie_get_trans(struct iwl_trans_pcie *trans_pcie)
+{
+	return container_of((void *)trans_pcie, struct iwl_trans,
+			    trans_specific);
+}
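+
+/*
+ * Note: the helper above and IWL_TRANS_GET_PCIE_TRANS() are intended to be
+ * inverses of each other, since the PCIe data is placed in the
+ * trans_specific area of struct iwl_trans, e.g.
+ * IWL_TRANS_GET_PCIE_TRANS(iwl_trans_pcie_get_trans(trans_pcie)) == trans_pcie.
+ */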
+
+struct iwl_trans *iwl_trans_pcie_alloc(struct pci_dev *pdev,
+				       const struct pci_device_id *ent,
+				       const struct iwl_cfg *cfg);
+void iwl_trans_pcie_free(struct iwl_trans *trans);
+
+/*****************************************************
+* RX
+******************************************************/
+void iwl_bg_rx_replenish(struct work_struct *data);
+void iwl_irq_tasklet(struct iwl_trans *trans);
+void iwlagn_rx_replenish(struct iwl_trans *trans);
+void iwl_rx_queue_update_write_ptr(struct iwl_trans *trans,
+				   struct iwl_rx_queue *q);
+
+/*****************************************************
+* ICT
+******************************************************/
+void iwl_reset_ict(struct iwl_trans *trans);
+void iwl_disable_ict(struct iwl_trans *trans);
+int iwl_alloc_isr_ict(struct iwl_trans *trans);
+void iwl_free_isr_ict(struct iwl_trans *trans);
+irqreturn_t iwl_isr_ict(int irq, void *data);
+
+/*****************************************************
+* TX / HCMD
+******************************************************/
+void iwl_txq_update_write_ptr(struct iwl_trans *trans,
+			      struct iwl_tx_queue *txq);
+int iwlagn_txq_attach_buf_to_tfd(struct iwl_trans *trans,
+				 struct iwl_tx_queue *txq,
+				 dma_addr_t addr, u16 len, u8 reset);
+int iwl_queue_init(struct iwl_queue *q, int count, int slots_num, u32 id);
+int iwl_trans_pcie_send_cmd(struct iwl_trans *trans, struct iwl_host_cmd *cmd);
+void iwl_tx_cmd_complete(struct iwl_trans *trans,
+			 struct iwl_rx_cmd_buffer *rxb, int handler_status);
+void iwl_trans_txq_update_byte_cnt_tbl(struct iwl_trans *trans,
+				       struct iwl_tx_queue *txq,
+				       u16 byte_cnt);
+void iwl_trans_pcie_tx_agg_disable(struct iwl_trans *trans, int queue);
+void iwl_trans_set_wr_ptrs(struct iwl_trans *trans, int txq_id, u32 index);
+void iwl_trans_tx_queue_set_status(struct iwl_trans *trans,
+				   struct iwl_tx_queue *txq,
+				   int tx_fifo_id, bool active);
+void iwl_trans_pcie_tx_agg_setup(struct iwl_trans *trans, int queue, int fifo,
+				 int sta_id, int tid, int frame_limit, u16 ssn);
+void iwl_txq_free_tfd(struct iwl_trans *trans, struct iwl_tx_queue *txq,
+		      enum dma_data_direction dma_dir);
+int iwl_tx_queue_reclaim(struct iwl_trans *trans, int txq_id, int index,
+			 struct sk_buff_head *skbs);
+int iwl_queue_space(const struct iwl_queue *q);
+
+/*****************************************************
+* Error handling
+******************************************************/
+int iwl_dump_fh(struct iwl_trans *trans, char **buf, bool display);
+void iwl_dump_csr(struct iwl_trans *trans);
+
+/*****************************************************
+* Helpers
+******************************************************/
+static inline void iwl_disable_interrupts(struct iwl_trans *trans)
+{
+	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+	clear_bit(STATUS_INT_ENABLED, &trans_pcie->status);
+
+	/* disable interrupts from uCode/NIC to host */
+	iwl_write32(trans, CSR_INT_MASK, 0x00000000);
+
+	/* acknowledge/clear/reset any interrupts still pending
+	 * from uCode or flow handler (Rx/Tx DMA) */
+	iwl_write32(trans, CSR_INT, 0xffffffff);
+	iwl_write32(trans, CSR_FH_INT_STATUS, 0xffffffff);
+	IWL_DEBUG_ISR(trans, "Disabled interrupts\n");
+}
+
+static inline void iwl_enable_interrupts(struct iwl_trans *trans)
+{
+	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+
+	IWL_DEBUG_ISR(trans, "Enabling interrupts\n");
+	set_bit(STATUS_INT_ENABLED, &trans_pcie->status);
+	iwl_write32(trans, CSR_INT_MASK, trans_pcie->inta_mask);
+}
+
+static inline void iwl_enable_rfkill_int(struct iwl_trans *trans)
+{
+	IWL_DEBUG_ISR(trans, "Enabling rfkill interrupt\n");
+	iwl_write32(trans, CSR_INT_MASK, CSR_INT_BIT_RF_KILL);
+}
+
+static inline void iwl_wake_queue(struct iwl_trans *trans,
+				  struct iwl_tx_queue *txq)
+{
+	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+
+	if (test_and_clear_bit(txq->q.id, trans_pcie->queue_stopped)) {
+		IWL_DEBUG_TX_QUEUES(trans, "Wake hwq %d\n", txq->q.id);
+		iwl_op_mode_queue_not_full(trans->op_mode, txq->q.id);
+	}
+}
+
+static inline void iwl_stop_queue(struct iwl_trans *trans,
+				  struct iwl_tx_queue *txq)
+{
+	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+
+	if (!test_and_set_bit(txq->q.id, trans_pcie->queue_stopped)) {
+		iwl_op_mode_queue_full(trans->op_mode, txq->q.id);
+		IWL_DEBUG_TX_QUEUES(trans, "Stop hwq %d\n", txq->q.id);
+	} else
+		IWL_DEBUG_TX_QUEUES(trans, "hwq %d already stopped\n",
+				    txq->q.id);
+}
+
+static inline int iwl_queue_used(const struct iwl_queue *q, int i)
+{
+	return q->write_ptr >= q->read_ptr ?
+		(i >= q->read_ptr && i < q->write_ptr) :
+		!(i < q->read_ptr && i >= q->write_ptr);
+}
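+
+/*
+ * Example with hypothetical pointers: for n_bd = 256, read_ptr = 250 and
+ * write_ptr = 5 (the queue has wrapped), indexes 250..255 and 0..4 are
+ * reported as used; everything else is free.
+ */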
+
+static inline u8 get_cmd_index(struct iwl_queue *q, u32 index)
+{
+	return index & (q->n_window - 1);
+}
+
+static inline const char *
+trans_pcie_get_cmd_string(struct iwl_trans_pcie *trans_pcie, u8 cmd)
+{
+	if (!trans_pcie->command_names || !trans_pcie->command_names[cmd])
+		return "UNKNOWN";
+	return trans_pcie->command_names[cmd];
+}
+
+static inline bool iwl_is_rfkill_set(struct iwl_trans *trans)
+{
+	return !(iwl_read32(trans, CSR_GP_CNTRL) &
+		CSR_GP_CNTRL_REG_FLAG_HW_RF_KILL_SW);
+}
+
+#endif /* __iwl_trans_int_pcie_h__ */
diff --git a/drivers/net/wireless/iwlwifi/pcie/rx.c b/drivers/net/wireless/iwlwifi/pcie/rx.c
new file mode 100644
index 0000000..d6860c0
--- /dev/null
+++ b/drivers/net/wireless/iwlwifi/pcie/rx.c
@@ -0,0 +1,1058 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2003 - 2012 Intel Corporation. All rights reserved.
+ *
+ * Portions of this file are derived from the ipw3945 project, as well
+ * as portions of the ieee80211 subsystem header files.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
+ *
+ * The full GNU General Public License is included in this distribution in the
+ * file called LICENSE.
+ *
+ * Contact Information:
+ *  Intel Linux Wireless <ilw@linux.intel.com>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ *
+ *****************************************************************************/
+#include <linux/sched.h>
+#include <linux/wait.h>
+#include <linux/gfp.h>
+
+#include "iwl-prph.h"
+#include "iwl-io.h"
+#include "internal.h"
+#include "iwl-op-mode.h"
+
+#ifdef CONFIG_IWLWIFI_IDI
+#include "iwl-amfh.h"
+#endif
+
+/******************************************************************************
+ *
+ * RX path functions
+ *
+ ******************************************************************************/
+
+/*
+ * Rx theory of operation
+ *
+ * Driver allocates a circular buffer of Receive Buffer Descriptors (RBDs),
+ * each of which point to Receive Buffers to be filled by the NIC.  These get
+ * used not only for Rx frames, but for any command response or notification
+ * from the NIC.  The driver and NIC manage the Rx buffers by means
+ * of indexes into the circular buffer.
+ *
+ * Rx Queue Indexes
+ * The host/firmware share two index registers for managing the Rx buffers.
+ *
+ * The READ index maps to the first position that the firmware may be writing
+ * to -- the driver can read up to (but not including) this position and get
+ * good data.
+ * The READ index is managed by the firmware once the card is enabled.
+ *
+ * The WRITE index maps to the last position the driver has read from -- the
+ * position preceding WRITE is the last slot the firmware can place a packet.
+ *
+ * The queue is empty (no good data) if WRITE = READ - 1, and is full if
+ * WRITE = READ.
+ *
+ * During initialization, the host sets up the READ queue position to the first
+ * INDEX position, and WRITE to the last (READ - 1 wrapped)
+ *
+ * When the firmware places a packet in a buffer, it will advance the READ index
+ * and fire the RX interrupt.  The driver can then query the READ index and
+ * process as many packets as possible, moving the WRITE index forward as it
+ * resets the Rx queue buffers with new memory.
+ *
+ * The management in the driver is as follows:
+ * + A list of pre-allocated SKBs is stored in iwl->rxq->rx_free.  When
+ *   iwl->rxq->free_count drops to or below RX_LOW_WATERMARK, work is scheduled
+ *   to replenish the iwl->rxq->rx_free.
+ * + In iwl_rx_replenish (scheduled) if 'processed' != 'read' then the
+ *   iwl->rxq is replenished and the READ INDEX is updated (updating the
+ *   'processed' and 'read' driver indexes as well)
+ * + A received packet is processed and handed to the kernel network stack,
+ *   detached from the iwl->rxq.  The driver 'processed' index is updated.
+ * + The Host/Firmware iwl->rxq is replenished at tasklet time from the rx_free
+ *   list. If there are no allocated buffers in iwl->rxq->rx_free, the READ
+ *   INDEX is not incremented and iwl->status(RX_STALLED) is set.  If there
+ *   were enough free buffers and RX_STALLED is set it is cleared.
+ *
+ *
+ * Driver sequence:
+ *
+ * iwl_rx_queue_alloc()   Allocates rx_free
+ * iwl_rx_replenish()     Replenishes rx_free list from rx_used, and calls
+ *                            iwl_rx_queue_restock
+ * iwl_rx_queue_restock() Moves available buffers from rx_free into Rx
+ *                            queue, updates firmware pointers, and updates
+ *                            the WRITE index.  If insufficient rx_free buffers
+ *                            are available, schedules iwl_rx_replenish
+ *
+ * -- enable interrupts --
+ * ISR - iwl_rx()         Detach iwl_rx_mem_buffers from pool up to the
+ *                            READ INDEX, detaching the SKB from the pool.
+ *                            Moves the packet buffer from queue to rx_used.
+ *                            Calls iwl_rx_queue_restock to refill any empty
+ *                            slots.
+ * ...
+ *
+ */
+
+/**
+ * iwl_rx_queue_space - Return number of free slots available in queue.
+ */
+static int iwl_rx_queue_space(const struct iwl_rx_queue *q)
+{
+	int s = q->read - q->write;
+	if (s <= 0)
+		s += RX_QUEUE_SIZE;
+	/* keep some buffer to not confuse full and empty queue */
+	s -= 2;
+	if (s < 0)
+		s = 0;
+	return s;
+}
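+
+/*
+ * Worked example with hypothetical indexes, assuming RX_QUEUE_SIZE is 256:
+ * read = 10 and write = 200 gives s = 10 - 200 = -190, then -190 + 256 = 66,
+ * and subtracting the 2 guard slots leaves 64 free slots.
+ */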
+
+/**
+ * iwl_rx_queue_update_write_ptr - Update the write pointer for the RX queue
+ */
+void iwl_rx_queue_update_write_ptr(struct iwl_trans *trans,
+				   struct iwl_rx_queue *q)
+{
+	unsigned long flags;
+	u32 reg;
+
+	spin_lock_irqsave(&q->lock, flags);
+
+	if (q->need_update == 0)
+		goto exit_unlock;
+
+	if (trans->cfg->base_params->shadow_reg_enable) {
+		/* shadow register enabled */
+		/* Device expects a multiple of 8 */
+		q->write_actual = (q->write & ~0x7);
+		iwl_write32(trans, FH_RSCSR_CHNL0_WPTR, q->write_actual);
+	} else {
+		struct iwl_trans_pcie *trans_pcie =
+			IWL_TRANS_GET_PCIE_TRANS(trans);
+
+		/* If power-saving is in use, make sure device is awake */
+		if (test_bit(STATUS_TPOWER_PMI, &trans_pcie->status)) {
+			reg = iwl_read32(trans, CSR_UCODE_DRV_GP1);
+
+			if (reg & CSR_UCODE_DRV_GP1_BIT_MAC_SLEEP) {
+				IWL_DEBUG_INFO(trans,
+					"Rx queue requesting wakeup,"
+					" GP1 = 0x%x\n", reg);
+				iwl_set_bit(trans, CSR_GP_CNTRL,
+					CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
+				goto exit_unlock;
+			}
+
+			q->write_actual = (q->write & ~0x7);
+			iwl_write_direct32(trans, FH_RSCSR_CHNL0_WPTR,
+					q->write_actual);
+
+		/* Else device is assumed to be awake */
+		} else {
+			/* Device expects a multiple of 8 */
+			q->write_actual = (q->write & ~0x7);
+			iwl_write_direct32(trans, FH_RSCSR_CHNL0_WPTR,
+				q->write_actual);
+		}
+	}
+	q->need_update = 0;
+
+ exit_unlock:
+	spin_unlock_irqrestore(&q->lock, flags);
+}
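+
+/*
+ * Illustration of the "multiple of 8" rule above (hypothetical value): with
+ * q->write = 13, q->write_actual becomes 13 & ~0x7 = 8, so the device is
+ * only told about complete groups of 8 buffers.
+ */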
+
+/**
+ * iwlagn_dma_addr2rbd_ptr - convert a DMA address to a uCode read buffer ptr
+ */
+static inline __le32 iwlagn_dma_addr2rbd_ptr(dma_addr_t dma_addr)
+{
+	return cpu_to_le32((u32)(dma_addr >> 8));
+}
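+
+/*
+ * Illustration with a hypothetical address: a page at DMA address 0x12345600
+ * is written into the RBD as 0x00123456 (the address shifted right by 8),
+ * which is also why the page must be 256-byte aligned.
+ */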
+
+/**
+ * iwlagn_rx_queue_restock - refill RX queue from pre-allocated pool
+ *
+ * If there are slots in the RX queue that need to be restocked,
+ * and we have free pre-allocated buffers, fill the ranks as much
+ * as we can, pulling from rx_free.
+ *
+ * This moves the 'write' index forward to catch up with 'processed', and
+ * also updates the memory address in the firmware to reference the new
+ * target buffer.
+ */
+static void iwlagn_rx_queue_restock(struct iwl_trans *trans)
+{
+	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+	struct iwl_rx_queue *rxq = &trans_pcie->rxq;
+	struct list_head *element;
+	struct iwl_rx_mem_buffer *rxb;
+	unsigned long flags;
+
+	spin_lock_irqsave(&rxq->lock, flags);
+	while ((iwl_rx_queue_space(rxq) > 0) && (rxq->free_count)) {
+		/* The overwritten rxb must be a used one */
+		rxb = rxq->queue[rxq->write];
+		BUG_ON(rxb && rxb->page);
+
+		/* Get next free Rx buffer, remove from free list */
+		element = rxq->rx_free.next;
+		rxb = list_entry(element, struct iwl_rx_mem_buffer, list);
+		list_del(element);
+
+		/* Point to Rx buffer via next RBD in circular buffer */
+		rxq->bd[rxq->write] = iwlagn_dma_addr2rbd_ptr(rxb->page_dma);
+		rxq->queue[rxq->write] = rxb;
+		rxq->write = (rxq->write + 1) & RX_QUEUE_MASK;
+		rxq->free_count--;
+	}
+	spin_unlock_irqrestore(&rxq->lock, flags);
+	/* If the pre-allocated buffer pool is dropping low, schedule to
+	 * refill it */
+	if (rxq->free_count <= RX_LOW_WATERMARK)
+		schedule_work(&trans_pcie->rx_replenish);
+
+
+	/* If we've added more space for the firmware to place data, tell it.
+	 * Increment device's write pointer in multiples of 8. */
+	if (rxq->write_actual != (rxq->write & ~0x7)) {
+		spin_lock_irqsave(&rxq->lock, flags);
+		rxq->need_update = 1;
+		spin_unlock_irqrestore(&rxq->lock, flags);
+		iwl_rx_queue_update_write_ptr(trans, rxq);
+	}
+}
+
+/**
+ * iwlagn_rx_allocate - move used buffers from rx_used to rx_free
+ *
+ * When moving a buffer to rx_free, a new page is allocated for it.
+ *
+ * The callers then restock the Rx queue via iwlagn_rx_queue_restock.
+ * This is called as a scheduled work item (except for during initialization)
+ */
+static void iwlagn_rx_allocate(struct iwl_trans *trans, gfp_t priority)
+{
+	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+	struct iwl_rx_queue *rxq = &trans_pcie->rxq;
+	struct list_head *element;
+	struct iwl_rx_mem_buffer *rxb;
+	struct page *page;
+	unsigned long flags;
+	gfp_t gfp_mask = priority;
+
+	while (1) {
+		spin_lock_irqsave(&rxq->lock, flags);
+		if (list_empty(&rxq->rx_used)) {
+			spin_unlock_irqrestore(&rxq->lock, flags);
+			return;
+		}
+		spin_unlock_irqrestore(&rxq->lock, flags);
+
+		if (rxq->free_count > RX_LOW_WATERMARK)
+			gfp_mask |= __GFP_NOWARN;
+
+		if (trans_pcie->rx_page_order > 0)
+			gfp_mask |= __GFP_COMP;
+
+		/* Alloc a new receive buffer */
+		page = alloc_pages(gfp_mask, trans_pcie->rx_page_order);
+		if (!page) {
+			if (net_ratelimit())
+				IWL_DEBUG_INFO(trans, "alloc_pages failed, "
+					   "order: %d\n",
+					   trans_pcie->rx_page_order);
+
+			if ((rxq->free_count <= RX_LOW_WATERMARK) &&
+			    net_ratelimit())
+				IWL_CRIT(trans, "Failed to alloc_pages with %s."
+					 "Only %u free buffers remaining.\n",
+					 priority == GFP_ATOMIC ?
+					 "GFP_ATOMIC" : "GFP_KERNEL",
+					 rxq->free_count);
+			/* We don't reschedule replenish work here -- we will
+			 * call the restock method and if it still needs
+			 * more buffers it will schedule replenish */
+			return;
+		}
+
+		spin_lock_irqsave(&rxq->lock, flags);
+
+		if (list_empty(&rxq->rx_used)) {
+			spin_unlock_irqrestore(&rxq->lock, flags);
+			__free_pages(page, trans_pcie->rx_page_order);
+			return;
+		}
+		element = rxq->rx_used.next;
+		rxb = list_entry(element, struct iwl_rx_mem_buffer, list);
+		list_del(element);
+
+		spin_unlock_irqrestore(&rxq->lock, flags);
+
+		BUG_ON(rxb->page);
+		rxb->page = page;
+		/* Get physical address of the RB */
+		rxb->page_dma =
+			dma_map_page(trans->dev, page, 0,
+				     PAGE_SIZE << trans_pcie->rx_page_order,
+				     DMA_FROM_DEVICE);
+		/* dma address must be no more than 36 bits */
+		BUG_ON(rxb->page_dma & ~DMA_BIT_MASK(36));
+		/* and also 256 byte aligned! */
+		BUG_ON(rxb->page_dma & DMA_BIT_MASK(8));
+
+		spin_lock_irqsave(&rxq->lock, flags);
+
+		list_add_tail(&rxb->list, &rxq->rx_free);
+		rxq->free_count++;
+
+		spin_unlock_irqrestore(&rxq->lock, flags);
+	}
+}
+
+void iwlagn_rx_replenish(struct iwl_trans *trans)
+{
+	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+	unsigned long flags;
+
+	iwlagn_rx_allocate(trans, GFP_KERNEL);
+
+	spin_lock_irqsave(&trans_pcie->irq_lock, flags);
+	iwlagn_rx_queue_restock(trans);
+	spin_unlock_irqrestore(&trans_pcie->irq_lock, flags);
+}
+
+static void iwlagn_rx_replenish_now(struct iwl_trans *trans)
+{
+	iwlagn_rx_allocate(trans, GFP_ATOMIC);
+
+	iwlagn_rx_queue_restock(trans);
+}
+
+void iwl_bg_rx_replenish(struct work_struct *data)
+{
+	struct iwl_trans_pcie *trans_pcie =
+	    container_of(data, struct iwl_trans_pcie, rx_replenish);
+
+	iwlagn_rx_replenish(trans_pcie->trans);
+}
+
+static void iwl_rx_handle_rxbuf(struct iwl_trans *trans,
+				struct iwl_rx_mem_buffer *rxb)
+{
+	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+	struct iwl_rx_queue *rxq = &trans_pcie->rxq;
+	struct iwl_tx_queue *txq = &trans_pcie->txq[trans_pcie->cmd_queue];
+	unsigned long flags;
+	bool page_stolen = false;
+	int max_len = PAGE_SIZE << trans_pcie->rx_page_order;
+	u32 offset = 0;
+
+	if (WARN_ON(!rxb))
+		return;
+
+	dma_unmap_page(trans->dev, rxb->page_dma, max_len, DMA_FROM_DEVICE);
+
+	while (offset + sizeof(u32) + sizeof(struct iwl_cmd_header) < max_len) {
+		struct iwl_rx_packet *pkt;
+		struct iwl_device_cmd *cmd;
+		u16 sequence;
+		bool reclaim;
+		int index, cmd_index, err, len;
+		struct iwl_rx_cmd_buffer rxcb = {
+			._offset = offset,
+			._page = rxb->page,
+			._page_stolen = false,
+			.truesize = max_len,
+		};
+
+		pkt = rxb_addr(&rxcb);
+
+		if (pkt->len_n_flags == cpu_to_le32(FH_RSCSR_FRAME_INVALID))
+			break;
+
+		IWL_DEBUG_RX(trans, "cmd at offset %d: %s (0x%.2x)\n",
+			rxcb._offset,
+			trans_pcie_get_cmd_string(trans_pcie, pkt->hdr.cmd),
+			pkt->hdr.cmd);
+
+		len = le32_to_cpu(pkt->len_n_flags) & FH_RSCSR_FRAME_SIZE_MSK;
+		len += sizeof(u32); /* account for status word */
+		trace_iwlwifi_dev_rx(trans->dev, pkt, len);
+
+		/* Reclaim a command buffer only if this packet is a response
+		 *   to a (driver-originated) command.
+		 * If the packet (e.g. Rx frame) originated from uCode,
+		 *   there is no command buffer to reclaim.
+		 * Ucode should set SEQ_RX_FRAME bit if ucode-originated,
+		 *   but apparently a few don't get set; catch them here. */
+		reclaim = !(pkt->hdr.sequence & SEQ_RX_FRAME);
+		if (reclaim) {
+			int i;
+
+			for (i = 0; i < trans_pcie->n_no_reclaim_cmds; i++) {
+				if (trans_pcie->no_reclaim_cmds[i] ==
+							pkt->hdr.cmd) {
+					reclaim = false;
+					break;
+				}
+			}
+		}
+
+		sequence = le16_to_cpu(pkt->hdr.sequence);
+		index = SEQ_TO_INDEX(sequence);
+		cmd_index = get_cmd_index(&txq->q, index);
+
+		if (reclaim)
+			cmd = txq->entries[cmd_index].cmd;
+		else
+			cmd = NULL;
+
+		err = iwl_op_mode_rx(trans->op_mode, &rxcb, cmd);
+
+		/*
+		 * After here, we should always check rxcb._page_stolen,
+		 * if it is true then one of the handlers took the page.
+		 */
+
+		if (reclaim) {
+			/* Invoke any callbacks, transfer the buffer to caller,
+			 * and fire off the (possibly) blocking
+			 * iwl_trans_send_cmd()
+			 * as we reclaim the driver command queue */
+			if (!rxcb._page_stolen)
+				iwl_tx_cmd_complete(trans, &rxcb, err);
+			else
+				IWL_WARN(trans, "Claim null rxb?\n");
+		}
+
+		page_stolen |= rxcb._page_stolen;
+		offset += ALIGN(len, FH_RSCSR_FRAME_ALIGN);
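+		/*
+		 * ALIGN() rounds the length up to the next multiple of the
+		 * frame alignment, e.g. with a 64-byte alignment
+		 * ALIGN(100, 64) == 128, so the next packet in the page
+		 * starts on an aligned boundary.
+		 */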
+	}
+
+	/* page was stolen from us -- free our reference */
+	if (page_stolen) {
+		__free_pages(rxb->page, trans_pcie->rx_page_order);
+		rxb->page = NULL;
+	}
+
+	/* Reuse the page if possible. For notification packets and
+	 * SKBs that fail to Rx correctly, add them back into the
+	 * rx_free list for reuse later. */
+	spin_lock_irqsave(&rxq->lock, flags);
+	if (rxb->page != NULL) {
+		rxb->page_dma =
+			dma_map_page(trans->dev, rxb->page, 0,
+				     PAGE_SIZE << trans_pcie->rx_page_order,
+				     DMA_FROM_DEVICE);
+		list_add_tail(&rxb->list, &rxq->rx_free);
+		rxq->free_count++;
+	} else
+		list_add_tail(&rxb->list, &rxq->rx_used);
+	spin_unlock_irqrestore(&rxq->lock, flags);
+}
+
+/**
+ * iwl_rx_handle - Main entry function for receiving responses from uCode
+ *
+ * Hands each received buffer to the op mode, which invokes the
+ * appropriate handlers, including command responses,
+ * frame-received notifications, and other notifications.
+ */
+static void iwl_rx_handle(struct iwl_trans *trans)
+{
+	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+	struct iwl_rx_queue *rxq = &trans_pcie->rxq;
+	u32 r, i;
+	u8 fill_rx = 0;
+	u32 count = 8;
+	int total_empty;
+
+	/* uCode's read index (stored in shared DRAM) indicates the last Rx
+	 * buffer that the driver may process (last buffer filled by ucode). */
+	r = le16_to_cpu(rxq->rb_stts->closed_rb_num) &  0x0FFF;
+	i = rxq->read;
+
+	/* Rx interrupt, but nothing sent from uCode */
+	if (i == r)
+		IWL_DEBUG_RX(trans, "HW = SW = %d\n", r);
+
+	/* calculate total frames need to be restock after handling RX */
+	total_empty = r - rxq->write_actual;
+	if (total_empty < 0)
+		total_empty += RX_QUEUE_SIZE;
+
+	if (total_empty > (RX_QUEUE_SIZE / 2))
+		fill_rx = 1;
+
+	while (i != r) {
+		struct iwl_rx_mem_buffer *rxb;
+
+		rxb = rxq->queue[i];
+		rxq->queue[i] = NULL;
+
+		IWL_DEBUG_RX(trans, "rxbuf: HW = %d, SW = %d (%p)\n",
+			     r, i, rxb);
+		iwl_rx_handle_rxbuf(trans, rxb);
+
+		i = (i + 1) & RX_QUEUE_MASK;
+		/* If there are a lot of unused frames,
+		 * restock the Rx queue so the ucode won't assert. */
+		if (fill_rx) {
+			count++;
+			if (count >= 8) {
+				rxq->read = i;
+				iwlagn_rx_replenish_now(trans);
+				count = 0;
+			}
+		}
+	}
+
+	/* Backtrack one entry */
+	rxq->read = i;
+	if (fill_rx)
+		iwlagn_rx_replenish_now(trans);
+	else
+		iwlagn_rx_queue_restock(trans);
+}
+
+/**
+ * iwl_irq_handle_error - called for HW or SW error interrupt from card
+ */
+static void iwl_irq_handle_error(struct iwl_trans *trans)
+{
+	/* W/A for WiFi/WiMAX coex and WiMAX own the RF */
+	if (trans->cfg->internal_wimax_coex &&
+	    (!(iwl_read_prph(trans, APMG_CLK_CTRL_REG) &
+			     APMS_CLK_VAL_MRB_FUNC_MODE) ||
+	     (iwl_read_prph(trans, APMG_PS_CTRL_REG) &
+			    APMG_PS_CTRL_VAL_RESET_REQ))) {
+		struct iwl_trans_pcie *trans_pcie =
+			IWL_TRANS_GET_PCIE_TRANS(trans);
+
+		clear_bit(STATUS_HCMD_ACTIVE, &trans_pcie->status);
+		iwl_op_mode_wimax_active(trans->op_mode);
+		wake_up(&trans->wait_command_queue);
+		return;
+	}
+
+	iwl_dump_csr(trans);
+	iwl_dump_fh(trans, NULL, false);
+
+	iwl_op_mode_nic_error(trans->op_mode);
+}
+
+/* tasklet for iwlagn interrupt */
+void iwl_irq_tasklet(struct iwl_trans *trans)
+{
+	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+	struct isr_statistics *isr_stats = &trans_pcie->isr_stats;
+	u32 inta = 0;
+	u32 handled = 0;
+	unsigned long flags;
+	u32 i;
+#ifdef CONFIG_IWLWIFI_DEBUG
+	u32 inta_mask;
+#endif
+
+	spin_lock_irqsave(&trans_pcie->irq_lock, flags);
+
+	/* Ack/clear/reset pending uCode interrupts.
+	 * Note:  Some bits in CSR_INT are "OR" of bits in CSR_FH_INT_STATUS,
+	 */
+	/* There is a hardware bug in the interrupt mask function that some
+	 * interrupts (i.e. CSR_INT_BIT_SCD) can still be generated even if
+	 * they are disabled in the CSR_INT_MASK register. Furthermore the
+	 * ICT interrupt handling mechanism has another bug that might cause
+	 * these unmasked interrupts to go undetected. We work around the
+	 * hardware bugs here by ACKing all the possible interrupts so that
+	 * interrupt coalescing can still be achieved.
+	 */
+	iwl_write32(trans, CSR_INT,
+		    trans_pcie->inta | ~trans_pcie->inta_mask);
+
+	inta = trans_pcie->inta;
+
+#ifdef CONFIG_IWLWIFI_DEBUG
+	if (iwl_have_debug_level(IWL_DL_ISR)) {
+		/* just for debug */
+		inta_mask = iwl_read32(trans, CSR_INT_MASK);
+		IWL_DEBUG_ISR(trans, "inta 0x%08x, enabled 0x%08x\n",
+			      inta, inta_mask);
+	}
+#endif
+
+	/* the interrupt is saved in inta, so we can now reset trans_pcie->inta */
+	trans_pcie->inta = 0;
+
+	spin_unlock_irqrestore(&trans_pcie->irq_lock, flags);
+
+	/* Now service all interrupt bits discovered above. */
+	if (inta & CSR_INT_BIT_HW_ERR) {
+		IWL_ERR(trans, "Hardware error detected.  Restarting.\n");
+
+		/* Tell the device to stop sending interrupts */
+		iwl_disable_interrupts(trans);
+
+		isr_stats->hw++;
+		iwl_irq_handle_error(trans);
+
+		handled |= CSR_INT_BIT_HW_ERR;
+
+		return;
+	}
+
+#ifdef CONFIG_IWLWIFI_DEBUG
+	if (iwl_have_debug_level(IWL_DL_ISR)) {
+		/* NIC fires this, but we don't use it, redundant with WAKEUP */
+		if (inta & CSR_INT_BIT_SCD) {
+			IWL_DEBUG_ISR(trans, "Scheduler finished to transmit "
+				      "the frame/frames.\n");
+			isr_stats->sch++;
+		}
+
+		/* Alive notification via Rx interrupt will do the real work */
+		if (inta & CSR_INT_BIT_ALIVE) {
+			IWL_DEBUG_ISR(trans, "Alive interrupt\n");
+			isr_stats->alive++;
+		}
+	}
+#endif
+	/* Safely ignore these bits for debug checks below */
+	inta &= ~(CSR_INT_BIT_SCD | CSR_INT_BIT_ALIVE);
+
+	/* HW RF KILL switch toggled */
+	if (inta & CSR_INT_BIT_RF_KILL) {
+		bool hw_rfkill;
+
+		hw_rfkill = iwl_is_rfkill_set(trans);
+		IWL_WARN(trans, "RF_KILL bit toggled to %s.\n",
+			 hw_rfkill ? "disable radio" : "enable radio");
+
+		isr_stats->rfkill++;
+
+		iwl_op_mode_hw_rf_kill(trans->op_mode, hw_rfkill);
+
+		handled |= CSR_INT_BIT_RF_KILL;
+	}
+
+	/* Chip got too hot and stopped itself */
+	if (inta & CSR_INT_BIT_CT_KILL) {
+		IWL_ERR(trans, "Microcode CT kill error detected.\n");
+		isr_stats->ctkill++;
+		handled |= CSR_INT_BIT_CT_KILL;
+	}
+
+	/* Error detected by uCode */
+	if (inta & CSR_INT_BIT_SW_ERR) {
+		IWL_ERR(trans, "Microcode SW error detected. "
+			" Restarting 0x%X.\n", inta);
+		isr_stats->sw++;
+		iwl_irq_handle_error(trans);
+		handled |= CSR_INT_BIT_SW_ERR;
+	}
+
+	/* uCode wakes up after power-down sleep */
+	if (inta & CSR_INT_BIT_WAKEUP) {
+		IWL_DEBUG_ISR(trans, "Wakeup interrupt\n");
+		iwl_rx_queue_update_write_ptr(trans, &trans_pcie->rxq);
+		for (i = 0; i < trans->cfg->base_params->num_of_queues; i++)
+			iwl_txq_update_write_ptr(trans,
+						 &trans_pcie->txq[i]);
+
+		isr_stats->wakeup++;
+
+		handled |= CSR_INT_BIT_WAKEUP;
+	}
+
+	/* All uCode command responses, including Tx command responses,
+	 * Rx "responses" (frame-received notification), and other
+	 * notifications from uCode come through here */
+	if (inta & (CSR_INT_BIT_FH_RX | CSR_INT_BIT_SW_RX |
+		    CSR_INT_BIT_RX_PERIODIC)) {
+		IWL_DEBUG_ISR(trans, "Rx interrupt\n");
+		if (inta & (CSR_INT_BIT_FH_RX | CSR_INT_BIT_SW_RX)) {
+			handled |= (CSR_INT_BIT_FH_RX | CSR_INT_BIT_SW_RX);
+			iwl_write32(trans, CSR_FH_INT_STATUS,
+					CSR_FH_INT_RX_MASK);
+		}
+		if (inta & CSR_INT_BIT_RX_PERIODIC) {
+			handled |= CSR_INT_BIT_RX_PERIODIC;
+			iwl_write32(trans,
+				CSR_INT, CSR_INT_BIT_RX_PERIODIC);
+		}
+		/* Sending an RX interrupt requires many steps to be done in
+		 * the device:
+		 * 1- write interrupt to current index in ICT table.
+		 * 2- dma RX frame.
+		 * 3- update RX shared data to indicate last write index.
+		 * 4- send interrupt.
+		 * This could lead to an RX race: the driver could receive an
+		 * RX interrupt but the shared data changes do not reflect
+		 * this; the periodic interrupt will detect any dangling Rx
+		 * activity.
+		 */
+
+		/* Disable periodic interrupt; we use it as just a one-shot. */
+		iwl_write8(trans, CSR_INT_PERIODIC_REG,
+			    CSR_INT_PERIODIC_DIS);
+#ifdef CONFIG_IWLWIFI_IDI
+		iwl_amfh_rx_handler();
+#else
+		iwl_rx_handle(trans);
+#endif
+		/*
+		 * Enable periodic interrupt in 8 msec only if we received
+		 * real RX interrupt (instead of just periodic int), to catch
+		 * any dangling Rx interrupt.  If it was just the periodic
+		 * interrupt, there was no dangling Rx activity, and no need
+		 * to extend the periodic interrupt; one-shot is enough.
+		 */
+		if (inta & (CSR_INT_BIT_FH_RX | CSR_INT_BIT_SW_RX))
+			iwl_write8(trans, CSR_INT_PERIODIC_REG,
+				   CSR_INT_PERIODIC_ENA);
+
+		isr_stats->rx++;
+	}
+
+	/* This "Tx" DMA channel is used only for loading uCode */
+	if (inta & CSR_INT_BIT_FH_TX) {
+		iwl_write32(trans, CSR_FH_INT_STATUS, CSR_FH_INT_TX_MASK);
+		IWL_DEBUG_ISR(trans, "uCode load interrupt\n");
+		isr_stats->tx++;
+		handled |= CSR_INT_BIT_FH_TX;
+		/* Wake up uCode load routine, now that load is complete */
+		trans_pcie->ucode_write_complete = true;
+		wake_up(&trans_pcie->ucode_write_waitq);
+	}
+
+	if (inta & ~handled) {
+		IWL_ERR(trans, "Unhandled INTA bits 0x%08x\n", inta & ~handled);
+		isr_stats->unhandled++;
+	}
+
+	if (inta & ~(trans_pcie->inta_mask)) {
+		IWL_WARN(trans, "Disabled INTA bits 0x%08x were pending\n",
+			 inta & ~trans_pcie->inta_mask);
+	}
+
+	/* Re-enable all interrupts */
+	/* only Re-enable if disabled by irq */
+	if (test_bit(STATUS_INT_ENABLED, &trans_pcie->status))
+		iwl_enable_interrupts(trans);
+	/* Re-enable RF_KILL if it occurred */
+	else if (handled & CSR_INT_BIT_RF_KILL)
+		iwl_enable_rfkill_int(trans);
+}
+
+/******************************************************************************
+ *
+ * ICT functions
+ *
+ ******************************************************************************/
+
+/* a device (PCI-E) page is 4096 bytes long */
+#define ICT_SHIFT	12
+#define ICT_SIZE	(1 << ICT_SHIFT)
+#define ICT_COUNT	(ICT_SIZE / sizeof(u32))
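+
+/*
+ * With ICT_SHIFT = 12 this gives ICT_SIZE = 4096 bytes and
+ * ICT_COUNT = 4096 / sizeof(u32) = 1024 table entries.
+ */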
+
+/* Free dram table */
+void iwl_free_isr_ict(struct iwl_trans *trans)
+{
+	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+
+	if (trans_pcie->ict_tbl) {
+		dma_free_coherent(trans->dev, ICT_SIZE,
+				  trans_pcie->ict_tbl,
+				  trans_pcie->ict_tbl_dma);
+		trans_pcie->ict_tbl = NULL;
+		trans_pcie->ict_tbl_dma = 0;
+	}
+}
+
+
+/*
+ * Allocate the DRAM shared table; it is an aligned memory
+ * block of ICT_SIZE.
+ * Also reset all data related to the ICT table interrupt.
+ */
+int iwl_alloc_isr_ict(struct iwl_trans *trans)
+{
+	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+
+	trans_pcie->ict_tbl =
+		dma_alloc_coherent(trans->dev, ICT_SIZE,
+				   &trans_pcie->ict_tbl_dma,
+				   GFP_KERNEL);
+	if (!trans_pcie->ict_tbl)
+		return -ENOMEM;
+
+	/* just an API sanity check ... it is guaranteed to be aligned */
+	if (WARN_ON(trans_pcie->ict_tbl_dma & (ICT_SIZE - 1))) {
+		iwl_free_isr_ict(trans);
+		return -EINVAL;
+	}
+
+	IWL_DEBUG_ISR(trans, "ict dma addr %Lx\n",
+		      (unsigned long long)trans_pcie->ict_tbl_dma);
+
+	IWL_DEBUG_ISR(trans, "ict vir addr %p\n", trans_pcie->ict_tbl);
+
+	/* reset table and index to all 0 */
+	memset(trans_pcie->ict_tbl, 0, ICT_SIZE);
+	trans_pcie->ict_index = 0;
+
+	/* add periodic RX interrupt */
+	trans_pcie->inta_mask |= CSR_INT_BIT_RX_PERIODIC;
+	return 0;
+}
+
+/* Device is going up: inform it that it should use the ICT interrupt table,
+ * and also tell the driver to start using the ICT interrupt.
+ */
+void iwl_reset_ict(struct iwl_trans *trans)
+{
+	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+	u32 val;
+	unsigned long flags;
+
+	if (!trans_pcie->ict_tbl)
+		return;
+
+	spin_lock_irqsave(&trans_pcie->irq_lock, flags);
+	iwl_disable_interrupts(trans);
+
+	memset(trans_pcie->ict_tbl, 0, ICT_SIZE);
+
+	val = trans_pcie->ict_tbl_dma >> ICT_SHIFT;
+
+	val |= CSR_DRAM_INT_TBL_ENABLE;
+	val |= CSR_DRAM_INIT_TBL_WRAP_CHECK;
+
+	IWL_DEBUG_ISR(trans, "CSR_DRAM_INT_TBL_REG =0x%x\n", val);
+
+	iwl_write32(trans, CSR_DRAM_INT_TBL_REG, val);
+	trans_pcie->use_ict = true;
+	trans_pcie->ict_index = 0;
+	iwl_write32(trans, CSR_INT, trans_pcie->inta_mask);
+	iwl_enable_interrupts(trans);
+	spin_unlock_irqrestore(&trans_pcie->irq_lock, flags);
+}
+
+/* Device is going down: disable ICT interrupt usage */
+void iwl_disable_ict(struct iwl_trans *trans)
+{
+	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+	unsigned long flags;
+
+	spin_lock_irqsave(&trans_pcie->irq_lock, flags);
+	trans_pcie->use_ict = false;
+	spin_unlock_irqrestore(&trans_pcie->irq_lock, flags);
+}
+
+static irqreturn_t iwl_isr(int irq, void *data)
+{
+	struct iwl_trans *trans = data;
+	struct iwl_trans_pcie *trans_pcie;
+	u32 inta, inta_mask;
+	unsigned long flags;
+#ifdef CONFIG_IWLWIFI_DEBUG
+	u32 inta_fh;
+#endif
+	if (!trans)
+		return IRQ_NONE;
+
+	trace_iwlwifi_dev_irq(trans->dev);
+
+	trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+
+	spin_lock_irqsave(&trans_pcie->irq_lock, flags);
+
+	/* Disable (but don't clear!) interrupts here to avoid
+	 *    back-to-back ISRs and sporadic interrupts from our NIC.
+	 * If we have something to service, the tasklet will re-enable ints.
+	 * If we *don't* have something, we'll re-enable before leaving here. */
+	inta_mask = iwl_read32(trans, CSR_INT_MASK);  /* just for debug */
+	iwl_write32(trans, CSR_INT_MASK, 0x00000000);
+
+	/* Discover which interrupts are active/pending */
+	inta = iwl_read32(trans, CSR_INT);
+
+	/* Ignore interrupt if there's nothing in NIC to service.
+	 * This may be due to IRQ shared with another device,
+	 * or due to sporadic interrupts thrown from our NIC. */
+	if (!inta) {
+		IWL_DEBUG_ISR(trans, "Ignore interrupt, inta == 0\n");
+		goto none;
+	}
+
+	if ((inta == 0xFFFFFFFF) || ((inta & 0xFFFFFFF0) == 0xa5a5a5a0)) {
+		/* Hardware disappeared. It might have already raised
+		 * an interrupt */
+		IWL_WARN(trans, "HARDWARE GONE?? INTA == 0x%08x\n", inta);
+		goto unplugged;
+	}
+
+#ifdef CONFIG_IWLWIFI_DEBUG
+	if (iwl_have_debug_level(IWL_DL_ISR)) {
+		inta_fh = iwl_read32(trans, CSR_FH_INT_STATUS);
+		IWL_DEBUG_ISR(trans, "ISR inta 0x%08x, enabled 0x%08x, "
+			      "fh 0x%08x\n", inta, inta_mask, inta_fh);
+	}
+#endif
+
+	trans_pcie->inta |= inta;
+	/* iwl_irq_tasklet() will service interrupts and re-enable them */
+	if (likely(inta))
+		tasklet_schedule(&trans_pcie->irq_tasklet);
+	else if (test_bit(STATUS_INT_ENABLED, &trans_pcie->status) &&
+		 !trans_pcie->inta)
+		iwl_enable_interrupts(trans);
+
+ unplugged:
+	spin_unlock_irqrestore(&trans_pcie->irq_lock, flags);
+	return IRQ_HANDLED;
+
+ none:
+	/* re-enable interrupts here since we don't have anything to service. */
+	/* only re-enable if disabled by irq and no tasklet was scheduled. */
+	if (test_bit(STATUS_INT_ENABLED, &trans_pcie->status) &&
+	    !trans_pcie->inta)
+		iwl_enable_interrupts(trans);
+
+	spin_unlock_irqrestore(&trans_pcie->irq_lock, flags);
+	return IRQ_NONE;
+}
+
+/* Interrupt handler using the ICT table. With this handler the driver stops
+ * using the INTA register to get the device's interrupts, since reading that
+ * register is expensive. Instead, the device writes its interrupts into the
+ * ICT DRAM table, increments its index and fires an interrupt to the driver.
+ * The driver ORs all ICT table entries from the current index up to the first
+ * entry with a 0 value; the result is the interrupt we need to service. The
+ * driver then sets those entries back to 0 and updates the index.
+ */
+irqreturn_t iwl_isr_ict(int irq, void *data)
+{
+	struct iwl_trans *trans = data;
+	struct iwl_trans_pcie *trans_pcie;
+	u32 inta, inta_mask;
+	u32 val = 0;
+	u32 read;
+	unsigned long flags;
+
+	if (!trans)
+		return IRQ_NONE;
+
+	trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+
+	/* dram interrupt table not set yet,
+	 * use legacy interrupt.
+	 */
+	if (!trans_pcie->use_ict)
+		return iwl_isr(irq, data);
+
+	trace_iwlwifi_dev_irq(trans->dev);
+
+	spin_lock_irqsave(&trans_pcie->irq_lock, flags);
+
+	/* Disable (but don't clear!) interrupts here to avoid
+	 * back-to-back ISRs and sporadic interrupts from our NIC.
+	 * If we have something to service, the tasklet will re-enable ints.
+	 * If we *don't* have something, we'll re-enable before leaving here.
+	 */
+	inta_mask = iwl_read32(trans, CSR_INT_MASK);  /* just for debug */
+	iwl_write32(trans, CSR_INT_MASK, 0x00000000);
+
+
+	/* Ignore interrupt if there's nothing in NIC to service.
+	 * This may be due to IRQ shared with another device,
+	 * or due to sporadic interrupts thrown from our NIC. */
+	read = le32_to_cpu(trans_pcie->ict_tbl[trans_pcie->ict_index]);
+	trace_iwlwifi_dev_ict_read(trans->dev, trans_pcie->ict_index, read);
+	if (!read) {
+		IWL_DEBUG_ISR(trans, "Ignore interrupt, inta == 0\n");
+		goto none;
+	}
+
+	/*
+	 * Collect all entries up to the first 0, starting from ict_index;
+	 * note we already read at ict_index.
+	 */
+	do {
+		val |= read;
+		IWL_DEBUG_ISR(trans, "ICT index %d value 0x%08X\n",
+				trans_pcie->ict_index, read);
+		trans_pcie->ict_tbl[trans_pcie->ict_index] = 0;
+		trans_pcie->ict_index =
+			iwl_queue_inc_wrap(trans_pcie->ict_index, ICT_COUNT);
+
+		read = le32_to_cpu(trans_pcie->ict_tbl[trans_pcie->ict_index]);
+		trace_iwlwifi_dev_ict_read(trans->dev, trans_pcie->ict_index,
+					   read);
+	} while (read);
+
+	/* We should not get this value, just ignore it. */
+	if (val == 0xffffffff)
+		val = 0;
+
+	/*
+	 * this is a w/a for a h/w bug. the h/w bug may cause the Rx bit
+	 * (bit 15 before shifting it to 31) to clear when using interrupt
+	 * coalescing. fortunately, bits 18 and 19 stay set when this happens
+	 * so we use them to decide on the real state of the Rx bit.
+	 * In other words, bit 15 is set if bit 18 or bit 19 is set.
+	 */
+	if (val & 0xC0000)
+		val |= 0x8000;
+
+	inta = (0xff & val) | ((0xff00 & val) << 16);
+	IWL_DEBUG_ISR(trans, "ISR inta 0x%08x, enabled 0x%08x ict 0x%08x\n",
+		      inta, inta_mask, val);
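+
+	/*
+	 * Illustration of the packing above (hypothetical value): the low
+	 * byte of val carries CSR_INT bits 0-7 and the next byte carries
+	 * CSR_INT bits 24-31, so val = 0x8002 yields
+	 * inta = 0x02 | (0x8000 << 16) = 0x80000002.
+	 */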
+
+	inta &= trans_pcie->inta_mask;
+	trans_pcie->inta |= inta;
+
+	/* iwl_irq_tasklet() will service interrupts and re-enable them */
+	if (likely(inta))
+		tasklet_schedule(&trans_pcie->irq_tasklet);
+	else if (test_bit(STATUS_INT_ENABLED, &trans_pcie->status) &&
+		 !trans_pcie->inta) {
+		/* Re-enable interrupts only if they were disabled by this
+		 * handler and no tasklet was scheduled; otherwise the
+		 * tasklet will re-enable them.
+		 */
+		iwl_enable_interrupts(trans);
+	}
+
+	spin_unlock_irqrestore(&trans_pcie->irq_lock, flags);
+	return IRQ_HANDLED;
+
+ none:
+	/* re-enable interrupts here since we don't have anything to service.
+	 * only Re-enable if disabled by irq.
+	 */
+	if (test_bit(STATUS_INT_ENABLED, &trans_pcie->status) &&
+	    !trans_pcie->inta)
+		iwl_enable_interrupts(trans);
+
+	spin_unlock_irqrestore(&trans_pcie->irq_lock, flags);
+	return IRQ_NONE;
+}
diff --git a/drivers/net/wireless/iwlwifi/pcie/trans.c b/drivers/net/wireless/iwlwifi/pcie/trans.c
new file mode 100644
index 0000000..e8a04a1
--- /dev/null
+++ b/drivers/net/wireless/iwlwifi/pcie/trans.c
@@ -0,0 +1,2178 @@
+/******************************************************************************
+ *
+ * This file is provided under a dual BSD/GPLv2 license.  When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * Copyright(c) 2007 - 2012 Intel Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
+ * USA
+ *
+ * The full GNU General Public License is included in this distribution
+ * in the file called LICENSE.GPL.
+ *
+ * Contact Information:
+ *  Intel Linux Wireless <ilw@linux.intel.com>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ *
+ * BSD LICENSE
+ *
+ * Copyright(c) 2005 - 2012 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ *  * Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ *  * Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in
+ *    the documentation and/or other materials provided with the
+ *    distribution.
+ *  * Neither the name Intel Corporation nor the names of its
+ *    contributors may be used to endorse or promote products derived
+ *    from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ *****************************************************************************/
+#include <linux/pci.h>
+#include <linux/pci-aspm.h>
+#include <linux/interrupt.h>
+#include <linux/debugfs.h>
+#include <linux/sched.h>
+#include <linux/bitops.h>
+#include <linux/gfp.h>
+
+#include "iwl-drv.h"
+#include "iwl-trans.h"
+#include "iwl-csr.h"
+#include "iwl-prph.h"
+#include "iwl-eeprom.h"
+#include "iwl-agn-hw.h"
+#include "internal.h"
+/* FIXME: need to abstract out TX command (once we know what it looks like) */
+#include "iwl-commands.h"
+
+#define SCD_QUEUECHAIN_SEL_ALL(trans, trans_pcie)	\
+	(((1<<trans->cfg->base_params->num_of_queues) - 1) &\
+	(~(1<<(trans_pcie)->cmd_queue)))
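+
+/*
+ * Example with hypothetical values: with num_of_queues = 20 and cmd_queue = 4,
+ * SCD_QUEUECHAIN_SEL_ALL evaluates to ((1 << 20) - 1) & ~(1 << 4)
+ * = 0xfffff & ~0x10 = 0xfffef, i.e. all queues except the command queue.
+ */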
+
+static int iwl_trans_rx_alloc(struct iwl_trans *trans)
+{
+	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+	struct iwl_rx_queue *rxq = &trans_pcie->rxq;
+	struct device *dev = trans->dev;
+
+	memset(&trans_pcie->rxq, 0, sizeof(trans_pcie->rxq));
+
+	spin_lock_init(&rxq->lock);
+
+	if (WARN_ON(rxq->bd || rxq->rb_stts))
+		return -EINVAL;
+
+	/* Allocate the circular buffer of Read Buffer Descriptors (RBDs) */
+	rxq->bd = dma_zalloc_coherent(dev, sizeof(__le32) * RX_QUEUE_SIZE,
+				      &rxq->bd_dma, GFP_KERNEL);
+	if (!rxq->bd)
+		goto err_bd;
+
+	/* Allocate the driver's pointer to receive buffer status */
+	rxq->rb_stts = dma_zalloc_coherent(dev, sizeof(*rxq->rb_stts),
+					   &rxq->rb_stts_dma, GFP_KERNEL);
+	if (!rxq->rb_stts)
+		goto err_rb_stts;
+
+	return 0;
+
+err_rb_stts:
+	dma_free_coherent(dev, sizeof(__le32) * RX_QUEUE_SIZE,
+			  rxq->bd, rxq->bd_dma);
+	memset(&rxq->bd_dma, 0, sizeof(rxq->bd_dma));
+	rxq->bd = NULL;
+err_bd:
+	return -ENOMEM;
+}
+
+static void iwl_trans_rxq_free_rx_bufs(struct iwl_trans *trans)
+{
+	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+	struct iwl_rx_queue *rxq = &trans_pcie->rxq;
+	int i;
+
+	/* Fill the rx_used queue with _all_ of the Rx buffers */
+	for (i = 0; i < RX_FREE_BUFFERS + RX_QUEUE_SIZE; i++) {
+		/* In the reset function, these buffers may have been allocated
+		 * to an SKB, so we need to unmap and free potential storage */
+		if (rxq->pool[i].page != NULL) {
+			dma_unmap_page(trans->dev, rxq->pool[i].page_dma,
+				       PAGE_SIZE << trans_pcie->rx_page_order,
+				       DMA_FROM_DEVICE);
+			__free_pages(rxq->pool[i].page,
+				     trans_pcie->rx_page_order);
+			rxq->pool[i].page = NULL;
+		}
+		list_add_tail(&rxq->pool[i].list, &rxq->rx_used);
+	}
+}
+
+static void iwl_trans_rx_hw_init(struct iwl_trans *trans,
+				 struct iwl_rx_queue *rxq)
+{
+	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+	u32 rb_size;
+	const u32 rfdnlog = RX_QUEUE_SIZE_LOG; /* 256 RBDs */
+	u32 rb_timeout = RX_RB_TIMEOUT; /* FIXME: RX_RB_TIMEOUT for all devices? */
+
+	if (trans_pcie->rx_buf_size_8k)
+		rb_size = FH_RCSR_RX_CONFIG_REG_VAL_RB_SIZE_8K;
+	else
+		rb_size = FH_RCSR_RX_CONFIG_REG_VAL_RB_SIZE_4K;
+
+	/* Stop Rx DMA */
+	iwl_write_direct32(trans, FH_MEM_RCSR_CHNL0_CONFIG_REG, 0);
+
+	/* Reset driver's Rx queue write index */
+	iwl_write_direct32(trans, FH_RSCSR_CHNL0_RBDCB_WPTR_REG, 0);
+
+	/* Tell device where to find RBD circular buffer in DRAM */
+	iwl_write_direct32(trans, FH_RSCSR_CHNL0_RBDCB_BASE_REG,
+			   (u32)(rxq->bd_dma >> 8));
+
+	/* Tell device where in DRAM to update its Rx status */
+	iwl_write_direct32(trans, FH_RSCSR_CHNL0_STTS_WPTR_REG,
+			   rxq->rb_stts_dma >> 4);
+
+	/* Enable Rx DMA
+	 * FH_RCSR_CHNL0_RX_IGNORE_RXF_EMPTY is set because of HW bug in
+	 *      the credit mechanism in 5000 HW RX FIFO
+	 * Direct rx interrupts to hosts
+	 * Rx buffer size 4 or 8k
+	 * RB timeout 0x10
+	 * 256 RBDs
+	 */
+	iwl_write_direct32(trans, FH_MEM_RCSR_CHNL0_CONFIG_REG,
+			   FH_RCSR_RX_CONFIG_CHNL_EN_ENABLE_VAL |
+			   FH_RCSR_CHNL0_RX_IGNORE_RXF_EMPTY |
+			   FH_RCSR_CHNL0_RX_CONFIG_IRQ_DEST_INT_HOST_VAL |
+			   rb_size|
+			   (rb_timeout << FH_RCSR_RX_CONFIG_REG_IRQ_RBTH_POS)|
+			   (rfdnlog << FH_RCSR_RX_CONFIG_RBDCB_SIZE_POS));
+
+	/* Set interrupt coalescing timer to default (2048 usecs) */
+	iwl_write8(trans, CSR_INT_COALESCING, IWL_HOST_INT_TIMEOUT_DEF);
+}
+
+static int iwl_rx_init(struct iwl_trans *trans)
+{
+	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+	struct iwl_rx_queue *rxq = &trans_pcie->rxq;
+
+	int i, err;
+	unsigned long flags;
+
+	if (!rxq->bd) {
+		err = iwl_trans_rx_alloc(trans);
+		if (err)
+			return err;
+	}
+
+	spin_lock_irqsave(&rxq->lock, flags);
+	INIT_LIST_HEAD(&rxq->rx_free);
+	INIT_LIST_HEAD(&rxq->rx_used);
+
+	iwl_trans_rxq_free_rx_bufs(trans);
+
+	for (i = 0; i < RX_QUEUE_SIZE; i++)
+		rxq->queue[i] = NULL;
+
+	/* Set us so that we have processed and used all buffers, but have
+	 * not restocked the Rx queue with fresh buffers */
+	rxq->read = rxq->write = 0;
+	rxq->write_actual = 0;
+	rxq->free_count = 0;
+	spin_unlock_irqrestore(&rxq->lock, flags);
+
+	iwlagn_rx_replenish(trans);
+
+	iwl_trans_rx_hw_init(trans, rxq);
+
+	spin_lock_irqsave(&trans_pcie->irq_lock, flags);
+	rxq->need_update = 1;
+	iwl_rx_queue_update_write_ptr(trans, rxq);
+	spin_unlock_irqrestore(&trans_pcie->irq_lock, flags);
+
+	return 0;
+}
+
+static void iwl_trans_pcie_rx_free(struct iwl_trans *trans)
+{
+	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+	struct iwl_rx_queue *rxq = &trans_pcie->rxq;
+	unsigned long flags;
+
+	/* if rxq->bd is NULL, it means that nothing has been allocated,
+	 * exit now */
+	if (!rxq->bd) {
+		IWL_DEBUG_INFO(trans, "Free NULL rx context\n");
+		return;
+	}
+
+	spin_lock_irqsave(&rxq->lock, flags);
+	iwl_trans_rxq_free_rx_bufs(trans);
+	spin_unlock_irqrestore(&rxq->lock, flags);
+
+	dma_free_coherent(trans->dev, sizeof(__le32) * RX_QUEUE_SIZE,
+			  rxq->bd, rxq->bd_dma);
+	memset(&rxq->bd_dma, 0, sizeof(rxq->bd_dma));
+	rxq->bd = NULL;
+
+	if (rxq->rb_stts)
+		dma_free_coherent(trans->dev,
+				  sizeof(struct iwl_rb_status),
+				  rxq->rb_stts, rxq->rb_stts_dma);
+	else
+		IWL_DEBUG_INFO(trans, "Free rxq->rb_stts which is NULL\n");
+	memset(&rxq->rb_stts_dma, 0, sizeof(rxq->rb_stts_dma));
+	rxq->rb_stts = NULL;
+}
+
+static int iwl_trans_rx_stop(struct iwl_trans *trans)
+{
+
+	/* stop Rx DMA */
+	iwl_write_direct32(trans, FH_MEM_RCSR_CHNL0_CONFIG_REG, 0);
+	return iwl_poll_direct_bit(trans, FH_MEM_RSSR_RX_STATUS_REG,
+				   FH_RSSR_CHNL0_RX_STATUS_CHNL_IDLE, 1000);
+}
+
+static int iwlagn_alloc_dma_ptr(struct iwl_trans *trans,
+				struct iwl_dma_ptr *ptr, size_t size)
+{
+	if (WARN_ON(ptr->addr))
+		return -EINVAL;
+
+	ptr->addr = dma_alloc_coherent(trans->dev, size,
+				       &ptr->dma, GFP_KERNEL);
+	if (!ptr->addr)
+		return -ENOMEM;
+	ptr->size = size;
+	return 0;
+}
+
+static void iwlagn_free_dma_ptr(struct iwl_trans *trans,
+				struct iwl_dma_ptr *ptr)
+{
+	if (unlikely(!ptr->addr))
+		return;
+
+	dma_free_coherent(trans->dev, ptr->size, ptr->addr, ptr->dma);
+	memset(ptr, 0, sizeof(*ptr));
+}
+
+static void iwl_trans_pcie_queue_stuck_timer(unsigned long data)
+{
+	struct iwl_tx_queue *txq = (void *)data;
+	struct iwl_trans_pcie *trans_pcie = txq->trans_pcie;
+	struct iwl_trans *trans = iwl_trans_pcie_get_trans(trans_pcie);
+
+	spin_lock(&txq->lock);
+	/* check if triggered erroneously */
+	if (txq->q.read_ptr == txq->q.write_ptr) {
+		spin_unlock(&txq->lock);
+		return;
+	}
+	spin_unlock(&txq->lock);
+
+
+	IWL_ERR(trans, "Queue %d stuck for %u ms.\n", txq->q.id,
+		jiffies_to_msecs(trans_pcie->wd_timeout));
+	IWL_ERR(trans, "Current SW read_ptr %d write_ptr %d\n",
+		txq->q.read_ptr, txq->q.write_ptr);
+	IWL_ERR(trans, "Current HW read_ptr %d write_ptr %d\n",
+		iwl_read_prph(trans, SCD_QUEUE_RDPTR(txq->q.id))
+					& (TFD_QUEUE_SIZE_MAX - 1),
+		iwl_read_prph(trans, SCD_QUEUE_WRPTR(txq->q.id)));
+
+	iwl_op_mode_nic_error(trans->op_mode);
+}
+
+static int iwl_trans_txq_alloc(struct iwl_trans *trans,
+			       struct iwl_tx_queue *txq, int slots_num,
+			       u32 txq_id)
+{
+	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+	size_t tfd_sz = sizeof(struct iwl_tfd) * TFD_QUEUE_SIZE_MAX;
+	int i;
+
+	if (WARN_ON(txq->entries || txq->tfds))
+		return -EINVAL;
+
+	setup_timer(&txq->stuck_timer, iwl_trans_pcie_queue_stuck_timer,
+		    (unsigned long)txq);
+	txq->trans_pcie = trans_pcie;
+
+	txq->q.n_window = slots_num;
+
+	txq->entries = kcalloc(slots_num,
+			       sizeof(struct iwl_pcie_tx_queue_entry),
+			       GFP_KERNEL);
+
+	if (!txq->entries)
+		goto error;
+
+	if (txq_id == trans_pcie->cmd_queue)
+		for (i = 0; i < slots_num; i++) {
+			txq->entries[i].cmd =
+				kmalloc(sizeof(struct iwl_device_cmd),
+					GFP_KERNEL);
+			if (!txq->entries[i].cmd)
+				goto error;
+		}
+
+	/* Circular buffer of transmit frame descriptors (TFDs),
+	 * shared with device */
+	txq->tfds = dma_alloc_coherent(trans->dev, tfd_sz,
+				       &txq->q.dma_addr, GFP_KERNEL);
+	if (!txq->tfds) {
+		IWL_ERR(trans, "dma_alloc_coherent(%zd) failed\n", tfd_sz);
+		goto error;
+	}
+	txq->q.id = txq_id;
+
+	return 0;
+error:
+	if (txq->entries && txq_id == trans_pcie->cmd_queue)
+		for (i = 0; i < slots_num; i++)
+			kfree(txq->entries[i].cmd);
+	kfree(txq->entries);
+	txq->entries = NULL;
+
+	return -ENOMEM;
+
+}
+
+static int iwl_trans_txq_init(struct iwl_trans *trans, struct iwl_tx_queue *txq,
+			      int slots_num, u32 txq_id)
+{
+	int ret;
+
+	txq->need_update = 0;
+
+	/* TFD_QUEUE_SIZE_MAX must be power-of-two size, otherwise
+	 * iwl_queue_inc_wrap and iwl_queue_dec_wrap are broken. */
+	BUILD_BUG_ON(TFD_QUEUE_SIZE_MAX & (TFD_QUEUE_SIZE_MAX - 1));
+
+	/* Initialize queue's high/low-water marks, and head/tail indexes */
+	ret = iwl_queue_init(&txq->q, TFD_QUEUE_SIZE_MAX, slots_num,
+			txq_id);
+	if (ret)
+		return ret;
+
+	spin_lock_init(&txq->lock);
+
+	/*
+	 * Tell NIC where to find circular buffer of Tx Frame Descriptors for
+	 * given Tx queue, and enable the DMA channel used for that queue.
+	 * Circular buffer (TFD queue in DRAM) physical base address:
+	 */
+	iwl_write_direct32(trans, FH_MEM_CBBC_QUEUE(txq_id),
+			     txq->q.dma_addr >> 8);
+
+	return 0;
+}
+
+/**
+ * iwl_tx_queue_unmap -  Unmap any remaining DMA mappings and free skb's
+ */
+static void iwl_tx_queue_unmap(struct iwl_trans *trans, int txq_id)
+{
+	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+	struct iwl_tx_queue *txq = &trans_pcie->txq[txq_id];
+	struct iwl_queue *q = &txq->q;
+	enum dma_data_direction dma_dir;
+
+	if (!q->n_bd)
+		return;
+
+	/* In the command queue, all the TBs are mapped as BIDI
+	 * so unmap them as such.
+	 */
+	if (txq_id == trans_pcie->cmd_queue)
+		dma_dir = DMA_BIDIRECTIONAL;
+	else
+		dma_dir = DMA_TO_DEVICE;
+
+	spin_lock_bh(&txq->lock);
+	while (q->write_ptr != q->read_ptr) {
+		iwl_txq_free_tfd(trans, txq, dma_dir);
+		q->read_ptr = iwl_queue_inc_wrap(q->read_ptr, q->n_bd);
+	}
+	spin_unlock_bh(&txq->lock);
+}
+
+/**
+ * iwl_tx_queue_free - Deallocate DMA queue.
+ * @txq: Transmit queue to deallocate.
+ *
+ * Empty queue by removing and destroying all BD's.
+ * Free all buffers.
+ * 0-fill, but do not free "txq" descriptor structure.
+ */
+static void iwl_tx_queue_free(struct iwl_trans *trans, int txq_id)
+{
+	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+	struct iwl_tx_queue *txq = &trans_pcie->txq[txq_id];
+	struct device *dev = trans->dev;
+	int i;
+
+	if (WARN_ON(!txq))
+		return;
+
+	iwl_tx_queue_unmap(trans, txq_id);
+
+	/* De-alloc array of command/tx buffers */
+
+	if (txq_id == trans_pcie->cmd_queue)
+		for (i = 0; i < txq->q.n_window; i++)
+			kfree(txq->entries[i].cmd);
+
+	/* De-alloc circular buffer of TFDs */
+	if (txq->q.n_bd) {
+		dma_free_coherent(dev, sizeof(struct iwl_tfd) *
+				  txq->q.n_bd, txq->tfds, txq->q.dma_addr);
+		memset(&txq->q.dma_addr, 0, sizeof(txq->q.dma_addr));
+	}
+
+	kfree(txq->entries);
+	txq->entries = NULL;
+
+	del_timer_sync(&txq->stuck_timer);
+
+	/* 0-fill queue descriptor structure */
+	memset(txq, 0, sizeof(*txq));
+}
+
+/**
+ * iwl_trans_tx_free - Free TXQ Context
+ *
+ * Destroy all TX DMA queues and structures
+ */
+static void iwl_trans_pcie_tx_free(struct iwl_trans *trans)
+{
+	int txq_id;
+	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+
+	/* Tx queues */
+	if (trans_pcie->txq) {
+		for (txq_id = 0;
+		     txq_id < trans->cfg->base_params->num_of_queues; txq_id++)
+			iwl_tx_queue_free(trans, txq_id);
+	}
+
+	kfree(trans_pcie->txq);
+	trans_pcie->txq = NULL;
+
+	iwlagn_free_dma_ptr(trans, &trans_pcie->kw);
+
+	iwlagn_free_dma_ptr(trans, &trans_pcie->scd_bc_tbls);
+}
+
+/**
+ * iwl_trans_tx_alloc - allocate TX context
+ * Allocate all Tx DMA structures and initialize them
+ *
+ * @param priv
+ * @return error code
+ */
+static int iwl_trans_tx_alloc(struct iwl_trans *trans)
+{
+	int ret;
+	int txq_id, slots_num;
+	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+
+	u16 scd_bc_tbls_size = trans->cfg->base_params->num_of_queues *
+			sizeof(struct iwlagn_scd_bc_tbl);
+
+	/* It is not allowed to alloc twice, so warn when this happens.
+	 * We cannot rely on the previous allocation, so free and fail */
+	if (WARN_ON(trans_pcie->txq)) {
+		ret = -EINVAL;
+		goto error;
+	}
+
+	ret = iwlagn_alloc_dma_ptr(trans, &trans_pcie->scd_bc_tbls,
+				   scd_bc_tbls_size);
+	if (ret) {
+		IWL_ERR(trans, "Scheduler BC Table allocation failed\n");
+		goto error;
+	}
+
+	/* Alloc keep-warm buffer */
+	ret = iwlagn_alloc_dma_ptr(trans, &trans_pcie->kw, IWL_KW_SIZE);
+	if (ret) {
+		IWL_ERR(trans, "Keep Warm allocation failed\n");
+		goto error;
+	}
+
+	trans_pcie->txq = kcalloc(trans->cfg->base_params->num_of_queues,
+				  sizeof(struct iwl_tx_queue), GFP_KERNEL);
+	if (!trans_pcie->txq) {
+		IWL_ERR(trans, "Not enough memory for txq\n");
+		ret = -ENOMEM;
+		goto error;
+	}
+
+	/* Alloc and init all Tx queues, including the command queue (#4/#9) */
+	for (txq_id = 0; txq_id < trans->cfg->base_params->num_of_queues;
+	     txq_id++) {
+		slots_num = (txq_id == trans_pcie->cmd_queue) ?
+					TFD_CMD_SLOTS : TFD_TX_CMD_SLOTS;
+		ret = iwl_trans_txq_alloc(trans, &trans_pcie->txq[txq_id],
+					  slots_num, txq_id);
+		if (ret) {
+			IWL_ERR(trans, "Tx %d queue alloc failed\n", txq_id);
+			goto error;
+		}
+	}
+
+	return 0;
+
+error:
+	iwl_trans_pcie_tx_free(trans);
+
+	return ret;
+}
+
+static int iwl_tx_init(struct iwl_trans *trans)
+{
+	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+	int ret;
+	int txq_id, slots_num;
+	unsigned long flags;
+	bool alloc = false;
+
+	if (!trans_pcie->txq) {
+		ret = iwl_trans_tx_alloc(trans);
+		if (ret)
+			goto error;
+		alloc = true;
+	}
+
+	spin_lock_irqsave(&trans_pcie->irq_lock, flags);
+
+	/* Turn off all Tx DMA fifos */
+	iwl_write_prph(trans, SCD_TXFACT, 0);
+
+	/* Tell NIC where to find the "keep warm" buffer */
+	iwl_write_direct32(trans, FH_KW_MEM_ADDR_REG,
+			   trans_pcie->kw.dma >> 4);
+
+	spin_unlock_irqrestore(&trans_pcie->irq_lock, flags);
+
+	/* Alloc and init all Tx queues, including the command queue (#4/#9) */
+	for (txq_id = 0; txq_id < trans->cfg->base_params->num_of_queues;
+	     txq_id++) {
+		slots_num = (txq_id == trans_pcie->cmd_queue) ?
+					TFD_CMD_SLOTS : TFD_TX_CMD_SLOTS;
+		ret = iwl_trans_txq_init(trans, &trans_pcie->txq[txq_id],
+					 slots_num, txq_id);
+		if (ret) {
+			IWL_ERR(trans, "Tx %d queue init failed\n", txq_id);
+			goto error;
+		}
+	}
+
+	return 0;
+error:
+	/* Upon error, free only if we allocated something */
+	if (alloc)
+		iwl_trans_pcie_tx_free(trans);
+	return ret;
+}
+
+static void iwl_set_pwr_vmain(struct iwl_trans *trans)
+{
+/*
+ * (for documentation purposes)
+ * to set power to V_AUX, do:
+
+		if (pci_pme_capable(priv->pci_dev, PCI_D3cold))
+			iwl_set_bits_mask_prph(trans, APMG_PS_CTRL_REG,
+					       APMG_PS_CTRL_VAL_PWR_SRC_VAUX,
+					       ~APMG_PS_CTRL_MSK_PWR_SRC);
+ */
+
+	iwl_set_bits_mask_prph(trans, APMG_PS_CTRL_REG,
+			       APMG_PS_CTRL_VAL_PWR_SRC_VMAIN,
+			       ~APMG_PS_CTRL_MSK_PWR_SRC);
+}
+
+/* PCI registers */
+#define PCI_CFG_RETRY_TIMEOUT	0x041
+#define PCI_CFG_LINK_CTRL_VAL_L0S_EN	0x01
+#define PCI_CFG_LINK_CTRL_VAL_L1_EN	0x02
+
+static u16 iwl_pciexp_link_ctrl(struct iwl_trans *trans)
+{
+	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+	int pos;
+	u16 pci_lnk_ctl;
+
+	struct pci_dev *pci_dev = trans_pcie->pci_dev;
+
+	pos = pci_pcie_cap(pci_dev);
+	pci_read_config_word(pci_dev, pos + PCI_EXP_LNKCTL, &pci_lnk_ctl);
+	return pci_lnk_ctl;
+}
+
+static void iwl_apm_config(struct iwl_trans *trans)
+{
+	/*
+	 * HW bug W/A for instability in PCIe bus L0S->L1 transition.
+	 * Check if BIOS (or OS) enabled L1-ASPM on this device.
+	 * If so (likely), disable L0S, so device moves directly L0->L1;
+	 *    costs negligible amount of power savings.
+	 * If not (unlikely), enable L0S, so there is at least some
+	 *    power savings, even without L1.
+	 */
+	u16 lctl = iwl_pciexp_link_ctrl(trans);
+
+	if ((lctl & PCI_CFG_LINK_CTRL_VAL_L1_EN) ==
+				PCI_CFG_LINK_CTRL_VAL_L1_EN) {
+		/* L1-ASPM enabled; disable(!) L0S */
+		iwl_set_bit(trans, CSR_GIO_REG, CSR_GIO_REG_VAL_L0S_ENABLED);
+		dev_printk(KERN_INFO, trans->dev,
+			   "L1 Enabled; Disabling L0S\n");
+	} else {
+		/* L1-ASPM disabled; enable(!) L0S */
+		iwl_clear_bit(trans, CSR_GIO_REG, CSR_GIO_REG_VAL_L0S_ENABLED);
+		dev_printk(KERN_INFO, trans->dev,
+			   "L1 Disabled; Enabling L0S\n");
+	}
+	trans->pm_support = !(lctl & PCI_CFG_LINK_CTRL_VAL_L0S_EN);
+}
+
+/*
+ * Start up NIC's basic functionality after it has been reset
+ * (e.g. after platform boot, or shutdown via iwl_apm_stop())
+ * NOTE:  This does not load uCode nor start the embedded processor
+ */
+static int iwl_apm_init(struct iwl_trans *trans)
+{
+	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+	int ret = 0;
+	IWL_DEBUG_INFO(trans, "Init card's basic functions\n");
+
+	/*
+	 * Use "set_bit" below rather than "write", to preserve any hardware
+	 * bits already set by default after reset.
+	 */
+
+	/* Disable L0S exit timer (platform NMI Work/Around) */
+	iwl_set_bit(trans, CSR_GIO_CHICKEN_BITS,
+		    CSR_GIO_CHICKEN_BITS_REG_BIT_DIS_L0S_EXIT_TIMER);
+
+	/*
+	 * Disable L0s without affecting L1;
+	 *  don't wait for ICH L0s (ICH bug W/A)
+	 */
+	iwl_set_bit(trans, CSR_GIO_CHICKEN_BITS,
+		    CSR_GIO_CHICKEN_BITS_REG_BIT_L1A_NO_L0S_RX);
+
+	/* Set FH wait threshold to maximum (HW error during stress W/A) */
+	iwl_set_bit(trans, CSR_DBG_HPET_MEM_REG, CSR_DBG_HPET_MEM_REG_VAL);
+
+	/*
+	 * Enable HAP INTA (interrupt from management bus) to
+	 * wake device's PCI Express link L1a -> L0s
+	 */
+	iwl_set_bit(trans, CSR_HW_IF_CONFIG_REG,
+		    CSR_HW_IF_CONFIG_REG_BIT_HAP_WAKE_L1A);
+
+	iwl_apm_config(trans);
+
+	/* Configure analog phase-lock-loop before activating to D0A */
+	if (trans->cfg->base_params->pll_cfg_val)
+		iwl_set_bit(trans, CSR_ANA_PLL_CFG,
+			    trans->cfg->base_params->pll_cfg_val);
+
+	/*
+	 * Set "initialization complete" bit to move adapter from
+	 * D0U* --> D0A* (powered-up active) state.
+	 */
+	iwl_set_bit(trans, CSR_GP_CNTRL, CSR_GP_CNTRL_REG_FLAG_INIT_DONE);
+
+	/*
+	 * Wait for clock stabilization; once stabilized, access to
+	 * device-internal resources is supported, e.g. iwl_write_prph()
+	 * and accesses to uCode SRAM.
+	 */
+	ret = iwl_poll_bit(trans, CSR_GP_CNTRL,
+			   CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY,
+			   CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY, 25000);
+	if (ret < 0) {
+		IWL_DEBUG_INFO(trans, "Failed to init the card\n");
+		goto out;
+	}
+
+	/*
+	 * Enable DMA clock and wait for it to stabilize.
+	 *
+	 * Write to "CLK_EN_REG"; "1" bits enable clocks, while "0" bits
+	 * do not disable clocks.  This preserves any hardware bits already
+	 * set by default in "CLK_CTRL_REG" after reset.
+	 */
+	iwl_write_prph(trans, APMG_CLK_EN_REG, APMG_CLK_VAL_DMA_CLK_RQT);
+	udelay(20);
+
+	/* Disable L1-Active */
+	iwl_set_bits_prph(trans, APMG_PCIDEV_STT_REG,
+			  APMG_PCIDEV_STT_VAL_L1_ACT_DIS);
+
+	set_bit(STATUS_DEVICE_ENABLED, &trans_pcie->status);
+
+out:
+	return ret;
+}
+
+static int iwl_apm_stop_master(struct iwl_trans *trans)
+{
+	int ret = 0;
+
+	/* stop device's busmaster DMA activity */
+	iwl_set_bit(trans, CSR_RESET, CSR_RESET_REG_FLAG_STOP_MASTER);
+
+	ret = iwl_poll_bit(trans, CSR_RESET,
+			   CSR_RESET_REG_FLAG_MASTER_DISABLED,
+			   CSR_RESET_REG_FLAG_MASTER_DISABLED, 100);
+	if (ret)
+		IWL_WARN(trans, "Master Disable Timed Out, 100 usec\n");
+
+	IWL_DEBUG_INFO(trans, "stop master\n");
+
+	return ret;
+}
+
+static void iwl_apm_stop(struct iwl_trans *trans)
+{
+	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+	IWL_DEBUG_INFO(trans, "Stop card, put in low power state\n");
+
+	clear_bit(STATUS_DEVICE_ENABLED, &trans_pcie->status);
+
+	/* Stop device's DMA activity */
+	iwl_apm_stop_master(trans);
+
+	/* Reset the entire device */
+	iwl_set_bit(trans, CSR_RESET, CSR_RESET_REG_FLAG_SW_RESET);
+
+	udelay(10);
+
+	/*
+	 * Clear "initialization complete" bit to move adapter from
+	 * D0A* (powered-up Active) --> D0U* (Uninitialized) state.
+	 */
+	iwl_clear_bit(trans, CSR_GP_CNTRL,
+		      CSR_GP_CNTRL_REG_FLAG_INIT_DONE);
+}
+
+static int iwl_nic_init(struct iwl_trans *trans)
+{
+	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+	unsigned long flags;
+
+	/* nic_init */
+	spin_lock_irqsave(&trans_pcie->irq_lock, flags);
+	iwl_apm_init(trans);
+
+	/* Set interrupt coalescing calibration timer to default (512 usecs) */
+	iwl_write8(trans, CSR_INT_COALESCING, IWL_HOST_INT_CALIB_TIMEOUT_DEF);
+
+	spin_unlock_irqrestore(&trans_pcie->irq_lock, flags);
+
+	iwl_set_pwr_vmain(trans);
+
+	iwl_op_mode_nic_config(trans->op_mode);
+
+#ifndef CONFIG_IWLWIFI_IDI
+	/* Allocate the RX queue, or reset if it is already allocated */
+	iwl_rx_init(trans);
+#endif
+
+	/* Allocate or reset and init all Tx and Command queues */
+	if (iwl_tx_init(trans))
+		return -ENOMEM;
+
+	if (trans->cfg->base_params->shadow_reg_enable) {
+		/* enable shadow regs in HW */
+		iwl_set_bit(trans, CSR_MAC_SHADOW_REG_CTRL, 0x800FFFFF);
+		IWL_DEBUG_INFO(trans, "Enabling shadow registers in device\n");
+	}
+
+	return 0;
+}
+
+#define HW_READY_TIMEOUT (50)
+
+/* Note: returns poll_bit return value, which is >= 0 if success */
+static int iwl_set_hw_ready(struct iwl_trans *trans)
+{
+	int ret;
+
+	iwl_set_bit(trans, CSR_HW_IF_CONFIG_REG,
+		    CSR_HW_IF_CONFIG_REG_BIT_NIC_READY);
+
+	/* See if we got it */
+	ret = iwl_poll_bit(trans, CSR_HW_IF_CONFIG_REG,
+			   CSR_HW_IF_CONFIG_REG_BIT_NIC_READY,
+			   CSR_HW_IF_CONFIG_REG_BIT_NIC_READY,
+			   HW_READY_TIMEOUT);
+
+	IWL_DEBUG_INFO(trans, "hardware%s ready\n", ret < 0 ? " not" : "");
+	return ret;
+}
+
+/* Note: returns standard 0/-ERROR code */
+static int iwl_prepare_card_hw(struct iwl_trans *trans)
+{
+	int ret;
+
+	IWL_DEBUG_INFO(trans, "iwl_trans_prepare_card_hw enter\n");
+
+	ret = iwl_set_hw_ready(trans);
+	/* If the card is ready, exit 0 */
+	if (ret >= 0)
+		return 0;
+
+	/* If HW is not ready, prepare the conditions to check again */
+	iwl_set_bit(trans, CSR_HW_IF_CONFIG_REG,
+		    CSR_HW_IF_CONFIG_REG_PREPARE);
+
+	ret = iwl_poll_bit(trans, CSR_HW_IF_CONFIG_REG,
+			   ~CSR_HW_IF_CONFIG_REG_BIT_NIC_PREPARE_DONE,
+			   CSR_HW_IF_CONFIG_REG_BIT_NIC_PREPARE_DONE, 150000);
+
+	if (ret < 0)
+		return ret;
+
+	/* HW should be ready by now, check again. */
+	ret = iwl_set_hw_ready(trans);
+	if (ret >= 0)
+		return 0;
+	return ret;
+}
+
+/*
+ * ucode
+ */
+static int iwl_load_section(struct iwl_trans *trans, u8 section_num,
+			    const struct fw_desc *section)
+{
+	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+	dma_addr_t phy_addr = section->p_addr;
+	u32 byte_cnt = section->len;
+	u32 dst_addr = section->offset;
+	int ret;
+
+	trans_pcie->ucode_write_complete = false;
+
+	iwl_write_direct32(trans,
+			   FH_TCSR_CHNL_TX_CONFIG_REG(FH_SRVC_CHNL),
+			   FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_PAUSE);
+
+	iwl_write_direct32(trans,
+			   FH_SRVC_CHNL_SRAM_ADDR_REG(FH_SRVC_CHNL),
+			   dst_addr);
+
+	iwl_write_direct32(trans,
+		FH_TFDIB_CTRL0_REG(FH_SRVC_CHNL),
+		phy_addr & FH_MEM_TFDIB_DRAM_ADDR_LSB_MSK);
+
+	iwl_write_direct32(trans,
+			   FH_TFDIB_CTRL1_REG(FH_SRVC_CHNL),
+			   (iwl_get_dma_hi_addr(phy_addr)
+				<< FH_MEM_TFDIB_REG1_ADDR_BITSHIFT) | byte_cnt);
+
+	iwl_write_direct32(trans,
+			   FH_TCSR_CHNL_TX_BUF_STS_REG(FH_SRVC_CHNL),
+			   1 << FH_TCSR_CHNL_TX_BUF_STS_REG_POS_TB_NUM |
+			   1 << FH_TCSR_CHNL_TX_BUF_STS_REG_POS_TB_IDX |
+			   FH_TCSR_CHNL_TX_BUF_STS_REG_VAL_TFDB_VALID);
+
+	iwl_write_direct32(trans,
+			   FH_TCSR_CHNL_TX_CONFIG_REG(FH_SRVC_CHNL),
+			   FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_ENABLE	|
+			   FH_TCSR_TX_CONFIG_REG_VAL_DMA_CREDIT_DISABLE	|
+			   FH_TCSR_TX_CONFIG_REG_VAL_CIRQ_HOST_ENDTFD);
+
+	IWL_DEBUG_FW(trans, "[%d] uCode section being loaded...\n",
+		     section_num);
+	ret = wait_event_timeout(trans_pcie->ucode_write_waitq,
+				 trans_pcie->ucode_write_complete, 5 * HZ);
+	if (!ret) {
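+		/* wait_event_timeout() returns 0 on timeout and the
+		 * remaining jiffies otherwise, so getting here means the
+		 * ISR never set ucode_write_complete within 5 seconds. */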
+		IWL_ERR(trans, "Could not load the [%d] uCode section\n",
+			section_num);
+		return -ETIMEDOUT;
+	}
+
+	return 0;
+}
+
+static int iwl_load_given_ucode(struct iwl_trans *trans,
+				const struct fw_img *image)
+{
+	int ret = 0;
+	int i;
+
+	for (i = 0; i < IWL_UCODE_SECTION_MAX; i++) {
+		if (!image->sec[i].p_addr)
+			break;
+
+		ret = iwl_load_section(trans, i, &image->sec[i]);
+		if (ret)
+			return ret;
+	}
+
+	/* Remove all resets to allow NIC to operate */
+	iwl_write32(trans, CSR_RESET, 0);
+
+	return 0;
+}
+
+static int iwl_trans_pcie_start_fw(struct iwl_trans *trans,
+				   const struct fw_img *fw)
+{
+	int ret;
+	bool hw_rfkill;
+
+	/* This may fail if AMT took ownership of the device */
+	if (iwl_prepare_card_hw(trans)) {
+		IWL_WARN(trans, "Exit HW not ready\n");
+		return -EIO;
+	}
+
+	iwl_enable_rfkill_int(trans);
+
+	/* If platform's RF_KILL switch is NOT set to KILL */
+	hw_rfkill = iwl_is_rfkill_set(trans);
+	iwl_op_mode_hw_rf_kill(trans->op_mode, hw_rfkill);
+	if (hw_rfkill)
+		return -ERFKILL;
+
+	iwl_write32(trans, CSR_INT, 0xFFFFFFFF);
+
+	ret = iwl_nic_init(trans);
+	if (ret) {
+		IWL_ERR(trans, "Unable to init nic\n");
+		return ret;
+	}
+
+	/* make sure rfkill handshake bits are cleared */
+	iwl_write32(trans, CSR_UCODE_DRV_GP1_CLR, CSR_UCODE_SW_BIT_RFKILL);
+	iwl_write32(trans, CSR_UCODE_DRV_GP1_CLR,
+		    CSR_UCODE_DRV_GP1_BIT_CMD_BLOCKED);
+
+	/* clear (again), then enable host interrupts */
+	iwl_write32(trans, CSR_INT, 0xFFFFFFFF);
+	iwl_enable_interrupts(trans);
+
+	/* really make sure rfkill handshake bits are cleared */
+	iwl_write32(trans, CSR_UCODE_DRV_GP1_CLR, CSR_UCODE_SW_BIT_RFKILL);
+	iwl_write32(trans, CSR_UCODE_DRV_GP1_CLR, CSR_UCODE_SW_BIT_RFKILL);
+
+	/* Load the given image to the HW */
+	return iwl_load_given_ucode(trans, fw);
+}
+
+/*
+ * Activate/Deactivate Tx DMA/FIFO channels according tx fifos mask
+ * must be called under the irq lock and with MAC access
+ */
+static void iwl_trans_txq_set_sched(struct iwl_trans *trans, u32 mask)
+{
+	struct iwl_trans_pcie __maybe_unused *trans_pcie =
+		IWL_TRANS_GET_PCIE_TRANS(trans);
+
+	lockdep_assert_held(&trans_pcie->irq_lock);
+
+	iwl_write_prph(trans, SCD_TXFACT, mask);
+}
+
+static void iwl_tx_start(struct iwl_trans *trans)
+{
+	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+	u32 a;
+	unsigned long flags;
+	int i, chan;
+	u32 reg_val;
+
+	spin_lock_irqsave(&trans_pcie->irq_lock, flags);
+
+	trans_pcie->scd_base_addr =
+		iwl_read_prph(trans, SCD_SRAM_BASE_ADDR);
+	a = trans_pcie->scd_base_addr + SCD_CONTEXT_MEM_LOWER_BOUND;
+	/* reset context data memory */
+	for (; a < trans_pcie->scd_base_addr + SCD_CONTEXT_MEM_UPPER_BOUND;
+		a += 4)
+		iwl_write_targ_mem(trans, a, 0);
+	/* reset tx status memory */
+	for (; a < trans_pcie->scd_base_addr + SCD_TX_STTS_MEM_UPPER_BOUND;
+		a += 4)
+		iwl_write_targ_mem(trans, a, 0);
+	for (; a < trans_pcie->scd_base_addr +
+	       SCD_TRANS_TBL_OFFSET_QUEUE(
+				trans->cfg->base_params->num_of_queues);
+	       a += 4)
+		iwl_write_targ_mem(trans, a, 0);
+
+	iwl_write_prph(trans, SCD_DRAM_BASE_ADDR,
+		       trans_pcie->scd_bc_tbls.dma >> 10);
+
+	/* Enable DMA channel */
+	for (chan = 0; chan < FH_TCSR_CHNL_NUM ; chan++)
+		iwl_write_direct32(trans, FH_TCSR_CHNL_TX_CONFIG_REG(chan),
+				FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_ENABLE |
+				FH_TCSR_TX_CONFIG_REG_VAL_DMA_CREDIT_ENABLE);
+
+	/* Update FH chicken bits */
+	reg_val = iwl_read_direct32(trans, FH_TX_CHICKEN_BITS_REG);
+	iwl_write_direct32(trans, FH_TX_CHICKEN_BITS_REG,
+			   reg_val | FH_TX_CHICKEN_BITS_SCD_AUTO_RETRY_EN);
+
+	iwl_write_prph(trans, SCD_QUEUECHAIN_SEL,
+		       SCD_QUEUECHAIN_SEL_ALL(trans, trans_pcie));
+	iwl_write_prph(trans, SCD_AGGR_SEL, 0);
+
+	/* initiate the queues */
+	for (i = 0; i < trans->cfg->base_params->num_of_queues; i++) {
+		iwl_write_prph(trans, SCD_QUEUE_RDPTR(i), 0);
+		iwl_write_direct32(trans, HBUS_TARG_WRPTR, 0 | (i << 8));
+		iwl_write_targ_mem(trans, trans_pcie->scd_base_addr +
+				SCD_CONTEXT_QUEUE_OFFSET(i), 0);
+		iwl_write_targ_mem(trans, trans_pcie->scd_base_addr +
+				SCD_CONTEXT_QUEUE_OFFSET(i) +
+				sizeof(u32),
+				((SCD_WIN_SIZE <<
+				SCD_QUEUE_CTX_REG2_WIN_SIZE_POS) &
+				SCD_QUEUE_CTX_REG2_WIN_SIZE_MSK) |
+				((SCD_FRAME_LIMIT <<
+				SCD_QUEUE_CTX_REG2_FRAME_LIMIT_POS) &
+				SCD_QUEUE_CTX_REG2_FRAME_LIMIT_MSK));
+	}
+
+	iwl_write_prph(trans, SCD_INTERRUPT_MASK,
+		       IWL_MASK(0, trans->cfg->base_params->num_of_queues));
+
+	/* Activate all Tx DMA/FIFO channels */
+	iwl_trans_txq_set_sched(trans, IWL_MASK(0, 7));
+
+	iwl_trans_set_wr_ptrs(trans, trans_pcie->cmd_queue, 0);
+
+	/* make sure all queues are not stopped/used */
+	memset(trans_pcie->queue_stopped, 0, sizeof(trans_pcie->queue_stopped));
+	memset(trans_pcie->queue_used, 0, sizeof(trans_pcie->queue_used));
+
+	for (i = 0; i < trans_pcie->n_q_to_fifo; i++) {
+		int fifo = trans_pcie->setup_q_to_fifo[i];
+
+		set_bit(i, trans_pcie->queue_used);
+
+		iwl_trans_tx_queue_set_status(trans, &trans_pcie->txq[i],
+					      fifo, true);
+	}
+
+	spin_unlock_irqrestore(&trans_pcie->irq_lock, flags);
+
+	/* Enable L1-Active */
+	iwl_clear_bits_prph(trans, APMG_PCIDEV_STT_REG,
+			    APMG_PCIDEV_STT_VAL_L1_ACT_DIS);
+}
+
+static void iwl_trans_pcie_fw_alive(struct iwl_trans *trans)
+{
+	iwl_reset_ict(trans);
+	iwl_tx_start(trans);
+}
+
+/**
+ * iwlagn_txq_ctx_stop - Stop all Tx DMA channels
+ */
+static int iwl_trans_tx_stop(struct iwl_trans *trans)
+{
+	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+	int ch, txq_id, ret;
+	unsigned long flags;
+
+	/* Turn off all Tx DMA fifos */
+	spin_lock_irqsave(&trans_pcie->irq_lock, flags);
+
+	iwl_trans_txq_set_sched(trans, 0);
+
+	/* Stop each Tx DMA channel, and wait for it to be idle */
+	for (ch = 0; ch < FH_TCSR_CHNL_NUM; ch++) {
+		iwl_write_direct32(trans,
+				   FH_TCSR_CHNL_TX_CONFIG_REG(ch), 0x0);
+		ret = iwl_poll_direct_bit(trans, FH_TSSR_TX_STATUS_REG,
+			FH_TSSR_TX_STATUS_REG_MSK_CHNL_IDLE(ch), 1000);
+		if (ret < 0)
+			IWL_ERR(trans,
+				"Failing on timeout while stopping DMA channel %d [0x%08x]",
+				ch,
+				iwl_read_direct32(trans,
+						  FH_TSSR_TX_STATUS_REG));
+	}
+	spin_unlock_irqrestore(&trans_pcie->irq_lock, flags);
+
+	if (!trans_pcie->txq) {
+		IWL_WARN(trans, "Stopping tx queues that aren't allocated...");
+		return 0;
+	}
+
+	/* Unmap DMA from host system and free skb's */
+	for (txq_id = 0; txq_id < trans->cfg->base_params->num_of_queues;
+	     txq_id++)
+		iwl_tx_queue_unmap(trans, txq_id);
+
+	return 0;
+}
+
+static void iwl_trans_pcie_stop_device(struct iwl_trans *trans)
+{
+	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+	unsigned long flags;
+
+	/* tell the device to stop sending interrupts */
+	spin_lock_irqsave(&trans_pcie->irq_lock, flags);
+	iwl_disable_interrupts(trans);
+	spin_unlock_irqrestore(&trans_pcie->irq_lock, flags);
+
+	/* device going down, Stop using ICT table */
+	iwl_disable_ict(trans);
+
+	/*
+	 * If a HW restart happens during firmware loading,
+	 * then the firmware loading might call this function
+	 * and later it might be called again due to the
+	 * restart. So don't process again if the device is
+	 * already dead.
+	 */
+	if (test_bit(STATUS_DEVICE_ENABLED, &trans_pcie->status)) {
+		iwl_trans_tx_stop(trans);
+#ifndef CONFIG_IWLWIFI_IDI
+		iwl_trans_rx_stop(trans);
+#endif
+		/* Power-down device's busmaster DMA clocks */
+		iwl_write_prph(trans, APMG_CLK_DIS_REG,
+			       APMG_CLK_VAL_DMA_CLK_RQT);
+		udelay(5);
+	}
+
+	/* Make sure (redundant) we've released our request to stay awake */
+	iwl_clear_bit(trans, CSR_GP_CNTRL,
+		      CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
+
+	/* Stop the device, and put it in low power state */
+	iwl_apm_stop(trans);
+
+	/* Upon stop, the APM issues an interrupt if HW RF kill is set.
+	 * Clear the interrupt again here
+	 */
+	spin_lock_irqsave(&trans_pcie->irq_lock, flags);
+	iwl_disable_interrupts(trans);
+	spin_unlock_irqrestore(&trans_pcie->irq_lock, flags);
+
+	iwl_enable_rfkill_int(trans);
+
+	/* wait to make sure we flush the pending tasklet */
+	synchronize_irq(trans_pcie->irq);
+	tasklet_kill(&trans_pcie->irq_tasklet);
+
+	cancel_work_sync(&trans_pcie->rx_replenish);
+
+	/* stop and reset the on-board processor */
+	iwl_write32(trans, CSR_RESET, CSR_RESET_REG_FLAG_NEVO_RESET);
+
+	/* clear all status bits */
+	clear_bit(STATUS_HCMD_ACTIVE, &trans_pcie->status);
+	clear_bit(STATUS_INT_ENABLED, &trans_pcie->status);
+	clear_bit(STATUS_DEVICE_ENABLED, &trans_pcie->status);
+	clear_bit(STATUS_TPOWER_PMI, &trans_pcie->status);
+}
+
+static void iwl_trans_pcie_wowlan_suspend(struct iwl_trans *trans)
+{
+	/* let the ucode operate on its own */
+	iwl_write32(trans, CSR_UCODE_DRV_GP1_SET,
+		    CSR_UCODE_DRV_GP1_BIT_D3_CFG_COMPLETE);
+
+	iwl_disable_interrupts(trans);
+	iwl_clear_bit(trans, CSR_GP_CNTRL,
+		      CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
+}
+
+static int iwl_trans_pcie_tx(struct iwl_trans *trans, struct sk_buff *skb,
+			     struct iwl_device_cmd *dev_cmd, int txq_id)
+{
+	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
+	struct iwl_tx_cmd *tx_cmd = (struct iwl_tx_cmd *) dev_cmd->payload;
+	struct iwl_cmd_meta *out_meta;
+	struct iwl_tx_queue *txq;
+	struct iwl_queue *q;
+	dma_addr_t phys_addr = 0;
+	dma_addr_t txcmd_phys;
+	dma_addr_t scratch_phys;
+	u16 len, firstlen, secondlen;
+	u8 wait_write_ptr = 0;
+	__le16 fc = hdr->frame_control;
+	u8 hdr_len = ieee80211_hdrlen(fc);
+	u16 __maybe_unused wifi_seq;
+
+	txq = &trans_pcie->txq[txq_id];
+	q = &txq->q;
+
+	if (unlikely(!test_bit(txq_id, trans_pcie->queue_used))) {
+		WARN_ON_ONCE(1);
+		return -EINVAL;
+	}
+
+	spin_lock(&txq->lock);
+
+	/* Set up driver data for this TFD */
+	txq->entries[q->write_ptr].skb = skb;
+	txq->entries[q->write_ptr].cmd = dev_cmd;
+
+	dev_cmd->hdr.cmd = REPLY_TX;
+	dev_cmd->hdr.sequence =
+		cpu_to_le16((u16)(QUEUE_TO_SEQ(txq_id) |
+			    INDEX_TO_SEQ(q->write_ptr)));
+
+	/* Set up first empty entry in queue's array of Tx/cmd buffers */
+	out_meta = &txq->entries[q->write_ptr].meta;
+
+	/*
+	 * Use the first empty entry in this queue's command buffer array
+	 * to contain the Tx command and MAC header concatenated together
+	 * (payload data will be in another buffer).
+	 * Size of this varies, due to varying MAC header length.
+	 * If end is not dword aligned, we'll have 2 extra bytes at the end
+	 * of the MAC header (device reads on dword boundaries).
+	 * We'll tell device about this padding later.
+	 */
+	len = sizeof(struct iwl_tx_cmd) +
+		sizeof(struct iwl_cmd_header) + hdr_len;
+	firstlen = (len + 3) & ~3;
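+	/* e.g. a len of 62 rounds up to firstlen = 64, leaving 2 pad
+	 * bytes after the MAC header that the device must be told about
+	 * (see TX_CMD_FLG_MH_PAD_MSK below). */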
+
+	/* Tell NIC about any 2-byte padding after MAC header */
+	if (firstlen != len)
+		tx_cmd->tx_flags |= TX_CMD_FLG_MH_PAD_MSK;
+
+	/* Physical address of this Tx command's header (not MAC header!),
+	 * within command buffer array. */
+	txcmd_phys = dma_map_single(trans->dev,
+				    &dev_cmd->hdr, firstlen,
+				    DMA_BIDIRECTIONAL);
+	if (unlikely(dma_mapping_error(trans->dev, txcmd_phys)))
+		goto out_err;
+	dma_unmap_addr_set(out_meta, mapping, txcmd_phys);
+	dma_unmap_len_set(out_meta, len, firstlen);
+
+	if (!ieee80211_has_morefrags(fc)) {
+		txq->need_update = 1;
+	} else {
+		wait_write_ptr = 1;
+		txq->need_update = 0;
+	}
+
+	/* Set up TFD's 2nd entry to point directly to remainder of skb,
+	 * if any (802.11 null frames have no payload). */
+	secondlen = skb->len - hdr_len;
+	if (secondlen > 0) {
+		phys_addr = dma_map_single(trans->dev, skb->data + hdr_len,
+					   secondlen, DMA_TO_DEVICE);
+		if (unlikely(dma_mapping_error(trans->dev, phys_addr))) {
+			dma_unmap_single(trans->dev,
+					 dma_unmap_addr(out_meta, mapping),
+					 dma_unmap_len(out_meta, len),
+					 DMA_BIDIRECTIONAL);
+			goto out_err;
+		}
+	}
+
+	/* Attach buffers to TFD */
+	iwlagn_txq_attach_buf_to_tfd(trans, txq, txcmd_phys, firstlen, 1);
+	if (secondlen > 0)
+		iwlagn_txq_attach_buf_to_tfd(trans, txq, phys_addr,
+					     secondlen, 0);
+
+	scratch_phys = txcmd_phys + sizeof(struct iwl_cmd_header) +
+				offsetof(struct iwl_tx_cmd, scratch);
+
+	/* take back ownership of DMA buffer to enable update */
+	dma_sync_single_for_cpu(trans->dev, txcmd_phys, firstlen,
+				DMA_BIDIRECTIONAL);
+	tx_cmd->dram_lsb_ptr = cpu_to_le32(scratch_phys);
+	tx_cmd->dram_msb_ptr = iwl_get_dma_hi_addr(scratch_phys);
+
+	IWL_DEBUG_TX(trans, "sequence nr = 0X%x\n",
+		     le16_to_cpu(dev_cmd->hdr.sequence));
+	IWL_DEBUG_TX(trans, "tx_flags = 0X%x\n", le32_to_cpu(tx_cmd->tx_flags));
+
+	/* Set up entry for this TFD in Tx byte-count array */
+	iwl_trans_txq_update_byte_cnt_tbl(trans, txq, le16_to_cpu(tx_cmd->len));
+
+	dma_sync_single_for_device(trans->dev, txcmd_phys, firstlen,
+				   DMA_BIDIRECTIONAL);
+
+	trace_iwlwifi_dev_tx(trans->dev,
+			     &((struct iwl_tfd *)txq->tfds)[txq->q.write_ptr],
+			     sizeof(struct iwl_tfd),
+			     &dev_cmd->hdr, firstlen,
+			     skb->data + hdr_len, secondlen);
+
+	/* start timer if queue currently empty */
+	if (q->read_ptr == q->write_ptr && trans_pcie->wd_timeout)
+		mod_timer(&txq->stuck_timer, jiffies + trans_pcie->wd_timeout);
+
+	/* Tell device the write index *just past* this latest filled TFD */
+	q->write_ptr = iwl_queue_inc_wrap(q->write_ptr, q->n_bd);
+	iwl_txq_update_write_ptr(trans, txq);
+
+	/*
+	 * At this point the frame is "transmitted" successfully
+	 * and we will get a TX status notification eventually,
+	 * regardless of the value of ret. "ret" only indicates
+	 * whether or not we should update the write pointer.
+	 */
+	if (iwl_queue_space(q) < q->high_mark) {
+		if (wait_write_ptr) {
+			txq->need_update = 1;
+			iwl_txq_update_write_ptr(trans, txq);
+		} else {
+			iwl_stop_queue(trans, txq);
+		}
+	}
+	spin_unlock(&txq->lock);
+	return 0;
+ out_err:
+	spin_unlock(&txq->lock);
+	return -1;
+}
+
+static int iwl_trans_pcie_start_hw(struct iwl_trans *trans)
+{
+	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+	int err;
+	bool hw_rfkill;
+
+	trans_pcie->inta_mask = CSR_INI_SET_MASK;
+
+	if (!trans_pcie->irq_requested) {
+		tasklet_init(&trans_pcie->irq_tasklet, (void (*)(unsigned long))
+			iwl_irq_tasklet, (unsigned long)trans);
+
+		iwl_alloc_isr_ict(trans);
+
+		err = request_irq(trans_pcie->irq, iwl_isr_ict, IRQF_SHARED,
+				  DRV_NAME, trans);
+		if (err) {
+			IWL_ERR(trans, "Error allocating IRQ %d\n",
+				trans_pcie->irq);
+			goto error;
+		}
+
+		INIT_WORK(&trans_pcie->rx_replenish, iwl_bg_rx_replenish);
+		trans_pcie->irq_requested = true;
+	}
+
+	err = iwl_prepare_card_hw(trans);
+	if (err) {
+		IWL_ERR(trans, "Error while preparing HW: %d", err);
+		goto err_free_irq;
+	}
+
+	iwl_apm_init(trans);
+
+	/* From now on, the op_mode will be kept updated about RF kill state */
+	iwl_enable_rfkill_int(trans);
+
+	hw_rfkill = iwl_is_rfkill_set(trans);
+	iwl_op_mode_hw_rf_kill(trans->op_mode, hw_rfkill);
+
+	return err;
+
+err_free_irq:
+	free_irq(trans_pcie->irq, trans);
+error:
+	iwl_free_isr_ict(trans);
+	tasklet_kill(&trans_pcie->irq_tasklet);
+	return err;
+}
+
+static void iwl_trans_pcie_stop_hw(struct iwl_trans *trans,
+				   bool op_mode_leaving)
+{
+	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+	bool hw_rfkill;
+	unsigned long flags;
+
+	iwl_apm_stop(trans);
+
+	spin_lock_irqsave(&trans_pcie->irq_lock, flags);
+	iwl_disable_interrupts(trans);
+	spin_unlock_irqrestore(&trans_pcie->irq_lock, flags);
+
+	iwl_write32(trans, CSR_INT, 0xFFFFFFFF);
+
+	if (!op_mode_leaving) {
+		/*
+		 * Even if we stop the HW, we still want the RF kill
+		 * interrupt
+		 */
+		iwl_enable_rfkill_int(trans);
+
+		/*
+		 * Check again since the RF kill state may have changed while
+		 * all the interrupts were disabled, in this case we couldn't
+		 * receive the RF kill interrupt and update the state in the
+		 * op_mode.
+		 */
+		hw_rfkill = iwl_is_rfkill_set(trans);
+		iwl_op_mode_hw_rf_kill(trans->op_mode, hw_rfkill);
+	}
+}
+
+static void iwl_trans_pcie_reclaim(struct iwl_trans *trans, int txq_id, int ssn,
+				   struct sk_buff_head *skbs)
+{
+	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+	struct iwl_tx_queue *txq = &trans_pcie->txq[txq_id];
+	/* n_bd is usually 256 => n_bd - 1 = 0xff */
+	int tfd_num = ssn & (txq->q.n_bd - 1);
+	int freed = 0;
+
+	spin_lock(&txq->lock);
+
+	if (txq->q.read_ptr != tfd_num) {
+		IWL_DEBUG_TX_REPLY(trans, "[Q %d] %d -> %d (%d)\n",
+				   txq_id, txq->q.read_ptr, tfd_num, ssn);
+		freed = iwl_tx_queue_reclaim(trans, txq_id, tfd_num, skbs);
+		if (iwl_queue_space(&txq->q) > txq->q.low_mark)
+			iwl_wake_queue(trans, txq);
+	}
+
+	spin_unlock(&txq->lock);
+}
+
+static void iwl_trans_pcie_write8(struct iwl_trans *trans, u32 ofs, u8 val)
+{
+	writeb(val, IWL_TRANS_GET_PCIE_TRANS(trans)->hw_base + ofs);
+}
+
+static void iwl_trans_pcie_write32(struct iwl_trans *trans, u32 ofs, u32 val)
+{
+	writel(val, IWL_TRANS_GET_PCIE_TRANS(trans)->hw_base + ofs);
+}
+
+static u32 iwl_trans_pcie_read32(struct iwl_trans *trans, u32 ofs)
+{
+	return readl(IWL_TRANS_GET_PCIE_TRANS(trans)->hw_base + ofs);
+}
+
+static void iwl_trans_pcie_configure(struct iwl_trans *trans,
+				     const struct iwl_trans_config *trans_cfg)
+{
+	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+
+	trans_pcie->cmd_queue = trans_cfg->cmd_queue;
+	if (WARN_ON(trans_cfg->n_no_reclaim_cmds > MAX_NO_RECLAIM_CMDS))
+		trans_pcie->n_no_reclaim_cmds = 0;
+	else
+		trans_pcie->n_no_reclaim_cmds = trans_cfg->n_no_reclaim_cmds;
+	if (trans_pcie->n_no_reclaim_cmds)
+		memcpy(trans_pcie->no_reclaim_cmds, trans_cfg->no_reclaim_cmds,
+		       trans_pcie->n_no_reclaim_cmds * sizeof(u8));
+
+	trans_pcie->n_q_to_fifo = trans_cfg->n_queue_to_fifo;
+
+	if (WARN_ON(trans_pcie->n_q_to_fifo > IWL_MAX_HW_QUEUES))
+		trans_pcie->n_q_to_fifo = IWL_MAX_HW_QUEUES;
+
+	/* at least the command queue must be mapped */
+	WARN_ON(!trans_pcie->n_q_to_fifo);
+
+	memcpy(trans_pcie->setup_q_to_fifo, trans_cfg->queue_to_fifo,
+	       trans_pcie->n_q_to_fifo * sizeof(u8));
+
+	trans_pcie->rx_buf_size_8k = trans_cfg->rx_buf_size_8k;
+	if (trans_pcie->rx_buf_size_8k)
+		trans_pcie->rx_page_order = get_order(8 * 1024);
+	else
+		trans_pcie->rx_page_order = get_order(4 * 1024);
+
+	trans_pcie->wd_timeout =
+		msecs_to_jiffies(trans_cfg->queue_watchdog_timeout);
+
+	trans_pcie->command_names = trans_cfg->command_names;
+}
+
+void iwl_trans_pcie_free(struct iwl_trans *trans)
+{
+	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+
+	iwl_trans_pcie_tx_free(trans);
+#ifndef CONFIG_IWLWIFI_IDI
+	iwl_trans_pcie_rx_free(trans);
+#endif
+	if (trans_pcie->irq_requested) {
+		free_irq(trans_pcie->irq, trans);
+		iwl_free_isr_ict(trans);
+	}
+
+	pci_disable_msi(trans_pcie->pci_dev);
+	iounmap(trans_pcie->hw_base);
+	pci_release_regions(trans_pcie->pci_dev);
+	pci_disable_device(trans_pcie->pci_dev);
+
+	kfree(trans);
+}
+
+static void iwl_trans_pcie_set_pmi(struct iwl_trans *trans, bool state)
+{
+	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+
+	if (state)
+		set_bit(STATUS_TPOWER_PMI, &trans_pcie->status);
+	else
+		clear_bit(STATUS_TPOWER_PMI, &trans_pcie->status);
+}
+
+#ifdef CONFIG_PM_SLEEP
+static int iwl_trans_pcie_suspend(struct iwl_trans *trans)
+{
+	return 0;
+}
+
+static int iwl_trans_pcie_resume(struct iwl_trans *trans)
+{
+	bool hw_rfkill;
+
+	iwl_enable_rfkill_int(trans);
+
+	hw_rfkill = iwl_is_rfkill_set(trans);
+	iwl_op_mode_hw_rf_kill(trans->op_mode, hw_rfkill);
+
+	if (!hw_rfkill)
+		iwl_enable_interrupts(trans);
+
+	return 0;
+}
+#endif /* CONFIG_PM_SLEEP */
+
+#define IWL_FLUSH_WAIT_MS	2000
+
+static int iwl_trans_pcie_wait_tx_queue_empty(struct iwl_trans *trans)
+{
+	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+	struct iwl_tx_queue *txq;
+	struct iwl_queue *q;
+	int cnt;
+	unsigned long now = jiffies;
+	int ret = 0;
+
+	/* waiting for all the tx frames to complete might take a while */
+	for (cnt = 0; cnt < trans->cfg->base_params->num_of_queues; cnt++) {
+		if (cnt == trans_pcie->cmd_queue)
+			continue;
+		txq = &trans_pcie->txq[cnt];
+		q = &txq->q;
+		while (q->read_ptr != q->write_ptr && !time_after(jiffies,
+		       now + msecs_to_jiffies(IWL_FLUSH_WAIT_MS)))
+			msleep(1);
+
+		if (q->read_ptr != q->write_ptr) {
+			IWL_ERR(trans, "fail to flush all tx fifo queues\n");
+			ret = -ETIMEDOUT;
+			break;
+		}
+	}
+	return ret;
+}
+
+static const char *get_fh_string(int cmd)
+{
+#define IWL_CMD(x) case x: return #x
+	switch (cmd) {
+	IWL_CMD(FH_RSCSR_CHNL0_STTS_WPTR_REG);
+	IWL_CMD(FH_RSCSR_CHNL0_RBDCB_BASE_REG);
+	IWL_CMD(FH_RSCSR_CHNL0_WPTR);
+	IWL_CMD(FH_MEM_RCSR_CHNL0_CONFIG_REG);
+	IWL_CMD(FH_MEM_RSSR_SHARED_CTRL_REG);
+	IWL_CMD(FH_MEM_RSSR_RX_STATUS_REG);
+	IWL_CMD(FH_MEM_RSSR_RX_ENABLE_ERR_IRQ2DRV);
+	IWL_CMD(FH_TSSR_TX_STATUS_REG);
+	IWL_CMD(FH_TSSR_TX_ERROR_REG);
+	default:
+		return "UNKNOWN";
+	}
+#undef IWL_CMD
+}
+
+int iwl_dump_fh(struct iwl_trans *trans, char **buf, bool display)
+{
+	int i;
+#ifdef CONFIG_IWLWIFI_DEBUG
+	int pos = 0;
+	size_t bufsz = 0;
+#endif
+	static const u32 fh_tbl[] = {
+		FH_RSCSR_CHNL0_STTS_WPTR_REG,
+		FH_RSCSR_CHNL0_RBDCB_BASE_REG,
+		FH_RSCSR_CHNL0_WPTR,
+		FH_MEM_RCSR_CHNL0_CONFIG_REG,
+		FH_MEM_RSSR_SHARED_CTRL_REG,
+		FH_MEM_RSSR_RX_STATUS_REG,
+		FH_MEM_RSSR_RX_ENABLE_ERR_IRQ2DRV,
+		FH_TSSR_TX_STATUS_REG,
+		FH_TSSR_TX_ERROR_REG
+	};
+#ifdef CONFIG_IWLWIFI_DEBUG
+	if (display) {
+		bufsz = ARRAY_SIZE(fh_tbl) * 48 + 40;
+		*buf = kmalloc(bufsz, GFP_KERNEL);
+		if (!*buf)
+			return -ENOMEM;
+		pos += scnprintf(*buf + pos, bufsz - pos,
+				"FH register values:\n");
+		for (i = 0; i < ARRAY_SIZE(fh_tbl); i++) {
+			pos += scnprintf(*buf + pos, bufsz - pos,
+				"  %34s: 0X%08x\n",
+				get_fh_string(fh_tbl[i]),
+				iwl_read_direct32(trans, fh_tbl[i]));
+		}
+		return pos;
+	}
+#endif
+	IWL_ERR(trans, "FH register values:\n");
+	for (i = 0; i <  ARRAY_SIZE(fh_tbl); i++) {
+		IWL_ERR(trans, "  %34s: 0X%08x\n",
+			get_fh_string(fh_tbl[i]),
+			iwl_read_direct32(trans, fh_tbl[i]));
+	}
+	return 0;
+}
+
+static const char *get_csr_string(int cmd)
+{
+#define IWL_CMD(x) case x: return #x
+	switch (cmd) {
+	IWL_CMD(CSR_HW_IF_CONFIG_REG);
+	IWL_CMD(CSR_INT_COALESCING);
+	IWL_CMD(CSR_INT);
+	IWL_CMD(CSR_INT_MASK);
+	IWL_CMD(CSR_FH_INT_STATUS);
+	IWL_CMD(CSR_GPIO_IN);
+	IWL_CMD(CSR_RESET);
+	IWL_CMD(CSR_GP_CNTRL);
+	IWL_CMD(CSR_HW_REV);
+	IWL_CMD(CSR_EEPROM_REG);
+	IWL_CMD(CSR_EEPROM_GP);
+	IWL_CMD(CSR_OTP_GP_REG);
+	IWL_CMD(CSR_GIO_REG);
+	IWL_CMD(CSR_GP_UCODE_REG);
+	IWL_CMD(CSR_GP_DRIVER_REG);
+	IWL_CMD(CSR_UCODE_DRV_GP1);
+	IWL_CMD(CSR_UCODE_DRV_GP2);
+	IWL_CMD(CSR_LED_REG);
+	IWL_CMD(CSR_DRAM_INT_TBL_REG);
+	IWL_CMD(CSR_GIO_CHICKEN_BITS);
+	IWL_CMD(CSR_ANA_PLL_CFG);
+	IWL_CMD(CSR_HW_REV_WA_REG);
+	IWL_CMD(CSR_DBG_HPET_MEM_REG);
+	default:
+		return "UNKNOWN";
+	}
+#undef IWL_CMD
+}
+
+void iwl_dump_csr(struct iwl_trans *trans)
+{
+	int i;
+	static const u32 csr_tbl[] = {
+		CSR_HW_IF_CONFIG_REG,
+		CSR_INT_COALESCING,
+		CSR_INT,
+		CSR_INT_MASK,
+		CSR_FH_INT_STATUS,
+		CSR_GPIO_IN,
+		CSR_RESET,
+		CSR_GP_CNTRL,
+		CSR_HW_REV,
+		CSR_EEPROM_REG,
+		CSR_EEPROM_GP,
+		CSR_OTP_GP_REG,
+		CSR_GIO_REG,
+		CSR_GP_UCODE_REG,
+		CSR_GP_DRIVER_REG,
+		CSR_UCODE_DRV_GP1,
+		CSR_UCODE_DRV_GP2,
+		CSR_LED_REG,
+		CSR_DRAM_INT_TBL_REG,
+		CSR_GIO_CHICKEN_BITS,
+		CSR_ANA_PLL_CFG,
+		CSR_HW_REV_WA_REG,
+		CSR_DBG_HPET_MEM_REG
+	};
+	IWL_ERR(trans, "CSR values:\n");
+	IWL_ERR(trans, "(2nd byte of CSR_INT_COALESCING is "
+		"CSR_INT_PERIODIC_REG)\n");
+	for (i = 0; i <  ARRAY_SIZE(csr_tbl); i++) {
+		IWL_ERR(trans, "  %25s: 0X%08x\n",
+			get_csr_string(csr_tbl[i]),
+			iwl_read32(trans, csr_tbl[i]));
+	}
+}
+
+#ifdef CONFIG_IWLWIFI_DEBUGFS
+/* create and remove of files */
+#define DEBUGFS_ADD_FILE(name, parent, mode) do {			\
+	if (!debugfs_create_file(#name, mode, parent, trans,		\
+				 &iwl_dbgfs_##name##_ops))		\
+		return -ENOMEM;						\
+} while (0)
+
+/* file operation */
+#define DEBUGFS_READ_FUNC(name)                                         \
+static ssize_t iwl_dbgfs_##name##_read(struct file *file,               \
+					char __user *user_buf,          \
+					size_t count, loff_t *ppos);
+
+#define DEBUGFS_WRITE_FUNC(name)                                        \
+static ssize_t iwl_dbgfs_##name##_write(struct file *file,              \
+					const char __user *user_buf,    \
+					size_t count, loff_t *ppos);
+
+
+#define DEBUGFS_READ_FILE_OPS(name)					\
+	DEBUGFS_READ_FUNC(name);					\
+static const struct file_operations iwl_dbgfs_##name##_ops = {		\
+	.read = iwl_dbgfs_##name##_read,				\
+	.open = simple_open,						\
+	.llseek = generic_file_llseek,					\
+};
+
+#define DEBUGFS_WRITE_FILE_OPS(name)                                    \
+	DEBUGFS_WRITE_FUNC(name);                                       \
+static const struct file_operations iwl_dbgfs_##name##_ops = {          \
+	.write = iwl_dbgfs_##name##_write,                              \
+	.open = simple_open,						\
+	.llseek = generic_file_llseek,					\
+};
+
+#define DEBUGFS_READ_WRITE_FILE_OPS(name)				\
+	DEBUGFS_READ_FUNC(name);					\
+	DEBUGFS_WRITE_FUNC(name);					\
+static const struct file_operations iwl_dbgfs_##name##_ops = {		\
+	.write = iwl_dbgfs_##name##_write,				\
+	.read = iwl_dbgfs_##name##_read,				\
+	.open = simple_open,						\
+	.llseek = generic_file_llseek,					\
+};
+
+static ssize_t iwl_dbgfs_tx_queue_read(struct file *file,
+				       char __user *user_buf,
+				       size_t count, loff_t *ppos)
+{
+	struct iwl_trans *trans = file->private_data;
+	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+	struct iwl_tx_queue *txq;
+	struct iwl_queue *q;
+	char *buf;
+	int pos = 0;
+	int cnt;
+	int ret;
+	size_t bufsz;
+
+	bufsz = sizeof(char) * 64 * trans->cfg->base_params->num_of_queues;
+
+	if (!trans_pcie->txq)
+		return -EAGAIN;
+
+	buf = kzalloc(bufsz, GFP_KERNEL);
+	if (!buf)
+		return -ENOMEM;
+
+	for (cnt = 0; cnt < trans->cfg->base_params->num_of_queues; cnt++) {
+		txq = &trans_pcie->txq[cnt];
+		q = &txq->q;
+		pos += scnprintf(buf + pos, bufsz - pos,
+				"hwq %.2d: read=%u write=%u use=%d stop=%d\n",
+				cnt, q->read_ptr, q->write_ptr,
+				!!test_bit(cnt, trans_pcie->queue_used),
+				!!test_bit(cnt, trans_pcie->queue_stopped));
+	}
+	ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos);
+	kfree(buf);
+	return ret;
+}
+
+static ssize_t iwl_dbgfs_rx_queue_read(struct file *file,
+				       char __user *user_buf,
+				       size_t count, loff_t *ppos)
+{
+	struct iwl_trans *trans = file->private_data;
+	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+	struct iwl_rx_queue *rxq = &trans_pcie->rxq;
+	char buf[256];
+	int pos = 0;
+	const size_t bufsz = sizeof(buf);
+
+	pos += scnprintf(buf + pos, bufsz - pos, "read: %u\n",
+						rxq->read);
+	pos += scnprintf(buf + pos, bufsz - pos, "write: %u\n",
+						rxq->write);
+	pos += scnprintf(buf + pos, bufsz - pos, "free_count: %u\n",
+						rxq->free_count);
+	if (rxq->rb_stts) {
+		pos += scnprintf(buf + pos, bufsz - pos, "closed_rb_num: %u\n",
+			 le16_to_cpu(rxq->rb_stts->closed_rb_num) &  0x0FFF);
+	} else {
+		pos += scnprintf(buf + pos, bufsz - pos,
+					"closed_rb_num: Not Allocated\n");
+	}
+	return simple_read_from_buffer(user_buf, count, ppos, buf, pos);
+}
+
+static ssize_t iwl_dbgfs_interrupt_read(struct file *file,
+					char __user *user_buf,
+					size_t count, loff_t *ppos)
+{
+	struct iwl_trans *trans = file->private_data;
+	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+	struct isr_statistics *isr_stats = &trans_pcie->isr_stats;
+
+	int pos = 0;
+	char *buf;
+	int bufsz = 24 * 64; /* 24 items * 64 char per item */
+	ssize_t ret;
+
+	buf = kzalloc(bufsz, GFP_KERNEL);
+	if (!buf)
+		return -ENOMEM;
+
+	pos += scnprintf(buf + pos, bufsz - pos,
+			"Interrupt Statistics Report:\n");
+
+	pos += scnprintf(buf + pos, bufsz - pos, "HW Error:\t\t\t %u\n",
+		isr_stats->hw);
+	pos += scnprintf(buf + pos, bufsz - pos, "SW Error:\t\t\t %u\n",
+		isr_stats->sw);
+	if (isr_stats->sw || isr_stats->hw) {
+		pos += scnprintf(buf + pos, bufsz - pos,
+			"\tLast Restarting Code:  0x%X\n",
+			isr_stats->err_code);
+	}
+#ifdef CONFIG_IWLWIFI_DEBUG
+	pos += scnprintf(buf + pos, bufsz - pos, "Frame transmitted:\t\t %u\n",
+		isr_stats->sch);
+	pos += scnprintf(buf + pos, bufsz - pos, "Alive interrupt:\t\t %u\n",
+		isr_stats->alive);
+#endif
+	pos += scnprintf(buf + pos, bufsz - pos,
+		"HW RF KILL switch toggled:\t %u\n", isr_stats->rfkill);
+
+	pos += scnprintf(buf + pos, bufsz - pos, "CT KILL:\t\t\t %u\n",
+		isr_stats->ctkill);
+
+	pos += scnprintf(buf + pos, bufsz - pos, "Wakeup Interrupt:\t\t %u\n",
+		isr_stats->wakeup);
+
+	pos += scnprintf(buf + pos, bufsz - pos,
+		"Rx command responses:\t\t %u\n", isr_stats->rx);
+
+	pos += scnprintf(buf + pos, bufsz - pos, "Tx/FH interrupt:\t\t %u\n",
+		isr_stats->tx);
+
+	pos += scnprintf(buf + pos, bufsz - pos, "Unexpected INTA:\t\t %u\n",
+		isr_stats->unhandled);
+
+	ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos);
+	kfree(buf);
+	return ret;
+}
+
+static ssize_t iwl_dbgfs_interrupt_write(struct file *file,
+					 const char __user *user_buf,
+					 size_t count, loff_t *ppos)
+{
+	struct iwl_trans *trans = file->private_data;
+	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+	struct isr_statistics *isr_stats = &trans_pcie->isr_stats;
+
+	char buf[8];
+	int buf_size;
+	u32 reset_flag;
+
+	memset(buf, 0, sizeof(buf));
+	buf_size = min(count, sizeof(buf) - 1);
+	if (copy_from_user(buf, user_buf, buf_size))
+		return -EFAULT;
+	if (sscanf(buf, "%x", &reset_flag) != 1)
+		return -EFAULT;
+	if (reset_flag == 0)
+		memset(isr_stats, 0, sizeof(*isr_stats));
+
+	return count;
+}
+
+static ssize_t iwl_dbgfs_csr_write(struct file *file,
+				   const char __user *user_buf,
+				   size_t count, loff_t *ppos)
+{
+	struct iwl_trans *trans = file->private_data;
+	char buf[8];
+	int buf_size;
+	int csr;
+
+	memset(buf, 0, sizeof(buf));
+	buf_size = min(count, sizeof(buf) - 1);
+	if (copy_from_user(buf, user_buf, buf_size))
+		return -EFAULT;
+	if (sscanf(buf, "%d", &csr) != 1)
+		return -EFAULT;
+
+	iwl_dump_csr(trans);
+
+	return count;
+}
+
+static ssize_t iwl_dbgfs_fh_reg_read(struct file *file,
+				     char __user *user_buf,
+				     size_t count, loff_t *ppos)
+{
+	struct iwl_trans *trans = file->private_data;
+	char *buf = NULL;
+	int pos = 0;
+	ssize_t ret = -EFAULT;
+
+	ret = pos = iwl_dump_fh(trans, &buf, true);
+	if (buf) {
+		ret = simple_read_from_buffer(user_buf,
+					      count, ppos, buf, pos);
+		kfree(buf);
+	}
+
+	return ret;
+}
+
+static ssize_t iwl_dbgfs_fw_restart_write(struct file *file,
+					  const char __user *user_buf,
+					  size_t count, loff_t *ppos)
+{
+	struct iwl_trans *trans = file->private_data;
+
+	if (!trans->op_mode)
+		return -EAGAIN;
+
+	iwl_op_mode_nic_error(trans->op_mode);
+
+	return count;
+}
+
+DEBUGFS_READ_WRITE_FILE_OPS(interrupt);
+DEBUGFS_READ_FILE_OPS(fh_reg);
+DEBUGFS_READ_FILE_OPS(rx_queue);
+DEBUGFS_READ_FILE_OPS(tx_queue);
+DEBUGFS_WRITE_FILE_OPS(csr);
+DEBUGFS_WRITE_FILE_OPS(fw_restart);
+
+/*
+ * Create the debugfs files and directories
+ */
+static int iwl_trans_pcie_dbgfs_register(struct iwl_trans *trans,
+					 struct dentry *dir)
+{
+	DEBUGFS_ADD_FILE(rx_queue, dir, S_IRUSR);
+	DEBUGFS_ADD_FILE(tx_queue, dir, S_IRUSR);
+	DEBUGFS_ADD_FILE(interrupt, dir, S_IWUSR | S_IRUSR);
+	DEBUGFS_ADD_FILE(csr, dir, S_IWUSR);
+	DEBUGFS_ADD_FILE(fh_reg, dir, S_IRUSR);
+	DEBUGFS_ADD_FILE(fw_restart, dir, S_IWUSR);
+	return 0;
+}
+#else
+static int iwl_trans_pcie_dbgfs_register(struct iwl_trans *trans,
+					 struct dentry *dir)
+{
+	return 0;
+}
+#endif /*CONFIG_IWLWIFI_DEBUGFS */
+
+static const struct iwl_trans_ops trans_ops_pcie = {
+	.start_hw = iwl_trans_pcie_start_hw,
+	.stop_hw = iwl_trans_pcie_stop_hw,
+	.fw_alive = iwl_trans_pcie_fw_alive,
+	.start_fw = iwl_trans_pcie_start_fw,
+	.stop_device = iwl_trans_pcie_stop_device,
+
+	.wowlan_suspend = iwl_trans_pcie_wowlan_suspend,
+
+	.send_cmd = iwl_trans_pcie_send_cmd,
+
+	.tx = iwl_trans_pcie_tx,
+	.reclaim = iwl_trans_pcie_reclaim,
+
+	.tx_agg_disable = iwl_trans_pcie_tx_agg_disable,
+	.tx_agg_setup = iwl_trans_pcie_tx_agg_setup,
+
+	.dbgfs_register = iwl_trans_pcie_dbgfs_register,
+
+	.wait_tx_queue_empty = iwl_trans_pcie_wait_tx_queue_empty,
+
+#ifdef CONFIG_PM_SLEEP
+	.suspend = iwl_trans_pcie_suspend,
+	.resume = iwl_trans_pcie_resume,
+#endif
+	.write8 = iwl_trans_pcie_write8,
+	.write32 = iwl_trans_pcie_write32,
+	.read32 = iwl_trans_pcie_read32,
+	.configure = iwl_trans_pcie_configure,
+	.set_pmi = iwl_trans_pcie_set_pmi,
+};
+
+struct iwl_trans *iwl_trans_pcie_alloc(struct pci_dev *pdev,
+				       const struct pci_device_id *ent,
+				       const struct iwl_cfg *cfg)
+{
+	struct iwl_trans_pcie *trans_pcie;
+	struct iwl_trans *trans;
+	u16 pci_cmd;
+	int err;
+
+	trans = kzalloc(sizeof(struct iwl_trans) +
+			sizeof(struct iwl_trans_pcie), GFP_KERNEL);
+
+	if (WARN_ON(!trans))
+		return NULL;
+
+	trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+
+	trans->ops = &trans_ops_pcie;
+	trans->cfg = cfg;
+	trans_pcie->trans = trans;
+	spin_lock_init(&trans_pcie->irq_lock);
+	init_waitqueue_head(&trans_pcie->ucode_write_waitq);
+
+	/* W/A - seems to solve weird behavior. We need to remove this if we
+	 * don't want to stay in L1 all the time. This wastes a lot of power */
+	pci_disable_link_state(pdev, PCIE_LINK_STATE_L0S | PCIE_LINK_STATE_L1 |
+			       PCIE_LINK_STATE_CLKPM);
+
+	if (pci_enable_device(pdev)) {
+		err = -ENODEV;
+		goto out_no_pci;
+	}
+
+	pci_set_master(pdev);
+
+	err = pci_set_dma_mask(pdev, DMA_BIT_MASK(36));
+	if (!err)
+		err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(36));
+	if (err) {
+		err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
+		if (!err)
+			err = pci_set_consistent_dma_mask(pdev,
+							  DMA_BIT_MASK(32));
+		/* both attempts failed: */
+		if (err) {
+			dev_printk(KERN_ERR, &pdev->dev,
+				   "No suitable DMA available.\n");
+			goto out_pci_disable_device;
+		}
+	}
+
+	err = pci_request_regions(pdev, DRV_NAME);
+	if (err) {
+		dev_printk(KERN_ERR, &pdev->dev, "pci_request_regions failed");
+		goto out_pci_disable_device;
+	}
+
+	trans_pcie->hw_base = pci_ioremap_bar(pdev, 0);
+	if (!trans_pcie->hw_base) {
+		dev_printk(KERN_ERR, &pdev->dev, "pci_ioremap_bar failed");
+		err = -ENODEV;
+		goto out_pci_release_regions;
+	}
+
+	dev_printk(KERN_INFO, &pdev->dev,
+		   "pci_resource_len = 0x%08llx\n",
+		   (unsigned long long) pci_resource_len(pdev, 0));
+	dev_printk(KERN_INFO, &pdev->dev,
+		   "pci_resource_base = %p\n", trans_pcie->hw_base);
+
+	dev_printk(KERN_INFO, &pdev->dev,
+		   "HW Revision ID = 0x%X\n", pdev->revision);
+
+	/* We disable the RETRY_TIMEOUT register (0x41) to keep
+	 * PCI Tx retries from interfering with C3 CPU state */
+	pci_write_config_byte(pdev, PCI_CFG_RETRY_TIMEOUT, 0x00);
+
+	err = pci_enable_msi(pdev);
+	if (err)
+		dev_printk(KERN_ERR, &pdev->dev,
+			   "pci_enable_msi failed(0X%x)", err);
+
+	trans->dev = &pdev->dev;
+	trans_pcie->irq = pdev->irq;
+	trans_pcie->pci_dev = pdev;
+	trans->hw_rev = iwl_read32(trans, CSR_HW_REV);
+	trans->hw_id = (pdev->device << 16) + pdev->subsystem_device;
+	snprintf(trans->hw_id_str, sizeof(trans->hw_id_str),
+		 "PCI ID: 0x%04X:0x%04X", pdev->device, pdev->subsystem_device);
+
+	/* TODO: Move this away, not needed if not MSI */
+	/* enable rfkill interrupt: hw bug w/a */
+	pci_read_config_word(pdev, PCI_COMMAND, &pci_cmd);
+	if (pci_cmd & PCI_COMMAND_INTX_DISABLE) {
+		pci_cmd &= ~PCI_COMMAND_INTX_DISABLE;
+		pci_write_config_word(pdev, PCI_COMMAND, pci_cmd);
+	}
+
+	/* Initialize the wait queue for commands */
+	init_waitqueue_head(&trans->wait_command_queue);
+	spin_lock_init(&trans->reg_lock);
+
+	return trans;
+
+out_pci_release_regions:
+	pci_release_regions(pdev);
+out_pci_disable_device:
+	pci_disable_device(pdev);
+out_no_pci:
+	kfree(trans);
+	return NULL;
+}
diff --git a/drivers/net/wireless/iwlwifi/pcie/tx.c b/drivers/net/wireless/iwlwifi/pcie/tx.c
new file mode 100644
index 0000000..0b8bbb0
--- /dev/null
+++ b/drivers/net/wireless/iwlwifi/pcie/tx.c
@@ -0,0 +1,984 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2003 - 2012 Intel Corporation. All rights reserved.
+ *
+ * Portions of this file are derived from the ipw3945 project, as well
+ * as portions of the ieee80211 subsystem header files.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
+ *
+ * The full GNU General Public License is included in this distribution in the
+ * file called LICENSE.
+ *
+ * Contact Information:
+ *  Intel Linux Wireless <ilw@linux.intel.com>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ *
+ *****************************************************************************/
+#include <linux/etherdevice.h>
+#include <linux/slab.h>
+#include <linux/sched.h>
+
+#include "iwl-debug.h"
+#include "iwl-csr.h"
+#include "iwl-prph.h"
+#include "iwl-io.h"
+#include "iwl-agn-hw.h"
+#include "iwl-op-mode.h"
+#include "internal.h"
+/* FIXME: need to abstract out TX command (once we know what it looks like) */
+#include "iwl-commands.h"
+
+#define IWL_TX_CRC_SIZE 4
+#define IWL_TX_DELIMITER_SIZE 4
+
+/**
+ * iwl_trans_txq_update_byte_cnt_tbl - Set up entry in Tx byte-count array
+ */
+void iwl_trans_txq_update_byte_cnt_tbl(struct iwl_trans *trans,
+				       struct iwl_tx_queue *txq,
+				       u16 byte_cnt)
+{
+	struct iwlagn_scd_bc_tbl *scd_bc_tbl;
+	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+	int write_ptr = txq->q.write_ptr;
+	int txq_id = txq->q.id;
+	u8 sec_ctl = 0;
+	u8 sta_id = 0;
+	u16 len = byte_cnt + IWL_TX_CRC_SIZE + IWL_TX_DELIMITER_SIZE;
+	__le16 bc_ent;
+	struct iwl_tx_cmd *tx_cmd =
+		(void *) txq->entries[txq->q.write_ptr].cmd->payload;
+
+	scd_bc_tbl = trans_pcie->scd_bc_tbls.addr;
+
+	WARN_ON(len > 0xFFF || write_ptr >= TFD_QUEUE_SIZE_MAX);
+
+	sta_id = tx_cmd->sta_id;
+	sec_ctl = tx_cmd->sec_ctl;
+
+	switch (sec_ctl & TX_CMD_SEC_MSK) {
+	case TX_CMD_SEC_CCM:
+		len += CCMP_MIC_LEN;
+		break;
+	case TX_CMD_SEC_TKIP:
+		len += TKIP_ICV_LEN;
+		break;
+	case TX_CMD_SEC_WEP:
+		len += WEP_IV_LEN + WEP_ICV_LEN;
+		break;
+	}
+
+	bc_ent = cpu_to_le16((len & 0xFFF) | (sta_id << 12));
+
+	scd_bc_tbl[txq_id].tfd_offset[write_ptr] = bc_ent;
+
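+	/* The first TFD_QUEUE_SIZE_BC_DUP entries are mirrored past the
+	 * end of the table, presumably so the scheduler can keep reading
+	 * linearly across the circular-buffer wrap point. */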
+	if (write_ptr < TFD_QUEUE_SIZE_BC_DUP)
+		scd_bc_tbl[txq_id].
+			tfd_offset[TFD_QUEUE_SIZE_MAX + write_ptr] = bc_ent;
+}
+
+/**
+ * iwl_txq_update_write_ptr - Send new write index to hardware
+ */
+void iwl_txq_update_write_ptr(struct iwl_trans *trans, struct iwl_tx_queue *txq)
+{
+	u32 reg = 0;
+	int txq_id = txq->q.id;
+
+	if (txq->need_update == 0)
+		return;
+
+	if (trans->cfg->base_params->shadow_reg_enable) {
+		/* shadow register enabled */
+		iwl_write32(trans, HBUS_TARG_WRPTR,
+			    txq->q.write_ptr | (txq_id << 8));
+	} else {
+		struct iwl_trans_pcie *trans_pcie =
+			IWL_TRANS_GET_PCIE_TRANS(trans);
+		/* if we're trying to save power */
+		if (test_bit(STATUS_TPOWER_PMI, &trans_pcie->status)) {
+			/* wake up nic if it's powered down ...
+			 * uCode will wake up, and interrupt us again, so next
+			 * time we'll skip this part. */
+			reg = iwl_read32(trans, CSR_UCODE_DRV_GP1);
+
+			if (reg & CSR_UCODE_DRV_GP1_BIT_MAC_SLEEP) {
+				IWL_DEBUG_INFO(trans,
+					"Tx queue %d requesting wakeup,"
+					" GP1 = 0x%x\n", txq_id, reg);
+				iwl_set_bit(trans, CSR_GP_CNTRL,
+					CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
+				return;
+			}
+
+			iwl_write_direct32(trans, HBUS_TARG_WRPTR,
+				     txq->q.write_ptr | (txq_id << 8));
+
+		/*
+		 * else not in power-save mode,
+		 * uCode will never sleep when we're
+		 * trying to tx (during RFKILL, we're not trying to tx).
+		 */
+		} else
+			iwl_write32(trans, HBUS_TARG_WRPTR,
+				    txq->q.write_ptr | (txq_id << 8));
+	}
+	txq->need_update = 0;
+}
+
+static inline dma_addr_t iwl_tfd_tb_get_addr(struct iwl_tfd *tfd, u8 idx)
+{
+	struct iwl_tfd_tb *tb = &tfd->tbs[idx];
+
+	dma_addr_t addr = get_unaligned_le32(&tb->lo);
+	if (sizeof(dma_addr_t) > sizeof(u32))
+		addr |=
+		((dma_addr_t)(le16_to_cpu(tb->hi_n_len) & 0xF) << 16) << 16;
+
+	return addr;
+}
+
+static inline u16 iwl_tfd_tb_get_len(struct iwl_tfd *tfd, u8 idx)
+{
+	struct iwl_tfd_tb *tb = &tfd->tbs[idx];
+
+	return le16_to_cpu(tb->hi_n_len) >> 4;
+}
+
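+/*
+ * A TB packs a 36-bit DMA address and a 12-bit length into lo/hi_n_len:
+ * tb->lo holds address bits [31:0], hi_n_len bits [3:0] hold address
+ * bits [35:32] and hi_n_len bits [15:4] hold the length.
+ */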
+static inline void iwl_tfd_set_tb(struct iwl_tfd *tfd, u8 idx,
+				  dma_addr_t addr, u16 len)
+{
+	struct iwl_tfd_tb *tb = &tfd->tbs[idx];
+	u16 hi_n_len = len << 4;
+
+	put_unaligned_le32(addr, &tb->lo);
+	if (sizeof(dma_addr_t) > sizeof(u32))
+		hi_n_len |= ((addr >> 16) >> 16) & 0xF;
+
+	tb->hi_n_len = cpu_to_le16(hi_n_len);
+
+	tfd->num_tbs = idx + 1;
+}
+
+static inline u8 iwl_tfd_get_num_tbs(struct iwl_tfd *tfd)
+{
+	return tfd->num_tbs & 0x1f;
+}
+
+static void iwl_unmap_tfd(struct iwl_trans *trans, struct iwl_cmd_meta *meta,
+			  struct iwl_tfd *tfd, enum dma_data_direction dma_dir)
+{
+	int i;
+	int num_tbs;
+
+	/* Sanity check on number of chunks */
+	num_tbs = iwl_tfd_get_num_tbs(tfd);
+
+	if (num_tbs >= IWL_NUM_OF_TBS) {
+		IWL_ERR(trans, "Too many chunks: %i\n", num_tbs);
+		/* @todo issue fatal error, this is quite a serious situation */
+		return;
+	}
+
+	/* Unmap tx_cmd */
+	if (num_tbs)
+		dma_unmap_single(trans->dev,
+				dma_unmap_addr(meta, mapping),
+				dma_unmap_len(meta, len),
+				DMA_BIDIRECTIONAL);
+
+	/* Unmap chunks, if any. */
+	for (i = 1; i < num_tbs; i++)
+		dma_unmap_single(trans->dev, iwl_tfd_tb_get_addr(tfd, i),
+				iwl_tfd_tb_get_len(tfd, i), dma_dir);
+
+	tfd->num_tbs = 0;
+}
+
+/**
+ * iwl_txq_free_tfd - Free all chunks referenced by TFD [txq->q.read_ptr]
+ * @trans: transport private data
+ * @txq: tx queue
+ * @dma_dir: the direction of the DMA mapping
+ *
+ * Does NOT advance any TFD circular buffer read/write indexes
+ * Does NOT free the TFD itself (which is within circular buffer)
+ */
+void iwl_txq_free_tfd(struct iwl_trans *trans, struct iwl_tx_queue *txq,
+		      enum dma_data_direction dma_dir)
+{
+	struct iwl_tfd *tfd_tmp = txq->tfds;
+
+	/* rd_ptr is bounded by n_bd and idx is bounded by n_window */
+	int rd_ptr = txq->q.read_ptr;
+	int idx = get_cmd_index(&txq->q, rd_ptr);
+
+	lockdep_assert_held(&txq->lock);
+
+	/* We have only q->n_window txq->entries, but we use q->n_bd tfds */
+	iwl_unmap_tfd(trans, &txq->entries[idx].meta, &tfd_tmp[rd_ptr],
+		      dma_dir);
+
+	/* free SKB */
+	if (txq->entries) {
+		struct sk_buff *skb;
+
+		skb = txq->entries[idx].skb;
+
+		/* Can be called from irqs-disabled context
+		 * If skb is not NULL, it means that the whole queue is being
+		 * freed and that the queue is not empty - free the skb
+		 */
+		if (skb) {
+			iwl_op_mode_free_skb(trans->op_mode, skb);
+			txq->entries[idx].skb = NULL;
+		}
+	}
+}
+
+int iwlagn_txq_attach_buf_to_tfd(struct iwl_trans *trans,
+				 struct iwl_tx_queue *txq,
+				 dma_addr_t addr, u16 len,
+				 u8 reset)
+{
+	struct iwl_queue *q;
+	struct iwl_tfd *tfd, *tfd_tmp;
+	u32 num_tbs;
+
+	q = &txq->q;
+	tfd_tmp = txq->tfds;
+	tfd = &tfd_tmp[q->write_ptr];
+
+	if (reset)
+		memset(tfd, 0, sizeof(*tfd));
+
+	num_tbs = iwl_tfd_get_num_tbs(tfd);
+
+	/* Each TFD can point to a maximum of 20 Tx buffers */
+	if (num_tbs >= IWL_NUM_OF_TBS) {
+		IWL_ERR(trans, "Error: cannot send more than %d chunks\n",
+			IWL_NUM_OF_TBS);
+		return -EINVAL;
+	}
+
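+	/* the hardware can address at most 36 bits of DMA memory */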
+	if (WARN_ON(addr & ~DMA_BIT_MASK(36)))
+		return -EINVAL;
+
+	if (unlikely(addr & ~IWL_TX_DMA_MASK))
+		IWL_ERR(trans, "Unaligned address = %llx\n",
+			(unsigned long long)addr);
+
+	iwl_tfd_set_tb(tfd, num_tbs, addr, len);
+
+	return 0;
+}
+
+/*************** DMA-QUEUE-GENERAL-FUNCTIONS  *****
+ * DMA services
+ *
+ * Theory of operation
+ *
+ * A Tx or Rx queue resides in host DRAM, and is comprised of a circular buffer
+ * of buffer descriptors, each of which points to one or more data buffers for
+ * the device to read from or fill.  Driver and device exchange status of each
+ * queue via "read" and "write" pointers.  Driver keeps minimum of 2 empty
+ * entries in each circular buffer, to protect against confusing empty and full
+ * queue states.
+ *
+ * The device reads or writes the data in the queues via the device's several
+ * DMA/FIFO channels.  Each queue is mapped to a single DMA channel.
+ *
+ * For the Tx queue, there are low mark and high mark limits. If, after
+ * queuing a packet for Tx, the free space becomes < low mark, the Tx queue
+ * is stopped. When reclaiming packets (on the 'tx done' IRQ), if the free
+ * space becomes > high mark, the Tx queue is resumed.
+ *
+ ***************************************************/
+
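+/*
+ * Worked example (illustrative, for a queue where n_window == n_bd == 256):
+ * read_ptr = 10, write_ptr = 250 gives s = -240 + 256 - 2 = 14 free
+ * entries; the 2-entry reserve is already subtracted from the result.
+ */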
+int iwl_queue_space(const struct iwl_queue *q)
+{
+	int s = q->read_ptr - q->write_ptr;
+
+	if (q->read_ptr > q->write_ptr)
+		s -= q->n_bd;
+
+	if (s <= 0)
+		s += q->n_window;
+	/* keep some reserve to avoid confusing empty and full situations */
+	s -= 2;
+	if (s < 0)
+		s = 0;
+	return s;
+}
+
+/**
+ * iwl_queue_init - Initialize queue's high/low-water and read/write indexes
+ */
+int iwl_queue_init(struct iwl_queue *q, int count, int slots_num, u32 id)
+{
+	q->n_bd = count;
+	q->n_window = slots_num;
+	q->id = id;
+
+	/* count must be power-of-two size, otherwise iwl_queue_inc_wrap
+	 * and iwl_queue_dec_wrap are broken. */
+	if (WARN_ON(!is_power_of_2(count)))
+		return -EINVAL;
+
+	/* slots_num must be power-of-two size, otherwise
+	 * get_cmd_index is broken. */
+	if (WARN_ON(!is_power_of_2(slots_num)))
+		return -EINVAL;
+
+	q->low_mark = q->n_window / 4;
+	if (q->low_mark < 4)
+		q->low_mark = 4;
+
+	q->high_mark = q->n_window / 8;
+	if (q->high_mark < 2)
+		q->high_mark = 2;
+
+	q->write_ptr = q->read_ptr = 0;
+
+	return 0;
+}
+
+static void iwlagn_txq_inval_byte_cnt_tbl(struct iwl_trans *trans,
+					  struct iwl_tx_queue *txq)
+{
+	struct iwl_trans_pcie *trans_pcie =
+		IWL_TRANS_GET_PCIE_TRANS(trans);
+	struct iwlagn_scd_bc_tbl *scd_bc_tbl = trans_pcie->scd_bc_tbls.addr;
+	int txq_id = txq->q.id;
+	int read_ptr = txq->q.read_ptr;
+	u8 sta_id = 0;
+	__le16 bc_ent;
+	struct iwl_tx_cmd *tx_cmd =
+		(void *)txq->entries[txq->q.read_ptr].cmd->payload;
+
+	WARN_ON(read_ptr >= TFD_QUEUE_SIZE_MAX);
+
+	if (txq_id != trans_pcie->cmd_queue)
+		sta_id = tx_cmd->sta_id;
+
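+	/* a byte count of 1 marks the reclaimed entry as invalid */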
+	bc_ent = cpu_to_le16(1 | (sta_id << 12));
+	scd_bc_tbl[txq_id].tfd_offset[read_ptr] = bc_ent;
+
+	if (read_ptr < TFD_QUEUE_SIZE_BC_DUP)
+		scd_bc_tbl[txq_id].
+			tfd_offset[TFD_QUEUE_SIZE_MAX + read_ptr] = bc_ent;
+}
+
+static int iwlagn_tx_queue_set_q2ratid(struct iwl_trans *trans, u16 ra_tid,
+				       u16 txq_id)
+{
+	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+	u32 tbl_dw_addr;
+	u32 tbl_dw;
+	u16 scd_q2ratid;
+
+	scd_q2ratid = ra_tid & SCD_QUEUE_RA_TID_MAP_RATID_MSK;
+
+	tbl_dw_addr = trans_pcie->scd_base_addr +
+			SCD_TRANS_TBL_OFFSET_QUEUE(txq_id);
+
+	tbl_dw = iwl_read_targ_mem(trans, tbl_dw_addr);
+
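+	/* two 16-bit RA/TID entries share each 32-bit word: odd queue
+	 * ids use the upper half-word, even ids the lower */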
+	if (txq_id & 0x1)
+		tbl_dw = (scd_q2ratid << 16) | (tbl_dw & 0x0000FFFF);
+	else
+		tbl_dw = scd_q2ratid | (tbl_dw & 0xFFFF0000);
+
+	iwl_write_targ_mem(trans, tbl_dw_addr, tbl_dw);
+
+	return 0;
+}
+
+static void iwlagn_tx_queue_stop_scheduler(struct iwl_trans *trans, u16 txq_id)
+{
+	/* Simply stop the queue, but don't change any configuration;
+	 * the SCD_ACT_EN bit is the write-enable mask for the ACTIVE bit. */
+	iwl_write_prph(trans,
+		SCD_QUEUE_STATUS_BITS(txq_id),
+		(0 << SCD_QUEUE_STTS_REG_POS_ACTIVE)|
+		(1 << SCD_QUEUE_STTS_REG_POS_SCD_ACT_EN));
+}
+
+void iwl_trans_set_wr_ptrs(struct iwl_trans *trans, int txq_id, u32 index)
+{
+	IWL_DEBUG_TX_QUEUES(trans, "Q %d  WrPtr: %d\n", txq_id, index & 0xff);
+	iwl_write_direct32(trans, HBUS_TARG_WRPTR,
+			   (index & 0xff) | (txq_id << 8));
+	iwl_write_prph(trans, SCD_QUEUE_RDPTR(txq_id), index);
+}
+
+void iwl_trans_tx_queue_set_status(struct iwl_trans *trans,
+				   struct iwl_tx_queue *txq,
+				   int tx_fifo_id, bool active)
+{
+	int txq_id = txq->q.id;
+
+	iwl_write_prph(trans, SCD_QUEUE_STATUS_BITS(txq_id),
+			(active << SCD_QUEUE_STTS_REG_POS_ACTIVE) |
+			(tx_fifo_id << SCD_QUEUE_STTS_REG_POS_TXF) |
+			(1 << SCD_QUEUE_STTS_REG_POS_WSL) |
+			SCD_QUEUE_STTS_REG_MSK);
+
+	if (active)
+		IWL_DEBUG_TX_QUEUES(trans, "Activate queue %d on FIFO %d\n",
+				    txq_id, tx_fifo_id);
+	else
+		IWL_DEBUG_TX_QUEUES(trans, "Deactivate queue %d\n", txq_id);
+}
+
+void iwl_trans_pcie_tx_agg_setup(struct iwl_trans *trans, int txq_id, int fifo,
+				 int sta_id, int tid, int frame_limit, u16 ssn)
+{
+	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+	unsigned long flags;
+	u16 ra_tid = BUILD_RAxTID(sta_id, tid);
+
+	if (test_and_set_bit(txq_id, trans_pcie->queue_used))
+		WARN_ONCE(1, "queue %d already used - expect issues", txq_id);
+
+	spin_lock_irqsave(&trans_pcie->irq_lock, flags);
+
+	/* Stop this Tx queue before configuring it */
+	iwlagn_tx_queue_stop_scheduler(trans, txq_id);
+
+	/* Map receiver-address / traffic-ID to this queue */
+	iwlagn_tx_queue_set_q2ratid(trans, ra_tid, txq_id);
+
+	/* Set this queue as a chain-building queue */
+	iwl_set_bits_prph(trans, SCD_QUEUECHAIN_SEL, BIT(txq_id));
+
+	/* enable aggregations for the queue */
+	iwl_set_bits_prph(trans, SCD_AGGR_SEL, BIT(txq_id));
+
+	/* Place first TFD at index corresponding to start sequence number.
+	 * Assumes that ssn is valid (!= 0xFFF) */
+	trans_pcie->txq[txq_id].q.read_ptr = (ssn & 0xff);
+	trans_pcie->txq[txq_id].q.write_ptr = (ssn & 0xff);
+	iwl_trans_set_wr_ptrs(trans, txq_id, ssn);
+
+	/* Set up Tx window size and frame limit for this queue */
+	iwl_write_targ_mem(trans, trans_pcie->scd_base_addr +
+			SCD_CONTEXT_QUEUE_OFFSET(txq_id) + sizeof(u32),
+			((frame_limit << SCD_QUEUE_CTX_REG2_WIN_SIZE_POS) &
+				SCD_QUEUE_CTX_REG2_WIN_SIZE_MSK) |
+			((frame_limit << SCD_QUEUE_CTX_REG2_FRAME_LIMIT_POS) &
+				SCD_QUEUE_CTX_REG2_FRAME_LIMIT_MSK));
+
+	iwl_set_bits_prph(trans, SCD_INTERRUPT_MASK, (1 << txq_id));
+
+	/* Set up Status area in SRAM, map to Tx DMA/FIFO, activate the queue */
+	iwl_trans_tx_queue_set_status(trans, &trans_pcie->txq[txq_id],
+				      fifo, true);
+
+	spin_unlock_irqrestore(&trans_pcie->irq_lock, flags);
+}
+
+void iwl_trans_pcie_tx_agg_disable(struct iwl_trans *trans, int txq_id)
+{
+	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+
+	if (!test_and_clear_bit(txq_id, trans_pcie->queue_used)) {
+		WARN_ONCE(1, "queue %d not used", txq_id);
+		return;
+	}
+
+	iwlagn_tx_queue_stop_scheduler(trans, txq_id);
+
+	iwl_clear_bits_prph(trans, SCD_AGGR_SEL, BIT(txq_id));
+
+	trans_pcie->txq[txq_id].q.read_ptr = 0;
+	trans_pcie->txq[txq_id].q.write_ptr = 0;
+	iwl_trans_set_wr_ptrs(trans, txq_id, 0);
+
+	iwl_clear_bits_prph(trans, SCD_INTERRUPT_MASK, BIT(txq_id));
+
+	iwl_trans_tx_queue_set_status(trans, &trans_pcie->txq[txq_id],
+				      0, false);
+}
+
+/*************** HOST COMMAND QUEUE FUNCTIONS   *****/
+
+/**
+ * iwl_enqueue_hcmd - enqueue a uCode command
+ * @trans: transport private data
+ * @cmd: a pointer to the uCode command structure
+ *
+ * The function returns a value < 0 to indicate that the operation
+ * failed. On success, it returns the index (> 0) of the command in
+ * the command queue.
+ */
+static int iwl_enqueue_hcmd(struct iwl_trans *trans, struct iwl_host_cmd *cmd)
+{
+	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+	struct iwl_tx_queue *txq = &trans_pcie->txq[trans_pcie->cmd_queue];
+	struct iwl_queue *q = &txq->q;
+	struct iwl_device_cmd *out_cmd;
+	struct iwl_cmd_meta *out_meta;
+	dma_addr_t phys_addr;
+	u32 idx;
+	u16 copy_size, cmd_size;
+	bool had_nocopy = false;
+	int i;
+	u8 *cmd_dest;
+#ifdef CONFIG_IWLWIFI_DEVICE_TRACING
+	const void *trace_bufs[IWL_MAX_CMD_TFDS + 1] = {};
+	int trace_lens[IWL_MAX_CMD_TFDS + 1] = {};
+	int trace_idx;
+#endif
+
+	copy_size = sizeof(out_cmd->hdr);
+	cmd_size = sizeof(out_cmd->hdr);
+
+	/* need one for the header if the first is NOCOPY */
+	BUILD_BUG_ON(IWL_MAX_CMD_TFDS > IWL_NUM_OF_TBS - 1);
+
+	for (i = 0; i < IWL_MAX_CMD_TFDS; i++) {
+		if (!cmd->len[i])
+			continue;
+		if (cmd->dataflags[i] & IWL_HCMD_DFL_NOCOPY) {
+			had_nocopy = true;
+		} else {
+			/* NOCOPY must not be followed by normal! */
+			if (WARN_ON(had_nocopy))
+				return -EINVAL;
+			copy_size += cmd->len[i];
+		}
+		cmd_size += cmd->len[i];
+	}
+
+	/*
+	 * If any of the command structures end up being larger than
+	 * the TFD_MAX_PAYLOAD_SIZE and they aren't dynamically
+	 * allocated into separate TFDs, then we will need to
+	 * increase the size of the buffers.
+	 */
+	if (WARN_ON(copy_size > TFD_MAX_PAYLOAD_SIZE))
+		return -EINVAL;
+
+	spin_lock_bh(&txq->lock);
+
+	if (iwl_queue_space(q) < ((cmd->flags & CMD_ASYNC) ? 2 : 1)) {
+		spin_unlock_bh(&txq->lock);
+
+		IWL_ERR(trans, "No space in command queue\n");
+		iwl_op_mode_cmd_queue_full(trans->op_mode);
+		return -ENOSPC;
+	}
+
+	idx = get_cmd_index(q, q->write_ptr);
+	out_cmd = txq->entries[idx].cmd;
+	out_meta = &txq->entries[idx].meta;
+
+	memset(out_meta, 0, sizeof(*out_meta));	/* re-initialize to NULL */
+	if (cmd->flags & CMD_WANT_SKB)
+		out_meta->source = cmd;
+
+	/* set up the header */
+
+	out_cmd->hdr.cmd = cmd->id;
+	out_cmd->hdr.flags = 0;
+	out_cmd->hdr.sequence =
+		cpu_to_le16(QUEUE_TO_SEQ(trans_pcie->cmd_queue) |
+					 INDEX_TO_SEQ(q->write_ptr));
+
+	/* and copy the data that needs to be copied */
+
+	cmd_dest = out_cmd->payload;
+	for (i = 0; i < IWL_MAX_CMD_TFDS; i++) {
+		if (!cmd->len[i])
+			continue;
+		if (cmd->dataflags[i] & IWL_HCMD_DFL_NOCOPY)
+			break;
+		memcpy(cmd_dest, cmd->data[i], cmd->len[i]);
+		cmd_dest += cmd->len[i];
+	}
+
+	IWL_DEBUG_HC(trans,
+		     "Sending command %s (#%x), seq: 0x%04X, %d bytes at %d[%d]:%d\n",
+		     trans_pcie_get_cmd_string(trans_pcie, out_cmd->hdr.cmd),
+		     out_cmd->hdr.cmd, le16_to_cpu(out_cmd->hdr.sequence),
+		     cmd_size, q->write_ptr, idx, trans_pcie->cmd_queue);
+
+	phys_addr = dma_map_single(trans->dev, &out_cmd->hdr, copy_size,
+				   DMA_BIDIRECTIONAL);
+	if (unlikely(dma_mapping_error(trans->dev, phys_addr))) {
+		idx = -ENOMEM;
+		goto out;
+	}
+
+	dma_unmap_addr_set(out_meta, mapping, phys_addr);
+	dma_unmap_len_set(out_meta, len, copy_size);
+
+	iwlagn_txq_attach_buf_to_tfd(trans, txq, phys_addr, copy_size, 1);
+#ifdef CONFIG_IWLWIFI_DEVICE_TRACING
+	trace_bufs[0] = &out_cmd->hdr;
+	trace_lens[0] = copy_size;
+	trace_idx = 1;
+#endif
+
+	for (i = 0; i < IWL_MAX_CMD_TFDS; i++) {
+		if (!cmd->len[i])
+			continue;
+		if (!(cmd->dataflags[i] & IWL_HCMD_DFL_NOCOPY))
+			continue;
+		phys_addr = dma_map_single(trans->dev, (void *)cmd->data[i],
+					   cmd->len[i], DMA_BIDIRECTIONAL);
+		if (dma_mapping_error(trans->dev, phys_addr)) {
+			iwl_unmap_tfd(trans, out_meta,
+				      &txq->tfds[q->write_ptr],
+				      DMA_BIDIRECTIONAL);
+			idx = -ENOMEM;
+			goto out;
+		}
+
+		iwlagn_txq_attach_buf_to_tfd(trans, txq, phys_addr,
+					     cmd->len[i], 0);
+#ifdef CONFIG_IWLWIFI_DEVICE_TRACING
+		trace_bufs[trace_idx] = cmd->data[i];
+		trace_lens[trace_idx] = cmd->len[i];
+		trace_idx++;
+#endif
+	}
+
+	out_meta->flags = cmd->flags;
+
+	txq->need_update = 1;
+
+	/* check that tracing gets all possible blocks */
+	BUILD_BUG_ON(IWL_MAX_CMD_TFDS + 1 != 3);
+#ifdef CONFIG_IWLWIFI_DEVICE_TRACING
+	trace_iwlwifi_dev_hcmd(trans->dev, cmd->flags,
+			       trace_bufs[0], trace_lens[0],
+			       trace_bufs[1], trace_lens[1],
+			       trace_bufs[2], trace_lens[2]);
+#endif
+
+	/* start timer if queue currently empty */
+	if (q->read_ptr == q->write_ptr && trans_pcie->wd_timeout)
+		mod_timer(&txq->stuck_timer, jiffies + trans_pcie->wd_timeout);
+
+	/* Increment and update queue's write index */
+	q->write_ptr = iwl_queue_inc_wrap(q->write_ptr, q->n_bd);
+	iwl_txq_update_write_ptr(trans, txq);
+
+ out:
+	spin_unlock_bh(&txq->lock);
+	return idx;
+}
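+
+/*
+ * Illustrative usage sketch (hdr, payload and payload_len are
+ * placeholders): a caller builds a host command with copied and NOCOPY
+ * fragments roughly like this, so a large read-only payload is
+ * DMA-mapped in place instead of being copied into the command buffer:
+ *
+ *	struct iwl_host_cmd cmd = {
+ *		.id = REPLY_SCAN_CMD,
+ *		.flags = CMD_ASYNC,
+ *		.data = { &hdr, payload, },
+ *		.len = { sizeof(hdr), payload_len, },
+ *		.dataflags = { 0, IWL_HCMD_DFL_NOCOPY, },
+ *	};
+ *
+ *	ret = iwl_trans_pcie_send_cmd(trans, &cmd);
+ */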
+
+static inline void iwl_queue_progress(struct iwl_trans_pcie *trans_pcie,
+				      struct iwl_tx_queue *txq)
+{
+	if (!trans_pcie->wd_timeout)
+		return;
+
+	/*
+	 * if empty delete timer, otherwise move timer forward
+	 * since we're making progress on this queue
+	 */
+	if (txq->q.read_ptr == txq->q.write_ptr)
+		del_timer(&txq->stuck_timer);
+	else
+		mod_timer(&txq->stuck_timer, jiffies + trans_pcie->wd_timeout);
+}
+
+/**
+ * iwl_hcmd_queue_reclaim - Reclaim TX command queue entries already Tx'd
+ *
+ * When the FW advances the 'R' index, all entries between the old and new
+ * 'R' index need to be reclaimed. As a result, some free space forms. If
+ * there is enough free space (> low mark), wake the stack that feeds us.
+ */
+static void iwl_hcmd_queue_reclaim(struct iwl_trans *trans, int txq_id,
+				   int idx)
+{
+	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+	struct iwl_tx_queue *txq = &trans_pcie->txq[txq_id];
+	struct iwl_queue *q = &txq->q;
+	int nfreed = 0;
+
+	lockdep_assert_held(&txq->lock);
+
+	if ((idx >= q->n_bd) || (iwl_queue_used(q, idx) == 0)) {
+		IWL_ERR(trans,
+			"%s: Read index for DMA queue txq id (%d), index %d is out of range [0-%d] %d %d.\n",
+			__func__, txq_id, idx, q->n_bd,
+			q->write_ptr, q->read_ptr);
+		return;
+	}
+
+	for (idx = iwl_queue_inc_wrap(idx, q->n_bd); q->read_ptr != idx;
+	     q->read_ptr = iwl_queue_inc_wrap(q->read_ptr, q->n_bd)) {
+
+		if (nfreed++ > 0) {
+			IWL_ERR(trans, "HCMD skipped: index (%d) %d %d\n",
+				idx, q->write_ptr, q->read_ptr);
+			iwl_op_mode_nic_error(trans->op_mode);
+		}
+
+	}
+
+	iwl_queue_progress(trans_pcie, txq);
+}
+
+/**
+ * iwl_tx_cmd_complete - Pull unused buffers off the queue and reclaim them
+ * @rxb: Rx buffer to reclaim
+ * @handler_status: return value of the handler of the command
+ *	(put in setup_rx_handlers)
+ *
+ * If an Rx buffer has an async callback associated with it, the callback
+ * will be executed.  The attached skb (if present) will only be freed
+ * if the callback returns 1.
+ */
+void iwl_tx_cmd_complete(struct iwl_trans *trans, struct iwl_rx_cmd_buffer *rxb,
+			 int handler_status)
+{
+	struct iwl_rx_packet *pkt = rxb_addr(rxb);
+	u16 sequence = le16_to_cpu(pkt->hdr.sequence);
+	int txq_id = SEQ_TO_QUEUE(sequence);
+	int index = SEQ_TO_INDEX(sequence);
+	int cmd_index;
+	struct iwl_device_cmd *cmd;
+	struct iwl_cmd_meta *meta;
+	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+	struct iwl_tx_queue *txq = &trans_pcie->txq[trans_pcie->cmd_queue];
+
+	/* If a Tx command is being handled and it isn't in the actual
+	 * command queue then a command routing bug has been introduced
+	 * in the queue management code. */
+	if (WARN(txq_id != trans_pcie->cmd_queue,
+		 "wrong command queue %d (should be %d), sequence 0x%X readp=%d writep=%d\n",
+		 txq_id, trans_pcie->cmd_queue, sequence,
+		 trans_pcie->txq[trans_pcie->cmd_queue].q.read_ptr,
+		 trans_pcie->txq[trans_pcie->cmd_queue].q.write_ptr)) {
+		iwl_print_hex_error(trans, pkt, 32);
+		return;
+	}
+
+	spin_lock(&txq->lock);
+
+	cmd_index = get_cmd_index(&txq->q, index);
+	cmd = txq->entries[cmd_index].cmd;
+	meta = &txq->entries[cmd_index].meta;
+
+	iwl_unmap_tfd(trans, meta, &txq->tfds[index], DMA_BIDIRECTIONAL);
+
+	/* Input error checking is done when commands are added to the queue. */
+	if (meta->flags & CMD_WANT_SKB) {
+		struct page *p = rxb_steal_page(rxb);
+
+		meta->source->resp_pkt = pkt;
+		meta->source->_rx_page_addr = (unsigned long)page_address(p);
+		meta->source->_rx_page_order = trans_pcie->rx_page_order;
+		meta->source->handler_status = handler_status;
+	}
+
+	iwl_hcmd_queue_reclaim(trans, txq_id, index);
+
+	if (!(meta->flags & CMD_ASYNC)) {
+		if (!test_bit(STATUS_HCMD_ACTIVE, &trans_pcie->status)) {
+			IWL_WARN(trans,
+				 "HCMD_ACTIVE already clear for command %s\n",
+				 trans_pcie_get_cmd_string(trans_pcie,
+							   cmd->hdr.cmd));
+		}
+		clear_bit(STATUS_HCMD_ACTIVE, &trans_pcie->status);
+		IWL_DEBUG_INFO(trans, "Clearing HCMD_ACTIVE for command %s\n",
+			       trans_pcie_get_cmd_string(trans_pcie,
+							 cmd->hdr.cmd));
+		wake_up(&trans->wait_command_queue);
+	}
+
+	meta->flags = 0;
+
+	spin_unlock(&txq->lock);
+}
+
+#define HOST_COMPLETE_TIMEOUT (2 * HZ)
+
+static int iwl_send_cmd_async(struct iwl_trans *trans, struct iwl_host_cmd *cmd)
+{
+	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+	int ret;
+
+	/* An asynchronous command cannot expect an SKB to be set. */
+	if (WARN_ON(cmd->flags & CMD_WANT_SKB))
+		return -EINVAL;
+
+	ret = iwl_enqueue_hcmd(trans, cmd);
+	if (ret < 0) {
+		IWL_ERR(trans,
+			"Error sending %s: enqueue_hcmd failed: %d\n",
+			trans_pcie_get_cmd_string(trans_pcie, cmd->id), ret);
+		return ret;
+	}
+	return 0;
+}
+
+static int iwl_send_cmd_sync(struct iwl_trans *trans, struct iwl_host_cmd *cmd)
+{
+	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+	int cmd_idx;
+	int ret;
+
+	IWL_DEBUG_INFO(trans, "Attempting to send sync command %s\n",
+		       trans_pcie_get_cmd_string(trans_pcie, cmd->id));
+
+	if (WARN_ON(test_and_set_bit(STATUS_HCMD_ACTIVE,
+				     &trans_pcie->status))) {
+		IWL_ERR(trans, "Command %s: a command is already active!\n",
+			trans_pcie_get_cmd_string(trans_pcie, cmd->id));
+		return -EIO;
+	}
+
+	IWL_DEBUG_INFO(trans, "Setting HCMD_ACTIVE for command %s\n",
+		       trans_pcie_get_cmd_string(trans_pcie, cmd->id));
+
+	cmd_idx = iwl_enqueue_hcmd(trans, cmd);
+	if (cmd_idx < 0) {
+		ret = cmd_idx;
+		clear_bit(STATUS_HCMD_ACTIVE, &trans_pcie->status);
+		IWL_ERR(trans,
+			"Error sending %s: enqueue_hcmd failed: %d\n",
+			trans_pcie_get_cmd_string(trans_pcie, cmd->id), ret);
+		return ret;
+	}
+
+	ret = wait_event_timeout(trans->wait_command_queue,
+				 !test_bit(STATUS_HCMD_ACTIVE,
+					   &trans_pcie->status),
+				 HOST_COMPLETE_TIMEOUT);
+	if (!ret) {
+		if (test_bit(STATUS_HCMD_ACTIVE, &trans_pcie->status)) {
+			struct iwl_tx_queue *txq =
+				&trans_pcie->txq[trans_pcie->cmd_queue];
+			struct iwl_queue *q = &txq->q;
+
+			IWL_ERR(trans,
+				"Error sending %s: time out after %dms.\n",
+				trans_pcie_get_cmd_string(trans_pcie, cmd->id),
+				jiffies_to_msecs(HOST_COMPLETE_TIMEOUT));
+
+			IWL_ERR(trans,
+				"Current CMD queue read_ptr %d write_ptr %d\n",
+				q->read_ptr, q->write_ptr);
+
+			clear_bit(STATUS_HCMD_ACTIVE, &trans_pcie->status);
+			IWL_DEBUG_INFO(trans,
+				       "Clearing HCMD_ACTIVE for command %s\n",
+				       trans_pcie_get_cmd_string(trans_pcie,
+								 cmd->id));
+			ret = -ETIMEDOUT;
+			goto cancel;
+		}
+	}
+
+	if ((cmd->flags & CMD_WANT_SKB) && !cmd->resp_pkt) {
+		IWL_ERR(trans, "Error: Response NULL in '%s'\n",
+			trans_pcie_get_cmd_string(trans_pcie, cmd->id));
+		ret = -EIO;
+		goto cancel;
+	}
+
+	return 0;
+
+cancel:
+	if (cmd->flags & CMD_WANT_SKB) {
+		/*
+		 * Cancel the CMD_WANT_SKB flag for the cmd in the
+		 * TX cmd queue. Otherwise in case the cmd comes
+		 * in later, it will possibly set an invalid
+		 * address (cmd->meta.source).
+		 */
+		trans_pcie->txq[trans_pcie->cmd_queue].
+			entries[cmd_idx].meta.flags &= ~CMD_WANT_SKB;
+	}
+
+	if (cmd->resp_pkt) {
+		iwl_free_resp(cmd);
+		cmd->resp_pkt = NULL;
+	}
+
+	return ret;
+}
+
+int iwl_trans_pcie_send_cmd(struct iwl_trans *trans, struct iwl_host_cmd *cmd)
+{
+	if (cmd->flags & CMD_ASYNC)
+		return iwl_send_cmd_async(trans, cmd);
+
+	return iwl_send_cmd_sync(trans, cmd);
+}
+
+/* Frees buffers until index _not_ inclusive */
+int iwl_tx_queue_reclaim(struct iwl_trans *trans, int txq_id, int index,
+			 struct sk_buff_head *skbs)
+{
+	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+	struct iwl_tx_queue *txq = &trans_pcie->txq[txq_id];
+	struct iwl_queue *q = &txq->q;
+	int last_to_free;
+	int freed = 0;
+
+	/* This function is not meant to release the cmd queue */
+	if (WARN_ON(txq_id == trans_pcie->cmd_queue))
+		return 0;
+
+	lockdep_assert_held(&txq->lock);
+
+	/* Since we free until index _not_ inclusive, the one before index is
+	 * the last we will free. This one must be used */
+	last_to_free = iwl_queue_dec_wrap(index, q->n_bd);
+
+	if ((index >= q->n_bd) ||
+	   (iwl_queue_used(q, last_to_free) == 0)) {
+		IWL_ERR(trans,
+			"%s: Read index for DMA queue txq id (%d), last_to_free %d is out of range [0-%d] %d %d.\n",
+			__func__, txq_id, last_to_free, q->n_bd,
+			q->write_ptr, q->read_ptr);
+		return 0;
+	}
+
+	if (WARN_ON(!skb_queue_empty(skbs)))
+		return 0;
+
+	for (;
+	     q->read_ptr != index;
+	     q->read_ptr = iwl_queue_inc_wrap(q->read_ptr, q->n_bd)) {
+
+		if (WARN_ON_ONCE(txq->entries[txq->q.read_ptr].skb == NULL))
+			continue;
+
+		__skb_queue_tail(skbs, txq->entries[txq->q.read_ptr].skb);
+
+		txq->entries[txq->q.read_ptr].skb = NULL;
+
+		iwlagn_txq_inval_byte_cnt_tbl(trans, txq);
+
+		iwl_txq_free_tfd(trans, txq, DMA_TO_DEVICE);
+		freed++;
+	}
+
+	iwl_queue_progress(trans_pcie, txq);
+
+	return freed;
+}