msm: ipa: initial commit of IPA driver
This is a snapshot of IPA from kernel msm-4.4 based on
commit ebc2a18351d4 ("msm: ipa: WA to get PA of sgt_tbl from wlan")
CRs-Fixed: 1077422
Change-Id: I97cf9ee9c104ac5ab5bc0577eb9413264b08a7a5
Signed-off-by: Amir Levy <alevy@codeaurora.org>
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipahal/Makefile b/drivers/platform/msm/ipa/ipa_v3/ipahal/Makefile
new file mode 100644
index 0000000..b945eb06
--- /dev/null
+++ b/drivers/platform/msm/ipa/ipa_v3/ipahal/Makefile
@@ -0,0 +1,3 @@
+# Build the IPA v3 HAL as a single module object (ipa_hal.o) when
+# CONFIG_IPA3 is enabled; it is composed of the three HAL layers below.
+obj-$(CONFIG_IPA3) += ipa_hal.o
+
+ipa_hal-y := ipahal.o ipahal_reg.o ipahal_fltrt.o
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipahal/ipahal.c b/drivers/platform/msm/ipa/ipa_v3/ipahal/ipahal.c
new file mode 100644
index 0000000..c88b104
--- /dev/null
+++ b/drivers/platform/msm/ipa/ipa_v3/ipahal/ipahal.c
@@ -0,0 +1,1359 @@
+/* Copyright (c) 2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/debugfs.h>
+#include "ipahal.h"
+#include "ipahal_i.h"
+#include "ipahal_reg_i.h"
+#include "ipahal_fltrt_i.h"
+
+struct ipahal_context *ipahal_ctx;
+
+/*
+ * Printable names for the immediate commands, used by
+ * ipahal_imm_cmd_name_str(). Indexed by enum ipahal_imm_cmd_name,
+ * so the order here must match the enum declaration order.
+ */
+static const char *ipahal_imm_cmd_name_to_str[IPA_IMM_CMD_MAX] = {
+	__stringify(IPA_IMM_CMD_IP_V4_FILTER_INIT),
+	__stringify(IPA_IMM_CMD_IP_V6_FILTER_INIT),
+	__stringify(IPA_IMM_CMD_IP_V4_NAT_INIT),
+	__stringify(IPA_IMM_CMD_IP_V4_ROUTING_INIT),
+	__stringify(IPA_IMM_CMD_IP_V6_ROUTING_INIT),
+	__stringify(IPA_IMM_CMD_HDR_INIT_LOCAL),
+	__stringify(IPA_IMM_CMD_HDR_INIT_SYSTEM),
+	__stringify(IPA_IMM_CMD_REGISTER_WRITE),
+	__stringify(IPA_IMM_CMD_NAT_DMA),
+	__stringify(IPA_IMM_CMD_IP_PACKET_INIT),
+	__stringify(IPA_IMM_CMD_DMA_SHARED_MEM),
+	__stringify(IPA_IMM_CMD_IP_PACKET_TAG_STATUS),
+	__stringify(IPA_IMM_CMD_DMA_TASK_32B_ADDR),
+};
+
+/*
+ * Printable names for packet-status exception types, used by
+ * ipahal_pkt_status_exception_str(). Indexed by
+ * enum ipahal_pkt_status_exception - order must match the enum.
+ */
+static const char *ipahal_pkt_status_exception_to_str
+	[IPAHAL_PKT_STATUS_EXCEPTION_MAX] = {
+	__stringify(IPAHAL_PKT_STATUS_EXCEPTION_NONE),
+	__stringify(IPAHAL_PKT_STATUS_EXCEPTION_DEAGGR),
+	__stringify(IPAHAL_PKT_STATUS_EXCEPTION_IPTYPE),
+	__stringify(IPAHAL_PKT_STATUS_EXCEPTION_PACKET_LENGTH),
+	__stringify(IPAHAL_PKT_STATUS_EXCEPTION_PACKET_THRESHOLD),
+	__stringify(IPAHAL_PKT_STATUS_EXCEPTION_FRAG_RULE_MISS),
+	__stringify(IPAHAL_PKT_STATUS_EXCEPTION_SW_FILT),
+	__stringify(IPAHAL_PKT_STATUS_EXCEPTION_NAT),
+};
+
+/*
+ * Zero-initialized allocation helper: uses GFP_ATOMIC when the caller
+ * runs in a context that cannot sleep, GFP_KERNEL otherwise.
+ */
+#define IPAHAL_MEM_ALLOC(__size, __is_atomic_ctx) \
+	(kzalloc((__size), ((__is_atomic_ctx)?GFP_ATOMIC:GFP_KERNEL)))
+
+
+/*
+ * Build the H/W payload for the DMA_TASK_32B_ADDR immediate command
+ * from the abstracted parameters in @params.
+ */
+static struct ipahal_imm_cmd_pyld *ipa_imm_cmd_construct_dma_task_32b_addr(
+	enum ipahal_imm_cmd_name cmd, const void *params, bool is_atomic_ctx)
+{
+	const struct ipahal_imm_cmd_dma_task_32b_addr *cfg = params;
+	struct ipa_imm_cmd_hw_dma_task_32b_addr *hw;
+	struct ipahal_imm_cmd_pyld *payload;
+
+	/* payload descriptor and H/W command image share one allocation */
+	payload = IPAHAL_MEM_ALLOC(sizeof(*payload) + sizeof(*hw),
+		is_atomic_ctx);
+	if (!payload) {
+		IPAHAL_ERR("kzalloc err\n");
+		return NULL;
+	}
+	payload->len = sizeof(*hw);
+	hw = (struct ipa_imm_cmd_hw_dma_task_32b_addr *)payload->data;
+
+	/* size fields are 16bit wide in the H/W format - warn on overflow */
+	if (cfg->size1 & ~0xFFFF) {
+		IPAHAL_ERR("Size1 is bigger than 16bit width 0x%x\n",
+			cfg->size1);
+		WARN_ON(1);
+	}
+	if (cfg->packet_size & ~0xFFFF) {
+		IPAHAL_ERR("Pkt size is bigger than 16bit width 0x%x\n",
+			cfg->packet_size);
+		WARN_ON(1);
+	}
+
+	hw->cmplt = !!cfg->cmplt;
+	hw->eof = !!cfg->eof;
+	hw->flsh = !!cfg->flsh;
+	hw->lock = !!cfg->lock;
+	hw->unlock = !!cfg->unlock;
+	hw->size1 = cfg->size1;
+	hw->addr1 = cfg->addr1;
+	hw->packet_size = cfg->packet_size;
+
+	return payload;
+}
+
+/*
+ * Build the H/W payload for the IP_PACKET_TAG_STATUS immediate command
+ * from the abstracted parameters in @params.
+ */
+static struct ipahal_imm_cmd_pyld *ipa_imm_cmd_construct_ip_packet_tag_status(
+	enum ipahal_imm_cmd_name cmd, const void *params, bool is_atomic_ctx)
+{
+	const struct ipahal_imm_cmd_ip_packet_tag_status *cfg = params;
+	struct ipa_imm_cmd_hw_ip_packet_tag_status *hw;
+	struct ipahal_imm_cmd_pyld *payload;
+
+	payload = IPAHAL_MEM_ALLOC(sizeof(*payload) + sizeof(*hw),
+		is_atomic_ctx);
+	if (!payload) {
+		IPAHAL_ERR("kzalloc err\n");
+		return NULL;
+	}
+	payload->len = sizeof(*hw);
+	hw = (struct ipa_imm_cmd_hw_ip_packet_tag_status *)payload->data;
+
+	/* the tag occupies only 48 bits of the H/W word */
+	if (cfg->tag & ~0xFFFFFFFFFFFF) {
+		IPAHAL_ERR("tag is bigger than 48bit width 0x%llx\n",
+			cfg->tag);
+		WARN_ON(1);
+	}
+	hw->tag = cfg->tag;
+
+	return payload;
+}
+
+/*
+ * Build the H/W payload for the DMA_SHARED_MEM immediate command
+ * from the abstracted parameters in @params.
+ */
+static struct ipahal_imm_cmd_pyld *ipa_imm_cmd_construct_dma_shared_mem(
+	enum ipahal_imm_cmd_name cmd, const void *params, bool is_atomic_ctx)
+{
+	const struct ipahal_imm_cmd_dma_shared_mem *cfg = params;
+	struct ipa_imm_cmd_hw_dma_shared_mem *hw;
+	struct ipahal_imm_cmd_pyld *payload;
+
+	payload = IPAHAL_MEM_ALLOC(sizeof(*payload) + sizeof(*hw),
+		is_atomic_ctx);
+	if (!payload) {
+		IPAHAL_ERR("kzalloc err\n");
+		return NULL;
+	}
+	payload->len = sizeof(*hw);
+	hw = (struct ipa_imm_cmd_hw_dma_shared_mem *)payload->data;
+
+	/* size and local address are 16bit wide in the H/W format */
+	if (cfg->size & ~0xFFFF) {
+		IPAHAL_ERR("Size is bigger than 16bit width 0x%x\n",
+			cfg->size);
+		WARN_ON(1);
+	}
+	if (cfg->local_addr & ~0xFFFF) {
+		IPAHAL_ERR("Local addr is bigger than 16bit width 0x%x\n",
+			cfg->local_addr);
+		WARN_ON(1);
+	}
+
+	hw->direction = !!cfg->is_read;
+	hw->size = cfg->size;
+	hw->local_addr = cfg->local_addr;
+	hw->system_addr = cfg->system_addr;
+	hw->skip_pipeline_clear = !!cfg->skip_pipeline_clear;
+
+	/* map the abstracted pipeline-clear option to its H/W encoding */
+	if (cfg->pipeline_clear_options == IPAHAL_HPS_CLEAR) {
+		hw->pipeline_clear_options = 0;
+	} else if (cfg->pipeline_clear_options == IPAHAL_SRC_GRP_CLEAR) {
+		hw->pipeline_clear_options = 1;
+	} else if (cfg->pipeline_clear_options == IPAHAL_FULL_PIPELINE_CLEAR) {
+		hw->pipeline_clear_options = 2;
+	} else {
+		IPAHAL_ERR("unsupported pipline clear option %d\n",
+			cfg->pipeline_clear_options);
+		WARN_ON(1);
+	}
+
+	return payload;
+}
+
+/*
+ * Build the H/W payload for the REGISTER_WRITE immediate command
+ * from the abstracted parameters in @params.
+ */
+static struct ipahal_imm_cmd_pyld *ipa_imm_cmd_construct_register_write(
+	enum ipahal_imm_cmd_name cmd, const void *params, bool is_atomic_ctx)
+{
+	const struct ipahal_imm_cmd_register_write *cfg = params;
+	struct ipa_imm_cmd_hw_register_write *hw;
+	struct ipahal_imm_cmd_pyld *payload;
+
+	payload = IPAHAL_MEM_ALLOC(sizeof(*payload) + sizeof(*hw),
+		is_atomic_ctx);
+	if (!payload) {
+		IPAHAL_ERR("kzalloc err\n");
+		return NULL;
+	}
+	payload->len = sizeof(*hw);
+	hw = (struct ipa_imm_cmd_hw_register_write *)payload->data;
+
+	/* the register offset field is only 16bit wide */
+	if (cfg->offset & ~0xFFFF) {
+		IPAHAL_ERR("Offset is bigger than 16bit width 0x%x\n",
+			cfg->offset);
+		WARN_ON(1);
+	}
+	hw->offset = cfg->offset;
+	hw->value = cfg->value;
+	hw->value_mask = cfg->value_mask;
+	hw->skip_pipeline_clear = !!cfg->skip_pipeline_clear;
+
+	/* map the abstracted pipeline-clear option to its H/W encoding */
+	if (cfg->pipeline_clear_options == IPAHAL_HPS_CLEAR) {
+		hw->pipeline_clear_options = 0;
+	} else if (cfg->pipeline_clear_options == IPAHAL_SRC_GRP_CLEAR) {
+		hw->pipeline_clear_options = 1;
+	} else if (cfg->pipeline_clear_options == IPAHAL_FULL_PIPELINE_CLEAR) {
+		hw->pipeline_clear_options = 2;
+	} else {
+		IPAHAL_ERR("unsupported pipline clear option %d\n",
+			cfg->pipeline_clear_options);
+		WARN_ON(1);
+	}
+
+	return payload;
+}
+
+/*
+ * Build the H/W payload for the IP_PACKET_INIT immediate command
+ * from the abstracted parameters in @params.
+ */
+static struct ipahal_imm_cmd_pyld *ipa_imm_cmd_construct_ip_packet_init(
+	enum ipahal_imm_cmd_name cmd, const void *params, bool is_atomic_ctx)
+{
+	const struct ipahal_imm_cmd_ip_packet_init *cfg = params;
+	struct ipa_imm_cmd_hw_ip_packet_init *hw;
+	struct ipahal_imm_cmd_pyld *payload;
+
+	payload = IPAHAL_MEM_ALLOC(sizeof(*payload) + sizeof(*hw),
+		is_atomic_ctx);
+	if (!payload) {
+		IPAHAL_ERR("kzalloc err\n");
+		return NULL;
+	}
+	payload->len = sizeof(*hw);
+	hw = (struct ipa_imm_cmd_hw_ip_packet_init *)payload->data;
+
+	/* destination pipe index is a 5bit H/W field */
+	if (cfg->destination_pipe_index & ~0x1F) {
+		IPAHAL_ERR("Dst pipe idx is bigger than 5bit width 0x%x\n",
+			cfg->destination_pipe_index);
+		WARN_ON(1);
+	}
+	hw->destination_pipe_index = cfg->destination_pipe_index;
+
+	return payload;
+}
+
+/*
+ * Build the H/W payload for the NAT_DMA immediate command
+ * from the abstracted parameters in @params.
+ */
+static struct ipahal_imm_cmd_pyld *ipa_imm_cmd_construct_nat_dma(
+	enum ipahal_imm_cmd_name cmd, const void *params, bool is_atomic_ctx)
+{
+	const struct ipahal_imm_cmd_nat_dma *cfg = params;
+	struct ipa_imm_cmd_hw_nat_dma *hw;
+	struct ipahal_imm_cmd_pyld *payload;
+
+	payload = IPAHAL_MEM_ALLOC(sizeof(*payload) + sizeof(*hw),
+		is_atomic_ctx);
+	if (!payload) {
+		IPAHAL_ERR("kzalloc err\n");
+		return NULL;
+	}
+	payload->len = sizeof(*hw);
+	hw = (struct ipa_imm_cmd_hw_nat_dma *)payload->data;
+
+	hw->table_index = cfg->table_index;
+	hw->base_addr = cfg->base_addr;
+	hw->offset = cfg->offset;
+	hw->data = cfg->data;
+
+	return payload;
+}
+
+/*
+ * Build the H/W payload for the HDR_INIT_SYSTEM immediate command
+ * from the abstracted parameters in @params.
+ */
+static struct ipahal_imm_cmd_pyld *ipa_imm_cmd_construct_hdr_init_system(
+	enum ipahal_imm_cmd_name cmd, const void *params, bool is_atomic_ctx)
+{
+	const struct ipahal_imm_cmd_hdr_init_system *cfg = params;
+	struct ipa_imm_cmd_hw_hdr_init_system *hw;
+	struct ipahal_imm_cmd_pyld *payload;
+
+	payload = IPAHAL_MEM_ALLOC(sizeof(*payload) + sizeof(*hw),
+		is_atomic_ctx);
+	if (!payload) {
+		IPAHAL_ERR("kzalloc err\n");
+		return NULL;
+	}
+	payload->len = sizeof(*hw);
+	hw = (struct ipa_imm_cmd_hw_hdr_init_system *)payload->data;
+
+	hw->hdr_table_addr = cfg->hdr_table_addr;
+
+	return payload;
+}
+
+/*
+ * Build the H/W payload for the HDR_INIT_LOCAL immediate command
+ * from the abstracted parameters in @params.
+ */
+static struct ipahal_imm_cmd_pyld *ipa_imm_cmd_construct_hdr_init_local(
+	enum ipahal_imm_cmd_name cmd, const void *params, bool is_atomic_ctx)
+{
+	const struct ipahal_imm_cmd_hdr_init_local *cfg = params;
+	struct ipa_imm_cmd_hw_hdr_init_local *hw;
+	struct ipahal_imm_cmd_pyld *payload;
+
+	payload = IPAHAL_MEM_ALLOC(sizeof(*payload) + sizeof(*hw),
+		is_atomic_ctx);
+	if (!payload) {
+		IPAHAL_ERR("kzalloc err\n");
+		return NULL;
+	}
+	payload->len = sizeof(*hw);
+	hw = (struct ipa_imm_cmd_hw_hdr_init_local *)payload->data;
+
+	/* the header table size field is only 12bit wide */
+	if (cfg->size_hdr_table & ~0xFFF) {
+		IPAHAL_ERR("Hdr tble size is bigger than 12bit width 0x%x\n",
+			cfg->size_hdr_table);
+		WARN_ON(1);
+	}
+	hw->hdr_table_addr = cfg->hdr_table_addr;
+	hw->size_hdr_table = cfg->size_hdr_table;
+	hw->hdr_addr = cfg->hdr_addr;
+
+	return payload;
+}
+
+/*
+ * Build the H/W payload for the IP_V6_ROUTING_INIT immediate command
+ * from the abstracted parameters in @params.
+ */
+static struct ipahal_imm_cmd_pyld *ipa_imm_cmd_construct_ip_v6_routing_init(
+	enum ipahal_imm_cmd_name cmd, const void *params, bool is_atomic_ctx)
+{
+	const struct ipahal_imm_cmd_ip_v6_routing_init *cfg = params;
+	struct ipa_imm_cmd_hw_ip_v6_routing_init *hw;
+	struct ipahal_imm_cmd_pyld *payload;
+
+	payload = IPAHAL_MEM_ALLOC(sizeof(*payload) + sizeof(*hw),
+		is_atomic_ctx);
+	if (!payload) {
+		IPAHAL_ERR("kzalloc err\n");
+		return NULL;
+	}
+	payload->len = sizeof(*hw);
+	hw = (struct ipa_imm_cmd_hw_ip_v6_routing_init *)payload->data;
+
+	/* hashable rules table */
+	hw->hash_rules_addr = cfg->hash_rules_addr;
+	hw->hash_rules_size = cfg->hash_rules_size;
+	hw->hash_local_addr = cfg->hash_local_addr;
+	/* non-hashable rules table */
+	hw->nhash_rules_addr = cfg->nhash_rules_addr;
+	hw->nhash_rules_size = cfg->nhash_rules_size;
+	hw->nhash_local_addr = cfg->nhash_local_addr;
+
+	return payload;
+}
+
+/*
+ * Build the H/W payload for the IP_V4_ROUTING_INIT immediate command
+ * from the abstracted parameters in @params.
+ */
+static struct ipahal_imm_cmd_pyld *ipa_imm_cmd_construct_ip_v4_routing_init(
+	enum ipahal_imm_cmd_name cmd, const void *params, bool is_atomic_ctx)
+{
+	const struct ipahal_imm_cmd_ip_v4_routing_init *cfg = params;
+	struct ipa_imm_cmd_hw_ip_v4_routing_init *hw;
+	struct ipahal_imm_cmd_pyld *payload;
+
+	payload = IPAHAL_MEM_ALLOC(sizeof(*payload) + sizeof(*hw),
+		is_atomic_ctx);
+	if (!payload) {
+		IPAHAL_ERR("kzalloc err\n");
+		return NULL;
+	}
+	payload->len = sizeof(*hw);
+	hw = (struct ipa_imm_cmd_hw_ip_v4_routing_init *)payload->data;
+
+	/* hashable rules table */
+	hw->hash_rules_addr = cfg->hash_rules_addr;
+	hw->hash_rules_size = cfg->hash_rules_size;
+	hw->hash_local_addr = cfg->hash_local_addr;
+	/* non-hashable rules table */
+	hw->nhash_rules_addr = cfg->nhash_rules_addr;
+	hw->nhash_rules_size = cfg->nhash_rules_size;
+	hw->nhash_local_addr = cfg->nhash_local_addr;
+
+	return payload;
+}
+
+/*
+ * Build the H/W payload for the IP_V4_NAT_INIT immediate command
+ * from the abstracted parameters in @params.
+ */
+static struct ipahal_imm_cmd_pyld *ipa_imm_cmd_construct_ip_v4_nat_init(
+	enum ipahal_imm_cmd_name cmd, const void *params, bool is_atomic_ctx)
+{
+	const struct ipahal_imm_cmd_ip_v4_nat_init *cfg = params;
+	struct ipa_imm_cmd_hw_ip_v4_nat_init *hw;
+	struct ipahal_imm_cmd_pyld *payload;
+
+	payload = IPAHAL_MEM_ALLOC(sizeof(*payload) + sizeof(*hw),
+		is_atomic_ctx);
+	if (!payload) {
+		IPAHAL_ERR("kzalloc err\n");
+		return NULL;
+	}
+	payload->len = sizeof(*hw);
+	hw = (struct ipa_imm_cmd_hw_ip_v4_nat_init *)payload->data;
+
+	hw->ipv4_rules_addr = cfg->ipv4_rules_addr;
+	hw->ipv4_expansion_rules_addr = cfg->ipv4_expansion_rules_addr;
+	hw->index_table_addr = cfg->index_table_addr;
+	hw->index_table_expansion_addr = cfg->index_table_expansion_addr;
+	hw->table_index = cfg->table_index;
+	/* per-address flags marking whether the address is shared memory */
+	hw->ipv4_rules_addr_type = !!cfg->ipv4_rules_addr_shared;
+	hw->ipv4_expansion_rules_addr_type =
+		!!cfg->ipv4_expansion_rules_addr_shared;
+	hw->index_table_addr_type = !!cfg->index_table_addr_shared;
+	hw->index_table_expansion_addr_type =
+		!!cfg->index_table_expansion_addr_shared;
+	hw->size_base_tables = cfg->size_base_tables;
+	hw->size_expansion_tables = cfg->size_expansion_tables;
+	hw->public_ip_addr = cfg->public_ip_addr;
+
+	return payload;
+}
+
+/*
+ * Build the H/W payload for the IP_V6_FILTER_INIT immediate command
+ * from the abstracted parameters in @params.
+ */
+static struct ipahal_imm_cmd_pyld *ipa_imm_cmd_construct_ip_v6_filter_init(
+	enum ipahal_imm_cmd_name cmd, const void *params, bool is_atomic_ctx)
+{
+	const struct ipahal_imm_cmd_ip_v6_filter_init *cfg = params;
+	struct ipa_imm_cmd_hw_ip_v6_filter_init *hw;
+	struct ipahal_imm_cmd_pyld *payload;
+
+	payload = IPAHAL_MEM_ALLOC(sizeof(*payload) + sizeof(*hw),
+		is_atomic_ctx);
+	if (!payload) {
+		IPAHAL_ERR("kzalloc err\n");
+		return NULL;
+	}
+	payload->len = sizeof(*hw);
+	hw = (struct ipa_imm_cmd_hw_ip_v6_filter_init *)payload->data;
+
+	/* hashable rules table */
+	hw->hash_rules_addr = cfg->hash_rules_addr;
+	hw->hash_rules_size = cfg->hash_rules_size;
+	hw->hash_local_addr = cfg->hash_local_addr;
+	/* non-hashable rules table */
+	hw->nhash_rules_addr = cfg->nhash_rules_addr;
+	hw->nhash_rules_size = cfg->nhash_rules_size;
+	hw->nhash_local_addr = cfg->nhash_local_addr;
+
+	return payload;
+}
+
+/*
+ * Build the H/W payload for the IP_V4_FILTER_INIT immediate command
+ * from the abstracted parameters in @params.
+ */
+static struct ipahal_imm_cmd_pyld *ipa_imm_cmd_construct_ip_v4_filter_init(
+	enum ipahal_imm_cmd_name cmd, const void *params, bool is_atomic_ctx)
+{
+	const struct ipahal_imm_cmd_ip_v4_filter_init *cfg = params;
+	struct ipa_imm_cmd_hw_ip_v4_filter_init *hw;
+	struct ipahal_imm_cmd_pyld *payload;
+
+	payload = IPAHAL_MEM_ALLOC(sizeof(*payload) + sizeof(*hw),
+		is_atomic_ctx);
+	if (!payload) {
+		IPAHAL_ERR("kzalloc err\n");
+		return NULL;
+	}
+	payload->len = sizeof(*hw);
+	hw = (struct ipa_imm_cmd_hw_ip_v4_filter_init *)payload->data;
+
+	/* hashable rules table */
+	hw->hash_rules_addr = cfg->hash_rules_addr;
+	hw->hash_rules_size = cfg->hash_rules_size;
+	hw->hash_local_addr = cfg->hash_local_addr;
+	/* non-hashable rules table */
+	hw->nhash_rules_addr = cfg->nhash_rules_addr;
+	hw->nhash_rules_size = cfg->nhash_rules_size;
+	hw->nhash_local_addr = cfg->nhash_local_addr;
+
+	return payload;
+}
+
+/*
+ * struct ipahal_imm_cmd_obj - immediate command H/W information for
+ * specific IPA version
+ * @construct - CB to construct imm command payload from abstracted structure
+ * @opcode - Immediate command OpCode. Note this is a u16: storing the
+ *	"removed command" marker of -1 yields 0xFFFF when read back, so
+ *	obsolete-command checks must compare against (u16)-1.
+ * @dyn_op - Does this command support a Dynamic opcode?
+ * Some commands' opcodes are dynamic, where part of the opcode is
+ * supplied as a param. This flag indicates if the specific command
+ * supports it or not.
+ */
+struct ipahal_imm_cmd_obj {
+	struct ipahal_imm_cmd_pyld *(*construct)(enum ipahal_imm_cmd_name cmd,
+		const void *params, bool is_atomic_ctx);
+	u16 opcode;
+	bool dyn_op;
+};
+
+/*
+ * This table contains the info regarding each immediate command for IPAv3
+ * and later.
+ * Information like: opcode and construct functions.
+ * All the information on the IMM on IPAv3 is statically defined below.
+ * If information is missing regarding some IMM on some IPA version,
+ * the init function will fill it with the information from the previous
+ * IPA version.
+ * Information is considered missing if all of the fields are 0.
+ * If opcode is -1, this means that the IMM is removed on the
+ * specific version.
+ */
+static struct ipahal_imm_cmd_obj
+	ipahal_imm_cmd_objs[IPA_HW_MAX][IPA_IMM_CMD_MAX] = {
+	/* IPAv3 - opcodes are the IPAv3.0 H/W command codes */
+	[IPA_HW_v3_0][IPA_IMM_CMD_IP_V4_FILTER_INIT] = {
+		ipa_imm_cmd_construct_ip_v4_filter_init,
+		3, false},
+	[IPA_HW_v3_0][IPA_IMM_CMD_IP_V6_FILTER_INIT] = {
+		ipa_imm_cmd_construct_ip_v6_filter_init,
+		4, false},
+	[IPA_HW_v3_0][IPA_IMM_CMD_IP_V4_NAT_INIT] = {
+		ipa_imm_cmd_construct_ip_v4_nat_init,
+		5, false},
+	[IPA_HW_v3_0][IPA_IMM_CMD_IP_V4_ROUTING_INIT] = {
+		ipa_imm_cmd_construct_ip_v4_routing_init,
+		7, false},
+	[IPA_HW_v3_0][IPA_IMM_CMD_IP_V6_ROUTING_INIT] = {
+		ipa_imm_cmd_construct_ip_v6_routing_init,
+		8, false},
+	[IPA_HW_v3_0][IPA_IMM_CMD_HDR_INIT_LOCAL] = {
+		ipa_imm_cmd_construct_hdr_init_local,
+		9, false},
+	[IPA_HW_v3_0][IPA_IMM_CMD_HDR_INIT_SYSTEM] = {
+		ipa_imm_cmd_construct_hdr_init_system,
+		10, false},
+	[IPA_HW_v3_0][IPA_IMM_CMD_REGISTER_WRITE] = {
+		ipa_imm_cmd_construct_register_write,
+		12, false},
+	[IPA_HW_v3_0][IPA_IMM_CMD_NAT_DMA] = {
+		ipa_imm_cmd_construct_nat_dma,
+		14, false},
+	[IPA_HW_v3_0][IPA_IMM_CMD_IP_PACKET_INIT] = {
+		ipa_imm_cmd_construct_ip_packet_init,
+		16, false},
+	/* DMA_TASK_32B_ADDR is the only command with a dynamic opcode */
+	[IPA_HW_v3_0][IPA_IMM_CMD_DMA_TASK_32B_ADDR] = {
+		ipa_imm_cmd_construct_dma_task_32b_addr,
+		17, true},
+	[IPA_HW_v3_0][IPA_IMM_CMD_DMA_SHARED_MEM] = {
+		ipa_imm_cmd_construct_dma_shared_mem,
+		19, false},
+	[IPA_HW_v3_0][IPA_IMM_CMD_IP_PACKET_TAG_STATUS] = {
+		ipa_imm_cmd_construct_ip_packet_tag_status,
+		20, false},
+};
+
+/*
+ * ipahal_imm_cmd_init() - Build the Immediate command information table
+ * @ipa_hw_type: the running IPA H/W version
+ *
+ * See ipahal_imm_cmd_objs[][] comments.
+ * Returns 0 on success, -EINVAL on an out-of-range H/W type.
+ */
+static int ipahal_imm_cmd_init(enum ipa_hw_type ipa_hw_type)
+{
+	int i;
+	int j;
+	struct ipahal_imm_cmd_obj zero_obj;
+
+	IPAHAL_DBG_LOW("Entry - HW_TYPE=%d\n", ipa_hw_type);
+
+	if ((ipa_hw_type < 0) || (ipa_hw_type >= IPA_HW_MAX)) {
+		IPAHAL_ERR("invalid IPA HW type (%d)\n", ipa_hw_type);
+		return -EINVAL;
+	}
+
+	memset(&zero_obj, 0, sizeof(zero_obj));
+	/*
+	 * Walk the versions from IPAv3.0 up to the running H/W type.
+	 * Any entry of version i+1 that is still all-zero (i.e. not
+	 * explicitly defined in the static table) inherits the entry of
+	 * version i; explicitly overridden entries are sanity-checked.
+	 */
+	for (i = IPA_HW_v3_0 ; i < ipa_hw_type ; i++) {
+		for (j = 0; j < IPA_IMM_CMD_MAX ; j++) {
+			if (!memcmp(&ipahal_imm_cmd_objs[i+1][j], &zero_obj,
+				sizeof(struct ipahal_imm_cmd_obj))) {
+				memcpy(&ipahal_imm_cmd_objs[i+1][j],
+					&ipahal_imm_cmd_objs[i][j],
+					sizeof(struct ipahal_imm_cmd_obj));
+			} else {
+				/*
+				 * explicitly overridden immediate command.
+				 * Check validity
+				 */
+				if (!ipahal_imm_cmd_objs[i+1][j].opcode) {
+					IPAHAL_ERR(
+					  "imm_cmd=%s with zero opcode ipa_ver=%d\n",
+					  ipahal_imm_cmd_name_str(j), i+1);
+					WARN_ON(1);
+				}
+				if (!ipahal_imm_cmd_objs[i+1][j].construct) {
+					IPAHAL_ERR(
+					  "imm_cmd=%s with NULL construct func ipa_ver=%d\n",
+					  ipahal_imm_cmd_name_str(j), i+1);
+					WARN_ON(1);
+				}
+			}
+		}
+	}
+
+	return 0;
+}
+
+/*
+ * ipahal_imm_cmd_name_str() - printable name of an immediate command
+ * @cmd_name: [in] Immediate command name
+ *
+ * Returns a static string; a fallback string for out-of-range values.
+ */
+const char *ipahal_imm_cmd_name_str(enum ipahal_imm_cmd_name cmd_name)
+{
+	if (cmd_name >= 0 && cmd_name < IPA_IMM_CMD_MAX)
+		return ipahal_imm_cmd_name_to_str[cmd_name];
+
+	IPAHAL_ERR("requested name of invalid imm_cmd=%d\n", cmd_name);
+	return "Invalid IMM_CMD";
+}
+
+/*
+ * ipahal_imm_cmd_get_opcode() - Get the fixed opcode of the immediate command
+ * @cmd: [in] Immediate command name
+ *
+ * Returns the opcode for the running H/W type, or -EFAULT (truncated to
+ * the u16 return type) on an invalid or obsolete command.
+ */
+u16 ipahal_imm_cmd_get_opcode(enum ipahal_imm_cmd_name cmd)
+{
+	u16 opcode;
+
+	if (cmd >= IPA_IMM_CMD_MAX) {
+		IPAHAL_ERR("Invalid immediate command imm_cmd=%u\n", cmd);
+		ipa_assert();
+		return -EFAULT;
+	}
+
+	IPAHAL_DBG_LOW("Get opcode of IMM_CMD=%s\n",
+		ipahal_imm_cmd_name_str(cmd));
+	opcode = ipahal_imm_cmd_objs[ipahal_ctx->hw_type][cmd].opcode;
+	/*
+	 * Obsolete commands are marked with opcode -1 in the table. The
+	 * opcode field is u16, so -1 is stored as 0xFFFF; compare against
+	 * (u16)-1. (Comparing a u32 local against -1 could never match and
+	 * made this check dead code.)
+	 */
+	if (opcode == (u16)-1) {
+		IPAHAL_ERR("Try to get opcode of obsolete IMM_CMD=%s\n",
+			ipahal_imm_cmd_name_str(cmd));
+		ipa_assert();
+		return -EFAULT;
+	}
+
+	return opcode;
+}
+
+/*
+ * ipahal_imm_cmd_get_opcode_param() - Get the opcode of an immediate command
+ * that supports dynamic opcode
+ * @cmd: [in] Immediate command name
+ * @param: [in] dynamic part of the opcode (goes in the hi-byte)
+ *
+ * Some commands' opcodes are not totally fixed: part of the opcode is
+ * a supplied parameter. E.g. the Low-Byte is fixed and the Hi-Byte is a
+ * given parameter. This API returns the composed opcode of the command
+ * given the parameter, or -EFAULT (truncated to u16) on error.
+ * Note: Use this API only for immediate commands that support Dynamic Opcode.
+ */
+u16 ipahal_imm_cmd_get_opcode_param(enum ipahal_imm_cmd_name cmd, int param)
+{
+	u16 opcode;
+
+	if (cmd >= IPA_IMM_CMD_MAX) {
+		IPAHAL_ERR("Invalid immediate command IMM_CMD=%u\n", cmd);
+		ipa_assert();
+		return -EFAULT;
+	}
+
+	IPAHAL_DBG_LOW("Get opcode of IMM_CMD=%s\n",
+		ipahal_imm_cmd_name_str(cmd));
+
+	if (!ipahal_imm_cmd_objs[ipahal_ctx->hw_type][cmd].dyn_op) {
+		IPAHAL_ERR("IMM_CMD=%s does not support dynamic opcode\n",
+			ipahal_imm_cmd_name_str(cmd));
+		ipa_assert();
+		return -EFAULT;
+	}
+
+	/*
+	 * Currently, dynamic opcode commands use the param to be set
+	 * on the Opcode hi-byte (lo-byte is fixed).
+	 * If this is to be changed in the future, make the opcode
+	 * calculation a CB per command.
+	 * The param must therefore fit in 8 bits: the previous ~0xFFFF
+	 * check allowed params whose shifted value silently overflowed
+	 * the u16 return value.
+	 */
+	if (param & ~0xFF) {
+		IPAHAL_ERR("IMM_CMD=%s opcode param is invalid\n",
+			ipahal_imm_cmd_name_str(cmd));
+		ipa_assert();
+		return -EFAULT;
+	}
+	opcode = ipahal_imm_cmd_objs[ipahal_ctx->hw_type][cmd].opcode;
+	/* opcode -1 (stored as 0xFFFF in the u16 field) marks an
+	 * obsolete command
+	 */
+	if (opcode == (u16)-1) {
+		IPAHAL_ERR("Try to get opcode of obsolete IMM_CMD=%s\n",
+			ipahal_imm_cmd_name_str(cmd));
+		ipa_assert();
+		return -EFAULT;
+	}
+	/* the fixed part must leave the hi-byte free for the param */
+	if (opcode & ~0xFF) {
+		IPAHAL_ERR("IMM_CMD=%s opcode will be overridden\n",
+			ipahal_imm_cmd_name_str(cmd));
+		ipa_assert();
+		return -EFAULT;
+	}
+	return (opcode + (param<<8));
+}
+
+/*
+ * ipahal_construct_imm_cmd() - Construct an immediate command
+ * Builds an imm cmd payload that can be sent to IPA. The payload is
+ * allocated dynamically; release it with ipahal_destroy_imm_cmd()
+ * when done.
+ */
+struct ipahal_imm_cmd_pyld *ipahal_construct_imm_cmd(
+	enum ipahal_imm_cmd_name cmd, const void *params, bool is_atomic_ctx)
+{
+	struct ipahal_imm_cmd_pyld *(*construct)(enum ipahal_imm_cmd_name,
+		const void *, bool);
+
+	if (!params) {
+		IPAHAL_ERR("Input error: params=%p\n", params);
+		ipa_assert();
+		return NULL;
+	}
+	if (cmd >= IPA_IMM_CMD_MAX) {
+		IPAHAL_ERR("Invalid immediate command %u\n", cmd);
+		ipa_assert();
+		return NULL;
+	}
+
+	IPAHAL_DBG_LOW("construct IMM_CMD:%s\n", ipahal_imm_cmd_name_str(cmd));
+	/* dispatch to the H/W-version-specific constructor */
+	construct = ipahal_imm_cmd_objs[ipahal_ctx->hw_type][cmd].construct;
+	return construct(cmd, params, is_atomic_ctx);
+}
+
+/*
+ * ipahal_construct_nop_imm_cmd() - Construct an immediate command for NO-Op
+ * The core driver may want to inject NOP commands to IPA, e.g. to force
+ * a PIPELINE clear before some other operation. This is a convenience
+ * wrapper around ipahal_construct_imm_cmd() that builds a masked-off
+ * REGISTER_WRITE (value_mask = 0, so nothing is written).
+ * @skip_pipline_clear: if true, skip the pipeline-clear waiting
+ * @pipline_clr_opt: options for pipeline clear waiting
+ * @is_atomic_ctx: is called in atomic context or can sleep?
+ */
+struct ipahal_imm_cmd_pyld *ipahal_construct_nop_imm_cmd(
+	bool skip_pipline_clear,
+	enum ipahal_pipeline_clear_option pipline_clr_opt,
+	bool is_atomic_ctx)
+{
+	struct ipahal_imm_cmd_pyld *nop_pyld;
+	/* designated initializer zeroes all remaining fields */
+	struct ipahal_imm_cmd_register_write reg_write = {
+		.skip_pipeline_clear = skip_pipline_clear,
+		.pipeline_clear_options = pipline_clr_opt,
+		.value_mask = 0x0,
+	};
+
+	nop_pyld = ipahal_construct_imm_cmd(IPA_IMM_CMD_REGISTER_WRITE,
+		&reg_write, is_atomic_ctx);
+	if (!nop_pyld)
+		IPAHAL_ERR("failed to construct register_write imm cmd\n");
+
+	return nop_pyld;
+}
+
+
+/* IPA Packet Status Logic */
+
+/*
+ * Copy one bit of the H/W status_mask into the abstracted status_mask at
+ * bit position __shft. Relies on the local variables "status" and
+ * "hw_status" of the enclosing parse function.
+ */
+#define IPA_PKT_STATUS_SET_MSK(__hw_bit_msk, __shft) \
+	(status->status_mask |= \
+		((hw_status->status_mask & (__hw_bit_msk) ? 1 : 0) << (__shft)))
+
+/*
+ * ipa_pkt_status_parse() - IPAv3-specific packet status parsing
+ * @unparsed_status: raw status buffer, laid out per struct ipa_pkt_status_hw
+ * @status: [out] pre-allocated abstracted status structure to fill
+ *
+ * Copies the raw bitfields and decodes the opcode / NAT type / exception
+ * H/W encodings into their abstracted enum values.
+ */
+static void ipa_pkt_status_parse(
+	const void *unparsed_status, struct ipahal_pkt_status *status)
+{
+	enum ipahal_pkt_status_opcode opcode = 0;
+	enum ipahal_pkt_status_exception exception_type = 0;
+
+	struct ipa_pkt_status_hw *hw_status =
+		(struct ipa_pkt_status_hw *)unparsed_status;
+
+	status->pkt_len = hw_status->pkt_len;
+	status->endp_src_idx = hw_status->endp_src_idx;
+	status->endp_dest_idx = hw_status->endp_dest_idx;
+	status->metadata = hw_status->metadata;
+	status->flt_local = hw_status->flt_local;
+	status->flt_hash = hw_status->flt_hash;
+	/*
+	 * NOTE(review): flt_global is copied from the flt_hash H/W bit.
+	 * Looks like a copy/paste slip unless the H/W status really has no
+	 * global-filter bit - confirm against the IPAv3 status layout.
+	 */
+	status->flt_global = hw_status->flt_hash;
+	status->flt_ret_hdr = hw_status->flt_ret_hdr;
+	/*
+	 * miss when the rule id is all-ones (~id == 0).
+	 * NOTE(review): if flt_rule_id is a narrow bitfield, integer
+	 * promotion makes ~id non-zero for every value - verify the
+	 * intended all-ones sentinel is actually detected.
+	 */
+	status->flt_miss = ~(hw_status->flt_rule_id) ? false : true;
+	status->flt_rule_id = hw_status->flt_rule_id;
+	status->rt_local = hw_status->rt_local;
+	status->rt_hash = hw_status->rt_hash;
+	status->ucp = hw_status->ucp;
+	status->rt_tbl_idx = hw_status->rt_tbl_idx;
+	/* same all-ones sentinel convention as flt_miss above */
+	status->rt_miss = ~(hw_status->rt_rule_id) ? false : true;
+	status->rt_rule_id = hw_status->rt_rule_id;
+	status->nat_hit = hw_status->nat_hit;
+	status->nat_entry_idx = hw_status->nat_entry_idx;
+	status->tag_info = hw_status->tag_info;
+	status->seq_num = hw_status->seq_num;
+	status->time_of_day_ctr = hw_status->time_of_day_ctr;
+	status->hdr_local = hw_status->hdr_local;
+	status->hdr_offset = hw_status->hdr_offset;
+	status->frag_hit = hw_status->frag_hit;
+	status->frag_rule = hw_status->frag_rule;
+
+	/* the H/W opcode is a one-hot encoding */
+	switch (hw_status->status_opcode) {
+	case 0x1:
+		opcode = IPAHAL_PKT_STATUS_OPCODE_PACKET;
+		break;
+	case 0x2:
+		opcode = IPAHAL_PKT_STATUS_OPCODE_NEW_FRAG_RULE;
+		break;
+	case 0x4:
+		opcode = IPAHAL_PKT_STATUS_OPCODE_DROPPED_PACKET;
+		break;
+	case 0x8:
+		opcode = IPAHAL_PKT_STATUS_OPCODE_SUSPENDED_PACKET;
+		break;
+	case 0x10:
+		opcode = IPAHAL_PKT_STATUS_OPCODE_LOG;
+		break;
+	case 0x20:
+		opcode = IPAHAL_PKT_STATUS_OPCODE_DCMP;
+		break;
+	case 0x40:
+		opcode = IPAHAL_PKT_STATUS_OPCODE_PACKET_2ND_PASS;
+		break;
+	default:
+		/* opcode stays 0; caller sees an unset status_opcode */
+		IPAHAL_ERR("unsupported Status Opcode 0x%x\n",
+			hw_status->status_opcode);
+		WARN_ON(1);
+	};
+	status->status_opcode = opcode;
+
+	switch (hw_status->nat_type) {
+	case 0:
+		status->nat_type = IPAHAL_PKT_STATUS_NAT_NONE;
+		break;
+	case 1:
+		status->nat_type = IPAHAL_PKT_STATUS_NAT_SRC;
+		break;
+	case 2:
+		status->nat_type = IPAHAL_PKT_STATUS_NAT_DST;
+		break;
+	default:
+		IPAHAL_ERR("unsupported Status NAT type 0x%x\n",
+			hw_status->nat_type);
+		WARN_ON(1);
+	};
+
+	/*
+	 * NOTE(review): H/W exception code 2 is not mapped, and
+	 * IPAHAL_PKT_STATUS_EXCEPTION_PACKET_THRESHOLD is never produced
+	 * here although it has a name string - confirm against the IPAv3
+	 * exception encoding.
+	 */
+	switch (hw_status->exception) {
+	case 0:
+		exception_type = IPAHAL_PKT_STATUS_EXCEPTION_NONE;
+		break;
+	case 1:
+		exception_type = IPAHAL_PKT_STATUS_EXCEPTION_DEAGGR;
+		break;
+	case 4:
+		exception_type = IPAHAL_PKT_STATUS_EXCEPTION_IPTYPE;
+		break;
+	case 8:
+		exception_type = IPAHAL_PKT_STATUS_EXCEPTION_PACKET_LENGTH;
+		break;
+	case 16:
+		exception_type = IPAHAL_PKT_STATUS_EXCEPTION_FRAG_RULE_MISS;
+		break;
+	case 32:
+		exception_type = IPAHAL_PKT_STATUS_EXCEPTION_SW_FILT;
+		break;
+	case 64:
+		exception_type = IPAHAL_PKT_STATUS_EXCEPTION_NAT;
+		break;
+	default:
+		IPAHAL_ERR("unsupported Status Exception type 0x%x\n",
+			hw_status->exception);
+		WARN_ON(1);
+	};
+	status->exception = exception_type;
+
+	/* re-pack the 16 H/W status-mask bits into the abstracted mask */
+	IPA_PKT_STATUS_SET_MSK(0x1, IPAHAL_PKT_STATUS_MASK_FRAG_PROCESS_SHFT);
+	IPA_PKT_STATUS_SET_MSK(0x2, IPAHAL_PKT_STATUS_MASK_FILT_PROCESS_SHFT);
+	IPA_PKT_STATUS_SET_MSK(0x4, IPAHAL_PKT_STATUS_MASK_NAT_PROCESS_SHFT);
+	IPA_PKT_STATUS_SET_MSK(0x8, IPAHAL_PKT_STATUS_MASK_ROUTE_PROCESS_SHFT);
+	IPA_PKT_STATUS_SET_MSK(0x10, IPAHAL_PKT_STATUS_MASK_TAG_VALID_SHFT);
+	IPA_PKT_STATUS_SET_MSK(0x20, IPAHAL_PKT_STATUS_MASK_FRAGMENT_SHFT);
+	IPA_PKT_STATUS_SET_MSK(0x40,
+		IPAHAL_PKT_STATUS_MASK_FIRST_FRAGMENT_SHFT);
+	IPA_PKT_STATUS_SET_MSK(0x80, IPAHAL_PKT_STATUS_MASK_V4_SHFT);
+	IPA_PKT_STATUS_SET_MSK(0x100,
+		IPAHAL_PKT_STATUS_MASK_CKSUM_PROCESS_SHFT);
+	IPA_PKT_STATUS_SET_MSK(0x200, IPAHAL_PKT_STATUS_MASK_AGGR_PROCESS_SHFT);
+	IPA_PKT_STATUS_SET_MSK(0x400, IPAHAL_PKT_STATUS_MASK_DEST_EOT_SHFT);
+	IPA_PKT_STATUS_SET_MSK(0x800,
+		IPAHAL_PKT_STATUS_MASK_DEAGGR_PROCESS_SHFT);
+	IPA_PKT_STATUS_SET_MSK(0x1000, IPAHAL_PKT_STATUS_MASK_DEAGG_FIRST_SHFT);
+	IPA_PKT_STATUS_SET_MSK(0x2000, IPAHAL_PKT_STATUS_MASK_SRC_EOT_SHFT);
+	IPA_PKT_STATUS_SET_MSK(0x4000, IPAHAL_PKT_STATUS_MASK_PREV_EOT_SHFT);
+	IPA_PKT_STATUS_SET_MSK(0x8000, IPAHAL_PKT_STATUS_MASK_BYTE_LIMIT_SHFT);
+	status->status_mask &= 0xFFFF;
+}
+
+/*
+ * struct ipahal_pkt_status_obj - Packet Status H/W information for
+ * specific IPA version
+ * @size: H/W size of the status packet
+ * @parse: CB that parses the H/W packet status into the abstracted structure
+ */
+struct ipahal_pkt_status_obj {
+	u32 size;
+	void (*parse)(const void *unparsed_status,
+		struct ipahal_pkt_status *status);
+};
+
+/*
+ * This table contains the info regarding packet status for IPAv3 and later.
+ * Information like: size of packet status and parsing function.
+ * All the information on the pkt Status on IPAv3 is statically defined
+ * below. If information is missing regarding some IPA version, the init
+ * function will fill it with the information from the previous IPA version.
+ * Information is considered missing if all of the fields are 0.
+ */
+static struct ipahal_pkt_status_obj ipahal_pkt_status_objs[IPA_HW_MAX] = {
+	/* IPAv3 */
+	[IPA_HW_v3_0] = {
+		IPA3_0_PKT_STATUS_SIZE,
+		ipa_pkt_status_parse,
+	},
+};
+
+/*
+ * ipahal_pkt_status_init() - Build the packet status information array
+ * for the different IPA versions
+ * @ipa_hw_type: the running IPA H/W version
+ *
+ * See ipahal_pkt_status_objs[] comments.
+ * Returns 0 on success, -EINVAL on an out-of-range H/W type.
+ */
+static int ipahal_pkt_status_init(enum ipa_hw_type ipa_hw_type)
+{
+	int i;
+	struct ipahal_pkt_status_obj zero_obj;
+
+	IPAHAL_DBG_LOW("Entry - HW_TYPE=%d\n", ipa_hw_type);
+
+	if ((ipa_hw_type < 0) || (ipa_hw_type >= IPA_HW_MAX)) {
+		IPAHAL_ERR("invalid IPA HW type (%d)\n", ipa_hw_type);
+		return -EINVAL;
+	}
+
+	/*
+	 * Since structure alignment is implementation dependent,
+	 * add a test to avoid different and incompatible data layouts.
+	 *
+	 * In case new H/W has a different size or structure of status
+	 * packet, add a compile time validity check for it like below (as
+	 * well as the new defines and/or the new structure in the internal
+	 * header).
+	 */
+	BUILD_BUG_ON(sizeof(struct ipa_pkt_status_hw) !=
+		IPA3_0_PKT_STATUS_SIZE);
+
+	memset(&zero_obj, 0, sizeof(zero_obj));
+	/*
+	 * Inherit each all-zero (i.e. not explicitly defined) entry from
+	 * the previous IPA version; sanity-check explicit overrides.
+	 */
+	for (i = IPA_HW_v3_0 ; i < ipa_hw_type ; i++) {
+		if (!memcmp(&ipahal_pkt_status_objs[i+1], &zero_obj,
+			sizeof(struct ipahal_pkt_status_obj))) {
+			memcpy(&ipahal_pkt_status_objs[i+1],
+				&ipahal_pkt_status_objs[i],
+				sizeof(struct ipahal_pkt_status_obj));
+		} else {
+			/*
+			 * explicitly overridden Packet Status info
+			 * Check validity
+			 */
+			if (!ipahal_pkt_status_objs[i+1].size) {
+				IPAHAL_ERR(
+				  "Packet Status with zero size ipa_ver=%d\n",
+				  i+1);
+				WARN_ON(1);
+			}
+			if (!ipahal_pkt_status_objs[i+1].parse) {
+				IPAHAL_ERR(
+				  "Packet Status without Parse func ipa_ver=%d\n",
+				  i+1);
+				WARN_ON(1);
+			}
+		}
+	}
+
+	return 0;
+}
+
+/*
+ * ipahal_pkt_status_get_size() - H/W size of a packet status entry
+ * for the running IPA version.
+ */
+u32 ipahal_pkt_status_get_size(void)
+{
+	const struct ipahal_pkt_status_obj *obj =
+		&ipahal_pkt_status_objs[ipahal_ctx->hw_type];
+
+	return obj->size;
+}
+
+/*
+ * ipahal_pkt_status_parse() - Parse Packet Status payload to abstracted form
+ * @unparsed_status: Pointer to H/W format of the packet status as read from H/W
+ * @status: Pointer to pre-allocated buffer where the parsed info will be stored
+ */
+void ipahal_pkt_status_parse(const void *unparsed_status,
+	struct ipahal_pkt_status *status)
+{
+	if (!unparsed_status || !status) {
+		/* %pK (not %p) to avoid leaking kernel addresses to the log;
+		 * matches the convention used elsewhere in this file
+		 */
+		IPAHAL_ERR("Input Error: unparsed_status=%pK status=%pK\n",
+			unparsed_status, status);
+		return;
+	}
+
+	IPAHAL_DBG_LOW("Parse Status Packet\n");
+	/* clear first: the per-version parser sets only the fields it knows */
+	memset(status, 0, sizeof(*status));
+	ipahal_pkt_status_objs[ipahal_ctx->hw_type].parse(unparsed_status,
+		status);
+}
+
+/*
+ * ipahal_pkt_status_exception_str() - returns string represents exception type
+ * @exception: [in] The exception type
+ */
+const char *ipahal_pkt_status_exception_str(
+	enum ipahal_pkt_status_exception exception)
+{
+	/* index the string table only for in-range exception values */
+	if (exception >= 0 && exception < IPAHAL_PKT_STATUS_EXCEPTION_MAX)
+		return ipahal_pkt_status_exception_to_str[exception];
+
+	IPAHAL_ERR(
+		"requested string of invalid pkt_status exception=%d\n",
+		exception);
+	return "Invalid PKT_STATUS_EXCEPTION";
+}
+
+#ifdef CONFIG_DEBUG_FS
+/*
+ * ipahal_debugfs_init() - create the "ipahal" debugfs directory.
+ * Failure is logged but non-fatal; dent is normalized to NULL so a
+ * later ipahal_debugfs_remove() is a safe no-op.
+ */
+static void ipahal_debugfs_init(void)
+{
+	/* NULL parent -> create directly under the debugfs root
+	 * (the original passed the integer 0 as a pointer constant)
+	 */
+	ipahal_ctx->dent = debugfs_create_dir("ipahal", NULL);
+	if (IS_ERR_OR_NULL(ipahal_ctx->dent)) {
+		IPAHAL_ERR("fail to create ipahal debugfs folder\n");
+		/* original "cleanup" removed a NULL/ERR handle - a no-op;
+		 * just reset the handle
+		 */
+		ipahal_ctx->dent = NULL;
+	}
+}
+
+/*
+ * ipahal_debugfs_remove() - tear down the "ipahal" debugfs directory.
+ * Tolerates a missing context or a directory that was never created.
+ */
+static void ipahal_debugfs_remove(void)
+{
+	/* destroy may race init failure: no context, nothing to remove */
+	if (!ipahal_ctx)
+		return;
+
+	if (!IS_ERR(ipahal_ctx->dent)) {
+		debugfs_remove_recursive(ipahal_ctx->dent);
+		return;
+	}
+
+	IPAHAL_ERR("ipahal debugfs folder was not created\n");
+}
+#else /* CONFIG_DEBUG_FS */
+static void ipahal_debugfs_init(void) {}
+static void ipahal_debugfs_remove(void) {}
+#endif /* CONFIG_DEBUG_FS */
+
+/*
+ * ipahal_cp_hdr_to_hw_buff_v3() - copy header to hardware buffer according to
+ * base address and offset given.
+ * @base: dma base address
+ * @offset: offset from base address where the data will be copied
+ * @hdr: the header to be copied
+ * @hdr_len: the length of the header
+ *
+ * On IPAv3 a header is an opaque byte string; a plain copy suffices.
+ */
+static void ipahal_cp_hdr_to_hw_buff_v3(void *const base, u32 offset,
+		u8 *const hdr, u32 hdr_len)
+{
+	u8 *dst = (u8 *)base + offset;
+
+	memcpy(dst, hdr, hdr_len);
+}
+
+/*
+ * ipahal_cp_proc_ctx_to_hw_buff_v3() - copy processing context to
+ * base address and offset given.
+ * @type: header processing context type (no processing context,
+ *  IPA_HDR_PROC_ETHII_TO_ETHII etc.)
+ * @base: dma base address
+ * @offset: offset from base address where the data will be copied
+ * @hdr_len: the length of the header
+ * @is_hdr_proc_ctx: header is located in phys_base (true) or hdr_base_addr
+ * @phys_base: memory location in DDR
+ * @hdr_base_addr: base address in table
+ * @offset_entry: offset from hdr_base_addr in table
+ *
+ * The H/W buffer is a TLV sequence: a HDR_ADD record, then - for real
+ * processing types only - a PROC_CMD record, terminated by an END TLV.
+ * Return: 0 on success, -EINVAL on an unknown processing type.
+ */
+static int ipahal_cp_proc_ctx_to_hw_buff_v3(enum ipa_hdr_proc_type type,
+		void *const base, u32 offset,
+		u32 hdr_len, bool is_hdr_proc_ctx,
+		dma_addr_t phys_base, u32 hdr_base_addr,
+		struct ipa_hdr_offset_entry *offset_entry){
+	if (type == IPA_HDR_PROC_NONE) {
+		/* no processing: HDR_ADD followed directly by END */
+		struct ipa_hw_hdr_proc_ctx_add_hdr_seq *ctx;
+
+		ctx = (struct ipa_hw_hdr_proc_ctx_add_hdr_seq *)
+			(base + offset);
+		ctx->hdr_add.tlv.type = IPA_PROC_CTX_TLV_TYPE_HDR_ADD;
+		ctx->hdr_add.tlv.length = 1;
+		ctx->hdr_add.tlv.value = hdr_len;
+		/* header source: DDR (proc ctx) or the header table */
+		ctx->hdr_add.hdr_addr = is_hdr_proc_ctx ? phys_base :
+			hdr_base_addr + offset_entry->offset;
+		IPAHAL_DBG("header address 0x%x\n",
+			ctx->hdr_add.hdr_addr);
+		ctx->end.type = IPA_PROC_CTX_TLV_TYPE_END;
+		ctx->end.length = 0;
+		ctx->end.value = 0;
+	} else {
+		/* processing requested: HDR_ADD, PROC_CMD, then END */
+		struct ipa_hw_hdr_proc_ctx_add_hdr_cmd_seq *ctx;
+
+		ctx = (struct ipa_hw_hdr_proc_ctx_add_hdr_cmd_seq *)
+			(base + offset);
+		ctx->hdr_add.tlv.type = IPA_PROC_CTX_TLV_TYPE_HDR_ADD;
+		ctx->hdr_add.tlv.length = 1;
+		ctx->hdr_add.tlv.value = hdr_len;
+		ctx->hdr_add.hdr_addr = is_hdr_proc_ctx ? phys_base :
+			hdr_base_addr + offset_entry->offset;
+		IPAHAL_DBG("header address 0x%x\n",
+			ctx->hdr_add.hdr_addr);
+		ctx->cmd.type = IPA_PROC_CTX_TLV_TYPE_PROC_CMD;
+		ctx->cmd.length = 0;
+		/* map the abstract proc type to the uC processing command */
+		switch (type) {
+		case IPA_HDR_PROC_ETHII_TO_ETHII:
+			ctx->cmd.value = IPA_HDR_UCP_ETHII_TO_ETHII;
+			break;
+		case IPA_HDR_PROC_ETHII_TO_802_3:
+			ctx->cmd.value = IPA_HDR_UCP_ETHII_TO_802_3;
+			break;
+		case IPA_HDR_PROC_802_3_TO_ETHII:
+			ctx->cmd.value = IPA_HDR_UCP_802_3_TO_ETHII;
+			break;
+		case IPA_HDR_PROC_802_3_TO_802_3:
+			ctx->cmd.value = IPA_HDR_UCP_802_3_TO_802_3;
+			break;
+		default:
+			IPAHAL_ERR("unknown ipa_hdr_proc_type %d", type);
+			WARN_ON(1);
+			return -EINVAL;
+		}
+		IPAHAL_DBG("command id %d\n", ctx->cmd.value);
+		ctx->end.type = IPA_PROC_CTX_TLV_TYPE_END;
+		ctx->end.length = 0;
+		ctx->end.value = 0;
+	}
+
+	return 0;
+}
+
+/*
+ * ipahal_get_proc_ctx_needed_len_v3() - calculates the needed length for
+ * addition of header processing context according to the type of processing
+ * context.
+ * @type: header processing context type (no processing context,
+ *  IPA_HDR_PROC_ETHII_TO_ETHII etc.)
+ */
+static int ipahal_get_proc_ctx_needed_len_v3(enum ipa_hdr_proc_type type)
+{
+	/* a "none" context omits the PROC_CMD TLV, so it is shorter */
+	if (type == IPA_HDR_PROC_NONE)
+		return sizeof(struct ipa_hw_hdr_proc_ctx_add_hdr_seq);
+
+	return sizeof(struct ipa_hw_hdr_proc_ctx_add_hdr_cmd_seq);
+}
+
+/*
+ * struct ipahal_hdr_funcs - headers handling functions for specific IPA
+ * version
+ * @ipahal_cp_hdr_to_hw_buff - copy function for regular headers
+ * @ipahal_cp_proc_ctx_to_hw_buff - copy function for header processing
+ *  context entries; returns 0 on success or a negative value on error
+ * @ipahal_get_proc_ctx_needed_len - returns the buffer length needed for
+ *  a processing context of the given type
+ */
+struct ipahal_hdr_funcs {
+	void (*ipahal_cp_hdr_to_hw_buff)(void *const base, u32 offset,
+			u8 *const hdr, u32 hdr_len);
+
+	int (*ipahal_cp_proc_ctx_to_hw_buff)(enum ipa_hdr_proc_type type,
+			void *const base, u32 offset, u32 hdr_len,
+			bool is_hdr_proc_ctx, dma_addr_t phys_base,
+			u32 hdr_base_addr,
+			struct ipa_hdr_offset_entry *offset_entry);
+
+	int (*ipahal_get_proc_ctx_needed_len)(enum ipa_hdr_proc_type type);
+};
+
+/* H/W-version-dispatched header ops; bound once by ipahal_hdr_init() */
+static struct ipahal_hdr_funcs hdr_funcs;
+
+/*
+ * ipahal_hdr_init() - bind the header-handling ops for the given H/W version
+ * @ipa_hw_type: IPA H/W type/version
+ *
+ * When new H/W needs different handling, insert a case for it and keep the
+ * default on the latest implementation; all previously supported versions
+ * must have explicit cases.
+ */
+static void ipahal_hdr_init(enum ipa_hw_type ipa_hw_type)
+{
+	IPAHAL_DBG("Entry - HW_TYPE=%d\n", ipa_hw_type);
+
+	switch (ipa_hw_type) {
+	case IPA_HW_v3_0:
+	default:
+		hdr_funcs.ipahal_cp_hdr_to_hw_buff =
+				ipahal_cp_hdr_to_hw_buff_v3;
+		hdr_funcs.ipahal_cp_proc_ctx_to_hw_buff =
+				ipahal_cp_proc_ctx_to_hw_buff_v3;
+		hdr_funcs.ipahal_get_proc_ctx_needed_len =
+				ipahal_get_proc_ctx_needed_len_v3;
+		break;
+	}
+
+	IPAHAL_DBG("Exit\n");
+}
+
+/*
+ * ipahal_cp_hdr_to_hw_buff() - copy header to hardware buffer according to
+ * base address and offset given.
+ * @base: dma base address
+ * @offset: offset from base address where the data will be copied
+ * @hdr: the header to be copied
+ * @hdr_len: the length of the header
+ */
+void ipahal_cp_hdr_to_hw_buff(void *base, u32 offset, u8 *const hdr,
+	u32 hdr_len)
+{
+	IPAHAL_DBG_LOW("Entry\n");
+	IPAHAL_DBG("base %p, offset %d, hdr %p, hdr_len %d\n", base,
+		offset, hdr, hdr_len);
+	/* reject NULL buffers and zero-length headers before copying */
+	if (!base || !hdr_len || !hdr) {
+		/* message must end with '\n' or it may merge with the
+		 * next printk line
+		 */
+		IPAHAL_ERR("failed on validating params\n");
+		return;
+	}
+
+	hdr_funcs.ipahal_cp_hdr_to_hw_buff(base, offset, hdr, hdr_len);
+
+	IPAHAL_DBG_LOW("Exit\n");
+}
+
+/*
+ * ipahal_cp_proc_ctx_to_hw_buff() - copy processing context to
+ * base address and offset given.
+ * @type: type of header processing context
+ * @base: dma base address
+ * @offset: offset from base address where the data will be copied
+ * @hdr_len: the length of the header
+ * @is_hdr_proc_ctx: header is located in phys_base (true) or hdr_base_addr
+ * @phys_base: memory location in DDR
+ * @hdr_base_addr: base address in table
+ * @offset_entry: offset from hdr_base_addr in table
+ *
+ * Return: 0 on success, -EINVAL on invalid input.
+ */
+int ipahal_cp_proc_ctx_to_hw_buff(enum ipa_hdr_proc_type type,
+	void *const base, u32 offset, u32 hdr_len,
+	bool is_hdr_proc_ctx, dma_addr_t phys_base,
+	u32 hdr_base_addr, struct ipa_hdr_offset_entry *offset_entry)
+{
+	IPAHAL_DBG(
+		"type %d, base %p, offset %d, hdr_len %d, is_hdr_proc_ctx %d, hdr_base_addr %d, offset_entry %p\n"
+		, type, base, offset, hdr_len, is_hdr_proc_ctx,
+		hdr_base_addr, offset_entry);
+
+	/*
+	 * The header source depends on is_hdr_proc_ctx:
+	 *   true  -> header lives in DDR at phys_base
+	 *   false -> header lives in the header table at
+	 *            hdr_base_addr + offset_entry->offset
+	 * Validate only the inputs the chosen mode actually uses.
+	 * (The previous check unconditionally required hdr_base_addr,
+	 * rejecting a valid phys_base whenever hdr_base_addr was 0.)
+	 */
+	if (!base || !hdr_len ||
+		(is_hdr_proc_ctx && !phys_base) ||
+		(!is_hdr_proc_ctx && (!hdr_base_addr || !offset_entry))) {
+		IPAHAL_ERR(
+			"invalid input: hdr_len:%u phys_base:%pad hdr_base_addr:%u is_hdr_proc_ctx:%d offset_entry:%pK\n"
+			, hdr_len, &phys_base, hdr_base_addr
+			, is_hdr_proc_ctx, offset_entry);
+		return -EINVAL;
+	}
+
+	return hdr_funcs.ipahal_cp_proc_ctx_to_hw_buff(type, base, offset,
+			hdr_len, is_hdr_proc_ctx, phys_base,
+			hdr_base_addr, offset_entry);
+}
+
+/*
+ * ipahal_get_proc_ctx_needed_len() - calculates the needed length for
+ * addition of header processing context according to the type of processing
+ * context
+ * @type: header processing context type (no processing context,
+ *  IPA_HDR_PROC_ETHII_TO_ETHII etc.)
+ */
+int ipahal_get_proc_ctx_needed_len(enum ipa_hdr_proc_type type)
+{
+	int needed_len;
+
+	IPAHAL_DBG("entry\n");
+
+	/* delegate to the H/W-version-specific implementation */
+	needed_len = hdr_funcs.ipahal_get_proc_ctx_needed_len(type);
+
+	IPAHAL_DBG("Exit\n");
+
+	return needed_len;
+}
+
+
+/*
+ * ipahal_init() - Initialize the IPA HAL module
+ * @ipa_hw_type: IPA H/W type/version (must be >= IPA_HW_v3_0)
+ * @base: io-mapped address of the IPA register space
+ * @ipa_pdev: IPA device, used later for DMA allocations
+ *
+ * Validates the parameters, allocates the HAL context and initializes the
+ * per-H/W-version sub-modules (registers, immediate commands, packet
+ * status, headers, flt/rt tables) and debugfs.
+ * Return: 0 on success, negative errno otherwise.
+ */
+int ipahal_init(enum ipa_hw_type ipa_hw_type, void __iomem *base,
+	struct device *ipa_pdev)
+{
+	int result;
+
+	IPAHAL_DBG("Entry - IPA HW TYPE=%d base=%p ipa_pdev=%p\n",
+		ipa_hw_type, base, ipa_pdev);
+
+	/* validate all parameters before allocating the context, so bad
+	 * input does not cost an alloc/free cycle
+	 */
+	if (ipa_hw_type < IPA_HW_v3_0) {
+		IPAHAL_ERR("ipahal supported on IPAv3 and later only\n");
+		result = -EINVAL;
+		goto bail_err_exit;
+	}
+
+	if (ipa_hw_type >= IPA_HW_MAX) {
+		IPAHAL_ERR("invalid IPA HW type (%d)\n", ipa_hw_type);
+		result = -EINVAL;
+		goto bail_err_exit;
+	}
+
+	if (!base) {
+		IPAHAL_ERR("invalid memory io mapping addr\n");
+		result = -EINVAL;
+		goto bail_err_exit;
+	}
+
+	if (!ipa_pdev) {
+		IPAHAL_ERR("invalid IPA platform device\n");
+		result = -EINVAL;
+		goto bail_err_exit;
+	}
+
+	ipahal_ctx = kzalloc(sizeof(*ipahal_ctx), GFP_KERNEL);
+	if (!ipahal_ctx) {
+		IPAHAL_ERR("kzalloc err for ipahal_ctx\n");
+		result = -ENOMEM;
+		goto bail_err_exit;
+	}
+
+	ipahal_ctx->hw_type = ipa_hw_type;
+	ipahal_ctx->base = base;
+	ipahal_ctx->ipa_pdev = ipa_pdev;
+
+	if (ipahal_reg_init(ipa_hw_type)) {
+		IPAHAL_ERR("failed to init ipahal reg\n");
+		result = -EFAULT;
+		goto bail_free_ctx;
+	}
+
+	if (ipahal_imm_cmd_init(ipa_hw_type)) {
+		IPAHAL_ERR("failed to init ipahal imm cmd\n");
+		result = -EFAULT;
+		goto bail_free_ctx;
+	}
+
+	if (ipahal_pkt_status_init(ipa_hw_type)) {
+		IPAHAL_ERR("failed to init ipahal pkt status\n");
+		result = -EFAULT;
+		goto bail_free_ctx;
+	}
+
+	ipahal_hdr_init(ipa_hw_type);
+
+	if (ipahal_fltrt_init(ipa_hw_type)) {
+		IPAHAL_ERR("failed to init ipahal flt rt\n");
+		result = -EFAULT;
+		goto bail_free_ctx;
+	}
+
+	ipahal_debugfs_init();
+
+	return 0;
+
+bail_free_ctx:
+	kfree(ipahal_ctx);
+	ipahal_ctx = NULL;
+bail_err_exit:
+	return result;
+}
+
+/*
+ * ipahal_destroy() - Tear down the IPA HAL module and free its context
+ */
+void ipahal_destroy(void)
+{
+	IPAHAL_DBG("Entry\n");
+
+	/* release sub-modules before dropping the shared context */
+	ipahal_fltrt_destroy();
+	ipahal_debugfs_remove();
+
+	kfree(ipahal_ctx);
+	ipahal_ctx = NULL;
+}
+
+/*
+ * ipahal_free_dma_mem() - free a DMA buffer and reset its descriptor
+ * @mem: buffer descriptor to release; a NULL descriptor is ignored
+ */
+void ipahal_free_dma_mem(struct ipa_mem_buffer *mem)
+{
+	if (unlikely(!mem))
+		return;
+
+	dma_free_coherent(ipahal_ctx->ipa_pdev, mem->size, mem->base,
+		mem->phys_base);
+	/* scrub the descriptor so a stale pointer cannot be double-freed */
+	mem->size = 0;
+	mem->base = NULL;
+	mem->phys_base = 0;
+}
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipahal/ipahal.h b/drivers/platform/msm/ipa/ipa_v3/ipahal/ipahal.h
new file mode 100644
index 0000000..6549775
--- /dev/null
+++ b/drivers/platform/msm/ipa/ipa_v3/ipahal/ipahal.h
@@ -0,0 +1,642 @@
+/* Copyright (c) 2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _IPAHAL_H_
+#define _IPAHAL_H_
+
+#include <linux/msm_ipa.h>
+#include "../../ipa_common_i.h"
+
+/*
+ * Immediate command names
+ *
+ * NOTE:: Any change to this enum, need to change to ipahal_imm_cmd_name_to_str
+ * array as well.
+ */
+enum ipahal_imm_cmd_name {
+ IPA_IMM_CMD_IP_V4_FILTER_INIT,
+ IPA_IMM_CMD_IP_V6_FILTER_INIT,
+ IPA_IMM_CMD_IP_V4_NAT_INIT,
+ IPA_IMM_CMD_IP_V4_ROUTING_INIT,
+ IPA_IMM_CMD_IP_V6_ROUTING_INIT,
+ IPA_IMM_CMD_HDR_INIT_LOCAL,
+ IPA_IMM_CMD_HDR_INIT_SYSTEM,
+ IPA_IMM_CMD_REGISTER_WRITE,
+ IPA_IMM_CMD_NAT_DMA,
+ IPA_IMM_CMD_IP_PACKET_INIT,
+ IPA_IMM_CMD_DMA_SHARED_MEM,
+ IPA_IMM_CMD_IP_PACKET_TAG_STATUS,
+ IPA_IMM_CMD_DMA_TASK_32B_ADDR,
+ IPA_IMM_CMD_MAX,
+};
+
+/* Immediate commands abstracted structures */
+
+/*
+ * struct ipahal_imm_cmd_ip_v4_filter_init - IP_V4_FILTER_INIT cmd payload
+ * Inits IPv4 filter block.
+ * @hash_rules_addr: Addr in sys mem where ipv4 hashable flt tbl starts
+ * @hash_rules_size: Size in bytes of the hashable tbl to cpy to local mem
+ * @hash_local_addr: Addr in shared mem where ipv4 hashable flt tbl should
+ * be copied to
+ * @nhash_rules_addr: Addr in sys mem where ipv4 non-hashable flt tbl starts
+ * @nhash_rules_size: Size in bytes of the non-hashable tbl to cpy to local mem
+ * @nhash_local_addr: Addr in shared mem where ipv4 non-hashable flt tbl should
+ * be copied to
+ */
+struct ipahal_imm_cmd_ip_v4_filter_init {
+ u64 hash_rules_addr;
+ u32 hash_rules_size;
+ u32 hash_local_addr;
+ u64 nhash_rules_addr;
+ u32 nhash_rules_size;
+ u32 nhash_local_addr;
+};
+
+/*
+ * struct ipahal_imm_cmd_ip_v6_filter_init - IP_V6_FILTER_INIT cmd payload
+ * Inits IPv6 filter block.
+ * @hash_rules_addr: Addr in sys mem where ipv6 hashable flt tbl starts
+ * @hash_rules_size: Size in bytes of the hashable tbl to cpy to local mem
+ * @hash_local_addr: Addr in shared mem where ipv6 hashable flt tbl should
+ * be copied to
+ * @nhash_rules_addr: Addr in sys mem where ipv6 non-hashable flt tbl starts
+ * @nhash_rules_size: Size in bytes of the non-hashable tbl to cpy to local mem
+ * @nhash_local_addr: Addr in shared mem where ipv6 non-hashable flt tbl should
+ * be copied to
+ */
+struct ipahal_imm_cmd_ip_v6_filter_init {
+ u64 hash_rules_addr;
+ u32 hash_rules_size;
+ u32 hash_local_addr;
+ u64 nhash_rules_addr;
+ u32 nhash_rules_size;
+ u32 nhash_local_addr;
+};
+
+/*
+ * struct ipahal_imm_cmd_ip_v4_nat_init - IP_V4_NAT_INIT cmd payload
+ * Inits IPv4 NAT block. Initializes the NAT table with its dimensions,
+ * location, cache address and other related parameters.
+ * @table_index: For future support of multiple NAT tables
+ * @ipv4_rules_addr: Addr in sys/shared mem where ipv4 NAT rules start
+ * @ipv4_rules_addr_shared: ipv4_rules_addr in shared mem (if not, then sys)
+ * @ipv4_expansion_rules_addr: Addr in sys/shared mem where expantion NAT
+ * table starts. IPv4 NAT rules that result in NAT collision are located
+ * in this table.
+ * @ipv4_expansion_rules_addr_shared: ipv4_expansion_rules_addr in
+ * shared mem (if not, then sys)
+ * @index_table_addr: Addr in sys/shared mem where index table, which points
+ * to NAT table starts
+ * @index_table_addr_shared: index_table_addr in shared mem (if not, then sys)
+ * @index_table_expansion_addr: Addr in sys/shared mem where expansion index
+ * table starts
+ * @index_table_expansion_addr_shared: index_table_expansion_addr in
+ * shared mem (if not, then sys)
+ * @size_base_tables: Num of entries in NAT tbl and idx tbl (each)
+ * @size_expansion_tables: Num of entries in NAT expantion tbl and expantion
+ * idx tbl (each)
+ * @public_ip_addr: public IP address
+ */
+struct ipahal_imm_cmd_ip_v4_nat_init {
+ u8 table_index;
+ u64 ipv4_rules_addr;
+ bool ipv4_rules_addr_shared;
+ u64 ipv4_expansion_rules_addr;
+ bool ipv4_expansion_rules_addr_shared;
+ u64 index_table_addr;
+ bool index_table_addr_shared;
+ u64 index_table_expansion_addr;
+ bool index_table_expansion_addr_shared;
+ u16 size_base_tables;
+ u16 size_expansion_tables;
+ u32 public_ip_addr;
+};
+
+/*
+ * struct ipahal_imm_cmd_ip_v4_routing_init - IP_V4_ROUTING_INIT cmd payload
+ * Inits IPv4 routing table/structure - with the rules and other related params
+ * @hash_rules_addr: Addr in sys mem where ipv4 hashable rt tbl starts
+ * @hash_rules_size: Size in bytes of the hashable tbl to cpy to local mem
+ * @hash_local_addr: Addr in shared mem where ipv4 hashable rt tbl should
+ * be copied to
+ * @nhash_rules_addr: Addr in sys mem where ipv4 non-hashable rt tbl starts
+ * @nhash_rules_size: Size in bytes of the non-hashable tbl to cpy to local mem
+ * @nhash_local_addr: Addr in shared mem where ipv4 non-hashable rt tbl should
+ * be copied to
+ */
+struct ipahal_imm_cmd_ip_v4_routing_init {
+ u64 hash_rules_addr;
+ u32 hash_rules_size;
+ u32 hash_local_addr;
+ u64 nhash_rules_addr;
+ u32 nhash_rules_size;
+ u32 nhash_local_addr;
+};
+
+/*
+ * struct ipahal_imm_cmd_ip_v6_routing_init - IP_V6_ROUTING_INIT cmd payload
+ * Inits IPv6 routing table/structure - with the rules and other related params
+ * @hash_rules_addr: Addr in sys mem where ipv6 hashable rt tbl starts
+ * @hash_rules_size: Size in bytes of the hashable tbl to cpy to local mem
+ * @hash_local_addr: Addr in shared mem where ipv6 hashable rt tbl should
+ * be copied to
+ * @nhash_rules_addr: Addr in sys mem where ipv6 non-hashable rt tbl starts
+ * @nhash_rules_size: Size in bytes of the non-hashable tbl to cpy to local mem
+ * @nhash_local_addr: Addr in shared mem where ipv6 non-hashable rt tbl should
+ * be copied to
+ */
+struct ipahal_imm_cmd_ip_v6_routing_init {
+ u64 hash_rules_addr;
+ u32 hash_rules_size;
+ u32 hash_local_addr;
+ u64 nhash_rules_addr;
+ u32 nhash_rules_size;
+ u32 nhash_local_addr;
+};
+
+/*
+ * struct ipahal_imm_cmd_hdr_init_local - HDR_INIT_LOCAL cmd payload
+ * Inits hdr table within local mem with the hdrs and their length.
+ * @hdr_table_addr: Word address in sys mem where the table starts (SRC)
+ * @size_hdr_table: Size of the above (in bytes)
+ * @hdr_addr: header address in IPA sram (used as DST for memory copy)
+ */
+struct ipahal_imm_cmd_hdr_init_local {
+	u64 hdr_table_addr;
+	u32 size_hdr_table;
+	u32 hdr_addr;
+};
+
+/*
+ * struct ipahal_imm_cmd_hdr_init_system - HDR_INIT_SYSTEM cmd payload
+ * Inits hdr table within sys mem with the hdrs and their length.
+ * @hdr_table_addr: Word address in system memory where the hdrs tbl starts.
+ */
+struct ipahal_imm_cmd_hdr_init_system {
+ u64 hdr_table_addr;
+};
+
+/*
+ * struct ipahal_imm_cmd_nat_dma - NAT_DMA cmd payload
+ * Perform DMA operation on NAT related mem addresses. Copy data into
+ * different locations within NAT associated tbls. (For add/remove NAT rules)
+ * @table_index: NAT tbl index. Defines the NAT tbl on which to perform DMA op.
+ * @base_addr: Base addr to which the DMA operation should be performed.
+ * @offset: offset in bytes from base addr to write 'data' to
+ * @data: data to be written
+ */
+struct ipahal_imm_cmd_nat_dma {
+ u8 table_index;
+ u8 base_addr;
+ u32 offset;
+ u16 data;
+};
+
+/*
+ * struct ipahal_imm_cmd_ip_packet_init - IP_PACKET_INIT cmd payload
+ * Configuration for specific IP pkt. Shall be called prior to an IP pkt
+ * data. Pkt will not go through IP pkt processing.
+ * @destination_pipe_index: Destination pipe index (in case routing
+ * is enabled, this field will overwrite the rt rule)
+ */
+struct ipahal_imm_cmd_ip_packet_init {
+ u32 destination_pipe_index;
+};
+
+/*
+ * enum ipa_pipeline_clear_option - Values for pipeline clear waiting options
+ * @IPAHAL_HPS_CLEAR: Wait for HPS clear. All queues except high priority queue
+ * shall not be serviced until HPS is clear of packets or immediate commands.
+ * The high priority Rx queue / Q6ZIP group shall still be serviced normally.
+ *
+ * @IPAHAL_SRC_GRP_CLEAR: Wait for originating source group to be clear
+ * (for no packet contexts allocated to the originating source group).
+ * The source group / Rx queue shall not be serviced until all previously
+ * allocated packet contexts are released. All other source groups/queues shall
+ * be serviced normally.
+ *
+ * @IPAHAL_FULL_PIPELINE_CLEAR: Wait for full pipeline to be clear.
+ * All groups / Rx queues shall not be serviced until IPA pipeline is fully
+ * clear. This should be used for debug only.
+ */
+enum ipahal_pipeline_clear_option {
+ IPAHAL_HPS_CLEAR,
+ IPAHAL_SRC_GRP_CLEAR,
+ IPAHAL_FULL_PIPELINE_CLEAR
+};
+
+/*
+ * struct ipahal_imm_cmd_register_write - REGISTER_WRITE cmd payload
+ * Write value to register. Allows reg changes to be synced with data packet
+ * and other immediate commands. Can be used to access the sram
+ * @offset: offset from IPA base address - Lower 16bit of the IPA reg addr
+ * @value: value to write to register
+ * @value_mask: mask specifying which value bits to write to the register
+ * @skip_pipeline_clear: if to skip pipeline clear waiting (don't wait)
+ * @pipeline_clear_options: options for pipeline clear waiting
+ */
+struct ipahal_imm_cmd_register_write {
+ u32 offset;
+ u32 value;
+ u32 value_mask;
+ bool skip_pipeline_clear;
+ enum ipahal_pipeline_clear_option pipeline_clear_options;
+};
+
+/*
+ * struct ipahal_imm_cmd_dma_shared_mem - DMA_SHARED_MEM cmd payload
+ * Perform mem copy into or out of the SW area of IPA local mem
+ * @size: Size in bytes of data to copy. Expected size is up to 2K bytes
+ * @local_addr: Address in IPA local memory
+ * @is_read: Read operation from local memory? If not, then write.
+ * @skip_pipeline_clear: if to skip pipeline clear waiting (don't wait)
+ * @pipeline_clear_options: options for pipeline clear waiting
+ * @system_addr: Address in system memory
+ */
+struct ipahal_imm_cmd_dma_shared_mem {
+ u32 size;
+ u32 local_addr;
+ bool is_read;
+ bool skip_pipeline_clear;
+ enum ipahal_pipeline_clear_option pipeline_clear_options;
+ u64 system_addr;
+};
+
+/*
+ * struct ipahal_imm_cmd_ip_packet_tag_status - IP_PACKET_TAG_STATUS cmd payload
+ * This cmd is used to allow SW to track HW processing by setting a TAG
+ * value that is passed back to SW inside Packet Status information.
+ * TAG info will be provided as part of Packet Status info generated for
+ * the next pkt transferred over the pipe.
+ * This immediate command must be followed by a packet in the same transfer.
+ * @tag: Tag that is provided back to SW
+ */
+struct ipahal_imm_cmd_ip_packet_tag_status {
+ u64 tag;
+};
+
+/*
+ * struct ipahal_imm_cmd_dma_task_32b_addr - IPA_DMA_TASK_32B_ADDR cmd payload
+ * Used by clients using 32bit addresses. Used to perform DMA operation on
+ * multiple descriptors.
+ * The Opcode is dynamic, where it holds the number of buffer to process
+ * @cmplt: Complete flag: If true, IPA interrupt SW when the entire
+ * DMA related data was completely xfered to its destination.
+ * @eof: End Of Frame flag: If true, IPA asserts the EOT to the
+ *	dest client. This is used for the aggr sequence
+ * @flsh: Flush flag: If true pkt will go through the IPA blocks but
+ * will not be xfered to dest client but rather will be discarded
+ * @lock: Lock pipe flag: If true, IPA will stop processing descriptors
+ * from other EPs in the same src grp (RX queue)
+ * @unlock: Unlock pipe flag: If true, IPA will stop exclusively
+ * servicing current EP out of the src EPs of the grp (RX queue)
+ * @size1: Size of buffer1 data
+ * @addr1: Pointer to buffer1 data
+ * @packet_size: Total packet size. If a pkt send using multiple DMA_TASKs,
+ * only the first one needs to have this field set. It will be ignored
+ * in subsequent DMA_TASKs until the packet ends (EOT). First DMA_TASK
+ * must contain this field (2 or more buffers) or EOT.
+ */
+struct ipahal_imm_cmd_dma_task_32b_addr {
+ bool cmplt;
+ bool eof;
+ bool flsh;
+ bool lock;
+ bool unlock;
+ u32 size1;
+ u32 addr1;
+ u32 packet_size;
+};
+
+/*
+ * struct ipahal_imm_cmd_pyld - Immediate cmd payload information
+ * @len: length of the buffer
+ * @data: buffer contains the immediate command payload. Buffer goes
+ * back to back with this structure
+ */
+struct ipahal_imm_cmd_pyld {
+ u16 len;
+ u8 data[0];
+};
+
+
+/* Immediate command Function APIs */
+
+/*
+ * ipahal_imm_cmd_name_str() - returns string that represent the imm cmd
+ * @cmd_name: [in] Immediate command name
+ */
+const char *ipahal_imm_cmd_name_str(enum ipahal_imm_cmd_name cmd_name);
+
+/*
+ * ipahal_imm_cmd_get_opcode() - Get the fixed opcode of the immediate command
+ */
+u16 ipahal_imm_cmd_get_opcode(enum ipahal_imm_cmd_name cmd);
+
+/*
+ * ipahal_imm_cmd_get_opcode_param() - Get the opcode of an immediate command
+ * that supports dynamic opcode
+ * Some commands' opcodes are not totally fixed; part of the opcode is
+ * a supplied parameter. E.g. Low-Byte is fixed and Hi-Byte
+ * is a given parameter.
+ * This API will return the composed opcode of the command given
+ * the parameter
+ * Note: Use this API only for immediate commands that support Dynamic Opcode
+ */
+u16 ipahal_imm_cmd_get_opcode_param(enum ipahal_imm_cmd_name cmd, int param);
+
+/*
+ * ipahal_construct_imm_cmd() - Construct immediate command
+ * This function builds an imm cmd bulk that can be sent to IPA
+ * The command will be allocated dynamically.
+ * After done using it, call ipahal_destroy_imm_cmd() to release it
+ */
+struct ipahal_imm_cmd_pyld *ipahal_construct_imm_cmd(
+ enum ipahal_imm_cmd_name cmd, const void *params, bool is_atomic_ctx);
+
+/*
+ * ipahal_construct_nop_imm_cmd() - Construct immediate command for NO-Op
+ * Core driver may want functionality to inject NOP commands to IPA
+ * to ensure e.g., pipeline clear before some other operation.
+ * The functionality given by this function can be reached by
+ * ipahal_construct_imm_cmd(). This function is a helper for the core driver
+ * to reach this NOP functionality easily.
+ * @skip_pipline_clear: if to skip pipeline clear waiting (don't wait)
+ * @pipline_clr_opt: options for pipeline clear waiting
+ * @is_atomic_ctx: is called in atomic context or can sleep?
+ */
+struct ipahal_imm_cmd_pyld *ipahal_construct_nop_imm_cmd(
+ bool skip_pipline_clear,
+ enum ipahal_pipeline_clear_option pipline_clr_opt,
+ bool is_atomic_ctx);
+
+/*
+ * ipahal_destroy_imm_cmd() - Destroy/Release bulk that was built
+ * by the construction functions
+ */
+static inline void ipahal_destroy_imm_cmd(struct ipahal_imm_cmd_pyld *pyld)
+{
+ kfree(pyld);
+}
+
+
+/* IPA Status packet Structures and Function APIs */
+
+/*
+ * enum ipahal_pkt_status_opcode - Packet Status Opcode
+ * @IPAHAL_STATUS_OPCODE_PACKET_2ND_PASS: Packet Status generated as part of
+ * IPA second processing pass for a packet (i.e. IPA XLAT processing for
+ * the translated packet).
+ */
+enum ipahal_pkt_status_opcode {
+ IPAHAL_PKT_STATUS_OPCODE_PACKET = 0,
+ IPAHAL_PKT_STATUS_OPCODE_NEW_FRAG_RULE,
+ IPAHAL_PKT_STATUS_OPCODE_DROPPED_PACKET,
+ IPAHAL_PKT_STATUS_OPCODE_SUSPENDED_PACKET,
+ IPAHAL_PKT_STATUS_OPCODE_LOG,
+ IPAHAL_PKT_STATUS_OPCODE_DCMP,
+ IPAHAL_PKT_STATUS_OPCODE_PACKET_2ND_PASS,
+};
+
+/*
+ * enum ipahal_pkt_status_exception - Packet Status exception type
+ * @IPAHAL_PKT_STATUS_EXCEPTION_PACKET_LENGTH: formerly IHL exception.
+ *
+ * Note: IPTYPE, PACKET_LENGTH and PACKET_THRESHOLD exceptions means that
+ * partial / no IP processing took place and corresponding Status Mask
+ * fields should be ignored. Flt and rt info is not valid.
+ *
+ * NOTE:: Any change to this enum, need to change to
+ * ipahal_pkt_status_exception_to_str array as well.
+ */
+enum ipahal_pkt_status_exception {
+ IPAHAL_PKT_STATUS_EXCEPTION_NONE = 0,
+ IPAHAL_PKT_STATUS_EXCEPTION_DEAGGR,
+ IPAHAL_PKT_STATUS_EXCEPTION_IPTYPE,
+ IPAHAL_PKT_STATUS_EXCEPTION_PACKET_LENGTH,
+ IPAHAL_PKT_STATUS_EXCEPTION_PACKET_THRESHOLD,
+ IPAHAL_PKT_STATUS_EXCEPTION_FRAG_RULE_MISS,
+ IPAHAL_PKT_STATUS_EXCEPTION_SW_FILT,
+ IPAHAL_PKT_STATUS_EXCEPTION_NAT,
+ IPAHAL_PKT_STATUS_EXCEPTION_MAX,
+};
+
+/*
+ * enum ipahal_pkt_status_mask - Packet Status bitmask shift values of
+ * the contained flags. This bitmask indicates flags on the properties of
+ * the packet as well as IPA processing it may had.
+ * @FRAG_PROCESS: Frag block processing flag: Was pkt processed by frag block?
+ * Also means the frag info is valid unless exception or first frag
+ * @FILT_PROCESS: Flt block processing flag: Was pkt processed by flt block?
+ * Also means that flt info is valid.
+ * @NAT_PROCESS: NAT block processing flag: Was pkt processed by NAT block?
+ * Also means that NAT info is valid, unless exception.
+ * @ROUTE_PROCESS: Rt block processing flag: Was pkt processed by rt block?
+ * Also means that rt info is valid, unless exception.
+ * @TAG_VALID: Flag specifying if TAG and TAG info valid?
+ * @FRAGMENT: Flag specifying if pkt is IP fragment.
+ * @FIRST_FRAGMENT: Flag specifying if pkt is first fragment. In this case, frag
+ * info is invalid
+ * @V4: Flag specifying pkt is IPv4 or IPv6
+ * @CKSUM_PROCESS: CSUM block processing flag: Was pkt processed by csum block?
+ * If so, csum trailer exists
+ * @AGGR_PROCESS: Aggr block processing flag: Was pkt processed by aggr block?
+ * @DEST_EOT: Flag specifying if EOT was asserted for the pkt on dest endp
+ * @DEAGGR_PROCESS: Deaggr block processing flag: Was pkt processed by deaggr
+ * block?
+ * @DEAGG_FIRST: Flag specifying if this is the first pkt in deaggr frame
+ * @SRC_EOT: Flag specifying if EOT asserted by src endp when sending the buffer
+ * @PREV_EOT: Flag specifying if EOT was sent just before the pkt as part of
+ * aggr hard-byte-limit
+ * @BYTE_LIMIT: Flag specifying if pkt is over a configured byte limit.
+ */
+enum ipahal_pkt_status_mask {
+ IPAHAL_PKT_STATUS_MASK_FRAG_PROCESS_SHFT = 0,
+ IPAHAL_PKT_STATUS_MASK_FILT_PROCESS_SHFT,
+ IPAHAL_PKT_STATUS_MASK_NAT_PROCESS_SHFT,
+ IPAHAL_PKT_STATUS_MASK_ROUTE_PROCESS_SHFT,
+ IPAHAL_PKT_STATUS_MASK_TAG_VALID_SHFT,
+ IPAHAL_PKT_STATUS_MASK_FRAGMENT_SHFT,
+ IPAHAL_PKT_STATUS_MASK_FIRST_FRAGMENT_SHFT,
+ IPAHAL_PKT_STATUS_MASK_V4_SHFT,
+ IPAHAL_PKT_STATUS_MASK_CKSUM_PROCESS_SHFT,
+ IPAHAL_PKT_STATUS_MASK_AGGR_PROCESS_SHFT,
+ IPAHAL_PKT_STATUS_MASK_DEST_EOT_SHFT,
+ IPAHAL_PKT_STATUS_MASK_DEAGGR_PROCESS_SHFT,
+ IPAHAL_PKT_STATUS_MASK_DEAGG_FIRST_SHFT,
+ IPAHAL_PKT_STATUS_MASK_SRC_EOT_SHFT,
+ IPAHAL_PKT_STATUS_MASK_PREV_EOT_SHFT,
+ IPAHAL_PKT_STATUS_MASK_BYTE_LIMIT_SHFT,
+};
+
+/*
+ * Returns boolean value representing a property of a packet.
+ * @__flag_shft: The shift value of the flag of the status bitmask of
+ *  the needed property. See enum ipahal_pkt_status_mask
+ * @__status: Pointer to abstracted status structure
+ */
+#define IPAHAL_PKT_STATUS_MASK_FLAG_VAL(__flag_shft, __status) \
+ (((__status)->status_mask) & ((u32)0x1<<(__flag_shft)) ? true : false)
+
+/*
+ * enum ipahal_pkt_status_nat_type - Type of NAT
+ */
+enum ipahal_pkt_status_nat_type {
+ IPAHAL_PKT_STATUS_NAT_NONE,
+ IPAHAL_PKT_STATUS_NAT_SRC,
+ IPAHAL_PKT_STATUS_NAT_DST,
+};
+
+/*
+ * struct ipahal_pkt_status - IPA status packet abstracted payload.
+ * This structure describes the status packet fields for the
+ * following statuses: IPA_STATUS_PACKET, IPA_STATUS_DROPPED_PACKET,
+ * IPA_STATUS_SUSPENDED_PACKET.
+ * Other status types have different status packet structures.
+ * @status_opcode: The Type of the status (Opcode).
+ * @exception: The first exception that took place.
+ * In case of exception, src endp and pkt len are always valid.
+ * @status_mask: Bit mask for flags on several properties of the packet
+ * and the processing it may have passed at IPA.
+ * See enum ipahal_pkt_status_mask
+ * @pkt_len: Pkt pyld len including hdr and retained hdr if used. Does
+ * not include padding or checksum trailer len.
+ * @endp_src_idx: Source end point index.
+ * @endp_dest_idx: Destination end point index.
+ * Not valid in case of exception
+ * @metadata: meta data value used by packet
+ * @flt_local: Filter table location flag: Does the matching flt rule belong
+ * to a flt tbl that resides in lcl memory? (if not, then system mem)
+ * @flt_hash: Filter hash hit flag: Was the matching flt rule in the hash tbl?
+ * @flt_global: Global filter rule flag: Does the matching flt rule belong to
+ * the global flt tbl? (if not, then the per endp tables)
+ * @flt_ret_hdr: Retain header in filter rule flag: Does the matching flt rule
+ * specify to retain the header?
+ * @flt_miss: Filtering miss flag: Was there a filtering rule miss?
+ * In case of miss, all flt info is to be ignored
+ * @flt_rule_id: The ID of the matching filter rule (if no miss).
+ * This info can be combined with endp_src_idx to locate the exact rule.
+ * @rt_local: Route table location flag: Does the matching rt rule belong to
+ * a rt tbl that resides in lcl memory? (if not, then system mem)
+ * @rt_hash: Route hash hit flag: Was the matching rt rule in the hash tbl?
+ * @ucp: UC Processing flag
+ * @rt_tbl_idx: Index of rt tbl that contains the rule on which was a match
+ * @rt_miss: Routing miss flag: Was there a routing rule miss?
+ * @rt_rule_id: The ID of the matching rt rule. (if no miss). This info
+ * can be combined with rt_tbl_idx to locate the exact rule.
+ * @nat_hit: NAT hit flag: Was there a NAT hit?
+ * @nat_entry_idx: Index of the NAT entry used for NAT processing
+ * @nat_type: Defines the type of the NAT operation:
+ * @tag_info: S/W defined value provided via immediate command
+ * @seq_num: Per source endp unique packet sequence number
+ * @time_of_day_ctr: running counter from IPA clock
+ * @hdr_local: Header table location flag: In header insertion, was the header
+ * taken from the table that resides in local memory? (If no, then system mem)
+ * @hdr_offset: Offset of used header in the header table
+ * @frag_hit: Frag hit flag: Was there a frag rule hit in the H/W frag table?
+ * @frag_rule: Frag rule index in H/W frag table in case of frag hit
+ */
+struct ipahal_pkt_status {
+	enum ipahal_pkt_status_opcode status_opcode;
+	enum ipahal_pkt_status_exception exception;
+	u32 status_mask;
+	u32 pkt_len;
+	u8 endp_src_idx;
+	u8 endp_dest_idx;
+	u32 metadata;
+	bool flt_local;
+	bool flt_hash;
+	bool flt_global;
+	bool flt_ret_hdr;
+	bool flt_miss;
+	u16 flt_rule_id;
+	bool rt_local;
+	bool rt_hash;
+	bool ucp;
+	u8 rt_tbl_idx;
+	bool rt_miss;
+	u16 rt_rule_id;
+	bool nat_hit;
+	u16 nat_entry_idx;
+	enum ipahal_pkt_status_nat_type nat_type;
+	u64 tag_info;
+	u8 seq_num;
+	u32 time_of_day_ctr;
+	bool hdr_local;
+	u16 hdr_offset;
+	bool frag_hit;
+	u8 frag_rule;
+};
+
+/*
+ * ipahal_pkt_status_get_size() - Get H/W size of packet status
+ */
+u32 ipahal_pkt_status_get_size(void);
+
+/*
+ * ipahal_pkt_status_parse() - Parse Packet Status payload to abstracted form
+ * @unparsed_status: Pointer to H/W format of the packet status as read from H/W
+ * @status: Pointer to pre-allocated buffer where the parsed info will be stored
+ */
+void ipahal_pkt_status_parse(const void *unparsed_status,
+ struct ipahal_pkt_status *status);
+
+/*
+ * ipahal_pkt_status_exception_str() - returns string represents exception type
+ * @exception: [in] The exception type
+ */
+const char *ipahal_pkt_status_exception_str(
+ enum ipahal_pkt_status_exception exception);
+
+/*
+ * ipahal_cp_hdr_to_hw_buff() - copy header to hardware buffer according to
+ * base address and offset given.
+ * @base: dma base address
+ * @offset: offset from base address where the data will be copied
+ * @hdr: the header to be copied
+ * @hdr_len: the length of the header
+ */
+void ipahal_cp_hdr_to_hw_buff(void *base, u32 offset, u8 *hdr, u32 hdr_len);
+
+/*
+ * ipahal_cp_proc_ctx_to_hw_buff() - copy processing context to hardware
+ * buffer according to base address and offset given.
+ * @type: type of header processing context
+ * @base: dma base address
+ * @offset: offset from base address where the data will be copied
+ * @hdr_len: the length of the header
+ * @is_hdr_proc_ctx: header is located in phys_base (true) or hdr_base_addr
+ * @phys_base: memory location in DDR
+ * @hdr_base_addr: base address in table
+ * @offset_entry: offset from hdr_base_addr in table
+ */
+int ipahal_cp_proc_ctx_to_hw_buff(enum ipa_hdr_proc_type type,
+ void *base, u32 offset, u32 hdr_len,
+ bool is_hdr_proc_ctx, dma_addr_t phys_base,
+ u32 hdr_base_addr,
+ struct ipa_hdr_offset_entry *offset_entry);
+
+/*
+ * ipahal_get_proc_ctx_needed_len() - calculates the needed length for addition
+ * of header processing context according to the type of processing context
+ * @type: header processing context type (no processing context,
+ * IPA_HDR_PROC_ETHII_TO_ETHII etc.)
+ */
+int ipahal_get_proc_ctx_needed_len(enum ipa_hdr_proc_type type);
+
+int ipahal_init(enum ipa_hw_type ipa_hw_type, void __iomem *base,
+ struct device *ipa_pdev);
+void ipahal_destroy(void);
+void ipahal_free_dma_mem(struct ipa_mem_buffer *mem);
+
+#endif /* _IPAHAL_H_ */
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipahal/ipahal_fltrt.c b/drivers/platform/msm/ipa/ipa_v3/ipahal/ipahal_fltrt.c
new file mode 100644
index 0000000..e355d9d
--- /dev/null
+++ b/drivers/platform/msm/ipa/ipa_v3/ipahal/ipahal_fltrt.c
@@ -0,0 +1,3200 @@
+/* Copyright (c) 2012-2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/ipc_logging.h>
+#include <linux/debugfs.h>
+#include <linux/ipa.h>
+#include "ipahal.h"
+#include "ipahal_fltrt.h"
+#include "ipahal_fltrt_i.h"
+#include "ipahal_i.h"
+#include "../../ipa_common_i.h"
+
+/*
+ * struct ipahal_fltrt_obj - Flt/Rt H/W information for specific IPA version
+ * @support_hash: Is hashable tables supported
+ * @tbl_width: Width of table in bytes
+ * @sysaddr_alignment: System table address alignment
+ * @lcladdr_alignment: Local table offset alignment
+ * @blk_sz_alignment: Rules block size alignment
+ * @rule_start_alignment: Rule start address alignment
+ * @tbl_hdr_width: Width of the header structure in bytes
+ * @tbl_addr_mask: Masking for Table address
+ * @rule_max_prio: Max possible priority of a rule
+ * @rule_min_prio: Min possible priority of a rule
+ * @low_rule_id: Low value of Rule ID that can be used
+ * @rule_id_bit_len: Rule ID bit length
+ * @rule_buf_size: Max size rule may utilize.
+ * @write_val_to_hdr: Write address or offset to header entry
+ * @create_flt_bitmap: Create bitmap in H/W format using given bitmap
+ * @create_tbl_addr: Given raw table address, create H/W formatted one
+ * @parse_tbl_addr: Parse the given H/W address (hdr format)
+ * @rt_generate_hw_rule: Generate RT rule in H/W format
+ * @flt_generate_hw_rule: Generate FLT rule in H/W format
+ * @flt_generate_eq: Generate flt equation attributes from rule attributes
+ * @rt_parse_hw_rule: Parse rt rule read from H/W
+ * @flt_parse_hw_rule: Parse flt rule read from H/W
+ * @eq_bitfield: Array of the bit fields of the supported equations
+ */
+struct ipahal_fltrt_obj {
+	bool support_hash;
+	u32 tbl_width;
+	u32 sysaddr_alignment;
+	u32 lcladdr_alignment;
+	u32 blk_sz_alignment;
+	u32 rule_start_alignment;
+	u32 tbl_hdr_width;
+	u32 tbl_addr_mask;
+	int rule_max_prio;
+	int rule_min_prio;
+	u32 low_rule_id;
+	u32 rule_id_bit_len;
+	u32 rule_buf_size;
+	u8* (*write_val_to_hdr)(u64 val, u8 *hdr);
+	u64 (*create_flt_bitmap)(u64 ep_bitmap);
+	u64 (*create_tbl_addr)(bool is_sys, u64 addr);
+	void (*parse_tbl_addr)(u64 hwaddr, u64 *addr, bool *is_sys);
+	int (*rt_generate_hw_rule)(struct ipahal_rt_rule_gen_params *params,
+		u32 *hw_len, u8 *buf);
+	int (*flt_generate_hw_rule)(struct ipahal_flt_rule_gen_params *params,
+		u32 *hw_len, u8 *buf);
+	int (*flt_generate_eq)(enum ipa_ip_type ipt,
+		const struct ipa_rule_attrib *attrib,
+		struct ipa_ipfltri_rule_eq *eq_atrb);
+	int (*rt_parse_hw_rule)(u8 *addr, struct ipahal_rt_rule_entry *rule);
+	int (*flt_parse_hw_rule)(u8 *addr, struct ipahal_flt_rule_entry *rule);
+	u8 eq_bitfield[IPA_EQ_MAX];
+};
+
+
+/*
+ * ipa_fltrt_create_flt_bitmap() - Build the H/W filter endpoint bitmap
+ * @ep_bitmap: Raw endpoint bitmap
+ *
+ * Shift the endpoint bitmap up by one bit. Bit 0 is the global
+ * configuration entry, which is possible at IPA3 but not used.
+ */
+static u64 ipa_fltrt_create_flt_bitmap(u64 ep_bitmap)
+{
+	u64 flt_bitmap = ep_bitmap << 1;
+
+	return flt_bitmap & ~0x1;
+}
+
+/*
+ * ipa_fltrt_create_tbl_addr() - Create H/W formatted table address
+ * @is_sys: Does the table reside in system (DDR) memory?
+ *  If not, @addr is treated as an offset into local (SRAM) memory.
+ * @addr: Raw table address (system) or table offset (local)
+ *
+ * Validates the required alignment and, for local tables, encodes the
+ * offset into the H/W address format (8B units, shifted into place,
+ * local bit set). Returns 0 (after asserting) on misaligned input.
+ */
+static u64 ipa_fltrt_create_tbl_addr(bool is_sys, u64 addr)
+{
+	if (is_sys) {
+		/* System addresses are used as-is, but must be aligned */
+		if (addr & IPA3_0_HW_TBL_SYSADDR_ALIGNMENT) {
+			IPAHAL_ERR(
+				"sys addr is not aligned accordingly addr=0x%pad\n",
+				&addr);
+			ipa_assert();
+			return 0;
+		}
+	} else {
+		if (addr & IPA3_0_HW_TBL_LCLADDR_ALIGNMENT) {
+			IPAHAL_ERR("addr/ofst isn't lcl addr aligned %llu\n",
+				addr);
+			ipa_assert();
+			return 0;
+		}
+		/*
+		 * for local tables (at sram) offsets is used as tables
+		 * addresses. offset need to be in 8B units
+		 * (local address aligned) and left shifted to its place.
+		 * Local bit need to be enabled.
+		 */
+		addr /= IPA3_0_HW_TBL_LCLADDR_ALIGNMENT + 1;
+		addr *= IPA3_0_HW_TBL_ADDR_MASK + 1;
+		addr += 1;
+	}
+
+	return addr;
+}
+
+/*
+ * ipa_fltrt_parse_tbl_addr() - Parse H/W formatted table address
+ * @hwaddr: Table address in H/W (hdr) format
+ * @addr: Out: raw address (system) or table offset (local)
+ * @is_sys: Out: Does the address refer to system (DDR) memory?
+ *
+ * Inverse of ipa_fltrt_create_tbl_addr(): local addresses are decoded
+ * back from the shifted 8B-unit encoding.
+ */
+static void ipa_fltrt_parse_tbl_addr(u64 hwaddr, u64 *addr, bool *is_sys)
+{
+	IPAHAL_DBG("Parsing hwaddr 0x%llx\n", hwaddr);
+
+	/* Bit 0 is the local/system flag */
+	*is_sys = !(hwaddr & 0x1);
+	/* (~0ULL - 1) == ~1ULL: clear the flag bit before using the addr */
+	hwaddr &= (~0ULL - 1);
+	if (hwaddr & IPA3_0_HW_TBL_SYSADDR_ALIGNMENT) {
+		IPAHAL_ERR(
+			"sys addr is not aligned accordingly addr=0x%pad\n",
+			&hwaddr);
+		ipa_assert();
+		return;
+	}
+
+	if (!*is_sys) {
+		hwaddr /= IPA3_0_HW_TBL_ADDR_MASK + 1;
+		hwaddr *= IPA3_0_HW_TBL_LCLADDR_ALIGNMENT + 1;
+	}
+
+	*addr = hwaddr;
+}
+
+/* Update these tables if the number of equations changes */
+static const int ipa3_0_ofst_meq32[] = { IPA_OFFSET_MEQ32_0,
+ IPA_OFFSET_MEQ32_1};
+static const int ipa3_0_ofst_meq128[] = { IPA_OFFSET_MEQ128_0,
+ IPA_OFFSET_MEQ128_1};
+static const int ipa3_0_ihl_ofst_rng16[] = { IPA_IHL_OFFSET_RANGE16_0,
+ IPA_IHL_OFFSET_RANGE16_1};
+static const int ipa3_0_ihl_ofst_meq32[] = { IPA_IHL_OFFSET_MEQ32_0,
+ IPA_IHL_OFFSET_MEQ32_1};
+
+static int ipa_fltrt_generate_hw_rule_bdy(enum ipa_ip_type ipt,
+ const struct ipa_rule_attrib *attrib, u8 **buf, u16 *en_rule);
+static int ipa_fltrt_generate_hw_rule_bdy_from_eq(
+ const struct ipa_ipfltri_rule_eq *attrib, u8 **buf);
+static int ipa_flt_generate_eq_ip4(enum ipa_ip_type ip,
+ const struct ipa_rule_attrib *attrib,
+ struct ipa_ipfltri_rule_eq *eq_atrb);
+static int ipa_flt_generate_eq_ip6(enum ipa_ip_type ip,
+ const struct ipa_rule_attrib *attrib,
+ struct ipa_ipfltri_rule_eq *eq_atrb);
+static int ipa_flt_generate_eq(enum ipa_ip_type ipt,
+ const struct ipa_rule_attrib *attrib,
+ struct ipa_ipfltri_rule_eq *eq_atrb);
+static int ipa_rt_parse_hw_rule(u8 *addr,
+ struct ipahal_rt_rule_entry *rule);
+static int ipa_flt_parse_hw_rule(u8 *addr,
+ struct ipahal_flt_rule_entry *rule);
+
+/* Has the given equation-register array been exhausted? */
+#define IPA_IS_RAN_OUT_OF_EQ(__eq_array, __eq_index) \
+	(ARRAY_SIZE(__eq_array) <= (__eq_index))
+
+/* Bit pattern of the given equation in the en_rule bitmap of current H/W */
+#define IPA_GET_RULE_EQ_BIT_PTRN(__eq) \
+	(BIT(ipahal_fltrt_objs[ipahal_ctx->hw_type].eq_bitfield[(__eq)]))
+
+/*
+ * ipa_fltrt_rule_generation_err_check() - check basic validity on the rule
+ *  attribs before starting building it
+ *  checks that ipv4 attribs are not used on ipv6 rules and vice-versa
+ * @ipt: IP address type
+ * @attrib: IPA rule attribute
+ *
+ * Return: 0 on success, -EPERM on failure
+ */
+static int ipa_fltrt_rule_generation_err_check(
+	enum ipa_ip_type ipt, const struct ipa_rule_attrib *attrib)
+{
+	if (ipt == IPA_IP_v4) {
+		if (attrib->attrib_mask & IPA_FLT_NEXT_HDR ||
+		    attrib->attrib_mask & IPA_FLT_TC ||
+		    attrib->attrib_mask & IPA_FLT_FLOW_LABEL) {
+			IPAHAL_ERR("v6 attrib's specified for v4 rule\n");
+			return -EPERM;
+		}
+	} else if (ipt == IPA_IP_v6) {
+		if (attrib->attrib_mask & IPA_FLT_TOS ||
+		    attrib->attrib_mask & IPA_FLT_PROTOCOL) {
+			IPAHAL_ERR("v4 attrib's specified for v6 rule\n");
+			return -EPERM;
+		}
+	} else {
+		IPAHAL_ERR("unsupported ip %d\n", ipt);
+		return -EPERM;
+	}
+
+	return 0;
+}
+
+/*
+ * ipa_rt_gen_hw_rule() - Generate an RT rule in H/W format (IPAv3)
+ * @params: Rule generation params (dst pipe, hdr info, priority, id, rule)
+ * @hw_len: In: expected rule length in bytes, or 0 to just compute it;
+ *  Out: the generated rule length in bytes
+ * @buf: Buffer to write the H/W formatted rule into
+ *
+ * Builds the rule header (destination pipe, header offset/type, priority,
+ * rule id) followed by the equations body generated from the rule attribs.
+ *
+ * Return: 0 on success, negative on failure
+ */
+static int ipa_rt_gen_hw_rule(struct ipahal_rt_rule_gen_params *params,
+	u32 *hw_len, u8 *buf)
+{
+	struct ipa3_0_rt_rule_hw_hdr *rule_hdr;
+	u8 *start;
+	u16 en_rule = 0;
+
+	start = buf;
+	rule_hdr = (struct ipa3_0_rt_rule_hw_hdr *)buf;
+
+	ipa_assert_on(params->dst_pipe_idx & ~0x1F);
+	rule_hdr->u.hdr.pipe_dest_idx = params->dst_pipe_idx;
+	switch (params->hdr_type) {
+	case IPAHAL_RT_RULE_HDR_PROC_CTX:
+		rule_hdr->u.hdr.system = !params->hdr_lcl;
+		rule_hdr->u.hdr.proc_ctx = 1;
+		/* proc ctx offsets are expressed in 32B units */
+		ipa_assert_on(params->hdr_ofst & 31);
+		rule_hdr->u.hdr.hdr_offset = (params->hdr_ofst) >> 5;
+		break;
+	case IPAHAL_RT_RULE_HDR_RAW:
+		rule_hdr->u.hdr.system = !params->hdr_lcl;
+		rule_hdr->u.hdr.proc_ctx = 0;
+		/* raw hdr offsets are expressed in 4B units */
+		ipa_assert_on(params->hdr_ofst & 3);
+		rule_hdr->u.hdr.hdr_offset = (params->hdr_ofst) >> 2;
+		break;
+	case IPAHAL_RT_RULE_HDR_NONE:
+		rule_hdr->u.hdr.system = !params->hdr_lcl;
+		rule_hdr->u.hdr.proc_ctx = 0;
+		rule_hdr->u.hdr.hdr_offset = 0;
+		break;
+	default:
+		IPAHAL_ERR("Invalid HDR type %d\n", params->hdr_type);
+		WARN_ON(1);
+		return -EINVAL;
+	}
+
+	ipa_assert_on(params->priority & ~0x3FF);
+	rule_hdr->u.hdr.priority = params->priority;
+	rule_hdr->u.hdr.retain_hdr = params->rule->retain_hdr ? 0x1 : 0x0;
+	/* rule id must fit its bit field and not be the all-ones value */
+	ipa_assert_on(params->id & ~((1 << IPA3_0_RULE_ID_BIT_LEN) - 1));
+	ipa_assert_on(params->id == ((1 << IPA3_0_RULE_ID_BIT_LEN) - 1));
+	rule_hdr->u.hdr.rule_id = params->id;
+
+	buf += sizeof(struct ipa3_0_rt_rule_hw_hdr);
+
+	if (ipa_fltrt_generate_hw_rule_bdy(params->ipt, &params->rule->attrib,
+		&buf, &en_rule)) {
+		IPAHAL_ERR("fail to generate hw rule\n");
+		return -EPERM;
+	}
+	rule_hdr->u.hdr.en_rule = en_rule;
+
+	IPAHAL_DBG("en_rule 0x%x\n", en_rule);
+	ipa_write_64(rule_hdr->u.word, (u8 *)rule_hdr);
+
+	if (*hw_len == 0) {
+		*hw_len = buf - start;
+	} else if (*hw_len != (buf - start)) {
+		IPAHAL_ERR("hw_len differs b/w passed=0x%x calc=%td\n",
+			*hw_len, (buf - start));
+		return -EPERM;
+	}
+
+	return 0;
+}
+
+/*
+ * ipa_flt_gen_hw_rule() - Generate a FLT rule in H/W format (IPAv3)
+ * @params: Rule generation params (action, rt tbl idx, priority, id, rule)
+ * @hw_len: In: expected rule length in bytes, or 0 to just compute it;
+ *  Out: the generated rule length in bytes
+ * @buf: Buffer to write the H/W formatted rule into
+ *
+ * Return: 0 on success, negative on failure
+ */
+static int ipa_flt_gen_hw_rule(struct ipahal_flt_rule_gen_params *params,
+	u32 *hw_len, u8 *buf)
+{
+	struct ipa3_0_flt_rule_hw_hdr *rule_hdr;
+	u8 *start;
+	u16 en_rule = 0;
+
+	start = buf;
+	rule_hdr = (struct ipa3_0_flt_rule_hw_hdr *)buf;
+
+	switch (params->rule->action) {
+	case IPA_PASS_TO_ROUTING:
+		rule_hdr->u.hdr.action = 0x0;
+		break;
+	case IPA_PASS_TO_SRC_NAT:
+		rule_hdr->u.hdr.action = 0x1;
+		break;
+	case IPA_PASS_TO_DST_NAT:
+		rule_hdr->u.hdr.action = 0x2;
+		break;
+	case IPA_PASS_TO_EXCEPTION:
+		rule_hdr->u.hdr.action = 0x3;
+		break;
+	default:
+		IPAHAL_ERR("Invalid Rule Action %d\n", params->rule->action);
+		WARN_ON(1);
+		return -EINVAL;
+	}
+	ipa_assert_on(params->rt_tbl_idx & ~0x1F);
+	rule_hdr->u.hdr.rt_tbl_idx = params->rt_tbl_idx;
+	rule_hdr->u.hdr.retain_hdr = params->rule->retain_hdr ? 0x1 : 0x0;
+	rule_hdr->u.hdr.rsvd1 = 0;
+	rule_hdr->u.hdr.rsvd2 = 0;
+	rule_hdr->u.hdr.rsvd3 = 0;
+
+	ipa_assert_on(params->priority & ~0x3FF);
+	rule_hdr->u.hdr.priority = params->priority;
+	/* rule id must fit its bit field and not be the all-ones value */
+	ipa_assert_on(params->id & ~((1 << IPA3_0_RULE_ID_BIT_LEN) - 1));
+	ipa_assert_on(params->id == ((1 << IPA3_0_RULE_ID_BIT_LEN) - 1));
+	rule_hdr->u.hdr.rule_id = params->id;
+
+	buf += sizeof(struct ipa3_0_flt_rule_hw_hdr);
+
+	/* the eq body may come pre-built (eq_attrib) or from rule attribs */
+	if (params->rule->eq_attrib_type) {
+		if (ipa_fltrt_generate_hw_rule_bdy_from_eq(
+			&params->rule->eq_attrib, &buf)) {
+			IPAHAL_ERR("fail to generate hw rule from eq\n");
+			return -EPERM;
+		}
+		en_rule = params->rule->eq_attrib.rule_eq_bitmap;
+	} else {
+		if (ipa_fltrt_generate_hw_rule_bdy(params->ipt,
+			&params->rule->attrib, &buf, &en_rule)) {
+			IPAHAL_ERR("fail to generate hw rule\n");
+			return -EPERM;
+		}
+	}
+	rule_hdr->u.hdr.en_rule = en_rule;
+
+	IPAHAL_DBG("en_rule=0x%x, action=%d, rt_idx=%d, retain_hdr=%d\n",
+		en_rule,
+		rule_hdr->u.hdr.action,
+		rule_hdr->u.hdr.rt_tbl_idx,
+		rule_hdr->u.hdr.retain_hdr);
+	IPAHAL_DBG("priority=%d, rule_id=%d\n",
+		rule_hdr->u.hdr.priority,
+		rule_hdr->u.hdr.rule_id);
+
+	ipa_write_64(rule_hdr->u.word, (u8 *)rule_hdr);
+
+	if (*hw_len == 0) {
+		*hw_len = buf - start;
+	} else if (*hw_len != (buf - start)) {
+		IPAHAL_ERR("hw_len differs b/w passed=0x%x calc=%td\n",
+			*hw_len, (buf - start));
+		return -EPERM;
+	}
+
+	return 0;
+}
+
+/*
+ * This array contains the FLT/RT info for IPAv3 and later.
+ * All the information on IPAv3 are statically defined below.
+ * If information is missing regarding on some IPA version,
+ * the init function will fill it with the information from the previous
+ * IPA version.
+ * Information is considered missing if all of the fields are 0.
+ */
+static struct ipahal_fltrt_obj ipahal_fltrt_objs[IPA_HW_MAX] = {
+	/* IPAv3 */
+	[IPA_HW_v3_0] = {
+		.support_hash		= true,
+		.tbl_width		= IPA3_0_HW_TBL_WIDTH,
+		.sysaddr_alignment	= IPA3_0_HW_TBL_SYSADDR_ALIGNMENT,
+		.lcladdr_alignment	= IPA3_0_HW_TBL_LCLADDR_ALIGNMENT,
+		.blk_sz_alignment	= IPA3_0_HW_TBL_BLK_SIZE_ALIGNMENT,
+		.rule_start_alignment	= IPA3_0_HW_RULE_START_ALIGNMENT,
+		.tbl_hdr_width		= IPA3_0_HW_TBL_HDR_WIDTH,
+		.tbl_addr_mask		= IPA3_0_HW_TBL_ADDR_MASK,
+		.rule_max_prio		= IPA3_0_RULE_MAX_PRIORITY,
+		.rule_min_prio		= IPA3_0_RULE_MIN_PRIORITY,
+		.low_rule_id		= IPA3_0_LOW_RULE_ID,
+		.rule_id_bit_len	= IPA3_0_RULE_ID_BIT_LEN,
+		.rule_buf_size		= IPA3_0_HW_RULE_BUF_SIZE,
+		.write_val_to_hdr	= ipa_write_64,
+		.create_flt_bitmap	= ipa_fltrt_create_flt_bitmap,
+		.create_tbl_addr	= ipa_fltrt_create_tbl_addr,
+		.parse_tbl_addr		= ipa_fltrt_parse_tbl_addr,
+		.rt_generate_hw_rule	= ipa_rt_gen_hw_rule,
+		.flt_generate_hw_rule	= ipa_flt_gen_hw_rule,
+		.flt_generate_eq	= ipa_flt_generate_eq,
+		.rt_parse_hw_rule	= ipa_rt_parse_hw_rule,
+		.flt_parse_hw_rule	= ipa_flt_parse_hw_rule,
+		.eq_bitfield = {
+			[IPA_TOS_EQ]			= 0,
+			[IPA_PROTOCOL_EQ]		= 1,
+			[IPA_TC_EQ]			= 2,
+			[IPA_OFFSET_MEQ128_0]		= 3,
+			[IPA_OFFSET_MEQ128_1]		= 4,
+			[IPA_OFFSET_MEQ32_0]		= 5,
+			[IPA_OFFSET_MEQ32_1]		= 6,
+			[IPA_IHL_OFFSET_MEQ32_0]	= 7,
+			[IPA_IHL_OFFSET_MEQ32_1]	= 8,
+			[IPA_METADATA_COMPARE]		= 9,
+			[IPA_IHL_OFFSET_RANGE16_0]	= 10,
+			[IPA_IHL_OFFSET_RANGE16_1]	= 11,
+			[IPA_IHL_OFFSET_EQ_32]		= 12,
+			[IPA_IHL_OFFSET_EQ_16]		= 13,
+			[IPA_FL_EQ]			= 14,
+			[IPA_IS_FRAG]			= 15,
+		},
+	},
+};
+
+/*
+ * ipa_flt_generate_eq() - Generate flt equation attributes from rule attribs
+ * @ipt: IP address type of the rule
+ * @attrib: Rule attributes to translate
+ * @eq_atrb: Out: generated equation attributes
+ *
+ * Return: 0 on success, -EPERM on failure
+ */
+static int ipa_flt_generate_eq(enum ipa_ip_type ipt,
+		const struct ipa_rule_attrib *attrib,
+		struct ipa_ipfltri_rule_eq *eq_atrb)
+{
+	if (ipa_fltrt_rule_generation_err_check(ipt, attrib))
+		return -EPERM;
+
+	switch (ipt) {
+	case IPA_IP_v4:
+		if (ipa_flt_generate_eq_ip4(ipt, attrib, eq_atrb)) {
+			IPAHAL_ERR("failed to build ipv4 flt eq rule\n");
+			return -EPERM;
+		}
+		break;
+	case IPA_IP_v6:
+		if (ipa_flt_generate_eq_ip6(ipt, attrib, eq_atrb)) {
+			IPAHAL_ERR("failed to build ipv6 flt eq rule\n");
+			return -EPERM;
+		}
+		break;
+	default:
+		IPAHAL_ERR("unsupported ip %d\n", ipt);
+		return -EPERM;
+	}
+
+	/*
+	 * default "rule" means no attributes set -> map to
+	 * OFFSET_MEQ32_0 with mask of 0 and val of 0 and offset 0
+	 */
+	if (attrib->attrib_mask == 0) {
+		eq_atrb->rule_eq_bitmap = 0;
+		eq_atrb->rule_eq_bitmap |= IPA_GET_RULE_EQ_BIT_PTRN(
+			IPA_OFFSET_MEQ32_0);
+		eq_atrb->offset_meq_32[0].offset = 0;
+		eq_atrb->offset_meq_32[0].mask = 0;
+		eq_atrb->offset_meq_32[0].value = 0;
+	}
+
+	return 0;
+}
+
+/*
+ * ipa_fltrt_generate_mac_addr_hw_rule() - Emit a MAC address meq128 equation
+ * @extra: In/Out: cursor into the extra words buffer (advanced by this call)
+ * @rest: In/Out: cursor into the rest words buffer (advanced by this call)
+ * @hdr_mac_addr_offset: Offset of the MAC address relative to the L2 header
+ *  (may be negative, i.e. before the payload start)
+ * @mac_addr_mask: Mask to apply on the MAC address bytes
+ * @mac_addr: MAC address value to match
+ *
+ * The 128-bit mask/value pair is laid out with the low 64-bit words zeroed
+ * and the 6 MAC bytes written in reverse order into the high words.
+ */
+static void ipa_fltrt_generate_mac_addr_hw_rule(u8 **extra, u8 **rest,
+	u8 hdr_mac_addr_offset,
+	const uint8_t mac_addr_mask[ETH_ALEN],
+	const uint8_t mac_addr[ETH_ALEN])
+{
+	int i;
+
+	*extra = ipa_write_8(hdr_mac_addr_offset, *extra);
+
+	/* LSB MASK and ADDR */
+	*rest = ipa_write_64(0, *rest);
+	*rest = ipa_write_64(0, *rest);
+
+	/* MSB MASK and ADDR */
+	*rest = ipa_write_16(0, *rest);
+	for (i = 5; i >= 0; i--)
+		*rest = ipa_write_8(mac_addr_mask[i], *rest);
+	*rest = ipa_write_16(0, *rest);
+	for (i = 5; i >= 0; i--)
+		*rest = ipa_write_8(mac_addr[i], *rest);
+}
+
+/*
+ * ipa_fltrt_generate_hw_rule_bdy_ip4() - Generate IPv4 rule equations body
+ * @en_rule: Out: bitmap of the equations that were enabled
+ * @attrib: Rule attributes to translate into H/W equations
+ * @extra_wrds: In/Out: cursor into the extra words buffer
+ * @rest_wrds: In/Out: cursor into the rest words buffer
+ *
+ * Each supported attribute consumes an equation register of its class
+ * (meq32, meq128, ihl_meq32 or ihl_rng16). Fails with -EPERM when a
+ * register class is exhausted or a port range is invalid; the buffer
+ * cursors are written back to the callers in either case.
+ */
+static int ipa_fltrt_generate_hw_rule_bdy_ip4(u16 *en_rule,
+	const struct ipa_rule_attrib *attrib,
+	u8 **extra_wrds, u8 **rest_wrds)
+{
+	u8 *extra = *extra_wrds;
+	u8 *rest = *rest_wrds;
+	u8 ofst_meq32 = 0;
+	u8 ihl_ofst_rng16 = 0;
+	u8 ihl_ofst_meq32 = 0;
+	u8 ofst_meq128 = 0;
+	int rc = 0;
+
+	if (attrib->attrib_mask & IPA_FLT_TOS) {
+		*en_rule |= IPA_GET_RULE_EQ_BIT_PTRN(IPA_TOS_EQ);
+		extra = ipa_write_8(attrib->u.v4.tos, extra);
+	}
+
+	if (attrib->attrib_mask & IPA_FLT_PROTOCOL) {
+		*en_rule |= IPA_GET_RULE_EQ_BIT_PTRN(IPA_PROTOCOL_EQ);
+		extra = ipa_write_8(attrib->u.v4.protocol, extra);
+	}
+
+	if (attrib->attrib_mask & IPA_FLT_MAC_DST_ADDR_ETHER_II) {
+		if (IPA_IS_RAN_OUT_OF_EQ(ipa3_0_ofst_meq128, ofst_meq128)) {
+			IPAHAL_ERR("ran out of meq128 eq\n");
+			goto err;
+		}
+		*en_rule |= IPA_GET_RULE_EQ_BIT_PTRN(
+			ipa3_0_ofst_meq128[ofst_meq128]);
+
+		/* -14 => offset of dst mac addr in Ethernet II hdr */
+		ipa_fltrt_generate_mac_addr_hw_rule(
+			&extra,
+			&rest,
+			-14,
+			attrib->dst_mac_addr_mask,
+			attrib->dst_mac_addr);
+
+		ofst_meq128++;
+	}
+
+	if (attrib->attrib_mask & IPA_FLT_MAC_SRC_ADDR_ETHER_II) {
+		if (IPA_IS_RAN_OUT_OF_EQ(ipa3_0_ofst_meq128, ofst_meq128)) {
+			IPAHAL_ERR("ran out of meq128 eq\n");
+			goto err;
+		}
+		*en_rule |= IPA_GET_RULE_EQ_BIT_PTRN(
+			ipa3_0_ofst_meq128[ofst_meq128]);
+
+		/* -8 => offset of src mac addr in Ethernet II hdr */
+		ipa_fltrt_generate_mac_addr_hw_rule(
+			&extra,
+			&rest,
+			-8,
+			attrib->src_mac_addr_mask,
+			attrib->src_mac_addr);
+
+		ofst_meq128++;
+	}
+
+	if (attrib->attrib_mask & IPA_FLT_MAC_DST_ADDR_802_3) {
+		if (IPA_IS_RAN_OUT_OF_EQ(ipa3_0_ofst_meq128, ofst_meq128)) {
+			IPAHAL_ERR("ran out of meq128 eq\n");
+			goto err;
+		}
+		*en_rule |= IPA_GET_RULE_EQ_BIT_PTRN(
+			ipa3_0_ofst_meq128[ofst_meq128]);
+
+		/* -22 => offset of dst mac addr in 802.3 hdr */
+		ipa_fltrt_generate_mac_addr_hw_rule(
+			&extra,
+			&rest,
+			-22,
+			attrib->dst_mac_addr_mask,
+			attrib->dst_mac_addr);
+
+		ofst_meq128++;
+	}
+
+	if (attrib->attrib_mask & IPA_FLT_MAC_SRC_ADDR_802_3) {
+		if (IPA_IS_RAN_OUT_OF_EQ(ipa3_0_ofst_meq128, ofst_meq128)) {
+			IPAHAL_ERR("ran out of meq128 eq\n");
+			goto err;
+		}
+		*en_rule |= IPA_GET_RULE_EQ_BIT_PTRN(
+			ipa3_0_ofst_meq128[ofst_meq128]);
+
+		/* -16 => offset of src mac addr in 802.3 hdr */
+		ipa_fltrt_generate_mac_addr_hw_rule(
+			&extra,
+			&rest,
+			-16,
+			attrib->src_mac_addr_mask,
+			attrib->src_mac_addr);
+
+		ofst_meq128++;
+	}
+
+	if (attrib->attrib_mask & IPA_FLT_TOS_MASKED) {
+		if (IPA_IS_RAN_OUT_OF_EQ(ipa3_0_ofst_meq32, ofst_meq32)) {
+			IPAHAL_ERR("ran out of meq32 eq\n");
+			goto err;
+		}
+		*en_rule |= IPA_GET_RULE_EQ_BIT_PTRN(
+			ipa3_0_ofst_meq32[ofst_meq32]);
+		/* 0 => offset of TOS in v4 header */
+		extra = ipa_write_8(0, extra);
+		rest = ipa_write_32((attrib->tos_mask << 16), rest);
+		rest = ipa_write_32((attrib->tos_value << 16), rest);
+		ofst_meq32++;
+	}
+
+	if (attrib->attrib_mask & IPA_FLT_SRC_ADDR) {
+		if (IPA_IS_RAN_OUT_OF_EQ(ipa3_0_ofst_meq32, ofst_meq32)) {
+			IPAHAL_ERR("ran out of meq32 eq\n");
+			goto err;
+		}
+		*en_rule |= IPA_GET_RULE_EQ_BIT_PTRN(
+			ipa3_0_ofst_meq32[ofst_meq32]);
+		/* 12 => offset of src ip in v4 header */
+		extra = ipa_write_8(12, extra);
+		rest = ipa_write_32(attrib->u.v4.src_addr_mask, rest);
+		rest = ipa_write_32(attrib->u.v4.src_addr, rest);
+		ofst_meq32++;
+	}
+
+	if (attrib->attrib_mask & IPA_FLT_DST_ADDR) {
+		if (IPA_IS_RAN_OUT_OF_EQ(ipa3_0_ofst_meq32, ofst_meq32)) {
+			IPAHAL_ERR("ran out of meq32 eq\n");
+			goto err;
+		}
+		*en_rule |= IPA_GET_RULE_EQ_BIT_PTRN(
+			ipa3_0_ofst_meq32[ofst_meq32]);
+		/* 16 => offset of dst ip in v4 header */
+		extra = ipa_write_8(16, extra);
+		rest = ipa_write_32(attrib->u.v4.dst_addr_mask, rest);
+		rest = ipa_write_32(attrib->u.v4.dst_addr, rest);
+		ofst_meq32++;
+	}
+
+	if (attrib->attrib_mask & IPA_FLT_MAC_ETHER_TYPE) {
+		if (IPA_IS_RAN_OUT_OF_EQ(ipa3_0_ofst_meq32, ofst_meq32)) {
+			IPAHAL_ERR("ran out of meq32 eq\n");
+			goto err;
+		}
+		*en_rule |= IPA_GET_RULE_EQ_BIT_PTRN(
+			ipa3_0_ofst_meq32[ofst_meq32]);
+		/* -2 => offset of ether type in L2 hdr */
+		extra = ipa_write_8((u8)-2, extra);
+		/* mask and value are each written as 16-bit zero + type */
+		rest = ipa_write_16(0, rest);
+		rest = ipa_write_16(htons(attrib->ether_type), rest);
+		rest = ipa_write_16(0, rest);
+		rest = ipa_write_16(htons(attrib->ether_type), rest);
+		ofst_meq32++;
+	}
+
+	if (attrib->attrib_mask & IPA_FLT_TYPE) {
+		if (IPA_IS_RAN_OUT_OF_EQ(ipa3_0_ihl_ofst_meq32,
+			ihl_ofst_meq32)) {
+			IPAHAL_ERR("ran out of ihl_meq32 eq\n");
+			goto err;
+		}
+		*en_rule |= IPA_GET_RULE_EQ_BIT_PTRN(
+			ipa3_0_ihl_ofst_meq32[ihl_ofst_meq32]);
+		/* 0 => offset of type after v4 header */
+		extra = ipa_write_8(0, extra);
+		rest = ipa_write_32(0xFF, rest);
+		rest = ipa_write_32(attrib->type, rest);
+		ihl_ofst_meq32++;
+	}
+
+	if (attrib->attrib_mask & IPA_FLT_CODE) {
+		if (IPA_IS_RAN_OUT_OF_EQ(ipa3_0_ihl_ofst_meq32,
+			ihl_ofst_meq32)) {
+			IPAHAL_ERR("ran out of ihl_meq32 eq\n");
+			goto err;
+		}
+		*en_rule |= IPA_GET_RULE_EQ_BIT_PTRN(
+			ipa3_0_ihl_ofst_meq32[ihl_ofst_meq32]);
+		/* 1 => offset of code after v4 header */
+		extra = ipa_write_8(1, extra);
+		rest = ipa_write_32(0xFF, rest);
+		rest = ipa_write_32(attrib->code, rest);
+		ihl_ofst_meq32++;
+	}
+
+	if (attrib->attrib_mask & IPA_FLT_SPI) {
+		if (IPA_IS_RAN_OUT_OF_EQ(ipa3_0_ihl_ofst_meq32,
+			ihl_ofst_meq32)) {
+			IPAHAL_ERR("ran out of ihl_meq32 eq\n");
+			goto err;
+		}
+		*en_rule |= IPA_GET_RULE_EQ_BIT_PTRN(
+			ipa3_0_ihl_ofst_meq32[ihl_ofst_meq32]);
+		/* 0 => offset of SPI after v4 header */
+		extra = ipa_write_8(0, extra);
+		rest = ipa_write_32(0xFFFFFFFF, rest);
+		rest = ipa_write_32(attrib->spi, rest);
+		ihl_ofst_meq32++;
+	}
+
+	if (attrib->attrib_mask & IPA_FLT_META_DATA) {
+		*en_rule |= IPA_GET_RULE_EQ_BIT_PTRN(IPA_METADATA_COMPARE);
+		rest = ipa_write_32(attrib->meta_data_mask, rest);
+		rest = ipa_write_32(attrib->meta_data, rest);
+	}
+
+	if (attrib->attrib_mask & IPA_FLT_SRC_PORT_RANGE) {
+		if (IPA_IS_RAN_OUT_OF_EQ(ipa3_0_ihl_ofst_rng16,
+			ihl_ofst_rng16)) {
+			IPAHAL_ERR("ran out of ihl_rng16 eq\n");
+			goto err;
+		}
+		if (attrib->src_port_hi < attrib->src_port_lo) {
+			IPAHAL_ERR("bad src port range param\n");
+			goto err;
+		}
+		*en_rule |= IPA_GET_RULE_EQ_BIT_PTRN(
+			ipa3_0_ihl_ofst_rng16[ihl_ofst_rng16]);
+		/* 0 => offset of src port after v4 header */
+		extra = ipa_write_8(0, extra);
+		rest = ipa_write_16(attrib->src_port_hi, rest);
+		rest = ipa_write_16(attrib->src_port_lo, rest);
+		ihl_ofst_rng16++;
+	}
+
+	if (attrib->attrib_mask & IPA_FLT_DST_PORT_RANGE) {
+		if (IPA_IS_RAN_OUT_OF_EQ(ipa3_0_ihl_ofst_rng16,
+			ihl_ofst_rng16)) {
+			IPAHAL_ERR("ran out of ihl_rng16 eq\n");
+			goto err;
+		}
+		if (attrib->dst_port_hi < attrib->dst_port_lo) {
+			IPAHAL_ERR("bad dst port range param\n");
+			goto err;
+		}
+		*en_rule |= IPA_GET_RULE_EQ_BIT_PTRN(
+			ipa3_0_ihl_ofst_rng16[ihl_ofst_rng16]);
+		/* 2 => offset of dst port after v4 header */
+		extra = ipa_write_8(2, extra);
+		rest = ipa_write_16(attrib->dst_port_hi, rest);
+		rest = ipa_write_16(attrib->dst_port_lo, rest);
+		ihl_ofst_rng16++;
+	}
+
+	if (attrib->attrib_mask & IPA_FLT_SRC_PORT) {
+		if (IPA_IS_RAN_OUT_OF_EQ(ipa3_0_ihl_ofst_rng16,
+			ihl_ofst_rng16)) {
+			IPAHAL_ERR("ran out of ihl_rng16 eq\n");
+			goto err;
+		}
+		*en_rule |= IPA_GET_RULE_EQ_BIT_PTRN(
+			ipa3_0_ihl_ofst_rng16[ihl_ofst_rng16]);
+		/* 0 => offset of src port after v4 header */
+		extra = ipa_write_8(0, extra);
+		/* exact match: range with lo == hi == src_port */
+		rest = ipa_write_16(attrib->src_port, rest);
+		rest = ipa_write_16(attrib->src_port, rest);
+		ihl_ofst_rng16++;
+	}
+
+	if (attrib->attrib_mask & IPA_FLT_DST_PORT) {
+		if (IPA_IS_RAN_OUT_OF_EQ(ipa3_0_ihl_ofst_rng16,
+			ihl_ofst_rng16)) {
+			IPAHAL_ERR("ran out of ihl_rng16 eq\n");
+			goto err;
+		}
+		*en_rule |= IPA_GET_RULE_EQ_BIT_PTRN(
+			ipa3_0_ihl_ofst_rng16[ihl_ofst_rng16]);
+		/* 2 => offset of dst port after v4 header */
+		extra = ipa_write_8(2, extra);
+		/* exact match: range with lo == hi == dst_port */
+		rest = ipa_write_16(attrib->dst_port, rest);
+		rest = ipa_write_16(attrib->dst_port, rest);
+		ihl_ofst_rng16++;
+	}
+
+	if (attrib->attrib_mask & IPA_FLT_FRAGMENT)
+		*en_rule |= IPA_GET_RULE_EQ_BIT_PTRN(IPA_IS_FRAG);
+
+	goto done;
+
+err:
+	rc = -EPERM;
+done:
+	*extra_wrds = extra;
+	*rest_wrds = rest;
+	return rc;
+}
+
+static int ipa_fltrt_generate_hw_rule_bdy_ip6(u16 *en_rule,
+ const struct ipa_rule_attrib *attrib,
+ u8 **extra_wrds, u8 **rest_wrds)
+{
+ u8 *extra = *extra_wrds;
+ u8 *rest = *rest_wrds;
+ u8 ofst_meq32 = 0;
+ u8 ihl_ofst_rng16 = 0;
+ u8 ihl_ofst_meq32 = 0;
+ u8 ofst_meq128 = 0;
+ int rc = 0;
+
+ /* v6 code below assumes no extension headers TODO: fix this */
+
+ if (attrib->attrib_mask & IPA_FLT_NEXT_HDR) {
+ *en_rule |= IPA_GET_RULE_EQ_BIT_PTRN(IPA_PROTOCOL_EQ);
+ extra = ipa_write_8(attrib->u.v6.next_hdr, extra);
+ }
+
+ if (attrib->attrib_mask & IPA_FLT_TC) {
+ *en_rule |= IPA_GET_RULE_EQ_BIT_PTRN(IPA_TC_EQ);
+ extra = ipa_write_8(attrib->u.v6.tc, extra);
+ }
+
+ if (attrib->attrib_mask & IPA_FLT_SRC_ADDR) {
+ if (IPA_IS_RAN_OUT_OF_EQ(ipa3_0_ofst_meq128, ofst_meq128)) {
+ IPAHAL_ERR("ran out of meq128 eq\n");
+ goto err;
+ }
+ *en_rule |= IPA_GET_RULE_EQ_BIT_PTRN(
+ ipa3_0_ofst_meq128[ofst_meq128]);
+ /* 8 => offset of src ip in v6 header */
+ extra = ipa_write_8(8, extra);
+ rest = ipa_write_32(attrib->u.v6.src_addr_mask[3], rest);
+ rest = ipa_write_32(attrib->u.v6.src_addr_mask[2], rest);
+ rest = ipa_write_32(attrib->u.v6.src_addr[3], rest);
+ rest = ipa_write_32(attrib->u.v6.src_addr[2], rest);
+ rest = ipa_write_32(attrib->u.v6.src_addr_mask[1], rest);
+ rest = ipa_write_32(attrib->u.v6.src_addr_mask[0], rest);
+ rest = ipa_write_32(attrib->u.v6.src_addr[1], rest);
+ rest = ipa_write_32(attrib->u.v6.src_addr[0], rest);
+ ofst_meq128++;
+ }
+
+ if (attrib->attrib_mask & IPA_FLT_DST_ADDR) {
+ if (IPA_IS_RAN_OUT_OF_EQ(ipa3_0_ofst_meq128, ofst_meq128)) {
+ IPAHAL_ERR("ran out of meq128 eq\n");
+ goto err;
+ }
+ *en_rule |= IPA_GET_RULE_EQ_BIT_PTRN(
+ ipa3_0_ofst_meq128[ofst_meq128]);
+ /* 24 => offset of dst ip in v6 header */
+ extra = ipa_write_8(24, extra);
+ rest = ipa_write_32(attrib->u.v6.dst_addr_mask[3], rest);
+ rest = ipa_write_32(attrib->u.v6.dst_addr_mask[2], rest);
+ rest = ipa_write_32(attrib->u.v6.dst_addr[3], rest);
+ rest = ipa_write_32(attrib->u.v6.dst_addr[2], rest);
+ rest = ipa_write_32(attrib->u.v6.dst_addr_mask[1], rest);
+ rest = ipa_write_32(attrib->u.v6.dst_addr_mask[0], rest);
+ rest = ipa_write_32(attrib->u.v6.dst_addr[1], rest);
+ rest = ipa_write_32(attrib->u.v6.dst_addr[0], rest);
+ ofst_meq128++;
+ }
+
+ if (attrib->attrib_mask & IPA_FLT_TOS_MASKED) {
+ if (IPA_IS_RAN_OUT_OF_EQ(ipa3_0_ofst_meq128, ofst_meq128)) {
+ IPAHAL_ERR("ran out of meq128 eq\n");
+ goto err;
+ }
+ *en_rule |= IPA_GET_RULE_EQ_BIT_PTRN(
+ ipa3_0_ofst_meq128[ofst_meq128]);
+ /* 0 => offset of TOS in v6 header */
+ extra = ipa_write_8(0, extra);
+ rest = ipa_write_64(0, rest);
+ rest = ipa_write_64(0, rest);
+ rest = ipa_write_32(0, rest);
+ rest = ipa_write_32((attrib->tos_mask << 20), rest);
+ rest = ipa_write_32(0, rest);
+ rest = ipa_write_32((attrib->tos_value << 20), rest);
+ ofst_meq128++;
+ }
+
+ if (attrib->attrib_mask & IPA_FLT_MAC_DST_ADDR_ETHER_II) {
+ if (IPA_IS_RAN_OUT_OF_EQ(ipa3_0_ofst_meq128, ofst_meq128)) {
+ IPAHAL_ERR("ran out of meq128 eq\n");
+ goto err;
+ }
+ *en_rule |= IPA_GET_RULE_EQ_BIT_PTRN(
+ ipa3_0_ofst_meq128[ofst_meq128]);
+
+ /* -14 => offset of dst mac addr in Ethernet II hdr */
+ ipa_fltrt_generate_mac_addr_hw_rule(
+ &extra,
+ &rest,
+ -14,
+ attrib->dst_mac_addr_mask,
+ attrib->dst_mac_addr);
+
+ ofst_meq128++;
+ }
+
+ if (attrib->attrib_mask & IPA_FLT_MAC_SRC_ADDR_ETHER_II) {
+ if (IPA_IS_RAN_OUT_OF_EQ(ipa3_0_ofst_meq128, ofst_meq128)) {
+ IPAHAL_ERR("ran out of meq128 eq\n");
+ goto err;
+ }
+ *en_rule |= IPA_GET_RULE_EQ_BIT_PTRN(
+ ipa3_0_ofst_meq128[ofst_meq128]);
+
+ /* -8 => offset of src mac addr in Ethernet II hdr */
+ ipa_fltrt_generate_mac_addr_hw_rule(
+ &extra,
+ &rest,
+ -8,
+ attrib->src_mac_addr_mask,
+ attrib->src_mac_addr);
+
+ ofst_meq128++;
+ }
+
+ if (attrib->attrib_mask & IPA_FLT_MAC_DST_ADDR_802_3) {
+ if (IPA_IS_RAN_OUT_OF_EQ(ipa3_0_ofst_meq128, ofst_meq128)) {
+ IPAHAL_ERR("ran out of meq128 eq\n");
+ goto err;
+ }
+ *en_rule |= IPA_GET_RULE_EQ_BIT_PTRN(
+ ipa3_0_ofst_meq128[ofst_meq128]);
+
+ /* -22 => offset of dst mac addr in 802.3 hdr */
+ ipa_fltrt_generate_mac_addr_hw_rule(
+ &extra,
+ &rest,
+ -22,
+ attrib->dst_mac_addr_mask,
+ attrib->dst_mac_addr);
+
+ ofst_meq128++;
+ }
+
+ if (attrib->attrib_mask & IPA_FLT_MAC_SRC_ADDR_802_3) {
+ if (IPA_IS_RAN_OUT_OF_EQ(ipa3_0_ofst_meq128, ofst_meq128)) {
+ IPAHAL_ERR("ran out of meq128 eq\n");
+ goto err;
+ }
+ *en_rule |= IPA_GET_RULE_EQ_BIT_PTRN(
+ ipa3_0_ofst_meq128[ofst_meq128]);
+
+ /* -16 => offset of src mac addr in 802.3 hdr */
+ ipa_fltrt_generate_mac_addr_hw_rule(
+ &extra,
+ &rest,
+ -16,
+ attrib->src_mac_addr_mask,
+ attrib->src_mac_addr);
+
+ ofst_meq128++;
+ }
+
+ if (attrib->attrib_mask & IPA_FLT_MAC_ETHER_TYPE) {
+ if (IPA_IS_RAN_OUT_OF_EQ(ipa3_0_ofst_meq32, ofst_meq32)) {
+ IPAHAL_ERR("ran out of meq32 eq\n");
+ goto err;
+ }
+ *en_rule |= IPA_GET_RULE_EQ_BIT_PTRN(
+ ipa3_0_ofst_meq32[ofst_meq32]);
+ /* -2 => offset of ether type in L2 hdr */
+ extra = ipa_write_8((u8)-2, extra);
+ rest = ipa_write_16(0, rest);
+ rest = ipa_write_16(htons(attrib->ether_type), rest);
+ rest = ipa_write_16(0, rest);
+ rest = ipa_write_16(htons(attrib->ether_type), rest);
+ ofst_meq32++;
+ }
+
+ if (attrib->attrib_mask & IPA_FLT_TYPE) {
+ if (IPA_IS_RAN_OUT_OF_EQ(ipa3_0_ihl_ofst_meq32,
+ ihl_ofst_meq32)) {
+ IPAHAL_ERR("ran out of ihl_meq32 eq\n");
+ goto err;
+ }
+ *en_rule |= IPA_GET_RULE_EQ_BIT_PTRN(
+ ipa3_0_ihl_ofst_meq32[ihl_ofst_meq32]);
+ /* 0 => offset of type after v6 header */
+ extra = ipa_write_8(0, extra);
+ rest = ipa_write_32(0xFF, rest);
+ rest = ipa_write_32(attrib->type, rest);
+ ihl_ofst_meq32++;
+ }
+
+ if (attrib->attrib_mask & IPA_FLT_CODE) {
+ if (IPA_IS_RAN_OUT_OF_EQ(ipa3_0_ihl_ofst_meq32,
+ ihl_ofst_meq32)) {
+ IPAHAL_ERR("ran out of ihl_meq32 eq\n");
+ goto err;
+ }
+ *en_rule |= IPA_GET_RULE_EQ_BIT_PTRN(
+ ipa3_0_ihl_ofst_meq32[ihl_ofst_meq32]);
+ /* 1 => offset of code after v6 header */
+ extra = ipa_write_8(1, extra);
+ rest = ipa_write_32(0xFF, rest);
+ rest = ipa_write_32(attrib->code, rest);
+ ihl_ofst_meq32++;
+ }
+
+ if (attrib->attrib_mask & IPA_FLT_SPI) {
+ if (IPA_IS_RAN_OUT_OF_EQ(ipa3_0_ihl_ofst_meq32,
+ ihl_ofst_meq32)) {
+ IPAHAL_ERR("ran out of ihl_meq32 eq\n");
+ goto err;
+ }
+ *en_rule |= IPA_GET_RULE_EQ_BIT_PTRN(
+ ipa3_0_ihl_ofst_meq32[ihl_ofst_meq32]);
+ /* 0 => offset of SPI after v6 header FIXME */
+ extra = ipa_write_8(0, extra);
+ rest = ipa_write_32(0xFFFFFFFF, rest);
+ rest = ipa_write_32(attrib->spi, rest);
+ ihl_ofst_meq32++;
+ }
+
+ if (attrib->attrib_mask & IPA_FLT_META_DATA) {
+ *en_rule |= IPA_GET_RULE_EQ_BIT_PTRN(IPA_METADATA_COMPARE);
+ rest = ipa_write_32(attrib->meta_data_mask, rest);
+ rest = ipa_write_32(attrib->meta_data, rest);
+ }
+
+ if (attrib->attrib_mask & IPA_FLT_SRC_PORT) {
+ if (IPA_IS_RAN_OUT_OF_EQ(ipa3_0_ihl_ofst_rng16,
+ ihl_ofst_rng16)) {
+ IPAHAL_ERR("ran out of ihl_rng16 eq\n");
+ goto err;
+ }
+ *en_rule |= IPA_GET_RULE_EQ_BIT_PTRN(
+ ipa3_0_ihl_ofst_rng16[ihl_ofst_rng16]);
+ /* 0 => offset of src port after v6 header */
+ extra = ipa_write_8(0, extra);
+ rest = ipa_write_16(attrib->src_port, rest);
+ rest = ipa_write_16(attrib->src_port, rest);
+ ihl_ofst_rng16++;
+ }
+
+ if (attrib->attrib_mask & IPA_FLT_DST_PORT) {
+ if (IPA_IS_RAN_OUT_OF_EQ(ipa3_0_ihl_ofst_rng16,
+ ihl_ofst_rng16)) {
+ IPAHAL_ERR("ran out of ihl_rng16 eq\n");
+ goto err;
+ }
+ *en_rule |= IPA_GET_RULE_EQ_BIT_PTRN(
+ ipa3_0_ihl_ofst_rng16[ihl_ofst_rng16]);
+ /* 2 => offset of dst port after v6 header */
+ extra = ipa_write_8(2, extra);
+ rest = ipa_write_16(attrib->dst_port, rest);
+ rest = ipa_write_16(attrib->dst_port, rest);
+ ihl_ofst_rng16++;
+ }
+
+ if (attrib->attrib_mask & IPA_FLT_SRC_PORT_RANGE) {
+ if (IPA_IS_RAN_OUT_OF_EQ(ipa3_0_ihl_ofst_rng16,
+ ihl_ofst_rng16)) {
+ IPAHAL_ERR("ran out of ihl_rng16 eq\n");
+ goto err;
+ }
+ if (attrib->src_port_hi < attrib->src_port_lo) {
+ IPAHAL_ERR("bad src port range param\n");
+ goto err;
+ }
+ *en_rule |= IPA_GET_RULE_EQ_BIT_PTRN(
+ ipa3_0_ihl_ofst_rng16[ihl_ofst_rng16]);
+ /* 0 => offset of src port after v6 header */
+ extra = ipa_write_8(0, extra);
+ rest = ipa_write_16(attrib->src_port_hi, rest);
+ rest = ipa_write_16(attrib->src_port_lo, rest);
+ ihl_ofst_rng16++;
+ }
+
+ if (attrib->attrib_mask & IPA_FLT_DST_PORT_RANGE) {
+ if (IPA_IS_RAN_OUT_OF_EQ(ipa3_0_ihl_ofst_rng16,
+ ihl_ofst_rng16)) {
+ IPAHAL_ERR("ran out of ihl_rng16 eq\n");
+ goto err;
+ }
+ if (attrib->dst_port_hi < attrib->dst_port_lo) {
+ IPAHAL_ERR("bad dst port range param\n");
+ goto err;
+ }
+ *en_rule |= IPA_GET_RULE_EQ_BIT_PTRN(
+ ipa3_0_ihl_ofst_rng16[ihl_ofst_rng16]);
+ /* 2 => offset of dst port after v6 header */
+ extra = ipa_write_8(2, extra);
+ rest = ipa_write_16(attrib->dst_port_hi, rest);
+ rest = ipa_write_16(attrib->dst_port_lo, rest);
+ ihl_ofst_rng16++;
+ }
+
+ if (attrib->attrib_mask & IPA_FLT_FLOW_LABEL) {
+ *en_rule |= IPA_GET_RULE_EQ_BIT_PTRN(IPA_FL_EQ);
+ rest = ipa_write_32(attrib->u.v6.flow_label & 0xFFFFF,
+ rest);
+ }
+
+ if (attrib->attrib_mask & IPA_FLT_FRAGMENT)
+ *en_rule |= IPA_GET_RULE_EQ_BIT_PTRN(IPA_IS_FRAG);
+
+ goto done;
+
+err:
+ rc = -EPERM;
+done:
+ *extra_wrds = extra;
+ *rest_wrds = rest;
+ return rc;
+}
+
+/*
+ * ipa_fltrt_copy_mem() - copy cnt bytes from src into dst
+ * @src: source buffer
+ * @dst: destination buffer
+ * @cnt: number of bytes to copy
+ *
+ * Return: pointer to the first byte in dst past the copied region,
+ * so callers can keep appending sequentially.
+ */
+static u8 *ipa_fltrt_copy_mem(u8 *src, u8 *dst, int cnt)
+{
+	memcpy(dst, src, cnt);
+
+	return dst + cnt;
+}
+
+/*
+ * ipa_fltrt_generate_hw_rule_bdy() - generate HW rule body (w/o header)
+ * @ipt: IP address type
+ * @attrib: IPA rule attribute
+ * @buf: output buffer. Advance it after building the rule
+ * @en_rule: enable rule bitmap to fill in
+ *
+ * The rule is first built into two aligned scratch buffers - the "extra"
+ * words and the rest of the words - and only then copied sequentially,
+ * 64-bit padded, into *buf.
+ *
+ * Return codes:
+ * 0: success
+ * -EPERM: wrong input
+ * -ENOMEM: scratch buffer allocation failure
+ */
+static int ipa_fltrt_generate_hw_rule_bdy(enum ipa_ip_type ipt,
+	const struct ipa_rule_attrib *attrib, u8 **buf, u16 *en_rule)
+{
+	int sz;
+	int rc = 0;
+	u8 *extra_wrd_buf;
+	u8 *rest_wrd_buf;
+	u8 *extra_wrd_start;
+	u8 *rest_wrd_start;
+	u8 *extra_wrd_i;
+	u8 *rest_wrd_i;
+
+	sz = IPA3_0_HW_TBL_WIDTH * 2 + IPA3_0_HW_RULE_START_ALIGNMENT;
+	extra_wrd_buf = kzalloc(sz, GFP_KERNEL);
+	if (!extra_wrd_buf) {
+		IPAHAL_ERR("failed to allocate %d bytes\n", sz);
+		rc = -ENOMEM;
+		goto fail_extra_alloc;
+	}
+
+	sz = IPA3_0_HW_RULE_BUF_SIZE + IPA3_0_HW_RULE_START_ALIGNMENT;
+	rest_wrd_buf = kzalloc(sz, GFP_KERNEL);
+	if (!rest_wrd_buf) {
+		IPAHAL_ERR("failed to allocate %d bytes\n", sz);
+		rc = -ENOMEM;
+		goto fail_rest_alloc;
+	}
+
+	/*
+	 * NOTE(review): IPA3_0_HW_RULE_START_ALIGNMENT is used here as an
+	 * alignment mask (power-of-two minus one); the buffers above were
+	 * over-allocated by the same amount to make room for the round-up.
+	 * Confirm the macro value matches this expectation.
+	 */
+	extra_wrd_start = extra_wrd_buf + IPA3_0_HW_RULE_START_ALIGNMENT;
+	extra_wrd_start = (u8 *)((long)extra_wrd_start &
+		~IPA3_0_HW_RULE_START_ALIGNMENT);
+
+	rest_wrd_start = rest_wrd_buf + IPA3_0_HW_RULE_START_ALIGNMENT;
+	rest_wrd_start = (u8 *)((long)rest_wrd_start &
+		~IPA3_0_HW_RULE_START_ALIGNMENT);
+
+	extra_wrd_i = extra_wrd_start;
+	rest_wrd_i = rest_wrd_start;
+
+	rc = ipa_fltrt_rule_generation_err_check(ipt, attrib);
+	if (rc) {
+		IPAHAL_ERR("rule generation err check failed\n");
+		goto fail_err_check;
+	}
+
+	if (ipt == IPA_IP_v4) {
+		if (ipa_fltrt_generate_hw_rule_bdy_ip4(en_rule, attrib,
+			&extra_wrd_i, &rest_wrd_i)) {
+			IPAHAL_ERR("failed to build ipv4 hw rule\n");
+			rc = -EPERM;
+			goto fail_err_check;
+		}
+
+	} else if (ipt == IPA_IP_v6) {
+		if (ipa_fltrt_generate_hw_rule_bdy_ip6(en_rule, attrib,
+			&extra_wrd_i, &rest_wrd_i)) {
+			IPAHAL_ERR("failed to build ipv6 hw rule\n");
+			rc = -EPERM;
+			goto fail_err_check;
+		}
+	} else {
+		IPAHAL_ERR("unsupported ip %d\n", ipt);
+		/*
+		 * Fix: rc was previously left at 0 here, so an unsupported
+		 * IP type was silently reported as success to the caller.
+		 */
+		rc = -EPERM;
+		goto fail_err_check;
+	}
+
+	/*
+	 * default "rule" means no attributes set -> map to
+	 * OFFSET_MEQ32_0 with mask of 0 and val of 0 and offset 0
+	 */
+	if (attrib->attrib_mask == 0) {
+		IPAHAL_DBG("building default rule\n");
+		*en_rule |= IPA_GET_RULE_EQ_BIT_PTRN(ipa3_0_ofst_meq32[0]);
+		extra_wrd_i = ipa_write_8(0, extra_wrd_i); /* offset */
+		rest_wrd_i = ipa_write_32(0, rest_wrd_i); /* mask */
+		rest_wrd_i = ipa_write_32(0, rest_wrd_i); /* val */
+	}
+
+	IPAHAL_DBG("extra_word_1 0x%llx\n", *(u64 *)extra_wrd_start);
+	IPAHAL_DBG("extra_word_2 0x%llx\n",
+		*(u64 *)(extra_wrd_start + IPA3_0_HW_TBL_WIDTH));
+
+	extra_wrd_i = ipa_pad_to_64(extra_wrd_i);
+	sz = extra_wrd_i - extra_wrd_start;
+	IPAHAL_DBG("extra words params sz %d\n", sz);
+	*buf = ipa_fltrt_copy_mem(extra_wrd_start, *buf, sz);
+
+	rest_wrd_i = ipa_pad_to_64(rest_wrd_i);
+	sz = rest_wrd_i - rest_wrd_start;
+	IPAHAL_DBG("non extra words params sz %d\n", sz);
+	*buf = ipa_fltrt_copy_mem(rest_wrd_start, *buf, sz);
+
+fail_err_check:
+	kfree(rest_wrd_buf);
+fail_rest_alloc:
+	kfree(extra_wrd_buf);
+fail_extra_alloc:
+	return rc;
+}
+
+
+/**
+ * ipa_fltrt_calc_extra_wrd_bytes() - Calculate the number of extra words
+ * needed by the given set of equations
+ * @attrib: equation attribute
+ *
+ * Each multi-entry equation class contributes one extra byte per entry;
+ * each present single-entry equation contributes one extra byte.
+ *
+ * Return value: number of extra bytes (never negative)
+ */
+static int ipa_fltrt_calc_extra_wrd_bytes(
+	const struct ipa_ipfltri_rule_eq *attrib)
+{
+	int cnt;
+
+	cnt = attrib->num_offset_meq_128 +
+		attrib->num_offset_meq_32 +
+		attrib->num_ihl_offset_meq_32 +
+		attrib->num_ihl_offset_range_16;
+
+	cnt += !!attrib->tos_eq_present;
+	cnt += !!attrib->protocol_eq_present;
+	cnt += !!attrib->tc_eq_present;
+	cnt += !!attrib->ihl_offset_eq_32_present;
+	cnt += !!attrib->ihl_offset_eq_16_present;
+
+	IPAHAL_DBG("extra bytes number %d\n", cnt);
+
+	return cnt;
+}
+
+/*
+ * ipa_fltrt_generate_hw_rule_bdy_from_eq() - serialize pre-built equations
+ * into the HW rule body layout
+ * @attrib: equations to serialize
+ * @buf: output buffer. Advanced past the rule body on success
+ *
+ * "extra" bytes (one per equation needing an offset byte) are packed at the
+ * head of the buffer; the masks/values ("rest") follow, starting one or two
+ * table-header words in, depending on how many extra bytes are needed.
+ *
+ * Return value: 0 on success, -EPERM if too many equations need extra bytes
+ */
+static int ipa_fltrt_generate_hw_rule_bdy_from_eq(
+	const struct ipa_ipfltri_rule_eq *attrib, u8 **buf)
+{
+	int i;
+	int j;
+	int extra_bytes;
+	u8 *extra;
+	u8 *rest;
+
+	extra_bytes = ipa_fltrt_calc_extra_wrd_bytes(attrib);
+	/* only 3 eq does not have extra word param, 13 out of 16 is the number
+	 * of equations that needs extra word param
+	 */
+	if (extra_bytes > 13) {
+		IPAHAL_ERR("too much extra bytes\n");
+		return -EPERM;
+	} else if (extra_bytes > IPA3_0_HW_TBL_HDR_WIDTH) {
+		/* two extra words */
+		extra = *buf;
+		rest = *buf + IPA3_0_HW_TBL_HDR_WIDTH * 2;
+	} else if (extra_bytes > 0) {
+		/* single extra word */
+		extra = *buf;
+		rest = *buf + IPA3_0_HW_TBL_HDR_WIDTH;
+	} else {
+		/* no extra words */
+		extra = NULL;
+		rest = *buf;
+	}
+
+	if (attrib->tos_eq_present)
+		extra = ipa_write_8(attrib->tos_eq, extra);
+
+	if (attrib->protocol_eq_present)
+		extra = ipa_write_8(attrib->protocol_eq, extra);
+
+	if (attrib->tc_eq_present)
+		extra = ipa_write_8(attrib->tc_eq, extra);
+
+	/*
+	 * At most two entries exist per multi-entry eq class; the loops
+	 * below replace the previous hand-unrolled per-entry copies and
+	 * emit bytes in exactly the same order.
+	 */
+	for (i = 0; i < attrib->num_offset_meq_128 && i < 2; i++) {
+		extra = ipa_write_8(attrib->offset_meq_128[i].offset, extra);
+		/* low 8 mask bytes, low 8 value bytes, then the high 8s */
+		for (j = 0; j < 8; j++)
+			rest = ipa_write_8(attrib->offset_meq_128[i].mask[j],
+				rest);
+		for (j = 0; j < 8; j++)
+			rest = ipa_write_8(attrib->offset_meq_128[i].value[j],
+				rest);
+		for (j = 8; j < 16; j++)
+			rest = ipa_write_8(attrib->offset_meq_128[i].mask[j],
+				rest);
+		for (j = 8; j < 16; j++)
+			rest = ipa_write_8(attrib->offset_meq_128[i].value[j],
+				rest);
+	}
+
+	for (i = 0; i < attrib->num_offset_meq_32 && i < 2; i++) {
+		extra = ipa_write_8(attrib->offset_meq_32[i].offset, extra);
+		rest = ipa_write_32(attrib->offset_meq_32[i].mask, rest);
+		rest = ipa_write_32(attrib->offset_meq_32[i].value, rest);
+	}
+
+	for (i = 0; i < attrib->num_ihl_offset_meq_32 && i < 2; i++) {
+		extra = ipa_write_8(attrib->ihl_offset_meq_32[i].offset,
+			extra);
+
+		rest = ipa_write_32(attrib->ihl_offset_meq_32[i].mask, rest);
+		rest = ipa_write_32(attrib->ihl_offset_meq_32[i].value, rest);
+	}
+
+	if (attrib->metadata_meq32_present) {
+		rest = ipa_write_32(attrib->metadata_meq32.mask, rest);
+		rest = ipa_write_32(attrib->metadata_meq32.value, rest);
+	}
+
+	for (i = 0; i < attrib->num_ihl_offset_range_16 && i < 2; i++) {
+		extra = ipa_write_8(attrib->ihl_offset_range_16[i].offset,
+			extra);
+
+		rest = ipa_write_16(attrib->ihl_offset_range_16[i].range_high,
+			rest);
+		rest = ipa_write_16(attrib->ihl_offset_range_16[i].range_low,
+			rest);
+	}
+
+	if (attrib->ihl_offset_eq_32_present) {
+		extra = ipa_write_8(attrib->ihl_offset_eq_32.offset, extra);
+		rest = ipa_write_32(attrib->ihl_offset_eq_32.value, rest);
+	}
+
+	if (attrib->ihl_offset_eq_16_present) {
+		extra = ipa_write_8(attrib->ihl_offset_eq_16.offset, extra);
+		rest = ipa_write_16(attrib->ihl_offset_eq_16.value, rest);
+		rest = ipa_write_16(0, rest);
+	}
+
+	if (attrib->fl_eq_present)
+		rest = ipa_write_32(attrib->fl_eq & 0xFFFFF, rest);
+
+	extra = ipa_pad_to_64(extra);
+	rest = ipa_pad_to_64(rest);
+	*buf = rest;
+
+	return 0;
+}
+
+/*
+ * ipa_flt_generate_mac_addr_eq() - fill a 128-bit meq entry that matches a
+ * MAC address at a negative offset from the IP header
+ * @eq_atrb: equations structure holding the meq128 entries
+ * @hdr_mac_addr_offset: offset of the MAC address relative to the IP header
+ * @mac_addr_mask: 6-byte mask to apply
+ * @mac_addr: 6-byte address to match
+ * @ofst_meq128: index of the meq128 entry to fill
+ */
+static void ipa_flt_generate_mac_addr_eq(struct ipa_ipfltri_rule_eq *eq_atrb,
+	u8 hdr_mac_addr_offset, const uint8_t mac_addr_mask[ETH_ALEN],
+	const uint8_t mac_addr[ETH_ALEN], u8 ofst_meq128)
+{
+	int i;
+
+	eq_atrb->offset_meq_128[ofst_meq128].offset = hdr_mac_addr_offset;
+
+	/* only the top 6 bytes carry the address; zero everything first */
+	memset(eq_atrb->offset_meq_128[ofst_meq128].mask, 0, 16);
+	memset(eq_atrb->offset_meq_128[ofst_meq128].value, 0, 16);
+
+	/* MAC bytes are stored reversed into mask/value bytes 15..10 */
+	for (i = 0; i < ETH_ALEN; i++) {
+		eq_atrb->offset_meq_128[ofst_meq128].mask[15 - i] =
+			mac_addr_mask[i];
+		eq_atrb->offset_meq_128[ofst_meq128].value[15 - i] =
+			mac_addr[i];
+	}
+}
+
+/*
+ * ipa_flt_generate_eq_ip4() - translate an IPv4 rule attribute into HW
+ * filtering equations
+ * @ip: IP address type (not referenced in this body; kept for symmetry
+ *      with the v6 variant)
+ * @attrib: IPA rule attribute to translate
+ * @eq_atrb: [out] equations structure to populate
+ *
+ * Each attribute bit consumes one slot of a limited equation class
+ * (meq128 / meq32 / ihl_meq32 / ihl_rng16). Slots are handed out in the
+ * order the attributes are checked below; the matching bit is set in the
+ * local eq bitmap, which is published to eq_atrb at the end.
+ *
+ * Return: 0 on success, -EPERM when a slot class is exhausted or a port
+ * range is inverted.
+ */
+static int ipa_flt_generate_eq_ip4(enum ipa_ip_type ip,
+		const struct ipa_rule_attrib *attrib,
+		struct ipa_ipfltri_rule_eq *eq_atrb)
+{
+	u8 ofst_meq32 = 0;
+	u8 ihl_ofst_rng16 = 0;
+	u8 ihl_ofst_meq32 = 0;
+	u8 ofst_meq128 = 0;
+	u16 eq_bitmap = 0;
+	/* en_rule aliases the local bitmap so the |= pattern below matches
+	 * the hw-rule-body generators in this file
+	 */
+	u16 *en_rule = &eq_bitmap;
+
+	if (attrib->attrib_mask & IPA_FLT_TOS) {
+		*en_rule |= IPA_GET_RULE_EQ_BIT_PTRN(IPA_TOS_EQ);
+		eq_atrb->tos_eq_present = 1;
+		eq_atrb->tos_eq = attrib->u.v4.tos;
+	}
+
+	if (attrib->attrib_mask & IPA_FLT_PROTOCOL) {
+		*en_rule |= IPA_GET_RULE_EQ_BIT_PTRN(IPA_PROTOCOL_EQ);
+		eq_atrb->protocol_eq_present = 1;
+		eq_atrb->protocol_eq = attrib->u.v4.protocol;
+	}
+
+	if (attrib->attrib_mask & IPA_FLT_MAC_DST_ADDR_ETHER_II) {
+		if (IPA_IS_RAN_OUT_OF_EQ(ipa3_0_ofst_meq128, ofst_meq128)) {
+			IPAHAL_ERR("ran out of meq128 eq\n");
+			return -EPERM;
+		}
+		*en_rule |= IPA_GET_RULE_EQ_BIT_PTRN(
+			ipa3_0_ofst_meq128[ofst_meq128]);
+
+		/* -14 => offset of dst mac addr in Ethernet II hdr */
+		ipa_flt_generate_mac_addr_eq(eq_atrb, -14,
+			attrib->dst_mac_addr_mask, attrib->dst_mac_addr,
+			ofst_meq128);
+
+		ofst_meq128++;
+	}
+
+	if (attrib->attrib_mask & IPA_FLT_MAC_SRC_ADDR_ETHER_II) {
+		if (IPA_IS_RAN_OUT_OF_EQ(ipa3_0_ofst_meq128, ofst_meq128)) {
+			IPAHAL_ERR("ran out of meq128 eq\n");
+			return -EPERM;
+		}
+		*en_rule |= IPA_GET_RULE_EQ_BIT_PTRN(
+			ipa3_0_ofst_meq128[ofst_meq128]);
+
+		/* -8 => offset of src mac addr in Ethernet II hdr */
+		ipa_flt_generate_mac_addr_eq(eq_atrb, -8,
+			attrib->src_mac_addr_mask, attrib->src_mac_addr,
+			ofst_meq128);
+
+		ofst_meq128++;
+	}
+
+	if (attrib->attrib_mask & IPA_FLT_MAC_DST_ADDR_802_3) {
+		if (IPA_IS_RAN_OUT_OF_EQ(ipa3_0_ofst_meq128, ofst_meq128)) {
+			IPAHAL_ERR("ran out of meq128 eq\n");
+			return -EPERM;
+		}
+		*en_rule |= IPA_GET_RULE_EQ_BIT_PTRN(
+			ipa3_0_ofst_meq128[ofst_meq128]);
+
+		/* -22 => offset of dst mac addr in 802.3 hdr */
+		ipa_flt_generate_mac_addr_eq(eq_atrb, -22,
+			attrib->dst_mac_addr_mask, attrib->dst_mac_addr,
+			ofst_meq128);
+
+		ofst_meq128++;
+	}
+
+	if (attrib->attrib_mask & IPA_FLT_MAC_SRC_ADDR_802_3) {
+		if (IPA_IS_RAN_OUT_OF_EQ(ipa3_0_ofst_meq128, ofst_meq128)) {
+			IPAHAL_ERR("ran out of meq128 eq\n");
+			return -EPERM;
+		}
+		*en_rule |= IPA_GET_RULE_EQ_BIT_PTRN(
+			ipa3_0_ofst_meq128[ofst_meq128]);
+
+		/* -16 => offset of src mac addr in 802.3 hdr */
+		ipa_flt_generate_mac_addr_eq(eq_atrb, -16,
+			attrib->src_mac_addr_mask, attrib->src_mac_addr,
+			ofst_meq128);
+
+		ofst_meq128++;
+	}
+
+	if (attrib->attrib_mask & IPA_FLT_TOS_MASKED) {
+		if (IPA_IS_RAN_OUT_OF_EQ(ipa3_0_ofst_meq32, ofst_meq32)) {
+			IPAHAL_ERR("ran out of meq32 eq\n");
+			return -EPERM;
+		}
+		*en_rule |= IPA_GET_RULE_EQ_BIT_PTRN(
+			ipa3_0_ofst_meq32[ofst_meq32]);
+		eq_atrb->offset_meq_32[ofst_meq32].offset = 0;
+		/* TOS sits in bits 16.. of the first IPv4 header word */
+		eq_atrb->offset_meq_32[ofst_meq32].mask =
+			attrib->tos_mask << 16;
+		eq_atrb->offset_meq_32[ofst_meq32].value =
+			attrib->tos_value << 16;
+		ofst_meq32++;
+	}
+
+	if (attrib->attrib_mask & IPA_FLT_SRC_ADDR) {
+		if (IPA_IS_RAN_OUT_OF_EQ(ipa3_0_ofst_meq32, ofst_meq32)) {
+			IPAHAL_ERR("ran out of meq32 eq\n");
+			return -EPERM;
+		}
+		*en_rule |= IPA_GET_RULE_EQ_BIT_PTRN(
+			ipa3_0_ofst_meq32[ofst_meq32]);
+		/* 12 => offset of src addr in the IPv4 header */
+		eq_atrb->offset_meq_32[ofst_meq32].offset = 12;
+		eq_atrb->offset_meq_32[ofst_meq32].mask =
+			attrib->u.v4.src_addr_mask;
+		eq_atrb->offset_meq_32[ofst_meq32].value =
+			attrib->u.v4.src_addr;
+		ofst_meq32++;
+	}
+
+	if (attrib->attrib_mask & IPA_FLT_DST_ADDR) {
+		if (IPA_IS_RAN_OUT_OF_EQ(ipa3_0_ofst_meq32, ofst_meq32)) {
+			IPAHAL_ERR("ran out of meq32 eq\n");
+			return -EPERM;
+		}
+		*en_rule |= IPA_GET_RULE_EQ_BIT_PTRN(
+			ipa3_0_ofst_meq32[ofst_meq32]);
+		/* 16 => offset of dst addr in the IPv4 header */
+		eq_atrb->offset_meq_32[ofst_meq32].offset = 16;
+		eq_atrb->offset_meq_32[ofst_meq32].mask =
+			attrib->u.v4.dst_addr_mask;
+		eq_atrb->offset_meq_32[ofst_meq32].value =
+			attrib->u.v4.dst_addr;
+		ofst_meq32++;
+	}
+
+	if (attrib->attrib_mask & IPA_FLT_MAC_ETHER_TYPE) {
+		if (IPA_IS_RAN_OUT_OF_EQ(ipa3_0_ofst_meq32, ofst_meq32)) {
+			IPAHAL_ERR("ran out of meq32 eq\n");
+			return -EPERM;
+		}
+		*en_rule |= IPA_GET_RULE_EQ_BIT_PTRN(
+			ipa3_0_ofst_meq32[ofst_meq32]);
+		/* -2 => offset of ether type in the L2 hdr */
+		eq_atrb->offset_meq_32[ofst_meq32].offset = -2;
+		eq_atrb->offset_meq_32[ofst_meq32].mask =
+			htons(attrib->ether_type);
+		eq_atrb->offset_meq_32[ofst_meq32].value =
+			htons(attrib->ether_type);
+		ofst_meq32++;
+	}
+
+	if (attrib->attrib_mask & IPA_FLT_TYPE) {
+		if (IPA_IS_RAN_OUT_OF_EQ(ipa3_0_ihl_ofst_meq32,
+			ihl_ofst_meq32)) {
+			IPAHAL_ERR("ran out of ihl_meq32 eq\n");
+			return -EPERM;
+		}
+		*en_rule |= IPA_GET_RULE_EQ_BIT_PTRN(
+			ipa3_0_ihl_ofst_meq32[ihl_ofst_meq32]);
+		/* 0 => offset of type after the IP header */
+		eq_atrb->ihl_offset_meq_32[ihl_ofst_meq32].offset = 0;
+		eq_atrb->ihl_offset_meq_32[ihl_ofst_meq32].mask = 0xFF;
+		eq_atrb->ihl_offset_meq_32[ihl_ofst_meq32].value =
+			attrib->type;
+		ihl_ofst_meq32++;
+	}
+
+	if (attrib->attrib_mask & IPA_FLT_CODE) {
+		if (IPA_IS_RAN_OUT_OF_EQ(ipa3_0_ihl_ofst_meq32,
+			ihl_ofst_meq32)) {
+			IPAHAL_ERR("ran out of ihl_meq32 eq\n");
+			return -EPERM;
+		}
+		*en_rule |= IPA_GET_RULE_EQ_BIT_PTRN(
+			ipa3_0_ihl_ofst_meq32[ihl_ofst_meq32]);
+		/* 1 => offset of code after the IP header */
+		eq_atrb->ihl_offset_meq_32[ihl_ofst_meq32].offset = 1;
+		eq_atrb->ihl_offset_meq_32[ihl_ofst_meq32].mask = 0xFF;
+		eq_atrb->ihl_offset_meq_32[ihl_ofst_meq32].value =
+			attrib->code;
+		ihl_ofst_meq32++;
+	}
+
+	if (attrib->attrib_mask & IPA_FLT_SPI) {
+		if (IPA_IS_RAN_OUT_OF_EQ(ipa3_0_ihl_ofst_meq32,
+			ihl_ofst_meq32)) {
+			IPAHAL_ERR("ran out of ihl_meq32 eq\n");
+			return -EPERM;
+		}
+		*en_rule |= IPA_GET_RULE_EQ_BIT_PTRN(
+			ipa3_0_ihl_ofst_meq32[ihl_ofst_meq32]);
+		/* full 32-bit compare of the SPI right after the IP header */
+		eq_atrb->ihl_offset_meq_32[ihl_ofst_meq32].offset = 0;
+		eq_atrb->ihl_offset_meq_32[ihl_ofst_meq32].mask =
+			0xFFFFFFFF;
+		eq_atrb->ihl_offset_meq_32[ihl_ofst_meq32].value =
+			attrib->spi;
+		ihl_ofst_meq32++;
+	}
+
+	if (attrib->attrib_mask & IPA_FLT_META_DATA) {
+		*en_rule |= IPA_GET_RULE_EQ_BIT_PTRN(
+			IPA_METADATA_COMPARE);
+		eq_atrb->metadata_meq32_present = 1;
+		eq_atrb->metadata_meq32.offset = 0;
+		eq_atrb->metadata_meq32.mask = attrib->meta_data_mask;
+		eq_atrb->metadata_meq32.value = attrib->meta_data;
+	}
+
+	if (attrib->attrib_mask & IPA_FLT_SRC_PORT_RANGE) {
+		if (IPA_IS_RAN_OUT_OF_EQ(ipa3_0_ihl_ofst_rng16,
+			ihl_ofst_rng16)) {
+			IPAHAL_ERR("ran out of ihl_rng16 eq\n");
+			return -EPERM;
+		}
+		if (attrib->src_port_hi < attrib->src_port_lo) {
+			IPAHAL_ERR("bad src port range param\n");
+			return -EPERM;
+		}
+		*en_rule |= IPA_GET_RULE_EQ_BIT_PTRN(
+			ipa3_0_ihl_ofst_rng16[ihl_ofst_rng16]);
+		/* 0 => offset of src port after the IP header */
+		eq_atrb->ihl_offset_range_16[ihl_ofst_rng16].offset = 0;
+		eq_atrb->ihl_offset_range_16[ihl_ofst_rng16].range_low
+			= attrib->src_port_lo;
+		eq_atrb->ihl_offset_range_16[ihl_ofst_rng16].range_high
+			= attrib->src_port_hi;
+		ihl_ofst_rng16++;
+	}
+
+	if (attrib->attrib_mask & IPA_FLT_DST_PORT_RANGE) {
+		if (IPA_IS_RAN_OUT_OF_EQ(ipa3_0_ihl_ofst_rng16,
+			ihl_ofst_rng16)) {
+			IPAHAL_ERR("ran out of ihl_rng16 eq\n");
+			return -EPERM;
+		}
+		if (attrib->dst_port_hi < attrib->dst_port_lo) {
+			IPAHAL_ERR("bad dst port range param\n");
+			return -EPERM;
+		}
+		*en_rule |= IPA_GET_RULE_EQ_BIT_PTRN(
+			ipa3_0_ihl_ofst_rng16[ihl_ofst_rng16]);
+		/* 2 => offset of dst port after the IP header */
+		eq_atrb->ihl_offset_range_16[ihl_ofst_rng16].offset = 2;
+		eq_atrb->ihl_offset_range_16[ihl_ofst_rng16].range_low
+			= attrib->dst_port_lo;
+		eq_atrb->ihl_offset_range_16[ihl_ofst_rng16].range_high
+			= attrib->dst_port_hi;
+		ihl_ofst_rng16++;
+	}
+
+	if (attrib->attrib_mask & IPA_FLT_SRC_PORT) {
+		if (IPA_IS_RAN_OUT_OF_EQ(ipa3_0_ihl_ofst_rng16,
+			ihl_ofst_rng16)) {
+			IPAHAL_ERR("ran out of ihl_rng16 eq\n");
+			return -EPERM;
+		}
+		*en_rule |= IPA_GET_RULE_EQ_BIT_PTRN(
+			ipa3_0_ihl_ofst_rng16[ihl_ofst_rng16]);
+		/* exact port match is encoded as a degenerate range */
+		eq_atrb->ihl_offset_range_16[ihl_ofst_rng16].offset = 0;
+		eq_atrb->ihl_offset_range_16[ihl_ofst_rng16].range_low
+			= attrib->src_port;
+		eq_atrb->ihl_offset_range_16[ihl_ofst_rng16].range_high
+			= attrib->src_port;
+		ihl_ofst_rng16++;
+	}
+
+	if (attrib->attrib_mask & IPA_FLT_DST_PORT) {
+		if (IPA_IS_RAN_OUT_OF_EQ(ipa3_0_ihl_ofst_rng16,
+			ihl_ofst_rng16)) {
+			IPAHAL_ERR("ran out of ihl_rng16 eq\n");
+			return -EPERM;
+		}
+		*en_rule |= IPA_GET_RULE_EQ_BIT_PTRN(
+			ipa3_0_ihl_ofst_rng16[ihl_ofst_rng16]);
+		eq_atrb->ihl_offset_range_16[ihl_ofst_rng16].offset = 2;
+		eq_atrb->ihl_offset_range_16[ihl_ofst_rng16].range_low
+			= attrib->dst_port;
+		eq_atrb->ihl_offset_range_16[ihl_ofst_rng16].range_high
+			= attrib->dst_port;
+		ihl_ofst_rng16++;
+	}
+
+	if (attrib->attrib_mask & IPA_FLT_FRAGMENT) {
+		*en_rule |= IPA_GET_RULE_EQ_BIT_PTRN(IPA_IS_FRAG);
+		eq_atrb->ipv4_frag_eq_present = 1;
+	}
+
+	/* publish the bitmap and the per-class slot usage counters */
+	eq_atrb->rule_eq_bitmap = *en_rule;
+	eq_atrb->num_offset_meq_32 = ofst_meq32;
+	eq_atrb->num_ihl_offset_range_16 = ihl_ofst_rng16;
+	eq_atrb->num_ihl_offset_meq_32 = ihl_ofst_meq32;
+	eq_atrb->num_offset_meq_128 = ofst_meq128;
+
+	return 0;
+}
+
+/*
+ * ipa_flt_generate_eq_ip6() - translate an IPv6 rule attribute into HW
+ * filtering equations
+ * @ip: IP address type (not referenced in this body; kept for symmetry
+ *      with the v4 variant)
+ * @attrib: IPA rule attribute to translate
+ * @eq_atrb: [out] equations structure to populate
+ *
+ * Same slot-allocation scheme as ipa_flt_generate_eq_ip4(): each attribute
+ * bit consumes one slot of a limited equation class and sets the matching
+ * bit in the local eq bitmap, published to eq_atrb at the end.
+ *
+ * Return: 0 on success, -EPERM when a slot class is exhausted or a port
+ * range is inverted.
+ */
+static int ipa_flt_generate_eq_ip6(enum ipa_ip_type ip,
+		const struct ipa_rule_attrib *attrib,
+		struct ipa_ipfltri_rule_eq *eq_atrb)
+{
+	u8 ofst_meq32 = 0;
+	u8 ihl_ofst_rng16 = 0;
+	u8 ihl_ofst_meq32 = 0;
+	u8 ofst_meq128 = 0;
+	u16 eq_bitmap = 0;
+	/* en_rule aliases the local bitmap so the |= pattern below matches
+	 * the hw-rule-body generators in this file
+	 */
+	u16 *en_rule = &eq_bitmap;
+
+	if (attrib->attrib_mask & IPA_FLT_NEXT_HDR) {
+		*en_rule |= IPA_GET_RULE_EQ_BIT_PTRN(
+			IPA_PROTOCOL_EQ);
+		eq_atrb->protocol_eq_present = 1;
+		eq_atrb->protocol_eq = attrib->u.v6.next_hdr;
+	}
+
+	if (attrib->attrib_mask & IPA_FLT_TC) {
+		*en_rule |= IPA_GET_RULE_EQ_BIT_PTRN(
+			IPA_TC_EQ);
+		eq_atrb->tc_eq_present = 1;
+		eq_atrb->tc_eq = attrib->u.v6.tc;
+	}
+
+	if (attrib->attrib_mask & IPA_FLT_SRC_ADDR) {
+		if (IPA_IS_RAN_OUT_OF_EQ(ipa3_0_ofst_meq128, ofst_meq128)) {
+			IPAHAL_ERR("ran out of meq128 eq\n");
+			return -EPERM;
+		}
+		*en_rule |= IPA_GET_RULE_EQ_BIT_PTRN(
+			ipa3_0_ofst_meq128[ofst_meq128]);
+		/* use the same word order as in ipa v2 */
+		/* 8 => offset of src addr in the IPv6 header */
+		eq_atrb->offset_meq_128[ofst_meq128].offset = 8;
+		*(u32 *)(eq_atrb->offset_meq_128[ofst_meq128].mask + 0)
+			= attrib->u.v6.src_addr_mask[0];
+		*(u32 *)(eq_atrb->offset_meq_128[ofst_meq128].mask + 4)
+			= attrib->u.v6.src_addr_mask[1];
+		*(u32 *)(eq_atrb->offset_meq_128[ofst_meq128].mask + 8)
+			= attrib->u.v6.src_addr_mask[2];
+		*(u32 *)(eq_atrb->offset_meq_128[ofst_meq128].mask + 12)
+			= attrib->u.v6.src_addr_mask[3];
+		*(u32 *)(eq_atrb->offset_meq_128[ofst_meq128].value + 0)
+			= attrib->u.v6.src_addr[0];
+		*(u32 *)(eq_atrb->offset_meq_128[ofst_meq128].value + 4)
+			= attrib->u.v6.src_addr[1];
+		*(u32 *)(eq_atrb->offset_meq_128[ofst_meq128].value + 8)
+			= attrib->u.v6.src_addr[2];
+		*(u32 *)(eq_atrb->offset_meq_128[ofst_meq128].value +
+				12) = attrib->u.v6.src_addr[3];
+		ofst_meq128++;
+	}
+
+	if (attrib->attrib_mask & IPA_FLT_DST_ADDR) {
+		if (IPA_IS_RAN_OUT_OF_EQ(ipa3_0_ofst_meq128, ofst_meq128)) {
+			IPAHAL_ERR("ran out of meq128 eq\n");
+			return -EPERM;
+		}
+		*en_rule |= IPA_GET_RULE_EQ_BIT_PTRN(
+			ipa3_0_ofst_meq128[ofst_meq128]);
+		/* 24 => offset of dst addr in the IPv6 header */
+		eq_atrb->offset_meq_128[ofst_meq128].offset = 24;
+		/* use the same word order as in ipa v2 */
+		*(u32 *)(eq_atrb->offset_meq_128[ofst_meq128].mask + 0)
+			= attrib->u.v6.dst_addr_mask[0];
+		*(u32 *)(eq_atrb->offset_meq_128[ofst_meq128].mask + 4)
+			= attrib->u.v6.dst_addr_mask[1];
+		*(u32 *)(eq_atrb->offset_meq_128[ofst_meq128].mask + 8)
+			= attrib->u.v6.dst_addr_mask[2];
+		*(u32 *)(eq_atrb->offset_meq_128[ofst_meq128].mask + 12)
+			= attrib->u.v6.dst_addr_mask[3];
+		*(u32 *)(eq_atrb->offset_meq_128[ofst_meq128].value + 0)
+			= attrib->u.v6.dst_addr[0];
+		*(u32 *)(eq_atrb->offset_meq_128[ofst_meq128].value + 4)
+			= attrib->u.v6.dst_addr[1];
+		*(u32 *)(eq_atrb->offset_meq_128[ofst_meq128].value + 8)
+			= attrib->u.v6.dst_addr[2];
+		*(u32 *)(eq_atrb->offset_meq_128[ofst_meq128].value +
+				12) = attrib->u.v6.dst_addr[3];
+		ofst_meq128++;
+	}
+
+	if (attrib->attrib_mask & IPA_FLT_TOS_MASKED) {
+		if (IPA_IS_RAN_OUT_OF_EQ(ipa3_0_ofst_meq128, ofst_meq128)) {
+			IPAHAL_ERR("ran out of meq128 eq\n");
+			return -EPERM;
+		}
+		*en_rule |= IPA_GET_RULE_EQ_BIT_PTRN(
+			ipa3_0_ofst_meq128[ofst_meq128]);
+		eq_atrb->offset_meq_128[ofst_meq128].offset = 0;
+		/* traffic class lives in bits 20.. of the last mask word,
+		 * matching the shifts used by the hw-rule-body generator
+		 */
+		memset(eq_atrb->offset_meq_128[ofst_meq128].mask, 0, 12);
+		*(u32 *)(eq_atrb->offset_meq_128[ofst_meq128].mask + 12)
+			= attrib->tos_mask << 20;
+		memset(eq_atrb->offset_meq_128[ofst_meq128].value, 0, 12);
+		*(u32 *)(eq_atrb->offset_meq_128[ofst_meq128].value +
+				12) = attrib->tos_value << 20;
+		ofst_meq128++;
+	}
+
+	if (attrib->attrib_mask & IPA_FLT_MAC_DST_ADDR_ETHER_II) {
+		if (IPA_IS_RAN_OUT_OF_EQ(ipa3_0_ofst_meq128, ofst_meq128)) {
+			IPAHAL_ERR("ran out of meq128 eq\n");
+			return -EPERM;
+		}
+		*en_rule |= IPA_GET_RULE_EQ_BIT_PTRN(
+			ipa3_0_ofst_meq128[ofst_meq128]);
+
+		/* -14 => offset of dst mac addr in Ethernet II hdr */
+		ipa_flt_generate_mac_addr_eq(eq_atrb, -14,
+			attrib->dst_mac_addr_mask, attrib->dst_mac_addr,
+			ofst_meq128);
+
+		ofst_meq128++;
+	}
+
+	if (attrib->attrib_mask & IPA_FLT_MAC_SRC_ADDR_ETHER_II) {
+		if (IPA_IS_RAN_OUT_OF_EQ(ipa3_0_ofst_meq128, ofst_meq128)) {
+			IPAHAL_ERR("ran out of meq128 eq\n");
+			return -EPERM;
+		}
+		*en_rule |= IPA_GET_RULE_EQ_BIT_PTRN(
+			ipa3_0_ofst_meq128[ofst_meq128]);
+
+		/* -8 => offset of src mac addr in Ethernet II hdr */
+		ipa_flt_generate_mac_addr_eq(eq_atrb, -8,
+			attrib->src_mac_addr_mask, attrib->src_mac_addr,
+			ofst_meq128);
+
+		ofst_meq128++;
+	}
+
+	if (attrib->attrib_mask & IPA_FLT_MAC_DST_ADDR_802_3) {
+		if (IPA_IS_RAN_OUT_OF_EQ(ipa3_0_ofst_meq128, ofst_meq128)) {
+			IPAHAL_ERR("ran out of meq128 eq\n");
+			return -EPERM;
+		}
+		*en_rule |= IPA_GET_RULE_EQ_BIT_PTRN(
+			ipa3_0_ofst_meq128[ofst_meq128]);
+
+		/* -22 => offset of dst mac addr in 802.3 hdr */
+		ipa_flt_generate_mac_addr_eq(eq_atrb, -22,
+			attrib->dst_mac_addr_mask, attrib->dst_mac_addr,
+			ofst_meq128);
+
+		ofst_meq128++;
+	}
+
+	if (attrib->attrib_mask & IPA_FLT_MAC_SRC_ADDR_802_3) {
+		if (IPA_IS_RAN_OUT_OF_EQ(ipa3_0_ofst_meq128, ofst_meq128)) {
+			IPAHAL_ERR("ran out of meq128 eq\n");
+			return -EPERM;
+		}
+		*en_rule |= IPA_GET_RULE_EQ_BIT_PTRN(
+			ipa3_0_ofst_meq128[ofst_meq128]);
+
+		/* -16 => offset of src mac addr in 802.3 hdr */
+		ipa_flt_generate_mac_addr_eq(eq_atrb, -16,
+			attrib->src_mac_addr_mask, attrib->src_mac_addr,
+			ofst_meq128);
+
+		ofst_meq128++;
+	}
+
+	if (attrib->attrib_mask & IPA_FLT_MAC_ETHER_TYPE) {
+		if (IPA_IS_RAN_OUT_OF_EQ(ipa3_0_ofst_meq32, ofst_meq32)) {
+			IPAHAL_ERR("ran out of meq32 eq\n");
+			return -EPERM;
+		}
+		*en_rule |= IPA_GET_RULE_EQ_BIT_PTRN(
+			ipa3_0_ofst_meq32[ofst_meq32]);
+		/* -2 => offset of ether type in the L2 hdr */
+		eq_atrb->offset_meq_32[ofst_meq32].offset = -2;
+		eq_atrb->offset_meq_32[ofst_meq32].mask =
+			htons(attrib->ether_type);
+		eq_atrb->offset_meq_32[ofst_meq32].value =
+			htons(attrib->ether_type);
+		ofst_meq32++;
+	}
+
+	if (attrib->attrib_mask & IPA_FLT_TYPE) {
+		if (IPA_IS_RAN_OUT_OF_EQ(ipa3_0_ihl_ofst_meq32,
+			ihl_ofst_meq32)) {
+			IPAHAL_ERR("ran out of ihl_meq32 eq\n");
+			return -EPERM;
+		}
+		*en_rule |= IPA_GET_RULE_EQ_BIT_PTRN(
+			ipa3_0_ihl_ofst_meq32[ihl_ofst_meq32]);
+		/* 0 => offset of type after the v6 header */
+		eq_atrb->ihl_offset_meq_32[ihl_ofst_meq32].offset = 0;
+		eq_atrb->ihl_offset_meq_32[ihl_ofst_meq32].mask = 0xFF;
+		eq_atrb->ihl_offset_meq_32[ihl_ofst_meq32].value =
+			attrib->type;
+		ihl_ofst_meq32++;
+	}
+
+	if (attrib->attrib_mask & IPA_FLT_CODE) {
+		if (IPA_IS_RAN_OUT_OF_EQ(ipa3_0_ihl_ofst_meq32,
+			ihl_ofst_meq32)) {
+			IPAHAL_ERR("ran out of ihl_meq32 eq\n");
+			return -EPERM;
+		}
+		*en_rule |= IPA_GET_RULE_EQ_BIT_PTRN(
+			ipa3_0_ihl_ofst_meq32[ihl_ofst_meq32]);
+		/* 1 => offset of code after the v6 header */
+		eq_atrb->ihl_offset_meq_32[ihl_ofst_meq32].offset = 1;
+		eq_atrb->ihl_offset_meq_32[ihl_ofst_meq32].mask = 0xFF;
+		eq_atrb->ihl_offset_meq_32[ihl_ofst_meq32].value =
+			attrib->code;
+		ihl_ofst_meq32++;
+	}
+
+	if (attrib->attrib_mask & IPA_FLT_SPI) {
+		if (IPA_IS_RAN_OUT_OF_EQ(ipa3_0_ihl_ofst_meq32,
+			ihl_ofst_meq32)) {
+			IPAHAL_ERR("ran out of ihl_meq32 eq\n");
+			return -EPERM;
+		}
+		*en_rule |= IPA_GET_RULE_EQ_BIT_PTRN(
+			ipa3_0_ihl_ofst_meq32[ihl_ofst_meq32]);
+		/* full 32-bit compare of the SPI right after the v6 header */
+		eq_atrb->ihl_offset_meq_32[ihl_ofst_meq32].offset = 0;
+		eq_atrb->ihl_offset_meq_32[ihl_ofst_meq32].mask =
+			0xFFFFFFFF;
+		eq_atrb->ihl_offset_meq_32[ihl_ofst_meq32].value =
+			attrib->spi;
+		ihl_ofst_meq32++;
+	}
+
+	if (attrib->attrib_mask & IPA_FLT_META_DATA) {
+		*en_rule |= IPA_GET_RULE_EQ_BIT_PTRN(
+			IPA_METADATA_COMPARE);
+		eq_atrb->metadata_meq32_present = 1;
+		eq_atrb->metadata_meq32.offset = 0;
+		eq_atrb->metadata_meq32.mask = attrib->meta_data_mask;
+		eq_atrb->metadata_meq32.value = attrib->meta_data;
+	}
+
+	if (attrib->attrib_mask & IPA_FLT_SRC_PORT) {
+		if (IPA_IS_RAN_OUT_OF_EQ(ipa3_0_ihl_ofst_rng16,
+			ihl_ofst_rng16)) {
+			IPAHAL_ERR("ran out of ihl_rng16 eq\n");
+			return -EPERM;
+		}
+		*en_rule |= IPA_GET_RULE_EQ_BIT_PTRN(
+			ipa3_0_ihl_ofst_rng16[ihl_ofst_rng16]);
+		/* exact port match is encoded as a degenerate range */
+		eq_atrb->ihl_offset_range_16[ihl_ofst_rng16].offset = 0;
+		eq_atrb->ihl_offset_range_16[ihl_ofst_rng16].range_low
+			= attrib->src_port;
+		eq_atrb->ihl_offset_range_16[ihl_ofst_rng16].range_high
+			= attrib->src_port;
+		ihl_ofst_rng16++;
+	}
+
+	if (attrib->attrib_mask & IPA_FLT_DST_PORT) {
+		if (IPA_IS_RAN_OUT_OF_EQ(ipa3_0_ihl_ofst_rng16,
+			ihl_ofst_rng16)) {
+			IPAHAL_ERR("ran out of ihl_rng16 eq\n");
+			return -EPERM;
+		}
+		*en_rule |= IPA_GET_RULE_EQ_BIT_PTRN(
+			ipa3_0_ihl_ofst_rng16[ihl_ofst_rng16]);
+		eq_atrb->ihl_offset_range_16[ihl_ofst_rng16].offset = 2;
+		eq_atrb->ihl_offset_range_16[ihl_ofst_rng16].range_low
+			= attrib->dst_port;
+		eq_atrb->ihl_offset_range_16[ihl_ofst_rng16].range_high
+			= attrib->dst_port;
+		ihl_ofst_rng16++;
+	}
+
+	if (attrib->attrib_mask & IPA_FLT_SRC_PORT_RANGE) {
+		if (IPA_IS_RAN_OUT_OF_EQ(ipa3_0_ihl_ofst_rng16,
+			ihl_ofst_rng16)) {
+			IPAHAL_ERR("ran out of ihl_rng16 eq\n");
+			return -EPERM;
+		}
+		if (attrib->src_port_hi < attrib->src_port_lo) {
+			IPAHAL_ERR("bad src port range param\n");
+			return -EPERM;
+		}
+		*en_rule |= IPA_GET_RULE_EQ_BIT_PTRN(
+			ipa3_0_ihl_ofst_rng16[ihl_ofst_rng16]);
+		/* 0 => offset of src port after the v6 header */
+		eq_atrb->ihl_offset_range_16[ihl_ofst_rng16].offset = 0;
+		eq_atrb->ihl_offset_range_16[ihl_ofst_rng16].range_low
+			= attrib->src_port_lo;
+		eq_atrb->ihl_offset_range_16[ihl_ofst_rng16].range_high
+			= attrib->src_port_hi;
+		ihl_ofst_rng16++;
+	}
+
+	if (attrib->attrib_mask & IPA_FLT_DST_PORT_RANGE) {
+		if (IPA_IS_RAN_OUT_OF_EQ(ipa3_0_ihl_ofst_rng16,
+			ihl_ofst_rng16)) {
+			IPAHAL_ERR("ran out of ihl_rng16 eq\n");
+			return -EPERM;
+		}
+		if (attrib->dst_port_hi < attrib->dst_port_lo) {
+			IPAHAL_ERR("bad dst port range param\n");
+			return -EPERM;
+		}
+		*en_rule |= IPA_GET_RULE_EQ_BIT_PTRN(
+			ipa3_0_ihl_ofst_rng16[ihl_ofst_rng16]);
+		/* 2 => offset of dst port after the v6 header */
+		eq_atrb->ihl_offset_range_16[ihl_ofst_rng16].offset = 2;
+		eq_atrb->ihl_offset_range_16[ihl_ofst_rng16].range_low
+			= attrib->dst_port_lo;
+		eq_atrb->ihl_offset_range_16[ihl_ofst_rng16].range_high
+			= attrib->dst_port_hi;
+		ihl_ofst_rng16++;
+	}
+
+	if (attrib->attrib_mask & IPA_FLT_FLOW_LABEL) {
+		*en_rule |= IPA_GET_RULE_EQ_BIT_PTRN(IPA_FL_EQ);
+		eq_atrb->fl_eq_present = 1;
+		/* NOTE(review): fl_eq is stored unmasked here; the body
+		 * serializer applies the 20-bit (& 0xFFFFF) mask when the
+		 * eq is written out - confirm no other consumer relies on
+		 * the high bits being clear
+		 */
+		eq_atrb->fl_eq = attrib->u.v6.flow_label;
+	}
+
+	if (attrib->attrib_mask & IPA_FLT_FRAGMENT) {
+		*en_rule |= IPA_GET_RULE_EQ_BIT_PTRN(
+			IPA_IS_FRAG);
+		/* field is named ipv4_frag_eq_present but serves as the
+		 * generic frag-eq flag; reused for v6 as well
+		 */
+		eq_atrb->ipv4_frag_eq_present = 1;
+	}
+
+	/* publish the bitmap and the per-class slot usage counters */
+	eq_atrb->rule_eq_bitmap = *en_rule;
+	eq_atrb->num_offset_meq_32 = ofst_meq32;
+	eq_atrb->num_ihl_offset_range_16 = ihl_ofst_rng16;
+	eq_atrb->num_ihl_offset_meq_32 = ihl_ofst_meq32;
+	eq_atrb->num_offset_meq_128 = ofst_meq128;
+
+	return 0;
+}
+
+/*
+ * ipa_fltrt_parse_hw_rule_eq() - parse the equation part of a H/W flt/rt rule
+ * @addr: start of the H/W rule (header included)
+ * @hdr_sz: size of the rule header; equation data follows it
+ * @atrb: out param - parsed equations. Caller pre-sets rule_eq_bitmap.
+ * @rule_size: out param - total rule size in bytes, aligned to the start
+ *  of the next rule
+ *
+ * The H/W layout splits each rule's equations into an "extra" byte area
+ * located right after the header, and a "rest" area that follows the extra
+ * words. The eq bitmap is first decoded into presence flags/counters, then
+ * both areas are walked in the fixed order below to fill in the values.
+ */
+static int ipa_fltrt_parse_hw_rule_eq(u8 *addr, u32 hdr_sz,
+	struct ipa_ipfltri_rule_eq *atrb, u32 *rule_size)
+{
+	u16 eq_bitmap;
+	int extra_bytes;
+	u8 *extra;
+	u8 *rest;
+	int i;
+	u8 dummy_extra_wrd;
+
+	if (!addr || !atrb || !rule_size) {
+		IPAHAL_ERR("Input error: addr=%p atrb=%p rule_size=%p\n",
+			addr, atrb, rule_size);
+		return -EINVAL;
+	}
+
+	eq_bitmap = atrb->rule_eq_bitmap;
+
+	IPAHAL_DBG("eq_bitmap=0x%x\n", eq_bitmap);
+
+	/* decode the bitmap into presence flags and per-type counters */
+	if (eq_bitmap & IPA_GET_RULE_EQ_BIT_PTRN(IPA_TOS_EQ))
+		atrb->tos_eq_present = true;
+	if (eq_bitmap & IPA_GET_RULE_EQ_BIT_PTRN(IPA_PROTOCOL_EQ))
+		atrb->protocol_eq_present = true;
+	if (eq_bitmap & IPA_GET_RULE_EQ_BIT_PTRN(IPA_TC_EQ))
+		atrb->tc_eq_present = true;
+	if (eq_bitmap & IPA_GET_RULE_EQ_BIT_PTRN(IPA_OFFSET_MEQ128_0))
+		atrb->num_offset_meq_128++;
+	if (eq_bitmap & IPA_GET_RULE_EQ_BIT_PTRN(IPA_OFFSET_MEQ128_1))
+		atrb->num_offset_meq_128++;
+	if (eq_bitmap & IPA_GET_RULE_EQ_BIT_PTRN(IPA_OFFSET_MEQ32_0))
+		atrb->num_offset_meq_32++;
+	if (eq_bitmap & IPA_GET_RULE_EQ_BIT_PTRN(IPA_OFFSET_MEQ32_1))
+		atrb->num_offset_meq_32++;
+	if (eq_bitmap & IPA_GET_RULE_EQ_BIT_PTRN(IPA_IHL_OFFSET_MEQ32_0))
+		atrb->num_ihl_offset_meq_32++;
+	if (eq_bitmap & IPA_GET_RULE_EQ_BIT_PTRN(IPA_IHL_OFFSET_MEQ32_1))
+		atrb->num_ihl_offset_meq_32++;
+	if (eq_bitmap & IPA_GET_RULE_EQ_BIT_PTRN(IPA_METADATA_COMPARE))
+		atrb->metadata_meq32_present = true;
+	if (eq_bitmap & IPA_GET_RULE_EQ_BIT_PTRN(IPA_IHL_OFFSET_RANGE16_0))
+		atrb->num_ihl_offset_range_16++;
+	if (eq_bitmap & IPA_GET_RULE_EQ_BIT_PTRN(IPA_IHL_OFFSET_RANGE16_1))
+		atrb->num_ihl_offset_range_16++;
+	if (eq_bitmap & IPA_GET_RULE_EQ_BIT_PTRN(IPA_IHL_OFFSET_EQ_32))
+		atrb->ihl_offset_eq_32_present = true;
+	if (eq_bitmap & IPA_GET_RULE_EQ_BIT_PTRN(IPA_IHL_OFFSET_EQ_16))
+		atrb->ihl_offset_eq_16_present = true;
+	if (eq_bitmap & IPA_GET_RULE_EQ_BIT_PTRN(IPA_FL_EQ))
+		atrb->fl_eq_present = true;
+	if (eq_bitmap & IPA_GET_RULE_EQ_BIT_PTRN(IPA_IS_FRAG))
+		atrb->ipv4_frag_eq_present = true;
+
+	extra_bytes = ipa_fltrt_calc_extra_wrd_bytes(atrb);
+	/* only 3 eq does not have extra word param, 13 out of 16 is the number
+	 * of equations that needs extra word param
+	 */
+	if (extra_bytes > 13) {
+		IPAHAL_ERR("too much extra bytes\n");
+		return -EPERM;
+	} else if (extra_bytes > IPA3_0_HW_TBL_HDR_WIDTH) {
+		/* two extra words */
+		extra = addr + hdr_sz;
+		rest = extra + IPA3_0_HW_TBL_HDR_WIDTH * 2;
+	} else if (extra_bytes > 0) {
+		/* single extra word */
+		extra = addr + hdr_sz;
+		rest = extra + IPA3_0_HW_TBL_HDR_WIDTH;
+	} else {
+		/* no extra words; point extra at a dummy byte so the
+		 * unconditional walk below stays valid (nothing reads it)
+		 */
+		dummy_extra_wrd = 0;
+		extra = &dummy_extra_wrd;
+		rest = addr + hdr_sz;
+	}
+	IPAHAL_DBG("addr=0x%p extra=0x%p rest=0x%p\n", addr, extra, rest);
+
+	/* walk the extra/rest areas in the same fixed order H/W wrote them */
+	if (atrb->tos_eq_present)
+		atrb->tos_eq = *extra++;
+	if (atrb->protocol_eq_present)
+		atrb->protocol_eq = *extra++;
+	if (atrb->tc_eq_present)
+		atrb->tc_eq = *extra++;
+
+	/* meq128: mask/value are interleaved 8 bytes at a time in "rest" */
+	if (atrb->num_offset_meq_128 > 0) {
+		atrb->offset_meq_128[0].offset = *extra++;
+		for (i = 0; i < 8; i++)
+			atrb->offset_meq_128[0].mask[i] = *rest++;
+		for (i = 0; i < 8; i++)
+			atrb->offset_meq_128[0].value[i] = *rest++;
+		for (i = 8; i < 16; i++)
+			atrb->offset_meq_128[0].mask[i] = *rest++;
+		for (i = 8; i < 16; i++)
+			atrb->offset_meq_128[0].value[i] = *rest++;
+	}
+	if (atrb->num_offset_meq_128 > 1) {
+		atrb->offset_meq_128[1].offset = *extra++;
+		for (i = 0; i < 8; i++)
+			atrb->offset_meq_128[1].mask[i] = *rest++;
+		for (i = 0; i < 8; i++)
+			atrb->offset_meq_128[1].value[i] = *rest++;
+		for (i = 8; i < 16; i++)
+			atrb->offset_meq_128[1].mask[i] = *rest++;
+		for (i = 8; i < 16; i++)
+			atrb->offset_meq_128[1].value[i] = *rest++;
+	}
+
+	if (atrb->num_offset_meq_32 > 0) {
+		atrb->offset_meq_32[0].offset = *extra++;
+		atrb->offset_meq_32[0].mask = *((u32 *)rest);
+		rest += 4;
+		atrb->offset_meq_32[0].value = *((u32 *)rest);
+		rest += 4;
+	}
+	if (atrb->num_offset_meq_32 > 1) {
+		atrb->offset_meq_32[1].offset = *extra++;
+		atrb->offset_meq_32[1].mask = *((u32 *)rest);
+		rest += 4;
+		atrb->offset_meq_32[1].value = *((u32 *)rest);
+		rest += 4;
+	}
+
+	if (atrb->num_ihl_offset_meq_32 > 0) {
+		atrb->ihl_offset_meq_32[0].offset = *extra++;
+		atrb->ihl_offset_meq_32[0].mask = *((u32 *)rest);
+		rest += 4;
+		atrb->ihl_offset_meq_32[0].value = *((u32 *)rest);
+		rest += 4;
+	}
+	if (atrb->num_ihl_offset_meq_32 > 1) {
+		atrb->ihl_offset_meq_32[1].offset = *extra++;
+		atrb->ihl_offset_meq_32[1].mask = *((u32 *)rest);
+		rest += 4;
+		atrb->ihl_offset_meq_32[1].value = *((u32 *)rest);
+		rest += 4;
+	}
+
+	/* metadata compare consumes no extra byte, only "rest" words */
+	if (atrb->metadata_meq32_present) {
+		atrb->metadata_meq32.mask = *((u32 *)rest);
+		rest += 4;
+		atrb->metadata_meq32.value = *((u32 *)rest);
+		rest += 4;
+	}
+
+	/* range16: high half-word precedes low half-word in memory */
+	if (atrb->num_ihl_offset_range_16 > 0) {
+		atrb->ihl_offset_range_16[0].offset = *extra++;
+		atrb->ihl_offset_range_16[0].range_high = *((u16 *)rest);
+		rest += 2;
+		atrb->ihl_offset_range_16[0].range_low = *((u16 *)rest);
+		rest += 2;
+	}
+	if (atrb->num_ihl_offset_range_16 > 1) {
+		atrb->ihl_offset_range_16[1].offset = *extra++;
+		atrb->ihl_offset_range_16[1].range_high = *((u16 *)rest);
+		rest += 2;
+		atrb->ihl_offset_range_16[1].range_low = *((u16 *)rest);
+		rest += 2;
+	}
+
+	if (atrb->ihl_offset_eq_32_present) {
+		atrb->ihl_offset_eq_32.offset = *extra++;
+		atrb->ihl_offset_eq_32.value = *((u32 *)rest);
+		rest += 4;
+	}
+
+	/* value is 16 bit but the slot is a full word - skip 2 pad bytes */
+	if (atrb->ihl_offset_eq_16_present) {
+		atrb->ihl_offset_eq_16.offset = *extra++;
+		atrb->ihl_offset_eq_16.value = *((u16 *)rest);
+		rest += 4;
+	}
+
+	/* flow label is the low 20 bits of the word */
+	if (atrb->fl_eq_present) {
+		atrb->fl_eq = *((u32 *)rest);
+		atrb->fl_eq &= 0xfffff;
+		rest += 4;
+	}
+
+	IPAHAL_DBG("before rule alignment rest=0x%p\n", rest);
+	/* align "rest" up to the next rule start.
+	 * NOTE(review): assumes IPA3_0_HW_RULE_START_ALIGNMENT is a mask
+	 * (alignment - 1), making this the standard align-up idiom - confirm
+	 */
+	rest = (u8 *)(((unsigned long)rest + IPA3_0_HW_RULE_START_ALIGNMENT) &
+		~IPA3_0_HW_RULE_START_ALIGNMENT);
+	IPAHAL_DBG("after rule alignment rest=0x%p\n", rest);
+
+	*rule_size = rest - addr;
+	IPAHAL_DBG("rule_size=0x%x\n", *rule_size);
+
+	return 0;
+}
+
+/* Parse a single H/W formatted routing rule starting at @addr into @rule */
+static int ipa_rt_parse_hw_rule(u8 *addr, struct ipahal_rt_rule_entry *rule)
+{
+	struct ipa3_0_rt_rule_hw_hdr *hdr;
+	struct ipa_ipfltri_rule_eq *eq_info;
+
+	IPAHAL_DBG("Entry\n");
+
+	hdr = (struct ipa3_0_rt_rule_hw_hdr *)addr;
+	eq_info = &rule->eq_attrib;
+
+	IPAHAL_DBG("read hdr 0x%llx\n", hdr->u.word);
+
+	/* an all-zero header is the table terminator (empty table) */
+	if (!hdr->u.word) {
+		rule->rule_size = 0;
+		return 0;
+	}
+
+	rule->dst_pipe_idx = hdr->u.hdr.pipe_dest_idx;
+	if (hdr->u.hdr.proc_ctx) {
+		/* proc-ctx offsets are stored in 32B units, raw in 4B units */
+		rule->hdr_type = IPAHAL_RT_RULE_HDR_PROC_CTX;
+		rule->hdr_ofst = hdr->u.hdr.hdr_offset << 5;
+	} else {
+		rule->hdr_type = IPAHAL_RT_RULE_HDR_RAW;
+		rule->hdr_ofst = hdr->u.hdr.hdr_offset << 2;
+	}
+	rule->hdr_lcl = !hdr->u.hdr.system;
+
+	rule->priority = hdr->u.hdr.priority;
+	rule->retain_hdr = hdr->u.hdr.retain_hdr;
+	rule->id = hdr->u.hdr.rule_id;
+
+	eq_info->rule_eq_bitmap = hdr->u.hdr.en_rule;
+	return ipa_fltrt_parse_hw_rule_eq(addr, sizeof(*hdr),
+		eq_info, &rule->rule_size);
+}
+
+/* Parse a single H/W formatted filter rule starting at @addr into @rule */
+static int ipa_flt_parse_hw_rule(u8 *addr, struct ipahal_flt_rule_entry *rule)
+{
+	struct ipa3_0_flt_rule_hw_hdr *hdr;
+	struct ipa_ipfltri_rule_eq *eq_info;
+
+	IPAHAL_DBG("Entry\n");
+
+	hdr = (struct ipa3_0_flt_rule_hw_hdr *)addr;
+	eq_info = &rule->rule.eq_attrib;
+
+	/* an all-zero header is the table terminator (empty table) */
+	if (!hdr->u.word) {
+		rule->rule_size = 0;
+		return 0;
+	}
+
+	switch (hdr->u.hdr.action) {
+	case 0x0:
+		rule->rule.action = IPA_PASS_TO_ROUTING;
+		break;
+	case 0x1:
+		rule->rule.action = IPA_PASS_TO_SRC_NAT;
+		break;
+	case 0x2:
+		rule->rule.action = IPA_PASS_TO_DST_NAT;
+		break;
+	case 0x3:
+		rule->rule.action = IPA_PASS_TO_EXCEPTION;
+		break;
+	default:
+		/* keep the raw H/W value so callers can inspect it */
+		IPAHAL_ERR("Invalid Rule Action %d\n", hdr->u.hdr.action);
+		WARN_ON(1);
+		rule->rule.action = hdr->u.hdr.action;
+		break;
+	}
+
+	rule->rule.rt_tbl_idx = hdr->u.hdr.rt_tbl_idx;
+	rule->rule.retain_hdr = hdr->u.hdr.retain_hdr;
+	rule->priority = hdr->u.hdr.priority;
+	rule->id = hdr->u.hdr.rule_id;
+
+	eq_info->rule_eq_bitmap = hdr->u.hdr.en_rule;
+	rule->rule.eq_attrib_type = 1;
+	return ipa_fltrt_parse_hw_rule_eq(addr, sizeof(*hdr),
+		eq_info, &rule->rule_size);
+}
+
+/*
+ * ipa_fltrt_obj_validity_check() - validate an explicitly overridden
+ * FLT/RT info entry
+ * @ipa_ver: index of the entry in ipahal_fltrt_objs[] to validate
+ *
+ * Every mandatory field/CB left zero is logged and triggers a WARN;
+ * the driver cannot operate correctly with such an entry.
+ */
+static void ipa_fltrt_obj_validity_check(int ipa_ver)
+{
+	struct ipahal_fltrt_obj *obj = &ipahal_fltrt_objs[ipa_ver];
+
+	if (!obj->tbl_width) {
+		IPAHAL_ERR("Zero tbl width ipaver=%d\n", ipa_ver);
+		WARN_ON(1);
+	}
+	if (!obj->sysaddr_alignment) {
+		IPAHAL_ERR("No tbl sysaddr alignment ipaver=%d\n", ipa_ver);
+		WARN_ON(1);
+	}
+	if (!obj->lcladdr_alignment) {
+		IPAHAL_ERR("No tbl lcladdr alignment ipaver=%d\n", ipa_ver);
+		WARN_ON(1);
+	}
+	if (!obj->blk_sz_alignment) {
+		IPAHAL_ERR("No blk sz alignment ipaver=%d\n", ipa_ver);
+		WARN_ON(1);
+	}
+	if (!obj->rule_start_alignment) {
+		IPAHAL_ERR("No rule start alignment ipaver=%d\n", ipa_ver);
+		WARN_ON(1);
+	}
+	if (!obj->tbl_hdr_width) {
+		IPAHAL_ERR("Zero tbl hdr width ipaver=%d\n", ipa_ver);
+		WARN_ON(1);
+	}
+	if (!obj->tbl_addr_mask) {
+		/* fix: original log wrongly repeated "Zero tbl hdr width" */
+		IPAHAL_ERR("Zero tbl addr mask ipaver=%d\n", ipa_ver);
+		WARN_ON(1);
+	}
+	if (obj->rule_id_bit_len < 2) {
+		IPAHAL_ERR("Too little bits for rule_id ipaver=%d\n", ipa_ver);
+		WARN_ON(1);
+	}
+	if (!obj->rule_buf_size) {
+		IPAHAL_ERR("zero rule buf size ipaver=%d\n", ipa_ver);
+		WARN_ON(1);
+	}
+	if (!obj->write_val_to_hdr) {
+		IPAHAL_ERR("No write_val_to_hdr CB ipaver=%d\n", ipa_ver);
+		WARN_ON(1);
+	}
+	if (!obj->create_flt_bitmap) {
+		IPAHAL_ERR("No create_flt_bitmap CB ipaver=%d\n", ipa_ver);
+		WARN_ON(1);
+	}
+	if (!obj->create_tbl_addr) {
+		IPAHAL_ERR("No create_tbl_addr CB ipaver=%d\n", ipa_ver);
+		WARN_ON(1);
+	}
+	if (!obj->parse_tbl_addr) {
+		IPAHAL_ERR("No parse_tbl_addr CB ipaver=%d\n", ipa_ver);
+		WARN_ON(1);
+	}
+	if (!obj->rt_generate_hw_rule) {
+		IPAHAL_ERR("No rt_generate_hw_rule CB ipaver=%d\n", ipa_ver);
+		WARN_ON(1);
+	}
+	if (!obj->flt_generate_hw_rule) {
+		IPAHAL_ERR("No flt_generate_hw_rule CB ipaver=%d\n", ipa_ver);
+		WARN_ON(1);
+	}
+	if (!obj->flt_generate_eq) {
+		IPAHAL_ERR("No flt_generate_eq CB ipaver=%d\n", ipa_ver);
+		WARN_ON(1);
+	}
+	if (!obj->rt_parse_hw_rule) {
+		IPAHAL_ERR("No rt_parse_hw_rule CB ipaver=%d\n", ipa_ver);
+		WARN_ON(1);
+	}
+	if (!obj->flt_parse_hw_rule) {
+		IPAHAL_ERR("No flt_parse_hw_rule CB ipaver=%d\n", ipa_ver);
+		WARN_ON(1);
+	}
+}
+
+/*
+ * ipahal_fltrt_init() - Build the FLT/RT information table
+ * See ipahal_fltrt_objs[] comments
+ * @ipa_hw_type: the H/W type in use; entries up to it are populated
+ *
+ * Each H/W version entry either inherits from the previous version (when
+ * left all-zero) or is validated as an explicit override. Also allocates
+ * the shared empty table in system memory.
+ *
+ * Note: As global variables are initialized with zero, any un-overridden
+ * register entry will be zero. By this we recognize them.
+ *
+ * Return: 0 on success, negative errno otherwise
+ */
+int ipahal_fltrt_init(enum ipa_hw_type ipa_hw_type)
+{
+	struct ipahal_fltrt_obj zero_obj;
+	int i;
+	struct ipa_mem_buffer *mem;
+	int rc;
+
+	IPAHAL_DBG("Entry - HW_TYPE=%d\n", ipa_hw_type);
+
+	if (ipa_hw_type >= IPA_HW_MAX) {
+		IPAHAL_ERR("Invalid H/W type\n");
+		return -EFAULT;
+	}
+
+	memset(&zero_obj, 0, sizeof(zero_obj));
+	for (i = IPA_HW_v3_0; i < ipa_hw_type; i++) {
+		if (!memcmp(&ipahal_fltrt_objs[i + 1], &zero_obj,
+			sizeof(struct ipahal_fltrt_obj))) {
+			/* no explicit entry for version i+1 - inherit i's */
+			memcpy(&ipahal_fltrt_objs[i + 1],
+				&ipahal_fltrt_objs[i],
+				sizeof(struct ipahal_fltrt_obj));
+		} else {
+			/* explicitly overridden FLT RT info - check it */
+			ipa_fltrt_obj_validity_check(i + 1);
+		}
+	}
+
+	mem = &ipahal_ctx->empty_fltrt_tbl;
+
+	/* setup an empty table in system memory; This will
+	 * be used, for example, to delete a rt tbl safely
+	 */
+	mem->size = ipahal_fltrt_objs[ipa_hw_type].tbl_width;
+	mem->base = dma_alloc_coherent(ipahal_ctx->ipa_pdev, mem->size,
+		&mem->phys_base, GFP_KERNEL);
+	if (!mem->base) {
+		IPAHAL_ERR("DMA buff alloc fail %d bytes for empty tbl\n",
+			mem->size);
+		return -ENOMEM;
+	}
+
+	if (mem->phys_base &
+		ipahal_fltrt_objs[ipa_hw_type].sysaddr_alignment) {
+		IPAHAL_ERR("Empty table buf is not address aligned 0x%pad\n",
+			&mem->phys_base);
+		rc = -EFAULT;
+		goto clear_empty_tbl;
+	}
+
+	memset(mem->base, 0, mem->size);
+	IPAHAL_DBG("empty table allocated in system memory");
+
+	return 0;
+
+clear_empty_tbl:
+	dma_free_coherent(ipahal_ctx->ipa_pdev, mem->size, mem->base,
+		mem->phys_base);
+	return rc;
+}
+
+/* Release the shared empty table DMA buffer allocated at init time */
+void ipahal_fltrt_destroy(void)
+{
+	struct ipa_mem_buffer *mem;
+
+	IPAHAL_DBG("Entry\n");
+
+	if (!ipahal_ctx)
+		return;
+
+	mem = &ipahal_ctx->empty_fltrt_tbl;
+	if (mem->base)
+		dma_free_coherent(ipahal_ctx->ipa_pdev, mem->size,
+			mem->base, mem->phys_base);
+}
+
+/* Return the width (bytes) of a flt/rt table header entry for this H/W */
+u32 ipahal_get_hw_tbl_hdr_width(void)
+{
+	struct ipahal_fltrt_obj *obj;
+
+	obj = &ipahal_fltrt_objs[ipahal_ctx->hw_type];
+	return obj->tbl_hdr_width;
+}
+
+/* Get the H/W local table (SRAM) address alignment.
+ * Tables headers reference local tables via offsets in SRAM;
+ * this returns the offset alignment that IPA expects.
+ */
+u32 ipahal_get_lcl_tbl_addr_alignment(void)
+{
+	struct ipahal_fltrt_obj *obj;
+
+	obj = &ipahal_fltrt_objs[ipahal_ctx->hw_type];
+	return obj->lcladdr_alignment;
+}
+
+/*
+ * Rule priority distinguishes rule order in the integrated table built
+ * from the hashable and non-hashable tables. A max-priority rule, once
+ * matched, stops IPA from scanning further rules.
+ */
+int ipahal_get_rule_max_priority(void)
+{
+	struct ipahal_fltrt_obj *obj;
+
+	obj = &ipahal_fltrt_objs[ipahal_ctx->hw_type];
+	return obj->rule_max_prio;
+}
+
+/* Given a priority, move it one step lower if the result stays in the
+ * legal range. Priority logic is reversed: 0 is the max priority, so
+ * "lower" means numerically larger.
+ */
+int ipahal_rule_decrease_priority(int *prio)
+{
+	struct ipahal_fltrt_obj *obj;
+	int lowered;
+
+	obj = &ipahal_fltrt_objs[ipahal_ctx->hw_type];
+
+	if (!prio) {
+		IPAHAL_ERR("Invalid Input\n");
+		return -EINVAL;
+	}
+
+	/* Priority logic is reverse. 0 priority considred max priority */
+	if (*prio > obj->rule_min_prio || *prio < obj->rule_max_prio) {
+		IPAHAL_ERR("Invalid given priority %d\n", *prio);
+		return -EINVAL;
+	}
+
+	/* compute first, commit only if still within range */
+	lowered = *prio + 1;
+	if (lowered > obj->rule_min_prio) {
+		IPAHAL_ERR("Cannot decrease priority. Already on min\n");
+		return -EFAULT;
+	}
+
+	*prio = lowered;
+	return 0;
+}
+
+/* Does the given ID represent a rule miss?
+ * The miss ID is the all-ones pattern of rule_id_bit_len bits
+ * (the max ID the bit-pattern can hold).
+ */
+bool ipahal_is_rule_miss_id(u32 id)
+{
+	u32 bit_len;
+
+	bit_len = ipahal_fltrt_objs[ipahal_ctx->hw_type].rule_id_bit_len;
+	return id == (1U << bit_len) - 1;
+}
+
+/* Get a rule ID with only its high bit asserted.
+ * Used e.g. to create groups of IDs according to this bit.
+ */
+u32 ipahal_get_rule_id_hi_bit(void)
+{
+	u32 bit_len;
+
+	bit_len = ipahal_fltrt_objs[ipahal_ctx->hw_type].rule_id_bit_len;
+	return BIT(bit_len - 1);
+}
+
+/* Return the lowest value usable as a rule-id on this H/W */
+u32 ipahal_get_low_rule_id(void)
+{
+	struct ipahal_fltrt_obj *obj;
+
+	obj = &ipahal_fltrt_objs[ipahal_ctx->hw_type];
+	return obj->low_rule_id;
+}
+
+/*
+ * ipahal_rt_generate_empty_img() - Generate empty route image
+ * Creates a routing header buffer for the given tables number;
+ * every table entry is pointed at the shared empty table on DDR.
+ * @tbls_num: Number of tables; each gets an entry in the header
+ * @hash_hdr_size: SRAM buf size of the hash tbls hdr. Used for space check
+ * @nhash_hdr_size: SRAM buf size of the nhash tbls hdr. Used for space check
+ * @mem: mem object that points to DMA mem representing the hdr structure
+ */
+int ipahal_rt_generate_empty_img(u32 tbls_num, u32 hash_hdr_size,
+	u32 nhash_hdr_size, struct ipa_mem_buffer *mem)
+{
+	struct ipahal_fltrt_obj *obj;
+	u64 empty_tbl_addr;
+	int tbl;
+
+	IPAHAL_DBG("Entry\n");
+
+	obj = &ipahal_fltrt_objs[ipahal_ctx->hw_type];
+
+	if (!tbls_num || !nhash_hdr_size || !mem) {
+		IPAHAL_ERR("Input Error: tbls_num=%d nhash_hdr_sz=%d mem=%p\n",
+			tbls_num, nhash_hdr_size, mem);
+		return -EINVAL;
+	}
+	if (obj->support_hash && !hash_hdr_size) {
+		IPAHAL_ERR("Input Error: hash_hdr_sz=%d\n", hash_hdr_size);
+		return -EINVAL;
+	}
+
+	/* each table consumes one hdr-width entry in the SRAM blocks */
+	if (nhash_hdr_size < (tbls_num * obj->tbl_hdr_width)) {
+		IPAHAL_ERR("No enough spc at non-hash hdr blk for all tbls\n");
+		WARN_ON(1);
+		return -EINVAL;
+	}
+	if (obj->support_hash &&
+	    (hash_hdr_size < (tbls_num * obj->tbl_hdr_width))) {
+		IPAHAL_ERR("No enough spc at hash hdr blk for all tbls\n");
+		WARN_ON(1);
+		return -EINVAL;
+	}
+
+	mem->size = tbls_num * obj->tbl_hdr_width;
+	mem->base = dma_alloc_coherent(ipahal_ctx->ipa_pdev, mem->size,
+		&mem->phys_base, GFP_KERNEL);
+	if (!mem->base) {
+		IPAHAL_ERR("fail to alloc DMA buff of size %d\n", mem->size);
+		return -ENOMEM;
+	}
+
+	/* point every header entry at the shared empty system table */
+	empty_tbl_addr = obj->create_tbl_addr(true,
+		ipahal_ctx->empty_fltrt_tbl.phys_base);
+	for (tbl = 0; tbl < tbls_num; tbl++)
+		obj->write_val_to_hdr(empty_tbl_addr,
+			mem->base + tbl * obj->tbl_hdr_width);
+
+	return 0;
+}
+
+/*
+ * ipahal_flt_generate_empty_img() - Generate empty filter image
+ * Creates filter header buffer for the given tables number.
+ * For each table, make it point to the empty table on DDR.
+ * @tbls_num: Number of tables. For each will have an entry in the header
+ * @hash_hdr_size: SRAM buf size of the hash tbls hdr. Used for space check
+ * @nhash_hdr_size: SRAM buf size of the nhash tbls hdr. Used for space check
+ * @ep_bitmap: Bitmap representing the EP that has flt tables. The format
+ *  should be: bit0->EP0, bit1->EP1
+ *  If bitmap is zero -> create tbl without bitmap entry
+ * @mem: mem object that points to DMA mem representing the hdr structure
+ *
+ * Return: 0 on success, negative errno otherwise
+ */
+int ipahal_flt_generate_empty_img(u32 tbls_num, u32 hash_hdr_size,
+	u32 nhash_hdr_size, u64 ep_bitmap, struct ipa_mem_buffer *mem)
+{
+	u64 needed;
+	u64 flt_bitmap;
+	u32 i;
+	u32 first;
+	u64 addr;
+	struct ipahal_fltrt_obj *obj;
+
+	IPAHAL_DBG("Entry - ep_bitmap 0x%llx\n", ep_bitmap);
+
+	obj = &ipahal_fltrt_objs[ipahal_ctx->hw_type];
+
+	if (!tbls_num || !nhash_hdr_size || !mem) {
+		IPAHAL_ERR("Input Error: tbls_num=%d nhash_hdr_sz=%d mem=%p\n",
+			tbls_num, nhash_hdr_size, mem);
+		return -EINVAL;
+	}
+	if (obj->support_hash && !hash_hdr_size) {
+		IPAHAL_ERR("Input Error: hash_hdr_sz=%d\n", hash_hdr_size);
+		return -EINVAL;
+	}
+
+	/*
+	 * fix: compute the needed bytes in unsigned arithmetic instead of
+	 * the original signed "flt_spc" counting. With the old code, a hdr
+	 * block smaller than the bitmap word made flt_spc negative, and the
+	 * signed/unsigned comparison against tbls_num promoted it to a huge
+	 * value - silently bypassing the space check.
+	 * Each table needs one hdr-width entry, plus one bitmap word when
+	 * ep_bitmap is used.
+	 */
+	needed = (u64)tbls_num * obj->tbl_hdr_width;
+	if (ep_bitmap)
+		needed += obj->tbl_hdr_width;
+
+	if (obj->support_hash && hash_hdr_size < needed) {
+		IPAHAL_ERR("space for hash flt hdr is too small\n");
+		WARN_ON(1);
+		return -EPERM;
+	}
+	if (nhash_hdr_size < needed) {
+		IPAHAL_ERR("space for non-hash flt hdr is too small\n");
+		WARN_ON(1);
+		return -EPERM;
+	}
+
+	mem->size = tbls_num * obj->tbl_hdr_width;
+	if (ep_bitmap)
+		mem->size += obj->tbl_hdr_width;
+	mem->base = dma_alloc_coherent(ipahal_ctx->ipa_pdev, mem->size,
+		&mem->phys_base, GFP_KERNEL);
+	if (!mem->base) {
+		IPAHAL_ERR("fail to alloc DMA buff of size %d\n", mem->size);
+		return -ENOMEM;
+	}
+
+	/* bitmap word, when present, occupies the first hdr entry */
+	if (ep_bitmap) {
+		flt_bitmap = obj->create_flt_bitmap(ep_bitmap);
+		IPAHAL_DBG("flt bitmap 0x%llx\n", flt_bitmap);
+		obj->write_val_to_hdr(flt_bitmap, mem->base);
+	}
+
+	addr = obj->create_tbl_addr(true,
+		ipahal_ctx->empty_fltrt_tbl.phys_base);
+
+	/* table entries start after the bitmap word, if any */
+	first = ep_bitmap ? 1 : 0;
+	for (i = first; i < first + tbls_num; i++)
+		obj->write_val_to_hdr(addr,
+			mem->base + i * obj->tbl_hdr_width);
+
+	return 0;
+}
+
+/*
+ * ipa_fltrt_alloc_init_tbl_hdr() - allocate and initialize buffers for
+ * flt/rt tables headers to be filled into sram. Init each table to point
+ * to empty system table
+ * @params: Allocate IN and OUT params
+ *
+ * Return: 0 on success, negative on failure
+ */
+static int ipa_fltrt_alloc_init_tbl_hdr(
+	struct ipahal_fltrt_alloc_imgs_params *params)
+{
+	u64 addr;
+	int i;
+	struct ipahal_fltrt_obj *obj;
+
+	obj = &ipahal_fltrt_objs[ipahal_ctx->hw_type];
+
+	if (!params) {
+		IPAHAL_ERR("Input error: params=%p\n", params);
+		return -EINVAL;
+	}
+
+	params->nhash_hdr.size = params->tbls_num * obj->tbl_hdr_width;
+	params->nhash_hdr.base = dma_alloc_coherent(ipahal_ctx->ipa_pdev,
+		params->nhash_hdr.size,
+		&params->nhash_hdr.phys_base, GFP_KERNEL);
+	/*
+	 * fix: test the returned base pointer. The original tested
+	 * "!params->nhash_hdr.size" here, so an allocation failure was
+	 * never detected and a NULL base was dereferenced downstream.
+	 */
+	if (!params->nhash_hdr.base) {
+		IPAHAL_ERR("fail to alloc DMA buff of size %d\n",
+			params->nhash_hdr.size);
+		goto nhash_alloc_fail;
+	}
+
+	if (obj->support_hash) {
+		params->hash_hdr.size = params->tbls_num * obj->tbl_hdr_width;
+		params->hash_hdr.base = dma_alloc_coherent(ipahal_ctx->ipa_pdev,
+			params->hash_hdr.size, &params->hash_hdr.phys_base,
+			GFP_KERNEL);
+		if (!params->hash_hdr.base) {
+			IPAHAL_ERR("fail to alloc DMA buff of size %d\n",
+				params->hash_hdr.size);
+			goto hash_alloc_fail;
+		}
+	}
+
+	/* point every header entry at the shared empty system table */
+	addr = obj->create_tbl_addr(true,
+		ipahal_ctx->empty_fltrt_tbl.phys_base);
+	for (i = 0; i < params->tbls_num; i++) {
+		obj->write_val_to_hdr(addr,
+			params->nhash_hdr.base + i * obj->tbl_hdr_width);
+		if (obj->support_hash)
+			obj->write_val_to_hdr(addr,
+				params->hash_hdr.base +
+				i * obj->tbl_hdr_width);
+	}
+
+	return 0;
+
+hash_alloc_fail:
+	ipahal_free_dma_mem(&params->nhash_hdr);
+nhash_alloc_fail:
+	return -ENOMEM;
+}
+
+/*
+ * ipa_fltrt_alloc_lcl_bdy() - allocate and initialize buffers for
+ * local flt/rt tables bodies to be filled into sram
+ * @params: Allocate IN and OUT params
+ *
+ * NOTE(review): params->nhash_bdy.size and params->hash_bdy.size act as
+ * IN request flags here - a non-zero value means "allocate a local body
+ * buffer" - and are then overwritten with the computed H/W size. Confirm
+ * callers pre-set them accordingly.
+ *
+ * Return: 0 on success, negative on failure
+ */
+static int ipa_fltrt_alloc_lcl_bdy(
+	struct ipahal_fltrt_alloc_imgs_params *params)
+{
+	struct ipahal_fltrt_obj *obj;
+
+	obj = &ipahal_fltrt_objs[ipahal_ctx->hw_type];
+
+	/* The HAL allocates larger sizes than the given effective ones
+	 * for alignments and border indications
+	 */
+	IPAHAL_DBG("lcl tbl bdy total effective sizes: hash=%u nhash=%u\n",
+		params->total_sz_lcl_hash_tbls,
+		params->total_sz_lcl_nhash_tbls);
+
+	IPAHAL_DBG("lcl tbl bdy count: hash=%u nhash=%u\n",
+		params->num_lcl_hash_tbls,
+		params->num_lcl_nhash_tbls);
+
+	/* Align the sizes to coop with termination word
+	 * and H/W local table start offset alignment
+	 */
+	if (params->nhash_bdy.size) {
+		params->nhash_bdy.size = params->total_sz_lcl_nhash_tbls;
+		/* for table terminator */
+		params->nhash_bdy.size += obj->tbl_width *
+			params->num_lcl_nhash_tbls;
+		/* align the start of local rule-set */
+		params->nhash_bdy.size += obj->lcladdr_alignment *
+			params->num_lcl_nhash_tbls;
+		/* SRAM block size alignment.
+		 * NOTE(review): blk_sz_alignment appears to be a mask
+		 * (block size - 1), making this the align-up idiom - confirm
+		 */
+		params->nhash_bdy.size += obj->blk_sz_alignment;
+		params->nhash_bdy.size &= ~(obj->blk_sz_alignment);
+
+		IPAHAL_DBG("nhash lcl tbl bdy total h/w size = %u\n",
+			params->nhash_bdy.size);
+
+		params->nhash_bdy.base = dma_alloc_coherent(
+			ipahal_ctx->ipa_pdev, params->nhash_bdy.size,
+			&params->nhash_bdy.phys_base, GFP_KERNEL);
+		if (!params->nhash_bdy.base) {
+			IPAHAL_ERR("fail to alloc DMA buff of size %d\n",
+				params->nhash_bdy.size);
+			return -ENOMEM;
+		}
+		memset(params->nhash_bdy.base, 0, params->nhash_bdy.size);
+	}
+
+	/* a hash body request on H/W without hash support is ignored */
+	if (!obj->support_hash && params->hash_bdy.size) {
+		IPAHAL_ERR("No HAL Hash tbls support - Will be ignored\n");
+		WARN_ON(1);
+	}
+
+	if (obj->support_hash && params->hash_bdy.size) {
+		params->hash_bdy.size = params->total_sz_lcl_hash_tbls;
+		/* for table terminator */
+		params->hash_bdy.size += obj->tbl_width *
+			params->num_lcl_hash_tbls;
+		/* align the start of local rule-set */
+		params->hash_bdy.size += obj->lcladdr_alignment *
+			params->num_lcl_hash_tbls;
+		/* SRAM block size alignment */
+		params->hash_bdy.size += obj->blk_sz_alignment;
+		params->hash_bdy.size &= ~(obj->blk_sz_alignment);
+
+		IPAHAL_DBG("hash lcl tbl bdy total h/w size = %u\n",
+			params->hash_bdy.size);
+
+		params->hash_bdy.base = dma_alloc_coherent(
+			ipahal_ctx->ipa_pdev, params->hash_bdy.size,
+			&params->hash_bdy.phys_base, GFP_KERNEL);
+		if (!params->hash_bdy.base) {
+			IPAHAL_ERR("fail to alloc DMA buff of size %d\n",
+				params->hash_bdy.size);
+			goto hash_bdy_fail;
+		}
+		memset(params->hash_bdy.base, 0, params->hash_bdy.size);
+	}
+
+	return 0;
+
+hash_bdy_fail:
+	/* undo the nhash body allocation made above, if any */
+	if (params->nhash_bdy.size)
+		ipahal_free_dma_mem(&params->nhash_bdy);
+
+	return -ENOMEM;
+}
+
+/*
+ * ipahal_fltrt_allocate_hw_tbl_imgs() - Allocate tbl images DMA structures
+ * Used usually during commit.
+ * Allocates header structures and inits them to point to the empty DDR
+ * table; allocates body structures for local bodies tables.
+ * @params: Parameters for IN and OUT regard the allocation.
+ */
+int ipahal_fltrt_allocate_hw_tbl_imgs(
+	struct ipahal_fltrt_alloc_imgs_params *params)
+{
+	IPAHAL_DBG("Entry\n");
+
+	/* Input validation */
+	if (!params) {
+		IPAHAL_ERR("Input err: no params\n");
+		return -EINVAL;
+	}
+	if (params->ipt >= IPA_IP_MAX) {
+		IPAHAL_ERR("Input err: Invalid ip type %d\n", params->ipt);
+		return -EINVAL;
+	}
+
+	if (ipa_fltrt_alloc_init_tbl_hdr(params)) {
+		IPAHAL_ERR("fail to alloc and init tbl hdr\n");
+		return -ENOMEM;
+	}
+
+	if (ipa_fltrt_alloc_lcl_bdy(params) == 0)
+		return 0;
+
+	/* body allocation failed - release the header buffers */
+	IPAHAL_ERR("fail to alloc tbl bodies\n");
+	ipahal_free_dma_mem(&params->nhash_hdr);
+	if (params->hash_hdr.size)
+		ipahal_free_dma_mem(&params->hash_hdr);
+	return -ENOMEM;
+}
+
+/*
+ * ipahal_fltrt_allocate_hw_sys_tbl() - Allocate DMA mem for H/W flt/rt sys tbl
+ * @tbl_mem: IN/OUT param. size for effective table size. Pointer, for the
+ *  allocated memory.
+ *
+ * The size is adapted for needed alignments/borders.
+ */
+int ipahal_fltrt_allocate_hw_sys_tbl(struct ipa_mem_buffer *tbl_mem)
+{
+	struct ipahal_fltrt_obj *obj;
+
+	IPAHAL_DBG("Entry\n");
+
+	if (!tbl_mem) {
+		IPAHAL_ERR("Input err\n");
+		return -EINVAL;
+	}
+	if (!tbl_mem->size) {
+		IPAHAL_ERR("Input err: zero table size\n");
+		return -EINVAL;
+	}
+
+	obj = &ipahal_fltrt_objs[ipahal_ctx->hw_type];
+
+	/* one extra word for the rule-set terminator */
+	tbl_mem->size += obj->tbl_width;
+
+	tbl_mem->base = dma_alloc_coherent(ipahal_ctx->ipa_pdev, tbl_mem->size,
+		&tbl_mem->phys_base, GFP_KERNEL);
+	if (!tbl_mem->base) {
+		IPAHAL_ERR("fail to alloc DMA buf of size %d\n",
+			tbl_mem->size);
+		return -ENOMEM;
+	}
+	if (tbl_mem->phys_base & obj->sysaddr_alignment) {
+		IPAHAL_ERR("sys rt tbl address is not aligned\n");
+		ipahal_free_dma_mem(tbl_mem);
+		return -EPERM;
+	}
+
+	memset(tbl_mem->base, 0, tbl_mem->size);
+
+	return 0;
+}
+
+/*
+ * ipahal_fltrt_write_addr_to_hdr() - Fill table header with table address
+ * Given a table addr/offset, adapt it to IPA H/W format and write it to
+ * the given header index.
+ * @addr: Address or offset to be used
+ * @hdr_base: base address of header structure to write the address
+ * @hdr_idx: index of the address in the header structure
+ * @is_sys: Is it system address or local offset
+ */
+int ipahal_fltrt_write_addr_to_hdr(u64 addr, void *hdr_base, u32 hdr_idx,
+	bool is_sys)
+{
+	struct ipahal_fltrt_obj *obj;
+	u8 *entry;
+
+	IPAHAL_DBG("Entry\n");
+
+	obj = &ipahal_fltrt_objs[ipahal_ctx->hw_type];
+
+	if (!addr || !hdr_base) {
+		IPAHAL_ERR("Input err: addr=0x%llx hdr_base=%p\n",
+			addr, hdr_base);
+		return -EINVAL;
+	}
+
+	/* locate the hdr_idx'th entry and store the H/W formatted address */
+	entry = (u8 *)hdr_base + hdr_idx * obj->tbl_hdr_width;
+	obj->write_val_to_hdr(obj->create_tbl_addr(is_sys, addr), entry);
+
+	return 0;
+}
+
+/*
+ * ipahal_fltrt_read_addr_from_hdr() - Given sram address, read it's
+ * content (physical address or offset) and parse it.
+ * @hdr_base: base sram address of the header structure.
+ * @hdr_idx: index of the header entry line in the header structure.
+ * @addr: The parsed address - Out parameter
+ * @is_sys: Is this system or local address - Out parameter
+ */
+int ipahal_fltrt_read_addr_from_hdr(void *hdr_base, u32 hdr_idx, u64 *addr,
+	bool *is_sys)
+{
+	struct ipahal_fltrt_obj *obj;
+	u8 *entry;
+
+	IPAHAL_DBG("Entry\n");
+
+	obj = &ipahal_fltrt_objs[ipahal_ctx->hw_type];
+
+	if (!addr || !hdr_base || !is_sys) {
+		IPAHAL_ERR("Input err: addr=%p hdr_base=%p is_sys=%p\n",
+			addr, hdr_base, is_sys);
+		return -EINVAL;
+	}
+
+	/* locate the hdr_idx'th entry and decode its H/W formatted value */
+	entry = (u8 *)hdr_base + hdr_idx * obj->tbl_hdr_width;
+	obj->parse_tbl_addr(*((u64 *)entry), addr, is_sys);
+	return 0;
+}
+
+/*
+ * ipahal_rt_generate_hw_rule() - generates the routing hardware rule
+ * @params: Params for the rule creation.
+ * @hw_len: Size of the H/W rule to be returned
+ * @buf: Buffer to build the rule in. If buf is NULL, then the rule will
+ *  be built in an internal temp buf. This is used e.g. to get the rule
+ *  size only.
+ */
+int ipahal_rt_generate_hw_rule(struct ipahal_rt_rule_gen_params *params,
+	u32 *hw_len, u8 *buf)
+{
+	struct ipahal_fltrt_obj *obj;
+	u8 *scratch = NULL;
+	int rc;
+
+	IPAHAL_DBG("Entry\n");
+
+	if (!params || !hw_len) {
+		IPAHAL_ERR("Input err: params=%p hw_len=%p\n", params, hw_len);
+		return -EINVAL;
+	}
+	if (!params->rule) {
+		IPAHAL_ERR("Input err: invalid rule\n");
+		return -EINVAL;
+	}
+	if (params->ipt >= IPA_IP_MAX) {
+		IPAHAL_ERR("Input err: Invalid ip type %d\n", params->ipt);
+		return -EINVAL;
+	}
+
+	obj = &ipahal_fltrt_objs[ipahal_ctx->hw_type];
+
+	if (!buf) {
+		/* size-query mode: build the rule in a scratch buffer */
+		scratch = kzalloc(obj->rule_buf_size, GFP_KERNEL);
+		if (!scratch) {
+			IPAHAL_ERR("failed to alloc %u bytes\n",
+				obj->rule_buf_size);
+			return -ENOMEM;
+		}
+		buf = scratch;
+	} else if ((long)buf & obj->rule_start_alignment) {
+		IPAHAL_ERR("buff is not rule rule start aligned\n");
+		return -EPERM;
+	}
+
+	rc = obj->rt_generate_hw_rule(params, hw_len, buf);
+	/* caller-supplied buffer: append the rule-set terminator */
+	if (!scratch && !rc)
+		memset(buf + *hw_len, 0, obj->tbl_width);
+
+	kfree(scratch);
+
+	return rc;
+}
+
+/*
+ * ipahal_flt_generate_hw_rule() - generates the filtering hardware rule.
+ * @params: Params for the rule creation.
+ * @hw_len: Size of the H/W rule to be returned
+ * @buf: Buffer to build the rule in. If buf is NULL, then the rule will
+ *  be built in an internal temp buf. This is used e.g. to get the rule
+ *  size only.
+ */
+int ipahal_flt_generate_hw_rule(struct ipahal_flt_rule_gen_params *params,
+	u32 *hw_len, u8 *buf)
+{
+	struct ipahal_fltrt_obj *obj;
+	u8 *scratch = NULL;
+	int rc;
+
+	IPAHAL_DBG("Entry\n");
+
+	if (!params || !hw_len) {
+		IPAHAL_ERR("Input err: params=%p hw_len=%p\n", params, hw_len);
+		return -EINVAL;
+	}
+	if (!params->rule) {
+		IPAHAL_ERR("Input err: invalid rule\n");
+		return -EINVAL;
+	}
+	if (params->ipt >= IPA_IP_MAX) {
+		IPAHAL_ERR("Input err: Invalid ip type %d\n", params->ipt);
+		return -EINVAL;
+	}
+
+	obj = &ipahal_fltrt_objs[ipahal_ctx->hw_type];
+
+	if (!buf) {
+		/* size-query mode: build the rule in a scratch buffer */
+		scratch = kzalloc(obj->rule_buf_size, GFP_KERNEL);
+		if (!scratch) {
+			IPAHAL_ERR("failed to alloc %u bytes\n",
+				obj->rule_buf_size);
+			return -ENOMEM;
+		}
+		buf = scratch;
+	} else if ((long)buf & obj->rule_start_alignment) {
+		IPAHAL_ERR("buff is not rule rule start aligned\n");
+		return -EPERM;
+	}
+
+	rc = obj->flt_generate_hw_rule(params, hw_len, buf);
+	/* caller-supplied buffer: append the rule-set terminator */
+	if (!scratch && !rc)
+		memset(buf + *hw_len, 0, obj->tbl_width);
+
+	kfree(scratch);
+
+	return rc;
+}
+
+/*
+ * ipahal_flt_generate_equation() - generate flt rule in equation form
+ * Will build equation form flt rule from given info.
+ * @ipt: IP family
+ * @attrib: Rule attribute to be generated
+ * @eq_atrb: Equation form generated rule
+ * Note: Usage example: Pass the generated form to other sub-systems
+ * for inter-subsystems rules exchange.
+ */
+int ipahal_flt_generate_equation(enum ipa_ip_type ipt,
+		const struct ipa_rule_attrib *attrib,
+		struct ipa_ipfltri_rule_eq *eq_atrb)
+{
+	struct ipahal_fltrt_obj *obj;
+
+	IPAHAL_DBG("Entry\n");
+
+	if (ipt >= IPA_IP_MAX) {
+		IPAHAL_ERR("Input err: Invalid ip type %d\n", ipt);
+		return -EINVAL;
+	}
+	if (!attrib || !eq_atrb) {
+		IPAHAL_ERR("Input err: attrib=%p eq_atrb=%p\n",
+			attrib, eq_atrb);
+		return -EINVAL;
+	}
+
+	obj = &ipahal_fltrt_objs[ipahal_ctx->hw_type];
+	return obj->flt_generate_eq(ipt, attrib, eq_atrb);
+}
+
+/*
+ * ipahal_rt_parse_hw_rule() - Parse H/W formated rt rule
+ * Given the rule address, read the rule info from H/W and parse it.
+ * @rule_addr: Rule address (virtual memory)
+ * @rule: Out parameter for parsed rule info
+ */
+int ipahal_rt_parse_hw_rule(u8 *rule_addr,
+	struct ipahal_rt_rule_entry *rule)
+{
+	struct ipahal_fltrt_obj *obj;
+
+	IPAHAL_DBG("Entry\n");
+
+	if (!rule_addr || !rule) {
+		IPAHAL_ERR("Input err: rule_addr=%p rule=%p\n",
+			rule_addr, rule);
+		return -EINVAL;
+	}
+
+	obj = &ipahal_fltrt_objs[ipahal_ctx->hw_type];
+	return obj->rt_parse_hw_rule(rule_addr, rule);
+}
+
+/*
+ * ipahal_flt_parse_hw_rule() - Parse H/W formated flt rule
+ * Given the rule address, read the rule info from H/W and parse it.
+ * @rule_addr: Rule address (virtual memory)
+ * @rule: Out parameter for parsed rule info
+ */
+int ipahal_flt_parse_hw_rule(u8 *rule_addr,
+	struct ipahal_flt_rule_entry *rule)
+{
+	struct ipahal_fltrt_obj *obj;
+
+	IPAHAL_DBG("Entry\n");
+
+	if (!rule_addr || !rule) {
+		IPAHAL_ERR("Input err: rule_addr=%p rule=%p\n",
+			rule_addr, rule);
+		return -EINVAL;
+	}
+
+	obj = &ipahal_fltrt_objs[ipahal_ctx->hw_type];
+	return obj->flt_parse_hw_rule(rule_addr, rule);
+}
+
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipahal/ipahal_fltrt.h b/drivers/platform/msm/ipa/ipa_v3/ipahal/ipahal_fltrt.h
new file mode 100644
index 0000000..ee2704d6
--- /dev/null
+++ b/drivers/platform/msm/ipa/ipa_v3/ipahal/ipahal_fltrt.h
@@ -0,0 +1,288 @@
+/* Copyright (c) 2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _IPAHAL_FLTRT_H_
+#define _IPAHAL_FLTRT_H_
+
+/*
+ * struct ipahal_fltrt_alloc_imgs_params - Params for tbls imgs allocations
+ * The allocation logic will allocate DMA memory representing the header.
+ * If the bodies are local (SRAM), the allocation will also allocate
+ * DMA buffers that contain the raw content of these local tables.
+ * @ipt: IP version type
+ * @tbls_num: Number of tables to represent by the header
+ * @num_lcl_hash_tbls: Number of local (sram) hashable tables
+ * @num_lcl_nhash_tbls: Number of local (sram) non-hashable tables
+ * @total_sz_lcl_hash_tbls: Total size of local hashable tables
+ * @total_sz_lcl_nhash_tbls: Total size of local non-hashable tables
+ * @hash_hdr/nhash_hdr: OUT params for the header structures
+ * @hash_bdy/nhash_bdy: OUT params for the local body structures
+ */
+struct ipahal_fltrt_alloc_imgs_params {
+	enum ipa_ip_type ipt;
+	u32 tbls_num;
+	u32 num_lcl_hash_tbls;
+	u32 num_lcl_nhash_tbls;
+	u32 total_sz_lcl_hash_tbls;
+	u32 total_sz_lcl_nhash_tbls;
+
+	/* OUT PARAMS - filled by the allocation logic */
+	struct ipa_mem_buffer hash_hdr;
+	struct ipa_mem_buffer nhash_hdr;
+	struct ipa_mem_buffer hash_bdy;
+	struct ipa_mem_buffer nhash_bdy;
+};
+
+/*
+ * enum ipahal_rt_rule_hdr_type - Header type used in rt rules
+ * @IPAHAL_RT_RULE_HDR_NONE: No header is used
+ * @IPAHAL_RT_RULE_HDR_RAW: Raw header is used
+ * @IPAHAL_RT_RULE_HDR_PROC_CTX: Header Processing context is used
+ */
+enum ipahal_rt_rule_hdr_type {
+	IPAHAL_RT_RULE_HDR_NONE,
+	IPAHAL_RT_RULE_HDR_RAW,
+	IPAHAL_RT_RULE_HDR_PROC_CTX,
+};
+
+/*
+ * struct ipahal_rt_rule_gen_params - Params for generating rt rule
+ * @ipt: IP family version
+ * @dst_pipe_idx: Destination pipe index
+ * @hdr_type: Header type to be used
+ * @hdr_lcl: Is the used header located in a local (SRAM) table?
+ *  (if not, then in a system memory table)
+ * @hdr_ofst: Offset of the header in the header table
+ * @priority: Rule priority
+ * @id: Rule ID
+ * @rule: Rule info
+ */
+struct ipahal_rt_rule_gen_params {
+	enum ipa_ip_type ipt;
+	int dst_pipe_idx;
+	enum ipahal_rt_rule_hdr_type hdr_type;
+	bool hdr_lcl;
+	u32 hdr_ofst;
+	u32 priority;
+	u32 id;
+	const struct ipa_rt_rule *rule;
+};
+
+/*
+ * struct ipahal_rt_rule_entry - Rt rule info parsed from H/W
+ * @dst_pipe_idx: Destination pipe index
+ * @hdr_lcl: Is the referenced header located in SRAM?
+ *  (if not, then in system memory)
+ * @hdr_ofst: Offset of the header in the header table
+ * @hdr_type: Header type to be used
+ * @priority: Rule priority
+ * @retain_hdr: Retain the removed header in header removal
+ * @id: Rule ID
+ * @eq_attrib: Equations and their params in the rule
+ * @rule_size: Rule size in memory
+ */
+struct ipahal_rt_rule_entry {
+	int dst_pipe_idx;
+	bool hdr_lcl;
+	u32 hdr_ofst;
+	enum ipahal_rt_rule_hdr_type hdr_type;
+	u32 priority;
+	bool retain_hdr;
+	u32 id;
+	struct ipa_ipfltri_rule_eq eq_attrib;
+	u32 rule_size;
+};
+
+/*
+ * struct ipahal_flt_rule_gen_params - Params for generating flt rule
+ * @ipt: IP family version
+ * @rt_tbl_idx: Routing table the rule points to
+ * @priority: Rule priority
+ * @id: Rule ID
+ * @rule: Rule info
+ */
+struct ipahal_flt_rule_gen_params {
+	enum ipa_ip_type ipt;
+	u32 rt_tbl_idx;
+	u32 priority;
+	u32 id;
+	const struct ipa_flt_rule *rule;
+};
+
+/*
+ * struct ipahal_flt_rule_entry - Flt rule info parsed from H/W
+ * @rule: Rule info
+ * @priority: Rule priority
+ * @id: Rule ID
+ * @rule_size: Rule size in memory
+ */
+struct ipahal_flt_rule_entry {
+	struct ipa_flt_rule rule;
+	u32 priority;
+	u32 id;
+	u32 rule_size;
+};
+
+/* Get the H/W table (flt/rt) header width */
+u32 ipahal_get_hw_tbl_hdr_width(void);
+
+/* Get the H/W local table (SRAM) address alignment
+ * Table headers reference local tables via offsets in SRAM.
+ * This function returns the alignment of the offset that IPA expects.
+ */
+u32 ipahal_get_lcl_tbl_addr_alignment(void);
+
+/*
+ * Rule priority is used to distinguish rule order
+ * in the integrated table consisting of hashable and
+ * non-hashable tables. A max-priority rule is used as soon as it is
+ * scanned by IPA; IPA will not look for any further rules.
+ */
+int ipahal_get_rule_max_priority(void);
+
+/* Given a priority, calc and return the next lower one if it is in
+ * legal range.
+ */
+int ipahal_rule_decrease_priority(int *prio);
+
+/* Does the given ID represent a rule miss? */
+bool ipahal_is_rule_miss_id(u32 id);
+
+/* Get rule ID with high bit only asserted
+ * Used e.g. to create groups of IDs according to this bit
+ */
+u32 ipahal_get_rule_id_hi_bit(void);
+
+/* Get the low value possible to be used for rule-id */
+u32 ipahal_get_low_rule_id(void);
+
+/*
+ * ipahal_rt_generate_empty_img() - Generate empty route image
+ * Creates routing header buffer for the given tables number.
+ * For each table, make it point to the empty table on DDR.
+ * @tbls_num: Number of tables. For each will have an entry in the header
+ * @hash_hdr_size: SRAM buf size of the hash tbls hdr. Used for space check
+ * @nhash_hdr_size: SRAM buf size of the nhash tbls hdr. Used for space check
+ * @mem: mem object that points to DMA mem representing the hdr structure
+ */
+int ipahal_rt_generate_empty_img(u32 tbls_num, u32 hash_hdr_size,
+ u32 nhash_hdr_size, struct ipa_mem_buffer *mem);
+
+/*
+ * ipahal_flt_generate_empty_img() - Generate empty filter image
+ * Creates filter header buffer for the given tables number.
+ * For each table, make it point to the empty table on DDR.
+ * @tbls_num: Number of tables. For each will have an entry in the header
+ * @hash_hdr_size: SRAM buf size of the hash tbls hdr. Used for space check
+ * @nhash_hdr_size: SRAM buf size of the nhash tbls hdr. Used for space check
+ * @ep_bitmap: Bitmap representing the EP that has flt tables. The format
+ * should be: bit0->EP0, bit1->EP1
+ * @mem: mem object that points to DMA mem representing the hdr structure
+ */
+int ipahal_flt_generate_empty_img(u32 tbls_num, u32 hash_hdr_size,
+ u32 nhash_hdr_size, u64 ep_bitmap, struct ipa_mem_buffer *mem);
+
+/*
+ * ipahal_fltrt_allocate_hw_tbl_imgs() - Allocate tbl images DMA structures
+ * Used usually during commit.
+ * Allocates header structures and init them to point to empty DDR table
+ * Allocate body structures for local body tables
+ * @params: Parameters for IN and OUT regard the allocation.
+ */
+int ipahal_fltrt_allocate_hw_tbl_imgs(
+ struct ipahal_fltrt_alloc_imgs_params *params);
+
+/*
+ * ipahal_fltrt_allocate_hw_sys_tbl() - Allocate DMA mem for H/W flt/rt sys tbl
+ * @tbl_mem: IN/OUT param. size for effective table size. Pointer, for the
+ * allocated memory.
+ *
+ * The size is adapted for needed alignments/borders.
+ */
+int ipahal_fltrt_allocate_hw_sys_tbl(struct ipa_mem_buffer *tbl_mem);
+
+/*
+ * ipahal_fltrt_write_addr_to_hdr() - Fill table header with table address
+ * Given table addr/offset, adapt it to IPA H/W format and write it
+ * to given header index.
+ * @addr: Address or offset to be used
+ * @hdr_base: base address of header structure to write the address
+ * @hdr_idx: index of the address in the header structure
+ * @is_sys: Is it system address or local offset
+ */
+int ipahal_fltrt_write_addr_to_hdr(u64 addr, void *hdr_base, u32 hdr_idx,
+ bool is_sys);
+
+/*
+ * ipahal_fltrt_read_addr_from_hdr() - Given sram address, read its
+ * content (physical address or offset) and parse it.
+ * @hdr_base: base sram address of the header structure.
+ * @hdr_idx: index of the header entry line in the header structure.
+ * @addr: The parsed address - Out parameter
+ * @is_sys: Is this system or local address - Out parameter
+ */
+int ipahal_fltrt_read_addr_from_hdr(void *hdr_base, u32 hdr_idx, u64 *addr,
+ bool *is_sys);
+
+/*
+ * ipahal_rt_generate_hw_rule() - generates the routing hardware rule.
+ * @params: Params for the rule creation.
+ * @hw_len: Size of the H/W rule to be returned
+ * @buf: Buffer to build the rule in. If buf is NULL, then the rule will
+ * be built in internal temp buf. This is used e.g. to get the rule size
+ * only.
+ */
+int ipahal_rt_generate_hw_rule(struct ipahal_rt_rule_gen_params *params,
+ u32 *hw_len, u8 *buf);
+
+/*
+ * ipahal_flt_generate_hw_rule() - generates the filtering hardware rule.
+ * @params: Params for the rule creation.
+ * @hw_len: Size of the H/W rule to be returned
+ * @buf: Buffer to build the rule in. If buf is NULL, then the rule will
+ * be built in internal temp buf. This is used e.g. to get the rule size
+ * only.
+ */
+int ipahal_flt_generate_hw_rule(struct ipahal_flt_rule_gen_params *params,
+ u32 *hw_len, u8 *buf);
+
+/*
+ * ipahal_flt_generate_equation() - generate flt rule in equation form
+ * Will build equation form flt rule from given info.
+ * @ipt: IP family
+ * @attrib: Rule attribute to be generated
+ * @eq_atrb: Equation form generated rule
+ * Note: Usage example: Pass the generated form to other sub-systems
+ * for inter-subsystems rules exchange.
+ */
+int ipahal_flt_generate_equation(enum ipa_ip_type ipt,
+ const struct ipa_rule_attrib *attrib,
+ struct ipa_ipfltri_rule_eq *eq_atrb);
+
+/*
+ * ipahal_rt_parse_hw_rule() - Parse H/W formatted rt rule
+ * Given the rule address, read the rule info from H/W and parse it.
+ * @rule_addr: Rule address (virtual memory)
+ * @rule: Out parameter for parsed rule info
+ */
+int ipahal_rt_parse_hw_rule(u8 *rule_addr,
+ struct ipahal_rt_rule_entry *rule);
+
+/*
+ * ipahal_flt_parse_hw_rule() - Parse H/W formatted flt rule
+ * Given the rule address, read the rule info from H/W and parse it.
+ * @rule_addr: Rule address (virtual memory)
+ * @rule: Out parameter for parsed rule info
+ */
+int ipahal_flt_parse_hw_rule(u8 *rule_addr,
+ struct ipahal_flt_rule_entry *rule);
+
+
+#endif /* _IPAHAL_FLTRT_H_ */
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipahal/ipahal_fltrt_i.h b/drivers/platform/msm/ipa/ipa_v3/ipahal/ipahal_fltrt_i.h
new file mode 100644
index 0000000..0c0637d
--- /dev/null
+++ b/drivers/platform/msm/ipa/ipa_v3/ipahal/ipahal_fltrt_i.h
@@ -0,0 +1,143 @@
+/* Copyright (c) 2012-2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _IPAHAL_FLTRT_I_H_
+#define _IPAHAL_FLTRT_I_H_
+
+/*
+ * enum ipa_fltrt_equations - RULE equations
+ * These are named values for the equations that can be used.
+ * The HAL layer holds the mapping between these names and the H/W
+ * presentation.
+ */
+enum ipa_fltrt_equations {
+	IPA_TOS_EQ,
+	IPA_PROTOCOL_EQ,
+	IPA_TC_EQ,
+	IPA_OFFSET_MEQ128_0,
+	IPA_OFFSET_MEQ128_1,
+	IPA_OFFSET_MEQ32_0,
+	IPA_OFFSET_MEQ32_1,
+	IPA_IHL_OFFSET_MEQ32_0,
+	IPA_IHL_OFFSET_MEQ32_1,
+	IPA_METADATA_COMPARE,
+	IPA_IHL_OFFSET_RANGE16_0,
+	IPA_IHL_OFFSET_RANGE16_1,
+	IPA_IHL_OFFSET_EQ_32,
+	IPA_IHL_OFFSET_EQ_16,
+	IPA_FL_EQ,
+	IPA_IS_FRAG,
+	IPA_EQ_MAX, /* number of equations - keep last */
+};
+
+/* Width and Alignment values for H/W structures.
+ * Specific for IPA version.
+ * NOTE(review): the *_ALIGNMENT and *_MASK values appear to be
+ * (alignment - 1) style bitmasks (e.g. 127 -> 128B alignment) -
+ * confirm against the IPAv3 H/W spec.
+ */
+#define IPA3_0_HW_TBL_SYSADDR_ALIGNMENT (127)
+#define IPA3_0_HW_TBL_LCLADDR_ALIGNMENT (7)
+#define IPA3_0_HW_TBL_BLK_SIZE_ALIGNMENT (127)
+#define IPA3_0_HW_TBL_WIDTH (8)
+#define IPA3_0_HW_TBL_HDR_WIDTH (8)
+#define IPA3_0_HW_TBL_ADDR_MASK (127)
+#define IPA3_0_HW_RULE_BUF_SIZE (256)
+#define IPA3_0_HW_RULE_START_ALIGNMENT (7)
+
+
+/*
+ * Rules Priority.
+ * Needed due to rules classification to hashable and non-hashable.
+ * Higher priority is lower in number, i.e. 0 is the highest priority.
+ */
+#define IPA3_0_RULE_MAX_PRIORITY (0)
+#define IPA3_0_RULE_MIN_PRIORITY (1023)
+
+/*
+ * RULE ID, bit length (e.g. 10 bits).
+ */
+#define IPA3_0_RULE_ID_BIT_LEN (10)
+#define IPA3_0_LOW_RULE_ID (1)
+
+/**
+ * struct ipa3_0_rt_rule_hw_hdr - HW header of IPA routing rule
+ * @word: routing rule header properties
+ * @en_rule: enable rule - Equation bit fields
+ * @pipe_dest_idx: destination pipe index
+ * @system: Is the referenced header in local or system memory?
+ * @hdr_offset: header offset
+ * @proc_ctx: whether hdr_offset points to header table or to
+ *	header processing context table
+ * @priority: Rule priority. Added to distinguish rules order
+ *	at the integrated table consisting from hashable and
+ *	non-hashable parts
+ * @rsvd1: reserved bits
+ * @retain_hdr: added to add back to the packet the header removed
+ *	as part of header removal. This will be done as part of
+ *	header insertion block.
+ * @rule_id: rule ID that will be returned in the packet status
+ * @rsvd2: reserved bits
+ */
+struct ipa3_0_rt_rule_hw_hdr {
+	union {
+		u64 word;
+		struct {
+			u64 en_rule:16;
+			u64 pipe_dest_idx:5;
+			u64 system:1;
+			u64 hdr_offset:9;
+			u64 proc_ctx:1;
+			u64 priority:10;
+			u64 rsvd1:5;
+			u64 retain_hdr:1;
+			u64 rule_id:10;
+			u64 rsvd2:6;
+		} hdr;
+	} u;
+};
+
+/**
+ * struct ipa3_0_flt_rule_hw_hdr - HW header of IPA filter rule
+ * @word: filtering rule properties
+ * @en_rule: enable rule
+ * @action: post filtering action
+ * @rt_tbl_idx: index in routing table
+ * @retain_hdr: added to add back to the packet the header removed
+ *	as part of header removal. This will be done as part of
+ *	header insertion block.
+ * @rsvd1: reserved bits
+ * @priority: Rule priority. Added to distinguish rules order
+ *	at the integrated table consisting from hashable and
+ *	non-hashable parts
+ * @rsvd2: reserved bits
+ * @rule_id: rule ID that will be returned in the packet status
+ * @rsvd3: reserved bits
+ *
+ * NOTE(review): matching the H/W layout relies on the compiler
+ * packing these bit-fields LSB-first - confirm for the target ABI.
+ */
+struct ipa3_0_flt_rule_hw_hdr {
+	union {
+		u64 word;
+		struct {
+			u64 en_rule:16;
+			u64 action:5;
+			u64 rt_tbl_idx:5;
+			u64 retain_hdr:1;
+			u64 rsvd1:5;
+			u64 priority:10;
+			u64 rsvd2:6;
+			u64 rule_id:10;
+			u64 rsvd3:6;
+		} hdr;
+	} u;
+};
+
+int ipahal_fltrt_init(enum ipa_hw_type ipa_hw_type);
+void ipahal_fltrt_destroy(void);
+
+#endif /* _IPAHAL_FLTRT_I_H_ */
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipahal/ipahal_i.h b/drivers/platform/msm/ipa/ipa_v3/ipahal/ipahal_i.h
new file mode 100644
index 0000000..4c4b666
--- /dev/null
+++ b/drivers/platform/msm/ipa/ipa_v3/ipahal/ipahal_i.h
@@ -0,0 +1,549 @@
+/* Copyright (c) 2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _IPAHAL_I_H_
+#define _IPAHAL_I_H_
+
+#include <linux/ipa.h>
+#include "../../ipa_common_i.h"
+
+#define IPAHAL_DRV_NAME "ipahal"
+
+/*
+ * Driver print helpers. Each message goes both to the kernel log
+ * (pr_debug/pr_err) and to the IPA IPC logging buffers.
+ * The IPC format strings contain "%s:%d" and therefore must also be
+ * given the __func__ and __LINE__ arguments, exactly like the
+ * pr_debug/pr_err calls; omitting them shifts every user argument
+ * in the IPC log (format/argument mismatch).
+ */
+#define IPAHAL_DBG(fmt, args...) \
+	do { \
+		pr_debug(IPAHAL_DRV_NAME " %s:%d " fmt, __func__, __LINE__, \
+			## args); \
+		IPA_IPC_LOGGING(ipa_get_ipc_logbuf(), \
+			IPAHAL_DRV_NAME " %s:%d " fmt, __func__, __LINE__, \
+			## args); \
+		IPA_IPC_LOGGING(ipa_get_ipc_logbuf_low(), \
+			IPAHAL_DRV_NAME " %s:%d " fmt, __func__, __LINE__, \
+			## args); \
+	} while (0)
+
+#define IPAHAL_DBG_LOW(fmt, args...) \
+	do { \
+		pr_debug(IPAHAL_DRV_NAME " %s:%d " fmt, __func__, __LINE__, \
+			## args); \
+		IPA_IPC_LOGGING(ipa_get_ipc_logbuf_low(), \
+			IPAHAL_DRV_NAME " %s:%d " fmt, __func__, __LINE__, \
+			## args); \
+	} while (0)
+
+#define IPAHAL_ERR(fmt, args...) \
+	do { \
+		pr_err(IPAHAL_DRV_NAME " %s:%d " fmt, __func__, __LINE__, \
+			## args); \
+		IPA_IPC_LOGGING(ipa_get_ipc_logbuf(), \
+			IPAHAL_DRV_NAME " %s:%d " fmt, __func__, __LINE__, \
+			## args); \
+		IPA_IPC_LOGGING(ipa_get_ipc_logbuf_low(), \
+			IPAHAL_DRV_NAME " %s:%d " fmt, __func__, __LINE__, \
+			## args); \
+	} while (0)
+
+/*
+ * struct ipahal_context - HAL global context data
+ * @hw_type: IPA H/W type/version.
+ * @base: Base address to be used for accessing IPA memory. This is
+ *  I/O memory mapped address.
+ * @dent: Debugfs folder dir entry
+ * @ipa_pdev: IPA Platform Device. Will be used for DMA memory
+ * @empty_fltrt_tbl: Empty table to be used at tables init.
+ */
+struct ipahal_context {
+	enum ipa_hw_type hw_type;
+	void __iomem *base;
+	struct dentry *dent;
+	struct device *ipa_pdev;
+	struct ipa_mem_buffer empty_fltrt_tbl;
+};
+
+extern struct ipahal_context *ipahal_ctx;
+
+
+
+/* Immediate commands H/W structures */
+
+/*
+ * struct ipa_imm_cmd_hw_ip_v4_filter_init - IP_V4_FILTER_INIT command payload
+ * in H/W format.
+ * Inits IPv4 filter block.
+ * @hash_rules_addr: Addr in system mem where ipv4 hashable flt rules starts
+ * @hash_rules_size: Size in bytes of the hashable tbl to cpy to local mem
+ * @hash_local_addr: Addr in shared mem where ipv4 hashable flt tbl should
+ * be copied to
+ * @nhash_rules_size: Size in bytes of the non-hashable tbl to cpy to local mem
+ * @nhash_local_addr: Addr in shared mem where ipv4 non-hashable flt tbl should
+ * be copied to
+ * @rsvd: reserved
+ * @nhash_rules_addr: Addr in sys mem where ipv4 non-hashable flt tbl starts
+ */
+struct ipa_imm_cmd_hw_ip_v4_filter_init {
+ u64 hash_rules_addr:64;
+ u64 hash_rules_size:12;
+ u64 hash_local_addr:16;
+ u64 nhash_rules_size:12;
+ u64 nhash_local_addr:16;
+ u64 rsvd:8;
+ u64 nhash_rules_addr:64;
+};
+
+/*
+ * struct ipa_imm_cmd_hw_ip_v6_filter_init - IP_V6_FILTER_INIT command payload
+ * in H/W format.
+ * Inits IPv6 filter block.
+ * @hash_rules_addr: Addr in system mem where ipv6 hashable flt rules starts
+ * @hash_rules_size: Size in bytes of the hashable tbl to cpy to local mem
+ * @hash_local_addr: Addr in shared mem where ipv6 hashable flt tbl should
+ * be copied to
+ * @nhash_rules_size: Size in bytes of the non-hashable tbl to cpy to local mem
+ * @nhash_local_addr: Addr in shared mem where ipv6 non-hashable flt tbl should
+ * be copied to
+ * @rsvd: reserved
+ * @nhash_rules_addr: Addr in sys mem where ipv6 non-hashable flt tbl starts
+ */
+struct ipa_imm_cmd_hw_ip_v6_filter_init {
+ u64 hash_rules_addr:64;
+ u64 hash_rules_size:12;
+ u64 hash_local_addr:16;
+ u64 nhash_rules_size:12;
+ u64 nhash_local_addr:16;
+ u64 rsvd:8;
+ u64 nhash_rules_addr:64;
+};
+
+/*
+ * struct ipa_imm_cmd_hw_ip_v4_nat_init - IP_V4_NAT_INIT command payload
+ * in H/W format.
+ * Inits IPv4 NAT block. Initiates the NAT table with its dimensions,
+ * location, cache address and other related parameters.
+ * @ipv4_rules_addr: Addr in sys/shared mem where ipv4 NAT rules start
+ * @ipv4_expansion_rules_addr: Addr in sys/shared mem where expansion NAT
+ * table starts. IPv4 NAT rules that result in NAT collision are located
+ * in this table.
+ * @index_table_addr: Addr in sys/shared mem where index table, which points
+ * to NAT table starts
+ * @index_table_expansion_addr: Addr in sys/shared mem where expansion index
+ * table starts
+ * @table_index: For future support of multiple NAT tables
+ * @rsvd1: reserved
+ * @ipv4_rules_addr_type: ipv4_rules_addr in sys or shared mem
+ * @ipv4_expansion_rules_addr_type: ipv4_expansion_rules_addr in
+ * sys or shared mem
+ * @index_table_addr_type: index_table_addr in sys or shared mem
+ * @index_table_expansion_addr_type: index_table_expansion_addr in
+ * sys or shared mem
+ * @size_base_tables: Num of entries in NAT tbl and idx tbl (each)
+ * @size_expansion_tables: Num of entries in NAT expansion tbl and expansion
+ * idx tbl (each)
+ * @rsvd2: reserved
+ * @public_ip_addr: public IP address
+ */
+struct ipa_imm_cmd_hw_ip_v4_nat_init {
+ u64 ipv4_rules_addr:64;
+ u64 ipv4_expansion_rules_addr:64;
+ u64 index_table_addr:64;
+ u64 index_table_expansion_addr:64;
+ u64 table_index:3;
+ u64 rsvd1:1;
+ u64 ipv4_rules_addr_type:1;
+ u64 ipv4_expansion_rules_addr_type:1;
+ u64 index_table_addr_type:1;
+ u64 index_table_expansion_addr_type:1;
+ u64 size_base_tables:12;
+ u64 size_expansion_tables:10;
+ u64 rsvd2:2;
+ u64 public_ip_addr:32;
+};
+
+/*
+ * struct ipa_imm_cmd_hw_ip_v4_routing_init - IP_V4_ROUTING_INIT command payload
+ * in H/W format.
+ * Inits IPv4 routing table/structure - with the rules and other related params
+ * @hash_rules_addr: Addr in system mem where ipv4 hashable rt rules starts
+ * @hash_rules_size: Size in bytes of the hashable tbl to cpy to local mem
+ * @hash_local_addr: Addr in shared mem where ipv4 hashable rt tbl should
+ * be copied to
+ * @nhash_rules_size: Size in bytes of the non-hashable tbl to cpy to local mem
+ * @nhash_local_addr: Addr in shared mem where ipv4 non-hashable rt tbl should
+ * be copied to
+ * @rsvd: reserved
+ * @nhash_rules_addr: Addr in sys mem where ipv4 non-hashable rt tbl starts
+ */
+struct ipa_imm_cmd_hw_ip_v4_routing_init {
+ u64 hash_rules_addr:64;
+ u64 hash_rules_size:12;
+ u64 hash_local_addr:16;
+ u64 nhash_rules_size:12;
+ u64 nhash_local_addr:16;
+ u64 rsvd:8;
+ u64 nhash_rules_addr:64;
+};
+
+/*
+ * struct ipa_imm_cmd_hw_ip_v6_routing_init - IP_V6_ROUTING_INIT command payload
+ * in H/W format.
+ * Inits IPv6 routing table/structure - with the rules and other related params
+ * @hash_rules_addr: Addr in system mem where ipv6 hashable rt rules starts
+ * @hash_rules_size: Size in bytes of the hashable tbl to cpy to local mem
+ * @hash_local_addr: Addr in shared mem where ipv6 hashable rt tbl should
+ * be copied to
+ * @nhash_rules_size: Size in bytes of the non-hashable tbl to cpy to local mem
+ * @nhash_local_addr: Addr in shared mem where ipv6 non-hashable rt tbl should
+ * be copied to
+ * @rsvd: reserved
+ * @nhash_rules_addr: Addr in sys mem where ipv6 non-hashable rt tbl starts
+ */
+struct ipa_imm_cmd_hw_ip_v6_routing_init {
+ u64 hash_rules_addr:64;
+ u64 hash_rules_size:12;
+ u64 hash_local_addr:16;
+ u64 nhash_rules_size:12;
+ u64 nhash_local_addr:16;
+ u64 rsvd:8;
+ u64 nhash_rules_addr:64;
+};
+
+/*
+ * struct ipa_imm_cmd_hw_hdr_init_local - HDR_INIT_LOCAL command payload
+ * in H/W format.
+ * Inits hdr table within local mem with the hdrs and their length.
+ * @hdr_table_addr: Word address in sys mem where the table starts (SRC)
+ * @size_hdr_table: Size of the above (in bytes)
+ * @hdr_addr: header address in IPA sram (used as DST for memory copy)
+ * @rsvd: reserved
+ */
+struct ipa_imm_cmd_hw_hdr_init_local {
+ u64 hdr_table_addr:64;
+ u64 size_hdr_table:12;
+ u64 hdr_addr:16;
+ u64 rsvd:4;
+};
+
+/*
+ * struct ipa_imm_cmd_hw_nat_dma - NAT_DMA command payload
+ * in H/W format
+ * Perform DMA operation on NAT related mem addressess. Copy data into
+ * different locations within NAT associated tbls. (For add/remove NAT rules)
+ * @table_index: NAT tbl index. Defines the NAT tbl on which to perform DMA op.
+ * @rsvd1: reserved
+ * @base_addr: Base addr to which the DMA operation should be performed.
+ * @rsvd2: reserved
+ * @offset: offset in bytes from base addr to write 'data' to
+ * @data: data to be written
+ * @rsvd3: reserved
+ */
+struct ipa_imm_cmd_hw_nat_dma {
+ u64 table_index:3;
+ u64 rsvd1:1;
+ u64 base_addr:2;
+ u64 rsvd2:2;
+ u64 offset:32;
+ u64 data:16;
+ u64 rsvd3:8;
+};
+
+/*
+ * struct ipa_imm_cmd_hw_hdr_init_system - HDR_INIT_SYSTEM command payload
+ * in H/W format.
+ * Inits hdr table within sys mem with the hdrs and their length.
+ * @hdr_table_addr: Word address in system memory where the hdrs tbl starts.
+ */
+struct ipa_imm_cmd_hw_hdr_init_system {
+ u64 hdr_table_addr:64;
+};
+
+/*
+ * struct ipa_imm_cmd_hw_ip_packet_init - IP_PACKET_INIT command payload
+ * in H/W format.
+ * Configuration for specific IP pkt. Shall be called prior to an IP pkt
+ * data. Pkt will not go through IP pkt processing.
+ * @destination_pipe_index: Destination pipe index (in case routing
+ * is enabled, this field will overwrite the rt rule)
+ * @rsv1: reserved
+ */
+struct ipa_imm_cmd_hw_ip_packet_init {
+	u64 destination_pipe_index:5;
+	u64 rsv1:59;
+};
+
+/*
+ * struct ipa_imm_cmd_hw_register_write - REGISTER_WRITE command payload
+ * in H/W format.
+ * Write value to register. Allows reg changes to be synced with data packet
+ * and other immediate command. Can be used to access the sram
+ * @sw_rsvd: Ignored by H/W. May be used by S/W
+ * @skip_pipeline_clear: 0 to wait until IPA pipeline is clear. 1 don't wait
+ * @offset: offset from IPA base address - Lower 16bit of the IPA reg addr
+ * @value: value to write to register
+ * @value_mask: mask specifying which value bits to write to the register
+ * @pipeline_clear_options: options for pipeline to clear
+ * 0: HPS - no pkt inside HPS (not grp specific)
+ * 1: source group - The immediate cmd src grp does not use any pkt ctxs
+ * 2: Wait until no pkt reside inside IPA pipeline
+ * 3: reserved
+ * @rsvd: reserved - should be set to zero
+ */
+struct ipa_imm_cmd_hw_register_write {
+ u64 sw_rsvd:15;
+ u64 skip_pipeline_clear:1;
+ u64 offset:16;
+ u64 value:32;
+ u64 value_mask:32;
+ u64 pipeline_clear_options:2;
+ u64 rsvd:30;
+};
+
+/*
+ * struct ipa_imm_cmd_hw_dma_shared_mem - DMA_SHARED_MEM command payload
+ * in H/W format.
+ * Perform mem copy into or out of the SW area of IPA local mem
+ * @sw_rsvd: Ignored by H/W. May be used by S/W
+ * @size: Size in bytes of data to copy. Expected size is up to 2K bytes
+ * @local_addr: Address in IPA local memory
+ * @direction: Read or write?
+ * 0: IPA write, Write to local address from system address
+ * 1: IPA read, Read from local address to system address
+ * @skip_pipeline_clear: 0 to wait until IPA pipeline is clear. 1 don't wait
+ * @pipeline_clear_options: options for pipeline to clear
+ * 0: HPS - no pkt inside HPS (not grp specific)
+ * 1: source group - The immediate cmd src grp does not use any pkt ctxs
+ * 2: Wait until no pkt reside inside IPA pipeline
+ * 3: reserved
+ * @rsvd: reserved - should be set to zero
+ * @system_addr: Address in system memory
+ */
+struct ipa_imm_cmd_hw_dma_shared_mem {
+ u64 sw_rsvd:16;
+ u64 size:16;
+ u64 local_addr:16;
+ u64 direction:1;
+ u64 skip_pipeline_clear:1;
+ u64 pipeline_clear_options:2;
+ u64 rsvd:12;
+ u64 system_addr:64;
+};
+
+/*
+ * struct ipa_imm_cmd_hw_ip_packet_tag_status -
+ * IP_PACKET_TAG_STATUS command payload in H/W format.
+ * This cmd is used for to allow SW to track HW processing by setting a TAG
+ * value that is passed back to SW inside Packet Status information.
+ * TAG info will be provided as part of Packet Status info generated for
+ * the next pkt transferred over the pipe.
+ * This immediate command must be followed by a packet in the same transfer.
+ * @sw_rsvd: Ignored by H/W. May be used by S/W
+ * @tag: Tag that is provided back to SW
+ */
+struct ipa_imm_cmd_hw_ip_packet_tag_status {
+ u64 sw_rsvd:16;
+ u64 tag:48;
+};
+
+/*
+ * struct ipa_imm_cmd_hw_dma_task_32b_addr -
+ * IPA_DMA_TASK_32B_ADDR command payload in H/W format.
+ * Used by clients using 32bit addresses. Used to perform DMA operation on
+ * multiple descriptors.
+ * The Opcode is dynamic, where it holds the number of buffer to process
+ * @sw_rsvd: Ignored by H/W. May be used by S/W
+ * @cmplt: Complete flag: When asserted IPA will interrupt SW when the entire
+ * DMA related data was completely xfered to its destination.
+ * @eof: End Of Frame flag: When asserted IPA will assert the EOT to the
+ *  dest client. This is used for aggr sequence
+ * @flsh: Flush flag: When asserted, pkt will go through the IPA blocks but
+ * will not be xfered to dest client but rather will be discarded
+ * @lock: Lock pipe flag: When asserted, IPA will stop processing descriptors
+ * from other EPs in the same src grp (RX queue)
+ * @unlock: Unlock pipe flag: When asserted, IPA will stop exclusively
+ * servicing current EP out of the src EPs of the grp (RX queue)
+ * @size1: Size of buffer1 data
+ * @addr1: Pointer to buffer1 data
+ * @packet_size: Total packet size. If a pkt send using multiple DMA_TASKs,
+ * only the first one needs to have this field set. It will be ignored
+ * in subsequent DMA_TASKs until the packet ends (EOT). First DMA_TASK
+ * must contain this field (2 or more buffers) or EOT.
+ */
+struct ipa_imm_cmd_hw_dma_task_32b_addr {
+ u64 sw_rsvd:11;
+ u64 cmplt:1;
+ u64 eof:1;
+ u64 flsh:1;
+ u64 lock:1;
+ u64 unlock:1;
+ u64 size1:16;
+ u64 addr1:32;
+ u64 packet_size:16;
+};
+
+
+
+/* IPA Status packet H/W structures and info */
+
+/*
+ * struct ipa_status_pkt_hw - IPA status packet payload in H/W format.
+ * This structure describes the status packet H/W structure for the
+ * following statuses: IPA_STATUS_PACKET, IPA_STATUS_DROPPED_PACKET,
+ * IPA_STATUS_SUSPENDED_PACKET.
+ * Other statuses types has different status packet structure.
+ * @status_opcode: The Type of the status (Opcode).
+ * @exception: (not bitmask) - the first exception that took place.
+ * In case of exception, src endp and pkt len are always valid.
+ * @status_mask: Bit mask specifying on which H/W blocks the pkt was processed.
+ * @pkt_len: Pkt pyld len including hdr, include retained hdr if used. Does
+ * not include padding or checksum trailer len.
+ * @endp_src_idx: Source end point index.
+ * @rsvd1: reserved
+ * @endp_dest_idx: Destination end point index.
+ * Not valid in case of exception
+ * @rsvd2: reserved
+ * @metadata: meta data value used by packet
+ * @flt_local: Filter table location flag: Does matching flt rule belongs to
+ * flt tbl that resides in lcl memory? (if not, then system mem)
+ * @flt_hash: Filter hash hit flag: Does matching flt rule was in hash tbl?
+ * @flt_global: Global filter rule flag: Does matching flt rule belongs to
+ * the global flt tbl? (if not, then the per endp tables)
+ * @flt_ret_hdr: Retain header in filter rule flag: Does matching flt rule
+ * specifies to retain header?
+ * @flt_rule_id: The ID of the matching filter rule. This info can be combined
+ * with endp_src_idx to locate the exact rule. ID=0x3FF reserved to specify
+ * flt miss. In case of miss, all flt info to be ignored
+ * @rt_local: Route table location flag: Does matching rt rule belongs to
+ * rt tbl that resides in lcl memory? (if not, then system mem)
+ * @rt_hash: Route hash hit flag: Does matching rt rule was in hash tbl?
+ * @ucp: UC Processing flag.
+ * @rt_tbl_idx: Index of rt tbl that contains the rule on which was a match
+ * @rt_rule_id: The ID of the matching rt rule. This info can be combined
+ * with rt_tbl_idx to locate the exact rule. ID=0x3FF reserved to specify
+ * rt miss. In case of miss, all rt info to be ignored
+ * @nat_hit: NAT hit flag: Was there a NAT hit?
+ * @nat_entry_idx: Index of the NAT entry used of NAT processing
+ * @nat_type: Defines the type of the NAT operation:
+ * 00: No NAT
+ * 01: Source NAT
+ * 10: Destination NAT
+ * 11: Reserved
+ * @tag_info: S/W defined value provided via immediate command
+ * @seq_num: Per source endp unique packet sequence number
+ * @time_of_day_ctr: running counter from IPA clock
+ * @hdr_local: Header table location flag: In header insertion, was the header
+ * taken from the table resides in local memory? (If no, then system mem)
+ * @hdr_offset: Offset of used header in the header table
+ * @frag_hit: Frag hit flag: Was there a frag rule hit in H/W frag table?
+ * @frag_rule: Frag rule index in H/W frag table in case of frag hit
+ * @hw_specific: H/W specific reserved value
+ */
+struct ipa_pkt_status_hw {
+ u64 status_opcode:8;
+ u64 exception:8;
+ u64 status_mask:16;
+ u64 pkt_len:16;
+ u64 endp_src_idx:5;
+ u64 rsvd1:3;
+ u64 endp_dest_idx:5;
+ u64 rsvd2:3;
+ u64 metadata:32;
+ u64 flt_local:1;
+ u64 flt_hash:1;
+ u64 flt_global:1;
+ u64 flt_ret_hdr:1;
+ u64 flt_rule_id:10;
+ u64 rt_local:1;
+ u64 rt_hash:1;
+ u64 ucp:1;
+ u64 rt_tbl_idx:5;
+ u64 rt_rule_id:10;
+ u64 nat_hit:1;
+ u64 nat_entry_idx:13;
+ u64 nat_type:2;
+ u64 tag_info:48;
+ u64 seq_num:8;
+ u64 time_of_day_ctr:24;
+ u64 hdr_local:1;
+ u64 hdr_offset:10;
+ u64 frag_hit:1;
+ u64 frag_rule:4;
+ u64 hw_specific:16;
+};
+
+/* Size of H/W Packet Status */
+#define IPA3_0_PKT_STATUS_SIZE 32
+
+/* Headers and processing context H/W structures and definitions */
+
+/* uCP command numbers */
+#define IPA_HDR_UCP_802_3_TO_802_3 6
+#define IPA_HDR_UCP_802_3_TO_ETHII 7
+#define IPA_HDR_UCP_ETHII_TO_802_3 8
+#define IPA_HDR_UCP_ETHII_TO_ETHII 9
+
+/* Processing context TLV type */
+#define IPA_PROC_CTX_TLV_TYPE_END 0
+#define IPA_PROC_CTX_TLV_TYPE_HDR_ADD 1
+#define IPA_PROC_CTX_TLV_TYPE_PROC_CMD 3
+
+/**
+ * struct ipa_hw_hdr_proc_ctx_tlv -
+ * HW structure of IPA processing context header - TLV part
+ * @type: 0 - end type
+ * 1 - header addition type
+ * 3 - processing command type
+ * @length: number of bytes after tlv
+ * for type:
+ * 0 - needs to be 0
+ * 1 - header addition length
+ * 3 - number of 32B including type and length.
+ * @value: specific value for type
+ * for type:
+ * 0 - needs to be 0
+ * 1 - header length
+ * 3 - command ID (see IPA_HDR_UCP_* definitions)
+ */
+struct ipa_hw_hdr_proc_ctx_tlv {
+ u32 type:8;
+ u32 length:8;
+ u32 value:16;
+};
+
+/**
+ * struct ipa_hw_hdr_proc_ctx_hdr_add -
+ * HW structure of IPA processing context - add header tlv
+ * @tlv: IPA processing context TLV
+ * @hdr_addr: processing context header address
+ */
+struct ipa_hw_hdr_proc_ctx_hdr_add {
+ struct ipa_hw_hdr_proc_ctx_tlv tlv;
+ u32 hdr_addr;
+};
+
+/**
+ * struct ipa_hw_hdr_proc_ctx_add_hdr_seq -
+ * IPA processing context header - add header sequence
+ * @hdr_add: add header command
+ * @end: tlv end command (cmd.type must be 0)
+ */
+struct ipa_hw_hdr_proc_ctx_add_hdr_seq {
+ struct ipa_hw_hdr_proc_ctx_hdr_add hdr_add;
+ struct ipa_hw_hdr_proc_ctx_tlv end;
+};
+
+/**
+ * struct ipa_hw_hdr_proc_ctx_add_hdr_cmd_seq -
+ * IPA processing context header - process command sequence
+ * @hdr_add: add header command
+ * @cmd: tlv processing command (cmd.type must be 3)
+ * @end: tlv end command (cmd.type must be 0)
+ */
+struct ipa_hw_hdr_proc_ctx_add_hdr_cmd_seq {
+ struct ipa_hw_hdr_proc_ctx_hdr_add hdr_add;
+ struct ipa_hw_hdr_proc_ctx_tlv cmd;
+ struct ipa_hw_hdr_proc_ctx_tlv end;
+};
+
+#endif /* _IPAHAL_I_H_ */
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipahal/ipahal_reg.c b/drivers/platform/msm/ipa/ipa_v3/ipahal/ipahal_reg.c
new file mode 100644
index 0000000..08decd8
--- /dev/null
+++ b/drivers/platform/msm/ipa/ipa_v3/ipahal/ipahal_reg.c
@@ -0,0 +1,1541 @@
+/* Copyright (c) 2012-2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/init.h>
+#include <linux/ipa.h>
+#include <linux/kernel.h>
+#include <linux/msm_ipa.h>
+#include "ipahal_i.h"
+#include "ipahal_reg.h"
+#include "ipahal_reg_i.h"
+
+/*
+ * Human-readable register names, indexed by enum ipahal_reg_name.
+ * NOTE: entry order must match the enum declaration exactly — a new
+ * register must be added to both in the same position.
+ */
+static const char *ipareg_name_to_str[IPA_REG_MAX] = {
+	__stringify(IPA_ROUTE),
+	__stringify(IPA_IRQ_STTS_EE_n),
+	__stringify(IPA_IRQ_EN_EE_n),
+	__stringify(IPA_IRQ_CLR_EE_n),
+	__stringify(IPA_IRQ_SUSPEND_INFO_EE_n),
+	__stringify(IPA_SUSPEND_IRQ_EN_EE_n),
+	__stringify(IPA_SUSPEND_IRQ_CLR_EE_n),
+	__stringify(IPA_BCR),
+	__stringify(IPA_ENABLED_PIPES),
+	__stringify(IPA_COMP_SW_RESET),
+	__stringify(IPA_VERSION),
+	__stringify(IPA_TAG_TIMER),
+	__stringify(IPA_COMP_HW_VERSION),
+	__stringify(IPA_SPARE_REG_1),
+	__stringify(IPA_SPARE_REG_2),
+	__stringify(IPA_COMP_CFG),
+	__stringify(IPA_STATE_AGGR_ACTIVE),
+	__stringify(IPA_ENDP_INIT_HDR_n),
+	__stringify(IPA_ENDP_INIT_HDR_EXT_n),
+	__stringify(IPA_ENDP_INIT_AGGR_n),
+	__stringify(IPA_AGGR_FORCE_CLOSE),
+	__stringify(IPA_ENDP_INIT_ROUTE_n),
+	__stringify(IPA_ENDP_INIT_MODE_n),
+	__stringify(IPA_ENDP_INIT_NAT_n),
+	__stringify(IPA_ENDP_INIT_CTRL_n),
+	__stringify(IPA_ENDP_INIT_HOL_BLOCK_EN_n),
+	__stringify(IPA_ENDP_INIT_HOL_BLOCK_TIMER_n),
+	__stringify(IPA_ENDP_INIT_DEAGGR_n),
+	__stringify(IPA_ENDP_INIT_SEQ_n),
+	__stringify(IPA_DEBUG_CNT_REG_n),
+	__stringify(IPA_ENDP_INIT_CFG_n),
+	__stringify(IPA_IRQ_EE_UC_n),
+	__stringify(IPA_ENDP_INIT_HDR_METADATA_MASK_n),
+	__stringify(IPA_ENDP_INIT_HDR_METADATA_n),
+	__stringify(IPA_ENDP_INIT_RSRC_GRP_n),
+	__stringify(IPA_SHARED_MEM_SIZE),
+	__stringify(IPA_SRAM_DIRECT_ACCESS_n),
+	__stringify(IPA_DEBUG_CNT_CTRL_n),
+	__stringify(IPA_UC_MAILBOX_m_n),
+	__stringify(IPA_FILT_ROUT_HASH_FLUSH),
+	__stringify(IPA_SINGLE_NDP_MODE),
+	__stringify(IPA_QCNCM),
+	__stringify(IPA_SYS_PKT_PROC_CNTXT_BASE),
+	__stringify(IPA_LOCAL_PKT_PROC_CNTXT_BASE),
+	__stringify(IPA_ENDP_STATUS_n),
+	__stringify(IPA_ENDP_FILTER_ROUTER_HSH_CFG_n),
+	__stringify(IPA_SRC_RSRC_GRP_01_RSRC_TYPE_n),
+	__stringify(IPA_SRC_RSRC_GRP_23_RSRC_TYPE_n),
+	__stringify(IPA_SRC_RSRC_GRP_45_RSRC_TYPE_n),
+	__stringify(IPA_SRC_RSRC_GRP_67_RSRC_TYPE_n),
+	__stringify(IPA_DST_RSRC_GRP_01_RSRC_TYPE_n),
+	__stringify(IPA_DST_RSRC_GRP_23_RSRC_TYPE_n),
+	__stringify(IPA_DST_RSRC_GRP_45_RSRC_TYPE_n),
+	__stringify(IPA_DST_RSRC_GRP_67_RSRC_TYPE_n),
+	__stringify(IPA_RX_HPS_CLIENTS_MIN_DEPTH_0),
+	__stringify(IPA_RX_HPS_CLIENTS_MIN_DEPTH_1),
+	__stringify(IPA_RX_HPS_CLIENTS_MAX_DEPTH_0),
+	__stringify(IPA_RX_HPS_CLIENTS_MAX_DEPTH_1),
+	__stringify(IPA_QSB_MAX_WRITES),
+	__stringify(IPA_QSB_MAX_READS),
+	__stringify(IPA_TX_CFG),
+};
+
+/* Placeholder construct CB for registers with no construct support */
+static void ipareg_construct_dummy(enum ipahal_reg_name reg,
+	const void *fields, u32 *val)
+{
+	const char *name = ipahal_reg_name_str(reg);
+
+	IPAHAL_ERR("No construct function for %s\n", name);
+	WARN_ON(1);
+}
+
+/* Placeholder parse CB for registers with no parse support */
+static void ipareg_parse_dummy(enum ipahal_reg_name reg,
+	void *fields, u32 val)
+{
+	const char *name = ipahal_reg_name_str(reg);
+
+	IPAHAL_ERR("No parse function for %s\n", name);
+	WARN_ON(1);
+}
+
+/* DEPTH_1 register carries the min/max depth of two RX HPS clients */
+static void ipareg_construct_rx_hps_clients_depth1(
+	enum ipahal_reg_name reg, const void *fields, u32 *val)
+{
+	const struct ipahal_reg_rx_hps_clients *clients = fields;
+	int i;
+
+	for (i = 0; i < 2; i++)
+		IPA_SETFIELD_IN_REG(*val, clients->client_minmax[i],
+			IPA_RX_HPS_CLIENTS_MINMAX_DEPTH_X_CLIENT_n_SHFT(i),
+			IPA_RX_HPS_CLIENTS_MINMAX_DEPTH_X_CLIENT_n_BMSK(i));
+}
+
+/* DEPTH_0 register carries the min/max depth of four RX HPS clients */
+static void ipareg_construct_rx_hps_clients_depth0(
+	enum ipahal_reg_name reg, const void *fields, u32 *val)
+{
+	const struct ipahal_reg_rx_hps_clients *clients = fields;
+	int i;
+
+	for (i = 0; i < 4; i++)
+		IPA_SETFIELD_IN_REG(*val, clients->client_minmax[i],
+			IPA_RX_HPS_CLIENTS_MINMAX_DEPTH_X_CLIENT_n_SHFT(i),
+			IPA_RX_HPS_CLIENTS_MINMAX_DEPTH_X_CLIENT_n_BMSK(i));
+}
+
+/*
+ * Same as ipareg_construct_rx_hps_clients_depth0 but with the narrower
+ * per-client bit masks used from IPA v3.5 on.
+ */
+static void ipareg_construct_rx_hps_clients_depth0_v3_5(
+	enum ipahal_reg_name reg, const void *fields, u32 *val)
+{
+	const struct ipahal_reg_rx_hps_clients *clients = fields;
+	int i;
+
+	for (i = 0; i < 4; i++)
+		IPA_SETFIELD_IN_REG(*val, clients->client_minmax[i],
+			IPA_RX_HPS_CLIENTS_MINMAX_DEPTH_X_CLIENT_n_SHFT(i),
+			IPA_RX_HPS_CLIENTS_MINMAX_DEPTH_X_CLIENT_n_BMSK_V3_5(i));
+}
+
+/* Pack the X/Y min/max limits of a resource group pair into the register */
+static void ipareg_construct_rsrg_grp_xy(
+	enum ipahal_reg_name reg, const void *fields, u32 *val)
+{
+	const struct ipahal_reg_rsrc_grp_cfg *cfg = fields;
+
+	IPA_SETFIELD_IN_REG(*val, cfg->x_min,
+		IPA_RSRC_GRP_XY_RSRC_TYPE_n_X_MIN_LIM_SHFT,
+		IPA_RSRC_GRP_XY_RSRC_TYPE_n_X_MIN_LIM_BMSK);
+	IPA_SETFIELD_IN_REG(*val, cfg->x_max,
+		IPA_RSRC_GRP_XY_RSRC_TYPE_n_X_MAX_LIM_SHFT,
+		IPA_RSRC_GRP_XY_RSRC_TYPE_n_X_MAX_LIM_BMSK);
+	IPA_SETFIELD_IN_REG(*val, cfg->y_min,
+		IPA_RSRC_GRP_XY_RSRC_TYPE_n_Y_MIN_LIM_SHFT,
+		IPA_RSRC_GRP_XY_RSRC_TYPE_n_Y_MIN_LIM_BMSK);
+	IPA_SETFIELD_IN_REG(*val, cfg->y_max,
+		IPA_RSRC_GRP_XY_RSRC_TYPE_n_Y_MAX_LIM_SHFT,
+		IPA_RSRC_GRP_XY_RSRC_TYPE_n_Y_MAX_LIM_BMSK);
+}
+
+/* v3.5 variant of ipareg_construct_rsrg_grp_xy (different field layout) */
+static void ipareg_construct_rsrg_grp_xy_v3_5(
+	enum ipahal_reg_name reg, const void *fields, u32 *val)
+{
+	const struct ipahal_reg_rsrc_grp_cfg *cfg = fields;
+
+	IPA_SETFIELD_IN_REG(*val, cfg->x_min,
+		IPA_RSRC_GRP_XY_RSRC_TYPE_n_X_MIN_LIM_SHFT_V3_5,
+		IPA_RSRC_GRP_XY_RSRC_TYPE_n_X_MIN_LIM_BMSK_V3_5);
+	IPA_SETFIELD_IN_REG(*val, cfg->x_max,
+		IPA_RSRC_GRP_XY_RSRC_TYPE_n_X_MAX_LIM_SHFT_V3_5,
+		IPA_RSRC_GRP_XY_RSRC_TYPE_n_X_MAX_LIM_BMSK_V3_5);
+
+	/* DST_23 register has only X fields at ipa V3_5 */
+	if (reg == IPA_DST_RSRC_GRP_23_RSRC_TYPE_n)
+		return;
+
+	IPA_SETFIELD_IN_REG(*val, cfg->y_min,
+		IPA_RSRC_GRP_XY_RSRC_TYPE_n_Y_MIN_LIM_SHFT_V3_5,
+		IPA_RSRC_GRP_XY_RSRC_TYPE_n_Y_MIN_LIM_BMSK_V3_5);
+	IPA_SETFIELD_IN_REG(*val, cfg->y_max,
+		IPA_RSRC_GRP_XY_RSRC_TYPE_n_Y_MAX_LIM_SHFT_V3_5,
+		IPA_RSRC_GRP_XY_RSRC_TYPE_n_Y_MAX_LIM_BMSK_V3_5);
+}
+
+/*
+ * Pack the filter and router hash-tuple masks (plus the two undefined
+ * spare fields) into the ENDP_FILTER_ROUTER_HSH_CFG_n register value.
+ * Field-for-field inverse of ipareg_parse_hash_cfg_n.
+ */
+static void ipareg_construct_hash_cfg_n(
+	enum ipahal_reg_name reg, const void *fields, u32 *val)
+{
+	struct ipahal_reg_fltrt_hash_tuple *tuple =
+		(struct ipahal_reg_fltrt_hash_tuple *)fields;
+
+	/* filter hash mask bits */
+	IPA_SETFIELD_IN_REG(*val, tuple->flt.src_id,
+		IPA_ENDP_FILTER_ROUTER_HSH_CFG_n_FILTER_HASH_MSK_SRC_ID_SHFT,
+		IPA_ENDP_FILTER_ROUTER_HSH_CFG_n_FILTER_HASH_MSK_SRC_ID_BMSK);
+	IPA_SETFIELD_IN_REG(*val, tuple->flt.src_ip_addr,
+		IPA_ENDP_FILTER_ROUTER_HSH_CFG_n_FILTER_HASH_MSK_SRC_IP_SHFT,
+		IPA_ENDP_FILTER_ROUTER_HSH_CFG_n_FILTER_HASH_MSK_SRC_IP_BMSK);
+	IPA_SETFIELD_IN_REG(*val, tuple->flt.dst_ip_addr,
+		IPA_ENDP_FILTER_ROUTER_HSH_CFG_n_FILTER_HASH_MSK_DST_IP_SHFT,
+		IPA_ENDP_FILTER_ROUTER_HSH_CFG_n_FILTER_HASH_MSK_DST_IP_BMSK);
+	IPA_SETFIELD_IN_REG(*val, tuple->flt.src_port,
+		IPA_ENDP_FILTER_ROUTER_HSH_CFG_n_FILTER_HASH_MSK_SRC_PORT_SHFT,
+		IPA_ENDP_FILTER_ROUTER_HSH_CFG_n_FILTER_HASH_MSK_SRC_PORT_BMSK);
+	IPA_SETFIELD_IN_REG(*val, tuple->flt.dst_port,
+		IPA_ENDP_FILTER_ROUTER_HSH_CFG_n_FILTER_HASH_MSK_DST_PORT_SHFT,
+		IPA_ENDP_FILTER_ROUTER_HSH_CFG_n_FILTER_HASH_MSK_DST_PORT_BMSK);
+	IPA_SETFIELD_IN_REG(*val, tuple->flt.protocol,
+		IPA_ENDP_FILTER_ROUTER_HSH_CFG_n_FILTER_HASH_MSK_PROTOCOL_SHFT,
+		IPA_ENDP_FILTER_ROUTER_HSH_CFG_n_FILTER_HASH_MSK_PROTOCOL_BMSK);
+	IPA_SETFIELD_IN_REG(*val, tuple->flt.meta_data,
+		IPA_ENDP_FILTER_ROUTER_HSH_CFG_n_FILTER_HASH_MSK_METADATA_SHFT,
+		IPA_ENDP_FILTER_ROUTER_HSH_CFG_n_FILTER_HASH_MSK_METADATA_BMSK);
+	IPA_SETFIELD_IN_REG(*val, tuple->undefined1,
+		IPA_ENDP_FILTER_ROUTER_HSH_CFG_n_UNDEFINED1_SHFT,
+		IPA_ENDP_FILTER_ROUTER_HSH_CFG_n_UNDEFINED1_BMSK);
+	/* router hash mask bits */
+	IPA_SETFIELD_IN_REG(*val, tuple->rt.src_id,
+		IPA_ENDP_FILTER_ROUTER_HSH_CFG_n_ROUTER_HASH_MSK_SRC_ID_SHFT,
+		IPA_ENDP_FILTER_ROUTER_HSH_CFG_n_ROUTER_HASH_MSK_SRC_ID_BMSK);
+	IPA_SETFIELD_IN_REG(*val, tuple->rt.src_ip_addr,
+		IPA_ENDP_FILTER_ROUTER_HSH_CFG_n_ROUTER_HASH_MSK_SRC_IP_SHFT,
+		IPA_ENDP_FILTER_ROUTER_HSH_CFG_n_ROUTER_HASH_MSK_SRC_IP_BMSK);
+	IPA_SETFIELD_IN_REG(*val, tuple->rt.dst_ip_addr,
+		IPA_ENDP_FILTER_ROUTER_HSH_CFG_n_ROUTER_HASH_MSK_DST_IP_SHFT,
+		IPA_ENDP_FILTER_ROUTER_HSH_CFG_n_ROUTER_HASH_MSK_DST_IP_BMSK);
+	IPA_SETFIELD_IN_REG(*val, tuple->rt.src_port,
+		IPA_ENDP_FILTER_ROUTER_HSH_CFG_n_ROUTER_HASH_MSK_SRC_PORT_SHFT,
+		IPA_ENDP_FILTER_ROUTER_HSH_CFG_n_ROUTER_HASH_MSK_SRC_PORT_BMSK);
+	IPA_SETFIELD_IN_REG(*val, tuple->rt.dst_port,
+		IPA_ENDP_FILTER_ROUTER_HSH_CFG_n_ROUTER_HASH_MSK_DST_PORT_SHFT,
+		IPA_ENDP_FILTER_ROUTER_HSH_CFG_n_ROUTER_HASH_MSK_DST_PORT_BMSK);
+	IPA_SETFIELD_IN_REG(*val, tuple->rt.protocol,
+		IPA_ENDP_FILTER_ROUTER_HSH_CFG_n_ROUTER_HASH_MSK_PROTOCOL_SHFT,
+		IPA_ENDP_FILTER_ROUTER_HSH_CFG_n_ROUTER_HASH_MSK_PROTOCOL_BMSK);
+	IPA_SETFIELD_IN_REG(*val, tuple->rt.meta_data,
+		IPA_ENDP_FILTER_ROUTER_HSH_CFG_n_ROUTER_HASH_MSK_METADATA_SHFT,
+		IPA_ENDP_FILTER_ROUTER_HSH_CFG_n_ROUTER_HASH_MSK_METADATA_BMSK);
+	IPA_SETFIELD_IN_REG(*val, tuple->undefined2,
+		IPA_ENDP_FILTER_ROUTER_HSH_CFG_n_UNDEFINED2_SHFT,
+		IPA_ENDP_FILTER_ROUTER_HSH_CFG_n_UNDEFINED2_BMSK);
+}
+
+/*
+ * Unpack the ENDP_FILTER_ROUTER_HSH_CFG_n register value into the
+ * filter/router hash-tuple struct. Field-for-field inverse of
+ * ipareg_construct_hash_cfg_n. No memset is needed: every struct
+ * field is assigned below.
+ */
+static void ipareg_parse_hash_cfg_n(
+	enum ipahal_reg_name reg, void *fields, u32 val)
+{
+	struct ipahal_reg_fltrt_hash_tuple *tuple =
+		(struct ipahal_reg_fltrt_hash_tuple *)fields;
+
+	tuple->flt.src_id =
+		IPA_GETFIELD_FROM_REG(val,
+		IPA_ENDP_FILTER_ROUTER_HSH_CFG_n_FILTER_HASH_MSK_SRC_ID_SHFT,
+		IPA_ENDP_FILTER_ROUTER_HSH_CFG_n_FILTER_HASH_MSK_SRC_ID_BMSK);
+	tuple->flt.src_ip_addr =
+		IPA_GETFIELD_FROM_REG(val,
+		IPA_ENDP_FILTER_ROUTER_HSH_CFG_n_FILTER_HASH_MSK_SRC_IP_SHFT,
+		IPA_ENDP_FILTER_ROUTER_HSH_CFG_n_FILTER_HASH_MSK_SRC_IP_BMSK);
+	tuple->flt.dst_ip_addr =
+		IPA_GETFIELD_FROM_REG(val,
+		IPA_ENDP_FILTER_ROUTER_HSH_CFG_n_FILTER_HASH_MSK_DST_IP_SHFT,
+		IPA_ENDP_FILTER_ROUTER_HSH_CFG_n_FILTER_HASH_MSK_DST_IP_BMSK);
+	tuple->flt.src_port =
+		IPA_GETFIELD_FROM_REG(val,
+		IPA_ENDP_FILTER_ROUTER_HSH_CFG_n_FILTER_HASH_MSK_SRC_PORT_SHFT,
+		IPA_ENDP_FILTER_ROUTER_HSH_CFG_n_FILTER_HASH_MSK_SRC_PORT_BMSK);
+	tuple->flt.dst_port =
+		IPA_GETFIELD_FROM_REG(val,
+		IPA_ENDP_FILTER_ROUTER_HSH_CFG_n_FILTER_HASH_MSK_DST_PORT_SHFT,
+		IPA_ENDP_FILTER_ROUTER_HSH_CFG_n_FILTER_HASH_MSK_DST_PORT_BMSK);
+	tuple->flt.protocol =
+		IPA_GETFIELD_FROM_REG(val,
+		IPA_ENDP_FILTER_ROUTER_HSH_CFG_n_FILTER_HASH_MSK_PROTOCOL_SHFT,
+		IPA_ENDP_FILTER_ROUTER_HSH_CFG_n_FILTER_HASH_MSK_PROTOCOL_BMSK);
+	tuple->flt.meta_data =
+		IPA_GETFIELD_FROM_REG(val,
+		IPA_ENDP_FILTER_ROUTER_HSH_CFG_n_FILTER_HASH_MSK_METADATA_SHFT,
+		IPA_ENDP_FILTER_ROUTER_HSH_CFG_n_FILTER_HASH_MSK_METADATA_BMSK);
+	tuple->undefined1 =
+		IPA_GETFIELD_FROM_REG(val,
+		IPA_ENDP_FILTER_ROUTER_HSH_CFG_n_UNDEFINED1_SHFT,
+		IPA_ENDP_FILTER_ROUTER_HSH_CFG_n_UNDEFINED1_BMSK);
+	tuple->rt.src_id =
+		IPA_GETFIELD_FROM_REG(val,
+		IPA_ENDP_FILTER_ROUTER_HSH_CFG_n_ROUTER_HASH_MSK_SRC_ID_SHFT,
+		IPA_ENDP_FILTER_ROUTER_HSH_CFG_n_ROUTER_HASH_MSK_SRC_ID_BMSK);
+	tuple->rt.src_ip_addr =
+		IPA_GETFIELD_FROM_REG(val,
+		IPA_ENDP_FILTER_ROUTER_HSH_CFG_n_ROUTER_HASH_MSK_SRC_IP_SHFT,
+		IPA_ENDP_FILTER_ROUTER_HSH_CFG_n_ROUTER_HASH_MSK_SRC_IP_BMSK);
+	tuple->rt.dst_ip_addr =
+		IPA_GETFIELD_FROM_REG(val,
+		IPA_ENDP_FILTER_ROUTER_HSH_CFG_n_ROUTER_HASH_MSK_DST_IP_SHFT,
+		IPA_ENDP_FILTER_ROUTER_HSH_CFG_n_ROUTER_HASH_MSK_DST_IP_BMSK);
+	tuple->rt.src_port =
+		IPA_GETFIELD_FROM_REG(val,
+		IPA_ENDP_FILTER_ROUTER_HSH_CFG_n_ROUTER_HASH_MSK_SRC_PORT_SHFT,
+		IPA_ENDP_FILTER_ROUTER_HSH_CFG_n_ROUTER_HASH_MSK_SRC_PORT_BMSK);
+	tuple->rt.dst_port =
+		IPA_GETFIELD_FROM_REG(val,
+		IPA_ENDP_FILTER_ROUTER_HSH_CFG_n_ROUTER_HASH_MSK_DST_PORT_SHFT,
+		IPA_ENDP_FILTER_ROUTER_HSH_CFG_n_ROUTER_HASH_MSK_DST_PORT_BMSK);
+	tuple->rt.protocol =
+		IPA_GETFIELD_FROM_REG(val,
+		IPA_ENDP_FILTER_ROUTER_HSH_CFG_n_ROUTER_HASH_MSK_PROTOCOL_SHFT,
+		IPA_ENDP_FILTER_ROUTER_HSH_CFG_n_ROUTER_HASH_MSK_PROTOCOL_BMSK);
+	tuple->rt.meta_data =
+		IPA_GETFIELD_FROM_REG(val,
+		IPA_ENDP_FILTER_ROUTER_HSH_CFG_n_ROUTER_HASH_MSK_METADATA_SHFT,
+		IPA_ENDP_FILTER_ROUTER_HSH_CFG_n_ROUTER_HASH_MSK_METADATA_BMSK);
+	tuple->undefined2 =
+		IPA_GETFIELD_FROM_REG(val,
+		IPA_ENDP_FILTER_ROUTER_HSH_CFG_n_UNDEFINED2_SHFT,
+		IPA_ENDP_FILTER_ROUTER_HSH_CFG_n_UNDEFINED2_BMSK);
+}
+
+/* Pack endpoint status configuration into ENDP_STATUS_n register value */
+static void ipareg_construct_endp_status_n(
+	enum ipahal_reg_name reg, const void *fields, u32 *val)
+{
+	const struct ipahal_reg_ep_cfg_status *status = fields;
+
+	IPA_SETFIELD_IN_REG(*val, status->status_en,
+		IPA_ENDP_STATUS_n_STATUS_EN_SHFT,
+		IPA_ENDP_STATUS_n_STATUS_EN_BMSK);
+	IPA_SETFIELD_IN_REG(*val, status->status_ep,
+		IPA_ENDP_STATUS_n_STATUS_ENDP_SHFT,
+		IPA_ENDP_STATUS_n_STATUS_ENDP_BMSK);
+	IPA_SETFIELD_IN_REG(*val, status->status_location,
+		IPA_ENDP_STATUS_n_STATUS_LOCATION_SHFT,
+		IPA_ENDP_STATUS_n_STATUS_LOCATION_BMSK);
+}
+
+/* Pack QCNCM mode configuration into the QCNCM register value */
+static void ipareg_construct_qcncm(
+	enum ipahal_reg_name reg, const void *fields, u32 *val)
+{
+	struct ipahal_reg_qcncm *qcncm =
+		(struct ipahal_reg_qcncm *)fields;
+
+	IPA_SETFIELD_IN_REG(*val, qcncm->mode_en ? 1 : 0,
+		IPA_QCNCM_MODE_EN_SHFT,
+		IPA_QCNCM_MODE_EN_BMSK);
+	IPA_SETFIELD_IN_REG(*val, qcncm->mode_val,
+		IPA_QCNCM_MODE_VAL_SHFT,
+		IPA_QCNCM_MODE_VAL_BMSK);
+	/*
+	 * Bug fix: the undefined bits belong to the UNDEFINED1/UNDEFINED2
+	 * ranges (see ipareg_parse_qcncm), not to MODE_VAL. The previous
+	 * code OR'ed qcncm->undefined into the MODE_VAL bit range (shift 0,
+	 * IPA_QCNCM_MODE_VAL_BMSK), corrupting the mode value written just
+	 * above whenever undefined was non-zero.
+	 */
+	IPA_SETFIELD_IN_REG(*val, qcncm->undefined,
+		0, IPA_QCNCM_UNDEFINED1_BMSK);
+	IPA_SETFIELD_IN_REG(*val, qcncm->undefined,
+		0, IPA_QCNCM_MODE_UNDEFINED2_BMSK);
+}
+
+/*
+ * Unpack the QCNCM register value. The two hardware "undefined" bit
+ * ranges are extracted at their in-register positions (shift 0) and
+ * OR'ed together into the single 'undefined' field.
+ */
+static void ipareg_parse_qcncm(
+	enum ipahal_reg_name reg, void *fields, u32 val)
+{
+	struct ipahal_reg_qcncm *qcncm =
+		(struct ipahal_reg_qcncm *)fields;
+
+	memset(qcncm, 0, sizeof(struct ipahal_reg_qcncm));
+	qcncm->mode_en = IPA_GETFIELD_FROM_REG(val,
+		IPA_QCNCM_MODE_EN_SHFT,
+		IPA_QCNCM_MODE_EN_BMSK);
+	qcncm->mode_val = IPA_GETFIELD_FROM_REG(val,
+		IPA_QCNCM_MODE_VAL_SHFT,
+		IPA_QCNCM_MODE_VAL_BMSK);
+	qcncm->undefined = IPA_GETFIELD_FROM_REG(val,
+		0, IPA_QCNCM_UNDEFINED1_BMSK);
+	qcncm->undefined |= IPA_GETFIELD_FROM_REG(val,
+		0, IPA_QCNCM_MODE_UNDEFINED2_BMSK);
+}
+
+/* Pack single-NDP mode configuration into the SINGLE_NDP_MODE register */
+static void ipareg_construct_single_ndp_mode(
+	enum ipahal_reg_name reg, const void *fields, u32 *val)
+{
+	const struct ipahal_reg_single_ndp_mode *ndp_mode = fields;
+
+	IPA_SETFIELD_IN_REG(*val, ndp_mode->single_ndp_en ? 1 : 0,
+		IPA_SINGLE_NDP_MODE_SINGLE_NDP_EN_SHFT,
+		IPA_SINGLE_NDP_MODE_SINGLE_NDP_EN_BMSK);
+	IPA_SETFIELD_IN_REG(*val, ndp_mode->undefined,
+		IPA_SINGLE_NDP_MODE_UNDEFINED_SHFT,
+		IPA_SINGLE_NDP_MODE_UNDEFINED_BMSK);
+}
+
+/* Unpack the SINGLE_NDP_MODE register into its abstract representation */
+static void ipareg_parse_single_ndp_mode(
+	enum ipahal_reg_name reg, void *fields, u32 val)
+{
+	struct ipahal_reg_single_ndp_mode *ndp_mode = fields;
+
+	memset(ndp_mode, 0, sizeof(*ndp_mode));
+	ndp_mode->single_ndp_en = IPA_GETFIELD_FROM_REG(val,
+		IPA_SINGLE_NDP_MODE_SINGLE_NDP_EN_SHFT,
+		IPA_SINGLE_NDP_MODE_SINGLE_NDP_EN_BMSK);
+	ndp_mode->undefined = IPA_GETFIELD_FROM_REG(val,
+		IPA_SINGLE_NDP_MODE_UNDEFINED_SHFT,
+		IPA_SINGLE_NDP_MODE_UNDEFINED_BMSK);
+}
+
+/*
+ * Pack debug-counter control configuration into DEBUG_CNT_CTRL_n.
+ * Maps the abstract counter type to its HW encoding; for IPv4/IPv6
+ * filter counters, global (non pipe-scoped) rule indexing is not
+ * supported and triggers a WARN. Rule-index field layout differs
+ * from IPA v3.5 on.
+ * Fix: removed the stray ';' after the switch block (empty statement).
+ */
+static void ipareg_construct_debug_cnt_ctrl_n(
+	enum ipahal_reg_name reg, const void *fields, u32 *val)
+{
+	struct ipahal_reg_debug_cnt_ctrl *dbg_cnt_ctrl =
+		(struct ipahal_reg_debug_cnt_ctrl *)fields;
+	u8 type;
+
+	IPA_SETFIELD_IN_REG(*val, dbg_cnt_ctrl->en ? 1 : 0,
+		IPA_DEBUG_CNT_CTRL_n_DBG_CNT_EN_SHFT,
+		IPA_DEBUG_CNT_CTRL_n_DBG_CNT_EN_BMSK);
+
+	switch (dbg_cnt_ctrl->type) {
+	case DBG_CNT_TYPE_IPV4_FLTR:
+		type = 0x0;
+		if (!dbg_cnt_ctrl->rule_idx_pipe_rule) {
+			IPAHAL_ERR("No FLT global rules\n");
+			WARN_ON(1);
+		}
+		break;
+	case DBG_CNT_TYPE_IPV4_ROUT:
+		type = 0x1;
+		break;
+	case DBG_CNT_TYPE_GENERAL:
+		type = 0x2;
+		break;
+	case DBG_CNT_TYPE_IPV6_FLTR:
+		type = 0x4;
+		if (!dbg_cnt_ctrl->rule_idx_pipe_rule) {
+			IPAHAL_ERR("No FLT global rules\n");
+			WARN_ON(1);
+		}
+		break;
+	case DBG_CNT_TYPE_IPV6_ROUT:
+		type = 0x5;
+		break;
+	default:
+		IPAHAL_ERR("Invalid dbg_cnt_ctrl type (%d) for %s\n",
+			dbg_cnt_ctrl->type, ipahal_reg_name_str(reg));
+		WARN_ON(1);
+		return;
+	}
+
+	IPA_SETFIELD_IN_REG(*val, type,
+		IPA_DEBUG_CNT_CTRL_n_DBG_CNT_TYPE_SHFT,
+		IPA_DEBUG_CNT_CTRL_n_DBG_CNT_TYPE_BMSK);
+
+	IPA_SETFIELD_IN_REG(*val, dbg_cnt_ctrl->product ? 1 : 0,
+		IPA_DEBUG_CNT_CTRL_n_DBG_CNT_PRODUCT_SHFT,
+		IPA_DEBUG_CNT_CTRL_n_DBG_CNT_PRODUCT_BMSK);
+
+	IPA_SETFIELD_IN_REG(*val, dbg_cnt_ctrl->src_pipe,
+		IPA_DEBUG_CNT_CTRL_n_DBG_CNT_SOURCE_PIPE_SHFT,
+		IPA_DEBUG_CNT_CTRL_n_DBG_CNT_SOURCE_PIPE_BMSK);
+
+	/* rule-index layout changed at IPA v3.5 (no pipe_rule bit) */
+	if (ipahal_ctx->hw_type <= IPA_HW_v3_1) {
+		IPA_SETFIELD_IN_REG(*val, dbg_cnt_ctrl->rule_idx,
+			IPA_DEBUG_CNT_CTRL_n_DBG_CNT_RULE_INDEX_SHFT,
+			IPA_DEBUG_CNT_CTRL_n_DBG_CNT_RULE_INDEX_BMSK);
+		IPA_SETFIELD_IN_REG(*val, dbg_cnt_ctrl->rule_idx_pipe_rule,
+			IPA_DEBUG_CNT_CTRL_n_DBG_CNT_RULE_INDEX_PIPE_RULE_SHFT,
+			IPA_DEBUG_CNT_CTRL_n_DBG_CNT_RULE_INDEX_PIPE_RULE_BMSK
+			);
+	} else {
+		IPA_SETFIELD_IN_REG(*val, dbg_cnt_ctrl->rule_idx,
+			IPA_DEBUG_CNT_CTRL_n_DBG_CNT_RULE_INDEX_SHFT,
+			IPA_DEBUG_CNT_CTRL_n_DBG_CNT_RULE_INDEX_BMSK_V3_5);
+	}
+}
+
+/* Unpack SHARED_MEM_SIZE register: shared memory size and base address */
+static void ipareg_parse_shared_mem_size(
+	enum ipahal_reg_name reg, void *fields, u32 val)
+{
+	struct ipahal_reg_shared_mem_size *mem = fields;
+
+	memset(mem, 0, sizeof(*mem));
+	mem->shared_mem_sz = IPA_GETFIELD_FROM_REG(val,
+		IPA_SHARED_MEM_SIZE_SHARED_MEM_SIZE_SHFT,
+		IPA_SHARED_MEM_SIZE_SHARED_MEM_SIZE_BMSK);
+	mem->shared_mem_baddr = IPA_GETFIELD_FROM_REG(val,
+		IPA_SHARED_MEM_SIZE_SHARED_MEM_BADDR_SHFT,
+		IPA_SHARED_MEM_SIZE_SHARED_MEM_BADDR_BMSK);
+}
+
+/* Pack endpoint resource-group assignment into ENDP_INIT_RSRC_GRP_n */
+static void ipareg_construct_endp_init_rsrc_grp_n(
+	enum ipahal_reg_name reg, const void *fields, u32 *val)
+{
+	const struct ipahal_reg_endp_init_rsrc_grp *grp = fields;
+
+	IPA_SETFIELD_IN_REG(*val, grp->rsrc_grp,
+		IPA_ENDP_INIT_RSRC_GRP_n_RSRC_GRP_SHFT,
+		IPA_ENDP_INIT_RSRC_GRP_n_RSRC_GRP_BMSK);
+}
+
+/* v3.5 variant of ipareg_construct_endp_init_rsrc_grp_n (narrower field) */
+static void ipareg_construct_endp_init_rsrc_grp_n_v3_5(
+	enum ipahal_reg_name reg, const void *fields, u32 *val)
+{
+	const struct ipahal_reg_endp_init_rsrc_grp *grp = fields;
+
+	IPA_SETFIELD_IN_REG(*val, grp->rsrc_grp,
+		IPA_ENDP_INIT_RSRC_GRP_n_RSRC_GRP_SHFT_v3_5,
+		IPA_ENDP_INIT_RSRC_GRP_n_RSRC_GRP_BMSK_v3_5);
+}
+
+/* Pack the QMAP id into the ENDP_INIT_HDR_METADATA_n register value */
+static void ipareg_construct_endp_init_hdr_metadata_n(
+	enum ipahal_reg_name reg, const void *fields, u32 *val)
+{
+	const struct ipa_ep_cfg_metadata *md = fields;
+
+	IPA_SETFIELD_IN_REG(*val, md->qmap_id,
+		IPA_ENDP_INIT_HDR_METADATA_n_METADATA_SHFT,
+		IPA_ENDP_INIT_HDR_METADATA_n_METADATA_BMSK);
+}
+
+/* Pack the metadata mask into ENDP_INIT_HDR_METADATA_MASK_n */
+static void ipareg_construct_endp_init_hdr_metadata_mask_n(
+	enum ipahal_reg_name reg, const void *fields, u32 *val)
+{
+	const struct ipa_ep_cfg_metadata_mask *mask = fields;
+
+	IPA_SETFIELD_IN_REG(*val, mask->metadata_mask,
+		IPA_ENDP_INIT_HDR_METADATA_MASK_n_METADATA_MASK_SHFT,
+		IPA_ENDP_INIT_HDR_METADATA_MASK_n_METADATA_MASK_BMSK);
+}
+
+/*
+ * Pack endpoint checksum/fragmentation configuration into
+ * ENDP_INIT_CFG_n. Invalid cs_offload_en values WARN and leave
+ * the register value untouched.
+ */
+static void ipareg_construct_endp_init_cfg_n(
+	enum ipahal_reg_name reg, const void *fields, u32 *val)
+{
+	const struct ipa_ep_cfg_cfg *ep_cfg = fields;
+	u32 cs_offload_en;
+
+	/* map abstract checksum-offload enum to its HW encoding */
+	switch (ep_cfg->cs_offload_en) {
+	case IPA_DISABLE_CS_OFFLOAD:
+		cs_offload_en = 0;
+		break;
+	case IPA_ENABLE_CS_OFFLOAD_UL:
+		cs_offload_en = 1;
+		break;
+	case IPA_ENABLE_CS_OFFLOAD_DL:
+		cs_offload_en = 2;
+		break;
+	default:
+		IPAHAL_ERR("Invalid cs_offload_en value for %s\n",
+			ipahal_reg_name_str(reg));
+		WARN_ON(1);
+		return;
+	}
+
+	IPA_SETFIELD_IN_REG(*val, ep_cfg->frag_offload_en ? 1 : 0,
+		IPA_ENDP_INIT_CFG_n_FRAG_OFFLOAD_EN_SHFT,
+		IPA_ENDP_INIT_CFG_n_FRAG_OFFLOAD_EN_BMSK);
+	IPA_SETFIELD_IN_REG(*val, cs_offload_en,
+		IPA_ENDP_INIT_CFG_n_CS_OFFLOAD_EN_SHFT,
+		IPA_ENDP_INIT_CFG_n_CS_OFFLOAD_EN_BMSK);
+	IPA_SETFIELD_IN_REG(*val, ep_cfg->cs_metadata_hdr_offset,
+		IPA_ENDP_INIT_CFG_n_CS_METADATA_HDR_OFFSET_SHFT,
+		IPA_ENDP_INIT_CFG_n_CS_METADATA_HDR_OFFSET_BMSK);
+	IPA_SETFIELD_IN_REG(*val, ep_cfg->gen_qmb_master_sel,
+		IPA_ENDP_INIT_CFG_n_CS_GEN_QMB_MASTER_SEL_SHFT,
+		IPA_ENDP_INIT_CFG_n_CS_GEN_QMB_MASTER_SEL_BMSK);
+}
+
+/* Pack endpoint de-aggregation configuration into ENDP_INIT_DEAGGR_n */
+static void ipareg_construct_endp_init_deaggr_n(
+	enum ipahal_reg_name reg, const void *fields, u32 *val)
+{
+	const struct ipa_ep_cfg_deaggr *deaggr = fields;
+
+	IPA_SETFIELD_IN_REG(*val, deaggr->deaggr_hdr_len,
+		IPA_ENDP_INIT_DEAGGR_n_DEAGGR_HDR_LEN_SHFT,
+		IPA_ENDP_INIT_DEAGGR_n_DEAGGR_HDR_LEN_BMSK);
+	IPA_SETFIELD_IN_REG(*val, deaggr->packet_offset_valid,
+		IPA_ENDP_INIT_DEAGGR_n_PACKET_OFFSET_VALID_SHFT,
+		IPA_ENDP_INIT_DEAGGR_n_PACKET_OFFSET_VALID_BMSK);
+	IPA_SETFIELD_IN_REG(*val, deaggr->packet_offset_location,
+		IPA_ENDP_INIT_DEAGGR_n_PACKET_OFFSET_LOCATION_SHFT,
+		IPA_ENDP_INIT_DEAGGR_n_PACKET_OFFSET_LOCATION_BMSK);
+	IPA_SETFIELD_IN_REG(*val, deaggr->max_packet_len,
+		IPA_ENDP_INIT_DEAGGR_n_MAX_PACKET_LEN_SHFT,
+		IPA_ENDP_INIT_DEAGGR_n_MAX_PACKET_LEN_BMSK);
+}
+
+/* Pack the head-of-line blocking enable bit into ENDP_INIT_HOL_BLOCK_EN_n */
+static void ipareg_construct_endp_init_hol_block_en_n(
+	enum ipahal_reg_name reg, const void *fields, u32 *val)
+{
+	const struct ipa_ep_cfg_holb *holb = fields;
+
+	IPA_SETFIELD_IN_REG(*val, holb->en,
+		IPA_ENDP_INIT_HOL_BLOCK_EN_n_EN_SHFT,
+		IPA_ENDP_INIT_HOL_BLOCK_EN_n_EN_BMSK);
+}
+
+/* Pack the HOLB timer value into ENDP_INIT_HOL_BLOCK_TIMER_n */
+static void ipareg_construct_endp_init_hol_block_timer_n(
+	enum ipahal_reg_name reg, const void *fields, u32 *val)
+{
+	const struct ipa_ep_cfg_holb *holb = fields;
+
+	IPA_SETFIELD_IN_REG(*val, holb->tmr_val,
+		IPA_ENDP_INIT_HOL_BLOCK_TIMER_n_TIMER_SHFT,
+		IPA_ENDP_INIT_HOL_BLOCK_TIMER_n_TIMER_BMSK);
+}
+
+/* Pack endpoint suspend/delay bits into ENDP_INIT_CTRL_n */
+static void ipareg_construct_endp_init_ctrl_n(enum ipahal_reg_name reg,
+	const void *fields, u32 *val)
+{
+	const struct ipa_ep_cfg_ctrl *ctrl = fields;
+
+	IPA_SETFIELD_IN_REG(*val, ctrl->ipa_ep_suspend,
+		IPA_ENDP_INIT_CTRL_n_ENDP_SUSPEND_SHFT,
+		IPA_ENDP_INIT_CTRL_n_ENDP_SUSPEND_BMSK);
+	IPA_SETFIELD_IN_REG(*val, ctrl->ipa_ep_delay,
+		IPA_ENDP_INIT_CTRL_n_ENDP_DELAY_SHFT,
+		IPA_ENDP_INIT_CTRL_n_ENDP_DELAY_BMSK);
+}
+
+/* Pack the endpoint NAT-enable setting into ENDP_INIT_NAT_n */
+static void ipareg_construct_endp_init_nat_n(enum ipahal_reg_name reg,
+	const void *fields, u32 *val)
+{
+	const struct ipa_ep_cfg_nat *nat = fields;
+
+	IPA_SETFIELD_IN_REG(*val, nat->nat_en,
+		IPA_ENDP_INIT_NAT_n_NAT_EN_SHFT,
+		IPA_ENDP_INIT_NAT_n_NAT_EN_BMSK);
+}
+
+/* Pack endpoint mode and destination pipe into ENDP_INIT_MODE_n */
+static void ipareg_construct_endp_init_mode_n(enum ipahal_reg_name reg,
+	const void *fields, u32 *val)
+{
+	const struct ipahal_reg_endp_init_mode *mode = fields;
+
+	IPA_SETFIELD_IN_REG(*val, mode->ep_mode.mode,
+		IPA_ENDP_INIT_MODE_n_MODE_SHFT,
+		IPA_ENDP_INIT_MODE_n_MODE_BMSK);
+	IPA_SETFIELD_IN_REG(*val, mode->dst_pipe_number,
+		IPA_ENDP_INIT_MODE_n_DEST_PIPE_INDEX_SHFT,
+		IPA_ENDP_INIT_MODE_n_DEST_PIPE_INDEX_BMSK);
+}
+
+/* Pack the endpoint's route table index into ENDP_INIT_ROUTE_n */
+static void ipareg_construct_endp_init_route_n(enum ipahal_reg_name reg,
+	const void *fields, u32 *val)
+{
+	const struct ipahal_reg_endp_init_route *rt = fields;
+
+	IPA_SETFIELD_IN_REG(*val, rt->route_table_index,
+		IPA_ENDP_INIT_ROUTE_n_ROUTE_TABLE_INDEX_SHFT,
+		IPA_ENDP_INIT_ROUTE_n_ROUTE_TABLE_INDEX_BMSK);
+}
+
+/*
+ * Unpack ENDP_INIT_AGGR_n into the abstract aggregation configuration.
+ * Consistency fix: use the IPA_GETFIELD_FROM_REG helper like every
+ * other parse function in this file instead of hand-rolled
+ * (val & BMSK) >> SHFT expressions (identical semantics).
+ */
+static void ipareg_parse_endp_init_aggr_n(enum ipahal_reg_name reg,
+	void *fields, u32 val)
+{
+	struct ipa_ep_cfg_aggr *ep_aggr =
+		(struct ipa_ep_cfg_aggr *)fields;
+
+	memset(ep_aggr, 0, sizeof(struct ipa_ep_cfg_aggr));
+
+	/* aggr_en is boolean: compare HW encoding against IPA_ENABLE_AGGR */
+	ep_aggr->aggr_en =
+		(IPA_GETFIELD_FROM_REG(val,
+			IPA_ENDP_INIT_AGGR_n_AGGR_EN_SHFT,
+			IPA_ENDP_INIT_AGGR_n_AGGR_EN_BMSK)
+		== IPA_ENABLE_AGGR);
+	ep_aggr->aggr = IPA_GETFIELD_FROM_REG(val,
+		IPA_ENDP_INIT_AGGR_n_AGGR_TYPE_SHFT,
+		IPA_ENDP_INIT_AGGR_n_AGGR_TYPE_BMSK);
+	ep_aggr->aggr_byte_limit = IPA_GETFIELD_FROM_REG(val,
+		IPA_ENDP_INIT_AGGR_n_AGGR_BYTE_LIMIT_SHFT,
+		IPA_ENDP_INIT_AGGR_n_AGGR_BYTE_LIMIT_BMSK);
+	ep_aggr->aggr_time_limit = IPA_GETFIELD_FROM_REG(val,
+		IPA_ENDP_INIT_AGGR_n_AGGR_TIME_LIMIT_SHFT,
+		IPA_ENDP_INIT_AGGR_n_AGGR_TIME_LIMIT_BMSK);
+	ep_aggr->aggr_pkt_limit = IPA_GETFIELD_FROM_REG(val,
+		IPA_ENDP_INIT_AGGR_n_AGGR_PKT_LIMIT_SHFT,
+		IPA_ENDP_INIT_AGGR_n_AGGR_PKT_LIMIT_BMSK);
+	ep_aggr->aggr_sw_eof_active = IPA_GETFIELD_FROM_REG(val,
+		IPA_ENDP_INIT_AGGR_n_AGGR_SW_EOF_ACTIVE_SHFT,
+		IPA_ENDP_INIT_AGGR_n_AGGR_SW_EOF_ACTIVE_BMSK);
+	ep_aggr->aggr_hard_byte_limit_en = IPA_GETFIELD_FROM_REG(val,
+		IPA_ENDP_INIT_AGGR_n_AGGR_HARD_BYTE_LIMIT_ENABLE_SHFT,
+		IPA_ENDP_INIT_AGGR_n_AGGR_HARD_BYTE_LIMIT_ENABLE_BMSK);
+}
+
+/*
+ * Pack endpoint aggregation configuration into ENDP_INIT_AGGR_n.
+ * Fix: the old code wrote ep_aggr->aggr_hard_byte_limit_en = 0 through
+ * a pointer derived from the const 'fields' argument, silently mutating
+ * the caller's configuration struct from a construct routine. Write the
+ * literal 0 into the register field instead; the caller's struct is
+ * left untouched.
+ */
+static void ipareg_construct_endp_init_aggr_n(enum ipahal_reg_name reg,
+	const void *fields, u32 *val)
+{
+	const struct ipa_ep_cfg_aggr *ep_aggr =
+		(const struct ipa_ep_cfg_aggr *)fields;
+
+	IPA_SETFIELD_IN_REG(*val, ep_aggr->aggr_en,
+		IPA_ENDP_INIT_AGGR_n_AGGR_EN_SHFT,
+		IPA_ENDP_INIT_AGGR_n_AGGR_EN_BMSK);
+
+	IPA_SETFIELD_IN_REG(*val, ep_aggr->aggr,
+		IPA_ENDP_INIT_AGGR_n_AGGR_TYPE_SHFT,
+		IPA_ENDP_INIT_AGGR_n_AGGR_TYPE_BMSK);
+
+	IPA_SETFIELD_IN_REG(*val, ep_aggr->aggr_byte_limit,
+		IPA_ENDP_INIT_AGGR_n_AGGR_BYTE_LIMIT_SHFT,
+		IPA_ENDP_INIT_AGGR_n_AGGR_BYTE_LIMIT_BMSK);
+
+	IPA_SETFIELD_IN_REG(*val, ep_aggr->aggr_time_limit,
+		IPA_ENDP_INIT_AGGR_n_AGGR_TIME_LIMIT_SHFT,
+		IPA_ENDP_INIT_AGGR_n_AGGR_TIME_LIMIT_BMSK);
+
+	IPA_SETFIELD_IN_REG(*val, ep_aggr->aggr_pkt_limit,
+		IPA_ENDP_INIT_AGGR_n_AGGR_PKT_LIMIT_SHFT,
+		IPA_ENDP_INIT_AGGR_n_AGGR_PKT_LIMIT_BMSK);
+
+	IPA_SETFIELD_IN_REG(*val, ep_aggr->aggr_sw_eof_active,
+		IPA_ENDP_INIT_AGGR_n_AGGR_SW_EOF_ACTIVE_SHFT,
+		IPA_ENDP_INIT_AGGR_n_AGGR_SW_EOF_ACTIVE_BMSK);
+
+	/* At IPAv3 hard_byte_limit is not supported; always write 0 */
+	IPA_SETFIELD_IN_REG(*val, 0,
+		IPA_ENDP_INIT_AGGR_n_AGGR_HARD_BYTE_LIMIT_ENABLE_SHFT,
+		IPA_ENDP_INIT_AGGR_n_AGGR_HARD_BYTE_LIMIT_ENABLE_BMSK);
+}
+
+/* Pack extended header configuration into ENDP_INIT_HDR_EXT_n */
+static void ipareg_construct_endp_init_hdr_ext_n(enum ipahal_reg_name reg,
+	const void *fields, u32 *val)
+{
+	const struct ipa_ep_cfg_hdr_ext *hdr_ext = fields;
+	/* HW encodes endianness as 0 = little, 1 = big */
+	u8 endianness = hdr_ext->hdr_little_endian ? 0 : 1;
+
+	IPA_SETFIELD_IN_REG(*val, hdr_ext->hdr_pad_to_alignment,
+		IPA_ENDP_INIT_HDR_EXT_n_HDR_PAD_TO_ALIGNMENT_SHFT,
+		IPA_ENDP_INIT_HDR_EXT_n_HDR_PAD_TO_ALIGNMENT_BMSK_v3_0);
+	IPA_SETFIELD_IN_REG(*val, hdr_ext->hdr_total_len_or_pad_offset,
+		IPA_ENDP_INIT_HDR_EXT_n_HDR_TOTAL_LEN_OR_PAD_OFFSET_SHFT,
+		IPA_ENDP_INIT_HDR_EXT_n_HDR_TOTAL_LEN_OR_PAD_OFFSET_BMSK);
+	IPA_SETFIELD_IN_REG(*val, hdr_ext->hdr_payload_len_inc_padding,
+		IPA_ENDP_INIT_HDR_EXT_n_HDR_PAYLOAD_LEN_INC_PADDING_SHFT,
+		IPA_ENDP_INIT_HDR_EXT_n_HDR_PAYLOAD_LEN_INC_PADDING_BMSK);
+	IPA_SETFIELD_IN_REG(*val, hdr_ext->hdr_total_len_or_pad,
+		IPA_ENDP_INIT_HDR_EXT_n_HDR_TOTAL_LEN_OR_PAD_SHFT,
+		IPA_ENDP_INIT_HDR_EXT_n_HDR_TOTAL_LEN_OR_PAD_BMSK);
+	IPA_SETFIELD_IN_REG(*val, hdr_ext->hdr_total_len_or_pad_valid,
+		IPA_ENDP_INIT_HDR_EXT_n_HDR_TOTAL_LEN_OR_PAD_VALID_SHFT,
+		IPA_ENDP_INIT_HDR_EXT_n_HDR_TOTAL_LEN_OR_PAD_VALID_BMSK);
+	IPA_SETFIELD_IN_REG(*val, endianness,
+		IPA_ENDP_INIT_HDR_EXT_n_HDR_ENDIANNESS_SHFT,
+		IPA_ENDP_INIT_HDR_EXT_n_HDR_ENDIANNESS_BMSK);
+}
+
+/*
+ * Pack header-insertion/removal configuration into ENDP_INIT_HDR_n.
+ * NOTE(review): the first two fields use _v2-suffixed shift/mask
+ * macros while the rest use unsuffixed ones — presumably the layout
+ * of those two fields carried over unchanged from IPA v2; confirm
+ * against the register spec.
+ */
+static void ipareg_construct_endp_init_hdr_n(enum ipahal_reg_name reg,
+	const void *fields, u32 *val)
+{
+	struct ipa_ep_cfg_hdr *ep_hdr;
+
+	ep_hdr = (struct ipa_ep_cfg_hdr *)fields;
+
+	IPA_SETFIELD_IN_REG(*val, ep_hdr->hdr_metadata_reg_valid,
+		IPA_ENDP_INIT_HDR_n_HDR_METADATA_REG_VALID_SHFT_v2,
+		IPA_ENDP_INIT_HDR_n_HDR_METADATA_REG_VALID_BMSK_v2);
+
+	IPA_SETFIELD_IN_REG(*val, ep_hdr->hdr_remove_additional,
+		IPA_ENDP_INIT_HDR_n_HDR_LEN_INC_DEAGG_HDR_SHFT_v2,
+		IPA_ENDP_INIT_HDR_n_HDR_LEN_INC_DEAGG_HDR_BMSK_v2);
+
+	IPA_SETFIELD_IN_REG(*val, ep_hdr->hdr_a5_mux,
+		IPA_ENDP_INIT_HDR_n_HDR_A5_MUX_SHFT,
+		IPA_ENDP_INIT_HDR_n_HDR_A5_MUX_BMSK);
+
+	IPA_SETFIELD_IN_REG(*val, ep_hdr->hdr_ofst_pkt_size,
+		IPA_ENDP_INIT_HDR_n_HDR_OFST_PKT_SIZE_SHFT,
+		IPA_ENDP_INIT_HDR_n_HDR_OFST_PKT_SIZE_BMSK);
+
+	IPA_SETFIELD_IN_REG(*val, ep_hdr->hdr_ofst_pkt_size_valid,
+		IPA_ENDP_INIT_HDR_n_HDR_OFST_PKT_SIZE_VALID_SHFT,
+		IPA_ENDP_INIT_HDR_n_HDR_OFST_PKT_SIZE_VALID_BMSK);
+
+	IPA_SETFIELD_IN_REG(*val, ep_hdr->hdr_additional_const_len,
+		IPA_ENDP_INIT_HDR_n_HDR_ADDITIONAL_CONST_LEN_SHFT,
+		IPA_ENDP_INIT_HDR_n_HDR_ADDITIONAL_CONST_LEN_BMSK);
+
+	IPA_SETFIELD_IN_REG(*val, ep_hdr->hdr_ofst_metadata,
+		IPA_ENDP_INIT_HDR_n_HDR_OFST_METADATA_SHFT,
+		IPA_ENDP_INIT_HDR_n_HDR_OFST_METADATA_BMSK);
+
+	IPA_SETFIELD_IN_REG(*val, ep_hdr->hdr_ofst_metadata_valid,
+		IPA_ENDP_INIT_HDR_n_HDR_OFST_METADATA_VALID_SHFT,
+		IPA_ENDP_INIT_HDR_n_HDR_OFST_METADATA_VALID_BMSK);
+
+	IPA_SETFIELD_IN_REG(*val, ep_hdr->hdr_len,
+		IPA_ENDP_INIT_HDR_n_HDR_LEN_SHFT,
+		IPA_ENDP_INIT_HDR_n_HDR_LEN_BMSK);
+}
+
+/* Pack default-route configuration into the IPA_ROUTE register value */
+static void ipareg_construct_route(enum ipahal_reg_name reg,
+	const void *fields, u32 *val)
+{
+	const struct ipahal_reg_route *rt = fields;
+
+	IPA_SETFIELD_IN_REG(*val, rt->route_dis,
+		IPA_ROUTE_ROUTE_DIS_SHFT,
+		IPA_ROUTE_ROUTE_DIS_BMSK);
+	IPA_SETFIELD_IN_REG(*val, rt->route_def_pipe,
+		IPA_ROUTE_ROUTE_DEF_PIPE_SHFT,
+		IPA_ROUTE_ROUTE_DEF_PIPE_BMSK);
+	IPA_SETFIELD_IN_REG(*val, rt->route_def_hdr_table,
+		IPA_ROUTE_ROUTE_DEF_HDR_TABLE_SHFT,
+		IPA_ROUTE_ROUTE_DEF_HDR_TABLE_BMSK);
+	IPA_SETFIELD_IN_REG(*val, rt->route_def_hdr_ofst,
+		IPA_ROUTE_ROUTE_DEF_HDR_OFST_SHFT,
+		IPA_ROUTE_ROUTE_DEF_HDR_OFST_BMSK);
+	IPA_SETFIELD_IN_REG(*val, rt->route_frag_def_pipe,
+		IPA_ROUTE_ROUTE_FRAG_DEF_PIPE_SHFT,
+		IPA_ROUTE_ROUTE_FRAG_DEF_PIPE_BMSK);
+	IPA_SETFIELD_IN_REG(*val, rt->route_def_retain_hdr,
+		IPA_ROUTE_ROUTE_DEF_RETAIN_HDR_SHFT,
+		IPA_ROUTE_ROUTE_DEF_RETAIN_HDR_BMSK);
+}
+
+/* Pack the two QMB max-outstanding-writes limits into QSB_MAX_WRITES */
+static void ipareg_construct_qsb_max_writes(enum ipahal_reg_name reg,
+	const void *fields, u32 *val)
+{
+	const int *max_writes = fields;
+
+	IPA_SETFIELD_IN_REG(*val, max_writes[0],
+		IPA_QSB_MAX_WRITES_GEN_QMB_0_MAX_WRITES_SHFT,
+		IPA_QSB_MAX_WRITES_GEN_QMB_0_MAX_WRITES_BMSK);
+	IPA_SETFIELD_IN_REG(*val, max_writes[1],
+		IPA_QSB_MAX_WRITES_GEN_QMB_1_MAX_WRITES_SHFT,
+		IPA_QSB_MAX_WRITES_GEN_QMB_1_MAX_WRITES_BMSK);
+}
+
+/* Pack the two QMB max-outstanding-reads limits into QSB_MAX_READS */
+static void ipareg_construct_qsb_max_reads(enum ipahal_reg_name reg,
+	const void *fields, u32 *val)
+{
+	const int *max_reads = fields;
+
+	IPA_SETFIELD_IN_REG(*val, max_reads[0],
+		IPA_QSB_MAX_READS_GEN_QMB_0_MAX_READS_SHFT,
+		IPA_QSB_MAX_READS_GEN_QMB_0_MAX_READS_BMSK);
+	IPA_SETFIELD_IN_REG(*val, max_reads[1],
+		IPA_QSB_MAX_READS_GEN_QMB_1_MAX_READS_SHFT,
+		IPA_QSB_MAX_READS_GEN_QMB_1_MAX_READS_BMSK);
+}
+
+/* Pack TX prefetch configuration into the TX_CFG register (v3.5 layout) */
+static void ipareg_construct_tx_cfg(enum ipahal_reg_name reg,
+	const void *fields, u32 *val)
+{
+	const struct ipahal_reg_tx_cfg *cfg = fields;
+
+	IPA_SETFIELD_IN_REG(*val, cfg->tx0_prefetch_disable,
+		IPA_TX_CFG_TX0_PREFETCH_DISABLE_SHFT_V3_5,
+		IPA_TX_CFG_TX0_PREFETCH_DISABLE_BMSK_V3_5);
+	IPA_SETFIELD_IN_REG(*val, cfg->tx1_prefetch_disable,
+		IPA_TX_CFG_TX1_PREFETCH_DISABLE_SHFT_V3_5,
+		IPA_TX_CFG_TX1_PREFETCH_DISABLE_BMSK_V3_5);
+	IPA_SETFIELD_IN_REG(*val, cfg->prefetch_almost_empty_size,
+		IPA_TX_CFG_PREFETCH_ALMOST_EMPTY_SIZE_SHFT_V3_5,
+		IPA_TX_CFG_PREFETCH_ALMOST_EMPTY_SIZE_BMSK_V3_5);
+}
+
/*
 * struct ipahal_reg_obj - Register H/W information for specific IPA version
 * @construct: CB to construct a raw register value from an abstracted
 *	fields structure
 * @parse: CB to parse a raw register value into an abstracted
 *	fields structure
 * @offset: register offset relative to the IPA register base address.
 *	An offset of -1 marks a register that was removed on this
 *	IPA version (see ipahal_reg_objs[][] comment).
 * @n_ofst: byte stride between consecutive instances of an
 *	N-parameterized register (0 for non-parameterized registers)
 */
struct ipahal_reg_obj {
	void (*construct)(enum ipahal_reg_name reg, const void *fields,
		u32 *val);
	void (*parse)(enum ipahal_reg_name reg, void *fields,
		u32 val);
	u32 offset;
	u32 n_ofst;
};
+
/*
 * This table contains the info regarding each register for IPAv3 and later.
 * Information like: offset and construct/parse functions.
 * All the information on the register on IPAv3 are statically defined below.
 * If information is missing regarding some register on some IPA version,
 * the init function (ipahal_reg_init) will fill it with the information
 * from the previous IPA version.
 * Information is considered missing if all of the fields are 0.
 * If offset is -1, this means that the register is removed on the
 * specific version.
 * Each entry is {construct CB, parse CB, offset, N stride}.
 */
static struct ipahal_reg_obj ipahal_reg_objs[IPA_HW_MAX][IPA_REG_MAX] = {
	/* IPAv3 */
	[IPA_HW_v3_0][IPA_ROUTE] = {
		ipareg_construct_route, ipareg_parse_dummy,
		0x00000048, 0},
	[IPA_HW_v3_0][IPA_IRQ_STTS_EE_n] = {
		ipareg_construct_dummy, ipareg_parse_dummy,
		0x00003008, 0x1000},
	[IPA_HW_v3_0][IPA_IRQ_EN_EE_n] = {
		ipareg_construct_dummy, ipareg_parse_dummy,
		0x0000300c, 0x1000},
	[IPA_HW_v3_0][IPA_IRQ_CLR_EE_n] = {
		ipareg_construct_dummy, ipareg_parse_dummy,
		0x00003010, 0x1000},
	[IPA_HW_v3_0][IPA_IRQ_SUSPEND_INFO_EE_n] = {
		ipareg_construct_dummy, ipareg_parse_dummy,
		0x00003098, 0x1000},
	[IPA_HW_v3_0][IPA_BCR] = {
		ipareg_construct_dummy, ipareg_parse_dummy,
		0x000001D0, 0},
	[IPA_HW_v3_0][IPA_ENABLED_PIPES] = {
		ipareg_construct_dummy, ipareg_parse_dummy,
		0x00000038, 0},
	[IPA_HW_v3_0][IPA_COMP_SW_RESET] = {
		ipareg_construct_dummy, ipareg_parse_dummy,
		0x00000040, 0},
	[IPA_HW_v3_0][IPA_VERSION] = {
		ipareg_construct_dummy, ipareg_parse_dummy,
		0x00000034, 0},
	[IPA_HW_v3_0][IPA_TAG_TIMER] = {
		ipareg_construct_dummy, ipareg_parse_dummy,
		0x00000060, 0 },
	[IPA_HW_v3_0][IPA_COMP_HW_VERSION] = {
		ipareg_construct_dummy, ipareg_parse_dummy,
		0x00000030, 0},
	[IPA_HW_v3_0][IPA_SPARE_REG_1] = {
		ipareg_construct_dummy, ipareg_parse_dummy,
		0x00005090, 0},
	[IPA_HW_v3_0][IPA_SPARE_REG_2] = {
		ipareg_construct_dummy, ipareg_parse_dummy,
		0x00005094, 0},
	[IPA_HW_v3_0][IPA_COMP_CFG] = {
		ipareg_construct_dummy, ipareg_parse_dummy,
		0x0000003C, 0},
	[IPA_HW_v3_0][IPA_STATE_AGGR_ACTIVE] = {
		ipareg_construct_dummy, ipareg_parse_dummy,
		0x0000010C, 0},
	[IPA_HW_v3_0][IPA_ENDP_INIT_HDR_n] = {
		ipareg_construct_endp_init_hdr_n, ipareg_parse_dummy,
		0x00000810, 0x70},
	[IPA_HW_v3_0][IPA_ENDP_INIT_HDR_EXT_n] = {
		ipareg_construct_endp_init_hdr_ext_n, ipareg_parse_dummy,
		0x00000814, 0x70},
	[IPA_HW_v3_0][IPA_ENDP_INIT_AGGR_n] = {
		ipareg_construct_endp_init_aggr_n,
		ipareg_parse_endp_init_aggr_n,
		0x00000824, 0x70},
	[IPA_HW_v3_0][IPA_AGGR_FORCE_CLOSE] = {
		ipareg_construct_dummy, ipareg_parse_dummy,
		0x000001EC, 0},
	[IPA_HW_v3_0][IPA_ENDP_INIT_ROUTE_n] = {
		ipareg_construct_endp_init_route_n, ipareg_parse_dummy,
		0x00000828, 0x70},
	[IPA_HW_v3_0][IPA_ENDP_INIT_MODE_n] = {
		ipareg_construct_endp_init_mode_n, ipareg_parse_dummy,
		0x00000820, 0x70},
	[IPA_HW_v3_0][IPA_ENDP_INIT_NAT_n] = {
		ipareg_construct_endp_init_nat_n, ipareg_parse_dummy,
		0x0000080C, 0x70},
	[IPA_HW_v3_0][IPA_ENDP_INIT_CTRL_n] = {
		ipareg_construct_endp_init_ctrl_n, ipareg_parse_dummy,
		0x00000800, 0x70},
	[IPA_HW_v3_0][IPA_ENDP_INIT_HOL_BLOCK_EN_n] = {
		ipareg_construct_endp_init_hol_block_en_n,
		ipareg_parse_dummy,
		0x0000082c, 0x70},
	[IPA_HW_v3_0][IPA_ENDP_INIT_HOL_BLOCK_TIMER_n] = {
		ipareg_construct_endp_init_hol_block_timer_n,
		ipareg_parse_dummy,
		0x00000830, 0x70},
	[IPA_HW_v3_0][IPA_ENDP_INIT_DEAGGR_n] = {
		ipareg_construct_endp_init_deaggr_n,
		ipareg_parse_dummy,
		0x00000834, 0x70},
	[IPA_HW_v3_0][IPA_ENDP_INIT_SEQ_n] = {
		ipareg_construct_dummy, ipareg_parse_dummy,
		0x0000083C, 0x70},
	[IPA_HW_v3_0][IPA_DEBUG_CNT_REG_n] = {
		ipareg_construct_dummy, ipareg_parse_dummy,
		0x00000600, 0x4},
	[IPA_HW_v3_0][IPA_ENDP_INIT_CFG_n] = {
		ipareg_construct_endp_init_cfg_n, ipareg_parse_dummy,
		0x00000808, 0x70},
	[IPA_HW_v3_0][IPA_IRQ_EE_UC_n] = {
		ipareg_construct_dummy, ipareg_parse_dummy,
		0x0000301c, 0x1000},
	[IPA_HW_v3_0][IPA_ENDP_INIT_HDR_METADATA_MASK_n] = {
		ipareg_construct_endp_init_hdr_metadata_mask_n,
		ipareg_parse_dummy,
		0x00000818, 0x70},
	[IPA_HW_v3_0][IPA_ENDP_INIT_HDR_METADATA_n] = {
		ipareg_construct_endp_init_hdr_metadata_n,
		ipareg_parse_dummy,
		0x0000081c, 0x70},
	[IPA_HW_v3_0][IPA_ENDP_INIT_RSRC_GRP_n] = {
		ipareg_construct_endp_init_rsrc_grp_n,
		ipareg_parse_dummy,
		0x00000838, 0x70},
	[IPA_HW_v3_0][IPA_SHARED_MEM_SIZE] = {
		ipareg_construct_dummy, ipareg_parse_shared_mem_size,
		0x00000054, 0},
	[IPA_HW_v3_0][IPA_SRAM_DIRECT_ACCESS_n] = {
		ipareg_construct_dummy, ipareg_parse_dummy,
		0x00007000, 0x4},
	[IPA_HW_v3_0][IPA_DEBUG_CNT_CTRL_n] = {
		ipareg_construct_debug_cnt_ctrl_n, ipareg_parse_dummy,
		0x00000640, 0x4},
	[IPA_HW_v3_0][IPA_UC_MAILBOX_m_n] = {
		ipareg_construct_dummy, ipareg_parse_dummy,
		0x00032000, 0x4},
	[IPA_HW_v3_0][IPA_FILT_ROUT_HASH_FLUSH] = {
		ipareg_construct_dummy, ipareg_parse_dummy,
		0x00000090, 0},
	[IPA_HW_v3_0][IPA_SINGLE_NDP_MODE] = {
		ipareg_construct_single_ndp_mode, ipareg_parse_single_ndp_mode,
		0x00000068, 0},
	[IPA_HW_v3_0][IPA_QCNCM] = {
		ipareg_construct_qcncm, ipareg_parse_qcncm,
		0x00000064, 0},
	[IPA_HW_v3_0][IPA_SYS_PKT_PROC_CNTXT_BASE] = {
		ipareg_construct_dummy, ipareg_parse_dummy,
		0x000001e0, 0},
	[IPA_HW_v3_0][IPA_LOCAL_PKT_PROC_CNTXT_BASE] = {
		ipareg_construct_dummy, ipareg_parse_dummy,
		0x000001e8, 0},
	[IPA_HW_v3_0][IPA_ENDP_STATUS_n] = {
		ipareg_construct_endp_status_n, ipareg_parse_dummy,
		0x00000840, 0x70},
	[IPA_HW_v3_0][IPA_ENDP_FILTER_ROUTER_HSH_CFG_n] = {
		ipareg_construct_hash_cfg_n, ipareg_parse_hash_cfg_n,
		0x0000085C, 0x70},
	[IPA_HW_v3_0][IPA_SRC_RSRC_GRP_01_RSRC_TYPE_n] = {
		ipareg_construct_rsrg_grp_xy, ipareg_parse_dummy,
		0x00000400, 0x20},
	[IPA_HW_v3_0][IPA_SRC_RSRC_GRP_23_RSRC_TYPE_n] = {
		ipareg_construct_rsrg_grp_xy, ipareg_parse_dummy,
		0x00000404, 0x20},
	[IPA_HW_v3_0][IPA_SRC_RSRC_GRP_45_RSRC_TYPE_n] = {
		ipareg_construct_rsrg_grp_xy, ipareg_parse_dummy,
		0x00000408, 0x20},
	[IPA_HW_v3_0][IPA_SRC_RSRC_GRP_67_RSRC_TYPE_n] = {
		ipareg_construct_rsrg_grp_xy, ipareg_parse_dummy,
		0x0000040C, 0x20},
	[IPA_HW_v3_0][IPA_DST_RSRC_GRP_01_RSRC_TYPE_n] = {
		ipareg_construct_rsrg_grp_xy, ipareg_parse_dummy,
		0x00000500, 0x20},
	[IPA_HW_v3_0][IPA_DST_RSRC_GRP_23_RSRC_TYPE_n] = {
		ipareg_construct_rsrg_grp_xy, ipareg_parse_dummy,
		0x00000504, 0x20},
	[IPA_HW_v3_0][IPA_DST_RSRC_GRP_45_RSRC_TYPE_n] = {
		ipareg_construct_rsrg_grp_xy, ipareg_parse_dummy,
		0x00000508, 0x20},
	[IPA_HW_v3_0][IPA_DST_RSRC_GRP_67_RSRC_TYPE_n] = {
		ipareg_construct_rsrg_grp_xy, ipareg_parse_dummy,
		0x0000050c, 0x20},
	[IPA_HW_v3_0][IPA_RX_HPS_CLIENTS_MIN_DEPTH_0] = {
		ipareg_construct_rx_hps_clients_depth0, ipareg_parse_dummy,
		0x000023C4, 0},
	[IPA_HW_v3_0][IPA_RX_HPS_CLIENTS_MIN_DEPTH_1] = {
		ipareg_construct_rx_hps_clients_depth1, ipareg_parse_dummy,
		0x000023C8, 0},
	[IPA_HW_v3_0][IPA_RX_HPS_CLIENTS_MAX_DEPTH_0] = {
		ipareg_construct_rx_hps_clients_depth0, ipareg_parse_dummy,
		0x000023CC, 0},
	[IPA_HW_v3_0][IPA_RX_HPS_CLIENTS_MAX_DEPTH_1] = {
		ipareg_construct_rx_hps_clients_depth1, ipareg_parse_dummy,
		0x000023D0, 0},
	[IPA_HW_v3_0][IPA_QSB_MAX_WRITES] = {
		ipareg_construct_qsb_max_writes, ipareg_parse_dummy,
		0x00000074, 0},
	[IPA_HW_v3_0][IPA_QSB_MAX_READS] = {
		ipareg_construct_qsb_max_reads, ipareg_parse_dummy,
		0x00000078, 0},


	/* IPAv3.1 - only deltas from IPAv3.0; the rest is inherited */
	[IPA_HW_v3_1][IPA_IRQ_SUSPEND_INFO_EE_n] = {
		ipareg_construct_dummy, ipareg_parse_dummy,
		0x00003030, 0x1000},
	[IPA_HW_v3_1][IPA_SUSPEND_IRQ_EN_EE_n] = {
		ipareg_construct_dummy, ipareg_parse_dummy,
		0x00003034, 0x1000},
	[IPA_HW_v3_1][IPA_SUSPEND_IRQ_CLR_EE_n] = {
		ipareg_construct_dummy, ipareg_parse_dummy,
		0x00003038, 0x1000},


	/* IPAv3.5 - only deltas from IPAv3.1; the rest is inherited */
	[IPA_HW_v3_5][IPA_TX_CFG] = {
		ipareg_construct_tx_cfg, ipareg_parse_dummy,
		0x000001FC, 0},
	[IPA_HW_v3_5][IPA_SRC_RSRC_GRP_01_RSRC_TYPE_n] = {
		ipareg_construct_rsrg_grp_xy_v3_5, ipareg_parse_dummy,
		0x00000400, 0x20},
	[IPA_HW_v3_5][IPA_SRC_RSRC_GRP_23_RSRC_TYPE_n] = {
		ipareg_construct_rsrg_grp_xy_v3_5, ipareg_parse_dummy,
		0x00000404, 0x20},
	[IPA_HW_v3_5][IPA_SRC_RSRC_GRP_45_RSRC_TYPE_n] = {
		ipareg_construct_dummy, ipareg_parse_dummy,
		-1, 0},
	[IPA_HW_v3_5][IPA_SRC_RSRC_GRP_67_RSRC_TYPE_n] = {
		ipareg_construct_dummy, ipareg_parse_dummy,
		-1, 0},
	[IPA_HW_v3_5][IPA_DST_RSRC_GRP_01_RSRC_TYPE_n] = {
		ipareg_construct_rsrg_grp_xy_v3_5, ipareg_parse_dummy,
		0x00000500, 0x20},
	[IPA_HW_v3_5][IPA_DST_RSRC_GRP_23_RSRC_TYPE_n] = {
		ipareg_construct_rsrg_grp_xy_v3_5, ipareg_parse_dummy,
		0x00000504, 0x20},
	[IPA_HW_v3_5][IPA_DST_RSRC_GRP_45_RSRC_TYPE_n] = {
		ipareg_construct_dummy, ipareg_parse_dummy,
		-1, 0},
	[IPA_HW_v3_5][IPA_DST_RSRC_GRP_67_RSRC_TYPE_n] = {
		ipareg_construct_dummy, ipareg_parse_dummy,
		-1, 0},
	[IPA_HW_v3_5][IPA_ENDP_INIT_RSRC_GRP_n] = {
		ipareg_construct_endp_init_rsrc_grp_n_v3_5,
		ipareg_parse_dummy,
		0x00000838, 0x70},
	[IPA_HW_v3_5][IPA_RX_HPS_CLIENTS_MIN_DEPTH_0] = {
		ipareg_construct_rx_hps_clients_depth0_v3_5,
		ipareg_parse_dummy,
		0x000023C4, 0},
	[IPA_HW_v3_5][IPA_RX_HPS_CLIENTS_MIN_DEPTH_1] = {
		ipareg_construct_dummy, ipareg_parse_dummy,
		-1, 0},
	[IPA_HW_v3_5][IPA_RX_HPS_CLIENTS_MAX_DEPTH_0] = {
		ipareg_construct_rx_hps_clients_depth0_v3_5,
		ipareg_parse_dummy,
		0x000023CC, 0},
	[IPA_HW_v3_5][IPA_RX_HPS_CLIENTS_MAX_DEPTH_1] = {
		ipareg_construct_dummy, ipareg_parse_dummy,
		-1, 0},
	[IPA_HW_v3_5][IPA_SPARE_REG_1] = {
		ipareg_construct_dummy, ipareg_parse_dummy,
		0x00002780, 0},
	[IPA_HW_v3_5][IPA_SPARE_REG_2] = {
		ipareg_construct_dummy, ipareg_parse_dummy,
		0x00002784, 0},
};
+
+/*
+ * ipahal_reg_init() - Build the registers information table
+ * See ipahal_reg_objs[][] comments
+ *
+ * Note: As global variables are initialized with zero, any un-overridden
+ * register entry will be zero. By this we recognize them.
+ */
+int ipahal_reg_init(enum ipa_hw_type ipa_hw_type)
+{
+ int i;
+ int j;
+ struct ipahal_reg_obj zero_obj;
+
+ IPAHAL_DBG_LOW("Entry - HW_TYPE=%d\n", ipa_hw_type);
+
+ if ((ipa_hw_type < 0) || (ipa_hw_type >= IPA_HW_MAX)) {
+ IPAHAL_ERR("invalid IPA HW type (%d)\n", ipa_hw_type);
+ return -EINVAL;
+ }
+
+ memset(&zero_obj, 0, sizeof(zero_obj));
+ for (i = IPA_HW_v3_0 ; i < ipa_hw_type ; i++) {
+ for (j = 0; j < IPA_REG_MAX ; j++) {
+ if (!memcmp(&ipahal_reg_objs[i+1][j], &zero_obj,
+ sizeof(struct ipahal_reg_obj))) {
+ memcpy(&ipahal_reg_objs[i+1][j],
+ &ipahal_reg_objs[i][j],
+ sizeof(struct ipahal_reg_obj));
+ } else {
+ /*
+ * explicitly overridden register.
+ * Check validity
+ */
+ if (!ipahal_reg_objs[i+1][j].offset) {
+ IPAHAL_ERR(
+ "reg=%s with zero offset ipa_ver=%d\n",
+ ipahal_reg_name_str(j), i+1);
+ WARN_ON(1);
+ }
+ if (!ipahal_reg_objs[i+1][j].construct) {
+ IPAHAL_ERR(
+ "reg=%s with NULL construct func ipa_ver=%d\n",
+ ipahal_reg_name_str(j), i+1);
+ WARN_ON(1);
+ }
+ if (!ipahal_reg_objs[i+1][j].parse) {
+ IPAHAL_ERR(
+ "reg=%s with NULL parse func ipa_ver=%d\n",
+ ipahal_reg_name_str(j), i+1);
+ WARN_ON(1);
+ }
+ }
+ }
+ }
+
+ return 0;
+}
+
+/*
+ * ipahal_reg_name_str() - returns string that represent the register
+ * @reg_name: [in] register name
+ */
+const char *ipahal_reg_name_str(enum ipahal_reg_name reg_name)
+{
+ if (reg_name < 0 || reg_name >= IPA_REG_MAX) {
+ IPAHAL_ERR("requested name of invalid reg=%d\n", reg_name);
+ return "Invalid Register";
+ }
+
+ return ipareg_name_to_str[reg_name];
+}
+
+/*
+ * ipahal_read_reg_n() - Get n parameterized reg value
+ */
+u32 ipahal_read_reg_n(enum ipahal_reg_name reg, u32 n)
+{
+ u32 offset;
+
+ if (reg >= IPA_REG_MAX) {
+ IPAHAL_ERR("Invalid register reg=%u\n", reg);
+ WARN_ON(1);
+ return -EFAULT;
+ }
+
+ IPAHAL_DBG_LOW("read from %s n=%u\n",
+ ipahal_reg_name_str(reg), n);
+
+ offset = ipahal_reg_objs[ipahal_ctx->hw_type][reg].offset;
+ if (offset == -1) {
+ IPAHAL_ERR("Read access to obsolete reg=%s\n",
+ ipahal_reg_name_str(reg));
+ WARN_ON(1);
+ return -EFAULT;
+ }
+ offset += ipahal_reg_objs[ipahal_ctx->hw_type][reg].n_ofst * n;
+ return ioread32(ipahal_ctx->base + offset);
+}
+
+/*
+ * ipahal_write_reg_mn() - Write to m/n parameterized reg a raw value
+ */
+void ipahal_write_reg_mn(enum ipahal_reg_name reg, u32 m, u32 n, u32 val)
+{
+ u32 offset;
+
+ if (reg >= IPA_REG_MAX) {
+ IPAHAL_ERR("Invalid register reg=%u\n", reg);
+ return;
+ }
+
+ IPAHAL_DBG_LOW("write to %s m=%u n=%u val=%u\n",
+ ipahal_reg_name_str(reg), m, n, val);
+ offset = ipahal_reg_objs[ipahal_ctx->hw_type][reg].offset;
+ if (offset == -1) {
+ IPAHAL_ERR("Write access to obsolete reg=%s\n",
+ ipahal_reg_name_str(reg));
+ WARN_ON(1);
+ return;
+ }
+ /*
+ * Currently there is one register with m and n parameters
+ * IPA_UC_MAILBOX_m_n. The m value of it is 0x80.
+ * If more such registers will be added in the future,
+ * we can move the m parameter to the table above.
+ */
+ offset += 0x80 * m;
+ offset += ipahal_reg_objs[ipahal_ctx->hw_type][reg].n_ofst * n;
+ iowrite32(val, ipahal_ctx->base + offset);
+}
+
+/*
+ * ipahal_read_reg_n_fields() - Get the parsed value of n parameterized reg
+ */
+u32 ipahal_read_reg_n_fields(enum ipahal_reg_name reg, u32 n, void *fields)
+{
+ u32 val = 0;
+ u32 offset;
+
+ if (!fields) {
+ IPAHAL_ERR("Input error fields=%p\n", fields);
+ WARN_ON(1);
+ return -EFAULT;
+ }
+
+ if (reg >= IPA_REG_MAX) {
+ IPAHAL_ERR("Invalid register reg=%u\n", reg);
+ WARN_ON(1);
+ return -EFAULT;
+ }
+
+ IPAHAL_DBG_LOW("read from %s n=%u and parse it\n",
+ ipahal_reg_name_str(reg), n);
+ offset = ipahal_reg_objs[ipahal_ctx->hw_type][reg].offset;
+ if (offset == -1) {
+ IPAHAL_ERR("Read access to obsolete reg=%s\n",
+ ipahal_reg_name_str(reg));
+ WARN_ON(1);
+ return -EFAULT;
+ }
+ offset += ipahal_reg_objs[ipahal_ctx->hw_type][reg].n_ofst * n;
+ val = ioread32(ipahal_ctx->base + offset);
+ ipahal_reg_objs[ipahal_ctx->hw_type][reg].parse(reg, fields, val);
+
+ return val;
+}
+
+/*
+ * ipahal_write_reg_n_fields() - Write to n parameterized reg a prased value
+ */
+void ipahal_write_reg_n_fields(enum ipahal_reg_name reg, u32 n,
+ const void *fields)
+{
+ u32 val = 0;
+ u32 offset;
+
+ if (!fields) {
+ IPAHAL_ERR("Input error fields=%p\n", fields);
+ return;
+ }
+
+ if (reg >= IPA_REG_MAX) {
+ IPAHAL_ERR("Invalid register reg=%u\n", reg);
+ return;
+ }
+
+ IPAHAL_DBG_LOW("write to %s n=%u after constructing it\n",
+ ipahal_reg_name_str(reg), n);
+ offset = ipahal_reg_objs[ipahal_ctx->hw_type][reg].offset;
+ if (offset == -1) {
+ IPAHAL_ERR("Write access to obsolete reg=%s\n",
+ ipahal_reg_name_str(reg));
+ WARN_ON(1);
+ return;
+ }
+ offset += ipahal_reg_objs[ipahal_ctx->hw_type][reg].n_ofst * n;
+ ipahal_reg_objs[ipahal_ctx->hw_type][reg].construct(reg, fields, &val);
+
+ iowrite32(val, ipahal_ctx->base + offset);
+}
+
+/*
+ * Get the offset of a m/n parameterized register
+ */
+u32 ipahal_get_reg_mn_ofst(enum ipahal_reg_name reg, u32 m, u32 n)
+{
+ u32 offset;
+
+ if (reg >= IPA_REG_MAX) {
+ IPAHAL_ERR("Invalid register reg=%u\n", reg);
+ WARN_ON(1);
+ return -EFAULT;
+ }
+
+ IPAHAL_DBG_LOW("get offset of %s m=%u n=%u\n",
+ ipahal_reg_name_str(reg), m, n);
+ offset = ipahal_reg_objs[ipahal_ctx->hw_type][reg].offset;
+ if (offset == -1) {
+ IPAHAL_ERR("Access to obsolete reg=%s\n",
+ ipahal_reg_name_str(reg));
+ WARN_ON(1);
+ return -EFAULT;
+ }
+ /*
+ * Currently there is one register with m and n parameters
+ * IPA_UC_MAILBOX_m_n. The m value of it is 0x80.
+ * If more such registers will be added in the future,
+ * we can move the m parameter to the table above.
+ */
+ offset += 0x80 * m;
+ offset += ipahal_reg_objs[ipahal_ctx->hw_type][reg].n_ofst * n;
+
+ return offset;
+}
+
+u32 ipahal_get_reg_base(void)
+{
+ return 0x00040000;
+}
+
+
/*
 * Specific functions
 * These functions supply specific register values for specific operations
 * that cannot be reached by generic functions.
 * E.g. To disable aggregation, need to write to specific bits of the AGGR
 * register. The other bits should be untouched. This operation is very
 * specific and cannot be generically defined. For such operations we
 * define these specific functions.
 */
+
+void ipahal_get_disable_aggr_valmask(struct ipahal_reg_valmask *valmask)
+{
+ if (!valmask) {
+ IPAHAL_ERR("Input error\n");
+ return;
+ }
+
+ valmask->val = (1 & IPA_ENDP_INIT_AGGR_n_AGGR_FORCE_CLOSE_BMSK) <<
+ IPA_ENDP_INIT_AGGR_n_AGGR_FORCE_CLOSE_SHFT;
+ valmask->mask = IPA_ENDP_INIT_AGGR_n_AGGR_FORCE_CLOSE_BMSK <<
+ IPA_ENDP_INIT_AGGR_n_AGGR_FORCE_CLOSE_SHFT;
+
+ valmask->val |= ((0 & IPA_ENDP_INIT_AGGR_n_AGGR_EN_BMSK) <<
+ IPA_ENDP_INIT_AGGR_n_AGGR_EN_SHFT);
+ valmask->mask |= ((IPA_ENDP_INIT_AGGR_n_AGGR_EN_BMSK <<
+ IPA_ENDP_INIT_AGGR_n_AGGR_EN_SHFT));
+}
+
+u32 ipahal_aggr_get_max_byte_limit(void)
+{
+ return
+ IPA_ENDP_INIT_AGGR_n_AGGR_BYTE_LIMIT_BMSK >>
+ IPA_ENDP_INIT_AGGR_n_AGGR_BYTE_LIMIT_SHFT;
+}
+
+u32 ipahal_aggr_get_max_pkt_limit(void)
+{
+ return
+ IPA_ENDP_INIT_AGGR_n_AGGR_PKT_LIMIT_BMSK >>
+ IPA_ENDP_INIT_AGGR_n_AGGR_PKT_LIMIT_SHFT;
+}
+
+void ipahal_get_aggr_force_close_valmask(int ep_idx,
+ struct ipahal_reg_valmask *valmask)
+{
+ u32 shft;
+ u32 bmsk;
+
+ if (!valmask) {
+ IPAHAL_ERR("Input error\n");
+ return;
+ }
+
+ if (ipahal_ctx->hw_type <= IPA_HW_v3_1) {
+ shft = IPA_AGGR_FORCE_CLOSE_AGGR_FORCE_CLOSE_PIPE_BITMAP_SHFT;
+ bmsk = IPA_AGGR_FORCE_CLOSE_AGGR_FORCE_CLOSE_PIPE_BITMAP_BMSK;
+ } else {
+ shft =
+ IPA_AGGR_FORCE_CLOSE_AGGR_FORCE_CLOSE_PIPE_BITMAP_SHFT_V3_5;
+ bmsk =
+ IPA_AGGR_FORCE_CLOSE_AGGR_FORCE_CLOSE_PIPE_BITMAP_BMSK_V3_5;
+ }
+
+ IPA_SETFIELD_IN_REG(valmask->val, 1 << ep_idx, shft, bmsk);
+ valmask->mask = bmsk << shft;
+}
+
+void ipahal_get_fltrt_hash_flush_valmask(
+ struct ipahal_reg_fltrt_hash_flush *flush,
+ struct ipahal_reg_valmask *valmask)
+{
+ if (!flush || !valmask) {
+ IPAHAL_ERR("Input error: flush=%p ; valmask=%p\n",
+ flush, valmask);
+ return;
+ }
+
+ memset(valmask, 0, sizeof(struct ipahal_reg_valmask));
+
+ if (flush->v6_rt)
+ valmask->val |=
+ (1<<IPA_FILT_ROUT_HASH_FLUSH_IPv6_ROUT_SHFT);
+ if (flush->v6_flt)
+ valmask->val |=
+ (1<<IPA_FILT_ROUT_HASH_FLUSH_IPv6_FILT_SHFT);
+ if (flush->v4_rt)
+ valmask->val |=
+ (1<<IPA_FILT_ROUT_HASH_FLUSH_IPv4_ROUT_SHFT);
+ if (flush->v4_flt)
+ valmask->val |=
+ (1<<IPA_FILT_ROUT_HASH_FLUSH_IPv4_FILT_SHFT);
+
+ valmask->mask = valmask->val;
+}
+
+void ipahal_get_status_ep_valmask(int pipe_num,
+ struct ipahal_reg_valmask *valmask)
+{
+ if (!valmask) {
+ IPAHAL_ERR("Input error\n");
+ return;
+ }
+
+ valmask->val =
+ (pipe_num & IPA_ENDP_STATUS_n_STATUS_ENDP_BMSK) <<
+ IPA_ENDP_STATUS_n_STATUS_ENDP_SHFT;
+
+ valmask->mask =
+ IPA_ENDP_STATUS_n_STATUS_ENDP_BMSK <<
+ IPA_ENDP_STATUS_n_STATUS_ENDP_SHFT;
+}
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipahal/ipahal_reg.h b/drivers/platform/msm/ipa/ipa_v3/ipahal/ipahal_reg.h
new file mode 100644
index 0000000..8fb9040
--- /dev/null
+++ b/drivers/platform/msm/ipa/ipa_v3/ipahal/ipahal_reg.h
@@ -0,0 +1,449 @@
+/* Copyright (c) 2012-2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _IPAHAL_REG_H_
+#define _IPAHAL_REG_H_
+
+#include <linux/ipa.h>
+
/*
 * Registers names
 *
 * NOTE:: Any change to this enum, need to change to ipareg_name_to_str
 * array as well.
 * The enumerators index ipahal_reg_objs[][]; their order must match the
 * string table. IPA_REG_MAX is a count sentinel, not a register.
 */
enum ipahal_reg_name {
	IPA_ROUTE,
	IPA_IRQ_STTS_EE_n,
	IPA_IRQ_EN_EE_n,
	IPA_IRQ_CLR_EE_n,
	IPA_IRQ_SUSPEND_INFO_EE_n,
	IPA_SUSPEND_IRQ_EN_EE_n,
	IPA_SUSPEND_IRQ_CLR_EE_n,
	IPA_BCR,
	IPA_ENABLED_PIPES,
	IPA_COMP_SW_RESET,
	IPA_VERSION,
	IPA_TAG_TIMER,
	IPA_COMP_HW_VERSION,
	IPA_SPARE_REG_1,
	IPA_SPARE_REG_2,
	IPA_COMP_CFG,
	IPA_STATE_AGGR_ACTIVE,
	IPA_ENDP_INIT_HDR_n,
	IPA_ENDP_INIT_HDR_EXT_n,
	IPA_ENDP_INIT_AGGR_n,
	IPA_AGGR_FORCE_CLOSE,
	IPA_ENDP_INIT_ROUTE_n,
	IPA_ENDP_INIT_MODE_n,
	IPA_ENDP_INIT_NAT_n,
	IPA_ENDP_INIT_CTRL_n,
	IPA_ENDP_INIT_HOL_BLOCK_EN_n,
	IPA_ENDP_INIT_HOL_BLOCK_TIMER_n,
	IPA_ENDP_INIT_DEAGGR_n,
	IPA_ENDP_INIT_SEQ_n,
	IPA_DEBUG_CNT_REG_n,
	IPA_ENDP_INIT_CFG_n,
	IPA_IRQ_EE_UC_n,
	IPA_ENDP_INIT_HDR_METADATA_MASK_n,
	IPA_ENDP_INIT_HDR_METADATA_n,
	IPA_ENDP_INIT_RSRC_GRP_n,
	IPA_SHARED_MEM_SIZE,
	IPA_SRAM_DIRECT_ACCESS_n,
	IPA_DEBUG_CNT_CTRL_n,
	IPA_UC_MAILBOX_m_n,
	IPA_FILT_ROUT_HASH_FLUSH,
	IPA_SINGLE_NDP_MODE,
	IPA_QCNCM,
	IPA_SYS_PKT_PROC_CNTXT_BASE,
	IPA_LOCAL_PKT_PROC_CNTXT_BASE,
	IPA_ENDP_STATUS_n,
	IPA_ENDP_FILTER_ROUTER_HSH_CFG_n,
	IPA_SRC_RSRC_GRP_01_RSRC_TYPE_n,
	IPA_SRC_RSRC_GRP_23_RSRC_TYPE_n,
	IPA_SRC_RSRC_GRP_45_RSRC_TYPE_n,
	IPA_SRC_RSRC_GRP_67_RSRC_TYPE_n,
	IPA_DST_RSRC_GRP_01_RSRC_TYPE_n,
	IPA_DST_RSRC_GRP_23_RSRC_TYPE_n,
	IPA_DST_RSRC_GRP_45_RSRC_TYPE_n,
	IPA_DST_RSRC_GRP_67_RSRC_TYPE_n,
	IPA_RX_HPS_CLIENTS_MIN_DEPTH_0,
	IPA_RX_HPS_CLIENTS_MIN_DEPTH_1,
	IPA_RX_HPS_CLIENTS_MAX_DEPTH_0,
	IPA_RX_HPS_CLIENTS_MAX_DEPTH_1,
	IPA_QSB_MAX_WRITES,
	IPA_QSB_MAX_READS,
	IPA_TX_CFG,
	IPA_REG_MAX,
};
+
/*
 * struct ipahal_reg_route - IPA route register
 * @route_dis: route disable
 * @route_def_pipe: route default pipe
 * @route_def_hdr_table: route default header table
 * @route_def_hdr_ofst: route default header offset table
 * @route_frag_def_pipe: Default pipe to route fragmented exception
 *	packets and frag new rule statues, if source pipe does not have
 *	a notification status pipe defined.
 * @route_def_retain_hdr: default value of retain header. It is used
 *	when no rule was hit
 */
struct ipahal_reg_route {
	u32 route_dis;
	u32 route_def_pipe;
	u32 route_def_hdr_table;
	u32 route_def_hdr_ofst;
	u8  route_frag_def_pipe;
	u32 route_def_retain_hdr;
};
+
/*
 * struct ipahal_reg_endp_init_route - IPA ENDP_INIT_ROUTE_n register
 * @route_table_index: Default index of routing table (IPA Consumer).
 */
struct ipahal_reg_endp_init_route {
	u32 route_table_index;
};
+
/*
 * struct ipahal_reg_endp_init_rsrc_grp - IPA_ENDP_INIT_RSRC_GRP_n register
 * @rsrc_grp: Index of group for this ENDP. If this ENDP is a source-ENDP,
 *	index is for source-resource-group. If destination ENDP, index is
 *	for destination-resource-group.
 */
struct ipahal_reg_endp_init_rsrc_grp {
	u32 rsrc_grp;
};
+
/*
 * struct ipahal_reg_endp_init_mode - IPA ENDP_INIT_MODE_n register
 * @dst_pipe_number: This parameter specifies destination output-pipe-packets
 *	will be routed to. Valid for DMA mode only and for Input
 *	Pipes only (IPA Consumer)
 * @ep_mode: endpoint mode configuration (see struct ipa_ep_cfg_mode)
 */
struct ipahal_reg_endp_init_mode {
	u32 dst_pipe_number;
	struct ipa_ep_cfg_mode ep_mode;
};
+
/*
 * struct ipahal_reg_shared_mem_size - IPA SHARED_MEM_SIZE register
 * @shared_mem_sz: Available size [in 8Bytes] of SW partition within
 *	IPA shared memory.
 * @shared_mem_baddr: Offset of SW partition within IPA
 *	shared memory [in 8Bytes]. To get absolute address of SW partition,
 *	add this offset to IPA_SRAM_DIRECT_ACCESS_n baddr.
 */
struct ipahal_reg_shared_mem_size {
	u32 shared_mem_sz;
	u32 shared_mem_baddr;
};
+
/*
 * struct ipahal_reg_ep_cfg_status - status configuration in IPA end-point
 * @status_en: Determines if end point supports Status Indications. SW should
 *	set this bit in order to enable Statuses. Output Pipe - send
 *	Status indications only if bit is set. Input Pipe - forward Status
 *	indication to STATUS_ENDP only if bit is set. Valid for Input
 *	and Output Pipes (IPA Consumer and Producer)
 * @status_ep: Statuses generated for this endpoint will be forwarded to the
 *	specified Status End Point. Status endpoint needs to be
 *	configured with STATUS_EN=1 Valid only for Input Pipes (IPA
 *	Consumer)
 * @status_location: Location of PKT-STATUS on destination pipe.
 *	If set to 0 (default), PKT-STATUS will be appended before the packet
 *	for this endpoint. If set to 1, PKT-STATUS will be appended after the
 *	packet for this endpoint. Valid only for Output Pipes (IPA Producer)
 */
struct ipahal_reg_ep_cfg_status {
	bool status_en;
	u8 status_ep;
	bool status_location;
};
+
/*
 * struct ipahal_reg_hash_tuple - Hash tuple members for flt and rt
 * each field tells whether the member is masked (included in the hash)
 * or not
 * @src_id: pipe number for flt, table index for rt
 * @src_ip_addr: IP source address
 * @dst_ip_addr: IP destination address
 * @src_port: L4 source port
 * @dst_port: L4 destination port
 * @protocol: IP protocol field
 * @meta_data: packet meta-data
 */
struct ipahal_reg_hash_tuple {
	/* src_id: pipe in flt, tbl index in rt */
	bool src_id;
	bool src_ip_addr;
	bool dst_ip_addr;
	bool src_port;
	bool dst_port;
	bool protocol;
	bool meta_data;
};
+
/*
 * struct ipahal_reg_fltrt_hash_tuple - IPA hash tuple register
 * @flt: Hash tuple info for filtering
 * @rt: Hash tuple info for routing
 * @undefined1: Undefined/Unused bit fields set of the register
 * @undefined2: Undefined/Unused bit fields set of the register
 */
struct ipahal_reg_fltrt_hash_tuple {
	struct ipahal_reg_hash_tuple flt;
	struct ipahal_reg_hash_tuple rt;
	u32 undefined1;
	u32 undefined2;
};
+
/*
 * enum ipahal_reg_dbg_cnt_type - Debug Counter Type
 * DBG_CNT_TYPE_IPV4_FLTR - Count IPv4 filtering rules
 * DBG_CNT_TYPE_IPV4_ROUT - Count IPv4 routing rules
 * DBG_CNT_TYPE_GENERAL - General counter
 * DBG_CNT_TYPE_IPV6_FLTR - Count IPv6 filtering rules
 * DBG_CNT_TYPE_IPV6_ROUT - Count IPv6 routing rules
 */
enum ipahal_reg_dbg_cnt_type {
	DBG_CNT_TYPE_IPV4_FLTR,
	DBG_CNT_TYPE_IPV4_ROUT,
	DBG_CNT_TYPE_GENERAL,
	DBG_CNT_TYPE_IPV6_FLTR,
	DBG_CNT_TYPE_IPV6_ROUT,
};
+
/*
 * struct ipahal_reg_debug_cnt_ctrl - IPA_DEBUG_CNT_CTRL_n register
 * @en - Enable debug counter
 * @type - Type of debug counting
 * @product - False->Count Bytes . True->Count #packets
 * @src_pipe - Specific Pipe to match. If FF, no need to match
 *	specific pipe
 * @rule_idx_pipe_rule - Global Rule or Pipe Rule. If pipe, then indicated by
 *	src_pipe. Starting at IPA V3_5,
 *	no support on Global Rule. This field will be ignored.
 * @rule_idx - Rule index. Irrelevant for type General
 */
struct ipahal_reg_debug_cnt_ctrl {
	bool en;
	enum ipahal_reg_dbg_cnt_type type;
	bool product;
	u8 src_pipe;
	bool rule_idx_pipe_rule;
	u16 rule_idx;
};
+
/*
 * struct ipahal_reg_rsrc_grp_cfg - Min/Max values for two rsrc groups
 * @x_min - first group min value
 * @x_max - first group max value
 * @y_min - second group min value
 * @y_max - second group max value
 */
struct ipahal_reg_rsrc_grp_cfg {
	u32 x_min;
	u32 x_max;
	u32 y_min;
	u32 y_max;
};
+
/*
 * struct ipahal_reg_rx_hps_clients - Min or Max values for RX HPS clients
 * @client_minmax - Min or Max values. In case of depth 0 the 4 values
 *	are used. In case of depth 1, only the first 2 values are used
 */
struct ipahal_reg_rx_hps_clients {
	u32 client_minmax[4];
};
+
/*
 * struct ipahal_reg_valmask - holding values and masking for registers
 * HAL application may require only value and mask of it for some
 * register fields.
 * @val - The value
 * @mask - The mask of the value (set bits participate in the write)
 */
struct ipahal_reg_valmask {
	u32 val;
	u32 mask;
};
+
/*
 * struct ipahal_reg_fltrt_hash_flush - Flt/Rt flush configuration
 * @v6_rt - Flush IPv6 Routing cache
 * @v6_flt - Flush IPv6 Filtering cache
 * @v4_rt - Flush IPv4 Routing cache
 * @v4_flt - Flush IPv4 Filtering cache
 */
struct ipahal_reg_fltrt_hash_flush {
	bool v6_rt;
	bool v6_flt;
	bool v4_rt;
	bool v4_flt;
};
+
/*
 * struct ipahal_reg_single_ndp_mode - IPA SINGLE_NDP_MODE register
 * @single_ndp_en: When set to '1', IPA builds MBIM frames with up to 1
 *	NDP-header.
 * @undefined: undefined bits of the register
 */
struct ipahal_reg_single_ndp_mode {
	bool single_ndp_en;
	u32 undefined;
};
+
/*
 * struct ipahal_reg_qcncm - IPA QCNCM register
 * @mode_en: When QCNCM_MODE_EN=1, IPA will use QCNCM signature.
 * @mode_val: Used only when QCNCM_MODE_EN=1 and sets SW Signature in
 *	the NDP header.
 * @undefined: undefined bits of the register
 */
struct ipahal_reg_qcncm {
	bool mode_en;
	u32 mode_val;
	u32 undefined;
};
+
/*
 * struct ipahal_reg_tx_cfg - IPA TX_CFG register (IPA v3.5+)
 * @tx0_prefetch_disable: Disable prefetch on TX0
 * @tx1_prefetch_disable: Disable prefetch on TX1
 * @prefetch_almost_empty_size: Prefetch almost empty size
 */
struct ipahal_reg_tx_cfg {
	bool tx0_prefetch_disable;
	bool tx1_prefetch_disable;
	u16 prefetch_almost_empty_size;
};
+
+/*
+ * ipahal_reg_name_str() - returns string that represent the register
+ * @reg_name: [in] register name
+ */
+const char *ipahal_reg_name_str(enum ipahal_reg_name reg_name);
+
+/*
+ * ipahal_read_reg_n() - Get the raw value of n parameterized reg
+ */
+u32 ipahal_read_reg_n(enum ipahal_reg_name reg, u32 n);
+
+/*
+ * ipahal_write_reg_mn() - Write to m/n parameterized reg a raw value
+ */
+void ipahal_write_reg_mn(enum ipahal_reg_name reg, u32 m, u32 n, u32 val);
+
/*
 * ipahal_write_reg_n() - Write to n parameterized reg a raw value
 * Convenience wrapper around ipahal_write_reg_mn() with m=0.
 */
static inline void ipahal_write_reg_n(enum ipahal_reg_name reg,
	u32 n, u32 val)
{
	ipahal_write_reg_mn(reg, 0, n, val);
}
+
+/*
+ * ipahal_read_reg_n_fields() - Get the parsed value of n parameterized reg
+ */
+u32 ipahal_read_reg_n_fields(enum ipahal_reg_name reg, u32 n, void *fields);
+
/*
 * ipahal_write_reg_n_fields() - Write to n parameterized reg a parsed value
 */
+void ipahal_write_reg_n_fields(enum ipahal_reg_name reg, u32 n,
+ const void *fields);
+
/*
 * ipahal_read_reg() - Get the raw value of a reg
 * Convenience wrapper around ipahal_read_reg_n() with n=0.
 */
static inline u32 ipahal_read_reg(enum ipahal_reg_name reg)
{
	return ipahal_read_reg_n(reg, 0);
}
+
/*
 * ipahal_write_reg() - Write to reg a raw value
 * Convenience wrapper around ipahal_write_reg_mn() with m=0, n=0.
 */
static inline void ipahal_write_reg(enum ipahal_reg_name reg,
	u32 val)
{
	ipahal_write_reg_mn(reg, 0, 0, val);
}
+
/*
 * ipahal_read_reg_fields() - Get the parsed value of a reg
 * Convenience wrapper around ipahal_read_reg_n_fields() with n=0.
 */
static inline u32 ipahal_read_reg_fields(enum ipahal_reg_name reg, void *fields)
{
	return ipahal_read_reg_n_fields(reg, 0, fields);
}
+
/*
 * ipahal_write_reg_fields() - Write to reg a parsed value
 * Convenience wrapper around ipahal_write_reg_n_fields() with n=0.
 */
static inline void ipahal_write_reg_fields(enum ipahal_reg_name reg,
	const void *fields)
{
	ipahal_write_reg_n_fields(reg, 0, fields);
}
+
+/*
+ * Get the offset of a m/n parameterized register
+ */
+u32 ipahal_get_reg_mn_ofst(enum ipahal_reg_name reg, u32 m, u32 n);
+
/*
 * Get the offset of a n parameterized register
 * Convenience wrapper around ipahal_get_reg_mn_ofst() with m=0.
 */
static inline u32 ipahal_get_reg_n_ofst(enum ipahal_reg_name reg, u32 n)
{
	return ipahal_get_reg_mn_ofst(reg, 0, n);
}
+
/*
 * Get the offset of a register
 * Convenience wrapper around ipahal_get_reg_mn_ofst() with m=0, n=0.
 */
static inline u32 ipahal_get_reg_ofst(enum ipahal_reg_name reg)
{
	return ipahal_get_reg_mn_ofst(reg, 0, 0);
}
+
+/*
+ * Get the register base address
+ */
+u32 ipahal_get_reg_base(void);
+
/*
 * Specific functions
 * These functions supply specific register values for specific operations
 * that cannot be reached by generic functions.
 * E.g. To disable aggregation, need to write to specific bits of the AGGR
 * register. The other bits should be untouched. This operation is very
 * specific and cannot be generically defined. For such operations we
 * define these specific functions.
 */
+void ipahal_get_disable_aggr_valmask(struct ipahal_reg_valmask *valmask);
+u32 ipahal_aggr_get_max_byte_limit(void);
+u32 ipahal_aggr_get_max_pkt_limit(void);
+void ipahal_get_aggr_force_close_valmask(int ep_idx,
+ struct ipahal_reg_valmask *valmask);
+void ipahal_get_fltrt_hash_flush_valmask(
+ struct ipahal_reg_fltrt_hash_flush *flush,
+ struct ipahal_reg_valmask *valmask);
+void ipahal_get_status_ep_valmask(int pipe_num,
+ struct ipahal_reg_valmask *valmask);
+
+#endif /* _IPAHAL_REG_H_ */
+
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipahal/ipahal_reg_i.h b/drivers/platform/msm/ipa/ipa_v3/ipahal/ipahal_reg_i.h
new file mode 100644
index 0000000..1606a2f
--- /dev/null
+++ b/drivers/platform/msm/ipa/ipa_v3/ipahal/ipahal_reg_i.h
@@ -0,0 +1,315 @@
+/* Copyright (c) 2012-2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _IPAHAL_REG_I_H_
+#define _IPAHAL_REG_I_H_
+
+int ipahal_reg_init(enum ipa_hw_type ipa_hw_type);
+
+#define IPA_SETFIELD(val, shift, mask) (((val) << (shift)) & (mask))
+#define IPA_SETFIELD_IN_REG(reg, val, shift, mask) \
+ (reg |= ((val) << (shift)) & (mask))
+#define IPA_GETFIELD_FROM_REG(reg, shift, mask) \
+ (((reg) & (mask)) >> (shift))
+
+
+/* IPA_ROUTE register */
+#define IPA_ROUTE_ROUTE_DIS_SHFT 0x0
+#define IPA_ROUTE_ROUTE_DIS_BMSK 0x1
+#define IPA_ROUTE_ROUTE_DEF_PIPE_SHFT 0x1
+#define IPA_ROUTE_ROUTE_DEF_PIPE_BMSK 0x3e
+#define IPA_ROUTE_ROUTE_DEF_HDR_TABLE_SHFT 0x6
+#define IPA_ROUTE_ROUTE_DEF_HDR_TABLE_BMSK 0x40
+#define IPA_ROUTE_ROUTE_DEF_HDR_OFST_SHFT 0x7
+#define IPA_ROUTE_ROUTE_DEF_HDR_OFST_BMSK 0x1ff80
+#define IPA_ROUTE_ROUTE_FRAG_DEF_PIPE_BMSK 0x3e0000
+#define IPA_ROUTE_ROUTE_FRAG_DEF_PIPE_SHFT 0x11
+#define IPA_ROUTE_ROUTE_DEF_RETAIN_HDR_BMSK 0x1000000
+#define IPA_ROUTE_ROUTE_DEF_RETAIN_HDR_SHFT 0x18
+
+/* IPA_ENDP_INIT_HDR_n register */
+#define IPA_ENDP_INIT_HDR_n_HDR_LEN_BMSK 0x3f
+#define IPA_ENDP_INIT_HDR_n_HDR_LEN_SHFT 0x0
+#define IPA_ENDP_INIT_HDR_n_HDR_OFST_METADATA_VALID_BMSK 0x40
+#define IPA_ENDP_INIT_HDR_n_HDR_OFST_METADATA_VALID_SHFT 0x6
+#define IPA_ENDP_INIT_HDR_n_HDR_OFST_METADATA_SHFT 0x7
+#define IPA_ENDP_INIT_HDR_n_HDR_OFST_METADATA_BMSK 0x1f80
+#define IPA_ENDP_INIT_HDR_n_HDR_ADDITIONAL_CONST_LEN_BMSK 0x7e000
+#define IPA_ENDP_INIT_HDR_n_HDR_ADDITIONAL_CONST_LEN_SHFT 0xd
+#define IPA_ENDP_INIT_HDR_n_HDR_OFST_PKT_SIZE_VALID_BMSK 0x80000
+#define IPA_ENDP_INIT_HDR_n_HDR_OFST_PKT_SIZE_VALID_SHFT 0x13
+#define IPA_ENDP_INIT_HDR_n_HDR_OFST_PKT_SIZE_BMSK 0x3f00000
+#define IPA_ENDP_INIT_HDR_n_HDR_OFST_PKT_SIZE_SHFT 0x14
+#define IPA_ENDP_INIT_HDR_n_HDR_A5_MUX_BMSK 0x4000000
+#define IPA_ENDP_INIT_HDR_n_HDR_A5_MUX_SHFT 0x1a
+#define IPA_ENDP_INIT_HDR_n_HDR_LEN_INC_DEAGG_HDR_BMSK_v2 0x8000000
+#define IPA_ENDP_INIT_HDR_n_HDR_LEN_INC_DEAGG_HDR_SHFT_v2 0x1b
+#define IPA_ENDP_INIT_HDR_n_HDR_METADATA_REG_VALID_BMSK_v2 0x10000000
+#define IPA_ENDP_INIT_HDR_n_HDR_METADATA_REG_VALID_SHFT_v2 0x1c
+
+/* IPA_ENDP_INIT_HDR_EXT_n register */
+#define IPA_ENDP_INIT_HDR_EXT_n_HDR_ENDIANNESS_BMSK 0x1
+#define IPA_ENDP_INIT_HDR_EXT_n_HDR_ENDIANNESS_SHFT 0x0
+#define IPA_ENDP_INIT_HDR_EXT_n_HDR_TOTAL_LEN_OR_PAD_VALID_BMSK 0x2
+#define IPA_ENDP_INIT_HDR_EXT_n_HDR_TOTAL_LEN_OR_PAD_VALID_SHFT 0x1
+#define IPA_ENDP_INIT_HDR_EXT_n_HDR_TOTAL_LEN_OR_PAD_BMSK 0x4
+#define IPA_ENDP_INIT_HDR_EXT_n_HDR_TOTAL_LEN_OR_PAD_SHFT 0x2
+#define IPA_ENDP_INIT_HDR_EXT_n_HDR_PAYLOAD_LEN_INC_PADDING_BMSK 0x8
+#define IPA_ENDP_INIT_HDR_EXT_n_HDR_PAYLOAD_LEN_INC_PADDING_SHFT 0x3
+#define IPA_ENDP_INIT_HDR_EXT_n_HDR_TOTAL_LEN_OR_PAD_OFFSET_BMSK 0x3f0
+#define IPA_ENDP_INIT_HDR_EXT_n_HDR_TOTAL_LEN_OR_PAD_OFFSET_SHFT 0x4
+#define IPA_ENDP_INIT_HDR_EXT_n_HDR_PAD_TO_ALIGNMENT_SHFT 0xa
+#define IPA_ENDP_INIT_HDR_EXT_n_HDR_PAD_TO_ALIGNMENT_BMSK_v3_0 0x3c00
+
+/* IPA_ENDP_INIT_AGGR_n register */
+#define IPA_ENDP_INIT_AGGR_n_AGGR_HARD_BYTE_LIMIT_ENABLE_BMSK 0x1000000
+#define IPA_ENDP_INIT_AGGR_n_AGGR_HARD_BYTE_LIMIT_ENABLE_SHFT 0x18
+#define IPA_ENDP_INIT_AGGR_n_AGGR_FORCE_CLOSE_BMSK 0x400000
+#define IPA_ENDP_INIT_AGGR_n_AGGR_FORCE_CLOSE_SHFT 0x16
+#define IPA_ENDP_INIT_AGGR_n_AGGR_SW_EOF_ACTIVE_BMSK 0x200000
+#define IPA_ENDP_INIT_AGGR_n_AGGR_SW_EOF_ACTIVE_SHFT 0x15
+#define IPA_ENDP_INIT_AGGR_n_AGGR_PKT_LIMIT_BMSK 0x1f8000
+#define IPA_ENDP_INIT_AGGR_n_AGGR_PKT_LIMIT_SHFT 0xf
+#define IPA_ENDP_INIT_AGGR_n_AGGR_TIME_LIMIT_BMSK 0x7c00
+#define IPA_ENDP_INIT_AGGR_n_AGGR_TIME_LIMIT_SHFT 0xa
+#define IPA_ENDP_INIT_AGGR_n_AGGR_BYTE_LIMIT_BMSK 0x3e0
+#define IPA_ENDP_INIT_AGGR_n_AGGR_BYTE_LIMIT_SHFT 0x5
+#define IPA_ENDP_INIT_AGGR_n_AGGR_TYPE_BMSK 0x1c
+#define IPA_ENDP_INIT_AGGR_n_AGGR_TYPE_SHFT 0x2
+#define IPA_ENDP_INIT_AGGR_n_AGGR_EN_BMSK 0x3
+#define IPA_ENDP_INIT_AGGR_n_AGGR_EN_SHFT 0x0
+
+/* IPA_AGGR_FORCE_CLOSE register */
+#define IPA_AGGR_FORCE_CLOSE_AGGR_FORCE_CLOSE_PIPE_BITMAP_BMSK 0x3fffffff
+#define IPA_AGGR_FORCE_CLOSE_AGGR_FORCE_CLOSE_PIPE_BITMAP_SHFT 0
+#define IPA_AGGR_FORCE_CLOSE_AGGR_FORCE_CLOSE_PIPE_BITMAP_BMSK_V3_5 0xfffff
+#define IPA_AGGR_FORCE_CLOSE_AGGR_FORCE_CLOSE_PIPE_BITMAP_SHFT_V3_5 0
+
+/* IPA_ENDP_INIT_ROUTE_n register */
+#define IPA_ENDP_INIT_ROUTE_n_ROUTE_TABLE_INDEX_BMSK 0x1f
+#define IPA_ENDP_INIT_ROUTE_n_ROUTE_TABLE_INDEX_SHFT 0x0
+
+/* IPA_ENDP_INIT_MODE_n register */
+#define IPA_ENDP_INIT_MODE_n_HDR_FTCH_DISABLE_BMSK 0x40000000
+#define IPA_ENDP_INIT_MODE_n_HDR_FTCH_DISABLE_SHFT 0x1e
+#define IPA_ENDP_INIT_MODE_n_PAD_EN_BMSK 0x20000000
+#define IPA_ENDP_INIT_MODE_n_PAD_EN_SHFT 0x1d
+#define IPA_ENDP_INIT_MODE_n_PIPE_REPLICATION_EN_BMSK 0x10000000
+#define IPA_ENDP_INIT_MODE_n_PIPE_REPLICATION_EN_SHFT 0x1c
+#define IPA_ENDP_INIT_MODE_n_BYTE_THRESHOLD_BMSK 0xffff000
+#define IPA_ENDP_INIT_MODE_n_BYTE_THRESHOLD_SHFT 0xc
+#define IPA_ENDP_INIT_MODE_n_DEST_PIPE_INDEX_BMSK 0x1f0
+#define IPA_ENDP_INIT_MODE_n_DEST_PIPE_INDEX_SHFT 0x4
+#define IPA_ENDP_INIT_MODE_n_MODE_BMSK 0x7
+#define IPA_ENDP_INIT_MODE_n_MODE_SHFT 0x0
+
+/* IPA_ENDP_INIT_NAT_n register */
+#define IPA_ENDP_INIT_NAT_n_NAT_EN_BMSK 0x3
+#define IPA_ENDP_INIT_NAT_n_NAT_EN_SHFT 0x0
+
+/* IPA_ENDP_INIT_CTRL_n register */
+#define IPA_ENDP_INIT_CTRL_n_ENDP_SUSPEND_BMSK 0x1
+#define IPA_ENDP_INIT_CTRL_n_ENDP_SUSPEND_SHFT 0x0
+#define IPA_ENDP_INIT_CTRL_n_ENDP_DELAY_BMSK 0x2
+#define IPA_ENDP_INIT_CTRL_n_ENDP_DELAY_SHFT 0x1
+
+/* IPA_ENDP_INIT_HOL_BLOCK_EN_n register */
+#define IPA_ENDP_INIT_HOL_BLOCK_EN_n_RMSK 0x1
+#define IPA_ENDP_INIT_HOL_BLOCK_EN_n_MAX 19
+#define IPA_ENDP_INIT_HOL_BLOCK_EN_n_EN_BMSK 0x1
+#define IPA_ENDP_INIT_HOL_BLOCK_EN_n_EN_SHFT 0x0
+
+/* IPA_ENDP_INIT_HOL_BLOCK_TIMER_n register */
+#define IPA_ENDP_INIT_HOL_BLOCK_TIMER_n_TIMER_BMSK 0xffffffff
+#define IPA_ENDP_INIT_HOL_BLOCK_TIMER_n_TIMER_SHFT 0x0
+
+/* IPA_ENDP_INIT_DEAGGR_n register */
+#define IPA_ENDP_INIT_DEAGGR_n_MAX_PACKET_LEN_BMSK 0xFFFF0000
+#define IPA_ENDP_INIT_DEAGGR_n_MAX_PACKET_LEN_SHFT 0x10
+#define IPA_ENDP_INIT_DEAGGR_n_PACKET_OFFSET_LOCATION_BMSK 0x3F00
+#define IPA_ENDP_INIT_DEAGGR_n_PACKET_OFFSET_LOCATION_SHFT 0x8
+#define IPA_ENDP_INIT_DEAGGR_n_PACKET_OFFSET_VALID_BMSK 0x80
+#define IPA_ENDP_INIT_DEAGGR_n_PACKET_OFFSET_VALID_SHFT 0x7
+#define IPA_ENDP_INIT_DEAGGR_n_DEAGGR_HDR_LEN_BMSK 0x3F
+#define IPA_ENDP_INIT_DEAGGR_n_DEAGGR_HDR_LEN_SHFT 0x0
+
+/* IPA_ENDP_INIT_SEQ_n register */
+#define IPA_ENDP_INIT_SEQ_n_DPS_REP_SEQ_TYPE_BMSK 0xf000
+#define IPA_ENDP_INIT_SEQ_n_DPS_REP_SEQ_TYPE_SHFT 0xc
+#define IPA_ENDP_INIT_SEQ_n_HPS_REP_SEQ_TYPE_BMSK 0xf00
+#define IPA_ENDP_INIT_SEQ_n_HPS_REP_SEQ_TYPE_SHFT 0x8
+#define IPA_ENDP_INIT_SEQ_n_DPS_SEQ_TYPE_BMSK 0xf0
+#define IPA_ENDP_INIT_SEQ_n_DPS_SEQ_TYPE_SHFT 0x4
+#define IPA_ENDP_INIT_SEQ_n_HPS_SEQ_TYPE_BMSK 0xf
+#define IPA_ENDP_INIT_SEQ_n_HPS_SEQ_TYPE_SHFT 0x0
+
+/* IPA_DEBUG_CNT_REG_n register */
+#define IPA_DEBUG_CNT_REG_N_RMSK 0xffffffff
+#define IPA_DEBUG_CNT_REG_N_MAX 15
+#define IPA_DEBUG_CNT_REG_N_DBG_CNT_REG_BMSK 0xffffffff
+#define IPA_DEBUG_CNT_REG_N_DBG_CNT_REG_SHFT 0x0
+
+/* IPA_ENDP_INIT_CFG_n register */
+#define IPA_ENDP_INIT_CFG_n_CS_GEN_QMB_MASTER_SEL_BMSK 0x100
+#define IPA_ENDP_INIT_CFG_n_CS_GEN_QMB_MASTER_SEL_SHFT 0x8
+#define IPA_ENDP_INIT_CFG_n_CS_METADATA_HDR_OFFSET_BMSK 0x78
+#define IPA_ENDP_INIT_CFG_n_CS_METADATA_HDR_OFFSET_SHFT 0x3
+#define IPA_ENDP_INIT_CFG_n_CS_OFFLOAD_EN_BMSK 0x6
+#define IPA_ENDP_INIT_CFG_n_CS_OFFLOAD_EN_SHFT 0x1
+#define IPA_ENDP_INIT_CFG_n_FRAG_OFFLOAD_EN_BMSK 0x1
+#define IPA_ENDP_INIT_CFG_n_FRAG_OFFLOAD_EN_SHFT 0x0
+
+/* IPA_ENDP_INIT_HDR_METADATA_MASK_n register */
+#define IPA_ENDP_INIT_HDR_METADATA_MASK_n_METADATA_MASK_BMSK 0xffffffff
+#define IPA_ENDP_INIT_HDR_METADATA_MASK_n_METADATA_MASK_SHFT 0x0
+
+/* IPA_IPA_ENDP_INIT_HDR_METADATA_n register */
+#define IPA_ENDP_INIT_HDR_METADATA_n_METADATA_BMSK 0xffffffff
+#define IPA_ENDP_INIT_HDR_METADATA_n_METADATA_SHFT 0x0
+
+/* IPA_ENDP_INIT_RSRC_GRP_n register */
+#define IPA_ENDP_INIT_RSRC_GRP_n_RSRC_GRP_BMSK 0x7
+#define IPA_ENDP_INIT_RSRC_GRP_n_RSRC_GRP_SHFT 0
+#define IPA_ENDP_INIT_RSRC_GRP_n_RSRC_GRP_BMSK_v3_5 0x3
+#define IPA_ENDP_INIT_RSRC_GRP_n_RSRC_GRP_SHFT_v3_5 0
+
+/* IPA_SHARED_MEM_SIZE register */
+#define IPA_SHARED_MEM_SIZE_SHARED_MEM_BADDR_BMSK 0xffff0000
+#define IPA_SHARED_MEM_SIZE_SHARED_MEM_BADDR_SHFT 0x10
+#define IPA_SHARED_MEM_SIZE_SHARED_MEM_SIZE_BMSK 0xffff
+#define IPA_SHARED_MEM_SIZE_SHARED_MEM_SIZE_SHFT 0x0
+
+/* IPA_DEBUG_CNT_CTRL_n register */
+#define IPA_DEBUG_CNT_CTRL_n_DBG_CNT_RULE_INDEX_PIPE_RULE_BMSK 0x10000000
+#define IPA_DEBUG_CNT_CTRL_n_DBG_CNT_RULE_INDEX_PIPE_RULE_SHFT 0x1c
+#define IPA_DEBUG_CNT_CTRL_n_DBG_CNT_RULE_INDEX_BMSK 0x0ff00000
+#define IPA_DEBUG_CNT_CTRL_n_DBG_CNT_RULE_INDEX_BMSK_V3_5 0x1ff00000
+#define IPA_DEBUG_CNT_CTRL_n_DBG_CNT_RULE_INDEX_SHFT 0x14
+#define IPA_DEBUG_CNT_CTRL_n_DBG_CNT_SOURCE_PIPE_BMSK 0x1f000
+#define IPA_DEBUG_CNT_CTRL_n_DBG_CNT_SOURCE_PIPE_SHFT 0xc
+#define IPA_DEBUG_CNT_CTRL_n_DBG_CNT_PRODUCT_BMSK 0x100
+#define IPA_DEBUG_CNT_CTRL_n_DBG_CNT_PRODUCT_SHFT 0x8
+#define IPA_DEBUG_CNT_CTRL_n_DBG_CNT_TYPE_BMSK 0x70
+#define IPA_DEBUG_CNT_CTRL_n_DBG_CNT_TYPE_SHFT 0x4
+#define IPA_DEBUG_CNT_CTRL_n_DBG_CNT_EN_BMSK 0x1
+#define IPA_DEBUG_CNT_CTRL_n_DBG_CNT_EN_SHFT 0x0
+
+/* IPA_FILT_ROUT_HASH_FLUSH register */
+#define IPA_FILT_ROUT_HASH_FLUSH_IPv4_FILT_SHFT 12
+#define IPA_FILT_ROUT_HASH_FLUSH_IPv4_ROUT_SHFT 8
+#define IPA_FILT_ROUT_HASH_FLUSH_IPv6_FILT_SHFT 4
+#define IPA_FILT_ROUT_HASH_FLUSH_IPv6_ROUT_SHFT 0
+
+/* IPA_SINGLE_NDP_MODE register */
+#define IPA_SINGLE_NDP_MODE_UNDEFINED_BMSK 0xfffffffe
+#define IPA_SINGLE_NDP_MODE_UNDEFINED_SHFT 0x1
+#define IPA_SINGLE_NDP_MODE_SINGLE_NDP_EN_BMSK 0x1
+#define IPA_SINGLE_NDP_MODE_SINGLE_NDP_EN_SHFT 0
+
+/* IPA_QCNCM register */
+#define IPA_QCNCM_MODE_UNDEFINED2_BMSK 0xf0000000
+#define IPA_QCNCM_MODE_UNDEFINED2_SHFT 0x1c
+#define IPA_QCNCM_MODE_VAL_BMSK 0xffffff0
+#define IPA_QCNCM_MODE_VAL_SHFT 0x4
+#define IPA_QCNCM_UNDEFINED1_BMSK 0xe
+#define IPA_QCNCM_UNDEFINED1_SHFT 0x1
+#define IPA_QCNCM_MODE_EN_BMSK 0x1
+#define IPA_QCNCM_MODE_EN_SHFT 0
+
+/* IPA_ENDP_STATUS_n register */
+#define IPA_ENDP_STATUS_n_STATUS_LOCATION_BMSK 0x100
+#define IPA_ENDP_STATUS_n_STATUS_LOCATION_SHFT 0x8
+#define IPA_ENDP_STATUS_n_STATUS_ENDP_BMSK 0x3e
+#define IPA_ENDP_STATUS_n_STATUS_ENDP_SHFT 0x1
+#define IPA_ENDP_STATUS_n_STATUS_EN_BMSK 0x1
+#define IPA_ENDP_STATUS_n_STATUS_EN_SHFT 0x0
+
+/* IPA_ENDP_FILTER_ROUTER_HSH_CFG_n register */
+#define IPA_ENDP_FILTER_ROUTER_HSH_CFG_n_FILTER_HASH_MSK_SRC_ID_SHFT 0
+#define IPA_ENDP_FILTER_ROUTER_HSH_CFG_n_FILTER_HASH_MSK_SRC_ID_BMSK 0x1
+#define IPA_ENDP_FILTER_ROUTER_HSH_CFG_n_FILTER_HASH_MSK_SRC_IP_SHFT 1
+#define IPA_ENDP_FILTER_ROUTER_HSH_CFG_n_FILTER_HASH_MSK_SRC_IP_BMSK 0x2
+#define IPA_ENDP_FILTER_ROUTER_HSH_CFG_n_FILTER_HASH_MSK_DST_IP_SHFT 2
+#define IPA_ENDP_FILTER_ROUTER_HSH_CFG_n_FILTER_HASH_MSK_DST_IP_BMSK 0x4
+#define IPA_ENDP_FILTER_ROUTER_HSH_CFG_n_FILTER_HASH_MSK_SRC_PORT_SHFT 3
+#define IPA_ENDP_FILTER_ROUTER_HSH_CFG_n_FILTER_HASH_MSK_SRC_PORT_BMSK 0x8
+#define IPA_ENDP_FILTER_ROUTER_HSH_CFG_n_FILTER_HASH_MSK_DST_PORT_SHFT 4
+#define IPA_ENDP_FILTER_ROUTER_HSH_CFG_n_FILTER_HASH_MSK_DST_PORT_BMSK 0x10
+#define IPA_ENDP_FILTER_ROUTER_HSH_CFG_n_FILTER_HASH_MSK_PROTOCOL_SHFT 5
+#define IPA_ENDP_FILTER_ROUTER_HSH_CFG_n_FILTER_HASH_MSK_PROTOCOL_BMSK 0x20
+#define IPA_ENDP_FILTER_ROUTER_HSH_CFG_n_FILTER_HASH_MSK_METADATA_SHFT 6
+#define IPA_ENDP_FILTER_ROUTER_HSH_CFG_n_FILTER_HASH_MSK_METADATA_BMSK 0x40
+#define IPA_ENDP_FILTER_ROUTER_HSH_CFG_n_UNDEFINED1_SHFT 7
+#define IPA_ENDP_FILTER_ROUTER_HSH_CFG_n_UNDEFINED1_BMSK 0xff80
+#define IPA_ENDP_FILTER_ROUTER_HSH_CFG_n_ROUTER_HASH_MSK_SRC_ID_SHFT 16
+#define IPA_ENDP_FILTER_ROUTER_HSH_CFG_n_ROUTER_HASH_MSK_SRC_ID_BMSK 0x10000
+#define IPA_ENDP_FILTER_ROUTER_HSH_CFG_n_ROUTER_HASH_MSK_SRC_IP_SHFT 17
+#define IPA_ENDP_FILTER_ROUTER_HSH_CFG_n_ROUTER_HASH_MSK_SRC_IP_BMSK 0x20000
+#define IPA_ENDP_FILTER_ROUTER_HSH_CFG_n_ROUTER_HASH_MSK_DST_IP_SHFT 18
+#define IPA_ENDP_FILTER_ROUTER_HSH_CFG_n_ROUTER_HASH_MSK_DST_IP_BMSK 0x40000
+#define IPA_ENDP_FILTER_ROUTER_HSH_CFG_n_ROUTER_HASH_MSK_SRC_PORT_SHFT 19
+#define IPA_ENDP_FILTER_ROUTER_HSH_CFG_n_ROUTER_HASH_MSK_SRC_PORT_BMSK 0x80000
+#define IPA_ENDP_FILTER_ROUTER_HSH_CFG_n_ROUTER_HASH_MSK_DST_PORT_SHFT 20
+#define IPA_ENDP_FILTER_ROUTER_HSH_CFG_n_ROUTER_HASH_MSK_DST_PORT_BMSK 0x100000
+#define IPA_ENDP_FILTER_ROUTER_HSH_CFG_n_ROUTER_HASH_MSK_PROTOCOL_SHFT 21
+#define IPA_ENDP_FILTER_ROUTER_HSH_CFG_n_ROUTER_HASH_MSK_PROTOCOL_BMSK 0x200000
+#define IPA_ENDP_FILTER_ROUTER_HSH_CFG_n_ROUTER_HASH_MSK_METADATA_SHFT 22
+#define IPA_ENDP_FILTER_ROUTER_HSH_CFG_n_ROUTER_HASH_MSK_METADATA_BMSK 0x400000
+#define IPA_ENDP_FILTER_ROUTER_HSH_CFG_n_UNDEFINED2_SHFT 23
+#define IPA_ENDP_FILTER_ROUTER_HSH_CFG_n_UNDEFINED2_BMSK 0xff800000
+
+/* IPA_RSRC_GRP_XY_RSRC_TYPE_n register */
+#define IPA_RSRC_GRP_XY_RSRC_TYPE_n_Y_MAX_LIM_BMSK 0xFF000000
+#define IPA_RSRC_GRP_XY_RSRC_TYPE_n_Y_MAX_LIM_SHFT 24
+#define IPA_RSRC_GRP_XY_RSRC_TYPE_n_Y_MIN_LIM_BMSK 0xFF0000
+#define IPA_RSRC_GRP_XY_RSRC_TYPE_n_Y_MIN_LIM_SHFT 16
+#define IPA_RSRC_GRP_XY_RSRC_TYPE_n_X_MAX_LIM_BMSK 0xFF00
+#define IPA_RSRC_GRP_XY_RSRC_TYPE_n_X_MAX_LIM_SHFT 8
+#define IPA_RSRC_GRP_XY_RSRC_TYPE_n_X_MIN_LIM_BMSK 0xFF
+#define IPA_RSRC_GRP_XY_RSRC_TYPE_n_X_MIN_LIM_SHFT 0
+#define IPA_RSRC_GRP_XY_RSRC_TYPE_n_Y_MAX_LIM_BMSK_V3_5 0x3F000000
+#define IPA_RSRC_GRP_XY_RSRC_TYPE_n_Y_MAX_LIM_SHFT_V3_5 24
+#define IPA_RSRC_GRP_XY_RSRC_TYPE_n_Y_MIN_LIM_BMSK_V3_5 0x3F0000
+#define IPA_RSRC_GRP_XY_RSRC_TYPE_n_Y_MIN_LIM_SHFT_V3_5 16
+#define IPA_RSRC_GRP_XY_RSRC_TYPE_n_X_MAX_LIM_BMSK_V3_5 0x3F00
+#define IPA_RSRC_GRP_XY_RSRC_TYPE_n_X_MAX_LIM_SHFT_V3_5 8
+#define IPA_RSRC_GRP_XY_RSRC_TYPE_n_X_MIN_LIM_BMSK_V3_5 0x3F
+#define IPA_RSRC_GRP_XY_RSRC_TYPE_n_X_MIN_LIM_SHFT_V3_5 0
+
+
+/* IPA_RX_HPS_CLIENTS_MIN/MAX_DEPTH_0/1 registers */
+#define IPA_RX_HPS_CLIENTS_MINMAX_DEPTH_X_CLIENT_n_BMSK(n) (0x7F << (8 * (n)))
+#define IPA_RX_HPS_CLIENTS_MINMAX_DEPTH_X_CLIENT_n_BMSK_V3_5(n) \
+ (0xF << (8 * (n)))
+#define IPA_RX_HPS_CLIENTS_MINMAX_DEPTH_X_CLIENT_n_SHFT(n) (8 * (n))
+
+/* IPA_QSB_MAX_WRITES register */
+#define IPA_QSB_MAX_WRITES_GEN_QMB_0_MAX_WRITES_BMSK (0xf)
+#define IPA_QSB_MAX_WRITES_GEN_QMB_0_MAX_WRITES_SHFT (0)
+#define IPA_QSB_MAX_WRITES_GEN_QMB_1_MAX_WRITES_BMSK (0xf0)
+#define IPA_QSB_MAX_WRITES_GEN_QMB_1_MAX_WRITES_SHFT (4)
+
+/* IPA_QSB_MAX_READS register */
+#define IPA_QSB_MAX_READS_GEN_QMB_0_MAX_READS_BMSK (0xf)
+#define IPA_QSB_MAX_READS_GEN_QMB_0_MAX_READS_SHFT (0)
+#define IPA_QSB_MAX_READS_GEN_QMB_1_MAX_READS_BMSK (0xf0)
+#define IPA_QSB_MAX_READS_GEN_QMB_1_MAX_READS_SHFT (4)
+
+/* IPA_TX_CFG register */
+#define IPA_TX_CFG_TX0_PREFETCH_DISABLE_BMSK_V3_5 (0x1)
+#define IPA_TX_CFG_TX0_PREFETCH_DISABLE_SHFT_V3_5 (0)
+#define IPA_TX_CFG_TX1_PREFETCH_DISABLE_BMSK_V3_5 (0x2)
+#define IPA_TX_CFG_TX1_PREFETCH_DISABLE_SHFT_V3_5 (1)
+#define IPA_TX_CFG_PREFETCH_ALMOST_EMPTY_SIZE_BMSK_V3_5 (0x1C)
+#define IPA_TX_CFG_PREFETCH_ALMOST_EMPTY_SIZE_SHFT_V3_5 (2)
+
+#endif /* _IPAHAL_REG_I_H_ */