authorSandeep Paulraj <s-paulraj@ti.com>2012-09-13 11:30:23 -0400
committerCyril Chemparathy <cyril@ti.com>2012-09-21 10:44:14 -0400
commit68d6163fc0af1c01f8397b80c62d9f036421a217 (patch)
treeb7fe18616d614051633e6da75fbc96588196c146
parentea590abf3a411b22a099293a10e54e7495f300d0 (diff)
downloadlinux-keystone-68d6163fc0af1c01f8397b80c62d9f036421a217.tar.gz
net:keystone: add packet accelerator driver
This commit adds support for the packet accelerator on Keystone devices. The driver downloads firmware to each of the six PDSPs of the packet accelerator. It also initializes the timers to enable the timestamping feature. MAC rules are added to filter MAC addresses. Signed-off-by: Sandeep Paulraj <s-paulraj@ti.com>
-rw-r--r--drivers/net/ethernet/ti/Kconfig11
-rw-r--r--drivers/net/ethernet/ti/Makefile1
-rw-r--r--drivers/net/ethernet/ti/keystone_pa.c1644
-rw-r--r--drivers/net/ethernet/ti/keystone_pa.h737
-rw-r--r--drivers/net/ethernet/ti/keystone_pasahost.h384
5 files changed, 2777 insertions, 0 deletions
diff --git a/drivers/net/ethernet/ti/Kconfig b/drivers/net/ethernet/ti/Kconfig
index 921d6a16a4405c..01ffb5346d8e0b 100644
--- a/drivers/net/ethernet/ti/Kconfig
+++ b/drivers/net/ethernet/ti/Kconfig
@@ -70,6 +70,17 @@ config TI_KEYSTONE_NET
To compile this driver as a module, choose M here: the module
will be called keystone_net.
+config TI_KEYSTONE_PA
+ tristate "TI Keystone Packet Accelerator Support"
+ depends on TI_KEYSTONE
+ default y if TI_KEYSTONE
+ ---help---
+ This driver supports TI's Keystone Packet Accelerator.
+
+ To compile this driver as a module, choose M here: the module
+ will be called keystone_pa.
+
+
config TLAN
tristate "TI ThunderLAN support"
depends on (PCI || EISA)
diff --git a/drivers/net/ethernet/ti/Makefile b/drivers/net/ethernet/ti/Makefile
index d69cc39a688403..e8647deca87240 100644
--- a/drivers/net/ethernet/ti/Makefile
+++ b/drivers/net/ethernet/ti/Makefile
@@ -10,6 +10,7 @@ obj-$(CONFIG_TI_DAVINCI_CPDMA) += davinci_cpdma.o
obj-$(CONFIG_TI_CPSW) += ti_cpsw.o
ti_cpsw-y := cpsw_ale.o cpsw.o
obj-$(CONFIG_TI_KEYSTONE_NET) += keystone_net.o
+obj-$(CONFIG_TI_KEYSTONE_PA) += keystone_pa.o
keystone_net-y += cpsw_ale.o \
keystone_ethss.o \
keystone_sgmii.o \
diff --git a/drivers/net/ethernet/ti/keystone_pa.c b/drivers/net/ethernet/ti/keystone_pa.c
new file mode 100644
index 00000000000000..9679a7ca84693d
--- /dev/null
+++ b/drivers/net/ethernet/ti/keystone_pa.c
@@ -0,0 +1,1644 @@
+/*
+ * Copyright (C) 2012 Texas Instruments Incorporated
+ * Authors: Sandeep Paulraj <s-paulraj@ti.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation version 2.
+ *
+ * This program is distributed "as is" WITHOUT ANY WARRANTY of any
+ * kind, whether express or implied; without even the implied warranty
+ * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/io.h>
+#include <linux/clk.h>
+#include <linux/slab.h>
+#include <linux/delay.h>
+#include <linux/types.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/of_address.h>
+#include <linux/firmware.h>
+#include <linux/spinlock.h>
+#include <linux/interrupt.h>
+#include <linux/dmaengine.h>
+#include <linux/dma-mapping.h>
+#include <linux/scatterlist.h>
+#include <linux/byteorder/generic.h>
+#include <linux/platform_device.h>
+#include <linux/keystone-dma.h>
+#include <linux/errqueue.h>
+
+#include "keystone_net.h"
+#include "keystone_pa.h"
+#include "keystone_pasahost.h"
+
+#define DEVICE_PA_PDSP02_FIRMWARE "keystone/pa_pdsp02_classify1.fw"
+#define DEVICE_PA_PDSP3_FIRMWARE "keystone/pa_pdsp3_classify2.fw"
+#define DEVICE_PA_PDSP45_FIRMWARE "keystone/pa_pdsp45_pam.fw"
+
+#define PSTREAM_ROUTE_PDSP0 0
+
+#define PA_PDSP_ALREADY_ACTIVE 0
+#define PA_PDSP_RESET_RELEASED 1
+#define PA_PDSP_NO_RESTART 2
+#define PA_MAX_PDSP_ENABLE_LOOP_COUNT 100000
+
+#define PA_STATE_RESET 0 /* Sub-system state reset */
+#define PA_STATE_ENABLE 1 /* Sub-system state enable */
+#define PA_STATE_QUERY 2 /* Query the Sub-system state */
+#define PA_STATE_INCONSISTENT 3 /* Sub-system is partially enabled */
+#define PA_STATE_INVALID_REQUEST 4 /* Invalid state command to the Sub-system */
+#define PA_STATE_ENABLE_FAILED 5 /* The Sub-system did not respond after restart */
+
+/* System Timestamp */
+#define PAFRM_SRAM_SIZE 0x2000
+#define PAFRM_SYS_TIMESTAMP_ADDR 0x6460
+
+/* PDSP Versions */
+#define PAFRM_PDSP_VERSION_BASE 0x7F04
+
+#define DEVICE_PA_BASE 0x02000000
+#define DEVICE_PA_REGION_SIZE 0x48000
+#define DEVICE_PA_NUM_PDSPS 6
+
+#define PA_MEM_PDSP_IRAM(pdsp) ((pdsp) * 0x8000)
+#define PA_MEM_PDSP_SRAM(num) ((num) * 0x2000)
+#define PA_REG_PKTID_SOFT_RESET 0x00404
+#define PA_REG_LUT2_SOFT_RESET 0x00504
+#define PA_REG_STATS_SOFT_RESET 0x06004
+
+#define PA_PDSP_CONST_REG_INDEX_C25_C24 0
+#define PA_PDSP_CONST_REG_INDEX_C27_C26 1
+#define PA_PDSP_CONST_REG_INDEX_C29_C28 2
+#define PA_PDSP_CONST_REG_INDEX_C31_C30 3
+
+/* The pdsp control register */
+#define PA_REG_VAL_PDSP_CTL_DISABLE_PDSP 1
+#define PA_REG_VAL_PDSP_CTL_RESET_PDSP 0
+#define PA_REG_VAL_PDSP_CTL_STATE (1 << 15)
+#define PA_REG_VAL_PDSP_CTL_ENABLE (1 << 1)
+#define PA_REG_VAL_PDSP_CTL_SOFT_RESET (1 << 0)
+#define PA_REG_VAL_PDSP_CTL_ENABLE_PDSP(pcval) (((pcval) << 16) \
+ | PA_REG_VAL_PDSP_CTL_ENABLE \
+ | PA_REG_VAL_PDSP_CTL_SOFT_RESET)
+
+/* Number of mailbox slots for each PDPS */
+#define PA_NUM_MAILBOX_SLOTS 4
+#define TEST_SWINFO0_TIMESTAMP 0x12340002
+
+#define PACKET_DROP 0
+#define PACKET_PARSE 1
+#define PACKET_HST 2
+
+#define NT 32
+
+#define PA_SGLIST_SIZE 3
+
+const u32 pap_pdsp_const_reg_map[6][4] =
+{
+ /* PDSP0: C24-C31 */
+ {
+ 0x0000007F, /* C25-C24 */
+ 0x0000006E, /* C27-C26 */
+ 0x00000000, /* C29-C28 */
+ 0x00000000 /* C31-C30 */
+ },
+ /* PDSP1: C24-C31 */
+ {
+ 0x0001007F, /* C25-C24 */
+ 0x00480040, /* C27-C26 */
+ 0x00000000, /* C29-C28 */
+ 0x00000000 /* C31-C30 */
+ },
+ /* PDSP2: C24-C31 */
+ {
+ 0x0002007F, /* C25-C24 */
+ 0x00490044, /* C27-C26 */
+ 0x00000000, /* C29-C28 */
+ 0x00000000 /* C31-C30 */
+ },
+ /* PDSP3: C24-C31 */
+ {
+ 0x0003007F, /* C25-C24 */
+ 0x0000006E, /* C27-C26 */
+ 0x00000000, /* C29-C28 */
+ 0x00000000 /* C31-C30 */
+ },
+ /* PDSP4: C24-C31 */
+ {
+ 0x0070007F, /* C25-C24 */
+ 0x00000000, /* C27-C26 */
+ 0x04080404, /* C29-C28 */
+ 0x00000000 /* C31-C30 */
+ },
+ /* PDSP5: C24-C31 */
+ {
+ 0x0071007F, /* C25-C24 */
+ 0x00000000, /* C27-C26 */
+ 0x04080404, /* C29-C28 */
+ 0x00000000 /* C31-C30 */
+ }
+};
+
+struct pa_mailbox_regs {
+ u32 pdsp_mailbox_slot0;
+ u32 pdsp_mailbox_slot1;
+ u32 pdsp_mailbox_slot2;
+ u32 pdsp_mailbox_slot3;
+};
+
+struct pa_packet_id_alloc_regs {
+ u32 revision;
+ u32 soft_reset;
+ u32 range_limit;
+ u32 idvalue;
+};
+
+struct pa_lut2_control_regs {
+ u32 revision;
+ u32 soft_reset;
+ u32 rsvd[6];
+ u32 add_data0;
+ u32 add_data1;
+ u32 add_data2;
+ u32 add_data3;
+ u32 add_del_key;
+ u32 add_del_control;
+};
+
+struct pa_pdsp_control_regs {
+ u32 control;
+ u32 status;
+ u32 wakeup_enable;
+ u32 cycle_count;
+ u32 stall_count;
+ u32 rsvd[3];
+ u32 const_tbl_blk_index0;
+ u32 const_tbl_blk_index1;
+ u32 const_tbl_prog_pointer0;
+ u32 const_tbl_prog_pointer1;
+ u32 rsvd1[52];
+};
+
+struct pa_pdsp_timer_regs {
+ u32 timer_control;
+ u32 timer_load;
+ u32 timer_value;
+ u32 timer_interrupt;
+ u32 rsvd[60];
+};
+
+struct pa_statistics_regs {
+ u32 revision;
+ u32 soft_reset;
+ u32 incr_flags;
+ u32 stats_capture;
+ u32 rsvd[4];
+ u32 stats_red[32];
+};
+
+struct pa_device {
+ struct device *dev;
+ struct netcp_module_data module;
+ struct clk *clk;
+ struct dma_chan *tx_channel;
+ struct dma_chan *rx_channel;
+ const char *tx_chan_name;
+ const char *rx_chan_name;
+ unsigned cmd_flow_num;
+ unsigned cmd_queue_num;
+ unsigned data_flow_num;
+ unsigned data_queue_num;
+
+ u64 pa2system_offset;
+
+ struct pa_mailbox_regs __iomem *reg_mailbox;
+ struct pa_packet_id_alloc_regs __iomem *reg_packet_id;
+ struct pa_lut2_control_regs __iomem *reg_lut2;
+ struct pa_pdsp_control_regs __iomem *reg_control;
+ struct pa_pdsp_timer_regs __iomem *reg_timer;
+ struct pa_statistics_regs __iomem *reg_stats;
+ void __iomem *pa_sram;
+ void __iomem *pa_iram;
+ void __iomem *streaming_switch;
+
+ u8 *mc_list;
+ u8 addr_count;
+ struct tasklet_struct task;
+ spinlock_t lock;
+
+ struct netcp_tx_pipe tx_pipe;
+ u32 tx_cmd_queue_depth;
+ u32 tx_data_queue_depth;
+ u32 rx_pool_depth;
+ u32 rx_buffer_size;
+};
+
+#define pa_from_module(data) container_of(data, struct pa_device, module)
+#define pa_to_module(pa) (&(pa)->module)
+
+struct pa_packet {
+ struct scatterlist sg[PA_SGLIST_SIZE];
+ int sg_ents;
+ enum dma_status status;
+ enum dma_transfer_direction direction;
+ struct pa_device *priv;
+ struct dma_chan *chan;
+ struct dma_async_tx_descriptor *desc;
+ dma_cookie_t cookie;
+ u32 epib[4];
+ u32 psdata[6];
+ struct completion complete;
+ void *data;
+};
+
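+/*
+ * Copy firmware words into PDSP IRAM, converting each 32-bit word from
+ * the big-endian firmware image to CPU byte order.
+ */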
+static void pdsp_fw_put(u32 *dest, const u32 *src, u32 wc)
+{
+ int i;
+
+ for (i = 0; i < wc; i++)
+ *dest++ = be32_to_cpu(*src++);
+}
+
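+/*
+ * Convert the multi-byte fields of a forwarding entry to the big-endian
+ * byte order expected by the PA firmware; single-byte fields are listed
+ * only for completeness and are left unchanged.
+ */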
+static inline void swizFwd (struct pa_frm_forward *fwd)
+{
+ fwd->flow_id = fwd->flow_id;
+ fwd->queue = cpu_to_be16(fwd->queue);
+
+ if (fwd->forward_type == PAFRM_FORWARD_TYPE_HOST) {
+ fwd->u.host.context = cpu_to_be32(fwd->u.host.context);
+ fwd->u.host.multi_route = fwd->u.host.multi_route;
+ fwd->u.host.multi_idx = fwd->u.host.multi_idx;
+ fwd->u.host.pa_pdsp_router = fwd->u.host.pa_pdsp_router;
+ } else if (fwd->forward_type == PAFRM_FORWARD_TYPE_SA) {
+ fwd->u.sa.sw_info_0 = cpu_to_be32(fwd->u.sa.sw_info_0);
+ fwd->u.sa.sw_info_1 = cpu_to_be32(fwd->u.sa.sw_info_1);
+ } else if (fwd->forward_type == PAFRM_FORWARD_TYPE_SRIO) {
+ fwd->u.srio.ps_info0 = cpu_to_be32(fwd->u.srio.ps_info0);
+ fwd->u.srio.ps_info1 = cpu_to_be32(fwd->u.srio.ps_info1);
+ fwd->u.srio.pkt_type = fwd->u.srio.pkt_type;
+ } else if (fwd->forward_type == PAFRM_FORWARD_TYPE_ETH) {
+ fwd->u.eth.ps_flags = fwd->u.eth.ps_flags;
+ } else if (fwd->forward_type == PAFRM_FORWARD_TYPE_PA) {
+ fwd->u.pa.pa_dest = fwd->u.pa.pa_dest;
+ fwd->u.pa.custom_type = fwd->u.pa.custom_type;
+ fwd->u.pa.custom_idx = fwd->u.pa.custom_idx;
+ }
+
+ fwd->forward_type = fwd->forward_type;
+}
+
+static inline void swizFcmd (struct pa_frm_command *fcmd)
+{
+ fcmd->command_result = cpu_to_be32(fcmd->command_result);
+ fcmd->command = fcmd->command;
+ fcmd->magic = fcmd->magic;
+ fcmd->com_id = cpu_to_be16(fcmd->com_id);
+ fcmd->ret_context = cpu_to_be32(fcmd->ret_context);
+ fcmd->reply_queue = cpu_to_be16(fcmd->reply_queue);
+ fcmd->reply_dest = fcmd->reply_dest;
+ fcmd->flow_id = fcmd->flow_id;
+}
+
+static inline void swizAl1 (struct pa_frm_cmd_add_lut1 *al1)
+{
+ al1->index = al1->index;
+ al1->type = al1->type;
+ al1->cust_index = al1->cust_index;
+
+ if (al1->type == PAFRM_COM_ADD_LUT1_STANDARD) {
+ al1->u.eth_ip.etype = cpu_to_be16(al1->u.eth_ip.etype);
+ al1->u.eth_ip.vlan = cpu_to_be16(al1->u.eth_ip.vlan);
+ al1->u.eth_ip.spi = cpu_to_be32(al1->u.eth_ip.spi);
+ al1->u.eth_ip.flow = cpu_to_be32(al1->u.eth_ip.flow);
+
+ if (al1->u.eth_ip.key & PAFRM_LUT1_KEY_MPLS)
+ al1->u.eth_ip.pm.mpls = cpu_to_be32(al1->u.eth_ip.pm.mpls);
+ else {
+ al1->u.eth_ip.pm.ports[0] = cpu_to_be16(al1->u.eth_ip.pm.ports[0]);
+ al1->u.eth_ip.pm.ports[1] = cpu_to_be16(al1->u.eth_ip.pm.ports[1]);
+ }
+
+ al1->u.eth_ip.proto_next = al1->u.eth_ip.proto_next;
+ al1->u.eth_ip.tos_tclass = al1->u.eth_ip.tos_tclass;
+ al1->u.eth_ip.inport = al1->u.eth_ip.inport;
+ al1->u.eth_ip.key = al1->u.eth_ip.key;
+ al1->u.eth_ip.match_flags = cpu_to_be16(al1->u.eth_ip.match_flags);
+ } else if (al1->type == PAFRM_COM_ADD_LUT1_SRIO) {
+ al1->u.srio.src_id = cpu_to_be16(al1->u.srio.src_id);
+ al1->u.srio.dest_id = cpu_to_be16(al1->u.srio.dest_id);
+ al1->u.srio.etype = cpu_to_be16(al1->u.srio.etype);
+ al1->u.srio.vlan = cpu_to_be16(al1->u.srio.vlan);
+ al1->u.srio.pri = al1->u.srio.pri;
+ al1->u.srio.type_param1 = cpu_to_be16(al1->u.srio.type_param1);
+ al1->u.srio.type_param2 = al1->u.srio.type_param2;
+ al1->u.srio.key = al1->u.srio.key;
+ al1->u.srio.match_flags = cpu_to_be16(al1->u.srio.match_flags);
+ al1->u.srio.next_hdr = al1->u.srio.next_hdr;
+ al1->u.srio.next_hdr_offset = cpu_to_be16(al1->u.srio.next_hdr_offset);
+ } else {
+ al1->u.custom.etype = cpu_to_be16(al1->u.custom.etype);
+ al1->u.custom.vlan = cpu_to_be16(al1->u.custom.vlan);
+ al1->u.custom.key = al1->u.custom.key;
+ al1->u.custom.match_flags = cpu_to_be16(al1->u.custom.match_flags);
+ }
+
+ swizFwd(&(al1->match));
+ swizFwd(&(al1->next_fail));
+}
+
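+/*
+ * Translate a host-side pa_route_info into the firmware pa_frm_forward
+ * layout: select the forwarding type from the destination, fill in the
+ * matching member of the union, and encode an optional simple receive
+ * command (patch data or command set) for host and SA destinations.
+ */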
+static int pa_conv_routing_info(struct pa_frm_forward *fwd_info,
+ struct pa_route_info *route_info,
+ int cmd_dest, u16 fail_route)
+{
+ u8 *pcmd = NULL;
+ fwd_info->flow_id = route_info->flow_id;
+ fwd_info->queue = route_info->queue;
+
+ if (route_info->dest == PA_DEST_HOST) {
+ fwd_info->forward_type = PAFRM_FORWARD_TYPE_HOST;
+ fwd_info->u.host.context = route_info->sw_info_0;
+
+ if (route_info->m_route_index >= 0) {
+ if (route_info->m_route_index >= PA_MAX_MULTI_ROUTE_SETS) {
+ return (PA_ERR_CONFIG);
+ }
+
+ fwd_info->u.host.multi_route = 1;
+ fwd_info->u.host.multi_idx = route_info->m_route_index;
+ fwd_info->u.host.pa_pdsp_router = PAFRM_DEST_PA_M_0;
+ }
+ pcmd = fwd_info->u.host.cmd;
+ } else if (route_info->dest == PA_DEST_DISCARD) {
+ fwd_info->forward_type = PAFRM_FORWARD_TYPE_DISCARD;
+ } else if (route_info->dest == PA_DEST_EMAC) {
+ fwd_info->forward_type = PAFRM_FORWARD_TYPE_ETH;
+ fwd_info->u.eth.ps_flags = (route_info->pkt_type_emac_ctrl &
+ PA_EMAC_CTRL_CRC_DISABLE)?
+ PAFRM_ETH_PS_FLAGS_DISABLE_CRC:0;
+ fwd_info->u.eth.ps_flags |= ((route_info->pkt_type_emac_ctrl &
+ PA_EMAC_CTRL_PORT_MASK) <<
+ PAFRM_ETH_PS_FLAGS_PORT_SHIFT);
+ } else if (fail_route) {
+ return (PA_ERR_CONFIG);
+
+ } else if (((route_info->dest == PA_DEST_CONTINUE_PARSE_LUT1) &&
+ (route_info->custom_type != PA_CUSTOM_TYPE_LUT2)) ||
+ ((route_info->dest == PA_DEST_CONTINUE_PARSE_LUT2) &&
+ (route_info->custom_type != PA_CUSTOM_TYPE_LUT1))) {
+
+ /* Custom Error check */
+ if (((route_info->custom_type == PA_CUSTOM_TYPE_LUT1) &&
+ (route_info->custom_index >= PA_MAX_CUSTOM_TYPES_LUT1)) ||
+ ((route_info->custom_type == PA_CUSTOM_TYPE_LUT2) &&
+ (route_info->custom_index >= PA_MAX_CUSTOM_TYPES_LUT2)))
+ return(PA_ERR_CONFIG);
+
+ fwd_info->forward_type = PAFRM_FORWARD_TYPE_PA;
+ fwd_info->u.pa.custom_type = (u8)route_info->custom_type;
+ fwd_info->u.pa.custom_idx = route_info->custom_index;
+
+ if (route_info->dest == PA_DEST_CONTINUE_PARSE_LUT2) {
+ fwd_info->u.pa.pa_dest = PAFRM_DEST_PA_C2;
+ } else {
+ /*
+ * cmd_dest is provided by calling function
+ * There is no need to check error case
+ */
+ fwd_info->u.pa.pa_dest = (cmd_dest == PA_CMD_TX_DEST_0)?
+ PAFRM_DEST_PA_C1_1:PAFRM_DEST_PA_C1_2;
+ }
+ } else if (route_info->dest == PA_DEST_SASS) {
+ fwd_info->forward_type = PAFRM_FORWARD_TYPE_SA;
+ fwd_info->u.sa.sw_info_0 = route_info->sw_info_0;
+ fwd_info->u.sa.sw_info_1 = route_info->sw_info_1;
+ pcmd = fwd_info->u.sa.cmd;
+ } else if (route_info->dest == PA_DEST_SRIO) {
+ fwd_info->forward_type = PAFRM_FORWARD_TYPE_SRIO;
+ fwd_info->u.srio.ps_info0 = route_info->sw_info_0;
+ fwd_info->u.srio.ps_info1 = route_info->sw_info_1;
+ fwd_info->u.srio.pkt_type = route_info->pkt_type_emac_ctrl;
+ } else {
+ return (PA_ERR_CONFIG);
+ }
+
+ if (pcmd && route_info->pcmd) {
+ struct pa_cmd_info *pacmd = route_info->pcmd;
+ struct pa_patch_info *patch_info;
+ struct pa_cmd_set *cmd_set;
+
+ switch (pacmd->cmd) {
+ case PA_CMD_PATCH_DATA:
+ patch_info = &pacmd->params.patch;
+ if ((patch_info->n_patch_bytes > 2) ||
+ (patch_info->overwrite) ||
+ (patch_info->patch_data == NULL))
+ return (PA_ERR_CONFIG);
+
+ pcmd[0] = PAFRM_RX_CMD_CMDSET;
+ pcmd[1] = patch_info->n_patch_bytes;
+ pcmd[2] = patch_info->patch_data[0];
+ pcmd[3] = patch_info->patch_data[1];
+ break;
+
+ case PA_CMD_CMDSET:
+ cmd_set = &pacmd->params.cmd_set;
+ if(cmd_set->index >= PA_MAX_CMD_SETS)
+ return (PA_ERR_CONFIG);
+
+ pcmd[0] = PAFRM_RX_CMD_CMDSET;
+ pcmd[1] = (u8)cmd_set->index;
+ break;
+ default:
+ return(PA_ERR_CONFIG);
+ }
+ }
+ return (PA_OK);
+}
+
+static int keystone_pa_reset(struct pa_device *pa_dev)
+{
+ struct pa_packet_id_alloc_regs __iomem *packet_id_regs = pa_dev->reg_packet_id;
+ struct pa_lut2_control_regs __iomem *lut2_regs = pa_dev->reg_lut2;
+ struct pa_statistics_regs __iomem *stats_regs = pa_dev->reg_stats;
+ u32 i;
+
+ /* Reset and disable all PDSPs */
+ for (i = 0; i < DEVICE_PA_NUM_PDSPS; i++) {
+ struct pa_pdsp_control_regs __iomem *ctrl_reg = &pa_dev->reg_control[i];
+ __raw_writel(PA_REG_VAL_PDSP_CTL_RESET_PDSP,
+ &ctrl_reg->control);
+
+ while((__raw_readl(&ctrl_reg->control)
+ & PA_REG_VAL_PDSP_CTL_STATE));
+ }
+
+ /* Reset packet Id */
+ __raw_writel(1, &packet_id_regs->soft_reset);
+
+ /* Reset LUT2 */
+ __raw_writel(1, &lut2_regs->soft_reset);
+
+ /* Reset statistic */
+ __raw_writel(1, &stats_regs->soft_reset);
+
+ /* Reset timers */
+ for (i = 0; i < DEVICE_PA_NUM_PDSPS; i++) {
+ struct pa_pdsp_timer_regs __iomem *timer_reg = &pa_dev->reg_timer[i];
+ __raw_writel(0, &timer_reg->timer_control);
+ }
+
+ return 0;
+}
+
+/*
+ * Convert a raw PA timer count to nanoseconds
+ *
+ * This assumes the PA timer frequency is 163,840,000 Hz.
+ * This is true for the TCI6614 EVM with default PLL settings,
+ * but is NOT a good generic assumption! Fix this later.
+ */
+static u64 tstamp_raw_to_ns(u64 raw)
+{
+	/* 10^9 ns / 163,840,000 Hz = 6.103515625 ns per tick = 50000 / 2^13 */
+ /* Take care not to exceed 64 bits! */
+
+ return (raw * 50000ULL) >> 13;
+}
+
+static u64 pa_to_sys_time(u64 offset, u64 pa_ticks)
+{
+ s64 temp;
+ u64 result;
+
+ /* we need to compute difference from wallclock
+ * to time from boot dynamically since
+ * it will change whenever absolute time is adjusted by
+ * protocols above (ntp, ptpv2)
+ */
+
+ temp = ktime_to_ns(ktime_get_monotonic_offset());
+ result = (u64)((s64)offset - temp + (s64)tstamp_raw_to_ns(pa_ticks));
+
+ return result;
+}
+
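+/*
+ * Read the raw 48-bit PA timestamp: the upper 32 bits are maintained in
+ * PA SRAM and the lower 16 bits come from the PDSP0 timer, which counts
+ * down from 0xffff (hence the inversion below).  The SRAM word is read
+ * before and after the timer so a rollover between the reads is retried.
+ */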
+static inline u64 tstamp_get_raw(struct pa_device *pa_dev)
+{
+ struct pa_pdsp_timer_regs __iomem *timer_reg = &pa_dev->reg_timer[0];
+ u32 low, high, high2;
+ u64 raw;
+ int count;
+
+ count = 0;
+ do {
+		high = __raw_readl(pa_dev->pa_sram + PAFRM_SYS_TIMESTAMP_ADDR);
+		low = __raw_readl(&timer_reg->timer_value);
+		high2 = __raw_readl(pa_dev->pa_sram + PAFRM_SYS_TIMESTAMP_ADDR);
+ } while((high != high2) && (++count < 32));
+
+ raw = (((u64)high) << 16) | (u64)(0x0000ffff - (low & 0x0000ffff));
+
+ return raw;
+}
+
+/*
+ * calibrate the PA timer to the system time
+ * ktime_get gives montonic time
+ * ktime_to_ns converts ktime to ns
+ * this needs to be called before doing conversions
+ */
+static void pa_calibrate_with_system_timer(struct pa_device *pa_dev)
+{
+ ktime_t ktime1, ktime2;
+ u64 pa_ticks;
+ u64 pa_ns;
+ u64 sys_ns1, sys_ns2;
+
+ /* Get the two values with minimum delay between */
+ ktime1 = ktime_get();
+ pa_ticks = tstamp_get_raw(pa_dev);
+ ktime2 = ktime_get();
+
+ /* Convert both values to nanoseconds */
+ sys_ns1 = ktime_to_ns(ktime1);
+ pa_ns = tstamp_raw_to_ns(pa_ticks);
+ sys_ns2 = ktime_to_ns(ktime2);
+
+	/* The midpoint of the two system readings approximates the system
+	 * time at which the PA counter was sampled; the difference from the
+	 * converted PA reading is the PA-to-system offset. */
+ pa_dev->pa2system_offset = sys_ns1 + ((sys_ns2 - sys_ns1) / 2) - pa_ns;
+}
+
+static int pa_config_timestamp(struct pa_device *pa_dev, int factor)
+{
+ struct pa_pdsp_timer_regs __iomem *timer_reg = &pa_dev->reg_timer[0];
+
+ if (factor < PA_TIMESTAMP_SCALER_FACTOR_2 ||
+ factor > PA_TIMESTAMP_SCALER_FACTOR_8192)
+ return -1;
+ else {
+ __raw_writel(0xffff, &timer_reg->timer_load);
+ __raw_writel((PA_SS_TIMER_CNTRL_REG_GO |
+ PA_SS_TIMER_CNTRL_REG_MODE |
+ PA_SS_TIMER_CNTRL_REG_PSE |
+ (factor << PA_SS_TIMER_CNTRL_REG_PRESCALE_SHIFT)),
+ &timer_reg->timer_control);
+ }
+
+ return 0;
+}
+
+static void pa_get_version(struct pa_device *pa_dev)
+{
+ u32 version;
+
+ version = __raw_readl(pa_dev->pa_sram + PAFRM_PDSP_VERSION_BASE);
+
+ dev_info(pa_dev->dev, "Using Packet Accelerator Firmware version "
+ "0x%08x\n", version);
+}
+
+static int pa_pdsp_run(struct pa_device *pa_dev, int pdsp)
+{
+ struct pa_pdsp_control_regs __iomem *ctrl_reg = &pa_dev->reg_control[pdsp];
+ struct pa_mailbox_regs __iomem *mailbox_reg = &pa_dev->reg_mailbox[pdsp];
+ u32 i, v;
+
+ /* Check for enabled PDSP */
+ v = __raw_readl(&ctrl_reg->control);
+ if ((v & PA_REG_VAL_PDSP_CTL_ENABLE) ==
+ PA_REG_VAL_PDSP_CTL_ENABLE) {
+ /* Already enabled */
+ return (PA_PDSP_ALREADY_ACTIVE);
+ }
+
+ /* Clear the mailbox */
+ __raw_writel(0, &mailbox_reg->pdsp_mailbox_slot0);
+
+ /* Set PDSP PC to 0, enable the PDSP */
+ __raw_writel(PA_REG_VAL_PDSP_CTL_ENABLE |
+ PA_REG_VAL_PDSP_CTL_SOFT_RESET,
+ &ctrl_reg->control);
+
+ /* Wait for the mailbox to become non-zero */
+	for (i = 0; i < PA_MAX_PDSP_ENABLE_LOOP_COUNT; i++) {
+		v = __raw_readl(&mailbox_reg->pdsp_mailbox_slot0);
+		if (v != 0)
+			return (PA_PDSP_RESET_RELEASED);
+	}
+
+ return (PA_PDSP_NO_RESTART);
+}
+
+static int keystone_pa_reset_control(struct pa_device *pa_dev, int new_state)
+{
+ struct pa_mailbox_regs __iomem *mailbox_reg = &pa_dev->reg_mailbox[0];
+ int do_global_reset = 1;
+ int i, res;
+ int ret;
+
+ if (new_state == PA_STATE_ENABLE) {
+ ret = PA_STATE_ENABLE;
+
+ /*
+ * Do nothing if a pdsp is already out of reset.
+ * If any PDSPs are out of reset
+ * a global init is not performed
+ */
+ for (i = 0; i < 6; i++) {
+ res = pa_pdsp_run(pa_dev, i);
+
+ if (res == PA_PDSP_ALREADY_ACTIVE)
+ do_global_reset = 0;
+
+ if (res == PA_PDSP_NO_RESTART) {
+ ret = PA_STATE_ENABLE_FAILED;
+ do_global_reset = 0;
+ }
+ }
+
+ /* If global reset is required any PDSP can do it */
+ if (do_global_reset) {
+ __raw_writel(1, &mailbox_reg->pdsp_mailbox_slot1);
+ __raw_writel(0, &mailbox_reg->pdsp_mailbox_slot0);
+
+ while (__raw_readl(&mailbox_reg->pdsp_mailbox_slot1) != 0);
+
+ for (i = 1; i < 6; i++) {
+ struct pa_mailbox_regs __iomem *mbox_reg =
+ &pa_dev->reg_mailbox[i];
+ __raw_writel(0,
+ &mbox_reg->pdsp_mailbox_slot0);
+ }
+ } else {
+ for (i = 0; i < 6; i++) {
+ struct pa_mailbox_regs __iomem *mbox_reg =
+ &pa_dev->reg_mailbox[i];
+ __raw_writel(0,
+ &mbox_reg->pdsp_mailbox_slot0);
+ }
+
+ }
+
+ return (ret);
+ }
+
+ return (PA_STATE_INVALID_REQUEST);
+}
+
+static int keystone_pa_set_firmware(struct pa_device *pa_dev,
+ int pdsp, const unsigned int *buffer, int len)
+{
+ struct pa_pdsp_control_regs __iomem *ctrl_reg = &pa_dev->reg_control[pdsp];
+
+ if ((pdsp < 0) || (pdsp >= DEVICE_PA_NUM_PDSPS))
+ return -EINVAL;
+
+ pdsp_fw_put((u32 *)(pa_dev->pa_iram + PA_MEM_PDSP_IRAM(pdsp)), buffer,
+ len >> 2);
+
+ __raw_writel(pap_pdsp_const_reg_map[pdsp][PA_PDSP_CONST_REG_INDEX_C25_C24],
+ &ctrl_reg->const_tbl_blk_index0);
+
+ __raw_writel(pap_pdsp_const_reg_map[pdsp][PA_PDSP_CONST_REG_INDEX_C27_C26],
+ &ctrl_reg->const_tbl_blk_index1);
+
+ __raw_writel(pap_pdsp_const_reg_map[pdsp][PA_PDSP_CONST_REG_INDEX_C29_C28],
+ &ctrl_reg->const_tbl_prog_pointer0);
+
+ __raw_writel(pap_pdsp_const_reg_map[pdsp][PA_PDSP_CONST_REG_INDEX_C31_C30],
+ &ctrl_reg->const_tbl_prog_pointer1);
+
+ return 0;
+}
+
+static struct pa_packet *pa_alloc_packet(struct pa_device *pa_dev,
+ unsigned cmd_size,
+ enum dma_transfer_direction direction)
+{
+ struct pa_packet *p_info;
+
+ p_info = kzalloc(sizeof(*p_info) + cmd_size, GFP_KERNEL);
+ if (!p_info)
+ return NULL;
+
+ p_info->priv = pa_dev;
+ p_info->data = p_info + 1;
+ p_info->direction = direction;
+ p_info->chan = (direction == DMA_MEM_TO_DEV) ? pa_dev->tx_channel :
+ pa_dev->rx_channel;
+
+ sg_init_table(p_info->sg, PA_SGLIST_SIZE);
+ sg_set_buf(&p_info->sg[0], p_info->epib, sizeof(p_info->epib));
+ sg_set_buf(&p_info->sg[1], p_info->psdata, sizeof(p_info->psdata));
+ sg_set_buf(&p_info->sg[2], p_info->data, cmd_size);
+
+ return p_info;
+}
+
+static void pa_tx_dma_callback(void *data)
+{
+ struct pa_packet *p_info = data;
+ struct pa_device *pa_dev = p_info->priv;
+
+ dma_unmap_sg(pa_dev->dev, &p_info->sg[2], 1, p_info->direction);
+
+ p_info->desc = NULL;
+
+ kfree(p_info);
+}
+
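+/*
+ * Map the command buffer and submit it on the TX channel as a
+ * three-entry scatterlist: EPIB words, PS data words and the command.
+ */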
+static int pa_submit_tx_packet(struct pa_packet *p_info)
+{
+ unsigned flags = DMA_HAS_EPIB | DMA_HAS_PSINFO;
+ struct pa_device *pa_dev = p_info->priv;
+ int ret;
+
+ ret = dma_map_sg(pa_dev->dev, &p_info->sg[2], 1, p_info->direction);
+ if (ret < 0)
+ return ret;
+
+ p_info->desc = dmaengine_prep_slave_sg(p_info->chan, p_info->sg, 3,
+ p_info->direction, flags);
+ if (IS_ERR_OR_NULL(p_info->desc)) {
+ dma_unmap_sg(pa_dev->dev, &p_info->sg[2], 1, p_info->direction);
+ return PTR_ERR(p_info->desc);
+ }
+
+ p_info->desc->callback = pa_tx_dma_callback;
+ p_info->desc->callback_param = p_info;
+ p_info->cookie = dmaengine_submit(p_info->desc);
+
+ return 0;
+}
+
+#define PA_CONTEXT_MASK 0xffff0000
+#define PA_CONTEXT_CONFIG 0xdead0000
+#define PA_CONTEXT_TSTAMP 0xbeef0000
+
+#define TSTAMP_TIMEOUT (HZ * 5) /* 5 seconds (arbitrary) */
+
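+/*
+ * TX timestamp reports from the PA arrive asynchronously.  Each request
+ * is tracked by a pending entry keyed on the context value passed in
+ * sw_info0; the response path (pa_rx_complete -> tstamp_complete)
+ * matches the returned context and delivers the timestamp on the socket
+ * error queue, while a timeout frees entries for which no report arrives.
+ */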
+struct tstamp_pending {
+ struct list_head list;
+ u32 context;
+ struct sock *sock;
+ struct sk_buff *skb;
+ struct pa_device *pa_dev;
+ struct timer_list timeout;
+};
+
+static spinlock_t tstamp_lock;
+static atomic_t tstamp_sequence = ATOMIC_INIT(0);
+static struct list_head tstamp_pending = LIST_HEAD_INIT(tstamp_pending);
+
+static struct tstamp_pending *tstamp_remove_pending(u32 context)
+{
+ struct tstamp_pending *pend;
+
+ spin_lock(&tstamp_lock);
+ list_for_each_entry(pend, &tstamp_pending, list) {
+ if (pend->context == context) {
+ del_timer(&pend->timeout);
+ list_del(&pend->list);
+ spin_unlock(&tstamp_lock);
+ return pend;
+ }
+ }
+ spin_unlock(&tstamp_lock);
+
+ return NULL;
+}
+
+static void tstamp_complete(u32, struct pa_packet *);
+
+static void tstamp_purge_pending(struct pa_device *pa_dev)
+{
+ struct tstamp_pending *pend;
+ int found;
+
+ /* This is ugly and inefficient, but very rarely executed */
+ do {
+ found = 0;
+
+ spin_lock(&tstamp_lock);
+ list_for_each_entry(pend, &tstamp_pending, list) {
+ if (pend->pa_dev == pa_dev) {
+ found = 1;
+ break;
+ }
+ }
+ spin_unlock(&tstamp_lock);
+
+ if (found)
+ tstamp_complete(pend->context, NULL);
+ } while(found);
+}
+
+static void tstamp_timeout(unsigned long context)
+{
+ tstamp_complete((u32)context, NULL);
+}
+
+static int tstamp_add_pending(struct tstamp_pending *pend)
+{
+ init_timer(&pend->timeout);
+ pend->timeout.expires = jiffies + TSTAMP_TIMEOUT;
+ pend->timeout.function = tstamp_timeout;
+ pend->timeout.data = (unsigned long)pend->context;
+
+ spin_lock(&tstamp_lock);
+ add_timer(&pend->timeout);
+ list_add_tail(&pend->list, &tstamp_pending);
+ spin_unlock(&tstamp_lock);
+
+ return 0;
+}
+
+static void tstamp_complete(u32 context, struct pa_packet *p_info)
+{
+ struct tstamp_pending *pend;
+ struct sock_exterr_skb *serr;
+ struct sk_buff *skb;
+ struct skb_shared_hwtstamps *sh_hw_tstamps;
+ u64 tx_timestamp;
+ u64 sys_time;
+ int err;
+
+ pend = tstamp_remove_pending(context);
+ if (!pend)
+ return;
+
+
+ skb = pend->skb;
+ if (!p_info) {
+ printk("%s(%p,NULL): timeout\n", __func__, pend);
+ kfree_skb(skb);
+ } else {
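+		/* 48-bit TX timestamp: low 32 bits in EPIB word 0, upper 16 bits in EPIB word 2 */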
+ tx_timestamp = p_info->epib[0];
+ tx_timestamp |= ((u64)(p_info->epib[2] & 0x0000ffff)) << 32;
+
+ sys_time = pa_to_sys_time(pend->pa_dev->pa2system_offset, tx_timestamp);
+
+ sh_hw_tstamps = skb_hwtstamps(skb);
+ memset(sh_hw_tstamps, 0, sizeof(*sh_hw_tstamps));
+ sh_hw_tstamps->hwtstamp = ns_to_ktime(tstamp_raw_to_ns(tx_timestamp));
+ sh_hw_tstamps->syststamp = ns_to_ktime(sys_time);
+
+ serr = SKB_EXT_ERR(skb);
+ memset(serr, 0, sizeof(*serr));
+ serr->ee.ee_errno = ENOMSG;
+ serr->ee.ee_origin = SO_EE_ORIGIN_TIMESTAMPING;
+
+ err = sock_queue_err_skb(pend->sock, skb);
+ if (err)
+ kfree_skb(skb);
+ }
+
+ kfree(pend);
+}
+
+static void pa_rx_complete(void *param)
+{
+ struct pa_packet *p_info = param;
+ struct pa_device *pa_dev = p_info->priv;
+ struct pa_frm_command *fcmd;
+
+ dma_unmap_sg(pa_dev->dev, &p_info->sg[2], 1, DMA_FROM_DEVICE);
+
+ switch (p_info->epib[1] & PA_CONTEXT_MASK) {
+ case PA_CONTEXT_CONFIG:
+ fcmd = p_info->data;
+ swizFcmd(fcmd);
+
+ if (fcmd->command_result != PAFRM_COMMAND_RESULT_SUCCESS) {
+ dev_dbg(pa_dev->dev, "Command Result = 0x%x\n", fcmd->command_result);
+ dev_dbg(pa_dev->dev, "Command = 0x%x\n", fcmd->command);
+ dev_dbg(pa_dev->dev, "Magic = 0x%x\n", fcmd->magic);
+ dev_dbg(pa_dev->dev, "Com ID = 0x%x\n", fcmd->com_id);
+ dev_dbg(pa_dev->dev, "ret Context = 0x%x\n", fcmd->ret_context);
+ dev_dbg(pa_dev->dev, "Flow ID = 0x%x\n", fcmd->flow_id);
+ dev_dbg(pa_dev->dev, "reply Queue = 0x%x\n", fcmd->reply_queue);
+ dev_dbg(pa_dev->dev, "reply dest = 0x%x\n", fcmd->reply_dest);
+ }
+ dev_dbg(pa_dev->dev, "command response complete\n");
+ break;
+
+ case PA_CONTEXT_TSTAMP:
+ tstamp_complete(p_info->epib[1], p_info);
+ break;
+
+ default:
+ dev_warn(pa_dev->dev, "bad response context, got 0x%08x\n", p_info->epib[1]);
+ break;
+ }
+
+ p_info->desc = NULL;
+ kfree(p_info);
+}
+
+/* Release a free receive buffer */
+static void pa_rxpool_free(void *arg, unsigned q_num, unsigned bufsize,
+ struct dma_async_tx_descriptor *desc)
+{
+ struct pa_device *pa_dev = arg;
+ struct pa_packet *p_info = desc->callback_param;
+
+ dma_unmap_sg(pa_dev->dev, &p_info->sg[2], 1, DMA_FROM_DEVICE);
+
+ p_info->desc = NULL;
+
+ kfree(p_info);
+}
+
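+/*
+ * Tasklet body: drain completed RX descriptors, refill the RX free
+ * descriptor queue and resume the channel paused in pa_chan_notify().
+ */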
+static void pa_chan_work_handler(unsigned long data)
+{
+ struct pa_device *pa_dev = (struct pa_device *)data;
+
+ dma_poll(pa_dev->rx_channel, -1);
+
+ dma_rxfree_refill(pa_dev->rx_channel);
+
+ dmaengine_resume(pa_dev->rx_channel);
+}
+
+static void pa_chan_notify(struct dma_chan *dma_chan, void *arg)
+{
+ struct pa_device *pa_dev = arg;
+
+ dmaengine_pause(pa_dev->rx_channel);
+
+ tasklet_schedule(&pa_dev->task);
+
+ return;
+}
+
+/* Allocate a free receive buffer */
+static struct dma_async_tx_descriptor *pa_rxpool_alloc(void *arg,
+ unsigned q_num, unsigned bufsize)
+{
+ struct pa_device *pa_dev = arg;
+ struct dma_async_tx_descriptor *desc;
+ struct dma_device *device;
+	int err = 0;
+
+ struct pa_packet *rx;
+
+ rx = pa_alloc_packet(pa_dev, bufsize, DMA_DEV_TO_MEM);
+ if (!rx) {
+ dev_err(pa_dev->dev, "could not allocate cmd rx packet\n");
+ return NULL;
+ }
+
+ rx->sg_ents = 2 + dma_map_sg(pa_dev->dev, &rx->sg[2],
+ 1, DMA_FROM_DEVICE);
+ if (rx->sg_ents != 3) {
+ dev_err(pa_dev->dev, "dma map failed\n");
+
+ kfree(rx);
+ return NULL;
+ }
+
+ device = rx->chan->device;
+
+ desc = dmaengine_prep_slave_sg(rx->chan, rx->sg, 3, DMA_DEV_TO_MEM,
+ DMA_HAS_EPIB | DMA_HAS_PSINFO);
+
+ if (IS_ERR_OR_NULL(desc)) {
+ dma_unmap_sg(pa_dev->dev, &rx->sg[2], 1, DMA_FROM_DEVICE);
+ kfree(rx);
+ err = PTR_ERR(desc);
+ if (err != -ENOMEM) {
+ dev_err(pa_dev->dev,
+ "dma prep failed, error %d\n", err);
+ }
+
+ return NULL;
+ }
+
+ desc->callback_param = rx;
+ desc->callback = pa_rx_complete;
+ rx->cookie = desc->cookie;
+
+ return desc;
+}
+
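+/*
+ * Build and submit a PAFRM "add to LUT1" command that matches on the
+ * given destination MAC address (and optionally the ethertype) at the
+ * given LUT1 index, routing matches according to 'rule': deliver to the
+ * host, continue parsing inside the PA, or drop.
+ */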
+static int keystone_pa_add_mac(struct pa_device *priv, const u8 *mac,
+ int rule, unsigned etype, int index)
+{
+ struct pa_route_info route_info, fail_info;
+ struct pa_frm_command *fcmd;
+ struct pa_frm_cmd_add_lut1 *al1;
+ struct pa_packet *tx;
+ u32 context = PA_CONTEXT_CONFIG;
+ int size, ret;
+
+ memset(&fail_info, 0, sizeof(fail_info));
+
+ memset(&route_info, 0, sizeof(route_info));
+
+ if (rule == PACKET_HST) {
+ route_info.dest = PA_DEST_HOST;
+ route_info.flow_id = priv->data_flow_num;
+ route_info.queue = priv->data_queue_num;
+ route_info.m_route_index = -1;
+ fail_info.dest = PA_DEST_HOST;
+ fail_info.flow_id = priv->data_flow_num;
+ fail_info.queue = priv->data_queue_num;
+ fail_info.m_route_index = -1;
+ } else if (rule == PACKET_PARSE) {
+ route_info.dest = PA_DEST_CONTINUE_PARSE_LUT1;
+ route_info.m_route_index = -1;
+ fail_info.dest = PA_DEST_HOST;
+ fail_info.flow_id = priv->data_flow_num;
+ fail_info.queue = priv->data_queue_num;
+ fail_info.m_route_index = -1;
+ } else if (rule == PACKET_DROP) {
+ route_info.dest = PA_DEST_DISCARD;
+ route_info.m_route_index = -1;
+ fail_info.dest = PA_DEST_DISCARD;
+ fail_info.m_route_index = -1;
+ }
+
+ size = (sizeof(struct pa_frm_command) +
+ sizeof(struct pa_frm_cmd_add_lut1) + 4);
+ tx = pa_alloc_packet(priv, size, DMA_MEM_TO_DEV);
+ if (!tx) {
+ dev_err(priv->dev, "could not allocate cmd tx packet\n");
+ return -ENOMEM;
+ }
+
+ fcmd = tx->data;
+ al1 = (struct pa_frm_cmd_add_lut1 *) &(fcmd->cmd);
+
+ fcmd->command_result = 0;
+ fcmd->command = PAFRM_CONFIG_COMMAND_ADDREP_LUT1;
+ fcmd->magic = PAFRM_CONFIG_COMMAND_SEC_BYTE;
+ fcmd->com_id = PA_COMID_L2;
+ fcmd->ret_context = context;
+ fcmd->flow_id = priv->cmd_flow_num;
+ fcmd->reply_queue = priv->cmd_queue_num;
+ fcmd->reply_dest = PAFRM_DEST_PKTDMA;
+
+ al1->index = index;
+ al1->type = PAFRM_COM_ADD_LUT1_STANDARD;
+
+ if (etype) {
+ al1->u.eth_ip.etype = etype;
+ al1->u.eth_ip.match_flags |= PAFRM_LUT1_CUSTOM_MATCH_ETYPE;
+ }
+
+ al1->u.eth_ip.vlan = 0;
+ al1->u.eth_ip.pm.mpls = 0;
+
+ if (mac) {
+ al1->u.eth_ip.dmac[0] = mac[0];
+ al1->u.eth_ip.dmac[1] = mac[1];
+ al1->u.eth_ip.dmac[2] = mac[2];
+ al1->u.eth_ip.dmac[3] = mac[3];
+ al1->u.eth_ip.dmac[4] = mac[4];
+ al1->u.eth_ip.dmac[5] = mac[5];
+ al1->u.eth_ip.key |= PAFRM_LUT1_KEY_MAC;
+ }
+
+ al1->u.eth_ip.smac[0] = 0;
+ al1->u.eth_ip.smac[1] = 0;
+ al1->u.eth_ip.smac[2] = 0;
+ al1->u.eth_ip.smac[3] = 0;
+ al1->u.eth_ip.smac[4] = 0;
+ al1->u.eth_ip.smac[5] = 0;
+ ret = pa_conv_routing_info(&al1->match, &route_info, 0, 0);
+ if (ret != 0)
+ dev_err(priv->dev, "route info config failed\n");
+
+ ret = pa_conv_routing_info(&al1->next_fail, &fail_info, 0, 1);
+ if (ret != 0)
+ dev_err(priv->dev, "fail info config failed\n");
+
+ swizFcmd(fcmd);
+ swizAl1((struct pa_frm_cmd_add_lut1 *)&(fcmd->cmd));
+
+ tx->psdata[0] = ((u32)(4 << 5) << 24);
+
+ tx->epib[1] = 0x11112222;
+ tx->epib[2] = 0x33334444;
+ tx->epib[3] = 0;
+
+ pa_submit_tx_packet(tx);
+ dev_dbg(priv->dev, "waiting for command transmit complete\n");
+
+ return 0;
+}
+
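+/*
+ * Push a "next route" command into the packet's PS data, directing the
+ * PA to forward the packet to the selected destination (host or EMAC).
+ */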
+static int pa_fmtcmd_next_route(struct netcp_packet *p_info, const struct pa_cmd_next_route *route)
+{
+ struct pasaho_next_route *nr;
+ int size;
+ u16 pdest;
+
+ /* Make sure the destination is valid */
+ switch (route->dest) {
+ case PA_DEST_HOST:
+ pdest = PAFRM_DEST_PKTDMA;
+ break;
+ case PA_DEST_EMAC:
+ pdest = PAFRM_DEST_ETH;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ size = route->pkt_type_emac_ctrl ? sizeof(*nr) : (sizeof(*nr) - sizeof(nr->word1));
+ nr = (struct pasaho_next_route *)netcp_push_psdata(p_info, size);
+ if (!nr)
+ return -ENOMEM;
+
+ if (route->pkt_type_emac_ctrl) {
+ u8 ps_flags;
+ PASAHO_SET_E(nr, 1);
+
+ ps_flags = (route->pkt_type_emac_ctrl & PA_EMAC_CTRL_CRC_DISABLE) ?
+ PAFRM_ETH_PS_FLAGS_DISABLE_CRC : 0;
+
+ ps_flags |= ((route->pkt_type_emac_ctrl & PA_EMAC_CTRL_PORT_MASK) <<
+ PAFRM_ETH_PS_FLAGS_PORT_SHIFT);
+
+ PASAHO_SET_PKTTYPE(nr, ps_flags);
+ }
+
+ PASAHO_SET_CMDID(nr, PASAHO_PAMOD_NROUTE);
+ PASAHO_SET_DEST(nr, pdest);
+ PASAHO_SET_FLOW(nr, route->flow_id);
+ PASAHO_SET_QUEUE (nr, route->queue);
+
+ if (route->ctrl_bit_field & PA_NEXT_ROUTE_PROC_NEXT_CMD)
+ PASAHO_SET_N (nr, 1);
+
+ nr->sw_info0 = route->sw_info_0;
+ nr->sw_info1 = route->sw_info_1;
+
+ return size;
+}
+
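+/*
+ * Push a "report TX timestamp" command: the PA returns a timestamp
+ * report to the given flow and queue with sw_info0 as the context.
+ */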
+static int pa_fmtcmd_tx_timestamp(struct netcp_packet *p_info, const struct pa_cmd_tx_timestamp *tx_ts)
+{
+ struct pasaho_report_timestamp *rt_info;
+ int size;
+
+ size = sizeof(*rt_info);
+ rt_info = (struct pasaho_report_timestamp *)netcp_push_psdata(p_info, size);
+ if (!rt_info)
+ return -ENOMEM;
+
+ PASAHO_SET_CMDID(rt_info, PASAHO_PAMOD_REPORT_TIMESTAMP);
+ PASAHO_SET_REPORT_FLOW(rt_info, (u8)tx_ts->flow_id);
+ PASAHO_SET_REPORT_QUEUE(rt_info, tx_ts->dest_queue);
+ rt_info->sw_info0 = tx_ts->sw_info0;
+
+ return size;
+}
+
+static int pa_fmtcmd_align(struct netcp_packet *p_info, const unsigned bytes)
+{
+ struct pasaho_cmd_info *paCmdInfo;
+ int i;
+
+ if ((bytes & 0x03) != 0)
+ return -EINVAL;
+
+ paCmdInfo = (struct pasaho_cmd_info *)netcp_push_psdata(p_info, bytes);
+
+ for (i = bytes/sizeof(u32); i > 0; --i ) {
+ PASAHO_SET_CMDID(paCmdInfo, PASAHO_PAMOD_DUMMY);
+ ++paCmdInfo;
+ }
+
+ return bytes;
+}
+
+#define PA_TXHOOK_ORDER 10
+#define PA_RXHOOK_ORDER 10
+
+static int pa_tx_hook(int order, void *data, struct netcp_packet *p_info)
+{
+ struct netcp_module_data *module = data;
+ struct pa_device *pa_dev = pa_from_module(module);
+ static const struct pa_cmd_next_route route_cmd = {
+ 0, /* ctrlBitfield */
+		PA_DEST_EMAC,	/* Route - EMAC */
+ 0, /* pktType don't care */
+ 0, /* flow Id */
+ 0, /* Queue */
+ 0, /* SWInfo 0 */
+ 0, /* SWInfo 1 */
+ 0,
+ };
+ struct pa_cmd_tx_timestamp tx_ts;
+ int align, total = 0;
+
+ total += pa_fmtcmd_next_route(p_info, &route_cmd);
+
+ if ((skb_shinfo(p_info->skb)->tx_flags & SKBTX_HW_TSTAMP) && p_info->skb->sk) {
+ struct tstamp_pending *pend;
+
+ pend = kzalloc(sizeof(*pend), GFP_ATOMIC);
+ if (pend) {
+ pend->skb = skb_clone(p_info->skb, GFP_ATOMIC);
+ if (!pend->skb)
+ kfree(pend);
+ else {
+ pend->sock = p_info->skb->sk;
+ pend->pa_dev = pa_dev;
+ pend->context = PA_CONTEXT_TSTAMP |
+ (~PA_CONTEXT_MASK & atomic_inc_return(&tstamp_sequence));
+ tstamp_add_pending(pend);
+
+ memset(&tx_ts, 0, sizeof(tx_ts));
+ tx_ts.dest_queue = pa_dev->cmd_queue_num;
+ tx_ts.flow_id = pa_dev->cmd_flow_num;
+ tx_ts.sw_info0 = pend->context;
+
+ total += pa_fmtcmd_tx_timestamp(p_info, &tx_ts);
+ }
+ }
+ }
+
+ align = netcp_align_psdata(p_info, 8);
+ if (align)
+ total += pa_fmtcmd_align(p_info, align);
+
+ p_info->tx_pipe = &pa_dev->tx_pipe;
+ return 0;
+}
+
+static int pa_rx_timestamp(int order, void *data, struct netcp_packet *p_info)
+{
+ struct netcp_module_data *module = data;
+ struct pa_device *pa_dev = pa_from_module(module);
+ struct netcp_priv *netcp = module->priv;
+ struct sk_buff *skb = p_info->skb;
+ struct skb_shared_hwtstamps *sh_hw_tstamps;
+ u64 rx_timestamp;
+ u64 sys_time;
+
+ if (!netcp->hwts_rx_en)
+ return 0;
+
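+	/* 48-bit RX timestamp: low 32 bits in EPIB word 0, upper 16 bits in PS data word 4 */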
+ rx_timestamp = p_info->epib[0];
+ rx_timestamp |= ((u64)(p_info->psdata[4] & 0x0000ffff)) << 32;
+
+ sys_time = pa_to_sys_time(pa_dev->pa2system_offset, rx_timestamp);
+
+ sh_hw_tstamps = skb_hwtstamps(skb);
+ memset(sh_hw_tstamps, 0, sizeof(*sh_hw_tstamps));
+ sh_hw_tstamps->hwtstamp = ns_to_ktime(tstamp_raw_to_ns(rx_timestamp));
+ sh_hw_tstamps->syststamp = ns_to_ktime(sys_time);
+
+ return 0;
+}
+
+static int pa_close(struct netcp_module_data *data)
+{
+ struct pa_device *pa_dev = pa_from_module(data);
+
+ netcp_unregister_txhook(data->priv, PA_TXHOOK_ORDER, pa_tx_hook, data);
+ netcp_unregister_rxhook(data->priv, PA_RXHOOK_ORDER, pa_rx_timestamp, data);
+
+ tasklet_disable(&pa_dev->task);
+
+ tstamp_purge_pending(pa_dev);
+
+ if (pa_dev->tx_channel) {
+ dmaengine_pause(pa_dev->tx_channel);
+ dma_release_channel(pa_dev->tx_channel);
+ pa_dev->tx_channel = NULL;
+ }
+
+ if (pa_dev->tx_pipe.dma_channel) {
+ dmaengine_pause(pa_dev->tx_pipe.dma_channel);
+ dma_release_channel(pa_dev->tx_pipe.dma_channel);
+ pa_dev->tx_pipe.dma_channel = NULL;
+ }
+
+ if (pa_dev->rx_channel) {
+ dmaengine_pause(pa_dev->rx_channel);
+ dma_release_channel(pa_dev->rx_channel);
+ pa_dev->rx_channel = NULL;
+ }
+
+ if (pa_dev->clk) {
+ clk_disable_unprepare(pa_dev->clk);
+ clk_put(pa_dev->clk);
+ }
+ pa_dev->clk = NULL;
+
+ return 0;
+}
+
+static int pa_open(struct netcp_module_data *data, struct net_device *ndev)
+{
+ struct pa_device *pa_dev = pa_from_module(data);
+ const struct firmware *fw;
+ const u8 bcast_addr[6] = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff };
+ struct dma_chan *chan;
+ struct dma_keystone_info config;
+ dma_cap_mask_t mask;
+ int i, factor;
+ int ret, err;
+
+ pa_dev->clk = clk_get(pa_dev->dev, "clk_pa");
+ if (IS_ERR_OR_NULL(pa_dev->clk)) {
+ dev_err(pa_dev->dev, "unable to get Packet Accelerator clock\n");
+ pa_dev->clk = NULL;
+ return -EBUSY;
+ }
+
+ clk_prepare_enable(pa_dev->clk);
+
+ keystone_pa_reset(pa_dev);
+
+ /* Configure the streaming switch */
+ __raw_writel(PSTREAM_ROUTE_PDSP0, pa_dev->streaming_switch);
+
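+	/* PDSPs 0-2 run the classify1 image, PDSP 3 classify2, and PDSPs 4-5 the PAM image */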
+ for (i = 0; i <= 5; i++) {
+ if (i <= 2)
+ ret = request_firmware(&fw, DEVICE_PA_PDSP02_FIRMWARE, pa_dev->dev);
+ else if (i == 3)
+ ret = request_firmware(&fw, DEVICE_PA_PDSP3_FIRMWARE, pa_dev->dev);
+ else if (i > 3)
+ ret = request_firmware(&fw, DEVICE_PA_PDSP45_FIRMWARE, pa_dev->dev);
+ if (ret != 0) {
+ dev_err(pa_dev->dev, "cannot find firmware for pdsp %d\n", i);
+ ret = -ENODEV;
+ goto fail;
+ }
+
+ /* Download the firmware to the PDSP */
+ keystone_pa_set_firmware(pa_dev, i,
+ (const unsigned int*) fw->data,
+ fw->size);
+
+ release_firmware(fw);
+ }
+
+ ret = keystone_pa_reset_control(pa_dev, PA_STATE_ENABLE);
+ if (ret != 1) {
+ dev_err(pa_dev->dev, "enabling failed, ret = %d\n", ret);
+ ret = -ENODEV;
+ goto fail;
+ }
+
+ pa_get_version(pa_dev);
+
+ factor = PA_TIMESTAMP_SCALER_FACTOR_2;
+
+ ret = pa_config_timestamp(pa_dev, factor);
+ if (ret != 0) {
+ dev_err(pa_dev->dev, "timestamp configuration failed, ret = %d\n", ret);
+ ret = -ENODEV;
+ goto fail;
+ }
+
+ pa_dev->pa2system_offset = 0;
+ pa_calibrate_with_system_timer(pa_dev);
+
+ dma_cap_zero(mask);
+ dma_cap_set(DMA_SLAVE, mask);
+
+ /* Open the PA Command transmit channel */
+ pa_dev->tx_channel = dma_request_channel_by_name(mask, "patx-cmd");
+ if (IS_ERR_OR_NULL(pa_dev->tx_channel)) {
+ dev_err(pa_dev->dev, "Could not get PA TX command channel\n");
+ pa_dev->tx_channel = NULL;
+ ret = -ENODEV;
+ goto fail;
+ }
+
+ memset(&config, 0, sizeof(config));
+
+ config.direction = DMA_MEM_TO_DEV;
+ config.tx_queue_depth = pa_dev->tx_cmd_queue_depth;
+
+ err = dma_keystone_config(pa_dev->tx_channel, &config);
+ if (err)
+ goto fail;
+
+ /* Open the PA Data transmit channel */
+ pa_dev->tx_pipe.dma_chan_name = "patx-dat";
+ pa_dev->tx_pipe.dma_channel = dma_request_channel_by_name(mask, pa_dev->tx_pipe.dma_chan_name);
+ if (IS_ERR_OR_NULL(pa_dev->tx_pipe.dma_channel)) {
+ dev_err(pa_dev->dev, "Could not get PA TX data channel\n");
+ pa_dev->tx_pipe.dma_channel = NULL;
+ ret = -ENODEV;
+ goto fail;
+ }
+
+ memset(&config, 0, sizeof(config));
+
+ config.direction = DMA_MEM_TO_DEV;
+ config.tx_queue_depth = pa_dev->tx_data_queue_depth;
+
+ err = dma_keystone_config(pa_dev->tx_pipe.dma_channel, &config);
+ if (err)
+ goto fail;
+
+ pa_dev->tx_pipe.dma_queue = dma_get_tx_queue(pa_dev->tx_pipe.dma_channel);
+ pa_dev->tx_pipe.dma_poll_threshold = config.tx_queue_depth / 2;
+ atomic_set(&pa_dev->tx_pipe.dma_poll_count, pa_dev->tx_pipe.dma_poll_threshold);
+
+
+ /* Open the PA common response channel */
+ pa_dev->rx_channel = dma_request_channel_by_name(mask, "parx");
+ if (IS_ERR_OR_NULL(pa_dev->rx_channel)) {
+ dev_err(pa_dev->dev, "Could not get PA RX channel\n");
+ pa_dev->rx_channel = NULL;
+ ret = -ENODEV;
+ goto fail;
+ }
+
+ memset(&config, 0, sizeof(config));
+
+ config.direction = DMA_DEV_TO_MEM;
+ config.scatterlist_size = PA_SGLIST_SIZE;
+ config.rxpool_allocator = pa_rxpool_alloc;
+ config.rxpool_destructor = pa_rxpool_free;
+ config.rxpool_param = pa_dev;
+ config.rxpool_count = 1;
+ config.rxpool_thresh_enable = DMA_THRESH_NONE;
+ config.rxpools[0].pool_depth = pa_dev->rx_pool_depth;
+ config.rxpools[0].buffer_size = pa_dev->rx_buffer_size;
+
+ err = dma_keystone_config(pa_dev->rx_channel, &config);
+ if (err)
+ goto fail;
+
+ tasklet_init(&pa_dev->task, pa_chan_work_handler,
+ (unsigned long) pa_dev);
+
+ dma_set_notify(pa_dev->rx_channel, pa_chan_notify, pa_dev);
+
+ chan = netcp_get_rx_chan(data->priv);
+
+ pa_dev->data_flow_num = dma_get_rx_flow(chan);
+ pa_dev->data_queue_num = dma_get_rx_queue(chan);
+ pa_dev->cmd_flow_num = dma_get_rx_flow(pa_dev->rx_channel);
+ pa_dev->cmd_queue_num = dma_get_rx_queue(pa_dev->rx_channel);
+
+ dev_dbg(pa_dev->dev, "configuring command receive flow %d, queue %d\n",
+ pa_dev->cmd_flow_num, pa_dev->cmd_queue_num);
+
+ pa_dev->addr_count = 0;
+
+ dma_rxfree_refill(pa_dev->rx_channel);
+
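+	/*
+	 * Install the default LUT1 MAC entries: a catch-all route to the
+	 * host (index 63), broadcast (62), the interface address (61), and
+	 * the interface address with IPv4 (0x0800) and IPv6 (0x86dd)
+	 * ethertypes routed back into the PA for further parsing (60, 59).
+	 */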
+ ret = keystone_pa_add_mac(pa_dev, NULL, PACKET_HST, 0, 63);
+ ret = keystone_pa_add_mac(pa_dev, bcast_addr, PACKET_HST, 0, 62);
+ ret = keystone_pa_add_mac(pa_dev, ndev->dev_addr, PACKET_HST, 0, 61);
+ ret = keystone_pa_add_mac(pa_dev, ndev->dev_addr, PACKET_PARSE, 0x0800, 60);
+ ret = keystone_pa_add_mac(pa_dev, ndev->dev_addr, PACKET_PARSE, 0x86dd, 59);
+ pa_dev->addr_count = 5;
+
+ netcp_register_txhook(data->priv, PA_TXHOOK_ORDER, pa_tx_hook, data);
+ netcp_register_rxhook(data->priv, PA_RXHOOK_ORDER, pa_rx_timestamp, data);
+ return 0;
+
+fail:
+ pa_close(data);
+ return ret;
+}
+
+static int pa_remove(struct netcp_module_data *data)
+{
+ struct pa_device *pa_dev = pa_from_module(data);
+ struct device *dev = pa_dev->dev;
+
+	devm_iounmap(dev, pa_dev->reg_mailbox);
+ devm_iounmap(dev, pa_dev->reg_packet_id);
+ devm_iounmap(dev, pa_dev->reg_lut2);
+ devm_iounmap(dev, pa_dev->reg_control);
+ devm_iounmap(dev, pa_dev->reg_timer);
+ devm_iounmap(dev, pa_dev->reg_stats);
+ devm_iounmap(dev, pa_dev->pa_iram);
+ devm_iounmap(dev, pa_dev->pa_sram);
+ devm_iounmap(dev, pa_dev->streaming_switch);
+
+	devm_kfree(dev, pa_dev);
+
+ return 0;
+}
+
+static struct netcp_module_data *pa_probe(struct device *dev,
+ struct device_node *node)
+{
+ struct pa_device *pa_dev;
+ int ret = 0;
+
+ pa_dev = devm_kzalloc(dev, sizeof(struct pa_device), GFP_KERNEL);
+ if (!pa_dev) {
+ dev_err(dev, "memory allocation failed\n");
+ ret = -ENOMEM;
+ goto exit;
+ }
+
+ if (!node) {
+ dev_err(dev, "device tree info unavailable\n");
+ ret = -ENODEV;
+ goto exit;
+ }
+
+ pa_dev->dev = dev;
+
+ ret = of_property_read_u32(node, "tx_cmd_queue_depth",
+ &pa_dev->tx_cmd_queue_depth);
+ if (ret < 0) {
+ dev_err(dev, "missing tx_cmd_queue_depth parameter, err %d\n",
+ ret);
+ pa_dev->tx_cmd_queue_depth = 32;
+ }
+ dev_dbg(dev, "tx_cmd_queue_depth %u\n", pa_dev->tx_cmd_queue_depth);
+
+ ret = of_property_read_u32(node, "tx_data_queue_depth",
+ &pa_dev->tx_data_queue_depth);
+ if (ret < 0) {
+ dev_err(dev, "missing tx_data_queue_depth parameter, err %d\n",
+ ret);
+ pa_dev->tx_data_queue_depth = 32;
+ }
+ dev_dbg(dev, "tx_data_queue_depth %u\n", pa_dev->tx_data_queue_depth);
+
+ ret = of_property_read_u32(node, "rx_pool_depth",
+ &pa_dev->rx_pool_depth);
+ if (ret < 0) {
+ dev_err(dev, "missing rx_pool_depth parameter, err %d\n",
+ ret);
+ pa_dev->rx_pool_depth = 32;
+ }
+ dev_dbg(dev, "rx_pool_depth %u\n", pa_dev->rx_pool_depth);
+
+ ret = of_property_read_u32(node, "rx_buffer_size",
+ &pa_dev->rx_buffer_size);
+ if (ret < 0) {
+ dev_err(dev, "missing rx_buffer_size parameter, err %d\n",
+ ret);
+ pa_dev->rx_buffer_size = 128;
+ }
+ dev_dbg(dev, "rx_buffer_size %u\n", pa_dev->rx_buffer_size);
+
+ pa_dev->reg_mailbox = devm_ioremap(dev, 0x2000000, 0x60);
+ pa_dev->reg_packet_id = devm_ioremap(dev, 0x2000400, 0x10);
+ pa_dev->reg_lut2 = devm_ioremap(dev, 0x2000500, 0x40);
+ pa_dev->reg_control = devm_ioremap(dev, 0x2001000, 0x600);
+ pa_dev->reg_timer = devm_ioremap(dev, 0x2003000, 0x600);
+ pa_dev->reg_stats = devm_ioremap(dev, 0x2006000, 0x100);
+ pa_dev->pa_iram = devm_ioremap(dev, 0x2010000, 0x30000);
+ pa_dev->pa_sram = devm_ioremap(dev, 0x2040000, 0x8000);
+ pa_dev->streaming_switch = devm_ioremap(dev, 0x02000604, 4);
+
+ if (!pa_dev->reg_mailbox || !pa_dev->reg_packet_id ||
+ !pa_dev->reg_lut2 || !pa_dev->reg_control ||
+ !pa_dev->reg_timer || !pa_dev->reg_stats ||
+ !pa_dev->pa_sram || !pa_dev->pa_iram ||
+ !pa_dev->streaming_switch) {
+ dev_err(dev, "failed to set up register areas\n");
+ ret = -ENOMEM;
+ goto exit;
+ }
+
+ spin_lock_init(&pa_dev->lock);
+ spin_lock_init(&tstamp_lock);
+
+ pa_dev->module.open = pa_open;
+ pa_dev->module.close = pa_close;
+ pa_dev->module.remove = pa_remove;
+
+ return pa_to_module(pa_dev);
+exit:
+ return NULL;
+}
+
+static struct netcp_module pa_module = {
+ .name = "keystone-pa",
+ .owner = THIS_MODULE,
+ .probe = pa_probe,
+};
+
+static int __init keystone_pa_init(void)
+{
+ return netcp_register_module(&pa_module);
+}
+subsys_initcall(keystone_pa_init);
+
+static void __exit keystone_pa_exit(void)
+{
+ netcp_unregister_module(&pa_module);
+}
+module_exit(keystone_pa_exit);
+
+MODULE_LICENSE("GPL v2");
+MODULE_AUTHOR("Sandeep Paulraj <s-paulraj@ti.com>");
+MODULE_DESCRIPTION("Packet Accelerator driver for Keystone devices");
diff --git a/drivers/net/ethernet/ti/keystone_pa.h b/drivers/net/ethernet/ti/keystone_pa.h
new file mode 100644
index 00000000000000..80929988fc133a
--- /dev/null
+++ b/drivers/net/ethernet/ti/keystone_pa.h
@@ -0,0 +1,737 @@
+/*
+ * Copyright (C) 2012 Texas Instruments Incorporated
+ * Author: Sandeep Paulraj <s-paulraj@ti.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation version 2.
+ *
+ * This program is distributed "as is" WITHOUT ANY WARRANTY of any
+ * kind, whether express or implied; without even the implied warranty
+ * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef KEYSTONE_PA_H
+#define KEYSTONE_PA_H
+
+#ifdef __KERNEL__
+
+
+struct pa_pdsp_config {
+
+ u32 pdsp[6];
+
+ char *pdsp_fw[6];
+};
+
+#define PAFRM_MAX_CMD_SET_SIZE 124
+
+#define PA_DEST_DISCARD 3 /**< Packet is discarded */
+
+/**
+ * @def PA_DEST_CONTINUE_PARSE_LUT1
+ * packet remains in PA sub-system for more parsing and LUT1 classification
+ */
+#define PA_DEST_CONTINUE_PARSE_LUT1 4 /**< Packet remains in PA sub-system for more parsing and LUT1 classification */
+
+/**
+ * @def PA_DEST_CONTINUE_PARSE_LUT2
+ * packet remains in PA sub-system for more parsing and LUT2 classification.
+ */
+#define PA_DEST_CONTINUE_PARSE_LUT2 5 /**< Packet remains in PA sub-system for more parsing and LUT2 classification */
+
+/**
+ * @def PA_DEST_HOST
+ * host thread
+ */
+#define PA_DEST_HOST 6 /**< Packet is routed to host */
+
+/**
+ * @def PA_DEST_EMAC
+ * ethernet mac port (of the switch)
+ */
+#define PA_DEST_EMAC 7 /**< Packet is routed to EMAC */
+
+/**
+ * @def PA_DEST_SASS
+ * security accelerator destination
+ */
+#define PA_DEST_SASS 8 /**< Packet is routed to SA */
+
+#define PA_DEST_SRIO 9
+
+#define PA_NO_MULTI_ROUTE -1
+#define PA_MAX_MULTI_ROUTE_SETS 32
+#define PA_MAX_MULTI_ROUTE_ENTRIES 8
+#define PA_MULTI_ROUTE_DESCRIPTOR_ONLY 0x01
+
+#define PA_EMAC_CTRL_PORT_MASK 0x0F
+#define PA_EMAC_CTRL_CRC_DISABLE 0x80
+#define PA_CUSTOM_TYPE_NONE 0
+#define PA_CUSTOM_TYPE_LUT1 1
+#define PA_CUSTOM_TYPE_LUT2 2
+#define PA_MAX_CUSTOM_TYPES_LUT1 4
+#define PA_MAX_CUSTOM_TYPES_LUT2 4
+
+#define PA_CMD_TX_DEST_0 0 /* Packet is sent to PDSP0 */
+#define PA_CMD_TX_DEST_1 1 /* Packet is sent to PDSP1 */
+#define PA_CMD_TX_DEST_2 2 /* Packet is sent to PDSP2 */
+#define PA_CMD_TX_DEST_3 3 /* Packet is sent to PDSP3 */
+#define PA_CMD_TX_DEST_4 4 /* Packet is sent to PDSP4 */
+#define PA_CMD_TX_DEST_5 5 /* Packet is sent to PDSP5 */
+
+#define PA_CMD_NONE 0
+#define PA_CMD_NEXT_ROUTE 1
+#define PA_CMD_CRC_OP 2
+#define PA_CMD_COPY_DATA_TO_PSINFO 3
+#define PA_CMD_PATCH_DATA 4
+#define PA_CMD_TX_CHECKSUM 5
+#define PA_CMD_MULTI_ROUTE 6
+#define PA_CMD_REPORT_TX_TIMESTAMP 7
+#define PA_CMD_REMOVE_HEADER 8
+#define PA_CMD_REMOVE_TAIL 9
+#define PA_CMD_CMDSET 10
+#define PA_CMD_SA_PAYLOAD 11
+#define PA_CMD_IP_FRAGMENT 12
+#define PA_CMD_USR_STATS 13
+#define PA_CMD_CMDSET_AND_USR_STATS 14
+
+struct pa_frm_forward_host {
+
+ u32 context; /* Context returned as swInfo0 for matched packet */
+ u8 multi_route; /* True if multiple destination enabled */
+ u8 multi_idx; /* Index of the multiple destination set */
+ u8 pa_pdsp_router; /* PA PDSP number used as multi-route router */
+ u8 rsvd2;
+ u8 cmd[4]; /* optional simple command: 0 means no command */
+}; /* 12 bytes */
+
+/*
+ * Routing information used to forward packets to the SA (via PKTDMA)
+ */
+struct pa_frm_forward_sa {
+
+ u32 sw_info_0; /* Packet descriptor swInfo0 required by SA operation */
+ u32 sw_info_1; /* Packet descriptor swInfo1 required by SA operation */
+ u8 cmd[4]; /* optional simple command: 0 means no command */
+};
+
+/*
+ * Routing information used to forward packets to the SRIO (via PKTDMA)
+ */
+struct pa_frm_forward_srio {
+
+ u32 ps_info0; /* 8-byte protocol-specific information required by SRIO */
+ u32 ps_info1; /* routing */
+ u8 pkt_type; /* Packet type specified for SRIO operation */
+ u8 rsv4[3];
+};
+
+/*
+ * Routing information used to forward packets to the Ethernet port
+ */
+struct pa_frm_forward_eth {
+	u8 ps_flags;	/* bits 7:4 are used: bit 7 disables CRC, bits 6:4 select the port (0/1/2); bits 3:0 (errflags) must be 0 */
+ u8 rsvd1;
+ u16 rsvd2;
+ u32 rsvd3;
+ u32 rsvd4;
+};
+
+#define PAFRM_ETH_PS_FLAGS_DISABLE_CRC 0x80
+#define PAFRM_ETH_PS_FLAGS_PORT_MASK 0x70
+#define PAFRM_ETH_PS_FLAGS_PORT_SHIFT 4
+
+
+/* Routing information used to forward packets within PA */
+struct pa_frm_forward_pa {
+
+ u8 pa_dest; /* PDSP destination */
+ u8 custom_type; /* None, LUT1, LUT2 */
+ u8 custom_idx; /* Index of the custom type if LUT1 or LUT2 custom */
+ u8 rsvd2;
+ u32 rsvd3;
+ u32 rsvd4;
+};
+
+#define PAFRM_CUSTOM_TYPE_NONE PA_CUSTOM_TYPE_NONE /* 0 */
+#define PAFRM_CUSTOM_TYPE_LUT1 PA_CUSTOM_TYPE_LUT1 /* 1 */
+#define PAFRM_CUSTOM_TYPE_LUT2 PA_CUSTOM_TYPE_LUT2 /* 2 */
+
+/*
+ * Routing information used to forward packets from the PA sub-system to various destinations
+ */
+struct pa_frm_forward {
+
+ u8 forward_type; /* Forwarding type as defined below */
+ u8 flow_id; /* PKTDMA flow Id, valid if forwarding via PKTDMA */
+ u16 queue; /* Destination queue number, valid if forwarding via PKTDMA */
+
+ union {
+ struct pa_frm_forward_host host; /* Host specific routing information */
+ struct pa_frm_forward_sa sa; /* SA specific routing information */
+ struct pa_frm_forward_srio srio; /* SRIO specific routing information */
+ struct pa_frm_forward_eth eth; /* Ethernet specific routing information */
+ struct pa_frm_forward_pa pa; /* PA internal routing information */
+ } u;
+};
+
+enum {
+ PAFRM_FORWARD_TYPE_HOST = 0, /* use PAFRM_DEST_CDMA */
+ PAFRM_FORWARD_TYPE_SA, /* use PAFRM_DEST_CDMA */
+ PAFRM_FORWARD_TYPE_PA, /* use pa.paDest */
+ PAFRM_FORWARD_TYPE_ETH, /* use PAFRM_DEST_ETH */
+ PAFRM_FORWARD_TYPE_SRIO, /* use PAFRM_DEST_CDMA */
+ PAFRM_FORWARD_TYPE_DISCARD
+};
+
+#define PAFRM_LUT1_KEY_MAC (1 << 0)
+
+/* Custom match flag bits */
+#define PAFRM_LUT1_CUSTOM_MATCH_ETYPE (1 << 2)
+#define PAFRM_LUT1_CUSTOM_MATCH_VLAN (1 << 3)
+#define PAFRM_LUT1_CUSTOM_MATCH_MATCH (3 << 4) /* Ipv6 source and dest entries */
+#define PAFRM_LUT1_CUSTOM_MATCH_KEY (1 << 13)
+#define PAFRM_LUT1_CUSTOM_MATCH_VALID (1 << 15)
+
+/* Key values. The PDSP will set these bits as it parses the SRIO header */
+#define PAFRM_LUT1_CUSTOM_KEY_CUSTOM PAFRM_LUT1_KEY_CUSTOM
+#define PAFRM_LUT1_CUSTOM_KEY_INDEX(index)	((index) << 0)	/* Valid if custom type is set */
+
+/* Add entry to LUT1 */
+/* if PA_LUT1_INDEX_LAST_FREE is used then when the command returns, the value of index
+ * will be replaced with the actual index used */
+#define PAFRM_HW_LUT1_ENTRIES 64
+#define PAFRM_LUT1_INDEX_LAST_FREE PAFRM_HW_LUT1_ENTRIES
+
+/* Standard match flag bits */
+#define PAFRM_LUT1_MATCH_DMAC (1 << 0)
+#define PAFRM_LUT1_MATCH_SMAC (1 << 1)
+#define PAFRM_LUT1_MATCH_ETYPE (1 << 2)
+#define PAFRM_LUT1_MATCH_VLAN (1 << 3)
+#define PAFRM_LUT1_MATCH_SIP (1 << 4)
+#define PAFRM_LUT1_MATCH_DIP (1 << 5)
+#define PAFRM_LUT1_MATCH_SPI_GRE_SCTP (1 << 6)
+#define PAFRM_LUT1_MATCH_FLOW (1 << 7)
+#define PAFRM_LUT1_MATCH_SPORT (1 << 8)
+#define PAFRM_LUT1_MATCH_DPORT (1 << 9)
+#define PAFRM_LUT1_MATCH_PROTO (1 << 10)
+#define PAFRM_LUT1_MATCH_TOS (1 << 11)
+#define PAFRM_LUT1_MATCH_PORT (1 << 12)
+#define PAFRM_LUT1_MATCH_KEY (1 << 13)
+#define PAFRM_LUT1_MATCH_VALID (1 << 15)
+
+#define PAFRM_LUT1_MATCH_MPLS (PAFRM_LUT1_MATCH_SPORT | PAFRM_LUT1_MATCH_DPORT)
+
+/* Key values. The PDSP will set these bits as it parses the headers. */
+/* LUT1_1 and LUT1_2 (L3): The following bit fields are used */
+#define PAFRM_LUT1_KEY_SPI (1 << 0)
+#define PAFRM_LUT1_KEY_GRE (1 << 1)
+#define PAFRM_LUT1_KEY_MPLS (1 << 2)
+#define PAFRM_LUT1_KEY_IPV4 (1 << 3)
+#define PAFRM_LUT1_KEY_IPV6 (1 << 4)
+#define PAFRM_LUT1_KEY_SCTP (1 << 5)
+
+/* LUT1: Custom (L3) */
+#define PAFRM_LUT1_KEY_CUSTOM (1 << 7)
+
+/* LUT1_0: MAC and SRIO (L0-l2): The following bit fields are used */
+#define PAFRM_LUT1_KEY_SRIO (1 << 7)
+
+struct pa_frm_com_l1_standard {
+
+ /* LUT1 view 1 */
+ u8 dmac[6]; /* Destination mac */
+ u8 smac[6]; /* Source mac */
+	u16 etype;	/* Ethernet type; the field is also used for the previous match PDSP number */
+ u16 vlan; /* VLAN tag, the field is also used for the previous match LUT1 index */
+
+ /* LUT1 view 2 */
+ u8 src_ip[16]; /* Source IP address */
+ u8 dst_ip[16]; /* Destination IP address */
+
+ /* LUT1 view 3 */
+ u32 spi; /* ESP or AH header Security Parameters Index */
+ /* The field is also used for GRE protocol or SCTP destination port */
+ u32 flow; /* IPv6 flow label in 20 lsbs */
+
+ union {
+ u16 ports[2]; /* UDP/TCP Source port (0), destination port (1) */
+ u32 mpls; /* mpls label in 20 Lsbs */
+ } pm;
+
+ u8 proto_next; /* IPv4 protocol field, IPv6 next header */
+ u8 tos_tclass; /* IPv4 TOS, IPv6 traffic class */
+ u8 inport; /* reserved field: not used */
+ u8 key; /* IP: Distinguishes SPI/GRE, MPLS and ports
+ * LUT1_0: MAC/SRIO,
+ * LUT1_1/LUT1_2: custom or standard
+ */
+ /* end LUT1 view 3 */
+
+ /* Lookup cares/don't cares */
+ u16 match_flags; /* lookup matching valid flags as defined below */
+ u16 rsvd; /* reserved for alignment */
+};
+
+struct pa_frm_com_l1_srio {
+
+ /* LUT1 view 1 */
+ u8 rsvd1[4]; /* unused field: All zeros */
+ u16 src_id; /* Source ID */
+ u16 dest_id; /* Destination ID */
+ u8 rsvd2[4]; /* unused field: All zeros */
+ u16 etype; /* upper link (previous match PDSP number) */
+ u16 vlan; /* upper link (previous match LUT1 index) */
+
+ /* LUT1 view 2 */
+ u8 rsvd3[16]; /* unused field: All zeros */
+ u8 rsvd4[14]; /* unused field: All zeros */
+ u16 type_param1; /* stream ID or mailbox */
+
+ /* LUT1 view 3 */
+ u32 spi; /* unused field: All zeros */
+ u32 flow; /* unused field: All zeros */
+
+ u16 next_hdr_offset; /* unused field: All zeros */
+ u8 next_hdr; /* placeholder for nextHdr and nextOffset */
+ u8 rsvd5; /* unused field: All zeros */
+ u8 pri; /* 3-bit Priority */
+ u8 type_param2; /* cos or letter */
+ u8 inport; /* unused field: All zeros */
+ u8 key; /* IP: Distinguishes SPI/GRE, MPLS and ports
+ * LUT1_0: MAC/SRIO,
+ * LUT1_1/LUT1_2: custom or standard
+ */
+ /* end LUT1 view 3 */
+ /* Lookup cares/don't cares */
+ u16 match_flags; /* lookup matching valid flags as defined below */
+ u16 rsvd; /* reserved for alignment */
+};
+
+struct pa_frm_com_l1_custom {
+
+ /* LUT1 view 1 */
+ u8 dmac[6]; /* unused field: All zeros */
+ u8 smac[6]; /* unused field: All zeros */
+ u16 etype; /* upper link (previous match PDSP number) */
+ u16 vlan; /* upper link (previous match LUT1 index) */
+
+ /* LUT1 view 2 */
+ u8 match_values[32]; /* 32 bytes to match */
+
+ /* LUT1 view 3 - offset from start */
+ u32 rsvd0; /* unused field: All zeros */
+ u32 rsvd1; /* unused field: All zeros */
+ u32 rsvd2; /* unused field: All zeros */
+
+ u8 rsvd3; /* unused field: All zeros */
+ u8 rsvd4; /* unused field: All zeros */
+ u8 inport; /* unused field: All zeros */
+ u8 key; /* IP: Distinguishes SPI/GRE, MPLS and ports
+ * LUT1_0: MAC/SRIO,
+ * LUT1_1/LUT1_2: custom or standard
+ */
+
+ /* Lookup cares/don't cares */
+ u16 match_flags; /* lookup matching valid flags as defined below */
+ u16 rsvd5; /* reserved for alignment */
+};
+
+enum {
+ PAFRM_CONFIG_COMMAND_RSVD = 0,
+ PAFRM_CONFIG_COMMAND_ADDREP_LUT1,
+ PAFRM_CONFIG_COMMAND_DEL_LUT1,
+ PAFRM_CONFIG_COMMAND_ADDREP_LUT2,
+ PAFRM_CONFIG_COMMAND_DEL_LUT2,
+ PAFRM_CONFIG_COMMAND_CONFIG_PA,
+ PAFRM_CONFIG_COMMAND_REQ_STATS,
+ PAFRM_CONFIG_COMMAND_REQ_VERSION,
+ PAFRM_CONFIG_COMMAND_MULTI_ROUTE,
+ PAFRM_CONFIG_COMMAND_CRC_ENGINE,
+ PAFRM_CONFIG_COMMAND_CMD_SET
+};
+
+/* Command magic value */
+#define PAFRM_CONFIG_COMMAND_SEC_BYTE 0xce
+
+/* Command return values */
+enum {
+
+ PAFRM_COMMAND_RESULT_SUCCESS = 0, /* Must be 0 */
+ PAFRM_COMMAND_RESULT_NO_COMMAND_MAGIC, /* Command magic value not found */
+
+ PAFRM_COMMAND_RESULT_INVALID_CMD, /* Invalid command identifier */
+
+ /* Add entry to LUT1 fails */
+ PAFRM_COMMAND_RESULT_LUT1_TYPE_INVALID, /* Invalid type, custom or standard IP/ethernet */
+ PAFRM_COMMAND_RESULT_LUT1_INDEX_INVALID, /* Invalid LUT1 index (0-63) or no free indices available */
+ PAFRM_COMMAND_RESULT_LUT1_MATCH_DEST_INVALID, /* Sent a match packet to q0 on c1 or c2 - this is illegal. */
+ PAFRM_COMMAND_RESULT_LUT1_NMATCH_INVALID, /* Previous match forward info was somewhere in chunk domain */
+ PAFRM_COMMAND_RESULT_LUT1_INVALID_KEYS, /* Invalid combination found in the key value */
+
+ /* LUT2 entry warnings, since the LUT can be configured without the PDSP */
+ PAFRM_COMMAND_RESULT_WARN_OVER_MAX_ENTRIES,
+ PAFRM_COMMAND_RESULT_WARN_NEGATIVE_ENTRY_COUNT,
+
+ /* LUT2 entry failures */
+ PAFRM_COMMAND_RESULT_LUT2_ADD_BUSY, /* LUT2 had a lookup and pending config */
+
+ /* Not enough room in stats request packet for the reply */
+ PAFRM_COMMAND_RESULT_WARN_STATS_REPLY_SIZE,
+
+ /* Command sent to PDSP which couldn't handle it */
+ PAFRM_COMMAND_RESULT_INVALID_DESTINATION,
+
+ /* Add/Delete/Read entries to multi route table */
+ PAFRM_COMMAND_RESULT_MULTI_ROUTE_NO_FREE_ENTRIES, /* Asked to use a free entry, but none found */
+ PAFRM_COMMAND_RESULT_MULTI_ROUTE_INVALID_IDX, /* Illegal index value used */
+ PAFRM_COMMAND_RESULT_MULTI_ROUTE_INVALID_MODE, /* Illegal multi route mode used */
+
+ /* Packet size didn't match command */
+ PAFRM_COMMAND_RESULT_INVALID_PKT_SIZE,
+
+ /* Custom and Command Set index */
+ PAFRM_COMMAND_RESULT_INVALID_C1_CUSTOM_IDX, /* Illegal Custom LUT1 index value used */
+ PAFRM_COMMAND_RESULT_INVALID_C2_CUSTOM_IDX, /* Illegal Custom LUT2 index value used */
+ PAFRM_COMMAND_RESULT_INVALID_CMDSET_IDX /* Illegal Custom Command Set index value used */
+};
+
+enum {
+ PA_TIMESTAMP_SCALER_FACTOR_1 = -1,
+ PA_TIMESTAMP_SCALER_FACTOR_2 = 0,
+ PA_TIMESTAMP_SCALER_FACTOR_4,
+ PA_TIMESTAMP_SCALER_FACTOR_8,
+ PA_TIMESTAMP_SCALER_FACTOR_16,
+ PA_TIMESTAMP_SCALER_FACTOR_32,
+ PA_TIMESTAMP_SCALER_FACTOR_64,
+ PA_TIMESTAMP_SCALER_FACTOR_128,
+ PA_TIMESTAMP_SCALER_FACTOR_256,
+ PA_TIMESTAMP_SCALER_FACTOR_512,
+ PA_TIMESTAMP_SCALER_FACTOR_1024,
+ PA_TIMESTAMP_SCALER_FACTOR_2048,
+ PA_TIMESTAMP_SCALER_FACTOR_4096,
+ PA_TIMESTAMP_SCALER_FACTOR_8192
+};
+
+#define PA_SS_TIMER_CNTRL_REG_GO 0x00000001u
+#define PA_SS_TIMER_CNTRL_REG_MODE 0x00000002u
+#define PA_SS_TIMER_CNTRL_REG_PSE 0x00008000u
+#define PA_SS_TIMER_CNTRL_REG_PRESCALE_SHIFT 0x00000002u
+
+/* Destination (route) values */
+#define PAFRM_DEST_PDSP0 0
+#define PAFRM_DEST_PDSP1 1
+#define PAFRM_DEST_PDSP2 2
+#define PAFRM_DEST_PDSP3 3
+#define PAFRM_DEST_PDSP4 4
+#define PAFRM_DEST_PDSP5 5
+#define PAFRM_DEST_PKTDMA 6
+#define PAFRM_DEST_ETH 7
+
+#define PAFRM_DEST_DISCARD 10
+
+/* Assigning names based on PDSP functions */
+#define PAFRM_DEST_PA_C1_0 PAFRM_DEST_PDSP0
+#define PAFRM_DEST_PA_C1_1 PAFRM_DEST_PDSP1
+#define PAFRM_DEST_PA_C1_2 PAFRM_DEST_PDSP2
+#define PAFRM_DEST_PA_C2 PAFRM_DEST_PDSP3
+#define PAFRM_DEST_PA_M_0 PAFRM_DEST_PDSP4
+#define PAFRM_DEST_PA_M_1 PAFRM_DEST_PDSP5
+
+/* The default queue for packets that arrive at the PA and don't match in
+ * classify1 (right at init time) */
+#define PAFRM_DEFAULT_INIT_Q 0x100
+
+/* Ethertypes recognized by the firmware. */
+#define PAFRM_ETHERTYPE_IP 0x0800
+#define PAFRM_ETHERTYPE_IPV6 0x86dd
+#define PAFRM_ETHERTYPE_VLAN 0x8100
+#define PAFRM_ETHERTYPE_SPVLAN 0x88a8
+#define PAFRM_ETHERTYPE_MPLS 0x8847
+#define PAFRM_ETHERTYPE_MPLS_MULTI 0x8848
+
+/* Next header type values */
+#define PAFRM_HDR_MAC 0
+#define PAFRM_HDR_VLAN 1
+#define PAFRM_HDR_MPLS 2
+#define PAFRM_HDR_IPv4 3
+#define PAFRM_HDR_IPv6 4
+#define PAFRM_HDR_IPv6_EXT_HOP 5
+#define PAFRM_HDR_IPv6_EXT_ROUTE 6
+#define PAFRM_HDR_IPv6_EXT_FRAG 7
+#define PAFRM_HDR_IPv6_EXT_DEST 8
+#define PAFRM_HDR_GRE 9
+#define PAFRM_HDR_ESP 10
+#define PAFRM_HDR_ESP_DECODED 11
+#define PAFRM_HDR_AUTH 12
+#define PAFRM_HDR_CUSTOM_C1 13
+#define PAFRM_HDR_FORCE_LOOKUP 14 /* A contrived header type used with custom SRIO to force
+ a parse after looking at only the SRIO L0-L2 */
+#define PAFRM_HDR_SCTP 15
+#define PAFRM_HDR_UNKNOWN 16
+#define PAFRM_HDR_UDP 17
+#define PAFRM_HDR_UDP_LITE 18
+#define PAFRM_HDR_TCP 19
+#define PAFRM_HDR_GTPU 20
+#define PAFRM_HDR_ESP_DECODED_C2 21
+#define PAFRM_HDR_CUSTOM_C2 22
+
+/* Command related definitions */
+#define PAFRM_CRC_FLAG_CRC_OFFSET_VALID 0x01
+#define PAFRM_CRC_FLAG_CRC_OFFSET_FROM_DESC 0x02
+#define PAFRM_CHKSUM_FALG_NEGATIVE 0x01
+
+#define PA_NEXT_ROUTE_PARAM_PRESENT 0x0001
+#define PA_NEXT_ROUTE_PROC_NEXT_CMD 0x0002
+#define PA_NEXT_ROUTE_PROC_MULTI_ROUTE 0x0004
+
+/* PAFRM receive commands related definitions */
+
+/*
+ * There are two groups of PAFRM receive commands:
+ * PAFRM short commands which can be used as part of the routing info
+ * PAFRM commands which can be used within a command set
+ */
+
+#define PAFRM_RX_CMD_NONE 0 /* Dummy command */
+
+/* short commands */
+#define PAFRM_RX_CMD_CMDSET 1 /* Execute a command set */
+#define PAFRM_RX_CMD_INSERT 2 /* Insert up to two types at the current location */
+
+/* command set commands */
+#define PAFRM_RX_CMD_NEXT_ROUTE 3 /* Specify the next route */
+#define PAFRM_RX_CMD_CRC_OP 4 /* CRC generation or verification */
+#define PAFRM_RX_CMD_COPY_DATA 5 /* Copy data to the PS Info section */
+#define PAFRM_RX_CMD_PATCH_DATA 6 /* Insert or patch packet data at the specified location */
+#define PAFRM_RX_CMD_REMOVE_HDR 7 /* Remove the parsed packet header */
+#define PAFRM_RX_CMD_REMOVE_TAIL 8 /* Remove the parsed packet tail */
+#define PAFRM_RX_CMD_MULTI_ROUTE 9 /* Duplicate packet to multiple destinations */
+
+/*
+ * PASS command ID formatting
+ * Bit 15 is used to distinguish the L2 table from
+ * the L3 table in the command comId field
+ */
+#define PA_COMID_L2 (0 << 15)
+#define PA_COMID_L3 (1 << 15)
+#define PA_COMID_L_MASK (1 << 15)
+#define PA_COMID_IDX_MASK (~(1 << 15))
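+
+/*
+ * Illustrative sketch (the helper name is an example, not part of the driver
+ * interface): a LUT1 MAC entry would carry an L2 command id built from the
+ * table selector bit and the entry index.
+ */
+static inline u16 pa_example_l2_com_id(u16 index)
+{
+	return PA_COMID_L2 | (index & PA_COMID_IDX_MASK);	/* bit 15 clear selects the L2 table */
+}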
+
+/* define LUT1 entry types */
+#define PAFRM_COM_ADD_LUT1_STANDARD 0 /* MAC/IP */
+#define PAFRM_COM_ADD_LUT1_SRIO 1 /* SRIO */
+#define PAFRM_COM_ADD_LUT1_CUSTOM 2 /* Custom LUT1 */
+
+struct pa_frm_cmd_add_lut1 {
+
+ u8 index; /* LUT1 index. */
+ u8 type; /* Custom or standard */
+ u8 rsvd; /* reserved for alignment */
+ u8 cust_index; /* Valid only if type is custom */
+
+ union {
+ struct pa_frm_com_l1_standard eth_ip; /* matching information for MAC/IP entry */
+ struct pa_frm_com_l1_srio srio;
+ struct pa_frm_com_l1_custom custom;
+ } u;
+
+ struct pa_frm_forward match; /* Routing information when a match is found */
+
+ /*
+ * Routing information when subsequent match fails - a fragmented
+ * packet or inner route
+ */
+ struct pa_frm_forward next_fail;
+};
+
+/* Commands to PA */
+struct pa_frm_command {
+
+ u32 command_result; /* Returned to the host, ignored on entry to the PASS */
+ u8 command; /* Command value */
+ u8 magic; /* Magic value */
+ u16 com_id; /* Used by the host to identify command results */
+ u32 ret_context; /* Returned in swInfo to identify packet as a command */
+ u16 reply_queue; /* Specifies the queue number for the message reply. 0xffff to toss the reply */
+ u8 reply_dest; /* Reply destination (host0, host1, discard are the only valid values) */
+ u8 flow_id; /* Flow ID used to assign packet at reply */
+ u32 cmd; /* First word of the command */
+};
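+
+/*
+ * Illustrative sketch of filling the common command header (the helper and
+ * its argument list are examples only; reply_dest must be one of the valid
+ * reply destinations described above).
+ */
+static inline void pa_example_init_command(struct pa_frm_command *fcmd,
+					   u8 command, u16 com_id,
+					   u32 context, u16 reply_queue,
+					   u8 reply_dest, u8 flow_id)
+{
+	fcmd->command_result = 0;			/* written by the PASS on completion */
+	fcmd->command = command;			/* e.g. PAFRM_CONFIG_COMMAND_ADDREP_LUT1 */
+	fcmd->magic = PAFRM_CONFIG_COMMAND_SEC_BYTE;	/* required command magic value */
+	fcmd->com_id = com_id;				/* echoed back to match the reply */
+	fcmd->ret_context = context;			/* returned in swinfo of the reply */
+	fcmd->reply_queue = reply_queue;		/* 0xffff discards the reply */
+	fcmd->reply_dest = reply_dest;			/* host or discard destination */
+	fcmd->flow_id = flow_id;			/* flow used for the reply packet */
+	/* fcmd->cmd (the first word of the specific command) is filled separately */
+}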
+
+struct pa_cmd_next_route {
+ u16 ctrl_bit_field; /* Routing control information as defined at @ref routeCtrlInfo */
+ int dest; /* Packet destination as defined at @ref pktDest */
+ u8 pkt_type_emac_ctrl; /* For destination SRIO, specify the 5-bit packet type toward SRIO
+ For destination EMAC, specify the EMAC control @ref emcOutputCtrlBits to the network */
+ u8 flow_id; /* For host, SA or SRIO destinations, specifies return free descriptor setup */
+ u16 queue; /* For host, SA or SRIO destinations, specifies the dest queue */
+ u32 sw_info_0; /* Placed in SwInfo0 for packets to host or SA */
+ u32 sw_info_1; /* Placed in SwInfo1 for packets to the SA */
+ u16 multi_route_index; /* Multi-route index. It is valid in the from-network direction only */
+};
+
+struct pa_cmd_crcOp {
+ u16 ctrl_bit_field; /* CRC operation control information as defined at @ref crcOpCtrlInfo */
+ u16 start_offset; /* Byte location, from SOP/Protocol Header, where the CRC computation begins
+ if frame type is not specified
+ Byte location, from SOP/Protocol header, where the specific frame header begins
+ if frame type is specified
+ In to-network direction: offset from SOP
+ In from-network direction: offset from the current parsed header
+ */
+ u16 len; /* Number of bytes covered by the CRC computation
+ valid only if pa_CRC_OP_PAYLOAD_LENGTH_IN_HEADER is clear */
+ u16 len_offset; /* Payload length field offset in the custom header */
+ u16 len_mask; /* Payload length field mask */
+ u16 len_adjust; /* Payload length adjustment: valid only if PA_CRC_OP_PAYLOAD_LENGTH_IN_HEADER is set */
+ u16 crc_offset; /* Offset from SOP/Protocol Header to the CRC field
+ In to-network direction: offset from SOP
+ In from-network direction: offset from the current parsed header */
+ u16 frame_yype; /* Frame type @ref crcFrameTypes, valid if
+ PA_CRC_OP_CRC_FRAME_TYPE is set */
+};
+
+/**
+ * @ingroup palld_api_structures
+ * @brief Transmit checksum configuration
+ *
+ * @details paTxChksum_t is used in the call to @ref Pa_formatTxRoute or @ref Pa_formatTxCmd to create a tx
+ * command header that instructs the packet accelerator sub-system to generate ones' complement
+ * checksums into network packets. The checksums are typically used for TCP and UDP payload checksums as
+ * well as IPv4 header checksums. In the case of TCP and UDP payload checksums the pseudo header
+ * checksum must be pre-calculated and provided; the sub-system does not calculate it. A minimal
+ * usage sketch follows the structure definition below.
+ */
+struct pa_tx_chksum {
+ u16 start_offset; /* Byte location, from SOP, where the checksum calculation begins */
+ u16 length_bytes; /* Number of bytes covered by the checksum. Must be even */
+ u16 result_offset; /* Byte offset, from startOffset, to place the resulting checksum */
+ u16 initial_sum; /* Initial value of the checksum */
+ u16 negative_0; /* If TRUE, a computed value of 0 is written as -0 */
+};
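+
+/*
+ * Minimal sketch of describing a UDP payload checksum (the helper is an
+ * example only; the caller is assumed to have computed the pseudo-header
+ * sum, as noted above).
+ */
+static inline void pa_example_udp_chksum(struct pa_tx_chksum *chk,
+					 u16 udp_offset, u16 udp_len,
+					 u16 pseudo_hdr_sum)
+{
+	chk->start_offset = udp_offset;		/* coverage starts at the UDP header */
+	chk->length_bytes = udp_len;		/* UDP header plus payload, must be even */
+	chk->result_offset = 6;			/* UDP checksum field offset within the header */
+	chk->initial_sum = pseudo_hdr_sum;	/* seeded with the pre-computed pseudo-header sum */
+	chk->negative_0 = 1;			/* UDP transmits a computed zero as 0xffff */
+}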
+
+struct pa_cmd_copy {
+ u16 ctrl_bitfield; /* Copy operation control information as defined at @ref copyCtrlInfo */
+ u16 src_offset; /* Offset from the start of current protocol header for the data copy to begin */
+ u16 dest_offset; /* Offset from the top of the PSInfo for the data to be copied to */
+ u16 num_bytes; /* Number of bytes to be copied */
+};
+
+struct pa_patch_info{
+ unsigned int n_patch_bytes; /**< The number of bytes to be patched */
+ unsigned int total_patch_size; /**< The number of patch bytes in the patch command, must be >= nPatchBytes and a multiple of 4 bytes */
+ unsigned int offset; /**< Offset from the start of the packet for the patch to begin in the to-network direction
+ Offset from the start of the current header for the patch to begin in the from-network direction */
+ u16 overwrite; /**< If TRUE the patch data replaces existing packet data. If false the data is added */
+ u8 *patch_data; /**< Pointer to the patch data */
+};
+
+
+/**
+ * @ingroup palld_api_structures
+ * @brief paPayloadInfo_t defines the packet payload information in the short format.
+ * It is required by the Security Accelerator sub-system (SASS)
+ *
+ * @details paPayloadInfo_t defines the packet parsing information in terms of
+ * payload offset and payload length as described below
+ * @li SRTP: offset to the RTP header; RTP payload length including ICV
+ * @li IPSEC AH: offset to the Outer IP; IP payload length
+ * @li IPSEC ESP: offset to the ESP header; ESP payload length including ICV
+ */
+
+struct pa_payload_info {
+ u16 offset; /* The offset to where the SA packet parsing starts */
+ u16 len; /* The total length of the protocol payload to be processed by SA */
+};
+
+struct pa_cmd_multi_route {
+ u16 index; /* Multi-route set Index */
+};
+
+/**
+ * @def PA_MAX_CMD_SETS
+ * The maximum number of command sets supported
+ */
+#define PA_MAX_CMD_SETS 8
+
+#define PA_OK 0
+#define PA_ERR_CONFIG -10
+#define PA_INSUFFICIENT_CMD_BUFFER_SIZE -11
+#define PA_INVALID_CMD_REPLY_DEST -12
+
+/**
+ * @ingroup palld_api_structures
+ * @brief Command Set Command
+ *
+ * @details paCmdSet_t is used to specify the desired PA command set. The command set command
+ * instructs the PASS to execute a list of commands after a LUT1 or LUT2 match occurs.
+ * It is one of the commands which can be embedded within the @ref paRouteInfo_t.
+ */
+struct pa_cmd_set {
+ u16 index; /* Command Set Index */
+};
+
+struct pa_cmd_tx_timestamp {
+ u16 dest_queue; /* Host queue for the tx timestamp reporting packet */
+ u16 flow_id; /* CPPI flow */
+ u32 sw_info0; /* 32 bit value returned in the descriptor */
+};
+
+struct pa_cmd_ip_frag {
+ u16 ip_offset; /* Offset to the IP header. */
+ u16 mtu_size; /* Size of the maximum transmission unit (>= 68) */
+};
+
+struct pa_cmd_usr_stats {
+ u16 index; /* User-defined statistics index */
+};
+
+struct pa_cmd_set_usr_stats {
+ u16 set_index; /* Command Set Index */
+ u16 stats_index; /* User-defined statistics index */
+};
+
+struct pa_cmd_info {
+ u16 cmd; /* Specify the PA command code as defined at @ref paCmdCode */
+ union {
+ struct pa_cmd_next_route route; /* Specify nextRoute command specific parameters */
+ struct pa_tx_chksum chksum; /* Specify Tx Checksum command specific parameters */
+ struct pa_cmd_crcOp crcOp; /* Specify CRC operation command specific parameters */
+ struct pa_cmd_copy copy; /* Specify Copy command specific parameters */
+ struct pa_patch_info patch; /* Specify Patch command specific parameters */
+ struct pa_payload_info payload; /* Specify the payload information required by SA */
+ struct pa_cmd_set cmd_set; /* Specify Command Set command specific parameters */
+ struct pa_cmd_multi_route m_route; /* Specify Multi-route command specific parameters */
+ struct pa_cmd_tx_timestamp tx_ts; /* Specify Report Tx Timestamp command specific parameters */
+ struct pa_cmd_ip_frag ip_frag; /* Specify IP fragmentation command specific parameters */
+ struct pa_cmd_usr_stats usr_stats; /* Specify User-defined Statistics command specific parameters */
+ struct pa_cmd_set_usr_stats cmd_set_usr_stats;
+ } params;
+};
+
+struct pa_route_info {
+ int dest;
+ u8 flow_id;
+ u16 queue;
+ int m_route_index;
+ u32 sw_info_0;
+ u32 sw_info_1;
+ int custom_type;
+ u8 custom_index;
+ u8 pkt_type_emac_ctrl;
+ struct pa_cmd_info *pcmd;
+};
+
+struct pa_cmd_reply {
+ int dest; /* Packet destination, must be pa_DEST_HOST or PA_DEST_DISCARD, see @ref pktDest */
+ u32 reply_id; /* Value placed in swinfo0 in reply packet */
+ u16 queue; /* Destination queue for destination PA_DEST_HOST */
+ u8 flow_id; /* Flow ID used on command reply from PASS */
+};
+
+#endif /* __KERNEL__ */
+
+#endif /* KEYSTONE_PA_H */
+
diff --git a/drivers/net/ethernet/ti/keystone_pasahost.h b/drivers/net/ethernet/ti/keystone_pasahost.h
new file mode 100644
index 00000000000000..9c20c9cc60bef0
--- /dev/null
+++ b/drivers/net/ethernet/ti/keystone_pasahost.h
@@ -0,0 +1,384 @@
+/*
+ * Copyright (C) 2012 Texas Instruments Incorporated
+ * Author: Sandeep Paulraj <s-paulraj@ti.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation version 2.
+ *
+ * This program is distributed "as is" WITHOUT ANY WARRANTY of any
+ * kind, whether express or implied; without even the implied warranty
+ * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef KEYSTONE_PASAHOST_H
+#define KEYSTONE_PASAHOST_H
+
+#ifdef __KERNEL__
+
+#define PASAHO_CONFIGURE 4
+#define PASAHO_PARX_PARSECMD 0
+#define PASAHO_PARX_MULTI_ROUTE 5
+#define PASAHO_PAMOD_CMPT_CHKSUM 0
+#define PASAHO_PAMOD_CMPT_CRC 1
+#define PASAHO_PAMOD_PATCH 2
+#define PASAHO_PAMOD_NROUTE 3
+#define PASAHO_PAMOD_MULTI_ROUTE 5
+#define PASAHO_PAMOD_REPORT_TIMESTAMP 6
+#define PASAHO_PAMOD_GROUP_7 7
+#define PASAHO_PAMOD_DUMMY PASAHO_PAMOD_GROUP_7
+#define PASAHO_PAMOD_IP_FRAGMENT PASAHO_PAMOD_GROUP_7
+#define PASAHO_SA_LONG_INFO 0
+#define PASAHO_SA_SHORT_INFO 1
+#define PASAHO_SA_AIR_INFO 2
+
+#define PASAHO_READ_BITFIELD(a,b,c) (((a)>>(b)) & ((1UL<<(c))-1))
+
+#define PASAHO_SET_BITFIELD(a,x,b,c) (a) &= ~(((1UL<<(c))-1)<<(b)), \
+ (a) |= (((x) & ((1UL<<(c))-1))<<(b))
+
+#define PASAHO_SET_CMDID(x,v) PASAHO_SET_BITFIELD((x)->word0, (v), 29,3)
+
+#define PASAHO_PACFG_CMD (((u32)PASAHO_CONFIGURE << 5) << 24)
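+
+/*
+ * Illustrative sketch of the bit-field helpers above, operating on a plain
+ * host-order control word (the helper names are examples only): packing and
+ * unpacking the 3-bit command ID kept in bits 31:29.
+ */
+static inline u32 pasaho_example_pack_cmdid(u32 word0, u32 cmd)
+{
+	PASAHO_SET_BITFIELD(word0, cmd, 29, 3);		/* clear the field, then insert cmd */
+	return word0;
+}
+
+static inline u32 pasaho_example_unpack_cmdid(u32 word0)
+{
+	return PASAHO_READ_BITFIELD(word0, 29, 3);	/* extract bits 31:29 */
+}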
+
+enum pasaho_header_type {
+ PASAHO_HDR_MAC = 0, /* MAC */
+ PASAHO_HDR_VLAN, /* VLAN */
+ PASAHO_HDR_MPLS, /* MPLS */
+ PASAHO_HDR_IPv4, /* IPv4 */
+ PASAHO_HDR_IPv6, /* IPv6 */
+ PASAHO_HDR_IPv6_EXT_HOP, /* IPv6 hop by hop extension header */
+ PASAHO_HDR_IPv6_EXT_ROUTE, /* IPv6 routing extension header */
+ PASAHO_HDR_IPv6_EXT_FRAG, /* IPv6 fragmentation extension header */
+ PASAHO_HDR_IPv6_EXT_DEST, /* IPv6 destination options header */
+ PASAHO_HDR_GRE, /* Generic Routing Encapsulation header */
+ PASAHO_HDR_ESP, /* Encapsulating Security Payload header */
+ PASAHO_HDR_ESP_DECODED, /* Decoded Encapsulating Security Payload header */
+ PASAHO_HDR_AUTH, /* Authentication header */
+ PASAHO_HDR_CUSTOM_C1, /* Custom classify 1 header */
+ PASAHO_HDR_FORCE_LOOKUP, /* A contrived header type used with custom SRIO to force
+ a parse after looking at only the SRIO L0-L2 */
+ PASAHO_HDR_UNKNOWN, /* Next header type is unknown */
+ PASAHO_HDR_UDP, /* User Datagram Protocol header */
+ PASAHO_HDR_UDP_LITE, /* Lightweight User Datagram Protocol header */
+ PASAHO_HDR_TCP, /* Transmission Control Protocol header */
+ PASAHO_HDR_CUSTOM_C2 /* Custom classify 2 header */
+};
+
+/**
+ * @defgroup pasahoSubCmdCode PASS Sub-Command Code
+ * @ingroup pasaho_if_constants
+ * @{
+ *
+ * @name PASS Sub-Command Code
+ * Definition of the 5-bit sub-command codes which are used to specify the group 7 commands.
+ */
+
+enum pasaho_sub_cmd_code {
+ PASAHO_SUB_CMD_DUMMY = 0, /* Dummy */
+ PASAHO_SUB_CMD_IP_FRAG /* IPv4 fragmentation */
+};
+
+/**
+ * @ingroup pasaho_if_structures
+ * @brief pasahoCmdInfo_t defines the general short command information
+ *
+ */
+struct pasaho_cmd_info {
+ u32 word0; /* Control block word 0 */
+};
+
+/**
+ * @ingroup pasaho_if_structures
+ * @brief pasahoLongInfo_t defines the packet parsing information in the long format.
+ * The information is structured as an array of 32 bit values. These values
+ * are broken down through macros. This allows the representation to be
+ * endian independent of the hardware, which operates only on 32 bit values.
+ *
+ * @details
+ */
+struct pasaho_long_info {
+ u32 word0; /* Control block word 0 */
+ u32 word1; /* Control block word 1 */
+ u32 word2; /* Control block word 2 */
+ u32 word3; /* Control block word 3 */
+ u32 word4; /* Control block word 4 */
+};
+
+/**
+ * @defgroup PASAHO_long_info_command_macros PASAHO Long Info Command Macros
+ * @ingroup pasaho_if_macros
+ * @{
+ * @name PASAHO Long Info Command Macros
+ * Macros used by the PASAHO Long Info Command
+ */
+
+/* Extract the command ID */
+#define PASAHO_LINFO_READ_CMDID(x) PASAHO_READ_BITFIELD((x)->word0,29,3)
+
+/* Extract the block length */
+#define PASAHO_LINFO_READ_RECLEN(x) PASAHO_READ_BITFIELD((x)->word0,24,5)
+
+/* Extract the next parse start offset */
+#define PASAHO_LINFO_READ_START_OFFSET(x) PASAHO_READ_BITFIELD((x)->word0,0,16)
+
+/* Extract the end of packet parse offset */
+#define PASAHO_LINFO_READ_END_OFFSET(x) PASAHO_READ_BITFIELD((x)->word1,16,16)
+
+/* Extract the error index */
+#define PASAHO_LINFO_READ_EIDX(x) PASAHO_READ_BITFIELD((x)->word1,11,5)
+
+/* Extract the previous match flag */
+#define PASAHO_LINFO_READ_PMATCH(x) PASAHO_READ_BITFIELD((x)->word1,10,1)
+
+/* Extract the custom classify flag */
+#define PASAHO_LINFO_READ_C2C(x) PASAHO_READ_BITFIELD((x)->word1,9,1)
+
+/* Extract the first parse module ID */
+#define PASAHO_LINFO_READ_L1_PDSP_ID(x) PASAHO_READ_BITFIELD((x)->word1,6,3)
+
+/* Extract the first parse module match index */
+#define PASAHO_LINFO_READ_L1_IDX(x) PASAHO_READ_BITFIELD((x)->word1,0,6)
+
+/* Extract the offset to the level 3 header */
+#define PASAHO_LINFO_READ_L3_OFFSET(x) PASAHO_READ_BITFIELD((x)->word2,24,8)
+
+/* Extract the offset to the level 4 header */
+#define PASAHO_LINFO_READ_L4_OFFSET(x) PASAHO_READ_BITFIELD((x)->word2,16,8)
+
+/* Extract the offset to the level 5 header */
+#define PASAHO_LINFO_READ_L5_OFFSET(x) PASAHO_READ_BITFIELD((x)->word2,8,8)
+
+/* Extract the offset to the security header */
+#define PASAHO_LINFO_READ_ESP_AH_OFFSET(x) PASAHO_READ_BITFIELD((x)->word2,0,8)
+
+/* Extract the bitmask of parsed header types */
+#define PASAHO_LINFO_READ_HDR_BITMASK(x) PASAHO_READ_BITFIELD((x)->word3,21,11)
+
+/* Extract the next header to parse type */
+#define PASAHO_LINFO_READ_NXT_HDR_TYPE(x) PASAHO_READ_BITFIELD((x)->word3,16,5)
+
+/* Extract the number of VLAN tags found */
+#define PASAHO_LINFO_READ_VLAN_COUNT(x) PASAHO_READ_BITFIELD((x)->word3,12,4)
+
+/* Extract the number of IP headers found */
+#define PASAHO_LINFO_READ_IP_COUNT(x) PASAHO_READ_BITFIELD((x)->word3,8,4)
+
+/* Extract the number of GRE headers found */
+#define PASAHO_LINFO_READ_GRE_COUNT(x) PASAHO_READ_BITFIELD((x)->word3,4,4)
+
+/* Extract the fragmentation found flag */
+#define PASAHO_LINFO_READ_FLAG_FRAG(x) PASAHO_READ_BITFIELD((x)->word3,3,1)
+
+/* Extract the incomplete IP route flag */
+#define PASAHO_LINFO_READ_FLAG_ROUTE(x) PASAHO_READ_BITFIELD((x)->word3,2,1)
+
+/* Extract the last pseudo-header checksum computed */
+#define PASAHO_LINFO_READ_PSEUDO_CHKSM(x) PASAHO_READ_BITFIELD((x)->word4,16,16)
+
+/* Extract the IP Reassembly Traffic Flow Index */
+#define PASAHO_LINFO_READ_TFINDEX(x) PASAHO_READ_BITFIELD((x)->word4,24,8)
+
+/* Extract the IP Reassembly Fragment count */
+#define PASAHO_LINFO_READ_FRANCNT(x) PASAHO_READ_BITFIELD((x)->word4,16,8)
+
+/* Set the IP Reassembly Traffic Flow Index */
+#define PASAHO_LINFO_SET_TFINDEX(x, v) PASAHO_SET_BITFIELD((x)->word4,(v),24,8)
+
+/* Set the IP Reassembly Fragment count */
+#define PASAHO_LINFO_SET_FRANCNT(x, v) PASAHO_SET_BITFIELD((x)->word4,(v),16,8)
+
+/* Indicate whether it is an IPSEC packet */
+#define PASAHO_LINFO_IS_IPSEC(x) PASAHO_READ_BITFIELD((x)->word3,25,2)
+
+/* Indicate whether it is an IPSEC ESP packet */
+#define PASAHO_LINFO_IS_IPSEC_ESP(x) PASAHO_READ_BITFIELD((x)->word3,26,1)
+
+/* Indicate whether it is an IPSEC AH packet */
+#define PASAHO_LINFO_IS_IPSEC_AH(x) PASAHO_READ_BITFIELD((x)->word3,25,1)
+
+/* Clear IPSEC indication bits */
+#define PASAHO_LINFO_CLR_IPSEC(x) PASAHO_SET_BITFIELD((x)->word3,0,25,2)
+
+/* Clear IPSEC ESP indication bit */
+#define PASAHO_LINFO_CLR_IPSEC_ESP(x) PASAHO_SET_BITFIELD((x)->word3,0,26,1)
+
+/* Clear IPSEC AH indication bit */
+#define PASAHO_LINFO_CLR_IPSEC_AH(x) PASAHO_SET_BITFIELD((x)->word3,0,25,1)
+
+/* Clear the fragmentation found flag */
+#define PASAHO_LINFO_CLR_FLAG_FRAG(x) PASAHO_SET_BITFIELD((x)->word3,0,3,1)
+
+/* Update the next parse start offset */
+#define PASAHO_LINFO_SET_START_OFFSET(x, v) PASAHO_SET_BITFIELD((x)->word0,(v),0,16)
+
+/* Update the end of packet parse offset */
+#define PASAHO_LINFO_SET_END_OFFSET(x, v) PASAHO_SET_BITFIELD((x)->word1,(v),16,16)
+
+
+/*
+ * Set the null packet flag, which indicates that the packet should be
+ * dropped. This flag should be set in the null packet delivered to the
+ * PASS when the reassembly timeout occurs
+ */
+#define PASAHO_LINFO_SET_NULL_PKT_IND(x, v) PASAHO_SET_BITFIELD((x)->word0,(v),21,1)
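+
+/*
+ * Illustrative sketch (assumes the descriptor PS data has already been mapped
+ * onto a struct pasaho_long_info in host byte order; the helper name is an
+ * example only): returns the L3 header offset of a received IP fragment, or
+ * -1 if the fragment flag is not set.
+ */
+static inline int pasaho_example_l3_frag_offset(const struct pasaho_long_info *linfo)
+{
+	if (!PASAHO_LINFO_READ_FLAG_FRAG(linfo))	/* fragmentation found flag, word3 bit 3 */
+		return -1;
+	return PASAHO_LINFO_READ_L3_OFFSET(linfo);	/* level 3 header offset, word2 bits 31:24 */
+}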
+
+/*
+ * PA_INV_TF_INDEX
+ * PASS-assisted IP reassembly traffic flow index to indicate
+ * that no traffic flow is available
+ */
+#define PA_INV_TF_INDEX 0xFF
+
+struct pasaho_short_info {
+ u32 word0;
+ u32 word1;
+};
+
+/* Extract the command ID */
+#define PASAHO_SINFO_READ_CMDID(x) PASAHO_READ_BITFIELD((x)->word0,29,3)
+
+/* Extract the offset to the packet payload */
+#define PASAHO_SINFO_RESD_PAYLOAD_OFFSET(x) PASAHO_READ_BITFIELD((x)->word0,16,8)
+
+/* Extract the byte length of the payload */
+#define PASAHO_SINFO_READ_PAYLOAD_LENGTH(x) PASAHO_READ_BITFIELD((x)->word0,0,16)
+
+/* Set the offset to the payload */
+#define PASAHO_SINFO_SET_PAYLOAD_OFFSET(x, v) PASAHO_SET_BITFIELD((x)->word0, (v), 16, 8)
+
+/* Set the payload length */
+#define PASAHO_SINFO_SET_PAYLOAD_LENGTH(x, v) PASAHO_SET_BITFIELD((x)->word0, (v), 0, 16)
+
+/* Format the entire short info command */
+#define PASAHO_SINFO_FORMAT_CMD(offset, len) (((offset) << 16) | (len) | (PASAHO_SA_SHORT_INFO << 29))
+
+#define PASAHO_HDR_BITMASK_MAC (1 << 0) /* MAC present */
+#define PASAHO_HDR_BITMASK_VLAN (1 << 1) /* VLAN present */
+#define PASAHO_HDR_BITMASK_MPLS (1 << 2) /* MPLS present */
+#define PASAHO_HDR_BITMASK_IP (1 << 3) /* IP present */
+#define PASAHO_HDR_BITMASK_ESP (1 << 4) /* IPSEC/ESP present */
+#define PASAHO_HDR_BITMASK_AH (1 << 5) /* IPSEC/AH present */
+#define PASAHO_HDR_BITMASK_UDP (1 << 6) /* UDP present */
+#define PASAHO_HDR_BITMASK_UDPLITE (1 << 7) /* UDPLITE present */
+#define PASAHO_HDR_BITMASK_TCP (1 << 8) /* TCP present */
+#define PASAHO_HDR_BITMASK_GRE (1 << 9) /* GRE present */
+#define PASAHO_HDR_BITMASK_CUSTOM (1 << 10) /* Custom header */
+
+struct pasaho_next_route {
+ u32 word0;
+ u32 sw_info0;
+ u32 sw_info1;
+ u32 word1;
+};
+
+/*
+ * Sets the N bit which indicates the next command
+ * should be executed prior to the route command
+ */
+#define PASAHO_SET_N(x,v) PASAHO_SET_BITFIELD((x)->word0, (v), 28, 1)
+
+/*
+ * Sets the E bit which indicates the extended
+ * parameters (packet type) are present for SRIO
+ */
+#define PASAHO_SET_E(x,v) PASAHO_SET_BITFIELD((x)->word0, (v), 27, 1)
+
+/* Sets the destination of the route */
+#define PASAHO_SET_DEST(x,v) PASAHO_SET_BITFIELD((x)->word0, (v), 24, 3)
+
+/* Specifies the flow to use for packets sent to the host */
+#define PASAHO_SET_FLOW(x,v) PASAHO_SET_BITFIELD((x)->word0, (v), 16, 8)
+
+/* Specifies the queue to use for packets sent to the host */
+#define PASAHO_SET_QUEUE(x,v) PASAHO_SET_BITFIELD((x)->word0, (v), 0, 16)
+
+/* Specifies the packet type to use for packets sent to the SRIO */
+#define PASAHO_SET_PKTTYPE(x,v) PASAHO_SET_BITFIELD((x)->word1, (v), 24, 8)
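+
+/*
+ * Illustrative sketch of building a next-route block with the helpers above.
+ * The helper is an example only; the destination code, flow and queue values
+ * are hardware and driver specific.
+ */
+static inline void pasaho_example_next_route(struct pasaho_next_route *nr,
+					     u8 dest, u8 flow, u16 queue,
+					     u32 swinfo0)
+{
+	nr->word0 = 0;
+	nr->word1 = 0;
+	nr->sw_info0 = swinfo0;				/* returned to the host in swinfo0 */
+	nr->sw_info1 = 0;
+	PASAHO_SET_CMDID(nr, PASAHO_PAMOD_NROUTE);	/* mark the block as a next-route command */
+	PASAHO_SET_DEST(nr, dest);			/* route destination code */
+	PASAHO_SET_FLOW(nr, flow);			/* CPPI flow for a host-bound packet */
+	PASAHO_SET_QUEUE(nr, queue);			/* destination queue */
+}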
+
+struct pasaho_com_chk_crc {
+ u32 word0; /* PASAHO_chksum_command_macros */
+ u32 word1; /* PASAHO_chksum_command_macros */
+ u32 word2; /* PASAHO_chksum_command_macros */
+};
+
+/*
+ * Sets the negative 0 flag - if set, a
+ * checksum computed as 0 will be sent as 0xffff
+ */
+#define PASAHO_CHKCRC_SET_NEG0(x,v) PASAHO_SET_BITFIELD((x)->word0, (v), 23, 1)
+
+/* Sets the optional flags of the CRC/Checksum command */
+#define PASAHO_CHKCRC_SET_CTRL(x,v) PASAHO_SET_BITFIELD((x)->word0, (v), 16, 8)
+
+/* Sets the start offset of the checksum/crc */
+#define PASAHO_CHKCRC_SET_START(x,v) PASAHO_SET_BITFIELD((x)->word0, (v), 0, 16)
+
+/* Sets the length of the checksum/crc */
+#define PASAHO_CHKCRC_SET_LEN(x,v) PASAHO_SET_BITFIELD((x)->word1, (v), 16, 16)
+
+/* Sets the offset to where to paste the checksum/crc into the packet */
+#define PASAHO_CHKCRC_SET_RESULT_OFF(x,v) PASAHO_SET_BITFIELD((x)->word1, (v), 0, 16)
+
+/* Sets the initial value of the checksum/crc */
+#define PASAHO_CHKCRC_SET_INITVAL(x,v) PASAHO_SET_BITFIELD((x)->word2, (v), 16, 16)
+
+#define PASAHO_BPATCH_MAX_PATCH_WORDS 4
+
+struct pasaho_com_blind_patch {
+ u32 word0;
+ u32 patch[PASAHO_BPATCH_MAX_PATCH_WORDS];
+};
+
+
+/* Sets the number of bytes to patch */
+#define PASAHO_BPATCH_SET_PATCH_NBYTES(x,v) PASAHO_SET_BITFIELD((x)->word0, v, 24, 5)
+
+/* Sets the size of the command in 32 bit word units */
+#define PASAHO_BPATCH_SET_PATCH_CMDSIZE(x,v) PASAHO_SET_BITFIELD((x)->word0, v, 20, 4)
+
+/*
+ * Sets the overwrite flag. If set the patch will
+ * overwrite existing packet data, otherwise data is inserted
+ */
+#define PASAHO_BPATCH_SET_OVERWRITE(x,v) PASAHO_SET_BITFIELD((x)->word0, v, 19, 1)
+
+/* Sets the offset to the start of the patch */
+#define PASAHO_BPATCH_SET_OFFSET(x,v) PASAHO_SET_BITFIELD((x)->word0, v, 0, 16)
+
+/* Sets the patch byte at the given byte index within the patch data */
+#define PASAHO_BPATCH_SET_PATCH_BYTE(x, byteNum, byte) (x)->patch[(byteNum) >> 2] = \
+ PASAHO_SET_BITFIELD((x)->patch[(byteNum) >> 2], byte, ((3 - (byteNum & 0x3)) << 3), 8)
+
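+/*
+ * Illustrative sketch of loading patch bytes with the helper above (the
+ * function is an example only): bytes are packed most-significant-first
+ * within each 32-bit patch word.
+ */
+static inline void pasaho_example_fill_patch(struct pasaho_com_blind_patch *bp,
+					     const u8 *data, unsigned int len)
+{
+	unsigned int i;
+
+	for (i = 0; i < len && i < PASAHO_BPATCH_MAX_PATCH_WORDS * 4; i++)
+		PASAHO_BPATCH_SET_PATCH_BYTE(bp, i, data[i]);
+}
+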
+
+struct pasaho_report_timestamp {
+ u32 word0;
+ u32 sw_info0;
+};
+
+/* Specifies the flow to use for report packets sent to the host */
+
+#define PASAHO_SET_REPORT_FLOW(x,v) PASAHO_SET_BITFIELD((x)->word0, (v), 16, 8)
+
+/* Specifies the queue to use for report packets sent to the host */
+#define PASAHO_SET_REPORT_QUEUE(x,v) PASAHO_SET_BITFIELD((x)->word0, (v), 0, 16)
+
+struct pasaho_ip_frag {
+ u32 word0;
+};
+
+/* Set sub-command code to indicate IP Fragmentation command */
+#define PASAHO_SET_SUB_CODE_IP_FRAG(x) PASAHO_SET_BITFIELD((x)->word0, PASAHO_SUB_CMD_IP_FRAG, 24, 5)
+
+/* Specifies the sub-command code */
+#define PASAHO_SET_SUB_CODE(x,v) PASAHO_SET_BITFIELD((x)->word0, (v), 24, 5)
+
+/* Specifies the offset to the IP header to be fragmented */
+#define PASAHO_SET_IP_OFFSET(x,v) PASAHO_SET_BITFIELD((x)->word0, (v), 16, 8)
+
+/* Specifies the MTU size */
+#define PASAHO_SET_MTU_SIZE(x,v) PASAHO_SET_BITFIELD((x)->word0, (v), 0, 16)
+
+#endif /* __KERNEL__ */
+#endif /* KEYSTONE_PASAHOST_H */