/*
 * Copyright (C) 2007, 2008, Marvell International Ltd.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
 */

#ifndef MV_XOR_H
#define MV_XOR_H

#include <linux/types.h>
#include <linux/io.h>
#include <linux/dmaengine.h>
#include <linux/interrupt.h>

#define USE_TIMER
#define MV_XOR_SLOT_SIZE		64
#define MV_XOR_THRESHOLD		1

#define XOR_OPERATION_MODE_XOR		0
#define XOR_OPERATION_MODE_MEMCPY	2
#define XOR_OPERATION_MODE_MEMSET	4

#define XOR_CURR_DESC(chan)	(chan->mmr_base + 0x210 + (chan->idx * 4))
#define XOR_NEXT_DESC(chan)	(chan->mmr_base + 0x200 + (chan->idx * 4))
#define XOR_BYTE_COUNT(chan)	(chan->mmr_base + 0x220 + (chan->idx * 4))
#define XOR_DEST_POINTER(chan)	(chan->mmr_base + 0x2B0 + (chan->idx * 4))
#define XOR_BLOCK_SIZE(chan)	(chan->mmr_base + 0x2C0 + (chan->idx * 4))
#define XOR_INIT_VALUE_LOW(chan)	(chan->mmr_base + 0x2E0)
#define XOR_INIT_VALUE_HIGH(chan)	(chan->mmr_base + 0x2E4)

#define XOR_CONFIG(chan)	(chan->mmr_base + 0x10 + (chan->idx * 4))
#define XOR_ACTIVATION(chan)	(chan->mmr_base + 0x20 + (chan->idx * 4))
#define XOR_INTR_CAUSE(chan)	(chan->mmr_base + 0x30)
#define XOR_INTR_MASK(chan)	(chan->mmr_base + 0x40)
#define XOR_ERROR_CAUSE(chan)	(chan->mmr_base + 0x50)
#define XOR_ERROR_ADDR(chan)	(chan->mmr_base + 0x60)
#define XOR_INTR_MASK_VALUE	0x3F5

#define WINDOW_BASE(w)		(0x250 + ((w) << 2))
#define WINDOW_SIZE(w)		(0x270 + ((w) << 2))
#define WINDOW_REMAP_HIGH(w)	(0x290 + ((w) << 2))
#define WINDOW_BAR_ENABLE(chan)	(0x240 + ((chan) << 2))

struct mv_xor_shared_private {
	void __iomem	*xor_base;
	void __iomem	*xor_high_base;
};
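
/*
 * Illustrative sketch (not part of the original header), using a
 * hypothetical helper name: the WINDOW_* macros above yield plain register
 * offsets, so they are added to the ioremapped xor_base kept in struct
 * mv_xor_shared_private before being handed to writel().
 */
static inline void example_mv_xor_set_window(struct mv_xor_shared_private *msp,
					     int win, u32 base, u32 size)
{
	/* Program address decoding window 'win' of the XOR engine. */
	writel(base, msp->xor_base + WINDOW_BASE(win));
	writel(size, msp->xor_base + WINDOW_SIZE(win));
}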

/**
 * struct mv_xor_device - internal representation of a XOR device
 * @pdev: Platform device
 * @id: HW XOR Device selector
 * @dma_desc_pool: base of DMA descriptor region (DMA address)
 * @dma_desc_pool_virt: base of DMA descriptor region (CPU address)
 * @common: embedded struct dma_device
 * @shared: register region shared by all channels of the engine
 */
struct mv_xor_device {
	struct platform_device		*pdev;
	int				id;
	dma_addr_t			dma_desc_pool;
	void				*dma_desc_pool_virt;
	struct dma_device		common;
	struct mv_xor_shared_private	*shared;
};
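
/*
 * Illustrative sketch (not part of the original header): because 'common'
 * is embedded rather than pointed to, the enclosing mv_xor_device can be
 * recovered from a generic struct dma_device with container_of().  The
 * helper name below is hypothetical.
 */
static inline struct mv_xor_device *example_to_mv_xor_device(struct dma_device *dev)
{
	return container_of(dev, struct mv_xor_device, common);
}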

/**
 * struct mv_xor_chan - internal representation of a XOR channel
 * @pending: allows batching of hardware operations
 * @completed_cookie: identifier for the most recently completed operation
 * @lock: serializes enqueue/dequeue operations to the descriptors pool
 * @mmr_base: memory mapped register base
 * @idx: the index of the xor channel
 * @current_type: type of the operation currently being batched on the channel
 * @chain: device chain view of the descriptors
 * @completed_slots: slots completed by HW that still need to be acked
 * @device: parent device
 * @common: common dmaengine channel object members
 * @last_used: placeholder for allocation to continue from where it left off
 * @all_slots: complete domain of slots usable by the channel
 * @slots_allocated: records the actual size of the descriptor slot pool
 * @irq_tasklet: bottom half where mv_xor_slot_cleanup runs
 */
struct mv_xor_chan {
	int			pending;
	dma_cookie_t		completed_cookie;
	spinlock_t		lock; /* protects the descriptor slot pool */
	void __iomem		*mmr_base;
	unsigned int		idx;
	enum dma_transaction_type	current_type;
	struct list_head	chain;
	struct list_head	completed_slots;
	struct mv_xor_device	*device;
	struct dma_chan		common;
	struct mv_xor_desc_slot	*last_used;
	struct list_head	all_slots;
	int			slots_allocated;
	struct tasklet_struct	irq_tasklet;
#ifdef USE_TIMER
	unsigned long		cleanup_time;
	u32			current_on_last_cleanup;
	dma_cookie_t		is_complete_cookie;
#endif
};
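
/*
 * Illustrative sketch (not part of the original header), using hypothetical
 * helper names: the per-channel register macros above expand to ioremapped
 * addresses and can therefore be passed directly to readl()/writel(), and
 * the embedded 'common' member lets the channel be recovered from the
 * generic struct dma_chan handed out by the dmaengine core.
 */
static inline struct mv_xor_chan *example_to_mv_xor_chan(struct dma_chan *chan)
{
	return container_of(chan, struct mv_xor_chan, common);
}

static inline void example_mv_xor_unmask_interrupts(struct mv_xor_chan *chan)
{
	/* Unmask the interrupt sources selected by XOR_INTR_MASK_VALUE. */
	writel(XOR_INTR_MASK_VALUE, XOR_INTR_MASK(chan));
}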

/**
 * struct mv_xor_desc_slot - software descriptor
 * @slot_node: node on the mv_xor_chan.all_slots list
 * @chain_node: node on the mv_xor_chan.chain list
 * @completed_node: node on the mv_xor_chan.completed_slots list
 * @type: type of the transaction carried by this slot (memcpy, memset, xor)
 * @hw_desc: virtual address of the hardware descriptor chain
 * @group_head: first operation in a transaction
 * @slot_cnt: total slots used in a transaction (group of operations), e.g.
 *	when a transfer length exceeds the supported hw maximum
 * @slots_per_op: number of slots per operation
 * @idx: pool index
 * @unmap_src_cnt: number of xor sources
 * @value: fill pattern for memset operations
 * @unmap_len: transaction byte count
 * @async_tx: support for the async_tx api
 * @xor_check_result: result of zero sum
 * @crc32_result: result of the CRC-32 calculation
 */
struct mv_xor_desc_slot {
	struct list_head	slot_node;
	struct list_head	chain_node;
	struct list_head	completed_node;
	enum dma_transaction_type	type;
	void			*hw_desc;
	struct mv_xor_desc_slot	*group_head;
	u16			slot_cnt;
	u16			slots_per_op;
	u16			idx;
	u16			unmap_src_cnt;
	u32			value;
	size_t			unmap_len;
	struct dma_async_tx_descriptor	async_tx;
	union {
		u32		*xor_check_result;
		u32		*crc32_result;
	};
#ifdef USE_TIMER
	unsigned long		arrival_time;
	struct timer_list	timeout;
#endif
};
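
/*
 * Illustrative sketch (not part of the original header): the dmaengine core
 * works with struct dma_async_tx_descriptor pointers, and the owning
 * software descriptor can be recovered with container_of().  The helper
 * name below is hypothetical.
 */
static inline struct mv_xor_desc_slot *
example_txd_to_mv_xor_slot(struct dma_async_tx_descriptor *txd)
{
	return container_of(txd, struct mv_xor_desc_slot, async_tx);
}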

/* This structure describes the hardware (in-memory) XOR descriptor; its size is 64 bytes. */
struct mv_xor_desc {
	u32 status;		/* descriptor execution status */
	u32 crc32_result;	/* result of CRC-32 calculation */
	u32 desc_command;	/* type of operation to be carried out */
	u32 phy_next_desc;	/* next descriptor address pointer */
	u32 byte_count;		/* size of src/dst blocks in bytes */
	u32 phy_dest_addr;	/* destination block address */
	u32 phy_src_addr[8];	/* source block addresses */
	u32 reserved0;
	u32 reserved1;
};
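
/*
 * Illustrative sketch (not part of the original header): a minimal example
 * of filling a single hardware descriptor for a copy of 'len' bytes.  The
 * desc_command encoding is hardware specific and deliberately left out;
 * the helper name is hypothetical.
 */
static inline void example_mv_xor_fill_desc(struct mv_xor_desc *desc,
					    dma_addr_t dest, dma_addr_t src,
					    u32 len)
{
	desc->status = 0;		/* not yet executed by the engine */
	desc->byte_count = len;		/* must not exceed XOR_MAX_BYTE_COUNT */
	desc->phy_dest_addr = dest;	/* destination block address */
	desc->phy_src_addr[0] = src;	/* single source for a plain copy */
	desc->phy_next_desc = 0;	/* terminate the descriptor chain */
}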

#define to_mv_sw_desc(addr_hw_desc)		\
	container_of(addr_hw_desc, struct mv_xor_desc_slot, hw_desc)

#define mv_hw_desc_slot_idx(hw_desc, idx)	\
	((void *)(((unsigned long)hw_desc) + ((idx) << 5)))

#define MV_XOR_MIN_BYTE_COUNT	(128)
#define XOR_MAX_BYTE_COUNT	((16 * 1024 * 1024) - 1)
#define MV_XOR_MAX_BYTE_COUNT	XOR_MAX_BYTE_COUNT
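
/*
 * Illustrative sketch (not part of the original header): a transfer longer
 * than MV_XOR_MAX_BYTE_COUNT has to be split into several hardware
 * descriptors; the helper name below is hypothetical.
 */
static inline int example_mv_xor_chunk_count(size_t len)
{
	/* One descriptor per MV_XOR_MAX_BYTE_COUNT-sized chunk, rounding up. */
	return DIV_ROUND_UP(len, MV_XOR_MAX_BYTE_COUNT);
}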

#endif