#include <linux/prefetch.h>

/**
 * iommu_fill_pdir - Insert coalesced scatter/gather chunks into the I/O Pdir.
 * @ioc: The I/O Controller.
 * @startsg: The scatter/gather list of coalesced chunks.
 * @nents: The number of entries in the scatter/gather list.
 * @hint: The DMA Hint.
 * @iommu_io_pdir_entry: Callback that writes a single I/O Pdir entry.
 *
 * This function inserts the coalesced scatter/gather list chunks into the
 * I/O Controller's I/O Pdir.
 */
static inline unsigned int
iommu_fill_pdir(struct ioc *ioc, struct scatterlist *startsg, int nents,
		unsigned long hint,
		void (*iommu_io_pdir_entry)(u64 *, space_t, unsigned long,
					    unsigned long))
{
	struct scatterlist *dma_sg = startsg;	/* pointer to current DMA */
	unsigned int n_mappings = 0;
	unsigned long dma_offset = 0, dma_len = 0;
	u64 *pdirp = NULL;

	/* Horrible hack. For efficiency's sake, dma_sg starts one
	 * entry below the true start (it is immediately incremented
	 * in the loop) */
	dma_sg--;

	while (nents-- > 0) {
		unsigned long vaddr;
		long size;

		DBG_RUN_SG(" %d : %08lx/%05x %p/%05x\n", nents,
			   (unsigned long)sg_dma_address(startsg),
			   sg_dma_len(startsg),
			   sg_virt(startsg), startsg->length);

		/*
		** Look for the start of a new DMA stream
		*/

		if (sg_dma_address(startsg) & PIDE_FLAG) {
			u32 pide = sg_dma_address(startsg) & ~PIDE_FLAG;

			BUG_ON(pdirp && (dma_len != sg_dma_len(dma_sg)));

			dma_sg++;

			dma_len = sg_dma_len(startsg);
			sg_dma_len(startsg) = 0;
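			/* Byte offset of the buffer within its first IOVP
			 * page, as packed in by iommu_coalesce_chunks(). */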
			dma_offset = (unsigned long) pide & ~IOVP_MASK;
			n_mappings++;
#if defined(ZX1_SUPPORT)
			/* Pluto IOMMU IO Virt Address is not zero based */
			sg_dma_address(dma_sg) = pide | ioc->ibase;
#else
			/* SBA, ccio, and dino are zero based.
			 * Trying to save a few CPU cycles for most users.
			 */
			sg_dma_address(dma_sg) = pide;
#endif
			pdirp = &(ioc->pdir_base[pide >> IOVP_SHIFT]);
			prefetchw(pdirp);
		}

		BUG_ON(pdirp == NULL);

		vaddr = (unsigned long)sg_virt(startsg);
		sg_dma_len(dma_sg) += startsg->length;
		size = startsg->length + dma_offset;
		dma_offset = 0;
#ifdef IOMMU_MAP_STATS
		ioc->msg_pages += startsg->length >> IOVP_SHIFT;
#endif
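		/* Write one I/O Pdir entry for every IOVP_SIZE page spanned
		 * by this chunk (dma_offset is non-zero only for the first
		 * chunk of a stream). */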
		do {
			iommu_io_pdir_entry(pdirp, KERNEL_SPACE,
					    vaddr, hint);
			vaddr += IOVP_SIZE;
			size -= IOVP_SIZE;
			pdirp++;
		} while (unlikely(size > 0));
		startsg++;
	}
	return n_mappings;
}


/*
** First pass is to walk the SG list and determine where the breaks are
** in the DMA stream. Allocates PDIR entries but does not fill them.
** Returns the number of DMA chunks.
**
** Doing the fill separately from the coalescing/allocation keeps the
** code simpler. A future enhancement could make one pass through
** the sglist do both.
*/

static inline unsigned int
iommu_coalesce_chunks(struct ioc *ioc, struct device *dev,
		      struct scatterlist *startsg, int nents,
		      int (*iommu_alloc_range)(struct ioc *, struct device *, size_t))
{
	struct scatterlist *contig_sg;	   /* contig chunk head */
	unsigned long dma_offset, dma_len; /* start/len of DMA stream */
	unsigned int n_mappings = 0;
	unsigned int max_seg_size = dma_get_max_seg_size(dev);

	while (nents > 0) {

		/*
		** Prepare for first/next DMA stream
		*/
		contig_sg = startsg;
		dma_len = startsg->length;
		dma_offset = startsg->offset;

		/* PARANOID: clear entries */
		sg_dma_address(startsg) = 0;
		sg_dma_len(startsg) = 0;

		/*
		** This loop terminates one iteration "early" since
		** it's always looking one "ahead".
		*/
		while (--nents > 0) {
			unsigned long prev_end, sg_start;

			prev_end = (unsigned long)sg_virt(startsg) +
				startsg->length;

			startsg++;
			sg_start = (unsigned long)sg_virt(startsg);

			/* PARANOID: clear entries */
			sg_dma_address(startsg) = 0;
			sg_dma_len(startsg) = 0;

			/*
			** First make sure current dma stream won't
			** exceed DMA_CHUNK_SIZE if we coalesce the
			** next entry.
			*/
			if (unlikely(ALIGN(dma_len + dma_offset + startsg->length,
					   IOVP_SIZE) > DMA_CHUNK_SIZE))
				break;

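			/* Also stop if appending this entry would exceed the
			 * device's maximum DMA segment size. */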
			if (startsg->length + dma_len > max_seg_size)
				break;

			/*
			 * Next see if we can append the next chunk: the
			 * previous entry must end exactly on a page
			 * boundary and the next entry must begin at that
			 * same address, i.e. the two are virtually
			 * contiguous across a page break.
			 */
			if (unlikely((prev_end != sg_start) ||
				     ((prev_end | sg_start) & ~PAGE_MASK)))
				break;

			dma_len += startsg->length;
		}

		/*
		** End of DMA Stream
		** Terminate last VCONTIG block.
		** Allocate space for DMA stream.
		*/
		sg_dma_len(contig_sg) = dma_len;
		dma_len = ALIGN(dma_len + dma_offset, IOVP_SIZE);
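		/* Tag the head of the stream with PIDE_FLAG and pack in the
		 * allocated pdir range (shifted to an IO virtual address)
		 * plus the offset within the first page; iommu_fill_pdir()
		 * decodes this to find the pdir entries it must write. */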
		sg_dma_address(contig_sg) =
			PIDE_FLAG
			| (iommu_alloc_range(ioc, dev, dma_len) << IOVP_SHIFT)
			| dma_offset;
		n_mappings++;
	}

	return n_mappings;
}
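
/*
** Usage sketch (illustrative only, not part of this header): a caller's
** map_sg path is expected to run the two passes back to back -- coalesce
** first, then fill.  The names sba_alloc_range() and sba_io_pdir_entry()
** below stand in for whatever range allocator and pdir-entry writer the
** calling driver supplies.
**
**	// Pass 1: find the stream breaks and allocate pdir ranges.
**	coalesced = iommu_coalesce_chunks(ioc, dev, sglist, nents,
**					  sba_alloc_range);
**
**	// Pass 2: write one pdir entry per IOVP page of each chunk.
**	filled = iommu_fill_pdir(ioc, sglist, nents, 0, sba_io_pdir_entry);
**
**	// Every stream counted in pass 1 must have been filled in pass 2.
**	BUG_ON(coalesced != filled);
*/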