/**
 * iommu_fill_pdir - Insert coalesced scatter/gather chunks into the I/O Pdir.
 * @ioc: The I/O Controller.
 * @startsg: The scatter/gather list of coalesced chunks.
 * @nents: The number of entries in the scatter/gather list.
 * @hint: The DMA Hint.
 * @iommu_io_pdir_entry: IOMMU-specific callback that writes a single
 *	I/O Pdir entry.
 *
 * This function inserts the coalesced scatter/gather list chunks into the
 * I/O Controller's I/O Pdir.
 */
static inline unsigned int
iommu_fill_pdir(struct ioc *ioc, struct scatterlist *startsg, int nents,
		unsigned long hint,
		void (*iommu_io_pdir_entry)(u64 *, space_t, unsigned long,
					    unsigned long))
{
	struct scatterlist *dma_sg = startsg;	/* pointer to current DMA */
	unsigned int n_mappings = 0;
	unsigned long dma_offset = 0, dma_len = 0;
	u64 *pdirp = NULL;

	/* Horrible hack. For efficiency's sake, dma_sg starts one
	 * entry below the true start (it is immediately incremented
	 * in the loop). */
	dma_sg--;

	while (nents-- > 0) {
		unsigned long vaddr;
		long size;

		DBG_RUN_SG(" %d : %08lx/%05x %08lx/%05x\n", nents,
			   (unsigned long)sg_dma_address(startsg),
			   sg_dma_len(startsg),
			   sg_virt_addr(startsg), startsg->length
		);

		/*
		** Look for the start of a new DMA stream
		*/

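		/* iommu_coalesce_chunks() marked the head of each stream by
		 * setting PIDE_FLAG in sg_dma_address() alongside the
		 * allocated I/O virtual address; decode that marker here. */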
		if (sg_dma_address(startsg) & PIDE_FLAG) {
			u32 pide = sg_dma_address(startsg) & ~PIDE_FLAG;

			BUG_ON(pdirp && (dma_len != sg_dma_len(dma_sg)));

			dma_sg++;

			dma_len = sg_dma_len(startsg);
			sg_dma_len(startsg) = 0;
			dma_offset = (unsigned long) pide & ~IOVP_MASK;
			n_mappings++;
#if defined(ZX1_SUPPORT)
			/* Pluto IOMMU IO Virt Address is not zero based */
			sg_dma_address(dma_sg) = pide | ioc->ibase;
#else
			/* SBA, ccio, and dino are zero based.
			 * Trying to save a few CPU cycles for most users.
			 */
			sg_dma_address(dma_sg) = pide;
#endif
			pdirp = &(ioc->pdir_base[pide >> IOVP_SHIFT]);
			prefetchw(pdirp);
		}

		BUG_ON(pdirp == NULL);

		vaddr = sg_virt_addr(startsg);
		sg_dma_len(dma_sg) += startsg->length;
		size = startsg->length + dma_offset;
		dma_offset = 0;
#ifdef IOMMU_MAP_STATS
		ioc->msg_pages += startsg->length >> IOVP_SHIFT;
#endif
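		/* Write one pdir entry for each IOVP_SIZE page spanned by
		 * this SG element (plus any leading sub-page offset). */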
		do {
			iommu_io_pdir_entry(pdirp, KERNEL_SPACE,
					    vaddr, hint);
			vaddr += IOVP_SIZE;
			size -= IOVP_SIZE;
			pdirp++;
		} while (unlikely(size > 0));
		startsg++;
	}
	return n_mappings;
}
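
/*
 * For illustration only: a minimal sketch of the kind of callback
 * iommu_fill_pdir() expects.  It is loosely modelled on the real
 * implementations (sba_io_pdir_entry()/ccio_io_pdir_entry()), which
 * additionally encode the space id and coherence index; the
 * EXAMPLE_PDIR_VALID_BIT below is a hypothetical stand-in for the
 * hardware's valid bit.
 */
#if 0	/* sketch, not compiled */
static void example_io_pdir_entry(u64 *pdir_ptr, space_t sid,
				  unsigned long vba, unsigned long hint)
{
	/* Physical page frame backing the kernel virtual address. */
	u64 pa = virt_to_phys((void *)vba) & IOVP_MASK;

	/* Merge in the DMA hint bits and mark the entry valid. */
	*pdir_ptr = cpu_to_le64(pa | hint | EXAMPLE_PDIR_VALID_BIT);
}
#endif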


/*
** First pass is to walk the SG list and determine where the breaks are
** in the DMA stream.  Allocates PDIR entries but does not fill them.
** Returns the number of DMA chunks.
**
** Doing the fill separately from the coalescing/allocation keeps the
** code simpler.  A future enhancement could make a single pass through
** the sglist do both.
*/

static inline unsigned int
iommu_coalesce_chunks(struct ioc *ioc, struct scatterlist *startsg, int nents,
		      int (*iommu_alloc_range)(struct ioc *, size_t))
{
	struct scatterlist *contig_sg;	   /* contig chunk head */
	unsigned long dma_offset, dma_len; /* start/len of DMA stream */
	unsigned int n_mappings = 0;

	while (nents > 0) {

		/*
		** Prepare for first/next DMA stream
		*/
		contig_sg = startsg;
		dma_len = startsg->length;
		dma_offset = sg_virt_addr(startsg) & ~IOVP_MASK;
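		/* dma_offset preserves the first element's sub-page offset
		 * so the device sees the buffer at the same offset within
		 * the I/O virtual page. */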

		/* PARANOID: clear entries */
		sg_dma_address(startsg) = 0;
		sg_dma_len(startsg) = 0;

		/*
		** This loop terminates one iteration "early" since
		** it's always looking one "ahead".
		*/
		while (--nents > 0) {
			unsigned long prevstartsg_end;

			prevstartsg_end = sg_virt_addr(startsg) +
					  startsg->length;

			startsg++;

			/* PARANOID: clear entries */
			sg_dma_address(startsg) = 0;
			sg_dma_len(startsg) = 0;

			/*
			** First make sure current dma stream won't
			** exceed DMA_CHUNK_SIZE if we coalesce the
			** next entry.
			*/
			if (unlikely(ROUNDUP(dma_len + dma_offset + startsg->length,
					     IOVP_SIZE) > DMA_CHUNK_SIZE))
				break;

			/*
			** Next see if we can append the next chunk (i.e.
			** the previous chunk must end on a page boundary
			** and the next must begin on one).
			*/
			if (unlikely(((prevstartsg_end | sg_virt_addr(startsg)) & ~PAGE_MASK) != 0))
				break;

			dma_len += startsg->length;
		}

		/*
		** End of DMA Stream
		** Terminate last VCONTIG block.
		** Allocate space for DMA stream.
		*/
		sg_dma_len(contig_sg) = dma_len;
		dma_len = ROUNDUP(dma_len + dma_offset, IOVP_SIZE);
		sg_dma_address(contig_sg) =
			PIDE_FLAG
			| (iommu_alloc_range(ioc, dma_len) << IOVP_SHIFT)
			| dma_offset;
		n_mappings++;
	}

	return n_mappings;
}
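
/*
 * For illustration only: a minimal sketch of how a driver's map_sg
 * path might run the two passes back to back, in the spirit of the
 * real callers (ccio_map_sg()/sba_map_sg()).  The names
 * example_map_sg, my_alloc_range and my_io_pdir_entry are hypothetical
 * stand-ins for a driver's own helpers.
 */
#if 0	/* sketch, not compiled */
static int example_map_sg(struct ioc *ioc, struct scatterlist *sglist,
			  int nents, unsigned long hint)
{
	int coalesced, filled;

	/* Pass 1: find the stream breaks and allocate pdir space. */
	coalesced = iommu_coalesce_chunks(ioc, sglist, nents,
					  my_alloc_range);

	/* Pass 2: write one pdir entry per page of every stream. */
	filled = iommu_fill_pdir(ioc, sglist, nents, hint,
				 my_io_pdir_entry);

	/* Both passes must agree on the number of DMA streams. */
	BUG_ON(coalesced != filled);

	return filled;	/* number of coalesced DMA chunks */
}
#endif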