/*
 * CAAM/SEC 4.x functions for using scatterlists in caam driver
 *
 * Copyright 2008-2011 Freescale Semiconductor, Inc.
 *
 */

struct sec4_sg_entry;

/*
 * convert single dma address to h/w link table format
 */
static inline void dma_to_sec4_sg_one(struct sec4_sg_entry *sec4_sg_ptr,
                                      dma_addr_t dma, u32 len, u32 offset)
{
        sec4_sg_ptr->ptr = dma;
        sec4_sg_ptr->len = len;
        sec4_sg_ptr->reserved = 0;
        sec4_sg_ptr->buf_pool_id = 0;
        sec4_sg_ptr->offset = offset;
#ifdef DEBUG
        print_hex_dump(KERN_ERR, "sec4_sg_ptr@: ",
                       DUMP_PREFIX_ADDRESS, 16, 4, sec4_sg_ptr,
                       sizeof(struct sec4_sg_entry), 1);
#endif
}
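
/*
 * Illustrative sketch, not part of this file's API: populate one link table
 * entry from a single contiguous buffer. The buffer, its length and the
 * destination table slot are hypothetical caller-provided values; checking
 * and tearing down the mapping is left to the caller.
 */
static inline int example_buf_to_sec4_sg(struct device *dev, void *buf,
                                         u32 buflen,
                                         struct sec4_sg_entry *sec4_sg_ptr)
{
        dma_addr_t buf_dma;

        buf_dma = dma_map_single(dev, buf, buflen, DMA_TO_DEVICE);
        if (dma_mapping_error(dev, buf_dma))
                return -ENOMEM;

        /* one entry, no offset into the buffer */
        dma_to_sec4_sg_one(sec4_sg_ptr, buf_dma, buflen, 0);
        return 0;
}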

/*
 * convert scatterlist to h/w link table format
 * but does not set the final bit; instead, returns a pointer to the last entry
 */
static inline struct sec4_sg_entry *
sg_to_sec4_sg(struct scatterlist *sg, int sg_count,
              struct sec4_sg_entry *sec4_sg_ptr, u32 offset)
{
        while (sg_count) {
                dma_to_sec4_sg_one(sec4_sg_ptr, sg_dma_address(sg),
                                   sg_dma_len(sg), offset);
                sec4_sg_ptr++;
                sg = scatterwalk_sg_next(sg);
                sg_count--;
        }
        return sec4_sg_ptr - 1;
}

/*
 * convert scatterlist to h/w link table format
 * scatterlist must have been previously dma mapped
 */
static inline void sg_to_sec4_sg_last(struct scatterlist *sg, int sg_count,
                                      struct sec4_sg_entry *sec4_sg_ptr,
                                      u32 offset)
{
        sec4_sg_ptr = sg_to_sec4_sg(sg, sg_count, sec4_sg_ptr, offset);
        sec4_sg_ptr->len |= SEC4_SG_LEN_FIN;
}
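
/*
 * Illustrative sketch, not part of this file's API: DMA-map a (non-chained)
 * scatterlist and lay it out as a link table, marking the last entry as
 * final. The table is assumed to be a caller-allocated, DMA-able array of at
 * least sg_nents entries; the helper name is hypothetical.
 */
static inline int example_map_sg_to_sec4_table(struct device *dev,
                                               struct scatterlist *sg,
                                               int sg_nents,
                                               struct sec4_sg_entry *tbl)
{
        if (!dma_map_sg(dev, sg, sg_nents, DMA_TO_DEVICE))
                return -ENOMEM;

        /* sg is now mapped, as sg_to_sec4_sg_last() requires */
        sg_to_sec4_sg_last(sg, sg_nents, tbl, 0);
        return 0;
}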

/* count number of elements in scatterlist */
static inline int __sg_count(struct scatterlist *sg_list, int nbytes,
                             bool *chained)
{
        struct scatterlist *sg = sg_list;
        int sg_nents = 0;

        while (nbytes > 0) {
                sg_nents++;
                nbytes -= sg->length;
                /* a zero-length next entry indicates a chained scatterlist */
                if (!sg_is_last(sg) && (sg + 1)->length == 0)
                        *chained = true;
                sg = scatterwalk_sg_next(sg);
        }

        return sg_nents;
}

/* count scatterlist entries, but return 0 when there is only one */
static inline int sg_count(struct scatterlist *sg_list, int nbytes,
                           bool *chained)
{
        int sg_nents = __sg_count(sg_list, nbytes, chained);

        if (likely(sg_nents == 1))
                return 0;

        return sg_nents;
}
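
/*
 * Illustrative sketch, not part of this file's API: use sg_count() to size a
 * link table for a request's source scatterlist. A count of 0 means a single
 * entry, so the hardware can be pointed at the buffer directly and no table
 * is needed. Assumes <linux/slab.h> is available to the including file; the
 * helper name is hypothetical.
 */
static inline struct sec4_sg_entry *
example_alloc_link_table(struct scatterlist *src, int nbytes, bool *chained,
                         int *src_nents)
{
        *src_nents = sg_count(src, nbytes, chained);
        if (!*src_nents)
                return NULL;    /* single entry: no link table needed */

        /* a NULL return with *src_nents != 0 means the allocation failed */
        return kmalloc(*src_nents * sizeof(struct sec4_sg_entry), GFP_KERNEL);
}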

/* map a scatterlist for DMA, walking it entry by entry when it is chained */
static int dma_map_sg_chained(struct device *dev, struct scatterlist *sg,
                              unsigned int nents, enum dma_data_direction dir,
                              bool chained)
{
        if (unlikely(chained)) {
                int i;
                for (i = 0; i < nents; i++) {
                        dma_map_sg(dev, sg, 1, dir);
                        sg = scatterwalk_sg_next(sg);
                }
        } else {
                dma_map_sg(dev, sg, nents, dir);
        }
        return nents;
}

/* undo dma_map_sg_chained(), using the same nents, dir and chained values */
static int dma_unmap_sg_chained(struct device *dev, struct scatterlist *sg,
                                unsigned int nents, enum dma_data_direction dir,
                                bool chained)
{
        if (unlikely(chained)) {
                int i;
                for (i = 0; i < nents; i++) {
                        dma_unmap_sg(dev, sg, 1, dir);
                        sg = scatterwalk_sg_next(sg);
                }
        } else {
                dma_unmap_sg(dev, sg, nents, dir);
        }
        return nents;
}
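
/*
 * Illustrative sketch, not part of this file's API: typical pairing of the
 * chained map/unmap helpers around a request's source scatterlist, using
 * sg_count() to detect chaining. The helper name and DMA direction are
 * hypothetical.
 */
static inline void example_map_then_unmap(struct device *dev,
                                          struct scatterlist *src, int nbytes)
{
        bool chained = false;
        int src_nents = sg_count(src, nbytes, &chained);

        if (!src_nents)
                src_nents = 1;  /* sg_count() returns 0 for a single entry */

        dma_map_sg_chained(dev, src, src_nents, DMA_TO_DEVICE, chained);

        /* ... hand the mapped list to the hardware and wait for it ... */

        dma_unmap_sg_chained(dev, src, src_nents, DMA_TO_DEVICE, chained);
}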

/* Map SG page in kernel virtual address space and copy */
static inline void sg_map_copy(u8 *dest, struct scatterlist *sg,
                               int len, int offset)
{
        u8 *mapped_addr;

        /*
         * The page may be a user-space page pinned with get_user_pages(),
         * so it must be kmapped before use and kunmapped afterwards.
         */
        mapped_addr = kmap_atomic(sg_page(sg));
        memcpy(dest, mapped_addr + offset, len);
        kunmap_atomic(mapped_addr);
}

/* Copy the first len bytes of sg to dest */
static inline void sg_copy(u8 *dest, struct scatterlist *sg, unsigned int len)
{
        struct scatterlist *current_sg = sg;
        int cpy_index = 0, next_cpy_index = current_sg->length;

        while (next_cpy_index < len) {
                sg_map_copy(dest + cpy_index, current_sg, current_sg->length,
                            current_sg->offset);
                current_sg = scatterwalk_sg_next(current_sg);
                cpy_index = next_cpy_index;
                next_cpy_index += current_sg->length;
        }
        if (cpy_index < len)
                sg_map_copy(dest + cpy_index, current_sg, len - cpy_index,
                            current_sg->offset);
}
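
/*
 * Illustrative sketch, not part of this file's API: when a request is small,
 * copy it into a contiguous bounce buffer instead of building a link table.
 * The bounce buffer, its size and the helper name are hypothetical and
 * supplied by the caller.
 */
static inline bool example_bounce_small_req(u8 *bounce,
                                            unsigned int bounce_len,
                                            struct scatterlist *src,
                                            unsigned int nbytes)
{
        if (nbytes > bounce_len)
                return false;

        /* walks src from its first entry, kmapping each page before copying */
        sg_copy(bounce, src, nbytes);
        return true;
}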

/* Copy sg data, from to_skip to end, to dest */
static inline void sg_copy_part(u8 *dest, struct scatterlist *sg,
                                int to_skip, unsigned int end)
{
        struct scatterlist *current_sg = sg;
        int sg_index, cpy_index, offset;

        sg_index = current_sg->length;
        while (sg_index <= to_skip) {
                current_sg = scatterwalk_sg_next(current_sg);
                sg_index += current_sg->length;
        }
        cpy_index = sg_index - to_skip;
        offset = current_sg->offset + current_sg->length - cpy_index;
        sg_map_copy(dest, current_sg, cpy_index, offset);
        if (end - sg_index) {
                current_sg = scatterwalk_sg_next(current_sg);
                sg_copy(dest + cpy_index, current_sg, end - sg_index);
        }
}
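
/*
 * Illustrative sketch, not part of this file's API: pull the last icv_len
 * bytes out of a completed request's scatterlist, e.g. to compare an ICV in
 * software. total is the overall data length; names are hypothetical.
 */
static inline void example_extract_tail(u8 *icv, struct scatterlist *sg,
                                        unsigned int total,
                                        unsigned int icv_len)
{
        /* skip everything before the tail, then copy up to total */
        sg_copy_part(icv, sg, total - icv_len, total);
}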