/*
 * Copyright (C) 2007 Jens Axboe <jens.axboe@oracle.com>
 *
 * Scatterlist handling helpers.
 *
 * This source code is licensed under the GNU General Public License,
 * Version 2. See the file COPYING for more details.
 */
#include <linux/export.h>
#include <linux/slab.h>
#include <linux/scatterlist.h>
#include <linux/highmem.h>
#include <linux/kmemleak.h>

/**
 * sg_next - return the next scatterlist entry in a list
 * @sg: The current sg entry
 *
 * Description:
 *   Usually the next entry will be @sg@ + 1, but if this sg element is part
 *   of a chained scatterlist, it could jump to the start of a new
 *   scatterlist array.
 *
 **/
struct scatterlist *sg_next(struct scatterlist *sg)
{
#ifdef CONFIG_DEBUG_SG
	BUG_ON(sg->sg_magic != SG_MAGIC);
#endif
	if (sg_is_last(sg))
		return NULL;

	sg++;
	if (unlikely(sg_is_chain(sg)))
		sg = sg_chain_ptr(sg);

	return sg;
}
EXPORT_SYMBOL(sg_next);
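
/*
 * Illustrative sketch (not part of the original file): because a list may
 * be chained, walk it with sg_next() rather than plain pointer arithmetic.
 * The 'table' and 'total' variables below are assumed caller state:
 *
 *	struct scatterlist *sg;
 *	size_t total = 0;
 *
 *	for (sg = table->sgl; sg; sg = sg_next(sg))
 *		total += sg->length;
 */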

/**
 * sg_nents - return total count of entries in scatterlist
 * @sg: The scatterlist
 *
 * Description:
 *   Returns the number of entries in @sg, taking chaining into
 *   account.
 *
 **/
int sg_nents(struct scatterlist *sg)
{
	int nents;
	for (nents = 0; sg; sg = sg_next(sg))
		nents++;
	return nents;
}
EXPORT_SYMBOL(sg_nents);

/**
 * sg_last - return the last scatterlist entry in a list
 * @sgl: First entry in the scatterlist
 * @nents: Number of entries in the scatterlist
 *
 * Description:
 *   Should only be used sparingly, as it (currently) scans the entire
 *   list to get the last entry.
 *
 *   Note that the @sgl@ pointer passed in need not be the first one,
 *   the important bit is that @nents@ denotes the number of entries that
 *   exist from @sgl@.
 *
 **/
struct scatterlist *sg_last(struct scatterlist *sgl, unsigned int nents)
{
#ifndef CONFIG_ARCH_HAS_SG_CHAIN
	struct scatterlist *ret = &sgl[nents - 1];
#else
	struct scatterlist *sg, *ret = NULL;
	unsigned int i;

	for_each_sg(sgl, sg, nents, i)
		ret = sg;

#endif
#ifdef CONFIG_DEBUG_SG
	BUG_ON(sgl[0].sg_magic != SG_MAGIC);
	BUG_ON(!sg_is_last(ret));
#endif
	return ret;
}
EXPORT_SYMBOL(sg_last);

/**
 * sg_init_table - Initialize SG table
 * @sgl: The SG table
 * @nents: Number of entries in table
 *
 * Notes:
 *   If this is part of a chained sg table, sg_mark_end() should be
 *   used only on the last table part.
 *
 **/
void sg_init_table(struct scatterlist *sgl, unsigned int nents)
{
	memset(sgl, 0, sizeof(*sgl) * nents);
#ifdef CONFIG_DEBUG_SG
	{
		unsigned int i;
		for (i = 0; i < nents; i++)
			sgl[i].sg_magic = SG_MAGIC;
	}
#endif
	sg_mark_end(&sgl[nents - 1]);
}
EXPORT_SYMBOL(sg_init_table);
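
/*
 * Illustrative sketch (not part of the original file): a small on-stack
 * table initialized with sg_init_table() and populated with sg_set_buf().
 * 'hdr'/'payload' and their lengths are assumed kmalloc'ed buffers owned
 * by the caller:
 *
 *	struct scatterlist sg[2];
 *
 *	sg_init_table(sg, 2);
 *	sg_set_buf(&sg[0], hdr, hdr_len);
 *	sg_set_buf(&sg[1], payload, payload_len);
 *
 * For a single buffer, sg_init_one() below is the shorthand.
 */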

/**
 * sg_init_one - Initialize a single entry sg list
 * @sg: SG entry
 * @buf: Virtual address for IO
 * @buflen: IO length
 *
 **/
void sg_init_one(struct scatterlist *sg, const void *buf, unsigned int buflen)
{
	sg_init_table(sg, 1);
	sg_set_buf(sg, buf, buflen);
}
EXPORT_SYMBOL(sg_init_one);

/*
 * The default behaviour of sg_alloc_table() is to use these kmalloc/kfree
 * helpers.
 */
static struct scatterlist *sg_kmalloc(unsigned int nents, gfp_t gfp_mask)
{
	if (nents == SG_MAX_SINGLE_ALLOC) {
		/*
		 * Kmemleak doesn't track page allocations as they are not
		 * commonly used (in a raw form) for kernel data structures.
		 * As we chain together a list of pages and then a normal
		 * kmalloc (tracked by kmemleak), in order for that last
		 * allocation not to become decoupled (and thus a
		 * false-positive) we need to inform kmemleak of all the
		 * intermediate allocations.
		 */
		void *ptr = (void *) __get_free_page(gfp_mask);
		kmemleak_alloc(ptr, PAGE_SIZE, 1, gfp_mask);
		return ptr;
	} else
		return kmalloc(nents * sizeof(struct scatterlist), gfp_mask);
}

static void sg_kfree(struct scatterlist *sg, unsigned int nents)
{
	if (nents == SG_MAX_SINGLE_ALLOC) {
		kmemleak_free(sg);
		free_page((unsigned long) sg);
	} else
		kfree(sg);
}

/**
 * __sg_free_table - Free a previously mapped sg table
 * @table: The sg table header to use
 * @max_ents: The maximum number of entries per single scatterlist
 * @skip_first_chunk: don't free the (preallocated) first scatterlist chunk
 * @free_fn: Free function
 *
 * Description:
 *   Free an sg table previously allocated and setup with
 *   __sg_alloc_table().  The @max_ents value must be identical to
 *   that previously used with __sg_alloc_table().
 *
 **/
void __sg_free_table(struct sg_table *table, unsigned int max_ents,
		     bool skip_first_chunk, sg_free_fn *free_fn)
{
	struct scatterlist *sgl, *next;

	if (unlikely(!table->sgl))
		return;

	sgl = table->sgl;
	while (table->orig_nents) {
		unsigned int alloc_size = table->orig_nents;
		unsigned int sg_size;

		/*
		 * If we have more than max_ents segments left,
		 * then assign 'next' to the sg table after the current one.
		 * sg_size is then one less than alloc size, since the last
		 * element is the chain pointer.
		 */
		if (alloc_size > max_ents) {
			next = sg_chain_ptr(&sgl[max_ents - 1]);
			alloc_size = max_ents;
			sg_size = alloc_size - 1;
		} else {
			sg_size = alloc_size;
			next = NULL;
		}

		table->orig_nents -= sg_size;
		if (skip_first_chunk)
			skip_first_chunk = false;
		else
			free_fn(sgl, alloc_size);
		sgl = next;
	}

	table->sgl = NULL;
}
EXPORT_SYMBOL(__sg_free_table);

/**
 * sg_free_table - Free a previously allocated sg table
 * @table: The mapped sg table header
 *
 **/
void sg_free_table(struct sg_table *table)
{
	__sg_free_table(table, SG_MAX_SINGLE_ALLOC, false, sg_kfree);
}
EXPORT_SYMBOL(sg_free_table);

/**
 * __sg_alloc_table - Allocate and initialize an sg table with given allocator
 * @table: The sg table header to use
 * @nents: Number of entries in sg list
 * @max_ents: The maximum number of entries the allocator returns per call
 * @first_chunk: first SGL chunk, if preallocated (may be NULL)
 * @gfp_mask: GFP allocation mask
 * @alloc_fn: Allocator to use
 *
 * Description:
 *   This function returns a @table @nents long. The allocator is
 *   defined to return scatterlist chunks of maximum size @max_ents.
 *   Thus if @nents is bigger than @max_ents, the scatterlists will be
 *   chained in units of @max_ents.
 *
 * Notes:
 *   If this function returns non-0 (e.g. failure), the caller must call
 *   __sg_free_table() to cleanup any leftover allocations.
 *
 **/
int __sg_alloc_table(struct sg_table *table, unsigned int nents,
		     unsigned int max_ents, struct scatterlist *first_chunk,
		     gfp_t gfp_mask, sg_alloc_fn *alloc_fn)
{
	struct scatterlist *sg, *prv;
	unsigned int left;

	memset(table, 0, sizeof(*table));

	if (nents == 0)
		return -EINVAL;
#ifndef CONFIG_ARCH_HAS_SG_CHAIN
	if (WARN_ON_ONCE(nents > max_ents))
		return -EINVAL;
#endif

	left = nents;
	prv = NULL;
	do {
		unsigned int sg_size, alloc_size = left;

		if (alloc_size > max_ents) {
			alloc_size = max_ents;
			sg_size = alloc_size - 1;
		} else
			sg_size = alloc_size;

		left -= sg_size;

		if (first_chunk) {
			sg = first_chunk;
			first_chunk = NULL;
		} else {
			sg = alloc_fn(alloc_size, gfp_mask);
		}
		if (unlikely(!sg)) {
			/*
			 * Adjust entry count to reflect that the last
			 * entry of the previous table won't be used for
			 * linkage.  Without this, sg_kfree() may get
			 * confused.
			 */
			if (prv)
				table->nents = ++table->orig_nents;

			return -ENOMEM;
		}

		sg_init_table(sg, alloc_size);
		table->nents = table->orig_nents += sg_size;

		/*
		 * If this is the first mapping, assign the sg table header.
		 * If this is not the first mapping, chain previous part.
		 */
		if (prv)
			sg_chain(prv, max_ents, sg);
		else
			table->sgl = sg;

		/*
		 * If no more entries after this one, mark the end
		 */
		if (!left)
			sg_mark_end(&sg[sg_size - 1]);

		prv = sg;
	} while (left);

	return 0;
}
EXPORT_SYMBOL(__sg_alloc_table);
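
/*
 * Illustrative sketch (not part of the original file): callers may pass
 * their own allocator pair to __sg_alloc_table()/__sg_free_table().  The
 * 'my_sg_alloc'/'my_sg_free' hooks below are hypothetical, written against
 * the sg_alloc_fn/sg_free_fn signatures:
 *
 *	static struct scatterlist *my_sg_alloc(unsigned int nents,
 *					       gfp_t gfp_mask)
 *	{
 *		return kmalloc(nents * sizeof(struct scatterlist), gfp_mask);
 *	}
 *
 *	static void my_sg_free(struct scatterlist *sg, unsigned int nents)
 *	{
 *		kfree(sg);
 *	}
 *
 *	ret = __sg_alloc_table(&table, nents, SG_MAX_SINGLE_ALLOC, NULL,
 *			       GFP_KERNEL, my_sg_alloc);
 *	if (ret)
 *		__sg_free_table(&table, SG_MAX_SINGLE_ALLOC, false,
 *				my_sg_free);
 */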

/**
 * sg_alloc_table - Allocate and initialize an sg table
 * @table: The sg table header to use
 * @nents: Number of entries in sg list
 * @gfp_mask: GFP allocation mask
 *
 * Description:
 *   Allocate and initialize an sg table. If @nents@ is larger than
 *   SG_MAX_SINGLE_ALLOC a chained sg table will be setup.
 *
 **/
int sg_alloc_table(struct sg_table *table, unsigned int nents, gfp_t gfp_mask)
{
	int ret;

	ret = __sg_alloc_table(table, nents, SG_MAX_SINGLE_ALLOC,
			       NULL, gfp_mask, sg_kmalloc);
	if (unlikely(ret))
		__sg_free_table(table, SG_MAX_SINGLE_ALLOC, false, sg_kfree);

	return ret;
}
EXPORT_SYMBOL(sg_alloc_table);
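
/*
 * Illustrative sketch (not part of the original file): the typical
 * lifecycle pairs sg_alloc_table() with sg_free_table(), filling the
 * entries in between.  'pages' and 'npages' are assumed caller state:
 *
 *	struct sg_table table;
 *	struct scatterlist *sg;
 *	unsigned int i;
 *
 *	if (sg_alloc_table(&table, npages, GFP_KERNEL))
 *		return -ENOMEM;
 *	for_each_sg(table.sgl, sg, table.nents, i)
 *		sg_set_page(sg, pages[i], PAGE_SIZE, 0);
 *	...
 *	sg_free_table(&table);
 */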

/**
 * sg_alloc_table_from_pages - Allocate and initialize an sg table from
 *			       an array of pages
 * @sgt: The sg table header to use
 * @pages: Pointer to an array of page pointers
 * @n_pages: Number of pages in the pages array
 * @offset: Offset from start of the first page to the start of a buffer
 * @size: Number of valid bytes in the buffer (after offset)
 * @gfp_mask: GFP allocation mask
 *
 * Description:
 *   Allocate and initialize an sg table from a list of pages. Contiguous
 *   ranges of the pages are squashed into a single scatterlist node. A user
 *   may provide an offset at the start and a size of valid data in a buffer
 *   specified by the page array. The returned sg table is released by
 *   sg_free_table().
 *
 * Returns:
 *   0 on success, negative error on failure
 */
int sg_alloc_table_from_pages(struct sg_table *sgt,
	struct page **pages, unsigned int n_pages,
	unsigned long offset, unsigned long size,
	gfp_t gfp_mask)
{
	unsigned int chunks;
	unsigned int i;
	unsigned int cur_page;
	int ret;
	struct scatterlist *s;

	/* compute number of contiguous chunks */
	chunks = 1;
	for (i = 1; i < n_pages; ++i)
		if (page_to_pfn(pages[i]) != page_to_pfn(pages[i - 1]) + 1)
			++chunks;

	ret = sg_alloc_table(sgt, chunks, gfp_mask);
	if (unlikely(ret))
		return ret;

	/* merging chunks and putting them into the scatterlist */
	cur_page = 0;
	for_each_sg(sgt->sgl, s, sgt->orig_nents, i) {
		unsigned long chunk_size;
		unsigned int j;

		/* look for the end of the current chunk */
		for (j = cur_page + 1; j < n_pages; ++j)
			if (page_to_pfn(pages[j]) !=
			    page_to_pfn(pages[j - 1]) + 1)
				break;

		chunk_size = ((j - cur_page) << PAGE_SHIFT) - offset;
		sg_set_page(s, pages[cur_page], min(size, chunk_size), offset);
		size -= chunk_size;
		offset = 0;
		cur_page = j;
	}

	return 0;
}
EXPORT_SYMBOL(sg_alloc_table_from_pages);
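
/*
 * Illustrative sketch (not part of the original file): building a table
 * straight from an already-pinned page array, e.g. after get_user_pages().
 * 'pages', 'n_pages', 'offset' and 'len' are assumed caller state;
 * physically contiguous pages collapse into fewer entries:
 *
 *	struct sg_table sgt;
 *	int ret;
 *
 *	ret = sg_alloc_table_from_pages(&sgt, pages, n_pages,
 *					offset, len, GFP_KERNEL);
 *	if (ret)
 *		return ret;
 *	...
 *	sg_free_table(&sgt);
 */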

void __sg_page_iter_start(struct sg_page_iter *piter,
			  struct scatterlist *sglist, unsigned int nents,
			  unsigned long pgoffset)
{
	piter->__pg_advance = 0;
	piter->__nents = nents;

	piter->sg = sglist;
	piter->sg_pgoffset = pgoffset;
}
EXPORT_SYMBOL(__sg_page_iter_start);

static int sg_page_count(struct scatterlist *sg)
{
	return PAGE_ALIGN(sg->offset + sg->length) >> PAGE_SHIFT;
}

bool __sg_page_iter_next(struct sg_page_iter *piter)
{
	if (!piter->__nents || !piter->sg)
		return false;

	piter->sg_pgoffset += piter->__pg_advance;
	piter->__pg_advance = 1;

	while (piter->sg_pgoffset >= sg_page_count(piter->sg)) {
		piter->sg_pgoffset -= sg_page_count(piter->sg);
		piter->sg = sg_next(piter->sg);
		if (!--piter->__nents || !piter->sg)
			return false;
	}

	return true;
}
EXPORT_SYMBOL(__sg_page_iter_next);
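
/*
 * Illustrative sketch (not part of the original file): the iterator above
 * is normally driven through the for_each_sg_page() macro from
 * <linux/scatterlist.h>, which visits every page spanned by the list:
 *
 *	struct sg_page_iter piter;
 *
 *	for_each_sg_page(table->sgl, &piter, table->nents, 0) {
 *		struct page *page = sg_page_iter_page(&piter);
 *		...
 *	}
 */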

/**
 * sg_miter_start - start mapping iteration over a sg list
 * @miter: sg mapping iter to be started
 * @sgl: sg list to iterate over
 * @nents: number of sg entries
 * @flags: SG_MITER_* iteration flags
 *
 * Description:
 *   Starts mapping iterator @miter.
 *
 * Context:
 *   Don't care.
 */
void sg_miter_start(struct sg_mapping_iter *miter, struct scatterlist *sgl,
		    unsigned int nents, unsigned int flags)
{
	memset(miter, 0, sizeof(struct sg_mapping_iter));

	__sg_page_iter_start(&miter->piter, sgl, nents, 0);
	WARN_ON(!(flags & (SG_MITER_TO_SG | SG_MITER_FROM_SG)));
	miter->__flags = flags;
}
EXPORT_SYMBOL(sg_miter_start);

static bool sg_miter_get_next_page(struct sg_mapping_iter *miter)
{
	if (!miter->__remaining) {
		struct scatterlist *sg;
		unsigned long pgoffset;

		if (!__sg_page_iter_next(&miter->piter))
			return false;

		sg = miter->piter.sg;
		pgoffset = miter->piter.sg_pgoffset;

		miter->__offset = pgoffset ? 0 : sg->offset;
		miter->__remaining = sg->offset + sg->length -
				(pgoffset << PAGE_SHIFT) - miter->__offset;
		miter->__remaining = min_t(unsigned long, miter->__remaining,
					   PAGE_SIZE - miter->__offset);
	}

	return true;
}
/**
 * sg_miter_skip - reposition mapping iterator
 * @miter: sg mapping iter to be skipped
 * @offset: number of bytes to advance past the current location
 *
 * Description:
 *   Sets the offset of @miter to its current location plus @offset bytes.
 *   If mapping iterator @miter has been proceeded by sg_miter_next(), this
 *   stops @miter.
 *
 * Context:
 *   Don't care if @miter is stopped, or not proceeded yet.
 *   Otherwise, preemption disabled if the SG_MITER_ATOMIC is set.
 *
 * Returns:
 *   true if the skip succeeded. false if end of the sg list is reached.
 */
bool sg_miter_skip(struct sg_mapping_iter *miter, off_t offset)
{
	sg_miter_stop(miter);

	while (offset) {
		off_t consumed;

		if (!sg_miter_get_next_page(miter))
			return false;

		consumed = min_t(off_t, offset, miter->__remaining);
		miter->__offset += consumed;
		miter->__remaining -= consumed;
		offset -= consumed;
	}

	return true;
}
EXPORT_SYMBOL(sg_miter_skip);

/**
 * sg_miter_next - proceed mapping iterator to the next mapping
 * @miter: sg mapping iter to proceed
 *
 * Description:
 *   Proceeds @miter to the next mapping. @miter should have been started
 *   using sg_miter_start(). On successful return, @miter->page,
 *   @miter->addr and @miter->length point to the current mapping.
 *
 * Context:
 *   Preemption disabled if SG_MITER_ATOMIC. Preemption must stay disabled
 *   till @miter is stopped. May sleep if !SG_MITER_ATOMIC.
 *
 * Returns:
 *   true if @miter contains the next mapping. false if end of sg
 *   list is reached.
 */
bool sg_miter_next(struct sg_mapping_iter *miter)
{
	sg_miter_stop(miter);

	/*
	 * Get to the next page if necessary.
	 * __remaining and __offset are adjusted by sg_miter_stop().
	 */
	if (!sg_miter_get_next_page(miter))
		return false;

	miter->page = sg_page_iter_page(&miter->piter);
	miter->consumed = miter->length = miter->__remaining;

	if (miter->__flags & SG_MITER_ATOMIC)
		miter->addr = kmap_atomic(miter->page) + miter->__offset;
	else
		miter->addr = kmap(miter->page) + miter->__offset;

	return true;
}
EXPORT_SYMBOL(sg_miter_next);

/**
 * sg_miter_stop - stop mapping iteration
 * @miter: sg mapping iter to be stopped
 *
 * Description:
 *   Stops mapping iterator @miter. @miter should have been started
 *   using sg_miter_start(). A stopped iteration can be resumed by
 *   calling sg_miter_next() on it. This is useful when resources (kmap)
 *   need to be released during iteration.
 *
 * Context:
 *   Preemption disabled if the SG_MITER_ATOMIC is set. Don't care
 *   otherwise.
 */
void sg_miter_stop(struct sg_mapping_iter *miter)
{
	WARN_ON(miter->consumed > miter->length);

	/* drop resources from the last iteration */
	if (miter->addr) {
		miter->__offset += miter->consumed;
		miter->__remaining -= miter->consumed;

		if ((miter->__flags & SG_MITER_TO_SG) &&
		    !PageSlab(miter->page))
			flush_kernel_dcache_page(miter->page);

		if (miter->__flags & SG_MITER_ATOMIC) {
			WARN_ON_ONCE(preemptible());
			kunmap_atomic(miter->addr);
		} else
			kunmap(miter->page);

		miter->page = NULL;
		miter->addr = NULL;
		miter->length = 0;
		miter->consumed = 0;
	}
}
EXPORT_SYMBOL(sg_miter_stop);
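
/*
 * Illustrative sketch (not part of the original file): a typical
 * mapping-iterator loop that zeroes an sg list page by page.  With
 * SG_MITER_ATOMIC the mapping may not be held across sleeping operations,
 * so all work happens inside the loop body:
 *
 *	struct sg_mapping_iter miter;
 *
 *	sg_miter_start(&miter, sgl, nents,
 *		       SG_MITER_ATOMIC | SG_MITER_TO_SG);
 *	while (sg_miter_next(&miter))
 *		memset(miter.addr, 0, miter.length);
 *	sg_miter_stop(&miter);
 */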

/**
 * sg_copy_buffer - Copy data between a linear buffer and an SG list
 * @sgl: The SG list
 * @nents: Number of SG entries
 * @buf: Where to copy from
 * @buflen: The number of bytes to copy
 * @skip: Number of bytes to skip before copying
 * @to_buffer: transfer direction (true == from an sg list to a
 *	       buffer, false == from a buffer to an sg list)
 *
 * Returns the number of copied bytes.
 *
 **/
static size_t sg_copy_buffer(struct scatterlist *sgl, unsigned int nents,
			     void *buf, size_t buflen, off_t skip,
			     bool to_buffer)
{
	unsigned int offset = 0;
	struct sg_mapping_iter miter;
	unsigned long flags;
	unsigned int sg_flags = SG_MITER_ATOMIC;

	if (to_buffer)
		sg_flags |= SG_MITER_FROM_SG;
	else
		sg_flags |= SG_MITER_TO_SG;

	sg_miter_start(&miter, sgl, nents, sg_flags);

	if (!sg_miter_skip(&miter, skip))
		return 0;

	local_irq_save(flags);

	while (sg_miter_next(&miter) && offset < buflen) {
		unsigned int len;

		len = min(miter.length, buflen - offset);

		if (to_buffer)
			memcpy(buf + offset, miter.addr, len);
		else
			memcpy(miter.addr, buf + offset, len);

		offset += len;
	}

	sg_miter_stop(&miter);

	local_irq_restore(flags);
	return offset;
}

/**
 * sg_copy_from_buffer - Copy from a linear buffer to an SG list
 * @sgl: The SG list
 * @nents: Number of SG entries
 * @buf: Where to copy from
 * @buflen: The number of bytes to copy
 *
 * Returns the number of copied bytes.
 *
 **/
size_t sg_copy_from_buffer(struct scatterlist *sgl, unsigned int nents,
			   void *buf, size_t buflen)
{
	return sg_copy_buffer(sgl, nents, buf, buflen, 0, false);
}
EXPORT_SYMBOL(sg_copy_from_buffer);

/**
 * sg_copy_to_buffer - Copy from an SG list to a linear buffer
 * @sgl: The SG list
 * @nents: Number of SG entries
 * @buf: Where to copy to
 * @buflen: The number of bytes to copy
 *
 * Returns the number of copied bytes.
 *
 **/
size_t sg_copy_to_buffer(struct scatterlist *sgl, unsigned int nents,
			 void *buf, size_t buflen)
{
	return sg_copy_buffer(sgl, nents, buf, buflen, 0, true);
}
EXPORT_SYMBOL(sg_copy_to_buffer);
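
/*
 * Illustrative sketch (not part of the original file): gathering an sg
 * list into a bounce buffer and scattering it back.  'sgl', 'nents' and
 * 'len' are assumed caller state:
 *
 *	void *bounce = kmalloc(len, GFP_KERNEL);
 *	size_t copied;
 *
 *	if (!bounce)
 *		return -ENOMEM;
 *	copied = sg_copy_to_buffer(sgl, nents, bounce, len);
 *	... modify bounce ...
 *	sg_copy_from_buffer(sgl, nents, bounce, copied);
 */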

/**
 * sg_pcopy_from_buffer - Copy from a linear buffer to an SG list
 * @sgl: The SG list
 * @nents: Number of SG entries
 * @buf: Where to copy from
 * @buflen: The number of bytes to copy
 * @skip: Number of bytes to skip before copying
 *
 * Returns the number of copied bytes.
 *
 **/
size_t sg_pcopy_from_buffer(struct scatterlist *sgl, unsigned int nents,
			    void *buf, size_t buflen, off_t skip)
{
	return sg_copy_buffer(sgl, nents, buf, buflen, skip, false);
}
EXPORT_SYMBOL(sg_pcopy_from_buffer);

/**
 * sg_pcopy_to_buffer - Copy from an SG list to a linear buffer
 * @sgl: The SG list
 * @nents: Number of SG entries
 * @buf: Where to copy to
 * @buflen: The number of bytes to copy
 * @skip: Number of bytes to skip before copying
 *
 * Returns the number of copied bytes.
 *
 **/
size_t sg_pcopy_to_buffer(struct scatterlist *sgl, unsigned int nents,
			  void *buf, size_t buflen, off_t skip)
{
	return sg_copy_buffer(sgl, nents, buf, buflen, skip, true);
}
EXPORT_SYMBOL(sg_pcopy_to_buffer);
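
/*
 * Illustrative sketch (not part of the original file): the _pcopy variants
 * start the copy @skip bytes into the list, e.g. to peek at a payload that
 * follows a fixed-size header.  'sgl', 'nents' and 'hdr_len' are assumed
 * caller state:
 *
 *	u8 peek[16];
 *
 *	if (sg_pcopy_to_buffer(sgl, nents, peek, sizeof(peek),
 *			       hdr_len) != sizeof(peek))
 *		return -EIO;
 */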