/*
 * Copyright (C) 2007 Jens Axboe <jens.axboe@oracle.com>
 *
 * Scatterlist handling helpers.
 *
 * This source code is licensed under the GNU General Public License,
 * Version 2. See the file COPYING for more details.
 */
#include <linux/export.h>
#include <linux/slab.h>
#include <linux/scatterlist.h>
#include <linux/highmem.h>
#include <linux/kmemleak.h>

/**
 * sg_next - return the next scatterlist entry in a list
 * @sg:		The current sg entry
 *
 * Description:
 *   Usually the next entry will be @sg@ + 1, but if this sg element is part
 *   of a chained scatterlist, it could jump to the start of a new
 *   scatterlist array.
 *
 **/
struct scatterlist *sg_next(struct scatterlist *sg)
{
#ifdef CONFIG_DEBUG_SG
	BUG_ON(sg->sg_magic != SG_MAGIC);
#endif
	if (sg_is_last(sg))
		return NULL;

	sg++;
	if (unlikely(sg_is_chain(sg)))
		sg = sg_chain_ptr(sg);

	return sg;
}
EXPORT_SYMBOL(sg_next);
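
/*
 * Usage sketch (illustrative only): walking every entry of a possibly
 * chained scatterlist.  The for_each_sg() helper in <linux/scatterlist.h>
 * wraps this same sg_next() pattern.
 *
 *	static unsigned int count_bytes(struct scatterlist *sgl)
 *	{
 *		struct scatterlist *sg;
 *		unsigned int total = 0;
 *
 *		for (sg = sgl; sg; sg = sg_next(sg))
 *			total += sg->length;
 *		return total;
 *	}
 */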

/**
 * sg_nents - return total count of entries in scatterlist
 * @sg:		The scatterlist
 *
 * Description:
 *   Returns the number of entries in @sg, taking chaining into
 *   account.
 *
 **/
int sg_nents(struct scatterlist *sg)
{
	int nents;
	for (nents = 0; sg; sg = sg_next(sg))
		nents++;
	return nents;
}
EXPORT_SYMBOL(sg_nents);

/**
 * sg_nents_for_len - return total count of entries in scatterlist
 *		      needed to satisfy the supplied length
 * @sg:		The scatterlist
 * @len:	The total required length
 *
 * Description:
 *   Determines the number of entries in @sg that are required to meet
 *   the supplied length, taking chaining into account.
 *
 * Returns:
 *   the number of sg entries needed, negative error on failure
 *
 **/
int sg_nents_for_len(struct scatterlist *sg, u64 len)
{
	int nents;
	u64 total;

	if (!len)
		return 0;

	for (nents = 0, total = 0; sg; sg = sg_next(sg)) {
		nents++;
		total += sg->length;
		if (total >= len)
			return nents;
	}

	return -EINVAL;
}
EXPORT_SYMBOL(sg_nents_for_len);
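
/*
 * Usage sketch (illustrative only): cap a DMA mapping to the entries that
 * actually cover "len" bytes.  "dev", "dir", "sgl" and "len" are assumed
 * to come from the caller.
 *
 *	int nents = sg_nents_for_len(sgl, len);
 *
 *	if (nents < 0)
 *		return nents;
 *	nents = dma_map_sg(dev, sgl, nents, dir);
 */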

/**
 * sg_last - return the last scatterlist entry in a list
 * @sgl:	First entry in the scatterlist
 * @nents:	Number of entries in the scatterlist
 *
 * Description:
 *   Should only be used casually, it (currently) scans the entire list
 *   to get the last entry.
 *
 *   Note that the @sgl@ pointer passed in need not be the first one,
 *   the important bit is that @nents@ denotes the number of entries that
 *   exist from @sgl@.
 *
 **/
struct scatterlist *sg_last(struct scatterlist *sgl, unsigned int nents)
{
	struct scatterlist *sg, *ret = NULL;
	unsigned int i;

	for_each_sg(sgl, sg, nents, i)
		ret = sg;

#ifdef CONFIG_DEBUG_SG
	BUG_ON(sgl[0].sg_magic != SG_MAGIC);
	BUG_ON(!sg_is_last(ret));
#endif
	return ret;
}
EXPORT_SYMBOL(sg_last);

/**
 * sg_init_table - Initialize SG table
 * @sgl:	The SG table
 * @nents:	Number of entries in table
 *
 * Notes:
 *   If this is part of a chained sg table, sg_mark_end() should be
 *   used only on the last table part.
 *
 **/
void sg_init_table(struct scatterlist *sgl, unsigned int nents)
{
	memset(sgl, 0, sizeof(*sgl) * nents);
#ifdef CONFIG_DEBUG_SG
	{
		unsigned int i;
		for (i = 0; i < nents; i++)
			sgl[i].sg_magic = SG_MAGIC;
	}
#endif
	sg_mark_end(&sgl[nents - 1]);
}
EXPORT_SYMBOL(sg_init_table);

/**
 * sg_init_one - Initialize a single entry sg list
 * @sg:		SG entry
 * @buf:	Virtual address for IO
 * @buflen:	IO length
 *
 **/
void sg_init_one(struct scatterlist *sg, const void *buf, unsigned int buflen)
{
	sg_init_table(sg, 1);
	sg_set_buf(sg, buf, buflen);
}
EXPORT_SYMBOL(sg_init_one);
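
/*
 * Usage sketch (illustrative only): describe a single kmalloc'ed buffer so
 * it can be handed to a scatterlist-consuming API.  "len" is assumed to
 * come from the caller.
 *
 *	struct scatterlist sg;
 *	void *buf = kmalloc(len, GFP_KERNEL);
 *
 *	if (!buf)
 *		return -ENOMEM;
 *	sg_init_one(&sg, buf, len);
 */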

/*
 * The default behaviour of sg_alloc_table() is to use these kmalloc/kfree
 * helpers.
 */
static struct scatterlist *sg_kmalloc(unsigned int nents, gfp_t gfp_mask)
{
	if (nents == SG_MAX_SINGLE_ALLOC) {
		/*
		 * Kmemleak doesn't track page allocations as they are not
		 * commonly used (in a raw form) for kernel data structures.
		 * As we chain together a list of pages and then a normal
		 * kmalloc (tracked by kmemleak), in order for that last
		 * allocation not to become decoupled (and thus a
		 * false-positive) we need to inform kmemleak of all the
		 * intermediate allocations.
		 */
		void *ptr = (void *) __get_free_page(gfp_mask);
		kmemleak_alloc(ptr, PAGE_SIZE, 1, gfp_mask);
		return ptr;
	} else
		return kmalloc(nents * sizeof(struct scatterlist), gfp_mask);
}

static void sg_kfree(struct scatterlist *sg, unsigned int nents)
{
	if (nents == SG_MAX_SINGLE_ALLOC) {
		kmemleak_free(sg);
		free_page((unsigned long) sg);
	} else
		kfree(sg);
}

/**
 * __sg_free_table - Free a previously mapped sg table
 * @table:	The sg table header to use
 * @max_ents:	The maximum number of entries per single scatterlist
 * @skip_first_chunk: don't free the (preallocated) first scatterlist chunk
 * @free_fn:	Free function
 *
 * Description:
 *   Free an sg table previously allocated and set up with
 *   __sg_alloc_table().  The @max_ents value must be identical to
 *   that previously used with __sg_alloc_table().
 *
 **/
void __sg_free_table(struct sg_table *table, unsigned int max_ents,
		     bool skip_first_chunk, sg_free_fn *free_fn)
{
	struct scatterlist *sgl, *next;

	if (unlikely(!table->sgl))
		return;

	sgl = table->sgl;
	while (table->orig_nents) {
		unsigned int alloc_size = table->orig_nents;
		unsigned int sg_size;

		/*
		 * If we have more than max_ents segments left,
		 * then assign 'next' to the sg table after the current one.
		 * sg_size is then one less than alloc size, since the last
		 * element is the chain pointer.
		 */
		if (alloc_size > max_ents) {
			next = sg_chain_ptr(&sgl[max_ents - 1]);
			alloc_size = max_ents;
			sg_size = alloc_size - 1;
		} else {
			sg_size = alloc_size;
			next = NULL;
		}

		table->orig_nents -= sg_size;
		if (skip_first_chunk)
			skip_first_chunk = false;
		else
			free_fn(sgl, alloc_size);
		sgl = next;
	}

	table->sgl = NULL;
}
EXPORT_SYMBOL(__sg_free_table);

/**
 * sg_free_table - Free a previously allocated sg table
 * @table:	The mapped sg table header
 *
 **/
void sg_free_table(struct sg_table *table)
{
	__sg_free_table(table, SG_MAX_SINGLE_ALLOC, false, sg_kfree);
}
EXPORT_SYMBOL(sg_free_table);

/**
 * __sg_alloc_table - Allocate and initialize an sg table with given allocator
 * @table:	The sg table header to use
 * @nents:	Number of entries in sg list
 * @max_ents:	The maximum number of entries the allocator returns per call
 * @first_chunk: first SGL, if preallocated (may be %NULL)
 * @gfp_mask:	GFP allocation mask
 * @alloc_fn:	Allocator to use
 *
 * Description:
 *   This function returns a @table @nents long. The allocator is
 *   defined to return scatterlist chunks of maximum size @max_ents.
 *   Thus if @nents is bigger than @max_ents, the scatterlists will be
 *   chained in units of @max_ents.
 *
 * Notes:
 *   If this function returns non-0 (e.g. failure), the caller must call
 *   __sg_free_table() to clean up any leftover allocations.
 *
 **/
int __sg_alloc_table(struct sg_table *table, unsigned int nents,
		     unsigned int max_ents, struct scatterlist *first_chunk,
		     gfp_t gfp_mask, sg_alloc_fn *alloc_fn)
{
	struct scatterlist *sg, *prv;
	unsigned int left;

	memset(table, 0, sizeof(*table));

	if (nents == 0)
		return -EINVAL;
#ifndef CONFIG_ARCH_HAS_SG_CHAIN
	if (WARN_ON_ONCE(nents > max_ents))
		return -EINVAL;
#endif

	left = nents;
	prv = NULL;
	do {
		unsigned int sg_size, alloc_size = left;

		if (alloc_size > max_ents) {
			alloc_size = max_ents;
			sg_size = alloc_size - 1;
		} else
			sg_size = alloc_size;

		left -= sg_size;

		if (first_chunk) {
			sg = first_chunk;
			first_chunk = NULL;
		} else {
			sg = alloc_fn(alloc_size, gfp_mask);
		}
		if (unlikely(!sg)) {
			/*
			 * Adjust entry count to reflect that the last
			 * entry of the previous table won't be used for
			 * linkage.  Without this, sg_kfree() may get
			 * confused.
			 */
			if (prv)
				table->nents = ++table->orig_nents;

			return -ENOMEM;
		}

		sg_init_table(sg, alloc_size);
		table->nents = table->orig_nents += sg_size;

		/*
		 * If this is the first mapping, assign the sg table header.
		 * If this is not the first mapping, chain previous part.
		 */
		if (prv)
			sg_chain(prv, max_ents, sg);
		else
			table->sgl = sg;

		/*
		 * If no more entries after this one, mark the end
		 */
		if (!left)
			sg_mark_end(&sg[sg_size - 1]);

		prv = sg;
	} while (left);

	return 0;
}
EXPORT_SYMBOL(__sg_alloc_table);

/**
 * sg_alloc_table - Allocate and initialize an sg table
 * @table:	The sg table header to use
 * @nents:	Number of entries in sg list
 * @gfp_mask:	GFP allocation mask
 *
 * Description:
 *   Allocate and initialize an sg table. If @nents@ is larger than
 *   SG_MAX_SINGLE_ALLOC a chained sg table will be set up.
 *
 **/
int sg_alloc_table(struct sg_table *table, unsigned int nents, gfp_t gfp_mask)
{
	int ret;

	ret = __sg_alloc_table(table, nents, SG_MAX_SINGLE_ALLOC,
			       NULL, gfp_mask, sg_kmalloc);
	if (unlikely(ret))
		__sg_free_table(table, SG_MAX_SINGLE_ALLOC, false, sg_kfree);

	return ret;
}
EXPORT_SYMBOL(sg_alloc_table);
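
/*
 * Usage sketch (illustrative only): build a table over an array of pages
 * and release it again.  "pages" and "n_pages" are assumed to come from
 * the caller; for contiguous ranges prefer sg_alloc_table_from_pages()
 * below.
 *
 *	struct sg_table st;
 *	struct scatterlist *sg;
 *	unsigned int i;
 *	int ret;
 *
 *	ret = sg_alloc_table(&st, n_pages, GFP_KERNEL);
 *	if (ret)
 *		return ret;
 *	for_each_sg(st.sgl, sg, st.orig_nents, i)
 *		sg_set_page(sg, pages[i], PAGE_SIZE, 0);
 *	...
 *	sg_free_table(&st);
 */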

/**
 * sg_alloc_table_from_pages - Allocate and initialize an sg table from
 *			       an array of pages
 * @sgt:	The sg table header to use
 * @pages:	Pointer to an array of page pointers
 * @n_pages:	Number of pages in the pages array
 * @offset:	Offset from start of the first page to the start of a buffer
 * @size:	Number of valid bytes in the buffer (after offset)
 * @gfp_mask:	GFP allocation mask
 *
 * Description:
 *   Allocate and initialize an sg table from a list of pages. Contiguous
 *   ranges of the pages are squashed into a single scatterlist node. The
 *   caller may provide an offset into the first page and the size of the
 *   valid data in the buffer described by the page array. The returned sg
 *   table is released by sg_free_table().
 *
 * Returns:
 *   0 on success, negative error on failure
 */
int sg_alloc_table_from_pages(struct sg_table *sgt,
	struct page **pages, unsigned int n_pages,
	unsigned long offset, unsigned long size,
	gfp_t gfp_mask)
{
	unsigned int chunks;
	unsigned int i;
	unsigned int cur_page;
	int ret;
	struct scatterlist *s;

	/* compute number of contiguous chunks */
	chunks = 1;
	for (i = 1; i < n_pages; ++i)
		if (page_to_pfn(pages[i]) != page_to_pfn(pages[i - 1]) + 1)
			++chunks;

	ret = sg_alloc_table(sgt, chunks, gfp_mask);
	if (unlikely(ret))
		return ret;

	/* merging chunks and putting them into the scatterlist */
	cur_page = 0;
	for_each_sg(sgt->sgl, s, sgt->orig_nents, i) {
		unsigned long chunk_size;
		unsigned int j;

		/* look for the end of the current chunk */
		for (j = cur_page + 1; j < n_pages; ++j)
			if (page_to_pfn(pages[j]) !=
			    page_to_pfn(pages[j - 1]) + 1)
				break;

		chunk_size = ((j - cur_page) << PAGE_SHIFT) - offset;
		sg_set_page(s, pages[cur_page], min(size, chunk_size), offset);
		size -= chunk_size;
		offset = 0;
		cur_page = j;
	}

	return 0;
}
EXPORT_SYMBOL(sg_alloc_table_from_pages);
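
/*
 * Usage sketch (illustrative only): the pinned-user-memory case this
 * helper suits well.  "pages", "n_pages", "offset" and "size" are assumed
 * to describe a buffer previously pinned with get_user_pages().
 *
 *	struct sg_table sgt;
 *	int ret;
 *
 *	ret = sg_alloc_table_from_pages(&sgt, pages, n_pages,
 *					offset, size, GFP_KERNEL);
 *	if (ret)
 *		return ret;
 *	...
 *	sg_free_table(&sgt);
 */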

void __sg_page_iter_start(struct sg_page_iter *piter,
			  struct scatterlist *sglist, unsigned int nents,
			  unsigned long pgoffset)
{
	piter->__pg_advance = 0;
	piter->__nents = nents;

	piter->sg = sglist;
	piter->sg_pgoffset = pgoffset;
}
EXPORT_SYMBOL(__sg_page_iter_start);

static int sg_page_count(struct scatterlist *sg)
{
	return PAGE_ALIGN(sg->offset + sg->length) >> PAGE_SHIFT;
}

bool __sg_page_iter_next(struct sg_page_iter *piter)
{
	if (!piter->__nents || !piter->sg)
		return false;

	piter->sg_pgoffset += piter->__pg_advance;
	piter->__pg_advance = 1;

	while (piter->sg_pgoffset >= sg_page_count(piter->sg)) {
		piter->sg_pgoffset -= sg_page_count(piter->sg);
		piter->sg = sg_next(piter->sg);
		if (!--piter->__nents || !piter->sg)
			return false;
	}

	return true;
}
EXPORT_SYMBOL(__sg_page_iter_next);
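
/*
 * Usage sketch (illustrative only): visiting each page of a table with
 * the for_each_sg_page() wrapper from <linux/scatterlist.h>, which drives
 * __sg_page_iter_start()/__sg_page_iter_next() internally.  "sgt" is
 * assumed to come from the caller.
 *
 *	struct sg_page_iter piter;
 *
 *	for_each_sg_page(sgt->sgl, &piter, sgt->orig_nents, 0)
 *		set_page_dirty(sg_page_iter_page(&piter));
 */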

/**
 * sg_miter_start - start mapping iteration over a sg list
 * @miter:	sg mapping iter to be started
 * @sgl:	sg list to iterate over
 * @nents:	number of sg entries
 * @flags:	sg iterator flags (SG_MITER_*)
 *
 * Description:
 *   Starts mapping iterator @miter.
 *
 * Context:
 *   Don't care.
 */
void sg_miter_start(struct sg_mapping_iter *miter, struct scatterlist *sgl,
		    unsigned int nents, unsigned int flags)
{
	memset(miter, 0, sizeof(struct sg_mapping_iter));

	__sg_page_iter_start(&miter->piter, sgl, nents, 0);
	WARN_ON(!(flags & (SG_MITER_TO_SG | SG_MITER_FROM_SG)));
	miter->__flags = flags;
}
EXPORT_SYMBOL(sg_miter_start);

static bool sg_miter_get_next_page(struct sg_mapping_iter *miter)
{
	if (!miter->__remaining) {
		struct scatterlist *sg;
		unsigned long pgoffset;

		if (!__sg_page_iter_next(&miter->piter))
			return false;

		sg = miter->piter.sg;
		pgoffset = miter->piter.sg_pgoffset;

		miter->__offset = pgoffset ? 0 : sg->offset;
		miter->__remaining = sg->offset + sg->length -
				(pgoffset << PAGE_SHIFT) - miter->__offset;
		miter->__remaining = min_t(unsigned long, miter->__remaining,
					   PAGE_SIZE - miter->__offset);
	}

	return true;
}

/**
 * sg_miter_skip - reposition mapping iterator
 * @miter:	sg mapping iter to be skipped
 * @offset:	number of bytes to advance past the current location
 *
 * Description:
 *   Sets the offset of @miter to its current location plus @offset bytes.
 *   If the mapping iterator @miter has been advanced by sg_miter_next(),
 *   this stops @miter first.
 *
 * Context:
 *   Don't care if @miter is stopped or has not been advanced yet.
 *   Otherwise, preemption disabled if the SG_MITER_ATOMIC is set.
 *
 * Returns:
 *   true if @miter contains the valid mapping.  false if end of sg
 *   list is reached.
 */
bool sg_miter_skip(struct sg_mapping_iter *miter, off_t offset)
{
	sg_miter_stop(miter);

	while (offset) {
		off_t consumed;

		if (!sg_miter_get_next_page(miter))
			return false;

		consumed = min_t(off_t, offset, miter->__remaining);
		miter->__offset += consumed;
		miter->__remaining -= consumed;
		offset -= consumed;
	}

	return true;
}
EXPORT_SYMBOL(sg_miter_skip);

/**
 * sg_miter_next - proceed mapping iterator to the next mapping
 * @miter:	sg mapping iter to proceed
 *
 * Description:
 *   Proceeds @miter to the next mapping.  @miter should have been started
 *   using sg_miter_start().  On successful return, @miter->page,
 *   @miter->addr and @miter->length point to the current mapping.
 *
 * Context:
 *   Preemption disabled if SG_MITER_ATOMIC.  Preemption must stay disabled
 *   till @miter is stopped.  May sleep if !SG_MITER_ATOMIC.
 *
 * Returns:
 *   true if @miter contains the next mapping.  false if end of sg
 *   list is reached.
 */
bool sg_miter_next(struct sg_mapping_iter *miter)
{
	sg_miter_stop(miter);

	/*
	 * Get to the next page if necessary.
	 * __remaining, __offset is adjusted by sg_miter_stop
	 */
	if (!sg_miter_get_next_page(miter))
		return false;

	miter->page = sg_page_iter_page(&miter->piter);
	miter->consumed = miter->length = miter->__remaining;

	if (miter->__flags & SG_MITER_ATOMIC)
		miter->addr = kmap_atomic(miter->page) + miter->__offset;
	else
		miter->addr = kmap(miter->page) + miter->__offset;

	return true;
}
EXPORT_SYMBOL(sg_miter_next);

/**
 * sg_miter_stop - stop mapping iteration
 * @miter:	sg mapping iter to be stopped
 *
 * Description:
 *   Stops mapping iterator @miter.  @miter should have been started
 *   using sg_miter_start().  A stopped iteration can be
 *   resumed by calling sg_miter_next() on it.  This is useful when
 *   resources (kmap) need to be released during iteration.
 *
 * Context:
 *   Preemption disabled if the SG_MITER_ATOMIC is set.  Don't care
 *   otherwise.
 */
void sg_miter_stop(struct sg_mapping_iter *miter)
{
	WARN_ON(miter->consumed > miter->length);

	/* drop resources from the last iteration */
	if (miter->addr) {
		miter->__offset += miter->consumed;
		miter->__remaining -= miter->consumed;

		if ((miter->__flags & SG_MITER_TO_SG) &&
		    !PageSlab(miter->page))
			flush_kernel_dcache_page(miter->page);

		if (miter->__flags & SG_MITER_ATOMIC) {
			WARN_ON_ONCE(preemptible());
			kunmap_atomic(miter->addr);
		} else
			kunmap(miter->page);

		miter->page = NULL;
		miter->addr = NULL;
		miter->length = 0;
		miter->consumed = 0;
	}
}
EXPORT_SYMBOL(sg_miter_stop);
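
/*
 * Usage sketch (illustrative only): zero an entire scatterlist with the
 * mapping iterator.  "sgl" and "nents" are assumed to come from the
 * caller.  SG_MITER_ATOMIC makes this safe in atomic context; each
 * kmap is dropped by the next sg_miter_next()/sg_miter_stop() call.
 *
 *	struct sg_mapping_iter miter;
 *
 *	sg_miter_start(&miter, sgl, nents,
 *		       SG_MITER_ATOMIC | SG_MITER_TO_SG);
 *	while (sg_miter_next(&miter))
 *		memset(miter.addr, 0, miter.length);
 *	sg_miter_stop(&miter);
 */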

/**
 * sg_copy_buffer - Copy data between a linear buffer and an SG list
 * @sgl:	The SG list
 * @nents:	Number of SG entries
 * @buf:	Where to copy from
 * @buflen:	The number of bytes to copy
 * @skip:	Number of bytes to skip before copying
 * @to_buffer:	transfer direction (true == from an sg list to a
 *		buffer, false == from a buffer to an sg list)
 *
 * Returns the number of copied bytes.
 *
 **/
size_t sg_copy_buffer(struct scatterlist *sgl, unsigned int nents, void *buf,
		      size_t buflen, off_t skip, bool to_buffer)
{
	unsigned int offset = 0;
	struct sg_mapping_iter miter;
	unsigned long flags;
	unsigned int sg_flags = SG_MITER_ATOMIC;

	if (to_buffer)
		sg_flags |= SG_MITER_FROM_SG;
	else
		sg_flags |= SG_MITER_TO_SG;

	sg_miter_start(&miter, sgl, nents, sg_flags);

	if (!sg_miter_skip(&miter, skip))
		return 0;

	local_irq_save(flags);

	while (sg_miter_next(&miter) && offset < buflen) {
		unsigned int len;

		len = min(miter.length, buflen - offset);

		if (to_buffer)
			memcpy(buf + offset, miter.addr, len);
		else
			memcpy(miter.addr, buf + offset, len);

		offset += len;
	}

	sg_miter_stop(&miter);

	local_irq_restore(flags);
	return offset;
}
EXPORT_SYMBOL(sg_copy_buffer);

/**
 * sg_copy_from_buffer - Copy from a linear buffer to an SG list
 * @sgl:	The SG list
 * @nents:	Number of SG entries
 * @buf:	Where to copy from
 * @buflen:	The number of bytes to copy
 *
 * Returns the number of copied bytes.
 *
 **/
size_t sg_copy_from_buffer(struct scatterlist *sgl, unsigned int nents,
			   const void *buf, size_t buflen)
{
	return sg_copy_buffer(sgl, nents, (void *)buf, buflen, 0, false);
}
EXPORT_SYMBOL(sg_copy_from_buffer);

/**
 * sg_copy_to_buffer - Copy from an SG list to a linear buffer
 * @sgl:	The SG list
 * @nents:	Number of SG entries
 * @buf:	Where to copy to
 * @buflen:	The number of bytes to copy
 *
 * Returns the number of copied bytes.
 *
 **/
size_t sg_copy_to_buffer(struct scatterlist *sgl, unsigned int nents,
			 void *buf, size_t buflen)
{
	return sg_copy_buffer(sgl, nents, buf, buflen, 0, true);
}
EXPORT_SYMBOL(sg_copy_to_buffer);
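
/*
 * Usage sketch (illustrative only): bounce a scatterlist through a linear
 * buffer, e.g. for a debug dump.  "sgl" and "len" are assumed to come
 * from the caller, with at least "len" bytes available in the list.
 *
 *	void *tmp = kmalloc(len, GFP_KERNEL);
 *	size_t copied;
 *
 *	if (!tmp)
 *		return -ENOMEM;
 *	copied = sg_copy_to_buffer(sgl, sg_nents(sgl), tmp, len);
 */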

/**
 * sg_pcopy_from_buffer - Copy from a linear buffer to an SG list
 * @sgl:	The SG list
 * @nents:	Number of SG entries
 * @buf:	Where to copy from
 * @buflen:	The number of bytes to copy
 * @skip:	Number of bytes to skip before copying
 *
 * Returns the number of copied bytes.
 *
 **/
size_t sg_pcopy_from_buffer(struct scatterlist *sgl, unsigned int nents,
			    const void *buf, size_t buflen, off_t skip)
{
	return sg_copy_buffer(sgl, nents, (void *)buf, buflen, skip, false);
}
EXPORT_SYMBOL(sg_pcopy_from_buffer);

/**
 * sg_pcopy_to_buffer - Copy from an SG list to a linear buffer
 * @sgl:	The SG list
 * @nents:	Number of SG entries
 * @buf:	Where to copy to
 * @buflen:	The number of bytes to copy
 * @skip:	Number of bytes to skip before copying
 *
 * Returns the number of copied bytes.
 *
 **/
size_t sg_pcopy_to_buffer(struct scatterlist *sgl, unsigned int nents,
			  void *buf, size_t buflen, off_t skip)
{
	return sg_copy_buffer(sgl, nents, buf, buflen, skip, true);
}
EXPORT_SYMBOL(sg_pcopy_to_buffer);