/*
 * Copyright (C) 2007 Jens Axboe <jens.axboe@oracle.com>
 *
 * Scatterlist handling helpers.
 *
 * This source code is licensed under the GNU General Public License,
 * Version 2. See the file COPYING for more details.
 */
#include <linux/export.h>
#include <linux/slab.h>
#include <linux/scatterlist.h>
#include <linux/highmem.h>
#include <linux/kmemleak.h>

/**
 * sg_next - return the next scatterlist entry in a list
 * @sg: The current sg entry
 *
 * Description:
 *   Usually the next entry will be @sg@ + 1, but if this sg element is part
 *   of a chained scatterlist, it could jump to the start of a new
 *   scatterlist array.
 *
 **/
struct scatterlist *sg_next(struct scatterlist *sg)
{
#ifdef CONFIG_DEBUG_SG
	BUG_ON(sg->sg_magic != SG_MAGIC);
#endif
	if (sg_is_last(sg))
		return NULL;

	sg++;
	if (unlikely(sg_is_chain(sg)))
		sg = sg_chain_ptr(sg);

	return sg;
}
EXPORT_SYMBOL(sg_next);
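
/*
 * Illustrative sketch (not part of this file's API): sg_next() is the
 * primitive behind list walks.  Assuming a caller-provided list 'sgl'
 * whose final entry was terminated with sg_mark_end(), the whole list,
 * including any chained parts, can be visited like this:
 *
 *	struct scatterlist *sg;
 *
 *	for (sg = sgl; sg; sg = sg_next(sg))
 *		pr_debug("seg: %u bytes\n", sg->length);
 *
 * The for_each_sg() helper in <linux/scatterlist.h> wraps this same walk.
 */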

/**
 * sg_last - return the last scatterlist entry in a list
 * @sgl: First entry in the scatterlist
 * @nents: Number of entries in the scatterlist
 *
 * Description:
 *   Should only be used sparingly; it (currently) scans the entire list
 *   to get the last entry.
 *
 *   Note that the @sgl@ pointer passed in need not be the first one;
 *   the important bit is that @nents@ denotes the number of entries that
 *   exist from @sgl@.
 *
 **/
struct scatterlist *sg_last(struct scatterlist *sgl, unsigned int nents)
{
#ifndef ARCH_HAS_SG_CHAIN
	struct scatterlist *ret = &sgl[nents - 1];
#else
	struct scatterlist *sg, *ret = NULL;
	unsigned int i;

	for_each_sg(sgl, sg, nents, i)
		ret = sg;

#endif
#ifdef CONFIG_DEBUG_SG
	BUG_ON(sgl[0].sg_magic != SG_MAGIC);
	BUG_ON(!sg_is_last(ret));
#endif
	return ret;
}
EXPORT_SYMBOL(sg_last);
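
/*
 * Illustrative sketch (not part of this file): because @nents is counted
 * from whatever entry is passed in, sg_last() also works from the middle
 * of a list.  Assuming a caller-provided 'table' (struct sg_table):
 *
 *	struct scatterlist *end = sg_last(table.sgl, table.nents);
 *
 * Without ARCH_HAS_SG_CHAIN this is a simple array index; otherwise it
 * is an O(n) walk, hence the "use sparingly" note above.
 */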

/**
 * sg_init_table - Initialize SG table
 * @sgl: The SG table
 * @nents: Number of entries in table
 *
 * Notes:
 *   If this is part of a chained sg table, sg_mark_end() should be
 *   used only on the last table part.
 *
 **/
void sg_init_table(struct scatterlist *sgl, unsigned int nents)
{
	memset(sgl, 0, sizeof(*sgl) * nents);
#ifdef CONFIG_DEBUG_SG
	{
		unsigned int i;
		for (i = 0; i < nents; i++)
			sgl[i].sg_magic = SG_MAGIC;
	}
#endif
	sg_mark_end(&sgl[nents - 1]);
}
EXPORT_SYMBOL(sg_init_table);

/**
 * sg_init_one - Initialize a single entry sg list
 * @sg: SG entry
 * @buf: Virtual address for IO
 * @buflen: IO length
 *
 **/
void sg_init_one(struct scatterlist *sg, const void *buf, unsigned int buflen)
{
	sg_init_table(sg, 1);
	sg_set_buf(sg, buf, buflen);
}
EXPORT_SYMBOL(sg_init_one);
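
/*
 * Illustrative sketch (not part of this file): sg_init_one() is the common
 * way to hand a single lowmem buffer to an sg-consuming API.  The buffer
 * must be addressable via virt_to_page(), i.e. not a vmalloc() address,
 * and DMA to stack buffers is generally unsafe.  'cmd' is assumed caller
 * state:
 *
 *	struct scatterlist sg;
 *
 *	sg_init_one(&sg, cmd, sizeof(*cmd));
 *	(then pass &sg with nents == 1 to the consuming API)
 */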

/*
 * The default behaviour of sg_alloc_table() is to use these kmalloc/kfree
 * helpers.
 */
static struct scatterlist *sg_kmalloc(unsigned int nents, gfp_t gfp_mask)
{
	if (nents == SG_MAX_SINGLE_ALLOC) {
		/*
		 * Kmemleak doesn't track page allocations as they are not
		 * commonly used (in a raw form) for kernel data structures.
		 * As we chain together a list of pages and then a normal
		 * kmalloc (tracked by kmemleak), in order for that last
		 * allocation not to become decoupled (and thus a
		 * false-positive) we need to inform kmemleak of all the
		 * intermediate allocations.
		 */
		void *ptr = (void *) __get_free_page(gfp_mask);
		kmemleak_alloc(ptr, PAGE_SIZE, 1, gfp_mask);
		return ptr;
	} else
		return kmalloc(nents * sizeof(struct scatterlist), gfp_mask);
}

static void sg_kfree(struct scatterlist *sg, unsigned int nents)
{
	if (nents == SG_MAX_SINGLE_ALLOC) {
		kmemleak_free(sg);
		free_page((unsigned long) sg);
	} else
		kfree(sg);
}

/**
 * __sg_free_table - Free a previously mapped sg table
 * @table: The sg table header to use
 * @max_ents: The maximum number of entries per single scatterlist
 * @free_fn: Free function
 *
 * Description:
 *   Free an sg table previously allocated and set up with
 *   __sg_alloc_table(). The @max_ents value must be identical to
 *   that previously used with __sg_alloc_table().
 *
 **/
void __sg_free_table(struct sg_table *table, unsigned int max_ents,
		     sg_free_fn *free_fn)
{
	struct scatterlist *sgl, *next;

	if (unlikely(!table->sgl))
		return;

	sgl = table->sgl;
	while (table->orig_nents) {
		unsigned int alloc_size = table->orig_nents;
		unsigned int sg_size;

		/*
		 * If we have more than max_ents segments left,
		 * then assign 'next' to the sg table after the current one.
		 * sg_size is then one less than alloc size, since the last
		 * element is the chain pointer.
		 */
		if (alloc_size > max_ents) {
			next = sg_chain_ptr(&sgl[max_ents - 1]);
			alloc_size = max_ents;
			sg_size = alloc_size - 1;
		} else {
			sg_size = alloc_size;
			next = NULL;
		}

		table->orig_nents -= sg_size;
		free_fn(sgl, alloc_size);
		sgl = next;
	}

	table->sgl = NULL;
}
EXPORT_SYMBOL(__sg_free_table);

/**
 * sg_free_table - Free a previously allocated sg table
 * @table: The mapped sg table header
 *
 **/
void sg_free_table(struct sg_table *table)
{
	__sg_free_table(table, SG_MAX_SINGLE_ALLOC, sg_kfree);
}
EXPORT_SYMBOL(sg_free_table);

/**
 * __sg_alloc_table - Allocate and initialize an sg table with given allocator
 * @table: The sg table header to use
 * @nents: Number of entries in sg list
 * @max_ents: The maximum number of entries the allocator returns per call
 * @gfp_mask: GFP allocation mask
 * @alloc_fn: Allocator to use
 *
 * Description:
 *   This function returns a @table @nents long. The allocator is
 *   defined to return scatterlist chunks of maximum size @max_ents.
 *   Thus if @nents is bigger than @max_ents, the scatterlists will be
 *   chained in units of @max_ents.
 *
 * Notes:
 *   If this function returns non-0 (i.e. failure), the caller must call
 *   __sg_free_table() to clean up any leftover allocations.
 *
 **/
int __sg_alloc_table(struct sg_table *table, unsigned int nents,
		     unsigned int max_ents, gfp_t gfp_mask,
		     sg_alloc_fn *alloc_fn)
{
	struct scatterlist *sg, *prv;
	unsigned int left;

	memset(table, 0, sizeof(*table));

	if (nents == 0)
		return -EINVAL;
#ifndef ARCH_HAS_SG_CHAIN
	BUG_ON(nents > max_ents);
#endif

	left = nents;
	prv = NULL;
	do {
		unsigned int sg_size, alloc_size = left;

		if (alloc_size > max_ents) {
			alloc_size = max_ents;
			sg_size = alloc_size - 1;
		} else
			sg_size = alloc_size;

		left -= sg_size;

		sg = alloc_fn(alloc_size, gfp_mask);
		if (unlikely(!sg)) {
			/*
			 * Adjust entry count to reflect that the last
			 * entry of the previous table won't be used for
			 * linkage. Without this, sg_kfree() may get
			 * confused.
			 */
			if (prv)
				table->nents = ++table->orig_nents;

			return -ENOMEM;
		}

		sg_init_table(sg, alloc_size);
		table->nents = table->orig_nents += sg_size;

		/*
		 * If this is the first mapping, assign the sg table header.
		 * If this is not the first mapping, chain previous part.
		 */
		if (prv)
			sg_chain(prv, max_ents, sg);
		else
			table->sgl = sg;

		/*
		 * If no more entries after this one, mark the end
		 */
		if (!left)
			sg_mark_end(&sg[sg_size - 1]);

		/*
		 * Only really needed for mempool-backed sg allocations (like
		 * SCSI); a possible improvement here would be to pass the
		 * table pointer into the allocator and let that clear these
		 * flags.
		 */
		gfp_mask &= ~__GFP_WAIT;
		gfp_mask |= __GFP_HIGH;
		prv = sg;
	} while (left);

	return 0;
}
EXPORT_SYMBOL(__sg_alloc_table);
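
/*
 * Worked example (illustrative, not part of this file): with nents == 300
 * and max_ents == 128, the loop above allocates chunks of 128, 128 and 46
 * entries.  Each full chunk donates its last entry as a chain pointer, so
 * the first two chunks carry 127 payload entries each and the final chunk
 * carries the remaining 46 (127 + 127 + 46 == 300).  table->orig_nents
 * counts only the 300 payload entries, never the chain links.
 */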

/**
 * sg_alloc_table - Allocate and initialize an sg table
 * @table: The sg table header to use
 * @nents: Number of entries in sg list
 * @gfp_mask: GFP allocation mask
 *
 * Description:
 *   Allocate and initialize an sg table. If @nents@ is larger than
 *   SG_MAX_SINGLE_ALLOC a chained sg table will be set up.
 *
 **/
int sg_alloc_table(struct sg_table *table, unsigned int nents, gfp_t gfp_mask)
{
	int ret;

	ret = __sg_alloc_table(table, nents, SG_MAX_SINGLE_ALLOC,
			       gfp_mask, sg_kmalloc);
	if (unlikely(ret))
		__sg_free_table(table, SG_MAX_SINGLE_ALLOC, sg_kfree);

	return ret;
}
EXPORT_SYMBOL(sg_alloc_table);
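
/*
 * Illustrative sketch (not part of this file): a typical allocate/fill/free
 * cycle.  'pages' and 'npages' are assumed caller state; error handling is
 * abbreviated.
 *
 *	struct sg_table table;
 *	struct scatterlist *sg;
 *	unsigned int i;
 *
 *	if (sg_alloc_table(&table, npages, GFP_KERNEL))
 *		return -ENOMEM;
 *	for_each_sg(table.sgl, sg, table.nents, i)
 *		sg_set_page(sg, pages[i], PAGE_SIZE, 0);
 *	(map, do I/O, unmap ...)
 *	sg_free_table(&table);
 */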

/**
 * sg_miter_start - start mapping iteration over a sg list
 * @miter: sg mapping iter to be started
 * @sgl: sg list to iterate over
 * @nents: number of sg entries
 * @flags: sg iteration flags (SG_MITER_*)
 *
 * Description:
 *   Starts mapping iterator @miter.
 *
 * Context:
 *   Don't care.
 */
void sg_miter_start(struct sg_mapping_iter *miter, struct scatterlist *sgl,
		    unsigned int nents, unsigned int flags)
{
	memset(miter, 0, sizeof(struct sg_mapping_iter));

	miter->__sg = sgl;
	miter->__nents = nents;
	miter->__offset = 0;
	WARN_ON(!(flags & (SG_MITER_TO_SG | SG_MITER_FROM_SG)));
	miter->__flags = flags;
}
EXPORT_SYMBOL(sg_miter_start);

/**
 * sg_miter_next - proceed mapping iterator to the next mapping
 * @miter: sg mapping iter to proceed
 *
 * Description:
 *   Proceeds @miter@ to the next mapping. @miter@ should have been
 *   started using sg_miter_start(). On successful return,
 *   @miter@->page, @miter@->addr and @miter@->length point to the
 *   current mapping.
 *
 * Context:
 *   IRQ disabled if SG_MITER_ATOMIC. IRQ must stay disabled till
 *   @miter@ is stopped. May sleep if !SG_MITER_ATOMIC.
 *
 * Returns:
 *   true if @miter contains the next mapping. false if end of sg
 *   list is reached.
 */
bool sg_miter_next(struct sg_mapping_iter *miter)
{
	unsigned int off, len;

	/* check for end and drop resources from the last iteration */
	if (!miter->__nents)
		return false;

	sg_miter_stop(miter);

	/* get to the next sg if necessary.  __offset is adjusted by stop */
	while (miter->__offset == miter->__sg->length) {
		if (--miter->__nents) {
			miter->__sg = sg_next(miter->__sg);
			miter->__offset = 0;
		} else
			return false;
	}

	/* map the next page */
	off = miter->__sg->offset + miter->__offset;
	len = miter->__sg->length - miter->__offset;

	miter->page = nth_page(sg_page(miter->__sg), off >> PAGE_SHIFT);
	off &= ~PAGE_MASK;
	miter->length = min_t(unsigned int, len, PAGE_SIZE - off);
	miter->consumed = miter->length;

	if (miter->__flags & SG_MITER_ATOMIC)
		miter->addr = kmap_atomic(miter->page) + off;
	else
		miter->addr = kmap(miter->page) + off;

	return true;
}
EXPORT_SYMBOL(sg_miter_next);

/**
 * sg_miter_stop - stop mapping iteration
 * @miter: sg mapping iter to be stopped
 *
 * Description:
 *   Stops mapping iterator @miter. @miter should have been started
 *   using sg_miter_start(). A stopped iteration can be
 *   resumed by calling sg_miter_next() on it. This is useful when
 *   resources (kmap) need to be released during iteration.
 *
 * Context:
 *   IRQ disabled if SG_MITER_ATOMIC is set. Don't care otherwise.
 */
void sg_miter_stop(struct sg_mapping_iter *miter)
{
	WARN_ON(miter->consumed > miter->length);

	/* drop resources from the last iteration */
	if (miter->addr) {
		miter->__offset += miter->consumed;

		if (miter->__flags & SG_MITER_TO_SG)
			flush_kernel_dcache_page(miter->page);

		if (miter->__flags & SG_MITER_ATOMIC) {
			WARN_ON(!irqs_disabled());
			kunmap_atomic(miter->addr);
		} else
			kunmap(miter->page);

		miter->page = NULL;
		miter->addr = NULL;
		miter->length = 0;
		miter->consumed = 0;
	}
}
EXPORT_SYMBOL(sg_miter_stop);
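
/*
 * Illustrative sketch (not part of this file): a typical miter loop over a
 * caller-provided 'sgl'/'nents' pair, reading the list one mapped page at
 * a time.  With SG_MITER_ATOMIC the loop body must not sleep and IRQs must
 * stay disabled between next/stop, as documented above.
 *
 *	struct sg_mapping_iter miter;
 *
 *	sg_miter_start(&miter, sgl, nents, SG_MITER_FROM_SG);
 *	while (sg_miter_next(&miter))
 *		consume(miter.addr, miter.length);
 *	sg_miter_stop(&miter);
 *
 * 'consume()' is a hypothetical callback standing in for real work.
 */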

/**
 * sg_copy_buffer - Copy data between a linear buffer and an SG list
 * @sgl: The SG list
 * @nents: Number of SG entries
 * @buf: Where to copy from
 * @buflen: The number of bytes to copy
 * @to_buffer: transfer direction (non-zero == from an sg list to a
 *	buffer, 0 == from a buffer to an sg list)
 *
 * Returns the number of copied bytes.
 *
 **/
static size_t sg_copy_buffer(struct scatterlist *sgl, unsigned int nents,
			     void *buf, size_t buflen, int to_buffer)
{
	unsigned int offset = 0;
	struct sg_mapping_iter miter;
	unsigned long flags;
	unsigned int sg_flags = SG_MITER_ATOMIC;

	if (to_buffer)
		sg_flags |= SG_MITER_FROM_SG;
	else
		sg_flags |= SG_MITER_TO_SG;

	sg_miter_start(&miter, sgl, nents, sg_flags);

	local_irq_save(flags);

	while (sg_miter_next(&miter) && offset < buflen) {
		unsigned int len;

		len = min(miter.length, buflen - offset);

		if (to_buffer)
			memcpy(buf + offset, miter.addr, len);
		else
			memcpy(miter.addr, buf + offset, len);

		offset += len;
	}

	sg_miter_stop(&miter);

	local_irq_restore(flags);
	return offset;
}

/**
 * sg_copy_from_buffer - Copy from a linear buffer to an SG list
 * @sgl: The SG list
 * @nents: Number of SG entries
 * @buf: Where to copy from
 * @buflen: The number of bytes to copy
 *
 * Returns the number of copied bytes.
 *
 **/
size_t sg_copy_from_buffer(struct scatterlist *sgl, unsigned int nents,
			   void *buf, size_t buflen)
{
	return sg_copy_buffer(sgl, nents, buf, buflen, 0);
}
EXPORT_SYMBOL(sg_copy_from_buffer);

/**
 * sg_copy_to_buffer - Copy from an SG list to a linear buffer
 * @sgl: The SG list
 * @nents: Number of SG entries
 * @buf: Where to copy to
 * @buflen: The number of bytes to copy
 *
 * Returns the number of copied bytes.
 *
 **/
size_t sg_copy_to_buffer(struct scatterlist *sgl, unsigned int nents,
			 void *buf, size_t buflen)
{
	return sg_copy_buffer(sgl, nents, buf, buflen, 1);
}
EXPORT_SYMBOL(sg_copy_to_buffer);
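
/*
 * Illustrative sketch (not part of this file): draining an sg list into a
 * bounce buffer and refilling it.  'table' is an assumed, already-populated
 * struct sg_table and 'len' the total payload size:
 *
 *	void *bounce = kmalloc(len, GFP_KERNEL);
 *	size_t copied;
 *
 *	if (!bounce)
 *		return -ENOMEM;
 *	copied = sg_copy_to_buffer(table.sgl, table.nents, bounce, len);
 *	(transform bounce[0..copied) in place ...)
 *	sg_copy_from_buffer(table.sgl, table.nents, bounce, copied);
 *	kfree(bounce);
 *
 * Both helpers are safe to call from atomic context: as the code above
 * shows, they disable IRQs and use SG_MITER_ATOMIC mappings internally.
 */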