/*
 * Copyright (C) 2007 Jens Axboe <jens.axboe@oracle.com>
 *
 * Scatterlist handling helpers.
 *
 * This source code is licensed under the GNU General Public License,
 * Version 2. See the file COPYING for more details.
 */
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/scatterlist.h>
#include <linux/highmem.h>

/**
 * sg_next - return the next scatterlist entry in a list
 * @sg: The current sg entry
 *
 * Description:
 * Usually the next entry will be @sg@ + 1, but if this sg element is part
 * of a chained scatterlist, it could jump to the start of a new
 * scatterlist array.
 *
 **/
struct scatterlist *sg_next(struct scatterlist *sg)
{
#ifdef CONFIG_DEBUG_SG
	BUG_ON(sg->sg_magic != SG_MAGIC);
#endif
	if (sg_is_last(sg))
		return NULL;

	sg++;
	if (unlikely(sg_is_chain(sg)))
		sg = sg_chain_ptr(sg);

	return sg;
}
EXPORT_SYMBOL(sg_next);
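
/*
 * Usage sketch (illustrative only, not part of the original file): summing
 * the byte length of a scatterlist by walking it with sg_next().  The
 * helper name is hypothetical; most callers would use the for_each_sg()
 * macro instead of an open-coded loop.
 */
#if 0
static unsigned int example_total_length(struct scatterlist *sgl)
{
	struct scatterlist *sg;
	unsigned int total = 0;

	/*
	 * sg_next() returns NULL after the entry marked with sg_mark_end(),
	 * and transparently follows chain entries, so this also works for
	 * chained tables.
	 */
	for (sg = sgl; sg; sg = sg_next(sg))
		total += sg->length;

	return total;
}
#endif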

/**
 * sg_last - return the last scatterlist entry in a list
 * @sgl: First entry in the scatterlist
 * @nents: Number of entries in the scatterlist
 *
 * Description:
 * Should only be used casually, it (currently) scans the entire list
 * to get the last entry.
 *
 * Note that the @sgl@ pointer passed in need not be the first one,
 * the important bit is that @nents@ denotes the number of entries that
 * exist from @sgl@.
 *
 **/
struct scatterlist *sg_last(struct scatterlist *sgl, unsigned int nents)
{
#ifndef ARCH_HAS_SG_CHAIN
	struct scatterlist *ret = &sgl[nents - 1];
#else
	struct scatterlist *sg, *ret = NULL;
	unsigned int i;

	for_each_sg(sgl, sg, nents, i)
		ret = sg;

#endif
#ifdef CONFIG_DEBUG_SG
	BUG_ON(sgl[0].sg_magic != SG_MAGIC);
	BUG_ON(!sg_is_last(ret));
#endif
	return ret;
}
EXPORT_SYMBOL(sg_last);

/**
 * sg_init_table - Initialize SG table
 * @sgl: The SG table
 * @nents: Number of entries in table
 *
 * Notes:
 * If this is part of a chained sg table, sg_mark_end() should be
 * used only on the last table part.
 *
 **/
void sg_init_table(struct scatterlist *sgl, unsigned int nents)
{
	memset(sgl, 0, sizeof(*sgl) * nents);
#ifdef CONFIG_DEBUG_SG
	{
		unsigned int i;
		for (i = 0; i < nents; i++)
			sgl[i].sg_magic = SG_MAGIC;
	}
#endif
	sg_mark_end(&sgl[nents - 1]);
}
EXPORT_SYMBOL(sg_init_table);
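
/*
 * Usage sketch (illustrative only, not part of the original file): filling
 * a caller-provided table with one full page per entry via sg_set_page().
 * The function and parameter names are hypothetical.
 */
#if 0
static void example_fill_table(struct scatterlist *sgl, struct page **pages,
			       unsigned int npages)
{
	unsigned int i;

	/* zero all entries and place the end marker on the last one */
	sg_init_table(sgl, npages);
	for (i = 0; i < npages; i++)
		sg_set_page(&sgl[i], pages[i], PAGE_SIZE, 0);
}
#endif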

/**
 * sg_init_one - Initialize a single entry sg list
 * @sg: SG entry
 * @buf: Virtual address for IO
 * @buflen: IO length
 *
 **/
void sg_init_one(struct scatterlist *sg, const void *buf, unsigned int buflen)
{
	sg_init_table(sg, 1);
	sg_set_buf(sg, buf, buflen);
}
EXPORT_SYMBOL(sg_init_one);
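
/*
 * Usage sketch (illustrative only, not part of the original file): wrapping
 * a single kernel buffer in a one-entry sg list, e.g. before handing it to
 * an interface that expects a scatterlist.
 */
#if 0
static void example_single_entry(void *buf, unsigned int buflen)
{
	struct scatterlist sg;

	/* initializes the entry, sets the buffer and marks it as the end */
	sg_init_one(&sg, buf, buflen);
}
#endif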

/*
 * The default behaviour of sg_alloc_table() is to use these kmalloc/kfree
 * helpers.
 */
static struct scatterlist *sg_kmalloc(unsigned int nents, gfp_t gfp_mask)
{
	if (nents == SG_MAX_SINGLE_ALLOC)
		return (struct scatterlist *) __get_free_page(gfp_mask);
	else
		return kmalloc(nents * sizeof(struct scatterlist), gfp_mask);
}

static void sg_kfree(struct scatterlist *sg, unsigned int nents)
{
	if (nents == SG_MAX_SINGLE_ALLOC)
		free_page((unsigned long) sg);
	else
		kfree(sg);
}

/**
 * __sg_free_table - Free a previously mapped sg table
 * @table: The sg table header to use
 * @max_ents: The maximum number of entries per single scatterlist
 * @free_fn: Free function
 *
 * Description:
 * Free an sg table previously allocated and setup with
 * __sg_alloc_table(). The @max_ents value must be identical to
 * that previously used with __sg_alloc_table().
 *
 **/
void __sg_free_table(struct sg_table *table, unsigned int max_ents,
		     sg_free_fn *free_fn)
{
	struct scatterlist *sgl, *next;

	if (unlikely(!table->sgl))
		return;

	sgl = table->sgl;
	while (table->orig_nents) {
		unsigned int alloc_size = table->orig_nents;
		unsigned int sg_size;

		/*
		 * If we have more than max_ents segments left,
		 * then assign 'next' to the sg table after the current one.
		 * sg_size is then one less than alloc size, since the last
		 * element is the chain pointer.
		 */
		if (alloc_size > max_ents) {
			next = sg_chain_ptr(&sgl[max_ents - 1]);
			alloc_size = max_ents;
			sg_size = alloc_size - 1;
		} else {
			sg_size = alloc_size;
			next = NULL;
		}

		table->orig_nents -= sg_size;
		free_fn(sgl, alloc_size);
		sgl = next;
	}

	table->sgl = NULL;
}
EXPORT_SYMBOL(__sg_free_table);

/**
 * sg_free_table - Free a previously allocated sg table
 * @table: The mapped sg table header
 *
 **/
void sg_free_table(struct sg_table *table)
{
	__sg_free_table(table, SG_MAX_SINGLE_ALLOC, sg_kfree);
}
EXPORT_SYMBOL(sg_free_table);

/**
 * __sg_alloc_table - Allocate and initialize an sg table with given allocator
 * @table: The sg table header to use
 * @nents: Number of entries in sg list
 * @max_ents: The maximum number of entries the allocator returns per call
 * @gfp_mask: GFP allocation mask
 * @alloc_fn: Allocator to use
 *
 * Description:
 * This function sets up @table to hold @nents entries. The allocator is
 * defined to return scatterlist chunks of maximum size @max_ents.
 * Thus if @nents is bigger than @max_ents, the scatterlists will be
 * chained in units of @max_ents.
 *
 * Notes:
 * If this function returns non-0 (e.g. failure), the caller must call
 * __sg_free_table() to cleanup any leftover allocations.
 *
 **/
int __sg_alloc_table(struct sg_table *table, unsigned int nents,
		     unsigned int max_ents, gfp_t gfp_mask,
		     sg_alloc_fn *alloc_fn)
{
	struct scatterlist *sg, *prv;
	unsigned int left;

#ifndef ARCH_HAS_SG_CHAIN
	BUG_ON(nents > max_ents);
#endif

	memset(table, 0, sizeof(*table));

	left = nents;
	prv = NULL;
	do {
		unsigned int sg_size, alloc_size = left;

		if (alloc_size > max_ents) {
			alloc_size = max_ents;
			sg_size = alloc_size - 1;
		} else
			sg_size = alloc_size;

		left -= sg_size;

		sg = alloc_fn(alloc_size, gfp_mask);
		if (unlikely(!sg))
			return -ENOMEM;

		sg_init_table(sg, alloc_size);
		table->nents = table->orig_nents += sg_size;

		/*
		 * If this is the first mapping, assign the sg table header.
		 * If this is not the first mapping, chain previous part.
		 */
		if (prv)
			sg_chain(prv, max_ents, sg);
		else
			table->sgl = sg;

		/*
		 * If no more entries after this one, mark the end
		 */
		if (!left)
			sg_mark_end(&sg[sg_size - 1]);

		/*
		 * only really needed for mempool backed sg allocations (like
		 * SCSI), a possible improvement here would be to pass the
		 * table pointer into the allocator and let that clear these
		 * flags
		 */
		gfp_mask &= ~__GFP_WAIT;
		gfp_mask |= __GFP_HIGH;
		prv = sg;
	} while (left);

	return 0;
}
EXPORT_SYMBOL(__sg_alloc_table);
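
/*
 * Usage sketch (illustrative only, not part of the original file): using
 * __sg_alloc_table()/__sg_free_table() with a custom allocator pair, as a
 * mempool-backed user such as SCSI might.  The kmalloc-based callbacks and
 * the chunk size of 8 are hypothetical stand-ins.
 */
#if 0
static struct scatterlist *example_alloc(unsigned int nents, gfp_t gfp_mask)
{
	return kmalloc(nents * sizeof(struct scatterlist), gfp_mask);
}

static void example_free(struct scatterlist *sg, unsigned int nents)
{
	kfree(sg);
}

static int example_build_table(struct sg_table *table, unsigned int nents)
{
	int ret;

	/* chunks of at most 8 entries, chained together as needed */
	ret = __sg_alloc_table(table, nents, 8, GFP_KERNEL, example_alloc);
	if (ret)
		/* must pass the same max_ents that was used for allocation */
		__sg_free_table(table, 8, example_free);
	return ret;
}
#endif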

/**
 * sg_alloc_table - Allocate and initialize an sg table
 * @table: The sg table header to use
 * @nents: Number of entries in sg list
 * @gfp_mask: GFP allocation mask
 *
 * Description:
 * Allocate and initialize an sg table. If @nents@ is larger than
 * SG_MAX_SINGLE_ALLOC a chained sg table will be setup.
 *
 **/
int sg_alloc_table(struct sg_table *table, unsigned int nents, gfp_t gfp_mask)
{
	int ret;

	ret = __sg_alloc_table(table, nents, SG_MAX_SINGLE_ALLOC,
			       gfp_mask, sg_kmalloc);
	if (unlikely(ret))
		__sg_free_table(table, SG_MAX_SINGLE_ALLOC, sg_kfree);

	return ret;
}
EXPORT_SYMBOL(sg_alloc_table);
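
/*
 * Usage sketch (illustrative only, not part of the original file): the
 * common allocate/fill/free pattern around sg_alloc_table().  The page
 * array is assumed to be provided and pinned by the caller.
 */
#if 0
static int example_table_from_pages(struct page **pages, unsigned int npages)
{
	struct sg_table table;
	struct scatterlist *sg;
	unsigned int i;
	int ret;

	ret = sg_alloc_table(&table, npages, GFP_KERNEL);
	if (ret)
		return ret;

	/* for_each_sg() follows chain entries transparently */
	for_each_sg(table.sgl, sg, table.nents, i)
		sg_set_page(sg, pages[i], PAGE_SIZE, 0);

	/* ... hand table.sgl/table.nents to dma_map_sg() or similar ... */

	sg_free_table(&table);
	return 0;
}
#endif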

/**
 * sg_miter_start - start mapping iteration over a sg list
 * @miter: sg mapping iter to be started
 * @sgl: sg list to iterate over
 * @nents: number of sg entries
 * @flags: sg iteration flags (SG_MITER_*)
 *
 * Description:
 * Starts mapping iterator @miter.
 *
 * Context:
 * Don't care.
 */
void sg_miter_start(struct sg_mapping_iter *miter, struct scatterlist *sgl,
		    unsigned int nents, unsigned int flags)
{
	memset(miter, 0, sizeof(struct sg_mapping_iter));

	miter->__sg = sgl;
	miter->__nents = nents;
	miter->__offset = 0;
	WARN_ON(!(flags & (SG_MITER_TO_SG | SG_MITER_FROM_SG)));
	miter->__flags = flags;
}
EXPORT_SYMBOL(sg_miter_start);

/**
 * sg_miter_next - proceed mapping iterator to the next mapping
 * @miter: sg mapping iter to proceed
 *
 * Description:
 * Proceeds @miter@ to the next mapping. @miter@ should have been
 * started using sg_miter_start(). On successful return,
 * @miter@->page, @miter@->addr and @miter@->length point to the
 * current mapping.
 *
 * Context:
 * IRQ disabled if SG_MITER_ATOMIC. IRQ must stay disabled till
 * @miter@ is stopped. May sleep if !SG_MITER_ATOMIC.
 *
 * Returns:
 * true if @miter contains the next mapping. false if end of sg
 * list is reached.
 */
bool sg_miter_next(struct sg_mapping_iter *miter)
{
	unsigned int off, len;

	/* check for end and drop resources from the last iteration */
	if (!miter->__nents)
		return false;

	sg_miter_stop(miter);

	/* get to the next sg if necessary. __offset is adjusted by stop */
	while (miter->__offset == miter->__sg->length) {
		if (--miter->__nents) {
			miter->__sg = sg_next(miter->__sg);
			miter->__offset = 0;
		} else
			return false;
	}

	/* map the next page */
	off = miter->__sg->offset + miter->__offset;
	len = miter->__sg->length - miter->__offset;

	miter->page = nth_page(sg_page(miter->__sg), off >> PAGE_SHIFT);
	off &= ~PAGE_MASK;
	miter->length = min_t(unsigned int, len, PAGE_SIZE - off);
	miter->consumed = miter->length;

	if (miter->__flags & SG_MITER_ATOMIC)
		miter->addr = kmap_atomic(miter->page, KM_BIO_SRC_IRQ) + off;
	else
		miter->addr = kmap(miter->page) + off;

	return true;
}
EXPORT_SYMBOL(sg_miter_next);

/**
 * sg_miter_stop - stop mapping iteration
 * @miter: sg mapping iter to be stopped
 *
 * Description:
 * Stops mapping iterator @miter. @miter should have been started
 * using sg_miter_start(). A stopped iteration can be resumed by
 * calling sg_miter_next() on it. This is useful when resources (kmap)
 * need to be released during iteration.
 *
 * Context:
 * IRQ disabled if the SG_MITER_ATOMIC is set. Don't care otherwise.
 */
void sg_miter_stop(struct sg_mapping_iter *miter)
{
	WARN_ON(miter->consumed > miter->length);

	/* drop resources from the last iteration */
	if (miter->addr) {
		miter->__offset += miter->consumed;

		if (miter->__flags & SG_MITER_TO_SG)
			flush_kernel_dcache_page(miter->page);

		if (miter->__flags & SG_MITER_ATOMIC) {
			WARN_ON(!irqs_disabled());
			kunmap_atomic(miter->addr, KM_BIO_SRC_IRQ);
		} else
			kunmap(miter->page);

		miter->page = NULL;
		miter->addr = NULL;
		miter->length = 0;
		miter->consumed = 0;
	}
}
EXPORT_SYMBOL(sg_miter_stop);
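
/*
 * Usage sketch (illustrative only, not part of the original file): zeroing
 * the memory described by an sg list with the mapping iterator.  With
 * SG_MITER_TO_SG, sg_miter_stop() flushes the kernel dcache for each page
 * that was written.  Without SG_MITER_ATOMIC this may sleep.
 */
#if 0
static void example_zero_sg(struct scatterlist *sgl, unsigned int nents)
{
	struct sg_mapping_iter miter;

	sg_miter_start(&miter, sgl, nents, SG_MITER_TO_SG);
	while (sg_miter_next(&miter)) {
		/* miter.addr/miter.length describe one mapped piece of a page */
		memset(miter.addr, 0, miter.length);
	}
	sg_miter_stop(&miter);
}
#endif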

/**
 * sg_copy_buffer - Copy data between a linear buffer and an SG list
 * @sgl: The SG list
 * @nents: Number of SG entries
 * @buf: The linear buffer to copy to/from
 * @buflen: The number of bytes to copy
 * @to_buffer: transfer direction (non zero == from an sg list to a
 *	       buffer, 0 == from a buffer to an sg list)
 *
 * Returns the number of copied bytes.
 *
 **/
static size_t sg_copy_buffer(struct scatterlist *sgl, unsigned int nents,
			     void *buf, size_t buflen, int to_buffer)
{
	unsigned int offset = 0;
	struct sg_mapping_iter miter;
	unsigned long flags;
	unsigned int sg_flags = SG_MITER_ATOMIC;

	if (to_buffer)
		sg_flags |= SG_MITER_FROM_SG;
	else
		sg_flags |= SG_MITER_TO_SG;

	sg_miter_start(&miter, sgl, nents, sg_flags);

	local_irq_save(flags);

	while (sg_miter_next(&miter) && offset < buflen) {
		unsigned int len;

		len = min(miter.length, buflen - offset);

		if (to_buffer)
			memcpy(buf + offset, miter.addr, len);
		else
			memcpy(miter.addr, buf + offset, len);

		offset += len;
	}

	sg_miter_stop(&miter);

	local_irq_restore(flags);
	return offset;
}

/**
 * sg_copy_from_buffer - Copy from a linear buffer to an SG list
 * @sgl: The SG list
 * @nents: Number of SG entries
 * @buf: Where to copy from
 * @buflen: The number of bytes to copy
 *
 * Returns the number of copied bytes.
 *
 **/
size_t sg_copy_from_buffer(struct scatterlist *sgl, unsigned int nents,
			   void *buf, size_t buflen)
{
	return sg_copy_buffer(sgl, nents, buf, buflen, 0);
}
EXPORT_SYMBOL(sg_copy_from_buffer);

/**
 * sg_copy_to_buffer - Copy from an SG list to a linear buffer
 * @sgl: The SG list
 * @nents: Number of SG entries
 * @buf: Where to copy to
 * @buflen: The number of bytes to copy
 *
 * Returns the number of copied bytes.
 *
 **/
size_t sg_copy_to_buffer(struct scatterlist *sgl, unsigned int nents,
			 void *buf, size_t buflen)
{
	return sg_copy_buffer(sgl, nents, buf, buflen, 1);
}
EXPORT_SYMBOL(sg_copy_to_buffer);
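
/*
 * Usage sketch (illustrative only, not part of the original file): round-
 * tripping a small buffer through an sg list with the copy helpers.  Both
 * helpers map pages with a kmap_atomic-based iterator and briefly disable
 * interrupts, so they must not be called with buffers that can fault.
 */
#if 0
static int example_roundtrip(struct scatterlist *sgl, unsigned int nents)
{
	char src[64] = "scatterlist copy example";
	char dst[64];

	/* returns less than sizeof(src) if the sg list is shorter */
	if (sg_copy_from_buffer(sgl, nents, src, sizeof(src)) != sizeof(src))
		return -EINVAL;

	if (sg_copy_to_buffer(sgl, nents, dst, sizeof(dst)) != sizeof(dst))
		return -EINVAL;

	return memcmp(src, dst, sizeof(src)) ? -EIO : 0;
}
#endif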