/*
 * Copyright (C) 2011
 * Boaz Harrosh <bharrosh@panasas.com>
 *
 * This file is part of the objects raid engine (ore).
 *
 * It is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as published
 * by the Free Software Foundation.
 *
 * You should have received a copy of the GNU General Public License
 * along with "ore". If not, write to the Free Software Foundation, Inc:
 *	"Free Software Foundation <info@fsf.org>"
 */

#include <linux/gfp.h>
#include <linux/async_tx.h>

#include "ore_raid.h"

#undef ORE_DBGMSG2
#define ORE_DBGMSG2 ORE_DBGMSG

static struct page *_raid_page_alloc(void)
{
	return alloc_page(GFP_KERNEL);
}

static void _raid_page_free(struct page *p)
{
	__free_page(p);
}

/* This struct is forward declared in ore_io_state, but is private to here.
 * It is put on ios->sp2d for RAID5/6 writes only. See _gen_xor_unit.
 *
 * __stripe_pages_2d is a 2d array of pages, and it is also a corner turn.
 * Ascending page index access is sp2d(p-minor, c-major). But storage is
 * sp2d[p-minor][c-major], so it can be properly presented to the async-xor
 * API.
 */
struct __stripe_pages_2d {
	/* Cache some hot path repeated calculations */
	unsigned parity;
	unsigned data_devs;
	unsigned pages_in_unit;

	bool needed;

	/* Array size is pages_in_unit (layout->stripe_unit / PAGE_SIZE) */
	struct __1_page_stripe {
		bool alloc;
		unsigned write_count;
		struct async_submit_ctl submit;
		struct dma_async_tx_descriptor *tx;

		/* The size of this array is data_devs + parity */
		struct page **pages;
		struct page **scribble;
		/* bool array, size of this array is data_devs */
		char *page_is_read;
	} _1p_stripes[];
};
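
/* Illustrative sketch, not part of the original file: with the layout
 * described above, the column array of one page-row p can be handed to the
 * async-xor API as-is, because the data columns come first and the parity
 * page sits at index data_devs (see _ore_add_stripe_page() and
 * _gen_xor_unit() below). A hypothetical walk of one row:
 *
 *	struct __1_page_stripe *_1ps = &sp2d->_1p_stripes[p];
 *	unsigned c;
 *
 *	for (c = 0; c < sp2d->data_devs; c++)
 *		consume(_1ps->pages[c]);		// data columns
 *	consume(_1ps->pages[sp2d->data_devs]);		// parity column
 *
 * consume() is a made-up placeholder for whatever uses the pages.
 */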

/* This can get bigger than a page. So support multiple page allocations.
 * _sp2d_free should be called even if _sp2d_alloc fails (by returning
 * non-zero).
 */
static int _sp2d_alloc(unsigned pages_in_unit, unsigned group_width,
		       unsigned parity, struct __stripe_pages_2d **psp2d)
{
	struct __stripe_pages_2d *sp2d;
	unsigned data_devs = group_width - parity;
	struct _alloc_all_bytes {
		struct __alloc_stripe_pages_2d {
			struct __stripe_pages_2d sp2d;
			struct __1_page_stripe _1p_stripes[pages_in_unit];
		} __asp2d;
		struct __alloc_1p_arrays {
			struct page *pages[group_width];
			struct page *scribble[group_width];
			char page_is_read[data_devs];
		} __a1pa[pages_in_unit];
	} *_aab;
	struct __alloc_1p_arrays *__a1pa;
	struct __alloc_1p_arrays *__a1pa_end;
	const unsigned sizeof__a1pa = sizeof(_aab->__a1pa[0]);
	unsigned num_a1pa, alloc_size, i;

	/* FIXME: check these numbers in ore_verify_layout */
	BUG_ON(sizeof(_aab->__asp2d) > PAGE_SIZE);
	BUG_ON(sizeof__a1pa > PAGE_SIZE);

	if (sizeof(*_aab) > PAGE_SIZE) {
		num_a1pa = (PAGE_SIZE - sizeof(_aab->__asp2d)) / sizeof__a1pa;
		alloc_size = sizeof(_aab->__asp2d) + sizeof__a1pa * num_a1pa;
	} else {
		num_a1pa = pages_in_unit;
		alloc_size = sizeof(*_aab);
	}

	_aab = kzalloc(alloc_size, GFP_KERNEL);
	if (unlikely(!_aab)) {
		ORE_DBGMSG("!! Failed to alloc sp2d size=%d\n", alloc_size);
		return -ENOMEM;
	}

	sp2d = &_aab->__asp2d.sp2d;
	*psp2d = sp2d; /* From here just call _sp2d_free */

	__a1pa = _aab->__a1pa;
	__a1pa_end = __a1pa + num_a1pa;

	for (i = 0; i < pages_in_unit; ++i) {
		if (unlikely(__a1pa >= __a1pa_end)) {
			num_a1pa = min_t(unsigned, PAGE_SIZE / sizeof__a1pa,
					 pages_in_unit - i);

			__a1pa = kzalloc(num_a1pa * sizeof__a1pa, GFP_KERNEL);
			if (unlikely(!__a1pa)) {
				ORE_DBGMSG("!! Failed to _alloc_1p_arrays=%d\n",
					   num_a1pa);
				return -ENOMEM;
			}
			__a1pa_end = __a1pa + num_a1pa;
			/* First *pages is marked for kfree of the buffer */
			sp2d->_1p_stripes[i].alloc = true;
		}

		sp2d->_1p_stripes[i].pages = __a1pa->pages;
		sp2d->_1p_stripes[i].scribble = __a1pa->scribble;
		sp2d->_1p_stripes[i].page_is_read = __a1pa->page_is_read;
		++__a1pa;
	}

	sp2d->parity = parity;
	sp2d->data_devs = data_devs;
	sp2d->pages_in_unit = pages_in_unit;
	return 0;
}
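
/* Illustrative caller sketch, not part of the original file: because *psp2d
 * is set as soon as the first buffer is allocated, a caller can always defer
 * cleanup to _sp2d_free(), even when _sp2d_alloc() fails part way (the real
 * callers below reach _sp2d_free() through ios->sp2d in
 * _ore_free_raid_stuff()):
 *
 *	struct __stripe_pages_2d *sp2d = NULL;
 *
 *	if (_sp2d_alloc(pages_in_unit, group_width, parity, &sp2d)) {
 *		_sp2d_free(sp2d);	// safe on NULL and on partial allocs
 *		return -ENOMEM;
 *	}
 *	...
 *	_sp2d_free(sp2d);
 *
 * The local variable names here are hypothetical.
 */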

static void _sp2d_reset(struct __stripe_pages_2d *sp2d,
			const struct _ore_r4w_op *r4w, void *priv)
{
	unsigned data_devs = sp2d->data_devs;
	unsigned group_width = data_devs + sp2d->parity;
	int p, c;

	if (!sp2d->needed)
		return;

	for (c = data_devs - 1; c >= 0; --c)
		for (p = sp2d->pages_in_unit - 1; p >= 0; --p) {
			struct __1_page_stripe *_1ps = &sp2d->_1p_stripes[p];

			if (_1ps->page_is_read[c]) {
				struct page *page = _1ps->pages[c];

				r4w->put_page(priv, page);
				_1ps->page_is_read[c] = false;
			}
		}

	for (p = 0; p < sp2d->pages_in_unit; p++) {
		struct __1_page_stripe *_1ps = &sp2d->_1p_stripes[p];

		memset(_1ps->pages, 0, group_width * sizeof(*_1ps->pages));
		_1ps->write_count = 0;
		_1ps->tx = NULL;
	}

	sp2d->needed = false;
}

static void _sp2d_free(struct __stripe_pages_2d *sp2d)
{
	unsigned i;

	if (!sp2d)
		return;

	for (i = 0; i < sp2d->pages_in_unit; ++i) {
		if (sp2d->_1p_stripes[i].alloc)
			kfree(sp2d->_1p_stripes[i].pages);
	}

	kfree(sp2d);
}

static unsigned _sp2d_min_pg(struct __stripe_pages_2d *sp2d)
{
	unsigned p;

	for (p = 0; p < sp2d->pages_in_unit; p++) {
		struct __1_page_stripe *_1ps = &sp2d->_1p_stripes[p];

		if (_1ps->write_count)
			return p;
	}

	return ~0;
}

static unsigned _sp2d_max_pg(struct __stripe_pages_2d *sp2d)
{
	int p;

	for (p = sp2d->pages_in_unit - 1; p >= 0; --p) {
		struct __1_page_stripe *_1ps = &sp2d->_1p_stripes[p];

		if (_1ps->write_count)
			return p;
	}

	return ~0;
}

static void _gen_xor_unit(struct __stripe_pages_2d *sp2d)
{
	unsigned p;

	for (p = 0; p < sp2d->pages_in_unit; p++) {
		struct __1_page_stripe *_1ps = &sp2d->_1p_stripes[p];

		if (!_1ps->write_count)
			continue;

		init_async_submit(&_1ps->submit,
			ASYNC_TX_XOR_ZERO_DST | ASYNC_TX_ACK,
			NULL, NULL, NULL, (addr_conv_t *)_1ps->scribble);

		/* TODO: raid6 */
		_1ps->tx = async_xor(_1ps->pages[sp2d->data_devs], _1ps->pages,
				     0, sp2d->data_devs, PAGE_SIZE,
				     &_1ps->submit);
	}

	for (p = 0; p < sp2d->pages_in_unit; p++) {
		struct __1_page_stripe *_1ps = &sp2d->_1p_stripes[p];
		/* NOTE: We wait for HW synchronously (I don't have such HW
		 * to test with.) Is parallelism needed with today's multi
		 * cores?
		 */
		async_tx_issue_pending(_1ps->tx);
	}
}

void _ore_add_stripe_page(struct __stripe_pages_2d *sp2d,
			  struct ore_striping_info *si, struct page *page)
{
	struct __1_page_stripe *_1ps;

	sp2d->needed = true;

	_1ps = &sp2d->_1p_stripes[si->cur_pg];
	_1ps->pages[si->cur_comp] = page;
	++_1ps->write_count;

	si->cur_pg = (si->cur_pg + 1) % sp2d->pages_in_unit;
	/* si->cur_comp is advanced outside at main loop */
}

void _ore_add_sg_seg(struct ore_per_dev_state *per_dev, unsigned cur_len,
		     bool not_last)
{
	struct osd_sg_entry *sge;

	ORE_DBGMSG("dev=%d cur_len=0x%x not_last=%d cur_sg=%d "
		   "offset=0x%llx length=0x%x last_sgs_total=0x%x\n",
		   per_dev->dev, cur_len, not_last, per_dev->cur_sg,
		   _LLU(per_dev->offset), per_dev->length,
		   per_dev->last_sgs_total);

	if (!per_dev->cur_sg) {
		sge = per_dev->sglist;

		/* First time we prepare two entries */
		if (per_dev->length) {
			++per_dev->cur_sg;
			sge->offset = per_dev->offset;
			sge->len = per_dev->length;
		} else {
			/* Here the parity is the first unit of this object.
			 * This happens every time we reach a parity device on
			 * the same stripe as the per_dev->offset. We need to
			 * just skip this unit.
			 */
			per_dev->offset += cur_len;
			return;
		}
	} else {
		/* finalize the last one */
		sge = &per_dev->sglist[per_dev->cur_sg - 1];
		sge->len = per_dev->length - per_dev->last_sgs_total;
	}

	if (not_last) {
		/* Partly prepare the next one */
		struct osd_sg_entry *next_sge = sge + 1;

		++per_dev->cur_sg;
		next_sge->offset = sge->offset + sge->len + cur_len;
		/* Save cur len so we know how much was added next time */
		per_dev->last_sgs_total = per_dev->length;
		next_sge->len = 0;
	} else if (!sge->len) {
		/* Optimize for when the last unit is a parity */
		--per_dev->cur_sg;
	}
}
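
/* Illustrative example, not part of the original file: on a read, a device
 * whose units are <data A><parity><data B> ends up with roughly this call
 * pattern (the lengths are placeholders; the data segments are accumulated
 * into per_dev->length by the regular striping code):
 *
 *	per_dev->length = len_A;
 *	_ore_add_sg_seg(per_dev, unit_len, true);	// parity unit reached
 *	per_dev->length += len_B;
 *	_ore_add_sg_seg(per_dev, unit_len, false);	// end of the request
 *
 * which leaves sglist[0] = {offset, len_A} and
 * sglist[1] = {offset + len_A + unit_len, len_B}: the parity unit is simply
 * a gap between two scatter-gather entries.
 */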

static int _alloc_read_4_write(struct ore_io_state *ios)
{
	struct ore_layout *layout = ios->layout;
	int ret;
	/* We want to only read those pages not in cache so worst case
	 * is a stripe populated with every other page
	 */
	unsigned sgs_per_dev = ios->sp2d->pages_in_unit + 2;

	ret = _ore_get_io_state(layout, ios->oc,
				layout->group_width * layout->mirrors_p1,
				sgs_per_dev, 0, &ios->ios_read_4_write);
	return ret;
}

/* @si contains info of the to-be-inserted page. Update of @si should be
 * maintained by caller. Specifically si->dev, si->obj_offset, ...
 */
static int _add_to_r4w(struct ore_io_state *ios, struct ore_striping_info *si,
		       struct page *page, unsigned pg_len)
{
	struct request_queue *q;
	struct ore_per_dev_state *per_dev;
	struct ore_io_state *read_ios;
	unsigned first_dev = si->dev - (si->dev %
			  (ios->layout->group_width * ios->layout->mirrors_p1));
	unsigned comp = si->dev - first_dev;
	unsigned added_len;

	if (!ios->ios_read_4_write) {
		int ret = _alloc_read_4_write(ios);

		if (unlikely(ret))
			return ret;
	}

	read_ios = ios->ios_read_4_write;
	read_ios->numdevs = ios->layout->group_width * ios->layout->mirrors_p1;

	per_dev = &read_ios->per_dev[comp];
	if (!per_dev->length) {
		per_dev->bio = bio_kmalloc(GFP_KERNEL,
					   ios->sp2d->pages_in_unit);
		if (unlikely(!per_dev->bio)) {
			ORE_DBGMSG("Failed to allocate BIO size=%u\n",
				   ios->sp2d->pages_in_unit);
			return -ENOMEM;
		}
		per_dev->offset = si->obj_offset;
		per_dev->dev = si->dev;
	} else if (si->obj_offset != (per_dev->offset + per_dev->length)) {
		u64 gap = si->obj_offset - (per_dev->offset + per_dev->length);

		_ore_add_sg_seg(per_dev, gap, true);
	}
	q = osd_request_queue(ore_comp_dev(read_ios->oc, per_dev->dev));
	added_len = bio_add_pc_page(q, per_dev->bio, page, pg_len,
				    si->obj_offset % PAGE_SIZE);
	if (unlikely(added_len != pg_len)) {
		ORE_DBGMSG("Failed to bio_add_pc_page bi_vcnt=%d\n",
			   per_dev->bio->bi_vcnt);
		return -ENOMEM;
	}

	per_dev->length += pg_len;
	return 0;
}

/* read the beginning of an unaligned first page */
static int _add_to_r4w_first_page(struct ore_io_state *ios, struct page *page)
{
	struct ore_striping_info si;
	unsigned pg_len;

	ore_calc_stripe_info(ios->layout, ios->offset, 0, &si);

	pg_len = si.obj_offset % PAGE_SIZE;
	si.obj_offset -= pg_len;

	ORE_DBGMSG("offset=0x%llx len=0x%x index=0x%lx dev=%x\n",
		   _LLU(si.obj_offset), pg_len, page->index, si.dev);

	return _add_to_r4w(ios, &si, page, pg_len);
}

/* read the end of an incomplete last page */
static int _add_to_r4w_last_page(struct ore_io_state *ios, u64 *offset)
{
	struct ore_striping_info si;
	struct page *page;
	unsigned pg_len, p, c;

	ore_calc_stripe_info(ios->layout, *offset, 0, &si);

	p = si.unit_off / PAGE_SIZE;
	c = _dev_order(ios->layout->group_width * ios->layout->mirrors_p1,
		       ios->layout->mirrors_p1, si.par_dev, si.dev);
	page = ios->sp2d->_1p_stripes[p].pages[c];

	pg_len = PAGE_SIZE - (si.unit_off % PAGE_SIZE);
	*offset += pg_len;

	ORE_DBGMSG("p=%d, c=%d next-offset=0x%llx len=0x%x dev=%x par_dev=%d\n",
		   p, c, _LLU(*offset), pg_len, si.dev, si.par_dev);

	BUG_ON(!page);

	return _add_to_r4w(ios, &si, page, pg_len);
}

static void _mark_read4write_pages_uptodate(struct ore_io_state *ios, int ret)
{
	struct bio_vec *bv;
	unsigned i, d;

	/* loop on all devices all pages */
	for (d = 0; d < ios->numdevs; d++) {
		struct bio *bio = ios->per_dev[d].bio;

		if (!bio)
			continue;

		bio_for_each_segment_all(bv, bio, i) {
			struct page *page = bv->bv_page;

			SetPageUptodate(page);
			if (PageError(page))
				ClearPageError(page);
		}
	}
}

/* read_4_write is hacked to read the start of the first stripe and/or
 * the end of the last stripe. If needed, with an sg-gap at each device/page.
 * It is assumed to be called after the to_be_written pages of the first
 * stripe have populated ios->sp2d[][].
 *
 * NOTE: We call ios->r4w->get_page for all pages needed for parity
 * calculations. These pages are held at sp2d[p].pages[c] but with
 * sp2d[p].page_is_read[c] = true. At _sp2d_reset these pages are returned
 * through ios->r4w->put_page. The ios->r4w->get_page call might signal that
 * the page is already @uptodate=true, so we don't need to read it, only
 * release it after IO.
 *
 * TODO: The read_4_write should calc a need_to_read_pages_count; if bigger
 * than the to-be-written count, we should consider the xor-in-place mode.
 * need_to_read_pages_count is the actual number of pages not present in cache.
 * Maybe "devs_in_group - ios->sp2d[p].write_count" is a good enough
 * approximation? In this mode the read pages are put in the empty places of
 * ios->sp2d[p][*], xor is calculated the same way. These pages are
 * allocated/freed and don't go through the cache.
 */
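
/* Condensed lifecycle sketch, not part of the original file, restating the
 * NOTE above with the calls used in the functions below (variables as in
 * _read_4_write_first_stripe()):
 *
 *	bool uptodate;
 *	struct page *page;
 *
 *	page = ios->r4w->get_page(ios->private, offset, &uptodate);
 *	_1ps->pages[c] = page;
 *	_1ps->page_is_read[c] = true;	// borrowed for parity only
 *	if (!uptodate)
 *		_add_to_r4w(ios, &read_si, page, PAGE_SIZE);	// queue the read
 *
 *	// ... XOR happens later in _gen_xor_unit() ...
 *
 *	_sp2d_reset(sp2d, ios->r4w, ios->private);	// r4w->put_page() on borrowed pages
 */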
static int _read_4_write_first_stripe(struct ore_io_state *ios)
{
	struct ore_striping_info read_si;
	struct __stripe_pages_2d *sp2d = ios->sp2d;
	u64 offset = ios->si.first_stripe_start;
	unsigned c, p, min_p = sp2d->pages_in_unit, max_p = -1;

	if (offset == ios->offset) /* Go to start collect $200 */
		goto read_last_stripe;

	min_p = _sp2d_min_pg(sp2d);
	max_p = _sp2d_max_pg(sp2d);

	ORE_DBGMSG("stripe_start=0x%llx ios->offset=0x%llx min_p=%d max_p=%d\n",
		   offset, ios->offset, min_p, max_p);

	for (c = 0; ; c++) {
		ore_calc_stripe_info(ios->layout, offset, 0, &read_si);
		read_si.obj_offset += min_p * PAGE_SIZE;
		offset += min_p * PAGE_SIZE;
		for (p = min_p; p <= max_p; p++) {
			struct __1_page_stripe *_1ps = &sp2d->_1p_stripes[p];
			struct page **pp = &_1ps->pages[c];
			bool uptodate;

			if (*pp) {
				if (ios->offset % PAGE_SIZE)
					/* Read the remainder of the page */
					_add_to_r4w_first_page(ios, *pp);
				/* to-be-written pages start here */
				goto read_last_stripe;
			}

			*pp = ios->r4w->get_page(ios->private, offset,
						 &uptodate);
			if (unlikely(!*pp))
				return -ENOMEM;

			if (!uptodate)
				_add_to_r4w(ios, &read_si, *pp, PAGE_SIZE);

			/* Mark read-pages to be cache_released */
			_1ps->page_is_read[c] = true;
			read_si.obj_offset += PAGE_SIZE;
			offset += PAGE_SIZE;
		}
		offset += (sp2d->pages_in_unit - p) * PAGE_SIZE;
	}

read_last_stripe:
	return 0;
}

static int _read_4_write_last_stripe(struct ore_io_state *ios)
{
	struct ore_striping_info read_si;
	struct __stripe_pages_2d *sp2d = ios->sp2d;
	u64 offset;
	u64 last_stripe_end;
	unsigned bytes_in_stripe = ios->si.bytes_in_stripe;
	unsigned c, p, min_p = sp2d->pages_in_unit, max_p = -1;

	offset = ios->offset + ios->length;
	if (offset % PAGE_SIZE)
		_add_to_r4w_last_page(ios, &offset);
		/* offset will be aligned to next page */

	last_stripe_end = div_u64(offset + bytes_in_stripe - 1, bytes_in_stripe)
				 * bytes_in_stripe;
	if (offset == last_stripe_end) /* Optimize for the aligned case */
		goto read_it;

	ore_calc_stripe_info(ios->layout, offset, 0, &read_si);
	p = read_si.unit_off / PAGE_SIZE;
	c = _dev_order(ios->layout->group_width * ios->layout->mirrors_p1,
		       ios->layout->mirrors_p1, read_si.par_dev, read_si.dev);

	if (min_p == sp2d->pages_in_unit) {
		/* Didn't do it yet */
		min_p = _sp2d_min_pg(sp2d);
		max_p = _sp2d_max_pg(sp2d);
	}

	ORE_DBGMSG("offset=0x%llx stripe_end=0x%llx min_p=%d max_p=%d\n",
		   offset, last_stripe_end, min_p, max_p);

	while (offset < last_stripe_end) {
		struct __1_page_stripe *_1ps = &sp2d->_1p_stripes[p];

		if ((min_p <= p) && (p <= max_p)) {
			struct page *page;
			bool uptodate;

			BUG_ON(_1ps->pages[c]);
			page = ios->r4w->get_page(ios->private, offset,
						  &uptodate);
			if (unlikely(!page))
				return -ENOMEM;

			_1ps->pages[c] = page;
			/* Mark read-pages to be cache_released */
			_1ps->page_is_read[c] = true;
			if (!uptodate)
				_add_to_r4w(ios, &read_si, page, PAGE_SIZE);
		}

		offset += PAGE_SIZE;
		if (p == (sp2d->pages_in_unit - 1)) {
			++c;
			p = 0;
			ore_calc_stripe_info(ios->layout, offset, 0, &read_si);
		} else {
			read_si.obj_offset += PAGE_SIZE;
			++p;
		}
	}

read_it:
	return 0;
}

static int _read_4_write_execute(struct ore_io_state *ios)
{
	struct ore_io_state *ios_read;
	unsigned i;
	int ret;

	ios_read = ios->ios_read_4_write;
	if (!ios_read)
		return 0;

	/* FIXME: Ugly to signal _sbi_read_mirror that we have bio(s). Change
	 * to check for per_dev->bio
	 */
	ios_read->pages = ios->pages;

	/* Now read these devices */
	for (i = 0; i < ios_read->numdevs; i += ios_read->layout->mirrors_p1) {
		ret = _ore_read_mirror(ios_read, i);
		if (unlikely(ret))
			return ret;
	}

	ret = ore_io_execute(ios_read); /* Synchronous execution */
	if (unlikely(ret)) {
		ORE_DBGMSG("!! ore_io_execute => %d\n", ret);
		return ret;
	}

	_mark_read4write_pages_uptodate(ios_read, ret);
	ore_put_io_state(ios_read);
	ios->ios_read_4_write = NULL; /* Might need a reuse at last stripe */
	return 0;
}

/* In writes @cur_len means length left. i.e. cur_len == 0 is the last parity
 * unit.
 */
int _ore_add_parity_unit(struct ore_io_state *ios,
			 struct ore_striping_info *si,
			 struct ore_per_dev_state *per_dev,
			 unsigned cur_len)
{
	if (ios->reading) {
		if (per_dev->cur_sg >= ios->sgs_per_dev) {
			ORE_DBGMSG("cur_sg(%d) >= sgs_per_dev(%d)\n",
				   per_dev->cur_sg, ios->sgs_per_dev);
			return -ENOMEM;
		}
		_ore_add_sg_seg(per_dev, cur_len, true);
	} else {
		struct __stripe_pages_2d *sp2d = ios->sp2d;
		struct page **pages = ios->parity_pages + ios->cur_par_page;
		unsigned num_pages;
		unsigned array_start = 0;
		unsigned i;
		int ret;

		si->cur_pg = _sp2d_min_pg(sp2d);
		num_pages  = _sp2d_max_pg(sp2d) + 1 - si->cur_pg;

		if (!cur_len) /* If last stripe operate on parity comp */
			si->cur_comp = sp2d->data_devs;

		if (!per_dev->length) {
			per_dev->offset += si->cur_pg * PAGE_SIZE;
			/* If first stripe, read in all read4write pages
			 * (if needed) before we calculate the first parity.
			 */
			_read_4_write_first_stripe(ios);
		}
		if (!cur_len) /* If last stripe r4w pages of last stripe */
			_read_4_write_last_stripe(ios);
		_read_4_write_execute(ios);

		for (i = 0; i < num_pages; i++) {
			pages[i] = _raid_page_alloc();
			if (unlikely(!pages[i]))
				return -ENOMEM;

			++(ios->cur_par_page);
		}

		BUG_ON(si->cur_comp != sp2d->data_devs);
		BUG_ON(si->cur_pg + num_pages > sp2d->pages_in_unit);

		ret = _ore_add_stripe_unit(ios, &array_start, 0, pages,
					   per_dev, num_pages * PAGE_SIZE);
		if (unlikely(ret))
			return ret;

		/* TODO: raid6 if (last_parity_dev) */
		_gen_xor_unit(sp2d);
		_sp2d_reset(sp2d, ios->r4w, ios->private);
	}
	return 0;
}

int _ore_post_alloc_raid_stuff(struct ore_io_state *ios)
{
	if (ios->parity_pages) {
		struct ore_layout *layout = ios->layout;
		unsigned pages_in_unit = layout->stripe_unit / PAGE_SIZE;

		if (_sp2d_alloc(pages_in_unit, layout->group_width,
				layout->parity, &ios->sp2d)) {
			return -ENOMEM;
		}
	}
	return 0;
}

void _ore_free_raid_stuff(struct ore_io_state *ios)
{
	if (ios->sp2d) { /* writing and raid */
		unsigned i;

		for (i = 0; i < ios->cur_par_page; i++) {
			struct page *page = ios->parity_pages[i];

			if (page)
				_raid_page_free(page);
		}
		if (ios->extra_part_alloc)
			kfree(ios->parity_pages);
		/* If IO returned an error pages might need unlocking */
		_sp2d_reset(ios->sp2d, ios->r4w, ios->private);
		_sp2d_free(ios->sp2d);
	} else {
		/* Will only be set if raid reading && sglist is big */
		if (ios->extra_part_alloc)
			kfree(ios->per_dev[0].sglist);
	}
	if (ios->ios_read_4_write)
		ore_put_io_state(ios->ios_read_4_write);
}