/*
   drbd_bitmap.c

   This file is part of DRBD by Philipp Reisner and Lars Ellenberg.

   Copyright (C) 2004-2008, LINBIT Information Technologies GmbH.
   Copyright (C) 2004-2008, Philipp Reisner <philipp.reisner@linbit.com>.
   Copyright (C) 2004-2008, Lars Ellenberg <lars.ellenberg@linbit.com>.

   drbd is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 2, or (at your option)
   any later version.

   drbd is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with drbd; see the file COPYING.  If not, write to
   the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
 */

#include <linux/bitops.h>
#include <linux/vmalloc.h>
#include <linux/string.h>
#include <linux/drbd.h>
#include <linux/slab.h>
#include <asm/kmap_types.h>

#include <asm-generic/bitops/le.h>

#include "drbd_int.h"

Philipp Reisnerb411b362009-09-25 16:07:19 -070037/* OPAQUE outside this file!
38 * interface defined in drbd_int.h
39
40 * convention:
41 * function name drbd_bm_... => used elsewhere, "public".
42 * function name bm_... => internal to implementation, "private".
Lars Ellenberg4b0715f2010-12-14 15:13:04 +010043 */
Philipp Reisnerb411b362009-09-25 16:07:19 -070044
Lars Ellenberg4b0715f2010-12-14 15:13:04 +010045
/*
 * LIMITATIONS:
 * We want to support >= a petabyte of backend storage, while for now still using
 * a granularity of one bit per 4KiB of storage.
 * 1 << 50		bytes backend storage (1 PiB)
 * 1 << (50 - 12)	bits needed
 *	38 --> we need u64 to index and count bits
 * 1 << (38 - 3)	bitmap bytes needed
 *	35 --> we still need u64 to index and count bytes
 *		(that's 32 GiB of bitmap for 1 PiB storage)
 * 1 << (35 - 2)	32bit longs needed
 *	33 --> we'd even need u64 to index and count 32bit long words.
 * 1 << (35 - 3)	64bit longs needed
 *	32 --> we could get away with a 32bit unsigned int to index and count
 *	64bit long words, but I rather stay with unsigned long for now.
 * We probably should neither count nor point to bytes or long words
 * directly, but either by bitnumber, or by page index and offset.
 * 1 << (35 - 12)
 *	22 --> we need that many 4KiB pages of bitmap.
 *	1 << (22 + 3) --> on a 64bit arch,
 *	we need 32 MiB to store the array of page pointers.
 *
 * Because I'm lazy, and because the resulting patch was too large, too ugly
 * and still incomplete, on 32bit we still "only" support 16 TiB (minus some),
 * (1 << 32) bits * 4k storage.
 *

 * bitmap storage and IO:
 * Bitmap is stored little endian on disk, and is kept little endian in
 * core memory. Currently we still hold the full bitmap in core as long
 * as we are "attached" to a local disk, which at 32 GiB for 1PiB storage
 * seems excessive.
 *
 * We plan to reduce the amount of in-core bitmap pages by paging them in
 * and out against their on-disk location as necessary, but need to make
 * sure we don't cause too much meta data IO, and must not deadlock in
 * tight memory situations. This needs some more work.
 */
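
/* Worked example of the sizing above: a 4 TiB backing device has 2^42 bytes,
 * i.e. 2^30 bitmap bits at one bit per 4 KiB, i.e. 2^27 bytes == 128 MiB of
 * in-core bitmap, held in 2^15 == 32768 pages of 4 KiB each. */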

/*
 * NOTE
 * Access to the *bm_pages is protected by bm_lock.
 * It is safe to read the other members within the lock.
 *
 * drbd_bm_set_bits is called from bio_endio callbacks;
 * we may be called with irq already disabled,
 * so we need spin_lock_irqsave().
 * And we need the kmap_atomic.
 */
struct drbd_bitmap {
	struct page **bm_pages;
	spinlock_t bm_lock;

	/* see LIMITATIONS: above */

	unsigned long bm_set;       /* nr of set bits; THINK maybe atomic_t? */
	unsigned long bm_bits;
	size_t   bm_words;
	size_t   bm_number_of_pages;
	sector_t bm_dev_capacity;
	struct mutex bm_change; /* serializes resize operations */

	wait_queue_head_t bm_io_wait; /* used to serialize IO of single pages */

	enum bm_flag bm_flags;

	/* debugging aid, in case we are still racy somewhere */
	char          *bm_why;
	struct task_struct *bm_task;
};

static int __bm_change_bits_to(struct drbd_conf *mdev, const unsigned long s,
			       unsigned long e, int val, const enum km_type km);

Philipp Reisnerb411b362009-09-25 16:07:19 -0700120#define bm_print_lock_info(m) __bm_print_lock_info(m, __func__)
121static void __bm_print_lock_info(struct drbd_conf *mdev, const char *func)
122{
123 struct drbd_bitmap *b = mdev->bitmap;
124 if (!__ratelimit(&drbd_ratelimit_state))
125 return;
126 dev_err(DEV, "FIXME %s in %s, bitmap locked for '%s' by %s\n",
127 current == mdev->receiver.task ? "receiver" :
128 current == mdev->asender.task ? "asender" :
129 current == mdev->worker.task ? "worker" : current->comm,
130 func, b->bm_why ?: "?",
131 b->bm_task == mdev->receiver.task ? "receiver" :
132 b->bm_task == mdev->asender.task ? "asender" :
133 b->bm_task == mdev->worker.task ? "worker" : "?");
134}
135
void drbd_bm_lock(struct drbd_conf *mdev, char *why, enum bm_flag flags)
{
	struct drbd_bitmap *b = mdev->bitmap;
	int trylock_failed;

	if (!b) {
		dev_err(DEV, "FIXME no bitmap in drbd_bm_lock!?\n");
		return;
	}

	trylock_failed = !mutex_trylock(&b->bm_change);

	if (trylock_failed) {
		dev_warn(DEV, "%s going to '%s' but bitmap already locked for '%s' by %s\n",
		    current == mdev->receiver.task ? "receiver" :
		    current == mdev->asender.task  ? "asender"  :
		    current == mdev->worker.task   ? "worker"   : current->comm,
		    why, b->bm_why ?: "?",
		    b->bm_task == mdev->receiver.task ? "receiver" :
		    b->bm_task == mdev->asender.task  ? "asender"  :
		    b->bm_task == mdev->worker.task   ? "worker"   : "?");
		mutex_lock(&b->bm_change);
	}
	if (BM_LOCKED_MASK & b->bm_flags)
		dev_err(DEV, "FIXME bitmap already locked in bm_lock\n");
	b->bm_flags |= flags & BM_LOCKED_MASK;

	b->bm_why  = why;
	b->bm_task = current;
}

void drbd_bm_unlock(struct drbd_conf *mdev)
{
	struct drbd_bitmap *b = mdev->bitmap;
	if (!b) {
		dev_err(DEV, "FIXME no bitmap in drbd_bm_unlock!?\n");
		return;
	}

	if (!(BM_LOCKED_MASK & mdev->bitmap->bm_flags))
		dev_err(DEV, "FIXME bitmap not locked in bm_unlock\n");

	b->bm_flags &= ~BM_LOCKED_MASK;
	b->bm_why  = NULL;
	b->bm_task = NULL;
	mutex_unlock(&b->bm_change);
}

/* we store some "meta" info about our pages in page->private */
/* at a granularity of 4k storage per bitmap bit:
 * one peta byte storage: 1<<50 byte, 1<<38 * 4k storage blocks
 *  1<<38 bits,
 *  1<<23 4k bitmap pages.
 * Use 24 bits as page index, covers 2 peta byte storage
 * at a granularity of 4k per bit.
 * Used to report the failed page idx on io error from the endio handlers.
 */
#define BM_PAGE_IDX_MASK	((1UL<<24)-1)
/* this page is currently read in, or written back */
#define BM_PAGE_IO_LOCK		31
/* if there has been an IO error for this page */
#define BM_PAGE_IO_ERROR	30
/* this is to be able to intelligently skip disk IO,
 * set if bits have been set since last IO. */
#define BM_PAGE_NEED_WRITEOUT	29
/* to mark for lazy writeout once syncer cleared all clearable bits,
 * set if bits have been cleared since last IO. */
#define BM_PAGE_LAZY_WRITEOUT	28

/* bm_store_page_idx uses non-atomic assignment. It is only used directly after
 * allocating the page.  All other bm_set_page_* and bm_clear_page_* need to
 * use atomic bit manipulation, as set_out_of_sync (and therefore bitmap
 * changes) may happen from various contexts, and wait_on_bit/wake_up_bit
 * requires it all to be atomic as well. */
static void bm_store_page_idx(struct page *page, unsigned long idx)
{
	BUG_ON(0 != (idx & ~BM_PAGE_IDX_MASK));
	page_private(page) |= idx;
}

static unsigned long bm_page_to_idx(struct page *page)
{
	return page_private(page) & BM_PAGE_IDX_MASK;
}
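
/* Layout of page->private as used above: bits 0..23 hold the page index
 * stored by bm_store_page_idx(), bits 28..31 hold the BM_PAGE_* status
 * flags defined above. */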
Philipp Reisnerb411b362009-09-25 16:07:19 -0700220
Lars Ellenberg19f843a2010-12-15 08:59:11 +0100221/* As is very unlikely that the same page is under IO from more than one
222 * context, we can get away with a bit per page and one wait queue per bitmap.
223 */
224static void bm_page_lock_io(struct drbd_conf *mdev, int page_nr)
225{
226 struct drbd_bitmap *b = mdev->bitmap;
227 void *addr = &page_private(b->bm_pages[page_nr]);
228 wait_event(b->bm_io_wait, !test_and_set_bit(BM_PAGE_IO_LOCK, addr));
229}

static void bm_page_unlock_io(struct drbd_conf *mdev, int page_nr)
{
	struct drbd_bitmap *b = mdev->bitmap;
	void *addr = &page_private(b->bm_pages[page_nr]);
	clear_bit(BM_PAGE_IO_LOCK, addr);
	smp_mb__after_clear_bit();
	wake_up(&mdev->bitmap->bm_io_wait);
}

/* set _before_ submit_io, so it may be reset due to being changed
 * while this page is in flight... will get submitted later again */
static void bm_set_page_unchanged(struct page *page)
{
	/* use cmpxchg? */
	clear_bit(BM_PAGE_NEED_WRITEOUT, &page_private(page));
	clear_bit(BM_PAGE_LAZY_WRITEOUT, &page_private(page));
}

static void bm_set_page_need_writeout(struct page *page)
{
	set_bit(BM_PAGE_NEED_WRITEOUT, &page_private(page));
}

static int bm_test_page_unchanged(struct page *page)
{
	volatile const unsigned long *addr = &page_private(page);
	return (*addr & ((1UL<<BM_PAGE_NEED_WRITEOUT)|(1UL<<BM_PAGE_LAZY_WRITEOUT))) == 0;
}

static void bm_set_page_io_err(struct page *page)
{
	set_bit(BM_PAGE_IO_ERROR, &page_private(page));
}

static void bm_clear_page_io_err(struct page *page)
{
	clear_bit(BM_PAGE_IO_ERROR, &page_private(page));
}

static void bm_set_page_lazy_writeout(struct page *page)
{
	set_bit(BM_PAGE_LAZY_WRITEOUT, &page_private(page));
}

static int bm_test_page_lazy_writeout(struct page *page)
{
	return test_bit(BM_PAGE_LAZY_WRITEOUT, &page_private(page));
}

/* on a 32bit box, this would allow for exactly (2<<38) bits. */
static unsigned int bm_word_to_page_idx(struct drbd_bitmap *b, unsigned long long_nr)
{
	/* page_nr = (word*sizeof(long)) >> PAGE_SHIFT; */
	unsigned int page_nr = long_nr >> (PAGE_SHIFT - LN2_BPL + 3);
	BUG_ON(page_nr >= b->bm_number_of_pages);
	return page_nr;
}

static unsigned int bm_bit_to_page_idx(struct drbd_bitmap *b, u64 bitnr)
{
	/* page_nr = (bitnr/8) >> PAGE_SHIFT; */
	unsigned int page_nr = bitnr >> (PAGE_SHIFT + 3);
	BUG_ON(page_nr >= b->bm_number_of_pages);
	return page_nr;
}
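
/* For example, with 4 KiB pages and 64bit longs (PAGE_SHIFT == 12,
 * LN2_BPL == 6), bm_word_to_page_idx() is long_nr >> 9 (512 longs per page)
 * and bm_bit_to_page_idx() is bitnr >> 15 (32768 bits per page). */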

static unsigned long *__bm_map_pidx(struct drbd_bitmap *b, unsigned int idx, const enum km_type km)
{
	struct page *page = b->bm_pages[idx];
	return (unsigned long *) kmap_atomic(page, km);
}

static unsigned long *bm_map_pidx(struct drbd_bitmap *b, unsigned int idx)
{
	return __bm_map_pidx(b, idx, KM_IRQ1);
}

static void __bm_unmap(unsigned long *p_addr, const enum km_type km)
{
	kunmap_atomic(p_addr, km);
}

static void bm_unmap(unsigned long *p_addr)
{
	return __bm_unmap(p_addr, KM_IRQ1);
}

/* long word offset of _bitmap_ sector */
#define S2W(s)	((s)<<(BM_EXT_SHIFT-BM_BLOCK_SHIFT-LN2_BPL))
/* word offset from start of bitmap to word number _in_page_
 * modulo longs per page
#define MLPP(X) ((X) % (PAGE_SIZE/sizeof(long))
 hm, well, Philipp thinks gcc might not optimize the % into & (... - 1)
 so do it explicitly:
 */
#define MLPP(X) ((X) & ((PAGE_SIZE/sizeof(long))-1))

/* Long words per page */
#define LWPP (PAGE_SIZE/sizeof(long))
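
/* With 4 KiB pages and 64bit longs this gives LWPP == 512 and
 * MLPP(X) == (X & 511). */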

/*
 * actually most functions herein should take a struct drbd_bitmap*, not a
 * struct drbd_conf*, but for the debug macros I like to have the mdev around
 * to be able to report device-specific messages.
 */


static void bm_free_pages(struct page **pages, unsigned long number)
{
	unsigned long i;
	if (!pages)
		return;

	for (i = 0; i < number; i++) {
		if (!pages[i]) {
			printk(KERN_ALERT "drbd: bm_free_pages tried to free "
					  "a NULL pointer; i=%lu n=%lu\n",
					  i, number);
			continue;
		}
		__free_page(pages[i]);
		pages[i] = NULL;
	}
}

static void bm_vk_free(void *ptr, int v)
{
	if (v)
		vfree(ptr);
	else
		kfree(ptr);
}

/*
 * "have" and "want" are NUMBER OF PAGES.
 */
static struct page **bm_realloc_pages(struct drbd_bitmap *b, unsigned long want)
{
	struct page **old_pages = b->bm_pages;
	struct page **new_pages, *page;
	unsigned int i, bytes, vmalloced = 0;
	unsigned long have = b->bm_number_of_pages;

	BUG_ON(have == 0 && old_pages != NULL);
	BUG_ON(have != 0 && old_pages == NULL);

	if (have == want)
		return old_pages;

	/* Trying kmalloc first, falling back to vmalloc.
	 * GFP_KERNEL is ok, as this is done when a lower level disk is
	 * "attached" to the drbd.  Context is receiver thread or cqueue
	 * thread.  As we have no disk yet, we are not in the IO path,
	 * not even the IO path of the peer. */
	bytes = sizeof(struct page *)*want;
	new_pages = kmalloc(bytes, GFP_KERNEL);
	if (!new_pages) {
		new_pages = vmalloc(bytes);
		if (!new_pages)
			return NULL;
		vmalloced = 1;
	}

	memset(new_pages, 0, bytes);
	if (want >= have) {
		for (i = 0; i < have; i++)
			new_pages[i] = old_pages[i];
		for (; i < want; i++) {
			page = alloc_page(GFP_HIGHUSER);
			if (!page) {
				bm_free_pages(new_pages + have, i - have);
				bm_vk_free(new_pages, vmalloced);
				return NULL;
			}
			/* we want to know which page it is
			 * from the endio handlers */
			bm_store_page_idx(page, i);
			new_pages[i] = page;
		}
	} else {
		for (i = 0; i < want; i++)
			new_pages[i] = old_pages[i];
		/* NOT HERE, we are outside the spinlock!
		bm_free_pages(old_pages + want, have - want);
		*/
	}

	if (vmalloced)
		b->bm_flags |= BM_P_VMALLOCED;
	else
		b->bm_flags &= ~BM_P_VMALLOCED;

	return new_pages;
}

/*
 * called on driver init only. TODO call when a device is created.
 * allocates the drbd_bitmap, and stores it in mdev->bitmap.
 */
int drbd_bm_init(struct drbd_conf *mdev)
{
	struct drbd_bitmap *b = mdev->bitmap;
	WARN_ON(b != NULL);
	b = kzalloc(sizeof(struct drbd_bitmap), GFP_KERNEL);
	if (!b)
		return -ENOMEM;
	spin_lock_init(&b->bm_lock);
	mutex_init(&b->bm_change);
	init_waitqueue_head(&b->bm_io_wait);

	mdev->bitmap = b;

	return 0;
}

sector_t drbd_bm_capacity(struct drbd_conf *mdev)
{
	ERR_IF(!mdev->bitmap) return 0;
	return mdev->bitmap->bm_dev_capacity;
}

/* called on driver unload. TODO: call when a device is destroyed.
 */
void drbd_bm_cleanup(struct drbd_conf *mdev)
{
	ERR_IF (!mdev->bitmap) return;
	bm_free_pages(mdev->bitmap->bm_pages, mdev->bitmap->bm_number_of_pages);
	bm_vk_free(mdev->bitmap->bm_pages, (BM_P_VMALLOCED & mdev->bitmap->bm_flags));
	kfree(mdev->bitmap);
	mdev->bitmap = NULL;
}

/*
 * since (b->bm_bits % BITS_PER_LONG) != 0,
 * this masks out the remaining bits.
 * Returns the number of bits cleared.
 */
#define BITS_PER_PAGE		(1UL << (PAGE_SHIFT + 3))
#define BITS_PER_PAGE_MASK	(BITS_PER_PAGE - 1)
#define BITS_PER_LONG_MASK	(BITS_PER_LONG - 1)
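/* On an arch with 4 KiB pages, BITS_PER_PAGE is 32768 and
 * BITS_PER_PAGE_MASK is 0x7fff. */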
static int bm_clear_surplus(struct drbd_bitmap *b)
{
	unsigned long mask;
	unsigned long *p_addr, *bm;
	int tmp;
	int cleared = 0;

	/* number of bits modulo bits per page */
	tmp = (b->bm_bits & BITS_PER_PAGE_MASK);
	/* mask the used bits of the word containing the last bit */
	mask = (1UL << (tmp & BITS_PER_LONG_MASK)) -1;
	/* bitmap is always stored little endian,
	 * on disk and in core memory alike */
	mask = cpu_to_lel(mask);

	p_addr = bm_map_pidx(b, b->bm_number_of_pages - 1);
	bm = p_addr + (tmp/BITS_PER_LONG);
	if (mask) {
		/* If mask != 0, we are not exactly aligned, so bm now points
		 * to the long containing the last bit.
		 * If mask == 0, bm already points to the word immediately
		 * after the last (long word aligned) bit. */
		cleared = hweight_long(*bm & ~mask);
		*bm &= mask;
		bm++;
	}

	if (BITS_PER_LONG == 32 && ((bm - p_addr) & 1) == 1) {
		/* on a 32bit arch, we may need to zero out
		 * a padding long to align with a 64bit remote */
		cleared += hweight_long(*bm);
		*bm = 0;
	}
	bm_unmap(p_addr);
	return cleared;
}

static void bm_set_surplus(struct drbd_bitmap *b)
{
	unsigned long mask;
	unsigned long *p_addr, *bm;
	int tmp;

	/* number of bits modulo bits per page */
	tmp = (b->bm_bits & BITS_PER_PAGE_MASK);
	/* mask the used bits of the word containing the last bit */
	mask = (1UL << (tmp & BITS_PER_LONG_MASK)) -1;
	/* bitmap is always stored little endian,
	 * on disk and in core memory alike */
	mask = cpu_to_lel(mask);

	p_addr = bm_map_pidx(b, b->bm_number_of_pages - 1);
	bm = p_addr + (tmp/BITS_PER_LONG);
	if (mask) {
		/* If mask != 0, we are not exactly aligned, so bm now points
		 * to the long containing the last bit.
		 * If mask == 0, bm already points to the word immediately
		 * after the last (long word aligned) bit. */
		*bm |= ~mask;
		bm++;
	}

	if (BITS_PER_LONG == 32 && ((bm - p_addr) & 1) == 1) {
		/* on a 32bit arch, we may need to zero out
		 * a padding long to align with a 64bit remote */
		*bm = ~0UL;
	}
	bm_unmap(p_addr);
}

/* you better not modify the bitmap while this is running,
 * or its results will be stale */
static unsigned long bm_count_bits(struct drbd_bitmap *b)
{
	unsigned long *p_addr;
	unsigned long bits = 0;
	unsigned long mask = (1UL << (b->bm_bits & BITS_PER_LONG_MASK)) -1;
	int idx, i, last_word;

	/* all but last page */
	for (idx = 0; idx < b->bm_number_of_pages - 1; idx++) {
		p_addr = __bm_map_pidx(b, idx, KM_USER0);
		for (i = 0; i < LWPP; i++)
			bits += hweight_long(p_addr[i]);
		__bm_unmap(p_addr, KM_USER0);
		cond_resched();
	}
	/* last (or only) page */
	last_word = ((b->bm_bits - 1) & BITS_PER_PAGE_MASK) >> LN2_BPL;
	p_addr = __bm_map_pidx(b, idx, KM_USER0);
	for (i = 0; i < last_word; i++)
		bits += hweight_long(p_addr[i]);
	p_addr[last_word] &= cpu_to_lel(mask);
	bits += hweight_long(p_addr[last_word]);
	/* 32bit arch, may have an unused padding long */
	if (BITS_PER_LONG == 32 && (last_word & 1) == 0)
		p_addr[last_word+1] = 0;
	__bm_unmap(p_addr, KM_USER0);
	return bits;
}

/* offset and len in long words.*/
static void bm_memset(struct drbd_bitmap *b, size_t offset, int c, size_t len)
{
	unsigned long *p_addr, *bm;
	unsigned int idx;
	size_t do_now, end;

	end = offset + len;

	if (end > b->bm_words) {
		printk(KERN_ALERT "drbd: bm_memset end > bm_words\n");
		return;
	}

	while (offset < end) {
		do_now = min_t(size_t, ALIGN(offset + 1, LWPP), end) - offset;
		idx = bm_word_to_page_idx(b, offset);
		p_addr = bm_map_pidx(b, idx);
		bm = p_addr + MLPP(offset);
		if (bm+do_now > p_addr + LWPP) {
			printk(KERN_ALERT "drbd: BUG BUG BUG! p_addr:%p bm:%p do_now:%d\n",
			       p_addr, bm, (int)do_now);
		} else
			memset(bm, c, do_now * sizeof(long));
		bm_unmap(p_addr);
		bm_set_page_need_writeout(b->bm_pages[idx]);
		offset += do_now;
	}
}

/*
 * make sure the bitmap has enough room for the attached storage,
 * if necessary, resize.
 * called whenever we may have changed the device size.
 * returns -ENOMEM if we could not allocate enough memory, 0 on success.
 * In case this is actually a resize, we copy the old bitmap into the new one.
 * Otherwise, the bitmap is initialized to all bits set.
 */
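/* Illustration, assuming the one-bit-per-4KiB granularity described at the
 * top of this file: a capacity of 2^21 512-byte sectors (1 GiB) maps to
 * 2^18 bitmap bits, 4096 64bit words, and 8 bitmap pages of 4 KiB. */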
int drbd_bm_resize(struct drbd_conf *mdev, sector_t capacity, int set_new_bits)
{
	struct drbd_bitmap *b = mdev->bitmap;
	unsigned long bits, words, owords, obits;
	unsigned long want, have, onpages; /* number of pages */
	struct page **npages, **opages = NULL;
	int err = 0, growing;
	int opages_vmalloced;

	ERR_IF(!b) return -ENOMEM;

	drbd_bm_lock(mdev, "resize", BM_LOCKED_MASK);

	dev_info(DEV, "drbd_bm_resize called with capacity == %llu\n",
			(unsigned long long)capacity);

	if (capacity == b->bm_dev_capacity)
		goto out;

	opages_vmalloced = (BM_P_VMALLOCED & b->bm_flags);

	if (capacity == 0) {
		spin_lock_irq(&b->bm_lock);
		opages = b->bm_pages;
		onpages = b->bm_number_of_pages;
		owords = b->bm_words;
		b->bm_pages = NULL;
		b->bm_number_of_pages =
		b->bm_set =
		b->bm_bits =
		b->bm_words =
		b->bm_dev_capacity = 0;
		spin_unlock_irq(&b->bm_lock);
		bm_free_pages(opages, onpages);
		bm_vk_free(opages, opages_vmalloced);
		goto out;
	}
	bits = BM_SECT_TO_BIT(ALIGN(capacity, BM_SECT_PER_BIT));

	/* if we would use
	   words = ALIGN(bits,BITS_PER_LONG) >> LN2_BPL;
	   a 32bit host could present the wrong number of words
	   to a 64bit host.
	*/
	words = ALIGN(bits, 64) >> LN2_BPL;

	if (get_ldev(mdev)) {
		u64 bits_on_disk = ((u64)mdev->ldev->md.md_size_sect-MD_BM_OFFSET) << 12;
		put_ldev(mdev);
		if (bits > bits_on_disk) {
			dev_info(DEV, "bits = %lu\n", bits);
			dev_info(DEV, "bits_on_disk = %llu\n", bits_on_disk);
			err = -ENOSPC;
			goto out;
		}
	}

	want = ALIGN(words*sizeof(long), PAGE_SIZE) >> PAGE_SHIFT;
	have = b->bm_number_of_pages;
	if (want == have) {
		D_ASSERT(b->bm_pages != NULL);
		npages = b->bm_pages;
	} else {
		if (drbd_insert_fault(mdev, DRBD_FAULT_BM_ALLOC))
			npages = NULL;
		else
			npages = bm_realloc_pages(b, want);
	}

	if (!npages) {
		err = -ENOMEM;
		goto out;
	}

	spin_lock_irq(&b->bm_lock);
	opages = b->bm_pages;
	owords = b->bm_words;
	obits = b->bm_bits;

	growing = bits > obits;
	if (opages && growing && set_new_bits)
		bm_set_surplus(b);

	b->bm_pages = npages;
	b->bm_number_of_pages = want;
	b->bm_bits  = bits;
	b->bm_words = words;
	b->bm_dev_capacity = capacity;

	if (growing) {
		if (set_new_bits) {
			bm_memset(b, owords, 0xff, words-owords);
			b->bm_set += bits - obits;
		} else
			bm_memset(b, owords, 0x00, words-owords);

	}

	if (want < have) {
		/* implicit: (opages != NULL) && (opages != npages) */
		bm_free_pages(opages + want, have - want);
	}

	(void)bm_clear_surplus(b);

	spin_unlock_irq(&b->bm_lock);
	if (opages != npages)
		bm_vk_free(opages, opages_vmalloced);
	if (!growing)
		b->bm_set = bm_count_bits(b);
	dev_info(DEV, "resync bitmap: bits=%lu words=%lu pages=%lu\n", bits, words, want);

 out:
	drbd_bm_unlock(mdev);
	return err;
}

/* inherently racy:
 * if not protected by other means, return value may be out of date when
 * leaving this function...
 * we still need to lock it, since it is important that this returns
 * bm_set == 0 precisely.
 *
 * maybe bm_set should be atomic_t ?
 */
unsigned long _drbd_bm_total_weight(struct drbd_conf *mdev)
{
	struct drbd_bitmap *b = mdev->bitmap;
	unsigned long s;
	unsigned long flags;

	ERR_IF(!b) return 0;
	ERR_IF(!b->bm_pages) return 0;

	spin_lock_irqsave(&b->bm_lock, flags);
	s = b->bm_set;
	spin_unlock_irqrestore(&b->bm_lock, flags);

	return s;
}

unsigned long drbd_bm_total_weight(struct drbd_conf *mdev)
{
	unsigned long s;
	/* if I don't have a disk, I don't know about out-of-sync status */
	if (!get_ldev_if_state(mdev, D_NEGOTIATING))
		return 0;
	s = _drbd_bm_total_weight(mdev);
	put_ldev(mdev);
	return s;
}

size_t drbd_bm_words(struct drbd_conf *mdev)
{
	struct drbd_bitmap *b = mdev->bitmap;
	ERR_IF(!b) return 0;
	ERR_IF(!b->bm_pages) return 0;

	return b->bm_words;
}

unsigned long drbd_bm_bits(struct drbd_conf *mdev)
{
	struct drbd_bitmap *b = mdev->bitmap;
	ERR_IF(!b) return 0;

	return b->bm_bits;
}

/* merge number words from buffer into the bitmap starting at offset.
 * buffer[i] is expected to be little endian unsigned long.
 * bitmap must be locked by drbd_bm_lock.
 * currently only used from receive_bitmap.
 */
void drbd_bm_merge_lel(struct drbd_conf *mdev, size_t offset, size_t number,
			unsigned long *buffer)
{
	struct drbd_bitmap *b = mdev->bitmap;
	unsigned long *p_addr, *bm;
	unsigned long word, bits;
	unsigned int idx;
	size_t end, do_now;

	end = offset + number;

	ERR_IF(!b) return;
	ERR_IF(!b->bm_pages) return;
	if (number == 0)
		return;
	WARN_ON(offset >= b->bm_words);
	WARN_ON(end    >  b->bm_words);

	spin_lock_irq(&b->bm_lock);
	while (offset < end) {
		do_now = min_t(size_t, ALIGN(offset+1, LWPP), end) - offset;
		idx = bm_word_to_page_idx(b, offset);
		p_addr = bm_map_pidx(b, idx);
		bm = p_addr + MLPP(offset);
		offset += do_now;
		while (do_now--) {
			bits = hweight_long(*bm);
			word = *bm | *buffer++;
			*bm++ = word;
			b->bm_set += hweight_long(word) - bits;
		}
		bm_unmap(p_addr);
		bm_set_page_need_writeout(b->bm_pages[idx]);
	}
	/* with 32bit <-> 64bit cross-platform connect
	 * this is only correct for current usage,
	 * where we _know_ that we are 64 bit aligned,
	 * and know that this function is used in this way, too...
	 */
	if (end == b->bm_words)
		b->bm_set -= bm_clear_surplus(b);
	spin_unlock_irq(&b->bm_lock);
}

/* copy number words from the bitmap starting at offset into the buffer.
 * buffer[i] will be little endian unsigned long.
 */
void drbd_bm_get_lel(struct drbd_conf *mdev, size_t offset, size_t number,
		     unsigned long *buffer)
{
	struct drbd_bitmap *b = mdev->bitmap;
	unsigned long *p_addr, *bm;
	size_t end, do_now;

	end = offset + number;

	ERR_IF(!b) return;
	ERR_IF(!b->bm_pages) return;

	spin_lock_irq(&b->bm_lock);
	if ((offset >= b->bm_words) ||
	    (end    >  b->bm_words) ||
	    (number <= 0))
		dev_err(DEV, "offset=%lu number=%lu bm_words=%lu\n",
			(unsigned long)	offset,
			(unsigned long)	number,
			(unsigned long) b->bm_words);
	else {
		while (offset < end) {
			do_now = min_t(size_t, ALIGN(offset+1, LWPP), end) - offset;
			p_addr = bm_map_pidx(b, bm_word_to_page_idx(b, offset));
			bm = p_addr + MLPP(offset);
			offset += do_now;
			while (do_now--)
				*buffer++ = *bm++;
			bm_unmap(p_addr);
		}
	}
	spin_unlock_irq(&b->bm_lock);
}

/* set all bits in the bitmap */
void drbd_bm_set_all(struct drbd_conf *mdev)
{
	struct drbd_bitmap *b = mdev->bitmap;
	ERR_IF(!b) return;
	ERR_IF(!b->bm_pages) return;

	spin_lock_irq(&b->bm_lock);
	bm_memset(b, 0, 0xff, b->bm_words);
	(void)bm_clear_surplus(b);
	b->bm_set = b->bm_bits;
	spin_unlock_irq(&b->bm_lock);
}

/* clear all bits in the bitmap */
void drbd_bm_clear_all(struct drbd_conf *mdev)
{
	struct drbd_bitmap *b = mdev->bitmap;
	ERR_IF(!b) return;
	ERR_IF(!b->bm_pages) return;

	spin_lock_irq(&b->bm_lock);
	bm_memset(b, 0, 0, b->bm_words);
	b->bm_set = 0;
	spin_unlock_irq(&b->bm_lock);
}

struct bm_aio_ctx {
	struct drbd_conf *mdev;
	atomic_t in_flight;
	struct completion done;
	unsigned flags;
#define BM_AIO_COPY_PAGES	1
	int error;
};
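
/* One bm_aio_ctx tracks one batch of bitmap page IO: in_flight starts at 1
 * so that the completion in bm_async_io_complete() cannot fire before the
 * submitting loop has finished, and error keeps the last non-zero error
 * code seen by any of the completions. */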

/* bv_page may be a copy, or may be the original */
static void bm_async_io_complete(struct bio *bio, int error)
{
	struct bm_aio_ctx *ctx = bio->bi_private;
	struct drbd_conf *mdev = ctx->mdev;
	struct drbd_bitmap *b = mdev->bitmap;
	unsigned int idx = bm_page_to_idx(bio->bi_io_vec[0].bv_page);
	int uptodate = bio_flagged(bio, BIO_UPTODATE);


	/* strange behavior of some lower level drivers...
	 * fail the request by clearing the uptodate flag,
	 * but do not return any error?!
	 * do we want to WARN() on this? */
	if (!error && !uptodate)
		error = -EIO;

	if ((ctx->flags & BM_AIO_COPY_PAGES) == 0 &&
	    !bm_test_page_unchanged(b->bm_pages[idx]))
		dev_warn(DEV, "bitmap page idx %u changed during IO!\n", idx);

	if (error) {
		/* ctx error will hold the completed-last non-zero error code,
		 * in case error codes differ. */
		ctx->error = error;
		bm_set_page_io_err(b->bm_pages[idx]);
		/* Not identical to on disk version of it.
		 * Is BM_PAGE_IO_ERROR enough? */
		if (__ratelimit(&drbd_ratelimit_state))
			dev_err(DEV, "IO ERROR %d on bitmap page idx %u\n",
					error, idx);
	} else {
		bm_clear_page_io_err(b->bm_pages[idx]);
		dynamic_dev_dbg(DEV, "bitmap page idx %u completed\n", idx);
	}

	bm_page_unlock_io(mdev, idx);

	/* FIXME give back to page pool */
	if (ctx->flags & BM_AIO_COPY_PAGES)
		put_page(bio->bi_io_vec[0].bv_page);

	bio_put(bio);

	if (atomic_dec_and_test(&ctx->in_flight))
		complete(&ctx->done);
}

static void bm_page_io_async(struct bm_aio_ctx *ctx, int page_nr, int rw) __must_hold(local)
{
	/* we are process context. we always get a bio */
	struct bio *bio = bio_alloc(GFP_KERNEL, 1);
	struct drbd_conf *mdev = ctx->mdev;
	struct drbd_bitmap *b = mdev->bitmap;
	struct page *page;
	unsigned int len;

	sector_t on_disk_sector =
		mdev->ldev->md.md_offset + mdev->ldev->md.bm_offset;
	on_disk_sector += ((sector_t)page_nr) << (PAGE_SHIFT-9);

	/* this might happen with very small
	 * flexible external meta data device,
	 * or with PAGE_SIZE > 4k */
	len = min_t(unsigned int, PAGE_SIZE,
		(drbd_md_last_sector(mdev->ldev) - on_disk_sector + 1)<<9);

	/* serialize IO on this page */
	bm_page_lock_io(mdev, page_nr);
	/* before memcpy and submit,
	 * so it can be redirtied any time */
	bm_set_page_unchanged(b->bm_pages[page_nr]);

	if (ctx->flags & BM_AIO_COPY_PAGES) {
		/* FIXME alloc_page is good enough for now, but actually needs
		 * to use pre-allocated page pool */
		void *src, *dest;
		page = alloc_page(__GFP_HIGHMEM|__GFP_WAIT);
		dest = kmap_atomic(page, KM_USER0);
		src = kmap_atomic(b->bm_pages[page_nr], KM_USER1);
		memcpy(dest, src, PAGE_SIZE);
		kunmap_atomic(src, KM_USER1);
		kunmap_atomic(dest, KM_USER0);
		bm_store_page_idx(page, page_nr);
	} else
		page = b->bm_pages[page_nr];

	bio->bi_bdev = mdev->ldev->md_bdev;
	bio->bi_sector = on_disk_sector;
	bio_add_page(bio, page, len, 0);
	bio->bi_private = ctx;
	bio->bi_end_io = bm_async_io_complete;

	if (drbd_insert_fault(mdev, (rw & WRITE) ? DRBD_FAULT_MD_WR : DRBD_FAULT_MD_RD)) {
		bio->bi_rw |= rw;
		bio_endio(bio, -EIO);
	} else {
		submit_bio(rw, bio);
	}
}

/*
 * bm_rw: read/write the whole bitmap from/to its on disk location.
 */
static int bm_rw(struct drbd_conf *mdev, int rw, unsigned lazy_writeout_upper_idx) __must_hold(local)
{
	struct bm_aio_ctx ctx = {
		.mdev = mdev,
		.in_flight = ATOMIC_INIT(1),
		.done = COMPLETION_INITIALIZER_ONSTACK(ctx.done),
		.flags = lazy_writeout_upper_idx ? BM_AIO_COPY_PAGES : 0,
	};
	struct drbd_bitmap *b = mdev->bitmap;
	int num_pages, i, count = 0;
	unsigned long now;
	char ppb[10];
	int err = 0;

	/*
	 * We are protected against bitmap disappearing/resizing by holding an
	 * ldev reference (caller must have called get_ldev()).
	 * For read/write, we are protected against changes to the bitmap by
	 * the bitmap lock (see drbd_bitmap_io).
	 * For lazy writeout, we don't care for ongoing changes to the bitmap,
	 * as we submit copies of pages anyways.
	 */
	if (!ctx.flags)
		WARN_ON(!(BM_LOCKED_MASK & b->bm_flags));

	num_pages = b->bm_number_of_pages;

	now = jiffies;

	/* let the layers below us try to merge these bios... */
	for (i = 0; i < num_pages; i++) {
		/* ignore completely unchanged pages */
		if (lazy_writeout_upper_idx && i == lazy_writeout_upper_idx)
			break;
		if (rw & WRITE) {
			if (bm_test_page_unchanged(b->bm_pages[i])) {
				dynamic_dev_dbg(DEV, "skipped bm write for idx %u\n", i);
				continue;
			}
			/* during lazy writeout,
			 * ignore those pages not marked for lazy writeout. */
			if (lazy_writeout_upper_idx &&
			    !bm_test_page_lazy_writeout(b->bm_pages[i])) {
				dynamic_dev_dbg(DEV, "skipped bm lazy write for idx %u\n", i);
				continue;
			}
		}
		atomic_inc(&ctx.in_flight);
		bm_page_io_async(&ctx, i, rw);
		++count;
		cond_resched();
	}

	/*
	 * We initialize ctx.in_flight to one to make sure bm_async_io_complete
	 * will not complete() early, and decrement / test it here.  If there
	 * are still some bios in flight, we need to wait for them here.
	 */
	if (!atomic_dec_and_test(&ctx.in_flight))
		wait_for_completion(&ctx.done);
	dev_info(DEV, "bitmap %s of %u pages took %lu jiffies\n",
			rw == WRITE ? "WRITE" : "READ",
			count, jiffies - now);

	if (ctx.error) {
		dev_alert(DEV, "we had at least one MD IO ERROR during bitmap IO\n");
		drbd_chk_io_error(mdev, 1, true);
		err = -EIO; /* ctx.error ? */
	}

	now = jiffies;
	if (rw == WRITE) {
		drbd_md_flush(mdev);
	} else /* rw == READ */ {
		b->bm_set = bm_count_bits(b);
		dev_info(DEV, "recounting of set bits took additional %lu jiffies\n",
		     jiffies - now);
	}
	now = b->bm_set;

	dev_info(DEV, "%s (%lu bits) marked out-of-sync by on disk bit-map.\n",
	     ppsize(ppb, now << (BM_BLOCK_SHIFT-10)), now);

	return err;
}

/**
 * drbd_bm_read() - Read the whole bitmap from its on disk location.
 * @mdev:	DRBD device.
 */
int drbd_bm_read(struct drbd_conf *mdev) __must_hold(local)
{
	return bm_rw(mdev, READ, 0);
}

/**
 * drbd_bm_write() - Write the whole bitmap to its on disk location.
 * @mdev:	DRBD device.
 *
 * Will only write pages that have changed since last IO.
 */
int drbd_bm_write(struct drbd_conf *mdev) __must_hold(local)
{
	return bm_rw(mdev, WRITE, 0);
}

/**
 * drbd_bm_write_lazy() - Write bitmap pages 0 to @upper_idx-1, if they have changed.
 * @mdev:	DRBD device.
 * @upper_idx:	0: write all changed pages; +ve: page index to stop scanning for changed pages
 */
int drbd_bm_write_lazy(struct drbd_conf *mdev, unsigned upper_idx) __must_hold(local)
{
	return bm_rw(mdev, WRITE, upper_idx);
}


/**
 * drbd_bm_write_page() - Writes a PAGE_SIZE aligned piece of bitmap
 * @mdev:	DRBD device.
 * @idx:	bitmap page index
 *
 * We don't want to special case on logical_block_size of the backend device,
 * so we submit PAGE_SIZE aligned pieces.
 * Note that on "most" systems, PAGE_SIZE is 4k.
 *
 * In case this becomes an issue on systems with larger PAGE_SIZE,
 * we may want to change this again to write 4k aligned 4k pieces.
 */
int drbd_bm_write_page(struct drbd_conf *mdev, unsigned int idx) __must_hold(local)
{
	struct bm_aio_ctx ctx = {
		.mdev = mdev,
		.in_flight = ATOMIC_INIT(1),
		.done = COMPLETION_INITIALIZER_ONSTACK(ctx.done),
		.flags = BM_AIO_COPY_PAGES,
	};

	if (bm_test_page_unchanged(mdev->bitmap->bm_pages[idx])) {
		dynamic_dev_dbg(DEV, "skipped bm page write for idx %u\n", idx);
		return 0;
	}

	bm_page_io_async(&ctx, idx, WRITE_SYNC);
	wait_for_completion(&ctx.done);

	if (ctx.error)
		drbd_chk_io_error(mdev, 1, true);
	/* that should force detach, so the in memory bitmap will be
	 * gone in a moment as well. */

	mdev->bm_writ_cnt++;
	return ctx.error;
}

/* NOTE
 * find_first_bit returns int, we return unsigned long.
 * For this to work on 32bit arch with bitnumbers > (1<<32),
 * we'd need to return u64, and get a whole lot of other places
 * fixed where we still use unsigned long.
 *
 * this returns a bit number, NOT a sector!
 */
static unsigned long __bm_find_next(struct drbd_conf *mdev, unsigned long bm_fo,
	const int find_zero_bit, const enum km_type km)
{
	struct drbd_bitmap *b = mdev->bitmap;
	unsigned long *p_addr;
	unsigned long bit_offset;
	unsigned i;


	if (bm_fo > b->bm_bits) {
		dev_err(DEV, "bm_fo=%lu bm_bits=%lu\n", bm_fo, b->bm_bits);
		bm_fo = DRBD_END_OF_BITMAP;
	} else {
		while (bm_fo < b->bm_bits) {
			/* bit offset of the first bit in the page */
			bit_offset = bm_fo & ~BITS_PER_PAGE_MASK;
			p_addr = __bm_map_pidx(b, bm_bit_to_page_idx(b, bm_fo), km);

			if (find_zero_bit)
				i = generic_find_next_zero_le_bit(p_addr,
						PAGE_SIZE*8, bm_fo & BITS_PER_PAGE_MASK);
			else
				i = generic_find_next_le_bit(p_addr,
						PAGE_SIZE*8, bm_fo & BITS_PER_PAGE_MASK);

			__bm_unmap(p_addr, km);
			if (i < PAGE_SIZE*8) {
				bm_fo = bit_offset + i;
				if (bm_fo >= b->bm_bits)
					break;
				goto found;
			}
			bm_fo = bit_offset + PAGE_SIZE*8;
		}
		bm_fo = DRBD_END_OF_BITMAP;
	}
 found:
	return bm_fo;
}

static unsigned long bm_find_next(struct drbd_conf *mdev,
	unsigned long bm_fo, const int find_zero_bit)
{
	struct drbd_bitmap *b = mdev->bitmap;
	unsigned long i = DRBD_END_OF_BITMAP;

	ERR_IF(!b) return i;
	ERR_IF(!b->bm_pages) return i;

	spin_lock_irq(&b->bm_lock);
	if (BM_DONT_TEST & b->bm_flags)
		bm_print_lock_info(mdev);

	i = __bm_find_next(mdev, bm_fo, find_zero_bit, KM_IRQ1);

	spin_unlock_irq(&b->bm_lock);
	return i;
}

unsigned long drbd_bm_find_next(struct drbd_conf *mdev, unsigned long bm_fo)
{
	return bm_find_next(mdev, bm_fo, 0);
}

#if 0
/* not yet needed for anything. */
unsigned long drbd_bm_find_next_zero(struct drbd_conf *mdev, unsigned long bm_fo)
{
	return bm_find_next(mdev, bm_fo, 1);
}
#endif

/* does not spin_lock_irqsave.
 * you must take drbd_bm_lock() first */
unsigned long _drbd_bm_find_next(struct drbd_conf *mdev, unsigned long bm_fo)
{
	/* WARN_ON(!(BM_DONT_SET & mdev->b->bm_flags)); */
	return __bm_find_next(mdev, bm_fo, 0, KM_USER1);
}

unsigned long _drbd_bm_find_next_zero(struct drbd_conf *mdev, unsigned long bm_fo)
{
	/* WARN_ON(!(BM_DONT_SET & mdev->b->bm_flags)); */
	return __bm_find_next(mdev, bm_fo, 1, KM_USER1);
}

/* returns number of bits actually changed.
 * for val != 0, we change 0 -> 1, return code positive
 * for val == 0, we change 1 -> 0, return code negative
 * wants bitnr, not sector.
 * expected to be called for only a few bits (e - s about BITS_PER_LONG).
 * Must hold bitmap lock already. */
static int __bm_change_bits_to(struct drbd_conf *mdev, const unsigned long s,
	unsigned long e, int val, const enum km_type km)
{
	struct drbd_bitmap *b = mdev->bitmap;
	unsigned long *p_addr = NULL;
	unsigned long bitnr;
	unsigned int last_page_nr = -1U;
	int c = 0;
	int changed_total = 0;

	if (e >= b->bm_bits) {
		dev_err(DEV, "ASSERT FAILED: bit_s=%lu bit_e=%lu bm_bits=%lu\n",
				s, e, b->bm_bits);
		e = b->bm_bits ? b->bm_bits -1 : 0;
	}
	for (bitnr = s; bitnr <= e; bitnr++) {
		unsigned int page_nr = bm_bit_to_page_idx(b, bitnr);
		if (page_nr != last_page_nr) {
			if (p_addr)
				__bm_unmap(p_addr, km);
			if (c < 0)
				bm_set_page_lazy_writeout(b->bm_pages[last_page_nr]);
			else if (c > 0)
				bm_set_page_need_writeout(b->bm_pages[last_page_nr]);
			changed_total += c;
			c = 0;
			p_addr = __bm_map_pidx(b, page_nr, km);
			last_page_nr = page_nr;
		}
		if (val)
			c += (0 == generic___test_and_set_le_bit(bitnr & BITS_PER_PAGE_MASK, p_addr));
		else
			c -= (0 != generic___test_and_clear_le_bit(bitnr & BITS_PER_PAGE_MASK, p_addr));
	}
	if (p_addr)
		__bm_unmap(p_addr, km);
	if (c < 0)
		bm_set_page_lazy_writeout(b->bm_pages[last_page_nr]);
	else if (c > 0)
		bm_set_page_need_writeout(b->bm_pages[last_page_nr]);
	changed_total += c;
	b->bm_set += changed_total;
	return changed_total;
}

/* returns number of bits actually changed.
 * for val != 0, we change 0 -> 1, return code positive
 * for val == 0, we change 1 -> 0, return code negative
 * wants bitnr, not sector */
static int bm_change_bits_to(struct drbd_conf *mdev, const unsigned long s,
	const unsigned long e, int val)
{
	unsigned long flags;
	struct drbd_bitmap *b = mdev->bitmap;
	int c = 0;

	ERR_IF(!b) return 1;
	ERR_IF(!b->bm_pages) return 0;

	spin_lock_irqsave(&b->bm_lock, flags);
	if ((val ? BM_DONT_SET : BM_DONT_CLEAR) & b->bm_flags)
		bm_print_lock_info(mdev);

	c = __bm_change_bits_to(mdev, s, e, val, KM_IRQ1);

	spin_unlock_irqrestore(&b->bm_lock, flags);
	return c;
}

/* returns number of bits changed 0 -> 1 */
int drbd_bm_set_bits(struct drbd_conf *mdev, const unsigned long s, const unsigned long e)
{
	return bm_change_bits_to(mdev, s, e, 1);
}

/* returns number of bits changed 1 -> 0 */
int drbd_bm_clear_bits(struct drbd_conf *mdev, const unsigned long s, const unsigned long e)
{
	return -bm_change_bits_to(mdev, s, e, 0);
}

/* sets all bits in full words,
 * from first_word up to, but not including, last_word */
static inline void bm_set_full_words_within_one_page(struct drbd_bitmap *b,
		int page_nr, int first_word, int last_word)
{
	int i;
	int bits;
	unsigned long *paddr = kmap_atomic(b->bm_pages[page_nr], KM_USER0);
	for (i = first_word; i < last_word; i++) {
		bits = hweight_long(paddr[i]);
		paddr[i] = ~0UL;
		b->bm_set += BITS_PER_LONG - bits;
	}
	kunmap_atomic(paddr, KM_USER0);
}

/* Same thing as drbd_bm_set_bits, but without taking the spin_lock_irqsave.
 * You must first drbd_bm_lock().
 * Can be called to set the whole bitmap in one go.
 * Sets bits from s to e _inclusive_. */
void _drbd_bm_set_bits(struct drbd_conf *mdev, const unsigned long s, const unsigned long e)
{
	/* First set_bit from the first bit (s)
	 * up to the next long boundary (sl),
	 * then assign full words up to the last long boundary (el),
	 * then set_bit up to and including the last bit (e).
	 *
	 * Do not use memset, because we must account for changes,
	 * so we need to loop over the words with hweight() anyways.
	 */
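	/* Worked example, assuming 64bit longs: for s == 70 and e == 300 we
	 * get sl == 128 and el == 256; bits 70..127 and 256..300 are set bit
	 * by bit via __bm_change_bits_to(), while the full words covering
	 * bits 128..255 are assigned by bm_set_full_words_within_one_page(). */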
1371 unsigned long sl = ALIGN(s,BITS_PER_LONG);
1372 unsigned long el = (e+1) & ~((unsigned long)BITS_PER_LONG-1);
1373 int first_page;
1374 int last_page;
1375 int page_nr;
1376 int first_word;
1377 int last_word;
1378
1379 if (e - s <= 3*BITS_PER_LONG) {
1380 /* don't bother; el and sl may even be wrong. */
1381 __bm_change_bits_to(mdev, s, e, 1, KM_USER0);
1382 return;
1383 }
1384
1385 /* difference is large enough that we can trust sl and el */
1386
1387 /* bits filling the current long */
1388 if (sl)
1389 __bm_change_bits_to(mdev, s, sl-1, 1, KM_USER0);
1390
1391 first_page = sl >> (3 + PAGE_SHIFT);
1392 last_page = el >> (3 + PAGE_SHIFT);
1393
1394 /* MLPP: modulo longs per page */
1395 /* LWPP: long words per page */
1396 first_word = MLPP(sl >> LN2_BPL);
1397 last_word = LWPP;
1398
1399 /* first and full pages, unless first page == last page */
1400 for (page_nr = first_page; page_nr < last_page; page_nr++) {
1401 bm_set_full_words_within_one_page(mdev->bitmap, page_nr, first_word, last_word);
1402 cond_resched();
1403 first_word = 0;
1404 }
1405
1406 /* last page (respectively only page, for first page == last page) */
1407 last_word = MLPP(el >> LN2_BPL);
1408 bm_set_full_words_within_one_page(mdev->bitmap, last_page, first_word, last_word);
1409
1410 /* possibly trailing bits.
1411 * example: (e & 63) == 63, el will be e+1.
1412 * if that happened to be the very last bit of the bitmap, passing el to
1413 * __bm_change_bits_to() would then trigger an assert there.
1414 */
1415 if (el <= e)
1416 __bm_change_bits_to(mdev, el, e, 1, KM_USER0);
1417}
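/*
 * Worked example for the boundary arithmetic above (editorial sketch,
 * assuming BITS_PER_LONG == 64): a call with s = 70 and e = 1000 yields
 *
 *	sl = ALIGN(70, 64)       = 128
 *	el = (1000 + 1) & ~63UL  = 960
 *
 * so bits 70..127 and 960..1000 go through __bm_change_bits_to() bit by
 * bit, while bits 128..959 are filled one full word at a time by
 * bm_set_full_words_within_one_page().  For e - s <= 3*BITS_PER_LONG the
 * function takes the bit-by-bit path only and never uses sl or el.
 */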
1418
1419/* returns bit state
1420 * wants bitnr, NOT sector.
1421 * inherently racy... area needs to be locked by means of {al,rs}_lru
1422 * 1 ... bit set
1423 * 0 ... bit not set
1424 * -1 ... first out of bounds access, stop testing for bits!
1425 */
1426int drbd_bm_test_bit(struct drbd_conf *mdev, const unsigned long bitnr)
1427{
1428 unsigned long flags;
1429 struct drbd_bitmap *b = mdev->bitmap;
1430 unsigned long *p_addr;
1431 int i;
1432
1433 ERR_IF(!b) return 0;
1434 ERR_IF(!b->bm_pages) return 0;
1435
1436 spin_lock_irqsave(&b->bm_lock, flags);
Lars Ellenberg20ceb2b2011-01-21 10:56:44 +01001437 if (BM_DONT_TEST & b->bm_flags)
Philipp Reisnerb411b362009-09-25 16:07:19 -07001438 bm_print_lock_info(mdev);
1439 if (bitnr < b->bm_bits) {
Lars Ellenberg19f843a2010-12-15 08:59:11 +01001440 p_addr = bm_map_pidx(b, bm_bit_to_page_idx(b, bitnr));
Lars Ellenberg4b0715f2010-12-14 15:13:04 +01001441 i = generic_test_le_bit(bitnr & BITS_PER_PAGE_MASK, p_addr) ? 1 : 0;
Philipp Reisnerb411b362009-09-25 16:07:19 -07001442 bm_unmap(p_addr);
1443 } else if (bitnr == b->bm_bits) {
1444 i = -1;
1445 } else { /* (bitnr > b->bm_bits) */
1446 dev_err(DEV, "bitnr=%lu > bm_bits=%lu\n", bitnr, b->bm_bits);
1447 i = 0;
1448 }
1449
1450 spin_unlock_irqrestore(&b->bm_lock, flags);
1451 return i;
1452}
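/*
 * Usage sketch for the return convention above (editorial sketch; the loop
 * and handle_out_of_sync() are hypothetical, not part of the original
 * source):
 *
 *	for (bit = start; ; bit++) {
 *		int v = drbd_bm_test_bit(mdev, bit);
 *		if (v < 0)
 *			break;				(ran past bm_bits, stop)
 *		if (v)
 *			handle_out_of_sync(bit);	(bit set: needs resync)
 *	}
 *
 * Only the access at exactly bm_bits returns -1; anything beyond that is
 * reported with dev_err() and returns 0.
 */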
1453
1454/* returns number of bits set in the range [s, e] */
1455int drbd_bm_count_bits(struct drbd_conf *mdev, const unsigned long s, const unsigned long e)
1456{
1457 unsigned long flags;
1458 struct drbd_bitmap *b = mdev->bitmap;
Lars Ellenberg19f843a2010-12-15 08:59:11 +01001459 unsigned long *p_addr = NULL;
Philipp Reisnerb411b362009-09-25 16:07:19 -07001460 unsigned long bitnr;
Lars Ellenberg19f843a2010-12-15 08:59:11 +01001461 unsigned int page_nr = -1U;
Philipp Reisnerb411b362009-09-25 16:07:19 -07001462 int c = 0;
Philipp Reisnerb411b362009-09-25 16:07:19 -07001463
1464 /* If this is called without a bitmap, that is a bug. But just to be
1465 * robust in case we screwed up elsewhere, pretend that there
1466 * was one dirty bit in the requested area, so we won't try to do a
1467 * local read there (no bitmap probably implies no disk) */
1468 ERR_IF(!b) return 1;
1469 ERR_IF(!b->bm_pages) return 1;
1470
1471 spin_lock_irqsave(&b->bm_lock, flags);
Lars Ellenberg20ceb2b2011-01-21 10:56:44 +01001472 if (BM_DONT_TEST & b->bm_flags)
Philipp Reisnerb411b362009-09-25 16:07:19 -07001473 bm_print_lock_info(mdev);
1474 for (bitnr = s; bitnr <= e; bitnr++) {
Lars Ellenberg19f843a2010-12-15 08:59:11 +01001475 unsigned int idx = bm_bit_to_page_idx(b, bitnr);
1476 if (page_nr != idx) {
1477 page_nr = idx;
Philipp Reisnerb411b362009-09-25 16:07:19 -07001478 if (p_addr)
1479 bm_unmap(p_addr);
Lars Ellenberg19f843a2010-12-15 08:59:11 +01001480 p_addr = bm_map_pidx(b, idx);
Philipp Reisnerb411b362009-09-25 16:07:19 -07001481 }
1482 ERR_IF (bitnr >= b->bm_bits) {
1483 dev_err(DEV, "bitnr=%lu bm_bits=%lu\n", bitnr, b->bm_bits);
1484 } else {
Lars Ellenberg95a0f102010-12-15 08:59:09 +01001485 c += (0 != generic_test_le_bit(bitnr - (page_nr << (PAGE_SHIFT+3)), p_addr));
Philipp Reisnerb411b362009-09-25 16:07:19 -07001486 }
1487 }
1488 if (p_addr)
1489 bm_unmap(p_addr);
1490 spin_unlock_irqrestore(&b->bm_lock, flags);
1491 return c;
1492}
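/*
 * Offset arithmetic example (editorial sketch, assuming 4 KiB pages):
 * each bitmap page holds 1 << (PAGE_SHIFT + 3) = 32768 bits, so bit
 * number 40000 lives on page 1 at in-page offset 40000 - 32768 = 7232,
 * the same value that masking with BITS_PER_PAGE_MASK (as done in
 * __bm_change_bits_to()) would produce.
 */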
1493
1494
1495/* inherently racy...
1496 * return value may be already out-of-date when this function returns.
1497 * but the general usage is that this is only use during a cstate when bits are
1498 * only cleared, not set, and typically only care for the case when the return
1499 * value is zero, or we already "locked" this "bitmap extent" by other means.
1500 *
1501 * enr is bm-extent number, since we chose to name one sector (512 bytes)
1502 * worth of the bitmap a "bitmap extent".
1503 *
1504 * TODO
1505 * I think since we use it like a reference count, we should use the real
1506 * reference count of some bitmap extent element from some lru instead...
1507 *
1508 */
1509int drbd_bm_e_weight(struct drbd_conf *mdev, unsigned long enr)
1510{
1511 struct drbd_bitmap *b = mdev->bitmap;
1512 int count, s, e;
1513 unsigned long flags;
1514 unsigned long *p_addr, *bm;
1515
1516 ERR_IF(!b) return 0;
1517 ERR_IF(!b->bm_pages) return 0;
1518
1519 spin_lock_irqsave(&b->bm_lock, flags);
Lars Ellenberg20ceb2b2011-01-21 10:56:44 +01001520 if (BM_DONT_TEST & b->bm_flags)
Philipp Reisnerb411b362009-09-25 16:07:19 -07001521 bm_print_lock_info(mdev);
1522
1523 s = S2W(enr);
1524 e = min((size_t)S2W(enr+1), b->bm_words);
1525 count = 0;
1526 if (s < b->bm_words) {
1527 int n = e-s;
Lars Ellenberg19f843a2010-12-15 08:59:11 +01001528 p_addr = bm_map_pidx(b, bm_word_to_page_idx(b, s));
Philipp Reisnerb411b362009-09-25 16:07:19 -07001529 bm = p_addr + MLPP(s);
1530 while (n--)
1531 count += hweight_long(*bm++);
1532 bm_unmap(p_addr);
1533 } else {
1534 dev_err(DEV, "start offset (%d) too large in drbd_bm_e_weight\n", s);
1535 }
1536 spin_unlock_irqrestore(&b->bm_lock, flags);
1537 return count;
1538}
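/*
 * Sizing example for the "bitmap extent" notion above (editorial sketch,
 * assuming 64-bit longs and the 4 KiB-per-bit granularity): one extent is
 * 512 bytes of bitmap = 64 long words = 4096 bits, covering 16 MiB of
 * backend storage; S2W() just scales the extent number to a word offset,
 * e.g. S2W(3) == 192 in this configuration.
 */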
1539
Lars Ellenberg4b0715f2010-12-14 15:13:04 +01001540/* Set all bits covered by the AL-extent al_enr.
1541 * Returns number of bits changed. */
Philipp Reisnerb411b362009-09-25 16:07:19 -07001542unsigned long drbd_bm_ALe_set_all(struct drbd_conf *mdev, unsigned long al_enr)
1543{
1544 struct drbd_bitmap *b = mdev->bitmap;
1545 unsigned long *p_addr, *bm;
1546 unsigned long weight;
Lars Ellenberg4b0715f2010-12-14 15:13:04 +01001547 unsigned long s, e;
1548 int count, i, do_now;
Philipp Reisnerb411b362009-09-25 16:07:19 -07001549 ERR_IF(!b) return 0;
1550 ERR_IF(!b->bm_pages) return 0;
1551
1552 spin_lock_irq(&b->bm_lock);
Lars Ellenberg20ceb2b2011-01-21 10:56:44 +01001553 if (BM_DONT_SET & b->bm_flags)
Philipp Reisnerb411b362009-09-25 16:07:19 -07001554 bm_print_lock_info(mdev);
1555 weight = b->bm_set;
1556
1557 s = al_enr * BM_WORDS_PER_AL_EXT;
1558 e = min_t(size_t, s + BM_WORDS_PER_AL_EXT, b->bm_words);
1559 /* assert that s and e are on the same page */
1560 D_ASSERT((e-1) >> (PAGE_SHIFT - LN2_BPL + 3)
1561 == s >> (PAGE_SHIFT - LN2_BPL + 3));
1562 count = 0;
1563 if (s < b->bm_words) {
1564 i = do_now = e-s;
Lars Ellenberg19f843a2010-12-15 08:59:11 +01001565 p_addr = bm_map_pidx(b, bm_word_to_page_idx(b, s));
Philipp Reisnerb411b362009-09-25 16:07:19 -07001566 bm = p_addr + MLPP(s);
1567 while (i--) {
1568 count += hweight_long(*bm);
1569 *bm = -1UL;
1570 bm++;
1571 }
1572 bm_unmap(p_addr);
1573 b->bm_set += do_now*BITS_PER_LONG - count;
1574 if (e == b->bm_words)
1575 b->bm_set -= bm_clear_surplus(b);
1576 } else {
Lars Ellenberg4b0715f2010-12-14 15:13:04 +01001577 dev_err(DEV, "start offset (%lu) too large in drbd_bm_ALe_set_all\n", s);
Philipp Reisnerb411b362009-09-25 16:07:19 -07001578 }
1579 weight = b->bm_set - weight;
1580 spin_unlock_irq(&b->bm_lock);
1581 return weight;
1582}
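/*
 * Sizing note for the same-page assertion above (editorial sketch,
 * assuming 64-bit longs, 4 KiB pages, 4 KiB-per-bit granularity and the
 * default 4 MiB activity-log extent size): one AL extent maps to 1024
 * bitmap bits, i.e. BM_WORDS_PER_AL_EXT == 16 long words == 128 bytes,
 * which always fits within a single bitmap page; that is what the
 * D_ASSERT on s and e checks, and why a single bm_map_pidx() suffices.
 */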