/*
   drbd_bitmap.c

   This file is part of DRBD by Philipp Reisner and Lars Ellenberg.

   Copyright (C) 2004-2008, LINBIT Information Technologies GmbH.
   Copyright (C) 2004-2008, Philipp Reisner <philipp.reisner@linbit.com>.
   Copyright (C) 2004-2008, Lars Ellenberg <lars.ellenberg@linbit.com>.

   drbd is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 2, or (at your option)
   any later version.

   drbd is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with drbd; see the file COPYING.  If not, write to
   the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
 */

#include <linux/bitops.h>
#include <linux/vmalloc.h>
#include <linux/string.h>
#include <linux/drbd.h>
#include <linux/slab.h>
#include <asm/kmap_types.h>
#include "drbd_int.h"

/* OPAQUE outside this file!
 * interface defined in drbd_int.h
 *
 * convention:
 * function name drbd_bm_... => used elsewhere, "public".
 * function name      bm_... => internal to implementation, "private".
 *
 * Note that since find_first_bit returns int, at the current granularity of
 * the bitmap (4KB per bit), this implementation "only" supports up to
 * 1<<(32+12) == 16 TB...
 */
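
/* To illustrate that limit: one bit covers one 4 KiB block (1<<12 bytes),
 * and find_first_bit can return at most a 32 bit bit number, so the
 * addressable device size is bounded by
 *	(1ULL << 32) * (1ULL << 12) == 1ULL << 44 == 16 TiB. */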

/*
 * NOTE
 *  Access to the *bm_pages is protected by bm_lock.
 *  It is safe to read the other members within the lock.
 *
 *  drbd_bm_set_bits is called from bio_endio callbacks;
 *  we may be called with irqs already disabled,
 *  so we need spin_lock_irqsave().
 *  And we need the kmap_atomic.
 */
struct drbd_bitmap {
        struct page **bm_pages;
        spinlock_t bm_lock;
        /* WARNING unsigned long bm_*:
         * a 32bit bit number is just enough for a 512 MB bitmap.
         * it will blow up if we make the bitmap bigger...
         * not that it makes much sense to have a bitmap that large,
         * rather change the granularity to 16k or 64k or something.
         * (that implies other problems, however...)
         */
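        /* (2^32 bits are 2^29 bytes, i.e. 512 MB of bitmap, which at
         * 4 KiB per bit covers the 16 TB noted above.) */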
        unsigned long bm_set;   /* nr of set bits; THINK maybe atomic_t? */
        unsigned long bm_bits;
        size_t bm_words;
        size_t bm_number_of_pages;
        sector_t bm_dev_capacity;
        struct mutex bm_change; /* serializes resize operations */

        atomic_t bm_async_io;
        wait_queue_head_t bm_io_wait;

        unsigned long bm_flags;

        /* debugging aid, in case we are still racy somewhere */
        char *bm_why;
        struct task_struct *bm_task;
};

/* definition of bits in bm_flags */
#define BM_LOCKED       0
#define BM_MD_IO_ERROR  1
#define BM_P_VMALLOCED  2

static int __bm_change_bits_to(struct drbd_conf *mdev, const unsigned long s,
                               unsigned long e, int val, const enum km_type km);

static int bm_is_locked(struct drbd_bitmap *b)
{
        return test_bit(BM_LOCKED, &b->bm_flags);
}

#define bm_print_lock_info(m) __bm_print_lock_info(m, __func__)
static void __bm_print_lock_info(struct drbd_conf *mdev, const char *func)
{
        struct drbd_bitmap *b = mdev->bitmap;
        if (!__ratelimit(&drbd_ratelimit_state))
                return;
        dev_err(DEV, "FIXME %s in %s, bitmap locked for '%s' by %s\n",
            current == mdev->receiver.task ? "receiver" :
            current == mdev->asender.task  ? "asender"  :
            current == mdev->worker.task   ? "worker"   : current->comm,
            func, b->bm_why ?: "?",
            b->bm_task == mdev->receiver.task ? "receiver" :
            b->bm_task == mdev->asender.task  ? "asender"  :
            b->bm_task == mdev->worker.task   ? "worker"   : "?");
}

void drbd_bm_lock(struct drbd_conf *mdev, char *why)
{
        struct drbd_bitmap *b = mdev->bitmap;
        int trylock_failed;

        if (!b) {
                dev_err(DEV, "FIXME no bitmap in drbd_bm_lock!?\n");
                return;
        }

        trylock_failed = !mutex_trylock(&b->bm_change);

        if (trylock_failed) {
                dev_warn(DEV, "%s going to '%s' but bitmap already locked for '%s' by %s\n",
                    current == mdev->receiver.task ? "receiver" :
                    current == mdev->asender.task  ? "asender"  :
                    current == mdev->worker.task   ? "worker"   : current->comm,
                    why, b->bm_why ?: "?",
                    b->bm_task == mdev->receiver.task ? "receiver" :
                    b->bm_task == mdev->asender.task  ? "asender"  :
                    b->bm_task == mdev->worker.task   ? "worker"   : "?");
                mutex_lock(&b->bm_change);
        }
        if (__test_and_set_bit(BM_LOCKED, &b->bm_flags))
                dev_err(DEV, "FIXME bitmap already locked in bm_lock\n");

        b->bm_why  = why;
        b->bm_task = current;
}

void drbd_bm_unlock(struct drbd_conf *mdev)
{
        struct drbd_bitmap *b = mdev->bitmap;
        if (!b) {
                dev_err(DEV, "FIXME no bitmap in drbd_bm_unlock!?\n");
                return;
        }

        if (!__test_and_clear_bit(BM_LOCKED, &mdev->bitmap->bm_flags))
                dev_err(DEV, "FIXME bitmap not locked in bm_unlock\n");

        b->bm_why  = NULL;
        b->bm_task = NULL;
        mutex_unlock(&b->bm_change);
}

static unsigned int bm_bit_to_page_idx(struct drbd_bitmap *b, u64 bitnr)
{
        /* page_nr = (bitnr/8) >> PAGE_SHIFT; */
        unsigned int page_nr = bitnr >> (PAGE_SHIFT + 3);
        BUG_ON(page_nr >= b->bm_number_of_pages);
        return page_nr;
}
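
/* Example: with 4 KiB pages a page holds 1<<(12+3) == 32768 bits,
 * so e.g. bitnr 40000 >> 15 yields page_nr 1. */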
165
Philipp Reisnerb411b362009-09-25 16:07:19 -0700166/* word offset to long pointer */
167static unsigned long *__bm_map_paddr(struct drbd_bitmap *b, unsigned long offset, const enum km_type km)
168{
169 struct page *page;
170 unsigned long page_nr;
171
172 /* page_nr = (word*sizeof(long)) >> PAGE_SHIFT; */
173 page_nr = offset >> (PAGE_SHIFT - LN2_BPL + 3);
174 BUG_ON(page_nr >= b->bm_number_of_pages);
175 page = b->bm_pages[page_nr];
176
177 return (unsigned long *) kmap_atomic(page, km);
178}
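
/* Example: on 64 bit, LN2_BPL == 6 and a 4 KiB page holds
 * 4096/8 == 512 == 1<<9 long words; accordingly
 * PAGE_SHIFT - LN2_BPL + 3 == 12 - 6 + 3 == 9. */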
179
Lars Ellenberg95a0f102010-12-15 08:59:09 +0100180static unsigned long *__bm_map_pidx(struct drbd_bitmap *b, unsigned int idx, const enum km_type km)
181{
182 struct page *page = b->bm_pages[idx];
183 return (unsigned long *) kmap_atomic(page, km);
184}
185
186static unsigned long *bm_map_pidx(struct drbd_bitmap *b, unsigned int idx)
187{
188 return __bm_map_pidx(b, idx, KM_IRQ1);
189}
190
Philipp Reisnerb411b362009-09-25 16:07:19 -0700191static unsigned long * bm_map_paddr(struct drbd_bitmap *b, unsigned long offset)
192{
193 return __bm_map_paddr(b, offset, KM_IRQ1);
194}
195
196static void __bm_unmap(unsigned long *p_addr, const enum km_type km)
197{
198 kunmap_atomic(p_addr, km);
199};
200
201static void bm_unmap(unsigned long *p_addr)
202{
203 return __bm_unmap(p_addr, KM_IRQ1);
204}
205
206/* long word offset of _bitmap_ sector */
207#define S2W(s) ((s)<<(BM_EXT_SHIFT-BM_BLOCK_SHIFT-LN2_BPL))
208/* word offset from start of bitmap to word number _in_page_
209 * modulo longs per page
210#define MLPP(X) ((X) % (PAGE_SIZE/sizeof(long))
211 hm, well, Philipp thinks gcc might not optimze the % into & (... - 1)
212 so do it explicitly:
213 */
214#define MLPP(X) ((X) & ((PAGE_SIZE/sizeof(long))-1))
215
216/* Long words per page */
217#define LWPP (PAGE_SIZE/sizeof(long))
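
/* Example: BM_EXT_SHIFT == 24, i.e. one 512 byte sector of bitmap
 * (512*8 bits at 4 KiB each) covers 16 MiB of storage; on 64 bit,
 * S2W(s) == s << (24 - 12 - 6) == s * 64, the 64 long words making up
 * one such sector.  Likewise LWPP == 512 there, and e.g.
 * MLPP(700) == 700 & 511 == 188. */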

/*
 * actually most functions herein should take a struct drbd_bitmap*, not a
 * struct drbd_conf*, but for the debug macros I like to have the mdev around
 * to be able to report device specific messages.
 */

static void bm_free_pages(struct page **pages, unsigned long number)
{
        unsigned long i;
        if (!pages)
                return;

        for (i = 0; i < number; i++) {
                if (!pages[i]) {
                        printk(KERN_ALERT "drbd: bm_free_pages tried to free "
                                          "a NULL pointer; i=%lu n=%lu\n",
                                          i, number);
                        continue;
                }
                __free_page(pages[i]);
                pages[i] = NULL;
        }
}

static void bm_vk_free(void *ptr, int v)
{
        if (v)
                vfree(ptr);
        else
                kfree(ptr);
}

/*
 * "have" and "want" are NUMBER OF PAGES.
 */
static struct page **bm_realloc_pages(struct drbd_bitmap *b, unsigned long want)
{
        struct page **old_pages = b->bm_pages;
        struct page **new_pages, *page;
        unsigned int i, bytes, vmalloced = 0;
        unsigned long have = b->bm_number_of_pages;

        BUG_ON(have == 0 && old_pages != NULL);
        BUG_ON(have != 0 && old_pages == NULL);

        if (have == want)
                return old_pages;

        /* Try kmalloc first, fall back to vmalloc.
         * GFP_KERNEL is ok, as this is done when a lower level disk is
         * "attached" to the drbd.  Context is receiver thread or cqueue
         * thread.  As we have no disk yet, we are not in the IO path,
         * not even the IO path of the peer. */
        bytes = sizeof(struct page *)*want;
        new_pages = kmalloc(bytes, GFP_KERNEL);
        if (!new_pages) {
                new_pages = vmalloc(bytes);
                if (!new_pages)
                        return NULL;
                vmalloced = 1;
        }

        memset(new_pages, 0, bytes);
        if (want >= have) {
                for (i = 0; i < have; i++)
                        new_pages[i] = old_pages[i];
                for (; i < want; i++) {
                        page = alloc_page(GFP_HIGHUSER);
                        if (!page) {
                                bm_free_pages(new_pages + have, i - have);
                                bm_vk_free(new_pages, vmalloced);
                                return NULL;
                        }
                        new_pages[i] = page;
                }
        } else {
                for (i = 0; i < want; i++)
                        new_pages[i] = old_pages[i];
                /* NOT HERE, we are outside the spinlock!
                bm_free_pages(old_pages + want, have - want);
                */
        }

        if (vmalloced)
                set_bit(BM_P_VMALLOCED, &b->bm_flags);
        else
                clear_bit(BM_P_VMALLOCED, &b->bm_flags);

        return new_pages;
}

/*
 * called on driver init only. TODO call when a device is created.
 * allocates the drbd_bitmap, and stores it in mdev->bitmap.
 */
int drbd_bm_init(struct drbd_conf *mdev)
{
        struct drbd_bitmap *b = mdev->bitmap;
        WARN_ON(b != NULL);
        b = kzalloc(sizeof(struct drbd_bitmap), GFP_KERNEL);
        if (!b)
                return -ENOMEM;
        spin_lock_init(&b->bm_lock);
        mutex_init(&b->bm_change);
        init_waitqueue_head(&b->bm_io_wait);

        mdev->bitmap = b;

        return 0;
}

sector_t drbd_bm_capacity(struct drbd_conf *mdev)
{
        ERR_IF(!mdev->bitmap) return 0;
        return mdev->bitmap->bm_dev_capacity;
}

/* called on driver unload. TODO: call when a device is destroyed.
 */
void drbd_bm_cleanup(struct drbd_conf *mdev)
{
        ERR_IF (!mdev->bitmap) return;
        bm_free_pages(mdev->bitmap->bm_pages, mdev->bitmap->bm_number_of_pages);
        bm_vk_free(mdev->bitmap->bm_pages, test_bit(BM_P_VMALLOCED, &mdev->bitmap->bm_flags));
        kfree(mdev->bitmap);
        mdev->bitmap = NULL;
}

/*
 * since (b->bm_bits % BITS_PER_LONG) may be != 0,
 * this masks out the remaining bits.
 * Returns the number of bits cleared.
 */
#define BITS_PER_PAGE           (1UL << (PAGE_SHIFT + 3))
#define BITS_PER_PAGE_MASK      (BITS_PER_PAGE - 1)
#define BITS_PER_LONG_MASK      (BITS_PER_LONG - 1)
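
/* Example: on 64 bit with b->bm_bits == 1000, tmp below is 1000,
 * tmp & BITS_PER_LONG_MASK == 40, and mask == (1UL << 40) - 1:
 * the low 40 bits of the last used long stay, the upper 24 are the
 * surplus to be cleared. */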
static int bm_clear_surplus(struct drbd_bitmap *b)
{
        unsigned long mask;
        unsigned long *p_addr, *bm;
        int tmp;
        int cleared = 0;

        /* number of bits modulo bits per page */
        tmp = (b->bm_bits & BITS_PER_PAGE_MASK);
        /* mask the used bits of the word containing the last bit */
        mask = (1UL << (tmp & BITS_PER_LONG_MASK)) - 1;
        /* bitmap is always stored little endian,
         * on disk and in core memory alike */
        mask = cpu_to_lel(mask);

        /* because of the "extra long to catch oob access" we allocate in
         * drbd_bm_resize, bm_number_of_pages -1 is not necessarily the page
         * containing the last _relevant_ bitmap word */
        p_addr = bm_map_pidx(b, bm_bit_to_page_idx(b, b->bm_bits - 1));
        bm = p_addr + (tmp/BITS_PER_LONG);
        if (mask) {
                /* If mask != 0, we are not exactly aligned, so bm now points
                 * to the long containing the last bit.
                 * If mask == 0, bm already points to the word immediately
                 * after the last (long word aligned) bit. */
                cleared = hweight_long(*bm & ~mask);
                *bm &= mask;
                bm++;
        }

        if (BITS_PER_LONG == 32 && ((bm - p_addr) & 1) == 1) {
                /* on a 32bit arch, we may need to zero out
                 * a padding long to align with a 64bit remote */
                cleared += hweight_long(*bm);
                *bm = 0;
        }
        bm_unmap(p_addr);
        return cleared;
}

static void bm_set_surplus(struct drbd_bitmap *b)
{
        unsigned long mask;
        unsigned long *p_addr, *bm;
        int tmp;

        /* number of bits modulo bits per page */
        tmp = (b->bm_bits & BITS_PER_PAGE_MASK);
        /* mask the used bits of the word containing the last bit */
        mask = (1UL << (tmp & BITS_PER_LONG_MASK)) - 1;
        /* bitmap is always stored little endian,
         * on disk and in core memory alike */
        mask = cpu_to_lel(mask);

        /* because of the "extra long to catch oob access" we allocate in
         * drbd_bm_resize, bm_number_of_pages -1 is not necessarily the page
         * containing the last _relevant_ bitmap word */
        p_addr = bm_map_pidx(b, bm_bit_to_page_idx(b, b->bm_bits - 1));
        bm = p_addr + (tmp/BITS_PER_LONG);
        if (mask) {
                /* If mask != 0, we are not exactly aligned, so bm now points
                 * to the long containing the last bit.
                 * If mask == 0, bm already points to the word immediately
                 * after the last (long word aligned) bit. */
                *bm |= ~mask;
                bm++;
        }

        if (BITS_PER_LONG == 32 && ((bm - p_addr) & 1) == 1) {
                /* on a 32bit arch, we may need to set
                 * a padding long to align with a 64bit remote */
                *bm = ~0UL;
        }
        bm_unmap(p_addr);
}

static unsigned long bm_count_bits(struct drbd_bitmap *b)
{
        unsigned long *p_addr, *bm, offset = 0;
        unsigned long bits = 0;
        unsigned long i, do_now;
        unsigned long words;

        /* due to 64bit alignment, the last long on a 32bit arch
         * may not be used at all.  The last used long will likely
         * be only partially used, always.  Don't count those bits,
         * but mask them out. */
        words = (b->bm_bits + BITS_PER_LONG - 1) >> LN2_BPL;

        while (offset < words) {
                i = do_now = min_t(size_t, words-offset, LWPP);
                p_addr = __bm_map_paddr(b, offset, KM_USER0);
                bm = p_addr + MLPP(offset);
                while (i--) {
                        bits += hweight_long(*bm++);
                }
                offset += do_now;
                if (offset == words) {
                        /* last word may only be partially used,
                         * see also bm_clear_surplus. */
                        i = (1UL << (b->bm_bits & (BITS_PER_LONG-1))) - 1;
                        if (i) {
                                bits -= hweight_long(p_addr[do_now-1] & ~i);
                                p_addr[do_now-1] &= i;
                        }
                        /* 32bit arch, may have an unused padding long */
                        if (words != b->bm_words)
                                p_addr[do_now] = 0;
                }
                __bm_unmap(p_addr, KM_USER0);
                cond_resched();
        }

        return bits;
}
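
/* Example: on 32 bit with b->bm_bits == 96, only 96/32 == 3 longs are
 * used, but bm_words == ALIGN(96, 64)/32 == 4: the fourth long is
 * 64 bit alignment padding, and bm_count_bits zeroes it out. */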

/* offset and len in long words. */
static void bm_memset(struct drbd_bitmap *b, size_t offset, int c, size_t len)
{
        unsigned long *p_addr, *bm;
        size_t do_now, end;

#define BM_SECTORS_PER_BIT (BM_BLOCK_SIZE/512)

        end = offset + len;

        if (end > b->bm_words) {
                printk(KERN_ALERT "drbd: bm_memset end > bm_words\n");
                return;
        }

        while (offset < end) {
                do_now = min_t(size_t, ALIGN(offset + 1, LWPP), end) - offset;
                p_addr = bm_map_paddr(b, offset);
                bm = p_addr + MLPP(offset);
                if (bm+do_now > p_addr + LWPP) {
                        printk(KERN_ALERT "drbd: BUG BUG BUG! p_addr:%p bm:%p do_now:%d\n",
                               p_addr, bm, (int)do_now);
                        break; /* breaks to after catch_oob_access_end() only! */
                }
                memset(bm, c, do_now * sizeof(long));
                bm_unmap(p_addr);
                offset += do_now;
        }
}

/*
 * make sure the bitmap has enough room for the attached storage,
 * if necessary, resize.
 * called whenever we may have changed the device size.
 * returns -ENOMEM if we could not allocate enough memory, 0 on success.
 * In case this is actually a resize, we copy the old bitmap into the new one.
 * Otherwise, the bitmap is initialized to all bits set.
 */
int drbd_bm_resize(struct drbd_conf *mdev, sector_t capacity, int set_new_bits)
{
        struct drbd_bitmap *b = mdev->bitmap;
        unsigned long bits, words, owords, obits, *p_addr, *bm;
        unsigned long want, have, onpages; /* number of pages */
        struct page **npages, **opages = NULL;
        int err = 0, growing;
        int opages_vmalloced;

        ERR_IF(!b) return -ENOMEM;

        drbd_bm_lock(mdev, "resize");

        dev_info(DEV, "drbd_bm_resize called with capacity == %llu\n",
                        (unsigned long long)capacity);

        if (capacity == b->bm_dev_capacity)
                goto out;

        opages_vmalloced = test_bit(BM_P_VMALLOCED, &b->bm_flags);

        if (capacity == 0) {
                spin_lock_irq(&b->bm_lock);
                opages = b->bm_pages;
                onpages = b->bm_number_of_pages;
                owords = b->bm_words;
                b->bm_pages = NULL;
                b->bm_number_of_pages =
                b->bm_set =
                b->bm_bits =
                b->bm_words =
                b->bm_dev_capacity = 0;
                spin_unlock_irq(&b->bm_lock);
                bm_free_pages(opages, onpages);
                bm_vk_free(opages, opages_vmalloced);
                goto out;
        }
        bits = BM_SECT_TO_BIT(ALIGN(capacity, BM_SECT_PER_BIT));

        /* if we would use
           words = ALIGN(bits,BITS_PER_LONG) >> LN2_BPL;
           a 32bit host could present the wrong number of words
           to a 64bit host.
         */
        words = ALIGN(bits, 64) >> LN2_BPL;
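
        /* Example: bits == 100 gives ALIGN(100, 64) == 128, i.e. 2 long
         * words on a 64 bit host and 4 on a 32 bit host -- both sides
         * describe the same 16 bytes of bitmap. */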

        if (get_ldev(mdev)) {
                D_ASSERT((u64)bits <= (((u64)mdev->ldev->md.md_size_sect-MD_BM_OFFSET) << 12));
                put_ldev(mdev);
        }

        /* one extra long to catch off by one errors */
        want = ALIGN((words+1)*sizeof(long), PAGE_SIZE) >> PAGE_SHIFT;
        have = b->bm_number_of_pages;
        if (want == have) {
                D_ASSERT(b->bm_pages != NULL);
                npages = b->bm_pages;
        } else {
                if (drbd_insert_fault(mdev, DRBD_FAULT_BM_ALLOC))
                        npages = NULL;
                else
                        npages = bm_realloc_pages(b, want);
        }

        if (!npages) {
                err = -ENOMEM;
                goto out;
        }

        spin_lock_irq(&b->bm_lock);
        opages = b->bm_pages;
        owords = b->bm_words;
        obits  = b->bm_bits;

        growing = bits > obits;
        if (opages && growing && set_new_bits)
                bm_set_surplus(b);

        b->bm_pages = npages;
        b->bm_number_of_pages = want;
        b->bm_bits  = bits;
        b->bm_words = words;
        b->bm_dev_capacity = capacity;

        if (growing) {
                if (set_new_bits) {
                        bm_memset(b, owords, 0xff, words-owords);
                        b->bm_set += bits - obits;
                } else
                        bm_memset(b, owords, 0x00, words-owords);
        }

        if (want < have) {
                /* implicit: (opages != NULL) && (opages != npages) */
                bm_free_pages(opages + want, have - want);
        }

        p_addr = bm_map_paddr(b, words);
        bm = p_addr + MLPP(words);
        *bm = DRBD_MAGIC;
        bm_unmap(p_addr);

        (void)bm_clear_surplus(b);

        spin_unlock_irq(&b->bm_lock);
        if (opages != npages)
                bm_vk_free(opages, opages_vmalloced);
        if (!growing)
                b->bm_set = bm_count_bits(b);
        dev_info(DEV, "resync bitmap: bits=%lu words=%lu\n", bits, words);

 out:
        drbd_bm_unlock(mdev);
        return err;
}

/* inherently racy:
 * if not protected by other means, return value may be out of date when
 * leaving this function...
 * we still need to lock it, since it is important that this returns
 * bm_set == 0 precisely.
 *
 * maybe bm_set should be atomic_t ?
 */
unsigned long _drbd_bm_total_weight(struct drbd_conf *mdev)
{
        struct drbd_bitmap *b = mdev->bitmap;
        unsigned long s;
        unsigned long flags;

        ERR_IF(!b) return 0;
        ERR_IF(!b->bm_pages) return 0;

        spin_lock_irqsave(&b->bm_lock, flags);
        s = b->bm_set;
        spin_unlock_irqrestore(&b->bm_lock, flags);

        return s;
}

unsigned long drbd_bm_total_weight(struct drbd_conf *mdev)
{
        unsigned long s;
        /* if I don't have a disk, I don't know about out-of-sync status */
        if (!get_ldev_if_state(mdev, D_NEGOTIATING))
                return 0;
        s = _drbd_bm_total_weight(mdev);
        put_ldev(mdev);
        return s;
}

size_t drbd_bm_words(struct drbd_conf *mdev)
{
        struct drbd_bitmap *b = mdev->bitmap;
        ERR_IF(!b) return 0;
        ERR_IF(!b->bm_pages) return 0;

        return b->bm_words;
}

unsigned long drbd_bm_bits(struct drbd_conf *mdev)
{
        struct drbd_bitmap *b = mdev->bitmap;
        ERR_IF(!b) return 0;

        return b->bm_bits;
}

/* merge number words from buffer into the bitmap starting at offset.
 * buffer[i] is expected to be little endian unsigned long.
 * bitmap must be locked by drbd_bm_lock.
 * currently only used from receive_bitmap.
 */
void drbd_bm_merge_lel(struct drbd_conf *mdev, size_t offset, size_t number,
                        unsigned long *buffer)
{
        struct drbd_bitmap *b = mdev->bitmap;
        unsigned long *p_addr, *bm;
        unsigned long word, bits;
        size_t end, do_now;

        end = offset + number;

        ERR_IF(!b) return;
        ERR_IF(!b->bm_pages) return;
        if (number == 0)
                return;
        WARN_ON(offset >= b->bm_words);
        WARN_ON(end    >  b->bm_words);

        spin_lock_irq(&b->bm_lock);
        while (offset < end) {
                do_now = min_t(size_t, ALIGN(offset+1, LWPP), end) - offset;
                p_addr = bm_map_paddr(b, offset);
                bm = p_addr + MLPP(offset);
                offset += do_now;
                while (do_now--) {
                        bits = hweight_long(*bm);
                        word = *bm | *buffer++;
                        *bm++ = word;
                        b->bm_set += hweight_long(word) - bits;
                }
                bm_unmap(p_addr);
        }
        /* with 32bit <-> 64bit cross-platform connect
         * this is only correct for current usage,
         * where we _know_ that we are 64 bit aligned,
         * and know that this function is used in this way, too...
         */
        if (end == b->bm_words)
                b->bm_set -= bm_clear_surplus(b);

        spin_unlock_irq(&b->bm_lock);
}
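
/* Example: if *bm == 0x3 and *buffer == 0x6, the merged word is 0x7;
 * its hweight goes from 2 to 3, so b->bm_set is incremented by 1. */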

/* copy number words from the bitmap starting at offset into the buffer.
 * buffer[i] will be little endian unsigned long.
 */
void drbd_bm_get_lel(struct drbd_conf *mdev, size_t offset, size_t number,
                     unsigned long *buffer)
{
        struct drbd_bitmap *b = mdev->bitmap;
        unsigned long *p_addr, *bm;
        size_t end, do_now;

        end = offset + number;

        ERR_IF(!b) return;
        ERR_IF(!b->bm_pages) return;

        spin_lock_irq(&b->bm_lock);
        if ((offset >= b->bm_words) ||
            (end    >  b->bm_words) ||
            (number <= 0))
                dev_err(DEV, "offset=%lu number=%lu bm_words=%lu\n",
                        (unsigned long) offset,
                        (unsigned long) number,
                        (unsigned long) b->bm_words);
        else {
                while (offset < end) {
                        do_now = min_t(size_t, ALIGN(offset+1, LWPP), end) - offset;
                        p_addr = bm_map_paddr(b, offset);
                        bm = p_addr + MLPP(offset);
                        offset += do_now;
                        while (do_now--)
                                *buffer++ = *bm++;
                        bm_unmap(p_addr);
                }
        }
        spin_unlock_irq(&b->bm_lock);
}

/* set all bits in the bitmap */
void drbd_bm_set_all(struct drbd_conf *mdev)
{
        struct drbd_bitmap *b = mdev->bitmap;
        ERR_IF(!b) return;
        ERR_IF(!b->bm_pages) return;

        spin_lock_irq(&b->bm_lock);
        bm_memset(b, 0, 0xff, b->bm_words);
        (void)bm_clear_surplus(b);
        b->bm_set = b->bm_bits;
        spin_unlock_irq(&b->bm_lock);
}

/* clear all bits in the bitmap */
void drbd_bm_clear_all(struct drbd_conf *mdev)
{
        struct drbd_bitmap *b = mdev->bitmap;
        ERR_IF(!b) return;
        ERR_IF(!b->bm_pages) return;

        spin_lock_irq(&b->bm_lock);
        bm_memset(b, 0, 0, b->bm_words);
        b->bm_set = 0;
        spin_unlock_irq(&b->bm_lock);
}

static void bm_async_io_complete(struct bio *bio, int error)
{
        struct drbd_bitmap *b = bio->bi_private;
        int uptodate = bio_flagged(bio, BIO_UPTODATE);

        /* strange behavior of some lower level drivers...
         * fail the request by clearing the uptodate flag,
         * but do not return any error?!
         * do we want to WARN() on this? */
        if (!error && !uptodate)
                error = -EIO;

        if (error) {
                /* doh. what now?
                 * for now, set all bits, and flag MD_IO_ERROR */
                __set_bit(BM_MD_IO_ERROR, &b->bm_flags);
        }
        if (atomic_dec_and_test(&b->bm_async_io))
                wake_up(&b->bm_io_wait);

        bio_put(bio);
}

static void bm_page_io_async(struct drbd_conf *mdev, struct drbd_bitmap *b, int page_nr, int rw) __must_hold(local)
{
        /* we are in process context. we always get a bio */
        struct bio *bio = bio_alloc(GFP_KERNEL, 1);
        unsigned int len;
        sector_t on_disk_sector =
                mdev->ldev->md.md_offset + mdev->ldev->md.bm_offset;
        on_disk_sector += ((sector_t)page_nr) << (PAGE_SHIFT-9);

        /* this might happen with very small
         * flexible external meta data device */
        len = min_t(unsigned int, PAGE_SIZE,
                (drbd_md_last_sector(mdev->ldev) - on_disk_sector + 1)<<9);

        bio->bi_bdev = mdev->ldev->md_bdev;
        bio->bi_sector = on_disk_sector;
        bio_add_page(bio, b->bm_pages[page_nr], len, 0);
        bio->bi_private = b;
        bio->bi_end_io = bm_async_io_complete;

        if (drbd_insert_fault(mdev, (rw & WRITE) ? DRBD_FAULT_MD_WR : DRBD_FAULT_MD_RD)) {
                bio->bi_rw |= rw;
                bio_endio(bio, -EIO);
        } else {
                submit_bio(rw, bio);
        }
}

/*
 * bm_rw: read/write the whole bitmap from/to its on disk location.
 */
static int bm_rw(struct drbd_conf *mdev, int rw) __must_hold(local)
{
        struct drbd_bitmap *b = mdev->bitmap;
        /* sector_t sector; */
        int bm_words, num_pages, i;
        unsigned long now;
        char ppb[10];
        int err = 0;

        WARN_ON(!bm_is_locked(b));

        /* no spinlock here, the drbd_bm_lock should be enough! */

        bm_words  = drbd_bm_words(mdev);
        num_pages = (bm_words*sizeof(long) + PAGE_SIZE-1) >> PAGE_SHIFT;

        now = jiffies;
        atomic_set(&b->bm_async_io, num_pages);
        __clear_bit(BM_MD_IO_ERROR, &b->bm_flags);

        /* let the layers below us try to merge these bios... */
        for (i = 0; i < num_pages; i++)
                bm_page_io_async(mdev, b, i, rw);

        wait_event(b->bm_io_wait, atomic_read(&b->bm_async_io) == 0);

        if (test_bit(BM_MD_IO_ERROR, &b->bm_flags)) {
                dev_alert(DEV, "we had at least one MD IO ERROR during bitmap IO\n");
                drbd_chk_io_error(mdev, 1, true);
                err = -EIO;
        }

        now = jiffies;
        if (rw == WRITE) {
                drbd_md_flush(mdev);
        } else /* rw == READ */ {
                b->bm_set = bm_count_bits(b);
                dev_info(DEV, "recounting of set bits took additional %lu jiffies\n",
                     jiffies - now);
        }
        now = b->bm_set;

        dev_info(DEV, "%s (%lu bits) marked out-of-sync by on disk bit-map.\n",
             ppsize(ppb, now << (BM_BLOCK_SHIFT-10)), now);

        return err;
}

/**
 * drbd_bm_read() - Read the whole bitmap from its on disk location.
 * @mdev: DRBD device.
 */
int drbd_bm_read(struct drbd_conf *mdev) __must_hold(local)
{
        return bm_rw(mdev, READ);
}

/**
 * drbd_bm_write() - Write the whole bitmap to its on disk location.
 * @mdev: DRBD device.
 */
int drbd_bm_write(struct drbd_conf *mdev) __must_hold(local)
{
        return bm_rw(mdev, WRITE);
}

/**
 * drbd_bm_write_sect() - Writes a 512 (MD_SECTOR_SIZE) byte piece of the bitmap
 * @mdev: DRBD device.
 * @enr:  Extent number in the resync lru (happens to be sector offset)
 *
 * The BM_EXT_SIZE is on purpose exactly the amount of the bitmap covered
 * by a single sector write.  Therefore enr == sector offset from the
 * start of the bitmap.
 */
int drbd_bm_write_sect(struct drbd_conf *mdev, unsigned long enr) __must_hold(local)
{
        sector_t on_disk_sector = enr + mdev->ldev->md.md_offset
                                      + mdev->ldev->md.bm_offset;
        int bm_words, num_words, offset;
        int err = 0;

        mutex_lock(&mdev->md_io_mutex);
        bm_words  = drbd_bm_words(mdev);
        offset    = S2W(enr);   /* word offset into bitmap */
        num_words = min(S2W(1), bm_words - offset);
        if (num_words < S2W(1))
                memset(page_address(mdev->md_io_page), 0, MD_SECTOR_SIZE);
        drbd_bm_get_lel(mdev, offset, num_words,
                        page_address(mdev->md_io_page));
        if (!drbd_md_sync_page_io(mdev, mdev->ldev, on_disk_sector, WRITE)) {
                int i;
                err = -EIO;
                dev_err(DEV, "IO ERROR writing bitmap sector %lu "
                    "(meta-disk sector %llus)\n",
                    enr, (unsigned long long)on_disk_sector);
                drbd_chk_io_error(mdev, 1, true);
                for (i = 0; i < AL_EXT_PER_BM_SECT; i++)
                        drbd_bm_ALe_set_all(mdev, enr*AL_EXT_PER_BM_SECT+i);
        }
        mdev->bm_writ_cnt++;
        mutex_unlock(&mdev->md_io_mutex);
        return err;
}
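
/* Example: one 512 byte bitmap sector holds 512*8 == 4096 bits, each
 * covering a 4 KiB block, so bitmap sector enr == 3 describes device
 * bytes [3*16 MiB, 4*16 MiB) -- exactly one BM_EXT_SIZE extent. */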

/* NOTE
 * find_first_bit returns int, we return unsigned long.
 * should not make much difference anyway, but ...
 *
 * this returns a bit number, NOT a sector!
 */
#define BPP_MASK ((1UL << (PAGE_SHIFT+3)) - 1)
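/* Example: with 4 KiB pages, BPP_MASK == (1UL << 15) - 1 == 32767.
 * For bm_fo == 40000: bit_offset == 40000 & ~BPP_MASK == 32768 (the
 * first bit of page 1), and the in-page search starts at
 * 40000 & BPP_MASK == 7232. */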
static unsigned long __bm_find_next(struct drbd_conf *mdev, unsigned long bm_fo,
        const int find_zero_bit, const enum km_type km)
{
        struct drbd_bitmap *b = mdev->bitmap;
        unsigned long i = -1UL;
        unsigned long *p_addr;
        unsigned long bit_offset; /* bit offset of the mapped page. */

        if (bm_fo > b->bm_bits) {
                dev_err(DEV, "bm_fo=%lu bm_bits=%lu\n", bm_fo, b->bm_bits);
        } else {
                while (bm_fo < b->bm_bits) {
                        unsigned long offset;
                        bit_offset = bm_fo & ~BPP_MASK; /* bit offset of the page */
                        offset = bit_offset >> LN2_BPL; /* word offset of the page */
                        p_addr = __bm_map_paddr(b, offset, km);

                        if (find_zero_bit)
                                i = generic_find_next_zero_le_bit(p_addr, PAGE_SIZE*8, bm_fo & BPP_MASK);
                        else
                                i = generic_find_next_le_bit(p_addr, PAGE_SIZE*8, bm_fo & BPP_MASK);

                        __bm_unmap(p_addr, km);
                        if (i < PAGE_SIZE*8) {
                                i = bit_offset + i;
                                if (i >= b->bm_bits)
                                        break;
                                goto found;
                        }
                        bm_fo = bit_offset + PAGE_SIZE*8;
                }
                i = -1UL;
        }
 found:
        return i;
}

static unsigned long bm_find_next(struct drbd_conf *mdev,
        unsigned long bm_fo, const int find_zero_bit)
{
        struct drbd_bitmap *b = mdev->bitmap;
        unsigned long i = -1UL;

        ERR_IF(!b) return i;
        ERR_IF(!b->bm_pages) return i;

        spin_lock_irq(&b->bm_lock);
        if (bm_is_locked(b))
                bm_print_lock_info(mdev);

        i = __bm_find_next(mdev, bm_fo, find_zero_bit, KM_IRQ1);

        spin_unlock_irq(&b->bm_lock);
        return i;
}

unsigned long drbd_bm_find_next(struct drbd_conf *mdev, unsigned long bm_fo)
{
        return bm_find_next(mdev, bm_fo, 0);
}

#if 0
/* not yet needed for anything. */
unsigned long drbd_bm_find_next_zero(struct drbd_conf *mdev, unsigned long bm_fo)
{
        return bm_find_next(mdev, bm_fo, 1);
}
#endif

/* does not spin_lock_irqsave.
 * you must take drbd_bm_lock() first */
unsigned long _drbd_bm_find_next(struct drbd_conf *mdev, unsigned long bm_fo)
{
        /* WARN_ON(!bm_is_locked(mdev)); */
        return __bm_find_next(mdev, bm_fo, 0, KM_USER1);
}

unsigned long _drbd_bm_find_next_zero(struct drbd_conf *mdev, unsigned long bm_fo)
{
        /* WARN_ON(!bm_is_locked(mdev)); */
        return __bm_find_next(mdev, bm_fo, 1, KM_USER1);
}

/* returns number of bits actually changed.
 * for val != 0, we change 0 -> 1, return code positive
 * for val == 0, we change 1 -> 0, return code negative
 * wants bitnr, not sector.
 * expected to be called for only a few bits (e - s about BITS_PER_LONG).
 * Must hold bitmap lock already. */
static int __bm_change_bits_to(struct drbd_conf *mdev, const unsigned long s,
        unsigned long e, int val, const enum km_type km)
{
        struct drbd_bitmap *b = mdev->bitmap;
        unsigned long *p_addr = NULL;
        unsigned long bitnr;
        unsigned long last_page_nr = -1UL;
        int c = 0;

        if (e >= b->bm_bits) {
                dev_err(DEV, "ASSERT FAILED: bit_s=%lu bit_e=%lu bm_bits=%lu\n",
                                s, e, b->bm_bits);
                e = b->bm_bits ? b->bm_bits - 1 : 0;
        }
        for (bitnr = s; bitnr <= e; bitnr++) {
                unsigned long offset = bitnr>>LN2_BPL;
                unsigned long page_nr = offset >> (PAGE_SHIFT - LN2_BPL + 3);
                if (page_nr != last_page_nr) {
                        if (p_addr)
                                __bm_unmap(p_addr, km);
                        p_addr = __bm_map_paddr(b, offset, km);
                        last_page_nr = page_nr;
                }
                if (val)
                        c += (0 == generic___test_and_set_le_bit(bitnr & BPP_MASK, p_addr));
                else
                        c -= (0 != generic___test_and_clear_le_bit(bitnr & BPP_MASK, p_addr));
        }
        if (p_addr)
                __bm_unmap(p_addr, km);
        b->bm_set += c;
        return c;
}
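
/* Example: with bits 0..7 all clear, __bm_change_bits_to(mdev, 0, 7, 1, km)
 * returns +8 (eight 0 -> 1 transitions); a second call with val == 0
 * then returns -8. */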

/* returns number of bits actually changed.
 * for val != 0, we change 0 -> 1, return code positive
 * for val == 0, we change 1 -> 0, return code negative
 * wants bitnr, not sector */
static int bm_change_bits_to(struct drbd_conf *mdev, const unsigned long s,
        const unsigned long e, int val)
{
        unsigned long flags;
        struct drbd_bitmap *b = mdev->bitmap;
        int c = 0;

        ERR_IF(!b) return 1;
        ERR_IF(!b->bm_pages) return 0;

        spin_lock_irqsave(&b->bm_lock, flags);
        if (bm_is_locked(b))
                bm_print_lock_info(mdev);

        c = __bm_change_bits_to(mdev, s, e, val, KM_IRQ1);

        spin_unlock_irqrestore(&b->bm_lock, flags);
        return c;
}

/* returns number of bits changed 0 -> 1 */
int drbd_bm_set_bits(struct drbd_conf *mdev, const unsigned long s, const unsigned long e)
{
        return bm_change_bits_to(mdev, s, e, 1);
}

/* returns number of bits changed 1 -> 0 */
int drbd_bm_clear_bits(struct drbd_conf *mdev, const unsigned long s, const unsigned long e)
{
        return -bm_change_bits_to(mdev, s, e, 0);
}

/* sets all bits in full words,
 * from first_word up to, but not including, last_word */
static inline void bm_set_full_words_within_one_page(struct drbd_bitmap *b,
                int page_nr, int first_word, int last_word)
{
        int i;
        int bits;
        unsigned long *paddr = kmap_atomic(b->bm_pages[page_nr], KM_USER0);
        for (i = first_word; i < last_word; i++) {
                bits = hweight_long(paddr[i]);
                paddr[i] = ~0UL;
                b->bm_set += BITS_PER_LONG - bits;
        }
        kunmap_atomic(paddr, KM_USER0);
}

/* Same thing as drbd_bm_set_bits, but without taking the spin_lock_irqsave.
 * You must first drbd_bm_lock().
 * Can be called to set the whole bitmap in one go.
 * Sets bits from s to e _inclusive_. */
void _drbd_bm_set_bits(struct drbd_conf *mdev, const unsigned long s, const unsigned long e)
{
        /* First set_bit from the first bit (s)
         * up to the next long boundary (sl),
         * then assign full words up to the last long boundary (el),
         * then set_bit up to and including the last bit (e).
         *
         * Do not use memset, because we must account for changes,
         * so we need to loop over the words with hweight() anyway.
         */
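        /* Example: s == 70, e == 300 on 64 bit gives sl == 128 and
         * el == 256: bits 70..127 and 256..300 are set bit by bit,
         * while the two full words covering bits 128..255 are
         * assigned in one go. */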
        unsigned long sl = ALIGN(s,BITS_PER_LONG);
        unsigned long el = (e+1) & ~((unsigned long)BITS_PER_LONG-1);
        int first_page;
        int last_page;
        int page_nr;
        int first_word;
        int last_word;

        if (e - s <= 3*BITS_PER_LONG) {
                /* don't bother; el and sl may even be wrong. */
                __bm_change_bits_to(mdev, s, e, 1, KM_USER0);
                return;
        }

        /* difference is large enough that we can trust sl and el */

        /* bits filling the current long */
        if (sl)
                __bm_change_bits_to(mdev, s, sl-1, 1, KM_USER0);

        first_page = sl >> (3 + PAGE_SHIFT);
        last_page = el >> (3 + PAGE_SHIFT);

        /* MLPP: modulo longs per page */
        /* LWPP: long words per page */
        first_word = MLPP(sl >> LN2_BPL);
        last_word = LWPP;

        /* first and full pages, unless first page == last page */
        for (page_nr = first_page; page_nr < last_page; page_nr++) {
                bm_set_full_words_within_one_page(mdev->bitmap, page_nr, first_word, last_word);
                cond_resched();
                first_word = 0;
        }

        /* last page (or the only page, if first page == last page) */
        last_word = MLPP(el >> LN2_BPL);
        bm_set_full_words_within_one_page(mdev->bitmap, last_page, first_word, last_word);

        /* possibly trailing bits.
         * example: (e & 63) == 63, el will be e+1.
         * if that even was the very last bit,
         * it would trigger an assert in __bm_change_bits_to()
         */
        if (el <= e)
                __bm_change_bits_to(mdev, el, e, 1, KM_USER0);
}

/* returns bit state
 * wants bitnr, NOT sector.
 * inherently racy... area needs to be locked by means of {al,rs}_lru
 *  1 ... bit set
 *  0 ... bit not set
 * -1 ... first out of bounds access, stop testing for bits!
 */
int drbd_bm_test_bit(struct drbd_conf *mdev, const unsigned long bitnr)
{
        unsigned long flags;
        struct drbd_bitmap *b = mdev->bitmap;
        unsigned long *p_addr;
        int i;

        ERR_IF(!b) return 0;
        ERR_IF(!b->bm_pages) return 0;

        spin_lock_irqsave(&b->bm_lock, flags);
        if (bm_is_locked(b))
                bm_print_lock_info(mdev);
        if (bitnr < b->bm_bits) {
                unsigned long offset = bitnr>>LN2_BPL;
                p_addr = bm_map_paddr(b, offset);
                i = generic_test_le_bit(bitnr & BPP_MASK, p_addr) ? 1 : 0;
                bm_unmap(p_addr);
        } else if (bitnr == b->bm_bits) {
                i = -1;
        } else { /* (bitnr > b->bm_bits) */
                dev_err(DEV, "bitnr=%lu > bm_bits=%lu\n", bitnr, b->bm_bits);
                i = 0;
        }

        spin_unlock_irqrestore(&b->bm_lock, flags);
        return i;
}

/* returns number of bits set in the range [s, e] */
int drbd_bm_count_bits(struct drbd_conf *mdev, const unsigned long s, const unsigned long e)
{
        unsigned long flags;
        struct drbd_bitmap *b = mdev->bitmap;
        unsigned long *p_addr = NULL, page_nr = -1;
        unsigned long bitnr;
        int c = 0;
        size_t w;

        /* If this is called without a bitmap, that is a bug.  But just to be
         * robust in case we screwed up elsewhere, in that case pretend there
         * was one dirty bit in the requested area, so we won't try to do a
         * local read there (no bitmap probably implies no disk) */
        ERR_IF(!b) return 1;
        ERR_IF(!b->bm_pages) return 1;

        spin_lock_irqsave(&b->bm_lock, flags);
        if (bm_is_locked(b))
                bm_print_lock_info(mdev);
        for (bitnr = s; bitnr <= e; bitnr++) {
                w = bitnr >> LN2_BPL;
                if (page_nr != w >> (PAGE_SHIFT - LN2_BPL + 3)) {
                        page_nr = w >> (PAGE_SHIFT - LN2_BPL + 3);
                        if (p_addr)
                                bm_unmap(p_addr);
                        p_addr = bm_map_paddr(b, w);
                }
                ERR_IF (bitnr >= b->bm_bits) {
                        dev_err(DEV, "bitnr=%lu bm_bits=%lu\n", bitnr, b->bm_bits);
                } else {
                        c += (0 != generic_test_le_bit(bitnr - (page_nr << (PAGE_SHIFT+3)), p_addr));
                }
        }
        if (p_addr)
                bm_unmap(p_addr);
        spin_unlock_irqrestore(&b->bm_lock, flags);
        return c;
}

/* inherently racy...
 * return value may be already out-of-date when this function returns.
 * but the general usage is that this is only used during a cstate when bits
 * are only cleared, not set, and typically we only care for the case when
 * the return value is zero, or we already "locked" this "bitmap extent" by
 * other means.
 *
 * enr is bm-extent number, since we chose to name one sector (512 bytes)
 * worth of the bitmap a "bitmap extent".
 *
 * TODO
 * I think since we use it like a reference count, we should use the real
 * reference count of some bitmap extent element from some lru instead...
 */
int drbd_bm_e_weight(struct drbd_conf *mdev, unsigned long enr)
{
        struct drbd_bitmap *b = mdev->bitmap;
        int count, s, e;
        unsigned long flags;
        unsigned long *p_addr, *bm;

        ERR_IF(!b) return 0;
        ERR_IF(!b->bm_pages) return 0;

        spin_lock_irqsave(&b->bm_lock, flags);
        if (bm_is_locked(b))
                bm_print_lock_info(mdev);

        s = S2W(enr);
        e = min((size_t)S2W(enr+1), b->bm_words);
        count = 0;
        if (s < b->bm_words) {
                int n = e-s;
                p_addr = bm_map_paddr(b, s);
                bm = p_addr + MLPP(s);
                while (n--)
                        count += hweight_long(*bm++);
                bm_unmap(p_addr);
        } else {
                dev_err(DEV, "start offset (%d) too large in drbd_bm_e_weight\n", s);
        }
        spin_unlock_irqrestore(&b->bm_lock, flags);
        return count;
}

/* set all bits covered by the AL-extent al_enr */
unsigned long drbd_bm_ALe_set_all(struct drbd_conf *mdev, unsigned long al_enr)
{
        struct drbd_bitmap *b = mdev->bitmap;
        unsigned long *p_addr, *bm;
        unsigned long weight;
        int count, s, e, i, do_now;
        ERR_IF(!b) return 0;
        ERR_IF(!b->bm_pages) return 0;

        spin_lock_irq(&b->bm_lock);
        if (bm_is_locked(b))
                bm_print_lock_info(mdev);
        weight = b->bm_set;

        s = al_enr * BM_WORDS_PER_AL_EXT;
        e = min_t(size_t, s + BM_WORDS_PER_AL_EXT, b->bm_words);
        /* assert that s and e are on the same page */
        D_ASSERT((e-1) >> (PAGE_SHIFT - LN2_BPL + 3)
              ==  s    >> (PAGE_SHIFT - LN2_BPL + 3));
        count = 0;
        if (s < b->bm_words) {
                i = do_now = e-s;
                p_addr = bm_map_paddr(b, s);
                bm = p_addr + MLPP(s);
                while (i--) {
                        count += hweight_long(*bm);
                        *bm = -1UL;
                        bm++;
                }
                bm_unmap(p_addr);
                b->bm_set += do_now*BITS_PER_LONG - count;
                if (e == b->bm_words)
                        b->bm_set -= bm_clear_surplus(b);
        } else {
                dev_err(DEV, "start offset (%d) too large in drbd_bm_ALe_set_all\n", s);
        }
        weight = b->bm_set - weight;
        spin_unlock_irq(&b->bm_lock);
        return weight;
}