/*
 * Copyright (C) 2009 Oracle.  All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License v2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public
 * License along with this program; if not, write to the
 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 * Boston, MA 02110-1307, USA.
 */

#include <linux/sched.h>
#include <linux/sort.h>
#include <linux/ftrace.h>
#include "ctree.h"
#include "delayed-ref.h"
#include "transaction.h"

/*
 * delayed back reference update tracking.  For subvolume trees
 * we queue up extent allocations and backref maintenance for
 * delayed processing.  This avoids deep call chains where we
 * add extents in the middle of btrfs_search_slot, and it allows
 * us to buffer up frequently modified backrefs in an rb tree instead
 * of hammering updates on the extent allocation tree.
 *
 * Right now this code is only used for reference counted trees, but
 * the long term goal is to get rid of the similar code for delayed
 * extent tree modifications.
 */

/*
 * entries in the rb tree are ordered first by the byte number of the
 * extent and then by the byte number of the parent block.
 */
static int comp_entry(struct btrfs_delayed_ref_node *ref,
		      u64 bytenr, u64 parent)
{
	if (bytenr < ref->bytenr)
		return -1;
	if (bytenr > ref->bytenr)
		return 1;
	if (parent < ref->parent)
		return -1;
	if (parent > ref->parent)
		return 1;
	return 0;
}

/*
 * insert a new ref into the rbtree.  This returns any existing refs
 * for the same (bytenr, parent) tuple, or NULL if the new node was
 * properly inserted.
 */
static struct btrfs_delayed_ref_node *tree_insert(struct rb_root *root,
						  u64 bytenr, u64 parent,
						  struct rb_node *node)
{
	struct rb_node **p = &root->rb_node;
	struct rb_node *parent_node = NULL;
	struct btrfs_delayed_ref_node *entry;
	int cmp;

	while (*p) {
		parent_node = *p;
		entry = rb_entry(parent_node, struct btrfs_delayed_ref_node,
				 rb_node);

		cmp = comp_entry(entry, bytenr, parent);
		if (cmp < 0)
			p = &(*p)->rb_left;
		else if (cmp > 0)
			p = &(*p)->rb_right;
		else
			return entry;
	}

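	/*
	 * no existing entry was found: link the new node into the slot
	 * we walked to and rebalance.  This is the standard lib/rbtree.h
	 * insertion idiom: rb_link_node() attaches the node, then
	 * rb_insert_color() restores the red-black invariants.
	 */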
	entry = rb_entry(node, struct btrfs_delayed_ref_node, rb_node);
	rb_link_node(node, parent_node, p);
	rb_insert_color(node, root);
	return NULL;
}

/*
 * find an entry based on (bytenr, parent).  This returns the delayed
 * ref if it was able to find one, or NULL if nothing was in that spot.
 * If last is non-NULL, it is also set to the last entry examined during
 * the search, which leaves the caller positioned near where a match
 * would have been.
 */
static struct btrfs_delayed_ref_node *tree_search(struct rb_root *root,
						  u64 bytenr, u64 parent,
						  struct btrfs_delayed_ref_node **last)
{
	struct rb_node *n = root->rb_node;
	struct btrfs_delayed_ref_node *entry;
	int cmp;

	while (n) {
		entry = rb_entry(n, struct btrfs_delayed_ref_node, rb_node);
		WARN_ON(!entry->in_tree);
		if (last)
			*last = entry;

		cmp = comp_entry(entry, bytenr, parent);
		if (cmp < 0)
			n = n->rb_left;
		else if (cmp > 0)
			n = n->rb_right;
		else
			return entry;
	}
	return NULL;
}

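/*
 * lock the mutex on a single ref head so its refs can be run.  Must be
 * called with the delayed ref spinlock held.  If the mutex is contended
 * we pin the head with a reference, drop the spinlock, and block on the
 * mutex; by the time we get it the head may already have been processed
 * and removed from the tree, in which case -EAGAIN tells the caller to
 * retry with a fresh search.
 */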
int btrfs_delayed_ref_lock(struct btrfs_trans_handle *trans,
			   struct btrfs_delayed_ref_head *head)
{
	struct btrfs_delayed_ref_root *delayed_refs;

	delayed_refs = &trans->transaction->delayed_refs;
	assert_spin_locked(&delayed_refs->lock);
	if (mutex_trylock(&head->mutex))
		return 0;

	atomic_inc(&head->node.refs);
	spin_unlock(&delayed_refs->lock);

	mutex_lock(&head->mutex);
	spin_lock(&delayed_refs->lock);
	if (!head->node.in_tree) {
		mutex_unlock(&head->mutex);
		btrfs_put_delayed_ref(&head->node);
		return -EAGAIN;
	}
	btrfs_put_delayed_ref(&head->node);
	return 0;
}

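/*
 * collect a cluster of ref heads for processing, starting at the first
 * head with bytenr >= start.  Heads are appended to the cluster list
 * until we hit one that is already being processed.  Returns 0 if any
 * heads were added to the cluster, and 1 if nothing runnable was found.
 */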
int btrfs_find_ref_cluster(struct btrfs_trans_handle *trans,
			   struct list_head *cluster, u64 start)
{
	int count = 0;
	struct btrfs_delayed_ref_root *delayed_refs;
	struct rb_node *node;
	struct btrfs_delayed_ref_node *ref;
	struct btrfs_delayed_ref_head *head;

	delayed_refs = &trans->transaction->delayed_refs;
	if (start == 0) {
		node = rb_first(&delayed_refs->root);
	} else {
		ref = NULL;
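		/*
		 * search for a node at (start, (u64)-1).  Head nodes are
		 * keyed with parent == (u64)-1 (see __btrfs_add_delayed_ref
		 * below), so the exact hit or the last node visited lands
		 * us at or near the head for start.  Walk back with
		 * rb_prev() to the first entry whose bytenr is still
		 * >= start.
		 */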
		tree_search(&delayed_refs->root, start, (u64)-1, &ref);
		if (ref) {
			struct btrfs_delayed_ref_node *tmp;

			node = rb_prev(&ref->rb_node);
			while (node) {
				tmp = rb_entry(node,
					       struct btrfs_delayed_ref_node,
					       rb_node);
				if (tmp->bytenr < start)
					break;
				ref = tmp;
				node = rb_prev(&ref->rb_node);
			}
			node = &ref->rb_node;
		} else
			node = rb_first(&delayed_refs->root);
	}
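	/*
	 * walk forward from here, gathering up to 32 ref heads per
	 * cluster.  32 is a batching limit: it keeps one cluster from
	 * monopolizing the tree while still amortizing the search cost.
	 */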
again:
	while (node && count < 32) {
		ref = rb_entry(node, struct btrfs_delayed_ref_node, rb_node);
		if (btrfs_delayed_ref_is_head(ref)) {
			head = btrfs_delayed_node_to_head(ref);
			if (list_empty(&head->cluster)) {
				list_add_tail(&head->cluster, cluster);
				delayed_refs->run_delayed_start =
					head->node.bytenr;
				count++;

				WARN_ON(delayed_refs->num_heads_ready == 0);
				delayed_refs->num_heads_ready--;
			} else if (count) {
				/* the goal of the clustering is to find
				 * extents that are likely to end up in the
				 * same extent leaf on disk.  So, we don't
				 * want them spread all over the tree.  Stop
				 * now if we've hit a head that was already
				 * in use
				 */
				break;
			}
		}
		node = rb_next(node);
	}
	if (count) {
		return 0;
	} else if (start) {
		/*
		 * we've gone to the end of the rbtree without finding any
		 * clusters.  start from the beginning and try again
		 */
		start = 0;
		node = rb_first(&delayed_refs->root);
		goto again;
	}
	return 1;
}

/*
 * This checks to see if there are any delayed refs in the
 * rbtree for a given bytenr.  It returns 1 if it finds any
 * and 0 otherwise.
 *
 * If it only finds a head node, it returns 0.
 *
 * The idea is to use this when deciding if you can safely delete an
 * extent from the extent allocation tree.  There may be a pending
 * ref in the rbtree that adds or removes references, so as long as this
 * returns 1 you need to leave the BTRFS_EXTENT_ITEM in the extent
 * allocation tree.
 */
int btrfs_delayed_ref_pending(struct btrfs_trans_handle *trans, u64 bytenr)
{
	struct btrfs_delayed_ref_node *ref;
	struct btrfs_delayed_ref_root *delayed_refs;
	struct rb_node *prev_node;
	int ret = 0;

	delayed_refs = &trans->transaction->delayed_refs;
	spin_lock(&delayed_refs->lock);

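	/*
	 * the head sorts last among the entries for this bytenr because
	 * its parent is (u64)-1.  So if anything besides the head is
	 * queued for this extent, it sits immediately before the head in
	 * the tree: one rb_prev() and a bytenr compare is enough to tell.
	 */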
	ref = tree_search(&delayed_refs->root, bytenr, (u64)-1, NULL);
	if (ref) {
		prev_node = rb_prev(&ref->rb_node);
		if (!prev_node)
			goto out;
		ref = rb_entry(prev_node, struct btrfs_delayed_ref_node,
			       rb_node);
		if (ref->bytenr == bytenr)
			ret = 1;
	}
out:
	spin_unlock(&delayed_refs->lock);
	return ret;
}

/*
 * helper function to look up the reference count of an extent.
 *
 * the head node for a delayed ref is used to store the sum of all the
 * reference count modifications queued up in the rbtree.  This way you
 * can check to see what the reference count would be if all of the
 * delayed refs are processed.
 */
int btrfs_lookup_extent_ref(struct btrfs_trans_handle *trans,
			    struct btrfs_root *root, u64 bytenr,
			    u64 num_bytes, u32 *refs)
{
	struct btrfs_delayed_ref_node *ref;
	struct btrfs_delayed_ref_head *head;
	struct btrfs_delayed_ref_root *delayed_refs;
	struct btrfs_path *path;
	struct extent_buffer *leaf;
	struct btrfs_extent_item *ei;
	struct btrfs_key key;
	u32 num_refs;
	int ret;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	key.objectid = bytenr;
	key.type = BTRFS_EXTENT_ITEM_KEY;
	key.offset = num_bytes;
	delayed_refs = &trans->transaction->delayed_refs;
again:
	ret = btrfs_search_slot(trans, root->fs_info->extent_root,
				&key, path, 0, 0);
	if (ret < 0)
		goto out;

	if (ret == 0) {
		leaf = path->nodes[0];
		ei = btrfs_item_ptr(leaf, path->slots[0],
				    struct btrfs_extent_item);
		num_refs = btrfs_extent_refs(leaf, ei);
	} else {
		num_refs = 0;
		ret = 0;
	}

	spin_lock(&delayed_refs->lock);
	ref = tree_search(&delayed_refs->root, bytenr, (u64)-1, NULL);
	if (ref) {
		head = btrfs_delayed_node_to_head(ref);
		if (mutex_trylock(&head->mutex)) {
			num_refs += ref->ref_mod;
			mutex_unlock(&head->mutex);
			*refs = num_refs;
			goto out;
		}

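		/*
		 * someone else holds the head's mutex and is processing
		 * its refs.  Pin the head with a reference, drop our
		 * locks, wait for them to finish by taking and releasing
		 * the mutex, then retry the whole lookup from the top.
		 */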
		atomic_inc(&ref->refs);
		spin_unlock(&delayed_refs->lock);

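		/*
		 * drop the path before blocking on the mutex so we aren't
		 * holding references to extent root blocks while we sleep.
		 */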
		btrfs_release_path(root->fs_info->extent_root, path);

		mutex_lock(&head->mutex);
		mutex_unlock(&head->mutex);
		btrfs_put_delayed_ref(ref);
		goto again;
	} else {
		*refs = num_refs;
	}
out:
	spin_unlock(&delayed_refs->lock);
	btrfs_free_path(path);
	return ret;
}

/*
 * helper function to update an extent delayed ref in the
 * rbtree.  existing and update must both have the same
 * bytenr and parent
 *
 * This may free existing if the update cancels out whatever
 * operation it was doing.
 */
static noinline void
update_existing_ref(struct btrfs_trans_handle *trans,
		    struct btrfs_delayed_ref_root *delayed_refs,
		    struct btrfs_delayed_ref_node *existing,
		    struct btrfs_delayed_ref_node *update)
{
	struct btrfs_delayed_ref *existing_ref;
	struct btrfs_delayed_ref *ref;

	existing_ref = btrfs_delayed_node_to_ref(existing);
	ref = btrfs_delayed_node_to_ref(update);

	if (ref->pin)
		existing_ref->pin = 1;

	if (ref->action != existing_ref->action) {
		/*
		 * this is effectively undoing either an add or a
		 * drop.  We decrement the ref_mod, and if it goes
		 * down to zero we just delete the entry without
		 * ever changing the extent allocation tree.
		 */
		existing->ref_mod--;
		if (existing->ref_mod == 0) {
			rb_erase(&existing->rb_node,
				 &delayed_refs->root);
			existing->in_tree = 0;
			btrfs_put_delayed_ref(existing);
			delayed_refs->num_entries--;
			if (trans->delayed_ref_updates)
				trans->delayed_ref_updates--;
		}
	} else {
		if (existing_ref->action == BTRFS_ADD_DELAYED_REF) {
			/* if we're adding refs, make sure all the
			 * details match up.  The extent could
			 * have been totally freed and reallocated
			 * by a different owner before the delayed
			 * ref entries were removed.
			 */
			existing_ref->owner_objectid = ref->owner_objectid;
			existing_ref->generation = ref->generation;
			existing_ref->root = ref->root;
			existing->num_bytes = update->num_bytes;
		}
		/*
		 * the action on the existing ref matches
		 * the action on the ref we're trying to add.
		 * Bump the ref_mod by one so the backref that
		 * is eventually added/removed has the correct
		 * reference count
		 */
		existing->ref_mod += update->ref_mod;
	}
}

/*
 * helper function to update the accounting in the head ref.
 * existing and update must have the same bytenr
 */
static noinline void
update_existing_head_ref(struct btrfs_delayed_ref_node *existing,
			 struct btrfs_delayed_ref_node *update)
{
	struct btrfs_delayed_ref_head *existing_ref;
	struct btrfs_delayed_ref_head *ref;

	existing_ref = btrfs_delayed_node_to_head(existing);
	ref = btrfs_delayed_node_to_head(update);

	if (ref->must_insert_reserved) {
		/* if the extent was freed and then
		 * reallocated before the delayed ref
		 * entries were processed, we can end up
		 * with an existing head ref without
		 * the must_insert_reserved flag set.
		 * Set it again here
		 */
		existing_ref->must_insert_reserved = ref->must_insert_reserved;

		/*
		 * update the num_bytes so we make sure the accounting
		 * is done correctly
		 */
		existing->num_bytes = update->num_bytes;
	}

	/*
	 * update the reference mod on the head to reflect this new operation
	 */
	existing->ref_mod += update->ref_mod;
}

/*
 * helper function to actually insert a delayed ref into the rbtree.
 * this does all the dirty work in terms of maintaining the correct
 * overall modification count in the head node and properly dealing
 * with updating existing nodes as new modifications are queued.
 */
static noinline int __btrfs_add_delayed_ref(struct btrfs_trans_handle *trans,
			struct btrfs_delayed_ref_node *ref,
			u64 bytenr, u64 num_bytes, u64 parent, u64 ref_root,
			u64 ref_generation, u64 owner_objectid, int action,
			int pin)
{
	struct btrfs_delayed_ref_node *existing;
	struct btrfs_delayed_ref *full_ref;
	struct btrfs_delayed_ref_head *head_ref = NULL;
	struct btrfs_delayed_ref_root *delayed_refs;
	int count_mod = 1;
	int must_insert_reserved = 0;

	/*
	 * the head node stores the sum of all the mods, so dropping a ref
	 * should drop the sum in the head node by one.
	 */
	if (parent == (u64)-1 && action == BTRFS_DROP_DELAYED_REF)
		count_mod = -1;

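	/*
	 * parent == (u64)-1 is how callers mark a head node: it is what
	 * btrfs_delayed_ref_is_head() appears to test (see delayed-ref.h),
	 * and it also makes the head sort after every regular ref for the
	 * same bytenr.
	 */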
	/*
	 * BTRFS_ADD_DELAYED_EXTENT means that we need to update
	 * the reserved accounting when the extent is finally added, or
	 * if a later modification deletes the delayed ref without ever
	 * inserting the extent into the extent allocation tree.
	 * ref->must_insert_reserved is the flag used to record
	 * that accounting mods are required.
	 *
	 * Once we record must_insert_reserved, switch the action to
	 * BTRFS_ADD_DELAYED_REF because other special casing is not required.
	 */
	if (action == BTRFS_ADD_DELAYED_EXTENT) {
		must_insert_reserved = 1;
		action = BTRFS_ADD_DELAYED_REF;
	} else {
		must_insert_reserved = 0;
	}

	delayed_refs = &trans->transaction->delayed_refs;

	/* first set the basic ref node struct up */
	atomic_set(&ref->refs, 1);
	ref->bytenr = bytenr;
	ref->parent = parent;
	ref->ref_mod = count_mod;
	ref->in_tree = 1;
	ref->num_bytes = num_bytes;

	if (btrfs_delayed_ref_is_head(ref)) {
		head_ref = btrfs_delayed_node_to_head(ref);
		head_ref->must_insert_reserved = must_insert_reserved;
		INIT_LIST_HEAD(&head_ref->cluster);
		mutex_init(&head_ref->mutex);
	} else {
		full_ref = btrfs_delayed_node_to_ref(ref);
		full_ref->root = ref_root;
		full_ref->generation = ref_generation;
		full_ref->owner_objectid = owner_objectid;
		full_ref->pin = pin;
		full_ref->action = action;
	}

	existing = tree_insert(&delayed_refs->root, bytenr,
			       parent, &ref->rb_node);

	if (existing) {
		if (btrfs_delayed_ref_is_head(ref))
			update_existing_head_ref(existing, ref);
		else
			update_existing_ref(trans, delayed_refs, existing, ref);

		/*
		 * we've updated the existing ref, free the newly
		 * allocated ref
		 */
		kfree(ref);
	} else {
		if (btrfs_delayed_ref_is_head(ref)) {
			delayed_refs->num_heads++;
			delayed_refs->num_heads_ready++;
		}
		delayed_refs->num_entries++;
		trans->delayed_ref_updates++;
	}
	return 0;
}

/*
 * add a delayed ref to the tree.  This does all of the accounting required
 * to make sure the delayed ref is eventually processed before this
 * transaction commits.
 */
int btrfs_add_delayed_ref(struct btrfs_trans_handle *trans,
			  u64 bytenr, u64 num_bytes, u64 parent, u64 ref_root,
			  u64 ref_generation, u64 owner_objectid, int action,
			  int pin)
{
	struct btrfs_delayed_ref *ref;
	struct btrfs_delayed_ref_head *head_ref;
	struct btrfs_delayed_ref_root *delayed_refs;
	int ret;

	ref = kmalloc(sizeof(*ref), GFP_NOFS);
	if (!ref)
		return -ENOMEM;

	/*
	 * the parent = 0 case comes from cases where we don't actually
	 * know the parent yet.  It will get updated later via an add/drop
	 * pair.
	 */
	if (parent == 0)
		parent = bytenr;

	head_ref = kmalloc(sizeof(*head_ref), GFP_NOFS);
	if (!head_ref) {
		kfree(ref);
		return -ENOMEM;
	}
	delayed_refs = &trans->transaction->delayed_refs;
	spin_lock(&delayed_refs->lock);

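	/*
	 * the head is inserted at (bytenr, (u64)-1) so that it sorts
	 * after every plain ref for this extent; the ref itself goes in
	 * at (bytenr, parent).  Both inserts land in the same rbtree, so
	 * one hold of the spinlock covers them.
	 */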
	/*
	 * insert both the head node and the new ref without dropping
	 * the spin lock
	 */
	ret = __btrfs_add_delayed_ref(trans, &head_ref->node, bytenr, num_bytes,
				      (u64)-1, 0, 0, 0, action, pin);
	BUG_ON(ret);

	ret = __btrfs_add_delayed_ref(trans, &ref->node, bytenr, num_bytes,
				      parent, ref_root, ref_generation,
				      owner_objectid, action, pin);
	BUG_ON(ret);
	spin_unlock(&delayed_refs->lock);
	return 0;
}

/*
 * this does a simple search for the head node for a given extent.
 * It must be called with the delayed ref spinlock held, and it returns
 * the head node if one was found, or NULL if not.
 */
struct btrfs_delayed_ref_head *
btrfs_find_delayed_ref_head(struct btrfs_trans_handle *trans, u64 bytenr)
{
	struct btrfs_delayed_ref_node *ref;
	struct btrfs_delayed_ref_root *delayed_refs;

	delayed_refs = &trans->transaction->delayed_refs;
	ref = tree_search(&delayed_refs->root, bytenr, (u64)-1, NULL);
	if (ref)
		return btrfs_delayed_node_to_head(ref);
	return NULL;
}

/*
 * add a delayed ref to the tree.  This does all of the accounting required
 * to make sure the delayed ref is eventually processed before this
 * transaction commits.
 *
 * The main point of this call is to add and remove a backreference in a single
 * shot, taking the lock only once, and only searching for the head node once.
 *
 * It is the same as doing a ref add and delete in two separate calls.
 */
int btrfs_update_delayed_ref(struct btrfs_trans_handle *trans,
			     u64 bytenr, u64 num_bytes, u64 orig_parent,
			     u64 parent, u64 orig_ref_root, u64 ref_root,
			     u64 orig_ref_generation, u64 ref_generation,
			     u64 owner_objectid, int pin)
{
	struct btrfs_delayed_ref *ref;
	struct btrfs_delayed_ref *old_ref;
	struct btrfs_delayed_ref_head *head_ref;
	struct btrfs_delayed_ref_root *delayed_refs;
	int ret;

	ref = kmalloc(sizeof(*ref), GFP_NOFS);
	if (!ref)
		return -ENOMEM;

	old_ref = kmalloc(sizeof(*old_ref), GFP_NOFS);
	if (!old_ref) {
		kfree(ref);
		return -ENOMEM;
	}

	/*
	 * the parent = 0 case comes from cases where we don't actually
	 * know the parent yet.  It will get updated later via an add/drop
	 * pair.
	 */
	if (parent == 0)
		parent = bytenr;
	if (orig_parent == 0)
		orig_parent = bytenr;

	head_ref = kmalloc(sizeof(*head_ref), GFP_NOFS);
	if (!head_ref) {
		kfree(ref);
		kfree(old_ref);
		return -ENOMEM;
	}
	delayed_refs = &trans->transaction->delayed_refs;
	spin_lock(&delayed_refs->lock);

	/*
	 * insert both the head node and the new ref without dropping
	 * the spin lock
	 */
	ret = __btrfs_add_delayed_ref(trans, &head_ref->node, bytenr, num_bytes,
				      (u64)-1, 0, 0, 0,
				      BTRFS_ADD_DELAYED_REF, 0);
	BUG_ON(ret);

	ret = __btrfs_add_delayed_ref(trans, &ref->node, bytenr, num_bytes,
				      parent, ref_root, ref_generation,
				      owner_objectid, BTRFS_ADD_DELAYED_REF, 0);
	BUG_ON(ret);

	ret = __btrfs_add_delayed_ref(trans, &old_ref->node, bytenr, num_bytes,
				      orig_parent, orig_ref_root,
				      orig_ref_generation, owner_objectid,
				      BTRFS_DROP_DELAYED_REF, pin);
	BUG_ON(ret);
	spin_unlock(&delayed_refs->lock);
	return 0;
}