Btrfs: Fix the defragmentation code and the block relocation code for data=ordered

Before setting an extent to delalloc, the code needs to wait for
pending ordered extents.

Also, the relocation code needs to wait for ordered IO before scanning
the block group again.  This is because the extents are not removed
until the IO for the new extents is finished.

Signed-off-by: Chris Mason <chris.mason@oracle.com>
diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c
index febc6295..f92b297 100644
--- a/fs/btrfs/extent-tree.c
+++ b/fs/btrfs/extent-tree.c
@@ -2640,6 +2640,7 @@
 	struct file_ra_state *ra;
 	unsigned long total_read = 0;
 	unsigned long ra_pages;
+	struct btrfs_ordered_extent *ordered;
 	struct btrfs_trans_handle *trans;
 
 	ra = kzalloc(sizeof(*ra), GFP_NOFS);
@@ -2658,9 +2659,9 @@
 				       calc_ra(i, last_index, ra_pages));
 		}
 		total_read++;
-		if (((u64)i << PAGE_CACHE_SHIFT) > inode->i_size)
+again:
+		if (((u64)i << PAGE_CACHE_SHIFT) > i_size_read(inode))
 			goto truncate_racing;
-
 		page = grab_cache_page(inode->i_mapping, i);
 		if (!page) {
 			goto out_unlock;
@@ -2674,18 +2675,24 @@
 				goto out_unlock;
 			}
 		}
-#if LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,18)
-		ClearPageDirty(page);
-#else
-		cancel_dirty_page(page, PAGE_CACHE_SIZE);
-#endif
 		wait_on_page_writeback(page);
-		set_page_extent_mapped(page);
+
 		page_start = (u64)page->index << PAGE_CACHE_SHIFT;
 		page_end = page_start + PAGE_CACHE_SIZE - 1;
-
 		lock_extent(io_tree, page_start, page_end, GFP_NOFS);
 
+		ordered = btrfs_lookup_ordered_extent(inode, page_start);
+		if (ordered) {
+			unlock_extent(io_tree, page_start, page_end, GFP_NOFS);
+			unlock_page(page);
+			page_cache_release(page);
+			btrfs_start_ordered_extent(inode, ordered, 1);
+			btrfs_put_ordered_extent(ordered);
+			goto again;
+		}
+		set_page_extent_mapped(page);
+
+
 		set_extent_delalloc(io_tree, page_start,
 				    page_end, GFP_NOFS);
 		set_page_dirty(page);
@@ -2694,10 +2701,18 @@
 		unlock_page(page);
 		page_cache_release(page);
 	}
-	balance_dirty_pages_ratelimited_nr(inode->i_mapping,
-					   total_read);
 
 out_unlock:
+	/* we have to start the IO in order to get the ordered extents
+	 * instantiated.  This allows the relocation code to wait
+	 * for all the ordered extents to hit the disk.
+	 *
+	 * Otherwise, it would constantly loop over the same extents
+	 * because the old ones don't get deleted until the IO is
+	 * started.
+	 */
+	btrfs_fdatawrite_range(inode->i_mapping, start, start + len - 1,
+			       WB_SYNC_NONE);
 	kfree(ra);
 	trans = btrfs_start_transaction(BTRFS_I(inode)->root, 1);
 	if (trans) {
@@ -3238,6 +3253,8 @@
 
 		btrfs_clean_old_snapshots(tree_root);
 
+		btrfs_wait_ordered_extents(tree_root);
+
 		trans = btrfs_start_transaction(tree_root, 1);
 		btrfs_commit_transaction(trans, tree_root);
 		mutex_lock(&root->fs_info->alloc_mutex);