/*
 * Copyright (c) 2000-2006 Silicon Graphics, Inc.
 * All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it would be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write the Free Software Foundation,
 * Inc.,  51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
 */
#include "xfs.h"
#include "xfs_fs.h"
#include "xfs_shared.h"
#include "xfs_format.h"
#include "xfs_log_format.h"
#include "xfs_trans_resv.h"
#include "xfs_mount.h"
#include "xfs_inode.h"
#include "xfs_btree.h"
#include "xfs_bmap_btree.h"
#include "xfs_bmap.h"
#include "xfs_bmap_util.h"
#include "xfs_error.h"
#include "xfs_trans.h"
#include "xfs_trans_space.h"
#include "xfs_iomap.h"
#include "xfs_trace.h"
#include "xfs_icache.h"
#include "xfs_quota.h"
#include "xfs_dquot_item.h"
#include "xfs_dquot.h"

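/* Round a byte offset down to the mount's write I/O alignment. */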
#define XFS_WRITEIO_ALIGN(mp,off)	(((off) >> mp->m_writeio_log) \
					 << mp->m_writeio_log)
#define XFS_WRITE_IMAPS		XFS_BMAP_MAX_NMAP

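/*
 * Round *last_fsb up to the nearest stripe or extent size boundary when
 * the allocation extends the file, so that speculative EOF allocations
 * stay aligned with the underlying geometry.
 */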
STATIC int
xfs_iomap_eof_align_last_fsb(
	xfs_mount_t	*mp,
	xfs_inode_t	*ip,
	xfs_extlen_t	extsize,
	xfs_fileoff_t	*last_fsb)
{
	xfs_extlen_t	align = 0;
	int		eof, error;

	if (!XFS_IS_REALTIME_INODE(ip)) {
		/*
		 * Round up the allocation request to a stripe unit
		 * (m_dalign) boundary if the file size is >= stripe unit
		 * size, and we are allocating past the allocation eof.
		 *
		 * If mounted with the "-o swalloc" option the alignment is
		 * increased from the stripe unit size to the stripe width.
		 */
		if (mp->m_swidth && (mp->m_flags & XFS_MOUNT_SWALLOC))
			align = mp->m_swidth;
		else if (mp->m_dalign)
			align = mp->m_dalign;

		if (align && XFS_ISIZE(ip) < XFS_FSB_TO_B(mp, align))
			align = 0;
	}

	/*
	 * Always round up the allocation request to an extent boundary
	 * (when file on a real-time subvolume or has di_extsize hint).
	 */
	if (extsize) {
		if (align)
			align = roundup_64(align, extsize);
		else
			align = extsize;
	}

	if (align) {
		xfs_fileoff_t	new_last_fsb = roundup_64(*last_fsb, align);

		error = xfs_bmap_eof(ip, new_last_fsb, XFS_DATA_FORK, &eof);
		if (error)
			return error;
		if (eof)
			*last_fsb = new_last_fsb;
	}
	return 0;
}

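/*
 * Complain about a mapping that refers to block zero and return
 * -EFSCORRUPTED to the caller.
 */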
STATIC int
xfs_alert_fsblock_zero(
	xfs_inode_t	*ip,
	xfs_bmbt_irec_t	*imap)
{
	xfs_alert_tag(ip->i_mount, XFS_PTAG_FSBLOCK_ZERO,
			"Access to block zero in inode %llu "
			"start_block: %llx start_off: %llx "
			"blkcnt: %llx extent-state: %x",
		(unsigned long long)ip->i_ino,
		(unsigned long long)imap->br_startblock,
		(unsigned long long)imap->br_startoff,
		(unsigned long long)imap->br_blockcount,
		imap->br_state);
	return -EFSCORRUPTED;
}

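/*
 * Allocate real blocks for a direct I/O write, zeroing and converting
 * them up front for DAX. Called with the ilock held shared; the lock is
 * dropped for transaction reservation and then retaken exclusively.
 */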
int
xfs_iomap_write_direct(
	xfs_inode_t	*ip,
	xfs_off_t	offset,
	size_t		count,
	xfs_bmbt_irec_t *imap,
	int		nmaps)
{
	xfs_mount_t	*mp = ip->i_mount;
	xfs_fileoff_t	offset_fsb;
	xfs_fileoff_t	last_fsb;
	xfs_filblks_t	count_fsb, resaligned;
	xfs_fsblock_t	firstfsb;
	xfs_extlen_t	extsz, temp;
	int		nimaps;
	int		quota_flag;
	int		rt;
	xfs_trans_t	*tp;
	xfs_bmap_free_t free_list;
	uint		qblocks, resblks, resrtextents;
	int		error;
	int		lockmode;
	int		bmapi_flags = XFS_BMAPI_PREALLOC;

	rt = XFS_IS_REALTIME_INODE(ip);
	extsz = xfs_get_extsz_hint(ip);
	lockmode = XFS_ILOCK_SHARED;	/* locked by caller */

	ASSERT(xfs_isilocked(ip, lockmode));

	offset_fsb = XFS_B_TO_FSBT(mp, offset);
	last_fsb = XFS_B_TO_FSB(mp, ((xfs_ufsize_t)(offset + count)));
	if ((offset + count) > XFS_ISIZE(ip)) {
		/*
		 * Assert that the in-core extent list is present since this can
		 * call xfs_iread_extents() and we only have the ilock shared.
		 * This should be safe because the lock was held around a bmapi
		 * call in the caller and we only need it to access the in-core
		 * list.
		 */
		ASSERT(XFS_IFORK_PTR(ip, XFS_DATA_FORK)->if_flags &
								XFS_IFEXTENTS);
		error = xfs_iomap_eof_align_last_fsb(mp, ip, extsz, &last_fsb);
		if (error)
			goto out_unlock;
	} else {
		if (nmaps && (imap->br_startblock == HOLESTARTBLOCK))
			last_fsb = MIN(last_fsb, (xfs_fileoff_t)
					imap->br_blockcount +
					imap->br_startoff);
	}
	count_fsb = last_fsb - offset_fsb;
	ASSERT(count_fsb > 0);

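	/* Round the block reservation out to whole extent size hint units. */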
	resaligned = count_fsb;
	if (unlikely(extsz)) {
		if ((temp = do_mod(offset_fsb, extsz)))
			resaligned += temp;
		if ((temp = do_mod(resaligned, extsz)))
			resaligned += extsz - temp;
	}

	if (unlikely(rt)) {
		resrtextents = qblocks = resaligned;
		resrtextents /= mp->m_sb.sb_rextsize;
		resblks = XFS_DIOSTRAT_SPACE_RES(mp, 0);
		quota_flag = XFS_QMOPT_RES_RTBLKS;
	} else {
		resrtextents = 0;
		resblks = qblocks = XFS_DIOSTRAT_SPACE_RES(mp, resaligned);
		quota_flag = XFS_QMOPT_RES_REGBLKS;
	}

	/*
	 * Drop the shared lock acquired by the caller, attach the dquot if
	 * necessary and move on to transaction setup.
	 */
	xfs_iunlock(ip, lockmode);
	error = xfs_qm_dqattach(ip, 0);
	if (error)
		return error;

	/*
	 * Allocate and set up the transaction.
	 */
	tp = xfs_trans_alloc(mp, XFS_TRANS_DIOSTRAT);

	/*
	 * For DAX, we do not allocate unwritten extents, but instead we zero
	 * the block before we commit the transaction. Ideally we'd like to do
	 * this outside the transaction context, but if we commit and then crash
	 * we may not have zeroed the blocks and this will be exposed on
	 * recovery of the allocation. Hence we must zero before commit.
	 *
	 * Further, if we are mapping unwritten extents here, we need to zero
	 * and convert them to written so that we don't need an unwritten extent
	 * callback for DAX. This also means that we need to be able to dip into
	 * the reserve block pool for bmbt block allocation if there is no space
	 * left but we need to do unwritten extent conversion.
	 */
	if (IS_DAX(VFS_I(ip))) {
		bmapi_flags = XFS_BMAPI_CONVERT | XFS_BMAPI_ZERO;
		if (ISUNWRITTEN(imap)) {
			tp->t_flags |= XFS_TRANS_RESERVE;
			resblks = XFS_DIOSTRAT_SPACE_RES(mp, 0) << 1;
		}
	}
	error = xfs_trans_reserve(tp, &M_RES(mp)->tr_write,
				  resblks, resrtextents);
	/*
	 * Check for running out of space; nothing is locked at this point,
	 * so on failure we can simply cancel the transaction and return.
	 */
	if (error) {
		xfs_trans_cancel(tp);
		return error;
	}

	lockmode = XFS_ILOCK_EXCL;
	xfs_ilock(ip, lockmode);

	error = xfs_trans_reserve_quota_nblks(tp, ip, qblocks, 0, quota_flag);
	if (error)
		goto out_trans_cancel;

	xfs_trans_ijoin(tp, ip, 0);

	/*
	 * From this point onwards we overwrite the imap pointer that the
	 * caller gave to us.
	 */
	xfs_bmap_init(&free_list, &firstfsb);
	nimaps = 1;
	error = xfs_bmapi_write(tp, ip, offset_fsb, count_fsb,
				bmapi_flags, &firstfsb, resblks, imap,
				&nimaps, &free_list);
	if (error)
		goto out_bmap_cancel;

	/*
	 * Complete the transaction.
	 */
	error = xfs_bmap_finish(&tp, &free_list, NULL);
	if (error)
		goto out_bmap_cancel;

	error = xfs_trans_commit(tp);
	if (error)
		goto out_unlock;

	/*
	 * Copy any maps to caller's array and return any error.
	 */
	if (nimaps == 0) {
		error = -ENOSPC;
		goto out_unlock;
	}

	if (!(imap->br_startblock || XFS_IS_REALTIME_INODE(ip)))
		error = xfs_alert_fsblock_zero(ip, imap);

out_unlock:
	xfs_iunlock(ip, lockmode);
	return error;

out_bmap_cancel:
	xfs_bmap_cancel(&free_list);
	xfs_trans_unreserve_quota_nblks(tp, ip, (long)qblocks, 0, quota_flag);
out_trans_cancel:
	xfs_trans_cancel(tp);
	goto out_unlock;
}

/*
 * If the caller is doing a write at the end of the file, then extend the
 * allocation out to the file system's write iosize. We clean up any extra
 * space left over when the file is closed in xfs_inactive().
 *
 * If we find we already have delalloc preallocation beyond EOF, don't do more
 * preallocation as it is not needed.
 */
STATIC int
xfs_iomap_eof_want_preallocate(
	xfs_mount_t	*mp,
	xfs_inode_t	*ip,
	xfs_off_t	offset,
	size_t		count,
	xfs_bmbt_irec_t *imap,
	int		nimaps,
	int		*prealloc)
{
	xfs_fileoff_t   start_fsb;
	xfs_filblks_t   count_fsb;
	int		n, error, imaps;
	int		found_delalloc = 0;

	*prealloc = 0;
	if (offset + count <= XFS_ISIZE(ip))
		return 0;

	/*
	 * If the file is smaller than the minimum prealloc and we are using
	 * dynamic preallocation, don't do any preallocation at all as it is
	 * likely this is the only write to the file that is going to be done.
	 */
	if (!(mp->m_flags & XFS_MOUNT_DFLT_IOSIZE) &&
	    XFS_ISIZE(ip) < XFS_FSB_TO_B(mp, mp->m_writeio_blocks))
		return 0;

	/*
	 * If there are any real blocks past eof, then don't
	 * do any speculative allocation.
	 */
	start_fsb = XFS_B_TO_FSBT(mp, ((xfs_ufsize_t)(offset + count - 1)));
	count_fsb = XFS_B_TO_FSB(mp, mp->m_super->s_maxbytes);
	while (count_fsb > 0) {
		imaps = nimaps;
		error = xfs_bmapi_read(ip, start_fsb, count_fsb, imap, &imaps,
				       0);
		if (error)
			return error;
		for (n = 0; n < imaps; n++) {
			if ((imap[n].br_startblock != HOLESTARTBLOCK) &&
			    (imap[n].br_startblock != DELAYSTARTBLOCK))
				return 0;
			start_fsb += imap[n].br_blockcount;
			count_fsb -= imap[n].br_blockcount;

			if (imap[n].br_startblock == DELAYSTARTBLOCK)
				found_delalloc = 1;
		}
	}
	if (!found_delalloc)
		*prealloc = 1;
	return 0;
}

/*
 * Determine the initial size of the preallocation. We are beyond the current
 * EOF here, but we need to take into account whether this is a sparse write or
 * an extending write when determining the preallocation size. Hence we need to
 * look up the extent that ends at the current write offset and use the result
 * to determine the preallocation size.
 *
 * If the extent is a hole, then preallocation is essentially disabled.
 * Otherwise we take the size of the preceding data extent as the basis for the
 * preallocation size. If the size of the extent is greater than half the
 * maximum extent length, then use the current offset as the basis. This ensures
 * that for large files the preallocation size always extends to MAXEXTLEN
 * rather than falling short due to things like stripe unit/width alignment of
 * real extents.
 */
STATIC xfs_fsblock_t
xfs_iomap_eof_prealloc_initial_size(
	struct xfs_mount	*mp,
	struct xfs_inode	*ip,
	xfs_off_t		offset,
	xfs_bmbt_irec_t		*imap,
	int			nimaps)
{
	xfs_fileoff_t   start_fsb;
	int		imaps = 1;
	int		error;

	ASSERT(nimaps >= imaps);

	/* if we are using a specific prealloc size, return now */
	if (mp->m_flags & XFS_MOUNT_DFLT_IOSIZE)
		return 0;

	/* If the file is small, then use the minimum prealloc */
	if (XFS_ISIZE(ip) < XFS_FSB_TO_B(mp, mp->m_dalign))
		return 0;

	/*
	 * As we write multiple pages, the offset will always align to the
	 * start of a page and hence point to a hole at EOF. i.e. if the size is
	 * 4096 bytes, we only have one block at FSB 0, but XFS_B_TO_FSB(4096)
	 * will return FSB 1. Hence if there are blocks in the file, we want to
	 * point to the block prior to the EOF block and not the hole that maps
	 * directly at @offset.
	 */
	start_fsb = XFS_B_TO_FSB(mp, offset);
	if (start_fsb)
		start_fsb--;
	error = xfs_bmapi_read(ip, start_fsb, 1, imap, &imaps, XFS_BMAPI_ENTIRE);
	if (error)
		return 0;

	ASSERT(imaps == 1);
	if (imap[0].br_startblock == HOLESTARTBLOCK)
		return 0;
	if (imap[0].br_blockcount <= (MAXEXTLEN >> 1))
		return imap[0].br_blockcount << 1;
	return XFS_B_TO_FSB(mp, offset);
}

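/*
 * Decide whether adding alloc_blocks of speculative preallocation would
 * push this quota past its low free space watermark.
 */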
STATIC bool
xfs_quota_need_throttle(
	struct xfs_inode *ip,
	int type,
	xfs_fsblock_t alloc_blocks)
{
	struct xfs_dquot *dq = xfs_inode_dquot(ip, type);

	if (!dq || !xfs_this_quota_on(ip->i_mount, type))
		return false;

	/* no hi watermark, no throttle */
	if (!dq->q_prealloc_hi_wmark)
		return false;

	/* under the lo watermark, no throttle */
	if (dq->q_res_bcount + alloc_blocks < dq->q_prealloc_lo_wmark)
		return false;

	return true;
}

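/*
 * Scale the preallocation back for this quota: clamp *qblocks and
 * *qfreesp to the space remaining below the high watermark and raise
 * *qshift as the quota approaches full.
 */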
| 431 | STATIC void |
| 432 | xfs_quota_calc_throttle( |
| 433 | struct xfs_inode *ip, |
| 434 | int type, |
| 435 | xfs_fsblock_t *qblocks, |
Brian Foster | f074051 | 2014-07-24 19:56:08 +1000 | [diff] [blame] | 436 | int *qshift, |
| 437 | int64_t *qfreesp) |
Brian Foster | 76a4202 | 2013-03-18 10:51:47 -0400 | [diff] [blame] | 438 | { |
| 439 | int64_t freesp; |
| 440 | int shift = 0; |
| 441 | struct xfs_dquot *dq = xfs_inode_dquot(ip, type); |
| 442 | |
Eric Sandeen | 5cca3f6 | 2014-10-02 09:27:09 +1000 | [diff] [blame] | 443 | /* no dq, or over hi wmark, squash the prealloc completely */ |
| 444 | if (!dq || dq->q_res_bcount >= dq->q_prealloc_hi_wmark) { |
Brian Foster | 76a4202 | 2013-03-18 10:51:47 -0400 | [diff] [blame] | 445 | *qblocks = 0; |
Brian Foster | f074051 | 2014-07-24 19:56:08 +1000 | [diff] [blame] | 446 | *qfreesp = 0; |
Brian Foster | 76a4202 | 2013-03-18 10:51:47 -0400 | [diff] [blame] | 447 | return; |
| 448 | } |
| 449 | |
| 450 | freesp = dq->q_prealloc_hi_wmark - dq->q_res_bcount; |
| 451 | if (freesp < dq->q_low_space[XFS_QLOWSP_5_PCNT]) { |
| 452 | shift = 2; |
| 453 | if (freesp < dq->q_low_space[XFS_QLOWSP_3_PCNT]) |
| 454 | shift += 2; |
| 455 | if (freesp < dq->q_low_space[XFS_QLOWSP_1_PCNT]) |
| 456 | shift += 2; |
| 457 | } |
| 458 | |
Brian Foster | f074051 | 2014-07-24 19:56:08 +1000 | [diff] [blame] | 459 | if (freesp < *qfreesp) |
| 460 | *qfreesp = freesp; |
| 461 | |
Brian Foster | 76a4202 | 2013-03-18 10:51:47 -0400 | [diff] [blame] | 462 | /* only overwrite the throttle values if we are more aggressive */ |
| 463 | if ((freesp >> shift) < (*qblocks >> *qshift)) { |
| 464 | *qblocks = freesp; |
| 465 | *qshift = shift; |
| 466 | } |
| 467 | } |
| 468 | |
/*
 * If we don't have a user specified preallocation size, dynamically increase
 * the preallocation size as the size of the file grows. Cap the maximum size
 * at a single extent or less if the filesystem is near full. The closer the
 * filesystem is to full, the smaller the maximum preallocation.
 */
STATIC xfs_fsblock_t
xfs_iomap_prealloc_size(
	struct xfs_mount	*mp,
	struct xfs_inode	*ip,
	xfs_off_t		offset,
	struct xfs_bmbt_irec	*imap,
	int			nimaps)
{
	xfs_fsblock_t		alloc_blocks = 0;
	int			shift = 0;
	int64_t			freesp;
	xfs_fsblock_t		qblocks;
	int			qshift = 0;

	alloc_blocks = xfs_iomap_eof_prealloc_initial_size(mp, ip, offset,
							   imap, nimaps);
	if (!alloc_blocks)
		goto check_writeio;
	qblocks = alloc_blocks;

	/*
	 * MAXEXTLEN is not a power of two value but we round the prealloc down
	 * to the nearest power of two value after throttling. To prevent the
	 * round down from unconditionally reducing the maximum supported prealloc
	 * size, we round up first, apply appropriate throttling, round down and
	 * cap the value to MAXEXTLEN.
	 */
	alloc_blocks = XFS_FILEOFF_MIN(roundup_pow_of_two(MAXEXTLEN),
				       alloc_blocks);

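	/* Throttle harder as global free space drops through each threshold. */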
	freesp = percpu_counter_read_positive(&mp->m_fdblocks);
	if (freesp < mp->m_low_space[XFS_LOWSP_5_PCNT]) {
		shift = 2;
		if (freesp < mp->m_low_space[XFS_LOWSP_4_PCNT])
			shift++;
		if (freesp < mp->m_low_space[XFS_LOWSP_3_PCNT])
			shift++;
		if (freesp < mp->m_low_space[XFS_LOWSP_2_PCNT])
			shift++;
		if (freesp < mp->m_low_space[XFS_LOWSP_1_PCNT])
			shift++;
	}

	/*
	 * Check each quota to cap the prealloc size, provide a shift value to
	 * throttle with and adjust amount of available space.
	 */
	if (xfs_quota_need_throttle(ip, XFS_DQ_USER, alloc_blocks))
		xfs_quota_calc_throttle(ip, XFS_DQ_USER, &qblocks, &qshift,
					&freesp);
	if (xfs_quota_need_throttle(ip, XFS_DQ_GROUP, alloc_blocks))
		xfs_quota_calc_throttle(ip, XFS_DQ_GROUP, &qblocks, &qshift,
					&freesp);
	if (xfs_quota_need_throttle(ip, XFS_DQ_PROJ, alloc_blocks))
		xfs_quota_calc_throttle(ip, XFS_DQ_PROJ, &qblocks, &qshift,
					&freesp);

	/*
	 * The final prealloc size is set to the minimum of free space available
	 * in each of the quotas and the overall filesystem.
	 *
	 * The shift throttle value is set to the maximum value as determined by
	 * the global low free space values and per-quota low free space values.
	 */
	alloc_blocks = MIN(alloc_blocks, qblocks);
	shift = MAX(shift, qshift);

	if (shift)
		alloc_blocks >>= shift;
	/*
	 * rounddown_pow_of_two() returns an undefined result if we pass in
	 * alloc_blocks = 0.
	 */
	if (alloc_blocks)
		alloc_blocks = rounddown_pow_of_two(alloc_blocks);
	if (alloc_blocks > MAXEXTLEN)
		alloc_blocks = MAXEXTLEN;

	/*
	 * If we are still trying to allocate more space than is
	 * available, squash the prealloc hard. This can happen if we
	 * have a large file on a small filesystem and the above
	 * lowspace thresholds are smaller than MAXEXTLEN.
	 */
	while (alloc_blocks && alloc_blocks >= freesp)
		alloc_blocks >>= 4;

check_writeio:
	if (alloc_blocks < mp->m_writeio_blocks)
		alloc_blocks = mp->m_writeio_blocks;

	trace_xfs_iomap_prealloc_size(ip, alloc_blocks, shift,
				      mp->m_writeio_blocks);

	return alloc_blocks;
}

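/*
 * Reserve delayed allocation blocks for a buffered write, adding
 * speculative preallocation beyond EOF where appropriate. Called with the
 * ilock held exclusively; retried without preallocation if we run out of
 * space or quota.
 */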
int
xfs_iomap_write_delay(
	xfs_inode_t	*ip,
	xfs_off_t	offset,
	size_t		count,
	xfs_bmbt_irec_t *ret_imap)
{
	xfs_mount_t	*mp = ip->i_mount;
	xfs_fileoff_t	offset_fsb;
	xfs_fileoff_t	last_fsb;
	xfs_off_t	aligned_offset;
	xfs_fileoff_t	ioalign;
	xfs_extlen_t	extsz;
	int		nimaps;
	xfs_bmbt_irec_t imap[XFS_WRITE_IMAPS];
	int		prealloc;
	int		error;

	ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));

	/*
	 * Make sure that the dquots are there. This doesn't hold
	 * the ilock across a disk read.
	 */
	error = xfs_qm_dqattach_locked(ip, 0);
	if (error)
		return error;

	extsz = xfs_get_extsz_hint(ip);
	offset_fsb = XFS_B_TO_FSBT(mp, offset);

	error = xfs_iomap_eof_want_preallocate(mp, ip, offset, count,
				imap, XFS_WRITE_IMAPS, &prealloc);
	if (error)
		return error;

retry:
	if (prealloc) {
		xfs_fsblock_t	alloc_blocks;

		alloc_blocks = xfs_iomap_prealloc_size(mp, ip, offset, imap,
						       XFS_WRITE_IMAPS);

		aligned_offset = XFS_WRITEIO_ALIGN(mp, (offset + count - 1));
		ioalign = XFS_B_TO_FSBT(mp, aligned_offset);
		last_fsb = ioalign + alloc_blocks;
	} else {
		last_fsb = XFS_B_TO_FSB(mp, ((xfs_ufsize_t)(offset + count)));
	}

	if (prealloc || extsz) {
		error = xfs_iomap_eof_align_last_fsb(mp, ip, extsz, &last_fsb);
		if (error)
			return error;
	}

	/*
	 * Make sure preallocation does not create extents beyond the range we
	 * actually support in this filesystem.
	 */
	if (last_fsb > XFS_B_TO_FSB(mp, mp->m_super->s_maxbytes))
		last_fsb = XFS_B_TO_FSB(mp, mp->m_super->s_maxbytes);

	ASSERT(last_fsb > offset_fsb);

	nimaps = XFS_WRITE_IMAPS;
	error = xfs_bmapi_delay(ip, offset_fsb, last_fsb - offset_fsb,
				imap, &nimaps, XFS_BMAPI_ENTIRE);
	switch (error) {
	case 0:
	case -ENOSPC:
	case -EDQUOT:
		break;
	default:
		return error;
	}

	/*
	 * If bmapi returned us nothing, we got either ENOSPC or EDQUOT. Retry
	 * without EOF preallocation.
	 */
	if (nimaps == 0) {
		trace_xfs_delalloc_enospc(ip, offset, count);
		if (prealloc) {
			prealloc = 0;
			error = 0;
			goto retry;
		}
		return error ? error : -ENOSPC;
	}

	if (!(imap[0].br_startblock || XFS_IS_REALTIME_INODE(ip)))
		return xfs_alert_fsblock_zero(ip, &imap[0]);

	/*
	 * Tag the inode as speculatively preallocated so we can reclaim this
	 * space on demand, if necessary.
	 */
	if (prealloc)
		xfs_inode_set_eofblocks_tag(ip);

	*ret_imap = imap[0];
	return 0;
}

/*
 * Pass in a delayed allocate extent, convert it to real extents;
 * return to the caller the extent we create which maps on top of
 * the originating caller's request.
 *
 * Called without a lock on the inode.
 *
 * We no longer bother to look at the incoming map - all we have to
 * guarantee is that whatever we allocate fills the required range.
 */
int
xfs_iomap_write_allocate(
	xfs_inode_t	*ip,
	xfs_off_t	offset,
	xfs_bmbt_irec_t *imap)
{
	xfs_mount_t	*mp = ip->i_mount;
	xfs_fileoff_t	offset_fsb, last_block;
	xfs_fileoff_t	end_fsb, map_start_fsb;
	xfs_fsblock_t	first_block;
	xfs_bmap_free_t	free_list;
	xfs_filblks_t	count_fsb;
	xfs_trans_t	*tp;
	int		nimaps;
	int		error = 0;
	int		nres;

	/*
	 * Make sure that the dquots are there.
	 */
	error = xfs_qm_dqattach(ip, 0);
	if (error)
		return error;

	offset_fsb = XFS_B_TO_FSBT(mp, offset);
	count_fsb = imap->br_blockcount;
	map_start_fsb = imap->br_startoff;

	XFS_STATS_ADD(mp, xs_xstrat_bytes, XFS_FSB_TO_B(mp, count_fsb));

	while (count_fsb != 0) {
		/*
		 * Set up a transaction with which to allocate the
		 * backing store for the file. Do allocations in a
		 * loop until we get some space in the range we are
		 * interested in. The other space that might be allocated
		 * is in the delayed allocation extent on which we sit
		 * but before our buffer starts.
		 */

		nimaps = 0;
		while (nimaps == 0) {
			tp = xfs_trans_alloc(mp, XFS_TRANS_STRAT_WRITE);
			tp->t_flags |= XFS_TRANS_RESERVE;
			nres = XFS_EXTENTADD_SPACE_RES(mp, XFS_DATA_FORK);
			error = xfs_trans_reserve(tp, &M_RES(mp)->tr_write,
						  nres, 0);
			if (error) {
				xfs_trans_cancel(tp);
				return error;
			}
			xfs_ilock(ip, XFS_ILOCK_EXCL);
			xfs_trans_ijoin(tp, ip, 0);

			xfs_bmap_init(&free_list, &first_block);

			/*
			 * It is possible that the extents have changed since
			 * we did the read call as we dropped the ilock for a
			 * while. We have to be careful about truncates or hole
			 * punches here - we are not allowed to allocate
			 * non-delalloc blocks here.
			 *
			 * The only protection against truncation is the pages
			 * for the range we are being asked to convert are
			 * locked and hence a truncate will block on them
			 * first.
			 *
			 * As a result, if we go beyond the range we really
			 * need and hit a delalloc extent boundary followed by
			 * a hole while we have excess blocks in the map, we
			 * will fill the hole incorrectly and overrun the
			 * transaction reservation.
			 *
			 * Using a single map prevents this as we are forced to
			 * check each map we look for overlap with the desired
			 * range and abort as soon as we find it. Also, given
			 * that we only return a single map, having one beyond
			 * what we can return is probably a bit silly.
			 *
			 * We also need to check that we don't go beyond EOF;
			 * this is a truncate optimisation as a truncate sets
			 * the new file size before blocking on the pages we
			 * currently have locked under writeback. Because they
			 * are about to be tossed, we don't need to write them
			 * back....
			 */
			nimaps = 1;
			end_fsb = XFS_B_TO_FSB(mp, XFS_ISIZE(ip));
			error = xfs_bmap_last_offset(ip, &last_block,
							XFS_DATA_FORK);
			if (error)
				goto trans_cancel;

			last_block = XFS_FILEOFF_MAX(last_block, end_fsb);
			if ((map_start_fsb + count_fsb) > last_block) {
				count_fsb = last_block - map_start_fsb;
				if (count_fsb == 0) {
					error = -EAGAIN;
					goto trans_cancel;
				}
			}

			/*
			 * From this point onwards we overwrite the imap
			 * pointer that the caller gave to us.
			 */
			error = xfs_bmapi_write(tp, ip, map_start_fsb,
						count_fsb, 0, &first_block,
						nres, imap, &nimaps,
						&free_list);
			if (error)
				goto trans_cancel;

			error = xfs_bmap_finish(&tp, &free_list, NULL);
			if (error)
				goto trans_cancel;

			error = xfs_trans_commit(tp);
			if (error)
				goto error0;

			xfs_iunlock(ip, XFS_ILOCK_EXCL);
		}

		/*
		 * See if we were able to allocate an extent that
		 * covers at least part of the caller's request.
		 */
		if (!(imap->br_startblock || XFS_IS_REALTIME_INODE(ip)))
			return xfs_alert_fsblock_zero(ip, imap);

		if ((offset_fsb >= imap->br_startoff) &&
		    (offset_fsb < (imap->br_startoff +
				   imap->br_blockcount))) {
			XFS_STATS_INC(mp, xs_xstrat_quick);
			return 0;
		}

		/*
		 * So far we have not mapped the requested part of the
		 * file, just surrounding data, try again.
		 */
		count_fsb -= imap->br_blockcount;
		map_start_fsb = imap->br_startoff + imap->br_blockcount;
	}

trans_cancel:
	xfs_bmap_cancel(&free_list);
	xfs_trans_cancel(tp);
error0:
	xfs_iunlock(ip, XFS_ILOCK_EXCL);
	return error;
}

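/*
 * Convert the unwritten extents backing [offset, offset + count) to
 * written state, logging any file size update as each transaction
 * commits.
 */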
int
xfs_iomap_write_unwritten(
	xfs_inode_t	*ip,
	xfs_off_t	offset,
	xfs_off_t	count)
{
	xfs_mount_t	*mp = ip->i_mount;
	xfs_fileoff_t	offset_fsb;
	xfs_filblks_t	count_fsb;
	xfs_filblks_t	numblks_fsb;
	xfs_fsblock_t	firstfsb;
	int		nimaps;
	xfs_trans_t	*tp;
	xfs_bmbt_irec_t imap;
	xfs_bmap_free_t free_list;
	xfs_fsize_t	i_size;
	uint		resblks;
	int		error;

	trace_xfs_unwritten_convert(ip, offset, count);

	offset_fsb = XFS_B_TO_FSBT(mp, offset);
	count_fsb = XFS_B_TO_FSB(mp, (xfs_ufsize_t)offset + count);
	count_fsb = (xfs_filblks_t)(count_fsb - offset_fsb);

	/*
	 * Reserve enough blocks in this transaction for two complete extent
	 * btree splits. We may be converting the middle part of an unwritten
	 * extent and in this case we will insert two new extents in the btree
	 * each of which could cause a full split.
	 *
	 * This reservation amount will be used in the first call to
	 * xfs_bmbt_split() to select an AG with enough space to satisfy the
	 * rest of the operation.
	 */
	resblks = XFS_DIOSTRAT_SPACE_RES(mp, 0) << 1;

	do {
		/*
		 * Set up a transaction to convert the range of extents
		 * from unwritten to real. Do allocations in a loop until
		 * we have covered the range passed in.
		 *
		 * Note that we open code the transaction allocation here
		 * to pass KM_NOFS; we can't risk recursing back into
		 * the filesystem here as we might be asked to write out
		 * the same inode that we complete here and might deadlock
		 * on the iolock.
		 */
		sb_start_intwrite(mp->m_super);
		tp = _xfs_trans_alloc(mp, XFS_TRANS_STRAT_WRITE, KM_NOFS);
		tp->t_flags |= XFS_TRANS_RESERVE | XFS_TRANS_FREEZE_PROT;
		error = xfs_trans_reserve(tp, &M_RES(mp)->tr_write,
					  resblks, 0);
		if (error) {
			xfs_trans_cancel(tp);
			return error;
		}

		xfs_ilock(ip, XFS_ILOCK_EXCL);
		xfs_trans_ijoin(tp, ip, 0);

		/*
		 * Modify the unwritten extent state of the buffer.
		 */
		xfs_bmap_init(&free_list, &firstfsb);
		nimaps = 1;
		error = xfs_bmapi_write(tp, ip, offset_fsb, count_fsb,
					XFS_BMAPI_CONVERT, &firstfsb, resblks,
					&imap, &nimaps, &free_list);
		if (error)
			goto error_on_bmapi_transaction;

		/*
		 * Log the updated inode size as we go. We have to be careful
		 * to only log it up to the actual write offset if it is
		 * halfway into a block.
		 */
		i_size = XFS_FSB_TO_B(mp, offset_fsb + count_fsb);
		if (i_size > offset + count)
			i_size = offset + count;

		i_size = xfs_new_eof(ip, i_size);
		if (i_size) {
			ip->i_d.di_size = i_size;
			xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);
		}

		error = xfs_bmap_finish(&tp, &free_list, NULL);
		if (error)
			goto error_on_bmapi_transaction;

		error = xfs_trans_commit(tp);
		xfs_iunlock(ip, XFS_ILOCK_EXCL);
		if (error)
			return error;

		if (!(imap.br_startblock || XFS_IS_REALTIME_INODE(ip)))
			return xfs_alert_fsblock_zero(ip, &imap);

		if ((numblks_fsb = imap.br_blockcount) == 0) {
			/*
			 * The numblks_fsb value should always get
			 * smaller, otherwise the loop is stuck.
			 */
			ASSERT(imap.br_blockcount);
			break;
		}
		offset_fsb += numblks_fsb;
		count_fsb -= numblks_fsb;
	} while (count_fsb > 0);

	return 0;

error_on_bmapi_transaction:
	xfs_bmap_cancel(&free_list);
	xfs_trans_cancel(tp);
	xfs_iunlock(ip, XFS_ILOCK_EXCL);
	return error;
}