// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2000-2005 Silicon Graphics, Inc.
 * All Rights Reserved.
 */
#ifndef __XFS_QUOTA_H__
#define __XFS_QUOTA_H__

#include "xfs_quota_defs.h"

/*
 * Kernel only quota definitions and functions
 */

struct xfs_trans;

/*
 * This check is typically done without holding the inode lock;
 * that may seem racy, but it is harmless in the context in which it is used.
 * The inode cannot go inactive as long as a reference is kept, and
 * therefore if dquot(s) were attached, they'll stay consistent.
 * If, for example, the ownership of the inode changes while
 * we don't have the inode locked, the appropriate dquot(s) will be
 * attached atomically.
 */
#define XFS_NOT_DQATTACHED(mp, ip) \
	((XFS_IS_UQUOTA_ON(mp) && (ip)->i_udquot == NULL) || \
	 (XFS_IS_GQUOTA_ON(mp) && (ip)->i_gdquot == NULL) || \
	 (XFS_IS_PQUOTA_ON(mp) && (ip)->i_pdquot == NULL))

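/*
 * Illustrative sketch only (not part of the original header): callers are
 * expected to use the check above to decide whether dquots still need to
 * be attached before quota accounting is modified, roughly:
 *
 *	if (XFS_IS_QUOTA_RUNNING(mp) && XFS_NOT_DQATTACHED(mp, ip)) {
 *		error = xfs_qm_dqattach(ip);	// declared below
 *		if (error)
 *			return error;
 *	}
 *
 * XFS_IS_QUOTA_RUNNING() is assumed to come from xfs_quota_defs.h; exact
 * call sites vary, so treat this only as an example of intent.
 */
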
#define XFS_QM_NEED_QUOTACHECK(mp) \
	((XFS_IS_UQUOTA_ON(mp) && \
		(mp->m_sb.sb_qflags & XFS_UQUOTA_CHKD) == 0) || \
	 (XFS_IS_GQUOTA_ON(mp) && \
		(mp->m_sb.sb_qflags & XFS_GQUOTA_CHKD) == 0) || \
	 (XFS_IS_PQUOTA_ON(mp) && \
		(mp->m_sb.sb_qflags & XFS_PQUOTA_CHKD) == 0))

static inline uint
xfs_quota_chkd_flag(
	uint		dqtype)
{
	switch (dqtype) {
	case XFS_DQ_USER:
		return XFS_UQUOTA_CHKD;
	case XFS_DQ_GROUP:
		return XFS_GQUOTA_CHKD;
	case XFS_DQ_PROJ:
		return XFS_PQUOTA_CHKD;
	default:
		return 0;
	}
}

/*
 * The structure kept inside the xfs_trans_t keeps track of dquot changes
 * within a transaction so that they can be applied later.
 */
typedef struct xfs_dqtrx {
	struct xfs_dquot *qt_dquot;	  /* the dquot this refers to */
	ulong		qt_blk_res;	  /* blks reserved on a dquot */
	ulong		qt_ino_res;	  /* inodes reserved on a dquot */
	ulong		qt_ino_res_used;  /* inodes used from the reservation */
	long		qt_bcount_delta;  /* dquot blk count changes */
	long		qt_delbcnt_delta; /* delayed dquot blk count changes */
	long		qt_icount_delta;  /* dquot inode count changes */
	ulong		qt_rtblk_res;	  /* # blks reserved on a dquot */
	ulong		qt_rtblk_res_used;/* # blks used from reservation */
	long		qt_rtbcount_delta;/* dquot realtime blk changes */
	long		qt_delrtb_delta;  /* delayed RT blk count changes */
} xfs_dqtrx_t;

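/*
 * Illustrative sketch only (not part of the original header), assuming the
 * XFS_TRANS_DQ_* field flags from xfs_quota_defs.h: a transaction
 * accumulates per-dquot deltas in struct xfs_dqtrx and folds them into the
 * dquots later, roughly:
 *
 *	xfs_trans_mod_dquot_byino(tp, ip, XFS_TRANS_DQ_BCOUNT, nblks);
 *		// accumulates nblks into qt_bcount_delta for ip's dquots
 *	...
 *	xfs_trans_apply_dquot_deltas(tp);
 *		// applies the accumulated deltas to the dquots
 *
 * On cancel or failure, xfs_trans_unreserve_and_mod_dquots() is assumed to
 * back the reservations out instead.
 */
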
#ifdef CONFIG_XFS_QUOTA
extern void xfs_trans_dup_dqinfo(struct xfs_trans *, struct xfs_trans *);
extern void xfs_trans_free_dqinfo(struct xfs_trans *);
extern void xfs_trans_mod_dquot_byino(struct xfs_trans *, struct xfs_inode *,
		uint, long);
extern void xfs_trans_apply_dquot_deltas(struct xfs_trans *);
extern void xfs_trans_unreserve_and_mod_dquots(struct xfs_trans *);
extern int xfs_trans_reserve_quota_nblks(struct xfs_trans *,
		struct xfs_inode *, long, long, uint);
extern int xfs_trans_reserve_quota_bydquots(struct xfs_trans *,
		struct xfs_mount *, struct xfs_dquot *,
		struct xfs_dquot *, struct xfs_dquot *, long, long, uint);

extern int xfs_qm_vop_dqalloc(struct xfs_inode *, xfs_dqid_t, xfs_dqid_t,
		prid_t, uint, struct xfs_dquot **, struct xfs_dquot **,
		struct xfs_dquot **);
extern void xfs_qm_vop_create_dqattach(struct xfs_trans *, struct xfs_inode *,
		struct xfs_dquot *, struct xfs_dquot *, struct xfs_dquot *);
extern int xfs_qm_vop_rename_dqattach(struct xfs_inode **);
extern struct xfs_dquot *xfs_qm_vop_chown(struct xfs_trans *,
		struct xfs_inode *, struct xfs_dquot **, struct xfs_dquot *);
extern int xfs_qm_vop_chown_reserve(struct xfs_trans *, struct xfs_inode *,
		struct xfs_dquot *, struct xfs_dquot *,
		struct xfs_dquot *, uint);
extern int xfs_qm_dqattach(struct xfs_inode *);
extern int xfs_qm_dqattach_locked(struct xfs_inode *ip, bool doalloc);
extern void xfs_qm_dqdetach(struct xfs_inode *);
extern void xfs_qm_dqrele(struct xfs_dquot *);
extern void xfs_qm_statvfs(struct xfs_inode *, struct kstatfs *);
extern int xfs_qm_newmount(struct xfs_mount *, uint *, uint *);
extern void xfs_qm_mount_quotas(struct xfs_mount *);
extern void xfs_qm_unmount(struct xfs_mount *);
extern void xfs_qm_unmount_quotas(struct xfs_mount *);

#else
static inline int
xfs_qm_vop_dqalloc(struct xfs_inode *ip, xfs_dqid_t uid, xfs_dqid_t gid,
		prid_t prid, uint flags, struct xfs_dquot **udqp,
		struct xfs_dquot **gdqp, struct xfs_dquot **pdqp)
{
	*udqp = NULL;
	*gdqp = NULL;
	*pdqp = NULL;
	return 0;
}
#define xfs_trans_dup_dqinfo(tp, tp2)
#define xfs_trans_free_dqinfo(tp)
#define xfs_trans_mod_dquot_byino(tp, ip, fields, delta)
#define xfs_trans_apply_dquot_deltas(tp)
#define xfs_trans_unreserve_and_mod_dquots(tp)
static inline int xfs_trans_reserve_quota_nblks(struct xfs_trans *tp,
		struct xfs_inode *ip, long nblks, long ninos, uint flags)
{
	return 0;
}
static inline int xfs_trans_reserve_quota_bydquots(struct xfs_trans *tp,
		struct xfs_mount *mp, struct xfs_dquot *udqp,
		struct xfs_dquot *gdqp, struct xfs_dquot *pdqp,
		long nblks, long ninos, uint flags)
{
	return 0;
}
#define xfs_qm_vop_create_dqattach(tp, ip, u, g, p)
#define xfs_qm_vop_rename_dqattach(it)				(0)
#define xfs_qm_vop_chown(tp, ip, old, new)			(NULL)
#define xfs_qm_vop_chown_reserve(tp, ip, u, g, p, fl)		(0)
#define xfs_qm_dqattach(ip)					(0)
#define xfs_qm_dqattach_locked(ip, fl)				(0)
#define xfs_qm_dqdetach(ip)
#define xfs_qm_dqrele(d)
#define xfs_qm_statvfs(ip, s)
#define xfs_qm_newmount(mp, a, b)				(0)
#define xfs_qm_mount_quotas(mp)
#define xfs_qm_unmount(mp)
#define xfs_qm_unmount_quotas(mp)
#endif /* CONFIG_XFS_QUOTA */

#define xfs_trans_unreserve_quota_nblks(tp, ip, nblks, ninos, flags) \
	xfs_trans_reserve_quota_nblks(tp, ip, -(nblks), -(ninos), flags)
#define xfs_trans_reserve_quota(tp, mp, ud, gd, pd, nb, ni, f) \
	xfs_trans_reserve_quota_bydquots(tp, mp, ud, gd, pd, nb, ni, \
				f | XFS_QMOPT_RES_REGBLKS)

extern int xfs_mount_reset_sbqflags(struct xfs_mount *);

#endif	/* __XFS_QUOTA_H__ */