blob: 03f6ff249edbe235df67fe99a88203c140e167ae [file] [log] [blame]
Tao Maf56654c2008-08-18 17:38:48 +08001/* -*- mode: c; c-basic-offset: 8; -*-
2 * vim: noexpandtab sw=8 ts=8 sts=0:
3 *
4 * xattr.c
5 *
Tiger Yangc3cb6822008-10-23 16:33:03 +08006 * Copyright (C) 2004, 2008 Oracle. All rights reserved.
Tao Maf56654c2008-08-18 17:38:48 +08007 *
Tiger Yangcf1d6c72008-08-18 17:11:00 +08008 * CREDITS:
Tiger Yangc3cb6822008-10-23 16:33:03 +08009 * Lots of code in this file is copy from linux/fs/ext3/xattr.c.
10 * Copyright (C) 2001-2003 Andreas Gruenbacher, <agruen@suse.de>
Tiger Yangcf1d6c72008-08-18 17:11:00 +080011 *
Tao Maf56654c2008-08-18 17:38:48 +080012 * This program is free software; you can redistribute it and/or
13 * modify it under the terms of the GNU General Public
Tiger Yangc3cb6822008-10-23 16:33:03 +080014 * License version 2 as published by the Free Software Foundation.
Tao Maf56654c2008-08-18 17:38:48 +080015 *
16 * This program is distributed in the hope that it will be useful,
17 * but WITHOUT ANY WARRANTY; without even the implied warranty of
18 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
19 * General Public License for more details.
Tao Maf56654c2008-08-18 17:38:48 +080020 */
21
Tiger Yangcf1d6c72008-08-18 17:11:00 +080022#include <linux/capability.h>
23#include <linux/fs.h>
24#include <linux/types.h>
25#include <linux/slab.h>
26#include <linux/highmem.h>
27#include <linux/pagemap.h>
28#include <linux/uio.h>
29#include <linux/sched.h>
30#include <linux/splice.h>
31#include <linux/mount.h>
32#include <linux/writeback.h>
33#include <linux/falloc.h>
Tao Ma01225592008-08-18 17:38:53 +080034#include <linux/sort.h>
Mark Fasheh99219ae2008-10-07 14:52:59 -070035#include <linux/init.h>
36#include <linux/module.h>
37#include <linux/string.h>
Tiger Yang923f7f32008-11-14 11:16:27 +080038#include <linux/security.h>
Tiger Yangcf1d6c72008-08-18 17:11:00 +080039
Tao Maf56654c2008-08-18 17:38:48 +080040#include <cluster/masklog.h>
41
42#include "ocfs2.h"
43#include "alloc.h"
Joel Beckerd6b32bb2008-10-17 14:55:01 -070044#include "blockcheck.h"
Tao Maf56654c2008-08-18 17:38:48 +080045#include "dlmglue.h"
46#include "file.h"
Tiger Yangcf1d6c72008-08-18 17:11:00 +080047#include "symlink.h"
48#include "sysfile.h"
Tao Maf56654c2008-08-18 17:38:48 +080049#include "inode.h"
50#include "journal.h"
51#include "ocfs2_fs.h"
52#include "suballoc.h"
53#include "uptodate.h"
54#include "buffer_head_io.h"
Tao Ma0c044f02008-08-18 17:38:50 +080055#include "super.h"
Tiger Yangcf1d6c72008-08-18 17:11:00 +080056#include "xattr.h"
Tao Ma492a8a32009-08-18 11:43:17 +080057#include "refcounttree.h"
Tao Ma0fe9b662009-08-18 11:47:56 +080058#include "acl.h"
Tao Ma402b4182011-02-23 22:01:17 +080059#include "ocfs2_trace.h"
Tiger Yangcf1d6c72008-08-18 17:11:00 +080060
/*
 * Template layout for an out-of-inline xattr value: a value root
 * immediately followed by one extent record, so sizeof() this struct
 * (OCFS2_XATTR_ROOT_SIZE below) is the on-disk space a tree-rooted
 * value consumes in the name+value region.
 */
struct ocfs2_xattr_def_value_root {
	struct ocfs2_xattr_value_root	xv;
	struct ocfs2_extent_rec		er;
};
65
/*
 * In-memory handle for one xattr bucket: a bucket spans
 * OCFS2_XATTR_BUCKET_SIZE bytes on disk and is made up of one or more
 * filesystem blocks (bu_blocks of them), each held in bu_bhs[].
 */
struct ocfs2_xattr_bucket {
	/* The inode these xattrs are associated with */
	struct inode *bu_inode;

	/* The actual buffers that make up the bucket */
	struct buffer_head *bu_bhs[OCFS2_XATTR_MAX_BLOCKS_PER_BUCKET];

	/* How many blocks make up one bucket for this filesystem */
	int bu_blocks;
};
76
/*
 * Carries the transaction handle, allocator reservations, and deferred
 * deallocation state through one xattr set operation.
 */
struct ocfs2_xattr_set_ctxt {
	handle_t *handle;			/* journal transaction for this set */
	struct ocfs2_alloc_context *meta_ac;	/* metadata block reservation */
	struct ocfs2_alloc_context *data_ac;	/* data cluster reservation */
	struct ocfs2_cached_dealloc_ctxt dealloc; /* clusters to free after commit */
	int set_abort;				/* nonzero: abort the set mid-flight */
};
84
Tiger Yangcf1d6c72008-08-18 17:11:00 +080085#define OCFS2_XATTR_ROOT_SIZE (sizeof(struct ocfs2_xattr_def_value_root))
86#define OCFS2_XATTR_INLINE_SIZE 80
Tiger Yang4442f512009-02-20 11:11:50 +080087#define OCFS2_XATTR_HEADER_GAP 4
Tiger Yang534eadd2008-11-14 11:16:41 +080088#define OCFS2_XATTR_FREE_IN_IBODY (OCFS2_MIN_XATTR_INLINE_SIZE \
89 - sizeof(struct ocfs2_xattr_header) \
Tiger Yang4442f512009-02-20 11:11:50 +080090 - OCFS2_XATTR_HEADER_GAP)
Tiger Yang89c38bd2008-11-14 11:17:41 +080091#define OCFS2_XATTR_FREE_IN_BLOCK(ptr) ((ptr)->i_sb->s_blocksize \
92 - sizeof(struct ocfs2_xattr_block) \
93 - sizeof(struct ocfs2_xattr_header) \
Tiger Yang4442f512009-02-20 11:11:50 +080094 - OCFS2_XATTR_HEADER_GAP)
Tiger Yangcf1d6c72008-08-18 17:11:00 +080095
/*
 * Default value root used when initializing a new out-of-inline value:
 * an extent list with room for exactly one record.
 */
static struct ocfs2_xattr_def_value_root def_xv = {
	.xv.xr_list.l_count = cpu_to_le16(1),
};
99
/*
 * NULL-terminated list of xattr handlers registered with the VFS
 * (wired up via sb->s_xattr at mount time — confirm against super.c).
 */
const struct xattr_handler *ocfs2_xattr_handlers[] = {
	&ocfs2_xattr_user_handler,
	&posix_acl_access_xattr_handler,
	&posix_acl_default_xattr_handler,
	&ocfs2_xattr_trusted_handler,
	&ocfs2_xattr_security_handler,
	NULL
};
108
/*
 * Maps an on-disk OCFS2_XATTR_INDEX_* name index to its handler.
 * Indexed by name index; slots without a handler stay NULL
 * (ocfs2_xattr_prefix() below relies on that).
 */
static const struct xattr_handler *ocfs2_xattr_handler_map[OCFS2_XATTR_MAX] = {
	[OCFS2_XATTR_INDEX_USER]	= &ocfs2_xattr_user_handler,
	[OCFS2_XATTR_INDEX_POSIX_ACL_ACCESS]
					= &posix_acl_access_xattr_handler,
	[OCFS2_XATTR_INDEX_POSIX_ACL_DEFAULT]
					= &posix_acl_default_xattr_handler,
	[OCFS2_XATTR_INDEX_TRUSTED]	= &ocfs2_xattr_trusted_handler,
	[OCFS2_XATTR_INDEX_SECURITY]	= &ocfs2_xattr_security_handler,
};
118
/*
 * Describes the xattr being set: name (with its handler index) and the
 * new value.  xi_value == NULL signals removal elsewhere in this file —
 * NOTE(review): confirmed only by convention here, verify at call sites.
 */
struct ocfs2_xattr_info {
	int		xi_name_index;	/* OCFS2_XATTR_INDEX_* namespace */
	const char	*xi_name;	/* name without the namespace prefix */
	int		xi_name_len;
	const void	*xi_value;
	size_t		xi_value_len;
};
126
/*
 * Result of looking up an xattr: which buffer holds it, where its
 * header/entry live, and whether the search found it at all.
 */
struct ocfs2_xattr_search {
	struct buffer_head *inode_bh;
	/*
	 * xattr_bh point to the block buffer head which has extended attribute
	 * when extended attribute in inode, xattr_bh is equal to inode_bh.
	 */
	struct buffer_head *xattr_bh;
	struct ocfs2_xattr_header *header;
	/* bucket holding the entry when xattrs are index-tree based */
	struct ocfs2_xattr_bucket *bucket;
	void *base;		/* start of the storage region */
	void *end;		/* one past the end of the storage region */
	struct ocfs2_xattr_entry *here;	/* matching entry (or insertion point) */
	int not_found;		/* nonzero if the name was not found */
};
141
/* Operations on struct ocfs2_xa_entry */
struct ocfs2_xa_loc;

/*
 * Storage-specific backend for an xattr location.  Implementations
 * abstract over the different on-disk containers (inline inode area,
 * xattr block, bucket) so the generic set/remove code need not care
 * which one it is writing to.
 */
struct ocfs2_xa_loc_operations {
	/*
	 * Journal functions
	 */
	int (*xlo_journal_access)(handle_t *handle, struct ocfs2_xa_loc *loc,
				  int type);
	void (*xlo_journal_dirty)(handle_t *handle, struct ocfs2_xa_loc *loc);

	/*
	 * Return a pointer to the appropriate buffer in loc->xl_storage
	 * at the given offset from loc->xl_header.
	 */
	void *(*xlo_offset_pointer)(struct ocfs2_xa_loc *loc, int offset);

	/* Can we reuse the existing entry for the new value? */
	int (*xlo_can_reuse)(struct ocfs2_xa_loc *loc,
			     struct ocfs2_xattr_info *xi);

	/* How much space is needed for the new value? */
	int (*xlo_check_space)(struct ocfs2_xa_loc *loc,
			       struct ocfs2_xattr_info *xi);

	/*
	 * Return the offset of the first name+value pair.  This is
	 * the start of our downward-filling free space.
	 */
	int (*xlo_get_free_start)(struct ocfs2_xa_loc *loc);

	/*
	 * Remove the name+value at this location.  Do whatever is
	 * appropriate with the remaining name+value pairs.
	 */
	void (*xlo_wipe_namevalue)(struct ocfs2_xa_loc *loc);

	/* Fill xl_entry with a new entry */
	void (*xlo_add_entry)(struct ocfs2_xa_loc *loc, u32 name_hash);

	/* Add name+value storage to an entry */
	void (*xlo_add_namevalue)(struct ocfs2_xa_loc *loc, int size);

	/*
	 * Initialize the value buf's access and bh fields for this entry.
	 * ocfs2_xa_fill_value_buf() will handle the xv pointer.
	 */
	void (*xlo_fill_value_buf)(struct ocfs2_xa_loc *loc,
				   struct ocfs2_xattr_value_buf *vb);
};
191
/*
 * Describes an xattr entry location.  This is a memory structure
 * tracking the on-disk structure.
 */
struct ocfs2_xa_loc {
	/* This xattr belongs to this inode */
	struct inode *xl_inode;

	/* The ocfs2_xattr_header inside the on-disk storage. Not NULL. */
	struct ocfs2_xattr_header *xl_header;

	/* Bytes from xl_header to the end of the storage */
	int xl_size;

	/*
	 * The ocfs2_xattr_entry this location describes.  If this is
	 * NULL, this location describes the on-disk structure where it
	 * would have been.
	 */
	struct ocfs2_xattr_entry *xl_entry;

	/*
	 * Internal housekeeping
	 */

	/* Buffer(s) containing this entry */
	void *xl_storage;

	/* Operations on the storage backing this location */
	const struct ocfs2_xa_loc_operations *xl_ops;
};
223
Joel Becker199799a2009-08-14 19:04:15 -0700224/*
225 * Convenience functions to calculate how much space is needed for a
226 * given name+value pair
227 */
228static int namevalue_size(int name_len, uint64_t value_len)
229{
230 if (value_len > OCFS2_XATTR_INLINE_SIZE)
231 return OCFS2_XATTR_SIZE(name_len) + OCFS2_XATTR_ROOT_SIZE;
232 else
233 return OCFS2_XATTR_SIZE(name_len) + OCFS2_XATTR_SIZE(value_len);
234}
235
236static int namevalue_size_xi(struct ocfs2_xattr_info *xi)
237{
238 return namevalue_size(xi->xi_name_len, xi->xi_value_len);
239}
240
241static int namevalue_size_xe(struct ocfs2_xattr_entry *xe)
242{
243 u64 value_len = le64_to_cpu(xe->xe_value_size);
244
245 BUG_ON((value_len > OCFS2_XATTR_INLINE_SIZE) &&
246 ocfs2_xattr_is_local(xe));
247 return namevalue_size(xe->xe_name_len, value_len);
248}
249
250
Tao Mafd68a892009-08-18 11:43:21 +0800251static int ocfs2_xattr_bucket_get_name_value(struct super_block *sb,
Tao Ma589dc262008-08-18 17:38:51 +0800252 struct ocfs2_xattr_header *xh,
253 int index,
254 int *block_off,
255 int *new_offset);
256
Joel Becker54f443f2008-10-20 18:43:07 -0700257static int ocfs2_xattr_block_find(struct inode *inode,
258 int name_index,
259 const char *name,
260 struct ocfs2_xattr_search *xs);
Tao Ma589dc262008-08-18 17:38:51 +0800261static int ocfs2_xattr_index_block_find(struct inode *inode,
262 struct buffer_head *root_bh,
263 int name_index,
264 const char *name,
265 struct ocfs2_xattr_search *xs);
266
Tao Ma0c044f02008-08-18 17:38:50 +0800267static int ocfs2_xattr_tree_list_index_block(struct inode *inode,
Tao Ma47bca492009-08-18 11:43:42 +0800268 struct buffer_head *blk_bh,
Tao Ma0c044f02008-08-18 17:38:50 +0800269 char *buffer,
270 size_t buffer_size);
271
Tao Ma01225592008-08-18 17:38:53 +0800272static int ocfs2_xattr_create_index_block(struct inode *inode,
Tao Ma78f30c32008-11-12 08:27:00 +0800273 struct ocfs2_xattr_search *xs,
274 struct ocfs2_xattr_set_ctxt *ctxt);
Tao Ma01225592008-08-18 17:38:53 +0800275
276static int ocfs2_xattr_set_entry_index_block(struct inode *inode,
277 struct ocfs2_xattr_info *xi,
Tao Ma78f30c32008-11-12 08:27:00 +0800278 struct ocfs2_xattr_search *xs,
279 struct ocfs2_xattr_set_ctxt *ctxt);
Tao Ma01225592008-08-18 17:38:53 +0800280
Tao Ma47bca492009-08-18 11:43:42 +0800281typedef int (xattr_tree_rec_func)(struct inode *inode,
282 struct buffer_head *root_bh,
283 u64 blkno, u32 cpos, u32 len, void *para);
284static int ocfs2_iterate_xattr_index_block(struct inode *inode,
285 struct buffer_head *root_bh,
286 xattr_tree_rec_func *rec_func,
287 void *para);
288static int ocfs2_delete_xattr_in_bucket(struct inode *inode,
289 struct ocfs2_xattr_bucket *bucket,
290 void *para);
291static int ocfs2_rm_xattr_cluster(struct inode *inode,
292 struct buffer_head *root_bh,
293 u64 blkno,
294 u32 cpos,
295 u32 len,
296 void *para);
297
Joel Beckerc58b6032008-11-26 13:36:24 -0800298static int ocfs2_mv_xattr_buckets(struct inode *inode, handle_t *handle,
299 u64 src_blk, u64 last_blk, u64 to_blk,
300 unsigned int start_bucket,
301 u32 *first_hash);
Tao Ma492a8a32009-08-18 11:43:17 +0800302static int ocfs2_prepare_refcount_xattr(struct inode *inode,
303 struct ocfs2_dinode *di,
304 struct ocfs2_xattr_info *xi,
305 struct ocfs2_xattr_search *xis,
306 struct ocfs2_xattr_search *xbs,
307 struct ocfs2_refcount_tree **ref_tree,
308 int *meta_need,
309 int *credits);
Tao Mace9c5a52009-08-18 11:43:59 +0800310static int ocfs2_get_xattr_tree_value_root(struct super_block *sb,
311 struct ocfs2_xattr_bucket *bucket,
312 int offset,
313 struct ocfs2_xattr_value_root **xv,
314 struct buffer_head **bh);
Tao Maa3944252008-08-18 17:38:54 +0800315
Tiger Yang0030e002008-10-23 16:33:33 +0800316static inline u16 ocfs2_xattr_buckets_per_cluster(struct ocfs2_super *osb)
317{
318 return (1 << osb->s_clustersize_bits) / OCFS2_XATTR_BUCKET_SIZE;
319}
320
321static inline u16 ocfs2_blocks_per_xattr_bucket(struct super_block *sb)
322{
323 return OCFS2_XATTR_BUCKET_SIZE / (1 << sb->s_blocksize_bits);
324}
325
Joel Becker9c7759a2008-10-24 16:21:03 -0700326#define bucket_blkno(_b) ((_b)->bu_bhs[0]->b_blocknr)
Joel Becker51def392008-10-24 16:57:21 -0700327#define bucket_block(_b, _n) ((_b)->bu_bhs[(_n)]->b_data)
Joel Becker3e632942008-10-24 17:04:49 -0700328#define bucket_xh(_b) ((struct ocfs2_xattr_header *)bucket_block((_b), 0))
Joel Becker9c7759a2008-10-24 16:21:03 -0700329
Joel Beckerba937122008-10-24 19:13:20 -0700330static struct ocfs2_xattr_bucket *ocfs2_xattr_bucket_new(struct inode *inode)
Joel Becker6dde41d2008-10-24 17:16:48 -0700331{
Joel Beckerba937122008-10-24 19:13:20 -0700332 struct ocfs2_xattr_bucket *bucket;
333 int blks = ocfs2_blocks_per_xattr_bucket(inode->i_sb);
Joel Becker6dde41d2008-10-24 17:16:48 -0700334
Joel Beckerba937122008-10-24 19:13:20 -0700335 BUG_ON(blks > OCFS2_XATTR_MAX_BLOCKS_PER_BUCKET);
336
337 bucket = kzalloc(sizeof(struct ocfs2_xattr_bucket), GFP_NOFS);
338 if (bucket) {
339 bucket->bu_inode = inode;
340 bucket->bu_blocks = blks;
341 }
342
343 return bucket;
344}
345
346static void ocfs2_xattr_bucket_relse(struct ocfs2_xattr_bucket *bucket)
347{
348 int i;
349
350 for (i = 0; i < bucket->bu_blocks; i++) {
Joel Becker6dde41d2008-10-24 17:16:48 -0700351 brelse(bucket->bu_bhs[i]);
352 bucket->bu_bhs[i] = NULL;
353 }
354}
355
Joel Beckerba937122008-10-24 19:13:20 -0700356static void ocfs2_xattr_bucket_free(struct ocfs2_xattr_bucket *bucket)
357{
358 if (bucket) {
359 ocfs2_xattr_bucket_relse(bucket);
360 bucket->bu_inode = NULL;
361 kfree(bucket);
362 }
363}
364
/*
 * A bucket that has never been written to disk doesn't need to be
 * read.  We just need the buffer_heads.  Don't call this for
 * buckets that are already on disk.  ocfs2_read_xattr_bucket() initializes
 * them fully.
 *
 * @new distinguishes truly fresh blocks (mark them new+uptodate in the
 * inode's metadata cache) from blocks being re-initialized (just mark
 * uptodate).  On any failure, every buffer_head grabbed so far is
 * released before returning.
 */
static int ocfs2_init_xattr_bucket(struct ocfs2_xattr_bucket *bucket,
				   u64 xb_blkno, int new)
{
	int i, rc = 0;

	for (i = 0; i < bucket->bu_blocks; i++) {
		/* sb_getblk() only allocates the bh; it does no disk I/O */
		bucket->bu_bhs[i] = sb_getblk(bucket->bu_inode->i_sb,
					      xb_blkno + i);
		if (!bucket->bu_bhs[i]) {
			rc = -ENOMEM;
			mlog_errno(rc);
			break;
		}

		if (!ocfs2_buffer_uptodate(INODE_CACHE(bucket->bu_inode),
					   bucket->bu_bhs[i])) {
			if (new)
				ocfs2_set_new_buffer_uptodate(INODE_CACHE(bucket->bu_inode),
							      bucket->bu_bhs[i]);
			else {
				set_buffer_uptodate(bucket->bu_bhs[i]);
				ocfs2_set_buffer_uptodate(INODE_CACHE(bucket->bu_inode),
							  bucket->bu_bhs[i]);
			}
		}
	}

	if (rc)
		ocfs2_xattr_bucket_relse(bucket);
	return rc;
}
402
/* Read the xattr bucket at xb_blkno */
/*
 * Reads all bu_blocks buffers of the bucket, then verifies the bucket's
 * metadata ECC.  The ECC check runs under osb_xattr_lock — presumably to
 * serialize against concurrent ECC recomputation (see the matching lock
 * in ocfs2_xattr_bucket_journal_dirty()); verify before relying on it.
 * On any failure the bucket's buffers are released.
 */
static int ocfs2_read_xattr_bucket(struct ocfs2_xattr_bucket *bucket,
				   u64 xb_blkno)
{
	int rc;

	rc = ocfs2_read_blocks(INODE_CACHE(bucket->bu_inode), xb_blkno,
			       bucket->bu_blocks, bucket->bu_bhs, 0,
			       NULL);
	if (!rc) {
		spin_lock(&OCFS2_SB(bucket->bu_inode->i_sb)->osb_xattr_lock);
		rc = ocfs2_validate_meta_ecc_bhs(bucket->bu_inode->i_sb,
						 bucket->bu_bhs,
						 bucket->bu_blocks,
						 &bucket_xh(bucket)->xh_check);
		spin_unlock(&OCFS2_SB(bucket->bu_inode->i_sb)->osb_xattr_lock);
		if (rc)
			mlog_errno(rc);
	}

	if (rc)
		ocfs2_xattr_bucket_relse(bucket);
	return rc;
}
427
Joel Becker1224be02008-10-24 18:47:33 -0700428static int ocfs2_xattr_bucket_journal_access(handle_t *handle,
Joel Becker1224be02008-10-24 18:47:33 -0700429 struct ocfs2_xattr_bucket *bucket,
430 int type)
431{
432 int i, rc = 0;
Joel Becker1224be02008-10-24 18:47:33 -0700433
Joel Beckerba937122008-10-24 19:13:20 -0700434 for (i = 0; i < bucket->bu_blocks; i++) {
Joel Becker0cf2f762009-02-12 16:41:25 -0800435 rc = ocfs2_journal_access(handle,
436 INODE_CACHE(bucket->bu_inode),
Joel Becker1224be02008-10-24 18:47:33 -0700437 bucket->bu_bhs[i], type);
438 if (rc) {
439 mlog_errno(rc);
440 break;
441 }
442 }
443
444 return rc;
445}
446
/*
 * Recompute the bucket's metadata ECC (under osb_xattr_lock, matching
 * the validate path in ocfs2_read_xattr_bucket()), then dirty every
 * block of the bucket in the journal.  The ECC must be updated before
 * the blocks are dirtied so the checksum written out matches the data.
 */
static void ocfs2_xattr_bucket_journal_dirty(handle_t *handle,
					     struct ocfs2_xattr_bucket *bucket)
{
	int i;

	spin_lock(&OCFS2_SB(bucket->bu_inode->i_sb)->osb_xattr_lock);
	ocfs2_compute_meta_ecc_bhs(bucket->bu_inode->i_sb,
				   bucket->bu_bhs, bucket->bu_blocks,
				   &bucket_xh(bucket)->xh_check);
	spin_unlock(&OCFS2_SB(bucket->bu_inode->i_sb)->osb_xattr_lock);

	for (i = 0; i < bucket->bu_blocks; i++)
		ocfs2_journal_dirty(handle, bucket->bu_bhs[i]);
}
461
Joel Beckerba937122008-10-24 19:13:20 -0700462static void ocfs2_xattr_bucket_copy_data(struct ocfs2_xattr_bucket *dest,
Joel Becker4980c6d2008-10-24 18:54:43 -0700463 struct ocfs2_xattr_bucket *src)
464{
465 int i;
Joel Beckerba937122008-10-24 19:13:20 -0700466 int blocksize = src->bu_inode->i_sb->s_blocksize;
Joel Becker4980c6d2008-10-24 18:54:43 -0700467
Joel Beckerba937122008-10-24 19:13:20 -0700468 BUG_ON(dest->bu_blocks != src->bu_blocks);
469 BUG_ON(dest->bu_inode != src->bu_inode);
470
471 for (i = 0; i < src->bu_blocks; i++) {
Joel Becker4980c6d2008-10-24 18:54:43 -0700472 memcpy(bucket_block(dest, i), bucket_block(src, i),
473 blocksize);
474 }
475}
Joel Becker1224be02008-10-24 18:47:33 -0700476
/*
 * Validation callback for xattr blocks read through
 * ocfs2_read_xattr_block(): checks metadata ECC, then the block
 * signature, self-referential block number, and fs generation.
 * Returns 0 if valid; an ECC failure returns the error non-fatally,
 * while the later checks go through ocfs2_error() (fatal corruption).
 */
static int ocfs2_validate_xattr_block(struct super_block *sb,
				      struct buffer_head *bh)
{
	int rc;
	struct ocfs2_xattr_block *xb =
		(struct ocfs2_xattr_block *)bh->b_data;

	trace_ocfs2_validate_xattr_block((unsigned long long)bh->b_blocknr);

	BUG_ON(!buffer_uptodate(bh));

	/*
	 * If the ecc fails, we return the error but otherwise
	 * leave the filesystem running.  We know any error is
	 * local to this block.
	 */
	rc = ocfs2_validate_meta_ecc(sb, bh->b_data, &xb->xb_check);
	if (rc)
		return rc;

	/*
	 * Errors after here are fatal
	 */

	if (!OCFS2_IS_VALID_XATTR_BLOCK(xb)) {
		return ocfs2_error(sb,
				   "Extended attribute block #%llu has bad signature %.*s\n",
				   (unsigned long long)bh->b_blocknr, 7,
				   xb->xb_signature);
	}

	if (le64_to_cpu(xb->xb_blkno) != bh->b_blocknr) {
		return ocfs2_error(sb,
				   "Extended attribute block #%llu has an invalid xb_blkno of %llu\n",
				   (unsigned long long)bh->b_blocknr,
				   (unsigned long long)le64_to_cpu(xb->xb_blkno));
	}

	if (le32_to_cpu(xb->xb_fs_generation) != OCFS2_SB(sb)->fs_generation) {
		return ocfs2_error(sb,
				   "Extended attribute block #%llu has an invalid xb_fs_generation of #%u\n",
				   (unsigned long long)bh->b_blocknr,
				   le32_to_cpu(xb->xb_fs_generation));
	}

	return 0;
}
524
/*
 * Read (or find cached) the xattr block at @xb_blkno, validating it
 * with ocfs2_validate_xattr_block().  If *bh is NULL on entry and the
 * read succeeds, the newly allocated buffer_head is passed back through
 * *bh; the caller owns the reference.
 */
static int ocfs2_read_xattr_block(struct inode *inode, u64 xb_blkno,
				  struct buffer_head **bh)
{
	int rc;
	struct buffer_head *tmp = *bh;

	rc = ocfs2_read_block(INODE_CACHE(inode), xb_blkno, &tmp,
			      ocfs2_validate_xattr_block);

	/* If ocfs2_read_block() got us a new bh, pass it up. */
	if (!rc && !*bh)
		*bh = tmp;

	return rc;
}
540
Tao Ma936b8832008-10-09 23:06:14 +0800541static inline const char *ocfs2_xattr_prefix(int name_index)
Tiger Yangcf1d6c72008-08-18 17:11:00 +0800542{
Stephen Hemminger537d81c2010-05-13 17:53:22 -0700543 const struct xattr_handler *handler = NULL;
Tiger Yangcf1d6c72008-08-18 17:11:00 +0800544
545 if (name_index > 0 && name_index < OCFS2_XATTR_MAX)
546 handler = ocfs2_xattr_handler_map[name_index];
Andreas Gruenbacher98e9cb52015-12-02 14:44:36 +0100547 return handler ? xattr_prefix(handler) : NULL;
Tiger Yangcf1d6c72008-08-18 17:11:00 +0800548}
549
Mark Fasheh40daa162008-10-07 14:31:42 -0700550static u32 ocfs2_xattr_name_hash(struct inode *inode,
Tao Ma2057e5c2008-10-09 23:06:13 +0800551 const char *name,
Mark Fasheh40daa162008-10-07 14:31:42 -0700552 int name_len)
Tiger Yangcf1d6c72008-08-18 17:11:00 +0800553{
554 /* Get hash value of uuid from super block */
555 u32 hash = OCFS2_SB(inode->i_sb)->uuid_hash;
556 int i;
557
Tiger Yangcf1d6c72008-08-18 17:11:00 +0800558 /* hash extended attribute name */
559 for (i = 0; i < name_len; i++) {
560 hash = (hash << OCFS2_HASH_SHIFT) ^
561 (hash >> (8*sizeof(hash) - OCFS2_HASH_SHIFT)) ^
562 *name++;
563 }
564
565 return hash;
566}
567
Tiger Yang534eadd2008-11-14 11:16:41 +0800568static int ocfs2_xattr_entry_real_size(int name_len, size_t value_len)
569{
Joel Becker199799a2009-08-14 19:04:15 -0700570 return namevalue_size(name_len, value_len) +
571 sizeof(struct ocfs2_xattr_entry);
572}
Tiger Yang534eadd2008-11-14 11:16:41 +0800573
Joel Becker199799a2009-08-14 19:04:15 -0700574static int ocfs2_xi_entry_usage(struct ocfs2_xattr_info *xi)
575{
576 return namevalue_size_xi(xi) +
577 sizeof(struct ocfs2_xattr_entry);
578}
Tiger Yang534eadd2008-11-14 11:16:41 +0800579
Joel Becker199799a2009-08-14 19:04:15 -0700580static int ocfs2_xe_entry_usage(struct ocfs2_xattr_entry *xe)
581{
582 return namevalue_size_xe(xe) +
583 sizeof(struct ocfs2_xattr_entry);
Tiger Yang534eadd2008-11-14 11:16:41 +0800584}
585
/*
 * Estimate the allocator reservations and journal credits needed to
 * attach the initial security xattr @si to a new inode under @dir.
 * May reserve one metadata block (for an external xattr block) via
 * *xattr_ac, and adds cluster/credit requirements for an out-of-inline
 * value to *want_clusters / *xattr_credits.  Returns 0 or -errno from
 * the metadata reservation.  Caller frees *xattr_ac.
 */
int ocfs2_calc_security_init(struct inode *dir,
			     struct ocfs2_security_xattr_info *si,
			     int *want_clusters,
			     int *xattr_credits,
			     struct ocfs2_alloc_context **xattr_ac)
{
	int ret = 0;
	struct ocfs2_super *osb = OCFS2_SB(dir->i_sb);
	int s_size = ocfs2_xattr_entry_real_size(strlen(si->name),
						 si->value_len);

	/*
	 * The max space of security xattr taken inline is
	 * 256(name) + 80(value) + 16(entry) = 352 bytes,
	 * So reserve one metadata block for it is ok.
	 */
	if (dir->i_sb->s_blocksize == OCFS2_MIN_BLOCKSIZE ||
	    s_size > OCFS2_XATTR_FREE_IN_IBODY) {
		ret = ocfs2_reserve_new_metadata_blocks(osb, 1, xattr_ac);
		if (ret) {
			mlog_errno(ret);
			return ret;
		}
		*xattr_credits += OCFS2_XATTR_BLOCK_CREATE_CREDITS;
	}

	/* reserve clusters for xattr value which will be set in B tree*/
	if (si->value_len > OCFS2_XATTR_INLINE_SIZE) {
		int new_clusters = ocfs2_clusters_for_bytes(dir->i_sb,
							    si->value_len);

		*xattr_credits += ocfs2_clusters_to_blocks(dir->i_sb,
							   new_clusters);
		*want_clusters += new_clusters;
	}
	return ret;
}
623
Tiger Yang89c38bd2008-11-14 11:17:41 +0800624int ocfs2_calc_xattr_init(struct inode *dir,
625 struct buffer_head *dir_bh,
Al Viro67697cb2011-07-26 02:55:32 -0400626 umode_t mode,
Tiger Yang89c38bd2008-11-14 11:17:41 +0800627 struct ocfs2_security_xattr_info *si,
628 int *want_clusters,
629 int *xattr_credits,
Mark Fasheh9b7895e2008-11-12 16:27:44 -0800630 int *want_meta)
Tiger Yang89c38bd2008-11-14 11:17:41 +0800631{
632 int ret = 0;
633 struct ocfs2_super *osb = OCFS2_SB(dir->i_sb);
Tiger Yang0e445b62008-12-09 16:42:51 +0800634 int s_size = 0, a_size = 0, acl_len = 0, new_clusters;
Tiger Yang89c38bd2008-11-14 11:17:41 +0800635
636 if (si->enable)
637 s_size = ocfs2_xattr_entry_real_size(strlen(si->name),
638 si->value_len);
639
640 if (osb->s_mount_opt & OCFS2_MOUNT_POSIX_ACL) {
piaojun1d5fdc12018-01-31 16:14:59 -0800641 down_read(&OCFS2_I(dir)->ip_xattr_sem);
Tiger Yang89c38bd2008-11-14 11:17:41 +0800642 acl_len = ocfs2_xattr_get_nolock(dir, dir_bh,
643 OCFS2_XATTR_INDEX_POSIX_ACL_DEFAULT,
644 "", NULL, 0);
piaojun1d5fdc12018-01-31 16:14:59 -0800645 up_read(&OCFS2_I(dir)->ip_xattr_sem);
Tiger Yang89c38bd2008-11-14 11:17:41 +0800646 if (acl_len > 0) {
647 a_size = ocfs2_xattr_entry_real_size(0, acl_len);
648 if (S_ISDIR(mode))
649 a_size <<= 1;
650 } else if (acl_len != 0 && acl_len != -ENODATA) {
651 mlog_errno(ret);
652 return ret;
653 }
654 }
655
656 if (!(s_size + a_size))
657 return ret;
658
659 /*
660 * The max space of security xattr taken inline is
661 * 256(name) + 80(value) + 16(entry) = 352 bytes,
662 * The max space of acl xattr taken inline is
663 * 80(value) + 16(entry) * 2(if directory) = 192 bytes,
664 * when blocksize = 512, may reserve one more cluser for
665 * xattr bucket, otherwise reserve one metadata block
666 * for them is ok.
Tiger Yang6c9fd1d2009-03-06 10:19:30 +0800667 * If this is a new directory with inline data,
668 * we choose to reserve the entire inline area for
669 * directory contents and force an external xattr block.
Tiger Yang89c38bd2008-11-14 11:17:41 +0800670 */
671 if (dir->i_sb->s_blocksize == OCFS2_MIN_BLOCKSIZE ||
Tiger Yang6c9fd1d2009-03-06 10:19:30 +0800672 (S_ISDIR(mode) && ocfs2_supports_inline_data(osb)) ||
Tiger Yang89c38bd2008-11-14 11:17:41 +0800673 (s_size + a_size) > OCFS2_XATTR_FREE_IN_IBODY) {
Mark Fasheh9b7895e2008-11-12 16:27:44 -0800674 *want_meta = *want_meta + 1;
Tiger Yang89c38bd2008-11-14 11:17:41 +0800675 *xattr_credits += OCFS2_XATTR_BLOCK_CREATE_CREDITS;
676 }
677
678 if (dir->i_sb->s_blocksize == OCFS2_MIN_BLOCKSIZE &&
679 (s_size + a_size) > OCFS2_XATTR_FREE_IN_BLOCK(dir)) {
680 *want_clusters += 1;
681 *xattr_credits += ocfs2_blocks_per_xattr_bucket(dir->i_sb);
682 }
683
Tiger Yang0e445b62008-12-09 16:42:51 +0800684 /*
685 * reserve credits and clusters for xattrs which has large value
686 * and have to be set outside
687 */
688 if (si->enable && si->value_len > OCFS2_XATTR_INLINE_SIZE) {
689 new_clusters = ocfs2_clusters_for_bytes(dir->i_sb,
690 si->value_len);
691 *xattr_credits += ocfs2_clusters_to_blocks(dir->i_sb,
692 new_clusters);
693 *want_clusters += new_clusters;
694 }
Tiger Yang89c38bd2008-11-14 11:17:41 +0800695 if (osb->s_mount_opt & OCFS2_MOUNT_POSIX_ACL &&
696 acl_len > OCFS2_XATTR_INLINE_SIZE) {
Tiger Yang0e445b62008-12-09 16:42:51 +0800697 /* for directory, it has DEFAULT and ACCESS two types of acls */
698 new_clusters = (S_ISDIR(mode) ? 2 : 1) *
699 ocfs2_clusters_for_bytes(dir->i_sb, acl_len);
700 *xattr_credits += ocfs2_clusters_to_blocks(dir->i_sb,
701 new_clusters);
702 *want_clusters += new_clusters;
Tiger Yang89c38bd2008-11-14 11:17:41 +0800703 }
704
705 return ret;
706}
707
/*
 * Grow the value tree in @vb by @clusters_to_add clusters, using the
 * reservations and transaction carried in @ctxt.  The loop repeats
 * because ocfs2_add_clusters_in_btree() may make partial progress and
 * ask for a restart (why != RESTART_NONE), in which case the running
 * transaction is extended with fresh credits before continuing.
 * Returns 0 on success or a negative errno.
 */
static int ocfs2_xattr_extend_allocation(struct inode *inode,
					 u32 clusters_to_add,
					 struct ocfs2_xattr_value_buf *vb,
					 struct ocfs2_xattr_set_ctxt *ctxt)
{
	int status = 0, credits;
	handle_t *handle = ctxt->handle;
	enum ocfs2_alloc_restarted why;
	u32 prev_clusters, logical_start = le32_to_cpu(vb->vb_xv->xr_clusters);
	struct ocfs2_extent_tree et;

	ocfs2_init_xattr_value_extent_tree(&et, INODE_CACHE(inode), vb);

	while (clusters_to_add) {
		trace_ocfs2_xattr_extend_allocation(clusters_to_add);

		/* Re-declare write access each pass; the handle may change state */
		status = vb->vb_access(handle, INODE_CACHE(inode), vb->vb_bh,
				       OCFS2_JOURNAL_ACCESS_WRITE);
		if (status < 0) {
			mlog_errno(status);
			break;
		}

		prev_clusters = le32_to_cpu(vb->vb_xv->xr_clusters);
		status = ocfs2_add_clusters_in_btree(handle,
						     &et,
						     &logical_start,
						     clusters_to_add,
						     0,
						     ctxt->data_ac,
						     ctxt->meta_ac,
						     &why);
		/* -EAGAIN means partial progress + restart, not failure */
		if ((status < 0) && (status != -EAGAIN)) {
			if (status != -ENOSPC)
				mlog_errno(status);
			break;
		}

		ocfs2_journal_dirty(handle, vb->vb_bh);

		/* Account for whatever this pass actually allocated */
		clusters_to_add -= le32_to_cpu(vb->vb_xv->xr_clusters) -
					prev_clusters;

		if (why != RESTART_NONE && clusters_to_add) {
			/*
			 * We can only fail in case the alloc file doesn't give
			 * up enough clusters.
			 */
			BUG_ON(why == RESTART_META);

			credits = ocfs2_calc_extend_credits(inode->i_sb,
							    &vb->vb_xv->xr_list);
			status = ocfs2_extend_trans(handle, credits);
			if (status < 0) {
				/* NOTE(review): real error is replaced by -ENOMEM
				 * here — presumably intentional, verify */
				status = -ENOMEM;
				mlog_errno(status);
				break;
			}
		}
	}

	return status;
}
771
772static int __ocfs2_remove_xattr_range(struct inode *inode,
Joel Beckerd72cc722008-12-09 14:30:41 -0800773 struct ocfs2_xattr_value_buf *vb,
Tao Maf56654c2008-08-18 17:38:48 +0800774 u32 cpos, u32 phys_cpos, u32 len,
Tao Ma492a8a32009-08-18 11:43:17 +0800775 unsigned int ext_flags,
Tao Ma78f30c32008-11-12 08:27:00 +0800776 struct ocfs2_xattr_set_ctxt *ctxt)
Tao Maf56654c2008-08-18 17:38:48 +0800777{
778 int ret;
779 u64 phys_blkno = ocfs2_clusters_to_blocks(inode->i_sb, phys_cpos);
Tao Ma85db90e2008-11-12 08:27:01 +0800780 handle_t *handle = ctxt->handle;
Joel Beckerf99b9b72008-08-20 19:36:33 -0700781 struct ocfs2_extent_tree et;
Tao Maf56654c2008-08-18 17:38:48 +0800782
Joel Becker5e404e92009-02-13 03:54:22 -0800783 ocfs2_init_xattr_value_extent_tree(&et, INODE_CACHE(inode), vb);
Joel Beckerf99b9b72008-08-20 19:36:33 -0700784
Joel Becker0cf2f762009-02-12 16:41:25 -0800785 ret = vb->vb_access(handle, INODE_CACHE(inode), vb->vb_bh,
Joel Beckerd72cc722008-12-09 14:30:41 -0800786 OCFS2_JOURNAL_ACCESS_WRITE);
Tao Maf56654c2008-08-18 17:38:48 +0800787 if (ret) {
788 mlog_errno(ret);
Tao Ma85db90e2008-11-12 08:27:01 +0800789 goto out;
Tao Maf56654c2008-08-18 17:38:48 +0800790 }
791
Joel Beckerdbdcf6a2009-02-13 03:41:26 -0800792 ret = ocfs2_remove_extent(handle, &et, cpos, len, ctxt->meta_ac,
Tao Ma78f30c32008-11-12 08:27:00 +0800793 &ctxt->dealloc);
Tao Maf56654c2008-08-18 17:38:48 +0800794 if (ret) {
795 mlog_errno(ret);
Tao Ma85db90e2008-11-12 08:27:01 +0800796 goto out;
Tao Maf56654c2008-08-18 17:38:48 +0800797 }
798
Joel Beckerd72cc722008-12-09 14:30:41 -0800799 le32_add_cpu(&vb->vb_xv->xr_clusters, -len);
Joel Beckerec20cec2010-03-19 14:13:52 -0700800 ocfs2_journal_dirty(handle, vb->vb_bh);
Tao Maf56654c2008-08-18 17:38:48 +0800801
Tao Ma492a8a32009-08-18 11:43:17 +0800802 if (ext_flags & OCFS2_EXT_REFCOUNTED)
803 ret = ocfs2_decrease_refcount(inode, handle,
804 ocfs2_blocks_to_clusters(inode->i_sb,
805 phys_blkno),
806 len, ctxt->meta_ac, &ctxt->dealloc, 1);
807 else
808 ret = ocfs2_cache_cluster_dealloc(&ctxt->dealloc,
809 phys_blkno, len);
Tao Maf56654c2008-08-18 17:38:48 +0800810 if (ret)
811 mlog_errno(ret);
812
Tao Maf56654c2008-08-18 17:38:48 +0800813out:
Tao Maf56654c2008-08-18 17:38:48 +0800814 return ret;
815}
816
/*
 * Shrink an xattr value tree from old_clusters down to new_clusters.
 *
 * Walks the extent list starting at the new tail (cpos == new_clusters)
 * and removes each physical extent in turn, clamping each removal to the
 * remaining truncation length.  Removed ranges are also dropped from the
 * uptodate cache.  No-op when the value is not actually shrinking.
 * Returns 0 on success or a negative errno.
 */
static int ocfs2_xattr_shrink_size(struct inode *inode,
				   u32 old_clusters,
				   u32 new_clusters,
				   struct ocfs2_xattr_value_buf *vb,
				   struct ocfs2_xattr_set_ctxt *ctxt)
{
	int ret = 0;
	unsigned int ext_flags;
	u32 trunc_len, cpos, phys_cpos, alloc_size;
	u64 block;

	if (old_clusters <= new_clusters)
		return 0;

	cpos = new_clusters;
	trunc_len = old_clusters - new_clusters;
	while (trunc_len) {
		/* Map the next logical extent to be truncated. */
		ret = ocfs2_xattr_get_clusters(inode, cpos, &phys_cpos,
					       &alloc_size,
					       &vb->vb_xv->xr_list, &ext_flags);
		if (ret) {
			mlog_errno(ret);
			goto out;
		}

		/* Never remove more than what is left to truncate. */
		if (alloc_size > trunc_len)
			alloc_size = trunc_len;

		ret = __ocfs2_remove_xattr_range(inode, vb, cpos,
						 phys_cpos, alloc_size,
						 ext_flags, ctxt);
		if (ret) {
			mlog_errno(ret);
			goto out;
		}

		/* Forget any cached blocks backing the removed clusters. */
		block = ocfs2_clusters_to_blocks(inode->i_sb, phys_cpos);
		ocfs2_remove_xattr_clusters_from_cache(INODE_CACHE(inode),
						       block, alloc_size);
		cpos += alloc_size;
		trunc_len -= alloc_size;
	}

out:
	return ret;
}
863
864static int ocfs2_xattr_value_truncate(struct inode *inode,
Joel Beckerb3e5d372008-12-09 15:01:04 -0800865 struct ocfs2_xattr_value_buf *vb,
Tao Ma78f30c32008-11-12 08:27:00 +0800866 int len,
867 struct ocfs2_xattr_set_ctxt *ctxt)
Tao Maf56654c2008-08-18 17:38:48 +0800868{
869 int ret;
870 u32 new_clusters = ocfs2_clusters_for_bytes(inode->i_sb, len);
Joel Beckerb3e5d372008-12-09 15:01:04 -0800871 u32 old_clusters = le32_to_cpu(vb->vb_xv->xr_clusters);
Tao Maf56654c2008-08-18 17:38:48 +0800872
873 if (new_clusters == old_clusters)
874 return 0;
875
876 if (new_clusters > old_clusters)
877 ret = ocfs2_xattr_extend_allocation(inode,
878 new_clusters - old_clusters,
Joel Beckerb3e5d372008-12-09 15:01:04 -0800879 vb, ctxt);
Tao Maf56654c2008-08-18 17:38:48 +0800880 else
881 ret = ocfs2_xattr_shrink_size(inode,
882 old_clusters, new_clusters,
Joel Beckerb3e5d372008-12-09 15:01:04 -0800883 vb, ctxt);
Tao Maf56654c2008-08-18 17:38:48 +0800884
885 return ret;
886}
Tiger Yangcf1d6c72008-08-18 17:11:00 +0800887
Andreas Gruenbacher1046cb12015-12-02 14:44:42 +0100888static int ocfs2_xattr_list_entry(struct super_block *sb,
889 char *buffer, size_t size,
890 size_t *result, int type,
Tao Ma936b8832008-10-09 23:06:14 +0800891 const char *name, int name_len)
892{
893 char *p = buffer + *result;
Andreas Gruenbacher1046cb12015-12-02 14:44:42 +0100894 const char *prefix;
895 int prefix_len;
896 int total_len;
Tao Ma936b8832008-10-09 23:06:14 +0800897
Andreas Gruenbacher1046cb12015-12-02 14:44:42 +0100898 switch(type) {
899 case OCFS2_XATTR_INDEX_USER:
900 if (OCFS2_SB(sb)->s_mount_opt & OCFS2_MOUNT_NOUSERXATTR)
901 return 0;
902 break;
903
904 case OCFS2_XATTR_INDEX_POSIX_ACL_ACCESS:
905 case OCFS2_XATTR_INDEX_POSIX_ACL_DEFAULT:
906 if (!(sb->s_flags & MS_POSIXACL))
907 return 0;
908 break;
909
910 case OCFS2_XATTR_INDEX_TRUSTED:
911 if (!capable(CAP_SYS_ADMIN))
912 return 0;
913 break;
914 }
915
916 prefix = ocfs2_xattr_prefix(type);
917 if (!prefix)
918 return 0;
919 prefix_len = strlen(prefix);
920 total_len = prefix_len + name_len + 1;
Tao Ma936b8832008-10-09 23:06:14 +0800921 *result += total_len;
922
923 /* we are just looking for how big our buffer needs to be */
924 if (!size)
925 return 0;
926
927 if (*result > size)
928 return -ERANGE;
929
930 memcpy(p, prefix, prefix_len);
931 memcpy(p + prefix_len, name, name_len);
932 p[prefix_len + name_len] = '\0';
933
934 return 0;
935}
936
Tiger Yangcf1d6c72008-08-18 17:11:00 +0800937static int ocfs2_xattr_list_entries(struct inode *inode,
938 struct ocfs2_xattr_header *header,
939 char *buffer, size_t buffer_size)
940{
Tao Ma936b8832008-10-09 23:06:14 +0800941 size_t result = 0;
942 int i, type, ret;
Andreas Gruenbacher1046cb12015-12-02 14:44:42 +0100943 const char *name;
Tiger Yangcf1d6c72008-08-18 17:11:00 +0800944
945 for (i = 0 ; i < le16_to_cpu(header->xh_count); i++) {
946 struct ocfs2_xattr_entry *entry = &header->xh_entries[i];
Tao Ma936b8832008-10-09 23:06:14 +0800947 type = ocfs2_xattr_get_type(entry);
Andreas Gruenbacher1046cb12015-12-02 14:44:42 +0100948 name = (const char *)header +
949 le16_to_cpu(entry->xe_name_offset);
Tiger Yangcf1d6c72008-08-18 17:11:00 +0800950
Andreas Gruenbacher1046cb12015-12-02 14:44:42 +0100951 ret = ocfs2_xattr_list_entry(inode->i_sb,
952 buffer, buffer_size,
953 &result, type, name,
954 entry->xe_name_len);
955 if (ret)
956 return ret;
Tiger Yangcf1d6c72008-08-18 17:11:00 +0800957 }
958
Tao Ma936b8832008-10-09 23:06:14 +0800959 return result;
Tiger Yangcf1d6c72008-08-18 17:11:00 +0800960}
961
Tao Ma8b2c0db2009-08-18 11:43:49 +0800962int ocfs2_has_inline_xattr_value_outside(struct inode *inode,
963 struct ocfs2_dinode *di)
964{
965 struct ocfs2_xattr_header *xh;
966 int i;
967
968 xh = (struct ocfs2_xattr_header *)
969 ((void *)di + inode->i_sb->s_blocksize -
970 le16_to_cpu(di->i_xattr_inline_size));
971
972 for (i = 0; i < le16_to_cpu(xh->xh_count); i++)
973 if (!ocfs2_xattr_is_local(&xh->xh_entries[i]))
974 return 1;
975
976 return 0;
977}
978
Tiger Yangcf1d6c72008-08-18 17:11:00 +0800979static int ocfs2_xattr_ibody_list(struct inode *inode,
980 struct ocfs2_dinode *di,
981 char *buffer,
982 size_t buffer_size)
983{
984 struct ocfs2_xattr_header *header = NULL;
985 struct ocfs2_inode_info *oi = OCFS2_I(inode);
986 int ret = 0;
987
988 if (!(oi->ip_dyn_features & OCFS2_INLINE_XATTR_FL))
989 return ret;
990
991 header = (struct ocfs2_xattr_header *)
992 ((void *)di + inode->i_sb->s_blocksize -
993 le16_to_cpu(di->i_xattr_inline_size));
994
995 ret = ocfs2_xattr_list_entries(inode, header, buffer, buffer_size);
996
997 return ret;
998}
999
/*
 * List the xattrs stored in the inode's external xattr block, if any.
 * Handles both the unindexed (single in-block header) and the indexed
 * (bucket tree) block formats.  Returns bytes produced (or required)
 * or a negative errno.
 */
static int ocfs2_xattr_block_list(struct inode *inode,
				  struct ocfs2_dinode *di,
				  char *buffer,
				  size_t buffer_size)
{
	struct buffer_head *blk_bh = NULL;
	struct ocfs2_xattr_block *xb;
	int ret = 0;

	/* No external xattr block allocated. */
	if (!di->i_xattr_loc)
		return ret;

	ret = ocfs2_read_xattr_block(inode, le64_to_cpu(di->i_xattr_loc),
				     &blk_bh);
	if (ret < 0) {
		mlog_errno(ret);
		return ret;
	}

	xb = (struct ocfs2_xattr_block *)blk_bh->b_data;
	if (!(le16_to_cpu(xb->xb_flags) & OCFS2_XATTR_INDEXED)) {
		struct ocfs2_xattr_header *header = &xb->xb_attrs.xb_header;
		ret = ocfs2_xattr_list_entries(inode, header,
					       buffer, buffer_size);
	} else
		/* Indexed block: walk the bucket tree instead. */
		ret = ocfs2_xattr_tree_list_index_block(inode, blk_bh,
						   buffer, buffer_size);

	brelse(blk_bh);

	return ret;
}
1032
/*
 * listxattr inode operation: concatenate the names from the inline
 * (in-inode) xattr region and from the external xattr block into
 * 'buffer'.  With buffer == NULL only the required size is computed.
 *
 * Holds a shared cluster lock on the inode and ip_xattr_sem for the
 * whole walk.  Returns total bytes produced (or required) or -errno.
 */
ssize_t ocfs2_listxattr(struct dentry *dentry,
			char *buffer,
			size_t size)
{
	int ret = 0, i_ret = 0, b_ret = 0;
	struct buffer_head *di_bh = NULL;
	struct ocfs2_dinode *di = NULL;
	struct ocfs2_inode_info *oi = OCFS2_I(d_inode(dentry));

	if (!ocfs2_supports_xattr(OCFS2_SB(dentry->d_sb)))
		return -EOPNOTSUPP;

	/* No xattrs at all on this inode; nothing to list. */
	if (!(oi->ip_dyn_features & OCFS2_HAS_XATTR_FL))
		return ret;

	ret = ocfs2_inode_lock(d_inode(dentry), &di_bh, 0);
	if (ret < 0) {
		mlog_errno(ret);
		return ret;
	}

	di = (struct ocfs2_dinode *)di_bh->b_data;

	down_read(&oi->ip_xattr_sem);
	i_ret = ocfs2_xattr_ibody_list(d_inode(dentry), di, buffer, size);
	if (i_ret < 0)
		b_ret = 0;
	else {
		/* Advance past what the inline listing consumed. */
		if (buffer) {
			buffer += i_ret;
			size -= i_ret;
		}
		b_ret = ocfs2_xattr_block_list(d_inode(dentry), di,
					       buffer, size);
		if (b_ret < 0)
			i_ret = 0;
	}
	up_read(&oi->ip_xattr_sem);
	ocfs2_inode_unlock(d_inode(dentry), 0);

	brelse(di_bh);

	/* A failed pass zeroed the other counter, so this is either the
	 * total listing length or the surviving error code. */
	return i_ret + b_ret;
}
1077
1078static int ocfs2_xattr_find_entry(int name_index,
1079 const char *name,
1080 struct ocfs2_xattr_search *xs)
1081{
1082 struct ocfs2_xattr_entry *entry;
1083 size_t name_len;
1084 int i, cmp = 1;
1085
1086 if (name == NULL)
1087 return -EINVAL;
1088
1089 name_len = strlen(name);
1090 entry = xs->here;
1091 for (i = 0; i < le16_to_cpu(xs->header->xh_count); i++) {
1092 cmp = name_index - ocfs2_xattr_get_type(entry);
1093 if (!cmp)
1094 cmp = name_len - entry->xe_name_len;
1095 if (!cmp)
1096 cmp = memcmp(name, (xs->base +
1097 le16_to_cpu(entry->xe_name_offset)),
1098 name_len);
1099 if (cmp == 0)
1100 break;
1101 entry += 1;
1102 }
1103 xs->here = entry;
1104
1105 return cmp ? -ENODATA : 0;
1106}
1107
/*
 * Copy up to 'len' bytes of an xattr value that is stored outside the
 * header, by walking the value's extent list one cluster run at a time
 * and reading each backing block through the uptodate cache.
 * Returns 0 on success or a negative errno.
 */
static int ocfs2_xattr_get_value_outside(struct inode *inode,
					 struct ocfs2_xattr_value_root *xv,
					 void *buffer,
					 size_t len)
{
	u32 cpos, p_cluster, num_clusters, bpc, clusters;
	u64 blkno;
	int i, ret = 0;
	size_t cplen, blocksize;
	struct buffer_head *bh = NULL;
	struct ocfs2_extent_list *el;

	el = &xv->xr_list;
	clusters = le32_to_cpu(xv->xr_clusters);
	bpc = ocfs2_clusters_to_blocks(inode->i_sb, 1);
	blocksize = inode->i_sb->s_blocksize;

	cpos = 0;
	while (cpos < clusters) {
		/* Map the next logical run to its physical cluster. */
		ret = ocfs2_xattr_get_clusters(inode, cpos, &p_cluster,
					       &num_clusters, el, NULL);
		if (ret) {
			mlog_errno(ret);
			goto out;
		}

		blkno = ocfs2_clusters_to_blocks(inode->i_sb, p_cluster);
		/* Copy ocfs2_xattr_value */
		for (i = 0; i < num_clusters * bpc; i++, blkno++) {
			ret = ocfs2_read_block(INODE_CACHE(inode), blkno,
					       &bh, NULL);
			if (ret) {
				mlog_errno(ret);
				goto out;
			}

			/* The final block may hold only a partial chunk. */
			cplen = len >= blocksize ? blocksize : len;
			memcpy(buffer, bh->b_data, cplen);
			len -= cplen;
			buffer += cplen;

			brelse(bh);
			bh = NULL;
			if (len == 0)
				break;
		}
		cpos += num_clusters;
	}
out:
	return ret;
}
1159
/*
 * Look up an xattr in the inode's inline region and copy its value
 * into 'buffer'.  With buffer == NULL, only the value size is
 * reported.  Returns the value size, -ENODATA when the inode has no
 * inline region or the attribute is absent, -ERANGE when the buffer
 * is too small, or another negative errno.
 */
static int ocfs2_xattr_ibody_get(struct inode *inode,
				 int name_index,
				 const char *name,
				 void *buffer,
				 size_t buffer_size,
				 struct ocfs2_xattr_search *xs)
{
	struct ocfs2_inode_info *oi = OCFS2_I(inode);
	struct ocfs2_dinode *di = (struct ocfs2_dinode *)xs->inode_bh->b_data;
	struct ocfs2_xattr_value_root *xv;
	size_t size;
	int ret = 0;

	if (!(oi->ip_dyn_features & OCFS2_INLINE_XATTR_FL))
		return -ENODATA;

	/* The inline header sits at the tail of the inode block. */
	xs->end = (void *)di + inode->i_sb->s_blocksize;
	xs->header = (struct ocfs2_xattr_header *)
		(xs->end - le16_to_cpu(di->i_xattr_inline_size));
	xs->base = (void *)xs->header;
	xs->here = xs->header->xh_entries;

	ret = ocfs2_xattr_find_entry(name_index, name, xs);
	if (ret)
		return ret;
	size = le64_to_cpu(xs->here->xe_value_size);
	if (buffer) {
		if (size > buffer_size)
			return -ERANGE;
		if (ocfs2_xattr_is_local(xs->here)) {
			/* Local value: stored right after the name. */
			memcpy(buffer, (void *)xs->base +
			       le16_to_cpu(xs->here->xe_name_offset) +
			       OCFS2_XATTR_SIZE(xs->here->xe_name_len), size);
		} else {
			/* External value: the tree root follows the name. */
			xv = (struct ocfs2_xattr_value_root *)
				(xs->base + le16_to_cpu(
				 xs->here->xe_name_offset) +
				 OCFS2_XATTR_SIZE(xs->here->xe_name_len));
			ret = ocfs2_xattr_get_value_outside(inode, xv,
							    buffer, size);
			if (ret < 0) {
				mlog_errno(ret);
				return ret;
			}
		}
	}

	return size;
}
1209
/*
 * Look up an xattr in the inode's external xattr block (indexed or
 * unindexed) and copy its value into 'buffer'.  With buffer == NULL,
 * only the value size is reported.  Returns the size, -ENODATA when
 * the attribute does not exist, -ERANGE when the buffer is too small,
 * or another negative errno.
 */
static int ocfs2_xattr_block_get(struct inode *inode,
				 int name_index,
				 const char *name,
				 void *buffer,
				 size_t buffer_size,
				 struct ocfs2_xattr_search *xs)
{
	struct ocfs2_xattr_block *xb;
	struct ocfs2_xattr_value_root *xv;
	size_t size;
	int ret = -ENODATA, name_offset, name_len, i;
	int uninitialized_var(block_off);

	/* Scratch bucket used when the block is bucket-indexed. */
	xs->bucket = ocfs2_xattr_bucket_new(inode);
	if (!xs->bucket) {
		ret = -ENOMEM;
		mlog_errno(ret);
		goto cleanup;
	}

	ret = ocfs2_xattr_block_find(inode, name_index, name, xs);
	if (ret) {
		mlog_errno(ret);
		goto cleanup;
	}

	if (xs->not_found) {
		ret = -ENODATA;
		goto cleanup;
	}

	xb = (struct ocfs2_xattr_block *)xs->xattr_bh->b_data;
	size = le64_to_cpu(xs->here->xe_value_size);
	if (buffer) {
		ret = -ERANGE;
		if (size > buffer_size)
			goto cleanup;

		name_offset = le16_to_cpu(xs->here->xe_name_offset);
		name_len = OCFS2_XATTR_SIZE(xs->here->xe_name_len);
		i = xs->here - xs->header->xh_entries;

		if (le16_to_cpu(xb->xb_flags) & OCFS2_XATTR_INDEXED) {
			/* Indexed block: translate the entry index into a
			 * bucket block + in-block name offset. */
			ret = ocfs2_xattr_bucket_get_name_value(inode->i_sb,
								bucket_xh(xs->bucket),
								i,
								&block_off,
								&name_offset);
			if (ret) {
				mlog_errno(ret);
				goto cleanup;
			}
			xs->base = bucket_block(xs->bucket, block_off);
		}
		if (ocfs2_xattr_is_local(xs->here)) {
			/* Local value: stored right after the name. */
			memcpy(buffer, (void *)xs->base +
			       name_offset + name_len, size);
		} else {
			/* External value tree rooted after the name. */
			xv = (struct ocfs2_xattr_value_root *)
				(xs->base + name_offset + name_len);
			ret = ocfs2_xattr_get_value_outside(inode, xv,
							    buffer, size);
			if (ret < 0) {
				mlog_errno(ret);
				goto cleanup;
			}
		}
	}
	ret = size;
cleanup:
	ocfs2_xattr_bucket_free(xs->bucket);

	brelse(xs->xattr_bh);
	xs->xattr_bh = NULL;
	return ret;
}
1286
/*
 * Look up an xattr with locks already held; the caller passes the
 * inode block in di_bh (see ocfs2_xattr_get() for the locking this
 * expects).  The inline region is searched first, then the external
 * xattr block if one exists.  Returns the value size or -errno.
 */
int ocfs2_xattr_get_nolock(struct inode *inode,
			   struct buffer_head *di_bh,
			   int name_index,
			   const char *name,
			   void *buffer,
			   size_t buffer_size)
{
	int ret;
	struct ocfs2_dinode *di = NULL;
	struct ocfs2_inode_info *oi = OCFS2_I(inode);
	struct ocfs2_xattr_search xis = {
		.not_found = -ENODATA,
	};
	struct ocfs2_xattr_search xbs = {
		.not_found = -ENODATA,
	};

	if (!ocfs2_supports_xattr(OCFS2_SB(inode->i_sb)))
		return -EOPNOTSUPP;

	if (!(oi->ip_dyn_features & OCFS2_HAS_XATTR_FL))
		return -ENODATA;

	xis.inode_bh = xbs.inode_bh = di_bh;
	di = (struct ocfs2_dinode *)di_bh->b_data;

	ret = ocfs2_xattr_ibody_get(inode, name_index, name, buffer,
				    buffer_size, &xis);
	/* Not inline?  Fall back to the external xattr block. */
	if (ret == -ENODATA && di->i_xattr_loc)
		ret = ocfs2_xattr_block_get(inode, name_index, name, buffer,
					    buffer_size, &xbs);

	return ret;
}
1321
/* ocfs2_xattr_get()
 *
 * Copy an extended attribute into the buffer provided.
 * Buffer is NULL to compute the size of buffer required.
 *
 * Takes a shared cluster lock on the inode and ip_xattr_sem around
 * ocfs2_xattr_get_nolock().  Returns the value size or -errno.
 */
static int ocfs2_xattr_get(struct inode *inode,
			   int name_index,
			   const char *name,
			   void *buffer,
			   size_t buffer_size)
{
	int ret;
	struct buffer_head *di_bh = NULL;

	ret = ocfs2_inode_lock(inode, &di_bh, 0);
	if (ret < 0) {
		mlog_errno(ret);
		return ret;
	}
	down_read(&OCFS2_I(inode)->ip_xattr_sem);
	ret = ocfs2_xattr_get_nolock(inode, di_bh, name_index,
				     name, buffer, buffer_size);
	up_read(&OCFS2_I(inode)->ip_xattr_sem);

	ocfs2_inode_unlock(inode, 0);

	brelse(di_bh);

	return ret;
}
1352
/*
 * Write 'value_len' bytes of 'value' into the clusters already
 * allocated to the value tree vb->vb_xv, block by block, zeroing the
 * tail of the last partially-filled block.  Each block is journalled
 * before being modified; the caller provides the running handle with
 * sufficient credits.  Refcounted (shared) extents must have been
 * CoWed before this point.  Returns 0 on success or -errno.
 */
static int __ocfs2_xattr_set_value_outside(struct inode *inode,
					   handle_t *handle,
					   struct ocfs2_xattr_value_buf *vb,
					   const void *value,
					   int value_len)
{
	int ret = 0, i, cp_len;
	u16 blocksize = inode->i_sb->s_blocksize;
	u32 p_cluster, num_clusters;
	u32 cpos = 0, bpc = ocfs2_clusters_to_blocks(inode->i_sb, 1);
	u32 clusters = ocfs2_clusters_for_bytes(inode->i_sb, value_len);
	u64 blkno;
	struct buffer_head *bh = NULL;
	unsigned int ext_flags;
	struct ocfs2_xattr_value_root *xv = vb->vb_xv;

	/* The value tree must already be large enough for the value. */
	BUG_ON(clusters > le32_to_cpu(xv->xr_clusters));

	while (cpos < clusters) {
		ret = ocfs2_xattr_get_clusters(inode, cpos, &p_cluster,
					       &num_clusters, &xv->xr_list,
					       &ext_flags);
		if (ret) {
			mlog_errno(ret);
			goto out;
		}

		/* Shared extents should have been CoWed already. */
		BUG_ON(ext_flags & OCFS2_EXT_REFCOUNTED);

		blkno = ocfs2_clusters_to_blocks(inode->i_sb, p_cluster);

		for (i = 0; i < num_clusters * bpc; i++, blkno++) {
			ret = ocfs2_read_block(INODE_CACHE(inode), blkno,
					       &bh, NULL);
			if (ret) {
				mlog_errno(ret);
				goto out;
			}

			/* Journal the block before writing into it. */
			ret = ocfs2_journal_access(handle,
						   INODE_CACHE(inode),
						   bh,
						   OCFS2_JOURNAL_ACCESS_WRITE);
			if (ret < 0) {
				mlog_errno(ret);
				goto out;
			}

			cp_len = value_len > blocksize ? blocksize : value_len;
			memcpy(bh->b_data, value, cp_len);
			value_len -= cp_len;
			value += cp_len;
			/* Zero the remainder of a partial last block. */
			if (cp_len < blocksize)
				memset(bh->b_data + cp_len, 0,
				       blocksize - cp_len);

			ocfs2_journal_dirty(handle, bh);
			brelse(bh);
			bh = NULL;

			/*
			 * XXX: do we need to empty all the following
			 * blocks in this cluster?
			 */
			if (!value_len)
				break;
		}
		cpos += num_clusters;
	}
out:
	brelse(bh);

	return ret;
}
1427
Joel Becker69a3e532009-08-17 12:24:39 -07001428static int ocfs2_xa_check_space_helper(int needed_space, int free_start,
1429 int num_entries)
1430{
1431 int free_space;
1432
1433 if (!needed_space)
1434 return 0;
1435
1436 free_space = free_start -
1437 sizeof(struct ocfs2_xattr_header) -
1438 (num_entries * sizeof(struct ocfs2_xattr_entry)) -
1439 OCFS2_XATTR_HEADER_GAP;
1440 if (free_space < 0)
1441 return -EIO;
1442 if (free_space < needed_space)
1443 return -ENOSPC;
1444
1445 return 0;
1446}
1447
/* Journal-access the storage backing this xa_loc (block or bucket). */
static int ocfs2_xa_journal_access(handle_t *handle, struct ocfs2_xa_loc *loc,
				   int type)
{
	return loc->xl_ops->xlo_journal_access(handle, loc, type);
}
1453
/* Mark this xa_loc's storage dirty in the running transaction. */
static void ocfs2_xa_journal_dirty(handle_t *handle, struct ocfs2_xa_loc *loc)
{
	loc->xl_ops->xlo_journal_dirty(handle, loc);
}
1458
/* Give a pointer into the storage for the given offset */
static void *ocfs2_xa_offset_pointer(struct ocfs2_xa_loc *loc, int offset)
{
	/* Offsets must stay inside the storage region. */
	BUG_ON(offset >= loc->xl_size);
	return loc->xl_ops->xlo_offset_pointer(loc, offset);
}
1465
/*
 * Wipe the name+value pair and allow the storage to reclaim it. This
 * must be followed by either removal of the entry or a call to
 * ocfs2_xa_add_namevalue().
 */
static void ocfs2_xa_wipe_namevalue(struct ocfs2_xa_loc *loc)
{
	loc->xl_ops->xlo_wipe_namevalue(loc);
}
1475
/*
 * Find lowest offset to a name+value pair. This is the start of our
 * downward-growing free space.
 */
static int ocfs2_xa_get_free_start(struct ocfs2_xa_loc *loc)
{
	return loc->xl_ops->xlo_get_free_start(loc);
}
1484
/* Can we reuse loc->xl_entry for xi?  (Storage-specific policy.) */
static int ocfs2_xa_can_reuse_entry(struct ocfs2_xa_loc *loc,
				    struct ocfs2_xattr_info *xi)
{
	return loc->xl_ops->xlo_can_reuse(loc, xi);
}
1491
/* How much free space is needed to set the new value */
static int ocfs2_xa_check_space(struct ocfs2_xa_loc *loc,
				struct ocfs2_xattr_info *xi)
{
	return loc->xl_ops->xlo_check_space(loc, xi);
}
1498
/*
 * Append a new, hashed entry to the storage and point loc->xl_entry at
 * it.  The name+value pair itself is reserved separately via
 * ocfs2_xa_add_namevalue().
 */
static void ocfs2_xa_add_entry(struct ocfs2_xa_loc *loc, u32 name_hash)
{
	loc->xl_ops->xlo_add_entry(loc, name_hash);
	loc->xl_entry->xe_name_hash = cpu_to_le32(name_hash);
	/*
	 * We can't leave the new entry's xe_name_offset at zero or
	 * add_namevalue() will go nuts. We set it to the size of our
	 * storage so that it can never be less than any other entry.
	 */
	loc->xl_entry->xe_name_offset = cpu_to_le16(loc->xl_size);
}
1510
/*
 * Reserve space for xi's name+value pair, fill in the entry metadata
 * (value size, name length, type, local-vs-tree flag) and copy the
 * name into the zeroed region.  The caller stores the value afterwards.
 */
static void ocfs2_xa_add_namevalue(struct ocfs2_xa_loc *loc,
				   struct ocfs2_xattr_info *xi)
{
	int size = namevalue_size_xi(xi);
	int nameval_offset;
	char *nameval_buf;

	loc->xl_ops->xlo_add_namevalue(loc, size);
	loc->xl_entry->xe_value_size = cpu_to_le64(xi->xi_value_len);
	loc->xl_entry->xe_name_len = xi->xi_name_len;
	ocfs2_xattr_set_type(loc->xl_entry, xi->xi_name_index);
	/* Values up to OCFS2_XATTR_INLINE_SIZE are stored in place. */
	ocfs2_xattr_set_local(loc->xl_entry,
			      xi->xi_value_len <= OCFS2_XATTR_INLINE_SIZE);

	nameval_offset = le16_to_cpu(loc->xl_entry->xe_name_offset);
	nameval_buf = ocfs2_xa_offset_pointer(loc, nameval_offset);
	memset(nameval_buf, 0, size);
	memcpy(nameval_buf, xi->xi_name, xi->xi_name_len);
}
1530
/*
 * Fill a value buf describing this entry's external value tree so the
 * generic value-tree code can journal and resize it.  Only valid for
 * entries whose value is stored outside the header (non-local).
 */
static void ocfs2_xa_fill_value_buf(struct ocfs2_xa_loc *loc,
				    struct ocfs2_xattr_value_buf *vb)
{
	int nameval_offset = le16_to_cpu(loc->xl_entry->xe_name_offset);
	int name_size = OCFS2_XATTR_SIZE(loc->xl_entry->xe_name_len);

	/* Value bufs are for value trees */
	BUG_ON(ocfs2_xattr_is_local(loc->xl_entry));
	BUG_ON(namevalue_size_xe(loc->xl_entry) !=
	       (name_size + OCFS2_XATTR_ROOT_SIZE));

	loc->xl_ops->xlo_fill_value_buf(loc, vb);
	/* The value root immediately follows the name in the pair. */
	vb->vb_xv =
		(struct ocfs2_xattr_value_root *)ocfs2_xa_offset_pointer(loc,
						nameval_offset +
						name_size);
}
1548
Joel Beckercf2bc802009-08-18 13:52:38 -07001549static int ocfs2_xa_block_journal_access(handle_t *handle,
1550 struct ocfs2_xa_loc *loc, int type)
1551{
1552 struct buffer_head *bh = loc->xl_storage;
1553 ocfs2_journal_access_func access;
1554
1555 if (loc->xl_size == (bh->b_size -
1556 offsetof(struct ocfs2_xattr_block,
1557 xb_attrs.xb_header)))
1558 access = ocfs2_journal_access_xb;
1559 else
1560 access = ocfs2_journal_access_di;
1561 return access(handle, INODE_CACHE(loc->xl_inode), bh, type);
1562}
1563
/* Block storage dirties its single buffer_head directly. */
static void ocfs2_xa_block_journal_dirty(handle_t *handle,
					 struct ocfs2_xa_loc *loc)
{
	struct buffer_head *bh = loc->xl_storage;

	ocfs2_journal_dirty(handle, bh);
}
1571
/* Block storage is flat: offsets are relative to the header. */
static void *ocfs2_xa_block_offset_pointer(struct ocfs2_xa_loc *loc,
					   int offset)
{
	return (char *)loc->xl_header + offset;
}
1577
/* Reuse policy for block storage: only an exact size match qualifies. */
static int ocfs2_xa_block_can_reuse(struct ocfs2_xa_loc *loc,
				    struct ocfs2_xattr_info *xi)
{
	/*
	 * Block storage is strict. If the sizes aren't exact, we will
	 * remove the old one and reinsert the new.
	 */
	return namevalue_size_xe(loc->xl_entry) ==
		namevalue_size_xi(xi);
}
1588
1589static int ocfs2_xa_block_get_free_start(struct ocfs2_xa_loc *loc)
1590{
1591 struct ocfs2_xattr_header *xh = loc->xl_header;
1592 int i, count = le16_to_cpu(xh->xh_count);
1593 int offset, free_start = loc->xl_size;
1594
1595 for (i = 0; i < count; i++) {
1596 offset = le16_to_cpu(xh->xh_entries[i].xe_name_offset);
1597 if (offset < free_start)
1598 free_start = offset;
1599 }
1600
1601 return free_start;
1602}
1603
1604static int ocfs2_xa_block_check_space(struct ocfs2_xa_loc *loc,
1605 struct ocfs2_xattr_info *xi)
1606{
1607 int count = le16_to_cpu(loc->xl_header->xh_count);
1608 int free_start = ocfs2_xa_get_free_start(loc);
1609 int needed_space = ocfs2_xi_entry_usage(xi);
1610
1611 /*
1612 * Block storage will reclaim the original entry before inserting
1613 * the new value, so we only need the difference. If the new
1614 * entry is smaller than the old one, we don't need anything.
1615 */
1616 if (loc->xl_entry) {
1617 /* Don't need space if we're reusing! */
1618 if (ocfs2_xa_can_reuse_entry(loc, xi))
1619 needed_space = 0;
1620 else
1621 needed_space -= ocfs2_xe_entry_usage(loc->xl_entry);
1622 }
1623 if (needed_space < 0)
1624 needed_space = 0;
1625 return ocfs2_xa_check_space_helper(needed_space, free_start, count);
1626}
1627
/*
 * Block storage for xattrs keeps the name+value pairs compacted. When
 * we remove one, we have to shift any that preceded it towards the end.
 */
static void ocfs2_xa_block_wipe_namevalue(struct ocfs2_xa_loc *loc)
{
	int i, offset;
	int namevalue_offset, first_namevalue_offset, namevalue_size;
	struct ocfs2_xattr_entry *entry = loc->xl_entry;
	struct ocfs2_xattr_header *xh = loc->xl_header;
	int count = le16_to_cpu(xh->xh_count);

	namevalue_offset = le16_to_cpu(entry->xe_name_offset);
	namevalue_size = namevalue_size_xe(entry);
	first_namevalue_offset = ocfs2_xa_get_free_start(loc);

	/* Shift the name+value pairs */
	memmove((char *)xh + first_namevalue_offset + namevalue_size,
		(char *)xh + first_namevalue_offset,
		namevalue_offset - first_namevalue_offset);
	memset((char *)xh + first_namevalue_offset, 0, namevalue_size);

	/* Now tell xh->xh_entries about it */
	for (i = 0; i < count; i++) {
		offset = le16_to_cpu(xh->xh_entries[i].xe_name_offset);
		/* Pairs at or below the wiped one moved up by
		 * namevalue_size bytes; fix their offsets. */
		if (offset <= namevalue_offset)
			le16_add_cpu(&xh->xh_entries[i].xe_name_offset,
				     namevalue_size);
	}

	/*
	 * Note that we don't update xh_free_start or xh_name_value_len
	 * because they're not used in block-stored xattrs.
	 */
}
1663
Joel Becker69a3e532009-08-17 12:24:39 -07001664static void ocfs2_xa_block_add_entry(struct ocfs2_xa_loc *loc, u32 name_hash)
1665{
1666 int count = le16_to_cpu(loc->xl_header->xh_count);
1667 loc->xl_entry = &(loc->xl_header->xh_entries[count]);
1668 le16_add_cpu(&loc->xl_header->xh_count, 1);
1669 memset(loc->xl_entry, 0, sizeof(struct ocfs2_xattr_entry));
1670}
1671
1672static void ocfs2_xa_block_add_namevalue(struct ocfs2_xa_loc *loc, int size)
1673{
1674 int free_start = ocfs2_xa_get_free_start(loc);
1675
1676 loc->xl_entry->xe_name_offset = cpu_to_le16(free_start - size);
1677}
1678
Joel Becker3fc12af2009-08-18 13:20:27 -07001679static void ocfs2_xa_block_fill_value_buf(struct ocfs2_xa_loc *loc,
1680 struct ocfs2_xattr_value_buf *vb)
1681{
1682 struct buffer_head *bh = loc->xl_storage;
1683
1684 if (loc->xl_size == (bh->b_size -
1685 offsetof(struct ocfs2_xattr_block,
1686 xb_attrs.xb_header)))
1687 vb->vb_access = ocfs2_journal_access_xb;
1688 else
1689 vb->vb_access = ocfs2_journal_access_di;
1690 vb->vb_bh = bh;
1691}
1692
Joel Becker11179f22009-08-14 16:07:44 -07001693/*
1694 * Operations for xattrs stored in blocks. This includes inline inode
1695 * storage and unindexed ocfs2_xattr_blocks.
1696 */
1697static const struct ocfs2_xa_loc_operations ocfs2_xa_block_loc_ops = {
Joel Beckercf2bc802009-08-18 13:52:38 -07001698 .xlo_journal_access = ocfs2_xa_block_journal_access,
1699 .xlo_journal_dirty = ocfs2_xa_block_journal_dirty,
Joel Becker11179f22009-08-14 16:07:44 -07001700 .xlo_offset_pointer = ocfs2_xa_block_offset_pointer,
Joel Becker69a3e532009-08-17 12:24:39 -07001701 .xlo_check_space = ocfs2_xa_block_check_space,
1702 .xlo_can_reuse = ocfs2_xa_block_can_reuse,
1703 .xlo_get_free_start = ocfs2_xa_block_get_free_start,
Joel Becker11179f22009-08-14 16:07:44 -07001704 .xlo_wipe_namevalue = ocfs2_xa_block_wipe_namevalue,
Joel Becker69a3e532009-08-17 12:24:39 -07001705 .xlo_add_entry = ocfs2_xa_block_add_entry,
1706 .xlo_add_namevalue = ocfs2_xa_block_add_namevalue,
Joel Becker3fc12af2009-08-18 13:20:27 -07001707 .xlo_fill_value_buf = ocfs2_xa_block_fill_value_buf,
Joel Becker11179f22009-08-14 16:07:44 -07001708};
1709
Joel Beckercf2bc802009-08-18 13:52:38 -07001710static int ocfs2_xa_bucket_journal_access(handle_t *handle,
1711 struct ocfs2_xa_loc *loc, int type)
1712{
1713 struct ocfs2_xattr_bucket *bucket = loc->xl_storage;
1714
1715 return ocfs2_xattr_bucket_journal_access(handle, bucket, type);
1716}
1717
1718static void ocfs2_xa_bucket_journal_dirty(handle_t *handle,
1719 struct ocfs2_xa_loc *loc)
1720{
1721 struct ocfs2_xattr_bucket *bucket = loc->xl_storage;
1722
1723 ocfs2_xattr_bucket_journal_dirty(handle, bucket);
1724}
1725
Joel Becker11179f22009-08-14 16:07:44 -07001726static void *ocfs2_xa_bucket_offset_pointer(struct ocfs2_xa_loc *loc,
1727 int offset)
1728{
1729 struct ocfs2_xattr_bucket *bucket = loc->xl_storage;
1730 int block, block_offset;
1731
Joel Becker11179f22009-08-14 16:07:44 -07001732 /* The header is at the front of the bucket */
Joel Beckercf2bc802009-08-18 13:52:38 -07001733 block = offset >> loc->xl_inode->i_sb->s_blocksize_bits;
1734 block_offset = offset % loc->xl_inode->i_sb->s_blocksize;
Joel Becker11179f22009-08-14 16:07:44 -07001735
1736 return bucket_block(bucket, block) + block_offset;
1737}
1738
Joel Becker69a3e532009-08-17 12:24:39 -07001739static int ocfs2_xa_bucket_can_reuse(struct ocfs2_xa_loc *loc,
1740 struct ocfs2_xattr_info *xi)
1741{
1742 return namevalue_size_xe(loc->xl_entry) >=
1743 namevalue_size_xi(xi);
1744}
1745
1746static int ocfs2_xa_bucket_get_free_start(struct ocfs2_xa_loc *loc)
1747{
1748 struct ocfs2_xattr_bucket *bucket = loc->xl_storage;
1749 return le16_to_cpu(bucket_xh(bucket)->xh_free_start);
1750}
1751
1752static int ocfs2_bucket_align_free_start(struct super_block *sb,
1753 int free_start, int size)
1754{
1755 /*
1756 * We need to make sure that the name+value pair fits within
1757 * one block.
1758 */
1759 if (((free_start - size) >> sb->s_blocksize_bits) !=
1760 ((free_start - 1) >> sb->s_blocksize_bits))
1761 free_start -= free_start % sb->s_blocksize;
1762
1763 return free_start;
1764}
1765
/*
 * Space check for bucket-stored xattrs.  Returns 0 if the new entry
 * fits, -ENOSPC otherwise.
 */
static int ocfs2_xa_bucket_check_space(struct ocfs2_xa_loc *loc,
				       struct ocfs2_xattr_info *xi)
{
	int rc;
	int count = le16_to_cpu(loc->xl_header->xh_count);
	int free_start = ocfs2_xa_get_free_start(loc);
	int needed_space = ocfs2_xi_entry_usage(xi);
	int size = namevalue_size_xi(xi);
	struct super_block *sb = loc->xl_inode->i_sb;

	/*
	 * Bucket storage does not reclaim name+value pairs it cannot
	 * reuse. They live as holes until the bucket fills, and then
	 * the bucket is defragmented. However, the bucket can reclaim
	 * the ocfs2_xattr_entry.
	 */
	if (loc->xl_entry) {
		/* Don't need space if we're reusing! */
		if (ocfs2_xa_can_reuse_entry(loc, xi))
			needed_space = 0;
		else
			needed_space -= sizeof(struct ocfs2_xattr_entry);
	}
	BUG_ON(needed_space < 0);

	/*
	 * free_start < size means the pair itself cannot fit below the
	 * free start; only OK if no extra space is needed at all.
	 */
	if (free_start < size) {
		if (needed_space)
			return -ENOSPC;
	} else {
		/*
		 * First we check if it would fit in the first place.
		 * Below, we align the free start to a block. This may
		 * slide us below the minimum gap. By checking unaligned
		 * first, we avoid that error.
		 */
		rc = ocfs2_xa_check_space_helper(needed_space, free_start,
						 count);
		if (rc)
			return rc;
		free_start = ocfs2_bucket_align_free_start(sb, free_start,
							   size);
	}
	return ocfs2_xa_check_space_helper(needed_space, free_start, count);
}
1810
Joel Becker11179f22009-08-14 16:07:44 -07001811static void ocfs2_xa_bucket_wipe_namevalue(struct ocfs2_xa_loc *loc)
1812{
Joel Becker199799a2009-08-14 19:04:15 -07001813 le16_add_cpu(&loc->xl_header->xh_name_value_len,
1814 -namevalue_size_xe(loc->xl_entry));
Joel Becker11179f22009-08-14 16:07:44 -07001815}
1816
Joel Becker69a3e532009-08-17 12:24:39 -07001817static void ocfs2_xa_bucket_add_entry(struct ocfs2_xa_loc *loc, u32 name_hash)
1818{
1819 struct ocfs2_xattr_header *xh = loc->xl_header;
1820 int count = le16_to_cpu(xh->xh_count);
1821 int low = 0, high = count - 1, tmp;
1822 struct ocfs2_xattr_entry *tmp_xe;
1823
1824 /*
1825 * We keep buckets sorted by name_hash, so we need to find
1826 * our insert place.
1827 */
1828 while (low <= high && count) {
1829 tmp = (low + high) / 2;
1830 tmp_xe = &xh->xh_entries[tmp];
1831
1832 if (name_hash > le32_to_cpu(tmp_xe->xe_name_hash))
1833 low = tmp + 1;
1834 else if (name_hash < le32_to_cpu(tmp_xe->xe_name_hash))
1835 high = tmp - 1;
1836 else {
1837 low = tmp;
1838 break;
1839 }
1840 }
1841
1842 if (low != count)
1843 memmove(&xh->xh_entries[low + 1],
1844 &xh->xh_entries[low],
1845 ((count - low) * sizeof(struct ocfs2_xattr_entry)));
1846
1847 le16_add_cpu(&xh->xh_count, 1);
1848 loc->xl_entry = &xh->xh_entries[low];
1849 memset(loc->xl_entry, 0, sizeof(struct ocfs2_xattr_entry));
1850}
1851
1852static void ocfs2_xa_bucket_add_namevalue(struct ocfs2_xa_loc *loc, int size)
1853{
1854 int free_start = ocfs2_xa_get_free_start(loc);
1855 struct ocfs2_xattr_header *xh = loc->xl_header;
Joel Beckercf2bc802009-08-18 13:52:38 -07001856 struct super_block *sb = loc->xl_inode->i_sb;
Joel Becker69a3e532009-08-17 12:24:39 -07001857 int nameval_offset;
1858
1859 free_start = ocfs2_bucket_align_free_start(sb, free_start, size);
1860 nameval_offset = free_start - size;
1861 loc->xl_entry->xe_name_offset = cpu_to_le16(nameval_offset);
1862 xh->xh_free_start = cpu_to_le16(nameval_offset);
1863 le16_add_cpu(&xh->xh_name_value_len, size);
1864
1865}
1866
Joel Becker3fc12af2009-08-18 13:20:27 -07001867static void ocfs2_xa_bucket_fill_value_buf(struct ocfs2_xa_loc *loc,
1868 struct ocfs2_xattr_value_buf *vb)
1869{
1870 struct ocfs2_xattr_bucket *bucket = loc->xl_storage;
Joel Beckercf2bc802009-08-18 13:52:38 -07001871 struct super_block *sb = loc->xl_inode->i_sb;
Joel Becker3fc12af2009-08-18 13:20:27 -07001872 int nameval_offset = le16_to_cpu(loc->xl_entry->xe_name_offset);
1873 int size = namevalue_size_xe(loc->xl_entry);
1874 int block_offset = nameval_offset >> sb->s_blocksize_bits;
1875
1876 /* Values are not allowed to straddle block boundaries */
1877 BUG_ON(block_offset !=
1878 ((nameval_offset + size - 1) >> sb->s_blocksize_bits));
1879 /* We expect the bucket to be filled in */
1880 BUG_ON(!bucket->bu_bhs[block_offset]);
1881
1882 vb->vb_access = ocfs2_journal_access;
1883 vb->vb_bh = bucket->bu_bhs[block_offset];
1884}
1885
Joel Becker11179f22009-08-14 16:07:44 -07001886/* Operations for xattrs stored in buckets. */
1887static const struct ocfs2_xa_loc_operations ocfs2_xa_bucket_loc_ops = {
Joel Beckercf2bc802009-08-18 13:52:38 -07001888 .xlo_journal_access = ocfs2_xa_bucket_journal_access,
1889 .xlo_journal_dirty = ocfs2_xa_bucket_journal_dirty,
Joel Becker11179f22009-08-14 16:07:44 -07001890 .xlo_offset_pointer = ocfs2_xa_bucket_offset_pointer,
Joel Becker69a3e532009-08-17 12:24:39 -07001891 .xlo_check_space = ocfs2_xa_bucket_check_space,
1892 .xlo_can_reuse = ocfs2_xa_bucket_can_reuse,
1893 .xlo_get_free_start = ocfs2_xa_bucket_get_free_start,
Joel Becker11179f22009-08-14 16:07:44 -07001894 .xlo_wipe_namevalue = ocfs2_xa_bucket_wipe_namevalue,
Joel Becker69a3e532009-08-17 12:24:39 -07001895 .xlo_add_entry = ocfs2_xa_bucket_add_entry,
1896 .xlo_add_namevalue = ocfs2_xa_bucket_add_namevalue,
Joel Becker3fc12af2009-08-18 13:20:27 -07001897 .xlo_fill_value_buf = ocfs2_xa_bucket_fill_value_buf,
Joel Becker11179f22009-08-14 16:07:44 -07001898};
1899
Joel Becker399ff3a72009-09-01 18:38:27 -07001900static unsigned int ocfs2_xa_value_clusters(struct ocfs2_xa_loc *loc)
1901{
1902 struct ocfs2_xattr_value_buf vb;
1903
1904 if (ocfs2_xattr_is_local(loc->xl_entry))
1905 return 0;
1906
1907 ocfs2_xa_fill_value_buf(loc, &vb);
1908 return le32_to_cpu(vb.vb_xv->xr_clusters);
1909}
1910
Joel Becker73857ee2009-08-18 20:26:41 -07001911static int ocfs2_xa_value_truncate(struct ocfs2_xa_loc *loc, u64 bytes,
1912 struct ocfs2_xattr_set_ctxt *ctxt)
1913{
1914 int trunc_rc, access_rc;
1915 struct ocfs2_xattr_value_buf vb;
1916
1917 ocfs2_xa_fill_value_buf(loc, &vb);
1918 trunc_rc = ocfs2_xattr_value_truncate(loc->xl_inode, &vb, bytes,
1919 ctxt);
1920
1921 /*
1922 * The caller of ocfs2_xa_value_truncate() has already called
1923 * ocfs2_xa_journal_access on the loc. However, The truncate code
1924 * calls ocfs2_extend_trans(). This may commit the previous
1925 * transaction and open a new one. If this is a bucket, truncate
1926 * could leave only vb->vb_bh set up for journaling. Meanwhile,
1927 * the caller is expecting to dirty the entire bucket. So we must
1928 * reset the journal work. We do this even if truncate has failed,
1929 * as it could have failed after committing the extend.
1930 */
1931 access_rc = ocfs2_xa_journal_access(ctxt->handle, loc,
1932 OCFS2_JOURNAL_ACCESS_WRITE);
1933
1934 /* Errors in truncate take precedence */
1935 return trunc_rc ? trunc_rc : access_rc;
1936}
1937
Joel Becker11179f22009-08-14 16:07:44 -07001938static void ocfs2_xa_remove_entry(struct ocfs2_xa_loc *loc)
1939{
Joel Beckerbde1e542009-08-14 16:58:38 -07001940 int index, count;
1941 struct ocfs2_xattr_header *xh = loc->xl_header;
1942 struct ocfs2_xattr_entry *entry = loc->xl_entry;
1943
Joel Becker11179f22009-08-14 16:07:44 -07001944 ocfs2_xa_wipe_namevalue(loc);
Joel Beckerbde1e542009-08-14 16:58:38 -07001945 loc->xl_entry = NULL;
1946
1947 le16_add_cpu(&xh->xh_count, -1);
1948 count = le16_to_cpu(xh->xh_count);
1949
1950 /*
1951 * Only zero out the entry if there are more remaining. This is
1952 * important for an empty bucket, as it keeps track of the
1953 * bucket's hash value. It doesn't hurt empty block storage.
1954 */
1955 if (count) {
1956 index = ((char *)entry - (char *)&xh->xh_entries) /
1957 sizeof(struct ocfs2_xattr_entry);
1958 memmove(&xh->xh_entries[index], &xh->xh_entries[index + 1],
1959 (count - index) * sizeof(struct ocfs2_xattr_entry));
1960 memset(&xh->xh_entries[count], 0,
1961 sizeof(struct ocfs2_xattr_entry));
1962 }
Joel Becker11179f22009-08-14 16:07:44 -07001963}
1964
Joel Becker399ff3a72009-09-01 18:38:27 -07001965/*
1966 * If we have a problem adjusting the size of an external value during
1967 * ocfs2_xa_prepare_entry() or ocfs2_xa_remove(), we may have an xattr
1968 * in an intermediate state. For example, the value may be partially
1969 * truncated.
1970 *
1971 * If the value tree hasn't changed, the extend/truncate went nowhere.
1972 * We have nothing to do. The caller can treat it as a straight error.
1973 *
1974 * If the value tree got partially truncated, we now have a corrupted
1975 * extended attribute. We're going to wipe its entry and leak the
1976 * clusters. Better to leak some storage than leave a corrupt entry.
1977 *
1978 * If the value tree grew, it obviously didn't grow enough for the
1979 * new entry. We're not going to try and reclaim those clusters either.
1980 * If there was already an external value there (orig_clusters != 0),
1981 * the new clusters are attached safely and we can just leave the old
1982 * value in place. If there was no external value there, we remove
1983 * the entry.
1984 *
1985 * This way, the xattr block we store in the journal will be consistent.
1986 * If the size change broke because of the journal, no changes will hit
1987 * disk anyway.
1988 */
1989static void ocfs2_xa_cleanup_value_truncate(struct ocfs2_xa_loc *loc,
1990 const char *what,
1991 unsigned int orig_clusters)
1992{
1993 unsigned int new_clusters = ocfs2_xa_value_clusters(loc);
1994 char *nameval_buf = ocfs2_xa_offset_pointer(loc,
1995 le16_to_cpu(loc->xl_entry->xe_name_offset));
1996
1997 if (new_clusters < orig_clusters) {
1998 mlog(ML_ERROR,
1999 "Partial truncate while %s xattr %.*s. Leaking "
2000 "%u clusters and removing the entry\n",
2001 what, loc->xl_entry->xe_name_len, nameval_buf,
2002 orig_clusters - new_clusters);
2003 ocfs2_xa_remove_entry(loc);
2004 } else if (!orig_clusters) {
2005 mlog(ML_ERROR,
2006 "Unable to allocate an external value for xattr "
2007 "%.*s safely. Leaking %u clusters and removing the "
2008 "entry\n",
2009 loc->xl_entry->xe_name_len, nameval_buf,
2010 new_clusters - orig_clusters);
2011 ocfs2_xa_remove_entry(loc);
2012 } else if (new_clusters > orig_clusters)
2013 mlog(ML_ERROR,
2014 "Unable to grow xattr %.*s safely. %u new clusters "
2015 "have been added, but the value will not be "
2016 "modified\n",
2017 loc->xl_entry->xe_name_len, nameval_buf,
2018 new_clusters - orig_clusters);
2019}
2020
Joel Becker73857ee2009-08-18 20:26:41 -07002021static int ocfs2_xa_remove(struct ocfs2_xa_loc *loc,
2022 struct ocfs2_xattr_set_ctxt *ctxt)
2023{
2024 int rc = 0;
Joel Becker399ff3a72009-09-01 18:38:27 -07002025 unsigned int orig_clusters;
Joel Becker73857ee2009-08-18 20:26:41 -07002026
2027 if (!ocfs2_xattr_is_local(loc->xl_entry)) {
Joel Becker399ff3a72009-09-01 18:38:27 -07002028 orig_clusters = ocfs2_xa_value_clusters(loc);
Joel Becker73857ee2009-08-18 20:26:41 -07002029 rc = ocfs2_xa_value_truncate(loc, 0, ctxt);
2030 if (rc) {
2031 mlog_errno(rc);
Joel Becker399ff3a72009-09-01 18:38:27 -07002032 /*
2033 * Since this is remove, we can return 0 if
2034 * ocfs2_xa_cleanup_value_truncate() is going to
2035 * wipe the entry anyway. So we check the
2036 * cluster count as well.
2037 */
2038 if (orig_clusters != ocfs2_xa_value_clusters(loc))
2039 rc = 0;
2040 ocfs2_xa_cleanup_value_truncate(loc, "removing",
2041 orig_clusters);
2042 if (rc)
2043 goto out;
Joel Becker73857ee2009-08-18 20:26:41 -07002044 }
2045 }
2046
2047 ocfs2_xa_remove_entry(loc);
2048
2049out:
2050 return rc;
2051}
2052
2053static void ocfs2_xa_install_value_root(struct ocfs2_xa_loc *loc)
2054{
2055 int name_size = OCFS2_XATTR_SIZE(loc->xl_entry->xe_name_len);
2056 char *nameval_buf;
2057
2058 nameval_buf = ocfs2_xa_offset_pointer(loc,
2059 le16_to_cpu(loc->xl_entry->xe_name_offset));
2060 memcpy(nameval_buf + name_size, &def_xv, OCFS2_XATTR_ROOT_SIZE);
2061}
2062
2063/*
2064 * Take an existing entry and make it ready for the new value. This
2065 * won't allocate space, but it may free space. It should be ready for
2066 * ocfs2_xa_prepare_entry() to finish the work.
2067 */
2068static int ocfs2_xa_reuse_entry(struct ocfs2_xa_loc *loc,
2069 struct ocfs2_xattr_info *xi,
2070 struct ocfs2_xattr_set_ctxt *ctxt)
2071{
2072 int rc = 0;
2073 int name_size = OCFS2_XATTR_SIZE(xi->xi_name_len);
Joel Becker399ff3a72009-09-01 18:38:27 -07002074 unsigned int orig_clusters;
Joel Becker73857ee2009-08-18 20:26:41 -07002075 char *nameval_buf;
2076 int xe_local = ocfs2_xattr_is_local(loc->xl_entry);
2077 int xi_local = xi->xi_value_len <= OCFS2_XATTR_INLINE_SIZE;
2078
2079 BUG_ON(OCFS2_XATTR_SIZE(loc->xl_entry->xe_name_len) !=
2080 name_size);
2081
2082 nameval_buf = ocfs2_xa_offset_pointer(loc,
2083 le16_to_cpu(loc->xl_entry->xe_name_offset));
2084 if (xe_local) {
2085 memset(nameval_buf + name_size, 0,
2086 namevalue_size_xe(loc->xl_entry) - name_size);
2087 if (!xi_local)
2088 ocfs2_xa_install_value_root(loc);
2089 } else {
Joel Becker399ff3a72009-09-01 18:38:27 -07002090 orig_clusters = ocfs2_xa_value_clusters(loc);
Joel Becker73857ee2009-08-18 20:26:41 -07002091 if (xi_local) {
2092 rc = ocfs2_xa_value_truncate(loc, 0, ctxt);
Joel Becker399ff3a72009-09-01 18:38:27 -07002093 if (rc < 0)
Joel Becker73857ee2009-08-18 20:26:41 -07002094 mlog_errno(rc);
Joel Becker399ff3a72009-09-01 18:38:27 -07002095 else
2096 memset(nameval_buf + name_size, 0,
2097 namevalue_size_xe(loc->xl_entry) -
2098 name_size);
Joel Becker73857ee2009-08-18 20:26:41 -07002099 } else if (le64_to_cpu(loc->xl_entry->xe_value_size) >
2100 xi->xi_value_len) {
2101 rc = ocfs2_xa_value_truncate(loc, xi->xi_value_len,
2102 ctxt);
Joel Becker399ff3a72009-09-01 18:38:27 -07002103 if (rc < 0)
Joel Becker73857ee2009-08-18 20:26:41 -07002104 mlog_errno(rc);
Joel Becker399ff3a72009-09-01 18:38:27 -07002105 }
2106
2107 if (rc) {
2108 ocfs2_xa_cleanup_value_truncate(loc, "reusing",
2109 orig_clusters);
2110 goto out;
Joel Becker73857ee2009-08-18 20:26:41 -07002111 }
2112 }
2113
2114 loc->xl_entry->xe_value_size = cpu_to_le64(xi->xi_value_len);
2115 ocfs2_xattr_set_local(loc->xl_entry, xi_local);
2116
2117out:
2118 return rc;
2119}
2120
Joel Becker69a3e532009-08-17 12:24:39 -07002121/*
2122 * Prepares loc->xl_entry to receive the new xattr. This includes
2123 * properly setting up the name+value pair region. If loc->xl_entry
2124 * already exists, it will take care of modifying it appropriately.
Joel Becker69a3e532009-08-17 12:24:39 -07002125 *
2126 * Note that this modifies the data. You did journal_access already,
2127 * right?
2128 */
2129static int ocfs2_xa_prepare_entry(struct ocfs2_xa_loc *loc,
2130 struct ocfs2_xattr_info *xi,
Joel Becker73857ee2009-08-18 20:26:41 -07002131 u32 name_hash,
2132 struct ocfs2_xattr_set_ctxt *ctxt)
Joel Becker69a3e532009-08-17 12:24:39 -07002133{
2134 int rc = 0;
Joel Becker399ff3a72009-09-01 18:38:27 -07002135 unsigned int orig_clusters;
2136 __le64 orig_value_size = 0;
Joel Becker69a3e532009-08-17 12:24:39 -07002137
Joel Becker69a3e532009-08-17 12:24:39 -07002138 rc = ocfs2_xa_check_space(loc, xi);
2139 if (rc)
2140 goto out;
2141
2142 if (loc->xl_entry) {
2143 if (ocfs2_xa_can_reuse_entry(loc, xi)) {
Joel Becker399ff3a72009-09-01 18:38:27 -07002144 orig_value_size = loc->xl_entry->xe_value_size;
Joel Becker73857ee2009-08-18 20:26:41 -07002145 rc = ocfs2_xa_reuse_entry(loc, xi, ctxt);
2146 if (rc)
2147 goto out;
2148 goto alloc_value;
Joel Becker69a3e532009-08-17 12:24:39 -07002149 }
2150
Joel Becker73857ee2009-08-18 20:26:41 -07002151 if (!ocfs2_xattr_is_local(loc->xl_entry)) {
Joel Becker399ff3a72009-09-01 18:38:27 -07002152 orig_clusters = ocfs2_xa_value_clusters(loc);
Joel Becker73857ee2009-08-18 20:26:41 -07002153 rc = ocfs2_xa_value_truncate(loc, 0, ctxt);
2154 if (rc) {
2155 mlog_errno(rc);
Joel Becker399ff3a72009-09-01 18:38:27 -07002156 ocfs2_xa_cleanup_value_truncate(loc,
2157 "overwriting",
2158 orig_clusters);
Joel Becker73857ee2009-08-18 20:26:41 -07002159 goto out;
2160 }
2161 }
Joel Becker69a3e532009-08-17 12:24:39 -07002162 ocfs2_xa_wipe_namevalue(loc);
2163 } else
2164 ocfs2_xa_add_entry(loc, name_hash);
2165
2166 /*
2167 * If we get here, we have a blank entry. Fill it. We grow our
2168 * name+value pair back from the end.
2169 */
2170 ocfs2_xa_add_namevalue(loc, xi);
Joel Becker73857ee2009-08-18 20:26:41 -07002171 if (xi->xi_value_len > OCFS2_XATTR_INLINE_SIZE)
2172 ocfs2_xa_install_value_root(loc);
2173
2174alloc_value:
2175 if (xi->xi_value_len > OCFS2_XATTR_INLINE_SIZE) {
Joel Becker399ff3a72009-09-01 18:38:27 -07002176 orig_clusters = ocfs2_xa_value_clusters(loc);
Joel Becker73857ee2009-08-18 20:26:41 -07002177 rc = ocfs2_xa_value_truncate(loc, xi->xi_value_len, ctxt);
Joel Becker399ff3a72009-09-01 18:38:27 -07002178 if (rc < 0) {
Tao Ma5f5261a2010-05-13 22:49:05 +08002179 ctxt->set_abort = 1;
Joel Becker399ff3a72009-09-01 18:38:27 -07002180 ocfs2_xa_cleanup_value_truncate(loc, "growing",
2181 orig_clusters);
Tao Mad5a7df02010-05-10 18:09:47 +08002182 /*
2183 * If we were growing an existing value,
2184 * ocfs2_xa_cleanup_value_truncate() won't remove
2185 * the entry. We need to restore the original value
2186 * size.
2187 */
2188 if (loc->xl_entry) {
2189 BUG_ON(!orig_value_size);
2190 loc->xl_entry->xe_value_size = orig_value_size;
2191 }
Joel Becker73857ee2009-08-18 20:26:41 -07002192 mlog_errno(rc);
Joel Becker399ff3a72009-09-01 18:38:27 -07002193 }
Joel Becker73857ee2009-08-18 20:26:41 -07002194 }
Joel Becker69a3e532009-08-17 12:24:39 -07002195
2196out:
2197 return rc;
2198}
2199
2200/*
Joel Becker73857ee2009-08-18 20:26:41 -07002201 * Store the value portion of the name+value pair. This will skip
2202 * values that are stored externally. Their tree roots were set up
2203 * by ocfs2_xa_prepare_entry().
Joel Becker69a3e532009-08-17 12:24:39 -07002204 */
Joel Becker73857ee2009-08-18 20:26:41 -07002205static int ocfs2_xa_store_value(struct ocfs2_xa_loc *loc,
2206 struct ocfs2_xattr_info *xi,
2207 struct ocfs2_xattr_set_ctxt *ctxt)
Joel Becker69a3e532009-08-17 12:24:39 -07002208{
Joel Becker73857ee2009-08-18 20:26:41 -07002209 int rc = 0;
Joel Becker69a3e532009-08-17 12:24:39 -07002210 int nameval_offset = le16_to_cpu(loc->xl_entry->xe_name_offset);
2211 int name_size = OCFS2_XATTR_SIZE(xi->xi_name_len);
Joel Becker69a3e532009-08-17 12:24:39 -07002212 char *nameval_buf;
Joel Becker73857ee2009-08-18 20:26:41 -07002213 struct ocfs2_xattr_value_buf vb;
Joel Becker69a3e532009-08-17 12:24:39 -07002214
Joel Becker69a3e532009-08-17 12:24:39 -07002215 nameval_buf = ocfs2_xa_offset_pointer(loc, nameval_offset);
Joel Becker73857ee2009-08-18 20:26:41 -07002216 if (xi->xi_value_len > OCFS2_XATTR_INLINE_SIZE) {
2217 ocfs2_xa_fill_value_buf(loc, &vb);
2218 rc = __ocfs2_xattr_set_value_outside(loc->xl_inode,
2219 ctxt->handle, &vb,
2220 xi->xi_value,
2221 xi->xi_value_len);
2222 } else
2223 memcpy(nameval_buf + name_size, xi->xi_value, xi->xi_value_len);
2224
Joel Becker73857ee2009-08-18 20:26:41 -07002225 return rc;
Joel Becker69a3e532009-08-17 12:24:39 -07002226}
2227
Joel Beckerbca5e9b2009-08-18 20:40:14 -07002228static int ocfs2_xa_set(struct ocfs2_xa_loc *loc,
2229 struct ocfs2_xattr_info *xi,
2230 struct ocfs2_xattr_set_ctxt *ctxt)
2231{
2232 int ret;
2233 u32 name_hash = ocfs2_xattr_name_hash(loc->xl_inode, xi->xi_name,
2234 xi->xi_name_len);
2235
2236 ret = ocfs2_xa_journal_access(ctxt->handle, loc,
2237 OCFS2_JOURNAL_ACCESS_WRITE);
2238 if (ret) {
2239 mlog_errno(ret);
2240 goto out;
2241 }
2242
Joel Becker399ff3a72009-09-01 18:38:27 -07002243 /*
2244 * From here on out, everything is going to modify the buffer a
2245 * little. Errors are going to leave the xattr header in a
2246 * sane state. Thus, even with errors we dirty the sucker.
2247 */
2248
Joel Beckerbca5e9b2009-08-18 20:40:14 -07002249 /* Don't worry, we are never called with !xi_value and !xl_entry */
2250 if (!xi->xi_value) {
2251 ret = ocfs2_xa_remove(loc, ctxt);
Joel Becker399ff3a72009-09-01 18:38:27 -07002252 goto out_dirty;
Joel Beckerbca5e9b2009-08-18 20:40:14 -07002253 }
2254
2255 ret = ocfs2_xa_prepare_entry(loc, xi, name_hash, ctxt);
2256 if (ret) {
2257 if (ret != -ENOSPC)
2258 mlog_errno(ret);
Joel Becker399ff3a72009-09-01 18:38:27 -07002259 goto out_dirty;
Joel Beckerbca5e9b2009-08-18 20:40:14 -07002260 }
2261
2262 ret = ocfs2_xa_store_value(loc, xi, ctxt);
Joel Becker399ff3a72009-09-01 18:38:27 -07002263 if (ret)
Joel Beckerbca5e9b2009-08-18 20:40:14 -07002264 mlog_errno(ret);
Joel Beckerbca5e9b2009-08-18 20:40:14 -07002265
Joel Becker399ff3a72009-09-01 18:38:27 -07002266out_dirty:
Joel Beckerbca5e9b2009-08-18 20:40:14 -07002267 ocfs2_xa_journal_dirty(ctxt->handle, loc);
2268
2269out:
2270 return ret;
2271}
2272
Joel Becker11179f22009-08-14 16:07:44 -07002273static void ocfs2_init_dinode_xa_loc(struct ocfs2_xa_loc *loc,
2274 struct inode *inode,
2275 struct buffer_head *bh,
2276 struct ocfs2_xattr_entry *entry)
2277{
2278 struct ocfs2_dinode *di = (struct ocfs2_dinode *)bh->b_data;
2279
Joel Becker139ffac2009-08-19 11:09:17 -07002280 BUG_ON(!(OCFS2_I(inode)->ip_dyn_features & OCFS2_INLINE_XATTR_FL));
2281
Joel Beckercf2bc802009-08-18 13:52:38 -07002282 loc->xl_inode = inode;
Joel Becker11179f22009-08-14 16:07:44 -07002283 loc->xl_ops = &ocfs2_xa_block_loc_ops;
2284 loc->xl_storage = bh;
2285 loc->xl_entry = entry;
Joel Becker139ffac2009-08-19 11:09:17 -07002286 loc->xl_size = le16_to_cpu(di->i_xattr_inline_size);
Joel Becker11179f22009-08-14 16:07:44 -07002287 loc->xl_header =
2288 (struct ocfs2_xattr_header *)(bh->b_data + bh->b_size -
2289 loc->xl_size);
2290}
2291
2292static void ocfs2_init_xattr_block_xa_loc(struct ocfs2_xa_loc *loc,
Joel Beckercf2bc802009-08-18 13:52:38 -07002293 struct inode *inode,
Joel Becker11179f22009-08-14 16:07:44 -07002294 struct buffer_head *bh,
2295 struct ocfs2_xattr_entry *entry)
2296{
2297 struct ocfs2_xattr_block *xb =
2298 (struct ocfs2_xattr_block *)bh->b_data;
2299
2300 BUG_ON(le16_to_cpu(xb->xb_flags) & OCFS2_XATTR_INDEXED);
2301
Joel Beckercf2bc802009-08-18 13:52:38 -07002302 loc->xl_inode = inode;
Joel Becker11179f22009-08-14 16:07:44 -07002303 loc->xl_ops = &ocfs2_xa_block_loc_ops;
2304 loc->xl_storage = bh;
2305 loc->xl_header = &(xb->xb_attrs.xb_header);
2306 loc->xl_entry = entry;
2307 loc->xl_size = bh->b_size - offsetof(struct ocfs2_xattr_block,
2308 xb_attrs.xb_header);
2309}
2310
2311static void ocfs2_init_xattr_bucket_xa_loc(struct ocfs2_xa_loc *loc,
2312 struct ocfs2_xattr_bucket *bucket,
2313 struct ocfs2_xattr_entry *entry)
2314{
Joel Beckercf2bc802009-08-18 13:52:38 -07002315 loc->xl_inode = bucket->bu_inode;
Joel Becker11179f22009-08-14 16:07:44 -07002316 loc->xl_ops = &ocfs2_xa_bucket_loc_ops;
2317 loc->xl_storage = bucket;
2318 loc->xl_header = bucket_xh(bucket);
2319 loc->xl_entry = entry;
2320 loc->xl_size = OCFS2_XATTR_BUCKET_SIZE;
2321}
2322
Tao Mace9c5a52009-08-18 11:43:59 +08002323/*
2324 * In xattr remove, if it is stored outside and refcounted, we may have
2325 * the chance to split the refcount tree. So need the allocators.
2326 */
2327static int ocfs2_lock_xattr_remove_allocators(struct inode *inode,
2328 struct ocfs2_xattr_value_root *xv,
2329 struct ocfs2_caching_info *ref_ci,
2330 struct buffer_head *ref_root_bh,
2331 struct ocfs2_alloc_context **meta_ac,
2332 int *ref_credits)
Tiger Yangcf1d6c72008-08-18 17:11:00 +08002333{
Tao Mace9c5a52009-08-18 11:43:59 +08002334 int ret, meta_add = 0;
2335 u32 p_cluster, num_clusters;
2336 unsigned int ext_flags;
Tao Ma78f30c32008-11-12 08:27:00 +08002337
Tao Mace9c5a52009-08-18 11:43:59 +08002338 *ref_credits = 0;
2339 ret = ocfs2_xattr_get_clusters(inode, 0, &p_cluster,
2340 &num_clusters,
2341 &xv->xr_list,
2342 &ext_flags);
2343 if (ret) {
Tao Ma85db90e2008-11-12 08:27:01 +08002344 mlog_errno(ret);
2345 goto out;
2346 }
2347
Tao Mace9c5a52009-08-18 11:43:59 +08002348 if (!(ext_flags & OCFS2_EXT_REFCOUNTED))
2349 goto out;
Tiger Yangcf1d6c72008-08-18 17:11:00 +08002350
Tao Mace9c5a52009-08-18 11:43:59 +08002351 ret = ocfs2_refcounted_xattr_delete_need(inode, ref_ci,
2352 ref_root_bh, xv,
2353 &meta_add, ref_credits);
2354 if (ret) {
2355 mlog_errno(ret);
2356 goto out;
Tiger Yangcf1d6c72008-08-18 17:11:00 +08002357 }
2358
Tao Mace9c5a52009-08-18 11:43:59 +08002359 ret = ocfs2_reserve_new_metadata_blocks(OCFS2_SB(inode->i_sb),
2360 meta_add, meta_ac);
2361 if (ret)
2362 mlog_errno(ret);
2363
Tao Ma85db90e2008-11-12 08:27:01 +08002364out:
Tiger Yangcf1d6c72008-08-18 17:11:00 +08002365 return ret;
2366}
2367
Tao Mace9c5a52009-08-18 11:43:59 +08002368static int ocfs2_remove_value_outside(struct inode*inode,
2369 struct ocfs2_xattr_value_buf *vb,
2370 struct ocfs2_xattr_header *header,
2371 struct ocfs2_caching_info *ref_ci,
2372 struct buffer_head *ref_root_bh)
2373{
2374 int ret = 0, i, ref_credits;
2375 struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
2376 struct ocfs2_xattr_set_ctxt ctxt = { NULL, NULL, };
2377 void *val;
2378
2379 ocfs2_init_dealloc_ctxt(&ctxt.dealloc);
2380
2381 for (i = 0; i < le16_to_cpu(header->xh_count); i++) {
2382 struct ocfs2_xattr_entry *entry = &header->xh_entries[i];
2383
2384 if (ocfs2_xattr_is_local(entry))
2385 continue;
2386
2387 val = (void *)header +
2388 le16_to_cpu(entry->xe_name_offset);
2389 vb->vb_xv = (struct ocfs2_xattr_value_root *)
2390 (val + OCFS2_XATTR_SIZE(entry->xe_name_len));
2391
2392 ret = ocfs2_lock_xattr_remove_allocators(inode, vb->vb_xv,
2393 ref_ci, ref_root_bh,
2394 &ctxt.meta_ac,
2395 &ref_credits);
2396
2397 ctxt.handle = ocfs2_start_trans(osb, ref_credits +
2398 ocfs2_remove_extent_credits(osb->sb));
2399 if (IS_ERR(ctxt.handle)) {
2400 ret = PTR_ERR(ctxt.handle);
2401 mlog_errno(ret);
2402 break;
2403 }
2404
2405 ret = ocfs2_xattr_value_truncate(inode, vb, 0, &ctxt);
Tao Mace9c5a52009-08-18 11:43:59 +08002406
2407 ocfs2_commit_trans(osb, ctxt.handle);
2408 if (ctxt.meta_ac) {
2409 ocfs2_free_alloc_context(ctxt.meta_ac);
2410 ctxt.meta_ac = NULL;
2411 }
Wengang Wangb8a0ae52011-10-12 15:22:15 +08002412
2413 if (ret < 0) {
2414 mlog_errno(ret);
2415 break;
2416 }
2417
Tao Mace9c5a52009-08-18 11:43:59 +08002418 }
2419
2420 if (ctxt.meta_ac)
2421 ocfs2_free_alloc_context(ctxt.meta_ac);
2422 ocfs2_schedule_truncate_log_flush(osb, 1);
2423 ocfs2_run_deallocs(osb, &ctxt.dealloc);
2424 return ret;
2425}
2426
Tiger Yangcf1d6c72008-08-18 17:11:00 +08002427static int ocfs2_xattr_ibody_remove(struct inode *inode,
Tao Mace9c5a52009-08-18 11:43:59 +08002428 struct buffer_head *di_bh,
2429 struct ocfs2_caching_info *ref_ci,
2430 struct buffer_head *ref_root_bh)
Tiger Yangcf1d6c72008-08-18 17:11:00 +08002431{
2432
2433 struct ocfs2_dinode *di = (struct ocfs2_dinode *)di_bh->b_data;
2434 struct ocfs2_xattr_header *header;
2435 int ret;
Joel Becker43119012008-12-09 16:24:43 -08002436 struct ocfs2_xattr_value_buf vb = {
2437 .vb_bh = di_bh,
2438 .vb_access = ocfs2_journal_access_di,
2439 };
Tiger Yangcf1d6c72008-08-18 17:11:00 +08002440
2441 header = (struct ocfs2_xattr_header *)
2442 ((void *)di + inode->i_sb->s_blocksize -
2443 le16_to_cpu(di->i_xattr_inline_size));
2444
Tao Mace9c5a52009-08-18 11:43:59 +08002445 ret = ocfs2_remove_value_outside(inode, &vb, header,
2446 ref_ci, ref_root_bh);
Tiger Yangcf1d6c72008-08-18 17:11:00 +08002447
2448 return ret;
2449}
2450
/*
 * Argument bundle threaded through ocfs2_iterate_xattr_index_block()
 * to ocfs2_rm_xattr_cluster(): carries the refcount tree caching info
 * and root buffer needed when freeing xattr value clusters.
 */
struct ocfs2_rm_xattr_bucket_para {
	struct ocfs2_caching_info *ref_ci;	/* may be NULL (non-refcounted inode) */
	struct buffer_head *ref_root_bh;	/* refcount tree root block */
};
2455
Tiger Yangcf1d6c72008-08-18 17:11:00 +08002456static int ocfs2_xattr_block_remove(struct inode *inode,
Tao Mace9c5a52009-08-18 11:43:59 +08002457 struct buffer_head *blk_bh,
2458 struct ocfs2_caching_info *ref_ci,
2459 struct buffer_head *ref_root_bh)
Tiger Yangcf1d6c72008-08-18 17:11:00 +08002460{
2461 struct ocfs2_xattr_block *xb;
Tiger Yangcf1d6c72008-08-18 17:11:00 +08002462 int ret = 0;
Joel Becker43119012008-12-09 16:24:43 -08002463 struct ocfs2_xattr_value_buf vb = {
2464 .vb_bh = blk_bh,
2465 .vb_access = ocfs2_journal_access_xb,
2466 };
Tao Mace9c5a52009-08-18 11:43:59 +08002467 struct ocfs2_rm_xattr_bucket_para args = {
2468 .ref_ci = ref_ci,
2469 .ref_root_bh = ref_root_bh,
2470 };
Tiger Yangcf1d6c72008-08-18 17:11:00 +08002471
2472 xb = (struct ocfs2_xattr_block *)blk_bh->b_data;
Tao Maa3944252008-08-18 17:38:54 +08002473 if (!(le16_to_cpu(xb->xb_flags) & OCFS2_XATTR_INDEXED)) {
2474 struct ocfs2_xattr_header *header = &(xb->xb_attrs.xb_header);
Tao Mace9c5a52009-08-18 11:43:59 +08002475 ret = ocfs2_remove_value_outside(inode, &vb, header,
2476 ref_ci, ref_root_bh);
Tao Maa3944252008-08-18 17:38:54 +08002477 } else
Tao Ma47bca492009-08-18 11:43:42 +08002478 ret = ocfs2_iterate_xattr_index_block(inode,
2479 blk_bh,
2480 ocfs2_rm_xattr_cluster,
Tao Mace9c5a52009-08-18 11:43:59 +08002481 &args);
Tiger Yangcf1d6c72008-08-18 17:11:00 +08002482
2483 return ret;
2484}
2485
/*
 * Free the external xattr block at @block: tear down every value it
 * references, then return the block itself to its extent allocator.
 *
 * @inode:       inode owning the xattr block
 * @block:       physical block number of the ocfs2_xattr_block
 * @ref_ci:      refcount tree caching info (NULL for a non-refcounted
 *               inode)
 * @ref_root_bh: refcount tree root buffer, paired with @ref_ci
 *
 * Returns 0 on success or a negative error code.
 */
static int ocfs2_xattr_free_block(struct inode *inode,
				  u64 block,
				  struct ocfs2_caching_info *ref_ci,
				  struct buffer_head *ref_root_bh)
{
	struct inode *xb_alloc_inode;
	struct buffer_head *xb_alloc_bh = NULL;
	struct buffer_head *blk_bh = NULL;
	struct ocfs2_xattr_block *xb;
	struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
	handle_t *handle;
	int ret = 0;
	u64 blk, bg_blkno;
	u16 bit;

	ret = ocfs2_read_xattr_block(inode, block, &blk_bh);
	if (ret < 0) {
		mlog_errno(ret);
		goto out;
	}

	/* Drop all xattr values referenced by the block first. */
	ret = ocfs2_xattr_block_remove(inode, blk_bh, ref_ci, ref_root_bh);
	if (ret < 0) {
		mlog_errno(ret);
		goto out;
	}

	/*
	 * Work out which suballocator group the block was allocated
	 * from; a zero xb_suballoc_loc means the group must be derived
	 * from the block/bit pair.
	 */
	xb = (struct ocfs2_xattr_block *)blk_bh->b_data;
	blk = le64_to_cpu(xb->xb_blkno);
	bit = le16_to_cpu(xb->xb_suballoc_bit);
	if (xb->xb_suballoc_loc)
		bg_blkno = le64_to_cpu(xb->xb_suballoc_loc);
	else
		bg_blkno = ocfs2_which_suballoc_group(blk, bit);

	xb_alloc_inode = ocfs2_get_system_file_inode(osb,
				EXTENT_ALLOC_SYSTEM_INODE,
				le16_to_cpu(xb->xb_suballoc_slot));
	if (!xb_alloc_inode) {
		ret = -ENOMEM;
		mlog_errno(ret);
		goto out;
	}
	/* Lock ordering: allocator inode's i_rwsem, then its cluster
	 * lock, then the journal transaction. */
	inode_lock(xb_alloc_inode);

	ret = ocfs2_inode_lock(xb_alloc_inode, &xb_alloc_bh, 1);
	if (ret < 0) {
		mlog_errno(ret);
		goto out_mutex;
	}

	handle = ocfs2_start_trans(osb, OCFS2_SUBALLOC_FREE);
	if (IS_ERR(handle)) {
		ret = PTR_ERR(handle);
		mlog_errno(ret);
		goto out_unlock;
	}

	/* Give the single metadata block back to the allocator. */
	ret = ocfs2_free_suballoc_bits(handle, xb_alloc_inode, xb_alloc_bh,
				       bit, bg_blkno, 1);
	if (ret < 0)
		mlog_errno(ret);

	ocfs2_commit_trans(osb, handle);
out_unlock:
	ocfs2_inode_unlock(xb_alloc_inode, 1);
	brelse(xb_alloc_bh);
out_mutex:
	inode_unlock(xb_alloc_inode);
	iput(xb_alloc_inode);
out:
	brelse(blk_bh);
	return ret;
}
2560
/*
 * ocfs2_xattr_remove()
 *
 * Free extended attribute resources associated with this inode.
 *
 * Tears down inline xattrs, frees any external xattr block, then
 * clears the xattr fields and feature flags in the dinode under a
 * journal transaction.  If the inode is refcounted, the refcount
 * tree is held locked across the whole teardown.
 */
int ocfs2_xattr_remove(struct inode *inode, struct buffer_head *di_bh)
{
	struct ocfs2_inode_info *oi = OCFS2_I(inode);
	struct ocfs2_dinode *di = (struct ocfs2_dinode *)di_bh->b_data;
	struct ocfs2_refcount_tree *ref_tree = NULL;
	struct buffer_head *ref_root_bh = NULL;
	struct ocfs2_caching_info *ref_ci = NULL;
	handle_t *handle;
	int ret;

	/* Nothing to do if the volume or this inode carries no xattrs. */
	if (!ocfs2_supports_xattr(OCFS2_SB(inode->i_sb)))
		return 0;

	if (!(oi->ip_dyn_features & OCFS2_HAS_XATTR_FL))
		return 0;

	/* Refcounted inode: pin the refcount tree while values are freed. */
	if (OCFS2_I(inode)->ip_dyn_features & OCFS2_HAS_REFCOUNT_FL) {
		ret = ocfs2_lock_refcount_tree(OCFS2_SB(inode->i_sb),
					       le64_to_cpu(di->i_refcount_loc),
					       1, &ref_tree, &ref_root_bh);
		if (ret) {
			mlog_errno(ret);
			goto out;
		}
		ref_ci = &ref_tree->rf_ci;

	}

	if (oi->ip_dyn_features & OCFS2_INLINE_XATTR_FL) {
		ret = ocfs2_xattr_ibody_remove(inode, di_bh,
					       ref_ci, ref_root_bh);
		if (ret < 0) {
			mlog_errno(ret);
			goto out;
		}
	}

	if (di->i_xattr_loc) {
		ret = ocfs2_xattr_free_block(inode,
					     le64_to_cpu(di->i_xattr_loc),
					     ref_ci, ref_root_bh);
		if (ret < 0) {
			mlog_errno(ret);
			goto out;
		}
	}

	/* Finally clear the xattr state recorded in the dinode itself. */
	handle = ocfs2_start_trans((OCFS2_SB(inode->i_sb)),
				   OCFS2_INODE_UPDATE_CREDITS);
	if (IS_ERR(handle)) {
		ret = PTR_ERR(handle);
		mlog_errno(ret);
		goto out;
	}
	ret = ocfs2_journal_access_di(handle, INODE_CACHE(inode), di_bh,
				      OCFS2_JOURNAL_ACCESS_WRITE);
	if (ret) {
		mlog_errno(ret);
		goto out_commit;
	}

	di->i_xattr_loc = 0;

	/* ip_lock keeps the in-memory and on-disk dyn_features in step. */
	spin_lock(&oi->ip_lock);
	oi->ip_dyn_features &= ~(OCFS2_INLINE_XATTR_FL | OCFS2_HAS_XATTR_FL);
	di->i_dyn_features = cpu_to_le16(oi->ip_dyn_features);
	spin_unlock(&oi->ip_lock);
	ocfs2_update_inode_fsync_trans(handle, inode, 0);

	ocfs2_journal_dirty(handle, di_bh);
out_commit:
	ocfs2_commit_trans(OCFS2_SB(inode->i_sb), handle);
out:
	if (ref_tree)
		ocfs2_unlock_refcount_tree(OCFS2_SB(inode->i_sb), ref_tree, 1);
	brelse(ref_root_bh);
	return ret;
}
2644
2645static int ocfs2_xattr_has_space_inline(struct inode *inode,
2646 struct ocfs2_dinode *di)
2647{
2648 struct ocfs2_inode_info *oi = OCFS2_I(inode);
2649 unsigned int xattrsize = OCFS2_SB(inode->i_sb)->s_xattr_inline_size;
2650 int free;
2651
2652 if (xattrsize < OCFS2_MIN_XATTR_INLINE_SIZE)
2653 return 0;
2654
2655 if (oi->ip_dyn_features & OCFS2_INLINE_DATA_FL) {
2656 struct ocfs2_inline_data *idata = &di->id2.i_data;
2657 free = le16_to_cpu(idata->id_count) - le64_to_cpu(di->i_size);
2658 } else if (ocfs2_inode_is_fast_symlink(inode)) {
2659 free = ocfs2_fast_symlink_chars(inode->i_sb) -
2660 le64_to_cpu(di->i_size);
2661 } else {
2662 struct ocfs2_extent_list *el = &di->id2.i_list;
2663 free = (le16_to_cpu(el->l_count) -
2664 le16_to_cpu(el->l_next_free_rec)) *
2665 sizeof(struct ocfs2_extent_rec);
2666 }
2667 if (free >= xattrsize)
2668 return 1;
2669
2670 return 0;
2671}
2672
2673/*
2674 * ocfs2_xattr_ibody_find()
2675 *
2676 * Find extended attribute in inode block and
2677 * fill search info into struct ocfs2_xattr_search.
2678 */
2679static int ocfs2_xattr_ibody_find(struct inode *inode,
2680 int name_index,
2681 const char *name,
2682 struct ocfs2_xattr_search *xs)
2683{
2684 struct ocfs2_inode_info *oi = OCFS2_I(inode);
2685 struct ocfs2_dinode *di = (struct ocfs2_dinode *)xs->inode_bh->b_data;
2686 int ret;
2687 int has_space = 0;
2688
2689 if (inode->i_sb->s_blocksize == OCFS2_MIN_BLOCKSIZE)
2690 return 0;
2691
2692 if (!(oi->ip_dyn_features & OCFS2_INLINE_XATTR_FL)) {
2693 down_read(&oi->ip_alloc_sem);
2694 has_space = ocfs2_xattr_has_space_inline(inode, di);
2695 up_read(&oi->ip_alloc_sem);
2696 if (!has_space)
2697 return 0;
2698 }
2699
2700 xs->xattr_bh = xs->inode_bh;
2701 xs->end = (void *)di + inode->i_sb->s_blocksize;
2702 if (oi->ip_dyn_features & OCFS2_INLINE_XATTR_FL)
2703 xs->header = (struct ocfs2_xattr_header *)
2704 (xs->end - le16_to_cpu(di->i_xattr_inline_size));
2705 else
2706 xs->header = (struct ocfs2_xattr_header *)
2707 (xs->end - OCFS2_SB(inode->i_sb)->s_xattr_inline_size);
2708 xs->base = (void *)xs->header;
2709 xs->here = xs->header->xh_entries;
2710
2711 /* Find the named attribute. */
2712 if (oi->ip_dyn_features & OCFS2_INLINE_XATTR_FL) {
2713 ret = ocfs2_xattr_find_entry(name_index, name, xs);
2714 if (ret && ret != -ENODATA)
2715 return ret;
2716 xs->not_found = ret;
2717 }
2718
2719 return 0;
2720}
2721
/*
 * Carve out the inline xattr area at the tail of the inode block,
 * shrinking whatever currently owns that space (inline data or the
 * extent record list; fast symlinks need no adjustment) by
 * s_xattr_inline_size bytes, and set the xattr feature flags.
 *
 * Caller must supply a running transaction in @ctxt->handle.
 * Returns 0 on success, -ENOSPC if the inode has no room, or a
 * journal error.
 */
static int ocfs2_xattr_ibody_init(struct inode *inode,
				  struct buffer_head *di_bh,
				  struct ocfs2_xattr_set_ctxt *ctxt)
{
	int ret;
	struct ocfs2_inode_info *oi = OCFS2_I(inode);
	struct ocfs2_dinode *di = (struct ocfs2_dinode *)di_bh->b_data;
	struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
	unsigned int xattrsize = osb->s_xattr_inline_size;

	if (!ocfs2_xattr_has_space_inline(inode, di)) {
		ret = -ENOSPC;
		goto out;
	}

	ret = ocfs2_journal_access_di(ctxt->handle, INODE_CACHE(inode), di_bh,
				      OCFS2_JOURNAL_ACCESS_WRITE);
	if (ret) {
		mlog_errno(ret);
		goto out;
	}

	/*
	 * Adjust extent record count or inline data size
	 * to reserve space for extended attribute.
	 */
	if (oi->ip_dyn_features & OCFS2_INLINE_DATA_FL) {
		struct ocfs2_inline_data *idata = &di->id2.i_data;
		le16_add_cpu(&idata->id_count, -xattrsize);
	} else if (!(ocfs2_inode_is_fast_symlink(inode))) {
		struct ocfs2_extent_list *el = &di->id2.i_list;
		le16_add_cpu(&el->l_count, -(xattrsize /
					     sizeof(struct ocfs2_extent_rec)));
	}
	di->i_xattr_inline_size = cpu_to_le16(xattrsize);

	/* Publish the new feature bits; ip_lock keeps the in-memory
	 * and on-disk copies consistent. */
	spin_lock(&oi->ip_lock);
	oi->ip_dyn_features |= OCFS2_INLINE_XATTR_FL|OCFS2_HAS_XATTR_FL;
	di->i_dyn_features = cpu_to_le16(oi->ip_dyn_features);
	spin_unlock(&oi->ip_lock);

	ocfs2_journal_dirty(ctxt->handle, di_bh);

out:
	return ret;
}
2768
/*
 * ocfs2_xattr_ibody_set()
 *
 * Set, replace or remove an extended attribute into inode block.
 *
 * Takes ip_alloc_sem for write; creates the inline xattr area on
 * demand via ocfs2_xattr_ibody_init() before delegating the actual
 * entry manipulation to ocfs2_xa_set().  Returns -ENOSPC when the
 * inline area cannot hold the attribute.
 */
static int ocfs2_xattr_ibody_set(struct inode *inode,
				 struct ocfs2_xattr_info *xi,
				 struct ocfs2_xattr_search *xs,
				 struct ocfs2_xattr_set_ctxt *ctxt)
{
	int ret;
	struct ocfs2_inode_info *oi = OCFS2_I(inode);
	struct ocfs2_xa_loc loc;

	/* Minimum-sized blocks have no room for inline xattrs. */
	if (inode->i_sb->s_blocksize == OCFS2_MIN_BLOCKSIZE)
		return -ENOSPC;

	down_write(&oi->ip_alloc_sem);
	if (!(oi->ip_dyn_features & OCFS2_INLINE_XATTR_FL)) {
		/* No inline area yet - try to carve one out. */
		ret = ocfs2_xattr_ibody_init(inode, xs->inode_bh, ctxt);
		if (ret) {
			if (ret != -ENOSPC)
				mlog_errno(ret);
			goto out;
		}
	}

	ocfs2_init_dinode_xa_loc(&loc, inode, xs->inode_bh,
				 xs->not_found ? NULL : xs->here);
	ret = ocfs2_xa_set(&loc, xi, ctxt);
	if (ret) {
		if (ret != -ENOSPC)
			mlog_errno(ret);
		goto out;
	}
	xs->here = loc.xl_entry;

out:
	up_write(&oi->ip_alloc_sem);

	return ret;
}
2812
2813/*
2814 * ocfs2_xattr_block_find()
2815 *
2816 * Find extended attribute in external block and
2817 * fill search info into struct ocfs2_xattr_search.
2818 */
2819static int ocfs2_xattr_block_find(struct inode *inode,
2820 int name_index,
2821 const char *name,
2822 struct ocfs2_xattr_search *xs)
2823{
2824 struct ocfs2_dinode *di = (struct ocfs2_dinode *)xs->inode_bh->b_data;
2825 struct buffer_head *blk_bh = NULL;
Tao Ma589dc262008-08-18 17:38:51 +08002826 struct ocfs2_xattr_block *xb;
Tiger Yangcf1d6c72008-08-18 17:11:00 +08002827 int ret = 0;
2828
2829 if (!di->i_xattr_loc)
2830 return ret;
2831
Joel Becker4ae1d692008-11-13 14:49:18 -08002832 ret = ocfs2_read_xattr_block(inode, le64_to_cpu(di->i_xattr_loc),
2833 &blk_bh);
Tiger Yangcf1d6c72008-08-18 17:11:00 +08002834 if (ret < 0) {
2835 mlog_errno(ret);
2836 return ret;
2837 }
Joel Beckerf6087fb2008-10-20 18:20:43 -07002838
Tiger Yangcf1d6c72008-08-18 17:11:00 +08002839 xs->xattr_bh = blk_bh;
Joel Becker4ae1d692008-11-13 14:49:18 -08002840 xb = (struct ocfs2_xattr_block *)blk_bh->b_data;
Tiger Yangcf1d6c72008-08-18 17:11:00 +08002841
Tao Ma589dc262008-08-18 17:38:51 +08002842 if (!(le16_to_cpu(xb->xb_flags) & OCFS2_XATTR_INDEXED)) {
2843 xs->header = &xb->xb_attrs.xb_header;
2844 xs->base = (void *)xs->header;
2845 xs->end = (void *)(blk_bh->b_data) + blk_bh->b_size;
2846 xs->here = xs->header->xh_entries;
2847
2848 ret = ocfs2_xattr_find_entry(name_index, name, xs);
2849 } else
2850 ret = ocfs2_xattr_index_block_find(inode, blk_bh,
2851 name_index,
2852 name, xs);
2853
Tiger Yangcf1d6c72008-08-18 17:11:00 +08002854 if (ret && ret != -ENODATA) {
2855 xs->xattr_bh = NULL;
2856 goto cleanup;
2857 }
2858 xs->not_found = ret;
2859 return 0;
Tiger Yangcf1d6c72008-08-18 17:11:00 +08002860cleanup:
2861 brelse(blk_bh);
2862
2863 return ret;
2864}
2865
/*
 * Allocate and initialize a new external xattr block for @inode, wire
 * it into the dinode (i_xattr_loc plus OCFS2_HAS_XATTR_FL), and return
 * its buffer in *ret_bh.  When @indexed is set the block is set up as
 * the root of a bucket-indexed xattr tree rather than a flat entry
 * list.
 *
 * Caller supplies a running transaction and a metadata allocation
 * context in @ctxt.  Returns 0 on success or a negative error code.
 * The returned buffer's reference is transferred to the caller.
 */
static int ocfs2_create_xattr_block(struct inode *inode,
				    struct buffer_head *inode_bh,
				    struct ocfs2_xattr_set_ctxt *ctxt,
				    int indexed,
				    struct buffer_head **ret_bh)
{
	int ret;
	u16 suballoc_bit_start;
	u32 num_got;
	u64 suballoc_loc, first_blkno;
	struct ocfs2_dinode *di = (struct ocfs2_dinode *)inode_bh->b_data;
	struct buffer_head *new_bh = NULL;
	struct ocfs2_xattr_block *xblk;

	ret = ocfs2_journal_access_di(ctxt->handle, INODE_CACHE(inode),
				      inode_bh, OCFS2_JOURNAL_ACCESS_CREATE);
	if (ret < 0) {
		mlog_errno(ret);
		goto end;
	}

	/* Claim one metadata block from the suballocator. */
	ret = ocfs2_claim_metadata(ctxt->handle, ctxt->meta_ac, 1,
				   &suballoc_loc, &suballoc_bit_start,
				   &num_got, &first_blkno);
	if (ret < 0) {
		mlog_errno(ret);
		goto end;
	}

	new_bh = sb_getblk(inode->i_sb, first_blkno);
	if (!new_bh) {
		ret = -ENOMEM;
		mlog_errno(ret);
		goto end;
	}

	ocfs2_set_new_buffer_uptodate(INODE_CACHE(inode), new_bh);

	ret = ocfs2_journal_access_xb(ctxt->handle, INODE_CACHE(inode),
				      new_bh,
				      OCFS2_JOURNAL_ACCESS_CREATE);
	if (ret < 0) {
		mlog_errno(ret);
		goto end;
	}

	/* Initialize ocfs2_xattr_block */
	xblk = (struct ocfs2_xattr_block *)new_bh->b_data;
	memset(xblk, 0, inode->i_sb->s_blocksize);
	strcpy((void *)xblk, OCFS2_XATTR_BLOCK_SIGNATURE);
	xblk->xb_suballoc_slot = cpu_to_le16(ctxt->meta_ac->ac_alloc_slot);
	xblk->xb_suballoc_loc = cpu_to_le64(suballoc_loc);
	xblk->xb_suballoc_bit = cpu_to_le16(suballoc_bit_start);
	xblk->xb_fs_generation =
		cpu_to_le32(OCFS2_SB(inode->i_sb)->fs_generation);
	xblk->xb_blkno = cpu_to_le64(first_blkno);
	if (indexed) {
		/* Set up an empty, single-cluster xattr tree root. */
		struct ocfs2_xattr_tree_root *xr = &xblk->xb_attrs.xb_root;
		xr->xt_clusters = cpu_to_le32(1);
		xr->xt_last_eb_blk = 0;
		xr->xt_list.l_tree_depth = 0;
		xr->xt_list.l_count = cpu_to_le16(
			ocfs2_xattr_recs_per_xb(inode->i_sb));
		xr->xt_list.l_next_free_rec = cpu_to_le16(1);
		xblk->xb_flags = cpu_to_le16(OCFS2_XATTR_INDEXED);
	}
	ocfs2_journal_dirty(ctxt->handle, new_bh);

	/* Add it to the inode */
	di->i_xattr_loc = cpu_to_le64(first_blkno);

	/* ip_lock keeps in-memory and on-disk dyn_features in step. */
	spin_lock(&OCFS2_I(inode)->ip_lock);
	OCFS2_I(inode)->ip_dyn_features |= OCFS2_HAS_XATTR_FL;
	di->i_dyn_features = cpu_to_le16(OCFS2_I(inode)->ip_dyn_features);
	spin_unlock(&OCFS2_I(inode)->ip_lock);

	ocfs2_journal_dirty(ctxt->handle, inode_bh);

	/* Hand the buffer (and its reference) to the caller. */
	*ret_bh = new_bh;
	new_bh = NULL;

end:
	brelse(new_bh);
	return ret;
}
2951
/*
 * ocfs2_xattr_block_set()
 *
 * Set, replace or remove an extended attribute into external block.
 *
 * Creates the external xattr block on first use.  A flat (unindexed)
 * block is tried first via ocfs2_xa_set(); if that runs out of space
 * (and the caller hasn't requested abort), the block is converted to
 * a bucket-indexed tree and the set is retried through
 * ocfs2_xattr_set_entry_index_block().
 */
static int ocfs2_xattr_block_set(struct inode *inode,
				 struct ocfs2_xattr_info *xi,
				 struct ocfs2_xattr_search *xs,
				 struct ocfs2_xattr_set_ctxt *ctxt)
{
	struct buffer_head *new_bh = NULL;
	struct ocfs2_xattr_block *xblk = NULL;
	int ret;
	struct ocfs2_xa_loc loc;

	if (!xs->xattr_bh) {
		/* No xattr block yet - create a flat one. */
		ret = ocfs2_create_xattr_block(inode, xs->inode_bh, ctxt,
					       0, &new_bh);
		if (ret) {
			mlog_errno(ret);
			goto end;
		}

		xs->xattr_bh = new_bh;
		xblk = (struct ocfs2_xattr_block *)xs->xattr_bh->b_data;
		xs->header = &xblk->xb_attrs.xb_header;
		xs->base = (void *)xs->header;
		xs->end = (void *)xblk + inode->i_sb->s_blocksize;
		xs->here = xs->header->xh_entries;
	} else
		xblk = (struct ocfs2_xattr_block *)xs->xattr_bh->b_data;

	if (!(le16_to_cpu(xblk->xb_flags) & OCFS2_XATTR_INDEXED)) {
		ocfs2_init_xattr_block_xa_loc(&loc, inode, xs->xattr_bh,
					      xs->not_found ? NULL : xs->here);

		ret = ocfs2_xa_set(&loc, xi, ctxt);
		if (!ret)
			xs->here = loc.xl_entry;
		else if ((ret != -ENOSPC) || ctxt->set_abort)
			goto end;
		else {
			/* Flat block full: convert to an indexed tree
			 * and fall through to retry below. */
			ret = ocfs2_xattr_create_index_block(inode, xs, ctxt);
			if (ret)
				goto end;
		}
	}

	if (le16_to_cpu(xblk->xb_flags) & OCFS2_XATTR_INDEXED)
		ret = ocfs2_xattr_set_entry_index_block(inode, xi, xs, ctxt);

end:
	return ret;
}
3007
Tao Ma78f30c32008-11-12 08:27:00 +08003008/* Check whether the new xattr can be inserted into the inode. */
3009static int ocfs2_xattr_can_be_in_inode(struct inode *inode,
3010 struct ocfs2_xattr_info *xi,
3011 struct ocfs2_xattr_search *xs)
3012{
Tao Ma78f30c32008-11-12 08:27:00 +08003013 struct ocfs2_xattr_entry *last;
3014 int free, i;
3015 size_t min_offs = xs->end - xs->base;
3016
3017 if (!xs->header)
3018 return 0;
3019
3020 last = xs->header->xh_entries;
3021
3022 for (i = 0; i < le16_to_cpu(xs->header->xh_count); i++) {
3023 size_t offs = le16_to_cpu(last->xe_name_offset);
3024 if (offs < min_offs)
3025 min_offs = offs;
3026 last += 1;
3027 }
3028
Tiger Yang4442f512009-02-20 11:11:50 +08003029 free = min_offs - ((void *)last - xs->base) - OCFS2_XATTR_HEADER_GAP;
Tao Ma78f30c32008-11-12 08:27:00 +08003030 if (free < 0)
3031 return 0;
3032
3033 BUG_ON(!xs->not_found);
3034
Joel Becker199799a2009-08-14 19:04:15 -07003035 if (free >= (sizeof(struct ocfs2_xattr_entry) + namevalue_size_xi(xi)))
Tao Ma78f30c32008-11-12 08:27:00 +08003036 return 1;
3037
3038 return 0;
3039}
3040
/*
 * Estimate the resources a pending xattr set will consume:
 *
 * @clusters_need: data clusters to reserve (outside-stored values,
 *                 plus a cluster for a possible new bucket/block)
 * @meta_need:     metadata blocks to reserve (value trees, xattr tree)
 * @credits_need:  journal credits for the whole operation
 *
 * All three output pointers are optional.  The estimate covers every
 * combination of old location (inode / flat block / indexed bucket)
 * and new placement; deletes need only credits, never allocation.
 * Returns 0 on success or a negative error from reading the xattr
 * block.
 */
static int ocfs2_calc_xattr_set_need(struct inode *inode,
				     struct ocfs2_dinode *di,
				     struct ocfs2_xattr_info *xi,
				     struct ocfs2_xattr_search *xis,
				     struct ocfs2_xattr_search *xbs,
				     int *clusters_need,
				     int *meta_need,
				     int *credits_need)
{
	int ret = 0, old_in_xb = 0;
	int clusters_add = 0, meta_add = 0, credits = 0;
	struct buffer_head *bh = NULL;
	struct ocfs2_xattr_block *xb = NULL;
	struct ocfs2_xattr_entry *xe = NULL;
	struct ocfs2_xattr_value_root *xv = NULL;
	char *base = NULL;
	int name_offset, name_len = 0;
	u32 new_clusters = ocfs2_clusters_for_bytes(inode->i_sb,
						    xi->xi_value_len);
	u64 value_size;

	/*
	 * Calculate the clusters we need to write.
	 * No matter whether we replace an old one or add a new one,
	 * we need this for writing.
	 */
	if (xi->xi_value_len > OCFS2_XATTR_INLINE_SIZE)
		credits += new_clusters *
			   ocfs2_clusters_to_blocks(inode->i_sb, 1);

	if (xis->not_found && xbs->not_found) {
		/* Brand-new xattr: no old entry to account for. */
		credits += ocfs2_blocks_per_xattr_bucket(inode->i_sb);

		if (xi->xi_value_len > OCFS2_XATTR_INLINE_SIZE) {
			clusters_add += new_clusters;
			credits += ocfs2_calc_extend_credits(inode->i_sb,
							&def_xv.xv.xr_list);
		}

		goto meta_guess;
	}

	/* Locate the old entry: either in the inode or the xattr block. */
	if (!xis->not_found) {
		xe = xis->here;
		name_offset = le16_to_cpu(xe->xe_name_offset);
		name_len = OCFS2_XATTR_SIZE(xe->xe_name_len);
		base = xis->base;
		credits += OCFS2_INODE_UPDATE_CREDITS;
	} else {
		int i, block_off = 0;
		xb = (struct ocfs2_xattr_block *)xbs->xattr_bh->b_data;
		xe = xbs->here;
		name_offset = le16_to_cpu(xe->xe_name_offset);
		name_len = OCFS2_XATTR_SIZE(xe->xe_name_len);
		i = xbs->here - xbs->header->xh_entries;
		old_in_xb = 1;

		if (le16_to_cpu(xb->xb_flags) & OCFS2_XATTR_INDEXED) {
			ret = ocfs2_xattr_bucket_get_name_value(inode->i_sb,
							bucket_xh(xbs->bucket),
							i, &block_off,
							&name_offset);
			base = bucket_block(xbs->bucket, block_off);
			credits += ocfs2_blocks_per_xattr_bucket(inode->i_sb);
		} else {
			base = xbs->base;
			credits += OCFS2_XATTR_BLOCK_UPDATE_CREDITS;
		}
	}

	/*
	 * delete a xattr doesn't need metadata and cluster allocation.
	 * so just calculate the credits and return.
	 *
	 * The credits for removing the value tree will be extended
	 * by ocfs2_remove_extent itself.
	 */
	if (!xi->xi_value) {
		if (!ocfs2_xattr_is_local(xe))
			credits += ocfs2_remove_extent_credits(inode->i_sb);

		goto out;
	}

	/* do cluster allocation guess first. */
	value_size = le64_to_cpu(xe->xe_value_size);

	if (old_in_xb) {
		/*
		 * In xattr set, we always try to set the xe in inode first,
		 * so if it can be inserted into inode successfully, the old
		 * one will be removed from the xattr block, and this xattr
		 * will be inserted into inode as a new xattr in inode.
		 */
		if (ocfs2_xattr_can_be_in_inode(inode, xi, xis)) {
			clusters_add += new_clusters;
			credits += ocfs2_remove_extent_credits(inode->i_sb) +
				   OCFS2_INODE_UPDATE_CREDITS;
			if (!ocfs2_xattr_is_local(xe))
				credits += ocfs2_calc_extend_credits(
							inode->i_sb,
							&def_xv.xv.xr_list);
			goto out;
		}
	}

	if (xi->xi_value_len > OCFS2_XATTR_INLINE_SIZE) {
		/* the new values will be stored outside. */
		u32 old_clusters = 0;

		if (!ocfs2_xattr_is_local(xe)) {
			old_clusters = ocfs2_clusters_for_bytes(inode->i_sb,
								value_size);
			xv = (struct ocfs2_xattr_value_root *)
			     (base + name_offset + name_len);
			value_size = OCFS2_XATTR_ROOT_SIZE;
		} else
			xv = &def_xv.xv;

		if (old_clusters >= new_clusters) {
			/* Shrinking: only extent-removal credits needed. */
			credits += ocfs2_remove_extent_credits(inode->i_sb);
			goto out;
		} else {
			/* Growing: reserve the difference plus tree meta. */
			meta_add += ocfs2_extend_meta_needed(&xv->xr_list);
			clusters_add += new_clusters - old_clusters;
			credits += ocfs2_calc_extend_credits(inode->i_sb,
							     &xv->xr_list);
			if (value_size >= OCFS2_XATTR_ROOT_SIZE)
				goto out;
		}
	} else {
		/*
		 * Now the new value will be stored inside. So if the new
		 * value is smaller than the size of value root or the old
		 * value, we don't need any allocation, otherwise we have
		 * to guess metadata allocation.
		 */
		if ((ocfs2_xattr_is_local(xe) &&
		     (value_size >= xi->xi_value_len)) ||
		    (!ocfs2_xattr_is_local(xe) &&
		     OCFS2_XATTR_ROOT_SIZE >= xi->xi_value_len))
			goto out;
	}

meta_guess:
	/* calculate metadata allocation. */
	if (di->i_xattr_loc) {
		if (!xbs->xattr_bh) {
			ret = ocfs2_read_xattr_block(inode,
						     le64_to_cpu(di->i_xattr_loc),
						     &bh);
			if (ret) {
				mlog_errno(ret);
				goto out;
			}

			xb = (struct ocfs2_xattr_block *)bh->b_data;
		} else
			xb = (struct ocfs2_xattr_block *)xbs->xattr_bh->b_data;

		/*
		 * If there is already an xattr tree, good, we can calculate
		 * like other b-trees. Otherwise we may have the chance of
		 * create a tree, the credit calculation is borrowed from
		 * ocfs2_calc_extend_credits with root_el = NULL. And the
		 * new tree will be cluster based, so no meta is needed.
		 */
		if (le16_to_cpu(xb->xb_flags) & OCFS2_XATTR_INDEXED) {
			struct ocfs2_extent_list *el =
				 &xb->xb_attrs.xb_root.xt_list;
			meta_add += ocfs2_extend_meta_needed(el);
			credits += ocfs2_calc_extend_credits(inode->i_sb,
							     el);
		} else
			credits += OCFS2_SUBALLOC_ALLOC + 1;

		/*
		 * This cluster will be used either for new bucket or for
		 * new xattr block.
		 * If the cluster size is the same as the bucket size, one
		 * more is needed since we may need to extend the bucket
		 * also.
		 */
		clusters_add += 1;
		credits += ocfs2_blocks_per_xattr_bucket(inode->i_sb);
		if (OCFS2_XATTR_BUCKET_SIZE ==
			OCFS2_SB(inode->i_sb)->s_clustersize) {
			credits += ocfs2_blocks_per_xattr_bucket(inode->i_sb);
			clusters_add += 1;
		}
	} else {
		/* No xattr block yet: account for creating one. */
		credits += OCFS2_XATTR_BLOCK_CREATE_CREDITS;
		if (xi->xi_value_len > OCFS2_XATTR_INLINE_SIZE) {
			struct ocfs2_extent_list *el = &def_xv.xv.xr_list;
			meta_add += ocfs2_extend_meta_needed(el);
			credits += ocfs2_calc_extend_credits(inode->i_sb,
							     el);
		} else {
			meta_add += 1;
		}
	}
out:
	if (clusters_need)
		*clusters_need = clusters_add;
	if (meta_need)
		*meta_need = meta_add;
	if (credits_need)
		*credits_need = credits;
	brelse(bh);
	return ret;
}
3252
/*
 * ocfs2_init_xattr_set_ctxt()
 *
 * Zero @ctxt and reserve everything an xattr set will need: the
 * metadata blocks and data clusters computed by
 * ocfs2_calc_xattr_set_need() plus @extra_meta (e.g. refcount-tree
 * metadata), returning the required journal credits in *@credits.
 *
 * On success ctxt->meta_ac/ctxt->data_ac hold the reservations.  On
 * failure any metadata reservation is released again and an errno is
 * returned.  Note that ctxt->handle is NOT started here; the caller
 * starts the transaction with the credits reported back.
 */
static int ocfs2_init_xattr_set_ctxt(struct inode *inode,
				     struct ocfs2_dinode *di,
				     struct ocfs2_xattr_info *xi,
				     struct ocfs2_xattr_search *xis,
				     struct ocfs2_xattr_search *xbs,
				     struct ocfs2_xattr_set_ctxt *ctxt,
				     int extra_meta,
				     int *credits)
{
	int clusters_add, meta_add, ret;
	struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);

	memset(ctxt, 0, sizeof(struct ocfs2_xattr_set_ctxt));

	ocfs2_init_dealloc_ctxt(&ctxt->dealloc);

	/* Work out how much space and how many credits this set needs. */
	ret = ocfs2_calc_xattr_set_need(inode, di, xi, xis, xbs,
					&clusters_add, &meta_add, credits);
	if (ret) {
		mlog_errno(ret);
		return ret;
	}

	meta_add += extra_meta;
	trace_ocfs2_init_xattr_set_ctxt(xi->xi_name, meta_add,
					clusters_add, *credits);

	if (meta_add) {
		ret = ocfs2_reserve_new_metadata_blocks(osb, meta_add,
							&ctxt->meta_ac);
		if (ret) {
			mlog_errno(ret);
			goto out;
		}
	}

	if (clusters_add) {
		ret = ocfs2_reserve_clusters(osb, clusters_add, &ctxt->data_ac);
		if (ret)
			mlog_errno(ret);
	}
out:
	if (ret) {
		/* Undo the metadata reservation on error. */
		if (ctxt->meta_ac) {
			ocfs2_free_alloc_context(ctxt->meta_ac);
			ctxt->meta_ac = NULL;
		}

		/*
		 * We cannot have an error and a non null ctxt->data_ac.
		 */
	}

	return ret;
}
3308
/*
 * __ocfs2_xattr_set_handle()
 *
 * Core of the xattr set path; runs entirely inside the caller's
 * transaction (ctxt->handle).  Placement policy:
 *
 *  - xi->xi_value == NULL: remove the attribute from wherever the
 *    searches found it (inode body first, else the external block).
 *  - otherwise: try the inode body first.  On success, delete any
 *    stale copy in the external block.  On -ENOSPC (unless
 *    ctxt->set_abort), fall back to the external block and then
 *    delete any stale in-inode copy.
 *
 * Before each secondary delete, xi is temporarily turned into a
 * "remove" request (xi_value = NULL) and the not_found state is
 * faked so ocfs2_calc_xattr_set_need() prices the removal; the
 * transaction is then extended by those credits.
 *
 * On overall success the inode ctime is updated under journal access.
 */
static int __ocfs2_xattr_set_handle(struct inode *inode,
				    struct ocfs2_dinode *di,
				    struct ocfs2_xattr_info *xi,
				    struct ocfs2_xattr_search *xis,
				    struct ocfs2_xattr_search *xbs,
				    struct ocfs2_xattr_set_ctxt *ctxt)
{
	int ret = 0, credits, old_found;

	if (!xi->xi_value) {
		/* Remove existing extended attribute */
		if (!xis->not_found)
			ret = ocfs2_xattr_ibody_set(inode, xi, xis, ctxt);
		else if (!xbs->not_found)
			ret = ocfs2_xattr_block_set(inode, xi, xbs, ctxt);
	} else {
		/* We always try to set extended attribute into inode first*/
		ret = ocfs2_xattr_ibody_set(inode, xi, xis, ctxt);
		if (!ret && !xbs->not_found) {
			/*
			 * If succeed and that extended attribute existing in
			 * external block, then we will remove it.
			 */
			xi->xi_value = NULL;
			xi->xi_value_len = 0;

			/* Fake "present in inode" while pricing the delete. */
			old_found = xis->not_found;
			xis->not_found = -ENODATA;
			ret = ocfs2_calc_xattr_set_need(inode,
							di,
							xi,
							xis,
							xbs,
							NULL,
							NULL,
							&credits);
			xis->not_found = old_found;
			if (ret) {
				mlog_errno(ret);
				goto out;
			}

			ret = ocfs2_extend_trans(ctxt->handle, credits);
			if (ret) {
				mlog_errno(ret);
				goto out;
			}
			ret = ocfs2_xattr_block_set(inode, xi, xbs, ctxt);
		} else if ((ret == -ENOSPC) && !ctxt->set_abort) {
			if (di->i_xattr_loc && !xbs->xattr_bh) {
				/* External block exists but wasn't searched yet. */
				ret = ocfs2_xattr_block_find(inode,
							     xi->xi_name_index,
							     xi->xi_name, xbs);
				if (ret)
					goto out;

				old_found = xis->not_found;
				xis->not_found = -ENODATA;
				ret = ocfs2_calc_xattr_set_need(inode,
								di,
								xi,
								xis,
								xbs,
								NULL,
								NULL,
								&credits);
				xis->not_found = old_found;
				if (ret) {
					mlog_errno(ret);
					goto out;
				}

				ret = ocfs2_extend_trans(ctxt->handle, credits);
				if (ret) {
					mlog_errno(ret);
					goto out;
				}
			}
			/*
			 * If no space in inode, we will set extended attribute
			 * into external block.
			 */
			ret = ocfs2_xattr_block_set(inode, xi, xbs, ctxt);
			if (ret)
				goto out;
			if (!xis->not_found) {
				/*
				 * If succeed and that extended attribute
				 * existing in inode, we will remove it.
				 */
				xi->xi_value = NULL;
				xi->xi_value_len = 0;
				xbs->not_found = -ENODATA;
				ret = ocfs2_calc_xattr_set_need(inode,
								di,
								xi,
								xis,
								xbs,
								NULL,
								NULL,
								&credits);
				if (ret) {
					mlog_errno(ret);
					goto out;
				}

				ret = ocfs2_extend_trans(ctxt->handle, credits);
				if (ret) {
					mlog_errno(ret);
					goto out;
				}
				ret = ocfs2_xattr_ibody_set(inode, xi,
							    xis, ctxt);
			}
		}
	}

	if (!ret) {
		/* Update inode ctime. */
		ret = ocfs2_journal_access_di(ctxt->handle, INODE_CACHE(inode),
					      xis->inode_bh,
					      OCFS2_JOURNAL_ACCESS_WRITE);
		if (ret) {
			mlog_errno(ret);
			goto out;
		}

		inode->i_ctime = current_time(inode);
		di->i_ctime = cpu_to_le64(inode->i_ctime.tv_sec);
		di->i_ctime_nsec = cpu_to_le32(inode->i_ctime.tv_nsec);
		ocfs2_journal_dirty(ctxt->handle, xis->inode_bh);
	}
out:
	return ret;
}
3444
/*
 * This function is only called during inode creation,
 * for initializing the security/acl xattrs of the new inode.
 * All transaction credits have been reserved in mknod, so the ctxt
 * reuses the caller's @handle and @meta_ac/@data_ac rather than
 * starting its own transaction.
 */
int ocfs2_xattr_set_handle(handle_t *handle,
			   struct inode *inode,
			   struct buffer_head *di_bh,
			   int name_index,
			   const char *name,
			   const void *value,
			   size_t value_len,
			   int flags,
			   struct ocfs2_alloc_context *meta_ac,
			   struct ocfs2_alloc_context *data_ac)
{
	struct ocfs2_dinode *di;
	int ret;

	struct ocfs2_xattr_info xi = {
		.xi_name_index = name_index,
		.xi_name = name,
		.xi_name_len = strlen(name),
		.xi_value = value,
		.xi_value_len = value_len,
	};

	/* Both searches start out "not found" until the lookups run. */
	struct ocfs2_xattr_search xis = {
		.not_found = -ENODATA,
	};

	struct ocfs2_xattr_search xbs = {
		.not_found = -ENODATA,
	};

	struct ocfs2_xattr_set_ctxt ctxt = {
		.handle = handle,
		.meta_ac = meta_ac,
		.data_ac = data_ac,
	};

	if (!ocfs2_supports_xattr(OCFS2_SB(inode->i_sb)))
		return -EOPNOTSUPP;

	/*
	 * In extreme situation, may need xattr bucket when
	 * block size is too small. And we have already reserved
	 * the credits for bucket in mknod.
	 */
	if (inode->i_sb->s_blocksize == OCFS2_MIN_BLOCKSIZE) {
		xbs.bucket = ocfs2_xattr_bucket_new(inode);
		if (!xbs.bucket) {
			mlog_errno(-ENOMEM);
			return -ENOMEM;
		}
	}

	xis.inode_bh = xbs.inode_bh = di_bh;
	di = (struct ocfs2_dinode *)di_bh->b_data;

	down_write(&OCFS2_I(inode)->ip_xattr_sem);

	/* Locate any existing copy: inode body first, then xattr block. */
	ret = ocfs2_xattr_ibody_find(inode, name_index, name, &xis);
	if (ret)
		goto cleanup;
	if (xis.not_found) {
		ret = ocfs2_xattr_block_find(inode, name_index, name, &xbs);
		if (ret)
			goto cleanup;
	}

	ret = __ocfs2_xattr_set_handle(inode, di, &xi, &xis, &xbs, &ctxt);

cleanup:
	up_write(&OCFS2_I(inode)->ip_xattr_sem);
	brelse(xbs.xattr_bh);
	ocfs2_xattr_bucket_free(xbs.bucket);

	return ret;
}
3525
/*
 * ocfs2_xattr_set()
 *
 * Set, replace or remove an extended attribute for this inode.
 * value is NULL to remove an existing extended attribute, else either
 * create or replace an extended attribute.
 *
 * Takes the inode cluster lock and ip_xattr_sem, searches the inode
 * body and any external xattr block, honours XATTR_CREATE/
 * XATTR_REPLACE, flushes the truncate log if needed, reserves space
 * and journal credits, then performs the whole update in a single
 * transaction via __ocfs2_xattr_set_handle().
 */
int ocfs2_xattr_set(struct inode *inode,
		    int name_index,
		    const char *name,
		    const void *value,
		    size_t value_len,
		    int flags)
{
	struct buffer_head *di_bh = NULL;
	struct ocfs2_dinode *di;
	int ret, credits, ref_meta = 0, ref_credits = 0;
	struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
	struct inode *tl_inode = osb->osb_tl_inode;
	struct ocfs2_xattr_set_ctxt ctxt = { NULL, NULL, NULL, };
	struct ocfs2_refcount_tree *ref_tree = NULL;

	struct ocfs2_xattr_info xi = {
		.xi_name_index = name_index,
		.xi_name = name,
		.xi_name_len = strlen(name),
		.xi_value = value,
		.xi_value_len = value_len,
	};

	struct ocfs2_xattr_search xis = {
		.not_found = -ENODATA,
	};

	struct ocfs2_xattr_search xbs = {
		.not_found = -ENODATA,
	};

	if (!ocfs2_supports_xattr(OCFS2_SB(inode->i_sb)))
		return -EOPNOTSUPP;

	/*
	 * Only xbs will be used on indexed trees. xis doesn't need a
	 * bucket.
	 */
	xbs.bucket = ocfs2_xattr_bucket_new(inode);
	if (!xbs.bucket) {
		mlog_errno(-ENOMEM);
		return -ENOMEM;
	}

	ret = ocfs2_inode_lock(inode, &di_bh, 1);
	if (ret < 0) {
		mlog_errno(ret);
		goto cleanup_nolock;
	}
	xis.inode_bh = xbs.inode_bh = di_bh;
	di = (struct ocfs2_dinode *)di_bh->b_data;

	down_write(&OCFS2_I(inode)->ip_xattr_sem);
	/*
	 * Scan inode and external block to find the same name
	 * extended attribute and collect search information.
	 */
	ret = ocfs2_xattr_ibody_find(inode, name_index, name, &xis);
	if (ret)
		goto cleanup;
	if (xis.not_found) {
		ret = ocfs2_xattr_block_find(inode, name_index, name, &xbs);
		if (ret)
			goto cleanup;
	}

	/* Enforce the create/replace flags against what we found. */
	if (xis.not_found && xbs.not_found) {
		ret = -ENODATA;
		if (flags & XATTR_REPLACE)
			goto cleanup;
		ret = 0;
		if (!value)
			goto cleanup;
	} else {
		ret = -EEXIST;
		if (flags & XATTR_CREATE)
			goto cleanup;
	}

	/* Check whether the value is refcounted and do some preparation. */
	if (OCFS2_I(inode)->ip_dyn_features & OCFS2_HAS_REFCOUNT_FL &&
	    (!xis.not_found || !xbs.not_found)) {
		ret = ocfs2_prepare_refcount_xattr(inode, di, &xi,
						   &xis, &xbs, &ref_tree,
						   &ref_meta, &ref_credits);
		if (ret) {
			mlog_errno(ret);
			goto cleanup;
		}
	}

	/* Flush the truncate log before reserving, under tl_inode's lock. */
	inode_lock(tl_inode);

	if (ocfs2_truncate_log_needs_flush(osb)) {
		ret = __ocfs2_flush_truncate_log(osb);
		if (ret < 0) {
			inode_unlock(tl_inode);
			mlog_errno(ret);
			goto cleanup;
		}
	}
	inode_unlock(tl_inode);

	ret = ocfs2_init_xattr_set_ctxt(inode, di, &xi, &xis,
					&xbs, &ctxt, ref_meta, &credits);
	if (ret) {
		mlog_errno(ret);
		goto cleanup;
	}

	/* we need to update inode's ctime field, so add credit for it. */
	credits += OCFS2_INODE_UPDATE_CREDITS;
	ctxt.handle = ocfs2_start_trans(osb, credits + ref_credits);
	if (IS_ERR(ctxt.handle)) {
		ret = PTR_ERR(ctxt.handle);
		mlog_errno(ret);
		goto out_free_ac;
	}

	ret = __ocfs2_xattr_set_handle(inode, di, &xi, &xis, &xbs, &ctxt);
	ocfs2_update_inode_fsync_trans(ctxt.handle, inode, 0);

	ocfs2_commit_trans(osb, ctxt.handle);

out_free_ac:
	if (ctxt.data_ac)
		ocfs2_free_alloc_context(ctxt.data_ac);
	if (ctxt.meta_ac)
		ocfs2_free_alloc_context(ctxt.meta_ac);
	if (ocfs2_dealloc_has_cluster(&ctxt.dealloc))
		ocfs2_schedule_truncate_log_flush(osb, 1);
	ocfs2_run_deallocs(osb, &ctxt.dealloc);

cleanup:
	if (ref_tree)
		ocfs2_unlock_refcount_tree(osb, ref_tree, 1);
	up_write(&OCFS2_I(inode)->ip_xattr_sem);
	if (!value && !ret) {
		/* A removal may have emptied the refcount tree; try to drop it. */
		ret = ocfs2_try_remove_refcount_tree(inode, di_bh);
		if (ret)
			mlog_errno(ret);
	}
	ocfs2_inode_unlock(inode, 1);
cleanup_nolock:
	brelse(di_bh);
	brelse(xbs.xattr_bh);
	ocfs2_xattr_bucket_free(xbs.bucket);

	return ret;
}
3683
/*
 * Find the xattr extent rec which may contains name_hash.
 * e_cpos will be the first name hash of the xattr rec.
 * el must be the ocfs2_xattr_header.xb_attrs.xb_root.xt_list.
 *
 * Walks down one level of the extent tree if needed (xattr trees are
 * at most one level deep here), then scans the leaf records from the
 * highest cpos downwards for the first record whose e_cpos <=
 * name_hash.  Returns the record's block/cluster extent via
 * *p_blkno/*num_clusters (and *e_cpos if non-NULL).
 */
static int ocfs2_xattr_get_rec(struct inode *inode,
			       u32 name_hash,
			       u64 *p_blkno,
			       u32 *e_cpos,
			       u32 *num_clusters,
			       struct ocfs2_extent_list *el)
{
	int ret = 0, i;
	struct buffer_head *eb_bh = NULL;
	struct ocfs2_extent_block *eb;
	struct ocfs2_extent_rec *rec = NULL;
	u64 e_blkno = 0;

	if (el->l_tree_depth) {
		ret = ocfs2_find_leaf(INODE_CACHE(inode), el, name_hash,
				      &eb_bh);
		if (ret) {
			mlog_errno(ret);
			goto out;
		}

		eb = (struct ocfs2_extent_block *) eb_bh->b_data;
		el = &eb->h_list;

		/* The leaf we land on must really be a leaf. */
		if (el->l_tree_depth) {
			ret = ocfs2_error(inode->i_sb,
					  "Inode %lu has non zero tree depth in xattr tree block %llu\n",
					  inode->i_ino,
					  (unsigned long long)eb_bh->b_blocknr);
			goto out;
		}
	}

	for (i = le16_to_cpu(el->l_next_free_rec) - 1; i >= 0; i--) {
		rec = &el->l_recs[i];

		if (le32_to_cpu(rec->e_cpos) <= name_hash) {
			e_blkno = le64_to_cpu(rec->e_blkno);
			break;
		}
	}

	/*
	 * NOTE(review): if l_next_free_rec is 0 the loop above never runs
	 * and rec stays NULL, so the error path below would dereference
	 * NULL.  Callers appear to check for a non-empty list first
	 * (e.g. ocfs2_xattr_index_block_find) - verify before relying on
	 * this from a new call site.
	 */
	if (!e_blkno) {
		ret = ocfs2_error(inode->i_sb, "Inode %lu has bad extent record (%u, %u, 0) in xattr\n",
				  inode->i_ino,
				  le32_to_cpu(rec->e_cpos),
				  ocfs2_rec_clusters(el, rec));
		goto out;
	}

	*p_blkno = le64_to_cpu(rec->e_blkno);
	*num_clusters = le16_to_cpu(rec->e_leaf_clusters);
	if (e_cpos)
		*e_cpos = le32_to_cpu(rec->e_cpos);
out:
	brelse(eb_bh);
	return ret;
}
3747
/*
 * Per-bucket callback for ocfs2_iterate_xattr_buckets().  A non-zero
 * return stops the iteration and is propagated to the caller.
 */
typedef int (xattr_bucket_func)(struct inode *inode,
				struct ocfs2_xattr_bucket *bucket,
				void *para);
3751
/*
 * Look for the entry (name_index, name) inside a single bucket.
 * Entries are ordered by xe_name_hash; the scan skips entries with a
 * smaller hash and stops once the hash exceeds @name_hash.  On a hit,
 * *xe_index is set to the entry slot and *found to 1.  A clean miss
 * still returns 0 with *found untouched.
 */
static int ocfs2_find_xe_in_bucket(struct inode *inode,
				   struct ocfs2_xattr_bucket *bucket,
				   int name_index,
				   const char *name,
				   u32 name_hash,
				   u16 *xe_index,
				   int *found)
{
	int i, ret = 0, cmp = 1, block_off, new_offset;
	struct ocfs2_xattr_header *xh = bucket_xh(bucket);
	size_t name_len = strlen(name);
	struct ocfs2_xattr_entry *xe = NULL;
	char *xe_name;

	/*
	 * We don't use binary search in the bucket because there
	 * may be multiple entries with the same name hash.
	 */
	for (i = 0; i < le16_to_cpu(xh->xh_count); i++) {
		xe = &xh->xh_entries[i];

		if (name_hash > le32_to_cpu(xe->xe_name_hash))
			continue;
		else if (name_hash < le32_to_cpu(xe->xe_name_hash))
			break;

		/* Hash matches; compare type and length before the bytes. */
		cmp = name_index - ocfs2_xattr_get_type(xe);
		if (!cmp)
			cmp = name_len - xe->xe_name_len;
		if (cmp)
			continue;

		ret = ocfs2_xattr_bucket_get_name_value(inode->i_sb,
							xh,
							i,
							&block_off,
							&new_offset);
		if (ret) {
			mlog_errno(ret);
			break;
		}

		xe_name = bucket_block(bucket, block_off) + new_offset;
		if (!memcmp(name, xe_name, name_len)) {
			*xe_index = i;
			*found = 1;
			ret = 0;
			break;
		}
	}

	return ret;
}
3806
/*
 * Find the specified xattr entry in a series of buckets.
 * This series start from p_blkno and last for num_clusters.
 * The ocfs2_xattr_header.xh_num_buckets of the first bucket contains
 * the num of the valid buckets.
 *
 * Return the buffer_head this xattr should reside in. And if the xattr's
 * hash is in the gap of 2 buckets, return the lower bucket.
 *
 * Implementation: binary search over buckets keyed on each bucket's
 * first entry hash, then a linear scan inside the candidate bucket
 * via ocfs2_find_xe_in_bucket().  xs->bucket/header/base/end/here are
 * filled in on return; -ENODATA means the name was not present.
 */
static int ocfs2_xattr_bucket_find(struct inode *inode,
				   int name_index,
				   const char *name,
				   u32 name_hash,
				   u64 p_blkno,
				   u32 first_hash,
				   u32 num_clusters,
				   struct ocfs2_xattr_search *xs)
{
	int ret, found = 0;
	struct ocfs2_xattr_header *xh = NULL;
	struct ocfs2_xattr_entry *xe = NULL;
	u16 index = 0;
	u16 blk_per_bucket = ocfs2_blocks_per_xattr_bucket(inode->i_sb);
	int low_bucket = 0, bucket, high_bucket;
	struct ocfs2_xattr_bucket *search;
	u32 last_hash;
	u64 blkno, lower_blkno = 0;

	search = ocfs2_xattr_bucket_new(inode);
	if (!search) {
		ret = -ENOMEM;
		mlog_errno(ret);
		goto out;
	}

	/* Read the first bucket to learn how many buckets are valid. */
	ret = ocfs2_read_xattr_bucket(search, p_blkno);
	if (ret) {
		mlog_errno(ret);
		goto out;
	}

	xh = bucket_xh(search);
	high_bucket = le16_to_cpu(xh->xh_num_buckets) - 1;
	while (low_bucket <= high_bucket) {
		ocfs2_xattr_bucket_relse(search);

		bucket = (low_bucket + high_bucket) / 2;
		blkno = p_blkno + bucket * blk_per_bucket;
		ret = ocfs2_read_xattr_bucket(search, blkno);
		if (ret) {
			mlog_errno(ret);
			goto out;
		}

		xh = bucket_xh(search);
		xe = &xh->xh_entries[0];
		if (name_hash < le32_to_cpu(xe->xe_name_hash)) {
			high_bucket = bucket - 1;
			continue;
		}

		/*
		 * Check whether the hash of the last entry in our
		 * bucket is larger than the search one. for an empty
		 * bucket, the last one is also the first one.
		 */
		if (xh->xh_count)
			xe = &xh->xh_entries[le16_to_cpu(xh->xh_count) - 1];

		last_hash = le32_to_cpu(xe->xe_name_hash);

		/* record lower_blkno which may be the insert place. */
		lower_blkno = blkno;

		if (name_hash > le32_to_cpu(xe->xe_name_hash)) {
			low_bucket = bucket + 1;
			continue;
		}

		/* the searched xattr should reside in this bucket if exists. */
		ret = ocfs2_find_xe_in_bucket(inode, search,
					      name_index, name, name_hash,
					      &index, &found);
		if (ret) {
			mlog_errno(ret);
			goto out;
		}
		break;
	}

	/*
	 * Record the bucket we have found.
	 * When the xattr's hash value is in the gap of 2 buckets, we will
	 * always set it to the previous bucket.
	 */
	if (!lower_blkno)
		lower_blkno = p_blkno;

	/* This should be in cache - we just read it during the search */
	ret = ocfs2_read_xattr_bucket(xs->bucket, lower_blkno);
	if (ret) {
		mlog_errno(ret);
		goto out;
	}

	xs->header = bucket_xh(xs->bucket);
	xs->base = bucket_block(xs->bucket, 0);
	xs->end = xs->base + inode->i_sb->s_blocksize;

	if (found) {
		xs->here = &xs->header->xh_entries[index];
		trace_ocfs2_xattr_bucket_find(OCFS2_I(inode)->ip_blkno,
			name, name_index, name_hash,
			(unsigned long long)bucket_blkno(xs->bucket),
			index);
	} else
		ret = -ENODATA;

out:
	ocfs2_xattr_bucket_free(search);
	return ret;
}
3929
/*
 * Look up (name_index, name) in an indexed xattr tree rooted at
 * @root_bh: hash the name, find the extent record covering that hash
 * with ocfs2_xattr_get_rec(), then search the record's buckets with
 * ocfs2_xattr_bucket_find().  Returns -ENODATA when the tree is empty
 * or the name is absent.
 */
static int ocfs2_xattr_index_block_find(struct inode *inode,
					struct buffer_head *root_bh,
					int name_index,
					const char *name,
					struct ocfs2_xattr_search *xs)
{
	int ret;
	struct ocfs2_xattr_block *xb =
			(struct ocfs2_xattr_block *)root_bh->b_data;
	struct ocfs2_xattr_tree_root *xb_root = &xb->xb_attrs.xb_root;
	struct ocfs2_extent_list *el = &xb_root->xt_list;
	u64 p_blkno = 0;
	u32 first_hash, num_clusters = 0;
	u32 name_hash = ocfs2_xattr_name_hash(inode, name, strlen(name));

	if (le16_to_cpu(el->l_next_free_rec) == 0)
		return -ENODATA;

	trace_ocfs2_xattr_index_block_find(OCFS2_I(inode)->ip_blkno,
					   name, name_index, name_hash,
					   (unsigned long long)root_bh->b_blocknr,
					   -1);

	ret = ocfs2_xattr_get_rec(inode, name_hash, &p_blkno, &first_hash,
				  &num_clusters, el);
	if (ret) {
		mlog_errno(ret);
		goto out;
	}

	/* The record we got back must cover the hash we asked about. */
	BUG_ON(p_blkno == 0 || num_clusters == 0 || first_hash > name_hash);

	trace_ocfs2_xattr_index_block_find_rec(OCFS2_I(inode)->ip_blkno,
					       name, name_index, first_hash,
					       (unsigned long long)p_blkno,
					       num_clusters);

	ret = ocfs2_xattr_bucket_find(inode, name_index, name, name_hash,
				      p_blkno, first_hash, num_clusters, xs);

out:
	return ret;
}
3973
/*
 * Walk every valid bucket in the run of @clusters clusters starting
 * at @blkno, invoking @func on each.  The true bucket count is read
 * from the first bucket's xh_num_buckets.  A non-zero return from
 * @func stops the walk; -ERANGE from @func is propagated without
 * logging (the list path uses it for "buffer full").
 */
static int ocfs2_iterate_xattr_buckets(struct inode *inode,
				       u64 blkno,
				       u32 clusters,
				       xattr_bucket_func *func,
				       void *para)
{
	int i, ret = 0;
	u32 bpc = ocfs2_xattr_buckets_per_cluster(OCFS2_SB(inode->i_sb));
	u32 num_buckets = clusters * bpc;
	struct ocfs2_xattr_bucket *bucket;

	bucket = ocfs2_xattr_bucket_new(inode);
	if (!bucket) {
		mlog_errno(-ENOMEM);
		return -ENOMEM;
	}

	trace_ocfs2_iterate_xattr_buckets(
		(unsigned long long)OCFS2_I(inode)->ip_blkno,
		(unsigned long long)blkno, clusters);

	for (i = 0; i < num_buckets; i++, blkno += bucket->bu_blocks) {
		ret = ocfs2_read_xattr_bucket(bucket, blkno);
		if (ret) {
			mlog_errno(ret);
			break;
		}

		/*
		 * The real bucket num in this series of blocks is stored
		 * in the 1st bucket.
		 */
		if (i == 0)
			num_buckets = le16_to_cpu(bucket_xh(bucket)->xh_num_buckets);

		trace_ocfs2_iterate_xattr_bucket((unsigned long long)blkno,
		     le32_to_cpu(bucket_xh(bucket)->xh_entries[0].xe_name_hash));
		if (func) {
			ret = func(inode, bucket, para);
			if (ret && ret != -ERANGE)
				mlog_errno(ret);
			/* Fall through to bucket_relse() */
		}

		ocfs2_xattr_bucket_relse(bucket);
		if (ret)
			break;
	}

	ocfs2_xattr_bucket_free(bucket);
	return ret;
}
4026
/* Cursor threaded through the bucket walk while listing xattr names. */
struct ocfs2_xattr_tree_list {
	char *buffer;		/* destination for the listed names */
	size_t buffer_size;	/* capacity of @buffer */
	size_t result;		/* running byte count, updated by
				 * ocfs2_xattr_list_entry() and returned
				 * as the final listing length */
};
4032
Tao Mafd68a892009-08-18 11:43:21 +08004033static int ocfs2_xattr_bucket_get_name_value(struct super_block *sb,
Tao Ma0c044f02008-08-18 17:38:50 +08004034 struct ocfs2_xattr_header *xh,
4035 int index,
4036 int *block_off,
4037 int *new_offset)
4038{
4039 u16 name_offset;
4040
4041 if (index < 0 || index >= le16_to_cpu(xh->xh_count))
4042 return -EINVAL;
4043
4044 name_offset = le16_to_cpu(xh->xh_entries[index].xe_name_offset);
4045
Tao Mafd68a892009-08-18 11:43:21 +08004046 *block_off = name_offset >> sb->s_blocksize_bits;
4047 *new_offset = name_offset % sb->s_blocksize;
Tao Ma0c044f02008-08-18 17:38:50 +08004048
4049 return 0;
4050}
4051
/*
 * xattr_bucket_func: append every entry in @bucket to the listing
 * cursor in @para (an ocfs2_xattr_tree_list) via
 * ocfs2_xattr_list_entry().  Stops on the first error, which the
 * iteration machinery propagates.
 */
static int ocfs2_list_xattr_bucket(struct inode *inode,
				   struct ocfs2_xattr_bucket *bucket,
				   void *para)
{
	int ret = 0, type;
	struct ocfs2_xattr_tree_list *xl = (struct ocfs2_xattr_tree_list *)para;
	int i, block_off, new_offset;
	const char *name;

	for (i = 0 ; i < le16_to_cpu(bucket_xh(bucket)->xh_count); i++) {
		struct ocfs2_xattr_entry *entry = &bucket_xh(bucket)->xh_entries[i];
		type = ocfs2_xattr_get_type(entry);

		/* Map the entry's name offset to a block + offset pair. */
		ret = ocfs2_xattr_bucket_get_name_value(inode->i_sb,
							bucket_xh(bucket),
							i,
							&block_off,
							&new_offset);
		if (ret)
			break;

		name = (const char *)bucket_block(bucket, block_off) +
			new_offset;
		ret = ocfs2_xattr_list_entry(inode->i_sb,
					     xl->buffer,
					     xl->buffer_size,
					     &xl->result,
					     type, name,
					     entry->xe_name_len);
		if (ret)
			break;
	}

	return ret;
}
4087
/*
 * Walk all extent records of an indexed xattr tree from the highest
 * name hash downwards, calling @rec_func on each record.  The cursor
 * starts at UINT_MAX and steps to (record's e_cpos - 1) after each
 * record, terminating at the record whose e_cpos is 0.  -ERANGE from
 * @rec_func is propagated without logging.
 */
static int ocfs2_iterate_xattr_index_block(struct inode *inode,
					   struct buffer_head *blk_bh,
					   xattr_tree_rec_func *rec_func,
					   void *para)
{
	struct ocfs2_xattr_block *xb =
			(struct ocfs2_xattr_block *)blk_bh->b_data;
	struct ocfs2_extent_list *el = &xb->xb_attrs.xb_root.xt_list;
	int ret = 0;
	u32 name_hash = UINT_MAX, e_cpos = 0, num_clusters = 0;
	u64 p_blkno = 0;

	if (!el->l_next_free_rec || !rec_func)
		return 0;

	while (name_hash > 0) {
		ret = ocfs2_xattr_get_rec(inode, name_hash, &p_blkno,
					  &e_cpos, &num_clusters, el);
		if (ret) {
			mlog_errno(ret);
			break;
		}

		ret = rec_func(inode, blk_bh, p_blkno, e_cpos,
			       num_clusters, para);
		if (ret) {
			if (ret != -ERANGE)
				mlog_errno(ret);
			break;
		}

		/* Record starting at hash 0 is the last one. */
		if (e_cpos == 0)
			break;

		name_hash = e_cpos - 1;
	}

	return ret;

}
4128
/*
 * xattr_tree_rec_func adapter: list every bucket in one extent record
 * (@blkno for @len clusters); @para is the ocfs2_xattr_tree_list
 * cursor, passed straight through to ocfs2_list_xattr_bucket().
 */
static int ocfs2_list_xattr_tree_rec(struct inode *inode,
				     struct buffer_head *root_bh,
				     u64 blkno, u32 cpos, u32 len, void *para)
{
	return ocfs2_iterate_xattr_buckets(inode, blkno, len,
					   ocfs2_list_xattr_bucket, para);
}
4136
4137static int ocfs2_xattr_tree_list_index_block(struct inode *inode,
4138 struct buffer_head *blk_bh,
4139 char *buffer,
4140 size_t buffer_size)
4141{
4142 int ret;
4143 struct ocfs2_xattr_tree_list xl = {
4144 .buffer = buffer,
4145 .buffer_size = buffer_size,
4146 .result = 0,
4147 };
4148
4149 ret = ocfs2_iterate_xattr_index_block(inode, blk_bh,
4150 ocfs2_list_xattr_tree_rec, &xl);
4151 if (ret) {
4152 mlog_errno(ret);
4153 goto out;
4154 }
4155
Tao Ma936b8832008-10-09 23:06:14 +08004156 ret = xl.result;
Tao Ma0c044f02008-08-18 17:38:50 +08004157out:
4158 return ret;
4159}
Tao Ma01225592008-08-18 17:38:53 +08004160
4161static int cmp_xe(const void *a, const void *b)
4162{
4163 const struct ocfs2_xattr_entry *l = a, *r = b;
4164 u32 l_hash = le32_to_cpu(l->xe_name_hash);
4165 u32 r_hash = le32_to_cpu(r->xe_name_hash);
4166
4167 if (l_hash > r_hash)
4168 return 1;
4169 if (l_hash < r_hash)
4170 return -1;
4171 return 0;
4172}
4173
4174static void swap_xe(void *a, void *b, int size)
4175{
4176 struct ocfs2_xattr_entry *l = a, *r = b, tmp;
4177
4178 tmp = *l;
4179 memcpy(l, r, sizeof(struct ocfs2_xattr_entry));
4180 memcpy(r, &tmp, sizeof(struct ocfs2_xattr_entry));
4181}
4182
/*
 * When the ocfs2_xattr_block is filled up, new bucket will be created
 * and all the xattr entries will be moved to the new bucket.
 * The header goes at the start of the bucket, and the names+values are
 * filled from the end. This is why *target starts as the last buffer.
 * Note: we need to sort the entries since they are not saved in order
 * in the ocfs2_xattr_block.
 */
static void ocfs2_cp_xattr_block_to_bucket(struct inode *inode,
					   struct buffer_head *xb_bh,
					   struct ocfs2_xattr_bucket *bucket)
{
	int i, blocksize = inode->i_sb->s_blocksize;
	int blks = ocfs2_blocks_per_xattr_bucket(inode->i_sb);
	u16 offset, size, off_change;
	struct ocfs2_xattr_entry *xe;
	struct ocfs2_xattr_block *xb =
			(struct ocfs2_xattr_block *)xb_bh->b_data;
	struct ocfs2_xattr_header *xb_xh = &xb->xb_attrs.xb_header;
	struct ocfs2_xattr_header *xh = bucket_xh(bucket);
	u16 count = le16_to_cpu(xb_xh->xh_count);
	char *src = xb_bh->b_data;
	/* Names/values live at the tail, so start from the last block. */
	char *target = bucket_block(bucket, blks - 1);

	trace_ocfs2_cp_xattr_block_to_bucket_begin(
				(unsigned long long)xb_bh->b_blocknr,
				(unsigned long long)bucket_blkno(bucket));

	/* Start from a clean slate: zero every block backing the bucket. */
	for (i = 0; i < blks; i++)
		memset(bucket_block(bucket, i), 0, blocksize);

	/*
	 * Since the xe_name_offset is based on ocfs2_xattr_header,
	 * there is a offset change corresponding to the change of
	 * ocfs2_xattr_header's position.
	 */
	off_change = offsetof(struct ocfs2_xattr_block, xb_attrs.xb_header);
	/*
	 * NOTE(review): assumes the last entry owns the lowest name/value
	 * offset, i.e. [offset, blocksize) covers all names and values.
	 */
	xe = &xb_xh->xh_entries[count - 1];
	offset = le16_to_cpu(xe->xe_name_offset) + off_change;
	size = blocksize - offset;

	/* copy all the names and values. */
	memcpy(target + offset, src + offset, size);

	/* Init new header now. */
	xh->xh_count = xb_xh->xh_count;
	xh->xh_num_buckets = cpu_to_le16(1);
	xh->xh_name_value_len = cpu_to_le16(size);
	xh->xh_free_start = cpu_to_le16(OCFS2_XATTR_BUCKET_SIZE - size);

	/* copy all the entries. */
	target = bucket_block(bucket, 0);
	offset = offsetof(struct ocfs2_xattr_header, xh_entries);
	size = count * sizeof(struct ocfs2_xattr_entry);
	memcpy(target + offset, (char *)xb_xh + offset, size);

	/* Change the xe offset for all the xe because of the move. */
	off_change = OCFS2_XATTR_BUCKET_SIZE - blocksize +
		 offsetof(struct ocfs2_xattr_block, xb_attrs.xb_header);
	for (i = 0; i < count; i++)
		le16_add_cpu(&xh->xh_entries[i].xe_name_offset, off_change);

	trace_ocfs2_cp_xattr_block_to_bucket_end(offset, size, off_change);

	/* Buckets keep their entries hash-sorted; the block did not. */
	sort(target + offset, count, sizeof(struct ocfs2_xattr_entry),
	     cmp_xe, swap_xe);
}
4250
4251/*
4252 * After we move xattr from block to index btree, we have to
4253 * update ocfs2_xattr_search to the new xe and base.
4254 *
4255 * When the entry is in xattr block, xattr_bh indicates the storage place.
4256 * While if the entry is in index b-tree, "bucket" indicates the
4257 * real place of the xattr.
4258 */
Joel Becker178eeac2008-10-27 15:18:29 -07004259static void ocfs2_xattr_update_xattr_search(struct inode *inode,
4260 struct ocfs2_xattr_search *xs,
4261 struct buffer_head *old_bh)
Tao Ma01225592008-08-18 17:38:53 +08004262{
Tao Ma01225592008-08-18 17:38:53 +08004263 char *buf = old_bh->b_data;
4264 struct ocfs2_xattr_block *old_xb = (struct ocfs2_xattr_block *)buf;
4265 struct ocfs2_xattr_header *old_xh = &old_xb->xb_attrs.xb_header;
Joel Becker178eeac2008-10-27 15:18:29 -07004266 int i;
Tao Ma01225592008-08-18 17:38:53 +08004267
Joel Beckerba937122008-10-24 19:13:20 -07004268 xs->header = bucket_xh(xs->bucket);
Joel Becker178eeac2008-10-27 15:18:29 -07004269 xs->base = bucket_block(xs->bucket, 0);
Tao Ma01225592008-08-18 17:38:53 +08004270 xs->end = xs->base + inode->i_sb->s_blocksize;
4271
Joel Becker178eeac2008-10-27 15:18:29 -07004272 if (xs->not_found)
4273 return;
Tao Ma01225592008-08-18 17:38:53 +08004274
Joel Becker178eeac2008-10-27 15:18:29 -07004275 i = xs->here - old_xh->xh_entries;
4276 xs->here = &xs->header->xh_entries[i];
Tao Ma01225592008-08-18 17:38:53 +08004277}
4278
/*
 * Convert a flat xattr block into an indexed one: allocate one cluster,
 * copy all existing entries into a bucket there, then rewrite the block
 * body as an ocfs2_xattr_tree_root whose single extent record points at
 * the new cluster.  On success the block carries OCFS2_XATTR_INDEXED
 * and @xs has been re-pointed at the bucket.
 *
 * Caller supplies a running handle and data allocator in @ctxt.
 * Returns 0 or a negative errno.
 */
static int ocfs2_xattr_create_index_block(struct inode *inode,
					  struct ocfs2_xattr_search *xs,
					  struct ocfs2_xattr_set_ctxt *ctxt)
{
	int ret;
	u32 bit_off, len;
	u64 blkno;
	handle_t *handle = ctxt->handle;
	struct ocfs2_inode_info *oi = OCFS2_I(inode);
	struct buffer_head *xb_bh = xs->xattr_bh;
	struct ocfs2_xattr_block *xb =
			(struct ocfs2_xattr_block *)xb_bh->b_data;
	struct ocfs2_xattr_tree_root *xr;
	u16 xb_flags = le16_to_cpu(xb->xb_flags);

	trace_ocfs2_xattr_create_index_block_begin(
				(unsigned long long)xb_bh->b_blocknr);

	/* Must not already be indexed, and the search carries a bucket. */
	BUG_ON(xb_flags & OCFS2_XATTR_INDEXED);
	BUG_ON(!xs->bucket);

	/*
	 * XXX:
	 * We can use this lock for now, and maybe move to a dedicated mutex
	 * if performance becomes a problem later.
	 */
	down_write(&oi->ip_alloc_sem);

	ret = ocfs2_journal_access_xb(handle, INODE_CACHE(inode), xb_bh,
				      OCFS2_JOURNAL_ACCESS_WRITE);
	if (ret) {
		mlog_errno(ret);
		goto out;
	}

	/* Claim one cluster to hold the first bucket. */
	ret = __ocfs2_claim_clusters(handle, ctxt->data_ac,
				     1, 1, &bit_off, &len);
	if (ret) {
		mlog_errno(ret);
		goto out;
	}

	/*
	 * The bucket may spread in many blocks, and
	 * we will only touch the 1st block and the last block
	 * in the whole bucket(one for entry and one for data).
	 */
	blkno = ocfs2_clusters_to_blocks(inode->i_sb, bit_off);

	trace_ocfs2_xattr_create_index_block((unsigned long long)blkno);

	ret = ocfs2_init_xattr_bucket(xs->bucket, blkno, 1);
	if (ret) {
		mlog_errno(ret);
		goto out;
	}

	ret = ocfs2_xattr_bucket_journal_access(handle, xs->bucket,
						OCFS2_JOURNAL_ACCESS_CREATE);
	if (ret) {
		mlog_errno(ret);
		goto out;
	}

	/* Move the entries over, then re-aim the search at the bucket. */
	ocfs2_cp_xattr_block_to_bucket(inode, xb_bh, xs->bucket);
	ocfs2_xattr_bucket_journal_dirty(handle, xs->bucket);

	ocfs2_xattr_update_xattr_search(inode, xs, xb_bh);

	/* Change from ocfs2_xattr_header to ocfs2_xattr_tree_root */
	memset(&xb->xb_attrs, 0, inode->i_sb->s_blocksize -
	       offsetof(struct ocfs2_xattr_block, xb_attrs));

	/* Depth-0 tree with a single record covering the new cluster. */
	xr = &xb->xb_attrs.xb_root;
	xr->xt_clusters = cpu_to_le32(1);
	xr->xt_last_eb_blk = 0;
	xr->xt_list.l_tree_depth = 0;
	xr->xt_list.l_count = cpu_to_le16(ocfs2_xattr_recs_per_xb(inode->i_sb));
	xr->xt_list.l_next_free_rec = cpu_to_le16(1);

	xr->xt_list.l_recs[0].e_cpos = 0;
	xr->xt_list.l_recs[0].e_blkno = cpu_to_le64(blkno);
	xr->xt_list.l_recs[0].e_leaf_clusters = cpu_to_le16(1);

	xb->xb_flags = cpu_to_le16(xb_flags | OCFS2_XATTR_INDEXED);

	ocfs2_journal_dirty(handle, xb_bh);

out:
	up_write(&oi->ip_alloc_sem);

	return ret;
}
4372
4373static int cmp_xe_offset(const void *a, const void *b)
4374{
4375 const struct ocfs2_xattr_entry *l = a, *r = b;
4376 u32 l_name_offset = le16_to_cpu(l->xe_name_offset);
4377 u32 r_name_offset = le16_to_cpu(r->xe_name_offset);
4378
4379 if (l_name_offset < r_name_offset)
4380 return 1;
4381 if (l_name_offset > r_name_offset)
4382 return -1;
4383 return 0;
4384}
4385
4386/*
4387 * defrag a xattr bucket if we find that the bucket has some
4388 * holes beteen name/value pairs.
4389 * We will move all the name/value pairs to the end of the bucket
4390 * so that we can spare some space for insertion.
4391 */
4392static int ocfs2_defrag_xattr_bucket(struct inode *inode,
Tao Ma85db90e2008-11-12 08:27:01 +08004393 handle_t *handle,
Tao Ma01225592008-08-18 17:38:53 +08004394 struct ocfs2_xattr_bucket *bucket)
4395{
4396 int ret, i;
Joel Becker199799a2009-08-14 19:04:15 -07004397 size_t end, offset, len;
Tao Ma01225592008-08-18 17:38:53 +08004398 struct ocfs2_xattr_header *xh;
4399 char *entries, *buf, *bucket_buf = NULL;
Joel Becker9c7759a2008-10-24 16:21:03 -07004400 u64 blkno = bucket_blkno(bucket);
Tao Ma01225592008-08-18 17:38:53 +08004401 u16 xh_free_start;
Tao Ma01225592008-08-18 17:38:53 +08004402 size_t blocksize = inode->i_sb->s_blocksize;
Tao Ma01225592008-08-18 17:38:53 +08004403 struct ocfs2_xattr_entry *xe;
Tao Ma01225592008-08-18 17:38:53 +08004404
4405 /*
4406 * In order to make the operation more efficient and generic,
4407 * we copy all the blocks into a contiguous memory and do the
4408 * defragment there, so if anything is error, we will not touch
4409 * the real block.
4410 */
4411 bucket_buf = kmalloc(OCFS2_XATTR_BUCKET_SIZE, GFP_NOFS);
4412 if (!bucket_buf) {
4413 ret = -EIO;
4414 goto out;
4415 }
4416
Joel Becker161d6f32008-10-27 15:25:18 -07004417 buf = bucket_buf;
Tao Ma1c32a2f2008-11-06 08:10:47 +08004418 for (i = 0; i < bucket->bu_blocks; i++, buf += blocksize)
4419 memcpy(buf, bucket_block(bucket, i), blocksize);
Joel Becker161d6f32008-10-27 15:25:18 -07004420
Tao Ma1c32a2f2008-11-06 08:10:47 +08004421 ret = ocfs2_xattr_bucket_journal_access(handle, bucket,
Joel Becker161d6f32008-10-27 15:25:18 -07004422 OCFS2_JOURNAL_ACCESS_WRITE);
4423 if (ret < 0) {
4424 mlog_errno(ret);
Tao Ma85db90e2008-11-12 08:27:01 +08004425 goto out;
Tao Ma01225592008-08-18 17:38:53 +08004426 }
4427
4428 xh = (struct ocfs2_xattr_header *)bucket_buf;
4429 entries = (char *)xh->xh_entries;
4430 xh_free_start = le16_to_cpu(xh->xh_free_start);
4431
Tao Ma402b4182011-02-23 22:01:17 +08004432 trace_ocfs2_defrag_xattr_bucket(
Mark Fashehde29c082008-10-29 14:45:30 -07004433 (unsigned long long)blkno, le16_to_cpu(xh->xh_count),
4434 xh_free_start, le16_to_cpu(xh->xh_name_value_len));
Tao Ma01225592008-08-18 17:38:53 +08004435
4436 /*
4437 * sort all the entries by their offset.
4438 * the largest will be the first, so that we can
4439 * move them to the end one by one.
4440 */
4441 sort(entries, le16_to_cpu(xh->xh_count),
4442 sizeof(struct ocfs2_xattr_entry),
4443 cmp_xe_offset, swap_xe);
4444
4445 /* Move all name/values to the end of the bucket. */
4446 xe = xh->xh_entries;
4447 end = OCFS2_XATTR_BUCKET_SIZE;
4448 for (i = 0; i < le16_to_cpu(xh->xh_count); i++, xe++) {
4449 offset = le16_to_cpu(xe->xe_name_offset);
Joel Becker199799a2009-08-14 19:04:15 -07004450 len = namevalue_size_xe(xe);
Tao Ma01225592008-08-18 17:38:53 +08004451
4452 /*
4453 * We must make sure that the name/value pair
4454 * exist in the same block. So adjust end to
4455 * the previous block end if needed.
4456 */
4457 if (((end - len) / blocksize !=
4458 (end - 1) / blocksize))
4459 end = end - end % blocksize;
4460
4461 if (end > offset + len) {
4462 memmove(bucket_buf + end - len,
4463 bucket_buf + offset, len);
4464 xe->xe_name_offset = cpu_to_le16(end - len);
4465 }
4466
4467 mlog_bug_on_msg(end < offset + len, "Defrag check failed for "
4468 "bucket %llu\n", (unsigned long long)blkno);
4469
4470 end -= len;
4471 }
4472
4473 mlog_bug_on_msg(xh_free_start > end, "Defrag check failed for "
4474 "bucket %llu\n", (unsigned long long)blkno);
4475
4476 if (xh_free_start == end)
Tao Ma85db90e2008-11-12 08:27:01 +08004477 goto out;
Tao Ma01225592008-08-18 17:38:53 +08004478
4479 memset(bucket_buf + xh_free_start, 0, end - xh_free_start);
4480 xh->xh_free_start = cpu_to_le16(end);
4481
4482 /* sort the entries by their name_hash. */
4483 sort(entries, le16_to_cpu(xh->xh_count),
4484 sizeof(struct ocfs2_xattr_entry),
4485 cmp_xe, swap_xe);
4486
4487 buf = bucket_buf;
Tao Ma1c32a2f2008-11-06 08:10:47 +08004488 for (i = 0; i < bucket->bu_blocks; i++, buf += blocksize)
4489 memcpy(bucket_block(bucket, i), buf, blocksize);
4490 ocfs2_xattr_bucket_journal_dirty(handle, bucket);
Tao Ma01225592008-08-18 17:38:53 +08004491
Tao Ma01225592008-08-18 17:38:53 +08004492out:
Tao Ma01225592008-08-18 17:38:53 +08004493 kfree(bucket_buf);
4494 return ret;
4495}
4496
/*
 * bucket_blkno(first) points to the start of an existing extent. new_blkno
 * points to a newly allocated extent. Because we know each of our
 * clusters contains more than bucket, we can easily split one cluster
 * at a bucket boundary. So we take the last cluster of the existing
 * extent and split it down the middle. We move the last half of the
 * buckets in the last cluster of the existing extent over to the new
 * extent.
 *
 * "first" is the first bucket of the existing extent so we can update
 * its bucket count. "target" is the bucket where we were hoping to
 * insert our xattr. If the bucket move places the target in the new
 * extent, we'll re-read first and target after modifying the old
 * extent.
 *
 * first_hash will be set as the 1st xe's name_hash in the new extent.
 */
static int ocfs2_mv_xattr_bucket_cross_cluster(struct inode *inode,
					       handle_t *handle,
					       struct ocfs2_xattr_bucket *first,
					       struct ocfs2_xattr_bucket *target,
					       u64 new_blkno,
					       u32 num_clusters,
					       u32 *first_hash)
{
	int ret;
	struct super_block *sb = inode->i_sb;
	int blks_per_bucket = ocfs2_blocks_per_xattr_bucket(sb);
	int num_buckets = ocfs2_xattr_buckets_per_cluster(OCFS2_SB(sb));
	/* Half the last cluster's buckets go to the new extent. */
	int to_move = num_buckets / 2;
	u64 src_blkno;
	u64 last_cluster_blkno = bucket_blkno(first) +
		((num_clusters - 1) * ocfs2_clusters_to_blocks(sb, 1));

	BUG_ON(le16_to_cpu(bucket_xh(first)->xh_num_buckets) < num_buckets);
	/* This path only makes sense when a cluster holds > 1 bucket. */
	BUG_ON(OCFS2_XATTR_BUCKET_SIZE == OCFS2_SB(sb)->s_clustersize);

	trace_ocfs2_mv_xattr_bucket_cross_cluster(
				(unsigned long long)last_cluster_blkno,
				(unsigned long long)new_blkno);

	ret = ocfs2_mv_xattr_buckets(inode, handle, bucket_blkno(first),
				     last_cluster_blkno, new_blkno,
				     to_move, first_hash);
	if (ret) {
		mlog_errno(ret);
		goto out;
	}

	/* This is the first bucket that got moved */
	src_blkno = last_cluster_blkno + (to_move * blks_per_bucket);

	/*
	 * If the target bucket was part of the moved buckets, we need to
	 * update first and target.
	 */
	if (bucket_blkno(target) >= src_blkno) {
		/* Find the block for the new target bucket */
		src_blkno = new_blkno +
			(bucket_blkno(target) - src_blkno);

		ocfs2_xattr_bucket_relse(first);
		ocfs2_xattr_bucket_relse(target);

		/*
		 * These shouldn't fail - the buffers are in the
		 * journal from ocfs2_cp_xattr_bucket().
		 */
		ret = ocfs2_read_xattr_bucket(first, new_blkno);
		if (ret) {
			mlog_errno(ret);
			goto out;
		}
		ret = ocfs2_read_xattr_bucket(target, src_blkno);
		if (ret)
			mlog_errno(ret);

	}

out:
	return ret;
}
4579
Tao Ma01225592008-08-18 17:38:53 +08004580/*
Tao Ma80bcaf32008-10-27 06:06:24 +08004581 * Find the suitable pos when we divide a bucket into 2.
4582 * We have to make sure the xattrs with the same hash value exist
4583 * in the same bucket.
4584 *
4585 * If this ocfs2_xattr_header covers more than one hash value, find a
4586 * place where the hash value changes. Try to find the most even split.
4587 * The most common case is that all entries have different hash values,
4588 * and the first check we make will find a place to split.
Tao Ma01225592008-08-18 17:38:53 +08004589 */
Tao Ma80bcaf32008-10-27 06:06:24 +08004590static int ocfs2_xattr_find_divide_pos(struct ocfs2_xattr_header *xh)
4591{
4592 struct ocfs2_xattr_entry *entries = xh->xh_entries;
4593 int count = le16_to_cpu(xh->xh_count);
4594 int delta, middle = count / 2;
4595
4596 /*
4597 * We start at the middle. Each step gets farther away in both
4598 * directions. We therefore hit the change in hash value
4599 * nearest to the middle. Note that this loop does not execute for
4600 * count < 2.
4601 */
4602 for (delta = 0; delta < middle; delta++) {
4603 /* Let's check delta earlier than middle */
4604 if (cmp_xe(&entries[middle - delta - 1],
4605 &entries[middle - delta]))
4606 return middle - delta;
4607
4608 /* For even counts, don't walk off the end */
4609 if ((middle + delta + 1) == count)
4610 continue;
4611
4612 /* Now try delta past middle */
4613 if (cmp_xe(&entries[middle + delta],
4614 &entries[middle + delta + 1]))
4615 return middle + delta + 1;
4616 }
4617
4618 /* Every entry had the same hash */
4619 return count;
4620}
4621
/*
 * Move some xattrs in old bucket(blk) to new bucket(new_blk).
 * first_hash will record the 1st hash of the new bucket.
 *
 * Normally half of the xattrs will be moved. But we have to make
 * sure that the xattrs with the same hash value are stored in the
 * same bucket. If all the xattrs in this bucket have the same hash
 * value, the new bucket will be initialized as an empty one and the
 * first_hash will be initialized as (hash_value+1).
 *
 * @new_bucket_head: non-zero when new_blk starts a new extent, in
 * which case the new bucket's xh_num_buckets is set to 1.
 * Returns 0 or a negative errno.
 */
static int ocfs2_divide_xattr_bucket(struct inode *inode,
				     handle_t *handle,
				     u64 blk,
				     u64 new_blk,
				     u32 *first_hash,
				     int new_bucket_head)
{
	int ret, i;
	int count, start, len, name_value_len = 0, name_offset = 0;
	struct ocfs2_xattr_bucket *s_bucket = NULL, *t_bucket = NULL;
	struct ocfs2_xattr_header *xh;
	struct ocfs2_xattr_entry *xe;
	int blocksize = inode->i_sb->s_blocksize;

	trace_ocfs2_divide_xattr_bucket_begin((unsigned long long)blk,
					      (unsigned long long)new_blk);

	s_bucket = ocfs2_xattr_bucket_new(inode);
	t_bucket = ocfs2_xattr_bucket_new(inode);
	if (!s_bucket || !t_bucket) {
		ret = -ENOMEM;
		mlog_errno(ret);
		goto out;
	}

	ret = ocfs2_read_xattr_bucket(s_bucket, blk);
	if (ret) {
		mlog_errno(ret);
		goto out;
	}

	ret = ocfs2_xattr_bucket_journal_access(handle, s_bucket,
						OCFS2_JOURNAL_ACCESS_WRITE);
	if (ret) {
		mlog_errno(ret);
		goto out;
	}

	/*
	 * Even if !new_bucket_head, we're overwriting t_bucket.  Thus,
	 * there's no need to read it.
	 */
	ret = ocfs2_init_xattr_bucket(t_bucket, new_blk, new_bucket_head);
	if (ret) {
		mlog_errno(ret);
		goto out;
	}

	/*
	 * Hey, if we're overwriting t_bucket, what difference does
	 * ACCESS_CREATE vs ACCESS_WRITE make?  See the comment in the
	 * same part of ocfs2_cp_xattr_bucket().
	 */
	ret = ocfs2_xattr_bucket_journal_access(handle, t_bucket,
						new_bucket_head ?
						OCFS2_JOURNAL_ACCESS_CREATE :
						OCFS2_JOURNAL_ACCESS_WRITE);
	if (ret) {
		mlog_errno(ret);
		goto out;
	}

	xh = bucket_xh(s_bucket);
	count = le16_to_cpu(xh->xh_count);
	/* Entries [0, start) stay; entries [start, count) move. */
	start = ocfs2_xattr_find_divide_pos(xh);

	if (start == count) {
		/* All entries share one hash: nothing can move. */
		xe = &xh->xh_entries[start-1];

		/*
		 * initialized a new empty bucket here.
		 * The hash value is set as one larger than
		 * that of the last entry in the previous bucket.
		 */
		for (i = 0; i < t_bucket->bu_blocks; i++)
			memset(bucket_block(t_bucket, i), 0, blocksize);

		xh = bucket_xh(t_bucket);
		xh->xh_free_start = cpu_to_le16(blocksize);
		xh->xh_entries[0].xe_name_hash = xe->xe_name_hash;
		le32_add_cpu(&xh->xh_entries[0].xe_name_hash, 1);

		goto set_num_buckets;
	}

	/* copy the whole bucket to the new first. */
	ocfs2_xattr_bucket_copy_data(t_bucket, s_bucket);

	/* update the new bucket. */
	xh = bucket_xh(t_bucket);

	/*
	 * Calculate the total name/value len and xh_free_start for
	 * the old bucket first.
	 */
	name_offset = OCFS2_XATTR_BUCKET_SIZE;
	name_value_len = 0;
	for (i = 0; i < start; i++) {
		xe = &xh->xh_entries[i];
		name_value_len += namevalue_size_xe(xe);
		if (le16_to_cpu(xe->xe_name_offset) < name_offset)
			name_offset = le16_to_cpu(xe->xe_name_offset);
	}

	/*
	 * Now begin the modification to the new bucket.
	 *
	 * In the new bucket, We just move the xattr entry to the beginning
	 * and don't touch the name/value. So there will be some holes in the
	 * bucket, and they will be removed when ocfs2_defrag_xattr_bucket is
	 * called.
	 */
	xe = &xh->xh_entries[start];
	len = sizeof(struct ocfs2_xattr_entry) * (count - start);
	trace_ocfs2_divide_xattr_bucket_move(len,
			(int)((char *)xe - (char *)xh),
			(int)((char *)xh->xh_entries - (char *)xh));
	memmove((char *)xh->xh_entries, (char *)xe, len);
	/* Zero the now-vacant tail of the entry array. */
	xe = &xh->xh_entries[count - start];
	len = sizeof(struct ocfs2_xattr_entry) * start;
	memset((char *)xe, 0, len);

	le16_add_cpu(&xh->xh_count, -start);
	le16_add_cpu(&xh->xh_name_value_len, -name_value_len);

	/* Calculate xh_free_start for the new bucket. */
	xh->xh_free_start = cpu_to_le16(OCFS2_XATTR_BUCKET_SIZE);
	for (i = 0; i < le16_to_cpu(xh->xh_count); i++) {
		xe = &xh->xh_entries[i];
		if (le16_to_cpu(xe->xe_name_offset) <
		    le16_to_cpu(xh->xh_free_start))
			xh->xh_free_start = xe->xe_name_offset;
	}

set_num_buckets:
	/* set xh->xh_num_buckets for the new xh. */
	if (new_bucket_head)
		xh->xh_num_buckets = cpu_to_le16(1);
	else
		xh->xh_num_buckets = 0;

	ocfs2_xattr_bucket_journal_dirty(handle, t_bucket);

	/* store the first_hash of the new bucket. */
	if (first_hash)
		*first_hash = le32_to_cpu(xh->xh_entries[0].xe_name_hash);

	/*
	 * Now only update the 1st block of the old bucket.  If we
	 * just added a new empty bucket, there is no need to modify
	 * it.
	 */
	if (start == count)
		goto out;

	xh = bucket_xh(s_bucket);
	memset(&xh->xh_entries[start], 0,
	       sizeof(struct ocfs2_xattr_entry) * (count - start));
	xh->xh_count = cpu_to_le16(start);
	xh->xh_free_start = cpu_to_le16(name_offset);
	xh->xh_name_value_len = cpu_to_le16(name_value_len);

	ocfs2_xattr_bucket_journal_dirty(handle, s_bucket);

out:
	ocfs2_xattr_bucket_free(s_bucket);
	ocfs2_xattr_bucket_free(t_bucket);

	return ret;
}
4802
/*
 * Copy xattr from one bucket to another bucket.
 *
 * The caller must make sure that the journal transaction
 * has enough space for journaling.
 *
 * @t_is_new selects ACCESS_CREATE vs ACCESS_WRITE for the target;
 * see the long comment below.  Returns 0 or a negative errno.
 */
static int ocfs2_cp_xattr_bucket(struct inode *inode,
				 handle_t *handle,
				 u64 s_blkno,
				 u64 t_blkno,
				 int t_is_new)
{
	int ret;
	struct ocfs2_xattr_bucket *s_bucket = NULL, *t_bucket = NULL;

	/* Copying a bucket onto itself would be a caller bug. */
	BUG_ON(s_blkno == t_blkno);

	trace_ocfs2_cp_xattr_bucket((unsigned long long)s_blkno,
				    (unsigned long long)t_blkno,
				    t_is_new);

	s_bucket = ocfs2_xattr_bucket_new(inode);
	t_bucket = ocfs2_xattr_bucket_new(inode);
	if (!s_bucket || !t_bucket) {
		ret = -ENOMEM;
		mlog_errno(ret);
		goto out;
	}

	ret = ocfs2_read_xattr_bucket(s_bucket, s_blkno);
	if (ret)
		goto out;

	/*
	 * Even if !t_is_new, we're overwriting t_bucket.  Thus,
	 * there's no need to read it.
	 */
	ret = ocfs2_init_xattr_bucket(t_bucket, t_blkno, t_is_new);
	if (ret)
		goto out;

	/*
	 * Hey, if we're overwriting t_bucket, what difference does
	 * ACCESS_CREATE vs ACCESS_WRITE make?  Well, if we allocated a new
	 * cluster to fill, we came here from
	 * ocfs2_mv_xattr_buckets(), and it is really new -
	 * ACCESS_CREATE is required.  But we also might have moved data
	 * out of t_bucket before extending back into it.
	 * ocfs2_add_new_xattr_bucket() can do this - its call to
	 * ocfs2_add_new_xattr_cluster() may have created a new extent
	 * and copied out the end of the old extent.  Then it re-extends
	 * the old extent back to create space for new xattrs.  That's
	 * how we get here, and the bucket isn't really new.
	 */
	ret = ocfs2_xattr_bucket_journal_access(handle, t_bucket,
						t_is_new ?
						OCFS2_JOURNAL_ACCESS_CREATE :
						OCFS2_JOURNAL_ACCESS_WRITE);
	if (ret)
		goto out;

	ocfs2_xattr_bucket_copy_data(t_bucket, s_bucket);
	ocfs2_xattr_bucket_journal_dirty(handle, t_bucket);

out:
	ocfs2_xattr_bucket_free(t_bucket);
	ocfs2_xattr_bucket_free(s_bucket);

	return ret;
}
4873
4874/*
Joel Becker874d65a2008-11-26 13:02:18 -08004875 * src_blk points to the start of an existing extent. last_blk points to
4876 * last cluster in that extent. to_blk points to a newly allocated
Joel Becker54ecb6b2008-11-26 13:18:31 -08004877 * extent. We copy the buckets from the cluster at last_blk to the new
4878 * extent. If start_bucket is non-zero, we skip that many buckets before
4879 * we start copying. The new extent's xh_num_buckets gets set to the
4880 * number of buckets we copied. The old extent's xh_num_buckets shrinks
4881 * by the same amount.
Tao Ma01225592008-08-18 17:38:53 +08004882 */
/*
 * Returns 0 on success, negative errno otherwise.  The caller supplies
 * the running journal handle; this function extends the transaction
 * itself with the credits it needs before dirtying any buckets.
 */
static int ocfs2_mv_xattr_buckets(struct inode *inode, handle_t *handle,
				  u64 src_blk, u64 last_blk, u64 to_blk,
				  unsigned int start_bucket,
				  u32 *first_hash)
{
	int i, ret, credits;
	struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
	int blks_per_bucket = ocfs2_blocks_per_xattr_bucket(inode->i_sb);
	int num_buckets = ocfs2_xattr_buckets_per_cluster(osb);
	struct ocfs2_xattr_bucket *old_first, *new_first;

	trace_ocfs2_mv_xattr_buckets((unsigned long long)last_blk,
				     (unsigned long long)to_blk);

	BUG_ON(start_bucket >= num_buckets);
	if (start_bucket) {
		/* Skip the requested number of leading buckets. */
		num_buckets -= start_bucket;
		last_blk += (start_bucket * blks_per_bucket);
	}

	/* The first bucket of the original extent */
	old_first = ocfs2_xattr_bucket_new(inode);
	/* The first bucket of the new extent */
	new_first = ocfs2_xattr_bucket_new(inode);
	if (!old_first || !new_first) {
		ret = -ENOMEM;
		mlog_errno(ret);
		goto out;
	}

	ret = ocfs2_read_xattr_bucket(old_first, src_blk);
	if (ret) {
		mlog_errno(ret);
		goto out;
	}

	/*
	 * We need to update the first bucket of the old extent and all
	 * the buckets going to the new extent.
	 */
	credits = ((num_buckets + 1) * blks_per_bucket);
	ret = ocfs2_extend_trans(handle, credits);
	if (ret) {
		mlog_errno(ret);
		goto out;
	}

	ret = ocfs2_xattr_bucket_journal_access(handle, old_first,
						OCFS2_JOURNAL_ACCESS_WRITE);
	if (ret) {
		mlog_errno(ret);
		goto out;
	}

	/* Copy each bucket from the old extent tail into the new extent. */
	for (i = 0; i < num_buckets; i++) {
		ret = ocfs2_cp_xattr_bucket(inode, handle,
					    last_blk + (i * blks_per_bucket),
					    to_blk + (i * blks_per_bucket),
					    1);
		if (ret) {
			mlog_errno(ret);
			goto out;
		}
	}

	/*
	 * Get the new bucket ready before we dirty anything
	 * (This actually shouldn't fail, because we already dirtied
	 * it once in ocfs2_cp_xattr_bucket()).
	 */
	ret = ocfs2_read_xattr_bucket(new_first, to_blk);
	if (ret) {
		mlog_errno(ret);
		goto out;
	}
	ret = ocfs2_xattr_bucket_journal_access(handle, new_first,
						OCFS2_JOURNAL_ACCESS_WRITE);
	if (ret) {
		mlog_errno(ret);
		goto out;
	}

	/* Now update the headers */
	le16_add_cpu(&bucket_xh(old_first)->xh_num_buckets, -num_buckets);
	ocfs2_xattr_bucket_journal_dirty(handle, old_first);

	bucket_xh(new_first)->xh_num_buckets = cpu_to_le16(num_buckets);
	ocfs2_xattr_bucket_journal_dirty(handle, new_first);

	/* Report the lowest name hash now living in the new extent. */
	if (first_hash)
		*first_hash = le32_to_cpu(bucket_xh(new_first)->xh_entries[0].xe_name_hash);

out:
	ocfs2_xattr_bucket_free(new_first);
	ocfs2_xattr_bucket_free(old_first);
	return ret;
}
4980
4981/*
Tao Ma80bcaf32008-10-27 06:06:24 +08004982 * Move some xattrs in this cluster to the new cluster.
Tao Ma01225592008-08-18 17:38:53 +08004983 * This function should only be called when bucket size == cluster size.
4984 * Otherwise ocfs2_mv_xattr_bucket_cross_cluster should be used instead.
4985 */
Tao Ma80bcaf32008-10-27 06:06:24 +08004986static int ocfs2_divide_xattr_cluster(struct inode *inode,
4987 handle_t *handle,
4988 u64 prev_blk,
4989 u64 new_blk,
4990 u32 *first_hash)
Tao Ma01225592008-08-18 17:38:53 +08004991{
4992 u16 blk_per_bucket = ocfs2_blocks_per_xattr_bucket(inode->i_sb);
Tao Mac901fb02010-04-26 14:34:57 +08004993 int ret, credits = 2 * blk_per_bucket;
Tao Ma01225592008-08-18 17:38:53 +08004994
4995 BUG_ON(OCFS2_XATTR_BUCKET_SIZE < OCFS2_SB(inode->i_sb)->s_clustersize);
4996
4997 ret = ocfs2_extend_trans(handle, credits);
4998 if (ret) {
4999 mlog_errno(ret);
5000 return ret;
5001 }
5002
5003 /* Move half of the xattr in start_blk to the next bucket. */
Tao Ma80bcaf32008-10-27 06:06:24 +08005004 return ocfs2_divide_xattr_bucket(inode, handle, prev_blk,
5005 new_blk, first_hash, 1);
Tao Ma01225592008-08-18 17:38:53 +08005006}
5007
5008/*
5009 * Move some xattrs from the old cluster to the new one since they are not
5010 * contiguous in ocfs2 xattr tree.
5011 *
5012 * new_blk starts a new separate cluster, and we will move some xattrs from
5013 * prev_blk to it. v_start will be set as the first name hash value in this
5014 * new cluster so that it can be used as e_cpos during tree insertion and
5015 * don't collide with our original b-tree operations. first_bh and header_bh
5016 * will also be updated since they will be used in ocfs2_extend_xattr_bucket
5017 * to extend the insert bucket.
5018 *
5019 * The problem is how much xattr should we move to the new one and when should
5020 * we update first_bh and header_bh?
5021 * 1. If cluster size > bucket size, that means the previous cluster has more
5022 * than 1 bucket, so just move half nums of bucket into the new cluster and
5023 * update the first_bh and header_bh if the insert bucket has been moved
5024 * to the new cluster.
5025 * 2. If cluster_size == bucket_size:
5026 * a) If the previous extent rec has more than one cluster and the insert
5027 * place isn't in the last cluster, copy the entire last cluster to the
 * new one. This time, we don't need to update the first_bh and header_bh
5029 * since they will not be moved into the new cluster.
5030 * b) Otherwise, move the bottom half of the xattrs in the last cluster into
5031 * the new one. And we set the extend flag to zero if the insert place is
5032 * moved into the new allocated cluster since no extend is needed.
5033 */
static int ocfs2_adjust_xattr_cross_cluster(struct inode *inode,
					    handle_t *handle,
					    struct ocfs2_xattr_bucket *first,
					    struct ocfs2_xattr_bucket *target,
					    u64 new_blk,
					    u32 prev_clusters,
					    u32 *v_start,
					    int *extend)
{
	int ret;

	trace_ocfs2_adjust_xattr_cross_cluster(
			(unsigned long long)bucket_blkno(first),
			(unsigned long long)new_blk, prev_clusters);

	/*
	 * Case 1 (see the comment above this function): more than one
	 * bucket per cluster, so move whole buckets across clusters.
	 */
	if (ocfs2_xattr_buckets_per_cluster(OCFS2_SB(inode->i_sb)) > 1) {
		ret = ocfs2_mv_xattr_bucket_cross_cluster(inode,
							  handle,
							  first, target,
							  new_blk,
							  prev_clusters,
							  v_start);
		if (ret)
			mlog_errno(ret);
	} else {
		/* The start of the last cluster in the first extent */
		u64 last_blk = bucket_blkno(first) +
			((prev_clusters - 1) *
			 ocfs2_clusters_to_blocks(inode->i_sb, 1));

		/*
		 * Case 2a: the insert target is not in the last cluster,
		 * so the whole last cluster can be copied away untouched.
		 */
		if (prev_clusters > 1 && bucket_blkno(target) != last_blk) {
			ret = ocfs2_mv_xattr_buckets(inode, handle,
						     bucket_blkno(first),
						     last_blk, new_blk, 0,
						     v_start);
			if (ret)
				mlog_errno(ret);
		} else {
			/*
			 * Case 2b: split the last cluster's xattrs between
			 * the old and the newly allocated cluster.
			 */
			ret = ocfs2_divide_xattr_cluster(inode, handle,
							 last_blk, new_blk,
							 v_start);
			if (ret)
				mlog_errno(ret);

			/*
			 * The target bucket moved into the new cluster;
			 * no extend of the old extent is needed anymore.
			 */
			if ((bucket_blkno(target) == last_blk) && extend)
				*extend = 0;
		}
	}

	return ret;
}
5085
5086/*
5087 * Add a new cluster for xattr storage.
5088 *
5089 * If the new cluster is contiguous with the previous one, it will be
5090 * appended to the same extent record, and num_clusters will be updated.
5091 * If not, we will insert a new extent for it and move some xattrs in
5092 * the last cluster into the new allocated one.
5093 * We also need to limit the maximum size of a btree leaf, otherwise we'll
5094 * lose the benefits of hashing because we'll have to search large leaves.
5095 * So now the maximum size is OCFS2_MAX_XATTR_TREE_LEAF_SIZE(or clustersize,
5096 * if it's bigger).
5097 *
5098 * first_bh is the first block of the previous extent rec and header_bh
5099 * indicates the bucket we will insert the new xattrs. They will be updated
5100 * when the header_bh is moved into the new cluster.
5101 */
static int ocfs2_add_new_xattr_cluster(struct inode *inode,
				       struct buffer_head *root_bh,
				       struct ocfs2_xattr_bucket *first,
				       struct ocfs2_xattr_bucket *target,
				       u32 *num_clusters,
				       u32 prev_cpos,
				       int *extend,
				       struct ocfs2_xattr_set_ctxt *ctxt)
{
	int ret;
	u16 bpc = ocfs2_clusters_to_blocks(inode->i_sb, 1);
	u32 prev_clusters = *num_clusters;
	u32 clusters_to_add = 1, bit_off, num_bits, v_start = 0;
	u64 block;
	handle_t *handle = ctxt->handle;
	struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
	struct ocfs2_extent_tree et;

	trace_ocfs2_add_new_xattr_cluster_begin(
		(unsigned long long)OCFS2_I(inode)->ip_blkno,
		(unsigned long long)bucket_blkno(first),
		prev_cpos, prev_clusters);

	ocfs2_init_xattr_tree_extent_tree(&et, INODE_CACHE(inode), root_bh);

	/* The xattr tree root will be modified below; journal it first. */
	ret = ocfs2_journal_access_xb(handle, INODE_CACHE(inode), root_bh,
				      OCFS2_JOURNAL_ACCESS_WRITE);
	if (ret < 0) {
		mlog_errno(ret);
		goto leave;
	}

	/* Claim one new cluster from the preallocated data context. */
	ret = __ocfs2_claim_clusters(handle, ctxt->data_ac, 1,
				     clusters_to_add, &bit_off, &num_bits);
	if (ret < 0) {
		if (ret != -ENOSPC)
			mlog_errno(ret);
		goto leave;
	}

	BUG_ON(num_bits > clusters_to_add);

	block = ocfs2_clusters_to_blocks(osb->sb, bit_off);
	trace_ocfs2_add_new_xattr_cluster((unsigned long long)block, num_bits);

	if (bucket_blkno(first) + (prev_clusters * bpc) == block &&
	    (prev_clusters + num_bits) << osb->s_clustersize_bits <=
	     OCFS2_MAX_XATTR_TREE_LEAF_SIZE) {
		/*
		 * If this cluster is contiguous with the old one and
		 * adding this new cluster, we don't surpass the limit of
		 * OCFS2_MAX_XATTR_TREE_LEAF_SIZE, cool. We will let it be
		 * initialized and used like other buckets in the previous
		 * cluster.
		 * So add it as a contiguous one. The caller will handle
		 * its init process.
		 */
		v_start = prev_cpos + prev_clusters;
		*num_clusters = prev_clusters + num_bits;
	} else {
		/*
		 * Not contiguous (or leaf would get too big): move some
		 * xattrs into the new cluster and compute its first cpos.
		 */
		ret = ocfs2_adjust_xattr_cross_cluster(inode,
						       handle,
						       first,
						       target,
						       block,
						       prev_clusters,
						       &v_start,
						       extend);
		if (ret) {
			mlog_errno(ret);
			goto leave;
		}
	}

	trace_ocfs2_add_new_xattr_cluster_insert((unsigned long long)block,
						 v_start, num_bits);
	/* Record the new cluster in the xattr b-tree at cpos v_start. */
	ret = ocfs2_insert_extent(handle, &et, v_start, block,
				  num_bits, 0, ctxt->meta_ac);
	if (ret < 0) {
		mlog_errno(ret);
		goto leave;
	}

	ocfs2_journal_dirty(handle, root_bh);

leave:
	return ret;
}
5190
5191/*
Joel Becker92de1092008-11-25 17:06:40 -08005192 * We are given an extent. 'first' is the bucket at the very front of
5193 * the extent. The extent has space for an additional bucket past
5194 * bucket_xh(first)->xh_num_buckets. 'target_blkno' is the block number
5195 * of the target bucket. We wish to shift every bucket past the target
5196 * down one, filling in that additional space. When we get back to the
5197 * target, we split the target between itself and the now-empty bucket
5198 * at target+1 (aka, target_blkno + blks_per_bucket).
Tao Ma01225592008-08-18 17:38:53 +08005199 */
static int ocfs2_extend_xattr_bucket(struct inode *inode,
				     handle_t *handle,
				     struct ocfs2_xattr_bucket *first,
				     u64 target_blk,
				     u32 num_clusters)
{
	int ret, credits;
	struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
	u16 blk_per_bucket = ocfs2_blocks_per_xattr_bucket(inode->i_sb);
	u64 end_blk;
	/* Current bucket count; also the index of the slot being created. */
	u16 new_bucket = le16_to_cpu(bucket_xh(first)->xh_num_buckets);

	trace_ocfs2_extend_xattr_bucket((unsigned long long)target_blk,
					(unsigned long long)bucket_blkno(first),
					num_clusters, new_bucket);

	/* The extent must have room for an additional bucket */
	BUG_ON(new_bucket >=
	       (num_clusters * ocfs2_xattr_buckets_per_cluster(osb)));

	/* end_blk points to the last existing bucket */
	end_blk = bucket_blkno(first) + ((new_bucket - 1) * blk_per_bucket);

	/*
	 * end_blk is the start of the last existing bucket.
	 * Thus, (end_blk - target_blk) covers the target bucket and
	 * every bucket after it up to, but not including, the last
	 * existing bucket. Then we add the last existing bucket, the
	 * new bucket, and the first bucket (3 * blk_per_bucket).
	 */
	credits = (end_blk - target_blk) + (3 * blk_per_bucket);
	ret = ocfs2_extend_trans(handle, credits);
	if (ret) {
		mlog_errno(ret);
		goto out;
	}

	ret = ocfs2_xattr_bucket_journal_access(handle, first,
						OCFS2_JOURNAL_ACCESS_WRITE);
	if (ret) {
		mlog_errno(ret);
		goto out;
	}

	/* Shift every bucket past the target down by one, back to front. */
	while (end_blk != target_blk) {
		ret = ocfs2_cp_xattr_bucket(inode, handle, end_blk,
					    end_blk + blk_per_bucket, 0);
		if (ret)
			goto out;
		end_blk -= blk_per_bucket;
	}

	/* Move half of the xattr in target_blkno to the next bucket. */
	ret = ocfs2_divide_xattr_bucket(inode, handle, target_blk,
					target_blk + blk_per_bucket, NULL, 0);

	/* Account for the bucket added above, even on divide failure. */
	le16_add_cpu(&bucket_xh(first)->xh_num_buckets, 1);
	ocfs2_xattr_bucket_journal_dirty(handle, first);

out:
	return ret;
}
5262
5263/*
Joel Becker91f20332008-11-26 15:25:41 -08005264 * Add new xattr bucket in an extent record and adjust the buckets
5265 * accordingly. xb_bh is the ocfs2_xattr_block, and target is the
5266 * bucket we want to insert into.
Tao Ma01225592008-08-18 17:38:53 +08005267 *
Joel Becker91f20332008-11-26 15:25:41 -08005268 * In the easy case, we will move all the buckets after target down by
5269 * one. Half of target's xattrs will be moved to the next bucket.
5270 *
5271 * If current cluster is full, we'll allocate a new one. This may not
5272 * be contiguous. The underlying calls will make sure that there is
5273 * space for the insert, shifting buckets around if necessary.
5274 * 'target' may be moved by those calls.
Tao Ma01225592008-08-18 17:38:53 +08005275 */
static int ocfs2_add_new_xattr_bucket(struct inode *inode,
				      struct buffer_head *xb_bh,
				      struct ocfs2_xattr_bucket *target,
				      struct ocfs2_xattr_set_ctxt *ctxt)
{
	struct ocfs2_xattr_block *xb =
			(struct ocfs2_xattr_block *)xb_bh->b_data;
	struct ocfs2_xattr_tree_root *xb_root = &xb->xb_attrs.xb_root;
	struct ocfs2_extent_list *el = &xb_root->xt_list;
	u32 name_hash =
		le32_to_cpu(bucket_xh(target)->xh_entries[0].xe_name_hash);
	struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
	int ret, num_buckets, extend = 1;
	u64 p_blkno;
	u32 e_cpos, num_clusters;
	/* The bucket at the front of the extent */
	struct ocfs2_xattr_bucket *first;

	trace_ocfs2_add_new_xattr_bucket(
				(unsigned long long)bucket_blkno(target));

	/* The first bucket of the original extent */
	first = ocfs2_xattr_bucket_new(inode);
	if (!first) {
		ret = -ENOMEM;
		mlog_errno(ret);
		goto out;
	}

	/* Find the extent record covering the target's name hash. */
	ret = ocfs2_xattr_get_rec(inode, name_hash, &p_blkno, &e_cpos,
				  &num_clusters, el);
	if (ret) {
		mlog_errno(ret);
		goto out;
	}

	ret = ocfs2_read_xattr_bucket(first, p_blkno);
	if (ret) {
		mlog_errno(ret);
		goto out;
	}

	num_buckets = ocfs2_xattr_buckets_per_cluster(osb) * num_clusters;
	if (num_buckets == le16_to_cpu(bucket_xh(first)->xh_num_buckets)) {
		/*
		 * This can move first+target if the target bucket moves
		 * to the new extent.
		 */
		ret = ocfs2_add_new_xattr_cluster(inode,
						  xb_bh,
						  first,
						  target,
						  &num_clusters,
						  e_cpos,
						  &extend,
						  ctxt);
		if (ret) {
			mlog_errno(ret);
			goto out;
		}
	}

	/* extend is cleared when the target moved to the new cluster. */
	if (extend) {
		ret = ocfs2_extend_xattr_bucket(inode,
						ctxt->handle,
						first,
						bucket_blkno(target),
						num_clusters);
		if (ret)
			mlog_errno(ret);
	}

out:
	ocfs2_xattr_bucket_free(first);

	return ret;
}
5353
Tao Ma01225592008-08-18 17:38:53 +08005354/*
Tao Ma01225592008-08-18 17:38:53 +08005355 * Truncate the specified xe_off entry in xattr bucket.
5356 * bucket is indicated by header_bh and len is the new length.
5357 * Both the ocfs2_xattr_value_root and the entry will be updated here.
5358 *
5359 * Copy the new updated xe and xe_value_root to new_xe and new_xv if needed.
5360 */
static int ocfs2_xattr_bucket_value_truncate(struct inode *inode,
					     struct ocfs2_xattr_bucket *bucket,
					     int xe_off,
					     int len,
					     struct ocfs2_xattr_set_ctxt *ctxt)
{
	int ret, offset;
	u64 value_blk;
	struct ocfs2_xattr_entry *xe;
	struct ocfs2_xattr_header *xh = bucket_xh(bucket);
	size_t blocksize = inode->i_sb->s_blocksize;
	struct ocfs2_xattr_value_buf vb = {
		.vb_access = ocfs2_journal_access,
	};

	xe = &xh->xh_entries[xe_off];

	/* Only out-of-line (non-local) values have a value root to shrink. */
	BUG_ON(!xe || ocfs2_xattr_is_local(xe));

	/* The value root sits right after the name within the bucket. */
	offset = le16_to_cpu(xe->xe_name_offset) +
		 OCFS2_XATTR_SIZE(xe->xe_name_len);

	value_blk = offset / blocksize;

	/* We don't allow ocfs2_xattr_value to be stored in different block. */
	BUG_ON(value_blk != (offset + OCFS2_XATTR_ROOT_SIZE - 1) / blocksize);

	vb.vb_bh = bucket->bu_bhs[value_blk];
	BUG_ON(!vb.vb_bh);

	vb.vb_xv = (struct ocfs2_xattr_value_root *)
		(vb.vb_bh->b_data + offset % blocksize);

	/*
	 * From here on out we have to dirty the bucket.  The generic
	 * value calls only modify one of the bucket's bhs, but we need
	 * to send the bucket at once.  So if they error, they *could* have
	 * modified something.  We have to assume they did, and dirty
	 * the whole bucket.  This leaves us in a consistent state.
	 */
	trace_ocfs2_xattr_bucket_value_truncate(
			(unsigned long long)bucket_blkno(bucket), xe_off, len);
	ret = ocfs2_xattr_value_truncate(inode, &vb, len, ctxt);
	if (ret) {
		mlog_errno(ret);
		goto out;
	}

	ret = ocfs2_xattr_bucket_journal_access(ctxt->handle, bucket,
						OCFS2_JOURNAL_ACCESS_WRITE);
	if (ret) {
		mlog_errno(ret);
		goto out;
	}

	/* Record the new (truncated) value length in the entry. */
	xe->xe_value_size = cpu_to_le64(len);

	ocfs2_xattr_bucket_journal_dirty(ctxt->handle, bucket);

out:
	return ret;
}
5423
Tao Ma01225592008-08-18 17:38:53 +08005424static int ocfs2_rm_xattr_cluster(struct inode *inode,
5425 struct buffer_head *root_bh,
5426 u64 blkno,
5427 u32 cpos,
Tao Ma47bca492009-08-18 11:43:42 +08005428 u32 len,
5429 void *para)
Tao Ma01225592008-08-18 17:38:53 +08005430{
5431 int ret;
5432 struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
5433 struct inode *tl_inode = osb->osb_tl_inode;
5434 handle_t *handle;
5435 struct ocfs2_xattr_block *xb =
5436 (struct ocfs2_xattr_block *)root_bh->b_data;
Tao Ma01225592008-08-18 17:38:53 +08005437 struct ocfs2_alloc_context *meta_ac = NULL;
5438 struct ocfs2_cached_dealloc_ctxt dealloc;
Joel Beckerf99b9b72008-08-20 19:36:33 -07005439 struct ocfs2_extent_tree et;
5440
Tao Ma47bca492009-08-18 11:43:42 +08005441 ret = ocfs2_iterate_xattr_buckets(inode, blkno, len,
Tao Mace9c5a52009-08-18 11:43:59 +08005442 ocfs2_delete_xattr_in_bucket, para);
Tao Ma47bca492009-08-18 11:43:42 +08005443 if (ret) {
5444 mlog_errno(ret);
5445 return ret;
5446 }
5447
Joel Becker5e404e92009-02-13 03:54:22 -08005448 ocfs2_init_xattr_tree_extent_tree(&et, INODE_CACHE(inode), root_bh);
Tao Ma01225592008-08-18 17:38:53 +08005449
5450 ocfs2_init_dealloc_ctxt(&dealloc);
5451
Tao Ma402b4182011-02-23 22:01:17 +08005452 trace_ocfs2_rm_xattr_cluster(
5453 (unsigned long long)OCFS2_I(inode)->ip_blkno,
5454 (unsigned long long)blkno, cpos, len);
Tao Ma01225592008-08-18 17:38:53 +08005455
Joel Becker8cb471e2009-02-10 20:00:41 -08005456 ocfs2_remove_xattr_clusters_from_cache(INODE_CACHE(inode), blkno,
5457 len);
Tao Ma01225592008-08-18 17:38:53 +08005458
Joel Beckerf99b9b72008-08-20 19:36:33 -07005459 ret = ocfs2_lock_allocators(inode, &et, 0, 1, NULL, &meta_ac);
Tao Ma01225592008-08-18 17:38:53 +08005460 if (ret) {
5461 mlog_errno(ret);
5462 return ret;
5463 }
5464
Al Viro59551022016-01-22 15:40:57 -05005465 inode_lock(tl_inode);
Tao Ma01225592008-08-18 17:38:53 +08005466
5467 if (ocfs2_truncate_log_needs_flush(osb)) {
5468 ret = __ocfs2_flush_truncate_log(osb);
5469 if (ret < 0) {
5470 mlog_errno(ret);
5471 goto out;
5472 }
5473 }
5474
Jan Karaa90714c2008-10-09 19:38:40 +02005475 handle = ocfs2_start_trans(osb, ocfs2_remove_extent_credits(osb->sb));
Tao Mad3264792008-10-24 07:57:28 +08005476 if (IS_ERR(handle)) {
Tao Ma01225592008-08-18 17:38:53 +08005477 ret = -ENOMEM;
5478 mlog_errno(ret);
5479 goto out;
5480 }
5481
Joel Becker0cf2f762009-02-12 16:41:25 -08005482 ret = ocfs2_journal_access_xb(handle, INODE_CACHE(inode), root_bh,
Joel Becker84008972008-12-09 16:11:49 -08005483 OCFS2_JOURNAL_ACCESS_WRITE);
Tao Ma01225592008-08-18 17:38:53 +08005484 if (ret) {
5485 mlog_errno(ret);
5486 goto out_commit;
5487 }
5488
Joel Beckerdbdcf6a2009-02-13 03:41:26 -08005489 ret = ocfs2_remove_extent(handle, &et, cpos, len, meta_ac,
Joel Beckerf99b9b72008-08-20 19:36:33 -07005490 &dealloc);
Tao Ma01225592008-08-18 17:38:53 +08005491 if (ret) {
5492 mlog_errno(ret);
5493 goto out_commit;
5494 }
5495
5496 le32_add_cpu(&xb->xb_attrs.xb_root.xt_clusters, -len);
Joel Beckerec20cec2010-03-19 14:13:52 -07005497 ocfs2_journal_dirty(handle, root_bh);
Tao Ma01225592008-08-18 17:38:53 +08005498
5499 ret = ocfs2_truncate_log_append(osb, handle, blkno, len);
5500 if (ret)
5501 mlog_errno(ret);
Darrick J. Wong6fdb7022014-04-03 14:47:08 -07005502 ocfs2_update_inode_fsync_trans(handle, inode, 0);
Tao Ma01225592008-08-18 17:38:53 +08005503
5504out_commit:
5505 ocfs2_commit_trans(osb, handle);
5506out:
5507 ocfs2_schedule_truncate_log_flush(osb, 1);
5508
Al Viro59551022016-01-22 15:40:57 -05005509 inode_unlock(tl_inode);
Tao Ma01225592008-08-18 17:38:53 +08005510
5511 if (meta_ac)
5512 ocfs2_free_alloc_context(meta_ac);
5513
5514 ocfs2_run_deallocs(osb, &dealloc);
5515
5516 return ret;
5517}
5518
Tao Ma01225592008-08-18 17:38:53 +08005519/*
Tao Ma80bcaf32008-10-27 06:06:24 +08005520 * check whether the xattr bucket is filled up with the same hash value.
5521 * If we want to insert the xattr with the same hash, return -ENOSPC.
5522 * If we want to insert a xattr with different hash value, go ahead
5523 * and ocfs2_divide_xattr_bucket will handle this.
5524 */
Tao Ma01225592008-08-18 17:38:53 +08005525static int ocfs2_check_xattr_bucket_collision(struct inode *inode,
Tao Ma80bcaf32008-10-27 06:06:24 +08005526 struct ocfs2_xattr_bucket *bucket,
5527 const char *name)
Tao Ma01225592008-08-18 17:38:53 +08005528{
Joel Becker3e632942008-10-24 17:04:49 -07005529 struct ocfs2_xattr_header *xh = bucket_xh(bucket);
Tao Ma80bcaf32008-10-27 06:06:24 +08005530 u32 name_hash = ocfs2_xattr_name_hash(inode, name, strlen(name));
5531
5532 if (name_hash != le32_to_cpu(xh->xh_entries[0].xe_name_hash))
5533 return 0;
Tao Ma01225592008-08-18 17:38:53 +08005534
5535 if (xh->xh_entries[le16_to_cpu(xh->xh_count) - 1].xe_name_hash ==
5536 xh->xh_entries[0].xe_name_hash) {
5537 mlog(ML_ERROR, "Too much hash collision in xattr bucket %llu, "
5538 "hash = %u\n",
Joel Becker9c7759a2008-10-24 16:21:03 -07005539 (unsigned long long)bucket_blkno(bucket),
Tao Ma01225592008-08-18 17:38:53 +08005540 le32_to_cpu(xh->xh_entries[0].xe_name_hash));
5541 return -ENOSPC;
5542 }
5543
5544 return 0;
5545}
5546
Joel Beckerc5d95df2009-08-18 21:03:24 -07005547/*
5548 * Try to set the entry in the current bucket. If we fail, the caller
5549 * will handle getting us another bucket.
5550 */
5551static int ocfs2_xattr_set_entry_bucket(struct inode *inode,
5552 struct ocfs2_xattr_info *xi,
5553 struct ocfs2_xattr_search *xs,
5554 struct ocfs2_xattr_set_ctxt *ctxt)
5555{
5556 int ret;
5557 struct ocfs2_xa_loc loc;
5558
Tao Ma402b4182011-02-23 22:01:17 +08005559 trace_ocfs2_xattr_set_entry_bucket(xi->xi_name);
Joel Beckerc5d95df2009-08-18 21:03:24 -07005560
5561 ocfs2_init_xattr_bucket_xa_loc(&loc, xs->bucket,
5562 xs->not_found ? NULL : xs->here);
5563 ret = ocfs2_xa_set(&loc, xi, ctxt);
5564 if (!ret) {
5565 xs->here = loc.xl_entry;
5566 goto out;
5567 }
5568 if (ret != -ENOSPC) {
5569 mlog_errno(ret);
5570 goto out;
5571 }
5572
5573 /* Ok, we need space. Let's try defragmenting the bucket. */
5574 ret = ocfs2_defrag_xattr_bucket(inode, ctxt->handle,
5575 xs->bucket);
5576 if (ret) {
5577 mlog_errno(ret);
5578 goto out;
5579 }
5580
5581 ret = ocfs2_xa_set(&loc, xi, ctxt);
5582 if (!ret) {
5583 xs->here = loc.xl_entry;
5584 goto out;
5585 }
5586 if (ret != -ENOSPC)
5587 mlog_errno(ret);
5588
5589
5590out:
Joel Beckerc5d95df2009-08-18 21:03:24 -07005591 return ret;
5592}
5593
/*
 * Set an xattr entry within an indexed (bucketed) xattr tree.  First
 * try the bucket found by the preceding search; on -ENOSPC, allocate a
 * new bucket (unless the current one is saturated by a single hash
 * value), re-run the search against the reorganized tree, and retry.
 */
static int ocfs2_xattr_set_entry_index_block(struct inode *inode,
					     struct ocfs2_xattr_info *xi,
					     struct ocfs2_xattr_search *xs,
					     struct ocfs2_xattr_set_ctxt *ctxt)
{
	int ret;

	trace_ocfs2_xattr_set_entry_index_block(xi->xi_name);

	ret = ocfs2_xattr_set_entry_bucket(inode, xi, xs, ctxt);
	if (!ret)
		goto out;
	if (ret != -ENOSPC) {
		mlog_errno(ret);
		goto out;
	}

	/* Ack, need more space.  Let's try to get another bucket! */

	/*
	 * We do not allow for overlapping ranges between buckets. And
	 * the maximum number of collisions we will allow for then is
	 * one bucket's worth, so check it here whether we need to
	 * add a new bucket for the insert.
	 */
	ret = ocfs2_check_xattr_bucket_collision(inode,
						 xs->bucket,
						 xi->xi_name);
	if (ret) {
		mlog_errno(ret);
		goto out;
	}

	ret = ocfs2_add_new_xattr_bucket(inode,
					 xs->xattr_bh,
					 xs->bucket,
					 ctxt);
	if (ret) {
		mlog_errno(ret);
		goto out;
	}

	/*
	 * ocfs2_add_new_xattr_bucket() will have updated
	 * xs->bucket if it moved, but it will not have updated
	 * any of the other search fields.  Thus, we drop it and
	 * re-search.  Everything should be cached, so it'll be
	 * quick.
	 */
	ocfs2_xattr_bucket_relse(xs->bucket);
	ret = ocfs2_xattr_index_block_find(inode, xs->xattr_bh,
					   xi->xi_name_index,
					   xi->xi_name, xs);
	/* -ENODATA just means "not found"; record it and keep going. */
	if (ret && ret != -ENODATA)
		goto out;
	xs->not_found = ret;

	/* Ok, we have a new bucket, let's try again */
	ret = ocfs2_xattr_set_entry_bucket(inode, xi, xs, ctxt);
	if (ret && (ret != -ENOSPC))
		mlog_errno(ret);

out:
	return ret;
}
Tao Maa3944252008-08-18 17:38:54 +08005659
5660static int ocfs2_delete_xattr_in_bucket(struct inode *inode,
5661 struct ocfs2_xattr_bucket *bucket,
5662 void *para)
5663{
Tao Mace9c5a52009-08-18 11:43:59 +08005664 int ret = 0, ref_credits;
Joel Becker3e632942008-10-24 17:04:49 -07005665 struct ocfs2_xattr_header *xh = bucket_xh(bucket);
Tao Maa3944252008-08-18 17:38:54 +08005666 u16 i;
5667 struct ocfs2_xattr_entry *xe;
Tao Ma78f30c32008-11-12 08:27:00 +08005668 struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
5669 struct ocfs2_xattr_set_ctxt ctxt = {NULL, NULL,};
Joel Becker548b0f22008-11-24 19:32:13 -08005670 int credits = ocfs2_remove_extent_credits(osb->sb) +
5671 ocfs2_blocks_per_xattr_bucket(inode->i_sb);
Tao Mace9c5a52009-08-18 11:43:59 +08005672 struct ocfs2_xattr_value_root *xv;
5673 struct ocfs2_rm_xattr_bucket_para *args =
5674 (struct ocfs2_rm_xattr_bucket_para *)para;
Tao Ma78f30c32008-11-12 08:27:00 +08005675
5676 ocfs2_init_dealloc_ctxt(&ctxt.dealloc);
Tao Maa3944252008-08-18 17:38:54 +08005677
5678 for (i = 0; i < le16_to_cpu(xh->xh_count); i++) {
5679 xe = &xh->xh_entries[i];
5680 if (ocfs2_xattr_is_local(xe))
5681 continue;
5682
Tao Mace9c5a52009-08-18 11:43:59 +08005683 ret = ocfs2_get_xattr_tree_value_root(inode->i_sb, bucket,
5684 i, &xv, NULL);
Joseph Qi023d4ea2015-04-14 15:43:33 -07005685 if (ret) {
5686 mlog_errno(ret);
5687 break;
5688 }
Tao Mace9c5a52009-08-18 11:43:59 +08005689
5690 ret = ocfs2_lock_xattr_remove_allocators(inode, xv,
5691 args->ref_ci,
5692 args->ref_root_bh,
5693 &ctxt.meta_ac,
5694 &ref_credits);
5695
5696 ctxt.handle = ocfs2_start_trans(osb, credits + ref_credits);
Tao Ma88c3b062008-12-11 08:54:11 +08005697 if (IS_ERR(ctxt.handle)) {
5698 ret = PTR_ERR(ctxt.handle);
5699 mlog_errno(ret);
5700 break;
5701 }
5702
Joel Becker548b0f22008-11-24 19:32:13 -08005703 ret = ocfs2_xattr_bucket_value_truncate(inode, bucket,
Tao Ma78f30c32008-11-12 08:27:00 +08005704 i, 0, &ctxt);
Tao Ma88c3b062008-12-11 08:54:11 +08005705
5706 ocfs2_commit_trans(osb, ctxt.handle);
Tao Mace9c5a52009-08-18 11:43:59 +08005707 if (ctxt.meta_ac) {
5708 ocfs2_free_alloc_context(ctxt.meta_ac);
5709 ctxt.meta_ac = NULL;
5710 }
Tao Maa3944252008-08-18 17:38:54 +08005711 if (ret) {
5712 mlog_errno(ret);
5713 break;
5714 }
5715 }
5716
Tao Mace9c5a52009-08-18 11:43:59 +08005717 if (ctxt.meta_ac)
5718 ocfs2_free_alloc_context(ctxt.meta_ac);
Tao Ma78f30c32008-11-12 08:27:00 +08005719 ocfs2_schedule_truncate_log_flush(osb, 1);
5720 ocfs2_run_deallocs(osb, &ctxt.dealloc);
Tao Maa3944252008-08-18 17:38:54 +08005721 return ret;
5722}
5723
Mark Fasheh99219ae2008-10-07 14:52:59 -07005724/*
Tao Ma492a8a32009-08-18 11:43:17 +08005725 * Whenever we modify a xattr value root in the bucket(e.g, CoW
5726 * or change the extent record flag), we need to recalculate
5727 * the metaecc for the whole bucket. So it is done here.
5728 *
5729 * Note:
5730 * We have to give the extra credits for the caller.
5731 */
5732static int ocfs2_xattr_bucket_post_refcount(struct inode *inode,
5733 handle_t *handle,
5734 void *para)
5735{
5736 int ret;
5737 struct ocfs2_xattr_bucket *bucket =
5738 (struct ocfs2_xattr_bucket *)para;
5739
5740 ret = ocfs2_xattr_bucket_journal_access(handle, bucket,
5741 OCFS2_JOURNAL_ACCESS_WRITE);
5742 if (ret) {
5743 mlog_errno(ret);
5744 return ret;
5745 }
5746
5747 ocfs2_xattr_bucket_journal_dirty(handle, bucket);
5748
5749 return 0;
5750}
5751
5752/*
5753 * Special action we need if the xattr value is refcounted.
5754 *
5755 * 1. If the xattr is refcounted, lock the tree.
5756 * 2. CoW the xattr if we are setting the new value and the value
5757 * will be stored outside.
5758 * 3. In other case, decrease_refcount will work for us, so just
5759 * lock the refcount tree, calculate the meta and credits is OK.
5760 *
5761 * We have to do CoW before ocfs2_init_xattr_set_ctxt since
5762 * currently CoW is a completed transaction, while this function
5763 * will also lock the allocators and let us deadlock. So we will
5764 * CoW the whole xattr value.
5765 */
5766static int ocfs2_prepare_refcount_xattr(struct inode *inode,
5767 struct ocfs2_dinode *di,
5768 struct ocfs2_xattr_info *xi,
5769 struct ocfs2_xattr_search *xis,
5770 struct ocfs2_xattr_search *xbs,
5771 struct ocfs2_refcount_tree **ref_tree,
5772 int *meta_add,
5773 int *credits)
5774{
5775 int ret = 0;
5776 struct ocfs2_xattr_block *xb;
5777 struct ocfs2_xattr_entry *xe;
5778 char *base;
5779 u32 p_cluster, num_clusters;
5780 unsigned int ext_flags;
5781 int name_offset, name_len;
5782 struct ocfs2_xattr_value_buf vb;
5783 struct ocfs2_xattr_bucket *bucket = NULL;
5784 struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
5785 struct ocfs2_post_refcount refcount;
5786 struct ocfs2_post_refcount *p = NULL;
5787 struct buffer_head *ref_root_bh = NULL;
5788
5789 if (!xis->not_found) {
5790 xe = xis->here;
5791 name_offset = le16_to_cpu(xe->xe_name_offset);
5792 name_len = OCFS2_XATTR_SIZE(xe->xe_name_len);
5793 base = xis->base;
5794 vb.vb_bh = xis->inode_bh;
5795 vb.vb_access = ocfs2_journal_access_di;
5796 } else {
5797 int i, block_off = 0;
5798 xb = (struct ocfs2_xattr_block *)xbs->xattr_bh->b_data;
5799 xe = xbs->here;
5800 name_offset = le16_to_cpu(xe->xe_name_offset);
5801 name_len = OCFS2_XATTR_SIZE(xe->xe_name_len);
5802 i = xbs->here - xbs->header->xh_entries;
5803
5804 if (le16_to_cpu(xb->xb_flags) & OCFS2_XATTR_INDEXED) {
Tao Mafd68a892009-08-18 11:43:21 +08005805 ret = ocfs2_xattr_bucket_get_name_value(inode->i_sb,
Tao Ma492a8a32009-08-18 11:43:17 +08005806 bucket_xh(xbs->bucket),
5807 i, &block_off,
5808 &name_offset);
5809 if (ret) {
5810 mlog_errno(ret);
5811 goto out;
5812 }
5813 base = bucket_block(xbs->bucket, block_off);
5814 vb.vb_bh = xbs->bucket->bu_bhs[block_off];
5815 vb.vb_access = ocfs2_journal_access;
5816
5817 if (ocfs2_meta_ecc(osb)) {
5818 /*create parameters for ocfs2_post_refcount. */
5819 bucket = xbs->bucket;
5820 refcount.credits = bucket->bu_blocks;
5821 refcount.para = bucket;
5822 refcount.func =
5823 ocfs2_xattr_bucket_post_refcount;
5824 p = &refcount;
5825 }
5826 } else {
5827 base = xbs->base;
5828 vb.vb_bh = xbs->xattr_bh;
5829 vb.vb_access = ocfs2_journal_access_xb;
5830 }
5831 }
5832
5833 if (ocfs2_xattr_is_local(xe))
5834 goto out;
5835
5836 vb.vb_xv = (struct ocfs2_xattr_value_root *)
5837 (base + name_offset + name_len);
5838
5839 ret = ocfs2_xattr_get_clusters(inode, 0, &p_cluster,
5840 &num_clusters, &vb.vb_xv->xr_list,
5841 &ext_flags);
5842 if (ret) {
5843 mlog_errno(ret);
5844 goto out;
5845 }
5846
5847 /*
5848 * We just need to check the 1st extent record, since we always
5849 * CoW the whole xattr. So there shouldn't be a xattr with
5850 * some REFCOUNT extent recs after the 1st one.
5851 */
5852 if (!(ext_flags & OCFS2_EXT_REFCOUNTED))
5853 goto out;
5854
5855 ret = ocfs2_lock_refcount_tree(osb, le64_to_cpu(di->i_refcount_loc),
5856 1, ref_tree, &ref_root_bh);
5857 if (ret) {
5858 mlog_errno(ret);
5859 goto out;
5860 }
5861
5862 /*
5863 * If we are deleting the xattr or the new size will be stored inside,
5864 * cool, leave it there, the xattr truncate process will remove them
5865 * for us(it still needs the refcount tree lock and the meta, credits).
5866 * And the worse case is that every cluster truncate will split the
5867 * refcount tree, and make the original extent become 3. So we will need
5868 * 2 * cluster more extent recs at most.
5869 */
Joel Becker6b240ff2009-08-14 18:02:52 -07005870 if (!xi->xi_value || xi->xi_value_len <= OCFS2_XATTR_INLINE_SIZE) {
Tao Ma492a8a32009-08-18 11:43:17 +08005871
5872 ret = ocfs2_refcounted_xattr_delete_need(inode,
5873 &(*ref_tree)->rf_ci,
5874 ref_root_bh, vb.vb_xv,
5875 meta_add, credits);
5876 if (ret)
5877 mlog_errno(ret);
5878 goto out;
5879 }
5880
5881 ret = ocfs2_refcount_cow_xattr(inode, di, &vb,
5882 *ref_tree, ref_root_bh, 0,
5883 le32_to_cpu(vb.vb_xv->xr_clusters), p);
5884 if (ret)
5885 mlog_errno(ret);
5886
5887out:
5888 brelse(ref_root_bh);
5889 return ret;
5890}
5891
5892/*
Tao Ma01292412009-09-21 13:04:19 +08005893 * Add the REFCOUNTED flags for all the extent rec in ocfs2_xattr_value_root.
5894 * The physical clusters will be added to refcount tree.
5895 */
5896static int ocfs2_xattr_value_attach_refcount(struct inode *inode,
5897 struct ocfs2_xattr_value_root *xv,
5898 struct ocfs2_extent_tree *value_et,
5899 struct ocfs2_caching_info *ref_ci,
5900 struct buffer_head *ref_root_bh,
5901 struct ocfs2_cached_dealloc_ctxt *dealloc,
5902 struct ocfs2_post_refcount *refcount)
5903{
5904 int ret = 0;
5905 u32 clusters = le32_to_cpu(xv->xr_clusters);
5906 u32 cpos, p_cluster, num_clusters;
5907 struct ocfs2_extent_list *el = &xv->xr_list;
5908 unsigned int ext_flags;
5909
5910 cpos = 0;
5911 while (cpos < clusters) {
5912 ret = ocfs2_xattr_get_clusters(inode, cpos, &p_cluster,
5913 &num_clusters, el, &ext_flags);
Joseph Qi17caf952013-09-11 14:19:55 -07005914 if (ret) {
5915 mlog_errno(ret);
5916 break;
5917 }
Tao Ma01292412009-09-21 13:04:19 +08005918
5919 cpos += num_clusters;
5920 if ((ext_flags & OCFS2_EXT_REFCOUNTED))
5921 continue;
5922
5923 BUG_ON(!p_cluster);
5924
5925 ret = ocfs2_add_refcount_flag(inode, value_et,
5926 ref_ci, ref_root_bh,
5927 cpos - num_clusters,
5928 p_cluster, num_clusters,
5929 dealloc, refcount);
5930 if (ret) {
5931 mlog_errno(ret);
5932 break;
5933 }
5934 }
5935
5936 return ret;
5937}
5938
5939/*
5940 * Given a normal ocfs2_xattr_header, refcount all the entries which
5941 * have value stored outside.
5942 * Used for xattrs stored in inode and ocfs2_xattr_block.
5943 */
5944static int ocfs2_xattr_attach_refcount_normal(struct inode *inode,
5945 struct ocfs2_xattr_value_buf *vb,
5946 struct ocfs2_xattr_header *header,
5947 struct ocfs2_caching_info *ref_ci,
5948 struct buffer_head *ref_root_bh,
5949 struct ocfs2_cached_dealloc_ctxt *dealloc)
5950{
5951
5952 struct ocfs2_xattr_entry *xe;
5953 struct ocfs2_xattr_value_root *xv;
5954 struct ocfs2_extent_tree et;
5955 int i, ret = 0;
5956
5957 for (i = 0; i < le16_to_cpu(header->xh_count); i++) {
5958 xe = &header->xh_entries[i];
5959
5960 if (ocfs2_xattr_is_local(xe))
5961 continue;
5962
5963 xv = (struct ocfs2_xattr_value_root *)((void *)header +
5964 le16_to_cpu(xe->xe_name_offset) +
5965 OCFS2_XATTR_SIZE(xe->xe_name_len));
5966
5967 vb->vb_xv = xv;
5968 ocfs2_init_xattr_value_extent_tree(&et, INODE_CACHE(inode), vb);
5969
5970 ret = ocfs2_xattr_value_attach_refcount(inode, xv, &et,
5971 ref_ci, ref_root_bh,
5972 dealloc, NULL);
5973 if (ret) {
5974 mlog_errno(ret);
5975 break;
5976 }
5977 }
5978
5979 return ret;
5980}
5981
5982static int ocfs2_xattr_inline_attach_refcount(struct inode *inode,
5983 struct buffer_head *fe_bh,
5984 struct ocfs2_caching_info *ref_ci,
5985 struct buffer_head *ref_root_bh,
5986 struct ocfs2_cached_dealloc_ctxt *dealloc)
5987{
5988 struct ocfs2_dinode *di = (struct ocfs2_dinode *)fe_bh->b_data;
5989 struct ocfs2_xattr_header *header = (struct ocfs2_xattr_header *)
5990 (fe_bh->b_data + inode->i_sb->s_blocksize -
5991 le16_to_cpu(di->i_xattr_inline_size));
5992 struct ocfs2_xattr_value_buf vb = {
5993 .vb_bh = fe_bh,
5994 .vb_access = ocfs2_journal_access_di,
5995 };
5996
5997 return ocfs2_xattr_attach_refcount_normal(inode, &vb, header,
5998 ref_ci, ref_root_bh, dealloc);
5999}
6000
/* Context passed to the per-bucket refcount callback for indexed xattrs. */
struct ocfs2_xattr_tree_value_refcount_para {
	struct ocfs2_caching_info *ref_ci;		/* refcount tree cache */
	struct buffer_head *ref_root_bh;		/* refcount tree root block */
	struct ocfs2_cached_dealloc_ctxt *dealloc;	/* deferred frees */
};
6006
6007static int ocfs2_get_xattr_tree_value_root(struct super_block *sb,
6008 struct ocfs2_xattr_bucket *bucket,
6009 int offset,
6010 struct ocfs2_xattr_value_root **xv,
6011 struct buffer_head **bh)
6012{
6013 int ret, block_off, name_offset;
6014 struct ocfs2_xattr_header *xh = bucket_xh(bucket);
6015 struct ocfs2_xattr_entry *xe = &xh->xh_entries[offset];
6016 void *base;
6017
6018 ret = ocfs2_xattr_bucket_get_name_value(sb,
6019 bucket_xh(bucket),
6020 offset,
6021 &block_off,
6022 &name_offset);
6023 if (ret) {
6024 mlog_errno(ret);
6025 goto out;
6026 }
6027
6028 base = bucket_block(bucket, block_off);
6029
6030 *xv = (struct ocfs2_xattr_value_root *)(base + name_offset +
6031 OCFS2_XATTR_SIZE(xe->xe_name_len));
6032
6033 if (bh)
6034 *bh = bucket->bu_bhs[block_off];
6035out:
6036 return ret;
6037}
6038
/*
 * For a given xattr bucket, refcount all the entries which
 * have value stored outside.
 *
 * @para is a struct ocfs2_xattr_tree_value_refcount_para supplying the
 * refcount tree cache, root block and dealloc context.
 */
static int ocfs2_xattr_bucket_value_refcount(struct inode *inode,
					     struct ocfs2_xattr_bucket *bucket,
					     void *para)
{
	int i, ret = 0;
	struct ocfs2_extent_tree et;
	struct ocfs2_xattr_tree_value_refcount_para *ref =
			(struct ocfs2_xattr_tree_value_refcount_para *)para;
	struct ocfs2_xattr_header *xh =
			(struct ocfs2_xattr_header *)bucket->bu_bhs[0]->b_data;
	struct ocfs2_xattr_entry *xe;
	struct ocfs2_xattr_value_buf vb = {
		.vb_access = ocfs2_journal_access,
	};
	/* Re-dirty the whole bucket after value changes (metaecc). */
	struct ocfs2_post_refcount refcount = {
		.credits = bucket->bu_blocks,
		.para = bucket,
		.func = ocfs2_xattr_bucket_post_refcount,
	};
	struct ocfs2_post_refcount *p = NULL;

	/* We only need post_refcount if we support metaecc. */
	if (ocfs2_meta_ecc(OCFS2_SB(inode->i_sb)))
		p = &refcount;

	trace_ocfs2_xattr_bucket_value_refcount(
				(unsigned long long)bucket_blkno(bucket),
				le16_to_cpu(xh->xh_count));
	for (i = 0; i < le16_to_cpu(xh->xh_count); i++) {
		xe = &xh->xh_entries[i];

		/* Inline values have no external clusters to refcount. */
		if (ocfs2_xattr_is_local(xe))
			continue;

		ret = ocfs2_get_xattr_tree_value_root(inode->i_sb, bucket, i,
						      &vb.vb_xv, &vb.vb_bh);
		if (ret) {
			mlog_errno(ret);
			break;
		}

		ocfs2_init_xattr_value_extent_tree(&et,
						   INODE_CACHE(inode), &vb);

		ret = ocfs2_xattr_value_attach_refcount(inode, vb.vb_xv,
							&et, ref->ref_ci,
							ref->ref_root_bh,
							ref->dealloc, p);
		if (ret) {
			mlog_errno(ret);
			break;
		}
	}

	return ret;

}
6100
6101static int ocfs2_refcount_xattr_tree_rec(struct inode *inode,
6102 struct buffer_head *root_bh,
6103 u64 blkno, u32 cpos, u32 len, void *para)
6104{
6105 return ocfs2_iterate_xattr_buckets(inode, blkno, len,
6106 ocfs2_xattr_bucket_value_refcount,
6107 para);
6108}
6109
6110static int ocfs2_xattr_block_attach_refcount(struct inode *inode,
6111 struct buffer_head *blk_bh,
6112 struct ocfs2_caching_info *ref_ci,
6113 struct buffer_head *ref_root_bh,
6114 struct ocfs2_cached_dealloc_ctxt *dealloc)
6115{
6116 int ret = 0;
6117 struct ocfs2_xattr_block *xb =
6118 (struct ocfs2_xattr_block *)blk_bh->b_data;
6119
6120 if (!(le16_to_cpu(xb->xb_flags) & OCFS2_XATTR_INDEXED)) {
6121 struct ocfs2_xattr_header *header = &xb->xb_attrs.xb_header;
6122 struct ocfs2_xattr_value_buf vb = {
6123 .vb_bh = blk_bh,
6124 .vb_access = ocfs2_journal_access_xb,
6125 };
6126
6127 ret = ocfs2_xattr_attach_refcount_normal(inode, &vb, header,
6128 ref_ci, ref_root_bh,
6129 dealloc);
6130 } else {
6131 struct ocfs2_xattr_tree_value_refcount_para para = {
6132 .ref_ci = ref_ci,
6133 .ref_root_bh = ref_root_bh,
6134 .dealloc = dealloc,
6135 };
6136
6137 ret = ocfs2_iterate_xattr_index_block(inode, blk_bh,
6138 ocfs2_refcount_xattr_tree_rec,
6139 &para);
6140 }
6141
6142 return ret;
6143}
6144
/*
 * Attach the refcount tree to every externally-stored xattr value of
 * @inode: first the inline-in-inode xattrs (if present), then the
 * external xattr block pointed to by di->i_xattr_loc (if any).
 */
int ocfs2_xattr_attach_refcount_tree(struct inode *inode,
				     struct buffer_head *fe_bh,
				     struct ocfs2_caching_info *ref_ci,
				     struct buffer_head *ref_root_bh,
				     struct ocfs2_cached_dealloc_ctxt *dealloc)
{
	int ret = 0;
	struct ocfs2_inode_info *oi = OCFS2_I(inode);
	struct ocfs2_dinode *di = (struct ocfs2_dinode *)fe_bh->b_data;
	struct buffer_head *blk_bh = NULL;

	if (oi->ip_dyn_features & OCFS2_INLINE_XATTR_FL) {
		ret = ocfs2_xattr_inline_attach_refcount(inode, fe_bh,
							 ref_ci, ref_root_bh,
							 dealloc);
		if (ret) {
			mlog_errno(ret);
			goto out;
		}
	}

	/* No external xattr block: we are done. */
	if (!di->i_xattr_loc)
		goto out;

	ret = ocfs2_read_xattr_block(inode, le64_to_cpu(di->i_xattr_loc),
				     &blk_bh);
	if (ret < 0) {
		mlog_errno(ret);
		goto out;
	}

	ret = ocfs2_xattr_block_attach_refcount(inode, blk_bh, ref_ci,
						ref_root_bh, dealloc);
	if (ret)
		mlog_errno(ret);

	brelse(blk_bh);
out:

	return ret;
}
6186
/*
 * Filter callback deciding whether an individual xattr entry should be
 * copied during reflink; returns non-zero to copy it.
 */
typedef int (should_xattr_reflinked)(struct ocfs2_xattr_entry *xe);
/*
 * Store the information we need in xattr reflink.
 * old_bh and new_bh are inode bh for the old and new inode.
 */
struct ocfs2_xattr_reflink {
	struct inode *old_inode;
	struct inode *new_inode;
	struct buffer_head *old_bh;
	struct buffer_head *new_bh;
	struct ocfs2_caching_info *ref_ci;
	struct buffer_head *ref_root_bh;
	struct ocfs2_cached_dealloc_ctxt *dealloc;
	should_xattr_reflinked *xattr_reflinked;	/* NULL => reflink all */
};
6202
/*
 * Given a xattr header and xe offset,
 * return the proper xv and the corresponding bh.
 * xattr in inode, block and xattr tree have different implementations.
 */
typedef int (get_xattr_value_root)(struct super_block *sb,
				   struct buffer_head *bh,
				   struct ocfs2_xattr_header *xh,
				   int offset,
				   struct ocfs2_xattr_value_root **xv,
				   struct buffer_head **ret_bh,
				   void *para);
6215
/*
 * Calculate all the xattr value root metadata stored in this xattr header and
 * credits we need if we create them from the scratch.
 * We use get_xattr_value_root so that all types of xattr container can use it.
 *
 * On success, *metas, *credits and *num_recs have been incremented by
 * the totals for every non-local entry in @xh.
 */
static int ocfs2_value_metas_in_xattr_header(struct super_block *sb,
					     struct buffer_head *bh,
					     struct ocfs2_xattr_header *xh,
					     int *metas, int *credits,
					     int *num_recs,
					     get_xattr_value_root *func,
					     void *para)
{
	int i, ret = 0;
	struct ocfs2_xattr_value_root *xv;
	struct ocfs2_xattr_entry *xe;

	for (i = 0; i < le16_to_cpu(xh->xh_count); i++) {
		xe = &xh->xh_entries[i];
		/* Inline values carry no external metadata. */
		if (ocfs2_xattr_is_local(xe))
			continue;

		ret = func(sb, bh, xh, i, &xv, NULL, para);
		if (ret) {
			mlog_errno(ret);
			break;
		}

		/* One metadata block per extent record at each tree level. */
		*metas += le16_to_cpu(xv->xr_list.l_tree_depth) *
			  le16_to_cpu(xv->xr_list.l_next_free_rec);

		*credits += ocfs2_calc_extend_credits(sb,
						      &def_xv.xv.xr_list);

		/*
		 * If the value is a tree with depth > 1, We don't go deep
		 * to the extent block, so just calculate a maximum record num.
		 */
		if (!xv->xr_list.l_tree_depth)
			*num_recs += le16_to_cpu(xv->xr_list.l_next_free_rec);
		else
			*num_recs += ocfs2_clusters_for_bytes(sb,
							      XATTR_SIZE_MAX);
	}

	return ret;
}
6263
6264/* Used by xattr inode and block to return the right xv and buffer_head. */
6265static int ocfs2_get_xattr_value_root(struct super_block *sb,
6266 struct buffer_head *bh,
6267 struct ocfs2_xattr_header *xh,
6268 int offset,
6269 struct ocfs2_xattr_value_root **xv,
6270 struct buffer_head **ret_bh,
6271 void *para)
6272{
6273 struct ocfs2_xattr_entry *xe = &xh->xh_entries[offset];
6274
6275 *xv = (struct ocfs2_xattr_value_root *)((void *)xh +
6276 le16_to_cpu(xe->xe_name_offset) +
6277 OCFS2_XATTR_SIZE(xe->xe_name_len));
6278
6279 if (ret_bh)
6280 *ret_bh = bh;
6281
6282 return 0;
6283}
6284
/*
 * Lock the meta_ac and calculate how much credits we need for reflink xattrs.
 * It is only used for inline xattr and xattr block.
 *
 * On success *credits holds the journal credits and *meta_ac a metadata
 * allocation context reserved for the copy; the caller must free it.
 */
static int ocfs2_reflink_lock_xattr_allocators(struct ocfs2_super *osb,
					       struct ocfs2_xattr_header *xh,
					       struct buffer_head *ref_root_bh,
					       int *credits,
					       struct ocfs2_alloc_context **meta_ac)
{
	int ret, meta_add = 0, num_recs = 0;
	struct ocfs2_refcount_block *rb =
			(struct ocfs2_refcount_block *)ref_root_bh->b_data;

	*credits = 0;

	ret = ocfs2_value_metas_in_xattr_header(osb->sb, NULL, xh,
						&meta_add, credits, &num_recs,
						ocfs2_get_xattr_value_root,
						NULL);
	if (ret) {
		mlog_errno(ret);
		goto out;
	}

	/*
	 * We need to add/modify num_recs in refcount tree, so just calculate
	 * an approximate number we need for refcount tree change.
	 * Sometimes we need to split the tree, and after split, half recs
	 * will be moved to the new block, and a new block can only provide
	 * half number of recs. So we multiple new blocks by 2.
	 */
	num_recs = num_recs / ocfs2_refcount_recs_per_rb(osb->sb) * 2;
	meta_add += num_recs;
	*credits += num_recs + num_recs * OCFS2_EXPAND_REFCOUNT_TREE_CREDITS;
	if (le32_to_cpu(rb->rf_flags) & OCFS2_REFCOUNT_TREE_FL)
		*credits += le16_to_cpu(rb->rf_list.l_tree_depth) *
			    le16_to_cpu(rb->rf_list.l_next_free_rec) + 1;
	else
		*credits += 1;

	ret = ocfs2_reserve_new_metadata_blocks(osb, meta_add, meta_ac);
	if (ret)
		mlog_errno(ret);

out:
	return ret;
}
6333
/*
 * Given a xattr header, reflink all the xattrs in this container.
 * It can be used for inode, block and bucket.
 *
 * NOTE:
 * Before we call this function, the caller has memcpy the xattr in
 * old_xh to the new_xh.
 *
 * If args.xattr_reflinked is set, call it to decide whether the xe should
 * be reflinked or not. If not, remove it from the new xattr header.
 */
static int ocfs2_reflink_xattr_header(handle_t *handle,
				      struct ocfs2_xattr_reflink *args,
				      struct buffer_head *old_bh,
				      struct ocfs2_xattr_header *xh,
				      struct buffer_head *new_bh,
				      struct ocfs2_xattr_header *new_xh,
				      struct ocfs2_xattr_value_buf *vb,
				      struct ocfs2_alloc_context *meta_ac,
				      get_xattr_value_root *func,
				      void *para)
{
	int ret = 0, i, j;
	struct super_block *sb = args->old_inode->i_sb;
	struct buffer_head *value_bh;
	struct ocfs2_xattr_entry *xe, *last;
	struct ocfs2_xattr_value_root *xv, *new_xv;
	struct ocfs2_extent_tree data_et;
	u32 clusters, cpos, p_cluster, num_clusters;
	unsigned int ext_flags = 0;

	trace_ocfs2_reflink_xattr_header((unsigned long long)old_bh->b_blocknr,
					 le16_to_cpu(xh->xh_count));

	last = &new_xh->xh_entries[le16_to_cpu(new_xh->xh_count)];
	/* i walks the old header, j the (possibly compacted) new one. */
	for (i = 0, j = 0; i < le16_to_cpu(xh->xh_count); i++, j++) {
		xe = &xh->xh_entries[i];

		if (args->xattr_reflinked && !args->xattr_reflinked(xe)) {
			/* Entry filtered out: remove its copy from new_xh. */
			xe = &new_xh->xh_entries[j];

			le16_add_cpu(&new_xh->xh_count, -1);
			if (new_xh->xh_count) {
				memmove(xe, xe + 1,
					(void *)last - (void *)xe);
				memset(last, 0,
				       sizeof(struct ocfs2_xattr_entry));
			}

			/*
			 * We don't want j to increase in the next round since
			 * it is already moved ahead.
			 */
			j--;
			continue;
		}

		/* Inline values were fully copied by the caller's memcpy. */
		if (ocfs2_xattr_is_local(xe))
			continue;

		ret = func(sb, old_bh, xh, i, &xv, NULL, para);
		if (ret) {
			mlog_errno(ret);
			break;
		}

		ret = func(sb, new_bh, new_xh, j, &new_xv, &value_bh, para);
		if (ret) {
			mlog_errno(ret);
			break;
		}

		/*
		 * For the xattr which has l_tree_depth = 0, all the extent
		 * recs have already be copied to the new xh with the
		 * appropriate OCFS2_EXT_REFCOUNTED flag we just need to
		 * increase the refcount count in the refcount tree.
		 *
		 * For the xattr which has l_tree_depth > 0, we need
		 * to initialize it to the empty default value root,
		 * and then insert the extents one by one.
		 */
		if (xv->xr_list.l_tree_depth) {
			memcpy(new_xv, &def_xv, sizeof(def_xv));
			vb->vb_xv = new_xv;
			vb->vb_bh = value_bh;
			ocfs2_init_xattr_value_extent_tree(&data_et,
					INODE_CACHE(args->new_inode), vb);
		}

		clusters = le32_to_cpu(xv->xr_clusters);
		cpos = 0;
		while (cpos < clusters) {
			ret = ocfs2_xattr_get_clusters(args->old_inode,
						       cpos,
						       &p_cluster,
						       &num_clusters,
						       &xv->xr_list,
						       &ext_flags);
			if (ret) {
				mlog_errno(ret);
				goto out;
			}

			BUG_ON(!p_cluster);

			if (xv->xr_list.l_tree_depth) {
				ret = ocfs2_insert_extent(handle,
						&data_et, cpos,
						ocfs2_clusters_to_blocks(
							args->old_inode->i_sb,
							p_cluster),
						num_clusters, ext_flags,
						meta_ac);
				if (ret) {
					mlog_errno(ret);
					goto out;
				}
			}

			/* Old and new values now share these clusters. */
			ret = ocfs2_increase_refcount(handle, args->ref_ci,
						      args->ref_root_bh,
						      p_cluster, num_clusters,
						      meta_ac, args->dealloc);
			if (ret) {
				mlog_errno(ret);
				goto out;
			}

			cpos += num_clusters;
		}
	}

out:
	return ret;
}
6470
/*
 * Reflink the inline-in-inode xattrs from args->old_bh into the new
 * inode: copy the header region, then share each external value via
 * the refcount tree inside one transaction.
 */
static int ocfs2_reflink_xattr_inline(struct ocfs2_xattr_reflink *args)
{
	int ret = 0, credits = 0;
	handle_t *handle;
	struct ocfs2_super *osb = OCFS2_SB(args->old_inode->i_sb);
	struct ocfs2_dinode *di = (struct ocfs2_dinode *)args->old_bh->b_data;
	int inline_size = le16_to_cpu(di->i_xattr_inline_size);
	int header_off = osb->sb->s_blocksize - inline_size;
	struct ocfs2_xattr_header *xh = (struct ocfs2_xattr_header *)
					(args->old_bh->b_data + header_off);
	struct ocfs2_xattr_header *new_xh = (struct ocfs2_xattr_header *)
					(args->new_bh->b_data + header_off);
	struct ocfs2_alloc_context *meta_ac = NULL;
	struct ocfs2_inode_info *new_oi;
	struct ocfs2_dinode *new_di;
	struct ocfs2_xattr_value_buf vb = {
		.vb_bh = args->new_bh,
		.vb_access = ocfs2_journal_access_di,
	};

	ret = ocfs2_reflink_lock_xattr_allocators(osb, xh, args->ref_root_bh,
						  &credits, &meta_ac);
	if (ret) {
		mlog_errno(ret);
		goto out;
	}

	handle = ocfs2_start_trans(osb, credits);
	if (IS_ERR(handle)) {
		ret = PTR_ERR(handle);
		mlog_errno(ret);
		goto out;
	}

	ret = ocfs2_journal_access_di(handle, INODE_CACHE(args->new_inode),
				      args->new_bh, OCFS2_JOURNAL_ACCESS_WRITE);
	if (ret) {
		mlog_errno(ret);
		goto out_commit;
	}

	/* Raw copy of the inline xattr area; values are fixed up below. */
	memcpy(args->new_bh->b_data + header_off,
	       args->old_bh->b_data + header_off, inline_size);

	new_di = (struct ocfs2_dinode *)args->new_bh->b_data;
	new_di->i_xattr_inline_size = cpu_to_le16(inline_size);

	ret = ocfs2_reflink_xattr_header(handle, args, args->old_bh, xh,
					 args->new_bh, new_xh, &vb, meta_ac,
					 ocfs2_get_xattr_value_root, NULL);
	if (ret) {
		mlog_errno(ret);
		goto out_commit;
	}

	new_oi = OCFS2_I(args->new_inode);
	/*
	 * Adjust extent record count to reserve space for extended attribute.
	 * Inline data count had been adjusted in ocfs2_duplicate_inline_data().
	 */
	if (!(new_oi->ip_dyn_features & OCFS2_INLINE_DATA_FL) &&
	    !(ocfs2_inode_is_fast_symlink(args->new_inode))) {
		struct ocfs2_extent_list *el = &new_di->id2.i_list;
		le16_add_cpu(&el->l_count, -(inline_size /
					sizeof(struct ocfs2_extent_rec)));
	}
	spin_lock(&new_oi->ip_lock);
	new_oi->ip_dyn_features |= OCFS2_HAS_XATTR_FL | OCFS2_INLINE_XATTR_FL;
	new_di->i_dyn_features = cpu_to_le16(new_oi->ip_dyn_features);
	spin_unlock(&new_oi->ip_lock);

	ocfs2_journal_dirty(handle, args->new_bh);

out_commit:
	ocfs2_commit_trans(osb, handle);

out:
	if (meta_ac)
		ocfs2_free_alloc_context(meta_ac);
	return ret;
}
6552
/*
 * Allocate and attach a fresh, empty xattr block to @inode, returning
 * its buffer head in *ret_bh.  @indexed selects the bucketed format.
 */
static int ocfs2_create_empty_xattr_block(struct inode *inode,
					  struct buffer_head *fe_bh,
					  struct buffer_head **ret_bh,
					  int indexed)
{
	int ret;
	struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
	struct ocfs2_xattr_set_ctxt ctxt;

	memset(&ctxt, 0, sizeof(ctxt));
	/* One metadata block for the xattr block itself. */
	ret = ocfs2_reserve_new_metadata_blocks(osb, 1, &ctxt.meta_ac);
	if (ret < 0) {
		mlog_errno(ret);
		return ret;
	}

	ctxt.handle = ocfs2_start_trans(osb, OCFS2_XATTR_BLOCK_CREATE_CREDITS);
	if (IS_ERR(ctxt.handle)) {
		ret = PTR_ERR(ctxt.handle);
		mlog_errno(ret);
		goto out;
	}

	trace_ocfs2_create_empty_xattr_block(
				(unsigned long long)fe_bh->b_blocknr, indexed);
	ret = ocfs2_create_xattr_block(inode, fe_bh, &ctxt, indexed,
				       ret_bh);
	if (ret)
		mlog_errno(ret);

	ocfs2_commit_trans(osb, ctxt.handle);
out:
	ocfs2_free_alloc_context(ctxt.meta_ac);
	return ret;
}
6588
/*
 * Reflink a non-indexed xattr block: copy the whole xattr header area of
 * @blk_bh into @new_blk_bh and CoW/refcount every externally stored value
 * via ocfs2_reflink_xattr_header().
 *
 * Also sets OCFS2_HAS_XATTR_FL on the new inode (both in memory and in
 * its dinode) if it was not set yet; the "+ 1" journal credit below is
 * reserved for exactly that dinode update.
 *
 * Returns 0 on success or a negative error code.
 */
static int ocfs2_reflink_xattr_block(struct ocfs2_xattr_reflink *args,
				     struct buffer_head *blk_bh,
				     struct buffer_head *new_blk_bh)
{
	int ret = 0, credits = 0;
	handle_t *handle;
	struct ocfs2_inode_info *new_oi = OCFS2_I(args->new_inode);
	struct ocfs2_dinode *new_di;
	struct ocfs2_super *osb = OCFS2_SB(args->new_inode->i_sb);
	int header_off = offsetof(struct ocfs2_xattr_block, xb_attrs.xb_header);
	struct ocfs2_xattr_block *xb =
		(struct ocfs2_xattr_block *)blk_bh->b_data;
	struct ocfs2_xattr_header *xh = &xb->xb_attrs.xb_header;
	struct ocfs2_xattr_block *new_xb =
		(struct ocfs2_xattr_block *)new_blk_bh->b_data;
	struct ocfs2_xattr_header *new_xh = &new_xb->xb_attrs.xb_header;
	struct ocfs2_alloc_context *meta_ac;
	struct ocfs2_xattr_value_buf vb = {
		.vb_bh = new_blk_bh,
		.vb_access = ocfs2_journal_access_xb,
	};

	/* Work out credits and reserve metadata for the value trees. */
	ret = ocfs2_reflink_lock_xattr_allocators(osb, xh, args->ref_root_bh,
						  &credits, &meta_ac);
	if (ret) {
		mlog_errno(ret);
		return ret;
	}

	/* One more credits in case we need to add xattr flags in new inode. */
	handle = ocfs2_start_trans(osb, credits + 1);
	if (IS_ERR(handle)) {
		ret = PTR_ERR(handle);
		mlog_errno(ret);
		goto out;
	}

	/* Only journal the dinode if we will have to set the xattr flag. */
	if (!(new_oi->ip_dyn_features & OCFS2_HAS_XATTR_FL)) {
		ret = ocfs2_journal_access_di(handle,
					      INODE_CACHE(args->new_inode),
					      args->new_bh,
					      OCFS2_JOURNAL_ACCESS_WRITE);
		if (ret) {
			mlog_errno(ret);
			goto out_commit;
		}
	}

	ret = ocfs2_journal_access_xb(handle, INODE_CACHE(args->new_inode),
				      new_blk_bh, OCFS2_JOURNAL_ACCESS_WRITE);
	if (ret) {
		mlog_errno(ret);
		goto out_commit;
	}

	/* Copy everything from the xattr header to the end of the block. */
	memcpy(new_blk_bh->b_data + header_off, blk_bh->b_data + header_off,
	       osb->sb->s_blocksize - header_off);

	ret = ocfs2_reflink_xattr_header(handle, args, blk_bh, xh,
					 new_blk_bh, new_xh, &vb, meta_ac,
					 ocfs2_get_xattr_value_root, NULL);
	if (ret) {
		mlog_errno(ret);
		goto out_commit;
	}

	ocfs2_journal_dirty(handle, new_blk_bh);

	if (!(new_oi->ip_dyn_features & OCFS2_HAS_XATTR_FL)) {
		new_di = (struct ocfs2_dinode *)args->new_bh->b_data;
		/* ip_lock guards ip_dyn_features updates. */
		spin_lock(&new_oi->ip_lock);
		new_oi->ip_dyn_features |= OCFS2_HAS_XATTR_FL;
		new_di->i_dyn_features = cpu_to_le16(new_oi->ip_dyn_features);
		spin_unlock(&new_oi->ip_lock);

		ocfs2_journal_dirty(handle, args->new_bh);
	}

out_commit:
	ocfs2_commit_trans(osb, handle);

out:
	ocfs2_free_alloc_context(meta_ac);
	return ret;
}
6674
/* Context threaded through the indexed-xattr-tree reflink walkers below. */
struct ocfs2_reflink_xattr_tree_args {
	struct ocfs2_xattr_reflink *reflink;	/* inode-level reflink arguments */
	struct buffer_head *old_blk_bh;		/* root bh of the old xattr tree */
	struct buffer_head *new_blk_bh;		/* root bh of the new xattr tree */
	struct ocfs2_xattr_bucket *old_bucket;	/* scratch bucket for reading old data */
	struct ocfs2_xattr_bucket *new_bucket;	/* scratch bucket for building new data */
};
6682
6683/*
6684 * NOTE:
6685 * We have to handle the case that both old bucket and new bucket
6686 * will call this function to get the right ret_bh.
6687 * So The caller must give us the right bh.
6688 */
6689static int ocfs2_get_reflink_xattr_value_root(struct super_block *sb,
6690 struct buffer_head *bh,
6691 struct ocfs2_xattr_header *xh,
6692 int offset,
6693 struct ocfs2_xattr_value_root **xv,
6694 struct buffer_head **ret_bh,
6695 void *para)
6696{
6697 struct ocfs2_reflink_xattr_tree_args *args =
6698 (struct ocfs2_reflink_xattr_tree_args *)para;
6699 struct ocfs2_xattr_bucket *bucket;
6700
6701 if (bh == args->old_bucket->bu_bhs[0])
6702 bucket = args->old_bucket;
6703 else
6704 bucket = args->new_bucket;
6705
6706 return ocfs2_get_xattr_tree_value_root(sb, bucket, offset,
6707 xv, ret_bh);
6708}
6709
/* Accumulator used while estimating allocator needs for a reflink. */
struct ocfs2_value_tree_metas {
	int num_metas;	/* metadata blocks needed for the value trees */
	int credits;	/* journal credits needed */
	int num_recs;	/* refcount records that may be added/modified */
};
6715
/*
 * Value-root lookup callback used while counting metadata: @para carries
 * the bucket being scanned (@bh and @xh are not needed here).
 */
static int ocfs2_value_tree_metas_in_bucket(struct super_block *sb,
					    struct buffer_head *bh,
					    struct ocfs2_xattr_header *xh,
					    int offset,
					    struct ocfs2_xattr_value_root **xv,
					    struct buffer_head **ret_bh,
					    void *para)
{
	return ocfs2_get_xattr_tree_value_root(sb,
					       (struct ocfs2_xattr_bucket *)para,
					       offset, xv, ret_bh);
}
6730
/*
 * Per-bucket callback for ocfs2_iterate_xattr_buckets(): fold the journal
 * credits and metadata blocks needed to reflink all value trees stored in
 * @bucket into @para (a struct ocfs2_value_tree_metas).
 */
static int ocfs2_calc_value_tree_metas(struct inode *inode,
				       struct ocfs2_xattr_bucket *bucket,
				       void *para)
{
	struct ocfs2_value_tree_metas *metas =
		(struct ocfs2_value_tree_metas *)para;
	struct ocfs2_xattr_header *xh =
		(struct ocfs2_xattr_header *)bucket->bu_bhs[0]->b_data;

	/* Add the credits for this bucket first. */
	metas->credits += bucket->bu_blocks;
	return ocfs2_value_metas_in_xattr_header(inode->i_sb, bucket->bu_bhs[0],
					xh, &metas->num_metas,
					&metas->credits, &metas->num_recs,
					ocfs2_value_tree_metas_in_bucket,
					bucket);
}
6748
/*
 * Given a xattr extent rec starting from blkno and having len clusters,
 * iterate all the buckets calculate how much metadata we need for reflinking
 * all the ocfs2_xattr_value_root and lock the allocators accordingly.
 *
 * On success, *credits holds the total journal credits and *meta_ac /
 * *data_ac hold the reserved allocator contexts (either may stay NULL if
 * nothing needs reserving). On failure, *meta_ac is freed and NULLed;
 * data_ac can only be set by the last step, so it needs no cleanup here.
 */
static int ocfs2_lock_reflink_xattr_rec_allocators(
				struct ocfs2_reflink_xattr_tree_args *args,
				struct ocfs2_extent_tree *xt_et,
				u64 blkno, u32 len, int *credits,
				struct ocfs2_alloc_context **meta_ac,
				struct ocfs2_alloc_context **data_ac)
{
	int ret, num_free_extents;
	struct ocfs2_value_tree_metas metas;
	struct ocfs2_super *osb = OCFS2_SB(args->reflink->old_inode->i_sb);
	struct ocfs2_refcount_block *rb;

	memset(&metas, 0, sizeof(metas));

	/* Sum up the needs of every bucket covered by this extent rec. */
	ret = ocfs2_iterate_xattr_buckets(args->reflink->old_inode, blkno, len,
					  ocfs2_calc_value_tree_metas, &metas);
	if (ret) {
		mlog_errno(ret);
		goto out;
	}

	*credits = metas.credits;

	/*
	 * Calculate we need for refcount tree change.
	 *
	 * We need to add/modify num_recs in refcount tree, so just calculate
	 * an approximate number we need for refcount tree change.
	 * Sometimes we need to split the tree, and after split, half recs
	 * will be moved to the new block, and a new block can only provide
	 * half number of recs. So we multiple new blocks by 2.
	 * In the end, we have to add credits for modifying the already
	 * existed refcount block.
	 */
	rb = (struct ocfs2_refcount_block *)args->reflink->ref_root_bh->b_data;
	metas.num_recs =
		(metas.num_recs + ocfs2_refcount_recs_per_rb(osb->sb) - 1) /
		 ocfs2_refcount_recs_per_rb(osb->sb) * 2;
	metas.num_metas += metas.num_recs;
	*credits += metas.num_recs +
		    metas.num_recs * OCFS2_EXPAND_REFCOUNT_TREE_CREDITS;
	if (le32_to_cpu(rb->rf_flags) & OCFS2_REFCOUNT_TREE_FL)
		*credits += le16_to_cpu(rb->rf_list.l_tree_depth) *
			    le16_to_cpu(rb->rf_list.l_next_free_rec) + 1;
	else
		*credits += 1;

	/* count in the xattr tree change. */
	num_free_extents = ocfs2_num_free_extents(osb, xt_et);
	if (num_free_extents < 0) {
		ret = num_free_extents;
		mlog_errno(ret);
		goto out;
	}

	/* If the tree cannot absorb len new extents, it may need to grow. */
	if (num_free_extents < len)
		metas.num_metas += ocfs2_extend_meta_needed(xt_et->et_root_el);

	*credits += ocfs2_calc_extend_credits(osb->sb,
					      xt_et->et_root_el);

	if (metas.num_metas) {
		ret = ocfs2_reserve_new_metadata_blocks(osb, metas.num_metas,
							meta_ac);
		if (ret) {
			mlog_errno(ret);
			goto out;
		}
	}

	if (len) {
		ret = ocfs2_reserve_clusters(osb, len, data_ac);
		if (ret)
			mlog_errno(ret);
	}
out:
	if (ret) {
		if (*meta_ac) {
			ocfs2_free_alloc_context(*meta_ac);
			*meta_ac = NULL;
		}
	}

	return ret;
}
6839
/*
 * Reflink @num_buckets xattr buckets, copying them block by block from
 * @blkno to @new_blkno and CoW-ing/refcounting their externally stored
 * values via ocfs2_reflink_xattr_header().
 *
 * @cpos is an out parameter: it receives the name hash of the first entry
 * of the first new bucket, which the caller uses as the extent's cpos.
 * @clusters is currently unused here; allocation is done by the caller.
 *
 * Returns 0 on success or a negative error code (the loop stops at the
 * first failure; the scratch buckets are always released).
 */
static int ocfs2_reflink_xattr_bucket(handle_t *handle,
				u64 blkno, u64 new_blkno, u32 clusters,
				u32 *cpos, int num_buckets,
				struct ocfs2_alloc_context *meta_ac,
				struct ocfs2_alloc_context *data_ac,
				struct ocfs2_reflink_xattr_tree_args *args)
{
	int i, j, ret = 0;
	struct super_block *sb = args->reflink->old_inode->i_sb;
	int bpb = args->old_bucket->bu_blocks;
	struct ocfs2_xattr_value_buf vb = {
		.vb_access = ocfs2_journal_access,
	};

	for (i = 0; i < num_buckets; i++, blkno += bpb, new_blkno += bpb) {
		ret = ocfs2_read_xattr_bucket(args->old_bucket, blkno);
		if (ret) {
			mlog_errno(ret);
			break;
		}

		ret = ocfs2_init_xattr_bucket(args->new_bucket, new_blkno, 1);
		if (ret) {
			mlog_errno(ret);
			break;
		}

		ret = ocfs2_xattr_bucket_journal_access(handle,
						args->new_bucket,
						OCFS2_JOURNAL_ACCESS_CREATE);
		if (ret) {
			mlog_errno(ret);
			break;
		}

		/* Raw copy of every block of the bucket. */
		for (j = 0; j < bpb; j++)
			memcpy(bucket_block(args->new_bucket, j),
			       bucket_block(args->old_bucket, j),
			       sb->s_blocksize);

		/*
		 * Record the start cpos so that we can use it to initialize
		 * our xattr tree; we also set the xh_num_buckets for the new
		 * bucket.
		 */
		if (i == 0) {
			*cpos = le32_to_cpu(bucket_xh(args->new_bucket)->
					    xh_entries[0].xe_name_hash);
			bucket_xh(args->new_bucket)->xh_num_buckets =
				cpu_to_le16(num_buckets);
		}

		ocfs2_xattr_bucket_journal_dirty(handle, args->new_bucket);

		ret = ocfs2_reflink_xattr_header(handle, args->reflink,
						 args->old_bucket->bu_bhs[0],
						 bucket_xh(args->old_bucket),
						 args->new_bucket->bu_bhs[0],
						 bucket_xh(args->new_bucket),
						 &vb, meta_ac,
						 ocfs2_get_reflink_xattr_value_root,
						 args);
		if (ret) {
			mlog_errno(ret);
			break;
		}

		/*
		 * Re-access and dirty the bucket to calculate metaecc.
		 * Because we may extend the transaction in reflink_xattr_header
		 * which will let the already accessed block gone.
		 */
		ret = ocfs2_xattr_bucket_journal_access(handle,
						args->new_bucket,
						OCFS2_JOURNAL_ACCESS_WRITE);
		if (ret) {
			mlog_errno(ret);
			break;
		}

		ocfs2_xattr_bucket_journal_dirty(handle, args->new_bucket);

		ocfs2_xattr_bucket_relse(args->old_bucket);
		ocfs2_xattr_bucket_relse(args->new_bucket);
	}

	/* Safe on already-released buckets; covers the break-on-error paths. */
	ocfs2_xattr_bucket_relse(args->old_bucket);
	ocfs2_xattr_bucket_relse(args->new_bucket);
	return ret;
}
Tao Ma121a39b2010-07-09 14:53:12 +08006930
/*
 * Reflink the bucket run that starts at @blkno (covering @len clusters of
 * the old xattr tree). Clusters are claimed one request at a time from
 * @data_ac; each allocation is filled by ocfs2_reflink_xattr_bucket() and
 * then inserted into the new inode's xattr extent tree @et.
 *
 * Returns 0 on success or a negative error code.
 */
static int ocfs2_reflink_xattr_buckets(handle_t *handle,
				struct inode *inode,
				struct ocfs2_reflink_xattr_tree_args *args,
				struct ocfs2_extent_tree *et,
				struct ocfs2_alloc_context *meta_ac,
				struct ocfs2_alloc_context *data_ac,
				u64 blkno, u32 cpos, u32 len)
{
	int ret, first_inserted = 0;
	u32 p_cluster, num_clusters, reflink_cpos = 0;
	u64 new_blkno;
	unsigned int num_buckets, reflink_buckets;
	unsigned int bpc =
		ocfs2_xattr_buckets_per_cluster(OCFS2_SB(inode->i_sb));

	/* The first bucket's header holds the total bucket count. */
	ret = ocfs2_read_xattr_bucket(args->old_bucket, blkno);
	if (ret) {
		mlog_errno(ret);
		goto out;
	}
	num_buckets = le16_to_cpu(bucket_xh(args->old_bucket)->xh_num_buckets);
	ocfs2_xattr_bucket_relse(args->old_bucket);

	while (len && num_buckets) {
		ret = ocfs2_claim_clusters(handle, data_ac,
					   1, &p_cluster, &num_clusters);
		if (ret) {
			mlog_errno(ret);
			goto out;
		}

		new_blkno = ocfs2_clusters_to_blocks(inode->i_sb, p_cluster);
		reflink_buckets = min(num_buckets, bpc * num_clusters);

		ret = ocfs2_reflink_xattr_bucket(handle, blkno,
						 new_blkno, num_clusters,
						 &reflink_cpos, reflink_buckets,
						 meta_ac, data_ac, args);
		if (ret) {
			mlog_errno(ret);
			goto out;
		}

		/*
		 * For the 1st allocated cluster, we make it use the same cpos
		 * so that the xattr tree looks the same as the original one
		 * in the most case.
		 */
		if (!first_inserted) {
			reflink_cpos = cpos;
			first_inserted = 1;
		}
		ret = ocfs2_insert_extent(handle, et, reflink_cpos, new_blkno,
					  num_clusters, 0, meta_ac);
		if (ret)
			mlog_errno(ret);

		trace_ocfs2_reflink_xattr_buckets((unsigned long long)new_blkno,
						  num_clusters, reflink_cpos);

		len -= num_clusters;
		blkno += ocfs2_clusters_to_blocks(inode->i_sb, num_clusters);
		num_buckets -= reflink_buckets;
	}
out:
	return ret;
}
6998
/*
 * Create the same xattr extent record in the new inode's xattr tree.
 *
 * Callback for ocfs2_iterate_xattr_index_block(); @para is the
 * struct ocfs2_reflink_xattr_tree_args set up by the caller. Reserves the
 * allocators/credits for this extent rec, then runs the bucket reflink in
 * its own transaction.
 */
static int ocfs2_reflink_xattr_rec(struct inode *inode,
				   struct buffer_head *root_bh,
				   u64 blkno,
				   u32 cpos,
				   u32 len,
				   void *para)
{
	int ret, credits = 0;
	handle_t *handle;
	struct ocfs2_reflink_xattr_tree_args *args =
		(struct ocfs2_reflink_xattr_tree_args *)para;
	struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
	struct ocfs2_alloc_context *meta_ac = NULL;
	struct ocfs2_alloc_context *data_ac = NULL;
	struct ocfs2_extent_tree et;

	trace_ocfs2_reflink_xattr_rec((unsigned long long)blkno, len);

	ocfs2_init_xattr_tree_extent_tree(&et,
					  INODE_CACHE(args->reflink->new_inode),
					  args->new_blk_bh);

	ret = ocfs2_lock_reflink_xattr_rec_allocators(args, &et, blkno,
						      len, &credits,
						      &meta_ac, &data_ac);
	if (ret) {
		mlog_errno(ret);
		goto out;
	}

	handle = ocfs2_start_trans(osb, credits);
	if (IS_ERR(handle)) {
		ret = PTR_ERR(handle);
		mlog_errno(ret);
		goto out;
	}

	ret = ocfs2_reflink_xattr_buckets(handle, inode, args, &et,
					  meta_ac, data_ac,
					  blkno, cpos, len);
	if (ret)
		mlog_errno(ret);

	/* Commit even on failure; the handle is consumed either way. */
	ocfs2_commit_trans(osb, handle);

out:
	if (meta_ac)
		ocfs2_free_alloc_context(meta_ac);
	if (data_ac)
		ocfs2_free_alloc_context(data_ac);
	return ret;
}
7054
/*
 * Create reflinked xattr buckets.
 * We will add bucket one by one, and refcount all the xattrs in the bucket
 * if they are stored outside.
 *
 * Allocates the two scratch buckets used by the tree walkers, then walks
 * every extent rec of the old indexed xattr tree via
 * ocfs2_iterate_xattr_index_block()/ocfs2_reflink_xattr_rec().
 * Returns 0 on success or a negative error code.
 */
static int ocfs2_reflink_xattr_tree(struct ocfs2_xattr_reflink *args,
				    struct buffer_head *blk_bh,
				    struct buffer_head *new_blk_bh)
{
	int ret;
	struct ocfs2_reflink_xattr_tree_args para;

	memset(&para, 0, sizeof(para));
	para.reflink = args;
	para.old_blk_bh = blk_bh;
	para.new_blk_bh = new_blk_bh;

	para.old_bucket = ocfs2_xattr_bucket_new(args->old_inode);
	if (!para.old_bucket) {
		mlog_errno(-ENOMEM);
		return -ENOMEM;
	}

	para.new_bucket = ocfs2_xattr_bucket_new(args->new_inode);
	if (!para.new_bucket) {
		ret = -ENOMEM;
		mlog_errno(ret);
		goto out;
	}

	ret = ocfs2_iterate_xattr_index_block(args->old_inode, blk_bh,
					      ocfs2_reflink_xattr_rec,
					      &para);
	if (ret)
		mlog_errno(ret);

out:
	/* bucket_free handles a NULL new_bucket on the early-error path. */
	ocfs2_xattr_bucket_free(para.old_bucket);
	ocfs2_xattr_bucket_free(para.new_bucket);
	return ret;
}
7096
7097static int ocfs2_reflink_xattr_in_block(struct ocfs2_xattr_reflink *args,
7098 struct buffer_head *blk_bh)
7099{
7100 int ret, indexed = 0;
7101 struct buffer_head *new_blk_bh = NULL;
7102 struct ocfs2_xattr_block *xb =
7103 (struct ocfs2_xattr_block *)blk_bh->b_data;
7104
7105
7106 if (le16_to_cpu(xb->xb_flags) & OCFS2_XATTR_INDEXED)
7107 indexed = 1;
7108
7109 ret = ocfs2_create_empty_xattr_block(args->new_inode, args->new_bh,
7110 &new_blk_bh, indexed);
7111 if (ret) {
7112 mlog_errno(ret);
7113 goto out;
7114 }
7115
Jeff Liu2decd652010-10-12 11:18:18 +08007116 if (!indexed)
Tao Ma2999d122009-08-18 11:43:55 +08007117 ret = ocfs2_reflink_xattr_block(args, blk_bh, new_blk_bh);
7118 else
7119 ret = ocfs2_reflink_xattr_tree(args, blk_bh, new_blk_bh);
7120 if (ret)
7121 mlog_errno(ret);
7122
7123out:
7124 brelse(new_blk_bh);
7125 return ret;
7126}
7127
Tao Ma0fe9b662009-08-18 11:47:56 +08007128static int ocfs2_reflink_xattr_no_security(struct ocfs2_xattr_entry *xe)
7129{
7130 int type = ocfs2_xattr_get_type(xe);
7131
7132 return type != OCFS2_XATTR_INDEX_SECURITY &&
7133 type != OCFS2_XATTR_INDEX_POSIX_ACL_ACCESS &&
7134 type != OCFS2_XATTR_INDEX_POSIX_ACL_DEFAULT;
7135}
7136
/*
 * Reflink all xattrs of @old_inode to @new_inode, under the shared
 * refcount tree of the two inodes.
 *
 * @preserve_security: if false, security and POSIX ACL xattrs are skipped
 *                     via the ocfs2_reflink_xattr_no_security() filter.
 *
 * Copies the inline xattr area (if any) and then the external xattr block
 * (plain or indexed). Any clusters queued for dealloc during the copy are
 * released after the refcount tree is unlocked.
 * Returns 0 on success or a negative error code.
 */
int ocfs2_reflink_xattrs(struct inode *old_inode,
			 struct buffer_head *old_bh,
			 struct inode *new_inode,
			 struct buffer_head *new_bh,
			 bool preserve_security)
{
	int ret;
	struct ocfs2_xattr_reflink args;
	struct ocfs2_inode_info *oi = OCFS2_I(old_inode);
	struct ocfs2_dinode *di = (struct ocfs2_dinode *)old_bh->b_data;
	struct buffer_head *blk_bh = NULL;
	struct ocfs2_cached_dealloc_ctxt dealloc;
	struct ocfs2_refcount_tree *ref_tree;
	struct buffer_head *ref_root_bh = NULL;

	ret = ocfs2_lock_refcount_tree(OCFS2_SB(old_inode->i_sb),
				       le64_to_cpu(di->i_refcount_loc),
				       1, &ref_tree, &ref_root_bh);
	if (ret) {
		mlog_errno(ret);
		goto out;
	}

	ocfs2_init_dealloc_ctxt(&dealloc);

	args.old_inode = old_inode;
	args.new_inode = new_inode;
	args.old_bh = old_bh;
	args.new_bh = new_bh;
	args.ref_ci = &ref_tree->rf_ci;
	args.ref_root_bh = ref_root_bh;
	args.dealloc = &dealloc;
	if (preserve_security)
		args.xattr_reflinked = NULL;
	else
		args.xattr_reflinked = ocfs2_reflink_xattr_no_security;

	if (oi->ip_dyn_features & OCFS2_INLINE_XATTR_FL) {
		ret = ocfs2_reflink_xattr_inline(&args);
		if (ret) {
			mlog_errno(ret);
			goto out_unlock;
		}
	}

	/* No external xattr block -> done. */
	if (!di->i_xattr_loc)
		goto out_unlock;

	ret = ocfs2_read_xattr_block(old_inode, le64_to_cpu(di->i_xattr_loc),
				     &blk_bh);
	if (ret < 0) {
		mlog_errno(ret);
		goto out_unlock;
	}

	ret = ocfs2_reflink_xattr_in_block(&args, blk_bh);
	if (ret)
		mlog_errno(ret);

	brelse(blk_bh);

out_unlock:
	ocfs2_unlock_refcount_tree(OCFS2_SB(old_inode->i_sb),
				   ref_tree, 1);
	brelse(ref_root_bh);

	/* Flush/run any truncates queued while copying values. */
	if (ocfs2_dealloc_has_cluster(&dealloc)) {
		ocfs2_schedule_truncate_log_flush(OCFS2_SB(old_inode->i_sb), 1);
		ocfs2_run_deallocs(OCFS2_SB(old_inode->i_sb), &dealloc);
	}

out:
	return ret;
}
7211
/*
 * Initialize security and acl for an already created inode.
 * Used for reflinking a non-preserve-security file.
 *
 * It uses common api like ocfs2_xattr_set, so the caller
 * must not hold any lock except i_mutex.
 *
 * Returns 0 on success or a negative error code.
 */
int ocfs2_init_security_and_acl(struct inode *dir,
				struct inode *inode,
				const struct qstr *qstr)
{
	int ret = 0;
	struct buffer_head *dir_bh = NULL;

	/* Ask the LSM to set the security xattr on the new inode. */
	ret = ocfs2_init_security_get(inode, dir, qstr, NULL);
	if (ret) {
		mlog_errno(ret);
		goto leave;
	}

	/* The directory must be read-locked while we inherit its ACLs. */
	ret = ocfs2_inode_lock(dir, &dir_bh, 0);
	if (ret) {
		mlog_errno(ret);
		goto leave;
	}
	ret = ocfs2_init_acl(NULL, inode, dir, NULL, dir_bh, NULL, NULL);
	if (ret)
		mlog_errno(ret);

	ocfs2_inode_unlock(dir, 0);
	brelse(dir_bh);
leave:
	return ret;
}
Andreas Gruenbacher1046cb12015-12-02 14:44:42 +01007246
Tao Ma0fe9b662009-08-18 11:47:56 +08007247/*
Tiger Yang923f7f32008-11-14 11:16:27 +08007248 * 'security' attributes support
7249 */
Andreas Gruenbacherd9a82a02015-10-04 19:18:51 +02007250static int ocfs2_xattr_security_get(const struct xattr_handler *handler,
Al Virob2968212016-04-10 20:48:24 -04007251 struct dentry *unused, struct inode *inode,
7252 const char *name, void *buffer, size_t size)
Tiger Yang923f7f32008-11-14 11:16:27 +08007253{
Al Virob2968212016-04-10 20:48:24 -04007254 return ocfs2_xattr_get(inode, OCFS2_XATTR_INDEX_SECURITY,
Christoph Hellwig431547b2009-11-13 09:52:56 +00007255 name, buffer, size);
Tiger Yang923f7f32008-11-14 11:16:27 +08007256}
7257
Andreas Gruenbacherd9a82a02015-10-04 19:18:51 +02007258static int ocfs2_xattr_security_set(const struct xattr_handler *handler,
Al Viro59301222016-05-27 10:19:30 -04007259 struct dentry *unused, struct inode *inode,
7260 const char *name, const void *value,
7261 size_t size, int flags)
Tiger Yang923f7f32008-11-14 11:16:27 +08007262{
Al Viro59301222016-05-27 10:19:30 -04007263 return ocfs2_xattr_set(inode, OCFS2_XATTR_INDEX_SECURITY,
Christoph Hellwig431547b2009-11-13 09:52:56 +00007264 name, value, size, flags);
Tiger Yang923f7f32008-11-14 11:16:27 +08007265}
7266
Joseph Qib519ea62015-06-24 16:55:34 -07007267static int ocfs2_initxattrs(struct inode *inode, const struct xattr *xattr_array,
Mimi Zohar9d8f13b2011-06-06 15:29:25 -04007268 void *fs_info)
7269{
7270 const struct xattr *xattr;
7271 int err = 0;
7272
7273 for (xattr = xattr_array; xattr->name != NULL; xattr++) {
7274 err = ocfs2_xattr_set(inode, OCFS2_XATTR_INDEX_SECURITY,
7275 xattr->name, xattr->value,
7276 xattr->value_len, XATTR_CREATE);
7277 if (err)
7278 break;
7279 }
7280 return err;
7281}
7282
/*
 * Fetch or apply the initial security xattr for a new inode.
 *
 * If @si is given, the old single-xattr LSM API is used and the name/value
 * are returned in @si for the caller to write later; otherwise the xattrs
 * are written immediately through ocfs2_initxattrs().
 * Returns 0 on success, -EOPNOTSUPP if the volume lacks xattr support, or
 * another negative error code from the security layer.
 */
int ocfs2_init_security_get(struct inode *inode,
			    struct inode *dir,
			    const struct qstr *qstr,
			    struct ocfs2_security_xattr_info *si)
{
	/* check whether ocfs2 support feature xattr */
	if (!ocfs2_supports_xattr(OCFS2_SB(dir->i_sb)))
		return -EOPNOTSUPP;
	if (si)
		return security_old_inode_init_security(inode, dir, qstr,
							&si->name, &si->value,
							&si->value_len);

	return security_inode_init_security(inode, dir, qstr,
					    &ocfs2_initxattrs, NULL);
}
7299
/*
 * Write the security xattr previously obtained in @si (see
 * ocfs2_init_security_get with a non-NULL @si) inside an already running
 * transaction @handle, using the pre-reserved @xattr_ac/@data_ac.
 * Returns 0 on success or a negative error code.
 */
int ocfs2_init_security_set(handle_t *handle,
			    struct inode *inode,
			    struct buffer_head *di_bh,
			    struct ocfs2_security_xattr_info *si,
			    struct ocfs2_alloc_context *xattr_ac,
			    struct ocfs2_alloc_context *data_ac)
{
	return ocfs2_xattr_set_handle(handle, inode, di_bh,
				     OCFS2_XATTR_INDEX_SECURITY,
				     si->name, si->value, si->value_len, 0,
				     xattr_ac, data_ac);
}
7312
/* VFS handler for "security." xattrs. */
const struct xattr_handler ocfs2_xattr_security_handler = {
	.prefix	= XATTR_SECURITY_PREFIX,
	.get	= ocfs2_xattr_security_get,
	.set	= ocfs2_xattr_security_set,
};
7318
7319/*
Mark Fasheh99219ae2008-10-07 14:52:59 -07007320 * 'trusted' attributes support
7321 */
Andreas Gruenbacherd9a82a02015-10-04 19:18:51 +02007322static int ocfs2_xattr_trusted_get(const struct xattr_handler *handler,
Al Virob2968212016-04-10 20:48:24 -04007323 struct dentry *unused, struct inode *inode,
7324 const char *name, void *buffer, size_t size)
Mark Fasheh99219ae2008-10-07 14:52:59 -07007325{
Al Virob2968212016-04-10 20:48:24 -04007326 return ocfs2_xattr_get(inode, OCFS2_XATTR_INDEX_TRUSTED,
Christoph Hellwig431547b2009-11-13 09:52:56 +00007327 name, buffer, size);
Mark Fasheh99219ae2008-10-07 14:52:59 -07007328}
7329
Andreas Gruenbacherd9a82a02015-10-04 19:18:51 +02007330static int ocfs2_xattr_trusted_set(const struct xattr_handler *handler,
Al Viro59301222016-05-27 10:19:30 -04007331 struct dentry *unused, struct inode *inode,
7332 const char *name, const void *value,
7333 size_t size, int flags)
Mark Fasheh99219ae2008-10-07 14:52:59 -07007334{
Al Viro59301222016-05-27 10:19:30 -04007335 return ocfs2_xattr_set(inode, OCFS2_XATTR_INDEX_TRUSTED,
Christoph Hellwig431547b2009-11-13 09:52:56 +00007336 name, value, size, flags);
Mark Fasheh99219ae2008-10-07 14:52:59 -07007337}
7338
/* VFS handler for "trusted." xattrs. */
const struct xattr_handler ocfs2_xattr_trusted_handler = {
	.prefix	= XATTR_TRUSTED_PREFIX,
	.get	= ocfs2_xattr_trusted_get,
	.set	= ocfs2_xattr_trusted_set,
};
7344
Mark Fasheh99219ae2008-10-07 14:52:59 -07007345/*
7346 * 'user' attributes support
7347 */
Andreas Gruenbacherd9a82a02015-10-04 19:18:51 +02007348static int ocfs2_xattr_user_get(const struct xattr_handler *handler,
Andreas Gruenbacher84d86e62016-05-25 12:46:47 +02007349 struct dentry *unused, struct inode *inode,
Al Virob2968212016-04-10 20:48:24 -04007350 const char *name, void *buffer, size_t size)
Mark Fasheh99219ae2008-10-07 14:52:59 -07007351{
Al Virob2968212016-04-10 20:48:24 -04007352 struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
Mark Fasheh99219ae2008-10-07 14:52:59 -07007353
Mark Fasheh99219ae2008-10-07 14:52:59 -07007354 if (osb->s_mount_opt & OCFS2_MOUNT_NOUSERXATTR)
7355 return -EOPNOTSUPP;
Al Virob2968212016-04-10 20:48:24 -04007356 return ocfs2_xattr_get(inode, OCFS2_XATTR_INDEX_USER, name,
Mark Fasheh99219ae2008-10-07 14:52:59 -07007357 buffer, size);
7358}
7359
Andreas Gruenbacherd9a82a02015-10-04 19:18:51 +02007360static int ocfs2_xattr_user_set(const struct xattr_handler *handler,
Al Viro59301222016-05-27 10:19:30 -04007361 struct dentry *unused, struct inode *inode,
7362 const char *name, const void *value,
7363 size_t size, int flags)
Mark Fasheh99219ae2008-10-07 14:52:59 -07007364{
Al Viro59301222016-05-27 10:19:30 -04007365 struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
Mark Fasheh99219ae2008-10-07 14:52:59 -07007366
Mark Fasheh99219ae2008-10-07 14:52:59 -07007367 if (osb->s_mount_opt & OCFS2_MOUNT_NOUSERXATTR)
7368 return -EOPNOTSUPP;
7369
Al Viro59301222016-05-27 10:19:30 -04007370 return ocfs2_xattr_set(inode, OCFS2_XATTR_INDEX_USER,
Christoph Hellwig431547b2009-11-13 09:52:56 +00007371 name, value, size, flags);
Mark Fasheh99219ae2008-10-07 14:52:59 -07007372}
7373
/* VFS handler for "user." xattrs. */
const struct xattr_handler ocfs2_xattr_user_handler = {
	.prefix	= XATTR_USER_PREFIX,
	.get	= ocfs2_xattr_user_get,
	.set	= ocfs2_xattr_user_set,
};