blob: 50a19a40bd4e776114f99fde2e103f14d91f045a [file] [log] [blame]
Andrew Perepechko7fc1f832013-12-03 21:58:49 +08001/*
2 * Copyright 2012 Xyratex Technology Limited
3 *
Andreas Dilger1dc563a2015-11-08 18:09:37 -05004 * Copyright (c) 2013, 2015, Intel Corporation.
5 *
Andrew Perepechko7fc1f832013-12-03 21:58:49 +08006 * Author: Andrew Perepechko <Andrew_Perepechko@xyratex.com>
7 *
8 */
9
10#define DEBUG_SUBSYSTEM S_LLITE
11
12#include <linux/fs.h>
13#include <linux/sched.h>
14#include <linux/mm.h>
Greg Kroah-Hartman67a235f2014-07-11 21:51:41 -070015#include "../include/obd_support.h"
Greg Kroah-Hartman67a235f2014-07-11 21:51:41 -070016#include "../include/lustre_dlm.h"
17#include "../include/lustre_ver.h"
Andrew Perepechko7fc1f832013-12-03 21:58:49 +080018#include "llite_internal.h"
19
/* If we ever have hundreds of extended attributes, we might want to consider
 * using a hash or a tree structure instead of list for faster lookups.
 */

/* One cached extended attribute of an inode.  Entries are linked on
 * ll_inode_info::lli_xattrs and looked up linearly by name.
 */
struct ll_xattr_entry {
	struct list_head	xe_list;    /* protected with
					     * lli_xattrs_list_rwsem
					     */
	char			*xe_name;   /* xattr name, \0-terminated */
	char			*xe_value;  /* xattr value (raw bytes, not
					     * NUL-terminated)
					     */
	unsigned		xe_namelen; /* strlen(xe_name) + 1 */
	unsigned		xe_vallen;  /* xattr value length */
};
32
/* Slab cache backing struct ll_xattr_entry allocations; registered with
 * lu_kmem_init() in ll_xattr_init() and torn down in ll_xattr_fini().
 */
static struct kmem_cache *xattr_kmem;
static struct lu_kmem_descr xattr_caches[] = {
	{
		.ckd_cache = &xattr_kmem,
		.ckd_name  = "xattr_kmem",
		.ckd_size  = sizeof(struct ll_xattr_entry)
	},
	{
		.ckd_cache = NULL	/* terminator for lu_kmem_init() */
	}
};
44
45int ll_xattr_init(void)
46{
47 return lu_kmem_init(xattr_caches);
48}
49
50void ll_xattr_fini(void)
51{
52 lu_kmem_fini(xattr_caches);
53}
54
55/**
56 * Initializes xattr cache for an inode.
57 *
58 * This initializes the xattr list and marks cache presence.
59 */
60static void ll_xattr_cache_init(struct ll_inode_info *lli)
61{
Andrew Perepechko7fc1f832013-12-03 21:58:49 +080062 INIT_LIST_HEAD(&lli->lli_xattrs);
63 lli->lli_flags |= LLIF_XATTR_CACHE;
64}
65
66/**
67 * This looks for a specific extended attribute.
68 *
69 * Find in @cache and return @xattr_name attribute in @xattr,
70 * for the NULL @xattr_name return the first cached @xattr.
71 *
72 * \retval 0 success
73 * \retval -ENODATA if not found
74 */
75static int ll_xattr_cache_find(struct list_head *cache,
76 const char *xattr_name,
77 struct ll_xattr_entry **xattr)
78{
79 struct ll_xattr_entry *entry;
80
Andrew Perepechko7fc1f832013-12-03 21:58:49 +080081 list_for_each_entry(entry, cache, xe_list) {
82 /* xattr_name == NULL means look for any entry */
Oleg Drokin6e168182016-02-16 00:46:46 -050083 if (!xattr_name || strcmp(xattr_name, entry->xe_name) == 0) {
Andrew Perepechko7fc1f832013-12-03 21:58:49 +080084 *xattr = entry;
85 CDEBUG(D_CACHE, "find: [%s]=%.*s\n",
86 entry->xe_name, entry->xe_vallen,
87 entry->xe_value);
88 return 0;
89 }
90 }
91
92 return -ENODATA;
93}
94
95/**
Andrew Perepechkoe93a3082014-02-09 02:51:48 -050096 * This adds an xattr.
Andrew Perepechko7fc1f832013-12-03 21:58:49 +080097 *
98 * Add @xattr_name attr with @xattr_val value and @xattr_val_len length,
Andrew Perepechko7fc1f832013-12-03 21:58:49 +080099 *
100 * \retval 0 success
101 * \retval -ENOMEM if no memory could be allocated for the cached attr
Andrew Perepechkoe93a3082014-02-09 02:51:48 -0500102 * \retval -EPROTO if duplicate xattr is being added
Andrew Perepechko7fc1f832013-12-03 21:58:49 +0800103 */
104static int ll_xattr_cache_add(struct list_head *cache,
105 const char *xattr_name,
106 const char *xattr_val,
107 unsigned xattr_val_len)
108{
109 struct ll_xattr_entry *xattr;
110
Andrew Perepechko7fc1f832013-12-03 21:58:49 +0800111 if (ll_xattr_cache_find(cache, xattr_name, &xattr) == 0) {
Andrew Perepechkoe93a3082014-02-09 02:51:48 -0500112 CDEBUG(D_CACHE, "duplicate xattr: [%s]\n", xattr_name);
113 return -EPROTO;
Andrew Perepechko7fc1f832013-12-03 21:58:49 +0800114 }
115
Amitoj Kaur Chawla21068c42016-02-26 14:25:09 +0530116 xattr = kmem_cache_zalloc(xattr_kmem, GFP_NOFS);
Oleg Drokin6e168182016-02-16 00:46:46 -0500117 if (!xattr) {
Andrew Perepechko7fc1f832013-12-03 21:58:49 +0800118 CDEBUG(D_CACHE, "failed to allocate xattr\n");
119 return -ENOMEM;
120 }
121
Tapasweni Pathakb3dd8952014-10-24 21:46:00 +0530122 xattr->xe_name = kstrdup(xattr_name, GFP_NOFS);
Andrew Perepechko7fc1f832013-12-03 21:58:49 +0800123 if (!xattr->xe_name) {
124 CDEBUG(D_CACHE, "failed to alloc xattr name %u\n",
125 xattr->xe_namelen);
126 goto err_name;
127 }
Ravindran, Madhusudhanan (M.)9cda6852015-03-12 17:35:52 +0000128 xattr->xe_value = kmemdup(xattr_val, xattr_val_len, GFP_NOFS);
Quentin Lambert695a0662015-02-12 15:56:07 +0100129 if (!xattr->xe_value)
Andrew Perepechko7fc1f832013-12-03 21:58:49 +0800130 goto err_value;
Andrew Perepechko7fc1f832013-12-03 21:58:49 +0800131
Andrew Perepechko7fc1f832013-12-03 21:58:49 +0800132 xattr->xe_vallen = xattr_val_len;
133 list_add(&xattr->xe_list, cache);
134
Oleg Drokine15ba452016-02-26 01:49:49 -0500135 CDEBUG(D_CACHE, "set: [%s]=%.*s\n", xattr_name, xattr_val_len,
136 xattr_val);
Andrew Perepechko7fc1f832013-12-03 21:58:49 +0800137
138 return 0;
139err_value:
Julia Lawall97903a22015-04-12 22:55:02 +0200140 kfree(xattr->xe_name);
Andrew Perepechko7fc1f832013-12-03 21:58:49 +0800141err_name:
Mike Rapoport50d30362015-10-20 12:39:51 +0300142 kmem_cache_free(xattr_kmem, xattr);
Andrew Perepechko7fc1f832013-12-03 21:58:49 +0800143
144 return -ENOMEM;
145}
146
147/**
148 * This removes an extended attribute from cache.
149 *
150 * Remove @xattr_name attribute from @cache.
151 *
152 * \retval 0 success
153 * \retval -ENODATA if @xattr_name is not cached
154 */
155static int ll_xattr_cache_del(struct list_head *cache,
156 const char *xattr_name)
157{
158 struct ll_xattr_entry *xattr;
159
Andrew Perepechko7fc1f832013-12-03 21:58:49 +0800160 CDEBUG(D_CACHE, "del xattr: %s\n", xattr_name);
161
162 if (ll_xattr_cache_find(cache, xattr_name, &xattr) == 0) {
163 list_del(&xattr->xe_list);
Julia Lawall97903a22015-04-12 22:55:02 +0200164 kfree(xattr->xe_name);
165 kfree(xattr->xe_value);
Mike Rapoport50d30362015-10-20 12:39:51 +0300166 kmem_cache_free(xattr_kmem, xattr);
Andrew Perepechko7fc1f832013-12-03 21:58:49 +0800167
168 return 0;
169 }
170
171 return -ENODATA;
172}
173
174/**
175 * This iterates cached extended attributes.
176 *
177 * Walk over cached attributes in @cache and
178 * fill in @xld_buffer or only calculate buffer
179 * size if @xld_buffer is NULL.
180 *
181 * \retval >= 0 buffer list size
182 * \retval -ENODATA if the list cannot fit @xld_size buffer
183 */
184static int ll_xattr_cache_list(struct list_head *cache,
185 char *xld_buffer,
186 int xld_size)
187{
188 struct ll_xattr_entry *xattr, *tmp;
189 int xld_tail = 0;
190
Andrew Perepechko7fc1f832013-12-03 21:58:49 +0800191 list_for_each_entry_safe(xattr, tmp, cache, xe_list) {
192 CDEBUG(D_CACHE, "list: buffer=%p[%d] name=%s\n",
Oleg Drokine15ba452016-02-26 01:49:49 -0500193 xld_buffer, xld_tail, xattr->xe_name);
Andrew Perepechko7fc1f832013-12-03 21:58:49 +0800194
195 if (xld_buffer) {
196 xld_size -= xattr->xe_namelen;
197 if (xld_size < 0)
198 break;
199 memcpy(&xld_buffer[xld_tail],
200 xattr->xe_name, xattr->xe_namelen);
201 }
202 xld_tail += xattr->xe_namelen;
203 }
204
205 if (xld_size < 0)
206 return -ERANGE;
207
208 return xld_tail;
209}
210
211/**
212 * Check if the xattr cache is initialized (filled).
213 *
214 * \retval 0 @cache is not initialized
215 * \retval 1 @cache is initialized
216 */
John L. Hammond2d95f102014-04-27 13:07:05 -0400217static int ll_xattr_cache_valid(struct ll_inode_info *lli)
Andrew Perepechko7fc1f832013-12-03 21:58:49 +0800218{
219 return !!(lli->lli_flags & LLIF_XATTR_CACHE);
220}
221
222/**
223 * This finalizes the xattr cache.
224 *
225 * Free all xattr memory. @lli is the inode info pointer.
226 *
Masanari Iidad0a0acc2014-03-08 22:58:32 +0900227 * \retval 0 no error occurred
Andrew Perepechko7fc1f832013-12-03 21:58:49 +0800228 */
229static int ll_xattr_cache_destroy_locked(struct ll_inode_info *lli)
230{
Andrew Perepechko7fc1f832013-12-03 21:58:49 +0800231 if (!ll_xattr_cache_valid(lli))
232 return 0;
233
234 while (ll_xattr_cache_del(&lli->lli_xattrs, NULL) == 0)
235 ; /* empty loop */
236 lli->lli_flags &= ~LLIF_XATTR_CACHE;
237
238 return 0;
239}
240
241int ll_xattr_cache_destroy(struct inode *inode)
242{
243 struct ll_inode_info *lli = ll_i2info(inode);
244 int rc;
245
Andrew Perepechko7fc1f832013-12-03 21:58:49 +0800246 down_write(&lli->lli_xattrs_list_rwsem);
247 rc = ll_xattr_cache_destroy_locked(lli);
248 up_write(&lli->lli_xattrs_list_rwsem);
249
250 return rc;
251}
252
253/**
Andrew Perepechkoe93a3082014-02-09 02:51:48 -0500254 * Match or enqueue a PR lock.
Andrew Perepechko7fc1f832013-12-03 21:58:49 +0800255 *
256 * Find or request an LDLM lock with xattr data.
257 * Since LDLM does not provide API for atomic match_or_enqueue,
258 * the function handles it with a separate enq lock.
259 * If successful, the function exits with the list lock held.
260 *
Masanari Iidad0a0acc2014-03-08 22:58:32 +0900261 * \retval 0 no error occurred
Andrew Perepechko7fc1f832013-12-03 21:58:49 +0800262 * \retval -ENOMEM not enough memory
263 */
264static int ll_xattr_find_get_lock(struct inode *inode,
265 struct lookup_intent *oit,
266 struct ptlrpc_request **req)
267{
Oleg Drokin52ee0d22016-02-24 21:59:54 -0500268 enum ldlm_mode mode;
Andrew Perepechko7fc1f832013-12-03 21:58:49 +0800269 struct lustre_handle lockh = { 0 };
270 struct md_op_data *op_data;
271 struct ll_inode_info *lli = ll_i2info(inode);
John L. Hammond70a251f2016-08-19 14:07:30 -0400272 struct ldlm_enqueue_info einfo = {
273 .ei_type = LDLM_IBITS,
274 .ei_mode = it_to_lock_mode(oit),
275 .ei_cb_bl = &ll_md_blocking_ast,
276 .ei_cb_cp = &ldlm_completion_ast,
277 };
Andrew Perepechko7fc1f832013-12-03 21:58:49 +0800278 struct ll_sb_info *sbi = ll_i2sbi(inode);
279 struct obd_export *exp = sbi->ll_md_exp;
280 int rc;
281
Andrew Perepechko7fc1f832013-12-03 21:58:49 +0800282 mutex_lock(&lli->lli_xattrs_enq_lock);
Lai Siyaod6abc592015-03-25 21:53:26 -0400283 /* inode may have been shrunk and recreated, so data is gone, match lock
Oleg Drokinc0894c62016-02-24 22:00:30 -0500284 * only when data exists.
285 */
Lai Siyaod6abc592015-03-25 21:53:26 -0400286 if (ll_xattr_cache_valid(lli)) {
287 /* Try matching first. */
288 mode = ll_take_md_lock(inode, MDS_INODELOCK_XATTR, &lockh, 0,
289 LCK_PR);
290 if (mode != 0) {
291 /* fake oit in mdc_revalidate_lock() manner */
John L. Hammonde476f2e2016-06-20 16:55:38 -0400292 oit->it_lock_handle = lockh.cookie;
293 oit->it_lock_mode = mode;
Lai Siyaod6abc592015-03-25 21:53:26 -0400294 goto out;
295 }
Andrew Perepechko7fc1f832013-12-03 21:58:49 +0800296 }
297
298 /* Enqueue if the lock isn't cached locally. */
299 op_data = ll_prep_md_op_data(NULL, inode, NULL, NULL, 0, 0,
300 LUSTRE_OPC_ANY, NULL);
301 if (IS_ERR(op_data)) {
302 mutex_unlock(&lli->lli_xattrs_enq_lock);
303 return PTR_ERR(op_data);
304 }
305
Andrew Perepechkoe93a3082014-02-09 02:51:48 -0500306 op_data->op_valid = OBD_MD_FLXATTR | OBD_MD_FLXATTRLS;
Andrew Perepechko7fc1f832013-12-03 21:58:49 +0800307
John L. Hammond70a251f2016-08-19 14:07:30 -0400308 rc = md_enqueue(exp, &einfo, NULL, oit, op_data, &lockh, 0);
Andrew Perepechko7fc1f832013-12-03 21:58:49 +0800309 ll_finish_md_op_data(op_data);
310
311 if (rc < 0) {
312 CDEBUG(D_CACHE,
313 "md_intent_lock failed with %d for fid "DFID"\n",
314 rc, PFID(ll_inode2fid(inode)));
315 mutex_unlock(&lli->lli_xattrs_enq_lock);
316 return rc;
317 }
318
John L. Hammond8bf86fd2016-06-20 16:55:40 -0400319 *req = oit->it_request;
Andrew Perepechko7fc1f832013-12-03 21:58:49 +0800320out:
321 down_write(&lli->lli_xattrs_list_rwsem);
322 mutex_unlock(&lli->lli_xattrs_enq_lock);
323
324 return 0;
325}
326
/**
 * Refill the xattr cache.
 *
 * Fetch and cache the whole of xattrs for @inode, acquiring
 * a read or a write xattr lock depending on operation in @oit.
 * Intent is dropped on exit unless the operation is setxattr.
 *
 * On success (return 0) lli_xattrs_list_rwsem is left held for write;
 * on failure it is released before returning.
 *
 * \retval 0 no error occurred
 * \retval -EPROTO network protocol error
 * \retval -ENOMEM not enough memory for the cache
 */
static int ll_xattr_cache_refill(struct inode *inode, struct lookup_intent *oit)
{
	struct ll_sb_info *sbi = ll_i2sbi(inode);
	struct ptlrpc_request *req = NULL;
	const char *xdata, *xval, *xtail, *xvtail;
	struct ll_inode_info *lli = ll_i2info(inode);
	struct mdt_body *body;
	__u32 *xsizes;
	int rc, i;

	/* On success this leaves lli_xattrs_list_rwsem write-locked. */
	rc = ll_xattr_find_get_lock(inode, oit, &req);
	if (rc)
		goto out_no_unlock;

	/* Do we have the data at this point? */
	if (ll_xattr_cache_valid(lli)) {
		ll_stats_ops_tally(sbi, LPROC_LL_GETXATTR_HITS, 1);
		rc = 0;
		goto out_maybe_drop;
	}

	/* Matched but no cache? Cancelled on error by a parallel refill. */
	if (unlikely(!req)) {
		CDEBUG(D_CACHE, "cancelled by a parallel getxattr\n");
		rc = -EIO;
		goto out_maybe_drop;
	}

	if (oit->it_status < 0) {
		CDEBUG(D_CACHE, "getxattr intent returned %d for fid "DFID"\n",
		       oit->it_status, PFID(ll_inode2fid(inode)));
		rc = oit->it_status;
		/* xattr data is so large that we don't want to cache it */
		if (rc == -ERANGE)
			rc = -EAGAIN;
		goto out_destroy;
	}

	body = req_capsule_server_get(&req->rq_pill, &RMF_MDT_BODY);
	if (!body) {
		CERROR("no MDT BODY in the refill xattr reply\n");
		rc = -EPROTO;
		goto out_destroy;
	}
	/* do not need swab xattr data */
	xdata = req_capsule_server_sized_get(&req->rq_pill, &RMF_EADATA,
					     body->mbo_eadatasize);
	xval = req_capsule_server_sized_get(&req->rq_pill, &RMF_EAVALS,
					    body->mbo_aclsize);
	xsizes = req_capsule_server_sized_get(&req->rq_pill, &RMF_EAVALS_LENS,
					      body->mbo_max_mdsize * sizeof(__u32));
	if (!xdata || !xval || !xsizes) {
		CERROR("wrong setxattr reply\n");
		rc = -EPROTO;
		goto out_destroy;
	}

	/* End of the packed name and value regions, used below for the
	 * per-entry consistency checks.
	 */
	xtail = xdata + body->mbo_eadatasize;
	xvtail = xval + body->mbo_aclsize;

	CDEBUG(D_CACHE, "caching: xdata=%p xtail=%p\n", xdata, xtail);

	ll_xattr_cache_init(lli);

	/* Walk the reply: names are consecutive NUL-terminated strings in
	 * xdata, values are packed in xval with lengths in xsizes.
	 */
	for (i = 0; i < body->mbo_max_mdsize; i++) {
		CDEBUG(D_CACHE, "caching [%s]=%.*s\n", xdata, *xsizes, xval);
		/* Perform consistency checks: attr names and vals in pill */
		if (!memchr(xdata, 0, xtail - xdata)) {
			CERROR("xattr protocol violation (names are broken)\n");
			rc = -EPROTO;
		} else if (xval + *xsizes > xvtail) {
			CERROR("xattr protocol violation (vals are broken)\n");
			rc = -EPROTO;
		} else if (OBD_FAIL_CHECK(OBD_FAIL_LLITE_XATTR_ENOMEM)) {
			/* fault-injection point for testing the error path */
			rc = -ENOMEM;
		} else if (!strcmp(xdata, XATTR_NAME_ACL_ACCESS)) {
			/* Filter out ACL ACCESS since it's cached separately */
			CDEBUG(D_CACHE, "not caching %s\n",
			       XATTR_NAME_ACL_ACCESS);
			rc = 0;
		} else {
			rc = ll_xattr_cache_add(&lli->lli_xattrs, xdata, xval,
						*xsizes);
		}
		if (rc < 0) {
			ll_xattr_cache_destroy_locked(lli);
			goto out_destroy;
		}
		xdata += strlen(xdata) + 1;
		xval += *xsizes;
		xsizes++;
	}

	if (xdata != xtail || xval != xvtail)
		CERROR("a hole in xattr data\n");

	ll_set_lock_data(sbi->ll_md_exp, inode, oit, NULL);

	goto out_maybe_drop;
out_maybe_drop:

	ll_intent_drop_lock(oit);

	/* Keep the write lock held on success; the caller downgrades it. */
	if (rc != 0)
		up_write(&lli->lli_xattrs_list_rwsem);
out_no_unlock:
	ptlrpc_req_finished(req);

	return rc;

out_destroy:
	up_write(&lli->lli_xattrs_list_rwsem);

	/* Drop the lock reference so a stale lock does not pin bad data. */
	ldlm_lock_decref_and_cancel((struct lustre_handle *)
				    &oit->it_lock_handle,
				    oit->it_lock_mode);

	goto out_no_unlock;
}
457
/**
 * Get an xattr value or list xattrs using the write-through cache.
 *
 * Get the xattr value (@valid has OBD_MD_FLXATTR set) of @name or
 * list xattr names (@valid has OBD_MD_FLXATTRLS set) for @inode.
 * The resulting value/list is stored in @buffer if the former
 * is not larger than @size.  A zero @size only computes the required
 * size (returned in rc).
 *
 * Exactly one of OBD_MD_FLXATTR and OBD_MD_FLXATTRLS must be set in
 * @valid (asserted below).
 *
 * \retval 0 no error occurred
 * \retval -EPROTO network protocol error
 * \retval -ENOMEM not enough memory for the cache
 * \retval -ERANGE the buffer is not large enough
 * \retval -ENODATA no such attr or the list is empty
 */
int ll_xattr_cache_get(struct inode *inode, const char *name, char *buffer,
		       size_t size, __u64 valid)
{
	struct lookup_intent oit = { .it_op = IT_GETXATTR };
	struct ll_inode_info *lli = ll_i2info(inode);
	int rc = 0;

	LASSERT(!!(valid & OBD_MD_FLXATTR) ^ !!(valid & OBD_MD_FLXATTRLS));

	down_read(&lli->lli_xattrs_list_rwsem);
	if (!ll_xattr_cache_valid(lli)) {
		up_read(&lli->lli_xattrs_list_rwsem);
		/* Refill returns with the rwsem write-locked on success,
		 * which we then downgrade to the read lock held by the
		 * cached path above.
		 */
		rc = ll_xattr_cache_refill(inode, &oit);
		if (rc)
			return rc;
		downgrade_write(&lli->lli_xattrs_list_rwsem);
	} else {
		ll_stats_ops_tally(ll_i2sbi(inode), LPROC_LL_GETXATTR_HITS, 1);
	}

	if (valid & OBD_MD_FLXATTR) {
		struct ll_xattr_entry *xattr;

		rc = ll_xattr_cache_find(&lli->lli_xattrs, name, &xattr);
		if (rc == 0) {
			rc = xattr->xe_vallen;
			/* zero size means we are only requested size in rc */
			if (size != 0) {
				if (size >= xattr->xe_vallen)
					memcpy(buffer, xattr->xe_value,
					       xattr->xe_vallen);
				else
					rc = -ERANGE;
			}
		}
	} else if (valid & OBD_MD_FLXATTRLS) {
		rc = ll_xattr_cache_list(&lli->lli_xattrs,
					 size ? buffer : NULL, size);
	}

	goto out;
out:
	up_read(&lli->lli_xattrs_list_rwsem);

	return rc;
}