blob: 6e25c1bb6aa316aa7e783ed9c1e9fc45aeec48b5 [file] [log] [blame]
Peng Taod7e09d02013-05-02 16:46:55 +08001/*
2 * GPL HEADER START
3 *
4 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License version 2 only,
8 * as published by the Free Software Foundation.
9 *
10 * This program is distributed in the hope that it will be useful, but
11 * WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
13 * General Public License version 2 for more details (a copy is included
14 * in the LICENSE file that accompanied this code).
15 *
16 * You should have received a copy of the GNU General Public License
17 * version 2 along with this program; If not, see
Oleg Drokin6a5b99a2016-06-14 23:33:40 -040018 * http://www.gnu.org/licenses/gpl-2.0.html
Peng Taod7e09d02013-05-02 16:46:55 +080019 *
Peng Taod7e09d02013-05-02 16:46:55 +080020 * GPL HEADER END
21 */
22/*
23 * Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved.
24 * Use is subject to license terms.
25 *
Andreas Dilger1dc563a2015-11-08 18:09:37 -050026 * Copyright (c) 2011, 2015, Intel Corporation.
Peng Taod7e09d02013-05-02 16:46:55 +080027 */
28/*
29 * This file is part of Lustre, http://www.lustre.org/
30 * Lustre is a trademark of Sun Microsystems, Inc.
31 */
32
33#ifndef __LUSTRE_LU_OBJECT_H
34#define __LUSTRE_LU_OBJECT_H
35
36#include <stdarg.h>
Greg Kroah-Hartman9fdaf8c2014-07-11 20:51:16 -070037#include "../../include/linux/libcfs/libcfs.h"
Greg Kroah-Hartman1accaad2014-07-11 21:34:24 -070038#include "lustre/lustre_idl.h"
39#include "lu_ref.h"
Peng Taod7e09d02013-05-02 16:46:55 +080040
41struct seq_file;
Peng Taod7e09d02013-05-02 16:46:55 +080042struct lustre_cfg;
43struct lprocfs_stats;
44
45/** \defgroup lu lu
46 * lu_* data-types represent server-side entities shared by data and meta-data
47 * stacks.
48 *
49 * Design goals:
50 *
51 * -# support for layering.
52 *
53 * Server side object is split into layers, one per device in the
54 * corresponding device stack. Individual layer is represented by struct
55 * lu_object. Compound layered object --- by struct lu_object_header. Most
56 * interface functions take lu_object as an argument and operate on the
57 * whole compound object. This decision was made due to the following
58 * reasons:
59 *
60 * - it's envisaged that lu_object will be used much more often than
61 * lu_object_header;
62 *
63 * - we want lower (non-top) layers to be able to initiate operations
64 * on the whole object.
65 *
66 * Generic code supports layering more complex than simple stacking, e.g.,
67 * it is possible that at some layer object "spawns" multiple sub-objects
68 * on the lower layer.
69 *
70 * -# fid-based identification.
71 *
72 * Compound object is uniquely identified by its fid. Objects are indexed
73 * by their fids (hash table is used for index).
74 *
75 * -# caching and life-cycle management.
76 *
77 * Object's life-time is controlled by reference counting. When reference
78 * count drops to 0, object is returned to cache. Cached objects still
79 * retain their identity (i.e., fid), and can be recovered from cache.
80 *
81 * Objects are kept in the global LRU list, and lu_site_purge() function
82 * can be used to reclaim given number of unused objects from the tail of
83 * the LRU.
84 *
85 * -# avoiding recursion.
86 *
87 * Generic code tries to replace recursion through layers by iterations
88 * where possible. Additionally to the end of reducing stack consumption,
89 * data, when practically possible, are allocated through lu_context_key
90 * interface rather than on stack.
91 * @{
92 */
93
94struct lu_site;
95struct lu_object;
96struct lu_device;
97struct lu_object_header;
98struct lu_context;
99struct lu_env;
100
/**
 * Operations common for data and meta-data devices.
 */
struct lu_device_operations {
	/**
	 * Allocate object for the given device (without lower-layer
	 * parts). This is called by lu_object_operations::loo_object_init()
	 * from the parent layer, and should setup at least lu_object::lo_dev
	 * and lu_object::lo_ops fields of the resulting lu_object.
	 *
	 * Object creation protocol.
	 *
	 * Due to the design goal of avoiding recursion, object creation (see
	 * lu_object_alloc()) is somewhat involved:
	 *
	 *  - first, the lu_device_operations::ldo_object_alloc() method of
	 *  the top-level device in the stack is called. It should allocate
	 *  the top level object (including lu_object_header), but without
	 *  any lower-layer sub-object(s).
	 *
	 *  - then lu_object_alloc() sets the fid in the header of the newly
	 *  created object.
	 *
	 *  - then lu_object_operations::loo_object_init() is called. It has
	 *  to allocate lower-layer object(s). To do this,
	 *  lu_object_operations::loo_object_init() calls ldo_object_alloc()
	 *  of the lower-layer device(s).
	 *
	 *  - for all new objects allocated by
	 *  lu_object_operations::loo_object_init() (and inserted into object
	 *  stack), lu_object_operations::loo_object_init() is called again
	 *  repeatedly, until no new objects are created.
	 *
	 * \post ergo(!IS_ERR(result), result->lo_dev == d &&
	 *			      result->lo_ops != NULL);
	 */
	struct lu_object *(*ldo_object_alloc)(const struct lu_env *env,
					      const struct lu_object_header *h,
					      struct lu_device *d);
	/**
	 * Process a configuration command specific to this device.
	 */
	int (*ldo_process_config)(const struct lu_env *env,
				  struct lu_device *, struct lustre_cfg *);
	/** Notification that recovery has completed for the device. */
	int (*ldo_recovery_complete)(const struct lu_env *,
				     struct lu_device *);

	/**
	 * Initialize local objects for the device. This method is called
	 * after the layer has been initialized (after LCFG_SETUP stage) and
	 * before it starts serving user requests.
	 */

	int (*ldo_prepare)(const struct lu_env *,
			   struct lu_device *parent,
			   struct lu_device *dev);

};
159
/**
 * Flags for lu_object_conf::loc_flags.
 */
enum loc_flags {
	/* This is a new object to be allocated, or the file
	 * corresponding to the object does not exist.
	 */
	LOC_F_NEW = 0x00000001,
};
Peng Taod7e09d02013-05-02 16:46:55 +0800169
/**
 * Object configuration, describing particulars of the object being created.
 * On the server this is not used, as server objects are fully identified by
 * fid. On the client the configuration contains struct lustre_md.
 */
struct lu_object_conf {
	/**
	 * Some hints for object lookup and allocation (see enum loc_flags).
	 */
	enum loc_flags loc_flags;
};
181
/**
 * Type of "printer" function used by lu_object_operations::loo_object_print()
 * method.
 *
 * A printer function is needed to provide some flexibility in (semi-)debugging
 * output: possible implementations: printk, CDEBUG, sysfs/seq_file.
 */
typedef int (*lu_printer_t)(const struct lu_env *env,
			    void *cookie, const char *format, ...)
	__printf(3, 4);
Peng Taod7e09d02013-05-02 16:46:55 +0800192
/**
 * Operations specific for a particular lu_object.
 */
struct lu_object_operations {
	/**
	 * Allocate lower-layer parts of the object by calling
	 * lu_device_operations::ldo_object_alloc() of the corresponding
	 * underlying device.
	 *
	 * This method is called once for each object inserted into the object
	 * stack. It is the responsibility of this method to insert the
	 * lower-layer object(s) it creates into appropriate places of the
	 * object stack.
	 */
	int (*loo_object_init)(const struct lu_env *env,
			       struct lu_object *o,
			       const struct lu_object_conf *conf);
	/**
	 * Called (in top-to-bottom order) during object allocation after all
	 * layers were allocated and initialized. Can be used to perform
	 * initialization depending on lower layers.
	 */
	int (*loo_object_start)(const struct lu_env *env,
				struct lu_object *o);
	/**
	 * Called before lu_object_operations::loo_object_free() to signal
	 * that the object is being destroyed. Dual to
	 * lu_object_operations::loo_object_init().
	 */
	void (*loo_object_delete)(const struct lu_env *env,
				  struct lu_object *o);
	/**
	 * Dual to lu_device_operations::ldo_object_alloc(). Called when
	 * the object is removed from memory.
	 */
	void (*loo_object_free)(const struct lu_env *env,
				struct lu_object *o);
	/**
	 * Called when the last active reference to the object is released
	 * (and the object returns to the cache). This method is optional.
	 */
	void (*loo_object_release)(const struct lu_env *env,
				   struct lu_object *o);
	/**
	 * Optional debugging helper. Print the given object.
	 */
	int (*loo_object_print)(const struct lu_env *env, void *cookie,
				lu_printer_t p, const struct lu_object *o);
	/**
	 * Optional debugging method. Returns true iff the object is
	 * internally consistent.
	 */
	int (*loo_object_invariant)(const struct lu_object *o);
};
246
247/**
248 * Type of lu_device.
249 */
250struct lu_device_type;
251
/**
 * Device: a layer in the server side abstraction stacking.
 */
struct lu_device {
	/**
	 * reference count. This is incremented, in particular, on each object
	 * created at this layer.
	 *
	 * \todo XXX which means that atomic_t is probably too small.
	 */
	atomic_t ld_ref;
	/**
	 * Pointer to device type. Never modified once set.
	 */
	struct lu_device_type *ld_type;
	/**
	 * Operation vector for this device.
	 */
	const struct lu_device_operations *ld_ops;
	/**
	 * Stack this device belongs to.
	 */
	struct lu_site *ld_site;

	/** \todo XXX: temporary back pointer into obd. */
	struct obd_device *ld_obd;
	/**
	 * A list of references to this device, for debugging.
	 */
	struct lu_ref ld_reference;
	/**
	 * Link the device to the site (lu_site::ls_ld_linkage list).
	 **/
	struct list_head ld_linkage;
};
287
288struct lu_device_type_operations;
289
/**
 * Tag bits for device type. They are used to distinguish certain groups of
 * device types.
 */
enum lu_device_tag {
	/** this is a meta-data device */
	LU_DEVICE_MD = (1 << 0),
	/** this is a data device */
	LU_DEVICE_DT = (1 << 1),
	/** data device in the client stack */
	LU_DEVICE_CL = (1 << 2)
};
302
/**
 * Type of device.
 */
struct lu_device_type {
	/**
	 * Tag bits. Taken from enum lu_device_tag. Never modified once set.
	 */
	__u32 ldt_tags;
	/**
	 * Name of this class. Unique system-wide. Never modified once set.
	 */
	char *ldt_name;
	/**
	 * Operations for this type.
	 */
	const struct lu_device_type_operations *ldt_ops;
	/**
	 * \todo XXX: temporary pointer to associated obd_type.
	 */
	struct obd_type *ldt_obd_type;
	/**
	 * \todo XXX: temporary: context tags used by obd_*() calls.
	 */
	__u32 ldt_ctx_tags;
	/**
	 * Number of existing device type instances.
	 */
	unsigned ldt_device_nr;
	/**
	 * Linkage into a global list of all device types.
	 *
	 * \see lu_device_types.
	 */
	struct list_head ldt_linkage;
};
338
/**
 * Operations on a device type.
 */
struct lu_device_type_operations {
	/**
	 * Allocate a new device.
	 */
	struct lu_device *(*ldto_device_alloc)(const struct lu_env *env,
					       struct lu_device_type *t,
					       struct lustre_cfg *lcfg);
	/**
	 * Free device. Dual to
	 * lu_device_type_operations::ldto_device_alloc(). Returns pointer to
	 * the next device in the stack.
	 */
	struct lu_device *(*ldto_device_free)(const struct lu_env *,
					      struct lu_device *);

	/**
	 * Initialize the device after allocation.
	 */
	int (*ldto_device_init)(const struct lu_env *env,
				struct lu_device *, const char *,
				struct lu_device *);
	/**
	 * Finalize device. Dual to
	 * lu_device_type_operations::ldto_device_init(). Returns pointer to
	 * the next device in the stack.
	 */
	struct lu_device *(*ldto_device_fini)(const struct lu_env *env,
					      struct lu_device *);
	/**
	 * Initialize device type. This is called on module load.
	 */
	int (*ldto_init)(struct lu_device_type *t);
	/**
	 * Finalize device type. Dual to
	 * lu_device_type_operations::ldto_init(). Called on module unload.
	 */
	void (*ldto_fini)(struct lu_device_type *t);
	/**
	 * Called when the first device of this type is created.
	 */
	void (*ldto_start)(struct lu_device_type *t);
	/**
	 * Called when the number of devices of this type drops to 0.
	 */
	void (*ldto_stop)(struct lu_device_type *t);
};
388
389static inline int lu_device_is_md(const struct lu_device *d)
390{
Oleg Drokind2a13982016-02-16 00:46:52 -0500391 return ergo(d, d->ld_type->ldt_tags & LU_DEVICE_MD);
Peng Taod7e09d02013-05-02 16:46:55 +0800392}
393
/**
 * Common object attributes.
 */
struct lu_attr {
	/** size in bytes */
	__u64 la_size;
	/** modification time in seconds since Epoch */
	s64 la_mtime;
	/** access time in seconds since Epoch */
	s64 la_atime;
	/** change time in seconds since Epoch */
	s64 la_ctime;
	/** 512-byte blocks allocated to object */
	__u64 la_blocks;
	/** permission bits and file type */
	__u32 la_mode;
	/** owner id */
	__u32 la_uid;
	/** group id */
	__u32 la_gid;
	/** object flags */
	__u32 la_flags;
	/** number of persistent references to this object */
	__u32 la_nlink;
	/** blk bits of the object */
	__u32 la_blkbits;
	/** blk size of the object */
	__u32 la_blksize;
	/** real device */
	__u32 la_rdev;
	/**
	 * valid bits
	 *
	 * \see enum la_valid
	 */
	__u64 la_valid;
};
431
/** Bit-mask of valid attributes in struct lu_attr::la_valid */
enum la_valid {
	LA_ATIME	= 1 << 0,
	LA_MTIME	= 1 << 1,
	LA_CTIME	= 1 << 2,
	LA_SIZE		= 1 << 3,
	LA_MODE		= 1 << 4,
	LA_UID		= 1 << 5,
	LA_GID		= 1 << 6,
	LA_BLOCKS	= 1 << 7,
	LA_TYPE		= 1 << 8,
	LA_FLAGS	= 1 << 9,
	LA_NLINK	= 1 << 10,
	LA_RDEV		= 1 << 11,
	LA_BLKSIZE	= 1 << 12,
	LA_KILL_SUID	= 1 << 13,
	LA_KILL_SGID	= 1 << 14,
};
450
/**
 * Layer in the layered object.
 */
struct lu_object {
	/**
	 * Header for this object (shared by all layers of the compound
	 * object).
	 */
	struct lu_object_header *lo_header;
	/**
	 * Device for this layer.
	 */
	struct lu_device *lo_dev;
	/**
	 * Operations for this object.
	 */
	const struct lu_object_operations *lo_ops;
	/**
	 * Linkage into the list of all layers
	 * (lu_object_header::loh_layers).
	 */
	struct list_head lo_linkage;
	/**
	 * Link to the device, for debugging.
	 */
	struct lu_ref_link lo_dev_ref;
};
476
/* Bit numbers for lu_object_header::loh_flags (set/tested atomically). */
enum lu_object_header_flags {
	/**
	 * Don't keep this object in cache. Object will be destroyed as soon
	 * as the last reference to it is released. This flag cannot be
	 * cleared once set.
	 */
	LU_OBJECT_HEARD_BANSHEE = 0,
	/**
	 * Mark this object as already taken out of cache.
	 */
	LU_OBJECT_UNHASHED = 1,
};
489
/* Bit-mask values for lu_object_header::loh_attr. */
enum lu_object_header_attr {
	LOHA_EXISTS   = 1 << 0,
	LOHA_REMOTE   = 1 << 1,
	/**
	 * UNIX file type is stored in S_IFMT bits.
	 */
	LOHA_FT_START = 001 << 12, /**< S_IFIFO */
	LOHA_FT_END   = 017 << 12, /**< S_IFMT */
};
499
/**
 * "Compound" object, consisting of multiple layers.
 *
 * A compound object with a given fid is unique within a given lu_site.
 *
 * Note, that the object does *not* necessarily correspond to a real object
 * in the persistent storage: the object is an anchor for locking and method
 * calling, so it is created for things like a not-yet-existing child created
 * by mkdir or create calls. lu_object_operations::loo_exists() can be used
 * to check whether the object is backed by a persistent storage entity.
 */
struct lu_object_header {
	/**
	 * Fid, uniquely identifying this object.
	 */
	struct lu_fid loh_fid;
	/**
	 * Object flags from enum lu_object_header_flags. Set and checked
	 * atomically.
	 */
	unsigned long loh_flags;
	/**
	 * Object reference count. Protected by lu_site::ls_guard.
	 */
	atomic_t loh_ref;
	/**
	 * Common object attributes, cached for efficiency. From enum
	 * lu_object_header_attr.
	 */
	__u32 loh_attr;
	/**
	 * Linkage into per-site hash table. Protected by lu_site::ls_guard.
	 */
	struct hlist_node loh_hash;
	/**
	 * Linkage into per-site LRU list. Protected by lu_site::ls_guard.
	 */
	struct list_head loh_lru;
	/**
	 * Linkage into list of layers. Never modified once set (except lately
	 * during object destruction). No locking is necessary.
	 */
	struct list_head loh_layers;
	/**
	 * A list of references to this object, for debugging.
	 */
	struct lu_ref loh_reference;
};
548
549struct fld;
550
/* Per-bucket state of a lu_site object hash table. */
struct lu_site_bkt_data {
	/**
	 * number of objects in this bucket on the lsb_lru list.
	 */
	long lsb_lru_len;
	/**
	 * LRU list, updated on each access to object. Protected by
	 * bucket lock of lu_site::ls_obj_hash.
	 *
	 * "Cold" end of LRU is lu_site::ls_lru.next. Accessed objects are
	 * moved to the lu_site::ls_lru.prev (this is due to the non-existence
	 * of list_for_each_entry_safe_reverse()).
	 */
	struct list_head lsb_lru;
	/**
	 * Wait-queue signaled when an object in this site is ultimately
	 * destroyed (lu_object_free()). It is used by lu_object_find() to
	 * wait before re-trying when an object in the process of destruction
	 * is found in the hash table.
	 *
	 * \see htable_lookup().
	 */
	wait_queue_head_t lsb_marche_funebre;
};
575
/* Indices of per-site counters collected in lu_site::ls_stats. */
enum {
	LU_SS_CREATED = 0,
	LU_SS_CACHE_HIT,
	LU_SS_CACHE_MISS,
	LU_SS_CACHE_RACE,
	LU_SS_CACHE_DEATH_RACE,
	LU_SS_LRU_PURGED,
	LU_SS_LRU_LEN,	/* # of objects in lsb_lru lists */
	LU_SS_LAST_STAT
};
586
/**
 * lu_site is a "compartment" within which objects are unique, and LRU
 * discipline is maintained.
 *
 * lu_site exists so that multiple layered stacks can co-exist in the same
 * address space.
 *
 * lu_site has the same relation to lu_device as lu_object_header to
 * lu_object.
 */
struct lu_site {
	/**
	 * objects hash table
	 */
	struct cfs_hash *ls_obj_hash;
	/**
	 * index of bucket on hash table while purging
	 */
	int ls_purge_start;
	/**
	 * Top-level device for this stack.
	 */
	struct lu_device *ls_top_dev;
	/**
	 * Bottom-level device for this stack
	 */
	struct lu_device *ls_bottom_dev;
	/**
	 * Linkage into global list of sites.
	 */
	struct list_head ls_linkage;
	/**
	 * List of lu devices for this site, protected
	 * by ls_ld_lock.
	 **/
	struct list_head ls_ld_linkage;
	spinlock_t ls_ld_lock;

	/**
	 * lu_site stats
	 */
	struct lprocfs_stats *ls_stats;
	/**
	 * XXX: a hack! fld has to find md_site via site, remove when possible
	 */
	struct seq_server_site *ld_seq_site;
};
634
/*
 * Map a fid to the lu_site hash bucket data that would hold the object with
 * that fid. The bucket's extra data is the struct lu_site_bkt_data attached
 * at hash-table creation (via cfs_hash_bd_extra_get()).
 */
static inline struct lu_site_bkt_data *
lu_site_bkt_from_fid(struct lu_site *site, struct lu_fid *fid)
{
	struct cfs_hash_bd bd;

	/* Locate the bucket descriptor for this fid's hash. */
	cfs_hash_bd_get(site->ls_obj_hash, fid, &bd);
	return cfs_hash_bd_extra_get(site->ls_obj_hash, &bd);
}
643
Liu Xuezhao56f4c5a2013-07-23 00:06:44 +0800644static inline struct seq_server_site *lu_site2seq(const struct lu_site *s)
645{
646 return s->ld_seq_site;
647}
648
Peng Taod7e09d02013-05-02 16:46:55 +0800649/** \name ctors
650 * Constructors/destructors.
651 * @{
652 */
653
Oleg Drokine9570b42016-03-30 19:49:05 -0400654int lu_site_init(struct lu_site *s, struct lu_device *d);
655void lu_site_fini(struct lu_site *s);
656int lu_site_init_finish(struct lu_site *s);
657void lu_stack_fini(const struct lu_env *env, struct lu_device *top);
658void lu_device_get(struct lu_device *d);
659void lu_device_put(struct lu_device *d);
660int lu_device_init(struct lu_device *d, struct lu_device_type *t);
661void lu_device_fini(struct lu_device *d);
662int lu_object_header_init(struct lu_object_header *h);
Peng Taod7e09d02013-05-02 16:46:55 +0800663void lu_object_header_fini(struct lu_object_header *h);
Oleg Drokine9570b42016-03-30 19:49:05 -0400664int lu_object_init(struct lu_object *o,
665 struct lu_object_header *h, struct lu_device *d);
666void lu_object_fini(struct lu_object *o);
667void lu_object_add_top(struct lu_object_header *h, struct lu_object *o);
668void lu_object_add(struct lu_object *before, struct lu_object *o);
Peng Taod7e09d02013-05-02 16:46:55 +0800669
Peng Taod7e09d02013-05-02 16:46:55 +0800670/**
671 * Helpers to initialize and finalize device types.
672 */
673
674int lu_device_type_init(struct lu_device_type *ldt);
675void lu_device_type_fini(struct lu_device_type *ldt);
676void lu_types_stop(void);
677
678/** @} ctors */
679
680/** \name caching
681 * Caching and reference counting.
682 * @{
683 */
684
/**
 * Acquire an additional reference to the given object. This function is used
 * to attain an additional reference only. To acquire the initial reference
 * use lu_object_find().
 *
 * Caller must already hold a reference (asserted below), otherwise the
 * object could be freed concurrently.
 */
static inline void lu_object_get(struct lu_object *o)
{
	LASSERT(atomic_read(&o->lo_header->loh_ref) > 0);
	atomic_inc(&o->lo_header->loh_ref);
}
695
/**
 * Return true if the object will not be cached after the last reference to
 * it is released.
 */
static inline int lu_object_is_dying(const struct lu_object_header *h)
{
	return test_bit(LU_OBJECT_HEARD_BANSHEE, &h->loh_flags);
}
704
705void lu_object_put(const struct lu_env *env, struct lu_object *o);
Peng Taod7e09d02013-05-02 16:46:55 +0800706void lu_object_unhash(const struct lu_env *env, struct lu_object *o);
707
708int lu_site_purge(const struct lu_env *env, struct lu_site *s, int nr);
709
710void lu_site_print(const struct lu_env *env, struct lu_site *s, void *cookie,
711 lu_printer_t printer);
Peng Taod7e09d02013-05-02 16:46:55 +0800712struct lu_object *lu_object_find_at(const struct lu_env *env,
713 struct lu_device *dev,
714 const struct lu_fid *f,
715 const struct lu_object_conf *conf);
716struct lu_object *lu_object_find_slice(const struct lu_env *env,
717 struct lu_device *dev,
718 const struct lu_fid *f,
719 const struct lu_object_conf *conf);
720/** @} caching */
721
722/** \name helpers
723 * Helpers.
724 * @{
725 */
726
/**
 * First (topmost) sub-object of the given compound object.
 */
static inline struct lu_object *lu_object_top(struct lu_object_header *h)
{
	/* A fully set-up header always has at least one layer. */
	LASSERT(!list_empty(&h->loh_layers));
	return container_of0(h->loh_layers.next, struct lu_object, lo_linkage);
}
735
/**
 * Next sub-object in the layering (no bounds check: caller must not walk
 * past the last layer).
 */
static inline struct lu_object *lu_object_next(const struct lu_object *o)
{
	return container_of0(o->lo_linkage.next, struct lu_object, lo_linkage);
}
743
/**
 * Pointer to the fid of this object (stored in the shared header).
 */
static inline const struct lu_fid *lu_object_fid(const struct lu_object *o)
{
	return &o->lo_header->loh_fid;
}
751
/**
 * Return the device operations vector for this object's layer device.
 */
static inline const struct lu_device_operations *
lu_object_ops(const struct lu_object *o)
{
	return o->lo_dev->ld_ops;
}
760
761/**
762 * Given a compound object, find its slice, corresponding to the device type
763 * \a dtype.
764 */
765struct lu_object *lu_object_locate(struct lu_object_header *h,
766 const struct lu_device_type *dtype);
767
768/**
769 * Printer function emitting messages through libcfs_debug_msg().
770 */
771int lu_cdebug_printer(const struct lu_env *env,
772 void *cookie, const char *format, ...);
773
/**
 * Print object description followed by a user-supplied message.
 *
 * Evaluates its arguments only when the debug mask is enabled for the
 * current subsystem.
 */
#define LU_OBJECT_DEBUG(mask, env, object, format, ...)			  \
do {									  \
	if (cfs_cdebug_show(mask, DEBUG_SUBSYSTEM)) {			  \
		LIBCFS_DEBUG_MSG_DATA_DECL(msgdata, mask, NULL);	  \
		lu_object_print(env, &msgdata, lu_cdebug_printer, object);\
		CDEBUG(mask, format "\n", ## __VA_ARGS__);		  \
	}								  \
} while (0)
785
/**
 * Print short object description followed by a user-supplied message.
 *
 * Like LU_OBJECT_DEBUG() but prints only the compound object header.
 */
#define LU_OBJECT_HEADER(mask, env, object, format, ...)		\
do {									\
	if (cfs_cdebug_show(mask, DEBUG_SUBSYSTEM)) {			\
		LIBCFS_DEBUG_MSG_DATA_DECL(msgdata, mask, NULL);	\
		lu_object_header_print(env, &msgdata, lu_cdebug_printer,\
				       (object)->lo_header);		\
		lu_cdebug_printer(env, &msgdata, "\n");			\
		CDEBUG(mask, format, ## __VA_ARGS__);			\
	}								\
} while (0)
799
800void lu_object_print (const struct lu_env *env, void *cookie,
801 lu_printer_t printer, const struct lu_object *o);
802void lu_object_header_print(const struct lu_env *env, void *cookie,
803 lu_printer_t printer,
804 const struct lu_object_header *hdr);
805
806/**
807 * Check object consistency.
808 */
809int lu_object_invariant(const struct lu_object *o);
810
/**
 * Check whether the object exists, no matter on local or remote storage.
 * Note: LOHA_EXISTS will be set once someone created the object, and it does
 * not need to be committed to storage.
 */
#define lu_object_exists(o) ((o)->lo_header->loh_attr & LOHA_EXISTS)
817
/**
 * Check whether the object is on remote storage.
 */
#define lu_object_remote(o) unlikely((o)->lo_header->loh_attr & LOHA_REMOTE)
822
/* Assertion helper: non-zero iff the object exists. */
static inline int lu_object_assert_exists(const struct lu_object *o)
{
	return lu_object_exists(o);
}
827
/* Assertion helper: non-zero iff the object does not exist. */
static inline int lu_object_assert_not_exists(const struct lu_object *o)
{
	return !lu_object_exists(o);
}
832
/**
 * Cached attributes of this object (lu_object_header::loh_attr).
 * Only valid for existing objects (asserted below).
 */
static inline __u32 lu_object_attr(const struct lu_object *o)
{
	LASSERT(lu_object_exists(o) != 0);
	return o->lo_header->loh_attr;
}
841
/* Record a debugging reference to the object under tag (\a scope, \a source). */
static inline void lu_object_ref_add(struct lu_object *o,
				     const char *scope,
				     const void *source)
{
	lu_ref_add(&o->lo_header->loh_reference, scope, source);
}
848
/* As lu_object_ref_add(), but stores the reference in caller-supplied \a link. */
static inline void lu_object_ref_add_at(struct lu_object *o,
					struct lu_ref_link *link,
					const char *scope,
					const void *source)
{
	lu_ref_add_at(&o->lo_header->loh_reference, link, scope, source);
}
856
/* Drop a debugging reference previously added with lu_object_ref_add(). */
static inline void lu_object_ref_del(struct lu_object *o,
				     const char *scope, const void *source)
{
	lu_ref_del(&o->lo_header->loh_reference, scope, source);
}
862
/* Drop a debugging reference previously added with lu_object_ref_add_at(). */
static inline void lu_object_ref_del_at(struct lu_object *o,
					struct lu_ref_link *link,
					const char *scope, const void *source)
{
	lu_ref_del_at(&o->lo_header->loh_reference, link, scope, source);
}
869
/** input params for readdir-page requests, should be filled out by mdt */
struct lu_rdpg {
	/** hash */
	__u64 rp_hash;
	/** count in bytes */
	unsigned int rp_count;
	/** number of pages */
	unsigned int rp_npages;
	/** requested attr */
	__u32 rp_attrs;
	/** pointers to pages */
	struct page **rp_pages;
};
883
/* Flags controlling extended-attribute set operations. */
enum lu_xattr_flags {
	LU_XATTR_REPLACE = (1 << 0),
	LU_XATTR_CREATE  = (1 << 1)
};
888
889/** @} helpers */
890
891/** \name lu_context
Oleg Drokinc56e2562016-02-24 22:00:25 -0500892 * @{
893 */
Peng Taod7e09d02013-05-02 16:46:55 +0800894
/** For lu_context health-checks */
enum lu_context_state {
	LCS_INITIALIZED = 1,
	LCS_ENTERED,
	LCS_LEFT,
	LCS_FINALIZED
};
902
/**
 * lu_context. Execution context for lu_object methods. Currently associated
 * with a thread.
 *
 * All lu_object methods, except device and device type methods (called during
 * system initialization and shutdown), are executed "within" some
 * lu_context. This means that a pointer to some "current" lu_context is
 * passed as an argument to all methods.
 *
 * All service ptlrpc threads create a lu_context as part of their
 * initialization. It is possible to create a "stand-alone" context for other
 * execution environments (like system calls).
 *
 * lu_object methods mainly use lu_context through the lu_context_key
 * interface that allows each layer to associate arbitrary pieces of data
 * with each context (see pthread_key_create(3) for a similar interface).
 *
 * On a client, lu_context is bound to a thread, see cl_env_get().
 *
 * \see lu_context_key
 */
struct lu_context {
	/**
	 * lu_context is used on the client side too. Yet we don't want to
	 * allocate values of server-side keys for the client contexts and
	 * vice versa.
	 *
	 * To achieve this, a set of tags is introduced. Contexts and keys
	 * are marked with tags. Key values are created only for contexts
	 * whose set of tags has a non-empty intersection with the one for
	 * the key. Tags are taken from enum lu_context_tag.
	 */
	__u32 lc_tags;
	enum lu_context_state lc_state;
	/**
	 * Pointer to the home service thread. NULL for other execution
	 * contexts.
	 */
	struct ptlrpc_thread *lc_thread;
	/**
	 * Pointer to an array with key values. Internal implementation
	 * detail.
	 */
	void **lc_value;
	/**
	 * Linkage into a list of all remembered contexts. Only
	 * `non-transient' contexts, i.e., ones created for service threads
	 * are placed here.
	 */
	struct list_head lc_remember;
	/**
	 * Version counter used to skip calls to lu_context_refill() when no
	 * keys were registered.
	 */
	unsigned lc_version;
	/**
	 * Debugging cookie.
	 */
	unsigned lc_cookie;
};
963
964/**
965 * lu_context_key interface. Similar to pthread_key.
966 */
967
enum lu_context_tag {
	/**
	 * Thread on md server
	 */
	LCT_MD_THREAD = 1 << 0,
	/**
	 * Thread on dt server
	 */
	LCT_DT_THREAD = 1 << 1,
	/**
	 * Context for transaction handle
	 */
	LCT_TX_HANDLE = 1 << 2,
	/**
	 * Thread on client
	 */
	LCT_CL_THREAD = 1 << 3,
	/**
	 * A per-request session on a server, and a per-system-call session on
	 * a client.
	 */
	LCT_SESSION = 1 << 4,
	/**
	 * A per-request data on OSP device
	 */
	LCT_OSP_THREAD = 1 << 5,
	/**
	 * MGS device thread
	 */
	LCT_MG_THREAD = 1 << 6,
	/**
	 * Context for local operations
	 */
	LCT_LOCAL = 1 << 7,
	/**
	 * session for server thread
	 **/
	LCT_SERVER_SESSION = BIT(8),
	/**
	 * Set when at least one of the keys having values in this context
	 * has a non-NULL lu_context_key::lct_exit() method. This is used to
	 * optimize the lu_context_exit() call.
	 */
	LCT_HAS_EXIT = 1 << 28,
	/**
	 * Don't add references for modules creating key values in that
	 * context. This is only for contexts used internally by the
	 * lu_object framework.
	 */
	LCT_NOREF = 1 << 29,
	/**
	 * Key is being prepared for retiring, don't create new values for it.
	 */
	LCT_QUIESCENT = 1 << 30,
	/**
	 * Context should be remembered.
	 *
	 * NOTE(review): `1 << 31` shifts into the sign bit of int, which is
	 * formally undefined in standard C (kernel code relies on gcc's
	 * defined behavior here) — consider 1U << 31 upstream.
	 */
	LCT_REMEMBER = 1 << 31,
	/**
	 * Contexts usable in cache shrinker thread.
	 */
	LCT_SHRINKER = LCT_MD_THREAD|LCT_DT_THREAD|LCT_CL_THREAD|LCT_NOREF
};
1030
1031/**
1032 * Key. Represents per-context value slot.
1033 *
1034 * Keys are usually registered when module owning the key is initialized, and
1035 * de-registered when module is unloaded. Once key is registered, all new
1036 * contexts with matching tags, will get key value. "Old" contexts, already
1037 * initialized at the time of key registration, can be forced to get key value
1038 * by calling lu_context_refill().
1039 *
1040 * Every key value is counted in lu_context_key::lct_used and acquires a
1041 * reference on an owning module. This means, that all key values have to be
1042 * destroyed before module can be unloaded. This is usually achieved by
1043 * stopping threads started by the module, that created contexts in their
1044 * entry functions. Situation is complicated by the threads shared by multiple
1045 * modules, like ptlrpcd daemon on a client. To work around this problem,
1046 * contexts, created in such threads, are `remembered' (see
1047 * LCT_REMEMBER)---i.e., added into a global list. When module is preparing
1048 * for unloading it does the following:
1049 *
1050 * - marks its keys as `quiescent' (lu_context_tag::LCT_QUIESCENT)
1051 * preventing new key values from being allocated in the new contexts,
1052 * and
1053 *
1054 * - scans a list of remembered contexts, destroying values of module
1055 * keys, thus releasing references to the module.
1056 *
1057 * This is done by lu_context_key_quiesce(). If module is re-activated
1058 * before key has been de-registered, lu_context_key_revive() call clears
1059 * `quiescent' marker.
1060 *
1061 * lu_context code doesn't provide any internal synchronization for these
1062 * activities---it's assumed that startup (including threads start-up) and
1063 * shutdown are serialized by some external means.
1064 *
1065 * \see lu_context
1066 */
1067struct lu_context_key {
1068 /**
1069 * Set of tags for which values of this key are to be instantiated.
1070 */
1071 __u32 lct_tags;
1072 /**
1073 * Value constructor. This is called when new value is created for a
1074 * context. Returns pointer to new value of error pointer.
1075 */
1076 void *(*lct_init)(const struct lu_context *ctx,
1077 struct lu_context_key *key);
1078 /**
1079 * Value destructor. Called when context with previously allocated
1080 * value of this slot is destroyed. \a data is a value that was returned
1081 * by a matching call to lu_context_key::lct_init().
1082 */
1083 void (*lct_fini)(const struct lu_context *ctx,
1084 struct lu_context_key *key, void *data);
1085 /**
1086 * Optional method called on lu_context_exit() for all allocated
1087 * keys. Can be used by debugging code checking that locks are
1088 * released, etc.
1089 */
1090 void (*lct_exit)(const struct lu_context *ctx,
1091 struct lu_context_key *key, void *data);
1092 /**
1093 * Internal implementation detail: index within lu_context::lc_value[]
1094 * reserved for this key.
1095 */
1096 int lct_index;
1097 /**
1098 * Internal implementation detail: number of values created for this
1099 * key.
1100 */
1101 atomic_t lct_used;
1102 /**
1103 * Internal implementation detail: module for this key.
1104 */
Greg Kroah-Hartmanc34d9cd2013-08-04 07:48:41 +08001105 struct module *lct_owner;
Peng Taod7e09d02013-05-02 16:46:55 +08001106 /**
1107 * References to this key. For debugging.
1108 */
1109 struct lu_ref lct_reference;
1110};
1111
/**
 * Generate a key-value constructor, mod##_key_init(), suitable for
 * lu_context_key::lct_init: kzalloc()s a zeroed \a type (GFP_NOFS) and
 * returns it, or ERR_PTR(-ENOMEM) on allocation failure.
 * CLASSERT() guards against \a type outgrowing a single page.
 * \see LU_KEY_FINI
 */
#define LU_KEY_INIT(mod, type)					\
	static void *mod##_key_init(const struct lu_context *ctx, \
				    struct lu_context_key *key)	\
	{							\
		type *value;					\
								\
		CLASSERT(PAGE_SIZE >= sizeof(*value));		\
								\
		value = kzalloc(sizeof(*value), GFP_NOFS);	\
		if (!value)					\
			value = ERR_PTR(-ENOMEM);		\
								\
		return value;					\
	}							\
	struct __##mod##__dummy_init {; } /* semicolon catcher */
Peng Taod7e09d02013-05-02 16:46:55 +08001127
/**
 * Generate the matching key-value destructor, mod##_key_fini(), suitable
 * for lu_context_key::lct_fini: kfree()s the value allocated by the
 * LU_KEY_INIT()-generated constructor.
 */
#define LU_KEY_FINI(mod, type)					\
	static void mod##_key_fini(const struct lu_context *ctx, \
				   struct lu_context_key *key, void *data) \
	{							\
		type *info = data;				\
								\
		kfree(info);					\
	}							\
	struct __##mod##__dummy_fini {; } /* semicolon catcher */
Peng Taod7e09d02013-05-02 16:46:55 +08001137
/** Convenience wrapper generating both the constructor and the destructor. */
#define LU_KEY_INIT_FINI(mod, type)		\
	LU_KEY_INIT(mod, type);			\
	LU_KEY_FINI(mod, type)
Peng Taod7e09d02013-05-02 16:46:55 +08001141
/**
 * Define a lu_context_key named mod##_thread_key for tag set \a tags,
 * wired to the mod##_key_init()/mod##_key_fini() pair generated by
 * LU_KEY_INIT()/LU_KEY_FINI().
 */
#define LU_CONTEXT_KEY_DEFINE(mod, tags)		\
	struct lu_context_key mod##_thread_key = {	\
		.lct_tags = tags,			\
		.lct_init = mod##_key_init,		\
		.lct_fini = mod##_key_fini		\
	}
1148
/**
 * Record the owning module in \a key. A macro (not a function) so that
 * THIS_MODULE expands in the module that actually defines the key.
 */
#define LU_CONTEXT_KEY_INIT(key)			\
do {							\
	(key)->lct_owner = THIS_MODULE;			\
} while (0)
1153
Oleg Drokine9570b42016-03-30 19:49:05 -04001154int lu_context_key_register(struct lu_context_key *key);
1155void lu_context_key_degister(struct lu_context_key *key);
1156void *lu_context_key_get(const struct lu_context *ctx,
1157 const struct lu_context_key *key);
1158void lu_context_key_quiesce(struct lu_context_key *key);
1159void lu_context_key_revive(struct lu_context_key *key);
Peng Taod7e09d02013-05-02 16:46:55 +08001160
Peng Taod7e09d02013-05-02 16:46:55 +08001161/*
1162 * LU_KEY_INIT_GENERIC() has to be a macro to correctly determine an
1163 * owning module.
1164 */
1165
1166#define LU_KEY_INIT_GENERIC(mod) \
1167 static void mod##_key_init_generic(struct lu_context_key *k, ...) \
1168 { \
1169 struct lu_context_key *key = k; \
1170 va_list args; \
1171 \
1172 va_start(args, k); \
1173 do { \
1174 LU_CONTEXT_KEY_INIT(key); \
1175 key = va_arg(args, struct lu_context_key *); \
Oleg Drokind2a13982016-02-16 00:46:52 -05001176 } while (key); \
Peng Taod7e09d02013-05-02 16:46:55 +08001177 va_end(args); \
1178 }
1179
/**
 * Generate default lu_device_type hooks for a layer.
 *
 * The generated mod##_type_{init,fini,start,stop}() functions respectively
 * register, de-register, revive and quiesce the layer's keys; the keys are
 * passed as a vararg list that the *_many() helpers expect to be
 * NULL-terminated (the NULL is appended here). \see LU_TYPE_INIT_FINI()
 */
#define LU_TYPE_INIT(mod, ...)					\
	LU_KEY_INIT_GENERIC(mod)				\
	static int mod##_type_init(struct lu_device_type *t)	\
	{							\
		mod##_key_init_generic(__VA_ARGS__, NULL);	\
		return lu_context_key_register_many(__VA_ARGS__, NULL); \
	}							\
	struct __##mod##_dummy_type_init {; }

#define LU_TYPE_FINI(mod, ...)					\
	static void mod##_type_fini(struct lu_device_type *t)	\
	{							\
		lu_context_key_degister_many(__VA_ARGS__, NULL); \
	}							\
	struct __##mod##_dummy_type_fini {; }

#define LU_TYPE_START(mod, ...)					\
	static void mod##_type_start(struct lu_device_type *t)	\
	{							\
		lu_context_key_revive_many(__VA_ARGS__, NULL);	\
	}							\
	struct __##mod##_dummy_type_start {; }

#define LU_TYPE_STOP(mod, ...)					\
	static void mod##_type_stop(struct lu_device_type *t)	\
	{							\
		lu_context_key_quiesce_many(__VA_ARGS__, NULL);	\
	}							\
	struct __##mod##_dummy_type_stop {; }

/** Generate all four hooks at once. */
#define LU_TYPE_INIT_FINI(mod, ...)		\
	LU_TYPE_INIT(mod, __VA_ARGS__);		\
	LU_TYPE_FINI(mod, __VA_ARGS__);		\
	LU_TYPE_START(mod, __VA_ARGS__);	\
	LU_TYPE_STOP(mod, __VA_ARGS__)
1215
Oleg Drokine9570b42016-03-30 19:49:05 -04001216int lu_context_init(struct lu_context *ctx, __u32 tags);
1217void lu_context_fini(struct lu_context *ctx);
1218void lu_context_enter(struct lu_context *ctx);
1219void lu_context_exit(struct lu_context *ctx);
1220int lu_context_refill(struct lu_context *ctx);
Peng Taod7e09d02013-05-02 16:46:55 +08001221
1222/*
1223 * Helper functions to operate on multiple keys. These are used by the default
1224 * device type operations, defined by LU_TYPE_INIT_FINI().
1225 */
1226
Oleg Drokine9570b42016-03-30 19:49:05 -04001227int lu_context_key_register_many(struct lu_context_key *k, ...);
Peng Taod7e09d02013-05-02 16:46:55 +08001228void lu_context_key_degister_many(struct lu_context_key *k, ...);
Oleg Drokine9570b42016-03-30 19:49:05 -04001229void lu_context_key_revive_many(struct lu_context_key *k, ...);
1230void lu_context_key_quiesce_many(struct lu_context_key *k, ...);
Peng Taod7e09d02013-05-02 16:46:55 +08001231
Peng Taod7e09d02013-05-02 16:46:55 +08001232/**
1233 * Environment.
1234 */
1235struct lu_env {
1236 /**
1237 * "Local" context, used to store data instead of stack.
1238 */
1239 struct lu_context le_ctx;
1240 /**
1241 * "Session" context for per-request data.
1242 */
1243 struct lu_context *le_ses;
1244};
1245
Oleg Drokine9570b42016-03-30 19:49:05 -04001246int lu_env_init(struct lu_env *env, __u32 tags);
1247void lu_env_fini(struct lu_env *env);
1248int lu_env_refill(struct lu_env *env);
Peng Taod7e09d02013-05-02 16:46:55 +08001249
1250/** @} lu_context */
1251
1252/**
1253 * Output site statistical counters into a buffer. Suitable for
1254 * ll_rd_*()-style functions.
1255 */
Peng Tao73bb1da2013-05-29 21:40:55 +08001256int lu_site_stats_print(const struct lu_site *s, struct seq_file *m);
Peng Taod7e09d02013-05-02 16:46:55 +08001257
1258/**
1259 * Common name structure to be passed around for various name related methods.
1260 */
1261struct lu_name {
1262 const char *ln_name;
1263 int ln_namelen;
1264};
1265
1266/**
1267 * Common buffer structure to be passed around for various xattr_{s,g}et()
1268 * methods.
1269 */
1270struct lu_buf {
1271 void *lb_buf;
1272 ssize_t lb_len;
1273};
1274
1275#define DLUBUF "(%p %zu)"
1276#define PLUBUF(buf) (buf)->lb_buf, (buf)->lb_len
1277/**
1278 * One-time initializers, called at obdclass module initialization, not
1279 * exported.
1280 */
1281
1282/**
1283 * Initialization of global lu_* data.
1284 */
1285int lu_global_init(void);
1286
1287/**
1288 * Dual to lu_global_init().
1289 */
1290void lu_global_fini(void);
1291
/** Descriptor of one kmem cache, for batch creation/destruction. */
struct lu_kmem_descr {
	/* cache handle slot — presumably filled by lu_kmem_init() and read
	 * back by lu_kmem_fini(); confirm against obdclass implementation
	 */
	struct kmem_cache **ckd_cache;
	/* cache name */
	const char *ckd_name;
	/* size of objects in the cache */
	const size_t ckd_size;
};

/** Create the caches described by \a caches. */
int lu_kmem_init(struct lu_kmem_descr *caches);
/** Dual to lu_kmem_init(). */
void lu_kmem_fini(struct lu_kmem_descr *caches);
1300
Peng Taod7e09d02013-05-02 16:46:55 +08001301/** @} lu */
1302#endif /* __LUSTRE_LU_OBJECT_H */