blob: fbecbd089d75f4a9eead25b1439e6629893c3639 [file] [log] [blame]
Sage Weil5cd068c2010-07-07 08:38:17 -07001#ifndef CEPH_CRUSH_CRUSH_H
2#define CEPH_CRUSH_CRUSH_H
Sage Weil5ecc0a02009-10-06 11:31:11 -07003
Ilya Dryomovb459be72015-06-12 13:21:07 +03004#ifdef __KERNEL__
5# include <linux/types.h>
6#else
7# include "crush_compat.h"
8#endif
Sage Weil5ecc0a02009-10-06 11:31:11 -07009
10/*
11 * CRUSH is a pseudo-random data distribution algorithm that
12 * efficiently distributes input values (typically, data objects)
13 * across a heterogeneous, structured storage cluster.
14 *
15 * The algorithm was originally described in detail in this paper
16 * (although the algorithm has evolved somewhat since then):
17 *
18 * http://www.ssrc.ucsc.edu/Papers/weil-sc06.pdf
19 *
20 * LGPL2
21 */
22
23
#define CRUSH_MAGIC 0x00010000ul /* for detecting algorithm revisions */

#define CRUSH_MAX_DEPTH 10 /* max crush hierarchy depth */
#define CRUSH_MAX_RULESET (1<<8) /* max crush ruleset number */
#define CRUSH_MAX_RULES CRUSH_MAX_RULESET /* should be the same as max rulesets */

/* weights are 16.16 fixed point (0x10000 == 1.0); see crush_bucket.weight */
#define CRUSH_MAX_DEVICE_WEIGHT (100u * 0x10000u)
#define CRUSH_MAX_BUCKET_WEIGHT (65535u * 0x10000u)

#define CRUSH_ITEM_UNDEF 0x7ffffffe /* undefined result (internal use only) */
#define CRUSH_ITEM_NONE 0x7fffffff /* no result */
Ilya Dryomovc6d98a62013-12-24 21:19:25 +020035
/*
 * CRUSH uses user-defined "rules" to describe how inputs should be
 * mapped to devices.  A rule consists of a sequence of steps to
 * perform to generate the set of output devices.
 */
struct crush_rule_step {
	__u32 op;	/* CRUSH_RULE_* op code (see enum below) */
	__s32 arg1;	/* meaning depends on op; see per-op comments below */
	__s32 arg2;
};
46
/* step op codes */
enum {
	CRUSH_RULE_NOOP = 0,
	CRUSH_RULE_TAKE = 1,          /* arg1 = value to start with */
	CRUSH_RULE_CHOOSE_FIRSTN = 2, /* arg1 = num items to pick */
	                              /* arg2 = type */
	CRUSH_RULE_CHOOSE_INDEP = 3,  /* same */
	CRUSH_RULE_EMIT = 4,          /* no args */
	CRUSH_RULE_CHOOSELEAF_FIRSTN = 6,
	CRUSH_RULE_CHOOSELEAF_INDEP = 7,

	/* ops 8-13 override the corresponding crush_map tunable for the
	 * remainder of the rule (arg1 = new value) */
	CRUSH_RULE_SET_CHOOSE_TRIES = 8, /* override choose_total_tries */
	CRUSH_RULE_SET_CHOOSELEAF_TRIES = 9, /* override chooseleaf_descend_once */
	CRUSH_RULE_SET_CHOOSE_LOCAL_TRIES = 10,
	CRUSH_RULE_SET_CHOOSE_LOCAL_FALLBACK_TRIES = 11,
	CRUSH_RULE_SET_CHOOSELEAF_VARY_R = 12,
	CRUSH_RULE_SET_CHOOSELEAF_STABLE = 13
};
65
/*
 * for specifying choose num (arg1) relative to the max parameter
 * passed to do_rule: 0 means "take max items", negative values mean
 * "take max minus that many".
 */
#define CRUSH_CHOOSE_N 0
#define CRUSH_CHOOSE_N_MINUS(x) (-(x))
72
/*
 * The rule mask is used to describe what the rule is intended for.
 * Given a ruleset and size of output set, we search through the
 * rule list for a matching rule_mask.
 */
struct crush_rule_mask {
	__u8 ruleset;	/* ruleset this rule belongs to */
	__u8 type;	/* rule type -- NOTE(review): semantics defined by caller; confirm */
	__u8 min_size;	/* rule applies when min_size <= output size <= max_size */
	__u8 max_size;
};
84
85struct crush_rule {
86 __u32 len;
87 struct crush_rule_mask mask;
88 struct crush_rule_step steps[0];
89};
90
91#define crush_rule_size(len) (sizeof(struct crush_rule) + \
92 (len)*sizeof(struct crush_rule_step))
93
94
95
/*
 * A bucket is a named container of other items (either devices or
 * other buckets).  Items within a bucket are chosen using one of a
 * few different algorithms.  The table summarizes how the speed of
 * each option measures up against mapping stability when items are
 * added or removed.
 *
 *  Bucket Alg     Speed       Additions    Removals
 *  ------------------------------------------------
 *  uniform         O(1)       poor         poor
 *  list            O(n)       optimal      poor
 *  tree            O(log n)   good         good
 *  straw           O(n)       better       better
 *  straw2          O(n)       optimal      optimal
 */
enum {
	CRUSH_BUCKET_UNIFORM = 1,
	CRUSH_BUCKET_LIST = 2,
	CRUSH_BUCKET_TREE = 3,
	CRUSH_BUCKET_STRAW = 4,
	CRUSH_BUCKET_STRAW2 = 5,
};
/* human-readable name for a CRUSH_BUCKET_* value */
extern const char *crush_bucket_alg_name(int alg);
Sage Weil5ecc0a02009-10-06 11:31:11 -0700119
/*
 * Bitmask of bucket algorithms allowed for legacy maps (bit positions
 * are CRUSH_BUCKET_* values).  Although tree was a legacy algorithm,
 * it has been buggy, so exclude it.
 */
#define CRUSH_LEGACY_ALLOWED_BUCKET_ALGS (	\
		(1 << CRUSH_BUCKET_UNIFORM) |	\
		(1 << CRUSH_BUCKET_LIST) |	\
		(1 << CRUSH_BUCKET_STRAW))
128
/* common header shared by all bucket variants below */
struct crush_bucket {
	__s32 id;        /* this'll be negative */
	__u16 type;      /* non-zero; type=0 is reserved for devices */
	__u8 alg;        /* one of CRUSH_BUCKET_* */
	__u8 hash;       /* which hash function to use, CRUSH_HASH_* */
	__u32 weight;    /* 16-bit fixed point */
	__u32 size;      /* num items */
	__s32 *items;    /* 'size' item ids; negative ids name child buckets
	                  * (see id above), non-negative ids are devices */

};
139
/* uniform bucket: O(1) choice, all items carry the same weight */
struct crush_bucket_uniform {
	struct crush_bucket h;
	__u32 item_weight;  /* 16-bit fixed point; all items equally weighted */
};
144
/* list bucket: per-item weights plus running sums for O(n) selection */
struct crush_bucket_list {
	struct crush_bucket h;
	__u32 *item_weights;  /* 16-bit fixed point */
	__u32 *sum_weights;   /* 16-bit fixed point.  element i is sum
				 of weights 0..i, inclusive */
};
151
/* tree bucket: items hang off a binary tree of weighted nodes */
struct crush_bucket_tree {
	struct crush_bucket h;  /* note: h.size is _tree_ size, not number of
				   actual items */
	__u8 num_nodes;		/* nodes in node_weights[] */
	__u32 *node_weights;	/* 16-bit fixed point per tree node -- TODO confirm scale */
};
158
/* straw bucket (legacy; see straw2 below) */
struct crush_bucket_straw {
	struct crush_bucket h;
	__u32 *item_weights;  /* 16-bit fixed point */
	__u32 *straws;        /* 16-bit fixed point */
};
164
/* straw2 bucket: optimal stability on additions and removals */
struct crush_bucket_straw2 {
	struct crush_bucket h;
	__u32 *item_weights;  /* 16-bit fixed point */
};
169
Sage Weil5ecc0a02009-10-06 11:31:11 -0700170
171
/*
 * CRUSH map includes all buckets, rules, etc.
 */
struct crush_map {
	struct crush_bucket **buckets;	/* max_buckets entries */
	struct crush_rule **rules;	/* max_rules entries */

	__s32 max_buckets;
	__u32 max_rules;
	__s32 max_devices;

	/* choose local retries before re-descent */
	__u32 choose_local_tries;
	/* choose local attempts using a fallback permutation before
	 * re-descent */
	__u32 choose_local_fallback_tries;
	/* choose attempts before giving up */
	__u32 choose_total_tries;
	/* attempt chooseleaf inner descent once for firstn mode; on
	 * reject retry outer descent.  Note that this does *not*
	 * apply to a collision: in that case we will retry as we used
	 * to. */
	__u32 chooseleaf_descend_once;

	/* if non-zero, feed r into chooseleaf, bit-shifted right by (r-1)
	 * bits.  a value of 1 is best for new clusters.  for legacy clusters
	 * that want to limit reshuffling, a value of 3 or 4 will make the
	 * mappings line up a bit better with previous mappings. */
	__u8 chooseleaf_vary_r;

	/* if true, it makes chooseleaf firstn return stable results (if
	 * no local retry) so that data migrations would be optimal when some
	 * device fails. */
	__u8 chooseleaf_stable;

	/*
	 * This value is calculated after decode or construction by
	 * the builder.  It is exposed here (rather than having a
	 * 'build CRUSH working space' function) so that callers can
	 * reserve a static buffer, allocate space on the stack, or
	 * otherwise avoid calling into the heap allocator if they
	 * want to.  The size of the working space depends on the map,
	 * while the size of the scratch vector passed to the mapper
	 * depends on the size of the desired result set.
	 *
	 * Nothing stops the caller from allocating both in one fell
	 * swoop and passing in two pointers, though.
	 */
	size_t working_size;

#ifndef __KERNEL__
	/*
	 * version 0 (original) of straw_calc has various flaws.  version 1
	 * fixes a few of them.
	 */
	__u8 straw_calc_version;

	/*
	 * allowed bucket algs is a bitmask, here the bit positions
	 * are CRUSH_BUCKET_*.  note that these are *bits* and
	 * CRUSH_BUCKET_* values are not, so we need to or together (1
	 * << CRUSH_BUCKET_WHATEVER).  The 0th bit is not used to
	 * minimize confusion (bucket type values start at 1).
	 */
	__u32 allowed_bucket_algs;

	__u32 *choose_tries;
#endif
};
241
242
/* crush.c */
/* weight of the item at position 'pos' within bucket 'b' */
extern int crush_get_bucket_item_weight(const struct crush_bucket *b, int pos);
/* destructors: each frees the given object and the arrays it owns --
 * NOTE(review): exact ownership defined in crush.c; confirm there */
extern void crush_destroy_bucket_uniform(struct crush_bucket_uniform *b);
extern void crush_destroy_bucket_list(struct crush_bucket_list *b);
extern void crush_destroy_bucket_tree(struct crush_bucket_tree *b);
extern void crush_destroy_bucket_straw(struct crush_bucket_straw *b);
extern void crush_destroy_bucket_straw2(struct crush_bucket_straw2 *b);
extern void crush_destroy_bucket(struct crush_bucket *b);
extern void crush_destroy_rule(struct crush_rule *r);
extern void crush_destroy(struct crush_map *map);
253
/*
 * Map item index i to its node position in a crush_bucket_tree:
 * items occupy the odd positions 2*i + 1.
 */
static inline int crush_calc_tree_node(int i)
{
	return 2 * i + 1;
}
258
/*
 * These data structures are private to the CRUSH implementation.  They
 * are exposed in this header file because builder needs their
 * definitions to calculate the total working size.
 *
 * Moving this out of the crush map allows us to treat the CRUSH map as
 * immutable within the mapper and removes the requirement for a CRUSH
 * map lock.
 */
struct crush_work_bucket {
	__u32 perm_x; /* @x for which *perm is defined */
	__u32 perm_n; /* num elements of *perm that are permuted/defined */
	__u32 *perm;  /* Permutation of the bucket's items */
};

struct crush_work {
	struct crush_work_bucket **work; /* Per-bucket working store */
};
277
Sage Weil5ecc0a02009-10-06 11:31:11 -0700278#endif