#ifndef CEPH_CRUSH_CRUSH_H
#define CEPH_CRUSH_CRUSH_H

#include <linux/types.h>

/*
 * CRUSH is a pseudo-random data distribution algorithm that
 * efficiently distributes input values (typically, data objects)
 * across a heterogeneous, structured storage cluster.
 *
 * The algorithm was originally described in detail in this paper
 * (although the algorithm has evolved somewhat since then):
 *
 *     http://www.ssrc.ucsc.edu/Papers/weil-sc06.pdf
 *
 * LGPL2
 */


#define CRUSH_MAGIC     0x00010000ul    /* for detecting algorithm revisions */

#define CRUSH_MAX_DEPTH 10      /* max crush hierarchy depth */


#define CRUSH_ITEM_UNDEF 0x7ffffffe     /* undefined result (internal use only) */
#define CRUSH_ITEM_NONE  0x7fffffff     /* no result */

/*
 * CRUSH uses user-defined "rules" to describe how inputs should be
 * mapped to devices.  A rule consists of a sequence of steps to
 * perform to generate the set of output devices.
 */
struct crush_rule_step {
	__u32 op;
	__s32 arg1;
	__s32 arg2;
};

/* step op codes */
enum {
	CRUSH_RULE_NOOP = 0,
	CRUSH_RULE_TAKE = 1,          /* arg1 = value to start with */
	CRUSH_RULE_CHOOSE_FIRSTN = 2, /* arg1 = num items to pick */
				      /* arg2 = type */
	CRUSH_RULE_CHOOSE_INDEP = 3,  /* same */
	CRUSH_RULE_EMIT = 4,          /* no args */
	CRUSH_RULE_CHOOSELEAF_FIRSTN = 6,
	CRUSH_RULE_CHOOSELEAF_INDEP = 7,

	CRUSH_RULE_SET_CHOOSE_TRIES = 8, /* override choose_total_tries */
	CRUSH_RULE_SET_CHOOSELEAF_TRIES = 9, /* override chooseleaf_descend_once */
	CRUSH_RULE_SET_CHOOSE_LOCAL_TRIES = 10,
	CRUSH_RULE_SET_CHOOSE_LOCAL_FALLBACK_TRIES = 11,
	CRUSH_RULE_SET_CHOOSELEAF_VARY_R = 12
};
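
/*
 * Illustrative sketch only (root_id and host_type are hypothetical
 * values taken from some particular map, not defined here): a typical
 * replicated rule boils down to a step sequence like
 *
 *	{ CRUSH_RULE_TAKE,              root_id,  0         }
 *	{ CRUSH_RULE_CHOOSELEAF_FIRSTN, 0,        host_type }
 *	{ CRUSH_RULE_EMIT,              0,        0         }
 *
 * i.e. start from a root bucket, descend to distinct leaves under
 * distinct buckets of type host_type (arg1 == 0 means "as many as
 * requested"; see CRUSH_CHOOSE_N below), then emit the results.
 */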

/*
 * for specifying choose num (arg1) relative to the max parameter
 * passed to do_rule
 */
#define CRUSH_CHOOSE_N            0
#define CRUSH_CHOOSE_N_MINUS(x)   (-(x))

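/*
 * For example (hypothetical numbers): if do_rule is invoked with a max
 * of 3, an arg1 of CRUSH_CHOOSE_N asks for 3 items, while an arg1 of
 * CRUSH_CHOOSE_N_MINUS(1) asks for 2.
 */
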
/*
 * The rule mask is used to describe what the rule is intended for.
 * Given a ruleset and size of output set, we search through the
 * rule list for a matching rule_mask.
 */
struct crush_rule_mask {
	__u8 ruleset;
	__u8 type;
	__u8 min_size;
	__u8 max_size;
};
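
/*
 * Sketch of that search (the authoritative helper lives in the mapper
 * code, not in this header): a rule matches a query for a given
 * ruleset, type, and output size when
 *
 *	mask.ruleset == ruleset &&
 *	mask.type == type &&
 *	mask.min_size <= size && size <= mask.max_size
 */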

struct crush_rule {
	__u32 len;
	struct crush_rule_mask mask;
	struct crush_rule_step steps[0];
};

#define crush_rule_size(len) (sizeof(struct crush_rule) + \
			      (len)*sizeof(struct crush_rule_step))
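
/*
 * A minimal allocation sketch, assuming a kernel context where kmalloc
 * and GFP_NOFS are appropriate (the real rule decoding lives elsewhere):
 * steps[] is a flexible array member, so a rule with 3 steps occupies
 * crush_rule_size(3) bytes:
 *
 *	struct crush_rule *r = kmalloc(crush_rule_size(3), GFP_NOFS);
 *	if (r)
 *		r->len = 3;	(number of valid entries in r->steps[])
 */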


/*
 * A bucket is a named container of other items (either devices or
 * other buckets).  Items within a bucket are chosen using one of a
 * few different algorithms.  The table summarizes how the speed of
 * each option measures up against mapping stability when items are
 * added or removed.
 *
 *  Bucket Alg     Speed       Additions    Removals
 *  ------------------------------------------------
 *  uniform         O(1)       poor         poor
 *  list            O(n)       optimal      poor
 *  tree            O(log n)   good         good
 *  straw           O(n)       better       better
 *  straw2          O(n)       optimal      optimal
 */
enum {
	CRUSH_BUCKET_UNIFORM = 1,
	CRUSH_BUCKET_LIST = 2,
	CRUSH_BUCKET_TREE = 3,
	CRUSH_BUCKET_STRAW = 4,
	CRUSH_BUCKET_STRAW2 = 5,
};
extern const char *crush_bucket_alg_name(int alg);

struct crush_bucket {
	__s32 id;        /* this'll be negative */
	__u16 type;      /* non-zero; type=0 is reserved for devices */
	__u8 alg;        /* one of CRUSH_BUCKET_* */
	__u8 hash;       /* which hash function to use, CRUSH_HASH_* */
	__u32 weight;    /* 16-bit fixed point */
	__u32 size;      /* num items */
	__s32 *items;

	/*
	 * cached random permutation: used for uniform bucket and for
	 * the linear search fallback for the other bucket types.
	 */
	__u32 perm_x;  /* @x for which *perm is defined */
	__u32 perm_n;  /* num elements of *perm that are permuted/defined */
	__u32 *perm;
};
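
/*
 * Note on units: the "16-bit fixed point" weights used throughout are
 * 16.16 fixed point values, so 0x10000 (65536) corresponds to a weight
 * of 1.0 and 0x8000 to 0.5.
 */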

struct crush_bucket_uniform {
	struct crush_bucket h;
	__u32 item_weight;  /* 16-bit fixed point; all items equally weighted */
};

struct crush_bucket_list {
	struct crush_bucket h;
	__u32 *item_weights;  /* 16-bit fixed point */
	__u32 *sum_weights;   /* 16-bit fixed point.  element i is sum
				 of weights 0..i, inclusive */
};
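
/*
 * For example (hypothetical weights), item_weights of
 * { 0x10000, 0x20000, 0x30000 } would give sum_weights of
 * { 0x10000, 0x30000, 0x60000 }.
 */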

struct crush_bucket_tree {
	struct crush_bucket h;  /* note: h.size is _tree_ size, not number of
				   actual items */
	__u8 num_nodes;
	__u32 *node_weights;
};

struct crush_bucket_straw {
	struct crush_bucket h;
	__u32 *item_weights;   /* 16-bit fixed point */
	__u32 *straws;         /* 16-bit fixed point */
};

struct crush_bucket_straw2 {
	struct crush_bucket h;
	__u32 *item_weights;   /* 16-bit fixed point */
};



/*
 * CRUSH map includes all buckets, rules, etc.
 */
struct crush_map {
	struct crush_bucket **buckets;
	struct crush_rule **rules;

	__s32 max_buckets;
	__u32 max_rules;
	__s32 max_devices;

	/* choose local retries before re-descent */
	__u32 choose_local_tries;
	/* choose local attempts using a fallback permutation before
	 * re-descent */
	__u32 choose_local_fallback_tries;
	/* choose attempts before giving up */
	__u32 choose_total_tries;
	/* attempt chooseleaf inner descent once for firstn mode; on
	 * reject retry outer descent.  Note that this does *not*
	 * apply to a collision: in that case we will retry as we used
	 * to. */
	__u32 chooseleaf_descend_once;

	/* if non-zero, feed r into the chooseleaf descent, bit-shifted
	 * right by (chooseleaf_vary_r - 1) bits (see the sketch after
	 * this struct).  a value of 1 is best for new clusters.  for
	 * legacy clusters that want to limit reshuffling, a value of 3
	 * or 4 will make the mappings line up a bit better with
	 * previous mappings. */
	__u8 chooseleaf_vary_r;
};
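
/*
 * Rough sketch of how the mapper is expected to consume
 * chooseleaf_vary_r when it recurses for the leaf choice (the
 * authoritative logic lives in crush/mapper.c):
 *
 *	if (map->chooseleaf_vary_r)
 *		sub_r = r >> (map->chooseleaf_vary_r - 1);
 *	else
 *		sub_r = 0;
 *
 * so a value of 1 passes r through unchanged, while larger values
 * advance the recursive r more slowly, limiting reshuffling.
 */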


/* crush.c */
extern int crush_get_bucket_item_weight(const struct crush_bucket *b, int pos);
extern void crush_destroy_bucket_uniform(struct crush_bucket_uniform *b);
extern void crush_destroy_bucket_list(struct crush_bucket_list *b);
extern void crush_destroy_bucket_tree(struct crush_bucket_tree *b);
extern void crush_destroy_bucket_straw(struct crush_bucket_straw *b);
extern void crush_destroy_bucket_straw2(struct crush_bucket_straw2 *b);
extern void crush_destroy_bucket(struct crush_bucket *b);
extern void crush_destroy_rule(struct crush_rule *r);
extern void crush_destroy(struct crush_map *map);

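/*
 * Map an item's index within a tree bucket to its node number in the
 * implicit binary tree: leaves sit at the odd node numbers, so item i
 * lives at node 2*(i+1) - 1 (item 0 -> node 1, item 1 -> node 3,
 * item 2 -> node 5, ...).
 */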
static inline int crush_calc_tree_node(int i)
{
	return ((i+1) << 1)-1;
}

#endif