#ifndef CEPH_CRUSH_CRUSH_H
#define CEPH_CRUSH_CRUSH_H

#ifdef __KERNEL__
# include <linux/rbtree.h>
# include <linux/types.h>
#else
# include "crush_compat.h"
#endif

/*
 * CRUSH is a pseudo-random data distribution algorithm that
 * efficiently distributes input values (typically, data objects)
 * across a heterogeneous, structured storage cluster.
 *
 * The algorithm was originally described in detail in this paper
 * (although the algorithm has evolved somewhat since then):
 *
 *     http://www.ssrc.ucsc.edu/Papers/weil-sc06.pdf
 *
 * LGPL2
 */


#define CRUSH_MAGIC 0x00010000ul   /* for detecting algorithm revisions */

#define CRUSH_MAX_DEPTH 10	/* max crush hierarchy depth */
#define CRUSH_MAX_RULESET (1<<8)  /* max crush ruleset number */
#define CRUSH_MAX_RULES CRUSH_MAX_RULESET  /* should be the same as max rulesets */

#define CRUSH_MAX_DEVICE_WEIGHT (100u * 0x10000u)
#define CRUSH_MAX_BUCKET_WEIGHT (65535u * 0x10000u)

#define CRUSH_ITEM_UNDEF  0x7ffffffe  /* undefined result (internal use only) */
#define CRUSH_ITEM_NONE   0x7fffffff  /* no result */

/*
 * CRUSH uses user-defined "rules" to describe how inputs should be
 * mapped to devices.  A rule consists of a sequence of steps to
 * perform to generate the set of output devices.
 */
struct crush_rule_step {
	__u32 op;
	__s32 arg1;
	__s32 arg2;
};

/* step op codes */
enum {
	CRUSH_RULE_NOOP = 0,
	CRUSH_RULE_TAKE = 1,          /* arg1 = value to start with */
	CRUSH_RULE_CHOOSE_FIRSTN = 2, /* arg1 = num items to pick */
				      /* arg2 = type */
	CRUSH_RULE_CHOOSE_INDEP = 3,  /* same */
	CRUSH_RULE_EMIT = 4,          /* no args */
	CRUSH_RULE_CHOOSELEAF_FIRSTN = 6,
	CRUSH_RULE_CHOOSELEAF_INDEP = 7,

	CRUSH_RULE_SET_CHOOSE_TRIES = 8, /* override choose_total_tries */
	CRUSH_RULE_SET_CHOOSELEAF_TRIES = 9, /* override chooseleaf_descend_once */
	CRUSH_RULE_SET_CHOOSE_LOCAL_TRIES = 10,
	CRUSH_RULE_SET_CHOOSE_LOCAL_FALLBACK_TRIES = 11,
	CRUSH_RULE_SET_CHOOSELEAF_VARY_R = 12,
	CRUSH_RULE_SET_CHOOSELEAF_STABLE = 13
};

/*
 * for specifying choose num (arg1) relative to the max parameter
 * passed to do_rule
 */
#define CRUSH_CHOOSE_N            0
#define CRUSH_CHOOSE_N_MINUS(x)   (-(x))
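
/*
 * For example (informal illustration, not from the original header):
 * with max = 3 passed to do_rule, CRUSH_CHOOSE_N selects 3 items and
 * CRUSH_CHOOSE_N_MINUS(1) selects 3 - 1 = 2.
 */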

/*
 * The rule mask is used to describe what the rule is intended for.
 * Given a ruleset and size of output set, we search through the
 * rule list for a matching rule_mask.
 */
struct crush_rule_mask {
	__u8 ruleset;
	__u8 type;
	__u8 min_size;
	__u8 max_size;
};

struct crush_rule {
	__u32 len;
	struct crush_rule_mask mask;
	struct crush_rule_step steps[0];
};

#define crush_rule_size(len) (sizeof(struct crush_rule) + \
			      (len)*sizeof(struct crush_rule_step))
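
/*
 * Illustrative sketch (not part of the original header): allocating a
 * minimal three-step replicated rule with crush_rule_size() -- take a
 * root item, choose leaves across a failure-domain type, and emit.
 * The helper name, the bucket id -1 and the type value 1 are
 * hypothetical placeholders, and kzalloc() needs <linux/slab.h>.
 * Disabled so the header itself is unchanged when compiled.
 */
#if 0
static struct crush_rule *example_make_rule(void)
{
	const __u32 len = 3;
	struct crush_rule *rule = kzalloc(crush_rule_size(len), GFP_KERNEL);

	if (!rule)
		return NULL;
	rule->len = len;
	/* start from a (hypothetical) root bucket with id -1 */
	rule->steps[0].op = CRUSH_RULE_TAKE;
	rule->steps[0].arg1 = -1;
	/* pick as many leaves as the caller's max, across type 1 (e.g. host) */
	rule->steps[1].op = CRUSH_RULE_CHOOSELEAF_FIRSTN;
	rule->steps[1].arg1 = CRUSH_CHOOSE_N;
	rule->steps[1].arg2 = 1;
	/* emit the accumulated result */
	rule->steps[2].op = CRUSH_RULE_EMIT;
	return rule;
}
#endif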


/*
 * A bucket is a named container of other items (either devices or
 * other buckets).  Items within a bucket are chosen using one of a
 * few different algorithms.  The table summarizes how the speed of
 * each option measures up against mapping stability when items are
 * added or removed.
 *
 *  Bucket Alg     Speed       Additions    Removals
 *  ------------------------------------------------
 *  uniform         O(1)       poor         poor
 *  list            O(n)       optimal      poor
 *  tree            O(log n)   good         good
 *  straw           O(n)       better       better
 *  straw2          O(n)       optimal      optimal
 */
enum {
	CRUSH_BUCKET_UNIFORM = 1,
	CRUSH_BUCKET_LIST = 2,
	CRUSH_BUCKET_TREE = 3,
	CRUSH_BUCKET_STRAW = 4,
	CRUSH_BUCKET_STRAW2 = 5,
};
extern const char *crush_bucket_alg_name(int alg);

/*
 * although tree is a legacy algorithm, it has been buggy, so it is
 * excluded here.
 */
#define CRUSH_LEGACY_ALLOWED_BUCKET_ALGS (	\
		(1 << CRUSH_BUCKET_UNIFORM) |	\
		(1 << CRUSH_BUCKET_LIST) |	\
		(1 << CRUSH_BUCKET_STRAW))
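
/*
 * For example (informal illustration, not from the original header),
 * a bucket algorithm can be tested against such a mask with
 * "CRUSH_LEGACY_ALLOWED_BUCKET_ALGS & (1 << CRUSH_BUCKET_STRAW2)",
 * which evaluates to 0 because straw2 is not a legacy algorithm.
 */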

struct crush_bucket {
	__s32 id;        /* this'll be negative */
	__u16 type;      /* non-zero; type=0 is reserved for devices */
	__u8 alg;        /* one of CRUSH_BUCKET_* */
	__u8 hash;       /* which hash function to use, CRUSH_HASH_* */
	__u32 weight;    /* 16-bit fixed point */
	__u32 size;      /* num items */
	__s32 *items;

};

/** @ingroup API
 *
 * Replacement weights for each item in a bucket.  The size of the
 * array must be exactly the size of the straw2 bucket, just like the
 * __item_weights__ array.
 *
 */
struct crush_weight_set {
	__u32 *weights; /*!< 16.16 fixed point weights
			     in the same order as items */
	__u32 size;     /*!< size of the __weights__ array */
};

/** @ingroup API
 *
 * Replacement weights and ids for a given straw2 bucket, for
 * placement purposes.
 *
 * When crush_do_rule() chooses the Nth item from a straw2 bucket, the
 * replacement weights found at __weight_set[N]__ are used instead of
 * the weights from __item_weights__.  If __N__ is greater than or
 * equal to __weight_set_size__, the weights found at
 * __weight_set_size-1__ are used instead.  For instance, if
 * __weight_set__ is:
 *
 *    [ [ 0x10000, 0x20000 ],   // position 0
 *      [ 0x20000, 0x40000 ] ]  // position 1
 *
 * choosing the 0th item will use position 0 weights [ 0x10000, 0x20000 ],
 * choosing the 1st item will use position 1 weights [ 0x20000, 0x40000 ],
 * choosing the 2nd item will use position 1 weights [ 0x20000, 0x40000 ],
 * and so on.
 *
 */
struct crush_choose_arg {
	__s32 *ids;            /*!< values to use instead of items */
	__u32 ids_size;        /*!< size of the __ids__ array */
	struct crush_weight_set *weight_set; /*!< weight replacements for
						  a given position */
	__u32 weight_set_size; /*!< size of the __weight_set__ array */
};
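
/*
 * Illustrative sketch (not part of the original header): how the
 * position-dependent weights described above can be looked up, with
 * the clamp to the last entry once the position runs past
 * weight_set_size - 1.  The helper name is hypothetical; disabled so
 * the header itself is unchanged when compiled.
 */
#if 0
static const __u32 *example_weights_for_position(const struct crush_choose_arg *arg,
						 __u32 position)
{
	if (!arg->weight_set || arg->weight_set_size == 0)
		return NULL;	/* fall back to the bucket's item_weights */
	if (position >= arg->weight_set_size)
		position = arg->weight_set_size - 1;
	return arg->weight_set[position].weights;
}
#endif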

/** @ingroup API
 *
 * Replacement weights and ids for each bucket in the crushmap.  The
 * __size__ of the __args__ array must be exactly the same as
 * __map->max_buckets__.
 *
 * The __crush_choose_arg__ at index N will be used when choosing an
 * item from the bucket __map->buckets[N]__, provided it is a straw2
 * bucket.
 *
 */
struct crush_choose_arg_map {
#ifdef __KERNEL__
	struct rb_node node;
	s64 choose_args_index;
#endif
	struct crush_choose_arg *args; /*!< replacement for each bucket
					    in the crushmap */
	__u32 size;                    /*!< size of the __args__ array */
};
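
/*
 * Illustrative sketch (not part of the original header): selecting the
 * per-bucket replacement arguments described above.  Bucket ids are
 * negative, and the bucket with id b is stored at map->buckets[-1-b],
 * so the same -1-b index selects its crush_choose_arg.  The helper
 * name is hypothetical; disabled so the header itself is unchanged
 * when compiled.
 */
#if 0
static const struct crush_choose_arg *
example_choose_arg_for_bucket(const struct crush_choose_arg_map *arg_map,
			      const struct crush_bucket *bucket)
{
	__u32 index = -1 - bucket->id;

	if (!arg_map->args || index >= arg_map->size)
		return NULL;
	return &arg_map->args[index];
}
#endif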

struct crush_bucket_uniform {
	struct crush_bucket h;
	__u32 item_weight;  /* 16-bit fixed point; all items equally weighted */
};

struct crush_bucket_list {
	struct crush_bucket h;
	__u32 *item_weights;  /* 16-bit fixed point */
	__u32 *sum_weights;   /* 16-bit fixed point.  element i is sum
				 of weights 0..i, inclusive */
};

struct crush_bucket_tree {
	struct crush_bucket h;  /* note: h.size is _tree_ size, not number of
				   actual items */
	__u8 num_nodes;
	__u32 *node_weights;
};

struct crush_bucket_straw {
	struct crush_bucket h;
	__u32 *item_weights;   /* 16-bit fixed point */
	__u32 *straws;         /* 16-bit fixed point */
};

struct crush_bucket_straw2 {
	struct crush_bucket h;
	__u32 *item_weights;   /* 16-bit fixed point */
};



/*
 * CRUSH map includes all buckets, rules, etc.
 */
struct crush_map {
	struct crush_bucket **buckets;
	struct crush_rule **rules;

	__s32 max_buckets;
	__u32 max_rules;
	__s32 max_devices;

	/* choose local retries before re-descent */
	__u32 choose_local_tries;
	/* choose local attempts using a fallback permutation before
	 * re-descent */
	__u32 choose_local_fallback_tries;
	/* choose attempts before giving up */
	__u32 choose_total_tries;
	/* attempt chooseleaf inner descent once for firstn mode; on
	 * reject retry outer descent.  Note that this does *not*
	 * apply to a collision: in that case we will retry as we used
	 * to. */
	__u32 chooseleaf_descend_once;

	/* if non-zero, feed r into chooseleaf, bit-shifted right by (r-1)
	 * bits.  a value of 1 is best for new clusters.  for legacy clusters
	 * that want to limit reshuffling, a value of 3 or 4 will make the
	 * mappings line up a bit better with previous mappings. */
	__u8 chooseleaf_vary_r;

	/* if true, chooseleaf firstn will return stable results (if
	 * there is no local retry) so that data migration is minimal
	 * when a device fails. */
	__u8 chooseleaf_stable;

	/*
	 * This value is calculated after decode or construction by
	 * the builder.  It is exposed here (rather than having a
	 * 'build CRUSH working space' function) so that callers can
	 * reserve a static buffer, allocate space on the stack, or
	 * otherwise avoid calling into the heap allocator if they
	 * want to.  The size of the working space depends on the map,
	 * while the size of the scratch vector passed to the mapper
	 * depends on the size of the desired result set.
	 *
	 * Nothing stops the caller from allocating both in one fell
	 * swoop and passing in two pointers, though.
	 */
	size_t working_size;

#ifndef __KERNEL__
	/*
	 * version 0 (original) of straw_calc has various flaws.  version 1
	 * fixes a few of them.
	 */
	__u8 straw_calc_version;

	/*
	 * allowed bucket algs is a bitmask, here the bit positions
	 * are CRUSH_BUCKET_*.  note that these are *bits* and
	 * CRUSH_BUCKET_* values are not, so we need to or together (1
	 * << CRUSH_BUCKET_WHATEVER).  The 0th bit is not used to
	 * minimize confusion (bucket type values start at 1).
	 */
	__u32 allowed_bucket_algs;

	__u32 *choose_tries;
#else
	/* CrushWrapper::choose_args */
	struct rb_root choose_args;
#endif
};

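/*
 * Illustrative sketch (not part of the original header): using the
 * working_size field described above to hand the mapper its working
 * space from a caller-managed buffer.  It assumes crush_init_workspace()
 * and crush_do_rule() as declared in crush/mapper.h and <linux/slab.h>
 * for kmalloc(); the helper name and parameters are hypothetical.
 * Disabled so the header itself is unchanged when compiled.
 */
#if 0
static int example_map_input(const struct crush_map *map, int ruleno, int x,
			     const __u32 *weights, int weight_max,
			     int *result, int result_max)
{
	void *work = kmalloc(map->working_size, GFP_NOIO);
	int num;

	if (!work)
		return -ENOMEM;
	crush_init_workspace(map, work);	/* set up per-bucket working state */
	num = crush_do_rule(map, ruleno, x, result, result_max,
			    weights, weight_max, work, NULL);
	kfree(work);
	return num;	/* number of items written to result[] */
}
#endif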

/* crush.c */
extern int crush_get_bucket_item_weight(const struct crush_bucket *b, int pos);
extern void crush_destroy_bucket_uniform(struct crush_bucket_uniform *b);
extern void crush_destroy_bucket_list(struct crush_bucket_list *b);
extern void crush_destroy_bucket_tree(struct crush_bucket_tree *b);
extern void crush_destroy_bucket_straw(struct crush_bucket_straw *b);
extern void crush_destroy_bucket_straw2(struct crush_bucket_straw2 *b);
extern void crush_destroy_bucket(struct crush_bucket *b);
extern void crush_destroy_rule(struct crush_rule *r);
extern void crush_destroy(struct crush_map *map);

static inline int crush_calc_tree_node(int i)
{
	return ((i+1) << 1)-1;
}
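
/*
 * For example (informal illustration, not from the original header):
 * items 0, 1 and 2 of a tree bucket map to node indices 1, 3 and 5,
 * i.e. the odd-numbered leaf positions of the implicit binary tree
 * stored in node_weights.
 */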

/*
 * These data structures are private to the CRUSH implementation.  They
 * are exposed in this header file because the builder needs their
 * definitions to calculate the total working size.
 *
 * Moving this out of the crush map allows us to treat the CRUSH map as
 * immutable within the mapper and removes the requirement for a CRUSH
 * map lock.
 */
struct crush_work_bucket {
	__u32 perm_x;	/* @x for which *perm is defined */
	__u32 perm_n;	/* num elements of *perm that are permuted/defined */
	__u32 *perm;	/* Permutation of the bucket's items */
};

struct crush_work {
	struct crush_work_bucket **work; /* Per-bucket working store */
};

#endif