/*
 * This is for all the tests related to refcount bugs (e.g. overflow,
 * underflow, reaching zero untested, etc).
 */
#include "lkdtm.h"
#include <linux/refcount.h>

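/*
 * These mirror the values the underlying refcount implementation is
 * expected to use when it saturates or (unsafely) resets a counter, so
 * the checks below can recognize each outcome.
 */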
#ifdef CONFIG_REFCOUNT_FULL
#define REFCOUNT_MAX		(UINT_MAX - 1)
#define REFCOUNT_SATURATED	UINT_MAX
#else
#define REFCOUNT_MAX		INT_MAX
#define REFCOUNT_SATURATED	(INT_MIN / 2)
#endif

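/* Report how the refcount implementation handled the attempted overflow. */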
static void overflow_check(refcount_t *ref)
{
	switch (refcount_read(ref)) {
	case REFCOUNT_SATURATED:
		pr_info("Overflow detected: saturated\n");
		break;
	case REFCOUNT_MAX:
		pr_warn("Overflow detected: unsafely reset to max\n");
		break;
	default:
		pr_err("Fail: refcount wrapped to %d\n", refcount_read(ref));
	}
}

/*
 * A refcount_inc() above the maximum value of the refcount implementation
 * should at least saturate, and at most also WARN.
 */
void lkdtm_REFCOUNT_INC_OVERFLOW(void)
{
	refcount_t over = REFCOUNT_INIT(REFCOUNT_MAX - 1);

	pr_info("attempting good refcount_inc() without overflow\n");
	refcount_dec(&over);
	refcount_inc(&over);

	pr_info("attempting bad refcount_inc() overflow\n");
	refcount_inc(&over);
	refcount_inc(&over);

	overflow_check(&over);
}

/* refcount_add() should behave just like refcount_inc() above. */
void lkdtm_REFCOUNT_ADD_OVERFLOW(void)
{
	refcount_t over = REFCOUNT_INIT(REFCOUNT_MAX - 1);

	pr_info("attempting good refcount_add() without overflow\n");
	refcount_dec(&over);
	refcount_dec(&over);
	refcount_dec(&over);
	refcount_dec(&over);
	refcount_add(4, &over);

	pr_info("attempting bad refcount_add() overflow\n");
	refcount_add(4, &over);

	overflow_check(&over);
}

/* refcount_inc_not_zero() should behave just like refcount_inc() above. */
void lkdtm_REFCOUNT_INC_NOT_ZERO_OVERFLOW(void)
{
	refcount_t over = REFCOUNT_INIT(REFCOUNT_MAX);

	pr_info("attempting bad refcount_inc_not_zero() overflow\n");
	if (!refcount_inc_not_zero(&over))
		pr_warn("Weird: refcount_inc_not_zero() reported zero\n");

	overflow_check(&over);
}

/* refcount_add_not_zero() should behave just like refcount_inc() above. */
void lkdtm_REFCOUNT_ADD_NOT_ZERO_OVERFLOW(void)
{
	refcount_t over = REFCOUNT_INIT(REFCOUNT_MAX);

	pr_info("attempting bad refcount_add_not_zero() overflow\n");
	if (!refcount_add_not_zero(6, &over))
		pr_warn("Weird: refcount_add_not_zero() reported zero\n");

	overflow_check(&over);
}

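/* Report the refcount state after the attempted decrement to zero. */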
static void check_zero(refcount_t *ref)
{
	switch (refcount_read(ref)) {
	case REFCOUNT_SATURATED:
		pr_info("Zero detected: saturated\n");
		break;
	case REFCOUNT_MAX:
		pr_warn("Zero detected: unsafely reset to max\n");
		break;
	case 0:
		pr_warn("Still at zero: refcount_inc/add() must not inc-from-0\n");
		break;
	default:
		pr_err("Fail: refcount went crazy: %d\n", refcount_read(ref));
	}
}

/*
 * Unlike refcount_dec_and_test(), a refcount_dec() that hits zero should
 * either saturate (when inc-from-zero isn't protected) or stay at zero
 * (when inc-from-zero is protected), and should WARN in both cases.
 */
void lkdtm_REFCOUNT_DEC_ZERO(void)
{
	refcount_t zero = REFCOUNT_INIT(2);

	pr_info("attempting good refcount_dec()\n");
	refcount_dec(&zero);

	pr_info("attempting bad refcount_dec() to zero\n");
	refcount_dec(&zero);

	check_zero(&zero);
}

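/* Report the refcount state after an attempt to go below zero. */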
static void check_negative(refcount_t *ref, int start)
{
	/*
	 * CONFIG_REFCOUNT_FULL refuses to move a refcount at all on an
	 * over-sub, so we have to track our starting position instead of
	 * looking only at zero-pinning.
	 */
	if (refcount_read(ref) == start) {
		pr_warn("Still at %d: refcount_inc/add() must not inc-from-0\n",
			start);
		return;
	}

	switch (refcount_read(ref)) {
	case REFCOUNT_SATURATED:
		pr_info("Negative detected: saturated\n");
		break;
	case REFCOUNT_MAX:
		pr_warn("Negative detected: unsafely reset to max\n");
		break;
	default:
		pr_err("Fail: refcount went crazy: %d\n", refcount_read(ref));
	}
}

/* A refcount_dec() going negative should saturate and may WARN. */
void lkdtm_REFCOUNT_DEC_NEGATIVE(void)
{
	refcount_t neg = REFCOUNT_INIT(0);

	pr_info("attempting bad refcount_dec() below zero\n");
	refcount_dec(&neg);

	check_negative(&neg, 0);
}

/*
 * A refcount_dec_and_test() should act like refcount_dec() above when
 * going negative.
 */
void lkdtm_REFCOUNT_DEC_AND_TEST_NEGATIVE(void)
{
	refcount_t neg = REFCOUNT_INIT(0);

	pr_info("attempting bad refcount_dec_and_test() below zero\n");
	if (refcount_dec_and_test(&neg))
		pr_warn("Weird: refcount_dec_and_test() reported zero\n");

	check_negative(&neg, 0);
}

/*
 * A refcount_sub_and_test() should act like refcount_dec_and_test()
 * above when going negative.
 */
void lkdtm_REFCOUNT_SUB_AND_TEST_NEGATIVE(void)
{
	refcount_t neg = REFCOUNT_INIT(3);

	pr_info("attempting bad refcount_sub_and_test() below zero\n");
	if (refcount_sub_and_test(5, &neg))
		pr_warn("Weird: refcount_sub_and_test() reported zero\n");

	check_negative(&neg, 3);
}

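/* Report the refcount state after an attempted increment or add from zero. */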
static void check_from_zero(refcount_t *ref)
{
	switch (refcount_read(ref)) {
	case 0:
		pr_info("Zero detected: stayed at zero\n");
		break;
	case REFCOUNT_SATURATED:
		pr_info("Zero detected: saturated\n");
		break;
	case REFCOUNT_MAX:
		pr_warn("Zero detected: unsafely reset to max\n");
		break;
	default:
		pr_err("Fail: zero not detected, incremented to %d\n",
			refcount_read(ref));
	}
}

/*
 * A refcount_inc() from zero should pin to zero or saturate and may WARN.
 * Only CONFIG_REFCOUNT_FULL provides this protection currently.
 */
void lkdtm_REFCOUNT_INC_ZERO(void)
{
	refcount_t zero = REFCOUNT_INIT(0);

	pr_info("attempting safe refcount_inc_not_zero() from zero\n");
	if (!refcount_inc_not_zero(&zero)) {
		pr_info("Good: zero detected\n");
		if (refcount_read(&zero) == 0)
			pr_info("Correctly stayed at zero\n");
		else
			pr_err("Fail: refcount went past zero!\n");
	} else {
		pr_err("Fail: Zero not detected!?\n");
	}

	pr_info("attempting bad refcount_inc() from zero\n");
	refcount_inc(&zero);

	check_from_zero(&zero);
}

/*
 * A refcount_add() should act like refcount_inc() above when starting
 * at zero.
 */
void lkdtm_REFCOUNT_ADD_ZERO(void)
{
	refcount_t zero = REFCOUNT_INIT(0);

	pr_info("attempting safe refcount_add_not_zero() from zero\n");
	if (!refcount_add_not_zero(3, &zero)) {
		pr_info("Good: zero detected\n");
		if (refcount_read(&zero) == 0)
			pr_info("Correctly stayed at zero\n");
		else
			pr_err("Fail: refcount went past zero\n");
	} else {
		pr_err("Fail: Zero not detected!?\n");
	}

	pr_info("attempting bad refcount_add() from zero\n");
	refcount_add(3, &zero);

	check_from_zero(&zero);
}

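/* Report the refcount state after operating on a saturated counter. */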
static void check_saturated(refcount_t *ref)
{
	switch (refcount_read(ref)) {
	case REFCOUNT_SATURATED:
		pr_info("Saturation detected: still saturated\n");
		break;
	case REFCOUNT_MAX:
		pr_warn("Saturation detected: unsafely reset to max\n");
		break;
	default:
		pr_err("Fail: refcount went crazy: %d\n", refcount_read(ref));
	}
}

/*
 * A refcount_inc() from a saturated value should at most warn about
 * being saturated already.
 */
void lkdtm_REFCOUNT_INC_SATURATED(void)
{
	refcount_t sat = REFCOUNT_INIT(REFCOUNT_SATURATED);

	pr_info("attempting bad refcount_inc() from saturated\n");
	refcount_inc(&sat);

	check_saturated(&sat);
}

/* Should act like refcount_inc() above from saturated. */
void lkdtm_REFCOUNT_DEC_SATURATED(void)
{
	refcount_t sat = REFCOUNT_INIT(REFCOUNT_SATURATED);

	pr_info("attempting bad refcount_dec() from saturated\n");
	refcount_dec(&sat);

	check_saturated(&sat);
}

/* Should act like refcount_inc() above from saturated. */
void lkdtm_REFCOUNT_ADD_SATURATED(void)
{
	refcount_t sat = REFCOUNT_INIT(REFCOUNT_SATURATED);

	pr_info("attempting bad refcount_add() from saturated\n");
	refcount_add(8, &sat);

	check_saturated(&sat);
}

/* Should act like refcount_inc() above from saturated. */
void lkdtm_REFCOUNT_INC_NOT_ZERO_SATURATED(void)
{
	refcount_t sat = REFCOUNT_INIT(REFCOUNT_SATURATED);

	pr_info("attempting bad refcount_inc_not_zero() from saturated\n");
	if (!refcount_inc_not_zero(&sat))
		pr_warn("Weird: refcount_inc_not_zero() reported zero\n");

	check_saturated(&sat);
}

/* Should act like refcount_inc() above from saturated. */
void lkdtm_REFCOUNT_ADD_NOT_ZERO_SATURATED(void)
{
	refcount_t sat = REFCOUNT_INIT(REFCOUNT_SATURATED);

	pr_info("attempting bad refcount_add_not_zero() from saturated\n");
	if (!refcount_add_not_zero(7, &sat))
		pr_warn("Weird: refcount_add_not_zero() reported zero\n");

	check_saturated(&sat);
}

/* Should act like refcount_inc() above from saturated. */
void lkdtm_REFCOUNT_DEC_AND_TEST_SATURATED(void)
{
	refcount_t sat = REFCOUNT_INIT(REFCOUNT_SATURATED);

	pr_info("attempting bad refcount_dec_and_test() from saturated\n");
	if (refcount_dec_and_test(&sat))
		pr_warn("Weird: refcount_dec_and_test() reported zero\n");

	check_saturated(&sat);
}

/* Should act like refcount_inc() above from saturated. */
void lkdtm_REFCOUNT_SUB_AND_TEST_SATURATED(void)
{
	refcount_t sat = REFCOUNT_INIT(REFCOUNT_SATURATED);

	pr_info("attempting bad refcount_sub_and_test() from saturated\n");
	if (refcount_sub_and_test(8, &sat))
		pr_warn("Weird: refcount_sub_and_test() reported zero\n");

	check_saturated(&sat);
}

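/*
 * For a baseline comparison, ATOMIC_TIMING can be invoked the same way as
 * the REFCOUNT_TIMING example below, e.g.:
 *	perf stat -B -- cat <(echo ATOMIC_TIMING) > DIRECT
 */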
/* Used to time the existing atomic_t when used for reference counting */
void lkdtm_ATOMIC_TIMING(void)
{
	unsigned int i;
	atomic_t count = ATOMIC_INIT(1);

	for (i = 0; i < INT_MAX - 1; i++)
		atomic_inc(&count);

	for (i = INT_MAX; i > 0; i--)
		if (atomic_dec_and_test(&count))
			break;

	if (i != 1)
		pr_err("atomic timing: out of sync up/down cycle: %u\n", i - 1);
	else
		pr_info("atomic timing: done\n");
}

/*
 * This can be compared to ATOMIC_TIMING when implementing fast refcount
 * protections. Looking at the number of CPU cycles tells the real story
 * about performance. For example:
 *	cd /sys/kernel/debug/provoke-crash
 *	perf stat -B -- cat <(echo REFCOUNT_TIMING) > DIRECT
 */
void lkdtm_REFCOUNT_TIMING(void)
{
	unsigned int i;
	refcount_t count = REFCOUNT_INIT(1);

	for (i = 0; i < INT_MAX - 1; i++)
		refcount_inc(&count);

	for (i = INT_MAX; i > 0; i--)
		if (refcount_dec_and_test(&count))
			break;

	if (i != 1)
		pr_err("refcount: out of sync up/down cycle: %u\n", i - 1);
	else
		pr_info("refcount timing: done\n");
}