#include <math.h>
#include "json.h"
#include "idletime.h"

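/* shared profiler state; volatile because the per-cpu profiling threads
 * poll ipc.status while the main thread updates it
 */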
static volatile struct idle_prof_common ipc;

/* Get the time to complete a unit of work on a particular cpu.
 * The minimum time over CALIBRATE_RUNS runs is returned.
 */
static double calibrate_unit(unsigned char *data)
{
	unsigned long t, i, j, k;
	struct timeval tps;
	double tunit = 0.0;

	for (i = 0; i < CALIBRATE_RUNS; i++) {

		fio_gettime(&tps, NULL);
		/* scale for less variance */
		for (j = 0; j < CALIBRATE_SCALE; j++) {
			/* unit of work */
			for (k = 0; k < page_size; k++) {
				data[(k + j) % page_size] = k % 256;
				/* we won't see STOP here. this is to match
				 * the same statement in the profiling loop.
				 */
				if (ipc.status == IDLE_PROF_STATUS_PROF_STOP)
					return 0.0;
			}
		}

		t = utime_since_now(&tps);
		if (!t)
			continue;

		/* get the minimum time to complete CALIBRATE_SCALE units */
		if ((i == 0) || ((double)t < tunit))
			tunit = (double)t;
	}

	return tunit / CALIBRATE_SCALE;
}

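/* Per-cpu profiling thread: pin to one cpu, calibrate the unit of work,
 * switch to the IDLE scheduling class where supported, then keep repeating
 * the same unit of work until told to stop. The number of loops completed
 * is later combined with the calibrated unit time to estimate idleness.
 */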
static void *idle_prof_thread_fn(void *data)
{
	int retval;
	unsigned long j, k;
	struct idle_prof_thread *ipt = data;

	/* wait until all threads are spawned */
	pthread_mutex_lock(&ipt->init_lock);

	/* exit if any other thread failed to start */
	if (ipc.status == IDLE_PROF_STATUS_ABORT) {
		pthread_mutex_unlock(&ipt->init_lock);
		return NULL;
	}

#if defined(FIO_HAVE_CPU_AFFINITY)
	os_cpu_mask_t cpu_mask;
	memset(&cpu_mask, 0, sizeof(cpu_mask));
	fio_cpu_set(&cpu_mask, ipt->cpu);

	if ((retval = fio_setaffinity(gettid(), cpu_mask)) == -1)
		log_err("fio: fio_setaffinity failed\n");
#else
	retval = -1;
	log_err("fio: fio_setaffinity not supported\n");
#endif
	if (retval == -1) {
		ipt->state = TD_EXITED;
		pthread_mutex_unlock(&ipt->init_lock);
		return NULL;
	}

	ipt->cali_time = calibrate_unit(ipt->data);

	/* delay setting the IDLE class until now for better calibration accuracy */
#if defined(FIO_HAVE_SCHED_IDLE)
	if ((retval = fio_set_sched_idle()))
		log_err("fio: fio_set_sched_idle failed\n");
#else
	retval = -1;
	log_err("fio: fio_set_sched_idle not supported\n");
#endif
	if (retval == -1) {
		ipt->state = TD_EXITED;
		pthread_mutex_unlock(&ipt->init_lock);
		return NULL;
	}

	ipt->state = TD_INITIALIZED;

	/* signal the main thread that calibration is done */
	pthread_cond_signal(&ipt->cond);
	pthread_mutex_unlock(&ipt->init_lock);

	/* wait for the other threads to finish calibration */
	pthread_mutex_lock(&ipt->start_lock);

	/* exit if other threads failed to initialize */
	if (ipc.status == IDLE_PROF_STATUS_ABORT) {
		pthread_mutex_unlock(&ipt->start_lock);
		return NULL;
	}

	/* exit if we are doing calibration only */
	if (ipc.status == IDLE_PROF_STATUS_CALI_STOP) {
		pthread_mutex_unlock(&ipt->start_lock);
		return NULL;
	}

	fio_gettime(&ipt->tps, NULL);
	ipt->state = TD_RUNNING;

	j = 0;
	while (1) {
		for (k = 0; k < page_size; k++) {
			ipt->data[(k + j) % page_size] = k % 256;
			if (ipc.status == IDLE_PROF_STATUS_PROF_STOP) {
				fio_gettime(&ipt->tpe, NULL);
				goto idle_prof_done;
			}
		}
		j++;
	}

idle_prof_done:

	ipt->loops = j + (double)k / page_size;
	ipt->state = TD_EXITED;
	pthread_mutex_unlock(&ipt->start_lock);

	return NULL;
}

/* calculate mean and standard deviation of the time to complete a unit of work */
static void calibration_stats(void)
{
	int i;
	double sum = 0.0, var = 0.0;
	struct idle_prof_thread *ipt;

	for (i = 0; i < ipc.nr_cpus; i++) {
		ipt = &ipc.ipts[i];
		sum += ipt->cali_time;
	}

	ipc.cali_mean = sum / ipc.nr_cpus;

	for (i = 0; i < ipc.nr_cpus; i++) {
		ipt = &ipc.ipts[i];
		var += pow(ipt->cali_time - ipc.cali_mean, 2);
	}

	ipc.cali_stddev = sqrt(var / (ipc.nr_cpus - 1));
}

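/* Spawn one profiling thread per online cpu and wait for all of them to
 * finish calibration. Any thread that fails to start or initialize puts
 * the profiler into the ABORT state.
 */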
void fio_idle_prof_init(void)
{
	int i, ret;
	struct timeval tp;
	struct timespec ts;
	pthread_attr_t tattr;
	struct idle_prof_thread *ipt;

	ipc.nr_cpus = cpus_online();
	ipc.status = IDLE_PROF_STATUS_OK;

	if (ipc.opt == IDLE_PROF_OPT_NONE)
		return;

	if ((ret = pthread_attr_init(&tattr))) {
		log_err("fio: pthread_attr_init %s\n", strerror(ret));
		return;
	}
	if ((ret = pthread_attr_setscope(&tattr, PTHREAD_SCOPE_SYSTEM))) {
		log_err("fio: pthread_attr_setscope %s\n", strerror(ret));
		return;
	}

	ipc.ipts = malloc(ipc.nr_cpus * sizeof(struct idle_prof_thread));
	if (!ipc.ipts) {
		log_err("fio: malloc failed\n");
		return;
	}

	ipc.buf = malloc(ipc.nr_cpus * page_size);
	if (!ipc.buf) {
		log_err("fio: malloc failed\n");
		free(ipc.ipts);
		return;
	}

	/* profiling aborts on any single thread failure since the
	 * result won't be accurate if any cpu is not used.
	 */
	for (i = 0; i < ipc.nr_cpus; i++) {
		ipt = &ipc.ipts[i];

		ipt->cpu = i;
		ipt->state = TD_NOT_CREATED;
		ipt->data = (unsigned char *)(ipc.buf + page_size * i);

		if ((ret = pthread_mutex_init(&ipt->init_lock, NULL))) {
			ipc.status = IDLE_PROF_STATUS_ABORT;
			log_err("fio: pthread_mutex_init %s\n", strerror(ret));
			break;
		}

		if ((ret = pthread_mutex_init(&ipt->start_lock, NULL))) {
			ipc.status = IDLE_PROF_STATUS_ABORT;
			log_err("fio: pthread_mutex_init %s\n", strerror(ret));
			break;
		}

		if ((ret = pthread_cond_init(&ipt->cond, NULL))) {
			ipc.status = IDLE_PROF_STATUS_ABORT;
			log_err("fio: pthread_cond_init %s\n", strerror(ret));
			break;
		}

		/* make sure all threads are spawned before they start */
		pthread_mutex_lock(&ipt->init_lock);

		/* make sure all threads finish init before profiling starts */
		pthread_mutex_lock(&ipt->start_lock);

		if ((ret = pthread_create(&ipt->thread, &tattr, idle_prof_thread_fn, ipt))) {
			ipc.status = IDLE_PROF_STATUS_ABORT;
			log_err("fio: pthread_create %s\n", strerror(ret));
			break;
		} else {
			ipt->state = TD_CREATED;
		}

		if ((ret = pthread_detach(ipt->thread))) {
			/* log error and let the thread spin */
			log_err("fio: pthread_detach %s\n", strerror(ret));
		}
	}

	/* let the good threads continue so that they can exit
	 * if errors occurred on other threads earlier.
	 */
	for (i = 0; i < ipc.nr_cpus; i++) {
		ipt = &ipc.ipts[i];
		pthread_mutex_unlock(&ipt->init_lock);
	}

	if (ipc.status == IDLE_PROF_STATUS_ABORT)
		return;

	/* wait for calibration to finish */
	for (i = 0; i < ipc.nr_cpus; i++) {
		ipt = &ipc.ipts[i];
		pthread_mutex_lock(&ipt->init_lock);
		while ((ipt->state != TD_EXITED) && (ipt->state != TD_INITIALIZED)) {
			fio_gettime(&tp, NULL);
			ts.tv_sec = tp.tv_sec + 1;
			ts.tv_nsec = tp.tv_usec * 1000;
			pthread_cond_timedwait(&ipt->cond, &ipt->init_lock, &ts);
		}
		pthread_mutex_unlock(&ipt->init_lock);

		/* a thread that failed to initialize will cause the other
		 * threads to be aborted later, after fio_idle_prof_start.
		 */
		if (ipt->state == TD_EXITED)
			ipc.status = IDLE_PROF_STATUS_ABORT;
	}

	if (ipc.status != IDLE_PROF_STATUS_ABORT)
		calibration_stats();
	else
		ipc.cali_mean = ipc.cali_stddev = 0.0;

	if (ipc.opt == IDLE_PROF_OPT_CALI)
		ipc.status = IDLE_PROF_STATUS_CALI_STOP;
}

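/* Release the per-thread start locks so the profiling threads leave the
 * post-calibration barrier and enter the profiling loop (or exit, if the
 * run was aborted or is calibration only).
 */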
void fio_idle_prof_start(void)
{
	int i;
	struct idle_prof_thread *ipt;

	if (ipc.opt == IDLE_PROF_OPT_NONE)
		return;

	/* unlock regardless of whether abort is set or not */
	for (i = 0; i < ipc.nr_cpus; i++) {
		ipt = &ipc.ipts[i];
		pthread_mutex_unlock(&ipt->start_lock);
	}
}

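/* Ask the profiling threads to stop, wait for them to exit, and compute
 * per-cpu idleness as loops completed * calibrated unit time / runtime.
 */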
void fio_idle_prof_stop(void)
{
	int i;
	uint64_t runt;
	struct timeval tp;
	struct timespec ts;
	struct idle_prof_thread *ipt;

	if (ipc.opt == IDLE_PROF_OPT_NONE)
		return;

	if (ipc.opt == IDLE_PROF_OPT_CALI)
		return;

	ipc.status = IDLE_PROF_STATUS_PROF_STOP;

	/* wait for all threads to exit from profiling */
	for (i = 0; i < ipc.nr_cpus; i++) {
		ipt = &ipc.ipts[i];
		pthread_mutex_lock(&ipt->start_lock);
		while ((ipt->state != TD_EXITED) && (ipt->state != TD_NOT_CREATED)) {
			fio_gettime(&tp, NULL);
			ts.tv_sec = tp.tv_sec + 1;
			ts.tv_nsec = tp.tv_usec * 1000;
			/* timed wait in case a signal is not received */
			pthread_cond_timedwait(&ipt->cond, &ipt->start_lock, &ts);
		}
		pthread_mutex_unlock(&ipt->start_lock);

		/* calculate idleness */
		if (ipc.cali_mean != 0.0) {
			runt = utime_since(&ipt->tps, &ipt->tpe);
			ipt->idleness = ipt->loops * ipc.cali_mean / runt;
		} else
			ipt->idleness = 0.0;
	}

	/* memory allocations are freed by an explicit fio_idle_prof_cleanup
	 * call after the profiling stats have been collected.
	 */

	return;
}

/* return the system idle percentage when cpu is -1;
 * otherwise return the idle percentage of the given cpu.
 */
static double fio_idle_prof_cpu_stat(int cpu)
{
	int i, nr_cpus = ipc.nr_cpus;
	struct idle_prof_thread *ipt;
	double p = 0.0;

	if (ipc.opt == IDLE_PROF_OPT_NONE)
		return 0.0;

	if ((cpu >= nr_cpus) || (cpu < -1)) {
		log_err("fio: idle profiling invalid cpu index\n");
		return 0.0;
	}

	if (cpu == -1) {
		for (i = 0; i < nr_cpus; i++) {
			ipt = &ipc.ipts[i];
			p += ipt->idleness;
		}
		p /= nr_cpus;
	} else {
		ipt = &ipc.ipts[cpu];
		p = ipt->idleness;
	}

	return p * 100.0;
}

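/* free the memory allocated by fio_idle_prof_init */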
void fio_idle_prof_cleanup(void)
{
	if (ipc.ipts) {
		free(ipc.ipts);
		ipc.ipts = NULL;
	}

	if (ipc.buf) {
		free(ipc.buf);
		ipc.buf = NULL;
	}
}

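/* Parse the idle-prof option string: "calibrate" runs a calibration-only
 * pass, prints its stats and returns 1; "system" and "percpu" select the
 * reporting mode and return 0; anything else (or an unsupported platform)
 * returns -1.
 */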
int fio_idle_prof_parse_opt(const char *args)
{
	ipc.opt = IDLE_PROF_OPT_NONE; /* default */

	if (!args) {
		log_err("fio: empty idle-prof option string\n");
		return -1;
	}

#if defined(FIO_HAVE_CPU_AFFINITY) && defined(FIO_HAVE_SCHED_IDLE)
	if (strcmp("calibrate", args) == 0) {
		ipc.opt = IDLE_PROF_OPT_CALI;
		fio_idle_prof_init();
		fio_idle_prof_start();
		fio_idle_prof_stop();
		show_idle_prof_stats(FIO_OUTPUT_NORMAL, NULL);
		return 1;
	} else if (strcmp("system", args) == 0) {
		ipc.opt = IDLE_PROF_OPT_SYSTEM;
		return 0;
	} else if (strcmp("percpu", args) == 0) {
		ipc.opt = IDLE_PROF_OPT_PERCPU;
		return 0;
	} else {
		log_err("fio: incorrect idle-prof option %s\n", args);
		return -1;
	}
#else
	log_err("fio: idle-prof not supported on this platform\n");
	return -1;
#endif
}

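/* Report the idleness results, either as normal log output or as a
 * "cpu_idleness" object added to the given JSON parent, then free the
 * profiling memory.
 */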
void show_idle_prof_stats(int output, struct json_object *parent)
{
	int i, nr_cpus = ipc.nr_cpus;
	struct json_object *tmp;
	char s[MAX_CPU_STR_LEN];

	if (output == FIO_OUTPUT_NORMAL) {
		if (ipc.opt > IDLE_PROF_OPT_CALI)
			log_info("\nCPU idleness:\n");
		else if (ipc.opt == IDLE_PROF_OPT_CALI)
			log_info("CPU idleness:\n");

		if (ipc.opt >= IDLE_PROF_OPT_SYSTEM)
			log_info(" system: %3.2f%%\n", fio_idle_prof_cpu_stat(-1));

		if (ipc.opt == IDLE_PROF_OPT_PERCPU) {
			log_info(" percpu: %3.2f%%", fio_idle_prof_cpu_stat(0));
			for (i = 1; i < nr_cpus; i++) {
				log_info(", %3.2f%%", fio_idle_prof_cpu_stat(i));
			}
			log_info("\n");
		}

		if (ipc.opt >= IDLE_PROF_OPT_CALI) {
			log_info(" unit work: mean=%3.2fus,", ipc.cali_mean);
			log_info(" stddev=%3.2f\n", ipc.cali_stddev);
		}

		/* dynamic mem allocations can now be freed */
		if (ipc.opt != IDLE_PROF_OPT_NONE)
			fio_idle_prof_cleanup();

		return;
	}

	if ((ipc.opt != IDLE_PROF_OPT_NONE) && (output == FIO_OUTPUT_JSON)) {
		if (!parent)
			return;

		tmp = json_create_object();
		if (!tmp)
			return;

		json_object_add_value_object(parent, "cpu_idleness", tmp);
		json_object_add_value_float(tmp, "system", fio_idle_prof_cpu_stat(-1));

		if (ipc.opt == IDLE_PROF_OPT_PERCPU) {
			for (i = 0; i < nr_cpus; i++) {
				snprintf(s, MAX_CPU_STR_LEN, "cpu-%d", i);
				json_object_add_value_float(tmp, s, fio_idle_prof_cpu_stat(i));
			}
		}

		json_object_add_value_float(tmp, "unit_mean", ipc.cali_mean);
		json_object_add_value_float(tmp, "unit_stddev", ipc.cali_stddev);

		fio_idle_prof_cleanup();

		return;
	}
}