autoserv: Manually flush metrics.
autoserv processes are short-lived. It doesn't make sense to use a
background process to report metrics for autoserv.
This change implies that any processes forked by autoserv can no longer
report metrics.
BUG=chromium:682318
TEST=Verify that metrics from autoserv are still reported correctly.
Change-Id: Idfa11265fe522701e11720efaedd6180c35dd9b1
Reviewed-on: https://chromium-review.googlesource.com/432042
Commit-Ready: Prathmesh Prabhu <pprabhu@chromium.org>
Tested-by: Prathmesh Prabhu <pprabhu@chromium.org>
Reviewed-by: Aviv Keshet <akeshet@chromium.org>
diff --git a/server/autoserv b/server/autoserv
index 6e14399..54760c3 100755
--- a/server/autoserv
+++ b/server/autoserv
@@ -520,57 +520,60 @@
auto_start_servod = _CONFIG.get_config_value(
'AUTOSERV', 'auto_start_servod', type=bool, default=False)
+ site_utils.SetupTsMonGlobalState('autoserv', indirect=False,
+ short_lived=True)
try:
- with site_utils.SetupTsMonGlobalState('autoserv', indirect=True, short_lived=True):
- try:
- if repair:
- if auto_start_servod and len(machines) == 1:
- _start_servod(machines[0])
- job.repair(job_labels)
- elif verify:
- job.verify(job_labels)
- elif provision:
- job.provision(job_labels)
- elif reset:
- job.reset(job_labels)
- elif cleanup:
- job.cleanup(job_labels)
+ try:
+ if repair:
+ if auto_start_servod and len(machines) == 1:
+ _start_servod(machines[0])
+ job.repair(job_labels)
+ elif verify:
+ job.verify(job_labels)
+ elif provision:
+ job.provision(job_labels)
+ elif reset:
+ job.reset(job_labels)
+ elif cleanup:
+ job.cleanup(job_labels)
+ else:
+ if auto_start_servod and len(machines) == 1:
+ _start_servod(machines[0])
+ if use_ssp:
+ try:
+ _run_with_ssp(job, container_name, job_or_task_id,
+ results, parser, ssp_url, job_folder,
+ machines)
+ finally:
+ # Update the ownership of files in result folder.
+ correct_results_folder_permission(results)
else:
- if auto_start_servod and len(machines) == 1:
- _start_servod(machines[0])
- if use_ssp:
+ if collect_crashinfo:
+ # Update the ownership of files in result folder. If the
+ # job to collect crashinfo was running inside container
+ # (SSP) and crashed before correcting folder permission,
+ # the result folder might have wrong permission setting.
try:
- _run_with_ssp(job, container_name, job_or_task_id,
- results, parser, ssp_url, job_folder,
- machines)
- finally:
- # Update the ownership of files in result folder.
correct_results_folder_permission(results)
- else:
- if collect_crashinfo:
- # Update the ownership of files in result folder. If the
- # job to collect crashinfo was running inside container
- # (SSP) and crashed before correcting folder permission,
- # the result folder might have wrong permission setting.
- try:
- correct_results_folder_permission(results)
- except:
- # Ignore any error as the user may not have root
- # permission to run sudo command.
- pass
- job.run(install_before, install_after,
- verify_job_repo_url=verify_job_repo_url,
- only_collect_crashinfo=collect_crashinfo,
- skip_crash_collection=skip_crash_collection,
- job_labels=job_labels,
- use_packaging=(not no_use_packaging))
- finally:
- while job.hosts:
- host = job.hosts.pop()
- host.close()
+ except:
+ # Ignore any error as the user may not have root
+ # permission to run sudo command.
+ pass
+ job.run(install_before, install_after,
+ verify_job_repo_url=verify_job_repo_url,
+ only_collect_crashinfo=collect_crashinfo,
+ skip_crash_collection=skip_crash_collection,
+ job_labels=job_labels,
+ use_packaging=(not no_use_packaging))
+ finally:
+ while job.hosts:
+ host = job.hosts.pop()
+ host.close()
except:
exit_code = 1
traceback.print_exc()
+ finally:
+ metrics.Flush()
if pid_file_manager:
pid_file_manager.num_tests_failed = job.num_tests_failed