Merge pull request #6223 from sreecha/set_ulimit

Enable core dumps for stress clients and servers when launching stress tests in GKE
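
The mechanism is a one-line rlimit change applied in the launcher scripts before the stress binaries are spawned. A minimal standalone sketch of that call (illustrative only, not part of the diff below):

    import resource

    # Remove the cap on core-file size; with the common default soft limit
    # of 0, the kernel writes no core file at all when a process crashes.
    resource.setrlimit(resource.RLIMIT_CORE,
                       (resource.RLIM_INFINITY, resource.RLIM_INFINITY))

    # Report the new (soft, hard) limits for verification.
    print(resource.getrlimit(resource.RLIMIT_CORE))

Doing this in the launcher rather than in each stress binary keeps the change in one place; as the comments in the patch note, it is only relevant for languages whose crashes produce core files (not Java or Go).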
diff --git a/tools/gcp/stress_test/run_client.py b/tools/gcp/stress_test/run_client.py
index 8f2a9c1..2004bf6 100755
--- a/tools/gcp/stress_test/run_client.py
+++ b/tools/gcp/stress_test/run_client.py
@@ -31,6 +31,7 @@
 import datetime
 import os
 import re
+import resource
 import select
 import subprocess
 import sys
@@ -89,6 +90,11 @@
                examining logs). This is the reason why the script waits forever
                in case of failures
   """
+  # Set the 'core file' size to 'unlimited' so that 'core' files are generated
+  # if the client crashes (Note: This is not relevant for Java and Go clients)
+  resource.setrlimit(resource.RLIMIT_CORE,
+                     (resource.RLIM_INFINITY, resource.RLIM_INFINITY))
+
   env = dict(os.environ)
   image_type = env['STRESS_TEST_IMAGE_TYPE']
   stress_client_cmd = env['STRESS_TEST_CMD'].split()
diff --git a/tools/gcp/stress_test/run_server.py b/tools/gcp/stress_test/run_server.py
index 796f092..a666ae2 100755
--- a/tools/gcp/stress_test/run_server.py
+++ b/tools/gcp/stress_test/run_server.py
@@ -30,6 +30,7 @@
 
 import datetime
 import os
+import resource
 import select
 import subprocess
 import sys
@@ -56,6 +57,10 @@
          might want to connect to the pod for examining logs). This is the
          reason why the script waits forever in case of failures.
   """
+  # Set the 'core file' size to 'unlimited' so that 'core' files are generated
+  # if the server crashes (Note: This is not relevant for Java and Go servers)
+  resource.setrlimit(resource.RLIMIT_CORE,
+                     (resource.RLIM_INFINITY, resource.RLIM_INFINITY))
 
   # Read the parameters from environment variables
   env = dict(os.environ)
@@ -78,9 +83,10 @@
   logfile_name = env.get('LOGFILE_NAME')
 
   print('pod_name: %s, project_id: %s, run_id: %s, dataset_id: %s, '
-        'summary_table_id: %s, qps_table_id: %s') % (
-            pod_name, project_id, run_id, dataset_id, summary_table_id,
-            qps_table_id)
+        'summary_table_id: %s, qps_table_id: %s') % (pod_name, project_id,
+                                                     run_id, dataset_id,
+                                                     summary_table_id,
+                                                     qps_table_id)
 
   bq_helper = BigQueryHelper(run_id, image_type, pod_name, project_id,
                              dataset_id, summary_table_id, qps_table_id)
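
Why setting the limit in run_client.py / run_server.py is sufficient: resource limits are inherited across fork/exec, so the stress binary launched via subprocess runs with the same unlimited RLIMIT_CORE. An illustrative check (hypothetical standalone script, not from this PR):

    import resource
    import subprocess
    import sys

    # Raise the core-file size limit in this (parent) process.
    resource.setrlimit(resource.RLIMIT_CORE,
                       (resource.RLIM_INFINITY, resource.RLIM_INFINITY))

    # A child started via subprocess inherits the limit; here it simply
    # prints its own (soft, hard) RLIMIT_CORE values to confirm that.
    subprocess.check_call([
        sys.executable, '-c',
        'import resource; print(resource.getrlimit(resource.RLIMIT_CORE))'
    ])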