Regen docs (#373)
diff --git a/docs/dyn/dataproc_v1alpha1.projects.regions.jobs.html b/docs/dyn/dataproc_v1alpha1.projects.regions.jobs.html
index 298b4ee..eb57c6c 100644
--- a/docs/dyn/dataproc_v1alpha1.projects.regions.jobs.html
+++ b/docs/dyn/dataproc_v1alpha1.projects.regions.jobs.html
@@ -121,12 +121,48 @@
{ # A Cloud Dataproc job resource.
"status": { # Cloud Dataproc job status. # Output-only The job status. Additional application-specific status information may be contained in the <code>type_job</code> and <code>yarn_applications</code> fields.
"stateStartTime": "A String", # Output-only The time when this state was entered.
+ "substate": "A String", # Output-only Additional state information, which includes status reported by the agent.
"startTime": "A String", # The time when the server started the job.
"state": "A String", # Required A state message specifying the overall job state.
"details": "A String", # Optional Job state details, such as an error description if the state is <code>ERROR</code>.
"insertTime": "A String", # The time of the job request.
"endTime": "A String", # The time when the job completed.
},
+ "hadoopJob": { # A Cloud Dataproc job for running Hadoop MapReduce jobs on YARN. # Job is a Hadoop job.
+ "args": [ # Optional The arguments to pass to the driver. Do not include arguments, such as -libjars or -Dfoo=bar, that can be set as job properties, since a collision may occur that causes an incorrect job submission.
+ "A String",
+ ],
+ "jarFileUris": [ # Optional Jar file URIs to add to the CLASSPATHs of the Hadoop driver and tasks.
+ "A String",
+ ],
+ "loggingConfiguration": { # The runtime logging configuration of the job. # Optional The runtime log configuration for job execution.
+ "driverLogLevels": { # The per-package log levels for the driver. This may include 'root' package name to configure rootLogger. Examples: com.google = FATAL, root = INFO, org.apache = DEBUG
+ "a_key": "A String",
+ },
+ },
+ "fileUris": [ # Optional HCFS URIs of files to be copied to the working directory of Hadoop drivers and distributed tasks. Useful for naively parallel tasks.
+ "A String",
+ ],
+ "mainClass": "A String", # The name of the driver's main class. The jar file containing the class must be in the default CLASSPATH or specified in jar_file_uris.
+ "archiveUris": [ # Optional HCFS URIs of archives to be extracted in the working directory of Hadoop drivers and tasks. Supported file types: .jar, .tar, .tar.gz, .tgz, or .zip.
+ "A String",
+ ],
+ "mainJarFileUri": "A String", # The Hadoop Compatible Filesystem (HCFS) URI of the jar file containing the main class. Examples: gs://foo-bucket/analytics-binaries/extract-useful-metrics-mr.jar hdfs:/tmp/test-samples/custom-wordcount.jar file:///home/usr/lib/hadoop-mapreduce/hadoop-mapreduce-examples.jar
+ "properties": { # Optional A mapping of property names to values, used to configure Hadoop. Properties that conflict with values set by the Cloud Dataproc API may be overwritten. Can include properties set in /etc/hadoop/conf/*-site and classes in user code.
+ "a_key": "A String",
+ },
+ },
+      "labels": { # Optional The labels to associate with this job. Label keys must be between 1 and 63 characters long, and must conform to the following regular expression: \p{Ll}\p{Lo}{0,62}. Label values must be between 1 and 63 characters long, and must conform to the following regular expression: \p{Ll}\p{Lo}\p{N}_-{0,63}. No more than 64 labels can be associated with a given job.
+ "a_key": "A String",
+ },
+ "placement": { # Cloud Dataproc job configuration. # Required Job information, including how, when, and where to run the job.
+ "clusterName": "A String", # Required The name of the cluster where the job will be submitted.
+ "clusterUuid": "A String", # Output-only A cluster UUID generated by the Dataproc service when the job is submitted.
+ },
+ "reference": { # Encapsulates the full scoping used to reference a job. # Optional The fully-qualified reference to the job, which can be used to obtain the equivalent REST path of the job resource. If this property is not specified when a job is created, the server generates a <code>job_id</code>.
+ "projectId": "A String", # Required The ID of the Google Cloud Platform project that the job belongs to.
+ "jobId": "A String", # Required The job ID, which must be unique within the project. The job ID is generated by the server upon job submission or provided by the user as a means to perform retries without creating duplicate jobs. The ID must contain only letters (a-z, A-Z), numbers (0-9), underscores (_), or dashes (-). The maximum length is 100 characters.
+ },
"sparkSqlJob": { # A Cloud Dataproc job for running Spark SQL queries. # Job is a SparkSql job.
"queryFileUri": "A String", # The HCFS URI of the script that contains SQL queries.
"scriptVariables": { # Optional Mapping of query variable names to values (equivalent to the Spark SQL command: SET name="value";).
@@ -158,26 +194,21 @@
"a_key": "A String",
},
},
-    "labels": { # Optional The labels to associate with this job. Label keys must be between 1 and 63 characters long, and must conform to the following regular expression: \p{Ll}\p{Lo}{0,62}. Label values must be between 1 and 63 characters long, and must conform to the following regular expression: \p{Ll}\p{Lo}\p{N}_-{0,63}. No more than 64 labels can be associated with a given job.
- "a_key": "A String",
- },
"yarnApplications": [ # Output-only The collection of Yarn applications spun up by this job.
{ # A YARN application created by a job. Application information is a subset of <code>org.apache.hadoop.yarn.proto.YarnProtos.ApplicationReportProto</code>.
"progress": 3.14, # Required The numerical progress of the application, from 1 to 100.
"state": "A String", # Required The application state.
- "trackingUrl": "A String", # Optional The HTTP URL of the ApplicationMaster, HistoryServer, or TimelineServer that provides application-specific information. The URL uses the internal hostname, and requires a proxy server for resolution and, possibly, access.
"name": "A String", # Required The application name.
+ "trackingUrl": "A String", # Optional The HTTP URL of the ApplicationMaster, HistoryServer, or TimelineServer that provides application-specific information. The URL uses the internal hostname, and requires a proxy server for resolution and, possibly, access.
},
],
- "reference": { # Encapsulates the full scoping used to reference a job. # Optional The fully-qualified reference to the job, which can be used to obtain the equivalent REST path of the job resource. If this property is not specified when a job is created, the server generates a <code>job_id</code>.
- "projectId": "A String", # Required The ID of the Google Cloud Platform project that the job belongs to.
- "jobId": "A String", # Required The job ID, which must be unique within the project. The job ID is generated by the server upon job submission or provided by the user as a means to perform retries without creating duplicate jobs. The ID must contain only letters (a-z, A-Z), numbers (0-9), underscores (_), or dashes (-). The maximum length is 100 characters.
- },
- "hadoopJob": { # A Cloud Dataproc job for running Hadoop MapReduce jobs on YARN. # Job is a Hadoop job.
- "jarFileUris": [ # Optional Jar file URIs to add to the CLASSPATHs of the Hadoop driver and tasks.
- "A String",
- ],
- "args": [ # Optional The arguments to pass to the driver. Do not include arguments, such as -libjars or -Dfoo=bar, that can be set as job properties, since a collision may occur that causes an incorrect job submission.
+ "driverOutputUri": "A String", # Output-only A URI pointing to the location of the mixed stdout/stderr of the job's driver program—for example, <code>gs://sysbucket123/foo-cluster/jobid-123/driver/output</code>.
+ "pigJob": { # A Cloud Dataproc job for running Pig queries on YARN. # Job is a Pig job.
+ "queryFileUri": "A String", # The HCFS URI of the script that contains the Pig queries.
+ "scriptVariables": { # Optional Mapping of query variable names to values (equivalent to the Pig command: "name=value").
+ "a_key": "A String",
+ },
+ "jarFileUris": [ # Optional HCFS URIs of jar files to add to the CLASSPATH of the Pig Client and Hadoop MapReduce (MR) tasks. Can contain Pig UDFs.
"A String",
],
"loggingConfiguration": { # The runtime logging configuration of the job. # Optional The runtime log configuration for job execution.
@@ -185,28 +216,6 @@
"a_key": "A String",
},
},
- "fileUris": [ # Optional HCFS URIs of files to be copied to the working directory of Hadoop drivers and distributed tasks. Useful for naively parallel tasks.
- "A String",
- ],
- "mainClass": "A String", # The name of the driver's main class. The jar file containing the class must be in the default CLASSPATH or specified in jar_file_uris.
- "archiveUris": [ # Optional HCFS URIs of archives to be extracted in the working directory of Hadoop drivers and tasks. Supported file types: .jar, .tar, .tar.gz, .tgz, or .zip.
- "A String",
- ],
- "mainJarFileUri": "A String", # The Hadoop Compatible Filesystem (HCFS) URI of the jar file containing the main class. Examples: gs://foo-bucket/analytics-binaries/extract-useful-metrics-mr.jar hdfs:/tmp/test-samples/custom-wordcount.jar file:///home/usr/lib/hadoop-mapreduce/hadoop-mapreduce-examples.jar
- "properties": { # Optional A mapping of property names to values, used to configure Hadoop. Properties that conflict with values set by the Cloud Dataproc API may be overwritten. Can include properties set in /etc/hadoop/conf/*-site and classes in user code.
- "a_key": "A String",
- },
- },
- "driverOutputUri": "A String", # Output-only A URI pointing to the location of the mixed stdout/stderr of the job's driver program—for example, <code>gs://sysbucket123/foo-cluster/jobid-123/driver/output</code>.
- "pigJob": { # A Cloud Dataproc job for running Pig queries on YARN. # Job is a Pig job.
- "queryFileUri": "A String", # The HCFS URI of the script that contains the Pig queries.
- "scriptVariables": { # Optional Mapping of query variable names to values (equivalent to the Pig command: "name=value").
- "a_key": "A String",
- },
- "continueOnFailure": True or False, # Optional Whether to continue executing queries if a query fails. The default value is false. Setting to true can be useful when executing independent parallel queries.
- "jarFileUris": [ # Optional HCFS URIs of jar files to add to the CLASSPATH of the Pig Client and Hadoop MapReduce (MR) tasks. Can contain Pig UDFs.
- "A String",
- ],
"queryList": { # A list of queries to run on a cluster. # A list of queries.
"queries": [ # Required The queries to execute. You do not need to terminate a query with a semicolon. Multiple queries can be specified in one string by separating each with a semicolon. Here is an example of an Cloud Dataproc API snippet that uses a QueryList to specify a HiveJob:
# "hiveJob": {
@@ -221,11 +230,7 @@
"A String",
],
},
- "loggingConfiguration": { # The runtime logging configuration of the job. # Optional The runtime log configuration for job execution.
- "driverLogLevels": { # The per-package log levels for the driver. This may include 'root' package name to configure rootLogger. Examples: com.google = FATAL, root = INFO, org.apache = DEBUG
- "a_key": "A String",
- },
- },
+ "continueOnFailure": True or False, # Optional Whether to continue executing queries if a query fails. The default value is false. Setting to true can be useful when executing independent parallel queries.
"properties": { # Optional A mapping of property names to values, used to configure Pig. Properties that conflict with values set by the Cloud Dataproc API may be overwritten. Can include properties set in /etc/hadoop/conf/*-site.xml, /etc/pig/conf/pig.properties, and classes in user code.
"a_key": "A String",
},
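
pigJob, shown above with queryList as the alternative to queryFileUri, can carry its queries inline. A hedged sketch follows (bucket, jar, and variable names are made up):

    pig_job_body = {
        "pigJob": {
            "queryList": {
                # Semicolons separate queries; a terminating semicolon is optional.
                "queries": [
                    "REGISTER gs://example-bucket/example-udfs.jar",
                    "raw = LOAD '$INPUT' AS (line:chararray); DUMP raw",
                ],
            },
            "scriptVariables": {"INPUT": "gs://example-bucket/data"},
            "jarFileUris": ["gs://example-bucket/example-udfs.jar"],
            "continueOnFailure": False,  # Default; True suits independent parallel queries
        },
    }
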
@@ -260,10 +265,10 @@
"driverControlFilesUri": "A String", # Output-only If present, the location of miscellaneous control files which may be used as part of job setup and handling. If not present, control files may be placed in the same location as driver_output_uri.
"driverInputResourceUri": "A String", # Output-only A URI pointing to the location of the stdin of the job's driver program, only set if the job is interactive.
"sparkJob": { # A Cloud Dataproc job for running Spark applications on YARN. # Job is a Spark job.
- "args": [ # Optional The arguments to pass to the driver. Do not include arguments, such as --conf, that can be set as job properties, since a collision may occur that causes an incorrect job submission.
+ "jarFileUris": [ # Optional HCFS URIs of jar files to add to the CLASSPATHs of the Spark driver and tasks.
"A String",
],
- "jarFileUris": [ # Optional HCFS URIs of jar files to add to the CLASSPATHs of the Spark driver and tasks.
+ "args": [ # Optional The arguments to pass to the driver. Do not include arguments, such as --conf, that can be set as job properties, since a collision may occur that causes an incorrect job submission.
"A String",
],
"loggingConfiguration": { # The runtime logging configuration of the job. # Optional The runtime log configuration for job execution.
@@ -287,13 +292,10 @@
"scheduling": { # Job scheduling options.Beta Feature: These options are available for testing purposes only. They may be changed before final release. # Optional Job scheduling configuration.
"maxFailuresPerHour": 42, # Optional Maximum number of times per hour a driver may be restarted as a result of driver terminating with non-zero code before job is reported failed.A job may be reported as thrashing if driver exits with non-zero code 4 times within 10 minute window.Maximum value is 10.
},
- "placement": { # Cloud Dataproc job configuration. # Required Job information, including how, when, and where to run the job.
- "clusterName": "A String", # Required The name of the cluster where the job will be submitted.
- "clusterUuid": "A String", # Output-only A cluster UUID generated by the Dataproc service when the job is submitted.
- },
"statusHistory": [ # Output-only The previous job status.
{ # Cloud Dataproc job status.
"stateStartTime": "A String", # Output-only The time when this state was entered.
+ "substate": "A String", # Output-only Additional state information, which includes status reported by the agent.
"startTime": "A String", # The time when the server started the job.
"state": "A String", # Required A state message specifying the overall job state.
"details": "A String", # Optional Job state details, such as an error description if the state is <code>ERROR</code>.
@@ -351,12 +353,48 @@
{ # A Cloud Dataproc job resource.
"status": { # Cloud Dataproc job status. # Output-only The job status. Additional application-specific status information may be contained in the <code>type_job</code> and <code>yarn_applications</code> fields.
"stateStartTime": "A String", # Output-only The time when this state was entered.
+ "substate": "A String", # Output-only Additional state information, which includes status reported by the agent.
"startTime": "A String", # The time when the server started the job.
"state": "A String", # Required A state message specifying the overall job state.
"details": "A String", # Optional Job state details, such as an error description if the state is <code>ERROR</code>.
"insertTime": "A String", # The time of the job request.
"endTime": "A String", # The time when the job completed.
},
+ "hadoopJob": { # A Cloud Dataproc job for running Hadoop MapReduce jobs on YARN. # Job is a Hadoop job.
+ "args": [ # Optional The arguments to pass to the driver. Do not include arguments, such as -libjars or -Dfoo=bar, that can be set as job properties, since a collision may occur that causes an incorrect job submission.
+ "A String",
+ ],
+ "jarFileUris": [ # Optional Jar file URIs to add to the CLASSPATHs of the Hadoop driver and tasks.
+ "A String",
+ ],
+ "loggingConfiguration": { # The runtime logging configuration of the job. # Optional The runtime log configuration for job execution.
+ "driverLogLevels": { # The per-package log levels for the driver. This may include 'root' package name to configure rootLogger. Examples: com.google = FATAL, root = INFO, org.apache = DEBUG
+ "a_key": "A String",
+ },
+ },
+ "fileUris": [ # Optional HCFS URIs of files to be copied to the working directory of Hadoop drivers and distributed tasks. Useful for naively parallel tasks.
+ "A String",
+ ],
+ "mainClass": "A String", # The name of the driver's main class. The jar file containing the class must be in the default CLASSPATH or specified in jar_file_uris.
+ "archiveUris": [ # Optional HCFS URIs of archives to be extracted in the working directory of Hadoop drivers and tasks. Supported file types: .jar, .tar, .tar.gz, .tgz, or .zip.
+ "A String",
+ ],
+ "mainJarFileUri": "A String", # The Hadoop Compatible Filesystem (HCFS) URI of the jar file containing the main class. Examples: gs://foo-bucket/analytics-binaries/extract-useful-metrics-mr.jar hdfs:/tmp/test-samples/custom-wordcount.jar file:///home/usr/lib/hadoop-mapreduce/hadoop-mapreduce-examples.jar
+ "properties": { # Optional A mapping of property names to values, used to configure Hadoop. Properties that conflict with values set by the Cloud Dataproc API may be overwritten. Can include properties set in /etc/hadoop/conf/*-site and classes in user code.
+ "a_key": "A String",
+ },
+ },
+      "labels": { # Optional The labels to associate with this job. Label keys must be between 1 and 63 characters long, and must conform to the following regular expression: \p{Ll}\p{Lo}{0,62}. Label values must be between 1 and 63 characters long, and must conform to the following regular expression: \p{Ll}\p{Lo}\p{N}_-{0,63}. No more than 64 labels can be associated with a given job.
+ "a_key": "A String",
+ },
+ "placement": { # Cloud Dataproc job configuration. # Required Job information, including how, when, and where to run the job.
+ "clusterName": "A String", # Required The name of the cluster where the job will be submitted.
+ "clusterUuid": "A String", # Output-only A cluster UUID generated by the Dataproc service when the job is submitted.
+ },
+ "reference": { # Encapsulates the full scoping used to reference a job. # Optional The fully-qualified reference to the job, which can be used to obtain the equivalent REST path of the job resource. If this property is not specified when a job is created, the server generates a <code>job_id</code>.
+ "projectId": "A String", # Required The ID of the Google Cloud Platform project that the job belongs to.
+ "jobId": "A String", # Required The job ID, which must be unique within the project. The job ID is generated by the server upon job submission or provided by the user as a means to perform retries without creating duplicate jobs. The ID must contain only letters (a-z, A-Z), numbers (0-9), underscores (_), or dashes (-). The maximum length is 100 characters.
+ },
"sparkSqlJob": { # A Cloud Dataproc job for running Spark SQL queries. # Job is a SparkSql job.
"queryFileUri": "A String", # The HCFS URI of the script that contains SQL queries.
"scriptVariables": { # Optional Mapping of query variable names to values (equivalent to the Spark SQL command: SET name="value";).
@@ -388,26 +426,21 @@
"a_key": "A String",
},
},
-    "labels": { # Optional The labels to associate with this job. Label keys must be between 1 and 63 characters long, and must conform to the following regular expression: \p{Ll}\p{Lo}{0,62}. Label values must be between 1 and 63 characters long, and must conform to the following regular expression: \p{Ll}\p{Lo}\p{N}_-{0,63}. No more than 64 labels can be associated with a given job.
- "a_key": "A String",
- },
"yarnApplications": [ # Output-only The collection of Yarn applications spun up by this job.
{ # A YARN application created by a job. Application information is a subset of <code>org.apache.hadoop.yarn.proto.YarnProtos.ApplicationReportProto</code>.
"progress": 3.14, # Required The numerical progress of the application, from 1 to 100.
"state": "A String", # Required The application state.
- "trackingUrl": "A String", # Optional The HTTP URL of the ApplicationMaster, HistoryServer, or TimelineServer that provides application-specific information. The URL uses the internal hostname, and requires a proxy server for resolution and, possibly, access.
"name": "A String", # Required The application name.
+ "trackingUrl": "A String", # Optional The HTTP URL of the ApplicationMaster, HistoryServer, or TimelineServer that provides application-specific information. The URL uses the internal hostname, and requires a proxy server for resolution and, possibly, access.
},
],
- "reference": { # Encapsulates the full scoping used to reference a job. # Optional The fully-qualified reference to the job, which can be used to obtain the equivalent REST path of the job resource. If this property is not specified when a job is created, the server generates a <code>job_id</code>.
- "projectId": "A String", # Required The ID of the Google Cloud Platform project that the job belongs to.
- "jobId": "A String", # Required The job ID, which must be unique within the project. The job ID is generated by the server upon job submission or provided by the user as a means to perform retries without creating duplicate jobs. The ID must contain only letters (a-z, A-Z), numbers (0-9), underscores (_), or dashes (-). The maximum length is 100 characters.
- },
- "hadoopJob": { # A Cloud Dataproc job for running Hadoop MapReduce jobs on YARN. # Job is a Hadoop job.
- "jarFileUris": [ # Optional Jar file URIs to add to the CLASSPATHs of the Hadoop driver and tasks.
- "A String",
- ],
- "args": [ # Optional The arguments to pass to the driver. Do not include arguments, such as -libjars or -Dfoo=bar, that can be set as job properties, since a collision may occur that causes an incorrect job submission.
+ "driverOutputUri": "A String", # Output-only A URI pointing to the location of the mixed stdout/stderr of the job's driver program—for example, <code>gs://sysbucket123/foo-cluster/jobid-123/driver/output</code>.
+ "pigJob": { # A Cloud Dataproc job for running Pig queries on YARN. # Job is a Pig job.
+ "queryFileUri": "A String", # The HCFS URI of the script that contains the Pig queries.
+ "scriptVariables": { # Optional Mapping of query variable names to values (equivalent to the Pig command: "name=value").
+ "a_key": "A String",
+ },
+ "jarFileUris": [ # Optional HCFS URIs of jar files to add to the CLASSPATH of the Pig Client and Hadoop MapReduce (MR) tasks. Can contain Pig UDFs.
"A String",
],
"loggingConfiguration": { # The runtime logging configuration of the job. # Optional The runtime log configuration for job execution.
@@ -415,28 +448,6 @@
"a_key": "A String",
},
},
- "fileUris": [ # Optional HCFS URIs of files to be copied to the working directory of Hadoop drivers and distributed tasks. Useful for naively parallel tasks.
- "A String",
- ],
- "mainClass": "A String", # The name of the driver's main class. The jar file containing the class must be in the default CLASSPATH or specified in jar_file_uris.
- "archiveUris": [ # Optional HCFS URIs of archives to be extracted in the working directory of Hadoop drivers and tasks. Supported file types: .jar, .tar, .tar.gz, .tgz, or .zip.
- "A String",
- ],
- "mainJarFileUri": "A String", # The Hadoop Compatible Filesystem (HCFS) URI of the jar file containing the main class. Examples: gs://foo-bucket/analytics-binaries/extract-useful-metrics-mr.jar hdfs:/tmp/test-samples/custom-wordcount.jar file:///home/usr/lib/hadoop-mapreduce/hadoop-mapreduce-examples.jar
- "properties": { # Optional A mapping of property names to values, used to configure Hadoop. Properties that conflict with values set by the Cloud Dataproc API may be overwritten. Can include properties set in /etc/hadoop/conf/*-site and classes in user code.
- "a_key": "A String",
- },
- },
- "driverOutputUri": "A String", # Output-only A URI pointing to the location of the mixed stdout/stderr of the job's driver program—for example, <code>gs://sysbucket123/foo-cluster/jobid-123/driver/output</code>.
- "pigJob": { # A Cloud Dataproc job for running Pig queries on YARN. # Job is a Pig job.
- "queryFileUri": "A String", # The HCFS URI of the script that contains the Pig queries.
- "scriptVariables": { # Optional Mapping of query variable names to values (equivalent to the Pig command: "name=value").
- "a_key": "A String",
- },
- "continueOnFailure": True or False, # Optional Whether to continue executing queries if a query fails. The default value is false. Setting to true can be useful when executing independent parallel queries.
- "jarFileUris": [ # Optional HCFS URIs of jar files to add to the CLASSPATH of the Pig Client and Hadoop MapReduce (MR) tasks. Can contain Pig UDFs.
- "A String",
- ],
"queryList": { # A list of queries to run on a cluster. # A list of queries.
"queries": [ # Required The queries to execute. You do not need to terminate a query with a semicolon. Multiple queries can be specified in one string by separating each with a semicolon. Here is an example of an Cloud Dataproc API snippet that uses a QueryList to specify a HiveJob:
# "hiveJob": {
@@ -451,11 +462,7 @@
"A String",
],
},
- "loggingConfiguration": { # The runtime logging configuration of the job. # Optional The runtime log configuration for job execution.
- "driverLogLevels": { # The per-package log levels for the driver. This may include 'root' package name to configure rootLogger. Examples: com.google = FATAL, root = INFO, org.apache = DEBUG
- "a_key": "A String",
- },
- },
+ "continueOnFailure": True or False, # Optional Whether to continue executing queries if a query fails. The default value is false. Setting to true can be useful when executing independent parallel queries.
"properties": { # Optional A mapping of property names to values, used to configure Pig. Properties that conflict with values set by the Cloud Dataproc API may be overwritten. Can include properties set in /etc/hadoop/conf/*-site.xml, /etc/pig/conf/pig.properties, and classes in user code.
"a_key": "A String",
},
@@ -490,10 +497,10 @@
"driverControlFilesUri": "A String", # Output-only If present, the location of miscellaneous control files which may be used as part of job setup and handling. If not present, control files may be placed in the same location as driver_output_uri.
"driverInputResourceUri": "A String", # Output-only A URI pointing to the location of the stdin of the job's driver program, only set if the job is interactive.
"sparkJob": { # A Cloud Dataproc job for running Spark applications on YARN. # Job is a Spark job.
- "args": [ # Optional The arguments to pass to the driver. Do not include arguments, such as --conf, that can be set as job properties, since a collision may occur that causes an incorrect job submission.
+ "jarFileUris": [ # Optional HCFS URIs of jar files to add to the CLASSPATHs of the Spark driver and tasks.
"A String",
],
- "jarFileUris": [ # Optional HCFS URIs of jar files to add to the CLASSPATHs of the Spark driver and tasks.
+ "args": [ # Optional The arguments to pass to the driver. Do not include arguments, such as --conf, that can be set as job properties, since a collision may occur that causes an incorrect job submission.
"A String",
],
"loggingConfiguration": { # The runtime logging configuration of the job. # Optional The runtime log configuration for job execution.
@@ -517,13 +524,10 @@
"scheduling": { # Job scheduling options.Beta Feature: These options are available for testing purposes only. They may be changed before final release. # Optional Job scheduling configuration.
"maxFailuresPerHour": 42, # Optional Maximum number of times per hour a driver may be restarted as a result of driver terminating with non-zero code before job is reported failed.A job may be reported as thrashing if driver exits with non-zero code 4 times within 10 minute window.Maximum value is 10.
},
- "placement": { # Cloud Dataproc job configuration. # Required Job information, including how, when, and where to run the job.
- "clusterName": "A String", # Required The name of the cluster where the job will be submitted.
- "clusterUuid": "A String", # Output-only A cluster UUID generated by the Dataproc service when the job is submitted.
- },
"statusHistory": [ # Output-only The previous job status.
{ # Cloud Dataproc job status.
"stateStartTime": "A String", # Output-only The time when this state was entered.
+ "substate": "A String", # Output-only Additional state information, which includes status reported by the agent.
"startTime": "A String", # The time when the server started the job.
"state": "A String", # Required A state message specifying the overall job state.
"details": "A String", # Optional Job state details, such as an error description if the state is <code>ERROR</code>.
@@ -581,12 +585,48 @@
{ # A Cloud Dataproc job resource.
"status": { # Cloud Dataproc job status. # Output-only The job status. Additional application-specific status information may be contained in the <code>type_job</code> and <code>yarn_applications</code> fields.
"stateStartTime": "A String", # Output-only The time when this state was entered.
+ "substate": "A String", # Output-only Additional state information, which includes status reported by the agent.
"startTime": "A String", # The time when the server started the job.
"state": "A String", # Required A state message specifying the overall job state.
"details": "A String", # Optional Job state details, such as an error description if the state is <code>ERROR</code>.
"insertTime": "A String", # The time of the job request.
"endTime": "A String", # The time when the job completed.
},
+ "hadoopJob": { # A Cloud Dataproc job for running Hadoop MapReduce jobs on YARN. # Job is a Hadoop job.
+ "args": [ # Optional The arguments to pass to the driver. Do not include arguments, such as -libjars or -Dfoo=bar, that can be set as job properties, since a collision may occur that causes an incorrect job submission.
+ "A String",
+ ],
+ "jarFileUris": [ # Optional Jar file URIs to add to the CLASSPATHs of the Hadoop driver and tasks.
+ "A String",
+ ],
+ "loggingConfiguration": { # The runtime logging configuration of the job. # Optional The runtime log configuration for job execution.
+ "driverLogLevels": { # The per-package log levels for the driver. This may include 'root' package name to configure rootLogger. Examples: com.google = FATAL, root = INFO, org.apache = DEBUG
+ "a_key": "A String",
+ },
+ },
+ "fileUris": [ # Optional HCFS URIs of files to be copied to the working directory of Hadoop drivers and distributed tasks. Useful for naively parallel tasks.
+ "A String",
+ ],
+ "mainClass": "A String", # The name of the driver's main class. The jar file containing the class must be in the default CLASSPATH or specified in jar_file_uris.
+ "archiveUris": [ # Optional HCFS URIs of archives to be extracted in the working directory of Hadoop drivers and tasks. Supported file types: .jar, .tar, .tar.gz, .tgz, or .zip.
+ "A String",
+ ],
+ "mainJarFileUri": "A String", # The Hadoop Compatible Filesystem (HCFS) URI of the jar file containing the main class. Examples: gs://foo-bucket/analytics-binaries/extract-useful-metrics-mr.jar hdfs:/tmp/test-samples/custom-wordcount.jar file:///home/usr/lib/hadoop-mapreduce/hadoop-mapreduce-examples.jar
+ "properties": { # Optional A mapping of property names to values, used to configure Hadoop. Properties that conflict with values set by the Cloud Dataproc API may be overwritten. Can include properties set in /etc/hadoop/conf/*-site and classes in user code.
+ "a_key": "A String",
+ },
+ },
+      "labels": { # Optional The labels to associate with this job. Label keys must be between 1 and 63 characters long, and must conform to the following regular expression: \p{Ll}\p{Lo}{0,62}. Label values must be between 1 and 63 characters long, and must conform to the following regular expression: \p{Ll}\p{Lo}\p{N}_-{0,63}. No more than 64 labels can be associated with a given job.
+ "a_key": "A String",
+ },
+ "placement": { # Cloud Dataproc job configuration. # Required Job information, including how, when, and where to run the job.
+ "clusterName": "A String", # Required The name of the cluster where the job will be submitted.
+ "clusterUuid": "A String", # Output-only A cluster UUID generated by the Dataproc service when the job is submitted.
+ },
+ "reference": { # Encapsulates the full scoping used to reference a job. # Optional The fully-qualified reference to the job, which can be used to obtain the equivalent REST path of the job resource. If this property is not specified when a job is created, the server generates a <code>job_id</code>.
+ "projectId": "A String", # Required The ID of the Google Cloud Platform project that the job belongs to.
+ "jobId": "A String", # Required The job ID, which must be unique within the project. The job ID is generated by the server upon job submission or provided by the user as a means to perform retries without creating duplicate jobs. The ID must contain only letters (a-z, A-Z), numbers (0-9), underscores (_), or dashes (-). The maximum length is 100 characters.
+ },
"sparkSqlJob": { # A Cloud Dataproc job for running Spark SQL queries. # Job is a SparkSql job.
"queryFileUri": "A String", # The HCFS URI of the script that contains SQL queries.
"scriptVariables": { # Optional Mapping of query variable names to values (equivalent to the Spark SQL command: SET name="value";).
@@ -618,26 +658,21 @@
"a_key": "A String",
},
},
-    "labels": { # Optional The labels to associate with this job. Label keys must be between 1 and 63 characters long, and must conform to the following regular expression: \p{Ll}\p{Lo}{0,62}. Label values must be between 1 and 63 characters long, and must conform to the following regular expression: \p{Ll}\p{Lo}\p{N}_-{0,63}. No more than 64 labels can be associated with a given job.
- "a_key": "A String",
- },
"yarnApplications": [ # Output-only The collection of Yarn applications spun up by this job.
{ # A YARN application created by a job. Application information is a subset of <code>org.apache.hadoop.yarn.proto.YarnProtos.ApplicationReportProto</code>.
"progress": 3.14, # Required The numerical progress of the application, from 1 to 100.
"state": "A String", # Required The application state.
- "trackingUrl": "A String", # Optional The HTTP URL of the ApplicationMaster, HistoryServer, or TimelineServer that provides application-specific information. The URL uses the internal hostname, and requires a proxy server for resolution and, possibly, access.
"name": "A String", # Required The application name.
+ "trackingUrl": "A String", # Optional The HTTP URL of the ApplicationMaster, HistoryServer, or TimelineServer that provides application-specific information. The URL uses the internal hostname, and requires a proxy server for resolution and, possibly, access.
},
],
- "reference": { # Encapsulates the full scoping used to reference a job. # Optional The fully-qualified reference to the job, which can be used to obtain the equivalent REST path of the job resource. If this property is not specified when a job is created, the server generates a <code>job_id</code>.
- "projectId": "A String", # Required The ID of the Google Cloud Platform project that the job belongs to.
- "jobId": "A String", # Required The job ID, which must be unique within the project. The job ID is generated by the server upon job submission or provided by the user as a means to perform retries without creating duplicate jobs. The ID must contain only letters (a-z, A-Z), numbers (0-9), underscores (_), or dashes (-). The maximum length is 100 characters.
- },
- "hadoopJob": { # A Cloud Dataproc job for running Hadoop MapReduce jobs on YARN. # Job is a Hadoop job.
- "jarFileUris": [ # Optional Jar file URIs to add to the CLASSPATHs of the Hadoop driver and tasks.
- "A String",
- ],
- "args": [ # Optional The arguments to pass to the driver. Do not include arguments, such as -libjars or -Dfoo=bar, that can be set as job properties, since a collision may occur that causes an incorrect job submission.
+ "driverOutputUri": "A String", # Output-only A URI pointing to the location of the mixed stdout/stderr of the job's driver program—for example, <code>gs://sysbucket123/foo-cluster/jobid-123/driver/output</code>.
+ "pigJob": { # A Cloud Dataproc job for running Pig queries on YARN. # Job is a Pig job.
+ "queryFileUri": "A String", # The HCFS URI of the script that contains the Pig queries.
+ "scriptVariables": { # Optional Mapping of query variable names to values (equivalent to the Pig command: "name=value").
+ "a_key": "A String",
+ },
+ "jarFileUris": [ # Optional HCFS URIs of jar files to add to the CLASSPATH of the Pig Client and Hadoop MapReduce (MR) tasks. Can contain Pig UDFs.
"A String",
],
"loggingConfiguration": { # The runtime logging configuration of the job. # Optional The runtime log configuration for job execution.
@@ -645,28 +680,6 @@
"a_key": "A String",
},
},
- "fileUris": [ # Optional HCFS URIs of files to be copied to the working directory of Hadoop drivers and distributed tasks. Useful for naively parallel tasks.
- "A String",
- ],
- "mainClass": "A String", # The name of the driver's main class. The jar file containing the class must be in the default CLASSPATH or specified in jar_file_uris.
- "archiveUris": [ # Optional HCFS URIs of archives to be extracted in the working directory of Hadoop drivers and tasks. Supported file types: .jar, .tar, .tar.gz, .tgz, or .zip.
- "A String",
- ],
- "mainJarFileUri": "A String", # The Hadoop Compatible Filesystem (HCFS) URI of the jar file containing the main class. Examples: gs://foo-bucket/analytics-binaries/extract-useful-metrics-mr.jar hdfs:/tmp/test-samples/custom-wordcount.jar file:///home/usr/lib/hadoop-mapreduce/hadoop-mapreduce-examples.jar
- "properties": { # Optional A mapping of property names to values, used to configure Hadoop. Properties that conflict with values set by the Cloud Dataproc API may be overwritten. Can include properties set in /etc/hadoop/conf/*-site and classes in user code.
- "a_key": "A String",
- },
- },
- "driverOutputUri": "A String", # Output-only A URI pointing to the location of the mixed stdout/stderr of the job's driver program—for example, <code>gs://sysbucket123/foo-cluster/jobid-123/driver/output</code>.
- "pigJob": { # A Cloud Dataproc job for running Pig queries on YARN. # Job is a Pig job.
- "queryFileUri": "A String", # The HCFS URI of the script that contains the Pig queries.
- "scriptVariables": { # Optional Mapping of query variable names to values (equivalent to the Pig command: "name=value").
- "a_key": "A String",
- },
- "continueOnFailure": True or False, # Optional Whether to continue executing queries if a query fails. The default value is false. Setting to true can be useful when executing independent parallel queries.
- "jarFileUris": [ # Optional HCFS URIs of jar files to add to the CLASSPATH of the Pig Client and Hadoop MapReduce (MR) tasks. Can contain Pig UDFs.
- "A String",
- ],
"queryList": { # A list of queries to run on a cluster. # A list of queries.
"queries": [ # Required The queries to execute. You do not need to terminate a query with a semicolon. Multiple queries can be specified in one string by separating each with a semicolon. Here is an example of an Cloud Dataproc API snippet that uses a QueryList to specify a HiveJob:
# "hiveJob": {
@@ -681,11 +694,7 @@
"A String",
],
},
- "loggingConfiguration": { # The runtime logging configuration of the job. # Optional The runtime log configuration for job execution.
- "driverLogLevels": { # The per-package log levels for the driver. This may include 'root' package name to configure rootLogger. Examples: com.google = FATAL, root = INFO, org.apache = DEBUG
- "a_key": "A String",
- },
- },
+ "continueOnFailure": True or False, # Optional Whether to continue executing queries if a query fails. The default value is false. Setting to true can be useful when executing independent parallel queries.
"properties": { # Optional A mapping of property names to values, used to configure Pig. Properties that conflict with values set by the Cloud Dataproc API may be overwritten. Can include properties set in /etc/hadoop/conf/*-site.xml, /etc/pig/conf/pig.properties, and classes in user code.
"a_key": "A String",
},
@@ -720,10 +729,10 @@
"driverControlFilesUri": "A String", # Output-only If present, the location of miscellaneous control files which may be used as part of job setup and handling. If not present, control files may be placed in the same location as driver_output_uri.
"driverInputResourceUri": "A String", # Output-only A URI pointing to the location of the stdin of the job's driver program, only set if the job is interactive.
"sparkJob": { # A Cloud Dataproc job for running Spark applications on YARN. # Job is a Spark job.
- "args": [ # Optional The arguments to pass to the driver. Do not include arguments, such as --conf, that can be set as job properties, since a collision may occur that causes an incorrect job submission.
+ "jarFileUris": [ # Optional HCFS URIs of jar files to add to the CLASSPATHs of the Spark driver and tasks.
"A String",
],
- "jarFileUris": [ # Optional HCFS URIs of jar files to add to the CLASSPATHs of the Spark driver and tasks.
+ "args": [ # Optional The arguments to pass to the driver. Do not include arguments, such as --conf, that can be set as job properties, since a collision may occur that causes an incorrect job submission.
"A String",
],
"loggingConfiguration": { # The runtime logging configuration of the job. # Optional The runtime log configuration for job execution.
@@ -747,13 +756,10 @@
"scheduling": { # Job scheduling options.Beta Feature: These options are available for testing purposes only. They may be changed before final release. # Optional Job scheduling configuration.
"maxFailuresPerHour": 42, # Optional Maximum number of times per hour a driver may be restarted as a result of driver terminating with non-zero code before job is reported failed.A job may be reported as thrashing if driver exits with non-zero code 4 times within 10 minute window.Maximum value is 10.
},
- "placement": { # Cloud Dataproc job configuration. # Required Job information, including how, when, and where to run the job.
- "clusterName": "A String", # Required The name of the cluster where the job will be submitted.
- "clusterUuid": "A String", # Output-only A cluster UUID generated by the Dataproc service when the job is submitted.
- },
"statusHistory": [ # Output-only The previous job status.
{ # Cloud Dataproc job status.
"stateStartTime": "A String", # Output-only The time when this state was entered.
+ "substate": "A String", # Output-only Additional state information, which includes status reported by the agent.
"startTime": "A String", # The time when the server started the job.
"state": "A String", # Required A state message specifying the overall job state.
"details": "A String", # Optional Job state details, such as an error description if the state is <code>ERROR</code>.
@@ -824,12 +830,48 @@
{ # A Cloud Dataproc job resource.
"status": { # Cloud Dataproc job status. # Output-only The job status. Additional application-specific status information may be contained in the <code>type_job</code> and <code>yarn_applications</code> fields.
"stateStartTime": "A String", # Output-only The time when this state was entered.
+ "substate": "A String", # Output-only Additional state information, which includes status reported by the agent.
"startTime": "A String", # The time when the server started the job.
"state": "A String", # Required A state message specifying the overall job state.
"details": "A String", # Optional Job state details, such as an error description if the state is <code>ERROR</code>.
"insertTime": "A String", # The time of the job request.
"endTime": "A String", # The time when the job completed.
},
+ "hadoopJob": { # A Cloud Dataproc job for running Hadoop MapReduce jobs on YARN. # Job is a Hadoop job.
+ "args": [ # Optional The arguments to pass to the driver. Do not include arguments, such as -libjars or -Dfoo=bar, that can be set as job properties, since a collision may occur that causes an incorrect job submission.
+ "A String",
+ ],
+ "jarFileUris": [ # Optional Jar file URIs to add to the CLASSPATHs of the Hadoop driver and tasks.
+ "A String",
+ ],
+ "loggingConfiguration": { # The runtime logging configuration of the job. # Optional The runtime log configuration for job execution.
+ "driverLogLevels": { # The per-package log levels for the driver. This may include 'root' package name to configure rootLogger. Examples: com.google = FATAL, root = INFO, org.apache = DEBUG
+ "a_key": "A String",
+ },
+ },
+ "fileUris": [ # Optional HCFS URIs of files to be copied to the working directory of Hadoop drivers and distributed tasks. Useful for naively parallel tasks.
+ "A String",
+ ],
+ "mainClass": "A String", # The name of the driver's main class. The jar file containing the class must be in the default CLASSPATH or specified in jar_file_uris.
+ "archiveUris": [ # Optional HCFS URIs of archives to be extracted in the working directory of Hadoop drivers and tasks. Supported file types: .jar, .tar, .tar.gz, .tgz, or .zip.
+ "A String",
+ ],
+ "mainJarFileUri": "A String", # The Hadoop Compatible Filesystem (HCFS) URI of the jar file containing the main class. Examples: gs://foo-bucket/analytics-binaries/extract-useful-metrics-mr.jar hdfs:/tmp/test-samples/custom-wordcount.jar file:///home/usr/lib/hadoop-mapreduce/hadoop-mapreduce-examples.jar
+ "properties": { # Optional A mapping of property names to values, used to configure Hadoop. Properties that conflict with values set by the Cloud Dataproc API may be overwritten. Can include properties set in /etc/hadoop/conf/*-site and classes in user code.
+ "a_key": "A String",
+ },
+ },
+      "labels": { # Optional The labels to associate with this job. Label keys must be between 1 and 63 characters long, and must conform to the following regular expression: \p{Ll}\p{Lo}{0,62}. Label values must be between 1 and 63 characters long, and must conform to the following regular expression: \p{Ll}\p{Lo}\p{N}_-{0,63}. No more than 64 labels can be associated with a given job.
+ "a_key": "A String",
+ },
+ "placement": { # Cloud Dataproc job configuration. # Required Job information, including how, when, and where to run the job.
+ "clusterName": "A String", # Required The name of the cluster where the job will be submitted.
+ "clusterUuid": "A String", # Output-only A cluster UUID generated by the Dataproc service when the job is submitted.
+ },
+ "reference": { # Encapsulates the full scoping used to reference a job. # Optional The fully-qualified reference to the job, which can be used to obtain the equivalent REST path of the job resource. If this property is not specified when a job is created, the server generates a <code>job_id</code>.
+ "projectId": "A String", # Required The ID of the Google Cloud Platform project that the job belongs to.
+ "jobId": "A String", # Required The job ID, which must be unique within the project. The job ID is generated by the server upon job submission or provided by the user as a means to perform retries without creating duplicate jobs. The ID must contain only letters (a-z, A-Z), numbers (0-9), underscores (_), or dashes (-). The maximum length is 100 characters.
+ },
"sparkSqlJob": { # A Cloud Dataproc job for running Spark SQL queries. # Job is a SparkSql job.
"queryFileUri": "A String", # The HCFS URI of the script that contains SQL queries.
"scriptVariables": { # Optional Mapping of query variable names to values (equivalent to the Spark SQL command: SET name="value";).
@@ -861,26 +903,21 @@
"a_key": "A String",
},
},
-    "labels": { # Optional The labels to associate with this job. Label keys must be between 1 and 63 characters long, and must conform to the following regular expression: \p{Ll}\p{Lo}{0,62}. Label values must be between 1 and 63 characters long, and must conform to the following regular expression: \p{Ll}\p{Lo}\p{N}_-{0,63}. No more than 64 labels can be associated with a given job.
- "a_key": "A String",
- },
"yarnApplications": [ # Output-only The collection of Yarn applications spun up by this job.
{ # A YARN application created by a job. Application information is a subset of <code>org.apache.hadoop.yarn.proto.YarnProtos.ApplicationReportProto</code>.
"progress": 3.14, # Required The numerical progress of the application, from 1 to 100.
"state": "A String", # Required The application state.
- "trackingUrl": "A String", # Optional The HTTP URL of the ApplicationMaster, HistoryServer, or TimelineServer that provides application-specific information. The URL uses the internal hostname, and requires a proxy server for resolution and, possibly, access.
"name": "A String", # Required The application name.
+ "trackingUrl": "A String", # Optional The HTTP URL of the ApplicationMaster, HistoryServer, or TimelineServer that provides application-specific information. The URL uses the internal hostname, and requires a proxy server for resolution and, possibly, access.
},
],
- "reference": { # Encapsulates the full scoping used to reference a job. # Optional The fully-qualified reference to the job, which can be used to obtain the equivalent REST path of the job resource. If this property is not specified when a job is created, the server generates a <code>job_id</code>.
- "projectId": "A String", # Required The ID of the Google Cloud Platform project that the job belongs to.
- "jobId": "A String", # Required The job ID, which must be unique within the project. The job ID is generated by the server upon job submission or provided by the user as a means to perform retries without creating duplicate jobs. The ID must contain only letters (a-z, A-Z), numbers (0-9), underscores (_), or dashes (-). The maximum length is 100 characters.
- },
- "hadoopJob": { # A Cloud Dataproc job for running Hadoop MapReduce jobs on YARN. # Job is a Hadoop job.
- "jarFileUris": [ # Optional Jar file URIs to add to the CLASSPATHs of the Hadoop driver and tasks.
- "A String",
- ],
- "args": [ # Optional The arguments to pass to the driver. Do not include arguments, such as -libjars or -Dfoo=bar, that can be set as job properties, since a collision may occur that causes an incorrect job submission.
+ "driverOutputUri": "A String", # Output-only A URI pointing to the location of the mixed stdout/stderr of the job's driver program—for example, <code>gs://sysbucket123/foo-cluster/jobid-123/driver/output</code>.
+ "pigJob": { # A Cloud Dataproc job for running Pig queries on YARN. # Job is a Pig job.
+ "queryFileUri": "A String", # The HCFS URI of the script that contains the Pig queries.
+ "scriptVariables": { # Optional Mapping of query variable names to values (equivalent to the Pig command: "name=value").
+ "a_key": "A String",
+ },
+ "jarFileUris": [ # Optional HCFS URIs of jar files to add to the CLASSPATH of the Pig Client and Hadoop MapReduce (MR) tasks. Can contain Pig UDFs.
"A String",
],
"loggingConfiguration": { # The runtime logging configuration of the job. # Optional The runtime log configuration for job execution.
@@ -888,28 +925,6 @@
"a_key": "A String",
},
},
- "fileUris": [ # Optional HCFS URIs of files to be copied to the working directory of Hadoop drivers and distributed tasks. Useful for naively parallel tasks.
- "A String",
- ],
- "mainClass": "A String", # The name of the driver's main class. The jar file containing the class must be in the default CLASSPATH or specified in jar_file_uris.
- "archiveUris": [ # Optional HCFS URIs of archives to be extracted in the working directory of Hadoop drivers and tasks. Supported file types: .jar, .tar, .tar.gz, .tgz, or .zip.
- "A String",
- ],
- "mainJarFileUri": "A String", # The Hadoop Compatible Filesystem (HCFS) URI of the jar file containing the main class. Examples: gs://foo-bucket/analytics-binaries/extract-useful-metrics-mr.jar hdfs:/tmp/test-samples/custom-wordcount.jar file:///home/usr/lib/hadoop-mapreduce/hadoop-mapreduce-examples.jar
- "properties": { # Optional A mapping of property names to values, used to configure Hadoop. Properties that conflict with values set by the Cloud Dataproc API may be overwritten. Can include properties set in /etc/hadoop/conf/*-site and classes in user code.
- "a_key": "A String",
- },
- },
- "driverOutputUri": "A String", # Output-only A URI pointing to the location of the mixed stdout/stderr of the job's driver program—for example, <code>gs://sysbucket123/foo-cluster/jobid-123/driver/output</code>.
- "pigJob": { # A Cloud Dataproc job for running Pig queries on YARN. # Job is a Pig job.
- "queryFileUri": "A String", # The HCFS URI of the script that contains the Pig queries.
- "scriptVariables": { # Optional Mapping of query variable names to values (equivalent to the Pig command: "name=value").
- "a_key": "A String",
- },
- "continueOnFailure": True or False, # Optional Whether to continue executing queries if a query fails. The default value is false. Setting to true can be useful when executing independent parallel queries.
- "jarFileUris": [ # Optional HCFS URIs of jar files to add to the CLASSPATH of the Pig Client and Hadoop MapReduce (MR) tasks. Can contain Pig UDFs.
- "A String",
- ],
"queryList": { # A list of queries to run on a cluster. # A list of queries.
"queries": [ # Required The queries to execute. You do not need to terminate a query with a semicolon. Multiple queries can be specified in one string by separating each with a semicolon. Here is an example of an Cloud Dataproc API snippet that uses a QueryList to specify a HiveJob:
# "hiveJob": {
@@ -924,11 +939,7 @@
"A String",
],
},
- "loggingConfiguration": { # The runtime logging configuration of the job. # Optional The runtime log configuration for job execution.
- "driverLogLevels": { # The per-package log levels for the driver. This may include 'root' package name to configure rootLogger. Examples: com.google = FATAL, root = INFO, org.apache = DEBUG
- "a_key": "A String",
- },
- },
+ "continueOnFailure": True or False, # Optional Whether to continue executing queries if a query fails. The default value is false. Setting to true can be useful when executing independent parallel queries.
"properties": { # Optional A mapping of property names to values, used to configure Pig. Properties that conflict with values set by the Cloud Dataproc API may be overwritten. Can include properties set in /etc/hadoop/conf/*-site.xml, /etc/pig/conf/pig.properties, and classes in user code.
"a_key": "A String",
},
@@ -963,10 +974,10 @@
"driverControlFilesUri": "A String", # Output-only If present, the location of miscellaneous control files which may be used as part of job setup and handling. If not present, control files may be placed in the same location as driver_output_uri.
"driverInputResourceUri": "A String", # Output-only A URI pointing to the location of the stdin of the job's driver program, only set if the job is interactive.
"sparkJob": { # A Cloud Dataproc job for running Spark applications on YARN. # Job is a Spark job.
- "args": [ # Optional The arguments to pass to the driver. Do not include arguments, such as --conf, that can be set as job properties, since a collision may occur that causes an incorrect job submission.
+ "jarFileUris": [ # Optional HCFS URIs of jar files to add to the CLASSPATHs of the Spark driver and tasks.
"A String",
],
- "jarFileUris": [ # Optional HCFS URIs of jar files to add to the CLASSPATHs of the Spark driver and tasks.
+ "args": [ # Optional The arguments to pass to the driver. Do not include arguments, such as --conf, that can be set as job properties, since a collision may occur that causes an incorrect job submission.
"A String",
],
"loggingConfiguration": { # The runtime logging configuration of the job. # Optional The runtime log configuration for job execution.
@@ -990,13 +1001,10 @@
"scheduling": { # Job scheduling options.Beta Feature: These options are available for testing purposes only. They may be changed before final release. # Optional Job scheduling configuration.
"maxFailuresPerHour": 42, # Optional Maximum number of times per hour a driver may be restarted as a result of driver terminating with non-zero code before job is reported failed.A job may be reported as thrashing if driver exits with non-zero code 4 times within 10 minute window.Maximum value is 10.
},
- "placement": { # Cloud Dataproc job configuration. # Required Job information, including how, when, and where to run the job.
- "clusterName": "A String", # Required The name of the cluster where the job will be submitted.
- "clusterUuid": "A String", # Output-only A cluster UUID generated by the Dataproc service when the job is submitted.
- },
"statusHistory": [ # Output-only The previous job status.
{ # Cloud Dataproc job status.
"stateStartTime": "A String", # Output-only The time when this state was entered.
+ "substate": "A String", # Output-only Additional state information, which includes status reported by the agent.
"startTime": "A String", # The time when the server started the job.
"state": "A String", # Required A state message specifying the overall job state.
"details": "A String", # Optional Job state details, such as an error description if the state is <code>ERROR</code>.
@@ -1065,12 +1073,48 @@
{ # A Cloud Dataproc job resource.
"status": { # Cloud Dataproc job status. # Output-only The job status. Additional application-specific status information may be contained in the <code>type_job</code> and <code>yarn_applications</code> fields.
"stateStartTime": "A String", # Output-only The time when this state was entered.
+ "substate": "A String", # Output-only Additional state information, which includes status reported by the agent.
"startTime": "A String", # The time when the server started the job.
"state": "A String", # Required A state message specifying the overall job state.
"details": "A String", # Optional Job state details, such as an error description if the state is <code>ERROR</code>.
"insertTime": "A String", # The time of the job request.
"endTime": "A String", # The time when the job completed.
},
+ "hadoopJob": { # A Cloud Dataproc job for running Hadoop MapReduce jobs on YARN. # Job is a Hadoop job.
+ "args": [ # Optional The arguments to pass to the driver. Do not include arguments, such as -libjars or -Dfoo=bar, that can be set as job properties, since a collision may occur that causes an incorrect job submission.
+ "A String",
+ ],
+ "jarFileUris": [ # Optional Jar file URIs to add to the CLASSPATHs of the Hadoop driver and tasks.
+ "A String",
+ ],
+ "loggingConfiguration": { # The runtime logging configuration of the job. # Optional The runtime log configuration for job execution.
+ "driverLogLevels": { # The per-package log levels for the driver. This may include 'root' package name to configure rootLogger. Examples: com.google = FATAL, root = INFO, org.apache = DEBUG
+ "a_key": "A String",
+ },
+ },
+ "fileUris": [ # Optional HCFS URIs of files to be copied to the working directory of Hadoop drivers and distributed tasks. Useful for naively parallel tasks.
+ "A String",
+ ],
+ "mainClass": "A String", # The name of the driver's main class. The jar file containing the class must be in the default CLASSPATH or specified in jar_file_uris.
+ "archiveUris": [ # Optional HCFS URIs of archives to be extracted in the working directory of Hadoop drivers and tasks. Supported file types: .jar, .tar, .tar.gz, .tgz, or .zip.
+ "A String",
+ ],
+ "mainJarFileUri": "A String", # The Hadoop Compatible Filesystem (HCFS) URI of the jar file containing the main class. Examples: gs://foo-bucket/analytics-binaries/extract-useful-metrics-mr.jar hdfs:/tmp/test-samples/custom-wordcount.jar file:///home/usr/lib/hadoop-mapreduce/hadoop-mapreduce-examples.jar
+ "properties": { # Optional A mapping of property names to values, used to configure Hadoop. Properties that conflict with values set by the Cloud Dataproc API may be overwritten. Can include properties set in /etc/hadoop/conf/*-site and classes in user code.
+ "a_key": "A String",
+ },
+ },
+      "labels": { # Optional The labels to associate with this job. Label keys must be between 1 and 63 characters long, and must conform to the following regular expression: \p{Ll}\p{Lo}{0,62}. Label values must be between 1 and 63 characters long, and must conform to the following regular expression: \p{Ll}\p{Lo}\p{N}_-{0,63}. No more than 64 labels can be associated with a given job.
+ "a_key": "A String",
+ },
+ "placement": { # Cloud Dataproc job configuration. # Required Job information, including how, when, and where to run the job.
+ "clusterName": "A String", # Required The name of the cluster where the job will be submitted.
+ "clusterUuid": "A String", # Output-only A cluster UUID generated by the Dataproc service when the job is submitted.
+ },
+ "reference": { # Encapsulates the full scoping used to reference a job. # Optional The fully-qualified reference to the job, which can be used to obtain the equivalent REST path of the job resource. If this property is not specified when a job is created, the server generates a <code>job_id</code>.
+ "projectId": "A String", # Required The ID of the Google Cloud Platform project that the job belongs to.
+ "jobId": "A String", # Required The job ID, which must be unique within the project. The job ID is generated by the server upon job submission or provided by the user as a means to perform retries without creating duplicate jobs. The ID must contain only letters (a-z, A-Z), numbers (0-9), underscores (_), or dashes (-). The maximum length is 100 characters.
+ },
"sparkSqlJob": { # A Cloud Dataproc job for running Spark SQL queries. # Job is a SparkSql job.
"queryFileUri": "A String", # The HCFS URI of the script that contains SQL queries.
"scriptVariables": { # Optional Mapping of query variable names to values (equivalent to the Spark SQL command: SET name="value";).
@@ -1102,26 +1146,21 @@
"a_key": "A String",
},
},
- "labels": { # Optional The labels to associate with this job.Label keys must be between 1 and 63 characters long, and must conform to the following regular expression: \p{Ll}\p{Lo}{0,62}Label values must be between 1 and 63 characters long, and must conform to the following regular expression: \p{Ll}\p{Lo}\p{N}_-{0,63}No more than 64 labels can be associated with a given job.
- "a_key": "A String",
- },
"yarnApplications": [ # Output-only The collection of Yarn applications spun up by this job.
{ # A YARN application created by a job. Application information is a subset of <code>org.apache.hadoop.yarn.proto.YarnProtos.ApplicationReportProto</code>.
"progress": 3.14, # Required The numerical progress of the application, from 1 to 100.
"state": "A String", # Required The application state.
- "trackingUrl": "A String", # Optional The HTTP URL of the ApplicationMaster, HistoryServer, or TimelineServer that provides application-specific information. The URL uses the internal hostname, and requires a proxy server for resolution and, possibly, access.
"name": "A String", # Required The application name.
+ "trackingUrl": "A String", # Optional The HTTP URL of the ApplicationMaster, HistoryServer, or TimelineServer that provides application-specific information. The URL uses the internal hostname, and requires a proxy server for resolution and, possibly, access.
},
],
- "reference": { # Encapsulates the full scoping used to reference a job. # Optional The fully-qualified reference to the job, which can be used to obtain the equivalent REST path of the job resource. If this property is not specified when a job is created, the server generates a <code>job_id</code>.
- "projectId": "A String", # Required The ID of the Google Cloud Platform project that the job belongs to.
- "jobId": "A String", # Required The job ID, which must be unique within the project. The job ID is generated by the server upon job submission or provided by the user as a means to perform retries without creating duplicate jobs. The ID must contain only letters (a-z, A-Z), numbers (0-9), underscores (_), or dashes (-). The maximum length is 100 characters.
- },
- "hadoopJob": { # A Cloud Dataproc job for running Hadoop MapReduce jobs on YARN. # Job is a Hadoop job.
- "jarFileUris": [ # Optional Jar file URIs to add to the CLASSPATHs of the Hadoop driver and tasks.
- "A String",
- ],
- "args": [ # Optional The arguments to pass to the driver. Do not include arguments, such as -libjars or -Dfoo=bar, that can be set as job properties, since a collision may occur that causes an incorrect job submission.
+ "driverOutputUri": "A String", # Output-only A URI pointing to the location of the mixed stdout/stderr of the job's driver program—for example, <code>gs://sysbucket123/foo-cluster/jobid-123/driver/output</code>.
+ "pigJob": { # A Cloud Dataproc job for running Pig queries on YARN. # Job is a Pig job.
+ "queryFileUri": "A String", # The HCFS URI of the script that contains the Pig queries.
+ "scriptVariables": { # Optional Mapping of query variable names to values (equivalent to the Pig command: "name=value").
+ "a_key": "A String",
+ },
+ "jarFileUris": [ # Optional HCFS URIs of jar files to add to the CLASSPATH of the Pig Client and Hadoop MapReduce (MR) tasks. Can contain Pig UDFs.
"A String",
],
"loggingConfiguration": { # The runtime logging configuration of the job. # Optional The runtime log configuration for job execution.
@@ -1129,28 +1168,6 @@
"a_key": "A String",
},
},
- "fileUris": [ # Optional HCFS URIs of files to be copied to the working directory of Hadoop drivers and distributed tasks. Useful for naively parallel tasks.
- "A String",
- ],
- "mainClass": "A String", # The name of the driver's main class. The jar file containing the class must be in the default CLASSPATH or specified in jar_file_uris.
- "archiveUris": [ # Optional HCFS URIs of archives to be extracted in the working directory of Hadoop drivers and tasks. Supported file types: .jar, .tar, .tar.gz, .tgz, or .zip.
- "A String",
- ],
- "mainJarFileUri": "A String", # The Hadoop Compatible Filesystem (HCFS) URI of the jar file containing the main class. Examples: gs://foo-bucket/analytics-binaries/extract-useful-metrics-mr.jar hdfs:/tmp/test-samples/custom-wordcount.jar file:///home/usr/lib/hadoop-mapreduce/hadoop-mapreduce-examples.jar
- "properties": { # Optional A mapping of property names to values, used to configure Hadoop. Properties that conflict with values set by the Cloud Dataproc API may be overwritten. Can include properties set in /etc/hadoop/conf/*-site and classes in user code.
- "a_key": "A String",
- },
- },
- "driverOutputUri": "A String", # Output-only A URI pointing to the location of the mixed stdout/stderr of the job's driver program—for example, <code>gs://sysbucket123/foo-cluster/jobid-123/driver/output</code>.
- "pigJob": { # A Cloud Dataproc job for running Pig queries on YARN. # Job is a Pig job.
- "queryFileUri": "A String", # The HCFS URI of the script that contains the Pig queries.
- "scriptVariables": { # Optional Mapping of query variable names to values (equivalent to the Pig command: "name=value").
- "a_key": "A String",
- },
- "continueOnFailure": True or False, # Optional Whether to continue executing queries if a query fails. The default value is false. Setting to true can be useful when executing independent parallel queries.
- "jarFileUris": [ # Optional HCFS URIs of jar files to add to the CLASSPATH of the Pig Client and Hadoop MapReduce (MR) tasks. Can contain Pig UDFs.
- "A String",
- ],
"queryList": { # A list of queries to run on a cluster. # A list of queries.
"queries": [ # Required The queries to execute. You do not need to terminate a query with a semicolon. Multiple queries can be specified in one string by separating each with a semicolon. Here is an example of an Cloud Dataproc API snippet that uses a QueryList to specify a HiveJob:
# "hiveJob": {
@@ -1165,11 +1182,7 @@
"A String",
],
},
- "loggingConfiguration": { # The runtime logging configuration of the job. # Optional The runtime log configuration for job execution.
- "driverLogLevels": { # The per-package log levels for the driver. This may include 'root' package name to configure rootLogger. Examples: com.google = FATAL, root = INFO, org.apache = DEBUG
- "a_key": "A String",
- },
- },
+ "continueOnFailure": True or False, # Optional Whether to continue executing queries if a query fails. The default value is false. Setting to true can be useful when executing independent parallel queries.
"properties": { # Optional A mapping of property names to values, used to configure Pig. Properties that conflict with values set by the Cloud Dataproc API may be overwritten. Can include properties set in /etc/hadoop/conf/*-site.xml, /etc/pig/conf/pig.properties, and classes in user code.
"a_key": "A String",
},
@@ -1204,10 +1217,10 @@
"driverControlFilesUri": "A String", # Output-only If present, the location of miscellaneous control files which may be used as part of job setup and handling. If not present, control files may be placed in the same location as driver_output_uri.
"driverInputResourceUri": "A String", # Output-only A URI pointing to the location of the stdin of the job's driver program, only set if the job is interactive.
"sparkJob": { # A Cloud Dataproc job for running Spark applications on YARN. # Job is a Spark job.
- "args": [ # Optional The arguments to pass to the driver. Do not include arguments, such as --conf, that can be set as job properties, since a collision may occur that causes an incorrect job submission.
+ "jarFileUris": [ # Optional HCFS URIs of jar files to add to the CLASSPATHs of the Spark driver and tasks.
"A String",
],
- "jarFileUris": [ # Optional HCFS URIs of jar files to add to the CLASSPATHs of the Spark driver and tasks.
+ "args": [ # Optional The arguments to pass to the driver. Do not include arguments, such as --conf, that can be set as job properties, since a collision may occur that causes an incorrect job submission.
"A String",
],
"loggingConfiguration": { # The runtime logging configuration of the job. # Optional The runtime log configuration for job execution.
@@ -1231,13 +1244,10 @@
"scheduling": { # Job scheduling options.Beta Feature: These options are available for testing purposes only. They may be changed before final release. # Optional Job scheduling configuration.
"maxFailuresPerHour": 42, # Optional Maximum number of times per hour a driver may be restarted as a result of driver terminating with non-zero code before job is reported failed.A job may be reported as thrashing if driver exits with non-zero code 4 times within 10 minute window.Maximum value is 10.
},
- "placement": { # Cloud Dataproc job configuration. # Required Job information, including how, when, and where to run the job.
- "clusterName": "A String", # Required The name of the cluster where the job will be submitted.
- "clusterUuid": "A String", # Output-only A cluster UUID generated by the Dataproc service when the job is submitted.
- },
"statusHistory": [ # Output-only The previous job status.
{ # Cloud Dataproc job status.
"stateStartTime": "A String", # Output-only The time when this state was entered.
+ "substate": "A String", # Output-only Additional state information, which includes status reported by the agent.
"startTime": "A String", # The time when the server started the job.
"state": "A String", # Required A state message specifying the overall job state.
"details": "A String", # Optional Job state details, such as an error description if the state is <code>ERROR</code>.
@@ -1287,12 +1297,48 @@
{ # A Cloud Dataproc job resource.
"status": { # Cloud Dataproc job status. # Output-only The job status. Additional application-specific status information may be contained in the <code>type_job</code> and <code>yarn_applications</code> fields.
"stateStartTime": "A String", # Output-only The time when this state was entered.
+ "substate": "A String", # Output-only Additional state information, which includes status reported by the agent.
"startTime": "A String", # The time when the server started the job.
"state": "A String", # Required A state message specifying the overall job state.
"details": "A String", # Optional Job state details, such as an error description if the state is <code>ERROR</code>.
"insertTime": "A String", # The time of the job request.
"endTime": "A String", # The time when the job completed.
},
+ "hadoopJob": { # A Cloud Dataproc job for running Hadoop MapReduce jobs on YARN. # Job is a Hadoop job.
+ "args": [ # Optional The arguments to pass to the driver. Do not include arguments, such as -libjars or -Dfoo=bar, that can be set as job properties, since a collision may occur that causes an incorrect job submission.
+ "A String",
+ ],
+ "jarFileUris": [ # Optional Jar file URIs to add to the CLASSPATHs of the Hadoop driver and tasks.
+ "A String",
+ ],
+ "loggingConfiguration": { # The runtime logging configuration of the job. # Optional The runtime log configuration for job execution.
+ "driverLogLevels": { # The per-package log levels for the driver. This may include 'root' package name to configure rootLogger. Examples: com.google = FATAL, root = INFO, org.apache = DEBUG
+ "a_key": "A String",
+ },
+ },
+ "fileUris": [ # Optional HCFS URIs of files to be copied to the working directory of Hadoop drivers and distributed tasks. Useful for naively parallel tasks.
+ "A String",
+ ],
+ "mainClass": "A String", # The name of the driver's main class. The jar file containing the class must be in the default CLASSPATH or specified in jar_file_uris.
+ "archiveUris": [ # Optional HCFS URIs of archives to be extracted in the working directory of Hadoop drivers and tasks. Supported file types: .jar, .tar, .tar.gz, .tgz, or .zip.
+ "A String",
+ ],
+ "mainJarFileUri": "A String", # The Hadoop Compatible Filesystem (HCFS) URI of the jar file containing the main class. Examples: gs://foo-bucket/analytics-binaries/extract-useful-metrics-mr.jar hdfs:/tmp/test-samples/custom-wordcount.jar file:///home/usr/lib/hadoop-mapreduce/hadoop-mapreduce-examples.jar
+ "properties": { # Optional A mapping of property names to values, used to configure Hadoop. Properties that conflict with values set by the Cloud Dataproc API may be overwritten. Can include properties set in /etc/hadoop/conf/*-site and classes in user code.
+ "a_key": "A String",
+ },
+ },
+      "labels": { # Optional The labels to associate with this job. Label keys must be between 1 and 63 characters long, and must conform to the following regular expression: \p{Ll}\p{Lo}{0,62}. Label values must be between 1 and 63 characters long, and must conform to the following regular expression: \p{Ll}\p{Lo}\p{N}_-{0,63}. No more than 64 labels can be associated with a given job.
+ "a_key": "A String",
+ },
+ "placement": { # Cloud Dataproc job configuration. # Required Job information, including how, when, and where to run the job.
+ "clusterName": "A String", # Required The name of the cluster where the job will be submitted.
+ "clusterUuid": "A String", # Output-only A cluster UUID generated by the Dataproc service when the job is submitted.
+ },
+ "reference": { # Encapsulates the full scoping used to reference a job. # Optional The fully-qualified reference to the job, which can be used to obtain the equivalent REST path of the job resource. If this property is not specified when a job is created, the server generates a <code>job_id</code>.
+ "projectId": "A String", # Required The ID of the Google Cloud Platform project that the job belongs to.
+ "jobId": "A String", # Required The job ID, which must be unique within the project. The job ID is generated by the server upon job submission or provided by the user as a means to perform retries without creating duplicate jobs. The ID must contain only letters (a-z, A-Z), numbers (0-9), underscores (_), or dashes (-). The maximum length is 100 characters.
+ },
"sparkSqlJob": { # A Cloud Dataproc job for running Spark SQL queries. # Job is a SparkSql job.
"queryFileUri": "A String", # The HCFS URI of the script that contains SQL queries.
"scriptVariables": { # Optional Mapping of query variable names to values (equivalent to the Spark SQL command: SET name="value";).
@@ -1324,26 +1370,21 @@
"a_key": "A String",
},
},
- "labels": { # Optional The labels to associate with this job.Label keys must be between 1 and 63 characters long, and must conform to the following regular expression: \p{Ll}\p{Lo}{0,62}Label values must be between 1 and 63 characters long, and must conform to the following regular expression: \p{Ll}\p{Lo}\p{N}_-{0,63}No more than 64 labels can be associated with a given job.
- "a_key": "A String",
- },
"yarnApplications": [ # Output-only The collection of Yarn applications spun up by this job.
{ # A YARN application created by a job. Application information is a subset of <code>org.apache.hadoop.yarn.proto.YarnProtos.ApplicationReportProto</code>.
"progress": 3.14, # Required The numerical progress of the application, from 1 to 100.
"state": "A String", # Required The application state.
- "trackingUrl": "A String", # Optional The HTTP URL of the ApplicationMaster, HistoryServer, or TimelineServer that provides application-specific information. The URL uses the internal hostname, and requires a proxy server for resolution and, possibly, access.
"name": "A String", # Required The application name.
+ "trackingUrl": "A String", # Optional The HTTP URL of the ApplicationMaster, HistoryServer, or TimelineServer that provides application-specific information. The URL uses the internal hostname, and requires a proxy server for resolution and, possibly, access.
},
],
- "reference": { # Encapsulates the full scoping used to reference a job. # Optional The fully-qualified reference to the job, which can be used to obtain the equivalent REST path of the job resource. If this property is not specified when a job is created, the server generates a <code>job_id</code>.
- "projectId": "A String", # Required The ID of the Google Cloud Platform project that the job belongs to.
- "jobId": "A String", # Required The job ID, which must be unique within the project. The job ID is generated by the server upon job submission or provided by the user as a means to perform retries without creating duplicate jobs. The ID must contain only letters (a-z, A-Z), numbers (0-9), underscores (_), or dashes (-). The maximum length is 100 characters.
- },
- "hadoopJob": { # A Cloud Dataproc job for running Hadoop MapReduce jobs on YARN. # Job is a Hadoop job.
- "jarFileUris": [ # Optional Jar file URIs to add to the CLASSPATHs of the Hadoop driver and tasks.
- "A String",
- ],
- "args": [ # Optional The arguments to pass to the driver. Do not include arguments, such as -libjars or -Dfoo=bar, that can be set as job properties, since a collision may occur that causes an incorrect job submission.
+ "driverOutputUri": "A String", # Output-only A URI pointing to the location of the mixed stdout/stderr of the job's driver program—for example, <code>gs://sysbucket123/foo-cluster/jobid-123/driver/output</code>.
+ "pigJob": { # A Cloud Dataproc job for running Pig queries on YARN. # Job is a Pig job.
+ "queryFileUri": "A String", # The HCFS URI of the script that contains the Pig queries.
+ "scriptVariables": { # Optional Mapping of query variable names to values (equivalent to the Pig command: "name=value").
+ "a_key": "A String",
+ },
+ "jarFileUris": [ # Optional HCFS URIs of jar files to add to the CLASSPATH of the Pig Client and Hadoop MapReduce (MR) tasks. Can contain Pig UDFs.
"A String",
],
"loggingConfiguration": { # The runtime logging configuration of the job. # Optional The runtime log configuration for job execution.
@@ -1351,28 +1392,6 @@
"a_key": "A String",
},
},
- "fileUris": [ # Optional HCFS URIs of files to be copied to the working directory of Hadoop drivers and distributed tasks. Useful for naively parallel tasks.
- "A String",
- ],
- "mainClass": "A String", # The name of the driver's main class. The jar file containing the class must be in the default CLASSPATH or specified in jar_file_uris.
- "archiveUris": [ # Optional HCFS URIs of archives to be extracted in the working directory of Hadoop drivers and tasks. Supported file types: .jar, .tar, .tar.gz, .tgz, or .zip.
- "A String",
- ],
- "mainJarFileUri": "A String", # The Hadoop Compatible Filesystem (HCFS) URI of the jar file containing the main class. Examples: gs://foo-bucket/analytics-binaries/extract-useful-metrics-mr.jar hdfs:/tmp/test-samples/custom-wordcount.jar file:///home/usr/lib/hadoop-mapreduce/hadoop-mapreduce-examples.jar
- "properties": { # Optional A mapping of property names to values, used to configure Hadoop. Properties that conflict with values set by the Cloud Dataproc API may be overwritten. Can include properties set in /etc/hadoop/conf/*-site and classes in user code.
- "a_key": "A String",
- },
- },
- "driverOutputUri": "A String", # Output-only A URI pointing to the location of the mixed stdout/stderr of the job's driver program—for example, <code>gs://sysbucket123/foo-cluster/jobid-123/driver/output</code>.
- "pigJob": { # A Cloud Dataproc job for running Pig queries on YARN. # Job is a Pig job.
- "queryFileUri": "A String", # The HCFS URI of the script that contains the Pig queries.
- "scriptVariables": { # Optional Mapping of query variable names to values (equivalent to the Pig command: "name=value").
- "a_key": "A String",
- },
- "continueOnFailure": True or False, # Optional Whether to continue executing queries if a query fails. The default value is false. Setting to true can be useful when executing independent parallel queries.
- "jarFileUris": [ # Optional HCFS URIs of jar files to add to the CLASSPATH of the Pig Client and Hadoop MapReduce (MR) tasks. Can contain Pig UDFs.
- "A String",
- ],
"queryList": { # A list of queries to run on a cluster. # A list of queries.
"queries": [ # Required The queries to execute. You do not need to terminate a query with a semicolon. Multiple queries can be specified in one string by separating each with a semicolon. Here is an example of an Cloud Dataproc API snippet that uses a QueryList to specify a HiveJob:
# "hiveJob": {
@@ -1387,11 +1406,7 @@
"A String",
],
},
- "loggingConfiguration": { # The runtime logging configuration of the job. # Optional The runtime log configuration for job execution.
- "driverLogLevels": { # The per-package log levels for the driver. This may include 'root' package name to configure rootLogger. Examples: com.google = FATAL, root = INFO, org.apache = DEBUG
- "a_key": "A String",
- },
- },
+ "continueOnFailure": True or False, # Optional Whether to continue executing queries if a query fails. The default value is false. Setting to true can be useful when executing independent parallel queries.
"properties": { # Optional A mapping of property names to values, used to configure Pig. Properties that conflict with values set by the Cloud Dataproc API may be overwritten. Can include properties set in /etc/hadoop/conf/*-site.xml, /etc/pig/conf/pig.properties, and classes in user code.
"a_key": "A String",
},
@@ -1426,10 +1441,10 @@
"driverControlFilesUri": "A String", # Output-only If present, the location of miscellaneous control files which may be used as part of job setup and handling. If not present, control files may be placed in the same location as driver_output_uri.
"driverInputResourceUri": "A String", # Output-only A URI pointing to the location of the stdin of the job's driver program, only set if the job is interactive.
"sparkJob": { # A Cloud Dataproc job for running Spark applications on YARN. # Job is a Spark job.
- "args": [ # Optional The arguments to pass to the driver. Do not include arguments, such as --conf, that can be set as job properties, since a collision may occur that causes an incorrect job submission.
+ "jarFileUris": [ # Optional HCFS URIs of jar files to add to the CLASSPATHs of the Spark driver and tasks.
"A String",
],
- "jarFileUris": [ # Optional HCFS URIs of jar files to add to the CLASSPATHs of the Spark driver and tasks.
+ "args": [ # Optional The arguments to pass to the driver. Do not include arguments, such as --conf, that can be set as job properties, since a collision may occur that causes an incorrect job submission.
"A String",
],
"loggingConfiguration": { # The runtime logging configuration of the job. # Optional The runtime log configuration for job execution.
@@ -1453,13 +1468,10 @@
"scheduling": { # Job scheduling options.Beta Feature: These options are available for testing purposes only. They may be changed before final release. # Optional Job scheduling configuration.
"maxFailuresPerHour": 42, # Optional Maximum number of times per hour a driver may be restarted as a result of driver terminating with non-zero code before job is reported failed.A job may be reported as thrashing if driver exits with non-zero code 4 times within 10 minute window.Maximum value is 10.
},
- "placement": { # Cloud Dataproc job configuration. # Required Job information, including how, when, and where to run the job.
- "clusterName": "A String", # Required The name of the cluster where the job will be submitted.
- "clusterUuid": "A String", # Output-only A cluster UUID generated by the Dataproc service when the job is submitted.
- },
"statusHistory": [ # Output-only The previous job status.
{ # Cloud Dataproc job status.
"stateStartTime": "A String", # Output-only The time when this state was entered.
+ "substate": "A String", # Output-only Additional state information, which includes status reported by the agent.
"startTime": "A String", # The time when the server started the job.
"state": "A String", # Required A state message specifying the overall job state.
"details": "A String", # Optional Job state details, such as an error description if the state is <code>ERROR</code>.
@@ -1512,12 +1524,48 @@
"job": { # A Cloud Dataproc job resource. # Required The job resource.
"status": { # Cloud Dataproc job status. # Output-only The job status. Additional application-specific status information may be contained in the <code>type_job</code> and <code>yarn_applications</code> fields.
"stateStartTime": "A String", # Output-only The time when this state was entered.
+ "substate": "A String", # Output-only Additional state information, which includes status reported by the agent.
"startTime": "A String", # The time when the server started the job.
"state": "A String", # Required A state message specifying the overall job state.
"details": "A String", # Optional Job state details, such as an error description if the state is <code>ERROR</code>.
"insertTime": "A String", # The time of the job request.
"endTime": "A String", # The time when the job completed.
},
+ "hadoopJob": { # A Cloud Dataproc job for running Hadoop MapReduce jobs on YARN. # Job is a Hadoop job.
+ "args": [ # Optional The arguments to pass to the driver. Do not include arguments, such as -libjars or -Dfoo=bar, that can be set as job properties, since a collision may occur that causes an incorrect job submission.
+ "A String",
+ ],
+ "jarFileUris": [ # Optional Jar file URIs to add to the CLASSPATHs of the Hadoop driver and tasks.
+ "A String",
+ ],
+ "loggingConfiguration": { # The runtime logging configuration of the job. # Optional The runtime log configuration for job execution.
+ "driverLogLevels": { # The per-package log levels for the driver. This may include 'root' package name to configure rootLogger. Examples: com.google = FATAL, root = INFO, org.apache = DEBUG
+ "a_key": "A String",
+ },
+ },
+ "fileUris": [ # Optional HCFS URIs of files to be copied to the working directory of Hadoop drivers and distributed tasks. Useful for naively parallel tasks.
+ "A String",
+ ],
+ "mainClass": "A String", # The name of the driver's main class. The jar file containing the class must be in the default CLASSPATH or specified in jar_file_uris.
+ "archiveUris": [ # Optional HCFS URIs of archives to be extracted in the working directory of Hadoop drivers and tasks. Supported file types: .jar, .tar, .tar.gz, .tgz, or .zip.
+ "A String",
+ ],
+ "mainJarFileUri": "A String", # The Hadoop Compatible Filesystem (HCFS) URI of the jar file containing the main class. Examples: gs://foo-bucket/analytics-binaries/extract-useful-metrics-mr.jar hdfs:/tmp/test-samples/custom-wordcount.jar file:///home/usr/lib/hadoop-mapreduce/hadoop-mapreduce-examples.jar
+ "properties": { # Optional A mapping of property names to values, used to configure Hadoop. Properties that conflict with values set by the Cloud Dataproc API may be overwritten. Can include properties set in /etc/hadoop/conf/*-site and classes in user code.
+ "a_key": "A String",
+ },
+ },
+      "labels": { # Optional The labels to associate with this job. Label keys must be between 1 and 63 characters long, and must conform to the following regular expression: \p{Ll}\p{Lo}{0,62}. Label values must be between 1 and 63 characters long, and must conform to the following regular expression: \p{Ll}\p{Lo}\p{N}_-{0,63}. No more than 64 labels can be associated with a given job.
+ "a_key": "A String",
+ },
+ "placement": { # Cloud Dataproc job configuration. # Required Job information, including how, when, and where to run the job.
+ "clusterName": "A String", # Required The name of the cluster where the job will be submitted.
+ "clusterUuid": "A String", # Output-only A cluster UUID generated by the Dataproc service when the job is submitted.
+ },
+ "reference": { # Encapsulates the full scoping used to reference a job. # Optional The fully-qualified reference to the job, which can be used to obtain the equivalent REST path of the job resource. If this property is not specified when a job is created, the server generates a <code>job_id</code>.
+ "projectId": "A String", # Required The ID of the Google Cloud Platform project that the job belongs to.
+ "jobId": "A String", # Required The job ID, which must be unique within the project. The job ID is generated by the server upon job submission or provided by the user as a means to perform retries without creating duplicate jobs. The ID must contain only letters (a-z, A-Z), numbers (0-9), underscores (_), or dashes (-). The maximum length is 100 characters.
+ },
"sparkSqlJob": { # A Cloud Dataproc job for running Spark SQL queries. # Job is a SparkSql job.
"queryFileUri": "A String", # The HCFS URI of the script that contains SQL queries.
"scriptVariables": { # Optional Mapping of query variable names to values (equivalent to the Spark SQL command: SET name="value";).
@@ -1549,26 +1597,21 @@
"a_key": "A String",
},
},
- "labels": { # Optional The labels to associate with this job.Label keys must be between 1 and 63 characters long, and must conform to the following regular expression: \p{Ll}\p{Lo}{0,62}Label values must be between 1 and 63 characters long, and must conform to the following regular expression: \p{Ll}\p{Lo}\p{N}_-{0,63}No more than 64 labels can be associated with a given job.
- "a_key": "A String",
- },
"yarnApplications": [ # Output-only The collection of Yarn applications spun up by this job.
{ # A YARN application created by a job. Application information is a subset of <code>org.apache.hadoop.yarn.proto.YarnProtos.ApplicationReportProto</code>.
"progress": 3.14, # Required The numerical progress of the application, from 1 to 100.
"state": "A String", # Required The application state.
- "trackingUrl": "A String", # Optional The HTTP URL of the ApplicationMaster, HistoryServer, or TimelineServer that provides application-specific information. The URL uses the internal hostname, and requires a proxy server for resolution and, possibly, access.
"name": "A String", # Required The application name.
+ "trackingUrl": "A String", # Optional The HTTP URL of the ApplicationMaster, HistoryServer, or TimelineServer that provides application-specific information. The URL uses the internal hostname, and requires a proxy server for resolution and, possibly, access.
},
],
- "reference": { # Encapsulates the full scoping used to reference a job. # Optional The fully-qualified reference to the job, which can be used to obtain the equivalent REST path of the job resource. If this property is not specified when a job is created, the server generates a <code>job_id</code>.
- "projectId": "A String", # Required The ID of the Google Cloud Platform project that the job belongs to.
- "jobId": "A String", # Required The job ID, which must be unique within the project. The job ID is generated by the server upon job submission or provided by the user as a means to perform retries without creating duplicate jobs. The ID must contain only letters (a-z, A-Z), numbers (0-9), underscores (_), or dashes (-). The maximum length is 100 characters.
- },
- "hadoopJob": { # A Cloud Dataproc job for running Hadoop MapReduce jobs on YARN. # Job is a Hadoop job.
- "jarFileUris": [ # Optional Jar file URIs to add to the CLASSPATHs of the Hadoop driver and tasks.
- "A String",
- ],
- "args": [ # Optional The arguments to pass to the driver. Do not include arguments, such as -libjars or -Dfoo=bar, that can be set as job properties, since a collision may occur that causes an incorrect job submission.
+ "driverOutputUri": "A String", # Output-only A URI pointing to the location of the mixed stdout/stderr of the job's driver program—for example, <code>gs://sysbucket123/foo-cluster/jobid-123/driver/output</code>.
+ "pigJob": { # A Cloud Dataproc job for running Pig queries on YARN. # Job is a Pig job.
+ "queryFileUri": "A String", # The HCFS URI of the script that contains the Pig queries.
+ "scriptVariables": { # Optional Mapping of query variable names to values (equivalent to the Pig command: "name=value").
+ "a_key": "A String",
+ },
+ "jarFileUris": [ # Optional HCFS URIs of jar files to add to the CLASSPATH of the Pig Client and Hadoop MapReduce (MR) tasks. Can contain Pig UDFs.
"A String",
],
"loggingConfiguration": { # The runtime logging configuration of the job. # Optional The runtime log configuration for job execution.
@@ -1576,28 +1619,6 @@
"a_key": "A String",
},
},
- "fileUris": [ # Optional HCFS URIs of files to be copied to the working directory of Hadoop drivers and distributed tasks. Useful for naively parallel tasks.
- "A String",
- ],
- "mainClass": "A String", # The name of the driver's main class. The jar file containing the class must be in the default CLASSPATH or specified in jar_file_uris.
- "archiveUris": [ # Optional HCFS URIs of archives to be extracted in the working directory of Hadoop drivers and tasks. Supported file types: .jar, .tar, .tar.gz, .tgz, or .zip.
- "A String",
- ],
- "mainJarFileUri": "A String", # The Hadoop Compatible Filesystem (HCFS) URI of the jar file containing the main class. Examples: gs://foo-bucket/analytics-binaries/extract-useful-metrics-mr.jar hdfs:/tmp/test-samples/custom-wordcount.jar file:///home/usr/lib/hadoop-mapreduce/hadoop-mapreduce-examples.jar
- "properties": { # Optional A mapping of property names to values, used to configure Hadoop. Properties that conflict with values set by the Cloud Dataproc API may be overwritten. Can include properties set in /etc/hadoop/conf/*-site and classes in user code.
- "a_key": "A String",
- },
- },
- "driverOutputUri": "A String", # Output-only A URI pointing to the location of the mixed stdout/stderr of the job's driver program—for example, <code>gs://sysbucket123/foo-cluster/jobid-123/driver/output</code>.
- "pigJob": { # A Cloud Dataproc job for running Pig queries on YARN. # Job is a Pig job.
- "queryFileUri": "A String", # The HCFS URI of the script that contains the Pig queries.
- "scriptVariables": { # Optional Mapping of query variable names to values (equivalent to the Pig command: "name=value").
- "a_key": "A String",
- },
- "continueOnFailure": True or False, # Optional Whether to continue executing queries if a query fails. The default value is false. Setting to true can be useful when executing independent parallel queries.
- "jarFileUris": [ # Optional HCFS URIs of jar files to add to the CLASSPATH of the Pig Client and Hadoop MapReduce (MR) tasks. Can contain Pig UDFs.
- "A String",
- ],
"queryList": { # A list of queries to run on a cluster. # A list of queries.
"queries": [ # Required The queries to execute. You do not need to terminate a query with a semicolon. Multiple queries can be specified in one string by separating each with a semicolon. Here is an example of an Cloud Dataproc API snippet that uses a QueryList to specify a HiveJob:
# "hiveJob": {
@@ -1612,11 +1633,7 @@
"A String",
],
},
- "loggingConfiguration": { # The runtime logging configuration of the job. # Optional The runtime log configuration for job execution.
- "driverLogLevels": { # The per-package log levels for the driver. This may include 'root' package name to configure rootLogger. Examples: com.google = FATAL, root = INFO, org.apache = DEBUG
- "a_key": "A String",
- },
- },
+ "continueOnFailure": True or False, # Optional Whether to continue executing queries if a query fails. The default value is false. Setting to true can be useful when executing independent parallel queries.
"properties": { # Optional A mapping of property names to values, used to configure Pig. Properties that conflict with values set by the Cloud Dataproc API may be overwritten. Can include properties set in /etc/hadoop/conf/*-site.xml, /etc/pig/conf/pig.properties, and classes in user code.
"a_key": "A String",
},
@@ -1651,10 +1668,10 @@
"driverControlFilesUri": "A String", # Output-only If present, the location of miscellaneous control files which may be used as part of job setup and handling. If not present, control files may be placed in the same location as driver_output_uri.
"driverInputResourceUri": "A String", # Output-only A URI pointing to the location of the stdin of the job's driver program, only set if the job is interactive.
"sparkJob": { # A Cloud Dataproc job for running Spark applications on YARN. # Job is a Spark job.
- "args": [ # Optional The arguments to pass to the driver. Do not include arguments, such as --conf, that can be set as job properties, since a collision may occur that causes an incorrect job submission.
+ "jarFileUris": [ # Optional HCFS URIs of jar files to add to the CLASSPATHs of the Spark driver and tasks.
"A String",
],
- "jarFileUris": [ # Optional HCFS URIs of jar files to add to the CLASSPATHs of the Spark driver and tasks.
+ "args": [ # Optional The arguments to pass to the driver. Do not include arguments, such as --conf, that can be set as job properties, since a collision may occur that causes an incorrect job submission.
"A String",
],
"loggingConfiguration": { # The runtime logging configuration of the job. # Optional The runtime log configuration for job execution.
@@ -1678,13 +1695,10 @@
"scheduling": { # Job scheduling options.Beta Feature: These options are available for testing purposes only. They may be changed before final release. # Optional Job scheduling configuration.
"maxFailuresPerHour": 42, # Optional Maximum number of times per hour a driver may be restarted as a result of driver terminating with non-zero code before job is reported failed.A job may be reported as thrashing if driver exits with non-zero code 4 times within 10 minute window.Maximum value is 10.
},
- "placement": { # Cloud Dataproc job configuration. # Required Job information, including how, when, and where to run the job.
- "clusterName": "A String", # Required The name of the cluster where the job will be submitted.
- "clusterUuid": "A String", # Output-only A cluster UUID generated by the Dataproc service when the job is submitted.
- },
"statusHistory": [ # Output-only The previous job status.
{ # Cloud Dataproc job status.
"stateStartTime": "A String", # Output-only The time when this state was entered.
+ "substate": "A String", # Output-only Additional state information, which includes status reported by the agent.
"startTime": "A String", # The time when the server started the job.
"state": "A String", # Required A state message specifying the overall job state.
"details": "A String", # Optional Job state details, such as an error description if the state is <code>ERROR</code>.
@@ -1734,12 +1748,48 @@
{ # A Cloud Dataproc job resource.
"status": { # Cloud Dataproc job status. # Output-only The job status. Additional application-specific status information may be contained in the <code>type_job</code> and <code>yarn_applications</code> fields.
"stateStartTime": "A String", # Output-only The time when this state was entered.
+ "substate": "A String", # Output-only Additional state information, which includes status reported by the agent.
"startTime": "A String", # The time when the server started the job.
"state": "A String", # Required A state message specifying the overall job state.
"details": "A String", # Optional Job state details, such as an error description if the state is <code>ERROR</code>.
"insertTime": "A String", # The time of the job request.
"endTime": "A String", # The time when the job completed.
},
+ "hadoopJob": { # A Cloud Dataproc job for running Hadoop MapReduce jobs on YARN. # Job is a Hadoop job.
+ "args": [ # Optional The arguments to pass to the driver. Do not include arguments, such as -libjars or -Dfoo=bar, that can be set as job properties, since a collision may occur that causes an incorrect job submission.
+ "A String",
+ ],
+ "jarFileUris": [ # Optional Jar file URIs to add to the CLASSPATHs of the Hadoop driver and tasks.
+ "A String",
+ ],
+ "loggingConfiguration": { # The runtime logging configuration of the job. # Optional The runtime log configuration for job execution.
+ "driverLogLevels": { # The per-package log levels for the driver. This may include 'root' package name to configure rootLogger. Examples: com.google = FATAL, root = INFO, org.apache = DEBUG
+ "a_key": "A String",
+ },
+ },
+ "fileUris": [ # Optional HCFS URIs of files to be copied to the working directory of Hadoop drivers and distributed tasks. Useful for naively parallel tasks.
+ "A String",
+ ],
+ "mainClass": "A String", # The name of the driver's main class. The jar file containing the class must be in the default CLASSPATH or specified in jar_file_uris.
+ "archiveUris": [ # Optional HCFS URIs of archives to be extracted in the working directory of Hadoop drivers and tasks. Supported file types: .jar, .tar, .tar.gz, .tgz, or .zip.
+ "A String",
+ ],
+ "mainJarFileUri": "A String", # The Hadoop Compatible Filesystem (HCFS) URI of the jar file containing the main class. Examples: gs://foo-bucket/analytics-binaries/extract-useful-metrics-mr.jar hdfs:/tmp/test-samples/custom-wordcount.jar file:///home/usr/lib/hadoop-mapreduce/hadoop-mapreduce-examples.jar
+ "properties": { # Optional A mapping of property names to values, used to configure Hadoop. Properties that conflict with values set by the Cloud Dataproc API may be overwritten. Can include properties set in /etc/hadoop/conf/*-site and classes in user code.
+ "a_key": "A String",
+ },
+ },
+      "labels": { # Optional The labels to associate with this job. Label keys must be between 1 and 63 characters long, and must conform to the following regular expression: \p{Ll}\p{Lo}{0,62}. Label values must be between 1 and 63 characters long, and must conform to the following regular expression: \p{Ll}\p{Lo}\p{N}_-{0,63}. No more than 64 labels can be associated with a given job.
+ "a_key": "A String",
+ },
+ "placement": { # Cloud Dataproc job configuration. # Required Job information, including how, when, and where to run the job.
+ "clusterName": "A String", # Required The name of the cluster where the job will be submitted.
+ "clusterUuid": "A String", # Output-only A cluster UUID generated by the Dataproc service when the job is submitted.
+ },
+ "reference": { # Encapsulates the full scoping used to reference a job. # Optional The fully-qualified reference to the job, which can be used to obtain the equivalent REST path of the job resource. If this property is not specified when a job is created, the server generates a <code>job_id</code>.
+ "projectId": "A String", # Required The ID of the Google Cloud Platform project that the job belongs to.
+ "jobId": "A String", # Required The job ID, which must be unique within the project. The job ID is generated by the server upon job submission or provided by the user as a means to perform retries without creating duplicate jobs. The ID must contain only letters (a-z, A-Z), numbers (0-9), underscores (_), or dashes (-). The maximum length is 100 characters.
+ },
"sparkSqlJob": { # A Cloud Dataproc job for running Spark SQL queries. # Job is a SparkSql job.
"queryFileUri": "A String", # The HCFS URI of the script that contains SQL queries.
"scriptVariables": { # Optional Mapping of query variable names to values (equivalent to the Spark SQL command: SET name="value";).
@@ -1771,26 +1821,21 @@
"a_key": "A String",
},
},
- "labels": { # Optional The labels to associate with this job.Label keys must be between 1 and 63 characters long, and must conform to the following regular expression: \p{Ll}\p{Lo}{0,62}Label values must be between 1 and 63 characters long, and must conform to the following regular expression: \p{Ll}\p{Lo}\p{N}_-{0,63}No more than 64 labels can be associated with a given job.
- "a_key": "A String",
- },
"yarnApplications": [ # Output-only The collection of Yarn applications spun up by this job.
{ # A YARN application created by a job. Application information is a subset of <code>org.apache.hadoop.yarn.proto.YarnProtos.ApplicationReportProto</code>.
"progress": 3.14, # Required The numerical progress of the application, from 1 to 100.
"state": "A String", # Required The application state.
- "trackingUrl": "A String", # Optional The HTTP URL of the ApplicationMaster, HistoryServer, or TimelineServer that provides application-specific information. The URL uses the internal hostname, and requires a proxy server for resolution and, possibly, access.
"name": "A String", # Required The application name.
+ "trackingUrl": "A String", # Optional The HTTP URL of the ApplicationMaster, HistoryServer, or TimelineServer that provides application-specific information. The URL uses the internal hostname, and requires a proxy server for resolution and, possibly, access.
},
],
- "reference": { # Encapsulates the full scoping used to reference a job. # Optional The fully-qualified reference to the job, which can be used to obtain the equivalent REST path of the job resource. If this property is not specified when a job is created, the server generates a <code>job_id</code>.
- "projectId": "A String", # Required The ID of the Google Cloud Platform project that the job belongs to.
- "jobId": "A String", # Required The job ID, which must be unique within the project. The job ID is generated by the server upon job submission or provided by the user as a means to perform retries without creating duplicate jobs. The ID must contain only letters (a-z, A-Z), numbers (0-9), underscores (_), or dashes (-). The maximum length is 100 characters.
- },
- "hadoopJob": { # A Cloud Dataproc job for running Hadoop MapReduce jobs on YARN. # Job is a Hadoop job.
- "jarFileUris": [ # Optional Jar file URIs to add to the CLASSPATHs of the Hadoop driver and tasks.
- "A String",
- ],
- "args": [ # Optional The arguments to pass to the driver. Do not include arguments, such as -libjars or -Dfoo=bar, that can be set as job properties, since a collision may occur that causes an incorrect job submission.
+ "driverOutputUri": "A String", # Output-only A URI pointing to the location of the mixed stdout/stderr of the job's driver program—for example, <code>gs://sysbucket123/foo-cluster/jobid-123/driver/output</code>.
+ "pigJob": { # A Cloud Dataproc job for running Pig queries on YARN. # Job is a Pig job.
+ "queryFileUri": "A String", # The HCFS URI of the script that contains the Pig queries.
+ "scriptVariables": { # Optional Mapping of query variable names to values (equivalent to the Pig command: "name=value").
+ "a_key": "A String",
+ },
+ "jarFileUris": [ # Optional HCFS URIs of jar files to add to the CLASSPATH of the Pig Client and Hadoop MapReduce (MR) tasks. Can contain Pig UDFs.
"A String",
],
"loggingConfiguration": { # The runtime logging configuration of the job. # Optional The runtime log configuration for job execution.
@@ -1798,28 +1843,6 @@
"a_key": "A String",
},
},
- "fileUris": [ # Optional HCFS URIs of files to be copied to the working directory of Hadoop drivers and distributed tasks. Useful for naively parallel tasks.
- "A String",
- ],
- "mainClass": "A String", # The name of the driver's main class. The jar file containing the class must be in the default CLASSPATH or specified in jar_file_uris.
- "archiveUris": [ # Optional HCFS URIs of archives to be extracted in the working directory of Hadoop drivers and tasks. Supported file types: .jar, .tar, .tar.gz, .tgz, or .zip.
- "A String",
- ],
- "mainJarFileUri": "A String", # The Hadoop Compatible Filesystem (HCFS) URI of the jar file containing the main class. Examples: gs://foo-bucket/analytics-binaries/extract-useful-metrics-mr.jar hdfs:/tmp/test-samples/custom-wordcount.jar file:///home/usr/lib/hadoop-mapreduce/hadoop-mapreduce-examples.jar
- "properties": { # Optional A mapping of property names to values, used to configure Hadoop. Properties that conflict with values set by the Cloud Dataproc API may be overwritten. Can include properties set in /etc/hadoop/conf/*-site and classes in user code.
- "a_key": "A String",
- },
- },
- "driverOutputUri": "A String", # Output-only A URI pointing to the location of the mixed stdout/stderr of the job's driver program—for example, <code>gs://sysbucket123/foo-cluster/jobid-123/driver/output</code>.
- "pigJob": { # A Cloud Dataproc job for running Pig queries on YARN. # Job is a Pig job.
- "queryFileUri": "A String", # The HCFS URI of the script that contains the Pig queries.
- "scriptVariables": { # Optional Mapping of query variable names to values (equivalent to the Pig command: "name=value").
- "a_key": "A String",
- },
- "continueOnFailure": True or False, # Optional Whether to continue executing queries if a query fails. The default value is false. Setting to true can be useful when executing independent parallel queries.
- "jarFileUris": [ # Optional HCFS URIs of jar files to add to the CLASSPATH of the Pig Client and Hadoop MapReduce (MR) tasks. Can contain Pig UDFs.
- "A String",
- ],
"queryList": { # A list of queries to run on a cluster. # A list of queries.
"queries": [ # Required The queries to execute. You do not need to terminate a query with a semicolon. Multiple queries can be specified in one string by separating each with a semicolon. Here is an example of an Cloud Dataproc API snippet that uses a QueryList to specify a HiveJob:
# "hiveJob": {
@@ -1834,11 +1857,7 @@
"A String",
],
},
- "loggingConfiguration": { # The runtime logging configuration of the job. # Optional The runtime log configuration for job execution.
- "driverLogLevels": { # The per-package log levels for the driver. This may include 'root' package name to configure rootLogger. Examples: com.google = FATAL, root = INFO, org.apache = DEBUG
- "a_key": "A String",
- },
- },
+ "continueOnFailure": True or False, # Optional Whether to continue executing queries if a query fails. The default value is false. Setting to true can be useful when executing independent parallel queries.
"properties": { # Optional A mapping of property names to values, used to configure Pig. Properties that conflict with values set by the Cloud Dataproc API may be overwritten. Can include properties set in /etc/hadoop/conf/*-site.xml, /etc/pig/conf/pig.properties, and classes in user code.
"a_key": "A String",
},
@@ -1873,10 +1892,10 @@
"driverControlFilesUri": "A String", # Output-only If present, the location of miscellaneous control files which may be used as part of job setup and handling. If not present, control files may be placed in the same location as driver_output_uri.
"driverInputResourceUri": "A String", # Output-only A URI pointing to the location of the stdin of the job's driver program, only set if the job is interactive.
"sparkJob": { # A Cloud Dataproc job for running Spark applications on YARN. # Job is a Spark job.
- "args": [ # Optional The arguments to pass to the driver. Do not include arguments, such as --conf, that can be set as job properties, since a collision may occur that causes an incorrect job submission.
+ "jarFileUris": [ # Optional HCFS URIs of jar files to add to the CLASSPATHs of the Spark driver and tasks.
"A String",
],
- "jarFileUris": [ # Optional HCFS URIs of jar files to add to the CLASSPATHs of the Spark driver and tasks.
+ "args": [ # Optional The arguments to pass to the driver. Do not include arguments, such as --conf, that can be set as job properties, since a collision may occur that causes an incorrect job submission.
"A String",
],
"loggingConfiguration": { # The runtime logging configuration of the job. # Optional The runtime log configuration for job execution.
@@ -1900,13 +1919,10 @@
"scheduling": { # Job scheduling options.Beta Feature: These options are available for testing purposes only. They may be changed before final release. # Optional Job scheduling configuration.
"maxFailuresPerHour": 42, # Optional Maximum number of times per hour a driver may be restarted as a result of driver terminating with non-zero code before job is reported failed.A job may be reported as thrashing if driver exits with non-zero code 4 times within 10 minute window.Maximum value is 10.
},
- "placement": { # Cloud Dataproc job configuration. # Required Job information, including how, when, and where to run the job.
- "clusterName": "A String", # Required The name of the cluster where the job will be submitted.
- "clusterUuid": "A String", # Output-only A cluster UUID generated by the Dataproc service when the job is submitted.
- },
"statusHistory": [ # Output-only The previous job status.
{ # Cloud Dataproc job status.
"stateStartTime": "A String", # Output-only The time when this state was entered.
+ "substate": "A String", # Output-only Additional state information, which includes status reported by the agent.
"startTime": "A String", # The time when the server started the job.
"state": "A String", # Required A state message specifying the overall job state.
"details": "A String", # Optional Job state details, such as an error description if the state is <code>ERROR</code>.