Fix method doc signatures for multiline required parameters (#374)
* Fix method doc signatures for multiline required parameters.
The existing doc generator failed to recognize parameters as required when their descriptions
extended over more than one line (see the sketch below). Besides presenting incorrect
information, the resulting inconsistencies broke the checks used for automated sample generation.
* Regen docs
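
For illustration, a minimal sketch of the failure mode and the fix, assuming the generator
scans docstring parameter entries for a trailing "(required)" marker. The docstring excerpt,
regex, and helper below are hypothetical stand-ins, not the generator's actual internals:

    import re

    # Hypothetical docstring excerpt: "projectId" is required, but its
    # description (and the "(required)" marker) wrap onto a continuation line.
    LINES = [
        "Args:",
        "  projectId: string, The ID of the Google Cloud Platform project that the",
        "    job belongs to. (required)",
        "  body: object, The request body. (required)",
        "  x__xgafv: string, V1 error format.",
    ]

    PARAM_LINE = re.compile(r"^  (\w+): ")  # a parameter entry starts at two spaces

    def required_params(lines):
        """Fold wrapped continuation lines into their parameter entry, then match."""
        unwrapped = []
        for line in lines:
            if line.startswith("    ") and unwrapped:
                unwrapped[-1] += " " + line.strip()  # rejoin the wrapped text
            else:
                unwrapped.append(line)
        names = []
        for line in unwrapped:
            m = PARAM_LINE.match(line)
            if m and line.rstrip().endswith("(required)"):
                names.append(m.group(1))
        return names

    # A per-physical-line check misses projectId, whose marker sits on the
    # continuation line; unwrapping continuations first classifies both correctly.
    print(required_params(LINES))  # ['projectId', 'body']
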
diff --git a/docs/dyn/dataproc_v1.projects.regions.jobs.html b/docs/dyn/dataproc_v1.projects.regions.jobs.html
index 0ed4853..b5ceaf5 100644
--- a/docs/dyn/dataproc_v1.projects.regions.jobs.html
+++ b/docs/dyn/dataproc_v1.projects.regions.jobs.html
@@ -122,43 +122,8 @@
"status": { # Cloud Dataproc job status. # Output-only The job status. Additional application-specific status information may be contained in the <code>type_job</code> and <code>yarn_applications</code> fields.
"state": "A String", # Output-only A state message specifying the overall job state.
"stateStartTime": "A String", # Output-only The time when this state was entered.
- "details": "A String", # Output-only Optional job state details, such as an error description if the state is <code>ERROR</code>.
"substate": "A String", # Output-only Additional state information, which includes status reported by the agent.
- },
- "hadoopJob": { # A Cloud Dataproc job for running Apache Hadoop MapReduce (https://hadoop.apache.org/docs/current/hadoop-mapreduce-client/hadoop-mapreduce-client-core/MapReduceTutorial.html) jobs on Apache Hadoop YARN (https://hadoop.apache.org/docs/r2.7.1/hadoop-yarn/hadoop-yarn-site/YARN.html). # Job is a Hadoop job.
- "args": [ # Optional The arguments to pass to the driver. Do not include arguments, such as -libjars or -Dfoo=bar, that can be set as job properties, since a collision may occur that causes an incorrect job submission.
- "A String",
- ],
- "loggingConfig": { # The runtime logging config of the job. # Optional The runtime log config for job execution.
- "driverLogLevels": { # The per-package log levels for the driver. This may include "root" package name to configure rootLogger. Examples: 'com.google = FATAL', 'root = INFO', 'org.apache = DEBUG'
- "a_key": "A String",
- },
- },
- "jarFileUris": [ # Optional Jar file URIs to add to the CLASSPATHs of the Hadoop driver and tasks.
- "A String",
- ],
- "fileUris": [ # Optional HCFS (Hadoop Compatible Filesystem) URIs of files to be copied to the working directory of Hadoop drivers and distributed tasks. Useful for naively parallel tasks.
- "A String",
- ],
- "mainClass": "A String", # The name of the driver's main class. The jar file containing the class must be in the default CLASSPATH or specified in jar_file_uris.
- "archiveUris": [ # Optional HCFS URIs of archives to be extracted in the working directory of Hadoop drivers and tasks. Supported file types: .jar, .tar, .tar.gz, .tgz, or .zip.
- "A String",
- ],
- "mainJarFileUri": "A String", # The HCFS URI of the jar file containing the main class. Examples: 'gs://foo-bucket/analytics-binaries/extract-useful-metrics-mr.jar' 'hdfs:/tmp/test-samples/custom-wordcount.jar' 'file:///home/usr/lib/hadoop-mapreduce/hadoop-mapreduce-examples.jar'
- "properties": { # Optional A mapping of property names to values, used to configure Hadoop. Properties that conflict with values set by the Cloud Dataproc API may be overwritten. Can include properties set in /etc/hadoop/conf/*-site and classes in user code.
- "a_key": "A String",
- },
- },
- "labels": { # Optional The labels to associate with this job. Label keys must contain 1 to 63 characters, and must conform to RFC 1035 (https://www.ietf.org/rfc/rfc1035.txt). Label values may be empty, but, if present, must contain 1 to 63 characters, and must conform to RFC 1035 (https://www.ietf.org/rfc/rfc1035.txt). No more than 32 labels can be associated with a job.
- "a_key": "A String",
- },
- "placement": { # Cloud Dataproc job config. # Required Job information, including how, when, and where to run the job.
- "clusterName": "A String", # Required The name of the cluster where the job will be submitted.
- "clusterUuid": "A String", # Output-only A cluster UUID generated by the Cloud Dataproc service when the job is submitted.
- },
- "reference": { # Encapsulates the full scoping used to reference a job. # Optional The fully qualified reference to the job, which can be used to obtain the equivalent REST path of the job resource. If this property is not specified when a job is created, the server generates a <code>job_id</code>.
- "projectId": "A String", # Required The ID of the Google Cloud Platform project that the job belongs to.
- "jobId": "A String", # Optional The job ID, which must be unique within the project. The job ID is generated by the server upon job submission or provided by the user as a means to perform retries without creating duplicate jobs. The ID must contain only letters (a-z, A-Z), numbers (0-9), underscores (_), or hyphens (-). The maximum length is 100 characters.
+ "details": "A String", # Output-only Optional job state details, such as an error description if the state is <code>ERROR</code>.
},
"sparkSqlJob": { # A Cloud Dataproc job for running Apache Spark SQL (http://spark.apache.org/sql/) queries. # Job is a SparkSql job.
"queryFileUri": "A String", # The HCFS URI of the script that contains SQL queries.
@@ -223,10 +188,16 @@
"a_key": "A String",
},
},
- "driverOutputResourceUri": "A String", # Output-only A URI pointing to the location of the stdout of the job's driver program.
- "pysparkJob": { # A Cloud Dataproc job for running Apache PySpark (https://spark.apache.org/docs/0.9.0/python-programming-guide.html) applications on YARN. # Job is a Pyspark job.
- "mainPythonFileUri": "A String", # Required The HCFS URI of the main Python file to use as the driver. Must be a .py file.
- "jarFileUris": [ # Optional HCFS URIs of jar files to add to the CLASSPATHs of the Python driver and tasks.
+ "placement": { # Cloud Dataproc job config. # Required Job information, including how, when, and where to run the job.
+ "clusterName": "A String", # Required The name of the cluster where the job will be submitted.
+ "clusterUuid": "A String", # Output-only A cluster UUID generated by the Cloud Dataproc service when the job is submitted.
+ },
+ "reference": { # Encapsulates the full scoping used to reference a job. # Optional The fully qualified reference to the job, which can be used to obtain the equivalent REST path of the job resource. If this property is not specified when a job is created, the server generates a <code>job_id</code>.
+ "projectId": "A String", # Required The ID of the Google Cloud Platform project that the job belongs to.
+ "jobId": "A String", # Optional The job ID, which must be unique within the project. The job ID is generated by the server upon job submission or provided by the user as a means to perform retries without creating duplicate jobs. The ID must contain only letters (a-z, A-Z), numbers (0-9), underscores (_), or hyphens (-). The maximum length is 100 characters.
+ },
+ "hadoopJob": { # A Cloud Dataproc job for running Apache Hadoop MapReduce (https://hadoop.apache.org/docs/current/hadoop-mapreduce-client/hadoop-mapreduce-client-core/MapReduceTutorial.html) jobs on Apache Hadoop YARN (https://hadoop.apache.org/docs/r2.7.1/hadoop-yarn/hadoop-yarn-site/YARN.html). # Job is a Hadoop job.
+ "jarFileUris": [ # Optional Jar file URIs to add to the CLASSPATHs of the Hadoop driver and tasks.
"A String",
],
"loggingConfig": { # The runtime logging config of the job. # Optional The runtime log config for job execution.
@@ -234,9 +205,38 @@
"a_key": "A String",
},
},
+ "args": [ # Optional The arguments to pass to the driver. Do not include arguments, such as -libjars or -Dfoo=bar, that can be set as job properties, since a collision may occur that causes an incorrect job submission.
+ "A String",
+ ],
+ "fileUris": [ # Optional HCFS (Hadoop Compatible Filesystem) URIs of files to be copied to the working directory of Hadoop drivers and distributed tasks. Useful for naively parallel tasks.
+ "A String",
+ ],
+ "mainClass": "A String", # The name of the driver's main class. The jar file containing the class must be in the default CLASSPATH or specified in jar_file_uris.
+ "archiveUris": [ # Optional HCFS URIs of archives to be extracted in the working directory of Hadoop drivers and tasks. Supported file types: .jar, .tar, .tar.gz, .tgz, or .zip.
+ "A String",
+ ],
+ "mainJarFileUri": "A String", # The HCFS URI of the jar file containing the main class. Examples: 'gs://foo-bucket/analytics-binaries/extract-useful-metrics-mr.jar' 'hdfs:/tmp/test-samples/custom-wordcount.jar' 'file:///home/usr/lib/hadoop-mapreduce/hadoop-mapreduce-examples.jar'
+ "properties": { # Optional A mapping of property names to values, used to configure Hadoop. Properties that conflict with values set by the Cloud Dataproc API may be overwritten. Can include properties set in /etc/hadoop/conf/*-site and classes in user code.
+ "a_key": "A String",
+ },
+ },
+ "labels": { # Optional The labels to associate with this job. Label keys must contain 1 to 63 characters, and must conform to RFC 1035 (https://www.ietf.org/rfc/rfc1035.txt). Label values may be empty, but, if present, must contain 1 to 63 characters, and must conform to RFC 1035 (https://www.ietf.org/rfc/rfc1035.txt). No more than 32 labels can be associated with a job.
+ "a_key": "A String",
+ },
+ "driverOutputResourceUri": "A String", # Output-only A URI pointing to the location of the stdout of the job's driver program.
+ "pysparkJob": { # A Cloud Dataproc job for running Apache PySpark (https://spark.apache.org/docs/0.9.0/python-programming-guide.html) applications on YARN. # Job is a Pyspark job.
+ "mainPythonFileUri": "A String", # Required The HCFS URI of the main Python file to use as the driver. Must be a .py file.
"args": [ # Optional The arguments to pass to the driver. Do not include arguments, such as --conf, that can be set as job properties, since a collision may occur that causes an incorrect job submission.
"A String",
],
+ "loggingConfig": { # The runtime logging config of the job. # Optional The runtime log config for job execution.
+ "driverLogLevels": { # The per-package log levels for the driver. This may include "root" package name to configure rootLogger. Examples: 'com.google = FATAL', 'root = INFO', 'org.apache = DEBUG'
+ "a_key": "A String",
+ },
+ },
+ "jarFileUris": [ # Optional HCFS URIs of jar files to add to the CLASSPATHs of the Python driver and tasks.
+ "A String",
+ ],
"fileUris": [ # Optional HCFS URIs of files to be copied to the working directory of Python drivers and distributed tasks. Useful for naively parallel tasks.
"A String",
],
@@ -252,7 +252,7 @@
},
"driverControlFilesUri": "A String", # Output-only If present, the location of miscellaneous control files which may be used as part of job setup and handling. If not present, control files may be placed in the same location as driver_output_uri.
"sparkJob": { # A Cloud Dataproc job for running Apache Spark (http://spark.apache.org/) applications on YARN. # Job is a Spark job.
- "jarFileUris": [ # Optional HCFS URIs of jar files to add to the CLASSPATHs of the Spark driver and tasks.
+ "args": [ # Optional The arguments to pass to the driver. Do not include arguments, such as --conf, that can be set as job properties, since a collision may occur that causes an incorrect job submission.
"A String",
],
"loggingConfig": { # The runtime logging config of the job. # Optional The runtime log config for job execution.
@@ -260,7 +260,7 @@
"a_key": "A String",
},
},
- "args": [ # Optional The arguments to pass to the driver. Do not include arguments, such as --conf, that can be set as job properties, since a collision may occur that causes an incorrect job submission.
+ "jarFileUris": [ # Optional HCFS URIs of jar files to add to the CLASSPATHs of the Spark driver and tasks.
"A String",
],
"fileUris": [ # Optional HCFS URIs of files to be copied to the working directory of Spark drivers and distributed tasks. Useful for naively parallel tasks.
@@ -290,8 +290,8 @@
{ # Cloud Dataproc job status.
"state": "A String", # Output-only A state message specifying the overall job state.
"stateStartTime": "A String", # Output-only The time when this state was entered.
- "details": "A String", # Output-only Optional job state details, such as an error description if the state is <code>ERROR</code>.
"substate": "A String", # Output-only Additional state information, which includes status reported by the agent.
+ "details": "A String", # Output-only Optional job state details, such as an error description if the state is <code>ERROR</code>.
},
],
"hiveJob": { # A Cloud Dataproc job for running Apache Hive (https://hive.apache.org/) queries on YARN. # Job is a Hive job.
@@ -368,43 +368,8 @@
"status": { # Cloud Dataproc job status. # Output-only The job status. Additional application-specific status information may be contained in the <code>type_job</code> and <code>yarn_applications</code> fields.
"state": "A String", # Output-only A state message specifying the overall job state.
"stateStartTime": "A String", # Output-only The time when this state was entered.
- "details": "A String", # Output-only Optional job state details, such as an error description if the state is <code>ERROR</code>.
"substate": "A String", # Output-only Additional state information, which includes status reported by the agent.
- },
- "hadoopJob": { # A Cloud Dataproc job for running Apache Hadoop MapReduce (https://hadoop.apache.org/docs/current/hadoop-mapreduce-client/hadoop-mapreduce-client-core/MapReduceTutorial.html) jobs on Apache Hadoop YARN (https://hadoop.apache.org/docs/r2.7.1/hadoop-yarn/hadoop-yarn-site/YARN.html). # Job is a Hadoop job.
- "args": [ # Optional The arguments to pass to the driver. Do not include arguments, such as -libjars or -Dfoo=bar, that can be set as job properties, since a collision may occur that causes an incorrect job submission.
- "A String",
- ],
- "loggingConfig": { # The runtime logging config of the job. # Optional The runtime log config for job execution.
- "driverLogLevels": { # The per-package log levels for the driver. This may include "root" package name to configure rootLogger. Examples: 'com.google = FATAL', 'root = INFO', 'org.apache = DEBUG'
- "a_key": "A String",
- },
- },
- "jarFileUris": [ # Optional Jar file URIs to add to the CLASSPATHs of the Hadoop driver and tasks.
- "A String",
- ],
- "fileUris": [ # Optional HCFS (Hadoop Compatible Filesystem) URIs of files to be copied to the working directory of Hadoop drivers and distributed tasks. Useful for naively parallel tasks.
- "A String",
- ],
- "mainClass": "A String", # The name of the driver's main class. The jar file containing the class must be in the default CLASSPATH or specified in jar_file_uris.
- "archiveUris": [ # Optional HCFS URIs of archives to be extracted in the working directory of Hadoop drivers and tasks. Supported file types: .jar, .tar, .tar.gz, .tgz, or .zip.
- "A String",
- ],
- "mainJarFileUri": "A String", # The HCFS URI of the jar file containing the main class. Examples: 'gs://foo-bucket/analytics-binaries/extract-useful-metrics-mr.jar' 'hdfs:/tmp/test-samples/custom-wordcount.jar' 'file:///home/usr/lib/hadoop-mapreduce/hadoop-mapreduce-examples.jar'
- "properties": { # Optional A mapping of property names to values, used to configure Hadoop. Properties that conflict with values set by the Cloud Dataproc API may be overwritten. Can include properties set in /etc/hadoop/conf/*-site and classes in user code.
- "a_key": "A String",
- },
- },
- "labels": { # Optional The labels to associate with this job. Label keys must contain 1 to 63 characters, and must conform to RFC 1035 (https://www.ietf.org/rfc/rfc1035.txt). Label values may be empty, but, if present, must contain 1 to 63 characters, and must conform to RFC 1035 (https://www.ietf.org/rfc/rfc1035.txt). No more than 32 labels can be associated with a job.
- "a_key": "A String",
- },
- "placement": { # Cloud Dataproc job config. # Required Job information, including how, when, and where to run the job.
- "clusterName": "A String", # Required The name of the cluster where the job will be submitted.
- "clusterUuid": "A String", # Output-only A cluster UUID generated by the Cloud Dataproc service when the job is submitted.
- },
- "reference": { # Encapsulates the full scoping used to reference a job. # Optional The fully qualified reference to the job, which can be used to obtain the equivalent REST path of the job resource. If this property is not specified when a job is created, the server generates a <code>job_id</code>.
- "projectId": "A String", # Required The ID of the Google Cloud Platform project that the job belongs to.
- "jobId": "A String", # Optional The job ID, which must be unique within the project. The job ID is generated by the server upon job submission or provided by the user as a means to perform retries without creating duplicate jobs. The ID must contain only letters (a-z, A-Z), numbers (0-9), underscores (_), or hyphens (-). The maximum length is 100 characters.
+ "details": "A String", # Output-only Optional job state details, such as an error description if the state is <code>ERROR</code>.
},
"sparkSqlJob": { # A Cloud Dataproc job for running Apache Spark SQL (http://spark.apache.org/sql/) queries. # Job is a SparkSql job.
"queryFileUri": "A String", # The HCFS URI of the script that contains SQL queries.
@@ -469,10 +434,16 @@
"a_key": "A String",
},
},
- "driverOutputResourceUri": "A String", # Output-only A URI pointing to the location of the stdout of the job's driver program.
- "pysparkJob": { # A Cloud Dataproc job for running Apache PySpark (https://spark.apache.org/docs/0.9.0/python-programming-guide.html) applications on YARN. # Job is a Pyspark job.
- "mainPythonFileUri": "A String", # Required The HCFS URI of the main Python file to use as the driver. Must be a .py file.
- "jarFileUris": [ # Optional HCFS URIs of jar files to add to the CLASSPATHs of the Python driver and tasks.
+ "placement": { # Cloud Dataproc job config. # Required Job information, including how, when, and where to run the job.
+ "clusterName": "A String", # Required The name of the cluster where the job will be submitted.
+ "clusterUuid": "A String", # Output-only A cluster UUID generated by the Cloud Dataproc service when the job is submitted.
+ },
+ "reference": { # Encapsulates the full scoping used to reference a job. # Optional The fully qualified reference to the job, which can be used to obtain the equivalent REST path of the job resource. If this property is not specified when a job is created, the server generates a <code>job_id</code>.
+ "projectId": "A String", # Required The ID of the Google Cloud Platform project that the job belongs to.
+ "jobId": "A String", # Optional The job ID, which must be unique within the project. The job ID is generated by the server upon job submission or provided by the user as a means to perform retries without creating duplicate jobs. The ID must contain only letters (a-z, A-Z), numbers (0-9), underscores (_), or hyphens (-). The maximum length is 100 characters.
+ },
+ "hadoopJob": { # A Cloud Dataproc job for running Apache Hadoop MapReduce (https://hadoop.apache.org/docs/current/hadoop-mapreduce-client/hadoop-mapreduce-client-core/MapReduceTutorial.html) jobs on Apache Hadoop YARN (https://hadoop.apache.org/docs/r2.7.1/hadoop-yarn/hadoop-yarn-site/YARN.html). # Job is a Hadoop job.
+ "jarFileUris": [ # Optional Jar file URIs to add to the CLASSPATHs of the Hadoop driver and tasks.
"A String",
],
"loggingConfig": { # The runtime logging config of the job. # Optional The runtime log config for job execution.
@@ -480,9 +451,38 @@
"a_key": "A String",
},
},
+ "args": [ # Optional The arguments to pass to the driver. Do not include arguments, such as -libjars or -Dfoo=bar, that can be set as job properties, since a collision may occur that causes an incorrect job submission.
+ "A String",
+ ],
+ "fileUris": [ # Optional HCFS (Hadoop Compatible Filesystem) URIs of files to be copied to the working directory of Hadoop drivers and distributed tasks. Useful for naively parallel tasks.
+ "A String",
+ ],
+ "mainClass": "A String", # The name of the driver's main class. The jar file containing the class must be in the default CLASSPATH or specified in jar_file_uris.
+ "archiveUris": [ # Optional HCFS URIs of archives to be extracted in the working directory of Hadoop drivers and tasks. Supported file types: .jar, .tar, .tar.gz, .tgz, or .zip.
+ "A String",
+ ],
+ "mainJarFileUri": "A String", # The HCFS URI of the jar file containing the main class. Examples: 'gs://foo-bucket/analytics-binaries/extract-useful-metrics-mr.jar' 'hdfs:/tmp/test-samples/custom-wordcount.jar' 'file:///home/usr/lib/hadoop-mapreduce/hadoop-mapreduce-examples.jar'
+ "properties": { # Optional A mapping of property names to values, used to configure Hadoop. Properties that conflict with values set by the Cloud Dataproc API may be overwritten. Can include properties set in /etc/hadoop/conf/*-site and classes in user code.
+ "a_key": "A String",
+ },
+ },
+ "labels": { # Optional The labels to associate with this job. Label keys must contain 1 to 63 characters, and must conform to RFC 1035 (https://www.ietf.org/rfc/rfc1035.txt). Label values may be empty, but, if present, must contain 1 to 63 characters, and must conform to RFC 1035 (https://www.ietf.org/rfc/rfc1035.txt). No more than 32 labels can be associated with a job.
+ "a_key": "A String",
+ },
+ "driverOutputResourceUri": "A String", # Output-only A URI pointing to the location of the stdout of the job's driver program.
+ "pysparkJob": { # A Cloud Dataproc job for running Apache PySpark (https://spark.apache.org/docs/0.9.0/python-programming-guide.html) applications on YARN. # Job is a Pyspark job.
+ "mainPythonFileUri": "A String", # Required The HCFS URI of the main Python file to use as the driver. Must be a .py file.
"args": [ # Optional The arguments to pass to the driver. Do not include arguments, such as --conf, that can be set as job properties, since a collision may occur that causes an incorrect job submission.
"A String",
],
+ "loggingConfig": { # The runtime logging config of the job. # Optional The runtime log config for job execution.
+ "driverLogLevels": { # The per-package log levels for the driver. This may include "root" package name to configure rootLogger. Examples: 'com.google = FATAL', 'root = INFO', 'org.apache = DEBUG'
+ "a_key": "A String",
+ },
+ },
+ "jarFileUris": [ # Optional HCFS URIs of jar files to add to the CLASSPATHs of the Python driver and tasks.
+ "A String",
+ ],
"fileUris": [ # Optional HCFS URIs of files to be copied to the working directory of Python drivers and distributed tasks. Useful for naively parallel tasks.
"A String",
],
@@ -498,7 +498,7 @@
},
"driverControlFilesUri": "A String", # Output-only If present, the location of miscellaneous control files which may be used as part of job setup and handling. If not present, control files may be placed in the same location as driver_output_uri.
"sparkJob": { # A Cloud Dataproc job for running Apache Spark (http://spark.apache.org/) applications on YARN. # Job is a Spark job.
- "jarFileUris": [ # Optional HCFS URIs of jar files to add to the CLASSPATHs of the Spark driver and tasks.
+ "args": [ # Optional The arguments to pass to the driver. Do not include arguments, such as --conf, that can be set as job properties, since a collision may occur that causes an incorrect job submission.
"A String",
],
"loggingConfig": { # The runtime logging config of the job. # Optional The runtime log config for job execution.
@@ -506,7 +506,7 @@
"a_key": "A String",
},
},
- "args": [ # Optional The arguments to pass to the driver. Do not include arguments, such as --conf, that can be set as job properties, since a collision may occur that causes an incorrect job submission.
+ "jarFileUris": [ # Optional HCFS URIs of jar files to add to the CLASSPATHs of the Spark driver and tasks.
"A String",
],
"fileUris": [ # Optional HCFS URIs of files to be copied to the working directory of Spark drivers and distributed tasks. Useful for naively parallel tasks.
@@ -536,8 +536,8 @@
{ # Cloud Dataproc job status.
"state": "A String", # Output-only A state message specifying the overall job state.
"stateStartTime": "A String", # Output-only The time when this state was entered.
- "details": "A String", # Output-only Optional job state details, such as an error description if the state is <code>ERROR</code>.
"substate": "A String", # Output-only Additional state information, which includes status reported by the agent.
+ "details": "A String", # Output-only Optional job state details, such as an error description if the state is <code>ERROR</code>.
},
],
"hiveJob": { # A Cloud Dataproc job for running Apache Hive (https://hive.apache.org/) queries on YARN. # Job is a Hive job.
@@ -597,43 +597,8 @@
"status": { # Cloud Dataproc job status. # Output-only The job status. Additional application-specific status information may be contained in the <code>type_job</code> and <code>yarn_applications</code> fields.
"state": "A String", # Output-only A state message specifying the overall job state.
"stateStartTime": "A String", # Output-only The time when this state was entered.
- "details": "A String", # Output-only Optional job state details, such as an error description if the state is <code>ERROR</code>.
"substate": "A String", # Output-only Additional state information, which includes status reported by the agent.
- },
- "hadoopJob": { # A Cloud Dataproc job for running Apache Hadoop MapReduce (https://hadoop.apache.org/docs/current/hadoop-mapreduce-client/hadoop-mapreduce-client-core/MapReduceTutorial.html) jobs on Apache Hadoop YARN (https://hadoop.apache.org/docs/r2.7.1/hadoop-yarn/hadoop-yarn-site/YARN.html). # Job is a Hadoop job.
- "args": [ # Optional The arguments to pass to the driver. Do not include arguments, such as -libjars or -Dfoo=bar, that can be set as job properties, since a collision may occur that causes an incorrect job submission.
- "A String",
- ],
- "loggingConfig": { # The runtime logging config of the job. # Optional The runtime log config for job execution.
- "driverLogLevels": { # The per-package log levels for the driver. This may include "root" package name to configure rootLogger. Examples: 'com.google = FATAL', 'root = INFO', 'org.apache = DEBUG'
- "a_key": "A String",
- },
- },
- "jarFileUris": [ # Optional Jar file URIs to add to the CLASSPATHs of the Hadoop driver and tasks.
- "A String",
- ],
- "fileUris": [ # Optional HCFS (Hadoop Compatible Filesystem) URIs of files to be copied to the working directory of Hadoop drivers and distributed tasks. Useful for naively parallel tasks.
- "A String",
- ],
- "mainClass": "A String", # The name of the driver's main class. The jar file containing the class must be in the default CLASSPATH or specified in jar_file_uris.
- "archiveUris": [ # Optional HCFS URIs of archives to be extracted in the working directory of Hadoop drivers and tasks. Supported file types: .jar, .tar, .tar.gz, .tgz, or .zip.
- "A String",
- ],
- "mainJarFileUri": "A String", # The HCFS URI of the jar file containing the main class. Examples: 'gs://foo-bucket/analytics-binaries/extract-useful-metrics-mr.jar' 'hdfs:/tmp/test-samples/custom-wordcount.jar' 'file:///home/usr/lib/hadoop-mapreduce/hadoop-mapreduce-examples.jar'
- "properties": { # Optional A mapping of property names to values, used to configure Hadoop. Properties that conflict with values set by the Cloud Dataproc API may be overwritten. Can include properties set in /etc/hadoop/conf/*-site and classes in user code.
- "a_key": "A String",
- },
- },
- "labels": { # Optional The labels to associate with this job. Label keys must contain 1 to 63 characters, and must conform to RFC 1035 (https://www.ietf.org/rfc/rfc1035.txt). Label values may be empty, but, if present, must contain 1 to 63 characters, and must conform to RFC 1035 (https://www.ietf.org/rfc/rfc1035.txt). No more than 32 labels can be associated with a job.
- "a_key": "A String",
- },
- "placement": { # Cloud Dataproc job config. # Required Job information, including how, when, and where to run the job.
- "clusterName": "A String", # Required The name of the cluster where the job will be submitted.
- "clusterUuid": "A String", # Output-only A cluster UUID generated by the Cloud Dataproc service when the job is submitted.
- },
- "reference": { # Encapsulates the full scoping used to reference a job. # Optional The fully qualified reference to the job, which can be used to obtain the equivalent REST path of the job resource. If this property is not specified when a job is created, the server generates a <code>job_id</code>.
- "projectId": "A String", # Required The ID of the Google Cloud Platform project that the job belongs to.
- "jobId": "A String", # Optional The job ID, which must be unique within the project. The job ID is generated by the server upon job submission or provided by the user as a means to perform retries without creating duplicate jobs. The ID must contain only letters (a-z, A-Z), numbers (0-9), underscores (_), or hyphens (-). The maximum length is 100 characters.
+ "details": "A String", # Output-only Optional job state details, such as an error description if the state is <code>ERROR</code>.
},
"sparkSqlJob": { # A Cloud Dataproc job for running Apache Spark SQL (http://spark.apache.org/sql/) queries. # Job is a SparkSql job.
"queryFileUri": "A String", # The HCFS URI of the script that contains SQL queries.
@@ -698,10 +663,16 @@
"a_key": "A String",
},
},
- "driverOutputResourceUri": "A String", # Output-only A URI pointing to the location of the stdout of the job's driver program.
- "pysparkJob": { # A Cloud Dataproc job for running Apache PySpark (https://spark.apache.org/docs/0.9.0/python-programming-guide.html) applications on YARN. # Job is a Pyspark job.
- "mainPythonFileUri": "A String", # Required The HCFS URI of the main Python file to use as the driver. Must be a .py file.
- "jarFileUris": [ # Optional HCFS URIs of jar files to add to the CLASSPATHs of the Python driver and tasks.
+ "placement": { # Cloud Dataproc job config. # Required Job information, including how, when, and where to run the job.
+ "clusterName": "A String", # Required The name of the cluster where the job will be submitted.
+ "clusterUuid": "A String", # Output-only A cluster UUID generated by the Cloud Dataproc service when the job is submitted.
+ },
+ "reference": { # Encapsulates the full scoping used to reference a job. # Optional The fully qualified reference to the job, which can be used to obtain the equivalent REST path of the job resource. If this property is not specified when a job is created, the server generates a <code>job_id</code>.
+ "projectId": "A String", # Required The ID of the Google Cloud Platform project that the job belongs to.
+ "jobId": "A String", # Optional The job ID, which must be unique within the project. The job ID is generated by the server upon job submission or provided by the user as a means to perform retries without creating duplicate jobs. The ID must contain only letters (a-z, A-Z), numbers (0-9), underscores (_), or hyphens (-). The maximum length is 100 characters.
+ },
+ "hadoopJob": { # A Cloud Dataproc job for running Apache Hadoop MapReduce (https://hadoop.apache.org/docs/current/hadoop-mapreduce-client/hadoop-mapreduce-client-core/MapReduceTutorial.html) jobs on Apache Hadoop YARN (https://hadoop.apache.org/docs/r2.7.1/hadoop-yarn/hadoop-yarn-site/YARN.html). # Job is a Hadoop job.
+ "jarFileUris": [ # Optional Jar file URIs to add to the CLASSPATHs of the Hadoop driver and tasks.
"A String",
],
"loggingConfig": { # The runtime logging config of the job. # Optional The runtime log config for job execution.
@@ -709,9 +680,38 @@
"a_key": "A String",
},
},
+ "args": [ # Optional The arguments to pass to the driver. Do not include arguments, such as -libjars or -Dfoo=bar, that can be set as job properties, since a collision may occur that causes an incorrect job submission.
+ "A String",
+ ],
+ "fileUris": [ # Optional HCFS (Hadoop Compatible Filesystem) URIs of files to be copied to the working directory of Hadoop drivers and distributed tasks. Useful for naively parallel tasks.
+ "A String",
+ ],
+ "mainClass": "A String", # The name of the driver's main class. The jar file containing the class must be in the default CLASSPATH or specified in jar_file_uris.
+ "archiveUris": [ # Optional HCFS URIs of archives to be extracted in the working directory of Hadoop drivers and tasks. Supported file types: .jar, .tar, .tar.gz, .tgz, or .zip.
+ "A String",
+ ],
+ "mainJarFileUri": "A String", # The HCFS URI of the jar file containing the main class. Examples: 'gs://foo-bucket/analytics-binaries/extract-useful-metrics-mr.jar' 'hdfs:/tmp/test-samples/custom-wordcount.jar' 'file:///home/usr/lib/hadoop-mapreduce/hadoop-mapreduce-examples.jar'
+ "properties": { # Optional A mapping of property names to values, used to configure Hadoop. Properties that conflict with values set by the Cloud Dataproc API may be overwritten. Can include properties set in /etc/hadoop/conf/*-site and classes in user code.
+ "a_key": "A String",
+ },
+ },
+ "labels": { # Optional The labels to associate with this job. Label keys must contain 1 to 63 characters, and must conform to RFC 1035 (https://www.ietf.org/rfc/rfc1035.txt). Label values may be empty, but, if present, must contain 1 to 63 characters, and must conform to RFC 1035 (https://www.ietf.org/rfc/rfc1035.txt). No more than 32 labels can be associated with a job.
+ "a_key": "A String",
+ },
+ "driverOutputResourceUri": "A String", # Output-only A URI pointing to the location of the stdout of the job's driver program.
+ "pysparkJob": { # A Cloud Dataproc job for running Apache PySpark (https://spark.apache.org/docs/0.9.0/python-programming-guide.html) applications on YARN. # Job is a Pyspark job.
+ "mainPythonFileUri": "A String", # Required The HCFS URI of the main Python file to use as the driver. Must be a .py file.
"args": [ # Optional The arguments to pass to the driver. Do not include arguments, such as --conf, that can be set as job properties, since a collision may occur that causes an incorrect job submission.
"A String",
],
+ "loggingConfig": { # The runtime logging config of the job. # Optional The runtime log config for job execution.
+ "driverLogLevels": { # The per-package log levels for the driver. This may include "root" package name to configure rootLogger. Examples: 'com.google = FATAL', 'root = INFO', 'org.apache = DEBUG'
+ "a_key": "A String",
+ },
+ },
+ "jarFileUris": [ # Optional HCFS URIs of jar files to add to the CLASSPATHs of the Python driver and tasks.
+ "A String",
+ ],
"fileUris": [ # Optional HCFS URIs of files to be copied to the working directory of Python drivers and distributed tasks. Useful for naively parallel tasks.
"A String",
],
@@ -727,7 +727,7 @@
},
"driverControlFilesUri": "A String", # Output-only If present, the location of miscellaneous control files which may be used as part of job setup and handling. If not present, control files may be placed in the same location as driver_output_uri.
"sparkJob": { # A Cloud Dataproc job for running Apache Spark (http://spark.apache.org/) applications on YARN. # Job is a Spark job.
- "jarFileUris": [ # Optional HCFS URIs of jar files to add to the CLASSPATHs of the Spark driver and tasks.
+ "args": [ # Optional The arguments to pass to the driver. Do not include arguments, such as --conf, that can be set as job properties, since a collision may occur that causes an incorrect job submission.
"A String",
],
"loggingConfig": { # The runtime logging config of the job. # Optional The runtime log config for job execution.
@@ -735,7 +735,7 @@
"a_key": "A String",
},
},
- "args": [ # Optional The arguments to pass to the driver. Do not include arguments, such as --conf, that can be set as job properties, since a collision may occur that causes an incorrect job submission.
+ "jarFileUris": [ # Optional HCFS URIs of jar files to add to the CLASSPATHs of the Spark driver and tasks.
"A String",
],
"fileUris": [ # Optional HCFS URIs of files to be copied to the working directory of Spark drivers and distributed tasks. Useful for naively parallel tasks.
@@ -765,8 +765,8 @@
{ # Cloud Dataproc job status.
"state": "A String", # Output-only A state message specifying the overall job state.
"stateStartTime": "A String", # Output-only The time when this state was entered.
- "details": "A String", # Output-only Optional job state details, such as an error description if the state is <code>ERROR</code>.
"substate": "A String", # Output-only Additional state information, which includes status reported by the agent.
+ "details": "A String", # Output-only Optional job state details, such as an error description if the state is <code>ERROR</code>.
},
],
"hiveJob": { # A Cloud Dataproc job for running Apache Hive (https://hive.apache.org/) queries on YARN. # Job is a Hive job.
@@ -830,43 +830,8 @@
"status": { # Cloud Dataproc job status. # Output-only The job status. Additional application-specific status information may be contained in the <code>type_job</code> and <code>yarn_applications</code> fields.
"state": "A String", # Output-only A state message specifying the overall job state.
"stateStartTime": "A String", # Output-only The time when this state was entered.
- "details": "A String", # Output-only Optional job state details, such as an error description if the state is <code>ERROR</code>.
"substate": "A String", # Output-only Additional state information, which includes status reported by the agent.
- },
- "hadoopJob": { # A Cloud Dataproc job for running Apache Hadoop MapReduce (https://hadoop.apache.org/docs/current/hadoop-mapreduce-client/hadoop-mapreduce-client-core/MapReduceTutorial.html) jobs on Apache Hadoop YARN (https://hadoop.apache.org/docs/r2.7.1/hadoop-yarn/hadoop-yarn-site/YARN.html). # Job is a Hadoop job.
- "args": [ # Optional The arguments to pass to the driver. Do not include arguments, such as -libjars or -Dfoo=bar, that can be set as job properties, since a collision may occur that causes an incorrect job submission.
- "A String",
- ],
- "loggingConfig": { # The runtime logging config of the job. # Optional The runtime log config for job execution.
- "driverLogLevels": { # The per-package log levels for the driver. This may include "root" package name to configure rootLogger. Examples: 'com.google = FATAL', 'root = INFO', 'org.apache = DEBUG'
- "a_key": "A String",
- },
- },
- "jarFileUris": [ # Optional Jar file URIs to add to the CLASSPATHs of the Hadoop driver and tasks.
- "A String",
- ],
- "fileUris": [ # Optional HCFS (Hadoop Compatible Filesystem) URIs of files to be copied to the working directory of Hadoop drivers and distributed tasks. Useful for naively parallel tasks.
- "A String",
- ],
- "mainClass": "A String", # The name of the driver's main class. The jar file containing the class must be in the default CLASSPATH or specified in jar_file_uris.
- "archiveUris": [ # Optional HCFS URIs of archives to be extracted in the working directory of Hadoop drivers and tasks. Supported file types: .jar, .tar, .tar.gz, .tgz, or .zip.
- "A String",
- ],
- "mainJarFileUri": "A String", # The HCFS URI of the jar file containing the main class. Examples: 'gs://foo-bucket/analytics-binaries/extract-useful-metrics-mr.jar' 'hdfs:/tmp/test-samples/custom-wordcount.jar' 'file:///home/usr/lib/hadoop-mapreduce/hadoop-mapreduce-examples.jar'
- "properties": { # Optional A mapping of property names to values, used to configure Hadoop. Properties that conflict with values set by the Cloud Dataproc API may be overwritten. Can include properties set in /etc/hadoop/conf/*-site and classes in user code.
- "a_key": "A String",
- },
- },
- "labels": { # Optional The labels to associate with this job. Label keys must contain 1 to 63 characters, and must conform to RFC 1035 (https://www.ietf.org/rfc/rfc1035.txt). Label values may be empty, but, if present, must contain 1 to 63 characters, and must conform to RFC 1035 (https://www.ietf.org/rfc/rfc1035.txt). No more than 32 labels can be associated with a job.
- "a_key": "A String",
- },
- "placement": { # Cloud Dataproc job config. # Required Job information, including how, when, and where to run the job.
- "clusterName": "A String", # Required The name of the cluster where the job will be submitted.
- "clusterUuid": "A String", # Output-only A cluster UUID generated by the Cloud Dataproc service when the job is submitted.
- },
- "reference": { # Encapsulates the full scoping used to reference a job. # Optional The fully qualified reference to the job, which can be used to obtain the equivalent REST path of the job resource. If this property is not specified when a job is created, the server generates a <code>job_id</code>.
- "projectId": "A String", # Required The ID of the Google Cloud Platform project that the job belongs to.
- "jobId": "A String", # Optional The job ID, which must be unique within the project. The job ID is generated by the server upon job submission or provided by the user as a means to perform retries without creating duplicate jobs. The ID must contain only letters (a-z, A-Z), numbers (0-9), underscores (_), or hyphens (-). The maximum length is 100 characters.
+ "details": "A String", # Output-only Optional job state details, such as an error description if the state is <code>ERROR</code>.
},
"sparkSqlJob": { # A Cloud Dataproc job for running Apache Spark SQL (http://spark.apache.org/sql/) queries. # Job is a SparkSql job.
"queryFileUri": "A String", # The HCFS URI of the script that contains SQL queries.
@@ -931,10 +896,16 @@
"a_key": "A String",
},
},
- "driverOutputResourceUri": "A String", # Output-only A URI pointing to the location of the stdout of the job's driver program.
- "pysparkJob": { # A Cloud Dataproc job for running Apache PySpark (https://spark.apache.org/docs/0.9.0/python-programming-guide.html) applications on YARN. # Job is a Pyspark job.
- "mainPythonFileUri": "A String", # Required The HCFS URI of the main Python file to use as the driver. Must be a .py file.
- "jarFileUris": [ # Optional HCFS URIs of jar files to add to the CLASSPATHs of the Python driver and tasks.
+ "placement": { # Cloud Dataproc job config. # Required Job information, including how, when, and where to run the job.
+ "clusterName": "A String", # Required The name of the cluster where the job will be submitted.
+ "clusterUuid": "A String", # Output-only A cluster UUID generated by the Cloud Dataproc service when the job is submitted.
+ },
+ "reference": { # Encapsulates the full scoping used to reference a job. # Optional The fully qualified reference to the job, which can be used to obtain the equivalent REST path of the job resource. If this property is not specified when a job is created, the server generates a <code>job_id</code>.
+ "projectId": "A String", # Required The ID of the Google Cloud Platform project that the job belongs to.
+ "jobId": "A String", # Optional The job ID, which must be unique within the project. The job ID is generated by the server upon job submission or provided by the user as a means to perform retries without creating duplicate jobs. The ID must contain only letters (a-z, A-Z), numbers (0-9), underscores (_), or hyphens (-). The maximum length is 100 characters.
+ },
+ "hadoopJob": { # A Cloud Dataproc job for running Apache Hadoop MapReduce (https://hadoop.apache.org/docs/current/hadoop-mapreduce-client/hadoop-mapreduce-client-core/MapReduceTutorial.html) jobs on Apache Hadoop YARN (https://hadoop.apache.org/docs/r2.7.1/hadoop-yarn/hadoop-yarn-site/YARN.html). # Job is a Hadoop job.
+ "jarFileUris": [ # Optional Jar file URIs to add to the CLASSPATHs of the Hadoop driver and tasks.
"A String",
],
"loggingConfig": { # The runtime logging config of the job. # Optional The runtime log config for job execution.
@@ -942,9 +913,38 @@
"a_key": "A String",
},
},
+ "args": [ # Optional The arguments to pass to the driver. Do not include arguments, such as -libjars or -Dfoo=bar, that can be set as job properties, since a collision may occur that causes an incorrect job submission.
+ "A String",
+ ],
+ "fileUris": [ # Optional HCFS (Hadoop Compatible Filesystem) URIs of files to be copied to the working directory of Hadoop drivers and distributed tasks. Useful for naively parallel tasks.
+ "A String",
+ ],
+ "mainClass": "A String", # The name of the driver's main class. The jar file containing the class must be in the default CLASSPATH or specified in jar_file_uris.
+ "archiveUris": [ # Optional HCFS URIs of archives to be extracted in the working directory of Hadoop drivers and tasks. Supported file types: .jar, .tar, .tar.gz, .tgz, or .zip.
+ "A String",
+ ],
+ "mainJarFileUri": "A String", # The HCFS URI of the jar file containing the main class. Examples: 'gs://foo-bucket/analytics-binaries/extract-useful-metrics-mr.jar' 'hdfs:/tmp/test-samples/custom-wordcount.jar' 'file:///home/usr/lib/hadoop-mapreduce/hadoop-mapreduce-examples.jar'
+ "properties": { # Optional A mapping of property names to values, used to configure Hadoop. Properties that conflict with values set by the Cloud Dataproc API may be overwritten. Can include properties set in /etc/hadoop/conf/*-site and classes in user code.
+ "a_key": "A String",
+ },
+ },
+ "labels": { # Optional The labels to associate with this job. Label keys must contain 1 to 63 characters, and must conform to RFC 1035 (https://www.ietf.org/rfc/rfc1035.txt). Label values may be empty, but, if present, must contain 1 to 63 characters, and must conform to RFC 1035 (https://www.ietf.org/rfc/rfc1035.txt). No more than 32 labels can be associated with a job.
+ "a_key": "A String",
+ },
+ "driverOutputResourceUri": "A String", # Output-only A URI pointing to the location of the stdout of the job's driver program.
+ "pysparkJob": { # A Cloud Dataproc job for running Apache PySpark (https://spark.apache.org/docs/0.9.0/python-programming-guide.html) applications on YARN. # Job is a Pyspark job.
+ "mainPythonFileUri": "A String", # Required The HCFS URI of the main Python file to use as the driver. Must be a .py file.
"args": [ # Optional The arguments to pass to the driver. Do not include arguments, such as --conf, that can be set as job properties, since a collision may occur that causes an incorrect job submission.
"A String",
],
+ "loggingConfig": { # The runtime logging config of the job. # Optional The runtime log config for job execution.
+ "driverLogLevels": { # The per-package log levels for the driver. This may include "root" package name to configure rootLogger. Examples: 'com.google = FATAL', 'root = INFO', 'org.apache = DEBUG'
+ "a_key": "A String",
+ },
+ },
+ "jarFileUris": [ # Optional HCFS URIs of jar files to add to the CLASSPATHs of the Python driver and tasks.
+ "A String",
+ ],
"fileUris": [ # Optional HCFS URIs of files to be copied to the working directory of Python drivers and distributed tasks. Useful for naively parallel tasks.
"A String",
],
@@ -960,7 +960,7 @@
},
"driverControlFilesUri": "A String", # Output-only If present, the location of miscellaneous control files which may be used as part of job setup and handling. If not present, control files may be placed in the same location as driver_output_uri.
"sparkJob": { # A Cloud Dataproc job for running Apache Spark (http://spark.apache.org/) applications on YARN. # Job is a Spark job.
- "jarFileUris": [ # Optional HCFS URIs of jar files to add to the CLASSPATHs of the Spark driver and tasks.
+ "args": [ # Optional The arguments to pass to the driver. Do not include arguments, such as --conf, that can be set as job properties, since a collision may occur that causes an incorrect job submission.
"A String",
],
"loggingConfig": { # The runtime logging config of the job. # Optional The runtime log config for job execution.
@@ -968,7 +968,7 @@
"a_key": "A String",
},
},
- "args": [ # Optional The arguments to pass to the driver. Do not include arguments, such as --conf, that can be set as job properties, since a collision may occur that causes an incorrect job submission.
+ "jarFileUris": [ # Optional HCFS URIs of jar files to add to the CLASSPATHs of the Spark driver and tasks.
"A String",
],
"fileUris": [ # Optional HCFS URIs of files to be copied to the working directory of Spark drivers and distributed tasks. Useful for naively parallel tasks.
@@ -998,8 +998,8 @@
{ # Cloud Dataproc job status.
"state": "A String", # Output-only A state message specifying the overall job state.
"stateStartTime": "A String", # Output-only The time when this state was entered.
- "details": "A String", # Output-only Optional job state details, such as an error description if the state is <code>ERROR</code>.
"substate": "A String", # Output-only Additional state information, which includes status reported by the agent.
+ "details": "A String", # Output-only Optional job state details, such as an error description if the state is <code>ERROR</code>.
},
],
"hiveJob": { # A Cloud Dataproc job for running Apache Hive (https://hive.apache.org/) queries on YARN. # Job is a Hive job.
@@ -1044,43 +1044,8 @@
"status": { # Cloud Dataproc job status. # Output-only The job status. Additional application-specific status information may be contained in the <code>type_job</code> and <code>yarn_applications</code> fields.
"state": "A String", # Output-only A state message specifying the overall job state.
"stateStartTime": "A String", # Output-only The time when this state was entered.
- "details": "A String", # Output-only Optional job state details, such as an error description if the state is <code>ERROR</code>.
"substate": "A String", # Output-only Additional state information, which includes status reported by the agent.
- },
- "hadoopJob": { # A Cloud Dataproc job for running Apache Hadoop MapReduce (https://hadoop.apache.org/docs/current/hadoop-mapreduce-client/hadoop-mapreduce-client-core/MapReduceTutorial.html) jobs on Apache Hadoop YARN (https://hadoop.apache.org/docs/r2.7.1/hadoop-yarn/hadoop-yarn-site/YARN.html). # Job is a Hadoop job.
- "args": [ # Optional The arguments to pass to the driver. Do not include arguments, such as -libjars or -Dfoo=bar, that can be set as job properties, since a collision may occur that causes an incorrect job submission.
- "A String",
- ],
- "loggingConfig": { # The runtime logging config of the job. # Optional The runtime log config for job execution.
- "driverLogLevels": { # The per-package log levels for the driver. This may include "root" package name to configure rootLogger. Examples: 'com.google = FATAL', 'root = INFO', 'org.apache = DEBUG'
- "a_key": "A String",
- },
- },
- "jarFileUris": [ # Optional Jar file URIs to add to the CLASSPATHs of the Hadoop driver and tasks.
- "A String",
- ],
- "fileUris": [ # Optional HCFS (Hadoop Compatible Filesystem) URIs of files to be copied to the working directory of Hadoop drivers and distributed tasks. Useful for naively parallel tasks.
- "A String",
- ],
- "mainClass": "A String", # The name of the driver's main class. The jar file containing the class must be in the default CLASSPATH or specified in jar_file_uris.
- "archiveUris": [ # Optional HCFS URIs of archives to be extracted in the working directory of Hadoop drivers and tasks. Supported file types: .jar, .tar, .tar.gz, .tgz, or .zip.
- "A String",
- ],
- "mainJarFileUri": "A String", # The HCFS URI of the jar file containing the main class. Examples: 'gs://foo-bucket/analytics-binaries/extract-useful-metrics-mr.jar' 'hdfs:/tmp/test-samples/custom-wordcount.jar' 'file:///home/usr/lib/hadoop-mapreduce/hadoop-mapreduce-examples.jar'
- "properties": { # Optional A mapping of property names to values, used to configure Hadoop. Properties that conflict with values set by the Cloud Dataproc API may be overwritten. Can include properties set in /etc/hadoop/conf/*-site and classes in user code.
- "a_key": "A String",
- },
- },
- "labels": { # Optional The labels to associate with this job. Label keys must contain 1 to 63 characters, and must conform to RFC 1035 (https://www.ietf.org/rfc/rfc1035.txt). Label values may be empty, but, if present, must contain 1 to 63 characters, and must conform to RFC 1035 (https://www.ietf.org/rfc/rfc1035.txt). No more than 32 labels can be associated with a job.
- "a_key": "A String",
- },
- "placement": { # Cloud Dataproc job config. # Required Job information, including how, when, and where to run the job.
- "clusterName": "A String", # Required The name of the cluster where the job will be submitted.
- "clusterUuid": "A String", # Output-only A cluster UUID generated by the Cloud Dataproc service when the job is submitted.
- },
- "reference": { # Encapsulates the full scoping used to reference a job. # Optional The fully qualified reference to the job, which can be used to obtain the equivalent REST path of the job resource. If this property is not specified when a job is created, the server generates a <code>job_id</code>.
- "projectId": "A String", # Required The ID of the Google Cloud Platform project that the job belongs to.
- "jobId": "A String", # Optional The job ID, which must be unique within the project. The job ID is generated by the server upon job submission or provided by the user as a means to perform retries without creating duplicate jobs. The ID must contain only letters (a-z, A-Z), numbers (0-9), underscores (_), or hyphens (-). The maximum length is 100 characters.
+ "details": "A String", # Output-only Optional job state details, such as an error description if the state is <code>ERROR</code>.
},
"sparkSqlJob": { # A Cloud Dataproc job for running Apache Spark SQL (http://spark.apache.org/sql/) queries. # Job is a SparkSql job.
"queryFileUri": "A String", # The HCFS URI of the script that contains SQL queries.
@@ -1145,10 +1110,16 @@
"a_key": "A String",
},
},
- "driverOutputResourceUri": "A String", # Output-only A URI pointing to the location of the stdout of the job's driver program.
- "pysparkJob": { # A Cloud Dataproc job for running Apache PySpark (https://spark.apache.org/docs/0.9.0/python-programming-guide.html) applications on YARN. # Job is a Pyspark job.
- "mainPythonFileUri": "A String", # Required The HCFS URI of the main Python file to use as the driver. Must be a .py file.
- "jarFileUris": [ # Optional HCFS URIs of jar files to add to the CLASSPATHs of the Python driver and tasks.
+ "placement": { # Cloud Dataproc job config. # Required Job information, including how, when, and where to run the job.
+ "clusterName": "A String", # Required The name of the cluster where the job will be submitted.
+ "clusterUuid": "A String", # Output-only A cluster UUID generated by the Cloud Dataproc service when the job is submitted.
+ },
+ "reference": { # Encapsulates the full scoping used to reference a job. # Optional The fully qualified reference to the job, which can be used to obtain the equivalent REST path of the job resource. If this property is not specified when a job is created, the server generates a <code>job_id</code>.
+ "projectId": "A String", # Required The ID of the Google Cloud Platform project that the job belongs to.
+ "jobId": "A String", # Optional The job ID, which must be unique within the project. The job ID is generated by the server upon job submission or provided by the user as a means to perform retries without creating duplicate jobs. The ID must contain only letters (a-z, A-Z), numbers (0-9), underscores (_), or hyphens (-). The maximum length is 100 characters.
+ },
+ "hadoopJob": { # A Cloud Dataproc job for running Apache Hadoop MapReduce (https://hadoop.apache.org/docs/current/hadoop-mapreduce-client/hadoop-mapreduce-client-core/MapReduceTutorial.html) jobs on Apache Hadoop YARN (https://hadoop.apache.org/docs/r2.7.1/hadoop-yarn/hadoop-yarn-site/YARN.html). # Job is a Hadoop job.
+ "jarFileUris": [ # Optional Jar file URIs to add to the CLASSPATHs of the Hadoop driver and tasks.
"A String",
],
"loggingConfig": { # The runtime logging config of the job. # Optional The runtime log config for job execution.
@@ -1156,9 +1127,38 @@
"a_key": "A String",
},
},
+ "args": [ # Optional The arguments to pass to the driver. Do not include arguments, such as -libjars or -Dfoo=bar, that can be set as job properties, since a collision may occur that causes an incorrect job submission.
+ "A String",
+ ],
+ "fileUris": [ # Optional HCFS (Hadoop Compatible Filesystem) URIs of files to be copied to the working directory of Hadoop drivers and distributed tasks. Useful for naively parallel tasks.
+ "A String",
+ ],
+ "mainClass": "A String", # The name of the driver's main class. The jar file containing the class must be in the default CLASSPATH or specified in jar_file_uris.
+ "archiveUris": [ # Optional HCFS URIs of archives to be extracted in the working directory of Hadoop drivers and tasks. Supported file types: .jar, .tar, .tar.gz, .tgz, or .zip.
+ "A String",
+ ],
+ "mainJarFileUri": "A String", # The HCFS URI of the jar file containing the main class. Examples: 'gs://foo-bucket/analytics-binaries/extract-useful-metrics-mr.jar' 'hdfs:/tmp/test-samples/custom-wordcount.jar' 'file:///home/usr/lib/hadoop-mapreduce/hadoop-mapreduce-examples.jar'
+ "properties": { # Optional A mapping of property names to values, used to configure Hadoop. Properties that conflict with values set by the Cloud Dataproc API may be overwritten. Can include properties set in /etc/hadoop/conf/*-site and classes in user code.
+ "a_key": "A String",
+ },
+ },
+ "labels": { # Optional The labels to associate with this job. Label keys must contain 1 to 63 characters, and must conform to RFC 1035 (https://www.ietf.org/rfc/rfc1035.txt). Label values may be empty, but, if present, must contain 1 to 63 characters, and must conform to RFC 1035 (https://www.ietf.org/rfc/rfc1035.txt). No more than 32 labels can be associated with a job.
+ "a_key": "A String",
+ },
+ "driverOutputResourceUri": "A String", # Output-only A URI pointing to the location of the stdout of the job's driver program.
+ "pysparkJob": { # A Cloud Dataproc job for running Apache PySpark (https://spark.apache.org/docs/0.9.0/python-programming-guide.html) applications on YARN. # Job is a Pyspark job.
+ "mainPythonFileUri": "A String", # Required The HCFS URI of the main Python file to use as the driver. Must be a .py file.
"args": [ # Optional The arguments to pass to the driver. Do not include arguments, such as --conf, that can be set as job properties, since a collision may occur that causes an incorrect job submission.
"A String",
],
+ "loggingConfig": { # The runtime logging config of the job. # Optional The runtime log config for job execution.
+ "driverLogLevels": { # The per-package log levels for the driver. This may include "root" package name to configure rootLogger. Examples: 'com.google = FATAL', 'root = INFO', 'org.apache = DEBUG'
+ "a_key": "A String",
+ },
+ },
+ "jarFileUris": [ # Optional HCFS URIs of jar files to add to the CLASSPATHs of the Python driver and tasks.
+ "A String",
+ ],
"fileUris": [ # Optional HCFS URIs of files to be copied to the working directory of Python drivers and distributed tasks. Useful for naively parallel tasks.
"A String",
],
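Mapping the hadoopJob fields above onto a concrete request gives a sketch like the following (paths and names are illustrative; either mainJarFileUri or mainClass identifies the driver):

    from googleapiclient import discovery

    dataproc = discovery.build('dataproc', 'v1')

    job = {
        'placement': {'clusterName': 'my-cluster'},  # Required
        'hadoopJob': {
            'mainJarFileUri': 'file:///usr/lib/hadoop-mapreduce/hadoop-mapreduce-examples.jar',
            # Application arguments only; -libjars/-D settings belong elsewhere.
            'args': ['wordcount', 'gs://my-bucket/input/', 'gs://my-bucket/output/'],
            'jarFileUris': ['gs://my-bucket/libs/helpers.jar'],
            'properties': {'mapreduce.job.reduces': '2'},  # instead of -D flags in args
        },
        'labels': {'env': 'test'},  # 1-63 chars each, RFC 1035, at most 32 labels
    }
    dataproc.projects().regions().jobs().submit(
        projectId='my-project', region='global', body={'job': job}).execute()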
@@ -1174,7 +1174,7 @@
},
"driverControlFilesUri": "A String", # Output-only If present, the location of miscellaneous control files which may be used as part of job setup and handling. If not present, control files may be placed in the same location as driver_output_uri.
"sparkJob": { # A Cloud Dataproc job for running Apache Spark (http://spark.apache.org/) applications on YARN. # Job is a Spark job.
- "jarFileUris": [ # Optional HCFS URIs of jar files to add to the CLASSPATHs of the Spark driver and tasks.
+ "args": [ # Optional The arguments to pass to the driver. Do not include arguments, such as --conf, that can be set as job properties, since a collision may occur that causes an incorrect job submission.
"A String",
],
"loggingConfig": { # The runtime logging config of the job. # Optional The runtime log config for job execution.
@@ -1182,7 +1182,7 @@
"a_key": "A String",
},
},
- "args": [ # Optional The arguments to pass to the driver. Do not include arguments, such as --conf, that can be set as job properties, since a collision may occur that causes an incorrect job submission.
+ "jarFileUris": [ # Optional HCFS URIs of jar files to add to the CLASSPATHs of the Spark driver and tasks.
"A String",
],
"fileUris": [ # Optional HCFS URIs of files to be copied to the working directory of Spark drivers and distributed tasks. Useful for naively parallel tasks.
@@ -1212,8 +1212,8 @@
{ # Cloud Dataproc job status.
"state": "A String", # Output-only A state message specifying the overall job state.
"stateStartTime": "A String", # Output-only The time when this state was entered.
- "details": "A String", # Output-only Optional job state details, such as an error description if the state is <code>ERROR</code>.
"substate": "A String", # Output-only Additional state information, which includes status reported by the agent.
+ "details": "A String", # Output-only Optional job state details, such as an error description if the state is <code>ERROR</code>.
},
],
"hiveJob": { # A Cloud Dataproc job for running Apache Hive (https://hive.apache.org/) queries on YARN. # Job is a Hive job.
@@ -1261,43 +1261,8 @@
"status": { # Cloud Dataproc job status. # Output-only The job status. Additional application-specific status information may be contained in the <code>type_job</code> and <code>yarn_applications</code> fields.
"state": "A String", # Output-only A state message specifying the overall job state.
"stateStartTime": "A String", # Output-only The time when this state was entered.
- "details": "A String", # Output-only Optional job state details, such as an error description if the state is <code>ERROR</code>.
"substate": "A String", # Output-only Additional state information, which includes status reported by the agent.
- },
- "hadoopJob": { # A Cloud Dataproc job for running Apache Hadoop MapReduce (https://hadoop.apache.org/docs/current/hadoop-mapreduce-client/hadoop-mapreduce-client-core/MapReduceTutorial.html) jobs on Apache Hadoop YARN (https://hadoop.apache.org/docs/r2.7.1/hadoop-yarn/hadoop-yarn-site/YARN.html). # Job is a Hadoop job.
- "args": [ # Optional The arguments to pass to the driver. Do not include arguments, such as -libjars or -Dfoo=bar, that can be set as job properties, since a collision may occur that causes an incorrect job submission.
- "A String",
- ],
- "loggingConfig": { # The runtime logging config of the job. # Optional The runtime log config for job execution.
- "driverLogLevels": { # The per-package log levels for the driver. This may include "root" package name to configure rootLogger. Examples: 'com.google = FATAL', 'root = INFO', 'org.apache = DEBUG'
- "a_key": "A String",
- },
- },
- "jarFileUris": [ # Optional Jar file URIs to add to the CLASSPATHs of the Hadoop driver and tasks.
- "A String",
- ],
- "fileUris": [ # Optional HCFS (Hadoop Compatible Filesystem) URIs of files to be copied to the working directory of Hadoop drivers and distributed tasks. Useful for naively parallel tasks.
- "A String",
- ],
- "mainClass": "A String", # The name of the driver's main class. The jar file containing the class must be in the default CLASSPATH or specified in jar_file_uris.
- "archiveUris": [ # Optional HCFS URIs of archives to be extracted in the working directory of Hadoop drivers and tasks. Supported file types: .jar, .tar, .tar.gz, .tgz, or .zip.
- "A String",
- ],
- "mainJarFileUri": "A String", # The HCFS URI of the jar file containing the main class. Examples: 'gs://foo-bucket/analytics-binaries/extract-useful-metrics-mr.jar' 'hdfs:/tmp/test-samples/custom-wordcount.jar' 'file:///home/usr/lib/hadoop-mapreduce/hadoop-mapreduce-examples.jar'
- "properties": { # Optional A mapping of property names to values, used to configure Hadoop. Properties that conflict with values set by the Cloud Dataproc API may be overwritten. Can include properties set in /etc/hadoop/conf/*-site and classes in user code.
- "a_key": "A String",
- },
- },
- "labels": { # Optional The labels to associate with this job. Label keys must contain 1 to 63 characters, and must conform to RFC 1035 (https://www.ietf.org/rfc/rfc1035.txt). Label values may be empty, but, if present, must contain 1 to 63 characters, and must conform to RFC 1035 (https://www.ietf.org/rfc/rfc1035.txt). No more than 32 labels can be associated with a job.
- "a_key": "A String",
- },
- "placement": { # Cloud Dataproc job config. # Required Job information, including how, when, and where to run the job.
- "clusterName": "A String", # Required The name of the cluster where the job will be submitted.
- "clusterUuid": "A String", # Output-only A cluster UUID generated by the Cloud Dataproc service when the job is submitted.
- },
- "reference": { # Encapsulates the full scoping used to reference a job. # Optional The fully qualified reference to the job, which can be used to obtain the equivalent REST path of the job resource. If this property is not specified when a job is created, the server generates a <code>job_id</code>.
- "projectId": "A String", # Required The ID of the Google Cloud Platform project that the job belongs to.
- "jobId": "A String", # Optional The job ID, which must be unique within the project. The job ID is generated by the server upon job submission or provided by the user as a means to perform retries without creating duplicate jobs. The ID must contain only letters (a-z, A-Z), numbers (0-9), underscores (_), or hyphens (-). The maximum length is 100 characters.
+ "details": "A String", # Output-only Optional job state details, such as an error description if the state is <code>ERROR</code>.
},
"sparkSqlJob": { # A Cloud Dataproc job for running Apache Spark SQL (http://spark.apache.org/sql/) queries. # Job is a SparkSql job.
"queryFileUri": "A String", # The HCFS URI of the script that contains SQL queries.
@@ -1362,10 +1327,16 @@
"a_key": "A String",
},
},
- "driverOutputResourceUri": "A String", # Output-only A URI pointing to the location of the stdout of the job's driver program.
- "pysparkJob": { # A Cloud Dataproc job for running Apache PySpark (https://spark.apache.org/docs/0.9.0/python-programming-guide.html) applications on YARN. # Job is a Pyspark job.
- "mainPythonFileUri": "A String", # Required The HCFS URI of the main Python file to use as the driver. Must be a .py file.
- "jarFileUris": [ # Optional HCFS URIs of jar files to add to the CLASSPATHs of the Python driver and tasks.
+ "placement": { # Cloud Dataproc job config. # Required Job information, including how, when, and where to run the job.
+ "clusterName": "A String", # Required The name of the cluster where the job will be submitted.
+ "clusterUuid": "A String", # Output-only A cluster UUID generated by the Cloud Dataproc service when the job is submitted.
+ },
+ "reference": { # Encapsulates the full scoping used to reference a job. # Optional The fully qualified reference to the job, which can be used to obtain the equivalent REST path of the job resource. If this property is not specified when a job is created, the server generates a <code>job_id</code>.
+ "projectId": "A String", # Required The ID of the Google Cloud Platform project that the job belongs to.
+ "jobId": "A String", # Optional The job ID, which must be unique within the project. The job ID is generated by the server upon job submission or provided by the user as a means to perform retries without creating duplicate jobs. The ID must contain only letters (a-z, A-Z), numbers (0-9), underscores (_), or hyphens (-). The maximum length is 100 characters.
+ },
+ "hadoopJob": { # A Cloud Dataproc job for running Apache Hadoop MapReduce (https://hadoop.apache.org/docs/current/hadoop-mapreduce-client/hadoop-mapreduce-client-core/MapReduceTutorial.html) jobs on Apache Hadoop YARN (https://hadoop.apache.org/docs/r2.7.1/hadoop-yarn/hadoop-yarn-site/YARN.html). # Job is a Hadoop job.
+ "jarFileUris": [ # Optional Jar file URIs to add to the CLASSPATHs of the Hadoop driver and tasks.
"A String",
],
"loggingConfig": { # The runtime logging config of the job. # Optional The runtime log config for job execution.
@@ -1373,9 +1344,38 @@
"a_key": "A String",
},
},
+ "args": [ # Optional The arguments to pass to the driver. Do not include arguments, such as -libjars or -Dfoo=bar, that can be set as job properties, since a collision may occur that causes an incorrect job submission.
+ "A String",
+ ],
+ "fileUris": [ # Optional HCFS (Hadoop Compatible Filesystem) URIs of files to be copied to the working directory of Hadoop drivers and distributed tasks. Useful for naively parallel tasks.
+ "A String",
+ ],
+ "mainClass": "A String", # The name of the driver's main class. The jar file containing the class must be in the default CLASSPATH or specified in jar_file_uris.
+ "archiveUris": [ # Optional HCFS URIs of archives to be extracted in the working directory of Hadoop drivers and tasks. Supported file types: .jar, .tar, .tar.gz, .tgz, or .zip.
+ "A String",
+ ],
+ "mainJarFileUri": "A String", # The HCFS URI of the jar file containing the main class. Examples: 'gs://foo-bucket/analytics-binaries/extract-useful-metrics-mr.jar' 'hdfs:/tmp/test-samples/custom-wordcount.jar' 'file:///home/usr/lib/hadoop-mapreduce/hadoop-mapreduce-examples.jar'
+ "properties": { # Optional A mapping of property names to values, used to configure Hadoop. Properties that conflict with values set by the Cloud Dataproc API may be overwritten. Can include properties set in /etc/hadoop/conf/*-site and classes in user code.
+ "a_key": "A String",
+ },
+ },
+ "labels": { # Optional The labels to associate with this job. Label keys must contain 1 to 63 characters, and must conform to RFC 1035 (https://www.ietf.org/rfc/rfc1035.txt). Label values may be empty, but, if present, must contain 1 to 63 characters, and must conform to RFC 1035 (https://www.ietf.org/rfc/rfc1035.txt). No more than 32 labels can be associated with a job.
+ "a_key": "A String",
+ },
+ "driverOutputResourceUri": "A String", # Output-only A URI pointing to the location of the stdout of the job's driver program.
+ "pysparkJob": { # A Cloud Dataproc job for running Apache PySpark (https://spark.apache.org/docs/0.9.0/python-programming-guide.html) applications on YARN. # Job is a Pyspark job.
+ "mainPythonFileUri": "A String", # Required The HCFS URI of the main Python file to use as the driver. Must be a .py file.
"args": [ # Optional The arguments to pass to the driver. Do not include arguments, such as --conf, that can be set as job properties, since a collision may occur that causes an incorrect job submission.
"A String",
],
+ "loggingConfig": { # The runtime logging config of the job. # Optional The runtime log config for job execution.
+ "driverLogLevels": { # The per-package log levels for the driver. This may include "root" package name to configure rootLogger. Examples: 'com.google = FATAL', 'root = INFO', 'org.apache = DEBUG'
+ "a_key": "A String",
+ },
+ },
+ "jarFileUris": [ # Optional HCFS URIs of jar files to add to the CLASSPATHs of the Python driver and tasks.
+ "A String",
+ ],
"fileUris": [ # Optional HCFS URIs of files to be copied to the working directory of Python drivers and distributed tasks. Useful for naively parallel tasks.
"A String",
],
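The driverLogLevels examples quoted in the comment above ('com.google = FATAL', 'root = INFO', 'org.apache = DEBUG') translate directly into the map; for a pysparkJob that looks like:

    pyspark_job = {
        'mainPythonFileUri': 'gs://my-bucket/jobs/transform.py',  # must be a .py file
        'loggingConfig': {
            'driverLogLevels': {
                'root': 'INFO',        # configures rootLogger
                'org.apache': 'DEBUG',
                'com.google': 'FATAL',
            },
        },
    }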
@@ -1391,7 +1391,7 @@
},
"driverControlFilesUri": "A String", # Output-only If present, the location of miscellaneous control files which may be used as part of job setup and handling. If not present, control files may be placed in the same location as driver_output_uri.
"sparkJob": { # A Cloud Dataproc job for running Apache Spark (http://spark.apache.org/) applications on YARN. # Job is a Spark job.
- "jarFileUris": [ # Optional HCFS URIs of jar files to add to the CLASSPATHs of the Spark driver and tasks.
+ "args": [ # Optional The arguments to pass to the driver. Do not include arguments, such as --conf, that can be set as job properties, since a collision may occur that causes an incorrect job submission.
"A String",
],
"loggingConfig": { # The runtime logging config of the job. # Optional The runtime log config for job execution.
@@ -1399,7 +1399,7 @@
"a_key": "A String",
},
},
- "args": [ # Optional The arguments to pass to the driver. Do not include arguments, such as --conf, that can be set as job properties, since a collision may occur that causes an incorrect job submission.
+ "jarFileUris": [ # Optional HCFS URIs of jar files to add to the CLASSPATHs of the Spark driver and tasks.
"A String",
],
"fileUris": [ # Optional HCFS URIs of files to be copied to the working directory of Spark drivers and distributed tasks. Useful for naively parallel tasks.
@@ -1429,8 +1429,8 @@
{ # Cloud Dataproc job status.
"state": "A String", # Output-only A state message specifying the overall job state.
"stateStartTime": "A String", # Output-only The time when this state was entered.
- "details": "A String", # Output-only Optional job state details, such as an error description if the state is <code>ERROR</code>.
"substate": "A String", # Output-only Additional state information, which includes status reported by the agent.
+ "details": "A String", # Output-only Optional job state details, such as an error description if the state is <code>ERROR</code>.
},
],
"hiveJob": { # A Cloud Dataproc job for running Apache Hive (https://hive.apache.org/) queries on YARN. # Job is a Hive job.
@@ -1475,43 +1475,8 @@
"status": { # Cloud Dataproc job status. # Output-only The job status. Additional application-specific status information may be contained in the <code>type_job</code> and <code>yarn_applications</code> fields.
"state": "A String", # Output-only A state message specifying the overall job state.
"stateStartTime": "A String", # Output-only The time when this state was entered.
- "details": "A String", # Output-only Optional job state details, such as an error description if the state is <code>ERROR</code>.
"substate": "A String", # Output-only Additional state information, which includes status reported by the agent.
- },
- "hadoopJob": { # A Cloud Dataproc job for running Apache Hadoop MapReduce (https://hadoop.apache.org/docs/current/hadoop-mapreduce-client/hadoop-mapreduce-client-core/MapReduceTutorial.html) jobs on Apache Hadoop YARN (https://hadoop.apache.org/docs/r2.7.1/hadoop-yarn/hadoop-yarn-site/YARN.html). # Job is a Hadoop job.
- "args": [ # Optional The arguments to pass to the driver. Do not include arguments, such as -libjars or -Dfoo=bar, that can be set as job properties, since a collision may occur that causes an incorrect job submission.
- "A String",
- ],
- "loggingConfig": { # The runtime logging config of the job. # Optional The runtime log config for job execution.
- "driverLogLevels": { # The per-package log levels for the driver. This may include "root" package name to configure rootLogger. Examples: 'com.google = FATAL', 'root = INFO', 'org.apache = DEBUG'
- "a_key": "A String",
- },
- },
- "jarFileUris": [ # Optional Jar file URIs to add to the CLASSPATHs of the Hadoop driver and tasks.
- "A String",
- ],
- "fileUris": [ # Optional HCFS (Hadoop Compatible Filesystem) URIs of files to be copied to the working directory of Hadoop drivers and distributed tasks. Useful for naively parallel tasks.
- "A String",
- ],
- "mainClass": "A String", # The name of the driver's main class. The jar file containing the class must be in the default CLASSPATH or specified in jar_file_uris.
- "archiveUris": [ # Optional HCFS URIs of archives to be extracted in the working directory of Hadoop drivers and tasks. Supported file types: .jar, .tar, .tar.gz, .tgz, or .zip.
- "A String",
- ],
- "mainJarFileUri": "A String", # The HCFS URI of the jar file containing the main class. Examples: 'gs://foo-bucket/analytics-binaries/extract-useful-metrics-mr.jar' 'hdfs:/tmp/test-samples/custom-wordcount.jar' 'file:///home/usr/lib/hadoop-mapreduce/hadoop-mapreduce-examples.jar'
- "properties": { # Optional A mapping of property names to values, used to configure Hadoop. Properties that conflict with values set by the Cloud Dataproc API may be overwritten. Can include properties set in /etc/hadoop/conf/*-site and classes in user code.
- "a_key": "A String",
- },
- },
- "labels": { # Optional The labels to associate with this job. Label keys must contain 1 to 63 characters, and must conform to RFC 1035 (https://www.ietf.org/rfc/rfc1035.txt). Label values may be empty, but, if present, must contain 1 to 63 characters, and must conform to RFC 1035 (https://www.ietf.org/rfc/rfc1035.txt). No more than 32 labels can be associated with a job.
- "a_key": "A String",
- },
- "placement": { # Cloud Dataproc job config. # Required Job information, including how, when, and where to run the job.
- "clusterName": "A String", # Required The name of the cluster where the job will be submitted.
- "clusterUuid": "A String", # Output-only A cluster UUID generated by the Cloud Dataproc service when the job is submitted.
- },
- "reference": { # Encapsulates the full scoping used to reference a job. # Optional The fully qualified reference to the job, which can be used to obtain the equivalent REST path of the job resource. If this property is not specified when a job is created, the server generates a <code>job_id</code>.
- "projectId": "A String", # Required The ID of the Google Cloud Platform project that the job belongs to.
- "jobId": "A String", # Optional The job ID, which must be unique within the project. The job ID is generated by the server upon job submission or provided by the user as a means to perform retries without creating duplicate jobs. The ID must contain only letters (a-z, A-Z), numbers (0-9), underscores (_), or hyphens (-). The maximum length is 100 characters.
+ "details": "A String", # Output-only Optional job state details, such as an error description if the state is <code>ERROR</code>.
},
"sparkSqlJob": { # A Cloud Dataproc job for running Apache Spark SQL (http://spark.apache.org/sql/) queries. # Job is a SparkSql job.
"queryFileUri": "A String", # The HCFS URI of the script that contains SQL queries.
@@ -1576,10 +1541,16 @@
"a_key": "A String",
},
},
- "driverOutputResourceUri": "A String", # Output-only A URI pointing to the location of the stdout of the job's driver program.
- "pysparkJob": { # A Cloud Dataproc job for running Apache PySpark (https://spark.apache.org/docs/0.9.0/python-programming-guide.html) applications on YARN. # Job is a Pyspark job.
- "mainPythonFileUri": "A String", # Required The HCFS URI of the main Python file to use as the driver. Must be a .py file.
- "jarFileUris": [ # Optional HCFS URIs of jar files to add to the CLASSPATHs of the Python driver and tasks.
+ "placement": { # Cloud Dataproc job config. # Required Job information, including how, when, and where to run the job.
+ "clusterName": "A String", # Required The name of the cluster where the job will be submitted.
+ "clusterUuid": "A String", # Output-only A cluster UUID generated by the Cloud Dataproc service when the job is submitted.
+ },
+ "reference": { # Encapsulates the full scoping used to reference a job. # Optional The fully qualified reference to the job, which can be used to obtain the equivalent REST path of the job resource. If this property is not specified when a job is created, the server generates a <code>job_id</code>.
+ "projectId": "A String", # Required The ID of the Google Cloud Platform project that the job belongs to.
+ "jobId": "A String", # Optional The job ID, which must be unique within the project. The job ID is generated by the server upon job submission or provided by the user as a means to perform retries without creating duplicate jobs. The ID must contain only letters (a-z, A-Z), numbers (0-9), underscores (_), or hyphens (-). The maximum length is 100 characters.
+ },
+ "hadoopJob": { # A Cloud Dataproc job for running Apache Hadoop MapReduce (https://hadoop.apache.org/docs/current/hadoop-mapreduce-client/hadoop-mapreduce-client-core/MapReduceTutorial.html) jobs on Apache Hadoop YARN (https://hadoop.apache.org/docs/r2.7.1/hadoop-yarn/hadoop-yarn-site/YARN.html). # Job is a Hadoop job.
+ "jarFileUris": [ # Optional Jar file URIs to add to the CLASSPATHs of the Hadoop driver and tasks.
"A String",
],
"loggingConfig": { # The runtime logging config of the job. # Optional The runtime log config for job execution.
@@ -1587,9 +1558,38 @@
"a_key": "A String",
},
},
+ "args": [ # Optional The arguments to pass to the driver. Do not include arguments, such as -libjars or -Dfoo=bar, that can be set as job properties, since a collision may occur that causes an incorrect job submission.
+ "A String",
+ ],
+ "fileUris": [ # Optional HCFS (Hadoop Compatible Filesystem) URIs of files to be copied to the working directory of Hadoop drivers and distributed tasks. Useful for naively parallel tasks.
+ "A String",
+ ],
+ "mainClass": "A String", # The name of the driver's main class. The jar file containing the class must be in the default CLASSPATH or specified in jar_file_uris.
+ "archiveUris": [ # Optional HCFS URIs of archives to be extracted in the working directory of Hadoop drivers and tasks. Supported file types: .jar, .tar, .tar.gz, .tgz, or .zip.
+ "A String",
+ ],
+ "mainJarFileUri": "A String", # The HCFS URI of the jar file containing the main class. Examples: 'gs://foo-bucket/analytics-binaries/extract-useful-metrics-mr.jar' 'hdfs:/tmp/test-samples/custom-wordcount.jar' 'file:///home/usr/lib/hadoop-mapreduce/hadoop-mapreduce-examples.jar'
+ "properties": { # Optional A mapping of property names to values, used to configure Hadoop. Properties that conflict with values set by the Cloud Dataproc API may be overwritten. Can include properties set in /etc/hadoop/conf/*-site and classes in user code.
+ "a_key": "A String",
+ },
+ },
+ "labels": { # Optional The labels to associate with this job. Label keys must contain 1 to 63 characters, and must conform to RFC 1035 (https://www.ietf.org/rfc/rfc1035.txt). Label values may be empty, but, if present, must contain 1 to 63 characters, and must conform to RFC 1035 (https://www.ietf.org/rfc/rfc1035.txt). No more than 32 labels can be associated with a job.
+ "a_key": "A String",
+ },
+ "driverOutputResourceUri": "A String", # Output-only A URI pointing to the location of the stdout of the job's driver program.
+ "pysparkJob": { # A Cloud Dataproc job for running Apache PySpark (https://spark.apache.org/docs/0.9.0/python-programming-guide.html) applications on YARN. # Job is a Pyspark job.
+ "mainPythonFileUri": "A String", # Required The HCFS URI of the main Python file to use as the driver. Must be a .py file.
"args": [ # Optional The arguments to pass to the driver. Do not include arguments, such as --conf, that can be set as job properties, since a collision may occur that causes an incorrect job submission.
"A String",
],
+ "loggingConfig": { # The runtime logging config of the job. # Optional The runtime log config for job execution.
+ "driverLogLevels": { # The per-package log levels for the driver. This may include "root" package name to configure rootLogger. Examples: 'com.google = FATAL', 'root = INFO', 'org.apache = DEBUG'
+ "a_key": "A String",
+ },
+ },
+ "jarFileUris": [ # Optional HCFS URIs of jar files to add to the CLASSPATHs of the Python driver and tasks.
+ "A String",
+ ],
"fileUris": [ # Optional HCFS URIs of files to be copied to the working directory of Python drivers and distributed tasks. Useful for naively parallel tasks.
"A String",
],
@@ -1605,7 +1605,7 @@
},
"driverControlFilesUri": "A String", # Output-only If present, the location of miscellaneous control files which may be used as part of job setup and handling. If not present, control files may be placed in the same location as driver_output_uri.
"sparkJob": { # A Cloud Dataproc job for running Apache Spark (http://spark.apache.org/) applications on YARN. # Job is a Spark job.
- "jarFileUris": [ # Optional HCFS URIs of jar files to add to the CLASSPATHs of the Spark driver and tasks.
+ "args": [ # Optional The arguments to pass to the driver. Do not include arguments, such as --conf, that can be set as job properties, since a collision may occur that causes an incorrect job submission.
"A String",
],
"loggingConfig": { # The runtime logging config of the job. # Optional The runtime log config for job execution.
@@ -1613,7 +1613,7 @@
"a_key": "A String",
},
},
- "args": [ # Optional The arguments to pass to the driver. Do not include arguments, such as --conf, that can be set as job properties, since a collision may occur that causes an incorrect job submission.
+ "jarFileUris": [ # Optional HCFS URIs of jar files to add to the CLASSPATHs of the Spark driver and tasks.
"A String",
],
"fileUris": [ # Optional HCFS URIs of files to be copied to the working directory of Spark drivers and distributed tasks. Useful for naively parallel tasks.
@@ -1643,8 +1643,8 @@
{ # Cloud Dataproc job status.
"state": "A String", # Output-only A state message specifying the overall job state.
"stateStartTime": "A String", # Output-only The time when this state was entered.
- "details": "A String", # Output-only Optional job state details, such as an error description if the state is <code>ERROR</code>.
"substate": "A String", # Output-only Additional state information, which includes status reported by the agent.
+ "details": "A String", # Output-only Optional job state details, such as an error description if the state is <code>ERROR</code>.
},
],
"hiveJob": { # A Cloud Dataproc job for running Apache Hive (https://hive.apache.org/) queries on YARN. # Job is a Hive job.