Remove Moderator Sample because there is no Moderator API.
Reviewed in https://codereview.appspot.com/6776055/.
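
The regenerated BigQuery v2 docs below also pick up the new [Experimental]
sourceFormat/destinationFormat job fields and the per-job-type statistics.
As a minimal sketch (not part of this change), a load job using the new
sourceFormat field might look like the following through the Python client;
the project, dataset, table, and bucket names are placeholders, and an
authorized http object is assumed to be set up elsewhere:

    import httplib2
    from apiclient.discovery import build

    # Build the BigQuery v2 service; an authorized httplib2.Http is assumed.
    bigquery = build('bigquery', 'v2', http=httplib2.Http())

    job_body = {
        'configuration': {
            'load': {
                # New [Experimental] field: defaults to "CSV" when omitted.
                'sourceFormat': 'NEWLINE_DELIMITED_JSON',
                'sourceUris': ['gs://example-bucket/data.json'],
                'destinationTable': {
                    'projectId': 'example-project',
                    'datasetId': 'example_dataset',
                    'tableId': 'example_table',
                },
            }
        }
    }
    job = bigquery.jobs().insert(projectId='example-project',
                                 body=job_body).execute()

    # Once the job completes, the per-job-type statistics documented below
    # appear under statistics.load (outputRows, inputFiles, inputFileBytes,
    # outputBytes) instead of the deprecated top-level totalBytesProcessed.
    print(job['statistics'].get('load', {}))
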
diff --git a/docs/dyn/bigquery_v2.jobs.html b/docs/dyn/bigquery_v2.jobs.html
index 786089c..0689f9c 100644
--- a/docs/dyn/bigquery_v2.jobs.html
+++ b/docs/dyn/bigquery_v2.jobs.html
@@ -124,8 +124,17 @@
},
"kind": "bigquery#job", # [Output-only] The type of the resource.
"statistics": { # [Output-only] Information about the job, including starting time and ending time of the job.
+ "load": { # [Output-only] Statistics for a load job.
+ "outputRows": "A String", # [Output-only] Number of rows imported in a load job. Note that while an import job is in the running state, this value may change.
+ "inputFiles": "A String", # [Output-only] Number of source files in a load job.
+ "inputFileBytes": "A String", # [Output-only] Number of bytes of source data in a joad job.
+ "outputBytes": "A String", # [Output-only] Size of the loaded data in bytes. Note that while an import job is in the running state, this value may change.
+ },
+ "query": { # [Output-only] Statistics for a query job.
+ "totalBytesProcessed": "A String", # [Output-only] Total bytes processed for this job.
+ },
"endTime": "A String", # [Output-only] End time of this job, in milliseconds since the epoch.
- "totalBytesProcessed": "A String", # [Output-only] Total bytes processed for this job.
+ "totalBytesProcessed": "A String", # [Output-only] [Deprecated] Use the bytes processed in the query statistics instead.
"startTime": "A String", # [Output-only] Start time of this job, in milliseconds since the epoch.
},
"jobReference": { # [Optional] Reference describing the unique-per-user name of the job.
@@ -137,6 +146,7 @@
"load": { # [Pick one] Configures a load job.
"encoding": "A String", # [Optional] Character encoding of the input data. May be UTF-8 or ISO-8859-1. Default is UTF-8.
"fieldDelimiter": "A String", # [Optional] Delimiter to use between fields in the import data. Default is ','. Note that delimiters are applied to the raw, binary data before the encoding is applied.
+ "sourceFormat": "A String", # [Experimental] Optional and defaults to CSV. Format of source files. For CSV uploads, specify "CSV". For imports of datastore backups, specify "DATASTORE_BACKUP". For imports of newline-delimited JSON, specify "NEWLINE_DELIMITED_JSON".
"destinationTable": { # [Required] Table being written to.
"projectId": "A String", # [Required] ID of the project billed for storage of the table.
"tableId": "A String", # [Required] ID of the table.
@@ -148,7 +158,7 @@
"sourceUris": [ # [Required] Source URIs describing Google Cloud Storage locations of data to load.
"A String",
],
- "quote": "A String", # [Optional] Quote character to use. Default is '"'. Note that quoting is done on the raw, binary data before the encoding is applied.
+ "quote": "A String", # [Optional] Quote character to use. Default is '"'. Note that quoting is done on the raw, binary data before the encoding is applied. If no quoting is done, use am empty string.
"createDisposition": "A String", # [Optional] Whether to create the table if it doesn't already exist (CREATE_IF_NEEDED) or to require the table already exist (CREATE_NEVER). Default is CREATE_IF_NEEDED.
"schemaInlineFormat": "A String", # [Experimental] Format of inlineSchema field.
"schemaInline": "A String", # [Experimental] Inline schema. For CSV schemas, specify as "Field1:Type1[,Field2:Type2]*". For example, "foo:STRING, bar:INTEGER, baz:FLOAT"
@@ -208,14 +218,15 @@
},
},
"extract": { # [Pick one] Configures an extract job.
+ "destinationFormat": "A String", # [Experimental] Optional and defaults to CSV. Format with which files should be exported. To export to CSV, specify "CSV". Tables with nested or repeated fields cannot be exported as CSV. To export to newline-delimited JSON, specify "NEWLINE_DELIMITED_JSON".
"destinationUri": "A String", # [Required] The fully-qualified Google Cloud Storage URI where the extracted table should be written.
- "fieldDelimiter": "A String", # [Optional] Delimiter to use between fields in the exported data. Default is ','
+ "printHeader": True or False, # [Optional] Whether to print out a heder row in the results. Default is true.
"sourceTable": { # [Required] A reference to the table being exported.
"projectId": "A String", # [Required] ID of the project billed for storage of the table.
"tableId": "A String", # [Required] ID of the table.
"datasetId": "A String", # [Required] ID of the dataset containing the table.
},
- "printHeader": True or False, # [Optional] Whether to print out a heder row in the results. Default is true.
+ "fieldDelimiter": "A String", # [Optional] Delimiter to use between fields in the exported data. Default is ','
},
"properties": { # [Optional] Properties providing extra details about how the job should be run. Not used for most jobs.
"a_key": "A String", # Key-value property pairs.
@@ -243,10 +254,10 @@
{
"kind": "bigquery#getQueryResultsResponse", # The resource type of the response.
"rows": [ # An object with as many results as can be contained within the maximum permitted reply size. To get any additional rows, you can call GetQueryResults and specify the jobReference returned above. Present only when the query completes successfully.
- {
- "f": [ # Represents a single row in the result set, consisting of one or more fields.
- {
- "v": "A String", # Contains the field value in this row, as a string.
+ { # Represents a single row in the result set, consisting of one or more fields.
+ "f": [
+ { # Represents a single cell in the result set.
+ "v": "",
},
],
},
@@ -302,8 +313,17 @@
},
"kind": "bigquery#job", # [Output-only] The type of the resource.
"statistics": { # [Output-only] Information about the job, including starting time and ending time of the job.
+ "load": { # [Output-only] Statistics for a load job.
+ "outputRows": "A String", # [Output-only] Number of rows imported in a load job. Note that while an import job is in the running state, this value may change.
+ "inputFiles": "A String", # [Output-only] Number of source files in a load job.
+ "inputFileBytes": "A String", # [Output-only] Number of bytes of source data in a joad job.
+ "outputBytes": "A String", # [Output-only] Size of the loaded data in bytes. Note that while an import job is in the running state, this value may change.
+ },
+ "query": { # [Output-only] Statistics for a query job.
+ "totalBytesProcessed": "A String", # [Output-only] Total bytes processed for this job.
+ },
"endTime": "A String", # [Output-only] End time of this job, in milliseconds since the epoch.
- "totalBytesProcessed": "A String", # [Output-only] Total bytes processed for this job.
+ "totalBytesProcessed": "A String", # [Output-only] [Deprecated] Use the bytes processed in the query statistics instead.
"startTime": "A String", # [Output-only] Start time of this job, in milliseconds since the epoch.
},
"jobReference": { # [Optional] Reference describing the unique-per-user name of the job.
@@ -315,6 +335,7 @@
"load": { # [Pick one] Configures a load job.
"encoding": "A String", # [Optional] Character encoding of the input data. May be UTF-8 or ISO-8859-1. Default is UTF-8.
"fieldDelimiter": "A String", # [Optional] Delimiter to use between fields in the import data. Default is ','. Note that delimiters are applied to the raw, binary data before the encoding is applied.
+ "sourceFormat": "A String", # [Experimental] Optional and defaults to CSV. Format of source files. For CSV uploads, specify "CSV". For imports of datastore backups, specify "DATASTORE_BACKUP". For imports of newline-delimited JSON, specify "NEWLINE_DELIMITED_JSON".
"destinationTable": { # [Required] Table being written to.
"projectId": "A String", # [Required] ID of the project billed for storage of the table.
"tableId": "A String", # [Required] ID of the table.
@@ -326,7 +347,7 @@
"sourceUris": [ # [Required] Source URIs describing Google Cloud Storage locations of data to load.
"A String",
],
- "quote": "A String", # [Optional] Quote character to use. Default is '"'. Note that quoting is done on the raw, binary data before the encoding is applied.
+ "quote": "A String", # [Optional] Quote character to use. Default is '"'. Note that quoting is done on the raw, binary data before the encoding is applied. If no quoting is done, use am empty string.
"createDisposition": "A String", # [Optional] Whether to create the table if it doesn't already exist (CREATE_IF_NEEDED) or to require the table already exist (CREATE_NEVER). Default is CREATE_IF_NEEDED.
"schemaInlineFormat": "A String", # [Experimental] Format of inlineSchema field.
"schemaInline": "A String", # [Experimental] Inline schema. For CSV schemas, specify as "Field1:Type1[,Field2:Type2]*". For example, "foo:STRING, bar:INTEGER, baz:FLOAT"
@@ -386,14 +407,15 @@
},
},
"extract": { # [Pick one] Configures an extract job.
+ "destinationFormat": "A String", # [Experimental] Optional and defaults to CSV. Format with which files should be exported. To export to CSV, specify "CSV". Tables with nested or repeated fields cannot be exported as CSV. To export to newline-delimited JSON, specify "NEWLINE_DELIMITED_JSON".
"destinationUri": "A String", # [Required] The fully-qualified Google Cloud Storage URI where the extracted table should be written.
- "fieldDelimiter": "A String", # [Optional] Delimiter to use between fields in the exported data. Default is ','
+ "printHeader": True or False, # [Optional] Whether to print out a heder row in the results. Default is true.
"sourceTable": { # [Required] A reference to the table being exported.
"projectId": "A String", # [Required] ID of the project billed for storage of the table.
"tableId": "A String", # [Required] ID of the table.
"datasetId": "A String", # [Required] ID of the dataset containing the table.
},
- "printHeader": True or False, # [Optional] Whether to print out a heder row in the results. Default is true.
+ "fieldDelimiter": "A String", # [Optional] Delimiter to use between fields in the exported data. Default is ','
},
"properties": { # [Optional] Properties providing extra details about how the job should be run. Not used for most jobs.
"a_key": "A String", # Key-value property pairs.
@@ -428,8 +450,17 @@
},
"kind": "bigquery#job", # [Output-only] The type of the resource.
"statistics": { # [Output-only] Information about the job, including starting time and ending time of the job.
+ "load": { # [Output-only] Statistics for a load job.
+ "outputRows": "A String", # [Output-only] Number of rows imported in a load job. Note that while an import job is in the running state, this value may change.
+ "inputFiles": "A String", # [Output-only] Number of source files in a load job.
+ "inputFileBytes": "A String", # [Output-only] Number of bytes of source data in a joad job.
+ "outputBytes": "A String", # [Output-only] Size of the loaded data in bytes. Note that while an import job is in the running state, this value may change.
+ },
+ "query": { # [Output-only] Statistics for a query job.
+ "totalBytesProcessed": "A String", # [Output-only] Total bytes processed for this job.
+ },
"endTime": "A String", # [Output-only] End time of this job, in milliseconds since the epoch.
- "totalBytesProcessed": "A String", # [Output-only] Total bytes processed for this job.
+ "totalBytesProcessed": "A String", # [Output-only] [Deprecated] Use the bytes processed in the query statistics instead.
"startTime": "A String", # [Output-only] Start time of this job, in milliseconds since the epoch.
},
"jobReference": { # [Optional] Reference describing the unique-per-user name of the job.
@@ -441,6 +472,7 @@
"load": { # [Pick one] Configures a load job.
"encoding": "A String", # [Optional] Character encoding of the input data. May be UTF-8 or ISO-8859-1. Default is UTF-8.
"fieldDelimiter": "A String", # [Optional] Delimiter to use between fields in the import data. Default is ','. Note that delimiters are applied to the raw, binary data before the encoding is applied.
+ "sourceFormat": "A String", # [Experimental] Optional and defaults to CSV. Format of source files. For CSV uploads, specify "CSV". For imports of datastore backups, specify "DATASTORE_BACKUP". For imports of newline-delimited JSON, specify "NEWLINE_DELIMITED_JSON".
"destinationTable": { # [Required] Table being written to.
"projectId": "A String", # [Required] ID of the project billed for storage of the table.
"tableId": "A String", # [Required] ID of the table.
@@ -452,7 +484,7 @@
"sourceUris": [ # [Required] Source URIs describing Google Cloud Storage locations of data to load.
"A String",
],
- "quote": "A String", # [Optional] Quote character to use. Default is '"'. Note that quoting is done on the raw, binary data before the encoding is applied.
+ "quote": "A String", # [Optional] Quote character to use. Default is '"'. Note that quoting is done on the raw, binary data before the encoding is applied. If no quoting is done, use am empty string.
"createDisposition": "A String", # [Optional] Whether to create the table if it doesn't already exist (CREATE_IF_NEEDED) or to require the table already exist (CREATE_NEVER). Default is CREATE_IF_NEEDED.
"schemaInlineFormat": "A String", # [Experimental] Format of inlineSchema field.
"schemaInline": "A String", # [Experimental] Inline schema. For CSV schemas, specify as "Field1:Type1[,Field2:Type2]*". For example, "foo:STRING, bar:INTEGER, baz:FLOAT"
@@ -512,14 +544,15 @@
},
},
"extract": { # [Pick one] Configures an extract job.
+ "destinationFormat": "A String", # [Experimental] Optional and defaults to CSV. Format with which files should be exported. To export to CSV, specify "CSV". Tables with nested or repeated fields cannot be exported as CSV. To export to newline-delimited JSON, specify "NEWLINE_DELIMITED_JSON".
"destinationUri": "A String", # [Required] The fully-qualified Google Cloud Storage URI where the extracted table should be written.
- "fieldDelimiter": "A String", # [Optional] Delimiter to use between fields in the exported data. Default is ','
+ "printHeader": True or False, # [Optional] Whether to print out a heder row in the results. Default is true.
"sourceTable": { # [Required] A reference to the table being exported.
"projectId": "A String", # [Required] ID of the project billed for storage of the table.
"tableId": "A String", # [Required] ID of the table.
"datasetId": "A String", # [Required] ID of the dataset containing the table.
},
- "printHeader": True or False, # [Optional] Whether to print out a heder row in the results. Default is true.
+ "fieldDelimiter": "A String", # [Optional] Delimiter to use between fields in the exported data. Default is ','
},
"properties": { # [Optional] Properties providing extra details about how the job should be run. Not used for most jobs.
"a_key": "A String", # Key-value property pairs.
@@ -578,8 +611,17 @@
},
"kind": "bigquery#job", # The resource type.
"statistics": { # [Output-only] Information about the job, including starting time and ending time of the job.
+ "load": { # [Output-only] Statistics for a load job.
+ "outputRows": "A String", # [Output-only] Number of rows imported in a load job. Note that while an import job is in the running state, this value may change.
+ "inputFiles": "A String", # [Output-only] Number of source files in a load job.
+ "inputFileBytes": "A String", # [Output-only] Number of bytes of source data in a joad job.
+ "outputBytes": "A String", # [Output-only] Size of the loaded data in bytes. Note that while an import job is in the running state, this value may change.
+ },
+ "query": { # [Output-only] Statistics for a query job.
+ "totalBytesProcessed": "A String", # [Output-only] Total bytes processed for this job.
+ },
"endTime": "A String", # [Output-only] End time of this job, in milliseconds since the epoch.
- "totalBytesProcessed": "A String", # [Output-only] Total bytes processed for this job.
+ "totalBytesProcessed": "A String", # [Output-only] [Deprecated] Use the bytes processed in the query statistics instead.
"startTime": "A String", # [Output-only] Start time of this job, in milliseconds since the epoch.
},
"jobReference": { # Job reference uniquely identifying the job.
@@ -591,6 +633,7 @@
"load": { # [Pick one] Configures a load job.
"encoding": "A String", # [Optional] Character encoding of the input data. May be UTF-8 or ISO-8859-1. Default is UTF-8.
"fieldDelimiter": "A String", # [Optional] Delimiter to use between fields in the import data. Default is ','. Note that delimiters are applied to the raw, binary data before the encoding is applied.
+ "sourceFormat": "A String", # [Experimental] Optional and defaults to CSV. Format of source files. For CSV uploads, specify "CSV". For imports of datastore backups, specify "DATASTORE_BACKUP". For imports of newline-delimited JSON, specify "NEWLINE_DELIMITED_JSON".
"destinationTable": { # [Required] Table being written to.
"projectId": "A String", # [Required] ID of the project billed for storage of the table.
"tableId": "A String", # [Required] ID of the table.
@@ -602,7 +645,7 @@
"sourceUris": [ # [Required] Source URIs describing Google Cloud Storage locations of data to load.
"A String",
],
- "quote": "A String", # [Optional] Quote character to use. Default is '"'. Note that quoting is done on the raw, binary data before the encoding is applied.
+ "quote": "A String", # [Optional] Quote character to use. Default is '"'. Note that quoting is done on the raw, binary data before the encoding is applied. If no quoting is done, use am empty string.
"createDisposition": "A String", # [Optional] Whether to create the table if it doesn't already exist (CREATE_IF_NEEDED) or to require the table already exist (CREATE_NEVER). Default is CREATE_IF_NEEDED.
"schemaInlineFormat": "A String", # [Experimental] Format of inlineSchema field.
"schemaInline": "A String", # [Experimental] Inline schema. For CSV schemas, specify as "Field1:Type1[,Field2:Type2]*". For example, "foo:STRING, bar:INTEGER, baz:FLOAT"
@@ -662,14 +705,15 @@
},
},
"extract": { # [Pick one] Configures an extract job.
+ "destinationFormat": "A String", # [Experimental] Optional and defaults to CSV. Format with which files should be exported. To export to CSV, specify "CSV". Tables with nested or repeated fields cannot be exported as CSV. To export to newline-delimited JSON, specify "NEWLINE_DELIMITED_JSON".
"destinationUri": "A String", # [Required] The fully-qualified Google Cloud Storage URI where the extracted table should be written.
- "fieldDelimiter": "A String", # [Optional] Delimiter to use between fields in the exported data. Default is ','
+ "printHeader": True or False, # [Optional] Whether to print out a heder row in the results. Default is true.
"sourceTable": { # [Required] A reference to the table being exported.
"projectId": "A String", # [Required] ID of the project billed for storage of the table.
"tableId": "A String", # [Required] ID of the table.
"datasetId": "A String", # [Required] ID of the dataset containing the table.
},
- "printHeader": True or False, # [Optional] Whether to print out a heder row in the results. Default is true.
+ "fieldDelimiter": "A String", # [Optional] Delimiter to use between fields in the exported data. Default is ','
},
"properties": { # [Optional] Properties providing extra details about how the job should be run. Not used for most jobs.
"a_key": "A String", # Key-value property pairs.
@@ -729,10 +773,10 @@
{
"kind": "bigquery#queryResponse", # The resource type.
"rows": [ # An object with as many results as can be contained within the maximum permitted reply size. To get any additional rows, you can call GetQueryResults and specify the jobReference returned above.
- {
- "f": [ # Represents a single row in the result set, consisting of one or more fields.
- {
- "v": "A String", # Contains the field value in this row, as a string.
+ { # Represents a single row in the result set, consisting of one or more fields.
+ "f": [
+ { # Represents a single cell in the result set.
+ "v": "",
},
],
},
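
As another minimal sketch (again, not part of this change), the reshaped
row/cell structure in the queryResponse above can be consumed like this;
the query and project ID are placeholders, and `bigquery` is the service
object built as in the earlier sketch:

    results = bigquery.jobs().query(
        projectId='example-project',
        body={'query': 'SELECT word, word_count '
                       'FROM publicdata:samples.shakespeare LIMIT 5'}).execute()

    for row in results.get('rows', []):
        # Each row object carries an "f" list of cells; each cell holds its
        # value (rendered as a string, or None for NULL) under "v".
        print([cell['v'] for cell in row['f']])
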