Update docs for 1.4.2 release
diff --git a/docs/dyn/bigquery_v2.tables.html b/docs/dyn/bigquery_v2.tables.html
index fec1c63..b4b1a31 100644
--- a/docs/dyn/bigquery_v2.tables.html
+++ b/docs/dyn/bigquery_v2.tables.html
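The hunks below add three things to the Table resource docs: an experimental externalDataConfiguration block for querying data that lives in Google Cloud Storage, a streamingBuffer block, and reworded numRows/numBytes descriptions that now explicitly exclude buffered streaming data. As a quick orientation, here is a minimal sketch of creating a federated CSV table with this client; the project, dataset, table, and bucket names are hypothetical, and authenticating via oauth2client application default credentials is just one option:

    # Sketch: define a table backed by CSV files in Cloud Storage.
    from googleapiclient.discovery import build
    from oauth2client.client import GoogleCredentials

    credentials = GoogleCredentials.get_application_default()
    service = build('bigquery', 'v2', credentials=credentials)

    table = {
        'tableReference': {  # hypothetical IDs throughout
            'projectId': 'my-project',
            'datasetId': 'my_dataset',
            'tableId': 'sales_external',
        },
        'externalDataConfiguration': {
            'sourceFormat': 'CSV',
            # One '*' wildcard per URI, and it must come after the bucket name.
            'sourceUris': ['gs://my-bucket/sales/*.csv'],
            'csvOptions': {
                'skipLeadingRows': 1,   # skip the header row
                'fieldDelimiter': ',',
                'quote': '"',
            },
            'maxBadRecords': 10,  # tolerate up to 10 bad records per read
            'schema': {
                'fields': [
                    {'name': 'sku', 'type': 'STRING', 'mode': 'REQUIRED'},
                    {'name': 'quantity', 'type': 'INTEGER'},
                ],
            },
        },
    }

    created = service.tables().insert(
        projectId='my-project', datasetId='my_dataset', body=table).execute()
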
@@ -124,14 +124,50 @@
"lastModifiedTime": "A String", # [Output-only] The time when this table was last modified, in milliseconds since the epoch.
"description": "A String", # [Optional] A user-friendly description of this table.
"creationTime": "A String", # [Output-only] The time when this table was created, in milliseconds since the epoch.
+ "externalDataConfiguration": { # [Experimental] Describes the data format, location, and other properties of a table stored outside of BigQuery. By defining these properties, the data source can then be queried as if it were a standard BigQuery table.
+ "compression": "A String", # [Optional] The compression type of the data source. Possible values include GZIP and NONE. The default value is NONE.
+ "csvOptions": { # Additional properties to set if sourceFormat is set to CSV.
+ "fieldDelimiter": "A String", # [Optional] The separator for fields in a CSV file. BigQuery converts the string to ISO-8859-1 encoding, and then uses the first byte of the encoded string to split the data in its raw, binary state. BigQuery also supports the escape sequence "\t" to specify a tab separator. The default value is a comma (',').
+ "encoding": "A String", # [Optional] The character encoding of the data. The supported values are UTF-8 or ISO-8859-1. The default value is UTF-8. BigQuery decodes the data after the raw, binary data has been split using the values of the quote and fieldDelimiter properties.
+ "skipLeadingRows": 42, # [Optional] The number of rows at the top of a CSV file that BigQuery will skip when reading the data. The default value is 0. This property is useful if you have header rows in the file that should be skipped.
+ "allowJaggedRows": True or False, # [Optional] Indicates if BigQuery should accept rows that are missing trailing optional columns. If true, BigQuery treats missing trailing columns as null values. If false, records with missing trailing columns are treated as bad records, and if there are too many bad records, an invalid error is returned in the job result. The default value is false.
+ "quote": "\"", # [Optional] The value that is used to quote data sections in a CSV file. BigQuery converts the string to ISO-8859-1 encoding, and then uses the first byte of the encoded string to split the data in its raw, binary state. The default value is a double-quote ('"'). If your data does not contain quoted sections, set the property value to an empty string. If your data contains quoted newline characters, you must also set the allowQuotedNewlines property to true.
+ "allowQuotedNewlines": True or False, # [Optional] Indicates if BigQuery should allow quoted data sections that contain newline characters in a CSV file. The default value is false.
+ },
+ "sourceFormat": "A String", # [Required] The data format. For CSV files, specify "CSV". For newline-delimited JSON, specify "NEWLINE_DELIMITED_JSON".
+ "maxBadRecords": 42, # [Optional] The maximum number of bad records that BigQuery can ignore when reading data. If the number of bad records exceeds this value, an invalid error is returned in the job result. The default value is 0, which requires that all records are valid.
+ "ignoreUnknownValues": True or False, # [Optional] Indicates if BigQuery should allow extra values that are not represented in the table schema. If true, the extra values are ignored. If false, records with extra columns are treated as bad records, and if there are too many bad records, an invalid error is returned in the job result. The default value is false. The sourceFormat property determines what BigQuery treats as an extra value. CSV: Trailing columns. JSON: Named values that don't match any column names.
+ "sourceUris": [ # [Required] The fully-qualified URIs that point to your data in Google Cloud Storage. Each URI can contain one '*' wildcard character and it must come after the 'bucket' name. Size limits related to load jobs apply to external data sources, plus an additional limit of 10 GB maximum size across all URIs.
+ "A String",
+ ],
+ "schema": { # [Required] The schema for the data.
+ "fields": [ # Describes the fields in a table.
+ {
+ "fields": [ # [Optional] Describes the nested schema fields if the type property is set to RECORD.
+ # Object with schema name: TableFieldSchema
+ ],
+ "type": "A String", # [Required] The field data type. Possible values include STRING, INTEGER, FLOAT, BOOLEAN, TIMESTAMP or RECORD (where RECORD indicates that the field contains a nested schema).
+ "description": "A String", # [Optional] The field description. The maximum length is 16K characters.
+ "name": "A String", # [Required] The field name. The name must contain only letters (a-z, A-Z), numbers (0-9), or underscores (_), and must start with a letter or underscore. The maximum length is 128 characters.
+ "mode": "A String", # [Optional] The field mode. Possible values include NULLABLE, REQUIRED and REPEATED. The default value is NULLABLE.
+ },
+ ],
+ },
+ },
"tableReference": { # [Required] Reference describing the ID of this table.
"projectId": "A String", # [Required] The ID of the project containing this table.
"tableId": "A String", # [Required] The ID of the table. The ID must contain only letters (a-z, A-Z), numbers (0-9), or underscores (_). The maximum length is 1,024 characters.
"datasetId": "A String", # [Required] The ID of the dataset containing this table.
},
- "numRows": "A String", # [Output-only] The number of rows of data in this table. This property is unavailable for tables that are actively receiving streaming inserts.
- "numBytes": "A String", # [Output-only] The size of the table in bytes. This property is unavailable for tables that are actively receiving streaming inserts.
+ "numRows": "A String", # [Output-only] The number of rows of data in this table, excluding any data in the streaming buffer.
+ "numBytes": "A String", # [Output-only] The size of this table in bytes, excluding any data in the streaming buffer.
"etag": "A String", # [Output-only] A hash of this resource.
+ "location": "A String", # [Output-only] The geographic location where the table resides. This value is inherited from the dataset.
+ "streamingBuffer": { # [Output-only] Contains information regarding this table's streaming buffer, if one is present. This field will be absent if the table is not being streamed to or if there is no data in the streaming buffer.
+ "estimatedBytes": "A String", # [Output-only] A lower-bound estimate of the number of bytes currently in the streaming buffer.
+ "estimatedRows": "A String", # [Output-only] A lower-bound estimate of the number of rows currently in the streaming buffer.
+ "oldestEntryTime": "A String", # [Output-only] Contains the timestamp of the oldest entry in the streaming buffer, in milliseconds since the epoch, if the streaming buffer is available.
+ },
"friendlyName": "A String", # [Optional] A descriptive name for this table.
"expirationTime": "A String", # [Optional] The time when this table expires, in milliseconds since the epoch. If not present, the table will persist indefinitely. Expired tables will be deleted and their storage reclaimed.
"view": { # [Optional] The view definition.
@@ -171,14 +207,50 @@
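The streamingBuffer fields repeated below are the counterpart to the reworded numRows/numBytes: committed counts exclude whatever is still buffered. A rough sketch for reading both, reusing the service object from the sketch above (names still hypothetical):

    # Sketch: compare committed row counts with the streaming buffer estimate.
    table = service.tables().get(
        projectId='my-project', datasetId='my_dataset',
        tableId='events').execute()

    print('committed rows:', table['numRows'])  # excludes the buffer
    buf = table.get('streamingBuffer')          # absent if nothing is buffered
    if buf:
        # estimatedRows/estimatedBytes are lower-bound estimates, as strings.
        print('buffered rows (at least):', buf.get('estimatedRows', '0'))
        print('buffered bytes (at least):', buf.get('estimatedBytes', '0'))
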
"lastModifiedTime": "A String", # [Output-only] The time when this table was last modified, in milliseconds since the epoch.
"description": "A String", # [Optional] A user-friendly description of this table.
"creationTime": "A String", # [Output-only] The time when this table was created, in milliseconds since the epoch.
+ "externalDataConfiguration": { # [Experimental] Describes the data format, location, and other properties of a table stored outside of BigQuery. By defining these properties, the data source can then be queried as if it were a standard BigQuery table.
+ "compression": "A String", # [Optional] The compression type of the data source. Possible values include GZIP and NONE. The default value is NONE.
+ "csvOptions": { # Additional properties to set if sourceFormat is set to CSV.
+ "fieldDelimiter": "A String", # [Optional] The separator for fields in a CSV file. BigQuery converts the string to ISO-8859-1 encoding, and then uses the first byte of the encoded string to split the data in its raw, binary state. BigQuery also supports the escape sequence "\t" to specify a tab separator. The default value is a comma (',').
+ "encoding": "A String", # [Optional] The character encoding of the data. The supported values are UTF-8 or ISO-8859-1. The default value is UTF-8. BigQuery decodes the data after the raw, binary data has been split using the values of the quote and fieldDelimiter properties.
+ "skipLeadingRows": 42, # [Optional] The number of rows at the top of a CSV file that BigQuery will skip when reading the data. The default value is 0. This property is useful if you have header rows in the file that should be skipped.
+ "allowJaggedRows": True or False, # [Optional] Indicates if BigQuery should accept rows that are missing trailing optional columns. If true, BigQuery treats missing trailing columns as null values. If false, records with missing trailing columns are treated as bad records, and if there are too many bad records, an invalid error is returned in the job result. The default value is false.
+ "quote": "\"", # [Optional] The value that is used to quote data sections in a CSV file. BigQuery converts the string to ISO-8859-1 encoding, and then uses the first byte of the encoded string to split the data in its raw, binary state. The default value is a double-quote ('"'). If your data does not contain quoted sections, set the property value to an empty string. If your data contains quoted newline characters, you must also set the allowQuotedNewlines property to true.
+ "allowQuotedNewlines": True or False, # [Optional] Indicates if BigQuery should allow quoted data sections that contain newline characters in a CSV file. The default value is false.
+ },
+ "sourceFormat": "A String", # [Required] The data format. For CSV files, specify "CSV". For newline-delimited JSON, specify "NEWLINE_DELIMITED_JSON".
+ "maxBadRecords": 42, # [Optional] The maximum number of bad records that BigQuery can ignore when reading data. If the number of bad records exceeds this value, an invalid error is returned in the job result. The default value is 0, which requires that all records are valid.
+ "ignoreUnknownValues": True or False, # [Optional] Indicates if BigQuery should allow extra values that are not represented in the table schema. If true, the extra values are ignored. If false, records with extra columns are treated as bad records, and if there are too many bad records, an invalid error is returned in the job result. The default value is false. The sourceFormat property determines what BigQuery treats as an extra value. CSV: Trailing columns. JSON: Named values that don't match any column names.
+ "sourceUris": [ # [Required] The fully-qualified URIs that point to your data in Google Cloud Storage. Each URI can contain one '*' wildcard character and it must come after the 'bucket' name. Size limits related to load jobs apply to external data sources, plus an additional limit of 10 GB maximum size across all URIs.
+ "A String",
+ ],
+ "schema": { # [Required] The schema for the data.
+ "fields": [ # Describes the fields in a table.
+ {
+ "fields": [ # [Optional] Describes the nested schema fields if the type property is set to RECORD.
+ # Object with schema name: TableFieldSchema
+ ],
+ "type": "A String", # [Required] The field data type. Possible values include STRING, INTEGER, FLOAT, BOOLEAN, TIMESTAMP or RECORD (where RECORD indicates that the field contains a nested schema).
+ "description": "A String", # [Optional] The field description. The maximum length is 16K characters.
+ "name": "A String", # [Required] The field name. The name must contain only letters (a-z, A-Z), numbers (0-9), or underscores (_), and must start with a letter or underscore. The maximum length is 128 characters.
+ "mode": "A String", # [Optional] The field mode. Possible values include NULLABLE, REQUIRED and REPEATED. The default value is NULLABLE.
+ },
+ ],
+ },
+ },
"tableReference": { # [Required] Reference describing the ID of this table.
"projectId": "A String", # [Required] The ID of the project containing this table.
"tableId": "A String", # [Required] The ID of the table. The ID must contain only letters (a-z, A-Z), numbers (0-9), or underscores (_). The maximum length is 1,024 characters.
"datasetId": "A String", # [Required] The ID of the dataset containing this table.
},
- "numRows": "A String", # [Output-only] The number of rows of data in this table. This property is unavailable for tables that are actively receiving streaming inserts.
- "numBytes": "A String", # [Output-only] The size of the table in bytes. This property is unavailable for tables that are actively receiving streaming inserts.
+ "numRows": "A String", # [Output-only] The number of rows of data in this table, excluding any data in the streaming buffer.
+ "numBytes": "A String", # [Output-only] The size of this table in bytes, excluding any data in the streaming buffer.
"etag": "A String", # [Output-only] A hash of this resource.
+ "location": "A String", # [Output-only] The geographic location where the table resides. This value is inherited from the dataset.
+ "streamingBuffer": { # [Output-only] Contains information regarding this table's streaming buffer, if one is present. This field will be absent if the table is not being streamed to or if there is no data in the streaming buffer.
+ "estimatedBytes": "A String", # [Output-only] A lower-bound estimate of the number of bytes currently in the streaming buffer.
+ "estimatedRows": "A String", # [Output-only] A lower-bound estimate of the number of rows currently in the streaming buffer.
+ "oldestEntryTime": "A String", # [Output-only] Contains the timestamp of the oldest entry in the streaming buffer, in milliseconds since the epoch, if the streaming buffer is available.
+ },
"friendlyName": "A String", # [Optional] A descriptive name for this table.
"expirationTime": "A String", # [Optional] The time when this table expires, in milliseconds since the epoch. If not present, the table will persist indefinitely. Expired tables will be deleted and their storage reclaimed.
"view": { # [Optional] The view definition.
@@ -211,14 +283,50 @@
"lastModifiedTime": "A String", # [Output-only] The time when this table was last modified, in milliseconds since the epoch.
"description": "A String", # [Optional] A user-friendly description of this table.
"creationTime": "A String", # [Output-only] The time when this table was created, in milliseconds since the epoch.
+ "externalDataConfiguration": { # [Experimental] Describes the data format, location, and other properties of a table stored outside of BigQuery. By defining these properties, the data source can then be queried as if it were a standard BigQuery table.
+ "compression": "A String", # [Optional] The compression type of the data source. Possible values include GZIP and NONE. The default value is NONE.
+ "csvOptions": { # Additional properties to set if sourceFormat is set to CSV.
+ "fieldDelimiter": "A String", # [Optional] The separator for fields in a CSV file. BigQuery converts the string to ISO-8859-1 encoding, and then uses the first byte of the encoded string to split the data in its raw, binary state. BigQuery also supports the escape sequence "\t" to specify a tab separator. The default value is a comma (',').
+ "encoding": "A String", # [Optional] The character encoding of the data. The supported values are UTF-8 or ISO-8859-1. The default value is UTF-8. BigQuery decodes the data after the raw, binary data has been split using the values of the quote and fieldDelimiter properties.
+ "skipLeadingRows": 42, # [Optional] The number of rows at the top of a CSV file that BigQuery will skip when reading the data. The default value is 0. This property is useful if you have header rows in the file that should be skipped.
+ "allowJaggedRows": True or False, # [Optional] Indicates if BigQuery should accept rows that are missing trailing optional columns. If true, BigQuery treats missing trailing columns as null values. If false, records with missing trailing columns are treated as bad records, and if there are too many bad records, an invalid error is returned in the job result. The default value is false.
+ "quote": "\"", # [Optional] The value that is used to quote data sections in a CSV file. BigQuery converts the string to ISO-8859-1 encoding, and then uses the first byte of the encoded string to split the data in its raw, binary state. The default value is a double-quote ('"'). If your data does not contain quoted sections, set the property value to an empty string. If your data contains quoted newline characters, you must also set the allowQuotedNewlines property to true.
+ "allowQuotedNewlines": True or False, # [Optional] Indicates if BigQuery should allow quoted data sections that contain newline characters in a CSV file. The default value is false.
+ },
+ "sourceFormat": "A String", # [Required] The data format. For CSV files, specify "CSV". For newline-delimited JSON, specify "NEWLINE_DELIMITED_JSON".
+ "maxBadRecords": 42, # [Optional] The maximum number of bad records that BigQuery can ignore when reading data. If the number of bad records exceeds this value, an invalid error is returned in the job result. The default value is 0, which requires that all records are valid.
+ "ignoreUnknownValues": True or False, # [Optional] Indicates if BigQuery should allow extra values that are not represented in the table schema. If true, the extra values are ignored. If false, records with extra columns are treated as bad records, and if there are too many bad records, an invalid error is returned in the job result. The default value is false. The sourceFormat property determines what BigQuery treats as an extra value. CSV: Trailing columns. JSON: Named values that don't match any column names.
+ "sourceUris": [ # [Required] The fully-qualified URIs that point to your data in Google Cloud Storage. Each URI can contain one '*' wildcard character and it must come after the 'bucket' name. Size limits related to load jobs apply to external data sources, plus an additional limit of 10 GB maximum size across all URIs.
+ "A String",
+ ],
+ "schema": { # [Required] The schema for the data.
+ "fields": [ # Describes the fields in a table.
+ {
+ "fields": [ # [Optional] Describes the nested schema fields if the type property is set to RECORD.
+ # Object with schema name: TableFieldSchema
+ ],
+ "type": "A String", # [Required] The field data type. Possible values include STRING, INTEGER, FLOAT, BOOLEAN, TIMESTAMP or RECORD (where RECORD indicates that the field contains a nested schema).
+ "description": "A String", # [Optional] The field description. The maximum length is 16K characters.
+ "name": "A String", # [Required] The field name. The name must contain only letters (a-z, A-Z), numbers (0-9), or underscores (_), and must start with a letter or underscore. The maximum length is 128 characters.
+ "mode": "A String", # [Optional] The field mode. Possible values include NULLABLE, REQUIRED and REPEATED. The default value is NULLABLE.
+ },
+ ],
+ },
+ },
"tableReference": { # [Required] Reference describing the ID of this table.
"projectId": "A String", # [Required] The ID of the project containing this table.
"tableId": "A String", # [Required] The ID of the table. The ID must contain only letters (a-z, A-Z), numbers (0-9), or underscores (_). The maximum length is 1,024 characters.
"datasetId": "A String", # [Required] The ID of the dataset containing this table.
},
- "numRows": "A String", # [Output-only] The number of rows of data in this table. This property is unavailable for tables that are actively receiving streaming inserts.
- "numBytes": "A String", # [Output-only] The size of the table in bytes. This property is unavailable for tables that are actively receiving streaming inserts.
+ "numRows": "A String", # [Output-only] The number of rows of data in this table, excluding any data in the streaming buffer.
+ "numBytes": "A String", # [Output-only] The size of this table in bytes, excluding any data in the streaming buffer.
"etag": "A String", # [Output-only] A hash of this resource.
+ "location": "A String", # [Output-only] The geographic location where the table resides. This value is inherited from the dataset.
+ "streamingBuffer": { # [Output-only] Contains information regarding this table's streaming buffer, if one is present. This field will be absent if the table is not being streamed to or if there is no data in the streaming buffer.
+ "estimatedBytes": "A String", # [Output-only] A lower-bound estimate of the number of bytes currently in the streaming buffer.
+ "estimatedRows": "A String", # [Output-only] A lower-bound estimate of the number of rows currently in the streaming buffer.
+ "oldestEntryTime": "A String", # [Output-only] Contains the timestamp of the oldest entry in the streaming buffer, in milliseconds since the epoch, if the streaming buffer is available.
+ },
"friendlyName": "A String", # [Optional] A descriptive name for this table.
"expirationTime": "A String", # [Optional] The time when this table expires, in milliseconds since the epoch. If not present, the table will persist indefinitely. Expired tables will be deleted and their storage reclaimed.
"view": { # [Optional] The view definition.
@@ -307,14 +415,50 @@
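For NEWLINE_DELIMITED_JSON sources, ignoreUnknownValues (documented below) decides what happens to named values that match no column, and a RECORD field carries its nested schema in a second-level fields list. A sketch of such a configuration, with hypothetical URIs:

    # Sketch: external newline-delimited JSON source with a nested RECORD field.
    external_config = {
        'sourceFormat': 'NEWLINE_DELIMITED_JSON',
        'sourceUris': ['gs://my-bucket/logs/2015-*.json'],
        'ignoreUnknownValues': True,  # drop values with no matching column
        'schema': {
            'fields': [
                {'name': 'user_id', 'type': 'STRING', 'mode': 'REQUIRED'},
                {'name': 'device', 'type': 'RECORD',  # nested schema follows
                 'fields': [
                     {'name': 'os', 'type': 'STRING'},
                     {'name': 'version', 'type': 'STRING'},
                 ]},
            ],
        },
    }
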
"lastModifiedTime": "A String", # [Output-only] The time when this table was last modified, in milliseconds since the epoch.
"description": "A String", # [Optional] A user-friendly description of this table.
"creationTime": "A String", # [Output-only] The time when this table was created, in milliseconds since the epoch.
+ "externalDataConfiguration": { # [Experimental] Describes the data format, location, and other properties of a table stored outside of BigQuery. By defining these properties, the data source can then be queried as if it were a standard BigQuery table.
+ "compression": "A String", # [Optional] The compression type of the data source. Possible values include GZIP and NONE. The default value is NONE.
+ "csvOptions": { # Additional properties to set if sourceFormat is set to CSV.
+ "fieldDelimiter": "A String", # [Optional] The separator for fields in a CSV file. BigQuery converts the string to ISO-8859-1 encoding, and then uses the first byte of the encoded string to split the data in its raw, binary state. BigQuery also supports the escape sequence "\t" to specify a tab separator. The default value is a comma (',').
+ "encoding": "A String", # [Optional] The character encoding of the data. The supported values are UTF-8 or ISO-8859-1. The default value is UTF-8. BigQuery decodes the data after the raw, binary data has been split using the values of the quote and fieldDelimiter properties.
+ "skipLeadingRows": 42, # [Optional] The number of rows at the top of a CSV file that BigQuery will skip when reading the data. The default value is 0. This property is useful if you have header rows in the file that should be skipped.
+ "allowJaggedRows": True or False, # [Optional] Indicates if BigQuery should accept rows that are missing trailing optional columns. If true, BigQuery treats missing trailing columns as null values. If false, records with missing trailing columns are treated as bad records, and if there are too many bad records, an invalid error is returned in the job result. The default value is false.
+ "quote": "\"", # [Optional] The value that is used to quote data sections in a CSV file. BigQuery converts the string to ISO-8859-1 encoding, and then uses the first byte of the encoded string to split the data in its raw, binary state. The default value is a double-quote ('"'). If your data does not contain quoted sections, set the property value to an empty string. If your data contains quoted newline characters, you must also set the allowQuotedNewlines property to true.
+ "allowQuotedNewlines": True or False, # [Optional] Indicates if BigQuery should allow quoted data sections that contain newline characters in a CSV file. The default value is false.
+ },
+ "sourceFormat": "A String", # [Required] The data format. For CSV files, specify "CSV". For newline-delimited JSON, specify "NEWLINE_DELIMITED_JSON".
+ "maxBadRecords": 42, # [Optional] The maximum number of bad records that BigQuery can ignore when reading data. If the number of bad records exceeds this value, an invalid error is returned in the job result. The default value is 0, which requires that all records are valid.
+ "ignoreUnknownValues": True or False, # [Optional] Indicates if BigQuery should allow extra values that are not represented in the table schema. If true, the extra values are ignored. If false, records with extra columns are treated as bad records, and if there are too many bad records, an invalid error is returned in the job result. The default value is false. The sourceFormat property determines what BigQuery treats as an extra value. CSV: Trailing columns. JSON: Named values that don't match any column names.
+ "sourceUris": [ # [Required] The fully-qualified URIs that point to your data in Google Cloud Storage. Each URI can contain one '*' wildcard character and it must come after the 'bucket' name. Size limits related to load jobs apply to external data sources, plus an additional limit of 10 GB maximum size across all URIs.
+ "A String",
+ ],
+ "schema": { # [Required] The schema for the data.
+ "fields": [ # Describes the fields in a table.
+ {
+ "fields": [ # [Optional] Describes the nested schema fields if the type property is set to RECORD.
+ # Object with schema name: TableFieldSchema
+ ],
+ "type": "A String", # [Required] The field data type. Possible values include STRING, INTEGER, FLOAT, BOOLEAN, TIMESTAMP or RECORD (where RECORD indicates that the field contains a nested schema).
+ "description": "A String", # [Optional] The field description. The maximum length is 16K characters.
+ "name": "A String", # [Required] The field name. The name must contain only letters (a-z, A-Z), numbers (0-9), or underscores (_), and must start with a letter or underscore. The maximum length is 128 characters.
+ "mode": "A String", # [Optional] The field mode. Possible values include NULLABLE, REQUIRED and REPEATED. The default value is NULLABLE.
+ },
+ ],
+ },
+ },
"tableReference": { # [Required] Reference describing the ID of this table.
"projectId": "A String", # [Required] The ID of the project containing this table.
"tableId": "A String", # [Required] The ID of the table. The ID must contain only letters (a-z, A-Z), numbers (0-9), or underscores (_). The maximum length is 1,024 characters.
"datasetId": "A String", # [Required] The ID of the dataset containing this table.
},
- "numRows": "A String", # [Output-only] The number of rows of data in this table. This property is unavailable for tables that are actively receiving streaming inserts.
- "numBytes": "A String", # [Output-only] The size of the table in bytes. This property is unavailable for tables that are actively receiving streaming inserts.
+ "numRows": "A String", # [Output-only] The number of rows of data in this table, excluding any data in the streaming buffer.
+ "numBytes": "A String", # [Output-only] The size of this table in bytes, excluding any data in the streaming buffer.
"etag": "A String", # [Output-only] A hash of this resource.
+ "location": "A String", # [Output-only] The geographic location where the table resides. This value is inherited from the dataset.
+ "streamingBuffer": { # [Output-only] Contains information regarding this table's streaming buffer, if one is present. This field will be absent if the table is not being streamed to or if there is no data in the streaming buffer.
+ "estimatedBytes": "A String", # [Output-only] A lower-bound estimate of the number of bytes currently in the streaming buffer.
+ "estimatedRows": "A String", # [Output-only] A lower-bound estimate of the number of rows currently in the streaming buffer.
+ "oldestEntryTime": "A String", # [Output-only] Contains the timestamp of the oldest entry in the streaming buffer, in milliseconds since the epoch, if the streaming buffer is available.
+ },
"friendlyName": "A String", # [Optional] A descriptive name for this table.
"expirationTime": "A String", # [Optional] The time when this table expires, in milliseconds since the epoch. If not present, the table will persist indefinitely. Expired tables will be deleted and their storage reclaimed.
"view": { # [Optional] The view definition.
@@ -347,14 +491,50 @@
"lastModifiedTime": "A String", # [Output-only] The time when this table was last modified, in milliseconds since the epoch.
"description": "A String", # [Optional] A user-friendly description of this table.
"creationTime": "A String", # [Output-only] The time when this table was created, in milliseconds since the epoch.
+ "externalDataConfiguration": { # [Experimental] Describes the data format, location, and other properties of a table stored outside of BigQuery. By defining these properties, the data source can then be queried as if it were a standard BigQuery table.
+ "compression": "A String", # [Optional] The compression type of the data source. Possible values include GZIP and NONE. The default value is NONE.
+ "csvOptions": { # Additional properties to set if sourceFormat is set to CSV.
+ "fieldDelimiter": "A String", # [Optional] The separator for fields in a CSV file. BigQuery converts the string to ISO-8859-1 encoding, and then uses the first byte of the encoded string to split the data in its raw, binary state. BigQuery also supports the escape sequence "\t" to specify a tab separator. The default value is a comma (',').
+ "encoding": "A String", # [Optional] The character encoding of the data. The supported values are UTF-8 or ISO-8859-1. The default value is UTF-8. BigQuery decodes the data after the raw, binary data has been split using the values of the quote and fieldDelimiter properties.
+ "skipLeadingRows": 42, # [Optional] The number of rows at the top of a CSV file that BigQuery will skip when reading the data. The default value is 0. This property is useful if you have header rows in the file that should be skipped.
+ "allowJaggedRows": True or False, # [Optional] Indicates if BigQuery should accept rows that are missing trailing optional columns. If true, BigQuery treats missing trailing columns as null values. If false, records with missing trailing columns are treated as bad records, and if there are too many bad records, an invalid error is returned in the job result. The default value is false.
+ "quote": "\"", # [Optional] The value that is used to quote data sections in a CSV file. BigQuery converts the string to ISO-8859-1 encoding, and then uses the first byte of the encoded string to split the data in its raw, binary state. The default value is a double-quote ('"'). If your data does not contain quoted sections, set the property value to an empty string. If your data contains quoted newline characters, you must also set the allowQuotedNewlines property to true.
+ "allowQuotedNewlines": True or False, # [Optional] Indicates if BigQuery should allow quoted data sections that contain newline characters in a CSV file. The default value is false.
+ },
+ "sourceFormat": "A String", # [Required] The data format. For CSV files, specify "CSV". For newline-delimited JSON, specify "NEWLINE_DELIMITED_JSON".
+ "maxBadRecords": 42, # [Optional] The maximum number of bad records that BigQuery can ignore when reading data. If the number of bad records exceeds this value, an invalid error is returned in the job result. The default value is 0, which requires that all records are valid.
+ "ignoreUnknownValues": True or False, # [Optional] Indicates if BigQuery should allow extra values that are not represented in the table schema. If true, the extra values are ignored. If false, records with extra columns are treated as bad records, and if there are too many bad records, an invalid error is returned in the job result. The default value is false. The sourceFormat property determines what BigQuery treats as an extra value. CSV: Trailing columns. JSON: Named values that don't match any column names.
+ "sourceUris": [ # [Required] The fully-qualified URIs that point to your data in Google Cloud Storage. Each URI can contain one '*' wildcard character and it must come after the 'bucket' name. Size limits related to load jobs apply to external data sources, plus an additional limit of 10 GB maximum size across all URIs.
+ "A String",
+ ],
+ "schema": { # [Required] The schema for the data.
+ "fields": [ # Describes the fields in a table.
+ {
+ "fields": [ # [Optional] Describes the nested schema fields if the type property is set to RECORD.
+ # Object with schema name: TableFieldSchema
+ ],
+ "type": "A String", # [Required] The field data type. Possible values include STRING, INTEGER, FLOAT, BOOLEAN, TIMESTAMP or RECORD (where RECORD indicates that the field contains a nested schema).
+ "description": "A String", # [Optional] The field description. The maximum length is 16K characters.
+ "name": "A String", # [Required] The field name. The name must contain only letters (a-z, A-Z), numbers (0-9), or underscores (_), and must start with a letter or underscore. The maximum length is 128 characters.
+ "mode": "A String", # [Optional] The field mode. Possible values include NULLABLE, REQUIRED and REPEATED. The default value is NULLABLE.
+ },
+ ],
+ },
+ },
"tableReference": { # [Required] Reference describing the ID of this table.
"projectId": "A String", # [Required] The ID of the project containing this table.
"tableId": "A String", # [Required] The ID of the table. The ID must contain only letters (a-z, A-Z), numbers (0-9), or underscores (_). The maximum length is 1,024 characters.
"datasetId": "A String", # [Required] The ID of the dataset containing this table.
},
- "numRows": "A String", # [Output-only] The number of rows of data in this table. This property is unavailable for tables that are actively receiving streaming inserts.
- "numBytes": "A String", # [Output-only] The size of the table in bytes. This property is unavailable for tables that are actively receiving streaming inserts.
+ "numRows": "A String", # [Output-only] The number of rows of data in this table, excluding any data in the streaming buffer.
+ "numBytes": "A String", # [Output-only] The size of this table in bytes, excluding any data in the streaming buffer.
"etag": "A String", # [Output-only] A hash of this resource.
+ "location": "A String", # [Output-only] The geographic location where the table resides. This value is inherited from the dataset.
+ "streamingBuffer": { # [Output-only] Contains information regarding this table's streaming buffer, if one is present. This field will be absent if the table is not being streamed to or if there is no data in the streaming buffer.
+ "estimatedBytes": "A String", # [Output-only] A lower-bound estimate of the number of bytes currently in the streaming buffer.
+ "estimatedRows": "A String", # [Output-only] A lower-bound estimate of the number of rows currently in the streaming buffer.
+ "oldestEntryTime": "A String", # [Output-only] Contains the timestamp of the oldest entry in the streaming buffer, in milliseconds since the epoch, if the streaming buffer is available.
+ },
"friendlyName": "A String", # [Optional] A descriptive name for this table.
"expirationTime": "A String", # [Optional] The time when this table expires, in milliseconds since the epoch. If not present, the table will persist indefinitely. Expired tables will be deleted and their storage reclaimed.
"view": { # [Optional] The view definition.
@@ -395,14 +575,50 @@
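The csvOptions below also cover compressed and tab-delimited data: fieldDelimiter accepts the "\t" escape, and an empty quote string marks data with no quoted sections. A sketch combining the two, again with hypothetical URIs:

    # Sketch: gzip-compressed, tab-separated source with no quoted sections.
    external_config = {
        'sourceFormat': 'CSV',
        'compression': 'GZIP',  # GZIP or NONE; NONE is the default
        'sourceUris': ['gs://my-bucket/exports/*.tsv.gz'],
        'csvOptions': {
            'fieldDelimiter': '\t',  # the documented "\t" tab escape
            'quote': '',             # data contains no quoted sections
        },
    }
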
"lastModifiedTime": "A String", # [Output-only] The time when this table was last modified, in milliseconds since the epoch.
"description": "A String", # [Optional] A user-friendly description of this table.
"creationTime": "A String", # [Output-only] The time when this table was created, in milliseconds since the epoch.
+ "externalDataConfiguration": { # [Experimental] Describes the data format, location, and other properties of a table stored outside of BigQuery. By defining these properties, the data source can then be queried as if it were a standard BigQuery table.
+ "compression": "A String", # [Optional] The compression type of the data source. Possible values include GZIP and NONE. The default value is NONE.
+ "csvOptions": { # Additional properties to set if sourceFormat is set to CSV.
+ "fieldDelimiter": "A String", # [Optional] The separator for fields in a CSV file. BigQuery converts the string to ISO-8859-1 encoding, and then uses the first byte of the encoded string to split the data in its raw, binary state. BigQuery also supports the escape sequence "\t" to specify a tab separator. The default value is a comma (',').
+ "encoding": "A String", # [Optional] The character encoding of the data. The supported values are UTF-8 or ISO-8859-1. The default value is UTF-8. BigQuery decodes the data after the raw, binary data has been split using the values of the quote and fieldDelimiter properties.
+ "skipLeadingRows": 42, # [Optional] The number of rows at the top of a CSV file that BigQuery will skip when reading the data. The default value is 0. This property is useful if you have header rows in the file that should be skipped.
+ "allowJaggedRows": True or False, # [Optional] Indicates if BigQuery should accept rows that are missing trailing optional columns. If true, BigQuery treats missing trailing columns as null values. If false, records with missing trailing columns are treated as bad records, and if there are too many bad records, an invalid error is returned in the job result. The default value is false.
+ "quote": "\"", # [Optional] The value that is used to quote data sections in a CSV file. BigQuery converts the string to ISO-8859-1 encoding, and then uses the first byte of the encoded string to split the data in its raw, binary state. The default value is a double-quote ('"'). If your data does not contain quoted sections, set the property value to an empty string. If your data contains quoted newline characters, you must also set the allowQuotedNewlines property to true.
+ "allowQuotedNewlines": True or False, # [Optional] Indicates if BigQuery should allow quoted data sections that contain newline characters in a CSV file. The default value is false.
+ },
+ "sourceFormat": "A String", # [Required] The data format. For CSV files, specify "CSV". For newline-delimited JSON, specify "NEWLINE_DELIMITED_JSON".
+ "maxBadRecords": 42, # [Optional] The maximum number of bad records that BigQuery can ignore when reading data. If the number of bad records exceeds this value, an invalid error is returned in the job result. The default value is 0, which requires that all records are valid.
+ "ignoreUnknownValues": True or False, # [Optional] Indicates if BigQuery should allow extra values that are not represented in the table schema. If true, the extra values are ignored. If false, records with extra columns are treated as bad records, and if there are too many bad records, an invalid error is returned in the job result. The default value is false. The sourceFormat property determines what BigQuery treats as an extra value. CSV: Trailing columns. JSON: Named values that don't match any column names.
+ "sourceUris": [ # [Required] The fully-qualified URIs that point to your data in Google Cloud Storage. Each URI can contain one '*' wildcard character and it must come after the 'bucket' name. Size limits related to load jobs apply to external data sources, plus an additional limit of 10 GB maximum size across all URIs.
+ "A String",
+ ],
+ "schema": { # [Required] The schema for the data.
+ "fields": [ # Describes the fields in a table.
+ {
+ "fields": [ # [Optional] Describes the nested schema fields if the type property is set to RECORD.
+ # Object with schema name: TableFieldSchema
+ ],
+ "type": "A String", # [Required] The field data type. Possible values include STRING, INTEGER, FLOAT, BOOLEAN, TIMESTAMP or RECORD (where RECORD indicates that the field contains a nested schema).
+ "description": "A String", # [Optional] The field description. The maximum length is 16K characters.
+ "name": "A String", # [Required] The field name. The name must contain only letters (a-z, A-Z), numbers (0-9), or underscores (_), and must start with a letter or underscore. The maximum length is 128 characters.
+ "mode": "A String", # [Optional] The field mode. Possible values include NULLABLE, REQUIRED and REPEATED. The default value is NULLABLE.
+ },
+ ],
+ },
+ },
"tableReference": { # [Required] Reference describing the ID of this table.
"projectId": "A String", # [Required] The ID of the project containing this table.
"tableId": "A String", # [Required] The ID of the table. The ID must contain only letters (a-z, A-Z), numbers (0-9), or underscores (_). The maximum length is 1,024 characters.
"datasetId": "A String", # [Required] The ID of the dataset containing this table.
},
- "numRows": "A String", # [Output-only] The number of rows of data in this table. This property is unavailable for tables that are actively receiving streaming inserts.
- "numBytes": "A String", # [Output-only] The size of the table in bytes. This property is unavailable for tables that are actively receiving streaming inserts.
+ "numRows": "A String", # [Output-only] The number of rows of data in this table, excluding any data in the streaming buffer.
+ "numBytes": "A String", # [Output-only] The size of this table in bytes, excluding any data in the streaming buffer.
"etag": "A String", # [Output-only] A hash of this resource.
+ "location": "A String", # [Output-only] The geographic location where the table resides. This value is inherited from the dataset.
+ "streamingBuffer": { # [Output-only] Contains information regarding this table's streaming buffer, if one is present. This field will be absent if the table is not being streamed to or if there is no data in the streaming buffer.
+ "estimatedBytes": "A String", # [Output-only] A lower-bound estimate of the number of bytes currently in the streaming buffer.
+ "estimatedRows": "A String", # [Output-only] A lower-bound estimate of the number of rows currently in the streaming buffer.
+ "oldestEntryTime": "A String", # [Output-only] Contains the timestamp of the oldest entry in the streaming buffer, in milliseconds since the epoch, if the streaming buffer is available.
+ },
"friendlyName": "A String", # [Optional] A descriptive name for this table.
"expirationTime": "A String", # [Optional] The time when this table expires, in milliseconds since the epoch. If not present, the table will persist indefinitely. Expired tables will be deleted and their storage reclaimed.
"view": { # [Optional] The view definition.
@@ -435,14 +651,50 @@
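All of the timestamp fields below (creationTime, lastModifiedTime, expirationTime, oldestEntryTime) are strings holding milliseconds since the epoch, so a small conversion helper is handy when reading them back; a sketch:

    # Sketch: convert a millisecond-epoch string field to a datetime.
    import datetime

    def ms_to_datetime(value):
        """Parse fields such as creationTime or oldestEntryTime."""
        return datetime.datetime.utcfromtimestamp(int(value) / 1000.0)

    # For example, the age of the oldest buffered entry, if any;
    # 'table' is a tables().get() response as in the earlier sketch.
    buf = table.get('streamingBuffer', {})
    if 'oldestEntryTime' in buf:
        print('oldest buffered entry:', ms_to_datetime(buf['oldestEntryTime']))
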
"lastModifiedTime": "A String", # [Output-only] The time when this table was last modified, in milliseconds since the epoch.
"description": "A String", # [Optional] A user-friendly description of this table.
"creationTime": "A String", # [Output-only] The time when this table was created, in milliseconds since the epoch.
+ "externalDataConfiguration": { # [Experimental] Describes the data format, location, and other properties of a table stored outside of BigQuery. By defining these properties, the data source can then be queried as if it were a standard BigQuery table.
+ "compression": "A String", # [Optional] The compression type of the data source. Possible values include GZIP and NONE. The default value is NONE.
+ "csvOptions": { # Additional properties to set if sourceFormat is set to CSV.
+ "fieldDelimiter": "A String", # [Optional] The separator for fields in a CSV file. BigQuery converts the string to ISO-8859-1 encoding, and then uses the first byte of the encoded string to split the data in its raw, binary state. BigQuery also supports the escape sequence "\t" to specify a tab separator. The default value is a comma (',').
+ "encoding": "A String", # [Optional] The character encoding of the data. The supported values are UTF-8 or ISO-8859-1. The default value is UTF-8. BigQuery decodes the data after the raw, binary data has been split using the values of the quote and fieldDelimiter properties.
+ "skipLeadingRows": 42, # [Optional] The number of rows at the top of a CSV file that BigQuery will skip when reading the data. The default value is 0. This property is useful if you have header rows in the file that should be skipped.
+ "allowJaggedRows": True or False, # [Optional] Indicates if BigQuery should accept rows that are missing trailing optional columns. If true, BigQuery treats missing trailing columns as null values. If false, records with missing trailing columns are treated as bad records, and if there are too many bad records, an invalid error is returned in the job result. The default value is false.
+ "quote": "\"", # [Optional] The value that is used to quote data sections in a CSV file. BigQuery converts the string to ISO-8859-1 encoding, and then uses the first byte of the encoded string to split the data in its raw, binary state. The default value is a double-quote ('"'). If your data does not contain quoted sections, set the property value to an empty string. If your data contains quoted newline characters, you must also set the allowQuotedNewlines property to true.
+ "allowQuotedNewlines": True or False, # [Optional] Indicates if BigQuery should allow quoted data sections that contain newline characters in a CSV file. The default value is false.
+ },
+ "sourceFormat": "A String", # [Required] The data format. For CSV files, specify "CSV". For newline-delimited JSON, specify "NEWLINE_DELIMITED_JSON".
+ "maxBadRecords": 42, # [Optional] The maximum number of bad records that BigQuery can ignore when reading data. If the number of bad records exceeds this value, an invalid error is returned in the job result. The default value is 0, which requires that all records are valid.
+ "ignoreUnknownValues": True or False, # [Optional] Indicates if BigQuery should allow extra values that are not represented in the table schema. If true, the extra values are ignored. If false, records with extra columns are treated as bad records, and if there are too many bad records, an invalid error is returned in the job result. The default value is false. The sourceFormat property determines what BigQuery treats as an extra value. CSV: Trailing columns. JSON: Named values that don't match any column names.
+ "sourceUris": [ # [Required] The fully-qualified URIs that point to your data in Google Cloud Storage. Each URI can contain one '*' wildcard character and it must come after the 'bucket' name. Size limits related to load jobs apply to external data sources, plus an additional limit of 10 GB maximum size across all URIs.
+ "A String",
+ ],
+ "schema": { # [Required] The schema for the data.
+ "fields": [ # Describes the fields in a table.
+ {
+ "fields": [ # [Optional] Describes the nested schema fields if the type property is set to RECORD.
+ # Object with schema name: TableFieldSchema
+ ],
+ "type": "A String", # [Required] The field data type. Possible values include STRING, INTEGER, FLOAT, BOOLEAN, TIMESTAMP or RECORD (where RECORD indicates that the field contains a nested schema).
+ "description": "A String", # [Optional] The field description. The maximum length is 16K characters.
+ "name": "A String", # [Required] The field name. The name must contain only letters (a-z, A-Z), numbers (0-9), or underscores (_), and must start with a letter or underscore. The maximum length is 128 characters.
+ "mode": "A String", # [Optional] The field mode. Possible values include NULLABLE, REQUIRED and REPEATED. The default value is NULLABLE.
+ },
+ ],
+ },
+ },
"tableReference": { # [Required] Reference describing the ID of this table.
"projectId": "A String", # [Required] The ID of the project containing this table.
"tableId": "A String", # [Required] The ID of the table. The ID must contain only letters (a-z, A-Z), numbers (0-9), or underscores (_). The maximum length is 1,024 characters.
"datasetId": "A String", # [Required] The ID of the dataset containing this table.
},
- "numRows": "A String", # [Output-only] The number of rows of data in this table. This property is unavailable for tables that are actively receiving streaming inserts.
- "numBytes": "A String", # [Output-only] The size of the table in bytes. This property is unavailable for tables that are actively receiving streaming inserts.
+ "numRows": "A String", # [Output-only] The number of rows of data in this table, excluding any data in the streaming buffer.
+ "numBytes": "A String", # [Output-only] The size of this table in bytes, excluding any data in the streaming buffer.
"etag": "A String", # [Output-only] A hash of this resource.
+ "location": "A String", # [Output-only] The geographic location where the table resides. This value is inherited from the dataset.
+ "streamingBuffer": { # [Output-only] Contains information regarding this table's streaming buffer, if one is present. This field will be absent if the table is not being streamed to or if there is no data in the streaming buffer.
+ "estimatedBytes": "A String", # [Output-only] A lower-bound estimate of the number of bytes currently in the streaming buffer.
+ "estimatedRows": "A String", # [Output-only] A lower-bound estimate of the number of rows currently in the streaming buffer.
+ "oldestEntryTime": "A String", # [Output-only] Contains the timestamp of the oldest entry in the streaming buffer, in milliseconds since the epoch, if the streaming buffer is available.
+ },
"friendlyName": "A String", # [Optional] A descriptive name for this table.
"expirationTime": "A String", # [Optional] The time when this table expires, in milliseconds since the epoch. If not present, the table will persist indefinitely. Expired tables will be deleted and their storage reclaimed.
"view": { # [Optional] The view definition.