refresh docs: document the BigQuery v2 load job quote option and note that fieldDelimiter and quote are applied to the raw, binary data before the encoding is applied
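
For reference, a minimal sketch of how the load configuration fields documented in the hunks below are used with the generated Python client; the project, dataset, table, and bucket names are placeholders, and credential/http wiring is omitted:

    from apiclient.discovery import build

    # Build the BigQuery v2 service (attach an authorized Http object in real use).
    service = build('bigquery', 'v2')

    job_body = {
        'configuration': {
            'load': {
                'sourceUris': ['gs://example-bucket/data.csv'],  # placeholder bucket/object
                'destinationTable': {
                    'projectId': 'example-project',
                    'datasetId': 'example_dataset',
                    'tableId': 'example_table',
                },
                'encoding': 'UTF-8',
                'fieldDelimiter': ',',   # applied to the raw, binary data
                'quote': '"',            # likewise applied before the encoding
                'skipLeadingRows': 1,    # skip a header row
                'maxBadRecords': 0,      # abort on the first bad record
                'writeDisposition': 'WRITE_APPEND',
                'createDisposition': 'CREATE_IF_NEEDED',
            }
        }
    }

    # jobs().insert() starts the load job; the response echoes the configuration
    # shown below and includes a jobReference that can be used to poll status.
    response = service.jobs().insert(projectId='example-project', body=job_body).execute()
    print(response['jobReference']['jobId'])
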
diff --git a/docs/dyn/bigquery_v2.jobs.html b/docs/dyn/bigquery_v2.jobs.html
index 29631bc..6ba9877 100644
--- a/docs/dyn/bigquery_v2.jobs.html
+++ b/docs/dyn/bigquery_v2.jobs.html
@@ -136,18 +136,19 @@
"configuration": { # [Required] Describes the job configuration.
"load": { # [Pick one] Configures a load job.
"encoding": "A String", # [Optional] Character encoding of the input data. May be UTF-8 or ISO-8859-1. Default is UTF-8.
- "fieldDelimiter": "A String", # [Optional] Delimiter to use between fields in the import data. Default is ','
+ "fieldDelimiter": "A String", # [Optional] Delimiter to use between fields in the import data. Default is ','. Note that delimiters are applied to the raw, binary data before the encoding is applied.
"destinationTable": { # [Required] Table being written to.
"projectId": "A String", # [Required] ID of the project billed for storage of the table.
"tableId": "A String", # [Required] ID of the table.
"datasetId": "A String", # [Required] ID of the dataset containing the table.
},
- "maxBadRecords": 42, # [Optional] Maximum number of bad records that should be ignored before the entire job is aborted and no updates are performed.
"writeDisposition": "A String", # [Optional] Whether to overwrite an existing table (WRITE_TRUNCATE), append to an existing table (WRITE_APPEND), or require that the the table is empty (WRITE_EMPTY). Default is WRITE_APPEND.
+ "maxBadRecords": 42, # [Optional] Maximum number of bad records that should be ignored before the entire job is aborted and no updates are performed.
+ "skipLeadingRows": 42, # [Optional] Number of rows of initial data to skip in the data being imported.
"sourceUris": [ # [Required] Source URIs describing Google Cloud Storage locations of data to load.
"A String",
],
- "skipLeadingRows": 42, # [Optional] Number of rows of initial data to skip in the data being imported.
+ "quote": "A String", # [Optional] Quote character to use. Default is '"'. Note that quoting is done on the raw, binary data before the encoding is applied.
"createDisposition": "A String", # [Optional] Whether to create the table if it doesn't already exist (CREATE_IF_NEEDED) or to require the table already exist (CREATE_NEVER). Default is CREATE_IF_NEEDED.
"schemaInlineFormat": "A String", # [Experimental] Format of inlineSchema field.
"schemaInline": "A String", # [Experimental] Inline schema. For CSV schemas, specify as "Field1:Type1[,Field2:Type2]*". For example, "foo:STRING, bar:INTEGER, baz:FLOAT"
@@ -312,18 +313,19 @@
"configuration": { # [Required] Describes the job configuration.
"load": { # [Pick one] Configures a load job.
"encoding": "A String", # [Optional] Character encoding of the input data. May be UTF-8 or ISO-8859-1. Default is UTF-8.
- "fieldDelimiter": "A String", # [Optional] Delimiter to use between fields in the import data. Default is ','
+ "fieldDelimiter": "A String", # [Optional] Delimiter to use between fields in the import data. Default is ','. Note that delimiters are applied to the raw, binary data before the encoding is applied.
"destinationTable": { # [Required] Table being written to.
"projectId": "A String", # [Required] ID of the project billed for storage of the table.
"tableId": "A String", # [Required] ID of the table.
"datasetId": "A String", # [Required] ID of the dataset containing the table.
},
- "maxBadRecords": 42, # [Optional] Maximum number of bad records that should be ignored before the entire job is aborted and no updates are performed.
"writeDisposition": "A String", # [Optional] Whether to overwrite an existing table (WRITE_TRUNCATE), append to an existing table (WRITE_APPEND), or require that the the table is empty (WRITE_EMPTY). Default is WRITE_APPEND.
+ "maxBadRecords": 42, # [Optional] Maximum number of bad records that should be ignored before the entire job is aborted and no updates are performed.
+ "skipLeadingRows": 42, # [Optional] Number of rows of initial data to skip in the data being imported.
"sourceUris": [ # [Required] Source URIs describing Google Cloud Storage locations of data to load.
"A String",
],
- "skipLeadingRows": 42, # [Optional] Number of rows of initial data to skip in the data being imported.
+ "quote": "A String", # [Optional] Quote character to use. Default is '"'. Note that quoting is done on the raw, binary data before the encoding is applied.
"createDisposition": "A String", # [Optional] Whether to create the table if it doesn't already exist (CREATE_IF_NEEDED) or to require the table already exist (CREATE_NEVER). Default is CREATE_IF_NEEDED.
"schemaInlineFormat": "A String", # [Experimental] Format of inlineSchema field.
"schemaInline": "A String", # [Experimental] Inline schema. For CSV schemas, specify as "Field1:Type1[,Field2:Type2]*". For example, "foo:STRING, bar:INTEGER, baz:FLOAT"
@@ -436,18 +438,19 @@
"configuration": { # [Required] Describes the job configuration.
"load": { # [Pick one] Configures a load job.
"encoding": "A String", # [Optional] Character encoding of the input data. May be UTF-8 or ISO-8859-1. Default is UTF-8.
- "fieldDelimiter": "A String", # [Optional] Delimiter to use between fields in the import data. Default is ','
+ "fieldDelimiter": "A String", # [Optional] Delimiter to use between fields in the import data. Default is ','. Note that delimiters are applied to the raw, binary data before the encoding is applied.
"destinationTable": { # [Required] Table being written to.
"projectId": "A String", # [Required] ID of the project billed for storage of the table.
"tableId": "A String", # [Required] ID of the table.
"datasetId": "A String", # [Required] ID of the dataset containing the table.
},
- "maxBadRecords": 42, # [Optional] Maximum number of bad records that should be ignored before the entire job is aborted and no updates are performed.
"writeDisposition": "A String", # [Optional] Whether to overwrite an existing table (WRITE_TRUNCATE), append to an existing table (WRITE_APPEND), or require that the the table is empty (WRITE_EMPTY). Default is WRITE_APPEND.
+ "maxBadRecords": 42, # [Optional] Maximum number of bad records that should be ignored before the entire job is aborted and no updates are performed.
+ "skipLeadingRows": 42, # [Optional] Number of rows of initial data to skip in the data being imported.
"sourceUris": [ # [Required] Source URIs describing Google Cloud Storage locations of data to load.
"A String",
],
- "skipLeadingRows": 42, # [Optional] Number of rows of initial data to skip in the data being imported.
+ "quote": "A String", # [Optional] Quote character to use. Default is '"'. Note that quoting is done on the raw, binary data before the encoding is applied.
"createDisposition": "A String", # [Optional] Whether to create the table if it doesn't already exist (CREATE_IF_NEEDED) or to require the table already exist (CREATE_NEVER). Default is CREATE_IF_NEEDED.
"schemaInlineFormat": "A String", # [Experimental] Format of inlineSchema field.
"schemaInline": "A String", # [Experimental] Inline schema. For CSV schemas, specify as "Field1:Type1[,Field2:Type2]*". For example, "foo:STRING, bar:INTEGER, baz:FLOAT"
@@ -584,18 +587,19 @@
"configuration": { # [Full-projection-only] Specifies the job configuration.
"load": { # [Pick one] Configures a load job.
"encoding": "A String", # [Optional] Character encoding of the input data. May be UTF-8 or ISO-8859-1. Default is UTF-8.
- "fieldDelimiter": "A String", # [Optional] Delimiter to use between fields in the import data. Default is ','
+ "fieldDelimiter": "A String", # [Optional] Delimiter to use between fields in the import data. Default is ','. Note that delimiters are applied to the raw, binary data before the encoding is applied.
"destinationTable": { # [Required] Table being written to.
"projectId": "A String", # [Required] ID of the project billed for storage of the table.
"tableId": "A String", # [Required] ID of the table.
"datasetId": "A String", # [Required] ID of the dataset containing the table.
},
- "maxBadRecords": 42, # [Optional] Maximum number of bad records that should be ignored before the entire job is aborted and no updates are performed.
"writeDisposition": "A String", # [Optional] Whether to overwrite an existing table (WRITE_TRUNCATE), append to an existing table (WRITE_APPEND), or require that the the table is empty (WRITE_EMPTY). Default is WRITE_APPEND.
+ "maxBadRecords": 42, # [Optional] Maximum number of bad records that should be ignored before the entire job is aborted and no updates are performed.
+ "skipLeadingRows": 42, # [Optional] Number of rows of initial data to skip in the data being imported.
"sourceUris": [ # [Required] Source URIs describing Google Cloud Storage locations of data to load.
"A String",
],
- "skipLeadingRows": 42, # [Optional] Number of rows of initial data to skip in the data being imported.
+ "quote": "A String", # [Optional] Quote character to use. Default is '"'. Note that quoting is done on the raw, binary data before the encoding is applied.
"createDisposition": "A String", # [Optional] Whether to create the table if it doesn't already exist (CREATE_IF_NEEDED) or to require the table already exist (CREATE_NEVER). Default is CREATE_IF_NEEDED.
"schemaInlineFormat": "A String", # [Experimental] Format of inlineSchema field.
"schemaInline": "A String", # [Experimental] Inline schema. For CSV schemas, specify as "Field1:Type1[,Field2:Type2]*". For example, "foo:STRING, bar:INTEGER, baz:FLOAT"