Refresh docs: bigquery_v2.jobs gains the experimental allowQuotedNewlines flag on load configurations and the dryRun flag on query requests
diff --git a/docs/dyn/bigquery_v2.jobs.html b/docs/dyn/bigquery_v2.jobs.html
index 6ba9877..c686d41 100644
--- a/docs/dyn/bigquery_v2.jobs.html
+++ b/docs/dyn/bigquery_v2.jobs.html
@@ -152,6 +152,7 @@
"createDisposition": "A String", # [Optional] Whether to create the table if it doesn't already exist (CREATE_IF_NEEDED) or to require the table already exist (CREATE_NEVER). Default is CREATE_IF_NEEDED.
"schemaInlineFormat": "A String", # [Experimental] Format of inlineSchema field.
"schemaInline": "A String", # [Experimental] Inline schema. For CSV schemas, specify as "Field1:Type1[,Field2:Type2]*". For example, "foo:STRING, bar:INTEGER, baz:FLOAT"
+ "allowQuotedNewlines": True or False, # [Experimental] Whether to allow quoted newlines in the source CSV data.
"schema": { # [Optional] Schema of the table being written to.
"fields": [ # Describes the fields in a table.
{
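The allowQuotedNewlines flag documented above sits in the load configuration of a job resource. As a minimal sketch (not the library's canonical example), assuming an already-authorized service object and placeholder project, dataset, table, and Cloud Storage names, it would be set like this:

# Hedged sketch: insert a load job whose CSV source may contain newlines inside
# quoted fields, using the [Experimental] allowQuotedNewlines flag added above.
# All identifiers (project, dataset, table, gs:// URI) are placeholders.
from apiclient.discovery import build

# Assumes `http` is an httplib2.Http wrapped with OAuth 2.0 credentials.
bigquery = build('bigquery', 'v2', http=http)

job_body = {
    'configuration': {
        'load': {
            'sourceUris': ['gs://my-bucket/data.csv'],            # placeholder source file
            'schemaInlineFormat': 'CSV',                          # [Experimental] format of schemaInline
            'schemaInline': 'foo:STRING, bar:INTEGER, baz:FLOAT', # inline CSV schema, as documented above
            'allowQuotedNewlines': True,                          # [Experimental] accept quoted newlines in the CSV
            'createDisposition': 'CREATE_IF_NEEDED',
            'destinationTable': {
                'projectId': 'myProjectId',
                'datasetId': 'myDatasetId',
                'tableId': 'myTableId',
            },
        },
    },
}

job = bigquery.jobs().insert(projectId='myProjectId', body=job_body).execute()

The same flag appears in the remaining load-configuration hunks below, which cover the other places the job resource is rendered in this page.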
@@ -329,6 +330,7 @@
"createDisposition": "A String", # [Optional] Whether to create the table if it doesn't already exist (CREATE_IF_NEEDED) or to require the table already exist (CREATE_NEVER). Default is CREATE_IF_NEEDED.
"schemaInlineFormat": "A String", # [Experimental] Format of inlineSchema field.
"schemaInline": "A String", # [Experimental] Inline schema. For CSV schemas, specify as "Field1:Type1[,Field2:Type2]*". For example, "foo:STRING, bar:INTEGER, baz:FLOAT"
+ "allowQuotedNewlines": True or False, # [Experimental] Whether to allow quoted newlines in the source CSV data.
"schema": { # [Optional] Schema of the table being written to.
"fields": [ # Describes the fields in a table.
{
@@ -454,6 +456,7 @@
"createDisposition": "A String", # [Optional] Whether to create the table if it doesn't already exist (CREATE_IF_NEEDED) or to require the table already exist (CREATE_NEVER). Default is CREATE_IF_NEEDED.
"schemaInlineFormat": "A String", # [Experimental] Format of inlineSchema field.
"schemaInline": "A String", # [Experimental] Inline schema. For CSV schemas, specify as "Field1:Type1[,Field2:Type2]*". For example, "foo:STRING, bar:INTEGER, baz:FLOAT"
+ "allowQuotedNewlines": True or False, # [Experimental] Whether to allow quoted newlines in the source CSV data.
"schema": { # [Optional] Schema of the table being written to.
"fields": [ # Describes the fields in a table.
{
@@ -603,6 +606,7 @@
"createDisposition": "A String", # [Optional] Whether to create the table if it doesn't already exist (CREATE_IF_NEEDED) or to require the table already exist (CREATE_NEVER). Default is CREATE_IF_NEEDED.
"schemaInlineFormat": "A String", # [Experimental] Format of inlineSchema field.
"schemaInline": "A String", # [Experimental] Inline schema. For CSV schemas, specify as "Field1:Type1[,Field2:Type2]*". For example, "foo:STRING, bar:INTEGER, baz:FLOAT"
+ "allowQuotedNewlines": True or False, # [Experimental] Whether to allow quoted newlines in the source CSV data.
"schema": { # [Optional] Schema of the table being written to.
"fields": [ # Describes the fields in a table.
{
@@ -708,13 +712,14 @@
{
"timeoutMs": 42, # [Optional] How long to wait for the query to complete, in milliseconds, before returning. Default is to return immediately. If the timeout passes before the job completes, the request will fail with a TIMEOUT error.
- "query": "A String", # [Required] A query string, following the BigQuery query syntax of the query to execute. Table names should be qualified by dataset name in the format projectId:datasetId.tableId unless you specify the defaultDataset value. If the table is in the same project as the job, you can omit the project ID. Example: SELECT f1 FROM myProjectId:myDatasetId.myTableId.
"kind": "bigquery#queryRequest", # The resource type of the request.
- "maxResults": 42, # [Optional] The maximum number of results to return per page of results. If the response list exceeds the maximum response size for a single response, you will have to page through the results. Default is to return the maximum response size.
+ "dryRun": True or False, # [Optional] If set, don't actually run the query. A valid query will return an empty response, while an invalid query will return the same error it would if it wasn't a dry run.
"defaultDataset": { # [Optional] Specifies the default datasetId and projectId to assume for any unqualified table names in the query. If not set, all table names in the query string must be fully-qualified in the format projectId:datasetId.tableid.
"projectId": "A String", # [Optional] The ID of the container project.
"datasetId": "A String", # [Required] A unique ID for this dataset, without the project name.
},
+ "maxResults": 42, # [Optional] The maximum number of results to return per page of results. If the response list exceeds the maximum response size for a single response, you will have to page through the results. Default is to return the maximum response size.
+ "query": "A String", # [Required] A query string, following the BigQuery query syntax of the query to execute. Table names should be qualified by dataset name in the format projectId:datasetId.tableId unless you specify the defaultDataset value. If the table is in the same project as the job, you can omit the project ID. Example: SELECT f1 FROM myProjectId:myDatasetId.myTableId.
}
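The reordered query-request body above also picks up the new dryRun flag. A small sketch of calling jobs().query() with it, reusing the placeholder names and the bigquery service object from the earlier example:

# Hedged sketch: validate a query without running it via the dryRun flag
# documented above. Table and project names are placeholders.
request_body = {
    'query': 'SELECT f1 FROM myProjectId:myDatasetId.myTableId',
    'defaultDataset': {                # lets unqualified table names resolve against this dataset
        'projectId': 'myProjectId',
        'datasetId': 'myDatasetId',
    },
    'maxResults': 100,                 # page size for the eventual result set
    'timeoutMs': 10000,                # wait up to 10 s before the request returns
    'dryRun': True,                    # don't actually run the query
}

response = bigquery.jobs().query(projectId='myProjectId', body=request_body).execute()
# With dryRun set, a valid query comes back as an essentially empty response;
# an invalid one raises apiclient.errors.HttpError with the same error message
# a real run would have produced.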