Refresh docs
diff --git a/docs/dyn/bigquery_v2.jobs.html b/docs/dyn/bigquery_v2.jobs.html
index 70b0098..26cc5ab 100644
--- a/docs/dyn/bigquery_v2.jobs.html
+++ b/docs/dyn/bigquery_v2.jobs.html
@@ -144,34 +144,34 @@
       "etag": "A String", # [Output-only] A hash of this resource.
       "configuration": { # [Required] Describes the job configuration.
         "load": { # [Pick one] Configures a load job.
-          "encoding": "A String", # [Optional] Character encoding of the input data. May be UTF-8 or ISO-8859-1. Default is UTF-8.
-          "fieldDelimiter": "A String", # [Optional] Delimiter to use between fields in the import data. Default is ','. Note that delimiters are applied to the raw, binary data before the encoding is applied.
-          "sourceFormat": "A String", # [Experimental] Optional and defaults to CSV. Format of source files. For CSV uploads, specify "CSV". For imports of datastore backups, specify "DATASTORE_BACKUP". For imports of newline-delimited JSON, specify "NEWLINE_DELIMITED_JSON".
-          "destinationTable": { # [Required] Table being written to.
+          "encoding": "A String", # [Optional] The character encoding of the data. The supported values are UTF-8 or ISO-8859-1. The default value is UTF-8. BigQuery decodes the data after the raw, binary data has been split using the values of the quote and fieldDelimiter properties.
+          "fieldDelimiter": "A String", # [Optional] The separator for fields in a CSV file. BigQuery converts the string to ISO-8859-1 encoding, and then uses the first byte of the encoded string to split the data in its raw, binary state. BigQuery also supports the escape sequence "\t" to specify a tab separator. The default value is a comma (',').
+          "sourceFormat": "A String", # [Optional] The format of the data files. For CSV files, specify "CSV". For datastore backups, specify "DATASTORE_BACKUP". For newline-delimited JSON, specify "NEWLINE_DELIMITED_JSON". The default value is CSV.
+          "destinationTable": { # [Required] The destination table to load the data into.
             "projectId": "A String", # [Required] ID of the project billed for storage of the table.
             "tableId": "A String", # [Required] ID of the table.
             "datasetId": "A String", # [Required] ID of the dataset containing the table.
           },
-          "writeDisposition": "A String", # [Optional] Whether to overwrite an existing table (WRITE_TRUNCATE), append to an existing table (WRITE_APPEND), or require that the the table is empty (WRITE_EMPTY). Default is WRITE_APPEND.
-          "maxBadRecords": 42, # [Optional] Maximum number of bad records that should be ignored before the entire job is aborted and no updates are performed.
-          "skipLeadingRows": 42, # [Optional] Number of rows of initial data to skip in the data being imported.
-          "sourceUris": [ # [Required] Source URIs describing Google Cloud Storage locations of data to load.
+          "writeDisposition": "A String", # [Optional] Specifies the action that occurs if the destination table already exists. Each action is atomic and only occurs if BigQuery is able to fully load the data and the load job completes without error. The following values are supported: WRITE_TRUNCATE: If the table already exists, BigQuery overwrites the table. WRITE_APPEND: If the table already exists, BigQuery appends the data to the table. WRITE_EMPTY: If the table already exists, a 'duplicate' error is returned in the job result. Creation, truncation and append actions occur as one atomic update upon job completion.
+          "maxBadRecords": 42, # [Optional] The maximum number of bad records that BigQuery can ignore when running the job. If the number of bad records exceeds this value, an 'invalid' error is returned in the job result and the job fails. The default value is 0, which requires that all records are valid.
+          "skipLeadingRows": 42, # [Optional] The number of rows at the top of a CSV file that BigQuery will skip when loading the data. The default value is 0. This property is useful if you have header rows in the file that should be skipped.
+          "sourceUris": [ # [Required] The fully-qualified URIs that point to your data on Google Cloud Storage.
             "A String",
           ],
-          "quote": "A String", # [Optional] Quote character to use. Default is '"'. Note that quoting is done on the raw, binary data before the encoding is applied. If no quoting is done, use am empty string.
-          "createDisposition": "A String", # [Optional] Whether to create the table if it doesn't already exist (CREATE_IF_NEEDED) or to require the table already exist (CREATE_NEVER). Default is CREATE_IF_NEEDED.
-          "schemaInlineFormat": "A String", # [Experimental] Format of inlineSchema field.
-          "schemaInline": "A String", # [Experimental] Inline schema. For CSV schemas, specify as "Field1:Type1[,Field2:Type2]*". For example, "foo:STRING, bar:INTEGER, baz:FLOAT"
-          "allowQuotedNewlines": True or False, # [Experimental] Whether to allow quoted newlines in the source CSV data.
-          "schema": { # [Optional] Schema of the table being written to.
+          "quote": "A String", # [Optional] The value that is used to quote data sections in a CSV file. BigQuery converts the string to ISO-8859-1 encoding, and then uses the first byte of the encoded string to split the data in its raw, binary state. The default value is a double-quote ('"'). If your data does not contain quoted sections, set the property value to an empty string. If your data contains quoted newline characters, you must also set the allowQuotedNewlines property to true.
+          "createDisposition": "A String", # [Optional] Specifies whether the job is allowed to create new tables. The following values are supported: CREATE_IF_NEEDED: If the table does not exist, BigQuery creates the table. CREATE_NEVER: The table must already exist. If it does not, a 'notFound' error is returned in the job result. The default value is CREATE_IF_NEEDED. Creation, truncation and append actions occur as one atomic update upon job completion.
+          "schemaInlineFormat": "A String", # [Deprecated] The format of the schemaInline property.
+          "schemaInline": "A String", # [Deprecated] The inline schema. For CSV schemas, specify as "Field1:Type1[,Field2:Type2]*". For example, "foo:STRING, bar:INTEGER, baz:FLOAT".
+          "allowQuotedNewlines": True or False, # Indicates if BigQuery should allow quoted data sections that contain newline characters in a CSV file. The default value is false.
+          "schema": { # [Optional] The schema for the destination table. The schema can be omitted if the destination table already exists or if the schema can be inferred from the loaded data.
             "fields": [ # Describes the fields in a table.
               {
-                "fields": [ # [Optional] Describes nested fields when type is RECORD.
+                "fields": [ # [Optional] Describes the nested schema fields if the type property is set to RECORD.
                   # Object with schema name: TableFieldSchema
                 ],
-                "type": "A String", # [Required] Data type of the field.
-                "mode": "A String", # [Optional] Mode of the field (whether or not it can be null. Default is NULLABLE.
-                "name": "A String", # [Required] Name of the field.
+                "type": "A String", # [Required] The field data type. Possible values include STRING, INTEGER, FLOAT, BOOLEAN, TIMESTAMP or RECORD (where RECORD indicates a nested schema).
+                "mode": "A String", # [Optional] The field mode. Possible values include NULLABLE, REQUIRED and REPEATED. The default value is NULLABLE.
+                "name": "A String", # [Required] The field name.
               },
             ],
           },
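
For orientation, the load options above map straight onto the request body of
jobs().insert() in this Python client. Below is a minimal sketch rather than an
excerpt from these docs: the project, dataset, table, and bucket names are
placeholders, and http stands for an already-authorized httplib2.Http object
from your usual OAuth flow. (Older client releases import from apiclient
rather than googleapiclient.)

    from googleapiclient.discovery import build

    service = build('bigquery', 'v2', http=http)  # http: authorized httplib2.Http

    load_job = {
        'configuration': {
            'load': {
                'sourceUris': ['gs://example-bucket/data.csv'],  # placeholder URI
                'destinationTable': {  # placeholder table reference
                    'projectId': 'example-project',
                    'datasetId': 'example_dataset',
                    'tableId': 'example_table',
                },
                # CSV parsing options documented above; all optional.
                'encoding': 'UTF-8',
                'fieldDelimiter': ',',
                'quote': '"',
                'skipLeadingRows': 1,   # skip a single header row
                'maxBadRecords': 0,     # default: every record must be valid
                # Create the table if missing; append on subsequent runs.
                'createDisposition': 'CREATE_IF_NEEDED',
                'writeDisposition': 'WRITE_APPEND',
                'schema': {
                    'fields': [
                        {'name': 'foo', 'type': 'STRING', 'mode': 'REQUIRED'},
                        {'name': 'bar', 'type': 'INTEGER', 'mode': 'NULLABLE'},
                    ],
                },
            },
        },
    }
    job = service.jobs().insert(projectId='example-project',
                                body=load_job).execute()
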
@@ -200,6 +200,7 @@
           },
           "priority": "A String", # [Optional] Specifies a priority for the query. Default is INTERACTIVE. Alternative is BATCH.
           "writeDisposition": "A String", # [Optional] Whether to overwrite an existing table (WRITE_TRUNCATE), append to an existing table (WRITE_APPEND), or require that the the table is empty (WRITE_EMPTY). Default is WRITE_EMPTY.
+          "allowLargeResults": True or False, # [Experimental] If true, allows >128M results to be materialized in the destination table. Requires destination_table to be set.
           "createDisposition": "A String", # [Optional] Whether to create the table if it doesn't already exist (CREATE_IF_NEEDED) or to require the table already exist (CREATE_NEVER). Default is CREATE_IF_NEEDED.
           "query": "A String", # [Required] BigQuery SQL query to execute.
           "preserveNulls": True or False, # [Experimental] If set, preserve null values in table data, rather than mapping null values to the column's default value. This flag currently defaults to false, but the default will soon be changed to true. Shortly afterward, this flag will be removed completely. Please specify true if possible, and false only if you need to force the old behavior while updating client code.
@@ -273,12 +274,12 @@
     "schema": { # The schema of the results. Present only when the query completes successfully.
       "fields": [ # Describes the fields in a table.
         {
-          "fields": [ # [Optional] Describes nested fields when type is RECORD.
+          "fields": [ # [Optional] Describes the nested schema fields if the type property is set to RECORD.
             # Object with schema name: TableFieldSchema
           ],
-          "type": "A String", # [Required] Data type of the field.
-          "mode": "A String", # [Optional] Mode of the field (whether or not it can be null. Default is NULLABLE.
-          "name": "A String", # [Required] Name of the field.
+          "type": "A String", # [Required] The field data type. Possible values include STRING, INTEGER, FLOAT, BOOLEAN, TIMESTAMP or RECORD (where RECORD indicates a nested schema).
+          "mode": "A String", # [Optional] The field mode. Possible values include NULLABLE, REQUIRED and REPEATED. The default value is NULLABLE.
+          "name": "A String", # [Required] The field name.
         },
       ],
     },
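
Because a field of type RECORD carries its children in a nested fields list,
walking a returned schema is naturally recursive. A sketch under the same
assumptions as the sketches above (the query is illustrative only):

    def print_schema(fields, indent=0):
        # Each entry has the TableFieldSchema shape documented above.
        for field in fields:
            print('%s%s: %s (%s)' % (' ' * indent, field['name'], field['type'],
                                     field.get('mode', 'NULLABLE')))
            if field['type'] == 'RECORD':  # children live in a nested 'fields' list
                print_schema(field['fields'], indent + 2)

    response = service.jobs().query(
        projectId='example-project',
        body={'query': 'SELECT word FROM publicdata:samples.shakespeare LIMIT 10'},
    ).execute()
    if response.get('jobComplete'):  # schema is present only after successful completion
        print_schema(response['schema']['fields'])
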
@@ -334,34 +335,34 @@
     "etag": "A String", # [Output-only] A hash of this resource.
     "configuration": { # [Required] Describes the job configuration.
       "load": { # [Pick one] Configures a load job.
-        "encoding": "A String", # [Optional] Character encoding of the input data. May be UTF-8 or ISO-8859-1. Default is UTF-8.
-        "fieldDelimiter": "A String", # [Optional] Delimiter to use between fields in the import data. Default is ','. Note that delimiters are applied to the raw, binary data before the encoding is applied.
-        "sourceFormat": "A String", # [Experimental] Optional and defaults to CSV. Format of source files. For CSV uploads, specify "CSV". For imports of datastore backups, specify "DATASTORE_BACKUP". For imports of newline-delimited JSON, specify "NEWLINE_DELIMITED_JSON".
-        "destinationTable": { # [Required] Table being written to.
+        "encoding": "A String", # [Optional] The character encoding of the data. The supported values are UTF-8 or ISO-8859-1. The default value is UTF-8. BigQuery decodes the data after the raw, binary data has been split using the values of the quote and fieldDelimiter properties.
+        "fieldDelimiter": "A String", # [Optional] The separator for fields in a CSV file. BigQuery converts the string to ISO-8859-1 encoding, and then uses the first byte of the encoded string to split the data in its raw, binary state. BigQuery also supports the escape sequence "\t" to specify a tab separator. The default value is a comma (',').
+        "sourceFormat": "A String", # [Optional] The format of the data files. For CSV files, specify "CSV". For datastore backups, specify "DATASTORE_BACKUP". For newline-delimited JSON, specify "NEWLINE_DELIMITED_JSON". The default value is CSV.
+        "destinationTable": { # [Required] The destination table to load the data into.
           "projectId": "A String", # [Required] ID of the project billed for storage of the table.
           "tableId": "A String", # [Required] ID of the table.
           "datasetId": "A String", # [Required] ID of the dataset containing the table.
         },
-        "writeDisposition": "A String", # [Optional] Whether to overwrite an existing table (WRITE_TRUNCATE), append to an existing table (WRITE_APPEND), or require that the the table is empty (WRITE_EMPTY). Default is WRITE_APPEND.
-        "maxBadRecords": 42, # [Optional] Maximum number of bad records that should be ignored before the entire job is aborted and no updates are performed.
-        "skipLeadingRows": 42, # [Optional] Number of rows of initial data to skip in the data being imported.
-        "sourceUris": [ # [Required] Source URIs describing Google Cloud Storage locations of data to load.
+        "writeDisposition": "A String", # [Optional] Specifies the action that occurs if the destination table already exists. Each action is atomic and only occurs if BigQuery is able to fully load the data and the load job completes without error. The following values are supported: WRITE_TRUNCATE: If the table already exists, BigQuery overwrites the table. WRITE_APPEND: If the table already exists, BigQuery appends the data to the table. WRITE_EMPTY: If the table already exists, a 'duplicate' error is returned in the job result. Creation, truncation and append actions occur as one atomic update upon job completion.
+        "maxBadRecords": 42, # [Optional] The maximum number of bad records that BigQuery can ignore when running the job. If the number of bad records exceeds this value, an 'invalid' error is returned in the job result and the job fails. The default value is 0, which requires that all records are valid.
+        "skipLeadingRows": 42, # [Optional] The number of rows at the top of a CSV file that BigQuery will skip when loading the data. The default value is 0. This property is useful if you have header rows in the file that should be skipped.
+        "sourceUris": [ # [Required] The fully-qualified URIs that point to your data on Google Cloud Storage.
           "A String",
         ],
-        "quote": "A String", # [Optional] Quote character to use. Default is '"'. Note that quoting is done on the raw, binary data before the encoding is applied. If no quoting is done, use am empty string.
-        "createDisposition": "A String", # [Optional] Whether to create the table if it doesn't already exist (CREATE_IF_NEEDED) or to require the table already exist (CREATE_NEVER). Default is CREATE_IF_NEEDED.
-        "schemaInlineFormat": "A String", # [Experimental] Format of inlineSchema field.
-        "schemaInline": "A String", # [Experimental] Inline schema. For CSV schemas, specify as "Field1:Type1[,Field2:Type2]*". For example, "foo:STRING, bar:INTEGER, baz:FLOAT"
-        "allowQuotedNewlines": True or False, # [Experimental] Whether to allow quoted newlines in the source CSV data.
-        "schema": { # [Optional] Schema of the table being written to.
+        "quote": "A String", # [Optional] The value that is used to quote data sections in a CSV file. BigQuery converts the string to ISO-8859-1 encoding, and then uses the first byte of the encoded string to split the data in its raw, binary state. The default value is a double-quote ('"'). If your data does not contain quoted sections, set the property value to an empty string. If your data contains quoted newline characters, you must also set the allowQuotedNewlines property to true.
+        "createDisposition": "A String", # [Optional] Specifies whether the job is allowed to create new tables. The following values are supported: CREATE_IF_NEEDED: If the table does not exist, BigQuery creates the table. CREATE_NEVER: The table must already exist. If it does not, a 'notFound' error is returned in the job result. The default value is CREATE_IF_NEEDED. Creation, truncation and append actions occur as one atomic update upon job completion.
+        "schemaInlineFormat": "A String", # [Deprecated] The format of the schemaInline property.
+        "schemaInline": "A String", # [Deprecated] The inline schema. For CSV schemas, specify as "Field1:Type1[,Field2:Type2]*". For example, "foo:STRING, bar:INTEGER, baz:FLOAT".
+        "allowQuotedNewlines": True or False, # Indicates if BigQuery should allow quoted data sections that contain newline characters in a CSV file. The default value is false.
+        "schema": { # [Optional] The schema for the destination table. The schema can be omitted if the destination table already exists or if the schema can be inferred from the loaded data.
           "fields": [ # Describes the fields in a table.
             {
-              "fields": [ # [Optional] Describes nested fields when type is RECORD.
+              "fields": [ # [Optional] Describes the nested schema fields if the type property is set to RECORD.
                 # Object with schema name: TableFieldSchema
               ],
-              "type": "A String", # [Required] Data type of the field.
-              "mode": "A String", # [Optional] Mode of the field (whether or not it can be null. Default is NULLABLE.
-              "name": "A String", # [Required] Name of the field.
+              "type": "A String", # [Required] The field data type. Possible values include STRING, INTEGER, FLOAT, BOOLEAN, TIMESTAMP or RECORD (where RECORD indicates a nested schema).
+              "mode": "A String", # [Optional] The field mode. Possible values include NULLABLE, REQUIRED and REPEATED. The default value is NULLABLE.
+              "name": "A String", # [Required] The field name.
             },
           ],
         },
@@ -390,6 +391,7 @@
         },
         "priority": "A String", # [Optional] Specifies a priority for the query. Default is INTERACTIVE. Alternative is BATCH.
         "writeDisposition": "A String", # [Optional] Whether to overwrite an existing table (WRITE_TRUNCATE), append to an existing table (WRITE_APPEND), or require that the the table is empty (WRITE_EMPTY). Default is WRITE_EMPTY.
+        "allowLargeResults": True or False, # [Experimental] If true, allows >128M results to be materialized in the destination table. Requires destination_table to be set.
         "createDisposition": "A String", # [Optional] Whether to create the table if it doesn't already exist (CREATE_IF_NEEDED) or to require the table already exist (CREATE_NEVER). Default is CREATE_IF_NEEDED.
         "query": "A String", # [Required] BigQuery SQL query to execute.
         "preserveNulls": True or False, # [Experimental] If set, preserve null values in table data, rather than mapping null values to the column's default value. This flag currently defaults to false, but the default will soon be changed to true. Shortly afterward, this flag will be removed completely. Please specify true if possible, and false only if you need to force the old behavior while updating client code.
@@ -472,34 +474,34 @@
       "etag": "A String", # [Output-only] A hash of this resource.
       "configuration": { # [Required] Describes the job configuration.
         "load": { # [Pick one] Configures a load job.
-          "encoding": "A String", # [Optional] Character encoding of the input data. May be UTF-8 or ISO-8859-1. Default is UTF-8.
-          "fieldDelimiter": "A String", # [Optional] Delimiter to use between fields in the import data. Default is ','. Note that delimiters are applied to the raw, binary data before the encoding is applied.
-          "sourceFormat": "A String", # [Experimental] Optional and defaults to CSV. Format of source files. For CSV uploads, specify "CSV". For imports of datastore backups, specify "DATASTORE_BACKUP". For imports of newline-delimited JSON, specify "NEWLINE_DELIMITED_JSON".
-          "destinationTable": { # [Required] Table being written to.
+          "encoding": "A String", # [Optional] The character encoding of the data. The supported values are UTF-8 or ISO-8859-1. The default value is UTF-8. BigQuery decodes the data after the raw, binary data has been split using the values of the quote and fieldDelimiter properties.
+          "fieldDelimiter": "A String", # [Optional] The separator for fields in a CSV file. BigQuery converts the string to ISO-8859-1 encoding, and then uses the first byte of the encoded string to split the data in its raw, binary state. BigQuery also supports the escape sequence "\t" to specify a tab separator. The default value is a comma (',').
+          "sourceFormat": "A String", # [Optional] The format of the data files. For CSV files, specify "CSV". For datastore backups, specify "DATASTORE_BACKUP". For newline-delimited JSON, specify "NEWLINE_DELIMITED_JSON". The default value is CSV.
+          "destinationTable": { # [Required] The destination table to load the data into.
             "projectId": "A String", # [Required] ID of the project billed for storage of the table.
             "tableId": "A String", # [Required] ID of the table.
             "datasetId": "A String", # [Required] ID of the dataset containing the table.
           },
-          "writeDisposition": "A String", # [Optional] Whether to overwrite an existing table (WRITE_TRUNCATE), append to an existing table (WRITE_APPEND), or require that the the table is empty (WRITE_EMPTY). Default is WRITE_APPEND.
-          "maxBadRecords": 42, # [Optional] Maximum number of bad records that should be ignored before the entire job is aborted and no updates are performed.
-          "skipLeadingRows": 42, # [Optional] Number of rows of initial data to skip in the data being imported.
-          "sourceUris": [ # [Required] Source URIs describing Google Cloud Storage locations of data to load.
+          "writeDisposition": "A String", # [Optional] Specifies the action that occurs if the destination table already exists. Each action is atomic and only occurs if BigQuery is able to fully load the data and the load job completes without error. The following values are supported: WRITE_TRUNCATE: If the table already exists, BigQuery overwrites the table. WRITE_APPEND: If the table already exists, BigQuery appends the data to the table. WRITE_EMPTY: If the table already exists, a 'duplicate' error is returned in the job result. Creation, truncation and append actions occur as one atomic update upon job completion.
+          "maxBadRecords": 42, # [Optional] The maximum number of bad records that BigQuery can ignore when running the job. If the number of bad records exceeds this value, an 'invalid' error is returned in the job result and the job fails. The default value is 0, which requires that all records are valid.
+          "skipLeadingRows": 42, # [Optional] The number of rows at the top of a CSV file that BigQuery will skip when loading the data. The default value is 0. This property is useful if you have header rows in the file that should be skipped.
+          "sourceUris": [ # [Required] The fully-qualified URIs that point to your data on Google Cloud Storage.
             "A String",
           ],
-          "quote": "A String", # [Optional] Quote character to use. Default is '"'. Note that quoting is done on the raw, binary data before the encoding is applied. If no quoting is done, use am empty string.
-          "createDisposition": "A String", # [Optional] Whether to create the table if it doesn't already exist (CREATE_IF_NEEDED) or to require the table already exist (CREATE_NEVER). Default is CREATE_IF_NEEDED.
-          "schemaInlineFormat": "A String", # [Experimental] Format of inlineSchema field.
-          "schemaInline": "A String", # [Experimental] Inline schema. For CSV schemas, specify as "Field1:Type1[,Field2:Type2]*". For example, "foo:STRING, bar:INTEGER, baz:FLOAT"
-          "allowQuotedNewlines": True or False, # [Experimental] Whether to allow quoted newlines in the source CSV data.
-          "schema": { # [Optional] Schema of the table being written to.
+          "quote": "A String", # [Optional] The value that is used to quote data sections in a CSV file. BigQuery converts the string to ISO-8859-1 encoding, and then uses the first byte of the encoded string to split the data in its raw, binary state. The default value is a double-quote ('"'). If your data does not contain quoted sections, set the property value to an empty string. If your data contains quoted newline characters, you must also set the allowQuotedNewlines property to true.
+          "createDisposition": "A String", # [Optional] Specifies whether the job is allowed to create new tables. The following values are supported: CREATE_IF_NEEDED: If the table does not exist, BigQuery creates the table. CREATE_NEVER: The table must already exist. If it does not, a 'notFound' error is returned in the job result. The default value is CREATE_IF_NEEDED. Creation, truncation and append actions occur as one atomic update upon job completion.
+          "schemaInlineFormat": "A String", # [Deprecated] The format of the schemaInline property.
+          "schemaInline": "A String", # [Deprecated] The inline schema. For CSV schemas, specify as "Field1:Type1[,Field2:Type2]*". For example, "foo:STRING, bar:INTEGER, baz:FLOAT".
+          "allowQuotedNewlines": True or False, # Indicates if BigQuery should allow quoted data sections that contain newline characters in a CSV file. The default value is false.
+          "schema": { # [Optional] The schema for the destination table. The schema can be omitted if the destination table already exists or if the schema can be inferred from the loaded data.
             "fields": [ # Describes the fields in a table.
               {
-                "fields": [ # [Optional] Describes nested fields when type is RECORD.
+                "fields": [ # [Optional] Describes the nested schema fields if the type property is set to RECORD.
                   # Object with schema name: TableFieldSchema
                 ],
-                "type": "A String", # [Required] Data type of the field.
-                "mode": "A String", # [Optional] Mode of the field (whether or not it can be null. Default is NULLABLE.
-                "name": "A String", # [Required] Name of the field.
+                "type": "A String", # [Required] The field data type. Possible values include STRING, INTEGER, FLOAT, BOOLEAN, TIMESTAMP or RECORD (where RECORD indicates a nested schema).
+                "mode": "A String", # [Optional] The field mode. Possible values include NULLABLE, REQUIRED and REPEATED. The default value is NULLABLE.
+                "name": "A String", # [Required] The field name.
               },
             ],
           },
@@ -528,6 +530,7 @@
           },
           "priority": "A String", # [Optional] Specifies a priority for the query. Default is INTERACTIVE. Alternative is BATCH.
           "writeDisposition": "A String", # [Optional] Whether to overwrite an existing table (WRITE_TRUNCATE), append to an existing table (WRITE_APPEND), or require that the the table is empty (WRITE_EMPTY). Default is WRITE_EMPTY.
+          "allowLargeResults": True or False, # [Experimental] If true, allows >128M results to be materialized in the destination table. Requires destination_table to be set.
           "createDisposition": "A String", # [Optional] Whether to create the table if it doesn't already exist (CREATE_IF_NEEDED) or to require the table already exist (CREATE_NEVER). Default is CREATE_IF_NEEDED.
           "query": "A String", # [Required] BigQuery SQL query to execute.
           "preserveNulls": True or False, # [Experimental] If set, preserve null values in table data, rather than mapping null values to the column's default value. This flag currently defaults to false, but the default will soon be changed to true. Shortly afterward, this flag will be removed completely. Please specify true if possible, and false only if you need to force the old behavior while updating client code.
@@ -634,34 +637,34 @@
         "state": "A String", # Running state of the job. When the state is DONE, errorResult can be checked to determine whether the job succeeded or failed.
         "configuration": { # [Full-projection-only] Specifies the job configuration.
           "load": { # [Pick one] Configures a load job.
-            "encoding": "A String", # [Optional] Character encoding of the input data. May be UTF-8 or ISO-8859-1. Default is UTF-8.
-            "fieldDelimiter": "A String", # [Optional] Delimiter to use between fields in the import data. Default is ','. Note that delimiters are applied to the raw, binary data before the encoding is applied.
-            "sourceFormat": "A String", # [Experimental] Optional and defaults to CSV. Format of source files. For CSV uploads, specify "CSV". For imports of datastore backups, specify "DATASTORE_BACKUP". For imports of newline-delimited JSON, specify "NEWLINE_DELIMITED_JSON".
-            "destinationTable": { # [Required] Table being written to.
+            "encoding": "A String", # [Optional] The character encoding of the data. The supported values are UTF-8 or ISO-8859-1. The default value is UTF-8. BigQuery decodes the data after the raw, binary data has been split using the values of the quote and fieldDelimiter properties.
+            "fieldDelimiter": "A String", # [Optional] The separator for fields in a CSV file. BigQuery converts the string to ISO-8859-1 encoding, and then uses the first byte of the encoded string to split the data in its raw, binary state. BigQuery also supports the escape sequence "\t" to specify a tab separator. The default value is a comma (',').
+            "sourceFormat": "A String", # [Optional] The format of the data files. For CSV files, specify "CSV". For datastore backups, specify "DATASTORE_BACKUP". For newline-delimited JSON, specify "NEWLINE_DELIMITED_JSON". The default value is CSV.
+            "destinationTable": { # [Required] The destination table to load the data into.
               "projectId": "A String", # [Required] ID of the project billed for storage of the table.
               "tableId": "A String", # [Required] ID of the table.
               "datasetId": "A String", # [Required] ID of the dataset containing the table.
             },
-            "writeDisposition": "A String", # [Optional] Whether to overwrite an existing table (WRITE_TRUNCATE), append to an existing table (WRITE_APPEND), or require that the the table is empty (WRITE_EMPTY). Default is WRITE_APPEND.
-            "maxBadRecords": 42, # [Optional] Maximum number of bad records that should be ignored before the entire job is aborted and no updates are performed.
-            "skipLeadingRows": 42, # [Optional] Number of rows of initial data to skip in the data being imported.
-            "sourceUris": [ # [Required] Source URIs describing Google Cloud Storage locations of data to load.
+            "writeDisposition": "A String", # [Optional] Specifies the action that occurs if the destination table already exists. Each action is atomic and only occurs if BigQuery is able to fully load the data and the load job completes without error. The following values are supported: WRITE_TRUNCATE: If the table already exists, BigQuery overwrites the table. WRITE_APPEND: If the table already exists, BigQuery appends the data to the table. WRITE_EMPTY: If the table already exists, a 'duplicate' error is returned in the job result. Creation, truncation and append actions occur as one atomic update upon job completion.
+            "maxBadRecords": 42, # [Optional] The maximum number of bad records that BigQuery can ignore when running the job. If the number of bad records exceeds this value, an 'invalid' error is returned in the job result and the job fails. The default value is 0, which requires that all records are valid.
+            "skipLeadingRows": 42, # [Optional] The number of rows at the top of a CSV file that BigQuery will skip when loading the data. The default value is 0. This property is useful if you have header rows in the file that should be skipped.
+            "sourceUris": [ # [Required] The fully-qualified URIs that point to your data on Google Cloud Storage.
               "A String",
             ],
-            "quote": "A String", # [Optional] Quote character to use. Default is '"'. Note that quoting is done on the raw, binary data before the encoding is applied. If no quoting is done, use am empty string.
-            "createDisposition": "A String", # [Optional] Whether to create the table if it doesn't already exist (CREATE_IF_NEEDED) or to require the table already exist (CREATE_NEVER). Default is CREATE_IF_NEEDED.
-            "schemaInlineFormat": "A String", # [Experimental] Format of inlineSchema field.
-            "schemaInline": "A String", # [Experimental] Inline schema. For CSV schemas, specify as "Field1:Type1[,Field2:Type2]*". For example, "foo:STRING, bar:INTEGER, baz:FLOAT"
-            "allowQuotedNewlines": True or False, # [Experimental] Whether to allow quoted newlines in the source CSV data.
-            "schema": { # [Optional] Schema of the table being written to.
+            "quote": "A String", # [Optional] The value that is used to quote data sections in a CSV file. BigQuery converts the string to ISO-8859-1 encoding, and then uses the first byte of the encoded string to split the data in its raw, binary state. The default value is a double-quote ('"'). If your data does not contain quoted sections, set the property value to an empty string. If your data contains quoted newline characters, you must also set the allowQuotedNewlines property to true.
+            "createDisposition": "A String", # [Optional] Specifies whether the job is allowed to create new tables. The following values are supported: CREATE_IF_NEEDED: If the table does not exist, BigQuery creates the table. CREATE_NEVER: The table must already exist. If it does not, a 'notFound' error is returned in the job result. The default value is CREATE_IF_NEEDED. Creation, truncation and append actions occur as one atomic update upon job completion.
+            "schemaInlineFormat": "A String", # [Deprecated] The format of the schemaInline property.
+            "schemaInline": "A String", # [Deprecated] The inline schema. For CSV schemas, specify as "Field1:Type1[,Field2:Type2]*". For example, "foo:STRING, bar:INTEGER, baz:FLOAT".
+            "allowQuotedNewlines": True or False, # Indicates if BigQuery should allow quoted data sections that contain newline characters in a CSV file. The default value is false.
+            "schema": { # [Optional] The schema for the destination table. The schema can be omitted if the destination table already exists or if the schema can be inferred from the loaded data.
               "fields": [ # Describes the fields in a table.
                 {
-                  "fields": [ # [Optional] Describes nested fields when type is RECORD.
+                  "fields": [ # [Optional] Describes the nested schema fields if the type property is set to RECORD.
                     # Object with schema name: TableFieldSchema
                   ],
-                  "type": "A String", # [Required] Data type of the field.
-                  "mode": "A String", # [Optional] Mode of the field (whether or not it can be null. Default is NULLABLE.
-                  "name": "A String", # [Required] Name of the field.
+                  "type": "A String", # [Required] The field data type. Possible values include STRING, INTEGER, FLOAT, BOOLEAN, TIMESTAMP or RECORD (where RECORD indicates a nested schema).
+                  "mode": "A String", # [Optional] The field mode. Possible values include NULLABLE, REQUIRED and REPEATED. The default value is NULLABLE.
+                  "name": "A String", # [Required] The field name.
                 },
               ],
             },
@@ -690,6 +693,7 @@
             },
             "priority": "A String", # [Optional] Specifies a priority for the query. Default is INTERACTIVE. Alternative is BATCH.
             "writeDisposition": "A String", # [Optional] Whether to overwrite an existing table (WRITE_TRUNCATE), append to an existing table (WRITE_APPEND), or require that the the table is empty (WRITE_EMPTY). Default is WRITE_EMPTY.
+            "allowLargeResults": True or False, # [Experimental] If true, allows >128M results to be materialized in the destination table. Requires destination_table to be set.
             "createDisposition": "A String", # [Optional] Whether to create the table if it doesn't already exist (CREATE_IF_NEEDED) or to require the table already exist (CREATE_NEVER). Default is CREATE_IF_NEEDED.
             "query": "A String", # [Required] BigQuery SQL query to execute.
             "preserveNulls": True or False, # [Experimental] If set, preserve null values in table data, rather than mapping null values to the column's default value. This flag currently defaults to false, but the default will soon be changed to true. Shortly afterward, this flag will be removed completely. Please specify true if possible, and false only if you need to force the old behavior while updating client code.
@@ -795,12 +799,12 @@
     "schema": { # The schema of the results. Present only when the query completes successfully.
       "fields": [ # Describes the fields in a table.
         {
-          "fields": [ # [Optional] Describes nested fields when type is RECORD.
+          "fields": [ # [Optional] Describes the nested schema fields if the type property is set to RECORD.
             # Object with schema name: TableFieldSchema
           ],
-          "type": "A String", # [Required] Data type of the field.
-          "mode": "A String", # [Optional] Mode of the field (whether or not it can be null. Default is NULLABLE.
-          "name": "A String", # [Required] Name of the field.
+          "type": "A String", # [Required] The field data type. Possible values include STRING, INTEGER, FLOAT, BOOLEAN, TIMESTAMP or RECORD (where RECORD indicates a nested schema).
+          "mode": "A String", # [Optional] The field mode. Possible values include NULLABLE, REQUIRED and REPEATED. The default value is NULLABLE.
+          "name": "A String", # [Required] The field name.
         },
       ],
     },