Regen all docs. (#700)

* Stop recursing if discovery == {}

* Generate docs with 'make docs'.
diff --git a/docs/dyn/bigquery_v2.tabledata.html b/docs/dyn/bigquery_v2.tabledata.html
index e3eabd9..ff0a8ac 100644
--- a/docs/dyn/bigquery_v2.tabledata.html
+++ b/docs/dyn/bigquery_v2.tabledata.html
@@ -78,7 +78,7 @@
   <code><a href="#insertAll">insertAll(projectId, datasetId, tableId, body)</a></code></p>
 <p class="firstline">Streams data into BigQuery one record at a time without needing to run a load job. Requires the WRITER dataset role.</p>
 <p class="toc_element">
-  <code><a href="#list">list(projectId, datasetId, tableId, pageToken=None, maxResults=None, startIndex=None)</a></code></p>
+  <code><a href="#list">list(projectId, datasetId, tableId, selectedFields=None, pageToken=None, maxResults=None, startIndex=None)</a></code></p>
 <p class="firstline">Retrieves table data from a specified set of rows. Requires the READER dataset role.</p>
 <p class="toc_element">
   <code><a href="#list_next">list_next(previous_request, previous_response)</a></code></p>
@@ -107,7 +107,7 @@
       },
     ],
     "skipInvalidRows": True or False, # [Optional] Insert all valid rows of a request, even if invalid rows exist. The default value is false, which causes the entire request to fail if any invalid rows exist.
-    "templateSuffix": "A String", # [Experimental] If specified, treats the destination table as a base template, and inserts the rows into an instance table named "{destination}{templateSuffix}". BigQuery will manage creation of the instance table, using the schema of the base template table. See https://cloud.google.com/bigquery/streaming-data-into-bigquery#template-tables for considerations when working with templates tables.
+    "templateSuffix": "A String", # If specified, treats the destination table as a base template, and inserts the rows into an instance table named "{destination}{templateSuffix}". BigQuery will manage creation of the instance table, using the schema of the base template table. See https://cloud.google.com/bigquery/streaming-data-into-bigquery#template-tables for considerations when working with templates tables.
   }
 
 
@@ -133,13 +133,14 @@
 </div>
 
 <div class="method">
-    <code class="details" id="list">list(projectId, datasetId, tableId, pageToken=None, maxResults=None, startIndex=None)</code>
+    <code class="details" id="list">list(projectId, datasetId, tableId, selectedFields=None, pageToken=None, maxResults=None, startIndex=None)</code>
   <pre>Retrieves table data from a specified set of rows. Requires the READER dataset role.
 
 Args:
   projectId: string, Project ID of the table to read (required)
   datasetId: string, Dataset ID of the table to read (required)
   tableId: string, Table ID of the table to read (required)
+  selectedFields: string, List of fields to return (comma-separated). If unspecified, all fields are returned
   pageToken: string, Page token, returned by a previous call, identifying the result set
   maxResults: integer, Maximum number of results to return
   startIndex: string, Zero-based index of the starting row to read
@@ -150,7 +151,6 @@
     {
     "pageToken": "A String", # A token used for paging results. Providing this token instead of the startIndex parameter can help you retrieve stable results when an underlying table is changing.
     "kind": "bigquery#tableDataList", # The resource type of the response.
-    "etag": "A String", # A hash of this page of results.
     "rows": [ # Rows of results.
       {
         "f": [ # Represents a single row in the result set, consisting of one or more fields.
@@ -161,6 +161,7 @@
       },
     ],
     "totalRows": "A String", # The total number of rows in the complete table.
+    "etag": "A String", # A hash of this page of results.
   }</pre>
 </div>