1<html><body>
2<style>
3
4body, h1, h2, h3, div, span, p, pre, a {
5 margin: 0;
6 padding: 0;
7 border: 0;
8 font-weight: inherit;
9 font-style: inherit;
10 font-size: 100%;
11 font-family: inherit;
12 vertical-align: baseline;
13}
14
15body {
16 font-size: 13px;
17 padding: 1em;
18}
19
20h1 {
21 font-size: 26px;
22 margin-bottom: 1em;
23}
24
25h2 {
26 font-size: 24px;
27 margin-bottom: 1em;
28}
29
30h3 {
31 font-size: 20px;
32 margin-bottom: 1em;
33 margin-top: 1em;
34}
35
36pre, code {
37 line-height: 1.5;
38 font-family: Monaco, 'DejaVu Sans Mono', 'Bitstream Vera Sans Mono', 'Lucida Console', monospace;
39}
40
41pre {
42 margin-top: 0.5em;
43}
44
45h1, h2, h3, p {
46 font-family: Arial, sans-serif;
47}
48
49h1, h2, h3 {
50 border-bottom: solid #CCC 1px;
51}
52
53.toc_element {
54 margin-top: 0.5em;
55}
56
57.firstline {
58 margin-left: 2em;
59}
60
61.method {
62 margin-top: 1em;
63 border: solid 1px #CCC;
64 padding: 1em;
65 background: #EEE;
66}
67
68.details {
69 font-weight: bold;
70 font-size: 14px;
71}
72
73</style>
74
75<h1><a href="bigquery_v2.html">BigQuery API</a> . <a href="bigquery_v2.jobs.html">jobs</a></h1>
76<h2>Instance Methods</h2>
77<p class="toc_element">
78 <code><a href="#get">get(projectId, jobId)</a></code></p>
79<p class="firstline">Retrieves the specified job by ID.</p>
80<p class="toc_element">
81 <code><a href="#getQueryResults">getQueryResults(projectId, jobId, timeoutMs=None, maxResults=None, startIndex=None)</a></code></p>
82<p class="firstline">Retrieves the results of a query job.</p>
83<p class="toc_element">
84 <code><a href="#insert">insert(projectId, body=None, media_body=None)</a></code></p>
85<p class="firstline">Starts a new asynchronous job.</p>
86<p class="toc_element">
87 <code><a href="#list">list(projectId, projection=None, stateFilter=None, pageToken=None, allUsers=None, maxResults=None)</a></code></p>
88<p class="firstline">Lists all the Jobs in the specified project that were started by the user.</p>
89<p class="toc_element">
90 <code><a href="#list_next">list_next(previous_request, previous_response)</a></code></p>
91<p class="firstline">Retrieves the next page of results.</p>
92<p class="toc_element">
93 <code><a href="#query">query(projectId, body)</a></code></p>
94<p class="firstline">Runs a BigQuery SQL query synchronously and returns query results if the query completes within a specified timeout.</p>
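<p>The method samples that follow assume an authorized <code>service</code> object. One possible way to construct it is sketched below; the import path, credential handling, and <code>PROJECT_ID</code> value are illustrative assumptions, not part of this reference.</p>
<pre>
# Hypothetical setup sketch: 'credentials' must be obtained separately via OAuth 2.0.
import httplib2
from apiclient.discovery import build

http = credentials.authorize(httplib2.Http())
service = build('bigquery', 'v2', http=http)
jobs = service.jobs()      # the collection whose methods are documented below
PROJECT_ID = 'my-project'  # placeholder project ID used in the examples
</pre>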
95<h3>Method Details</h3>
96<div class="method">
97 <code class="details" id="get">get(projectId, jobId)</code>
98 <pre>Retrieves the specified job by ID.
99
100Args:
101 projectId: string, Project ID of the requested job (required)
102 jobId: string, Job ID of the requested job (required)
103
104Returns:
105 An object of the form:
106
107 {
108 "status": { # [Output-only] The status of this job. Examine this value when polling an asynchronous job to see if the job is complete.
109 "state": "A String", # [Output-only] Running state of the job.
110 "errors": [ # [Output-only] All errors encountered during the running of the job. Errors here do not necessarily mean that the job has completed or was unsuccessful.
111 {
112 "debugInfo": "A String", # Debugging information for the service, if present. Should be ignored.
113 "message": "A String", # A human readable explanation of the error.
114 "reason": "A String", # Specifies the error reason. For example, reason will be "required" or "invalid" if some field was missing or malformed.
115 "location": "A String", # Specifies where the error occurred, if present.
116 },
117 ],
118 "errorResult": { # [Output-only] Final error result of the job. If present, indicates that the job has completed and was unsuccessful.
119 "debugInfo": "A String", # Debugging information for the service, if present. Should be ignored.
120 "message": "A String", # A human readable explanation of the error.
121 "reason": "A String", # Specifies the error reason. For example, reason will be "required" or "invalid" if some field was missing or malformed.
122 "location": "A String", # Specifies where the error occurred, if present.
123 },
124 },
125 "kind": "bigquery#job", # [Output-only] The type of the resource.
126 "statistics": { # [Output-only] Information about the job, including starting time and ending time of the job.
127 "endTime": "A String", # [Output-only] End time of this job, in milliseconds since the epoch.
128 "totalBytesProcessed": "A String", # [Output-only] Total bytes processed for this job.
129 "startTime": "A String", # [Output-only] Start time of this job, in milliseconds since the epoch.
130 },
131 "jobReference": { # [Optional] Reference describing the unique-per-user name of the job.
132 "projectId": "A String", # [Required] Project ID being billed for the job.
133 "jobId": "A String", # [Required] ID of the job.
134 },
135 "etag": "A String", # [Output-only] A hash of this resource.
136 "configuration": { # [Required] Describes the job configuration.
137 "load": { # [Pick one] Configures a load job.
138 "encoding": "A String", # [Optional] Character encoding of the input data. May be UTF-8 or ISO-8859-1. Default is UTF-8.
139 "fieldDelimiter": "A String", # [Optional] Delimiter to use between fields in the import data. Default is ','. Note that delimiters are applied to the raw, binary data before the encoding is applied.
140 "destinationTable": { # [Required] Table being written to.
141 "projectId": "A String", # [Required] ID of the project billed for storage of the table.
142 "tableId": "A String", # [Required] ID of the table.
143 "datasetId": "A String", # [Required] ID of the dataset containing the table.
144 },
145 "writeDisposition": "A String", # [Optional] Whether to overwrite an existing table (WRITE_TRUNCATE), append to an existing table (WRITE_APPEND), or require that the table is empty (WRITE_EMPTY). Default is WRITE_APPEND.
146 "maxBadRecords": 42, # [Optional] Maximum number of bad records that should be ignored before the entire job is aborted and no updates are performed.
147 "skipLeadingRows": 42, # [Optional] Number of rows of initial data to skip in the data being imported.
148 "sourceUris": [ # [Required] Source URIs describing Google Cloud Storage locations of data to load.
149 "A String",
150 ],
151 "quote": "A String", # [Optional] Quote character to use. Default is '"'. Note that quoting is done on the raw, binary data before the encoding is applied.
152 "createDisposition": "A String", # [Optional] Whether to create the table if it doesn't already exist (CREATE_IF_NEEDED) or to require the table already exist (CREATE_NEVER). Default is CREATE_IF_NEEDED.
153 "schemaInlineFormat": "A String", # [Experimental] Format of inlineSchema field.
154 "schemaInline": "A String", # [Experimental] Inline schema. For CSV schemas, specify as "Field1:Type1[,Field2:Type2]*". For example, "foo:STRING, bar:INTEGER, baz:FLOAT"
155 "allowQuotedNewlines": True or False, # [Experimental] Whether to allow quoted newlines in the source CSV data.
156 "schema": { # [Optional] Schema of the table being written to.
157 "fields": [ # Describes the fields in a table.
158 {
159 "fields": [ # [Optional] Describes nested fields when type is RECORD.
160 # Object with schema name: TableFieldSchema
161 ],
162 "type": "A String", # [Required] Data type of the field.
163 "mode": "A String", # [Optional] Mode of the field (whether or not it can be null). Default is NULLABLE.
164 "name": "A String", # [Required] Name of the field.
165 },
166 ],
167 },
168 },
169 "link": { # [Pick one] Configures a link job.
170 "createDisposition": "A String", # [Optional] Whether or not to create a new table, if none exists.
171 "writeDisposition": "A String", # [Optional] Whether to overwrite an existing table (WRITE_TRUNCATE), append to an existing table (WRITE_APPEND), or require that the table is empty (WRITE_EMPTY). Default is WRITE_APPEND.
172 "destinationTable": { # [Required] The destination table of the link job.
173 "projectId": "A String", # [Required] ID of the project billed for storage of the table.
174 "tableId": "A String", # [Required] ID of the table.
175 "datasetId": "A String", # [Required] ID of the dataset containing the table.
176 },
177 "sourceUri": [ # [Required] URI of source table to link.
178 "A String",
179 ],
180 },
181 "query": { # [Pick one] Configures a query job.
182 "defaultDataset": { # [Optional] Specifies the default dataset to assume for unqualified table names in the query.
183 "projectId": "A String", # [Optional] The ID of the container project.
184 "datasetId": "A String", # [Required] A unique ID for this dataset, without the project name.
185 },
186 "destinationTable": { # [Optional] Describes the table where the query results should be stored. If not present, a new table will be created to store the results.
187 "projectId": "A String", # [Required] ID of the project billed for storage of the table.
188 "tableId": "A String", # [Required] ID of the table.
189 "datasetId": "A String", # [Required] ID of the dataset containing the table.
190 },
191 "priority": "A String", # [Experimental] Specifies a priority for the query. Default is INTERACTIVE. Alternative is BATCH, which may be subject to looser quota restrictions.
192 "writeDisposition": "A String", # [Optional] Whether to overwrite an existing table (WRITE_TRUNCATE), append to an existing table (WRITE_APPEND), or require that the table is empty (WRITE_EMPTY). Default is WRITE_EMPTY.
193 "createDisposition": "A String", # [Optional] Whether to create the table if it doesn't already exist (CREATE_IF_NEEDED) or to require the table already exist (CREATE_NEVER). Default is CREATE_IF_NEEDED.
194 "query": "A String", # [Required] BigQuery SQL query to execute.
195 },
196 "copy": { # [Pick one] Copies a table.
197 "createDisposition": "A String", # [Optional] Whether or not to create a new table, if none exists.
198 "writeDisposition": "A String", # [Optional] Whether or not to append or require the table to be empty.
199 "destinationTable": { # [Required] The destination table
200 "projectId": "A String", # [Required] ID of the project billed for storage of the table.
201 "tableId": "A String", # [Required] ID of the table.
202 "datasetId": "A String", # [Required] ID of the dataset containing the table.
203 },
204 "sourceTable": { # [Required] Source table to copy.
205 "projectId": "A String", # [Required] ID of the project billed for storage of the table.
206 "tableId": "A String", # [Required] ID of the table.
207 "datasetId": "A String", # [Required] ID of the dataset containing the table.
208 },
209 },
210 "extract": { # [Pick one] Configures an extract job.
211 "destinationUri": "A String", # [Required] The fully-qualified Google Cloud Storage URI where the extracted table should be written.
212 "fieldDelimiter": "A String", # [Optional] Delimiter to use between fields in the exported data. Default is ','
213 "sourceTable": { # [Required] A reference to the table being exported.
214 "projectId": "A String", # [Required] ID of the project billed for storage of the table.
215 "tableId": "A String", # [Required] ID of the table.
216 "datasetId": "A String", # [Required] ID of the dataset containing the table.
217 },
218 "printHeader": True or False, # [Optional] Whether to print out a header row in the results. Default is true.
219 },
220 "properties": { # [Optional] Properties providing extra details about how the job should be run. Not used for most jobs.
221 "a_key": "A String", # Key-value property pairs.
222 },
223 },
224 "id": "A String", # [Output-only] Opaque ID field of the job
225 "selfLink": "A String", # [Output-only] A URL that can be used to access this resource again.
226 }</pre>
227</div>
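<p>A minimal polling sketch built on <code>get</code> (the job ID is a placeholder; it assumes the <code>jobs</code> collection and <code>PROJECT_ID</code> from the setup example above):</p>
<pre>
# Poll an asynchronous job until its state is DONE, then check for errors.
import time

JOB_ID = 'job_1234567890'  # placeholder job ID
job = jobs.get(projectId=PROJECT_ID, jobId=JOB_ID).execute()
while job['status']['state'] != 'DONE':
    time.sleep(2)
    job = jobs.get(projectId=PROJECT_ID, jobId=JOB_ID).execute()
if 'errorResult' in job['status']:
    print('Job failed: %s' % job['status']['errorResult']['message'])
</pre>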
228
229<div class="method">
230 <code class="details" id="getQueryResults">getQueryResults(projectId, jobId, timeoutMs=None, maxResults=None, startIndex=None)</code>
231 <pre>Retrieves the results of a query job.
232
233Args:
234 projectId: string, Project ID of the query job (required)
235 jobId: string, Job ID of the query job (required)
236 timeoutMs: integer, How long to wait for the query to complete, in milliseconds, before returning. Default is to return immediately. If the timeout passes before the job completes, the request will fail with a TIMEOUT error
237 maxResults: integer, Maximum number of results to read
238 startIndex: string, Zero-based index of the starting row
239
240Returns:
241 An object of the form:
242
243 {
244 "kind": "bigquery#getQueryResultsResponse", # The resource type of the response.
245 "rows": [ # An object with as many results as can be contained within the maximum permitted reply size. To get any additional rows, you can call GetQueryResults and specify the jobReference returned above. Present only when the query completes successfully.
246 {
247 "f": [ # Represents a single row in the result set, consisting of one or more fields.
248 {
249 "v": "A String", # Contains the field value in this row, as a string.
250 },
251 ],
252 },
253 ],
254 "jobReference": { # Reference to the BigQuery Job that was created to run the query. This field will be present even if the original request timed out, in which case GetQueryResults can be used to read the results once the query has completed. Since this API only returns the first page of results, subsequent pages can be fetched via the same mechanism (GetQueryResults).
255 "projectId": "A String", # [Required] Project ID being billed for the job.
256 "jobId": "A String", # [Required] ID of the job.
257 },
258 "jobComplete": True or False, # Whether the query has completed or not. If rows or totalRows are present, this will always be true. If this is false, totalRows will not be available.
259 "totalRows": "A String", # The total number of rows in the complete query result set, which can be more than the number of rows in this single page of results. Present only when the query completes successfully.
260 "etag": "A String", # A hash of this response.
261 "schema": { # The schema of the results. Present only when the query completes successfully.
262 "fields": [ # Describes the fields in a table.
263 {
264 "fields": [ # [Optional] Describes nested fields when type is RECORD.
265 # Object with schema name: TableFieldSchema
266 ],
267 "type": "A String", # [Required] Data type of the field.
268 "mode": "A String", # [Optional] Mode of the field (whether or not it can be null). Default is NULLABLE.
269 "name": "A String", # [Required] Name of the field.
270 },
271 ],
272 },
273 }</pre>
274</div>
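<p>A sketch of reading query results with <code>getQueryResults</code>, using the fields described above (it reuses the placeholder <code>PROJECT_ID</code> and <code>JOB_ID</code> from the earlier examples):</p>
<pre>
# Wait up to 10 seconds for the query job, then print each row's cell values.
results = jobs.getQueryResults(projectId=PROJECT_ID, jobId=JOB_ID,
                               timeoutMs=10000, maxResults=100).execute()
if results['jobComplete']:
    for row in results.get('rows', []):
        print([cell['v'] for cell in row['f']])
</pre>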
275
276<div class="method">
277 <code class="details" id="insert">insert(projectId, body=None, media_body=None)</code>
278 <pre>Starts a new asynchronous job.
279
280Args:
281 projectId: string, Project ID of the project that will be billed for the job (required)
282 body: object, The request body.
283 The object takes the form of:
284
285{
286 "status": { # [Output-only] The status of this job. Examine this value when polling an asynchronous job to see if the job is complete.
287 "state": "A String", # [Output-only] Running state of the job.
288 "errors": [ # [Output-only] All errors encountered during the running of the job. Errors here do not necessarily mean that the job has completed or was unsuccessful.
289 {
290 "debugInfo": "A String", # Debugging information for the service, if present. Should be ignored.
291 "message": "A String", # A human readable explanation of the error.
292 "reason": "A String", # Specifies the error reason. For example, reason will be "required" or "invalid" if some field was missing or malformed.
293 "location": "A String", # Specifies where the error occurred, if present.
294 },
295 ],
296 "errorResult": { # [Output-only] Final error result of the job. If present, indicates that the job has completed and was unsuccessful.
297 "debugInfo": "A String", # Debugging information for the service, if present. Should be ignored.
298 "message": "A String", # A human readable explanation of the error.
299 "reason": "A String", # Specifies the error reason. For example, reason will be "required" or "invalid" if some field was missing or malformed.
300 "location": "A String", # Specifies where the error occurred, if present.
301 },
302 },
303 "kind": "bigquery#job", # [Output-only] The type of the resource.
304 "statistics": { # [Output-only] Information about the job, including starting time and ending time of the job.
305 "endTime": "A String", # [Output-only] End time of this job, in milliseconds since the epoch.
306 "totalBytesProcessed": "A String", # [Output-only] Total bytes processed for this job.
307 "startTime": "A String", # [Output-only] Start time of this job, in milliseconds since the epoch.
308 },
309 "jobReference": { # [Optional] Reference describing the unique-per-user name of the job.
310 "projectId": "A String", # [Required] Project ID being billed for the job.
311 "jobId": "A String", # [Required] ID of the job.
312 },
313 "etag": "A String", # [Output-only] A hash of this resource.
314 "configuration": { # [Required] Describes the job configuration.
315 "load": { # [Pick one] Configures a load job.
316 "encoding": "A String", # [Optional] Character encoding of the input data. May be UTF-8 or ISO-8859-1. Default is UTF-8.
317 "fieldDelimiter": "A String", # [Optional] Delimiter to use between fields in the import data. Default is ','. Note that delimiters are applied to the raw, binary data before the encoding is applied.
318 "destinationTable": { # [Required] Table being written to.
319 "projectId": "A String", # [Required] ID of the project billed for storage of the table.
320 "tableId": "A String", # [Required] ID of the table.
321 "datasetId": "A String", # [Required] ID of the dataset containing the table.
322 },
323 "writeDisposition": "A String", # [Optional] Whether to overwrite an existing table (WRITE_TRUNCATE), append to an existing table (WRITE_APPEND), or require that the table is empty (WRITE_EMPTY). Default is WRITE_APPEND.
324 "maxBadRecords": 42, # [Optional] Maximum number of bad records that should be ignored before the entire job is aborted and no updates are performed.
325 "skipLeadingRows": 42, # [Optional] Number of rows of initial data to skip in the data being imported.
326 "sourceUris": [ # [Required] Source URIs describing Google Cloud Storage locations of data to load.
327 "A String",
328 ],
329 "quote": "A String", # [Optional] Quote character to use. Default is '"'. Note that quoting is done on the raw, binary data before the encoding is applied.
330 "createDisposition": "A String", # [Optional] Whether to create the table if it doesn't already exist (CREATE_IF_NEEDED) or to require the table already exist (CREATE_NEVER). Default is CREATE_IF_NEEDED.
331 "schemaInlineFormat": "A String", # [Experimental] Format of inlineSchema field.
332 "schemaInline": "A String", # [Experimental] Inline schema. For CSV schemas, specify as "Field1:Type1[,Field2:Type2]*". For example, "foo:STRING, bar:INTEGER, baz:FLOAT"
333 "allowQuotedNewlines": True or False, # [Experimental] Whether to allow quoted newlines in the source CSV data.
334 "schema": { # [Optional] Schema of the table being written to.
335 "fields": [ # Describes the fields in a table.
336 {
337 "fields": [ # [Optional] Describes nested fields when type is RECORD.
338 # Object with schema name: TableFieldSchema
339 ],
340 "type": "A String", # [Required] Data type of the field.
341 "mode": "A String", # [Optional] Mode of the field (whether or not it can be null). Default is NULLABLE.
342 "name": "A String", # [Required] Name of the field.
343 },
344 ],
345 },
346 },
347 "link": { # [Pick one] Configures a link job.
348 "createDisposition": "A String", # [Optional] Whether or not to create a new table, if none exists.
349 "writeDisposition": "A String", # [Optional] Whether to overwrite an existing table (WRITE_TRUNCATE), append to an existing table (WRITE_APPEND), or require that the table is empty (WRITE_EMPTY). Default is WRITE_APPEND.
350 "destinationTable": { # [Required] The destination table of the link job.
351 "projectId": "A String", # [Required] ID of the project billed for storage of the table.
352 "tableId": "A String", # [Required] ID of the table.
353 "datasetId": "A String", # [Required] ID of the dataset containing the table.
354 },
355 "sourceUri": [ # [Required] URI of source table to link.
356 "A String",
357 ],
358 },
359 "query": { # [Pick one] Configures a query job.
360 "defaultDataset": { # [Optional] Specifies the default dataset to assume for unqualified table names in the query.
361 "projectId": "A String", # [Optional] The ID of the container project.
362 "datasetId": "A String", # [Required] A unique ID for this dataset, without the project name.
363 },
364 "destinationTable": { # [Optional] Describes the table where the query results should be stored. If not present, a new table will be created to store the results.
365 "projectId": "A String", # [Required] ID of the project billed for storage of the table.
366 "tableId": "A String", # [Required] ID of the table.
367 "datasetId": "A String", # [Required] ID of the dataset containing the table.
368 },
369 "priority": "A String", # [Experimental] Specifies a priority for the query. Default is INTERACTIVE. Alternative is BATCH, which may be subject to looser quota restrictions.
370 "writeDisposition": "A String", # [Optional] Whether to overwrite an existing table (WRITE_TRUNCATE), append to an existing table (WRITE_APPEND), or require that the table is empty (WRITE_EMPTY). Default is WRITE_EMPTY.
371 "createDisposition": "A String", # [Optional] Whether to create the table if it doesn't already exist (CREATE_IF_NEEDED) or to require the table already exist (CREATE_NEVER). Default is CREATE_IF_NEEDED.
372 "query": "A String", # [Required] BigQuery SQL query to execute.
373 },
374 "copy": { # [Pick one] Copies a table.
375 "createDisposition": "A String", # [Optional] Whether or not to create a new table, if none exists.
376 "writeDisposition": "A String", # [Optional] Whether or not to append or require the table to be empty.
377 "destinationTable": { # [Required] The destination table
378 "projectId": "A String", # [Required] ID of the project billed for storage of the table.
379 "tableId": "A String", # [Required] ID of the table.
380 "datasetId": "A String", # [Required] ID of the dataset containing the table.
381 },
382 "sourceTable": { # [Required] Source table to copy.
383 "projectId": "A String", # [Required] ID of the project billed for storage of the table.
384 "tableId": "A String", # [Required] ID of the table.
385 "datasetId": "A String", # [Required] ID of the dataset containing the table.
386 },
387 },
388 "extract": { # [Pick one] Configures an extract job.
389 "destinationUri": "A String", # [Required] The fully-qualified Google Cloud Storage URI where the extracted table should be written.
390 "fieldDelimiter": "A String", # [Optional] Delimiter to use between fields in the exported data. Default is ','
391 "sourceTable": { # [Required] A reference to the table being exported.
392 "projectId": "A String", # [Required] ID of the project billed for storage of the table.
393 "tableId": "A String", # [Required] ID of the table.
394 "datasetId": "A String", # [Required] ID of the dataset containing the table.
395 },
396 "printHeader": True or False, # [Optional] Whether to print out a header row in the results. Default is true.
397 },
398 "properties": { # [Optional] Properties providing extra details about how the job should be run. Not used for most jobs.
399 "a_key": "A String", # Key-value property pairs.
400 },
401 },
402 "id": "A String", # [Output-only] Opaque ID field of the job
403 "selfLink": "A String", # [Output-only] A URL that can be used to access this resource again.
404 }
405
406 media_body: string, The filename of the media request body, or an instance of a MediaUpload object.
407
408Returns:
409 An object of the form:
410
411 {
412 "status": { # [Output-only] The status of this job. Examine this value when polling an asynchronous job to see if the job is complete.
413 "state": "A String", # [Output-only] Running state of the job.
414 "errors": [ # [Output-only] All errors encountered during the running of the job. Errors here do not necessarily mean that the job has completed or was unsuccessful.
415 {
416 "debugInfo": "A String", # Debugging information for the service, if present. Should be ignored.
417 "message": "A String", # A human readable explanation of the error.
418 "reason": "A String", # Specifies the error reason. For example, reason will be "required" or "invalid" if some field was missing or malformed.
419 "location": "A String", # Specifies where the error occurred, if present.
420 },
421 ],
422 "errorResult": { # [Output-only] Final error result of the job. If present, indicates that the job has completed and was unsuccessful.
423 "debugInfo": "A String", # Debugging information for the service, if present. Should be ignored.
424 "message": "A String", # A human readable explanation of the error.
425 "reason": "A String", # Specifies the error reason. For example, reason will be "required" or "invalid" if some field was missing or malformed.
426 "location": "A String", # Specifies where the error occurred, if present.
427 },
428 },
429 "kind": "bigquery#job", # [Output-only] The type of the resource.
430 "statistics": { # [Output-only] Information about the job, including starting time and ending time of the job.
431 "endTime": "A String", # [Output-only] End time of this job, in milliseconds since the epoch.
432 "totalBytesProcessed": "A String", # [Output-only] Total bytes processed for this job.
433 "startTime": "A String", # [Output-only] Start time of this job, in milliseconds since the epoch.
434 },
435 "jobReference": { # [Optional] Reference describing the unique-per-user name of the job.
436 "projectId": "A String", # [Required] Project ID being billed for the job.
437 "jobId": "A String", # [Required] ID of the job.
438 },
439 "etag": "A String", # [Output-only] A hash of this resource.
440 "configuration": { # [Required] Describes the job configuration.
441 "load": { # [Pick one] Configures a load job.
442 "encoding": "A String", # [Optional] Character encoding of the input data. May be UTF-8 or ISO-8859-1. Default is UTF-8.
443 "fieldDelimiter": "A String", # [Optional] Delimiter to use between fields in the import data. Default is ','. Note that delimiters are applied to the raw, binary data before the encoding is applied.
444 "destinationTable": { # [Required] Table being written to.
445 "projectId": "A String", # [Required] ID of the project billed for storage of the table.
446 "tableId": "A String", # [Required] ID of the table.
447 "datasetId": "A String", # [Required] ID of the dataset containing the table.
448 },
449 "writeDisposition": "A String", # [Optional] Whether to overwrite an existing table (WRITE_TRUNCATE), append to an existing table (WRITE_APPEND), or require that the table is empty (WRITE_EMPTY). Default is WRITE_APPEND.
450 "maxBadRecords": 42, # [Optional] Maximum number of bad records that should be ignored before the entire job is aborted and no updates are performed.
451 "skipLeadingRows": 42, # [Optional] Number of rows of initial data to skip in the data being imported.
452 "sourceUris": [ # [Required] Source URIs describing Google Cloud Storage locations of data to load.
453 "A String",
454 ],
455 "quote": "A String", # [Optional] Quote character to use. Default is '"'. Note that quoting is done on the raw, binary data before the encoding is applied.
456 "createDisposition": "A String", # [Optional] Whether to create the table if it doesn't already exist (CREATE_IF_NEEDED) or to require the table already exist (CREATE_NEVER). Default is CREATE_IF_NEEDED.
457 "schemaInlineFormat": "A String", # [Experimental] Format of inlineSchema field.
458 "schemaInline": "A String", # [Experimental] Inline schema. For CSV schemas, specify as "Field1:Type1[,Field2:Type2]*". For example, "foo:STRING, bar:INTEGER, baz:FLOAT"
459 "allowQuotedNewlines": True or False, # [Experimental] Whether to allow quoted newlines in the source CSV data.
460 "schema": { # [Optional] Schema of the table being written to.
461 "fields": [ # Describes the fields in a table.
462 {
463 "fields": [ # [Optional] Describes nested fields when type is RECORD.
464 # Object with schema name: TableFieldSchema
465 ],
466 "type": "A String", # [Required] Data type of the field.
467 "mode": "A String", # [Optional] Mode of the field (whether or not it can be null). Default is NULLABLE.
468 "name": "A String", # [Required] Name of the field.
469 },
470 ],
471 },
472 },
473 "link": { # [Pick one] Configures a link job.
474 "createDisposition": "A String", # [Optional] Whether or not to create a new table, if none exists.
475 "writeDisposition": "A String", # [Optional] Whether to overwrite an existing table (WRITE_TRUNCATE), append to an existing table (WRITE_APPEND), or require that the table is empty (WRITE_EMPTY). Default is WRITE_APPEND.
476 "destinationTable": { # [Required] The destination table of the link job.
477 "projectId": "A String", # [Required] ID of the project billed for storage of the table.
478 "tableId": "A String", # [Required] ID of the table.
479 "datasetId": "A String", # [Required] ID of the dataset containing the table.
480 },
481 "sourceUri": [ # [Required] URI of source table to link.
482 "A String",
483 ],
484 },
485 "query": { # [Pick one] Configures a query job.
486 "defaultDataset": { # [Optional] Specifies the default dataset to assume for unqualified table names in the query.
487 "projectId": "A String", # [Optional] The ID of the container project.
488 "datasetId": "A String", # [Required] A unique ID for this dataset, without the project name.
489 },
490 "destinationTable": { # [Optional] Describes the table where the query results should be stored. If not present, a new table will be created to store the results.
491 "projectId": "A String", # [Required] ID of the project billed for storage of the table.
492 "tableId": "A String", # [Required] ID of the table.
493 "datasetId": "A String", # [Required] ID of the dataset containing the table.
494 },
495 "priority": "A String", # [Experimental] Specifies a priority for the query. Default is INTERACTIVE. Alternative is BATCH, which may be subject to looser quota restrictions.
496 "writeDisposition": "A String", # [Optional] Whether to overwrite an existing table (WRITE_TRUNCATE), append to an existing table (WRITE_APPEND), or require that the table is empty (WRITE_EMPTY). Default is WRITE_EMPTY.
497 "createDisposition": "A String", # [Optional] Whether to create the table if it doesn't already exist (CREATE_IF_NEEDED) or to require the table already exist (CREATE_NEVER). Default is CREATE_IF_NEEDED.
498 "query": "A String", # [Required] BigQuery SQL query to execute.
499 },
500 "copy": { # [Pick one] Copies a table.
501 "createDisposition": "A String", # [Optional] Whether or not to create a new table, if none exists.
502 "writeDisposition": "A String", # [Optional] Whether or not to append or require the table to be empty.
503 "destinationTable": { # [Required] The destination table
504 "projectId": "A String", # [Required] ID of the project billed for storage of the table.
505 "tableId": "A String", # [Required] ID of the table.
506 "datasetId": "A String", # [Required] ID of the dataset containing the table.
507 },
508 "sourceTable": { # [Required] Source table to copy.
509 "projectId": "A String", # [Required] ID of the project billed for storage of the table.
510 "tableId": "A String", # [Required] ID of the table.
511 "datasetId": "A String", # [Required] ID of the dataset containing the table.
512 },
513 },
514 "extract": { # [Pick one] Configures an extract job.
515 "destinationUri": "A String", # [Required] The fully-qualified Google Cloud Storage URI where the extracted table should be written.
516 "fieldDelimiter": "A String", # [Optional] Delimiter to use between fields in the exported data. Default is ','
517 "sourceTable": { # [Required] A reference to the table being exported.
518 "projectId": "A String", # [Required] ID of the project billed for storage of the table.
519 "tableId": "A String", # [Required] ID of the table.
520 "datasetId": "A String", # [Required] ID of the dataset containing the table.
521 },
522 "printHeader": True or False, # [Optional] Whether to print out a header row in the results. Default is true.
523 },
524 "properties": { # [Optional] Properties providing extra details about how the job should be run. Not used for most jobs.
525 "a_key": "A String", # Key-value property pairs.
526 },
527 },
528 "id": "A String", # [Output-only] Opaque ID field of the job
529 "selfLink": "A String", # [Output-only] A URL that can be used to access this resource again.
530 }</pre>
531</div>
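<p>A sketch of starting a load job with <code>insert</code>; the bucket, dataset, and table names are placeholders, and only a small subset of the configuration fields documented above is filled in:</p>
<pre>
# Start an asynchronous load job from a CSV file in Google Cloud Storage.
body = {
    'configuration': {
        'load': {
            'sourceUris': ['gs://my-bucket/data.csv'],
            'destinationTable': {
                'projectId': PROJECT_ID,
                'datasetId': 'my_dataset',
                'tableId': 'my_table',
            },
            'schemaInline': 'name:STRING, age:INTEGER',
            'skipLeadingRows': 1,
            'writeDisposition': 'WRITE_APPEND',
        },
    },
}
load_job = jobs.insert(projectId=PROJECT_ID, body=body).execute()
print(load_job['jobReference']['jobId'])
</pre>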
532
533<div class="method">
534 <code class="details" id="list">list(projectId, projection=None, stateFilter=None, pageToken=None, allUsers=None, maxResults=None)</code>
535 <pre>Lists all the Jobs in the specified project that were started by the user.
536
537Args:
538 projectId: string, Project ID of the jobs to list (required)
539 projection: string, Restrict information returned to a set of selected fields
540 Allowed values
541 full - Includes all job data
542 minimal - Does not include the job configuration
543 stateFilter: string, Filter for job state (repeated)
544 Allowed values
545 done - Finished jobs
546 pending - Pending jobs
547 running - Running jobs
548 pageToken: string, Page token, returned by a previous call, to request the next page of results
549 allUsers: boolean, Whether to display jobs owned by all users in the project. Default false
550 maxResults: integer, Maximum number of results to return
551
552Returns:
553 An object of the form:
554
555 {
556 "nextPageToken": "A String", # A token to request the next page of results.
557 "totalItems": 42, # Total number of jobs in this collection.
558 "kind": "bigquery#jobList", # The resource type of the response.
559 "etag": "A String", # A hash of this page of results.
560 "jobs": [ # List of jobs that were requested.
561 {
562 "status": { # [Full-projection-only] Describes the state of the job.
563 "state": "A String", # [Output-only] Running state of the job.
564 "errors": [ # [Output-only] All errors encountered during the running of the job. Errors here do not necessarily mean that the job has completed or was unsuccessful.
565 {
566 "debugInfo": "A String", # Debugging information for the service, if present. Should be ignored.
567 "message": "A String", # A human readable explanation of the error.
568 "reason": "A String", # Specifies the error reason. For example, reason will be "required" or "invalid" if some field was missing or malformed.
569 "location": "A String", # Specifies where the error occurred, if present.
570 },
571 ],
572 "errorResult": { # [Output-only] Final error result of the job. If present, indicates that the job has completed and was unsuccessful.
573 "debugInfo": "A String", # Debugging information for the service, if present. Should be ignored.
574 "message": "A String", # A human readable explanation of the error.
575 "reason": "A String", # Specifies the error reason. For example, reason will be "required" or "invalid" if some field was missing or malformed.
576 "location": "A String", # Specifies where the error occurred, if present.
577 },
578 },
579 "kind": "bigquery#job", # The resource type.
580 "statistics": { # [Output-only] Information about the job, including starting time and ending time of the job.
581 "endTime": "A String", # [Output-only] End time of this job, in milliseconds since the epoch.
582 "totalBytesProcessed": "A String", # [Output-only] Total bytes processed for this job.
583 "startTime": "A String", # [Output-only] Start time of this job, in milliseconds since the epoch.
584 },
585 "jobReference": { # Job reference uniquely identifying the job.
586 "projectId": "A String", # [Required] Project ID being billed for the job.
587 "jobId": "A String", # [Required] ID of the job.
588 },
589 "state": "A String", # Running state of the job. When the state is DONE, errorResult can be checked to determine whether the job succeeded or failed.
590 "configuration": { # [Full-projection-only] Specifies the job configuration.
591 "load": { # [Pick one] Configures a load job.
592 "encoding": "A String", # [Optional] Character encoding of the input data. May be UTF-8 or ISO-8859-1. Default is UTF-8.
593 "fieldDelimiter": "A String", # [Optional] Delimiter to use between fields in the import data. Default is ','. Note that delimiters are applied to the raw, binary data before the encoding is applied.
594 "destinationTable": { # [Required] Table being written to.
595 "projectId": "A String", # [Required] ID of the project billed for storage of the table.
596 "tableId": "A String", # [Required] ID of the table.
597 "datasetId": "A String", # [Required] ID of the dataset containing the table.
598 },
599 "writeDisposition": "A String", # [Optional] Whether to overwrite an existing table (WRITE_TRUNCATE), append to an existing table (WRITE_APPEND), or require that the table is empty (WRITE_EMPTY). Default is WRITE_APPEND.
600 "maxBadRecords": 42, # [Optional] Maximum number of bad records that should be ignored before the entire job is aborted and no updates are performed.
601 "skipLeadingRows": 42, # [Optional] Number of rows of initial data to skip in the data being imported.
602 "sourceUris": [ # [Required] Source URIs describing Google Cloud Storage locations of data to load.
603 "A String",
604 ],
605 "quote": "A String", # [Optional] Quote character to use. Default is '"'. Note that quoting is done on the raw, binary data before the encoding is applied.
606 "createDisposition": "A String", # [Optional] Whether to create the table if it doesn't already exist (CREATE_IF_NEEDED) or to require the table already exist (CREATE_NEVER). Default is CREATE_IF_NEEDED.
607 "schemaInlineFormat": "A String", # [Experimental] Format of inlineSchema field.
608 "schemaInline": "A String", # [Experimental] Inline schema. For CSV schemas, specify as "Field1:Type1[,Field2:Type2]*". For example, "foo:STRING, bar:INTEGER, baz:FLOAT"
609 "allowQuotedNewlines": True or False, # [Experimental] Whether to allow quoted newlines in the source CSV data.
610 "schema": { # [Optional] Schema of the table being written to.
611 "fields": [ # Describes the fields in a table.
612 {
613 "fields": [ # [Optional] Describes nested fields when type is RECORD.
614 # Object with schema name: TableFieldSchema
615 ],
616 "type": "A String", # [Required] Data type of the field.
617 "mode": "A String", # [Optional] Mode of the field (whether or not it can be null). Default is NULLABLE.
618 "name": "A String", # [Required] Name of the field.
619 },
620 ],
621 },
622 },
623 "link": { # [Pick one] Configures a link job.
624 "createDisposition": "A String", # [Optional] Whether or not to create a new table, if none exists.
625 "writeDisposition": "A String", # [Optional] Whether to overwrite an existing table (WRITE_TRUNCATE), append to an existing table (WRITE_APPEND), or require that the table is empty (WRITE_EMPTY). Default is WRITE_APPEND.
626 "destinationTable": { # [Required] The destination table of the link job.
627 "projectId": "A String", # [Required] ID of the project billed for storage of the table.
628 "tableId": "A String", # [Required] ID of the table.
629 "datasetId": "A String", # [Required] ID of the dataset containing the table.
630 },
631 "sourceUri": [ # [Required] URI of source table to link.
632 "A String",
633 ],
634 },
635 "query": { # [Pick one] Configures a query job.
636 "defaultDataset": { # [Optional] Specifies the default dataset to assume for unqualified table names in the query.
637 "projectId": "A String", # [Optional] The ID of the container project.
638 "datasetId": "A String", # [Required] A unique ID for this dataset, without the project name.
639 },
640 "destinationTable": { # [Optional] Describes the table where the query results should be stored. If not present, a new table will be created to store the results.
641 "projectId": "A String", # [Required] ID of the project billed for storage of the table.
642 "tableId": "A String", # [Required] ID of the table.
643 "datasetId": "A String", # [Required] ID of the dataset containing the table.
644 },
645 "priority": "A String", # [Experimental] Specifies a priority for the query. Default is INTERACTIVE. Alternative is BATCH, which may be subject to looser quota restrictions.
646 "writeDisposition": "A String", # [Optional] Whether to overwrite an existing table (WRITE_TRUNCATE), append to an existing table (WRITE_APPEND), or require that the table is empty (WRITE_EMPTY). Default is WRITE_EMPTY.
647 "createDisposition": "A String", # [Optional] Whether to create the table if it doesn't already exist (CREATE_IF_NEEDED) or to require the table already exist (CREATE_NEVER). Default is CREATE_IF_NEEDED.
648 "query": "A String", # [Required] BigQuery SQL query to execute.
649 },
650 "copy": { # [Pick one] Copies a table.
651 "createDisposition": "A String", # [Optional] Whether or not to create a new table, if none exists.
652 "writeDisposition": "A String", # [Optional] Whether or not to append or require the table to be empty.
653 "destinationTable": { # [Required] The destination table
654 "projectId": "A String", # [Required] ID of the project billed for storage of the table.
655 "tableId": "A String", # [Required] ID of the table.
656 "datasetId": "A String", # [Required] ID of the dataset containing the table.
657 },
658 "sourceTable": { # [Required] Source table to copy.
659 "projectId": "A String", # [Required] ID of the project billed for storage of the table.
660 "tableId": "A String", # [Required] ID of the table.
661 "datasetId": "A String", # [Required] ID of the dataset containing the table.
662 },
663 },
664 "extract": { # [Pick one] Configures an extract job.
665 "destinationUri": "A String", # [Required] The fully-qualified Google Cloud Storage URI where the extracted table should be written.
666 "fieldDelimiter": "A String", # [Optional] Delimiter to use between fields in the exported data. Default is ','
667 "sourceTable": { # [Required] A reference to the table being exported.
668 "projectId": "A String", # [Required] ID of the project billed for storage of the table.
669 "tableId": "A String", # [Required] ID of the table.
670 "datasetId": "A String", # [Required] ID of the dataset containing the table.
671 },
672 "printHeader": True or False, # [Optional] Whether to print out a header row in the results. Default is true.
673 },
674 "properties": { # [Optional] Properties providing extra details about how the job should be run. Not used for most jobs.
675 "a_key": "A String", # Key-value property pairs.
676 },
677 },
678 "id": "A String", # Unique opaque ID of the job.
679 "errorResult": { # A result object that will be present only if the job has failed.
680 "debugInfo": "A String", # Debugging information for the service, if present. Should be ignored.
681 "message": "A String", # A human readable explanation of the error.
682 "reason": "A String", # Specifies the error reason. For example, reason will be "required" or "invalid" if some field was missing or malformed.
683 "location": "A String", # Specifies where the error occurred, if present.
684 },
685 },
686 ],
687 }</pre>
688</div>
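<p>A sketch of listing jobs with the filters documented above; the values are illustrative, and passing a Python list for the repeated <code>stateFilter</code> parameter is an assumption about the generated client:</p>
<pre>
# List minimal information about pending and running jobs in the project.
response = jobs.list(projectId=PROJECT_ID, projection='minimal',
                     stateFilter=['pending', 'running'], maxResults=50).execute()
for job in response.get('jobs', []):
    print('%s %s' % (job['jobReference']['jobId'], job.get('state', '')))
</pre>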
689
690<div class="method">
691 <code class="details" id="list_next">list_next(previous_request, previous_response)</code>
692 <pre>Retrieves the next page of results.
693
694Args:
695 previous_request: The request for the previous page. (required)
696 previous_response: The response from the request for the previous page. (required)
697
698Returns:
699 A request object that you can call 'execute()' on to request the next
700 page. Returns None if there are no more items in the collection.
701 </pre>
702</div>
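<p>A paging sketch that combines <code>list</code> and <code>list_next</code> (placeholder project ID as above):</p>
<pre>
# Iterate over every page of jobs until list_next() returns None.
request = jobs.list(projectId=PROJECT_ID, maxResults=50)
while request is not None:
    response = request.execute()
    for job in response.get('jobs', []):
        print(job['jobReference']['jobId'])
    request = jobs.list_next(request, response)
</pre>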
703
704<div class="method">
705 <code class="details" id="query">query(projectId, body)</code>
706 <pre>Runs a BigQuery SQL query synchronously and returns query results if the query completes within a specified timeout.
707
708Args:
709 projectId: string, Project ID of the project billed for the query (required)
710 body: object, The request body. (required)
711 The object takes the form of:
712
713{
714 "timeoutMs": 42, # [Optional] How long to wait for the query to complete, in milliseconds, before returning. Default is to return immediately. If the timeout passes before the job completes, the request will fail with a TIMEOUT error.
715 "kind": "bigquery#queryRequest", # The resource type of the request.
716 "dryRun": True or False, # [Optional] If set, don't actually run the query. A valid query will return an empty response, while an invalid query will return the same error it would if it wasn't a dry run.
717 "defaultDataset": { # [Optional] Specifies the default datasetId and projectId to assume for any unqualified table names in the query. If not set, all table names in the query string must be fully-qualified in the format projectId:datasetId.tableid.
718 "projectId": "A String", # [Optional] The ID of the container project.
719 "datasetId": "A String", # [Required] A unique ID for this dataset, without the project name.
720 },
721 "maxResults": 42, # [Optional] The maximum number of results to return per page of results. If the response list exceeds the maximum response size for a single response, you will have to page through the results. Default is to return the maximum response size.
722 "query": "A String", # [Required] A query string, following the BigQuery query syntax of the query to execute. Table names should be qualified by dataset name in the format projectId:datasetId.tableId unless you specify the defaultDataset value. If the table is in the same project as the job, you can omit the project ID. Example: SELECT f1 FROM myProjectId:myDatasetId.myTableId.
Joe Gregorio075572b2012-07-09 16:53:09 -0400723 }
724
725
726Returns:
727 An object of the form:
728
729 {
730 "kind": "bigquery#queryResponse", # The resource type.
731 "rows": [ # An object with as many results as can be contained within the maximum permitted reply size. To get any additional rows, you can call GetQueryResults and specify the jobReference returned above.
732 {
733 "f": [ # Represents a single row in the result set, consisting of one or more fields.
734 {
735 "v": "A String", # Contains the field value in this row, as a string.
736 },
737 ],
738 },
739 ],
740 "jobReference": { # Reference to the Job that was created to run the query. This field will be present even if the original request timed out, in which case GetQueryResults can be used to read the results once the query has completed. Since this API only returns the first page of results, subsequent pages can be fetched via the same mechanism (GetQueryResults).
741 "projectId": "A String", # [Required] Project ID being billed for the job.
742 "jobId": "A String", # [Required] ID of the job.
743 },
744 "jobComplete": True or False, # Whether the query has completed or not. If rows or totalRows are present, this will always be true. If this is false, totalRows will not be available.
745 "totalRows": "A String", # The total number of rows in the complete query result set, which can be more than the number of rows in this single page of results.
746 "schema": { # The schema of the results. Present only when the query completes successfully.
747 "fields": [ # Describes the fields in a table.
748 {
749 "fields": [ # [Optional] Describes nested fields when type is RECORD.
750 # Object with schema name: TableFieldSchema
751 ],
752 "type": "A String", # [Required] Data type of the field.
753 "mode": "A String", # [Optional] Mode of the field (whether or not it can be null). Default is NULLABLE.
754 "name": "A String", # [Required] Name of the field.
755 },
756 ],
757 },
758 }</pre>
759</div>
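<p>A synchronous query sketch; the query text is illustrative, and the fallback to <code>getQueryResults</code> follows the jobReference behavior described above:</p>
<pre>
# Run a query, waiting up to 10 seconds; finish reading via getQueryResults if needed.
body = {
    'query': 'SELECT word, word_count FROM publicdata:samples.shakespeare LIMIT 10',
    'timeoutMs': 10000,
}
result = jobs.query(projectId=PROJECT_ID, body=body).execute()
if not result['jobComplete']:
    ref = result['jobReference']
    result = jobs.getQueryResults(projectId=ref['projectId'],
                                  jobId=ref['jobId']).execute()
for row in result.get('rows', []):
    print([cell['v'] for cell in row['f']])
</pre>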
760
761</body></html>