# Copyright 2013 The Android Open Source Project
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#      http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import its.error
import os
import os.path
import sys
import re
import json
import time
import unittest
import socket
import subprocess
import hashlib
import numpy

class ItsSession(object):
    """Controls a device over adb to run ITS scripts.

    The script importing this module (on the host machine) prepares JSON
    objects encoding CaptureRequests, specifying sets of parameters to use
    when capturing an image using the Camera2 APIs. This class encapsulates
    sending the requests to the device, monitoring the device's progress, and
    copying the resultant captures back to the host machine when done. The
    transport mechanism is TCP forwarded over adb.

    The device must have CtsVerifier.apk installed.

    Attributes:
        sock: The open socket.
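
    Example usage (a minimal sketch of how an ITS test script drives this
    class; assumes this module is imported as its.device):

        with its.device.ItsSession() as cam:
            props = cam.get_camera_properties()
            req = {"android.sensor.exposureTime": 100*1000*1000,
                   "android.sensor.sensitivity": 100}
            cap = cam.do_capture(req)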
    """

    # Open a connection to localhost:6000, forwarded to port 6000 on the device.
    # TODO: Support multiple devices running over different TCP ports.
    IPADDR = '127.0.0.1'
    PORT = 6000
    BUFFER_SIZE = 4096

    # Seconds timeout on each socket operation.
    SOCK_TIMEOUT = 10.0

    PACKAGE = 'com.android.cts.verifier.camera.its'
    INTENT_START = 'com.android.cts.verifier.camera.its.START'
    ACTION_ITS_RESULT = 'com.android.cts.verifier.camera.its.ACTION_ITS_RESULT'
    EXTRA_CAMERA_ID = 'camera.its.extra.CAMERA_ID'
    EXTRA_SUCCESS = 'camera.its.extra.SUCCESS'
    EXTRA_SUMMARY = 'camera.its.extra.SUMMARY'

    # TODO: Handle multiple connected devices.
    ADB = "adb -d"

    # Definitions for some of the common output format options for do_capture().
    # Each gets images of full resolution for each requested format.
    CAP_RAW = {"format":"raw"}
    CAP_DNG = {"format":"dng"}
    CAP_YUV = {"format":"yuv"}
    CAP_JPEG = {"format":"jpeg"}
    CAP_RAW_YUV = [{"format":"raw"}, {"format":"yuv"}]
    CAP_DNG_YUV = [{"format":"dng"}, {"format":"yuv"}]
    CAP_RAW_JPEG = [{"format":"raw"}, {"format":"jpeg"}]
    CAP_DNG_JPEG = [{"format":"dng"}, {"format":"jpeg"}]
    CAP_YUV_JPEG = [{"format":"yuv"}, {"format":"jpeg"}]
    CAP_RAW_YUV_JPEG = [{"format":"raw"}, {"format":"yuv"}, {"format":"jpeg"}]
    CAP_DNG_YUV_JPEG = [{"format":"dng"}, {"format":"yuv"}, {"format":"jpeg"}]
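    # For example, passing CAP_RAW_YUV as the out_surfaces argument of
    # do_capture() requests one full-resolution raw image and one
    # full-resolution YUV image for each capture.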

    # Method to handle the case where the service isn't already running.
    # This occurs when a test is invoked directly from the command line, rather
    # than as a part of a separate test harness which is setting up the device
    # and the TCP forwarding.
    def __pre_init(self):

        # This also includes the optional reboot handling: if the user
        # provides a "reboot" or "reboot=N" arg, then reboot the device,
        # waiting for N seconds (default 30) before returning.
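        # For example, a test may be invoked as (the script name here is
        # illustrative): python its_test.py camera=1 reboot=20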
        for s in sys.argv[1:]:
            if s[:6] == "reboot":
                duration = 30
                if len(s) > 7 and s[6] == "=":
                    duration = int(s[7:])
                print "Rebooting device"
                _run("%s reboot" % (ItsSession.ADB))
                _run("%s wait-for-device" % (ItsSession.ADB))
                time.sleep(duration)
                print "Reboot complete"

        # TODO: Figure out why "--user 0" is needed, and fix the problem.
        _run('%s shell am force-stop --user 0 %s' % (ItsSession.ADB, self.PACKAGE))
        _run(('%s shell am startservice --user 0 -t text/plain '
              '-a %s') % (ItsSession.ADB, self.INTENT_START))

        # Wait until the socket is ready to accept a connection.
        proc = subprocess.Popen(
                ItsSession.ADB.split() + ["logcat"],
                stdout=subprocess.PIPE)
        logcat = proc.stdout
        while True:
            line = logcat.readline().strip()
            if line.find('ItsService ready') >= 0:
                break
        proc.kill()

        # Setup the TCP-over-ADB forwarding.
        _run('%s forward tcp:%d tcp:%d' % (ItsSession.ADB, self.PORT, self.PORT))

    def __init__(self):
        if "noinit" not in sys.argv:
            self.__pre_init()
        self.sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        self.sock.connect((self.IPADDR, self.PORT))
        self.sock.settimeout(self.SOCK_TIMEOUT)
        self.__close_camera()
        self.__open_camera()

    def __del__(self):
        if hasattr(self, 'sock') and self.sock:
            self.__close_camera()
            self.sock.close()

    def __enter__(self):
        return self

    def __exit__(self, type, value, traceback):
        return False

    def __read_response_from_socket(self):
        # Read a line (newline-terminated) string serialization of JSON object.
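        # The wire format (as consumed below) is one JSON object per line,
        # e.g. (illustrative): {"tag": "cameraOpened"}\n, optionally followed
        # by a raw binary buffer whose length is given by the JSON object's
        # "bufValueSize" field.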
        chars = []
        while len(chars) == 0 or chars[-1] != '\n':
            ch = self.sock.recv(1)
            if len(ch) == 0:
                # Socket was probably closed; otherwise don't get empty strings
                raise its.error.Error('Problem with socket on device side')
            chars.append(ch)
        line = ''.join(chars)
        jobj = json.loads(line)
        # Optionally read a binary buffer of a fixed size.
        buf = None
        if jobj.has_key("bufValueSize"):
            n = jobj["bufValueSize"]
            buf = bytearray(n)
            view = memoryview(buf)
            while n > 0:
                nbytes = self.sock.recv_into(view, n)
                view = view[nbytes:]
                n -= nbytes
            buf = numpy.frombuffer(buf, dtype=numpy.uint8)
        return jobj, buf

    def __open_camera(self):
        # Get the camera ID to open as an argument.
        camera_id = 0
        for s in sys.argv[1:]:
            if s[:7] == "camera=" and len(s) > 7:
                camera_id = int(s[7:])
        cmd = {"cmdName":"open", "cameraId":camera_id}
        self.sock.send(json.dumps(cmd) + "\n")
        data,_ = self.__read_response_from_socket()
        if data['tag'] != 'cameraOpened':
            raise its.error.Error('Invalid command response')

    def __close_camera(self):
        cmd = {"cmdName":"close"}
        self.sock.send(json.dumps(cmd) + "\n")
        data,_ = self.__read_response_from_socket()
        if data['tag'] != 'cameraClosed':
            raise its.error.Error('Invalid command response')

    def do_vibrate(self, pattern):
        """Cause the device to vibrate to a specific pattern.

        Args:
            pattern: Durations (ms) for which to turn on or off the vibrator.
                The first value indicates the number of milliseconds to wait
                before turning the vibrator on. The next value indicates the
                number of milliseconds for which to keep the vibrator on
                before turning it off. Subsequent values alternate between
                durations in milliseconds to turn the vibrator off or to turn
                the vibrator on.

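        Example (illustrative values): pattern=[0, 200, 100, 200] turns the
        vibrator on for 200 ms, off for 100 ms, then on again for 200 ms.
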
        Returns:
            Nothing.
        """
        cmd = {}
        cmd["cmdName"] = "doVibrate"
        cmd["pattern"] = pattern
        self.sock.send(json.dumps(cmd) + "\n")
        data,_ = self.__read_response_from_socket()
        if data['tag'] != 'vibrationStarted':
            raise its.error.Error('Invalid command response')

    def start_sensor_events(self):
        """Start collecting sensor events on the device.

        See get_sensor_events for more info.

        Returns:
            Nothing.
        """
        cmd = {}
        cmd["cmdName"] = "startSensorEvents"
        self.sock.send(json.dumps(cmd) + "\n")
        data,_ = self.__read_response_from_socket()
        if data['tag'] != 'sensorEventsStarted':
            raise its.error.Error('Invalid command response')

    def get_sensor_events(self):
        """Get a trace of all sensor events on the device.

        The trace starts when the start_sensor_events function is called. If
        the test runs for a long time after this call, then the device's
        internal memory can fill up. Calling get_sensor_events gets all events
        from the device, and then stops the device from collecting events and
        clears the internal buffer; to start again, the start_sensor_events
        call must be used again.

        Events from the accelerometer, compass, and gyro are returned; each
        has a timestamp and x,y,z values.

        Note that sensor events are only produced if the device isn't in its
        standby mode (i.e. the screen must be on).

        Returns:
            A Python dictionary with three keys ("accel", "mag", "gyro"), each
            of which maps to a list of objects containing "time", "x", "y",
            and "z" keys.
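
        For example (illustrative; the values are arbitrary), the returned
        object may look like:

            {"accel": [{"time":1234567890, "x":0.01, "y":0.02, "z":9.81}, ...],
             "mag":   [...],
             "gyro":  [...]}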
        """
        cmd = {}
        cmd["cmdName"] = "getSensorEvents"
        self.sock.send(json.dumps(cmd) + "\n")
        data,_ = self.__read_response_from_socket()
        if data['tag'] != 'sensorEvents':
            raise its.error.Error('Invalid command response')
        return data['objValue']

    def get_camera_ids(self):
        """Get a list of camera device Ids that can be opened.

        Returns:
            A list of camera ID strings.
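            For example, ["0", "1"] on a device with a back and a front camera
            (illustrative; the actual IDs are device-dependent).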
        """
        cmd = {}
        cmd["cmdName"] = "getCameraIds"
        self.sock.send(json.dumps(cmd) + "\n")
        data,_ = self.__read_response_from_socket()
        if data['tag'] != 'cameraIds':
            raise its.error.Error('Invalid command response')
        return data['objValue']['cameraIdArray']

    def get_camera_properties(self):
        """Get the camera properties object for the device.

        Returns:
            The Python dictionary object for the CameraProperties object.
        """
        cmd = {}
        cmd["cmdName"] = "getCameraProperties"
        self.sock.send(json.dumps(cmd) + "\n")
        data,_ = self.__read_response_from_socket()
        if data['tag'] != 'cameraProperties':
            raise its.error.Error('Invalid command response')
        return data['objValue']['cameraProperties']

    def do_3a(self, regions_ae=[[0,0,1,1,1]],
              regions_awb=[[0,0,1,1,1]],
              regions_af=[[0,0,1,1,1]],
              do_ae=True, do_awb=True, do_af=True,
              lock_ae=False, lock_awb=False,
              get_results=False,
              ev_comp=0):
        """Perform a 3A operation on the device.

        Triggers some or all of AE, AWB, and AF, and returns once they have
        converged. Uses the vendor 3A that is implemented inside the HAL.

        Raises an error if 3A fails to converge.

        Args:
            regions_ae: List of weighted AE regions.
            regions_awb: List of weighted AWB regions.
            regions_af: List of weighted AF regions.
            do_ae: Trigger AE and wait for it to converge.
            do_awb: Wait for AWB to converge.
            do_af: Trigger AF and wait for it to converge.
            lock_ae: Request AE lock after convergence, and wait for it.
            lock_awb: Request AWB lock after convergence, and wait for it.
            get_results: Return the 3A results from this function.
            ev_comp: An EV compensation value to use when running AE.

        Region format in args:
            Arguments are lists of weighted regions; each weighted region is a
            list of 5 values, [x, y, w, h, wgt], and each argument is a list
            of these 5-value lists. The coordinates are given as normalized
            rectangles (x, y, w, h) specifying the region. For example:
                [[0.0, 0.0, 1.0, 0.5, 5], [0.0, 0.5, 1.0, 0.5, 10]].
            Weights are non-negative integers.

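        Example call (a minimal sketch; "cam" is an open ItsSession, and
        meaningful values are returned only when get_results is True):

            sens, exp, gains, xform, focus = cam.do_3a(get_results=True)
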
        Returns:
            Five values are returned if get_results is True:
            * AE sensitivity; None if do_ae is False
            * AE exposure time; None if do_ae is False
            * AWB gains (list); None if do_awb is False
            * AWB transform (list); None if do_awb is False
            * AF focus position; None if do_af is False
            Otherwise, it returns five None values.
        """
        print "Running vendor 3A on device"
        cmd = {}
        cmd["cmdName"] = "do3A"
        cmd["regions"] = {"ae": sum(regions_ae, []),
                          "awb": sum(regions_awb, []),
                          "af": sum(regions_af, [])}
        cmd["triggers"] = {"ae": do_ae, "af": do_af}
        if lock_ae:
            cmd["aeLock"] = True
        if lock_awb:
            cmd["awbLock"] = True
        if ev_comp != 0:
            cmd["evComp"] = ev_comp
        self.sock.send(json.dumps(cmd) + "\n")

        # Wait for each specified 3A to converge.
        ae_sens = None
        ae_exp = None
        awb_gains = None
        awb_transform = None
        af_dist = None
        converged = False
        while True:
            data,_ = self.__read_response_from_socket()
            vals = data['strValue'].split()
            if data['tag'] == 'aeResult':
                ae_sens, ae_exp = [int(i) for i in vals]
            elif data['tag'] == 'afResult':
                af_dist = float(vals[0])
            elif data['tag'] == 'awbResult':
                awb_gains = [float(f) for f in vals[:4]]
                awb_transform = [float(f) for f in vals[4:]]
            elif data['tag'] == '3aConverged':
                converged = True
            elif data['tag'] == '3aDone':
                break
            else:
                raise its.error.Error('Invalid command response')
        if converged and not get_results:
            return None,None,None,None,None
        if (do_ae and ae_sens == None or do_awb and awb_gains == None
                or do_af and af_dist == None or not converged):
            raise its.error.Error('3A failed to converge')
        return ae_sens, ae_exp, awb_gains, awb_transform, af_dist

    def do_capture(self, cap_request, out_surfaces=None):
        """Issue capture request(s), and read back the image(s) and metadata.

        The main top-level function for capturing one or more images using the
        device. Captures a single image if cap_request is a single object, and
        captures a burst if it is a list of objects.

        The out_surfaces field can specify the width(s), height(s), and
        format(s) of the captured image. The formats may be "yuv", "jpeg",
        "dng", "raw", or "raw10". The default is a YUV420 frame ("yuv")
        corresponding to a full sensor frame.

        Note that one or more surfaces can be specified, allowing a capture to
        request images back in multiple formats (e.g. raw+yuv, raw+jpeg,
        yuv+jpeg, or raw+yuv+jpeg). If the size is omitted for a surface, the
        default is the largest resolution available for the format of that
        surface. At most one output surface can be specified for a given
        format, and raw+dng, raw10+dng, and raw+raw10 are not supported as
        combinations.

        Example of a single capture request:

            {
                "android.sensor.exposureTime": 100*1000*1000,
                "android.sensor.sensitivity": 100
            }

        Example of a list of capture requests:

            [
                {
                    "android.sensor.exposureTime": 100*1000*1000,
                    "android.sensor.sensitivity": 100
                },
                {
                    "android.sensor.exposureTime": 100*1000*1000,
                    "android.sensor.sensitivity": 200
                }
            ]

        Examples of output surface specifications:

            {
                "width": 640,
                "height": 480,
                "format": "yuv"
            }

            [
                {
                    "format": "jpeg"
                },
                {
                    "format": "raw"
                }
            ]

        The following variables defined in this class are shortcuts for
        specifying one or more formats where each output is the full size for
        that format; they can be used as values for the out_surfaces arguments:

            CAP_RAW
            CAP_DNG
            CAP_YUV
            CAP_JPEG
            CAP_RAW_YUV
            CAP_DNG_YUV
            CAP_RAW_JPEG
            CAP_DNG_JPEG
            CAP_YUV_JPEG
            CAP_RAW_YUV_JPEG
            CAP_DNG_YUV_JPEG

        If multiple formats are specified, then this function returns multiple
        capture objects, one for each requested format. If multiple formats and
        multiple captures (i.e. a burst) are specified, then this function
        returns multiple lists of capture objects. In both cases, the order of
        the returned objects matches the order of the requested formats in the
        out_surfaces parameter. For example:

            yuv_cap            = do_capture( req1 )
            yuv_cap            = do_capture( req1,        yuv_fmt )
            yuv_cap,  raw_cap  = do_capture( req1,        [yuv_fmt,raw_fmt] )
            yuv_caps           = do_capture( [req1,req2], yuv_fmt )
            yuv_caps, raw_caps = do_capture( [req1,req2], [yuv_fmt,raw_fmt] )
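
        The returned capture objects are typically fed to the companion
        its.image module for processing; for example (a sketch, assuming that
        module's convert_capture_to_rgb_image helper is available):

            rgb = its.image.convert_capture_to_rgb_image(yuv_cap)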

        Args:
            cap_request: The Python dict/list specifying the capture(s), which
                will be converted to JSON and sent to the device.
            out_surfaces: (Optional) specifications of the output image formats
                and sizes to use for each capture.

        Returns:
            An object, list of objects, or list of lists of objects, where each
            object contains the following fields:
            * data: the image data as a numpy array of bytes.
            * width: the width of the captured image.
            * height: the height of the captured image.
            * format: the image format, in ["yuv","jpeg","raw","raw10","dng"].
            * metadata: the capture result object (Python dictionary).
        """
        cmd = {}
        cmd["cmdName"] = "doCapture"
        if not isinstance(cap_request, list):
            cmd["captureRequests"] = [cap_request]
        else:
            cmd["captureRequests"] = cap_request
        if out_surfaces is not None:
            if not isinstance(out_surfaces, list):
                cmd["outputSurfaces"] = [out_surfaces]
            else:
                cmd["outputSurfaces"] = out_surfaces
            formats = [c["format"] if c.has_key("format") else "yuv"
                       for c in cmd["outputSurfaces"]]
            formats = [s if s != "jpg" else "jpeg" for s in formats]
        else:
            formats = ['yuv']
        ncap = len(cmd["captureRequests"])
        nsurf = 1 if out_surfaces is None else len(cmd["outputSurfaces"])
        if len(formats) > len(set(formats)):
            raise its.error.Error('Duplicate format requested')
        if "dng" in formats and "raw" in formats or \
                "dng" in formats and "raw10" in formats or \
                "raw" in formats and "raw10" in formats:
            raise its.error.Error('Different raw formats not supported')
        print "Capturing %d frame%s with %d format%s [%s]" % (
                  ncap, "s" if ncap>1 else "", nsurf, "s" if nsurf>1 else "",
                  ",".join(formats))
        self.sock.send(json.dumps(cmd) + "\n")

        # Wait for ncap*nsurf images and ncap metadata responses.
        # Assume that captures come out in the same order as requested in
        # the burst, however individual images of different formats can come
        # out in any order for that capture.
        nbufs = 0
        bufs = {"yuv":[], "raw":[], "raw10":[], "dng":[], "jpeg":[]}
        mds = []
        widths = None
        heights = None
        while nbufs < ncap*nsurf or len(mds) < ncap:
            jsonObj,buf = self.__read_response_from_socket()
            if jsonObj['tag'] in ['jpegImage', 'yuvImage', 'rawImage',
                                  'raw10Image', 'dngImage'] and buf is not None:
                fmt = jsonObj['tag'][:-5]
                bufs[fmt].append(buf)
                nbufs += 1
            elif jsonObj['tag'] == 'captureResults':
                mds.append(jsonObj['objValue']['captureResult'])
                outputs = jsonObj['objValue']['outputs']
                widths = [out['width'] for out in outputs]
                heights = [out['height'] for out in outputs]
            else:
                # Just ignore other tags
                pass
        rets = []
        for j,fmt in enumerate(formats):
            objs = []
            for i in range(ncap):
                obj = {}
                obj["data"] = bufs[fmt][i]
                obj["width"] = widths[j]
                obj["height"] = heights[j]
                obj["format"] = fmt
                obj["metadata"] = mds[i]
                objs.append(obj)
            rets.append(objs if ncap>1 else objs[0])
        return rets if len(rets)>1 else rets[0]

def report_result(camera_id, success, summary_path=None):
    """Send a pass/fail result to the device, via an intent.

    Args:
        camera_id: The ID string of the camera for which to report pass/fail.
        success: Boolean, indicating if the result was pass or fail.
        summary_path: (Optional) path to ITS summary file on host PC.

    Returns:
        Nothing.
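
    Example (illustrative values; the summary file is produced on the host
    by the ITS test harness):

        report_result("0", True, "/tmp/camera_0_its_summary.txt")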
    """
    device_summary_path = "/sdcard/camera_" + camera_id + "_its_summary.txt"
    if summary_path is not None:
        _run("%s push %s %s" % (
                ItsSession.ADB, summary_path, device_summary_path))
        _run("%s shell am broadcast -a %s --es %s %s --es %s %s --es %s %s" % (
                ItsSession.ADB, ItsSession.ACTION_ITS_RESULT,
                ItsSession.EXTRA_CAMERA_ID, camera_id,
                ItsSession.EXTRA_SUCCESS, 'True' if success else 'False',
                ItsSession.EXTRA_SUMMARY, device_summary_path))
    else:
        _run("%s shell am broadcast -a %s --es %s %s --es %s %s --es %s %s" % (
                ItsSession.ADB, ItsSession.ACTION_ITS_RESULT,
                ItsSession.EXTRA_CAMERA_ID, camera_id,
                ItsSession.EXTRA_SUCCESS, 'True' if success else 'False',
                ItsSession.EXTRA_SUMMARY, "null"))

def _run(cmd):
    """Replacement for os.system, with hiding of stdout+stderr messages.
    """
    with open(os.devnull, 'wb') as devnull:
        subprocess.check_call(
                cmd.split(), stdout=devnull, stderr=subprocess.STDOUT)

class __UnitTest(unittest.TestCase):
    """Run a suite of unit tests on this module.
    """

    # TODO: Add some unit tests.
    pass

if __name__ == '__main__':
    unittest.main()