# Copyright 2013 The Android Open Source Project
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#      http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import its.error
import os
import os.path
import sys
import re
import json
import time
import unittest
import socket
import subprocess
import hashlib
import numpy

class ItsSession(object):
    """Controls a device over adb to run ITS scripts.

    The script importing this module (on the host machine) prepares JSON
    objects encoding CaptureRequests, specifying sets of parameters to use
    when capturing an image using the Camera2 APIs. This class encapsulates
    sending the requests to the device, monitoring the device's progress, and
    copying the resultant captures back to the host machine when done. TCP
    forwarded over adb is the transport mechanism used.

    The device must have CtsVerifier.apk installed.

    Attributes:
        sock: The open socket.
    """

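    # Example usage (a minimal sketch; assumes the ITS service is reachable,
    # e.g. after __pre_init has set up the TCP forwarding, and that a camera
    # test scene is in view):
    #
    #     with ItsSession() as cam:
    #         props = cam.get_camera_properties()
    #         req = {"android.sensor.sensitivity": 100,
    #                "android.sensor.exposureTime": 100*1000*1000}
    #         cap = cam.do_capture(req, cam.CAP_YUV)
    #         print cap["width"], cap["height"], len(cap["data"])
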
    # Open a connection to localhost:6000, forwarded to port 6000 on the device.
    # TODO: Support multiple devices running over different TCP ports.
    IPADDR = '127.0.0.1'
    PORT = 6000
    BUFFER_SIZE = 4096

    # Seconds timeout on each socket operation.
    SOCK_TIMEOUT = 10.0

    PACKAGE = 'com.android.cts.verifier.camera.its'
    INTENT_START = 'com.android.cts.verifier.camera.its.START'
    ACTION_ITS_RESULT = 'com.android.cts.verifier.camera.its.ACTION_ITS_RESULT'
    EXTRA_SUCCESS = 'camera.its.extra.SUCCESS'

    # TODO: Handle multiple connected devices.
    ADB = "adb -d"

    # Definitions for some of the common output format options for do_capture().
    # Each gets images of full resolution for each requested format.
    CAP_RAW = {"format":"raw"}
    CAP_DNG = {"format":"dng"}
    CAP_YUV = {"format":"yuv"}
    CAP_JPEG = {"format":"jpeg"}
    CAP_RAW_YUV = [{"format":"raw"}, {"format":"yuv"}]
    CAP_DNG_YUV = [{"format":"dng"}, {"format":"yuv"}]
    CAP_RAW_JPEG = [{"format":"raw"}, {"format":"jpeg"}]
    CAP_DNG_JPEG = [{"format":"dng"}, {"format":"jpeg"}]
    CAP_YUV_JPEG = [{"format":"yuv"}, {"format":"jpeg"}]
    CAP_RAW_YUV_JPEG = [{"format":"raw"}, {"format":"yuv"}, {"format":"jpeg"}]
    CAP_DNG_YUV_JPEG = [{"format":"dng"}, {"format":"yuv"}, {"format":"jpeg"}]

    # Method to handle the case where the service isn't already running.
    # This occurs when a test is invoked directly from the command line, rather
    # than as a part of a separate test harness which is setting up the device
    # and the TCP forwarding.
    def __pre_init(self):

        # This also includes the optional reboot handling: if the user
        # provides a "reboot" or "reboot=N" arg, then reboot the device,
        # waiting for N seconds (default 30) before returning.
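        # For example, a hypothetical invocation such as
        #     python some_its_test.py reboot=20
        # reboots the device and waits 20 seconds before continuing.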
        for s in sys.argv[1:]:
            if s[:6] == "reboot":
                duration = 30
                if len(s) > 7 and s[6] == "=":
                    duration = int(s[7:])
                print "Rebooting device"
                _run("%s reboot" % (ItsSession.ADB))
                _run("%s wait-for-device" % (ItsSession.ADB))
                time.sleep(duration)
                print "Reboot complete"

        # TODO: Figure out why "--user 0" is needed, and fix the problem.
        _run('%s shell am force-stop --user 0 %s' % (ItsSession.ADB, self.PACKAGE))
        _run(('%s shell am startservice --user 0 -t text/plain '
              '-a %s') % (ItsSession.ADB, self.INTENT_START))

        # Wait until the socket is ready to accept a connection.
        proc = subprocess.Popen(
            ItsSession.ADB.split() + ["logcat"],
            stdout=subprocess.PIPE)
        logcat = proc.stdout
        while True:
            line = logcat.readline().strip()
            if line.find('ItsService ready') >= 0:
                break
        proc.kill()

        # Set up the TCP-over-ADB forwarding.
        _run('%s forward tcp:%d tcp:%d' % (ItsSession.ADB, self.PORT, self.PORT))

    def __init__(self):
        if "noinit" not in sys.argv:
            self.__pre_init()
        self.sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        self.sock.connect((self.IPADDR, self.PORT))
        self.sock.settimeout(self.SOCK_TIMEOUT)
        self.__close_camera()
        self.__open_camera()

    def __del__(self):
        if hasattr(self, 'sock') and self.sock:
            self.__close_camera()
            self.sock.close()

    def __enter__(self):
        return self

    def __exit__(self, type, value, traceback):
        return False

    def __read_response_from_socket(self):
        # Read a newline-terminated string containing a serialized JSON object.
        chars = []
        while len(chars) == 0 or chars[-1] != '\n':
            ch = self.sock.recv(1)
            if len(ch) == 0:
                # The socket was probably closed; recv doesn't otherwise
                # return an empty string.
                raise its.error.Error('Problem with socket on device side')
            chars.append(ch)
        line = ''.join(chars)
        jobj = json.loads(line)
        # Optionally read a fixed-size binary buffer that follows the JSON.
        buf = None
        if jobj.has_key("bufValueSize"):
            n = jobj["bufValueSize"]
            buf = bytearray(n)
            view = memoryview(buf)
            while n > 0:
                nbytes = self.sock.recv_into(view, n)
                view = view[nbytes:]
                n -= nbytes
            buf = numpy.frombuffer(buf, dtype=numpy.uint8)
        return jobj, buf

    def __open_camera(self):
        # Get the camera ID to open from the command line ("camera=N"),
        # defaulting to camera 0.
        camera_id = 0
        for s in sys.argv[1:]:
            if s[:7] == "camera=" and len(s) > 7:
                camera_id = int(s[7:])
        cmd = {"cmdName":"open", "cameraId":camera_id}
        self.sock.send(json.dumps(cmd) + "\n")
        data,_ = self.__read_response_from_socket()
        if data['tag'] != 'cameraOpened':
            raise its.error.Error('Invalid command response')

    def __close_camera(self):
        cmd = {"cmdName":"close"}
        self.sock.send(json.dumps(cmd) + "\n")
        data,_ = self.__read_response_from_socket()
        if data['tag'] != 'cameraClosed':
            raise its.error.Error('Invalid command response')

    def do_vibrate(self, pattern):
        """Cause the device to vibrate to a specific pattern.

        Args:
            pattern: Durations (ms) for which to turn on or off the vibrator.
                The first value indicates the number of milliseconds to wait
                before turning the vibrator on. The next value indicates the
                number of milliseconds for which to keep the vibrator on
                before turning it off. Subsequent values alternate between
                durations in milliseconds to turn the vibrator off or to turn
                the vibrator on.

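        Example (a sketch; "cam" is an open ItsSession; waits 0 ms, vibrates
        for 200 ms, pauses for 100 ms, then vibrates for 200 ms):
            cam.do_vibrate([0, 200, 100, 200])
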
        Returns:
            Nothing.
        """
        cmd = {}
        cmd["cmdName"] = "doVibrate"
        cmd["pattern"] = pattern
        self.sock.send(json.dumps(cmd) + "\n")
        data,_ = self.__read_response_from_socket()
        if data['tag'] != 'vibrationStarted':
            raise its.error.Error('Invalid command response')

    def start_sensor_events(self):
        """Start collecting sensor events on the device.

        See get_sensor_events for more info.

        Returns:
            Nothing.
        """
        cmd = {}
        cmd["cmdName"] = "startSensorEvents"
        self.sock.send(json.dumps(cmd) + "\n")
        data,_ = self.__read_response_from_socket()
        if data['tag'] != 'sensorEventsStarted':
            raise its.error.Error('Invalid command response')

    def get_sensor_events(self):
        """Get a trace of all sensor events on the device.

        The trace starts when the start_sensor_events function is called. If
        the test runs for a long time after this call, then the device's
        internal memory can fill up. Calling get_sensor_events gets all events
        from the device, and then stops the device from collecting events and
        clears the internal buffer; to start again, the start_sensor_events
        call must be used again.

        Events from the accelerometer, compass, and gyro are returned; each
        has a timestamp and x,y,z values.

        Note that sensor events are only produced if the device isn't in its
        standby mode (i.e. the screen must be on).

        Returns:
            A Python dictionary with three keys ("accel", "mag", "gyro"), each
            of which maps to a list of objects containing "time", "x", "y",
            and "z" keys.
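
        Example (a sketch; "cam" is an open ItsSession):
            cam.start_sensor_events()
            # ... move the device through the motion being measured ...
            events = cam.get_sensor_events()
            print len(events["gyro"]), "gyro samples collected"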
        """
        cmd = {}
        cmd["cmdName"] = "getSensorEvents"
        self.sock.send(json.dumps(cmd) + "\n")
        data,_ = self.__read_response_from_socket()
        if data['tag'] != 'sensorEvents':
            raise its.error.Error('Invalid command response')
        return data['objValue']

    def get_camera_properties(self):
        """Get the camera properties object for the device.

        Returns:
            The Python dictionary object for the CameraProperties object.
        """
        cmd = {}
        cmd["cmdName"] = "getCameraProperties"
        self.sock.send(json.dumps(cmd) + "\n")
        data,_ = self.__read_response_from_socket()
        if data['tag'] != 'cameraProperties':
            raise its.error.Error('Invalid command response')
        return data['objValue']['cameraProperties']

    def do_3a(self, regions_ae=[[0,0,1,1,1]],
              regions_awb=[[0,0,1,1,1]],
              regions_af=[[0,0,1,1,1]],
              do_ae=True, do_awb=True, do_af=True,
              lock_ae=False, lock_awb=False,
              get_results=False):
        """Perform a 3A operation on the device.

        Triggers some or all of AE, AWB, and AF, and returns once they have
        converged. Uses the vendor 3A that is implemented inside the HAL.

        Raises an error if 3A fails to converge.

        Args:
            regions_ae: List of weighted AE regions.
            regions_awb: List of weighted AWB regions.
            regions_af: List of weighted AF regions.
            do_ae: Trigger AE and wait for it to converge.
            do_awb: Wait for AWB to converge.
            do_af: Trigger AF and wait for it to converge.
            lock_ae: Request AE lock after convergence, and wait for it.
            lock_awb: Request AWB lock after convergence, and wait for it.
            get_results: Return the 3A results from this function.

        Region format in args:
            Arguments are lists of weighted regions; each weighted region is a
            list of 5 values, [x, y, w, h, wgt], and each argument is a list of
            these 5-value lists. The coordinates are given as normalized
            rectangles (x, y, w, h) specifying the region. For example:
                [[0.0, 0.0, 1.0, 0.5, 5], [0.0, 0.5, 1.0, 0.5, 10]].
            Weights are non-negative integers.

        Returns:
            Five values are returned if get_results is True:
                * AE sensitivity; None if do_ae is False
                * AE exposure time; None if do_ae is False
                * AWB gains (list); None if do_awb is False
                * AWB transform (list); None if do_awb is False
                * AF focus position; None if do_af is False
            Otherwise, it returns five None values.
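
        Example (a sketch; "cam" is an open ItsSession and the scene allows
        3A to converge):
            sens, exp, gains, xform, focus = cam.do_3a(get_results=True)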
        """
        print "Running vendor 3A on device"
        cmd = {}
        cmd["cmdName"] = "do3A"
        cmd["regions"] = {"ae": sum(regions_ae, []),
                          "awb": sum(regions_awb, []),
                          "af": sum(regions_af, [])}
        cmd["triggers"] = {"ae": do_ae, "af": do_af}
        if lock_ae:
            cmd["aeLock"] = True
        if lock_awb:
            cmd["awbLock"] = True
        self.sock.send(json.dumps(cmd) + "\n")

        # Wait for each specified 3A to converge.
        ae_sens = None
        ae_exp = None
        awb_gains = None
        awb_transform = None
        af_dist = None
        converged = False
        while True:
            data,_ = self.__read_response_from_socket()
            vals = data['strValue'].split()
            if data['tag'] == 'aeResult':
                ae_sens, ae_exp = [int(i) for i in vals]
            elif data['tag'] == 'afResult':
                af_dist = float(vals[0])
            elif data['tag'] == 'awbResult':
                awb_gains = [float(f) for f in vals[:4]]
                awb_transform = [float(f) for f in vals[4:]]
            elif data['tag'] == '3aConverged':
                converged = True
            elif data['tag'] == '3aDone':
                break
            else:
                raise its.error.Error('Invalid command response')
        if converged and not get_results:
            return None, None, None, None, None
        if (do_ae and ae_sens is None or do_awb and awb_gains is None
                or do_af and af_dist is None or not converged):
            raise its.error.Error('3A failed to converge')
        return ae_sens, ae_exp, awb_gains, awb_transform, af_dist

    def do_capture(self, cap_request, out_surfaces=None):
        """Issue capture request(s), and read back the image(s) and metadata.

        The main top-level function for capturing one or more images using the
        device. Captures a single image if cap_request is a single object, and
        captures a burst if it is a list of objects.

        The out_surfaces field can specify the width(s), height(s), and
        format(s) of the captured image. The formats may be "yuv", "jpeg",
        "dng", "raw", or "raw10". The default is a YUV420 frame ("yuv")
        corresponding to a full sensor frame.

        Note that one or more surfaces can be specified, allowing a capture to
        request images back in multiple formats (e.g. raw+yuv, raw+jpeg,
        yuv+jpeg, or raw+yuv+jpeg). If the size is omitted for a surface, the
        default is the largest resolution available for the format of that
        surface. At most one output surface can be specified for a given
        format, and raw+dng, raw10+dng, and raw+raw10 are not supported as
        combinations.

        Example of a single capture request:

            {
                "android.sensor.exposureTime": 100*1000*1000,
                "android.sensor.sensitivity": 100
            }

        Example of a list of capture requests:

            [
                {
                    "android.sensor.exposureTime": 100*1000*1000,
                    "android.sensor.sensitivity": 100
                },
                {
                    "android.sensor.exposureTime": 100*1000*1000,
                    "android.sensor.sensitivity": 200
                }
            ]

        Examples of output surface specifications:

            {
                "width": 640,
                "height": 480,
                "format": "yuv"
            }

            [
                {
                    "format": "jpeg"
                },
                {
                    "format": "raw"
                }
            ]

        The following variables defined in this class are shortcuts for
        specifying one or more formats where each output is the full size for
        that format; they can be used as values for the out_surfaces arguments:

            CAP_RAW
            CAP_DNG
            CAP_YUV
            CAP_JPEG
            CAP_RAW_YUV
            CAP_DNG_YUV
            CAP_RAW_JPEG
            CAP_DNG_JPEG
            CAP_YUV_JPEG
            CAP_RAW_YUV_JPEG
            CAP_DNG_YUV_JPEG

        If multiple formats are specified, then this function returns multiple
        capture objects, one for each requested format. If multiple formats and
        multiple captures (i.e. a burst) are specified, then this function
        returns multiple lists of capture objects. In both cases, the order of
        the returned objects matches the order of the requested formats in the
        out_surfaces parameter. For example:

            yuv_cap = do_capture( req1 )
            yuv_cap = do_capture( req1, yuv_fmt )
            yuv_cap, raw_cap = do_capture( req1, [yuv_fmt,raw_fmt] )
            yuv_caps = do_capture( [req1,req2], yuv_fmt )
            yuv_caps, raw_caps = do_capture( [req1,req2], [yuv_fmt,raw_fmt] )

        Args:
            cap_request: The Python dict/list specifying the capture(s), which
                will be converted to JSON and sent to the device.
            out_surfaces: (Optional) specifications of the output image formats
                and sizes to use for each capture.

        Returns:
            An object, list of objects, or list of lists of objects, where each
            object contains the following fields:
                * data: the image data as a numpy array of bytes.
                * width: the width of the captured image.
                * height: the height of the captured image.
                * format: the image format, in ["yuv", "jpeg", "raw", "raw10", "dng"].
                * metadata: the capture result object (Python dictionary).
        """
        cmd = {}
        cmd["cmdName"] = "doCapture"
        if not isinstance(cap_request, list):
            cmd["captureRequests"] = [cap_request]
        else:
            cmd["captureRequests"] = cap_request
        if out_surfaces is not None:
            if not isinstance(out_surfaces, list):
                cmd["outputSurfaces"] = [out_surfaces]
            else:
                cmd["outputSurfaces"] = out_surfaces
            formats = [c["format"] if c.has_key("format") else "yuv"
                       for c in cmd["outputSurfaces"]]
            formats = [s if s != "jpg" else "jpeg" for s in formats]
        else:
            formats = ['yuv']
        ncap = len(cmd["captureRequests"])
        nsurf = 1 if out_surfaces is None else len(cmd["outputSurfaces"])
        if len(formats) > len(set(formats)):
            raise its.error.Error('Duplicate format requested')
        if "dng" in formats and "raw" in formats or \
           "dng" in formats and "raw10" in formats or \
           "raw" in formats and "raw10" in formats:
            raise its.error.Error('Different raw formats not supported')
        print "Capturing %d frame%s with %d format%s [%s]" % (
            ncap, "s" if ncap > 1 else "", nsurf, "s" if nsurf > 1 else "",
            ",".join(formats))
        self.sock.send(json.dumps(cmd) + "\n")

        # Wait for ncap*nsurf images and ncap metadata responses.
        # Assume that captures come out in the same order as requested in
        # the burst, however individual images of different formats can come
        # out in any order for that capture.
        nbufs = 0
        bufs = {"yuv":[], "raw":[], "raw10":[], "dng":[], "jpeg":[]}
        mds = []
        widths = None
        heights = None
        while nbufs < ncap*nsurf or len(mds) < ncap:
            jsonObj,buf = self.__read_response_from_socket()
            if jsonObj['tag'] in ['jpegImage', 'yuvImage', 'rawImage',
                    'raw10Image', 'dngImage'] and buf is not None:
                fmt = jsonObj['tag'][:-5]
                bufs[fmt].append(buf)
                nbufs += 1
            elif jsonObj['tag'] == 'captureResults':
                mds.append(jsonObj['objValue']['captureResult'])
                outputs = jsonObj['objValue']['outputs']
                widths = [out['width'] for out in outputs]
                heights = [out['height'] for out in outputs]
            else:
                # Just ignore other tags.
                pass
        rets = []
        for j,fmt in enumerate(formats):
            objs = []
            for i in range(ncap):
                obj = {}
                obj["data"] = bufs[fmt][i]
                obj["width"] = widths[j]
                obj["height"] = heights[j]
                obj["format"] = fmt
                obj["metadata"] = mds[i]
                objs.append(obj)
            rets.append(objs if ncap > 1 else objs[0])
        return rets if len(rets) > 1 else rets[0]

    @staticmethod
    def report_result(camera_id, success):
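        """Report a pass/fail result for one camera via an ACTION_ITS_RESULT broadcast.

        A sketch of intended use (the camera ID value here is hypothetical):

            ItsSession.report_result("0", True)
        """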
        resultstr = "%s=%s" % (camera_id, 'True' if success else 'False')
        _run(('%s shell am broadcast '
              '-a %s --es %s %s') % (ItsSession.ADB, ItsSession.ACTION_ITS_RESULT,
              ItsSession.EXTRA_SUCCESS, resultstr))


def _run(cmd):
    """Replacement for os.system, with hiding of stdout+stderr messages.
    """
    with open(os.devnull, 'wb') as devnull:
        subprocess.check_call(
            cmd.split(), stdout=devnull, stderr=subprocess.STDOUT)

class __UnitTest(unittest.TestCase):
    """Run a suite of unit tests on this module.
    """

    # TODO: Add some unit tests.
    pass

if __name__ == '__main__':
    unittest.main()
