Merge "new SensorService" into gingerbread
diff --git a/cmds/stagefright/stagefright.cpp b/cmds/stagefright/stagefright.cpp
index b7a3f99..f74240f 100644
--- a/cmds/stagefright/stagefright.cpp
+++ b/cmds/stagefright/stagefright.cpp
@@ -278,6 +278,62 @@
writer->stop();
}
+static void performSeekTest(const sp<MediaSource> &source) {
+ CHECK_EQ(OK, source->start());
+
+ int64_t durationUs;
+ CHECK(source->getFormat()->findInt64(kKeyDuration, &durationUs));
+
+ for (int64_t seekTimeUs = 0; seekTimeUs <= durationUs;
+ seekTimeUs += 60000ll) {
+ MediaSource::ReadOptions options;
+ options.setSeekTo(
+ seekTimeUs, MediaSource::ReadOptions::SEEK_PREVIOUS_SYNC);
+
+ MediaBuffer *buffer;
+ status_t err;
+ for (;;) {
+ err = source->read(&buffer, &options);
+
+ options.clearSeekTo();
+
+ if (err == INFO_FORMAT_CHANGED) {
+ CHECK(buffer == NULL);
+ continue;
+ }
+
+ if (err != OK) {
+ CHECK(buffer == NULL);
+ break;
+ }
+
+ CHECK(buffer != NULL);
+
+ if (buffer->range_length() > 0) {
+ break;
+ }
+
+ buffer->release();
+ buffer = NULL;
+ }
+
+ if (err == OK) {
+ int64_t timeUs;
+ CHECK(buffer->meta_data()->findInt64(kKeyTime, &timeUs));
+
+ printf("%lld\t%lld\t%lld\n", seekTimeUs, timeUs, seekTimeUs - timeUs);
+
+ buffer->release();
+ buffer = NULL;
+ } else {
+ printf("ERROR\n");
+ break;
+ }
+ }
+
+ CHECK_EQ(OK, source->stop());
+}
+
static void usage(const char *me) {
fprintf(stderr, "usage: %s\n", me);
fprintf(stderr, " -h(elp)\n");
@@ -291,6 +347,7 @@
fprintf(stderr, " -s(oftware) prefer software codec\n");
fprintf(stderr, " -o playback audio\n");
fprintf(stderr, " -w(rite) filename (write to .mp4 file)\n");
+ fprintf(stderr, " -k seek test\n");
}
int main(int argc, char **argv) {
@@ -300,6 +357,7 @@
bool listComponents = false;
bool dumpProfiles = false;
bool extractThumbnail = false;
+ bool seekTest = false;
gNumRepetitions = 1;
gMaxNumFrames = 0;
gReproduceBug = -1;
@@ -308,7 +366,7 @@
gWriteMP4 = false;
int res;
- while ((res = getopt(argc, argv, "han:lm:b:ptsow:")) >= 0) {
+ while ((res = getopt(argc, argv, "han:lm:b:ptsow:k")) >= 0) {
switch (res) {
case 'a':
{
@@ -375,6 +433,12 @@
break;
}
+ case 'k':
+ {
+ seekTest = true;
+ break;
+ }
+
case '?':
case 'h':
default:
@@ -585,6 +649,8 @@
if (gWriteMP4) {
writeSourceToMP4(mediaSource);
+ } else if (seekTest) {
+ performSeekTest(mediaSource);
} else {
playSource(&client, mediaSource);
}
diff --git a/core/java/android/hardware/Camera.java b/core/java/android/hardware/Camera.java
index 1100886..19e578f 100644
--- a/core/java/android/hardware/Camera.java
+++ b/core/java/android/hardware/Camera.java
@@ -32,23 +32,79 @@
import android.os.Message;
/**
- * The Camera class is used to connect/disconnect with the camera service,
- * set capture settings, start/stop preview, snap a picture, and retrieve
- * frames for encoding for video.
- * <p>There is no default constructor for this class. Use {@link #open()} to
- * get a Camera object.</p>
+ * The Camera class is used to set image capture settings, start/stop preview,
+ * snap pictures, and retrieve frames for encoding for video. This class is a
+ * client for the Camera service, which manages the actual camera hardware.
*
- * <p>In order to use the device camera, you must declare the
+ * <p>To access the device camera, you must declare the
* {@link android.Manifest.permission#CAMERA} permission in your Android
* Manifest. Also be sure to include the
* <a href="{@docRoot}guide/topics/manifest/uses-feature-element.html"><uses-feature></a>
- * manifest element in order to declare camera features used by your application.
+ * manifest element to declare camera features used by your application.
* For example, if you use the camera and auto-focus feature, your Manifest
* should include the following:</p>
 * <pre> &lt;uses-permission android:name="android.permission.CAMERA" /&gt;
 * &lt;uses-feature android:name="android.hardware.camera" /&gt;
 * &lt;uses-feature android:name="android.hardware.camera.autofocus" /&gt;</pre>
*
+ * <p>To take pictures with this class, use the following steps:</p>
+ *
+ * <ol>
+ * <li>Obtain an instance of Camera from {@link #open(int)}.
+ *
+ * <li>Get existing (default) settings with {@link #getParameters()}.
+ *
+ * <li>If necessary, modify the returned {@link Camera.Parameters} object and call
+ * {@link #setParameters(Camera.Parameters)}.
+ *
+ * <li>If desired, call {@link #setDisplayOrientation(int)}.
+ *
+ * <li><b>Important</b>: Pass a fully initialized {@link SurfaceHolder} to
+ * {@link #setPreviewDisplay(SurfaceHolder)}. Without a surface, the camera
+ * will be unable to start the preview.
+ *
+ * <li><b>Important</b>: Call {@link #startPreview()} to start updating the
+ * preview surface. Preview must be started before you can take a picture.
+ *
+ * <li>When you want, call {@link #takePicture(Camera.ShutterCallback,
+ * Camera.PictureCallback, Camera.PictureCallback, Camera.PictureCallback)} to
+ * capture a photo. Wait for the callbacks to provide the actual image data.
+ *
+ * <li>After taking a picture, preview display will have stopped. To take more
+ * photos, call {@link #startPreview()} again first.
+ *
+ * <li>Call {@link #stopPreview()} to stop updating the preview surface.
+ *
+ * <li><b>Important:</b> Call {@link #release()} to release the camera for
+ * use by other applications. Applications should release the camera
+ * immediately in {@link android.app.Activity#onPause()} (and re-{@link #open()}
+ * it in {@link android.app.Activity#onResume()}).
+ * </ol>
+ *
+ * <p>To quickly switch to video recording mode, use these steps:</p>
+ *
+ * <ol>
+ * <li>Obtain and initialize a Camera and start preview as described above.
+ *
+ * <li>Call {@link #unlock()} to allow the media process to access the camera.
+ *
+ * <li>Pass the camera to {@link android.media.MediaRecorder#setCamera(Camera)}.
+ * See {@link android.media.MediaRecorder} for information about video recording.
+ *
+ * <li>When finished recording, call {@link #reconnect()} to re-acquire
+ * and re-lock the camera.
+ *
+ * <li>If desired, restart preview and take more photos or videos.
+ *
+ * <li>Call {@link #stopPreview()} and {@link #release()} as described above.
+ * </ol>
+ *
+ * <p>This class is not thread-safe, and is meant for use from one event thread.
+ * Most long-running operations (preview, focus, photo capture, etc.) happen
+ * asynchronously and invoke callbacks as necessary. Callbacks will be invoked
+ * on the event thread {@link #open(int)} was called from. This class's methods
+ * must never be called from multiple threads at once.</p>
+ *
* <p class="caution"><strong>Caution:</strong> Different Android-powered devices
* may have different hardware specifications, such as megapixel ratings and
* auto-focus capabilities. In order for your application to be compatible with
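
A minimal sketch (not part of this patch) of the still-capture sequence the steps above describe, assuming holder is a fully initialized SurfaceHolder; error handling is elided:

    import java.io.IOException;
    import android.graphics.ImageFormat;
    import android.hardware.Camera;
    import android.view.SurfaceHolder;

    void takeOnePicture(SurfaceHolder holder) throws IOException {
        Camera camera = Camera.open(0);            // steps 1-3: open and configure
        Camera.Parameters params = camera.getParameters();
        params.setPictureFormat(ImageFormat.JPEG);
        camera.setParameters(params);
        camera.setPreviewDisplay(holder);          // holder must already have a surface
        camera.startPreview();                     // preview must run before capture
        camera.takePicture(null, null, new Camera.PictureCallback() {
            public void onPictureTaken(byte[] jpegData, Camera c) {
                // jpegData holds the compressed image; preview has stopped here.
                c.startPreview();                  // restart to take more photos
            }
        });
        // When finished (e.g. in Activity.onPause()): camera.release();
    }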
@@ -84,12 +140,12 @@
private boolean mWithBuffer;
/**
- * Returns the number of Cameras available.
+ * Returns the number of physical cameras available on this device.
*/
public native static int getNumberOfCameras();
/**
- * Returns the information about the camera.
+ * Returns the information about a particular camera.
* If {@link #getNumberOfCameras()} returns N, the valid id is 0 to N-1.
*/
public native static void getCameraInfo(int cameraId, CameraInfo cameraInfo);
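
A short sketch (assumed usage, not from this patch) of enumerating the cameras with these two methods:

    import android.hardware.Camera;
    import android.util.Log;

    static void listCameras() {
        Camera.CameraInfo info = new Camera.CameraInfo();
        for (int id = 0; id < Camera.getNumberOfCameras(); id++) {
            Camera.getCameraInfo(id, info);
            boolean front = info.facing == Camera.CameraInfo.CAMERA_FACING_FRONT;
            Log.i("CameraList", "camera " + id + (front ? ": front" : ": back")
                    + ", orientation " + info.orientation);
        }
    }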
@@ -123,9 +179,30 @@
};
/**
- * Returns a new Camera object.
- * If {@link #getNumberOfCameras()} returns N, the valid id is 0 to N-1.
- * The id 0 is the default camera.
+ * Creates a new Camera object to access a particular hardware camera.
+ *
+ * <p>You must call {@link #release()} when you are done using the camera,
+ * otherwise it will remain locked and be unavailable to other applications.
+ *
+ * <p>Your application should only have one Camera object active at a time
+ * for a particular hardware camera.
+ *
+ * <p>Callbacks from other methods are delivered to the event loop of the
+ * thread which called open(). If this thread has no event loop, then
+ * callbacks are delivered to the main application event loop. If there
+ * is no main application event loop, callbacks are not delivered.
+ *
+ * <p class="caution"><b>Caution:</b> On some devices, this method may
+ * take a long time to complete. It is best to call this method from a
+ * worker thread (possibly using {@link android.os.AsyncTask}) to avoid
+ * blocking the main application UI thread.
+ *
+ * @param cameraId the hardware camera to access, between 0 and
+ * {@link #getNumberOfCameras()}-1. Use {@link #CAMERA_ID_DEFAULT}
+ * to access the default camera.
+ * @return a new Camera object, connected, locked and ready for use.
+ * @throws RuntimeException if connection to the camera service fails (for
+ * example, if the camera is in use by another process).
*/
public static Camera open(int cameraId) {
return new Camera(cameraId);
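
Since open() can block, a sketch of the worker-thread pattern the caution above suggests (a plain AsyncTask; names are illustrative):

    import android.hardware.Camera;
    import android.os.AsyncTask;

    void openDefaultCameraAsync() {
        new AsyncTask<Void, Void, Camera>() {
            protected Camera doInBackground(Void... unused) {
                return Camera.open(Camera.CAMERA_ID_DEFAULT);  // may be slow
            }
            protected void onPostExecute(Camera camera) {
                // The worker thread has no event loop, so per the rules above,
                // camera callbacks are delivered to the main application loop.
            }
        }.execute();
    }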
@@ -137,7 +214,8 @@
public static int CAMERA_ID_DEFAULT = 0;
/**
- * Returns a new Camera object. This returns the default camera.
+ * Equivalent to Camera.open(Camera.CAMERA_ID_DEFAULT).
+ * Creates a new Camera object to access the default camera.
*/
public static Camera open() {
return new Camera(CAMERA_ID_DEFAULT);
@@ -173,56 +251,83 @@
/**
* Disconnects and releases the Camera object resources.
- * <p>It is recommended that you call this as soon as you're done with the
- * Camera object.</p>
+ *
+ * <p>You must call this as soon as you're done with the Camera object.</p>
*/
public final void release() {
native_release();
}
/**
- * Reconnect to the camera after passing it to MediaRecorder. To save
- * setup/teardown time, a client of Camera can pass an initialized Camera
- * object to a MediaRecorder to use for video recording. Once the
- * MediaRecorder is done with the Camera, this method can be used to
- * re-establish a connection with the camera hardware. NOTE: The Camera
- * object must first be unlocked by the process that owns it before it
- * can be connected to another process.
+ * Unlocks the camera to allow another process to access it.
+ * Normally, the camera is locked to the process with an active Camera
+ * object until {@link #release()} is called. To allow rapid handoff
+ * between processes, you can call this method to release the camera
+ * temporarily for another process to use; once the other process is done
+ * you can call {@link #reconnect()} to reclaim the camera.
*
- * @throws IOException if the method fails.
- */
- public native final void reconnect() throws IOException;
-
- /**
- * Lock the camera to prevent other processes from accessing it. To save
- * setup/teardown time, a client of Camera can pass an initialized Camera
- * object to another process. This method is used to re-lock the Camera
- * object prevent other processes from accessing it. By default, the
- * Camera object is locked. Locking it again from the same process will
- * have no effect. Attempting to lock it from another process if it has
- * not been unlocked will fail.
+ * <p>This must be done before calling
+ * {@link android.media.MediaRecorder#setCamera(Camera)}.
*
- * @throws RuntimeException if the method fails.
- */
- public native final void lock();
-
- /**
- * Unlock the camera to allow another process to access it. To save
- * setup/teardown time, a client of Camera can pass an initialized Camera
- * object to another process. This method is used to unlock the Camera
- * object before handing off the Camera object to the other process.
+ * <p>If you are not recording video, you probably do not need this method.
*
- * @throws RuntimeException if the method fails.
+ * @throws RuntimeException if the camera cannot be unlocked.
*/
public native final void unlock();
/**
- * Sets the SurfaceHolder to be used for a picture preview. If the surface
- * changed since the last call, the screen will blank. Nothing happens
- * if the same surface is re-set.
+ * Re-locks the camera to prevent other processes from accessing it.
+ * Camera objects are locked by default unless {@link #unlock()} is
+ * called. Normally {@link #reconnect()} is used instead.
*
- * @param holder the SurfaceHolder upon which to place the picture preview
- * @throws IOException if the method fails.
+ * <p>If you are not recording video, you probably do not need this method.
+ *
+ * @throws RuntimeException if the camera cannot be re-locked (for
+ * example, if the camera is still in use by another process).
+ */
+ public native final void lock();
+
+ /**
+ * Reconnects to the camera service after another process used it.
+ * After {@link #unlock()} is called, another process may use the
+ * camera; when the process is done, you must reconnect to the camera,
+ * which will re-acquire the lock and allow you to continue using the
+ * camera.
+ *
+ * <p>This must be done after {@link android.media.MediaRecorder} is
+ * done recording if {@link android.media.MediaRecorder#setCamera(Camera)}
+ * was used.
+ *
+ * <p>If you are not recording video, you probably do not need this method.
+ *
+ * @throws IOException if a connection cannot be re-established (for
+ * example, if the camera is still in use by another process).
+ */
+ public native final void reconnect() throws IOException;
+
+ /**
+ * Sets the {@link Surface} to be used for live preview.
+ * A surface is necessary for preview, and preview is necessary to take
+ * pictures. The same surface can be re-set without harm.
+ *
+ * <p>The {@link SurfaceHolder} must already contain a surface when this
+ * method is called. If you are using {@link android.view.SurfaceView},
+ * you will need to register a {@link SurfaceHolder.Callback} with
+ * {@link SurfaceHolder#addCallback(SurfaceHolder.Callback)} and wait for
+ * {@link SurfaceHolder.Callback#surfaceCreated(SurfaceHolder)} before
+ * calling setPreviewDisplay() or starting preview.
+ *
+ * <p>This method must be called before {@link #startPreview()}. The
+ * one exception is that if the preview surface is not set (or set to null)
+ * before startPreview() is called, then this method may be called once
+ * with a non-null parameter to set the preview surface. (This allows
+ * camera setup and surface creation to happen in parallel, saving time.)
+ * The preview surface may not otherwise change while preview is running.
+ *
+ * @param holder containing the Surface on which to place the preview,
+ * or null to remove the preview surface
+ * @throws IOException if the method fails (for example, if the surface
+ * is unavailable or unsuitable).
*/
public final void setPreviewDisplay(SurfaceHolder holder) throws IOException {
if (holder != null) {
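
A sketch of the unlock/record/reconnect handoff these methods describe, assuming an open Camera with preview configured and a hypothetical output path:

    import java.io.IOException;
    import android.hardware.Camera;
    import android.media.MediaRecorder;

    void recordClip(Camera camera) throws IOException {
        camera.unlock();                    // let the media process take the camera
        MediaRecorder recorder = new MediaRecorder();
        recorder.setCamera(camera);
        recorder.setAudioSource(MediaRecorder.AudioSource.CAMCORDER);
        recorder.setVideoSource(MediaRecorder.VideoSource.CAMERA);
        recorder.setOutputFormat(MediaRecorder.OutputFormat.MPEG_4);
        recorder.setAudioEncoder(MediaRecorder.AudioEncoder.AMR_NB);
        recorder.setVideoEncoder(MediaRecorder.VideoEncoder.H263);
        recorder.setOutputFile("/sdcard/clip.mp4");   // hypothetical path
        recorder.prepare();
        recorder.start();
        // ... record ...
        recorder.stop();
        recorder.release();
        camera.reconnect();                 // re-acquire and re-lock the camera
    }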
@@ -235,31 +340,47 @@
private native final void setPreviewDisplay(Surface surface);
/**
- * Used to get a copy of each preview frame.
+ * Callback interface used to deliver copies of preview frames as
+ * they are displayed.
+ *
+ * @see #setPreviewCallback(Camera.PreviewCallback)
+ * @see #setOneShotPreviewCallback(Camera.PreviewCallback)
+ * @see #setPreviewCallbackWithBuffer(Camera.PreviewCallback)
+ * @see #startPreview()
*/
public interface PreviewCallback
{
/**
- * The callback that delivers the preview frames.
+ * Called as preview frames are displayed. This callback is invoked
+ * on the event thread {@link #open(int)} was called from.
*
- * @param data The contents of the preview frame in the format defined
+ * @param data the contents of the preview frame in the format defined
* by {@link android.graphics.ImageFormat}, which can be queried
* with {@link android.hardware.Camera.Parameters#getPreviewFormat()}.
* If {@link android.hardware.Camera.Parameters#setPreviewFormat(int)}
* is never called, the default will be the YCbCr_420_SP
* (NV21) format.
- * @param camera The Camera service object.
+ * @param camera the Camera service object.
*/
void onPreviewFrame(byte[] data, Camera camera);
};
/**
- * Start drawing preview frames to the surface.
+ * Starts capturing and drawing preview frames to the screen.
+ * Preview will not actually start until a surface is supplied with
+ * {@link #setPreviewDisplay(SurfaceHolder)}.
+ *
+ * <p>If {@link #setPreviewCallback(Camera.PreviewCallback)},
+ * {@link #setOneShotPreviewCallback(Camera.PreviewCallback)}, or
+ * {@link #setPreviewCallbackWithBuffer(Camera.PreviewCallback)} were
+ * called, {@link Camera.PreviewCallback#onPreviewFrame(byte[], Camera)}
+ * will be called when preview data becomes available.
*/
public native final void startPreview();
/**
- * Stop drawing preview frames to the surface.
+ * Stops capturing and drawing preview frames to the surface, and
+ * resets the camera for a future call to {@link #startPreview()}.
*/
public native final void stopPreview();
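
A sketch of waiting for surface creation before starting preview, as required above. mCamera is assumed to be an open Camera field of the enclosing class, and the push-buffers surface type is needed for camera preview at this API level:

    import java.io.IOException;
    import android.view.SurfaceHolder;
    import android.view.SurfaceView;

    void bindPreview(SurfaceView surfaceView) {
        SurfaceHolder holder = surfaceView.getHolder();
        holder.setType(SurfaceHolder.SURFACE_TYPE_PUSH_BUFFERS);
        holder.addCallback(new SurfaceHolder.Callback() {
            public void surfaceCreated(SurfaceHolder h) {
                try {
                    mCamera.setPreviewDisplay(h);   // the surface now exists
                    mCamera.startPreview();
                } catch (IOException e) {
                    // surface unavailable or unsuitable
                }
            }
            public void surfaceChanged(SurfaceHolder h, int fmt, int w, int ht) { }
            public void surfaceDestroyed(SurfaceHolder h) {
                mCamera.stopPreview();
            }
        });
    }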
@@ -272,11 +393,13 @@
public native final boolean previewEnabled();
/**
- * Can be called at any time to instruct the camera to use a callback for
- * each preview frame in addition to displaying it.
+ * Installs a callback to be invoked for every preview frame in addition
+ * to displaying them on the screen. The callback will be repeatedly called
+ * for as long as preview is active. This method can be called at any time,
+ * even while preview is live. Any other preview callbacks are overridden.
*
- * @param cb A callback object that receives a copy of each preview frame.
- * Pass null to stop receiving callbacks at any time.
+ * @param cb a callback object that receives a copy of each preview frame,
+ * or null to stop receiving callbacks.
*/
public final void setPreviewCallback(PreviewCallback cb) {
mPreviewCallback = cb;
@@ -288,10 +411,13 @@
}
/**
- * Installs a callback to retrieve a single preview frame, after which the
- * callback is cleared.
+ * Installs a callback to be invoked for the next preview frame in addition
+ * to displaying it on the screen. After one invocation, the callback is
+ * cleared. This method can be called any time, even when preview is live.
+ * Any other preview callbacks are overridden.
*
- * @param cb A callback object that receives a copy of the preview frame.
+ * @param cb a callback object that receives a copy of the next preview frame,
+ * or null to stop receiving callbacks.
*/
public final void setOneShotPreviewCallback(PreviewCallback cb) {
mPreviewCallback = cb;
@@ -303,17 +429,24 @@
private native final void setHasPreviewCallback(boolean installed, boolean manualBuffer);
/**
- * Installs a callback which will get called as long as there are buffers in the
- * preview buffer queue, which minimizes dynamic allocation of preview buffers.
+ * Installs a callback to be invoked for every preview frame, using buffers
+ * supplied with {@link #addCallbackBuffer(byte[])}, in addition to
+ * displaying them on the screen. The callback will be repeatedly called
+ * for as long as preview is active and buffers are available.
+ * Any other preview callbacks are overridden.
*
- * Apps must call addCallbackBuffer to explicitly register the buffers to use, or no callbacks
- * will be received. addCallbackBuffer may be safely called before or after
- * a call to setPreviewCallbackWithBuffer with a non-null callback parameter.
+ * <p>The purpose of this method is to improve preview efficiency and frame
+ * rate by allowing preview frame memory reuse. You must call
+ * {@link #addCallbackBuffer(byte[])} at some point -- before or after
+ * calling this method -- or no callbacks will be received.
*
- * The buffer queue will be cleared upon any calls to setOneShotPreviewCallback,
- * setPreviewCallback, or to this method with a null callback parameter.
+ * <p>The buffer queue will be cleared if this method is called with a null
+ * callback, {@link #setPreviewCallback(Camera.PreviewCallback)} is called,
+ * or {@link #setOneShotPreviewCallback(Camera.PreviewCallback)} is called.
*
- * @param cb A callback object that receives a copy of the preview frame. A null value will clear the queue.
+ * @param cb a callback object that receives a copy of the preview frame,
+ * or null to stop receiving callbacks and clear the buffer queue.
+ * @see #addCallbackBuffer(byte[])
*/
public final void setPreviewCallbackWithBuffer(PreviewCallback cb) {
mPreviewCallback = cb;
@@ -325,21 +458,27 @@
/**
* Adds a pre-allocated buffer to the preview callback buffer queue.
* Applications can add one or more buffers to the queue. When a preview
- * frame arrives and there is still available buffer, buffer will be filled
- * and it is removed from the queue. Then preview callback is invoked with
- * the buffer. If a frame arrives and there is no buffer left, the frame is
- * discarded. Applications should add the buffers back when they finish the
- * processing.
+ * frame arrives and there is still at least one available buffer, the
+ * buffer will be used and removed from the queue. Then the preview callback
+ * is invoked with the buffer. If a frame arrives and there is no buffer left,
+ * the frame is discarded. Applications should add buffers back when they
+ * finish processing the data in them.
*
- * The image format of the callback buffer can be read from {@link
- * android.hardware.Camera.Parameters#getPreviewFormat()}. bitsPerPixel can
- * be read from {@link android.graphics.ImageFormat#getBitsPerPixel(int)}.
- * Preview width and height can be determined from getPreviewSize.
+ * <p>The size of the buffer is determined by multiplying the preview
+ * image width, height, and bytes per pixel. The width and height can be
+ * read from {@link Camera.Parameters#getPreviewSize()}. Bytes per pixel
+ * can be computed from
+ * {@link android.graphics.ImageFormat#getBitsPerPixel(int)} / 8,
+ * using the image format from {@link Camera.Parameters#getPreviewFormat()}.
*
- * Alternatively, a buffer from a previous callback may be passed in or used
- * to determine the size of new preview frame buffers.
+ * <p>This method is only necessary when
+ * {@link #setPreviewCallbackWithBuffer(PreviewCallback)} is used. When
+ * {@link #setPreviewCallback(PreviewCallback)} or
+ * {@link #setOneShotPreviewCallback(PreviewCallback)} are used, buffers
+ * are automatically allocated.
*
- * @param callbackBuffer The buffer to register. Size should be width * height * bitsPerPixel / 8.
+ * @param callbackBuffer the buffer to add to the queue.
+ * The size should be width * height * bits_per_pixel / 8.
* @see #setPreviewCallbackWithBuffer(PreviewCallback)
*/
public native final void addCallbackBuffer(byte[] callbackBuffer);
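
Putting the two methods together, a minimal sketch of buffer-based preview callbacks with the buffer size computed as described above:

    import android.graphics.ImageFormat;
    import android.hardware.Camera;

    void startBufferedPreview(Camera camera) {
        Camera.Parameters params = camera.getParameters();
        Camera.Size size = params.getPreviewSize();
        int bitsPerPixel = ImageFormat.getBitsPerPixel(params.getPreviewFormat());
        int bufferSize = size.width * size.height * bitsPerPixel / 8;

        camera.addCallbackBuffer(new byte[bufferSize]);  // queue a couple of buffers
        camera.addCallbackBuffer(new byte[bufferSize]);
        camera.setPreviewCallbackWithBuffer(new Camera.PreviewCallback() {
            public void onPreviewFrame(byte[] data, Camera c) {
                // ... process the frame ...
                c.addCallbackBuffer(data);   // return the buffer to the queue
            }
        });
        camera.startPreview();
    }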
@@ -438,7 +577,8 @@
}
/**
- * Handles the callback for the camera auto focus.
+ * Callback interface used to notify on completion of camera auto focus.
+ *
* <p>Devices that do not support auto-focus will receive a "fake"
* callback to this interface. If your application needs auto-focus and
* should not be installed on devices <em>without</em> auto-focus, you must
@@ -446,13 +586,15 @@
* {@code android.hardware.camera.autofocus} feature, in the
* <a href="{@docRoot}guide/topics/manifest/uses-feature-element.html"><uses-feature></a>
* manifest element.</p>
+ *
+ * @see #autoFocus(AutoFocusCallback)
*/
public interface AutoFocusCallback
{
/**
- * Callback for the camera auto focus. If the camera does not support
- * auto-focus and autoFocus is called, onAutoFocus will be called
- * immediately with success.
+ * Called when the camera auto focus completes. If the camera does not
+ * support auto-focus and autoFocus is called, onAutoFocus will be
+ * called immediately with success.
*
* @param success true if focus was successful, false if otherwise
* @param camera the Camera service object
@@ -461,23 +603,28 @@
};
/**
- * Starts auto-focus function and registers a callback function to run when
- * camera is focused. Only valid after startPreview() has been called.
- * Applications should call {@link
- * android.hardware.Camera.Parameters#getFocusMode()} to determine if this
- * method should be called. If the camera does not support auto-focus, it is
- * a no-op and {@link AutoFocusCallback#onAutoFocus(boolean, Camera)}
+ * Starts camera auto-focus and registers a callback function to run when
+ * the camera is focused. This method is only valid when preview is active
+ * (between {@link #startPreview()} and before {@link #stopPreview()}).
+ *
+ * <p>Callers should check
+ * {@link android.hardware.Camera.Parameters#getFocusMode()} to determine if
+ * this method should be called. If the camera does not support auto-focus,
+ * it is a no-op and {@link AutoFocusCallback#onAutoFocus(boolean, Camera)}
* callback will be called immediately.
+ *
* <p>If your application should not be installed
* on devices without auto-focus, you must declare that your application
* uses auto-focus with the
* <a href="{@docRoot}guide/topics/manifest/uses-feature-element.html"><uses-feature></a>
* manifest element.</p>
+ *
* <p>If the current flash mode is not
* {@link android.hardware.Camera.Parameters#FLASH_MODE_OFF}, flash may be
- * fired during auto-focus depending on the driver.<p>
+ * fired during auto-focus, depending on the driver and camera hardware.</p>
*
* @param cb the callback to run
+ * @see #cancelAutoFocus()
*/
public final void autoFocus(AutoFocusCallback cb)
{
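
A sketch of checking the focus mode before requesting focus, as recommended above (camera assumed open, preview running):

    import android.hardware.Camera;

    void focusIfSupported(Camera camera) {
        Camera.Parameters params = camera.getParameters();
        if (Camera.Parameters.FOCUS_MODE_AUTO.equals(params.getFocusMode())) {
            camera.autoFocus(new Camera.AutoFocusCallback() {
                public void onAutoFocus(boolean success, Camera c) {
                    // Fixed-focus devices report success immediately via the
                    // "fake" callback; capture from here if focus matters.
                }
            });
        }
    }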
@@ -487,10 +634,12 @@
private native final void native_autoFocus();
/**
- * Cancels auto-focus function. If the auto-focus is still in progress,
- * this function will cancel it. Whether the auto-focus is in progress
- * or not, this function will return the focus position to the default.
+ * Cancels any auto-focus function in progress.
+ * Whether or not auto-focus is currently in progress,
+ * this function will return the focus position to the default.
* If the camera does not support auto-focus, this is a no-op.
+ *
+ * @see #autoFocus(Camera.AutoFocusCallback)
*/
public final void cancelAutoFocus()
{
@@ -500,23 +649,32 @@
private native final void native_cancelAutoFocus();
/**
- * An interface which contains a callback for the shutter closing after taking a picture.
+ * Callback interface used to signal the moment of actual image capture.
+ *
+ * @see #takePicture(ShutterCallback, PictureCallback, PictureCallback, PictureCallback)
*/
public interface ShutterCallback
{
/**
- * Can be used to play a shutter sound as soon as the image has been captured, but before
- * the data is available.
+ * Called as near as possible to the moment when a photo is captured
+ * from the sensor. This is a good opportunity to play a shutter sound
+ * or give other feedback of camera operation. This may be some time
+ * after the photo was triggered, but some time before the actual data
+ * is available.
*/
void onShutter();
}
/**
- * Handles the callback for when a picture is taken.
+ * Callback interface used to supply image data from a photo capture.
+ *
+ * @see #takePicture(ShutterCallback, PictureCallback, PictureCallback, PictureCallback)
*/
public interface PictureCallback {
/**
- * Callback for when a picture is taken.
+ * Called when image data is available after a picture is taken.
+ * The format of the data depends on the context of the callback
+ * and {@link Camera.Parameters} settings.
*
* @param data a byte array of the picture data
* @param camera the Camera service object
@@ -525,24 +683,9 @@
};
/**
- * Triggers an asynchronous image capture. The camera service will initiate
- * a series of callbacks to the application as the image capture progresses.
- * The shutter callback occurs after the image is captured. This can be used
- * to trigger a sound to let the user know that image has been captured. The
- * raw callback occurs when the raw image data is available (NOTE: the data
- * may be null if the hardware does not have enough memory to make a copy).
- * The jpeg callback occurs when the compressed image is available. If the
- * application does not need a particular callback, a null can be passed
- * instead of a callback method.
+ * Equivalent to takePicture(shutter, raw, null, jpeg).
*
- * This method is only valid after {@link #startPreview()} has been called.
- * This method will stop the preview. Applications should not call {@link
- * #stopPreview()} before this. After jpeg callback is received,
- * applications can call {@link #startPreview()} to restart the preview.
- *
- * @param shutter callback after the image is captured, may be null
- * @param raw callback with raw image data, may be null
- * @param jpeg callback with jpeg image data, may be null
+ * @see #takePicture(ShutterCallback, PictureCallback, PictureCallback, PictureCallback)
*/
public final void takePicture(ShutterCallback shutter, PictureCallback raw,
PictureCallback jpeg) {
@@ -563,15 +706,18 @@
* application does not need a particular callback, a null can be passed
* instead of a callback method.
*
- * This method is only valid after {@link #startPreview()} has been called.
- * This method will stop the preview. Applications should not call {@link
- * #stopPreview()} before this. After jpeg callback is received,
- * applications can call {@link #startPreview()} to restart the preview.
+ * <p>This method is only valid when preview is active (after
+ * {@link #startPreview()}). Preview will be stopped after the image is
+ * taken; callers must call {@link #startPreview()} again if they want to
+ * re-start preview or take more pictures.
*
- * @param shutter callback after the image is captured, may be null
- * @param raw callback with raw image data, may be null
+ * <p>After calling this method, you must not call {@link #startPreview()}
+ * or take another picture until the JPEG callback has returned.
+ *
+ * @param shutter the callback for image capture moment, or null
+ * @param raw the callback for raw (uncompressed) image data, or null
* @param postview callback with postview image data, may be null
- * @param jpeg callback with jpeg image data, may be null
+ * @param jpeg the callback for JPEG image data, or null
*/
public final void takePicture(ShutterCallback shutter, PictureCallback raw,
PictureCallback postview, PictureCallback jpeg) {
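
A sketch of the four-callback capture, restarting preview from the JPEG callback per the rules above:

    import android.hardware.Camera;

    void capture(Camera camera) {
        camera.takePicture(
                new Camera.ShutterCallback() {
                    public void onShutter() {
                        // Moment of capture: play a shutter sound here.
                    }
                },
                null,   // raw data not needed
                null,   // postview not needed
                new Camera.PictureCallback() {
                    public void onPictureTaken(byte[] jpegData, Camera c) {
                        // ... write jpegData to storage ...
                        c.startPreview();   // safe to restart preview now
                    }
                });
    }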
@@ -583,26 +729,29 @@
}
/**
- * Zooms to the requested value smoothly. Driver will notify {@link
+ * Zooms to the requested value smoothly. The driver will notify {@link
* OnZoomChangeListener} of the zoom value and whether zoom is stopped at
* the time. For example, suppose the current zoom is 0 and startSmoothZoom
- * is called with value 3. Method onZoomChange will be called three times
- * with zoom value 1, 2, and 3. The applications can call {@link
- * #stopSmoothZoom} to stop the zoom earlier. The applications should not
- * call startSmoothZoom again or change the zoom value before zoom stops. If
- * the passing zoom value equals to the current zoom value, no zoom callback
- * will be generated. This method is supported if {@link
- * android.hardware.Camera.Parameters#isSmoothZoomSupported} is true.
+ * is called with value 3. The
+ * {@link Camera.OnZoomChangeListener#onZoomChange(int, boolean, Camera)}
+ * method will be called three times with zoom values 1, 2, and 3.
+ * Applications can call {@link #stopSmoothZoom} to stop the zoom earlier.
+ * Applications should not call startSmoothZoom again or change the zoom
+ * value before zoom stops. If the supplied zoom value equals the current
+ * zoom value, no zoom callback will be generated. This method is supported
+ * if {@link android.hardware.Camera.Parameters#isSmoothZoomSupported}
+ * returns true.
*
* @param value zoom value. The valid range is 0 to {@link
* android.hardware.Camera.Parameters#getMaxZoom}.
* @throws IllegalArgumentException if the zoom value is invalid.
* @throws RuntimeException if the method fails.
+ * @see #setZoomChangeListener(OnZoomChangeListener)
*/
public native final void startSmoothZoom(int value);
/**
- * Stops the smooth zoom. The applications should wait for the {@link
+ * Stops the smooth zoom. Applications should wait for the {@link
* OnZoomChangeListener} to know when the zoom is actually stopped. This
* method is supported if {@link
* android.hardware.Camera.Parameters#isSmoothZoomSupported} is true.
@@ -649,20 +798,21 @@
public native final void setDisplayOrientation(int degrees);
/**
- * Interface for a callback to be invoked when zoom value changes.
+ * Callback interface for zoom changes during a smooth zoom operation.
+ *
+ * @see #setZoomChangeListener(OnZoomChangeListener)
+ * @see #startSmoothZoom(int)
*/
public interface OnZoomChangeListener
{
/**
- * Called when the zoom value has changed.
+ * Called when the zoom value has changed during a smooth zoom.
*
* @param zoomValue the current zoom value. In smooth zoom mode, camera
* calls this for every new zoom value.
* @param stopped whether smooth zoom is stopped. If the value is true,
* this is the last zoom update for the application.
- *
* @param camera the Camera service object
- * @see #startSmoothZoom(int)
*/
void onZoomChange(int zoomValue, boolean stopped, Camera camera);
};
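
A sketch of a guarded smooth zoom request with its listener, per the capability checks named above:

    import android.hardware.Camera;

    void smoothZoomToThree(Camera camera) {
        Camera.Parameters params = camera.getParameters();
        if (params.isSmoothZoomSupported()) {
            camera.setZoomChangeListener(new Camera.OnZoomChangeListener() {
                public void onZoomChange(int zoomValue, boolean stopped, Camera c) {
                    if (stopped) {
                        // Last update for this request; a new zoom may start.
                    }
                }
            });
            camera.startSmoothZoom(Math.min(3, params.getMaxZoom()));
        }
    }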
@@ -679,15 +829,25 @@
mZoomListener = listener;
}
- // These match the enum in include/ui/Camera.h
- /** Unspecified camerar error. @see #ErrorCallback */
+ // Error codes match the enum in include/ui/Camera.h
+
+ /**
+ * Unspecified camera error.
+ * @see Camera.ErrorCallback
+ */
public static final int CAMERA_ERROR_UNKNOWN = 1;
- /** Media server died. In this case, the application must release the
- * Camera object and instantiate a new one. @see #ErrorCallback */
+
+ /**
+ * Media server died. In this case, the application must release the
+ * Camera object and instantiate a new one.
+ * @see Camera.ErrorCallback
+ */
public static final int CAMERA_ERROR_SERVER_DIED = 100;
/**
- * Handles the camera error callback.
+ * Callback interface for camera error notification.
+ *
+ * @see #setErrorCallback(ErrorCallback)
*/
public interface ErrorCallback
{
@@ -705,7 +865,7 @@
/**
* Registers a callback to be invoked when an error occurs.
- * @param cb The callback to run
+ * @param cb the callback to run
*/
public final void setErrorCallback(ErrorCallback cb)
{
@@ -716,16 +876,21 @@
private native final String native_getParameters();
/**
- * Sets the Parameters for pictures from this Camera service.
+ * Changes the settings for this Camera service.
*
* @param params the Parameters to use for this Camera service
+ * @see #getParameters()
*/
public void setParameters(Parameters params) {
native_setParameters(params.flatten());
}
/**
- * Returns the picture Parameters for this Camera service.
+ * Returns the current settings for this Camera service.
+ * If modifications are made to the returned Parameters, they must be passed
+ * to {@link #setParameters(Camera.Parameters)} to take effect.
+ *
+ * @see #setParameters(Camera.Parameters)
*/
public Parameters getParameters() {
Parameters p = new Parameters();
@@ -735,7 +900,7 @@
}
/**
- * Handles the picture size (dimensions).
+ * Image size (width and height dimensions).
*/
public class Size {
/**
@@ -774,18 +939,21 @@
};
/**
- * Handles the parameters for pictures created by a Camera service.
+ * Camera service settings.
*
* <p>To make camera parameters take effect, applications have to call
- * Camera.setParameters. For example, after setWhiteBalance is called, white
- * balance is not changed until Camera.setParameters() is called.
+ * {@link Camera#setParameters(Camera.Parameters)}. For example, after
+ * {@link Camera.Parameters#setWhiteBalance} is called, white balance is not
+ * actually changed until {@link Camera#setParameters(Camera.Parameters)}
+ * is called with the changed parameters object.
*
* <p>Different devices may have different camera capabilities, such as
* picture size or flash modes. The application should query the camera
* capabilities before setting parameters. For example, the application
- * should call getSupportedColorEffects before calling setEffect. If the
- * camera does not support color effects, getSupportedColorEffects will
- * return null.
+ * should call {@link Camera.Parameters#getSupportedColorEffects()} before
+ * calling {@link Camera.Parameters#setColorEffect(String)}. If the
+ * camera does not support color effects,
+ * {@link Camera.Parameters#getSupportedColorEffects()} will return null.
*/
public class Parameters {
// Parameter keys to communicate with the camera driver.
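
Following the query-before-set rule described above, a sketch using color effects as the example:

    import java.util.List;
    import android.hardware.Camera;

    void enableMonoEffect(Camera camera) {
        Camera.Parameters params = camera.getParameters();
        List<String> effects = params.getSupportedColorEffects();
        if (effects != null && effects.contains(Camera.Parameters.EFFECT_MONO)) {
            params.setColorEffect(Camera.Parameters.EFFECT_MONO);
            camera.setParameters(params);   // nothing changes until this call
        }
    }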
diff --git a/core/java/android/os/BatteryStats.java b/core/java/android/os/BatteryStats.java
index e3f3b87..0afd6cd 100644
--- a/core/java/android/os/BatteryStats.java
+++ b/core/java/android/os/BatteryStats.java
@@ -1237,10 +1237,9 @@
linePrefix);
if (!linePrefix.equals(": ")) {
sb.append(" realtime");
- } else {
- sb.append(": (nothing executed)");
+ // Only print out wake locks that were held
+ pw.println(sb.toString());
}
- pw.println(sb.toString());
}
}
}
@@ -1453,11 +1452,10 @@
"window", which, linePrefix);
if (!linePrefix.equals(": ")) {
sb.append(" realtime");
- } else {
- sb.append(": (nothing executed)");
+ // Only print out wake locks that were held
+ pw.println(sb.toString());
+ uidActivity = true;
}
- pw.println(sb.toString());
- uidActivity = true;
}
}
diff --git a/core/java/android/os/DropBoxManager.java b/core/java/android/os/DropBoxManager.java
index 4df33e0..23fdb0b 100644
--- a/core/java/android/os/DropBoxManager.java
+++ b/core/java/android/os/DropBoxManager.java
@@ -53,6 +53,9 @@
/** Flag value: Content can be decompressed with {@link java.util.zip.GZIPOutputStream}. */
public static final int IS_GZIPPED = 4;
+ /** Flag value for serialization only: Value is a byte array, not a file descriptor */
+ private static final int HAS_BYTE_ARRAY = 8;
+
/**
* A single entry retrieved from the drop box.
* This may include a reference to a stream, so you must call
@@ -68,12 +71,25 @@
/** Create a new empty Entry with no contents. */
public Entry(String tag, long millis) {
- this(tag, millis, (Object) null, IS_EMPTY);
+ if (tag == null) throw new NullPointerException("tag == null");
+
+ mTag = tag;
+ mTimeMillis = millis;
+ mData = null;
+ mFileDescriptor = null;
+ mFlags = IS_EMPTY;
}
/** Create a new Entry with plain text contents. */
public Entry(String tag, long millis, String text) {
- this(tag, millis, (Object) text.getBytes(), IS_TEXT);
+ if (tag == null) throw new NullPointerException("tag == null");
+ if (text == null) throw new NullPointerException("text == null");
+
+ mTag = tag;
+ mTimeMillis = millis;
+ mData = text.getBytes();
+ mFileDescriptor = null;
+ mFlags = IS_TEXT;
}
/**
@@ -81,7 +97,16 @@
* The data array must not be modified after creating this entry.
*/
public Entry(String tag, long millis, byte[] data, int flags) {
- this(tag, millis, (Object) data, flags);
+ if (tag == null) throw new NullPointerException("tag == null");
+ if (((flags & IS_EMPTY) != 0) != (data == null)) {
+ throw new IllegalArgumentException("Bad flags: " + flags);
+ }
+
+ mTag = tag;
+ mTimeMillis = millis;
+ mData = data;
+ mFileDescriptor = null;
+ mFlags = flags;
}
/**
@@ -89,7 +114,16 @@
* Takes ownership of the ParcelFileDescriptor.
*/
public Entry(String tag, long millis, ParcelFileDescriptor data, int flags) {
- this(tag, millis, (Object) data, flags);
+ if (tag == null) throw new NullPointerException("tag == null");
+ if (((flags & IS_EMPTY) != 0) != (data == null)) {
+ throw new IllegalArgumentException("Bad flags: " + flags);
+ }
+
+ mTag = tag;
+ mTimeMillis = millis;
+ mData = null;
+ mFileDescriptor = data;
+ mFlags = flags;
}
/**
@@ -97,31 +131,14 @@
* The file will be read when the entry's contents are requested.
*/
public Entry(String tag, long millis, File data, int flags) throws IOException {
- this(tag, millis, (Object) ParcelFileDescriptor.open(
- data, ParcelFileDescriptor.MODE_READ_ONLY), flags);
- }
-
- /** Internal constructor for CREATOR.createFromParcel(). */
- private Entry(String tag, long millis, Object value, int flags) {
- if (tag == null) throw new NullPointerException();
- if (((flags & IS_EMPTY) != 0) != (value == null)) throw new IllegalArgumentException();
+ if (tag == null) throw new NullPointerException("tag == null");
+ if ((flags & IS_EMPTY) != 0) throw new IllegalArgumentException("Bad flags: " + flags);
mTag = tag;
mTimeMillis = millis;
+ mData = null;
+ mFileDescriptor = ParcelFileDescriptor.open(data, ParcelFileDescriptor.MODE_READ_ONLY);
mFlags = flags;
-
- if (value == null) {
- mData = null;
- mFileDescriptor = null;
- } else if (value instanceof byte[]) {
- mData = (byte[]) value;
- mFileDescriptor = null;
- } else if (value instanceof ParcelFileDescriptor) {
- mData = null;
- mFileDescriptor = (ParcelFileDescriptor) value;
- } else {
- throw new IllegalArgumentException();
- }
}
/** Close the input stream associated with this entry. */
@@ -149,6 +166,7 @@
InputStream is = null;
try {
is = getInputStream();
+ if (is == null) return null;
byte[] buf = new byte[maxBytes];
return new String(buf, 0, Math.max(0, is.read(buf)));
} catch (IOException e) {
@@ -174,8 +192,14 @@
public static final Parcelable.Creator<Entry> CREATOR = new Parcelable.Creator() {
public Entry[] newArray(int size) { return new Entry[size]; }
public Entry createFromParcel(Parcel in) {
- return new Entry(
- in.readString(), in.readLong(), in.readValue(null), in.readInt());
+ String tag = in.readString();
+ long millis = in.readLong();
+ int flags = in.readInt();
+ if ((flags & HAS_BYTE_ARRAY) != 0) {
+ return new Entry(tag, millis, in.createByteArray(), flags & ~HAS_BYTE_ARRAY);
+ } else {
+ return new Entry(tag, millis, in.readFileDescriptor(), flags);
+ }
}
};
@@ -187,11 +211,12 @@
out.writeString(mTag);
out.writeLong(mTimeMillis);
if (mFileDescriptor != null) {
- out.writeValue(mFileDescriptor);
+ out.writeInt(mFlags & ~HAS_BYTE_ARRAY); // Clear bit just to be safe
+ mFileDescriptor.writeToParcel(out, flags);
} else {
- out.writeValue(mData);
+ out.writeInt(mFlags | HAS_BYTE_ARRAY);
+ out.writeByteArray(mData);
}
- out.writeInt(mFlags);
}
}
@@ -225,7 +250,7 @@
* @param flags describing the data
*/
public void addData(String tag, byte[] data, int flags) {
- if (data == null) throw new NullPointerException();
+ if (data == null) throw new NullPointerException("data == null");
try { mService.add(new Entry(tag, 0, data, flags)); } catch (RemoteException e) {}
}
@@ -239,7 +264,7 @@
* @throws IOException if the file can't be opened
*/
public void addFile(String tag, File file, int flags) throws IOException {
- if (file == null) throw new NullPointerException();
+ if (file == null) throw new NullPointerException("file == null");
Entry entry = new Entry(tag, 0, file, flags);
try {
mService.add(entry);
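
For context, a sketch of the public entry points backed by these constructors, assuming a Context named context and an arbitrary byte payload:

    import java.io.File;
    import java.io.IOException;
    import android.content.Context;
    import android.os.DropBoxManager;

    void report(Context context, byte[] payload) throws IOException {
        DropBoxManager dropbox =
                (DropBoxManager) context.getSystemService(Context.DROPBOX_SERVICE);
        dropbox.addText("my_tag", "something notable happened");  // text entry
        dropbox.addData("my_tag", payload, 0);                    // byte[] entry
        dropbox.addFile("my_tag", new File("/data/local/tmp/log.gz"),
                DropBoxManager.IS_GZIPPED);                       // FD-backed entry
    }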
diff --git a/core/java/android/os/Parcel.java b/core/java/android/os/Parcel.java
index b9c9565..31f8719 100644
--- a/core/java/android/os/Parcel.java
+++ b/core/java/android/os/Parcel.java
@@ -440,6 +440,12 @@
/**
* Write a FileDescriptor into the parcel at the current dataPosition(),
* growing dataCapacity() if needed.
+ *
+ * <p class="caution">The file descriptor will not be closed, which may
+ * result in file descriptor leaks when objects are returned from Binder
+ * calls. Use {@link ParcelFileDescriptor#writeToParcel} instead, which
+ * accepts contextual flags and will close the original file descriptor
+ * if {@link Parcelable#PARCELABLE_WRITE_RETURN_VALUE} is set.</p>
*/
public final native void writeFileDescriptor(FileDescriptor val);
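
A sketch of the recommended alternative, inside a hypothetical Parcelable holding a ParcelFileDescriptor field mPfd:

    import android.os.Parcel;
    import android.os.ParcelFileDescriptor;

    public void writeToParcel(Parcel out, int flags) {
        // Forwarding the contextual flags lets ParcelFileDescriptor close the
        // original descriptor when PARCELABLE_WRITE_RETURN_VALUE is set.
        mPfd.writeToParcel(out, flags);
    }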
@@ -1003,7 +1009,7 @@
/**
* Flatten a generic object in to a parcel. The given Object value may
* currently be one of the following types:
- *
+ *
* <ul>
* <li> null
* <li> String
@@ -1026,7 +1032,7 @@
* <li> Parcelable[]
* <li> CharSequence (as supported by {@link TextUtils#writeToParcel}).
* <li> List (as supported by {@link #writeList}).
- * <li> {@link SparseArray} (as supported by {@link #writeSparseArray}).
+ * <li> {@link SparseArray} (as supported by {@link #writeSparseArray(SparseArray)}).
* <li> {@link IBinder}
* <li> Any object that implements Serializable (but see
* {@link #writeSerializable} for caveats). Note that all of the
@@ -1035,6 +1041,13 @@
* approach is much less efficient and should be avoided whenever
* possible.
* </ul>
+ *
+ * <p class="caution">{@link Parcelable} objects are written with
+ * {@link Parcelable#writeToParcel} using contextual flags of 0. When
+ * serializing objects containing {@link ParcelFileDescriptor}s,
+ * this may result in file descriptor leaks when they are returned from
+ * Binder calls (where {@link Parcelable#PARCELABLE_WRITE_RETURN_VALUE}
+ * should be used).</p>
*/
public final void writeValue(Object v) {
if (v == null) {
@@ -1123,7 +1136,7 @@
/**
* Flatten the name of the class of the Parcelable and its contents
* into the parcel.
- *
+ *
* @param p The Parcelable object to be written.
* @param parcelableFlags Contextual flags as per
* {@link Parcelable#writeToParcel(Parcel, int) Parcelable.writeToParcel()}.
diff --git a/core/java/android/os/ParcelFileDescriptor.java b/core/java/android/os/ParcelFileDescriptor.java
index d26f066..9d213b3 100644
--- a/core/java/android/os/ParcelFileDescriptor.java
+++ b/core/java/android/os/ParcelFileDescriptor.java
@@ -250,6 +250,11 @@
return Parcelable.CONTENTS_FILE_DESCRIPTOR;
}
+ /**
+ * {@inheritDoc}
+ * If {@link Parcelable#PARCELABLE_WRITE_RETURN_VALUE} is set in flags,
+ * the file descriptor will be closed after a copy is written to the Parcel.
+ */
public void writeToParcel(Parcel out, int flags) {
out.writeFileDescriptor(mFileDescriptor);
if ((flags&PARCELABLE_WRITE_RETURN_VALUE) != 0 && !mClosed) {
diff --git a/core/java/com/google/android/mms/pdu/PduParser.java b/core/java/com/google/android/mms/pdu/PduParser.java
index 1cd118b..21f0c93 100644
--- a/core/java/com/google/android/mms/pdu/PduParser.java
+++ b/core/java/com/google/android/mms/pdu/PduParser.java
@@ -157,9 +157,11 @@
}
String ctTypeStr = new String(contentType);
if (ctTypeStr.equals(ContentType.MULTIPART_MIXED)
- || ctTypeStr.equals(ContentType.MULTIPART_RELATED)) {
+ || ctTypeStr.equals(ContentType.MULTIPART_RELATED)
+ || ctTypeStr.equals(ContentType.MULTIPART_ALTERNATIVE)) {
// The MMS content type must be "application/vnd.wap.multipart.mixed"
// or "application/vnd.wap.multipart.related"
+ // or "application/vnd.wap.multipart.alternative"
return retrieveConf;
}
return null;
diff --git a/core/java/com/google/android/mms/pdu/PduPersister.java b/core/java/com/google/android/mms/pdu/PduPersister.java
index d4ac24a..9fdd204 100644
--- a/core/java/com/google/android/mms/pdu/PduPersister.java
+++ b/core/java/com/google/android/mms/pdu/PduPersister.java
@@ -422,7 +422,8 @@
// Store simple string values directly in the database instead of an
// external file. This makes the text searchable and retrieval slightly
// faster.
- if ("text/plain".equals(type) || "application/smil".equals(type)) {
+ if (ContentType.TEXT_PLAIN.equals(type) || ContentType.APP_SMIL.equals(type)
+ || ContentType.TEXT_HTML.equals(type)) {
String text = c.getString(PART_COLUMN_TEXT);
byte [] blob = new EncodedStringValue(text != null ? text : "")
.getTextString();
@@ -736,7 +737,9 @@
try {
byte[] data = part.getData();
- if ("text/plain".equals(contentType) || "application/smil".equals(contentType)) {
+ if (ContentType.TEXT_PLAIN.equals(contentType)
+ || ContentType.APP_SMIL.equals(contentType)
+ || ContentType.TEXT_HTML.equals(contentType)) {
ContentValues cv = new ContentValues();
cv.put(Telephony.Mms.Part.TEXT, new EncodedStringValue(data).getString());
if (mContentResolver.update(uri, cv, null, null) != 1) {
diff --git a/core/jni/android_media_AudioTrack.cpp b/core/jni/android_media_AudioTrack.cpp
index ce43e73..c559670 100644
--- a/core/jni/android_media_AudioTrack.cpp
+++ b/core/jni/android_media_AudioTrack.cpp
@@ -360,6 +360,7 @@
if (lpTrack == NULL ) {
jniThrowException(env, "java/lang/IllegalStateException",
"Unable to retrieve AudioTrack pointer for start()");
+ return;
}
lpTrack->start();
@@ -375,6 +376,7 @@
if (lpTrack == NULL ) {
jniThrowException(env, "java/lang/IllegalStateException",
"Unable to retrieve AudioTrack pointer for stop()");
+ return;
}
lpTrack->stop();
@@ -390,6 +392,7 @@
if (lpTrack == NULL ) {
jniThrowException(env, "java/lang/IllegalStateException",
"Unable to retrieve AudioTrack pointer for pause()");
+ return;
}
lpTrack->pause();
@@ -405,6 +408,7 @@
if (lpTrack == NULL ) {
jniThrowException(env, "java/lang/IllegalStateException",
"Unable to retrieve AudioTrack pointer for flush()");
+ return;
}
lpTrack->flush();
@@ -419,6 +423,7 @@
if (lpTrack == NULL ) {
jniThrowException(env, "java/lang/IllegalStateException",
"Unable to retrieve AudioTrack pointer for setVolume()");
+ return;
}
lpTrack->setVolume(leftVol, rightVol);
@@ -515,6 +520,7 @@
if (lpTrack == NULL) {
jniThrowException(env, "java/lang/IllegalStateException",
"Unable to retrieve AudioTrack pointer for write()");
+ return 0;
}
// get the pointer for the audio data from the java array
@@ -801,6 +807,36 @@
return minBuffSize;
}
+// ----------------------------------------------------------------------------
+static void
+android_media_AudioTrack_setAuxEffectSendLevel(JNIEnv *env, jobject thiz, jfloat level )
+{
+ AudioTrack *lpTrack = (AudioTrack *)env->GetIntField(
+ thiz, javaAudioTrackFields.nativeTrackInJavaObj);
+ if (lpTrack == NULL ) {
+ jniThrowException(env, "java/lang/IllegalStateException",
+ "Unable to retrieve AudioTrack pointer for setAuxEffectSendLevel()");
+ return;
+ }
+
+ lpTrack->setAuxEffectSendLevel(level);
+}
+
+// ----------------------------------------------------------------------------
+static jint android_media_AudioTrack_attachAuxEffect(JNIEnv *env, jobject thiz,
+ jint effectId) {
+
+ AudioTrack *lpTrack = (AudioTrack *)env->GetIntField(
+ thiz, javaAudioTrackFields.nativeTrackInJavaObj);
+
+ if (lpTrack) {
+ return android_media_translateErrorCode( lpTrack->attachAuxEffect(effectId) );
+ } else {
+ jniThrowException(env, "java/lang/IllegalStateException",
+ "Unable to retrieve AudioTrack pointer for attachAuxEffect()");
+ return AUDIOTRACK_ERROR;
+ }
+}
// ----------------------------------------------------------------------------
// ----------------------------------------------------------------------------
@@ -837,6 +873,10 @@
"(I)I", (void *)android_media_AudioTrack_get_output_sample_rate},
{"native_get_min_buff_size",
"(III)I", (void *)android_media_AudioTrack_get_min_buff_size},
+ {"native_setAuxEffectSendLevel",
+ "(F)V", (void *)android_media_AudioTrack_setAuxEffectSendLevel},
+ {"native_attachAuxEffect",
+ "(I)I", (void *)android_media_AudioTrack_attachAuxEffect},
};
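
These JNI entry points back the Java-level AudioTrack.setAuxEffectSendLevel() and attachAuxEffect() methods. A sketch of expected use from Java, assuming an initialized AudioTrack; audio session 0 places the effect on the output mix (SESSION_OUTPUT_MIX on the native side):

    import android.media.AudioTrack;
    import android.media.audiofx.EnvironmentalReverb;

    void addReverbSend(AudioTrack track) {
        EnvironmentalReverb reverb = new EnvironmentalReverb(0, 0);  // priority 0, session 0
        reverb.setEnabled(true);
        track.attachAuxEffect(reverb.getId());
        track.setAuxEffectSendLevel(1.0f);   // send level must be <= 1.0
    }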
diff --git a/docs/html/guide/topics/resources/drawable-resource.jd b/docs/html/guide/topics/resources/drawable-resource.jd
index 1e4cca7..1c3cc4d 100644
--- a/docs/html/guide/topics/resources/drawable-resource.jd
+++ b/docs/html/guide/topics/resources/drawable-resource.jd
@@ -12,9 +12,10 @@
</div>
</div>
-<p>A drawable resource is a general concept for a graphic that you
-can retrieve with {@link android.content.res.Resources#getDrawable(int)}
-and draw on the screen. There are several different types of drawables:</p>
+<p>A drawable resource is a general concept for a graphic that can be drawn to the screen and which
+you can retrieve with APIs such as {@link android.content.res.Resources#getDrawable(int)} or apply
+to another XML resource with attributes such as {@code android:drawable} and {@code android:icon}.
+There are several different types of drawables:</p>
<dl>
<dt><a href="#Bitmap">Bitmap File</a><dt>
<dd>A bitmap graphic file ({@code .png}, {@code .jpg}, or {@code .gif}).
@@ -51,6 +52,12 @@
<p>Also see the <a href="animation-resource.html">Animation Resource</a> document for how to
create an {@link android.graphics.drawable.AnimationDrawable}.</p>
+<p class="note"><strong>Note:</strong> A <a
+href="{@docRoot}guide/topics/resources/more-resources.html#Color">color resource</a> can also be
+used as a drawable in XML. For example, when creating a <a href="#StateList">state list
+drawable</a>, you can reference a color resource for the {@code android:drawable} attribute ({@code
+android:drawable="@color/green"}).</p>
+
diff --git a/docs/html/guide/topics/resources/more-resources.jd b/docs/html/guide/topics/resources/more-resources.jd
index 22abbb2..1f03446 100644
--- a/docs/html/guide/topics/resources/more-resources.jd
+++ b/docs/html/guide/topics/resources/more-resources.jd
@@ -114,8 +114,9 @@
<h2 id="Color">Color</h2>
<p>A color value defined in XML.
-The color is specified with an RGB value and alpha channel. You can use color resource
-any place that accepts a hexadecimal color value.</p>
+The color is specified with an RGB value and alpha channel. You can use a color resource
+any place that accepts a hexadecimal color value. You can also use a color resource when a
+drawable resource is expected in XML (for example, {@code android:drawable="@color/green"}).</p>
<p>The value always begins with a pound (#) character and then followed by the
Alpha-Red-Green-Blue information in one of the following formats:</p>
diff --git a/include/media/AudioSystem.h b/include/media/AudioSystem.h
index 194f23a..9fd905f 100644
--- a/include/media/AudioSystem.h
+++ b/include/media/AudioSystem.h
@@ -168,6 +168,15 @@
TX_DISABLE = 0
};
+ // special audio session values
+ enum audio_sessions {
+ SESSION_OUTPUT_STAGE = -1, // session for effects attached to a particular output stream
+ // (value must be less than 0)
+ SESSION_OUTPUT_MIX = 0, // session for effects applied to output mix. These effects can
+ // be moved by audio policy manager to another output stream
+ // (value must be 0)
+ };
+
/* These are static methods to control the system-wide AudioFlinger
* only privileged processes can have access to them
*/
@@ -353,8 +362,12 @@
uint32_t format = FORMAT_DEFAULT,
uint32_t channels = CHANNEL_OUT_STEREO,
output_flags flags = OUTPUT_FLAG_INDIRECT);
- static status_t startOutput(audio_io_handle_t output, AudioSystem::stream_type stream);
- static status_t stopOutput(audio_io_handle_t output, AudioSystem::stream_type stream);
+ static status_t startOutput(audio_io_handle_t output,
+ AudioSystem::stream_type stream,
+ int session = 0);
+ static status_t stopOutput(audio_io_handle_t output,
+ AudioSystem::stream_type stream,
+ int session = 0);
static void releaseOutput(audio_io_handle_t output);
static audio_io_handle_t getInput(int inputSource,
uint32_t samplingRate = 0,
@@ -370,6 +383,16 @@
static status_t setStreamVolumeIndex(stream_type stream, int index);
static status_t getStreamVolumeIndex(stream_type stream, int *index);
+ static uint32_t getStrategyForStream(stream_type stream);
+
+ static audio_io_handle_t getOutputForEffect(effect_descriptor_t *desc);
+ static status_t registerEffect(effect_descriptor_t *desc,
+ audio_io_handle_t output,
+ uint32_t strategy,
+ int session,
+ int id);
+ static status_t unregisterEffect(int id);
+
static const sp<IAudioPolicyService>& get_audio_policy_service();
// ----------------------------------------------------------------------------
diff --git a/include/media/AudioTrack.h b/include/media/AudioTrack.h
index ef537f4..4475d4a 100644
--- a/include/media/AudioTrack.h
+++ b/include/media/AudioTrack.h
@@ -261,8 +261,8 @@
/* set the send level for this track. An auxiliary effect should be attached
* to the track with attachEffect(). Level must be <= 1.0.
*/
- status_t setSendLevel(float level);
- void getSendLevel(float* level);
+ status_t setAuxEffectSendLevel(float level);
+ void getAuxEffectSendLevel(float* level);
/* set sample rate for this track, mostly used for games' sound effects
*/
@@ -479,6 +479,7 @@
uint32_t mUpdatePeriod;
uint32_t mFlags;
int mSessionId;
+ int mAuxEffectId;
};
diff --git a/include/media/IAudioFlinger.h b/include/media/IAudioFlinger.h
index 5814fd6..70e505e 100644
--- a/include/media/IAudioFlinger.h
+++ b/include/media/IAudioFlinger.h
@@ -161,6 +161,8 @@
status_t *status,
int *id,
int *enabled) = 0;
+
+ virtual status_t moveEffects(int session, int srcOutput, int dstOutput) = 0;
};
diff --git a/include/media/IAudioPolicyService.h b/include/media/IAudioPolicyService.h
index 4804bbd..49eee59 100644
--- a/include/media/IAudioPolicyService.h
+++ b/include/media/IAudioPolicyService.h
@@ -53,8 +53,12 @@
uint32_t format = AudioSystem::FORMAT_DEFAULT,
uint32_t channels = 0,
AudioSystem::output_flags flags = AudioSystem::OUTPUT_FLAG_INDIRECT) = 0;
- virtual status_t startOutput(audio_io_handle_t output, AudioSystem::stream_type stream) = 0;
- virtual status_t stopOutput(audio_io_handle_t output, AudioSystem::stream_type stream) = 0;
+ virtual status_t startOutput(audio_io_handle_t output,
+ AudioSystem::stream_type stream,
+ int session = 0) = 0;
+ virtual status_t stopOutput(audio_io_handle_t output,
+ AudioSystem::stream_type stream,
+ int session = 0) = 0;
virtual void releaseOutput(audio_io_handle_t output) = 0;
virtual audio_io_handle_t getInput(int inputSource,
uint32_t samplingRate = 0,
@@ -69,6 +73,14 @@
int indexMax) = 0;
virtual status_t setStreamVolumeIndex(AudioSystem::stream_type stream, int index) = 0;
virtual status_t getStreamVolumeIndex(AudioSystem::stream_type stream, int *index) = 0;
+ virtual uint32_t getStrategyForStream(AudioSystem::stream_type stream) = 0;
+ virtual audio_io_handle_t getOutputForEffect(effect_descriptor_t *desc) = 0;
+ virtual status_t registerEffect(effect_descriptor_t *desc,
+ audio_io_handle_t output,
+ uint32_t strategy,
+ int session,
+ int id) = 0;
+ virtual status_t unregisterEffect(int id) = 0;
};
diff --git a/include/media/IMediaPlayer.h b/include/media/IMediaPlayer.h
index 2619691..af9a7ed 100644
--- a/include/media/IMediaPlayer.h
+++ b/include/media/IMediaPlayer.h
@@ -48,6 +48,8 @@
virtual status_t setVolume(float leftVolume, float rightVolume) = 0;
virtual status_t suspend() = 0;
virtual status_t resume() = 0;
+ virtual status_t setAuxEffectSendLevel(float level) = 0;
+ virtual status_t attachAuxEffect(int effectId) = 0;
// Invoke a generic method on the player by using opaque parcels
// for the request and reply.
diff --git a/include/media/mediaplayer.h b/include/media/mediaplayer.h
index 4963f73..207191d 100644
--- a/include/media/mediaplayer.h
+++ b/include/media/mediaplayer.h
@@ -173,6 +173,8 @@
status_t resume();
status_t setAudioSessionId(int sessionId);
int getAudioSessionId();
+ status_t setAuxEffectSendLevel(float level);
+ status_t attachAuxEffect(int effectId);
private:
void clear_l();
status_t seekTo_l(int msec);
@@ -200,6 +202,7 @@
int mVideoWidth;
int mVideoHeight;
int mAudioSessionId;
+ float mSendLevel;
};
}; // namespace android
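
The native MediaPlayer mirrors the Java additions further below. A hypothetical usage, assuming an effect id obtained from an effect created on the player's audio session (fd, length and effectId are illustrative inputs, not part of this change):

    // Sketch only: `fd`, `length` and `effectId` are assumed to exist.
    sp<MediaPlayer> player = new MediaPlayer();
    player->setDataSource(fd, 0, length);   // attachAuxEffect requires a data source first
    player->prepare();
    player->attachAuxEffect(effectId);      // an id of 0 detaches
    player->setAuxEffectSendLevel(0.25f);   // raw scalar in [0, 1.0]; defaults to 0
    player->start();
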
diff --git a/include/media/stagefright/AudioPlayer.h b/include/media/stagefright/AudioPlayer.h
index 9af5871..9a09586 100644
--- a/include/media/stagefright/AudioPlayer.h
+++ b/include/media/stagefright/AudioPlayer.h
@@ -86,6 +86,10 @@
bool mStarted;
+ bool mIsFirstBuffer;
+ status_t mFirstBufferResult;
+ MediaBuffer *mFirstBuffer;
+
sp<MediaPlayerBase::AudioSink> mAudioSink;
static void AudioCallback(int event, void *user, void *info);
diff --git a/include/media/stagefright/MPEG4Writer.h b/include/media/stagefright/MPEG4Writer.h
index 232583a..d0c2dca 100644
--- a/include/media/stagefright/MPEG4Writer.h
+++ b/include/media/stagefright/MPEG4Writer.h
@@ -97,6 +97,7 @@
inline size_t write(const void *ptr, size_t size, size_t nmemb, FILE* stream);
bool exceedsFileSizeLimit();
bool exceedsFileDurationLimit();
+ void trackProgressStatus(const Track* track, int64_t timeUs, status_t err = OK);
MPEG4Writer(const MPEG4Writer &);
MPEG4Writer &operator=(const MPEG4Writer &);
diff --git a/include/media/stagefright/MediaSource.h b/include/media/stagefright/MediaSource.h
index 9cc94c8..a31395e 100644
--- a/include/media/stagefright/MediaSource.h
+++ b/include/media/stagefright/MediaSource.h
@@ -62,14 +62,21 @@
// a) not request a seek
// b) not be late, i.e. lateness_us = 0
struct ReadOptions {
+ enum SeekMode {
+ SEEK_PREVIOUS_SYNC,
+ SEEK_NEXT_SYNC,
+ SEEK_CLOSEST_SYNC,
+ SEEK_CLOSEST,
+ };
+
ReadOptions();
// Reset everything back to defaults.
void reset();
- void setSeekTo(int64_t time_us);
+ void setSeekTo(int64_t time_us, SeekMode mode = SEEK_CLOSEST_SYNC);
void clearSeekTo();
- bool getSeekTo(int64_t *time_us) const;
+ bool getSeekTo(int64_t *time_us, SeekMode *mode) const;
void setLateBy(int64_t lateness_us);
int64_t getLateBy() const;
@@ -81,6 +88,7 @@
uint32_t mOptions;
int64_t mSeekTimeUs;
+ SeekMode mSeekMode;
int64_t mLatenessUs;
};
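
A seek request now carries a mode. With SEEK_CLOSEST the extractor is expected to return data starting at the preceding sync sample and to publish the originally requested time as kKeyTargetTime (see the MPEG4Extractor and OMXCodec hunks below), so a decoder can drop output until that target. A minimal caller-side sketch, assuming an already-started sp<MediaSource> named source:

    // Sketch only; error handling elided.
    MediaSource::ReadOptions options;
    options.setSeekTo(seekTimeUs, MediaSource::ReadOptions::SEEK_CLOSEST);

    MediaBuffer *buffer;
    status_t err = source->read(&buffer, &options);
    options.clearSeekTo();  // the seek must only apply to the first read

    if (err == OK) {
        int64_t timeUs, targetTimeUs;
        CHECK(buffer->meta_data()->findInt64(kKeyTime, &timeUs));
        if (buffer->meta_data()->findInt64(kKeyTargetTime, &targetTimeUs)) {
            // decode and discard frames until timeUs reaches targetTimeUs
        }
        buffer->release();
    }
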
diff --git a/include/media/stagefright/MetaData.h b/include/media/stagefright/MetaData.h
index cdbf483..e631c7f 100644
--- a/include/media/stagefright/MetaData.h
+++ b/include/media/stagefright/MetaData.h
@@ -46,6 +46,7 @@
kKeyIsSyncFrame = 'sync', // int32_t (bool)
kKeyIsCodecConfig = 'conf', // int32_t (bool)
kKeyTime = 'time', // int64_t (usecs)
+ kKeyTargetTime = 'tarT', // int64_t (usecs)
kKeyDuration = 'dura', // int64_t (usecs)
kKeyColorFormat = 'colf',
kKeyPlatformPrivate = 'priv', // pointer
diff --git a/include/media/stagefright/OMXCodec.h b/include/media/stagefright/OMXCodec.h
index 214f43a..79e7a2f 100644
--- a/include/media/stagefright/OMXCodec.h
+++ b/include/media/stagefright/OMXCodec.h
@@ -141,6 +141,8 @@
bool mNoMoreOutputData;
bool mOutputPortSettingsHaveChanged;
int64_t mSeekTimeUs;
+ ReadOptions::SeekMode mSeekMode;
+ int64_t mTargetTimeUs;
MediaBuffer *mLeftOverBuffer;
diff --git a/media/java/android/media/AudioTrack.java b/media/java/android/media/AudioTrack.java
index 079c41f..6360541 100644
--- a/media/java/android/media/AudioTrack.java
+++ b/media/java/android/media/AudioTrack.java
@@ -963,6 +963,65 @@
return native_reload_static();
}
+ //--------------------------------------------------------------------------
+ // Audio effects management
+ //--------------------
+
+ /**
+ * Attaches an auxiliary effect to the audio track. A typical auxiliary effect is a
+ * reverberation effect which can be applied on any sound source that directs a certain
+ * amount of its energy to this effect. This amount is defined by
+ * {@link #setAuxEffectSendLevel(float)}.
+ // TODO when AudioEffect is unhidden
+ * <p>After creating an auxiliary effect (e.g. {_at_link android.media.EnvironmentalReverb}),
+ * retrieve its ID with {_at_link android.media.AudioEffect#getId()} and use it when calling
+ * this method to attach the audio track to the effect.
+ * <p>To detach the effect from the audio track, call this method with an effect id of 0.
+ *
+ * @param effectId system-wide unique id of the effect to attach
+ * @return error code or success, see {@link #SUCCESS},
+ * {@link #ERROR_INVALID_OPERATION}, {@link #ERROR_BAD_VALUE}
+ // FIXME: unhide.
+ * @hide
+ */
+ public int attachAuxEffect(int effectId) {
+ if (mState != STATE_INITIALIZED) {
+ return ERROR_INVALID_OPERATION;
+ }
+ return native_attachAuxEffect(effectId);
+ }
+
+ /**
+ * Sets the send level of the audio track to the attached auxiliary effect
+ * (see {@link #attachAuxEffect(int)}). The level value range is 0 to 1.0.
+ * <p>By default the send level is 0, so even if an effect is attached to the audio
+ * track this method must be called for the effect to be applied.
+ * <p>Note that the passed level value is a raw scalar. UI controls should be scaled
+ * logarithmically: the gain applied by the audio framework ranges from -72dB to 0dB,
+ * so an appropriate conversion from linear UI input x to level is:
+ * x == 0 -> level = 0
+ * 0 < x <= R -> level = 10^(72*(x-R)/20/R)
+ *
+ * @param level send level scalar
+ * @return error code or success, see {@link #SUCCESS},
+ * {@link #ERROR_INVALID_OPERATION}
+ // FIXME: unhide.
+ * @hide
+ */
+ public int setAuxEffectSendLevel(float level) {
+ if (mState != STATE_INITIALIZED) {
+ return ERROR_INVALID_OPERATION;
+ }
+ // clamp the level
+ if (level < getMinVolume()) {
+ level = getMinVolume();
+ }
+ if (level > getMaxVolume()) {
+ level = getMaxVolume();
+ }
+ native_setAuxEffectSendLevel(level);
+ return SUCCESS;
+ }
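
The dB-to-linear mapping in the comment above is easy to get wrong in UI code; a minimal sketch of the suggested conversion, assuming a slider range R (the helper name is illustrative, not framework API):

    #include <math.h>

    // Maps a linear slider position x in [0, R] onto the raw send-level
    // scalar, per the -72dB..0dB range documented above.
    static float sliderToSendLevel(float x, float R) {
        if (x <= 0.0f) return 0.0f;                         // x == 0 -> level = 0
        if (x > R) x = R;                                   // clamp to slider range
        return powf(10.0f, 72.0f * (x - R) / (20.0f * R));  // 10^(72*(x-R)/20/R)
    }
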
//---------------------------------------------------------
// Interface definitions
@@ -1123,6 +1182,9 @@
private native final int native_get_session_id();
+ private native final int native_attachAuxEffect(int effectId);
+ private native final void native_setAuxEffectSendLevel(float level);
+
//---------------------------------------------------------
// Utility methods
//------------------
diff --git a/media/java/android/media/MediaPlayer.java b/media/java/android/media/MediaPlayer.java
index 8caa07a..e1f95b2 100644
--- a/media/java/android/media/MediaPlayer.java
+++ b/media/java/android/media/MediaPlayer.java
@@ -423,7 +423,7 @@
* <td>Successful invoke of this method in a valid state transfers the
* object to the <em>Stopped</em> state. Calling this method in an
* invalid state transfers the object to the <em>Error</em> state.</p></td></tr>
- * <tr><td>setAudioSessionId </p></td>
+ * <tr><td>setAudioSessionId </p></td>
* <td>{Idle} </p></td>
* <td>{Initialized, Prepared, Started, Paused, Stopped, PlaybackCompleted,
* Error} </p></td>
@@ -434,6 +434,15 @@
* <td>{} </p></td>
* <td>This method can be called in any state and calling it does not change
* the object state. </p></td></tr>
+ * <tr><td>attachAuxEffect </p></td>
+ * <td>{Initialized, Prepared, Started, Paused, Stopped, PlaybackCompleted} </p></td>
+ * <td>{Idle, Error} </p></td>
+ * <td>This method must be called after setDataSource.
+ * Calling it does not change the object state. </p></td></tr>
+ * <tr><td>setAuxEffectSendLevel </p></td>
+ * <td>any</p></td>
+ * <td>{} </p></td>
+ * <td>Calling this method does not change the object state. </p></td></tr>
*
* </table>
*
@@ -1187,7 +1196,7 @@
* @throws IllegalStateException if it is called in an invalid state
*
// FIXME: unhide.
- // FIXME: link to AudioEffect class when public.
+ // TODO when AudioEffect is unhidden
* @hide
*/
public native void setAudioSessionId(int sessionId) throws IllegalArgumentException, IllegalStateException;
@@ -1203,6 +1212,41 @@
public native int getAudioSessionId();
/**
+ * Attaches an auxiliary effect to the player. A typical auxiliary effect is a reverberation
+ * effect which can be applied on any sound source that directs a certain amount of its
+ * energy to this effect. This amount is defined by
+ * {@link #setAuxEffectSendLevel(float)}.
+ // TODO when AudioEffect is unhidden
+ * <p>After creating an auxiliary effect (e.g. {_at_link android.media.EnvironmentalReverb}),
+ * retrieve its ID with {_at_link android.media.AudioEffect#getId()} and use it when calling
+ * this method to attach the player to the effect.
+ * <p>To detach the effect from the player, call this method with an effect id of 0.
+ * <p>This method must be called after one of the overloaded <code>setDataSource</code>
+ * methods.
+ *
+ * @param effectId system-wide unique id of the effect to attach
+ // FIXME: unhide.
+ * @hide
+ */
+ public native void attachAuxEffect(int effectId);
+
+ /**
+ * Sets the send level of the player to the attached auxiliary effect
+ * (see {@link #attachAuxEffect(int)}). The level value range is 0 to 1.0.
+ * <p>By default the send level is 0, so even if an effect is attached to the player
+ * this method must be called for the effect to be applied.
+ * <p>Note that the passed level value is a raw scalar. UI controls should be scaled
+ * logarithmically: the gain applied by the audio framework ranges from -72dB to 0dB,
+ * so an appropriate conversion from linear UI input x to level is:
+ * x == 0 -> level = 0
+ * 0 < x <= R -> level = 10^(72*(x-R)/20/R)
+ * @param level send level scalar
+ // FIXME: unhide.
+ * @hide
+ */
+ public native void setAuxEffectSendLevel(float level);
+
+ /**
* @param request Parcel destinated to the media player. The
* Interface token must be set to the IMediaPlayer
* one to be routed correctly through the system.
diff --git a/media/jni/android_media_MediaPlayer.cpp b/media/jni/android_media_MediaPlayer.cpp
index aedb54a..474a174 100644
--- a/media/jni/android_media_MediaPlayer.cpp
+++ b/media/jni/android_media_MediaPlayer.cpp
@@ -714,6 +714,28 @@
return mp->getAudioSessionId();
}
+static void
+android_media_MediaPlayer_setAuxEffectSendLevel(JNIEnv *env, jobject thiz, jfloat level)
+{
+ LOGV("setAuxEffectSendLevel: level %f", level);
+ sp<MediaPlayer> mp = getMediaPlayer(env, thiz);
+ if (mp == NULL ) {
+ jniThrowException(env, "java/lang/IllegalStateException", NULL);
+ return;
+ }
+ process_media_player_call( env, thiz, mp->setAuxEffectSendLevel(level), NULL, NULL );
+}
+
+static void android_media_MediaPlayer_attachAuxEffect(JNIEnv *env, jobject thiz, jint effectId) {
+ LOGV("attachAuxEffect(): %d", sessionId);
+ sp<MediaPlayer> mp = getMediaPlayer(env, thiz);
+ if (mp == NULL ) {
+ jniThrowException(env, "java/lang/IllegalStateException", NULL);
+ return;
+ }
+ process_media_player_call( env, thiz, mp->attachAuxEffect(effectId), NULL, NULL );
+}
+
// ----------------------------------------------------------------------------
static JNINativeMethod gMethods[] = {
@@ -748,6 +770,8 @@
{"native_suspend_resume", "(Z)I", (void *)android_media_MediaPlayer_native_suspend_resume},
{"getAudioSessionId", "()I", (void *)android_media_MediaPlayer_get_audio_session_id},
{"setAudioSessionId", "(I)V", (void *)android_media_MediaPlayer_set_audio_session_id},
+ {"setAuxEffectSendLevel", "(F)V", (void *)android_media_MediaPlayer_setAuxEffectSendLevel},
+ {"attachAuxEffect", "(I)V", (void *)android_media_MediaPlayer_attachAuxEffect},
};
static const char* const kClassPathName = "android/media/MediaPlayer";
diff --git a/media/libeffects/factory/EffectsFactory.c b/media/libeffects/factory/EffectsFactory.c
index edd6184..0be280c 100644
--- a/media/libeffects/factory/EffectsFactory.c
+++ b/media/libeffects/factory/EffectsFactory.c
@@ -31,7 +31,7 @@
static list_elem_t *gCurEffect; // current effect in enumeration process
static uint32_t gCurEffectIdx; // current effect index in enumeration process
-static const char * const gEffectLibPath = "/system/lib/soundfx"; // path to built-in effect libraries
+const char * const gEffectLibPath = "/system/lib/soundfx"; // path to built-in effect libraries
static int gInitDone; // true if global initialization has been performed
static int gNextLibId; // used by loadLibrary() to allocate unique library handles
static int gCanQueryEffect; // indicates that call to EffectQueryEffect() is valid, i.e. that the list of effects
diff --git a/media/libeffects/lvm/wrapper/Bundle/EffectBundle.cpp b/media/libeffects/lvm/wrapper/Bundle/EffectBundle.cpp
index 3bbcf55..9e39e79 100644
--- a/media/libeffects/lvm/wrapper/Bundle/EffectBundle.cpp
+++ b/media/libeffects/lvm/wrapper/Bundle/EffectBundle.cpp
@@ -18,7 +18,7 @@
#define LOG_TAG "Bundle"
#define ARRAY_SIZE(array) (sizeof array / sizeof array[0])
#define LVM_BUNDLE // Include all the bundle code
-#define LOG_NDEBUG 0
+//#define LOG_NDEBUG 0
#include <cutils/log.h>
#include <assert.h>
diff --git a/media/libmedia/AudioSystem.cpp b/media/libmedia/AudioSystem.cpp
index c77f551..7e3b743 100644
--- a/media/libmedia/AudioSystem.cpp
+++ b/media/libmedia/AudioSystem.cpp
@@ -590,18 +590,22 @@
return output;
}
-status_t AudioSystem::startOutput(audio_io_handle_t output, AudioSystem::stream_type stream)
+status_t AudioSystem::startOutput(audio_io_handle_t output,
+ AudioSystem::stream_type stream,
+ int session)
{
const sp<IAudioPolicyService>& aps = AudioSystem::get_audio_policy_service();
if (aps == 0) return PERMISSION_DENIED;
- return aps->startOutput(output, stream);
+ return aps->startOutput(output, stream, session);
}
-status_t AudioSystem::stopOutput(audio_io_handle_t output, AudioSystem::stream_type stream)
+status_t AudioSystem::stopOutput(audio_io_handle_t output,
+ AudioSystem::stream_type stream,
+ int session)
{
const sp<IAudioPolicyService>& aps = AudioSystem::get_audio_policy_service();
if (aps == 0) return PERMISSION_DENIED;
- return aps->stopOutput(output, stream);
+ return aps->stopOutput(output, stream, session);
}
void AudioSystem::releaseOutput(audio_io_handle_t output)
@@ -666,6 +670,38 @@
return aps->getStreamVolumeIndex(stream, index);
}
+uint32_t AudioSystem::getStrategyForStream(AudioSystem::stream_type stream)
+{
+ const sp<IAudioPolicyService>& aps = AudioSystem::get_audio_policy_service();
+ if (aps == 0) return 0;
+ return aps->getStrategyForStream(stream);
+}
+
+audio_io_handle_t AudioSystem::getOutputForEffect(effect_descriptor_t *desc)
+{
+ const sp<IAudioPolicyService>& aps = AudioSystem::get_audio_policy_service();
+ if (aps == 0) return PERMISSION_DENIED;
+ return aps->getOutputForEffect(desc);
+}
+
+status_t AudioSystem::registerEffect(effect_descriptor_t *desc,
+ audio_io_handle_t output,
+ uint32_t strategy,
+ int session,
+ int id)
+{
+ const sp<IAudioPolicyService>& aps = AudioSystem::get_audio_policy_service();
+ if (aps == 0) return PERMISSION_DENIED;
+ return aps->registerEffect(desc, output, strategy, session, id);
+}
+
+status_t AudioSystem::unregisterEffect(int id)
+{
+ const sp<IAudioPolicyService>& aps = AudioSystem::get_audio_policy_service();
+ if (aps == 0) return PERMISSION_DENIED;
+ return aps->unregisterEffect(id);
+}
+
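
Taken together, the new entry points suggest the following registration sequence for an effect engine; a sketch under that assumption (sessionId and id are illustrative inputs from the effect framework):

    // Sketch only: `desc` would be filled in by the effect factory.
    effect_descriptor_t desc;
    audio_io_handle_t output = AudioSystem::getOutputForEffect(&desc);
    uint32_t strategy = AudioSystem::getStrategyForStream(AudioSystem::MUSIC);
    if (AudioSystem::registerEffect(&desc, output, strategy, sessionId, id) == NO_ERROR) {
        // ... the policy manager now tracks the effect on this output ...
        AudioSystem::unregisterEffect(id);
    }
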
// ---------------------------------------------------------------------------
void AudioSystem::AudioPolicyServiceClient::binderDied(const wp<IBinder>& who) {
diff --git a/media/libmedia/AudioTrack.cpp b/media/libmedia/AudioTrack.cpp
index 0f2093a..890786e 100644
--- a/media/libmedia/AudioTrack.cpp
+++ b/media/libmedia/AudioTrack.cpp
@@ -209,6 +209,7 @@
mFrameCount = frameCount;
mNotificationFramesReq = notificationFrames;
mSessionId = sessionId;
+ mAuxEffectId = 0;
// create the IAudioTrack
status_t status = createTrack(streamType, sampleRate, format, channelCount,
@@ -458,8 +459,9 @@
}
}
-status_t AudioTrack::setSendLevel(float level)
+status_t AudioTrack::setAuxEffectSendLevel(float level)
{
+ LOGV("setAuxEffectSendLevel(%f)", level);
if (level > 1.0f) {
return BAD_VALUE;
}
@@ -471,7 +473,7 @@
return NO_ERROR;
}
-void AudioTrack::getSendLevel(float* level)
+void AudioTrack::getAuxEffectSendLevel(float* level)
{
if (level != NULL) {
*level = mSendLevel;
@@ -637,7 +639,12 @@
status_t AudioTrack::attachAuxEffect(int effectId)
{
- return mAudioTrack->attachAuxEffect(effectId);
+ LOGV("attachAuxEffect(%d)", effectId);
+ status_t status = mAudioTrack->attachAuxEffect(effectId);
+ if (status == NO_ERROR) {
+ mAuxEffectId = effectId;
+ }
+ return status;
}
// -------------------------------------------------------------------------
@@ -752,6 +759,7 @@
mCblk->volumeLR = (uint32_t(uint16_t(mVolume[RIGHT] * 0x1000)) << 16) | uint16_t(mVolume[LEFT] * 0x1000);
mCblk->sendLevel = uint16_t(mSendLevel * 0x1000);
+ mAudioTrack->attachAuxEffect(mAuxEffectId);
mCblk->bufferTimeoutMs = MAX_STARTUP_TIMEOUT_MS;
mCblk->waitTimeMs = 0;
mRemainingFrames = mNotificationFramesAct;
diff --git a/media/libmedia/IAudioFlinger.cpp b/media/libmedia/IAudioFlinger.cpp
index 7d6a5d3..3a89e25 100644
--- a/media/libmedia/IAudioFlinger.cpp
+++ b/media/libmedia/IAudioFlinger.cpp
@@ -69,7 +69,8 @@
QUERY_NUM_EFFECTS,
QUERY_EFFECT,
GET_EFFECT_DESCRIPTOR,
- CREATE_EFFECT
+ CREATE_EFFECT,
+ MOVE_EFFECTS
};
class BpAudioFlinger : public BpInterface<IAudioFlinger>
@@ -676,6 +677,17 @@
return effect;
}
+
+ virtual status_t moveEffects(int session, int srcOutput, int dstOutput)
+ {
+ Parcel data, reply;
+ data.writeInterfaceToken(IAudioFlinger::getInterfaceDescriptor());
+ data.writeInt32(session);
+ data.writeInt32(srcOutput);
+ data.writeInt32(dstOutput);
+ remote()->transact(MOVE_EFFECTS, data, &reply);
+ return reply.readInt32();
+ }
};
IMPLEMENT_META_INTERFACE(AudioFlinger, "android.media.IAudioFlinger");
@@ -1024,6 +1036,14 @@
reply->write(&desc, sizeof(effect_descriptor_t));
return NO_ERROR;
} break;
+ case MOVE_EFFECTS: {
+ CHECK_INTERFACE(IAudioFlinger, data, reply);
+ int session = data.readInt32();
+ int srcOutput = data.readInt32();
+ int dstOutput = data.readInt32();
+ reply->writeInt32(moveEffects(session, srcOutput, dstOutput));
+ return NO_ERROR;
+ } break;
default:
return BBinder::onTransact(code, data, reply, flags);
}
diff --git a/media/libmedia/IAudioPolicyService.cpp b/media/libmedia/IAudioPolicyService.cpp
index 18dd173..950c213 100644
--- a/media/libmedia/IAudioPolicyService.cpp
+++ b/media/libmedia/IAudioPolicyService.cpp
@@ -44,7 +44,11 @@
RELEASE_INPUT,
INIT_STREAM_VOLUME,
SET_STREAM_VOLUME,
- GET_STREAM_VOLUME
+ GET_STREAM_VOLUME,
+ GET_STRATEGY_FOR_STREAM,
+ GET_OUTPUT_FOR_EFFECT,
+ REGISTER_EFFECT,
+ UNREGISTER_EFFECT
};
class BpAudioPolicyService : public BpInterface<IAudioPolicyService>
@@ -137,22 +141,28 @@
return static_cast <audio_io_handle_t> (reply.readInt32());
}
- virtual status_t startOutput(audio_io_handle_t output, AudioSystem::stream_type stream)
+ virtual status_t startOutput(audio_io_handle_t output,
+ AudioSystem::stream_type stream,
+ int session)
{
Parcel data, reply;
data.writeInterfaceToken(IAudioPolicyService::getInterfaceDescriptor());
data.writeInt32(output);
data.writeInt32(stream);
+ data.writeInt32(session);
remote()->transact(START_OUTPUT, data, &reply);
return static_cast <status_t> (reply.readInt32());
}
- virtual status_t stopOutput(audio_io_handle_t output, AudioSystem::stream_type stream)
+ virtual status_t stopOutput(audio_io_handle_t output,
+ AudioSystem::stream_type stream,
+ int session)
{
Parcel data, reply;
data.writeInterfaceToken(IAudioPolicyService::getInterfaceDescriptor());
data.writeInt32(output);
data.writeInt32(stream);
+ data.writeInt32(session);
remote()->transact(STOP_OUTPUT, data, &reply);
return static_cast <status_t> (reply.readInt32());
}
@@ -242,6 +252,51 @@
if (index) *index = lIndex;
return static_cast <status_t> (reply.readInt32());
}
+
+ virtual uint32_t getStrategyForStream(AudioSystem::stream_type stream)
+ {
+ Parcel data, reply;
+ data.writeInterfaceToken(IAudioPolicyService::getInterfaceDescriptor());
+ data.writeInt32(static_cast <uint32_t>(stream));
+ remote()->transact(GET_STRATEGY_FOR_STREAM, data, &reply);
+ return reply.readInt32();
+ }
+
+ virtual audio_io_handle_t getOutputForEffect(effect_descriptor_t *desc)
+ {
+ Parcel data, reply;
+ data.writeInterfaceToken(IAudioPolicyService::getInterfaceDescriptor());
+ data.write(desc, sizeof(effect_descriptor_t));
+ remote()->transact(GET_OUTPUT_FOR_EFFECT, data, &reply);
+ return static_cast <audio_io_handle_t> (reply.readInt32());
+ }
+
+ virtual status_t registerEffect(effect_descriptor_t *desc,
+ audio_io_handle_t output,
+ uint32_t strategy,
+ int session,
+ int id)
+ {
+ Parcel data, reply;
+ data.writeInterfaceToken(IAudioPolicyService::getInterfaceDescriptor());
+ data.write(desc, sizeof(effect_descriptor_t));
+ data.writeInt32(output);
+ data.writeInt32(strategy);
+ data.writeInt32(session);
+ data.writeInt32(id);
+ remote()->transact(REGISTER_EFFECT, data, &reply);
+ return static_cast <status_t> (reply.readInt32());
+ }
+
+ virtual status_t unregisterEffect(int id)
+ {
+ Parcel data, reply;
+ data.writeInterfaceToken(IAudioPolicyService::getInterfaceDescriptor());
+ data.writeInt32(id);
+ remote()->transact(UNREGISTER_EFFECT, data, &reply);
+ return static_cast <status_t> (reply.readInt32());
+ }
+
};
IMPLEMENT_META_INTERFACE(AudioPolicyService, "android.media.IAudioPolicyService");
@@ -255,18 +310,24 @@
switch(code) {
case SET_DEVICE_CONNECTION_STATE: {
CHECK_INTERFACE(IAudioPolicyService, data, reply);
- AudioSystem::audio_devices device = static_cast <AudioSystem::audio_devices>(data.readInt32());
- AudioSystem::device_connection_state state = static_cast <AudioSystem::device_connection_state>(data.readInt32());
+ AudioSystem::audio_devices device =
+ static_cast <AudioSystem::audio_devices>(data.readInt32());
+ AudioSystem::device_connection_state state =
+ static_cast <AudioSystem::device_connection_state>(data.readInt32());
const char *device_address = data.readCString();
- reply->writeInt32(static_cast <uint32_t>(setDeviceConnectionState(device, state, device_address)));
+ reply->writeInt32(static_cast<uint32_t> (setDeviceConnectionState(device,
+ state,
+ device_address)));
return NO_ERROR;
} break;
case GET_DEVICE_CONNECTION_STATE: {
CHECK_INTERFACE(IAudioPolicyService, data, reply);
- AudioSystem::audio_devices device = static_cast <AudioSystem::audio_devices>(data.readInt32());
+ AudioSystem::audio_devices device =
+ static_cast<AudioSystem::audio_devices> (data.readInt32());
const char *device_address = data.readCString();
- reply->writeInt32(static_cast <uint32_t>(getDeviceConnectionState(device, device_address)));
+ reply->writeInt32(static_cast<uint32_t> (getDeviceConnectionState(device,
+ device_address)));
return NO_ERROR;
} break;
@@ -287,7 +348,8 @@
case SET_FORCE_USE: {
CHECK_INTERFACE(IAudioPolicyService, data, reply);
AudioSystem::force_use usage = static_cast <AudioSystem::force_use>(data.readInt32());
- AudioSystem::forced_config config = static_cast <AudioSystem::forced_config>(data.readInt32());
+ AudioSystem::forced_config config =
+ static_cast <AudioSystem::forced_config>(data.readInt32());
reply->writeInt32(static_cast <uint32_t>(setForceUse(usage, config)));
return NO_ERROR;
} break;
@@ -301,11 +363,13 @@
case GET_OUTPUT: {
CHECK_INTERFACE(IAudioPolicyService, data, reply);
- AudioSystem::stream_type stream = static_cast <AudioSystem::stream_type>(data.readInt32());
+ AudioSystem::stream_type stream =
+ static_cast <AudioSystem::stream_type>(data.readInt32());
uint32_t samplingRate = data.readInt32();
uint32_t format = data.readInt32();
uint32_t channels = data.readInt32();
- AudioSystem::output_flags flags = static_cast <AudioSystem::output_flags>(data.readInt32());
+ AudioSystem::output_flags flags =
+ static_cast <AudioSystem::output_flags>(data.readInt32());
audio_io_handle_t output = getOutput(stream,
samplingRate,
@@ -320,7 +384,10 @@
CHECK_INTERFACE(IAudioPolicyService, data, reply);
audio_io_handle_t output = static_cast <audio_io_handle_t>(data.readInt32());
uint32_t stream = data.readInt32();
- reply->writeInt32(static_cast <uint32_t>(startOutput(output, (AudioSystem::stream_type)stream)));
+ int session = data.readInt32();
+ reply->writeInt32(static_cast <uint32_t>(startOutput(output,
+ (AudioSystem::stream_type)stream,
+ session)));
return NO_ERROR;
} break;
@@ -328,7 +395,10 @@
CHECK_INTERFACE(IAudioPolicyService, data, reply);
audio_io_handle_t output = static_cast <audio_io_handle_t>(data.readInt32());
uint32_t stream = data.readInt32();
- reply->writeInt32(static_cast <uint32_t>(stopOutput(output, (AudioSystem::stream_type)stream)));
+ int session = data.readInt32();
+ reply->writeInt32(static_cast <uint32_t>(stopOutput(output,
+ (AudioSystem::stream_type)stream,
+ session)));
return NO_ERROR;
} break;
@@ -345,7 +415,8 @@
uint32_t samplingRate = data.readInt32();
uint32_t format = data.readInt32();
uint32_t channels = data.readInt32();
- AudioSystem::audio_in_acoustics acoustics = static_cast <AudioSystem::audio_in_acoustics>(data.readInt32());
+ AudioSystem::audio_in_acoustics acoustics =
+ static_cast <AudioSystem::audio_in_acoustics>(data.readInt32());
audio_io_handle_t input = getInput(inputSource,
samplingRate,
format,
@@ -378,7 +449,8 @@
case INIT_STREAM_VOLUME: {
CHECK_INTERFACE(IAudioPolicyService, data, reply);
- AudioSystem::stream_type stream = static_cast <AudioSystem::stream_type>(data.readInt32());
+ AudioSystem::stream_type stream =
+ static_cast <AudioSystem::stream_type>(data.readInt32());
int indexMin = data.readInt32();
int indexMax = data.readInt32();
reply->writeInt32(static_cast <uint32_t>(initStreamVolume(stream, indexMin,indexMax)));
@@ -387,7 +459,8 @@
case SET_STREAM_VOLUME: {
CHECK_INTERFACE(IAudioPolicyService, data, reply);
- AudioSystem::stream_type stream = static_cast <AudioSystem::stream_type>(data.readInt32());
+ AudioSystem::stream_type stream =
+ static_cast <AudioSystem::stream_type>(data.readInt32());
int index = data.readInt32();
reply->writeInt32(static_cast <uint32_t>(setStreamVolumeIndex(stream, index)));
return NO_ERROR;
@@ -395,7 +468,8 @@
case GET_STREAM_VOLUME: {
CHECK_INTERFACE(IAudioPolicyService, data, reply);
- AudioSystem::stream_type stream = static_cast <AudioSystem::stream_type>(data.readInt32());
+ AudioSystem::stream_type stream =
+ static_cast <AudioSystem::stream_type>(data.readInt32());
int index;
status_t status = getStreamVolumeIndex(stream, &index);
reply->writeInt32(index);
@@ -403,6 +477,46 @@
return NO_ERROR;
} break;
+ case GET_STRATEGY_FOR_STREAM: {
+ CHECK_INTERFACE(IAudioPolicyService, data, reply);
+ AudioSystem::stream_type stream =
+ static_cast <AudioSystem::stream_type>(data.readInt32());
+ reply->writeInt32(getStrategyForStream(stream));
+ return NO_ERROR;
+ } break;
+
+ case GET_OUTPUT_FOR_EFFECT: {
+ CHECK_INTERFACE(IAudioPolicyService, data, reply);
+ effect_descriptor_t desc;
+ data.read(&desc, sizeof(effect_descriptor_t));
+ audio_io_handle_t output = getOutputForEffect(&desc);
+ reply->writeInt32(static_cast <int>(output));
+ return NO_ERROR;
+ } break;
+
+ case REGISTER_EFFECT: {
+ CHECK_INTERFACE(IAudioPolicyService, data, reply);
+ effect_descriptor_t desc;
+ data.read(&desc, sizeof(effect_descriptor_t));
+ audio_io_handle_t output = data.readInt32();
+ uint32_t strategy = data.readInt32();
+ int session = data.readInt32();
+ int id = data.readInt32();
+ reply->writeInt32(static_cast <int32_t>(registerEffect(&desc,
+ output,
+ strategy,
+ session,
+ id)));
+ return NO_ERROR;
+ } break;
+
+ case UNREGISTER_EFFECT: {
+ CHECK_INTERFACE(IAudioPolicyService, data, reply);
+ int id = data.readInt32();
+ reply->writeInt32(static_cast <int32_t>(unregisterEffect(id)));
+ return NO_ERROR;
+ } break;
+
default:
return BBinder::onTransact(code, data, reply, flags);
}
diff --git a/media/libmedia/IMediaPlayer.cpp b/media/libmedia/IMediaPlayer.cpp
index ed792b3..0f55b19d 100644
--- a/media/libmedia/IMediaPlayer.cpp
+++ b/media/libmedia/IMediaPlayer.cpp
@@ -45,6 +45,8 @@
GET_METADATA,
SUSPEND,
RESUME,
+ SET_AUX_EFFECT_SEND_LEVEL,
+ ATTACH_AUX_EFFECT
};
class BpMediaPlayer: public BpInterface<IMediaPlayer>
@@ -221,6 +223,24 @@
return reply.readInt32();
}
+
+ status_t setAuxEffectSendLevel(float level)
+ {
+ Parcel data, reply;
+ data.writeInterfaceToken(IMediaPlayer::getInterfaceDescriptor());
+ data.writeFloat(level);
+ remote()->transact(SET_AUX_EFFECT_SEND_LEVEL, data, &reply);
+ return reply.readInt32();
+ }
+
+ status_t attachAuxEffect(int effectId)
+ {
+ Parcel data, reply;
+ data.writeInterfaceToken(IMediaPlayer::getInterfaceDescriptor());
+ data.writeInt32(effectId);
+ remote()->transact(ATTACH_AUX_EFFECT, data, &reply);
+ return reply.readInt32();
+ }
};
IMPLEMENT_META_INTERFACE(MediaPlayer, "android.media.IMediaPlayer");
@@ -339,6 +359,16 @@
reply->setDataPosition(0);
return NO_ERROR;
} break;
+ case SET_AUX_EFFECT_SEND_LEVEL: {
+ CHECK_INTERFACE(IMediaPlayer, data, reply);
+ reply->writeInt32(setAuxEffectSendLevel(data.readFloat()));
+ return NO_ERROR;
+ } break;
+ case ATTACH_AUX_EFFECT: {
+ CHECK_INTERFACE(IMediaPlayer, data, reply);
+ reply->writeInt32(attachAuxEffect(data.readInt32()));
+ return NO_ERROR;
+ } break;
default:
return BBinder::onTransact(code, data, reply, flags);
}
diff --git a/media/libmedia/mediaplayer.cpp b/media/libmedia/mediaplayer.cpp
index b43f75f..1c99ae5 100644
--- a/media/libmedia/mediaplayer.cpp
+++ b/media/libmedia/mediaplayer.cpp
@@ -270,6 +270,7 @@
MEDIA_PLAYER_PLAYBACK_COMPLETE | MEDIA_PLAYER_PAUSED ) ) ) {
mPlayer->setLooping(mLoop);
mPlayer->setVolume(mLeftVolume, mRightVolume);
+ mPlayer->setAuxEffectSendLevel(mSendLevel);
mCurrentState = MEDIA_PLAYER_STARTED;
status_t ret = mPlayer->start();
if (ret != NO_ERROR) {
@@ -523,6 +524,31 @@
return mAudioSessionId;
}
+status_t MediaPlayer::setAuxEffectSendLevel(float level)
+{
+ LOGV("MediaPlayer::setAuxEffectSendLevel(%f)", level);
+ Mutex::Autolock _l(mLock);
+ mSendLevel = level;
+ if (mPlayer != 0) {
+ return mPlayer->setAuxEffectSendLevel(level);
+ }
+ return OK;
+}
+
+status_t MediaPlayer::attachAuxEffect(int effectId)
+{
+ LOGV("MediaPlayer::attachAuxEffect(%d)", effectId);
+ Mutex::Autolock _l(mLock);
+ if (mPlayer == 0 ||
+ (mCurrentState & MEDIA_PLAYER_IDLE) ||
+ (mCurrentState == MEDIA_PLAYER_STATE_ERROR )) {
+ LOGE("attachAuxEffect called in state %d", mCurrentState);
+ return INVALID_OPERATION;
+ }
+
+ return mPlayer->attachAuxEffect(effectId);
+}
+
void MediaPlayer::notify(int msg, int ext1, int ext2)
{
LOGV("message received msg=%d, ext1=%d, ext2=%d", msg, ext1, ext2);
diff --git a/media/libmediaplayerservice/MediaPlayerService.cpp b/media/libmediaplayerservice/MediaPlayerService.cpp
index 5401ec0..b5972e7 100644
--- a/media/libmediaplayerservice/MediaPlayerService.cpp
+++ b/media/libmediaplayerservice/MediaPlayerService.cpp
@@ -329,6 +329,10 @@
snprintf(buffer, 255, " msec per frame(%f), latency (%d)\n",
mMsecsPerFrame, mLatency);
result.append(buffer);
+ snprintf(buffer, 255, " aux effect id(%d), send level (%f)\n",
+ mAuxEffectId, mSendLevel);
+ result.append(buffer);
+
::write(fd, result.string(), result.size());
if (mTrack != 0) {
mTrack->dump(fd, args);
@@ -1093,6 +1097,21 @@
return NO_ERROR;
}
+status_t MediaPlayerService::Client::setAuxEffectSendLevel(float level)
+{
+ LOGV("[%d] setAuxEffectSendLevel(%f)", mConnId, level);
+ Mutex::Autolock l(mLock);
+ if (mAudioOutput != 0) return mAudioOutput->setAuxEffectSendLevel(level);
+ return NO_ERROR;
+}
+
+status_t MediaPlayerService::Client::attachAuxEffect(int effectId)
+{
+ LOGV("[%d] attachAuxEffect(%d)", mConnId, effectId);
+ Mutex::Autolock l(mLock);
+ if (mAudioOutput != 0) return mAudioOutput->attachAuxEffect(effectId);
+ return NO_ERROR;
+}
void MediaPlayerService::Client::notify(void* cookie, int msg, int ext1, int ext2)
{
@@ -1285,6 +1304,8 @@
mRightVolume = 1.0;
mLatency = 0;
mMsecsPerFrame = 0;
+ mAuxEffectId = 0;
+ mSendLevel = 0.0;
setMinBufferCount();
}
@@ -1417,10 +1438,13 @@
LOGV("setVolume");
t->setVolume(mLeftVolume, mRightVolume);
+
mMsecsPerFrame = 1.e3 / (float) sampleRate;
mLatency = t->latency();
mTrack = t;
- return NO_ERROR;
+
+ t->setAuxEffectSendLevel(mSendLevel);
+ return t->attachAuxEffect(mAuxEffectId);
}
void MediaPlayerService::AudioOutput::start()
@@ -1428,6 +1452,7 @@
LOGV("start");
if (mTrack) {
mTrack->setVolume(mLeftVolume, mRightVolume);
+ mTrack->setAuxEffectSendLevel(mSendLevel);
mTrack->start();
}
}
@@ -1481,6 +1506,26 @@
}
}
+status_t MediaPlayerService::AudioOutput::setAuxEffectSendLevel(float level)
+{
+ LOGV("setAuxEffectSendLevel(%f)", level);
+ mSendLevel = level;
+ if (mTrack) {
+ return mTrack->setAuxEffectSendLevel(level);
+ }
+ return NO_ERROR;
+}
+
+status_t MediaPlayerService::AudioOutput::attachAuxEffect(int effectId)
+{
+ LOGV("attachAuxEffect(%d)", effectId);
+ mAuxEffectId = effectId;
+ if (mTrack) {
+ return mTrack->attachAuxEffect(effectId);
+ }
+ return NO_ERROR;
+}
+
// static
void MediaPlayerService::AudioOutput::CallbackWrapper(
int event, void *cookie, void *info) {
diff --git a/media/libmediaplayerservice/MediaPlayerService.h b/media/libmediaplayerservice/MediaPlayerService.h
index 39f525e..a967ee2 100644
--- a/media/libmediaplayerservice/MediaPlayerService.h
+++ b/media/libmediaplayerservice/MediaPlayerService.h
@@ -91,6 +91,8 @@
virtual void close();
void setAudioStreamType(int streamType) { mStreamType = streamType; }
void setVolume(float left, float right);
+ status_t setAuxEffectSendLevel(float level);
+ status_t attachAuxEffect(int effectId);
virtual status_t dump(int fd, const Vector<String16>& args) const;
static bool isOnEmulator();
@@ -109,7 +111,8 @@
float mMsecsPerFrame;
uint32_t mLatency;
int mSessionId;
-
+ float mSendLevel;
+ int mAuxEffectId;
static bool mIsOnEmulator;
static int mMinBufferCount; // 12 for emulator; otherwise 4
@@ -221,6 +224,8 @@
Parcel *reply);
virtual status_t suspend();
virtual status_t resume();
+ virtual status_t setAuxEffectSendLevel(float level);
+ virtual status_t attachAuxEffect(int effectId);
sp<MediaPlayerBase> createPlayer(player_type playerType);
diff --git a/media/libmediaplayerservice/StagefrightRecorder.cpp b/media/libmediaplayerservice/StagefrightRecorder.cpp
index 4840391..b56f997 100644
--- a/media/libmediaplayerservice/StagefrightRecorder.cpp
+++ b/media/libmediaplayerservice/StagefrightRecorder.cpp
@@ -406,16 +406,6 @@
return OK;
}
-status_t StagefrightRecorder::setParamTrackFrameStatus(int32_t nFrames) {
- LOGV("setParamTrackFrameStatus: %d", nFrames);
- if (nFrames <= 0) {
- LOGE("Invalid number of frames to track: %d", nFrames);
- return BAD_VALUE;
- }
- mTrackEveryNumberOfFrames = nFrames;
- return OK;
-}
-
status_t StagefrightRecorder::setParamTrackTimeStatus(int64_t timeDurationUs) {
LOGV("setParamTrackTimeStatus: %lld", timeDurationUs);
if (timeDurationUs < 20000) { // Infeasible if shorter than 20 ms?
@@ -510,11 +500,6 @@
if (safe_strtoi32(value.string(), &use64BitOffset)) {
return setParam64BitFileOffset(use64BitOffset != 0);
}
- } else if (key == "param-track-frame-status") {
- int32_t nFrames;
- if (safe_strtoi32(value.string(), &nFrames)) {
- return setParamTrackFrameStatus(nFrames);
- }
} else if (key == "param-track-time-status") {
int64_t timeDurationUs;
if (safe_strtoi64(value.string(), &timeDurationUs)) {
@@ -1011,9 +996,6 @@
meta->setInt32(kKeyBitRate, totalBitRate);
meta->setInt32(kKey64BitFileOffset, mUse64BitFileOffset);
meta->setInt32(kKeyTimeScale, mMovieTimeScale);
- if (mTrackEveryNumberOfFrames > 0) {
- meta->setInt32(kKeyTrackFrameStatus, mTrackEveryNumberOfFrames);
- }
if (mTrackEveryTimeDurationUs > 0) {
meta->setInt64(kKeyTrackTimeStatus, mTrackEveryTimeDurationUs);
}
@@ -1092,7 +1074,6 @@
mVideoEncoderLevel = -1;
mMaxFileDurationUs = 0;
mMaxFileSizeBytes = 0;
- mTrackEveryNumberOfFrames = 0;
mTrackEveryTimeDurationUs = 0;
mEncoderProfiles = MediaProfiles::getInstance();
@@ -1136,8 +1117,6 @@
result.append(buffer);
snprintf(buffer, SIZE, " Interleave duration (us): %d\n", mInterleaveDurationUs);
result.append(buffer);
- snprintf(buffer, SIZE, " Progress notification: %d frames\n", mTrackEveryNumberOfFrames);
- result.append(buffer);
snprintf(buffer, SIZE, " Progress notification: %lld us\n", mTrackEveryTimeDurationUs);
result.append(buffer);
snprintf(buffer, SIZE, " Audio\n");
diff --git a/media/libmediaplayerservice/StagefrightRecorder.h b/media/libmediaplayerservice/StagefrightRecorder.h
index 9ab02cd..b4c5900 100644
--- a/media/libmediaplayerservice/StagefrightRecorder.h
+++ b/media/libmediaplayerservice/StagefrightRecorder.h
@@ -90,7 +90,6 @@
int32_t mAudioTimeScale;
int64_t mMaxFileSizeBytes;
int64_t mMaxFileDurationUs;
- int32_t mTrackEveryNumberOfFrames;
int64_t mTrackEveryTimeDurationUs;
String8 mParams;
@@ -120,7 +119,6 @@
status_t setParamVideoCameraId(int32_t cameraId);
status_t setParamVideoTimeScale(int32_t timeScale);
status_t setParamTrackTimeStatus(int64_t timeDurationUs);
- status_t setParamTrackFrameStatus(int32_t nFrames);
status_t setParamInterleaveDuration(int32_t durationUs);
status_t setParam64BitFileOffset(bool use64BitFileOffset);
status_t setParamMaxFileDurationUs(int64_t timeUs);
diff --git a/media/libstagefright/AMRExtractor.cpp b/media/libstagefright/AMRExtractor.cpp
index 0fea72e..9fc1d27 100644
--- a/media/libstagefright/AMRExtractor.cpp
+++ b/media/libstagefright/AMRExtractor.cpp
@@ -212,7 +212,8 @@
*out = NULL;
int64_t seekTimeUs;
- if (options && options->getSeekTo(&seekTimeUs)) {
+ ReadOptions::SeekMode mode;
+ if (options && options->getSeekTo(&seekTimeUs, &mode)) {
int64_t seekFrame = seekTimeUs / 20000ll; // 20ms per frame.
mCurrentTimeUs = seekFrame * 20000ll;
mOffset = seekFrame * mFrameSize + (mIsWide ? 9 : 6);
diff --git a/media/libstagefright/AudioPlayer.cpp b/media/libstagefright/AudioPlayer.cpp
index b79ba13..b7bde6b 100644
--- a/media/libstagefright/AudioPlayer.cpp
+++ b/media/libstagefright/AudioPlayer.cpp
@@ -41,6 +41,9 @@
mReachedEOS(false),
mFinalStatus(OK),
mStarted(false),
+ mIsFirstBuffer(false),
+ mFirstBufferResult(OK),
+ mFirstBuffer(NULL),
mAudioSink(audioSink) {
}
@@ -68,6 +71,24 @@
}
}
+ // We allow an optional INFO_FORMAT_CHANGED at the very beginning
+ // of playback. If there is one, getFormat below will retrieve the
+ // updated format; if there isn't, we'll stash away the valid buffer
+ // of data to be used on the first audio callback.
+
+ CHECK(mFirstBuffer == NULL);
+
+ mFirstBufferResult = mSource->read(&mFirstBuffer);
+ if (mFirstBufferResult == INFO_FORMAT_CHANGED) {
+ LOGV("INFO_FORMAT_CHANGED!!!");
+
+ CHECK(mFirstBuffer == NULL);
+ mFirstBufferResult = OK;
+ mIsFirstBuffer = false;
+ } else {
+ mIsFirstBuffer = true;
+ }
+
sp<MetaData> format = mSource->getFormat();
const char *mime;
bool success = format->findCString(kKeyMIMEType, &mime);
@@ -87,6 +108,11 @@
DEFAULT_AUDIOSINK_BUFFERCOUNT,
&AudioPlayer::AudioSinkCallback, this);
if (err != OK) {
+ if (mFirstBuffer != NULL) {
+ mFirstBuffer->release();
+ mFirstBuffer = NULL;
+ }
+
if (!sourceAlreadyStarted) {
mSource->stop();
}
@@ -110,6 +136,11 @@
delete mAudioTrack;
mAudioTrack = NULL;
+ if (mFirstBuffer != NULL) {
+ mFirstBuffer->release();
+ mFirstBuffer = NULL;
+ }
+
if (!sourceAlreadyStarted) {
mSource->stop();
}
@@ -163,6 +194,12 @@
// Make sure to release any buffer we hold onto so that the
// source is able to stop().
+
+ if (mFirstBuffer != NULL) {
+ mFirstBuffer->release();
+ mFirstBuffer = NULL;
+ }
+
if (mInputBuffer != NULL) {
LOGV("AudioPlayer releasing input buffer.");
@@ -247,6 +284,14 @@
Mutex::Autolock autoLock(mLock);
if (mSeeking) {
+ if (mIsFirstBuffer) {
+ if (mFirstBuffer != NULL) {
+ mFirstBuffer->release();
+ mFirstBuffer = NULL;
+ }
+ mIsFirstBuffer = false;
+ }
+
options.setSeekTo(mSeekTimeUs);
if (mInputBuffer != NULL) {
@@ -259,7 +304,17 @@
}
if (mInputBuffer == NULL) {
- status_t err = mSource->read(&mInputBuffer, &options);
+ status_t err;
+
+ if (mIsFirstBuffer) {
+ mInputBuffer = mFirstBuffer;
+ mFirstBuffer = NULL;
+ err = mFirstBufferResult;
+
+ mIsFirstBuffer = false;
+ } else {
+ err = mSource->read(&mInputBuffer, &options);
+ }
CHECK((err == OK && mInputBuffer != NULL)
|| (err != OK && mInputBuffer == NULL));
diff --git a/media/libstagefright/AwesomePlayer.cpp b/media/libstagefright/AwesomePlayer.cpp
index ffed74f..236a62b4 100644
--- a/media/libstagefright/AwesomePlayer.cpp
+++ b/media/libstagefright/AwesomePlayer.cpp
@@ -914,7 +914,8 @@
if (mSeeking) {
LOGV("seeking to %lld us (%.2f secs)", mSeekTimeUs, mSeekTimeUs / 1E6);
- options.setSeekTo(mSeekTimeUs);
+ options.setSeekTo(
+ mSeekTimeUs, MediaSource::ReadOptions::SEEK_CLOSEST_SYNC);
}
for (;;) {
status_t err = mVideoSource->read(&mVideoBuffer, &options);
diff --git a/media/libstagefright/CameraSource.cpp b/media/libstagefright/CameraSource.cpp
index 6f4c980..4cc176a 100644
--- a/media/libstagefright/CameraSource.cpp
+++ b/media/libstagefright/CameraSource.cpp
@@ -267,7 +267,8 @@
*buffer = NULL;
int64_t seekTimeUs;
- if (options && options->getSeekTo(&seekTimeUs)) {
+ ReadOptions::SeekMode mode;
+ if (options && options->getSeekTo(&seekTimeUs, &mode)) {
return ERROR_UNSUPPORTED;
}
diff --git a/media/libstagefright/JPEGSource.cpp b/media/libstagefright/JPEGSource.cpp
index a4be2dd2..ec81097 100644
--- a/media/libstagefright/JPEGSource.cpp
+++ b/media/libstagefright/JPEGSource.cpp
@@ -112,7 +112,8 @@
*out = NULL;
int64_t seekTimeUs;
- if (options != NULL && options->getSeekTo(&seekTimeUs)) {
+ ReadOptions::SeekMode mode;
+ if (options != NULL && options->getSeekTo(&seekTimeUs, &mode)) {
return UNKNOWN_ERROR;
}
diff --git a/media/libstagefright/MP3Extractor.cpp b/media/libstagefright/MP3Extractor.cpp
index 69c3a72..2248e23 100644
--- a/media/libstagefright/MP3Extractor.cpp
+++ b/media/libstagefright/MP3Extractor.cpp
@@ -586,7 +586,8 @@
*out = NULL;
int64_t seekTimeUs;
- if (options != NULL && options->getSeekTo(&seekTimeUs)) {
+ ReadOptions::SeekMode mode;
+ if (options != NULL && options->getSeekTo(&seekTimeUs, &mode)) {
int32_t bitrate;
if (!mMeta->findInt32(kKeyBitRate, &bitrate)) {
// bitrate is in bits/sec.
diff --git a/media/libstagefright/MPEG4Extractor.cpp b/media/libstagefright/MPEG4Extractor.cpp
index 0c2f1e6..7cea629 100644
--- a/media/libstagefright/MPEG4Extractor.cpp
+++ b/media/libstagefright/MPEG4Extractor.cpp
@@ -1468,12 +1468,45 @@
*out = NULL;
+ int64_t targetSampleTimeUs = -1;
+
int64_t seekTimeUs;
- if (options && options->getSeekTo(&seekTimeUs)) {
+ ReadOptions::SeekMode mode;
+ if (options && options->getSeekTo(&seekTimeUs, &mode)) {
+ uint32_t findFlags = 0;
+ switch (mode) {
+ case ReadOptions::SEEK_PREVIOUS_SYNC:
+ findFlags = SampleTable::kFlagBefore;
+ break;
+ case ReadOptions::SEEK_NEXT_SYNC:
+ findFlags = SampleTable::kFlagAfter;
+ break;
+ case ReadOptions::SEEK_CLOSEST_SYNC:
+ case ReadOptions::SEEK_CLOSEST:
+ findFlags = SampleTable::kFlagClosest;
+ break;
+ default:
+ CHECK(!"Should not be here.");
+ break;
+ }
+
uint32_t sampleIndex;
- status_t err = mSampleTable->findClosestSample(
+ status_t err = mSampleTable->findSampleAtTime(
seekTimeUs * mTimescale / 1000000,
- &sampleIndex, SampleTable::kSyncSample_Flag);
+ &sampleIndex, findFlags);
+
+ if (mode == ReadOptions::SEEK_CLOSEST) {
+ // We found the closest sample already, now we want the sync
+ // sample preceding it (or the sample itself of course), even
+ // if the subsequent sync sample is closer.
+ findFlags = SampleTable::kFlagBefore;
+ }
+
+ uint32_t syncSampleIndex;
+ if (err == OK) {
+ err = mSampleTable->findSyncSampleNear(
+ sampleIndex, &syncSampleIndex, findFlags);
+ }
if (err != OK) {
if (err == ERROR_OUT_OF_RANGE) {
@@ -1487,7 +1520,27 @@
return err;
}
- mCurrentSampleIndex = sampleIndex;
+ uint32_t sampleTime;
+ CHECK_EQ(OK, mSampleTable->getMetaDataForSample(
+ sampleIndex, NULL, NULL, &sampleTime));
+
+ if (mode == ReadOptions::SEEK_CLOSEST) {
+ targetSampleTimeUs = (sampleTime * 1000000ll) / mTimescale;
+ }
+
+#if 0
+ uint32_t syncSampleTime;
+ CHECK_EQ(OK, mSampleTable->getMetaDataForSample(
+ syncSampleIndex, NULL, NULL, &syncSampleTime));
+
+ LOGI("seek to time %lld us => sample at time %lld us, "
+ "sync sample at time %lld us",
+ seekTimeUs,
+ sampleTime * 1000000ll / mTimescale,
+ syncSampleTime * 1000000ll / mTimescale);
+#endif
+
+ mCurrentSampleIndex = syncSampleIndex;
if (mBuffer != NULL) {
mBuffer->release();
mBuffer = NULL;
@@ -1536,6 +1589,12 @@
mBuffer->meta_data()->clear();
mBuffer->meta_data()->setInt64(
kKeyTime, ((int64_t)dts * 1000000) / mTimescale);
+
+ if (targetSampleTimeUs >= 0) {
+ mBuffer->meta_data()->setInt64(
+ kKeyTargetTime, targetSampleTimeUs);
+ }
+
++mCurrentSampleIndex;
}
@@ -1632,6 +1691,12 @@
mBuffer->meta_data()->clear();
mBuffer->meta_data()->setInt64(
kKeyTime, ((int64_t)dts * 1000000) / mTimescale);
+
+ if (targetSampleTimeUs >= 0) {
+ mBuffer->meta_data()->setInt64(
+ kKeyTargetTime, targetSampleTimeUs);
+ }
+
++mCurrentSampleIndex;
*out = mBuffer;
diff --git a/media/libstagefright/MPEG4Writer.cpp b/media/libstagefright/MPEG4Writer.cpp
index b7388bb..c4a25bc 100644
--- a/media/libstagefright/MPEG4Writer.cpp
+++ b/media/libstagefright/MPEG4Writer.cpp
@@ -111,7 +111,6 @@
int64_t mStartTimestampUs;
int64_t mPreviousTrackTimeUs;
int64_t mTrackEveryTimeDurationUs;
- int32_t mTrackEveryNumberOfFrames;
static void *ThreadWrapper(void *me);
void threadEntry();
@@ -121,7 +120,7 @@
void writeOneChunk(bool isAvc);
// Track authoring progress status
- void trackProgressStatus(int32_t nFrames, int64_t timeUs);
+ void trackProgressStatus(int64_t timeUs, status_t err = OK);
void initTrackingProgressStatus(MetaData *params);
// Utilities for collecting statistical data
@@ -742,7 +741,6 @@
mPreviousTrackTimeUs = -1;
mTrackingProgressStatus = false;
mTrackEveryTimeDurationUs = 0;
- mTrackEveryNumberOfFrames = 0;
{
int64_t timeUs;
if (params && params->findInt64(kKeyTrackTimeStatus, &timeUs)) {
@@ -751,14 +749,6 @@
mTrackingProgressStatus = true;
}
}
- {
- int32_t nFrames;
- if (params && params->findInt32(kKeyTrackFrameStatus, &nFrames)) {
- LOGV("Receive request to track progress status for every %d frames", nFrames);
- mTrackEveryNumberOfFrames = nFrames;
- mTrackingProgressStatus = true;
- }
- }
}
status_t MPEG4Writer::Track::start(MetaData *params) {
@@ -1164,7 +1154,7 @@
if (mPreviousTrackTimeUs <= 0) {
mPreviousTrackTimeUs = mStartTimestampUs;
}
- trackProgressStatus(mSampleInfos.size(), timestampUs);
+ trackProgressStatus(timestampUs);
}
if (mOwner->numTracks() == 1) {
off_t offset = is_avc? mOwner->addLengthPrefixedSample_l(copy)
@@ -1207,7 +1197,7 @@
if (mSampleInfos.empty()) {
err = UNKNOWN_ERROR;
}
- mOwner->notify(MEDIA_RECORDER_EVENT_INFO, MEDIA_RECORDER_INFO_COMPLETION_STATUS, err);
+ mOwner->trackProgressStatus(this, -1, err);
// Last chunk
if (mOwner->numTracks() == 1) {
@@ -1237,26 +1227,61 @@
logStatisticalData(is_audio);
}
-void MPEG4Writer::Track::trackProgressStatus(int32_t nFrames, int64_t timeUs) {
- LOGV("trackProgressStatus: %d frames and %lld us", nFrames, timeUs);
- if (mTrackEveryNumberOfFrames > 0 &&
- nFrames % mTrackEveryNumberOfFrames == 0) {
- LOGV("Fire frame tracking progress status at frame %d", nFrames);
- mOwner->notify(MEDIA_RECORDER_EVENT_INFO,
- MEDIA_RECORDER_INFO_PROGRESS_FRAME_STATUS,
- nFrames);
- }
-
+void MPEG4Writer::Track::trackProgressStatus(int64_t timeUs, status_t err) {
+ LOGV("trackProgressStatus: %lld us", timeUs);
if (mTrackEveryTimeDurationUs > 0 &&
timeUs - mPreviousTrackTimeUs >= mTrackEveryTimeDurationUs) {
LOGV("Fire time tracking progress status at %lld us", timeUs);
- mOwner->notify(MEDIA_RECORDER_EVENT_INFO,
- MEDIA_RECORDER_INFO_PROGRESS_TIME_STATUS,
- timeUs / 1000);
+ mOwner->trackProgressStatus(this, timeUs - mPreviousTrackTimeUs, err);
mPreviousTrackTimeUs = timeUs;
}
}
+void MPEG4Writer::trackProgressStatus(
+ const MPEG4Writer::Track* track, int64_t timeUs, status_t err) {
+ Mutex::Autolock lock(mLock);
+ int32_t nTracks = mTracks.size();
+ CHECK(nTracks >= 1);
+ CHECK(nTracks < 64); // Arbitrary number
+
+ int32_t trackNum = 0;
+#if 0
+ // In the worst case, we can put the trackNum
+ // along with MEDIA_RECORDER_INFO_COMPLETION_STATUS
+ // to report the progress.
+ for (List<Track *>::iterator it = mTracks.begin();
+ it != mTracks.end(); ++it, ++trackNum) {
+ if (track == (*it)) {
+ break;
+ }
+ }
+#endif
+ CHECK(trackNum < nTracks);
+ trackNum <<= 16;
+
+ // Error notification
+ // Do not consider ERROR_END_OF_STREAM an error
+ if (err != OK && err != ERROR_END_OF_STREAM) {
+ notify(MEDIA_RECORDER_EVENT_ERROR,
+ trackNum | MEDIA_RECORDER_ERROR_UNKNOWN,
+ err);
+ return;
+ }
+
+ if (timeUs == -1) {
+ // Send completion notification
+ notify(MEDIA_RECORDER_EVENT_INFO,
+ trackNum | MEDIA_RECORDER_INFO_COMPLETION_STATUS,
+ err);
+ return;
+ } else {
+ // Send progress status
+ notify(MEDIA_RECORDER_EVENT_INFO,
+ trackNum | MEDIA_RECORDER_INFO_PROGRESS_TIME_STATUS,
+ timeUs / 1000);
+ }
+}
+
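
Writer progress is now multiplexed per track by packing the track number into the high 16 bits of ext1. A hypothetical listener-side decoding, assuming that packing (function name illustrative):

    // Sketch only: decodes the packed notification introduced above.
    void onRecorderEvent(int msg, int ext1, int ext2) {
        if (msg != MEDIA_RECORDER_EVENT_INFO) return;
        int trackNum = (ext1 >> 16) & 0xffff;  // which track the event concerns
        int what = ext1 & 0xffff;
        if (what == MEDIA_RECORDER_INFO_PROGRESS_TIME_STATUS) {
            // ext2 carries the reported time in ms (timeUs / 1000)
        }
    }
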
void MPEG4Writer::Track::findMinAvgMaxSampleDurationMs(
int32_t *min, int32_t *avg, int32_t *max) {
CHECK(!mSampleInfos.empty());
diff --git a/media/libstagefright/MediaSource.cpp b/media/libstagefright/MediaSource.cpp
index ec89b74..fd0e79c3 100644
--- a/media/libstagefright/MediaSource.cpp
+++ b/media/libstagefright/MediaSource.cpp
@@ -34,18 +34,22 @@
mLatenessUs = 0;
}
-void MediaSource::ReadOptions::setSeekTo(int64_t time_us) {
+void MediaSource::ReadOptions::setSeekTo(int64_t time_us, SeekMode mode) {
mOptions |= kSeekTo_Option;
mSeekTimeUs = time_us;
+ mSeekMode = mode;
}
void MediaSource::ReadOptions::clearSeekTo() {
mOptions &= ~kSeekTo_Option;
mSeekTimeUs = 0;
+ mSeekMode = SEEK_CLOSEST_SYNC;
}
-bool MediaSource::ReadOptions::getSeekTo(int64_t *time_us) const {
+bool MediaSource::ReadOptions::getSeekTo(
+ int64_t *time_us, SeekMode *mode) const {
*time_us = mSeekTimeUs;
+ *mode = mSeekMode;
return (mOptions & kSeekTo_Option) != 0;
}
diff --git a/media/libstagefright/OMXCodec.cpp b/media/libstagefright/OMXCodec.cpp
index 7762bf4..1b63083 100644
--- a/media/libstagefright/OMXCodec.cpp
+++ b/media/libstagefright/OMXCodec.cpp
@@ -1272,6 +1272,8 @@
mNoMoreOutputData(false),
mOutputPortSettingsHaveChanged(false),
mSeekTimeUs(-1),
+ mSeekMode(ReadOptions::SEEK_CLOSEST_SYNC),
+ mTargetTimeUs(-1),
mLeftOverBuffer(NULL),
mPaused(false) {
mPortStatus[kPortIndexInput] = ENABLED;
@@ -1640,13 +1642,33 @@
kKeyBufferID,
msg.u.extended_buffer_data.buffer);
- mFilledBuffers.push_back(i);
- mBufferFilled.signal();
-
if (msg.u.extended_buffer_data.flags & OMX_BUFFERFLAG_EOS) {
CODEC_LOGV("No more output data.");
mNoMoreOutputData = true;
}
+
+ if (mTargetTimeUs >= 0) {
+ CHECK(msg.u.extended_buffer_data.timestamp <= mTargetTimeUs);
+
+ if (msg.u.extended_buffer_data.timestamp < mTargetTimeUs) {
+ CODEC_LOGV(
+ "skipping output buffer at timestamp %lld us",
+ msg.u.extended_buffer_data.timestamp);
+
+ fillOutputBuffer(info);
+ break;
+ }
+
+ CODEC_LOGV(
+ "returning output buffer at target timestamp "
+ "%lld us",
+ msg.u.extended_buffer_data.timestamp);
+
+ mTargetTimeUs = -1;
+ }
+
+ mFilledBuffers.push_back(i);
+ mBufferFilled.signal();
}
break;
@@ -2185,12 +2207,24 @@
}
MediaSource::ReadOptions options;
- options.setSeekTo(mSeekTimeUs);
+ options.setSeekTo(mSeekTimeUs, mSeekMode);
mSeekTimeUs = -1;
+ mSeekMode = ReadOptions::SEEK_CLOSEST_SYNC;
mBufferFilled.signal();
err = mSource->read(&srcBuffer, &options);
+
+ if (err == OK) {
+ int64_t targetTimeUs;
+ if (srcBuffer->meta_data()->findInt64(
+ kKeyTargetTime, &targetTimeUs)
+ && targetTimeUs >= 0) {
+ mTargetTimeUs = targetTimeUs;
+ } else {
+ mTargetTimeUs = -1;
+ }
+ }
} else if (mLeftOverBuffer) {
srcBuffer = mLeftOverBuffer;
mLeftOverBuffer = NULL;
@@ -2239,7 +2273,7 @@
int64_t lastBufferTimeUs;
CHECK(srcBuffer->meta_data()->findInt64(kKeyTime, &lastBufferTimeUs));
- CHECK(timestampUs >= 0);
+ CHECK(lastBufferTimeUs >= 0);
if (offset == 0) {
timestampUs = lastBufferTimeUs;
@@ -2696,6 +2730,8 @@
mNoMoreOutputData = false;
mOutputPortSettingsHaveChanged = false;
mSeekTimeUs = -1;
+ mSeekMode = ReadOptions::SEEK_CLOSEST_SYNC;
+ mTargetTimeUs = -1;
mFilledBuffers.clear();
mPaused = false;
@@ -2790,7 +2826,8 @@
bool seeking = false;
int64_t seekTimeUs;
- if (options && options->getSeekTo(&seekTimeUs)) {
+ ReadOptions::SeekMode seekMode;
+ if (options && options->getSeekTo(&seekTimeUs, &seekMode)) {
seeking = true;
}
@@ -2800,6 +2837,7 @@
if (seeking) {
CHECK(seekTimeUs >= 0);
mSeekTimeUs = seekTimeUs;
+ mSeekMode = seekMode;
// There's no reason to trigger the code below, there's
// nothing to flush yet.
@@ -2823,6 +2861,7 @@
CHECK(seekTimeUs >= 0);
mSeekTimeUs = seekTimeUs;
+ mSeekMode = seekMode;
mFilledBuffers.clear();
diff --git a/media/libstagefright/OggExtractor.cpp b/media/libstagefright/OggExtractor.cpp
index 6013b6f..c267a23 100644
--- a/media/libstagefright/OggExtractor.cpp
+++ b/media/libstagefright/OggExtractor.cpp
@@ -155,7 +155,8 @@
*out = NULL;
int64_t seekTimeUs;
- if (options && options->getSeekTo(&seekTimeUs)) {
+ ReadOptions::SeekMode mode;
+ if (options && options->getSeekTo(&seekTimeUs, &mode)) {
off_t pos = seekTimeUs * mExtractor->mImpl->approxBitrate() / 8000000ll;
LOGI("seeking to offset %ld", pos);
diff --git a/media/libstagefright/SampleTable.cpp b/media/libstagefright/SampleTable.cpp
index 89a522e..2e62f9f 100644
--- a/media/libstagefright/SampleTable.cpp
+++ b/media/libstagefright/SampleTable.cpp
@@ -314,8 +314,10 @@
return time1 > time2 ? time1 - time2 : time2 - time1;
}
-status_t SampleTable::findClosestSample(
+status_t SampleTable::findSampleAtTime(
uint32_t req_time, uint32_t *sample_index, uint32_t flags) {
+ *sample_index = 0;
+
Mutex::Autolock autoLock(mLock);
uint32_t cur_sample = 0;
@@ -330,16 +332,37 @@
uint32_t time1 = time + j * delta;
uint32_t time2 = time1 + delta;
+ uint32_t sampleTime;
if (i+1 == mTimeToSampleCount
|| (abs_difference(req_time, time1)
< abs_difference(req_time, time2))) {
*sample_index = cur_sample + j;
+ sampleTime = time1;
} else {
*sample_index = cur_sample + j + 1;
+ sampleTime = time2;
}
- if (flags & kSyncSample_Flag) {
- return findClosestSyncSample_l(*sample_index, sample_index);
+ switch (flags) {
+ case kFlagBefore:
+ {
+ if (sampleTime > req_time && *sample_index > 0) {
+ --*sample_index;
+ }
+ break;
+ }
+
+ case kFlagAfter:
+ {
+ if (sampleTime < req_time
+ && *sample_index + 1 < mNumSampleSizes) {
+ ++*sample_index;
+ }
+ break;
+ }
+
+ default:
+ break;
}
return OK;
@@ -352,8 +375,10 @@
return ERROR_OUT_OF_RANGE;
}
-status_t SampleTable::findClosestSyncSample_l(
- uint32_t start_sample_index, uint32_t *sample_index) {
+status_t SampleTable::findSyncSampleNear(
+ uint32_t start_sample_index, uint32_t *sample_index, uint32_t flags) {
+ Mutex::Autolock autoLock(mLock);
+
*sample_index = 0;
if (mSyncSampleOffset < 0) {
@@ -362,29 +387,124 @@
return OK;
}
- uint32_t x;
- uint32_t left = 0;
- uint32_t right = mNumSyncSamples;
- while (left < right) {
- uint32_t mid = (left + right) / 2;
+ if (mNumSyncSamples == 0) {
+ *sample_index = 0;
+ return OK;
+ }
+ uint32_t left = 0;
+ while (left < mNumSyncSamples) {
+ uint32_t x;
if (mDataSource->readAt(
- mSyncSampleOffset + 8 + mid * 4, &x, 4) != 4) {
+ mSyncSampleOffset + 8 + left * 4, &x, 4) != 4) {
return ERROR_IO;
}
x = ntohl(x);
+ --x;
- if (x < (start_sample_index + 1)) {
- left = mid + 1;
- } else if (x > (start_sample_index + 1)) {
- right = mid;
- } else {
+ if (x >= start_sample_index) {
break;
}
+
+ ++left;
+ }
+
+ --left;
+ uint32_t x;
+ if (mDataSource->readAt(
+ mSyncSampleOffset + 8 + left * 4, &x, 4) != 4) {
+ return ERROR_IO;
+ }
+
+ x = ntohl(x);
+ --x;
+
+ if (left + 1 < mNumSyncSamples) {
+ uint32_t y;
+ if (mDataSource->readAt(
+ mSyncSampleOffset + 8 + (left + 1) * 4, &y, 4) != 4) {
+ return ERROR_IO;
+ }
+
+ y = ntohl(y);
+ --y;
+
+ // our sample lies between sync samples x and y.
+
+ status_t err = mSampleIterator->seekTo(start_sample_index);
+ if (err != OK) {
+ return err;
+ }
+
+ uint32_t sample_time = mSampleIterator->getSampleTime();
+
+ err = mSampleIterator->seekTo(x);
+ if (err != OK) {
+ return err;
+ }
+ uint32_t x_time = mSampleIterator->getSampleTime();
+
+ err = mSampleIterator->seekTo(y);
+ if (err != OK) {
+ return err;
+ }
+
+ uint32_t y_time = mSampleIterator->getSampleTime();
+
+ if (abs_difference(x_time, sample_time)
+ > abs_difference(y_time, sample_time)) {
+ // Pick the sync sample closest (timewise) to the start-sample.
+ x = y;
+ ++left;
+ }
}
- *sample_index = x - 1;
+ switch (flags) {
+ case kFlagBefore:
+ {
+ if (x > start_sample_index) {
+ CHECK(left > 0);
+
+ if (mDataSource->readAt(
+ mSyncSampleOffset + 8 + (left - 1) * 4, &x, 4) != 4) {
+ return ERROR_IO;
+ }
+
+ x = ntohl(x);
+ --x;
+
+ CHECK(x <= start_sample_index);
+ }
+ break;
+ }
+
+ case kFlagAfter:
+ {
+ if (x < start_sample_index) {
+ if (left + 1 >= mNumSyncSamples) {
+ return ERROR_OUT_OF_RANGE;
+ }
+
+ if (mDataSource->readAt(
+ mSyncSampleOffset + 8 + (left + 1) * 4, &x, 4) != 4) {
+ return ERROR_IO;
+ }
+
+ x = ntohl(x);
+ --x;
+
+ CHECK(x >= start_sample_index);
+ }
+
+ break;
+ }
+
+ default:
+ break;
+ }
+
+ *sample_index = x;
return OK;
}
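
The new kFlagBefore/kFlagAfter/kFlagClosest semantics boil down to: scan the sync sample table for the first entry at or past the start sample, then step back (before), step forward (after), or keep the time-wise closest neighbor (closest). Note that the linear scan above decrements left unconditionally after the loop, which would wrap the unsigned index if the very first sync sample already satisfies the condition. A minimal standalone sketch, using an in-memory sorted vector in place of the stss box read through mDataSource, and clamping at the ends; kFlagClosest is simplified here, since the real code compares sample times via SampleIterator:

    #include <cstdint>
    #include <cstdio>
    #include <vector>

    enum { kFlagBefore, kFlagAfter, kFlagClosest };

    // Pick a sync sample from a sorted, 0-based sync-sample list (as after
    // the "--x" conversion above) relative to startIndex, honoring the flag.
    static bool findSyncSampleNear(
            const std::vector<uint32_t> &syncSamples,
            uint32_t startIndex, uint32_t flags, uint32_t *out) {
        if (syncSamples.empty()) {
            *out = 0;
            return true;
        }
        size_t left = 0;
        while (left < syncSamples.size() && syncSamples[left] < startIndex) {
            ++left;
        }
        // left now points at the first sync sample >= startIndex (or past the end).
        switch (flags) {
            case kFlagBefore:
                if (left == syncSamples.size() || syncSamples[left] > startIndex) {
                    if (left == 0) return false;  // nothing at or before startIndex
                    --left;
                }
                break;
            case kFlagAfter:
                if (left == syncSamples.size()) return false;  // nothing after
                break;
            default:  // kFlagClosest: the real code compares sample *times*
                if (left == syncSamples.size()) --left;
                break;
        }
        *out = syncSamples[left];
        return true;
    }

    int main() {
        std::vector<uint32_t> sync = {0, 30, 60, 90};
        uint32_t idx;
        findSyncSampleNear(sync, 45, kFlagBefore, &idx);
        printf("before: %u\n", idx);   // 30
        findSyncSampleNear(sync, 45, kFlagAfter, &idx);
        printf("after: %u\n", idx);    // 60
        return 0;
    }
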
diff --git a/media/libstagefright/ShoutcastSource.cpp b/media/libstagefright/ShoutcastSource.cpp
index ec25430..23b7681 100644
--- a/media/libstagefright/ShoutcastSource.cpp
+++ b/media/libstagefright/ShoutcastSource.cpp
@@ -93,7 +93,8 @@
*out = NULL;
int64_t seekTimeUs;
- if (options && options->getSeekTo(&seekTimeUs)) {
+ ReadOptions::SeekMode mode;
+ if (options && options->getSeekTo(&seekTimeUs, &mode)) {
return ERROR_UNSUPPORTED;
}
diff --git a/media/libstagefright/WAVExtractor.cpp b/media/libstagefright/WAVExtractor.cpp
index 7365dfa..7c2b07e 100644
--- a/media/libstagefright/WAVExtractor.cpp
+++ b/media/libstagefright/WAVExtractor.cpp
@@ -282,7 +282,8 @@
*out = NULL;
int64_t seekTimeUs;
- if (options != NULL && options->getSeekTo(&seekTimeUs)) {
+ ReadOptions::SeekMode mode;
+ if (options != NULL && options->getSeekTo(&seekTimeUs, &mode)) {
int64_t pos = (seekTimeUs * mSampleRate) / 1000000 * mNumChannels * 2;
if (pos > mSize) {
pos = mSize;
diff --git a/media/libstagefright/codecs/aacdec/AACDecoder.cpp b/media/libstagefright/codecs/aacdec/AACDecoder.cpp
index f3b281f..c5b51c0 100644
--- a/media/libstagefright/codecs/aacdec/AACDecoder.cpp
+++ b/media/libstagefright/codecs/aacdec/AACDecoder.cpp
@@ -99,15 +99,6 @@
!= MP4AUDEC_SUCCESS) {
return ERROR_UNSUPPORTED;
}
-
- // Check on the sampling rate to see whether it is changed.
- int32_t sampleRate;
- CHECK(mMeta->findInt32(kKeySampleRate, &sampleRate));
- if (mConfig->samplingRate != sampleRate) {
- mMeta->setInt32(kKeySampleRate, mConfig->samplingRate);
- LOGW("Sample rate was %d, but now is %d",
- sampleRate, mConfig->samplingRate);
- }
}
return OK;
}
@@ -168,7 +159,8 @@
*out = NULL;
int64_t seekTimeUs;
- if (options && options->getSeekTo(&seekTimeUs)) {
+ ReadOptions::SeekMode mode;
+ if (options && options->getSeekTo(&seekTimeUs, &mode)) {
CHECK(seekTimeUs >= 0);
mNumSamplesOutput = 0;
@@ -215,6 +207,19 @@
Int decoderErr = PVMP4AudioDecodeFrame(mConfig, mDecoderBuf);
+ // Check on the sampling rate to see whether it is changed.
+ int32_t sampleRate;
+ CHECK(mMeta->findInt32(kKeySampleRate, &sampleRate));
+ if (mConfig->samplingRate != sampleRate) {
+ mMeta->setInt32(kKeySampleRate, mConfig->samplingRate);
+ LOGW("Sample rate was %d, but now is %d",
+ sampleRate, mConfig->samplingRate);
+ buffer->release();
+ mInputBuffer->release();
+ mInputBuffer = NULL;
+ return INFO_FORMAT_CHANGED;
+ }
+
size_t numOutBytes =
mConfig->frameLength * sizeof(int16_t) * mConfig->desiredChannels;
if (mConfig->aacPlusUpsamplingFactor == 2) {
diff --git a/media/libstagefright/codecs/aacenc/AACEncoder.cpp b/media/libstagefright/codecs/aacenc/AACEncoder.cpp
index 2317de6..e8235c2 100644
--- a/media/libstagefright/codecs/aacenc/AACEncoder.cpp
+++ b/media/libstagefright/codecs/aacenc/AACEncoder.cpp
@@ -202,7 +202,8 @@
*out = NULL;
int64_t seekTimeUs;
- CHECK(options == NULL || !options->getSeekTo(&seekTimeUs));
+ ReadOptions::SeekMode mode;
+ CHECK(options == NULL || !options->getSeekTo(&seekTimeUs, &mode));
MediaBuffer *buffer;
CHECK_EQ(mBufferGroup->acquire_buffer(&buffer), OK);
diff --git a/media/libstagefright/codecs/amrnb/dec/AMRNBDecoder.cpp b/media/libstagefright/codecs/amrnb/dec/AMRNBDecoder.cpp
index 7728597..fb300da 100644
--- a/media/libstagefright/codecs/amrnb/dec/AMRNBDecoder.cpp
+++ b/media/libstagefright/codecs/amrnb/dec/AMRNBDecoder.cpp
@@ -117,7 +117,8 @@
*out = NULL;
int64_t seekTimeUs;
- if (options && options->getSeekTo(&seekTimeUs)) {
+ ReadOptions::SeekMode mode;
+ if (options && options->getSeekTo(&seekTimeUs, &mode)) {
CHECK(seekTimeUs >= 0);
mNumSamplesOutput = 0;
diff --git a/media/libstagefright/codecs/amrnb/enc/AMRNBEncoder.cpp b/media/libstagefright/codecs/amrnb/enc/AMRNBEncoder.cpp
index 4c02fe9..c875426 100644
--- a/media/libstagefright/codecs/amrnb/enc/AMRNBEncoder.cpp
+++ b/media/libstagefright/codecs/amrnb/enc/AMRNBEncoder.cpp
@@ -145,7 +145,8 @@
*out = NULL;
int64_t seekTimeUs;
- CHECK(options == NULL || !options->getSeekTo(&seekTimeUs));
+ ReadOptions::SeekMode mode;
+ CHECK(options == NULL || !options->getSeekTo(&seekTimeUs, &mode));
while (mNumInputSamples < kNumSamplesPerFrame) {
if (mInputBuffer == NULL) {
diff --git a/media/libstagefright/codecs/amrwb/AMRWBDecoder.cpp b/media/libstagefright/codecs/amrwb/AMRWBDecoder.cpp
index c17c100..2a21472 100644
--- a/media/libstagefright/codecs/amrwb/AMRWBDecoder.cpp
+++ b/media/libstagefright/codecs/amrwb/AMRWBDecoder.cpp
@@ -135,7 +135,8 @@
*out = NULL;
int64_t seekTimeUs;
- if (options && options->getSeekTo(&seekTimeUs)) {
+ ReadOptions::SeekMode seekMode;
+ if (options && options->getSeekTo(&seekTimeUs, &seekMode)) {
CHECK(seekTimeUs >= 0);
mNumSamplesOutput = 0;
diff --git a/media/libstagefright/codecs/amrwbenc/AMRWBEncoder.cpp b/media/libstagefright/codecs/amrwbenc/AMRWBEncoder.cpp
index 4257c6a..93304d0 100644
--- a/media/libstagefright/codecs/amrwbenc/AMRWBEncoder.cpp
+++ b/media/libstagefright/codecs/amrwbenc/AMRWBEncoder.cpp
@@ -196,7 +196,8 @@
*out = NULL;
int64_t seekTimeUs;
- CHECK(options == NULL || !options->getSeekTo(&seekTimeUs));
+ ReadOptions::SeekMode mode;
+ CHECK(options == NULL || !options->getSeekTo(&seekTimeUs, &mode));
while (mNumInputSamples < kNumSamplesPerFrame) {
if (mInputBuffer == NULL) {
diff --git a/media/libstagefright/codecs/avc/dec/AVCDecoder.cpp b/media/libstagefright/codecs/avc/dec/AVCDecoder.cpp
index 24c361e..050e3da 100644
--- a/media/libstagefright/codecs/avc/dec/AVCDecoder.cpp
+++ b/media/libstagefright/codecs/avc/dec/AVCDecoder.cpp
@@ -51,7 +51,9 @@
mInputBuffer(NULL),
mAnchorTimeUs(0),
mNumSamplesOutput(0),
- mPendingSeekTimeUs(-1) {
+ mPendingSeekTimeUs(-1),
+ mPendingSeekMode(MediaSource::ReadOptions::SEEK_CLOSEST_SYNC),
+ mTargetTimeUs(-1) {
memset(mHandle, 0, sizeof(tagAVCHandle));
mHandle->AVCObject = NULL;
mHandle->userData = this;
@@ -161,6 +163,8 @@
mAnchorTimeUs = 0;
mNumSamplesOutput = 0;
mPendingSeekTimeUs = -1;
+ mPendingSeekMode = ReadOptions::SEEK_CLOSEST_SYNC;
+ mTargetTimeUs = -1;
mStarted = true;
return OK;
@@ -229,11 +233,13 @@
*out = NULL;
int64_t seekTimeUs;
- if (options && options->getSeekTo(&seekTimeUs)) {
+ ReadOptions::SeekMode mode;
+ if (options && options->getSeekTo(&seekTimeUs, &mode)) {
LOGV("seek requested to %lld us (%.2f secs)", seekTimeUs, seekTimeUs / 1E6);
CHECK(seekTimeUs >= 0);
mPendingSeekTimeUs = seekTimeUs;
+ mPendingSeekMode = mode;
if (mInputBuffer) {
mInputBuffer->release();
@@ -246,6 +252,8 @@
if (mInputBuffer == NULL) {
LOGV("fetching new input buffer.");
+ bool seeking = false;
+
if (!mCodecSpecificData.isEmpty()) {
mInputBuffer = mCodecSpecificData.editItemAt(0);
mCodecSpecificData.removeAt(0);
@@ -258,7 +266,9 @@
ReadOptions seekOptions;
if (mPendingSeekTimeUs >= 0) {
- seekOptions.setSeekTo(mPendingSeekTimeUs);
+ seeking = true;
+
+ seekOptions.setSeekTo(mPendingSeekTimeUs, mPendingSeekMode);
mPendingSeekTimeUs = -1;
}
status_t err = mSource->read(&mInputBuffer, &seekOptions);
@@ -276,6 +286,16 @@
mInputBuffer = NULL;
}
}
+
+ if (seeking) {
+ int64_t targetTimeUs;
+ if (mInputBuffer->meta_data()->findInt64(kKeyTargetTime, &targetTimeUs)
+ && targetTimeUs >= 0) {
+ mTargetTimeUs = targetTimeUs;
+ } else {
+ mTargetTimeUs = -1;
+ }
+ }
}
const uint8_t *fragPtr;
@@ -394,9 +414,35 @@
CHECK(index >= 0);
CHECK(index < (int32_t)mFrames.size());
- *out = mFrames.editItemAt(index);
- (*out)->set_range(0, (*out)->size());
- (*out)->add_ref();
+ MediaBuffer *mbuf = mFrames.editItemAt(index);
+
+ bool skipFrame = false;
+
+ if (mTargetTimeUs >= 0) {
+ int64_t timeUs;
+ CHECK(mbuf->meta_data()->findInt64(kKeyTime, &timeUs));
+ CHECK(timeUs <= mTargetTimeUs);
+
+ if (timeUs < mTargetTimeUs) {
+ // We're still waiting for the frame with the matching
+ // timestamp and we won't return the current one.
+ skipFrame = true;
+
+ LOGV("skipping frame at %lld us", timeUs);
+ } else {
+ LOGV("found target frame at %lld us", timeUs);
+
+ mTargetTimeUs = -1;
+ }
+ }
+
+ if (!skipFrame) {
+ *out = mbuf;
+ (*out)->set_range(0, (*out)->size());
+ (*out)->add_ref();
+ } else {
+ *out = new MediaBuffer(0);
+ }
// Do _not_ release input buffer yet.
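
This is the mechanism behind the new seek accuracy: the extractor tags the first buffer after a seek with kKeyTargetTime, and the decoder keeps decoding (the intermediate frames are needed as references) but hands the caller zero-length MediaBuffers until the frame whose timestamp reaches the target. The same skip pattern is repeated in M4vH263Decoder and VPXDecoder below. A minimal sketch of the caller-visible contract, with simplified stand-in types:

    #include <cstdint>
    #include <cstdio>
    #include <vector>

    // Frames before the target are decoded but marked skipped; "skipped"
    // stands in for returning "*out = new MediaBuffer(0)" above.
    struct Frame { int64_t timeUs; bool skipped; };

    static std::vector<Frame> decodeFromSync(
            const std::vector<int64_t> &frameTimesUs, int64_t targetTimeUs) {
        std::vector<Frame> out;
        for (int64_t timeUs : frameTimesUs) {
            bool skip = false;
            if (targetTimeUs >= 0 && timeUs < targetTimeUs) {
                skip = true;        // still before the requested time
            } else {
                targetTimeUs = -1;  // target reached; resume normal output
            }
            out.push_back({timeUs, skip});
        }
        return out;
    }

    int main() {
        // Sync sample at 0 us, caller asked for 100000 us.
        for (const Frame &f :
                decodeFromSync({0, 33333, 66666, 100000, 133333}, 100000)) {
            printf("%lld %s\n", (long long)f.timeUs, f.skipped ? "skip" : "emit");
        }
        return 0;
    }
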
diff --git a/media/libstagefright/codecs/m4v_h263/dec/M4vH263Decoder.cpp b/media/libstagefright/codecs/m4v_h263/dec/M4vH263Decoder.cpp
index 8350f7a..0f08f6e 100644
--- a/media/libstagefright/codecs/m4v_h263/dec/M4vH263Decoder.cpp
+++ b/media/libstagefright/codecs/m4v_h263/dec/M4vH263Decoder.cpp
@@ -37,7 +37,8 @@
mStarted(false),
mHandle(new tagvideoDecControls),
mInputBuffer(NULL),
- mNumSamplesOutput(0) {
+ mNumSamplesOutput(0),
+ mTargetTimeUs(-1) {
LOGV("M4vH263Decoder");
memset(mHandle, 0, sizeof(tagvideoDecControls));
@@ -146,6 +147,7 @@
mSource->start();
mNumSamplesOutput = 0;
+ mTargetTimeUs = -1;
mStarted = true;
return OK;
@@ -175,8 +177,11 @@
MediaBuffer **out, const ReadOptions *options) {
*out = NULL;
+ bool seeking = false;
int64_t seekTimeUs;
- if (options && options->getSeekTo(&seekTimeUs)) {
+ ReadOptions::SeekMode mode;
+ if (options && options->getSeekTo(&seekTimeUs, &mode)) {
+ seeking = true;
CHECK_EQ(PVResetVideoDecoder(mHandle), PV_TRUE);
}
@@ -186,6 +191,16 @@
return err;
}
+ if (seeking) {
+ int64_t targetTimeUs;
+ if (inputBuffer->meta_data()->findInt64(kKeyTargetTime, &targetTimeUs)
+ && targetTimeUs >= 0) {
+ mTargetTimeUs = targetTimeUs;
+ } else {
+ mTargetTimeUs = -1;
+ }
+ }
+
uint8_t *bitstream =
(uint8_t *) inputBuffer->data() + inputBuffer->range_offset();
@@ -221,17 +236,40 @@
return INFO_FORMAT_CHANGED;
}
- *out = mFrames[mNumSamplesOutput & 0x01];
- (*out)->add_ref();
-
int64_t timeUs;
CHECK(inputBuffer->meta_data()->findInt64(kKeyTime, &timeUs));
- (*out)->meta_data()->setInt64(kKeyTime, timeUs);
- ++mNumSamplesOutput;
inputBuffer->release();
inputBuffer = NULL;
+ bool skipFrame = false;
+
+ if (mTargetTimeUs >= 0) {
+ CHECK(timeUs <= mTargetTimeUs);
+
+ if (timeUs < mTargetTimeUs) {
+ // We're still waiting for the frame with the matching
+ // timestamp and we won't return the current one.
+ skipFrame = true;
+
+ LOGV("skipping frame at %lld us", timeUs);
+ } else {
+ LOGV("found target frame at %lld us", timeUs);
+
+ mTargetTimeUs = -1;
+ }
+ }
+
+ if (skipFrame) {
+ *out = new MediaBuffer(0);
+ } else {
+ *out = mFrames[mNumSamplesOutput & 0x01];
+ (*out)->add_ref();
+ (*out)->meta_data()->setInt64(kKeyTime, timeUs);
+ }
+
+ ++mNumSamplesOutput;
+
return OK;
}
diff --git a/media/libstagefright/codecs/m4v_h263/enc/M4vH263Encoder.cpp b/media/libstagefright/codecs/m4v_h263/enc/M4vH263Encoder.cpp
index e375250..5002442 100644
--- a/media/libstagefright/codecs/m4v_h263/enc/M4vH263Encoder.cpp
+++ b/media/libstagefright/codecs/m4v_h263/enc/M4vH263Encoder.cpp
@@ -267,7 +267,6 @@
status_t M4vH263Encoder::read(
MediaBuffer **out, const ReadOptions *options) {
- CHECK(!options);
*out = NULL;
MediaBuffer *outputBuffer;
diff --git a/media/libstagefright/codecs/mp3dec/MP3Decoder.cpp b/media/libstagefright/codecs/mp3dec/MP3Decoder.cpp
index f40bd11..c4a8280 100644
--- a/media/libstagefright/codecs/mp3dec/MP3Decoder.cpp
+++ b/media/libstagefright/codecs/mp3dec/MP3Decoder.cpp
@@ -122,7 +122,8 @@
*out = NULL;
int64_t seekTimeUs;
- if (options && options->getSeekTo(&seekTimeUs)) {
+ ReadOptions::SeekMode mode;
+ if (options && options->getSeekTo(&seekTimeUs, &mode)) {
CHECK(seekTimeUs >= 0);
mNumFramesOutput = 0;
diff --git a/media/libstagefright/codecs/on2/dec/VPXDecoder.cpp b/media/libstagefright/codecs/on2/dec/VPXDecoder.cpp
index bad8956..fbc97f4 100644
--- a/media/libstagefright/codecs/on2/dec/VPXDecoder.cpp
+++ b/media/libstagefright/codecs/on2/dec/VPXDecoder.cpp
@@ -39,7 +39,8 @@
mStarted(false),
mBufferSize(0),
mCtx(NULL),
- mBufferGroup(NULL) {
+ mBufferGroup(NULL),
+ mTargetTimeUs(-1) {
sp<MetaData> inputFormat = source->getFormat();
const char *mime;
CHECK(inputFormat->findCString(kKeyMIMEType, &mime));
@@ -94,6 +95,8 @@
mBufferGroup->add_buffer(new MediaBuffer(mBufferSize));
mBufferGroup->add_buffer(new MediaBuffer(mBufferSize));
+ mTargetTimeUs = -1;
+
mStarted = true;
return OK;
@@ -126,6 +129,13 @@
MediaBuffer **out, const ReadOptions *options) {
*out = NULL;
+ bool seeking = false;
+ int64_t seekTimeUs;
+ ReadOptions::SeekMode seekMode;
+ if (options && options->getSeekTo(&seekTimeUs, &seekMode)) {
+ seeking = true;
+ }
+
MediaBuffer *input;
status_t err = mSource->read(&input, options);
@@ -135,6 +145,16 @@
LOGV("read %d bytes from source\n", input->range_length());
+ if (seeking) {
+ int64_t targetTimeUs;
+ if (input->meta_data()->findInt64(kKeyTargetTime, &targetTimeUs)
+ && targetTimeUs >= 0) {
+ mTargetTimeUs = targetTimeUs;
+ } else {
+ mTargetTimeUs = -1;
+ }
+ }
+
if (vpx_codec_decode(
(vpx_codec_ctx_t *)mCtx,
(uint8_t *)input->data() + input->range_offset(),
@@ -156,6 +176,29 @@
input->release();
input = NULL;
+ bool skipFrame = false;
+
+ if (mTargetTimeUs >= 0) {
+ CHECK(timeUs <= mTargetTimeUs);
+
+ if (timeUs < mTargetTimeUs) {
+ // We're still waiting for the frame with the matching
+ // timestamp and we won't return the current one.
+ skipFrame = true;
+
+ LOGV("skipping frame at %lld us", timeUs);
+ } else {
+ LOGV("found target frame at %lld us", timeUs);
+
+ mTargetTimeUs = -1;
+ }
+ }
+
+ if (skipFrame) {
+ *out = new MediaBuffer(0);
+ return OK;
+ }
+
vpx_codec_iter_t iter = NULL;
vpx_image_t *img = vpx_codec_get_frame((vpx_codec_ctx_t *)mCtx, &iter);
diff --git a/media/libstagefright/codecs/vorbis/dec/VorbisDecoder.cpp b/media/libstagefright/codecs/vorbis/dec/VorbisDecoder.cpp
index df3f16a..53f0638 100644
--- a/media/libstagefright/codecs/vorbis/dec/VorbisDecoder.cpp
+++ b/media/libstagefright/codecs/vorbis/dec/VorbisDecoder.cpp
@@ -200,7 +200,8 @@
*out = NULL;
int64_t seekTimeUs;
- if (options && options->getSeekTo(&seekTimeUs)) {
+ ReadOptions::SeekMode mode;
+ if (options && options->getSeekTo(&seekTimeUs, &mode)) {
CHECK(seekTimeUs >= 0);
mNumFramesOutput = 0;
diff --git a/media/libstagefright/include/AVCDecoder.h b/media/libstagefright/include/AVCDecoder.h
index 621aa9a..7810171 100644
--- a/media/libstagefright/include/AVCDecoder.h
+++ b/media/libstagefright/include/AVCDecoder.h
@@ -58,6 +58,9 @@
int64_t mAnchorTimeUs;
int64_t mNumSamplesOutput;
int64_t mPendingSeekTimeUs;
+ MediaSource::ReadOptions::SeekMode mPendingSeekMode;
+
+ int64_t mTargetTimeUs;
void addCodecSpecificData(const uint8_t *data, size_t size);
diff --git a/media/libstagefright/include/M4vH263Decoder.h b/media/libstagefright/include/M4vH263Decoder.h
index ec49e80..7d73e30 100644
--- a/media/libstagefright/include/M4vH263Decoder.h
+++ b/media/libstagefright/include/M4vH263Decoder.h
@@ -54,6 +54,7 @@
MediaBuffer *mInputBuffer;
int64_t mNumSamplesOutput;
+ int64_t mTargetTimeUs;
void allocateFrames(int32_t width, int32_t height);
void releaseFrames();
diff --git a/media/libstagefright/include/SampleTable.h b/media/libstagefright/include/SampleTable.h
index 533ce84..a2b2c99 100644
--- a/media/libstagefright/include/SampleTable.h
+++ b/media/libstagefright/include/SampleTable.h
@@ -63,11 +63,17 @@
uint32_t *decodingTime);
enum {
- kSyncSample_Flag = 1
+ kFlagBefore,
+ kFlagAfter,
+ kFlagClosest
};
- status_t findClosestSample(
+ status_t findSampleAtTime(
uint32_t req_time, uint32_t *sample_index, uint32_t flags);
+ status_t findSyncSampleNear(
+ uint32_t start_sample_index, uint32_t *sample_index,
+ uint32_t flags);
+
status_t findThumbnailSample(uint32_t *sample_index);
protected:
@@ -111,9 +117,6 @@
friend struct SampleIterator;
- status_t findClosestSyncSample_l(
- uint32_t start_sample_index, uint32_t *sample_index);
-
status_t getSampleSize_l(uint32_t sample_index, size_t *sample_size);
SampleTable(const SampleTable &);
diff --git a/media/libstagefright/include/VPXDecoder.h b/media/libstagefright/include/VPXDecoder.h
index 550c612..3b8362d 100644
--- a/media/libstagefright/include/VPXDecoder.h
+++ b/media/libstagefright/include/VPXDecoder.h
@@ -48,6 +48,8 @@
void *mCtx;
MediaBufferGroup *mBufferGroup;
+ int64_t mTargetTimeUs;
+
sp<MetaData> mFormat;
VPXDecoder(const VPXDecoder &);
diff --git a/media/libstagefright/matroska/MatroskaExtractor.cpp b/media/libstagefright/matroska/MatroskaExtractor.cpp
index 339a7b5..3739be1 100644
--- a/media/libstagefright/matroska/MatroskaExtractor.cpp
+++ b/media/libstagefright/matroska/MatroskaExtractor.cpp
@@ -281,7 +281,8 @@
*out = NULL;
int64_t seekTimeUs;
- if (options && options->getSeekTo(&seekTimeUs)) {
+ ReadOptions::SeekMode mode;
+ if (options && options->getSeekTo(&seekTimeUs, &mode)) {
mBlockIter.seek(seekTimeUs);
}
diff --git a/services/audioflinger/AudioFlinger.cpp b/services/audioflinger/AudioFlinger.cpp
index b38a5c8..b88e69d 100644
--- a/services/audioflinger/AudioFlinger.cpp
+++ b/services/audioflinger/AudioFlinger.cpp
@@ -63,6 +63,8 @@
// ----------------------------------------------------------------------------
+extern const char * const gEffectLibPath;
+
namespace android {
static const char* kDeadlockedString = "AudioFlinger may be deadlocked\n";
@@ -127,8 +129,7 @@
AudioFlinger::AudioFlinger()
: BnAudioFlinger(),
- mAudioHardware(0), mMasterVolume(1.0f), mMasterMute(false), mNextUniqueId(1),
- mTotalEffectsCpuLoad(0), mTotalEffectsMemory(0)
+ mAudioHardware(0), mMasterVolume(1.0f), mMasterMute(false), mNextUniqueId(1)
{
mHardwareStatus = AUDIO_HW_IDLE;
@@ -321,13 +322,19 @@
mClients.add(pid, client);
}
- // If no audio session id is provided, create one here
- // TODO: enforce same stream type for all tracks in same audio session?
- // TODO: prevent same audio session on different output threads
LOGV("createTrack() sessionId: %d", (sessionId == NULL) ? -2 : *sessionId);
- if (sessionId != NULL && *sessionId != 0) {
+ if (sessionId != NULL && *sessionId != AudioSystem::SESSION_OUTPUT_MIX) {
+ // prevent same audio session on different output threads
+ for (size_t i = 0; i < mPlaybackThreads.size(); i++) {
+ if (mPlaybackThreads.keyAt(i) != output &&
+ mPlaybackThreads.valueAt(i)->hasAudioSession(*sessionId)) {
+ lStatus = BAD_VALUE;
+ goto Exit;
+ }
+ }
lSessionId = *sessionId;
} else {
+ // if no audio session id is provided, create one here
lSessionId = nextUniqueId();
if (sessionId != NULL) {
*sessionId = lSessionId;
@@ -1141,6 +1148,23 @@
{ // scope for mLock
Mutex::Autolock _l(mLock);
+
+ // all tracks in the same audio session must share the same routing strategy, otherwise
+ // conflicts will arise when tracks are moved from one output to another by the audio
+ // policy manager
+ uint32_t strategy =
+ AudioSystem::getStrategyForStream((AudioSystem::stream_type)streamType);
+ for (size_t i = 0; i < mTracks.size(); ++i) {
+ sp<Track> t = mTracks[i];
+ if (t != 0) {
+ if (sessionId == t->sessionId() &&
+ strategy != AudioSystem::getStrategyForStream((AudioSystem::stream_type)t->type())) {
+ lStatus = BAD_VALUE;
+ goto Exit;
+ }
+ }
+ }
+
track = new Track(this, client, streamType, sampleRate, format,
channelCount, frameCount, sharedBuffer, sessionId);
if (track->getCblk() == NULL || track->name() < 0) {
@@ -1153,6 +1177,7 @@
if (chain != 0) {
LOGV("createTrack_l() setting main buffer %p", chain->inBuffer());
track->setMainBuffer(chain->inBuffer());
+ chain->setStrategy(AudioSystem::getStrategyForStream((AudioSystem::stream_type)track->type()));
}
}
lStatus = NO_ERROR;
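
Sketch of the invariant enforced above: every track in an audio session must resolve to the same routing strategy, otherwise the policy manager could not move the whole session between outputs consistently. strategyFor() below is a hypothetical stand-in for AudioSystem::getStrategyForStream():

    #include <cstdio>
    #include <vector>

    enum Strategy { STRATEGY_MEDIA, STRATEGY_PHONE, STRATEGY_SONIFICATION };
    enum Stream { MUSIC, VOICE_CALL, RING };

    static Strategy strategyFor(Stream s) {
        switch (s) {
            case VOICE_CALL: return STRATEGY_PHONE;
            case RING:       return STRATEGY_SONIFICATION;
            default:         return STRATEGY_MEDIA;
        }
    }

    struct TrackInfo { int sessionId; Stream stream; };

    // Mirrors the check in createTrack_l(): reject a new track whose strategy
    // differs from an existing track in the same session (the BAD_VALUE path).
    static bool sessionAccepts(const std::vector<TrackInfo> &tracks,
                               int sessionId, Stream newStream) {
        for (const TrackInfo &t : tracks) {
            if (t.sessionId == sessionId &&
                    strategyFor(t.stream) != strategyFor(newStream)) {
                return false;
            }
        }
        return true;
    }

    int main() {
        std::vector<TrackInfo> tracks = {{7, MUSIC}};
        printf("%d\n", sessionAccepts(tracks, 7, MUSIC));       // 1: same strategy
        printf("%d\n", sessionAccepts(tracks, 7, VOICE_CALL));  // 0: rejected
        return 0;
    }
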
@@ -1344,7 +1369,16 @@
mMixBuffer = new int16_t[mFrameCount * 2];
memset(mMixBuffer, 0, mFrameCount * 2 * sizeof(int16_t));
- //TODO handle effects reconfig
+ // force reconfiguration of effect chains and engines to take the new buffer size and
+ // audio parameters into account
+ // Note that mLock is not held when readOutputParameters() is called from the constructor,
+ // but in that case nothing is done below as no audio session has effects yet, so it
+ // doesn't matter.
+ // create a copy of mEffectChains as calling moveEffectChain_l() can reorder some effect chains
+ Vector< sp<EffectChain> > effectChains = mEffectChains;
+ for (size_t i = 0; i < effectChains.size(); i ++) {
+ mAudioFlinger->moveEffectChain_l(effectChains[i]->sessionId(), this, this);
+ }
}
status_t AudioFlinger::PlaybackThread::getRenderPosition(uint32_t *halFrames, uint32_t *dspFrames)
@@ -1369,7 +1403,8 @@
for (size_t i = 0; i < mTracks.size(); ++i) {
sp<Track> track = mTracks[i];
- if (sessionId == track->sessionId()) {
+ if (sessionId == track->sessionId() &&
+ !(track->mCblk->flags & CBLK_INVALID_MSK)) {
return true;
}
}
@@ -1377,6 +1412,23 @@
return false;
}
+uint32_t AudioFlinger::PlaybackThread::getStrategyForSession_l(int sessionId)
+{
+ // session AudioSystem::SESSION_OUTPUT_MIX is placed in the same strategy as the MUSIC
+ // stream so that it is moved to the correct output by the audio policy manager when
+ // A2DP is connected or disconnected
+ if (sessionId == AudioSystem::SESSION_OUTPUT_MIX) {
+ return AudioSystem::getStrategyForStream(AudioSystem::MUSIC);
+ }
+ for (size_t i = 0; i < mTracks.size(); i++) {
+ sp<Track> track = mTracks[i];
+ if (sessionId == track->sessionId() &&
+ !(track->mCblk->flags & CBLK_INVALID_MSK)) {
+ return AudioSystem::getStrategyForStream((AudioSystem::stream_type) track->type());
+ }
+ }
+ return AudioSystem::getStrategyForStream(AudioSystem::MUSIC);
+}
+
sp<AudioFlinger::EffectChain> AudioFlinger::PlaybackThread::getEffectChain(int sessionId)
{
Mutex::Autolock _l(mLock);
@@ -1503,8 +1555,7 @@
// prevent any changes in effect chain list and in each effect chain
// during mixing and effect process as the audio buffers could be deleted
// or modified if an effect is created or deleted
- lockEffectChains_l();
- effectChains = mEffectChains;
+ lockEffectChains_l(effectChains);
}
if (LIKELY(mixerStatus == MIXER_TRACKS_READY)) {
@@ -1540,7 +1591,7 @@
effectChains[i]->process_l();
}
// enable changes in effect chain
- unlockEffectChains();
+ unlockEffectChains(effectChains);
#ifdef LVMX
int audioOutputType = LifeVibes::getMixerType(mId, mType);
if (LifeVibes::audioOutputTypeIsLifeVibes(audioOutputType)) {
@@ -1571,7 +1622,7 @@
mStandby = false;
} else {
// enable changes in effect chain
- unlockEffectChains();
+ unlockEffectChains(effectChains);
usleep(sleepTime);
}
@@ -1625,7 +1676,7 @@
}
#endif
// Delegate master volume control to effect in output mix effect chain if needed
- sp<EffectChain> chain = getEffectChain_l(0);
+ sp<EffectChain> chain = getEffectChain_l(AudioSystem::SESSION_OUTPUT_MIX);
if (chain != 0) {
uint32_t v = (uint32_t)(masterVolume * (1 << 24));
chain->setVolume_l(&v, &v);
@@ -1814,8 +1865,10 @@
void AudioFlinger::MixerThread::invalidateTracks(int streamType)
{
- LOGV ("MixerThread::invalidateTracks() mixer %p, streamType %d, mTracks.size %d", this, streamType, mTracks.size());
+ LOGV ("MixerThread::invalidateTracks() mixer %p, streamType %d, mTracks.size %d",
+ this, streamType, mTracks.size());
Mutex::Autolock _l(mLock);
+
size_t size = mTracks.size();
for (size_t i = 0; i < size; i++) {
sp<Track> t = mTracks[i];
@@ -2070,7 +2123,6 @@
// hardware resources as soon as possible
nsecs_t standbyDelay = microseconds(activeSleepTime*2);
-
while (!exitPending())
{
bool rampVolume;
@@ -2246,7 +2298,8 @@
if (UNLIKELY(trackToRemove != 0)) {
mActiveTracks.remove(trackToRemove);
if (!effectChains.isEmpty()) {
- LOGV("stopping track on chain %p for session Id: %d", effectChains[0].get(), trackToRemove->sessionId());
+ LOGV("stopping track on chain %p for session Id: %d", effectChains[0].get(),
+ trackToRemove->sessionId());
effectChains[0]->stopTrack();
}
if (trackToRemove->isTerminated()) {
@@ -2255,7 +2308,7 @@
}
}
- lockEffectChains_l();
+ lockEffectChains_l(effectChains);
}
if (LIKELY(mixerStatus == MIXER_TRACKS_READY)) {
@@ -2301,7 +2354,7 @@
for (size_t i = 0; i < effectChains.size(); i ++) {
effectChains[i]->process_l();
}
- unlockEffectChains();
+ unlockEffectChains(effectChains);
mLastWriteTime = systemTime();
mInWrite = true;
@@ -2312,7 +2365,7 @@
mInWrite = false;
mStandby = false;
} else {
- unlockEffectChains();
+ unlockEffectChains(effectChains);
usleep(sleepTime);
}
@@ -2505,8 +2558,7 @@
// prevent any changes in effect chain list and in each effect chain
// during mixing and effect process as the audio buffers could be deleted
// or modified if an effect is created or deleted
- lockEffectChains_l();
- effectChains = mEffectChains;
+ lockEffectChains_l(effectChains);
}
if (LIKELY(mixerStatus == MIXER_TRACKS_READY)) {
@@ -2547,7 +2599,7 @@
effectChains[i]->process_l();
}
// enable changes in effect chain
- unlockEffectChains();
+ unlockEffectChains(effectChains);
standbyTime = systemTime() + kStandbyTimeInNsecs;
for (size_t i = 0; i < outputTracks.size(); i++) {
@@ -2557,7 +2609,7 @@
mBytesWritten += mixBufferSize;
} else {
// enable changes in effect chain
- unlockEffectChains();
+ unlockEffectChains(effectChains);
usleep(sleepTime);
}
@@ -2859,7 +2911,9 @@
if (thread != 0) {
if (!isOutputTrack()) {
if (mState == ACTIVE || mState == RESUMING) {
- AudioSystem::stopOutput(thread->id(), (AudioSystem::stream_type)mStreamType);
+ AudioSystem::stopOutput(thread->id(),
+ (AudioSystem::stream_type)mStreamType,
+ mSessionId);
}
AudioSystem::releaseOutput(thread->id());
}
@@ -2966,7 +3020,9 @@
if (!isOutputTrack() && state != ACTIVE && state != RESUMING) {
thread->mLock.unlock();
- status = AudioSystem::startOutput(thread->id(), (AudioSystem::stream_type)mStreamType);
+ status = AudioSystem::startOutput(thread->id(),
+ (AudioSystem::stream_type)mStreamType,
+ mSessionId);
thread->mLock.lock();
}
if (status == NO_ERROR) {
@@ -2999,7 +3055,9 @@
}
if (!isOutputTrack() && (state == ACTIVE || state == RESUMING)) {
thread->mLock.unlock();
- AudioSystem::stopOutput(thread->id(), (AudioSystem::stream_type)mStreamType);
+ AudioSystem::stopOutput(thread->id(),
+ (AudioSystem::stream_type)mStreamType,
+ mSessionId);
thread->mLock.lock();
}
}
@@ -3016,7 +3074,9 @@
LOGV("ACTIVE/RESUMING => PAUSING (%d) on thread %p", mName, thread.get());
if (!isOutputTrack()) {
thread->mLock.unlock();
- AudioSystem::stopOutput(thread->id(), (AudioSystem::stream_type)mStreamType);
+ AudioSystem::stopOutput(thread->id(),
+ (AudioSystem::stream_type)mStreamType,
+ mSessionId);
thread->mLock.lock();
}
}
@@ -3585,7 +3645,7 @@
}
// If no audio session id is provided, create one here
- if (sessionId != NULL && *sessionId != 0) {
+ if (sessionId != NULL && *sessionId != AudioSystem::SESSION_OUTPUT_MIX) {
lSessionId = *sessionId;
} else {
lSessionId = nextUniqueId();
@@ -4416,8 +4476,8 @@
thread->type() != PlaybackThread::DIRECT) {
MixerThread *srcThread = (MixerThread *)thread;
srcThread->invalidateTracks(stream);
- }
}
+ }
return NO_ERROR;
}
@@ -4472,12 +4532,26 @@
status_t AudioFlinger::loadEffectLibrary(const char *libPath, int *handle)
{
+ // check calling permissions
+ if (!settingsAllowed()) {
+ return PERMISSION_DENIED;
+ }
+ // only allow libraries loaded from /system/lib/soundfx for now
+ if (strncmp(gEffectLibPath, libPath, strlen(gEffectLibPath)) != 0) {
+ return PERMISSION_DENIED;
+ }
+
Mutex::Autolock _l(mLock);
return EffectLoadLibrary(libPath, handle);
}
status_t AudioFlinger::unloadEffectLibrary(int handle)
{
+ // check calling permissions
+ if (!settingsAllowed()) {
+ return PERMISSION_DENIED;
+ }
+
Mutex::Autolock _l(mLock);
return EffectUnloadLibrary(handle);
}
@@ -4522,7 +4596,8 @@
sp<Client> client;
wp<Client> wclient;
- LOGV("createEffect pid %d, client %p, priority %d, sessionId %d, output %d", pid, effectClient.get(), priority, sessionId, output);
+ LOGV("createEffect pid %d, client %p, priority %d, sessionId %d, output %d",
+ pid, effectClient.get(), priority, sessionId, output);
if (pDesc == NULL) {
lStatus = BAD_VALUE;
@@ -4577,7 +4652,7 @@
// an auxiliary version of this effect type is available
found = true;
memcpy(&d, &desc, sizeof(effect_descriptor_t));
- if (sessionId != 0 ||
+ if (sessionId != AudioSystem::SESSION_OUTPUT_MIX ||
(desc.flags & EFFECT_FLAG_TYPE_MASK) == EFFECT_FLAG_TYPE_AUXILIARY) {
break;
}
@@ -4590,22 +4665,23 @@
}
// For same effect type, chose auxiliary version over insert version if
// connect to output mix (Compliance to OpenSL ES)
- if (sessionId == 0 &&
+ if (sessionId == AudioSystem::SESSION_OUTPUT_MIX &&
(d.flags & EFFECT_FLAG_TYPE_MASK) != EFFECT_FLAG_TYPE_AUXILIARY) {
memcpy(&desc, &d, sizeof(effect_descriptor_t));
}
}
// Do not allow auxiliary effects on a session different from 0 (output mix)
- if (sessionId != 0 &&
+ if (sessionId != AudioSystem::SESSION_OUTPUT_MIX &&
(desc.flags & EFFECT_FLAG_TYPE_MASK) == EFFECT_FLAG_TYPE_AUXILIARY) {
lStatus = INVALID_OPERATION;
goto Exit;
}
- // Session -1 is reserved for output stage effects that can only be created
- // by audio policy manager (running in same process)
- if (sessionId == -1 && getpid() != IPCThreadState::self()->getCallingPid()) {
+ // Session AudioSystem::SESSION_OUTPUT_STAGE is reserved for output stage effects
+ // that can only be created by audio policy manager (running in same process)
+ if (sessionId == AudioSystem::SESSION_OUTPUT_STAGE &&
+ getpid() != IPCThreadState::self()->getCallingPid()) {
lStatus = INVALID_OPERATION;
goto Exit;
}
@@ -4617,13 +4693,14 @@
// output threads.
// TODO: allow attachment of effect to inputs
if (output == 0) {
- if (sessionId <= 0) {
- // default to first output
- // TODO: define criteria to choose output when not specified. Or
- // receive output from audio policy manager
- if (mPlaybackThreads.size() != 0) {
- output = mPlaybackThreads.keyAt(0);
- }
+ if (sessionId == AudioSystem::SESSION_OUTPUT_STAGE) {
+ // output must be specified by AudioPolicyManager when using session
+ // AudioSystem::SESSION_OUTPUT_STAGE
+ lStatus = BAD_VALUE;
+ goto Exit;
+ } else if (sessionId == AudioSystem::SESSION_OUTPUT_MIX) {
+ output = AudioSystem::getOutputForEffect(&desc);
+ LOGV("createEffect() got output %d for effect %s", output, desc.name);
} else {
// look for the thread where the specified audio session is present
for (size_t i = 0; i < mPlaybackThreads.size(); i++) {
@@ -4636,7 +4713,7 @@
}
PlaybackThread *thread = checkPlaybackThread_l(output);
if (thread == NULL) {
- LOGE("unknown output thread");
+ LOGE("createEffect() unknown output thread");
lStatus = BAD_VALUE;
goto Exit;
}
@@ -4651,7 +4728,8 @@
}
// create effect on selected output thread
- handle = thread->createEffect_l(client, effectClient, priority, sessionId, &desc, enabled, &lStatus);
+ handle = thread->createEffect_l(client, effectClient, priority, sessionId,
+ &desc, enabled, &lStatus);
if (handle != 0 && id != NULL) {
*id = handle->id();
}
@@ -4664,31 +4742,64 @@
return handle;
}
-status_t AudioFlinger::registerEffectResource_l(effect_descriptor_t *desc) {
- if (mTotalEffectsCpuLoad + desc->cpuLoad > MAX_EFFECTS_CPU_LOAD) {
- LOGW("registerEffectResource() CPU Load limit exceeded for Fx %s, CPU %f MIPS",
- desc->name, (float)desc->cpuLoad/10);
- return INVALID_OPERATION;
+status_t AudioFlinger::moveEffects(int session, int srcOutput, int dstOutput)
+{
+ LOGV("moveEffects() session %d, srcOutput %d, dstOutput %d",
+ session, srcOutput, dstOutput);
+ Mutex::Autolock _l(mLock);
+ if (srcOutput == dstOutput) {
+ LOGW("moveEffects() same dst and src outputs %d", dstOutput);
+ return NO_ERROR;
}
- if (mTotalEffectsMemory + desc->memoryUsage > MAX_EFFECTS_MEMORY) {
- LOGW("registerEffectResource() memory limit exceeded for Fx %s, Memory %d KB",
- desc->name, desc->memoryUsage);
- return INVALID_OPERATION;
+ PlaybackThread *srcThread = checkPlaybackThread_l(srcOutput);
+ if (srcThread == NULL) {
+ LOGW("moveEffects() bad srcOutput %d", srcOutput);
+ return BAD_VALUE;
}
- mTotalEffectsCpuLoad += desc->cpuLoad;
- mTotalEffectsMemory += desc->memoryUsage;
- LOGV("registerEffectResource_l() effect %s, CPU %d, memory %d",
- desc->name, desc->cpuLoad, desc->memoryUsage);
- LOGV(" total CPU %d, total memory %d", mTotalEffectsCpuLoad, mTotalEffectsMemory);
+ PlaybackThread *dstThread = checkPlaybackThread_l(dstOutput);
+ if (dstThread == NULL) {
+ LOGW("moveEffects() bad dstOutput %d", dstOutput);
+ return BAD_VALUE;
+ }
+
+ Mutex::Autolock _dl(dstThread->mLock);
+ Mutex::Autolock _sl(srcThread->mLock);
+ moveEffectChain_l(session, srcThread, dstThread);
+
return NO_ERROR;
}
-void AudioFlinger::unregisterEffectResource_l(effect_descriptor_t *desc) {
- mTotalEffectsCpuLoad -= desc->cpuLoad;
- mTotalEffectsMemory -= desc->memoryUsage;
- LOGV("unregisterEffectResource_l() effect %s, CPU %d, memory %d",
- desc->name, desc->cpuLoad, desc->memoryUsage);
- LOGV(" total CPU %d, total memory %d", mTotalEffectsCpuLoad, mTotalEffectsMemory);
+// moveEffectChain_l must be called with both srcThread and dstThread mLocks held
+status_t AudioFlinger::moveEffectChain_l(int session,
+ AudioFlinger::PlaybackThread *srcThread,
+ AudioFlinger::PlaybackThread *dstThread)
+{
+ LOGV("moveEffectChain_l() session %d from thread %p to thread %p",
+ session, srcThread, dstThread);
+
+ sp<EffectChain> chain = srcThread->getEffectChain_l(session);
+ if (chain == 0) {
+ LOGW("moveEffectChain_l() effect chain for session %d not on source thread %p",
+ session, srcThread);
+ return INVALID_OPERATION;
+ }
+
+ // remove the chain first. This is useful only when reconfiguring an effect chain on the
+ // same output thread, so that a new chain is created with correct parameters when the
+ // first effect is added. This is otherwise unnecessary as removeEffect_l() will remove
+ // the chain when the last effect is removed.
+ srcThread->removeEffectChain_l(chain);
+
+ // transfer all effects one by one so that a new effect chain is created on the new
+ // thread with correct buffer sizes and audio parameters, and the effect engines are
+ // reconfigured accordingly
+ sp<EffectModule> effect = chain->getEffectFromId_l(0);
+ while (effect != 0) {
+ srcThread->removeEffect_l(effect);
+ dstThread->addEffect_l(effect);
+ effect = chain->getEffectFromId_l(0);
+ }
+
+ return NO_ERROR;
}
// PlaybackThread::createEffect_l() must be called with AudioFlinger::mLock held
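
The move loop relies on the new getEffectFromId_l(0) convention (id 0 returns the first effect by convention): each effect is popped off the source chain and re-added on the destination thread, so every add reconfigures the engine for the destination's buffer size and audio parameters. A minimal sketch with stand-in types:

    #include <cstdio>
    #include <deque>
    #include <string>

    struct Thread {
        std::string name;
        std::deque<std::string> chain;  // stand-in for the session's EffectChain
        void addEffect(const std::string &e) {
            chain.push_back(e);
            printf("reconfigured %s for %s\n", e.c_str(), name.c_str());
        }
    };

    // Mirrors moveEffectChain_l(): drain the source chain front-first and
    // rebuild it on the destination, one effect at a time.
    static void moveEffectChain(Thread &src, Thread &dst) {
        while (!src.chain.empty()) {
            std::string e = src.chain.front();  // "getEffectFromId_l(0)"
            src.chain.pop_front();              // "srcThread->removeEffect_l(effect)"
            dst.addEffect(e);                   // "dstThread->addEffect_l(effect)"
        }
    }

    int main() {
        Thread a{"hw output", {"equalizer", "reverb"}};
        Thread b{"a2dp output", {}};
        moveEffectChain(a, b);
        return 0;
    }
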
@@ -4707,6 +4818,7 @@
status_t lStatus;
sp<Track> track;
sp<EffectChain> chain;
+ bool chainCreated = false;
bool effectCreated = false;
bool effectRegistered = false;
@@ -4718,16 +4830,18 @@
// Do not allow auxiliary effect on session other than 0
if ((desc->flags & EFFECT_FLAG_TYPE_MASK) == EFFECT_FLAG_TYPE_AUXILIARY &&
- sessionId != 0) {
- LOGW("createEffect_l() Cannot add auxiliary effect %s to session %d", desc->name, sessionId);
+ sessionId != AudioSystem::SESSION_OUTPUT_MIX) {
+ LOGW("createEffect_l() Cannot add auxiliary effect %s to session %d",
+ desc->name, sessionId);
lStatus = BAD_VALUE;
goto Exit;
}
// Do not allow effects with session ID 0 on direct output or duplicating threads
// TODO: add rule for hw accelerated effects on direct outputs with non PCM format
- if (sessionId == 0 && mType != MIXER) {
- LOGW("createEffect_l() Cannot add auxiliary effect %s to session %d", desc->name, sessionId);
+ if (sessionId == AudioSystem::SESSION_OUTPUT_MIX && mType != MIXER) {
+ LOGW("createEffect_l() Cannot add auxiliary effect %s to session %d",
+ desc->name, sessionId);
lStatus = BAD_VALUE;
goto Exit;
}
@@ -4744,6 +4858,8 @@
LOGV("createEffect_l() new effect chain for session %d", sessionId);
chain = new EffectChain(this, sessionId);
addEffectChain_l(chain);
+ chain->setStrategy(getStrategyForSession_l(sessionId));
+ chainCreated = true;
} else {
effect = chain->getEffectFromDesc_l(desc);
}
@@ -4751,14 +4867,15 @@
LOGV("createEffect_l() got effect %p on chain %p", effect == 0 ? 0 : effect.get(), chain.get());
if (effect == 0) {
+ int id = mAudioFlinger->nextUniqueId();
// Check CPU and memory usage
- lStatus = mAudioFlinger->registerEffectResource_l(desc);
+ lStatus = AudioSystem::registerEffect(desc, mId, chain->strategy(), sessionId, id);
if (lStatus != NO_ERROR) {
goto Exit;
}
effectRegistered = true;
// create a new effect module if none present in the chain
- effect = new EffectModule(this, chain, desc, mAudioFlinger->nextUniqueId(), sessionId);
+ effect = new EffectModule(this, chain, desc, id, sessionId);
lStatus = effect->status();
if (lStatus != NO_ERROR) {
goto Exit;
@@ -4782,14 +4899,15 @@
Exit:
if (lStatus != NO_ERROR && lStatus != ALREADY_EXISTS) {
+ Mutex::Autolock _l(mLock);
if (effectCreated) {
- Mutex::Autolock _l(mLock);
- if (chain->removeEffect_l(effect) == 0) {
- removeEffectChain_l(chain);
- }
+ chain->removeEffect_l(effect);
}
if (effectRegistered) {
- mAudioFlinger->unregisterEffectResource_l(desc);
+ AudioSystem::unregisterEffect(effect->id());
+ }
+ if (chainCreated) {
+ removeEffectChain_l(chain);
}
handle.clear();
}
@@ -4800,26 +4918,71 @@
return handle;
}
-void AudioFlinger::PlaybackThread::disconnectEffect(const sp< EffectModule>& effect,
- const wp<EffectHandle>& handle) {
+// PlaybackThread::addEffect_l() must be called with AudioFlinger::mLock and
+// PlaybackThread::mLock held
+status_t AudioFlinger::PlaybackThread::addEffect_l(const sp<EffectModule>& effect)
+{
+ // check for existing effect chain with the requested audio session
+ int sessionId = effect->sessionId();
+ sp<EffectChain> chain = getEffectChain_l(sessionId);
+ bool chainCreated = false;
+
+ if (chain == 0) {
+ // create a new chain for this session
+ LOGV("addEffect_l() new effect chain for session %d", sessionId);
+ chain = new EffectChain(this, sessionId);
+ addEffectChain_l(chain);
+ chain->setStrategy(getStrategyForSession_l(sessionId));
+ chainCreated = true;
+ }
+ LOGV("addEffect_l() %p chain %p effect %p", this, chain.get(), effect.get());
+
+ if (chain->getEffectFromId_l(effect->id()) != 0) {
+ LOGW("addEffect_l() %p effect %s already present in chain %p",
+ this, effect->desc().name, chain.get());
+ return BAD_VALUE;
+ }
+
+ status_t status = chain->addEffect_l(effect);
+ if (status != NO_ERROR) {
+ if (chainCreated) {
+ removeEffectChain_l(chain);
+ }
+ return status;
+ }
+
+ effect->setDevice(mDevice);
+ effect->setMode(mAudioFlinger->getMode());
+ return NO_ERROR;
+}
+
+void AudioFlinger::PlaybackThread::removeEffect_l(const sp<EffectModule>& effect) {
+
+ LOGV("removeEffect_l() %p effect %p", this, effect.get());
effect_descriptor_t desc = effect->desc();
+ if ((desc.flags & EFFECT_FLAG_TYPE_MASK) == EFFECT_FLAG_TYPE_AUXILIARY) {
+ detachAuxEffect_l(effect->id());
+ }
+
+ sp<EffectChain> chain = effect->chain().promote();
+ if (chain != 0) {
+ // remove effect chain if removing last effect
+ if (chain->removeEffect_l(effect) == 0) {
+ removeEffectChain_l(chain);
+ }
+ } else {
+ LOGW("removeEffect_l() %p cannot promote chain for effect %p", this, effect.get());
+ }
+}
+
+void AudioFlinger::PlaybackThread::disconnectEffect(const sp<EffectModule>& effect,
+ const wp<EffectHandle>& handle) {
Mutex::Autolock _l(mLock);
+ LOGV("disconnectEffect() %p effect %p", this, effect.get());
// delete the effect module if removing last handle on it
if (effect->removeHandle(handle) == 0) {
- if ((desc.flags & EFFECT_FLAG_TYPE_MASK) == EFFECT_FLAG_TYPE_AUXILIARY) {
- detachAuxEffect_l(effect->id());
- }
- sp<EffectChain> chain = effect->chain().promote();
- if (chain != 0) {
- // remove effect chain if remove last effect
- if (chain->removeEffect_l(effect) == 0) {
- removeEffectChain_l(chain);
- }
- }
- mLock.unlock();
- mAudioFlinger->mLock.lock();
- mAudioFlinger->unregisterEffectResource_l(&desc);
- mAudioFlinger->mLock.unlock();
+ removeEffect_l(effect);
+ AudioSystem::unregisterEffect(effect->id());
}
}
@@ -4863,13 +5026,16 @@
chain->setInBuffer(buffer, ownsBuffer);
chain->setOutBuffer(mMixBuffer);
- // Effect chain for session -1 is inserted at end of effect chains list
- // in order to be processed last as it contains output stage effects
- // Effect chain for session 0 is inserted before session -1 to be processed
+ // The effect chain for session AudioSystem::SESSION_OUTPUT_STAGE is inserted at the end
+ // of the effect chains list in order to be processed last, as it contains output stage
+ // effects
+ // The effect chain for session AudioSystem::SESSION_OUTPUT_MIX is inserted before
+ // session AudioSystem::SESSION_OUTPUT_STAGE to be processed
// after track specific effects and before output stage
- // Effect chain for session other than 0 is inserted at beginning of effect
- // chains list to be processed before output mix effects. Relative order between
- // sessions other than 0 is not important
+ // It is therefore mandatory that AudioSystem::SESSION_OUTPUT_MIX == 0 and
+ // that AudioSystem::SESSION_OUTPUT_STAGE < AudioSystem::SESSION_OUTPUT_MIX
+ // Effect chains for other sessions are inserted at the beginning of the effect
+ // chains list to be processed before output mix effects. Relative order between other
+ // sessions is not important
size_t size = mEffectChains.size();
size_t i = 0;
for (i = 0; i < size; i++) {
@@ -4896,26 +5062,30 @@
track->setMainBuffer(mMixBuffer);
}
}
+ break;
}
}
return mEffectChains.size();
}
-void AudioFlinger::PlaybackThread::lockEffectChains_l()
+void AudioFlinger::PlaybackThread::lockEffectChains_l(
+ Vector<sp <AudioFlinger::EffectChain> >& effectChains)
{
+ effectChains = mEffectChains;
for (size_t i = 0; i < mEffectChains.size(); i++) {
mEffectChains[i]->lock();
}
}
-void AudioFlinger::PlaybackThread::unlockEffectChains()
+void AudioFlinger::PlaybackThread::unlockEffectChains(
+ Vector<sp <AudioFlinger::EffectChain> >& effectChains)
{
- Mutex::Autolock _l(mLock);
- for (size_t i = 0; i < mEffectChains.size(); i++) {
- mEffectChains[i]->unlock();
+ for (size_t i = 0; i < effectChains.size(); i++) {
+ effectChains[i]->unlock();
}
}
+
sp<AudioFlinger::EffectModule> AudioFlinger::PlaybackThread::getEffect_l(int sessionId, int effectId)
{
sp<EffectModule> effect;
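
The lockEffectChains_l()/unlockEffectChains() change is a snapshot-then-lock pattern: the chain list is copied while mLock is held, each chain is locked, and the copy is handed back so unlocking never needs to re-take the thread lock or observe a mutated mEffectChains. A minimal sketch with std::mutex stand-ins:

    #include <cstdio>
    #include <memory>
    #include <mutex>
    #include <vector>

    struct EffectChain {
        std::mutex mLock;
        void lock() { mLock.lock(); }
        void unlock() { mLock.unlock(); }
    };

    struct PlaybackThread {
        std::mutex mLock;
        std::vector<std::shared_ptr<EffectChain>> mEffectChains;

        // caller holds mLock; mirrors lockEffectChains_l()
        void lockEffectChains_l(std::vector<std::shared_ptr<EffectChain>> &out) {
            out = mEffectChains;            // snapshot under the thread lock
            for (auto &c : out) c->lock();
        }
        // mirrors unlockEffectChains(): works on the snapshot, no mLock needed
        void unlockEffectChains(std::vector<std::shared_ptr<EffectChain>> &chains) {
            for (auto &c : chains) c->unlock();
        }
    };

    int main() {
        PlaybackThread t;
        t.mEffectChains.push_back(std::make_shared<EffectChain>());
        std::vector<std::shared_ptr<EffectChain>> snapshot;
        {
            std::lock_guard<std::mutex> _l(t.mLock);
            t.lockEffectChains_l(snapshot);
        }
        // ... mix and call process_l() on each chain ...
        t.unlockEffectChains(snapshot);
        printf("processed %zu chain(s)\n", snapshot.size());
        return 0;
    }
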
@@ -4927,21 +5097,23 @@
return effect;
}
-status_t AudioFlinger::PlaybackThread::attachAuxEffect(const sp<AudioFlinger::PlaybackThread::Track> track, int EffectId)
+status_t AudioFlinger::PlaybackThread::attachAuxEffect(
+ const sp<AudioFlinger::PlaybackThread::Track> track, int EffectId)
{
Mutex::Autolock _l(mLock);
return attachAuxEffect_l(track, EffectId);
}
-status_t AudioFlinger::PlaybackThread::attachAuxEffect_l(const sp<AudioFlinger::PlaybackThread::Track> track, int EffectId)
+status_t AudioFlinger::PlaybackThread::attachAuxEffect_l(
+ const sp<AudioFlinger::PlaybackThread::Track> track, int EffectId)
{
status_t status = NO_ERROR;
if (EffectId == 0) {
track->setAuxBuffer(0, NULL);
} else {
- // Auxiliary effects are always in audio session 0
- sp<EffectModule> effect = getEffect_l(0, EffectId);
+ // Auxiliary effects are always in audio session AudioSystem::SESSION_OUTPUT_MIX
+ sp<EffectModule> effect = getEffect_l(AudioSystem::SESSION_OUTPUT_MIX, EffectId);
if (effect != 0) {
if ((effect->desc().flags & EFFECT_FLAG_TYPE_MASK) == EFFECT_FLAG_TYPE_AUXILIARY) {
track->setAuxBuffer(EffectId, (int32_t *)effect->inBuffer());
@@ -5137,7 +5309,7 @@
if ((mDescriptor.flags & EFFECT_FLAG_TYPE_MASK) == EFFECT_FLAG_TYPE_AUXILIARY) {
AudioMixer::ditherAndClamp(mConfig.inputCfg.buffer.s32,
mConfig.inputCfg.buffer.s32,
- mConfig.inputCfg.buffer.frameCount);
+ mConfig.inputCfg.buffer.frameCount/2);
}
// do the actual processing in the effect engine
@@ -5214,7 +5386,8 @@
mConfig.outputCfg.bufferProvider.releaseBuffer = NULL;
mConfig.inputCfg.accessMode = EFFECT_BUFFER_ACCESS_READ;
// Insert effect:
- // - in session 0 or -1, always overwrites output buffer: input buffer == output buffer
+ // - in session AudioSystem::SESSION_OUTPUT_MIX or AudioSystem::SESSION_OUTPUT_STAGE,
+ // always overwrites output buffer: input buffer == output buffer
// - in other sessions:
// last effect in the chain accumulates in output buffer: input buffer != output buffer
// other effect: overwrites output buffer: input buffer == output buffer
@@ -5231,6 +5404,9 @@
mConfig.inputCfg.buffer.frameCount = thread->frameCount();
mConfig.outputCfg.buffer.frameCount = mConfig.inputCfg.buffer.frameCount;
+ LOGV("configure() %p thread %p buffer %p framecount %d",
+ this, thread.get(), mConfig.inputCfg.buffer.raw, mConfig.inputCfg.buffer.frameCount);
+
status_t cmdStatus;
int size = sizeof(int);
status_t status = (*mEffectInterface)->command(mEffectInterface, EFFECT_CMD_CONFIGURE, sizeof(effect_config_t), &mConfig, &size, &cmdStatus);
@@ -5753,7 +5929,7 @@
mVolumeCtrlIdx(-1), mLeftVolume(0), mRightVolume(0),
mNewLeftVolume(0), mNewRightVolume(0)
{
-
+ mStrategy = AudioSystem::getStrategyForStream(AudioSystem::MUSIC);
}
AudioFlinger::EffectChain::~EffectChain()
@@ -5786,7 +5962,8 @@
size_t size = mEffects.size();
for (size_t i = 0; i < size; i++) {
- if (mEffects[i]->id() == id) {
+ // by convention, return first effect if id provided is 0 (0 is never a valid id)
+ if (id == 0 || mEffects[i]->id() == id) {
effect = mEffects[i];
break;
}
@@ -5816,21 +5993,24 @@
}
// addEffect_l() must be called with PlaybackThread::mLock held
-status_t AudioFlinger::EffectChain::addEffect_l(sp<EffectModule>& effect)
+status_t AudioFlinger::EffectChain::addEffect_l(const sp<EffectModule>& effect)
{
effect_descriptor_t desc = effect->desc();
uint32_t insertPref = desc.flags & EFFECT_FLAG_INSERT_MASK;
Mutex::Autolock _l(mLock);
+ effect->setChain(this);
+ sp<ThreadBase> thread = mThread.promote();
+ if (thread == 0) {
+ return NO_INIT;
+ }
+ effect->setThread(thread);
if ((desc.flags & EFFECT_FLAG_TYPE_MASK) == EFFECT_FLAG_TYPE_AUXILIARY) {
// Auxiliary effects are inserted at the beginning of mEffects vector as
// they are processed first and accumulated in chain input buffer
mEffects.insertAt(effect, 0);
- sp<ThreadBase> thread = mThread.promote();
- if (thread == 0) {
- return NO_INIT;
- }
+
// the input buffer for auxiliary effect contains mono samples in
// 32 bit format. This is to avoid saturation in AudioMixer
// accumulation stage. Saturation is done in EffectModule::process() before
diff --git a/services/audioflinger/AudioFlinger.h b/services/audioflinger/AudioFlinger.h
index 99816f9..a8c9a92 100644
--- a/services/audioflinger/AudioFlinger.h
+++ b/services/audioflinger/AudioFlinger.h
@@ -168,8 +168,7 @@
int *id,
int *enabled);
- status_t registerEffectResource_l(effect_descriptor_t *desc);
- void unregisterEffectResource_l(effect_descriptor_t *desc);
+ virtual status_t moveEffects(int session, int srcOutput, int dstOutput);
enum hardware_call_state {
AUDIO_HW_IDLE = 0,
@@ -619,15 +618,22 @@
sp<EffectChain> getEffectChain_l(int sessionId);
status_t addEffectChain_l(const sp<EffectChain>& chain);
size_t removeEffectChain_l(const sp<EffectChain>& chain);
- void lockEffectChains_l();
- void unlockEffectChains();
+ void lockEffectChains_l(Vector<sp <EffectChain> >& effectChains);
+ void unlockEffectChains(Vector<sp <EffectChain> >& effectChains);
sp<AudioFlinger::EffectModule> getEffect_l(int sessionId, int effectId);
void detachAuxEffect_l(int effectId);
- status_t attachAuxEffect(const sp<AudioFlinger::PlaybackThread::Track> track, int EffectId);
- status_t attachAuxEffect_l(const sp<AudioFlinger::PlaybackThread::Track> track, int EffectId);
+ status_t attachAuxEffect(const sp<AudioFlinger::PlaybackThread::Track> track,
+ int EffectId);
+ status_t attachAuxEffect_l(const sp<AudioFlinger::PlaybackThread::Track> track,
+ int EffectId);
void setMode(uint32_t mode);
+ status_t addEffect_l(const sp< EffectModule>& effect);
+ void removeEffect_l(const sp< EffectModule>& effect);
+
+ uint32_t getStrategyForSession_l(int sessionId);
+
struct stream_type_t {
stream_type_t()
: volume(1.0f),
@@ -690,7 +696,10 @@
class MixerThread : public PlaybackThread {
public:
- MixerThread (const sp<AudioFlinger>& audioFlinger, AudioStreamOut* output, int id, uint32_t device);
+ MixerThread (const sp<AudioFlinger>& audioFlinger,
+ AudioStreamOut* output,
+ int id,
+ uint32_t device);
virtual ~MixerThread();
// Thread virtuals
@@ -701,7 +710,8 @@
virtual status_t dumpInternals(int fd, const Vector<String16>& args);
protected:
- uint32_t prepareTracks_l(const SortedVector< wp<Track> >& activeTracks, Vector< sp<Track> > *tracksToRemove);
+ uint32_t prepareTracks_l(const SortedVector< wp<Track> >& activeTracks,
+ Vector< sp<Track> > *tracksToRemove);
virtual int getTrackName_l();
virtual void deleteTrackName_l(int name);
virtual uint32_t activeSleepTimeUs();
@@ -764,6 +774,9 @@
void audioConfigChanged_l(int event, int ioHandle, void *param2);
int nextUniqueId();
+ status_t moveEffectChain_l(int session,
+ AudioFlinger::PlaybackThread *srcThread,
+ AudioFlinger::PlaybackThread *dstThread);
friend class AudioBuffer;
@@ -931,6 +944,9 @@
uint32_t status() {
return mStatus;
}
+ int sessionId() {
+ return mSessionId;
+ }
status_t setEnabled(bool enabled);
bool isEnabled();
@@ -938,6 +954,8 @@
int16_t *inBuffer() { return mConfig.inputCfg.buffer.s16; }
void setOutBuffer(int16_t *buffer) { mConfig.outputCfg.buffer.s16 = buffer; }
int16_t *outBuffer() { return mConfig.outputCfg.buffer.s16; }
+ void setChain(const wp<EffectChain>& chain) { mChain = chain; }
+ void setThread(const wp<ThreadBase>& thread) { mThread = thread; }
status_t addHandle(sp<EffectHandle>& handle);
void disconnect(const wp<EffectHandle>& handle);
@@ -1061,19 +1079,19 @@
mLock.unlock();
}
- status_t addEffect_l(sp<EffectModule>& handle);
+ status_t addEffect_l(const sp<EffectModule>& handle);
size_t removeEffect_l(const sp<EffectModule>& handle);
int sessionId() {
return mSessionId;
}
+
sp<EffectModule> getEffectFromDesc_l(effect_descriptor_t *descriptor);
sp<EffectModule> getEffectFromId_l(int id);
bool setVolume_l(uint32_t *left, uint32_t *right);
void setDevice_l(uint32_t device);
void setMode_l(uint32_t mode);
-
void setInBuffer(int16_t *buffer, bool ownsBuffer = false) {
mInBuffer = buffer;
mOwnInBuffer = ownsBuffer;
@@ -1092,6 +1110,10 @@
void stopTrack() {mActiveTrackCnt--;}
int activeTracks() { return mActiveTrackCnt;}
+ uint32_t strategy() { return mStrategy; }
+ void setStrategy(uint32_t strategy)
+ { mStrategy = strategy; }
+
status_t dump(int fd, const Vector<String16>& args);
protected:
@@ -1112,7 +1134,7 @@
uint32_t mRightVolume; // previous volume on right channel
uint32_t mNewLeftVolume; // new volume on left channel
uint32_t mNewRightVolume; // new volume on right channel
-
+ uint32_t mStrategy; // strategy for this effect chain
};
friend class RecordThread;
@@ -1142,12 +1164,6 @@
#endif
uint32_t mMode;
- // Maximum CPU load allocated to audio effects in 0.1 MIPS (ARMv5TE, 0 WS memory) units
- static const uint32_t MAX_EFFECTS_CPU_LOAD = 1000;
- // Maximum memory allocated to audio effects in KB
- static const uint32_t MAX_EFFECTS_MEMORY = 512;
- uint32_t mTotalEffectsCpuLoad; // current CPU load used by effects
- uint32_t mTotalEffectsMemory; // current memory used by effects
};
// ----------------------------------------------------------------------------
diff --git a/services/audioflinger/AudioPolicyManagerBase.cpp b/services/audioflinger/AudioPolicyManagerBase.cpp
index 381a958..1d87c0d 100644
--- a/services/audioflinger/AudioPolicyManagerBase.cpp
+++ b/services/audioflinger/AudioPolicyManagerBase.cpp
@@ -538,9 +538,11 @@
return output;
}
-status_t AudioPolicyManagerBase::startOutput(audio_io_handle_t output, AudioSystem::stream_type stream)
+status_t AudioPolicyManagerBase::startOutput(audio_io_handle_t output,
+ AudioSystem::stream_type stream,
+ int session)
{
- LOGV("startOutput() output %d, stream %d", output, stream);
+ LOGV("startOutput() output %d, stream %d, session %d", output, stream, session);
ssize_t index = mOutputs.indexOfKey(output);
if (index < 0) {
LOGW("startOutput() unknow output %d", output);
@@ -574,9 +576,11 @@
return NO_ERROR;
}
-status_t AudioPolicyManagerBase::stopOutput(audio_io_handle_t output, AudioSystem::stream_type stream)
+status_t AudioPolicyManagerBase::stopOutput(audio_io_handle_t output,
+ AudioSystem::stream_type stream,
+ int session)
{
- LOGV("stopOutput() output %d, stream %d", output, stream);
+ LOGV("stopOutput() output %d, stream %d, session %d", output, stream, session);
ssize_t index = mOutputs.indexOfKey(output);
if (index < 0) {
LOGW("stopOutput() unknow output %d", output);
@@ -602,8 +606,12 @@
setOutputDevice(output, getNewDevice(output));
#ifdef WITH_A2DP
- if (mA2dpOutput != 0 && !a2dpUsedForSonification() && strategy == STRATEGY_SONIFICATION) {
- setStrategyMute(STRATEGY_MEDIA, false, mA2dpOutput, mOutputs.valueFor(mHardwareOutput)->mLatency*2);
+ if (mA2dpOutput != 0 && !a2dpUsedForSonification() &&
+ strategy == STRATEGY_SONIFICATION) {
+ setStrategyMute(STRATEGY_MEDIA,
+ false,
+ mA2dpOutput,
+ mOutputs.valueFor(mHardwareOutput)->mLatency*2);
}
#endif
if (output != mHardwareOutput) {
@@ -826,6 +834,85 @@
return NO_ERROR;
}
+audio_io_handle_t AudioPolicyManagerBase::getOutputForEffect(effect_descriptor_t *desc)
+{
+ LOGV("getOutputForEffect()");
+ // apply a simple rule where global effects are attached to the same output as MUSIC streams
+ return getOutput(AudioSystem::MUSIC);
+}
+
+status_t AudioPolicyManagerBase::registerEffect(effect_descriptor_t *desc,
+ audio_io_handle_t output,
+ uint32_t strategy,
+ int session,
+ int id)
+{
+ ssize_t index = mOutputs.indexOfKey(output);
+ if (index < 0) {
+ LOGW("registerEffect() unknown output %d", output);
+ return INVALID_OPERATION;
+ }
+
+ if (mTotalEffectsCpuLoad + desc->cpuLoad > getMaxEffectsCpuLoad()) {
+ LOGW("registerEffect() CPU Load limit exceeded for Fx %s, CPU %f MIPS",
+ desc->name, (float)desc->cpuLoad/10);
+ return INVALID_OPERATION;
+ }
+ if (mTotalEffectsMemory + desc->memoryUsage > getMaxEffectsMemory()) {
+ LOGW("registerEffect() memory limit exceeded for Fx %s, Memory %d KB",
+ desc->name, desc->memoryUsage);
+ return INVALID_OPERATION;
+ }
+ mTotalEffectsCpuLoad += desc->cpuLoad;
+ mTotalEffectsMemory += desc->memoryUsage;
+ LOGV("registerEffect() effect %s, output %d, strategy %d session %d id %d",
+ desc->name, output, strategy, session, id);
+
+ LOGV("registerEffect() CPU %d, memory %d", desc->cpuLoad, desc->memoryUsage);
+ LOGV(" total CPU %d, total memory %d", mTotalEffectsCpuLoad, mTotalEffectsMemory);
+
+ EffectDescriptor *pDesc = new EffectDescriptor();
+ memcpy (&pDesc->mDesc, desc, sizeof(effect_descriptor_t));
+ pDesc->mOutput = output;
+ pDesc->mStrategy = (routing_strategy)strategy;
+ pDesc->mSession = session;
+ mEffects.add(id, pDesc);
+
+ return NO_ERROR;
+}
+
+status_t AudioPolicyManagerBase::unregisterEffect(int id)
+{
+ ssize_t index = mEffects.indexOfKey(id);
+ if (index < 0) {
+ LOGW("unregisterEffect() unknown effect ID %d", id);
+ return INVALID_OPERATION;
+ }
+
+ EffectDescriptor *pDesc = mEffects.valueAt(index);
+
+ if (mTotalEffectsCpuLoad < pDesc->mDesc.cpuLoad) {
+ LOGW("unregisterEffect() CPU load %d too high for total %d",
+ pDesc->mDesc.cpuLoad, mTotalEffectsCpuLoad);
+ pDesc->mDesc.cpuLoad = mTotalEffectsCpuLoad;
+ }
+ mTotalEffectsCpuLoad -= pDesc->mDesc.cpuLoad;
+ if (mTotalEffectsMemory < pDesc->mDesc.memoryUsage) {
+ LOGW("unregisterEffect() memory %d too big for total %d",
+ pDesc->mDesc.memoryUsage, mTotalEffectsMemory);
+ pDesc->mDesc.memoryUsage = mTotalEffectsMemory;
+ }
+ mTotalEffectsMemory -= pDesc->mDesc.memoryUsage;
+ LOGV("unregisterEffect() effect %s, ID %d, CPU %d, memory %d",
+ pDesc->mDesc.name, id, pDesc->mDesc.cpuLoad, pDesc->mDesc.memoryUsage);
+ LOGV(" total CPU %d, total memory %d", mTotalEffectsCpuLoad, mTotalEffectsMemory);
+
+ mEffects.removeItem(id);
+ delete pDesc;
+
+ return NO_ERROR;
+}
+
status_t AudioPolicyManagerBase::dump(int fd)
{
const size_t SIZE = 256;
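
The CPU/memory budgeting that moved here from AudioFlinger charges desc->cpuLoad (0.1 MIPS units) and desc->memoryUsage (KB) against global limits on registration, and credits them back with clamping on unregistration so the totals cannot underflow. A minimal sketch with hypothetical limits:

    #include <cstdint>
    #include <cstdio>

    // Stand-in for the policy manager's effect resource accounting; the
    // limits below are hypothetical, matching the units in the LOG messages.
    struct Budget {
        uint32_t maxCpu, maxMem;  // 0.1-MIPS units, KB
        uint32_t cpu, mem;

        bool registerEffect(uint32_t cpuLoad, uint32_t memoryUsage) {
            if (cpu + cpuLoad > maxCpu || mem + memoryUsage > maxMem) {
                return false;                    // the INVALID_OPERATION path
            }
            cpu += cpuLoad;
            mem += memoryUsage;
            return true;
        }
        void unregisterEffect(uint32_t cpuLoad, uint32_t memoryUsage) {
            // clamp, as unregisterEffect() does, so totals never underflow
            cpu -= (cpuLoad > cpu) ? cpu : cpuLoad;
            mem -= (memoryUsage > mem) ? mem : memoryUsage;
        }
    };

    int main() {
        Budget b{1000, 512, 0, 0};                   // 100 MIPS, 512 KB
        printf("%d\n", b.registerEffect(400, 128));  // 1: fits
        printf("%d\n", b.registerEffect(700, 128));  // 0: exceeds CPU budget
        b.unregisterEffect(400, 128);
        printf("cpu=%u mem=%u\n", b.cpu, b.mem);     // back to 0/0
        return 0;
    }
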
@@ -890,6 +977,19 @@
write(fd, buffer, strlen(buffer));
}
+ snprintf(buffer, SIZE, "\nTotal Effects CPU: %f MIPS, Total Effects memory: %d KB\n",
+ (float)mTotalEffectsCpuLoad/10, mTotalEffectsMemory);
+ write(fd, buffer, strlen(buffer));
+
+ snprintf(buffer, SIZE, "Registered effects:\n");
+ write(fd, buffer, strlen(buffer));
+ for (size_t i = 0; i < mEffects.size(); i++) {
+ snprintf(buffer, SIZE, "- Effect %d dump:\n", mEffects.keyAt(i));
+ write(fd, buffer, strlen(buffer));
+ mEffects.valueAt(i)->dump(fd);
+ }
+
+
return NO_ERROR;
}
@@ -902,7 +1002,8 @@
#ifdef AUDIO_POLICY_TEST
Thread(false),
#endif //AUDIO_POLICY_TEST
- mPhoneState(AudioSystem::MODE_NORMAL), mRingerMode(0), mMusicStopTime(0), mLimitRingtoneVolume(false)
+ mPhoneState(AudioSystem::MODE_NORMAL), mRingerMode(0), mMusicStopTime(0),
+ mLimitRingtoneVolume(false), mTotalEffectsCpuLoad(0), mTotalEffectsMemory(0)
{
mpClientInterface = clientInterface;
@@ -938,6 +1039,7 @@
} else {
addOutput(mHardwareOutput, outputDesc);
setOutputDevice(mHardwareOutput, (uint32_t)AudioSystem::DEVICE_OUT_SPEAKER, true);
+ //TODO: configure audio effect output stage here
}
updateDeviceForStrategy();
@@ -1152,6 +1254,9 @@
if (mA2dpOutput) {
// add A2DP output descriptor
addOutput(mA2dpOutput, outputDesc);
+
+ //TODO: configure audio effect output stage here
+
// set initial stream volume for A2DP device
applyStreamVolumes(mA2dpOutput, device);
if (a2dpUsedForSonification()) {
@@ -1269,6 +1374,7 @@
AudioParameter param;
param.add(String8("closing"), String8("true"));
mpClientInterface->setParameters(mA2dpOutput, param.toString());
+
mpClientInterface->closeOutput(mA2dpOutput);
delete mOutputs.valueFor(mA2dpOutput);
mOutputs.removeItem(mA2dpOutput);
@@ -1282,48 +1388,54 @@
uint32_t curDevice = getDeviceForStrategy(strategy, false);
bool a2dpWasUsed = AudioSystem::isA2dpDevice((AudioSystem::audio_devices)(prevDevice & ~AudioSystem::DEVICE_OUT_SPEAKER));
bool a2dpIsUsed = AudioSystem::isA2dpDevice((AudioSystem::audio_devices)(curDevice & ~AudioSystem::DEVICE_OUT_SPEAKER));
- AudioOutputDescriptor *hwOutputDesc = mOutputs.valueFor(mHardwareOutput);
- AudioOutputDescriptor *a2dpOutputDesc;
+ audio_io_handle_t srcOutput = 0;
+ audio_io_handle_t dstOutput = 0;
if (a2dpWasUsed && !a2dpIsUsed) {
bool dupUsed = a2dpUsedForSonification() && a2dpWasUsed && (AudioSystem::popCount(prevDevice) == 2);
-
+ dstOutput = mHardwareOutput;
if (dupUsed) {
- LOGV("checkOutputForStrategy() moving strategy %d to duplicated", strategy);
- a2dpOutputDesc = mOutputs.valueFor(mDuplicatedOutput);
+ LOGV("checkOutputForStrategy() moving strategy %d from duplicated", strategy);
+ srcOutput = mDuplicatedOutput;
} else {
- LOGV("checkOutputForStrategy() moving strategy %d to a2dp", strategy);
- a2dpOutputDesc = mOutputs.valueFor(mA2dpOutput);
+ LOGV("checkOutputForStrategy() moving strategy %d from a2dp", strategy);
+ srcOutput = mA2dpOutput;
}
- for (int i = 0; i < (int)AudioSystem::NUM_STREAM_TYPES; i++) {
- if (getStrategy((AudioSystem::stream_type)i) == strategy) {
- mpClientInterface->setStreamOutput((AudioSystem::stream_type)i, mHardwareOutput);
- }
- }
// do not change newDevice if it was already set before this call by a previous call to
// getNewDevice() or checkOutputForStrategy() for a strategy with higher priority
- if (newDevice == 0 && hwOutputDesc->isUsedByStrategy(strategy)) {
+ if (newDevice == 0 && mOutputs.valueFor(mHardwareOutput)->isUsedByStrategy(strategy)) {
newDevice = getDeviceForStrategy(strategy, false);
}
}
if (a2dpIsUsed && !a2dpWasUsed) {
bool dupUsed = a2dpUsedForSonification() && a2dpIsUsed && (AudioSystem::popCount(curDevice) == 2);
- audio_io_handle_t a2dpOutput;
-
+ srcOutput = mHardwareOutput;
if (dupUsed) {
- LOGV("checkOutputForStrategy() moving strategy %d from duplicated", strategy);
- a2dpOutputDesc = mOutputs.valueFor(mDuplicatedOutput);
- a2dpOutput = mDuplicatedOutput;
+ LOGV("checkOutputForStrategy() moving strategy %d to duplicated", strategy);
+ dstOutput = mDuplicatedOutput;
} else {
- LOGV("checkOutputForStrategy() moving strategy %d from a2dp", strategy);
- a2dpOutputDesc = mOutputs.valueFor(mA2dpOutput);
- a2dpOutput = mA2dpOutput;
+ LOGV("checkOutputForStrategy() moving strategy %d to a2dp", strategy);
+ dstOutput = mA2dpOutput;
}
+ }
+ if (srcOutput != 0 && dstOutput != 0) {
+ // Move effects associated with this strategy from the previous output to the new output
+ for (size_t i = 0; i < mEffects.size(); i++) {
+ EffectDescriptor *desc = mEffects.valueAt(i);
+ if (desc->mSession != AudioSystem::SESSION_OUTPUT_STAGE &&
+ desc->mStrategy == strategy &&
+ desc->mOutput == srcOutput) {
+ LOGV("checkOutputForStrategy() moving effect %d to output %d", mEffects.keyAt(i), dstOutput);
+ mpClientInterface->moveEffects(desc->mSession, srcOutput, dstOutput);
+ desc->mOutput = dstOutput;
+ }
+ }
+ // Move tracks associated with this strategy from the previous output to the new output
for (int i = 0; i < (int)AudioSystem::NUM_STREAM_TYPES; i++) {
if (getStrategy((AudioSystem::stream_type)i) == strategy) {
- mpClientInterface->setStreamOutput((AudioSystem::stream_type)i, a2dpOutput);
+ mpClientInterface->setStreamOutput((AudioSystem::stream_type)i, dstOutput);
}
}
}
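A condensed restatement of the migration rule applied in the loop above, assuming the EffectDescriptor bookkeeping class is accessible at this point. An effect follows the rerouted strategy only if it is a session effect (output-stage effects stay attached to their output) that was placed for this strategy on the output being vacated.

// Sketch: the predicate deciding which registered effects moveEffects() relocates.
static bool effectFollowsStrategy(const AudioPolicyManagerBase::EffectDescriptor *desc,
                                  AudioPolicyManagerBase::routing_strategy strategy,
                                  audio_io_handle_t srcOutput)
{
    return desc->mSession != AudioSystem::SESSION_OUTPUT_STAGE
        && desc->mStrategy == strategy
        && desc->mOutput == srcOutput;
}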
@@ -1371,8 +1483,12 @@
return device;
}
-AudioPolicyManagerBase::routing_strategy AudioPolicyManagerBase::getStrategy(AudioSystem::stream_type stream)
-{
+uint32_t AudioPolicyManagerBase::getStrategyForStream(AudioSystem::stream_type stream) {
+ return (uint32_t)getStrategy(stream);
+}
+
+AudioPolicyManagerBase::routing_strategy AudioPolicyManagerBase::getStrategy(
+ AudioSystem::stream_type stream) {
// stream to strategy mapping
switch (stream) {
case AudioSystem::VOICE_CALL:
@@ -1836,6 +1952,16 @@
(format !=0 && !AudioSystem::isLinearPCM(format)));
}
+uint32_t AudioPolicyManagerBase::getMaxEffectsCpuLoad()
+{
+ return MAX_EFFECTS_CPU_LOAD;
+}
+
+uint32_t AudioPolicyManagerBase::getMaxEffectsMemory()
+{
+ return MAX_EFFECTS_MEMORY;
+}
+
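The limits returned above are interpreted in the units used throughout this file; a small sketch of the implied conversions. MAX_EFFECTS_CPU_LOAD and MAX_EFFECTS_MEMORY are constants defined elsewhere in the policy manager and are not shown in this patch.

// Units sketch: effect CPU load is tracked in tenths of MIPS, memory in KB,
// matching the "(float)cpuLoad/10" display scaling and "%d KB" messages.
uint32_t cpuBudget = getMaxEffectsCpuLoad();  // tenths of MIPS
uint32_t memBudget = getMaxEffectsMemory();   // KB
float cpuBudgetMips = (float)cpuBudget / 10;  // human-readable form, as in dump()

// registerEffect() admits a new effect only while both budgets hold:
//   mTotalEffectsCpuLoad + desc->cpuLoad     <= cpuBudget
//   mTotalEffectsMemory  + desc->memoryUsage <= memBudget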
// --- AudioOutputDescriptor class implementation
AudioPolicyManagerBase::AudioOutputDescriptor::AudioOutputDescriptor()
@@ -1969,5 +2095,27 @@
mCanBeMuted);
}
+// --- EffectDescriptor class implementation
+
+status_t AudioPolicyManagerBase::EffectDescriptor::dump(int fd)
+{
+ const size_t SIZE = 256;
+ char buffer[SIZE];
+ String8 result;
+
+ snprintf(buffer, SIZE, " Output: %d\n", mOutput);
+ result.append(buffer);
+ snprintf(buffer, SIZE, " Strategy: %d\n", mStrategy);
+ result.append(buffer);
+ snprintf(buffer, SIZE, " Session: %d\n", mSession);
+ result.append(buffer);
+ snprintf(buffer, SIZE, " Name: %s\n", mDesc.name);
+ result.append(buffer);
+ write(fd, result.string(), result.size());
+
+ return NO_ERROR;
+}
+
+
}; // namespace android
diff --git a/services/audioflinger/AudioPolicyService.cpp b/services/audioflinger/AudioPolicyService.cpp
index bb3905c..f24e08e 100644
--- a/services/audioflinger/AudioPolicyService.cpp
+++ b/services/audioflinger/AudioPolicyService.cpp
@@ -119,7 +119,8 @@
if (!AudioSystem::isOutputDevice(device) && !AudioSystem::isInputDevice(device)) {
return BAD_VALUE;
}
- if (state != AudioSystem::DEVICE_STATE_AVAILABLE && state != AudioSystem::DEVICE_STATE_UNAVAILABLE) {
+ if (state != AudioSystem::DEVICE_STATE_AVAILABLE &&
+ state != AudioSystem::DEVICE_STATE_UNAVAILABLE) {
return BAD_VALUE;
}
@@ -128,8 +129,9 @@
return mpPolicyManager->setDeviceConnectionState(device, state, device_address);
}
-AudioSystem::device_connection_state AudioPolicyService::getDeviceConnectionState(AudioSystem::audio_devices device,
- const char *device_address)
+AudioSystem::device_connection_state AudioPolicyService::getDeviceConnectionState(
+ AudioSystem::audio_devices device,
+ const char *device_address)
{
if (mpPolicyManager == NULL) {
return AudioSystem::DEVICE_STATE_UNAVAILABLE;
@@ -175,7 +177,8 @@
return NO_ERROR;
}
-status_t AudioPolicyService::setForceUse(AudioSystem::force_use usage, AudioSystem::forced_config config)
+status_t AudioPolicyService::setForceUse(AudioSystem::force_use usage,
+ AudioSystem::forced_config config)
{
if (mpPolicyManager == NULL) {
return NO_INIT;
@@ -223,24 +226,28 @@
return mpPolicyManager->getOutput(stream, samplingRate, format, channels, flags);
}
-status_t AudioPolicyService::startOutput(audio_io_handle_t output, AudioSystem::stream_type stream)
+status_t AudioPolicyService::startOutput(audio_io_handle_t output,
+ AudioSystem::stream_type stream,
+ int session)
{
if (mpPolicyManager == NULL) {
return NO_INIT;
}
LOGV("startOutput() tid %d", gettid());
Mutex::Autolock _l(mLock);
- return mpPolicyManager->startOutput(output, stream);
+ return mpPolicyManager->startOutput(output, stream, session);
}
-status_t AudioPolicyService::stopOutput(audio_io_handle_t output, AudioSystem::stream_type stream)
+status_t AudioPolicyService::stopOutput(audio_io_handle_t output,
+ AudioSystem::stream_type stream,
+ int session)
{
if (mpPolicyManager == NULL) {
return NO_INIT;
}
LOGV("stopOutput() tid %d", gettid());
Mutex::Autolock _l(mLock);
- return mpPolicyManager->stopOutput(output, stream);
+ return mpPolicyManager->stopOutput(output, stream, session);
}
void AudioPolicyService::releaseOutput(audio_io_handle_t output)
@@ -339,8 +346,46 @@
return mpPolicyManager->getStreamVolumeIndex(stream, index);
}
+uint32_t AudioPolicyService::getStrategyForStream(AudioSystem::stream_type stream)
+{
+ if (mpPolicyManager == NULL) {
+ return 0;
+ }
+ return mpPolicyManager->getStrategyForStream(stream);
+}
+
+audio_io_handle_t AudioPolicyService::getOutputForEffect(effect_descriptor_t *desc)
+{
+ if (mpPolicyManager == NULL) {
+ return NO_INIT;
+ }
+ Mutex::Autolock _l(mLock);
+ return mpPolicyManager->getOutputForEffect(desc);
+}
+
+status_t AudioPolicyService::registerEffect(effect_descriptor_t *desc,
+ audio_io_handle_t output,
+ uint32_t strategy,
+ int session,
+ int id)
+{
+ if (mpPolicyManager == NULL) {
+ return NO_INIT;
+ }
+ return mpPolicyManager->registerEffect(desc, output, strategy, session, id);
+}
+
+status_t AudioPolicyService::unregisterEffect(int id)
+{
+ if (mpPolicyManager == NULL) {
+ return NO_INIT;
+ }
+ return mpPolicyManager->unregisterEffect(id);
+}
+
void AudioPolicyService::binderDied(const wp<IBinder>& who) {
- LOGW("binderDied() %p, tid %d, calling tid %d", who.unsafe_get(), gettid(), IPCThreadState::self()->getCallingPid());
+ LOGW("binderDied() %p, tid %d, calling tid %d", who.unsafe_get(), gettid(),
+ IPCThreadState::self()->getCallingPid());
}
static bool tryLock(Mutex& mutex)
@@ -447,10 +492,16 @@
return 0;
}
- return af->openOutput(pDevices, pSamplingRate, (uint32_t *)pFormat, pChannels, pLatencyMs, flags);
+ return af->openOutput(pDevices,
+ pSamplingRate,
+ (uint32_t *)pFormat,
+ pChannels,
+ pLatencyMs,
+ flags);
}
-audio_io_handle_t AudioPolicyService::openDuplicateOutput(audio_io_handle_t output1, audio_io_handle_t output2)
+audio_io_handle_t AudioPolicyService::openDuplicateOutput(audio_io_handle_t output1,
+ audio_io_handle_t output2)
{
sp<IAudioFlinger> af = AudioSystem::get_audio_flinger();
if (af == 0) {
@@ -514,12 +565,16 @@
return af->closeInput(input);
}
-status_t AudioPolicyService::setStreamVolume(AudioSystem::stream_type stream, float volume, audio_io_handle_t output, int delayMs)
+status_t AudioPolicyService::setStreamVolume(AudioSystem::stream_type stream,
+ float volume,
+ audio_io_handle_t output,
+ int delayMs)
{
return mAudioCommandThread->volumeCommand((int)stream, volume, (int)output, delayMs);
}
-status_t AudioPolicyService::setStreamOutput(AudioSystem::stream_type stream, audio_io_handle_t output)
+status_t AudioPolicyService::setStreamOutput(AudioSystem::stream_type stream,
+ audio_io_handle_t output)
{
sp<IAudioFlinger> af = AudioSystem::get_audio_flinger();
if (af == 0) return PERMISSION_DENIED;
@@ -527,8 +582,18 @@
return af->setStreamOutput(stream, output);
}
+status_t AudioPolicyService::moveEffects(int session, audio_io_handle_t srcOutput,
+ audio_io_handle_t dstOutput)
+{
+ sp<IAudioFlinger> af = AudioSystem::get_audio_flinger();
+ if (af == 0) return PERMISSION_DENIED;
+ return af->moveEffects(session, (int)srcOutput, (int)dstOutput);
+}
+
-void AudioPolicyService::setParameters(audio_io_handle_t ioHandle, const String8& keyValuePairs, int delayMs)
+void AudioPolicyService::setParameters(audio_io_handle_t ioHandle,
+ const String8& keyValuePairs,
+ int delayMs)
{
mAudioCommandThread->parametersCommand((int)ioHandle, keyValuePairs, delayMs);
}
@@ -539,7 +604,8 @@
return result;
}
-status_t AudioPolicyService::startTone(ToneGenerator::tone_type tone, AudioSystem::stream_type stream)
+status_t AudioPolicyService::startTone(ToneGenerator::tone_type tone,
+ AudioSystem::stream_type stream)
{
mTonePlaybackThread->startToneCommand(tone, stream);
return NO_ERROR;
@@ -623,8 +689,11 @@
}break;
case SET_VOLUME: {
VolumeData *data = (VolumeData *)command->mParam;
- LOGV("AudioCommandThread() processing set volume stream %d, volume %f, output %d", data->mStream, data->mVolume, data->mIO);
- command->mStatus = AudioSystem::setStreamVolume(data->mStream, data->mVolume, data->mIO);
+ LOGV("AudioCommandThread() processing set volume stream %d, \
+ volume %f, output %d", data->mStream, data->mVolume, data->mIO);
+ command->mStatus = AudioSystem::setStreamVolume(data->mStream,
+ data->mVolume,
+ data->mIO);
if (command->mWaitStatus) {
command->mCond.signal();
mWaitWorkCV.wait(mLock);
@@ -633,7 +702,8 @@
}break;
case SET_PARAMETERS: {
ParametersData *data = (ParametersData *)command->mParam;
- LOGV("AudioCommandThread() processing set parameters string %s, io %d", data->mKeyValuePairs.string(), data->mIO);
+ LOGV("AudioCommandThread() processing set parameters string %s, io %d",
+ data->mKeyValuePairs.string(), data->mIO);
command->mStatus = AudioSystem::setParameters(data->mIO, data->mKeyValuePairs);
if (command->mWaitStatus) {
command->mCond.signal();
@@ -643,7 +713,8 @@
}break;
case SET_VOICE_VOLUME: {
VoiceVolumeData *data = (VoiceVolumeData *)command->mParam;
- LOGV("AudioCommandThread() processing set voice volume volume %f", data->mVolume);
+ LOGV("AudioCommandThread() processing set voice volume volume %f",
+ data->mVolume);
command->mStatus = AudioSystem::setVoiceVolume(data->mVolume);
if (command->mWaitStatus) {
command->mCond.signal();
@@ -734,7 +805,10 @@
mWaitWorkCV.signal();
}
-status_t AudioPolicyService::AudioCommandThread::volumeCommand(int stream, float volume, int output, int delayMs)
+status_t AudioPolicyService::AudioCommandThread::volumeCommand(int stream,
+ float volume,
+ int output,
+ int delayMs)
{
status_t status = NO_ERROR;
@@ -752,7 +826,8 @@
}
Mutex::Autolock _l(mLock);
insertCommand_l(command, delayMs);
- LOGV("AudioCommandThread() adding set volume stream %d, volume %f, output %d", stream, volume, output);
+ LOGV("AudioCommandThread() adding set volume stream %d, volume %f, output %d",
+ stream, volume, output);
mWaitWorkCV.signal();
if (command->mWaitStatus) {
command->mCond.wait(mLock);
@@ -762,7 +837,9 @@
return status;
}
-status_t AudioPolicyService::AudioCommandThread::parametersCommand(int ioHandle, const String8& keyValuePairs, int delayMs)
+status_t AudioPolicyService::AudioCommandThread::parametersCommand(int ioHandle,
+ const String8& keyValuePairs,
+ int delayMs)
{
status_t status = NO_ERROR;
@@ -779,7 +856,8 @@
}
Mutex::Autolock _l(mLock);
insertCommand_l(command, delayMs);
- LOGV("AudioCommandThread() adding set parameter string %s, io %d ,delay %d", keyValuePairs.string(), ioHandle, delayMs);
+ LOGV("AudioCommandThread() adding set parameter string %s, io %d ,delay %d",
+ keyValuePairs.string(), ioHandle, delayMs);
mWaitWorkCV.signal();
if (command->mWaitStatus) {
command->mCond.wait(mLock);
@@ -840,7 +918,8 @@
ParametersData *data = (ParametersData *)command->mParam;
ParametersData *data2 = (ParametersData *)command2->mParam;
if (data->mIO != data2->mIO) break;
- LOGV("Comparing parameter command %s to new command %s", data2->mKeyValuePairs.string(), data->mKeyValuePairs.string());
+ LOGV("Comparing parameter command %s to new command %s",
+ data2->mKeyValuePairs.string(), data->mKeyValuePairs.string());
AudioParameter param = AudioParameter(data->mKeyValuePairs);
AudioParameter param2 = AudioParameter(data2->mKeyValuePairs);
for (size_t j = 0; j < param.size(); j++) {
@@ -872,7 +951,8 @@
VolumeData *data2 = (VolumeData *)command2->mParam;
if (data->mIO != data2->mIO) break;
if (data->mStream != data2->mStream) break;
- LOGV("Filtering out volume command on output %d for stream %d", data->mIO, data->mStream);
+ LOGV("Filtering out volume command on output %d for stream %d",
+ data->mIO, data->mStream);
removedCommands.add(command2);
} break;
case START_TONE:
@@ -896,7 +976,8 @@
removedCommands.clear();
// insert command at the right place according to its time stamp
- LOGV("inserting command: %d at index %d, num commands %d", command->mCommand, (int)i+1, mAudioCommands.size());
+ LOGV("inserting command: %d at index %d, num commands %d",
+ command->mCommand, (int)i+1, mAudioCommands.size());
mAudioCommands.insertAt(command, i + 1);
}
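For reference, a minimal sketch of the time-ordered insertion that ends the hunk above, assuming each queued command carries an absolute deadline (called mTime here; the field name is an assumption, not shown in this patch). Scanning from the tail finds the last command due no later than the new one, so commands with equal deadlines keep their submission order.

// Sketch: insert 'command' so mAudioCommands stays sorted by deadline.
ssize_t i;
for (i = (ssize_t)mAudioCommands.size() - 1; i >= 0; i--) {
    if (mAudioCommands[i]->mTime <= command->mTime) break;
}
mAudioCommands.insertAt(command, i + 1);  // matches the insertAt() call above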
diff --git a/services/audioflinger/AudioPolicyService.h b/services/audioflinger/AudioPolicyService.h
index a13d0bd..558f455 100644
--- a/services/audioflinger/AudioPolicyService.h
+++ b/services/audioflinger/AudioPolicyService.h
@@ -28,7 +28,8 @@
// ----------------------------------------------------------------------------
-class AudioPolicyService: public BnAudioPolicyService, public AudioPolicyClientInterface, public IBinder::DeathRecipient
+class AudioPolicyService: public BnAudioPolicyService, public AudioPolicyClientInterface,
+ public IBinder::DeathRecipient
{
public:
@@ -43,8 +44,9 @@
virtual status_t setDeviceConnectionState(AudioSystem::audio_devices device,
AudioSystem::device_connection_state state,
const char *device_address);
- virtual AudioSystem::device_connection_state getDeviceConnectionState(AudioSystem::audio_devices device,
- const char *device_address);
+ virtual AudioSystem::device_connection_state getDeviceConnectionState(
+ AudioSystem::audio_devices device,
+ const char *device_address);
virtual status_t setPhoneState(int state);
virtual status_t setRingerMode(uint32_t mode, uint32_t mask);
virtual status_t setForceUse(AudioSystem::force_use usage, AudioSystem::forced_config config);
@@ -53,15 +55,21 @@
uint32_t samplingRate = 0,
uint32_t format = AudioSystem::FORMAT_DEFAULT,
uint32_t channels = 0,
- AudioSystem::output_flags flags = AudioSystem::OUTPUT_FLAG_INDIRECT);
- virtual status_t startOutput(audio_io_handle_t output, AudioSystem::stream_type stream);
- virtual status_t stopOutput(audio_io_handle_t output, AudioSystem::stream_type stream);
+ AudioSystem::output_flags flags =
+ AudioSystem::OUTPUT_FLAG_INDIRECT);
+ virtual status_t startOutput(audio_io_handle_t output,
+ AudioSystem::stream_type stream,
+ int session = 0);
+ virtual status_t stopOutput(audio_io_handle_t output,
+ AudioSystem::stream_type stream,
+ int session = 0);
virtual void releaseOutput(audio_io_handle_t output);
virtual audio_io_handle_t getInput(int inputSource,
uint32_t samplingRate = 0,
uint32_t format = AudioSystem::FORMAT_DEFAULT,
uint32_t channels = 0,
- AudioSystem::audio_in_acoustics acoustics = (AudioSystem::audio_in_acoustics)0);
+ AudioSystem::audio_in_acoustics acoustics =
+ (AudioSystem::audio_in_acoustics)0);
virtual status_t startInput(audio_io_handle_t input);
virtual status_t stopInput(audio_io_handle_t input);
virtual void releaseInput(audio_io_handle_t input);
@@ -71,6 +79,16 @@
virtual status_t setStreamVolumeIndex(AudioSystem::stream_type stream, int index);
virtual status_t getStreamVolumeIndex(AudioSystem::stream_type stream, int *index);
+ virtual uint32_t getStrategyForStream(AudioSystem::stream_type stream);
+
+ virtual audio_io_handle_t getOutputForEffect(effect_descriptor_t *desc);
+ virtual status_t registerEffect(effect_descriptor_t *desc,
+ audio_io_handle_t output,
+ uint32_t strategy,
+ int session,
+ int id);
+ virtual status_t unregisterEffect(int id);
+
virtual status_t onTransact(
uint32_t code,
const Parcel& data,
@@ -89,7 +107,8 @@
uint32_t *pChannels,
uint32_t *pLatencyMs,
AudioSystem::output_flags flags);
- virtual audio_io_handle_t openDuplicateOutput(audio_io_handle_t output1, audio_io_handle_t output2);
+ virtual audio_io_handle_t openDuplicateOutput(audio_io_handle_t output1,
+ audio_io_handle_t output2);
virtual status_t closeOutput(audio_io_handle_t output);
virtual status_t suspendOutput(audio_io_handle_t output);
virtual status_t restoreOutput(audio_io_handle_t output);
@@ -99,13 +118,21 @@
uint32_t *pChannels,
uint32_t acoustics);
virtual status_t closeInput(audio_io_handle_t input);
- virtual status_t setStreamVolume(AudioSystem::stream_type stream, float volume, audio_io_handle_t output, int delayMs = 0);
+ virtual status_t setStreamVolume(AudioSystem::stream_type stream,
+ float volume,
+ audio_io_handle_t output,
+ int delayMs = 0);
virtual status_t setStreamOutput(AudioSystem::stream_type stream, audio_io_handle_t output);
- virtual void setParameters(audio_io_handle_t ioHandle, const String8& keyValuePairs, int delayMs = 0);
+ virtual void setParameters(audio_io_handle_t ioHandle,
+ const String8& keyValuePairs,
+ int delayMs = 0);
virtual String8 getParameters(audio_io_handle_t ioHandle, const String8& keys);
virtual status_t startTone(ToneGenerator::tone_type tone, AudioSystem::stream_type stream);
virtual status_t stopTone();
virtual status_t setVoiceVolume(float volume, int delayMs = 0);
+ virtual status_t moveEffects(int session,
+ audio_io_handle_t srcOutput,
+ audio_io_handle_t dstOutput);
private:
AudioPolicyService();
diff --git a/services/tests/servicestests/src/com/android/server/DropBoxTest.java b/services/tests/servicestests/src/com/android/server/DropBoxTest.java
index 78a90fb..f3baff4 100644
--- a/services/tests/servicestests/src/com/android/server/DropBoxTest.java
+++ b/services/tests/servicestests/src/com/android/server/DropBoxTest.java
@@ -20,6 +20,10 @@
import android.content.Context;
import android.content.Intent;
import android.os.DropBoxManager;
+import android.os.Parcel;
+import android.os.Parcelable;
+import android.os.ParcelFileDescriptor;
+import android.os.Process;
import android.os.ServiceManager;
import android.os.StatFs;
import android.provider.Settings;
@@ -27,10 +31,13 @@
import com.android.server.DropBoxManagerService;
+import java.io.BufferedReader;
import java.io.File;
import java.io.FileOutputStream;
import java.io.FileWriter;
+import java.io.IOException;
import java.io.InputStream;
+import java.io.InputStreamReader;
import java.util.Random;
import java.util.zip.GZIPOutputStream;
@@ -531,6 +538,203 @@
service.stop();
}
+ public void testDropBoxEntrySerialization() throws Exception {
+ // Make sure DropBoxManager.Entry can be serialized to a Parcel and back
+ // under a variety of conditions.
+
+ Parcel parcel = Parcel.obtain();
+ File dir = getEmptyDir("testDropBoxEntrySerialization");
+
+ new DropBoxManager.Entry("empty", 1000000).writeToParcel(parcel, 0);
+ new DropBoxManager.Entry("string", 2000000, "String Value").writeToParcel(parcel, 0);
+ new DropBoxManager.Entry("bytes", 3000000, "Bytes Value".getBytes(),
+ DropBoxManager.IS_TEXT).writeToParcel(parcel, 0);
+ new DropBoxManager.Entry("zerobytes", 4000000, new byte[0], 0).writeToParcel(parcel, 0);
+ new DropBoxManager.Entry("emptybytes", 5000000, (byte[]) null,
+ DropBoxManager.IS_EMPTY).writeToParcel(parcel, 0);
+
+ try {
+ new DropBoxManager.Entry("badbytes", 99999,
+ "Bad Bytes Value".getBytes(),
+ DropBoxManager.IS_EMPTY).writeToParcel(parcel, 0);
+ fail("IllegalArgumentException expected for non-null byte[] and IS_EMPTY flags");
+ } catch (IllegalArgumentException e) {
+ // expected
+ }
+
+ try {
+ new DropBoxManager.Entry("badbytes", 99999, (byte[]) null, 0).writeToParcel(parcel, 0);
+ fail("IllegalArgumentException expected for null byte[] and non-IS_EMPTY flags");
+ } catch (IllegalArgumentException e) {
+ // expected
+ }
+
+ File f = new File(dir, "file.dat");
+ FileOutputStream os = new FileOutputStream(f);
+ os.write("File Value".getBytes());
+ os.close();
+
+ new DropBoxManager.Entry("file", 6000000, f, DropBoxManager.IS_TEXT).writeToParcel(
+ parcel, Parcelable.PARCELABLE_WRITE_RETURN_VALUE);
+ new DropBoxManager.Entry("binfile", 7000000, f, 0).writeToParcel(
+ parcel, Parcelable.PARCELABLE_WRITE_RETURN_VALUE);
+ new DropBoxManager.Entry("emptyfile", 8000000, (ParcelFileDescriptor) null,
+ DropBoxManager.IS_EMPTY).writeToParcel(parcel, 0);
+
+ try {
+ new DropBoxManager.Entry("badfile", 99999, new File(dir, "nonexist.dat"), 0);
+ fail("IOException expected for nonexistent file");
+ } catch (IOException e) {
+ // expected
+ }
+
+ try {
+ new DropBoxManager.Entry("badfile", 99999, f, DropBoxManager.IS_EMPTY).writeToParcel(
+ parcel, 0);
+ fail("IllegalArgumentException expected for non-null file and IS_EMPTY flags");
+ } catch (IllegalArgumentException e) {
+ // expected
+ }
+
+ try {
+ new DropBoxManager.Entry("badfile", 99999, (ParcelFileDescriptor) null, 0);
+ fail("IllegalArgumentException expected for null PFD and non-IS_EMPTY flags");
+ } catch (IllegalArgumentException e) {
+ // expected
+ }
+
+ File gz = new File(dir, "file.gz");
+ GZIPOutputStream gzout = new GZIPOutputStream(new FileOutputStream(gz));
+ gzout.write("Gzip File Value".getBytes());
+ gzout.close();
+
+ new DropBoxManager.Entry("gzipfile", 9000000, gz,
+ DropBoxManager.IS_TEXT | DropBoxManager.IS_GZIPPED).writeToParcel(
+ parcel, Parcelable.PARCELABLE_WRITE_RETURN_VALUE);
+ new DropBoxManager.Entry("gzipbinfile", 10000000, gz,
+ DropBoxManager.IS_GZIPPED).writeToParcel(
+ parcel, Parcelable.PARCELABLE_WRITE_RETURN_VALUE);
+
+ //
+ // Switch from writing to reading
+ //
+
+ parcel.setDataPosition(0);
+ DropBoxManager.Entry e;
+
+ e = DropBoxManager.Entry.CREATOR.createFromParcel(parcel);
+ assertEquals("empty", e.getTag());
+ assertEquals(1000000, e.getTimeMillis());
+ assertEquals(DropBoxManager.IS_EMPTY, e.getFlags());
+ assertEquals(null, e.getText(100));
+ assertEquals(null, e.getInputStream());
+ e.close();
+
+ e = DropBoxManager.Entry.CREATOR.createFromParcel(parcel);
+ assertEquals("string", e.getTag());
+ assertEquals(2000000, e.getTimeMillis());
+ assertEquals(DropBoxManager.IS_TEXT, e.getFlags());
+ assertEquals("String Value", e.getText(100));
+ assertEquals("String Value",
+ new BufferedReader(new InputStreamReader(e.getInputStream())).readLine());
+ e.close();
+
+ e = DropBoxManager.Entry.CREATOR.createFromParcel(parcel);
+ assertEquals("bytes", e.getTag());
+ assertEquals(3000000, e.getTimeMillis());
+ assertEquals(DropBoxManager.IS_TEXT, e.getFlags());
+ assertEquals("Bytes Value", e.getText(100));
+ e.close();
+
+ e = DropBoxManager.Entry.CREATOR.createFromParcel(parcel);
+ assertEquals("zerobytes", e.getTag());
+ assertEquals(4000000, e.getTimeMillis());
+ assertEquals(0, e.getFlags());
+ assertEquals(null, e.getText(100));
+ assertEquals(null,
+ new BufferedReader(new InputStreamReader(e.getInputStream())).readLine());
+ e.close();
+
+ e = DropBoxManager.Entry.CREATOR.createFromParcel(parcel);
+ assertEquals("emptybytes", e.getTag());
+ assertEquals(5000000, e.getTimeMillis());
+ assertEquals(DropBoxManager.IS_EMPTY, e.getFlags());
+ assertEquals(null, e.getText(100));
+ assertEquals(null, e.getInputStream());
+ e.close();
+
+ e = DropBoxManager.Entry.CREATOR.createFromParcel(parcel);
+ assertEquals("file", e.getTag());
+ assertEquals(6000000, e.getTimeMillis());
+ assertEquals(DropBoxManager.IS_TEXT, e.getFlags());
+ assertEquals("File Value", e.getText(100));
+ e.close();
+
+ e = DropBoxManager.Entry.CREATOR.createFromParcel(parcel);
+ assertEquals("binfile", e.getTag());
+ assertEquals(7000000, e.getTimeMillis());
+ assertEquals(0, e.getFlags());
+ assertEquals(null, e.getText(100));
+ assertEquals("File Value",
+ new BufferedReader(new InputStreamReader(e.getInputStream())).readLine());
+ e.close();
+
+ e = DropBoxManager.Entry.CREATOR.createFromParcel(parcel);
+ assertEquals("emptyfile", e.getTag());
+ assertEquals(8000000, e.getTimeMillis());
+ assertEquals(DropBoxManager.IS_EMPTY, e.getFlags());
+ assertEquals(null, e.getText(100));
+ assertEquals(null, e.getInputStream());
+ e.close();
+
+ e = DropBoxManager.Entry.CREATOR.createFromParcel(parcel);
+ assertEquals("gzipfile", e.getTag());
+ assertEquals(9000000, e.getTimeMillis());
+ assertEquals(DropBoxManager.IS_TEXT, e.getFlags());
+ assertEquals("Gzip File Value", e.getText(100));
+ e.close();
+
+ e = DropBoxManager.Entry.CREATOR.createFromParcel(parcel);
+ assertEquals("gzipbinfile", e.getTag());
+ assertEquals(10000000, e.getTimeMillis());
+ assertEquals(0, e.getFlags());
+ assertEquals(null, e.getText(100));
+ assertEquals("Gzip File Value",
+ new BufferedReader(new InputStreamReader(e.getInputStream())).readLine());
+ e.close();
+
+ assertEquals(0, parcel.dataAvail());
+ parcel.recycle();
+ }
+
+ public void testDropBoxEntrySerializationDoesntLeakFileDescriptors() throws Exception {
+ File dir = getEmptyDir("testDropBoxEntrySerialization");
+ File f = new File(dir, "file.dat");
+ FileOutputStream os = new FileOutputStream(f);
+ os.write("File Value".getBytes());
+ os.close();
+
+ int before = countOpenFiles();
+ assertTrue(before > 0);
+
+ for (int i = 0; i < 1000; i++) {
+ Parcel parcel = Parcel.obtain();
+ new DropBoxManager.Entry("file", 1000000, f, 0).writeToParcel(
+ parcel, Parcelable.PARCELABLE_WRITE_RETURN_VALUE);
+
+ parcel.setDataPosition(0);
+ DropBoxManager.Entry e = DropBoxManager.Entry.CREATOR.createFromParcel(parcel);
+ assertEquals("file", e.getTag());
+ e.close();
+
+ parcel.recycle();
+ }
+
+ int after = countOpenFiles();
+ assertTrue(after > 0);
+ assertTrue(after < before + 20);
+ }
+
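The leak test above follows a general pattern: sample the process's open file descriptors, repeat the suspect operation many times, and allow a small fixed slack for descriptors the runtime opens lazily. A brief Java sketch of that pattern, using the countOpenFiles() helper added at the end of this file; the loop body and the slack of 20 mirror the test above.

// Sketch of the descriptor-leak check pattern.
int before = countOpenFiles();
for (int i = 0; i < 1000; i++) {
    // ... parcel round-trip and Entry.close(), as in the test above ...
}
int after = countOpenFiles();
assertTrue(after < before + 20);  // a real per-iteration leak would add ~1000 FDs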
private void addRandomEntry(DropBoxManager dropbox, String tag, int size) throws Exception {
byte[] bytes = new byte[size];
new Random(System.currentTimeMillis()).nextBytes(bytes);
@@ -564,4 +768,8 @@
assertTrue(dir.listFiles().length == 0);
return dir;
}
+
+ private int countOpenFiles() {
+ return new File("/proc/" + Process.myPid() + "/fd").listFiles().length;
+ }
}