Up-integrate Audio Flinger changes from ICS_AAH

Bring in the audio flinger changes that add support for timed audio
tracks and HW master volume control.
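
A client of the new timed-track API is expected to drive it roughly as
follows (a minimal sketch only: the usual AudioTrack::set() initialization
is elided, and commonTimeNow, bufferBytes, pcmData and mediaTimePts are
placeholder values; a real producer derives the transform's rate ratio
from its own clocks):

    TimedAudioTrack track;
    // ... configure the track with the inherited AudioTrack setup ...

    // Anchor media time (microseconds) to the common time timeline.
    LinearTransform xform;
    xform.a_zero = 0;              // media time origin, in microseconds
    xform.b_zero = commonTimeNow;  // matching point on the common timeline
    xform.a_to_b_numer = 1;        // illustrative 1:1 rate ratio
    xform.a_to_b_denom = 1;
    track.setMediaTimeTransform(xform, TimedAudioTrack::COMMON_TIME);

    // Obtain a shared memory buffer, fill it with PCM samples, and queue
    // it for presentation at mediaTimePts on the media time timeline.
    sp<IMemory> buffer;
    if (track.allocateTimedBuffer(bufferBytes, &buffer) == NO_ERROR) {
        memcpy(buffer->pointer(), pcmData, bufferBytes);
        track.queueTimedBuffer(buffer, mediaTimePts);
    }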

Change-Id: Ide52d48809bdbed13acf35fd59b24637e35064ae
Signed-off-by: John Grossman <johngro@google.com>
diff --git a/include/media/AudioTrack.h b/include/media/AudioTrack.h
index ac7f6cf..9f2bd3a 100644
--- a/include/media/AudioTrack.h
+++ b/include/media/AudioTrack.h
@@ -446,7 +446,7 @@
      */
             status_t dump(int fd, const Vector<String16>& args) const;
 
-private:
+protected:
     /* copying audio tracks is not allowed */
                         AudioTrack(const AudioTrack& other);
             AudioTrack& operator = (const AudioTrack& other);
@@ -518,10 +518,33 @@
     int                     mAuxEffectId;
     mutable Mutex           mLock;
     status_t                mRestoreStatus;
+    bool                    mIsTimed;
     int                     mPreviousPriority;          // before start()
     int                     mPreviousSchedulingGroup;
 };
 
+class TimedAudioTrack : public AudioTrack
+{
+public:
+    TimedAudioTrack();
+
+    /* allocate a shared memory buffer that can be passed to queueTimedBuffer */
+    status_t allocateTimedBuffer(size_t size, sp<IMemory>* buffer);
+
+    /* queue a buffer obtained via allocateTimedBuffer for playback at the
+       given timestamp.  PTS units are microseconds on the media time timeline.
+       The media time transform (set with setMediaTimeTransform) supplied by
+       the audio producer will handle converting from media time to local time
+       (perhaps going through the common time timeline in the case of
+       synchronized multiroom audio) */
+    status_t queueTimedBuffer(const sp<IMemory>& buffer, int64_t pts);
+
+    /* define a transform between media time and either common time or
+       local time */
+    enum TargetTimeline {LOCAL_TIME, COMMON_TIME};
+    status_t setMediaTimeTransform(const LinearTransform& xform,
+                                   TargetTimeline target);
+};
 
 }; // namespace android
 
diff --git a/include/media/IAudioFlinger.h b/include/media/IAudioFlinger.h
index 433ce7c..7a2ada0 100644
--- a/include/media/IAudioFlinger.h
+++ b/include/media/IAudioFlinger.h
@@ -55,6 +55,7 @@
                                 uint32_t flags,
                                 const sp<IMemory>& sharedBuffer,
                                 audio_io_handle_t output,
+                                bool isTimed,
                                 int *sessionId,
                                 status_t *status) = 0;
 
diff --git a/include/media/IAudioTrack.h b/include/media/IAudioTrack.h
index e4772a1..77f3e21 100644
--- a/include/media/IAudioTrack.h
+++ b/include/media/IAudioTrack.h
@@ -24,7 +24,7 @@
 #include <utils/Errors.h>
 #include <binder/IInterface.h>
 #include <binder/IMemory.h>
-
+#include <utils/LinearTransform.h>
 
 namespace android {
 
@@ -71,6 +71,23 @@
      */
     virtual status_t    attachAuxEffect(int effectId) = 0;
 
+
+    /* Allocate a shared memory buffer suitable for holding timed audio
+       samples */
+    virtual status_t    allocateTimedBuffer(size_t size,
+                                            sp<IMemory>* buffer) = 0;
+
+    /* Queue a buffer obtained via allocateTimedBuffer for playback at the given
+       timestamp */
+    virtual status_t    queueTimedBuffer(const sp<IMemory>& buffer,
+                                         int64_t pts) = 0;
+
+    /* Define the linear transform that will be applied to the timestamps
+       given to queueTimedBuffer (which are expressed in media time).
+       Target specifies whether this transform converts media time to local time
+       or common time. The values for target are defined in AudioTrack.h */
+    virtual status_t    setMediaTimeTransform(const LinearTransform& xform,
+                                              int target) = 0;
 };
 
 // ----------------------------------------------------------------------------
diff --git a/media/libmedia/AudioTrack.cpp b/media/libmedia/AudioTrack.cpp
index aead9a1..74c97ed 100644
--- a/media/libmedia/AudioTrack.cpp
+++ b/media/libmedia/AudioTrack.cpp
@@ -80,7 +80,9 @@
 
 AudioTrack::AudioTrack()
     : mStatus(NO_INIT),
-      mPreviousPriority(ANDROID_PRIORITY_NORMAL), mPreviousSchedulingGroup(ANDROID_TGROUP_DEFAULT)
+      mIsTimed(false),
+      mPreviousPriority(ANDROID_PRIORITY_NORMAL),
+      mPreviousSchedulingGroup(ANDROID_TGROUP_DEFAULT)
 {
 }
 
@@ -96,7 +98,9 @@
         int notificationFrames,
         int sessionId)
     : mStatus(NO_INIT),
-      mPreviousPriority(ANDROID_PRIORITY_NORMAL), mPreviousSchedulingGroup(ANDROID_TGROUP_DEFAULT)
+      mIsTimed(false),
+      mPreviousPriority(ANDROID_PRIORITY_NORMAL),
+      mPreviousSchedulingGroup(ANDROID_TGROUP_DEFAULT)
 {
     mStatus = set(streamType, sampleRate, format, channelMask,
             frameCount, flags, cbf, user, notificationFrames,
@@ -134,7 +138,9 @@
         int notificationFrames,
         int sessionId)
     : mStatus(NO_INIT),
-      mPreviousPriority(ANDROID_PRIORITY_NORMAL), mPreviousSchedulingGroup(ANDROID_TGROUP_DEFAULT)
+      mIsTimed(false),
+      mPreviousPriority(ANDROID_PRIORITY_NORMAL),
+      mPreviousSchedulingGroup(ANDROID_TGROUP_DEFAULT)
 {
     mStatus = set(streamType, sampleRate, format, channelMask,
             0, flags, cbf, user, notificationFrames,
@@ -540,6 +546,10 @@
 {
     int afSamplingRate;
 
+    if (mIsTimed) {
+        return INVALID_OPERATION;
+    }
+
     if (AudioSystem::getOutputSamplingRate(&afSamplingRate, mStreamType) != NO_ERROR) {
         return NO_INIT;
     }
@@ -553,6 +563,10 @@
 
 uint32_t AudioTrack::getSampleRate() const
 {
+    if (mIsTimed) {
+        return INVALID_OPERATION;
+    }
+
     AutoMutex lock(mLock);
     return mCblk->sampleRate;
 }
@@ -578,6 +592,10 @@
         return NO_ERROR;
     }
 
+    if (mIsTimed) {
+        return INVALID_OPERATION;
+    }
+
     if (loopStart >= loopEnd ||
         loopEnd - loopStart > cblk->frameCount ||
         cblk->server > loopStart) {
@@ -641,6 +659,8 @@
 
 status_t AudioTrack::setPosition(uint32_t position)
 {
+    if (mIsTimed) return INVALID_OPERATION;
+
     AutoMutex lock(mLock);
 
     if (!stopped_l()) return INVALID_OPERATION;
@@ -791,6 +811,7 @@
                                                       ((uint16_t)flags) << 16,
                                                       sharedBuffer,
                                                       output,
+                                                      mIsTimed,
                                                       &mSessionId,
                                                       &status);
 
@@ -957,6 +978,7 @@
 {
 
     if (mSharedBuffer != 0) return INVALID_OPERATION;
+    if (mIsTimed) return INVALID_OPERATION;
 
     if (ssize_t(userSize) < 0) {
         // Sanity-check: user is most-likely passing an error code, and it would
@@ -1013,6 +1035,59 @@
 
 // -------------------------------------------------------------------------
 
+TimedAudioTrack::TimedAudioTrack() {
+    mIsTimed = true;
+}
+
+status_t TimedAudioTrack::allocateTimedBuffer(size_t size, sp<IMemory>* buffer)
+{
+    status_t result = UNKNOWN_ERROR;
+
+    // If the track is not invalid already, try to allocate a buffer.  If the
+    // allocation fails indicating that the server is dead, flag the track as
+    // invalid so we can attempt to restore it in just a bit.
+    if (!(mCblk->flags & CBLK_INVALID_MSK)) {
+        result = mAudioTrack->allocateTimedBuffer(size, buffer);
+        if (result == DEAD_OBJECT) {
+            android_atomic_or(CBLK_INVALID_ON, &mCblk->flags);
+        }
+    }
+
+    // If the track is invalid at this point, attempt to restore it and try the
+    // allocation one more time.
+    if (mCblk->flags & CBLK_INVALID_MSK) {
+        mCblk->lock.lock();
+        result = restoreTrack_l(mCblk, false);
+        mCblk->lock.unlock();
+
+        if (result == OK)
+            result = mAudioTrack->allocateTimedBuffer(size, buffer);
+    }
+
+    return result;
+}
+
+status_t TimedAudioTrack::queueTimedBuffer(const sp<IMemory>& buffer,
+                                           int64_t pts)
+{
+    // restart track if it was disabled by audioflinger due to previous underrun
+    if (mActive && (mCblk->flags & CBLK_DISABLED_MSK)) {
+        android_atomic_and(~CBLK_DISABLED_ON, &mCblk->flags);
+        ALOGW("queueTimedBuffer() track %p disabled, restarting", this);
+        mAudioTrack->start(0);
+    }
+
+    return mAudioTrack->queueTimedBuffer(buffer, pts);
+}
+
+status_t TimedAudioTrack::setMediaTimeTransform(const LinearTransform& xform,
+                                                TargetTimeline target)
+{
+    return mAudioTrack->setMediaTimeTransform(xform, target);
+}
+
+// -------------------------------------------------------------------------
+
 bool AudioTrack::processAudioBuffer(const sp<AudioTrackThread>& thread)
 {
     Buffer audioBuffer;
diff --git a/media/libmedia/IAudioFlinger.cpp b/media/libmedia/IAudioFlinger.cpp
index 4507e5d..ebadbfa 100644
--- a/media/libmedia/IAudioFlinger.cpp
+++ b/media/libmedia/IAudioFlinger.cpp
@@ -90,6 +90,7 @@
                                 uint32_t flags,
                                 const sp<IMemory>& sharedBuffer,
                                 audio_io_handle_t output,
+                                bool isTimed,
                                 int *sessionId,
                                 status_t *status)
     {
@@ -105,6 +106,7 @@
         data.writeInt32(flags);
         data.writeStrongBinder(sharedBuffer->asBinder());
         data.writeInt32((int32_t) output);
+        data.writeInt32(isTimed);
         int lSessionId = 0;
         if (sessionId != NULL) {
             lSessionId = *sessionId;
@@ -689,11 +691,12 @@
             uint32_t flags = data.readInt32();
             sp<IMemory> buffer = interface_cast<IMemory>(data.readStrongBinder());
             audio_io_handle_t output = (audio_io_handle_t) data.readInt32();
+            bool isTimed = data.readInt32();
             int sessionId = data.readInt32();
             status_t status;
             sp<IAudioTrack> track = createTrack(pid,
                     (audio_stream_type_t) streamType, sampleRate, format,
-                    channelCount, bufferCount, flags, buffer, output, &sessionId, &status);
+                    channelCount, bufferCount, flags, buffer, output, isTimed, &sessionId, &status);
             reply->writeInt32(sessionId);
             reply->writeInt32(status);
             reply->writeStrongBinder(track->asBinder());
diff --git a/media/libmedia/IAudioTrack.cpp b/media/libmedia/IAudioTrack.cpp
index a7958de..28ebbbfc 100644
--- a/media/libmedia/IAudioTrack.cpp
+++ b/media/libmedia/IAudioTrack.cpp
@@ -35,7 +35,10 @@
     FLUSH,
     MUTE,
     PAUSE,
-    ATTACH_AUX_EFFECT
+    ATTACH_AUX_EFFECT,
+    ALLOCATE_TIMED_BUFFER,
+    QUEUE_TIMED_BUFFER,
+    SET_MEDIA_TIME_TRANSFORM,
 };
 
 class BpAudioTrack : public BpInterface<IAudioTrack>
@@ -114,6 +117,52 @@
         }
         return status;
     }
+
+    virtual status_t allocateTimedBuffer(size_t size, sp<IMemory>* buffer) {
+        Parcel data, reply;
+        data.writeInterfaceToken(IAudioTrack::getInterfaceDescriptor());
+        data.writeInt32(size);
+        status_t status = remote()->transact(ALLOCATE_TIMED_BUFFER,
+                                             data, &reply);
+        if (status == NO_ERROR) {
+            status = reply.readInt32();
+            if (status == NO_ERROR) {
+                *buffer = interface_cast<IMemory>(reply.readStrongBinder());
+            }
+        }
+        return status;
+    }
+
+    virtual status_t queueTimedBuffer(const sp<IMemory>& buffer,
+                                      int64_t pts) {
+        Parcel data, reply;
+        data.writeInterfaceToken(IAudioTrack::getInterfaceDescriptor());
+        data.writeStrongBinder(buffer->asBinder());
+        data.writeInt64(pts);
+        status_t status = remote()->transact(QUEUE_TIMED_BUFFER,
+                                             data, &reply);
+        if (status == NO_ERROR) {
+            status = reply.readInt32();
+        }
+        return status;
+    }
+
+    virtual status_t setMediaTimeTransform(const LinearTransform& xform,
+                                           int target) {
+        Parcel data, reply;
+        data.writeInterfaceToken(IAudioTrack::getInterfaceDescriptor());
+        data.writeInt64(xform.a_zero);
+        data.writeInt64(xform.b_zero);
+        data.writeInt32(xform.a_to_b_numer);
+        data.writeInt32(xform.a_to_b_denom);
+        data.writeInt32(target);
+        status_t status = remote()->transact(SET_MEDIA_TIME_TRANSFORM,
+                                             data, &reply);
+        if (status == NO_ERROR) {
+            status = reply.readInt32();
+        }
+        return status;
+    }
 };
 
 IMPLEMENT_META_INTERFACE(AudioTrack, "android.media.IAudioTrack");
@@ -159,10 +208,38 @@
             reply->writeInt32(attachAuxEffect(data.readInt32()));
             return NO_ERROR;
         } break;
+        case ALLOCATE_TIMED_BUFFER: {
+            CHECK_INTERFACE(IAudioTrack, data, reply);
+            sp<IMemory> buffer;
+            status_t status = allocateTimedBuffer(data.readInt32(), &buffer);
+            reply->writeInt32(status);
+            if (status == NO_ERROR) {
+                reply->writeStrongBinder(buffer->asBinder());
+            }
+            return NO_ERROR;
+        } break;
+        case QUEUE_TIMED_BUFFER: {
+            CHECK_INTERFACE(IAudioTrack, data, reply);
+            sp<IMemory> buffer = interface_cast<IMemory>(
+                data.readStrongBinder());
+            int64_t pts = data.readInt64();
+            reply->writeInt32(queueTimedBuffer(buffer, pts));
+            return NO_ERROR;
+        } break;
+        case SET_MEDIA_TIME_TRANSFORM: {
+            CHECK_INTERFACE(IAudioTrack, data, reply);
+            LinearTransform xform;
+            xform.a_zero = data.readInt64();
+            xform.b_zero = data.readInt64();
+            xform.a_to_b_numer = data.readInt32();
+            xform.a_to_b_denom = data.readInt32();
+            int target = data.readInt32();
+            reply->writeInt32(setMediaTimeTransform(xform, target));
+            return NO_ERROR;
+        } break;
         default:
             return BBinder::onTransact(code, data, reply, flags);
     }
 }
 
 }; // namespace android
-
diff --git a/services/audioflinger/Android.mk b/services/audioflinger/Android.mk
index 157405a..22fa752 100644
--- a/services/audioflinger/Android.mk
+++ b/services/audioflinger/Android.mk
@@ -7,6 +7,7 @@
     AudioMixer.cpp.arm          \
     AudioResampler.cpp.arm      \
     AudioPolicyService.cpp      \
+    AudioBufferProvider.cpp     \
     ServiceUtilities.cpp
 #   AudioResamplerSinc.cpp.arm
 #   AudioResamplerCubic.cpp.arm
@@ -17,6 +18,7 @@
 
 LOCAL_SHARED_LIBRARIES := \
     libaudioutils \
+    libcommon_time_client \
     libcutils \
     libutils \
     libbinder \
diff --git a/services/audioflinger/AudioBufferProvider.cpp b/services/audioflinger/AudioBufferProvider.cpp
new file mode 100644
index 0000000..678fd58
--- /dev/null
+++ b/services/audioflinger/AudioBufferProvider.cpp
@@ -0,0 +1,28 @@
+/*
+ * Copyright (C) 2011 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#undef __STRICT_ANSI__
+#define __STDINT_LIMITS
+#define __STDC_LIMIT_MACROS
+#include <stdint.h>
+
+#include "AudioBufferProvider.h"
+
+namespace android {
+
+const int64_t AudioBufferProvider::kInvalidPTS = INT64_MAX;
+
+}; // namespace android
diff --git a/services/audioflinger/AudioBufferProvider.h b/services/audioflinger/AudioBufferProvider.h
index 81c5c39..62ad6bd 100644
--- a/services/audioflinger/AudioBufferProvider.h
+++ b/services/audioflinger/AudioBufferProvider.h
@@ -38,8 +38,15 @@
     };
 
     virtual ~AudioBufferProvider() {}
-    
-    virtual status_t getNextBuffer(Buffer* buffer) = 0;
+
+    // value representing an invalid presentation timestamp
+    static const int64_t kInvalidPTS;
+
+    // pts is the local time when the next sample yielded by getNextBuffer
+    // will be rendered.
+    // Pass kInvalidPTS if the PTS is unknown or not applicable.
+    virtual status_t getNextBuffer(Buffer* buffer, int64_t pts) = 0;
+
     virtual void releaseBuffer(Buffer* buffer) = 0;
 };
 
diff --git a/services/audioflinger/AudioFlinger.cpp b/services/audioflinger/AudioFlinger.cpp
index 0248687..31567c2 100644
--- a/services/audioflinger/AudioFlinger.cpp
+++ b/services/audioflinger/AudioFlinger.cpp
@@ -61,6 +61,9 @@
 #include <powermanager/PowerManager.h>
 // #define DEBUG_CPU_USAGE 10  // log statistics every n wall clock seconds
 
+#include <common_time/cc_helper.h>
+#include <common_time/local_clock.h>
+
 // ----------------------------------------------------------------------------
 
 
@@ -69,7 +72,6 @@
 static const char kDeadlockedString[] = "AudioFlinger may be deadlocked\n";
 static const char kHardwareLockedString[] = "Hardware lock is taken\n";
 
-//static const nsecs_t kStandbyTimeInNsecs = seconds(3);
 static const float MAX_GAIN = 4096.0f;
 static const uint32_t MAX_GAIN_INT = 0x1000;
 
@@ -99,6 +101,7 @@
 // maximum divider applied to the active sleep time in the mixer thread loop
 static const uint32_t kMaxThreadSleepTimeShift = 2;
 
+nsecs_t AudioFlinger::mStandbyTimeInNsecs = kDefaultStandbyTimeInNsecs;
 
 // ----------------------------------------------------------------------------
 
@@ -147,11 +150,14 @@
 
 AudioFlinger::AudioFlinger()
     : BnAudioFlinger(),
-        mPrimaryHardwareDev(NULL),
-        mHardwareStatus(AUDIO_HW_IDLE), // see also onFirstRef()
-        mMasterVolume(1.0f), mMasterMute(false), mNextUniqueId(1),
-        mMode(AUDIO_MODE_INVALID),
-        mBtNrecIsOff(false)
+      mPrimaryHardwareDev(NULL),
+      mHardwareStatus(AUDIO_HW_IDLE), // see also onFirstRef()
+      mMasterVolume(1.0f),
+      mMasterVolumeSupportLvl(MVS_NONE),
+      mMasterMute(false),
+      mNextUniqueId(1),
+      mMode(AUDIO_MODE_INVALID),
+      mBtNrecIsOff(false)
 {
 }
 
@@ -162,6 +168,18 @@
     Mutex::Autolock _l(mLock);
 
     /* TODO: move all this work into an Init() function */
+    char val_str[PROPERTY_VALUE_MAX] = { 0 };
+    if (property_get("ro.audio.flinger_standbytime_ms", val_str, NULL) >= 0) {
+        uint32_t int_val;
+        if (1 == sscanf(val_str, "%u", &int_val)) {
+            mStandbyTimeInNsecs = milliseconds(int_val);
+            ALOGI("Using %u mSec as standby time.", int_val);
+        } else {
+            mStandbyTimeInNsecs = kDefaultStandbyTimeInNsecs;
+            ALOGI("Using default %u mSec as standby time.",
+                    (uint32_t)(mStandbyTimeInNsecs / 1000000));
+        }
+    }
 
     for (size_t i = 0; i < ARRAY_SIZE(audio_interfaces); i++) {
         const hw_module_t *mod;
@@ -193,6 +211,32 @@
 
     AutoMutex lock(mHardwareLock);
 
+    // Determine the level of master volume support the primary audio HAL has,
+    // and set the initial master volume at the same time.
+    float initialVolume = 1.0;
+    mMasterVolumeSupportLvl = MVS_NONE;
+    if (0 == mPrimaryHardwareDev->init_check(mPrimaryHardwareDev)) {
+        audio_hw_device_t *dev = mPrimaryHardwareDev;
+
+        mHardwareStatus = AUDIO_HW_GET_MASTER_VOLUME;
+        if ((NULL != dev->get_master_volume) &&
+            (NO_ERROR == dev->get_master_volume(dev, &initialVolume))) {
+            mMasterVolumeSupportLvl = MVS_FULL;
+        } else {
+            mMasterVolumeSupportLvl = MVS_SETONLY;
+            initialVolume = 1.0;
+        }
+
+        mHardwareStatus = AUDIO_HW_SET_MASTER_VOLUME;
+        if ((NULL == dev->set_master_volume) ||
+            (NO_ERROR != dev->set_master_volume(dev, initialVolume))) {
+            mMasterVolumeSupportLvl = MVS_NONE;
+        }
+        mHardwareStatus = AUDIO_HW_INIT;
+    }
+
+    // Set the mode for each audio HAL, and try to set the initial volume (if
+    // supported) for all of the non-primary audio HALs.
     for (size_t i = 0; i < mAudioHwDevs.size(); i++) {
         audio_hw_device_t *dev = mAudioHwDevs[i];
 
@@ -203,11 +247,22 @@
             mMode = AUDIO_MODE_NORMAL;  // assigned multiple times with same value
             mHardwareStatus = AUDIO_HW_SET_MODE;
             dev->set_mode(dev, mMode);
-            mHardwareStatus = AUDIO_HW_SET_MASTER_VOLUME;
-            dev->set_master_volume(dev, 1.0f);
-            mHardwareStatus = AUDIO_HW_IDLE;
+
+            if ((dev != mPrimaryHardwareDev) &&
+                (NULL != dev->set_master_volume)) {
+                mHardwareStatus = AUDIO_HW_SET_MASTER_VOLUME;
+                dev->set_master_volume(dev, initialVolume);
+            }
+
+            mHardwareStatus = AUDIO_HW_INIT;
         }
     }
+
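+    // If the HAL provides no master volume support, the SW mixer applies the
+    // master volume; otherwise the HAL owns it and the SW scale stays at unity.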
+    mMasterVolumeSW = (MVS_NONE == mMasterVolumeSupportLvl)
+                    ? initialVolume
+                    : 1.0;
+    mMasterVolume   = initialVolume;
+    mHardwareStatus = AUDIO_HW_IDLE;
 }
 
 AudioFlinger::~AudioFlinger()
@@ -273,7 +328,10 @@
     String8 result;
     hardware_call_state hardwareStatus = mHardwareStatus;
 
-    snprintf(buffer, SIZE, "Hardware status: %d\n", hardwareStatus);
+    snprintf(buffer, SIZE, "Hardware status: %d\n"
+                           "Standby Time mSec: %u\n",
+                            hardwareStatus,
+                            (uint32_t)(mStandbyTimeInNsecs / 1000000));
     result.append(buffer);
     write(fd, result.string(), result.size());
     return NO_ERROR;
@@ -377,6 +435,7 @@
         uint32_t flags,
         const sp<IMemory>& sharedBuffer,
         audio_io_handle_t output,
+        bool isTimed,
         int *sessionId,
         status_t *status)
 {
@@ -435,7 +494,7 @@
         ALOGV("createTrack() lSessionId: %d", lSessionId);
 
         track = thread->createTrack_l(client, streamType, sampleRate, format,
-                channelMask, frameCount, sharedBuffer, lSessionId, &lStatus);
+                channelMask, frameCount, sharedBuffer, lSessionId, isTimed, &lStatus);
 
         // move effect chain to this output thread if an effect on same session was waiting
         // for a track to be created
@@ -528,20 +587,29 @@
         return PERMISSION_DENIED;
     }
 
+    float swmv = value;
+
     // when hw supports master volume, don't scale in sw mixer
-    { // scope for the lock
-        AutoMutex lock(mHardwareLock);
-        mHardwareStatus = AUDIO_HW_SET_MASTER_VOLUME;
-        if (mPrimaryHardwareDev->set_master_volume(mPrimaryHardwareDev, value) == NO_ERROR) {
-            value = 1.0f;
+    if (MVS_NONE != mMasterVolumeSupportLvl) {
+        for (size_t i = 0; i < mAudioHwDevs.size(); i++) {
+            AutoMutex lock(mHardwareLock);
+            audio_hw_device_t *dev = mAudioHwDevs[i];
+
+            mHardwareStatus = AUDIO_HW_SET_MASTER_VOLUME;
+            if (NULL != dev->set_master_volume) {
+                dev->set_master_volume(dev, value);
+            }
+            mHardwareStatus = AUDIO_HW_IDLE;
         }
-        mHardwareStatus = AUDIO_HW_IDLE;
+
+        swmv = 1.0;
     }
 
     Mutex::Autolock _l(mLock);
-    mMasterVolume = value;
+    mMasterVolume   = value;
+    mMasterVolumeSW = swmv;
     for (size_t i = 0; i < mPlaybackThreads.size(); i++)
-       mPlaybackThreads.valueAt(i)->setMasterVolume(value);
+       mPlaybackThreads.valueAt(i)->setMasterVolume(swmv);
 
     return NO_ERROR;
 }
@@ -635,12 +703,36 @@
     return masterVolume_l();
 }
 
+float AudioFlinger::masterVolumeSW() const
+{
+    Mutex::Autolock _l(mLock);
+    return masterVolumeSW_l();
+}
+
 bool AudioFlinger::masterMute() const
 {
     Mutex::Autolock _l(mLock);
     return masterMute_l();
 }
 
+float AudioFlinger::masterVolume_l() const
+{
+    if (MVS_FULL == mMasterVolumeSupportLvl) {
+        float ret_val;
+        AutoMutex lock(mHardwareLock);
+
+        mHardwareStatus = AUDIO_HW_GET_MASTER_VOLUME;
+        assert(NULL != mPrimaryHardwareDev);
+        assert(NULL != mPrimaryHardwareDev->get_master_volume);
+
+        mPrimaryHardwareDev->get_master_volume(mPrimaryHardwareDev, &ret_val);
+        mHardwareStatus = AUDIO_HW_IDLE;
+        return ret_val;
+    }
+
+    return mMasterVolume;
+}
+
 status_t AudioFlinger::setStreamVolume(audio_stream_type_t stream, float value,
         audio_io_handle_t output)
 {
@@ -1367,7 +1459,7 @@
         mOutput(output),
         // Assumes constructor is called by AudioFlinger with it's mLock held,
         // but it would be safer to explicitly pass initial masterVolume as parameter
-        mMasterVolume(audioFlinger->masterVolume_l()),
+        mMasterVolume(audioFlinger->masterVolumeSW_l()),
         mLastWriteTime(0), mNumWrites(0), mNumDelayedWrites(0), mInWrite(false)
 {
     snprintf(mName, kNameLength, "AudioOut_%d", id);
@@ -1485,6 +1577,7 @@
         int frameCount,
         const sp<IMemory>& sharedBuffer,
         int sessionId,
+        bool isTimed,
         status_t *status)
 {
     sp<Track> track;
@@ -1535,9 +1628,14 @@
             }
         }
 
-        track = new Track(this, client, streamType, sampleRate, format,
-                channelMask, frameCount, sharedBuffer, sessionId);
-        if (track->getCblk() == NULL || track->name() < 0) {
+        if (!isTimed) {
+            track = new Track(this, client, streamType, sampleRate, format,
+                    channelMask, frameCount, sharedBuffer, sessionId);
+        } else {
+            track = TimedTrack::create(this, client, streamType, sampleRate, format,
+                    channelMask, frameCount, sharedBuffer, sessionId);
+        }
+        if (track == NULL || track->getCblk() == NULL || track->name() < 0) {
             lStatus = NO_MEMORY;
             goto Exit;
         }
@@ -1941,7 +2039,7 @@
                         }
                     }
 
-                    standbyTime = systemTime() + kStandbyTimeInNsecs;
+                    standbyTime = systemTime() + mStandbyTimeInNsecs;
                     sleepTime = idleSleepTime;
                     sleepTimeShift = 0;
                     continue;
@@ -1957,8 +2055,21 @@
         }
 
         if (CC_LIKELY(mixerStatus == MIXER_TRACKS_READY)) {
+            // obtain the presentation timestamp of the next output buffer
+            int64_t pts;
+            status_t status = INVALID_OPERATION;
+
+            if (NULL != mOutput->stream->get_next_write_timestamp) {
+                status = mOutput->stream->get_next_write_timestamp(
+                        mOutput->stream, &pts);
+            }
+
+            if (status != NO_ERROR) {
+                pts = AudioBufferProvider::kInvalidPTS;
+            }
+
             // mix buffers...
-            mAudioMixer->process();
+            mAudioMixer->process(pts);
             // increase sleep time progressively when application underrun condition clears.
             // Only increase sleep time if the mixer is ready for two consecutive times to avoid
             // that a steady state of alternating ready/not ready conditions keeps the sleep time
@@ -1967,7 +2078,7 @@
                 sleepTimeShift--;
             }
             sleepTime = 0;
-            standbyTime = systemTime() + kStandbyTimeInNsecs;
+            standbyTime = systemTime() + mStandbyTimeInNsecs;
             //TODO: delay standby when effects have a tail
         } else {
             // If no tracks are ready, sleep once for the duration of an output
@@ -2114,7 +2225,7 @@
                 ALOG_ASSERT(minFrames <= cblk->frameCount);
             }
         }
-        if ((cblk->framesReady() >= minFrames) && track->isReady() &&
+        if ((track->framesReady() >= minFrames) && track->isReady() &&
                 !track->isPaused() && !track->isTerminated())
         {
             //ALOGV("track %d u=%08x, s=%08x [OK] on thread %p", name, cblk->user, cblk->server, this);
@@ -2785,7 +2896,8 @@
             // output audio to hardware
             while (frameCount) {
                 buffer.frameCount = frameCount;
-                activeTrack->getNextBuffer(&buffer);
+                activeTrack->getNextBuffer(&buffer,
+                                           AudioBufferProvider::kInvalidPTS);
                 if (CC_UNLIKELY(buffer.raw == NULL)) {
                     memset(curBuf, 0, frameCount * mFrameSize);
                     break;
@@ -3038,7 +3150,7 @@
                         }
                     }
 
-                    standbyTime = systemTime() + kStandbyTimeInNsecs;
+                    standbyTime = systemTime() + mStandbyTimeInNsecs;
                     sleepTime = idleSleepTime;
                     continue;
                 }
@@ -3055,7 +3167,7 @@
         if (CC_LIKELY(mixerStatus == MIXER_TRACKS_READY)) {
             // mix buffers...
             if (outputsReady(outputTracks)) {
-                mAudioMixer->process();
+                mAudioMixer->process(AudioBufferProvider::kInvalidPTS);
             } else {
                 memset(mMixBuffer, 0, mixBufferSize);
             }
@@ -3092,7 +3204,7 @@
             // enable changes in effect chain
             unlockEffectChains(effectChains);
 
-            standbyTime = systemTime() + kStandbyTimeInNsecs;
+            standbyTime = systemTime() + mStandbyTimeInNsecs;
             for (size_t i = 0; i < outputTracks.size(); i++) {
                 outputTracks[i]->write(mMixBuffer, writeFrames);
             }
@@ -3443,7 +3555,8 @@
             (int)mAuxBuffer);
 }
 
-status_t AudioFlinger::PlaybackThread::Track::getNextBuffer(AudioBufferProvider::Buffer* buffer)
+status_t AudioFlinger::PlaybackThread::Track::getNextBuffer(
+    AudioBufferProvider::Buffer* buffer, int64_t pts)
 {
      audio_track_cblk_t* cblk = this->cblk();
      uint32_t framesReady;
@@ -3484,10 +3597,14 @@
      return NOT_ENOUGH_DATA;
 }
 
+uint32_t AudioFlinger::PlaybackThread::Track::framesReady() const {
+    return mCblk->framesReady();
+}
+
 bool AudioFlinger::PlaybackThread::Track::isReady() const {
     if (mFillingUpStatus != FS_FILLING || isStopped() || isPausing()) return true;
 
-    if (mCblk->framesReady() >= mCblk->frameCount ||
+    if (framesReady() >= mCblk->frameCount ||
             (mCblk->flags & CBLK_FORCEREADY_MSK)) {
         mFillingUpStatus = FS_FILLED;
         android_atomic_and(~CBLK_FORCEREADY_MSK, &mCblk->flags);
@@ -3644,6 +3761,393 @@
     mAuxBuffer = buffer;
 }
 
+// timed audio tracks
+
+sp<AudioFlinger::PlaybackThread::TimedTrack>
+AudioFlinger::PlaybackThread::TimedTrack::create(
+            const wp<ThreadBase>& thread,
+            const sp<Client>& client,
+            audio_stream_type_t streamType,
+            uint32_t sampleRate,
+            audio_format_t format,
+            uint32_t channelMask,
+            int frameCount,
+            const sp<IMemory>& sharedBuffer,
+            int sessionId) {
+    if (!client->reserveTimedTrack())
+        return NULL;
+
+    sp<TimedTrack> track = new TimedTrack(
+        thread, client, streamType, sampleRate, format, channelMask, frameCount,
+        sharedBuffer, sessionId);
+
+    if (track == NULL) {
+        client->releaseTimedTrack();
+        return NULL;
+    }
+
+    return track;
+}
+
+AudioFlinger::PlaybackThread::TimedTrack::TimedTrack(
+            const wp<ThreadBase>& thread,
+            const sp<Client>& client,
+            audio_stream_type_t streamType,
+            uint32_t sampleRate,
+            audio_format_t format,
+            uint32_t channelMask,
+            int frameCount,
+            const sp<IMemory>& sharedBuffer,
+            int sessionId)
+    : Track(thread, client, streamType, sampleRate, format, channelMask,
+            frameCount, sharedBuffer, sessionId),
+      mTimedSilenceBuffer(NULL),
+      mTimedSilenceBufferSize(0),
+      mTimedAudioOutputOnTime(false),
+      mMediaTimeTransformValid(false)
+{
+    LocalClock lc;
+    mLocalTimeFreq = lc.getLocalFreq();
+
+    mLocalTimeToSampleTransform.a_zero = 0;
+    mLocalTimeToSampleTransform.b_zero = 0;
+    mLocalTimeToSampleTransform.a_to_b_numer = sampleRate;
+    mLocalTimeToSampleTransform.a_to_b_denom = mLocalTimeFreq;
+    LinearTransform::reduce(&mLocalTimeToSampleTransform.a_to_b_numer,
+                            &mLocalTimeToSampleTransform.a_to_b_denom);
+}
+
+AudioFlinger::PlaybackThread::TimedTrack::~TimedTrack() {
+    mClient->releaseTimedTrack();
+    delete [] mTimedSilenceBuffer;
+}
+
+status_t AudioFlinger::PlaybackThread::TimedTrack::allocateTimedBuffer(
+    size_t size, sp<IMemory>* buffer) {
+
+    Mutex::Autolock _l(mTimedBufferQueueLock);
+
+    trimTimedBufferQueue_l();
+
+    // lazily initialize the shared memory heap for timed buffers
+    if (mTimedMemoryDealer == NULL) {
+        const int kTimedBufferHeapSize = 512 << 10;
+
+        mTimedMemoryDealer = new MemoryDealer(kTimedBufferHeapSize,
+                                              "AudioFlingerTimed");
+        if (mTimedMemoryDealer == NULL)
+            return NO_MEMORY;
+    }
+
+    sp<IMemory> newBuffer = mTimedMemoryDealer->allocate(size);
+    if (newBuffer == NULL) {
+        newBuffer = mTimedMemoryDealer->allocate(size);
+        if (newBuffer == NULL)
+            return NO_MEMORY;
+    }
+
+    *buffer = newBuffer;
+    return NO_ERROR;
+}
+
+// caller must hold mTimedBufferQueueLock
+void AudioFlinger::PlaybackThread::TimedTrack::trimTimedBufferQueue_l() {
+    int64_t mediaTimeNow;
+    {
+        Mutex::Autolock mttLock(mMediaTimeTransformLock);
+        if (!mMediaTimeTransformValid)
+            return;
+
+        int64_t targetTimeNow;
+        status_t res = (mMediaTimeTransformTarget == TimedAudioTrack::COMMON_TIME)
+            ? mCCHelper.getCommonTime(&targetTimeNow)
+            : mCCHelper.getLocalTime(&targetTimeNow);
+
+        if (OK != res)
+            return;
+
+        if (!mMediaTimeTransform.doReverseTransform(targetTimeNow,
+                                                    &mediaTimeNow)) {
+            return;
+        }
+    }
+
+    size_t trimIndex;
+    for (trimIndex = 0; trimIndex < mTimedBufferQueue.size(); trimIndex++) {
+        if (mTimedBufferQueue[trimIndex].pts() > mediaTimeNow)
+            break;
+    }
+
+    if (trimIndex) {
+        mTimedBufferQueue.removeItemsAt(0, trimIndex);
+    }
+}
+
+status_t AudioFlinger::PlaybackThread::TimedTrack::queueTimedBuffer(
+    const sp<IMemory>& buffer, int64_t pts) {
+
+    {
+        Mutex::Autolock mttLock(mMediaTimeTransformLock);
+        if (!mMediaTimeTransformValid)
+            return INVALID_OPERATION;
+    }
+
+    Mutex::Autolock _l(mTimedBufferQueueLock);
+
+    mTimedBufferQueue.add(TimedBuffer(buffer, pts));
+
+    return NO_ERROR;
+}
+
+status_t AudioFlinger::PlaybackThread::TimedTrack::setMediaTimeTransform(
+    const LinearTransform& xform, TimedAudioTrack::TargetTimeline target) {
+
+    ALOGV("%s az=%lld bz=%lld n=%d d=%u tgt=%d", __PRETTY_FUNCTION__,
+         xform.a_zero, xform.b_zero, xform.a_to_b_numer, xform.a_to_b_denom,
+         target);
+
+    if (!(target == TimedAudioTrack::LOCAL_TIME ||
+          target == TimedAudioTrack::COMMON_TIME)) {
+        return BAD_VALUE;
+    }
+
+    Mutex::Autolock lock(mMediaTimeTransformLock);
+    mMediaTimeTransform = xform;
+    mMediaTimeTransformTarget = target;
+    mMediaTimeTransformValid = true;
+
+    return NO_ERROR;
+}
+
+#define min(a, b) ((a) < (b) ? (a) : (b))
+
+// implementation of getNextBuffer for tracks whose buffers have timestamps
+status_t AudioFlinger::PlaybackThread::TimedTrack::getNextBuffer(
+    AudioBufferProvider::Buffer* buffer, int64_t pts)
+{
+    if (pts == AudioBufferProvider::kInvalidPTS) {
+        buffer->raw = 0;
+        buffer->frameCount = 0;
+        return INVALID_OPERATION;
+    }
+
+    // get ahold of the output stream that these samples will be written to
+    sp<ThreadBase> thread = mThread.promote();
+    if (thread == NULL) {
+        buffer->raw = 0;
+        buffer->frameCount = 0;
+        return INVALID_OPERATION;
+    }
+    PlaybackThread* playbackThread = static_cast<PlaybackThread*>(thread.get());
+
+    Mutex::Autolock _l(mTimedBufferQueueLock);
+
+    while (true) {
+
+        // if we have no timed buffers, then fail
+        if (mTimedBufferQueue.isEmpty()) {
+            buffer->raw = 0;
+            buffer->frameCount = 0;
+            return NOT_ENOUGH_DATA;
+        }
+
+        TimedBuffer& head = mTimedBufferQueue.editItemAt(0);
+
+        // calculate the PTS of the head of the timed buffer queue expressed in
+        // local time
+        int64_t headLocalPTS;
+        {
+            Mutex::Autolock mttLock(mMediaTimeTransformLock);
+
+            assert(mMediaTimeTransformValid);
+
+            if (mMediaTimeTransform.a_to_b_denom == 0) {
+                // the transform represents a pause, so yield silence
+                timedYieldSilence(buffer->frameCount, buffer);
+                return NO_ERROR;
+            }
+
+            int64_t transformedPTS;
+            if (!mMediaTimeTransform.doForwardTransform(head.pts(),
+                                                        &transformedPTS)) {
+                // the transform failed.  this shouldn't happen, but if it does
+                // then just drop this buffer
+                ALOGW("timedGetNextBuffer transform failed");
+                buffer->raw = 0;
+                buffer->frameCount = 0;
+                mTimedBufferQueue.removeAt(0);
+                return NO_ERROR;
+            }
+
+            if (mMediaTimeTransformTarget == TimedAudioTrack::COMMON_TIME) {
+                if (OK != mCCHelper.commonTimeToLocalTime(transformedPTS,
+                                                          &headLocalPTS)) {
+                    buffer->raw = 0;
+                    buffer->frameCount = 0;
+                    return INVALID_OPERATION;
+                }
+            } else {
+                headLocalPTS = transformedPTS;
+            }
+        }
+
+        // adjust the head buffer's PTS to reflect the portion of the head buffer
+        // that has already been consumed
+        int64_t effectivePTS = headLocalPTS +
+                ((head.position() / mCblk->frameSize) * mLocalTimeFreq / sampleRate());
+
+        // Calculate the delta in samples between the head of the input buffer
+        // queue and the start of the next output buffer that will be written.
+        // If the transformation fails because of over or underflow, it means
+        // that the sample's position in the output stream is so far out of
+        // whack that it should just be dropped.
+        int64_t sampleDelta;
+        if (llabs(effectivePTS - pts) >= (static_cast<int64_t>(1) << 31)) {
+            ALOGV("*** head buffer is too far from PTS: dropped buffer");
+            mTimedBufferQueue.removeAt(0);
+            continue;
+        }
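+        // convert the local-time delta to a frame delta; shifting left by 32
+        // keeps 32 bits of fractional precision through the rate conversion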
+        if (!mLocalTimeToSampleTransform.doForwardTransform(
+                (effectivePTS - pts) << 32, &sampleDelta)) {
+            ALOGV("*** too late during sample rate transform: dropped buffer");
+            mTimedBufferQueue.removeAt(0);
+            continue;
+        }
+
+        ALOGV("*** %s head.pts=%lld head.pos=%d pts=%lld sampleDelta=[%d.%08x]",
+             __PRETTY_FUNCTION__, head.pts(), head.position(), pts,
+             static_cast<int32_t>((sampleDelta >= 0 ? 0 : 1) + (sampleDelta >> 32)),
+             static_cast<uint32_t>(sampleDelta & 0xFFFFFFFF));
+
+        // if the delta between the ideal placement for the next input sample and
+        // the current output position is within this threshold, then we will
+        // concatenate the next input samples to the previous output
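+        // (the threshold below is one tenth of a second's worth of frames,
+        // expressed in 32.32 fixed point)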
+        const int64_t kSampleContinuityThreshold =
+                (static_cast<int64_t>(sampleRate()) << 32) / 10;
+
+        // if this is the first buffer of audio that we're emitting from this track
+        // then it should be almost exactly on time.
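+        // (1LL << 32 is exactly one frame in 32.32 fixed point)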
+        const int64_t kSampleStartupThreshold = 1LL << 32;
+
+        if ((mTimedAudioOutputOnTime && llabs(sampleDelta) <= kSampleContinuityThreshold) ||
+            (!mTimedAudioOutputOnTime && llabs(sampleDelta) <= kSampleStartupThreshold)) {
+            // the next input is close enough to being on time, so concatenate it
+            // with the last output
+            timedYieldSamples(buffer);
+
+            ALOGV("*** on time: head.pos=%d frameCount=%u", head.position(), buffer->frameCount);
+            return NO_ERROR;
+        } else if (sampleDelta > 0) {
+            // the gap between the current output position and the proper start of
+            // the next input sample is too big, so fill it with silence
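+            // (adding 0x80000000 before the shift rounds the 32.32 fixed point
+            // delta to the nearest whole frame)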
+            uint32_t framesUntilNextInput = (sampleDelta + 0x80000000) >> 32;
+
+            timedYieldSilence(framesUntilNextInput, buffer);
+            ALOGV("*** silence: frameCount=%u", buffer->frameCount);
+            return NO_ERROR;
+        } else {
+            // the next input sample is late
+            uint32_t lateFrames = static_cast<uint32_t>(-((sampleDelta + 0x80000000) >> 32));
+            size_t onTimeSamplePosition =
+                    head.position() + lateFrames * mCblk->frameSize;
+
+            if (onTimeSamplePosition > head.buffer()->size()) {
+                // all the remaining samples in the head are too late, so
+                // drop it and move on
+                ALOGV("*** too late: dropped buffer");
+                mTimedBufferQueue.removeAt(0);
+                continue;
+            } else {
+                // skip over the late samples
+                head.setPosition(onTimeSamplePosition);
+
+                // yield the available samples
+                timedYieldSamples(buffer);
+
+                ALOGV("*** late: head.pos=%d frameCount=%u", head.position(), buffer->frameCount);
+                return NO_ERROR;
+            }
+        }
+    }
+}
+
+// Yield samples from the timed buffer queue head up to the given output
+// buffer's capacity.
+//
+// Caller must hold mTimedBufferQueueLock
+void AudioFlinger::PlaybackThread::TimedTrack::timedYieldSamples(
+    AudioBufferProvider::Buffer* buffer) {
+
+    const TimedBuffer& head = mTimedBufferQueue[0];
+
+    buffer->raw = (static_cast<uint8_t*>(head.buffer()->pointer()) +
+                   head.position());
+
+    uint32_t framesLeftInHead = ((head.buffer()->size() - head.position()) /
+                                 mCblk->frameSize);
+    size_t framesRequested = buffer->frameCount;
+    buffer->frameCount = min(framesLeftInHead, framesRequested);
+
+    mTimedAudioOutputOnTime = true;
+}
+
+// Yield samples of silence up to the given output buffer's capacity
+//
+// Caller must hold mTimedBufferQueueLock
+void AudioFlinger::PlaybackThread::TimedTrack::timedYieldSilence(
+    uint32_t numFrames, AudioBufferProvider::Buffer* buffer) {
+
+    // lazily allocate a buffer filled with silence
+    if (mTimedSilenceBufferSize < numFrames * mCblk->frameSize) {
+        delete [] mTimedSilenceBuffer;
+        mTimedSilenceBufferSize = numFrames * mCblk->frameSize;
+        mTimedSilenceBuffer = new uint8_t[mTimedSilenceBufferSize];
+        memset(mTimedSilenceBuffer, 0, mTimedSilenceBufferSize);
+    }
+
+    buffer->raw = mTimedSilenceBuffer;
+    size_t framesRequested = buffer->frameCount;
+    buffer->frameCount = min(numFrames, framesRequested);
+
+    mTimedAudioOutputOnTime = false;
+}
+
+void AudioFlinger::PlaybackThread::TimedTrack::releaseBuffer(
+    AudioBufferProvider::Buffer* buffer) {
+
+    Mutex::Autolock _l(mTimedBufferQueueLock);
+
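+    // a silence buffer does not consume data from the timed buffer queue, so
+    // only advance the head buffer's position when real sample data is released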
+    if (buffer->raw != mTimedSilenceBuffer) {
+        TimedBuffer& head = mTimedBufferQueue.editItemAt(0);
+        head.setPosition(head.position() + buffer->frameCount * mCblk->frameSize);
+        if (static_cast<size_t>(head.position()) >= head.buffer()->size()) {
+            mTimedBufferQueue.removeAt(0);
+        }
+    }
+
+    buffer->raw = 0;
+    buffer->frameCount = 0;
+}
+
+uint32_t AudioFlinger::PlaybackThread::TimedTrack::framesReady() const {
+    Mutex::Autolock _l(mTimedBufferQueueLock);
+
+    uint32_t frames = 0;
+    for (size_t i = 0; i < mTimedBufferQueue.size(); i++) {
+        const TimedBuffer& tb = mTimedBufferQueue[i];
+        frames += (tb.buffer()->size() - tb.position()) / mCblk->frameSize;
+    }
+
+    return frames;
+}
+
+AudioFlinger::PlaybackThread::TimedTrack::TimedBuffer::TimedBuffer()
+        : mPTS(0), mPosition(0) {}
+
+AudioFlinger::PlaybackThread::TimedTrack::TimedBuffer::TimedBuffer(
+    const sp<IMemory>& buffer, int64_t pts)
+        : mBuffer(buffer), mPTS(pts), mPosition(0) {}
+
 // ----------------------------------------------------------------------------
 
 // RecordTrack constructor must be called with AudioFlinger::mLock held
@@ -3680,7 +4184,7 @@
     }
 }
 
-status_t AudioFlinger::RecordThread::RecordTrack::getNextBuffer(AudioBufferProvider::Buffer* buffer)
+status_t AudioFlinger::RecordThread::RecordTrack::getNextBuffer(AudioBufferProvider::Buffer* buffer, int64_t pts)
 {
     audio_track_cblk_t* cblk = this->cblk();
     uint32_t framesAvail;
@@ -4002,7 +4506,8 @@
         mAudioFlinger(audioFlinger),
         // FIXME should be a "k" constant not hard-coded, in .h or ro. property, see 4 lines below
         mMemoryDealer(new MemoryDealer(1024*1024, "AudioFlinger::Client")),
-        mPid(pid)
+        mPid(pid),
+        mTimedTrackCount(0)
 {
     // 1 MB of address space is good for 32 tracks, 8 buffers each, 4 KB/buffer
 }
@@ -4018,6 +4523,31 @@
     return mMemoryDealer;
 }
 
+// Reserve one of the limited slots for a timed audio track associated
+// with this client
+bool AudioFlinger::Client::reserveTimedTrack()
+{
+    const int kMaxTimedTracksPerClient = 4;
+
+    Mutex::Autolock _l(mTimedTrackLock);
+
+    if (mTimedTrackCount >= kMaxTimedTracksPerClient) {
+        ALOGW("can not create timed track - pid %d has exceeded the limit",
+             mPid);
+        return false;
+    }
+
+    mTimedTrackCount++;
+    return true;
+}
+
+// Release a slot for a timed audio track
+void AudioFlinger::Client::releaseTimedTrack()
+{
+    Mutex::Autolock _l(mTimedTrackLock);
+    mTimedTrackCount--;
+}
+
 // ----------------------------------------------------------------------------
 
 AudioFlinger::NotificationClient::NotificationClient(const sp<AudioFlinger>& audioFlinger,
@@ -4084,6 +4614,38 @@
     return mTrack->attachAuxEffect(EffectId);
 }
 
+status_t AudioFlinger::TrackHandle::allocateTimedBuffer(size_t size,
+                                                         sp<IMemory>* buffer) {
+    if (!mTrack->isTimedTrack())
+        return INVALID_OPERATION;
+
+    PlaybackThread::TimedTrack* tt =
+            reinterpret_cast<PlaybackThread::TimedTrack*>(mTrack.get());
+    return tt->allocateTimedBuffer(size, buffer);
+}
+
+status_t AudioFlinger::TrackHandle::queueTimedBuffer(const sp<IMemory>& buffer,
+                                                     int64_t pts) {
+    if (!mTrack->isTimedTrack())
+        return INVALID_OPERATION;
+
+    PlaybackThread::TimedTrack* tt =
+            reinterpret_cast<PlaybackThread::TimedTrack*>(mTrack.get());
+    return tt->queueTimedBuffer(buffer, pts);
+}
+
+status_t AudioFlinger::TrackHandle::setMediaTimeTransform(
+    const LinearTransform& xform, int target) {
+
+    if (!mTrack->isTimedTrack())
+        return INVALID_OPERATION;
+
+    PlaybackThread::TimedTrack* tt =
+            reinterpret_cast<PlaybackThread::TimedTrack*>(mTrack.get());
+    return tt->setMediaTimeTransform(
+        xform, static_cast<TimedAudioTrack::TargetTimeline>(target));
+}
+
 status_t AudioFlinger::TrackHandle::onTransact(
     uint32_t code, const Parcel& data, Parcel* reply, uint32_t flags)
 {
@@ -4313,7 +4875,8 @@
             }
 
             buffer.frameCount = mFrameCount;
-            if (CC_LIKELY(mActiveTrack->getNextBuffer(&buffer) == NO_ERROR)) {
+            if (CC_LIKELY(mActiveTrack->getNextBuffer(
+                    &buffer, AudioBufferProvider::kInvalidPTS) == NO_ERROR)) {
                 size_t framesOut = buffer.frameCount;
                 if (mResampler == NULL) {
                     // no resampling
@@ -4591,7 +5154,7 @@
     return NO_ERROR;
 }
 
-status_t AudioFlinger::RecordThread::getNextBuffer(AudioBufferProvider::Buffer* buffer)
+status_t AudioFlinger::RecordThread::getNextBuffer(AudioBufferProvider::Buffer* buffer, int64_t pts)
 {
     size_t framesReq = buffer->frameCount;
     size_t framesReady = mFrameCount - mRsmpInIndex;
diff --git a/services/audioflinger/AudioFlinger.h b/services/audioflinger/AudioFlinger.h
index aa0b8f8..9396a0b 100644
--- a/services/audioflinger/AudioFlinger.h
+++ b/services/audioflinger/AudioFlinger.h
@@ -22,11 +22,14 @@
 #include <sys/types.h>
 #include <limits.h>
 
+#include <common_time/cc_helper.h>
+
 #include <media/IAudioFlinger.h>
 #include <media/IAudioFlingerClient.h>
 #include <media/IAudioTrack.h>
 #include <media/IAudioRecord.h>
 #include <media/AudioSystem.h>
+#include <media/AudioTrack.h>
 
 #include <utils/Atomic.h>
 #include <utils/Errors.h>
@@ -55,7 +58,7 @@
 
 // ----------------------------------------------------------------------------
 
-static const nsecs_t kStandbyTimeInNsecs = seconds(3);
+static const nsecs_t kDefaultStandbyTimeInNsecs = seconds(3);
 
 class AudioFlinger :
     public BinderService<AudioFlinger>,
@@ -78,6 +81,7 @@
                                 uint32_t flags,
                                 const sp<IMemory>& sharedBuffer,
                                 audio_io_handle_t output,
+                                bool isTimed,
                                 int *sessionId,
                                 status_t *status);
 
@@ -102,6 +106,7 @@
     virtual     status_t    setMasterMute(bool muted);
 
     virtual     float       masterVolume() const;
+    virtual     float       masterVolumeSW() const;
     virtual     bool        masterMute() const;
 
     virtual     status_t    setStreamVolume(audio_stream_type_t stream, float value,
@@ -206,6 +211,8 @@
     audio_hw_device_t*      findSuitableHwDev_l(uint32_t devices);
     void                    purgeStaleEffects_l();
 
+    static nsecs_t          mStandbyTimeInNsecs;
+
     // Internal dump utilites.
     status_t dumpPermissionDenial(int fd, const Vector<String16>& args);
     status_t dumpClients(int fd, const Vector<String16>& args);
@@ -220,12 +227,18 @@
         pid_t               pid() const { return mPid; }
         sp<AudioFlinger>    audioFlinger() const { return mAudioFlinger; }
 
+        bool reserveTimedTrack();
+        void releaseTimedTrack();
+
     private:
                             Client(const Client&);
                             Client& operator = (const Client&);
         const sp<AudioFlinger> mAudioFlinger;
         const sp<MemoryDealer> mMemoryDealer;
         const pid_t         mPid;
+
+        Mutex               mTimedTrackLock;
+        int                 mTimedTrackCount;
     };
 
     // --- Notification Client ---
@@ -333,7 +346,9 @@
                                 TrackBase(const TrackBase&);
                                 TrackBase& operator = (const TrackBase&);
 
-            virtual status_t getNextBuffer(AudioBufferProvider::Buffer* buffer) = 0;
+            virtual status_t getNextBuffer(
+                AudioBufferProvider::Buffer* buffer,
+                int64_t pts) = 0;
             virtual void releaseBuffer(AudioBufferProvider::Buffer* buffer);
 
             audio_format_t format() const {
@@ -609,7 +624,6 @@
                     int16_t     *mainBuffer() const { return mMainBuffer; }
                     int         auxEffectId() const { return mAuxEffectId; }
 
-
         protected:
             friend class ThreadBase;
             friend class TrackHandle;
@@ -620,7 +634,11 @@
                                 Track(const Track&);
                                 Track& operator = (const Track&);
 
-            virtual status_t getNextBuffer(AudioBufferProvider::Buffer* buffer);
+            virtual status_t getNextBuffer(
+                AudioBufferProvider::Buffer* buffer,
+                int64_t pts);
+            virtual uint32_t framesReady() const;
+
             bool isMuted() const { return mMute; }
             bool isPausing() const {
                 return mState == PAUSING;
@@ -636,6 +654,8 @@
                 return (mStreamType == AUDIO_STREAM_CNT);
             }
 
+            virtual bool isTimedTrack() const { return false; }
+
             // we don't really need a lock for these
             volatile bool       mMute;
             // FILLED state is used for suppressing volume ramp at begin of playing
@@ -652,6 +672,79 @@
             bool                mHasVolumeController;
         };  // end of Track
 
+        class TimedTrack : public Track {
+          public:
+            static sp<TimedTrack> create(const wp<ThreadBase>& thread,
+                                         const sp<Client>& client,
+                                         audio_stream_type_t streamType,
+                                         uint32_t sampleRate,
+                                         audio_format_t format,
+                                         uint32_t channelMask,
+                                         int frameCount,
+                                         const sp<IMemory>& sharedBuffer,
+                                         int sessionId);
+            ~TimedTrack();
+
+            class TimedBuffer {
+              public:
+                TimedBuffer();
+                TimedBuffer(const sp<IMemory>& buffer, int64_t pts);
+                const sp<IMemory>& buffer() const { return mBuffer; }
+                int64_t pts() const { return mPTS; }
+                int position() const { return mPosition; }
+                void setPosition(int pos) { mPosition = pos; }
+              private:
+                sp<IMemory> mBuffer;
+                int64_t mPTS;
+                int mPosition;
+            };
+
+            virtual bool isTimedTrack() const { return true; }
+
+            virtual uint32_t framesReady() const;
+
+            virtual status_t getNextBuffer(AudioBufferProvider::Buffer* buffer,
+                                           int64_t pts);
+            virtual void releaseBuffer(AudioBufferProvider::Buffer* buffer);
+            void timedYieldSamples(AudioBufferProvider::Buffer* buffer);
+            void timedYieldSilence(uint32_t numFrames,
+                                   AudioBufferProvider::Buffer* buffer);
+
+            status_t    allocateTimedBuffer(size_t size,
+                                            sp<IMemory>* buffer);
+            status_t    queueTimedBuffer(const sp<IMemory>& buffer,
+                                         int64_t pts);
+            status_t    setMediaTimeTransform(const LinearTransform& xform,
+                                              TimedAudioTrack::TargetTimeline target);
+            void        trimTimedBufferQueue_l();
+
+          private:
+            TimedTrack(const wp<ThreadBase>& thread,
+                       const sp<Client>& client,
+                       audio_stream_type_t streamType,
+                       uint32_t sampleRate,
+                       audio_format_t format,
+                       uint32_t channelMask,
+                       int frameCount,
+                       const sp<IMemory>& sharedBuffer,
+                       int sessionId);
+
+            uint64_t            mLocalTimeFreq;
+            LinearTransform     mLocalTimeToSampleTransform;
+            sp<MemoryDealer>    mTimedMemoryDealer;
+            Vector<TimedBuffer> mTimedBufferQueue;
+            uint8_t*            mTimedSilenceBuffer;
+            uint32_t            mTimedSilenceBufferSize;
+            mutable Mutex       mTimedBufferQueueLock;
+            bool                mTimedAudioOutputOnTime;
+            CCHelper            mCCHelper;
+
+            Mutex               mMediaTimeTransformLock;
+            LinearTransform     mMediaTimeTransform;
+            bool                mMediaTimeTransformValid;
+            TimedAudioTrack::TargetTimeline mMediaTimeTransformTarget;
+        };
+
 
         // playback track
         class OutputTrack : public Track {
@@ -726,6 +819,7 @@
                                     int frameCount,
                                     const sp<IMemory>& sharedBuffer,
                                     int sessionId,
+                                    bool isTimed,
                                     status_t *status);
 
                     AudioStreamOut* getOutput() const;
@@ -920,6 +1014,12 @@
         virtual void        mute(bool);
         virtual void        pause();
         virtual status_t    attachAuxEffect(int effectId);
+        virtual status_t    allocateTimedBuffer(size_t size,
+                                                sp<IMemory>* buffer);
+        virtual status_t    queueTimedBuffer(const sp<IMemory>& buffer,
+                                             int64_t pts);
+        virtual status_t    setMediaTimeTransform(const LinearTransform& xform,
+                                                  int target);
         virtual status_t onTransact(
             uint32_t code, const Parcel& data, Parcel* reply, uint32_t flags);
     private:
@@ -967,7 +1067,9 @@
                                 RecordTrack(const RecordTrack&);
                                 RecordTrack& operator = (const RecordTrack&);
 
-            virtual status_t getNextBuffer(AudioBufferProvider::Buffer* buffer);
+            virtual status_t getNextBuffer(
+                AudioBufferProvider::Buffer* buffer,
+                int64_t pts);
 
             bool                mOverflow;
         };
@@ -1004,7 +1106,8 @@
                 AudioStreamIn* clearInput();
                 virtual audio_stream_t* stream();
 
-        virtual status_t    getNextBuffer(AudioBufferProvider::Buffer* buffer);
+        virtual status_t    getNextBuffer(AudioBufferProvider::Buffer* buffer,
+                                          int64_t pts);
         virtual void        releaseBuffer(AudioBufferProvider::Buffer* buffer);
         virtual bool        checkForNewParameters_l();
         virtual String8     getParameters(const String8& keys);
@@ -1401,6 +1504,28 @@
     friend class RecordThread;
     friend class PlaybackThread;
 
+    enum master_volume_support {
+        // MVS_NONE:
+        // Audio HAL has no support for master volume, either setting or
+        // getting.  All master volume control must be implemented in SW by the
+        // AudioFlinger mixing core.
+        MVS_NONE,
+
+        // MVS_SETONLY:
+        // Audio HAL has support for setting master volume, but not for getting
+        // master volume (original HAL design did not include a getter).
+        // AudioFlinger needs to keep track of the last master volume that was
+        // set, and must also set an initial, default master volume at HAL
+        // load time.
+        MVS_SETONLY,
+
+        // MVS_FULL:
+        // Audio HAL supports both setting and getting master volume.
+        // AudioFlinger should send all set and get master volume requests
+        // directly to the HAL.
+        MVS_FULL,
+    };
+
     mutable     Mutex                               mLock;
 
                 DefaultKeyedVector< pid_t, wp<Client> >     mClients;   // see ~Client()
@@ -1429,6 +1554,7 @@
         AUDIO_SET_VOICE_VOLUME,
         AUDIO_SET_PARAMETER,
         AUDIO_HW_GET_INPUT_BUFFER_SIZE,
+        AUDIO_HW_GET_MASTER_VOLUME,
     };
 
     mutable     hardware_call_state                 mHardwareStatus;    // for dump only
@@ -1439,6 +1565,8 @@
 
                 // both are protected by mLock
                 float                               mMasterVolume;
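+                // SW portion of the master volume, applied in the mixer when
+                // the HAL cannot (see master_volume_support), and the level
+                // of master volume support reported by the HAL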
+                float                               mMasterVolumeSW;
+                master_volume_support               mMasterVolumeSupportLvl;
                 bool                                mMasterMute;
 
                 DefaultKeyedVector< audio_io_handle_t, sp<RecordThread> >    mRecordThreads;
@@ -1451,7 +1579,8 @@
                 // protected by mLock
                 Vector<AudioSessionRef*> mAudioSessionRefs;
 
-                float       masterVolume_l() const  { return mMasterVolume; }
+                float       masterVolume_l() const;
+                float       masterVolumeSW_l() const  { return mMasterVolumeSW; }
                 bool        masterMute_l() const    { return mMasterMute; }
 
 private:
diff --git a/services/audioflinger/AudioMixer.cpp b/services/audioflinger/AudioMixer.cpp
index cb7678b..c399691 100644
--- a/services/audioflinger/AudioMixer.cpp
+++ b/services/audioflinger/AudioMixer.cpp
@@ -33,6 +33,8 @@
 #include <system/audio.h>
 
 #include <audio_utils/primitives.h>
+#include <common_time/local_clock.h>
+#include <common_time/cc_helper.h>
 
 #include "AudioMixer.h"
 
@@ -45,6 +47,9 @@
 {
     // AudioMixer is not yet capable of multi-channel beyond stereo
     assert(2 == MAX_NUM_CHANNELS);
+
+    LocalClock lc;
+
     mState.enabledTracks= 0;
     mState.needsChanged = 0;
     mState.frameCount   = frameCount;
@@ -80,6 +85,7 @@
         t->sampleRate = mSampleRate;
         t->mainBuffer = NULL;
         t->auxBuffer = NULL;
+        t->localTimeFreq = lc.getLocalFreq();
         t++;
     }
 }
@@ -289,6 +295,7 @@
             if (resampler == NULL) {
                 resampler = AudioResampler::create(
                         format, channelCount, devSampleRate);
+                resampler->setLocalTimeFreq(localTimeFreq);
             }
             return true;
         }
@@ -333,13 +340,13 @@
 
 
 
-void AudioMixer::process()
+void AudioMixer::process(int64_t pts)
 {
-    mState.hook(&mState);
+    mState.hook(&mState, pts);
 }
 
 
-void AudioMixer::process__validate(state_t* state)
+void AudioMixer::process__validate(state_t* state, int64_t pts)
 {
     ALOGW_IF(!state->needsChanged,
         "in process__validate() but nothing's invalid");
@@ -443,7 +450,7 @@
         countActiveTracks, state->enabledTracks,
         all16BitsStereoNoResample, resampling, volumeRamp);
 
-    state->hook(state);
+    state->hook(state, pts);
 
     // Now that the volume ramp has been done, set optimal state and
     // track hooks for subsequent mixer process
@@ -757,7 +764,7 @@
 }
 
 // no-op case
-void AudioMixer::process__nop(state_t* state)
+void AudioMixer::process__nop(state_t* state, int64_t pts)
 {
     uint32_t e0 = state->enabledTracks;
     size_t bufSize = state->frameCount * sizeof(int16_t) * MAX_NUM_CHANNELS;
@@ -787,7 +794,9 @@
             size_t outFrames = state->frameCount;
             while (outFrames) {
                 t1.buffer.frameCount = outFrames;
-                t1.bufferProvider->getNextBuffer(&t1.buffer);
+                int64_t outputPTS = calculateOutputPTS(
+                    t1, pts, state->frameCount - outFrames);
+                t1.bufferProvider->getNextBuffer(&t1.buffer, outputPTS);
                 if (t1.buffer.raw == NULL) break;
                 outFrames -= t1.buffer.frameCount;
                 t1.bufferProvider->releaseBuffer(&t1.buffer);
@@ -797,7 +806,7 @@
 }
 
 // generic code without resampling
-void AudioMixer::process__genericNoResampling(state_t* state)
+void AudioMixer::process__genericNoResampling(state_t* state, int64_t pts)
 {
     int32_t outTemp[BLOCKSIZE * MAX_NUM_CHANNELS] __attribute__((aligned(32)));
 
@@ -809,7 +818,7 @@
         e0 &= ~(1<<i);
         track_t& t = state->tracks[i];
         t.buffer.frameCount = state->frameCount;
-        t.bufferProvider->getNextBuffer(&t.buffer);
+        t.bufferProvider->getNextBuffer(&t.buffer, pts);
         t.frameCount = t.buffer.frameCount;
         t.in = t.buffer.raw;
         // t.in == NULL can happen if the track was flushed just after having
@@ -863,7 +872,9 @@
                     if (t.frameCount == 0 && outFrames) {
                         t.bufferProvider->releaseBuffer(&t.buffer);
                         t.buffer.frameCount = (state->frameCount - numFrames) - (BLOCKSIZE - outFrames);
-                        t.bufferProvider->getNextBuffer(&t.buffer);
+                        int64_t outputPTS = calculateOutputPTS(
+                            t, pts, numFrames + (BLOCKSIZE - outFrames));
+                        t.bufferProvider->getNextBuffer(&t.buffer, outputPTS);
                         t.in = t.buffer.raw;
                         if (t.in == NULL) {
                             enabledTracks &= ~(1<<i);
@@ -892,7 +903,7 @@
 
 
 // generic code with resampling
-void AudioMixer::process__genericResampling(state_t* state)
+void AudioMixer::process__genericResampling(state_t* state, int64_t pts)
 {
     // this const just means that local variable outTemp doesn't change
     int32_t* const outTemp = state->outputTemp;
@@ -932,6 +943,7 @@
             // acquire/release the buffers because it's done by
             // the resampler.
             if ((t.needs & NEEDS_RESAMPLE__MASK) == NEEDS_RESAMPLE_ENABLED) {
+                t.resampler->setPTS(pts);
                 (t.hook)(&t, outTemp, numFrames, state->resampleTemp, aux);
             } else {
 
@@ -939,7 +951,8 @@
 
                 while (outFrames < numFrames) {
                     t.buffer.frameCount = numFrames - outFrames;
-                    t.bufferProvider->getNextBuffer(&t.buffer);
+                    int64_t outputPTS = calculateOutputPTS(t, pts, outFrames);
+                    t.bufferProvider->getNextBuffer(&t.buffer, outputPTS);
                     t.in = t.buffer.raw;
                     // t.in == NULL can happen if the track was flushed just after having
                     // been enabled for mixing.
@@ -959,7 +972,8 @@
 }
 
 // one track, 16 bits stereo without resampling is the most common case
-void AudioMixer::process__OneTrack16BitsStereoNoResampling(state_t* state)
+void AudioMixer::process__OneTrack16BitsStereoNoResampling(state_t* state,
+                                                           int64_t pts)
 {
     // This method is only called when state->enabledTracks has exactly
     // one bit set.  The asserts below would verify this, but are commented out
@@ -979,7 +993,8 @@
     const uint32_t vrl = t.volumeRL;
     while (numFrames) {
         b.frameCount = numFrames;
-        t.bufferProvider->getNextBuffer(&b);
+        int64_t outputPTS = calculateOutputPTS(t, pts, out - t.mainBuffer);
+        t.bufferProvider->getNextBuffer(&b, outputPTS);
         const int16_t *in = b.i16;
 
         // in == NULL can happen if the track was flushed just after having
@@ -1023,7 +1038,8 @@
 // 2 tracks is also a common case
 // NEVER used in current implementation of process__validate()
 // only use if the 2 tracks have the same output buffer
-void AudioMixer::process__TwoTracks16BitsStereoNoResampling(state_t* state)
+void AudioMixer::process__TwoTracks16BitsStereoNoResampling(state_t* state,
+                                                            int64_t pts)
 {
     int i;
     uint32_t en = state->enabledTracks;
@@ -1057,7 +1073,9 @@
 
         if (frameCount0 == 0) {
             b0.frameCount = numFrames;
-            t0.bufferProvider->getNextBuffer(&b0);
+            int64_t outputPTS = calculateOutputPTS(t0, pts,
+                                                   out - t0.mainBuffer);
+            t0.bufferProvider->getNextBuffer(&b0, outputPTS);
             if (b0.i16 == NULL) {
                 if (buff == NULL) {
                     buff = new int16_t[MAX_NUM_CHANNELS * state->frameCount];
@@ -1071,7 +1089,9 @@
         }
         if (frameCount1 == 0) {
             b1.frameCount = numFrames;
-            t1.bufferProvider->getNextBuffer(&b1);
+            int64_t outputPTS = calculateOutputPTS(t1, pts,
+                                                   out - t0.mainBuffer);
+            t1.bufferProvider->getNextBuffer(&b1, outputPTS);
             if (b1.i16 == NULL) {
                 if (buff == NULL) {
                     buff = new int16_t[MAX_NUM_CHANNELS * state->frameCount];
@@ -1117,5 +1137,14 @@
 }
 #endif
 
+int64_t AudioMixer::calculateOutputPTS(const track_t& t, int64_t basePTS,
+                                       int outputFrameIndex)
+{
+    if (AudioBufferProvider::kInvalidPTS == basePTS)
+        return AudioBufferProvider::kInvalidPTS;
+
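+    // each output frame advances the presentation time by
+    // (localTimeFreq / sampleRate) local-time ticks, so offset the buffer's
+    // base PTS by the frame index scaled accordingly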
+    return basePTS + ((outputFrameIndex * t.localTimeFreq) / t.sampleRate);
+}
+
 // ----------------------------------------------------------------------------
 }; // namespace android
diff --git a/services/audioflinger/AudioMixer.h b/services/audioflinger/AudioMixer.h
index c956918..f442671 100644
--- a/services/audioflinger/AudioMixer.h
+++ b/services/audioflinger/AudioMixer.h
@@ -79,7 +79,7 @@
     void        setParameter(int name, int target, int param, void *value);
 
     void        setBufferProvider(int name, AudioBufferProvider* bufferProvider);
-    void        process();
+    void        process(int64_t pts);
 
     uint32_t    trackNames() const { return mTrackNames; }
 
@@ -114,7 +114,7 @@
     struct state_t;
     struct track_t;
 
-    typedef void (*mix_t)(state_t* state);
+    typedef void (*mix_t)(state_t* state, int64_t pts);
     typedef void (*hook_t)(track_t* t, int32_t* output, size_t numOutFrames, int32_t* temp, int32_t* aux);
     static const int BLOCKSIZE = 16; // 4 cache lines
 
@@ -152,6 +152,8 @@
         int32_t*           mainBuffer;
         int32_t*           auxBuffer;
 
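+        // frequency of the local time clock (ticks per second), used by
+        // calculateOutputPTS() to convert output frame offsets into
+        // local-time units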
+        uint64_t    localTimeFreq;
+
         bool        setResampler(uint32_t sampleRate, uint32_t devSampleRate);
         bool        doesResample() const { return resampler != NULL; }
         void        resetResampler() { if (resampler != NULL) resampler->reset(); }
@@ -187,14 +189,19 @@
     static void volumeRampStereo(track_t* t, int32_t* out, size_t frameCount, int32_t* temp, int32_t* aux);
     static void volumeStereo(track_t* t, int32_t* out, size_t frameCount, int32_t* temp, int32_t* aux);
 
-    static void process__validate(state_t* state);
-    static void process__nop(state_t* state);
-    static void process__genericNoResampling(state_t* state);
-    static void process__genericResampling(state_t* state);
-    static void process__OneTrack16BitsStereoNoResampling(state_t* state);
+    static void process__validate(state_t* state, int64_t pts);
+    static void process__nop(state_t* state, int64_t pts);
+    static void process__genericNoResampling(state_t* state, int64_t pts);
+    static void process__genericResampling(state_t* state, int64_t pts);
+    static void process__OneTrack16BitsStereoNoResampling(state_t* state,
+                                                          int64_t pts);
 #if 0
-    static void process__TwoTracks16BitsStereoNoResampling(state_t* state);
+    static void process__TwoTracks16BitsStereoNoResampling(state_t* state,
+                                                           int64_t pts);
 #endif
+
+    static int64_t calculateOutputPTS(const track_t& t, int64_t basePTS,
+                                      int outputFrameIndex);
 };
 
 // ----------------------------------------------------------------------------
diff --git a/services/audioflinger/AudioResampler.cpp b/services/audioflinger/AudioResampler.cpp
index 9486b9c..398ba0b 100644
--- a/services/audioflinger/AudioResampler.cpp
+++ b/services/audioflinger/AudioResampler.cpp
@@ -122,7 +122,8 @@
         int32_t sampleRate) :
     mBitDepth(bitDepth), mChannelCount(inChannelCount),
             mSampleRate(sampleRate), mInSampleRate(sampleRate), mInputIndex(0),
-            mPhaseFraction(0) {
+            mPhaseFraction(0), mLocalTimeFreq(0),
+            mPTS(AudioBufferProvider::kInvalidPTS) {
     // sanity check on format
     if ((bitDepth != 16) ||(inChannelCount < 1) || (inChannelCount > 2)) {
         ALOGE("Unsupported sample format, %d bits, %d channels", bitDepth,
@@ -150,6 +151,23 @@
     mVolume[1] = right;
 }
 
+void AudioResampler::setLocalTimeFreq(uint64_t freq) {
+    mLocalTimeFreq = freq;
+}
+
+void AudioResampler::setPTS(int64_t pts) {
+    mPTS = pts;
+}
+
+int64_t AudioResampler::calculateOutputPTS(int outputFrameIndex) {
+
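+    // mirrors AudioMixer::calculateOutputPTS: advance the base PTS by the
+    // output frame offset expressed in local-time ticks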
+    if (mPTS == AudioBufferProvider::kInvalidPTS) {
+        return AudioBufferProvider::kInvalidPTS;
+    } else {
+        return mPTS + ((outputFrameIndex * mLocalTimeFreq) / mSampleRate);
+    }
+}
+
 void AudioResampler::reset() {
     mInputIndex = 0;
     mPhaseFraction = 0;
@@ -196,7 +214,8 @@
         // buffer is empty, fetch a new one
         while (mBuffer.frameCount == 0) {
             mBuffer.frameCount = inFrameCount;
-            provider->getNextBuffer(&mBuffer);
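+            // outputIndex counts interleaved stereo samples, so the frame
+            // index of the next output frame is outputIndex / 2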
+            provider->getNextBuffer(&mBuffer,
+                                    calculateOutputPTS(outputIndex / 2));
             if (mBuffer.raw == NULL) {
                 goto resampleStereo16_exit;
             }
@@ -290,7 +309,8 @@
         // buffer is empty, fetch a new one
         while (mBuffer.frameCount == 0) {
             mBuffer.frameCount = inFrameCount;
-            provider->getNextBuffer(&mBuffer);
+            provider->getNextBuffer(&mBuffer,
+                                    calculateOutputPTS(outputIndex / 2));
             if (mBuffer.raw == NULL) {
                 mInputIndex = inputIndex;
                 mPhaseFraction = phaseFraction;
diff --git a/services/audioflinger/AudioResampler.h b/services/audioflinger/AudioResampler.h
index c23016e..9deb796 100644
--- a/services/audioflinger/AudioResampler.h
+++ b/services/audioflinger/AudioResampler.h
@@ -49,6 +49,10 @@
     virtual void init() = 0;
     virtual void setSampleRate(int32_t inSampleRate);
     virtual void setVolume(int16_t left, int16_t right);
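+
+    // set the frequency (in ticks per second) of the local time clock used
+    // when converting output frame indices into PTS values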
+    virtual void setLocalTimeFreq(uint64_t freq);
+
+    // set the PTS of the next buffer output by the resampler
+    virtual void setPTS(int64_t pts);
 
     virtual void resample(int32_t* out, size_t outFrameCount,
             AudioBufferProvider* provider) = 0;
@@ -72,6 +76,8 @@
     AudioResampler(const AudioResampler&);
     AudioResampler& operator=(const AudioResampler&);
 
+    int64_t calculateOutputPTS(int outputFrameIndex);
+
     const int32_t mBitDepth;
     const int32_t mChannelCount;
     const int32_t mSampleRate;
@@ -85,6 +91,8 @@
     size_t mInputIndex;
     int32_t mPhaseIncrement;
     uint32_t mPhaseFraction;
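+    // local time clock frequency (ticks per second) and the PTS of the next
+    // buffer to be output, both used by calculateOutputPTS()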
+    uint64_t mLocalTimeFreq;
+    int64_t mPTS;
 };
 
 // ----------------------------------------------------------------------------
diff --git a/services/audioflinger/AudioResamplerCubic.cpp b/services/audioflinger/AudioResamplerCubic.cpp
index c0e760e..18e59e9 100644
--- a/services/audioflinger/AudioResamplerCubic.cpp
+++ b/services/audioflinger/AudioResamplerCubic.cpp
@@ -65,7 +65,7 @@
     // fetch first buffer
     if (mBuffer.frameCount == 0) {
         mBuffer.frameCount = inFrameCount;
-        provider->getNextBuffer(&mBuffer);
+        provider->getNextBuffer(&mBuffer, mPTS);
         if (mBuffer.raw == NULL)
             return;
         // ALOGW("New buffer: offset=%p, frames=%dn", mBuffer.raw, mBuffer.frameCount);
@@ -95,7 +95,8 @@
                 inputIndex = 0;
                 provider->releaseBuffer(&mBuffer);
                 mBuffer.frameCount = inFrameCount;
-                provider->getNextBuffer(&mBuffer);
+                provider->getNextBuffer(&mBuffer,
+                                        calculateOutputPTS(outputIndex / 2));
                 if (mBuffer.raw == NULL)
                     goto save_state;  // ugly, but efficient
                 in = mBuffer.i16;
@@ -130,7 +131,7 @@
     // fetch first buffer
     if (mBuffer.frameCount == 0) {
         mBuffer.frameCount = inFrameCount;
-        provider->getNextBuffer(&mBuffer);
+        provider->getNextBuffer(&mBuffer, mPTS);
         if (mBuffer.raw == NULL)
             return;
         // ALOGW("New buffer: offset=%p, frames=%d", mBuffer.raw, mBuffer.frameCount);
@@ -160,7 +161,8 @@
                 inputIndex = 0;
                 provider->releaseBuffer(&mBuffer);
                 mBuffer.frameCount = inFrameCount;
-                provider->getNextBuffer(&mBuffer);
+                provider->getNextBuffer(&mBuffer,
+                                        calculateOutputPTS(outputIndex / 2));
                 if (mBuffer.raw == NULL)
                     goto save_state;  // ugly, but efficient
                 // ALOGW("New buffer: offset=%p, frames=%dn", mBuffer.raw, mBuffer.frameCount);
@@ -181,4 +183,3 @@
 // ----------------------------------------------------------------------------
 }
 ; // namespace android
-
diff --git a/services/audioflinger/AudioResamplerSinc.cpp b/services/audioflinger/AudioResamplerSinc.cpp
index 7a27b81..d373c08 100644
--- a/services/audioflinger/AudioResamplerSinc.cpp
+++ b/services/audioflinger/AudioResamplerSinc.cpp
@@ -203,7 +203,8 @@
         // buffer is empty, fetch a new one
         while (mBuffer.frameCount == 0) {
             mBuffer.frameCount = inFrameCount;
-            provider->getNextBuffer(&mBuffer);
+            provider->getNextBuffer(&mBuffer,
+                                    calculateOutputPTS(outputIndex / 2));
             if (mBuffer.raw == NULL) {
                 goto resample_exit;
             }
@@ -354,4 +355,3 @@
 
 // ----------------------------------------------------------------------------
 }; // namespace android
-