Merge "Fix technology extra naming for IsoDep and NfcB (API part)." into gingerbread
diff --git a/telephony/java/com/android/internal/telephony/sip/SipPhone.java b/telephony/java/com/android/internal/telephony/sip/SipPhone.java
index a92ac1c..72f3831 100755
--- a/telephony/java/com/android/internal/telephony/sip/SipPhone.java
+++ b/telephony/java/com/android/internal/telephony/sip/SipPhone.java
@@ -17,6 +17,7 @@
 package com.android.internal.telephony.sip;
 
 import android.content.Context;
+import android.media.AudioManager;
 import android.net.rtp.AudioGroup;
 import android.net.sip.SipAudioCall;
 import android.net.sip.SipErrorCode;
@@ -126,7 +127,7 @@
                     (ringingCall.getState() == Call.State.WAITING)) {
                 if (DEBUG) Log.d(LOG_TAG, "acceptCall");
                 // Always unmute when answering a new call
-                setMute(false);
+                ringingCall.setMute(false);
                 ringingCall.acceptCall();
             } else {
                 throw new CallStateException("phone not ringing");
@@ -170,7 +171,7 @@
             throw new CallStateException("cannot dial in current state");
         }
 
-        setMute(false);
+        foregroundCall.setMute(false);
         try {
             Connection c = foregroundCall.dial(dialString);
             return c;
@@ -288,16 +289,13 @@
 
     @Override
     public void setEchoSuppressionEnabled(boolean enabled) {
+        // TODO: Remove the enabled argument. We should check the speakerphone
+        // state with AudioManager instead of keeping a state here, so a
+        // method with a state argument is redundant. Also rename the method
+        // to something like onSpeakerphoneStateChanged(). Echo suppression
+        // may not be available on every device.
         synchronized (SipPhone.class) {
-            AudioGroup audioGroup = foregroundCall.getAudioGroup();
-            if (audioGroup == null) return;
-            int mode = audioGroup.getMode();
-            audioGroup.setMode(enabled
-                    ? AudioGroup.MODE_ECHO_SUPPRESSION
-                    : AudioGroup.MODE_NORMAL);
-            if (DEBUG) Log.d(LOG_TAG, String.format(
-                    "audioGroup mode change: %d --> %d", mode,
-                    audioGroup.getMode()));
+            foregroundCall.setAudioGroupMode();
         }
     }
 
@@ -450,13 +448,33 @@
             ((SipConnection) connections.get(0)).acceptCall();
         }
 
+        private boolean isSpeakerOn() {
+            return ((AudioManager) mContext.getSystemService(Context.AUDIO_SERVICE))
+                    .isSpeakerphoneOn();
+        }
+
+        void setAudioGroupMode() {
+            AudioGroup audioGroup = getAudioGroup();
+            if (audioGroup == null) return;
+            int mode = audioGroup.getMode();
+            if (state == State.HOLDING) {
+                audioGroup.setMode(AudioGroup.MODE_ON_HOLD);
+            } else if (getMute()) {
+                audioGroup.setMode(AudioGroup.MODE_MUTED);
+            } else if (isSpeakerOn()) {
+                audioGroup.setMode(AudioGroup.MODE_ECHO_SUPPRESSION);
+            } else {
+                audioGroup.setMode(AudioGroup.MODE_NORMAL);
+            }
+            if (DEBUG) Log.d(LOG_TAG, String.format(
+                    "audioGroup mode change: %d --> %d", mode,
+                    audioGroup.getMode()));
+        }
+
         void hold() throws CallStateException {
             setState(State.HOLDING);
-            AudioGroup audioGroup = getAudioGroup();
-            if (audioGroup != null) {
-                audioGroup.setMode(AudioGroup.MODE_ON_HOLD);
-            }
             for (Connection c : connections) ((SipConnection) c).hold();
+            setAudioGroupMode();
         }
 
         void unhold() throws CallStateException {
@@ -465,19 +483,19 @@
             for (Connection c : connections) {
                 ((SipConnection) c).unhold(audioGroup);
             }
+            setAudioGroupMode();
         }
 
         void setMute(boolean muted) {
-            AudioGroup audioGroup = getAudioGroup();
-            if (audioGroup == null) return;
-            audioGroup.setMode(
-                    muted ? AudioGroup.MODE_MUTED : AudioGroup.MODE_NORMAL);
+            for (Connection c : connections) {
+                ((SipConnection) c).setMute(muted);
+            }
         }
 
         boolean getMute() {
-            AudioGroup audioGroup = getAudioGroup();
-            if (audioGroup == null) return false;
-            return (audioGroup.getMode() == AudioGroup.MODE_MUTED);
+            return connections.isEmpty()
+                    ? false
+                    : ((SipConnection) connections.get(0)).getMute();
         }
 
         void merge(SipCall that) throws CallStateException {
@@ -736,6 +754,17 @@
             }
         }
 
+        void setMute(boolean muted) {
+            if ((mSipAudioCall != null) && (muted != mSipAudioCall.isMuted())) {
+                mSipAudioCall.toggleMute();
+            }
+        }
+
+        boolean getMute() {
+            return (mSipAudioCall == null) ? false
+                                           : mSipAudioCall.isMuted();
+        }
+
         @Override
         protected void setState(Call.State state) {
             if (state == mState) return;
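
Note: the new SipCall.setAudioGroupMode() above applies a fixed precedence: a held call forces MODE_ON_HOLD, a muted call forces MODE_MUTED, and only then does the live speakerphone state (queried from AudioManager rather than a cached flag) choose between MODE_ECHO_SUPPRESSION and MODE_NORMAL. The sketch below restates that decision on its own; the class and method names are illustrative only and not part of the patch.

    import android.media.AudioManager;
    import android.net.rtp.AudioGroup;

    // Illustrative sketch of the precedence used by SipCall.setAudioGroupMode():
    // hold wins over mute, and mute wins over speakerphone echo suppression.
    class AudioGroupModePolicy {  // hypothetical helper, not part of the patch
        static int pick(boolean holding, boolean muted, AudioManager audioManager) {
            if (holding) return AudioGroup.MODE_ON_HOLD;
            if (muted) return AudioGroup.MODE_MUTED;
            if (audioManager.isSpeakerphoneOn()) return AudioGroup.MODE_ECHO_SUPPRESSION;
            return AudioGroup.MODE_NORMAL;
        }
    }
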
diff --git a/voip/jni/rtp/AudioGroup.cpp b/voip/jni/rtp/AudioGroup.cpp
index 0c8a725..60abf2a 100644
--- a/voip/jni/rtp/AudioGroup.cpp
+++ b/voip/jni/rtp/AudioGroup.cpp
@@ -63,6 +63,14 @@
 // real jitter buffer. For a stream at 8000Hz it takes 8192 bytes. These numbers
 // are chosen by experiments and each of them can be adjusted as needed.
 
+// Originally, a stream does not send packets when it is receive-only or there
+// is nothing to mix. However, this causes problems with certain firewalls and
+// proxies. A firewall might remove a port mapping when there is no outgoing
+// packet for a period of time, and a proxy might wait for incoming packets
+// from both sides before it starts forwarding. To solve these problems, we
+// send out a silence packet on the stream every second. This should be good
+// enough to keep the stream alive at relatively low cost.
+
 // Other notes:
 // + We use elapsedRealtime() to get the time. Since we use 32bit variables
 //   instead of 64bit ones, comparison must be done by subtraction.
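
Note: the keep-alive described in the comment above is throttled in the encode path further down with the expression (mTick ^ mKeepAlive) >> 10, which is nonzero only once the two millisecond ticks land in different 1024 ms aligned windows, i.e. roughly once per second. A small stand-alone sketch of that check, with hypothetical names:

    #include <cstdio>

    // Same idea as "(mTick ^ mKeepAlive) >> 10 == 0" in the stream's encode path:
    // two millisecond ticks that agree in all bits above bit 9 sit in the same
    // 1024 ms bucket, so at most one silence packet goes out per bucket.
    static bool dueForKeepAlive(int tick, int lastKeepAlive)  // hypothetical helper
    {
        return ((tick ^ lastKeepAlive) >> 10) != 0;
    }

    int main()
    {
        int lastKeepAlive = 0;
        for (int tick = 0; tick <= 3000; tick += 500) {
            if (dueForKeepAlive(tick, lastKeepAlive)) {
                lastKeepAlive = tick;
                printf("tick %d ms: send silence packet\n", tick);
            }
        }
        return 0;
    }
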
@@ -110,7 +118,7 @@
     int mSampleRate;
     int mSampleCount;
     int mInterval;
-    int mLogThrottle;
+    int mKeepAlive;
 
     int16_t *mBuffer;
     int mBufferMask;
@@ -262,12 +270,8 @@
     ++mSequence;
     mTimestamp += mSampleCount;
 
-    if (mMode == RECEIVE_ONLY) {
-        return;
-    }
-
     // If there is an ongoing DTMF event, send it now.
-    if (mDtmfEvent != -1) {
+    if (mMode != RECEIVE_ONLY && mDtmfEvent != -1) {
         int duration = mTimestamp - mDtmfStart;
         // Make sure duration is reasonable.
         if (duration >= 0 && duration < mSampleRate * 100) {
@@ -289,43 +293,55 @@
         mDtmfEvent = -1;
     }
 
-    // It is time to mix streams.
-    bool mixed = false;
     int32_t buffer[mSampleCount + 3];
-    memset(buffer, 0, sizeof(buffer));
-    while (chain) {
-        if (chain != this &&
-            chain->mix(buffer, tick - mInterval, tick, mSampleRate)) {
-            mixed = true;
+    int16_t samples[mSampleCount];
+    if (mMode == RECEIVE_ONLY) {
+        if ((mTick ^ mKeepAlive) >> 10 == 0) {
+            return;
         }
-        chain = chain->mNext;
-    }
-    if (!mixed) {
-        if ((mTick ^ mLogThrottle) >> 10) {
-            mLogThrottle = mTick;
+        mKeepAlive = mTick;
+        memset(samples, 0, sizeof(samples));
+    } else {
+        // Mix all other streams.
+        bool mixed = false;
+        memset(buffer, 0, sizeof(buffer));
+        while (chain) {
+            if (chain != this &&
+                chain->mix(buffer, tick - mInterval, tick, mSampleRate)) {
+                mixed = true;
+            }
+            chain = chain->mNext;
+        }
+
+        if (mixed) {
+            // Saturate into 16 bits.
+            for (int i = 0; i < mSampleCount; ++i) {
+                int32_t sample = buffer[i];
+                if (sample < -32768) {
+                    sample = -32768;
+                }
+                if (sample > 32767) {
+                    sample = 32767;
+                }
+                samples[i] = sample;
+            }
+        } else {
+            if ((mTick ^ mKeepAlive) >> 10 == 0) {
+                return;
+            }
+            mKeepAlive = mTick;
+            memset(samples, 0, sizeof(samples));
             LOGV("stream[%d] no data", mSocket);
         }
-        return;
     }
 
-    // Cook the packet and send it out.
-    int16_t samples[mSampleCount];
-    for (int i = 0; i < mSampleCount; ++i) {
-        int32_t sample = buffer[i];
-        if (sample < -32768) {
-            sample = -32768;
-        }
-        if (sample > 32767) {
-            sample = 32767;
-        }
-        samples[i] = sample;
-    }
     if (!mCodec) {
         // Special case for device stream.
         send(mSocket, samples, sizeof(samples), MSG_DONTWAIT);
         return;
     }
 
+    // Cook the packet and send it out.
     buffer[0] = htonl(mCodecMagic | mSequence);
     buffer[1] = htonl(mTimestamp);
     buffer[2] = mSsrc;
@@ -883,7 +899,7 @@
     int codecType = -1;
     char codecName[16];
     int sampleRate = -1;
-    sscanf(codecSpec, "%d %[^/]%*c%d", &codecType, codecName, &sampleRate);
+    sscanf(codecSpec, "%d %15[^/]%*c%d", &codecType, codecName, &sampleRate);
     codec = newAudioCodec(codecName);
     int sampleCount = (codec ? codec->set(sampleRate, codecSpec) : -1);
     env->ReleaseStringUTFChars(jCodecSpec, codecSpec);
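
Note: the last hunk is a buffer-safety fix. codecName is a 16-byte array, and an unbounded "%[^/]" lets an oversized codec name in the spec overrun it; adding the width, "%15[^/]", caps the conversion at 15 characters plus the terminating NUL. A stand-alone sketch with a made-up codec spec:

    #include <cstdio>

    int main()
    {
        // Hypothetical "type name/rate" spec in the same shape the patch parses.
        const char *codecSpec = "8 PCMA/8000";
        int codecType = -1;
        char codecName[16];
        int sampleRate = -1;
        // The width specifier keeps sscanf from writing past the end of codecName.
        if (sscanf(codecSpec, "%d %15[^/]%*c%d",
                   &codecType, codecName, &sampleRate) == 3) {
            printf("type=%d name=%s rate=%d\n", codecType, codecName, sampleRate);
        }
        return 0;
    }
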