Merge "Fix uninitialized variable in SurfaceTexture"
diff --git a/api/current.txt b/api/current.txt
index ebf3893..c76627f 100644
--- a/api/current.txt
+++ b/api/current.txt
@@ -17115,14 +17115,32 @@
package android.speech.tts {
+ public abstract class SynthesisRequest {
+ ctor public SynthesisRequest(java.lang.String);
+ method public abstract int audioAvailable(byte[], int, int);
+ method public abstract int completeAudioAvailable(int, int, int, byte[], int, int);
+ method public abstract int done();
+ method public abstract void error();
+ method public java.lang.String getCountry();
+ method public java.lang.String getLanguage();
+ method public abstract int getMaxBufferSize();
+ method public int getPitch();
+ method public int getSpeechRate();
+ method public java.lang.String getText();
+ method public java.lang.String getVariant();
+ method public abstract int start(int, int, int);
+ }
+
public class TextToSpeech {
ctor public TextToSpeech(android.content.Context, android.speech.tts.TextToSpeech.OnInitListener);
+ ctor public TextToSpeech(android.content.Context, android.speech.tts.TextToSpeech.OnInitListener, java.lang.String);
method public int addEarcon(java.lang.String, java.lang.String, int);
method public int addEarcon(java.lang.String, java.lang.String);
method public int addSpeech(java.lang.String, java.lang.String, int);
method public int addSpeech(java.lang.String, java.lang.String);
method public boolean areDefaultsEnforced();
method public java.lang.String getDefaultEngine();
+ method public java.util.List<android.speech.tts.TextToSpeech.EngineInfo> getEngines();
method public java.util.Locale getLanguage();
method public int isLanguageAvailable(java.util.Locale);
method public boolean isSpeaking();
@@ -17167,12 +17185,20 @@
field public static final java.lang.String EXTRA_VOICE_DATA_FILES = "dataFiles";
field public static final java.lang.String EXTRA_VOICE_DATA_FILES_INFO = "dataFilesInfo";
field public static final java.lang.String EXTRA_VOICE_DATA_ROOT_DIRECTORY = "dataRoot";
+ field public static final java.lang.String INTENT_ACTION_TTS_SERVICE = "android.intent.action.TTS_SERVICE";
field public static final java.lang.String KEY_PARAM_PAN = "pan";
field public static final java.lang.String KEY_PARAM_STREAM = "streamType";
field public static final java.lang.String KEY_PARAM_UTTERANCE_ID = "utteranceId";
field public static final java.lang.String KEY_PARAM_VOLUME = "volume";
}
+ public static class TextToSpeech.EngineInfo {
+ ctor public TextToSpeech.EngineInfo();
+ field public int icon;
+ field public java.lang.String label;
+ field public java.lang.String name;
+ }
+
public static abstract interface TextToSpeech.OnInitListener {
method public abstract void onInit(int);
}
@@ -17181,6 +17207,16 @@
method public abstract void onUtteranceCompleted(java.lang.String);
}
+ public abstract class TextToSpeechService extends android.app.Service {
+ ctor public TextToSpeechService();
+ method public android.os.IBinder onBind(android.content.Intent);
+ method protected abstract java.lang.String[] onGetLanguage();
+ method protected abstract int onIsLanguageAvailable(java.lang.String, java.lang.String, java.lang.String);
+ method protected abstract int onLoadLanguage(java.lang.String, java.lang.String, java.lang.String);
+ method protected abstract void onStop();
+ method protected abstract void onSynthesizeText(android.speech.tts.SynthesisRequest);
+ }
+
}
package android.telephony {
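For reference, the engine-side API listed above could be implemented roughly as in the sketch below. This is not part of the change: the class name is hypothetical, the parameter meanings of SynthesisRequest.start() (sample rate in Hz, audio format, channel count) are assumed rather than stated in this dump, and a real engine would emit synthesized PCM for request.getText() instead of silence. Such a service would be declared in the manifest with an intent filter for INTENT_ACTION_TTS_SERVICE (android.intent.action.TTS_SERVICE, added above).

    // Hypothetical engine skeleton; only the overridden signatures come from the API dump above.
    public class SilentTtsService extends android.speech.tts.TextToSpeechService {
        private volatile boolean mStopRequested;

        @Override
        protected int onIsLanguageAvailable(String lang, String country, String variant) {
            return "eng".equals(lang) ? android.speech.tts.TextToSpeech.LANG_AVAILABLE
                                      : android.speech.tts.TextToSpeech.LANG_NOT_SUPPORTED;
        }

        @Override
        protected int onLoadLanguage(String lang, String country, String variant) {
            return onIsLanguageAvailable(lang, country, variant);
        }

        @Override
        protected String[] onGetLanguage() {
            // Currently loaded language as {language, country, variant}.
            return new String[] { "eng", "USA", "" };
        }

        @Override
        protected void onStop() {
            mStopRequested = true;
        }

        @Override
        protected void onSynthesizeText(android.speech.tts.SynthesisRequest request) {
            mStopRequested = false;
            // Assumed parameter order for start(): sample rate in Hz, audio format, channel count.
            if (request.start(16000, android.media.AudioFormat.ENCODING_PCM_16BIT, 1)
                    != android.speech.tts.TextToSpeech.SUCCESS) {
                request.error();
                return;
            }
            // A real engine would synthesize request.getText() and stream PCM chunks no larger
            // than request.getMaxBufferSize() through audioAvailable(); this sketch emits silence.
            byte[] silence = new byte[Math.min(request.getMaxBufferSize(), 8192)];
            if (!mStopRequested) {
                request.audioAvailable(silence, 0, silence.length);
            }
            request.done();
        }
    }
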
diff --git a/cmds/stagefright/audioloop.cpp b/cmds/stagefright/audioloop.cpp
index 8733662..ceb254f 100644
--- a/cmds/stagefright/audioloop.cpp
+++ b/cmds/stagefright/audioloop.cpp
@@ -11,6 +11,8 @@
#include <media/stagefright/OMXClient.h>
#include <media/stagefright/OMXCodec.h>
+#include <hardware/audio.h>
+
using namespace android;
int main() {
@@ -31,8 +33,8 @@
AUDIO_SOURCE_DEFAULT,
kSampleRate,
kNumChannels == 1
- ? AudioSystem::CHANNEL_IN_MONO
- : AudioSystem::CHANNEL_IN_STEREO);
+ ? AUDIO_CHANNEL_IN_MONO
+ : AUDIO_CHANNEL_IN_STEREO);
#endif
sp<MetaData> meta = new MetaData;
diff --git a/core/java/android/speech/tts/SynthesisRequest.java b/core/java/android/speech/tts/SynthesisRequest.java
index 515218b..6df9af2 100644
--- a/core/java/android/speech/tts/SynthesisRequest.java
+++ b/core/java/android/speech/tts/SynthesisRequest.java
@@ -24,8 +24,6 @@
*
* Alternatively, the engine can provide all the audio at once, by using
* {@link #completeAudioAvailable}.
- *
- * @hide Pending approval
*/
public abstract class SynthesisRequest {
@@ -92,16 +90,14 @@
}
/**
- * Gets the speech rate to use. {@link TextToSpeech.Engine#DEFAULT_RATE} (100)
- * is the normal rate.
+ * Gets the speech rate to use. The normal rate is 100.
*/
public int getSpeechRate() {
return mSpeechRate;
}
/**
- * Gets the pitch to use. {@link TextToSpeech.Engine#DEFAULT_PITCH} (100)
- * is the normal pitch.
+ * Gets the pitch to use. The normal pitch is 100.
*/
public int getPitch() {
return mPitch;
diff --git a/core/java/android/speech/tts/TextToSpeech.java b/core/java/android/speech/tts/TextToSpeech.java
index e247df8..1d26c22 100755
--- a/core/java/android/speech/tts/TextToSpeech.java
+++ b/core/java/android/speech/tts/TextToSpeech.java
@@ -220,8 +220,6 @@
* extend {@link TextToSpeechService}. Normal applications should not use this intent
directly; instead, they should talk to the TTS service using the methods in this
* class.
- *
- * @hide Pending API council approval
*/
@SdkConstant(SdkConstantType.SERVICE_ACTION)
public static final String INTENT_ACTION_TTS_SERVICE =
@@ -428,7 +426,7 @@
private final Bundle mParams = new Bundle();
/**
- * The constructor for the TextToSpeech class.
+ * The constructor for the TextToSpeech class, using the default TTS engine.
* This will also initialize the associated TextToSpeech engine if it isn't already running.
*
* @param context
@@ -442,7 +440,15 @@
}
/**
- * @hide pending approval
+ * The constructor for the TextToSpeech class, using the given TTS engine.
+ * This will also initialize the associated TextToSpeech engine if it isn't already running.
+ *
+ * @param context
+ * The context this instance is running in.
+ * @param listener
+ * The {@link TextToSpeech.OnInitListener} that will be called when the
+ * TextToSpeech engine has initialized.
+ * @param engine Package name of the TTS engine to use.
*/
public TextToSpeech(Context context, OnInitListener listener, String engine) {
mContext = context;
@@ -1060,8 +1066,6 @@
* Gets a list of all installed TTS engines.
*
* @return A list of engine info objects. The list can be empty, but will never be {@code null}.
- *
- * @hide Pending approval
*/
public List<EngineInfo> getEngines() {
PackageManager pm = mContext.getPackageManager();
@@ -1144,7 +1148,6 @@
* Information about an installed text-to-speech engine.
*
* @see TextToSpeech#getEngines
- * @hide Pending approval
*/
public static class EngineInfo {
/**
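On the client side, the engine-selection constructor and getEngines() unhidden above might be used as in the sketch below; "com.example.tts" is a placeholder package name, not something introduced by this change.

    void listEnginesAndInit(android.content.Context context) {
        android.speech.tts.TextToSpeech tts = new android.speech.tts.TextToSpeech(context,
                new android.speech.tts.TextToSpeech.OnInitListener() {
                    @Override
                    public void onInit(int status) {
                        android.util.Log.d("TtsDemo", "onInit status=" + status);
                    }
                }, "com.example.tts");

        for (android.speech.tts.TextToSpeech.EngineInfo engine : tts.getEngines()) {
            // EngineInfo.name is the engine package, label the human-readable name,
            // icon a drawable resource id.
            android.util.Log.d("TtsDemo", engine.name + " (" + engine.label + ")");
        }
    }
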
diff --git a/core/java/android/speech/tts/TextToSpeechService.java b/core/java/android/speech/tts/TextToSpeechService.java
index da97fb4..590e2ef 100644
--- a/core/java/android/speech/tts/TextToSpeechService.java
+++ b/core/java/android/speech/tts/TextToSpeechService.java
@@ -41,8 +41,6 @@
/**
* Abstract base class for TTS engine implementations.
- *
- * @hide Pending approval
*/
public abstract class TextToSpeechService extends Service {
diff --git a/core/java/android/text/style/TextAppearanceSpan.java b/core/java/android/text/style/TextAppearanceSpan.java
index de929e3..deed713 100644
--- a/core/java/android/text/style/TextAppearanceSpan.java
+++ b/core/java/android/text/style/TextAppearanceSpan.java
@@ -51,10 +51,9 @@
* to determine the color. The <code>appearance</code> should be,
* for example, <code>android.R.style.TextAppearance_Small</code>,
* and the <code>colorList</code> should be, for example,
- * <code>android.R.styleable.Theme_textColorDim</code>.
+ * <code>android.R.styleable.Theme_textColorPrimary</code>.
*/
- public TextAppearanceSpan(Context context, int appearance,
- int colorList) {
+ public TextAppearanceSpan(Context context, int appearance, int colorList) {
ColorStateList textColor;
TypedArray a =
diff --git a/core/java/android/view/ViewGroup.java b/core/java/android/view/ViewGroup.java
index 7b404b4..1a84175 100644
--- a/core/java/android/view/ViewGroup.java
+++ b/core/java/android/view/ViewGroup.java
@@ -2203,10 +2203,10 @@
}
/**
- * Perform dispatching of a {@link #saveHierarchyState freeze()} to only this view,
- * not to its children. For use when overriding
- * {@link #dispatchSaveInstanceState dispatchFreeze()} to allow subclasses to freeze
- * their own state but not the state of their children.
+ * Perform dispatching of a {@link #saveHierarchyState(android.util.SparseArray) freeze()}
+ * to only this view, not to its children. For use when overriding
+ * {@link #dispatchSaveInstanceState(android.util.SparseArray) dispatchFreeze()} to allow
+ * subclasses to freeze their own state but not the state of their children.
*
* @param container the container
*/
diff --git a/core/java/android/view/ViewRoot.java b/core/java/android/view/ViewRoot.java
index f02daba..20503b7 100644
--- a/core/java/android/view/ViewRoot.java
+++ b/core/java/android/view/ViewRoot.java
@@ -1926,6 +1926,7 @@
void dispatchDetachedFromWindow() {
if (mView != null && mView.mAttachInfo != null) {
mView.dispatchDetachedFromWindow();
+ mAttached = false;
}
mView = null;
diff --git a/core/java/android/view/inputmethod/InputMethodSubtype.java b/core/java/android/view/inputmethod/InputMethodSubtype.java
index 25f2229..807f6ce 100644
--- a/core/java/android/view/inputmethod/InputMethodSubtype.java
+++ b/core/java/android/view/inputmethod/InputMethodSubtype.java
@@ -60,8 +60,7 @@
mSubtypeLocale = locale != null ? locale : "";
mSubtypeMode = mode != null ? mode : "";
mSubtypeExtraValue = extraValue != null ? extraValue : "";
- mSubtypeHashCode = hashCodeInternal(mSubtypeNameResId, mSubtypeIconResId, mSubtypeLocale,
- mSubtypeMode, mSubtypeExtraValue);
+ mSubtypeHashCode = hashCodeInternal(mSubtypeLocale, mSubtypeMode, mSubtypeExtraValue);
}
InputMethodSubtype(Parcel source) {
@@ -74,8 +73,7 @@
mSubtypeMode = s != null ? s : "";
s = source.readString();
mSubtypeExtraValue = s != null ? s : "";
- mSubtypeHashCode = hashCodeInternal(mSubtypeNameResId, mSubtypeIconResId, mSubtypeLocale,
- mSubtypeMode, mSubtypeExtraValue);
+ mSubtypeHashCode = hashCodeInternal(mSubtypeLocale, mSubtypeMode, mSubtypeExtraValue);
}
/**
@@ -195,9 +193,8 @@
}
};
- private static int hashCodeInternal(int nameResId, int iconResId, String locale,
- String mode, String extraValue) {
- return Arrays.hashCode(new Object[] {nameResId, iconResId, locale, mode, extraValue});
+ private static int hashCodeInternal(String locale, String mode, String extraValue) {
+ return Arrays.hashCode(new Object[] {locale, mode, extraValue});
}
/**
diff --git a/core/java/android/webkit/WebSettings.java b/core/java/android/webkit/WebSettings.java
index 71d6080..3e11197 100644
--- a/core/java/android/webkit/WebSettings.java
+++ b/core/java/android/webkit/WebSettings.java
@@ -219,6 +219,7 @@
private boolean mAllowContentAccess = true;
private boolean mLoadWithOverviewMode = false;
private boolean mEnableSmoothTransition = false;
+ private boolean mForceUserScalable = false;
// AutoFill Profile data
/**
@@ -1658,6 +1659,23 @@
}
}
+ /**
+ * Returns whether the viewport meta tag can disable zooming.
+ * @hide
+ */
+ public boolean forceUserScalable() {
+ return mForceUserScalable;
+ }
+
+ /**
+ * Sets whether the viewport meta tag can disable zooming.
+ * @param flag Whether or not to forcibly enable user scaling.
+ * @hide
+ */
+ public synchronized void setForceUserScalable(boolean flag) {
+ mForceUserScalable = flag;
+ }
+
synchronized void setSyntheticLinksEnabled(boolean flag) {
if (mSyntheticLinksEnabled != flag) {
mSyntheticLinksEnabled = flag;
diff --git a/core/java/android/webkit/WebViewCore.java b/core/java/android/webkit/WebViewCore.java
index 0271695..09205a5 100644
--- a/core/java/android/webkit/WebViewCore.java
+++ b/core/java/android/webkit/WebViewCore.java
@@ -2253,6 +2253,27 @@
// set the viewport settings from WebKit
setViewportSettingsFromNative();
+ if (mSettings.forceUserScalable()) {
+ mViewportUserScalable = true;
+ if (mViewportInitialScale > 0) {
+ if (mViewportMinimumScale > 0) {
+ mViewportMinimumScale = Math.min(mViewportMinimumScale,
+ mViewportInitialScale / 2);
+ }
+ if (mViewportMaximumScale > 0) {
+ mViewportMaximumScale = Math.max(mViewportMaximumScale,
+ mViewportInitialScale * 2);
+ }
+ } else {
+ if (mViewportMinimumScale > 0) {
+ mViewportMinimumScale = Math.min(mViewportMinimumScale, 50);
+ }
+ if (mViewportMaximumScale > 0) {
+ mViewportMaximumScale = Math.max(mViewportMaximumScale, 200);
+ }
+ }
+ }
+
// adjust the default scale to match the densityDpi
float adjust = 1.0f;
if (mViewportDensityDpi == -1) {
@@ -2589,11 +2610,11 @@
// called by JNI
private Class<?> getPluginClass(String libName, String clsName) {
-
+
if (mWebView == null) {
return null;
}
-
+
PluginManager pluginManager = PluginManager.getInstance(null);
String pkgName = pluginManager.getPluginsAPKName(libName);
@@ -2601,7 +2622,7 @@
Log.w(LOGTAG, "Unable to resolve " + libName + " to a plugin APK");
return null;
}
-
+
try {
return pluginManager.getPluginClass(pkgName, clsName);
} catch (NameNotFoundException e) {
@@ -2656,7 +2677,7 @@
view.mView = pluginView;
return view;
}
-
+
// called by JNI. PluginWidget functions for creating an embedded View for
// the surface drawing model.
private ViewManager.ChildView addSurface(View pluginView, int x, int y,
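The effect of the new setting, assuming framework code with a WebView in scope (the setter is @hide, so this is internal usage only, not app-facing API):

    android.webkit.WebSettings settings = webView.getSettings();
    settings.setForceUserScalable(true);
    // WebViewCore above then overrides user-scalable=no and widens the viewport scale limits:
    // with initial-scale 1.5 (150), minimum-scale is clamped to <= 75 and maximum-scale to >= 300;
    // with no initial-scale, the bounds become <= 50 and >= 200.
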
diff --git a/core/java/android/widget/ImageView.java b/core/java/android/widget/ImageView.java
index 1fe6f4b..d8068f9 100644
--- a/core/java/android/widget/ImageView.java
+++ b/core/java/android/widget/ImageView.java
@@ -218,15 +218,16 @@
/**
* An optional argument to supply a maximum width for this view. Only valid if
- * {@link #setAdjustViewBounds} has been set to true. To set an image to be a maximum of 100 x
- * 100 while preserving the original aspect ratio, do the following: 1) set adjustViewBounds to
- * true 2) set maxWidth and maxHeight to 100 3) set the height and width layout params to
- * WRAP_CONTENT.
+ * {@link #setAdjustViewBounds(boolean)} has been set to true. To set an image to be a maximum
+ * of 100 x 100 while preserving the original aspect ratio, do the following: 1) set
+ * adjustViewBounds to true 2) set maxWidth and maxHeight to 100 3) set the height and width
+ * layout params to WRAP_CONTENT.
*
* <p>
* Note that this view could be still smaller than 100 x 100 using this approach if the original
* image is small. To set an image to a fixed size, specify that size in the layout params and
- * then use {@link #setScaleType} to determine how to fit the image within the bounds.
+ * then use {@link #setScaleType(android.widget.ImageView.ScaleType)} to determine how to fit
+ * the image within the bounds.
* </p>
*
* @param maxWidth maximum width for this view
@@ -240,15 +241,16 @@
/**
* An optional argument to supply a maximum height for this view. Only valid if
- * {@link #setAdjustViewBounds} has been set to true. To set an image to be a maximum of 100 x
- * 100 while preserving the original aspect ratio, do the following: 1) set adjustViewBounds to
- * true 2) set maxWidth and maxHeight to 100 3) set the height and width layout params to
- * WRAP_CONTENT.
+ * {@link #setAdjustViewBounds(boolean)} has been set to true. To set an image to be a
+ * maximum of 100 x 100 while preserving the original aspect ratio, do the following: 1) set
+ * adjustViewBounds to true 2) set maxWidth and maxHeight to 100 3) set the height and width
+ * layout params to WRAP_CONTENT.
*
* <p>
* Note that this view could be still smaller than 100 x 100 using this approach if the original
* image is small. To set an image to a fixed size, specify that size in the layout params and
- * then use {@link #setScaleType} to determine how to fit the image within the bounds.
+ * then use {@link #setScaleType(android.widget.ImageView.ScaleType)} to determine how to fit
+ * the image within the bounds.
* </p>
*
* @param maxHeight maximum height for this view
@@ -272,8 +274,8 @@
*
* <p class="note">This does Bitmap reading and decoding on the UI
* thread, which can cause a latency hiccup. If that's a concern,
- * consider using {@link #setImageDrawable} or
- * {@link #setImageBitmap} and
+ * consider using {@link #setImageDrawable(android.graphics.drawable.Drawable)} or
+ * {@link #setImageBitmap(android.graphics.Bitmap)} and
* {@link android.graphics.BitmapFactory} instead.</p>
*
* @param resId the resource identifier of the drawable
@@ -297,8 +299,8 @@
*
* <p class="note">This does Bitmap reading and decoding on the UI
* thread, which can cause a latency hiccup. If that's a concern,
- * consider using {@link #setImageDrawable} or
- * {@link #setImageBitmap} and
+ * consider using {@link #setImageDrawable(android.graphics.drawable.Drawable)} or
+ * {@link #setImageBitmap(android.graphics.Bitmap)} and
* {@link android.graphics.BitmapFactory} instead.</p>
*
* @param uri The Uri of an image
@@ -902,12 +904,12 @@
/**
* <p>Set the offset of the widget's text baseline from the widget's top
- * boundary. This value is overridden by the {@link #setBaselineAlignBottom}
+ * boundary. This value is overridden by the {@link #setBaselineAlignBottom(boolean)}
* property.</p>
*
* @param baseline The baseline to use, or -1 if none is to be provided.
*
- * @see #setBaseline
+ * @see #setBaseline(int)
* @attr ref android.R.styleable#ImageView_baseline
*/
public void setBaseline(int baseline) {
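The three-step recipe in the setMaxWidth/setMaxHeight documentation above, as code (a sketch assuming a Context in scope; 100 here is in raw pixels, a real layout would typically use dp via resources):

    android.widget.ImageView imageView = new android.widget.ImageView(context);
    imageView.setAdjustViewBounds(true);   // 1) enable bound adjustment
    imageView.setMaxWidth(100);            // 2) cap both dimensions at 100
    imageView.setMaxHeight(100);
    imageView.setLayoutParams(new android.view.ViewGroup.LayoutParams(
            android.view.ViewGroup.LayoutParams.WRAP_CONTENT,   // 3) wrap content in
            android.view.ViewGroup.LayoutParams.WRAP_CONTENT)); //    both directions
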
diff --git a/core/java/android/widget/TextView.java b/core/java/android/widget/TextView.java
index 4d3aa68..6c5d117 100644
--- a/core/java/android/widget/TextView.java
+++ b/core/java/android/widget/TextView.java
@@ -60,6 +60,7 @@
import android.text.SpanWatcher;
import android.text.Spannable;
import android.text.SpannableString;
+import android.text.SpannableStringBuilder;
import android.text.Spanned;
import android.text.SpannedString;
import android.text.StaticLayout;
@@ -80,10 +81,13 @@
import android.text.method.TextKeyListener;
import android.text.method.TimeKeyListener;
import android.text.method.TransformationMethod;
+import android.text.method.WordIterator;
import android.text.style.ClickableSpan;
import android.text.style.ParagraphStyle;
import android.text.style.SuggestionSpan;
+import android.text.style.TextAppearanceSpan;
import android.text.style.URLSpan;
+import android.text.style.UnderlineSpan;
import android.text.style.UpdateAppearance;
import android.text.util.Linkify;
import android.util.AttributeSet;
@@ -127,6 +131,7 @@
import java.io.IOException;
import java.lang.ref.WeakReference;
+import java.text.BreakIterator;
import java.util.ArrayList;
/**
@@ -314,6 +319,7 @@
private int mTextEditSuggestionsBottomWindowLayout, mTextEditSuggestionsTopWindowLayout;
private int mTextEditSuggestionItemLayout;
private SuggestionsPopupWindow mSuggestionsPopupWindow;
+ private SuggestionRangeSpan mSuggestionRangeSpan;
private int mCursorDrawableRes;
private final Drawable[] mCursorDrawable = new Drawable[2];
@@ -8225,13 +8231,20 @@
return ((minOffset >= selectionStart) && (maxOffset < selectionEnd));
}
+ private static class SuggestionRangeSpan extends UnderlineSpan {
+ // TODO themable, would be nice to make it a child class of TextAppearanceSpan, but
+ // there is no way to have underline and TextAppearanceSpan.
+ }
+
private class SuggestionsPopupWindow implements OnClickListener {
private static final int MAX_NUMBER_SUGGESTIONS = 5;
- private static final long NO_SUGGESTIONS = -1L;
+ private static final int NO_SUGGESTIONS = -1;
private final PopupWindow mContainer;
private final ViewGroup[] mSuggestionViews = new ViewGroup[2];
private final int[] mSuggestionViewLayouts = new int[] {
mTextEditSuggestionsBottomWindowLayout, mTextEditSuggestionsTopWindowLayout};
+ private WordIterator mWordIterator;
+ private TextAppearanceSpan[] mHighlightSpans = new TextAppearanceSpan[0];
public SuggestionsPopupWindow() {
mContainer = new PopupWindow(TextView.this.mContext, null,
@@ -8244,6 +8257,11 @@
mContainer.setHeight(ViewGroup.LayoutParams.WRAP_CONTENT);
}
+ private class SuggestionInfo {
+ int suggestionStart, suggestionEnd; // range of suggestion item with replacement text
+ int spanStart, spanEnd; // range in TextView where text should be inserted
+ }
+
private ViewGroup getViewGroup(boolean under) {
final int viewIndex = under ? 0 : 1;
ViewGroup viewGroup = mSuggestionViews[viewIndex];
@@ -8277,6 +8295,7 @@
"Inflated TextEdit suggestion item is not a TextView: " + childView);
}
+ childView.setTag(new SuggestionInfo());
viewGroup.addView(childView);
childView.setOnClickListener(this);
}
@@ -8299,21 +8318,28 @@
mContainer.setContentView(viewGroup);
int totalNbSuggestions = 0;
+ int spanUnionStart = mText.length();
+ int spanUnionEnd = 0;
+
for (int spanIndex = 0; spanIndex < nbSpans; spanIndex++) {
SuggestionSpan suggestionSpan = suggestionSpans[spanIndex];
final int spanStart = spannable.getSpanStart(suggestionSpan);
final int spanEnd = spannable.getSpanEnd(suggestionSpan);
- final Long spanRange = packRangeInLong(spanStart, spanEnd);
+ spanUnionStart = Math.min(spanStart, spanUnionStart);
+ spanUnionEnd = Math.max(spanEnd, spanUnionEnd);
String[] suggestions = suggestionSpan.getSuggestions();
int nbSuggestions = suggestions.length;
for (int suggestionIndex = 0; suggestionIndex < nbSuggestions; suggestionIndex++) {
TextView textView = (TextView) viewGroup.getChildAt(totalNbSuggestions);
textView.setText(suggestions[suggestionIndex]);
- textView.setTag(spanRange);
+ SuggestionInfo suggestionInfo = (SuggestionInfo) textView.getTag();
+ suggestionInfo.spanStart = spanStart;
+ suggestionInfo.spanEnd = spanEnd;
totalNbSuggestions++;
- if (totalNbSuggestions == MAX_NUMBER_SUGGESTIONS) {
+ if (totalNbSuggestions > MAX_NUMBER_SUGGESTIONS) {
+ // Also end outer for loop
spanIndex = nbSpans;
break;
}
@@ -8324,8 +8350,18 @@
// TODO Replace by final text, use a dedicated layout, add a fade out timer...
TextView textView = (TextView) viewGroup.getChildAt(0);
textView.setText("No suggestions available");
- textView.setTag(NO_SUGGESTIONS);
+ SuggestionInfo suggestionInfo = (SuggestionInfo) textView.getTag();
+ suggestionInfo.spanStart = NO_SUGGESTIONS;
totalNbSuggestions++;
+ } else {
+ if (mSuggestionRangeSpan == null) mSuggestionRangeSpan = new SuggestionRangeSpan();
+ ((Editable) mText).setSpan(mSuggestionRangeSpan, spanUnionStart, spanUnionEnd,
+ Spanned.SPAN_EXCLUSIVE_EXCLUSIVE);
+
+ for (int i = 0; i < totalNbSuggestions; i++) {
+ final TextView textView = (TextView) viewGroup.getChildAt(i);
+ highlightTextDifferences(textView, spanUnionStart, spanUnionEnd);
+ }
}
for (int i = 0; i < MAX_NUMBER_SUGGESTIONS; i++) {
@@ -8338,7 +8374,158 @@
positionAtCursor();
}
+ private long[] getWordLimits(CharSequence text) {
+ if (mWordIterator == null) mWordIterator = new WordIterator(); // TODO locale
+ mWordIterator.setCharSequence(text);
+
+ // First pass will simply count the number of words to be able to create an array
+ // Not too expensive since previous break positions are cached by the BreakIterator
+ int nbWords = 0;
+ int position = mWordIterator.following(0);
+ while (position != BreakIterator.DONE) {
+ nbWords++;
+ position = mWordIterator.following(position);
+ }
+
+ int index = 0;
+ long[] result = new long[nbWords];
+
+ position = mWordIterator.following(0);
+ while (position != BreakIterator.DONE) {
+ int wordStart = mWordIterator.getBeginning(position);
+ result[index++] = packRangeInLong(wordStart, position);
+ position = mWordIterator.following(position);
+ }
+
+ return result;
+ }
+
+ private TextAppearanceSpan highlightSpan(int index) {
+ final int length = mHighlightSpans.length;
+ if (index < length) {
+ return mHighlightSpans[index];
+ }
+
+ // Assumes indexes are requested in sequence: simply append one more item
+ TextAppearanceSpan[] newArray = new TextAppearanceSpan[length + 1];
+ System.arraycopy(mHighlightSpans, 0, newArray, 0, length);
+ TextAppearanceSpan highlightSpan = new TextAppearanceSpan(mContext,
+ android.R.style.TextAppearance_SuggestionHighlight);
+ newArray[length] = highlightSpan;
+ mHighlightSpans = newArray;
+ return highlightSpan;
+ }
+
+ private void highlightTextDifferences(TextView textView, int unionStart, int unionEnd) {
+ SuggestionInfo suggestionInfo = (SuggestionInfo) textView.getTag();
+ final int spanStart = suggestionInfo.spanStart;
+ final int spanEnd = suggestionInfo.spanEnd;
+
+ // Remove all text formatting by converting to Strings
+ final String text = textView.getText().toString();
+ final String sourceText = mText.subSequence(spanStart, spanEnd).toString();
+
+ long[] sourceWordLimits = getWordLimits(sourceText);
+ long[] wordLimits = getWordLimits(text);
+
+ SpannableStringBuilder ssb = new SpannableStringBuilder();
+ // span [spanStart, spanEnd] is included in the union [spanUnionStart, spanUnionEnd]
+ // The final result is made of 3 parts: the text before, between and after the span
+ // This is the text before, provided for context
+ ssb.append(mText.subSequence(unionStart, spanStart).toString());
+
+ // shift is used to offset span positions relative to the span's beginning
+ final int shift = spanStart - unionStart;
+ suggestionInfo.suggestionStart = shift;
+ suggestionInfo.suggestionEnd = shift + text.length();
+
+ // This is the actual suggestion text, which will be highlighted by the following code
+ ssb.append(text);
+
+ String[] words = new String[wordLimits.length];
+ for (int i = 0; i < wordLimits.length; i++) {
+ int wordStart = extractRangeStartFromLong(wordLimits[i]);
+ int wordEnd = extractRangeEndFromLong(wordLimits[i]);
+ words[i] = text.substring(wordStart, wordEnd);
+ }
+
+ // The highlighting algorithm is based on word matching between source and text
+ // Matching words are found from left to right. TODO: change for RTL languages
+ // Characters between matching words are highlighted
+ int previousCommonWordIndex = -1;
+ int nbHighlightSpans = 0;
+ for (int i = 0; i < sourceWordLimits.length; i++) {
+ int wordStart = extractRangeStartFromLong(sourceWordLimits[i]);
+ int wordEnd = extractRangeEndFromLong(sourceWordLimits[i]);
+ String sourceWord = sourceText.substring(wordStart, wordEnd);
+
+ for (int j = previousCommonWordIndex + 1; j < words.length; j++) {
+ if (sourceWord.equals(words[j])) {
+ if (j != previousCommonWordIndex + 1) {
+ int firstDifferentPosition = previousCommonWordIndex < 0 ? 0 :
+ extractRangeEndFromLong(wordLimits[previousCommonWordIndex]);
+ int lastDifferentPosition = extractRangeStartFromLong(wordLimits[j]);
+ ssb.setSpan(highlightSpan(nbHighlightSpans++),
+ shift + firstDifferentPosition, shift + lastDifferentPosition,
+ Spanned.SPAN_EXCLUSIVE_EXCLUSIVE);
+ } else {
+ // Compare characters between words
+ int previousSourceWordEnd = i == 0 ? 0 :
+ extractRangeEndFromLong(sourceWordLimits[i - 1]);
+ int sourceWordStart = extractRangeStartFromLong(sourceWordLimits[i]);
+ String sourceSpaces = sourceText.substring(previousSourceWordEnd,
+ sourceWordStart);
+
+ int previousWordEnd = j == 0 ? 0 :
+ extractRangeEndFromLong(wordLimits[j - 1]);
+ int currentWordStart = extractRangeStartFromLong(wordLimits[j]);
+ String textSpaces = text.substring(previousWordEnd, currentWordStart);
+
+ if (!sourceSpaces.equals(textSpaces)) {
+ ssb.setSpan(highlightSpan(nbHighlightSpans++),
+ shift + previousWordEnd, shift + currentWordStart,
+ Spanned.SPAN_EXCLUSIVE_EXCLUSIVE);
+ }
+ }
+ previousCommonWordIndex = j;
+ break;
+ }
+ }
+ }
+
+ // Finally, compare ends of Strings
+ if (previousCommonWordIndex < words.length - 1) {
+ int firstDifferentPosition = previousCommonWordIndex < 0 ? 0 :
+ extractRangeEndFromLong(wordLimits[previousCommonWordIndex]);
+ int lastDifferentPosition = textView.length();
+ ssb.setSpan(highlightSpan(nbHighlightSpans++),
+ shift + firstDifferentPosition, shift + lastDifferentPosition,
+ Spanned.SPAN_EXCLUSIVE_EXCLUSIVE);
+ } else {
+ int lastSourceWordEnd = sourceWordLimits.length == 0 ? 0 :
+ extractRangeEndFromLong(sourceWordLimits[sourceWordLimits.length - 1]);
+ String sourceSpaces = sourceText.substring(lastSourceWordEnd, sourceText.length());
+
+ int lastCommonTextWordEnd = previousCommonWordIndex < 0 ? 0 :
+ extractRangeEndFromLong(wordLimits[previousCommonWordIndex]);
+ String textSpaces = text.substring(lastCommonTextWordEnd, textView.length());
+
+ if (!sourceSpaces.equals(textSpaces) && textSpaces.length() > 0) {
+ ssb.setSpan(highlightSpan(nbHighlightSpans++),
+ shift + lastCommonTextWordEnd, shift + textView.length(),
+ Spanned.SPAN_EXCLUSIVE_EXCLUSIVE);
+ }
+ }
+
+ // Final part, text after the current suggestion range.
+ ssb.append(mText.subSequence(spanEnd, unionEnd).toString());
+ textView.setText(ssb);
+ }
+
public void hide() {
+ if ((mText instanceof Editable) && mSuggestionRangeSpan != null) {
+ ((Editable) mText).removeSpan(mSuggestionRangeSpan);
+ }
mContainer.dismiss();
}
@@ -8346,11 +8533,15 @@
public void onClick(View view) {
if (view instanceof TextView) {
TextView textView = (TextView) view;
- Long range = ((Long) view.getTag());
- if (range != NO_SUGGESTIONS) {
- final int spanStart = extractRangeStartFromLong(range);
- final int spanEnd = extractRangeEndFromLong(range);
- ((Editable) mText).replace(spanStart, spanEnd, textView.getText());
+ SuggestionInfo suggestionInfo = (SuggestionInfo) textView.getTag();
+ final int spanStart = suggestionInfo.spanStart;
+ final int spanEnd = suggestionInfo.spanEnd;
+ if (spanStart != NO_SUGGESTIONS) {
+ final int suggestionStart = suggestionInfo.suggestionStart;
+ final int suggestionEnd = suggestionInfo.suggestionEnd;
+ final String suggestion = textView.getText().subSequence(
+ suggestionStart, suggestionEnd).toString();
+ ((Editable) mText).replace(spanStart, spanEnd, suggestion);
}
}
hide();
diff --git a/core/jni/android_media_AudioRecord.cpp b/core/jni/android_media_AudioRecord.cpp
index 1b6b24f..0afc523 100644
--- a/core/jni/android_media_AudioRecord.cpp
+++ b/core/jni/android_media_AudioRecord.cpp
@@ -31,6 +31,9 @@
#include "media/AudioRecord.h"
#include "media/mediarecorder.h"
+#include <cutils/bitops.h>
+
+#include <hardware/audio.h>
// ----------------------------------------------------------------------------
@@ -130,11 +133,11 @@
//LOGV("sampleRate=%d, audioFormat=%d, channels=%x, buffSizeInBytes=%d",
// sampleRateInHertz, audioFormat, channels, buffSizeInBytes);
- if (!AudioSystem::isInputChannel(channels)) {
+ if (!audio_is_input_channel(channels)) {
LOGE("Error creating AudioRecord: channel count is not 1 or 2.");
return AUDIORECORD_ERROR_SETUP_INVALIDCHANNELMASK;
}
- uint32_t nbChannels = AudioSystem::popCount(channels);
+ uint32_t nbChannels = popcount(channels);
// compare the format against the Java constants
if ((audioFormat != javaAudioRecordFields.PCM16)
@@ -145,7 +148,7 @@
int bytesPerSample = audioFormat==javaAudioRecordFields.PCM16 ? 2 : 1;
int format = audioFormat==javaAudioRecordFields.PCM16 ?
- AudioSystem::PCM_16_BIT : AudioSystem::PCM_8_BIT;
+ AUDIO_FORMAT_PCM_16_BIT : AUDIO_FORMAT_PCM_8_BIT;
if (buffSizeInBytes == 0) {
LOGE("Error creating AudioRecord: frameCount is 0.");
@@ -154,7 +157,7 @@
int frameSize = nbChannels * bytesPerSample;
size_t frameCount = buffSizeInBytes / frameSize;
- if (source >= AUDIO_SOURCE_LIST_END) {
+ if (source >= AUDIO_SOURCE_CNT) {
LOGE("Error creating AudioRecord: unknown source.");
return AUDIORECORD_ERROR_SETUP_INVALIDSOURCE;
}
@@ -463,7 +466,7 @@
status_t result = AudioRecord::getMinFrameCount(&frameCount,
sampleRateInHertz,
(audioFormat == javaAudioRecordFields.PCM16 ?
- AudioSystem::PCM_16_BIT : AudioSystem::PCM_8_BIT),
+ AUDIO_FORMAT_PCM_16_BIT : AUDIO_FORMAT_PCM_8_BIT),
nbChannels);
if (result == BAD_VALUE) {
diff --git a/core/jni/android_media_AudioSystem.cpp b/core/jni/android_media_AudioSystem.cpp
index 5016bf9..09c0953 100644
--- a/core/jni/android_media_AudioSystem.cpp
+++ b/core/jni/android_media_AudioSystem.cpp
@@ -30,6 +30,9 @@
#include <media/AudioSystem.h>
#include <media/AudioTrack.h>
+#include <hardware/audio.h>
+#include <hardware/audio_policy.h>
+
// ----------------------------------------------------------------------------
using namespace android;
@@ -129,8 +132,8 @@
android_media_AudioSystem_setDeviceConnectionState(JNIEnv *env, jobject thiz, jint device, jint state, jstring device_address)
{
const char *c_address = env->GetStringUTFChars(device_address, NULL);
- int status = check_AudioSystem_Command(AudioSystem::setDeviceConnectionState(static_cast <AudioSystem::audio_devices>(device),
- static_cast <AudioSystem::device_connection_state>(state),
+ int status = check_AudioSystem_Command(AudioSystem::setDeviceConnectionState(static_cast <audio_devices_t>(device),
+ static_cast <audio_policy_dev_state_t>(state),
c_address));
env->ReleaseStringUTFChars(device_address, c_address);
return status;
@@ -140,7 +143,7 @@
android_media_AudioSystem_getDeviceConnectionState(JNIEnv *env, jobject thiz, jint device, jstring device_address)
{
const char *c_address = env->GetStringUTFChars(device_address, NULL);
- int state = static_cast <int>(AudioSystem::getDeviceConnectionState(static_cast <AudioSystem::audio_devices>(device),
+ int state = static_cast <int>(AudioSystem::getDeviceConnectionState(static_cast <audio_devices_t>(device),
c_address));
env->ReleaseStringUTFChars(device_address, c_address);
return state;
@@ -161,20 +164,20 @@
static int
android_media_AudioSystem_setForceUse(JNIEnv *env, jobject thiz, jint usage, jint config)
{
- return check_AudioSystem_Command(AudioSystem::setForceUse(static_cast <AudioSystem::force_use>(usage),
- static_cast <AudioSystem::forced_config>(config)));
+ return check_AudioSystem_Command(AudioSystem::setForceUse(static_cast <audio_policy_force_use_t>(usage),
+ static_cast <audio_policy_forced_cfg_t>(config)));
}
static int
android_media_AudioSystem_getForceUse(JNIEnv *env, jobject thiz, jint usage)
{
- return static_cast <int>(AudioSystem::getForceUse(static_cast <AudioSystem::force_use>(usage)));
+ return static_cast <int>(AudioSystem::getForceUse(static_cast <audio_policy_force_use_t>(usage)));
}
static int
android_media_AudioSystem_initStreamVolume(JNIEnv *env, jobject thiz, jint stream, jint indexMin, jint indexMax)
{
- return check_AudioSystem_Command(AudioSystem::initStreamVolume(static_cast <AudioSystem::stream_type>(stream),
+ return check_AudioSystem_Command(AudioSystem::initStreamVolume(static_cast <audio_stream_type_t>(stream),
indexMin,
indexMax));
}
@@ -182,14 +185,14 @@
static int
android_media_AudioSystem_setStreamVolumeIndex(JNIEnv *env, jobject thiz, jint stream, jint index)
{
- return check_AudioSystem_Command(AudioSystem::setStreamVolumeIndex(static_cast <AudioSystem::stream_type>(stream), index));
+ return check_AudioSystem_Command(AudioSystem::setStreamVolumeIndex(static_cast <audio_stream_type_t>(stream), index));
}
static int
android_media_AudioSystem_getStreamVolumeIndex(JNIEnv *env, jobject thiz, jint stream)
{
int index;
- if (AudioSystem::getStreamVolumeIndex(static_cast <AudioSystem::stream_type>(stream), &index) != NO_ERROR) {
+ if (AudioSystem::getStreamVolumeIndex(static_cast <audio_stream_type_t>(stream), &index) != NO_ERROR) {
index = -1;
}
return index;
@@ -198,7 +201,7 @@
static jint
android_media_AudioSystem_getDevicesForStream(JNIEnv *env, jobject thiz, jint stream)
{
- return (jint) AudioSystem::getDevicesForStream(static_cast <AudioSystem::stream_type>(stream));
+ return (jint) AudioSystem::getDevicesForStream(static_cast <audio_stream_type_t>(stream));
}
// ----------------------------------------------------------------------------
diff --git a/core/jni/android_media_AudioTrack.cpp b/core/jni/android_media_AudioTrack.cpp
index 587a16c..e3b2f2c 100644
--- a/core/jni/android_media_AudioTrack.cpp
+++ b/core/jni/android_media_AudioTrack.cpp
@@ -33,6 +33,9 @@
#include <binder/MemoryHeapBase.h>
#include <binder/MemoryBase.h>
+#include <cutils/bitops.h>
+
+#include <hardware/audio.h>
// ----------------------------------------------------------------------------
@@ -77,7 +80,7 @@
AudioTrackJniStorage() {
mCallbackData.audioTrack_class = 0;
mCallbackData.audioTrack_ref = 0;
- mStreamType = AudioSystem::DEFAULT;
+ mStreamType = AUDIO_STREAM_DEFAULT;
}
~AudioTrackJniStorage() {
@@ -181,30 +184,30 @@
return AUDIOTRACK_ERROR_SETUP_AUDIOSYSTEM;
}
- if (!AudioSystem::isOutputChannel(channels)) {
+ if (!audio_is_output_channel(channels)) {
LOGE("Error creating AudioTrack: invalid channel mask.");
return AUDIOTRACK_ERROR_SETUP_INVALIDCHANNELMASK;
}
- int nbChannels = AudioSystem::popCount(channels);
+ int nbChannels = popcount(channels);
// check the stream type
- AudioSystem::stream_type atStreamType;
+ audio_stream_type_t atStreamType;
if (streamType == javaAudioTrackFields.STREAM_VOICE_CALL) {
- atStreamType = AudioSystem::VOICE_CALL;
+ atStreamType = AUDIO_STREAM_VOICE_CALL;
} else if (streamType == javaAudioTrackFields.STREAM_SYSTEM) {
- atStreamType = AudioSystem::SYSTEM;
+ atStreamType = AUDIO_STREAM_SYSTEM;
} else if (streamType == javaAudioTrackFields.STREAM_RING) {
- atStreamType = AudioSystem::RING;
+ atStreamType = AUDIO_STREAM_RING;
} else if (streamType == javaAudioTrackFields.STREAM_MUSIC) {
- atStreamType = AudioSystem::MUSIC;
+ atStreamType = AUDIO_STREAM_MUSIC;
} else if (streamType == javaAudioTrackFields.STREAM_ALARM) {
- atStreamType = AudioSystem::ALARM;
+ atStreamType = AUDIO_STREAM_ALARM;
} else if (streamType == javaAudioTrackFields.STREAM_NOTIFICATION) {
- atStreamType = AudioSystem::NOTIFICATION;
+ atStreamType = AUDIO_STREAM_NOTIFICATION;
} else if (streamType == javaAudioTrackFields.STREAM_BLUETOOTH_SCO) {
- atStreamType = AudioSystem::BLUETOOTH_SCO;
+ atStreamType = AUDIO_STREAM_BLUETOOTH_SCO;
} else if (streamType == javaAudioTrackFields.STREAM_DTMF) {
- atStreamType = AudioSystem::DTMF;
+ atStreamType = AUDIO_STREAM_DTMF;
} else {
LOGE("Error creating AudioTrack: unknown stream type.");
return AUDIOTRACK_ERROR_SETUP_INVALIDSTREAMTYPE;
@@ -233,7 +236,7 @@
// compute the frame count
int bytesPerSample = audioFormat == javaAudioTrackFields.PCM16 ? 2 : 1;
int format = audioFormat == javaAudioTrackFields.PCM16 ?
- AudioSystem::PCM_16_BIT : AudioSystem::PCM_8_BIT;
+ AUDIO_FORMAT_PCM_16_BIT : AUDIO_FORMAT_PCM_8_BIT;
int frameCount = buffSizeInBytes / (nbChannels * bytesPerSample);
AudioTrackJniStorage* lpJniStorage = new AudioTrackJniStorage();
@@ -755,25 +758,25 @@
int afSamplingRate;
// convert the stream type from Java to native value
// FIXME: code duplication with android_media_AudioTrack_native_setup()
- AudioSystem::stream_type nativeStreamType;
+ audio_stream_type_t nativeStreamType;
if (javaStreamType == javaAudioTrackFields.STREAM_VOICE_CALL) {
- nativeStreamType = AudioSystem::VOICE_CALL;
+ nativeStreamType = AUDIO_STREAM_VOICE_CALL;
} else if (javaStreamType == javaAudioTrackFields.STREAM_SYSTEM) {
- nativeStreamType = AudioSystem::SYSTEM;
+ nativeStreamType = AUDIO_STREAM_SYSTEM;
} else if (javaStreamType == javaAudioTrackFields.STREAM_RING) {
- nativeStreamType = AudioSystem::RING;
+ nativeStreamType = AUDIO_STREAM_RING;
} else if (javaStreamType == javaAudioTrackFields.STREAM_MUSIC) {
- nativeStreamType = AudioSystem::MUSIC;
+ nativeStreamType = AUDIO_STREAM_MUSIC;
} else if (javaStreamType == javaAudioTrackFields.STREAM_ALARM) {
- nativeStreamType = AudioSystem::ALARM;
+ nativeStreamType = AUDIO_STREAM_ALARM;
} else if (javaStreamType == javaAudioTrackFields.STREAM_NOTIFICATION) {
- nativeStreamType = AudioSystem::NOTIFICATION;
+ nativeStreamType = AUDIO_STREAM_NOTIFICATION;
} else if (javaStreamType == javaAudioTrackFields.STREAM_BLUETOOTH_SCO) {
- nativeStreamType = AudioSystem::BLUETOOTH_SCO;
+ nativeStreamType = AUDIO_STREAM_BLUETOOTH_SCO;
} else if (javaStreamType == javaAudioTrackFields.STREAM_DTMF) {
- nativeStreamType = AudioSystem::DTMF;
+ nativeStreamType = AUDIO_STREAM_DTMF;
} else {
- nativeStreamType = AudioSystem::DEFAULT;
+ nativeStreamType = AUDIO_STREAM_DEFAULT;
}
if (AudioSystem::getOutputSamplingRate(&afSamplingRate, nativeStreamType) != NO_ERROR) {
@@ -793,7 +796,7 @@
jint sampleRateInHertz, jint nbChannels, jint audioFormat) {
int frameCount = 0;
- if (AudioTrack::getMinFrameCount(&frameCount, AudioSystem::DEFAULT,
+ if (AudioTrack::getMinFrameCount(&frameCount, AUDIO_STREAM_DEFAULT,
sampleRateInHertz) != NO_ERROR) {
return -1;
}
diff --git a/core/res/res/drawable-hdpi/text_edit_suggestions_bottom_window.9.png b/core/res/res/drawable-hdpi/text_edit_suggestions_bottom_window.9.png
new file mode 100644
index 0000000..c97514f
--- /dev/null
+++ b/core/res/res/drawable-hdpi/text_edit_suggestions_bottom_window.9.png
Binary files differ
diff --git a/core/res/res/drawable-hdpi/text_edit_suggestions_top_window.9.png b/core/res/res/drawable-hdpi/text_edit_suggestions_top_window.9.png
new file mode 100644
index 0000000..ff6b34a
--- /dev/null
+++ b/core/res/res/drawable-hdpi/text_edit_suggestions_top_window.9.png
Binary files differ
diff --git a/core/res/res/layout/text_edit_suggestion_item.xml b/core/res/res/layout/text_edit_suggestion_item.xml
index a54cad2..ef537d9 100644
--- a/core/res/res/layout/text_edit_suggestion_item.xml
+++ b/core/res/res/layout/text_edit_suggestion_item.xml
@@ -21,7 +21,7 @@
android:paddingRight="16dip"
android:paddingTop="8dip"
android:paddingBottom="8dip"
- android:layout_gravity="center"
+ android:layout_gravity="left|center_vertical"
android:textAppearance="?android:attr/textAppearanceMedium"
- android:textColor="@android:color/black" />
+ android:textColor="@android:color/dim_foreground_light" />
diff --git a/core/res/res/values/colors.xml b/core/res/res/values/colors.xml
index 9c59cb6..39d2329 100644
--- a/core/res/res/values/colors.xml
+++ b/core/res/res/values/colors.xml
@@ -60,6 +60,7 @@
<color name="highlighted_text_light">#9983CC39</color>
<color name="link_text_dark">#5c5cff</color>
<color name="link_text_light">#0000ee</color>
+ <color name="suggestion_highlight_text">#177bbd</color>
<drawable name="stat_notify_sync_noanim">@drawable/stat_notify_sync_anim0</drawable>
<drawable name="stat_sys_download_done">@drawable/stat_sys_download_anim0</drawable>
diff --git a/core/res/res/values/public.xml b/core/res/res/values/public.xml
index 778d934..501d478 100644
--- a/core/res/res/values/public.xml
+++ b/core/res/res/values/public.xml
@@ -1659,6 +1659,7 @@
<public type="attr" name="stopWithTask" />
<public type="style" name="Theme.Holo.Light.NoActionBar" />
+ <public type="style" name="TextAppearance.SuggestionHighlight" />
<public type="attr" name="textSuggestionsWindowStyle" />
<public type="attr" name="textEditSuggestionsBottomWindowLayout" />
diff --git a/core/res/res/values/styles.xml b/core/res/res/values/styles.xml
index 8ce35f8..e666698 100644
--- a/core/res/res/values/styles.xml
+++ b/core/res/res/values/styles.xml
@@ -876,6 +876,13 @@
<item name="android:textSize">30sp</item>
</style>
+ <!-- @hide -->
+ <style name="TextAppearance.SuggestionHighlight">
+ <item name="android:textSize">18sp</item>
+ <item name="android:textColor">@android:color/suggestion_highlight_text</item>
+ <item name="android:textStyle">bold</item>
+ </style>
+
<!-- Preference Styles -->
<style name="Preference">
diff --git a/core/tests/coretests/AndroidManifest.xml b/core/tests/coretests/AndroidManifest.xml
index 4be6995..010e3c3 100644
--- a/core/tests/coretests/AndroidManifest.xml
+++ b/core/tests/coretests/AndroidManifest.xml
@@ -100,6 +100,12 @@
<application android:theme="@style/Theme">
<uses-library android:name="android.test.runner" />
+ <activity android:name="android.view.ViewAttachTestActivity" android:label="View Attach Test">
+ <intent-filter>
+ <action android:name="android.intent.action.MAIN" />
+ <category android:name="android.intent.category.FRAMEWORK_INSTRUMENTATION_TEST" />
+ </intent-filter>
+ </activity>
<activity android:name="StubTestBrowserActivity" android:label="Stubbed Test Browser">
<intent-filter>
<action android:name="android.intent.action.MAIN"/>
diff --git a/core/tests/coretests/res/layout/attach_view_test.xml b/core/tests/coretests/res/layout/attach_view_test.xml
new file mode 100644
index 0000000..42841cb
--- /dev/null
+++ b/core/tests/coretests/res/layout/attach_view_test.xml
@@ -0,0 +1,12 @@
+<?xml version="1.0" encoding="utf-8"?>
+<LinearLayout
+ xmlns:android="http://schemas.android.com/apk/res/android"
+ android:orientation="vertical"
+ android:layout_width="fill_parent"
+ android:layout_height="fill_parent">
+ <android.view.ViewAttachView
+ android:id="@+id/view_attach_view"
+ android:layout_width="match_parent"
+ android:layout_height="match_parent"
+ android:keepScreenOn="true"/>
+</LinearLayout>
diff --git a/core/tests/coretests/src/android/view/ViewAttachTest.java b/core/tests/coretests/src/android/view/ViewAttachTest.java
new file mode 100644
index 0000000..cff66e4
--- /dev/null
+++ b/core/tests/coretests/src/android/view/ViewAttachTest.java
@@ -0,0 +1,54 @@
+/*
+ * Copyright (C) 2011 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package android.view;
+
+import android.content.pm.ActivityInfo;
+import android.os.SystemClock;
+import android.test.ActivityInstrumentationTestCase2;
+
+public class ViewAttachTest extends
+ ActivityInstrumentationTestCase2<ViewAttachTestActivity> {
+
+ public ViewAttachTest() {
+ super(ViewAttachTestActivity.class);
+ }
+
+ /**
+ * Make sure that onAttachedToWindow and onDetachedFromWindow are called in the
+ * correct order. The ViewAttachTestActivity contains a view that will throw a
+ * RuntimeException if onDetachedFromWindow and onAttachedToWindow are called in
+ * the wrong order.
+ *
+ * 1. Start the activity. 2. Perform a series of orientation changes on the
+ * activity (this forces the View hierarchy to be rebuilt, generating
+ * onAttachedToWindow and onDetachedFromWindow calls).
+ *
+ * Expected result: no RuntimeException is thrown from the ViewAttachView in
+ * ViewAttachTestActivity.
+ *
+ * @throws Throwable
+ */
+ public void testAttached() throws Throwable {
+ final ViewAttachTestActivity activity = getActivity();
+ for (int i = 0; i < 20; i++) {
+ activity.setRequestedOrientation(ActivityInfo.SCREEN_ORIENTATION_PORTRAIT);
+ SystemClock.sleep(250);
+ activity.setRequestedOrientation(ActivityInfo.SCREEN_ORIENTATION_LANDSCAPE);
+ SystemClock.sleep(250);
+ }
+ }
+}
diff --git a/core/tests/coretests/src/android/view/ViewAttachTestActivity.java b/core/tests/coretests/src/android/view/ViewAttachTestActivity.java
new file mode 100644
index 0000000..59e25ae
--- /dev/null
+++ b/core/tests/coretests/src/android/view/ViewAttachTestActivity.java
@@ -0,0 +1,31 @@
+/*
+ * Copyright (C) 2011 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package android.view;
+
+import com.android.frameworks.coretests.R;
+
+import android.app.Activity;
+import android.os.Bundle;
+
+public class ViewAttachTestActivity extends Activity {
+ public static final String TAG = "OnAttachedTest";
+ @Override
+ public void onCreate(Bundle savedInstanceState) {
+ super.onCreate(savedInstanceState);
+ setContentView(R.layout.attach_view_test);
+ }
+}
diff --git a/core/tests/coretests/src/android/view/ViewAttachView.java b/core/tests/coretests/src/android/view/ViewAttachView.java
new file mode 100644
index 0000000..5af2d8f
--- /dev/null
+++ b/core/tests/coretests/src/android/view/ViewAttachView.java
@@ -0,0 +1,80 @@
+/*
+ * Copyright (C) 2011 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package android.view;
+
+import android.content.Context;
+import android.graphics.Canvas;
+import android.graphics.Color;
+import android.os.SystemClock;
+import android.util.AttributeSet;
+import android.util.Log;
+import android.view.View;
+
+/**
+ * A View that will throw a RuntimeException if onAttachedToWindow and
+ * onDetachedFromWindow are called in the wrong order, for use by ViewAttachTest.
+ */
+public class ViewAttachView extends View {
+ public static final String TAG = "OnAttachedTest";
+ private boolean attached;
+
+ public ViewAttachView(Context context, AttributeSet attrs, int defStyle) {
+ super(context, attrs, defStyle);
+ init(attrs, defStyle);
+ }
+
+ public ViewAttachView(Context context, AttributeSet attrs) {
+ super(context, attrs);
+ init(attrs, 0);
+ }
+
+ public ViewAttachView(Context context) {
+ super(context);
+ init(null, 0);
+ }
+
+ private void init(AttributeSet attrs, int defStyle) {
+ SystemClock.sleep(2000);
+ }
+
+ @Override
+ protected void onAttachedToWindow() {
+ Log.d(TAG, "onAttachedToWindow");
+ super.onAttachedToWindow();
+ if (attached) {
+ throw new RuntimeException("onAttachedToWindow called more than once in a row");
+ }
+ attached = true;
+ }
+
+ @Override
+ protected void onDetachedFromWindow() {
+ Log.d(TAG, "onDetachedFromWindow");
+ super.onDetachedFromWindow();
+ if (!attached) {
+ throw new RuntimeException(
+ "onDetachedFromWindowcalled without prior call to OnAttachedToWindow");
+ }
+ attached = false;
+ }
+
+ @Override
+ protected void onDraw(Canvas canvas) {
+ super.onDraw(canvas);
+ canvas.drawColor(Color.BLUE);
+ }
+}
diff --git a/graphics/jni/android_renderscript_RenderScript.cpp b/graphics/jni/android_renderscript_RenderScript.cpp
index b752850..c3f2360 100644
--- a/graphics/jni/android_renderscript_RenderScript.cpp
+++ b/graphics/jni/android_renderscript_RenderScript.cpp
@@ -237,7 +237,11 @@
size_t receiveLen;
uint32_t subID;
- int id = rsContextGetMessage(con, buf, &receiveLen, &subID, sizeof(buf), true);
+ int id = rsContextGetMessage(con,
+ buf, sizeof(buf),
+ &receiveLen, sizeof(receiveLen),
+ &subID, sizeof(subID),
+ true);
if (!id && receiveLen) {
LOGV("message receive buffer too small. %i", receiveLen);
}
@@ -252,7 +256,11 @@
jint *ptr = _env->GetIntArrayElements(data, NULL);
size_t receiveLen;
uint32_t subID;
- int id = rsContextGetMessage(con, ptr, &receiveLen, &subID, len * 4, true);
+ int id = rsContextGetMessage(con,
+ ptr, len * 4,
+ &receiveLen, sizeof(receiveLen),
+ &subID, sizeof(subID),
+ true);
if (!id && receiveLen) {
LOGV("message receive buffer too small. %i", receiveLen);
}
@@ -266,7 +274,8 @@
jint *auxDataPtr = _env->GetIntArrayElements(auxData, NULL);
size_t receiveLen;
uint32_t subID;
- int id = rsContextPeekMessage(con, &receiveLen, &subID, wait);
+ int id = rsContextPeekMessage(con, &receiveLen, sizeof(receiveLen),
+ &subID, sizeof(subID), wait);
auxDataPtr[0] = (jint)subID;
auxDataPtr[1] = (jint)receiveLen;
_env->ReleaseIntArrayElements(auxData, auxDataPtr, 0);
@@ -426,7 +435,9 @@
bitmap.lockPixels();
const void* ptr = bitmap.getPixels();
- jint id = (jint)rsaAllocationCreateFromBitmap(con, (RsType)type, (RsAllocationMipmapControl)mip, ptr, usage);
+ jint id = (jint)rsaAllocationCreateFromBitmap(con,
+ (RsType)type, (RsAllocationMipmapControl)mip,
+ ptr, bitmap.getSize(), usage);
bitmap.unlockPixels();
return id;
}
@@ -440,7 +451,9 @@
bitmap.lockPixels();
const void* ptr = bitmap.getPixels();
- jint id = (jint)rsaAllocationCubeCreateFromBitmap(con, (RsType)type, (RsAllocationMipmapControl)mip, ptr, usage);
+ jint id = (jint)rsaAllocationCubeCreateFromBitmap(con,
+ (RsType)type, (RsAllocationMipmapControl)mip,
+ ptr, bitmap.getSize(), usage);
bitmap.unlockPixels();
return id;
}
diff --git a/include/media/AudioParameter.h b/include/media/AudioParameter.h
new file mode 100644
index 0000000..79d5d82
--- /dev/null
+++ b/include/media/AudioParameter.h
@@ -0,0 +1,72 @@
+/*
+ * Copyright (C) 2008-2011 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ANDROID_AUDIOPARAMETER_H_
+#define ANDROID_AUDIOPARAMETER_H_
+
+#include <utils/Errors.h>
+#include <utils/KeyedVector.h>
+#include <utils/String8.h>
+
+namespace android {
+
+class AudioParameter {
+
+public:
+ AudioParameter() {}
+ AudioParameter(const String8& keyValuePairs);
+ virtual ~AudioParameter();
+
+ // reserved parameter keys for changing standard parameters with setParameters() function.
+ // Using these keys is mandatory for AudioFlinger to properly monitor audio output/input
+ // configuration changes and act accordingly.
+ // keyRouting: to change audio routing, value is an int in audio_devices_t
+ // keySamplingRate: to change the sampling rate, value is an int
+ // keyFormat: to change audio format, value is an int in audio_format_t
+ // keyChannels: to change audio channel configuration, value is an int in audio_channels_t
+ // keyFrameCount: to change audio output frame count, value is an int
+ // keyInputSource: to change audio input source, value is an int in audio_source_t
+ // (defined in media/mediarecorder.h)
+ static const char *keyRouting;
+ static const char *keySamplingRate;
+ static const char *keyFormat;
+ static const char *keyChannels;
+ static const char *keyFrameCount;
+ static const char *keyInputSource;
+
+ String8 toString();
+
+ status_t add(const String8& key, const String8& value);
+ status_t addInt(const String8& key, const int value);
+ status_t addFloat(const String8& key, const float value);
+
+ status_t remove(const String8& key);
+
+ status_t get(const String8& key, String8& value);
+ status_t getInt(const String8& key, int& value);
+ status_t getFloat(const String8& key, float& value);
+ status_t getAt(size_t index, String8& key, String8& value);
+
+ size_t size() { return mParameters.size(); }
+
+private:
+ String8 mKeyValuePairs;
+ KeyedVector <String8, String8> mParameters;
+};
+
+}; // namespace android
+
+#endif /*ANDROID_AUDIOPARAMETER_H_*/
diff --git a/include/media/AudioRecord.h b/include/media/AudioRecord.h
index 293764d..def3612 100644
--- a/include/media/AudioRecord.h
+++ b/include/media/AudioRecord.h
@@ -30,6 +30,7 @@
#include <binder/IMemory.h>
#include <utils/threads.h>
+#include <hardware/audio.h>
namespace android {
@@ -127,9 +128,9 @@
*
* inputSource: Select the audio input to record to (e.g. AUDIO_SOURCE_DEFAULT).
* sampleRate: Track sampling rate in Hz.
- * format: Audio format (e.g AudioSystem::PCM_16_BIT for signed
+ * format: Audio format (e.g AUDIO_FORMAT_PCM_16_BIT for signed
* 16 bits per sample).
- * channels: Channel mask: see AudioSystem::audio_channels.
+ * channels: Channel mask: see audio_channels_t.
* frameCount: Total size of track PCM buffer in frames. This defines the
* latency of the track.
* flags: A bitmask of acoustic values from enum record_flags. It enables
@@ -142,15 +143,15 @@
*/
enum record_flags {
- RECORD_AGC_ENABLE = AudioSystem::AGC_ENABLE,
- RECORD_NS_ENABLE = AudioSystem::NS_ENABLE,
- RECORD_IIR_ENABLE = AudioSystem::TX_IIR_ENABLE
+ RECORD_AGC_ENABLE = AUDIO_IN_ACOUSTICS_AGC_ENABLE,
+ RECORD_NS_ENABLE = AUDIO_IN_ACOUSTICS_NS_ENABLE,
+ RECORD_IIR_ENABLE = AUDIO_IN_ACOUSTICS_TX_IIR_ENABLE,
};
AudioRecord(int inputSource,
uint32_t sampleRate = 0,
int format = 0,
- uint32_t channels = AudioSystem::CHANNEL_IN_MONO,
+ uint32_t channels = AUDIO_CHANNEL_IN_MONO,
int frameCount = 0,
uint32_t flags = 0,
callback_t cbf = 0,
@@ -176,7 +177,7 @@
status_t set(int inputSource = 0,
uint32_t sampleRate = 0,
int format = 0,
- uint32_t channels = AudioSystem::CHANNEL_IN_MONO,
+ uint32_t channels = AUDIO_CHANNEL_IN_MONO,
int frameCount = 0,
uint32_t flags = 0,
callback_t cbf = 0,
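A minimal usage sketch of the constructor above with the new constants (not part
of this change; it assumes AUDIO_SOURCE_MIC is provided by <hardware/audio.h>,
since the audio_source enum is dropped from mediarecorder.h later in this patch):

    #include <media/AudioRecord.h>
    #include <hardware/audio.h>
    using namespace android;

    AudioRecord* openMicRecord() {
        AudioRecord* record = new AudioRecord(
                AUDIO_SOURCE_MIC,
                44100,                      // sample rate in Hz
                AUDIO_FORMAT_PCM_16_BIT,    // was AudioSystem::PCM_16_BIT
                AUDIO_CHANNEL_IN_MONO);     // was AudioSystem::CHANNEL_IN_MONO
        if (record->initCheck() != NO_ERROR) {
            delete record;
            return NULL;
        }
        return record;
    }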
diff --git a/include/media/AudioSystem.h b/include/media/AudioSystem.h
index edf4b8b7..eb61a87 100644
--- a/include/media/AudioSystem.h
+++ b/include/media/AudioSystem.h
@@ -21,10 +21,15 @@
#include <utils/threads.h>
#include <media/IAudioFlinger.h>
+#include <hardware/audio.h>
+#include <hardware/audio_policy.h>
+
+/* XXX: Should be included by all users instead */
+#include <media/AudioParameter.h>
+
namespace android {
typedef void (*audio_error_callback)(status_t err);
-typedef int audio_io_handle_t;
class IAudioPolicyService;
class String8;
@@ -33,155 +38,6 @@
{
public:
- // must match android/media/AudioSystem.java STREAM_* constants
- enum stream_type {
- DEFAULT =-1,
- VOICE_CALL = 0,
- SYSTEM = 1,
- RING = 2,
- MUSIC = 3,
- ALARM = 4,
- NOTIFICATION = 5,
- BLUETOOTH_SCO = 6,
- ENFORCED_AUDIBLE = 7, // Sounds that cannot be muted by user and must be routed to speaker
- DTMF = 8,
- TTS = 9,
- NUM_STREAM_TYPES
- };
-
- // Audio sub formats (see AudioSystem::audio_format).
- enum pcm_sub_format {
- PCM_SUB_16_BIT = 0x1, // must be 1 for backward compatibility
- PCM_SUB_8_BIT = 0x2, // must be 2 for backward compatibility
- };
-
- // FIXME These sub_format enums are currently unused
-
- // MP3 sub format field definition : can use 11 LSBs in the same way as MP3 frame header to specify
- // bit rate, stereo mode, version...
- enum mp3_sub_format {
- //TODO
- };
-
- // AMR NB/WB sub format field definition: specify frame block interleaving, bandwidth efficient or octet aligned,
- // encoding mode for recording...
- enum amr_sub_format {
- //TODO
- };
-
- // AAC sub format field definition: specify profile or bitrate for recording...
- enum aac_sub_format {
- //TODO
- };
-
- // VORBIS sub format field definition: specify quality for recording...
- enum vorbis_sub_format {
- //TODO
- };
-
- // Audio format consists in a main format field (upper 8 bits) and a sub format field (lower 24 bits).
- // The main format indicates the main codec type. The sub format field indicates options and parameters
- // for each format. The sub format is mainly used for record to indicate for instance the requested bitrate
- // or profile. It can also be used for certain formats to give informations not present in the encoded
- // audio stream (e.g. octet alignement for AMR).
- enum audio_format {
- INVALID_FORMAT = -1,
- FORMAT_DEFAULT = 0,
- PCM = 0x00000000, // must be 0 for backward compatibility
- MP3 = 0x01000000,
- AMR_NB = 0x02000000,
- AMR_WB = 0x03000000,
- AAC = 0x04000000,
- HE_AAC_V1 = 0x05000000,
- HE_AAC_V2 = 0x06000000,
- VORBIS = 0x07000000,
- MAIN_FORMAT_MASK = 0xFF000000,
- SUB_FORMAT_MASK = 0x00FFFFFF,
- // Aliases
- PCM_16_BIT = (PCM|PCM_SUB_16_BIT),
- PCM_8_BIT = (PCM|PCM_SUB_8_BIT)
- };
-
-
- // Channel mask definitions must be kept in sync with values in /media/java/android/media/AudioFormat.java
- enum audio_channels {
- // output channels
- CHANNEL_OUT_FRONT_LEFT = 0x4,
- CHANNEL_OUT_FRONT_RIGHT = 0x8,
- CHANNEL_OUT_FRONT_CENTER = 0x10,
- CHANNEL_OUT_LOW_FREQUENCY = 0x20,
- CHANNEL_OUT_BACK_LEFT = 0x40,
- CHANNEL_OUT_BACK_RIGHT = 0x80,
- CHANNEL_OUT_FRONT_LEFT_OF_CENTER = 0x100,
- CHANNEL_OUT_FRONT_RIGHT_OF_CENTER = 0x200,
- CHANNEL_OUT_BACK_CENTER = 0x400,
- CHANNEL_OUT_MONO = CHANNEL_OUT_FRONT_LEFT,
- CHANNEL_OUT_STEREO = (CHANNEL_OUT_FRONT_LEFT | CHANNEL_OUT_FRONT_RIGHT),
- CHANNEL_OUT_QUAD = (CHANNEL_OUT_FRONT_LEFT | CHANNEL_OUT_FRONT_RIGHT |
- CHANNEL_OUT_BACK_LEFT | CHANNEL_OUT_BACK_RIGHT),
- CHANNEL_OUT_SURROUND = (CHANNEL_OUT_FRONT_LEFT | CHANNEL_OUT_FRONT_RIGHT |
- CHANNEL_OUT_FRONT_CENTER | CHANNEL_OUT_BACK_CENTER),
- CHANNEL_OUT_5POINT1 = (CHANNEL_OUT_FRONT_LEFT | CHANNEL_OUT_FRONT_RIGHT |
- CHANNEL_OUT_FRONT_CENTER | CHANNEL_OUT_LOW_FREQUENCY | CHANNEL_OUT_BACK_LEFT | CHANNEL_OUT_BACK_RIGHT),
- CHANNEL_OUT_7POINT1 = (CHANNEL_OUT_FRONT_LEFT | CHANNEL_OUT_FRONT_RIGHT |
- CHANNEL_OUT_FRONT_CENTER | CHANNEL_OUT_LOW_FREQUENCY | CHANNEL_OUT_BACK_LEFT | CHANNEL_OUT_BACK_RIGHT |
- CHANNEL_OUT_FRONT_LEFT_OF_CENTER | CHANNEL_OUT_FRONT_RIGHT_OF_CENTER),
- CHANNEL_OUT_ALL = (CHANNEL_OUT_FRONT_LEFT | CHANNEL_OUT_FRONT_RIGHT |
- CHANNEL_OUT_FRONT_CENTER | CHANNEL_OUT_LOW_FREQUENCY | CHANNEL_OUT_BACK_LEFT | CHANNEL_OUT_BACK_RIGHT |
- CHANNEL_OUT_FRONT_LEFT_OF_CENTER | CHANNEL_OUT_FRONT_RIGHT_OF_CENTER | CHANNEL_OUT_BACK_CENTER),
-
- // input channels
- CHANNEL_IN_LEFT = 0x4,
- CHANNEL_IN_RIGHT = 0x8,
- CHANNEL_IN_FRONT = 0x10,
- CHANNEL_IN_BACK = 0x20,
- CHANNEL_IN_LEFT_PROCESSED = 0x40,
- CHANNEL_IN_RIGHT_PROCESSED = 0x80,
- CHANNEL_IN_FRONT_PROCESSED = 0x100,
- CHANNEL_IN_BACK_PROCESSED = 0x200,
- CHANNEL_IN_PRESSURE = 0x400,
- CHANNEL_IN_X_AXIS = 0x800,
- CHANNEL_IN_Y_AXIS = 0x1000,
- CHANNEL_IN_Z_AXIS = 0x2000,
- CHANNEL_IN_VOICE_UPLINK = 0x4000,
- CHANNEL_IN_VOICE_DNLINK = 0x8000,
- CHANNEL_IN_MONO = CHANNEL_IN_FRONT,
- CHANNEL_IN_STEREO = (CHANNEL_IN_LEFT | CHANNEL_IN_RIGHT),
- CHANNEL_IN_ALL = (CHANNEL_IN_LEFT | CHANNEL_IN_RIGHT | CHANNEL_IN_FRONT | CHANNEL_IN_BACK|
- CHANNEL_IN_LEFT_PROCESSED | CHANNEL_IN_RIGHT_PROCESSED | CHANNEL_IN_FRONT_PROCESSED | CHANNEL_IN_BACK_PROCESSED|
- CHANNEL_IN_PRESSURE | CHANNEL_IN_X_AXIS | CHANNEL_IN_Y_AXIS | CHANNEL_IN_Z_AXIS |
- CHANNEL_IN_VOICE_UPLINK | CHANNEL_IN_VOICE_DNLINK)
- };
-
- // must match android/media/AudioSystem.java MODE_* values
- enum audio_mode {
- MODE_INVALID = -2,
- MODE_CURRENT = -1,
- MODE_NORMAL = 0,
- MODE_RINGTONE,
- MODE_IN_CALL,
- MODE_IN_COMMUNICATION,
- NUM_MODES // not a valid entry, denotes end-of-list
- };
-
- enum audio_in_acoustics {
- AGC_ENABLE = 0x0001,
- AGC_DISABLE = 0,
- NS_ENABLE = 0x0002,
- NS_DISABLE = 0,
- TX_IIR_ENABLE = 0x0004,
- TX_DISABLE = 0
- };
-
- // special audio session values
- enum audio_sessions {
- SESSION_OUTPUT_STAGE = -1, // session for effects attached to a particular output stream
- // (value must be less than 0)
- SESSION_OUTPUT_MIX = 0, // session for effects applied to output mix. These effects can
- // be moved by audio policy manager to another output stream
- // (value must be 0)
- };
-
/* These are static methods to control the system-wide AudioFlinger
* only privileged processes can have access to them
*/
@@ -206,7 +62,7 @@
static status_t setStreamMute(int stream, bool mute);
static status_t getStreamMute(int stream, bool* mute);
- // set audio mode in audio hardware (see AudioSystem::audio_mode)
+ // set audio mode in audio hardware (see audio_mode_t)
static status_t setMode(int mode);
// returns true in *state if tracks are active on the specified stream or has been active
@@ -227,9 +83,9 @@
static float linearToLog(int volume);
static int logToLinear(float volume);
- static status_t getOutputSamplingRate(int* samplingRate, int stream = DEFAULT);
- static status_t getOutputFrameCount(int* frameCount, int stream = DEFAULT);
- static status_t getOutputLatency(uint32_t* latency, int stream = DEFAULT);
+ static status_t getOutputSamplingRate(int* samplingRate, int stream = AUDIO_STREAM_DEFAULT);
+ static status_t getOutputFrameCount(int* frameCount, int stream = AUDIO_STREAM_DEFAULT);
+ static status_t getOutputLatency(uint32_t* latency, int stream = AUDIO_STREAM_DEFAULT);
static bool routedToA2dpOutput(int streamType);
@@ -247,93 +103,11 @@
// - BAD_VALUE: invalid parameter
// NOTE: this feature is not supported on all hardware platforms and it is
// necessary to check returned status before using the returned values.
- static status_t getRenderPosition(uint32_t *halFrames, uint32_t *dspFrames, int stream = DEFAULT);
+ static status_t getRenderPosition(uint32_t *halFrames, uint32_t *dspFrames, int stream = AUDIO_STREAM_DEFAULT);
static unsigned int getInputFramesLost(audio_io_handle_t ioHandle);
static int newAudioSessionId();
- //
- // AudioPolicyService interface
- //
-
- enum audio_devices {
- // output devices
- DEVICE_OUT_EARPIECE = 0x1,
- DEVICE_OUT_SPEAKER = 0x2,
- DEVICE_OUT_WIRED_HEADSET = 0x4,
- DEVICE_OUT_WIRED_HEADPHONE = 0x8,
- DEVICE_OUT_BLUETOOTH_SCO = 0x10,
- DEVICE_OUT_BLUETOOTH_SCO_HEADSET = 0x20,
- DEVICE_OUT_BLUETOOTH_SCO_CARKIT = 0x40,
- DEVICE_OUT_BLUETOOTH_A2DP = 0x80,
- DEVICE_OUT_BLUETOOTH_A2DP_HEADPHONES = 0x100,
- DEVICE_OUT_BLUETOOTH_A2DP_SPEAKER = 0x200,
- DEVICE_OUT_AUX_DIGITAL = 0x400,
- DEVICE_OUT_ANLG_DOCK_HEADSET = 0x800,
- DEVICE_OUT_DGTL_DOCK_HEADSET = 0x1000,
- DEVICE_OUT_DEFAULT = 0x8000,
- DEVICE_OUT_ALL = (DEVICE_OUT_EARPIECE | DEVICE_OUT_SPEAKER | DEVICE_OUT_WIRED_HEADSET |
- DEVICE_OUT_WIRED_HEADPHONE | DEVICE_OUT_BLUETOOTH_SCO | DEVICE_OUT_BLUETOOTH_SCO_HEADSET |
- DEVICE_OUT_BLUETOOTH_SCO_CARKIT | DEVICE_OUT_BLUETOOTH_A2DP | DEVICE_OUT_BLUETOOTH_A2DP_HEADPHONES |
- DEVICE_OUT_BLUETOOTH_A2DP_SPEAKER | DEVICE_OUT_AUX_DIGITAL |
- DEVICE_OUT_ANLG_DOCK_HEADSET | DEVICE_OUT_DGTL_DOCK_HEADSET |
- DEVICE_OUT_DEFAULT),
- DEVICE_OUT_ALL_A2DP = (DEVICE_OUT_BLUETOOTH_A2DP | DEVICE_OUT_BLUETOOTH_A2DP_HEADPHONES |
- DEVICE_OUT_BLUETOOTH_A2DP_SPEAKER),
-
- // input devices
- DEVICE_IN_COMMUNICATION = 0x10000,
- DEVICE_IN_AMBIENT = 0x20000,
- DEVICE_IN_BUILTIN_MIC = 0x40000,
- DEVICE_IN_BLUETOOTH_SCO_HEADSET = 0x80000,
- DEVICE_IN_WIRED_HEADSET = 0x100000,
- DEVICE_IN_AUX_DIGITAL = 0x200000,
- DEVICE_IN_VOICE_CALL = 0x400000,
- DEVICE_IN_BACK_MIC = 0x800000,
- DEVICE_IN_DEFAULT = 0x80000000,
-
- DEVICE_IN_ALL = (DEVICE_IN_COMMUNICATION | DEVICE_IN_AMBIENT | DEVICE_IN_BUILTIN_MIC |
- DEVICE_IN_BLUETOOTH_SCO_HEADSET | DEVICE_IN_WIRED_HEADSET | DEVICE_IN_AUX_DIGITAL |
- DEVICE_IN_VOICE_CALL | DEVICE_IN_BACK_MIC | DEVICE_IN_DEFAULT)
- };
-
- // device connection states used for setDeviceConnectionState()
- enum device_connection_state {
- DEVICE_STATE_UNAVAILABLE,
- DEVICE_STATE_AVAILABLE,
- NUM_DEVICE_STATES
- };
-
- // request to open a direct output with getOutput() (by opposition to sharing an output with other AudioTracks)
- enum output_flags {
- OUTPUT_FLAG_INDIRECT = 0x0,
- OUTPUT_FLAG_DIRECT = 0x1
- };
-
- // device categories used for setForceUse()
- enum forced_config {
- FORCE_NONE,
- FORCE_SPEAKER,
- FORCE_HEADPHONES,
- FORCE_BT_SCO,
- FORCE_BT_A2DP,
- FORCE_WIRED_ACCESSORY,
- FORCE_BT_CAR_DOCK,
- FORCE_BT_DESK_DOCK,
- FORCE_ANALOG_DOCK,
- FORCE_DIGITAL_DOCK,
- NUM_FORCE_CONFIG,
- FORCE_DEFAULT = FORCE_NONE
- };
-
- // usages used for setForceUse(), must match AudioSystem.java
- enum force_use {
- FOR_COMMUNICATION,
- FOR_MEDIA,
- FOR_RECORD,
- FOR_DOCK,
- NUM_FORCE_USE
- };
// types of io configuration change events received with ioConfigChanged()
enum io_config_event {
@@ -364,40 +138,40 @@
//
// IAudioPolicyService interface (see AudioPolicyInterface for method descriptions)
//
- static status_t setDeviceConnectionState(audio_devices device, device_connection_state state, const char *device_address);
- static device_connection_state getDeviceConnectionState(audio_devices device, const char *device_address);
+ static status_t setDeviceConnectionState(audio_devices_t device, audio_policy_dev_state_t state, const char *device_address);
+ static audio_policy_dev_state_t getDeviceConnectionState(audio_devices_t device, const char *device_address);
static status_t setPhoneState(int state);
static status_t setRingerMode(uint32_t mode, uint32_t mask);
- static status_t setForceUse(force_use usage, forced_config config);
- static forced_config getForceUse(force_use usage);
- static audio_io_handle_t getOutput(stream_type stream,
+ static status_t setForceUse(audio_policy_force_use_t usage, audio_policy_forced_cfg_t config);
+ static audio_policy_forced_cfg_t getForceUse(audio_policy_force_use_t usage);
+ static audio_io_handle_t getOutput(audio_stream_type_t stream,
uint32_t samplingRate = 0,
- uint32_t format = FORMAT_DEFAULT,
- uint32_t channels = CHANNEL_OUT_STEREO,
- output_flags flags = OUTPUT_FLAG_INDIRECT);
+ uint32_t format = AUDIO_FORMAT_DEFAULT,
+ uint32_t channels = AUDIO_CHANNEL_OUT_STEREO,
+ audio_policy_output_flags_t flags = AUDIO_POLICY_OUTPUT_FLAG_INDIRECT);
static status_t startOutput(audio_io_handle_t output,
- AudioSystem::stream_type stream,
+ audio_stream_type_t stream,
int session = 0);
static status_t stopOutput(audio_io_handle_t output,
- AudioSystem::stream_type stream,
+ audio_stream_type_t stream,
int session = 0);
static void releaseOutput(audio_io_handle_t output);
static audio_io_handle_t getInput(int inputSource,
uint32_t samplingRate = 0,
- uint32_t format = FORMAT_DEFAULT,
- uint32_t channels = CHANNEL_IN_MONO,
- audio_in_acoustics acoustics = (audio_in_acoustics)0);
+ uint32_t format = AUDIO_FORMAT_DEFAULT,
+ uint32_t channels = AUDIO_CHANNEL_IN_MONO,
+ audio_in_acoustics_t acoustics = (audio_in_acoustics_t)0);
static status_t startInput(audio_io_handle_t input);
static status_t stopInput(audio_io_handle_t input);
static void releaseInput(audio_io_handle_t input);
- static status_t initStreamVolume(stream_type stream,
+ static status_t initStreamVolume(audio_stream_type_t stream,
int indexMin,
int indexMax);
- static status_t setStreamVolumeIndex(stream_type stream, int index);
- static status_t getStreamVolumeIndex(stream_type stream, int *index);
+ static status_t setStreamVolumeIndex(audio_stream_type_t stream, int index);
+ static status_t getStreamVolumeIndex(audio_stream_type_t stream, int *index);
- static uint32_t getStrategyForStream(stream_type stream);
- static uint32_t getDevicesForStream(stream_type stream);
+ static uint32_t getStrategyForStream(audio_stream_type_t stream);
+ static uint32_t getDevicesForStream(audio_stream_type_t stream);
static audio_io_handle_t getOutputForEffect(effect_descriptor_t *desc);
static status_t registerEffect(effect_descriptor_t *desc,
@@ -411,17 +185,6 @@
// ----------------------------------------------------------------------------
- static uint32_t popCount(uint32_t u);
- static bool isOutputDevice(audio_devices device);
- static bool isInputDevice(audio_devices device);
- static bool isA2dpDevice(audio_devices device);
- static bool isBluetoothScoDevice(audio_devices device);
- static bool isLowVisibility(stream_type stream);
- static bool isOutputChannel(uint32_t channel);
- static bool isInputChannel(uint32_t channel);
- static bool isValidFormat(uint32_t format);
- static bool isLinearPCM(uint32_t format);
-
private:
class AudioFlingerClient: public IBinder::DeathRecipient, public BnAudioFlingerClient
@@ -473,50 +236,6 @@
static DefaultKeyedVector<audio_io_handle_t, OutputDescriptor *> gOutputs;
};
-class AudioParameter {
-
-public:
- AudioParameter() {}
- AudioParameter(const String8& keyValuePairs);
- virtual ~AudioParameter();
-
- // reserved parameter keys for changing standard parameters with setParameters() function.
- // Using these keys is mandatory for AudioFlinger to properly monitor audio output/input
- // configuration changes and act accordingly.
- // keyRouting: to change audio routing, value is an int in AudioSystem::audio_devices
- // keySamplingRate: to change sampling rate routing, value is an int
- // keyFormat: to change audio format, value is an int in AudioSystem::audio_format
- // keyChannels: to change audio channel configuration, value is an int in AudioSystem::audio_channels
- // keyFrameCount: to change audio output frame count, value is an int
- // keyInputSource: to change audio input source, value is an int in audio_source
- // (defined in media/mediarecorder.h)
- static const char *keyRouting;
- static const char *keySamplingRate;
- static const char *keyFormat;
- static const char *keyChannels;
- static const char *keyFrameCount;
- static const char *keyInputSource;
-
- String8 toString();
-
- status_t add(const String8& key, const String8& value);
- status_t addInt(const String8& key, const int value);
- status_t addFloat(const String8& key, const float value);
-
- status_t remove(const String8& key);
-
- status_t get(const String8& key, String8& value);
- status_t getInt(const String8& key, int& value);
- status_t getFloat(const String8& key, float& value);
- status_t getAt(size_t index, String8& key, String8& value);
-
- size_t size() { return mParameters.size(); }
-
-private:
- String8 mKeyValuePairs;
- KeyedVector <String8, String8> mParameters;
-};
-
}; // namespace android
#endif /*ANDROID_AUDIOSYSTEM_H_*/
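For callers, the change is a mechanical rename from the nested AudioSystem enums
to the global audio_*_t constants now pulled in from <hardware/audio.h>; a brief
sketch (not part of this change):

    #include <media/AudioSystem.h>
    using namespace android;

    void queryMusicOutput() {
        // Before: AudioSystem::getOutputSamplingRate(&rate, AudioSystem::MUSIC);
        int rate = 0;
        AudioSystem::getOutputSamplingRate(&rate, AUDIO_STREAM_MUSIC);

        // Before: AudioSystem::getOutput(AudioSystem::MUSIC, 44100,
        //         AudioSystem::PCM_16_BIT, AudioSystem::CHANNEL_OUT_STEREO);
        audio_io_handle_t out = AudioSystem::getOutput(AUDIO_STREAM_MUSIC,
                44100, AUDIO_FORMAT_PCM_16_BIT, AUDIO_CHANNEL_OUT_STEREO);
        (void) out;
    }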
diff --git a/include/media/AudioTrack.h b/include/media/AudioTrack.h
index 3e346db..de928da 100644
--- a/include/media/AudioTrack.h
+++ b/include/media/AudioTrack.h
@@ -30,7 +30,6 @@
#include <binder/IMemory.h>
#include <utils/threads.h>
-
namespace android {
// ----------------------------------------------------------------------------
@@ -126,11 +125,11 @@
* Parameters:
*
* streamType: Select the type of audio stream this track is attached to
- * (e.g. AudioSystem::MUSIC).
+ * (e.g. AUDIO_STREAM_MUSIC).
* sampleRate: Track sampling rate in Hz.
- * format: Audio format (e.g AudioSystem::PCM_16_BIT for signed
+ * format: Audio format (e.g. AUDIO_FORMAT_PCM_16_BIT for signed
* 16 bits per sample).
- * channels: Channel mask: see AudioSystem::audio_channels.
+ * channels: Channel mask: see audio_channels_t.
* frameCount: Total size of track PCM buffer in frames. This defines the
* latency of the track.
* flags: Reserved for future use.
diff --git a/include/media/EffectApi.h b/include/media/EffectApi.h
index b97c22e..a5ad846 100644
--- a/include/media/EffectApi.h
+++ b/include/media/EffectApi.h
@@ -602,9 +602,9 @@
// Audio mode
enum audio_mode_e {
- AUDIO_MODE_NORMAL, // device idle
- AUDIO_MODE_RINGTONE, // device ringing
- AUDIO_MODE_IN_CALL // audio call connected (VoIP or telephony)
+ AUDIO_EFFECT_MODE_NORMAL, // device idle
+ AUDIO_EFFECT_MODE_RINGTONE, // device ringing
+ AUDIO_EFFECT_MODE_IN_CALL, // audio call connected (VoIP or telephony)
};
// Values for "accessMode" field of buffer_config_t:
diff --git a/include/media/IAudioPolicyService.h b/include/media/IAudioPolicyService.h
index 720a562..09b2bfe 100644
--- a/include/media/IAudioPolicyService.h
+++ b/include/media/IAudioPolicyService.h
@@ -26,6 +26,7 @@
#include <binder/IInterface.h>
#include <media/AudioSystem.h>
+#include <hardware/audio_policy.h>
namespace android {
@@ -39,42 +40,42 @@
//
// IAudioPolicyService interface (see AudioPolicyInterface for method descriptions)
//
- virtual status_t setDeviceConnectionState(AudioSystem::audio_devices device,
- AudioSystem::device_connection_state state,
+ virtual status_t setDeviceConnectionState(audio_devices_t device,
+ audio_policy_dev_state_t state,
const char *device_address) = 0;
- virtual AudioSystem::device_connection_state getDeviceConnectionState(AudioSystem::audio_devices device,
+ virtual audio_policy_dev_state_t getDeviceConnectionState(audio_devices_t device,
const char *device_address) = 0;
virtual status_t setPhoneState(int state) = 0;
virtual status_t setRingerMode(uint32_t mode, uint32_t mask) = 0;
- virtual status_t setForceUse(AudioSystem::force_use usage, AudioSystem::forced_config config) = 0;
- virtual AudioSystem::forced_config getForceUse(AudioSystem::force_use usage) = 0;
- virtual audio_io_handle_t getOutput(AudioSystem::stream_type stream,
+ virtual status_t setForceUse(audio_policy_force_use_t usage, audio_policy_forced_cfg_t config) = 0;
+ virtual audio_policy_forced_cfg_t getForceUse(audio_policy_force_use_t usage) = 0;
+ virtual audio_io_handle_t getOutput(audio_stream_type_t stream,
uint32_t samplingRate = 0,
- uint32_t format = AudioSystem::FORMAT_DEFAULT,
+ uint32_t format = AUDIO_FORMAT_DEFAULT,
uint32_t channels = 0,
- AudioSystem::output_flags flags = AudioSystem::OUTPUT_FLAG_INDIRECT) = 0;
+ audio_policy_output_flags_t flags = AUDIO_POLICY_OUTPUT_FLAG_INDIRECT) = 0;
virtual status_t startOutput(audio_io_handle_t output,
- AudioSystem::stream_type stream,
+ audio_stream_type_t stream,
int session = 0) = 0;
virtual status_t stopOutput(audio_io_handle_t output,
- AudioSystem::stream_type stream,
+ audio_stream_type_t stream,
int session = 0) = 0;
virtual void releaseOutput(audio_io_handle_t output) = 0;
virtual audio_io_handle_t getInput(int inputSource,
uint32_t samplingRate = 0,
- uint32_t format = AudioSystem::FORMAT_DEFAULT,
+ uint32_t format = AUDIO_FORMAT_DEFAULT,
uint32_t channels = 0,
- AudioSystem::audio_in_acoustics acoustics = (AudioSystem::audio_in_acoustics)0) = 0;
+ audio_in_acoustics_t acoustics = (audio_in_acoustics_t)0) = 0;
virtual status_t startInput(audio_io_handle_t input) = 0;
virtual status_t stopInput(audio_io_handle_t input) = 0;
virtual void releaseInput(audio_io_handle_t input) = 0;
- virtual status_t initStreamVolume(AudioSystem::stream_type stream,
+ virtual status_t initStreamVolume(audio_stream_type_t stream,
int indexMin,
int indexMax) = 0;
- virtual status_t setStreamVolumeIndex(AudioSystem::stream_type stream, int index) = 0;
- virtual status_t getStreamVolumeIndex(AudioSystem::stream_type stream, int *index) = 0;
- virtual uint32_t getStrategyForStream(AudioSystem::stream_type stream) = 0;
- virtual uint32_t getDevicesForStream(AudioSystem::stream_type stream) = 0;
+ virtual status_t setStreamVolumeIndex(audio_stream_type_t stream, int index) = 0;
+ virtual status_t getStreamVolumeIndex(audio_stream_type_t stream, int *index) = 0;
+ virtual uint32_t getStrategyForStream(audio_stream_type_t stream) = 0;
+ virtual uint32_t getDevicesForStream(audio_stream_type_t stream) = 0;
virtual audio_io_handle_t getOutputForEffect(effect_descriptor_t *desc) = 0;
virtual status_t registerEffect(effect_descriptor_t *desc,
audio_io_handle_t output,
diff --git a/include/media/MediaPlayerInterface.h b/include/media/MediaPlayerInterface.h
index e1b6dd6..bebecc0 100644
--- a/include/media/MediaPlayerInterface.h
+++ b/include/media/MediaPlayerInterface.h
@@ -85,7 +85,7 @@
// audio data.
virtual status_t open(
uint32_t sampleRate, int channelCount,
- int format=AudioSystem::PCM_16_BIT,
+ int format=AUDIO_FORMAT_PCM_16_BIT,
int bufferCount=DEFAULT_AUDIOSINK_BUFFERCOUNT,
AudioCallback cb = NULL,
void *cookie = NULL) = 0;
diff --git a/include/media/MediaRecorderBase.h b/include/media/MediaRecorderBase.h
index c42346e..5fe7722 100644
--- a/include/media/MediaRecorderBase.h
+++ b/include/media/MediaRecorderBase.h
@@ -20,6 +20,8 @@
#include <media/mediarecorder.h>
+#include <hardware/audio.h>
+
namespace android {
class Surface;
@@ -29,7 +31,7 @@
virtual ~MediaRecorderBase() {}
virtual status_t init() = 0;
- virtual status_t setAudioSource(audio_source as) = 0;
+ virtual status_t setAudioSource(audio_source_t as) = 0;
virtual status_t setVideoSource(video_source vs) = 0;
virtual status_t setOutputFormat(output_format of) = 0;
virtual status_t setAudioEncoder(audio_encoder ae) = 0;
diff --git a/include/media/mediarecorder.h b/include/media/mediarecorder.h
index 67d940b..18a3c6a 100644
--- a/include/media/mediarecorder.h
+++ b/include/media/mediarecorder.h
@@ -33,23 +33,6 @@
typedef void (*media_completion_f)(status_t status, void *cookie);
-/* Do not change these values without updating their counterparts
- * in media/java/android/media/MediaRecorder.java!
- */
-enum audio_source {
- AUDIO_SOURCE_DEFAULT = 0,
- AUDIO_SOURCE_MIC = 1,
- AUDIO_SOURCE_VOICE_UPLINK = 2,
- AUDIO_SOURCE_VOICE_DOWNLINK = 3,
- AUDIO_SOURCE_VOICE_CALL = 4,
- AUDIO_SOURCE_CAMCORDER = 5,
- AUDIO_SOURCE_VOICE_RECOGNITION = 6,
- AUDIO_SOURCE_VOICE_COMMUNICATION = 7,
- AUDIO_SOURCE_MAX = AUDIO_SOURCE_VOICE_COMMUNICATION,
-
- AUDIO_SOURCE_LIST_END // must be last - used to validate audio source type
-};
-
enum video_source {
VIDEO_SOURCE_DEFAULT = 0,
VIDEO_SOURCE_CAMERA = 1,
diff --git a/include/media/stagefright/AudioSource.h b/include/media/stagefright/AudioSource.h
index 9e6f0e2..20a9e16 100644
--- a/include/media/stagefright/AudioSource.h
+++ b/include/media/stagefright/AudioSource.h
@@ -24,16 +24,18 @@
#include <media/stagefright/MediaBuffer.h>
#include <utils/List.h>
+#include <hardware/audio.h>
+
namespace android {
class AudioRecord;
struct AudioSource : public MediaSource, public MediaBufferObserver {
// Note that the "channels" parameter is _not_ the number of channels,
- // but a bitmask of AudioSystem::audio_channels constants.
+ // but a bitmask of audio_channels_t constants.
AudioSource(
int inputSource, uint32_t sampleRate,
- uint32_t channels = AudioSystem::CHANNEL_IN_MONO);
+ uint32_t channels = AUDIO_CHANNEL_IN_MONO);
status_t initCheck() const;
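Since the channel argument is easy to misread as a channel count, a small usage
sketch (not part of this change, assuming AUDIO_SOURCE_MIC from <hardware/audio.h>):

    #include <media/stagefright/AudioSource.h>
    #include <hardware/audio.h>
    using namespace android;

    sp<MediaSource> openMicSource() {
        // Mono capture at 8 kHz: the last argument is the AUDIO_CHANNEL_IN_MONO
        // bitmask, not a channel count of 1.
        return new AudioSource(AUDIO_SOURCE_MIC, 8000, AUDIO_CHANNEL_IN_MONO);
    }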
diff --git a/include/media/stagefright/MediaErrors.h b/include/media/stagefright/MediaErrors.h
index 6df4d86..1a6d548 100644
--- a/include/media/stagefright/MediaErrors.h
+++ b/include/media/stagefright/MediaErrors.h
@@ -42,6 +42,17 @@
INFO_DISCONTINUITY = MEDIA_ERROR_BASE - 13,
ERROR_NO_LICENSE = MEDIA_ERROR_BASE - 14,
+
+ // Heartbeat Error Codes
+ HEARTBEAT_ERROR_BASE = -3000,
+
+ ERROR_HEARTBEAT_AUTHENTICATION_FAILURE = HEARTBEAT_ERROR_BASE,
+ ERROR_HEARTBEAT_NO_ACTIVE_PURCHASE_AGREEMENT = HEARTBEAT_ERROR_BASE - 1,
+ ERROR_HEARTBEAT_CONCURRENT_PLAYBACK = HEARTBEAT_ERROR_BASE - 2,
+ ERROR_HEARTBEAT_UNUSUAL_ACTIVITY = HEARTBEAT_ERROR_BASE - 3,
+ ERROR_HEARTBEAT_STREAMING_UNAVAILABLE = HEARTBEAT_ERROR_BASE - 4,
+ ERROR_HEARTBEAT_CANNOT_ACTIVATE_RENTAL = HEARTBEAT_ERROR_BASE - 5,
+ ERROR_HEARTBEAT_TERMINATE_REQUESTED = HEARTBEAT_ERROR_BASE - 6,
};
} // namespace android
diff --git a/libs/hwui/Layer.h b/libs/hwui/Layer.h
index 26e240f..6c4a2a9 100644
--- a/libs/hwui/Layer.h
+++ b/libs/hwui/Layer.h
@@ -53,6 +53,23 @@
}
/**
+ * Sets regionRect to the bounding rectangle of this layer's region and
+ * computes the corresponding texture coordinates.
+ */
+ void setRegionAsRect() {
+ const android::Rect& bounds = region.getBounds();
+ regionRect.set(bounds.leftTop().x, bounds.leftTop().y,
+ bounds.rightBottom().x, bounds.rightBottom().y);
+
+ // width/height here are the layer's texture dimensions (member fields)
+ const float texX = 1.0f / float(width);
+ const float texY = 1.0f / float(height);
+ // this local shadows the member from here on: it is the height of the bounds
+ const float height = layer.getHeight();
+ texCoords.set(
+ regionRect.left * texX, (height - regionRect.top) * texY,
+ regionRect.right * texX, (height - regionRect.bottom) * texY);
+ }
+
+ /**
* Bounds of the layer.
*/
Rect layer;
diff --git a/libs/hwui/LayerRenderer.cpp b/libs/hwui/LayerRenderer.cpp
index ba110ec..ca1e7ae 100644
--- a/libs/hwui/LayerRenderer.cpp
+++ b/libs/hwui/LayerRenderer.cpp
@@ -102,19 +102,7 @@
mLayer->meshElementCount = 0;
}
- const android::Rect& bounds = mLayer->region.getBounds();
- mLayer->regionRect.set(bounds.leftTop().x, bounds.leftTop().y,
- bounds.rightBottom().x, bounds.rightBottom().y);
-
- const float texX = 1.0f / float(mLayer->width);
- const float texY = 1.0f / float(mLayer->height);
- const float height = mLayer->layer.getHeight();
- mLayer->texCoords.set(
- mLayer->regionRect.left * texX,
- (height - mLayer->regionRect.top) * texY,
- mLayer->regionRect.right * texX,
- (height - mLayer->regionRect.bottom) * texY);
-
+ mLayer->setRegionAsRect();
return;
}
diff --git a/libs/hwui/OpenGLRenderer.cpp b/libs/hwui/OpenGLRenderer.cpp
index dd0cca22..ea42838 100644
--- a/libs/hwui/OpenGLRenderer.cpp
+++ b/libs/hwui/OpenGLRenderer.cpp
@@ -647,10 +647,10 @@
void OpenGLRenderer::composeLayerRegion(Layer* layer, const Rect& rect) {
#if RENDER_LAYERS_AS_REGIONS
if (layer->region.isRect()) {
- const android::Rect& bounds = layer->region.getBounds();
- layer->regionRect.set(bounds.leftTop().x, bounds.leftTop().y,
- bounds.rightBottom().x, bounds.rightBottom().y);
+ layer->setRegionAsRect();
+
composeLayerRect(layer, layer->regionRect);
+
layer->region.clear();
return;
}
@@ -979,8 +979,8 @@
}
}
-void OpenGLRenderer::setupDrawModelViewIdentity() {
- mCaches.currentProgram->set(mOrthoMatrix, mIdentity, *mSnapshot->transform);
+void OpenGLRenderer::setupDrawModelViewIdentity(bool offset) {
+ mCaches.currentProgram->set(mOrthoMatrix, mIdentity, *mSnapshot->transform, offset);
}
void OpenGLRenderer::setupDrawModelView(float left, float top, float right, float bottom,
@@ -1389,13 +1389,46 @@
}
}
-void OpenGLRenderer::drawLinesAsQuads(float *points, int count, bool isAA, bool isHairline,
- float strokeWidth) {
+void OpenGLRenderer::drawLines(float* points, int count, SkPaint* paint) {
+ if (mSnapshot->isIgnored()) return;
+
+ const bool isAA = paint->isAntiAlias();
+ float strokeWidth = paint->getStrokeWidth() * 0.5f;
+ // A stroke width of 0 has a special meaning in Skia:
+ // it draws a line 1 px wide regardless of current transform
+ bool isHairLine = paint->getStrokeWidth() == 0.0f;
+ int alpha;
+ SkXfermode::Mode mode;
+ int generatedVerticesCount = 0;
int verticesCount = count;
if (count > 4) {
// Polyline: account for extra vertices needed for continuous tri-strip
verticesCount += (count -4);
}
+
+ getAlphaAndMode(paint, &alpha, &mode);
+ setupDraw();
+ if (isAA) {
+ setupDrawAALine();
+ }
+ setupDrawColor(paint->getColor(), alpha);
+ setupDrawColorFilter();
+ setupDrawShader();
+ if (isAA) {
+ setupDrawBlending(true, mode);
+ } else {
+ setupDrawBlending(mode);
+ }
+ setupDrawProgram();
+ setupDrawModelViewIdentity(true);
+ setupDrawColorUniforms();
+ setupDrawColorFilterUniforms();
+ setupDrawShaderIdentityUniforms();
+
+ if (isHairLine) {
+ // Set a real stroke width to be used in quad construction
+ strokeWidth = .5;
+ }
if (isAA) {
// Expand boundary to enable AA calculations on the quad border
strokeWidth += .5f;
@@ -1407,25 +1440,22 @@
if (!isAA) {
setupDrawVertices(vertices);
} else {
- AlphaVertex* alphaCoords = aaVertices + gVertexAlphaOffset;
+ void* alphaCoords = ((GLbyte*) aaVertices) + gVertexAlphaOffset;
// innerProportion is the ratio of the inner (non-AA) portion of the line to the total
// AA stroke width (the base stroke width expanded by a half pixel on either side).
// This value is used in the fragment shader to determine how to fill fragments.
float innerProportion = fmax(strokeWidth - 1.0f, 0) / (strokeWidth + .5f);
- setupDrawAALine((void*) aaVertices, (void*) alphaCoords, innerProportion);
+ setupDrawAALine((void*) aaVertices, alphaCoords, innerProportion);
}
- int generatedVerticesCount = 0;
AlphaVertex *prevAAVertex = NULL;
Vertex *prevVertex = NULL;
float inverseScaleX = 1.0f;
float inverseScaleY = 1.0f;
- if (isHairline) {
+ if (isHairLine) {
// The quad that we use for AA hairlines needs to account for scaling because the line
// should always be one pixel wide regardless of scale.
- inverseScaleX = 1.0f;
- inverseScaleY = 1.0f;
if (!mSnapshot->transform->isPureTranslate()) {
Matrix4 *mat = mSnapshot->transform;
float m00 = mat->data[Matrix4::kScaleX];
@@ -1446,22 +1476,19 @@
vec2 a(points[i], points[i + 1]);
vec2 b(points[i + 2], points[i + 3]);
- // Bias to snap to the same pixels as Skia
- a += 0.375;
- b += 0.375;
-
// Find the normal to the line
vec2 n = (b - a).copyNormalized() * strokeWidth;
- if (isHairline) {
- float wideningFactor;
- if (fabs(n.x) >= fabs(n.y)) {
- wideningFactor = fabs(1.0f / n.x);
- } else {
- wideningFactor = fabs(1.0f / n.y);
+ if (isHairLine) {
+ n *= inverseScaleX;
+ if (isAA) {
+ float wideningFactor;
+ if (fabs(n.x) >= fabs(n.y)) {
+ wideningFactor = fabs(1.0f / n.x);
+ } else {
+ wideningFactor = fabs(1.0f / n.y);
+ }
+ n *= wideningFactor;
}
- n.x *= inverseScaleX;
- n.y *= inverseScaleY;
- n *= wideningFactor;
}
float x = n.x;
n.x = -n.y;
@@ -1525,69 +1552,6 @@
}
}
-void OpenGLRenderer::drawLines(float* points, int count, SkPaint* paint) {
- if (mSnapshot->isIgnored()) return;
-
- const bool isAA = paint->isAntiAlias();
- const float strokeWidth = paint->getStrokeWidth() * 0.5f;
- // A stroke width of 0 has a special meaning in Skia:
- // it draws a line 1 px wide regardless of current transform
- bool isHairLine = paint->getStrokeWidth() == 0.0f;
-
- int alpha;
- SkXfermode::Mode mode;
- getAlphaAndMode(paint, &alpha, &mode);
- int generatedVerticesCount = 0;
-
- setupDraw();
- if (isAA) {
- setupDrawAALine();
- }
- setupDrawColor(paint->getColor(), alpha);
- setupDrawColorFilter();
- setupDrawShader();
- if (isAA) {
- setupDrawBlending(true, mode);
- } else {
- setupDrawBlending(mode);
- }
- setupDrawProgram();
- setupDrawModelViewIdentity();
- setupDrawColorUniforms();
- setupDrawColorFilterUniforms();
- setupDrawShaderIdentityUniforms();
-
- if (!isHairLine) {
- drawLinesAsQuads(points, count, isAA, isHairLine, strokeWidth);
- } else {
- if (isAA) {
- drawLinesAsQuads(points, count, isAA, isHairLine, .5f);
- } else {
- int verticesCount = count >> 1;
- Vertex lines[verticesCount];
- Vertex* vertices = &lines[0];
- setupDrawVertices(vertices);
- for (int i = 0; i < count; i += 4) {
-
- const float left = fmin(points[i], points[i + 2]);
- const float right = fmax(points[i], points[i + 2]);
- const float top = fmin(points[i + 1], points[i + 3]);
- const float bottom = fmax(points[i + 1], points[i + 3]);
-
- Vertex::set(vertices++, points[i], points[i + 1]);
- Vertex::set(vertices++, points[i + 2], points[i + 3]);
- generatedVerticesCount += 2;
- dirtyLayer(left, top,
- right == left ? left + 1 : right, bottom == top ? top + 1 : bottom,
- *mSnapshot->transform);
- }
-
- glLineWidth(1.0f);
- glDrawArrays(GL_LINES, 0, generatedVerticesCount);
- }
- }
-}
-
void OpenGLRenderer::drawPoints(float* points, int count, SkPaint* paint) {
if (mSnapshot->isIgnored()) return;
@@ -1596,8 +1560,13 @@
// A stroke width of 0 has a special meaning in Skia:
// it draws an unscaled 1px point
+ float strokeWidth = paint->getStrokeWidth();
const bool isHairLine = paint->getStrokeWidth() == 0.0f;
-
+ if (isHairLine) {
+ // Now that we know it's hairline, we can set the effective width, to be used later
+ strokeWidth = 1.0f;
+ }
+ const float halfWidth = strokeWidth / 2;
int alpha;
SkXfermode::Mode mode;
getAlphaAndMode(paint, &alpha, &mode);
@@ -1609,13 +1578,13 @@
TextureVertex* vertex = &pointsData[0];
setupDraw();
- setupDrawPoint(isHairLine ? 1.0f : paint->getStrokeWidth());
+ setupDrawPoint(strokeWidth);
setupDrawColor(paint->getColor(), alpha);
setupDrawColorFilter();
setupDrawShader();
setupDrawBlending(mode);
setupDrawProgram();
- setupDrawModelViewIdentity();
+ setupDrawModelViewIdentity(true);
setupDrawColorUniforms();
setupDrawColorFilterUniforms();
setupDrawPointUniforms();
@@ -1625,6 +1594,11 @@
for (int i = 0; i < count; i += 2) {
TextureVertex::set(vertex++, points[i], points[i + 1], 0.0f, 0.0f);
generatedVerticesCount++;
+ float left = points[i] - halfWidth;
+ float right = points[i] + halfWidth;
+ float top = points[i + 1] - halfWidth;
+ float bottom = points[i + 1] + halfWidth;
+ dirtyLayer(left, top, right, bottom, *mSnapshot->transform);
}
glDrawArrays(GL_POINTS, 0, generatedVerticesCount);
diff --git a/libs/hwui/OpenGLRenderer.h b/libs/hwui/OpenGLRenderer.h
index 0276095..918e1fb 100644
--- a/libs/hwui/OpenGLRenderer.h
+++ b/libs/hwui/OpenGLRenderer.h
@@ -283,19 +283,6 @@
void drawAlphaBitmap(Texture* texture, float left, float top, SkPaint* paint);
/**
- * Draws a line as a quad. Called by drawLines() for all cases except hairline without AA.
- *
- * @param points The vertices of the lines. Every four entries specifies the x/y points
- * of a single line segment.
- * @param count The number of entries in the points array.
- * @param isAA Whether the line is anti-aliased
- * @param isHairline Whether the line has strokeWidth==0, which results in the line being
- * one pixel wide on the display regardless of scale.
- */
- void drawLinesAsQuads(float *points, int count, bool isAA, bool isHairline,
- float strokeWidth);
-
- /**
* Draws a textured rectangle with the specified texture. The specified coordinates
* are transformed by the current snapshot's transform matrix.
*
@@ -453,7 +440,7 @@
bool swapSrcDst = false);
void setupDrawProgram();
void setupDrawDirtyRegionsDisabled();
- void setupDrawModelViewIdentity();
+ void setupDrawModelViewIdentity(bool offset = false);
void setupDrawModelView(float left, float top, float right, float bottom,
bool ignoreTransform = false, bool ignoreModelView = false);
void setupDrawModelViewTranslate(float left, float top, float right, float bottom,
diff --git a/libs/hwui/Program.cpp b/libs/hwui/Program.cpp
index 2187f24..972dd87 100644
--- a/libs/hwui/Program.cpp
+++ b/libs/hwui/Program.cpp
@@ -124,8 +124,15 @@
}
void Program::set(const mat4& projectionMatrix, const mat4& modelViewMatrix,
- const mat4& transformMatrix) {
+ const mat4& transformMatrix, bool offset) {
mat4 t(projectionMatrix);
+ if (offset) {
+ // offset screenspace xy by an amount that compensates for typical precision issues
+ // in GPU hardware that tends to paint hor/vert lines in pixels shifted up and to the left.
+ // This offset value is based on an assumption that some hardware may use as little
+ // as 12.4 precision, so we offset by slightly more than 1/16.
+ t.translate(.375, .375, 0);
+ }
t.multiply(transformMatrix);
t.multiply(modelViewMatrix);
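With the offset enabled, the code above appears to assemble (assuming
Matrix4::translate() and multiply() both post-multiply)

    t = projection * T(0.375, 0.375, 0) * transform * modelView

i.e. a constant sub-pixel bias folded into the projection once per draw call,
replacing the per-vertex "a += 0.375; b += 0.375" snapping removed from
drawLines() earlier in this patch.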
diff --git a/libs/hwui/Program.h b/libs/hwui/Program.h
index afc6f3d..764cb05 100644
--- a/libs/hwui/Program.h
+++ b/libs/hwui/Program.h
@@ -81,7 +81,7 @@
* transform matrices.
*/
void set(const mat4& projectionMatrix, const mat4& modelViewMatrix,
- const mat4& transformMatrix);
+ const mat4& transformMatrix, bool offset = false);
/**
* Sets the color associated with this shader.
diff --git a/libs/rs/RenderScript.h b/libs/rs/RenderScript.h
index cb6d7e0..f4e3f57 100644
--- a/libs/rs/RenderScript.h
+++ b/libs/rs/RenderScript.h
@@ -26,20 +26,6 @@
#include "RenderScriptDefines.h"
-RsDevice rsDeviceCreate();
-void rsDeviceDestroy(RsDevice);
-void rsDeviceSetConfig(RsDevice, RsDeviceParam, int32_t value);
-
-RsContext rsContextCreate(RsDevice, uint32_t version);
-RsContext rsContextCreateGL(RsDevice, uint32_t version,
- RsSurfaceConfig sc, uint32_t dpi);
-void rsContextDestroy(RsContext);
-
-RsMessageToClientType rsContextGetMessage(RsContext vrsc, void *data, size_t *receiveLen, uint32_t *subID, size_t bufferLen, bool wait);
-RsMessageToClientType rsContextPeekMessage(RsContext vrsc, size_t *receiveLen, uint32_t *subID, bool wait);
-void rsContextInitToClient(RsContext);
-void rsContextDeinitToClient(RsContext);
-
//
// A3D loading and object update code.
// Should only be called at object creation, not thread safe
@@ -63,18 +49,8 @@
void rsaElementGetNativeData(RsContext, RsElement, uint32_t *elemData, uint32_t elemDataSize);
void rsaElementGetSubElements(RsContext, RsElement, uint32_t *ids, const char **names, uint32_t dataSize);
-// Async commands for returning new IDS
-RsType rsaTypeCreate(RsContext, RsElement, uint32_t dimX, uint32_t dimY,
- uint32_t dimZ, bool mips, bool faces);
-RsAllocation rsaAllocationCreateTyped(RsContext rsc, RsType vtype,
- RsAllocationMipmapControl mips,
- uint32_t usages);
-RsAllocation rsaAllocationCreateFromBitmap(RsContext con, RsType vtype,
- RsAllocationMipmapControl mips,
- const void *data, uint32_t usages);
-RsAllocation rsaAllocationCubeCreateFromBitmap(RsContext con, RsType vtype,
- RsAllocationMipmapControl mips,
- const void *data, uint32_t usages);
+
+
#ifdef ANDROID_RS_SERIALIZE
#define NO_RS_FUNCS
#endif
diff --git a/libs/rs/rs.spec b/libs/rs/rs.spec
index dac5cec..998296b 100644
--- a/libs/rs/rs.spec
+++ b/libs/rs/rs.spec
@@ -1,4 +1,110 @@
+DeviceCreate {
+ direct
+ nocontext
+ ret RsDevice
+}
+
+DeviceDestroy {
+ direct
+ nocontext
+ param RsDevice dev
+}
+
+DeviceSetConfig {
+ direct
+ nocontext
+ param RsDevice dev
+ param RsDeviceParam p
+ param int32_t value
+}
+
+ContextCreate {
+ direct
+ nocontext
+ param RsDevice dev
+ param uint32_t version
+ ret RsContext
+}
+
+ContextCreateGL {
+ direct
+ nocontext
+ param RsDevice dev
+ param uint32_t version
+ param RsSurfaceConfig sc
+ param uint32_t dpi
+ ret RsContext
+}
+
+ContextDestroy {
+ direct
+}
+
+ContextGetMessage {
+ direct
+ param void *data
+ param size_t *receiveLen
+ param uint32_t *subID
+ param bool wait
+ ret RsMessageToClientType
+}
+
+ContextPeekMessage {
+ direct
+ param size_t *receiveLen
+ param uint32_t *subID
+ param bool wait
+ ret RsMessageToClientType
+}
+
+ContextInitToClient {
+ direct
+}
+
+ContextDeinitToClient {
+ direct
+}
+
+aTypeCreate {
+ direct
+ param RsElement e
+ param uint32_t dimX
+ param uint32_t dimY
+ param uint32_t dimZ
+ param bool mips
+ param bool faces
+ ret RsType
+}
+
+aAllocationCreateTyped {
+ direct
+ param RsType vtype
+ param RsAllocationMipmapControl mips
+ param uint32_t usages
+ ret RsAllocation
+}
+
+aAllocationCreateFromBitmap {
+ direct
+ param RsType vtype
+ param RsAllocationMipmapControl mips
+ param const void *data
+ param uint32_t usages
+ ret RsAllocation
+}
+
+aAllocationCubeCreateFromBitmap {
+ direct
+ param RsType vtype
+ param RsAllocationMipmapControl mips
+ param const void *data
+ param uint32_t usages
+ ret RsAllocation
+}
+
+
+
ContextFinish {
handcodeApi
}
@@ -82,23 +188,21 @@
Allocation1DData {
+ handcodeApi
param RsAllocation va
param uint32_t xoff
param uint32_t lod
param uint32_t count
param const void *data
- handcodeApi
- togglePlay
}
Allocation1DElementData {
+ handcodeApi
param RsAllocation va
param uint32_t x
param uint32_t lod
param const void *data
param uint32_t comp_offset
- handcodeApi
- togglePlay
}
Allocation2DData {
@@ -186,11 +290,10 @@
}
ScriptInvokeV {
+ handcodeApi
param RsScript s
param uint32_t slot
param const void * data
- handcodeApi
- togglePlay
}
ScriptSetVarI {
@@ -224,11 +327,10 @@
}
ScriptSetVarV {
+ handcodeApi
param RsScript s
param uint32_t slot
param const void * data
- handcodeApi
- togglePlay
}
@@ -330,3 +432,4 @@
MeshInitVertexAttribs {
param RsMesh mesh
}
+
diff --git a/libs/rs/rsAllocation.cpp b/libs/rs/rsAllocation.cpp
index a759004..743b2c4 100644
--- a/libs/rs/rsAllocation.cpp
+++ b/libs/rs/rsAllocation.cpp
@@ -833,7 +833,7 @@
RsAllocation rsaAllocationCreateFromBitmap(RsContext con, RsType vtype,
RsAllocationMipmapControl mips,
- const void *data, uint32_t usages) {
+ const void *data, size_t data_length, uint32_t usages) {
Context *rsc = static_cast<Context *>(con);
Type *t = static_cast<Type *>(vtype);
@@ -855,7 +855,7 @@
RsAllocation rsaAllocationCubeCreateFromBitmap(RsContext con, RsType vtype,
RsAllocationMipmapControl mips,
- const void *data, uint32_t usages) {
+ const void *data, size_t data_length, uint32_t usages) {
Context *rsc = static_cast<Context *>(con);
Type *t = static_cast<Type *>(vtype);
diff --git a/libs/rs/rsContext.cpp b/libs/rs/rsContext.cpp
index 0ca892d..50f5f55 100644
--- a/libs/rs/rsContext.cpp
+++ b/libs/rs/rsContext.cpp
@@ -768,14 +768,20 @@
return rsc;
}
-RsMessageToClientType rsContextPeekMessage(RsContext vrsc, size_t *receiveLen, uint32_t *subID, bool wait) {
+RsMessageToClientType rsContextPeekMessage(RsContext vrsc,
+ size_t * receiveLen, size_t receiveLen_length,
+ uint32_t * subID, size_t subID_length, bool wait) {
Context * rsc = static_cast<Context *>(vrsc);
return rsc->peekMessageToClient(receiveLen, subID, wait);
}
-RsMessageToClientType rsContextGetMessage(RsContext vrsc, void *data, size_t *receiveLen, uint32_t *subID, size_t bufferLen, bool wait) {
+RsMessageToClientType rsContextGetMessage(RsContext vrsc, void * data, size_t data_length,
+ size_t * receiveLen, size_t receiveLen_length,
+ uint32_t * subID, size_t subID_length, bool wait) {
Context * rsc = static_cast<Context *>(vrsc);
- return rsc->getMessageToClient(data, receiveLen, subID, bufferLen, wait);
+ rsAssert(subID_length == sizeof(uint32_t));
+ rsAssert(receiveLen_length == sizeof(size_t));
+ return rsc->getMessageToClient(data, receiveLen, subID, data_length, wait);
}
void rsContextInitToClient(RsContext vrsc) {
diff --git a/libs/rs/rsg_generator.c b/libs/rs/rsg_generator.c
index 4ac5b7f..14b380a 100644
--- a/libs/rs/rsg_generator.c
+++ b/libs/rs/rsg_generator.c
@@ -4,7 +4,7 @@
void printFileHeader(FILE *f) {
fprintf(f, "/*\n");
- fprintf(f, " * Copyright (C) 2010 The Android Open Source Project\n");
+ fprintf(f, " * Copyright (C) 2011 The Android Open Source Project\n");
fprintf(f, " *\n");
fprintf(f, " * Licensed under the Apache License, Version 2.0 (the \"License\");\n");
fprintf(f, " * you may not use this file except in compliance with the License.\n");
@@ -96,12 +96,14 @@
void printFuncDecl(FILE *f, const ApiEntry *api, const char *prefix, int addContext) {
printVarType(f, &api->ret);
fprintf(f, " %s%s (", prefix, api->name);
- if (addContext) {
- fprintf(f, "Context *");
- } else {
- fprintf(f, "RsContext rsc");
+ if (!api->nocontext) {
+ if (addContext) {
+ fprintf(f, "Context *");
+ } else {
+ fprintf(f, "RsContext rsc");
+ }
}
- printArgList(f, api, 1);
+ printArgList(f, api, !api->nocontext);
fprintf(f, ")");
}
@@ -117,6 +119,10 @@
void printPlaybackFuncs(FILE *f, const char *prefix) {
int ct;
for (ct=0; ct < apiCount; ct++) {
+ if (apis[ct].direct) {
+ continue;
+ }
+
fprintf(f, "void %s%s (Context *, const void *);\n", prefix, apis[ct].name);
}
}
@@ -140,6 +146,10 @@
int needFlush = 0;
const ApiEntry * api = &apis[ct];
+ if (api->direct) {
+ continue;
+ }
+
printFuncDecl(f, api, "rs", 0);
fprintf(f, "\n{\n");
if (api->handcodeApi) {
@@ -198,31 +208,37 @@
for (ct=0; ct < apiCount; ct++) {
const ApiEntry * api = &apis[ct];
+ if (api->direct) {
+ continue;
+ }
+
fprintf(f, "void rsp_%s(Context *con, const void *vp)\n", api->name);
fprintf(f, "{\n");
- if (api->handcodePlay) {
- fprintf(f, " rsHCPLAY_%s(con, vp);\n", api->name);
- } else {
- //fprintf(f, " LOGE(\"play command %s\\n\");\n", api->name);
- fprintf(f, " const RS_CMD_%s *cmd = static_cast<const RS_CMD_%s *>(vp);\n", api->name, api->name);
- fprintf(f, " ");
- if (api->ret.typeName[0]) {
- fprintf(f, "con->mIO.mToCoreRet = (intptr_t)");
- }
- fprintf(f, "rsi_%s(con", api->name);
- for (ct2=0; ct2 < api->paramCount; ct2++) {
- const VarType *vt = &api->params[ct2];
- fprintf(f, ",\n cmd->%s", vt->name);
- }
- fprintf(f, ");\n");
+
+ //fprintf(f, " LOGE(\"play command %s\\n\");\n", api->name);
+ fprintf(f, " const RS_CMD_%s *cmd = static_cast<const RS_CMD_%s *>(vp);\n", api->name, api->name);
+ fprintf(f, " ");
+ if (api->ret.typeName[0]) {
+ fprintf(f, "con->mIO.mToCoreRet = (intptr_t)");
}
+ fprintf(f, "rsi_%s(con", api->name);
+ for (ct2=0; ct2 < api->paramCount; ct2++) {
+ const VarType *vt = &api->params[ct2];
+ fprintf(f, ",\n cmd->%s", vt->name);
+ }
+ fprintf(f, ");\n");
+
fprintf(f, "};\n\n");
}
fprintf(f, "RsPlaybackFunc gPlaybackFuncs[%i] = {\n", apiCount + 1);
fprintf(f, " NULL,\n");
for (ct=0; ct < apiCount; ct++) {
- fprintf(f, " %s%s,\n", "rsp_", apis[ct].name);
+ if (apis[ct].direct) {
+ fprintf(f, " NULL,\n");
+ } else {
+ fprintf(f, " %s%s,\n", "rsp_", apis[ct].name);
+ }
}
fprintf(f, "};\n");
diff --git a/libs/rs/spec.h b/libs/rs/spec.h
index 82650a7..ecc5cc7 100644
--- a/libs/rs/spec.h
+++ b/libs/rs/spec.h
@@ -25,7 +25,8 @@
char name[256];
int sync;
int handcodeApi;
- int handcodePlay;
+ int direct;
+ int nocontext;
int paramCount;
VarType ret;
VarType params[16];
diff --git a/libs/rs/spec.l b/libs/rs/spec.l
index c8af891..dcd4435 100644
--- a/libs/rs/spec.l
+++ b/libs/rs/spec.l
@@ -44,6 +44,7 @@
<comment>"*"+"/" BEGIN(INITIAL);
<*>" " //printf("found ' '\n");
+<*>"\t" //printf("found ' '\n");
<*>"\n" ++num_lines; //printf("found lf \n");
{ID} {
@@ -64,8 +65,12 @@
apis[apiCount].handcodeApi = 1;
}
-<api_entry2>"handcodePlay" {
- apis[apiCount].handcodePlay = 1;
+<api_entry2>"direct" {
+ apis[apiCount].direct = 1;
+ }
+
+<api_entry2>"nocontext" {
+ apis[apiCount].nocontext = 1;
}
<api_entry2>"ret" {
diff --git a/media/jni/android_media_MediaRecorder.cpp b/media/jni/android_media_MediaRecorder.cpp
index 2c246958..66cb71c 100644
--- a/media/jni/android_media_MediaRecorder.cpp
+++ b/media/jni/android_media_MediaRecorder.cpp
@@ -33,6 +33,7 @@
#include "JNIHelp.h"
#include "android_runtime/AndroidRuntime.h"
+#include <hardware/audio.h>
// ----------------------------------------------------------------------------
@@ -177,7 +178,7 @@
android_media_MediaRecorder_setAudioSource(JNIEnv *env, jobject thiz, jint as)
{
LOGV("setAudioSource(%d)", as);
- if (as < AUDIO_SOURCE_DEFAULT || as >= AUDIO_SOURCE_LIST_END) {
+ if (as < AUDIO_SOURCE_DEFAULT || as >= AUDIO_SOURCE_CNT) {
jniThrowException(env, "java/lang/IllegalArgumentException", "Invalid audio source");
return;
}
diff --git a/media/jni/soundpool/SoundPool.cpp b/media/jni/soundpool/SoundPool.cpp
index 1e2d6ce..bfbf04a 100644
--- a/media/jni/soundpool/SoundPool.cpp
+++ b/media/jni/soundpool/SoundPool.cpp
@@ -27,6 +27,8 @@
#include <media/AudioTrack.h>
#include <media/mediaplayer.h>
+#include <hardware/audio.h>
+
#include "SoundPool.h"
#include "SoundPoolThread.h"
@@ -584,7 +586,7 @@
if (loop) {
frameCount = sample->size()/numChannels/
- ((sample->format() == AudioSystem::PCM_16_BIT) ? sizeof(int16_t) : sizeof(uint8_t));
+ ((sample->format() == AUDIO_FORMAT_PCM_16_BIT) ? sizeof(int16_t) : sizeof(uint8_t));
}
#ifndef USE_SHARED_MEM_BUFFER
@@ -602,7 +604,7 @@
unsigned long toggle = mToggle ^ 1;
void *userData = (void *)((unsigned long)this | toggle);
uint32_t channels = (numChannels == 2) ?
- AudioSystem::CHANNEL_OUT_STEREO : AudioSystem::CHANNEL_OUT_MONO;
+ AUDIO_CHANNEL_OUT_STEREO : AUDIO_CHANNEL_OUT_MONO;
// do not create a new audio track if current track is compatible with sample parameters
#ifdef USE_SHARED_MEM_BUFFER
@@ -865,7 +867,7 @@
Mutex::Autolock lock(&mLock);
if (mAudioTrack != 0 && mSample.get() != 0) {
uint32_t loopEnd = mSample->size()/mNumChannels/
- ((mSample->format() == AudioSystem::PCM_16_BIT) ? sizeof(int16_t) : sizeof(uint8_t));
+ ((mSample->format() == AUDIO_FORMAT_PCM_16_BIT) ? sizeof(int16_t) : sizeof(uint8_t));
mAudioTrack->setLoop(0, loopEnd, loop);
mLoop = loop;
}
diff --git a/media/libmedia/Android.mk b/media/libmedia/Android.mk
index ca7441a..121e38a4 100644
--- a/media/libmedia/Android.mk
+++ b/media/libmedia/Android.mk
@@ -1,4 +1,14 @@
LOCAL_PATH:= $(call my-dir)
+
+include $(CLEAR_VARS)
+
+LOCAL_SRC_FILES:= \
+ AudioParameter.cpp
+LOCAL_MODULE:= libmedia_helper
+LOCAL_MODULE_TAGS := optional
+
+include $(BUILD_STATIC_LIBRARY)
+
include $(CLEAR_VARS)
LOCAL_SRC_FILES:= \
@@ -41,6 +51,8 @@
libcamera_client libstagefright_foundation \
libgui
+LOCAL_WHOLE_STATIC_LIBRARIES := libmedia_helper
+
LOCAL_MODULE:= libmedia
ifeq ($(TARGET_OS)-$(TARGET_SIMULATOR),linux-true)
diff --git a/media/libmedia/AudioParameter.cpp b/media/libmedia/AudioParameter.cpp
new file mode 100644
index 0000000..59ccfd0
--- /dev/null
+++ b/media/libmedia/AudioParameter.cpp
@@ -0,0 +1,179 @@
+/*
+ * Copyright (C) 2006-2011 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#define LOG_TAG "AudioParameter"
+//#define LOG_NDEBUG 0
+
+#include <utils/Log.h>
+
+#include <media/AudioParameter.h>
+
+namespace android {
+
+const char *AudioParameter::keyRouting = "routing";
+const char *AudioParameter::keySamplingRate = "sampling_rate";
+const char *AudioParameter::keyFormat = "format";
+const char *AudioParameter::keyChannels = "channels";
+const char *AudioParameter::keyFrameCount = "frame_count";
+const char *AudioParameter::keyInputSource = "input_source";
+
+AudioParameter::AudioParameter(const String8& keyValuePairs)
+{
+ char *str = new char[keyValuePairs.length()+1];
+ mKeyValuePairs = keyValuePairs;
+
+ strcpy(str, keyValuePairs.string());
+ char *pair = strtok(str, ";");
+ while (pair != NULL) {
+ if (strlen(pair) != 0) {
+ size_t eqIdx = strcspn(pair, "=");
+ String8 key = String8(pair, eqIdx);
+ String8 value;
+ if (eqIdx == strlen(pair)) {
+ value = String8("");
+ } else {
+ value = String8(pair + eqIdx + 1);
+ }
+ if (mParameters.indexOfKey(key) < 0) {
+ mParameters.add(key, value);
+ } else {
+ mParameters.replaceValueFor(key, value);
+ }
+ } else {
+ LOGV("AudioParameter() ctor: empty key value pair");
+ }
+ pair = strtok(NULL, ";");
+ }
+
+ delete[] str;
+}
+
+AudioParameter::~AudioParameter()
+{
+ mParameters.clear();
+}
+
+String8 AudioParameter::toString()
+{
+ String8 str = String8("");
+
+ size_t size = mParameters.size();
+ for (size_t i = 0; i < size; i++) {
+ str += mParameters.keyAt(i);
+ str += "=";
+ str += mParameters.valueAt(i);
+ if (i < (size - 1)) str += ";";
+ }
+ return str;
+}
+
+status_t AudioParameter::add(const String8& key, const String8& value)
+{
+ if (mParameters.indexOfKey(key) < 0) {
+ mParameters.add(key, value);
+ return NO_ERROR;
+ } else {
+ mParameters.replaceValueFor(key, value);
+ return ALREADY_EXISTS;
+ }
+}
+
+status_t AudioParameter::addInt(const String8& key, const int value)
+{
+ char str[12];
+ if (snprintf(str, 12, "%d", value) > 0) {
+ String8 str8 = String8(str);
+ return add(key, str8);
+ } else {
+ return BAD_VALUE;
+ }
+}
+
+status_t AudioParameter::addFloat(const String8& key, const float value)
+{
+ char str[23];
+ if (snprintf(str, 23, "%.10f", value) > 0) {
+ String8 str8 = String8(str);
+ return add(key, str8);
+ } else {
+ return BAD_VALUE;
+ }
+}
+
+status_t AudioParameter::remove(const String8& key)
+{
+ if (mParameters.indexOfKey(key) >= 0) {
+ mParameters.removeItem(key);
+ return NO_ERROR;
+ } else {
+ return BAD_VALUE;
+ }
+}
+
+status_t AudioParameter::get(const String8& key, String8& value)
+{
+ if (mParameters.indexOfKey(key) >= 0) {
+ value = mParameters.valueFor(key);
+ return NO_ERROR;
+ } else {
+ return BAD_VALUE;
+ }
+}
+
+status_t AudioParameter::getInt(const String8& key, int& value)
+{
+ String8 str8;
+ status_t result = get(key, str8);
+ value = 0;
+ if (result == NO_ERROR) {
+ int val;
+ if (sscanf(str8.string(), "%d", &val) == 1) {
+ value = val;
+ } else {
+ result = INVALID_OPERATION;
+ }
+ }
+ return result;
+}
+
+status_t AudioParameter::getFloat(const String8& key, float& value)
+{
+ String8 str8;
+ status_t result = get(key, str8);
+ value = 0;
+ if (result == NO_ERROR) {
+ float val;
+ if (sscanf(str8.string(), "%f", &val) == 1) {
+ value = val;
+ } else {
+ result = INVALID_OPERATION;
+ }
+ }
+ return result;
+}
+
+status_t AudioParameter::getAt(size_t index, String8& key, String8& value)
+{
+ if (mParameters.size() > index) {
+ key = mParameters.keyAt(index);
+ value = mParameters.valueAt(index);
+ return NO_ERROR;
+ } else {
+ return BAD_VALUE;
+ }
+}
+
+}; // namespace android
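And the parsing direction, for completeness: a sketch (not part of this change)
that round-trips a key/value string through the constructor and getInt() above:

    #include <media/AudioParameter.h>
    using namespace android;

    int routingFromParams(const String8& keyValuePairs) {
        // e.g. keyValuePairs == "routing=2;sampling_rate=44100"
        AudioParameter param(keyValuePairs);
        int device = 0;
        if (param.getInt(String8(AudioParameter::keyRouting), device) != NO_ERROR) {
            return 0;  // key absent or value not an integer
        }
        return device;
    }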
diff --git a/media/libmedia/AudioRecord.cpp b/media/libmedia/AudioRecord.cpp
index 5d74a0a..8438714 100644
--- a/media/libmedia/AudioRecord.cpp
+++ b/media/libmedia/AudioRecord.cpp
@@ -37,6 +37,9 @@
#include <utils/Timers.h>
#include <utils/Atomic.h>
+#include <hardware/audio.h>
+#include <cutils/bitops.h>
+
#define LIKELY( exp ) (__builtin_expect( (exp) != 0, true ))
#define UNLIKELY( exp ) (__builtin_expect( (exp) != 0, false ))
@@ -66,8 +69,8 @@
// We double the size of input buffer for ping pong use of record buffer.
size <<= 1;
- if (AudioSystem::isLinearPCM(format)) {
- size /= channelCount * (format == AudioSystem::PCM_16_BIT ? 2 : 1);
+ if (audio_is_linear_pcm(format)) {
+ size /= channelCount * (format == AUDIO_FORMAT_PCM_16_BIT ? 2 : 1);
}
*frameCount = size;
@@ -145,22 +148,22 @@
}
// these below should probably come from the audioFlinger too...
if (format == 0) {
- format = AudioSystem::PCM_16_BIT;
+ format = AUDIO_FORMAT_PCM_16_BIT;
}
// validate parameters
- if (!AudioSystem::isValidFormat(format)) {
+ if (!audio_is_valid_format(format)) {
LOGE("Invalid format");
return BAD_VALUE;
}
- if (!AudioSystem::isInputChannel(channels)) {
+ if (!audio_is_input_channel(channels)) {
return BAD_VALUE;
}
- int channelCount = AudioSystem::popCount(channels);
+ int channelCount = popcount(channels);
audio_io_handle_t input = AudioSystem::getInput(inputSource,
- sampleRate, format, channels, (AudioSystem::audio_in_acoustics)flags);
+ sampleRate, format, channels, (audio_in_acoustics_t)flags);
if (input == 0) {
LOGE("Could not get audio input for record source %d", inputSource);
return BAD_VALUE;
@@ -254,8 +257,8 @@
int AudioRecord::frameSize() const
{
- if (AudioSystem::isLinearPCM(mFormat)) {
- return channelCount()*((format() == AudioSystem::PCM_8_BIT) ? sizeof(uint8_t) : sizeof(int16_t));
+ if (audio_is_linear_pcm(mFormat)) {
+ return channelCount()*((format() == AUDIO_FORMAT_PCM_8_BIT) ? sizeof(uint8_t) : sizeof(int16_t));
} else {
return sizeof(uint8_t);
}
@@ -587,7 +590,7 @@
mInput = AudioSystem::getInput(mInputSource,
mCblk->sampleRate,
mFormat, mChannels,
- (AudioSystem::audio_in_acoustics)mFlags);
+ (audio_in_acoustics_t)mFlags);
return mInput;
}
diff --git a/media/libmedia/AudioSystem.cpp b/media/libmedia/AudioSystem.cpp
index 2f694ba..e08a55b 100644
--- a/media/libmedia/AudioSystem.cpp
+++ b/media/libmedia/AudioSystem.cpp
@@ -23,6 +23,8 @@
#include <media/IAudioPolicyService.h>
#include <math.h>
+#include <hardware/audio.h>
+
// ----------------------------------------------------------------------------
// the sim build doesn't have gettid
@@ -45,7 +47,7 @@
// Cached values for recording queries
uint32_t AudioSystem::gPrevInSamplingRate = 16000;
-int AudioSystem::gPrevInFormat = AudioSystem::PCM_16_BIT;
+int AudioSystem::gPrevInFormat = AUDIO_FORMAT_PCM_16_BIT;
int AudioSystem::gPrevInChannelCount = 1;
size_t AudioSystem::gInBuffSize = 0;
@@ -127,7 +129,7 @@
status_t AudioSystem::setStreamVolume(int stream, float value, int output)
{
- if (uint32_t(stream) >= NUM_STREAM_TYPES) return BAD_VALUE;
+ if (uint32_t(stream) >= AUDIO_STREAM_CNT) return BAD_VALUE;
const sp<IAudioFlinger>& af = AudioSystem::get_audio_flinger();
if (af == 0) return PERMISSION_DENIED;
af->setStreamVolume(stream, value, output);
@@ -136,7 +138,7 @@
status_t AudioSystem::setStreamMute(int stream, bool mute)
{
- if (uint32_t(stream) >= NUM_STREAM_TYPES) return BAD_VALUE;
+ if (uint32_t(stream) >= AUDIO_STREAM_CNT) return BAD_VALUE;
const sp<IAudioFlinger>& af = AudioSystem::get_audio_flinger();
if (af == 0) return PERMISSION_DENIED;
af->setStreamMute(stream, mute);
@@ -145,7 +147,7 @@
status_t AudioSystem::getStreamVolume(int stream, float* volume, int output)
{
- if (uint32_t(stream) >= NUM_STREAM_TYPES) return BAD_VALUE;
+ if (uint32_t(stream) >= AUDIO_STREAM_CNT) return BAD_VALUE;
const sp<IAudioFlinger>& af = AudioSystem::get_audio_flinger();
if (af == 0) return PERMISSION_DENIED;
*volume = af->streamVolume(stream, output);
@@ -154,7 +156,7 @@
status_t AudioSystem::getStreamMute(int stream, bool* mute)
{
- if (uint32_t(stream) >= NUM_STREAM_TYPES) return BAD_VALUE;
+ if (uint32_t(stream) >= AUDIO_STREAM_CNT) return BAD_VALUE;
const sp<IAudioFlinger>& af = AudioSystem::get_audio_flinger();
if (af == 0) return PERMISSION_DENIED;
*mute = af->streamMute(stream);
@@ -163,7 +165,7 @@
status_t AudioSystem::setMode(int mode)
{
- if (mode >= NUM_MODES) return BAD_VALUE;
+ if (mode >= AUDIO_MODE_CNT) return BAD_VALUE;
const sp<IAudioFlinger>& af = AudioSystem::get_audio_flinger();
if (af == 0) return PERMISSION_DENIED;
return af->setMode(mode);
@@ -213,11 +215,11 @@
OutputDescriptor *outputDesc;
audio_io_handle_t output;
- if (streamType == DEFAULT) {
- streamType = MUSIC;
+ if (streamType == AUDIO_STREAM_DEFAULT) {
+ streamType = AUDIO_STREAM_MUSIC;
}
- output = getOutput((stream_type)streamType);
+ output = getOutput((audio_stream_type_t)streamType);
if (output == 0) {
return PERMISSION_DENIED;
}
@@ -246,11 +248,11 @@
OutputDescriptor *outputDesc;
audio_io_handle_t output;
- if (streamType == DEFAULT) {
- streamType = MUSIC;
+ if (streamType == AUDIO_STREAM_DEFAULT) {
+ streamType = AUDIO_STREAM_MUSIC;
}
- output = getOutput((stream_type)streamType);
+ output = getOutput((audio_stream_type_t)streamType);
if (output == 0) {
return PERMISSION_DENIED;
}
@@ -277,11 +279,11 @@
OutputDescriptor *outputDesc;
audio_io_handle_t output;
- if (streamType == DEFAULT) {
- streamType = MUSIC;
+ if (streamType == AUDIO_STREAM_DEFAULT) {
+ streamType = AUDIO_STREAM_MUSIC;
}
- output = getOutput((stream_type)streamType);
+ output = getOutput((audio_stream_type_t)streamType);
if (output == 0) {
return PERMISSION_DENIED;
}
@@ -338,11 +340,11 @@
const sp<IAudioFlinger>& af = AudioSystem::get_audio_flinger();
if (af == 0) return PERMISSION_DENIED;
- if (stream == DEFAULT) {
- stream = MUSIC;
+ if (stream == AUDIO_STREAM_DEFAULT) {
+ stream = AUDIO_STREAM_MUSIC;
}
- return af->getRenderPosition(halFrames, dspFrames, getOutput((stream_type)stream));
+ return af->getRenderPosition(halFrames, dspFrames, getOutput((audio_stream_type_t)stream));
}
unsigned int AudioSystem::getInputFramesLost(audio_io_handle_t ioHandle) {
@@ -455,10 +457,10 @@
bool AudioSystem::routedToA2dpOutput(int streamType) {
switch(streamType) {
- case MUSIC:
- case VOICE_CALL:
- case BLUETOOTH_SCO:
- case SYSTEM:
+ case AUDIO_STREAM_MUSIC:
+ case AUDIO_STREAM_VOICE_CALL:
+ case AUDIO_STREAM_BLUETOOTH_SCO:
+ case AUDIO_STREAM_SYSTEM:
return true;
default:
return false;
@@ -497,9 +499,9 @@
return gAudioPolicyService;
}
-status_t AudioSystem::setDeviceConnectionState(audio_devices device,
- device_connection_state state,
- const char *device_address)
+status_t AudioSystem::setDeviceConnectionState(audio_devices_t device,
+ audio_policy_dev_state_t state,
+ const char *device_address)
{
const sp<IAudioPolicyService>& aps = AudioSystem::get_audio_policy_service();
if (aps == 0) return PERMISSION_DENIED;
@@ -507,11 +509,11 @@
return aps->setDeviceConnectionState(device, state, device_address);
}
-AudioSystem::device_connection_state AudioSystem::getDeviceConnectionState(audio_devices device,
+audio_policy_dev_state_t AudioSystem::getDeviceConnectionState(audio_devices_t device,
const char *device_address)
{
const sp<IAudioPolicyService>& aps = AudioSystem::get_audio_policy_service();
- if (aps == 0) return DEVICE_STATE_UNAVAILABLE;
+ if (aps == 0) return AUDIO_POLICY_DEVICE_STATE_UNAVAILABLE;
return aps->getDeviceConnectionState(device, device_address);
}
@@ -531,26 +533,26 @@
return aps->setRingerMode(mode, mask);
}
-status_t AudioSystem::setForceUse(force_use usage, forced_config config)
+status_t AudioSystem::setForceUse(audio_policy_force_use_t usage, audio_policy_forced_cfg_t config)
{
const sp<IAudioPolicyService>& aps = AudioSystem::get_audio_policy_service();
if (aps == 0) return PERMISSION_DENIED;
return aps->setForceUse(usage, config);
}
-AudioSystem::forced_config AudioSystem::getForceUse(force_use usage)
+audio_policy_forced_cfg_t AudioSystem::getForceUse(audio_policy_force_use_t usage)
{
const sp<IAudioPolicyService>& aps = AudioSystem::get_audio_policy_service();
- if (aps == 0) return FORCE_NONE;
+ if (aps == 0) return AUDIO_POLICY_FORCE_NONE;
return aps->getForceUse(usage);
}
-audio_io_handle_t AudioSystem::getOutput(stream_type stream,
+audio_io_handle_t AudioSystem::getOutput(audio_stream_type_t stream,
uint32_t samplingRate,
uint32_t format,
uint32_t channels,
- output_flags flags)
+ audio_policy_output_flags_t flags)
{
audio_io_handle_t output = 0;
// Do not use stream to output map cache if the direct output
@@ -561,9 +563,9 @@
// be reworked for proper operation with direct outputs. This code is too specific
// to the first use case we want to cover (Voice Recognition and Voice Dialer over
// Bluetooth SCO).
- if ((flags & AudioSystem::OUTPUT_FLAG_DIRECT) == 0 &&
- ((stream != AudioSystem::VOICE_CALL && stream != AudioSystem::BLUETOOTH_SCO) ||
- channels != AudioSystem::CHANNEL_OUT_MONO ||
+ if ((flags & AUDIO_POLICY_OUTPUT_FLAG_DIRECT) == 0 &&
+ ((stream != AUDIO_STREAM_VOICE_CALL && stream != AUDIO_STREAM_BLUETOOTH_SCO) ||
+ channels != AUDIO_CHANNEL_OUT_MONO ||
(samplingRate != 8000 && samplingRate != 16000))) {
Mutex::Autolock _l(gLock);
output = AudioSystem::gStreamOutputMap.valueFor(stream);
@@ -573,7 +575,7 @@
const sp<IAudioPolicyService>& aps = AudioSystem::get_audio_policy_service();
if (aps == 0) return 0;
output = aps->getOutput(stream, samplingRate, format, channels, flags);
- if ((flags & AudioSystem::OUTPUT_FLAG_DIRECT) == 0) {
+ if ((flags & AUDIO_POLICY_OUTPUT_FLAG_DIRECT) == 0) {
Mutex::Autolock _l(gLock);
AudioSystem::gStreamOutputMap.add(stream, output);
}
@@ -582,7 +584,7 @@
}
status_t AudioSystem::startOutput(audio_io_handle_t output,
- AudioSystem::stream_type stream,
+ audio_stream_type_t stream,
int session)
{
const sp<IAudioPolicyService>& aps = AudioSystem::get_audio_policy_service();
@@ -591,7 +593,7 @@
}
status_t AudioSystem::stopOutput(audio_io_handle_t output,
- AudioSystem::stream_type stream,
+ audio_stream_type_t stream,
int session)
{
const sp<IAudioPolicyService>& aps = AudioSystem::get_audio_policy_service();
@@ -610,7 +612,7 @@
uint32_t samplingRate,
uint32_t format,
uint32_t channels,
- audio_in_acoustics acoustics)
+ audio_in_acoustics_t acoustics)
{
const sp<IAudioPolicyService>& aps = AudioSystem::get_audio_policy_service();
if (aps == 0) return 0;
@@ -638,7 +640,7 @@
aps->releaseInput(input);
}
-status_t AudioSystem::initStreamVolume(stream_type stream,
+status_t AudioSystem::initStreamVolume(audio_stream_type_t stream,
int indexMin,
int indexMax)
{
@@ -647,28 +649,28 @@
return aps->initStreamVolume(stream, indexMin, indexMax);
}
-status_t AudioSystem::setStreamVolumeIndex(stream_type stream, int index)
+status_t AudioSystem::setStreamVolumeIndex(audio_stream_type_t stream, int index)
{
const sp<IAudioPolicyService>& aps = AudioSystem::get_audio_policy_service();
if (aps == 0) return PERMISSION_DENIED;
return aps->setStreamVolumeIndex(stream, index);
}
-status_t AudioSystem::getStreamVolumeIndex(stream_type stream, int *index)
+status_t AudioSystem::getStreamVolumeIndex(audio_stream_type_t stream, int *index)
{
const sp<IAudioPolicyService>& aps = AudioSystem::get_audio_policy_service();
if (aps == 0) return PERMISSION_DENIED;
return aps->getStreamVolumeIndex(stream, index);
}
-uint32_t AudioSystem::getStrategyForStream(AudioSystem::stream_type stream)
+uint32_t AudioSystem::getStrategyForStream(audio_stream_type_t stream)
{
const sp<IAudioPolicyService>& aps = AudioSystem::get_audio_policy_service();
if (aps == 0) return 0;
return aps->getStrategyForStream(stream);
}
-uint32_t AudioSystem::getDevicesForStream(AudioSystem::stream_type stream)
+uint32_t AudioSystem::getDevicesForStream(audio_stream_type_t stream)
{
const sp<IAudioPolicyService>& aps = AudioSystem::get_audio_policy_service();
if (aps == 0) return 0;
@@ -717,276 +719,5 @@
LOGW("AudioPolicyService server died!");
}
-// ---------------------------------------------------------------------------
-
-
-// use emulated popcount optimization
-// http://www.df.lth.se/~john_e/gems/gem002d.html
-uint32_t AudioSystem::popCount(uint32_t u)
-{
- u = ((u&0x55555555) + ((u>>1)&0x55555555));
- u = ((u&0x33333333) + ((u>>2)&0x33333333));
- u = ((u&0x0f0f0f0f) + ((u>>4)&0x0f0f0f0f));
- u = ((u&0x00ff00ff) + ((u>>8)&0x00ff00ff));
- u = ( u&0x0000ffff) + (u>>16);
- return u;
-}
-
-bool AudioSystem::isOutputDevice(audio_devices device)
-{
- if ((popCount(device) == 1 ) &&
- ((device & ~AudioSystem::DEVICE_OUT_ALL) == 0)) {
- return true;
- } else {
- return false;
- }
-}
-
-bool AudioSystem::isInputDevice(audio_devices device)
-{
- if ((popCount(device) == 1 ) &&
- ((device & ~AudioSystem::DEVICE_IN_ALL) == 0)) {
- return true;
- } else {
- return false;
- }
-}
-
-bool AudioSystem::isA2dpDevice(audio_devices device)
-{
- if ((popCount(device) == 1 ) &&
- (device & (AudioSystem::DEVICE_OUT_BLUETOOTH_A2DP |
- AudioSystem::DEVICE_OUT_BLUETOOTH_A2DP_HEADPHONES |
- AudioSystem::DEVICE_OUT_BLUETOOTH_A2DP_SPEAKER))) {
- return true;
- } else {
- return false;
- }
-}
-
-bool AudioSystem::isBluetoothScoDevice(audio_devices device)
-{
- if ((popCount(device) == 1 ) &&
- (device & (AudioSystem::DEVICE_OUT_BLUETOOTH_SCO |
- AudioSystem::DEVICE_OUT_BLUETOOTH_SCO_HEADSET |
- AudioSystem::DEVICE_OUT_BLUETOOTH_SCO_CARKIT |
- AudioSystem::DEVICE_IN_BLUETOOTH_SCO_HEADSET))) {
- return true;
- } else {
- return false;
- }
-}
-
-bool AudioSystem::isLowVisibility(stream_type stream)
-{
- if (stream == AudioSystem::SYSTEM ||
- stream == AudioSystem::NOTIFICATION ||
- stream == AudioSystem::RING) {
- return true;
- } else {
- return false;
- }
-}
-
-bool AudioSystem::isInputChannel(uint32_t channel)
-{
- if ((channel & ~AudioSystem::CHANNEL_IN_ALL) == 0) {
- return true;
- } else {
- return false;
- }
-}
-
-bool AudioSystem::isOutputChannel(uint32_t channel)
-{
- if ((channel & ~AudioSystem::CHANNEL_OUT_ALL) == 0) {
- return true;
- } else {
- return false;
- }
-}
-
-bool AudioSystem::isValidFormat(uint32_t format)
-{
- switch (format & MAIN_FORMAT_MASK) {
- case PCM:
- case MP3:
- case AMR_NB:
- case AMR_WB:
- case AAC:
- case HE_AAC_V1:
- case HE_AAC_V2:
- case VORBIS:
- return true;
- default:
- return false;
- }
-}
-
-bool AudioSystem::isLinearPCM(uint32_t format)
-{
- switch (format) {
- case PCM_16_BIT:
- case PCM_8_BIT:
- return true;
- default:
- return false;
- }
-}
-
-//------------------------- AudioParameter class implementation ---------------
-
-const char *AudioParameter::keyRouting = "routing";
-const char *AudioParameter::keySamplingRate = "sampling_rate";
-const char *AudioParameter::keyFormat = "format";
-const char *AudioParameter::keyChannels = "channels";
-const char *AudioParameter::keyFrameCount = "frame_count";
-const char *AudioParameter::keyInputSource = "input_source";
-
-AudioParameter::AudioParameter(const String8& keyValuePairs)
-{
- char *str = new char[keyValuePairs.length()+1];
- mKeyValuePairs = keyValuePairs;
-
- strcpy(str, keyValuePairs.string());
- char *pair = strtok(str, ";");
- while (pair != NULL) {
- if (strlen(pair) != 0) {
- size_t eqIdx = strcspn(pair, "=");
- String8 key = String8(pair, eqIdx);
- String8 value;
- if (eqIdx == strlen(pair)) {
- value = String8("");
- } else {
- value = String8(pair + eqIdx + 1);
- }
- if (mParameters.indexOfKey(key) < 0) {
- mParameters.add(key, value);
- } else {
- mParameters.replaceValueFor(key, value);
- }
- } else {
- LOGV("AudioParameter() cstor empty key value pair");
- }
- pair = strtok(NULL, ";");
- }
-
- delete[] str;
-}
-
-AudioParameter::~AudioParameter()
-{
- mParameters.clear();
-}
-
-String8 AudioParameter::toString()
-{
- String8 str = String8("");
-
- size_t size = mParameters.size();
- for (size_t i = 0; i < size; i++) {
- str += mParameters.keyAt(i);
- str += "=";
- str += mParameters.valueAt(i);
- if (i < (size - 1)) str += ";";
- }
- return str;
-}
-
-status_t AudioParameter::add(const String8& key, const String8& value)
-{
- if (mParameters.indexOfKey(key) < 0) {
- mParameters.add(key, value);
- return NO_ERROR;
- } else {
- mParameters.replaceValueFor(key, value);
- return ALREADY_EXISTS;
- }
-}
-
-status_t AudioParameter::addInt(const String8& key, const int value)
-{
- char str[12];
- if (snprintf(str, 12, "%d", value) > 0) {
- String8 str8 = String8(str);
- return add(key, str8);
- } else {
- return BAD_VALUE;
- }
-}
-
-status_t AudioParameter::addFloat(const String8& key, const float value)
-{
- char str[23];
- if (snprintf(str, 23, "%.10f", value) > 0) {
- String8 str8 = String8(str);
- return add(key, str8);
- } else {
- return BAD_VALUE;
- }
-}
-
-status_t AudioParameter::remove(const String8& key)
-{
- if (mParameters.indexOfKey(key) >= 0) {
- mParameters.removeItem(key);
- return NO_ERROR;
- } else {
- return BAD_VALUE;
- }
-}
-
-status_t AudioParameter::get(const String8& key, String8& value)
-{
- if (mParameters.indexOfKey(key) >= 0) {
- value = mParameters.valueFor(key);
- return NO_ERROR;
- } else {
- return BAD_VALUE;
- }
-}
-
-status_t AudioParameter::getInt(const String8& key, int& value)
-{
- String8 str8;
- status_t result = get(key, str8);
- value = 0;
- if (result == NO_ERROR) {
- int val;
- if (sscanf(str8.string(), "%d", &val) == 1) {
- value = val;
- } else {
- result = INVALID_OPERATION;
- }
- }
- return result;
-}
-
-status_t AudioParameter::getFloat(const String8& key, float& value)
-{
- String8 str8;
- status_t result = get(key, str8);
- value = 0;
- if (result == NO_ERROR) {
- float val;
- if (sscanf(str8.string(), "%f", &val) == 1) {
- value = val;
- } else {
- result = INVALID_OPERATION;
- }
- }
- return result;
-}
-
-status_t AudioParameter::getAt(size_t index, String8& key, String8& value)
-{
- if (mParameters.size() > index) {
- key = mParameters.keyAt(index);
- value = mParameters.valueAt(index);
- return NO_ERROR;
- } else {
- return BAD_VALUE;
- }
-}
}; // namespace android
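Most of the removals above are helpers that now come from <cutils/bitops.h> and <hardware/audio.h> (popcount(), audio_is_*()). The deleted popCount() is the classic parallel bit-count from the linked gem page; a standalone equivalent is kept here for reference, since several call sites above now rely on popcount() giving the same answer:

    #include <cassert>
    #include <cstdint>

    // Parallel ("SWAR") bit count, same technique as the removed AudioSystem::popCount.
    static uint32_t popCount32(uint32_t u) {
        u = (u & 0x55555555) + ((u >> 1) & 0x55555555);   // sums of adjacent bits
        u = (u & 0x33333333) + ((u >> 2) & 0x33333333);   // 4-bit sums
        u = (u & 0x0f0f0f0f) + ((u >> 4) & 0x0f0f0f0f);   // 8-bit sums
        u = (u & 0x00ff00ff) + ((u >> 8) & 0x00ff00ff);   // 16-bit sums
        return (u & 0x0000ffff) + (u >> 16);              // final 32-bit sum
    }

    int main() {
        assert(popCount32(0) == 0);
        assert(popCount32(0xF0F0F0F0u) == 16);
        // The device/channel checks above rely on "exactly one bit set".
        assert(popCount32(1u << 7) == 1);
        return 0;
    }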
diff --git a/media/libmedia/AudioTrack.cpp b/media/libmedia/AudioTrack.cpp
index 66e11d2..2673df9 100644
--- a/media/libmedia/AudioTrack.cpp
+++ b/media/libmedia/AudioTrack.cpp
@@ -37,6 +37,11 @@
#include <utils/Timers.h>
#include <utils/Atomic.h>
+#include <cutils/bitops.h>
+
+#include <hardware/audio.h>
+#include <hardware/audio_policy.h>
+
#define LIKELY( exp ) (__builtin_expect( (exp) != 0, true ))
#define UNLIKELY( exp ) (__builtin_expect( (exp) != 0, false ))
@@ -165,39 +170,41 @@
}
// handle default values first.
- if (streamType == AudioSystem::DEFAULT) {
- streamType = AudioSystem::MUSIC;
+ if (streamType == AUDIO_STREAM_DEFAULT) {
+ streamType = AUDIO_STREAM_MUSIC;
}
if (sampleRate == 0) {
sampleRate = afSampleRate;
}
// these below should probably come from the audioFlinger too...
if (format == 0) {
- format = AudioSystem::PCM_16_BIT;
+ format = AUDIO_FORMAT_PCM_16_BIT;
}
if (channels == 0) {
- channels = AudioSystem::CHANNEL_OUT_STEREO;
+ channels = AUDIO_CHANNEL_OUT_STEREO;
}
// validate parameters
- if (!AudioSystem::isValidFormat(format)) {
+ if (!audio_is_valid_format(format)) {
LOGE("Invalid format");
return BAD_VALUE;
}
// force direct flag if format is not linear PCM
- if (!AudioSystem::isLinearPCM(format)) {
- flags |= AudioSystem::OUTPUT_FLAG_DIRECT;
+ if (!audio_is_linear_pcm(format)) {
+ flags |= AUDIO_POLICY_OUTPUT_FLAG_DIRECT;
}
- if (!AudioSystem::isOutputChannel(channels)) {
+ if (!audio_is_output_channel(channels)) {
LOGE("Invalid channel mask");
return BAD_VALUE;
}
- uint32_t channelCount = AudioSystem::popCount(channels);
+ uint32_t channelCount = popcount(channels);
- audio_io_handle_t output = AudioSystem::getOutput((AudioSystem::stream_type)streamType,
- sampleRate, format, channels, (AudioSystem::output_flags)flags);
+ audio_io_handle_t output = AudioSystem::getOutput(
+ (audio_stream_type_t)streamType,
+ sampleRate, format, channels,
+ (audio_policy_output_flags_t)flags);
if (output == 0) {
LOGE("Could not get audio output for stream type %d", streamType);
@@ -290,8 +297,8 @@
int AudioTrack::frameSize() const
{
- if (AudioSystem::isLinearPCM(mFormat)) {
- return channelCount()*((format() == AudioSystem::PCM_8_BIT) ? sizeof(uint8_t) : sizeof(int16_t));
+ if (audio_is_linear_pcm(mFormat)) {
+ return channelCount()*((format() == AUDIO_FORMAT_PCM_8_BIT) ? sizeof(uint8_t) : sizeof(int16_t));
} else {
return sizeof(uint8_t);
}
@@ -673,8 +680,8 @@
// must be called with mLock held
audio_io_handle_t AudioTrack::getOutput_l()
{
- return AudioSystem::getOutput((AudioSystem::stream_type)mStreamType,
- mCblk->sampleRate, mFormat, mChannels, (AudioSystem::output_flags)mFlags);
+ return AudioSystem::getOutput((audio_stream_type_t)mStreamType,
+ mCblk->sampleRate, mFormat, mChannels, (audio_policy_output_flags_t)mFlags);
}
int AudioTrack::getSessionId()
@@ -727,7 +734,7 @@
}
mNotificationFramesAct = mNotificationFramesReq;
- if (!AudioSystem::isLinearPCM(format)) {
+ if (!audio_is_linear_pcm(format)) {
if (sharedBuffer != 0) {
frameCount = sharedBuffer->size();
}
@@ -923,8 +930,8 @@
audioBuffer->channelCount = mChannelCount;
audioBuffer->frameCount = framesReq;
audioBuffer->size = framesReq * cblk->frameSize;
- if (AudioSystem::isLinearPCM(mFormat)) {
- audioBuffer->format = AudioSystem::PCM_16_BIT;
+ if (audio_is_linear_pcm(mFormat)) {
+ audioBuffer->format = AUDIO_FORMAT_PCM_16_BIT;
} else {
audioBuffer->format = mFormat;
}
@@ -982,7 +989,7 @@
size_t toWrite;
- if (mFormat == AudioSystem::PCM_8_BIT && !(mFlags & AudioSystem::OUTPUT_FLAG_DIRECT)) {
+ if (mFormat == AUDIO_FORMAT_PCM_8_BIT && !(mFlags & AUDIO_POLICY_OUTPUT_FLAG_DIRECT)) {
// Divide capacity by 2 to take expansion into account
toWrite = audioBuffer.size>>1;
// 8 to 16 bit conversion
@@ -1085,7 +1092,7 @@
// Divide buffer size by 2 to take into account the expansion
// due to 8 to 16 bit conversion: the callback must fill only half
// of the destination buffer
- if (mFormat == AudioSystem::PCM_8_BIT && !(mFlags & AudioSystem::OUTPUT_FLAG_DIRECT)) {
+ if (mFormat == AUDIO_FORMAT_PCM_8_BIT && !(mFlags & AUDIO_POLICY_OUTPUT_FLAG_DIRECT)) {
audioBuffer.size >>= 1;
}
@@ -1104,7 +1111,7 @@
}
if (writtenSize > reqSize) writtenSize = reqSize;
- if (mFormat == AudioSystem::PCM_8_BIT && !(mFlags & AudioSystem::OUTPUT_FLAG_DIRECT)) {
+ if (mFormat == AUDIO_FORMAT_PCM_8_BIT && !(mFlags & AUDIO_POLICY_OUTPUT_FLAG_DIRECT)) {
// 8 to 16 bit conversion
const int8_t *src = audioBuffer.i8 + writtenSize-1;
int count = writtenSize;
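The AUDIO_FORMAT_PCM_8_BIT branches above keep the existing behaviour: when an 8-bit track is not direct, the callback fills only half of the buffer with unsigned 8-bit samples, which are then expanded in place to signed 16-bit. A minimal standalone sketch of that in-place expansion, walking backwards so no source byte is overwritten before it is read; the function name and signature are illustrative:

    #include <cstddef>
    #include <cstdint>

    // Expand 'count' unsigned 8-bit PCM samples stored at the start of 'buf'
    // into signed 16-bit samples filling the same buffer. The caller's buffer
    // must therefore be at least count * 2 bytes long.
    static void expandU8ToI16InPlace(void* buf, size_t count) {
        if (count == 0) return;
        const uint8_t* src = static_cast<const uint8_t*>(buf) + count - 1;
        int16_t* dst = static_cast<int16_t*>(buf) + count - 1;
        while (count--) {
            // XOR with 0x80 re-centers the unsigned sample around zero;
            // the shift scales it to the 16-bit range.
            *dst-- = static_cast<int16_t>((*src-- ^ 0x80) << 8);
        }
    }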
diff --git a/media/libmedia/IAudioPolicyService.cpp b/media/libmedia/IAudioPolicyService.cpp
index b89a278..88a9ae0 100644
--- a/media/libmedia/IAudioPolicyService.cpp
+++ b/media/libmedia/IAudioPolicyService.cpp
@@ -25,6 +25,8 @@
#include <media/IAudioPolicyService.h>
+#include <hardware/audio.h>
+
namespace android {
enum {
@@ -62,8 +64,8 @@
}
virtual status_t setDeviceConnectionState(
- AudioSystem::audio_devices device,
- AudioSystem::device_connection_state state,
+ audio_devices_t device,
+ audio_policy_dev_state_t state,
const char *device_address)
{
Parcel data, reply;
@@ -75,8 +77,8 @@
return static_cast <status_t> (reply.readInt32());
}
- virtual AudioSystem::device_connection_state getDeviceConnectionState(
- AudioSystem::audio_devices device,
+ virtual audio_policy_dev_state_t getDeviceConnectionState(
+ audio_devices_t device,
const char *device_address)
{
Parcel data, reply;
@@ -84,7 +86,7 @@
data.writeInt32(static_cast <uint32_t>(device));
data.writeCString(device_address);
remote()->transact(GET_DEVICE_CONNECTION_STATE, data, &reply);
- return static_cast <AudioSystem::device_connection_state>(reply.readInt32());
+ return static_cast <audio_policy_dev_state_t>(reply.readInt32());
}
virtual status_t setPhoneState(int state)
@@ -106,7 +108,7 @@
return static_cast <status_t> (reply.readInt32());
}
- virtual status_t setForceUse(AudioSystem::force_use usage, AudioSystem::forced_config config)
+ virtual status_t setForceUse(audio_policy_force_use_t usage, audio_policy_forced_cfg_t config)
{
Parcel data, reply;
data.writeInterfaceToken(IAudioPolicyService::getInterfaceDescriptor());
@@ -116,21 +118,21 @@
return static_cast <status_t> (reply.readInt32());
}
- virtual AudioSystem::forced_config getForceUse(AudioSystem::force_use usage)
+ virtual audio_policy_forced_cfg_t getForceUse(audio_policy_force_use_t usage)
{
Parcel data, reply;
data.writeInterfaceToken(IAudioPolicyService::getInterfaceDescriptor());
data.writeInt32(static_cast <uint32_t>(usage));
remote()->transact(GET_FORCE_USE, data, &reply);
- return static_cast <AudioSystem::forced_config> (reply.readInt32());
+ return static_cast <audio_policy_forced_cfg_t> (reply.readInt32());
}
virtual audio_io_handle_t getOutput(
- AudioSystem::stream_type stream,
+ audio_stream_type_t stream,
uint32_t samplingRate,
uint32_t format,
uint32_t channels,
- AudioSystem::output_flags flags)
+ audio_policy_output_flags_t flags)
{
Parcel data, reply;
data.writeInterfaceToken(IAudioPolicyService::getInterfaceDescriptor());
@@ -144,7 +146,7 @@
}
virtual status_t startOutput(audio_io_handle_t output,
- AudioSystem::stream_type stream,
+ audio_stream_type_t stream,
int session)
{
Parcel data, reply;
@@ -157,7 +159,7 @@
}
virtual status_t stopOutput(audio_io_handle_t output,
- AudioSystem::stream_type stream,
+ audio_stream_type_t stream,
int session)
{
Parcel data, reply;
@@ -182,7 +184,7 @@
uint32_t samplingRate,
uint32_t format,
uint32_t channels,
- AudioSystem::audio_in_acoustics acoustics)
+ audio_in_acoustics_t acoustics)
{
Parcel data, reply;
data.writeInterfaceToken(IAudioPolicyService::getInterfaceDescriptor());
@@ -221,7 +223,7 @@
remote()->transact(RELEASE_INPUT, data, &reply);
}
- virtual status_t initStreamVolume(AudioSystem::stream_type stream,
+ virtual status_t initStreamVolume(audio_stream_type_t stream,
int indexMin,
int indexMax)
{
@@ -234,7 +236,7 @@
return static_cast <status_t> (reply.readInt32());
}
- virtual status_t setStreamVolumeIndex(AudioSystem::stream_type stream, int index)
+ virtual status_t setStreamVolumeIndex(audio_stream_type_t stream, int index)
{
Parcel data, reply;
data.writeInterfaceToken(IAudioPolicyService::getInterfaceDescriptor());
@@ -244,7 +246,7 @@
return static_cast <status_t> (reply.readInt32());
}
- virtual status_t getStreamVolumeIndex(AudioSystem::stream_type stream, int *index)
+ virtual status_t getStreamVolumeIndex(audio_stream_type_t stream, int *index)
{
Parcel data, reply;
data.writeInterfaceToken(IAudioPolicyService::getInterfaceDescriptor());
@@ -255,7 +257,7 @@
return static_cast <status_t> (reply.readInt32());
}
- virtual uint32_t getStrategyForStream(AudioSystem::stream_type stream)
+ virtual uint32_t getStrategyForStream(audio_stream_type_t stream)
{
Parcel data, reply;
data.writeInterfaceToken(IAudioPolicyService::getInterfaceDescriptor());
@@ -264,7 +266,7 @@
return reply.readInt32();
}
- virtual uint32_t getDevicesForStream(AudioSystem::stream_type stream)
+ virtual uint32_t getDevicesForStream(audio_stream_type_t stream)
{
Parcel data, reply;
data.writeInterfaceToken(IAudioPolicyService::getInterfaceDescriptor());
@@ -330,10 +332,10 @@
switch(code) {
case SET_DEVICE_CONNECTION_STATE: {
CHECK_INTERFACE(IAudioPolicyService, data, reply);
- AudioSystem::audio_devices device =
- static_cast <AudioSystem::audio_devices>(data.readInt32());
- AudioSystem::device_connection_state state =
- static_cast <AudioSystem::device_connection_state>(data.readInt32());
+ audio_devices_t device =
+ static_cast <audio_devices_t>(data.readInt32());
+ audio_policy_dev_state_t state =
+ static_cast <audio_policy_dev_state_t>(data.readInt32());
const char *device_address = data.readCString();
reply->writeInt32(static_cast<uint32_t> (setDeviceConnectionState(device,
state,
@@ -343,8 +345,8 @@
case GET_DEVICE_CONNECTION_STATE: {
CHECK_INTERFACE(IAudioPolicyService, data, reply);
- AudioSystem::audio_devices device =
- static_cast<AudioSystem::audio_devices> (data.readInt32());
+ audio_devices_t device =
+ static_cast<audio_devices_t> (data.readInt32());
const char *device_address = data.readCString();
reply->writeInt32(static_cast<uint32_t> (getDeviceConnectionState(device,
device_address)));
@@ -367,29 +369,29 @@
case SET_FORCE_USE: {
CHECK_INTERFACE(IAudioPolicyService, data, reply);
- AudioSystem::force_use usage = static_cast <AudioSystem::force_use>(data.readInt32());
- AudioSystem::forced_config config =
- static_cast <AudioSystem::forced_config>(data.readInt32());
+ audio_policy_force_use_t usage = static_cast <audio_policy_force_use_t>(data.readInt32());
+ audio_policy_forced_cfg_t config =
+ static_cast <audio_policy_forced_cfg_t>(data.readInt32());
reply->writeInt32(static_cast <uint32_t>(setForceUse(usage, config)));
return NO_ERROR;
} break;
case GET_FORCE_USE: {
CHECK_INTERFACE(IAudioPolicyService, data, reply);
- AudioSystem::force_use usage = static_cast <AudioSystem::force_use>(data.readInt32());
+ audio_policy_force_use_t usage = static_cast <audio_policy_force_use_t>(data.readInt32());
reply->writeInt32(static_cast <uint32_t>(getForceUse(usage)));
return NO_ERROR;
} break;
case GET_OUTPUT: {
CHECK_INTERFACE(IAudioPolicyService, data, reply);
- AudioSystem::stream_type stream =
- static_cast <AudioSystem::stream_type>(data.readInt32());
+ audio_stream_type_t stream =
+ static_cast <audio_stream_type_t>(data.readInt32());
uint32_t samplingRate = data.readInt32();
uint32_t format = data.readInt32();
uint32_t channels = data.readInt32();
- AudioSystem::output_flags flags =
- static_cast <AudioSystem::output_flags>(data.readInt32());
+ audio_policy_output_flags_t flags =
+ static_cast <audio_policy_output_flags_t>(data.readInt32());
audio_io_handle_t output = getOutput(stream,
samplingRate,
@@ -406,7 +408,7 @@
uint32_t stream = data.readInt32();
int session = data.readInt32();
reply->writeInt32(static_cast <uint32_t>(startOutput(output,
- (AudioSystem::stream_type)stream,
+ (audio_stream_type_t)stream,
session)));
return NO_ERROR;
} break;
@@ -417,7 +419,7 @@
uint32_t stream = data.readInt32();
int session = data.readInt32();
reply->writeInt32(static_cast <uint32_t>(stopOutput(output,
- (AudioSystem::stream_type)stream,
+ (audio_stream_type_t)stream,
session)));
return NO_ERROR;
} break;
@@ -435,8 +437,8 @@
uint32_t samplingRate = data.readInt32();
uint32_t format = data.readInt32();
uint32_t channels = data.readInt32();
- AudioSystem::audio_in_acoustics acoustics =
- static_cast <AudioSystem::audio_in_acoustics>(data.readInt32());
+ audio_in_acoustics_t acoustics =
+ static_cast <audio_in_acoustics_t>(data.readInt32());
audio_io_handle_t input = getInput(inputSource,
samplingRate,
format,
@@ -469,8 +471,8 @@
case INIT_STREAM_VOLUME: {
CHECK_INTERFACE(IAudioPolicyService, data, reply);
- AudioSystem::stream_type stream =
- static_cast <AudioSystem::stream_type>(data.readInt32());
+ audio_stream_type_t stream =
+ static_cast <audio_stream_type_t>(data.readInt32());
int indexMin = data.readInt32();
int indexMax = data.readInt32();
reply->writeInt32(static_cast <uint32_t>(initStreamVolume(stream, indexMin,indexMax)));
@@ -479,8 +481,8 @@
case SET_STREAM_VOLUME: {
CHECK_INTERFACE(IAudioPolicyService, data, reply);
- AudioSystem::stream_type stream =
- static_cast <AudioSystem::stream_type>(data.readInt32());
+ audio_stream_type_t stream =
+ static_cast <audio_stream_type_t>(data.readInt32());
int index = data.readInt32();
reply->writeInt32(static_cast <uint32_t>(setStreamVolumeIndex(stream, index)));
return NO_ERROR;
@@ -488,8 +490,8 @@
case GET_STREAM_VOLUME: {
CHECK_INTERFACE(IAudioPolicyService, data, reply);
- AudioSystem::stream_type stream =
- static_cast <AudioSystem::stream_type>(data.readInt32());
+ audio_stream_type_t stream =
+ static_cast <audio_stream_type_t>(data.readInt32());
int index;
status_t status = getStreamVolumeIndex(stream, &index);
reply->writeInt32(index);
@@ -499,16 +501,16 @@
case GET_STRATEGY_FOR_STREAM: {
CHECK_INTERFACE(IAudioPolicyService, data, reply);
- AudioSystem::stream_type stream =
- static_cast <AudioSystem::stream_type>(data.readInt32());
+ audio_stream_type_t stream =
+ static_cast <audio_stream_type_t>(data.readInt32());
reply->writeInt32(getStrategyForStream(stream));
return NO_ERROR;
} break;
case GET_DEVICES_FOR_STREAM: {
CHECK_INTERFACE(IAudioPolicyService, data, reply);
- AudioSystem::stream_type stream =
- static_cast <AudioSystem::stream_type>(data.readInt32());
+ audio_stream_type_t stream =
+ static_cast <audio_stream_type_t>(data.readInt32());
reply->writeInt32(static_cast <int>(getDevicesForStream(stream)));
return NO_ERROR;
} break;
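Nothing changes on the wire in IAudioPolicyService: every enum, whether under its old name or its new C-style name, still crosses Binder as a 32-bit integer and is re-typed with static_cast on the far side. A reduced sketch of that pattern with a toy parcel and illustrative enum values (the real Parcel and transaction plumbing are assumed, not shown):

    #include <cassert>
    #include <cstdint>
    #include <vector>

    // Illustrative stand-ins for audio_stream_type_t and android::Parcel.
    enum audio_stream_type_t { AUDIO_STREAM_VOICE_CALL = 0, AUDIO_STREAM_MUSIC = 3 };

    struct ToyParcel {
        std::vector<int32_t> ints;
        size_t pos = 0;
        void writeInt32(int32_t v) { ints.push_back(v); }
        int32_t readInt32() { return ints[pos++]; }
    };

    int main() {
        // Proxy side: the strongly typed enum is written as a plain int32.
        ToyParcel data;
        audio_stream_type_t stream = AUDIO_STREAM_MUSIC;
        data.writeInt32(static_cast<int32_t>(stream));

        // Stub side (onTransact): the int32 is cast back to the enum type.
        audio_stream_type_t received =
                static_cast<audio_stream_type_t>(data.readInt32());
        assert(received == AUDIO_STREAM_MUSIC);
        return 0;
    }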
diff --git a/media/libmedia/JetPlayer.cpp b/media/libmedia/JetPlayer.cpp
index ee9e1d8..88157d2 100644
--- a/media/libmedia/JetPlayer.cpp
+++ b/media/libmedia/JetPlayer.cpp
@@ -96,10 +96,10 @@
// create the output AudioTrack
mAudioTrack = new AudioTrack();
- mAudioTrack->set(AudioSystem::MUSIC, //TODO parametrize this
+ mAudioTrack->set(AUDIO_STREAM_MUSIC, //TODO parametrize this
pLibConfig->sampleRate,
1, // format = PCM 16bits per sample,
- (pLibConfig->numChannels == 2) ? AudioSystem::CHANNEL_OUT_STEREO : AudioSystem::CHANNEL_OUT_MONO,
+ (pLibConfig->numChannels == 2) ? AUDIO_CHANNEL_OUT_STEREO : AUDIO_CHANNEL_OUT_MONO,
mTrackBufferSize,
0);
diff --git a/media/libmedia/ToneGenerator.cpp b/media/libmedia/ToneGenerator.cpp
index 82fe2d4..9f1b3d6 100644
--- a/media/libmedia/ToneGenerator.cpp
+++ b/media/libmedia/ToneGenerator.cpp
@@ -1026,8 +1026,8 @@
mpAudioTrack->set(mStreamType,
0,
- AudioSystem::PCM_16_BIT,
- AudioSystem::CHANNEL_OUT_MONO,
+ AUDIO_FORMAT_PCM_16_BIT,
+ AUDIO_CHANNEL_OUT_MONO,
0,
0,
audioCallback,
diff --git a/media/libmedia/Visualizer.cpp b/media/libmedia/Visualizer.cpp
index 43571cf..366707c 100644
--- a/media/libmedia/Visualizer.cpp
+++ b/media/libmedia/Visualizer.cpp
@@ -24,6 +24,8 @@
#include <sys/types.h>
#include <limits.h>
+#include <cutils/bitops.h>
+
#include <media/Visualizer.h>
extern void fixed_fft_real(int n, int32_t *v);
@@ -127,7 +129,7 @@
{
if (size > VISUALIZER_CAPTURE_SIZE_MAX ||
size < VISUALIZER_CAPTURE_SIZE_MIN ||
- AudioSystem::popCount(size) != 1) {
+ popcount(size) != 1) {
return BAD_VALUE;
}
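The Visualizer check above accepts only power-of-two capture sizes; popcount(size) == 1 is true exactly for those values. The same test can be written without the bitops helper, which makes the intent explicit:

    #include <cassert>
    #include <cstdint>

    // A non-zero value with exactly one bit set is a power of two.
    // x & (x - 1) clears the lowest set bit, so it is zero iff at most one bit was set.
    static bool isPowerOfTwo(uint32_t x) {
        return x != 0 && (x & (x - 1)) == 0;
    }

    int main() {
        assert(isPowerOfTwo(128));      // a typical capture size
        assert(isPowerOfTwo(1024));
        assert(!isPowerOfTwo(0));
        assert(!isPowerOfTwo(768));
        return 0;
    }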
diff --git a/media/libmedia/mediaplayer.cpp b/media/libmedia/mediaplayer.cpp
index e80e742..9daa80f 100644
--- a/media/libmedia/mediaplayer.cpp
+++ b/media/libmedia/mediaplayer.cpp
@@ -37,6 +37,8 @@
#include <utils/KeyedVector.h>
#include <utils/String8.h>
+#include <hardware/audio.h>
+
namespace android {
MediaPlayer::MediaPlayer()
@@ -45,7 +47,7 @@
mListener = NULL;
mCookie = NULL;
mDuration = -1;
- mStreamType = AudioSystem::MUSIC;
+ mStreamType = AUDIO_STREAM_MUSIC;
mCurrentPosition = -1;
mSeekPosition = -1;
mCurrentState = MEDIA_PLAYER_IDLE;
diff --git a/media/libmediaplayerservice/MediaPlayerService.cpp b/media/libmediaplayerservice/MediaPlayerService.cpp
index 6b97708..9dd353b 100644
--- a/media/libmediaplayerservice/MediaPlayerService.cpp
+++ b/media/libmediaplayerservice/MediaPlayerService.cpp
@@ -53,6 +53,8 @@
#include <media/AudioTrack.h>
#include <media/MemoryLeakTrackUtil.h>
+#include <hardware/audio.h>
+
#include <private/android_filesystem_config.h>
#include "MediaRecorderClient.h"
@@ -1209,7 +1211,7 @@
mSessionId(sessionId) {
LOGV("AudioOutput(%d)", sessionId);
mTrack = 0;
- mStreamType = AudioSystem::MUSIC;
+ mStreamType = AUDIO_STREAM_MUSIC;
mLeftVolume = 1.0;
mRightVolume = 1.0;
mLatency = 0;
@@ -1319,7 +1321,7 @@
mStreamType,
sampleRate,
format,
- (channelCount == 2) ? AudioSystem::CHANNEL_OUT_STEREO : AudioSystem::CHANNEL_OUT_MONO,
+ (channelCount == 2) ? AUDIO_CHANNEL_OUT_STEREO : AUDIO_CHANNEL_OUT_MONO,
frameCount,
0 /* flags */,
CallbackWrapper,
@@ -1331,7 +1333,7 @@
mStreamType,
sampleRate,
format,
- (channelCount == 2) ? AudioSystem::CHANNEL_OUT_STEREO : AudioSystem::CHANNEL_OUT_MONO,
+ (channelCount == 2) ? AUDIO_CHANNEL_OUT_STEREO : AUDIO_CHANNEL_OUT_MONO,
frameCount,
0,
NULL,
diff --git a/media/libmediaplayerservice/MediaPlayerService.h b/media/libmediaplayerservice/MediaPlayerService.h
index 5539a37..31b518e 100644
--- a/media/libmediaplayerservice/MediaPlayerService.h
+++ b/media/libmediaplayerservice/MediaPlayerService.h
@@ -30,6 +30,8 @@
#include <media/MediaPlayerInterface.h>
#include <media/Metadata.h>
+#include <hardware/audio.h>
+
namespace android {
class IMediaRecorder;
@@ -130,7 +132,7 @@
virtual ssize_t bufferSize() const { return frameSize() * mFrameCount; }
virtual ssize_t frameCount() const { return mFrameCount; }
virtual ssize_t channelCount() const { return (ssize_t)mChannelCount; }
- virtual ssize_t frameSize() const { return ssize_t(mChannelCount * ((mFormat == AudioSystem::PCM_16_BIT)?sizeof(int16_t):sizeof(u_int8_t))); }
+ virtual ssize_t frameSize() const { return ssize_t(mChannelCount * ((mFormat == AUDIO_FORMAT_PCM_16_BIT)?sizeof(int16_t):sizeof(u_int8_t))); }
virtual uint32_t latency() const;
virtual float msecsPerFrame() const;
virtual status_t getPosition(uint32_t *position);
diff --git a/media/libmediaplayerservice/MediaRecorderClient.cpp b/media/libmediaplayerservice/MediaRecorderClient.cpp
index 1a1780c..5a47384 100644
--- a/media/libmediaplayerservice/MediaRecorderClient.cpp
+++ b/media/libmediaplayerservice/MediaRecorderClient.cpp
@@ -35,6 +35,8 @@
#include <media/AudioTrack.h>
+#include <hardware/audio.h>
+
#include "MediaRecorderClient.h"
#include "MediaPlayerService.h"
@@ -102,7 +104,7 @@
LOGE("recorder is not initialized");
return NO_INIT;
}
- return mRecorder->setAudioSource((audio_source)as);
+ return mRecorder->setAudioSource((audio_source_t)as);
}
status_t MediaRecorderClient::setOutputFormat(int of)
diff --git a/media/libmediaplayerservice/MidiFile.cpp b/media/libmediaplayerservice/MidiFile.cpp
index 1b0b05f..37a3db3 100644
--- a/media/libmediaplayerservice/MidiFile.cpp
+++ b/media/libmediaplayerservice/MidiFile.cpp
@@ -30,6 +30,8 @@
#include <sys/types.h>
#include <sys/stat.h>
+#include <hardware/audio.h>
+
#include "MidiFile.h"
#ifdef HAVE_GETTID
@@ -58,7 +60,7 @@
MidiFile::MidiFile() :
mEasData(NULL), mEasHandle(NULL), mAudioBuffer(NULL),
mPlayTime(-1), mDuration(-1), mState(EAS_STATE_ERROR),
- mStreamType(AudioSystem::MUSIC), mLoop(false), mExit(false),
+ mStreamType(AUDIO_STREAM_MUSIC), mLoop(false), mExit(false),
mPaused(false), mRender(false), mTid(-1)
{
LOGV("constructor");
@@ -423,7 +425,7 @@
}
status_t MidiFile::createOutputTrack() {
- if (mAudioSink->open(pLibConfig->sampleRate, pLibConfig->numChannels, AudioSystem::PCM_16_BIT, 2) != NO_ERROR) {
+ if (mAudioSink->open(pLibConfig->sampleRate, pLibConfig->numChannels, AUDIO_FORMAT_PCM_16_BIT, 2) != NO_ERROR) {
LOGE("mAudioSink open failed");
return ERROR_OPEN_FAILED;
}
diff --git a/media/libmediaplayerservice/StagefrightRecorder.cpp b/media/libmediaplayerservice/StagefrightRecorder.cpp
index e3dfabb..01fbea1 100644
--- a/media/libmediaplayerservice/StagefrightRecorder.cpp
+++ b/media/libmediaplayerservice/StagefrightRecorder.cpp
@@ -46,6 +46,8 @@
#include <ctype.h>
#include <unistd.h>
+#include <hardware/audio.h>
+
#include "ARTPWriter.h"
namespace android {
@@ -64,7 +66,7 @@
StagefrightRecorder::StagefrightRecorder()
: mWriter(NULL), mWriterAux(NULL),
mOutputFd(-1), mOutputFdAux(-1),
- mAudioSource(AUDIO_SOURCE_LIST_END),
+ mAudioSource(AUDIO_SOURCE_CNT),
mVideoSource(VIDEO_SOURCE_LIST_END),
mStarted(false) {
@@ -82,10 +84,10 @@
return OK;
}
-status_t StagefrightRecorder::setAudioSource(audio_source as) {
+status_t StagefrightRecorder::setAudioSource(audio_source_t as) {
LOGV("setAudioSource: %d", as);
if (as < AUDIO_SOURCE_DEFAULT ||
- as >= AUDIO_SOURCE_LIST_END) {
+ as >= AUDIO_SOURCE_CNT) {
LOGE("Invalid audio source: %d", as);
return BAD_VALUE;
}
@@ -800,7 +802,7 @@
mStarted = true;
uint32_t params = IMediaPlayerService::kBatteryDataCodecStarted;
- if (mAudioSource != AUDIO_SOURCE_LIST_END) {
+ if (mAudioSource != AUDIO_SOURCE_CNT) {
params |= IMediaPlayerService::kBatteryDataTrackAudio;
}
if (mVideoSource != VIDEO_SOURCE_LIST_END) {
@@ -874,7 +876,7 @@
mOutputFormat == OUTPUT_FORMAT_AAC_ADTS);
CHECK(mAudioEncoder == AUDIO_ENCODER_AAC);
- CHECK(mAudioSource != AUDIO_SOURCE_LIST_END);
+ CHECK(mAudioSource != AUDIO_SOURCE_CNT);
CHECK(0 == "AACWriter is not implemented yet");
@@ -900,7 +902,7 @@
}
}
- if (mAudioSource >= AUDIO_SOURCE_LIST_END) {
+ if (mAudioSource >= AUDIO_SOURCE_CNT) {
LOGE("Invalid audio source: %d", mAudioSource);
return BAD_VALUE;
}
@@ -933,9 +935,9 @@
status_t StagefrightRecorder::startRTPRecording() {
CHECK_EQ(mOutputFormat, OUTPUT_FORMAT_RTP_AVP);
- if ((mAudioSource != AUDIO_SOURCE_LIST_END
+ if ((mAudioSource != AUDIO_SOURCE_CNT
&& mVideoSource != VIDEO_SOURCE_LIST_END)
- || (mAudioSource == AUDIO_SOURCE_LIST_END
+ || (mAudioSource == AUDIO_SOURCE_CNT
&& mVideoSource == VIDEO_SOURCE_LIST_END)) {
// Must have exactly one source.
return BAD_VALUE;
@@ -947,7 +949,7 @@
sp<MediaSource> source;
- if (mAudioSource != AUDIO_SOURCE_LIST_END) {
+ if (mAudioSource != AUDIO_SOURCE_CNT) {
source = createAudioSource();
} else {
@@ -975,7 +977,7 @@
sp<MediaWriter> writer = new MPEG2TSWriter(mOutputFd);
- if (mAudioSource != AUDIO_SOURCE_LIST_END) {
+ if (mAudioSource != AUDIO_SOURCE_CNT) {
if (mAudioEncoder != AUDIO_ENCODER_AAC) {
return ERROR_UNSUPPORTED;
}
@@ -1383,7 +1385,7 @@
// Audio source is added at the end if it exists.
// This helps make sure that the "recording" sound is suppressed for
// camcorder applications in the recorded files.
- if (!mCaptureTimeLapse && (mAudioSource != AUDIO_SOURCE_LIST_END)) {
+ if (!mCaptureTimeLapse && (mAudioSource != AUDIO_SOURCE_CNT)) {
err = setupAudioEncoder(writer);
if (err != OK) return err;
*totalBitRate += mAudioBitRate;
@@ -1504,7 +1506,7 @@
mStarted = false;
uint32_t params = 0;
- if (mAudioSource != AUDIO_SOURCE_LIST_END) {
+ if (mAudioSource != AUDIO_SOURCE_CNT) {
params |= IMediaPlayerService::kBatteryDataTrackAudio;
}
if (mVideoSource != VIDEO_SOURCE_LIST_END) {
@@ -1555,7 +1557,7 @@
mStarted = false;
uint32_t params = 0;
- if (mAudioSource != AUDIO_SOURCE_LIST_END) {
+ if (mAudioSource != AUDIO_SOURCE_CNT) {
params |= IMediaPlayerService::kBatteryDataTrackAudio;
}
if (mVideoSource != VIDEO_SOURCE_LIST_END) {
@@ -1581,7 +1583,7 @@
stop();
// No audio or video source by default
- mAudioSource = AUDIO_SOURCE_LIST_END;
+ mAudioSource = AUDIO_SOURCE_CNT;
mVideoSource = VIDEO_SOURCE_LIST_END;
// Default parameters
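With AUDIO_SOURCE_LIST_END renamed to AUDIO_SOURCE_CNT, the recorder keeps using the count value both as the "no audio source selected" sentinel and as the exclusive upper bound of the validity check in setAudioSource(). A reduced sketch of that pattern; the enum values here are abbreviated, not the full list from <hardware/audio.h>:

    #include <cassert>

    // Abbreviated stand-in for audio_source_t.
    enum audio_source_t {
        AUDIO_SOURCE_DEFAULT = 0,
        AUDIO_SOURCE_MIC,
        AUDIO_SOURCE_CAMCORDER,
        AUDIO_SOURCE_CNT,     // one past the last valid source; doubles as "unset"
    };

    static bool isValidAudioSource(audio_source_t as) {
        return as >= AUDIO_SOURCE_DEFAULT && as < AUDIO_SOURCE_CNT;
    }

    int main() {
        audio_source_t selected = AUDIO_SOURCE_CNT;   // reset(): no source chosen yet
        assert(!isValidAudioSource(selected));
        selected = AUDIO_SOURCE_MIC;
        assert(isValidAudioSource(selected));         // would pass setAudioSource()
        return 0;
    }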
diff --git a/media/libmediaplayerservice/StagefrightRecorder.h b/media/libmediaplayerservice/StagefrightRecorder.h
index 2c440c1..3d463ea 100644
--- a/media/libmediaplayerservice/StagefrightRecorder.h
+++ b/media/libmediaplayerservice/StagefrightRecorder.h
@@ -22,6 +22,8 @@
#include <camera/CameraParameters.h>
#include <utils/String8.h>
+#include <hardware/audio.h>
+
namespace android {
class Camera;
@@ -39,7 +41,7 @@
virtual ~StagefrightRecorder();
virtual status_t init();
- virtual status_t setAudioSource(audio_source as);
+ virtual status_t setAudioSource(audio_source_t as);
virtual status_t setVideoSource(video_source vs);
virtual status_t setOutputFormat(output_format of);
virtual status_t setAudioEncoder(audio_encoder ae);
@@ -69,7 +71,7 @@
sp<MediaWriter> mWriter, mWriterAux;
sp<AudioSource> mAudioSourceNode;
- audio_source mAudioSource;
+ audio_source_t mAudioSource;
video_source mVideoSource;
output_format mOutputFormat;
audio_encoder mAudioEncoder;
diff --git a/media/libstagefright/AudioPlayer.cpp b/media/libstagefright/AudioPlayer.cpp
index fcea848..69f9c23 100644
--- a/media/libstagefright/AudioPlayer.cpp
+++ b/media/libstagefright/AudioPlayer.cpp
@@ -110,7 +110,7 @@
if (mAudioSink.get() != NULL) {
status_t err = mAudioSink->open(
- mSampleRate, numChannels, AudioSystem::PCM_16_BIT,
+ mSampleRate, numChannels, AUDIO_FORMAT_PCM_16_BIT,
DEFAULT_AUDIOSINK_BUFFERCOUNT,
&AudioPlayer::AudioSinkCallback, this);
if (err != OK) {
@@ -132,10 +132,10 @@
mAudioSink->start();
} else {
mAudioTrack = new AudioTrack(
- AudioSystem::MUSIC, mSampleRate, AudioSystem::PCM_16_BIT,
+ AUDIO_STREAM_MUSIC, mSampleRate, AUDIO_FORMAT_PCM_16_BIT,
(numChannels == 2)
- ? AudioSystem::CHANNEL_OUT_STEREO
- : AudioSystem::CHANNEL_OUT_MONO,
+ ? AUDIO_CHANNEL_OUT_STEREO
+ : AUDIO_CHANNEL_OUT_MONO,
0, 0, &AudioCallback, this, 0);
if ((err = mAudioTrack->initCheck()) != OK) {
diff --git a/media/libstagefright/AudioSource.cpp b/media/libstagefright/AudioSource.cpp
index bbdec02..99c3682 100644
--- a/media/libstagefright/AudioSource.cpp
+++ b/media/libstagefright/AudioSource.cpp
@@ -60,8 +60,8 @@
AudioRecord::RECORD_NS_ENABLE |
AudioRecord::RECORD_IIR_ENABLE;
mRecord = new AudioRecord(
- inputSource, sampleRate, AudioSystem::PCM_16_BIT,
- channels > 1? AudioSystem::CHANNEL_IN_STEREO: AudioSystem::CHANNEL_IN_MONO,
+ inputSource, sampleRate, AUDIO_FORMAT_PCM_16_BIT,
+ channels > 1? AUDIO_CHANNEL_IN_STEREO: AUDIO_CHANNEL_IN_MONO,
4 * kMaxBufferSize / sizeof(int16_t), /* Enable ping-pong buffers */
flags,
AudioRecordCallbackFunction,
diff --git a/media/libstagefright/SampleTable.cpp b/media/libstagefright/SampleTable.cpp
index 08db902..ef4d3d0 100644
--- a/media/libstagefright/SampleTable.cpp
+++ b/media/libstagefright/SampleTable.cpp
@@ -453,6 +453,10 @@
}
if (left == mNumSampleSizes) {
+ if (flags == kFlagAfter) {
+ return ERROR_OUT_OF_RANGE;
+ }
+
--left;
}
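The added check above closes a boundary hole in the sample lookup: when the search runs past the last sample and the caller asked for the sample after the requested position, there is nothing valid to return, so the function now reports ERROR_OUT_OF_RANGE instead of clamping to the last sample. A reduced sketch of that end-of-table handling over a plain sorted array (the real code uses a binary search and more flag handling):

    #include <cstddef>
    #include <cstdint>

    enum SeekFlag { kFlagBefore, kFlagAfter, kFlagClosest };
    static const int OK = 0;
    static const int ERROR_OUT_OF_RANGE = -1;

    // Locate the first entry at or after 'target' (linear stand-in for the
    // binary search); mirrors the end-of-table handling from the hunk above.
    static int findEntryAt(const uint32_t* times, size_t n, uint32_t target,
                           SeekFlag flag, size_t* index) {
        if (n == 0) return ERROR_OUT_OF_RANGE;
        size_t left = 0;
        while (left < n && times[left] < target) {
            ++left;
        }
        if (left == n) {
            if (flag == kFlagAfter) {
                return ERROR_OUT_OF_RANGE;   // nothing after the target exists
            }
            --left;                          // otherwise clamp to the last entry
        }
        *index = left;
        return OK;
    }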
diff --git a/media/libstagefright/omx/tests/OMXHarness.cpp b/media/libstagefright/omx/tests/OMXHarness.cpp
index 4f28855..54c0d77 100644
--- a/media/libstagefright/omx/tests/OMXHarness.cpp
+++ b/media/libstagefright/omx/tests/OMXHarness.cpp
@@ -487,14 +487,12 @@
{ "audio/3gpp",
"file:///sdcard/media_api/video/H263_500_AMRNB_12.3gp" },
{ "audio/amr-wb",
- "file:///sdcard/media_api/music_perf/AMRWB/"
- "NIN_AMR-WB_15.85kbps_16kbps.amr" },
+ "file:///sdcard/media_api/music/"
+ "AI_AMR-WB_12.65kbps(13kbps)_16khz_mono_NMC.awb" },
{ "audio/mp4a-latm",
- "file:///sdcard/media_api/music_perf/AAC/"
- "WC_AAC_80kbps_32khz_Stereo_1pCBR_SSE.mp4" },
+ "file:///sdcard/media_api/video/H264_AAC.3gp" },
{ "audio/mpeg",
- "file:///sdcard/media_api/music_perf/MP3/"
- "WC_256kbps_44.1khz_mono_CBR_DPA.mp3" }
+ "file:///sdcard/media_api/music/MP3CBR.mp3" }
};
for (size_t i = 0; i < sizeof(kMimeToURL) / sizeof(kMimeToURL[0]); ++i) {
@@ -626,8 +624,10 @@
requestedSeekTimeUs, requestedSeekTimeUs / 1E6);
}
- MediaBuffer *buffer;
- options.setSeekTo(requestedSeekTimeUs);
+ MediaBuffer *buffer = NULL;
+ options.setSeekTo(
+ requestedSeekTimeUs, MediaSource::ReadOptions::SEEK_NEXT_SYNC);
+
if (seekSource->read(&buffer, &options) != OK) {
CHECK_EQ(buffer, NULL);
actualSeekTimeUs = -1;
diff --git a/services/audioflinger/A2dpAudioInterface.cpp b/services/audioflinger/A2dpAudioInterface.cpp
deleted file mode 100644
index d926cb1..0000000
--- a/services/audioflinger/A2dpAudioInterface.cpp
+++ /dev/null
@@ -1,498 +0,0 @@
-/*
- * Copyright (C) 2008 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#include <math.h>
-
-//#define LOG_NDEBUG 0
-#define LOG_TAG "A2dpAudioInterface"
-#include <utils/Log.h>
-#include <utils/String8.h>
-
-#include "A2dpAudioInterface.h"
-#include "audio/liba2dp.h"
-#include <hardware_legacy/power.h>
-
-namespace android {
-
-static const char *sA2dpWakeLock = "A2dpOutputStream";
-#define MAX_WRITE_RETRIES 5
-
-// ----------------------------------------------------------------------------
-
-//AudioHardwareInterface* A2dpAudioInterface::createA2dpInterface()
-//{
-// AudioHardwareInterface* hw = 0;
-//
-// hw = AudioHardwareInterface::create();
-// LOGD("new A2dpAudioInterface(hw: %p)", hw);
-// hw = new A2dpAudioInterface(hw);
-// return hw;
-//}
-
-A2dpAudioInterface::A2dpAudioInterface(AudioHardwareInterface* hw) :
- mOutput(0), mHardwareInterface(hw), mBluetoothEnabled(true), mSuspended(false)
-{
-}
-
-A2dpAudioInterface::~A2dpAudioInterface()
-{
- closeOutputStream((AudioStreamOut *)mOutput);
- delete mHardwareInterface;
-}
-
-status_t A2dpAudioInterface::initCheck()
-{
- if (mHardwareInterface == 0) return NO_INIT;
- return mHardwareInterface->initCheck();
-}
-
-AudioStreamOut* A2dpAudioInterface::openOutputStream(
- uint32_t devices, int *format, uint32_t *channels, uint32_t *sampleRate, status_t *status)
-{
- if (!AudioSystem::isA2dpDevice((AudioSystem::audio_devices)devices)) {
- LOGV("A2dpAudioInterface::openOutputStream() open HW device: %x", devices);
- return mHardwareInterface->openOutputStream(devices, format, channels, sampleRate, status);
- }
-
- status_t err = 0;
-
- // only one output stream allowed
- if (mOutput) {
- if (status)
- *status = -1;
- return NULL;
- }
-
- // create new output stream
- A2dpAudioStreamOut* out = new A2dpAudioStreamOut();
- if ((err = out->set(devices, format, channels, sampleRate)) == NO_ERROR) {
- mOutput = out;
- mOutput->setBluetoothEnabled(mBluetoothEnabled);
- mOutput->setSuspended(mSuspended);
- } else {
- delete out;
- }
-
- if (status)
- *status = err;
- return mOutput;
-}
-
-void A2dpAudioInterface::closeOutputStream(AudioStreamOut* out) {
- if (mOutput == 0 || mOutput != out) {
- mHardwareInterface->closeOutputStream(out);
- }
- else {
- delete mOutput;
- mOutput = 0;
- }
-}
-
-
-AudioStreamIn* A2dpAudioInterface::openInputStream(
- uint32_t devices, int *format, uint32_t *channels, uint32_t *sampleRate, status_t *status,
- AudioSystem::audio_in_acoustics acoustics)
-{
- return mHardwareInterface->openInputStream(devices, format, channels, sampleRate, status, acoustics);
-}
-
-void A2dpAudioInterface::closeInputStream(AudioStreamIn* in)
-{
- return mHardwareInterface->closeInputStream(in);
-}
-
-status_t A2dpAudioInterface::setMode(int mode)
-{
- return mHardwareInterface->setMode(mode);
-}
-
-status_t A2dpAudioInterface::setMicMute(bool state)
-{
- return mHardwareInterface->setMicMute(state);
-}
-
-status_t A2dpAudioInterface::getMicMute(bool* state)
-{
- return mHardwareInterface->getMicMute(state);
-}
-
-status_t A2dpAudioInterface::setParameters(const String8& keyValuePairs)
-{
- AudioParameter param = AudioParameter(keyValuePairs);
- String8 value;
- String8 key;
- status_t status = NO_ERROR;
-
- LOGV("setParameters() %s", keyValuePairs.string());
-
- key = "bluetooth_enabled";
- if (param.get(key, value) == NO_ERROR) {
- mBluetoothEnabled = (value == "true");
- if (mOutput) {
- mOutput->setBluetoothEnabled(mBluetoothEnabled);
- }
- param.remove(key);
- }
- key = String8("A2dpSuspended");
- if (param.get(key, value) == NO_ERROR) {
- mSuspended = (value == "true");
- if (mOutput) {
- mOutput->setSuspended(mSuspended);
- }
- param.remove(key);
- }
-
- if (param.size()) {
- status_t hwStatus = mHardwareInterface->setParameters(param.toString());
- if (status == NO_ERROR) {
- status = hwStatus;
- }
- }
-
- return status;
-}
-
-String8 A2dpAudioInterface::getParameters(const String8& keys)
-{
- AudioParameter param = AudioParameter(keys);
- AudioParameter a2dpParam = AudioParameter();
- String8 value;
- String8 key;
-
- key = "bluetooth_enabled";
- if (param.get(key, value) == NO_ERROR) {
- value = mBluetoothEnabled ? "true" : "false";
- a2dpParam.add(key, value);
- param.remove(key);
- }
- key = "A2dpSuspended";
- if (param.get(key, value) == NO_ERROR) {
- value = mSuspended ? "true" : "false";
- a2dpParam.add(key, value);
- param.remove(key);
- }
-
- String8 keyValuePairs = a2dpParam.toString();
-
- if (param.size()) {
- if (keyValuePairs != "") {
- keyValuePairs += ";";
- }
- keyValuePairs += mHardwareInterface->getParameters(param.toString());
- }
-
- LOGV("getParameters() %s", keyValuePairs.string());
- return keyValuePairs;
-}
-
-size_t A2dpAudioInterface::getInputBufferSize(uint32_t sampleRate, int format, int channelCount)
-{
- return mHardwareInterface->getInputBufferSize(sampleRate, format, channelCount);
-}
-
-status_t A2dpAudioInterface::setVoiceVolume(float v)
-{
- return mHardwareInterface->setVoiceVolume(v);
-}
-
-status_t A2dpAudioInterface::setMasterVolume(float v)
-{
- return mHardwareInterface->setMasterVolume(v);
-}
-
-status_t A2dpAudioInterface::dump(int fd, const Vector<String16>& args)
-{
- return mHardwareInterface->dumpState(fd, args);
-}
-
-// ----------------------------------------------------------------------------
-
-A2dpAudioInterface::A2dpAudioStreamOut::A2dpAudioStreamOut() :
- mFd(-1), mStandby(true), mStartCount(0), mRetryCount(0), mData(NULL),
- // assume BT enabled to start, this is safe because its only the
- // enabled->disabled transition we are worried about
- mBluetoothEnabled(true), mDevice(0), mClosing(false), mSuspended(false)
-{
- // use any address by default
- strcpy(mA2dpAddress, "00:00:00:00:00:00");
- init();
-}
-
-status_t A2dpAudioInterface::A2dpAudioStreamOut::set(
- uint32_t device, int *pFormat, uint32_t *pChannels, uint32_t *pRate)
-{
- int lFormat = pFormat ? *pFormat : 0;
- uint32_t lChannels = pChannels ? *pChannels : 0;
- uint32_t lRate = pRate ? *pRate : 0;
-
- LOGD("A2dpAudioStreamOut::set %x, %d, %d, %d\n", device, lFormat, lChannels, lRate);
-
- // fix up defaults
- if (lFormat == 0) lFormat = format();
- if (lChannels == 0) lChannels = channels();
- if (lRate == 0) lRate = sampleRate();
-
- // check values
- if ((lFormat != format()) ||
- (lChannels != channels()) ||
- (lRate != sampleRate())){
- if (pFormat) *pFormat = format();
- if (pChannels) *pChannels = channels();
- if (pRate) *pRate = sampleRate();
- return BAD_VALUE;
- }
-
- if (pFormat) *pFormat = lFormat;
- if (pChannels) *pChannels = lChannels;
- if (pRate) *pRate = lRate;
-
- mDevice = device;
- mBufferDurationUs = ((bufferSize() * 1000 )/ frameSize() / sampleRate()) * 1000;
- return NO_ERROR;
-}
-
-A2dpAudioInterface::A2dpAudioStreamOut::~A2dpAudioStreamOut()
-{
- LOGV("A2dpAudioStreamOut destructor");
- close();
- LOGV("A2dpAudioStreamOut destructor returning from close()");
-}
-
-ssize_t A2dpAudioInterface::A2dpAudioStreamOut::write(const void* buffer, size_t bytes)
-{
- status_t status = -1;
- {
- Mutex::Autolock lock(mLock);
-
- size_t remaining = bytes;
-
- if (!mBluetoothEnabled || mClosing || mSuspended) {
- LOGV("A2dpAudioStreamOut::write(), but bluetooth disabled \
- mBluetoothEnabled %d, mClosing %d, mSuspended %d",
- mBluetoothEnabled, mClosing, mSuspended);
- goto Error;
- }
-
- if (mStandby) {
- acquire_wake_lock (PARTIAL_WAKE_LOCK, sA2dpWakeLock);
- mStandby = false;
- mLastWriteTime = systemTime();
- }
-
- status = init();
- if (status < 0)
- goto Error;
-
- int retries = MAX_WRITE_RETRIES;
- while (remaining > 0 && retries) {
- status = a2dp_write(mData, buffer, remaining);
- if (status < 0) {
- LOGE("a2dp_write failed err: %d\n", status);
- goto Error;
- }
- if (status == 0) {
- retries--;
- }
- remaining -= status;
- buffer = (char *)buffer + status;
- }
-
- // if A2DP sink runs abnormally fast, sleep a little so that audioflinger mixer thread
- // does no spin and starve other threads.
- // NOTE: It is likely that the A2DP headset is being disconnected
- nsecs_t now = systemTime();
- if ((uint32_t)ns2us(now - mLastWriteTime) < (mBufferDurationUs >> 2)) {
- LOGV("A2DP sink runs too fast");
- usleep(mBufferDurationUs - (uint32_t)ns2us(now - mLastWriteTime));
- }
- mLastWriteTime = now;
- return bytes;
-
- }
-Error:
-
- standby();
-
- // Simulate audio output timing in case of error
- usleep(mBufferDurationUs);
-
- return status;
-}
-
-status_t A2dpAudioInterface::A2dpAudioStreamOut::init()
-{
- if (!mData) {
- status_t status = a2dp_init(44100, 2, &mData);
- if (status < 0) {
- LOGE("a2dp_init failed err: %d\n", status);
- mData = NULL;
- return status;
- }
- a2dp_set_sink(mData, mA2dpAddress);
- }
-
- return 0;
-}
-
-status_t A2dpAudioInterface::A2dpAudioStreamOut::standby()
-{
- Mutex::Autolock lock(mLock);
- return standby_l();
-}
-
-status_t A2dpAudioInterface::A2dpAudioStreamOut::standby_l()
-{
- int result = NO_ERROR;
-
- if (!mStandby) {
- LOGV_IF(mClosing || !mBluetoothEnabled, "Standby skip stop: closing %d enabled %d",
- mClosing, mBluetoothEnabled);
- if (!mClosing && mBluetoothEnabled) {
- result = a2dp_stop(mData);
- }
- release_wake_lock(sA2dpWakeLock);
- mStandby = true;
- }
-
- return result;
-}
-
-status_t A2dpAudioInterface::A2dpAudioStreamOut::setParameters(const String8& keyValuePairs)
-{
- AudioParameter param = AudioParameter(keyValuePairs);
- String8 value;
- String8 key = String8("a2dp_sink_address");
- status_t status = NO_ERROR;
- int device;
- LOGV("A2dpAudioStreamOut::setParameters() %s", keyValuePairs.string());
-
- if (param.get(key, value) == NO_ERROR) {
- if (value.length() != strlen("00:00:00:00:00:00")) {
- status = BAD_VALUE;
- } else {
- setAddress(value.string());
- }
- param.remove(key);
- }
- key = String8("closing");
- if (param.get(key, value) == NO_ERROR) {
- mClosing = (value == "true");
- if (mClosing) {
- standby();
- }
- param.remove(key);
- }
- key = AudioParameter::keyRouting;
- if (param.getInt(key, device) == NO_ERROR) {
- if (AudioSystem::isA2dpDevice((AudioSystem::audio_devices)device)) {
- mDevice = device;
- status = NO_ERROR;
- } else {
- status = BAD_VALUE;
- }
- param.remove(key);
- }
-
- if (param.size()) {
- status = BAD_VALUE;
- }
- return status;
-}
-
-String8 A2dpAudioInterface::A2dpAudioStreamOut::getParameters(const String8& keys)
-{
- AudioParameter param = AudioParameter(keys);
- String8 value;
- String8 key = String8("a2dp_sink_address");
-
- if (param.get(key, value) == NO_ERROR) {
- value = mA2dpAddress;
- param.add(key, value);
- }
- key = AudioParameter::keyRouting;
- if (param.get(key, value) == NO_ERROR) {
- param.addInt(key, (int)mDevice);
- }
-
- LOGV("A2dpAudioStreamOut::getParameters() %s", param.toString().string());
- return param.toString();
-}
-
-status_t A2dpAudioInterface::A2dpAudioStreamOut::setAddress(const char* address)
-{
- Mutex::Autolock lock(mLock);
-
- if (strlen(address) != strlen("00:00:00:00:00:00"))
- return -EINVAL;
-
- strcpy(mA2dpAddress, address);
- if (mData)
- a2dp_set_sink(mData, mA2dpAddress);
-
- return NO_ERROR;
-}
-
-status_t A2dpAudioInterface::A2dpAudioStreamOut::setBluetoothEnabled(bool enabled)
-{
- LOGD("setBluetoothEnabled %d", enabled);
-
- Mutex::Autolock lock(mLock);
-
- mBluetoothEnabled = enabled;
- if (!enabled) {
- return close_l();
- }
- return NO_ERROR;
-}
-
-status_t A2dpAudioInterface::A2dpAudioStreamOut::setSuspended(bool onOff)
-{
- LOGV("setSuspended %d", onOff);
- mSuspended = onOff;
- standby();
- return NO_ERROR;
-}
-
-status_t A2dpAudioInterface::A2dpAudioStreamOut::close()
-{
- Mutex::Autolock lock(mLock);
- LOGV("A2dpAudioStreamOut::close() calling close_l()");
- return close_l();
-}
-
-status_t A2dpAudioInterface::A2dpAudioStreamOut::close_l()
-{
- standby_l();
- if (mData) {
- LOGV("A2dpAudioStreamOut::close_l() calling a2dp_cleanup(mData)");
- a2dp_cleanup(mData);
- mData = NULL;
- }
- return NO_ERROR;
-}
-
-status_t A2dpAudioInterface::A2dpAudioStreamOut::dump(int fd, const Vector<String16>& args)
-{
- return NO_ERROR;
-}
-
-status_t A2dpAudioInterface::A2dpAudioStreamOut::getRenderPosition(uint32_t *driverFrames)
-{
- //TODO: enable when supported by driver
- return INVALID_OPERATION;
-}
-
-}; // namespace android
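The stream removed above exposed a small key/value protocol through AudioParameter: "a2dp_sink_address" (which must be a full 17-character Bluetooth address), "closing" (which forces the stream into standby), and the standard routing key (which must name an A2DP device). A rough caller-side sketch of how such a parameter string could have been built — the key names come from the removed code, the address is only a placeholder:

    // Illustrative only: assemble the key/value string that
    // A2dpAudioStreamOut::setParameters() used to parse.
    AudioParameter param;
    param.add(String8("a2dp_sink_address"), String8("00:11:22:33:44:55")); // placeholder address
    param.addInt(String8(AudioParameter::keyRouting), AudioSystem::DEVICE_OUT_BLUETOOTH_A2DP);
    String8 kvPairs = param.toString();
    // out->setParameters(kvPairs) would then update the sink address and the route.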
diff --git a/services/audioflinger/A2dpAudioInterface.h b/services/audioflinger/A2dpAudioInterface.h
deleted file mode 100644
index dbe2c6a..0000000
--- a/services/audioflinger/A2dpAudioInterface.h
+++ /dev/null
@@ -1,138 +0,0 @@
-/*
- * Copyright (C) 2008 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef A2DP_AUDIO_HARDWARE_H
-#define A2DP_AUDIO_HARDWARE_H
-
-#include <stdint.h>
-#include <sys/types.h>
-
-#include <utils/threads.h>
-
-#include <hardware_legacy/AudioHardwareBase.h>
-
-
-namespace android {
-
-class A2dpAudioInterface : public AudioHardwareBase
-{
- class A2dpAudioStreamOut;
-
-public:
- A2dpAudioInterface(AudioHardwareInterface* hw);
- virtual ~A2dpAudioInterface();
- virtual status_t initCheck();
-
- virtual status_t setVoiceVolume(float volume);
- virtual status_t setMasterVolume(float volume);
-
- virtual status_t setMode(int mode);
-
- // mic mute
- virtual status_t setMicMute(bool state);
- virtual status_t getMicMute(bool* state);
-
- virtual status_t setParameters(const String8& keyValuePairs);
- virtual String8 getParameters(const String8& keys);
-
- virtual size_t getInputBufferSize(uint32_t sampleRate, int format, int channelCount);
-
- // create I/O streams
- virtual AudioStreamOut* openOutputStream(
- uint32_t devices,
- int *format=0,
- uint32_t *channels=0,
- uint32_t *sampleRate=0,
- status_t *status=0);
- virtual void closeOutputStream(AudioStreamOut* out);
-
- virtual AudioStreamIn* openInputStream(
- uint32_t devices,
- int *format,
- uint32_t *channels,
- uint32_t *sampleRate,
- status_t *status,
- AudioSystem::audio_in_acoustics acoustics);
- virtual void closeInputStream(AudioStreamIn* in);
-// static AudioHardwareInterface* createA2dpInterface();
-
-protected:
- virtual status_t dump(int fd, const Vector<String16>& args);
-
-private:
- class A2dpAudioStreamOut : public AudioStreamOut {
- public:
- A2dpAudioStreamOut();
- virtual ~A2dpAudioStreamOut();
- status_t set(uint32_t device,
- int *pFormat,
- uint32_t *pChannels,
- uint32_t *pRate);
- virtual uint32_t sampleRate() const { return 44100; }
- // SBC codec wants a multiple of 512
- virtual size_t bufferSize() const { return 512 * 20; }
- virtual uint32_t channels() const { return AudioSystem::CHANNEL_OUT_STEREO; }
- virtual int format() const { return AudioSystem::PCM_16_BIT; }
- virtual uint32_t latency() const { return ((1000*bufferSize())/frameSize())/sampleRate() + 200; }
- virtual status_t setVolume(float left, float right) { return INVALID_OPERATION; }
- virtual ssize_t write(const void* buffer, size_t bytes);
- status_t standby();
- virtual status_t dump(int fd, const Vector<String16>& args);
- virtual status_t setParameters(const String8& keyValuePairs);
- virtual String8 getParameters(const String8& keys);
- virtual status_t getRenderPosition(uint32_t *dspFrames);
-
- private:
- friend class A2dpAudioInterface;
- status_t init();
- status_t close();
- status_t close_l();
- status_t setAddress(const char* address);
- status_t setBluetoothEnabled(bool enabled);
- status_t setSuspended(bool onOff);
- status_t standby_l();
-
- private:
- int mFd;
- bool mStandby;
- int mStartCount;
- int mRetryCount;
- char mA2dpAddress[20];
- void* mData;
- Mutex mLock;
- bool mBluetoothEnabled;
- uint32_t mDevice;
- bool mClosing;
- bool mSuspended;
- nsecs_t mLastWriteTime;
- uint32_t mBufferDurationUs;
- };
-
- friend class A2dpAudioStreamOut;
-
- A2dpAudioStreamOut* mOutput;
- AudioHardwareInterface *mHardwareInterface;
- char mA2dpAddress[20];
- bool mBluetoothEnabled;
- bool mSuspended;
-};
-
-
-// ----------------------------------------------------------------------------
-
-}; // namespace android
-
-#endif // A2DP_AUDIO_HARDWARE_H
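The constant latency reported by the removed A2dpAudioStreamOut follows directly from the hard-coded values in this header; assuming the usual 4-byte frame for 16-bit stereo PCM, the arithmetic is:

    // bufferSize() = 512 * 20 = 10240 bytes, sampleRate() = 44100 Hz
    // frameSize()  = 2 channels * 2 bytes   = 4 bytes (16-bit stereo)
    // latency()    = ((1000 * 10240) / 4) / 44100 + 200
    //              = 2560000 / 44100 + 200
    //              = 58 + 200 = 258 ms   (integer division)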
diff --git a/services/audioflinger/Android.mk b/services/audioflinger/Android.mk
index 69a4adc..2222e8b 100644
--- a/services/audioflinger/Android.mk
+++ b/services/audioflinger/Android.mk
@@ -1,77 +1,5 @@
LOCAL_PATH:= $(call my-dir)
-#AUDIO_POLICY_TEST := true
-#ENABLE_AUDIO_DUMP := true
-
-include $(CLEAR_VARS)
-
-
-ifeq ($(AUDIO_POLICY_TEST),true)
- ENABLE_AUDIO_DUMP := true
-endif
-
-
-LOCAL_SRC_FILES:= \
- AudioHardwareGeneric.cpp \
- AudioHardwareStub.cpp \
- AudioHardwareInterface.cpp
-
-ifeq ($(ENABLE_AUDIO_DUMP),true)
- LOCAL_SRC_FILES += AudioDumpInterface.cpp
- LOCAL_CFLAGS += -DENABLE_AUDIO_DUMP
-endif
-
-LOCAL_SHARED_LIBRARIES := \
- libcutils \
- libutils \
- libbinder \
- libmedia \
- libhardware_legacy
-
-ifeq ($(strip $(BOARD_USES_GENERIC_AUDIO)),true)
- LOCAL_CFLAGS += -DGENERIC_AUDIO
-endif
-
-LOCAL_MODULE:= libaudiointerface
-
-ifeq ($(BOARD_HAVE_BLUETOOTH),true)
- LOCAL_SRC_FILES += A2dpAudioInterface.cpp
- LOCAL_SHARED_LIBRARIES += liba2dp
- LOCAL_CFLAGS += -DWITH_BLUETOOTH -DWITH_A2DP
- LOCAL_C_INCLUDES += $(call include-path-for, bluez)
-endif
-
-include $(BUILD_STATIC_LIBRARY)
-
-
-include $(CLEAR_VARS)
-
-LOCAL_SRC_FILES:= \
- AudioPolicyManagerBase.cpp
-
-LOCAL_SHARED_LIBRARIES := \
- libcutils \
- libutils \
- libmedia
-
-ifeq ($(TARGET_SIMULATOR),true)
- LOCAL_LDLIBS += -ldl
-else
- LOCAL_SHARED_LIBRARIES += libdl
-endif
-
-LOCAL_MODULE:= libaudiopolicybase
-
-ifeq ($(BOARD_HAVE_BLUETOOTH),true)
- LOCAL_CFLAGS += -DWITH_A2DP
-endif
-
-ifeq ($(AUDIO_POLICY_TEST),true)
- LOCAL_CFLAGS += -DAUDIO_POLICY_TEST
-endif
-
-include $(BUILD_STATIC_LIBRARY)
-
include $(CLEAR_VARS)
LOCAL_SRC_FILES:= \
@@ -87,15 +15,12 @@
libutils \
libbinder \
libmedia \
+ libhardware \
libhardware_legacy \
libeffects
-ifeq ($(strip $(BOARD_USES_GENERIC_AUDIO)),true)
- LOCAL_STATIC_LIBRARIES += libaudiointerface libaudiopolicybase
- LOCAL_CFLAGS += -DGENERIC_AUDIO
-else
- LOCAL_SHARED_LIBRARIES += libaudio libaudiopolicy
-endif
+LOCAL_STATIC_LIBRARIES := \
+ libmedia_helper
ifeq ($(TARGET_SIMULATOR),true)
LOCAL_LDLIBS += -ldl
@@ -105,15 +30,6 @@
LOCAL_MODULE:= libaudioflinger
-ifeq ($(BOARD_HAVE_BLUETOOTH),true)
- LOCAL_CFLAGS += -DWITH_BLUETOOTH -DWITH_A2DP
- LOCAL_SHARED_LIBRARIES += liba2dp
-endif
-
-ifeq ($(AUDIO_POLICY_TEST),true)
- LOCAL_CFLAGS += -DAUDIO_POLICY_TEST
-endif
-
ifeq ($(TARGET_SIMULATOR),true)
ifeq ($(HOST_OS),linux)
LOCAL_LDLIBS += -lrt -lpthread
diff --git a/services/audioflinger/AudioDumpInterface.cpp b/services/audioflinger/AudioDumpInterface.cpp
deleted file mode 100644
index 6c11114..0000000
--- a/services/audioflinger/AudioDumpInterface.cpp
+++ /dev/null
@@ -1,573 +0,0 @@
-/* //device/servers/AudioFlinger/AudioDumpInterface.cpp
-**
-** Copyright 2008, The Android Open Source Project
-**
-** Licensed under the Apache License, Version 2.0 (the "License");
-** you may not use this file except in compliance with the License.
-** You may obtain a copy of the License at
-**
-** http://www.apache.org/licenses/LICENSE-2.0
-**
-** Unless required by applicable law or agreed to in writing, software
-** distributed under the License is distributed on an "AS IS" BASIS,
-** WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-** See the License for the specific language governing permissions and
-** limitations under the License.
-*/
-
-#define LOG_TAG "AudioFlingerDump"
-//#define LOG_NDEBUG 0
-
-#include <stdint.h>
-#include <sys/types.h>
-#include <utils/Log.h>
-
-#include <stdlib.h>
-#include <unistd.h>
-
-#include "AudioDumpInterface.h"
-
-namespace android {
-
-// ----------------------------------------------------------------------------
-
-AudioDumpInterface::AudioDumpInterface(AudioHardwareInterface* hw)
- : mPolicyCommands(String8("")), mFileName(String8(""))
-{
- if(hw == 0) {
- LOGE("Dump construct hw = 0");
- }
- mFinalInterface = hw;
- LOGV("Constructor %p, mFinalInterface %p", this, mFinalInterface);
-}
-
-
-AudioDumpInterface::~AudioDumpInterface()
-{
- for (size_t i = 0; i < mOutputs.size(); i++) {
- closeOutputStream((AudioStreamOut *)mOutputs[i]);
- }
-
- for (size_t i = 0; i < mInputs.size(); i++) {
- closeInputStream((AudioStreamIn *)mInputs[i]);
- }
-
- if(mFinalInterface) delete mFinalInterface;
-}
-
-
-AudioStreamOut* AudioDumpInterface::openOutputStream(
- uint32_t devices, int *format, uint32_t *channels, uint32_t *sampleRate, status_t *status)
-{
- AudioStreamOut* outFinal = NULL;
- int lFormat = AudioSystem::PCM_16_BIT;
- uint32_t lChannels = AudioSystem::CHANNEL_OUT_STEREO;
- uint32_t lRate = 44100;
-
-
- outFinal = mFinalInterface->openOutputStream(devices, format, channels, sampleRate, status);
- if (outFinal != 0) {
- lFormat = outFinal->format();
- lChannels = outFinal->channels();
- lRate = outFinal->sampleRate();
- } else {
- if (format != 0) {
- if (*format != 0) {
- lFormat = *format;
- } else {
- *format = lFormat;
- }
- }
- if (channels != 0) {
- if (*channels != 0) {
- lChannels = *channels;
- } else {
- *channels = lChannels;
- }
- }
- if (sampleRate != 0) {
- if (*sampleRate != 0) {
- lRate = *sampleRate;
- } else {
- *sampleRate = lRate;
- }
- }
- if (status) *status = NO_ERROR;
- }
- LOGV("openOutputStream(), outFinal %p", outFinal);
-
- AudioStreamOutDump *dumOutput = new AudioStreamOutDump(this, mOutputs.size(), outFinal,
- devices, lFormat, lChannels, lRate);
- mOutputs.add(dumOutput);
-
- return dumOutput;
-}
-
-void AudioDumpInterface::closeOutputStream(AudioStreamOut* out)
-{
- AudioStreamOutDump *dumpOut = (AudioStreamOutDump *)out;
-
- if (mOutputs.indexOf(dumpOut) < 0) {
- LOGW("Attempt to close invalid output stream");
- return;
- }
-
- LOGV("closeOutputStream() output %p", out);
-
- dumpOut->standby();
- if (dumpOut->finalStream() != NULL) {
- mFinalInterface->closeOutputStream(dumpOut->finalStream());
- }
-
- mOutputs.remove(dumpOut);
- delete dumpOut;
-}
-
-AudioStreamIn* AudioDumpInterface::openInputStream(uint32_t devices, int *format, uint32_t *channels,
- uint32_t *sampleRate, status_t *status, AudioSystem::audio_in_acoustics acoustics)
-{
- AudioStreamIn* inFinal = NULL;
- int lFormat = AudioSystem::PCM_16_BIT;
- uint32_t lChannels = AudioSystem::CHANNEL_IN_MONO;
- uint32_t lRate = 8000;
-
- inFinal = mFinalInterface->openInputStream(devices, format, channels, sampleRate, status, acoustics);
- if (inFinal != 0) {
- lFormat = inFinal->format();
- lChannels = inFinal->channels();
- lRate = inFinal->sampleRate();
- } else {
- if (format != 0) {
- if (*format != 0) {
- lFormat = *format;
- } else {
- *format = lFormat;
- }
- }
- if (channels != 0) {
- if (*channels != 0) {
- lChannels = *channels;
- } else {
- *channels = lChannels;
- }
- }
- if (sampleRate != 0) {
- if (*sampleRate != 0) {
- lRate = *sampleRate;
- } else {
- *sampleRate = lRate;
- }
- }
- if (status) *status = NO_ERROR;
- }
- LOGV("openInputStream(), inFinal %p", inFinal);
-
- AudioStreamInDump *dumInput = new AudioStreamInDump(this, mInputs.size(), inFinal,
- devices, lFormat, lChannels, lRate);
- mInputs.add(dumInput);
-
- return dumInput;
-}
-void AudioDumpInterface::closeInputStream(AudioStreamIn* in)
-{
- AudioStreamInDump *dumpIn = (AudioStreamInDump *)in;
-
- if (mInputs.indexOf(dumpIn) < 0) {
- LOGW("Attempt to close invalid input stream");
- return;
- }
- dumpIn->standby();
- if (dumpIn->finalStream() != NULL) {
- mFinalInterface->closeInputStream(dumpIn->finalStream());
- }
-
- mInputs.remove(dumpIn);
- delete dumpIn;
-}
-
-
-status_t AudioDumpInterface::setParameters(const String8& keyValuePairs)
-{
- AudioParameter param = AudioParameter(keyValuePairs);
- String8 value;
- int valueInt;
- LOGV("setParameters %s", keyValuePairs.string());
-
- if (param.get(String8("test_cmd_file_name"), value) == NO_ERROR) {
- mFileName = value;
- param.remove(String8("test_cmd_file_name"));
- }
- if (param.get(String8("test_cmd_policy"), value) == NO_ERROR) {
- Mutex::Autolock _l(mLock);
- param.remove(String8("test_cmd_policy"));
- mPolicyCommands = param.toString();
- LOGV("test_cmd_policy command %s written", mPolicyCommands.string());
- return NO_ERROR;
- }
-
- if (mFinalInterface != 0 ) return mFinalInterface->setParameters(keyValuePairs);
- return NO_ERROR;
-}
-
-String8 AudioDumpInterface::getParameters(const String8& keys)
-{
- AudioParameter param = AudioParameter(keys);
- AudioParameter response;
- String8 value;
-
-// LOGV("getParameters %s", keys.string());
- if (param.get(String8("test_cmd_policy"), value) == NO_ERROR) {
- Mutex::Autolock _l(mLock);
- if (mPolicyCommands.length() != 0) {
- response = AudioParameter(mPolicyCommands);
- response.addInt(String8("test_cmd_policy"), 1);
- } else {
- response.addInt(String8("test_cmd_policy"), 0);
- }
- param.remove(String8("test_cmd_policy"));
-// LOGV("test_cmd_policy command %s read", mPolicyCommands.string());
- }
-
- if (param.get(String8("test_cmd_file_name"), value) == NO_ERROR) {
- response.add(String8("test_cmd_file_name"), mFileName);
- param.remove(String8("test_cmd_file_name"));
- }
-
- String8 keyValuePairs = response.toString();
-
- if (param.size() && mFinalInterface != 0 ) {
- keyValuePairs += ";";
- keyValuePairs += mFinalInterface->getParameters(param.toString());
- }
-
- return keyValuePairs;
-}
-
-status_t AudioDumpInterface::setMode(int mode)
-{
- return mFinalInterface->setMode(mode);
-}
-
-size_t AudioDumpInterface::getInputBufferSize(uint32_t sampleRate, int format, int channelCount)
-{
- return mFinalInterface->getInputBufferSize(sampleRate, format, channelCount);
-}
-
-// ----------------------------------------------------------------------------
-
-AudioStreamOutDump::AudioStreamOutDump(AudioDumpInterface *interface,
- int id,
- AudioStreamOut* finalStream,
- uint32_t devices,
- int format,
- uint32_t channels,
- uint32_t sampleRate)
- : mInterface(interface), mId(id),
- mSampleRate(sampleRate), mFormat(format), mChannels(channels), mLatency(0), mDevice(devices),
- mBufferSize(1024), mFinalStream(finalStream), mFile(0), mFileCount(0)
-{
- LOGV("AudioStreamOutDump Constructor %p, mInterface %p, mFinalStream %p", this, mInterface, mFinalStream);
-}
-
-
-AudioStreamOutDump::~AudioStreamOutDump()
-{
- LOGV("AudioStreamOutDump destructor");
- Close();
-}
-
-ssize_t AudioStreamOutDump::write(const void* buffer, size_t bytes)
-{
- ssize_t ret;
-
- if (mFinalStream) {
- ret = mFinalStream->write(buffer, bytes);
- } else {
- usleep((((bytes * 1000) / frameSize()) / sampleRate()) * 1000);
- ret = bytes;
- }
- if(!mFile) {
- if (mInterface->fileName() != "") {
- char name[255];
- sprintf(name, "%s_out_%d_%d.pcm", mInterface->fileName().string(), mId, ++mFileCount);
- mFile = fopen(name, "wb");
- LOGV("Opening dump file %s, fh %p", name, mFile);
- }
- }
- if (mFile) {
- fwrite(buffer, bytes, 1, mFile);
- }
- return ret;
-}
-
-status_t AudioStreamOutDump::standby()
-{
- LOGV("AudioStreamOutDump standby(), mFile %p, mFinalStream %p", mFile, mFinalStream);
-
- Close();
- if (mFinalStream != 0 ) return mFinalStream->standby();
- return NO_ERROR;
-}
-
-uint32_t AudioStreamOutDump::sampleRate() const
-{
- if (mFinalStream != 0 ) return mFinalStream->sampleRate();
- return mSampleRate;
-}
-
-size_t AudioStreamOutDump::bufferSize() const
-{
- if (mFinalStream != 0 ) return mFinalStream->bufferSize();
- return mBufferSize;
-}
-
-uint32_t AudioStreamOutDump::channels() const
-{
- if (mFinalStream != 0 ) return mFinalStream->channels();
- return mChannels;
-}
-int AudioStreamOutDump::format() const
-{
- if (mFinalStream != 0 ) return mFinalStream->format();
- return mFormat;
-}
-uint32_t AudioStreamOutDump::latency() const
-{
- if (mFinalStream != 0 ) return mFinalStream->latency();
- return 0;
-}
-status_t AudioStreamOutDump::setVolume(float left, float right)
-{
- if (mFinalStream != 0 ) return mFinalStream->setVolume(left, right);
- return NO_ERROR;
-}
-status_t AudioStreamOutDump::setParameters(const String8& keyValuePairs)
-{
- LOGV("AudioStreamOutDump::setParameters %s", keyValuePairs.string());
-
- if (mFinalStream != 0 ) {
- return mFinalStream->setParameters(keyValuePairs);
- }
-
- AudioParameter param = AudioParameter(keyValuePairs);
- String8 value;
- int valueInt;
- status_t status = NO_ERROR;
-
- if (param.getInt(String8("set_id"), valueInt) == NO_ERROR) {
- mId = valueInt;
- }
-
- if (param.getInt(String8("format"), valueInt) == NO_ERROR) {
- if (mFile == 0) {
- mFormat = valueInt;
- } else {
- status = INVALID_OPERATION;
- }
- }
- if (param.getInt(String8("channels"), valueInt) == NO_ERROR) {
- if (valueInt == AudioSystem::CHANNEL_OUT_STEREO || valueInt == AudioSystem::CHANNEL_OUT_MONO) {
- mChannels = valueInt;
- } else {
- status = BAD_VALUE;
- }
- }
- if (param.getInt(String8("sampling_rate"), valueInt) == NO_ERROR) {
- if (valueInt > 0 && valueInt <= 48000) {
- if (mFile == 0) {
- mSampleRate = valueInt;
- } else {
- status = INVALID_OPERATION;
- }
- } else {
- status = BAD_VALUE;
- }
- }
- return status;
-}
-
-String8 AudioStreamOutDump::getParameters(const String8& keys)
-{
- if (mFinalStream != 0 ) return mFinalStream->getParameters(keys);
-
- AudioParameter param = AudioParameter(keys);
- return param.toString();
-}
-
-status_t AudioStreamOutDump::dump(int fd, const Vector<String16>& args)
-{
- if (mFinalStream != 0 ) return mFinalStream->dump(fd, args);
- return NO_ERROR;
-}
-
-void AudioStreamOutDump::Close()
-{
- if(mFile) {
- fclose(mFile);
- mFile = 0;
- }
-}
-
-status_t AudioStreamOutDump::getRenderPosition(uint32_t *dspFrames)
-{
- if (mFinalStream != 0 ) return mFinalStream->getRenderPosition(dspFrames);
- return INVALID_OPERATION;
-}
-
-// ----------------------------------------------------------------------------
-
-AudioStreamInDump::AudioStreamInDump(AudioDumpInterface *interface,
- int id,
- AudioStreamIn* finalStream,
- uint32_t devices,
- int format,
- uint32_t channels,
- uint32_t sampleRate)
- : mInterface(interface), mId(id),
- mSampleRate(sampleRate), mFormat(format), mChannels(channels), mDevice(devices),
- mBufferSize(1024), mFinalStream(finalStream), mFile(0), mFileCount(0)
-{
- LOGV("AudioStreamInDump Constructor %p, mInterface %p, mFinalStream %p", this, mInterface, mFinalStream);
-}
-
-
-AudioStreamInDump::~AudioStreamInDump()
-{
- Close();
-}
-
-ssize_t AudioStreamInDump::read(void* buffer, ssize_t bytes)
-{
- ssize_t ret;
-
- if (mFinalStream) {
- ret = mFinalStream->read(buffer, bytes);
- if(!mFile) {
- if (mInterface->fileName() != "") {
- char name[255];
- sprintf(name, "%s_in_%d_%d.pcm", mInterface->fileName().string(), mId, ++mFileCount);
- mFile = fopen(name, "wb");
- LOGV("Opening input dump file %s, fh %p", name, mFile);
- }
- }
- if (mFile) {
- fwrite(buffer, bytes, 1, mFile);
- }
- } else {
- usleep((((bytes * 1000) / frameSize()) / sampleRate()) * 1000);
- ret = bytes;
- if(!mFile) {
- char name[255];
- strcpy(name, "/sdcard/music/sine440");
- if (channels() == AudioSystem::CHANNEL_IN_MONO) {
- strcat(name, "_mo");
- } else {
- strcat(name, "_st");
- }
- if (format() == AudioSystem::PCM_16_BIT) {
- strcat(name, "_16b");
- } else {
- strcat(name, "_8b");
- }
- if (sampleRate() < 16000) {
- strcat(name, "_8k");
- } else if (sampleRate() < 32000) {
- strcat(name, "_22k");
- } else if (sampleRate() < 48000) {
- strcat(name, "_44k");
- } else {
- strcat(name, "_48k");
- }
- strcat(name, ".wav");
- mFile = fopen(name, "rb");
- LOGV("Opening input read file %s, fh %p", name, mFile);
- if (mFile) {
- fseek(mFile, AUDIO_DUMP_WAVE_HDR_SIZE, SEEK_SET);
- }
- }
- if (mFile) {
- ssize_t bytesRead = fread(buffer, bytes, 1, mFile);
- if (bytesRead >=0 && bytesRead < bytes) {
- fseek(mFile, AUDIO_DUMP_WAVE_HDR_SIZE, SEEK_SET);
- fread((uint8_t *)buffer+bytesRead, bytes-bytesRead, 1, mFile);
- }
- }
- }
-
- return ret;
-}
-
-status_t AudioStreamInDump::standby()
-{
- LOGV("AudioStreamInDump standby(), mFile %p, mFinalStream %p", mFile, mFinalStream);
-
- Close();
- if (mFinalStream != 0 ) return mFinalStream->standby();
- return NO_ERROR;
-}
-
-status_t AudioStreamInDump::setGain(float gain)
-{
- if (mFinalStream != 0 ) return mFinalStream->setGain(gain);
- return NO_ERROR;
-}
-
-uint32_t AudioStreamInDump::sampleRate() const
-{
- if (mFinalStream != 0 ) return mFinalStream->sampleRate();
- return mSampleRate;
-}
-
-size_t AudioStreamInDump::bufferSize() const
-{
- if (mFinalStream != 0 ) return mFinalStream->bufferSize();
- return mBufferSize;
-}
-
-uint32_t AudioStreamInDump::channels() const
-{
- if (mFinalStream != 0 ) return mFinalStream->channels();
- return mChannels;
-}
-
-int AudioStreamInDump::format() const
-{
- if (mFinalStream != 0 ) return mFinalStream->format();
- return mFormat;
-}
-
-status_t AudioStreamInDump::setParameters(const String8& keyValuePairs)
-{
- LOGV("AudioStreamInDump::setParameters()");
- if (mFinalStream != 0 ) return mFinalStream->setParameters(keyValuePairs);
- return NO_ERROR;
-}
-
-String8 AudioStreamInDump::getParameters(const String8& keys)
-{
- if (mFinalStream != 0 ) return mFinalStream->getParameters(keys);
-
- AudioParameter param = AudioParameter(keys);
- return param.toString();
-}
-
-unsigned int AudioStreamInDump::getInputFramesLost() const
-{
- if (mFinalStream != 0 ) return mFinalStream->getInputFramesLost();
- return 0;
-}
-
-status_t AudioStreamInDump::dump(int fd, const Vector<String16>& args)
-{
- if (mFinalStream != 0 ) return mFinalStream->dump(fd, args);
- return NO_ERROR;
-}
-
-void AudioStreamInDump::Close()
-{
- if(mFile) {
- fclose(mFile);
- mFile = 0;
- }
-}
-}; // namespace android
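At its core the deleted dump wrapper was a tee: each write is forwarded to the real stream (or, when there is none, the thread sleeps for the time the write would have taken), and the same PCM data is appended to a per-stream dump file. A condensed sketch of that pattern, with the file naming and error handling left out:

    // Minimal sketch of the removed AudioStreamOutDump::write() tee.
    ssize_t teeWrite(AudioStreamOut* realStream, FILE* dumpFile,
                     const void* buffer, size_t bytes,
                     size_t frameSize, uint32_t sampleRate) {
        ssize_t ret;
        if (realStream) {
            ret = realStream->write(buffer, bytes);
        } else {
            // No real output: simulate the duration of the write.
            usleep((((bytes * 1000) / frameSize) / sampleRate) * 1000);
            ret = bytes;
        }
        if (dumpFile) {
            fwrite(buffer, bytes, 1, dumpFile);  // append raw PCM to the dump file
        }
        return ret;
    }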
diff --git a/services/audioflinger/AudioDumpInterface.h b/services/audioflinger/AudioDumpInterface.h
deleted file mode 100644
index 814ce5f..0000000
--- a/services/audioflinger/AudioDumpInterface.h
+++ /dev/null
@@ -1,170 +0,0 @@
-/* //device/servers/AudioFlinger/AudioDumpInterface.h
-**
-** Copyright 2008, The Android Open Source Project
-**
-** Licensed under the Apache License, Version 2.0 (the "License");
-** you may not use this file except in compliance with the License.
-** You may obtain a copy of the License at
-**
-** http://www.apache.org/licenses/LICENSE-2.0
-**
-** Unless required by applicable law or agreed to in writing, software
-** distributed under the License is distributed on an "AS IS" BASIS,
-** WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-** See the License for the specific language governing permissions and
-** limitations under the License.
-*/
-
-#ifndef ANDROID_AUDIO_DUMP_INTERFACE_H
-#define ANDROID_AUDIO_DUMP_INTERFACE_H
-
-#include <stdint.h>
-#include <sys/types.h>
-#include <utils/String8.h>
-#include <utils/SortedVector.h>
-
-#include <hardware_legacy/AudioHardwareBase.h>
-
-namespace android {
-
-#define AUDIO_DUMP_WAVE_HDR_SIZE 44
-
-class AudioDumpInterface;
-
-class AudioStreamOutDump : public AudioStreamOut {
-public:
- AudioStreamOutDump(AudioDumpInterface *interface,
- int id,
- AudioStreamOut* finalStream,
- uint32_t devices,
- int format,
- uint32_t channels,
- uint32_t sampleRate);
- ~AudioStreamOutDump();
-
- virtual ssize_t write(const void* buffer, size_t bytes);
- virtual uint32_t sampleRate() const;
- virtual size_t bufferSize() const;
- virtual uint32_t channels() const;
- virtual int format() const;
- virtual uint32_t latency() const;
- virtual status_t setVolume(float left, float right);
- virtual status_t standby();
- virtual status_t setParameters(const String8& keyValuePairs);
- virtual String8 getParameters(const String8& keys);
- virtual status_t dump(int fd, const Vector<String16>& args);
- void Close(void);
- AudioStreamOut* finalStream() { return mFinalStream; }
- uint32_t device() { return mDevice; }
- int getId() { return mId; }
- virtual status_t getRenderPosition(uint32_t *dspFrames);
-
-private:
- AudioDumpInterface *mInterface;
- int mId;
- uint32_t mSampleRate; //
- uint32_t mFormat; //
- uint32_t mChannels; // output configuration
- uint32_t mLatency; //
- uint32_t mDevice; // current device this output is routed to
- size_t mBufferSize;
- AudioStreamOut *mFinalStream;
- FILE *mFile; // output file
- int mFileCount;
-};
-
-class AudioStreamInDump : public AudioStreamIn {
-public:
- AudioStreamInDump(AudioDumpInterface *interface,
- int id,
- AudioStreamIn* finalStream,
- uint32_t devices,
- int format,
- uint32_t channels,
- uint32_t sampleRate);
- ~AudioStreamInDump();
-
- virtual uint32_t sampleRate() const;
- virtual size_t bufferSize() const;
- virtual uint32_t channels() const;
- virtual int format() const;
-
- virtual status_t setGain(float gain);
- virtual ssize_t read(void* buffer, ssize_t bytes);
- virtual status_t standby();
- virtual status_t setParameters(const String8& keyValuePairs);
- virtual String8 getParameters(const String8& keys);
- virtual unsigned int getInputFramesLost() const;
- virtual status_t dump(int fd, const Vector<String16>& args);
- void Close(void);
- AudioStreamIn* finalStream() { return mFinalStream; }
- uint32_t device() { return mDevice; }
-
-private:
- AudioDumpInterface *mInterface;
- int mId;
- uint32_t mSampleRate; //
- uint32_t mFormat; //
- uint32_t mChannels; // output configuration
- uint32_t mDevice; // current device this output is routed to
- size_t mBufferSize;
- AudioStreamIn *mFinalStream;
- FILE *mFile; // output file
- int mFileCount;
-};
-
-class AudioDumpInterface : public AudioHardwareBase
-{
-
-public:
- AudioDumpInterface(AudioHardwareInterface* hw);
- virtual AudioStreamOut* openOutputStream(
- uint32_t devices,
- int *format=0,
- uint32_t *channels=0,
- uint32_t *sampleRate=0,
- status_t *status=0);
- virtual void closeOutputStream(AudioStreamOut* out);
-
- virtual ~AudioDumpInterface();
-
- virtual status_t initCheck()
- {return mFinalInterface->initCheck();}
- virtual status_t setVoiceVolume(float volume)
- {return mFinalInterface->setVoiceVolume(volume);}
- virtual status_t setMasterVolume(float volume)
- {return mFinalInterface->setMasterVolume(volume);}
-
- virtual status_t setMode(int mode);
-
- // mic mute
- virtual status_t setMicMute(bool state)
- {return mFinalInterface->setMicMute(state);}
- virtual status_t getMicMute(bool* state)
- {return mFinalInterface->getMicMute(state);}
-
- virtual status_t setParameters(const String8& keyValuePairs);
- virtual String8 getParameters(const String8& keys);
-
- virtual size_t getInputBufferSize(uint32_t sampleRate, int format, int channelCount);
-
- virtual AudioStreamIn* openInputStream(uint32_t devices, int *format, uint32_t *channels,
- uint32_t *sampleRate, status_t *status, AudioSystem::audio_in_acoustics acoustics);
- virtual void closeInputStream(AudioStreamIn* in);
-
- virtual status_t dump(int fd, const Vector<String16>& args) { return mFinalInterface->dumpState(fd, args); }
-
- String8 fileName() const { return mFileName; }
-protected:
-
- AudioHardwareInterface *mFinalInterface;
- SortedVector<AudioStreamOutDump *> mOutputs;
- SortedVector<AudioStreamInDump *> mInputs;
- Mutex mLock;
- String8 mPolicyCommands;
- String8 mFileName;
-};
-
-}; // namespace android
-
-#endif // ANDROID_AUDIO_DUMP_INTERFACE_H
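The AudioFlinger changes that follow replace the C++ AudioHardwareInterface / AudioStreamOut objects with the C audio HAL types (audio_hw_device_t, audio_stream_out_t, audio_stream_in_t); AudioFlinger keeps thin wrapper structs holding the device and stream pointers, so every call now passes the stream explicitly. In rough before/after terms:

    // Before: C++ virtual calls on the stream object.
    //   mOutput->write(mMixBuffer, mixBufferSize);
    //   mOutput->standby();
    //
    // After: C function pointers on the HAL stream, given their own struct.
    //   mOutput->stream->write(mOutput->stream, mMixBuffer, mixBufferSize);
    //   mOutput->stream->common.standby(&mOutput->stream->common);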
diff --git a/services/audioflinger/AudioFlinger.cpp b/services/audioflinger/AudioFlinger.cpp
index 04cfa08..f8ad5bb 100644
--- a/services/audioflinger/AudioFlinger.cpp
+++ b/services/audioflinger/AudioFlinger.cpp
@@ -33,6 +33,7 @@
#include <utils/threads.h>
#include <utils/Atomic.h>
+#include <cutils/bitops.h>
#include <cutils/properties.h>
#include <media/AudioTrack.h>
@@ -41,15 +42,13 @@
#include <private/media/AudioTrackShared.h>
#include <private/media/AudioEffectShared.h>
-#include <hardware_legacy/AudioHardwareInterface.h>
+
+#include <hardware/audio.h>
+#include <hardware/audio_hal.h>
#include "AudioMixer.h"
#include "AudioFlinger.h"
-#ifdef WITH_A2DP
-#include "A2dpAudioInterface.h"
-#endif
-
#include <media/EffectsFactoryApi.h>
#include <media/EffectVisualizerApi.h>
@@ -137,34 +136,109 @@
service->addBatteryData(params);
}
+static int load_audio_interface(const char *if_name, const hw_module_t **mod,
+ audio_hw_device_t **dev)
+{
+ int rc;
+
+ rc = hw_get_module_by_class(AUDIO_HARDWARE_MODULE_ID, if_name, mod);
+ if (rc)
+ goto out;
+
+ rc = audio_hw_device_open(*mod, dev);
+ LOGE_IF(rc, "couldn't open audio hw device in %s.%s (%s)",
+ AUDIO_HARDWARE_MODULE_ID, if_name, strerror(-rc));
+ if (rc)
+ goto out;
+
+ return 0;
+
+out:
+ *mod = NULL;
+ *dev = NULL;
+ return rc;
+}
+
+static const char *audio_interfaces[] = {
+ "primary",
+ "a2dp",
+ "usb",
+};
+#define ARRAY_SIZE(x) (sizeof((x))/sizeof(((x)[0])))
+
// ----------------------------------------------------------------------------
AudioFlinger::AudioFlinger()
: BnAudioFlinger(),
- mAudioHardware(0), mMasterVolume(1.0f), mMasterMute(false), mNextUniqueId(1)
+ mPrimaryHardwareDev(0), mMasterVolume(1.0f), mMasterMute(false), mNextUniqueId(1)
{
+}
+
+void AudioFlinger::onFirstRef()
+{
+ int rc = 0;
+
Mutex::Autolock _l(mLock);
+ /* TODO: move all this work into an Init() function */
mHardwareStatus = AUDIO_HW_IDLE;
- mAudioHardware = AudioHardwareInterface::create();
+ for (size_t i = 0; i < ARRAY_SIZE(audio_interfaces); i++) {
+ const hw_module_t *mod;
+ audio_hw_device_t *dev;
+
+ rc = load_audio_interface(audio_interfaces[i], &mod, &dev);
+ if (rc)
+ continue;
+
+ LOGI("Loaded %s audio interface from %s (%s)", audio_interfaces[i],
+ mod->name, mod->id);
+ mAudioHwDevs.push(dev);
+
+ if (!mPrimaryHardwareDev) {
+ mPrimaryHardwareDev = dev;
+ LOGI("Using '%s' (%s.%s) as the primary audio interface",
+ mod->name, mod->id, audio_interfaces[i]);
+ }
+ }
mHardwareStatus = AUDIO_HW_INIT;
- if (mAudioHardware->initCheck() == NO_ERROR) {
- AutoMutex lock(mHardwareLock);
- mMode = AudioSystem::MODE_NORMAL;
- mHardwareStatus = AUDIO_HW_SET_MODE;
- mAudioHardware->setMode(mMode);
- mHardwareStatus = AUDIO_HW_SET_MASTER_VOLUME;
- mAudioHardware->setMasterVolume(1.0f);
- mHardwareStatus = AUDIO_HW_IDLE;
- } else {
- LOGE("Couldn't even initialize the stubbed audio hardware!");
+
+ if (!mPrimaryHardwareDev || mAudioHwDevs.size() == 0) {
+ LOGE("Primary audio interface not found");
+ return;
}
+
+ for (size_t i = 0; i < mAudioHwDevs.size(); i++) {
+ audio_hw_device_t *dev = mAudioHwDevs[i];
+
+ mHardwareStatus = AUDIO_HW_INIT;
+ rc = dev->init_check(dev);
+ if (rc == 0) {
+ AutoMutex lock(mHardwareLock);
+
+ mMode = AUDIO_MODE_NORMAL;
+ mHardwareStatus = AUDIO_HW_SET_MODE;
+ dev->set_mode(dev, mMode);
+ mHardwareStatus = AUDIO_HW_SET_MASTER_VOLUME;
+ dev->set_master_volume(dev, 1.0f);
+ mHardwareStatus = AUDIO_HW_IDLE;
+ }
+ }
+}
+
+status_t AudioFlinger::initCheck() const
+{
+ Mutex::Autolock _l(mLock);
+ if (mPrimaryHardwareDev == NULL || mAudioHwDevs.size() == 0)
+ return NO_INIT;
+ return NO_ERROR;
}
AudioFlinger::~AudioFlinger()
{
+ int num_devs = mAudioHwDevs.size();
+
while (!mRecordThreads.isEmpty()) {
// closeInput() will remove first entry from mRecordThreads
closeInput(mRecordThreads.keyAt(0));
@@ -173,12 +247,24 @@
// closeOutput() will remove first entry from mPlaybackThreads
closeOutput(mPlaybackThreads.keyAt(0));
}
- if (mAudioHardware) {
- delete mAudioHardware;
+
+ for (int i = 0; i < num_devs; i++) {
+ audio_hw_device_t *dev = mAudioHwDevs[i];
+ audio_hw_device_close(dev);
}
+ mAudioHwDevs.clear();
}
-
+audio_hw_device_t* AudioFlinger::findSuitableHwDev_l(uint32_t devices)
+{
+ /* first matching HW device is returned */
+ for (size_t i = 0; i < mAudioHwDevs.size(); i++) {
+ audio_hw_device_t *dev = mAudioHwDevs[i];
+ if ((dev->get_supported_devices(dev) & devices) == devices)
+ return dev;
+ }
+ return NULL;
+}
status_t AudioFlinger::dumpClients(int fd, const Vector<String16>& args)
{
@@ -277,8 +363,10 @@
mRecordThreads.valueAt(i)->dump(fd, args);
}
- if (mAudioHardware) {
- mAudioHardware->dumpState(fd, args);
+ // dump all hardware devs
+ for (size_t i = 0; i < mAudioHwDevs.size(); i++) {
+ audio_hw_device_t *dev = mAudioHwDevs[i];
+ dev->dump(dev, fd);
}
if (locked) mLock.unlock();
}
@@ -309,7 +397,7 @@
status_t lStatus;
int lSessionId;
- if (streamType >= AudioSystem::NUM_STREAM_TYPES) {
+ if (streamType >= AUDIO_STREAM_CNT) {
LOGE("invalid stream type");
lStatus = BAD_VALUE;
goto Exit;
@@ -335,7 +423,7 @@
}
LOGV("createTrack() sessionId: %d", (sessionId == NULL) ? -2 : *sessionId);
- if (sessionId != NULL && *sessionId != AudioSystem::SESSION_OUTPUT_MIX) {
+ if (sessionId != NULL && *sessionId != AUDIO_SESSION_OUTPUT_MIX) {
for (size_t i = 0; i < mPlaybackThreads.size(); i++) {
sp<PlaybackThread> t = mPlaybackThreads.valueAt(i);
if (mPlaybackThreads.keyAt(i) != output) {
@@ -454,7 +542,7 @@
{ // scope for the lock
AutoMutex lock(mHardwareLock);
mHardwareStatus = AUDIO_HW_SET_MASTER_VOLUME;
- if (mAudioHardware->setMasterVolume(value) == NO_ERROR) {
+ if (mPrimaryHardwareDev->set_master_volume(mPrimaryHardwareDev, value) == NO_ERROR) {
value = 1.0f;
}
mHardwareStatus = AUDIO_HW_IDLE;
@@ -476,7 +564,7 @@
if (!settingsAllowed()) {
return PERMISSION_DENIED;
}
- if ((mode < 0) || (mode >= AudioSystem::NUM_MODES)) {
+ if ((mode < 0) || (mode >= AUDIO_MODE_CNT)) {
LOGW("Illegal value: setMode(%d)", mode);
return BAD_VALUE;
}
@@ -484,7 +572,7 @@
{ // scope for the lock
AutoMutex lock(mHardwareLock);
mHardwareStatus = AUDIO_HW_SET_MODE;
- ret = mAudioHardware->setMode(mode);
+ ret = mPrimaryHardwareDev->set_mode(mPrimaryHardwareDev, mode);
mHardwareStatus = AUDIO_HW_IDLE;
}
@@ -507,16 +595,16 @@
AutoMutex lock(mHardwareLock);
mHardwareStatus = AUDIO_HW_SET_MIC_MUTE;
- status_t ret = mAudioHardware->setMicMute(state);
+ status_t ret = mPrimaryHardwareDev->set_mic_mute(mPrimaryHardwareDev, state);
mHardwareStatus = AUDIO_HW_IDLE;
return ret;
}
bool AudioFlinger::getMicMute() const
{
- bool state = AudioSystem::MODE_INVALID;
+ bool state = AUDIO_MODE_INVALID;
mHardwareStatus = AUDIO_HW_GET_MIC_MUTE;
- mAudioHardware->getMicMute(&state);
+ mPrimaryHardwareDev->get_mic_mute(mPrimaryHardwareDev, &state);
mHardwareStatus = AUDIO_HW_IDLE;
return state;
}
@@ -553,7 +641,7 @@
return PERMISSION_DENIED;
}
- if (stream < 0 || uint32_t(stream) >= AudioSystem::NUM_STREAM_TYPES) {
+ if (stream < 0 || uint32_t(stream) >= AUDIO_STREAM_CNT) {
return BAD_VALUE;
}
@@ -586,8 +674,8 @@
return PERMISSION_DENIED;
}
- if (stream < 0 || uint32_t(stream) >= AudioSystem::NUM_STREAM_TYPES ||
- uint32_t(stream) == AudioSystem::ENFORCED_AUDIBLE) {
+ if (stream < 0 || uint32_t(stream) >= AUDIO_STREAM_CNT ||
+ uint32_t(stream) == AUDIO_STREAM_ENFORCED_AUDIBLE) {
return BAD_VALUE;
}
@@ -601,7 +689,7 @@
float AudioFlinger::streamVolume(int stream, int output) const
{
- if (stream < 0 || uint32_t(stream) >= AudioSystem::NUM_STREAM_TYPES) {
+ if (stream < 0 || uint32_t(stream) >= AUDIO_STREAM_CNT) {
return 0.0f;
}
@@ -622,7 +710,7 @@
bool AudioFlinger::streamMute(int stream) const
{
- if (stream < 0 || stream >= (int)AudioSystem::NUM_STREAM_TYPES) {
+ if (stream < 0 || stream >= (int)AUDIO_STREAM_CNT) {
return true;
}
@@ -644,9 +732,14 @@
if (ioHandle == 0) {
AutoMutex lock(mHardwareLock);
mHardwareStatus = AUDIO_SET_PARAMETER;
- result = mAudioHardware->setParameters(keyValuePairs);
+ status_t final_result = NO_ERROR;
+ for (size_t i = 0; i < mAudioHwDevs.size(); i++) {
+ audio_hw_device_t *dev = mAudioHwDevs[i];
+ result = dev->set_parameters(dev, keyValuePairs.string());
+ final_result = result ?: final_result;
+ }
mHardwareStatus = AUDIO_HW_IDLE;
- return result;
+ return final_result;
}
// hold a strong ref on thread in case closeOutput() or closeInput() is called
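The per-device aggregation above relies on the GCC "?:" extension: final_result = result ?: final_result keeps the previous value when the current device succeeds and records the error when it fails, so the last non-zero status is what gets returned. Spelled out in standard C++ it is equivalent to:

    if (result != 0) {
        final_result = result;  // remember the most recent failing device's error
    }
    // otherwise final_result keeps NO_ERROR or an earlier device's error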
@@ -672,7 +765,15 @@
// ioHandle, keys.string(), gettid(), IPCThreadState::self()->getCallingPid());
if (ioHandle == 0) {
- return mAudioHardware->getParameters(keys);
+ String8 out_s8;
+
+ for (size_t i = 0; i < mAudioHwDevs.size(); i++) {
+ audio_hw_device_t *dev = mAudioHwDevs[i];
+ char *s = dev->get_parameters(dev, keys.string());
+ out_s8 += String8(s);
+ free(s);
+ }
+ return out_s8;
}
Mutex::Autolock _l(mLock);
@@ -690,7 +791,7 @@
size_t AudioFlinger::getInputBufferSize(uint32_t sampleRate, int format, int channelCount)
{
- return mAudioHardware->getInputBufferSize(sampleRate, format, channelCount);
+ return mPrimaryHardwareDev->get_input_buffer_size(mPrimaryHardwareDev, sampleRate, format, channelCount);
}
unsigned int AudioFlinger::getInputFramesLost(int ioHandle)
@@ -717,7 +818,7 @@
AutoMutex lock(mHardwareLock);
mHardwareStatus = AUDIO_SET_VOICE_VOLUME;
- status_t ret = mAudioHardware->setVoiceVolume(value);
+ status_t ret = mPrimaryHardwareDev->set_voice_volume(mPrimaryHardwareDev, value);
mHardwareStatus = AUDIO_HW_IDLE;
return ret;
@@ -968,7 +1069,7 @@
mMasterVolume = mAudioFlinger->masterVolume();
mMasterMute = mAudioFlinger->masterMute();
- for (int stream = 0; stream < AudioSystem::NUM_STREAM_TYPES; stream++) {
+ for (int stream = 0; stream < AUDIO_STREAM_CNT; stream++) {
mStreamTypes[stream].volume = mAudioFlinger->streamVolumeInternal(stream);
mStreamTypes[stream].mute = mAudioFlinger->streamMute(stream);
}
@@ -1131,12 +1232,12 @@
// conflicts will happen when tracks are moved from one output to another by audio policy
// manager
uint32_t strategy =
- AudioSystem::getStrategyForStream((AudioSystem::stream_type)streamType);
+ AudioSystem::getStrategyForStream((audio_stream_type_t)streamType);
for (size_t i = 0; i < mTracks.size(); ++i) {
sp<Track> t = mTracks[i];
if (t != 0) {
if (sessionId == t->sessionId() &&
- strategy != AudioSystem::getStrategyForStream((AudioSystem::stream_type)t->type())) {
+ strategy != AudioSystem::getStrategyForStream((audio_stream_type_t)t->type())) {
lStatus = BAD_VALUE;
goto Exit;
}
@@ -1155,7 +1256,7 @@
if (chain != 0) {
LOGV("createTrack_l() setting main buffer %p", chain->inBuffer());
track->setMainBuffer(chain->inBuffer());
- chain->setStrategy(AudioSystem::getStrategyForStream((AudioSystem::stream_type)track->type()));
+ chain->setStrategy(AudioSystem::getStrategyForStream((audio_stream_type_t)track->type()));
}
}
lStatus = NO_ERROR;
@@ -1170,7 +1271,7 @@
uint32_t AudioFlinger::PlaybackThread::latency() const
{
if (mOutput) {
- return mOutput->latency();
+ return mOutput->stream->get_latency(mOutput->stream);
}
else {
return 0;
@@ -1264,7 +1365,13 @@
String8 AudioFlinger::PlaybackThread::getParameters(const String8& keys)
{
- return mOutput->getParameters(keys);
+ String8 out_s8;
+ char *s;
+
+ s = mOutput->stream->common.get_parameters(&mOutput->stream->common, keys.string());
+ out_s8 = String8(s);
+ free(s);
+ return out_s8;
}
// destroyTrack_l() must be called with AudioFlinger::mLock held
@@ -1296,12 +1403,12 @@
void AudioFlinger::PlaybackThread::readOutputParameters()
{
- mSampleRate = mOutput->sampleRate();
- mChannels = mOutput->channels();
- mChannelCount = (uint16_t)AudioSystem::popCount(mChannels);
- mFormat = mOutput->format();
- mFrameSize = (uint16_t)mOutput->frameSize();
- mFrameCount = mOutput->bufferSize() / mFrameSize;
+ mSampleRate = mOutput->stream->common.get_sample_rate(&mOutput->stream->common);
+ mChannels = mOutput->stream->common.get_channels(&mOutput->stream->common);
+ mChannelCount = (uint16_t)popcount(mChannels);
+ mFormat = mOutput->stream->common.get_format(&mOutput->stream->common);
+ mFrameSize = (uint16_t)audio_stream_frame_size(&mOutput->stream->common);
+ mFrameCount = mOutput->stream->common.get_buffer_size(&mOutput->stream->common) / mFrameSize;
// FIXME - Current mixer implementation only supports stereo output: Always
// Allocate a stereo buffer even if HW output is mono.
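readOutputParameters() now derives the mix buffer geometry entirely from the HAL stream. For a typical stereo 16-bit output the numbers work out as below; the 4096-byte HAL buffer is only an assumed example value:

    // Assumed HAL values, for illustration only:
    //   get_channels()    -> AUDIO_CHANNEL_OUT_STEREO (2 channels)
    //   get_format()      -> AUDIO_FORMAT_PCM_16_BIT  (2 bytes per sample)
    //   get_buffer_size() -> 4096 bytes               (hypothetical)
    //
    //   mFrameSize  = audio_stream_frame_size(...) = 2 * 2 = 4 bytes
    //   mFrameCount = 4096 / 4                      = 1024 frames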
@@ -1329,9 +1436,9 @@
if (mOutput == 0) {
return INVALID_OPERATION;
}
- *halFrames = mBytesWritten/mOutput->frameSize();
+ *halFrames = mBytesWritten / audio_stream_frame_size(&mOutput->stream->common);
- return mOutput->getRenderPosition(dspFrames);
+ return mOutput->stream->get_render_position(mOutput->stream, dspFrames);
}
uint32_t AudioFlinger::PlaybackThread::hasAudioSession(int sessionId)
@@ -1356,19 +1463,19 @@
uint32_t AudioFlinger::PlaybackThread::getStrategyForSession_l(int sessionId)
{
- // session AudioSystem::SESSION_OUTPUT_MIX is placed in same strategy as MUSIC stream so that
+ // session AUDIO_SESSION_OUTPUT_MIX is placed in same strategy as MUSIC stream so that
// it is moved to correct output by audio policy manager when A2DP is connected or disconnected
- if (sessionId == AudioSystem::SESSION_OUTPUT_MIX) {
- return AudioSystem::getStrategyForStream(AudioSystem::MUSIC);
+ if (sessionId == AUDIO_SESSION_OUTPUT_MIX) {
+ return AudioSystem::getStrategyForStream(AUDIO_STREAM_MUSIC);
}
for (size_t i = 0; i < mTracks.size(); i++) {
sp<Track> track = mTracks[i];
if (sessionId == track->sessionId() &&
!(track->mCblk->flags & CBLK_INVALID_MSK)) {
- return AudioSystem::getStrategyForStream((AudioSystem::stream_type) track->type());
+ return AudioSystem::getStrategyForStream((audio_stream_type_t) track->type());
}
}
- return AudioSystem::getStrategyForStream(AudioSystem::MUSIC);
+ return AudioSystem::getStrategyForStream(AUDIO_STREAM_MUSIC);
}
sp<AudioFlinger::EffectChain> AudioFlinger::PlaybackThread::getEffectChain(int sessionId)
@@ -1461,7 +1568,7 @@
mSuspended) {
if (!mStandby) {
LOGV("Audio hardware entering standby, mixer %p, mSuspended %d\n", this, mSuspended);
- mOutput->standby();
+ mOutput->stream->common.standby(&mOutput->stream->common);
mStandby = true;
mBytesWritten = 0;
}
@@ -1538,7 +1645,7 @@
mInWrite = true;
mBytesWritten += mixBufferSize;
- int bytesWritten = (int)mOutput->write(mMixBuffer, mixBufferSize);
+ int bytesWritten = (int)mOutput->stream->write(mOutput->stream, mMixBuffer, mixBufferSize);
if (bytesWritten < 0) mBytesWritten -= mixBufferSize;
mNumWrites++;
mInWrite = false;
@@ -1573,7 +1680,7 @@
}
if (!mStandby) {
- mOutput->standby();
+ mOutput->stream->common.standby(&mOutput->stream->common);
}
LOGV("MixerThread %p exiting", this);
@@ -1597,7 +1704,7 @@
masterVolume = 0;
}
// Delegate master volume control to effect in output mix effect chain if needed
- sp<EffectChain> chain = getEffectChain_l(AudioSystem::SESSION_OUTPUT_MIX);
+ sp<EffectChain> chain = getEffectChain_l(AUDIO_SESSION_OUTPUT_MIX);
if (chain != 0) {
uint32_t v = (uint32_t)(masterVolume * (1 << 24));
chain->setVolume_l(&v, &v);
@@ -1823,14 +1930,14 @@
reconfig = true;
}
if (param.getInt(String8(AudioParameter::keyFormat), value) == NO_ERROR) {
- if (value != AudioSystem::PCM_16_BIT) {
+ if (value != AUDIO_FORMAT_PCM_16_BIT) {
status = BAD_VALUE;
} else {
reconfig = true;
}
}
if (param.getInt(String8(AudioParameter::keyChannels), value) == NO_ERROR) {
- if (value != AudioSystem::CHANNEL_OUT_STEREO) {
+ if (value != AUDIO_CHANNEL_OUT_STEREO) {
status = BAD_VALUE;
} else {
reconfig = true;
@@ -1852,12 +1959,12 @@
if (mDevice != value) {
uint32_t params = 0;
// check whether speaker is on
- if (value & AudioSystem::DEVICE_OUT_SPEAKER) {
+ if (value & AUDIO_DEVICE_OUT_SPEAKER) {
params |= IMediaPlayerService::kBatteryDataSpeakerOn;
}
int deviceWithoutSpeaker
- = AudioSystem::DEVICE_OUT_ALL & ~AudioSystem::DEVICE_OUT_SPEAKER;
+ = AUDIO_DEVICE_OUT_ALL & ~AUDIO_DEVICE_OUT_SPEAKER;
// check if any other device (except speaker) is on
if (value & deviceWithoutSpeaker ) {
params |= IMediaPlayerService::kBatteryDataOtherAudioDeviceOn;
@@ -1877,12 +1984,14 @@
}
if (status == NO_ERROR) {
- status = mOutput->setParameters(keyValuePair);
+ status = mOutput->stream->common.set_parameters(&mOutput->stream->common,
+ keyValuePair.string());
if (!mStandby && status == INVALID_OPERATION) {
- mOutput->standby();
+ mOutput->stream->common.standby(&mOutput->stream->common);
mStandby = true;
mBytesWritten = 0;
- status = mOutput->setParameters(keyValuePair);
+ status = mOutput->stream->common.set_parameters(&mOutput->stream->common,
+ keyValuePair.string());
}
if (status == NO_ERROR && reconfig) {
delete mAudioMixer;
@@ -1926,7 +2035,7 @@
uint32_t AudioFlinger::MixerThread::activeSleepTimeUs()
{
- return (uint32_t)(mOutput->latency() * 1000) / 2;
+ return (uint32_t)(mOutput->stream->get_latency(mOutput->stream) * 1000) / 2;
}
uint32_t AudioFlinger::MixerThread::idleSleepTimeUs()
@@ -1976,12 +2085,12 @@
void AudioFlinger::DirectOutputThread::applyVolume(uint16_t leftVol, uint16_t rightVol, bool ramp)
{
// Do not apply volume on compressed audio
- if (!AudioSystem::isLinearPCM(mFormat)) {
+ if (!audio_is_linear_pcm(mFormat)) {
return;
}
// convert to signed 16 bit before volume calculation
- if (mFormat == AudioSystem::PCM_8_BIT) {
+ if (mFormat == AUDIO_FORMAT_PCM_8_BIT) {
size_t count = mFrameCount * mChannelCount;
uint8_t *src = (uint8_t *)mMixBuffer + count-1;
int16_t *dst = mMixBuffer + count-1;
@@ -2034,7 +2143,7 @@
}
// convert back to unsigned 8 bit after volume calculation
- if (mFormat == AudioSystem::PCM_8_BIT) {
+ if (mFormat == AUDIO_FORMAT_PCM_8_BIT) {
size_t count = mFrameCount * mChannelCount;
int16_t *src = mMixBuffer;
uint8_t *dst = (uint8_t *)mMixBuffer;
@@ -2090,7 +2199,7 @@
// wait until we have something to do...
if (!mStandby) {
LOGV("Audio hardware entering standby, mixer %p\n", this);
- mOutput->standby();
+ mOutput->stream->common.standby(&mOutput->stream->common);
mStandby = true;
mBytesWritten = 0;
}
@@ -2175,7 +2284,7 @@
// If audio HAL implements volume control,
// force software volume to nominal value
- if (mOutput->setVolume(left, right) == NO_ERROR) {
+ if (mOutput->stream->set_volume(mOutput->stream, left, right) == NO_ERROR) {
left = 1.0f;
right = 1.0f;
}
@@ -2276,7 +2385,7 @@
} else {
sleepTime = idleSleepTime;
}
- } else if (mBytesWritten != 0 && AudioSystem::isLinearPCM(mFormat)) {
+ } else if (mBytesWritten != 0 && audio_is_linear_pcm(mFormat)) {
memset (mMixBuffer, 0, mFrameCount * mFrameSize);
sleepTime = 0;
}
@@ -2298,7 +2407,7 @@
mLastWriteTime = systemTime();
mInWrite = true;
mBytesWritten += mixBufferSize;
- int bytesWritten = (int)mOutput->write(mMixBuffer, mixBufferSize);
+ int bytesWritten = (int)mOutput->stream->write(mOutput->stream, mMixBuffer, mixBufferSize);
if (bytesWritten < 0) mBytesWritten -= mixBufferSize;
mNumWrites++;
mInWrite = false;
@@ -2320,7 +2429,7 @@
}
if (!mStandby) {
- mOutput->standby();
+ mOutput->stream->common.standby(&mOutput->stream->common);
}
LOGV("DirectOutputThread %p exiting", this);
@@ -2360,12 +2469,14 @@
}
}
if (status == NO_ERROR) {
- status = mOutput->setParameters(keyValuePair);
+ status = mOutput->stream->common.set_parameters(&mOutput->stream->common,
+ keyValuePair.string());
if (!mStandby && status == INVALID_OPERATION) {
- mOutput->standby();
+ mOutput->stream->common.standby(&mOutput->stream->common);
mStandby = true;
mBytesWritten = 0;
- status = mOutput->setParameters(keyValuePair);
+ status = mOutput->stream->common.set_parameters(&mOutput->stream->common,
+ keyValuePair.string());
}
if (status == NO_ERROR && reconfig) {
readOutputParameters();
@@ -2385,8 +2496,8 @@
uint32_t AudioFlinger::DirectOutputThread::activeSleepTimeUs()
{
uint32_t time;
- if (AudioSystem::isLinearPCM(mFormat)) {
- time = (uint32_t)(mOutput->latency() * 1000) / 2;
+ if (audio_is_linear_pcm(mFormat)) {
+ time = (uint32_t)(mOutput->stream->get_latency(mOutput->stream) * 1000) / 2;
} else {
time = 10000;
}
@@ -2396,7 +2507,7 @@
uint32_t AudioFlinger::DirectOutputThread::idleSleepTimeUs()
{
uint32_t time;
- if (AudioSystem::isLinearPCM(mFormat)) {
+ if (audio_is_linear_pcm(mFormat)) {
time = (uint32_t)(((mFrameCount * 1000) / mSampleRate) * 1000) / 2;
} else {
time = 10000;
@@ -2407,7 +2518,7 @@
uint32_t AudioFlinger::DirectOutputThread::suspendSleepTimeUs()
{
uint32_t time;
- if (AudioSystem::isLinearPCM(mFormat)) {
+ if (audio_is_linear_pcm(mFormat)) {
time = (uint32_t)(((mFrameCount * 1000) / mSampleRate) * 1000);
} else {
time = 10000;
@@ -2588,7 +2699,7 @@
mChannelCount,
frameCount);
if (outputTrack->cblk() != NULL) {
- thread->setStreamVolume(AudioSystem::NUM_STREAM_TYPES, 1.0f);
+ thread->setStreamVolume(AUDIO_STREAM_CNT, 1.0f);
mOutputTracks.add(outputTrack);
LOGV("addOutputTrack() track %p, on thread %p", outputTrack, thread);
updateWaitTime();
@@ -2833,7 +2944,7 @@
mStreamType = streamType;
// NOTE: audio_track_cblk_t::frameSize for 8 bit PCM data is based on a sample size of
// 16 bit because data is converted to 16 bit before being stored in buffer by AudioTrack
- mCblk->frameSize = AudioSystem::isLinearPCM(format) ? channelCount * sizeof(int16_t) : sizeof(int8_t);
+ mCblk->frameSize = audio_is_linear_pcm(format) ? channelCount * sizeof(int16_t) : sizeof(int8_t);
}
}
@@ -2864,7 +2975,7 @@
if (!isOutputTrack()) {
if (mState == ACTIVE || mState == RESUMING) {
AudioSystem::stopOutput(thread->id(),
- (AudioSystem::stream_type)mStreamType,
+ (audio_stream_type_t)mStreamType,
mSessionId);
// to track the speaker usage
@@ -2976,7 +3087,7 @@
if (!isOutputTrack() && state != ACTIVE && state != RESUMING) {
thread->mLock.unlock();
status = AudioSystem::startOutput(thread->id(),
- (AudioSystem::stream_type)mStreamType,
+ (audio_stream_type_t)mStreamType,
mSessionId);
thread->mLock.lock();
@@ -3016,7 +3127,7 @@
if (!isOutputTrack() && (state == ACTIVE || state == RESUMING)) {
thread->mLock.unlock();
AudioSystem::stopOutput(thread->id(),
- (AudioSystem::stream_type)mStreamType,
+ (audio_stream_type_t)mStreamType,
mSessionId);
thread->mLock.lock();
@@ -3038,7 +3149,7 @@
if (!isOutputTrack()) {
thread->mLock.unlock();
AudioSystem::stopOutput(thread->id(),
- (AudioSystem::stream_type)mStreamType,
+ (audio_stream_type_t)mStreamType,
mSessionId);
thread->mLock.lock();
@@ -3132,9 +3243,9 @@
{
if (mCblk != NULL) {
LOGV("RecordTrack constructor, size %d", (int)mBufferEnd - (int)mBuffer);
- if (format == AudioSystem::PCM_16_BIT) {
+ if (format == AUDIO_FORMAT_PCM_16_BIT) {
mCblk->frameSize = channelCount * sizeof(int16_t);
- } else if (format == AudioSystem::PCM_8_BIT) {
+ } else if (format == AUDIO_FORMAT_PCM_8_BIT) {
mCblk->frameSize = channelCount * sizeof(int8_t);
} else {
mCblk->frameSize = sizeof(int8_t);
@@ -3237,7 +3348,7 @@
int format,
int channelCount,
int frameCount)
- : Track(thread, NULL, AudioSystem::NUM_STREAM_TYPES, sampleRate, format, channelCount, frameCount, NULL, 0),
+ : Track(thread, NULL, AUDIO_STREAM_CNT, sampleRate, format, channelCount, frameCount, NULL, 0),
mActive(false), mSourceThread(sourceThread)
{
@@ -3609,7 +3720,7 @@
}
// If no audio session id is provided, create one here
- if (sessionId != NULL && *sessionId != AudioSystem::SESSION_OUTPUT_MIX) {
+ if (sessionId != NULL && *sessionId != AUDIO_SESSION_OUTPUT_MIX) {
lSessionId = *sessionId;
} else {
lSessionId = nextUniqueId_l();
@@ -3679,7 +3790,7 @@
ThreadBase(audioFlinger, id),
mInput(input), mResampler(0), mRsmpOutBuffer(0), mRsmpInBuffer(0)
{
- mReqChannelCount = AudioSystem::popCount(channels);
+ mReqChannelCount = popcount(channels);
mReqSampleRate = sampleRate;
readInputParameters();
}
@@ -3721,7 +3832,7 @@
checkForNewParameters_l();
if (mActiveTrack == 0 && mConfigEvents.isEmpty()) {
if (!mStandby) {
- mInput->standby();
+ mInput->stream->common.standby(&mInput->stream->common);
mStandby = true;
}
@@ -3736,7 +3847,7 @@
if (mActiveTrack != 0) {
if (mActiveTrack->mState == TrackBase::PAUSING) {
if (!mStandby) {
- mInput->standby();
+ mInput->stream->common.standby(&mInput->stream->common);
mStandby = true;
}
mActiveTrack.clear();
@@ -3781,7 +3892,7 @@
mRsmpInIndex += framesIn;
framesOut -= framesIn;
if ((int)mChannelCount == mReqChannelCount ||
- mFormat != AudioSystem::PCM_16_BIT) {
+ mFormat != AUDIO_FORMAT_PCM_16_BIT) {
memcpy(dst, src, framesIn * mFrameSize);
} else {
int16_t *src16 = (int16_t *)src;
@@ -3801,11 +3912,11 @@
}
if (framesOut && mFrameCount == mRsmpInIndex) {
if (framesOut == mFrameCount &&
- ((int)mChannelCount == mReqChannelCount || mFormat != AudioSystem::PCM_16_BIT)) {
- mBytesRead = mInput->read(buffer.raw, mInputBytes);
+ ((int)mChannelCount == mReqChannelCount || mFormat != AUDIO_FORMAT_PCM_16_BIT)) {
+ mBytesRead = mInput->stream->read(mInput->stream, buffer.raw, mInputBytes);
framesOut = 0;
} else {
- mBytesRead = mInput->read(mRsmpInBuffer, mInputBytes);
+ mBytesRead = mInput->stream->read(mInput->stream, mRsmpInBuffer, mInputBytes);
mRsmpInIndex = 0;
}
if (mBytesRead < 0) {
@@ -3813,7 +3924,7 @@
if (mActiveTrack->mState == TrackBase::ACTIVE) {
// Force input into standby so that it tries to
// recover at next read attempt
- mInput->standby();
+ mInput->stream->common.standby(&mInput->stream->common);
usleep(5000);
}
mRsmpInIndex = mFrameCount;
@@ -3868,7 +3979,7 @@
}
if (!mStandby) {
- mInput->standby();
+ mInput->stream->common.standby(&mInput->stream->common);
}
mActiveTrack.clear();
@@ -4000,13 +4111,13 @@
int channelCount;
if (framesReady == 0) {
- mBytesRead = mInput->read(mRsmpInBuffer, mInputBytes);
+ mBytesRead = mInput->stream->read(mInput->stream, mRsmpInBuffer, mInputBytes);
if (mBytesRead < 0) {
LOGE("RecordThread::getNextBuffer() Error reading audio input");
if (mActiveTrack->mState == TrackBase::ACTIVE) {
// Force input into standby so that it tries to
// recover at next read attempt
- mInput->standby();
+ mInput->stream->common.standby(&mInput->stream->common);
usleep(5000);
}
buffer->raw = 0;
@@ -4059,7 +4170,7 @@
reconfig = true;
}
if (param.getInt(String8(AudioParameter::keyChannels), value) == NO_ERROR) {
- reqChannelCount = AudioSystem::popCount(value);
+ reqChannelCount = popcount(value);
reconfig = true;
}
if (param.getInt(String8(AudioParameter::keyFrameCount), value) == NO_ERROR) {
@@ -4073,16 +4184,18 @@
}
}
if (status == NO_ERROR) {
- status = mInput->setParameters(keyValuePair);
+ status = mInput->stream->common.set_parameters(&mInput->stream->common, keyValuePair.string());
if (status == INVALID_OPERATION) {
- mInput->standby();
- status = mInput->setParameters(keyValuePair);
+ mInput->stream->common.standby(&mInput->stream->common);
+ status = mInput->stream->common.set_parameters(&mInput->stream->common, keyValuePair.string());
}
if (reconfig) {
if (status == BAD_VALUE &&
- reqFormat == mInput->format() && reqFormat == AudioSystem::PCM_16_BIT &&
- ((int)mInput->sampleRate() <= 2 * reqSamplingRate) &&
- (AudioSystem::popCount(mInput->channels()) < 3) && (reqChannelCount < 3)) {
+ reqFormat == mInput->stream->common.get_format(&mInput->stream->common) &&
+ reqFormat == AUDIO_FORMAT_PCM_16_BIT &&
+ ((int)mInput->stream->common.get_sample_rate(&mInput->stream->common) <= (2 * reqSamplingRate)) &&
+ (popcount(mInput->stream->common.get_channels(&mInput->stream->common)) < 3) &&
+ (reqChannelCount < 3)) {
status = NO_ERROR;
}
if (status == NO_ERROR) {
@@ -4103,7 +4216,13 @@
String8 AudioFlinger::RecordThread::getParameters(const String8& keys)
{
- return mInput->getParameters(keys);
+ char *s;
+ String8 out_s8;
+
+ s = mInput->stream->common.get_parameters(&mInput->stream->common, keys.string());
+ out_s8 = String8(s);
+ free(s);
+ return out_s8;
}
void AudioFlinger::RecordThread::audioConfigChanged_l(int event, int param) {
@@ -4135,12 +4254,12 @@
if (mResampler) delete mResampler;
mResampler = 0;
- mSampleRate = mInput->sampleRate();
- mChannels = mInput->channels();
- mChannelCount = (uint16_t)AudioSystem::popCount(mChannels);
- mFormat = mInput->format();
- mFrameSize = (uint16_t)mInput->frameSize();
- mInputBytes = mInput->bufferSize();
+ mSampleRate = mInput->stream->common.get_sample_rate(&mInput->stream->common);
+ mChannels = mInput->stream->common.get_channels(&mInput->stream->common);
+ mChannelCount = (uint16_t)popcount(mChannels);
+ mFormat = mInput->stream->common.get_format(&mInput->stream->common);
+ mFrameSize = (uint16_t)audio_stream_frame_size(&mInput->stream->common);
+ mInputBytes = mInput->stream->common.get_buffer_size(&mInput->stream->common);
mFrameCount = mInputBytes / mFrameSize;
mRsmpInBuffer = new int16_t[mFrameCount * mChannelCount];
@@ -4170,7 +4289,7 @@
unsigned int AudioFlinger::RecordThread::getInputFramesLost()
{
- return mInput->getInputFramesLost();
+ return mInput->stream->get_input_frames_lost(mInput->stream);
}
// ----------------------------------------------------------------------------
@@ -4189,6 +4308,8 @@
uint32_t format = pFormat ? *pFormat : 0;
uint32_t channels = pChannels ? *pChannels : 0;
uint32_t latency = pLatencyMs ? *pLatencyMs : 0;
+ audio_stream_out_t *outStream;
+ audio_hw_device_t *outHwDev;
LOGV("openOutput(), Device %x, SamplingRate %d, Format %d, Channels %x, flags %x",
pDevices ? *pDevices : 0,
@@ -4200,26 +4321,30 @@
if (pDevices == NULL || *pDevices == 0) {
return 0;
}
+
Mutex::Autolock _l(mLock);
- AudioStreamOut *output = mAudioHardware->openOutputStream(*pDevices,
- (int *)&format,
- &channels,
- &samplingRate,
- &status);
+ outHwDev = findSuitableHwDev_l(*pDevices);
+ if (outHwDev == NULL)
+ return 0;
+
+ status = outHwDev->open_output_stream(outHwDev, *pDevices, (int *)&format,
+ &channels, &samplingRate, &outStream);
LOGV("openOutput() openOutputStream returned output %p, SamplingRate %d, Format %d, Channels %x, status %d",
- output,
+ outStream,
samplingRate,
format,
channels,
status);
mHardwareStatus = AUDIO_HW_IDLE;
- if (output != 0) {
+ if (outStream != NULL) {
+ AudioStreamOut *output = new AudioStreamOut(outHwDev, outStream);
int id = nextUniqueId_l();
- if ((flags & AudioSystem::OUTPUT_FLAG_DIRECT) ||
- (format != AudioSystem::PCM_16_BIT) ||
- (channels != AudioSystem::CHANNEL_OUT_STEREO)) {
+
+ if ((flags & AUDIO_POLICY_OUTPUT_FLAG_DIRECT) ||
+ (format != AUDIO_FORMAT_PCM_16_BIT) ||
+ (channels != AUDIO_CHANNEL_OUT_STEREO)) {
thread = new DirectOutputThread(this, output, id, *pDevices);
LOGV("openOutput() created direct output: ID %d thread %p", id, thread);
} else {
@@ -4290,7 +4415,9 @@
thread->exit();
if (thread->type() != PlaybackThread::DUPLICATING) {
- mAudioHardware->closeOutputStream(thread->getOutput());
+ AudioStreamOut *out = thread->getOutput();
+ out->hwDev->close_output_stream(out->hwDev, out->stream);
+ delete out;
}
return NO_ERROR;
}
@@ -4340,20 +4467,25 @@
uint32_t reqSamplingRate = samplingRate;
uint32_t reqFormat = format;
uint32_t reqChannels = channels;
+ audio_stream_in_t *inStream;
+ audio_hw_device_t *inHwDev;
if (pDevices == NULL || *pDevices == 0) {
return 0;
}
+
Mutex::Autolock _l(mLock);
- AudioStreamIn *input = mAudioHardware->openInputStream(*pDevices,
- (int *)&format,
- &channels,
- &samplingRate,
- &status,
- (AudioSystem::audio_in_acoustics)acoustics);
+ inHwDev = findSuitableHwDev_l(*pDevices);
+ if (inHwDev == NULL)
+ return 0;
+
+ status = inHwDev->open_input_stream(inHwDev, *pDevices, (int *)&format,
+ &channels, &samplingRate,
+ (audio_in_acoustics_t)acoustics,
+ &inStream);
LOGV("openInput() openInputStream returned input %p, SamplingRate %d, Format %d, Channels %x, acoustics %x, status %d",
- input,
+ inStream,
samplingRate,
format,
channels,
@@ -4363,20 +4495,20 @@
// If the input could not be opened with the requested parameters and we can handle the conversion internally,
// try to open again with the proposed parameters. The AudioFlinger can resample the input and do mono to stereo
// or stereo to mono conversions on 16 bit PCM inputs.
- if (input == 0 && status == BAD_VALUE &&
- reqFormat == format && format == AudioSystem::PCM_16_BIT &&
+ if (inStream == NULL && status == BAD_VALUE &&
+ reqFormat == format && format == AUDIO_FORMAT_PCM_16_BIT &&
(samplingRate <= 2 * reqSamplingRate) &&
- (AudioSystem::popCount(channels) < 3) && (AudioSystem::popCount(reqChannels) < 3)) {
+ (popcount(channels) < 3) && (popcount(reqChannels) < 3)) {
LOGV("openInput() reopening with proposed sampling rate and channels");
- input = mAudioHardware->openInputStream(*pDevices,
- (int *)&format,
- &channels,
- &samplingRate,
- &status,
- (AudioSystem::audio_in_acoustics)acoustics);
+ status = inHwDev->open_input_stream(inHwDev, *pDevices, (int *)&format,
+ &channels, &samplingRate,
+ (audio_in_acoustics_t)acoustics,
+ &inStream);
}
- if (input != 0) {
+ if (inStream != NULL) {
+ AudioStreamIn *input = new AudioStreamIn(inHwDev, inStream);
+
int id = nextUniqueId_l();
// Start record thread
thread = new RecordThread(this, input, reqSamplingRate, reqChannels, id);
@@ -4386,7 +4518,7 @@
if (pFormat) *pFormat = format;
if (pChannels) *pChannels = reqChannels;
- input->standby();
+ input->stream->common.standby(&input->stream->common);
// notify client processes of the new input creation
thread->audioConfigChanged_l(AudioSystem::INPUT_OPENED);
@@ -4415,7 +4547,9 @@
}
thread->exit();
- mAudioHardware->closeInputStream(thread->getInput());
+ AudioStreamIn *in = thread->getInput();
+ in->hwDev->close_input_stream(in->hwDev, in->stream);
+ delete in;
return NO_ERROR;
}
@@ -4569,14 +4703,14 @@
}
// check audio settings permission for global effects
- if (sessionId == AudioSystem::SESSION_OUTPUT_MIX && !settingsAllowed()) {
+ if (sessionId == AUDIO_SESSION_OUTPUT_MIX && !settingsAllowed()) {
lStatus = PERMISSION_DENIED;
goto Exit;
}
- // Session AudioSystem::SESSION_OUTPUT_STAGE is reserved for output stage effects
+ // Session AUDIO_SESSION_OUTPUT_STAGE is reserved for output stage effects
// that can only be created by audio policy manager (running in same process)
- if (sessionId == AudioSystem::SESSION_OUTPUT_STAGE && getpid() != pid) {
+ if (sessionId == AUDIO_SESSION_OUTPUT_STAGE && getpid() != pid) {
lStatus = PERMISSION_DENIED;
goto Exit;
}
@@ -4590,12 +4724,12 @@
}
if (output == 0) {
- if (sessionId == AudioSystem::SESSION_OUTPUT_STAGE) {
+ if (sessionId == AUDIO_SESSION_OUTPUT_STAGE) {
// output must be specified by AudioPolicyManager when using session
- // AudioSystem::SESSION_OUTPUT_STAGE
+ // AUDIO_SESSION_OUTPUT_STAGE
lStatus = BAD_VALUE;
goto Exit;
- } else if (sessionId == AudioSystem::SESSION_OUTPUT_MIX) {
+ } else if (sessionId == AUDIO_SESSION_OUTPUT_MIX) {
// if the output returned by getOutputForEffect() is removed before we lock the
// mutex below, the call to checkPlaybackThread_l(output) below will detect it
// and we will exit safely
@@ -4643,7 +4777,7 @@
// an auxiliary version of this effect type is available
found = true;
memcpy(&d, &desc, sizeof(effect_descriptor_t));
- if (sessionId != AudioSystem::SESSION_OUTPUT_MIX ||
+ if (sessionId != AUDIO_SESSION_OUTPUT_MIX ||
(desc.flags & EFFECT_FLAG_TYPE_MASK) == EFFECT_FLAG_TYPE_AUXILIARY) {
break;
}
@@ -4656,14 +4790,14 @@
}
// For same effect type, chose auxiliary version over insert version if
// connect to output mix (Compliance to OpenSL ES)
- if (sessionId == AudioSystem::SESSION_OUTPUT_MIX &&
+ if (sessionId == AUDIO_SESSION_OUTPUT_MIX &&
(d.flags & EFFECT_FLAG_TYPE_MASK) != EFFECT_FLAG_TYPE_AUXILIARY) {
memcpy(&desc, &d, sizeof(effect_descriptor_t));
}
}
// Do not allow auxiliary effects on a session different from 0 (output mix)
- if (sessionId != AudioSystem::SESSION_OUTPUT_MIX &&
+ if (sessionId != AUDIO_SESSION_OUTPUT_MIX &&
(desc.flags & EFFECT_FLAG_TYPE_MASK) == EFFECT_FLAG_TYPE_AUXILIARY) {
lStatus = INVALID_OPERATION;
goto Exit;
@@ -4836,7 +4970,7 @@
// Do not allow auxiliary effect on session other than 0
if ((desc->flags & EFFECT_FLAG_TYPE_MASK) == EFFECT_FLAG_TYPE_AUXILIARY &&
- sessionId != AudioSystem::SESSION_OUTPUT_MIX) {
+ sessionId != AUDIO_SESSION_OUTPUT_MIX) {
LOGW("createEffect_l() Cannot add auxiliary effect %s to session %d",
desc->name, sessionId);
lStatus = BAD_VALUE;
@@ -4845,7 +4979,7 @@
// Do not allow effects with session ID 0 on direct output or duplicating threads
// TODO: add rule for hw accelerated effects on direct outputs with non PCM format
- if (sessionId == AudioSystem::SESSION_OUTPUT_MIX && mType != MIXER) {
+ if (sessionId == AUDIO_SESSION_OUTPUT_MIX && mType != MIXER) {
LOGW("createEffect_l() Cannot add auxiliary effect %s to session %d",
desc->name, sessionId);
lStatus = BAD_VALUE;
@@ -5032,13 +5166,13 @@
chain->setInBuffer(buffer, ownsBuffer);
chain->setOutBuffer(mMixBuffer);
- // Effect chain for session AudioSystem::SESSION_OUTPUT_STAGE is inserted at end of effect
+ // Effect chain for session AUDIO_SESSION_OUTPUT_STAGE is inserted at end of effect
// chains list in order to be processed last as it contains output stage effects
- // Effect chain for session AudioSystem::SESSION_OUTPUT_MIX is inserted before
- // session AudioSystem::SESSION_OUTPUT_STAGE to be processed
+ // Effect chain for session AUDIO_SESSION_OUTPUT_MIX is inserted before
+ // session AUDIO_SESSION_OUTPUT_STAGE to be processed
// after track specific effects and before output stage
- // It is therefore mandatory that AudioSystem::SESSION_OUTPUT_MIX == 0 and
- // that AudioSystem::SESSION_OUTPUT_STAGE < AudioSystem::SESSION_OUTPUT_MIX
+ // It is therefore mandatory that AUDIO_SESSION_OUTPUT_MIX == 0 and
+ // that AUDIO_SESSION_OUTPUT_STAGE < AUDIO_SESSION_OUTPUT_MIX
// Effect chain for other sessions are inserted at beginning of effect
// chains list to be processed before output mix effects. Relative order between other
// sessions is not important
@@ -5118,8 +5252,8 @@
if (EffectId == 0) {
track->setAuxBuffer(0, NULL);
} else {
- // Auxiliary effects are always in audio session AudioSystem::SESSION_OUTPUT_MIX
- sp<EffectModule> effect = getEffect_l(AudioSystem::SESSION_OUTPUT_MIX, EffectId);
+ // Auxiliary effects are always in audio session AUDIO_SESSION_OUTPUT_MIX
+ sp<EffectModule> effect = getEffect_l(AUDIO_SESSION_OUTPUT_MIX, EffectId);
if (effect != 0) {
if ((effect->desc().flags & EFFECT_FLAG_TYPE_MASK) == EFFECT_FLAG_TYPE_AUXILIARY) {
track->setAuxBuffer(EffectId, (int32_t *)effect->inBuffer());
@@ -5403,7 +5537,7 @@
mConfig.outputCfg.bufferProvider.releaseBuffer = NULL;
mConfig.inputCfg.accessMode = EFFECT_BUFFER_ACCESS_READ;
// Insert effect:
- // - in session AudioSystem::SESSION_OUTPUT_MIX or AudioSystem::SESSION_OUTPUT_STAGE,
+ // - in session AUDIO_SESSION_OUTPUT_MIX or AUDIO_SESSION_OUTPUT_STAGE,
// always overwrites output buffer: input buffer == output buffer
// - in other sessions:
// last effect in the chain accumulates in output buffer: input buffer != output buffer
@@ -5684,17 +5818,17 @@
// update this table when AudioSystem::audio_devices or audio_device_e (in EffectApi.h) are modified
const uint32_t AudioFlinger::EffectModule::sDeviceConvTable[] = {
- DEVICE_EARPIECE, // AudioSystem::DEVICE_OUT_EARPIECE
- DEVICE_SPEAKER, // AudioSystem::DEVICE_OUT_SPEAKER
- DEVICE_WIRED_HEADSET, // case AudioSystem::DEVICE_OUT_WIRED_HEADSET
- DEVICE_WIRED_HEADPHONE, // AudioSystem::DEVICE_OUT_WIRED_HEADPHONE
- DEVICE_BLUETOOTH_SCO, // AudioSystem::DEVICE_OUT_BLUETOOTH_SCO
- DEVICE_BLUETOOTH_SCO_HEADSET, // AudioSystem::DEVICE_OUT_BLUETOOTH_SCO_HEADSET
- DEVICE_BLUETOOTH_SCO_CARKIT, // AudioSystem::DEVICE_OUT_BLUETOOTH_SCO_CARKIT
- DEVICE_BLUETOOTH_A2DP, // AudioSystem::DEVICE_OUT_BLUETOOTH_A2DP
- DEVICE_BLUETOOTH_A2DP_HEADPHONES, // AudioSystem::DEVICE_OUT_BLUETOOTH_A2DP_HEADPHONES
- DEVICE_BLUETOOTH_A2DP_SPEAKER, // AudioSystem::DEVICE_OUT_BLUETOOTH_A2DP_SPEAKER
- DEVICE_AUX_DIGITAL // AudioSystem::DEVICE_OUT_AUX_DIGITAL
+ DEVICE_EARPIECE, // AUDIO_DEVICE_OUT_EARPIECE
+ DEVICE_SPEAKER, // AUDIO_DEVICE_OUT_SPEAKER
+ DEVICE_WIRED_HEADSET, // AUDIO_DEVICE_OUT_WIRED_HEADSET
+ DEVICE_WIRED_HEADPHONE, // AUDIO_DEVICE_OUT_WIRED_HEADPHONE
+ DEVICE_BLUETOOTH_SCO, // AUDIO_DEVICE_OUT_BLUETOOTH_SCO
+ DEVICE_BLUETOOTH_SCO_HEADSET, // AUDIO_DEVICE_OUT_BLUETOOTH_SCO_HEADSET
+ DEVICE_BLUETOOTH_SCO_CARKIT, // AUDIO_DEVICE_OUT_BLUETOOTH_SCO_CARKIT
+ DEVICE_BLUETOOTH_A2DP, // AUDIO_DEVICE_OUT_BLUETOOTH_A2DP
+ DEVICE_BLUETOOTH_A2DP_HEADPHONES, // AUDIO_DEVICE_OUT_BLUETOOTH_A2DP_HEADPHONES
+ DEVICE_BLUETOOTH_A2DP_SPEAKER, // AUDIO_DEVICE_OUT_BLUETOOTH_A2DP_SPEAKER
+ DEVICE_AUX_DIGITAL // AUDIO_DEVICE_OUT_AUX_DIGITAL
};
uint32_t AudioFlinger::EffectModule::deviceAudioSystemToEffectApi(uint32_t device)
@@ -5714,10 +5848,10 @@
// update this table when AudioSystem::audio_mode or audio_mode_e (in EffectApi.h) are modified
const uint32_t AudioFlinger::EffectModule::sModeConvTable[] = {
- AUDIO_MODE_NORMAL, // AudioSystem::MODE_NORMAL
- AUDIO_MODE_RINGTONE, // AudioSystem::MODE_RINGTONE
- AUDIO_MODE_IN_CALL, // AudioSystem::MODE_IN_CALL
- AUDIO_MODE_IN_CALL // AudioSystem::MODE_IN_COMMUNICATION, same conversion as for MODE_IN_CALL
+ AUDIO_EFFECT_MODE_NORMAL, // AUDIO_MODE_NORMAL
+ AUDIO_EFFECT_MODE_RINGTONE, // AUDIO_MODE_RINGTONE
+ AUDIO_EFFECT_MODE_IN_CALL, // AUDIO_MODE_IN_CALL
+ AUDIO_EFFECT_MODE_IN_CALL // AUDIO_MODE_IN_COMMUNICATION, same conversion as for AUDIO_MODE_IN_CALL
};
int AudioFlinger::EffectModule::modeAudioSystemToEffectApi(uint32_t mode)
@@ -6027,7 +6161,7 @@
mVolumeCtrlIdx(-1), mLeftVolume(UINT_MAX), mRightVolume(UINT_MAX),
mNewLeftVolume(UINT_MAX), mNewRightVolume(UINT_MAX)
{
- mStrategy = AudioSystem::getStrategyForStream(AudioSystem::MUSIC);
+ mStrategy = AudioSystem::getStrategyForStream(AUDIO_STREAM_MUSIC);
}
AudioFlinger::EffectChain::~EffectChain()
@@ -6078,8 +6212,8 @@
return;
}
PlaybackThread *playbackThread = (PlaybackThread *)thread.get();
- bool isGlobalSession = (mSessionId == AudioSystem::SESSION_OUTPUT_MIX) ||
- (mSessionId == AudioSystem::SESSION_OUTPUT_STAGE);
+ bool isGlobalSession = (mSessionId == AUDIO_SESSION_OUTPUT_MIX) ||
+ (mSessionId == AUDIO_SESSION_OUTPUT_STAGE);
bool tracksOnSession = false;
if (!isGlobalSession) {
tracksOnSession =
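
The AudioFlinger.cpp hunks above repeat one pattern: every query that used to be a virtual call on the old AudioStreamIn/AudioStreamOut C++ classes now goes through the HAL stream's embedded common block, and get_parameters() returns a heap-allocated key/value string that the caller must free. A minimal sketch of that access pattern follows; halGetParameters() and halDumpInputConfig() are illustrative helper names only and are not part of the patch.

    // Sketch of the HAL stream access pattern used throughout the hunks above.
    static String8 halGetParameters(struct audio_stream *stream, const String8& keys)
    {
        // get_parameters() allocates the returned string; the caller owns and frees it.
        char *kvpairs = stream->get_parameters(stream, keys.string());
        String8 result(kvpairs);
        free(kvpairs);
        return result;
    }

    static void halDumpInputConfig(audio_stream_in_t *in)
    {
        struct audio_stream *s = &in->common;
        uint32_t rate      = s->get_sample_rate(s);
        uint32_t channels  = s->get_channels(s);
        int      format    = s->get_format(s);
        size_t   frameSize = audio_stream_frame_size(s);
        size_t   frames    = s->get_buffer_size(s) / frameSize;
        LOGV("rate %u channels %x format %d frames %u",
             rate, channels, format, (unsigned)frames);
    }
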
diff --git a/services/audioflinger/AudioFlinger.h b/services/audioflinger/AudioFlinger.h
index ec3d202..22e5116 100644
--- a/services/audioflinger/AudioFlinger.h
+++ b/services/audioflinger/AudioFlinger.h
@@ -32,12 +32,14 @@
#include <utils/Errors.h>
#include <utils/threads.h>
#include <utils/SortedVector.h>
+#include <utils/TypeHelpers.h>
#include <utils/Vector.h>
#include <binder/BinderService.h>
#include <binder/MemoryDealer.h>
-#include <hardware_legacy/AudioHardwareInterface.h>
+#include <hardware/audio.h>
+#include <hardware/audio_hal.h>
#include "AudioBufferProvider.h"
@@ -49,7 +51,6 @@
class AudioBuffer;
class AudioResampler;
-
// ----------------------------------------------------------------------------
#define LIKELY( exp ) (__builtin_expect( (exp) != 0, true ))
@@ -211,6 +212,9 @@
AudioFlinger();
virtual ~AudioFlinger();
+ status_t initCheck() const;
+ virtual void onFirstRef();
+ audio_hw_device_t* findSuitableHwDev_l(uint32_t devices);
// Internal dump utilites.
status_t dumpPermissionDenial(int fd, const Vector<String16>& args);
@@ -268,6 +272,8 @@
class EffectModule;
class EffectHandle;
class EffectChain;
+ struct AudioStreamOut;
+ struct AudioStreamIn;
class ThreadBase : public Thread {
public:
@@ -495,7 +501,7 @@
void reset();
bool isOutputTrack() const {
- return (mStreamType == AudioSystem::NUM_STREAM_TYPES);
+ return (mStreamType == AUDIO_STREAM_CNT);
}
// we don't really need a lock for these
@@ -689,7 +695,7 @@
SortedVector< sp<Track> > mTracks;
// mStreamTypes[] uses 1 additional stream type internally for the OutputTrack used by DuplicatingThread
- stream_type_t mStreamTypes[AudioSystem::NUM_STREAM_TYPES + 1];
+ stream_type_t mStreamTypes[AUDIO_STREAM_CNT + 1];
AudioStreamOut* mOutput;
float mMasterVolume;
nsecs_t mLastWriteTime;
@@ -1159,21 +1165,37 @@
uint32_t mStrategy; // strategy for this effect chain
};
+ struct AudioStreamOut {
+ audio_hw_device_t *hwDev;
+ audio_stream_out_t *stream;
+
+ AudioStreamOut(audio_hw_device_t *dev, audio_stream_out_t *out) :
+ hwDev(dev), stream(out) {}
+ };
+
+ struct AudioStreamIn {
+ audio_hw_device_t *hwDev;
+ audio_stream_in_t *stream;
+
+ AudioStreamIn(audio_hw_device_t *dev, audio_stream_in_t *in) :
+ hwDev(dev), stream(in) {}
+ };
+
friend class RecordThread;
friend class PlaybackThread;
-
mutable Mutex mLock;
DefaultKeyedVector< pid_t, wp<Client> > mClients;
mutable Mutex mHardwareLock;
- AudioHardwareInterface* mAudioHardware;
+ audio_hw_device_t* mPrimaryHardwareDev;
+ Vector<audio_hw_device_t*> mAudioHwDevs;
mutable int mHardwareStatus;
DefaultKeyedVector< int, sp<PlaybackThread> > mPlaybackThreads;
- PlaybackThread::stream_type_t mStreamTypes[AudioSystem::NUM_STREAM_TYPES];
+ PlaybackThread::stream_type_t mStreamTypes[AUDIO_STREAM_CNT];
float mMasterVolume;
bool mMasterMute;
@@ -1185,6 +1207,7 @@
};
+
// ----------------------------------------------------------------------------
}; // namespace android
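
The two wrapper structs added to AudioFlinger.h exist so that teardown can find the device that created a stream: the closeOutput()/closeInput() hunks above call close_output_stream()/close_input_stream() through out->hwDev and in->hwDev instead of through a single global AudioHardwareInterface. A compressed sketch of that lifecycle is shown below; openWrappedOutput()/closeWrappedOutput() are illustrative free functions, not code from the patch, and they gloss over the fact that the wrapper structs are private to AudioFlinger.

    // Open: remember the owning HAL device alongside the stream it produced.
    AudioFlinger::AudioStreamOut *openWrappedOutput(audio_hw_device_t *dev,
                                                    uint32_t devices, int *format,
                                                    uint32_t *channels, uint32_t *sampleRate)
    {
        audio_stream_out_t *stream = NULL;
        status_t status = dev->open_output_stream(dev, devices, format,
                                                  channels, sampleRate, &stream);
        if (status != NO_ERROR || stream == NULL) {
            return NULL;
        }
        return new AudioFlinger::AudioStreamOut(dev, stream);
    }

    // Close: the wrapper knows which device must tear the stream down.
    void closeWrappedOutput(AudioFlinger::AudioStreamOut *out)
    {
        out->hwDev->close_output_stream(out->hwDev, out->stream);
        delete out;
    }
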
diff --git a/services/audioflinger/AudioHardwareGeneric.cpp b/services/audioflinger/AudioHardwareGeneric.cpp
deleted file mode 100644
index d63c031..0000000
--- a/services/audioflinger/AudioHardwareGeneric.cpp
+++ /dev/null
@@ -1,411 +0,0 @@
-/*
-**
-** Copyright 2007, The Android Open Source Project
-**
-** Licensed under the Apache License, Version 2.0 (the "License");
-** you may not use this file except in compliance with the License.
-** You may obtain a copy of the License at
-**
-** http://www.apache.org/licenses/LICENSE-2.0
-**
-** Unless required by applicable law or agreed to in writing, software
-** distributed under the License is distributed on an "AS IS" BASIS,
-** WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-** See the License for the specific language governing permissions and
-** limitations under the License.
-*/
-
-#include <stdint.h>
-#include <sys/types.h>
-
-#include <stdlib.h>
-#include <stdio.h>
-#include <unistd.h>
-#include <sched.h>
-#include <fcntl.h>
-#include <sys/ioctl.h>
-
-#define LOG_TAG "AudioHardware"
-#include <utils/Log.h>
-#include <utils/String8.h>
-
-#include "AudioHardwareGeneric.h"
-#include <media/AudioRecord.h>
-
-namespace android {
-
-// ----------------------------------------------------------------------------
-
-static char const * const kAudioDeviceName = "/dev/eac";
-
-// ----------------------------------------------------------------------------
-
-AudioHardwareGeneric::AudioHardwareGeneric()
- : mOutput(0), mInput(0), mFd(-1), mMicMute(false)
-{
- mFd = ::open(kAudioDeviceName, O_RDWR);
-}
-
-AudioHardwareGeneric::~AudioHardwareGeneric()
-{
- if (mFd >= 0) ::close(mFd);
- closeOutputStream((AudioStreamOut *)mOutput);
- closeInputStream((AudioStreamIn *)mInput);
-}
-
-status_t AudioHardwareGeneric::initCheck()
-{
- if (mFd >= 0) {
- if (::access(kAudioDeviceName, O_RDWR) == NO_ERROR)
- return NO_ERROR;
- }
- return NO_INIT;
-}
-
-AudioStreamOut* AudioHardwareGeneric::openOutputStream(
- uint32_t devices, int *format, uint32_t *channels, uint32_t *sampleRate, status_t *status)
-{
- AutoMutex lock(mLock);
-
- // only one output stream allowed
- if (mOutput) {
- if (status) {
- *status = INVALID_OPERATION;
- }
- return 0;
- }
-
- // create new output stream
- AudioStreamOutGeneric* out = new AudioStreamOutGeneric();
- status_t lStatus = out->set(this, mFd, devices, format, channels, sampleRate);
- if (status) {
- *status = lStatus;
- }
- if (lStatus == NO_ERROR) {
- mOutput = out;
- } else {
- delete out;
- }
- return mOutput;
-}
-
-void AudioHardwareGeneric::closeOutputStream(AudioStreamOut* out) {
- if (mOutput && out == mOutput) {
- delete mOutput;
- mOutput = 0;
- }
-}
-
-AudioStreamIn* AudioHardwareGeneric::openInputStream(
- uint32_t devices, int *format, uint32_t *channels, uint32_t *sampleRate,
- status_t *status, AudioSystem::audio_in_acoustics acoustics)
-{
- // check for valid input source
- if (!AudioSystem::isInputDevice((AudioSystem::audio_devices)devices)) {
- return 0;
- }
-
- AutoMutex lock(mLock);
-
- // only one input stream allowed
- if (mInput) {
- if (status) {
- *status = INVALID_OPERATION;
- }
- return 0;
- }
-
- // create new output stream
- AudioStreamInGeneric* in = new AudioStreamInGeneric();
- status_t lStatus = in->set(this, mFd, devices, format, channels, sampleRate, acoustics);
- if (status) {
- *status = lStatus;
- }
- if (lStatus == NO_ERROR) {
- mInput = in;
- } else {
- delete in;
- }
- return mInput;
-}
-
-void AudioHardwareGeneric::closeInputStream(AudioStreamIn* in) {
- if (mInput && in == mInput) {
- delete mInput;
- mInput = 0;
- }
-}
-
-status_t AudioHardwareGeneric::setVoiceVolume(float v)
-{
- // Implement: set voice volume
- return NO_ERROR;
-}
-
-status_t AudioHardwareGeneric::setMasterVolume(float v)
-{
- // Implement: set master volume
- // return error - software mixer will handle it
- return INVALID_OPERATION;
-}
-
-status_t AudioHardwareGeneric::setMicMute(bool state)
-{
- mMicMute = state;
- return NO_ERROR;
-}
-
-status_t AudioHardwareGeneric::getMicMute(bool* state)
-{
- *state = mMicMute;
- return NO_ERROR;
-}
-
-status_t AudioHardwareGeneric::dumpInternals(int fd, const Vector<String16>& args)
-{
- const size_t SIZE = 256;
- char buffer[SIZE];
- String8 result;
- result.append("AudioHardwareGeneric::dumpInternals\n");
- snprintf(buffer, SIZE, "\tmFd: %d mMicMute: %s\n", mFd, mMicMute? "true": "false");
- result.append(buffer);
- ::write(fd, result.string(), result.size());
- return NO_ERROR;
-}
-
-status_t AudioHardwareGeneric::dump(int fd, const Vector<String16>& args)
-{
- dumpInternals(fd, args);
- if (mInput) {
- mInput->dump(fd, args);
- }
- if (mOutput) {
- mOutput->dump(fd, args);
- }
- return NO_ERROR;
-}
-
-// ----------------------------------------------------------------------------
-
-status_t AudioStreamOutGeneric::set(
- AudioHardwareGeneric *hw,
- int fd,
- uint32_t devices,
- int *pFormat,
- uint32_t *pChannels,
- uint32_t *pRate)
-{
- int lFormat = pFormat ? *pFormat : 0;
- uint32_t lChannels = pChannels ? *pChannels : 0;
- uint32_t lRate = pRate ? *pRate : 0;
-
- // fix up defaults
- if (lFormat == 0) lFormat = format();
- if (lChannels == 0) lChannels = channels();
- if (lRate == 0) lRate = sampleRate();
-
- // check values
- if ((lFormat != format()) ||
- (lChannels != channels()) ||
- (lRate != sampleRate())) {
- if (pFormat) *pFormat = format();
- if (pChannels) *pChannels = channels();
- if (pRate) *pRate = sampleRate();
- return BAD_VALUE;
- }
-
- if (pFormat) *pFormat = lFormat;
- if (pChannels) *pChannels = lChannels;
- if (pRate) *pRate = lRate;
-
- mAudioHardware = hw;
- mFd = fd;
- mDevice = devices;
- return NO_ERROR;
-}
-
-AudioStreamOutGeneric::~AudioStreamOutGeneric()
-{
-}
-
-ssize_t AudioStreamOutGeneric::write(const void* buffer, size_t bytes)
-{
- Mutex::Autolock _l(mLock);
- return ssize_t(::write(mFd, buffer, bytes));
-}
-
-status_t AudioStreamOutGeneric::standby()
-{
- // Implement: audio hardware to standby mode
- return NO_ERROR;
-}
-
-status_t AudioStreamOutGeneric::dump(int fd, const Vector<String16>& args)
-{
- const size_t SIZE = 256;
- char buffer[SIZE];
- String8 result;
- snprintf(buffer, SIZE, "AudioStreamOutGeneric::dump\n");
- result.append(buffer);
- snprintf(buffer, SIZE, "\tsample rate: %d\n", sampleRate());
- result.append(buffer);
- snprintf(buffer, SIZE, "\tbuffer size: %d\n", bufferSize());
- result.append(buffer);
- snprintf(buffer, SIZE, "\tchannels: %d\n", channels());
- result.append(buffer);
- snprintf(buffer, SIZE, "\tformat: %d\n", format());
- result.append(buffer);
- snprintf(buffer, SIZE, "\tdevice: %d\n", mDevice);
- result.append(buffer);
- snprintf(buffer, SIZE, "\tmAudioHardware: %p\n", mAudioHardware);
- result.append(buffer);
- snprintf(buffer, SIZE, "\tmFd: %d\n", mFd);
- result.append(buffer);
- ::write(fd, result.string(), result.size());
- return NO_ERROR;
-}
-
-status_t AudioStreamOutGeneric::setParameters(const String8& keyValuePairs)
-{
- AudioParameter param = AudioParameter(keyValuePairs);
- String8 key = String8(AudioParameter::keyRouting);
- status_t status = NO_ERROR;
- int device;
- LOGV("setParameters() %s", keyValuePairs.string());
-
- if (param.getInt(key, device) == NO_ERROR) {
- mDevice = device;
- param.remove(key);
- }
-
- if (param.size()) {
- status = BAD_VALUE;
- }
- return status;
-}
-
-String8 AudioStreamOutGeneric::getParameters(const String8& keys)
-{
- AudioParameter param = AudioParameter(keys);
- String8 value;
- String8 key = String8(AudioParameter::keyRouting);
-
- if (param.get(key, value) == NO_ERROR) {
- param.addInt(key, (int)mDevice);
- }
-
- LOGV("getParameters() %s", param.toString().string());
- return param.toString();
-}
-
-status_t AudioStreamOutGeneric::getRenderPosition(uint32_t *dspFrames)
-{
- return INVALID_OPERATION;
-}
-
-// ----------------------------------------------------------------------------
-
-// record functions
-status_t AudioStreamInGeneric::set(
- AudioHardwareGeneric *hw,
- int fd,
- uint32_t devices,
- int *pFormat,
- uint32_t *pChannels,
- uint32_t *pRate,
- AudioSystem::audio_in_acoustics acoustics)
-{
- if (pFormat == 0 || pChannels == 0 || pRate == 0) return BAD_VALUE;
- LOGV("AudioStreamInGeneric::set(%p, %d, %d, %d, %u)", hw, fd, *pFormat, *pChannels, *pRate);
- // check values
- if ((*pFormat != format()) ||
- (*pChannels != channels()) ||
- (*pRate != sampleRate())) {
- LOGE("Error opening input channel");
- *pFormat = format();
- *pChannels = channels();
- *pRate = sampleRate();
- return BAD_VALUE;
- }
-
- mAudioHardware = hw;
- mFd = fd;
- mDevice = devices;
- return NO_ERROR;
-}
-
-AudioStreamInGeneric::~AudioStreamInGeneric()
-{
-}
-
-ssize_t AudioStreamInGeneric::read(void* buffer, ssize_t bytes)
-{
- AutoMutex lock(mLock);
- if (mFd < 0) {
- LOGE("Attempt to read from unopened device");
- return NO_INIT;
- }
- return ::read(mFd, buffer, bytes);
-}
-
-status_t AudioStreamInGeneric::dump(int fd, const Vector<String16>& args)
-{
- const size_t SIZE = 256;
- char buffer[SIZE];
- String8 result;
- snprintf(buffer, SIZE, "AudioStreamInGeneric::dump\n");
- result.append(buffer);
- snprintf(buffer, SIZE, "\tsample rate: %d\n", sampleRate());
- result.append(buffer);
- snprintf(buffer, SIZE, "\tbuffer size: %d\n", bufferSize());
- result.append(buffer);
- snprintf(buffer, SIZE, "\tchannels: %d\n", channels());
- result.append(buffer);
- snprintf(buffer, SIZE, "\tformat: %d\n", format());
- result.append(buffer);
- snprintf(buffer, SIZE, "\tdevice: %d\n", mDevice);
- result.append(buffer);
- snprintf(buffer, SIZE, "\tmAudioHardware: %p\n", mAudioHardware);
- result.append(buffer);
- snprintf(buffer, SIZE, "\tmFd: %d\n", mFd);
- result.append(buffer);
- ::write(fd, result.string(), result.size());
- return NO_ERROR;
-}
-
-status_t AudioStreamInGeneric::setParameters(const String8& keyValuePairs)
-{
- AudioParameter param = AudioParameter(keyValuePairs);
- String8 key = String8(AudioParameter::keyRouting);
- status_t status = NO_ERROR;
- int device;
- LOGV("setParameters() %s", keyValuePairs.string());
-
- if (param.getInt(key, device) == NO_ERROR) {
- mDevice = device;
- param.remove(key);
- }
-
- if (param.size()) {
- status = BAD_VALUE;
- }
- return status;
-}
-
-String8 AudioStreamInGeneric::getParameters(const String8& keys)
-{
- AudioParameter param = AudioParameter(keys);
- String8 value;
- String8 key = String8(AudioParameter::keyRouting);
-
- if (param.get(key, value) == NO_ERROR) {
- param.addInt(key, (int)mDevice);
- }
-
- LOGV("getParameters() %s", param.toString().string());
- return param.toString();
-}
-
-// ----------------------------------------------------------------------------
-
-}; // namespace android
diff --git a/services/audioflinger/AudioHardwareGeneric.h b/services/audioflinger/AudioHardwareGeneric.h
deleted file mode 100644
index aa4e78d..0000000
--- a/services/audioflinger/AudioHardwareGeneric.h
+++ /dev/null
@@ -1,151 +0,0 @@
-/*
-**
-** Copyright 2007, The Android Open Source Project
-**
-** Licensed under the Apache License, Version 2.0 (the "License");
-** you may not use this file except in compliance with the License.
-** You may obtain a copy of the License at
-**
-** http://www.apache.org/licenses/LICENSE-2.0
-**
-** Unless required by applicable law or agreed to in writing, software
-** distributed under the License is distributed on an "AS IS" BASIS,
-** WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-** See the License for the specific language governing permissions and
-** limitations under the License.
-*/
-
-#ifndef ANDROID_AUDIO_HARDWARE_GENERIC_H
-#define ANDROID_AUDIO_HARDWARE_GENERIC_H
-
-#include <stdint.h>
-#include <sys/types.h>
-
-#include <utils/threads.h>
-
-#include <hardware_legacy/AudioHardwareBase.h>
-
-namespace android {
-
-// ----------------------------------------------------------------------------
-
-class AudioHardwareGeneric;
-
-class AudioStreamOutGeneric : public AudioStreamOut {
-public:
- AudioStreamOutGeneric() : mAudioHardware(0), mFd(-1) {}
- virtual ~AudioStreamOutGeneric();
-
- virtual status_t set(
- AudioHardwareGeneric *hw,
- int mFd,
- uint32_t devices,
- int *pFormat,
- uint32_t *pChannels,
- uint32_t *pRate);
-
- virtual uint32_t sampleRate() const { return 44100; }
- virtual size_t bufferSize() const { return 4096; }
- virtual uint32_t channels() const { return AudioSystem::CHANNEL_OUT_STEREO; }
- virtual int format() const { return AudioSystem::PCM_16_BIT; }
- virtual uint32_t latency() const { return 20; }
- virtual status_t setVolume(float left, float right) { return INVALID_OPERATION; }
- virtual ssize_t write(const void* buffer, size_t bytes);
- virtual status_t standby();
- virtual status_t dump(int fd, const Vector<String16>& args);
- virtual status_t setParameters(const String8& keyValuePairs);
- virtual String8 getParameters(const String8& keys);
- virtual status_t getRenderPosition(uint32_t *dspFrames);
-
-private:
- AudioHardwareGeneric *mAudioHardware;
- Mutex mLock;
- int mFd;
- uint32_t mDevice;
-};
-
-class AudioStreamInGeneric : public AudioStreamIn {
-public:
- AudioStreamInGeneric() : mAudioHardware(0), mFd(-1) {}
- virtual ~AudioStreamInGeneric();
-
- virtual status_t set(
- AudioHardwareGeneric *hw,
- int mFd,
- uint32_t devices,
- int *pFormat,
- uint32_t *pChannels,
- uint32_t *pRate,
- AudioSystem::audio_in_acoustics acoustics);
-
- virtual uint32_t sampleRate() const { return 8000; }
- virtual size_t bufferSize() const { return 320; }
- virtual uint32_t channels() const { return AudioSystem::CHANNEL_IN_MONO; }
- virtual int format() const { return AudioSystem::PCM_16_BIT; }
- virtual status_t setGain(float gain) { return INVALID_OPERATION; }
- virtual ssize_t read(void* buffer, ssize_t bytes);
- virtual status_t dump(int fd, const Vector<String16>& args);
- virtual status_t standby() { return NO_ERROR; }
- virtual status_t setParameters(const String8& keyValuePairs);
- virtual String8 getParameters(const String8& keys);
- virtual unsigned int getInputFramesLost() const { return 0; }
-
-private:
- AudioHardwareGeneric *mAudioHardware;
- Mutex mLock;
- int mFd;
- uint32_t mDevice;
-};
-
-
-class AudioHardwareGeneric : public AudioHardwareBase
-{
-public:
- AudioHardwareGeneric();
- virtual ~AudioHardwareGeneric();
- virtual status_t initCheck();
- virtual status_t setVoiceVolume(float volume);
- virtual status_t setMasterVolume(float volume);
-
- // mic mute
- virtual status_t setMicMute(bool state);
- virtual status_t getMicMute(bool* state);
-
- // create I/O streams
- virtual AudioStreamOut* openOutputStream(
- uint32_t devices,
- int *format=0,
- uint32_t *channels=0,
- uint32_t *sampleRate=0,
- status_t *status=0);
- virtual void closeOutputStream(AudioStreamOut* out);
-
- virtual AudioStreamIn* openInputStream(
- uint32_t devices,
- int *format,
- uint32_t *channels,
- uint32_t *sampleRate,
- status_t *status,
- AudioSystem::audio_in_acoustics acoustics);
- virtual void closeInputStream(AudioStreamIn* in);
-
- void closeOutputStream(AudioStreamOutGeneric* out);
- void closeInputStream(AudioStreamInGeneric* in);
-protected:
- virtual status_t dump(int fd, const Vector<String16>& args);
-
-private:
- status_t dumpInternals(int fd, const Vector<String16>& args);
-
- Mutex mLock;
- AudioStreamOutGeneric *mOutput;
- AudioStreamInGeneric *mInput;
- int mFd;
- bool mMicMute;
-};
-
-// ----------------------------------------------------------------------------
-
-}; // namespace android
-
-#endif // ANDROID_AUDIO_HARDWARE_GENERIC_H
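
The deleted AudioStreamOutGeneric/AudioStreamInGeneric classes show the legacy virtual stream interface whose callers were rewritten in the AudioFlinger.cpp hunks above. For orientation, a rough mapping from the old methods to the HAL entry points now used is listed below; the two entries marked with an asterisk are assumed from hardware/audio.h and do not appear in this diff.

    // legacy C++ stream method        HAL C entry point
    // out->sampleRate()          ->   out->common.get_sample_rate(&out->common)
    // out->bufferSize()          ->   out->common.get_buffer_size(&out->common)
    // out->channels()            ->   out->common.get_channels(&out->common)
    // out->format()              ->   out->common.get_format(&out->common)
    // out->frameSize()           ->   audio_stream_frame_size(&out->common)
    // out->standby()             ->   out->common.standby(&out->common)
    // out->setParameters(kvp)    ->   out->common.set_parameters(&out->common, kvp.string())
    // out->getParameters(keys)   ->   out->common.get_parameters(&out->common, keys.string())  // caller frees
    // out->write(buf, bytes)     ->   out->write(out, buf, bytes)                              // *
    // in->read(buf, bytes)       ->   in->read(in, buf, bytes)                                 // *
    // in->getInputFramesLost()   ->   in->get_input_frames_lost(in)
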
diff --git a/services/audioflinger/AudioHardwareInterface.cpp b/services/audioflinger/AudioHardwareInterface.cpp
deleted file mode 100644
index f58e4c0..0000000
--- a/services/audioflinger/AudioHardwareInterface.cpp
+++ /dev/null
@@ -1,183 +0,0 @@
-/*
-**
-** Copyright 2007, The Android Open Source Project
-**
-** Licensed under the Apache License, Version 2.0 (the "License");
-** you may not use this file except in compliance with the License.
-** You may obtain a copy of the License at
-**
-** http://www.apache.org/licenses/LICENSE-2.0
-**
-** Unless required by applicable law or agreed to in writing, software
-** distributed under the License is distributed on an "AS IS" BASIS,
-** WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-** See the License for the specific language governing permissions and
-** limitations under the License.
-*/
-
-#include <cutils/properties.h>
-#include <string.h>
-#include <unistd.h>
-//#define LOG_NDEBUG 0
-
-#define LOG_TAG "AudioHardwareInterface"
-#include <utils/Log.h>
-#include <utils/String8.h>
-
-#include "AudioHardwareStub.h"
-#include "AudioHardwareGeneric.h"
-#ifdef WITH_A2DP
-#include "A2dpAudioInterface.h"
-#endif
-
-#ifdef ENABLE_AUDIO_DUMP
-#include "AudioDumpInterface.h"
-#endif
-
-
-// change to 1 to log routing calls
-#define LOG_ROUTING_CALLS 1
-
-namespace android {
-
-#if LOG_ROUTING_CALLS
-static const char* routingModeStrings[] =
-{
- "OUT OF RANGE",
- "INVALID",
- "CURRENT",
- "NORMAL",
- "RINGTONE",
- "IN_CALL",
- "IN_COMMUNICATION"
-};
-
-static const char* routeNone = "NONE";
-
-static const char* displayMode(int mode)
-{
- if ((mode < AudioSystem::MODE_INVALID) || (mode >= AudioSystem::NUM_MODES))
- return routingModeStrings[0];
- return routingModeStrings[mode+3];
-}
-#endif
-
-// ----------------------------------------------------------------------------
-
-AudioHardwareInterface* AudioHardwareInterface::create()
-{
- /*
- * FIXME: This code needs to instantiate the correct audio device
- * interface. For now - we use compile-time switches.
- */
- AudioHardwareInterface* hw = 0;
- char value[PROPERTY_VALUE_MAX];
-
-#ifdef GENERIC_AUDIO
- hw = new AudioHardwareGeneric();
-#else
- // if running in emulation - use the emulator driver
- if (property_get("ro.kernel.qemu", value, 0)) {
- LOGD("Running in emulation - using generic audio driver");
- hw = new AudioHardwareGeneric();
- }
- else {
- LOGV("Creating Vendor Specific AudioHardware");
- hw = createAudioHardware();
- }
-#endif
- if (hw->initCheck() != NO_ERROR) {
- LOGW("Using stubbed audio hardware. No sound will be produced.");
- delete hw;
- hw = new AudioHardwareStub();
- }
-
-#ifdef WITH_A2DP
- hw = new A2dpAudioInterface(hw);
-#endif
-
-#ifdef ENABLE_AUDIO_DUMP
- // This code adds a record of buffers in a file to write calls made by AudioFlinger.
- // It replaces the current AudioHardwareInterface object by an intermediate one which
- // will record buffers in a file (after sending them to hardware) for testing purpose.
- // This feature is enabled by defining symbol ENABLE_AUDIO_DUMP.
- // The output file is set with setParameters("test_cmd_file_name=<name>"). Pause are not recorded in the file.
- LOGV("opening PCM dump interface");
- hw = new AudioDumpInterface(hw); // replace interface
-#endif
- return hw;
-}
-
-AudioStreamOut::~AudioStreamOut()
-{
-}
-
-AudioStreamIn::~AudioStreamIn() {}
-
-AudioHardwareBase::AudioHardwareBase()
-{
- mMode = 0;
-}
-
-status_t AudioHardwareBase::setMode(int mode)
-{
-#if LOG_ROUTING_CALLS
- LOGD("setMode(%s)", displayMode(mode));
-#endif
- if ((mode < 0) || (mode >= AudioSystem::NUM_MODES))
- return BAD_VALUE;
- if (mMode == mode)
- return ALREADY_EXISTS;
- mMode = mode;
- return NO_ERROR;
-}
-
-// default implementation
-status_t AudioHardwareBase::setParameters(const String8& keyValuePairs)
-{
- return NO_ERROR;
-}
-
-// default implementation
-String8 AudioHardwareBase::getParameters(const String8& keys)
-{
- AudioParameter param = AudioParameter(keys);
- return param.toString();
-}
-
-// default implementation
-size_t AudioHardwareBase::getInputBufferSize(uint32_t sampleRate, int format, int channelCount)
-{
- if (sampleRate != 8000) {
- LOGW("getInputBufferSize bad sampling rate: %d", sampleRate);
- return 0;
- }
- if (format != AudioSystem::PCM_16_BIT) {
- LOGW("getInputBufferSize bad format: %d", format);
- return 0;
- }
- if (channelCount != 1) {
- LOGW("getInputBufferSize bad channel count: %d", channelCount);
- return 0;
- }
-
- return 320;
-}
-
-status_t AudioHardwareBase::dumpState(int fd, const Vector<String16>& args)
-{
- const size_t SIZE = 256;
- char buffer[SIZE];
- String8 result;
- snprintf(buffer, SIZE, "AudioHardwareBase::dumpState\n");
- result.append(buffer);
- snprintf(buffer, SIZE, "\tmMode: %d\n", mMode);
- result.append(buffer);
- ::write(fd, result.string(), result.size());
- dump(fd, args); // Dump the state of the concrete child.
- return NO_ERROR;
-}
-
-// ----------------------------------------------------------------------------
-
-}; // namespace android
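
With AudioHardwareInterface::create() removed, backend selection moves into AudioFlinger itself: the AudioFlinger.h hunk above declares findSuitableHwDev_l() and keeps mPrimaryHardwareDev plus a Vector of additional audio_hw_device_t pointers. The implementation is not part of this excerpt; the sketch below is only a plausible shape for it and assumes the HAL device exposes a get_supported_devices() query.

    // Hypothetical sketch of findSuitableHwDev_l(); not taken from the patch.
    audio_hw_device_t* AudioFlinger::findSuitableHwDev_l(uint32_t devices)
    {
        // Prefer the primary device when it claims support for the request.
        audio_hw_device_t *dev = mPrimaryHardwareDev;
        if (dev != NULL &&
            (dev->get_supported_devices(dev) & devices) == devices) {  // assumed HAL query
            return dev;
        }
        // Otherwise try every other loaded HAL module.
        for (size_t i = 0; i < mAudioHwDevs.size(); i++) {
            dev = mAudioHwDevs[i];
            if ((dev->get_supported_devices(dev) & devices) == devices) {
                return dev;
            }
        }
        return NULL;
    }
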
diff --git a/services/audioflinger/AudioHardwareStub.cpp b/services/audioflinger/AudioHardwareStub.cpp
deleted file mode 100644
index d481150..0000000
--- a/services/audioflinger/AudioHardwareStub.cpp
+++ /dev/null
@@ -1,209 +0,0 @@
-/* //device/servers/AudioFlinger/AudioHardwareStub.cpp
-**
-** Copyright 2007, The Android Open Source Project
-**
-** Licensed under the Apache License, Version 2.0 (the "License");
-** you may not use this file except in compliance with the License.
-** You may obtain a copy of the License at
-**
-** http://www.apache.org/licenses/LICENSE-2.0
-**
-** Unless required by applicable law or agreed to in writing, software
-** distributed under the License is distributed on an "AS IS" BASIS,
-** WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-** See the License for the specific language governing permissions and
-** limitations under the License.
-*/
-
-#include <stdint.h>
-#include <sys/types.h>
-
-#include <stdlib.h>
-#include <unistd.h>
-#include <utils/String8.h>
-
-#include "AudioHardwareStub.h"
-#include <media/AudioRecord.h>
-
-namespace android {
-
-// ----------------------------------------------------------------------------
-
-AudioHardwareStub::AudioHardwareStub() : mMicMute(false)
-{
-}
-
-AudioHardwareStub::~AudioHardwareStub()
-{
-}
-
-status_t AudioHardwareStub::initCheck()
-{
- return NO_ERROR;
-}
-
-AudioStreamOut* AudioHardwareStub::openOutputStream(
- uint32_t devices, int *format, uint32_t *channels, uint32_t *sampleRate, status_t *status)
-{
- AudioStreamOutStub* out = new AudioStreamOutStub();
- status_t lStatus = out->set(format, channels, sampleRate);
- if (status) {
- *status = lStatus;
- }
- if (lStatus == NO_ERROR)
- return out;
- delete out;
- return 0;
-}
-
-void AudioHardwareStub::closeOutputStream(AudioStreamOut* out)
-{
- delete out;
-}
-
-AudioStreamIn* AudioHardwareStub::openInputStream(
- uint32_t devices, int *format, uint32_t *channels, uint32_t *sampleRate,
- status_t *status, AudioSystem::audio_in_acoustics acoustics)
-{
- // check for valid input source
- if (!AudioSystem::isInputDevice((AudioSystem::audio_devices)devices)) {
- return 0;
- }
-
- AudioStreamInStub* in = new AudioStreamInStub();
- status_t lStatus = in->set(format, channels, sampleRate, acoustics);
- if (status) {
- *status = lStatus;
- }
- if (lStatus == NO_ERROR)
- return in;
- delete in;
- return 0;
-}
-
-void AudioHardwareStub::closeInputStream(AudioStreamIn* in)
-{
- delete in;
-}
-
-status_t AudioHardwareStub::setVoiceVolume(float volume)
-{
- return NO_ERROR;
-}
-
-status_t AudioHardwareStub::setMasterVolume(float volume)
-{
- return NO_ERROR;
-}
-
-status_t AudioHardwareStub::dumpInternals(int fd, const Vector<String16>& args)
-{
- const size_t SIZE = 256;
- char buffer[SIZE];
- String8 result;
- result.append("AudioHardwareStub::dumpInternals\n");
- snprintf(buffer, SIZE, "\tmMicMute: %s\n", mMicMute? "true": "false");
- result.append(buffer);
- ::write(fd, result.string(), result.size());
- return NO_ERROR;
-}
-
-status_t AudioHardwareStub::dump(int fd, const Vector<String16>& args)
-{
- dumpInternals(fd, args);
- return NO_ERROR;
-}
-
-// ----------------------------------------------------------------------------
-
-status_t AudioStreamOutStub::set(int *pFormat, uint32_t *pChannels, uint32_t *pRate)
-{
- if (pFormat) *pFormat = format();
- if (pChannels) *pChannels = channels();
- if (pRate) *pRate = sampleRate();
-
- return NO_ERROR;
-}
-
-ssize_t AudioStreamOutStub::write(const void* buffer, size_t bytes)
-{
- // fake timing for audio output
- usleep(bytes * 1000000 / sizeof(int16_t) / AudioSystem::popCount(channels()) / sampleRate());
- return bytes;
-}
-
-status_t AudioStreamOutStub::standby()
-{
- return NO_ERROR;
-}
-
-status_t AudioStreamOutStub::dump(int fd, const Vector<String16>& args)
-{
- const size_t SIZE = 256;
- char buffer[SIZE];
- String8 result;
- snprintf(buffer, SIZE, "AudioStreamOutStub::dump\n");
- snprintf(buffer, SIZE, "\tsample rate: %d\n", sampleRate());
- snprintf(buffer, SIZE, "\tbuffer size: %d\n", bufferSize());
- snprintf(buffer, SIZE, "\tchannels: %d\n", channels());
- snprintf(buffer, SIZE, "\tformat: %d\n", format());
- result.append(buffer);
- ::write(fd, result.string(), result.size());
- return NO_ERROR;
-}
-
-String8 AudioStreamOutStub::getParameters(const String8& keys)
-{
- AudioParameter param = AudioParameter(keys);
- return param.toString();
-}
-
-status_t AudioStreamOutStub::getRenderPosition(uint32_t *dspFrames)
-{
- return INVALID_OPERATION;
-}
-
-// ----------------------------------------------------------------------------
-
-status_t AudioStreamInStub::set(int *pFormat, uint32_t *pChannels, uint32_t *pRate,
- AudioSystem::audio_in_acoustics acoustics)
-{
- return NO_ERROR;
-}
-
-ssize_t AudioStreamInStub::read(void* buffer, ssize_t bytes)
-{
- // fake timing for audio input
- usleep(bytes * 1000000 / sizeof(int16_t) / AudioSystem::popCount(channels()) / sampleRate());
- memset(buffer, 0, bytes);
- return bytes;
-}
-
-status_t AudioStreamInStub::dump(int fd, const Vector<String16>& args)
-{
- const size_t SIZE = 256;
- char buffer[SIZE];
- String8 result;
- snprintf(buffer, SIZE, "AudioStreamInStub::dump\n");
- result.append(buffer);
- snprintf(buffer, SIZE, "\tsample rate: %d\n", sampleRate());
- result.append(buffer);
- snprintf(buffer, SIZE, "\tbuffer size: %d\n", bufferSize());
- result.append(buffer);
- snprintf(buffer, SIZE, "\tchannels: %d\n", channels());
- result.append(buffer);
- snprintf(buffer, SIZE, "\tformat: %d\n", format());
- result.append(buffer);
- ::write(fd, result.string(), result.size());
- return NO_ERROR;
-}
-
-String8 AudioStreamInStub::getParameters(const String8& keys)
-{
- AudioParameter param = AudioParameter(keys);
- return param.toString();
-}
-
-// ----------------------------------------------------------------------------
-
-}; // namespace android
diff --git a/services/audioflinger/AudioHardwareStub.h b/services/audioflinger/AudioHardwareStub.h
deleted file mode 100644
index 06a29de..0000000
--- a/services/audioflinger/AudioHardwareStub.h
+++ /dev/null
@@ -1,106 +0,0 @@
-/* //device/servers/AudioFlinger/AudioHardwareStub.h
-**
-** Copyright 2007, The Android Open Source Project
-**
-** Licensed under the Apache License, Version 2.0 (the "License");
-** you may not use this file except in compliance with the License.
-** You may obtain a copy of the License at
-**
-** http://www.apache.org/licenses/LICENSE-2.0
-**
-** Unless required by applicable law or agreed to in writing, software
-** distributed under the License is distributed on an "AS IS" BASIS,
-** WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-** See the License for the specific language governing permissions and
-** limitations under the License.
-*/
-
-#ifndef ANDROID_AUDIO_HARDWARE_STUB_H
-#define ANDROID_AUDIO_HARDWARE_STUB_H
-
-#include <stdint.h>
-#include <sys/types.h>
-
-#include <hardware_legacy/AudioHardwareBase.h>
-
-namespace android {
-
-// ----------------------------------------------------------------------------
-
-class AudioStreamOutStub : public AudioStreamOut {
-public:
- virtual status_t set(int *pFormat, uint32_t *pChannels, uint32_t *pRate);
- virtual uint32_t sampleRate() const { return 44100; }
- virtual size_t bufferSize() const { return 4096; }
- virtual uint32_t channels() const { return AudioSystem::CHANNEL_OUT_STEREO; }
- virtual int format() const { return AudioSystem::PCM_16_BIT; }
- virtual uint32_t latency() const { return 0; }
- virtual status_t setVolume(float left, float right) { return NO_ERROR; }
- virtual ssize_t write(const void* buffer, size_t bytes);
- virtual status_t standby();
- virtual status_t dump(int fd, const Vector<String16>& args);
- virtual status_t setParameters(const String8& keyValuePairs) { return NO_ERROR;}
- virtual String8 getParameters(const String8& keys);
- virtual status_t getRenderPosition(uint32_t *dspFrames);
-};
-
-class AudioStreamInStub : public AudioStreamIn {
-public:
- virtual status_t set(int *pFormat, uint32_t *pChannels, uint32_t *pRate, AudioSystem::audio_in_acoustics acoustics);
- virtual uint32_t sampleRate() const { return 8000; }
- virtual size_t bufferSize() const { return 320; }
- virtual uint32_t channels() const { return AudioSystem::CHANNEL_IN_MONO; }
- virtual int format() const { return AudioSystem::PCM_16_BIT; }
- virtual status_t setGain(float gain) { return NO_ERROR; }
- virtual ssize_t read(void* buffer, ssize_t bytes);
- virtual status_t dump(int fd, const Vector<String16>& args);
- virtual status_t standby() { return NO_ERROR; }
- virtual status_t setParameters(const String8& keyValuePairs) { return NO_ERROR;}
- virtual String8 getParameters(const String8& keys);
- virtual unsigned int getInputFramesLost() const { return 0; }
-};
-
-class AudioHardwareStub : public AudioHardwareBase
-{
-public:
- AudioHardwareStub();
- virtual ~AudioHardwareStub();
- virtual status_t initCheck();
- virtual status_t setVoiceVolume(float volume);
- virtual status_t setMasterVolume(float volume);
-
- // mic mute
- virtual status_t setMicMute(bool state) { mMicMute = state; return NO_ERROR; }
- virtual status_t getMicMute(bool* state) { *state = mMicMute ; return NO_ERROR; }
-
- // create I/O streams
- virtual AudioStreamOut* openOutputStream(
- uint32_t devices,
- int *format=0,
- uint32_t *channels=0,
- uint32_t *sampleRate=0,
- status_t *status=0);
- virtual void closeOutputStream(AudioStreamOut* out);
-
- virtual AudioStreamIn* openInputStream(
- uint32_t devices,
- int *format,
- uint32_t *channels,
- uint32_t *sampleRate,
- status_t *status,
- AudioSystem::audio_in_acoustics acoustics);
- virtual void closeInputStream(AudioStreamIn* in);
-
-protected:
- virtual status_t dump(int fd, const Vector<String16>& args);
-
- bool mMicMute;
-private:
- status_t dumpInternals(int fd, const Vector<String16>& args);
-};
-
-// ----------------------------------------------------------------------------
-
-}; // namespace android
-
-#endif // ANDROID_AUDIO_HARDWARE_STUB_H
diff --git a/services/audioflinger/AudioPolicyManagerBase.cpp b/services/audioflinger/AudioPolicyManagerBase.cpp
deleted file mode 100644
index 32d92dc..0000000
--- a/services/audioflinger/AudioPolicyManagerBase.cpp
+++ /dev/null
@@ -1,2287 +0,0 @@
-/*
- * Copyright (C) 2009 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#define LOG_TAG "AudioPolicyManagerBase"
-//#define LOG_NDEBUG 0
-#include <utils/Log.h>
-#include <hardware_legacy/AudioPolicyManagerBase.h>
-#include <media/mediarecorder.h>
-#include <math.h>
-
-namespace android {
-
-
-// ----------------------------------------------------------------------------
-// AudioPolicyInterface implementation
-// ----------------------------------------------------------------------------
-
-
-status_t AudioPolicyManagerBase::setDeviceConnectionState(AudioSystem::audio_devices device,
- AudioSystem::device_connection_state state,
- const char *device_address)
-{
-
- LOGV("setDeviceConnectionState() device: %x, state %d, address %s", device, state, device_address);
-
- // connect/disconnect only 1 device at a time
- if (AudioSystem::popCount(device) != 1) return BAD_VALUE;
-
- if (strlen(device_address) >= MAX_DEVICE_ADDRESS_LEN) {
- LOGE("setDeviceConnectionState() invalid address: %s", device_address);
- return BAD_VALUE;
- }
-
- // handle output devices
- if (AudioSystem::isOutputDevice(device)) {
-
-#ifndef WITH_A2DP
- if (AudioSystem::isA2dpDevice(device)) {
- LOGE("setDeviceConnectionState() invalid device: %x", device);
- return BAD_VALUE;
- }
-#endif
-
- switch (state)
- {
- // handle output device connection
- case AudioSystem::DEVICE_STATE_AVAILABLE:
- if (mAvailableOutputDevices & device) {
- LOGW("setDeviceConnectionState() device already connected: %x", device);
- return INVALID_OPERATION;
- }
- LOGV("setDeviceConnectionState() connecting device %x", device);
-
- // register new device as available
- mAvailableOutputDevices |= device;
-
-#ifdef WITH_A2DP
- // handle A2DP device connection
- if (AudioSystem::isA2dpDevice(device)) {
- status_t status = handleA2dpConnection(device, device_address);
- if (status != NO_ERROR) {
- mAvailableOutputDevices &= ~device;
- return status;
- }
- } else
-#endif
- {
- if (AudioSystem::isBluetoothScoDevice(device)) {
- LOGV("setDeviceConnectionState() BT SCO device, address %s", device_address);
- // keep track of SCO device address
- mScoDeviceAddress = String8(device_address, MAX_DEVICE_ADDRESS_LEN);
- }
- }
- break;
- // handle output device disconnection
- case AudioSystem::DEVICE_STATE_UNAVAILABLE: {
- if (!(mAvailableOutputDevices & device)) {
- LOGW("setDeviceConnectionState() device not connected: %x", device);
- return INVALID_OPERATION;
- }
-
-
- LOGV("setDeviceConnectionState() disconnecting device %x", device);
- // remove device from available output devices
- mAvailableOutputDevices &= ~device;
-
-#ifdef WITH_A2DP
- // handle A2DP device disconnection
- if (AudioSystem::isA2dpDevice(device)) {
- status_t status = handleA2dpDisconnection(device, device_address);
- if (status != NO_ERROR) {
- mAvailableOutputDevices |= device;
- return status;
- }
- } else
-#endif
- {
- if (AudioSystem::isBluetoothScoDevice(device)) {
- mScoDeviceAddress = "";
- }
- }
- } break;
-
- default:
- LOGE("setDeviceConnectionState() invalid state: %x", state);
- return BAD_VALUE;
- }
-
- // request routing change if necessary
- uint32_t newDevice = getNewDevice(mHardwareOutput, false);
-#ifdef WITH_A2DP
- checkA2dpSuspend();
- checkOutputForAllStrategies();
- // A2DP outputs must be closed after checkOutputForAllStrategies() is executed
- if (state == AudioSystem::DEVICE_STATE_UNAVAILABLE && AudioSystem::isA2dpDevice(device)) {
- closeA2dpOutputs();
- }
-#endif
- updateDeviceForStrategy();
- setOutputDevice(mHardwareOutput, newDevice);
-
- if (device == AudioSystem::DEVICE_OUT_WIRED_HEADSET) {
- device = AudioSystem::DEVICE_IN_WIRED_HEADSET;
- } else if (device == AudioSystem::DEVICE_OUT_BLUETOOTH_SCO ||
- device == AudioSystem::DEVICE_OUT_BLUETOOTH_SCO_HEADSET ||
- device == AudioSystem::DEVICE_OUT_BLUETOOTH_SCO_CARKIT) {
- device = AudioSystem::DEVICE_IN_BLUETOOTH_SCO_HEADSET;
- } else {
- return NO_ERROR;
- }
- }
- // handle input devices
- if (AudioSystem::isInputDevice(device)) {
-
- switch (state)
- {
- // handle input device connection
- case AudioSystem::DEVICE_STATE_AVAILABLE: {
- if (mAvailableInputDevices & device) {
- LOGW("setDeviceConnectionState() device already connected: %d", device);
- return INVALID_OPERATION;
- }
- mAvailableInputDevices |= device;
- }
- break;
-
- // handle input device disconnection
- case AudioSystem::DEVICE_STATE_UNAVAILABLE: {
- if (!(mAvailableInputDevices & device)) {
- LOGW("setDeviceConnectionState() device not connected: %d", device);
- return INVALID_OPERATION;
- }
- mAvailableInputDevices &= ~device;
- } break;
-
- default:
- LOGE("setDeviceConnectionState() invalid state: %x", state);
- return BAD_VALUE;
- }
-
- audio_io_handle_t activeInput = getActiveInput();
- if (activeInput != 0) {
- AudioInputDescriptor *inputDesc = mInputs.valueFor(activeInput);
- uint32_t newDevice = getDeviceForInputSource(inputDesc->mInputSource);
- if (newDevice != inputDesc->mDevice) {
- LOGV("setDeviceConnectionState() changing device from %x to %x for input %d",
- inputDesc->mDevice, newDevice, activeInput);
- inputDesc->mDevice = newDevice;
- AudioParameter param = AudioParameter();
- param.addInt(String8(AudioParameter::keyRouting), (int)newDevice);
- mpClientInterface->setParameters(activeInput, param.toString());
- }
- }
-
- return NO_ERROR;
- }
-
- LOGW("setDeviceConnectionState() invalid device: %x", device);
- return BAD_VALUE;
-}
-
-AudioSystem::device_connection_state AudioPolicyManagerBase::getDeviceConnectionState(AudioSystem::audio_devices device,
- const char *device_address)
-{
- AudioSystem::device_connection_state state = AudioSystem::DEVICE_STATE_UNAVAILABLE;
- String8 address = String8(device_address);
- if (AudioSystem::isOutputDevice(device)) {
- if (device & mAvailableOutputDevices) {
-#ifdef WITH_A2DP
- if (AudioSystem::isA2dpDevice(device) &&
- address != "" && mA2dpDeviceAddress != address) {
- return state;
- }
-#endif
- if (AudioSystem::isBluetoothScoDevice(device) &&
- address != "" && mScoDeviceAddress != address) {
- return state;
- }
- state = AudioSystem::DEVICE_STATE_AVAILABLE;
- }
- } else if (AudioSystem::isInputDevice(device)) {
- if (device & mAvailableInputDevices) {
- state = AudioSystem::DEVICE_STATE_AVAILABLE;
- }
- }
-
- return state;
-}
-
-void AudioPolicyManagerBase::setPhoneState(int state)
-{
- LOGV("setPhoneState() state %d", state);
- uint32_t newDevice = 0;
- if (state < 0 || state >= AudioSystem::NUM_MODES) {
- LOGW("setPhoneState() invalid state %d", state);
- return;
- }
-
- if (state == mPhoneState ) {
- LOGW("setPhoneState() setting same state %d", state);
- return;
- }
-
- // if leaving call state, handle special case of active streams
- // pertaining to sonification strategy see handleIncallSonification()
- if (isInCall()) {
- LOGV("setPhoneState() in call state management: new state is %d", state);
- for (int stream = 0; stream < AudioSystem::NUM_STREAM_TYPES; stream++) {
- handleIncallSonification(stream, false, true);
- }
- }
-
- // store previous phone state for management of sonification strategy below
- int oldState = mPhoneState;
- mPhoneState = state;
- bool force = false;
-
- // are we entering or starting a call
- if (!isStateInCall(oldState) && isStateInCall(state)) {
- LOGV(" Entering call in setPhoneState()");
- // force routing command to audio hardware when starting a call
- // even if no device change is needed
- force = true;
- } else if (isStateInCall(oldState) && !isStateInCall(state)) {
- LOGV(" Exiting call in setPhoneState()");
- // force routing command to audio hardware when exiting a call
- // even if no device change is needed
- force = true;
- } else if (isStateInCall(state) && (state != oldState)) {
- LOGV(" Switching between telephony and VoIP in setPhoneState()");
- // force routing command to audio hardware when switching between telephony and VoIP
- // even if no device change is needed
- force = true;
- }
-
- // check for device and output changes triggered by new phone state
- newDevice = getNewDevice(mHardwareOutput, false);
-#ifdef WITH_A2DP
- checkA2dpSuspend();
- checkOutputForAllStrategies();
-#endif
- updateDeviceForStrategy();
-
- AudioOutputDescriptor *hwOutputDesc = mOutputs.valueFor(mHardwareOutput);
-
- // force routing command to audio hardware when ending call
- // even if no device change is needed
- if (isStateInCall(oldState) && newDevice == 0) {
- newDevice = hwOutputDesc->device();
- }
-
- // when changing from ring tone to in call mode, mute the ringing tone
- // immediately and delay the route change to avoid sending the ring tone
- // tail into the earpiece or headset.
- int delayMs = 0;
- if (isStateInCall(state) && oldState == AudioSystem::MODE_RINGTONE) {
- // delay the device change command by twice the output latency to have some margin
- // and be sure that audio buffers not yet affected by the mute are out when
- // we actually apply the route change
- delayMs = hwOutputDesc->mLatency*2;
- setStreamMute(AudioSystem::RING, true, mHardwareOutput);
- }
-
- // change routing is necessary
- setOutputDevice(mHardwareOutput, newDevice, force, delayMs);
-
- // if entering in call state, handle special case of active streams
- // pertaining to sonification strategy see handleIncallSonification()
- if (isStateInCall(state)) {
- LOGV("setPhoneState() in call state management: new state is %d", state);
- // unmute the ringing tone after a sufficient delay if it was muted before
- // setting output device above
- if (oldState == AudioSystem::MODE_RINGTONE) {
- setStreamMute(AudioSystem::RING, false, mHardwareOutput, MUTE_TIME_MS);
- }
- for (int stream = 0; stream < AudioSystem::NUM_STREAM_TYPES; stream++) {
- handleIncallSonification(stream, true, true);
- }
- }
-
- // Flag that ringtone volume must be limited to music volume until we exit MODE_RINGTONE
- if (state == AudioSystem::MODE_RINGTONE &&
- isStreamActive(AudioSystem::MUSIC, SONIFICATION_HEADSET_MUSIC_DELAY)) {
- mLimitRingtoneVolume = true;
- } else {
- mLimitRingtoneVolume = false;
- }
-}
-
-void AudioPolicyManagerBase::setRingerMode(uint32_t mode, uint32_t mask)
-{
- LOGV("setRingerMode() mode %x, mask %x", mode, mask);
-
- mRingerMode = mode;
-}
-
-void AudioPolicyManagerBase::setForceUse(AudioSystem::force_use usage, AudioSystem::forced_config config)
-{
- LOGV("setForceUse() usage %d, config %d, mPhoneState %d", usage, config, mPhoneState);
-
- bool forceVolumeReeval = false;
- switch(usage) {
- case AudioSystem::FOR_COMMUNICATION:
- if (config != AudioSystem::FORCE_SPEAKER && config != AudioSystem::FORCE_BT_SCO &&
- config != AudioSystem::FORCE_NONE) {
- LOGW("setForceUse() invalid config %d for FOR_COMMUNICATION", config);
- return;
- }
- forceVolumeReeval = true;
- mForceUse[usage] = config;
- break;
- case AudioSystem::FOR_MEDIA:
- if (config != AudioSystem::FORCE_HEADPHONES && config != AudioSystem::FORCE_BT_A2DP &&
- config != AudioSystem::FORCE_WIRED_ACCESSORY &&
- config != AudioSystem::FORCE_ANALOG_DOCK &&
- config != AudioSystem::FORCE_DIGITAL_DOCK && config != AudioSystem::FORCE_NONE) {
- LOGW("setForceUse() invalid config %d for FOR_MEDIA", config);
- return;
- }
- mForceUse[usage] = config;
- break;
- case AudioSystem::FOR_RECORD:
- if (config != AudioSystem::FORCE_BT_SCO && config != AudioSystem::FORCE_WIRED_ACCESSORY &&
- config != AudioSystem::FORCE_NONE) {
- LOGW("setForceUse() invalid config %d for FOR_RECORD", config);
- return;
- }
- mForceUse[usage] = config;
- break;
- case AudioSystem::FOR_DOCK:
- if (config != AudioSystem::FORCE_NONE && config != AudioSystem::FORCE_BT_CAR_DOCK &&
- config != AudioSystem::FORCE_BT_DESK_DOCK &&
- config != AudioSystem::FORCE_WIRED_ACCESSORY &&
- config != AudioSystem::FORCE_ANALOG_DOCK &&
- config != AudioSystem::FORCE_DIGITAL_DOCK) {
- LOGW("setForceUse() invalid config %d for FOR_DOCK", config);
- }
- forceVolumeReeval = true;
- mForceUse[usage] = config;
- break;
- default:
- LOGW("setForceUse() invalid usage %d", usage);
- break;
- }
-
- // check for device and output changes triggered by new phone state
- uint32_t newDevice = getNewDevice(mHardwareOutput, false);
-#ifdef WITH_A2DP
- checkA2dpSuspend();
- checkOutputForAllStrategies();
-#endif
- updateDeviceForStrategy();
- setOutputDevice(mHardwareOutput, newDevice);
- if (forceVolumeReeval) {
- applyStreamVolumes(mHardwareOutput, newDevice, 0, true);
- }
-
- audio_io_handle_t activeInput = getActiveInput();
- if (activeInput != 0) {
- AudioInputDescriptor *inputDesc = mInputs.valueFor(activeInput);
- newDevice = getDeviceForInputSource(inputDesc->mInputSource);
- if (newDevice != inputDesc->mDevice) {
- LOGV("setForceUse() changing device from %x to %x for input %d",
- inputDesc->mDevice, newDevice, activeInput);
- inputDesc->mDevice = newDevice;
- AudioParameter param = AudioParameter();
- param.addInt(String8(AudioParameter::keyRouting), (int)newDevice);
- mpClientInterface->setParameters(activeInput, param.toString());
- }
- }
-
-}
-
-AudioSystem::forced_config AudioPolicyManagerBase::getForceUse(AudioSystem::force_use usage)
-{
- return mForceUse[usage];
-}
-
-void AudioPolicyManagerBase::setSystemProperty(const char* property, const char* value)
-{
- LOGV("setSystemProperty() property %s, value %s", property, value);
- if (strcmp(property, "ro.camera.sound.forced") == 0) {
- if (atoi(value)) {
- LOGV("ENFORCED_AUDIBLE cannot be muted");
- mStreams[AudioSystem::ENFORCED_AUDIBLE].mCanBeMuted = false;
- } else {
- LOGV("ENFORCED_AUDIBLE can be muted");
- mStreams[AudioSystem::ENFORCED_AUDIBLE].mCanBeMuted = true;
- }
- }
-}
-
-audio_io_handle_t AudioPolicyManagerBase::getOutput(AudioSystem::stream_type stream,
- uint32_t samplingRate,
- uint32_t format,
- uint32_t channels,
- AudioSystem::output_flags flags)
-{
- audio_io_handle_t output = 0;
- uint32_t latency = 0;
- routing_strategy strategy = getStrategy((AudioSystem::stream_type)stream);
- uint32_t device = getDeviceForStrategy(strategy);
- LOGV("getOutput() stream %d, samplingRate %d, format %d, channels %x, flags %x", stream, samplingRate, format, channels, flags);
-
-#ifdef AUDIO_POLICY_TEST
- if (mCurOutput != 0) {
- LOGV("getOutput() test output mCurOutput %d, samplingRate %d, format %d, channels %x, mDirectOutput %d",
- mCurOutput, mTestSamplingRate, mTestFormat, mTestChannels, mDirectOutput);
-
- if (mTestOutputs[mCurOutput] == 0) {
- LOGV("getOutput() opening test output");
- AudioOutputDescriptor *outputDesc = new AudioOutputDescriptor();
- outputDesc->mDevice = mTestDevice;
- outputDesc->mSamplingRate = mTestSamplingRate;
- outputDesc->mFormat = mTestFormat;
- outputDesc->mChannels = mTestChannels;
- outputDesc->mLatency = mTestLatencyMs;
- outputDesc->mFlags = (AudioSystem::output_flags)(mDirectOutput ? AudioSystem::OUTPUT_FLAG_DIRECT : 0);
- outputDesc->mRefCount[stream] = 0;
- mTestOutputs[mCurOutput] = mpClientInterface->openOutput(&outputDesc->mDevice,
- &outputDesc->mSamplingRate,
- &outputDesc->mFormat,
- &outputDesc->mChannels,
- &outputDesc->mLatency,
- outputDesc->mFlags);
- if (mTestOutputs[mCurOutput]) {
- AudioParameter outputCmd = AudioParameter();
- outputCmd.addInt(String8("set_id"),mCurOutput);
- mpClientInterface->setParameters(mTestOutputs[mCurOutput],outputCmd.toString());
- addOutput(mTestOutputs[mCurOutput], outputDesc);
- }
- }
- return mTestOutputs[mCurOutput];
- }
-#endif //AUDIO_POLICY_TEST
-
- // open a direct output if required by specified parameters
- if (needsDirectOuput(stream, samplingRate, format, channels, flags, device)) {
-
- LOGV("getOutput() opening direct output device %x", device);
- AudioOutputDescriptor *outputDesc = new AudioOutputDescriptor();
- outputDesc->mDevice = device;
- outputDesc->mSamplingRate = samplingRate;
- outputDesc->mFormat = format;
- outputDesc->mChannels = channels;
- outputDesc->mLatency = 0;
- outputDesc->mFlags = (AudioSystem::output_flags)(flags | AudioSystem::OUTPUT_FLAG_DIRECT);
- outputDesc->mRefCount[stream] = 0;
- outputDesc->mStopTime[stream] = 0;
- output = mpClientInterface->openOutput(&outputDesc->mDevice,
- &outputDesc->mSamplingRate,
- &outputDesc->mFormat,
- &outputDesc->mChannels,
- &outputDesc->mLatency,
- outputDesc->mFlags);
-
- // only accept an output with the requested parameters
- if (output == 0 ||
- (samplingRate != 0 && samplingRate != outputDesc->mSamplingRate) ||
- (format != 0 && format != outputDesc->mFormat) ||
- (channels != 0 && channels != outputDesc->mChannels)) {
- LOGV("getOutput() failed opening direct output: samplingRate %d, format %d, channels %d",
- samplingRate, format, channels);
- if (output != 0) {
- mpClientInterface->closeOutput(output);
- }
- delete outputDesc;
- return 0;
- }
- addOutput(output, outputDesc);
- return output;
- }
-
- if (channels != 0 && channels != AudioSystem::CHANNEL_OUT_MONO &&
- channels != AudioSystem::CHANNEL_OUT_STEREO) {
- return 0;
- }
- // open a non direct output
-
- // get which output is suitable for the specified stream. The actual routing change will happen
- // when startOutput() is called
- uint32_t a2dpDevice = device & AudioSystem::DEVICE_OUT_ALL_A2DP;
- if (AudioSystem::popCount((AudioSystem::audio_devices)device) == 2) {
-#ifdef WITH_A2DP
- if (a2dpUsedForSonification() && a2dpDevice != 0) {
- // if playing on 2 devices, one of which is A2DP, use duplicated output
- LOGV("getOutput() using duplicated output");
- LOGW_IF((mA2dpOutput == 0), "getOutput() A2DP device in multiple %x selected but A2DP output not opened", device);
- output = mDuplicatedOutput;
- } else
-#endif
- {
- // if playing on 2 devices, neither of which is A2DP, use hardware output
- output = mHardwareOutput;
- }
- LOGV("getOutput() using output %d for 2 devices %x", output, device);
- } else {
-#ifdef WITH_A2DP
- if (a2dpDevice != 0) {
- // if playing on A2DP device, use a2dp output
- LOGW_IF((mA2dpOutput == 0), "getOutput() A2DP device %x selected but A2DP output not opened", device);
- output = mA2dpOutput;
- } else
-#endif
- {
- // if not playing on an A2DP device, use hardware output
- output = mHardwareOutput;
- }
- }
-
-
- LOGW_IF((output == 0), "getOutput() could not find output for stream %d, samplingRate %d, format %d, channels %x, flags %x",
- stream, samplingRate, format, channels, flags);
-
- return output;
-}
-
-status_t AudioPolicyManagerBase::startOutput(audio_io_handle_t output,
- AudioSystem::stream_type stream,
- int session)
-{
- LOGV("startOutput() output %d, stream %d, session %d", output, stream, session);
- ssize_t index = mOutputs.indexOfKey(output);
- if (index < 0) {
- LOGW("startOutput() unknown output %d", output);
- return BAD_VALUE;
- }
-
- AudioOutputDescriptor *outputDesc = mOutputs.valueAt(index);
- routing_strategy strategy = getStrategy((AudioSystem::stream_type)stream);
-
-#ifdef WITH_A2DP
- if (mA2dpOutput != 0 && !a2dpUsedForSonification() && strategy == STRATEGY_SONIFICATION) {
- setStrategyMute(STRATEGY_MEDIA, true, mA2dpOutput);
- }
-#endif
-
- // increment usage count for this stream on the requested output:
- // NOTE that the usage count is the same for duplicated output and hardware output which is
- // necessary for a correct control of hardware output routing by startOutput() and stopOutput()
- outputDesc->changeRefCount(stream, 1);
-
- setOutputDevice(output, getNewDevice(output));
-
- // handle special case for sonification while in call
- if (isInCall()) {
- handleIncallSonification(stream, true, false);
- }
-
- // apply volume rules for current stream and device if necessary
- checkAndSetVolume(stream, mStreams[stream].mIndexCur, output, outputDesc->device());
-
- return NO_ERROR;
-}
-
-status_t AudioPolicyManagerBase::stopOutput(audio_io_handle_t output,
- AudioSystem::stream_type stream,
- int session)
-{
- LOGV("stopOutput() output %d, stream %d, session %d", output, stream, session);
- ssize_t index = mOutputs.indexOfKey(output);
- if (index < 0) {
- LOGW("stopOutput() unknown output %d", output);
- return BAD_VALUE;
- }
-
- AudioOutputDescriptor *outputDesc = mOutputs.valueAt(index);
- routing_strategy strategy = getStrategy((AudioSystem::stream_type)stream);
-
- // handle special case for sonification while in call
- if (isInCall()) {
- handleIncallSonification(stream, false, false);
- }
-
- if (outputDesc->mRefCount[stream] > 0) {
- // decrement usage count of this stream on the output
- outputDesc->changeRefCount(stream, -1);
- // store time at which the stream was stopped - see isStreamActive()
- outputDesc->mStopTime[stream] = systemTime();
-
- setOutputDevice(output, getNewDevice(output), false, outputDesc->mLatency*2);
-
-#ifdef WITH_A2DP
- if (mA2dpOutput != 0 && !a2dpUsedForSonification() &&
- strategy == STRATEGY_SONIFICATION) {
- setStrategyMute(STRATEGY_MEDIA,
- false,
- mA2dpOutput,
- mOutputs.valueFor(mHardwareOutput)->mLatency*2);
- }
-#endif
- if (output != mHardwareOutput) {
- setOutputDevice(mHardwareOutput, getNewDevice(mHardwareOutput), true);
- }
- return NO_ERROR;
- } else {
- LOGW("stopOutput() refcount is already 0 for output %d", output);
- return INVALID_OPERATION;
- }
-}
-
-void AudioPolicyManagerBase::releaseOutput(audio_io_handle_t output)
-{
- LOGV("releaseOutput() %d", output);
- ssize_t index = mOutputs.indexOfKey(output);
- if (index < 0) {
- LOGW("releaseOutput() releasing unknown output %d", output);
- return;
- }
-
-#ifdef AUDIO_POLICY_TEST
- int testIndex = testOutputIndex(output);
- if (testIndex != 0) {
- AudioOutputDescriptor *outputDesc = mOutputs.valueAt(index);
- if (outputDesc->refCount() == 0) {
- mpClientInterface->closeOutput(output);
- delete mOutputs.valueAt(index);
- mOutputs.removeItem(output);
- mTestOutputs[testIndex] = 0;
- }
- return;
- }
-#endif //AUDIO_POLICY_TEST
-
- if (mOutputs.valueAt(index)->mFlags & AudioSystem::OUTPUT_FLAG_DIRECT) {
- mpClientInterface->closeOutput(output);
- delete mOutputs.valueAt(index);
- mOutputs.removeItem(output);
- }
-}
-
-audio_io_handle_t AudioPolicyManagerBase::getInput(int inputSource,
- uint32_t samplingRate,
- uint32_t format,
- uint32_t channels,
- AudioSystem::audio_in_acoustics acoustics)
-{
- audio_io_handle_t input = 0;
- uint32_t device = getDeviceForInputSource(inputSource);
-
- LOGV("getInput() inputSource %d, samplingRate %d, format %d, channels %x, acoustics %x", inputSource, samplingRate, format, channels, acoustics);
-
- if (device == 0) {
- return 0;
- }
-
- // adapt channel selection to input source
- switch(inputSource) {
- case AUDIO_SOURCE_VOICE_UPLINK:
- channels = AudioSystem::CHANNEL_IN_VOICE_UPLINK;
- break;
- case AUDIO_SOURCE_VOICE_DOWNLINK:
- channels = AudioSystem::CHANNEL_IN_VOICE_DNLINK;
- break;
- case AUDIO_SOURCE_VOICE_CALL:
- channels = (AudioSystem::CHANNEL_IN_VOICE_UPLINK | AudioSystem::CHANNEL_IN_VOICE_DNLINK);
- break;
- default:
- break;
- }
-
- AudioInputDescriptor *inputDesc = new AudioInputDescriptor();
-
- inputDesc->mInputSource = inputSource;
- inputDesc->mDevice = device;
- inputDesc->mSamplingRate = samplingRate;
- inputDesc->mFormat = format;
- inputDesc->mChannels = channels;
- inputDesc->mAcoustics = acoustics;
- inputDesc->mRefCount = 0;
- input = mpClientInterface->openInput(&inputDesc->mDevice,
- &inputDesc->mSamplingRate,
- &inputDesc->mFormat,
- &inputDesc->mChannels,
- inputDesc->mAcoustics);
-
- // only accept input with the exact requested set of parameters
- if (input == 0 ||
- (samplingRate != inputDesc->mSamplingRate) ||
- (format != inputDesc->mFormat) ||
- (channels != inputDesc->mChannels)) {
- LOGV("getInput() failed opening input: samplingRate %d, format %d, channels %d",
- samplingRate, format, channels);
- if (input != 0) {
- mpClientInterface->closeInput(input);
- }
- delete inputDesc;
- return 0;
- }
- mInputs.add(input, inputDesc);
- return input;
-}
-
-status_t AudioPolicyManagerBase::startInput(audio_io_handle_t input)
-{
- LOGV("startInput() input %d", input);
- ssize_t index = mInputs.indexOfKey(input);
- if (index < 0) {
- LOGW("startInput() unknown input %d", input);
- return BAD_VALUE;
- }
- AudioInputDescriptor *inputDesc = mInputs.valueAt(index);
-
-#ifdef AUDIO_POLICY_TEST
- if (mTestInput == 0)
-#endif //AUDIO_POLICY_TEST
- {
- // refuse 2 active AudioRecord clients at the same time
- if (getActiveInput() != 0) {
- LOGW("startInput() input %d failed: other input already started", input);
- return INVALID_OPERATION;
- }
- }
-
- AudioParameter param = AudioParameter();
- param.addInt(String8(AudioParameter::keyRouting), (int)inputDesc->mDevice);
-
- param.addInt(String8(AudioParameter::keyInputSource), (int)inputDesc->mInputSource);
- LOGV("AudioPolicyManager::startInput() input source = %d", inputDesc->mInputSource);
-
- mpClientInterface->setParameters(input, param.toString());
-
- inputDesc->mRefCount = 1;
- return NO_ERROR;
-}
-
-status_t AudioPolicyManagerBase::stopInput(audio_io_handle_t input)
-{
- LOGV("stopInput() input %d", input);
- ssize_t index = mInputs.indexOfKey(input);
- if (index < 0) {
- LOGW("stopInput() unknown input %d", input);
- return BAD_VALUE;
- }
- AudioInputDescriptor *inputDesc = mInputs.valueAt(index);
-
- if (inputDesc->mRefCount == 0) {
- LOGW("stopInput() input %d already stopped", input);
- return INVALID_OPERATION;
- } else {
- AudioParameter param = AudioParameter();
- param.addInt(String8(AudioParameter::keyRouting), 0);
- mpClientInterface->setParameters(input, param.toString());
- inputDesc->mRefCount = 0;
- return NO_ERROR;
- }
-}
-
-void AudioPolicyManagerBase::releaseInput(audio_io_handle_t input)
-{
- LOGV("releaseInput() %d", input);
- ssize_t index = mInputs.indexOfKey(input);
- if (index < 0) {
- LOGW("releaseInput() releasing unknown input %d", input);
- return;
- }
- mpClientInterface->closeInput(input);
- delete mInputs.valueAt(index);
- mInputs.removeItem(input);
- LOGV("releaseInput() exit");
-}
-
-void AudioPolicyManagerBase::initStreamVolume(AudioSystem::stream_type stream,
- int indexMin,
- int indexMax)
-{
- LOGV("initStreamVolume() stream %d, min %d, max %d", stream , indexMin, indexMax);
- if (indexMin < 0 || indexMin >= indexMax) {
- LOGW("initStreamVolume() invalid index limits for stream %d, min %d, max %d", stream , indexMin, indexMax);
- return;
- }
- mStreams[stream].mIndexMin = indexMin;
- mStreams[stream].mIndexMax = indexMax;
-}
-
-status_t AudioPolicyManagerBase::setStreamVolumeIndex(AudioSystem::stream_type stream, int index)
-{
-
- if ((index < mStreams[stream].mIndexMin) || (index > mStreams[stream].mIndexMax)) {
- return BAD_VALUE;
- }
-
- // Force max volume if stream cannot be muted
- if (!mStreams[stream].mCanBeMuted) index = mStreams[stream].mIndexMax;
-
- LOGV("setStreamVolumeIndex() stream %d, index %d", stream, index);
- mStreams[stream].mIndexCur = index;
-
- // compute and apply stream volume on all outputs according to connected device
- status_t status = NO_ERROR;
- for (size_t i = 0; i < mOutputs.size(); i++) {
- status_t volStatus = checkAndSetVolume(stream, index, mOutputs.keyAt(i), mOutputs.valueAt(i)->device());
- if (volStatus != NO_ERROR) {
- status = volStatus;
- }
- }
- return status;
-}
-
-status_t AudioPolicyManagerBase::getStreamVolumeIndex(AudioSystem::stream_type stream, int *index)
-{
- if (index == 0) {
- return BAD_VALUE;
- }
- LOGV("getStreamVolumeIndex() stream %d", stream);
- *index = mStreams[stream].mIndexCur;
- return NO_ERROR;
-}
-
-audio_io_handle_t AudioPolicyManagerBase::getOutputForEffect(effect_descriptor_t *desc)
-{
- LOGV("getOutputForEffect()");
- // apply simple rule where global effects are attached to the same output as MUSIC streams
- return getOutput(AudioSystem::MUSIC);
-}
-
-status_t AudioPolicyManagerBase::registerEffect(effect_descriptor_t *desc,
- audio_io_handle_t output,
- uint32_t strategy,
- int session,
- int id)
-{
- ssize_t index = mOutputs.indexOfKey(output);
- if (index < 0) {
- LOGW("registerEffect() unknown output %d", output);
- return INVALID_OPERATION;
- }
-
- if (mTotalEffectsCpuLoad + desc->cpuLoad > getMaxEffectsCpuLoad()) {
- LOGW("registerEffect() CPU Load limit exceeded for Fx %s, CPU %f MIPS",
- desc->name, (float)desc->cpuLoad/10);
- return INVALID_OPERATION;
- }
- if (mTotalEffectsMemory + desc->memoryUsage > getMaxEffectsMemory()) {
- LOGW("registerEffect() memory limit exceeded for Fx %s, Memory %d KB",
- desc->name, desc->memoryUsage);
- return INVALID_OPERATION;
- }
- mTotalEffectsCpuLoad += desc->cpuLoad;
- mTotalEffectsMemory += desc->memoryUsage;
- LOGV("registerEffect() effect %s, output %d, strategy %d session %d id %d",
- desc->name, output, strategy, session, id);
-
- LOGV("registerEffect() CPU %d, memory %d", desc->cpuLoad, desc->memoryUsage);
- LOGV(" total CPU %d, total memory %d", mTotalEffectsCpuLoad, mTotalEffectsMemory);
-
- EffectDescriptor *pDesc = new EffectDescriptor();
- memcpy (&pDesc->mDesc, desc, sizeof(effect_descriptor_t));
- pDesc->mOutput = output;
- pDesc->mStrategy = (routing_strategy)strategy;
- pDesc->mSession = session;
- mEffects.add(id, pDesc);
-
- return NO_ERROR;
-}
-
-status_t AudioPolicyManagerBase::unregisterEffect(int id)
-{
- ssize_t index = mEffects.indexOfKey(id);
- if (index < 0) {
- LOGW("unregisterEffect() unknown effect ID %d", id);
- return INVALID_OPERATION;
- }
-
- EffectDescriptor *pDesc = mEffects.valueAt(index);
-
- if (mTotalEffectsCpuLoad < pDesc->mDesc.cpuLoad) {
- LOGW("unregisterEffect() CPU load %d too high for total %d",
- pDesc->mDesc.cpuLoad, mTotalEffectsCpuLoad);
- pDesc->mDesc.cpuLoad = mTotalEffectsCpuLoad;
- }
- mTotalEffectsCpuLoad -= pDesc->mDesc.cpuLoad;
- if (mTotalEffectsMemory < pDesc->mDesc.memoryUsage) {
- LOGW("unregisterEffect() memory %d too big for total %d",
- pDesc->mDesc.memoryUsage, mTotalEffectsMemory);
- pDesc->mDesc.memoryUsage = mTotalEffectsMemory;
- }
- mTotalEffectsMemory -= pDesc->mDesc.memoryUsage;
- LOGV("unregisterEffect() effect %s, ID %d, CPU %d, memory %d",
- pDesc->mDesc.name, id, pDesc->mDesc.cpuLoad, pDesc->mDesc.memoryUsage);
- LOGV(" total CPU %d, total memory %d", mTotalEffectsCpuLoad, mTotalEffectsMemory);
-
- mEffects.removeItem(id);
- delete pDesc;
-
- return NO_ERROR;
-}
-
-bool AudioPolicyManagerBase::isStreamActive(int stream, uint32_t inPastMs) const
-{
- nsecs_t sysTime = systemTime();
- for (size_t i = 0; i < mOutputs.size(); i++) {
- if (mOutputs.valueAt(i)->mRefCount[stream] != 0 ||
- ns2ms(sysTime - mOutputs.valueAt(i)->mStopTime[stream]) < inPastMs) {
- return true;
- }
- }
- return false;
-}
-
-
-status_t AudioPolicyManagerBase::dump(int fd)
-{
- const size_t SIZE = 256;
- char buffer[SIZE];
- String8 result;
-
- snprintf(buffer, SIZE, "\nAudioPolicyManager Dump: %p\n", this);
- result.append(buffer);
- snprintf(buffer, SIZE, " Hardware Output: %d\n", mHardwareOutput);
- result.append(buffer);
-#ifdef WITH_A2DP
- snprintf(buffer, SIZE, " A2DP Output: %d\n", mA2dpOutput);
- result.append(buffer);
- snprintf(buffer, SIZE, " Duplicated Output: %d\n", mDuplicatedOutput);
- result.append(buffer);
- snprintf(buffer, SIZE, " A2DP device address: %s\n", mA2dpDeviceAddress.string());
- result.append(buffer);
-#endif
- snprintf(buffer, SIZE, " SCO device address: %s\n", mScoDeviceAddress.string());
- result.append(buffer);
- snprintf(buffer, SIZE, " Output devices: %08x\n", mAvailableOutputDevices);
- result.append(buffer);
- snprintf(buffer, SIZE, " Input devices: %08x\n", mAvailableInputDevices);
- result.append(buffer);
- snprintf(buffer, SIZE, " Phone state: %d\n", mPhoneState);
- result.append(buffer);
- snprintf(buffer, SIZE, " Ringer mode: %d\n", mRingerMode);
- result.append(buffer);
- snprintf(buffer, SIZE, " Force use for communications %d\n", mForceUse[AudioSystem::FOR_COMMUNICATION]);
- result.append(buffer);
- snprintf(buffer, SIZE, " Force use for media %d\n", mForceUse[AudioSystem::FOR_MEDIA]);
- result.append(buffer);
- snprintf(buffer, SIZE, " Force use for record %d\n", mForceUse[AudioSystem::FOR_RECORD]);
- result.append(buffer);
- snprintf(buffer, SIZE, " Force use for dock %d\n", mForceUse[AudioSystem::FOR_DOCK]);
- result.append(buffer);
- write(fd, result.string(), result.size());
-
- snprintf(buffer, SIZE, "\nOutputs dump:\n");
- write(fd, buffer, strlen(buffer));
- for (size_t i = 0; i < mOutputs.size(); i++) {
- snprintf(buffer, SIZE, "- Output %d dump:\n", mOutputs.keyAt(i));
- write(fd, buffer, strlen(buffer));
- mOutputs.valueAt(i)->dump(fd);
- }
-
- snprintf(buffer, SIZE, "\nInputs dump:\n");
- write(fd, buffer, strlen(buffer));
- for (size_t i = 0; i < mInputs.size(); i++) {
- snprintf(buffer, SIZE, "- Input %d dump:\n", mInputs.keyAt(i));
- write(fd, buffer, strlen(buffer));
- mInputs.valueAt(i)->dump(fd);
- }
-
- snprintf(buffer, SIZE, "\nStreams dump:\n");
- write(fd, buffer, strlen(buffer));
- snprintf(buffer, SIZE, " Stream Index Min Index Max Index Cur Can be muted\n");
- write(fd, buffer, strlen(buffer));
- for (size_t i = 0; i < AudioSystem::NUM_STREAM_TYPES; i++) {
- snprintf(buffer, SIZE, " %02d", i);
- mStreams[i].dump(buffer + 3, SIZE);
- write(fd, buffer, strlen(buffer));
- }
-
- snprintf(buffer, SIZE, "\nTotal Effects CPU: %f MIPS, Total Effects memory: %d KB\n",
- (float)mTotalEffectsCpuLoad/10, mTotalEffectsMemory);
- write(fd, buffer, strlen(buffer));
-
- snprintf(buffer, SIZE, "Registered effects:\n");
- write(fd, buffer, strlen(buffer));
- for (size_t i = 0; i < mEffects.size(); i++) {
- snprintf(buffer, SIZE, "- Effect %d dump:\n", mEffects.keyAt(i));
- write(fd, buffer, strlen(buffer));
- mEffects.valueAt(i)->dump(fd);
- }
-
-
- return NO_ERROR;
-}
-
-// ----------------------------------------------------------------------------
-// AudioPolicyManagerBase
-// ----------------------------------------------------------------------------
-
-AudioPolicyManagerBase::AudioPolicyManagerBase(AudioPolicyClientInterface *clientInterface)
- :
-#ifdef AUDIO_POLICY_TEST
- Thread(false),
-#endif //AUDIO_POLICY_TEST
- mPhoneState(AudioSystem::MODE_NORMAL), mRingerMode(0),
- mLimitRingtoneVolume(false), mLastVoiceVolume(-1.0f),
- mTotalEffectsCpuLoad(0), mTotalEffectsMemory(0),
- mA2dpSuspended(false)
-{
- mpClientInterface = clientInterface;
-
- for (int i = 0; i < AudioSystem::NUM_FORCE_USE; i++) {
- mForceUse[i] = AudioSystem::FORCE_NONE;
- }
-
- initializeVolumeCurves();
-
- // devices available by default are speaker, earpiece and microphone
- mAvailableOutputDevices = AudioSystem::DEVICE_OUT_EARPIECE |
- AudioSystem::DEVICE_OUT_SPEAKER;
- mAvailableInputDevices = AudioSystem::DEVICE_IN_BUILTIN_MIC;
-
-#ifdef WITH_A2DP
- mA2dpOutput = 0;
- mDuplicatedOutput = 0;
- mA2dpDeviceAddress = String8("");
-#endif
- mScoDeviceAddress = String8("");
-
- // open hardware output
- AudioOutputDescriptor *outputDesc = new AudioOutputDescriptor();
- outputDesc->mDevice = (uint32_t)AudioSystem::DEVICE_OUT_SPEAKER;
- mHardwareOutput = mpClientInterface->openOutput(&outputDesc->mDevice,
- &outputDesc->mSamplingRate,
- &outputDesc->mFormat,
- &outputDesc->mChannels,
- &outputDesc->mLatency,
- outputDesc->mFlags);
-
- if (mHardwareOutput == 0) {
- LOGE("Failed to initialize hardware output stream, samplingRate: %d, format %d, channels %d",
- outputDesc->mSamplingRate, outputDesc->mFormat, outputDesc->mChannels);
- } else {
- addOutput(mHardwareOutput, outputDesc);
- setOutputDevice(mHardwareOutput, (uint32_t)AudioSystem::DEVICE_OUT_SPEAKER, true);
- //TODO: configure audio effect output stage here
- }
-
- updateDeviceForStrategy();
-#ifdef AUDIO_POLICY_TEST
- if (mHardwareOutput != 0) {
- AudioParameter outputCmd = AudioParameter();
- outputCmd.addInt(String8("set_id"), 0);
- mpClientInterface->setParameters(mHardwareOutput, outputCmd.toString());
-
- mTestDevice = AudioSystem::DEVICE_OUT_SPEAKER;
- mTestSamplingRate = 44100;
- mTestFormat = AudioSystem::PCM_16_BIT;
- mTestChannels = AudioSystem::CHANNEL_OUT_STEREO;
- mTestLatencyMs = 0;
- mCurOutput = 0;
- mDirectOutput = false;
- for (int i = 0; i < NUM_TEST_OUTPUTS; i++) {
- mTestOutputs[i] = 0;
- }
-
- const size_t SIZE = 256;
- char buffer[SIZE];
- snprintf(buffer, SIZE, "AudioPolicyManagerTest");
- run(buffer, ANDROID_PRIORITY_AUDIO);
- }
-#endif //AUDIO_POLICY_TEST
-}
-
-AudioPolicyManagerBase::~AudioPolicyManagerBase()
-{
-#ifdef AUDIO_POLICY_TEST
- exit();
-#endif //AUDIO_POLICY_TEST
- for (size_t i = 0; i < mOutputs.size(); i++) {
- mpClientInterface->closeOutput(mOutputs.keyAt(i));
- delete mOutputs.valueAt(i);
- }
- mOutputs.clear();
- for (size_t i = 0; i < mInputs.size(); i++) {
- mpClientInterface->closeInput(mInputs.keyAt(i));
- delete mInputs.valueAt(i);
- }
- mInputs.clear();
-}
-
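-// initCheck() reports whether the constructor managed to open the hardware output:
-// NO_INIT if mHardwareOutput is still 0, NO_ERROR otherwise.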
-status_t AudioPolicyManagerBase::initCheck()
-{
- return (mHardwareOutput == 0) ? NO_INIT : NO_ERROR;
-}
-
-#ifdef AUDIO_POLICY_TEST
-bool AudioPolicyManagerBase::threadLoop()
-{
- LOGV("entering threadLoop()");
- while (!exitPending())
- {
- String8 command;
- int valueInt;
- String8 value;
-
- Mutex::Autolock _l(mLock);
- mWaitWorkCV.waitRelative(mLock, milliseconds(50));
-
- command = mpClientInterface->getParameters(0, String8("test_cmd_policy"));
- AudioParameter param = AudioParameter(command);
-
- if (param.getInt(String8("test_cmd_policy"), valueInt) == NO_ERROR &&
- valueInt != 0) {
- LOGV("Test command %s received", command.string());
- String8 target;
- if (param.get(String8("target"), target) != NO_ERROR) {
- target = "Manager";
- }
- if (param.getInt(String8("test_cmd_policy_output"), valueInt) == NO_ERROR) {
- param.remove(String8("test_cmd_policy_output"));
- mCurOutput = valueInt;
- }
- if (param.get(String8("test_cmd_policy_direct"), value) == NO_ERROR) {
- param.remove(String8("test_cmd_policy_direct"));
- if (value == "false") {
- mDirectOutput = false;
- } else if (value == "true") {
- mDirectOutput = true;
- }
- }
- if (param.getInt(String8("test_cmd_policy_input"), valueInt) == NO_ERROR) {
- param.remove(String8("test_cmd_policy_input"));
- mTestInput = valueInt;
- }
-
- if (param.get(String8("test_cmd_policy_format"), value) == NO_ERROR) {
- param.remove(String8("test_cmd_policy_format"));
- int format = AudioSystem::INVALID_FORMAT;
- if (value == "PCM 16 bits") {
- format = AudioSystem::PCM_16_BIT;
- } else if (value == "PCM 8 bits") {
- format = AudioSystem::PCM_8_BIT;
- } else if (value == "Compressed MP3") {
- format = AudioSystem::MP3;
- }
- if (format != AudioSystem::INVALID_FORMAT) {
- if (target == "Manager") {
- mTestFormat = format;
- } else if (mTestOutputs[mCurOutput] != 0) {
- AudioParameter outputParam = AudioParameter();
- outputParam.addInt(String8("format"), format);
- mpClientInterface->setParameters(mTestOutputs[mCurOutput], outputParam.toString());
- }
- }
- }
- if (param.get(String8("test_cmd_policy_channels"), value) == NO_ERROR) {
- param.remove(String8("test_cmd_policy_channels"));
- int channels = 0;
-
- if (value == "Channels Stereo") {
- channels = AudioSystem::CHANNEL_OUT_STEREO;
- } else if (value == "Channels Mono") {
- channels = AudioSystem::CHANNEL_OUT_MONO;
- }
- if (channels != 0) {
- if (target == "Manager") {
- mTestChannels = channels;
- } else if (mTestOutputs[mCurOutput] != 0) {
- AudioParameter outputParam = AudioParameter();
- outputParam.addInt(String8("channels"), channels);
- mpClientInterface->setParameters(mTestOutputs[mCurOutput], outputParam.toString());
- }
- }
- }
- if (param.getInt(String8("test_cmd_policy_sampleRate"), valueInt) == NO_ERROR) {
- param.remove(String8("test_cmd_policy_sampleRate"));
- if (valueInt >= 0 && valueInt <= 96000) {
- int samplingRate = valueInt;
- if (target == "Manager") {
- mTestSamplingRate = samplingRate;
- } else if (mTestOutputs[mCurOutput] != 0) {
- AudioParameter outputParam = AudioParameter();
- outputParam.addInt(String8("sampling_rate"), samplingRate);
- mpClientInterface->setParameters(mTestOutputs[mCurOutput], outputParam.toString());
- }
- }
- }
-
- if (param.get(String8("test_cmd_policy_reopen"), value) == NO_ERROR) {
- param.remove(String8("test_cmd_policy_reopen"));
-
- mpClientInterface->closeOutput(mHardwareOutput);
- delete mOutputs.valueFor(mHardwareOutput);
- mOutputs.removeItem(mHardwareOutput);
-
- AudioOutputDescriptor *outputDesc = new AudioOutputDescriptor();
- outputDesc->mDevice = (uint32_t)AudioSystem::DEVICE_OUT_SPEAKER;
- mHardwareOutput = mpClientInterface->openOutput(&outputDesc->mDevice,
- &outputDesc->mSamplingRate,
- &outputDesc->mFormat,
- &outputDesc->mChannels,
- &outputDesc->mLatency,
- outputDesc->mFlags);
- if (mHardwareOutput == 0) {
- LOGE("Failed to reopen hardware output stream, samplingRate: %d, format %d, channels %d",
- outputDesc->mSamplingRate, outputDesc->mFormat, outputDesc->mChannels);
- } else {
- AudioParameter outputCmd = AudioParameter();
- outputCmd.addInt(String8("set_id"), 0);
- mpClientInterface->setParameters(mHardwareOutput, outputCmd.toString());
- addOutput(mHardwareOutput, outputDesc);
- }
- }
-
-
- mpClientInterface->setParameters(0, String8("test_cmd_policy="));
- }
- }
- return false;
-}
-
-void AudioPolicyManagerBase::exit()
-{
- {
- AutoMutex _l(mLock);
- requestExit();
- mWaitWorkCV.signal();
- }
- requestExitAndWait();
-}
-
-int AudioPolicyManagerBase::testOutputIndex(audio_io_handle_t output)
-{
- for (int i = 0; i < NUM_TEST_OUTPUTS; i++) {
- if (output == mTestOutputs[i]) return i;
- }
- return 0;
-}
-#endif //AUDIO_POLICY_TEST
-
-// ---
-
-void AudioPolicyManagerBase::addOutput(audio_io_handle_t id, AudioOutputDescriptor *outputDesc)
-{
- outputDesc->mId = id;
- mOutputs.add(id, outputDesc);
-}
-
-
-#ifdef WITH_A2DP
-status_t AudioPolicyManagerBase::handleA2dpConnection(AudioSystem::audio_devices device,
- const char *device_address)
-{
- // when an A2DP device is connected, open an A2DP and a duplicated output
- LOGV("opening A2DP output for device %s", device_address);
- AudioOutputDescriptor *outputDesc = new AudioOutputDescriptor();
- outputDesc->mDevice = device;
- mA2dpOutput = mpClientInterface->openOutput(&outputDesc->mDevice,
- &outputDesc->mSamplingRate,
- &outputDesc->mFormat,
- &outputDesc->mChannels,
- &outputDesc->mLatency,
- outputDesc->mFlags);
- if (mA2dpOutput) {
- // add A2DP output descriptor
- addOutput(mA2dpOutput, outputDesc);
-
- //TODO: configure audio effect output stage here
-
- // set initial stream volume for A2DP device
- applyStreamVolumes(mA2dpOutput, device);
- if (a2dpUsedForSonification()) {
- mDuplicatedOutput = mpClientInterface->openDuplicateOutput(mA2dpOutput, mHardwareOutput);
- }
- if (mDuplicatedOutput != 0 ||
- !a2dpUsedForSonification()) {
- // if the A2DP output is usable (duplicated output opened, or duplication not needed for
- // sonification), send the device address to the A2DP hardware interface
- AudioParameter param;
- param.add(String8("a2dp_sink_address"), String8(device_address));
- mpClientInterface->setParameters(mA2dpOutput, param.toString());
- mA2dpDeviceAddress = String8(device_address, MAX_DEVICE_ADDRESS_LEN);
-
- if (a2dpUsedForSonification()) {
- // add duplicated output descriptor
- AudioOutputDescriptor *dupOutputDesc = new AudioOutputDescriptor();
- dupOutputDesc->mOutput1 = mOutputs.valueFor(mHardwareOutput);
- dupOutputDesc->mOutput2 = mOutputs.valueFor(mA2dpOutput);
- dupOutputDesc->mSamplingRate = outputDesc->mSamplingRate;
- dupOutputDesc->mFormat = outputDesc->mFormat;
- dupOutputDesc->mChannels = outputDesc->mChannels;
- dupOutputDesc->mLatency = outputDesc->mLatency;
- addOutput(mDuplicatedOutput, dupOutputDesc);
- applyStreamVolumes(mDuplicatedOutput, device);
- }
- } else {
- LOGW("getOutput() could not open duplicated output for %d and %d",
- mHardwareOutput, mA2dpOutput);
- mpClientInterface->closeOutput(mA2dpOutput);
- mOutputs.removeItem(mA2dpOutput);
- mA2dpOutput = 0;
- delete outputDesc;
- return NO_INIT;
- }
- } else {
- LOGW("setDeviceConnectionState() could not open A2DP output for device %x", device);
- delete outputDesc;
- return NO_INIT;
- }
- AudioOutputDescriptor *hwOutputDesc = mOutputs.valueFor(mHardwareOutput);
-
- if (!a2dpUsedForSonification()) {
- // mute music on A2DP output if a notification or ringtone is playing
- uint32_t refCount = hwOutputDesc->strategyRefCount(STRATEGY_SONIFICATION);
- for (uint32_t i = 0; i < refCount; i++) {
- setStrategyMute(STRATEGY_MEDIA, true, mA2dpOutput);
- }
- }
-
- mA2dpSuspended = false;
-
- return NO_ERROR;
-}
-
-status_t AudioPolicyManagerBase::handleA2dpDisconnection(AudioSystem::audio_devices device,
- const char *device_address)
-{
- if (mA2dpOutput == 0) {
- LOGW("setDeviceConnectionState() disconnecting A2DP and no A2DP output!");
- return INVALID_OPERATION;
- }
-
- if (mA2dpDeviceAddress != device_address) {
- LOGW("setDeviceConnectionState() disconnecting unknown A2DP sink address %s", device_address);
- return INVALID_OPERATION;
- }
-
- // mute media strategy to avoid outputting sound on hardware output while music stream
- // is switched from A2DP output and before music is paused by music application
- setStrategyMute(STRATEGY_MEDIA, true, mHardwareOutput);
- setStrategyMute(STRATEGY_MEDIA, false, mHardwareOutput, MUTE_TIME_MS);
-
- if (!a2dpUsedForSonification()) {
- // unmute music on A2DP output if a notification or ringtone is playing
- uint32_t refCount = mOutputs.valueFor(mHardwareOutput)->strategyRefCount(STRATEGY_SONIFICATION);
- for (uint32_t i = 0; i < refCount; i++) {
- setStrategyMute(STRATEGY_MEDIA, false, mA2dpOutput);
- }
- }
- mA2dpDeviceAddress = "";
- mA2dpSuspended = false;
- return NO_ERROR;
-}
-
-void AudioPolicyManagerBase::closeA2dpOutputs()
-{
-
- LOGV("setDeviceConnectionState() closing A2DP and duplicated output!");
-
- if (mDuplicatedOutput != 0) {
- AudioOutputDescriptor *dupOutputDesc = mOutputs.valueFor(mDuplicatedOutput);
- AudioOutputDescriptor *hwOutputDesc = mOutputs.valueFor(mHardwareOutput);
- // As all active tracks on duplicated output will be deleted,
- // and as they were also referenced on hardware output, the reference
- // count for their stream type must be adjusted accordingly on
- // hardware output.
- for (int i = 0; i < (int)AudioSystem::NUM_STREAM_TYPES; i++) {
- int refCount = dupOutputDesc->mRefCount[i];
- hwOutputDesc->changeRefCount((AudioSystem::stream_type)i,-refCount);
- }
-
- mpClientInterface->closeOutput(mDuplicatedOutput);
- delete mOutputs.valueFor(mDuplicatedOutput);
- mOutputs.removeItem(mDuplicatedOutput);
- mDuplicatedOutput = 0;
- }
- if (mA2dpOutput != 0) {
- AudioParameter param;
- param.add(String8("closing"), String8("true"));
- mpClientInterface->setParameters(mA2dpOutput, param.toString());
-
- mpClientInterface->closeOutput(mA2dpOutput);
- delete mOutputs.valueFor(mA2dpOutput);
- mOutputs.removeItem(mA2dpOutput);
- mA2dpOutput = 0;
- }
-}
-
-void AudioPolicyManagerBase::checkOutputForStrategy(routing_strategy strategy)
-{
- uint32_t prevDevice = getDeviceForStrategy(strategy);
- uint32_t curDevice = getDeviceForStrategy(strategy, false);
- bool a2dpWasUsed = AudioSystem::isA2dpDevice((AudioSystem::audio_devices)(prevDevice & ~AudioSystem::DEVICE_OUT_SPEAKER));
- bool a2dpIsUsed = AudioSystem::isA2dpDevice((AudioSystem::audio_devices)(curDevice & ~AudioSystem::DEVICE_OUT_SPEAKER));
- audio_io_handle_t srcOutput = 0;
- audio_io_handle_t dstOutput = 0;
-
- if (a2dpWasUsed && !a2dpIsUsed) {
- bool dupUsed = a2dpUsedForSonification() && a2dpWasUsed && (AudioSystem::popCount(prevDevice) == 2);
- dstOutput = mHardwareOutput;
- if (dupUsed) {
- LOGV("checkOutputForStrategy() moving strategy %d from duplicated", strategy);
- srcOutput = mDuplicatedOutput;
- } else {
- LOGV("checkOutputForStrategy() moving strategy %d from a2dp", strategy);
- srcOutput = mA2dpOutput;
- }
- }
- if (a2dpIsUsed && !a2dpWasUsed) {
- bool dupUsed = a2dpUsedForSonification() && a2dpIsUsed && (AudioSystem::popCount(curDevice) == 2);
- srcOutput = mHardwareOutput;
- if (dupUsed) {
- LOGV("checkOutputForStrategy() moving strategy %d to duplicated", strategy);
- dstOutput = mDuplicatedOutput;
- } else {
- LOGV("checkOutputForStrategy() moving strategy %d to a2dp", strategy);
- dstOutput = mA2dpOutput;
- }
- }
-
- if (srcOutput != 0 && dstOutput != 0) {
- // Move effects associated to this strategy from previous output to new output
- for (size_t i = 0; i < mEffects.size(); i++) {
- EffectDescriptor *desc = mEffects.valueAt(i);
- if (desc->mSession != AudioSystem::SESSION_OUTPUT_STAGE &&
- desc->mStrategy == strategy &&
- desc->mOutput == srcOutput) {
- LOGV("checkOutputForStrategy() moving effect %d to output %d", mEffects.keyAt(i), dstOutput);
- mpClientInterface->moveEffects(desc->mSession, srcOutput, dstOutput);
- desc->mOutput = dstOutput;
- }
- }
- // Move tracks associated to this strategy from previous output to new output
- for (int i = 0; i < (int)AudioSystem::NUM_STREAM_TYPES; i++) {
- if (getStrategy((AudioSystem::stream_type)i) == strategy) {
- mpClientInterface->setStreamOutput((AudioSystem::stream_type)i, dstOutput);
- }
- }
- }
-}
-
-void AudioPolicyManagerBase::checkOutputForAllStrategies()
-{
- checkOutputForStrategy(STRATEGY_PHONE);
- checkOutputForStrategy(STRATEGY_SONIFICATION);
- checkOutputForStrategy(STRATEGY_MEDIA);
- checkOutputForStrategy(STRATEGY_DTMF);
-}
-
-void AudioPolicyManagerBase::checkA2dpSuspend()
-{
- // suspend A2DP output if:
- // (NOT already suspended) &&
- // ((SCO device is connected &&
- // (forced usage for communication || for record is SCO))) ||
- // (phone state is ringing || in call)
- //
- // restore A2DP output if:
- // (Already suspended) &&
- // ((SCO device is NOT connected ||
- // (forced usage NOT for communication && NOT for record is SCO))) &&
- // (phone state is NOT ringing && NOT in call)
- //
- if (mA2dpOutput == 0) {
- return;
- }
-
- if (mA2dpSuspended) {
- if (((mScoDeviceAddress == "") ||
- ((mForceUse[AudioSystem::FOR_COMMUNICATION] != AudioSystem::FORCE_BT_SCO) &&
- (mForceUse[AudioSystem::FOR_RECORD] != AudioSystem::FORCE_BT_SCO))) &&
- ((mPhoneState != AudioSystem::MODE_IN_CALL) &&
- (mPhoneState != AudioSystem::MODE_RINGTONE))) {
-
- mpClientInterface->restoreOutput(mA2dpOutput);
- mA2dpSuspended = false;
- }
- } else {
- if (((mScoDeviceAddress != "") &&
- ((mForceUse[AudioSystem::FOR_COMMUNICATION] == AudioSystem::FORCE_BT_SCO) ||
- (mForceUse[AudioSystem::FOR_RECORD] == AudioSystem::FORCE_BT_SCO))) ||
- ((mPhoneState == AudioSystem::MODE_IN_CALL) ||
- (mPhoneState == AudioSystem::MODE_RINGTONE))) {
-
- mpClientInterface->suspendOutput(mA2dpOutput);
- mA2dpSuspended = true;
- }
- }
-}
-
-
-#endif
-
-uint32_t AudioPolicyManagerBase::getNewDevice(audio_io_handle_t output, bool fromCache)
-{
- uint32_t device = 0;
-
- AudioOutputDescriptor *outputDesc = mOutputs.valueFor(output);
- // check the following by order of priority to request a routing change if necessary:
- // 1: we are in call or the strategy phone is active on the hardware output:
- // use device for strategy phone
- // 2: the strategy sonification is active on the hardware output:
- // use device for strategy sonification
- // 3: the strategy media is active on the hardware output:
- // use device for strategy media
- // 4: the strategy DTMF is active on the hardware output:
- // use device for strategy DTMF
- if (isInCall() ||
- outputDesc->isUsedByStrategy(STRATEGY_PHONE)) {
- device = getDeviceForStrategy(STRATEGY_PHONE, fromCache);
- } else if (outputDesc->isUsedByStrategy(STRATEGY_SONIFICATION)) {
- device = getDeviceForStrategy(STRATEGY_SONIFICATION, fromCache);
- } else if (outputDesc->isUsedByStrategy(STRATEGY_MEDIA)) {
- device = getDeviceForStrategy(STRATEGY_MEDIA, fromCache);
- } else if (outputDesc->isUsedByStrategy(STRATEGY_DTMF)) {
- device = getDeviceForStrategy(STRATEGY_DTMF, fromCache);
- }
-
- LOGV("getNewDevice() selected device %x", device);
- return device;
-}
-
-uint32_t AudioPolicyManagerBase::getStrategyForStream(AudioSystem::stream_type stream) {
- return (uint32_t)getStrategy(stream);
-}
-
-uint32_t AudioPolicyManagerBase::getDevicesForStream(AudioSystem::stream_type stream) {
- uint32_t devices;
- // By checking the range of stream before calling getStrategy, we avoid
- // getStrategy's behavior for invalid streams. getStrategy would do a LOGE
- // and then return STRATEGY_MEDIA, but we want to return the empty set.
- if (stream < (AudioSystem::stream_type) 0 || stream >= AudioSystem::NUM_STREAM_TYPES) {
- devices = 0;
- } else {
- AudioPolicyManagerBase::routing_strategy strategy = getStrategy(stream);
- devices = getDeviceForStrategy(strategy, true);
- }
- return devices;
-}
-
-AudioPolicyManagerBase::routing_strategy AudioPolicyManagerBase::getStrategy(
- AudioSystem::stream_type stream) {
- // stream to strategy mapping
- switch (stream) {
- case AudioSystem::VOICE_CALL:
- case AudioSystem::BLUETOOTH_SCO:
- return STRATEGY_PHONE;
- case AudioSystem::RING:
- case AudioSystem::NOTIFICATION:
- case AudioSystem::ALARM:
- case AudioSystem::ENFORCED_AUDIBLE:
- return STRATEGY_SONIFICATION;
- case AudioSystem::DTMF:
- return STRATEGY_DTMF;
- default:
- LOGE("unknown stream type");
- case AudioSystem::SYSTEM:
- // NOTE: SYSTEM stream uses MEDIA strategy because muting music and switching outputs
- // while key clicks are played produces a poor result
- case AudioSystem::TTS:
- case AudioSystem::MUSIC:
- return STRATEGY_MEDIA;
- }
-}
-
-uint32_t AudioPolicyManagerBase::getDeviceForStrategy(routing_strategy strategy, bool fromCache)
-{
- uint32_t device = 0;
-
- if (fromCache) {
- LOGV("getDeviceForStrategy() from cache strategy %d, device %x", strategy, mDeviceForStrategy[strategy]);
- return mDeviceForStrategy[strategy];
- }
-
- switch (strategy) {
- case STRATEGY_DTMF:
- if (!isInCall()) {
- // when off call, DTMF strategy follows the same rules as MEDIA strategy
- device = getDeviceForStrategy(STRATEGY_MEDIA, false);
- break;
- }
- // when in call, DTMF and PHONE strategies follow the same rules
- // FALL THROUGH
-
- case STRATEGY_PHONE:
- // for phone strategy, we first consider the forced use and then the available devices by order
- // of priority
- switch (mForceUse[AudioSystem::FOR_COMMUNICATION]) {
- case AudioSystem::FORCE_BT_SCO:
- if (!isInCall() || strategy != STRATEGY_DTMF) {
- device = mAvailableOutputDevices & AudioSystem::DEVICE_OUT_BLUETOOTH_SCO_CARKIT;
- if (device) break;
- }
- device = mAvailableOutputDevices & AudioSystem::DEVICE_OUT_BLUETOOTH_SCO_HEADSET;
- if (device) break;
- device = mAvailableOutputDevices & AudioSystem::DEVICE_OUT_BLUETOOTH_SCO;
- if (device) break;
- // if SCO device is requested but no SCO device is available, fall back to default case
- // FALL THROUGH
-
- default: // FORCE_NONE
- device = mAvailableOutputDevices & AudioSystem::DEVICE_OUT_WIRED_HEADPHONE;
- if (device) break;
- device = mAvailableOutputDevices & AudioSystem::DEVICE_OUT_WIRED_HEADSET;
- if (device) break;
-#ifdef WITH_A2DP
- // when not in a phone call, phone strategy should route STREAM_VOICE_CALL to A2DP
- if (!isInCall() && !mA2dpSuspended) {
- device = mAvailableOutputDevices & AudioSystem::DEVICE_OUT_BLUETOOTH_A2DP;
- if (device) break;
- device = mAvailableOutputDevices & AudioSystem::DEVICE_OUT_BLUETOOTH_A2DP_HEADPHONES;
- if (device) break;
- }
-#endif
- device = mAvailableOutputDevices & AudioSystem::DEVICE_OUT_AUX_DIGITAL;
- if (device) break;
- device = mAvailableOutputDevices & AudioSystem::DEVICE_OUT_DGTL_DOCK_HEADSET;
- if (device) break;
- device = mAvailableOutputDevices & AudioSystem::DEVICE_OUT_ANLG_DOCK_HEADSET;
- if (device) break;
- device = mAvailableOutputDevices & AudioSystem::DEVICE_OUT_EARPIECE;
- if (device == 0) {
- LOGE("getDeviceForStrategy() earpiece device not found");
- }
- break;
-
- case AudioSystem::FORCE_SPEAKER:
-#ifdef WITH_A2DP
- // when not in a phone call, phone strategy should route STREAM_VOICE_CALL to
- // A2DP speaker when forcing to speaker output
- if (!isInCall() && !mA2dpSuspended) {
- device = mAvailableOutputDevices & AudioSystem::DEVICE_OUT_BLUETOOTH_A2DP_SPEAKER;
- if (device) break;
- }
-#endif
- device = mAvailableOutputDevices & AudioSystem::DEVICE_OUT_AUX_DIGITAL;
- if (device) break;
- device = mAvailableOutputDevices & AudioSystem::DEVICE_OUT_DGTL_DOCK_HEADSET;
- if (device) break;
- device = mAvailableOutputDevices & AudioSystem::DEVICE_OUT_ANLG_DOCK_HEADSET;
- if (device) break;
- device = mAvailableOutputDevices & AudioSystem::DEVICE_OUT_SPEAKER;
- if (device == 0) {
- LOGE("getDeviceForStrategy() speaker device not found");
- }
- break;
- }
- break;
-
- case STRATEGY_SONIFICATION:
-
- // If incall, just select the STRATEGY_PHONE device: The rest of the behavior is handled by
- // handleIncallSonification().
- if (isInCall()) {
- device = getDeviceForStrategy(STRATEGY_PHONE, false);
- break;
- }
- device = mAvailableOutputDevices & AudioSystem::DEVICE_OUT_SPEAKER;
- if (device == 0) {
- LOGE("getDeviceForStrategy() speaker device not found");
- }
- // The second device used for sonification is the same as the device used by media strategy
- // FALL THROUGH
-
- case STRATEGY_MEDIA: {
- uint32_t device2 = mAvailableOutputDevices & AudioSystem::DEVICE_OUT_WIRED_HEADPHONE;
- if (device2 == 0) {
- device2 = mAvailableOutputDevices & AudioSystem::DEVICE_OUT_WIRED_HEADSET;
- }
-#ifdef WITH_A2DP
- if ((mA2dpOutput != 0) && !mA2dpSuspended &&
- (strategy != STRATEGY_SONIFICATION || a2dpUsedForSonification())) {
- if (device2 == 0) {
- device2 = mAvailableOutputDevices & AudioSystem::DEVICE_OUT_BLUETOOTH_A2DP;
- }
- if (device2 == 0) {
- device2 = mAvailableOutputDevices & AudioSystem::DEVICE_OUT_BLUETOOTH_A2DP_HEADPHONES;
- }
- if (device2 == 0) {
- device2 = mAvailableOutputDevices & AudioSystem::DEVICE_OUT_BLUETOOTH_A2DP_SPEAKER;
- }
- }
-#endif
- if (device2 == 0) {
- device2 = mAvailableOutputDevices & AudioSystem::DEVICE_OUT_AUX_DIGITAL;
- }
- if (device2 == 0) {
- device2 = mAvailableOutputDevices & AudioSystem::DEVICE_OUT_DGTL_DOCK_HEADSET;
- }
- if (device2 == 0) {
- device2 = mAvailableOutputDevices & AudioSystem::DEVICE_OUT_ANLG_DOCK_HEADSET;
- }
- if (device2 == 0) {
- device2 = mAvailableOutputDevices & AudioSystem::DEVICE_OUT_SPEAKER;
- }
-
- // device is DEVICE_OUT_SPEAKER if we come from case STRATEGY_SONIFICATION, 0 otherwise
- device |= device2;
- if (device == 0) {
- LOGE("getDeviceForStrategy() speaker device not found");
- }
- } break;
-
- default:
- LOGW("getDeviceForStrategy() unknown strategy: %d", strategy);
- break;
- }
-
- LOGV("getDeviceForStrategy() strategy %d, device %x", strategy, device);
- return device;
-}
-
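-// updateDeviceForStrategy() refreshes the per-strategy device cache returned when
-// getDeviceForStrategy() is called with fromCache set to true.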
-void AudioPolicyManagerBase::updateDeviceForStrategy()
-{
- for (int i = 0; i < NUM_STRATEGIES; i++) {
- mDeviceForStrategy[i] = getDeviceForStrategy((routing_strategy)i, false);
- }
-}
-
-void AudioPolicyManagerBase::setOutputDevice(audio_io_handle_t output, uint32_t device, bool force, int delayMs)
-{
- LOGV("setOutputDevice() output %d device %x delayMs %d", output, device, delayMs);
- AudioOutputDescriptor *outputDesc = mOutputs.valueFor(output);
-
-
- if (outputDesc->isDuplicated()) {
- setOutputDevice(outputDesc->mOutput1->mId, device, force, delayMs);
- setOutputDevice(outputDesc->mOutput2->mId, device, force, delayMs);
- return;
- }
-#ifdef WITH_A2DP
- // filter devices according to output selected
- if (output == mA2dpOutput) {
- device &= AudioSystem::DEVICE_OUT_ALL_A2DP;
- } else {
- device &= ~AudioSystem::DEVICE_OUT_ALL_A2DP;
- }
-#endif
-
- uint32_t prevDevice = (uint32_t)outputDesc->device();
- // Do not change the routing if:
- // - the requested device is 0
- // - the requested device is the same as current device and force is not specified.
- // Doing this check here allows the caller to call setOutputDevice() without conditions
- if ((device == 0 || device == prevDevice) && !force) {
- LOGV("setOutputDevice() setting same device %x or null device for output %d", device, output);
- return;
- }
-
- outputDesc->mDevice = device;
- // mute media streams if both speaker and headset are selected
- if (output == mHardwareOutput && AudioSystem::popCount(device) == 2) {
- setStrategyMute(STRATEGY_MEDIA, true, output);
- // wait for the PCM output buffers to empty before proceeding with the rest of the command
- usleep(outputDesc->mLatency*2*1000);
- }
-
- // do the routing
- AudioParameter param = AudioParameter();
- param.addInt(String8(AudioParameter::keyRouting), (int)device);
- mpClientInterface->setParameters(mHardwareOutput, param.toString(), delayMs);
- // update stream volumes according to new device
- applyStreamVolumes(output, device, delayMs);
-
- // if changing from a combined headset + speaker route, unmute media streams
- if (output == mHardwareOutput && AudioSystem::popCount(prevDevice) == 2) {
- setStrategyMute(STRATEGY_MEDIA, false, output, delayMs);
- }
-}
-
-uint32_t AudioPolicyManagerBase::getDeviceForInputSource(int inputSource)
-{
- uint32_t device;
-
- switch(inputSource) {
- case AUDIO_SOURCE_DEFAULT:
- case AUDIO_SOURCE_MIC:
- case AUDIO_SOURCE_VOICE_RECOGNITION:
- case AUDIO_SOURCE_VOICE_COMMUNICATION:
- if (mForceUse[AudioSystem::FOR_RECORD] == AudioSystem::FORCE_BT_SCO &&
- mAvailableInputDevices & AudioSystem::DEVICE_IN_BLUETOOTH_SCO_HEADSET) {
- device = AudioSystem::DEVICE_IN_BLUETOOTH_SCO_HEADSET;
- } else if (mAvailableInputDevices & AudioSystem::DEVICE_IN_WIRED_HEADSET) {
- device = AudioSystem::DEVICE_IN_WIRED_HEADSET;
- } else {
- device = AudioSystem::DEVICE_IN_BUILTIN_MIC;
- }
- break;
- case AUDIO_SOURCE_CAMCORDER:
- if (hasBackMicrophone()) {
- device = AudioSystem::DEVICE_IN_BACK_MIC;
- } else {
- device = AudioSystem::DEVICE_IN_BUILTIN_MIC;
- }
- break;
- case AUDIO_SOURCE_VOICE_UPLINK:
- case AUDIO_SOURCE_VOICE_DOWNLINK:
- case AUDIO_SOURCE_VOICE_CALL:
- device = AudioSystem::DEVICE_IN_VOICE_CALL;
- break;
- default:
- LOGW("getInput() invalid input source %d", inputSource);
- device = 0;
- break;
- }
- LOGV("getDeviceForInputSource() input source %d, device %08x", inputSource, device);
- return device;
-}
-
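-// getActiveInput() returns the handle of the first input with a non-zero reference count,
-// or 0 when no capture is in progress.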
-audio_io_handle_t AudioPolicyManagerBase::getActiveInput()
-{
- for (size_t i = 0; i < mInputs.size(); i++) {
- if (mInputs.valueAt(i)->mRefCount > 0) {
- return mInputs.keyAt(i);
- }
- }
- return 0;
-}
-
-float AudioPolicyManagerBase::volIndexToAmpl(uint32_t device, const StreamDescriptor& streamDesc,
- int indexInUi) {
- // the volume index in the UI is relative to the min and max volume indices for this stream type
- int nbSteps = 1 + streamDesc.mVolIndex[StreamDescriptor::VOLMAX] -
- streamDesc.mVolIndex[StreamDescriptor::VOLMIN];
- int volIdx = (nbSteps * (indexInUi - streamDesc.mIndexMin)) /
- (streamDesc.mIndexMax - streamDesc.mIndexMin);
-
- // find what part of the curve this index volume belongs to, or if it's out of bounds
- int segment = 0;
- if (volIdx < streamDesc.mVolIndex[StreamDescriptor::VOLMIN]) { // out of bounds
- return 0.0f;
- } else if (volIdx < streamDesc.mVolIndex[StreamDescriptor::VOLKNEE1]) {
- segment = 0;
- } else if (volIdx < streamDesc.mVolIndex[StreamDescriptor::VOLKNEE2]) {
- segment = 1;
- } else if (volIdx <= streamDesc.mVolIndex[StreamDescriptor::VOLMAX]) {
- segment = 2;
- } else { // out of bounds
- return 1.0f;
- }
-
- // linear interpolation in the attenuation table in dB
- float decibels = streamDesc.mVolDbAtt[segment] +
- ((float)(volIdx - streamDesc.mVolIndex[segment])) *
- ( (streamDesc.mVolDbAtt[segment+1] - streamDesc.mVolDbAtt[segment]) /
- ((float)(streamDesc.mVolIndex[segment+1] - streamDesc.mVolIndex[segment])) );
-
- float amplification = exp( decibels * 0.115129f); // exp( dB * ln(10) / 20 )
-
- LOGV("VOLUME vol index=[%d %d %d], dB=[%.1f %.1f %.1f] ampl=%.5f",
- streamDesc.mVolIndex[segment], volIdx, streamDesc.mVolIndex[segment+1],
- streamDesc.mVolDbAtt[segment], decibels, streamDesc.mVolDbAtt[segment+1],
- amplification);
-
- return amplification;
-}
-
-void AudioPolicyManagerBase::initializeVolumeCurves() {
- // initialize the volume curves to a (-49.5 - 0 dB) attenuation in 0.5dB steps
- for (int i=0 ; i< AudioSystem::NUM_STREAM_TYPES ; i++) {
- mStreams[i].mVolIndex[StreamDescriptor::VOLMIN] = 1;
- mStreams[i].mVolDbAtt[StreamDescriptor::VOLMIN] = -49.5f;
- mStreams[i].mVolIndex[StreamDescriptor::VOLKNEE1] = 33;
- mStreams[i].mVolDbAtt[StreamDescriptor::VOLKNEE1] = -33.5f;
- mStreams[i].mVolIndex[StreamDescriptor::VOLKNEE2] = 66;
- mStreams[i].mVolDbAtt[StreamDescriptor::VOLKNEE2] = -17.0f;
- // here we use 100 steps to avoid rounding errors
- // when computing the volume in volIndexToAmpl()
- mStreams[i].mVolIndex[StreamDescriptor::VOLMAX] = 100;
- mStreams[i].mVolDbAtt[StreamDescriptor::VOLMAX] = 0.0f;
- }
-
- // Modification for music: more attenuation for lower volumes, finer steps at high volumes
- mStreams[AudioSystem::MUSIC].mVolIndex[StreamDescriptor::VOLMIN] = 1;
- mStreams[AudioSystem::MUSIC].mVolDbAtt[StreamDescriptor::VOLMIN] = -58.0f;
- mStreams[AudioSystem::MUSIC].mVolIndex[StreamDescriptor::VOLKNEE1] = 20;
- mStreams[AudioSystem::MUSIC].mVolDbAtt[StreamDescriptor::VOLKNEE1] = -40.0f;
- mStreams[AudioSystem::MUSIC].mVolIndex[StreamDescriptor::VOLKNEE2] = 60;
- mStreams[AudioSystem::MUSIC].mVolDbAtt[StreamDescriptor::VOLKNEE2] = -17.0f;
- mStreams[AudioSystem::MUSIC].mVolIndex[StreamDescriptor::VOLMAX] = 100;
- mStreams[AudioSystem::MUSIC].mVolDbAtt[StreamDescriptor::VOLMAX] = 0.0f;
-}
-
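-// computeVolume() converts a stream volume index into a linear amplification factor for the
-// given output and device, applying the headset attenuation rules for sonification below.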
-float AudioPolicyManagerBase::computeVolume(int stream, int index, audio_io_handle_t output, uint32_t device)
-{
- float volume = 1.0;
- AudioOutputDescriptor *outputDesc = mOutputs.valueFor(output);
- StreamDescriptor &streamDesc = mStreams[stream];
-
- if (device == 0) {
- device = outputDesc->device();
- }
-
- // if volume is not 0 (not muted), force media volume to max on digital output
- if (stream == AudioSystem::MUSIC &&
- index != mStreams[stream].mIndexMin &&
- device == AudioSystem::DEVICE_OUT_AUX_DIGITAL) {
- return 1.0;
- }
-
- volume = volIndexToAmpl(device, streamDesc, index);
-
- // if a headset is connected, apply the following rules to ring tones and notifications
- // to avoid sound level bursts in user's ears:
- // - always attenuate ring tones and notifications volume by 6dB
- // - if music is playing, always limit the volume to current music volume,
- // with a minimum threshold at -36dB so that notification is always perceived.
- if ((device &
- (AudioSystem::DEVICE_OUT_BLUETOOTH_A2DP |
- AudioSystem::DEVICE_OUT_BLUETOOTH_A2DP_HEADPHONES |
- AudioSystem::DEVICE_OUT_WIRED_HEADSET |
- AudioSystem::DEVICE_OUT_WIRED_HEADPHONE)) &&
- ((getStrategy((AudioSystem::stream_type)stream) == STRATEGY_SONIFICATION) ||
- (stream == AudioSystem::SYSTEM)) &&
- streamDesc.mCanBeMuted) {
- volume *= SONIFICATION_HEADSET_VOLUME_FACTOR;
- // when the phone is ringing, music may have just been paused by the music application,
- // so behave as if music were active when the last music track was stopped recently
- if (outputDesc->mRefCount[AudioSystem::MUSIC] || mLimitRingtoneVolume) {
- float musicVol = computeVolume(AudioSystem::MUSIC, mStreams[AudioSystem::MUSIC].mIndexCur, output, device);
- float minVol = (musicVol > SONIFICATION_HEADSET_VOLUME_MIN) ? musicVol : SONIFICATION_HEADSET_VOLUME_MIN;
- if (volume > minVol) {
- volume = minVol;
- LOGV("computeVolume limiting volume to %f musicVol %f", minVol, musicVol);
- }
- }
- }
-
- return volume;
-}
-
-status_t AudioPolicyManagerBase::checkAndSetVolume(int stream, int index, audio_io_handle_t output, uint32_t device, int delayMs, bool force)
-{
-
- // do not change actual stream volume if the stream is muted
- if (mOutputs.valueFor(output)->mMuteCount[stream] != 0) {
- LOGV("checkAndSetVolume() stream %d muted count %d", stream, mOutputs.valueFor(output)->mMuteCount[stream]);
- return NO_ERROR;
- }
-
- // do not change in call volume if bluetooth is connected and vice versa
- if ((stream == AudioSystem::VOICE_CALL && mForceUse[AudioSystem::FOR_COMMUNICATION] == AudioSystem::FORCE_BT_SCO) ||
- (stream == AudioSystem::BLUETOOTH_SCO && mForceUse[AudioSystem::FOR_COMMUNICATION] != AudioSystem::FORCE_BT_SCO)) {
- LOGV("checkAndSetVolume() cannot set stream %d volume with force use = %d for comm",
- stream, mForceUse[AudioSystem::FOR_COMMUNICATION]);
- return INVALID_OPERATION;
- }
-
- float volume = computeVolume(stream, index, output, device);
- // We actually change the volume if:
- // - the float value returned by computeVolume() changed
- // - the force flag is set
- if (volume != mOutputs.valueFor(output)->mCurVolume[stream] ||
- force) {
- mOutputs.valueFor(output)->mCurVolume[stream] = volume;
- LOGV("setStreamVolume() for output %d stream %d, volume %f, delay %d", output, stream, volume, delayMs);
- if (stream == AudioSystem::VOICE_CALL ||
- stream == AudioSystem::DTMF ||
- stream == AudioSystem::BLUETOOTH_SCO) {
- // offset value to reflect actual hardware volume that never reaches 0
- // 1% corresponds roughly to first step in VOICE_CALL stream volume setting (see AudioService.java)
- volume = 0.01 + 0.99 * volume;
- // Force VOICE_CALL to track BLUETOOTH_SCO stream volume when bluetooth audio is
- // enabled
- if (stream == AudioSystem::BLUETOOTH_SCO) {
- mpClientInterface->setStreamVolume(AudioSystem::VOICE_CALL, volume, output, delayMs);
- }
- }
-
- mpClientInterface->setStreamVolume((AudioSystem::stream_type)stream, volume, output, delayMs);
- }
-
- if (stream == AudioSystem::VOICE_CALL ||
- stream == AudioSystem::BLUETOOTH_SCO) {
- float voiceVolume;
- // Force voice volume to max for bluetooth SCO as volume is managed by the headset
- if (stream == AudioSystem::VOICE_CALL) {
- voiceVolume = (float)index/(float)mStreams[stream].mIndexMax;
- } else {
- voiceVolume = 1.0;
- }
-
- if (voiceVolume != mLastVoiceVolume && output == mHardwareOutput) {
- mpClientInterface->setVoiceVolume(voiceVolume, delayMs);
- mLastVoiceVolume = voiceVolume;
- }
- }
-
- return NO_ERROR;
-}
-
-void AudioPolicyManagerBase::applyStreamVolumes(audio_io_handle_t output, uint32_t device, int delayMs, bool force)
-{
- LOGV("applyStreamVolumes() for output %d and device %x", output, device);
-
- for (int stream = 0; stream < AudioSystem::NUM_STREAM_TYPES; stream++) {
- checkAndSetVolume(stream, mStreams[stream].mIndexCur, output, device, delayMs, force);
- }
-}
-
-void AudioPolicyManagerBase::setStrategyMute(routing_strategy strategy, bool on, audio_io_handle_t output, int delayMs)
-{
- LOGV("setStrategyMute() strategy %d, mute %d, output %d", strategy, on, output);
- for (int stream = 0; stream < AudioSystem::NUM_STREAM_TYPES; stream++) {
- if (getStrategy((AudioSystem::stream_type)stream) == strategy) {
- setStreamMute(stream, on, output, delayMs);
- }
- }
-}
-
-void AudioPolicyManagerBase::setStreamMute(int stream, bool on, audio_io_handle_t output, int delayMs)
-{
- StreamDescriptor &streamDesc = mStreams[stream];
- AudioOutputDescriptor *outputDesc = mOutputs.valueFor(output);
-
- LOGV("setStreamMute() stream %d, mute %d, output %d, mMuteCount %d", stream, on, output, outputDesc->mMuteCount[stream]);
-
- if (on) {
- if (outputDesc->mMuteCount[stream] == 0) {
- if (streamDesc.mCanBeMuted) {
- checkAndSetVolume(stream, 0, output, outputDesc->device(), delayMs);
- }
- }
- // increment mMuteCount after calling checkAndSetVolume() so that volume change is not ignored
- outputDesc->mMuteCount[stream]++;
- } else {
- if (outputDesc->mMuteCount[stream] == 0) {
- LOGW("setStreamMute() unmuting non muted stream!");
- return;
- }
- if (--outputDesc->mMuteCount[stream] == 0) {
- checkAndSetVolume(stream, streamDesc.mIndexCur, output, outputDesc->device(), delayMs);
- }
- }
-}
-
-void AudioPolicyManagerBase::handleIncallSonification(int stream, bool starting, bool stateChange)
-{
- // if the stream pertains to sonification strategy and we are in call we must
- // mute the stream if it is low visibility. If it is high visibility, we must play a tone
- // in the device used for phone strategy and play the tone if the selected device does not
- // interfere with the device used for phone strategy
- // if stateChange is true, we are called from setPhoneState() and we must mute or unmute as
- // many times as there are active tracks on the output
-
- if (getStrategy((AudioSystem::stream_type)stream) == STRATEGY_SONIFICATION) {
- AudioOutputDescriptor *outputDesc = mOutputs.valueFor(mHardwareOutput);
- LOGV("handleIncallSonification() stream %d starting %d device %x stateChange %d",
- stream, starting, outputDesc->mDevice, stateChange);
- if (outputDesc->mRefCount[stream]) {
- int muteCount = 1;
- if (stateChange) {
- muteCount = outputDesc->mRefCount[stream];
- }
- if (AudioSystem::isLowVisibility((AudioSystem::stream_type)stream)) {
- LOGV("handleIncallSonification() low visibility, muteCount %d", muteCount);
- for (int i = 0; i < muteCount; i++) {
- setStreamMute(stream, starting, mHardwareOutput);
- }
- } else {
- LOGV("handleIncallSonification() high visibility");
- if (outputDesc->device() & getDeviceForStrategy(STRATEGY_PHONE)) {
- LOGV("handleIncallSonification() high visibility muted, muteCount %d", muteCount);
- for (int i = 0; i < muteCount; i++) {
- setStreamMute(stream, starting, mHardwareOutput);
- }
- }
- if (starting) {
- mpClientInterface->startTone(ToneGenerator::TONE_SUP_CALL_WAITING, AudioSystem::VOICE_CALL);
- } else {
- mpClientInterface->stopTone();
- }
- }
- }
- }
-}
-
-bool AudioPolicyManagerBase::isInCall()
-{
- return isStateInCall(mPhoneState);
-}
-
-bool AudioPolicyManagerBase::isStateInCall(int state) {
- return ((state == AudioSystem::MODE_IN_CALL) ||
- (state == AudioSystem::MODE_IN_COMMUNICATION));
-}
-
-bool AudioPolicyManagerBase::needsDirectOuput(AudioSystem::stream_type stream,
- uint32_t samplingRate,
- uint32_t format,
- uint32_t channels,
- AudioSystem::output_flags flags,
- uint32_t device)
-{
- return ((flags & AudioSystem::OUTPUT_FLAG_DIRECT) ||
- (format != 0 && !AudioSystem::isLinearPCM(format)));
-}
-
-uint32_t AudioPolicyManagerBase::getMaxEffectsCpuLoad()
-{
- return MAX_EFFECTS_CPU_LOAD;
-}
-
-uint32_t AudioPolicyManagerBase::getMaxEffectsMemory()
-{
- return MAX_EFFECTS_MEMORY;
-}
-
-// --- AudioOutputDescriptor class implementation
-
-AudioPolicyManagerBase::AudioOutputDescriptor::AudioOutputDescriptor()
- : mId(0), mSamplingRate(0), mFormat(0), mChannels(0), mLatency(0),
- mFlags((AudioSystem::output_flags)0), mDevice(0), mOutput1(0), mOutput2(0)
-{
- // clear usage count for all stream types
- for (int i = 0; i < AudioSystem::NUM_STREAM_TYPES; i++) {
- mRefCount[i] = 0;
- mCurVolume[i] = -1.0;
- mMuteCount[i] = 0;
- mStopTime[i] = 0;
- }
-}
-
-uint32_t AudioPolicyManagerBase::AudioOutputDescriptor::device()
-{
- uint32_t device = 0;
- if (isDuplicated()) {
- device = mOutput1->mDevice | mOutput2->mDevice;
- } else {
- device = mDevice;
- }
- return device;
-}
-
-void AudioPolicyManagerBase::AudioOutputDescriptor::changeRefCount(AudioSystem::stream_type stream, int delta)
-{
- // forward usage count change to attached outputs
- if (isDuplicated()) {
- mOutput1->changeRefCount(stream, delta);
- mOutput2->changeRefCount(stream, delta);
- }
- if ((delta + (int)mRefCount[stream]) < 0) {
- LOGW("changeRefCount() invalid delta %d for stream %d, refCount %d", delta, stream, mRefCount[stream]);
- mRefCount[stream] = 0;
- return;
- }
- mRefCount[stream] += delta;
- LOGV("changeRefCount() delta %d, stream %d, refCount %d", delta, stream, mRefCount[stream]);
-}
-
-uint32_t AudioPolicyManagerBase::AudioOutputDescriptor::refCount()
-{
- uint32_t refcount = 0;
- for (int i = 0; i < (int)AudioSystem::NUM_STREAM_TYPES; i++) {
- refcount += mRefCount[i];
- }
- return refcount;
-}
-
-uint32_t AudioPolicyManagerBase::AudioOutputDescriptor::strategyRefCount(routing_strategy strategy)
-{
- uint32_t refCount = 0;
- for (int i = 0; i < (int)AudioSystem::NUM_STREAM_TYPES; i++) {
- if (getStrategy((AudioSystem::stream_type)i) == strategy) {
- refCount += mRefCount[i];
- }
- }
- return refCount;
-}
-
-status_t AudioPolicyManagerBase::AudioOutputDescriptor::dump(int fd)
-{
- const size_t SIZE = 256;
- char buffer[SIZE];
- String8 result;
-
- snprintf(buffer, SIZE, " Sampling rate: %d\n", mSamplingRate);
- result.append(buffer);
- snprintf(buffer, SIZE, " Format: %d\n", mFormat);
- result.append(buffer);
- snprintf(buffer, SIZE, " Channels: %08x\n", mChannels);
- result.append(buffer);
- snprintf(buffer, SIZE, " Latency: %d\n", mLatency);
- result.append(buffer);
- snprintf(buffer, SIZE, " Flags %08x\n", mFlags);
- result.append(buffer);
- snprintf(buffer, SIZE, " Devices %08x\n", device());
- result.append(buffer);
- snprintf(buffer, SIZE, " Stream volume refCount muteCount\n");
- result.append(buffer);
- for (int i = 0; i < AudioSystem::NUM_STREAM_TYPES; i++) {
- snprintf(buffer, SIZE, " %02d %.03f %02d %02d\n", i, mCurVolume[i], mRefCount[i], mMuteCount[i]);
- result.append(buffer);
- }
- write(fd, result.string(), result.size());
-
- return NO_ERROR;
-}
-
-// --- AudioInputDescriptor class implementation
-
-AudioPolicyManagerBase::AudioInputDescriptor::AudioInputDescriptor()
- : mSamplingRate(0), mFormat(0), mChannels(0),
- mAcoustics((AudioSystem::audio_in_acoustics)0), mDevice(0), mRefCount(0),
- mInputSource(0)
-{
-}
-
-status_t AudioPolicyManagerBase::AudioInputDescriptor::dump(int fd)
-{
- const size_t SIZE = 256;
- char buffer[SIZE];
- String8 result;
-
- snprintf(buffer, SIZE, " Sampling rate: %d\n", mSamplingRate);
- result.append(buffer);
- snprintf(buffer, SIZE, " Format: %d\n", mFormat);
- result.append(buffer);
- snprintf(buffer, SIZE, " Channels: %08x\n", mChannels);
- result.append(buffer);
- snprintf(buffer, SIZE, " Acoustics %08x\n", mAcoustics);
- result.append(buffer);
- snprintf(buffer, SIZE, " Devices %08x\n", mDevice);
- result.append(buffer);
- snprintf(buffer, SIZE, " Ref Count %d\n", mRefCount);
- result.append(buffer);
- write(fd, result.string(), result.size());
-
- return NO_ERROR;
-}
-
-// --- StreamDescriptor class implementation
-
-void AudioPolicyManagerBase::StreamDescriptor::dump(char* buffer, size_t size)
-{
- snprintf(buffer, size, " %02d %02d %02d %d\n",
- mIndexMin,
- mIndexMax,
- mIndexCur,
- mCanBeMuted);
-}
-
-// --- EffectDescriptor class implementation
-
-status_t AudioPolicyManagerBase::EffectDescriptor::dump(int fd)
-{
- const size_t SIZE = 256;
- char buffer[SIZE];
- String8 result;
-
- snprintf(buffer, SIZE, " Output: %d\n", mOutput);
- result.append(buffer);
- snprintf(buffer, SIZE, " Strategy: %d\n", mStrategy);
- result.append(buffer);
- snprintf(buffer, SIZE, " Session: %d\n", mSession);
- result.append(buffer);
- snprintf(buffer, SIZE, " Name: %s\n", mDesc.name);
- result.append(buffer);
- write(fd, result.string(), result.size());
-
- return NO_ERROR;
-}
-
-
-
-}; // namespace android
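The (index, dB) knee points stripped from AudioPolicyManagerBase.cpp above (VOLMIN, VOLKNEE1, VOLKNEE2, VOLMAX) define a piecewise attenuation curve per stream, and computeVolume() turns a volume index into a linear gain through volIndexToAmpl(). A plausible reading of that mapping, assumed here rather than taken from the hardware_legacy sources, is linear interpolation of the attenuation in dB between the two bracketing knee points followed by a 10^(dB/20) conversion; the helper below is an illustrative sketch only, with hypothetical names and clamping.

#include <math.h>

// Illustrative sketch: map a 0..100 volume index onto four knee points and
// convert the interpolated dB attenuation to a linear amplitude.
struct VolCurvePoint { int index; float dbAtt; };

static float volIndexToAmplSketch(const VolCurvePoint curve[4], int idx)
{
    if (idx <= curve[0].index) return expf(curve[0].dbAtt * 0.115129f); // ln(10)/20
    if (idx >= curve[3].index) return 1.0f;                             // 0 dB at VOLMAX

    int seg = 0;                                   // find the bracketing segment
    while (idx > curve[seg + 1].index) seg++;

    float db = curve[seg].dbAtt +                  // linear interpolation in dB
        (curve[seg + 1].dbAtt - curve[seg].dbAtt) *
        (float)(idx - curve[seg].index) /
        (float)(curve[seg + 1].index - curve[seg].index);

    return expf(db * 0.115129f);                   // 10^(db/20)
}

With the MUSIC curve above (1 / -58 dB, 20 / -40 dB, 60 / -17 dB, 100 / 0 dB), index 40 interpolates to about -28.5 dB, i.e. a gain of roughly 0.038.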
diff --git a/services/audioflinger/AudioPolicyService.cpp b/services/audioflinger/AudioPolicyService.cpp
index b614c48..eebc1b3 100644
--- a/services/audioflinger/AudioPolicyService.cpp
+++ b/services/audioflinger/AudioPolicyService.cpp
@@ -30,11 +30,15 @@
#include <utils/String16.h>
#include <utils/threads.h>
#include "AudioPolicyService.h"
-#include <hardware_legacy/AudioPolicyManagerBase.h>
#include <cutils/properties.h>
#include <dlfcn.h>
#include <hardware_legacy/power.h>
+#include <hardware/hardware.h>
+#include <hardware/audio.h>
+#include <hardware/audio_policy.h>
+#include <hardware/audio_policy_hal.h>
+
// ----------------------------------------------------------------------------
// the sim build doesn't have gettid
@@ -44,7 +48,6 @@
namespace android {
-
static const char *kDeadlockedString = "AudioPolicyService may be deadlocked\n";
static const char *kCmdDeadlockedString = "AudioPolicyService command thread may be deadlocked\n";
@@ -61,12 +64,19 @@
return ok;
}
+namespace {
+ extern struct audio_policy_service_ops aps_ops;
+};
+
// ----------------------------------------------------------------------------
AudioPolicyService::AudioPolicyService()
- : BnAudioPolicyService() , mpPolicyManager(NULL)
+ : BnAudioPolicyService() , mpAudioPolicyDev(NULL) , mpAudioPolicy(NULL)
{
char value[PROPERTY_VALUE_MAX];
+ const struct hw_module_t *module;
+ int forced_val;
+ int rc;
Mutex::Autolock _l(mLock);
@@ -75,33 +85,32 @@
// start audio commands thread
mAudioCommandThread = new AudioCommandThread(String8("ApmCommandThread"));
-#if (defined GENERIC_AUDIO) || (defined AUDIO_POLICY_TEST)
- mpPolicyManager = new AudioPolicyManagerBase(this);
- LOGV("build for GENERIC_AUDIO - using generic audio policy");
-#else
- // if running in emulation - use the emulator driver
- if (property_get("ro.kernel.qemu", value, 0)) {
- LOGV("Running in emulation - using generic audio policy");
- mpPolicyManager = new AudioPolicyManagerBase(this);
- }
- else {
- LOGV("Using hardware specific audio policy");
- mpPolicyManager = createAudioPolicyManager(this);
- }
-#endif
+ /* instantiate the audio policy manager */
+ rc = hw_get_module(AUDIO_POLICY_HARDWARE_MODULE_ID, &module);
+ if (rc)
+ return;
- if ((mpPolicyManager != NULL) && (mpPolicyManager->initCheck() != NO_ERROR)) {
- delete mpPolicyManager;
- mpPolicyManager = NULL;
- }
+ rc = audio_policy_dev_open(module, &mpAudioPolicyDev);
+ LOGE_IF(rc, "couldn't open audio policy device (%s)", strerror(-rc));
+ if (rc)
+ return;
- if (mpPolicyManager == NULL) {
- LOGE("Could not create AudioPolicyManager");
- } else {
- // load properties
- property_get("ro.camera.sound.forced", value, "0");
- mpPolicyManager->setSystemProperty("ro.camera.sound.forced", value);
- }
+ rc = mpAudioPolicyDev->create_audio_policy(mpAudioPolicyDev, &aps_ops, this,
+ &mpAudioPolicy);
+ LOGE_IF(rc, "couldn't create audio policy (%s)", strerror(-rc));
+ if (rc)
+ return;
+
+ rc = mpAudioPolicy->init_check(mpAudioPolicy);
+ LOGE_IF(rc, "couldn't init_check the audio policy (%s)", strerror(-rc));
+ if (rc)
+ return;
+
+ property_get("ro.camera.sound.forced", value, "0");
+ forced_val = strtol(value, NULL, 0);
+ mpAudioPolicy->set_can_mute_enforced_audible(mpAudioPolicy, !forced_val);
+
+ LOGI("Loaded audio policy from %s (%s)", module->name, module->id);
}
AudioPolicyService::~AudioPolicyService()
@@ -111,57 +120,59 @@
mAudioCommandThread->exit();
mAudioCommandThread.clear();
- if (mpPolicyManager) {
- delete mpPolicyManager;
- }
+ if (mpAudioPolicy && mpAudioPolicyDev)
+ mpAudioPolicyDev->destroy_audio_policy(mpAudioPolicyDev, mpAudioPolicy);
+ if (mpAudioPolicyDev)
+ audio_policy_dev_close(mpAudioPolicyDev);
}
-
-status_t AudioPolicyService::setDeviceConnectionState(AudioSystem::audio_devices device,
- AudioSystem::device_connection_state state,
+status_t AudioPolicyService::setDeviceConnectionState(audio_devices_t device,
+ audio_policy_dev_state_t state,
const char *device_address)
{
- if (mpPolicyManager == NULL) {
+ if (mpAudioPolicy == NULL) {
return NO_INIT;
}
if (!checkPermission()) {
return PERMISSION_DENIED;
}
- if (!AudioSystem::isOutputDevice(device) && !AudioSystem::isInputDevice(device)) {
+ if (!audio_is_output_device(device) && !audio_is_input_device(device)) {
return BAD_VALUE;
}
- if (state != AudioSystem::DEVICE_STATE_AVAILABLE &&
- state != AudioSystem::DEVICE_STATE_UNAVAILABLE) {
+ if (state != AUDIO_POLICY_DEVICE_STATE_AVAILABLE &&
+ state != AUDIO_POLICY_DEVICE_STATE_UNAVAILABLE) {
return BAD_VALUE;
}
LOGV("setDeviceConnectionState() tid %d", gettid());
Mutex::Autolock _l(mLock);
- return mpPolicyManager->setDeviceConnectionState(device, state, device_address);
+ return mpAudioPolicy->set_device_connection_state(mpAudioPolicy, device,
+ state, device_address);
}
-AudioSystem::device_connection_state AudioPolicyService::getDeviceConnectionState(
- AudioSystem::audio_devices device,
+audio_policy_dev_state_t AudioPolicyService::getDeviceConnectionState(
+ audio_devices_t device,
const char *device_address)
{
- if (mpPolicyManager == NULL) {
- return AudioSystem::DEVICE_STATE_UNAVAILABLE;
+ if (mpAudioPolicy == NULL) {
+ return AUDIO_POLICY_DEVICE_STATE_UNAVAILABLE;
}
if (!checkPermission()) {
- return AudioSystem::DEVICE_STATE_UNAVAILABLE;
+ return AUDIO_POLICY_DEVICE_STATE_UNAVAILABLE;
}
- return mpPolicyManager->getDeviceConnectionState(device, device_address);
+ return mpAudioPolicy->get_device_connection_state(mpAudioPolicy, device,
+ device_address);
}
status_t AudioPolicyService::setPhoneState(int state)
{
- if (mpPolicyManager == NULL) {
+ if (mpAudioPolicy == NULL) {
return NO_INIT;
}
if (!checkPermission()) {
return PERMISSION_DENIED;
}
- if (state < 0 || state >= AudioSystem::NUM_MODES) {
+ if (state < 0 || state >= AUDIO_MODE_CNT) {
return BAD_VALUE;
}
@@ -171,215 +182,215 @@
AudioSystem::setMode(state);
Mutex::Autolock _l(mLock);
- mpPolicyManager->setPhoneState(state);
+ mpAudioPolicy->set_phone_state(mpAudioPolicy, state);
return NO_ERROR;
}
status_t AudioPolicyService::setRingerMode(uint32_t mode, uint32_t mask)
{
- if (mpPolicyManager == NULL) {
+ if (mpAudioPolicy == NULL) {
return NO_INIT;
}
if (!checkPermission()) {
return PERMISSION_DENIED;
}
- mpPolicyManager->setRingerMode(mode, mask);
+ mpAudioPolicy->set_ringer_mode(mpAudioPolicy, mode, mask);
return NO_ERROR;
}
-status_t AudioPolicyService::setForceUse(AudioSystem::force_use usage,
- AudioSystem::forced_config config)
+status_t AudioPolicyService::setForceUse(audio_policy_force_use_t usage,
+ audio_policy_forced_cfg_t config)
{
- if (mpPolicyManager == NULL) {
+ if (mpAudioPolicy == NULL) {
return NO_INIT;
}
if (!checkPermission()) {
return PERMISSION_DENIED;
}
- if (usage < 0 || usage >= AudioSystem::NUM_FORCE_USE) {
+ if (usage < 0 || usage >= AUDIO_POLICY_FORCE_USE_CNT) {
return BAD_VALUE;
}
- if (config < 0 || config >= AudioSystem::NUM_FORCE_CONFIG) {
+ if (config < 0 || config >= AUDIO_POLICY_FORCE_CFG_CNT) {
return BAD_VALUE;
}
LOGV("setForceUse() tid %d", gettid());
Mutex::Autolock _l(mLock);
- mpPolicyManager->setForceUse(usage, config);
+ mpAudioPolicy->set_force_use(mpAudioPolicy, usage, config);
return NO_ERROR;
}
-AudioSystem::forced_config AudioPolicyService::getForceUse(AudioSystem::force_use usage)
+audio_policy_forced_cfg_t AudioPolicyService::getForceUse(audio_policy_force_use_t usage)
{
- if (mpPolicyManager == NULL) {
- return AudioSystem::FORCE_NONE;
+ if (mpAudioPolicy == NULL) {
+ return AUDIO_POLICY_FORCE_NONE;
}
if (!checkPermission()) {
- return AudioSystem::FORCE_NONE;
+ return AUDIO_POLICY_FORCE_NONE;
}
- if (usage < 0 || usage >= AudioSystem::NUM_FORCE_USE) {
- return AudioSystem::FORCE_NONE;
+ if (usage < 0 || usage >= AUDIO_POLICY_FORCE_USE_CNT) {
+ return AUDIO_POLICY_FORCE_NONE;
}
- return mpPolicyManager->getForceUse(usage);
+ return mpAudioPolicy->get_force_use(mpAudioPolicy, usage);
}
-audio_io_handle_t AudioPolicyService::getOutput(AudioSystem::stream_type stream,
+audio_io_handle_t AudioPolicyService::getOutput(audio_stream_type_t stream,
uint32_t samplingRate,
uint32_t format,
uint32_t channels,
- AudioSystem::output_flags flags)
+ audio_policy_output_flags_t flags)
{
- if (mpPolicyManager == NULL) {
+ if (mpAudioPolicy == NULL) {
return 0;
}
LOGV("getOutput() tid %d", gettid());
Mutex::Autolock _l(mLock);
- return mpPolicyManager->getOutput(stream, samplingRate, format, channels, flags);
+ return mpAudioPolicy->get_output(mpAudioPolicy, stream, samplingRate, format, channels, flags);
}
status_t AudioPolicyService::startOutput(audio_io_handle_t output,
- AudioSystem::stream_type stream,
+ audio_stream_type_t stream,
int session)
{
- if (mpPolicyManager == NULL) {
+ if (mpAudioPolicy == NULL) {
return NO_INIT;
}
LOGV("startOutput() tid %d", gettid());
Mutex::Autolock _l(mLock);
- return mpPolicyManager->startOutput(output, stream, session);
+ return mpAudioPolicy->start_output(mpAudioPolicy, output, stream, session);
}
status_t AudioPolicyService::stopOutput(audio_io_handle_t output,
- AudioSystem::stream_type stream,
+ audio_stream_type_t stream,
int session)
{
- if (mpPolicyManager == NULL) {
+ if (mpAudioPolicy == NULL) {
return NO_INIT;
}
LOGV("stopOutput() tid %d", gettid());
Mutex::Autolock _l(mLock);
- return mpPolicyManager->stopOutput(output, stream, session);
+ return mpAudioPolicy->stop_output(mpAudioPolicy, output, stream, session);
}
void AudioPolicyService::releaseOutput(audio_io_handle_t output)
{
- if (mpPolicyManager == NULL) {
+ if (mpAudioPolicy == NULL) {
return;
}
LOGV("releaseOutput() tid %d", gettid());
Mutex::Autolock _l(mLock);
- mpPolicyManager->releaseOutput(output);
+ mpAudioPolicy->release_output(mpAudioPolicy, output);
}
audio_io_handle_t AudioPolicyService::getInput(int inputSource,
uint32_t samplingRate,
uint32_t format,
uint32_t channels,
- AudioSystem::audio_in_acoustics acoustics)
+ audio_in_acoustics_t acoustics)
{
- if (mpPolicyManager == NULL) {
+ if (mpAudioPolicy == NULL) {
return 0;
}
Mutex::Autolock _l(mLock);
- return mpPolicyManager->getInput(inputSource, samplingRate, format, channels, acoustics);
+ return mpAudioPolicy->get_input(mpAudioPolicy, inputSource, samplingRate, format, channels, acoustics);
}
status_t AudioPolicyService::startInput(audio_io_handle_t input)
{
- if (mpPolicyManager == NULL) {
+ if (mpAudioPolicy == NULL) {
return NO_INIT;
}
Mutex::Autolock _l(mLock);
- return mpPolicyManager->startInput(input);
+ return mpAudioPolicy->start_input(mpAudioPolicy, input);
}
status_t AudioPolicyService::stopInput(audio_io_handle_t input)
{
- if (mpPolicyManager == NULL) {
+ if (mpAudioPolicy == NULL) {
return NO_INIT;
}
Mutex::Autolock _l(mLock);
- return mpPolicyManager->stopInput(input);
+ return mpAudioPolicy->stop_input(mpAudioPolicy, input);
}
void AudioPolicyService::releaseInput(audio_io_handle_t input)
{
- if (mpPolicyManager == NULL) {
+ if (mpAudioPolicy == NULL) {
return;
}
Mutex::Autolock _l(mLock);
- mpPolicyManager->releaseInput(input);
+ mpAudioPolicy->release_input(mpAudioPolicy, input);
}
-status_t AudioPolicyService::initStreamVolume(AudioSystem::stream_type stream,
+status_t AudioPolicyService::initStreamVolume(audio_stream_type_t stream,
int indexMin,
int indexMax)
{
- if (mpPolicyManager == NULL) {
+ if (mpAudioPolicy == NULL) {
return NO_INIT;
}
if (!checkPermission()) {
return PERMISSION_DENIED;
}
- if (stream < 0 || stream >= AudioSystem::NUM_STREAM_TYPES) {
+ if (stream < 0 || stream >= AUDIO_STREAM_CNT) {
return BAD_VALUE;
}
- mpPolicyManager->initStreamVolume(stream, indexMin, indexMax);
+ mpAudioPolicy->init_stream_volume(mpAudioPolicy, stream, indexMin, indexMax);
return NO_ERROR;
}
-status_t AudioPolicyService::setStreamVolumeIndex(AudioSystem::stream_type stream, int index)
+status_t AudioPolicyService::setStreamVolumeIndex(audio_stream_type_t stream, int index)
{
- if (mpPolicyManager == NULL) {
+ if (mpAudioPolicy == NULL) {
return NO_INIT;
}
if (!checkPermission()) {
return PERMISSION_DENIED;
}
- if (stream < 0 || stream >= AudioSystem::NUM_STREAM_TYPES) {
+ if (stream < 0 || stream >= AUDIO_STREAM_CNT) {
return BAD_VALUE;
}
- return mpPolicyManager->setStreamVolumeIndex(stream, index);
+ return mpAudioPolicy->set_stream_volume_index(mpAudioPolicy, stream, index);
}
-status_t AudioPolicyService::getStreamVolumeIndex(AudioSystem::stream_type stream, int *index)
+status_t AudioPolicyService::getStreamVolumeIndex(audio_stream_type_t stream, int *index)
{
- if (mpPolicyManager == NULL) {
+ if (mpAudioPolicy == NULL) {
return NO_INIT;
}
if (!checkPermission()) {
return PERMISSION_DENIED;
}
- if (stream < 0 || stream >= AudioSystem::NUM_STREAM_TYPES) {
+ if (stream < 0 || stream >= AUDIO_STREAM_CNT) {
return BAD_VALUE;
}
- return mpPolicyManager->getStreamVolumeIndex(stream, index);
+ return mpAudioPolicy->get_stream_volume_index(mpAudioPolicy, stream, index);
}
-uint32_t AudioPolicyService::getStrategyForStream(AudioSystem::stream_type stream)
+uint32_t AudioPolicyService::getStrategyForStream(audio_stream_type_t stream)
{
- if (mpPolicyManager == NULL) {
+ if (mpAudioPolicy == NULL) {
return 0;
}
- return mpPolicyManager->getStrategyForStream(stream);
+ return mpAudioPolicy->get_strategy_for_stream(mpAudioPolicy, stream);
}
-uint32_t AudioPolicyService::getDevicesForStream(AudioSystem::stream_type stream)
+uint32_t AudioPolicyService::getDevicesForStream(audio_stream_type_t stream)
{
- if (mpPolicyManager == NULL) {
+ if (mpAudioPolicy == NULL) {
return 0;
}
- return mpPolicyManager->getDevicesForStream(stream);
+ return mpAudioPolicy->get_devices_for_stream(mpAudioPolicy, stream);
}
audio_io_handle_t AudioPolicyService::getOutputForEffect(effect_descriptor_t *desc)
{
- if (mpPolicyManager == NULL) {
+ if (mpAudioPolicy == NULL) {
return NO_INIT;
}
Mutex::Autolock _l(mLock);
- return mpPolicyManager->getOutputForEffect(desc);
+ return mpAudioPolicy->get_output_for_effect(mpAudioPolicy, desc);
}
status_t AudioPolicyService::registerEffect(effect_descriptor_t *desc,
@@ -388,27 +399,27 @@
int session,
int id)
{
- if (mpPolicyManager == NULL) {
+ if (mpAudioPolicy == NULL) {
return NO_INIT;
}
- return mpPolicyManager->registerEffect(desc, output, strategy, session, id);
+ return mpAudioPolicy->register_effect(mpAudioPolicy, desc, output, strategy, session, id);
}
status_t AudioPolicyService::unregisterEffect(int id)
{
- if (mpPolicyManager == NULL) {
+ if (mpAudioPolicy == NULL) {
return NO_INIT;
}
- return mpPolicyManager->unregisterEffect(id);
+ return mpAudioPolicy->unregister_effect(mpAudioPolicy, id);
}
bool AudioPolicyService::isStreamActive(int stream, uint32_t inPastMs) const
{
- if (mpPolicyManager == NULL) {
+ if (mpAudioPolicy == NULL) {
return 0;
}
Mutex::Autolock _l(mLock);
- return mpPolicyManager->isStreamActive(stream, inPastMs);
+ return mpAudioPolicy->is_stream_active(mpAudioPolicy, stream, inPastMs);
}
void AudioPolicyService::binderDied(const wp<IBinder>& who) {
@@ -435,7 +446,7 @@
char buffer[SIZE];
String8 result;
- snprintf(buffer, SIZE, "PolicyManager Interface: %p\n", mpPolicyManager);
+ snprintf(buffer, SIZE, "PolicyManager Interface: %p\n", mpAudioPolicy);
result.append(buffer);
snprintf(buffer, SIZE, "Command Thread: %p\n", mAudioCommandThread.get());
result.append(buffer);
@@ -465,8 +476,8 @@
mTonePlaybackThread->dump(fd);
}
- if (mpPolicyManager) {
- mpPolicyManager->dump(fd);
+ if (mpAudioPolicy) {
+ mpAudioPolicy->dump(mpAudioPolicy, fd);
}
if (locked) mLock.unlock();
@@ -495,154 +506,6 @@
}
-// ----------------------------------------------------------------------------
-// AudioPolicyClientInterface implementation
-// ----------------------------------------------------------------------------
-
-
-audio_io_handle_t AudioPolicyService::openOutput(uint32_t *pDevices,
- uint32_t *pSamplingRate,
- uint32_t *pFormat,
- uint32_t *pChannels,
- uint32_t *pLatencyMs,
- AudioSystem::output_flags flags)
-{
- sp<IAudioFlinger> af = AudioSystem::get_audio_flinger();
- if (af == 0) {
- LOGW("openOutput() could not get AudioFlinger");
- return 0;
- }
-
- return af->openOutput(pDevices,
- pSamplingRate,
- (uint32_t *)pFormat,
- pChannels,
- pLatencyMs,
- flags);
-}
-
-audio_io_handle_t AudioPolicyService::openDuplicateOutput(audio_io_handle_t output1,
- audio_io_handle_t output2)
-{
- sp<IAudioFlinger> af = AudioSystem::get_audio_flinger();
- if (af == 0) {
- LOGW("openDuplicateOutput() could not get AudioFlinger");
- return 0;
- }
- return af->openDuplicateOutput(output1, output2);
-}
-
-status_t AudioPolicyService::closeOutput(audio_io_handle_t output)
-{
- sp<IAudioFlinger> af = AudioSystem::get_audio_flinger();
- if (af == 0) return PERMISSION_DENIED;
-
- return af->closeOutput(output);
-}
-
-
-status_t AudioPolicyService::suspendOutput(audio_io_handle_t output)
-{
- sp<IAudioFlinger> af = AudioSystem::get_audio_flinger();
- if (af == 0) {
- LOGW("suspendOutput() could not get AudioFlinger");
- return PERMISSION_DENIED;
- }
-
- return af->suspendOutput(output);
-}
-
-status_t AudioPolicyService::restoreOutput(audio_io_handle_t output)
-{
- sp<IAudioFlinger> af = AudioSystem::get_audio_flinger();
- if (af == 0) {
- LOGW("restoreOutput() could not get AudioFlinger");
- return PERMISSION_DENIED;
- }
-
- return af->restoreOutput(output);
-}
-
-audio_io_handle_t AudioPolicyService::openInput(uint32_t *pDevices,
- uint32_t *pSamplingRate,
- uint32_t *pFormat,
- uint32_t *pChannels,
- uint32_t acoustics)
-{
- sp<IAudioFlinger> af = AudioSystem::get_audio_flinger();
- if (af == 0) {
- LOGW("openInput() could not get AudioFlinger");
- return 0;
- }
-
- return af->openInput(pDevices, pSamplingRate, (uint32_t *)pFormat, pChannels, acoustics);
-}
-
-status_t AudioPolicyService::closeInput(audio_io_handle_t input)
-{
- sp<IAudioFlinger> af = AudioSystem::get_audio_flinger();
- if (af == 0) return PERMISSION_DENIED;
-
- return af->closeInput(input);
-}
-
-status_t AudioPolicyService::setStreamVolume(AudioSystem::stream_type stream,
- float volume,
- audio_io_handle_t output,
- int delayMs)
-{
- return mAudioCommandThread->volumeCommand((int)stream, volume, (int)output, delayMs);
-}
-
-status_t AudioPolicyService::setStreamOutput(AudioSystem::stream_type stream,
- audio_io_handle_t output)
-{
- sp<IAudioFlinger> af = AudioSystem::get_audio_flinger();
- if (af == 0) return PERMISSION_DENIED;
-
- return af->setStreamOutput(stream, output);
-}
-
-status_t AudioPolicyService::moveEffects(int session, audio_io_handle_t srcOutput,
- audio_io_handle_t dstOutput)
-{
- sp<IAudioFlinger> af = AudioSystem::get_audio_flinger();
- if (af == 0) return PERMISSION_DENIED;
-
- return af->moveEffects(session, (int)srcOutput, (int)dstOutput);
-}
-
-void AudioPolicyService::setParameters(audio_io_handle_t ioHandle,
- const String8& keyValuePairs,
- int delayMs)
-{
- mAudioCommandThread->parametersCommand((int)ioHandle, keyValuePairs, delayMs);
-}
-
-String8 AudioPolicyService::getParameters(audio_io_handle_t ioHandle, const String8& keys)
-{
- String8 result = AudioSystem::getParameters(ioHandle, keys);
- return result;
-}
-
-status_t AudioPolicyService::startTone(ToneGenerator::tone_type tone,
- AudioSystem::stream_type stream)
-{
- mTonePlaybackThread->startToneCommand(tone, stream);
- return NO_ERROR;
-}
-
-status_t AudioPolicyService::stopTone()
-{
- mTonePlaybackThread->stopToneCommand();
- return NO_ERROR;
-}
-
-status_t AudioPolicyService::setVoiceVolume(float volume, int delayMs)
-{
- return mAudioCommandThread->voiceVolumeCommand(volume, delayMs);
-}
-
// ----------- AudioPolicyService::AudioCommandThread implementation ----------
AudioPolicyService::AudioCommandThread::AudioCommandThread(String8 name)
@@ -859,7 +722,7 @@
}
status_t AudioPolicyService::AudioCommandThread::parametersCommand(int ioHandle,
- const String8& keyValuePairs,
+ const char *keyValuePairs,
int delayMs)
{
status_t status = NO_ERROR;
@@ -868,7 +731,7 @@
command->mCommand = SET_PARAMETERS;
ParametersData *data = new ParametersData();
data->mIO = ioHandle;
- data->mKeyValuePairs = keyValuePairs;
+ data->mKeyValuePairs = String8(keyValuePairs);
command->mParam = data;
if (delayMs == 0) {
command->mWaitStatus = true;
@@ -878,7 +741,7 @@
Mutex::Autolock _l(mLock);
insertCommand_l(command, delayMs);
LOGV("AudioCommandThread() adding set parameter string %s, io %d ,delay %d",
- keyValuePairs.string(), ioHandle, delayMs);
+ keyValuePairs, ioHandle, delayMs);
mWaitWorkCV.signal();
if (command->mWaitStatus) {
command->mCond.wait(mLock);
@@ -1023,4 +886,226 @@
mParam);
}
+/******* helpers for the service_ops callbacks defined below *********/
+void AudioPolicyService::setParameters(audio_io_handle_t ioHandle,
+ const char *keyValuePairs,
+ int delayMs)
+{
+ mAudioCommandThread->parametersCommand((int)ioHandle, keyValuePairs,
+ delayMs);
+}
+
+int AudioPolicyService::setStreamVolume(audio_stream_type_t stream,
+ float volume,
+ audio_io_handle_t output,
+ int delayMs)
+{
+ return (int)mAudioCommandThread->volumeCommand((int)stream, volume,
+ (int)output, delayMs);
+}
+
+int AudioPolicyService::startTone(audio_policy_tone_t tone,
+ audio_stream_type_t stream)
+{
+ if (tone != AUDIO_POLICY_TONE_IN_CALL_NOTIFICATION)
+ LOGE("startTone: illegal tone requested (%d)", tone);
+ if (stream != AUDIO_STREAM_VOICE_CALL)
+ LOGE("startTone: illegal stream (%d) requested for tone %d", stream,
+ tone);
+ mTonePlaybackThread->startToneCommand(ToneGenerator::TONE_SUP_CALL_WAITING,
+ AUDIO_STREAM_VOICE_CALL);
+ return 0;
+}
+
+int AudioPolicyService::stopTone()
+{
+ mTonePlaybackThread->stopToneCommand();
+ return 0;
+}
+
+int AudioPolicyService::setVoiceVolume(float volume, int delayMs)
+{
+ return (int)mAudioCommandThread->voiceVolumeCommand(volume, delayMs);
+}
+
+/* implementation of the interface to the policy manager */
+extern "C" {
+
+static audio_io_handle_t aps_open_output(void *service,
+ uint32_t *pDevices,
+ uint32_t *pSamplingRate,
+ uint32_t *pFormat,
+ uint32_t *pChannels,
+ uint32_t *pLatencyMs,
+ audio_policy_output_flags_t flags)
+{
+ sp<IAudioFlinger> af = AudioSystem::get_audio_flinger();
+ if (af == NULL) {
+ LOGW("%s: could not get AudioFlinger", __func__);
+ return 0;
+ }
+
+ return af->openOutput(pDevices, pSamplingRate, pFormat, pChannels,
+ pLatencyMs, flags);
+}
+
+static audio_io_handle_t aps_open_dup_output(void *service,
+ audio_io_handle_t output1,
+ audio_io_handle_t output2)
+{
+ sp<IAudioFlinger> af = AudioSystem::get_audio_flinger();
+ if (af == NULL) {
+ LOGW("%s: could not get AudioFlinger", __func__);
+ return 0;
+ }
+ return af->openDuplicateOutput(output1, output2);
+}
+
+static int aps_close_output(void *service, audio_io_handle_t output)
+{
+ sp<IAudioFlinger> af = AudioSystem::get_audio_flinger();
+ if (af == NULL)
+ return PERMISSION_DENIED;
+
+ return af->closeOutput(output);
+}
+
+static int aps_suspend_output(void *service, audio_io_handle_t output)
+{
+ sp<IAudioFlinger> af = AudioSystem::get_audio_flinger();
+ if (af == NULL) {
+ LOGW("%s: could not get AudioFlinger", __func__);
+ return PERMISSION_DENIED;
+ }
+
+ return af->suspendOutput(output);
+}
+
+static int aps_restore_output(void *service, audio_io_handle_t output)
+{
+ sp<IAudioFlinger> af = AudioSystem::get_audio_flinger();
+ if (af == NULL) {
+ LOGW("%s: could not get AudioFlinger", __func__);
+ return PERMISSION_DENIED;
+ }
+
+ return af->restoreOutput(output);
+}
+
+static audio_io_handle_t aps_open_input(void *service,
+ uint32_t *pDevices,
+ uint32_t *pSamplingRate,
+ uint32_t *pFormat,
+ uint32_t *pChannels,
+ uint32_t acoustics)
+{
+ sp<IAudioFlinger> af = AudioSystem::get_audio_flinger();
+ if (af == NULL) {
+ LOGW("%s: could not get AudioFlinger", __func__);
+ return 0;
+ }
+
+ return af->openInput(pDevices, pSamplingRate, pFormat, pChannels,
+ acoustics);
+}
+
+static int aps_close_input(void *service, audio_io_handle_t input)
+{
+ sp<IAudioFlinger> af = AudioSystem::get_audio_flinger();
+ if (af == NULL)
+ return PERMISSION_DENIED;
+
+ return af->closeInput(input);
+}
+
+static int aps_set_stream_output(void *service, audio_stream_type_t stream,
+ audio_io_handle_t output)
+{
+ sp<IAudioFlinger> af = AudioSystem::get_audio_flinger();
+ if (af == NULL)
+ return PERMISSION_DENIED;
+
+ return af->setStreamOutput(stream, output);
+}
+
+static int aps_move_effects(void *service, int session,
+ audio_io_handle_t src_output,
+ audio_io_handle_t dst_output)
+{
+ sp<IAudioFlinger> af = AudioSystem::get_audio_flinger();
+ if (af == NULL)
+ return PERMISSION_DENIED;
+
+ return af->moveEffects(session, (int)src_output, (int)dst_output);
+}
+
+static char * aps_get_parameters(void *service, audio_io_handle_t io_handle,
+ const char *keys)
+{
+ String8 result = AudioSystem::getParameters(io_handle, String8(keys));
+ return strdup(result.string());
+}
+
+static void aps_set_parameters(void *service, audio_io_handle_t io_handle,
+ const char *kv_pairs, int delay_ms)
+{
+ AudioPolicyService *audioPolicyService = (AudioPolicyService *)service;
+
+ audioPolicyService->setParameters(io_handle, kv_pairs, delay_ms);
+}
+
+static int aps_set_stream_volume(void *service, audio_stream_type_t stream,
+ float volume, audio_io_handle_t output,
+ int delay_ms)
+{
+ AudioPolicyService *audioPolicyService = (AudioPolicyService *)service;
+
+ return audioPolicyService->setStreamVolume(stream, volume, output,
+ delay_ms);
+}
+
+static int aps_start_tone(void *service, audio_policy_tone_t tone,
+ audio_stream_type_t stream)
+{
+ AudioPolicyService *audioPolicyService = (AudioPolicyService *)service;
+
+ return audioPolicyService->startTone(tone, stream);
+}
+
+static int aps_stop_tone(void *service)
+{
+ AudioPolicyService *audioPolicyService = (AudioPolicyService *)service;
+
+ return audioPolicyService->stopTone();
+}
+
+static int aps_set_voice_volume(void *service, float volume, int delay_ms)
+{
+ AudioPolicyService *audioPolicyService = (AudioPolicyService *)service;
+
+ return audioPolicyService->setVoiceVolume(volume, delay_ms);
+}
+
+}; // extern "C"
+
+namespace {
+ struct audio_policy_service_ops aps_ops = {
+ open_output : aps_open_output,
+ open_duplicate_output : aps_open_dup_output,
+ close_output : aps_close_output,
+ suspend_output : aps_suspend_output,
+ restore_output : aps_restore_output,
+ open_input : aps_open_input,
+ close_input : aps_close_input,
+ set_stream_volume : aps_set_stream_volume,
+ set_stream_output : aps_set_stream_output,
+ set_parameters : aps_set_parameters,
+ get_parameters : aps_get_parameters,
+ start_tone : aps_start_tone,
+ stop_tone : aps_stop_tone,
+ set_voice_volume : aps_set_voice_volume,
+ move_effects : aps_move_effects,
+ };
+}; // namespace <unnamed>
+
}; // namespace android
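With this change the policy logic itself moves behind the audio_policy HAL: the service loads the module via hw_get_module(AUDIO_POLICY_HARDWARE_MODULE_ID, ...), opens an audio_policy_device, and has it create an audio_policy, passing the aps_ops table above together with `this` as an opaque service cookie. Every entry in the table either casts that cookie back to AudioPolicyService or forwards straight to AudioFlinger. The fragment below is a hypothetical sketch of how a policy implementation inside the HAL might call back through the table; my_policy, its embedding layout and the helper name are assumptions, not part of the headers.

#include <hardware/audio.h>
#include <hardware/audio_policy.h>

// Hypothetical HAL-side state: keep the ops table and the opaque service
// cookie received from create_audio_policy(); embedding the audio_policy
// interface first lets the service's pointer be cast back to this struct.
struct my_policy {
    struct audio_policy policy;                // interface handed to the service
    struct audio_policy_service_ops *aps_ops;  // callback table from the service
    void *service;                             // opaque AudioPolicyService cookie
};

// Example callback use: ask the service to ramp the MUSIC stream on an output.
static void my_policy_set_music_volume(struct my_policy *pol,
                                        audio_io_handle_t output, float volume)
{
    pol->aps_ops->set_stream_volume(pol->service, AUDIO_STREAM_MUSIC,
                                    volume, output, 50 /* delayMs */);
}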
diff --git a/services/audioflinger/AudioPolicyService.h b/services/audioflinger/AudioPolicyService.h
index faad893..01e592b 100644
--- a/services/audioflinger/AudioPolicyService.h
+++ b/services/audioflinger/AudioPolicyService.h
@@ -18,11 +18,14 @@
#define ANDROID_AUDIOPOLICYSERVICE_H
#include <media/IAudioPolicyService.h>
-#include <hardware_legacy/AudioPolicyInterface.h>
#include <media/ToneGenerator.h>
#include <utils/Vector.h>
#include <binder/BinderService.h>
+#include <hardware/audio.h>
+#include <hardware/audio_policy.h>
+#include <hardware/audio_policy_hal.h>
+
namespace android {
class String8;
@@ -32,7 +35,7 @@
class AudioPolicyService :
public BinderService<AudioPolicyService>,
public BnAudioPolicyService,
- public AudioPolicyClientInterface,
+// public AudioPolicyClientInterface,
public IBinder::DeathRecipient
{
friend class BinderService<AudioPolicyService>;
@@ -47,46 +50,46 @@
// BnAudioPolicyService (see AudioPolicyInterface for method descriptions)
//
- virtual status_t setDeviceConnectionState(AudioSystem::audio_devices device,
- AudioSystem::device_connection_state state,
+ virtual status_t setDeviceConnectionState(audio_devices_t device,
+ audio_policy_dev_state_t state,
const char *device_address);
- virtual AudioSystem::device_connection_state getDeviceConnectionState(
- AudioSystem::audio_devices device,
+ virtual audio_policy_dev_state_t getDeviceConnectionState(
+ audio_devices_t device,
const char *device_address);
virtual status_t setPhoneState(int state);
virtual status_t setRingerMode(uint32_t mode, uint32_t mask);
- virtual status_t setForceUse(AudioSystem::force_use usage, AudioSystem::forced_config config);
- virtual AudioSystem::forced_config getForceUse(AudioSystem::force_use usage);
- virtual audio_io_handle_t getOutput(AudioSystem::stream_type stream,
+ virtual status_t setForceUse(audio_policy_force_use_t usage, audio_policy_forced_cfg_t config);
+ virtual audio_policy_forced_cfg_t getForceUse(audio_policy_force_use_t usage);
+ virtual audio_io_handle_t getOutput(audio_stream_type_t stream,
uint32_t samplingRate = 0,
- uint32_t format = AudioSystem::FORMAT_DEFAULT,
+ uint32_t format = AUDIO_FORMAT_DEFAULT,
uint32_t channels = 0,
- AudioSystem::output_flags flags =
- AudioSystem::OUTPUT_FLAG_INDIRECT);
+ audio_policy_output_flags_t flags =
+ AUDIO_POLICY_OUTPUT_FLAG_INDIRECT);
virtual status_t startOutput(audio_io_handle_t output,
- AudioSystem::stream_type stream,
+ audio_stream_type_t stream,
int session = 0);
virtual status_t stopOutput(audio_io_handle_t output,
- AudioSystem::stream_type stream,
+ audio_stream_type_t stream,
int session = 0);
virtual void releaseOutput(audio_io_handle_t output);
virtual audio_io_handle_t getInput(int inputSource,
uint32_t samplingRate = 0,
- uint32_t format = AudioSystem::FORMAT_DEFAULT,
+ uint32_t format = AUDIO_FORMAT_DEFAULT,
uint32_t channels = 0,
- AudioSystem::audio_in_acoustics acoustics =
- (AudioSystem::audio_in_acoustics)0);
+ audio_in_acoustics_t acoustics =
+ (audio_in_acoustics_t)0);
virtual status_t startInput(audio_io_handle_t input);
virtual status_t stopInput(audio_io_handle_t input);
virtual void releaseInput(audio_io_handle_t input);
- virtual status_t initStreamVolume(AudioSystem::stream_type stream,
+ virtual status_t initStreamVolume(audio_stream_type_t stream,
int indexMin,
int indexMax);
- virtual status_t setStreamVolumeIndex(AudioSystem::stream_type stream, int index);
- virtual status_t getStreamVolumeIndex(AudioSystem::stream_type stream, int *index);
+ virtual status_t setStreamVolumeIndex(audio_stream_type_t stream, int index);
+ virtual status_t getStreamVolumeIndex(audio_stream_type_t stream, int *index);
- virtual uint32_t getStrategyForStream(AudioSystem::stream_type stream);
- virtual uint32_t getDevicesForStream(AudioSystem::stream_type stream);
+ virtual uint32_t getStrategyForStream(audio_stream_type_t stream);
+ virtual uint32_t getDevicesForStream(audio_stream_type_t stream);
virtual audio_io_handle_t getOutputForEffect(effect_descriptor_t *desc);
virtual status_t registerEffect(effect_descriptor_t *desc,
@@ -107,40 +110,21 @@
virtual void binderDied(const wp<IBinder>& who);
//
- // AudioPolicyClientInterface
+ // Helpers for the struct audio_policy_service_ops implementation.
+ // This is used by the audio policy manager for certain operations that
+ // are implemented by the policy service.
//
- virtual audio_io_handle_t openOutput(uint32_t *pDevices,
- uint32_t *pSamplingRate,
- uint32_t *pFormat,
- uint32_t *pChannels,
- uint32_t *pLatencyMs,
- AudioSystem::output_flags flags);
- virtual audio_io_handle_t openDuplicateOutput(audio_io_handle_t output1,
- audio_io_handle_t output2);
- virtual status_t closeOutput(audio_io_handle_t output);
- virtual status_t suspendOutput(audio_io_handle_t output);
- virtual status_t restoreOutput(audio_io_handle_t output);
- virtual audio_io_handle_t openInput(uint32_t *pDevices,
- uint32_t *pSamplingRate,
- uint32_t *pFormat,
- uint32_t *pChannels,
- uint32_t acoustics);
- virtual status_t closeInput(audio_io_handle_t input);
- virtual status_t setStreamVolume(AudioSystem::stream_type stream,
+ virtual void setParameters(audio_io_handle_t ioHandle,
+ const char *keyValuePairs,
+ int delayMs);
+
+ virtual status_t setStreamVolume(audio_stream_type_t stream,
float volume,
audio_io_handle_t output,
int delayMs = 0);
- virtual status_t setStreamOutput(AudioSystem::stream_type stream, audio_io_handle_t output);
- virtual void setParameters(audio_io_handle_t ioHandle,
- const String8& keyValuePairs,
- int delayMs = 0);
- virtual String8 getParameters(audio_io_handle_t ioHandle, const String8& keys);
- virtual status_t startTone(ToneGenerator::tone_type tone, AudioSystem::stream_type stream);
+ virtual status_t startTone(audio_policy_tone_t tone, audio_stream_type_t stream);
virtual status_t stopTone();
virtual status_t setVoiceVolume(float volume, int delayMs = 0);
- virtual status_t moveEffects(int session,
- audio_io_handle_t srcOutput,
- audio_io_handle_t dstOutput);
private:
AudioPolicyService();
@@ -180,7 +164,7 @@
void startToneCommand(int type = 0, int stream = 0);
void stopToneCommand();
status_t volumeCommand(int stream, float volume, int output, int delayMs = 0);
- status_t parametersCommand(int ioHandle, const String8& keyValuePairs, int delayMs = 0);
+ status_t parametersCommand(int ioHandle, const char *keyValuePairs, int delayMs = 0);
status_t voiceVolumeCommand(float volume, int delayMs = 0);
void insertCommand_l(AudioCommand *command, int delayMs = 0);
@@ -240,9 +224,11 @@
mutable Mutex mLock; // prevents concurrent access to AudioPolicy manager functions changing
// device connection state or routing
- AudioPolicyInterface* mpPolicyManager; // the platform specific policy manager
sp <AudioCommandThread> mAudioCommandThread; // audio commands thread
sp <AudioCommandThread> mTonePlaybackThread; // tone playback thread
+
+ struct audio_policy_device *mpAudioPolicyDev;
+ struct audio_policy *mpAudioPolicy;
};
}; // namespace android
diff --git a/services/camera/libcameraservice/CameraService.cpp b/services/camera/libcameraservice/CameraService.cpp
index 7e3c643..f4859ec 100644
--- a/services/camera/libcameraservice/CameraService.cpp
+++ b/services/camera/libcameraservice/CameraService.cpp
@@ -244,7 +244,7 @@
static MediaPlayer* newMediaPlayer(const char *file) {
MediaPlayer* mp = new MediaPlayer();
if (mp->setDataSource(file, NULL) == NO_ERROR) {
- mp->setAudioStreamType(AudioSystem::ENFORCED_AUDIBLE);
+ mp->setAudioStreamType(AUDIO_STREAM_ENFORCED_AUDIBLE);
mp->prepare();
} else {
LOGE("Failed to load CameraService sounds: %s", file);
@@ -283,7 +283,7 @@
// do not play the sound if stream volume is 0
// (typically because ringer mode is silent).
int index;
- AudioSystem::getStreamVolumeIndex(AudioSystem::ENFORCED_AUDIBLE, &index);
+ AudioSystem::getStreamVolumeIndex(AUDIO_STREAM_ENFORCED_AUDIBLE, &index);
if (index != 0) {
player->seekTo(0);
player->start();
diff --git a/services/java/com/android/server/accessibility/AccessibilityManagerService.java b/services/java/com/android/server/accessibility/AccessibilityManagerService.java
index 1ad8047..d96369b 100644
--- a/services/java/com/android/server/accessibility/AccessibilityManagerService.java
+++ b/services/java/com/android/server/accessibility/AccessibilityManagerService.java
@@ -642,14 +642,16 @@
}
/**
- * Installs or removes the accessibility input filter when accessibility is enabled
- * or disabled.
+     * Sets the input filter state. If enabled, the filter is registered in the
+     * window manager; otherwise it is removed from the window manager.
+ *
+ * @param enabled Whether the input filter is enabled.
*/
- private void updateInputFilterLocked() {
+ private void setInputFilterEnabledLocked(boolean enabled) {
WindowManagerService wm = (WindowManagerService)ServiceManager.getService(
Context.WINDOW_SERVICE);
if (wm != null) {
- if (mIsEnabled) {
+ if (enabled) {
if (mInputFilter == null) {
mInputFilter = new AccessibilityInputFilter(mContext);
}
@@ -681,7 +683,7 @@
if (enabledServices.size() > 0
&& service.mFeedbackType == AccessibilityServiceInfo.FEEDBACK_SPOKEN) {
updateClientsLocked();
- updateInputFilterLocked();
+ setInputFilterEnabledLocked(true);
}
}
@@ -697,7 +699,7 @@
if (enabledServices.isEmpty()
&& service.mFeedbackType == AccessibilityServiceInfo.FEEDBACK_SPOKEN) {
updateClientsLocked();
- updateInputFilterLocked();
+ setInputFilterEnabledLocked(false);
}
}
diff --git a/services/surfaceflinger/Layer.cpp b/services/surfaceflinger/Layer.cpp
index c4027e0..b2f95cd 100644
--- a/services/surfaceflinger/Layer.cpp
+++ b/services/surfaceflinger/Layer.cpp
@@ -986,8 +986,16 @@
ssize_t index = mActiveBufferIndex;
if (index >= 0) {
if (!mFailover) {
- Image& texture(mBufferData[index].texture);
- err = mTextureManager.initEglImage(&texture, dpy, buffer);
+ {
+                // Hold mLock here: without it there is a race where requestBuf()
+                // for the same index can run while this index is being composed
+                // and touch the same data that initEglImage() is using
+                // (e.g. the dirty flag in the texture). The scope braces keep the
+                // lock held only for the initEglImage() call.
+ Mutex::Autolock _l(mLock);
+ Image& texture(mBufferData[index].texture);
+ err = mTextureManager.initEglImage(&texture, dpy, buffer);
+ }
// if EGLImage fails, we switch to regular texture mode, and we
// free all resources associated with using EGLImages.
if (err == NO_ERROR) {
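The extra braces introduced in this hunk are the usual RAII scoped-lock idiom: Mutex::Autolock takes mLock in its constructor and releases it in its destructor, so the lock covers only the initEglImage() call and is already dropped before the EGLImage failure handling runs. A minimal standalone sketch of the idiom (names are illustrative, not taken from Layer.cpp):

#include <utils/threads.h>

using android::Mutex;

static Mutex gLock;
static int gShared;

static void updateSharedThenDoSlowWork()
{
    {
        Mutex::Autolock _l(gLock);   // constructor locks gLock
        gShared++;                   // protected against concurrent writers
    }                                // destructor unlocks at end of scope
    // Slow work, or anything that might re-enter and take gLock again,
    // runs here outside the critical section so other threads are not blocked.
}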
diff --git a/tests/DumpRenderTree/assets/run_page_cycler.py b/tests/DumpRenderTree/assets/run_page_cycler.py
index 692f32e..f995086 100755
--- a/tests/DumpRenderTree/assets/run_page_cycler.py
+++ b/tests/DumpRenderTree/assets/run_page_cycler.py
@@ -33,10 +33,16 @@
# Include all tests if none are specified.
if not args:
print "need a URL, e.g. file:///sdcard/webkit/page_cycler/moz/start.html\?auto=1\&iterations=10"
+ print " or remote:android-browser-test:80/page_cycler/"
sys.exit(1)
else:
path = ' '.join(args);
+ if path[:7] == "remote:":
+ remote_path = path[7:]
+ else:
+ remote_path = None
+
adb_cmd = "adb ";
if options.adb_options:
adb_cmd += options.adb_options
@@ -56,7 +62,20 @@
run_load_test_cmd_postfix = " -w com.android.dumprendertree/.LayoutTestsAutoRunner"
# Call LoadTestsAutoTest::runTest.
- run_load_test_cmd = run_load_test_cmd_prefix + " -e class com.android.dumprendertree.LoadTestsAutoTest#runPageCyclerTest -e path \"" + path + "\" -e timeout " + timeout_ms
+ run_load_test_cmd = run_load_test_cmd_prefix + " -e class com.android.dumprendertree.LoadTestsAutoTest#runPageCyclerTest -e timeout " + timeout_ms
+
+ if remote_path:
+ if options.suite:
+ run_load_test_cmd += " -e suite %s -e forward %s " % (options.suite,
+ remote_path)
+ else:
+ print "for network mode, need to specify --suite as well."
+ sys.exit(1)
+ if options.iteration:
+ run_load_test_cmd += " -e iteration %s" % options.iteration
+ else:
+ run_load_test_cmd += " -e path \"%s\" " % path
+
if options.drawtime:
run_load_test_cmd += " -e drawtime true "
@@ -130,5 +149,15 @@
default=None,
help="stores rendered page to a location on device.")
+ option_parser.add_option("-u", "--suite",
+ default=None,
+ help="(for network mode) specify the suite to"
+ " run by name")
+
+ option_parser.add_option("-i", "--iteration",
+ default="5",
+ help="(for network mode) specify how many iterations"
+ " to run")
+
options, args = option_parser.parse_args();
main(options, args)
diff --git a/tests/DumpRenderTree/src/com/android/dumprendertree/LayoutTestsAutoRunner.java b/tests/DumpRenderTree/src/com/android/dumprendertree/LayoutTestsAutoRunner.java
index e058f32..3ba3488 100755
--- a/tests/DumpRenderTree/src/com/android/dumprendertree/LayoutTestsAutoRunner.java
+++ b/tests/DumpRenderTree/src/com/android/dumprendertree/LayoutTestsAutoRunner.java
@@ -25,7 +25,7 @@
/**
* Instrumentation Test Runner for all DumpRenderTree tests.
- *
+ *
* Running all tests:
*
* adb shell am instrument \
@@ -57,7 +57,7 @@
e.printStackTrace();
}
}
-
+
String delay_str = (String) icicle.get("delay");
if(delay_str != null) {
try {
@@ -66,30 +66,37 @@
}
}
- String r = (String)icicle.get("rebaseline");
+ String r = icicle.getString("rebaseline");
this.mRebaseline = (r != null && r.toLowerCase().equals("true"));
- String logtime = (String) icicle.get("logtime");
+ String logtime = icicle.getString("logtime");
this.mLogtime = (logtime != null
&& logtime.toLowerCase().equals("true"));
- String drawTime = (String) icicle.get("drawtime");
+ String drawTime = icicle.getString("drawtime");
this.mGetDrawTime = (drawTime != null
&& drawTime.toLowerCase().equals("true"));
- mSaveImagePath = (String) icicle.get("saveimage");
+ mSaveImagePath = icicle.getString("saveimage");
- mJsEngine = (String) icicle.get("jsengine");
+ mJsEngine = icicle.getString("jsengine");
+
+ mPageCyclerSuite = icicle.getString("suite");
+ mPageCyclerForwardHost = icicle.getString("forward");
+ mPageCyclerIteration = icicle.getString("iteration", "5");
super.onCreate(icicle);
}
-
- public String mTestPath;
- public String mSaveImagePath;
- public int mTimeoutInMillis;
- public int mDelay;
- public boolean mRebaseline;
- public boolean mLogtime;
- public boolean mGetDrawTime;
- public String mJsEngine;
+
+ String mPageCyclerSuite;
+ String mPageCyclerForwardHost;
+ String mPageCyclerIteration;
+ String mTestPath;
+ String mSaveImagePath;
+ int mTimeoutInMillis;
+ int mDelay;
+ boolean mRebaseline;
+ boolean mLogtime;
+ boolean mGetDrawTime;
+ String mJsEngine;
}
diff --git a/tests/DumpRenderTree/src/com/android/dumprendertree/LayoutTestsAutoTest.java b/tests/DumpRenderTree/src/com/android/dumprendertree/LayoutTestsAutoTest.java
index 050b779..7ac0665 100644
--- a/tests/DumpRenderTree/src/com/android/dumprendertree/LayoutTestsAutoTest.java
+++ b/tests/DumpRenderTree/src/com/android/dumprendertree/LayoutTestsAutoTest.java
@@ -401,15 +401,6 @@
activity.setDefaultDumpDataType(DumpDataType.EXT_REPR);
// Run tests.
- int addr = -1;
- try{
- addr = AdbUtils.resolve("android-browser-test.mtv.corp.google.com");
- } catch (IOException ioe) {
- Log.w(LOGTAG, "error while resolving test host name", ioe);
- }
- if(addr == -1) {
- Log.w(LOGTAG, "failed to resolve test host. http tests will fail.");
- }
for (int i = 0; i < mTestList.size(); i++) {
String s = mTestList.elementAt(i);
boolean ignoreResult = mTestListIgnoreResult.elementAt(i);
diff --git a/tests/DumpRenderTree/src/com/android/dumprendertree/LoadTestsAutoTest.java b/tests/DumpRenderTree/src/com/android/dumprendertree/LoadTestsAutoTest.java
index 622fb0e..ee5bb5d 100644
--- a/tests/DumpRenderTree/src/com/android/dumprendertree/LoadTestsAutoTest.java
+++ b/tests/DumpRenderTree/src/com/android/dumprendertree/LoadTestsAutoTest.java
@@ -16,6 +16,9 @@
package com.android.dumprendertree;
+import com.android.dumprendertree.forwarder.AdbUtils;
+import com.android.dumprendertree.forwarder.ForwardServer;
+
import android.app.Instrumentation;
import android.content.Context;
import android.content.Intent;
@@ -34,6 +37,8 @@
import java.io.PrintStream;
import java.util.concurrent.CountDownLatch;
import java.util.concurrent.TimeUnit;
+import java.util.regex.Matcher;
+import java.util.regex.Pattern;
public class LoadTestsAutoTest extends ActivityInstrumentationTestCase2<TestShellActivity> {
@@ -41,13 +46,15 @@
private final static String LOAD_TEST_RESULT =
Environment.getExternalStorageDirectory() + "/load_test_result.txt";
private final static int MAX_GC_WAIT_SEC = 10;
+ private final static int LOCAL_PORT = 17171;
private boolean mFinished;
static final String LOAD_TEST_RUNNER_FILES[] = {
"run_page_cycler.py"
};
+ private ForwardServer mForwardServer;
public LoadTestsAutoTest() {
- super("com.android.dumprendertree", TestShellActivity.class);
+ super(TestShellActivity.class);
}
// This function writes the result of the layout test to
@@ -59,14 +66,38 @@
inst.sendStatus(0, bundle);
}
+ private String setUpForwarding(String forwardInfo, String suite, String iteration) throws IOException {
+ // read forwarding information first
+ Pattern forwardPattern = Pattern.compile("(.*):(\\d+)/(.*)/");
+ Matcher matcher = forwardPattern.matcher(forwardInfo);
+ if (!matcher.matches()) {
+ throw new RuntimeException("Invalid forward information");
+ }
+ String host = matcher.group(1);
+ int port = Integer.parseInt(matcher.group(2));
+ mForwardServer = new ForwardServer(LOCAL_PORT, AdbUtils.resolve(host), port);
+ mForwardServer.start();
+ return String.format("http://127.0.0.1:%d/%s/%s/start.html?auto=1&iterations=%s",
+ LOCAL_PORT, matcher.group(3), suite, iteration);
+ }
+
// Invokes running of layout tests
// and waits till it has finished running.
- public void runPageCyclerTest() {
+ public void runPageCyclerTest() throws IOException {
LayoutTestsAutoRunner runner = (LayoutTestsAutoRunner) getInstrumentation();
+ if (runner.mPageCyclerSuite != null) {
+            // start a forwarder so that page cycler suites hosted on an external web server can be used
+ if (runner.mPageCyclerForwardHost == null) {
+ throw new RuntimeException("no forwarder information provided");
+ }
+ runner.mTestPath = setUpForwarding(runner.mPageCyclerForwardHost,
+ runner.mPageCyclerSuite, runner.mPageCyclerIteration);
+ Log.d(LOGTAG, "using path: " + runner.mTestPath);
+ }
+
if (runner.mTestPath == null) {
- Log.e(LOGTAG, "No test specified");
- return;
+ throw new RuntimeException("No test specified");
}
TestShellActivity activity = (TestShellActivity) getActivity();
@@ -79,6 +110,10 @@
runner.mGetDrawTime, runner.mSaveImagePath);
activity.clearCache();
+ if (mForwardServer != null) {
+ mForwardServer.stop();
+ mForwardServer = null;
+ }
try {
Thread.sleep(5000);
} catch (InterruptedException e) {
@@ -92,7 +127,9 @@
private void freeMem() {
Log.v(LOGTAG, "freeMem: calling gc...");
final CountDownLatch latch = new CountDownLatch(1);
+ @SuppressWarnings("unused")
Object dummy = new Object() {
+            // this otherwise-unused object exists only so its finalizer signals that a gc has run
@Override
protected void finalize() throws Throwable {
latch.countDown();
diff --git a/tests/DumpRenderTree/src/com/android/dumprendertree/forwarder/Forwarder.java b/tests/DumpRenderTree/src/com/android/dumprendertree/forwarder/Forwarder.java
index a1f3cdf..a971e7b 100644
--- a/tests/DumpRenderTree/src/com/android/dumprendertree/forwarder/Forwarder.java
+++ b/tests/DumpRenderTree/src/com/android/dumprendertree/forwarder/Forwarder.java
@@ -36,6 +36,7 @@
private Socket from, to;
private static final String LOGTAG = "Forwarder";
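+ // size of the buffer used when relaying data between the two sockets (see the copy loop below)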
+ private static final int BUFFER_SIZE = 16384;
public Forwarder (Socket from, Socket to, ForwardServer server) {
this.server = server;
@@ -90,7 +91,7 @@
int length;
InputStream is = in.getInputStream();
OutputStream os = out.getOutputStream();
- byte[] buffer = new byte[4096];
+ byte[] buffer = new byte[BUFFER_SIZE];
while ((length = is.read(buffer)) > 0) {
os.write(buffer, 0, length);
}
diff --git a/tests/HwAccelerationTest/AndroidManifest.xml b/tests/HwAccelerationTest/AndroidManifest.xml
index c763b1d..31b2ddd 100644
--- a/tests/HwAccelerationTest/AndroidManifest.xml
+++ b/tests/HwAccelerationTest/AndroidManifest.xml
@@ -406,6 +406,15 @@
</activity>
<activity
+ android:name="PointsActivity"
+ android:label="_Points">
+ <intent-filter>
+ <action android:name="android.intent.action.MAIN" />
+ <category android:name="android.intent.category.LAUNCHER" />
+ </intent-filter>
+ </activity>
+
+ <activity
android:name="Transform3dActivity"
android:label="_3d">
<intent-filter>
diff --git a/tests/HwAccelerationTest/src/com/android/test/hwui/Lines2Activity.java b/tests/HwAccelerationTest/src/com/android/test/hwui/Lines2Activity.java
index ccf0631..55fab3f 100644
--- a/tests/HwAccelerationTest/src/com/android/test/hwui/Lines2Activity.java
+++ b/tests/HwAccelerationTest/src/com/android/test/hwui/Lines2Activity.java
@@ -42,8 +42,9 @@
swView.setLayerType(View.LAYER_TYPE_SOFTWARE, null);
frame.addView(swView);
final LinesView hwBothView = new LinesView(this, 850, Color.GREEN);
- // BUG: some lines not drawn or drawn with alpha when enabling hw layers
-// hwBothView.setLayerType(View.LAYER_TYPE_HARDWARE, null);
+ // Don't actually need to render to a hw layer, but it's a good sanity-check that
+ // we're rendering to/from layers correctly
+ hwBothView.setLayerType(View.LAYER_TYPE_HARDWARE, null);
frame.addView(hwBothView);
final LinesView swBothView = new LinesView(this, 854, Color.RED);
swBothView.setLayerType(View.LAYER_TYPE_SOFTWARE, null);
diff --git a/tests/HwAccelerationTest/src/com/android/test/hwui/PointsActivity.java b/tests/HwAccelerationTest/src/com/android/test/hwui/PointsActivity.java
new file mode 100644
index 0000000..b3fb7a1
--- /dev/null
+++ b/tests/HwAccelerationTest/src/com/android/test/hwui/PointsActivity.java
@@ -0,0 +1,128 @@
+/*
+ * Copyright (C) 2011 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package com.android.test.hwui;
+
+import android.animation.ObjectAnimator;
+import android.app.Activity;
+import android.content.Context;
+import android.graphics.Canvas;
+import android.graphics.Color;
+import android.graphics.Paint;
+import android.graphics.drawable.ColorDrawable;
+import android.os.Bundle;
+import android.view.View;
+import android.widget.FrameLayout;
+import android.widget.LinearLayout;
+import android.widget.ProgressBar;
+import android.widget.SeekBar;
+
+@SuppressWarnings({"UnusedDeclaration"})
+public class PointsActivity extends Activity {
+
+ float mSeekValue = .5f;
+
+ @Override
+ protected void onCreate(Bundle savedInstanceState) {
+ super.onCreate(savedInstanceState);
+ getWindow().setBackgroundDrawable(new ColorDrawable(0xff000000));
+ SeekBar slider = new SeekBar(this);
+ LinearLayout container = new LinearLayout(this);
+ container.setOrientation(LinearLayout.VERTICAL);
+ setContentView(container);
+
+ container.addView(slider);
+ slider.setMax(100);
+ slider.setProgress(50);
+ FrameLayout frame = new FrameLayout(this);
+ final RenderingView gpuView = new RenderingView(this, Color.GREEN);
+ frame.addView(gpuView);
+ final RenderingView swView = new RenderingView(this, Color.RED);
+ swView.setLayerType(View.LAYER_TYPE_SOFTWARE, null);
+ frame.addView(swView);
+ container.addView(frame);
+
+ slider.setOnSeekBarChangeListener(new SeekBar.OnSeekBarChangeListener() {
+ @Override
+ public void onProgressChanged(SeekBar seekBar, int progress, boolean fromUser) {
+ mSeekValue = (float)progress / 100.0f;
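+ // crossfade the two views: the hw (green) view fades in over the first half of the slider,
+ // the sw (red) view fades out over the second half; both are fully visible at the midpoint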
+ float gpuAlpha = Math.min(2.0f * mSeekValue, 1f);
+ gpuView.setAlpha(gpuAlpha);
+ float swAlpha = Math.min((1 - mSeekValue) * 2.0f, 1f);
+ System.out.println("(gpuAlpha, swAlpha) = (" + gpuAlpha + ", " + swAlpha + ")");
+ swView.setAlpha(swAlpha);
+ }
+
+ @Override
+ public void onStartTrackingTouch(SeekBar seekBar) {
+ }
+
+ @Override
+ public void onStopTrackingTouch(SeekBar seekBar) {
+ }
+ });
+ }
+
+ @Override
+ protected void onDestroy() {
+ super.onDestroy();
+ }
+
+ public static class RenderingView extends View {
+
+ private int mColor;
+
+ public RenderingView(Context c, int color) {
+ super(c);
+ mColor = color;
+ }
+
+ private void drawPoints(Canvas canvas, Paint p, float xOffset, float yOffset) {
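+ // not currently called; onDraw() below draws its points inline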
+ }
+
+ @Override
+ protected void onDraw(Canvas canvas) {
+ super.onDraw(canvas);
+ Paint p = new Paint();
+ p.setColor(mColor);
+
+ float yOffset = 0;
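+ // two rows of points: the first pass is drawn aliased, and setAntiAlias(true) at the end
+ // of the loop body makes the second pass anti-aliased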
+ for (int i = 0; i < 2; ++i) {
+ float xOffset = 0;
+
+ p.setStrokeWidth(0f);
+ p.setStrokeCap(Paint.Cap.SQUARE);
+ canvas.drawPoint(100 + xOffset, 100 + yOffset, p);
+ xOffset += 5;
+
+ p.setStrokeWidth(1f);
+ canvas.drawPoint(100 + xOffset, 100 + yOffset, p);
+ xOffset += 15;
+
+ p.setStrokeWidth(20);
+ canvas.drawPoint(100 + xOffset, 100 + yOffset, p);
+ xOffset += 30;
+
+ p.setStrokeCap(Paint.Cap.ROUND);
+ canvas.drawPoint(100 + xOffset, 100 + yOffset, p);
+
+ p.setAntiAlias(true);
+ yOffset += 30;
+ }
+
+ }
+ }
+}
diff --git a/tests/RenderScriptTests/ImageProcessing/src/com/android/rs/image/ImageProcessingActivity.java b/tests/RenderScriptTests/ImageProcessing/src/com/android/rs/image/ImageProcessingActivity.java
index 7462701..e776463 100644
--- a/tests/RenderScriptTests/ImageProcessing/src/com/android/rs/image/ImageProcessingActivity.java
+++ b/tests/RenderScriptTests/ImageProcessing/src/com/android/rs/image/ImageProcessingActivity.java
@@ -88,9 +88,8 @@
}
// This is a hack to work around an invalidation bug
- mBitmapOut = Bitmap.createBitmap(mBitmapOut);
+ mBitmapOut.setPixel(0, 0, 0);
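+ // (presumably touching a pixel marks the bitmap as modified so the subsequent copyTo()
+ // result is picked up when the view is invalidated)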
mOutPixelsAllocation.copyTo(mBitmapOut);
- mDisplayView.setImageBitmap(mBitmapOut);
mDisplayView.invalidate();
}
};
diff --git a/voip/jni/rtp/AudioGroup.cpp b/voip/jni/rtp/AudioGroup.cpp
index 41fedce..8b31996 100644
--- a/voip/jni/rtp/AudioGroup.cpp
+++ b/voip/jni/rtp/AudioGroup.cpp
@@ -41,6 +41,8 @@
#include <media/AudioTrack.h>
#include <media/mediarecorder.h>
+#include <hardware/audio.h>
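+// pulls in the AUDIO_STREAM_*, AUDIO_FORMAT_* and AUDIO_CHANNEL_* constants used below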
+
#include "jni.h"
#include "JNIHelp.h"
@@ -767,10 +769,10 @@
// Find out the frame count for AudioTrack and AudioRecord.
int output = 0;
int input = 0;
- if (AudioTrack::getMinFrameCount(&output, AudioSystem::VOICE_CALL,
+ if (AudioTrack::getMinFrameCount(&output, AUDIO_STREAM_VOICE_CALL,
sampleRate) != NO_ERROR || output <= 0 ||
AudioRecord::getMinFrameCount(&input, sampleRate,
- AudioSystem::PCM_16_BIT, 1) != NO_ERROR || input <= 0) {
+ AUDIO_FORMAT_PCM_16_BIT, 1) != NO_ERROR || input <= 0) {
LOGE("cannot compute frame count");
return false;
}
@@ -787,10 +789,10 @@
// Initialize AudioTrack and AudioRecord.
AudioTrack track;
AudioRecord record;
- if (track.set(AudioSystem::VOICE_CALL, sampleRate, AudioSystem::PCM_16_BIT,
- AudioSystem::CHANNEL_OUT_MONO, output) != NO_ERROR || record.set(
- AUDIO_SOURCE_VOICE_COMMUNICATION, sampleRate, AudioSystem::PCM_16_BIT,
- AudioSystem::CHANNEL_IN_MONO, input) != NO_ERROR) {
+ if (track.set(AUDIO_STREAM_VOICE_CALL, sampleRate, AUDIO_FORMAT_PCM_16_BIT,
+ AUDIO_CHANNEL_OUT_MONO, output) != NO_ERROR || record.set(
+ AUDIO_SOURCE_VOICE_COMMUNICATION, sampleRate, AUDIO_FORMAT_PCM_16_BIT,
+ AUDIO_CHANNEL_IN_MONO, input) != NO_ERROR) {
LOGE("cannot initialize audio device");
return false;
}