Merge tag android-5.1.0_r1 into AOSP_5.1_MERGE

Change-Id: Ida2f60315b718c2b06f5e0d1d4cd64bbceb557b2
diff --git a/Doxyfile b/Doxyfile
index c01ff14..04f62f1 100644
--- a/Doxyfile
+++ b/Doxyfile
@@ -925,7 +925,7 @@
 # If a relative path is entered the value of OUTPUT_DIRECTORY will be 
 # put in front of it. If left blank `html' will be used as the default path.
 
-HTML_OUTPUT            = reference
+HTML_OUTPUT            = halref
 
 # The HTML_FILE_EXTENSION tag can be used to specify the file extension for 
 # each generated HTML page (for example: .htm,.php,.asp). If it is left blank 
@@ -944,13 +944,13 @@
 # have to redo this when upgrading to a newer version of doxygen or when 
 # changing the value of configuration settings such as GENERATE_TREEVIEW!
 
-HTML_HEADER            = 
+HTML_HEADER            = ./docs/source.android.com/hal_header.html 
 
 # The HTML_FOOTER tag can be used to specify a personal HTML footer for 
 # each generated HTML page. If it is left blank doxygen will generate a 
 # standard footer.
 
-HTML_FOOTER            = 
+HTML_FOOTER            = ./docs/source.android.com/hal_footer.html 
 
 # The HTML_STYLESHEET tag can be used to specify a user-defined cascading 
 # style sheet that is used by each HTML page. It can be used to 
@@ -969,7 +969,7 @@
 # robust against future updates. Doxygen will copy the style sheet file to 
 # the output directory.
 
-HTML_EXTRA_STYLESHEET  = 
+HTML_EXTRA_STYLESHEET  = ./docs/source.android.com/hal_doxygen.css
 
 # The HTML_EXTRA_FILES tag can be used to specify one or more extra images or 
 # other source files which should be copied to the HTML output directory. Note 
@@ -994,7 +994,7 @@
 # the colors in the HTML output. For a value of 0 the output will use 
 # grayscales only. A value of 255 will produce the most vivid colors.
 
-HTML_COLORSTYLE_SAT    = 100
+HTML_COLORSTYLE_SAT    = 0
 
 # The HTML_COLORSTYLE_GAMMA tag controls the gamma correction applied to 
 # the luminance component of the colors in the HTML output. Values below 
@@ -1185,7 +1185,7 @@
 # navigation tree you can set this option to NO if you already set 
 # GENERATE_TREEVIEW to YES.
 
-DISABLE_INDEX          = NO
+DISABLE_INDEX          = YES
 
 # The GENERATE_TREEVIEW tag is used to specify whether a tree-like index 
 # structure should be generated to display hierarchical information. 
@@ -1197,7 +1197,7 @@
 # Since the tree basically has the same information as the tab index you 
 # could consider to set DISABLE_INDEX to NO when enabling this option.
 
-GENERATE_TREEVIEW      = NO
+GENERATE_TREEVIEW      = YES
 
 # The ENUM_VALUES_PER_LINE tag can be used to set the number of enum values 
 # (range [0,1..20]) that doxygen will group on one line in the generated HTML 
diff --git a/DoxygenLayout.xml b/DoxygenLayout.xml
index f264386..4b6ad40 100644
--- a/DoxygenLayout.xml
+++ b/DoxygenLayout.xml
@@ -2,7 +2,7 @@
   <!-- Generated by doxygen 1.8.3.1 -->
   <!-- Navigation index tabs for HTML output -->
   <navindex>
-    <tab type="mainpage" visible="yes" title="" intro="fdsfds"/>
+    <tab type="mainpage" visible="yes" title="" intro=""/>
     <tab type="pages" visible="yes" title="" intro=""/>
     <tab type="modules" visible="yes" title="" intro=""/>
     <tab type="namespaces" visible="yes" title="">
@@ -18,7 +18,7 @@
       <tab type="filelist" visible="yes" title="" intro=""/>
       <tab type="globals" visible="yes" title="" intro=""/>
     </tab>
-        <tab type="user" url="/devices/index.html" title="Back to source.android.com">
+        <tab type="user" url="/devices/tech/index.html" title="Devices on source.android.com">
     </tab>
   </navindex>
 
diff --git a/hal_doxygen.css b/hal_doxygen.css
new file mode 100644
index 0000000..a3e7d9c
--- /dev/null
+++ b/hal_doxygen.css
@@ -0,0 +1,43 @@
+.title {
+    color:#333;
+    font-size: 34px;
+    margin: 36px 0 27px;
+    padding:0 0 10px;
+    font-weight:300;
+}
+
+#header {
+  padding: 1.2em 0 0.2em 0;
+  margin: 10px 120px;
+}
+
+a:hover,
+acronym:hover {
+  color: #7aa1b0 !important; }
+
+a:link, a:visited {
+  color: #258aaf;
+  text-decoration: none;
+}
+
+a:focus, a:hover, a:active {
+  color: #33b5e5;
+  text-decoration: none;
+}
+
+a.toptab {
+  color: #333;
+  font-size: 16px;
+}
+
+a.devices {
+  color: #F80;
+  font-size: 16px;
+}
+
+hr.light {
+    border: 0;
+    height: 0;
+    border-top: 1px solid rgba(0, 0, 0, 0.1);
+    border-bottom: 1px solid rgba(255, 255, 255, 0.3);
+}
diff --git a/hal_footer.html b/hal_footer.html
new file mode 100644
index 0000000..2ad5ca3
--- /dev/null
+++ b/hal_footer.html
@@ -0,0 +1,22 @@
+<!-- HTML footer for doxygen 1.8.4-->
+<!-- start footer part -->
+<!--BEGIN GENERATE_TREEVIEW-->
+<div id="nav-path" class="navpath"><!-- id is needed for treeview function! -->
+  <ul>
+    $navpath
+    <li class="footer">$generatedby
+    <a href="/index.html">
+    <img class="footer" src="/images/Android_Robot_100.png" alt="Android"/></a> $doxygenversion </li>
+  </ul>
+</div>
+<!--END GENERATE_TREEVIEW-->
+<!--BEGIN !GENERATE_TREEVIEW-->
+<hr class="footer"/><address class="footer"><small>
+$generatedby &#160;<a href="/index.html">
+<img class="footer"
+src="/images/Android_Robot_100.png" alt="Android"/>
+</a> $doxygenversion
+</small></address>
+<!--END !GENERATE_TREEVIEW-->
+</body>
+</html>
diff --git a/hal_header.html b/hal_header.html
new file mode 100644
index 0000000..dc4e671
--- /dev/null
+++ b/hal_header.html
@@ -0,0 +1,46 @@
+<!-- HTML header for doxygen 1.8.4-->
+<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Transitional//EN" "http://www.w3.org/TR/xhtml1/DTD/xhtml1-transitional.dtd">
+<html xmlns="http://www.w3.org/1999/xhtml">
+<head>
+<meta http-equiv="Content-Type" content="text/xhtml;charset=UTF-8"/>
+<meta http-equiv="X-UA-Compatible" content="IE=9"/>
+<meta name="generator" content="Doxygen $doxygenversion"/>
+<!--BEGIN PROJECT_NAME--><title>$projectname: $title</title><!--END PROJECT_NAME-->
+<!--BEGIN !PROJECT_NAME--><title>$title</title><!--END !PROJECT_NAME-->
+<link href="$relpath^tabs.css" rel="stylesheet" type="text/css"/>
+<script type="text/javascript" src="//www.gstatic.com/external_hosted/source_android_com/sac_doxygen_jquery.js"></script>
+<script type="text/javascript" src="$relpath^dynsections.js"></script>
+$treeview
+$search
+$mathjax
+<link href="$relpath^$stylesheet" rel="stylesheet" type="text/css" />
+$extrastylesheet
+<script type="text/javascript">
+  var _gaq = _gaq || [];
+  _gaq.push(['_setAccount', 'UA-45455297-1']);
+  _gaq.push(['_trackPageview']);
+
+  (function() {
+    var ga = document.createElement('script'); ga.type = 'text/javascript'; ga.async = true;
+    ga.src = ('https:' == document.location.protocol ? 'https://ssl' : 'http://www') + '.google-analytics.com/ga.js';
+    var s = document.getElementsByTagName('script')[0]; s.parentNode.insertBefore(ga, s);
+  })();
+</script>
+</head>
+<body>
+<div id="top"><!-- do not remove this div, it is closed by doxygen! -->
+
+    <div id="header">
+            <table width="80%">
+              <tr>
+                <td><a href="/index.html"><img src="/assets/images/sac_logo.png" width="123" height="25" alt="Android Developers" /></a></td>
+                <td><a class="toptab" href="/source/index.html">Source</a></td>
+                <td><a class="devices" href="/devices/index.html">Devices</a></td>
+                <td><a class="toptab" href="/accessories/index.html">Accessories</a></td>
+                <td><a class="toptab" href="/compatibility/index.html">Compatibility</a></td>
+                <td width="20%">$searchbox</td>
+              </tr>
+            </table>
+  </div>
+<hr class="light" />
+<!-- end header part -->
diff --git a/src/accessories/accessories_toc.cs b/src/accessories/accessories_toc.cs
index d1badbe..654ff7b 100644
--- a/src/accessories/accessories_toc.cs
+++ b/src/accessories/accessories_toc.cs
@@ -1,5 +1,5 @@
 <!--
-    Copyright 2013 The Android Open Source Project
+    Copyright 2014 The Android Open Source Project
 
     Licensed under the Apache License, Version 2.0 (the "License");
     you may not use this file except in compliance with the License.
@@ -17,7 +17,7 @@
 <ul id="nav">
   <!-- Accessories -->
   <li class="nav-section">
-    <div class="nav-section-header">
+<div class="nav-section-header">
       <a href="<?cs var:toroot ?>accessories/index.html">
         <span class="en">Overview</span>
       </a>
@@ -25,17 +25,32 @@
     <ul>
       <li class="nav-section">
         <div class="nav-section-header">
-          <a href="<?cs var:toroot ?>accessories/protocol.html">
-            <span class="en">Open Accessory Protocol</span>
+          <a href="<?cs var:toroot ?>accessories/audio.html">
+            <span class="en">Audio Accessories</span>
           </a>
         </div>
         <ul>
+          <li><a href="<?cs var:toroot ?>accessories/headset-spec.html">Headset specification</a></li>
+        </ul>
+      </li>    
+  <li class="nav-section">
+<div class="nav-section-header">
+      <a href="<?cs var:toroot ?>accessories/custom.html">
+        <span class="en">Custom Accessories</span>
+      </a>
+    </div>
+    <ul>
+      <li class="nav-section">
+        <div class="nav-section-header"><a href="<?cs var:toroot ?>accessories/protocol.html"><span class="en">Open Accessory Protocol</span>
+        </a>
+        </div>
+        <ul>
           <li><a href="<?cs var:toroot ?>accessories/aoa2.html">Version 2.0</a></li>
           <li><a href="<?cs var:toroot ?>accessories/aoa.html">Version 1.0</a></li>
         </ul>
-      </li>
-      <li><a href="<?cs var:toroot ?>accessories/audio.html">Building Audio Accessories</a></li>
-      <li><a href="<?cs var:toroot ?>accessories/custom.html">Building Custom Accessories</a></li>
+        </li>
+       </ul>
+     </li>
   </li>
   <!-- End Accessories -->
 </ul>
diff --git a/src/accessories/audio.jd b/src/accessories/audio.jd
index 240f4ea..1674710 100644
--- a/src/accessories/audio.jd
+++ b/src/accessories/audio.jd
@@ -2,7 +2,7 @@
 @jd:body
 
 <!--
-    Copyright 2013 The Android Open Source Project
+    Copyright 2014 The Android Open Source Project
 
     Licensed under the Apache License, Version 2.0 (the "License");
     you may not use this file except in compliance with the License.
@@ -16,33 +16,40 @@
     See the License for the specific language governing permissions and
     limitations under the License.
 -->
-<p>In building an audio accessory, such as an audio dock or other playback device, you should
-consider how your accessory will connect with Android devices. In particular, you should decide
-if your accessory will use Universal Serial Bus (USB) or a Bluetooth connection to stream music or
-other audio content.</p>
+<div id="qv-wrapper">
+  <div id="qv">
+    <h2>In this document</h2>
+    <ol id="auto-toc">
+    </ol>
+  </div>
+</div>
+
+<p>In implementing an audio accessory, such as a headset, headphone amplifier,
+microphone, DAC/ADC, or dock, you should consider how your accessory will
+connect with Android devices. In particular, you should decide
+if your accessory will use a wired 3.5 mm headset connector, Universal Serial
+Bus (USB), or a Bluetooth connection to stream music or other audio content.</p>
+<h2 id="audio-over-35mm">Audio over 3.5 mm headset connector</h2>
+<p>Many Android-based devices include a 3.5 mm (“mini”) headset connector. In
+addition to the traditional stereo output and mono input features, the <a
+href="headset-spec.html">Wired audio headset specification</a> defines standard
+impedances and functions so a range of Android devices and headsets can inter-operate.</p>
+
 <h2 id="audio-over-usb">Audio over USB</h2>
-<p>An accessory that connects with Android over USB connection must use the Android Open
-Accessory (AOA) protocol version 2.0. This version of the AOA protocol is supported on Android 4.1
-(API Level 16) and higher. Once an Android device connects to an accessory that supports this
-protocol, the Android system treats it as a standard audio output device and routes all audio to
-that accessory. No secondary software application is required on the Android device.</p>
-<p><strong>Note:</strong> Due to the low power output of Android devices, the Android Open Accessory
-Protocol requires that accessories act as a USB host, which means that the connecting accessory
-must power the bus.</p>
-<h3 id="next-steps">Next steps</h3>
-<p>To get started on building an audio accessory that uses a USB connection:</p>
-<ul>
-<li>Select a hardware platform or build a hardware device that can support USB host mode.</li>
-<li>Review the <a href="{@docRoot}accessories/aoa2.html">AOA 2.0 protocol</a> specification to understand
-  how to implement this protocol on your accessory hardware.</li>
-<li>Review the ADK 2012 <a href="http://developer.android.com/tools/adk/adk2.html#src-download">firmware source code</a>
-  (<code>&lt;adk-src&gt;/adk2012/board/library/ADK2/</code>), which includes an example implementation
-  of an audio playback accessory using a USB connection.</li>
-</ul>
-<p><strong>Note:</strong> The AOA 2.0 protocol also supports the
-<a href="{@docRoot}accessories/aoa2.html#hid-support">human interface device</a> (HID) protocol through a USB
-connection, enabling accessories such as audio docks to provide hardware play back controls such
-as pause, fast-forward or volume buttons.</p>
+<p>Android can use USB in several modes:</p>
+  <ul>
+    <li>debug
+    <li>accessory
+    <li>host
+  </ul>
+<p>In the traditional debug mode, there is no audio capability.</p>
+<p>Accessory mode is provided by the Android Open Accessory (AOA) protocol version 2.0.
+There is limited audio capability in accessory mode, as described in <a
+href="custom.html#audio-over-usb">Connecting custom audio over USB</a>.</p>
+<p>Host mode enables the Android device to drive the USB bus and operate with a
+wide range of USB-based peripherals, including audio interfaces. Host mode
+audio is described in <a href="{@docRoot}devices/audio_usb.html">USB Digital Audio</a>.</p>
+
 <h2 id="audio-over-bluetooth">Audio over Bluetooth</h2>
 <p>An accessory that connects with Android over Bluetooth can use an Advanced Audio Distribution
 Profile (A2DP) connection stream music for playback. Playing audio over a Bluetooth with A2DP is
@@ -67,4 +74,3 @@
 <p><strong>Note:</strong> The ADK 2012 source code includes an open source Bluetooth stack that
 is built for the Texas Instruments CC2564 chip, but can work with any Bluetooth chip that
 implements a standard Host/Controller Interface (HCI).</p>
-
diff --git a/src/accessories/custom.jd b/src/accessories/custom.jd
index 3f84d50..560f182 100644
--- a/src/accessories/custom.jd
+++ b/src/accessories/custom.jd
@@ -1,8 +1,8 @@
-page.title=Building Custom Accessories
+page.title=Custom Accessories
 @jd:body
 
 <!--
-    Copyright 2013 The Android Open Source Project
+    Copyright 2014 The Android Open Source Project
 
     Licensed under the Apache License, Version 2.0 (the "License");
     you may not use this file except in compliance with the License.
@@ -16,6 +16,14 @@
     See the License for the specific language governing permissions and
     limitations under the License.
 -->
+<div id="qv-wrapper">
+  <div id="qv">
+    <h2>In this document</h2>
+    <ol id="auto-toc">
+    </ol>
+  </div>
+</div>
+
 <p>An accessory for Android can be anything: keyboard, thermometer, robot, lighting control or
 anything else you can imagine. Accessories for Android all have one thing in common; they all
 connect to an Android device in some way. When starting out to build an accessory, you should
@@ -77,3 +85,27 @@
 <p><strong>Note:</strong> The ADK 2012 source code includes an open source Bluetooth stack which
 is built for the Texas Instruments CC2564 chip, but can work with any Bluetooth chip that
 supports a standard Host/Controller Interface (HCI).</p>
+
+<h2 id="audio-over-usb">Connecting audio over USB</h2>
+<p>An accessory that connects with Android over USB connection may use the Android Open
+Accessory (AOA) protocol version 2.0. This version of the AOA protocol is supported on Android 4.1
+(API Level 16) and higher. Once an Android device connects to an accessory that supports this
+protocol, the Android system treats it as a standard audio output device and routes all audio to
+that accessory. No secondary software application is required on the Android device.</p>
+<p><strong>Note:</strong> Due to the low power output of Android devices, the Android Open Accessory
+Protocol requires that accessories act as a USB host, which means that the connecting accessory
+must power the bus.</p>
+<h3 id="next-steps">Next steps</h3>
+<p>To get started on building an audio accessory that uses a USB connection:</p>
+<ul>
+<li>Select a hardware platform or build a hardware device that can support USB host mode.</li>
+<li>Review the <a href="{@docRoot}accessories/aoa2.html">AOA 2.0 protocol</a> specification to understand
+  how to implement this protocol on your accessory hardware.</li>
+<li>Review the ADK 2012 <a href="http://developer.android.com/tools/adk/adk2.html#src-download">firmware source code</a>
+  (<code>&lt;adk-src&gt;/adk2012/board/library/ADK2/</code>), which includes an example implementation
+  of an audio playback accessory using a USB connection.</li>
+</ul>
+<p><strong>Note:</strong> The AOA 2.0 protocol also supports the
+<a href="{@docRoot}accessories/aoa2.html#hid-support">human interface device</a> (HID) protocol through a USB
+connection, enabling accessories such as audio docks to provide hardware play back controls such
+as pause, fast-forward or volume buttons.</p>
diff --git a/src/accessories/headset-spec.jd b/src/accessories/headset-spec.jd
new file mode 100644
index 0000000..d87c19a
--- /dev/null
+++ b/src/accessories/headset-spec.jd
@@ -0,0 +1,682 @@
+page.title=Wired audio headset specification
+@jd:body
+
+<!--
+    Copyright 2014 The Android Open Source Project
+
+    Licensed under the Apache License, Version 2.0 (the "License");
+    you may not use this file except in compliance with the License.
+    You may obtain a copy of the License at
+
+        http://www.apache.org/licenses/LICENSE-2.0
+
+    Unless required by applicable law or agreed to in writing, software
+    distributed under the License is distributed on an "AS IS" BASIS,
+    WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+    See the License for the specific language governing permissions and
+    limitations under the License.
+-->
+<div id="qv-wrapper">
+  <div id="qv">
+    <h2>In this document</h2>
+    <ol id="auto-toc">
+    </ol>
+  </div>
+</div>
+
+<p><em>Version 1.0</em></p>
+
+<p>This document specifies the requirements for headsets and mobile devices to
+function uniformly across the Android ecosystem. It is separated into two
+sections beginning with the specifications for the headset
+accessory followed by the specifications for the mobile device.</p>
+
+<h2 id=headset_accessory_plug_specifications>Headset Accessory (Plug) Specifications</h2>
+
+<p>The requirements in the following section apply to the headset accessory.</p>
+
+<h3 id=functions>Functions</h3>
+
+<table>
+ <tr>
+    <th>
+<p><strong>Function</strong></p>
+</th>
+    <th>
+<p><strong>Accessory Support</strong></p>
+</th>
+ </tr>
+ <tr>
+    <td>
+<p>Stereo Audio Out</p>
+</td>
+    <td>
+<p><strong>Required</strong></p>
+</td>
+ </tr>
+ <tr>
+    <td>
+<p>Audio in (Mic)</p>
+</td>
+    <td>
+<p><strong>Required</strong></p>
+</td>
+ </tr>
+ <tr>
+    <td>
+<p>Ground</p>
+</td>
+    <td>
+<p><strong>Required</strong></p>
+</td>
+ </tr>
+</table>
+
+<h3 id=control-function_mapping>Control-Function Mapping</h3>
+
+<table>
+ <tr>
+    <th>
+<p><strong>Control Function</strong></p>
+</th>
+    <th>
+<p><strong>Accessory Support</strong></p>
+</th>
+    <th>
+<p><strong>Description</strong></p>
+</th>
+ </tr>
+ <tr>
+    <td>
+<p>Function A</p>
+</td>
+    <td>
+<p><strong>Required</strong></p>
+</td>
+    <td>
+<p>Play/pause/hook (Short Press), Trigger Assist (Long Press), Next (double press)</p>
+</td>
+ </tr>
+ <tr>
+    <td>
+<p>Function B</p>
+</td>
+    <td>
+<p>Optional</p>
+</td>
+    <td>
+<p>Vol+</p>
+</td>
+ </tr>
+ <tr>
+    <td>
+<p>Function C</p>
+</td>
+    <td>
+<p>Optional</p>
+</td>
+    <td>
+<p>Vol-</p>
+</td>
+ </tr>
+ <tr>
+    <td>
+<p>Function D</p>
+</td>
+    <td>
+<p>Optional</p>
+</td>
+    <td>
+<p>Reserved (Nexus devices will use this reserved function to launch Google Now
+voice search)</p>
+</td>
+ </tr>
+</table>
+
+<p><strong>Assign functions to buttons as follows</strong>:</p>
+
+<ul>
+  <li> All one-button headsets must implement Function A.
+  <li> Headsets with multiple buttons must implement functions according to the
+following pattern:
+  <ul>
+    <li> 2 functions: A and D
+    <li> 3 functions: A, B, C
+    <li> 4 functions: A, B, C, D
+  </ul>
+</ul>
+
+<h3 id=mechanical>Mechanical</h3>
+
+<table>
+ <tr>
+    <th>
+<p><strong>Function</strong></p>
+</th>
+    <th>
+<p><strong>Accessory Support</strong></p>
+</th>
+    <th>
+<p><strong>Notes</strong></p>
+</th>
+ </tr>
+ <tr>
+    <td>
+<p>4 conductor 3.5mm plug</p>
+</td>
+    <td>
+<p><strong>Required</strong></p>
+</td>
+    <td>
+<p>Ref: EIAJ-RC5325A standard</p>
+</td>
+ </tr>
+ <tr>
+    <td>
+<p>CTIA pinout order (LRGM)</p>
+</td>
+    <td>
+<p><strong>Required</strong></p>
+</td>
+    <td>
+<p>Except in regions with legal requirements for OMTP pinout</p>
+</td>
+ </tr>
+ <tr>
+    <td>
+<p>OMTP pinout order  (LRMG)</p>
+</td>
+    <td>
+<p>Optional</p>
+</td>
+    <td></td>
+ </tr>
+ <tr>
+    <td>
+<p>Microphone</p>
+</td>
+    <td>
+<p><strong>Required</strong></p>
+</td>
+    <td>
+<p>Must not be obstructed when operating headset controls</p>
+</td>
+ </tr>
+</table>
+
+<h3 id=electrical>Electrical</h3>
+
+<table>
+ <tr>
+    <th>
+<p><strong>Function</strong></p>
+</th>
+    <th>
+<p><strong>Accessory Support</strong></p>
+</th>
+    <th>
+<p><strong>Description</strong></p>
+</th>
+ </tr>
+ <tr>
+    <td>
+<p>Ear speaker impedance</p>
+</td>
+    <td>
+<p><strong>16 ohms or higher</strong></p>
+</td>
+    <td>
+<p>Recommend 32 - 300 ohms</p>
+</td>
+ </tr>
+ <tr>
+    <td>
+<p>Mic DC resistance</p>
+</td>
+    <td>
+<p><strong>1000 ohms or higher</strong></p>
+</td>
+    <td>
+<p>Mic characteristics must be compliant with section 5.4 “Audio Recording” of
+current Android Compatibility Definition Document (CDD)</p>
+</td>
+ </tr>
+ <tr>
+    <td>
+<p>Control Function Equivalent impedance*</p>
+
+<p>*Total impedance from positive mic terminal to GND when button is pressed with
+2.2 V mic bias applied through 2.2 kOhm resistor</p>
+</td>
+    <td>
+<p><strong>0 ohm</strong></p>
+</td>
+    <td>
+<p>[Function A]  Play/Pause/Hook</p>
+</td>
+ </tr>
+ <tr>
+    <td></td>
+    <td>
+<p><strong>240 ohm</strong> +/- 1% resistance</p>
+</td>
+    <td>
+<p>[Function B]</p>
+</td>
+ </tr>
+ <tr>
+    <td></td>
+    <td>
+<p><strong>470 ohm</strong> +/- 1% resistance</p>
+</td>
+    <td>
+<p>[Function C] </p>
+</td>
+ </tr>
+ <tr>
+    <td></td>
+    <td>
+<p><strong>135 ohm</strong> +/- 1% resistance</p>
+</td>
+    <td>
+<p>[Function D]</p>
+</td>
+ </tr>
+</table>
+
+<h3 id=reference_headset_test_circuit_1>Reference Headset Test Circuit 1</h3>
+
+<img src="images/headset-circuit1.png" alt="Reference Headset Test Circuit 1" />
+<p class="img-caption"><strong>Figure 1.</strong> Reference headset test circuit 1</p>
+
+<p class="note"><strong>Note:</strong> Four-segment plug shows CTIA pinout. For
+OMTP pinout, please swap MIC and GND segments.</p>
+
+<h3 id=reference_headset_test_circuit_2>Reference Headset Test Circuit 2</h3>
+
+<img src="images/headset-circuit2.png" alt="Reference Headset Test Circuit 2" />
+<p class="img-caption"><strong>Figure 2.</strong> Reference headset test circuit 2</p>
+
+<p class="note"><strong>Note:</strong> The second reference circuit above
+illustrates how the actual resistor values (R1 - R4) will change based on the
+microphone capsule resistance to achieve the equivalent impedance values as
+required by the specification. The example above assumes a 5 kOhm microphone
+impedance. Therefore, as an example, to achieve an equivalent R4 impedance of
+135 Ohm, the actual R4 resistor value needs to be 139 Ohms.</p>
+
+<h2 id=mobile_device_jack_specifications>Mobile Device (Jack) Specifications</h2>
+
+<p class="caution"><strong>Caution:</strong> To achieve compatibility with the
+headset specification above, devices that include a 4 conductor 3.5mm audio
+jack must meet the following specifications. Please see the <em>Analog audio
+ports</em> section of the <a
+href="{@docRoot}compatibility/android-cdd.pdf">Android Compatibility Definition
+Document (CDD)</a> for Android compatibility requirements.</p>
+
+<p><strong>Headset Jack Functions</strong></p>
+<table>
+ <tr>
+    <th>
+<p><strong>Function</strong></p>
+</th>
+    <th>
+<p><strong>Device Support</strong></p>
+</th>
+ </tr>
+ <tr>
+    <td>
+<p>Stereo Audio Out</p>
+</td>
+    <td>
+<p><strong>Required</strong></p>
+</td>
+ </tr>
+ <tr>
+    <td>
+<p>Audio in (Mic)</p>
+</td>
+    <td>
+<p><strong>Required</strong></p>
+</td>
+ </tr>
+ <tr>
+    <td>
+<p>Ground</p>
+</td>
+    <td>
+<p><strong>Required</strong></p>
+</td>
+ </tr>
+</table>
+
+<h3 id=software_mapping>Software mapping</h3>
+
+<table>
+ <tr>
+    <th>
+<p><strong>Function</strong></p>
+</th>
+    <th>
+<p><strong>Device Support</strong></p>
+</th>
+    <th>
+<p><strong>Description</strong></p>
+</th>
+ </tr>
+ <tr>
+    <td>
+<p>Function A control event </p>
+</td>
+    <td>
+<p><strong>Required</strong></p>
+</td>
+    <td>
+<p>input event KEY_MEDIA</p>
+
+<p>Android key  KEYCODE_HEADSETHOOK</p>
+</td>
+ </tr>
+ <tr>
+    <td>
+<p>Function D control event</p>
+</td>
+    <td>
+<p><strong>Required</strong></p>
+</td>
+    <td>
+<p>input event KEY_VOICECOMMAND</p>
+
+<p>Android key KEYCODE_VOICE_ASSIST</p>
+</td>
+ </tr>
+ <tr>
+    <td>
+<p>Function B control event</p>
+</td>
+    <td>
+<p><strong>Required</strong></p>
+</td>
+    <td>
+<p>input event KEY_VOLUMEUP</p>
+
+<p>Android key  VOLUME_UP</p>
+</td>
+ </tr>
+ <tr>
+    <td>
+<p>Function C control event</p>
+</td>
+    <td>
+<p><strong>Required</strong></p>
+</td>
+    <td>
+<p>input event KEY_VOLUMEDOWN</p>
+
+<p>Android key  VOLUME_DOWN</p>
+</td>
+ </tr>
+ <tr>
+    <td>
+<p>Headset insertion detection</p>
+</td>
+    <td>
+<p><strong>Required</strong></p>
+</td>
+    <td>
+<p>input event SW_JACK_PHYSICAL_INSERT 7</p>
+</td>
+ </tr>
+ <tr>
+    <td>
+<p>Headset type detection</p>
+</td>
+    <td>
+<p><strong>Mic</strong></p>
+</td>
+    <td>
+<p>input event SW_MICROPHONE_INSERT 4</p>
+</td>
+ </tr>
+ <tr>
+    <td>
+<p>Headset type detection</p>
+</td>
+    <td>
+<p><strong>No Mic</strong></p>
+</td>
+    <td>
+<p>input event SW_HEADPHONE_INSERT 2</p>
+</td>
+ </tr>
+ <tr>
+    <td>
+<p>Headset speaker impedance</p>
+</td>
+    <td>
+<p><strong>Required Headphone (low)</strong></p>
+</td>
+    <td>
+<p>Failure mode is to indicate headphones so that limitation would be on</p>
+</td>
+ </tr>
+ <tr>
+    <td>
+<p>Headset speaker impedance</p>
+</td>
+    <td>
+<p><strong>Required Line In (high)</strong></p>
+</td>
+    <td>
+<p>input event SW_LINEOUT_INSERT 6</p>
+</td>
+ </tr>
+</table>
+
+<h3 id=mechanical11>Mechanical</h3>
+
+<table>
+ <tr>
+    <th>
+<p><strong>Function</strong></p>
+</th>
+    <th>
+<p><strong>Device Support</strong></p>
+</th>
+    <th>
+<p><strong>Description</strong></p>
+</th>
+ </tr>
+ <tr>
+    <td>
+<p>4 conductor 3.5mm jack</p>
+</td>
+    <td>
+<p><strong>Required</strong></p>
+</td>
+    <td></td>
+ </tr>
+ <tr>
+    <td>
+<p>CTIA pinout order (LRGM)</p>
+</td>
+    <td>
+<p><strong>Required</strong></p>
+</td>
+    <td>
+<p>3 Pin &amp; Mono Plug Compatible</p>
+</td>
+ </tr>
+ <tr>
+    <td>
+<p>OMTP pinout order  (LRMG)</p>
+</td>
+    <td>
+<p>Optional but <strong>Strongly Recommended</strong></p>
+</td>
+    <td></td>
+ </tr>
+ <tr>
+    <td>
+<p>Headset detect sequence</p>
+</td>
+    <td>
+<p><strong>Required</strong></p>
+</td>
+    <td>
+<p>Plug insert notification must only be triggered after all contacts on plug are
+touching their relevant segments. This will prevent unreliable headset
+detection due to slow insertion. </p>
+</td>
+ </tr>
+</table>
+
+<h3 id=electrical12>Electrical</h3>
+
+<h4 id=general>General</h4>
+
+<table>
+ <tr>
+    <th>
+<p><strong>Function</strong></p>
+</th>
+    <th>
+<p><strong>Device Support</strong></p>
+</th>
+    <th>
+<p><strong>Notes</strong></p>
+</th>
+ </tr>
+ <tr>
+    <td>
+<p>Minimum output voltage drive capacity </p>
+</td>
+    <td>
+<p>150mV </p>
+</td>
+    <td>
+<p>&gt;= 150mV on 32 ohm</p>
+
+<p>Test conditions: EN50332-2</p>
+</td>
+ </tr>
+ <tr>
+    <td>
+<p>Mic bias resistance </p>
+</td>
+    <td>
+<p>Required</p>
+</td>
+    <td>
+<p>Flexible on detection method used and microphone bias resistor selection.
+Require that all button resistance value ranges specified below be detected and
+related to their respective function</p>
+</td>
+ </tr>
+ <tr>
+    <td>
+<p>Mic bias voltage</p>
+</td>
+    <td>
+<p>1.8V - 2.9V</p>
+</td>
+    <td>
+<p>To guarantee compatibility to common microphone capsules.</p>
+</td>
+ </tr>
+</table>
+
+<h4 id=function_impedance_and_threshold_detection>Function Impedance and Threshold Detection</h4>
+
+<p>Devices must detect the following resistor ladder on the accessories. The
+accessories will be tested to the standardized circuit diagram in the diagram
+illustrated earlier (Reference Headset Test Circuit) where the total impedance
+is measured from MIC terminal to GND when a button is pressed with 2.2V mic
+bias applied through 2.2 kOhm resistor. This is the same effective resistance
+as the button detection circuit with the microphone in parallel with the button
+resistor.</p>
+<table>
+ <tr>
+    <th>
+<p><strong>Button Impedance Level</strong></p>
+</th>
+    <th>
+<p><strong>Device Support</strong></p>
+</th>
+    <th>
+<p><strong>Notes</strong></p>
+</th>
+ </tr>
+ <tr>
+    <td>
+<p><strong>70 ohm or less</strong></p>
+</td>
+    <td>
+<p><strong>Required</strong></p>
+</td>
+    <td>
+<p>[Function A]</p>
+</td>
+ </tr>
+ <tr>
+    <td>
+<p><strong>110 - 180 ohm </strong></p>
+</td>
+    <td>
+<p><strong>Required</strong></p>
+</td>
+    <td>
+<p>[Function D]</p>
+</td>
+ </tr>
+ <tr>
+    <td>
+<p><strong>210 - 290 ohm</strong></p>
+</td>
+    <td>
+<p><strong>Required</strong></p>
+</td>
+    <td>
+<p>[Function B]</p>
+</td>
+ </tr>
+ <tr>
+    <td>
+<p><strong>360 - 680 ohm </strong></p>
+</td>
+    <td>
+<p><strong>Required</strong></p>
+</td>
+    <td>
+<p>[Function C]</p>
+</td>
+ </tr>
+ <tr>
+    <td>
+<p><strong>Headset speaker impedance level</strong></p>
+</td>
+    <td></td>
+    <td></td>
+ </tr>
+ <tr>
+    <td>
+<p>Low Threshold Detection</p>
+</td>
+    <td>
+<p><strong>Required</strong></p>
+</td>
+    <td>
+<p>Headphone (low) &lt; 1 Kohm</p>
+</td>
+ </tr>
+ <tr>
+    <td>
+<p>High Threshold Detection</p>
+</td>
+    <td>
+<p><strong>Required</strong></p>
+</td>
+    <td>
+<p>Line In (high) &gt; 5 Kohm</p>
+</td>
+ </tr>
+</table>
diff --git a/src/accessories/images/headset-circuit1.png b/src/accessories/images/headset-circuit1.png
new file mode 100644
index 0000000..c69df98
--- /dev/null
+++ b/src/accessories/images/headset-circuit1.png
Binary files differ
diff --git a/src/accessories/images/headset-circuit2.png b/src/accessories/images/headset-circuit2.png
new file mode 100644
index 0000000..67bd5b4
--- /dev/null
+++ b/src/accessories/images/headset-circuit2.png
Binary files differ
diff --git a/src/accessories/index.jd b/src/accessories/index.jd
index adf3f5c..1157d5d 100644
--- a/src/accessories/index.jd
+++ b/src/accessories/index.jd
@@ -1,8 +1,8 @@
-page.title=Build Accessories for Android
+page.title=Accessories for Android
 @jd:body
 
 <!--
-    Copyright 2013 The Android Open Source Project
+    Copyright 2014 The Android Open Source Project
 
     Licensed under the Apache License, Version 2.0 (the "License");
     you may not use this file except in compliance with the License.
@@ -16,28 +16,27 @@
     See the License for the specific language governing permissions and
     limitations under the License.
 -->
-<p>Build compelling USB and Bluetooth Accessories to extend
-  the capabilities of your user's Android-powered devices. Android defines an standard
-  protocol that you can implement in your accessories and have it compatible with 
-  a wide range of Android-powered devices.
+<p>Implement compelling accessories to extend the capabilities of your users'
+Android-powered devices. Android relies on a suite of standard protocols you
+can implement in your accessories to be compatible with a wide range of
+Android-powered devices.
 </p>
 
 <div class="layout-content-row">  
 
   <div class="layout-content-col span-6">
 	<h4 id="audio-accessories">Audio Accessories</h4>
-	<p>Android 4.1 and higher has support for audio output over a USB connection or Bluetooth. Find out
-	how to build audio docks and other plug-in audio output hardware for Android.</p>
-	<p><a href="{@docRoot}accessories/audio.html">&raquo; Build Audio Accessories</a></p>
+	<p>Android supports local on-device audio and remote off-device audio
+        over a wired 3.5 mm headset jack, USB connection, or Bluetooth.</p>
+	<p><a href="{@docRoot}accessories/audio.html">&raquo; Audio Accessories</a></p>
   </div>
     
   <div class="layout-content-col span-6">
 	<h4 id="custom-accessories">Custom Accessories</h4>
 	<p>What do you want to connect to your Android device? Alarm clock? Keyboard? Thermostat? Robot?
-	Learn how to connect existing equipment or your own unique hardware to Android.</p>
-	<p><a href="{@docRoot}accessories/custom.html">&raquo; Build Custom Accessories</a></p>
+	Learn how to connect existing equipment or your own unique hardware to
+        Android using the Android Open Accessory Protocol.</p>
+	<p><a href="{@docRoot}accessories/custom.html">&raquo; Custom Accessories</a></p>
  </div>
 
 </div>
-
-
diff --git a/src/compatibility/4.4/versions.jd b/src/compatibility/4.4/versions.jd
index d2118df..f9c3ce7 100644
--- a/src/compatibility/4.4/versions.jd
+++ b/src/compatibility/4.4/versions.jd
@@ -11,9 +11,12 @@
 <p>Because subsequent releases of the Android software may revise this string,
 but not change any API behavior, such releases may not be accompanied by a new
 Compatibility Definition Document. This page lists the versions that are
-allowable by an Android 4.3-based system. The only permitted values for
+allowable by an Android 4.4-based system. The only permitted values for
 <code>android.os.Build.VERSION.RELEASE</code> for Android 4.4 are:</p>
 <ul>
 <li>4.4</li>
 <li>4.4.1</li>
+<li>4.4.2</li>
+<li>4.4.3</li>
+<li>4.4.4</li>
 </ul>
diff --git a/src/compatibility/5.0/versions.jd b/src/compatibility/5.0/versions.jd
new file mode 100644
index 0000000..78a11cb
--- /dev/null
+++ b/src/compatibility/5.0/versions.jd
@@ -0,0 +1,18 @@
+page.title=Permitted Version Strings for Android 5.0
+@jd:body
+
+<p>As described in Section 3.2.2 of the <a
+href="/compatibility/android-5.0-cdd.pdf">Android 5.0 Compatibility Definition</a>,
+only certain strings are allowable for the system property
+<code>android.os.Build.VERSION.RELEASE</code>. The reason for this is that
+applications and web sites may rely on predictable values for this string, and
+so that end users can easily and reliably identify the version of Android
+running on their devices.</p>
+<p>Because subsequent releases of the Android software may revise this string,
+but not change any API behavior, such releases may not be accompanied by a new
+Compatibility Definition Document. This page lists the versions that are
+allowable by an Android 5.0-based system. The only permitted values for
+<code>android.os.Build.VERSION.RELEASE</code> for Android 5.0 are:</p>
+<ul>
+<li>5.0</li>
+</ul>
diff --git a/src/compatibility/android-cts-manual.pdf b/src/compatibility/android-cts-manual.pdf
index c996b71..ac6a125 100644
--- a/src/compatibility/android-cts-manual.pdf
+++ b/src/compatibility/android-cts-manual.pdf
Binary files differ
diff --git a/src/compatibility/cts-intro.jd b/src/compatibility/cts-intro.jd
index 2813aef..f489fc6 100644
--- a/src/compatibility/cts-intro.jd
+++ b/src/compatibility/cts-intro.jd
@@ -2,7 +2,7 @@
 @jd:body
 
 <!--
-    Copyright 2013 The Android Open Source Project     
+    Copyright 2014 The Android Open Source Project     
 
     Licensed under the Apache License, Version 2.0 (the "License");    
     you may not use this file except in compliance with the License.   
@@ -140,7 +140,7 @@
 </tr>
 <tr>
 <td>Dalvik VM Tests</td>
-<td>The tests focus on testing the Dalvik VM.</td>
+<td>The tests focus on testing the Dalvik Executable Format.</td>
 </tr>
 <tr>
 <td>Platform Data Model</td>
diff --git a/src/devices/audio.jd b/src/devices/audio.jd
index 1b85d5e..660ec7e 100644
--- a/src/devices/audio.jd
+++ b/src/devices/audio.jd
@@ -78,7 +78,7 @@
     and that you must implement to have your audio hardware function correctly. The audio HAL
     interfaces are located in
 <code>hardware/libhardware/include/hardware</code>. See <a
-href="http://source.android.com/devices/reference/audio_8h_source.html">audio.h</a> for additional details.
+href="{@docRoot}devices/halref/audio_8h_source.html">audio.h</a> for additional details.
   </dd>
   <dt>
     Kernel Driver
diff --git a/src/devices/audio/images/ape_audio_tv_hdmi_tuner.png b/src/devices/audio/images/ape_audio_tv_hdmi_tuner.png
new file mode 100644
index 0000000..43a89ea
--- /dev/null
+++ b/src/devices/audio/images/ape_audio_tv_hdmi_tuner.png
Binary files differ
diff --git a/src/devices/audio/images/ape_audio_tv_tif.png b/src/devices/audio/images/ape_audio_tv_tif.png
new file mode 100644
index 0000000..f013cfa
--- /dev/null
+++ b/src/devices/audio/images/ape_audio_tv_tif.png
Binary files differ
diff --git a/src/devices/audio/images/ape_audio_tv_tuner.png b/src/devices/audio/images/ape_audio_tv_tuner.png
new file mode 100644
index 0000000..a25dcfb
--- /dev/null
+++ b/src/devices/audio/images/ape_audio_tv_tuner.png
Binary files differ
diff --git a/src/devices/audio_attributes.jd b/src/devices/audio_attributes.jd
new file mode 100644
index 0000000..473a04e
--- /dev/null
+++ b/src/devices/audio_attributes.jd
@@ -0,0 +1,257 @@
+page.title=Audio Attributes
+@jd:body
+
+<!--
+    Copyright 2014 The Android Open Source Project
+
+    Licensed under the Apache License, Version 2.0 (the "License");
+    you may not use this file except in compliance with the License.
+    You may obtain a copy of the License at
+
+        http://www.apache.org/licenses/LICENSE-2.0
+
+    Unless required by applicable law or agreed to in writing, software
+    distributed under the License is distributed on an "AS IS" BASIS,
+    WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+    See the License for the specific language governing permissions and
+    limitations under the License.
+-->
+<div id="qv-wrapper">
+  <div id="qv">
+    <h2>In this document</h2>
+    <ol id="auto-toc">
+    </ol>
+  </div>
+</div>
+
+<p>Audio players support attributes that define how the audio system handles routing, volume, and
+focus decisions for the specified source. Applications can attach attributes to an audio playback
+(such as music played by a streaming service or a notification for a new email) then pass the audio
+source attributes to the framework, where the audio system uses the attributes to make mixing
+decisions and to notify applications about the state of the system.</p>
+
+<p class="note"><strong>Note:</strong> Applications can also attach attributes to an audio
+recording (such as audio captured in a video recording), but this functionality is not exposed in
+the public API.</p>
+
+<p>In Android 4.4 and earlier, the framework made mixing decisions using only the audio stream type.
+However, basing such decisions on stream type was too limiting to produce quality output across
+multiple applications and devices. For example, on a mobile device, some applications (e.g.,
+Google Maps) played driving directions on the STREAM_MUSIC stream type; however, on mobile
+devices in projection mode (e.g., Android Auto), applications cannot mix driving directions with
+other media streams.</p>
+
+<p>Using the <a href="http://developer.android.com/reference/android/media/AudioAttributes.html">
+audio attribute API</a>, applications can now provide the audio system with detailed information
+about a specific audio source:</p>
+
+<ul>
+<li><b>Usage</b>. Specifies why the source is playing and controls routing, focus, and volume
+decisions.</li>
+<li><b>Content type</b>. Specifies what the source is playing (music, movie, speech,
+sonification, unknown).</li>
+<li><b>Flags</b>. Specifies how the source should be played. Includes support for audibility
+enforcement (camera shutter sounds required in some countries) and hardware audio/video
+synchronization.</li>
+</ul>
+
+<p>For dynamics processing, applications must distinguish between movie, music, and speech content.
+Information about the data itself may also matter, such as loudness and peak sample value.</p>
+
+<h2 id="using">Using attributes</h2>
+
+<p>Usage specifies the context in which the stream is used, providing information about why the
+sound is playing and what the sound is used for. Usage information is more expressive than a stream
+type and allows platforms or routing policies to refine volume or routing decisions.</p>
+
+<p>Supply one of the following usage values for any instance:</p>
+
+<ul>
+<li><code>USAGE_UNKNOWN</code></li>
+<li><code>USAGE_MEDIA</code></li>
+<li><code>USAGE_VOICE_COMMUNICATION</code></li>
+<li><code>USAGE_VOICE_COMMUNICATION_SIGNALLING</code></li>
+<li><code>USAGE_ALARM</code></li>
+<li><code>USAGE_NOTIFICATION</code></li>
+<li><code>USAGE_NOTIFICATION_RINGTONE</code></li>
+<li><code>USAGE_NOTIFICATION_COMMUNICATION_INSTANT</code></li>
+<li><code>USAGE_NOTIFICATION_COMMUNICATION_DELAYED</code></li>
+<li><code>USAGE_NOTIFICATION_EVENT</code></li>
+<li><code>USAGE_ASSISTANCE_ACCESSIBILITY</code></li>
+<li><code>USAGE_ASSISTANCE_NAVIGATION_GUIDANCE</code></li>
+<li><code>USAGE_ASSISTANCE_SONIFICATION</code></li>
+<li><code>USAGE_GAME</code></li>
+</ul>
+
+<p>Audio attribute usage values are mutually exclusive. For examples, refer to <code>
+<a href="http://developer.android.com/reference/android/media/AudioAttributes.html#USAGE_MEDIA">
+USAGE_MEDIA</a></code> and <code>
+<a href="http://developer.android.com/reference/android/media/AudioAttributes.html#USAGE_ALARM">
+USAGE_ALARM</a></code> definitions; for exceptions, refer to the <code>
+<a href="http://developer.android.com/reference/android/media/AudioAttributes.Builder.html">
+AudioAttributes.Builder</a></code> definition.</p>
+
+<h2 id="content-type">Content type</h2>
+
+<p>Content type defines what the sound is and expresses the general category of the content such as
+movie, speech, or beep/ringtone. The audio framework uses content type information to selectively
+configure audio post-processing blocks. While supplying the content type is optional, you should
+include type information whenever the content type is known, such as using
+<code>CONTENT_TYPE_MOVIE</code> for a movie streaming service or <code>CONTENT_TYPE_MUSIC</code>
+for a music playback application.</p>
+
+<p>Supply one of the following content type values for any instance:</p>
+
+<ul>
+<li><code>CONTENT_TYPE_UNKNOWN</code> (default)</li>
+<li><code>CONTENT_TYPE_MOVIE</code></li>
+<li><code>CONTENT_TYPE_MUSIC</code></li>
+<li><code>CONTENT_TYPE_SONIFICATION</code></li>
+<li><code>CONTENT_TYPE_SPEECH</code></li>
+</ul>
+
+<p>Audio attribute content type values are mutually exclusive. For details on content types,
+refer to the <a href="http://developer.android.com/reference/android/media/AudioAttributes.html">
+audio attribute API</a>.</p>
+
+<h2 id="flags">Flags</h2>
+
+<p>Flags specify how the audio framework applies effects to the audio playback. Supply one or more
+of the following flags for an instance:</p>
+
+<ul>
+<li><code>FLAG_AUDIBILITY_ENFORCED</code>. Requests the system ensure the audibility of the
+sound. Use to address the needs of legacy <code>STREAM_SYSTEM_ENFORCED</code> (such as forcing
+camera shutter sounds).</li>
+<li><code>FLAG_HW_AV_SYNC</code>. Requests the system select an output stream that supports hardware A/V
+synchronization.</li>
+</ul>
+
+<p>Audio attribute flags are non-exclusive (can be combined). For details on these flags,
+refer to the <a href="http://developer.android.com/reference/android/media/AudioAttributes.html">
+audio attribute API</a>.</p>
+
+<h2 id="example">Example</h2>
+
+<p>In this example, AudioAttributes.Builder defines the AudioAttributes to be used by a new
+AudioTrack instance:</p>
+
+<pre>
+AudioTrack myTrack = new AudioTrack(
+  new AudioAttributes.Builder()
+    .setUsage(AudioAttributes.USAGE_MEDIA)
+    .setContentType(AudioAttributes.CONTENT_TYPE_MUSIC)
+    .build(),
+  myFormat, myBuffSize, AudioTrack.MODE_STREAM, mySession);
+</pre>
+
+<h2 id="compatibility">Compatibility</h2>
+
+<p>Application developers should use audio attributes when creating or updating applications for
+Android 5.0. However, applications are not required to take advantage of attributes; they can
+handle legacy stream types only or remain unaware of attributes (e.g., a generic media player that
+doesn’t know anything about the content it’s playing).</p>
+
+<p>In such cases, the framework maintains backwards compatibility with older devices and Android
+releases by automatically translating legacy audio stream types to audio attributes. However, the
+framework does not enforce or guarantee this mapping across devices, manufacturers, or Android
+releases.</p>
+
+<p>Compatibility mappings:</p>
+
+<table>
+<tr>
+  <th>Android 5.0</th>
+  <th>Android 4.4 and earlier</th>
+</tr>
+<tr>
+  <td>
+  <code>CONTENT_TYPE_SPEECH</code><br>
+  <code>USAGE_VOICE_COMMUNICATION</code>
+  </td>
+  <td>
+  <code>STREAM_VOICE_CALL</code>
+  </td>
+</tr>
+<tr>
+  <td>
+  <code>CONTENT_TYPE_SONIFICATION</code><br>
+  <code>USAGE_ASSISTANCE_SONIFICATION</code>
+  </td>
+  <td>
+  <code>STREAM_SYSTEM</code>
+  </td>
+</tr>
+<tr>
+  <td>
+  <code>CONTENT_TYPE_SONIFICATION</code><br>
+  <code>USAGE_NOTIFICATION_RINGTONE</code>
+  </td>
+  <td>
+  <code>STREAM_RING</code>
+  </td>
+</tr>
+<tr>
+  <td>
+  <code>CONTENT_TYPE_MUSIC</code><br>
+  <code>USAGE_UNKNOWN</code><br>
+  <code>USAGE_MEDIA</code><br>
+  <code>USAGE_GAME</code><br>
+  <code>USAGE_ASSISTANCE_ACCESSIBILITY</code><br>
+  <code>USAGE_ASSISTANCE_NAVIGATION_GUIDANCE</code><br>
+  </td>
+  <td>
+  <code>STREAM_MUSIC</code>
+  </td>
+</tr>
+<tr>
+  <td>
+  <code>CONTENT_TYPE_SONIFICATION</code><br>
+  <code>USAGE_ALARM</code>
+  </td>
+  <td>
+  <code>STREAM_ALARM</code>
+  </td>
+</tr>
+<tr>
+  <td>
+  <code>CONTENT_TYPE_SONIFICATION</code><br>
+  <code>USAGE_NOTIFICATION</code><br>
+  <code>USAGE_NOTIFICATION_COMMUNICATION_REQUEST</code><br>
+  <code>USAGE_NOTIFICATION_COMMUNICATION_INSTANT</code><br>
+  <code>USAGE_NOTIFICATION_COMMUNICATION_DELAYED</code><br>
+  <code>USAGE_NOTIFICATION_EVENT</code><br>
+  </td>
+  <td>
+  <code>STREAM_NOTIFICATION</code>
+  </td>
+</tr>
+<tr>
+  <td>
+  <code>CONTENT_TYPE_SPEECH</code>
+  </td>
+  <td>
+  (@hide)<code> STREAM_BLUETOOTH_SCO</code>
+  </td>
+</tr>
+<tr>
+  <td>
+  <code>FLAG_AUDIBILITY_ENFORCED</code>
+  </td>
+  <td>
+  (@hide)<code> STREAM_SYSTEM_ENFORCED</code>
+  </td>
+</tr>
+<tr>
+  <td>
+  <code>CONTENT_TYPE_SONIFICATION</code><br>
+  <code>USAGE_VOICE_COMMUNICATION_SIGNALLING</code>
+  </td>
+  <td>
+  (@hide)<code> STREAM_DTMF</code>
+  </td>
+</tr>
+</table>
+
+<p class="note"><strong>Note:</strong> @hide streams are used internally by the framework but are
+not part of the public API.</p>
\ No newline at end of file
diff --git a/src/devices/audio_avoiding_pi.jd b/src/devices/audio_avoiding_pi.jd
index a7aab75..ec407d7 100644
--- a/src/devices/audio_avoiding_pi.jd
+++ b/src/devices/audio_avoiding_pi.jd
@@ -57,14 +57,14 @@
 avoiding artifacts due to underruns.
 </p>
 
-<h2 id="priorityInversion">Priority Inversion</h2>
+<h2 id="priorityInversion">Priority inversion</h2>
 
 <p>
 <a href="http://en.wikipedia.org/wiki/Priority_inversion">Priority inversion</a>
 is a classic failure mode of real-time systems,
 where a higher-priority task is blocked for an unbounded time waiting
-for a lower-priority task to release a resource such as [shared
-state protected by] a
+for a lower-priority task to release a resource such as (shared
+state protected by) a
 <a href="http://en.wikipedia.org/wiki/Mutual_exclusion">mutex</a>.
 </p>
 
@@ -79,7 +79,7 @@
 
 <p>
 In the Android audio implementation, priority inversion is most
-likely to occur in these places. And so we focus attention here:
+likely to occur in these places. And so you should focus your attention here:
 </p>
 
 <ul>
@@ -114,10 +114,10 @@
 similar to those for AudioTrack.
 </p>
 
-<h2 id="commonSolutions">Common Solutions</h2>
+<h2 id="commonSolutions">Common solutions</h2>
 
 <p>
-The typical solutions listed in the Wikipedia article include:
+The typical solutions include:
 </p>
 
 <ul>
@@ -145,18 +145,17 @@
 in Linux kernel, but are not currently exposed by the Android C
 runtime library
 <a href="http://en.wikipedia.org/wiki/Bionic_(software)">Bionic</a>.
-We chose not to use them in the audio system
-because they are relatively heavyweight, and because they rely on
-a trusted client.
+They are not used in the audio system because they are relatively heavyweight,
+and because they rely on a trusted client.
 </p>
 
 <h2 id="androidTechniques">Techniques used by Android</h2>
 
 <p>
-We started with "try lock" and lock with timeout. These are
+Experiments started with "try lock" and lock with timeout. These are
 non-blocking and bounded blocking variants of the mutex lock
-operation. Try lock and lock with timeout worked fairly well for
-us, but were susceptible to a couple of obscure failure modes: the
+operation. Try lock and lock with timeout worked fairly well but were
+susceptible to a couple of obscure failure modes: the
 server was not guaranteed to be able to access the shared state if
 the client happened to be busy, and the cumulative timeout could
 be too long if there was a long sequence of unrelated locks that
@@ -182,10 +181,9 @@
 In practice, we've found that the retries are not a problem.
 </p>
 
-<p>
-<strong>Note</strong>: Atomic operations and their interactions with memory barriers
-are notoriously badly misunderstood and used incorrectly. We include
-these methods here for completeness but recommend you also read the article
+<p class="note"><strong>Note:</strong> Atomic operations and their interactions with memory barriers
+are notoriously badly misunderstood and used incorrectly. We include these methods
+here for completeness but recommend you also read the article
 <a href="https://developer.android.com/training/articles/smp.html">
 SMP Primer for Android</a>
 for further information.
@@ -249,7 +247,7 @@
 
 </ul>
 
-<h2 id="nonBlockingAlgorithms">Non-Blocking Algorithms</h2>
+<h2 id="nonBlockingAlgorithms">Non-blocking algorithms</h2>
 
 <p>
 <a href="http://en.wikipedia.org/wiki/Non-blocking_algorithm">Non-blocking algorithms</a>
@@ -288,9 +286,8 @@
 </p>
 
 <p>
-For developers, we may update some of the sample OpenSL ES application
-code to use non-blocking algorithms or reference a non-Android open source
-library.
+For developers, some of the sample OpenSL ES application code should be updated to
+use non-blocking algorithms or reference a non-Android open source library.
 </p>
 
 <h2 id="tools">Tools</h2>
@@ -312,7 +309,7 @@
 not tell you in advance.
 </p>
 
-<h2 id="aFinalWord">A Final Word</h2>
+<h2 id="aFinalWord">A final word</h2>
 
 <p>
 After all of this discussion, don't be afraid of mutexes. Mutexes
diff --git a/src/devices/audio_debugging.jd b/src/devices/audio_debugging.jd
index 1ac9fb3..9c9b2fb 100644
--- a/src/devices/audio_debugging.jd
+++ b/src/devices/audio_debugging.jd
@@ -54,12 +54,12 @@
 
 <ol>
 <li><code>cd frameworks/av/services/audioflinger</code></li>
-<li>edit <code>Configuration.h</code></li>
-<li>uncomment <code>#define TEE_SINK</code></li>
-<li>re-build <code>libaudioflinger.so</code></li>
+<li>Edit <code>Configuration.h</code>.</li>
+<li>Uncomment <code>#define TEE_SINK</code>.</li>
+<li>Re-build <code>libaudioflinger.so</code>.</li>
 <li><code>adb root</code></li>
 <li><code>adb remount</code></li>
-<li>push or sync the new <code>libaudioflinger.so</code> to the device's <code>/system/lib</code></li>
+<li>Push or sync the new <code>libaudioflinger.so</code> to the device's <code>/system/lib</code>.</li>
 </ol>
 
 <h3>Run-time setup</h3>
@@ -87,7 +87,7 @@
 </code>
 </li>
 <li><code>echo af.tee=# &gt; /data/local.prop</code>
-<br />where the <code>af.tee</code> value is a number described below
+<br />Where the <code>af.tee</code> value is a number described below.
 </li>
 <li><code>chmod 644 /data/local.prop</code></li>
 <li><code>reboot</code></li>
@@ -115,17 +115,17 @@
 <h3>Test and acquire data</h3>
 
 <ol>
-<li>Run your audio test</li>
+<li>Run your audio test.</li>
 <li><code>adb shell dumpsys media.audio_flinger</code></li>
 <li>Look for a line in dumpsys output such as this:<br />
 <code>tee copied to /data/misc/media/20131010101147_2.wav</code>
-<br />This is a PCM .wav file</br>
+<br />This is a PCM .wav file.
 </li>
 <li><code>adb pull</code> any <code>/data/misc/media/*.wav</code> files of interest;
 note that track-specific dump filenames do not appear in the dumpsys output,
-but are still saved to <code>/data/misc/media</code> upon track closure
+but are still saved to <code>/data/misc/media</code> upon track closure.
 </li>
-<li>Review the dump files for privacy concerns before sharing with others</li>
+<li>Review the dump files for privacy concerns before sharing with others.</li>
 </ol>
 
 <h4>Suggestions</h4>
@@ -133,15 +133,15 @@
 <p>Try these ideas for more useful results:</p>
 
 <ul>
-<li>Disable touch sounds and key clicks</li>
-<li>Maximize all volumes</li>
+<li>Disable touch sounds and key clicks.</li>
+<li>Maximize all volumes.</li>
 <li>Disable apps that make sound or record from microphone,
-if they are not of interest to your test
+if they are not of interest to your test.
 </li>
 <li>Track-specific dumps are only saved when the track is closed;
 you may need to force close an app in order to dump its track-specific data
 <li>Do the <code>dumpsys</code> immediately after test;
-there is a limited amount of recording space available</li>
+there is a limited amount of recording space available.</li>
 <li>To make sure you don't lose your dump files,
 upload them to your host periodically.
 Only a limited number of dump files are preserved;
@@ -155,10 +155,10 @@
 Restore your build and device as follows:
 </p>
 <ol>
-<li>Revert the source code changes to <code>Configuration.h</code></li>
-<li>Re-build <code>libaudioflinger.so</code></li>
+<li>Revert the source code changes to <code>Configuration.h</code>.</li>
+<li>Re-build <code>libaudioflinger.so</code>.</li>
 <li>Push or sync the restored <code>libaudioflinger.so</code>
-to the device's <code>/system/lib</code>
+to the device's <code>/system/lib</code>.
 </li>
 <li><code>adb shell</code></li>
 <li><code>rm /data/local.prop</code></li>
@@ -172,7 +172,7 @@
 
 <p>
 The standard Java language logging API in Android SDK is
-<a class="external-link" href="http://developer.android.com/reference/android/util/Log.html" target="_android">android.util.Log</a>.
+<a href="http://developer.android.com/reference/android/util/Log.html">android.util.Log</a>.
 </p>
 
 <p>
@@ -243,15 +243,14 @@
 <h3>Benefits</h3>
 
 <p>
-The benefits of the <code>media.log</code> system include:
+The benefits of the <code>media.log</code> system are that it:
 </p>
 <ul>
-<li>doesn't spam the main log unless and until it is needed</li>
-<li>can be examined even when <code>mediaserver</code> crashes or hangs</li>
-<li>is non-blocking per timeline</li>
-<li>
-less disturbance to performance
-(of course no form of logging is completely non-intrusive)
+<li>Doesn't spam the main log unless and until it is needed.</li>
+<li>Can be examined even when <code>mediaserver</code> crashes or hangs.</li>
+<li>Is non-blocking per timeline.</li>
+<li>Offers less disturbance to performance.
+(Of course no form of logging is completely non-intrusive.)
 </li>
 </ul>
 
@@ -266,9 +265,9 @@
 Notable points:
 </p>
 <ul>
-<li><code>init</code> forks and execs <code>mediaserver</code></li>
-<li><code>init</code> detects the death of <code>mediaserver</code>, and re-forks as necessary</li>
-<li><code>ALOGx</code> logging is not shown
+<li><code>init</code> forks and execs <code>mediaserver</code>.</li>
+<li><code>init</code> detects the death of <code>mediaserver</code>, and re-forks as necessary.</li>
+<li><code>ALOGx</code> logging is not shown.
 </ul>
 
 <p>
@@ -363,8 +362,7 @@
 After you have added the logs, re-build AudioFlinger.
 </p>
 
-<b>Caution:</b>
-<p>
+<p class="caution"><strong>Caution:</strong>
 A separate <code>NBLog::Writer</code> timeline is required per thread,
 to ensure thread safety, since timelines omit mutexes by design.  If you
 want more than one thread to use the same timeline, you can protect with an
diff --git a/src/devices/audio_implement.jd b/src/devices/audio_implement.jd
index 26aa5f5..4535640 100644
--- a/src/devices/audio_implement.jd
+++ b/src/devices/audio_implement.jd
@@ -2,7 +2,7 @@
 @jd:body
 
 <!--
-    Copyright 2013 The Android Open Source Project
+    Copyright 2014 The Android Open Source Project
 
     Licensed under the Apache License, Version 2.0 (the "License");
     you may not use this file except in compliance with the License.
@@ -24,63 +24,58 @@
   </div>
 </div>
 
-<p>
-  This page exlains how to implement the audio Hardware Abstraction Layer (HAL)
-and configure the shared library.
-</p>
+<p>This page explains how to implement the audio Hardware Abstraction Layer (HAL) and configure the
+shared library.</p>
 
-<h2 id="implementing">
-  Implementing the HAL
-</h2>
-<p>
-  The audio HAL is composed of three different interfaces that you must implement:
-</p>
+<h2 id="implementing">Implementing the HAL</h2>
+
+<p>The audio HAL is composed of three different interfaces that you must implement:</p>
+
 <ul>
-  <li>
-    <code>hardware/libhardware/include/hardware/audio.h</code> - represents the main functions of
-    an audio device.
-  </li>
-  <li>
-    <code>hardware/libhardware/include/hardware/audio_policy.h</code> - represents the audio policy
-    manager, which handles things like audio routing and volume control policies.
-  </li>
-  <li>
-    <code>hardware/libhardware/include/hardware/audio_effect.h</code> - represents effects that can
-    be applied to audio such as downmixing, echo, or noise suppression.
-  </li>
+<li><code>hardware/libhardware/include/hardware/audio.h</code> - represents the main functions
+of an audio device.</li>
+<li><code>hardware/libhardware/include/hardware/audio_policy.h</code> - represents the audio policy
+manager, which handles things like audio routing and volume control policies.</li>
+<li><code>hardware/libhardware/include/hardware/audio_effect.h</code> - represents effects that can
+be applied to audio such as downmixing, echo, or noise suppression.</li>
 </ul>
-<p>See the implementation for the Galaxy Nexus at <code>device/samsung/tuna/audio</code> for an example.</p>
+
+<p>For an example, refer to the implementation for the Galaxy Nexus at
+<code>device/samsung/tuna/audio</code>.</p>
 
 <p>In addition to implementing the HAL, you need to create a
-  <code>device/&lt;company_name&gt;/&lt;device_name&gt;/audio/audio_policy.conf</code> file
-  that declares the audio devices present on your product. For an example, see the file for
-  the Galaxy Nexus audio hardware in <code>device/samsung/tuna/audio/audio_policy.conf</code>. 
-Also, see
-  the <code>system/core/include/system/audio.h</code> and <code>system/core/include/system/audio_policy.h</code>
-   header files for a reference of the properties that you can define.
-</p>
-<h3 id="multichannel">Multi-channel support</h3>
-<p>If your hardware and driver supports multichannel audio via HDMI, you can output the audio stream
-  directly to the audio hardware. This bypasses the AudioFlinger mixer so it doesn't get downmixed to two channels. 
-  
-  <p>
-  The audio HAL must expose whether an output stream profile supports multichannel audio capabilities.
-  If the HAL exposes its capabilities, the default policy manager allows multichannel playback over 
-  HDMI.</p>
- <p>For more implementation details, see the
-<code>device/samsung/tuna/audio/audio_hw.c</code> in the Android 4.1 release.</p>
+<code>device/&lt;company_name&gt;/&lt;device_name&gt;/audio/audio_policy.conf</code> file that
+declares the audio devices present on your product. For an example, see the file for the Galaxy
+Nexus audio hardware in <code>device/samsung/tuna/audio/audio_policy.conf</code>. Also, see the
+<code>system/core/include/system/audio.h</code> and
+<code>system/core/include/system/audio_policy.h</code> header files for a reference of the
+properties that you can define.</p>
 
-  <p>
-  To specify that your product contains a multichannel audio output, edit the <code>audio_policy.conf</code> file to describe the multichannel
-  output for your product. The following is an example from the Galaxy Nexus that shows a "dynamic" channel mask, which means the audio policy manager
-  queries the actual channel masks supported by the HDMI sink after connection. You can also specify a static channel mask like <code>AUDIO_CHANNEL_OUT_5POINT1</code>
-  </p>
+<h3 id="multichannel">Multi-channel support</h3>
+
+<p>If your hardware and driver supports multichannel audio via HDMI, you can output the audio
+stream  directly to the audio hardware. This bypasses the AudioFlinger mixer so it doesn't get
+downmixed to two channels.</p>
+
+<p>The audio HAL must expose whether an output stream profile supports multichannel audio
+capabilities. If the HAL exposes its capabilities, the default policy manager allows multichannel
+playback over HDMI.</p>
+
+<p>For more implementation details, see the <code>device/samsung/tuna/audio/audio_hw.c</code> in
+the Android 4.1 release.</p>
+
+<p>To specify that your product contains a multichannel audio output, edit the
+<code>audio_policy.conf</code> file to describe the multichannel output for your product. The
+following is an example from the Galaxy Nexus that shows a "dynamic" channel mask, which means the
+audio policy manager queries the actual channel masks supported by the HDMI sink after connection.
+You can also specify a static channel mask like <code>AUDIO_CHANNEL_OUT_5POINT1</code>.</p>
+
 <pre>
 audio_hw_modules {
   primary {
     outputs {
         ...
-        hdmi {  
+        hdmi {
           sampling_rates 44100|48000
           channel_masks dynamic
           formats AUDIO_FORMAT_PCM_16_BIT
@@ -95,42 +90,38 @@
 }
 </pre>
 
-
-  <p>AudioFlinger's mixer downmixes the content to stereo
-    automatically when sent to an audio device that does not support multichannel audio.</p>
+<p>AudioFlinger's mixer downmixes the content to stereo automatically when sent to an audio device
+that does not support multichannel audio.</p>
 
 <h3 id="codecs">Media codecs</h3>
 
-<p>Ensure the audio codecs your hardware and drivers support are properly declared for your product. See
-  <a href="media.html#expose"> Exposing Codecs to the Framework</a> for information on how to do this.
-</p>
+<p>Ensure the audio codecs your hardware and drivers support are properly declared for your
+product. For details on declaring supported codecs, see <a href="media.html#expose">Exposing Codecs
+to the Framework</a>.</p>
 
-<h2 id="configuring">
-  Configuring the shared library
-</h2>
-<p>
-  You need to package the HAL implementation into a shared library and copy it to the
-  appropriate location by creating an <code>Android.mk</code> file:
-</p>
+<h2 id="configuring">Configuring the shared library</h2>
+
+<p>You need to package the HAL implementation into a shared library and copy it to the appropriate
+location by creating an <code>Android.mk</code> file:</p>
+
 <ol>
-  <li>Create a <code>device/&lt;company_name&gt;/&lt;device_name&gt;/audio</code> directory
-  to contain your library's source files.
-  </li>
-  <li>Create an <code>Android.mk</code> file to build the shared library. Ensure that the
-  Makefile contains the following line:
+<li>Create a <code>device/&lt;company_name&gt;/&lt;device_name&gt;/audio</code> directory to
+contain your library's source files.</li>
+<li>Create an <code>Android.mk</code> file to build the shared library. Ensure that the Makefile
+contains the following line:
 <pre>
 LOCAL_MODULE := audio.primary.&lt;device_name&gt;
 </pre>
-    <p>
-      Notice your library must be named <code>audio_primary.&lt;device_name&gt;.so</code> so
-      that Android can correctly load the library. The "<code>primary</code>" portion of this
-      filename indicates that this shared library is for the primary audio hardware located on the
-      device. The module names <code>audio.a2dp.&lt;device_name&gt;</code> and
-      <code>audio.usb.&lt;device_name&gt;</code> are also available for bluetooth and USB audio
-      interfaces. Here is an example of an <code>Android.mk</code> from the Galaxy
-      Nexus audio hardware:
-    </p>
-    <pre>
+
+<p>Notice your library must be named <code>audio.primary.&lt;device_name&gt;.so</code> so
+that Android can correctly load the library. The "<code>primary</code>" portion of this filename
+indicates that this shared library is for the primary audio hardware located on the device. The
+module names <code>audio.a2dp.&lt;device_name&gt;</code> and
+<code>audio.usb.&lt;device_name&gt;</code> are also available for bluetooth and USB audio
+interfaces. Here is an example of an <code>Android.mk</code> from the Galaxy Nexus audio hardware:
+</p>
+
+<pre>
 LOCAL_PATH := $(call my-dir)
 
 include $(CLEAR_VARS)
@@ -147,59 +138,73 @@
 
 include $(BUILD_SHARED_LIBRARY)
 </pre>
-  </li>
-  <li>If your product supports low latency audio as specified by the Android CDD, copy the
-  corresponding XML feature file into your product. For example, in your product's
-   <code>device/&lt;company_name&gt;/&lt;device_name&gt;/device.mk</code> 
-  Makefile:
-    <pre>
+
+</li>
+
+<li>If your product supports low latency audio as specified by the Android CDD, copy the
+corresponding XML feature file into your product. For example, in your product's
+<code>device/&lt;company_name&gt;/&lt;device_name&gt;/device.mk</code> Makefile:
+
+<pre>
 PRODUCT_COPY_FILES := ...
 
 PRODUCT_COPY_FILES += \
 frameworks/native/data/etc/android.android.hardware.audio.low_latency.xml:system/etc/permissions/android.hardware.audio.low_latency.xml \
 </pre>
-  </li>
- 
-  <li>Copy the <code>audio_policy.conf</code> file that you created earlier to the <code>system/etc/</code> directory
-  in your product's <code>device/&lt;company_name&gt;/&lt;device_name&gt;/device.mk</code> 
-  Makefile. For example:
-    <pre>
+
+</li>
+
+<li>Copy the <code>audio_policy.conf</code> file that you created earlier to the
+<code>system/etc/</code> directory in your product's
+<code>device/&lt;company_name&gt;/&lt;device_name&gt;/device.mk</code> Makefile. For example:
+
+<pre>
 PRODUCT_COPY_FILES += \
         device/samsung/tuna/audio/audio_policy.conf:system/etc/audio_policy.conf
 </pre>
-  </li>
-  <li>Declare the shared modules of your audio HAL that are required by your product in the product's
-    <code>device/&lt;company_name&gt;/&lt;device_name&gt;/device.mk</code> Makefile. For example, the
-  Galaxy Nexus requires the primary and bluetooth audio HAL modules:
+
+</li>
+
+<li>Declare the shared modules of your audio HAL that are required by your product in the
+product's <code>device/&lt;company_name&gt;/&lt;device_name&gt;/device.mk</code> Makefile. For
+example, the Galaxy Nexus requires the primary and bluetooth audio HAL modules:
+
 <pre>
 PRODUCT_PACKAGES += \
         audio.primary.tuna \
         audio.a2dp.default
 </pre>
-  </li>
+
+</li>
 </ol>
 
 <h2 id="preprocessing">Audio pre-processing effects</h2>
-<p>
-The Android platform provides audio effects on supported devices in the
-<a href="http://developer.android.com/reference/android/media/audiofx/package-summary.html">audiofx</a>
-package, which is available for developers to access. For example, on the Nexus 10, the following pre-processing effects are supported: </p>
+
+<p>The Android platform provides audio effects on supported devices in the
+<a href="http://developer.android.com/reference/android/media/audiofx/package-summary.html">audiofx
+</a> package, which is available for developers to access. For example, on the Nexus 10, the
+following pre-processing effects are supported:</p>
+
 <ul>
-  <li><a
-href="http://developer.android.com/reference/android/media/audiofx/AcousticEchoCanceler.html">Acoustic Echo Cancellation</a></li>
-  <li><a
-href="http://developer.android.com/reference/android/media/audiofx/AutomaticGainControl.html">Automatic Gain Control</a></li>
-  <li><a
-href="http://developer.android.com/reference/android/media/audiofx/NoiseSuppressor.html">Noise Suppression</a></li>
+<li>
+<a href="http://developer.android.com/reference/android/media/audiofx/AcousticEchoCanceler.html">
+Acoustic Echo Cancellation</a></li>
+<li>
+<a href="http://developer.android.com/reference/android/media/audiofx/AutomaticGainControl.html">
+Automatic Gain Control</a></li>
+<li>
+<a href="http://developer.android.com/reference/android/media/audiofx/NoiseSuppressor.html">
+Noise Suppression</a></li>
 </ul>
 
 
-<p>Pre-processing effects are always paired with the use case mode in which the pre-processing is requested. In Android
-app development, a use case is referred to as an <code>AudioSource</code>; and app developers
-request to use the <code>AudioSource</code> abstraction instead of the actual audio hardware device.
-The Android Audio Policy Manager maps an <code>AudioSource</code> to the actual hardware with <code>AudioPolicyManagerBase::getDeviceForInputSource(int 
-inputSource)</code>. The following sources are exposed to developers:
-</p>
+<p>Pre-processing effects are paired with the use case mode in which the pre-processing is requested.
+In Android app development, a use case is referred to as an <code>AudioSource</code>; and app
+developers request to use the <code>AudioSource</code> abstraction instead of the actual audio
+hardware device. The Android Audio Policy Manager maps an <code>AudioSource</code> to the actual
+hardware with <code>AudioPolicyManagerBase::getDeviceForInputSource(int inputSource)</code>. The
+following sources are exposed to developers:</p>
+
 <ul>
 <li><code>android.media.MediaRecorder.AudioSource.CAMCORDER</code></li>
 <li><code>android.media.MediaRecorder.AudioSource.VOICE_COMMUNICATION</code></li>
@@ -208,25 +213,26 @@
 <li><code>android.media.MediaRecorder.AudioSource.VOICE_UPLINK</code></li>
 <li><code>android.media.MediaRecorder.AudioSource.VOICE_RECOGNITION</code></li>
 <li><code>android.media.MediaRecorder.AudioSource.MIC</code></li>
-<li><code>android.media.MediaRecorder.AudioSource.DEFAULT</code></li>
-</ul>
+<li><code>android.media.MediaRecorder.AudioSource.DEFAULT</code></li> </ul>
 
-<p>The default pre-processing effects that are applied for each <code>AudioSource</code> are
-specified in the <code>/system/etc/audio_effects.conf</code> file. To specify
-your own default effects for every <code>AudioSource</code>, create a <code>/system/vendor/etc/audio_effects.conf</code> file
-and specify any pre-processing effects that you need to turn on. For an example, 
-see the implementation for the Nexus 10 in <code>device/samsung/manta/audio_effects.conf</code></p>
+<p>The default pre-processing effects applied for each <code>AudioSource</code> are specified in
+the <code>/system/etc/audio_effects.conf</code> file. To specify your own default effects for every
+<code>AudioSource</code>, create a <code>/system/vendor/etc/audio_effects.conf</code> file and
+specify the pre-processing effects to turn on. For an example, see the implementation for the Nexus
+10 in <code>device/samsung/manta/audio_effects.conf</code>. AudioEffect instances acquire and
+release a session when created and destroyed, enabling the effects (such as the Loudness Enhancer)
+to persist throughout the duration of the session.</p>
 
-<p class="warning"><strong>Warning:</strong> For the <code>VOICE_RECOGNITION</code> use case, do not enable
-the noise suppression pre-processing effect. It should not be turned on by default when recording from this audio source,
-and you should not enable it in your own audio_effects.conf file. Turning on the effect by default will cause the device to fail
-the <a href="/compatibility/index.html">compatibility requirement</a> regardless of whether this was on by default due to 
-configuration file, or the audio HAL implementation's default behavior.</p>
+<p class="warning"><strong>Warning:</strong> For the <code>VOICE_RECOGNITION</code> use case, do
+not enable the noise suppression pre-processing effect. It should not be turned on by default when
+recording from this audio source, and you should not enable it in your own audio_effects.conf file.
+Turning on the effect by default will cause the device to fail the <a
+href="/compatibility/index.html"> compatibility requirement</a> regardless of whether this was on by
+default due to configuration file , or the audio HAL implementation's default behavior.</p>
 
-<p>The following example enables pre-processing for the VoIP <code>AudioSource</code> and Camcorder <code>AudioSource</code>.
-By declaring the <code>AudioSource</code> configuration in this manner, the
-framework will automatically request from the audio HAL the use of those
-effects.</p>
+<p>The following example enables pre-processing for the VoIP <code>AudioSource</code> and Camcorder
+<code>AudioSource</code>. By declaring the <code>AudioSource</code> configuration in this manner,
+the framework will automatically request from the audio HAL the use of those effects.</p>
 
 <pre>
 pre_processing {
@@ -241,10 +247,11 @@
 </pre>
 
 <h3 id="tuning">Source tuning</h3>
-<p>For <code>AudioSource</code> tuning, there are no explicit requirements on audio gain or audio processing
-with the exception of voice recognition (<code>VOICE_RECOGNITION</code>).</p>
 
-<p>The following are the requirements for voice recognition:</p>
+<p>For <code>AudioSource</code> tuning, there are no explicit requirements on audio gain or audio
+processing with the exception of voice recognition (<code>VOICE_RECOGNITION</code>).</p>
+
+<p>The requirements for voice recognition are:</p>
 
 <ul>
 <li>"flat" frequency response (+/- 3dB) from 100Hz to 4kHz</li>
@@ -252,32 +259,38 @@
 <li>level tracks linearly from -18dB to +12dB relative to 90dB SPL</li>
 <li>THD < 1% (90dB SPL in 100 to 4000Hz range)</li>
 <li>8kHz sampling rate (anti-aliasing)</li>
-<li>Effects / pre-processing must be disabled by default</li>
+<li>Effects/pre-processing must be disabled by default</li>
 </ul>
 
 <p>Examples of tuning different effects for different sources are:</p>
 
 <ul>
-  <li>Noise Suppressor
-    <ul>
-      <li>Tuned for wind noise suppressor for <code>CAMCORDER</code></li>
-      <li>Tuned for stationary noise suppressor for <code>VOICE_COMMUNICATION</code></li>
-    </ul>
-  </li>
-  <li>Automatic Gain Control
-    <ul>
-      <li>Tuned for close-talk for <code>VOICE_COMMUNICATION</code> and main phone mic</li>
-      <li>Tuned for far-talk for <code>CAMCORDER</code></li>
-    </ul>
-  </li>
+<li>Noise Suppressor
+<ul>
+<li>Tuned for wind noise suppressor for <code>CAMCORDER</code></li>
+<li>Tuned for stationary noise suppressor for <code>VOICE_COMMUNICATION</code></li>
+</ul>
+</li>
+<li>Automatic Gain Control
+<ul>
+<li>Tuned for close-talk for <code>VOICE_COMMUNICATION</code> and main phone mic</li>
+<li>Tuned for far-talk for <code>CAMCORDER</code></li>
+</ul>
+</li>
 </ul>
 
 <h3 id="more">More information</h3>
-<p>For more information, see:</p>
-<ul>
-<li>Android documentation for <a href="http://developer.android.com/reference/android/media/audiofx/package-summary.html">audiofx 
-package</a>
 
-<li>Android documentation for <a href="http://developer.android.com/reference/android/media/audiofx/NoiseSuppressor.html">Noise Suppression audio effect</a></li>
+<p>For more information, see:</p>
+
+<ul>
+<li>Android documentation for
+<a href="http://developer.android.com/reference/android/media/audiofx/package-summary.html">
+audiofx package</a>
+
+<li>Android documentation for
+<a href="http://developer.android.com/reference/android/media/audiofx/NoiseSuppressor.html">
+Noise Suppression audio effect</a></li>
+
 <li><code>device/samsung/manta/audio_effects.conf</code> file for the Nexus 10</li>
 </ul>
diff --git a/src/devices/audio_src.jd b/src/devices/audio_src.jd
index d03609b..6238770 100644
--- a/src/devices/audio_src.jd
+++ b/src/devices/audio_src.jd
@@ -24,9 +24,11 @@
   </div>
 </div>
 
+<h2 id="srcIntro">Introduction</h2>
+
 <p>
 See the Wikipedia article
-<a class="external-link" href="http://en.wikipedia.org/wiki/Resampling_(audio)" target="_android">Resampling (audio)</a>
+<a href="http://en.wikipedia.org/wiki/Resampling_(audio)">Resampling (audio)</a>
 for a generic definition of sample rate conversion, also known as "resampling."
 The remainder of this article describes resampling within Android.
 </p>
@@ -84,49 +86,6 @@
 and identifies where they should typically be used.
 </p>
 
-<h2 id="srcTerms">Terminology</h2>
-
-<dl>
-
-<dt>downsample</dt>
-<dd>to resample, where sink sample rate &lt; source sample rate</dd>
-
-<dt>Nyquist frequency</dt>
-<dd>
-The Nyquist frequency, equal to 1/2 of a given sample rate, is the
-maximum frequency component that can be represented by a discretized
-signal at that sample rate.  For example, the human hearing range is
-typically assumed to extend up to approximately 20 kHz, and so a digital
-audio signal must have a sample rate of at least 40 kHz to represent that
-range.  In practice, sample rates of 44.1 kHz and 48 kHz are commonly
-used, with Nyquist frequencies of 22.05 kHz and 24 kHz respectively.
-See the Wikipedia articles
-<a class="external-link" href="http://en.wikipedia.org/wiki/Nyquist_frequency" target="_android">Nyquist frequency</a>
-and
-<a class="external-link" href="http://en.wikipedia.org/wiki/Hearing_range" target="_android">Hearing range</a>
-for more information.
-</dd>
-
-<dt>resampler</dt>
-<dd>synonym for sample rate converter</dd>
-
-<dt>resampling</dt>
-<dd>the process of converting sample rate</dd>
-
-<dt>sample rate converter</dt>
-<dd>a module that resamples</dd>
-
-<dt>sink</dt>
-<dd>the output of a resampler</dd>
-
-<dt>source</dt>
-<dd>the input to a resampler</dd>
-
-<dt>upsample</dt>
-<dd>to resample, where sink sample rate &gt; source sample rate</dd>
-
-</dl>
-
 <h2 id="srcResamplers">Resampler implementations</h2>
 
 <p>
diff --git a/src/devices/audio_terminology.jd b/src/devices/audio_terminology.jd
index 25cfb73..753d402 100644
--- a/src/devices/audio_terminology.jd
+++ b/src/devices/audio_terminology.jd
@@ -2,7 +2,7 @@
 @jd:body
 
 <!--
-    Copyright 2013 The Android Open Source Project
+    Copyright 2014 The Android Open Source Project
 
     Licensed under the Apache License, Version 2.0 (the "License");
     you may not use this file except in compliance with the License.
@@ -216,24 +216,18 @@
 <dd>
 A short range wireless technology.
 The major audio-related
-<a class="external-link" href="http://en.wikipedia.org/wiki/Bluetooth_profile"
-target="_android">Bluetooth profiles</a>
+<a href="http://en.wikipedia.org/wiki/Bluetooth_profile">Bluetooth profiles</a>
 and
-<a class="external-link" href="http://en.wikipedia.org/wiki/Bluetooth_protocols"
-target="_android">Bluetooth protocols</a>
+<a href="http://en.wikipedia.org/wiki/Bluetooth_protocols">Bluetooth protocols</a>
 are described at these Wikipedia articles:
 
 <ul>
 
-<li><a class="external-link"
-href="http://en.wikipedia.org/wiki/Bluetooth_profile#Advanced_Audio_Distribution_Profile_.28A2DP.29"
-target="_android">A2DP</a>
+<li><a href="http://en.wikipedia.org/wiki/Bluetooth_profile#Advanced_Audio_Distribution_Profile_.28A2DP.29">A2DP</a>
 for music
 </li>
 
-<li><a class="external-link"
-href="http://en.wikipedia.org/wiki/Bluetooth_protocols#Synchronous_connection-oriented_.28SCO.29_link"
-target="_android">SCO</a>
+<li><a href="http://en.wikipedia.org/wiki/Bluetooth_protocols#Synchronous_connection-oriented_.28SCO.29_link">SCO</a>
 for telephony
 </li>
 
@@ -272,14 +266,13 @@
 <dt>S/PDIF</dt>
 <dd>
 Sony/Philips Digital Interface Format is an interconnect for uncompressed PCM.
-See Wikipedia article <a class="external-link" href="http://en.wikipedia.org/wiki/S/PDIF"
-target="_android">S/PDIF</a>.
+See Wikipedia article <a href="http://en.wikipedia.org/wiki/S/PDIF">S/PDIF</a>.
 </dd>
 
 <dt>USB</dt>
 <dd>
 Universal Serial Bus.
-See Wikipedia article <a class="external-link" href="http://en.wikipedia.org/wiki/USB" target="_android">USB</a>.
+See Wikipedia article <a href="http://en.wikipedia.org/wiki/USB">USB</a>.
 </dd>
 
 </dl>
@@ -294,14 +287,12 @@
 
 See these Wikipedia articles:
 <ul>
-<li><a class="external-link" href="http://en.wikipedia.org/wiki/General-purpose_input/output"
-target="_android">GPIO</a></li>
-<li><a class="external-link" href="http://en.wikipedia.org/wiki/I%C2%B2C" target="_android">I²C</a></li>
-<li><a class="external-link" href="http://en.wikipedia.org/wiki/I%C2%B2S" target="_android">I²S</a></li>
-<li><a class="external-link" href="http://en.wikipedia.org/wiki/McASP" target="_android">McASP</a></li>
-<li><a class="external-link" href="http://en.wikipedia.org/wiki/SLIMbus" target="_android">SLIMbus</a></li>
-<li><a class="external-link" href="http://en.wikipedia.org/wiki/Serial_Peripheral_Interface_Bus"
-target="_android">SPI</a></li>
+<li><a href="http://en.wikipedia.org/wiki/General-purpose_input/output">GPIO</a></li>
+<li><a href="http://en.wikipedia.org/wiki/I%C2%B2C">I²C</a></li>
+<li><a href="http://en.wikipedia.org/wiki/I%C2%B2S">I²S</a></li>
+<li><a href="http://en.wikipedia.org/wiki/McASP">McASP</a></li>
+<li><a href="http://en.wikipedia.org/wiki/SLIMbus">SLIMbus</a></li>
+<li><a href="http://en.wikipedia.org/wiki/Serial_Peripheral_Interface_Bus">SPI</a></li>
 </ul>
 
 <h3 id="signalTerms">Audio Signal Path</h3>
@@ -322,8 +313,7 @@
 be implemented that way.  An ADC is usually preceded by a low-pass filter
 to remove any high frequency components that are not representable using
 the desired sample rate.  See Wikipedia article
-<a class="external-link" href="http://en.wikipedia.org/wiki/Analog-to-digital_converter"
-target="_android">Analog-to-digital_converter</a>.
+<a href="http://en.wikipedia.org/wiki/Analog-to-digital_converter">Analog-to-digital_converter</a>.
 </dd>
 
 <dt>AP</dt>
@@ -338,7 +328,7 @@
 Strictly, the term "codec" is reserved for modules that both encode and decode,
 however it can also more loosely refer to only one of these.
 See Wikipedia article
-<a class="external-link" href="http://en.wikipedia.org/wiki/Audio_codec" target="_android">Audio codec</a>.
+<a href="http://en.wikipedia.org/wiki/Audio_codec">Audio codec</a>.
 </dd>
 
 <dt>DAC</dt>
@@ -349,8 +339,7 @@
 a low-pass filter to remove any high frequency components introduced
 by digital quantization.
 See Wikipedia article
-<a class="external-link" href="http://en.wikipedia.org/wiki/Digital-to-analog_converter"
-target="_android">Digital-to-analog converter</a>.
+<a href="http://en.wikipedia.org/wiki/Digital-to-analog_converter">Digital-to-analog converter</a>.
 </dd>
 
 <dt>DSP</dt>
@@ -368,8 +357,7 @@
 where the relative density of 1s versus 0s indicates the signal level.
 It is commonly used by digital to analog converters.
 See Wikipedia article
-<a class="external-link" href="http://en.wikipedia.org/wiki/Pulse-density_modulation"
-target="_android">Pulse-density modulation</a>.
+<a href="http://en.wikipedia.org/wiki/Pulse-density_modulation">Pulse-density modulation</a>.
 </dd>
 
 <dt>PWM</dt>
@@ -379,12 +367,54 @@
 where the relative width of a digital pulse indicates the signal level.
 It is commonly used by analog to digital converters.
 See Wikipedia article
-<a class="external-link" href="http://en.wikipedia.org/wiki/Pulse-width_modulation"
-target="_android">Pulse-width modulation</a>.
+<a href="http://en.wikipedia.org/wiki/Pulse-width_modulation">Pulse-width modulation</a>.
 </dd>
 
 </dl>
 
+<h3 id="srcTerms">Sample Rate Conversion</h3>
+
+<dl>
+
+<dt>downsample</dt>
+<dd>To resample, where sink sample rate &lt; source sample rate.</dd>
+
+<dt>Nyquist frequency</dt>
+<dd>
+The Nyquist frequency, equal to 1/2 of a given sample rate, is the
+maximum frequency component that can be represented by a discretized
+signal at that sample rate.  For example, the human hearing range is
+typically assumed to extend up to approximately 20 kHz, and so a digital
+audio signal must have a sample rate of at least 40 kHz to represent that
+range.  In practice, sample rates of 44.1 kHz and 48 kHz are commonly
+used, with Nyquist frequencies of 22.05 kHz and 24 kHz respectively.
+See
+<a href="http://en.wikipedia.org/wiki/Nyquist_frequency" target="_android">Nyquist frequency</a>
+and
+<a href="http://en.wikipedia.org/wiki/Hearing_range" target="_android">Hearing range</a>
+for more information.
+</dd>
+
+<dt>resampler</dt>
+<dd>Synonym for sample rate converter.</dd>
+
+<dt>resampling</dt>
+<dd>The process of converting sample rate.</dd>
+
+<dt>sample rate converter</dt>
+<dd>A module that resamples.</dd>
+
+<dt>sink</dt>
+<dd>The output of a resampler.</dd>
+
+<dt>source</dt>
+<dd>The input to a resampler.</dd>
+
+<dt>upsample</dt>
+<dd>To resample, where sink sample rate &gt; source sample rate.</dd>
+
+</dl>
+
 <h2 id="androidSpecificTerms">Android-Specific Terms</h2>
 
 <p>
@@ -399,7 +429,7 @@
 Advanced Linux Sound Architecture.  As the name suggests, it is an audio
 framework primarily for Linux, but it has influenced other systems.
 See Wikipedia article
-<a class="external-link" href="http://en.wikipedia.org/wiki/Advanced_Linux_Sound_Architecture" target="_android">ALSA</a>
+<a href="http://en.wikipedia.org/wiki/Advanced_Linux_Sound_Architecture">ALSA</a>
 for the general definition. As used within Android, it refers primarily
 to the kernel audio framework and drivers, not to the user-mode API. See
 tinyalsa.
@@ -409,14 +439,14 @@
 <dd>
 An API and implementation framework for output (post-processing) effects
 and input (pre-processing) effects.  The API is defined at
-<a href="http://developer.android.com/reference/android/media/audiofx/AudioEffect.html" target="_android">android.media.audiofx.AudioEffect</a>.
+<a href="http://developer.android.com/reference/android/media/audiofx/AudioEffect.html">android.media.audiofx.AudioEffect</a>.
 </dd>
 
 <dt>AudioFlinger</dt>
 <dd>
 The sound server implementation for Android. AudioFlinger
 runs within the mediaserver process. See Wikipedia article
-<a class="external-link" href="http://en.wikipedia.org/wiki/Sound_server" target="_android">Sound server</a>
+<a href="http://en.wikipedia.org/wiki/Sound_server">Sound server</a>
 for the generic definition.
 </dd>
 
@@ -433,7 +463,7 @@
 The module within AudioFlinger responsible for
 combining multiple tracks and applying attenuation
 (volume) and certain effects. The Wikipedia article
-<a class="external-link" href="http://en.wikipedia.org/wiki/Audio_mixing_(recorded_music)" target="_android">Audio mixing (recorded music)</a>
+<a href="http://en.wikipedia.org/wiki/Audio_mixing_(recorded_music)">Audio mixing (recorded music)</a>
 may be useful for understanding the generic
 concept. But that article describes a mixer more as a hardware device
 or a software application, rather than a software module within a system.
@@ -452,8 +482,7 @@
 input device such as microphone.  The data is usually in pulse-code modulation
 (PCM) format.
 The API is defined at
-<a href="http://developer.android.com/reference/android/media/AudioRecord.html"
-target="_android">android.media.AudioRecord</a>.
+<a href="http://developer.android.com/reference/android/media/AudioRecord.html">android.media.AudioRecord</a>.
 </dd>
 
 <dt>AudioResampler</dt>
@@ -467,8 +496,7 @@
 The primary low-level client API for sending data to an audio output
 device such as a speaker.  The data is usually in PCM format.
 The API is defined at
-<a href="http://developer.android.com/reference/android/media/AudioTrack.html"
-target="_android">android.media.AudioTrack</a>.
+<a href="http://developer.android.com/reference/android/media/AudioTrack.html">android.media.AudioTrack</a>.
 </dd>
 
 <dt>client</dt>
@@ -549,8 +577,7 @@
 A higher-level client API than AudioTrack, used for playing sampled
 audio clips. It is useful for triggering UI feedback, game sounds, etc.
 The API is defined at
-<a href="http://developer.android.com/reference/android/media/SoundPool.html"
-target="_android">android.media.SoundPool</a>.
+<a href="http://developer.android.com/reference/android/media/SoundPool.html">android.media.SoundPool</a>.
 </dd>
 
 <dt>Stagefright</dt>
@@ -595,11 +622,9 @@
 <dd>
 A higher-level client API than AudioTrack, used for playing DTMF signals.
 See the Wikipedia article
-<a class="external-link" href="http://en.wikipedia.org/wiki/Dual-tone_multi-frequency_signaling"
-target="_android">Dual-tone multi-frequency signaling</a>,
+<a href="http://en.wikipedia.org/wiki/Dual-tone_multi-frequency_signaling">Dual-tone multi-frequency signaling</a>,
 and the API definition at
-<a href="http://developer.android.com/reference/android/media/ToneGenerator.html"
-target="_android">android.media.ToneGenerator</a>.
+<a href="http://developer.android.com/reference/android/media/ToneGenerator.html">android.media.ToneGenerator</a>.
 </dd>
 
 <dt>track</dt>
@@ -622,11 +647,3 @@
 </dd>
 
 </dl>
-
-<h2 id="srcTerms">Sample Rate Conversion</h2>
-
-<p>
-For terms related to sample rate conversion, see the separate article
-<a href="audio_src.html">Sample Rate Conversion</a>.
-</p>
-
diff --git a/src/devices/audio_tv.jd b/src/devices/audio_tv.jd
new file mode 100644
index 0000000..4bcb55e
--- /dev/null
+++ b/src/devices/audio_tv.jd
@@ -0,0 +1,296 @@
+page.title=TV Audio
+@jd:body
+
+<!--
+    Copyright 2014 The Android Open Source Project
+
+    Licensed under the Apache License, Version 2.0 (the "License");
+    you may not use this file except in compliance with the License.
+    You may obtain a copy of the License at
+
+        http://www.apache.org/licenses/LICENSE-2.0
+
+    Unless required by applicable law or agreed to in writing, software
+    distributed under the License is distributed on an "AS IS" BASIS,
+    WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+    See the License for the specific language governing permissions and
+    limitations under the License.
+-->
+<div id="qv-wrapper">
+  <div id="qv">
+    <h2>In this document</h2>
+    <ol id="auto-toc">
+    </ol>
+  </div>
+</div>
+
+<p>The TV Input Framework (TIF) manager works with the audio routing API to support flexible audio
+path changes. When a System on Chip (SoC) implements the TV hardware abstraction layer (HAL), each
+TV input (HDMI IN, Tuner, etc.) provides <code>TvInputHardwareInfo</code> that specifies AudioPort information for audio type and address.</p>
+
+<ul>
+<li><b>Physical</b> audio input/output devices have a corresponding AudioPort.</li>
+<li><b>Software</b> audio output/input streams are represented as AudioMixPort (child class of
+AudioPort).</li>
+</ul>
+
+<p>The TIF then uses AudioPort information for the audio routing API.</p>
+
+<p><img src="audio/images/ape_audio_tv_tif.png" alt="Android TV Input Framework (TIF)" />
+<p class="img-caption"><strong>Figure 1.</strong> TV Input Framework (TIF)</p>
+
+<h2 id="Requirements">Requirements</h2>
+
+<p>A SoC must implement the audio HAL with the following audio routing API support:</p>
+
+<table>
+<tbody>
+<tr>
+<th>Audio Ports</th>
+<td>
+<ul>
+<li>TV Audio Input has a corresponding audio source port implementation.</li>
+<li>TV Audio Output has a corresponding audio sink port implementation.</li>
+<li>Can create audio patch between any TV input audio port and any TV output audio port.</li>
+</ul>
+</td>
+</tr>
+<tr>
+<th>Default Input</th>
+<td>AudioRecord (created with DEFAULT input source) must seize <i>virtual null input source</i> for
+AUDIO_DEVICE_IN_DEFAULT acquisition on Android TV.</td>
+</tr>
+<tr>
+<th>Device Loopback</th>
+<td>Requires supporting an AUDIO_DEVICE_IN_LOOPBACK input that is a complete mix of all audio output
+of all the TV output (11kHz, 16-bit mono or 48kHz, 16-bit mono). Used only for audio capture.
+</td>
+</tr>
+</tbody>
+</table>
+
+
+<h2 id="Audio Devices">TV audio devices</h2>
+
+<p>Android supports the following audio devices for TV audio input/output.</p>
+
+<h4>system/core/include/system/audio.h</h4>
+
+<pre>
+/* output devices */
+AUDIO_DEVICE_OUT_AUX_DIGITAL  = 0x400,
+AUDIO_DEVICE_OUT_HDMI   = AUDIO_DEVICE_OUT_AUX_DIGITAL,
+/* HDMI Audio Return Channel */
+AUDIO_DEVICE_OUT_HDMI_ARC   = 0x40000,
+/* S/PDIF out */
+AUDIO_DEVICE_OUT_SPDIF    = 0x80000,
+/* input devices */
+AUDIO_DEVICE_IN_AUX_DIGITAL   = AUDIO_DEVICE_BIT_IN | 0x20,
+AUDIO_DEVICE_IN_HDMI      = AUDIO_DEVICE_IN_AUX_DIGITAL,
+/* TV tuner input */
+AUDIO_DEVICE_IN_TV_TUNER    = AUDIO_DEVICE_BIT_IN | 0x4000,
+/* S/PDIF in */
+AUDIO_DEVICE_IN_SPDIF   = AUDIO_DEVICE_BIT_IN | 0x10000,
+AUDIO_DEVICE_IN_LOOPBACK    = AUDIO_DEVICE_BIT_IN | 0x40000,
+</pre>
+
+
+<h2 id="HAL extension">Audio HAL extension</h2>
+
+<p>The Audio HAL extension for the audio routing API is defined by following:</p>
+
+<h4>system/core/include/system/audio.h</h4>
+
+<pre>
+/* audio port configuration structure used to specify a particular configuration of an audio port */
+struct audio_port_config {
+    audio_port_handle_t      id;           /* port unique ID */
+    audio_port_role_t        role;         /* sink or source */
+    audio_port_type_t        type;         /* device, mix ... */
+    unsigned int             config_mask;  /* e.g AUDIO_PORT_CONFIG_ALL */
+    unsigned int             sample_rate;  /* sampling rate in Hz */
+    audio_channel_mask_t     channel_mask; /* channel mask if applicable */
+    audio_format_t           format;       /* format if applicable */
+    struct audio_gain_config gain;         /* gain to apply if applicable */
+    union {
+        struct audio_port_config_device_ext  device;  /* device specific info */
+        struct audio_port_config_mix_ext     mix;     /* mix specific info */
+        struct audio_port_config_session_ext session; /* session specific info */
+    } ext;
+};
+struct audio_port {
+    audio_port_handle_t      id;                /* port unique ID */
+    audio_port_role_t        role;              /* sink or source */
+    audio_port_type_t        type;              /* device, mix ... */
+    unsigned int             num_sample_rates;  /* number of sampling rates in following array */
+    unsigned int             sample_rates[AUDIO_PORT_MAX_SAMPLING_RATES];
+    unsigned int             num_channel_masks; /* number of channel masks in following array */
+    audio_channel_mask_t     channel_masks[AUDIO_PORT_MAX_CHANNEL_MASKS];
+    unsigned int             num_formats;       /* number of formats in following array */
+    audio_format_t           formats[AUDIO_PORT_MAX_FORMATS];
+    unsigned int             num_gains;         /* number of gains in following array */
+    struct audio_gain        gains[AUDIO_PORT_MAX_GAINS];
+    struct audio_port_config active_config;     /* current audio port configuration */
+    union {
+        struct audio_port_device_ext  device;
+        struct audio_port_mix_ext     mix;
+        struct audio_port_session_ext session;
+    } ext;
+};
+</pre>
+
+<h4>hardware/libhardware/include/hardware/audio.h</h4>
+
+<pre>
+struct audio_hw_device {
+  :
+    /**
+     * Routing control
+     */
+
+    /* Creates an audio patch between several source and sink ports.
+     * The handle is allocated by the HAL and should be unique for this
+     * audio HAL module. */
+    int (*create_audio_patch)(struct audio_hw_device *dev,
+                               unsigned int num_sources,
+                               const struct audio_port_config *sources,
+                               unsigned int num_sinks,
+                               const struct audio_port_config *sinks,
+                               audio_patch_handle_t *handle);
+
+    /* Release an audio patch */
+    int (*release_audio_patch)(struct audio_hw_device *dev,
+                               audio_patch_handle_t handle);
+
+    /* Fills the list of supported attributes for a given audio port.
+     * As input, "port" contains the information (type, role, address etc...)
+     * needed by the HAL to identify the port.
+     * As output, "port" contains possible attributes (sampling rates, formats,
+     * channel masks, gain controllers...) for this port.
+     */
+    int (*get_audio_port)(struct audio_hw_device *dev,
+                          struct audio_port *port);
+
+    /* Set audio port configuration */
+    int (*set_audio_port_config)(struct audio_hw_device *dev,
+                         const struct audio_port_config *config);
+</pre>
+
+<h2 id="Testing">Testing DEVICE_IN_LOOPBACK</h2>
+
+<p>To test DEVICE_IN_LOOPBACK for TV monitoring, use the following testing code. After running the
+test, the captured audio saves to <code>/sdcard/record_loopback.raw</code>, where you can listen to
+it using <code>ffmpeg</code>.</p>
+
+<pre>
+&lt;uses-permission android:name="android.permission.MODIFY_AUDIO_ROUTING" /&gt;
+&lt;uses-permission android:name="android.permission.WRITE_EXTERNAL_STORAGE" /&gt;
+
+   AudioRecord mRecorder;
+   Handler mHandler = new Handler();
+   int mMinBufferSize = AudioRecord.getMinBufferSize(RECORD_SAMPLING_RATE,
+           AudioFormat.CHANNEL_IN_MONO,
+           AudioFormat.ENCODING_PCM_16BIT);
+   static final int RECORD_SAMPLING_RATE = 48000;
+   public void doCapture() {
+       mRecorder = new AudioRecord(MediaRecorder.AudioSource.DEFAULT, RECORD_SAMPLING_RATE,
+               AudioFormat.CHANNEL_IN_MONO, AudioFormat.ENCODING_PCM_16BIT, mMinBufferSize * 10);
+       AudioManager am = (AudioManager) getSystemService(Context.AUDIO_SERVICE);
+       ArrayList&lt;AudioPort&gt; audioPorts = new ArrayList&lt;AudioPort&gt;();
+       am.listAudioPorts(audioPorts);
+       AudioPortConfig srcPortConfig = null;
+       AudioPortConfig sinkPortConfig = null;
+       for (AudioPort audioPort : audioPorts) {
+           if (srcPortConfig == null
+                   && audioPort.role() == AudioPort.ROLE_SOURCE
+                   && audioPort instanceof AudioDevicePort) {
+               AudioDevicePort audioDevicePort = (AudioDevicePort) audioPort;
+               if (audioDevicePort.type() == AudioManager.DEVICE_IN_LOOPBACK) {
+                   srcPortConfig = audioPort.buildConfig(48000, AudioFormat.CHANNEL_IN_DEFAULT,
+                           AudioFormat.ENCODING_DEFAULT, null);
+                   Log.d(LOG_TAG, "Found loopback audio source port : " + audioPort);
+               }
+           }
+           else if (sinkPortConfig == null
+                   && audioPort.role() == AudioPort.ROLE_SINK
+                   && audioPort instanceof AudioMixPort) {
+               sinkPortConfig = audioPort.buildConfig(48000, AudioFormat.CHANNEL_OUT_DEFAULT,
+                       AudioFormat.ENCODING_DEFAULT, null);
+               Log.d(LOG_TAG, "Found recorder audio mix port : " + audioPort);
+           }
+       }
+       if (srcPortConfig != null && sinkPortConfig != null) {
+           AudioPatch[] patches = new AudioPatch[] { null };
+           int status = am.createAudioPatch(
+                   patches,
+                   new AudioPortConfig[] { srcPortConfig },
+                   new AudioPortConfig[] { sinkPortConfig });
+           Log.d(LOG_TAG, "Result of createAudioPatch(): " + status);
+       }
+       mRecorder.startRecording();
+       processAudioData();
+       mRecorder.stop();
+       mRecorder.release();
+   }
+   private void processAudioData() {
+       OutputStream rawFileStream = null;
+       byte data[] = new byte[mMinBufferSize];
+       try {
+           rawFileStream = new BufferedOutputStream(
+                   new FileOutputStream(new File("/sdcard/record_loopback.raw")));
+       } catch (FileNotFoundException e) {
+           Log.d(LOG_TAG, "Can't open file.", e);
+       }
+       long startTimeMs = System.currentTimeMillis();
+       while (System.currentTimeMillis() - startTimeMs &lt; 5000) {
+           int nbytes = mRecorder.read(data, 0, mMinBufferSize);
+           if (nbytes &lt;= 0) {
+               continue;
+           }
+           try {
+               rawFileStream.write(data);
+           } catch (IOException e) {
+               Log.e(LOG_TAG, "Error on writing raw file.", e);
+           }
+       }
+       try {
+           rawFileStream.close();
+       } catch (IOException e) {
+       }
+       Log.d(LOG_TAG, "Exit audio recording.");
+   }
+</pre>
+
+<p>Locate the captured audio file in <code>/sdcard/record_loopback.raw</code> and listen to it using
+<code>ffmpeg</code>:</p>
+
+<pre>
+adb pull /sdcard/record_loopback.raw
+ffmpeg -f s16le -ar 48k -ac 1 -i record_loopback.raw record_loopback.wav
+ffplay record_loopback.wav
+</pre>
+
+<h2 id="Use cases">Use cases</h2>
+
+<p>This section includes common use cases for TV audio.</p>
+
+<h3>TV tuner with speaker output</h3>
+
+<p>When a TV tuner becomes active, the audio routing API creates an audio patch between the tuner
+and the default output (e.g. the speaker). The tuner output does not require decoding, but final
+audio output is mixed with software output_stream.</p>
+
+<p><img src="audio/images/ape_audio_tv_tuner.png" alt="Android TV Tuner Audio Patch" />
+<p class="img-caption">
+<strong>Figure 2.</strong> Audio Patch for TV tuner with speaker output.</p>
+
+
+<h3>HDMI OUT during live TV</h3>
+
+<p>A user is watching live TV then switches to the HDMI audio output (Intent.ACTION_HDMI_AUDIO_PLUG)
+. The output device of all output_streams changes to the HDMI_OUT port, and the TIF manager changes
+the sink port of the existing tuner audio patch to the HDMI_OUT port.</p>
+
+<p><img src="audio/images/ape_audio_tv_hdmi_tuner.png" alt="Android TV HDMI-OUT Audio Patch" />
+<p class="img-caption">
+<strong>Figure 3.</strong> Audio Patch for HDMI OUT from live TV.</p>
\ No newline at end of file
diff --git a/src/devices/audio_warmup.jd b/src/devices/audio_warmup.jd
index 0a0ec04..777650b 100644
--- a/src/devices/audio_warmup.jd
+++ b/src/devices/audio_warmup.jd
@@ -24,7 +24,7 @@
   </div>
 </div>
 
-<p>Audio warmup is the time for the audio amplifier circuit in your device to
+<p>Audio warmup is the time it takes for the audio amplifier circuit in your device to
 be fully powered and reach its normal operation state. The major contributors
 to audio warmup time are power management and any "de-pop" logic to stabilize
 the circuit.
diff --git a/src/devices/camera/camera.jd b/src/devices/camera/camera.jd
index 577224b..a1e837f 100644
--- a/src/devices/camera/camera.jd
+++ b/src/devices/camera/camera.jd
@@ -1,4 +1,4 @@
-page.title=Camera HAL overview
+page.title=Camera
 @jd:body
 
 <!--
diff --git a/src/devices/camera/camera3.jd b/src/devices/camera/camera3.jd
index 6fe9770..4d4d3a1 100644
--- a/src/devices/camera/camera3.jd
+++ b/src/devices/camera/camera3.jd
@@ -39,18 +39,19 @@
   capable front-facing camera with version 1 of the HAL and a more advanced 
   back-facing camera with version 3 of the HAL. Version 2 was a stepping stone to 
   version 3 and is not supported.</p>
+
 <p>
 There is only one camera HAL module (with its own version number, currently 1, 2,
 or 2.1), which lists multiple independent camera devices that each have
 their own version. Camera module v2 or newer is required to support devices v2 or newer, and such
 camera modules can have a mix of camera device versions. This is what we mean
-when we say we Android supports implementing both HALs.
+when we say Android supports implementing both HALs.
 </p>
-<p><strong>Note:</strong> The new camera HAL is in active development and can change at any 
-  time. This document describes at a high level the design of the camera subsystem 
-  and omits many details. Stay tuned for more updates to the PDK repository and 
-  look out for updates to the Camera HAL and reference implementation for more 
-  information.</p>
+
+<p class="note"><strong>Note:</strong> The new camera HAL is in active
+development and can change at any time. This document describes at a high level
+the design of the camera subsystem and omits many details. See <a
+href="versioning.html">Camera version support</a> for our plans.</p>
 
 <h2 id="overview">Overview</h2>
 
diff --git a/src/devices/camera/versioning.jd b/src/devices/camera/versioning.jd
new file mode 100644
index 0000000..9f97ce3
--- /dev/null
+++ b/src/devices/camera/versioning.jd
@@ -0,0 +1,132 @@
+page.title=Camera version support
+@jd:body
+
+<!--
+    Copyright 2014 The Android Open Source Project
+
+    Licensed under the Apache License, Version 2.0 (the "License");
+    you may not use this file except in compliance with the License.
+    You may obtain a copy of the License at
+
+        http://www.apache.org/licenses/LICENSE-2.0
+
+    Unless required by applicable law or agreed to in writing, software
+    distributed under the License is distributed on an "AS IS" BASIS,
+    WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+    See the License for the specific language governing permissions and
+    limitations under the License.
+-->
+<div id="qv-wrapper">
+  <div id="qv">
+    <h2>In this document</h2>
+    <ol id="auto-toc">
+    </ol>
+  </div>
+</div>
+
+<p>The Android 5.0 (Lollipop) platform release adds a new app-level camera framework. This
+document outlines some logistical details that OEMs and SoC vendors need to
+know.</p>
+
+<h2 id=glossary>Terms</h2>
+
+<p>The following terms are used in this document:</p>
+
+<ul>
+  <li><em>Camera API1</em>: The app-level camera framework on KitKat and earlier devices, exposed
+through the <code>android.hardware.Camera</code> class.
+  <li><em>Camera API2</em>: The app-level camera framework on 5.0 and later
+devices, exposed through the <code>android.hardware.camera2</code> package.
+  <li><em>Camera HAL</em>: The camera module layer that SoC vendors implement. The app-level public
+frameworks are built on top of the camera HAL.
+  <li><em>Camera HAL3.2</em>: The version of the camera device HAL that is
+being released with Lollipop. KitKat launched with an earlier version (Camera HAL3.1).
+  <li><em>Camera API1 CTS</em>: The set of camera Compatibility Test Suite (CTS) tests that run on top of
+Camera API1.
+  <li><em>Camera API2 CTS</em>: An additional set of camera CTS tests that run on top of Camera API2.
+</ul>
+
+<h2 id=camera_api2_overview>Camera API2 overview</h2>
+
+<p>The new camera frameworks expose lower-level camera control to the app,
+including efficient zero-copy burst/streaming flows and per-frame controls of
+exposure, gain, white balance gains, color conversion, denoising, sharpening,
+and more. See this <a
+href="https://www.youtube.com/watch?v=92fgcUNCHic&feature=youtu.be&t=29m50s">brief
+video overview from the Google I/O 2014 conference</a> for additional details.
+</p>
+
+<h2 id=camera_api1_availability_and_deprecation_in_l>Camera API1 availability and deprecation in Android 5.0</h2>
+
+<p>The Camera API1 interfaces are still available for apps to use on Android
+5.0 and later devices, and camera apps built on top of Camera API1 should work
+as before. Camera API1 is being marked as deprecated in Lollipop, indicating that it
+will be phased out over time and new platform development will focus on Camera
+API2. However, we expect this phase-out period to be lengthy, and Camera API1
+apps will continue to be supported in Android for some time to come.</p>
+
+<p>All earlier camera HAL versions, including Camera HAL1.0, will also continue to
+be supported.</p>
+
+<h2 id=camera_api2_capabilities_and_support_levels>Camera API2 capabilities and support levels</h2>
+
+<p>Android 5.0 and later devices feature Camera API2, however they may not fully support all of
+the new features of Camera API2. The
+<code>android.info.supportedHardwareLevel</code> property that apps can query
+through the Camera API2 interfaces reports one of three support levels:
+<code>LEGACY</code>, <code>FULL</code>, and <code>LIMITED</code>.</p>
+
+<p><em>Legacy</em> devices expose a level of capabilities through the Camera API2 interfaces that
+are approximately the same as is exposed to apps through the Camera API1
+interfaces; the legacy frameworks code conceptually translates Camera API2
+calls into Camera API1 calls under the hood. Legacy devices do not support
+the new Camera API2 features including per-frame controls.</p>
+
+<p><em>Full</em> devices support all of the major capabilities of Camera API2. Full devices by
+necessity must have a Camera HAL version of 3.2 (shipping with Android 5.0) or later.</p>
+
+<p><em>Limited</em> devices are in between: They support some of the new Camera API2 capabilities,
+but not all of them, and must also have a Camera HAL version of 3.2 or later.</p>
+
+<p>Individual capabilities are exposed via the <code>
+android.request.availableCapabilities</code> property in the Camera API2
+interfaces. Full devices require both the <code>MANUAL_SENSOR</code> and
+<code>MANUAL_POST_PROCESSING</code> capabilities, among others. There is also a
+<code>RAW</code> capability that is optional even for full devices. Limited
+devices can advertise any subset of these capabilities, including none of them. However,
+the <code>BACKWARD_COMPATIBLE</code> capability must always be defined.</p>
+
+<p>The supported hardware level of the device, as well as the specific Camera API2
+capabilities it supports, are available as the following feature flags to allow
+Play Store filtering of Camera API2 camera apps; a device must define the
+feature flag if any of its attached camera devices supports the feature.</p>
+
+<ul>
+  <li><code>android.hardware.camera.hardware_level.full</code>
+  <li><code>android.hardware.camera.capability.raw</code>
+  <li><code>android.hardware.camera.capability.manual_sensor</code>
+  <li><code>android.hardware.camera.capability.manual_post_processing</code>
+</ul>
+
+<h2 id=cts_requirements>CTS requirements</h2>
+
+<p>Android 5.0 and later devices must pass both Camera API1 CTS and Camera API2
+CTS. And as always, devices are required to pass the CTS Verifier camera
+tests.</p>
+
+<p>To add some context: For devices that don’t feature a Camera HAL3.2
+implementation and are not capable of supporting the full Camera API2
+interfaces, the Camera API2 CTS tests must still be passed. However, in this
+case the device will be running in Camera API2 <em>legacy</em> mode (in which
+the Camera API2 calls are conceptually just mapped to Camera
+API1 calls); and any Camera API2 CTS tests that relate to features or
+capabilities beyond Camera API1 have logic that will skip them in the case of
+old (legacy) devices.</p>
+
+<p>On a legacy device, the Camera API2 CTS tests that are not skipped are purely
+using the existing public Camera API1 interfaces and capabilities (with no new
+requirements), and any bugs that are exposed (which will in turn cause a Camera
+API2 CTS failure) are bugs that were already present in the device’s existing
+Camera HAL and would also be a bug that could be easily hit by existing Camera
+API1 apps. The expectation is that there should be very few bugs of this
+nature. Nevertheless, any such bugs will need to be fixed.</p>
diff --git a/src/devices/devices_toc.cs b/src/devices/devices_toc.cs
index c5075d9..8a5353f 100644
--- a/src/devices/devices_toc.cs
+++ b/src/devices/devices_toc.cs
@@ -20,7 +20,7 @@
   <li class="nav-section">
     <div class="nav-section-header">
       <a href="<?cs var:toroot ?>devices/index.html">
-        <span class="en">Porting</span>
+        <span class="en">Interfaces</span>
       </a>
     </div>
     <ul>
@@ -31,7 +31,9 @@
         </a>
       </div>
         <ul>
+          <li><a href="<?cs var:toroot ?>devices/audio_terminology.html">Terminology</a></li>
           <li><a href="<?cs var:toroot ?>devices/audio_implement.html">Implementation</a></li>
+          <li><a href="<?cs var:toroot ?>devices/audio_attributes.html">Attributes</a></li>
           <li><a href="<?cs var:toroot ?>devices/audio_warmup.html">Warmup</a></li>
           <li class="nav-section">
             <div class="nav-section-header">
@@ -47,9 +49,9 @@
           </li>
           <li><a href="<?cs var:toroot ?>devices/audio_avoiding_pi.html">Priority Inversion</a></li>
           <li><a href="<?cs var:toroot ?>devices/audio_src.html">Sample Rate Conversion</a></li>
-          <li><a href="<?cs var:toroot ?>devices/audio_terminology.html">Terminology</a></li>
           <li><a href="<?cs var:toroot ?>devices/audio_debugging.html">Debugging</a></li>
           <li><a href="<?cs var:toroot ?>devices/audio_usb.html">USB Digital Audio</a></li>
+          <li><a href="<?cs var:toroot ?>devices/audio_tv.html">TV Audio</a></li>
         </ul>
       </li>
       <li><a href="<?cs var:toroot ?>devices/bluetooth.html">Bluetooth</a></li>
@@ -67,6 +69,7 @@
           <li><a href="<?cs var:toroot ?>devices/camera/camera3_crop_reprocess.html">Output and Cropping</a></li>
           <li><a href="<?cs var:toroot ?>devices/camera/camera3_error_stream.html">Errors and Streams</a></li>
           <li><a href="<?cs var:toroot ?>devices/camera/camera3_requests_methods.html">Request Creation</a></li>
+          <li><a href="<?cs var:toroot ?>devices/camera/versioning.html">Version Support</a></li>
         </ul>
       </li>
 
@@ -89,7 +92,8 @@
           </a>
         </div>
         <ul>
-          <li><a href="<?cs var:toroot ?>devices/graphics/architecture.html">System-Level Architecture</a></li>
+          <li><a href="<?cs var:toroot ?>devices/graphics/architecture.html">Architecture</a></li>
+          <li><a href="<?cs var:toroot ?>devices/graphics/implement.html">Implementation</a></li>
         </ul>
       </li>
       <li class="nav-section">
@@ -111,64 +115,9 @@
           <li><a href="<?cs var:toroot ?>devices/tech/input/validate-keymaps.html">Validate Keymaps</a></li>
         </ul>
       </li>
-      <li><a href="<?cs var:toroot ?>devices/low-ram.html">Low RAM</a></li>
       <li><a href="<?cs var:toroot ?>devices/media.html">Media</a></li>
      <li class="nav-section">
           <div class="nav-section-header">
-            <a href="<?cs var:toroot ?>devices/tech/security/index.html">
-              <span class="en">Security</span>
-            </a>
-          </div>
-        <ul>
-            <li>
-              <a href="<?cs var:toroot ?>devices/tech/security/acknowledgements.html">
-                <span class="en">Acknowledgements</span>
-              </a>
-            </li>
-          <li class="nav-section">
-            <div class="nav-section-header">
-              <a href="<?cs var:toroot ?>devices/tech/security/enhancements.html">
-                <span class="en">Enhancements</span>
-              </a>
-            </div>
-            <ul>
-              <li><a href="<?cs var:toroot ?>devices/tech/security/enhancements44.html">Android 4.4</a></li>
-              <li><a href="<?cs var:toroot ?>devices/tech/security/enhancements43.html">Android 4.3</a></li>
-              <li><a href="<?cs var:toroot ?>devices/tech/security/enhancements42.html">Android 4.2</a></li>
-            </ul>
-          </li>
-            <li>
-              <a href="<?cs var:toroot ?>devices/tech/security/best-practices.html">
-                <span class="en">Best practices</span>
-              </a>
-            </li>
-            <li>
-              <a href="<?cs var:toroot ?>devices/tech/security/dm-verity.html">
-                <span class="en">dm-verity on boot</span>
-              </a>
-            </li>
-            <li>
-              <a href="<?cs var:toroot ?>devices/tech/encryption/index.html">
-                <span class="en">Encryption</span>
-              </a>
-            </li>
-          <li class="nav-section">
-            <div class="nav-section-header">
-              <a href="<?cs var:toroot ?>devices/tech/security/se-linux.html">
-                <span class="en">Security-Enhanced Linux</span>
-              </a>
-            </div>
-            <ul>
-              <li><a href="<?cs var:toroot ?>devices/tech/security/selinux/concepts.html">Concepts</a></li>
-              <li><a href="<?cs var:toroot ?>devices/tech/security/selinux/implement.html">Implementation</a></li>
-              <li><a href="<?cs var:toroot ?>devices/tech/security/selinux/customize.html">Customization</a></li>
-              <li><a href="<?cs var:toroot ?>devices/tech/security/selinux/validate.html">Validation</a></li>
-            </ul>
-          </li>
-          </ul>
-      </li>
-     <li class="nav-section">
-          <div class="nav-section-header">
             <a href="<?cs var:toroot ?>devices/sensors/index.html">
               <span class="en">Sensors</span>
             </a>
@@ -221,6 +170,17 @@
             </li>
           </ul>
       </li>
+      <li class="nav-section">
+        <div class="nav-section-header">
+          <a href="<?cs var:toroot ?>devices/tv/index.html">
+            <span class="en">TV</span>
+          </a>
+        </div>
+        <ul>
+          <li><a href="<?cs var:toroot ?>devices/tv/HDMI-CEC.html">HDMI-CEC control service</a></li>
+        </ul>
+      </li>
+
     </ul>
   </li>
 <!-- End Porting Android -->
@@ -230,7 +190,7 @@
   <li class="nav-section">
     <div class="nav-section-header">
       <a href="<?cs var:toroot ?>devices/tech/index.html">
-        <span class="en">Technical Information</span>
+        <span class="en">Core Technologies</span>
       </a>
     </div>
 
@@ -238,13 +198,12 @@
       <li class="nav-section">
         <div class="nav-section-header">
           <a href="<?cs var:toroot ?>devices/tech/dalvik/index.html">
-          <span class="en">Dalvik</span></a>
+          <span class="en">ART and Dalvik</span></a>
         </div>
         <ul>
           <li><a href="<?cs var:toroot ?>devices/tech/dalvik/dalvik-bytecode.html">Bytecode Format</a></li>
           <li><a href="<?cs var:toroot ?>devices/tech/dalvik/dex-format.html">.Dex Format</a></li>
           <li><a href="<?cs var:toroot ?>devices/tech/dalvik/instruction-formats.html">Instruction Formats</a></li>
-          <li><a href="<?cs var:toroot ?>devices/tech/dalvik/art.html">Introducing ART</a></li>
         </ul>
       </li>
 
@@ -278,7 +237,7 @@
 
       <li class="nav-section">
         <div class="nav-section-header empty">
-          <a href="<?cs var:toroot ?>devices/reference/files.html">
+          <a href="<?cs var:toroot ?>devices/halref/index.html">
             <span class="en">HAL File Reference</span>
           </a>
         </div>
@@ -291,15 +250,77 @@
       </li>
 
       <li>
+          <a href="<?cs var:toroot ?>devices/low-ram.html">
+            <span class="en">Low RAM</span>
+          </a>
+      </li>
+
+      <li>
           <a href="<?cs var:toroot ?>devices/tech/power.html">
             <span class="en">Power</span>
           </a>
       </li>
 
+     <li class="nav-section">
+          <div class="nav-section-header">
+            <a href="<?cs var:toroot ?>devices/tech/security/index.html">
+              <span class="en">Security</span>
+            </a>
+          </div>
+        <ul>
+            <li>
+              <a href="<?cs var:toroot ?>devices/tech/security/acknowledgements.html">
+                <span class="en">Acknowledgements</span>
+              </a>
+            </li>
+          <li class="nav-section">
+            <div class="nav-section-header">
+              <a href="<?cs var:toroot ?>devices/tech/security/enhancements.html">
+                <span class="en">Enhancements</span>
+              </a>
+            </div>
+            <ul>
+              <li><a href="<?cs var:toroot ?>devices/tech/security/enhancements50.html">Android 5.0</a></li>
+              <li><a href="<?cs var:toroot ?>devices/tech/security/enhancements44.html">Android 4.4</a></li>
+              <li><a href="<?cs var:toroot ?>devices/tech/security/enhancements43.html">Android 4.3</a></li>
+              <li><a href="<?cs var:toroot ?>devices/tech/security/enhancements42.html">Android 4.2</a></li>
+            </ul>
+          </li>
+            <li>
+              <a href="<?cs var:toroot ?>devices/tech/security/best-practices.html">
+                <span class="en">Best practices</span>
+              </a>
+            </li>
+            <li>
+              <a href="<?cs var:toroot ?>devices/tech/security/dm-verity.html">
+                <span class="en">dm-verity on boot</span>
+              </a>
+            </li>
+            <li>
+              <a href="<?cs var:toroot ?>devices/tech/encryption/index.html">
+                <span class="en">Encryption</span>
+              </a>
+            </li>
+          <li class="nav-section">
+            <div class="nav-section-header">
+              <a href="<?cs var:toroot ?>devices/tech/security/se-linux.html">
+                <span class="en">Security-Enhanced Linux</span>
+              </a>
+            </div>
+            <ul>
+              <li><a href="<?cs var:toroot ?>devices/tech/security/selinux/concepts.html">Concepts</a></li>
+              <li><a href="<?cs var:toroot ?>devices/tech/security/selinux/implement.html">Implementation</a></li>
+              <li><a href="<?cs var:toroot ?>devices/tech/security/selinux/customize.html">Customization</a></li>
+              <li><a href="<?cs var:toroot ?>devices/tech/security/selinux/validate.html">Validation</a></li>
+            </ul>
+          </li>
+          </ul>
+      </li>
+
       <li class="nav-section">
         <div class="nav-section-header">
           <a href="<?cs var:toroot ?>devices/tech/test_infra/tradefed/index.html">
-            <span class="en">Trade Federation Testing Infrastructure</span>
+            <span class="en">Testing Infrastructure</span>
           </a>
         </div>
         <ul>
diff --git a/src/devices/drm.jd b/src/devices/drm.jd
index 828f41b..9a7c673 100644
--- a/src/devices/drm.jd
+++ b/src/devices/drm.jd
@@ -2,19 +2,19 @@
 @jd:body
 
 <!--
-    Copyright 2013 The Android Open Source Project     
+    Copyright 2014 The Android Open Source Project
 
-    Licensed under the Apache License, Version 2.0 (the "License");    
-    you may not use this file except in compliance with the License.   
-    You may obtain a copy of the License at    
+    Licensed under the Apache License, Version 2.0 (the "License");
+    you may not use this file except in compliance with the License.
+    You may obtain a copy of the License at
 
         http://www.apache.org/licenses/LICENSE-2.0
 
-    Unless required by applicable law or agreed to in writing, software    
-    distributed under the License is distributed on an "AS IS" BASIS,    
-    WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.   
-    See the License for the specific language governing permissions and    
-    limitations under the License.   
+    Unless required by applicable law or agreed to in writing, software
+    distributed under the License is distributed on an "AS IS" BASIS,
+    WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+    See the License for the specific language governing permissions and
+    limitations under the License.
 -->
 
 <div id="qv-wrapper">
@@ -25,154 +25,234 @@
   </div>
 </div>
 
-<p>This document introduces Widevine DRM security levels
-  and certification requirements. It explains how to integrate and distribute Widevine DRM
-  for your product. Android provides the Widevine DRM solution with a royalty-free
-  license and we recommend that you use it for
-  your protected playback solution. </p>
+<p>This document provides an overview of the Android DRM framework, and
+introduces the interfaces a DRM plug-in must implement. This document does not
+describe robustness rules or compliance rules that may be defined by a DRM
+scheme.</p>
 
-<h2 id="overview">Overview</h2>
+<h2 id="introduction">Introduction</h2>
+
+<p>The Android platform provides an extensible DRM framework that lets
+applications manage rights-protected content according to the license
+constraints associated with the content. The DRM framework supports many DRM
+schemes; which DRM schemes a device supports is up to the device manufacturer.
+The DRM framework introduced in Android 3.0 provides a unified interface for
+application developers and hides the complexity of DRM operations. The DRM
+framework provides a consistent operation mode for protected and non-protected
+content. DRM schemes can define very complex usage models by license metadata.
+The DRM framework provides the association between DRM content and license, and
+handles the rights management. This enables the media player to be abstracted
+from DRM-protected or non-protected content. See <a
+href="https://developer.android.com/reference/android/media/MediaDrm.html">MediaDrm</a>
+for the class to obtain keys for decrypting protected media streams.</p>
+
+ <img src="images/drm_hal.png" alt="Android DRM HAL" />
+
+<p class="img-caption"><strong>Figure 1.</strong> DRM Hardware Abstraction
+Layer</p>
+
 <p>
-Availability of rich digital content is important to users on mobile devices. To make their content widely available,
-Android developers and digital content publishers need a consistent DRM implementation supported across the Android
-ecosystem. In order to make that digital content available on Android devices and to ensure that there is at least
-one consistent DRM available across all devices, Google provides Widevine DRM for free on compatible Android devices.
-On Android 3.0 and higher platforms, the Widevine DRM plugin is integrated with the Android DRM framework and uses
-hardware-backed protection to secure movie content and user credentials.
+Availability of rich digital content is important to users on mobile devices. To
+make their content widely available, Android developers and digital content
+publishers need a consistent DRM implementation supported across the Android
+ecosystem. In order to make that digital content available on Android devices
+and to ensure that there is at least one consistent DRM available across all
+devices, Google provides DRM without any license fees on compatible Android devices.
+On Android 3.0 and higher platforms, the DRM plug-in is integrated with the
+Android DRM framework and can use hardware-backed protection to secure premium
+content and user credentials.
 </p>
 
 <p>
-The content protection provided by the Widevine DRM plugin depends on the security and content protection capabilities of the underlying hardware platform. The hardware capabilities of the device include hardware secure boot to establish a chain of trust of security and protection of cryptographic keys. Content protection capabilities of the device include protection of decrypted frames in the device and content output protection via a trusted output protection mechanism. Not all hardware platforms support all the above security and content protection features. Security is never implemented in a single place in the stack, but instead relies on the integration of hardware, software, and services. The combination of hardware security functions, a trusted boot mechanism, and an isolated secure OS for handling security functions is critical to provide a secure device.</p>
+The content protection provided by the DRM plug-in depends on the security and
+content protection capabilities of the underlying hardware platform. The
+hardware capabilities of the device include hardware secure boot to establish a
+chain of trust of security and protection of cryptographic keys. Content
+protection capabilities of the device include protection of decrypted frames in
+the device and content protection via a trusted output protection mechanism. Not
+all hardware platforms support all of the above security and content protection
+features. Security is never implemented in a single place in the
+stack, but instead relies on the integration of hardware, software, and
+services. The combination of hardware security functions, a trusted boot
+mechanism, and an isolated secure OS for handling security functions is critical
+to providing a secure device.</p>
 
 
-<h3 id="framework">Android DRM Framework</h3>
-<p>Android 3.0 and higher platforms provide an extensible DRM framework that lets applications manage protected content using a
-    choice of DRM mechanisms. For application developers, the framework offers an
-    abstract, unified API that simplifies the management of protected content.
-    The API hides the complexity of DRM operations and allows a consistent operation mode for both protected and unprotected
-    content across a variety of DRM schemes. For device manufacturers, content owners, and Internet digital media providers
-    the DRM framework plugin API provides a means of adding support for a DRM scheme of choice into the Android system, for
-    secure enforcement of content protection.
+<h2 id="architecture">Architecture</h2>
+<p>The DRM framework is designed to be implementation agnostic and
+abstracts the details of the specific DRM scheme implementation in a
+scheme-specific DRM plug-in. The DRM framework includes simple APIs to handle
+complex DRM operations, register users and devices to online DRM services,
+extract constraint information from the license, associate DRM content and its
+license, and finally decrypt DRM content.</p>
 
-    <p><strong>Note:</strong> We recommend that you integrate the Widevine
-    solution as it is already implemented and ready for you to use. </p>
-</p>
-
-<h3 id="plugin">Widevine DRM Plugin</h3>
-
-<p>
-Built on top of the Android DRM framework, the Widevine DRM plugin offers DRM and advanced copy protection features on Android devices. Widevine DRM is available in binary form under a royalty free license from Widevine. The Widevine DRM plugin provides the capability to license, securely distribute, and protect playback of multimedia content. Protected content is secured using an encryption scheme based on the open AES (Advanced Encryption Standard). An application can decrypt the content only if it obtains a license from the Widevine DRM licensing server for the current user. Widevine DRM functions on Android in the same way as it does on other platforms. Figure 1 shows how the WideVine Crypto Plugin fits into the Android stack:</p>
-
-
- <img src="images/drm_hal.png" alt="" />
-
- <p class="img-caption"><strong>Figure 1.</strong> Widevine Crypto Plugin</p>
-
-
-<h2 id="integrating">Integrating Widevine into Your Product</h2>
-
-<p>The following sections go over the different security levels that Widevine supports and the requirements that your product must meet to
-support Widevine. After reading the information, you need to determine the security level for your target hardware, integration, and Widevine keybox provisioning requirements.
-</p>
-<p >
-To integrate and distribute Widevine DRM on Android devices, contact your Android technical account manager to begin Widevine DRM integration.
-We recommend you engage early in your device development process with the Widevine team to provide the highest level of content protection on the device. 
-Certify devices using the Widevine test player and submit results to your Android technical account manager for approval.
-</p>
-
-<h3 id="security">
-Widevine DRM security levels
-</h3>
-
-<p>Security is never implemented in a single place in the stack, but instead relies on the integration of hardware, software, and services. The combination of hardware security functions, a trusted boot mechanism, and an isolated secure OS for handling security functions is critical to provide a secure device.</p>
-
-<p>
-At the system level, Android offers the core security features of the Linux kernel, extended and customized for mobile devices. In the application framework, Android provides an extensible DRM framework and system architecture for checking and enforcing digital rights. The Widevine DRM plugin integrates with the hardware platform to leverage the available security capabilities. The level of security offered is determined by a combination of the security capabilities of the hardware platform and the integration with Android and the Widevine DRM plugin. Widevine DRM security supports the three levels of security shown in the table below. 
-</p>
-
-<table>
-
-<tr>
-<th>Security Level</th>
-<th>Secure Bootloader</th>
-<th>Widevine Key Provisioning</th>
-<th>Security Hardware or ARM Trust Zone</th>
-<th>Widevine Keybox and Video Key Processing</th>
-<th>Hardware Video Path</th>
-</tr>
-<tr>
-  <td>Level 1</td>
-  <td>Yes</td>
-  <td>Factory provisioned Widevine Keys</td>
-  <td>Yes</td>
-  <td>Keys never exposed in clear to host CPU</td>
-  <td>Hardware protected video path</td>
-<tr>
-
-<tr>
-  <td>Level 2</td>
-  <td>Yes</td>
-  <td>Factory provisioned Widevine Keys</td>
-  <td>Yes</td>
-  <td>Keys never exposed in clear to host CPU</td>
-  <td>Hardware protected video path</td>
-<tr>
-
-<tr>
-  <td>Level 3</td>
-  <td>Yes*</td>
-  <td>Field provisioned Widevine Keys</td>
-  <td>No</td>
-  <td>Clear keys exposed to host CPU</td>
-  <td>Clear video streams delivered to video decoder</td>
-<tr>
-
-</table>
-
-<p><superscript>*</superscript>Device implementations may use a trusted bootloader, where in the bootloader is authenticated via an OEM key stored on a system partition.</p>
-
-<h3 id="security-details">
-Security level details
-</h3>
-<h4>
-Level 1
-</h4>
-<p>In this implementation Widevine DRM keys and decrypted content are never exposed to the host CPU. Only security hardware or a protected security co-processor uses clear key values and the media content is decrypted by the secure hardware. This level of security requires factory provisioning of the Widevine key-box or requires the Widevine key-box to be protected by a device key installed at the time of manufacturing. The following describes some key points to this security level:
-</p>
-
+<p>The Android DRM framework is implemented in two architectural layers:</p>
 <ul>
-  <li>Device manufacturers must provide a secure bootloader. The chain of trust from the bootloader must extend through any software or firmware components involved in the security implementation, such as the ARM TrustZone protected application and any components involved in the enforcement of the secure video path. </li>
-  <li>The Widevine key-box must be encrypted with a device-unique secret key that is not visible to software or probing methods outside of the TrustZone.</li>
-  <li>The Widevine key-box must be installed in the factory or delivered to the device using an approved secure delivery mechanism.</li>
-  <li>Device manufacturers must provide an implementation of the Widevine Level 1 OEMCrypto API that performs all key processing and decryption in a trusted environment.</li>
+<li>A DRM framework API, which is exposed to applications through the Android
+  application framework and runs through the Dalvik VM for standard
+  applications.</li>
+<li>A native code DRM manager, which implements the DRM framework and exposes an
+  interface for DRM plug-ins (agents) to handle rights management and decryption
+  for various DRM schemes.</li>
 </ul>
 
-<h4>Level 2</h4>
-<p>
-  In this security level, the Widevine keys are never exposed to the host CPU. Only security hardware or a protected security co-processor uses clear key values. An AES crypto block performs the high throughput AES decryption of the media stream.  The resulting clear media buffers are returned to the CPU for delivery to the video decoder. This level of security requires factory provisioning of the Widevine key-box or requires the Widevine key box to be protected by a key-box installed at the time of manufacturing.
-  The following list describes some key requirements of this security level:
-</p>
+ <img src="images/drm_framework.png" alt="Android DRM Framework" />
+
+<p class="img-caption"><strong>Figure 2.</strong> DRM framework</p>
+
+<p>See the <a
+href="http://developer.android.com/reference/android/drm/package-summary.html">Android
+DRM package reference</a> for additional details.</p>
+
+<h2 id="plug-ins">Plug-ins</h2>
+<p>As shown in the figure below, the DRM framework uses a plug-in architecture
+to support various DRM schemes. The DRM manager service runs in an independent
+process to ensure isolated execution of DRM plug-ins. Each API call from
+DrmManagerClient to DrmManagerService goes across process boundaries by using
+the binder IPC mechanism. The DrmManagerClient provides a Java programming
+language implementation as a common interface to runtime applications; it
+also provides a DrmManagerClient-native implementation as the interface to
+native modules. The caller of DRM framework accesses only the DrmManagerClient
+and does not have to be aware of each DRM scheme. </p>
+
+ <img src="images/drm_plugin.png" alt="Android DRM Plug-in" />
+
+<p class="img-caption"><strong>Figure 3.</strong> DRM framework with plug-ins</p>
+
+<p>Plug-ins are loaded automatically when DrmManagerService is launched. As
+shown in the figure below, the DRM plug-in manager loads/unloads all the
+available plug-ins. The DRM framework loads plug-ins automatically by finding
+them under:<br/>
+<code>/system/lib/drm/plugins/native/</code></p>
+ 
+<img src="images/drm_plugin_lifecycle.png" alt="Android DRM Plug-in Lifecycle" />
+
+<p class="img-caption"><strong>Figure 4.</strong> DRM plug-in lifecycle</p>
+
+<p>The plug-in developer should ensure the plug-in is located in the DRM
+framework plug-in discovery directory. See implementation instructions below for details.</p>
+
+<h2 id="implementation">Implementation</h2>
+
+<h3 id="IDrmEngine">IDrmEngine</h3>
+
+<p>IDrmEngine is an interface with a set of APIs to suit DRM use cases. Plug-in
+developers must implement the interfaces specified in IDrmEngine and the
+listener interfaces specified below. This document assumes the plug-in developer
+has access to the Android source tree. The interface definition is available in
+the source tree at:<br/>
+<code>
+&lt;platform_root&gt;/frameworks/base/drm/libdrmframework/plugins/common/include
+</code></p>
+
+<h3 id="DrmInfo">DRM Info</h3>
+<p>DrmInfo is a wrapper class that wraps the protocol for communicating with the
+DRM server. Server registration, deregistration, license acquisition, or any other
+server-related transaction can be achieved by processing an instance of DrmInfo.
+The protocol should be described by the plug-in in XML format. Each DRM plug-in
+would accomplish the transaction by interpreting the protocol. The DRM framework
+defines an API to retrieve an instance of DrmInfo called acquireDrmInfo().</p>
+
+<code>DrmInfo* acquireDrmInfo(int uniqueId, const DrmInfoRequest* drmInfoRequest);</code>
+<p>Retrieves the information necessary for registration, deregistration, or
+rights acquisition. See <a
+href="http://developer.android.com/reference/android/drm/DrmInfoRequest.html">DrmInfoRequest</a> for more information.</p>
+
+<code>DrmInfoStatus* processDrmInfo(int uniqueId, const DrmInfo* drmInfo);</code>
+<p>processDrmInfo() behaves asynchronously and the results of the transaction can
+be retrieved either from OnEventListener or OnErrorListener.</p>
+
+<h3 id="drm-rights">DRM rights</h3>
+
+<p>The association of DRM content and the license is required to allow playback
+of DRM content. Once the association has been made, the license will be handled in
+the DRM framework so the Media Player application is abstracted from the existence
+of license.</p>
+
+<code>int checkRightsStatus(int uniqueId, const String8&amp; path, int
+action);</code>
+<p>Checks whether the given content has valid rights for the specified action.
+The input parameters are the unique identifier for a session, the path of the
+protected content, and the action to check (for example, Action::PLAY).</p>
+
+<code>status_t saveRights(int uniqueId, const DrmRights&amp; drmRights,
+            const String8&amp; rightsPath, const String8&amp;
+contentPath);</code>
+<p>Save DRM rights to specified rights path and make association with content
+path.</p>
+
+<h3 id="metadata">License Metadata</h3>
+<p>License metadata, such as license expiry time and repeat count, may be
+embedded inside the rights of the protected content. The Android DRM framework
+provides APIs to return constraints associated with input content. See <a
+href="http://developer.android.com/reference/android/drm/DrmManagerClient.html">DrmManagerClient</a>
+for more information.</p>
+
+<code>DrmConstraints* getConstraints(int uniqueId, const String path, int
+action);</code>
+<p>The getConstraints function call returns key-value pairs of constraints
+embedded in protected content. To retrieve the constraints, the uniqueId (the
+unique identifier for a session) and the path of the protected content are required.
+The action, defined as Action::DEFAULT, Action::PLAY, etc., is also required.</p>
+
+ <img src="images/drm_license_metadata.png" alt="Android DRM License Metadata" />
+
+<p class="img-caption"><strong>Figure 5.</strong> Retrieve license metadata</p>
+
+<code>DrmMetadata* getMetadata(int uniqueId, const String path);</code>
+<p>Get metadata information associated with input content for a given path of the
+protected content to return key-value pairs of metadata.</p>
+
+<h3 id="decrypt-session">Decrypt session</h3>
+<p>To maintain the decryption session, the caller of the DRM framework has to
+invoke openDecryptSession() at the beginning of the decryption sequence.
+openDecryptSession() asks each DRM plug-in if it can handle input DRM
+content.</p>
+<code>
+status_t openDecryptSession(
+   int uniqueId, DecryptHandle* decryptHandle, int fd, off64_t offset, off64_t length);
+</code>
+
+<p>The above call opens a decrypt session for the protected content identified
+by the given file descriptor, offset, and length. Each registered DRM plug-in
+is queried to determine whether it can handle the content, and the resulting
+handle is returned through decryptHandle.</p>
+
+<h3 id="listeners">DRM plug-in Listeners</h3>
+
+<p>Some APIs in DRM framework behave asynchronously in a DRM transaction. An
+application can register three listener classes to DRM framework.</p>
 
 <ul>
-  <li>Device manufacturers must provide a secure bootloader. The chain of trust from the bootloader must extend through any software or firmware components involved in the security implementation, such as the TrustZone protected application. </li>
-  <li>The Widevine key-box must be encrypted with a device-unique secret key that is not visible to software or probing methods outside of the TrustZone.</li>
-  <li>The Widevine key-box must be installed in the factory or delivered to the device using an approved secure delivery mechanism.</li>
-  <li>Device manufacturers must provide an implementation of the Widevine Level 2 OEMCrypto API that performs all key processing and decryption in a trusted environment.</li>
-  <li>Device manufacturers must provide a bootloader that loads signed system images only. For devices that allow users to load a custom operating system or gain root privileges on the device by unlocking the bootloader, device manufacturers must support the following:
-    <ul>
-      <li>Device manufacturers must provide a bootloader that allows a Widevine key-box to be written only when the bootloader is in a locked state.</li>
-      <li>The Widevine key-box must be stored in a region of memory that is erased or is inaccessible when the device bootloader is in an unlocked state.</li>
-    </ul>
-  </li>
+<li>OnEventListener for results of asynchronous APIs</li>
+<li>OnErrorListener for receiving errors of asynchronous APIs</li>
+<li>OnInfoListener for any supplementary information during DRM
+transactions.</li>
 </ul>
 
-<h4>Level 3</h4>
-<p>
-This security level relies on the secure bootloader to verify the system image. An AES crypto block performs the AES decryption of the media stream and the resulting clear media buffers are returned to the CPU for delivery to the video decoder.
-</p>
+<h3 id="source">Source</h3>
 
-<p>Device manufacturers must provide a bootloader that loads signed system images only. For devices that allow users to load a custom operating system or gain root privileges on the device by unlocking the bootloader, device manufacturers must support the following:</p>
-    <ul>
-      <li>Device manufacturers must provide a bootloader that allows a Widevine key-box to be written only when the bootloader is in a locked state.</li>
-      <li>The Widevine key-box must be stored in a region of memory that is erased or is inaccessible when the device bootloader is in an unlocked state.</li>
-    </ul>
+<p>The Android DRM framework includes a passthru plug-in as a sample plug-in.
+The implementation for passthru plug-in can be found in the Android source tree
+at:<br/>
+<code>
+&lt;platform_root&gt;/frameworks/base/drm/libdrmframework/plugins/passthru
+</code></p>
+
+<h3 id="build">Build and Integration</h3>
+
+<p>Add the following to the Android.mk of the plug-in implementation. The
+passthru plug-in is used as a sample.</p>
+
+<code>
+PRODUCT_COPY_FILES +=
+$(TARGET_OUT_SHARED_LIBRARIES)/&lt;plugin_library&gt;:system/lib/drm/plugins/native/&lt;plugin_library&gt;
+e.g.,<br/>
+PRODUCT_COPY_FILES += $(TARGET_OUT_SHARED_LIBRARIES)/
+libdrmpassthruplugin.so:system/lib/drm/plugins/native/libdrmpassthruplugin.so
+</code>
+<br/>
+<br/>
+<p>Plug-in developers must locate their respective plug-ins under this
+directory like so:<br/>
+<code>/system/lib/drm/plugins/native/libdrmpassthruplugin.so</code></p>
diff --git a/src/devices/graphics.jd b/src/devices/graphics.jd
index 45ebfae..c8f11e8 100644
--- a/src/devices/graphics.jd
+++ b/src/devices/graphics.jd
@@ -2,7 +2,7 @@
 @jd:body
 
 <!--
-    Copyright 2013 The Android Open Source Project
+    Copyright 2014 The Android Open Source Project
 
     Licensed under the Apache License, Version 2.0 (the "License");
     you may not use this file except in compliance with the License.
@@ -16,6 +16,7 @@
     See the License for the specific language governing permissions and
     limitations under the License.
 -->
+
 <div id="qv-wrapper">
   <div id="qv">
     <h2>In this document</h2>
@@ -24,354 +25,203 @@
   </div>
 </div>
 
-<p>
-  The Android framework has a variety of graphics rendering APIs for 2D and 3D that interact with
-  your HAL implementations and graphics drivers, so it is important to have a good understanding of
-  how they work at a higher level. There are two general ways that app developers can draw things
-  to the screen: with Canvas or OpenGL.
-</p>
-<p>
-  <a href="http://developer.android.com/reference/android/graphics/Canvas.html">android.graphics.Canvas</a>
-  is a 2D graphics API and is the most widely used graphics API by
-  developers. Canvas operations draw all the stock <a href="http://developer.android.com/reference/android/view/View.html">android.view.View</a>s
-  and custom <a href="http://developer.android.com/reference/android/view/View.html">android.view.View</a>s in Android. Prior to Android 3.0, Canvas always
-  used the non-hardware accelerated Skia 2D drawing library to draw.
-</p>
-<p>
-  Introduced in Android 3.0, hardware acceleration for Canvas APIs uses a new drawing library
-  called OpenGLRenderer that translates Canvas operations to OpenGL operations so that they can
-  execute on the GPU. Developers had to opt-in to this feature previously, but beginning in Android
-  4.0, hardware-accelerated Canvas is enabled by default. Consequently, a hardware GPU that
-  supports OpenGL ES 2.0 is mandatory for Android 4.0 devices.
-</p>
-<p>
-  Additionally, the <a href="https://developer.android.com/guide/topics/graphics/hardware-accel.html">Hardware Acceleration guide</a>
-  explains how the hardware-accelerated drawing path works and identifies the differences in behavior from the software drawing path.
-</p>
-<p>
-  The other main way that developers render graphics is by using OpenGL ES 1.x or 2.0 to directly
-  render to a surface.  Android provides OpenGL ES interfaces in the
-  <a href="http://developer.android.com/reference/android/opengl/package-summary.html">android.opengl</a> package
-  that a developer can use to call into your GL implementation with the SDK or with native APIs
-  provided in the Android NDK. 
+<p>The Android framework offers a variety of graphics rendering APIs for 2D and
+3D that interact with manufacturer implementations of graphics drivers, so it
+is important to have a good understanding of how those APIs work at a higher
+level. This page introduces the graphics hardware abstraction layer (HAL) upon
+which those drivers are built.</p>
 
-  <p class="note"><strong>Note:</strong>A third option, Renderscript, was introduced in Android 3.0 to
-  serve as a platform-agnostic graphics rendering API (it used OpenGL ES 2.0 under the hood), but
-  will be deprecated starting in the Android 4.1 release.
-</p>
-<h2 id="render">
-  How Android Renders Graphics
-</h2>
-<p>
-  No matter what rendering API developers use, everything is rendered onto a buffer of pixel data
-  called a "surface." Every window that is created on the Android platform is backed by a surface.
-  All of the visible surfaces that are rendered to are composited onto the display
-  by the SurfaceFlinger, Android's system service that manages composition of surfaces.
-  Of course, there are more components that are involved in graphics rendering, and the
-  main ones are described below:
-</p>
+<p>Application developers draw images to the screen in two ways: with Canvas or
+OpenGL. See <a
+href="{@docRoot}devices/graphics/architecture.html">System-level graphics
+architecture</a> for a detailed description of Android graphics
+components.</p>
 
-<dl>
-  <dt>
-    <strong>Image Stream Producers</strong>
-  </dt>
-    <dd>Image stream producers can be things such as an OpenGL ES game, video buffers from the media server,
-      a Canvas 2D application, or basically anything that produces graphic buffers for consumption.
-    </dd>
+<p><a
+href="http://developer.android.com/reference/android/graphics/Canvas.html">android.graphics.Canvas</a>
+is a 2D graphics API and is the most popular graphics API among developers.
+Canvas operations draw all the stock and custom <a
+href="http://developer.android.com/reference/android/view/View.html">android.view.View</a>s
+in Android. In Android, hardware acceleration for Canvas APIs is accomplished
+with a drawing library called OpenGLRenderer that translates Canvas operations
+to OpenGL operations so they can execute on the GPU.</p>
 
-  <dt>
-    <strong>Image Stream Consumers</strong>
-  </dt>
-  <dd>The most common consumer of image streams is SurfaceFlinger, the system service that consumes
-    the currently visible surfaces and composites them onto the display using
-    information provided by the Window Manager. SurfaceFlinger is the only service that can
-    modify the content of the display. SurfaceFlinger uses OpenGL and the
-    hardware composer to compose a group of surfaces. Other OpenGL ES apps can consume image
-    streams as well, such as the camera app consuming a camera preview image stream.
-  </dd>
-  <dt>
-    <strong>SurfaceTexture</strong>
-  </dt>
-  <dd>SurfaceTexture contains the logic that ties image stream producers and image stream consumers together
-    and is made of three parts: <code>SurfaceTextureClient</code>, <code>ISurfaceTexture</code>, and
-    <code>SurfaceTexture</code> (in this case, <code>SurfaceTexture</code> is the actual C++ class and not
-    the name of the overall component). These three parts facilitate the producer (<code>SurfaceTextureClient</code>),
-    binder (<code>ISurfaceTexture</code>), and consumer (<code>SurfaceTexture</code>)
-    components of SurfaceTexture in processes such as requesting memory from Gralloc,
-    sharing memory across process boundaries, synchronizing access to buffers, and pairing the appropriate consumer with the producer.
-    SurfaceTexture can operate in both asynchronous (producer never blocks waiting for consumer and drops frames) and
-    synchronous (producer waits for consumer to process textures) modes. Some examples of image
-    producers are the camera preview produced by the camera HAL or an OpenGL ES game. Some examples
-    of image consumers are SurfaceFlinger or another app that wants to display an OpenGL ES stream
-    such as the camera app displaying the camera viewfinder.
-  </dd>
+<p>Beginning in Android 4.0, hardware-accelerated Canvas is enabled by default.
+Consequently, a hardware GPU that supports OpenGL ES 2.0 is mandatory for
+Android 4.0 and later devices. See the <a
+href="https://developer.android.com/guide/topics/graphics/hardware-accel.html">Hardware
+Acceleration guide</a> for an explanation of how the hardware-accelerated
+drawing path works and the differences in its behavior from that of the
+software drawing path.</p>
 
- <dt>
-    <strong>Window Manager</strong>
-  </dt>
-  <dd>
-    The Android system service that controls window lifecycles, input and focus events, screen
-    orientation, transitions, animations, position, transforms, z-order, and many other aspects of
-    a window (a container for views). A window is always backed by a surface. The Window Manager
-    sends all of the window metadata to SurfaceFlinger, so SurfaceFlinger can use that data
-    to figure out how to composite surfaces on the display.
-  </dd>
-  
-  <dt>
-    <strong>Hardware Composer</strong>
-  </dt>
-  <dd>
-    The hardware abstraction for the display subsystem. SurfaceFlinger can delegate certain
-    composition work to the hardware composer to offload work from the OpenGL and the GPU. This makes
-    compositing faster than having SurfaceFlinger do all the work. Starting with Jellybean MR1,
-    new versions of the hardware composer have been introduced. See the <code>hardware/libhardware/include/hardware/gralloc.h</code> <a href="#hwc">Hardware composer</a> section
-    for more information.
-  </dd>
+<p>In addition to Canvas, the other main way that developers render graphics is
+by using OpenGL ES to directly render to a surface. Android provides OpenGL ES
+interfaces in the <a
+href="http://developer.android.com/reference/android/opengl/package-summary.html">android.opengl</a>
+package that developers can use to call into their GL implementations with the
+SDK or with native APIs provided in the <a
+href="https://developer.android.com/tools/sdk/ndk/index.html">Android
+NDK</a>.</p>
 
-    <dt>
-    <strong>Gralloc</strong>
-  </dt>
-  <dd>Allocates memory for graphics buffers. See the  If you
-    are using version 1.1 or later of the <a href="#hwc">hardware composer</a>, this HAL is no longer needed.</dd>
-  
- 
-</dl>
-<p>
-  The following diagram shows how these components work together:
-</p><img src="images/graphics_surface.png">
-<p class="img-caption">
-  <strong>Figure 1.</strong> How surfaces are rendered
-</p>
+<h2 id=android_graphics_components>Android graphics components</h2>
 
-</p>
-<h2 id="provide">
-  What You Need to Provide
-</h2>
-<p>
- The following list and sections describe what you need to provide to support graphics in your product:
-</p>
-<ul>
-  <li>OpenGL ES 1.x Driver
-  </li>
-  <li>OpenGL ES 2.0 Driver
-  </li>
-  <li>EGL Driver
-  </li>
-  <li>Gralloc HAL implementation
-  </li>
-  <li>Hardware Composer HAL implementation
-  </li>
-  <li>Framebuffer HAL implementation
-  </li>
-</ul>
-<h3 id="gl">
-  OpenGL and EGL drivers
-</h3>
-<p>
-  You must provide drivers for OpenGL ES 1.x, OpenGL ES 2.0, and EGL. Some key things to keep in
-  mind are:
-</p>
-<ul>
-  <li>The GL driver needs to be robust and conformant to OpenGL ES standards.
-  </li>
-  <li>Do not limit the number of GL contexts. Because Android allows apps in the background and
-  tries to keep GL contexts alive, you should not limit the number of contexts in your driver. It
-  is not uncommon to have 20-30 active GL contexts at once, so you should also be careful with the
-  amount of memory allocated for each context.
-  </li>
-  <li>Support the YV12 image format and any other YUV image formats that come from other
-    components in the system such as media codecs or the camera.
-  </li>
-  <li>Support the mandatory extensions: <code>GL_OES_texture_external</code>,
-  <code>EGL_ANDROID_image_native_buffer</code>, and <code>EGL_ANDROID_recordable</code>. We highly
-  recommend supporting <code>EGL_ANDROID_blob_cache</code> and <code>EGL_KHR_fence_sync</code> as
-  well.</li>
-</ul>
+<p>No matter what rendering API developers use, everything is rendered onto a
+"surface." The surface represents the producer side of a buffer queue that is
+often consumed by SurfaceFlinger. Every window that is created on the Android
+platform is backed by a surface. All of the visible surfaces rendered are
+composited onto the display by SurfaceFlinger.</p>
 
-<p>
-  Note that the OpenGL API exposed to app developers is different from the OpenGL interface that
-  you are implementing. Apps do not have access to the GL driver layer, and must go through the
-  interface provided by the APIs.
-</p>
-<h4>
-  Pre-rotation
-</h4>
-<p>Many times, hardware overlays do not support rotation, so the solution is to pre-transform the buffer before
-  it reaches SurfaceFlinger. A query hint in ANativeWindow was added (<code>NATIVE_WINDOW_TRANSFORM_HINT</code>)
-  that represents the most likely transform to be be applied to the buffer by SurfaceFlinger.
+<p>The following diagram shows how the key components work together:</p>
 
-  Your GL driver can use this hint to pre-transform the buffer before it reaches SurfaceFlinger, so when the buffer
-  actually reaches SurfaceFlinger, it is correctly transformed. See the ANativeWindow
-  interface defined in <code>system/core/include/system/window.h</code> for more details. The following
-  is some pseudo-code that implements this in the hardware composer:
-</p>
+<img src="graphics/images/graphics_surface.png" alt="image-rendering components">
 
-<pre>
-ANativeWindow->query(ANativeWindow, NATIVE_WINDOW_DEFAULT_WIDTH, &w);
-ANativeWindow->query(ANativeWindow, NATIVE_WINDOW_DEFAULT_HEIGHT, &h);
-ANativeWindow->query(ANativeWindow, NATIVE_WINDOW_TRANSFORM_HINT, &hintTransform);
-if (hintTransform & HAL_TRANSFORM_ROT_90)
-swap(w, h);
+<p class="img-caption"><strong>Figure 1.</strong> How surfaces are rendered</p>
 
-native_window_set_buffers_dimensions(anw, w, h);
-ANativeWindow->dequeueBuffer(...);
+<p>The main components are described below:</p>
 
-// here GL driver renders content transformed by " hintTransform "
+<h3 id=image_stream_producers>Image Stream Producers</h3>
 
-int inverseTransform;
-inverseTransform = hintTransform;
-if (hintTransform & HAL_TRANSFORM_ROT_90)
-   inverseTransform ^= HAL_TRANSFORM_ROT_180;
+<p>An image stream producer can be anything that produces graphic buffers for
+consumption. Examples include OpenGL ES, Canvas 2D, and mediaserver video
+decoders.</p>
 
-native_window_set_buffers_transform(anw, inverseTransform);
+<h3 id=image_stream_consumers>Image Stream Consumers</h3>
 
-ANativeWindow->queueBuffer(...);
-</pre>
+<p>The most common consumer of image streams is SurfaceFlinger, the system
+service that consumes the currently visible surfaces and composites them onto
+the display using information provided by the Window Manager. SurfaceFlinger is
+the only service that can modify the content of the display. SurfaceFlinger
+uses OpenGL and the Hardware Composer to compose a group of surfaces.</p>
 
-<h3 id="gralloc">
-  Gralloc HAL
-</h3>
-<p>
-  The graphics memory allocator is needed to allocate memory that is requested by
-  SurfaceTextureClient in image producers. You can find a stub implementation of the HAL at
-  <code>hardware/libhardware/modules/gralloc.h</code>
-</p>
-<h4>
-  Protected buffers
-</h4>
-<p>
-  There is a gralloc usage flag <code>GRALLOC_USAGE_PROTECTED</code> that allows
-  the graphics buffer to be displayed only through a hardware protected path.
-</p>
-<h3 id="hwc">
-  Hardware Composer HAL
-</h3>
-<p>
-  The hardware composer is used by SurfaceFlinger to composite surfaces to the screen. The hardware
-  composer abstracts things like overlays and 2D blitters and helps offload some things that would
-  normally be done with OpenGL. 
-</p>
+<p>Other OpenGL ES apps can consume image streams as well, such as the camera
+app consuming a camera preview image stream. Non-GL applications can be
+consumers too, for example the ImageReader class.</p>
 
-<p>Jellybean MR1 introduces a new version of the HAL. We recommend that you start using version 1.1 of the hardware
-  composer HAL as it will provide support for the newest features (explicit synchronization, external displays, etc).
-  Keep in mind that in addition to 1.1 version, there is also a 1.0 version of the HAL that we used for internal
-  compatibility reasons and a 1.2 draft mode of the hardware composer HAL. We recommend that you implement
-  version 1.1 until 1.2 is out of draft mode.
-</p>
+<h3 id=window_manager>Window Manager</h3>
 
- <p>Because the physical display hardware behind the hardware composer
-  abstraction layer can vary from device to device, it is difficult to define recommended features, but
-  here is some guidance:</p>
+<p>The Android system service that controls a window, which is a container for
+views. A window is always backed by a surface. This service oversees
+lifecycles, input and focus events, screen orientation, transitions,
+animations, position, transforms, z-order, and many other aspects of a window.
+The Window Manager sends all of the window metadata to SurfaceFlinger so
+SurfaceFlinger can use that data to composite surfaces on the display.</p>
 
-<ul>
-  <li>The hardware composer should support at least 4 overlays (status bar, system bar, application,
-  and live wallpaper) for phones and 3 overlays for tablets (no status bar).</li>
-  <li>Layers can be bigger than the screen, so the hardware composer should be able to handle layers
-    that are larger than the display (For example, a wallpaper).</li>
-  <li>Pre-multiplied per-pixel alpha blending and per-plane alpha blending should be supported at the same time.</li>
-  <li>The hardware composer should be able to consume the same buffers that the GPU, camera, video decoder, and Skia buffers are producing,
-    so supporting some of the following properties is helpful:
-   <ul>
-     <li>RGBA packing order</li>
-     <li>YUV formats</li>
-     <li>Tiling, swizzling, and stride properties</li>
-   </ul>
-  </li>
-  <li>A hardware path for protected video playback must be present if you want to support protected content.</li>
-</ul>
-<p>
-  The general recommendation when implementing your hardware composer is to implement a no-op
-  hardware composer first. Once you have the structure done, implement a simple algorithm to
-  delegate composition to the hardware composer. For example, just delegate the first three or four
-  surfaces to the overlay hardware of the hardware composer. After that focus on common use cases,
-  such as:
-</p>
-<ul>
-  <li>Full-screen games in portrait and landscape mode
-  </li>
-  <li>Full-screen video with closed captioning and playback control
-  </li>
-  <li>The home screen (compositing the status bar, system bar, application window, and live
-  wallpapers)
-  </li>
-  <li>Protected video playback
-  </li>
-  <li>Multiple display support
-  </li>
-</ul>
-<p>
-  After implementing the common use cases, you can focus on optimizations such as intelligently
-  selecting the surfaces to send to the overlay hardware that maximizes the load taken off of the
-  GPU. Another optimization is to detect whether the screen is updating. If not, delegate composition
-  to OpenGL instead of the hardware composer to save power. When the screen updates again, contin`ue to
-  offload composition to the hardware composer.
-</p>
+<h3 id=hardware_composer>Hardware Composer</h3>
 
-<p>
-  You can find the HAL for the hardware composer in the
-  <code>hardware/libhardware/include/hardware/hwcomposer.h</code> and <code>hardware/libhardware/include/hardware/hwcomposer_defs.h</code>
-  files. A stub implementation is available in the <code>hardware/libhardware/modules/hwcomposer</code> directory.
-</p>
+<p>The hardware abstraction for the display subsystem. SurfaceFlinger can
+delegate certain composition work to the Hardware Composer to offload work from
+OpenGL and the GPU. SurfaceFlinger acts as just another OpenGL ES client. So
+when SurfaceFlinger is actively compositing one buffer or two into a third, for
+instance, it is using OpenGL ES. This makes compositing lower power than having
+the GPU conduct all computation.</p>
 
-<h4>
-  VSYNC
-</h4>
-<p>
-  VSYNC synchronizes certain events to the refresh cycle of the display. Applications always
-  start drawing on a VSYNC boundary and SurfaceFlinger always composites on a VSYNC boundary.
-  This eliminates stutters and improves visual performance of graphics.
-  The hardware composer has a function pointer</p>
+<p>The Hardware Composer HAL conducts the other half of the work. This HAL is
+the central point for all Android graphics rendering. Hardware Composer must
+support events, one of which is VSYNC. Another is hotplug for plug-and-play
+HDMI support.</p>
 
-    <pre>int (waitForVsync*) (int64_t *timestamp)</pre>
+<p>See the <a href="{@docRoot}devices/graphics.html#hardware_composer_hal">Hardware Composer
+HAL</a> section for more information.</p>
 
-  <p>that points to a function you must implement for VSYNC. This function blocks until
-    a VSYNC happens and returns the timestamp of the actual VSYNC.
-    A client can receive a VSYNC timestamps once, at specified intervals, or continously (interval of 1). 
-    You must implement VSYNC to have no more than a 1ms lag at the maximum (1/2ms or less is recommended), and
-    the timestamps returned must be extremely accurate.
-</p>
+<h3 id=gralloc>Gralloc</h3>
 
-<h4>Explicit synchronization</h4>
-<p>Explicit synchronization is required in Jellybean MR1 and later and provides a mechanism
-for Gralloc buffers to be acquired and released in a synchronized way.
-Explicit synchronization allows producers and consumers of graphics buffers to signal when
-they are done with a buffer. This allows the Android system to asynchronously queue buffers
-to be read or written with the certainty that another consumer or producer does not currently need them.</p>
-<p>
-This communication is facilitated with the use of synchronization fences, which are now required when requesting
-a buffer for consuming or producing. The
- synchronization framework consists of three main parts:</p>
-<ul>
-  <li><code>sync_timeline</code>: a monotonically increasing timeline that should be implemented
-    for each driver instance. This basically is a counter of jobs submitted to the kernel for a particular piece of hardware.</li>
-    <li><code>sync_pt</code>: a single value or point on a <code>sync_timeline</code>. A point
-      has three states: active, signaled, and error. Points start in the active state and transition
-      to the signaled or error states. For instance, when a buffer is no longer needed by an image
-      consumer, this <code>sync_point</code> is signaled so that image producers
-      know that it is okay to write into the buffer again.</li>
-    <li><code>sync_fence</code>: a collection of <code>sync_pt</code>s that often have different
-      <code>sync_timeline</code> parents (such as for the display controller and GPU). This allows
-      multiple consumers or producers to signal that
-      they are using a buffer and to allow this information to be communicated with one function parameter.
-      Fences are backed by a file descriptor and can be passed from kernel-space to user-space.
-      For instance, a fence can contain two <code>sync_point</code>s that signify when two separate
-      image consumers are done reading a buffer. When the fence is signaled,
-      the image producers now know that both consumers are done consuming.</li>
-    </ul>
+<p>The graphics memory allocator is needed to allocate memory that is requested
+by image producers. See the <a
+href="{@docRoot}devices/graphics.html#gralloc">Gralloc HAL</a> section for more
+information.</p>
 
-<p>To implement explicit synchronization, you need to do provide the following:
+<h2 id=data_flow>Data flow</h2>
 
-<ul>
-  <li>A kernel-space driver that implements a synchronization timeline for a particular piece of hardware. Drivers that
-    need to be fence-aware are generally anything that accesses or communicates with the hardware composer.
-    See the <code>system/core/include/sync/sync.h</code> file for more implementation details. The
-    <code>system/core/libsync</code> directory includes a library to communicate with the kernel-space </li>
-  <li>A hardware composer HAL module (version 1.1 or later) that supports the new synchronization functionality. You will need to provide
-  the appropriate synchronization fences as parameters to the <code>set()</code> and <code>prepare()</code> functions in the HAL. As a last resort,
-you can pass in -1 for the file descriptor parameters if you cannot support explicit synchronization for some reason. This
-is not recommended, however.</li>
-  <li>Two GL specific extensions related to fences, <code>EGL_ANDROID_native_fence_sync</code> and <code>EGL_ANDROID_wait_sync</code>,
-    along with incorporating fence support into your graphics drivers.</ul>
+<p>See the following diagram for a depiction of the Android graphics
+pipeline:</p>
 
+<img src="graphics/images/graphics_pipeline.png" alt="graphics data flow">
 
+<p class="img-caption"><strong>Figure 2.</strong> How graphic data flows through
+Android</p>
 
+<p>The objects on the left are renderers producing graphics buffers, such as
+the home screen, status bar, and system UI. SurfaceFlinger is the compositor
+and Hardware Composer is the composer.</p>
+
+<h3 id=bufferqueue>BufferQueue</h3>
+
+<p>BufferQueues provide the glue between the Android graphics components. These
+are a pair of queues that mediate the constant cycle of buffers from the
+producer to the consumer. Once the producers hand off their buffers,
+SurfaceFlinger is responsible for compositing everything onto the display.</p>
+
+<p>See the following diagram for the BufferQueue communication process.</p>
+
+<img src="graphics/images/bufferqueue.png" alt="BufferQueue communication process">
+
+<p class="img-caption"><strong>Figure 3.</strong> BufferQueue communication
+process</p>
+
+<p>BufferQueue contains the logic that ties image stream producers and image
+stream consumers together. Some examples of image producers are the camera
+previews produced by the camera HAL or OpenGL ES games. Some examples of image
+consumers are SurfaceFlinger or another app that displays an OpenGL ES stream,
+such as the camera app displaying the camera viewfinder.</p>
+
+<p>BufferQueue is a data structure that combines a buffer pool with a queue and
+uses Binder IPC to pass buffers between processes. The producer interface, or
+what you pass to somebody who wants to generate graphic buffers, is
+IGraphicBufferProducer (part of <a
+href="http://developer.android.com/reference/android/graphics/SurfaceTexture.html">SurfaceTexture</a>).
+BufferQueue is often used to render to a Surface and consume with a GL
+Consumer, among other tasks.
+
+BufferQueue can operate in three different modes:</p>
+
+<p><em>Synchronous-like mode</em> - BufferQueue by default operates in a
+synchronous-like mode, in which every buffer that comes in from the producer
+goes out at the consumer. No buffer is ever discarded in this mode. And if the
+producer is too fast and creates buffers faster than they are being drained, it
+will block and wait for free buffers.</p>
+
+<p><em>Non-blocking mode</em> - BufferQueue can also operate in a non-blocking
+mode where it generates an error rather than waiting for a buffer in those
+cases. No buffer is ever discarded in this mode either. This is useful for
+avoiding potential deadlocks in application software that may not understand
+the complex dependencies of the graphics framework.</p>
+
+<p><em>Discard mode</em> - Finally, BufferQueue may be configured to discard
+old buffers rather than generate errors or wait. For instance, if conducting GL
+rendering to a texture view and drawing as quickly as possible, buffers must be
+dropped.</p>
+
+<p>To conduct most of this work, SurfaceFlinger acts as just another OpenGL ES
+client. So when SurfaceFlinger is actively compositing one buffer or two into a
+third, for instance, it is using OpenGL ES.</p>
+
+<p>The Hardware Composer HAL conducts the other half of the work. This HAL acts
+as the central point for all Android graphics rendering.</p>
+
+<h3 id=synchronization_framework>Synchronization framework</h3>
+
+<p>Since Android graphics offer no explicit parallelism, vendors have long
+implemented their own implicit synchronization within their own drivers. This
+is no longer required with the Android graphics synchronization framework. See
+the <a href="#explicit_synchronization">Explicit synchronization</a> section
+for implementation instructions.</p>
+
+<p>The synchronization framework explicitly describes dependencies between
+different asynchronous operations in the system. The framework provides a
+simple API that lets components signal when buffers are released. It also
+allows synchronization primitives to be passed between drivers from the kernel
+to userspace and between userspace processes themselves.</p>
+
+<p>For example, an application may queue up work to be carried out in the GPU.
+The GPU then starts drawing that image. Although the image hasn’t been drawn
+into memory yet, the buffer pointer can still be passed to the window
+compositor along with a fence that indicates when the GPU work will be
+finished. The window compositor may then start processing ahead of time and
+hand off the work to the display controller. In this manner, the CPU work can
+be done ahead of time. Once the GPU finishes, the display controller can
+immediately display the image.</p>
+
+<p>The synchronization framework also allows implementers to leverage
+synchronization resources in their own hardware components. Finally, the
+framework provides visibility into the graphics pipeline to aid in
+debugging.</p>
diff --git a/src/devices/graphics/architecture.jd b/src/devices/graphics/architecture.jd
index 6842dd7..75623cc 100644
--- a/src/devices/graphics/architecture.jd
+++ b/src/devices/graphics/architecture.jd
@@ -1,4 +1,4 @@
-page.title=Architecture
+page.title=Graphics architecture
 @jd:body
 
 <!--
diff --git a/src/devices/graphics/images/bufferqueue.png b/src/devices/graphics/images/bufferqueue.png
new file mode 100644
index 0000000..1951f46
--- /dev/null
+++ b/src/devices/graphics/images/bufferqueue.png
Binary files differ
diff --git a/src/devices/graphics/images/dispsync.png b/src/devices/graphics/images/dispsync.png
new file mode 100644
index 0000000..d97765c
--- /dev/null
+++ b/src/devices/graphics/images/dispsync.png
Binary files differ
diff --git a/src/devices/graphics/images/graphics_pipeline.png b/src/devices/graphics/images/graphics_pipeline.png
new file mode 100644
index 0000000..983a517
--- /dev/null
+++ b/src/devices/graphics/images/graphics_pipeline.png
Binary files differ
diff --git a/src/devices/graphics/images/graphics_surface.png b/src/devices/graphics/images/graphics_surface.png
new file mode 100644
index 0000000..6cd86ef
--- /dev/null
+++ b/src/devices/graphics/images/graphics_surface.png
Binary files differ
diff --git a/src/devices/graphics/implement.jd b/src/devices/graphics/implement.jd
new file mode 100644
index 0000000..59aca16
--- /dev/null
+++ b/src/devices/graphics/implement.jd
@@ -0,0 +1,605 @@
+page.title=Implementing graphics
+@jd:body
+
+<!--
+    Copyright 2014 The Android Open Source Project
+
+    Licensed under the Apache License, Version 2.0 (the "License");
+    you may not use this file except in compliance with the License.
+    You may obtain a copy of the License at
+
+        http://www.apache.org/licenses/LICENSE-2.0
+
+    Unless required by applicable law or agreed to in writing, software
+    distributed under the License is distributed on an "AS IS" BASIS,
+    WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+    See the License for the specific language governing permissions and
+    limitations under the License.
+-->
+
+<div id="qv-wrapper">
+  <div id="qv">
+    <h2>In this document</h2>
+    <ol id="auto-toc">
+    </ol>
+  </div>
+</div>
+
+
+<p>Follow the instructions here to implement the Android graphics HAL.</p>
+
+<h2 id=requirements>Requirements</h2>
+
+<p>The following list and sections describe what you need to provide to support
+graphics in your product:</p>
+
+<ul> <li> OpenGL ES 1.x Driver <li> OpenGL ES 2.0 Driver <li> OpenGL ES 3.0
+Driver (optional) <li> EGL Driver <li> Gralloc HAL implementation <li> Hardware
+Composer HAL implementation <li> Framebuffer HAL implementation </ul>
+
+<h2 id=implementation>Implementation</h2>
+
+<h3 id=opengl_and_egl_drivers>OpenGL and EGL drivers</h3>
+
+<p>You must provide drivers for OpenGL ES 1.x, OpenGL ES 2.0, and EGL. Here are
+some key considerations:</p>
+
+<ul> <li> The GL driver needs to be robust and conformant to OpenGL ES
+standards.  <li> Do not limit the number of GL contexts. Because Android allows
+apps in the background and tries to keep GL contexts alive, you should not
+limit the number of contexts in your driver.  <li> It is not uncommon to have
+20-30 active GL contexts at once, so you should also be careful with the amount
+of memory allocated for each context.  <li> Support the YV12 image format and
+any other YUV image formats that come from other components in the system such
+as media codecs or the camera.  <li> Support the mandatory extensions:
+<code>GL_OES_texture_external</code>,
+<code>EGL_ANDROID_image_native_buffer</code>, and
+<code>EGL_ANDROID_recordable</code>. The
+<code>EGL_ANDROID_framebuffer_target</code> extension is required for Hardware
+Composer 1.1 and higher, as well.  <li> We highly recommend also supporting
+<code>EGL_ANDROID_blob_cache</code>, <code>EGL_KHR_fence_sync</code>,
+<code>EGL_KHR_wait_sync</code>, and <code>EGL_ANDROID_native_fence_sync</code>.
+</ul>
+
+<p>Note the OpenGL API exposed to app developers is different from the OpenGL
+interface that you are implementing. Apps do not have access to the GL driver
+layer and must go through the interface provided by the APIs.</p>
+
+<h3 id=pre-rotation>Pre-rotation</h3>
+
+<p>Many hardware overlays do not support rotation, and even if they do it costs
+processing power. So the solution is to pre-transform the buffer before it
+reaches SurfaceFlinger. A query hint in <code>ANativeWindow</code> was added
+(<code>NATIVE_WINDOW_TRANSFORM_HINT</code>) that represents the most likely
+transform to be applied to the buffer by SurfaceFlinger. Your GL driver can use
+this hint to pre-transform the buffer before it reaches SurfaceFlinger so when
+the buffer arrives, it is correctly transformed.</p>
+
+<p>For example, you may receive a hint to rotate 90 degrees. You must generate
+a matrix and apply it to the buffer to prevent it from running off the end of
+the page. To save power, this should be done in pre-rotation. See the
+<code>ANativeWindow</code> interface defined in
+<code>system/core/include/system/window.h</code> for more details.</p>
+
+<h3 id=gralloc_hal>Gralloc HAL</h3>
+
+<p>The graphics memory allocator is needed to allocate memory that is requested
+by image producers. You can find the interface definition of the HAL at:
+<code>hardware/libhardware/modules/gralloc.h</code></p>
+
+<h3 id=protected_buffers>Protected buffers</h3>
+
+<p>The gralloc usage flag <code>GRALLOC_USAGE_PROTECTED</code> allows the
+graphics buffer to be displayed only through a hardware-protected path. These
+overlay planes are the only way to display DRM content. DRM-protected buffers
+cannot be accessed by SurfaceFlinger or the OpenGL ES driver.</p>
+
+<p>DRM-protected video can be presented only on an overlay plane. Video players
+that support protected content must be implemented with SurfaceView. Software
+running on unprotected hardware cannot read or write the buffer.
+Hardware-protected paths must appear on the Hardware Composer overlay. For
+instance, protected videos will disappear from the display if Hardware Composer
+switches to OpenGL ES composition.</p>
+
+<p>See the <a href="{@docRoot}devices/drm.html">DRM</a> page for a description
+of protected content.</p>
+
+<h3 id=hardware_composer_hal>Hardware Composer HAL</h3>
+
+<p>The Hardware Composer HAL is used by SurfaceFlinger to composite surfaces to
+the screen. The Hardware Composer abstracts objects like overlays and 2D
+blitters and helps offload some work that would normally be done with
+OpenGL.</p>
+
+<p>We recommend you start using version 1.3 of the Hardware Composer HAL as it
+will provide support for the newest features (explicit synchronization,
+external displays, and more). Because the physical display hardware behind the
+Hardware Composer abstraction layer can vary from device to device, it is
+difficult to define recommended features. But here is some guidance:</p>
+
+<ul> <li> The Hardware Composer should support at least four overlays (status
+bar, system bar, application, and wallpaper/background).  <li> Layers can be
+bigger than the screen, so the Hardware Composer should be able to handle
+layers that are larger than the display (for example, a wallpaper).  <li>
+Pre-multiplied per-pixel alpha blending and per-plane alpha blending should be
+supported at the same time.  <li> The Hardware Composer should be able to
+consume the same buffers that the GPU, camera, video decoder, and Skia buffers
+are producing, so supporting some of the following properties is helpful: <ul>
+<li> RGBA packing order <li> YUV formats <li> Tiling, swizzling, and stride
+properties </ul> <li> A hardware path for protected video playback must be
+present if you want to support protected content.  </ul>
+
+<p>The general recommendation when implementing your Hardware Composer is to
+implement a non-operational Hardware Composer first. Once you have the
+structure done, implement a simple algorithm to delegate composition to the
+Hardware Composer. For example, just delegate the first three or four surfaces
+to the overlay hardware of the Hardware Composer.</p>
+
+<p>Focus on optimization, such as intelligently selecting the surfaces to send
+to the overlay hardware that maximizes the load taken off of the GPU. Another
+optimization is to detect whether the screen is updating. If not, delegate
+composition to OpenGL instead of the Hardware Composer to save power. When the
+screen updates again, continue to offload composition to the Hardware
+Composer.</p>
+
+<p>Devices must report the display mode (or resolution). Android uses the first
+mode reported by the device. To support televisions, have the TV device report
+the mode selected for it by the manufacturer to Hardware Composer. See
+hwcomposer.h for more details.</p>
+
+<p>Prepare for common use cases, such as:</p>
+
+<ul> <li> Full-screen games in portrait and landscape mode <li> Full-screen
+video with closed captioning and playback control <li> The home screen
+(compositing the status bar, system bar, application window, and live
+wallpapers) <li> Protected video playback <li> Multiple display support </ul>
+
+<p>These use cases should address regular, predictable uses rather than edge
+cases that are rarely encountered. Otherwise, any optimization will have little
+benefit. Implementations must balance two competing goals: animation smoothness
+and interaction latency.</p>
+
+<p>Further, to make best use of Android graphics, you must develop a robust
+clocking strategy. Performance matters little if clocks have been turned down
+to make every operation slow. You need a clocking strategy that puts the clocks
+at high speed when needed, such as to make animations seamless, and then slows
+the clocks whenever the increased speed is no longer needed.</p>
+
+<p>Use the <code>adb shell dumpsys SurfaceFlinger</code> command to see
+precisely what SurfaceFlinger is doing. See the <a
+href="{@docRoot}devices/graphics/architecture.html#hwcomposer">Hardware
+Composer</a> section of the Architecture page for example output and a
+description of relevant fields.</p>
+
+<p>You can find the HAL for the Hardware Composer and additional documentation
+in: <code>hardware/libhardware/include/hardware/hwcomposer.h
+hardware/libhardware/include/hardware/hwcomposer_defs.h</code></p>
+
+<p>A stub implementation is available in the
+<code>hardware/libhardware/modules/hwcomposer</code> directory.</p>
+
+<h3 id=vsync>VSYNC</h3>
+
+<p>VSYNC synchronizes certain events to the refresh cycle of the display.
+Applications always start drawing on a VSYNC boundary, and SurfaceFlinger
+always composites on a VSYNC boundary. This eliminates stutters and improves
+visual performance of graphics. The Hardware Composer has a function
+pointer:</p>
+
+<pre class=prettyprint> int (*waitForVsync)(int64_t *timestamp) </pre>
+
+
+<p>This points to a function you must implement for VSYNC. This function blocks
+until a VSYNC occurs and returns the timestamp of the actual VSYNC. A message
+must be sent every time VSYNC occurs. A client can receive a VSYNC timestamp
+once, at specified intervals, or continuously (interval of 1). You must
+implement VSYNC to have no more than a 1ms lag at the maximum (0.5ms or less is
+recommended), and the timestamps returned must be extremely accurate.</p>
+
+<h4 id=explicit_synchronization>Explicit synchronization</h4>
+
+<p>Explicit synchronization is required and provides a mechanism for Gralloc
+buffers to be acquired and released in a synchronized way. Explicit
+synchronization allows producers and consumers of graphics buffers to signal
+when they are done with a buffer. This allows the Android system to
+asynchronously queue buffers to be read or written with the certainty that
+another consumer or producer does not currently need them. See the <a
+href="#synchronization_framework">Synchronization framework</a> section for an overview of
+this mechanism.</p>
+
+<p>The benefits of explicit synchronization include less behavior variation
+between devices, better debugging support, and improved testing metrics. For
+instance, the sync framework output readily identifies problem areas and root
+causes. And centralized SurfaceFlinger presentation timestamps show when events
+occur in the normal flow of the system.</p>
+
+<p>This communication is facilitated by the use of synchronization fences,
+which are now required when requesting a buffer for consuming or producing. The
+synchronization framework consists of three main building blocks:
+sync_timeline, sync_pt, and sync_fence.</p>
+
+<h5 id=sync_timeline>sync_timeline</h5>
+
+<p>A sync_timeline is a monotonically increasing timeline that should be
+implemented for each driver instance, such as a GL context, display controller,
+or 2D blitter. This is essentially a counter of jobs submitted to the kernel
+for a particular piece of hardware. It provides guarantees about the order of
+operations and allows hardware-specific implementations.</p>
+
+<p>Please note, the sync_timeline is offered as a CPU-only reference
+implementation called sw_sync (which stands for software sync). If possible,
+use sw_sync instead of a sync_timeline to save resources and avoid complexity.
+If you’re not employing a hardware resource, sw_sync should be sufficient.</p>
+
+<p>If you must implement a sync_timeline, use the sw_sync driver as a starting
+point. Follow these guidelines:</p>
+
+<ul> <li> Provide useful names for all drivers, timelines, and fences. This
+simplifies debugging.  <li> Implement timeline_value_str and pt_value_str
+operators in your timelines as they make debugging output much more readable.
+<li> If you want your userspace libraries (such as the GL library) to have
+access to the private data of your timelines, implement the fill_driver_data
+operator. This lets you get information about the immutable sync_fence and
+sync_pts so you might build command lines based upon them.  </ul>
+
+<p>When implementing a sync_timeline, <strong>don’t</strong>:</p>
+
+<ul> <li> Base it on any real view of time, such as when a wall clock or other
+piece of work might finish. It is better to create an abstract timeline that
+you can control.  <li> Allow userspace to explicitly create or signal a fence.
+This can result in one piece of the user pipeline creating a denial-of-service
+attack that halts all functionality. This is because the userspace cannot make
+promises on behalf of the kernel.  <li> Access sync_timeline, sync_pt, or
+sync_fence elements explicitly, as the API should provide all required
+functions.  </ul>
+
+<h5 id=sync_pt>sync_pt</h5>
+
+<p>A sync_pt is a single value or point on a sync_timeline. A point has three
+states: active, signaled, and error. Points start in the active state and
+transition to the signaled or error states. For instance, when a buffer is no
+longer needed by an image consumer, this sync_pt is signaled so that image
+producers know it is okay to write into the buffer again.</p>
+
+<h5 id=sync_fence>sync_fence</h5>
+
+<p>A sync_fence is a collection of sync_pts that often have different
+sync_timeline parents (such as for the display controller and GPU). These are
+the main primitives over which drivers and userspace communicate their
+dependencies. A fence is a promise from the kernel that it gives upon accepting
+work that has been queued and assures completion in a finite amount of
+time.</p>
+
+<p>This allows multiple consumers or producers to signal they are using a
+buffer and to allow this information to be communicated with one function
+parameter. Fences are backed by a file descriptor and can be passed from
+kernel-space to user-space. For instance, a fence can contain two sync_points
+that signify when two separate image consumers are done reading a buffer. When
+the fence is signaled, the image producers know both consumers are done
+consuming.
+
+Fences, like sync_pts, start active and then change state based upon the state
+of their points. If all sync_pts become signaled, the sync_fence becomes
+signaled. If one sync_pt falls into an error state, the entire sync_fence has
+an error state.
+
+Membership in the sync_fence is immutable once the fence is created. And since
+a sync_pt can be in only one fence, it is included as a copy. Even if two
+points have the same value, there will be two copies of the sync_pt in the
+fence.
+
+To get more than one point in a fence, a merge operation is conducted. In the
+merge, the points from two distinct fences are added to a third fence. If one
+of those points was signaled in the originating fence, and the other was not,
+the third fence will also not be in a signaled state.</p>
+
+<p>To implement explicit synchronization, you need to provide the
+following:</p>
+
+<ul> <li> A kernel-space driver that implements a synchronization timeline for
+a particular piece of hardware. Drivers that need to be fence-aware are
+generally anything that accesses or communicates with the Hardware Composer.
+Here are the key files (found in the android-3.4 kernel branch): <ul> <li> Core
+implementation: <ul> <li> <code>kernel/common/include/linux/sync.h</code> <li>
+<code>kernel/common/drivers/base/sync.c</code> </ul> <li> sw_sync: <ul> <li>
+<code>kernel/common/include/linux/sw_sync.h</code> <li>
+<code>kernel/common/drivers/base/sw_sync.c</code> </ul> <li> Documentation:
+<li> <code>kernel/common/Documentation/sync.txt</code> Finally, the
+<code>platform/system/core/libsync</code> directory includes a library to
+communicate with the kernel-space.  </ul> <li> A Hardware Composer HAL module
+(version 1.3 or later) that supports the new synchronization functionality. You
+will need to provide the appropriate synchronization fences as parameters to
+the set() and prepare() functions in the HAL.  <li> Two GL-specific extensions
+related to fences, <code>EGL_ANDROID_native_fence_sync</code> and
+<code>EGL_ANDROID_wait_sync</code>, along with incorporating fence support into
+your graphics drivers.  </ul>
+
+<p>For example, to use the API supporting the synchronization function, you
+might develop a display driver that has a display buffer function. Before the
+synchronization framework existed, this function would receive dma-bufs, put
+those buffers on the display, and block while the buffer is visible, like
+so:</p>
+
+<pre class=prettyprint>
+/*
+ * assumes buf is ready to be displayed.  returns when buffer is no longer on
+ * screen.
+ */
+void display_buffer(struct dma_buf *buf); </pre>
+
+
+<p>With the synchronization framework, the API call is slightly more complex.
+While putting a buffer on display, you associate it with a fence that says when
+the buffer will be ready. So you queue up the work, which you will initiate
+once the fence clears.</p>
+
+<p>In this manner, you are not blocking anything. You immediately return your
+own fence, which is a guarantee of when the buffer will be off of the display.
+As you queue up buffers, the kernel will list dependencies. With the
+synchronization framework:</p>
+
+<pre class=prettyprint>
+/*
+ * will display buf when fence is signaled.  returns immediately with a fence
+ * that will signal when buf is no longer displayed.
+ */
+struct sync_fence* display_buffer(struct dma_buf *buf, struct sync_fence
+*fence); </pre>
+
+
+<h4 id=sync_integration>Sync integration</h4>
+
+<h5 id=integration_conventions>Integration conventions</h5>
+
+<p>This section explains how to integrate the low-level sync framework with
+different parts of the Android framework and the drivers that need to
+communicate with one another.</p>
+
+<p>The Android HAL interfaces for graphics follow consistent conventions so
+when file descriptors are passed across a HAL interface, ownership of the file
+descriptor is always transferred. This means:</p>
+
+<ul> <li> if you receive a fence file descriptor from the sync framework, you
+must close it.  <li> if you return a fence file descriptor to the sync
+framework, the framework will close it.  <li> if you want to continue using the
+fence file descriptor, you must duplicate the descriptor.  </ul>
+
+<p>Every time a fence is passed through BufferQueue - such as for a window that
+passes a fence to BufferQueue saying when its new contents will be ready - the
+fence object is renamed. Since kernel fence support allows fences to have
+strings for names, the sync framework uses the window name and buffer index
+that is being queued to name the fence, for example:
+<code>SurfaceView:0</code></p>
+
+<p>This is helpful in debugging to identify the source of a deadlock. Those
+names appear in the output of <code>/d/sync</code> and bug reports when
+taken.</p>
+
+<h5 id=anativewindow_integration>ANativeWindow integration</h5>
+
+<p>ANativeWindow is fence aware. <code>dequeueBuffer</code>,
+<code>queueBuffer</code>, and <code>cancelBuffer</code> have fence
+parameters.</p>
+
+<h5 id=opengl_es_integration>OpenGL ES integration</h5>
+
+<p>OpenGL ES sync integration relies upon these two EGL extensions:</p>
+
+<ul> <li> <code>EGL_ANDROID_native_fence_sync</code> - provides a way to either
+wrap or create native Android fence file descriptors in EGLSyncKHR objects.
+<li> <code>EGL_ANDROID_wait_sync</code> - allows GPU-side stalls rather than
+CPU-side stalls, making the GPU wait for an EGLSyncKHR. This is essentially the same as the
+<code>EGL_KHR_wait_sync</code> extension. See the
+<code>EGL_KHR_wait_sync</code> specification for details.  </ul>
+
+<p>These extensions can be used independently and are controlled by a compile
+flag in libgui. To use them, first implement the
+<code>EGL_ANDROID_native_fence_sync</code> extension along with the associated
+kernel support. Next, add ANativeWindow support for fences to your driver and
+then turn on support in libgui to make use of the
+<code>EGL_ANDROID_native_fence_sync</code> extension.</p>
+
+<p>Then, as a second pass, enable the <code>EGL_ANDROID_wait_sync</code>
+extension in your driver and turn it on separately. The
+<code>EGL_ANDROID_native_fence_sync</code> extension consists of a distinct
+native fence EGLSync object type so extensions that apply to existing EGLSync
+object types don’t necessarily apply to <code>EGL_ANDROID_native_fence</code>
+objects to avoid unwanted interactions.</p>
+
+<p>The EGL_ANDROID_native_fence_sync extension employs a corresponding native
+fence file descriptor attribute that can be set only at creation time and
+cannot be directly queried onward from an existing sync object. This attribute
+can be set to one of two modes:</p>
+
+<ul> <li> A valid fence file descriptor - wraps an existing native Android
+fence file descriptor in an EGLSyncKHR object.  <li> -1 - creates a native
+Android fence file descriptor from an EGLSyncKHR object.  </ul>
+
+<p>The DupNativeFenceFD function call is used to extract the native Android
+fence file descriptor from an EGLSyncKHR object. This has the same result as
+querying the attribute that was set but adheres to the convention that the
+recipient closes the fence (hence the duplicate operation). Finally, destroying
+the EGLSync object should close the internal fence attribute.</p>
+
+<h5 id=hardware_composer_integration>Hardware Composer integration</h5>
+
+<p>Hardware Composer handles three types of sync fences:</p>
+
+<ul> <li> <em>Acquire fence</em> - one per layer, this is set before calling
+HWC::set. It signals when Hardware Composer may read the buffer.  <li>
+<em>Release fence</em> - one per layer, this is filled in by the driver in
+HWC::set. It signals when Hardware Composer is done reading the buffer so the
+framework can start using that buffer again for that particular layer.  <li>
+<em>Retire fence</em> - one per the entire frame, this is filled in by the
+driver each time HWC::set is called. This covers all of the layers for the set
+operation. It signals to the framework when all of the effects of this set
+operation have completed. The retire fence signals when the next set operation
+takes place on the screen.  </ul>
+
+<p>The retire fence can be used to determine how long each frame appears on the
+screen. This is useful in identifying the location and source of delays, such
+as a stuttering animation. </p>
+
+<h4 id=vsync_offset>VSYNC Offset</h4>
+
+<p>Application and SurfaceFlinger render loops should be synchronized to the
+hardware VSYNC. On a VSYNC event, the display begins showing frame N while
+SurfaceFlinger begins compositing windows for frame N+1. The app handles
+pending input and generates frame N+2.</p>
+
+<p>Synchronizing with VSYNC delivers consistent latency. It reduces errors in
+apps and SurfaceFlinger and the drifting of displays in and out of phase with
+each other. This, however, does assume application and SurfaceFlinger per-frame
+times don’t vary widely. Nevertheless, the latency is at least two frames.</p>
+
+<p>To remedy this, you may employ VSYNC offsets to reduce the input-to-display
+latency by making application and composition signal relative to hardware
+VSYNC. This is possible because application plus composition usually takes less
+than 33 ms.</p>
+
+<p>The result of VSYNC offset is three signals with same period, offset
+phase:</p>
+
+<ul> <li> <em>HW_VSYNC_0</em> - Display begins showing next frame <li>
+<em>VSYNC</em> - App reads input and generates next frame <li> <em>SF
+VSYNC</em> - SurfaceFlinger begins compositing for next frame </ul>
+
+<p>With VSYNC offset, SurfaceFlinger receives the buffer and composites the
+frame, while the application processes the input and renders the frame, all
+within a single frame of time.</p>
+
+<p>Please note, VSYNC offsets reduce the time available for app and composition
+and therefore provide a greater chance for error.</p>
+
+<h5 id=dispsync>DispSync</h5>
+
+<p>DispSync maintains a model of the periodic hardware-based VSYNC events of a
+display and uses that model to execute periodic callbacks at specific phase
+offsets from the hardware VSYNC events.</p>
+
+<p>DispSync is essentially a software phase lock loop (PLL) that generates the
+VSYNC and SF VSYNC signals used by Choreographer and SurfaceFlinger, even if
+not offset from hardware VSYNC.</p>
+
+<img src="images/dispsync.png" alt="DispSync flow">
+
+<p class="img-caption"><strong>Figure 4.</strong> DispSync flow</p>
+
+<p>DispSync has these qualities:</p>
+
+<ul> <li> <em>Reference</em> - HW_VSYNC_0 <li> <em>Output</em> - VSYNC and SF
+VSYNC <li> <em>Feedback</em> - Retire fence signal timestamps from Hardware
+Composer </ul>
+
+<h5 id=vsync_retire_offset>VSYNC/Retire Offset</h5>
+
+<p>The signal timestamp of retire fences must match HW VSYNC even on devices
+that don’t use the offset phase. Otherwise, errors appear to have greater
+severity than reality.</p>
+
+<p>“Smart” panels often have a delta. Retire fence is the end of direct memory
+access (DMA) to display memory. The actual display switch and HW VSYNC is some
+time later.</p>
+
+<p><code>PRESENT_TIME_OFFSET_FROM_VSYNC_NS</code> is set in the device’s
+BoardConfig.mk make file. It is based upon the display controller and panel
+characteristics. Time from retire fence timestamp to HW Vsync signal is
+measured in nanoseconds.</p>
+
+<h5 id=vsync_and_sf_vsync_offsets>VSYNC and SF_VSYNC Offsets</h5>
+
+<p>The <code>VSYNC_EVENT_PHASE_OFFSET_NS</code> and
+<code>SF_VSYNC_EVENT_PHASE_OFFSET_NS</code> are set conservatively based on
+high-load use cases, such as partial GPU composition during window transition
+or Chrome scrolling through a webpage containing animations. These offsets
+allow for long application render time and long GPU composition time.</p>
+
+<p>More than a millisecond or two of latency is noticeable. We recommend
+integrating thorough automated error testing to minimize latency without
+significantly increasing error counts.</p>
+
+<p>Note these offsets are also set in the device’s BoardConfig.mk make file.
+The default if not set is zero offset. Both settings are offset in nanoseconds
+after HW_VSYNC_0. Either can be negative.</p>
+
+<h3 id=virtual_displays>Virtual displays</h3>
+
+<p>Android added support for virtual displays to Hardware Composer in version
+1.3. This support was implemented in the Android platform and can be used by
+Miracast.</p>
+
+<p>The virtual display composition is similar to the physical display: Input
+layers are described in prepare(), SurfaceFlinger conducts GPU composition, and
+layers and GPU framebuffer are provided to Hardware Composer in set().</p>
+
+<p>Instead of the output going to the screen, it is sent to a gralloc buffer.
+Hardware Composer writes output to a buffer and provides the completion fence.
+The buffer is sent to an arbitrary consumer: video encoder, GPU, CPU, etc.
+Virtual displays can use 2D/blitter or overlays if the display pipeline can
+write to memory.</p>
+
+<h4 id=modes>Modes</h4>
+
+<p>Each frame is in one of three modes after prepare():</p>
+
+<ul> <li> <em>GLES</em> - All layers composited by GPU. GPU writes directly to
+the output buffer while Hardware Composer does nothing. This is equivalent to
+virtual display composition with Hardware Composer <1.3.  <li> <em>MIXED</em> -
+GPU composites some layers to framebuffer, and Hardware Composer composites
+framebuffer and remaining layers. GPU writes to scratch buffer (framebuffer).
+Hardware Composer reads scratch buffer and writes to the output buffer. Buffers
+may have different formats, e.g. RGBA and YCbCr.  <li> <em>HWC</em> - All
+layers composited by Hardware Composer. Hardware Composer writes directly to
+the output buffer.  </ul>
+
+<h4 id=output_format>Output format</h4>
+
+<p><em>MIXED and HWC modes</em>: If the consumer needs CPU access, the consumer
+chooses the format. Otherwise, the format is IMPLEMENTATION_DEFINED. Gralloc
+can choose best format based on usage flags. For example, choose a YCbCr format
+if the consumer is video encoder, and Hardware Composer can write the format
+efficiently.</p>
+
+<p><em>GLES mode</em>: EGL driver chooses output buffer format in
+dequeueBuffer(), typically RGBA8888. The consumer must be able to accept this
+format.</p>
+
+<h4 id=egl_requirement>EGL requirement</h4>
+
+<p>Hardware Composer 1.3 virtual displays require that eglSwapBuffers() does
+not dequeue the next buffer immediately. Instead, it should defer dequeueing
+the buffer until rendering begins. Otherwise, EGL always owns the “next” output
+buffer. SurfaceFlinger can’t get the output buffer for Hardware Composer in
+MIXED/HWC mode. </p>
+
+<p>If Hardware Composer always sends all virtual display layers to GPU, all
+frames will be in GLES mode. Although it is not recommended, you may use this
+method if you need to support Hardware Composer 1.3 for some other reason but
+can’t conduct virtual display composition.</p>
+
+<h2 id=testing>Testing</h2>
+
+<p>For benchmarking, we suggest following this flow by phase:</p>
+
+<ul> <li> <em>Specification</em> - When initially specifying the device, such
+as when using immature drivers, you should use predefined (fixed) clocks and
+workloads to measure the frames per second rendered. This gives a clear view of
+what the hardware is capable of doing.  <li> <em>Development</em> - In the
+development phase as drivers mature, you should use a fixed set of user actions
+to measure the number of visible stutters (janks) in animations.  <li>
+<em>Production</em> - Once the device is ready for production and you want to
+compare against competitors, you should increase the workload until stutters
+increase. Determine if the current clock settings can keep up with the load.
+This can help you identify where you might be able to slow the clocks and
+reduce power use.  </ul>
+
+<p>For the specification phase, Android offers the Flatland tool to help derive
+device capabilities. It can be found at:
+<code>platform/frameworks/native/cmds/flatland/</code></p>
+
+<p>Flatland relies upon fixed clocks and shows the throughput that can be
+achieved with composition-based workloads. It uses gralloc buffers to simulate
+multiple window scenarios, filling in the window with GL and then measuring the
+compositing. Please note, Flatland uses the synchronization framework to
+measure time. So you must support the synchronization framework to readily use
+Flatland.</p>
diff --git a/src/devices/images/drm_framework.png b/src/devices/images/drm_framework.png
new file mode 100644
index 0000000..06afe05
--- /dev/null
+++ b/src/devices/images/drm_framework.png
Binary files differ
diff --git a/src/devices/images/drm_hal.png b/src/devices/images/drm_hal.png
index ef6379b..6c43422 100644
--- a/src/devices/images/drm_hal.png
+++ b/src/devices/images/drm_hal.png
Binary files differ
diff --git a/src/devices/images/drm_license_metadata.png b/src/devices/images/drm_license_metadata.png
new file mode 100644
index 0000000..2076866
--- /dev/null
+++ b/src/devices/images/drm_license_metadata.png
Binary files differ
diff --git a/src/devices/images/drm_plugin.png b/src/devices/images/drm_plugin.png
new file mode 100644
index 0000000..d332ce6
--- /dev/null
+++ b/src/devices/images/drm_plugin.png
Binary files differ
diff --git a/src/devices/images/drm_plugin_lifecycle.png b/src/devices/images/drm_plugin_lifecycle.png
new file mode 100644
index 0000000..b04acb5
--- /dev/null
+++ b/src/devices/images/drm_plugin_lifecycle.png
Binary files differ
diff --git a/src/devices/images/graphics_surface.png b/src/devices/images/graphics_surface.png
deleted file mode 100644
index e32792d..0000000
--- a/src/devices/images/graphics_surface.png
+++ /dev/null
Binary files differ
diff --git a/src/devices/index.jd b/src/devices/index.jd
index f0b4e42..483c3b0 100644
--- a/src/devices/index.jd
+++ b/src/devices/index.jd
@@ -1,8 +1,8 @@
-page.title=Porting Android to Devices
+page.title=Android Interfaces
 @jd:body
 
 <!--
-    Copyright 2013 The Android Open Source Project
+    Copyright 2014 The Android Open Source Project
 
     Licensed under the Apache License, Version 2.0 (the "License");
     you may not use this file except in compliance with the License.
@@ -33,7 +33,7 @@
   <p>To ensure that your devices maintain a high level of quality and offers a consistent
   experience for your users, they must must also
   pass the tests in the compatibility test suite (CTS). CTS ensures that anyone
-  building a device meets a quality standard that ensures apps run reliabaly well
+  building a device meets a quality standard that ensures apps run reliably well
   and gives users a good experience. For more information, see the
   <a href="{@docRoot}compatibility/index.html">Compatibility</a> section.</p>
 
diff --git a/src/devices/latency_design.jd b/src/devices/latency_design.jd
index 39601c2..a2ad236 100644
--- a/src/devices/latency_design.jd
+++ b/src/devices/latency_design.jd
@@ -68,11 +68,11 @@
 </p>
 
 <ul>
-<li>presence of a fast mixer thread for this output (see below)</li>
-<li>track sample rate</li>
-<li>presence of a client thread to execute callback handlers for this track</li>
-<li>track buffer size</li>
-<li>available fast track slots (see below)</li>
+<li>Presence of a fast mixer thread for this output (see below)</li>
+<li>Track sample rate</li>
+<li>Presence of a client thread to execute callback handlers for this track</li>
+<li>Track buffer size</li>
+<li>Available fast track slots (see below)</li>
 </ul>
 
 <p>
@@ -100,7 +100,7 @@
 </p>
 
 <ul>
-<li>mixing of the normal mixer's sub-mix and up to 7 client fast tracks</li>
+<li>Mixing of the normal mixer's sub-mix and up to 7 client fast tracks</li>
 <li>Per track attenuation</li>
 </ul>
 
diff --git a/src/devices/low-ram.jd b/src/devices/low-ram.jd
index 19845a9..f0892a7 100644
--- a/src/devices/low-ram.jd
+++ b/src/devices/low-ram.jd
@@ -1,4 +1,4 @@
-page.title=Running Android with low RAM
+page.title=Low RAM
 @jd:body
 
 <!--
diff --git a/src/devices/sensors/hal-interface.jd b/src/devices/sensors/hal-interface.jd
index 5c232fa..7feadf2 100644
--- a/src/devices/sensors/hal-interface.jd
+++ b/src/devices/sensors/hal-interface.jd
@@ -24,7 +24,7 @@
   </div>
 </div>
 
-<p>The HAL interface, declared in <a href="{@docRoot}devices/reference/sensors_8h.html">sensors.h</a>, represents the interface between the Android <a href="sensor-stack.html#framework">framework</a> and the hardware-specific software. A HAL implementation must define each
+<p>The HAL interface, declared in <a href="{@docRoot}devices/halref/sensors_8h.html">sensors.h</a>, represents the interface between the Android <a href="sensor-stack.html#framework">framework</a> and the hardware-specific software. A HAL implementation must define each
   function declared in sensors.h. The main functions are:</p>
 <ul>
   <li><code>get_sensors_list</code> - Returns the list of all sensors. </li>
@@ -47,7 +47,7 @@
   <li><code>sensor_t</code></li>
   <li><code>sensors_event_t</code></li>
 </ul>
-<p>In addition to the sections below, see <a href="{@docRoot}devices/reference/sensors_8h.html">sensors.h</a> for more information on those types.</p>
+<p>In addition to the sections below, see <a href="{@docRoot}devices/halref/sensors_8h.html">sensors.h</a> for more information on those types.</p>
 <h2 id="get_sensors_list_list">get_sensors_list(list)</h2>
 <pre>int (*get_sensors_list)(struct sensors_module_t* module, struct sensor_t
   const** list);</pre>
@@ -240,13 +240,13 @@
   <code>HAL_MODULE_INFO_SYM</code> of this type to expose the <a
   href="#get_sensors_list_list">get_sensors_list</a> function. See the definition
   of <code>sensors_module_t</code> in <a
-  href="{@docRoot}devices/reference/sensors_8h.html">sensors.h</a> and the
+  href="{@docRoot}devices/halref/sensors_8h.html">sensors.h</a> and the
   definition of <code>hw_module_t</code> for more information.</p>
 <h2 id="sensors_poll_device_t_sensors_poll_device_1_t">sensors_poll_device_t / sensors_poll_device_1_t</h2>
 <p><code>sensors_poll_device_1_t</code> contains the rest of the methods defined above:
   <code>activate</code>, <code>batch</code>, <code>flush</code> and
   <code>poll</code>. Its <code>common</code> field (of type <a
-  href="{@docRoot}devices/reference/structhw__device__t.html">hw_device_t</a>)
+  href="{@docRoot}devices/halref/structhw__device__t.html">hw_device_t</a>)
   defines the version number of the HAL.</p>
 <h2 id="sensor_t">sensor_t</h2>
 <p><code>sensor_t</code> represents an <a href="index.html">Android sensor</a>. Here are some of its important fields:</p>
@@ -268,7 +268,7 @@
   <em>Cool-product</em> team at Fictional-Company could use
   <code>stringType=”com.fictional_company.cool_product.unicorn_detector”</code>.
   The <code>stringType</code> is used to uniquely identify non-official sensors types. See <a
-  href="{@docRoot}devices/reference/sensors_8h.html">sensors.h</a> for more
+  href="{@docRoot}devices/halref/sensors_8h.html">sensors.h</a> for more
   information on types and string types.</p>
 <p><strong>requiredPermission:</strong> A string representing the permission that applications must
   possess to see the sensor, register to it and receive its data. An empty string
@@ -328,7 +328,7 @@
 <p>Sensor events generated by Android sensors and reported through the <a
 href="#poll">poll</a> function are of <code>type sensors_event_t</code>. Here are some
 important fields of <code>sensors_event_t</code>:</p>
-<p>version: must be <code>sizeof(struct sensors_event_t)</code></p>
+<p><strong>version:</strong> Must be <code>sizeof(struct sensors_event_t)</code></p>
 <p><strong>sensor:</strong> The handle of the sensor that generated the event, as defined by
   <code>sensor_t.handle</code>.</p>
 <p><strong>type:</strong> The sensor type of the sensor that generated the event, as defined by
@@ -342,9 +342,9 @@
   causes too high jitter, and using only the sensor chip time to set the
   timestamps can cause de-synchronization from the
   <code>elapsedRealtimeNano</code> clock, as the sensor clock drifts.</p>
-<p>data and overlapping fields: The values measured by the sensor. The meaning and
+<p><strong>data and overlapping fields:</strong> The values measured by the sensor. The meaning and
   units of those fields are specific to each sensor type. See <a
-  href="{@docRoot}devices/reference/sensors_8h.html">sensors.h</a> and the
+  href="{@docRoot}devices/halref/sensors_8h.html">sensors.h</a> and the
   definition of the different <a href="sensor-types.html">Sensor types</a> for a
   description of the data fields. For some sensors, the accuracy of the
   readings is also reported as part of the data, through a <code>status</code> field. This
@@ -355,10 +355,10 @@
 <p>Metadata events have the same type as normal sensor events:
   <code>sensors_event_meta_data_t = sensors_event_t</code>. They are returned together with
   other sensor events through poll. They possess the following fields:</p>
-<p>version must be <code>META_DATA_VERSION</code></p>
-<p>type must be <code>SENSOR_TYPE_META_DATA</code></p>
-<p>sensor, reserved, and <strong>timestamp </strong>must be 0</p>
-<p>meta_data.what contains the metadata type for this event. There is currently a
+<p><strong>version:</strong> Must be <code>META_DATA_VERSION</code></p>
+<p><strong>type:</strong> Must be <code>SENSOR_TYPE_META_DATA</code></p>
+<p><strong>sensor, reserved, and timestamp:</strong> Must be 0</p>
+<p><strong>meta_data.what:</strong> Contains the metadata type for this event. There is currently a
   single valid metadata type: <code>META_DATA_FLUSH_COMPLETE</code>.</p>
 <p><code>META_DATA_FLUSH_COMPLETE</code> events represent the completion of the flush of a
   sensor FIFO. When <code>meta_data.what=META_DATA_FLUSH_COMPLETE</code>, <code>meta_data.sensor</code>
diff --git a/src/devices/sensors/index.jd b/src/devices/sensors/index.jd
index 6f21488..dea285b 100644
--- a/src/devices/sensors/index.jd
+++ b/src/devices/sensors/index.jd
@@ -28,19 +28,19 @@
     <p>Android sensors give applications access to a mobile device's underlying
       physical sensors. They are data-providing virtual devices defined by the
       implementation of <a
-      href="{@docRoot}devices/reference/sensors_8h.html">sensors.h</a>,
+      href="{@docRoot}devices/halref/sensors_8h.html">sensors.h</a>,
       the sensor Hardware Abstraction Layer (HAL).</p>
     <ul>
       <li> Those virtual devices provide data coming from a set of physical sensors:
         accelerometers, gyroscopes, magnetometers, barometer, humidity, pressure,
-        light, proximity and heart rate sensors… </li>
+        light, proximity and heart rate sensors.</li>
       <li> Notably, camera, fingerprint sensor, microphone and touch screen are currently
         not in the list of physical devices providing data through “Android sensors.”
-        They have their own reporting mechanism. </li>
+        They have their own reporting mechanism.</li>
       <li> The separation is arbitrary, but in general, Android sensors provide lower
         bandwidth data. For example, “100hz x 3 channels” for an accelerometer versus
         “25hz x 8 MP x 3 channels” for a camera or “44kHz x 1 channel” for a
-        microphone. </li>
+        microphone.</li>
     </ul>
     <p>How the different physical sensors are connected to the system on chip
        (SoC) is not defined by Android.</p>
@@ -62,7 +62,7 @@
     <p>Each Android sensor has a “type” representing how the sensor behaves and what
       data it provides.</p>
     <ul>
-      <li> The official Android <a href="sensor-types.html">Sensor types</a> are defined in <a href="{@docRoot}devices/reference/sensors_8h.html">sensors.h</a> under the names SENSOR_TYPE_…
+      <li> The official Android <a href="sensor-types.html">Sensor types</a> are defined in <a href="{@docRoot}devices/halref/sensors_8h.html">sensors.h</a> under the names SENSOR_TYPE_…
         <ul>
           <li> The vast majority of sensors have an official sensor type. </li>
           <li> Those types are documented in the Android SDK. </li>
@@ -144,7 +144,7 @@
       </li>
       <li> Hardware abstraction layer (HAL)
         <ul>
-          <li> <a href="{@docRoot}devices/reference/sensors_8h_source.html">https://source.android.com/devices/reference/sensors_8h_source.html</a></li>
+          <li> <a href="{@docRoot}devices/halref/sensors_8h_source.html">https://source.android.com/devices/halref/sensors_8h_source.html</a></li>
           <li> Also known as “sensors.h” </li>
           <li> The source of truth. First document to be updated when new features are
             developed. </li>
diff --git a/src/devices/sensors/sensor-stack.jd b/src/devices/sensors/sensor-stack.jd
index 9776c44..5ffcce8 100644
--- a/src/devices/sensors/sensor-stack.jd
+++ b/src/devices/sensors/sensor-stack.jd
@@ -128,7 +128,7 @@
 <p>The interface is defined by Android and AOSP contributors, and the
   implementation is provided by the manufacturer of the device.</p>
 <p>The sensor HAL interface is located in <code>hardware/libhardware/include/hardware</code>.
-  See <a href="{@docRoot}devices/reference/sensors_8h.html">sensors.h</a> for additional details.</p>
+  See <a href="{@docRoot}devices/halref/sensors_8h.html">sensors.h</a> for additional details.</p>
 <h3 id="release_cycle">Release cycle</h3>
 <p>The HAL implementation specifies what version of the HAL interface it
   implements by setting <code>your_poll_device.common.version</code>. The existing HAL
diff --git a/src/devices/sensors/sensor-types.jd b/src/devices/sensors/sensor-types.jd
index 824680f..7ebac25 100644
--- a/src/devices/sensors/sensor-types.jd
+++ b/src/devices/sensors/sensor-types.jd
@@ -274,15 +274,17 @@
     <td><p>Continuous</p></td>
   </tr>
   <tr>
-    <td><img src="images/battery_icon.png" width="20" height="20" alt="Low power sensor" />
-      <p>i<a href="#geomagnetic_rotation_vector"Geomagnetic rotation vector</a></p></td>
+    <td><p><a href="#geomagnetic_rotation_vector">Geomagnetic rotation
+     vector</a> <img src="images/battery_icon.png" width="20" height="20" alt="Low
+     power sensor" /></p></td>
     <td><p>Attitude</p></td>
     <td><p>Accelerometer, Magnetometer, MUST NOT USE Gyroscope</p></td>
     <td><p>Continuous</p></td>
   </tr>
   <tr>
-    <td><img src="images/battery_icon.png" width="20" height="20" alt="Low power sensor" />
-      <p><a href="#glance_gesture">Glance gesture</a></p></td>
+    <td><p><a href="#glance_gesture">Glance gesture</a> <img
+     src="images/battery_icon.png" width="20" height="20" alt="Low power sensor"
+     /></p></td>
     <td><p>Interaction</p></td>
     <td><p>Undefined</p></td>
     <td><p>One-shot</p></td>
@@ -318,8 +320,9 @@
     <td><p>Continuous</p></td>
   </tr>
   <tr>
-    <td><img src="images/battery_icon.png" width="20" height="20" alt="Low power sensor" />
-      <p><a href="#pick_up_gesture">Pick up gesture</a></p></td>
+    <td><p><a href="#pick_up_gesture">Pick up gesture</a> <img
+     src="images/battery_icon.png" width="20" height="20" alt="Low power sensor"
+     /></p></td>
     <td><p>Interaction</p></td>
     <td><p>Undefined</p></td>
     <td><p>One-shot</p></td>
@@ -331,43 +334,46 @@
     <td><p>Continuous</p></td>
   </tr>
   <tr>
-    <td><img src="images/battery_icon.png" width="20" height="20" alt="Low power sensor" />
-      <p><a href="#significant_motion">Significant motion</a></p></td>
+    <td><p><a href="#significant_motion">Significant motion</a> <img
+     src="images/battery_icon.png" width="20" height="20" alt="Low power sensor"
+     /></p></td>
     <td><p>Activity</p></td>
     <td><p>Accelerometer (or another as long as very low power)</p></td>
     <td><p>One-shot</p></td>
   </tr>
   <tr>
-    <td><img src="images/battery_icon.png" width="20" height="20" alt="Low power sensor" />
-      <p><a href="#step_counter">Step counter</a></p></td>
+    <td><p><a href="#step_counter">Step counter</a> <img
+     src="images/battery_icon.png" width="20" height="20" alt="Low power sensor"
+     /></p></td>
     <td><p>Activity</p></td>
     <td><p>Accelerometer</p></td>
     <td><p>On-change</p></td>
   </tr>
   <tr>
-    <td><img src="images/battery_icon.png" width="20" height="20" alt="Low power sensor" />
-      <p><a href="#step_detector">Step detector</a></p></td>
+    <td><p><a href="#step_detector">Step detector</a> <img
+     src="images/battery_icon.png" width="20" height="20" alt="Low power sensor"
+     /></p></td> <td><p>Activity</p></td>
+    <td><p>Accelerometer</p></td>
+    <td><p>Special</p></td>
+  </tr>
+  <tr>
+    <td><p><a href="#tilt_detector">Tilt detector</a> <img
+     src="images/battery_icon.png" width="20" height="20" alt="Low power sensor"
+     /></p></td>
     <td><p>Activity</p></td>
     <td><p>Accelerometer</p></td>
     <td><p>Special</p></td>
   </tr>
   <tr>
-    <td><img src="images/battery_icon.png" width="20" height="20" alt="Low power sensor" />
-      <p><a href="#tilt_detector">Tilt detector</a></p></td>
-    <td><p>Activity</p></td>
-    <td><p>Accelerometer</p></td>
-    <td><p>Special</p></td>
-  </tr>
-  <tr>
-    <td><img src="images/battery_icon.png" width="20" height="20" alt="Low power sensor" />
-      <p><a href="#wake_up_gesture">Wake up gesture</a></p></td>
+    <td><p><a href="#wake_up_gesture">Wake up gesture</a> <img
+     src="images/battery_icon.png" width="20" height="20" alt="Low power sensor"
+     /></p></td>
     <td><p>Interaction</p></td>
     <td><p>Undefined</p></td>
     <td><p>One-shot</p></td>
   </tr>
 </table>
-<img src="images/battery_icon.png" width="20" height="20" alt="Low power sensor" />
-<p> = Low power sensor</p>
+<p><img src="images/battery_icon.png" width="20" height="20" alt="Low power sensor" /> = Low power sensor</p>
 <h2 id="activity_composite_sensors">Activity composite sensors</h2>
 <h3 id="linear_acceleration">Linear acceleration</h3>
 <p>Underlying physical sensors:  Accelerometer and (if present) Gyroscope (or
@@ -622,7 +628,7 @@
   direction. (Mathematically speaking, it should be positive in the
   counter-clockwise direction):</p>
 <div class="figure" style="width:264px">
-  <imgsrc="images/axis_positive_roll.png" alt="Depiction of orientation
+  <img src="images/axis_positive_roll.png" alt="Depiction of orientation
    relative to a device" height="253" />
   <p class="img-caption">
     <strong>Figure 2.</strong> Orientation relative to a device.
diff --git a/src/devices/tech/dalvik/art.jd b/src/devices/tech/dalvik/art.jd
deleted file mode 100644
index 902593e..0000000
--- a/src/devices/tech/dalvik/art.jd
+++ /dev/null
@@ -1,244 +0,0 @@
-page.title=Introducing ART
-@jd:body
-
-<!--
-    Copyright 2013 The Android Open Source Project
-
-    Licensed under the Apache License, Version 2.0 (the "License");
-    you may not use this file except in compliance with the License.
-    You may obtain a copy of the License at
-
-        http://www.apache.org/licenses/LICENSE-2.0
-
-    Unless required by applicable law or agreed to in writing, software
-    distributed under the License is distributed on an "AS IS" BASIS,
-    WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-    See the License for the specific language governing permissions and
-    limitations under the License.
--->
-
-
-<div id="qv-wrapper">
-<div id="qv">
-  <h2 id="Contents">In this document</h2>
-  <ol id="auto-toc">
-  </ol>
-  <strong>See also</strong>
-  <ol>
-    <li><a
-    href="http://developer.android.com/guide/practices/verifying-apps-art.html">Verifying
-    App Behavior on the Android Runtime (ART)</a></li>
-  </ol>
-</div>
-</div>
-
-<p>
-ART is a new Android runtime being introduced experimentally in the 4.4
-release. This is a preview of work in progress in KitKat that can be turned on
-in <strong>Settings</strong> &gt; <strong>developer options</strong>. This is
-available for the purpose of obtaining early developer and partner feedback.</p>
-
-<p class="caution"><strong>Important:</strong> Dalvik must remain the default
-runtime or you risk breaking your Android implementations and third-party
-applications.</p>
-
-<p>Most existing apps should just work when running with ART. However, some
-techniques that work on Dalvik do not work on ART. For information about the
-most important issues, see <a
-href="http://developer.android.com/guide/practices/verifying-apps-art.html">Verifying
-App Behavior on the Android Runtime (ART)</a>.</p>
-
-<h2 id="features">ART Features</h2>
-
-<p>Here are some of the major new features implemented by ART.</p>
-
-<h3 id="AOT_compilation">Ahead-of-time (AOT) compilation</h3>
-
-<p>ART introduces ahead-of-time (AOT) compilation, which can improve app
-performance. ART also has tighter install-time verification than Dalvik.</p>
-
-<p>At install time, ART compiles apps using the on-device
-<strong>dex2oat</strong> tool. This utility accepts <a
-href="http://source.android.com/devices/tech/dalvik/dex-format.html">DEX</a> files as input and
-generates a compiled app executable for the target device. The utility should be
-able to compile all valid DEX files without difficulty. However, some
-post-processing tools produce invalid files that may be tolerated by Dalvik but
-cannot be compiled by ART. For more information, see  <a
-href="http://developer.android.com/guide/practices/verifying-apps-art.html#GC_Migration">Addressing
-Garbage Collection Issues</a>.</p>
-
-<h3 id="Improved_GC">Improved garbage collection</h3>
-
-<p>Garbage collection (GC) can impair an app's performance, resulting in choppy
-display, poor UI responsiveness, and other problems. ART improves garbage
-collection in several ways:</p>
-
-<ul>
-  <li>One GC pause instead of two</li>
-  <li>Parallelized processing during the remaining GC pause</li>
-  <li>Collector with lower pause time for the special case of cleaning up
-  recently-allocated, short-lived objects</li>
-  <li>Improved garbage collection ergonomics, making concurrent garbage
-  collections more timely, which makes <a
-  href="http://developer.android.com/tools/debugging/debugging-memory.html#LogMessages"><code>GC_FOR_ALLOC</code></a>
-  events extremely rare in typical use cases</li>
-</ul>
-
-<p>ART currently does not use compacting GC, but this feature is under
-development in the <a href="https://source.android.com">Android Open Source
-Project (AOSP)</a>. In the meantime, don't perform operations that are
-incompatible with compacting GC, such as storing pointers to object fields. For
-more information, see <a
-href="http://developer.android.com/guide/practices/verifying-apps-art.html#GC_Migration">Addressing
-Garbage Collection Issues</a>.</p>
-
-<h3 id="Debugging_Imp">Development and debugging improvements</h3>
-
-<p>ART offers a number of features to improve app development and debugging.</p>
-
-<h4 id="Sampling_Profiler">Support for sampling profiler</h4>
-
-<p>Historically, developers have used the <a
-href=" http://developer.android.com/tools/help/traceview.html">Traceview</a>
-tool (designed for tracing
-application execution) as a profiler. While Traceview gives useful information,
-its results on Dalvik have been skewed by the per-method-call overhead, and use
-of the tool noticeably affects run time performance.</p>
-
-<p>ART adds support for a dedicated sampling profiler that does not have these
-limitations. This gives a more accurate view of app execution without
-significant slowdown. Sampling support has also been added to Traceview for
-Dalvik.</p>
-
-<h4 id="Debugging_Features">Support for more debugging features</h4>
-
-<p>ART supports a number of new debugging options, particularly in monitor- and
-garbage collection-related functionality. For example, you can:</p>
-
-<ul>
-  <li>See what locks are held in stack traces, then jump to the thread that
-      holds a lock.</li>
-  <li>Ask how many live instances there are of a given class, ask to see the
-      instances, and see what references are keeping an object live.</li>
-  <li>Filter events (like breakpoint) for a specific instance.</li>
-  <li>See the value returned by a method when it exits (using “method-exit”
-      events).</li>
-  <li>Set field watchpoint to suspend the execution of a program when a specific
-      field is accessed and/or modified.</li>
-</ul>
-
-<h4 id="Crash_Reports">Improved diagnostic detail in exceptions and crash reports</h4>
-
-<p>ART gives you as much context and detail as possible when runtime exceptions
-occur.  ART provides expanded exception detail for <code><a
-href="http://developer.android.com/reference/java/lang/ClassCastException.html">java.lang.ClassCastException</a></code>,
-<code><a
-href="http://developer.android.com/reference/java/lang/ClassNotFoundException.html">java.lang.ClassNotFoundException</a></code>,
-and <code><a
-href="http://developer.android.com/reference/java/lang/NullPointerException.html">java.lang.NullPointerException</a></code>.
-(Later versions of Dalvik provided expanded exception detail for <code><a
-href="http://developer.android.com/reference/java/lang/ArrayIndexOutOfBoundsException.html">java.lang.ArrayIndexOutOfBoundsException</a></code>
-and <code><a
-href="http://developer.android.com/reference/java/lang/ArrayStoreException.html">java.lang.ArrayStoreException</a></code>,
-which now include the size of the array and the out-of-bounds offset, and ART
-does this as well.)</p>
-
-<p>For example, <code><a
-href="http://developer.android.com/reference/java/lang/NullPointerException.html">java.lang.NullPointerException</a></code>
-now shows information about what the app was trying to do with the null pointer,
-such as the field the app was trying to write to, or the method it was trying to
-call. Here are some typical examples:</p>
-
-<pre class="no-pretty-print">
-java.lang.NullPointerException: Attempt to write to field 'int
-android.accessibilityservice.AccessibilityServiceInfo.flags' on a null object
-reference</pre>
-
-<pre class="no-pretty-print">
-java.lang.NullPointerException: Attempt to invoke virtual method
-'java.lang.String java.lang.Object.toString()' on a null object reference</pre>
-
-<p>ART also provides improved context information in app native crash reports,
-by including both Java and native stack information. </p>
-
-<h2 id="Known_Issues">Known Issues</h2>
-
-<p>The following known issues are present in the 4.4.1 implementation of ART.</p>
-
-<ul>
-
-  <li><em>Compile-time issue:</em> As noted above, ART flags unbalanced
-  <code>monitorenter</code>/<code>moniterexit</code> instructions. We relaxed
-  this check in 4.4.1 but intend to restore this verification in the future once
-  tools are fixed, as this check is necessary for certain compiler
-  optimizations. <a
-  href="https://code.google.com/p/android/issues/detail?id=61916">https://code.google.com/p/android/issues/detail?id=61916</a></li>
-
-  <li><em>Run-time issue:</em> There was an issue where JNI
-  <code>GetFieldID</code> and <code>GetStaticFieldID</code> were using the wrong
-  class loader on unattached threads, often leading to later CheckJNI errors or
-  NoSuchFieldError exceptions. <a
-  href="http://code.google.com/p/android/issues/detail?id=63970">http://code.google.com/p/android/issues/detail?id=63970</a></li>
-
-  <li><em>Run-time issue:</em> Calling JNI <code>NewDirectByteBuffer()</code>
-  with byte size of <code>0</code> led to the following CheckJNI error: <pre
-  class="no-pretty-print"> JNI DETECTED ERROR IN APPLICATION: capacity must be
-  greater than 0: 0</pre> <a
-  href="http://code.google.com/p/android/issues/detail?id=63055">http://code.google.com/p/android/issues/detail?id=63055</a></li>
-
-</ul>
-
-<h3 id="Fixed_Issues">Fixed issues</h3>
-
-<ul>
-
-  <li><em>Compile-time issue:</em> Overly aggressive verification and
-  compilation of unused portions of dex files lead to corrupt package messages.
-  This was addressed in AOSP with: <a
-  href="https://android-review.googlesource.com/#/c/72374/">https://android-review.googlesource.com/#/c/72374/</a></li>
-
-  <li><em>Debug-time issue:</em> Interactive debugging performance was slow,
-  even in code without breakpoints. This has been addressed in the latest AOSP
-  code.</li>
-
-</ul>
-
-<h2 id="building">Enabling ART in Android Build</h2>
-
-<p> Two runtimes are now available, the existing Dalvik runtime
-(<code>libdvm.so</code>) and the ART runtime (<code>libart.so</code>). A device
-can be built using either or both runtimes. (You can dual boot from
-<strong>Developer options</strong> if both runtimes are installed.) See
-runtime_common.mk. That is included from build/target/product/runtime_libdvm.mk
-or build/target/product/runtime_libdvm.mk or both.</p>
-
-<p> The <code>dalvikvm</code> command line tool can run with either runtime now.
-It will default to using the runtime specified in <strong>developer
-options</strong>. The default can be overridden by specifying the desired
-runtime library, for example with <code>-XXlib:libart.so</code> </p>
-
-<p>
-A new <code>PRODUCT_RUNTIMES</code> variable controls which runtimes
-are included in a build. Include it within either
-<code>build/target/product/core_minimal.mk</code> or
-<code>build/target/product/core_base.mk</code>.
-</p>
-
-<p>
-Add this to the device makefile to have both runtimes
-built and installed, with Dalvik as the default:
-</br>
-<code>PRODUCT_RUNTIMES := runtime_libdvm_default</code>
-</br>
-<code>PRODUCT_RUNTIMES += runtime_libart</code>
-</p>
-
-<h2 id="Reporting_Problems">Reporting Problems</h2>
-
-<p>If you run into any issues that aren’t due to app JNI issues, please report
-them via the Android Open Source Project Issue Tracker at <a
-href="https://code.google.com/p/android/issues/list">https://code.google.com/p/android/issues/list</a>.
-Please include an <code>"adb bugreport"</code> and link to the app in Google
-Play store if available. Otherwise, if possible, attach an APK that reproduces
-the issue. Please note that issues (including attachments) are publicly
-visible.</p>
diff --git a/src/devices/tech/dalvik/dalvik-bytecode.jd b/src/devices/tech/dalvik/dalvik-bytecode.jd
index 8d4f52b..1b9b9e3 100644
--- a/src/devices/tech/dalvik/dalvik-bytecode.jd
+++ b/src/devices/tech/dalvik/dalvik-bytecode.jd
@@ -1,8 +1,8 @@
-page.title=Bytecode for the Dalvik VM
+page.title=Dalvik bytecode
 @jd:body
 
 <!--
-    Copyright 2013 The Android Open Source Project
+    Copyright 2014 The Android Open Source Project
 
     Licensed under the Apache License, Version 2.0 (the "License");
     you may not use this file except in compliance with the License.
@@ -16,13 +16,21 @@
     See the License for the specific language governing permissions and
     limitations under the License.
 -->
-<h2>General Design</h2>
+<div id="qv-wrapper">
+  <div id="qv">
+    <h2>In this document</h2>
+    <ol id="auto-toc">
+    </ol>
+  </div>
+</div>
+
+<h2 id="design">General design</h2>
 
 <ul>
 <li>The machine model and calling conventions are meant to approximately
   imitate common real architectures and C-style calling conventions:
   <ul>
-  <li>The VM is register-based, and frames are fixed in size upon creation.
+  <li>The machine is register-based, and frames are fixed in size upon creation.
     Each frame consists of a particular number of registers (specified by
     the method) as well as any adjunct data needed to execute the method,
     such as (but not limited to) the program counter and a reference to the
@@ -149,7 +157,7 @@
 </li>
 </ul>
 
-<h2>Summary of Instruction Set</h2>
+<h2 id="instructions">Summary of bytecode set</h2>
 
 <table class="instruc">
 <thead>
@@ -987,7 +995,7 @@
 </tbody>
 </table>
 
-<h2>packed-switch-payload Format</h2>
+<h2 id="packed-switch">packed-switch-payload format</h2>
 
 <table class="supplement">
 <thead>
@@ -1026,7 +1034,7 @@
 <p><b>Note:</b> The total number of code units for an instance of this
 table is <code>(size * 2) + 4</code>.</p>
 
-<h2>sparse-switch-payload Format</h2>
+<h2 id="sparse-switch">sparse-switch-payload format</h2>
 
 <table class="supplement">
 <thead>
@@ -1066,7 +1074,7 @@
 <p><b>Note:</b> The total number of code units for an instance of this
 table is <code>(size * 4) + 2</code>.</p>
 
-<h2>fill-array-data-payload Format</h2>
+<h2 id="fill-array">fill-array-data-payload format</h2>
 
 <table class="supplement">
 <thead>
@@ -1104,7 +1112,7 @@
 table is <code>(size * element_width + 1) / 2 + 4</code>.</p>
 
 
-<h2>Mathematical Operation Details</h2>
+<h2 id="math">Mathematical operation details</h2>
 
 <p><b>Note:</b> Floating point operations must follow IEEE 754 rules, using
 round-to-nearest and gradual underflow, except where stated otherwise.</p>
diff --git a/src/devices/tech/dalvik/dex-format.jd b/src/devices/tech/dalvik/dex-format.jd
index 744eb86..bd167fb 100644
--- a/src/devices/tech/dalvik/dex-format.jd
+++ b/src/devices/tech/dalvik/dex-format.jd
@@ -1,4 +1,4 @@
-page.title=Dalvik Executable Format
+page.title=Dalvik Executable format
 @jd:body
 
 <!--
@@ -20,7 +20,7 @@
 files, which are used to hold a set of class definitions and their associated
 adjunct data.</p>
 
-<h2>Guide To Types</h2>
+<h2 id="types">Guide to types</h2>
 
 <table class="guide">
 <thead>
@@ -77,7 +77,7 @@
 </tbody>
 </table>
 
-<h3>LEB128</h3>
+<h3 id="leb128">LEB128</h3>
 
 <p>LEB128 ("<b>L</b>ittle-<b>E</b>ndian <b>B</b>ase <b>128</b>") is a
 variable-length encoding for
@@ -157,7 +157,7 @@
 </tbody>
 </table>
 
-<h2>Overall File Layout</h2>
+<h2 id="file-layout">File layout</h2>
 
 <table class="format">
 <thead>
@@ -256,9 +256,9 @@
 </tbody>
 </table>
 
-<h2>Bitfield, String, and Constant Definitions</h2>
+<h2 id="definitions">Bitfield, string and constant definitions</h2>
 
-<h3>DEX_FILE_MAGIC</h3>
+<h3 id="dex-file-magic">DEX_FILE_MAGIC</h3>
 <h4>embedded in header_item</h4>
 
 <p>The constant array/string <code>DEX_FILE_MAGIC</code> is the list of
@@ -284,7 +284,7 @@
 versions of the format differ significantly from the version described in this
 document.</p>
 
-<h3>ENDIAN_CONSTANT and REVERSE_ENDIAN_CONSTANT</h3>
+<h3 id="endian-constant">ENDIAN_CONSTANT and REVERSE_ENDIAN_CONSTANT</h3>
 <h4>embedded in header_item</h4>
 
 <p>The constant <code>ENDIAN_CONSTANT</code> is used to indicate the
@@ -300,7 +300,7 @@
 uint REVERSE_ENDIAN_CONSTANT = 0x78563412;
 </pre>
 
-<h3>NO_INDEX</h3>
+<h3 id="no-index">NO_INDEX</h3>
 <h4>embedded in class_def_item and debug_info_item</h4>
 
 <p>The constant <code>NO_INDEX</code> is used to indicate that
@@ -316,7 +316,7 @@
 uint NO_INDEX = 0xffffffff;    // == -1 if treated as a signed int
 </pre>
 
-<h3>access_flags Definitions</h3>
+<h3 id="access-flags">access_flags definitions</h3>
 <h4>embedded in class_def_item, encoded_field, encoded_method, and 
 InnerClass</h4>
 
@@ -492,7 +492,7 @@
 <p><super>*</super> Only allowed on for <code>InnerClass</code> annotations,
 and must not ever be on in a <code>class_def_item</code>.</p>
 
-<h3>MUTF-8 (Modified UTF-8) Encoding</h3>
+<h3 id="mutf-8">MUTF-8 (Modified UTF-8) encoding</h3>
 
 <p>As a concession to easier legacy support, the <code>.dex</code> format
 encodes its string data in a de facto standard modified UTF-8 form, hereafter
@@ -531,7 +531,7 @@
 <a href="http://www.unicode.org/reports/tr26/">CESU-8</a> than to UTF-8
 per se.</p>
 
-<h3>encoded_value Encoding</h3>
+<h3 id="encoding">encoded_value encoding</h3>
 <h4>embedded in annotation_element and encoded_array_item </h4>
 
 <p>An <code>encoded_value</code> is an encoded piece of (nearly)
@@ -573,7 +573,7 @@
 </tbody>
 </table>
 
-<h3>Value Formats</h3>
+<h3 id="value-formats">Value formats</h3>
 
 <table class="encodedValue">
 <thead>
@@ -699,7 +699,7 @@
   <td><i>(none; must be <code>0</code>)</i></td>
   <td>encoded_array</td>
   <td>an array of values, in the format specified by
-    "<code>encoded_array</code> Format" below. The size
+    "<code>encoded_array</code> format" below. The size
     of the <code>value</code> is implicit in the encoding.
   </td>
 </tr>
@@ -709,7 +709,7 @@
   <td><i>(none; must be <code>0</code>)</i></td>
   <td>encoded_annotation</td>
   <td>a sub-annotation, in the format specified by
-    "<code>encoded_annotation</code> Format" below. The size
+    "<code>encoded_annotation</code> format" below. The size
     of the <code>value</code> is implicit in the encoding.
   </td>
 </tr>
@@ -733,7 +733,7 @@
 </tbody>
 </table>
 
-<h3>encoded_array Format</h3>
+<h3 id="encoded-array">encoded_array format</h3>
 
 <table class="format">
 <thead>
@@ -760,7 +760,7 @@
 </tbody>
 </table>
 
-<h3>encoded_annotation Format</h3>
+<h3 id="encoded-annotation">encoded_annotation format</h3>
 
 <table class="format">
 <thead>
@@ -794,7 +794,7 @@
 </tbody>
 </table>
 
-<h3>annotation_element Format</h3>
+<h3 id="annotation-element">annotation_element format</h3>
 
 <table class="format">
 <thead>
@@ -821,13 +821,13 @@
 </tbody>
 </table>
 
-<h2>String Syntax</h2>
+<h2 id="string-syntax">String syntax</h2>
 
 <p>There are several kinds of item in a <code>.dex</code> file which
 ultimately refer to a string. The following BNF-style definitions
 indicate the acceptable syntax for these strings.</p>
 
-<h3><i>SimpleName</i></h3>
+<h3 id="simplename"><i>SimpleName</i></h3>
 
 <p>A <i>SimpleName</i> is the basis for the syntax of the names of other
 things. The <code>.dex</code> format allows a fair amount of latitude
@@ -896,7 +896,7 @@
   </tr>
 </table>
 
-<h3><i>MemberName</i></h3>
+<h3 id="membername"><i>MemberName</i></h3>
 <h4>used by field_id_item and method_id_item</h4>
 
 <p>A <i>MemberName</i> is the name of a member of a class, members being
@@ -914,7 +914,7 @@
   </tr>
 </table>
 
-<h3><i>FullClassName</i></h3>
+<h3 id="fullclassname"><i>FullClassName</i></h3>
 
 <p>A <i>FullClassName</i> is a fully-qualified class name, including an
 optional package specifier followed by a required name.</p>
@@ -933,7 +933,7 @@
   </tr>
 </table>
 
-<h3><i>TypeDescriptor</i></h3>
+<h3 id="typedescriptor"><i>TypeDescriptor</i></h3>
 <h4>used by type_id_item</h4>
 
 <p>A <i>TypeDescriptor</i> is the representation of any type, including
@@ -1003,7 +1003,7 @@
   </tr>
 </table>
 
-<h3><i>ShortyDescriptor</i></h3>
+<h3 id="shortydescriptor"><i>ShortyDescriptor</i></h3>
 <h4>used by proto_id_item</h4>
 
 <p>A <i>ShortyDescriptor</i> is the short form representation of a method
@@ -1067,7 +1067,7 @@
   </tr>
 </table>
 
-<h3><i>TypeDescriptor</i> Semantics</h3>
+<h3 id="typedescriptor-semantics"><i>TypeDescriptor</i> semantics</h3>
 
 <p>This is the meaning of each of the variants of <i>TypeDescriptor</i>.</p>
 
@@ -1129,12 +1129,12 @@
 </tbody>
 </table>
 
-<h2>Items and Related Structures</h2>
+<h2 id="items">Items and related structures</h2>
 
 <p>This section includes definitions for each of the top-level items that
 may appear in a <code>.dex</code> file.
 
-<h3>header_item</h3>
+<h3 id="header-item">header_item</h3>
 <h4>appears in the header section</h4>
 <h4>alignment: 4 bytes</h4>
 
@@ -1317,7 +1317,7 @@
 </tbody>
 </table>
 
-<h3>map_list</h3>
+<h3 id="map-list">map_list</h3>
 <h4>appears in the data section</h4>
 <h4>referenced from header_item</h4>
 <h4>alignment: 4 bytes</h4>
@@ -1354,7 +1354,7 @@
 </tbody>
 </table>
 
-<h3>map_item Format</h3>
+<h3 id="map-item">map_item format</h3>
 
 <table class="format">
 <thead>
@@ -1389,7 +1389,7 @@
 </table>
 
 
-<h3>Type Codes</h3>
+<h3 id="type-codes">Type codes</h3>
 
 <table class="typeCodes">
 <thead>
@@ -1513,7 +1513,7 @@
 </table>
 
 
-<h3>string_id_item</h3>
+<h3 id="string-item">string_id_item</h3>
 <h4>appears in the string_ids section</h4>
 <h4>alignment: 4 bytes</h4>
 
@@ -1539,7 +1539,7 @@
 </tbody>
 </table>
 
-<h3>string_data_item</h3>
+<h3 id="string-data-item">string_data_item</h3>
 <h4>appears in the data section</h4>
 <h4>alignment: none (byte-aligned)</h4>
 
@@ -1578,7 +1578,7 @@
 </tbody>
 </table>
 
-<h3>type_id_item</h3>
+<h3 id="type-id-item">type_id_item</h3>
 <h4>appears in the type_ids section</h4>
 <h4>alignment: 4 bytes</h4>
 
@@ -1602,7 +1602,7 @@
 </tbody>
 </table>
 
-<h3>proto_id_item</h3>
+<h3 id="proto-id-item">proto_id_item</h3>
 <h4>appears in the proto_ids section</h4>
 <h4>alignment: 4 bytes</h4>
 
@@ -1645,7 +1645,7 @@
 </tbody>
 </table>
 
-<h3>field_id_item</h3>
+<h3 id="field-id-item">field_id_item</h3>
 <h4>appears in the field_ids section</h4>
 <h4>alignment: 4 bytes</h4>
 
@@ -1683,7 +1683,7 @@
 </tbody>
 </table>
 
-<h3>method_id_item</h3>
+<h3 id="method-id-item">method_id_item</h3>
 <h4>appears in the method_ids section</h4>
 <h4>alignment: 4 bytes</h4>
 
@@ -1721,7 +1721,7 @@
 </tbody>
 </table>
 
-<h3>class_def_item</h3>
+<h3 id="class-def-item">class_def_item</h3>
 <h4>appears in the class_defs section</h4>
 <h4>alignment: 4 bytes</h4>
 
@@ -1825,7 +1825,7 @@
 </tbody>
 </table>
 
-<h3>class_data_item</h3>
+<h3 id="class-data-item">class_data_item</h3>
 <h4>referenced from class_def_item</h4>
 <h4>appears in the data section</h4>
 <h4>alignment: none (byte-aligned)</h4>
@@ -1900,7 +1900,7 @@
 <p><b>Note:</b> All elements' <code>field_id</code>s and
 <code>method_id</code>s must refer to the same defining class.</p>
 
-<h3>encoded_field Format</h3>
+<h3 id="encoded-field-format">encoded_field format</h3>
 
 <table class="format">
 <thead>
@@ -1930,7 +1930,7 @@
 </tbody>
 </table>
 
-<h3>encoded_method Format</h3>
+<h3 id="encoded-method">encoded_method format</h3>
 
 <table class="format">
 <thead>
@@ -1970,7 +1970,7 @@
 </tbody>
 </table>
 
-<h3>type_list</h3>
+<h3 id="type-list">type_list</h3>
 <h4>referenced from class_def_item and proto_id_item</h4>
 <h4>appears in the data section</h4>
 <h4>alignment: 4 bytes</h4>
@@ -1997,7 +1997,7 @@
 </tbody>
 </table>
 
-<h3>type_item Format</h3>
+<h3 id="type-item-format">type_item format</h3>
 
 <table class="format">
 <thead>
@@ -2016,7 +2016,7 @@
 </tbody>
 </table>
 
-<h3>code_item</h3>
+<h3 id="code-item">code_item</h3>
 <h4>referenced from encoded_method</h4>
 <h4>appears in the data section</h4>
 <h4>alignment: 4 bytes</h4>
@@ -2076,7 +2076,7 @@
   <td>ushort[insns_size]</td>
   <td>actual array of bytecode. The format of code in an <code>insns</code>
     array is specified by the companion document
-    <a href="dalvik-bytecode.html">"Bytecode for the Dalvik VM"</a>. Note
+    <a href="dalvik-bytecode.html">Dalvik bytecode</a>. Note
     that though this is defined as an array of <code>ushort</code>, there
     are some internal structures that prefer four-byte alignment. Also,
     if this happens to be in an endian-swapped file, then the swapping is
@@ -2113,7 +2113,7 @@
 </tbody>
 </table>
 
-<h3>try_item Format</h3>
+<h3 id="try-item">try_item format</h3>
 
 <table class="format">
 <thead>
@@ -2151,7 +2151,7 @@
 </tbody>
 </table>
 
-<h3>encoded_catch_handler_list Format</h3>
+<h3 id="encoded-catch-handler-list">encoded_catch_handler_list format</h3>
 
 <table class="format">
 <thead>
@@ -2176,7 +2176,7 @@
 </tbody>
 </table>
 
-<h3>encoded_catch_handler Format</h3>
+<h3 id="encoded-catch-handler">encoded_catch_handler format</h3>
 
 <table class="format">
 <thead>
@@ -2216,7 +2216,7 @@
 </tbody>
 </table>
 
-<h3>encoded_type_addr_pair Format</h3>
+<h3 id="encoded-type-addr-pair">encoded_type_addr_pair format</h3>
 
 <table class="format">
 <thead>
@@ -2242,7 +2242,7 @@
 </tbody>
 </table>
 
-<h3>debug_info_item</h3>
+<h3 id="debug-info-item">debug_info_item</h3>
 <h4>referenced from code_item</h4>
 <h4>appears in the data section</h4>
 <h4>alignment: none (byte-aligned)</h4>
@@ -2457,7 +2457,7 @@
 </tbody>
 </table>
 
-<h3>Special Opcodes</h3>
+<h3 id="opcodes">Special opcodes</h3>
 
 <p>Opcodes with values between <code>0x0a</code> and <code>0xff</code>
 (inclusive) move both the <code>line</code> and <code>address</code>
@@ -2475,7 +2475,7 @@
 address += (adjusted_opcode / DBG_LINE_RANGE)
 </pre>
 
-<h3>annotations_directory_item</h3>
+<h3 id="annotations-directory">annotations_directory_item</h3>
 <h4>referenced from class_def_item</h4>
 <h4>appears in the data section</h4>
 <h4>alignment: 4 bytes</h4>
@@ -2541,7 +2541,7 @@
 <p><b>Note:</b> All elements' <code>field_id</code>s and
 <code>method_id</code>s must refer to the same defining class.</p>
 
-<h3>field_annotation Format</h3>
+<h3 id="field-annotation">field_annotation format</h3>
 
 <table class="format">
 <thead>
@@ -2571,7 +2571,7 @@
 </tbody>
 </table>
 
-<h3>method_annotation Format</h3>
+<h3 id="method-annotation">method_annotation format</h3>
 
 <table class="format">
 <thead>
@@ -2601,7 +2601,7 @@
 </tbody>
 </table>
 
-<h3>parameter_annotation Format</h3>
+<h3 id="parameter-annotation">parameter_annotation format</h3>
 
 <table class="format">
 <thead>
@@ -2631,7 +2631,7 @@
 </tbody>
 </table>
 
-<h3>annotation_set_ref_list</h3>
+<h3 id="set-ref-list">annotation_set_ref_list</h3>
 <h4>referenced from parameter_annotations_item</h4>
 <h4>appears in the data section</h4>
 <h4>alignment: 4 bytes</h4>
@@ -2658,7 +2658,7 @@
 </tbody>
 </table>
 
-<h3>annotation_set_ref_item Format</h3>
+<h3 id="set-ref-item">annotation_set_ref_item format</h3>
 
 <table class="format">
 <thead>
@@ -2682,7 +2682,7 @@
 </tbody>
 </table>
 
-<h3>annotation_set_item</h3>
+<h3 id="annotation-set-item">annotation_set_item</h3>
 <h4>referenced from annotations_directory_item, field_annotations_item, 
 method_annotations_item, and annotation_set_ref_item</h4>
 <h4>appears in the data section</h4>
@@ -2712,7 +2712,7 @@
 </tbody>
 </table>
 
-<h3>annotation_off_item Format</h3>
+<h3 id="off-item">annotation_off_item format</h3>
 
 <table class="format">
 <thead>
@@ -2736,7 +2736,7 @@
 </table>
 
 
-<h3>annotation_item</h3>
+<h3 id="annotation-item">annotation_item</h3>
 <h4>referenced from annotation_set_item</h4>
 <h4>appears in the data section</h4>
 <h4>alignment: none (byte-aligned)</h4>
@@ -2759,14 +2759,14 @@
   <td>annotation</td>
   <td>encoded_annotation</td>
   <td>encoded annotation contents, in the format described by
-    "<code>encoded_annotation</code> Format" under
-    "<code>encoded_value</code> Encoding" above.
+    "<code>encoded_annotation</code> format" under
+    "<code>encoded_value</code> encoding" above.
   </td>
 </tr>
 </tbody>
 </table>
 
-<h3>Visibility values</h3>
+<h3 id="visibility">Visibility values</h3>
 
 <p>These are the options for the <code>visibility</code> field in an
 <code>annotation_item</code>:</p>
@@ -2802,7 +2802,7 @@
 </tbody>
 </table>
 
-<h3>encoded_array_item</h3>
+<h3 id="encoded-array-item">encoded_array_item</h3>
 <h4>referenced from class_def_item</h4>
 <h4>appears in the data section</h4>
 <h4>alignment: none (byte-aligned)</h4>
@@ -2827,7 +2827,7 @@
 </tbody>
 </table>
 
-<h2>System Annotations</h2>
+<h2 id="system-annotation">System annotations</h2>
 
 <p>System annotations are used to represent various pieces of reflective
 information about classes (and methods and fields). This information is
@@ -2836,7 +2836,7 @@
 <p>System annotations are represented in <code>.dex</code> files as
 annotations with visibility set to <code>VISIBILITY_SYSTEM</code>.
 
-<h3>dalvik.annotation.AnnotationDefault</h3>
+<h3 id="dalvik-annotation-default">dalvik.annotation.AnnotationDefault</h3>
 <h4>appears on methods in annotation interfaces</h4>
 
 <p>An <code>AnnotationDefault</code> annotation is attached to each
@@ -2862,7 +2862,7 @@
 </tbody>
 </table>
 
-<h3>dalvik.annotation.EnclosingClass</h3>
+<h3 id="dalvik-enclosingclass">dalvik.annotation.EnclosingClass</h3>
 <h4>appears on classes</h4>
 
 <p>An <code>EnclosingClass</code> annotation is attached to each class
@@ -2890,7 +2890,7 @@
 </tbody>
 </table>
 
-<h3>dalvik.annotation.EnclosingMethod</h3>
+<h3 id="dalvik-enclosingmethod">dalvik.annotation.EnclosingMethod</h3>
 <h4>appears on classes</h4>
 
 <p>An <code>EnclosingMethod</code> annotation is attached to each class
@@ -2916,7 +2916,7 @@
 </tbody>
 </table>
 
-<h3>dalvik.annotation.InnerClass</h3>
+<h3 id="dalvik-innerclass">dalvik.annotation.InnerClass</h3>
 <h4>appears on classes</h4>
 
 <p>An <code>InnerClass</code> annotation is attached to each class
@@ -2953,7 +2953,7 @@
 </tbody>
 </table>
 
-<h3>dalvik.annotation.MemberClasses</h3>
+<h3 id="dalvik-memberclasses">dalvik.annotation.MemberClasses</h3>
 <h4>appears on classes</h4>
 
 <p>A <code>MemberClasses</code> annotation is attached to each class
@@ -2977,7 +2977,7 @@
 </tbody>
 </table>
 
-<h3>dalvik.annotation.Signature</h3>
+<h3 id="dalvik-signature">dalvik.annotation.Signature</h3>
 <h4>appears on classes, fields, and methods</h4>
 
 <p>A <code>Signature</code> annotation is attached to each class,
@@ -3020,7 +3020,7 @@
 </tbody>
 </table>
 
-<h3>dalvik.annotation.Throws</h3>
+<h3 id="dalvik-throws">dalvik.annotation.Throws</h3>
 <h4>appears on methods</h4>
 
 <p>A <code>Throws</code> annotation is attached to each method which is
diff --git a/src/devices/tech/dalvik/index.jd b/src/devices/tech/dalvik/index.jd
index 71324d8..ed1bad1 100644
--- a/src/devices/tech/dalvik/index.jd
+++ b/src/devices/tech/dalvik/index.jd
@@ -1,8 +1,8 @@
-page.title=Dalvik Technical Information
+page.title=ART and Dalvik
 @jd:body
 
 <!--
-    Copyright 2013 The Android Open Source Project
+    Copyright 2014 The Android Open Source Project
 
     Licensed under the Apache License, Version 2.0 (the "License");
     you may not use this file except in compliance with the License.
@@ -16,14 +16,140 @@
     See the License for the specific language governing permissions and
     limitations under the License.
 -->
-<p>Dalvik is the managed runtime used by applications and some system
-services on Android. Dalvik was originally created specifically for
-the Android project.</p>
-<p>Much of the documentation in this directory is intended to help
-with the ongoing development of Dalvik, as opposed to most of the
-other documentation on this site, which is geared more towards
-application development.</p>
 
-<p>Please note, in Android 4.4 a new virtual machine - ART - is being introduced
-experimentally that will eventually replace Dalvik. Please see <a
-href="{@docRoot}devices/tech/dalvik/art.html">Introducing ART</a> for details.
+
+<div id="qv-wrapper">
+<div id="qv">
+  <h2 id="Contents">In this document</h2>
+  <ol id="auto-toc">
+  </ol>
+</div>
+</div>
+
+<p>Android runtime (ART) is the managed runtime used by applications and some system
+services on Android. ART and its predecessor Dalvik were originally created
+specifically for the Android project. As the runtime, ART executes the Dalvik
+Executable format and the Dex bytecode specification.</p>
+
+<p>ART and Dalvik are compatible runtimes running Dex bytecode, so apps
+developed for Dalvik should work when running with ART. However, some
+techniques that work on Dalvik do not work on ART. For information about the
+most important issues, see <a
+href="http://developer.android.com/guide/practices/verifying-apps-art.html">Verifying
+App Behavior on the Android Runtime (ART)</a>.</p>
+
+<h2 id="features">ART Features</h2>
+
+<p>Here are some of the major features implemented by ART.</p>
+
+<h3 id="AOT_compilation">Ahead-of-time (AOT) compilation</h3>
+
+<p>ART introduces ahead-of-time (AOT) compilation, which can improve app
+performance. ART also has tighter install-time verification than Dalvik.</p>
+
+<p>At install time, ART compiles apps using the on-device
+<strong>dex2oat</strong> tool. This utility accepts <a
+href="http://source.android.com/devices/tech/dalvik/dex-format.html">DEX</a> files as input and
+generates a compiled app executable for the target device. The utility should be
+able to compile all valid DEX files without difficulty. However, some
+post-processing tools produce invalid files that may be tolerated by Dalvik but
+cannot be compiled by ART. For more information, see  <a
+href="http://developer.android.com/guide/practices/verifying-apps-art.html#GC_Migration">Addressing
+Garbage Collection Issues</a>.</p>
+
+<h3 id="Improved_GC">Improved garbage collection</h3>
+
+<p>Garbage collection (GC) can impair an app's performance, resulting in choppy
+display, poor UI responsiveness, and other problems. ART improves garbage
+collection in several ways:</p>
+
+<ul>
+  <li>One GC pause instead of two</li>
+  <li>Parallelized processing during the remaining GC pause</li>
+  <li>Collector with lower total GC time for the special case of cleaning up
+  recently-allocated, short-lived objects</li>
+  <li>Improved garbage collection ergonomics, making concurrent garbage
+  collections more timely, which makes <a
+  href="http://developer.android.com/tools/debugging/debugging-memory.html#LogMessages"><code>GC_FOR_ALLOC</code></a>
+  events extremely rare in typical use cases</li>
+  <li>Compacting GC to reduce background memory usage and fragmentation</li>
+</ul>
+
+<h3 id="Debugging_Imp">Development and debugging improvements</h3>
+
+<p>ART offers a number of features to improve app development and debugging.</p>
+
+<h4 id="Sampling_Profiler">Support for sampling profiler</h4>
+
+<p>Historically, developers have used the <a
+href=" http://developer.android.com/tools/help/traceview.html">Traceview</a>
+tool (designed for tracing
+application execution) as a profiler. While Traceview gives useful information,
+its results on Dalvik have been skewed by the per-method-call overhead, and use
+of the tool noticeably affects run time performance.</p>
+
+<p>ART adds support for a dedicated sampling profiler that does not have these
+limitations. This gives a more accurate view of app execution without
+significant slowdown. Sampling support was added to Traceview for
+Dalvik in the KitKat release.</p>
+
+<h4 id="Debugging_Features">Support for more debugging features</h4>
+
+<p>ART supports a number of new debugging options, particularly in monitor- and
+garbage collection-related functionality. For example, you can:</p>
+
+<ul>
+  <li>See what locks are held in stack traces, then jump to the thread that
+      holds a lock.</li>
+  <li>Ask how many live instances there are of a given class, ask to see the
+      instances, and see what references are keeping an object live.</li>
+  <li>Filter events (like breakpoint) for a specific instance.</li>
+  <li>See the value returned by a method when it exits (using “method-exit”
+      events).</li>
+  <li>Set field watchpoint to suspend the execution of a program when a specific
+      field is accessed and/or modified.</li>
+</ul>
+
+<h4 id="Crash_Reports">Improved diagnostic detail in exceptions and crash reports</h4>
+
+<p>ART gives you as much context and detail as possible when runtime exceptions
+occur.  ART provides expanded exception detail for <code><a
+href="http://developer.android.com/reference/java/lang/ClassCastException.html">java.lang.ClassCastException</a></code>,
+<code><a
+href="http://developer.android.com/reference/java/lang/ClassNotFoundException.html">java.lang.ClassNotFoundException</a></code>,
+and <code><a
+href="http://developer.android.com/reference/java/lang/NullPointerException.html">java.lang.NullPointerException</a></code>.
+(Later versions of Dalvik provided expanded exception detail for <code><a
+href="http://developer.android.com/reference/java/lang/ArrayIndexOutOfBoundsException.html">java.lang.ArrayIndexOutOfBoundsException</a></code>
+and <code><a
+href="http://developer.android.com/reference/java/lang/ArrayStoreException.html">java.lang.ArrayStoreException</a></code>,
+which now include the size of the array and the out-of-bounds offset, and ART
+does this as well.)</p>
+
+<p>For example, <code><a
+href="http://developer.android.com/reference/java/lang/NullPointerException.html">java.lang.NullPointerException</a></code>
+now shows information about what the app was trying to do with the null pointer,
+such as the field the app was trying to write to, or the method it was trying to
+call. Here are some typical examples:</p>
+
+<pre class="no-pretty-print">
+java.lang.NullPointerException: Attempt to write to field 'int
+android.accessibilityservice.AccessibilityServiceInfo.flags' on a null object
+reference</pre>
+
+<pre class="no-pretty-print">
+java.lang.NullPointerException: Attempt to invoke virtual method
+'java.lang.String java.lang.Object.toString()' on a null object reference</pre>
+
+<p>ART also provides improved context information in app native crash reports,
+by including both Java and native stack information. </p>
+
+<h2 id="Reporting_Problems">Reporting Problems</h2>
+
+<p>If you run into any issues that are not caused by app JNI problems, please report
+them via the Android Open Source Project Issue Tracker at <a
+href="http://b.android.com">http://b.android.com</a>.
+Please include an <code>"adb bugreport"</code> and link to the app in Google
+Play store if available. Otherwise, if possible, attach an APK that reproduces
+the issue. Please note that issues (including attachments) are publicly
+visible.</p>
diff --git a/src/devices/tech/dalvik/instruction-formats.jd b/src/devices/tech/dalvik/instruction-formats.jd
index 37640da..91d876a 100644
--- a/src/devices/tech/dalvik/instruction-formats.jd
+++ b/src/devices/tech/dalvik/instruction-formats.jd
@@ -1,8 +1,8 @@
-page.title=Dalvik VM Instruction Formats
+page.title=Dalvik Executable instruction formats
 @jd:body
 
 <!--
-    Copyright 2013 The Android Open Source Project
+    Copyright 2014 The Android Open Source Project
 
     Licensed under the Apache License, Version 2.0 (the "License");
     you may not use this file except in compliance with the License.
@@ -16,13 +16,21 @@
     See the License for the specific language governing permissions and
     limitations under the License.
 -->
-<h2>Introduction and Overview</h2>
+<div id="qv-wrapper">
+  <div id="qv">
+    <h2>In this document</h2>
+    <ol id="auto-toc">
+    </ol>
+  </div>
+</div>
 
-<p>This document lists the instruction formats used by Dalvik bytecode
-and is meant to be used in conjunction with the
+<h2 id="intro">Introduction</h2>
+
+<p>This document lists the instruction formats used by the Dalvik Executable
+format and Dalvik bytecode. It is meant to be used in conjunction with the
 <a href="dalvik-bytecode.html">bytecode reference document</a>.</p>
 
-<h3>Bitwise descriptions</h3>
+<h2 id="bitwise">Bitwise descriptions</h2>
 
 <p>The first column in the format table lists the bitwise layout of
 the format. It consists of one or more space-separated "words" each of
@@ -49,7 +57,7 @@
 values in the high eight bits; and the second word consists of a single
 16-bit value.</p>
 
-<h3>Format IDs</h3>
+<h2 id="format-ids">Format IDs</h2>
 
 <p>The second column in the format table indicates the short identifier
 for the format, which is used in other documents and in code to identify
@@ -69,7 +77,7 @@
 "<code>s</code>" suffix, making them four characters total. Similarly,
 suggested "inline" linking formats have an additional "<code>i</code>"
 suffix. (In this context, inline linking is like static linking,
-except with more direct ties into a virtual machine's implementation.)
+except with more direct ties into a machine's implementation.)
 Finally, a couple oddball suggested formats (e.g.,
 "<code>20bc</code>") include two pieces of data which are both
 represented in its format ID.</p>
@@ -147,7 +155,7 @@
 </tbody>
 </table>
 
-<h3>Syntax</h3>
+<h2 id="syntax">Syntax</h2>
 
 <p>The third column of the format table indicates the human-oriented
 syntax for instructions which use the indicated format. Each instruction
@@ -163,7 +171,7 @@
 <p>Arguments which name a register have the form "<code>v<i>X</i></code>".
 The prefix "<code>v</code>" was chosen instead of the more common
 "<code>r</code>" exactly to avoid conflicting with (non-virtual) architectures
-on which a Dalvik virtual machine might be implemented which themselves
+on which the Dalvik Executable format might be implemented which themselves
 use the prefix "<code>r</code>" for their registers. (That is, this
 decision makes it possible to talk about both virtual and real registers
 together without the need for circumlocution.)</p>
@@ -197,7 +205,7 @@
 "<code>[<i>X</i>=<i>N</i>]</code>" (e.g., "<code>[A=2]</code>") to indicate
 the correspondence.</p>
 
-<h2>The Formats</h2>
+<h2 id="formats">The formats</h2>
 
 <table class="format">
 <thead>
diff --git a/src/devices/tech/encryption/index.jd b/src/devices/tech/encryption/index.jd
index e3038d4..d7e9328 100644
--- a/src/devices/tech/encryption/index.jd
+++ b/src/devices/tech/encryption/index.jd
@@ -32,7 +32,7 @@
 automatically encrypted before committing it to disk and all reads
 automatically decrypt data before returning it to the calling process.</p>
 
-<h2 id=what_we’ve_added_for_android_l>What we’ve added for Android L</h2>
+<h2 id="what_we’ve_added_for_android_l">What we’ve added for Android 5.0</h2>
 
 <ul>
   <li>Created fast encryption, which only encrypts used blocks on the data partition
@@ -44,6 +44,10 @@
        href="#storing_the_encrypted_key">Storing the encrypted key</a> for more details.
 </ul>
 
+<p class="caution"><strong>Caution:</strong> Devices upgraded to Android 5.0 and then
+encrypted may be returned to an unencrypted state by factory data reset. New Android 5.0
+devices encrypted at first boot cannot be returned to an unencrypted state.</p>
+
 <h2 id=how_android_encryption_works>How Android encryption works</h2>
 
 <p>Android disk encryption is based on <code>dm-crypt</code>, which is a kernel feature that works at the block device layer. Because of
@@ -58,7 +62,7 @@
 
 <p class="note"><strong>Note:</strong> OEMs can use 128-bit or higher to encrypt the master key.</p>
 
-<p>In the L release, there are four kinds of encryption states: </p>
+<p>In the Android 5.0 release, there are four kinds of encryption states: </p>
 
 <ul>
   <li>default
@@ -115,7 +119,7 @@
   <li>Boot an encrypted device:
   <ul>
     <li>Starting an encrypted device with no password: Booting an encrypted device that
-has no set password (relevant for devices running Android L and later).
+has no set password (relevant for devices running Android 5.0 and later).
     <li> Starting an encrypted device with a password: Booting an encrypted device that
 has a set password.
   </ul>
@@ -123,9 +127,9 @@
 
 <p>In addition to these flows, the device can also fail to encrypt <code>/data</code>. Each of the flows are explained in detail below.</p>
 
-<h3 id=encrypt_a_new_device_with_forceencrypt>Encrypt a new device with <code>/forceencrypt</code></h3>
+<h3 id=encrypt_a_new_device_with_forceencrypt>Encrypt a new device with /forceencrypt</h3>
 
-<p>This is the normal first boot for an Android L device. </p>
+<p>This is the normal first boot for an Android 5.0 device. </p>
 
 <ol>
   <li><strong>Detect unencrypted filesystem with <code>/forceencrypt</code> flag</strong>
@@ -156,10 +160,13 @@
 
   <li><strong>When <code>/data</code> is encrypted, take down the framework</strong>
 
-<p><code>vold</code>  sets <code>vold.decrypt</code> to <code>trigger_default_encryption</code> which starts the <code>defaultcrypto</code> service. (This starts the flow below for mounting a default encrypted
-userdata.) <code>trigger_default_encryption</code> checks the encryption type to see if <code>/data</code> is  encrypted with or without a  password. Because Android L devices are
-encrypted on first boot, there should be no password set; therefore we decrypt
-and mount <code>/data</code>.</p>
+<p><code>vold</code>  sets <code>vold.decrypt</code> to
+<code>trigger_default_encryption</code> which starts the
+<code>defaultcrypto</code> service. (This starts the flow below for mounting a
+default encrypted userdata.) <code>trigger_default_encryption</code> checks the
+encryption type to see if <code>/data</code> is  encrypted with or without a
+password. Because Android 5.0 devices are encrypted on first boot, there should
+be no password set; therefore we decrypt and mount <code>/data</code>.</p>
 
   <li><strong>Mount <code>/data</code></strong>
 
@@ -231,7 +238,7 @@
 <h3 id=starting_an_encrypted_device_with_default_encryption>Starting an encrypted device with default encryption</h3>
 
 <p>This is what happens when you boot up an encrypted device with no password.
-Because Android L devices are encrypted on first boot, there should be no set
+Because Android 5.0 devices are encrypted on first boot, there should be no set
 password and therefore this is the <em>default encryption</em> state.</p>
 
 <ol>
@@ -351,13 +358,13 @@
 
 <ol>
   <li>Generate random 16-byte disk encryption key (DEK) and 16-byte salt.
-  <li>Apply scrypt to the user password and the salt to produce 16-byte intermediate
+  <li>Apply scrypt to the user password and the salt to produce 32-byte intermediate
 key 1 (IK1).
   <li>Pad IK1 with zero bytes to the size of the hardware-bound private key (HBK).
 Specifically, we pad as: 00 || IK1 || 00..00; one zero byte, 32 IK1 bytes, 223
 zero bytes.
   <li>Sign padded IK1 with HBK to produce 256-byte IK2.
-  <li>Apply scrypt to IK2 and salt (same salt as step 2) to produce 16-byte IK3.
+  <li>Apply scrypt to IK2 and salt (same salt as step 2) to produce 32-byte IK3.
   <li>Use the first 16 bytes of IK3 as KEK and the last 16 bytes as IV.
   <li>Encrypt DEK with AES_CBC, with key KEK, and initialization vector IV.
 </ol>
diff --git a/src/devices/tech/index.jd b/src/devices/tech/index.jd
index fbb4cc7..3eed8e9 100644
--- a/src/devices/tech/index.jd
+++ b/src/devices/tech/index.jd
@@ -1,4 +1,4 @@
-page.title=Android Technical Information
+page.title=Android Core Technologies
 @jd:body
 
 <!--
@@ -25,26 +25,26 @@
 </div>
 
 
-<p>Welcome to the Android technical documentation section of the site. Here you
-can find technical information useful to people and organizations who are
+<p>Welcome to the Android core technologies section of the site. Here you
+can find information on common features useful to people and organizations who are
 looking to modify, contribute to, or port the Android software. This is "under
 the hood" information intended for engineers.</p>
 
-<h2 id="accessory-protocol-information">Accessory Protocol Information</h2>
+<h2 id="accessory-protocol-information">Accessories</h2>
 <p>Android devices can connect to hardware accessories, such as audio docks,
 keyboards and custom hardware, through USB or Bluetooth. This document
 describes the Android Open Accessory protocol for accessory hardware builders.</p>
 <p><a href="{@docRoot}accessories/index.html">&raquo; Accessory Protocol Information</a></p>
 
-<h2 id="dalvik-technical-information">Dalvik Technical Information</h2>
-<p>The Dalvik Virtual Machine is the heart of Android. It's a fast, just-in-time
-compiled, optimized bytecode virtual machine. Android applications are
-compiled to Dalvik bytecode and run on the Dalvik VM. This section includes
-detailed information such as the Dalvik bytecode format specification,
-design information on the VM itself, and so on.</p>
-<p><a href="{@docRoot}devices/tech/dalvik/index.html">&raquo; Dalvik Information</a></p>
+<h2 id="art-technical-information">ART</h2>
+<p>The Android runtime (ART) is the heart of Android. It's a fast, ahead-of-time
+compiled runtime with modern garbage collection designed to scale. Android applications are
+compiled to Dalvik bytecode and run with ART. This section includes
+detailed information such as the Dalvik Executable format specification,
+and design information on the runtime itself.</p>
+<p><a href="{@docRoot}devices/tech/dalvik/index.html">&raquo; ART and Dalvik Information</a></p>
 
-<h2 id="data-usage-technical-information">Data Usage Technical Information</h2>
+<h2 id="data-usage-technical-information">Data Usage</h2>
 <p>Android's data usage features allow users to understand and control how their
 device uses network data. This document is designed for systems integrators
 and mobile operators, to help explain technical details they should be aware
@@ -60,19 +60,30 @@
 <p>Android's Hardware Abstraction Layer (HAL) provides the interface between
 software APIs and hardware drivers. This section contains the commented code
 files of the HAL.</p>
-<p><a href="{@docRoot}devices/reference/files.html">&raquo; HAL Reference</a></p>
+<p><a href="{@docRoot}devices/halref/files.html">&raquo; HAL Reference</a></p>
 
-<h2 id="kernel-technical-information">Kernel Technical Information</h2>
+<h2 id="kernel-technical-information">Kernel</h2>
 <p>The kernel configuration settings in this document are meant to be used as a base
 for an Android kernel configuration. All devices should have the options in android-base
 configuration enabled.</p>
 <p><a href="{@docRoot}devices/tech/kernel.html">&raquo; Kernel Information</a></p>
 
-<h2 id="power-technical-information">Power Technical Information</h2>
+<h2 id="lowram-technical-information">Low RAM</h2>
+<p>Android supports devices with limited memory through various optimizations,
+such as improved memory management, reduced system memory, and several
+build-time and kernel configuration settings.</p>
+<p><a href="{@docRoot}devices/low-ram.html">&raquo; Low RAM Information</a></p>
+
+<h2 id="power-technical-information">Power</h2>
 <p>Battery usage statistics are tracked by the framework. This involves keeping track of 
 time spent by different device components in different states.</p>
 <p><a href="{@docRoot}devices/tech/power.html">&raquo; Power Information</a></p>
 
+<h2 id="security-technical-information">Security</h2>
+<p>Android security comes with a variety of measures, including an application
+sandbox, SELinux, dm-verity, encryption, and more.</p>
+<p><a href="{@docRoot}devices/tech/security/index.html">&raquo; Security Information</a></p>
+
 <h2 id="tradefed-test-infrastructure">Trade Federation Testing Infrastructure</h2>
 <p>Trade Federation is a continuous test framework for running tests on Android devices. 
 Trade Federation's modularity makes it straightforward to slot into environments with existing build, 
diff --git a/src/devices/tech/input/keyboard-devices.jd b/src/devices/tech/input/keyboard-devices.jd
index 0c6ba08..74bafaa 100644
--- a/src/devices/tech/input/keyboard-devices.jd
+++ b/src/devices/tech/input/keyboard-devices.jd
@@ -6430,7 +6430,7 @@
 </ol>
 <h3 id="sources">Sources</h3>
 <ol>
-<li><a href="http://www.usb.org/developers/devclass_docs/Hut1_12v2.pdf">USB HID Usage Tables v1.12</a></li>
+<li><a href="http://www.usb.org/developers/hidpage/Hut1_12v2.pdf">USB HID Usage Tables v1.12</a></li>
 <li>Linux 2.6.39 kernel: include/linux/input.h, drivers/hid/hid-input.c</li>
 <li>Android ICS: qwerty.kl, Generic.kl, KeyEvent.java</li>
 </ol>
diff --git a/src/devices/tech/power.jd b/src/devices/tech/power.jd
index 0367f26..38e1e6a 100644
--- a/src/devices/tech/power.jd
+++ b/src/devices/tech/power.jd
@@ -2,7 +2,7 @@
 @jd:body
 
 <!--
-    Copyright 2013 The Android Open Source Project
+    Copyright 2014 The Android Open Source Project
 
     Licensed under the Apache License, Version 2.0 (the "License");
     you may not use this file except in compliance with the License.
@@ -23,268 +23,203 @@
   </div>
 </div>
 
-<p>
-Battery usage information is derived from battery usage statistics and power profile
-values.
-</p>
+<p>Battery usage information is derived from battery usage statistics and power profile values.</p>
 
-<h2>
-Battery Usage Statistics
-</h2>
+<h2 id="usage-statistics">Battery Usage Statistics</h2>
 
-<p>
-Battery usage statistics are tracked by the framework. This involves keeping track of time
-spent by different device components in different states. This includes components such as
-WiFi chipset, Cellular Radio, Bluetooth, GPS, Display and CPU. When these components change
-state from off to on, or from idle to full power, low brightness to high brightness, etc.,
-their controlling service reports to the framework’s BatteryStats service, which collects
-such information over time and persists to storage so that it is available across reboots.
-</p>
+<p>The framework automatically determines battery usage statistics by tracking how long device
+components spend in different states. As components (WiFi chipset, Cellular Radio, Bluetooth, GPS,
+Display, CPU) change states (OFF/ON, idle/full power, low/high brightness, etc.), the controlling
+service reports to the framework BatteryStats service, which collects information over time and
+stores it for use across reboots. The service doesn’t track battery current draw directly,
+but instead collects timing information that can be used to approximate battery
+consumption by different components.</p>
 
-<p>
-The service isn’t keeping track of battery current draw information directly. It’s collecting
-mostly timing information so that it can be used to approximate battery consumption by
-different components.
-</p>
+<p>The framework gathers statistics using the following methods:</p>
 
-<p>
-Consumption of these resources is also (where possible) attributed to the applications using
-the resources, sometimes sharing the blame across multiple applications using a resource
-simultaneously. For instance, multiple apps could be holding wakelocks, keeping the system
-from going into suspend state. Blame is shared across these apps (not necessarily equally).
-</p>
+<ul>
+<li><strong>Push</strong>. Services aware of component changes push state changes to the
+BatteryStats service.</li>
+<li><strong>Pull</strong>. For components such as the CPU usage by apps, the framework automatically
+pulls the data at transition points (such as starting or stopping an activity) to take a
+snapshot.</li>
+</ul>
 
-<p>
-Statistics are persisted to flash periodically (approximately every half hour or so) to avoid
-losing the information on a shutdown (such as due to battery reaching zero remaining
-capacity, which may indicate a battery power consumption problem).
-</p>
+<p>Resource consumption is associated with the application using the resource. When multiple
+applications simultaneously use a resource (such as wakelocks that prevent the system from
+suspending), the framework spreads consumption across those applications, although not necessarily
+equally.</p>
 
-<p>
-Statistics gathering can happen in two ways - push or pull. When services are aware of
-changes happening to a component, they will push state changes to the BatteryStats service.
-With components such as the CPU usage by apps, we pull the data periodically (at certain
-transition points such as starting or stopping an activity, etc) to take a snapshot.
-</p>
+<p>To avoid losing usage statistics for a shutdown event, which may indicate battery power
+consumption problems (i.e. shutdown occurs because the battery reached zero remaining capacity), the
+framework writes statistics to flash storage approximately every 30 minutes.</p>
 
-<p>
-All of the above is automatically done by the framework, and OEMs don’t need to do anything
-in addition to that.
-</p>
+<p>Battery usage statistics are handled entirely by the framework and do not require OEM
+modifications.</p>
 
-<h2>
-Power Profile Values
-</h2>
+<h2 id="profile-values">Power Profile Values</h2>
 
-<p>
-The power profile is where the device manufacturer needs to provide current consumption
-values for various components and their states in order to approximate the actual battery
-drain caused by these components over time. The power consumption of a component is specified
-in units of milliamps (mA) of current draw (at a nominal voltage) in the power profile, and
-can be a fractional value specifying microamps. The value specified should be the mA consumed
-at the battery (and not a value applicable to a power rail that does not correspond to
-current consumed from the battery).
-</p>
+<p>Device manufacturers must provide a component power profile that defines the current
+consumption value for the component and the approximate actual battery drain caused by the
+component over time. Within a power profile, power consumption is specified in milliamps (mA) of
+current draw at a nominal voltage and can be a fractional value specified in microamps (uA). The
+value should be the mA consumed at the battery and not a value applicable to a power rail that does
+not correspond to current consumed from the battery.</p>
 
-<p>
-For instance, to attribute the cost of keeping the display on for a duration of time, the
-framework gathers brightness levels and times spent at each level (quantized to some number
-of bins). The power profile values specify how many milliamps of current are required to keep
-the display on at minimum brightness and at maximum brightness. The time spent at each
-brightness level can then be multiplied by an interpolated display brightness cost to compute
-an approximation of how much battery was drained by the display component.
-</p>
+<p>For example, a display power profile specifies the mA of current required to keep the display on
+at minimum brightness and at maximum brightness. To determine the power cost (i.e. the battery
+drained by the display component) of keeping the display on, the framework tracks the time spent at
+each brightness level, then multiplies those time intervals by an interpolated display brightness
+cost.</p>
 
-<p>
-Similarly, CPU time for each application is multiplied by the mA to keep the CPU running at a
-certain speed to get a comparative ranking of how much battery each app is consuming due to
-executing CPU code (time as the foreground app and total time including background activity
-are reported separately).
-</p>
+<p>The framework also multiplies the CPU time for each application by the mA required to run the CPU
+at a specific speed. This calculation establishes a comparative ranking of how much battery an
+application consumes by executing CPU code (time as the foreground app and total time including
+background activity are reported separately).</p>
 
-<h2>
-Computing power consumption for individual components
-</h2>
+<h2 id="component-power">Measuring Component Power</h2>
+
+<p>You can determine individual component power consumption by comparing the current drawn by the
+device when the component is in the desired state (on, active, scanning, etc.) and when the
+component is off. Measure the average instantaneous current drawn on the device at a
+nominal voltage using an external power monitor, such as a bench power supply or specialized
+battery-monitoring tools (such as Monsoon Solution Inc. Power Monitor and Power Tool software).</p>
 
 <p class="note">
-<strong>Note:</strong> manufacturers usually supply information about how much current an
-individual component consumes. It may be possible to use these values if they are an accurate
-representation of the the current drawn from the device’s battery in practice. However, we
-encourage you validate manufacturer-provided values before entering them in your device’s
-power profile.
-</p>
+<strong>Note:</strong> Manufacturers often supply information about the current consumed by an
+individual component. Use this information if it accurately represents the current drawn from the
+device battery in practice. However, validate manufacturer-provided values before
+using those values in your device power profile.</p>
 
-<p>
-Current consumption for an individual component is calculated by:
-</p>
+<p>When measuring, ensure the device does not have a connection to an external charge source, such
+as a USB connection to a development host used when running Android Debug Bridge (adb). The device
+under test might draw current from the host, thus lowering measurements at the battery. Avoid USB
+On-The-Go (OTG) connections, as the OTG device might draw current from the device under test.</p>
+
+<p>Excluding the component being measured, the system should run at a constant level of power
+consumption to avoid inaccurate measurements caused by changes in other components. System
+activities that can introduce unwanted changes to power measurements include:</p>
 
 <ul>
-<li>
-Measuring the current drawn by the device when the component is in the desired state (e.g.,
-on, active, or scanning)
-</li>
-<li>
-Measuring the current drawn by the device when the component is
-off
-</li>
-<li>subtracting (2) from (1).</li>
+<li><strong>Cellular, Wi-Fi, and Bluetooth receive, transmit, or scanning activity</strong>. When
+not measuring cell radio power, set the device to airplane mode and enable Wi-Fi or Bluetooth as
+appropriate.</li>
+<li><strong>Screen on/off</strong>. Colors displayed while the screen is on can affect power draw on
+some screen technologies. Turn the screen off when measuring values for non-screen components.</li>
+<li><strong>System suspend/resume</strong>. A screen off state can trigger a system suspension,
+placing parts of the device in a low-power or off state. This can affect power consumption of the
+component being measured and introduce large variances in power readings as the system periodically
+resumes to send alarms, etc. For details, see <a href="#control-suspend">Controlling System
+Suspend</a>.</li>
+<li><strong>CPUs changing speed and entering/exiting low-power scheduler idle state</strong>. During
+normal operation, the system makes frequent adjustments to CPU speeds, the number of online CPU
+cores, and other system core states such as memory bus speed and voltages of power rails associated
+with CPUs and memory. During testing, these adjustments affect power measurements:
 
-
-<img></img><p class="img-caption"></p>
-<p>
-We recommend that you measure the current (usually the average instantaneous current) drawn
-on the device at a nominal voltage. This can be accomplished using a bench power supply or
-using specialized battery-monitoring tools (such as Monsoon Solution Inc.’s Power Monitor and
-Power Tool software).
-</p>
-<p>
-Take the measurements with no external charger connected to the device, including no USB
-connection to a host (as used for connecting to development hosts via the adb Android Debug
-Bridge), which may draw current from the host and lower the measurements at the battery. If
-the device supports USB On The Go (OTG) then having an OTG device connected may draw
-additional power from the device being measured, so disconnect any such OTG device.
-</p>
-<p>
-While taking measurements, you’ll want to try to keep the rest of the system other than the
-component being measured running at a constant level of power consumption, to avoid
-introducing inaccuracies in the measurements due to changes in other components. System
-activities that may introduce unwanted changes to power measurements include:
-</p>
 <ul>
-<li>
-Cellular, Wi-Fi, and Bluetooth receive, transmit, or scanning activity. You may want to put
-the device into airplane mode when not measuring cell radio power, and individually enable
-Wi-Fi or Bluetooth when appropriate.
-</li>
-<li>
-Screen/backlight on or off. The colors displayed while screen is on can also affect power
-draw on certain screen technologies. Measurements for components other than the screen on
-values should be made with screen turned off. But see the next item for an important
-consideration when the screen is off.
-</p>
-<li>
-System suspended/resumed state. When the screen is off the system may enter a suspend state
-where much of the device may be powered off or placed in a low-power state, probably
-affecting power consumption of the component being measured and possibly introducing large
-variances in power readings as the system periodically resumes to service alarms and such.
-See Controlling and Measuring System Suspend State for more instructions.
-</li>
-<li>
-CPUs changing speed and entering/exiting low-power scheduler idle state. The system may make
-frequent adjustments to the speeds of CPUs, how many CPU cores are online, and other system
-core state such as memory bus speed and voltages of power rails associated with CPUs and
-memory. If these are changing during your measurements then you may want to prevent CPU speed
-scaling operations, which may also reduce the amount of clock and voltage scaling of memory
-busses and other system core components. Scheduling activity may also affect what percentage
-of the time the CPUs spend in low-power idle states. See Controlling and Measuring CPU Speeds
-for more instructions.
+<li>CPU speed scaling operations can reduce the amount of clock and voltage scaling of memory buses
+and other system core components.</li>
+<li>Scheduling activity can affect the percentage of the time CPUs spend in low-power idle states.
+For details on preventing these adjustments from occurring during testing, see
+<a href="#control-cpu">Controlling CPU Speeds</a>.</li>
+</ul>
+
 </li>
 </ul>
-<p>
-For instance, to compute the value for <code>screen.on</code>, you would run the device in a stable state,
-with CPU speed held constant, device in airplane mode, with a partial wakelock held to
-prevent system suspend. The current readings in this state should be stable. Take the reading
-- say 200mA. Now turn on the screen at minimum brightness. If the power monitor shows 300mA,
-then <code>screen.on</code> = (300 - 200) = 100mA.
-</p>
-<p>
-For components that don’t have a flat waveform of current consumption when active (such as
-the cellular radio or wifi), you may need to measure an average current over a period of
-time. Your power monitoring tool may be able to compute this average for you.
-</p>
-<p>
-Replacing the battery with an external power source may require working around problems that
-can occur due to not connecting battery thermistor or integrated fuel gauge pins. For
-example, the system might take an invalid battery temperature reading or remaining battery
-capacity reading that could cause the kernel or Android system to shut down. Sometimes these
-problems are avoided through the use of “fake batteries” that replace normal batteries for
-power measurement purposes, constructed to match the dimensions and electrical properties of
-the batteries for the product being measured. Fake batteries can provide signals on
-thermistor or fuel gauge pins that mimic temperature and state of charge readings for a
-normally running system, and may also provide convenient leads for connecting to external
-power supplies. In other cases it may be easier to modify the system to ignore the invalid
-data from the missing battery.
-</p>
-<h3>
-Controlling and Measuring System Suspend State
-</h3>
-<p>
-As mentioned above, system suspend can introduce unwanted variance in power measurements and
-place system components in low power states not appropriate for measuring active power use.
-But at some point you’ll also need to measure the power draw of system suspend state. This
-section describes how to avoid system suspend state when you don’t want it to interfere with
-other measurements, and how to measure the power draw of system suspend state when you do
-want to measure it.
-</p>
-<p>
-To avoid system suspend you can temporarily connect the device to a development host and
-issue the following command to hold a “partial wakelock”:
-</p>
+
+<p>For example, Joe Droid wants to compute the <code>screen.on</code> value for a device. He enables
+airplane mode on the device, runs the device at a stable current state, holds the CPU speed constant,
+and uses a partial wakelock to prevent system suspend. Joe then turns the device screen off and
+takes a measurement (200mA). Next, Joe turns the device screen on at minimum brightness and takes
+another measurement (300mA). The <code>screen.on</code> value is 100mA (300 - 200).</p>
+
+<p>For components that don’t have a flat waveform of current consumption when active (such as
+cellular radio or Wi-Fi), measure the average current over time using a power monitoring tool.</p>
+
+<p>When using an external power source in place of the device battery, the system might experience
+problems due to an unconnected battery thermistor or integrated fuel gauge pins (i.e. an invalid
+reading for battery temperature or remaining battery capacity could shut down the kernel or Android
+system). Fake batteries can provide signals on thermistor or fuel gauge pins that mimic temperature
+and state of charge readings for a normal system, and may also provide convenient leads for
+connecting to external power supplies. Alternatively, you can modify the system to ignore the
+invalid data from the missing battery.</p>
+
+<a name="control-suspend"><h3 id="control-suspend">Controlling System Suspend</h3></a>
+
+<p>This section describes how to avoid system suspend state when you don’t want it to interfere with
+other measurements, and how to measure the power draw of system suspend state when you do want to
+measure it.</p>
+
+<h4>Preventing System Suspend</h4>
+
+<p>System suspend can introduce unwanted variance in power measurements and place system components
+in low-power states inappropriate for measuring active power use. To prevent the system from
+suspending while the screen is off, use a temporary partial wakelock. Using a USB cable, connect the
+device to a development host, then issue the following command:</p>
+
 <pre>
 $ adb shell "echo temporary &gt; /sys/power/wake_lock"
 </pre>
-<p>
-which will prevent the system from suspending while the screen is off. Disconnect the USB
-cable before making measurements.
-</p>
-<p>
-You can undo the effect of this later with:
-</p>
+
+<p>While in wake_lock, the screen off state does not trigger a system suspend. (Remember to
+disconnect the USB cable from the device before measuring power consumption.)</p>
+
+<p>To remove the wakelock:</p>
+
 <pre>
 $ adb shell "echo temporary &gt; /sys/power/wake_unlock"
 </pre>
-<p>
-The power consumption of the system suspend state is measured for the value of cpu.idle in
-the power profile. For this measurement it may be best to place the device in airplane mode
-to avoid any concurrent activity by the cellular radio, which may run on a processor separate
-from the portions of the SoC controlled by the system suspend. To ensure the measurement is
-made while the system is in the correct state, it may be necessary to first confirm the
-current readings settle to a steady value within the expected range for the consumption of
-the suspend state of the SoC entered plus the consumption of additional system components
-that remain powered (such as the USB PHY). A system console or other external indication of
-system status (such as turning off an LED when not in suspend) may also be observed during
-the measurement.
-</p>
-<h3>
-Controlling and Measuring CPU Speeds
-</h3>
-<p>
-While active, CPUs can be brought online or put offline, change clock speeds and associated
-voltages (possibly also affecting memory bus speeds and other system core power state), and
-can enter lower power idle states while in the kernel idle loop. Not only are these different
-CPU power states measured for the power profile, it may be necessary to avoid the power draw
-variance when measuring other parameters.
-</p>
-<p>
-The power profile currently assumes all CPUs have the same available speeds and power
-characteristics.
-</p>
-<p>
-While measuring CPU power, or holding CPU power constant in order to make other measurements,
-it may be best to hold the number of CPUs brought online constant, such as to have one CPU
-online and the rest offline / hotplugged out. Keeping all CPUs but one in scheduling idle may
-deliver acceptable results. Stopping the Android framework with adb shell stop can help
-reduce system scheduling activity.
-</p>
-<p>
-You’ll specify the available CPU speeds for your device in the power profile cpu.speeds
-entry. You can get a list of these using
-</p>
+
+<h4>Measuring System Suspend</h4>
+
+<p>To measure the power draw during the system suspend state, measure the value of cpu.idle in the
+power profile. Before measuring:
+
+<ul>
+<li>Remove existing wakelocks (as described above).</li>
+<li>Place the device in airplane mode to avoid concurrent activity by the cellular radio, which
+might run on a processor separate from the SoC portions controlled by the system suspend.</li>
+<li>Ensure the system is in suspend state by:
+<ul>
+<li>Confirming current readings settle to a steady value. Readings should be within the expected
+range for the power consumption of the SoC suspend state plus the power consumption of system
+components that remain powered (such as the USB PHY).</li>
+<li>Checking the system console output.</li>
+<li>Watching for external indications of system status (such as an LED turning off when not in
+suspend).</li>
+</ul>
+</li>
+</ul>
+
+<a name="control-cpu"><h3 id="control-cpu">Controlling CPU Speeds</h3></a>
+
+<p>Active CPUs can be brought online or put offline, have their clock speeds and associated voltages
+changed (possibly also affecting memory bus speeds and other system core power states), and
+can enter lower power idle states while in the kernel idle loop. When measuring different CPU power
+states for the power profile, avoid the power draw variance when measuring other parameters. The
+power profile assumes all CPUs have the same available speeds and power characteristics.</p>
+
+<p>While measuring CPU power, or while holding CPU power constant to make other measurements, keep
+the number of CPUs brought online constant (such as having one CPU online and the rest
+offline/hotplugged out). Keeping all CPUs except one in scheduling idle may produce acceptable
+results. Stopping the Android framework with <code>adb shell stop</code> can reduce system
+scheduling activity.</p>
+
+<p>You must specify the available CPU speeds for your device in the power profile cpu.speeds
+entry. To get a list of available CPU speeds, run:</p>
+
 <pre>
 adb shell cat /sys/devices/system/cpu/cpu0/cpufreq/stats/time_in_state
 </pre>
-<p>
-These speeds are matched with their corresponding power measurements in value <code>cpu.active</code>.
-</p>
-<p>
-If your platform’s power consumption is significantly affected by how many cores are brought
-online then you may need to modify the cpufreq driver or governor for your platform to
-control this. For many platforms, the easiest way to control CPU speed is to use the
-“userspace” cpufreq governor and use sysfs interfaces to set the speed. The exact commands
-differ depending on your platform’s cpufreq implementation. The following commands from the
-system console or adb shell could be used to set a speed for 200MHz on a system with only 1
-CPU, or all CPUs sharing a common cpufreq policy:
-</p>
+
+<p>These speeds match the corresponding power measurements in value <code>cpu.active</code>.</p>
+
+<p>For platforms where number of cores brought online significantly affects power consumption, you
+might need to modify the cpufreq driver or governor for the platform. Most platforms support
+controlling CPU speed using the “userspace” cpufreq governor and using sysfs interfaces to
+set the speed. For example, to set speed for 200MHz on a system with only 1 CPU or all CPUs sharing
+a common cpufreq policy, use the system console or adb shell to run the following commands:</p>
+
 <pre>
 echo userspace &gt; /sys/devices/system/cpu/cpu0/cpufreq/scaling_governor
 echo 200000 &gt; /sys/devices/system/cpu/cpu0/cpufreq/scaling_max_freq
@@ -292,105 +227,347 @@
 echo 200000 &gt; /sys/devices/system/cpu/cpu0/cpufreq/scaling_setspeed
 cat /sys/devices/system/cpu/cpu0/cpufreq/scaling_cur_freq
 </pre>
-<p>
-which makes sure the new speed is not outside the allowed bounds, sets the new speed, and
-then prints which speed the CPU is actually running at for verification. (If the current
-minimum speed prior to executing the above is above 200000, you may have to reverse the order
-of the first two lines, or execute the first line again, to drop the minimum speed prior to
-setting the maximum speed.)
+
+<p class="note">
+<strong>Note</strong>: The exact commands differ depending on the platform cpufreq implementation.
 </p>
-<p>
-To measure current consumed by a CPU while running at various speeds, you may need to place a
-CPU in a CPU-bound loop such as:
-</p>
+
+<p>These commands ensure the new speed is not outside the allowed bounds, set the new speed, then
+print the speed at which the CPU is actually running (for verification). If the current
+minimum speed prior to execution is higher than 200000, you might need to reverse the order
+of the first two lines, or execute the first line again to drop the minimum speed prior to
+setting the maximum speed.</p>
+
+<p>To measure current consumed by a CPU running at various speeds, use the system console to place
+the CPU in a CPU-bound loop using the following command:</p>
 <pre>
 # while true; do true; done
 </pre>
-<p>
-on the system console and take the measurement while the above runs.
-</p>
-<p>
-If your device may limit maximum CPU speed while thermal throttling due to a high temperature
-measurement, possibly as a result of running CPUs at high speeds for sustained periods, then
-watch out for this while taking measurements. You may need to watch system console output, or
-check the kernel log after measuring.
-</p>
-<p>
-For the <code>cpu.active</code> value you can measure the power consumed when the system is not in suspend
-but not executing tasks. The CPU should be in a low-power scheduler “idle loop”, possibly
-executing an ARM Wait For Event instruction or in an SoC-specific low power state with a fast
-exit latency suitable for idle use. There may be more than one idle state in use on your
-platform with differing levels of power consumption; choose a representative idle state for
-longer periods of scheduler idle (several milliseconds). You may need to examine the power
-graph on your measurement equipment and choose samples where the CPU is at its lowest
-consumption, discarding higher samples where the CPU exited idle.
-</p>
-<h3>
-Measuring Screen Power
-</h3>
-<p>
-Screen on power is typically measured with all other devices that are turned on with the
-screen also enabled. For example, the touchscreen and any display backlight would normally
-also be turned on during the measurement, to get a more realistic example of screen on power
-usage.
-</p>
-<p>
-Some display technologies vary in power consumption according to the colors displayed, and so
-power measurements may vary considerably depending on what is on the screen at the time. It’s
-best to choose to display something that has power characteristics of a realistic screen,
-somewhere between the extremes of an all-black screen (which consumes the lowest power for
-some technologies) and an all-white screen. A common choice is a view of a schedule in the
-calendar app, which has a mix of white background and non-white elements.
-</p>
-<p>
-The cost of having the screen on is measured at two points: at minimum display/backlight
-brightness, and at maximum brightness. Setting the display brightness to minimum using the
-Settings app Display Brightness slider might not produce accurate results. The Android UI
-will typically only allow you to set the brightness to a minimum of about 10-20% of the
-possible panel/backlight brightness -- it doesn't allow the user to set brightness so low
-that the screen might not be visible without great effort. If you have a sysfs file that
-controls panel brightness all the way down to the minimum brightness supported by the
-hardware then that's even better.
-</p>
-<p>
-If your platform provides sysfs files that turns the LCD panel, backlight, and touchscreen on
-and off then that’s a good way to take measurements with the screen on and off. Otherwise,
-holding a partial wakelock so the system doesn't go to suspend, and turning on and off the
-screen with the power button, should be fine.
-</p>
-<h3>
-Measuring Wi-Fi Power
-</h3>
-<p>
-It’s recommended to perform Wi-Fi measurements on a relatively quiet network, without
-introducing a lot of additional work processing high volumes of broadcast traffic unrelated
-to the activity being measured.
-</p>
-<p>
-The <code>wifi.on</code> value measures the power consumed when Wi-Fi is enabled but not actively
+
+<p>Take the measurement while the loop executes.</p>
+
+<p>Some devices can limit maximum CPU speed while performing thermal throttling due to a high
+temperature measurement (for example, after running CPUs at high speeds for sustained periods). Watch for
+such limiting, either using the system console output when taking measurements or by checking the
+kernel log after measuring.</p>
+
+<p>For the <code>cpu.active</code> value, measure the power consumed when the system is not in
+suspend and not executing tasks. The CPU should be in a low-power scheduler <em>idle loop</em>,
+possibly executing an ARM Wait For Event instruction or in an SoC-specific low power state
+with a fast exit latency suitable for idle use. Your platform might have more than one idle state in
+use with differing levels of power consumption; choose a representative idle state for
+longer periods of scheduler idle (several milliseconds). Examine the power graph on your measurement
+equipment and choose samples where the CPU is at its lowest consumption, discarding higher samples
+where the CPU exited idle.</p>
+
+<h3 id="screen-power">Measuring Screen Power</h3>
+
+<p>When measuring screen on power, ensure that other devices normally turned on when the screen is
+enabled are also on. For example, if the touchscreen and display backlight would normally be on when
+the screen is on, ensure these devices are on when you measure to get a realistic example of screen
+on power usage.</p>
+
+<p>Some display technologies vary in power consumption according to the colors displayed, causing
+power measurements to vary considerably depending on what is displayed on the screen at the time of
+measurement. When measuring, ensure the screen is displaying something that has power
+characteristics of a realistic screen. Aim between the extremes of an all-black screen (which
+consumes the lowest power for some technologies) and an all-white screen. A common choice is a view
+of a schedule in the calendar app, which has a mix of white background and non-white elements.</p>
+
+<p>Measure screen on power at <em>minimum</em> and <em>maximum</em> display/backlight brightness.
+To set minimum brightness:</p>
+
+<ul>
+<li><strong>Use the Android UI</strong> (not recommended). Set the Settings &gt; Display Brightness
+slider to the minimum display brightness. However, the Android UI allows setting brightness only to
+a minimum of 10-20% of the possible panel/backlight brightness, and does not allow setting
+brightness so low that the screen might not be visible without great effort.</li>
+<li><strong>Use a sysfs file</strong> (recommended). If available, use a sysfs file to control panel
+brightness all the way down to the minimum brightness supported by the hardware.</li>
+</ul>
+
+<p>Additionally, if the platform sysfs file enables turning the LCD panel, backlight, and
+touchscreen on and off, use the file to take measurements with the screen on and off. Otherwise,
+set a partial wakelock so the system does not suspend, then turn on and off the
+screen with the power button.</p>
+
+<h3 id="wifi-power">Measuring Wi-Fi Power</h3>
+
+<p>Perform Wi-Fi measurements on a relatively quiet network. Avoid introducing additional work
+processing high volumes of broadcast traffic that is unrelated to the activity being measured.</p>
+
+<p>The <code>wifi.on</code> value measures the power consumed when Wi-Fi is enabled but not actively
 transmitting or receiving. This is often measured as the delta between the current draw in
-system suspend (sleep) state with Wi-Fi enabled vs. disabled.
-</p>
-<p>
-The <code>wifi.scan</code> value measures the power consumed during a Wi-Fi scan for access points. Wi-Fi
-scans can be triggered by an app using the WifiManager class <code>startScan()</code> API, which is
-documented at http://developer.android.com/reference/android/net/wifi/WifiManager.html . You
-can also open Settings &gt; Wi-Fi, which will perform scans for access points every few
-seconds with an apparent jump in power consumption, but the screen power must be subtracted
-from these measurements.
-</p>
-<p>
-Network receive and transmit traffic can be generated using controlled setup such as 
-<a href="http://en.wikipedia.org/wiki/Iperf">iperf</a> if desired.
-</p>
-<h2>
-List of values and their meaning
-</h2>
+system suspend (sleep) state with Wi-Fi enabled vs. disabled.</p>
+
+<p>The <code>wifi.scan</code> value measures the power consumed during a Wi-Fi scan for access
+points. Applications can trigger Wi-Fi scans using the WifiManager class
+<a href="http://developer.android.com/reference/android/net/wifi/WifiManager.html">
+<code>startScan()</code> API</a>. You can also open Settings &gt; Wi-Fi, which performs access point
+scans every few seconds with an apparent jump in power consumption, but you must subtract screen
+power from these measurements.</p>
+
+<p class="note">
+<strong>Note</strong>: Use a controlled setup (such as
+<a href="http://en.wikipedia.org/wiki/Iperf">iperf</a>) to generate network receive and transmit
+traffic.</p>
+
+<h2 id="device-power">Measuring Device Power</h2>
+
+<p>You can determine device power consumption for Android devices that include a battery fuel gauge
+such as a Summit SMB347 or Maxim MAX17050 (available on many Nexus devices). Use the in-system
+battery fuel gauge when external measurement equipment is not available or is inconvenient to
+connect to a device (such as in mobile usage).</p>
+
+<p>Measurements can include instantaneous current, remaining charge, battery capacity at test start
+and end, and more depending on the supported properties of the device (see below). For best results,
+perform device power measurements during long-running A/B tests that use the same device type with
+the same fuel gauge and same current sense resistor. Ensure the starting battery charge is the same
+for each device to avoid differing fuel gauge behavior at different points in the battery discharge
+curve.</p>
+
+<p>Even with identical test environments, measurements are not guaranteed to be of high absolute
+accuracy. However, most inaccuracies specific to the fuel gauge and sense resistor are consistent
+between test runs, making comparisons between identical devices useful. We recommend running
+multiple tests in different configurations to identify significant differences and relative power
+consumption between configurations.</p>
+
+<h3 id="power-consumption">Reading Power Consumption</h3>
+
+<p>To read power consumption data, insert calls to the API in your testing code.</p>
+
+<pre>
+import android.os.BatteryManager;
+import android.os.ServiceManager;
+import android.content.Context;
+BatteryManager mBatteryManager =
+(BatteryManager) context.getSystemService(Context.BATTERY_SERVICE);
+Long energy =
+mBatteryManager.getLongProperty(BatteryManager.BATTERY_PROPERTY_ENERGY_COUNTER);
+Slog.i(TAG, "Remaining energy = " + energy + "nWh");
+</pre>
+
+<h3 id="avail-props">Available Properties</h3>
+
+<p>Android supports the following battery fuel gauge properties:</p>
+
+<pre>
+BATTERY_PROPERTY_CHARGE_COUNTER   Remaining battery capacity in microampere-hours
+BATTERY_PROPERTY_CURRENT_NOW      Instantaneous battery current in microamperes
+BATTERY_PROPERTY_CURRENT_AVERAGE  Average battery current in microamperes
+BATTERY_PROPERTY_CAPACITY         Remaining battery capacity as an integer percentage
+BATTERY_PROPERTY_ENERGY_COUNTER   Remaining energy in nanowatt-hours
+</pre>
+
+<p>Most properties are read from kernel power_supply subsystem attributes of similar names.
+However, the exact properties, resolution of property values, and update frequency
+available for a specific device depend on:</p>
+
+<ul>
+<li>Fuel gauge hardware, such as a Summit SMB347 or Maxim MAX17050.</li>
+<li>Fuel gauge-to-system connection, such as the value of external current sense resistors.</li>
+<li>Fuel gauge chip software configuration, such as values chosen for average current computation
+intervals in the kernel driver.</li>
+</ul>
+
+<p>For details, see the properties available for <a href="#nexus-devices">Nexus devices</a>.</p>
+
+<h3 id="maxim-fuel">Maxim Fuel Gauge</h3>
+
+<p>When determining battery state-of-charge over a long period of time, the Maxim fuel gauge
+(MAX17050, BC15) corrects for coulomb-counter offset measurements. For measurements made over a
+short period of time (such as power consumption metering tests), the fuel gauge does not make
+corrections, making the offset the primary source of error when current measurements are too small
+(although no amount of time can eliminate the offset error completely).</p>
+
+<p>For a typical 10mOhm sense resistor design, the offset current should be better than 1.5mA,
+meaning any measurement is +/-1.5mA (PCBoard layout can also affect this variation). For example,
+when measuring a large current (200mA) you can expect the following:</p>
+
+<ul>
+<li>2mA (1% gain error of 200mA due to fuel gauge gain error)</li>
+<li>+2mA (1% gain error of 200mA due to sense resistor error)</li>
+<li>+1.5mA  (current sense offset error from fuel gauge)</li>
+</ul>
+
+<p>The total error is 5.5mA (2.75%). Compare this to a medium current (50mA) where the same error
+percentages give a total error of 7% or to a small current (15mA) where +/-1.5mA gives a total error
+of 10%.</p>
+
+<p>For best results, we recommend measuring greater than 20mA. Gain measurement errors are
+systematic and repeatable, enabling you to test a device in multiple modes and get clean relative
+measurements (with exceptions for the 1.5mA offset).</p>
+
+<p>For +/-100uA relative measurements, required measurement time depends on:</p>
+
+<ul>
+<li><b>ADC sampling noise</b>. The MAX17050 with its normal factory configuration produces +/-1.5mA
+sample-to-sample variation due to noise, with each sample delivered at 175.8ms. You can expect a
+rough +/-100uA for a 1-minute test window and a clean 3-sigma noise less than 100uA (or 1-sigma
+noise at 33uA) for a 6 minute test window.</li>
+<li><b>Sample Aliasing because of load variation</b>. Variation exaggerates errors, so for samples
+with variation inherent in the loading, consider using a longer test window.</li>
+</ul>
+
+<h3 id="nexus-devices">Supported Nexus Devices</h3>
+
+<h5 id="nexus-5">Nexus 5</h5>
+
+<table>
+<tbody>
+<tr>
+<th>Model</th>
+<td>Nexus 5</td>
+</tr>
+<tr>
+<th>Fuel Gauge</th>
+<td>Maxim MAX17048 fuel gauge (ModelGauge™, no coulomb counter)</td>
+</tr>
+<tr>
+<th>Properties</th>
+<td>BATTERY_PROPERTY_CAPACITY</td>
+</tr>
+<tr>
+<th>Measurements</th>
+<td>The fuel gauge does not support any measurements other than battery State Of Charge to a
+resolution of %/256 (1/256th of a percent of full battery capacity).</td>
+</tr>
+</tbody>
+</table>
+
+
+<h5 id="nexus-6">Nexus 6</h5>
+
+<table>
+<tbody>
+<tr>
+<th>Model</th>
+<td>Nexus 6</td>
+</tr>
+<tr>
+<th>Fuel Gauge</th>
+<td>Maxim MAX17050 fuel gauge (a coulomb counter with Maxim ModelGauge™ adjustments), and a 10mohm
+current sense resistor.</td>
+</tr>
+<tr>
+<th>Properties</th>
+<td>BATTERY_PROPERTY_CAPACITY<br>
+BATTERY_PROPERTY_CURRENT_NOW<br>
+BATTERY_PROPERTY_CURRENT_AVERAGE<br>
+BATTERY_PROPERTY_CHARGE_COUNTER<br>
+BATTERY_PROPERTY_ENERGY_COUNTER</td>
+</tr>
+<tr>
+<th>Measurements</th>
+<td>CURRENT_NOW resolution 156.25uA, update period is 175.8ms.<br>
+CURRENT_AVERAGE resolution 156.25uA, update period configurable 0.7s - 6.4h, default 11.25 secs.<br>
+CHARGE_COUNTER (accumulated current, non-extended precision) resolution is 500uAh (raw coulomb
+counter read, not adjusted by fuel gauge for coulomb counter offset, plus inputs from the ModelGauge
+m3 algorithm including empty compensation).<br>
+CHARGE_COUNTER_EXT (extended precision in kernel) resolution 8nAh.<br>
+ENERGY_COUNTER is CHARGE_COUNTER_EXT at nominal voltage of 3.7V.</td>
+</tr>
+</tbody>
+</table>
+
+
+<h5 id="nexus-9">Nexus 9</h5>
+
+<table>
+<tbody>
+<tr>
+<th>Model</th>
+<td>Nexus 9</td>
+</tr>
+<tr>
+<th>Fuel Gauge</th>
+<td>Maxim MAX17050 fuel gauge (a coulomb counter with Maxim ModelGauge™ adjustments), and a 10mohm
+current sense resistor.</td>
+</tr>
+<tr>
+<th>Properties</th>
+<td>BATTERY_PROPERTY_CAPACITY<br>
+BATTERY_PROPERTY_CURRENT_NOW<br>
+BATTERY_PROPERTY_CURRENT_AVERAGE<br>
+BATTERY_PROPERTY_CHARGE_COUNTER<br>
+BATTERY_PROPERTY_ENERGY_COUNTER</td>
+</tr>
+<tr>
+<th>Measurements</th>
+<td>CURRENT_NOW resolution 156.25uA, update period is 175.8ms.<br>
+CURRENT_AVERAGE resolution 156.25uA, update period configurable 0.7s - 6.4h, default 11.25 secs.<br>
+CHARGE_COUNTER (accumulated current, non-extended precision) resolution is 500uAh.<br>
+CHARGE_COUNTER_EXT (extended precision in kernel) resolution 8nAh.<br>
+ENERGY_COUNTER is CHARGE_COUNTER_EXT at nominal voltage of 3.7V.<br>
+Accumulated current update period 175.8ms.<br>
+ADC sampled at 175ms quantization with a 4ms sample period. Can adjust duty cycle.</td>
+</tr>
+</tbody>
+</table>
+
+
+<h5 id="nexus-10">Nexus 10</h5>
+
+<table>
+<tbody>
+<tr>
+<th>Model</th>
+<td>Nexus 10</td>
+</tr>
+<tr>
+<th>Fuel Gauge</th>
+<td>Dallas Semiconductor DS2784 fuel gauge (a coulomb counter), with a 10mohm current sense
+resistor.</td>
+</tr>
+<tr>
+<th>Properties</th>
+<td>BATTERY_PROPERTY_CAPACITY<br>
+BATTERY_PROPERTY_CURRENT_NOW<br>
+BATTERY_PROPERTY_CURRENT_AVERAGE<br>
+BATTERY_PROPERTY_CHARGE_COUNTER<br>
+BATTERY_PROPERTY_ENERGY_COUNTER</td>
+</tr>
+<tr>
+<th>Measurements</th>
+<td>Current measurement (instantaneous and average) resolution is 156.3uA.<br>
+CURRENT_NOW instantaneous current update period is 3.5 seconds.<br>
+CURRENT_AVERAGE update period is 28 seconds (not configurable).<br>
+CHARGE_COUNTER (accumulated current, non-extended precision) resolution is 625uAh.<br>
+CHARGE_COUNTER_EXT (extended precision in kernel) resolution is 144nAh.<br>
+ENERGY_COUNTER is CHARGE_COUNTER_EXT at nominal voltage of 3.7V.<br>
+Update period for all is 3.5 seconds.</td>
+</tr>
+</tbody>
+</table>
+
+
+<h2 id="viewing-usage">Viewing Battery Usage Data</h2>
+
+<p>The <code>dumpsys batterystats</code> command generates statistical data
+about battery usage on a device, organized by unique application ID. You can view a history of
+battery-related events such as mobile radio state, Wi-Fi and Bluetooth power states, and wakelock
+reasons.</p>
+
+<p>Statistics include:</p>
+
+<ul>
+<li>History of battery-related events</li>
+<li>Global statistics for the device</li>
+<li>Approximate power use per UID and system component</li>
+<li>System UID aggregated statistics</li>
+</ul>
+
+<p>Use the output of the dumpsys command with the
+<a href="https://github.com/google/battery-historian">Battery Historian</a> tool to generate HTML
+visualizations of power-related events from logs.</p>
+
+
+<h2 id="power-values">Power Values</h2>
 <table>
 <tr>
   <th>Name</th>
-  <th>Meaning</th>
-  <th>Example value</th>
+  <th>Description</th>
+  <th>Example Value</th>
   <th>Notes</th>
 </tr>
 <tr>
@@ -402,166 +579,148 @@
 
 <tr>
   <td>screen.on</td>
-  <td>Additional power used when screen is turned on at minimum brightness </td>
+  <td>Additional power used when screen is turned on at minimum brightness.</td>
   <td>200mA</td>
   <td>Includes touch controller and display backlight. At 0 brightness, not the Android minimum which tends to be 10 or 20%.</td>
 </tr>
 
 <tr>
   <td>screen.full</td>
-  <td>Additional power used when screen is at maximum brightness, compared to screen at minimum brightness</td>
-  <td>100- 300mA</td>
+  <td>Additional power used when screen is at maximum brightness, compared to screen at minimum brightness.</td>
+  <td>100mA-300mA</td>
   <td>A fraction of this value (based on screen brightness) is added to the screen.on value to compute the power usage of the screen.</td>
 </tr>
 
 <tr>
-  <td>bluetooth.active </td>
-  <td>Additional power used when playing audio through bluetooth A2DP</td>
+  <td>bluetooth.active</td>
+  <td>Additional power used when playing audio through bluetooth A2DP.</td>
   <td>14mA</td>
   <td></td>
 </tr>
 
 <tr>
   <td>bluetooth.on</td>
-  <td> Additional power used when bluetooth
-is turned on but idle </td>
+  <td>Additional power used when bluetooth is turned on but idle.</td>
   <td>1.4mA</td>
   <td></td>
 </tr>
 
 <tr>
-  <td>wifi.on </td>
-  <td>Additional power used when wifi is turned on but not
-receiving, transmitting, or scanning</td>
-  <td> 2mA </td>
+  <td>wifi.on</td>
+  <td>Additional power used when Wi-Fi is turned on but not receiving, transmitting, or scanning.</td>
+  <td>2mA</td>
   <td></td>
 </tr>
 
 <tr>
-  <td>wifi.active  </td>
-  <td>Additional power used when transmitting
-or receiving over Wifi </td>
+  <td>wifi.active</td>
+  <td>Additional power used when transmitting or receiving over Wi-Fi.</td>
   <td>31mA</td>
   <td></td>
 </tr>
 
 <tr>
-  <td>wifi.scan </td>
-  <td>Additional power used when wifi is scanning for access
-points  </td>
+  <td>wifi.scan</td>
+  <td>Additional power used when Wi-Fi is scanning for access points.</td>
   <td>100mA</td>
   <td></td>
 </tr>
 
 <tr>
-  <td>dsp.audio </td>
-  <td>Additional power used when audio decoding/encoding via DSP  </td>
-  <td>14.1mA </td>
-  <td>Not
-currently used</td>
+  <td>dsp.audio</td>
+  <td>Additional power used when audio decoding/encoding via DSP.</td>
+  <td>14.1mA</td>
+  <td>Reserved for future use.</td>
 </tr>
 
 
 <tr>
-  <td>dsp.video </td>
-  <td>Additional power used when video decoding via DSP</td>
-  <td> 54mA</td>
-  <td> Not currently
-used </td>
+  <td>dsp.video</td>
+  <td>Additional power used when video decoding via DSP.</td>
+  <td>54mA</td>
+  <td>Reserved for future use.</td>
 </tr>
 
 <tr>
-  <td>gps.on </td>
-  <td>Additional power used when GPS is acquiring a signal  </td>
+  <td>gps.on</td>
+  <td>Additional power used when GPS is acquiring a signal.</td>
   <td>50mA</td>
   <td></td>
 </tr>
 
 <tr>
-  <td>radio.active </td>
-  <td>Additional
-power used when cellular radio is transmitting/receiving </td>
-  <td>100- 300mA </td>
+  <td>radio.active</td>
+  <td>Additional power used when cellular radio is transmitting/receiving.</td>
+  <td>100mA-300mA</td>
   <td></td>
 </tr>
 
 <tr>
-  <td>radio.scanning </td>
-  <td>Additional
-power used when cellular radio is paging the tower  </td>
+  <td>radio.scanning</td>
+  <td>Additional power used when cellular radio is paging the tower.</td>
   <td>1.2mA</td>
   <td></td>
 </tr>
 
 <tr>
-  <td>radio.on Additional power used when
-the cellular radio is on. </td>
-  <td>This is a multi-value entry, one per signal strength (no signal,
-weak, moderate, strong)  </td>
-  <td>1.2mA </td>
-  <td>Some radios boost up their power when there’s no signal and
-they’re trying to find a cell tower. So these numbers could all be the same or decreasing
-with increasing signal strength. If you provide only one value, the same value will be used
-for all strengths. If you provide 2 values, the first will be for no-signal and the second
-for all other strengths, and so on.</td>
+  <td>radio.on</td>
+  <td>Additional power used when the cellular radio is on. Multi-value entry, one per signal strength (no signal, weak, moderate, strong).</td>
+  <td>1.2mA</td>
+  <td>Some radios boost power when they search for a cell tower and do not detect a signal. These
+  numbers could all be the same or decreasing with increasing signal strength. If you provide only
+  one value, the same value will be used for all strengths. If you provide 2 values, the first will
+  be for no-signal and the second for all other strengths, and so on.</td>
 </tr>
 
 <tr>
-  <td>cpu.speeds </td>
-  <td>Multi-value entry that lists each possible CPU
-speed in KHz </td>
-  <td>125000, 250000, 500000, 1000000, 1500000 </td>
-  <td>The number and order of entries need to
-correspond to the mA entries in cpu.active </td>
+  <td>cpu.speeds</td>
+  <td>Multi-value entry that lists each possible CPU speed in KHz.</td>
+  <td>125000, 250000, 500000, 1000000, 1500000</td>
+  <td>The number and order of entries must correspond to the mA entries in cpu.active.</td>
 </tr>
 
 <tr>
-  <td>cpu.idle  </td>
-  <td>Total power drawn by the system when CPUs
-(and the SoC) are in system suspend state </td>
+  <td>cpu.idle</td>
+  <td>Total power drawn by the system when CPUs (and the SoC) are in system suspend state.</td>
   <td>3mA</td>
   <td></td>
 </tr>
 
 <tr>
-  <td>cpu.awake 
-</td>
-  <td>Additional power used when CPUs are
-in scheduling idle state (kernel idle loop); system is not in system suspend state </td>
+  <td>cpu.awake</td>
+  <td>Additional power used when CPUs are in scheduling idle state (kernel idle loop); system is not
+  in system suspend state.</td>
   <td>50mA</td>
   <td></td>
 </tr>
 
 <tr>
-  <td>cpu.active  </td>
-  <td>Additional power used by CPUs when running at different speeds </td>
-  <td>100, 120, 140, 160,
-200</td>
+  <td>cpu.active</td>
+  <td>Additional power used by CPUs when running at different speeds.</td>
+  <td>100, 120, 140, 160, 200</td>
   <td>Set the max speed in the kernel to each of the allowed speeds and peg the CPU at that
 speed. The number of entries here correspond to the number of entries in cpu.speeds, in the
-same order. </td>
+same order.</td>
 </tr>
 
 <tr>
-  <td>battery.capacity  </td>
-  <td>The total battery capacity in mAh</td>
+  <td>battery.capacity</td>
+  <td>The total battery capacity in mAh.</td>
   <td>3000mAh</td>
   <td></td>
 </tr>
 
 </table>
  
-<p>
-The power_profile.xml file is placed in an overlay in
-device///frameworks/base/core/res/res/xml/power_profile.xml
-</p>
-<h3>
-Sample file
-</h3>
+<p>The power_profile.xml file is placed in an overlay in
+device///frameworks/base/core/res/res/xml/power_profile.xml</p>
+
+<h3 id="sample">Sample file</h3>
+
 <pre>
 &lt;!-- Most values are the incremental current used by a feature, in mA (measured at
-nominal voltage). OEM's must measure and provide actual values before shipping a device.
-Example real-world values are given, but they are totally dependent on the platform
+nominal voltage). OEMs must measure and provide actual values before shipping a device.
+Example real-world values are given, but are dependent on the platform
 and can vary significantly, so should be measured on the shipping platform with a power meter.
 --&gt;
 0
@@ -605,4 +764,4 @@
 3000
 &lt;!-- Battery capacity is 3000 mAH (at 3.6 Volts) --&gt;
 
-</pre>
+</pre>
\ No newline at end of file
diff --git a/src/devices/tech/security/acknowledgements.jd b/src/devices/tech/security/acknowledgements.jd
index 4fc7f80..097da07 100644
--- a/src/devices/tech/security/acknowledgements.jd
+++ b/src/devices/tech/security/acknowledgements.jd
@@ -27,21 +27,21 @@
 
 <h2>2014</h2>
 
+<div style="line-height:25px;">
 <p>Jeff Forristal of <a href="http://www.bluebox.com/blog/">Bluebox
 Security</a></p>
 
-<p>Aaron Mangeli of <a href="https://banno.com/">Banno</a> (<a
+<p>Aaron Mangel of <a href="https://banno.com/">Banno</a> (<a
 href="mailto:amangel@gmail.com">amangel@gmail.com</a>)</p>
 
 <p><a href="http://www.linkedin.com/in/tonytrummer/">Tony Trummer</a> of <a
-href="http://www.themeninthemiddle.com">The Men in the Middle</a> (<a
+href="http://www.themeninthemiddle.com">The Men in the Middle</a> <br>(<a
 href="https://twitter.com/SecBro1">@SecBro1</a>)</p>
 
 <p><a href="http://www.samsung.com">Samsung Mobile</a></p>
 
 <p>Henry Hoggard of <a href="https://labs.mwrinfosecurity.com/">MWR Labs</a> (<a
 href="https://twitter.com/henryhoggard">@HenryHoggard</a>)</p>
-<p></p>
 
 <p><a href="http://www.androbugs.com">Yu-Cheng Lin 林禹成</a> (<a
 href="https://twitter.com/AndroBugs">@AndroBugs</a>)</p>
@@ -52,12 +52,12 @@
 Engineering Group</a>, EC SPRIDE Technische Universität Darmstadt (<a
 href="mailto:siegfried.rasthofer@gmail.com">siegfried.rasthofer@gmail.com</a>)</p>
 
-<p>Steven Artz of <a href="http://sseblog.ec-spride.de/">Secure Software
+<p>Steven Arzt of <a href="http://sseblog.ec-spride.de/">Secure Software
 Engineering Group</a>, EC SPRIDE Technische Universität Darmstadt (<a
 href="mailto:Steven.Arzt@ec-spride.de">Steven.Arzt@ec-spride.de</a>)</p>
 
 <p><a href="http://blog.redfern.me/">Joseph Redfern</a> of <a
-href="https://labs.mwrinfosecurity.com/">MWR Labs</a> (<a
+href="https://labs.mwrinfosecurity.com/">MWR Labs</a> <br>(<a
 href="https://twitter.com/JosephRedfern">@JosephRedfern</a>)</p>
 
 <p><a href="https://plus.google.com/u/0/109528607786970714118">Valera
@@ -70,7 +70,8 @@
 
 <p>Stephan Huber of Testlab Mobile Security, <a
 href="https://www.sit.fraunhofer.de/">Fraunhofer SIT</a> (<a
-href="mailto:Stephan.Huber@sit.fraunhofer.de">Stephan.Huber@sit.fraunhofer.de</a>)</p>
+href="mailto:Stephan.Huber@sit.fraunhofer.de">Stephan.Huber@sit.fraunhofer.de</a>)
+</p>
 
 <p><a href="http://www.corkami.com">Ange Albertini</a> (<a
 href="https://twitter.com/angealbertini">@angealbertini</a>)</p>
@@ -84,7 +85,7 @@
 href="mailto:litongxin1991@gmail.com">litongxin1991@gmail.com</a>)</p>
 
 <p><a href="https://www.facebook.com/zhou.xiaoyong">Xiaoyong Zhou</a> of <a
-href="http://www.cs.indiana.edu/~zhou/">Indiana University Bloomington</a> (<a
+href="http://www.cs.indiana.edu/~zhou/">Indiana University Bloomington</a> <br>(<a
 href="https://twitter.com/xzhou">@xzhou</a>, <a
 href="mailto:zhou.xiaoyong@gmail.com">zhou.xiaoyong@gmail.com</a>)</p>
 
@@ -102,5 +103,178 @@
 <p>Xinhui Han of Peking University (<a
 href="mailto:hanxinhui@pku.edu.cn">hanxinhui@pku.edu.cn</a>)</p>
 
+<p><a href="http://thejh.net/">Jann Horn</a> <a href="https://android-review.googlesource.com/#/c/98197/">
+<img style="vertical-align:middle;" src="images/tiny-robot.png"
+alt="Green Droid Patch Symbol"
+title="This person contributed code that improved Android security">
+</a></p>
+
+<p>Robert Craig of <a href="https://www.nsa.gov/research/ia_research/">
+Trusted Systems Research Group</a>, US National Security Agency
+<a href="https://android-review.googlesource.com/#/q/owner:%22Robert+Craig+%253Crpcraig%2540tycho.ncsc.mil%253E%22+status:merged">
+<img style="vertical-align:middle" src="images/tiny-robot.png" alt="Patch Symbol"
+title="This person contributed code that improved Android security"></a></p>
+
+<p>Stephen Smalley of <a href="https://www.nsa.gov/research/ia_research/">
+Trusted Systems Research Group</a>, US National Security Agency
+<a href=
+"https://android-review.googlesource.com/#/q/owner:%22Stephen+Smalley+%253Csds%2540tycho.nsa.gov%253E%22+status:merged">
+<img style="vertical-align:middle" src="images/tiny-robot.png"
+alt="Patch Symbol" title="This person contributed code that improved Android security"></a></p>
+
+<p><a href="http://www.linkedin.com/in/billcroberts">
+William Roberts</a> (<a href="mailto:bill.c.roberts@gmail.com">bill.c.roberts@gmail.com</a>)
+<a href=
+"https://android-review.googlesource.com/#/q/owner:bill.c.roberts%2540gmail.com+status:merged">
+<img style="vertical-align:middle" src="images/tiny-robot.png"
+alt="Patch Symbol" title="This person contributed code that improved Android security"></a></p>
+
+<p>Scotty Bauer of University of Utah (<a href="mailto:sbauer@eng.utah.edu">sbauer@eng.utah.edu</a>)</p>
+
+<p><a href="http://www.cs.utah.edu/~rsas/">Raimondas Sasnauskas</a> of University of Utah</p>
+
+<p><a href="http://www.subodh.io">Subodh Iyengar</a> of <a href="https://www.facebook.com">Facebook</a></p>
+
+<p><a href="http://www.shackleton.io/">Will Shackleton</a> of <a href="https://www.facebook.com">Facebook</a></p>
+
+<p>Kunal Patel of <a href="https://www.samsungknox.com/">Samsung KNOX Security Team</a> (<a href="mailto:kunal.patel1@samsung.com">kunal.patel1@samsung.com</a>)</p>
+
+<p>Sebastian Brenza</p>
+
+<p>Wang Tao of <a href="http://sec.baidu.com">Baidu AdLab</a> (<a href="mailto:wintao@gmail.com">wintao@gmail.com</a>)</p>
+
+<p><a href="http://www.linkedin.com/in/danamodio">Dan Amodio</a> of <a href="https://www.aspectsecurity.com/">Aspect Security</a> (<a href="https://twitter.com/DanAmodio">@DanAmodio</a>)</p>
+
+<p><a href="http://davidmurdoch.com">David Murdoch</a></p>
+
+<p>Alexandru Gheorghita</p>
+
+<p>Mathew Solnik (<a href="https://twitter.com/msolnik">@msolnik</a>)</p>
+
+<p>Marc Blanchou (<a href="https://twitter.com/marcblanchou">@marcblanchou</a>)</p>
+
+</div>
+
+<h2>2013</h2>
+
+<div style="LINE-HEIGHT:25px;">
+
+<p>Jon Sawyer of <a href="http://appliedcybersecurity.com/">Applied Cybersecurity LLC
+</a> (<a href="mailto:jon@cunninglogic.com">jon@cunninglogic.com</a>)</p>
+
+<p>Joshua J. Drake of <a href="http://www.accuvant.com/">Accuvant LABS
+</a> (<a href="https://twitter.com/jduck">@jduck</a>)
+<a href="https://android-review.googlesource.com/#/q/change:72228+OR+change:72229">
+<img style="vertical-align:middle" src="images/patchreward.png"
+alt="Patch Rewards Symbol" title="This person qualified for the Patch Rewards program!"></a></p>
+
+<p>Ruben Santamarta of IOActive
+(<a href="https://twitter.com/reversemode">@reversemode</a>)</p>
+
+<p>Lucas Yang (amadoh4ck) of
+<a href="http://raonsecurity.com/">RaonSecurity</a>
+(<a href="mailto:amadoh4ck@gmail.com">amadoh4ck@gmail.com</a>)</p>
+
+<p><a href="https://tsarstva.bg/sh/">Ivaylo Marinkov</a>
+of <a href="http://www.ecommera.com/">eCommera</a> <br>
+(<a href="mailto:ivo@tsarstva.bg">ivo@tsarstva.bg</a>)</p>
+
+<p><a href="http://roeehay.blogspot.com/">Roee Hay</a>
+<br>(<a href="https://twitter.com/roeehay">@roeehay</a>,
+<a href="mailto:roeehay@gmail.com">roeehay@gmail.com</a>)</p>
+
+<p>Qualcomm Product Security Initiative</p>
+
+<p><a href="https://lacklustre.net/">Mike Ryan</a> of
+<a href="https://isecpartners.com/">iSEC Partners</a>
+<br>(<a href="https://twitter.com/mpeg4codec">@mpeg4codec</a>,
+<a href="mailto:mikeryan@isecpartners.com">mikeryan@isecpartners.com
+</a>)</p>
+
+<p><a href="http://cryptoonline.com/">Muhammad Naveed</a>
+of <a href="http://illinois.edu/">University of Illinois
+at Urbana-Champaign</a>
+<br>(<a href="mailto:naveed2@illinois.edu">naveed2@illinois.edu</a>)</p>
+
+<p>Robert Craig of <a href="https://www.nsa.gov/research/ia_research/">
+Trusted Systems Research Group</a>, US National Security Agency
+<a href="https://android-review.googlesource.com/#/q/owner:%22Robert+Craig+%253Crpcraig%2540tycho.ncsc.mil%253E%22+status:merged">
+<img style="vertical-align:middle" src="images/tiny-robot.png" alt="Patch Symbol"
+title="This person contributed code that improved Android security"></a></p>
+
+<p>Stephen Smalley of <a href="https://www.nsa.gov/research/ia_research/">
+Trusted Systems Research Group</a>, US National Security Agency
+<a href=
+"https://android-review.googlesource.com/#/q/owner:%22Stephen+Smalley+%253Csds%2540tycho.nsa.gov%253E%22+status:merged">
+<img style="vertical-align:middle" src="images/tiny-robot.png"
+alt="Patch Symbol" title="This person contributed code that improved Android security"></a></p>
+
+<p><a href="http://www.linkedin.com/in/billcroberts">
+William Roberts</a> (<a href="mailto:bill.c.roberts@gmail.com">bill.c.roberts@gmail.com</a>)
+<a href=
+"https://android-review.googlesource.com/#/q/owner:bill.c.roberts%2540gmail.com+status:merged">
+<img style="vertical-align:middle" src="images/tiny-robot.png"
+alt="Patch Symbol" title="This person contributed code that improved Android security"></a></p>
+
+<p><a href="http://roeehay.blogspot.com/">Roee Hay</a>
+<br>(<a href="https://twitter.com/roeehay">@roeehay</a>,
+<a href="mailto:roeehay@gmail.com">roeehay@gmail.com</a>)</p>
+
+</div>
+<h2>2012</h2>
+
+<div style="LINE-HEIGHT:25px;">
+
+<p>Robert Craig of <a href="https://www.nsa.gov/research/ia_research/">
+Trusted Systems Research Group</a>, US National Security Agency
+<a href="https://android-review.googlesource.com/#/q/owner:%22Robert+Craig+%253Crpcraig%2540tycho.ncsc.mil%253E%22+status:merged">
+<img style="vertical-align:middle" src="images/tiny-robot.png" alt="Patch Symbol"
+title="This person contributed code that improved Android security"></a></p>
+
+<p>Stephen Smalley of <a href="https://www.nsa.gov/research/ia_research/">
+Trusted Systems Research Group</a>, US National Security Agency
+<a href=
+"https://android-review.googlesource.com/#/q/owner:%22Stephen+Smalley+%253Csds%2540tycho.nsa.gov%253E%22+status:merged">
+<img style="vertical-align:middle" src="images/tiny-robot.png"
+alt="Patch Symbol" title="This person contributed code that improved Android security"></a></p>
+
+<p><a href="http://www.linkedin.com/in/billcroberts">
+William Roberts</a> (<a href="mailto:bill.c.roberts@gmail.com">bill.c.roberts@gmail.com</a>)
+<a href=
+"https://android-review.googlesource.com/#/q/owner:bill.c.roberts%2540gmail.com+status:merged">
+<img style="vertical-align:middle" src="images/tiny-robot.png"
+alt="Patch Symbol" title="This person contributed code that improved Android security"></a></p>
+
+<p><a href="http://thejh.net/">Jann Horn</a></p>
+
+<p><a href="http://web.ict.kth.se/~rbbo/ussdvul.html">Ravishankar
+Borgaonkar</a> of TU Berlin
+(<a href="https://twitter.com/raviborgaonkar">@raviborgaonkar</a>)</p>
+
+<p><a href="http://roeehay.blogspot.com/">Roee Hay</a>
+<br>(<a href="https://twitter.com/roeehay">@roeehay</a>,
+<a href="mailto:roeehay@gmail.com">roeehay@gmail.com</a>)</p>
+
+</div>
+
+<h2>2011</h2>
+
+<div style="LINE-HEIGHT:25px;">
+
+<p>Collin Mulliner of <a href="http://www.mulliner.org/collin/academic">MUlliNER.ORG</a> (<a href="https://twitter.com/collinrm">@collinrm</a>)</p>
+
+</div>
+
+<h2>2009</h2>
+
+<div style="LINE-HEIGHT:25px;">
+
+<p>Collin Mulliner of <a href="http://www.mulliner.org/collin/academic">MUlliNER.ORG</a> (<a href="https://twitter.com/collinrm">@collinrm</a>)</p>
+
+<p>Charlie Miller (<a href="https://twitter.com/0xcharlie">@0xcharlie</a>)</p>
+
+</div>
+
 <br>
-<p><small>If you have reported a vulnerability prior to 2014 and want to be included on this list, or to report a vulnerability in Android, contact <a href="mailto:security@android.com">security@android.com</a></small></p>
+<p><small>If you have reported a vulnerability prior to 2014 and want to be
+included on this list, or to report a vulnerability in Android, contact <a href="mailto:security@android.com">security@android.com</a></small></p>
diff --git a/src/devices/tech/security/enhancements50.jd b/src/devices/tech/security/enhancements50.jd
new file mode 100644
index 0000000..7a143b6
--- /dev/null
+++ b/src/devices/tech/security/enhancements50.jd
@@ -0,0 +1,56 @@
+page.title=Security Enhancements in Android 5.0
+@jd:body
+
+<p>Every Android release includes dozens of security enhancements to protect
+users.  Here are some of the major security enhancements available in Android
+5.0:</p>
+
+<ul>
+  <li><strong>Encrypted by default.</strong> On devices that ship with L
+out-of-the-box, full disk encryption is enabled by default to improve
+protection of data on lost or stolen devices. Devices that
+update to L can be encrypted in <strong>Settings</strong> &gt; <strong>Security</strong>. 
+  <li><strong>Improved full disk encryption.</strong> The user password is
+protected against brute-force attacks using <code>scrypt</code> and, where
+available, the key is bound to the hardware keystore to prevent
+off-device attacks.  As always, the Android screen lock secret and the device
+encryption key are not sent off the device or exposed to any application.
+  <li><strong>Android sandbox reinforced with SELinux</strong>. Android now
+requires SELinux in enforcing mode for all domains. SELinux is a
+mandatory access control (MAC) system in the Linux kernel used to augment the
+existing discretionary access control (DAC) security model. This new layer
+provides additional protection against potential security vulnerabilities.
+  <li><strong>Smart Lock. </strong>Android now includes trustlets that provide
+more flexibility for unlocking devices.  For example, trustlets can allow
+devices to be unlocked automatically when close to another trusted device (via
+NFC, Bluetooth) or being used by someone with a trusted face.
+  <li><strong>Multi user, restricted profile, and guest modes for phones &amp;
+tablets.</strong> Android now provides for multiple users on phones and
+includes a guest mode that can be used to provide easy temporary access to your
+device without granting access to your data and apps.
+  <li><strong>Updates to WebView without OTA. </strong> WebView can now be
+updated independent of the framework and without a system
+OTA.  This will allow for faster response to potential security issues in
+WebView.
+  <li><strong>Updated cryptography for HTTPS and TLS/SSL.</strong> TLSv1.2 and
+TLSv1.1 are now enabled, Forward Secrecy is now preferred, AES-GCM
+is now enabled, and weak cipher suites (MD5, 3DES, and export cipher suites)
+are now disabled. See <a
+href="https://developer.android.com/reference/javax/net/ssl/SSLSocket.html">https://developer.android.com/reference/javax/net/ssl/SSLSocket.html</a>
+for more details.
+  <li><strong>non-PIE linker support removed.</strong> Android now requires all
+dynamically linked executables to support PIE
+(position-independent executables). This enhances Android’s address space
+layout randomization (ASLR) implementation.
+  <li><strong>FORTIFY_SOURCE improvements.</strong> The following libc
+functions now implement FORTIFY_SOURCE protections: <code>stpcpy()</code>,
+<code>stpncpy()</code>, <code>read()</code>, <code>recvfrom()</code>,
+<code>FD_CLR()</code>, <code>FD_SET()</code>, and <code>FD_ISSET()</code>. This
+provides protection against memory-corruption vulnerabilities involving
+those functions.
+  <li><strong>Security Fixes.</strong> Android 5.0 also includes fixes for
+Android-specific vulnerabilities. Information about these vulnerabilities has
+been provided to Open Handset Alliance members, and fixes are available in
+Android Open Source Project. To improve security, some devices with earlier
+versions of Android may also include these fixes.
+</ul>
diff --git a/src/devices/tech/security/images/patchreward.png b/src/devices/tech/security/images/patchreward.png
new file mode 100644
index 0000000..496fe64
--- /dev/null
+++ b/src/devices/tech/security/images/patchreward.png
Binary files differ
diff --git a/src/devices/tech/security/images/tiny-robot.png b/src/devices/tech/security/images/tiny-robot.png
new file mode 100644
index 0000000..2cb88ca
--- /dev/null
+++ b/src/devices/tech/security/images/tiny-robot.png
Binary files differ
diff --git a/src/devices/tech/security/index.jd b/src/devices/tech/security/index.jd
index 57962c9..b2982d7 100644
--- a/src/devices/tech/security/index.jd
+++ b/src/devices/tech/security/index.jd
@@ -78,9 +78,9 @@
 </li>
 <li>
 <p><strong>Android Application Runtime</strong>: Android applications are most often written
-in the Java programming language and run in the Dalvik virtual machine.
+in the Java programming language and run in the Android runtime (ART).
 However, many applications, including core Android services and applications
-are native applications or include native libraries. Both Dalvik and native
+are native applications or include native libraries. Both ART and native
 applications run within the same security environment, contained within the
 Application Sandbox. Applications get a dedicated part of the filesystem in
 which they can write private data, including databases and raw files.</p>
@@ -124,7 +124,7 @@
 cloud capabilities such as (<a href="https://developer.android.com/guide/topics/data/backup.html">backing
 up</a>) application
 data and settings and cloud-to-device messaging
-(<a href="https://code.google.com/android/c2dm/index.html">C2DM</a>)
+(<a href="https://developers.google.com/android/c2dm/">C2DM</a>)
 for push messaging.</p>
 </li>
 </ul>
@@ -305,7 +305,7 @@
 keys and certificate chains.
 </p>
 
-<h2>Memory Management Security Enhancements</h2>
+<h2 id="memory-mgmt">Memory Management Security Enhancements</h2>
 
 Android includes many features that make common security issues harder to
 exploit. The Android SDK, compilers, and OS use tools to make common memory
@@ -350,7 +350,7 @@
 
 </dl>
 
-<h2>Rooting of Devices</h2>
+<h2 id="rooting">Rooting of Devices</h2>
 <p>
 By default, on Android only the kernel and a small subset of the core
 applications run with root permissions. Android does not prevent a user or
@@ -396,7 +396,7 @@
 the bootloader or operating system is not sufficient to access user data
 without the user’s device password.
 </p>
-<h2>User Security Features</h2>
+<h2 id="user-sec">User Security Features</h2>
 
 <h3 id="filesystem-encryption">Filesystem Encryption</h3>
 
@@ -413,7 +413,7 @@
 administrator and enforced by the operating system. Filesystem encryption
 requires the use of a user password, pattern-based screen lock is not supported.</p>
 <p>More details on implementation of filesystem encryption are available at
-<a href="/devices/tech/encryption/android_crypto_implementation.html">https://source.android.com/devices/tech/encryption/android_crypto_implementation.html</a></p>
+<a href="{@docRoot}devices/tech/encryption/index.html">Encryption</a>.</p>
 
 <h2 id="password-protection">Password Protection</h2>
 <p>Android can be configured to verify a user-supplied password prior to providing
@@ -441,7 +441,7 @@
 <p>Android provides an open source platform and application environment for mobile
 devices. The core operating system is based on the Linux kernel. Android
 applications are most often written in the Java programming language and run in
-the Dalvik virtual machine. However, applications can also be written in native
+the ART runtime. However, applications can also be written in native
 code. Applications are installed from a single file with the .apk file
 extension.</p>
 <p>The main Android application building blocks are:</p>
@@ -623,7 +623,7 @@
 of code (in this case, the Browser) that knows how to handle that Intent, and
 runs it. Intents can also be used to broadcast interesting events (such as a
 notification) system-wide. See
-[https://developer.android.com/reference/android/content/Intent.html](https://developer.android.com/reference/android/content/Intent.html.</p>
+<a href="https://developer.android.com/reference/android/content/Intent.html">https://developer.android.com/reference/android/content/Intent.html</a>.</p>
 </li>
 <li>
 <p><strong>ContentProviders</strong>: A ContentProvider is a data storehouse that provides
@@ -767,7 +767,7 @@
 <ul>
 <li>
 <p>A DRM framework API, which is exposed to applications through the Android
-application framework and runs through the Dalvik VM for standard applications.</p>
+application framework and runs through the ART runtime for standard applications.</p>
 </li>
 <li>
 <p>A native code DRM manager, which implements the DRM framework and exposes an
diff --git a/src/devices/tech/security/se-linux.jd b/src/devices/tech/security/se-linux.jd
index 6c34a02..bbc6ed8 100644
--- a/src/devices/tech/security/se-linux.jd
+++ b/src/devices/tech/security/se-linux.jd
@@ -63,7 +63,7 @@
 Per-domain permissive mode also enables policy development for new services
 while keeping the rest of the system enforcing.</p>
 
-<p>In the L release, Android moves to full enforcement of SELinux. This builds
+<p>In the Android 5.0 (L) release, Android moves to full enforcement of SELinux. This builds
 upon the permissive release of 4.3 and the partial enforcement of 4.4. In
 short, Android is shifting from enforcement on a limited set of crucial domains
 (<code>installd</code>, <code>netd</code>, <code>vold</code> and <code>zygote</code>) to everything (more than 60 domains). This means manufacturers will have to
@@ -71,7 +71,7 @@
 devices. Understand that:</p>
 
 <ul>
-  <li> Everything is in enforcing mode in the L release
+  <li> Everything is in enforcing mode in the 5.0 release
   <li> No processes other than <code>init</code> should run in the <code>init</code> domain
   <li> Any generic denial (for a block_device, socket_device, default_service, etc.)
 indicates that device needs a special domain
@@ -81,7 +81,7 @@
 
 <p>See the documentation below for details on constructing useful policies:</p>
 
-<p><a href="https://seandroid.bitbucket.org/PapersandPresentations.html">https://seandroid.bitbucket.org/PapersandPresentations.html</a></p>
+<p><a href="http://seandroid.bitbucket.org/PapersandPresentations.html">http://seandroid.bitbucket.org/PapersandPresentations.html</a></p>
 
 <p><a href="https://www.codeproject.com/Articles/806904/Android-Security-Customization-with-SEAndroid">https://www.codeproject.com/Articles/806904/Android-Security-Customization-with-SEAndroid</a></p>
 
@@ -93,7 +93,7 @@
 
 <p><a href="https://www.gnu.org/software/m4/manual/index.html">https://www.gnu.org/software/m4/manual/index.html</a></p>
 
-<p><a href="https://freecomputerbooks.com/books/The_SELinux_Notebook-4th_Edition.pdf">https://freecomputerbooks.com/books/The_SELinux_Notebook-4th_Edition.pdf</a></p>
+<p><a href="http://freecomputerbooks.com/books/The_SELinux_Notebook-4th_Edition.pdf">http://freecomputerbooks.com/books/The_SELinux_Notebook-4th_Edition.pdf</a></p>
 
 <h2 id=help>Help</h2>
 
diff --git a/src/devices/tech/storage/index.jd b/src/devices/tech/storage/index.jd
index e50abe9..1d7192d 100644
--- a/src/devices/tech/storage/index.jd
+++ b/src/devices/tech/storage/index.jd
@@ -1,8 +1,8 @@
-page.title=External Storage Technical Information
+page.title=External Storage
 @jd:body
 
 <!--
-    Copyright 2013 The Android Open Source Project
+    Copyright 2014 The Android Open Source Project
 
     Licensed under the Apache License, Version 2.0 (the "License");
     you may not use this file except in compliance with the License.
diff --git a/src/devices/tv/HDMI-CEC.jd b/src/devices/tv/HDMI-CEC.jd
new file mode 100644
index 0000000..bbf6547
--- /dev/null
+++ b/src/devices/tv/HDMI-CEC.jd
@@ -0,0 +1,311 @@
+page.title=HDMI-CEC Control Service
+@jd:body
+
+<!--
+    Copyright 2014 The Android Open Source Project
+
+    Licensed under the Apache License, Version 2.0 (the "License");
+    you may not use this file except in compliance with the License.
+    You may obtain a copy of the License at
+
+        http://www.apache.org/licenses/LICENSE-2.0
+
+    Unless required by applicable law or agreed to in writing, software
+    distributed under the License is distributed on an "AS IS" BASIS,
+    WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+    See the License for the specific language governing permissions and
+    limitations under the License.
+-->
+<div id="qv-wrapper">
+  <div id="qv">
+    <h2>In this document</h2>
+    <ol id="auto-toc">
+   </ol>
+  </div>
+</div>
+
+<h2 id=intro>Introduction</h2>
+
+<p>The High-Definition Multimedia Interface Consumer Electronics Control (HDMI-CEC) standard allows multimedia consumer products to communicate and
+exchange information with each other. HDMI-CEC supports many features, like
+Remote Control Passthrough and System Audio Control, but one of the most
+popular is One Touch Play. One Touch Play lets a media source device turn on
+the TV and switch its input port automatically, so you don’t have to search for
+the TV remote to switch from your Chromecast to Blu-ray player.</p>
+
+<p>Most manufacturers have adopted HDMI-CEC so their devices work with other
+companies’ devices. But because each manufacturer implements the HDMI-CEC
+standard in different ways, devices don’t always understand each other and
+supported features vary between devices. Because of this variance, consumers
+can’t safely assume that two products that claim CEC support are completely
+compatible.</p>
+
+<h2 id=solution>Solution</h2>
+
+
+<p>With the introduction of the Android TV Input Framework (TIF), HDMI-CEC brings
+together all connected devices and minimizes compatibility issues. Android has
+created a system service called <code>HdmiControlService</code> to alleviate these pain points.</p>
+
+<p>By offering <code>HdmiControlService</code> as a part of the Android ecosystem, Android hopes to provide:</p>
+
+<ul>
+  <li>A standard implementation of HDMI-CEC for all manufacturers, which will reduce
+device incompatibility. Previously, manufacturers had to develop their own
+implementations of HDMI-CEC or use third-party solutions.</li>
+  <li>A service that is well-tested against numerous HDMI-CEC devices already in the
+market. Android has been conducting rigorous research on compatibility issues
+found among the products and collecting useful advice from partners experienced
+in the technology. The CEC service is designed to keep a healthy balance
+between the standard and modifications to that standard so that it works with
+the products that people already use.</li>
+</ul>
+
+<h2 id=overall_design>Overall design</h2>
+
+
+<p><code>HdmiControlService</code> is connected with the rest of the system like TV Input Framework (TIF), Audio service, and Power service to implement the various features the standard
+specifies.</p>
+
+<p>See the following diagram for a depiction of the switch from a custom CEC
+controller to an implementation of the simpler HDMI-CEC hardware abstraction
+layer (HAL).</p>
+
+<img src="images/HDMI_Control_Service.png" alt="Diagram that shows how HDMI-CEC was implemented before and after Android 5.0">
+
+<p class="img-caption"><strong>Figure 1.</strong> HDMI Control Service replacement</p>
+
+<h2 id=implementation>Implementation</h2>
+
+
+<p>See the following diagram for a detailed view of the HDMI control service.</p>
+
+<img src="images/HDMI_Control_Service_Flow.png" alt="Diagram showing HDMI Control Service details">
+
+<p class="img-caption"><strong>Figure 2.</strong> HDMI Control Service details</p>
+
+<p>Here are the key ingredients to a proper Android HDMI-CEC implementation:</p>
+
+<ul>
+  <li> A manager class <code>HdmiControlManager</code> provides privileged apps with the API. System services like TV Input Manager service and Audio service can grab the service directly.</li>
+  <li> The service is designed to allow hosting more than one type of logical device.</li>
+  <li> HDMI-CEC is connected with the hardware via a hardware abstraction layer (HAL)
+to simplify handling differences of the protocol and signalling mechanisms
+between the devices. The HAL definition is available for device manufacturers
+to use to implement the HAL layer.</li>
+</ul>
+
+<p class="note"><strong>Note</strong>: Device manufacturers should add the following line into <code>PRODUCT_COPY_FILES</code> in <code>device.mk</code>.</p>
+
+<pre>
+PRODUCT_COPY_FILES += \
+frameworks/native/data/etc/android.hardware.hdmi.cec.xml:system/etc/permissions/android.hardware.hdmi.cec.xml
+</pre>
+
+
+<p>Depending on whether your device is a HDMI sink device or a HDMI source device,
+device manufacturers need to set <code>ro.hdmi.device_type</code> in <code>device.mk</code> for <code>HdmiControlService</code> to work correctly.</p>
+
+<p>For HDMI source devices, like Over the Top (OTT) boxes, set:</p>
+
+<pre>
+PRODUCT_PROPERTY_OVERRIDES += ro.hdmi.device_type=<strong>4</strong>
+</pre>
+
+<p>For HDMI sink devices, like panel TVs, set:</p>
+
+<pre>
+PRODUCT_PROPERTY_OVERRIDES += ro.hdmi.device_type=<strong>0</strong></pre>
+
+
+
+<ul>
+  <li> A device manufacturer-provided proprietary CEC controller cannot coexist with <code>HdmiControlService</code>. It must be disabled or removed. Common requirements for this come from the need to handle manufacturer-specific commands. The manufacturer-specific
+command handler should be incorporated into the service by extending/modifying
+it. This work is left to the device manufacturer and not specified by Android.
+Note that any change made in the service for manufacturer-specific commands
+must not interfere with the way standard commands are handled or the device
+will not be Android compatible.</li>
+  <li> Access to the HDMI-CEC service is guarded with the protection level <code>SignatureOrSystem</code>. Only system components or the apps placed in <code>/system/priv-app</code> can access the service. This is to protect the service from abuse by apps with malicious intent.</li>
+</ul>
+
+<p>Android supports type <code>TV/Display(0)</code> and <code>playback device(4)</code>, which can issue the One Touch Play command to display. The other types (tuner
+and recorder) are currently not supported.</p>
+
+<h2 id=hdmi-cec_hal_definition>HDMI-CEC HAL definition</h2>
+
+
+<p>In order to have the service in action, the HDMI-CEC HAL needs to be
+implemented to the definition provided by Android. It abstracts differences in
+the hardware level and exposes the primitive operations (allocate/read/write,
+etc.) to the upper layer through API.</p>
+
+<p>The API calls that device manufacturers must support are:</p>
+
+<h3 id=tx_rx_events>TX/RX/Events</h3>
+<ul>
+  <li><code>send_message</code></li>
+  <li><code>register_event_callback</code></li>
+</ul>
+
+<h3 id=info>Info</h3>
+<ul>
+  <li><code>get_physical_address</code></li>
+  <li><code>get_version</code></li>
+  <li><code>get_vendor_id</code></li>
+  <li><code>get_port_info</code></li>
+</ul>
+
+<h3 id=logical_address>Logical Address</h3>
+<ul>
+  <li><code>add_logical_address</code></li>
+  <li><code>clear_logical_address</code></li>
+</ul>
+
+<h3 id=status>Status</h3>
+<ul>
+  <li><code>is_connected set_option</code></li>
+  <li><code>set_audio_return_channel</code></li>
+</ul>
+
+<p>Here is an excerpt of the HDMI-CEC HAL definition regarding APIs:</p>
+
+<pre>
+#ifndef ANDROID_INCLUDE_HARDWARE_HDMI_CEC_H
+#define ANDROID_INCLUDE_HARDWARE_HDMI_CEC_H
+
+...
+
+/*
+ * HDMI-CEC HAL interface definition.
+ */
+typedef struct hdmi_cec_device {
+    /**
+     * Common methods of the HDMI-CEC device.  This *must* be the first member of
+     * hdmi_cec_device as users of this structure will cast a hw_device_t to hdmi_cec_device
+     * pointer in contexts where it's known the hw_device_t references a hdmi_cec_device.
+     */
+    struct hw_device_t common;
+
+    /*
+     * (*add_logical_address)() passes the logical address that will be used
+     * in this system.
+     *
+     * HAL may use it to configure the hardware so that the CEC commands addressed
+     * the given logical address can be filtered in. This method can be called
+     * as many times as necessary in order to support multiple logical devices.
+     * addr should be in the range of valid logical addresses for the call
+     * to succeed.
+     *
+     * Returns 0 on success or -errno on error.
+     */
+    int (*add_logical_address)(const struct hdmi_cec_device* dev, cec_logical_address_t addr);
+
+    /*
+     * (*clear_logical_address)() tells HAL to reset all the logical addresses.
+     *
+     * It is used when the system doesn't need to process CEC command any more,
+     * hence to tell HAL to stop receiving commands from the CEC bus, and change
+     * the state back to the beginning.
+     */
+    void (*clear_logical_address)(const struct hdmi_cec_device* dev);
+
+    /*
+     * (*get_physical_address)() returns the CEC physical address. The
+     * address is written to addr.
+     *
+     * The physical address depends on the topology of the network formed
+     * by connected HDMI devices. It is therefore likely to change if the cable
+     * is plugged off and on again. It is advised to call get_physical_address
+     * to get the updated address when hot plug event takes place.
+     *
+     * Returns 0 on success or -errno on error.
+     */
+    int (*get_physical_address)(const struct hdmi_cec_device* dev, uint16_t* addr);
+
+    /*
+     * (*send_message)() transmits HDMI-CEC message to other HDMI device.
+     *
+     * The method should be designed to return in a certain amount of time not
+     * hanging forever, which can happen if CEC signal line is pulled low for
+     * some reason. HAL implementation should take the situation into account
+     * so as not to wait forever for the message to get sent out.
+     *
+     * It should try retransmission at least once as specified in the standard.
+     *
+     * Returns error code. See HDMI_RESULT_SUCCESS, HDMI_RESULT_NACK, and
+     * HDMI_RESULT_BUSY.
+     */
+    int (*send_message)(const struct hdmi_cec_device* dev, const cec_message_t*);
+
+    /*
+     * (*register_event_callback)() registers a callback that HDMI-CEC HAL
+     * can later use for incoming CEC messages or internal HDMI events.
+     * When calling from C++, use the argument arg to pass the calling object.
+     * It will be passed back when the callback is invoked so that the context
+     * can be retrieved.
+     */
+    void (*register_event_callback)(const struct hdmi_cec_device* dev,
+            event_callback_t callback, void* arg);
+
+    /*
+     * (*get_version)() returns the CEC version supported by underlying hardware.
+     */
+    void (*get_version)(const struct hdmi_cec_device* dev, int* version);
+
+    /*
+     * (*get_vendor_id)() returns the identifier of the vendor. It is
+     * the 24-bit unique company ID obtained from the IEEE Registration
+     * Authority Committee (RAC).
+     */
+    void (*get_vendor_id)(const struct hdmi_cec_device* dev, uint32_t* vendor_id);
+
+    /*
+     * (*get_port_info)() returns the hdmi port information of underlying hardware.
+     * info is the list of HDMI port information, and 'total' is the number of
+     * HDMI ports in the system.
+     */
+    void (*get_port_info)(const struct hdmi_cec_device* dev,
+            struct hdmi_port_info* list[], int* total);
+
+    /*
+     * (*set_option)() passes flags controlling the way HDMI-CEC service works down
+     * to HAL implementation. Those flags will be used in case the feature needs
+     * update in HAL itself, firmware or microcontroller.
+     */
+    void (*set_option)(const struct hdmi_cec_device* dev, int flag, int value);
+
+    /*
+     * (*set_audio_return_channel)() configures ARC circuit in the hardware logic
+     * to start or stop the feature. Flag can be either 1 to start the feature
+     * or 0 to stop it.
+     *
+     * Note: this function returns void, so errors are not reported to the caller.
+     */
+    void (*set_audio_return_channel)(const struct hdmi_cec_device* dev, int flag);
+
+    /*
+     * (*is_connected)() returns the connection status of the specified port.
+     * Returns HDMI_CONNECTED if a device is connected, otherwise HDMI_NOT_CONNECTED.
+     * The HAL should watch for +5V power signal to determine the status.
+     */
+    int (*is_connected)(const struct hdmi_cec_device* dev, int port);
+
+    /* Reserved for future use to maximum 16 functions. Must be NULL. */
+    void* reserved[16 - 11];
+} hdmi_cec_device_t;
+
+#endif /* ANDROID_INCLUDE_HARDWARE_HDMI_CEC_H */
+</pre>
+
+
+<p>The API lets the service make use of the hardware resource to send/receive
+HDMI-CEC commands, configure necessary settings, and (optionally) communicate
+with the microprocessor in the underlying platform that will take over the CEC
+control while the Android system is in standby mode.</p>
+
+<h2 id=testing>Testing</h2>
+
+
+<p>Device manufacturers must test the APIs of the HDMI-CEC HAL with their own
+tools to make sure they provide expected functionality.</p>
diff --git a/src/devices/tv/images/Built-in_Tuner_TV_Input.png b/src/devices/tv/images/Built-in_Tuner_TV_Input.png
new file mode 100644
index 0000000..bff7fea
--- /dev/null
+++ b/src/devices/tv/images/Built-in_Tuner_TV_Input.png
Binary files differ
diff --git a/src/devices/tv/images/HDMI_Control_Service.png b/src/devices/tv/images/HDMI_Control_Service.png
new file mode 100644
index 0000000..cc8e43d
--- /dev/null
+++ b/src/devices/tv/images/HDMI_Control_Service.png
Binary files differ
diff --git a/src/devices/tv/images/HDMI_Control_Service_Flow.png b/src/devices/tv/images/HDMI_Control_Service_Flow.png
new file mode 100644
index 0000000..84fe4a6
--- /dev/null
+++ b/src/devices/tv/images/HDMI_Control_Service_Flow.png
Binary files differ
diff --git a/src/devices/tv/images/TIF_HDMI_TV_Input.png b/src/devices/tv/images/TIF_HDMI_TV_Input.png
new file mode 100644
index 0000000..5274588
--- /dev/null
+++ b/src/devices/tv/images/TIF_HDMI_TV_Input.png
Binary files differ
diff --git a/src/devices/tv/images/TIF_MHEG5_app.png b/src/devices/tv/images/TIF_MHEG5_app.png
new file mode 100644
index 0000000..f977ea4
--- /dev/null
+++ b/src/devices/tv/images/TIF_MHEG5_app.png
Binary files differ
diff --git a/src/devices/tv/images/TIF_Overview.png b/src/devices/tv/images/TIF_Overview.png
new file mode 100644
index 0000000..5041c17
--- /dev/null
+++ b/src/devices/tv/images/TIF_Overview.png
Binary files differ
diff --git a/src/devices/tv/images/TIF_PIP-PAP.png b/src/devices/tv/images/TIF_PIP-PAP.png
new file mode 100644
index 0000000..ea3a3a7
--- /dev/null
+++ b/src/devices/tv/images/TIF_PIP-PAP.png
Binary files differ
diff --git a/src/devices/tv/images/TIF_TV_Provider.png b/src/devices/tv/images/TIF_TV_Provider.png
new file mode 100644
index 0000000..d058d82
--- /dev/null
+++ b/src/devices/tv/images/TIF_TV_Provider.png
Binary files differ
diff --git a/src/devices/tv/images/TV_App_CEC_integration.png b/src/devices/tv/images/TV_App_CEC_integration.png
new file mode 100644
index 0000000..bf36fac
--- /dev/null
+++ b/src/devices/tv/images/TV_App_CEC_integration.png
Binary files differ
diff --git a/src/devices/tv/images/TV_Input_DVR.png b/src/devices/tv/images/TV_Input_DVR.png
new file mode 100644
index 0000000..3a652db
--- /dev/null
+++ b/src/devices/tv/images/TV_Input_DVR.png
Binary files differ
diff --git a/src/devices/tv/images/Third-party_Input_HDMI.png b/src/devices/tv/images/Third-party_Input_HDMI.png
new file mode 100644
index 0000000..684613a
--- /dev/null
+++ b/src/devices/tv/images/Third-party_Input_HDMI.png
Binary files differ
diff --git a/src/devices/tv/index.jd b/src/devices/tv/index.jd
new file mode 100644
index 0000000..1b8f75f
--- /dev/null
+++ b/src/devices/tv/index.jd
@@ -0,0 +1,483 @@
+page.title=TV Input Framework
+@jd:body
+
+<!--
+    Copyright 2014 The Android Open Source Project
+
+    Licensed under the Apache License, Version 2.0 (the "License");
+    you may not use this file except in compliance with the License.
+    You may obtain a copy of the License at
+
+        http://www.apache.org/licenses/LICENSE-2.0
+
+    Unless required by applicable law or agreed to in writing, software
+    distributed under the License is distributed on an "AS IS" BASIS,
+    WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+    See the License for the specific language governing permissions and
+    limitations under the License.
+-->
+<div id="qv-wrapper">
+  <div id="qv">
+    <h2>In this document</h2>
+    <ol id="auto-toc">
+    </ol>
+  </div>
+</div>
+
+<h2 id=introduction>Introduction</h2>
+
+<p>The Android TV Input Framework (TIF) simplifies the delivery of live content to
+Android TV. The Android TIF provides a standard API for manufacturers to use to
+create input modules for controlling Android TV. It also enables live TV search
+and recommendations via metadata published by the TV Input. The framework does
+not seek to implement TV standards or regional requirements.</p>
+
+<p>The Android TIF makes it easier for device manufacturers to meet regional digital TV
+broadcast standards without re-implementation. This document may also inform
+third-party app developers who would like to create custom TV Inputs.</p>
+
+<h2 id=components>Components</h2>
+
+<p>The Android TV Input Framework implementation includes a TV Input Manager.
+The TIF works with the TV App, a system app that can’t be replaced by a
+third-party app, to access built-in and IP tuner channels. The TV App
+communicates with TV Input modules supplied by the device manufacturer or other
+parties through the TV Input Manager.</p>
+
+<p>The TV Input Framework consists of:</p>
+
+<ul>
+  <li>TV Provider (<code>com.android.providers.tv.TvProvider</code>): a database of channels, programs, and associated permissions
+  <li>TV App (<code>com.android.tv.TvActivity</code>): the app that handles user interaction
+  <li>TV Input Manager (<code>android.media.tv.TvInputManager</code>): allows the TV Inputs to communicate with the TV App
+  <li>TV Input: an app representing physical or virtual tuners and input ports
+  <li>TV Input HAL (<code>tv_input</code> module): a hardware definition that allows system TV Inputs to access
+TV-specific hardware when implemented
+  <li>Parental Control: the technology to allow blocking of channels and programs
+  <li>HDMI-CEC: the technology to allow remote control of various devices over HDMI
+</ul>
+
+<p>These components are covered in detail below. See the following diagram for a
+detailed view of the Android TV Input Framework architecture.</p>
+
+<img src="images/TIF_Overview.png" alt="Overview of the Android TIF architecture">
+<p class="img-caption"><strong>Figure 1.</strong> Android TV Input Framework (TIF) architecture</p>
+
+<h2 id=flow>Flow</h2>
+
+<p>Here is how the architecture is exercised:</p>
+
+<ol>
+  <li>The user sees and interacts with the TV App, a system app that can’t be
+replaced by a third-party app.
+  <li>The TV App displays the AV content from the TV Input.
+  <li>The TV App cannot talk directly with the TV Inputs. The TV Input Manager
+identifies the state of TV Inputs for the TV App. See <em>TV Input Manager</em> below for more details about these limitations.
+</ol>
+
+<h2 id=permissions>Permissions</h2>
+
+<ul>
+  <li>Only <code><a
+href="http://developer.android.com/guide/topics/manifest/permission-element.html#plevel">signatureOrSystem</a></code>
+TV Inputs and TV App have full access to the TV Provider database and are able
+to receive KeyEvents.
+  <li>Only system TV Inputs can access the TV Input HAL through the TV Input Manager
+service. TV Inputs are accessed one-to-one via TV Input Manager sessions.
+  <li>Third-party TV Inputs have package-locked access to the TV Provider database
+and can READ/WRITE only to matching package rows.
+  <li>Third-party TV inputs can either display their own content or content from a
+device manufacturer’s passthrough TV inputs, like HDMI1. They can’t display
+content from non-passthrough TV inputs, like a built-in or IPTV tuner.
+  <li><code>TV_INPUT_HARDWARE</code> permission for a hardware TV Input app, signals the TV Input Manager Service
+to notify the TV Input service on boot to call the TV Input Manager Service and
+add its TV Inputs. This permission allows a hardware TV Input app to support
+multiple TV Inputs per TV Input service, as well as being able to dynamically
+add and remove its supported TV Inputs.
+</ul>
+
+<h2 id=tv_provider>TV Provider</h2>
+
+<p>The TV Provider database stores the channels and programs from TV Inputs. The
+TV Provider also publishes and manages the associated permissions so that TV
+Inputs can see only their own records. For instance, a specific TV Input can
+see only the channels and programs it has supplied and is prohibited from
+accessing any other TV Inputs’ channels and programs. </p>
+
+<p>The TV Provider maps "broadcast genre" to "canonical genre" internally. TV
+Inputs are responsible for populating "broadcast genre" with the value in the
+underlying broadcast standard, and the "canonical genre" field will
+automatically be populated with the correct associated genre from <code>android.provider.TvContract.Genres</code>. For example, with broadcast standard ATSC A/65 and program with genre 0x25
+(meaning “Sports”), the TV Input will populate the “broadcast genre” with the
+String “Sports” and TV Provider will populate the “canonical genre” field with
+the mapped value <code>android.provider.TvContract.Genres.SPORTS</code>.</p>
+
+<p>See the diagram below for a detailed view of the TV Provider. </p>
+
+<img src="images/TIF_TV_Provider.png" alt="Android TV Provider">
+<p class="img-caption"><strong>Figure 2.</strong> Android TV Provider</p>
+
+<p><em>Only apps in the privileged system partition can read the entire TV Provider
+database. </em></p>
+
+<p>Passthrough TV inputs do not store channels and programs. </p>
+
+<p>In addition to the standard fields for channels and programs, the TV Provider
+database also offers a BLOB type field, <code>COLUMN_INTERNAL_PROVIDER_DATA</code>, in each table that TV Inputs may use to store arbitrary data. That BLOB data
+can include custom information, such as frequency of the associated tuner, and
+may be provided in a protocol buffer or another form. A Searchable field is
+available to make certain channels unavailable in search (such as to meet
+country-specific requirements for content protection).</p>
+
+<h3 id=tv_provider_database_field_examples>Database field examples</h3>
+
+<p>The TV Provider supports structured data in channel (<code>android.provider.TvContract.Channels</code>) and program (<code>android.provider.TvContract.Programs</code>) tables. These tables are populated and accessed by TV Inputs and system apps
+like the TV App. These tables have four types of fields:</p>
+
+<ul>
+  <li><strong>Display: </strong>Display fields contain information that apps may want to make visible to the
+user, like a channel’s name (<code>COLUMN_DISPLAY_NAME</code>) or number (<code>COLUMN_DISPLAY_NUMBER</code>), or the title of the program being viewed.
+  <li><strong>Metadata:</strong> There are three fields for identifying content, according to relevant
+standards, like a channel’s transport stream ID (<code>COLUMN_TRANSPORT_STREAM_ID</code>), original network ID (<code>COLUMN_ORIGINAL_NETWORK_ID</code>) and service id (<code>COLUMN_SERVICE_ID</code>).
+  <li><strong>Internal data</strong>: Fields that are for the custom use of TV Inputs.<br>
+    Some fields, like <code>COLUMN_INTERNAL_PROVIDER_DATA</code>, are customizable BLOB fields where a TV Input can store arbitrary metadata
+about their channel or program.
+  <li><strong>Flag: </strong>Flag fields represent whether a channel should be restricted from search,
+browse, or viewing. This can be set only at the channel level. All programs
+defer to the setting on the channel.
+  <ul>
+    <li><code>COLUMN_SEARCHABLE</code>: Restricting search from some channels may be a requirement in certain
+regions. <code>COLUMN_SEARCHABLE = 0</code> means the channel should not be exposed in search results. 
+    <li><code>COLUMN_BROWSABLE</code>: Visible to system applications only. Restricting channel from being browsed
+by applications. <code>COLUMN_BROWSABLE = 0</code> means the channel should not be included in the channel list.
+    <li><code>COLUMN_LOCKED</code>: Visible to system applications only. Restricting channel from being viewed by
+invalid accounts without entering PIN code. <code>COLUMN_LOCKED = 1</code> means the channel should be protected by parental control.
+  </ul>
+</ul>
+
+<p>For a more exhaustive list of the fields, see <code>android/frameworks/base/media/java/android/media/tv/TvContract.java</code></p>
+
+<h3 id=permissions_and_access_control>Permissions and access control</h3>
+
+<p>All fields are visible to anyone with access to the corresponding row. No
+fields are directly accessible to users; they see only what the TV App, System
+apps, or TV Inputs surface.</p>
+
+<ul>
+  <li>Each row has <code>PACKAGE_NAME</code>, the package (app) that owns that row, checked on Query, Insert, Update via
+TvProvider.java.
+A TV Input may access only the information it wrote and is
+cordoned off from the information provided by other TV Inputs.
+  <li>READ, WRITE permissions via AndroidManifest.xml (requires user consent) to
+determine available channels.
+  <li>Only <code>signatureOrSystem</code> apps can acquire <code>ACCESS_ALL_EPG_DATA</code> permission to access the entire database.
+</ul>
+
+<h2 id=tv_input_manager>TV Input Manager</h2>
+
+<p>The TV Input Manager provides a central system API to the overall Android TV
+Input Framework. It arbitrates interaction between apps and TV Inputs and
+provides parental control functionality. TV Input Manager sessions must be
+created one-to-one with TV Inputs. The TV Input Manager allows access to
+installed TV Inputs so apps may:</p>
+
+<ul>
+  <li>List TV inputs and check their status
+  <li>Create sessions and manage listeners
+</ul>
+
+<p>For sessions, a TV Input may be tuned by the TV App only to URIs it has added
+to the TV Provider database, except for passthrough TV Inputs which can be
+tuned to using <code>TvContract.buildChannelUriForPassthroughInput()</code>. A TV Input may also have its volume set. TV Inputs provided and signed by the
+device manufacturer (signature apps) or other apps installed in the system
+partition will have access to the entire TV Provider database. This access can
+be used to construct apps to browse and search across all available TV channels
+and programs.</p>
+
+<p>An app may create and register a <code>TvInputCallback</code> with the <code>android.media.tv.TvInputManager</code> to be called back on a TV Input’s state change or on the addition or removal
+of a TV Input. For example, a TV App can react when a TV Input is disconnected
+by displaying it as disconnected and preventing its selection.</p>
+
+<p>The TV Input Manager abstracts communication between the TV App and TV Inputs.
+The standard interface of TV Input Manager and TV Input allows multiple
+device manufacturers to create their own TV Apps while helping all third-party TV Inputs
+work on all TV Apps.</p>
+
+<h2 id=tv_inputs>TV Inputs</h2>
+
+<p>TV Inputs are Android apps in the sense they have an AndroidManifest.xml and
+are installed (via Play, pre-installed, or sideloaded). Android TV supports
+pre-installed system apps, apps signed by the device manufacturer and
+third-party TV Inputs. </p>
+
+<p>Some inputs, like the HDMI input or built-in tuner input, can be provided only
+by the manufacturer as they speak directly with the underlying hardware.
+Others, such as IPTV, place-shifting, and external STB, can be supplied by
+third parties as APKs on Google Play Store. Once downloaded and installed, the
+new input can be selected within the TV App.</p>
+
+<h3 id=passthrough_input_example>Passthrough input example</h3>
+
+<img src="images/TIF_HDMI_TV_Input.png" alt="Android TV System Input">
+<p class="img-caption"><strong>Figure 3.</strong> Android TV System Input</p>
+
+<p>In this example, the TV Input provided by the device manufacturer is trusted
+and has full access to the TV Provider. As a passthrough TV Input, it does not
+register any channels or programs with the TV Provider. To obtain the URI used
+to reference the passthrough input, use the <code>android.media.tv.TvContract</code> utility method <code>buildChannelUriForPassthroughInput(String inputId)</code>.  The TV App communicates with the TV Input Manager to reach the HDMI TV
+Input. </p>
+
+<h3 id=built-in_tuner_example>Built-in tuner example</h3>
+
+<img src="images/Built-in_Tuner_TV_Input.png" alt="Android TV Built-in Tuner Input">
+<p class="img-caption"><strong>Figure 4.</strong> Android TV Built-in Tuner Input</p>
+
+<p>In this example, the Built-in Tuner TV Input provided by the device
+manufacturer is trusted and has full access to the TV Provider. </p>
+
+<h3 id=third-party_input_example>Third-party input example</h3>
+
+<img src="images/Third-party_Input_HDMI.png" alt="Android TV third-party input">
+<p class="img-caption"><strong>Figure 5.</strong> Android TV third-party input</p>
+
+<p>In this example, the external STB TV Input is provided by a third party. Since
+that TV Input can’t directly access the HDMI video feed coming in, it must go
+through the TV Input Manager and use the HDMI TV Input provided by the device
+manufacturer.</p>
+
+<p>Through the TV Input Manager, the external STB TV Input can speak with the HDMI
+TV Input and ask it to show the video on HDMI1. So the STB TV Input can control
+the TV while the manufacturer-provided HDMI TV Input renders the video.</p>
+
+<h3 id=picture_in_picture_pip_example>Picture in picture (PIP) example </h3>
+
+<img src="images/TIF_PIP-PAP.png" alt="Android TV KeyEvents">
+<p class="img-caption"><strong>Figure 6.</strong> Android TV KeyEvents</p>
+
+<p>The diagram above shows how buttons on a remote control are passed to a
+specific TV Input for picture in picture (PIP) display. Those button presses
+are interpreted by the hardware driver supplied by the device manufacturer,
+converting hardware scancodes to Android keycodes and passing them to the
+standard Android <a href="http://source.android.com/devices/tech/input/overview.html">input pipeline</a> <code>InputReader</code> and <code>InputDispatcher</code> functions as <a href="http://developer.android.com/reference/android/view/KeyEvent.html">KeyEvents</a>. These in turn trigger events on the TV App if it is in focus. </p>
+
+<p>Only system TV Inputs are eligible to receive <code>InputEvents</code>, and only if they have the <code>RECEIVE_INPUT_EVENT</code> system permission. The TV Input is responsible to determine which InputEvents
+to consume and should allow the TV App to handle the keys it does not need to
+consume.</p>
+
+<p>The TV App is responsible for knowing which system TV Input is active, meaning
+selected by the user, and to disambiguate incoming <code>KeyEvents</code> and route them to the correct TV Input Manager session, calling <code>dispatchInputEvent()</code> to pass on the event to the associated TV Input. </p>
+
+<h3 id=mheg-5_input_example>MHEG-5 input example</h3>
+
+<p>The following diagram shows a more detailed view of how <code>KeyEvents</code> are routed through the Android TIF.</p>
+
+<img src="images/TIF_MHEG5_app.png" alt="Android TV Red button example">
+<p class="img-caption"><strong>Figure 7.</strong> Android TV Red button example</p>
+
+<p>It depicts the flow of a Red button app, common in Europe for letting users
+access interactive apps on their televisions. An app can be delivered over this
+transport stream. When the button is clicked, it lets users interact with these
+broadcast apps. For example, you might use these broadcast apps to access
+related web pages or sports scores.</p>
+
+<p>See the <em>Broadcast app</em> section to learn how broadcast apps interact with the TV App.</p>
+
+<p>In this example:</p>
+
+<ol>
+  <li>The TV App is in focus and receives all keys.
+  <li><code>KeyEvents</code> (e.g. the Red button) is passed to the active TV Input as <code>InputEvents.</code>
+  <li>The system TV Input integrates with MHEG-5 stack and has the <code>RECEIVE_INPUT_EVENT</code> system permission.
+  <li>On receiving activation keycode (e.g. Red button), the TV Input activates
+broadcast app.
+  <li>TV Input consumes <code>KeyEvents</code> as <code>InputEvents</code> and the broadcast app is the focus and handles <code>InputEvents</code> until dismissed. 
+</ol>
+
+<p class="note"><strong>Note</strong>: Third-party TV inputs never receive keys. </p>
+
+<h2 id=tv_input_hal>TV Input HAL</h2>
+
+<p>The TV Input HAL aids development of TV Inputs to access TV-specific hardware.
+As with other Android HALs, the TV Input HAL (<code>tv_input</code>) is
+available in the AOSP source tree and the vendor develops its implementation.</p>
+
+<h2 id=tv_app>TV App</h2>
+
+<p>The TV App provides channel and program search results (via
+<code>com.android.tv.search.TvProviderSearch</code>) and passes keys, tune, and
+volume calls to TV Inputs through the TV Input
+Manager. Manufacturers must implement the TV App to ensure search functions
+work for their users. Otherwise, users will struggle to navigate the resulting
+Android TV. Third-party developers cannot develop TV Apps as the APIs require
+system or signature permission.</p>
+
+<p>As with the TIF in general, the TV App does not seek to implement device
+manufacturer or country-specific features. Instead, it handles these tasks by
+default:</p>
+
+<h3 id=setup_and_configuration>Setup and configuration</h3>
+
+<ul>
+  <li>Auto-detect TV Inputs
+  <li>Let TV Inputs initiate channel setup
+  <li>Control parental settings
+  <li>Alter TV settings
+  <ul>
+    <li>Edit channel
+  </ul>
+</ul>
+
+<h3 id=viewing>Viewing</h3>
+<ul>
+  <li>Access and navigate all TV channels
+  <li>Access TV program information bar
+  <li>Multiple audio and subtitle track support
+  <li>Parental control PIN challenge
+  <li>Allow TV Input UI overlay for:
+  <ul>
+    <li>TV standard (HbbTV, etc.)
+  </ul>
+</ul>
+
+<h2 id=parental_control>Parental Control</h2>
+
+<p>Parental control lets a user block undesired channels and programs, but bypass
+the block by entering a PIN code.</p>
+
+<p>Responsibility for parental control functionality is shared amongst the TV App,
+TV Input Manager service, TV Provider, and TV Input. </p>
+
+<h3 id=tv_provider>TV Provider</h3>
+
+<p>Each channel row has a <code>COLUMN_LOCKED</code> field that is used to lock
+specific channels from viewing without entering a PIN code. The program field
+<code>COLUMN_CONTENT_RATING</code> is intended for display and is not used to
+enforce parental control.</p>
+
+<h3 id=tv_input_manager>TV Input Manager</h3>
+
+<p>The TV Input Manager stores every blocked <code>TvContentRating</code> and
+responds to <code>isRatingBlocked()</code> to advise if content with the given
+rating should be blocked.</p>
+
+<h3 id=tv_input>TV Input</h3>
+
+<p>The TV Input checks if the current content should be blocked by calling
+<code>isRatingBlocked()</code> on the TV Input Manager when the rating of the
+displayed content has changed
+(on program or channel change), or parental control settings have changed (on
+<code>ACTION_BLOCKED_RATINGS_CHANGED</code> and
+<code>ACTION_PARENTAL_CONTROLS_ENABLED_CHANGED</code>). If the content should
+be blocked, the TV Input disables the audio and video
+and notifies the TV app that the current content is blocked by calling
+<code>notifyContentBlocked(TvContentRating)</code>. If the content should not
+be blocked, the TV Input enables audio and video and notifies the TV App
+the current content is allowed by calling <code>notifyContentAllowed()</code>.</p>
+
+<h3 id=tv_app>TV App</h3>
+
+<p>The TV App shows parental control settings to users and a PIN code UI when it
+is notified by a TV Input that the current content is blocked or when the user
+attempts to view a blocked channel.</p>
+
+<p>The TV App does not directly store the parental control settings. When the user
+changes the parental control settings, every blocked
+<code>TvContentRating</code> is stored by the TV Input Manager, and blocked
+channels are stored by the TV Provider.</p>
+
+<h2 id=hdmi-cec>HDMI-CEC</h2>
+
+<p>HDMI-CEC allows one device to control another, thereby enabling a single remote
+to control multiple appliances in a home theater. It is used by Android TV to
+speed setup and allow distant control over various TV Inputs via the central TV
+App. For instance, it may switch inputs, power up or down devices, and more.</p>
+
+<p>The Android TIF implements HDMI-CEC as the HDMI Control Service so that
+device manufacturers merely need to develop low-level drivers that interact with the
+lightweight Android TV HAL, skipping more complex business logic. In providing
+a standard implementation, Android seeks to mitigate compatibility issues by
+reducing fragmented implementations and selective feature support. The HDMI
+Control Service uses the existing Android services, including input and power.</p>
+
+<p>This means existing HDMI-CEC implementations will need to be redesigned to
+interoperate with the Android TIF. We recommend the hardware platform contain a
+microprocessor to receive CEC power on and other commands.</p>
+
+<img src="images/TV_App_CEC_integration.png" alt="CEC integration on Android TV">
+<p class="img-caption"><strong>Figure 8.</strong> CEC integration on Android TV</p>
+
+<ol>
+  <li> The CEC bus receives a command from the currently active source to switch to a
+different source.
+  <li> The driver passes the command to the HDMI-CEC HAL.
+  <li> The HAL notifies all <code>ActiveSourceChangeListeners</code>.
+  <li> The HDMI Control Service is notified of source change via <code>ActiveSourceChangeListener</code>.
+  <li> The TV Input Manager service generates an intent for the TV App to switch the
+source.
+  <li> The TV App then creates a TV Input Manager Session for the TV Input being
+switched to and calls <code>setMain</code> on that session. 
+  <li> The TV Input Manager Session passes this information on to the HDMI TV Input.
+  <li> The HDMI TV input requests to set sideband surface.
+  <li> The TV Input Manager Service generates a corresponding routing control command
+back to HDMI Control Service when the surface is set.
+</ol>
+
+<h2 id=tv_integration_guidelines>TV integration guidelines</h2>
+
+<h3 id=broadcast_app>Broadcast app</h3>
+
+<p>Because each country has broadcast-specific requirements (MHEG, Teletext,
+HbbTV, and more), manufacturers are expected to supply their own solutions for
+the broadcast app, for example:</p>
+
+<ul>
+  <li> MHEG: native stack
+  <li> Teletext: native stack
+  <li> HbbTV: webkit modification by Opera browser
+</ul>
+
+<p>In the Android L release, Android TV expects device manufacturers to use systems
+integrators or the Android solutions for regional TV stacks, pass the surface
+to TV software stacks, or pass the necessary key code to interact with legacy
+stacks.</p>
+
+<p>Here’s how the broadcast app and TV App interact:</p>
+
+<ol>
+  <li>The TV App is in focus, receiving all keys.
+  <li>The TV App passes keys (e.g. Red button) to the TV Input device.
+  <li>The TV Input device internally integrates with legacy TV stack.
+  <li>On receiving an activation keycode (e.g. Red button), the TV Input device
+activates broadcast apps.
+  <li>A broadcast app takes focus in the TV App and handles user actions.
+</ol>
+
+<p>For voice search/recommendation, the broadcast app may support In-app search
+for voice search.</p>
+
+<h3 id=dvr>DVR</h3>
+
+<p>Android TV supports digital video recording (DVR) with device manufacturer development. The
+DVR function works like so:</p>
+
+<ol>
+  <li> DVR recording function / Live Buffer can be implemented by any TV Input.
+  <li> TV App passes on key inputs to TV Input (including recording/pause/fast
+forward/ rewind keys).
+  <li> When playing the recorded content, the TV Input handles it with trick play
+overlay.
+  <li> DVR app enables users to browse and manage recorded programs.
+</ol>
+
+<p>For voice search/recommendation:</p>
+
+<ul>
+  <li>DVR app supports In-app search for Voice search.
+  <li>DVR app can propose recommendation using notifications.
+</ul>
+
+<p>See the following diagram for a view into a possible DVR implementation in
+Android TV.</p>
+
+<img src="images/TV_Input_DVR.png" alt="Digital video recording in Android TV">
+<p class="img-caption"><strong>Figure 9.</strong> Digital video recording in Android TV</p>
diff --git a/src/index.jd b/src/index.jd
index f96e721..20b7aea 100644
--- a/src/index.jd
+++ b/src/index.jd
@@ -6,7 +6,7 @@
 @jd:body
 
 <!--
-    Copyright 2013 The Android Open Source Project
+    Copyright 2014 The Android Open Source Project
 
     Licensed under the Apache License, Version 2.0 (the "License");
     you may not use this file except in compliance with the License.
@@ -42,41 +42,47 @@
     <div class="col-8">
     <h3>What's New</h3>
 
-<a href="{@docRoot}source/build-numbers.html">
-        <h4>Android 4.4.4 released</h4></a>
-        <p>Builds for Android 4.4.4 have been released. See the <strong><a
-href="{@docRoot}source/build-numbers.html#source-code-tags-and-builds">Source
-Code Tags and Builds</a></strong> table on <strong>Codenames, Tags, and Build
-Numbers</strong> for the new builds, tags, and devices supported.</p>
-
-<a href="{@docRoot}compatibility/downloads.html">
-        <h4>Android 4.4 CTS packages updated</h4></a>
-        <p>Revision 3 of the Android 4.4 Compatibility Test Suite (CTS) and
-Android 4.4 CTS Verifier have been added to Compatibility
-<strong><a
-href="{@docRoot}compatibility/downloads.html">Downloads</a></strong>. Packages
-for x86 architectures are included for the first time.</p>
+<a href="{@docRoot}devices/audio.html">
+        <h4>Audio gains attributes, USB, and headset documentation</h4></a>
+        <p>The <strong><a
+        href="{@docRoot}devices/audio.html">Audio section</a></strong> now describes <strong><a
+	href="{@docRoot}devices/audio_attributes.html">Attributes</a></strong> and <strong><a
+        href="{@docRoot}devices/audio_usb.html">USB digital audio</a></strong> support, while a <strong><a
+	href="{@docRoot}accessories/headset-spec.html">Wired audio headset
+        specification</a></strong> can now be found in the <strong><a
+        href="{@docRoot}accessories/index.html">Accessories section</a></strong>.</p>
 
 <img border="0" src="images/Android_Robot_100.png" alt="Android Partner icon" style="display:inline;float:right;margin:5px 10px">
 
-<a href="{@docRoot}devices/tech/dalvik/art.html">
-        <h4>ART introduction completely revised</h4></a>
+<a href="{@docRoot}devices/tech/security/index.html">
+        <h4>Encryption and SELinux revised</h4></a>
         <p><strong><a
-href="{@docRoot}devices/tech/dalvik/art.html">Introducing ART</a></strong> has been
-rewritten to reflect forthcoming changes and prepare developers and
-manufacturers for the runtime's adoption.</p>
+        href="{@docRoot}devices/tech/encryption/index.html">Encryption</a></strong> and <strong><a
+        href="{@docRoot}devices/tech/security/se-linux.html">Security-Enhanced Linux</a></strong> have been
+	updated to describe the latest features of Android 5.0, such as default
+        encryption and full enforcement of SELinux.</p>
 
-<a href="{@docRoot}source/brands.html">
-        <h4>Brand guidelines published</h4></a>
-        <p>Manufacturers have their own set of <strong><a
-href="{@docRoot}source/brands.html">guidelines for Android brand use</a></strong>.</p>
+<a href="{@docRoot}devices/sensors/index.html">
+        <h4>Sensors and Power rewritten</h4></a>
+        <p><strong><a
+        href="{@docRoot}devices/sensors/index.html">Sensors</a></strong> and <strong><a
+	href="{@docRoot}devices/tech/power.html">Power</a></strong> have been
+	completely rewritten to reflect the latest Android release and include
+        new gesture sensors and tools for measuring power consumption.</p>
 
-<a href="{@docRoot}devices/graphics/architecture.html">
- <h4>Graphics architecture document published</h4></a>
-        <p>Android engineering describes the system-level <strong><a
-href="{@docRoot}devices/graphics/architecture.html">Graphics
-Architecture</a></strong> in great detail.</p>
+<a href="{@docRoot}devices/tv/index.html">
+        <h4>TV Input Framework and more come to Android</h4></a>
+        <p>The <strong><a
+        href="{@docRoot}devices/tv/index.html">TV Input Framework</a></strong> and <strong><a
+        href="{@docRoot}devices/tv/HDMI-CEC.html">HDMI-CEC</a></strong> enable TV development, while <strong><a
+	href="{@docRoot}devices/audio_tv.html">TV Audio</a></strong> describes audio routing through televisions.</p>
 
+<a href="{@docRoot}devices/camera/versioning.html">
+        <h4>Camera versioning added, security enhancements updated</h4></a>
+        <p>The Camera team now describes its <strong><a
+        href="{@docRoot}devices/camera/versioning.html">Version support</a></strong>, while the Security team lists new <strong><a
+        href="{@docRoot}devices/tech/security/enhancements.html">Enhancements</a></strong> and <strong><a
+        href="{@docRoot}devices/tech/security/acknowledgements.html">Acknowledgements</a></strong>.</p>
     </div>
 
     <div class="col-8">
diff --git a/src/license.jd b/src/license.jd
index eb278a0..fb390b9 100644
--- a/src/license.jd
+++ b/src/license.jd
@@ -13,7 +13,7 @@
 </ul>
 
 <p>The documentation content on this site is made available to
-you as part of the <a href="http://source.android.com">Android Open
+you as part of the <a href="https://android.googlesource.com/">Android Open
 Source Project</a>. This documentation, including any code shown in it,
 is licensed under the <a
 href="http://www.apache.org/licenses/LICENSE-2.0">Apache 2.0
@@ -107,7 +107,7 @@
 </p>
 <p style="margin-left:20px;font-style:italic">
  Portions of this page are reproduced from work created and <a
- href="http://code.google.com/policies.html">shared by the Android Open Source Project</a>
+ href="https://code.google.com/p/android/">shared by the Android Open Source Project</a>
  and used according to terms described in the <a
  href="http://creativecommons.org/licenses/by/2.5/">Creative Commons
  2.5 Attribution License</a>.
@@ -125,7 +125,7 @@
 </p>
 <p style="margin-left:20px;font-style:italic">
  Portions of this page are modifications based on work created and <a
- href="http://code.google.com/policies.html">shared by the Android Open
+ href="https://code.google.com/p/android/">shared by the Android Open
  Source Project</a> and used according to terms described in the <a
  href="http://creativecommons.org/licenses/by/2.5/">Creative Commons
  2.5 Attribution License</a>.
diff --git a/src/source/brands.jd b/src/source/brands.jd
index dfb3cb1..c345c02 100644
--- a/src/source/brands.jd
+++ b/src/source/brands.jd
@@ -128,5 +128,6 @@
 <h2 id="Questions">Questions</h2>
 
 <p>For additional brand usage information, please contact our Android Partner
-Marketing team at <a
-href="mailto:android-brand-approvals@google.com">android-brand-approvals@google.com</a>.</p>
+Marketing team by submitting the <a
+href="https://support.google.com/googleplay/contact/brand_developer">Partner
+Brand Inquiry Form</a>.</p>
diff --git a/src/source/build-numbers.jd b/src/source/build-numbers.jd
index 89cf0c4..eea5f48 100644
--- a/src/source/build-numbers.jd
+++ b/src/source/build-numbers.jd
@@ -135,6 +135,11 @@
 <td>4.4 - 4.4.4</td>
 <td>API level 19</td>
 </tr>
+<tr>
+<td>Lollipop</td>
+<td>5.0</td>
+<td>API level 21</td>
+</tr>
 </tbody>
 </table>
 <p>Starting with Cupcake, individual builds are identified with a short
@@ -168,10 +173,28 @@
 <th>Supported devices</th>
 </tr>
 <tr>
+  <td>LRX21M</td>
+  <td>android-5.0.0_r2</td>
+  <td>Lollipop</td>
+  <td>Nexus Player (fugu)</td>
+</tr>
+<tr>
+  <td>LRX21L</td>
+  <td>android-5.0.0_r1</td>
+  <td>Lollipop</td>
+  <td>Nexus 9 (volantis)</td>
+</tr>
+<tr>
+  <td>KTU84Q</td>
+  <td>android-4.4.4_r2</td>
+  <td>KitKat</td>
+  <td>Nexus 5 (hammerhead) (For 2Degrees/NZ, Telstra/AUS and India ONLY)</td>
+</tr>
+<tr>
   <td>KTU84P</td>
   <td>android-4.4.4_r1</td>
   <td>KitKat</td>
-  <td>Nexus 5, Nexus 7 (flo/grouper/tilapia), Nexus 4, Nexus 10</td>
+  <td>Nexus 5, Nexus 7 (flo/deb/grouper/tilapia), Nexus 4, Nexus 10</td>
 </tr>
 <tr>
   <td>KTU84M</td>
@@ -211,7 +234,7 @@
 </tr>
 <tr>
   <td>KRT16M</td>
-  <td>android-4.4.2_r1</td>
+  <td>android-4.4_r1</td>
   <td>KitKat</td>
   <td>Nexus 5 (hammerhead)</td>
 </tr>
diff --git a/src/source/building-running.jd b/src/source/building-running.jd
index 905b94e..ed8c4b7 100644
--- a/src/source/building-running.jd
+++ b/src/source/building-running.jd
@@ -162,15 +162,13 @@
     https://source.android.com/source/download.html
 ************************************************************
 </code></pre>
-<p>This may be caused by</p>
+<p>This may be caused by:</p>
 <ul>
 <li>
-<p>failing to install the correct JDK as specified in <a href="initializing.html">Initializing the Build Environment</a>.</p>
+<p>Failing to install the correct JDK as specified in <a href="initializing.html">Initializing the Build Environment</a>.</p>
 </li>
 <li>
-<p>another JDK that you previously installed appearing in your path.  You can remove the offending JDK from your path with:</p>
-<pre><code>$ export PATH=${PATH/\/path\/to\/jdk\/dir:/}
-</code></pre>
+<p>Another JDK previously installed appearing in your path. Prepend the correct JDK to the beginning of your PATH or remove the problematic JDK.</p>
 </li>
 </ul>
 <h3 id="python-version-3">Python Version 3</h3>
diff --git a/src/source/code-lines.jd b/src/source/code-lines.jd
index 59da8bd..f277019 100644
--- a/src/source/code-lines.jd
+++ b/src/source/code-lines.jd
@@ -91,7 +91,7 @@
 	<p>
 	  An <em>upstream</em> project is an open-source project from which the Android stack is
 	  pulling code. These include obvious projects such as the Linux kernel and WebKit.
-	  Over time we are migrating some of the semi-autonomous Android projects (such as Dalvik,
+	  Over time we are migrating some of the semi-autonomous Android projects (such as ART,
 	  the Android SDK tools, Bionic, and so on) to work as "upstream" projects. Generally,
 	  these projects are developed entirely in the public tree. For some upstream projects,
 	  development is done by contributing directly to the upstream project itself. See <a href=
diff --git a/src/source/code-style.jd b/src/source/code-style.jd
index 9ec3c99..ee65c27 100644
--- a/src/source/code-style.jd
+++ b/src/source/code-style.jd
@@ -277,8 +277,7 @@
 <h3 id="define-fields-in-standard-places">Define Fields in Standard Places</h3>
 <p>Fields should be defined either at the top of the file, or immediately before the methods that use them.</p>
 <h3 id="limit-variable-scope">Limit Variable Scope</h3>
-<p>The scope of local variables should be kept to a minimum (<em>Effective
-Java</em> Item 29). By doing so, you increase the readability and
+<p>The scope of local variables should be kept to a minimum. By doing so, you increase the readability and
 maintainability of your code and reduce the likelihood of error. Each variable
 should be declared in the innermost block that encloses all uses of the
 variable.</p>
@@ -537,8 +536,7 @@
 <p>Both the JDK and the Android code bases are very inconsistent with regards
 to acronyms, therefore, it is virtually impossible to be consistent with the
 code around you. Bite the bullet, and treat acronyms as words.</p>
-<p>For further justifications of this style rule, see <em>Effective Java</em>
-Item 38 and <em>Java Puzzlers</em> Number 68.</p>
+
 <h3 id="use-todo-comments">Use TODO Comments</h3>
 <p>Use TODO comments for code that is temporary, a short-term solution, or
 good-enough but not perfect.</p>
@@ -553,10 +551,9 @@
 specific event ("Remove this code after all production mixers understand
 protocol V7.").</p>
 <h3 id="log-sparingly">Log Sparingly</h3>
-<p>While logging is necessary it has a significantly negative impact on
+<p>While logging is necessary, it has a significantly negative impact on
 performance and quickly loses its usefulness if it's not kept reasonably
-terse. The logging facilities provides five different levels of logging. Below
-are the different levels and when and how they should be used.</p>
+terse. The logging facilities provide five different levels of logging:</p>
 <ul>
 <li>
 <p><code>ERROR</code>: 
diff --git a/src/source/community/index.jd b/src/source/community/index.jd
index 22aa73c..31361ca 100644
--- a/src/source/community/index.jd
+++ b/src/source/community/index.jd
@@ -91,8 +91,7 @@
 <p><em>Use a clear, relevant message subject.</em> This helps everyone, both those trying to answer your question as well as those who may be looking for information in the future.</p>
 </li>
 <li>
-<p><em>Give plenty of details in your post.</em> Code or log snippets, pointers to screenshots, and similar details will get better results and make for better discussions. For a great guide to phrasing your questions, read <a href="http://www.catb.org/%7Eesr/faqs/smart-questions.html">How to Ask Questions the Smart Way</a>.
-<img src="{@docRoot}images/external-link.png"></p>
+<p><em>Give plenty of details in your post.</em> Code or log snippets, pointers to screenshots, and similar details will get better results and make for better discussions. For a great guide to phrasing your questions, read <a href="http://www.catb.org/%7Eesr/faqs/smart-questions.html">How to Ask Questions the Smart Way</a>.</p>
 </li>
 </ul>
 
diff --git a/src/source/developing.jd b/src/source/developing.jd
index 46a51a7..e6a97b5 100644
--- a/src/source/developing.jd
+++ b/src/source/developing.jd
@@ -72,18 +72,19 @@
 <pre><code>$ repo sync PROJECT0 PROJECT1 PROJECT2 ...
 </code></pre>
 <h2 id="creating-topic-branches">Creating topic branches</h2>
-<p>Start a topic branch in your local work environment whenever you begin a change, for example when you begin work on a bug or new feature. A topic branch is not a copy of the original files; it is a pointer to a particular commit. This makes creating local branches and switching among them a light-weight operation. By using branches, you can isolate one aspect of your work from the others. For an interesting article about using topic branches, see <a href="http://www.kernel.org/pub/software/scm/git/docs/howto/separating-topic-branches.txt">Separating topic branches</a>.
-<img src="{@docRoot}images/external-link.png" alt=""></p>
-<p>To start a topic branch using Repo: </p>
-<pre><code>$ repo start BRANCH_NAME
+<p>Start a topic branch in your local work environment whenever you begin a change, for example when you begin work on a bug or new feature. A topic branch is not a copy of the original files; it is a pointer to a particular commit. This makes creating local branches and switching among them a light-weight operation. By using branches, you can isolate one aspect of your work from the others. For an interesting article about using topic branches, see <a href="http://www.kernel.org/pub/software/scm/git/docs/howto/separating-topic-branches.txt">Separating topic branches</a>.</p>
+<p>To start a topic branch using Repo, navigate into the project to be modified and issue: </p>
+<pre><code>$ repo start BRANCH_NAME .
 </code></pre>
-<p>To verify that your new branch was created:</p>
-<pre><code>$ repo status
+<p>Please note that the period represents the project in the current working directory. To verify your new branch was created:</p>
+<pre><code>$ repo status .
 </code></pre>
 <h2 id="using-topic-branches">Using topic branches</h2>
 <p>To assign the branch to a particular project:</p>
-<pre><code>$ repo start BRANCH_NAME PROJECT
+<pre><code>$ repo start BRANCH_NAME PROJECT_NAME
 </code></pre>
+<p>See <a href="https://android.googlesource.com/">android.googlesource.com</a> for a list of all projects. Again, if you've already navigated into a particular project directory, you may simply pass a period to represent the current project.</p>
+
 <p>To switch to another branch that you have created in your local work environment:</p>
 <pre><code>$ git checkout BRANCH_NAME
 </code></pre>
diff --git a/src/source/faqs.jd b/src/source/faqs.jd
index f08a896..346ad98 100644
--- a/src/source/faqs.jd
+++ b/src/source/faqs.jd
@@ -169,7 +169,7 @@
 able to accept. For instance, someone might want to contribute an
 alternative application API, such as a full C++-based environment. We would
 decline that contribution, since Android encourages applications to be run
-in the Dalvik VM. Similarly, we won't accept contributions such as GPL
+in the ART runtime. Similarly, we won't accept contributions such as GPL
 or LGPL libraries that are incompatible with our licensing goals.</p>
 <p>We encourage those interested in contributing source code to contact us
 via the channels listed on the <a href="{@docRoot}source/community/index.html">
@@ -321,5 +321,7 @@
 implement the 'adb' debugging utility. This means that any compatible device
 -- including ones available at retail -- must be able to run the CTS
 tests.</p>
+<h3 id="are-codecs-verified">Are codecs verified by CTS?</h3>
+<p>Yes. All mandatory codecs are verified by CTS.</p>
 
 <a href="#top">Back to top</a>
diff --git a/src/source/initializing.jd b/src/source/initializing.jd
index 73e5545..3283575 100644
--- a/src/source/initializing.jd
+++ b/src/source/initializing.jd
@@ -95,6 +95,11 @@
 $ sudo ln -s /usr/lib/i386-linux-gnu/mesa/libGL.so.1 /usr/lib/i386-linux-gnu/libGL.so
 </code></pre>
 
+<h3 id="installing-required-packages-ubuntu-1404">Installing required packages (Ubuntu 14.04)</h3>
+<p>Building on Ubuntu 14.04 is experimental at the moment but will eventually become the recommended
+environment.</p>
+<pre><code>$ sudo apt-get install bison g++-multilib git gperf libxml2-utils</code></pre>
+
 <h3 id="installing-required-packages-ubuntu-1004-1110">Installing required packages (Ubuntu 10.04 -- 11.10)</h3>
 <p>Building on Ubuntu 10.04-11.10 is no longer supported, but may be useful for building older
 releases of AOSP.</p>
diff --git a/src/source/known-issues.jd b/src/source/known-issues.jd
index f87a34f..9a6d9fc 100644
--- a/src/source/known-issues.jd
+++ b/src/source/known-issues.jd
@@ -27,7 +27,9 @@
 <p>Even with our best care, small problems sometimes slip in. This page keeps
 track of the known issues around using the Android source code.</p>
 
-<h2 id="missing-cellbroadcastreceiver">Missing CellBroadcastReceiver in toro builds</h2>
+<h2 id="build-issues">Build issues</h2>
+
+<h3 id="missing-cellbroadcastreceiver">Missing CellBroadcastReceiver in toro builds</h3>
 <p><strong>Symptom</strong></p>On AOSP builds for toro (up to Jelly Bean 4.2.1),
 CellBroadcastReceiver doesn't get included in the system.</p>
 
@@ -35,14 +37,14 @@
 where <code>PRODUCT_PACKAGES</code> has the K replaced by an H.
 <p><strong>Fix</strong>: Use the latest packages for 4.2.2, or manually fix the typo.</p>
 
-<h2 id="missing-cts-native-xml-generator">Missing CTS Native XML Generator</h2>
+<h3 id="missing-cts-native-xml-generator">Missing CTS Native XML Generator</h3>
 <p><strong>Symptom</strong>: On some builds of IceCreamSandwich and later, the following
 warning is printed early during the build:
 <code>/bin/bash: line 0: cd: cts/tools/cts-native-xml-generator/src/res: No
 such file or directory</code></p>
 <p><strong>Cause</strong>: Some makefile references that path, which doesn't exist.</p>
 <p><strong>Fix</strong>: None. This is a harmless warning.</p>
-<h2 id="black-gingerbread-emulator">Black Gingerbread Emulator</h2>
+<h3 id="black-gingerbread-emulator">Black Gingerbread Emulator</h3>
 <p><strong>Symptom</strong>: The emulator built directly from the gingerbread branch
 doesn't start and stays stuck on a black screen.</p>
 <p><strong>Cause</strong>: The gingerbread branch uses version R7 of the emulator,
@@ -54,14 +56,87 @@
 $ make
 $ emulator -kernel prebuilt/android-arm/kernel/kernel-qemu-armv7
 </code></pre>
-<h2 id="emulator-built-on-macos-107-lion-doesnt-work">Emulator built on MacOS 10.7 Lion doesn't work.</h2>
+<h3 id="emulator-built-on-macos-107-lion-doesnt-work">Emulator built on MacOS 10.7 Lion doesn't work.</h3>
 <p><strong>Symptom</strong>: The emulator (any version) built on MacOS 10.7 Lion
 and/or on XCode 4.x doesn't start.</p>
 <p><strong>Cause</strong>: Some change in the development environment causes
 the emulator to be compiled in a way that prevents it from working.</p>
 <p><strong>Fix</strong>: Use an emulator binary from the SDK, which is built on
 MacOS 10.6 with XCode 3 and works on MacOS 10.7.</p>
-<h2 id="difficulties-syncing-the-source-code-proxy-issues">Difficulties syncing the source code (proxy issues).</h2>
+
+<h3 id="partial-and-emulator-builds"><code>WITH_DEXPREOPT=true</code> and emulator builds.</h3>
+<p><strong>Symptom</strong>: When conducting partial builds or syncs (make system no dependencies)
+on emulator builds, the resulting build doesn't work.</p>
+<p><strong>Cause</strong>: All emulator builds now run Dex optimization at build
+time by default, which requires following all dependencies to
+re-optimize the applications each time the framework changes.</p>
+<p><strong>Fix</strong>: Locally disable Dex optimizations with
+<code>export WITH_DEXPREOPT=false</code>, delete the existing optimized
+versions with <code>make installclean</code> and run a full build to
+re-generate non-optimized versions. After that, partial builds
+will work.</p>
+<h3 id="permission-denied-during-builds">"Permission Denied" during builds.</h3>
+<p><strong>Symptom</strong>: All builds fail with "Permission Denied", possibly
+along with anti-virus warnings.</p>
+<p><strong>Cause</strong>: Some anti-virus programs mistakenly recognize some
+source files in the Android source tree as if they contained
+viruses.</p>
+<p><strong>Fix</strong>: After verifying that there are no actual viruses
+involved, disable anti-virus on the Android tree. This has
+the added benefit of reducing build times.</p>
+<h3 id="build-errors-related-to-using-the-wrong-compiler">Build errors related to using the wrong compiler.</h3>
+<p><strong>Symptom</strong>: The build fails with various symptoms. One
+such symptom is <code>cc1: error: unrecognized command line option "-m32"</code></p>
+<p><strong>Cause</strong>: The Android build system uses the default compiler
+in the PATH, assuming it's a suitable compiler to generate
+binaries that run on the host. Other situations (e.g. using
+the Android NDK or building the kernel) cause the default
+compiler to not be a host compiler.</p>
+<p><strong>Fix</strong>: Use a "clean" shell, in which no previous
+actions could have swapped the default compiler.</p>
+<h3 id="build-errors-caused-by-non-default-tool-settings">Build errors caused by non-default tool settings.</h3>
+<p><strong>Symptom</strong>: The build fails with various symptoms, possibly
+complaining about missing files or files that have the
+wrong format. One such symptom is <code>member [...] in archive is not an object</code>.</p>
+<p><strong>Cause</strong>: The Android build system tends to use many host tools
+and to rely on their default behaviors. Some settings change
+those tools' behaviors and make them behave in ways that
+confuse the build system. Variables known to cause such
+issues are <code>CDPATH</code> and <code>GREP_OPTIONS</code>.</p>
+<p><strong>Fix</strong>: Build Android in an environment that has as few
+customizations as possible.</p>
+<h3 id="build-error-with-40x-and-earlier-on-macos-107">Build error with 4.0.x and earlier on MacOS 10.7.</h3>
+<p><strong>Symptom</strong>: Building IceCreamSandwich 4.0.x (and older
+versions) fails on MacOS 10.7 with errors similar to this:
+<code>Undefined symbols for architecture i386: "_SDL_Init"</code></p>
+<p><strong>Cause</strong>: 4.0.x is not compatible with MacOS 10.7.</p>
+<p><strong>Fix</strong>: Either downgrade to MacOS 10.6, or use the master
+branch, which can be built on MacOS 10.7.</p>
+<pre><code>$ repo init -b master
+$ repo sync
+</code></pre>
+<h3 id="build-error-on-macos-with-xcode-43">Build error on MacOS with XCode 4.3.</h3>
+<p><strong>Symptom</strong>: All builds fail when using XCode 4.3.</p>
+<p><strong>Cause</strong>: XCode 4.3 switched the default compiler from
+gcc to llvm, and llvm rejects code that used to be
+accepted by gcc.</p>
+<p><strong>Fix</strong>: Use XCode 4.2.</p>
+<h3 id="build-error-with-40x-and-earlier-on-ubuntu-1110">Build error with 4.0.x and earlier on Ubuntu 11.10.</h3>
+<p><strong>Symptom</strong>: Building IceCreamSandwich 4.0.x (and older
+versions) on Ubuntu 11.10 and newer fails with errors similar to this:
+<code>&lt;command-line&gt;:0:0: warning: "_FORTIFY_SOURCE" redefined [enabled by default]</code></p>
+<p><strong>Cause</strong>: Ubuntu 11.10 uses a version of gcc where that symbol
+is defined by default, and Android also defines that symbol,
+which causes a conflict.</p>
+<p><strong>Fix</strong>: Either downgrade to Ubuntu 10.04, or use the master
+branch, which can be compiled on Ubuntu 11.10 and newer.</p>
+<pre><code>$ repo init -b master
+$ repo sync
+</code></pre>
+
+<h2 id="source-sync">Source sync issues</h2>
+
+<h3 id="difficulties-syncing-the-source-code-proxy-issues">Difficulties syncing the source code (proxy issues).</h3>
 <p><strong>Symptom</strong>: <code>repo init</code> or <code>repo sync</code> fail with http errors,
 typically 403 or 500.</p>
 <p><strong>Cause</strong>: There are quite a few possible causes, most often
@@ -70,7 +145,7 @@
 <p><strong>Fix</strong>: While there's no general solution, using python 2.7
 and explicitly using <code>repo sync -j1</code> have been reported to
 improve the situation for some users.</p>
-<h2 id="difficulties-syncing-the-source-tree-virtualbox-ethernet-issues">Difficulties syncing the source tree (VirtualBox Ethernet issues).</h2>
+<h3 id="difficulties-syncing-the-source-tree-virtualbox-ethernet-issues">Difficulties syncing the source tree (VirtualBox Ethernet issues).</h3>
 <p><strong>Symptom</strong>: When running <code>repo sync</code> in some VirtualBox installations,
 the process hangs or fails with a variety of possible symptoms.
 One such symptom is
@@ -80,7 +155,7 @@
 the network. The heavy network activity of repo sync triggers some
 corner cases in the NAT code.</p>
 <p><strong>Fix</strong>: Configure VirtualBox to use bridged network instead of NAT.</p>
-<h2 id="difficulties-syncing-the-source-tree-dns-issues">Difficulties syncing the source tree (DNS issues).</h2>
+<h3 id="difficulties-syncing-the-source-tree-dns-issues">Difficulties syncing the source tree (DNS issues).</h3>
 <p><strong>Symptom</strong>: When running <code>repo sync</code>, the process fails with
 various errors related to not recognizing the hostname. One such
 error is <code>&lt;urlopen error [Errno -2] Name or service not known&gt;</code>.</p>
@@ -103,7 +178,7 @@
 <p>Note that this will only work as long as the servers' addresses
 don't change, and if they do and you can't connect you'll have
 to resolve those hostnames again and edit <code>etc/hosts</code> accordingly.</p>
-<h2 id="difficulties-syncing-the-source-tree-tcp-issues">Difficulties syncing the source tree (TCP issues).</h2>
+<h3 id="difficulties-syncing-the-source-tree-tcp-issues">Difficulties syncing the source tree (TCP issues).</h3>
 <p><strong>Symptom</strong>: <code>repo sync</code> hangs while syncing, often when it's
 completed 99% of the sync.</p>
 <p><strong>Cause</strong>: Some settings in the TCP/IP stack cause difficulties
@@ -111,27 +186,10 @@
 nor fails.</p>
 <p><strong>Fix</strong>: On linux, <code>sysctl -w net.ipv4.tcp_window_scaling=0</code>. On
 MacOS, disable the rfc1323 extension in the network settings.</p>
-<h2 id="make-snod-and-emulator-builds"><code>make snod</code> and emulator builds.</h2>
-<p><strong>Symptom</strong>: When using <code>make snod</code> (make system no dependencies)
-on emulator builds, the resulting build doesn't work.</p>
-<p><strong>Cause</strong>: All emulator builds now run Dex optimization at build
-time by default, which requires to follow all dependencies to
-re-optimize the applications each time the framework changes.</p>
-<p><strong>Fix</strong>: Locally disable Dex optimizations with
-<code>export WITH_DEXPREOPT=false</code>, delete the existing optimized
-versions with <code>make installclean</code> and run a full build to
-re-generate non-optimized versions. After that, <code>make snod</code>
-will work.</p>
-<h2 id="permission-denied-during-builds">"Permission Denied" during builds.</h2>
-<p><strong>Symptom</strong>: All builds fail with "Permission Denied", possibly
-along with anti-virus warnings.</p>
-<p><strong>Cause</strong>: Some anti-virus programs mistakenly recognize some
-source files in the Android source tree as if they contained
-viruses.</p>
-<p><strong>Fix</strong>: After verifying that there are no actual viruses
-involved, disable anti-virus on the Android tree. This has
-the added benefit of reducing build times.</p>
-<h2 id="camera-and-gps-dont-work-on-galaxy-nexus">Camera and GPS don't work on Galaxy Nexus.</h2>
+
+
+<h2 id="runtime-issues">Runtime issues</h2>
+<h3 id="camera-and-gps-dont-work-on-galaxy-nexus">Camera and GPS don't work on Galaxy Nexus.</h3>
 <p><strong>Symptom</strong>: Camera and GPS don't work on Galaxy Nexus.
 As an example, the Camera application crashes as soon as it's
 launched.</p>
@@ -139,52 +197,3 @@
 libraries that aren't available in the Android Open Source
 Project.</p>
 <p><strong>Fix</strong>: None.</p>
-<h2 id="build-errors-related-to-using-the-wrong-compiler">Build errors related to using the wrong compiler.</h2>
-<p><strong>Symptom</strong>: The build fails with various symptoms. One
-such symptom is <code>cc1: error: unrecognized command line option "-m32"</code></p>
-<p><strong>Cause</strong>: The Android build system uses the default compiler
-in the PATH, assuming it's a suitable compiler to generate
-binaries that run on the host. Other situations (e.g. using
-the Android NDK or building the kernel) cause the default
-compiler to not be a host compiler.</p>
-<p><strong>Fix</strong>: Use a "clean" shell, in which no previous
-actions could have swapped the default compiler.</p>
-<h2 id="build-errors-caused-by-non-default-tool-settings">Build errors caused by non-default tool settings.</h2>
-<p><strong>Symptom</strong>: The build fails with various symptoms, possibly
-complinaing about missing files or files that have the
-wrong format. One such symptom is <code>member [...] in archive is not an object</code>.</p>
-<p><strong>Cause</strong>: The Android build system tends to use many host tools
-and to rely on their default behaviors. Some settings change
-those tools' behaviors and make them behave in ways that
-confuse the build system. Variables known to cause such
-issues are <code>CDPATH</code> and <code>GREP_OPTIONS</code>.</p>
-<p><strong>Fix</strong>: Build Android in an environment that has as few
-customizations as possible.</p>
-<h2 id="build-error-with-40x-and-earlier-on-macos-107">Build error with 4.0.x and earlier on MacOS 10.7.</h2>
-<p><strong>Symptom</strong>: Building IceCreamSandwich 4.0.x (and older
-versions) fails on MacOS 10.7 with errors similar to this:
-<code>Undefined symbols for architecture i386: "_SDL_Init"</code></p>
-<p><strong>Cause</strong>: 4.0.x is not compatible with MacOS 10.7.</p>
-<p><strong>Fix</strong>: Either downgrade to MacOS 10.6, or use the master
-branch, which can be built on MacOS 10.7.</p>
-<pre><code>$ repo init -b master
-$ repo sync
-</code></pre>
-<h2 id="build-error-on-macos-with-xcode-43">Build error on MacOS with XCode 4.3.</h2>
-<p><strong>Symptom</strong>: All builds fail when using XCode 4.3.</p>
-<p><strong>Cause</strong>: XCode 4.3 switched the default compiler from
-gcc to llvm, and llvm rejects code that used to be
-accepted by gcc.</p>
-<p><strong>Fix</strong>: Use XCode 4.2.</p>
-<h2 id="build-error-with-40x-and-earlier-on-ubuntu-1110">Build error with 4.0.x and earlier on Ubuntu 11.10.</h2>
-<p><strong>Symptom</strong>: Building IceCreamSandwich 4.0.x (and older
-versions) on Ubuntu 11.10 and newer fails with errors similar to this:
-<code>&lt;command-line&gt;:0:0: warning: "_FORTIFY_SOURCE" redefined [enabled by default]</code></p>
-<p><strong>Cause</strong>: Ubuntu 11.10 uses a version of gcc where that symbol
-is defined by default, and Android also defines that symbol,
-which causes a conflict.</p>
-<p><strong>Fix</strong>: Either downgrade to Ubuntu 10.04, or use the master
-branch, which can be compiled on Ubuntu 11.10 and newer.</p>
-<pre><code>$ repo init -b master
-$ repo sync
-</code></pre>
diff --git a/src/source/report-bugs.jd b/src/source/report-bugs.jd
index 027dcd2..e0fcae9 100644
--- a/src/source/report-bugs.jd
+++ b/src/source/report-bugs.jd
@@ -78,7 +78,7 @@
 and a poor bug report.</p>
 
 <h2 id="a-poor-bug-report">A Poor Bug Report</h2>
-<pre>
+<blockquote>
 Title: Error message
 
 When running Eclipse I get an "Internal Error" that says "See the .log file for more details".
@@ -91,18 +91,18 @@
 
 Observed results:
 See above.
-</pre>
+</blockquote>
 <p>This is a poor bug report because it doesn't provide any context for the
-issue; is it a problem in the Dalvik virtual machine, the core framework, or
+issue; is it a problem in the ART runtime, the core framework, or
 something else? It also doesn't provide any code or hint on how to reproduce
 it. In other words, this bug report doesn't provide enough information for
 anyone to take action on, so it would be ignored.</p>
 <h2 id="a-good-bug-report">A Good Bug Report</h2>
-<pre>
+<blockquote>
 Title: Stepping over "Object o = null" causes Eclipse "Internal Error"
 
 Interesting bug, while using Eclipse 3.3.1.1 with m37a of android and the following code:
-
+<pre>
 package com.saville.android;
 
 import android.app.Activity;
@@ -125,7 +125,7 @@
 
     static final String TAG = "TestObjectNull";
 }
-
+</pre>
 Eclipse indicates an "Internal Error" with "See the .log file for more
 details" and then asks if I want to exit the workbench. This occurs when I
 place a break point on "setContentView(R.layout.main);" and then single
@@ -134,7 +134,7 @@
 If I change "Object o = null;" to "Object o" all is well.
 
 The last lines of the .log file are:
-
+<pre>
 !ENTRY org.eclipse.core.jobs 4 2 2008-01-01 13:04:15.825
 !MESSAGE An internal error occurred during: "has children update".
 !STACK 0
@@ -163,3 +163,4 @@
 org.eclipse.debug.internal.ui.model.elements.ElementContentProvider$3.run(ElementContentProvider.java:200)
         at org.eclipse.core.internal.jobs.Worker.run(Worker.java:55)
 </pre>
+</blockquote>
diff --git a/src/source/submit-patches.jd b/src/source/submit-patches.jd
index 1c2dd0d..c41de25 100644
--- a/src/source/submit-patches.jd
+++ b/src/source/submit-patches.jd
@@ -199,7 +199,7 @@
 
 <h2 id="mksh">mksh</h2>
 <p>All changes to the MirBSD Korn Shell project at <code>external/mksh</code> should be made upstream
-either by sending an email to miros-mksh on the mirbsd.o®g domain (no subscription
+either by sending an email to miros-mksh on the mirbsd.org domain (no subscription
 required to submit there) or (optionally) at <a href="https://launchpad.net/mksh">Launchpad</a>.
 </p>
 <h2 id="openssl">OpenSSL</h2>
diff --git a/src/source/using-repo.jd b/src/source/using-repo.jd
index 67ca7b7..ce86c43 100644
--- a/src/source/using-repo.jd
+++ b/src/source/using-repo.jd
@@ -147,7 +147,7 @@
 <p><code>REPO_PATH</code> is the path relative to the root of the client.</p>
 </li>
 <li>
-<p><code>REPO_REMOTE</code> is the name of the remote sstem from the manifest.</p>
+<p><code>REPO_REMOTE</code> is the name of the remote system from the manifest.</p>
 </li>
 <li>
 <p><code>REPO_LREV</code> is the name of the revision from the manifest, translated to a local tracking branch.  Used if you need to pass the manifest revision to a locally executed git command.</p>