Merge remote-tracking branch 'goog/stage-aosp-master' into HEAD
diff --git a/.gitignore b/.gitignore
new file mode 100644
index 0000000..291b535
--- /dev/null
+++ b/.gitignore
@@ -0,0 +1,9 @@
+*.pyc
+*.pyo
+*.orig
+.ropeproject
+*.egg-info
+devlib/bin/scripts/shutils
+doc/_build/
+build/
+dist/
diff --git a/README.rst b/README.rst
new file mode 100644
index 0000000..54688a7
--- /dev/null
+++ b/README.rst
@@ -0,0 +1,37 @@
+devlib
+======
+
+``devlib`` exposes an interface for interacting with and collecting
+measurements from a variety of devices (such as mobile phones, tablets and
+development boards) running a Linux-based operating system.
+
+
+Installation
+------------
+
+::
+
+        sudo -H pip install devlib
+
+
+Usage
+-----
+
+Please refer to the "Overview" section of the documentation.
+
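+A minimal, illustrative session (assuming a single Android device attached
+over ADB; see the documentation for the available options) might look like
+this::
+
+        from devlib import AndroidTarget
+
+        target = AndroidTarget()        # connects to the attached device
+        print(target.execute('uname -a'))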
+
+License
+-------
+
+This package is distributed under the `Apache v2.0 License <http://www.apache.org/licenses/LICENSE-2.0>`_.
+
+
+Feedback, Contributions and Support
+-----------------------------------
+
+- Please use the GitHub Issue Tracker associated with this repository for
+  feedback.
+- ARM licensees may contact ARM directly via their partner managers.
+- We welcome code contributions via GitHub pull requests. Please try to
+  follow the style of the existing code in your contributions.
+
diff --git a/devlib/__init__.py b/devlib/__init__.py
new file mode 100644
index 0000000..51a8e47
--- /dev/null
+++ b/devlib/__init__.py
@@ -0,0 +1,24 @@
+from devlib.target import Target, LinuxTarget, AndroidTarget, LocalLinuxTarget
+from devlib.host import PACKAGE_BIN_DIRECTORY
+from devlib.exception import DevlibError, TargetError, HostError, TargetNotRespondingError
+
+from devlib.module import Module, HardRestModule, BootModule, FlashModule
+from devlib.module import get_module, register_module
+
+from devlib.platform import Platform
+from devlib.platform.arm import TC2, Juno, JunoEnergyInstrument
+from devlib.platform.gem5 import Gem5SimulationPlatform
+
+from devlib.instrument import Instrument, InstrumentChannel, Measurement, MeasurementsCsv
+from devlib.instrument import MEASUREMENT_TYPES, INSTANTANEOUS, CONTINUOUS
+from devlib.instrument.daq import DaqInstrument
+from devlib.instrument.energy_probe import EnergyProbeInstrument
+from devlib.instrument.hwmon import HwmonInstrument
+from devlib.instrument.monsoon import MonsoonInstrument
+from devlib.instrument.netstats import NetstatsInstrument
+
+from devlib.trace.ftrace import FtraceCollector
+
+from devlib.host import LocalConnection
+from devlib.utils.android import AdbConnection
+from devlib.utils.ssh import SshConnection, TelnetConnection, Gem5Connection
diff --git a/devlib/bin/LICENSE.busybox b/devlib/bin/LICENSE.busybox
new file mode 100644
index 0000000..6f50a71
--- /dev/null
+++ b/devlib/bin/LICENSE.busybox
@@ -0,0 +1,348 @@
+--- A note on GPL versions
+
+BusyBox is distributed under version 2 of the General Public License (included
+in its entirety, below).  Version 2 is the only version of this license which
+this version of BusyBox (or modified versions derived from this one) may be
+distributed under.
+
+------------------------------------------------------------------------
+		    GNU GENERAL PUBLIC LICENSE
+		       Version 2, June 1991
+
+ Copyright (C) 1989, 1991 Free Software Foundation, Inc.
+     51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
+ Everyone is permitted to copy and distribute verbatim copies
+ of this license document, but changing it is not allowed.
+
+			    Preamble
+
+  The licenses for most software are designed to take away your
+freedom to share and change it.  By contrast, the GNU General Public
+License is intended to guarantee your freedom to share and change free
+software--to make sure the software is free for all its users.  This
+General Public License applies to most of the Free Software
+Foundation's software and to any other program whose authors commit to
+using it.  (Some other Free Software Foundation software is covered by
+the GNU Library General Public License instead.)  You can apply it to
+your programs, too.
+
+  When we speak of free software, we are referring to freedom, not
+price.  Our General Public Licenses are designed to make sure that you
+have the freedom to distribute copies of free software (and charge for
+this service if you wish), that you receive source code or can get it
+if you want it, that you can change the software or use pieces of it
+in new free programs; and that you know you can do these things.
+
+  To protect your rights, we need to make restrictions that forbid
+anyone to deny you these rights or to ask you to surrender the rights.
+These restrictions translate to certain responsibilities for you if you
+distribute copies of the software, or if you modify it.
+
+  For example, if you distribute copies of such a program, whether
+gratis or for a fee, you must give the recipients all the rights that
+you have.  You must make sure that they, too, receive or can get the
+source code.  And you must show them these terms so they know their
+rights.
+
+  We protect your rights with two steps: (1) copyright the software, and
+(2) offer you this license which gives you legal permission to copy,
+distribute and/or modify the software.
+
+  Also, for each author's protection and ours, we want to make certain
+that everyone understands that there is no warranty for this free
+software.  If the software is modified by someone else and passed on, we
+want its recipients to know that what they have is not the original, so
+that any problems introduced by others will not reflect on the original
+authors' reputations.
+
+  Finally, any free program is threatened constantly by software
+patents.  We wish to avoid the danger that redistributors of a free
+program will individually obtain patent licenses, in effect making the
+program proprietary.  To prevent this, we have made it clear that any
+patent must be licensed for everyone's free use or not licensed at all.
+
+  The precise terms and conditions for copying, distribution and
+modification follow.
+
+		    GNU GENERAL PUBLIC LICENSE
+   TERMS AND CONDITIONS FOR COPYING, DISTRIBUTION AND MODIFICATION
+
+  0. This License applies to any program or other work which contains
+a notice placed by the copyright holder saying it may be distributed
+under the terms of this General Public License.  The "Program", below,
+refers to any such program or work, and a "work based on the Program"
+means either the Program or any derivative work under copyright law:
+that is to say, a work containing the Program or a portion of it,
+either verbatim or with modifications and/or translated into another
+language.  (Hereinafter, translation is included without limitation in
+the term "modification".)  Each licensee is addressed as "you".
+
+Activities other than copying, distribution and modification are not
+covered by this License; they are outside its scope.  The act of
+running the Program is not restricted, and the output from the Program
+is covered only if its contents constitute a work based on the
+Program (independent of having been made by running the Program).
+Whether that is true depends on what the Program does.
+
+  1. You may copy and distribute verbatim copies of the Program's
+source code as you receive it, in any medium, provided that you
+conspicuously and appropriately publish on each copy an appropriate
+copyright notice and disclaimer of warranty; keep intact all the
+notices that refer to this License and to the absence of any warranty;
+and give any other recipients of the Program a copy of this License
+along with the Program.
+
+You may charge a fee for the physical act of transferring a copy, and
+you may at your option offer warranty protection in exchange for a fee.
+
+  2. You may modify your copy or copies of the Program or any portion
+of it, thus forming a work based on the Program, and copy and
+distribute such modifications or work under the terms of Section 1
+above, provided that you also meet all of these conditions:
+
+    a) You must cause the modified files to carry prominent notices
+    stating that you changed the files and the date of any change.
+
+    b) You must cause any work that you distribute or publish, that in
+    whole or in part contains or is derived from the Program or any
+    part thereof, to be licensed as a whole at no charge to all third
+    parties under the terms of this License.
+
+    c) If the modified program normally reads commands interactively
+    when run, you must cause it, when started running for such
+    interactive use in the most ordinary way, to print or display an
+    announcement including an appropriate copyright notice and a
+    notice that there is no warranty (or else, saying that you provide
+    a warranty) and that users may redistribute the program under
+    these conditions, and telling the user how to view a copy of this
+    License.  (Exception: if the Program itself is interactive but
+    does not normally print such an announcement, your work based on
+    the Program is not required to print an announcement.)
+
+These requirements apply to the modified work as a whole.  If
+identifiable sections of that work are not derived from the Program,
+and can be reasonably considered independent and separate works in
+themselves, then this License, and its terms, do not apply to those
+sections when you distribute them as separate works.  But when you
+distribute the same sections as part of a whole which is a work based
+on the Program, the distribution of the whole must be on the terms of
+this License, whose permissions for other licensees extend to the
+entire whole, and thus to each and every part regardless of who wrote it.
+
+Thus, it is not the intent of this section to claim rights or contest
+your rights to work written entirely by you; rather, the intent is to
+exercise the right to control the distribution of derivative or
+collective works based on the Program.
+
+In addition, mere aggregation of another work not based on the Program
+with the Program (or with a work based on the Program) on a volume of
+a storage or distribution medium does not bring the other work under
+the scope of this License.
+
+  3. You may copy and distribute the Program (or a work based on it,
+under Section 2) in object code or executable form under the terms of
+Sections 1 and 2 above provided that you also do one of the following:
+
+    a) Accompany it with the complete corresponding machine-readable
+    source code, which must be distributed under the terms of Sections
+    1 and 2 above on a medium customarily used for software interchange; or,
+
+    b) Accompany it with a written offer, valid for at least three
+    years, to give any third party, for a charge no more than your
+    cost of physically performing source distribution, a complete
+    machine-readable copy of the corresponding source code, to be
+    distributed under the terms of Sections 1 and 2 above on a medium
+    customarily used for software interchange; or,
+
+    c) Accompany it with the information you received as to the offer
+    to distribute corresponding source code.  (This alternative is
+    allowed only for noncommercial distribution and only if you
+    received the program in object code or executable form with such
+    an offer, in accord with Subsection b above.)
+
+The source code for a work means the preferred form of the work for
+making modifications to it.  For an executable work, complete source
+code means all the source code for all modules it contains, plus any
+associated interface definition files, plus the scripts used to
+control compilation and installation of the executable.  However, as a
+special exception, the source code distributed need not include
+anything that is normally distributed (in either source or binary
+form) with the major components (compiler, kernel, and so on) of the
+operating system on which the executable runs, unless that component
+itself accompanies the executable.
+
+If distribution of executable or object code is made by offering
+access to copy from a designated place, then offering equivalent
+access to copy the source code from the same place counts as
+distribution of the source code, even though third parties are not
+compelled to copy the source along with the object code.
+
+  4. You may not copy, modify, sublicense, or distribute the Program
+except as expressly provided under this License.  Any attempt
+otherwise to copy, modify, sublicense or distribute the Program is
+void, and will automatically terminate your rights under this License.
+However, parties who have received copies, or rights, from you under
+this License will not have their licenses terminated so long as such
+parties remain in full compliance.
+
+  5. You are not required to accept this License, since you have not
+signed it.  However, nothing else grants you permission to modify or
+distribute the Program or its derivative works.  These actions are
+prohibited by law if you do not accept this License.  Therefore, by
+modifying or distributing the Program (or any work based on the
+Program), you indicate your acceptance of this License to do so, and
+all its terms and conditions for copying, distributing or modifying
+the Program or works based on it.
+
+  6. Each time you redistribute the Program (or any work based on the
+Program), the recipient automatically receives a license from the
+original licensor to copy, distribute or modify the Program subject to
+these terms and conditions.  You may not impose any further
+restrictions on the recipients' exercise of the rights granted herein.
+You are not responsible for enforcing compliance by third parties to
+this License.
+
+  7. If, as a consequence of a court judgment or allegation of patent
+infringement or for any other reason (not limited to patent issues),
+conditions are imposed on you (whether by court order, agreement or
+otherwise) that contradict the conditions of this License, they do not
+excuse you from the conditions of this License.  If you cannot
+distribute so as to satisfy simultaneously your obligations under this
+License and any other pertinent obligations, then as a consequence you
+may not distribute the Program at all.  For example, if a patent
+license would not permit royalty-free redistribution of the Program by
+all those who receive copies directly or indirectly through you, then
+the only way you could satisfy both it and this License would be to
+refrain entirely from distribution of the Program.
+
+If any portion of this section is held invalid or unenforceable under
+any particular circumstance, the balance of the section is intended to
+apply and the section as a whole is intended to apply in other
+circumstances.
+
+It is not the purpose of this section to induce you to infringe any
+patents or other property right claims or to contest validity of any
+such claims; this section has the sole purpose of protecting the
+integrity of the free software distribution system, which is
+implemented by public license practices.  Many people have made
+generous contributions to the wide range of software distributed
+through that system in reliance on consistent application of that
+system; it is up to the author/donor to decide if he or she is willing
+to distribute software through any other system and a licensee cannot
+impose that choice.
+
+This section is intended to make thoroughly clear what is believed to
+be a consequence of the rest of this License.
+
+  8. If the distribution and/or use of the Program is restricted in
+certain countries either by patents or by copyrighted interfaces, the
+original copyright holder who places the Program under this License
+may add an explicit geographical distribution limitation excluding
+those countries, so that distribution is permitted only in or among
+countries not thus excluded.  In such case, this License incorporates
+the limitation as if written in the body of this License.
+
+  9. The Free Software Foundation may publish revised and/or new versions
+of the General Public License from time to time.  Such new versions will
+be similar in spirit to the present version, but may differ in detail to
+address new problems or concerns.
+
+Each version is given a distinguishing version number.  If the Program
+specifies a version number of this License which applies to it and "any
+later version", you have the option of following the terms and conditions
+either of that version or of any later version published by the Free
+Software Foundation.  If the Program does not specify a version number of
+this License, you may choose any version ever published by the Free Software
+Foundation.
+
+  10. If you wish to incorporate parts of the Program into other free
+programs whose distribution conditions are different, write to the author
+to ask for permission.  For software which is copyrighted by the Free
+Software Foundation, write to the Free Software Foundation; we sometimes
+make exceptions for this.  Our decision will be guided by the two goals
+of preserving the free status of all derivatives of our free software and
+of promoting the sharing and reuse of software generally.
+
+			    NO WARRANTY
+
+  11. BECAUSE THE PROGRAM IS LICENSED FREE OF CHARGE, THERE IS NO WARRANTY
+FOR THE PROGRAM, TO THE EXTENT PERMITTED BY APPLICABLE LAW.  EXCEPT WHEN
+OTHERWISE STATED IN WRITING THE COPYRIGHT HOLDERS AND/OR OTHER PARTIES
+PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY OF ANY KIND, EITHER EXPRESSED
+OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE.  THE ENTIRE RISK AS
+TO THE QUALITY AND PERFORMANCE OF THE PROGRAM IS WITH YOU.  SHOULD THE
+PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF ALL NECESSARY SERVICING,
+REPAIR OR CORRECTION.
+
+  12. IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING
+WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MAY MODIFY AND/OR
+REDISTRIBUTE THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES,
+INCLUDING ANY GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING
+OUT OF THE USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED
+TO LOSS OF DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY
+YOU OR THIRD PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER
+PROGRAMS), EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE
+POSSIBILITY OF SUCH DAMAGES.
+
+		     END OF TERMS AND CONDITIONS
+
+	    How to Apply These Terms to Your New Programs
+
+  If you develop a new program, and you want it to be of the greatest
+possible use to the public, the best way to achieve this is to make it
+free software which everyone can redistribute and change under these terms.
+
+  To do so, attach the following notices to the program.  It is safest
+to attach them to the start of each source file to most effectively
+convey the exclusion of warranty; and each file should have at least
+the "copyright" line and a pointer to where the full notice is found.
+
+    <one line to give the program's name and a brief idea of what it does.>
+    Copyright (C) <year>  <name of author>
+
+    This program is free software; you can redistribute it and/or modify
+    it under the terms of the GNU General Public License as published by
+    the Free Software Foundation; either version 2 of the License, or
+    (at your option) any later version.
+
+    This program is distributed in the hope that it will be useful,
+    but WITHOUT ANY WARRANTY; without even the implied warranty of
+    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+    GNU General Public License for more details.
+
+    You should have received a copy of the GNU General Public License
+    along with this program; if not, write to the Free Software
+    Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
+
+
+Also add information on how to contact you by electronic and paper mail.
+
+If the program is interactive, make it output a short notice like this
+when it starts in an interactive mode:
+
+    Gnomovision version 69, Copyright (C) year  name of author
+    Gnomovision comes with ABSOLUTELY NO WARRANTY; for details type `show w'.
+    This is free software, and you are welcome to redistribute it
+    under certain conditions; type `show c' for details.
+
+The hypothetical commands `show w' and `show c' should show the appropriate
+parts of the General Public License.  Of course, the commands you use may
+be called something other than `show w' and `show c'; they could even be
+mouse-clicks or menu items--whatever suits your program.
+
+You should also get your employer (if you work as a programmer) or your
+school, if any, to sign a "copyright disclaimer" for the program, if
+necessary.  Here is a sample; alter the names:
+
+  Yoyodyne, Inc., hereby disclaims all copyright interest in the program
+  `Gnomovision' (which makes passes at compilers) written by James Hacker.
+
+  <signature of Ty Coon>, 1 April 1989
+  Ty Coon, President of Vice
+
+This General Public License does not permit incorporating your program into
+proprietary programs.  If your program is a subroutine library, you may
+consider it more useful to permit linking proprietary applications with the
+library.  If this is what you want to do, use the GNU Library General
+Public License instead of this License.
diff --git a/devlib/bin/LICENSE.trace-cmd b/devlib/bin/LICENSE.trace-cmd
new file mode 100644
index 0000000..9d46c1a
--- /dev/null
+++ b/devlib/bin/LICENSE.trace-cmd
@@ -0,0 +1,39 @@
+Included trace-cmd binaries are Free Software distributed under GPLv2:
+
+/*
+ * Copyright (C) 2009, 2010 Red Hat Inc, Steven Rostedt <srostedt@redhat.com>
+ *
+ * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License (not later!)
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not,  see <http://www.gnu.org/licenses>
+ *
+ * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+ */
+
+The full text of the license may be viewed here:
+
+http://www.gnu.org/licenses/gpl-2.0.html
+
+Source code for trace-cmd may be obtained here:
+
+git://git.kernel.org/pub/scm/linux/kernel/git/rostedt/trace-cmd.git
+
+Binaries included here contain modifications by ARM that, at the time of writing,
+have not yet made it into the above repository. The patches for these modifications
+are available here:
+
+http://article.gmane.org/gmane.linux.kernel/1869111
+http://article.gmane.org/gmane.linux.kernel/1869112
+
+
+
diff --git a/devlib/bin/arm64/busybox b/devlib/bin/arm64/busybox
new file mode 100755
index 0000000..6d09a07
--- /dev/null
+++ b/devlib/bin/arm64/busybox
Binary files differ
diff --git a/devlib/bin/arm64/m5 b/devlib/bin/arm64/m5
new file mode 100755
index 0000000..45d604d
--- /dev/null
+++ b/devlib/bin/arm64/m5
Binary files differ
diff --git a/devlib/bin/arm64/readenergy b/devlib/bin/arm64/readenergy
new file mode 100755
index 0000000..01806ce
--- /dev/null
+++ b/devlib/bin/arm64/readenergy
Binary files differ
diff --git a/devlib/bin/arm64/trace-cmd b/devlib/bin/arm64/trace-cmd
new file mode 100755
index 0000000..0d025d0
--- /dev/null
+++ b/devlib/bin/arm64/trace-cmd
Binary files differ
diff --git a/devlib/bin/armeabi/busybox b/devlib/bin/armeabi/busybox
new file mode 100755
index 0000000..1714d40
--- /dev/null
+++ b/devlib/bin/armeabi/busybox
Binary files differ
diff --git a/devlib/bin/armeabi/m5 b/devlib/bin/armeabi/m5
new file mode 100755
index 0000000..4329007
--- /dev/null
+++ b/devlib/bin/armeabi/m5
Binary files differ
diff --git a/devlib/bin/armeabi/trace-cmd b/devlib/bin/armeabi/trace-cmd
new file mode 100755
index 0000000..a445662
--- /dev/null
+++ b/devlib/bin/armeabi/trace-cmd
Binary files differ
diff --git a/devlib/bin/scripts/shutils.in b/devlib/bin/scripts/shutils.in
new file mode 100755
index 0000000..d44e444
--- /dev/null
+++ b/devlib/bin/scripts/shutils.in
@@ -0,0 +1,224 @@
+#!__DEVLIB_SHELL__
+
+CMD=$1
+shift
+
+BUSYBOX=${BUSYBOX:-__DEVLIB_BUSYBOX__}
+FIND=${FIND:-$BUSYBOX find}
+GREP=${GREP:-$BUSYBOX grep}
+SED=${SED:-$BUSYBOX sed}
+CAT=${CAT:-$BUSYBOX cat}
+AWK=${AWK:-$BUSYBOX awk}
+PS=${PS:-$BUSYBOX ps}
+
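+# Illustrative (assumed) invocation, once the __DEVLIB_SHELL__ and
+# __DEVLIB_BUSYBOX__ placeholders above have been substituted at install time:
+#
+#     sh shutils cpufreq_get_all_governors
+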
+################################################################################
+# CPUFrequency Utility Functions
+################################################################################
+
+cpufreq_set_all_frequencies() {
+	FREQ=$1
+	for CPU in /sys/devices/system/cpu/cpu[0-9]*; do
+		echo $FREQ > $CPU/cpufreq/scaling_cur_freq
+	done
+}
+
+cpufreq_get_all_frequencies() {
+	$GREP '' /sys/devices/system/cpu/cpu*/cpufreq/scaling_cur_freq | \
+		$SED -e 's|/sys/devices/system/cpu/cpu||' -e 's|/cpufreq/scaling_cur_freq:| |'
+}
+
+cpufreq_set_all_governors() {
+	GOV=$1
+	for CPU in /sys/devices/system/cpu/cpu[0-9]*; do
+		echo $GOV > $CPU/cpufreq/scaling_governor
+	done
+}
+
+cpufreq_get_all_governors() {
+	$GREP '' /sys/devices/system/cpu/cpu*/cpufreq/scaling_governor | \
+		$SED -e 's|/sys/devices/system/cpu/cpu||' -e 's|/cpufreq/scaling_governor:| |'
+}
+
+cpufreq_trace_all_frequencies() {
+	FREQS=$($CAT /sys/devices/system/cpu/cpu*/cpufreq/scaling_cur_freq)
+	CPU=0; for F in $FREQS; do
+		echo "cpu_frequency_devlib:        state=$F cpu_id=$CPU" > /sys/kernel/debug/tracing/trace_marker
+		CPU=$((CPU + 1))
+	done
+}
+
+################################################################################
+# CPUIdle Utility Functions
+################################################################################
+
+cpuidle_wake_all_cpus() {
+	CPU_PATHS=/sys/devices/system/cpu/cpu[0-9]*
+	MASK=0x1; for F in $CPU_PATHS; do
+		$BUSYBOX taskset $MASK true &
+		MASK=$($BUSYBOX printf '0x%x' $((MASK * 2)))
+	done
+}
+
+################################################################################
+# FTrace Utility Functions
+################################################################################
+
+ftrace_get_function_stats() {
+    for CPU in $(ls /sys/kernel/debug/tracing/trace_stat | $SED 's/function//'); do
+        REPLACE_STRING="s/  Function/\n  Function (CPU$CPU)/"
+        $CAT /sys/kernel/debug/tracing/trace_stat/function$CPU \
+            | $SED "$REPLACE_STRING"
+    done
+}
+
+
+################################################################################
+# CGroups Utility Functions
+################################################################################
+
+cgroups_get_attributes() {
+	test $# -eq 2 || exit -1
+	CGROUP="$1"
+	CONTROLLER="$2"
+	# Check if controller is mounted with "noprefix" option, which is quite
+	# common on Android for backward compatibility
+	ls $CGROUP/$CONTROLLER\.* >/dev/null 2>&1
+	if [ $? -eq 0 ]; then
+		# no "noprefix" option, attributes format is:
+		#   mnt_point/controller.attribute_name
+		$GREP '' $CGROUP/* | \
+			$GREP "$CONTROLLER\." | \
+			$SED -e "s|$CONTROLLER\.||" -e "s|$CGROUP/||"
+	else
+		# "noprefix" option, attribute format is:
+		#   mnt_point/attribute_name
+		$GREP '' $(\
+			$FIND $CGROUP -type f -maxdepth 1 |
+			$GREP -v -e ".*tasks" -e ".*cgroup\..*") | \
+		$SED "s|$CGROUP/||"
+	fi
+}
+
+cgroups_run_into() {
+
+	# Control groups mount point
+	CGMOUNT=${CGMOUNT:-/sys/fs/cgroup}
+	# The control group we want to run into
+	CGP=${1}
+	shift 1
+	# The command to run
+	CMD="${@}"
+
+	# Execution under root CGgroup
+	if [ "x/" == "x$CGP" ]; then
+
+	  $FIND $CGMOUNT -type d -maxdepth 0 | \
+	  while read CGPATH; do
+		# Move this shell into that control group
+		echo $$ > $CGPATH/cgroup.procs
+		echo "Moving task into root CGroup ($CGPATH)"
+	  done
+
+	# Execution under specified CGroup
+	else
+
+	  # Check if the required CGroup exists
+	  $FIND $CGMOUNT -type d -mindepth 1 | \
+	  $GREP "$CGP" &>/dev/null
+	  if [ $? -ne 0 ]; then
+		echo "ERROR: could not find any $CGP cgroup under $CGMOUNT"
+		exit 1
+	  fi
+
+	  $FIND $CGMOUNT -type d -mindepth 1 | \
+	  $GREP "$CGP" | \
+	  while read CGPATH; do
+		  # Move this shell into that control group
+		  echo $$ > $CGPATH/cgroup.procs
+		  echo "Moving task into $CGPATH"
+	  done
+
+	fi
+
+	# Execute the command
+	exec $CMD
+
+}
+
+cgroups_tasks_move() {
+	SRC_GRP=${1}
+	DST_GRP=${2}
+	shift 2
+	FILTERS=$*
+
+	$CAT $SRC_GRP/tasks | while read TID; do
+	  echo $TID > $DST_GRP/cgroup.procs
+	done
+
+	[ "x$FILTERS" = "x" ] && exit 0
+
+	PIDS=`$PS -o comm,pid | $GREP $FILTERS | $AWK '{print $2}'`
+	PIDS=`echo $PIDS`
+	echo "PIDs to save: [$PIDS]"
+	for TID in $PIDS; do
+	  COMM=`$CAT /proc/$TID/comm`
+	  echo "$TID : $COMM"
+	  echo $TID > $SRC_GRP/cgroup.procs || true
+	done
+}
+
+cgroups_tasks_in() {
+	GRP=${1}
+	for TID in $($CAT $GRP/tasks); do
+		COMM=`$CAT /proc/$TID/comm 2>/dev/null`
+		[ "$COMM" != "" ] && CMDL=`$CAT /proc/$TID/cmdline 2>/dev/null`
+		[ "$COMM" != "" ] && echo "$TID,$COMM,$CMDL"
+	done
+	exit 0
+}
+
+
+################################################################################
+# Main Function Dispatcher
+################################################################################
+
+case $CMD in
+cpufreq_set_all_frequencies)
+    cpufreq_set_all_frequencies $*
+    ;;
+cpufreq_get_all_frequencies)
+    cpufreq_get_all_frequencies
+    ;;
+cpufreq_set_all_governors)
+    cpufreq_set_all_governors $*
+    ;;
+cpufreq_get_all_governors)
+    cpufreq_get_all_governors
+    ;;
+cpufreq_trace_all_frequencies)
+    cpufreq_trace_all_frequencies $*
+    ;;
+cpuidle_wake_all_cpus)
+    cpuidle_wake_all_cpus $*
+    ;;
+cgroups_get_attributes)
+	cgroups_get_attributes $*
+	;;
+cgroups_run_into)
+    cgroups_run_into $*
+    ;;
+cgroups_tasks_move)
+	cgroups_tasks_move $*
+	;;
+cgroups_tasks_in)
+	cgroups_tasks_in $*
+	;;
+ftrace_get_function_stats)
+    ftrace_get_function_stats
+    ;;
+*)
+    echo "Command [$CMD] not supported"
+    exit -1
+esac
+
+# vim: tabstop=4 shiftwidth=4
diff --git a/devlib/bin/x86_64/busybox b/devlib/bin/x86_64/busybox
new file mode 100755
index 0000000..fa3e46d
--- /dev/null
+++ b/devlib/bin/x86_64/busybox
Binary files differ
diff --git a/devlib/exception.py b/devlib/exception.py
new file mode 100644
index 0000000..16dd04f
--- /dev/null
+++ b/devlib/exception.py
@@ -0,0 +1,51 @@
+#    Copyright 2013-2015 ARM Limited
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+
+class DevlibError(Exception):
+    """Base class for all Devlib exceptions."""
+    pass
+
+
+class TargetError(DevlibError):
+    """An error has occured on the target"""
+    pass
+
+
+class TargetNotRespondingError(DevlibError):
+    """The target is unresponsive."""
+
+    def __init__(self, target):
+        super(TargetNotRespondingError, self).__init__('Target {} is not responding.'.format(target))
+
+
+class HostError(DevlibError):
+    """An error has occured on the host"""
+    pass
+
+
+class TimeoutError(DevlibError):
+    """Raised when a subprocess command times out. This is basically a ``DevlibError``-derived version
+    of ``subprocess.CalledProcessError``, the thinking being that while a timeout could be due to
+    programming error (e.g. not setting long enough timers), it is often due to some failure in the
+    environment, and therefore should be classed as a "user error"."""
+
+    def __init__(self, command, output):
+        super(TimeoutError, self).__init__('Timed out: {}'.format(command))
+        self.command = command
+        self.output = output
+
+    def __str__(self):
+        return '\n'.join([self.message, 'OUTPUT:', self.output or ''])
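+
+
+# Illustrative (assumed) handling of the exceptions above around a target call;
+# "target" stands for any connected devlib target:
+#
+#     try:
+#         target.execute('some_command', timeout=30)
+#     except TimeoutError as e:
+#         print(e.output)       # output collected before the timeout, if any
+#     except TargetError as e:
+#         print('command failed on the target: {}'.format(e))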
diff --git a/devlib/host.py b/devlib/host.py
new file mode 100644
index 0000000..8c8a069
--- /dev/null
+++ b/devlib/host.py
@@ -0,0 +1,88 @@
+#    Copyright 2015 ARM Limited
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+from glob import iglob
+import os
+import shutil
+import subprocess
+import logging
+from getpass import getpass
+
+from devlib.exception import TargetError
+from devlib.utils.misc import check_output
+
+PACKAGE_BIN_DIRECTORY = os.path.join(os.path.dirname(__file__), 'bin')
+
+
+class LocalConnection(object):
+
+    name = 'local'
+
+    def __init__(self, platform=None, keep_password=True, unrooted=False,
+                 password=None, timeout=None):
+        self.logger = logging.getLogger('local_connection')
+        self.keep_password = keep_password
+        self.unrooted = unrooted
+        self.password = password
+
+    def push(self, source, dest, timeout=None, as_root=False):  # pylint: disable=unused-argument
+        self.logger.debug('cp {} {}'.format(source, dest))
+        shutil.copy(source, dest)
+
+    def pull(self, source, dest, timeout=None, as_root=False): # pylint: disable=unused-argument
+        self.logger.debug('cp {} {}'.format(source, dest))
+        if ('*' in source or '?' in source) and os.path.isdir(dest):
+            # Pull all files matching a wildcard expression
+            for each_source in iglob(source):
+                shutil.copy(each_source, dest)
+        else:
+            shutil.copy(source, dest)
+
+    def execute(self, command, timeout=None, check_exit_code=True,
+                as_root=False, strip_colors=True):
+        self.logger.debug(command)
+        if as_root:
+            if self.unrooted:
+                raise TargetError('unrooted')
+            password = self._get_password()
+            command = 'echo \'{}\' | sudo -S '.format(password) + command
+        ignore = None if check_exit_code else 'all'
+        try:
+            return check_output(command, shell=True, timeout=timeout, ignore=ignore)[0]
+        except subprocess.CalledProcessError as e:
+            message = 'Got exit code {}\nfrom: {}\nOUTPUT: {}'.format(
+                e.returncode, command, e.output)
+            raise TargetError(message)
+
+    def background(self, command, stdout=subprocess.PIPE, stderr=subprocess.PIPE, as_root=False):
+        if as_root:
+            if self.unrooted:
+                raise TargetError('unrooted')
+            password = self._get_password()
+            command = 'echo \'{}\' | sudo -S '.format(password) + command
+        return subprocess.Popen(command, stdout=stdout, stderr=stderr, shell=True)
+
+    def close(self):
+        pass
+
+    def cancel_running_command(self):
+        pass
+
+    def _get_password(self):
+        if self.password:
+            return self.password
+        password = getpass('sudo password:')
+        if self.keep_password:
+            self.password = password
+        return password
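+
+
+# Illustrative (assumed) direct use; in practice a LocalConnection is normally
+# created for you by LocalLinuxTarget rather than instantiated by hand:
+#
+#     conn = LocalConnection(unrooted=True)
+#     print(conn.execute('uname -a'))
+#     proc = conn.background('sleep 10')   # returns a subprocess.Popen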
diff --git a/devlib/instrument/__init__.py b/devlib/instrument/__init__.py
new file mode 100644
index 0000000..9d898c4
--- /dev/null
+++ b/devlib/instrument/__init__.py
@@ -0,0 +1,233 @@
+#    Copyright 2015 ARM Limited
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+import csv
+import logging
+import collections
+
+from devlib.utils.types import numeric
+
+
+# Channel modes describe what sort of measurement the instrument supports.
+# Values must be powers of 2
+INSTANTANEOUS = 1
+CONTINUOUS = 2
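+# Since the values are powers of 2 they can be combined bitwise; an instrument
+# supporting both kinds could (illustratively) declare:
+#
+#     mode = INSTANTANEOUS | CONTINUOUS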
+
+
+class MeasurementType(tuple):
+
+    __slots__ = []
+
+    def __new__(cls, name, units, category=None):
+        return tuple.__new__(cls, (name, units, category))
+
+    @property
+    def name(self):
+        return tuple.__getitem__(self, 0)
+
+    @property
+    def units(self):
+        return tuple.__getitem__(self, 1)
+
+    @property
+    def category(self):
+        return tuple.__getitem__(self, 2)
+
+    def __getitem__(self, item):
+        raise TypeError()
+
+    def __cmp__(self, other):
+        if isinstance(other, MeasurementType):
+            other = other.name
+        return cmp(self.name, other)
+
+    def __str__(self):
+        return self.name
+
+    __repr__ = __str__
+
+
+# Standard measures
+_measurement_types = [
+    MeasurementType('time', 'seconds'),
+    MeasurementType('temperature', 'degrees'),
+
+    MeasurementType('power', 'watts', 'power/energy'),
+    MeasurementType('voltage', 'volts', 'power/energy'),
+    MeasurementType('current', 'amps', 'power/energy'),
+    MeasurementType('energy', 'joules', 'power/energy'),
+
+    MeasurementType('tx', 'bytes', 'data transfer'),
+    MeasurementType('rx', 'bytes', 'data transfer'),
+    MeasurementType('tx/rx', 'bytes', 'data transfer'),
+]
+MEASUREMENT_TYPES = {m.name: m for m in _measurement_types}
+
+
+class Measurement(object):
+
+    __slots__ = ['value', 'channel']
+
+    @property
+    def name(self):
+        return '{}_{}'.format(self.channel.site, self.channel.kind)
+
+    @property
+    def units(self):
+        return self.channel.units
+
+    def __init__(self, value, channel):
+        self.value = value
+        self.channel = channel
+
+    def __cmp__(self, other):
+        if isinstance(other, Measurement):
+            return cmp(self.value, other.value)
+        else:
+            return cmp(self.value, other)
+
+    def __str__(self):
+        if self.units:
+            return '{}: {} {}'.format(self.name, self.value, self.units)
+        else:
+            return '{}: {}'.format(self.name, self.value)
+
+    __repr__ = __str__
+
+
+class MeasurementsCsv(object):
+
+    def __init__(self, path, channels):
+        self.path = path
+        self.channels = channels
+        self._fh = open(path, 'rb')
+
+    def measurements(self):
+        return list(self.itermeasurements())
+
+    def itermeasurements(self):
+        self._fh.seek(0)
+        reader = csv.reader(self._fh)
+        reader.next()  # headings
+        for row in reader:
+            values = map(numeric, row)
+            yield [Measurement(v, c) for (v, c) in zip(values, self.channels)]
+
+
+class InstrumentChannel(object):
+
+    @property
+    def label(self):
+        return '{}_{}'.format(self.site, self.kind)
+
+    @property
+    def kind(self):
+        return self.measurement_type.name
+
+    @property
+    def units(self):
+        return self.measurement_type.units
+
+    def __init__(self, name, site, measurement_type, **attrs):
+        self.name = name
+        self.site = site
+        if isinstance(measurement_type, MeasurementType):
+            self.measurement_type = measurement_type
+        else:
+            try:
+                self.measurement_type = MEASUREMENT_TYPES[measurement_type]
+            except KeyError:
+                raise ValueError('Unknown measurement type:  {}'.format(measurement_type))
+        for atname, atvalue in attrs.iteritems():
+            setattr(self, atname, atvalue)
+
+    def __str__(self):
+        if self.name == self.label:
+            return 'CHAN({})'.format(self.label)
+        else:
+            return 'CHAN({}, {})'.format(self.name, self.label)
+
+    __repr__ = __str__
+
+
+class Instrument(object):
+
+    mode = 0
+
+    def __init__(self, target):
+        self.target = target
+        self.logger = logging.getLogger(self.__class__.__name__)
+        self.channels = collections.OrderedDict()
+        self.active_channels = []
+        self.sample_rate_hz = None
+
+    # channel management
+
+    def list_channels(self):
+        return self.channels.values()
+
+    def get_channels(self, measure):
+        if hasattr(measure, 'name'):
+            measure = measure.name
+        return [c for c in self.list_channels() if c.kind == measure]
+
+    def add_channel(self, site, measure, name=None, **attrs):
+        if name is None:
+            name = '{}_{}'.format(site, measure)
+        chan = InstrumentChannel(name, site, measure, **attrs)
+        self.channels[chan.label] = chan
+
+    # initialization and teardown
+
+    def setup(self, *args, **kwargs):
+        pass
+
+    def teardown(self):
+        pass
+
+    def reset(self, sites=None, kinds=None, channels=None):
+        if kinds is None and sites is None and channels is None:
+            self.active_channels = sorted(self.channels.values(), key=lambda x: x.label)
+        else:
+            if isinstance(sites, basestring):
+                sites = [sites]
+            if isinstance(kinds, basestring):
+                kinds = [kinds]
+            self.active_channels = []
+            for chan_name in (channels or []):
+                try:
+                    self.active_channels.append(self.channels[chan_name])
+                except KeyError:
+                    msg = 'Unexpected channel "{}"; must be in {}'
+                    raise ValueError(msg.format(chan_name, self.channels.keys()))
+            for chan in self.channels.values():
+                if (kinds is None or chan.kind in kinds) and \
+                   (sites is None or chan.site in sites):
+                    self.active_channels.append(chan)
+
+    # instantaneous
+
+    def take_measurement(self):
+        pass
+
+    # continuous
+
+    def start(self):
+        pass
+
+    def stop(self):
+        pass
+
+    def get_data(self, outfile):
+        pass
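+
+
+# A typical (illustrative) lifecycle for a concrete Instrument subclass, based
+# on the interface above ("SomeInstrument" is hypothetical):
+#
+#     instrument = SomeInstrument(target)
+#     instrument.setup()
+#     instrument.reset(sites=['A57'], kinds=['power'])  # select active channels
+#     instrument.start()
+#     # ... run the workload of interest ...
+#     instrument.stop()
+#     csv = instrument.get_data('/tmp/measurements.csv')  # MeasurementsCsv
+#     instrument.teardown()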
diff --git a/devlib/instrument/daq.py b/devlib/instrument/daq.py
new file mode 100644
index 0000000..58d2f3e
--- /dev/null
+++ b/devlib/instrument/daq.py
@@ -0,0 +1,139 @@
+import os
+import csv
+import tempfile
+from itertools import chain
+
+from devlib.instrument import Instrument, MeasurementsCsv, CONTINUOUS
+from devlib.exception import HostError
+from devlib.utils.misc import unique
+
+try:
+    from daqpower.client import execute_command, Status
+    from daqpower.config import DeviceConfiguration, ServerConfiguration
+except ImportError, e:
+    execute_command, Status = None, None
+    DeviceConfiguration, ServerConfiguration, ConfigurationError = None, None, None
+    import_error_mesg = e.message
+
+
+class DaqInstrument(Instrument):
+
+    mode = CONTINUOUS
+
+    def __init__(self, target, resistor_values,  # pylint: disable=R0914
+                 labels=None,
+                 host='localhost',
+                 port=45677,
+                 device_id='Dev1',
+                 v_range=2.5,
+                 dv_range=0.2,
+                 sample_rate_hz=10000,
+                 channel_map=(0, 1, 2, 3, 4, 5, 6, 7, 16, 17, 18, 19, 20, 21, 22, 23),
+                 ):
+        # pylint: disable=no-member
+        super(DaqInstrument, self).__init__(target)
+        self._need_reset = True
+        if execute_command is None:
+            raise HostError('Could not import "daqpower": {}'.format(import_error_mesg))
+        if labels is None:
+            labels = ['PORT_{}'.format(i) for i in xrange(len(resistor_values))]
+        if len(labels) != len(resistor_values):
+            raise ValueError('"labels" and "resistor_values" must be of the same length')
+        self.server_config = ServerConfiguration(host=host,
+                                                 port=port)
+        result = self.execute('list_devices')
+        if result.status == Status.OK:
+            if device_id not in result.data:
+                raise ValueError('Device "{}" is not found on the DAQ server.'.format(device_id))
+        elif result.status != Status.OKISH:
+            raise HostError('Problem querying DAQ server: {}'.format(result.message))
+
+        self.device_config = DeviceConfiguration(device_id=device_id,
+                                                 v_range=v_range,
+                                                 dv_range=dv_range,
+                                                 sampling_rate=sample_rate_hz,
+                                                 resistor_values=resistor_values,
+                                                 channel_map=channel_map,
+                                                 labels=labels)
+        self.sample_rate_hz = sample_rate_hz
+
+        for label in labels:
+            for kind in ['power', 'voltage']:
+                self.add_channel(label, kind)
+
+    def reset(self, sites=None, kinds=None, channels=None):
+        super(DaqInstrument, self).reset(sites, kinds, channels)
+        self.execute('close')
+        result = self.execute('configure', config=self.device_config)
+        if not result.status == Status.OK:  # pylint: disable=no-member
+            raise HostError(result.message)
+        self._need_reset = False
+
+    def start(self):
+        if self._need_reset:
+            self.reset()
+        self.execute('start')
+
+    def stop(self):
+        self.execute('stop')
+        self._need_reset = True
+
+    def get_data(self, outfile):  # pylint: disable=R0914
+        tempdir = tempfile.mkdtemp(prefix='daq-raw-')
+        self.execute('get_data', output_directory=tempdir)
+        raw_file_map = {}
+        for entry in os.listdir(tempdir):
+            site = os.path.splitext(entry)[0]
+            path = os.path.join(tempdir, entry)
+            raw_file_map[site] = path
+
+        active_sites = unique([c.site for c in self.active_channels])
+        file_handles = []
+        try:
+            site_readers = {}
+            for site in active_sites:
+                try:
+                    site_file = raw_file_map[site]
+                    fh = open(site_file, 'rb')
+                    site_readers[site] = csv.reader(fh)
+                    file_handles.append(fh)
+                except KeyError:
+                    message = 'Could not get DAQ trace for {}; Obtained traces are in {}'
+                    raise HostError(message.format(site, tempdir))
+
+            # The first row is the headers
+            channel_order = []
+            for site, reader in site_readers.iteritems():
+                channel_order.extend(['{}_{}'.format(site, kind)
+                                      for kind in reader.next()])
+
+            def _read_next_rows():
+                parts = []
+                for reader in site_readers.itervalues():
+                    try:
+                        parts.extend(reader.next())
+                    except StopIteration:
+                        parts.extend([None, None])
+                return list(chain(parts))
+
+            with open(outfile, 'wb') as wfh:
+                field_names = [c.label for c in self.active_channels]
+                writer = csv.writer(wfh)
+                writer.writerow(field_names)
+                raw_row = _read_next_rows()
+                while any(raw_row):
+                    row = [raw_row[channel_order.index(f)] for f in field_names]
+                    writer.writerow(row)
+                    raw_row = _read_next_rows()
+
+            return MeasurementsCsv(outfile, self.active_channels)
+        finally:
+            for fh in file_handles:
+                fh.close()
+
+    def teardown(self):
+        self.execute('close')
+
+    def execute(self, command, **kwargs):
+        return execute_command(self.server_config, command, **kwargs)
+
diff --git a/devlib/instrument/energy_probe.py b/devlib/instrument/energy_probe.py
new file mode 100644
index 0000000..3f7506a
--- /dev/null
+++ b/devlib/instrument/energy_probe.py
@@ -0,0 +1,116 @@
+#    Copyright 2015 ARM Limited
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+from __future__ import division
+import os
+import csv
+import signal
+import tempfile
+import struct
+import subprocess
+
+from devlib.instrument import Instrument, CONTINUOUS, MeasurementsCsv
+from devlib.exception import HostError
+from devlib.utils.misc import which
+
+
+class EnergyProbeInstrument(Instrument):
+
+    mode = CONTINUOUS
+
+    def __init__(self, target, resistor_values,
+                 labels=None,
+                 device_entry='/dev/ttyACM0',
+                 ):
+        super(EnergyProbeInstrument, self).__init__(target)
+        self.resistor_values = resistor_values
+        if labels is not None:
+            self.labels = labels
+        else:
+            self.labels = ['PORT_{}'.format(i)
+                           for i in xrange(len(resistor_values))]
+        self.device_entry = device_entry
+        self.caiman = which('caiman')
+        if self.caiman is None:
+            raise HostError('caiman must be installed on the host '
+                            '(see https://github.com/ARM-software/caiman)')
+        self.attributes_per_sample = 3
+        self.bytes_per_sample = self.attributes_per_sample * 4
+        self.attributes = ['power', 'voltage', 'current']
+        self.command = None
+        self.raw_output_directory = None
+        self.process = None
+        self.sample_rate_hz = 10000 # Determined empirically
+
+        for label in self.labels:
+            for kind in self.attributes:
+                self.add_channel(label, kind)
+
+    def reset(self, sites=None, kinds=None, channels=None):
+        super(EnergyProbeInstrument, self).reset(sites, kinds, channels)
+        self.raw_output_directory = tempfile.mkdtemp(prefix='eprobe-caiman-')
+        parts = ['-r {}:{} '.format(i, int(1000 * rval))
+                 for i, rval in enumerate(self.resistor_values)]
+        rstring = ''.join(parts)
+        self.command = '{} -d {} -l {} {}'.format(self.caiman, self.device_entry, rstring, self.raw_output_directory)
+
+    def start(self):
+        self.logger.debug(self.command)
+        self.process = subprocess.Popen(self.command,
+                                        stdout=subprocess.PIPE,
+                                        stderr=subprocess.PIPE,
+                                        stdin=subprocess.PIPE,
+                                        preexec_fn=os.setpgrp,
+                                        shell=True)
+
+    def stop(self):
+        self.process.poll()
+        if self.process.returncode is not None:
+            stdout, stderr = self.process.communicate()
+            raise HostError(
+                'Energy Probe: Caiman exited unexpectedly with exit code {}.\n'
+                'stdout:\n{}\nstderr:\n{}'.format(self.process.returncode,
+                                                  stdout, stderr))
+        os.killpg(self.process.pid, signal.SIGTERM)
+
+    def get_data(self, outfile):  # pylint: disable=R0914
+        all_channels = [c.label for c in self.list_channels()]
+        active_channels = [c.label for c in self.active_channels]
+        active_indexes = [all_channels.index(ac) for ac in active_channels]
+
+        num_of_ports = len(self.resistor_values)
+        struct_format = '{}I'.format(num_of_ports * self.attributes_per_sample)
+        not_a_full_row_seen = False
+        raw_data_file = os.path.join(self.raw_output_directory, '0000000000')
+
+        self.logger.debug('Parsing raw data file: {}'.format(raw_data_file))
+        with open(raw_data_file, 'rb') as bfile:
+            with open(outfile, 'wb') as wfh:
+                writer = csv.writer(wfh)
+                writer.writerow(active_channels)
+                while True:
+                    data = bfile.read(num_of_ports * self.bytes_per_sample)
+                    if data == '':
+                        break
+                    try:
+                        unpacked_data = struct.unpack(struct_format, data)
+                        row = [unpacked_data[i] / 1000 for i in active_indexes]
+                        writer.writerow(row)
+                    except struct.error:
+                        if not_a_full_row_seen:
+                            self.logger.warn('possibly misaligned caiman raw data, row contained {} bytes'.format(len(data)))
+                            continue
+                        else:
+                            not_a_full_row_seen = True
+        return MeasurementsCsv(outfile, self.active_channels)
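+
+
+# Illustrative (assumed) construction; resistor_values are the shunt resistor
+# values, in Ohms, of the probe's ports, listed in port order:
+#
+#     instrument = EnergyProbeInstrument(target, resistor_values=[0.010, 0.010],
+#                                        labels=['A57', 'A53'],
+#                                        device_entry='/dev/ttyACM0')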
diff --git a/devlib/instrument/hwmon.py b/devlib/instrument/hwmon.py
new file mode 100644
index 0000000..ae49f40
--- /dev/null
+++ b/devlib/instrument/hwmon.py
@@ -0,0 +1,89 @@
+#    Copyright 2015 ARM Limited
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+from __future__ import division
+import re
+
+from devlib.instrument import Instrument, Measurement, INSTANTANEOUS
+from devlib.exception import TargetError
+
+
+class HwmonInstrument(Instrument):
+
+    name = 'hwmon'
+    mode = INSTANTANEOUS
+
+    # sensor kind --> (measure, standard unit conversion)
+    measure_map = {
+        'temp': ('temperature', lambda x: x / 1000),
+        'in': ('voltage', lambda x: x / 1000),
+        'curr': ('current', lambda x: x / 1000),
+        'power': ('power', lambda x: x / 1000000),
+        'energy': ('energy', lambda x: x / 1000000),
+    }
+
+    def __init__(self, target):
+        if not hasattr(target, 'hwmon'):
+            raise TargetError('Target does not support HWMON')
+        super(HwmonInstrument, self).__init__(target)
+
+        self.logger.debug('Discovering available HWMON sensors...')
+        for ts in self.target.hwmon.sensors:
+            try:
+                ts.get_file('input')
+                measure = self.measure_map.get(ts.kind, (None, None))[0]
+                if measure:
+                    self.logger.debug('\tAdding sensor {}'.format(ts.name))
+                    self.add_channel(_guess_site(ts), measure, name=ts.name, sensor=ts)
+                else:
+                    self.logger.debug('\tSkipping sensor {} (unknown kind "{}")'.format(ts.name, ts.kind))
+            except ValueError:
+                message = 'Skipping sensor {} because it does not have an input file'
+                self.logger.debug(message.format(ts.name))
+                continue
+
+    def take_measurement(self):
+        result = []
+        for chan in self.active_channels:
+            convert = self.measure_map[chan.sensor.kind][1]
+            value = convert(chan.sensor.get('input'))
+            result.append(Measurement(value, chan))
+        return result
+
+
+def _guess_site(sensor):
+    """
+    HWMON does not specify a standard for labeling its sensors, or for the
+    device/item split (the implication is that each hwmon device is a separate
+    chip with possibly several sensors on it, but not everyone adheres to that,
+    e.g., with some mobile devices splitting a chip's sensors across multiple
+    hwmon devices).  This function processes the name/label of the sensor to
+    attempt to identify the best "candidate" for the site to which the sensor
+    belongs.
+    """
+    if sensor.name == sensor.label:
+        # If no label has been specified for the sensor (in which case, it
+        # defaults to the sensor's name), assume that the "site" of the sensor
+        # is identified by the HWMON device
+        text = sensor.device.name
+    else:
+        # If a label has been specified, assume multiple sensors controlled by
+        # the same device and the label identifies the site.
+        text = sensor.label
+    # strip out sensor kind suffix, if any, as that does not indicate a site
+    for kind in ['volt', 'in', 'curr', 'power', 'energy',
+                 'temp', 'voltage', 'temperature', 'current']:
+        if kind in text.lower():
+            regex = re.compile(r'_*{}\d*_*'.format(kind), re.I)
+            text = regex.sub('', text)
+    return text.strip()
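
As a usage sketch for the instrument above (assuming a connected devlib ``target`` that exposes the ``hwmon`` module, and assuming the ``Instrument.reset(sites, kinds, channels)`` filtering shown for the other instruments in this change applies here as well)::

    from devlib.instrument.hwmon import HwmonInstrument

    instrument = HwmonInstrument(target)       # 'target' is an already-connected target

    # Optionally narrow the active channels, e.g. to temperature sensors only.
    instrument.reset(kinds=['temperature'])

    # INSTANTANEOUS mode: a one-off reading from every active channel.
    for measurement in instrument.take_measurement():
        print(measurement)
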
diff --git a/devlib/instrument/monsoon.py b/devlib/instrument/monsoon.py
new file mode 100644
index 0000000..df908b1
--- /dev/null
+++ b/devlib/instrument/monsoon.py
@@ -0,0 +1,139 @@
+import csv
+import os
+import signal
+from subprocess import Popen, PIPE
+from tempfile import NamedTemporaryFile
+from devlib.instrument import Instrument, CONTINUOUS, MeasurementsCsv
+from devlib.exception import HostError
+from devlib.host import PACKAGE_BIN_DIRECTORY
+from devlib.utils.misc import which
+
+INSTALL_INSTRUCTIONS = """
+MonsoonInstrument requires the monsoon.py tool, available from AOSP:
+
+https://android.googlesource.com/platform/cts/+/master/tools/utils/monsoon.py
+
+Download this script and put it in your $PATH (or pass it as the monsoon_bin
+parameter to MonsoonInstrument). `pip install gflags pyserial` to install the
+dependencies.
+"""
+
+class MonsoonInstrument(Instrument):
+    """Instrument for Monsoon Solutions power monitor
+
+    To use this instrument, you need to install the monsoon.py script available
+    from the Android Open Source Project. As of May 2017 this is under the CTS
+    repository:
+
+    https://android.googlesource.com/platform/cts/+/master/tools/utils/monsoon.py
+
+    Collects power measurements only, from a selection of two channels, the USB
+    passthrough channel and the main output channel.
+
+    :param target: Ignored
+    :param monsoon_bin: Path to monsoon.py executable. If not provided,
+                        ``$PATH`` is searched.
+    :param tty_device: TTY device to use to communicate with the Power
+                       Monitor. If not provided, a sane default is used.
+    """
+
+    mode = CONTINUOUS
+
+    def __init__(self, target, monsoon_bin=None, tty_device=None):
+        super(MonsoonInstrument, self).__init__(target)
+        self.monsoon_bin = monsoon_bin or which('monsoon.py')
+        if not self.monsoon_bin:
+            raise HostError(INSTALL_INSTRUCTIONS)
+
+        self.tty_device = tty_device
+
+        self.process = None
+        self.output = None
+
+        self.sample_rate_hz = 500
+        self.add_channel('output', 'power')
+        self.add_channel('USB', 'power')
+
+    def reset(self, sites=None, kinds=None, channels=None):
+        super(MonsoonInstrument, self).reset(sites, kinds, channels)
+
+    def start(self):
+        if self.process:
+            self.process.kill()
+
+        os.system(self.monsoon_bin + ' --usbpassthrough off')
+
+        cmd = [self.monsoon_bin,
+               '--hz', str(self.sample_rate_hz),
+               '--samples', '-1', # -1 means sample indefinitely
+               '--includeusb']
+        if self.tty_device:
+            cmd += ['--device', self.tty_device]
+
+        self.logger.debug(' '.join(cmd))
+        self.buffer_file = NamedTemporaryFile(prefix='monsoon', delete=False)
+        self.process = Popen(cmd, stdout=self.buffer_file, stderr=PIPE)
+
+    def stop(self):
+        process = self.process
+        self.process = None
+        if not process:
+            raise RuntimeError('Monsoon script not started')
+
+        process.poll()
+        if process.returncode is not None:
+            stdout, stderr = process.communicate()
+            raise HostError(
+                'Monsoon script exited unexpectedly with exit code {}.\n'
+                'stdout:\n{}\nstderr:\n{}'.format(process.returncode,
+                                                  stdout, stderr))
+
+        process.send_signal(signal.SIGINT)
+
+        stderr = process.stderr.read()
+
+        self.buffer_file.close()
+        with open(self.buffer_file.name) as f:
+            stdout = f.read()
+        os.remove(self.buffer_file.name)
+        self.buffer_file = None
+
+        self.output = (stdout, stderr)
+        os.system(self.monsoon_bin + ' --usbpassthrough on')
+
+        # Wait for USB connection to be restored
+        self.logger.debug('Waiting for USB connection to be restored')
+        os.system('adb wait-for-device')
+
+    def get_data(self, outfile):
+        if self.process:
+            raise RuntimeError('`get_data` called before `stop`')
+
+        stdout, stderr = self.output
+
+        with open(outfile, 'wb') as f:
+            writer = csv.writer(f)
+            active_sites = [c.site for c in self.active_channels]
+
+            # Write column headers
+            row = []
+            if 'output' in active_sites:
+                row.append('output_power')
+            if 'USB' in active_sites:
+                row.append('USB_power')
+            writer.writerow(row)
+
+            # Write data
+            for line in stdout.splitlines():
+                # Each output line is a main_output, usb_output measurement pair.
+                # (If our user only requested one channel we still collect both,
+                # and just ignore one of them)
+                output, usb = line.split()
+                row = []
+                if 'output' in active_sites:
+                    row.append(output)
+                if 'USB' in active_sites:
+                    row.append(usb)
+                writer.writerow(row)
+
+        return MeasurementsCsv(outfile, self.active_channels)
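
A typical collection cycle with the instrument above would look roughly as follows (a sketch, assuming ``monsoon.py`` is on ``$PATH`` and ``target`` is an already-connected Android target; ``run_workload()`` is a placeholder for whatever should be measured)::

    from devlib.instrument.monsoon import MonsoonInstrument

    instrument = MonsoonInstrument(target)     # or monsoon_bin='/path/to/monsoon.py'
    instrument.reset(sites=['output'])         # keep only the main output channel active

    instrument.start()                         # spawns monsoon.py and buffers its samples
    run_workload()                             # placeholder workload
    instrument.stop()                          # stops sampling, re-enables USB passthrough

    # Returns a MeasurementsCsv wrapping the file written to 'monsoon_power.csv'.
    result = instrument.get_data('monsoon_power.csv')
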
diff --git a/devlib/instrument/netstats/__init__.py b/devlib/instrument/netstats/__init__.py
new file mode 100644
index 0000000..f42ea9b
--- /dev/null
+++ b/devlib/instrument/netstats/__init__.py
@@ -0,0 +1,135 @@
+import os
+import re
+import csv
+import tempfile
+from datetime import datetime
+from collections import defaultdict
+from itertools import izip_longest
+
+from devlib.instrument import Instrument, MeasurementsCsv, CONTINUOUS
+from devlib.exception import TargetError, HostError
+from devlib.utils.android import ApkInfo
+
+
+THIS_DIR = os.path.dirname(__file__)
+
+NETSTAT_REGEX = re.compile(r'I/(?P<tag>netstats-\d+)\(\s*\d*\): (?P<ts>\d+) '
+                           r'"(?P<package>[^"]+)" TX: (?P<tx>\S+) RX: (?P<rx>\S+)')
+
+
+def extract_netstats(filepath, tag=None):
+    netstats = []
+    with open(filepath) as fh:
+        for line in fh:
+            match = NETSTAT_REGEX.search(line)
+            if not match:
+                continue
+            if tag and match.group('tag') != tag:
+                continue
+            netstats.append((match.group('tag'),
+                             match.group('ts'),
+                             match.group('package'),
+                             match.group('tx'),
+                             match.group('rx')))
+    return netstats
+
+
+def netstats_to_measurements(netstats):
+    measurements = defaultdict(list)
+    for row in netstats:
+        tag, ts, package, tx, rx = row  # pylint: disable=unused-variable
+        measurements[package + '_tx'].append(tx)
+        measurements[package + '_rx'].append(rx)
+    return measurements
+
+
+def write_measurements_csv(measurements, filepath):
+    headers = sorted(measurements.keys())
+    columns = [measurements[h] for h in headers]
+    with open(filepath, 'wb') as wfh:
+        writer = csv.writer(wfh)
+        writer.writerow(headers)
+        writer.writerows(izip_longest(*columns))
+
+
+class NetstatsInstrument(Instrument):
+
+    mode = CONTINUOUS
+
+    def __init__(self, target, apk=None, service='.TrafficMetricsService'):
+        """
+        Additional parameters:
+
+        :apk: Path to the APK file that contains ``com.arm.devlab.netstats``
+              package. If not specified, it will be assumed that an APK with
+              name "netstats.apk" is located in the same directory as the
+              Python module for the instrument.
+        :service: Name of the service to be launched. This service must be
+                  present in the APK.
+
+        """
+        if target.os != 'android':
+            raise TargetError('netstats instrument only supports Android targets')
+        if apk is None:
+            apk = os.path.join(THIS_DIR, 'netstats.apk')
+        if not os.path.isfile(apk):
+            raise HostError('APK for netstats instrument does not exist ({})'.format(apk))
+        super(NetstatsInstrument, self).__init__(target)
+        self.apk = apk
+        self.package = ApkInfo(self.apk).package
+        self.service = service
+        self.tag = None
+        self.command = None
+        self.stop_command = 'am kill {}'.format(self.package)
+
+        for package in self.target.list_packages():
+            self.add_channel(package, 'tx')
+            self.add_channel(package, 'rx')
+
+    def setup(self, force=False, *args, **kwargs):
+        if self.target.package_is_installed(self.package):
+            if force:
+                self.logger.debug('Re-installing {} (forced)'.format(self.package))
+                self.target.uninstall_package(self.package)
+                self.target.install(self.apk)
+            else:
+                self.logger.debug('{} already present on target'.format(self.package))
+        else:
+            self.logger.debug('Deploying {} to target'.format(self.package))
+            self.target.install(self.apk)
+
+    def reset(self, sites=None, kinds=None, channels=None, period=None):  # pylint: disable=arguments-differ
+        super(NetstatsInstrument, self).reset(sites, kinds, channels)
+        period_arg, packages_arg = '', ''
+        self.tag = 'netstats-{}'.format(datetime.now().strftime('%Y%m%d%H%M%s'))
+        tag_arg = ' --es tag {}'.format(self.tag)
+        if sites:
+            packages_arg = ' --es packages {}'.format(','.join(sites))
+        if period:
+            period_arg = ' --ei period {}'.format(period)
+        self.command = 'am startservice{}{}{} {}/{}'.format(tag_arg,
+                                                            period_arg,
+                                                            packages_arg,
+                                                            self.package,
+                                                            self.service)
+        self.target.execute(self.stop_command)  # ensure the service is not running.
+
+    def start(self):
+        if self.command is None:
+            raise RuntimeError('reset() must be called before start()')
+        self.target.execute(self.command)
+
+    def stop(self):
+        self.target.execute(self.stop_command)
+
+    def get_data(self, outfile):
+        raw_log_file = tempfile.mktemp()
+        self.target.dump_logcat(raw_log_file)
+        data = extract_netstats(raw_log_file)
+        measurements = netstats_to_measurements(data)
+        write_measurements_csv(measurements, outfile)
+        os.remove(raw_log_file)
+        return MeasurementsCsv(outfile, self.active_channels)
+
+    def teardown(self):
+        self.target.uninstall_package(self.package)
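
The regex-based helpers above turn logcat output into per-package tx/rx columns. A small sketch of that flow on a synthetic logcat line (the line below is fabricated to match ``NETSTAT_REGEX``, not captured from a real device)::

    from devlib.instrument.netstats import (NETSTAT_REGEX,
                                            netstats_to_measurements,
                                            write_measurements_csv)

    sample = 'I/netstats-20170601120000(  1234): 1496318400 "com.example.app" TX: 1024 RX: 2048'

    match = NETSTAT_REGEX.search(sample)
    row = (match.group('tag'), match.group('ts'), match.group('package'),
           match.group('tx'), match.group('rx'))

    measurements = netstats_to_measurements([row])
    # -> {'com.example.app_tx': ['1024'], 'com.example.app_rx': ['2048']}

    write_measurements_csv(measurements, 'netstats.csv')
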
diff --git a/devlib/instrument/netstats/netstats.apk b/devlib/instrument/netstats/netstats.apk
new file mode 100644
index 0000000..8b93da6
--- /dev/null
+++ b/devlib/instrument/netstats/netstats.apk
Binary files differ
diff --git a/devlib/module/__init__.py b/devlib/module/__init__.py
new file mode 100644
index 0000000..5cf0a43
--- /dev/null
+++ b/devlib/module/__init__.py
@@ -0,0 +1,122 @@
+#    Copyright 2014-2015 ARM Limited
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+import logging
+from inspect import isclass
+
+from devlib.utils.misc import walk_modules
+from devlib.utils.types import identifier
+
+
+__module_cache = {}
+
+
+class Module(object):
+
+    name = None
+    kind = None
+    # This is the stage at which the module will be installed. Current valid
+    # stages are:
+    #  'early' -- installed when the Target is first created. This should be
+    #             used for modules that do not rely on the main connection
+    #             being established (usually because they communicate with the
+    #             target through some sort of secondary connection, e.g. via
+    #             serial).
+    #  'connected' -- installed when a connection to the target has been
+    #                 established. This is the default.
+    stage = 'connected'
+
+    @staticmethod
+    def probe(target):
+        raise NotImplementedError()
+
+    @classmethod
+    def install(cls, target, **params):
+        if cls.kind is not None:
+            attr_name = identifier(cls.kind)
+        else:
+            attr_name = identifier(cls.name)
+        if hasattr(target, attr_name):
+            existing_module = getattr(target, attr_name)
+            existing_name = getattr(existing_module, 'name', str(existing_module))
+            message = 'Attempting to install module "{}" which already exists (new: {}, existing: {})'
+            raise ValueError(message.format(attr_name, cls.name, existing_name))
+        setattr(target, attr_name, cls(target, **params))
+
+    def __init__(self, target):
+        self.target = target
+        self.logger = logging.getLogger(self.__class__.__name__)
+
+
+class HardRestModule(Module):  # pylint: disable=R0921
+
+    kind = 'hard_reset'
+
+    def __call__(self):
+        raise NotImplementedError()
+
+
+class BootModule(Module):  # pylint: disable=R0921
+
+    kind = 'boot'
+
+    def __call__(self):
+        raise NotImplementedError()
+
+    def update(self, **kwargs):
+        for name, value in kwargs.iteritems():
+            if not hasattr(self, name):
+                raise ValueError('Unknown parameter "{}" for {}'.format(name, self.name))
+            self.logger.debug('Updating "{}" to "{}"'.format(name, value))
+            setattr(self, name, value)
+
+
+class FlashModule(Module):
+
+    kind = 'flash'
+
+    def __call__(self, image_bundle=None, images=None, boot_config=None):
+        raise NotImplementedError()
+
+
+def get_module(mod):
+    if not __module_cache:
+        __load_cache()
+
+    if isinstance(mod, basestring):
+        try:
+            return __module_cache[mod]
+        except KeyError:
+            raise ValueError('Module "{}" does not exist'.format(mod))
+    elif issubclass(mod, Module):
+        return mod
+    else:
+        raise ValueError('Not a valid module: {}'.format(mod))
+
+
+def register_module(mod):
+    if not issubclass(mod, Module):
+        raise ValueError('A module must subclass devlib.Module')
+    if mod.name is None:
+        raise ValueError('A module must define a name')
+    if mod.name in __module_cache:
+        raise ValueError('Module {} already exists'.format(mod.name))
+    __module_cache[mod.name] = mod
+
+
+def __load_cache():
+    for module in walk_modules('devlib.module'):
+        for obj in vars(module).itervalues():
+            if isclass(obj) and issubclass(obj, Module) and obj.name:
+                register_module(obj)
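
A minimal sketch of how the registration machinery above is intended to be used, with a hypothetical module (the name, sysfs path and methods below are purely illustrative)::

    from devlib.module import Module, register_module


    class LedModule(Module):
        """Hypothetical module toggling a status LED exposed through sysfs."""

        name = 'led'
        stage = 'connected'   # default: installed once the connection is up

        LED_PATH = '/sys/class/leds/status/brightness'   # illustrative path

        @staticmethod
        def probe(target):
            return target.file_exists(LedModule.LED_PATH)

        def on(self):
            self.target.write_value(self.LED_PATH, 1, verify=False)

        def off(self):
            self.target.write_value(self.LED_PATH, 0, verify=False)


    register_module(LedModule)
    # After LedModule.install(target), the module is reachable as target.led,
    # following the attribute-naming rule in Module.install() above.
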
diff --git a/devlib/module/android.py b/devlib/module/android.py
new file mode 100644
index 0000000..bec0c6f
--- /dev/null
+++ b/devlib/module/android.py
@@ -0,0 +1,128 @@
+#    Copyright 2014-2015 ARM Limited
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+# pylint: disable=attribute-defined-outside-init
+import os
+import time
+import tarfile
+import tempfile
+
+from devlib.module import FlashModule
+from devlib.exception import HostError
+from devlib.utils.android import fastboot_flash_partition, fastboot_command
+from devlib.utils.misc import merge_dicts
+
+
+class FastbootFlashModule(FlashModule):
+
+    name = 'fastboot'
+    description = """
+    Enables automated flashing of images using the fastboot utility.
+
+    To use this flasher, a set of image files to be flashed is required.
+    In addition, a mapping between partitions and image files is required. There are two ways
+    to specify those requirements:
+
+    - Image mapping: In this mode, a mapping between partitions and images is given in the agenda.
+    - Image Bundle: In this mode, a tarball is specified, which must contain all image files as well
+      as a partition file, named ``partitions.txt``, which contains the mapping between
+      partitions and images.
+
+    The format of ``partitions.txt`` defines one mapping per line as such: ::
+
+        kernel zImage-dtb
+        ramdisk ramdisk_image
+
+    """
+
+    delay = 0.5
+    partitions_file_name = 'partitions.txt'
+
+    @staticmethod
+    def probe(target):
+        return target.os == 'android'
+
+    def __call__(self, image_bundle=None, images=None, bootargs=None):
+        if bootargs:
+            raise ValueError('{} does not support boot configuration'.format(self.name))
+        self.prelude_done = False
+        to_flash = {}
+        if image_bundle:  # pylint: disable=access-member-before-definition
+            image_bundle = expand_path(image_bundle)
+            to_flash = self._bundle_to_images(image_bundle)
+        to_flash = merge_dicts(to_flash, images or {}, should_normalize=False)
+        for partition, image_path in to_flash.iteritems():
+            self.logger.debug('flashing {}'.format(partition))
+            self._flash_image(self.target, partition, expand_path(image_path))
+        fastboot_command('reboot')
+        self.target.connect(timeout=180)
+
+    def _validate_image_bundle(self, image_bundle):
+        if not tarfile.is_tarfile(image_bundle):
+            raise HostError('File {} is not a tarfile'.format(image_bundle))
+        with tarfile.open(image_bundle) as tar:
+            files = [tf.name for tf in tar.getmembers()]
+            if not any(pf in files for pf in (self.partitions_file_name, '{}/{}'.format(files[0], self.partitions_file_name))):
+                raise HostError('Image bundle does not contain the required partition file (see documentation)')
+
+    def _bundle_to_images(self, image_bundle):
+        """
+        Extracts the bundle to a temporary location and creates a mapping between the contents of the bundle
+        and the images to be flashed.
+        """
+        self._validate_image_bundle(image_bundle)
+        extract_dir = tempfile.mkdtemp()
+        with tarfile.open(image_bundle) as tar:
+            tar.extractall(path=extract_dir)
+            files = [tf.name for tf in tar.getmembers()]
+            if self.partitions_file_name not in files:
+                extract_dir = os.path.join(extract_dir, files[0])
+        partition_file = os.path.join(extract_dir, self.partitions_file_name)
+        return get_mapping(extract_dir, partition_file)
+
+    def _flash_image(self, target, partition, image_path):
+        if not self.prelude_done:
+            self._fastboot_prelude(target)
+        fastboot_flash_partition(partition, image_path)
+        time.sleep(self.delay)
+
+    def _fastboot_prelude(self, target):
+        target.reset(fastboot=True)
+        time.sleep(self.delay)
+        self.prelude_done = True
+
+
+# utility functions
+
+def expand_path(original_path):
+    path = os.path.abspath(os.path.expanduser(original_path))
+    if not os.path.exists(path):
+        raise HostError('{} does not exist.'.format(path))
+    return path
+
+
+def get_mapping(base_dir, partition_file):
+    mapping = {}
+    with open(partition_file) as pf:
+        for line in pf:
+            pair = line.split()
+            if len(pair) != 2:
+                raise HostError('partitions.txt is not properly formatted')
+            image_path = os.path.join(base_dir, pair[1])
+            if not os.path.isfile(expand_path(image_path)):
+                raise HostError('file {} was not found in the bundle or was misplaced'.format(pair[1]))
+            mapping[pair[0]] = image_path
+    return mapping
+
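
Once this module has been installed on a target, it is exposed through its ``flash`` kind; a usage sketch (the image paths below are placeholders)::

    # Flash from an explicit partition -> image mapping...
    target.flash(images={'kernel': '/path/to/zImage-dtb',
                         'ramdisk': '/path/to/ramdisk_image'})

    # ...or from a bundle containing the images plus a partitions.txt file.
    target.flash(image_bundle='/path/to/images.tar.gz')
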
diff --git a/devlib/module/biglittle.py b/devlib/module/biglittle.py
new file mode 100644
index 0000000..eafcb0a
--- /dev/null
+++ b/devlib/module/biglittle.py
@@ -0,0 +1,122 @@
+from devlib.module import Module
+
+
+class BigLittleModule(Module):
+
+    name = 'bl'
+
+    @staticmethod
+    def probe(target):
+        return target.big_core is not None
+
+    @property
+    def bigs(self):
+        return [i for i, c in enumerate(self.target.platform.core_names)
+                if c == self.target.platform.big_core]
+
+    @property
+    def littles(self):
+        return [i for i, c in enumerate(self.target.platform.core_names)
+                if c == self.target.platform.little_core]
+
+    @property
+    def bigs_online(self):
+        return list(sorted(set(self.bigs).intersection(self.target.list_online_cpus())))
+
+    @property
+    def littles_online(self):
+        return list(sorted(set(self.littles).intersection(self.target.list_online_cpus())))
+
+    # hotplug
+
+    def online_all_bigs(self):
+        self.target.hotplug.online(*self.bigs)
+
+    def offline_all_bigs(self):
+        self.target.hotplug.offline(*self.bigs)
+
+    def online_all_littles(self):
+        self.target.hotplug.online(*self.littles)
+
+    def offline_all_littles(self):
+        self.target.hotplug.offline(*self.littles)
+
+    # cpufreq
+
+    def list_bigs_frequencies(self):
+        return self.target.cpufreq.list_frequencies(self.bigs_online[0])
+
+    def list_bigs_governors(self):
+        return self.target.cpufreq.list_governors(self.bigs_online[0])
+
+    def list_bigs_governor_tunables(self):
+        return self.target.cpufreq.list_governor_tunables(self.bigs_online[0])
+
+    def list_littles_frequencies(self):
+        return self.target.cpufreq.list_frequencies(self.littles_online[0])
+
+    def list_littles_governors(self):
+        return self.target.cpufreq.list_governors(self.littles_online[0])
+
+    def list_littles_governor_tunables(self):
+        return self.target.cpufreq.list_governor_tunables(self.littles_online[0])
+
+    def get_bigs_governor(self):
+        return self.target.cpufreq.get_governor(self.bigs_online[0])
+
+    def get_bigs_governor_tunables(self):
+        return self.target.cpufreq.get_governor_tunables(self.bigs_online[0])
+
+    def get_bigs_frequency(self):
+        return self.target.cpufreq.get_frequency(self.bigs_online[0])
+
+    def get_bigs_min_frequency(self):
+        return self.target.cpufreq.get_min_frequency(self.bigs_online[0])
+
+    def get_bigs_max_frequency(self):
+        return self.target.cpufreq.get_max_frequency(self.bigs_online[0])
+
+    def get_littles_governor(self):
+        return self.target.cpufreq.get_governor(self.littles_online[0])
+
+    def get_littles_governor_tunables(self):
+        return self.target.cpufreq.get_governor_tunables(self.littles_online[0])
+
+    def get_littles_frequency(self):
+        return self.target.cpufreq.get_frequency(self.littles_online[0])
+
+    def get_littles_min_frequency(self):
+        return self.target.cpufreq.get_min_frequency(self.littles_online[0])
+
+    def get_littles_max_frequency(self):
+        return self.target.cpufreq.get_max_frequency(self.littles_online[0])
+
+    def set_bigs_governor(self, governor, **kwargs):
+        self.target.cpufreq.set_governor(self.bigs_online[0], governor, **kwargs)
+
+    def set_bigs_governor_tunables(self, governor, **kwargs):
+        self.target.cpufreq.set_governor_tunables(self.bigs_online[0], governor, **kwargs)
+
+    def set_bigs_frequency(self, frequency, exact=True):
+        self.target.cpufreq.set_frequency(self.bigs_online[0], frequency, exact)
+
+    def set_bigs_min_frequency(self, frequency, exact=True):
+        self.target.cpufreq.set_min_frequency(self.bigs_online[0], frequency, exact)
+
+    def set_bigs_max_frequency(self, frequency, exact=True):
+        self.target.cpufreq.set_max_frequency(self.bigs_online[0], frequency, exact)
+
+    def set_littles_governor(self, governor, **kwargs):
+        self.target.cpufreq.set_governor(self.littles_online[0], governor, **kwargs)
+
+    def set_littles_governor_tunables(self, governor, **kwargs):
+        self.target.cpufreq.set_governor_tunables(self.littles_online[0], governor, **kwargs)
+
+    def set_littles_frequency(self, frequency, exact=True):
+        self.target.cpufreq.set_frequency(self.littles_online[0], frequency, exact)
+
+    def set_littles_min_frequency(self, frequency, exact=True):
+        self.target.cpufreq.set_min_frequency(self.littles_online[0], frequency, exact)
+
+    def set_littles_max_frequency(self, frequency, exact=True):
+        self.target.cpufreq.set_max_frequency(self.littles_online[0], frequency, exact)
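
With the module above installed it is exposed on the target as ``bl`` (after its ``name``) and acts as a thin convenience layer over the ``cpufreq`` and ``hotplug`` modules. A brief sketch, assuming the device supports the ``userspace`` governor::

    # Hotplug: bring all big cores online, take the LITTLE cores offline.
    target.bl.online_all_bigs()
    target.bl.offline_all_littles()

    # cpufreq: pin the big cluster to its highest available frequency.
    target.bl.set_bigs_governor('userspace')
    frequencies = target.bl.list_bigs_frequencies()
    target.bl.set_bigs_frequency(max(frequencies))

    print(target.bl.get_bigs_frequency())
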
diff --git a/devlib/module/cgroups.py b/devlib/module/cgroups.py
new file mode 100644
index 0000000..bfe2785
--- /dev/null
+++ b/devlib/module/cgroups.py
@@ -0,0 +1,488 @@
+#    Copyright 2014-2015 ARM Limited
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+# pylint: disable=attribute-defined-outside-init
+import logging
+from collections import namedtuple
+
+from devlib.module import Module
+from devlib.exception import TargetError
+from devlib.utils.misc import list_to_ranges, isiterable
+from devlib.utils.types import boolean
+
+
+class Controller(object):
+
+    def __init__(self, kind, hid, clist):
+        """
+        Initialize a controller given the hierarchy it belongs to.
+
+        :param kind: the name of the controller
+        :type kind: str
+
+        :param hid: the Hierarchy ID this controller is mounted on
+        :type hid: int
+
+        :param clist: the list of controller mounted in the same hierarchy
+        :type clist: list(str)
+        """
+        self.mount_name = 'devlib_cgh{}'.format(hid)
+        self.kind = kind
+        self.hid = hid
+        self.clist = clist
+        self.target = None
+        self._noprefix = False
+
+        self.logger = logging.getLogger('CGroup.'+self.kind)
+        self.logger.debug('Initialized [%s, %d, %s]',
+                          self.kind, self.hid, self.clist)
+
+        self.mount_point = None
+        self._cgroups = {}
+
+    def mount(self, target, mount_root):
+
+        mounted = target.list_file_systems()
+        if self.mount_name in [e.device for e in mounted]:
+            # Identify mount point if controller is already in use
+            self.mount_point = [
+                    fs.mount_point
+                    for fs in mounted
+                    if fs.device == self.mount_name
+                ][0]
+        else:
+            # Mount the controller if not already in use
+            self.mount_point = target.path.join(mount_root, self.mount_name)
+            target.execute('mkdir -p {} 2>/dev/null'\
+                    .format(self.mount_point), as_root=True)
+            target.execute('mount -t cgroup -o {} {} {}'\
+                    .format(','.join(self.clist),
+                            self.mount_name,
+                            self.mount_point),
+                            as_root=True)
+
+        # Check if this controller uses "noprefix" option
+        output = target.execute('mount | grep "{} "'.format(self.mount_name))
+        if 'noprefix' in output:
+            self._noprefix = True
+            # self.logger.debug('Controller %s using "noprefix" option',
+            #                   self.kind)
+
+        self.logger.debug('Controller %s mounted under: %s (noprefix=%s)',
+            self.kind, self.mount_point, self._noprefix)
+
+        # Mark this contoller as available
+        self.target = target
+
+        # Create root control group
+        self.cgroup('/')
+
+    def cgroup(self, name):
+        if not self.target:
+            raise RuntimeError('CGroup creation failed: {} controller not mounted'\
+                    .format(self.kind))
+        if name not in self._cgroups:
+            self._cgroups[name] = CGroup(self, name)
+        return self._cgroups[name]
+
+    def exists(self, name):
+        if not self.target:
+            raise RuntimeError('CGroup creation failed: {} controller not mounted'\
+                    .format(self.kind))
+        if name not in self._cgroups:
+            self._cgroups[name] = CGroup(self, name, create=False)
+        return self._cgroups[name].exists()
+
+    def list_all(self):
+        self.logger.debug('Listing groups for %s controller', self.kind)
+        output = self.target.execute('{} find {} -type d'\
+                .format(self.target.busybox, self.mount_point),
+                as_root=True)
+        cgroups = []
+        for cg in output.splitlines():
+            cg = cg.replace(self.mount_point + '/', '/')
+            cg = cg.replace(self.mount_point, '/')
+            cg = cg.strip()
+            if cg == '':
+                continue
+            self.logger.debug('Populate %s cgroup: %s', self.kind, cg)
+            cgroups.append(cg)
+        return cgroups
+
+    def move_tasks(self, source, dest, exclude=[]):
+        try:
+            srcg = self._cgroups[source]
+            dstg = self._cgroups[dest]
+        except KeyError as e:
+            raise ValueError('Unknown group: {}'.format(e))
+        output = self.target._execute_util(
+                    'cgroups_tasks_move {} {} \'{}\''.format(
+                    srcg.directory, dstg.directory, exclude),
+                    as_root=True)
+
+    def move_all_tasks_to(self, dest, exclude=[]):
+        """
+        Move all the tasks to the specified CGroup
+
+        Tasks are moved from their original CGroups to the specified one.
+        Tasks whose name matches one of the strings in exclude are moved
+        into the root CGroup for the controller instead.
+        The name of a task to exclude must be a substring of the task name as
+        reported by the "ps" command. Indeed, this list will be translated into
+        a "ps | grep -e name1 -e name2..." in order to obtain the PIDs of these
+        tasks.
+
+        :param exclude: list of commands to keep in the root CGroup
+        :type exclude: list(str)
+        """
+
+        if isinstance(exclude, str):
+            exclude = [exclude]
+        if not isinstance(exclude, list):
+            raise ValueError('wrong type for "exclude" parameter, '
+                             'it must be a str or a list')
+
+        logging.debug('Moving all tasks into %s', dest)
+
+        # Build list of tasks to exclude
+        grep_filters = ''
+        for comm in exclude:
+            grep_filters += '-e {} '.format(comm)
+        logging.debug('   using grep filter: %s', grep_filters)
+        if grep_filters != '':
+            logging.debug('   excluding tasks which name matches:')
+            logging.debug('   %s', ', '.join(exclude))
+
+        for cgroup in self._cgroups:
+            if cgroup != dest:
+                self.move_tasks(cgroup, dest, grep_filters)
+
+    def tasks(self, cgroup):
+        try:
+            cg = self._cgroups[cgroup]
+        except KeyError as e:
+            raise ValueError('Unknown group: {}'.format(e))
+        output = self.target._execute_util(
+                    'cgroups_tasks_in {}'.format(cg.directory),
+                    as_root=True)
+        entries = output.splitlines()
+        tasks = {}
+        for task in entries:
+            tid = task.split(',')[0]
+            try:
+                tname = task.split(',')[1]
+            except IndexError:
+                continue
+            try:
+                tcmdline = task.split(',')[2]
+            except IndexError:
+                tcmdline = ''
+            tasks[int(tid)] = (tname, tcmdline)
+        return tasks
+
+    def tasks_count(self, cgroup):
+        try:
+            cg = self._cgroups[cgroup]
+        except KeyError as e:
+            raise ValueError('Unknown group: {}'.format(e))
+        output = self.target.execute(
+                    '{} wc -l {}/tasks'.format(
+                    self.target.busybox, cg.directory),
+                    as_root=True)
+        return int(output.split()[0])
+
+    def tasks_per_group(self):
+        tasks = {}
+        for cg in self.list_all():
+            tasks[cg] = self.tasks_count(cg)
+        return tasks
+
+class CGroup(object):
+
+    def __init__(self, controller, name, create=True):
+        self.logger = logging.getLogger('cgroups.' + controller.kind)
+        self.target = controller.target
+        self.controller = controller
+        self.name = name
+
+        # Control cgroup path
+        self.directory = controller.mount_point
+        if name != '/':
+            self.directory = self.target.path.join(controller.mount_point, name[1:])
+
+        # Setup path for tasks file
+        self.tasks_file = self.target.path.join(self.directory, 'tasks')
+        self.procs_file = self.target.path.join(self.directory, 'cgroup.procs')
+
+        if not create:
+            return
+
+        self.logger.debug('Creating cgroup %s', self.directory)
+        self.target.execute('[ -d {0} ] || mkdir -p {0}'\
+                .format(self.directory), as_root=True)
+
+    def exists(self):
+        try:
+            self.target.execute('[ -d {0} ]'\
+                .format(self.directory), as_root=True)
+            return True
+        except TargetError:
+            return False
+
+    def get(self):
+        conf = {}
+
+        logging.debug('Reading %s attributes from:',
+                self.controller.kind)
+        logging.debug('  %s',
+                self.directory)
+        output = self.target._execute_util(
+                    'cgroups_get_attributes {} {}'.format(
+                    self.directory, self.controller.kind),
+                    as_root=True)
+        for res in output.splitlines():
+            attr = res.split(':')[0]
+            value = res.split(':')[1]
+            conf[attr] = value
+
+        return conf
+
+    def set(self, **attrs):
+        for idx in attrs:
+            if isiterable(attrs[idx]):
+                attrs[idx] = list_to_ranges(attrs[idx])
+            # Build attribute path
+            if self.controller._noprefix:
+                attr_name = '{}'.format(idx)
+            else:
+                attr_name = '{}.{}'.format(self.controller.kind, idx)
+            path = self.target.path.join(self.directory, attr_name)
+
+            self.logger.debug('Set attribute [%s] to: %s',
+                    path, attrs[idx])
+
+            # Set the attribute value
+            try:
+                self.target.write_value(path, attrs[idx])
+            except TargetError:
+                # Check if the error is due to a non-existing attribute
+                attrs = self.get()
+                if idx not in attrs:
+                    raise ValueError('Controller [{}] does not provide attribute [{}]'\
+                                     .format(self.controller.kind, attr_name))
+                raise
+
+    def get_tasks(self):
+        task_ids = self.target.read_value(self.tasks_file).split()
+        logging.debug('Tasks: %s', task_ids)
+        return map(int, task_ids)
+
+    def add_task(self, tid):
+        self.target.write_value(self.tasks_file, tid, verify=False)
+
+    def add_tasks(self, tasks):
+        for tid in tasks:
+            self.add_task(tid)
+
+    def add_proc(self, pid):
+        self.target.write_value(self.procs_file, pid, verify=False)
+
+CgroupSubsystemEntry = namedtuple('CgroupSubsystemEntry', 'name hierarchy num_cgroups enabled')
+
+class CgroupsModule(Module):
+
+    name = 'cgroups'
+    stage = 'setup'
+
+    @staticmethod
+    def probe(target):
+        if not target.is_rooted:
+            return False
+        if target.file_exists('/proc/cgroups'):
+            return True
+        return target.config.has('cgroups')
+
+    def __init__(self, target):
+        super(CgroupsModule, self).__init__(target)
+
+        self.logger = logging.getLogger('CGroups')
+
+        # Set Devlib's CGroups mount point
+        self.cgroup_root = target.path.join(
+            target.working_directory, 'cgroups')
+
+        # Get the list of the available controllers
+        subsys = self.list_subsystems()
+        if len(subsys) == 0:
+            self.logger.warning('No CGroups controller available')
+            return
+
+        # Map hierarchy IDs into a list of controllers
+        hierarchy = {}
+        for ss in subsys:
+            try:
+                hierarchy[ss.hierarchy].append(ss.name)
+            except KeyError:
+                hierarchy[ss.hierarchy] = [ss.name]
+        self.logger.debug('Available hierarchies: %s', hierarchy)
+
+        # Initialize controllers
+        self.logger.info('Available controllers:')
+        self.controllers = {}
+        for ss in subsys:
+            hid = ss.hierarchy
+            controller = Controller(ss.name, hid, hierarchy[hid])
+            try:
+                controller.mount(self.target, self.cgroup_root)
+            except TargetError:
+                message = 'Failed to mount "{}" controller'
+                raise TargetError(message.format(controller.kind))
+            self.logger.info('  %-12s : %s', controller.kind,
+                             controller.mount_point)
+            self.controllers[ss.name] = controller
+
+    def list_subsystems(self):
+        subsystems = []
+        for line in self.target.execute('{} cat /proc/cgroups'\
+                .format(self.target.busybox)).splitlines()[1:]:
+            line = line.strip()
+            if not line or line.startswith('#'):
+                continue
+            name, hierarchy, num_cgroups, enabled = line.split()
+            subsystems.append(CgroupSubsystemEntry(name,
+                                                   int(hierarchy),
+                                                   int(num_cgroups),
+                                                   boolean(enabled)))
+        return subsystems
+
+
+    def controller(self, kind):
+        if kind not in self.controllers:
+            self.logger.warning('Controller %s not available', kind)
+            return None
+        return self.controllers[kind]
+
+    def run_into_cmd(self, cgroup, cmdline):
+        """
+        Get the command to run a command into a given cgroup
+
+        :param cmdline: Command to be run into cgroup
+        :param cgroup: Name of cgroup to run command into
+        :returns: A command to run `cmdline` into `cgroup`
+        """
+        return 'CGMOUNT={} {} cgroups_run_into {} {}'\
+                .format(self.cgroup_root, self.target.shutils,
+                        cgroup, cmdline)
+
+    def run_into(self, cgroup, cmdline):
+        """
+        Run the specified command into the specified CGroup
+
+        :param cmdline: Command to be run into cgroup
+        :param cgroup: Name of cgroup to run command into
+        :returns: Output of command.
+        """
+        cmd = self.run_into_cmd(cgroup, cmdline)
+        raw_output = self.target.execute(cmd)
+
+        # First line of output comes from shutils; strip it out.
+        return raw_output.split('\n', 1)[1]
+
+    def cgroups_tasks_move(self, srcg, dstg, exclude=''):
+        """
+        Move all the tasks from the srcg CGroup to the dstg one.
+        A regexp of task names can be used to define tasks which should not
+        be moved.
+        """
+        return self.target._execute_util(
+            'cgroups_tasks_move {} {} {}'.format(srcg, dstg, exclude),
+            as_root=True)
+
+    def isolate(self, cpus, exclude=[]):
+        """
+        Remove all userspace tasks from specified CPUs.
+
+        A list of CPUs can be specified where we do not want userspace tasks
+        running. This function creates a sandbox cpuset CGroup into which all
+        user-space tasks and non-pinned kernel-space tasks are moved.
+        This isolates the specified CPUs, which will not get any tasks running
+        on them unless explicitly moved into the isolated group.
+
+        :param cpus: the list of CPUs to isolate
+        :type cpus: list(int)
+
+        :return: the (sandbox, isolated) tuple, where:
+                 sandbox is the CGroup of sandboxed CPUs
+                 isolated is the CGroup of isolated CPUs
+        """
+        all_cpus = set(range(self.target.number_of_cpus))
+        sbox_cpus = list(all_cpus - set(cpus))
+        isol_cpus = list(all_cpus - set(sbox_cpus))
+
+        # Create Sandbox and Isolated cpuset CGroups
+        cpuset = self.controller('cpuset')
+        sbox_cg = cpuset.cgroup('/DEVLIB_SBOX')
+        isol_cg = cpuset.cgroup('/DEVLIB_ISOL')
+
+        # Set CPUs for Sandbox and Isolated CGroups
+        sbox_cg.set(cpus=sbox_cpus, mems=0)
+        isol_cg.set(cpus=isol_cpus, mems=0)
+
+        # Move all currently running tasks to the Sandbox CGroup
+        cpuset.move_all_tasks_to('/DEVLIB_SBOX', exclude)
+
+        return sbox_cg, isol_cg
+
+    def freeze(self, exclude=[], thaw=False):
+        """
+        Freeze all user-space tasks but the specified ones
+
+        A freezer cgroup is used to stop all the tasks in the target system
+        except the ones whose name matches one of the paths specified by the
+        exclude parameter. The name of a task to exclude must be a substring
+        of the task name as reported by the "ps" command. Indeed, this list
+        will be translated into a "ps | grep -e name1 -e name2..." in order
+        to obtain the PIDs of these tasks.
+
+        :param exclude: list of commands paths to exclude from freezer
+        :type exclude: list(str)
+
+        :param thaw: if true thaw tasks instead
+        :type thaw: bool
+        """
+
+        # Create Freezer CGroup
+        freezer = self.controller('freezer')
+        if freezer is None:
+            raise RuntimeError('freezer cgroup controller not present')
+        freezer_cg = freezer.cgroup('/DEVLIB_FREEZER')
+        thawed_cg = freezer.cgroup('/')
+
+        if thaw:
+            # Restart frozen tasks
+            freezer_cg.set(state='THAWED')
+            # Remove all tasks from freezer
+            freezer.move_all_tasks_to('/')
+            return
+
+        # Move all tasks into the freezer group
+        freezer.move_all_tasks_to('/DEVLIB_FREEZER', exclude)
+
+        # Get list of not frozen tasks, which is reported as output
+        tasks = freezer.tasks('/')
+
+        # Freeze all tasks
+        freezer_cg.set(state='FROZEN')
+
+        return tasks
+
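
A short sketch of how the module above is typically exercised once installed on a rooted target (``my_benchmark`` is a placeholder command; the cgroup names mirror the ones created by ``isolate()`` and ``freeze()``)::

    cg = target.cgroups

    # Push user-space tasks (except adbd/sshd) off CPUs 2-3 into a sandbox group.
    sandbox, isolated = cg.isolate(cpus=[2, 3], exclude=['adbd', 'sshd'])

    # Run a command inside the isolated cpuset group.
    output = cg.run_into('/DEVLIB_ISOL', 'my_benchmark')

    # Freeze the rest of user space for the duration of a measurement...
    still_running = cg.freeze(exclude=['adbd', 'sshd'])
    # ...and thaw it again afterwards.
    cg.freeze(thaw=True)
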
diff --git a/devlib/module/cooling.py b/devlib/module/cooling.py
new file mode 100644
index 0000000..3d03a73
--- /dev/null
+++ b/devlib/module/cooling.py
@@ -0,0 +1,63 @@
+#    Copyright 2014-2015 ARM Limited
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+
+from devlib.module import Module
+from devlib.utils.serial_port import open_serial_connection
+
+
+class MbedFanActiveCoolingModule(Module):
+
+    name = 'mbed-fan'
+    timeout = 30
+
+    @staticmethod
+    def probe(target):
+        return True
+
+    def __init__(self, target, port='/dev/ttyACM0', baud=115200, fan_pin=0):
+        super(MbedFanActiveCoolingModule, self).__init__(target)
+        self.port = port
+        self.baud = baud
+        self.fan_pin = fan_pin
+
+    def start(self):
+        with open_serial_connection(timeout=self.timeout,
+                                    port=self.port,
+                                    baudrate=self.baud) as target:
+            target.sendline('motor_{}_1'.format(self.fan_pin))
+
+    def stop(self):
+        with open_serial_connection(timeout=self.timeout,
+                                    port=self.port,
+                                    baudrate=self.baud) as target:
+            target.sendline('motor_{}_0'.format(self.fan_pin))
+
+
+class OdroidXU3ActiveCoolingModule(Module):
+
+    name = 'odroidxu3-fan'
+
+    @staticmethod
+    def probe(target):
+        return target.file_exists('/sys/devices/odroid_fan.15/fan_mode')
+
+    def start(self):
+        self.target.write_value('/sys/devices/odroid_fan.15/fan_mode', 0, verify=False)
+        self.target.write_value('/sys/devices/odroid_fan.15/pwm_duty', 255, verify=False)
+
+    def stop(self):
+        self.target.write_value('/sys/devices/odroid_fan.15/fan_mode', 0, verify=False)
+        self.target.write_value('/sys/devices/odroid_fan.15/pwm_duty', 1, verify=False)
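
A usage sketch for the mbed-based fan controller above (port, baud rate and pin default to the constructor values; ``run_workload()`` is a placeholder)::

    from devlib.module.cooling import MbedFanActiveCoolingModule

    cooling = MbedFanActiveCoolingModule(target, port='/dev/ttyACM0', fan_pin=0)

    cooling.start()      # spin the fan up before a thermally sensitive run
    run_workload()       # placeholder workload
    cooling.stop()       # spin it back down afterwards
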
diff --git a/devlib/module/cpufreq.py b/devlib/module/cpufreq.py
new file mode 100644
index 0000000..d72b8fd
--- /dev/null
+++ b/devlib/module/cpufreq.py
@@ -0,0 +1,423 @@
+#    Copyright 2014-2015 ARM Limited
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+from devlib.module import Module
+from devlib.exception import TargetError
+from devlib.utils.misc import memoized
+
+
+# a dict mapping governor names to the list of their tunables that can't be read
+WRITE_ONLY_TUNABLES = {
+    'interactive': ['boostpulse']
+}
+
+
+class CpufreqModule(Module):
+
+    name = 'cpufreq'
+
+    @staticmethod
+    def probe(target):
+
+        # x86 with Intel P-State driver
+        if target.abi == 'x86_64':
+            path = '/sys/devices/system/cpu/intel_pstate'
+            if target.file_exists(path):
+                return True
+
+        # Generic CPUFreq support (single policy)
+        path = '/sys/devices/system/cpu/cpufreq'
+        if target.file_exists(path):
+            return True
+
+        # Generic CPUFreq support (per CPU policy)
+        path = '/sys/devices/system/cpu/cpu0/cpufreq'
+        return target.file_exists(path)
+
+    def __init__(self, target):
+        super(CpufreqModule, self).__init__(target)
+        self._governor_tunables = {}
+
+    @memoized
+    def list_governors(self, cpu):
+        """Returns a list of governors supported by the cpu."""
+        if isinstance(cpu, int):
+            cpu = 'cpu{}'.format(cpu)
+        sysfile = '/sys/devices/system/cpu/{}/cpufreq/scaling_available_governors'.format(cpu)
+        output = self.target.read_value(sysfile)
+        return output.strip().split()
+
+    def get_governor(self, cpu):
+        """Returns the governor currently set for the specified CPU."""
+        if isinstance(cpu, int):
+            cpu = 'cpu{}'.format(cpu)
+        sysfile = '/sys/devices/system/cpu/{}/cpufreq/scaling_governor'.format(cpu)
+        return self.target.read_value(sysfile)
+
+    def set_governor(self, cpu, governor, **kwargs):
+        """
+        Set the governor for the specified CPU.
+        See https://www.kernel.org/doc/Documentation/cpu-freq/governors.txt
+
+        :param cpu: The CPU for which the governor is to be set. This must be
+                    the full name as it appears in sysfs, e.g. "cpu0".
+        :param governor: The name of the governor to be used. This must be
+                         supported by the specific device.
+
+        Additional keyword arguments can be used to specify governor tunables for
+        governors that support them.
+
+        :note: On big.LITTLE all cores in a cluster must be using the same governor.
+               Setting the governor on any core in a cluster will also set it on all
+               other cores in that cluster.
+
+        :raises: TargetError if governor is not supported by the CPU, or if,
+                 for some reason, the governor could not be set.
+
+        """
+        if isinstance(cpu, int):
+            cpu = 'cpu{}'.format(cpu)
+        supported = self.list_governors(cpu)
+        if governor not in supported:
+            raise TargetError('Governor {} not supported for cpu {}'.format(governor, cpu))
+        sysfile = '/sys/devices/system/cpu/{}/cpufreq/scaling_governor'.format(cpu)
+        self.target.write_value(sysfile, governor)
+        self.set_governor_tunables(cpu, governor, **kwargs)
+
+    def list_governor_tunables(self, cpu):
+        """Returns a list of tunables available for the governor on the specified CPU."""
+        if isinstance(cpu, int):
+            cpu = 'cpu{}'.format(cpu)
+        governor = self.get_governor(cpu)
+        if governor not in self._governor_tunables:
+            try:
+                tunables_path = '/sys/devices/system/cpu/{}/cpufreq/{}'.format(cpu, governor)
+                self._governor_tunables[governor] = self.target.list_directory(tunables_path)
+            except TargetError:  # probably an older kernel
+                try:
+                    tunables_path = '/sys/devices/system/cpu/cpufreq/{}'.format(governor)
+                    self._governor_tunables[governor] = self.target.list_directory(tunables_path)
+                except TargetError:  # governor does not support tunables
+                    self._governor_tunables[governor] = []
+        return self._governor_tunables[governor]
+
+    def get_governor_tunables(self, cpu):
+        if isinstance(cpu, int):
+            cpu = 'cpu{}'.format(cpu)
+        governor = self.get_governor(cpu)
+        tunables = {}
+        for tunable in self.list_governor_tunables(cpu):
+            if tunable not in WRITE_ONLY_TUNABLES.get(governor, []):
+                try:
+                    path = '/sys/devices/system/cpu/{}/cpufreq/{}/{}'.format(cpu, governor, tunable)
+                    tunables[tunable] = self.target.read_value(path)
+                except TargetError:  # May be an older kernel
+                    path = '/sys/devices/system/cpu/cpufreq/{}/{}'.format(governor, tunable)
+                    tunables[tunable] = self.target.read_value(path)
+        return tunables
+
+    def set_governor_tunables(self, cpu, governor=None, **kwargs):
+        """
+        Set tunables for the specified governor. Tunables should be specified as
+        keyword arguments. Which tunables and values are valid depends on the
+        governor.
+
+        :param cpu: The cpu for which the governor will be set. ``int`` or
+                    full cpu name as it appears in sysfs, e.g. ``cpu0``.
+        :param governor: The name of the governor. Must be all lower case.
+
+        The rest should be keyword parameters mapping tunable name onto the value to
+        be set for it.
+
+        :raises: TargetError if governor specified is not a valid governor name, or if
+                 a tunable specified is not valid for the governor, or if could not set
+                 tunable.
+
+        """
+        if isinstance(cpu, int):
+            cpu = 'cpu{}'.format(cpu)
+        if governor is None:
+            governor = self.get_governor(cpu)
+        valid_tunables = self.list_governor_tunables(cpu)
+        for tunable, value in kwargs.iteritems():
+            if tunable in valid_tunables:
+                path = '/sys/devices/system/cpu/{}/cpufreq/{}/{}'.format(cpu, governor, tunable)
+                try:
+                    self.target.write_value(path, value)
+                except TargetError:
+                    if self.target.file_exists(path):
+                        # File exists but we did something wrong
+                        raise
+                    # Expected file doesn't exist, try older sysfs layout.
+                    path = '/sys/devices/system/cpu/cpufreq/{}/{}'.format(governor, tunable)
+                    self.target.write_value(path, value)
+            else:
+                message = 'Unexpected tunable {} for governor {} on {}.\n'.format(tunable, governor, cpu)
+                message += 'Available tunables are: {}'.format(valid_tunables)
+                raise TargetError(message)
+
+    @memoized
+    def list_frequencies(self, cpu):
+        """Returns a list of frequencies supported by the cpu, or an empty list
+        if none could be found."""
+        if isinstance(cpu, int):
+            cpu = 'cpu{}'.format(cpu)
+        try:
+            cmd = 'cat /sys/devices/system/cpu/{}/cpufreq/scaling_available_frequencies'.format(cpu)
+            output = self.target.execute(cmd)
+            available_frequencies = map(int, output.strip().split())  # pylint: disable=E1103
+        except TargetError:
+            # On some devices scaling_available_frequencies is not generated.
+            # http://adrynalyne-teachtofish.blogspot.co.uk/2011/11/how-to-enable-scalingavailablefrequenci.html
+            # Fall back to parsing stats/time_in_state
+            cmd = 'cat /sys/devices/system/cpu/{}/cpufreq/stats/time_in_state'.format(cpu)
+            out_iter = iter(self.target.execute(cmd).strip().split())
+            available_frequencies = map(int, reversed([f for f, _ in zip(out_iter, out_iter)]))
+        return available_frequencies
+
+    def get_min_frequency(self, cpu):
+        """
+        Returns the min frequency currently set for the specified CPU.
+
+        Warning: this method does not check whether the cpu is online. It will
+        try to read the minimum frequency regardless, and the following
+        exception may be raised:
+
+        :raises: TargetError if for some reason the frequency could not be read.
+
+        """
+        if isinstance(cpu, int):
+            cpu = 'cpu{}'.format(cpu)
+        sysfile = '/sys/devices/system/cpu/{}/cpufreq/scaling_min_freq'.format(cpu)
+        return self.target.read_int(sysfile)
+
+    def set_min_frequency(self, cpu, frequency, exact=True):
+        """
+        Sets the minimum value for CPU frequency. The actual frequency will
+        depend on the governor used and may vary during execution. The value should be
+        either an int or a string representing an integer. The value must also be
+        supported by the device. The available frequencies can be obtained by calling
+        list_frequencies() or examining
+
+        /sys/devices/system/cpu/cpuX/cpufreq/scaling_available_frequencies
+
+        on the device.
+
+        :raises: TargetError if the frequency is not supported by the CPU, or if, for
+                 some reason, frequency could not be set.
+        :raises: ValueError if ``frequency`` is not an integer.
+
+        """
+        if isinstance(cpu, int):
+            cpu = 'cpu{}'.format(cpu)
+        available_frequencies = self.list_frequencies(cpu)
+        try:
+            value = int(frequency)
+            if exact and available_frequencies and value not in available_frequencies:
+                raise TargetError('Can\'t set {} frequency to {}\nmust be in {}'.format(cpu,
+                                                                                        value,
+                                                                                        available_frequencies))
+            sysfile = '/sys/devices/system/cpu/{}/cpufreq/scaling_min_freq'.format(cpu)
+            self.target.write_value(sysfile, value)
+        except ValueError:
+            raise ValueError('Frequency must be an integer; got: "{}"'.format(frequency))
+
+    def get_frequency(self, cpu):
+        """
+        Returns the frequency currently set for the specified CPU.
+
+        Warning: this method does not check whether the CPU is online; it will
+        attempt to read the current frequency regardless.
+
+        :raises: TargetError if for some reason the frequency could not be read.
+
+        """
+        if isinstance(cpu, int):
+            cpu = 'cpu{}'.format(cpu)
+        sysfile = '/sys/devices/system/cpu/{}/cpufreq/scaling_cur_freq'.format(cpu)
+        return self.target.read_int(sysfile)
+
+    def set_frequency(self, cpu, frequency, exact=True):
+        """
+        Sets the frequency for the specified CPU via the ``userspace`` governor's
+        ``scaling_setspeed`` interface. The value should be either an int or a
+        string representing an integer.
+
+        If the ``exact`` flag is set (the default), the value must also be supported
+        by the device. The available frequencies can be obtained by calling
+        list_frequencies() or examining
+
+        /sys/devices/system/cpu/cpuX/cpufreq/scaling_available_frequencies
+
+        on the device (if it exists).
+
+        :raises: TargetError if the frequency is not supported by the CPU, or if, for
+                 some reason, frequency could not be set.
+        :raises: ValueError if ``frequency`` is not an integer.
+
+        """
+        if isinstance(cpu, int):
+            cpu = 'cpu{}'.format(cpu)
+        try:
+            value = int(frequency)
+            if exact:
+                available_frequencies = self.list_frequencies(cpu)
+                if available_frequencies and value not in available_frequencies:
+                    raise TargetError('Can\'t set {} frequency to {}\nmust be in {}'.format(cpu,
+                                                                                            value,
+                                                                                            available_frequencies))
+            if self.get_governor(cpu) != 'userspace':
+                raise TargetError('Can\'t set {} frequency; governor must be "userspace"'.format(cpu))
+            sysfile = '/sys/devices/system/cpu/{}/cpufreq/scaling_setspeed'.format(cpu)
+            self.target.write_value(sysfile, value, verify=False)
+        except ValueError:
+            raise ValueError('Frequency must be an integer; got: "{}"'.format(frequency))
+
+    def get_max_frequency(self, cpu):
+        """
+        Returns the maximum frequency currently set for the specified CPU.
+
+        Warning: this method does not check whether the CPU is online; it will
+        attempt to read the maximum frequency regardless.
+
+        :raises: TargetError if for some reason the frequency could not be read.
+        """
+        if isinstance(cpu, int):
+            cpu = 'cpu{}'.format(cpu)
+        sysfile = '/sys/devices/system/cpu/{}/cpufreq/scaling_max_freq'.format(cpu)
+        return self.target.read_int(sysfile)
+
+    def set_max_frequency(self, cpu, frequency, exact=True):
+        """
+        Sets the maximum CPU frequency. The actual frequency will depend on the
+        governor in use and may vary during execution. The value should be either
+        an int or a string representing an integer. If ``exact`` is set (the
+        default), the value must also be supported by the device. The available
+        frequencies can be obtained by calling list_frequencies() or examining
+
+        /sys/devices/system/cpu/cpuX/cpufreq/scaling_available_frequencies
+
+        on the device.
+
+        :raises: TargetError if the frequency is not supported by the CPU, or if, for
+                 some reason, frequency could not be set.
+        :raises: ValueError if ``frequency`` is not an integer.
+
+        """
+        if isinstance(cpu, int):
+            cpu = 'cpu{}'.format(cpu)
+        available_frequencies = self.list_frequencies(cpu)
+        try:
+            value = int(frequency)
+            if exact and available_frequencies and value not in available_frequencies:
+                raise TargetError('Can\'t set {} frequency to {}\nmust be in {}'.format(cpu,
+                                                                                        value,
+                                                                                        available_frequencies))
+            sysfile = '/sys/devices/system/cpu/{}/cpufreq/scaling_max_freq'.format(cpu)
+            self.target.write_value(sysfile, value)
+        except ValueError:
+            raise ValueError('Frequency must be an integer; got: "{}"'.format(frequency))
+
+    def set_governor_for_cpus(self, cpus, governor, **kwargs):
+        """
+        Set the governor for the specified list of CPUs.
+        See https://www.kernel.org/doc/Documentation/cpu-freq/governors.txt
+
+        :param cpus: The list of CPUs for which the governor is to be set.
+        """
+        for cpu in cpus:
+            self.set_governor(cpu, governor, **kwargs)
+
+    def set_frequency_for_cpus(self, cpus, freq, exact=False):
+        """
+        Set the frequency for the specified list of CPUs.
+        See https://www.kernel.org/doc/Documentation/cpu-freq/governors.txt
+
+        :param cpus: The list of CPUs for which the frequency is to be set.
+        """
+        for cpu in cpus:
+            self.set_frequency(cpu, freq, exact)
+
+    def set_all_frequencies(self, freq):
+        """
+        Set the specified (minimum) frequency for all the (online) CPUs
+        """
+        return self.target._execute_util(
+                'cpufreq_set_all_frequencies {}'.format(freq),
+                as_root=True)
+
+    def get_all_frequencies(self):
+        """
+        Get the current frequency for all the (online) CPUs
+        """
+        output = self.target._execute_util(
+                'cpufreq_get_all_frequencies', as_root=True)
+        frequencies = {}
+        for x in output.splitlines():
+            kv = x.split(' ')
+            if kv[0] == '':
+                break
+            frequencies[kv[0]] = kv[1]
+        return frequencies
+
+    def set_all_governors(self, governor):
+        """
+        Set the specified governor for all the (online) CPUs
+        """
+        try:
+            return self.target._execute_util(
+                'cpufreq_set_all_governors {}'.format(governor),
+                as_root=True)
+        except TargetError as e:
+            if "echo: I/O error" in str(e):
+                cpus_unsupported = [c for c in self.target.list_online_cpus()
+                                    if governor not in self.list_governors(c)]
+                raise TargetError("Governor {} unsupported for CPUs {}".format(
+                    governor, cpus_unsupported))
+            else:
+                raise
+
+    def get_all_governors(self):
+        """
+        Get the current governor for all the (online) CPUs
+        """
+        output = self.target._execute_util(
+                'cpufreq_get_all_governors', as_root=True)
+        governors = {}
+        for x in output.splitlines():
+            kv = x.split(' ')
+            if kv[0] == '':
+                break
+            governors[kv[0]] = kv[1]
+        return governors
+
+    def trace_frequencies(self):
+        """
+        Report current frequencies on trace file
+        """
+        return self.target._execute_util('cpufreq_trace_all_frequencies', as_root=True)
+
+    @memoized
+    def get_domain_cpus(self, cpu):
+        """
+        Get the CPUs that share a frequency domain with the given CPU
+        """
+        if isinstance(cpu, int):
+            cpu = 'cpu{}'.format(cpu)
+
+        sysfile = '/sys/devices/system/cpu/{}/cpufreq/affected_cpus'.format(cpu)
+
+        return [int(c) for c in self.target.read_value(sysfile).split()]
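A minimal usage sketch for the cpufreq module above (assuming an already-connected
target that exposes the loaded module as ``target.cpufreq``; the CPU number,
frequencies and governor tunables are illustrative)::

    # Query what CPU0 supports, then pin it to its highest available frequency.
    freqs = target.cpufreq.list_frequencies(0)
    target.cpufreq.set_governor(0, 'userspace')   # required by set_frequency()
    target.cpufreq.set_frequency(0, freqs[-1])
    print(target.cpufreq.get_frequency(0))

    # Governor tunables are passed as keyword arguments.
    target.cpufreq.set_governor(0, 'ondemand', sampling_rate=20000)
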
diff --git a/devlib/module/cpuidle.py b/devlib/module/cpuidle.py
new file mode 100644
index 0000000..fd986c0
--- /dev/null
+++ b/devlib/module/cpuidle.py
@@ -0,0 +1,178 @@
+#    Copyright 2014-2015 ARM Limited
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+# pylint: disable=attribute-defined-outside-init
+from devlib.module import Module
+from devlib.utils.misc import memoized
+from devlib.utils.types import integer, boolean
+
+
+class CpuidleState(object):
+
+    @property
+    def usage(self):
+        return integer(self.get('usage'))
+
+    @property
+    def time(self):
+        return integer(self.get('time'))
+
+    @property
+    def is_enabled(self):
+        return not boolean(self.get('disable'))
+
+    @property
+    def ordinal(self):
+        i = len(self.id)
+        while self.id[i - 1].isdigit():
+            i -= 1
+            if not i:
+                raise ValueError('invalid idle state name: "{}"'.format(self.id))
+        return int(self.id[i:])
+
+    def __init__(self, target, index, path):
+        self.target = target
+        self.index = index
+        self.path = path
+        self.id = self.target.path.basename(self.path)
+        self.cpu = self.target.path.basename(self.target.path.dirname(path))
+
+    @property
+    @memoized
+    def desc(self):
+        return self.get('desc')
+
+    @property
+    @memoized
+    def name(self):
+        return self.get('name')
+
+    @property
+    @memoized
+    def latency(self):
+        """Exit latency in uS"""
+        return self.get('latency')
+
+    @property
+    @memoized
+    def power(self):
+        """Power usage in mW
+
+        .. note::
+
+            This value is not always populated by the kernel and may be garbage.
+        """
+        return self.get('power')
+
+    @property
+    @memoized
+    def target_residency(self):
+        """Target residency in uS
+
+        This is the amount of time in the state required to 'break even' on
+        power - the system should avoid entering the state for less time than
+        this.
+        """
+        return self.get('residency')
+
+    def enable(self):
+        self.set('disable', 0)
+
+    def disable(self):
+        self.set('disable', 1)
+
+    def get(self, prop):
+        property_path = self.target.path.join(self.path, prop)
+        return self.target.read_value(property_path)
+
+    def set(self, prop, value):
+        property_path = self.target.path.join(self.path, prop)
+        self.target.write_value(property_path, value)
+
+    def __eq__(self, other):
+        if isinstance(other, CpuidleState):
+            return (self.name == other.name) and (self.desc == other.desc)
+        elif isinstance(other, basestring):
+            return (self.name == other) or (self.desc == other)
+        else:
+            return False
+
+    def __ne__(self, other):
+        return not self.__eq__(other)
+
+    def __str__(self):
+        return 'CpuidleState({}, {})'.format(self.name, self.desc)
+
+    __repr__ = __str__
+
+
+class Cpuidle(Module):
+
+    name = 'cpuidle'
+    root_path = '/sys/devices/system/cpu/cpuidle'
+
+    @staticmethod
+    def probe(target):
+        return target.file_exists(Cpuidle.root_path)
+
+    def get_driver(self):
+        return self.target.read_value(self.target.path.join(self.root_path, 'current_driver'))
+
+    def get_governor(self):
+        return self.target.read_value(self.target.path.join(self.root_path, 'current_governor_ro'))
+
+    @memoized
+    def get_states(self, cpu=0):
+        if isinstance(cpu, int):
+            cpu = 'cpu{}'.format(cpu)
+        states_dir = self.target.path.join(self.target.path.dirname(self.root_path), cpu, 'cpuidle')
+        idle_states = []
+        for state in self.target.list_directory(states_dir):
+            if state.startswith('state'):
+                index = int(state[5:])
+                idle_states.append(CpuidleState(self.target, index, self.target.path.join(states_dir, state)))
+        return idle_states
+
+    def get_state(self, state, cpu=0):
+        if isinstance(state, int):
+            try:
+                return self.get_states(cpu)[state]
+            except IndexError:
+                raise ValueError('Cpuidle state {} does not exist'.format(state))
+        else:  # assume string-like
+            for s in self.get_states(cpu):
+                if state in [s.id, s.name, s.desc]:
+                    return s
+            raise ValueError('Cpuidle state {} does not exist'.format(state))
+
+    def enable(self, state, cpu=0):
+        self.get_state(state, cpu).enable()
+
+    def disable(self, state, cpu=0):
+        self.get_state(state, cpu).disable()
+
+    def enable_all(self, cpu=0):
+        for state in self.get_states(cpu):
+            state.enable()
+
+    def disable_all(self, cpu=0):
+        for state in self.get_states(cpu):
+            state.disable()
+
+    def perturb_cpus(self):
+        """
+        Momentarily wake each CPU. Ensures cpu_idle events in trace file.
+        """
+        output = self.target._execute_util('cpuidle_wake_all_cpus')
+        print(output)
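A short sketch of the cpuidle module above, assuming a connected target that
exposes it as ``target.cpuidle``; the state name used for disabling is
illustrative and device-dependent::

    print(target.cpuidle.get_driver())
    print(target.cpuidle.get_governor())
    for state in target.cpuidle.get_states(cpu=0):
        print('{}: {} (latency {} us, enabled: {})'.format(
            state.id, state.name, state.latency, state.is_enabled))

    target.cpuidle.disable('C1', cpu=0)   # accepts an index, id, name or desc
    target.cpuidle.enable_all(cpu=0)
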
diff --git a/devlib/module/hotplug.py b/devlib/module/hotplug.py
new file mode 100644
index 0000000..8ae238e
--- /dev/null
+++ b/devlib/module/hotplug.py
@@ -0,0 +1,40 @@
+from devlib.module import Module
+
+
+class HotplugModule(Module):
+
+    name = 'hotplug'
+    base_path = '/sys/devices/system/cpu'
+
+    @classmethod
+    def probe(cls, target):  # pylint: disable=arguments-differ
+        # If a system has just 1 CPU, it makes no sense to hotplug it.
+        # If a system has more than 1 CPU, CPU0 could be configured to be not
+        # hotpluggable. Thus, check for hotplug support by looking at CPU1
+        path = cls._cpu_path(target, 1)
+        return target.file_exists(path) and target.is_rooted
+
+    @classmethod
+    def _cpu_path(cls, target, cpu):
+        if isinstance(cpu, int):
+            cpu = 'cpu{}'.format(cpu)
+        return target.path.join(cls.base_path, cpu, 'online')
+
+    def online_all(self):
+        self.online(*range(self.target.number_of_cpus))
+
+    def online(self, *args):
+        for cpu in args:
+            self.hotplug(cpu, online=True)
+
+    def offline(self, *args):
+        for cpu in args:
+            self.hotplug(cpu, online=False)
+
+    def hotplug(self, cpu, online):
+        path = self._cpu_path(self.target, cpu)
+        if not self.target.file_exists(path):
+            return
+        value = 1 if online else 0
+        self.target.write_value(path, value)
+
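A sketch of the hotplug module above, assuming a rooted, connected target that
exposes it as ``target.hotplug``::

    target.hotplug.offline(2, 3)    # writes 0 to .../cpu2/online and .../cpu3/online
    # ... run something on the remaining CPUs ...
    target.hotplug.online_all()     # brings every CPU back up
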
diff --git a/devlib/module/hwmon.py b/devlib/module/hwmon.py
new file mode 100644
index 0000000..dc00442
--- /dev/null
+++ b/devlib/module/hwmon.py
@@ -0,0 +1,142 @@
+#    Copyright 2015 ARM Limited
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+import re
+from collections import defaultdict
+
+from devlib.module import Module
+from devlib.utils.types import integer
+
+
+HWMON_ROOT = '/sys/class/hwmon'
+HWMON_FILE_REGEX = re.compile(r'(?P<kind>\w+?)(?P<number>\d+)_(?P<item>\w+)')
+
+
+class HwmonSensor(object):
+
+    def __init__(self, device, path, kind, number):
+        self.device = device
+        self.path = path
+        self.kind = kind
+        self.number = number
+        self.target = self.device.target
+        self.name = '{}/{}{}'.format(self.device.name, self.kind, self.number)
+        self.label = self.name
+        self.items = set()
+
+    def add(self, item):
+        self.items.add(item)
+        if item == 'label':
+            self.label = self.get('label')
+
+    def get(self, item):
+        path = self.get_file(item)
+        value = self.target.read_value(path)
+        try:
+            return integer(value)
+        except (TypeError, ValueError):
+            return value
+
+    def set(self, item, value):
+        path = self.get_file(item)
+        self.target.write_value(path, value)
+
+    def get_file(self, item):
+        if item not in self.items:
+            raise ValueError('item "{}" does not exist for {}'.format(item, self.name))
+        filename = '{}{}_{}'.format(self.kind, self.number, item)
+        return self.target.path.join(self.path, filename)
+
+    def __str__(self):
+        if self.name != self.label:
+            text = 'HS({}, {})'.format(self.name, self.label)
+        else:
+            text = 'HS({})'.format(self.name)
+        return text
+
+    __repr__ = __str__
+
+
+class HwmonDevice(object):
+
+    @property
+    def sensors(self):
+        all_sensors = []
+        for sensors_of_kind in self._sensors.itervalues():
+            all_sensors.extend(sensors_of_kind.values())
+        return all_sensors
+
+    def __init__(self, target, path):
+        self.target = target
+        self.path = path
+        self.name = self.target.read_value(self.target.path.join(self.path, 'name'))
+        self._sensors = defaultdict(dict)
+        path = self.path
+        if not path.endswith(self.target.path.sep):
+            path += self.target.path.sep
+        for entry in self.target.list_directory(path,
+                                                as_root=self.target.is_rooted):
+            match = HWMON_FILE_REGEX.search(entry)
+            if match:
+                kind = match.group('kind')
+                number = int(match.group('number'))
+                item = match.group('item')
+                if number not in self._sensors[kind]:
+                    sensor = HwmonSensor(self, self.path, kind, number)
+                    self._sensors[kind][number] = sensor
+                self._sensors[kind][number].add(item)
+
+    def get(self, kind, number=None):
+        if number is None:
+            return [s for _, s in sorted(self._sensors[kind].iteritems(),
+                                         key=lambda x: x[0])]
+        else:
+            return self._sensors[kind].get(number)
+
+    def __str__(self):
+        return 'HD({})'.format(self.name)
+
+    __repr__ = __str__
+
+
+class HwmonModule(Module):
+
+    name = 'hwmon'
+
+    @staticmethod
+    def probe(target):
+        return target.file_exists(HWMON_ROOT)
+
+    @property
+    def sensors(self):
+        all_sensors = []
+        for device in self.devices:
+            all_sensors.extend(device.sensors)
+        return all_sensors
+
+    def __init__(self, target):
+        super(HwmonModule, self).__init__(target)
+        self.root = HWMON_ROOT
+        self.devices = []
+        self.scan()
+
+    def scan(self):
+        for entry in self.target.list_directory(self.root,
+                                                as_root=self.target.is_rooted):
+            if entry.startswith('hwmon'):
+                entry_path = self.target.path.join(self.root, entry)
+                if self.target.file_exists(self.target.path.join(entry_path, 'name')):
+                    device = HwmonDevice(self.target, entry_path)
+                    self.devices.append(device)
+
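A sketch of reading sensors through the hwmon module above, assuming it is
exposed as ``target.hwmon``. The ``'input'`` item is the conventional hwmon
readout file; whether a given sensor exposes it depends on the device::

    for sensor in target.hwmon.sensors:
        if 'input' in sensor.items:
            print('{}: {}'.format(sensor.label, sensor.get('input')))

    # Or query a specific device for all sensors of one kind:
    device = target.hwmon.devices[0]
    temp_sensors = device.get('temp')   # HwmonSensor objects sorted by number
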
diff --git a/devlib/module/thermal.py b/devlib/module/thermal.py
new file mode 100644
index 0000000..4fa8e15
--- /dev/null
+++ b/devlib/module/thermal.py
@@ -0,0 +1,104 @@
+#    Copyright 2015 ARM Limited
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import re
+
+from devlib.module import Module
+
+class TripPoint(object):
+    def __init__(self, zone, _id):
+        self._id = _id
+        self.zone = zone
+        self.temp_node = 'trip_point_' + _id + '_temp'
+        self.type_node = 'trip_point_' + _id + '_type'
+
+    @property
+    def target(self):
+        return self.zone.target
+
+    def get_temperature(self):
+        """Returns the currently configured temperature of the trip point"""
+        temp_file = self.target.path.join(self.zone.path, self.temp_node)
+        return self.target.read_int(temp_file)
+
+    def set_temperature(self, temperature):
+        temp_file = self.target.path.join(self.zone.path, self.temp_node)
+        self.target.write_value(temp_file, temperature)
+
+    def get_type(self):
+        """Returns the type of trip point"""
+        type_file = self.target.path.join(self.zone.path, self.type_node)
+        return self.target.read_value(type_file)
+
+class ThermalZone(object):
+    def __init__(self, target, root, _id):
+        self.target = target
+        self.name = 'thermal_zone' + _id
+        self.path = target.path.join(root, self.name)
+        self.trip_points = {}
+
+        for entry in self.target.list_directory(self.path):
+            re_match = re.match('^trip_point_([0-9]+)_temp', entry)
+            if re_match is not None:
+                self.add_trip_point(re_match.group(1))
+
+    def add_trip_point(self, _id):
+        self.trip_points[int(_id)] = TripPoint(self, _id)
+
+    def is_enabled(self):
+        """Returns a boolean representing the 'mode' of the thermal zone"""
+        value = self.target.read_value(self.target.path.join(self.path, 'mode'))
+        return value == 'enabled'
+
+    def set_mode(self, enable):
+        value = 'enabled' if enable else 'disabled'
+        self.target.write_value(self.target.path.join(self.path, 'mode'), value)
+
+    def get_temperature(self):
+        """Returns the temperature of the thermal zone"""
+        temp_file = self.target.path.join(self.path, 'temp')
+        return self.target.read_int(temp_file)
+
+class ThermalModule(Module):
+    name = 'thermal'
+    thermal_root = '/sys/class/thermal'
+
+    @staticmethod
+    def probe(target):
+        return target.file_exists(ThermalModule.thermal_root)
+
+    def __init__(self, target):
+        super(ThermalModule, self).__init__(target)
+
+        self.zones = {}
+        self.cdevs = []
+
+        for entry in target.list_directory(self.thermal_root):
+            re_match = re.match('^(thermal_zone|cooling_device)([0-9]+)', entry)
+            if re_match is None:
+                continue
+
+            if re_match.group(1) == 'thermal_zone':
+                self.add_thermal_zone(re_match.group(2))
+            elif re_match.group(1) == 'cooling_device':
+                # TODO
+                pass
+
+    def add_thermal_zone(self, _id):
+        self.zones[int(_id)] = ThermalZone(self.target, self.thermal_root, _id)
+
+    def disable_all_zones(self):
+        """Disables all the thermal zones in the target"""
+        for zone in self.zones.values():
+            zone.set_mode(False)
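A sketch of the thermal module above, assuming it is exposed as
``target.thermal``::

    for zone_id, zone in target.thermal.zones.items():
        print('{}: {} (enabled: {})'.format(
            zone.name, zone.get_temperature(), zone.is_enabled()))
        for trip_id, trip in zone.trip_points.items():
            print('  trip {}: {} @ {}'.format(
                trip_id, trip.get_type(), trip.get_temperature()))

    # Disable thermal management, e.g. to avoid capping during a benchmark run.
    target.thermal.disable_all_zones()
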
diff --git a/devlib/module/vexpress.py b/devlib/module/vexpress.py
new file mode 100644
index 0000000..b623d48
--- /dev/null
+++ b/devlib/module/vexpress.py
@@ -0,0 +1,386 @@
+#
+#    Copyright 2015 ARM Limited
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+import os
+import time
+import tarfile
+import shutil
+
+from devlib.module import HardRestModule, BootModule, FlashModule
+from devlib.exception import TargetError, HostError
+from devlib.utils.serial_port import open_serial_connection, pulse_dtr, write_characters
+from devlib.utils.uefi import UefiMenu, UefiConfig
+from devlib.utils.uboot import UbootMenu
+
+
+AUTOSTART_MESSAGE = 'Press Enter to stop auto boot...'
+POWERUP_MESSAGE = 'Powering up system...'
+DEFAULT_MCC_PROMPT = 'Cmd>'
+
+
+class VexpressDtrHardReset(HardRestModule):
+
+    name = 'vexpress-dtr'
+    stage = 'early'
+
+    @staticmethod
+    def probe(target):
+        return True
+
+    def __init__(self, target, port='/dev/ttyS0', baudrate=115200,
+                 mcc_prompt=DEFAULT_MCC_PROMPT, timeout=300):
+        super(VexpressDtrHardReset, self).__init__(target)
+        self.port = port
+        self.baudrate = baudrate
+        self.mcc_prompt = mcc_prompt
+        self.timeout = timeout
+
+    def __call__(self):
+        try:
+            if self.target.is_connected:
+                self.target.execute('sync')
+        except TargetError:
+            pass
+        with open_serial_connection(port=self.port,
+                                    baudrate=self.baudrate,
+                                    timeout=self.timeout,
+                                    init_dtr=0,
+                                    get_conn=True) as (_, conn):
+            pulse_dtr(conn, state=True, duration=0.1)  # TRM specifies a pulse of >=100ms
+
+
+class VexpressReboottxtHardReset(HardRestModule):
+
+    name = 'vexpress-reboottxt'
+    stage = 'early'
+
+    @staticmethod
+    def probe(target):
+        return True
+
+    def __init__(self, target,
+                 port='/dev/ttyS0', baudrate=115200,
+                 path='/media/VEMSD',
+                 mcc_prompt=DEFAULT_MCC_PROMPT, timeout=30, short_delay=1):
+        super(VexpressReboottxtHardReset, self).__init__(target)
+        self.port = port
+        self.baudrate = baudrate
+        self.path = path
+        self.mcc_prompt = mcc_prompt
+        self.timeout = timeout
+        self.short_delay = short_delay
+        self.filepath = os.path.join(path, 'reboot.txt')
+
+    def __call__(self):
+        try:
+            if self.target.is_connected:
+                self.target.execute('sync')
+        except TargetError:
+            pass
+
+        if not os.path.exists(self.path):
+            self.logger.debug('{} does not exist; attempting to mount...'.format(self.path))
+            with open_serial_connection(port=self.port,
+                                        baudrate=self.baudrate,
+                                        timeout=self.timeout,
+                                        init_dtr=0) as tty:
+                wait_for_vemsd(self.path, tty, self.mcc_prompt, self.short_delay)
+        with open(self.filepath, 'w'):
+            pass
+
+
+class VexpressBootModule(BootModule):
+
+    stage = 'early'
+
+    @staticmethod
+    def probe(target):
+        return True
+
+    def __init__(self, target, uefi_entry=None,
+                 port='/dev/ttyS0', baudrate=115200,
+                 mcc_prompt=DEFAULT_MCC_PROMPT,
+                 timeout=120, short_delay=1):
+        super(VexpressBootModule, self).__init__(target)
+        self.port = port
+        self.baudrate = baudrate
+        self.uefi_entry = uefi_entry
+        self.mcc_prompt = mcc_prompt
+        self.timeout = timeout
+        self.short_delay = short_delay
+
+    def __call__(self):
+        with open_serial_connection(port=self.port,
+                                    baudrate=self.baudrate,
+                                    timeout=self.timeout,
+                                    init_dtr=0) as tty:
+            self.get_through_early_boot(tty)
+            self.perform_boot_sequence(tty)
+            self.wait_for_android_prompt(tty)
+
+    def perform_boot_sequence(self, tty):
+        raise NotImplementedError()
+
+    def get_through_early_boot(self, tty):
+        self.logger.debug('Establishing initial state...')
+        tty.sendline('')
+        i = tty.expect([AUTOSTART_MESSAGE, POWERUP_MESSAGE, self.mcc_prompt])
+        if i == 2:
+            self.logger.debug('Saw MCC prompt.')
+            time.sleep(self.short_delay)
+            tty.sendline('reboot')
+        elif i == 1:
+            self.logger.debug('Saw powering up message (assuming soft reboot).')
+        else:
+            self.logger.debug('Saw auto boot message.')
+            tty.sendline('')
+            time.sleep(self.short_delay)
+            tty.sendline('reboot')
+
+    def get_uefi_menu(self, tty):
+        menu = UefiMenu(tty)
+        self.logger.debug('Waiting for UEFI menu...')
+        menu.wait(timeout=self.timeout)
+        return menu
+
+    def wait_for_android_prompt(self, tty):
+        self.logger.debug('Waiting for the Android prompt.')
+        tty.expect(self.target.shell_prompt, timeout=self.timeout)
+        # This delay is needed to allow the platform some time to finish
+        # initilizing; querying the ip address too early from connect() may
+        # result in a bogus address being assigned to eth0.
+        time.sleep(5)
+
+
+class VexpressUefiBoot(VexpressBootModule):
+
+    name = 'vexpress-uefi'
+
+    def __init__(self, target, uefi_entry,
+                 image, fdt, bootargs, initrd,
+                 *args, **kwargs):
+        super(VexpressUefiBoot, self).__init__(target, uefi_entry=uefi_entry,
+                                               *args, **kwargs)
+        self.uefi_config = self._create_config(image, fdt, bootargs, initrd)
+
+    def perform_boot_sequence(self, tty):
+        menu = self.get_uefi_menu(tty)
+        try:
+            menu.select(self.uefi_entry)
+        except LookupError:
+            self.logger.debug('{} UEFI entry not found.'.format(self.uefi_entry))
+            self.logger.debug('Attempting to create one using default flasher configuration.')
+            menu.create_entry(self.uefi_entry, self.uefi_config)
+            menu.select(self.uefi_entry)
+
+    def _create_config(self, image, fdt, bootargs, initrd):  # pylint: disable=R0201
+        config_dict = {
+            'image_name': image,
+            'image_args': bootargs,
+            'initrd': initrd,
+        }
+
+        if fdt:
+            config_dict['fdt_support'] = True
+            config_dict['fdt_path'] = fdt
+        else:
+            config_dict['fdt_support'] = False
+
+        return UefiConfig(config_dict)
+
+
+class VexpressUefiShellBoot(VexpressBootModule):
+
+    name = 'vexpress-uefi-shell'
+
+    def __init__(self, target, uefi_entry='^Shell$',
+                 efi_shell_prompt='Shell>',
+                 image='kernel', bootargs=None,
+                 *args, **kwargs):
+        super(VexpressUefiShellBoot, self).__init__(target, uefi_entry=uefi_entry,
+                                                    *args, **kwargs)
+        self.efi_shell_prompt = efi_shell_prompt
+        self.image = image
+        self.bootargs = bootargs
+
+    def perform_boot_sequence(self, tty):
+        menu = self.get_uefi_menu(tty)
+        try:
+            menu.select(self.uefi_entry)
+        except LookupError:
+            raise TargetError('Did not see "{}" UEFI entry.'.format(self.uefi_entry))
+        tty.expect(self.efi_shell_prompt, timeout=self.timeout)
+        if self.bootargs:
+            tty.sendline('')  # stop default boot
+            time.sleep(self.short_delay)
+            efi_shell_command = '{} {}'.format(self.image, self.bootargs)
+            self.logger.debug(efi_shell_command)
+            write_characters(tty, efi_shell_command)
+            tty.sendline('\r\n')
+
+
+class VexpressUBoot(VexpressBootModule):
+
+    name = 'vexpress-u-boot'
+
+    def __init__(self, target, env=None,
+                 *args, **kwargs):
+        super(VexpressUBoot, self).__init__(target, *args, **kwargs)
+        self.env = env
+
+    def perform_boot_sequence(self, tty):
+        if self.env is None:
+            return  # Will boot automatically
+
+        menu = UbootMenu(tty)
+        self.logger.debug('Waiting for U-Boot prompt...')
+        menu.open(timeout=120)
+        for var, value in self.env.iteritems():
+            menu.setenv(var, value)
+        menu.boot()
+
+
+class VexpressBootmon(VexpressBootModule):
+
+    name = 'vexpress-bootmon'
+
+    def __init__(self, target,
+                 image, fdt, initrd, bootargs,
+                 uses_bootscript=False,
+                 bootmon_prompt='>',
+                 *args, **kwargs):
+        super(VexpressBootmon, self).__init__(target, *args, **kwargs)
+        self.image = image
+        self.fdt = fdt
+        self.initrd = initrd
+        self.bootargs = bootargs
+        self.uses_bootscript = uses_bootscript
+        self.bootmon_prompt = bootmon_prompt
+
+    def perform_boot_sequence(self, tty):
+        if self.uses_bootscript:
+            return  # Will boot automatically
+
+        time.sleep(self.short_delay)
+        tty.expect(self.bootmon_prompt, timeout=self.timeout)
+        with open_serial_connection(port=self.port,
+                                    baudrate=self.baudrate,
+                                    timeout=self.timeout,
+                                    init_dtr=0) as tty:
+            write_characters(tty, 'fl linux fdt {}'.format(self.fdt))
+            write_characters(tty, 'fl linux initrd {}'.format(self.initrd))
+            write_characters(tty, 'fl linux boot {} {}'.format(self.image,
+                                                               self.bootargs))
+
+
+class VersatileExpressFlashModule(FlashModule):
+
+    name = 'vexpress-vemsd'
+    description = """
+    Enables flashing of kernels and firmware to ARM Versatile Express devices.
+
+    This module enables flashing of image bundles or individual images to ARM
+    Versatile Express-based devices (e.g. JUNO) via a host-mounted MicroSD on the
+    board.
+
+    The bundle, if specified, must reflect the directory structure of the MicroSD
+    and will be extracted directly into the location at which it is mounted on the
+    host. The images, if specified, must be a dict mapping the absolute path of the image on
+    the host to the destination path within the board's MicroSD; the destination path
+    may be either absolute, or relative to the MicroSD mount location.
+
+    """
+
+    stage = 'early'
+
+    @staticmethod
+    def probe(target):
+        if not target.has('hard_reset'):
+            return False
+        return True
+
+    def __init__(self, target, vemsd_mount, mcc_prompt=DEFAULT_MCC_PROMPT, timeout=30, short_delay=1):
+        super(VersatileExpressFlashModule, self).__init__(target)
+        self.vemsd_mount = vemsd_mount
+        self.mcc_prompt = mcc_prompt
+        self.timeout = timeout
+        self.short_delay = short_delay
+
+    def __call__(self, image_bundle=None, images=None, bootargs=None):
+        self.target.hard_reset()
+        with open_serial_connection(port=self.target.platform.serial_port,
+                                    baudrate=self.target.platform.baudrate,
+                                    timeout=self.timeout,
+                                    init_dtr=0) as tty:
+            i = tty.expect([self.mcc_prompt, AUTOSTART_MESSAGE])
+            if i:
+                tty.sendline('')
+            wait_for_vemsd(self.vemsd_mount, tty, self.mcc_prompt, self.short_delay)
+        try:
+            if image_bundle:
+                self._deploy_image_bundle(image_bundle)
+            if images:
+                self._overlay_images(images)
+            os.system('sync')
+        except (IOError, OSError) as e:
+            msg = 'Could not deploy images to {}; got: {}'
+            raise TargetError(msg.format(self.vemsd_mount, e))
+        self.target.boot()
+        self.target.connect(timeout=30)
+
+    def _deploy_image_bundle(self, bundle):
+        self.logger.debug('Validating {}'.format(bundle))
+        validate_image_bundle(bundle)
+        self.logger.debug('Extracting {} into {}...'.format(bundle, self.vemsd_mount))
+        with tarfile.open(bundle) as tar:
+            tar.extractall(self.vemsd_mount)
+
+    def _overlay_images(self, images):
+        for dest, src in images.iteritems():
+            dest = os.path.join(self.vemsd_mount, dest)
+            self.logger.debug('Copying {} to {}'.format(src, dest))
+            shutil.copy(src, dest)
+
+
+# utility functions
+
+def validate_image_bundle(bundle):
+    if not tarfile.is_tarfile(bundle):
+        raise HostError('Image bundle {} does not appear to be a valid TAR file.'.format(bundle))
+    with tarfile.open(bundle) as tar:
+        try:
+            tar.getmember('config.txt')
+        except KeyError:
+            try:
+                tar.getmember('./config.txt')
+            except KeyError:
+                msg = 'Tarball {} does not appear to be a valid image bundle (did not see config.txt).'
+                raise HostError(msg.format(bundle))
+
+
+def wait_for_vemsd(vemsd_mount, tty, mcc_prompt=DEFAULT_MCC_PROMPT, short_delay=1, retries=3):
+    attempts = 1 + retries
+    path = os.path.join(vemsd_mount, 'config.txt')
+    if os.path.exists(path):
+        return
+    for _ in xrange(attempts):
+        tty.sendline('')  # clear any garbage
+        tty.expect(mcc_prompt, timeout=short_delay)
+        tty.sendline('usb_on')
+        time.sleep(short_delay * 3)
+        if os.path.exists(path):
+            return
+    raise TargetError('Could not mount {}'.format(vemsd_mount))
+
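A sketch of how these modules are typically exercised, assuming a Versatile
Express target whose platform has loaded them so that ``target.hard_reset()``,
``target.boot()`` and ``target.flash()`` dispatch to the classes above; the
bundle path is a placeholder::

    # Re-flash the board from a host-side image bundle (must contain config.txt),
    # then reboot into the new images; flash() performs the reset/boot itself.
    target.flash(image_bundle='/path/to/juno-images.tar')

    # Or simply power-cycle and boot without re-flashing:
    target.hard_reset()
    target.boot()
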
diff --git a/devlib/platform/__init__.py b/devlib/platform/__init__.py
new file mode 100644
index 0000000..1bf8fac
--- /dev/null
+++ b/devlib/platform/__init__.py
@@ -0,0 +1,95 @@
+import logging
+
+
+BIG_CPUS = ['A15', 'A57', 'A72']
+
+
+class Platform(object):
+
+    @property
+    def number_of_clusters(self):
+        return len(set(self.core_clusters))
+
+    def __init__(self,
+                 name=None,
+                 core_names=None,
+                 core_clusters=None,
+                 big_core=None,
+                 model=None,
+                 modules=None,
+                 ):
+        self.name = name
+        self.core_names = core_names or []
+        self.core_clusters = core_clusters or []
+        self.big_core = big_core
+        self.little_core = None
+        self.model = model
+        self.modules = modules or []
+        self.logger = logging.getLogger(self.name)
+        if not self.core_clusters and self.core_names:
+            self._set_core_clusters_from_core_names()
+
+    def init_target_connection(self, target):
+        # May be overwritten by subclasses to provide target-specific
+        # connection initialisation.
+        pass
+
+    def update_from_target(self, target):
+        if not self.core_names:
+            self.core_names = target.cpuinfo.cpu_names
+            self._set_core_clusters_from_core_names()
+        if not self.big_core and self.number_of_clusters == 2:
+            self.big_core = self._identify_big_core()
+        if not self.core_clusters and self.core_names:
+            self._set_core_clusters_from_core_names()
+        if not self.model:
+            self._set_model_from_target(target)
+        if not self.name:
+            self.name = self.model
+        self._validate()
+
+    def setup(self, target):
+        # May be overwritten by subclasses to provide platform-specific
+        # setup procedures.
+        pass
+
+    def _set_core_clusters_from_core_names(self):
+        self.core_clusters = []
+        clusters = []
+        for cn in self.core_names:
+            if cn not in clusters:
+                clusters.append(cn)
+            self.core_clusters.append(clusters.index(cn))
+
+    def _set_model_from_target(self, target):
+        if target.os == 'android':
+            self.model = target.getprop('ro.product.model')
+        elif target.is_rooted:
+            try:
+                self.model = target.execute('dmidecode -s system-version',
+                                            as_root=True).strip()
+            except Exception:  # pylint: disable=broad-except
+                pass  # this is best-effort
+
+    def _identify_big_core(self):
+        for core in self.core_names:
+            if core.upper() in BIG_CPUS:
+                return core
+        big_idx = self.core_clusters.index(max(self.core_clusters))
+        return self.core_names[big_idx]
+
+    def _validate(self):
+        if len(self.core_names) != len(self.core_clusters):
+            raise ValueError('core_names and core_clusters are of different lengths.')
+        if self.big_core and self.number_of_clusters != 2:
+            raise ValueError('attempting to set big_core on non-big.LITTLE device '
+                             '(number of clusters is not 2)')
+        if self.big_core and self.big_core not in self.core_names:
+            message = 'Invalid big_core value "{}"; must be in [{}]'
+            raise ValueError(message.format(self.big_core,
+                                            ', '.join(set(self.core_names))))
+        if self.big_core:
+            for core in self.core_names:
+                if core != self.big_core:
+                    self.little_core = core
+                    break
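A small sketch of the cluster bookkeeping performed by ``Platform``; the board
name and core names are illustrative::

    from devlib.platform import Platform

    platform = Platform(name='example-board',
                        core_names=['A53', 'A53', 'A57', 'A57'])

    # core_clusters is derived from core_names when not given explicitly.
    print(platform.core_clusters)        # [0, 0, 1, 1]
    print(platform.number_of_clusters)   # 2

    # big_core is normally inferred (A57 is listed in BIG_CPUS) once
    # update_from_target() runs against a connected target.
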
diff --git a/devlib/platform/arm.py b/devlib/platform/arm.py
new file mode 100644
index 0000000..e760eaf
--- /dev/null
+++ b/devlib/platform/arm.py
@@ -0,0 +1,295 @@
+#    Copyright 2015 ARM Limited
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+from __future__ import division
+import os
+import tempfile
+import csv
+import time
+import pexpect
+
+from devlib.platform import Platform
+from devlib.instrument import Instrument, InstrumentChannel, MeasurementsCsv, Measurement, CONTINUOUS, INSTANTANEOUS
+from devlib.exception import TargetError, HostError
+from devlib.host import PACKAGE_BIN_DIRECTORY
+from devlib.utils.serial_port import open_serial_connection
+
+
+class VersatileExpressPlatform(Platform):
+
+    def __init__(self, name,  # pylint: disable=too-many-locals
+
+                 core_names=None,
+                 core_clusters=None,
+                 big_core=None,
+                 modules=None,
+
+                 # serial settings
+                 serial_port='/dev/ttyS0',
+                 baudrate=115200,
+
+                 # VExpress MicroSD mount point
+                 vemsd_mount=None,
+
+                 # supported: dtr, reboottxt
+                 hard_reset_method=None,
+                 # supported: uefi, uefi-shell, u-boot, bootmon
+                 bootloader=None,
+                 # supported: vemsd
+                 flash_method='vemsd',
+
+                 image=None,
+                 fdt=None,
+                 initrd=None,
+                 bootargs=None,
+
+                 uefi_entry=None,  # only used if bootloader is "uefi"
+                 ready_timeout=60,
+                 ):
+        super(VersatileExpressPlatform, self).__init__(name,
+                                                       core_names,
+                                                       core_clusters,
+                                                       big_core,
+                                                       modules)
+        self.serial_port = serial_port
+        self.baudrate = baudrate
+        self.vemsd_mount = vemsd_mount
+        self.image = image
+        self.fdt = fdt
+        self.initrd = initrd
+        self.bootargs = bootargs
+        self.uefi_entry = uefi_entry
+        self.ready_timeout = ready_timeout
+        self.bootloader = None
+        self.hard_reset_method = None
+        self._set_bootloader(bootloader)
+        self._set_hard_reset_method(hard_reset_method)
+        self._set_flash_method(flash_method)
+
+    def init_target_connection(self, target):
+        if target.os == 'android':
+            self._init_android_target(target)
+        else:
+            self._init_linux_target(target)
+
+    def _init_android_target(self, target):
+        if target.connection_settings.get('device') is None:
+            addr = self._get_target_ip_address(target)
+            target.connection_settings['device'] = addr + ':5555'
+
+    def _init_linux_target(self, target):
+        if target.connection_settings.get('host') is None:
+            addr = self._get_target_ip_address(target)
+            target.connection_settings['host'] = addr
+
+    def _get_target_ip_address(self, target):
+        with open_serial_connection(port=self.serial_port,
+                                    baudrate=self.baudrate,
+                                    timeout=30,
+                                    init_dtr=0) as tty:
+            tty.sendline('')
+            self.logger.debug('Waiting for the Android shell prompt.')
+            tty.expect(target.shell_prompt)
+
+            self.logger.debug('Waiting for IP address...')
+            wait_start_time = time.time()
+            while True:
+                tty.sendline('ip addr list eth0')
+                time.sleep(1)
+                try:
+                    tty.expect(r'inet ([1-9]\d*\.\d+\.\d+\.\d+)', timeout=10)
+                    return tty.match.group(1)
+                except pexpect.TIMEOUT:
+                    pass  # We have our own timeout -- see below.
+                if (time.time() - wait_start_time) > self.ready_timeout:
+                    raise TargetError('Could not acquire IP address.')
+
+    def _set_hard_reset_method(self, hard_reset_method):
+        if hard_reset_method == 'dtr':
+            self.modules.append({'vexpress-dtr': {'port': self.serial_port,
+                                                  'baudrate': self.baudrate,
+                                                  }})
+        elif hard_reset_method == 'reboottxt':
+            self.modules.append({'vexpress-reboottxt': {'port': self.serial_port,
+                                                        'baudrate': self.baudrate,
+                                                        'path': self.vemsd_mount,
+                                                        }})
+        else:
+            raise ValueError('Invalid hard_reset_method: {}'.format(hard_reset_method))
+
+    def _set_bootloader(self, bootloader):
+        self.bootloader = bootloader
+        if self.bootloader == 'uefi':
+            self.modules.append({'vexpress-uefi': {'port': self.serial_port,
+                                                   'baudrate': self.baudrate,
+                                                   'image': self.image,
+                                                   'fdt': self.fdt,
+                                                   'initrd': self.initrd,
+                                                   'bootargs': self.bootargs,
+                                                   }})
+        elif self.bootloader == 'uefi-shell':
+            self.modules.append({'vexpress-uefi-shell': {'port': self.serial_port,
+                                                         'baudrate': self.baudrate,
+                                                         'image': self.image,
+                                                         'bootargs': self.bootargs,
+                                                         }})
+        elif self.bootloader == 'u-boot':
+            uboot_env = None
+            if self.bootargs:
+                uboot_env = {'bootargs': self.bootargs}
+            self.modules.append({'vexpress-u-boot': {'port': self.serial_port,
+                                                     'baudrate': self.baudrate,
+                                                     'env': uboot_env,
+                                                     }})
+        elif self.bootloader == 'bootmon':
+            self.modules.append({'vexpress-bootmon': {'port': self.serial_port,
+                                                      'baudrate': self.baudrate,
+                                                      'image': self.image,
+                                                      'fdt': self.fdt,
+                                                      'initrd': self.initrd,
+                                                      'bootargs': self.bootargs,
+                                                      }})
+        else:
+            raise ValueError('Invalid bootloader: {}'.format(bootloader))
+
+    def _set_flash_method(self, flash_method):
+        if flash_method == 'vemsd':
+            self.modules.append({'vexpress-vemsd': {'vemsd_mount': self.vemsd_mount}})
+        else:
+            raise ValueError('Invalid flash_method: {}'.format(flash_method))
+
+
+class Juno(VersatileExpressPlatform):
+
+    def __init__(self,
+                 vemsd_mount='/media/JUNO',
+                 baudrate=115200,
+                 bootloader='u-boot',
+                 hard_reset_method='dtr',
+                 **kwargs
+                 ):
+        super(Juno, self).__init__('juno',
+                                   vemsd_mount=vemsd_mount,
+                                   baudrate=baudrate,
+                                   bootloader=bootloader,
+                                   hard_reset_method=hard_reset_method,
+                                   **kwargs)
+
+
+class TC2(VersatileExpressPlatform):
+
+    def __init__(self,
+                 vemsd_mount='/media/VEMSD',
+                 baudrate=38400,
+                 bootloader='bootmon',
+                 hard_reset_method='reboottxt',
+                 **kwargs
+                 ):
+        super(TC2, self).__init__('tc2',
+                                  vemsd_mount=vemsd_mount,
+                                  baudrate=baudrate,
+                                  bootloader=bootloader,
+                                  hard_reset_method=hard_reset_method,
+                                  **kwargs)
+
+
+class JunoEnergyInstrument(Instrument):
+
+    binname = 'readenergy'
+    mode = CONTINUOUS | INSTANTANEOUS
+
+    _channels = [
+        InstrumentChannel('sys_curr', 'sys', 'current'),
+        InstrumentChannel('a57_curr', 'a57', 'current'),
+        InstrumentChannel('a53_curr', 'a53', 'current'),
+        InstrumentChannel('gpu_curr', 'gpu', 'current'),
+        InstrumentChannel('sys_volt', 'sys', 'voltage'),
+        InstrumentChannel('a57_volt', 'a57', 'voltage'),
+        InstrumentChannel('a53_volt', 'a53', 'voltage'),
+        InstrumentChannel('gpu_volt', 'gpu', 'voltage'),
+        InstrumentChannel('sys_pow', 'sys', 'power'),
+        InstrumentChannel('a57_pow', 'a57', 'power'),
+        InstrumentChannel('a53_pow', 'a53', 'power'),
+        InstrumentChannel('gpu_pow', 'gpu', 'power'),
+        InstrumentChannel('sys_cenr', 'sys', 'energy'),
+        InstrumentChannel('a57_cenr', 'a57', 'energy'),
+        InstrumentChannel('a53_cenr', 'a53', 'energy'),
+        InstrumentChannel('gpu_cenr', 'gpu', 'energy'),
+    ]
+
+    def __init__(self, target):
+        super(JunoEnergyInstrument, self).__init__(target)
+        self.on_target_file = None
+        self.command = None
+        self.binary = self.target.bin(self.binname)
+        for chan in self._channels:
+            self.channels[chan.name] = chan
+        self.on_target_file = self.target.tempfile('energy', '.csv')
+        self.sample_rate_hz = 10 # DEFAULT_PERIOD is 100[ms] in readenergy.c
+        self.command = '{} -o {}'.format(self.binary, self.on_target_file)
+        self.command2 = '{}'.format(self.binary)
+
+    def setup(self):
+        self.binary = self.target.install(os.path.join(PACKAGE_BIN_DIRECTORY,
+                                                       self.target.abi, self.binname))
+
+    def reset(self, sites=None, kinds=None):
+        super(JunoEnergyInstrument, self).reset(sites, kinds)
+        self.target.killall(self.binname, as_root=True)
+
+    def start(self):
+        self.target.kick_off(self.command, as_root=True)
+
+    def stop(self):
+        self.target.killall(self.binname, signal='TERM', as_root=True)
+
+    def get_data(self, output_file):
+        temp_file = tempfile.mktemp()
+        self.target.pull(self.on_target_file, temp_file)
+        self.target.remove(self.on_target_file)
+
+        with open(temp_file, 'rb') as fh:
+            reader = csv.reader(fh)
+            headings = reader.next()
+
+            # Figure out which columns from the collected csv we actually want
+            select_columns = []
+            for chan in self.active_channels:
+                try:
+                    select_columns.append(headings.index(chan.name))
+                except ValueError:
+                    raise HostError('Channel "{}" is not in {}'.format(chan.name, temp_file))
+
+            with open(output_file, 'wb') as wfh:
+                write_headings = ['{}_{}'.format(c.site, c.kind)
+                                  for c in self.active_channels]
+                writer = csv.writer(wfh)
+                writer.writerow(write_headings)
+                for row in reader:
+                    write_row = [row[c] for c in select_columns]
+                    writer.writerow(write_row)
+
+        return MeasurementsCsv(output_file, self.active_channels)
+
+    def take_measurement(self):
+        result = []
+        output = self.target.execute(self.command2).split()
+        reader = csv.reader(output)
+        headings = reader.next()
+        values = reader.next()
+        for chan in self.active_channels:
+            value = values[headings.index(chan.name)]
+            result.append(Measurement(value, chan))
+        return result
+
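A sketch of collecting energy measurements on Juno with the instrument above.
The connection settings are placeholders, and the ``LinuxTarget`` keyword
arguments are assumed from the wider devlib API rather than defined in this
file::

    from devlib import LinuxTarget, Juno, JunoEnergyInstrument

    platform = Juno()    # defaults: /dev/ttyS0 @ 115200, VEMSD at /media/JUNO
    target = LinuxTarget(platform=platform,
                         connection_settings={'host': '192.168.0.10',   # placeholder
                                              'username': 'root'})

    instrument = JunoEnergyInstrument(target)
    instrument.setup()                                      # installs 'readenergy'
    instrument.reset(sites=['a57', 'a53'], kinds=['power'])
    instrument.start()
    # ... run the workload of interest ...
    instrument.stop()
    csv = instrument.get_data('juno_energy.csv')            # MeasurementsCsv
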
diff --git a/devlib/platform/gem5.py b/devlib/platform/gem5.py
new file mode 100644
index 0000000..a4bad28
--- /dev/null
+++ b/devlib/platform/gem5.py
@@ -0,0 +1,292 @@
+#    Copyright 2016 ARM Limited
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import os
+import re
+import subprocess
+import sys
+import shutil
+import time
+import types
+
+from devlib.exception import TargetError
+from devlib.host import PACKAGE_BIN_DIRECTORY
+from devlib.platform import Platform
+from devlib.utils.ssh import AndroidGem5Connection, LinuxGem5Connection
+
+class Gem5SimulationPlatform(Platform):
+
+    def __init__(self, name,
+                 host_output_dir,
+                 gem5_bin,
+                 gem5_args,
+                 gem5_virtio,
+                 core_names=None,
+                 core_clusters=None,
+                 big_core=None,
+                 model=None,
+                 modules=None,
+                 gem5_telnet_port=None):
+
+        # First call the parent class
+        super(Gem5SimulationPlatform, self).__init__(name, core_names, core_clusters,
+                                                     big_core, model, modules)
+
+        # Start setting up the gem5 parameters/directories
+        # The gem5 subprocess
+        self.gem5 = None
+        self.gem5_port = gem5_telnet_port or None
+        self.stats_directory = host_output_dir
+        self.gem5_out_dir = os.path.join(self.stats_directory, "gem5")
+        self.gem5_interact_dir = '/tmp' # Host directory
+        self.executable_dir = None # Device directory
+        self.working_dir = None # Device directory
+        self.stdout_file = None
+        self.stderr_file = None
+        self.stderr_filename = None
+        if self.gem5_port is None:
+            # No telnet port was given, so launch a new gem5 simulation and
+            # discover the port from its output.
+            self.start_gem5_simulation = True
+        else:
+            # A port was given; attach to an already running simulation.
+            self.start_gem5_simulation = False
+
+        # Find the first one that does not exist. Ensures that we do not re-use
+        # the directory used by someone else.
+        for i in xrange(sys.maxint):
+            directory = os.path.join(self.gem5_interact_dir, "wa_{}".format(i))
+            try:
+                os.stat(directory)
+                continue
+            except OSError:
+                break
+        self.gem5_interact_dir = directory
+        self.logger.debug("Using {} as the temporary directory."
+                          .format(self.gem5_interact_dir))
+
+        # Parameters passed onto gem5
+        self.gem5args_binary = gem5_bin
+        self.gem5args_args = gem5_args
+        self.gem5args_virtio = gem5_virtio
+        self._check_gem5_command()
+
+        # Start the interaction with gem5
+        self._start_interaction_gem5()
+
+    def _check_gem5_command(self):
+        """
+        Check if the command to start gem5 makes sense
+        """
+        if self.gem5args_binary is None:
+            raise TargetError('Please specify a gem5 binary.')
+        if self.gem5args_args is None:
+            raise TargetError('Please specify the arguments passed on to gem5.')
+        if self.gem5args_virtio is None:
+            raise TargetError('Please specify arguments needed for virtIO.')
+        self.gem5args_virtio = str(self.gem5args_virtio).format(self.gem5_interact_dir)
+
+    def _start_interaction_gem5(self):
+        """
+        Starts the interaction of devlib with gem5.
+        """
+
+        # First create the input and output directories for gem5
+        if self.start_gem5_simulation:
+            # Create the directory to send data to/from gem5 system
+            self.logger.info("Creating temporary directory for interaction "
+                             " with gem5 via virtIO: {}"
+                             .format(self.gem5_interact_dir))
+            os.mkdir(self.gem5_interact_dir)
+
+            # Create the directory for gem5 output (stats files etc)
+            if not os.path.exists(self.stats_directory):
+                os.mkdir(self.stats_directory)
+            if os.path.exists(self.gem5_out_dir):
+                raise TargetError("The gem5 stats directory {} already "
+                                  "exists.".format(self.gem5_out_dir))
+            else:
+                os.mkdir(self.gem5_out_dir)
+
+            # We need to redirect the standard output and standard error for the
+            # gem5 process to a file so that we can debug when things go wrong.
+            f = os.path.join(self.gem5_out_dir, 'stdout')
+            self.stdout_file = open(f, 'w')
+            f = os.path.join(self.gem5_out_dir, 'stderr')
+            self.stderr_file = open(f, 'w')
+            # We need to keep this so we can check which port to use for the
+            # telnet connection.
+            self.stderr_filename = f
+
+            # Start gem5 simulation
+            self.logger.info("Starting the gem5 simulator")
+
+            command_line = "{} --outdir={} {} {}".format(self.gem5args_binary,
+                                                         self.gem5_out_dir,
+                                                         self.gem5args_args,
+                                                         self.gem5args_virtio)
+            self.logger.debug("gem5 command line: {}".format(command_line))
+            self.gem5 = subprocess.Popen(command_line.split(),
+                                         stdout=self.stdout_file,
+                                         stderr=self.stderr_file)
+
+        else:
+            # The simulation should already be running
+            # Need to dig up the (1) gem5 simulation in question (2) its input
+            # and output directories (3) virtio setting
+            self._intercept_existing_gem5()
+
+        # As the gem5 simulation is running now or was already running
+        # we now need to find out which telnet port it uses
+        self._intercept_telnet_port()
+
+    def _intercept_existing_gem5(self):
+        """
+        Intercept the information about a running gem5 simulation
+        e.g. pid, input directory etc
+        """
+        self.logger("This functionality is not yet implemented")
+        raise TargetError()
+
+    def _intercept_telnet_port(self):
+        """
+        Intercept the telnet port of a running gem5 simulation
+        """
+
+        if self.gem5 is None:
+            raise TargetError('The platform has no gem5 simulation! '
+                              'Something went wrong')
+        while self.gem5_port is None:
+            # Check that gem5 is running!
+            if self.gem5.poll() is not None:
+                raise TargetError("The gem5 process has terminated with exit code {}!"
+                                  .format(self.gem5.poll()))
+
+            # Open the stderr file
+            with open(self.stderr_filename, 'r') as f:
+                for line in f:
+                    m = re.search(r"Listening for system connection on port (?P<port>\d+)", line)
+                    if m:
+                        port = int(m.group('port'))
+                        if port >= 3456 and port < 5900:
+                            self.gem5_port = port
+                            break
+                    # Check if the sockets are not disabled
+                    m = re.search(r"Sockets disabled, not accepting terminal connections", line)
+                    if m:
+                        raise TargetError("The sockets have been disabled!"
+                                          "Pass --listener-mode=on to gem5")
+                else:
+                    time.sleep(1)
+
+    def init_target_connection(self, target):
+        """
+        Update the type of connection in the target from here
+        """
+        if target.os == 'linux':
+            target.conn_cls = LinuxGem5Connection
+        else:
+            target.conn_cls = AndroidGem5Connection
+
+    def setup(self, target):
+        """
+        Deploy m5 if not yet installed
+        """
+        m5_path = target.get_installed('m5')
+        if m5_path is None:
+            m5_path = self._deploy_m5(target)
+        target.conn.m5_path = m5_path
+
+        # Set the terminal settings for the connection to gem5
+        self._resize_shell(target)
+
+    def update_from_target(self, target):
+        """
+        Set the m5 path and if not yet installed, deploy m5
+        Overwrite certain methods in the target that either can be done
+        more efficiently by gem5 or don't exist in gem5
+        """
+        m5_path = target.get_installed('m5')
+        if m5_path is None:
+            m5_path = self._deploy_m5(target)
+        target.conn.m5_path = m5_path
+
+        # Overwrite the following methods (monkey-patching)
+        self.logger.debug("Overwriting the 'capture_screen' method in target")
+        # Housekeeping to prevent recursion
+        setattr(target, 'target_impl_capture_screen', target.capture_screen)
+        target.capture_screen = types.MethodType(_overwritten_capture_screen, target)
+        self.logger.debug("Overwriting the 'reset' method in target")
+        target.reset = types.MethodType(_overwritten_reset, target)
+        self.logger.debug("Overwriting the 'reboot' method in target")
+        target.reboot = types.MethodType(_overwritten_reboot, target)
+
+        # Call the general update_from_target implementation
+        super(Gem5SimulationPlatform, self).update_from_target(target)
+
+    def gem5_capture_screen(self, filepath):
+        file_list = os.listdir(self.gem5_out_dir)
+        screen_caps = []
+        for f in file_list:
+            if '.bmp' in f:
+                screen_caps.append(f)
+
+        successful_capture = False
+        if len(screen_caps) == 1:
+            # A single gem5 screencap is available; try to convert it. If this
+            # fails, fall back to the slower, built-in capture method.
+            try:
+                import Image
+                gem5_image = os.path.join(self.gem5_out_dir, screen_caps[0])
+                temp_image = os.path.join(self.gem5_out_dir, "file.png")
+                im = Image.open(gem5_image)
+                im.save(temp_image, "PNG")
+                shutil.copy(temp_image, filepath)
+                os.remove(temp_image)
+                self.logger.info("capture_screen: using gem5 screencap")
+                successful_capture = True
+
+            except (shutil.Error, ImportError, IOError):
+                pass
+
+        return successful_capture
+
+    def _deploy_m5(self, target):
+        # m5 is not yet installed so install it
+        host_executable = os.path.join(PACKAGE_BIN_DIRECTORY,
+                                       target.abi, 'm5')
+        return target.install(host_executable)
+
+    def _resize_shell(self, target):
+        """
+        Resize the shell to avoid line wrapping issues.
+
+        """
+        # Try and avoid line wrapping as much as possible.
+        target.execute('{} stty columns 1024'.format(target.busybox))
+        target.execute('reset', check_exit_code=False)
+
+# Methods that will be monkey-patched onto the target
+def _overwritten_reset(self):
+    raise TargetError('Resetting is not allowed on gem5 platforms!')
+
+def _overwritten_reboot(self):
+    raise TargetError('Rebooting is not allowed on gem5 platforms!')
+
+def _overwritten_capture_screen(self, filepath):
+    connection_screencapped = self.platform.gem5_capture_screen(filepath)
+    if not connection_screencapped:
+        # The platform was not able to capture the screen, so fall back to the
+        # original target implementation.
+        self.logger.debug('{} was unable to capture the screen; using the '
+                          'original target implementation'
+                          .format(self.platform.__class__.__name__))
+        self.target_impl_capture_screen(filepath)
+
+
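+# Construction sketch (illustrative; the binary, config script and virtIO
+# arguments below are assumptions, not shipped defaults):
+#
+#     platform = Gem5SimulationPlatform('gem5',
+#                                       host_output_dir='/tmp/gem5_output',
+#                                       gem5_bin='/path/to/gem5.opt',
+#                                       gem5_args='/path/to/fs.py --some-config',
+#                                       gem5_virtio='--disk-image {}')
+#
+# Note that constructing the platform launches (or attaches to) the gem5
+# simulation straight away via _start_interaction_gem5(); gem5_virtio must
+# contain a '{}' placeholder, which is filled with the host interaction
+# directory used for virtIO file transfers.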
diff --git a/devlib/target.py b/devlib/target.py
new file mode 100644
index 0000000..5bddad4
--- /dev/null
+++ b/devlib/target.py
@@ -0,0 +1,1364 @@
+import os
+import re
+import time
+import logging
+import posixpath
+import subprocess
+import tempfile
+import threading
+from collections import namedtuple
+
+from devlib.host import LocalConnection, PACKAGE_BIN_DIRECTORY
+from devlib.module import get_module
+from devlib.platform import Platform
+from devlib.exception import TargetError, TargetNotRespondingError, TimeoutError
+from devlib.utils.ssh import SshConnection
+from devlib.utils.android import AdbConnection, AndroidProperties, adb_command, adb_disconnect
+from devlib.utils.misc import memoized, isiterable, convert_new_lines, merge_lists
+from devlib.utils.misc import ABI_MAP, get_cpu_name, ranges_to_list, escape_double_quotes
+from devlib.utils.types import integer, boolean, bitmask, identifier, caseless_string
+
+
+FSTAB_ENTRY_REGEX = re.compile(r'(\S+) on (.+) type (\S+) \((\S+)\)')
+ANDROID_SCREEN_STATE_REGEX = re.compile('(?:mPowerState|mScreenOn|Display Power: state)=([0-9]+|true|false|ON|OFF)',
+                                        re.IGNORECASE)
+ANDROID_SCREEN_RESOLUTION_REGEX = re.compile(r'mUnrestrictedScreen=\(\d+,\d+\)'
+                                             r'\s+(?P<width>\d+)x(?P<height>\d+)')
+DEFAULT_SHELL_PROMPT = re.compile(r'^.*(shell|root)@.*:/\S* [#$] ',
+                                  re.MULTILINE)
+KVERSION_REGEX = re.compile(
+    r'(?P<version>\d+)(\.(?P<major>\d+)(\.(?P<minor>\d+)(-rc(?P<rc>\d+))?)?)?(.*-g(?P<sha1>[0-9a-fA-F]{7,}))?'
+)
+
+
+class Target(object):
+
+    path = None
+    os = None
+
+    default_modules = [
+        'hotplug',
+        'cpufreq',
+        'cpuidle',
+        'cgroups',
+        'hwmon',
+    ]
+
+    @property
+    def core_names(self):
+        return self.platform.core_names
+
+    @property
+    def core_clusters(self):
+        return self.platform.core_clusters
+
+    @property
+    def big_core(self):
+        return self.platform.big_core
+
+    @property
+    def little_core(self):
+        return self.platform.little_core
+
+    @property
+    def is_connected(self):
+        return self.conn is not None
+
+    @property
+    def connected_as_root(self):
+        if self._connected_as_root is None:
+            result = self.execute('id')
+            self._connected_as_root = 'uid=0(' in result
+        return self._connected_as_root
+
+    @property
+    @memoized
+    def is_rooted(self):
+        if self.connected_as_root:
+            return True
+        try:
+            self.execute('ls /', timeout=2, as_root=True)
+            return True
+        except (TargetError, TimeoutError):
+            return False
+
+    @property
+    @memoized
+    def needs_su(self):
+        return not self.connected_as_root and self.is_rooted
+
+    @property
+    @memoized
+    def kernel_version(self):
+        return KernelVersion(self.execute('{} uname -r -v'.format(self.busybox)).strip())
+
+    @property
+    def os_version(self):  # pylint: disable=no-self-use
+        return {}
+
+    @property
+    def abi(self):  # pylint: disable=no-self-use
+        return None
+
+    @property
+    @memoized
+    def cpuinfo(self):
+        return Cpuinfo(self.execute('cat /proc/cpuinfo'))
+
+    @property
+    @memoized
+    def number_of_cpus(self):
+        num_cpus = 0
+        corere = re.compile(r'^\s*cpu\d+\s*$')
+        output = self.execute('ls /sys/devices/system/cpu')
+        for entry in output.split():
+            if corere.match(entry):
+                num_cpus += 1
+        return num_cpus
+
+    @property
+    @memoized
+    def config(self):
+        try:
+            return KernelConfig(self.execute('zcat /proc/config.gz'))
+        except TargetError:
+            for path in ['/boot/config', '/boot/config-$(uname -r)']:
+                try:
+                    return KernelConfig(self.execute('cat {}'.format(path)))
+                except TargetError:
+                    pass
+        return KernelConfig('')
+
+    @property
+    @memoized
+    def user(self):
+        return self.getenv('USER')
+
+    @property
+    def conn(self):
+        if self._connections:
+            tid = id(threading.current_thread())
+            if tid not in self._connections:
+                self._connections[tid] = self.get_connection()
+            return self._connections[tid]
+        else:
+            return None
+
+    def __init__(self,
+                 connection_settings=None,
+                 platform=None,
+                 working_directory=None,
+                 executables_directory=None,
+                 connect=True,
+                 modules=None,
+                 load_default_modules=True,
+                 shell_prompt=DEFAULT_SHELL_PROMPT,
+                 conn_cls=None,
+                 ):
+        self._connected_as_root = None
+        self.connection_settings = connection_settings or {}
+        # Set self.platform: either it is given directly (via the platform
+        # argument) or via the connection_settings argument.
+        # If neither, create a default Platform().
+        if platform is None:
+            self.platform = self.connection_settings.get('platform', Platform())
+        else:
+            self.platform = platform
+        # Check that the user hasn't specified two different platforms
+        if 'platform' in self.connection_settings:
+            if self.connection_settings['platform'] is not self.platform:
+                raise TargetError('Platform specified in connection_settings '
+                                  '({}) differs from that directly passed '
+                                  '({})!'
+                                  .format(self.connection_settings['platform'],
+                                          self.platform))
+        self.connection_settings['platform'] = self.platform
+        self.working_directory = working_directory
+        self.executables_directory = executables_directory
+        self.modules = modules or []
+        self.load_default_modules = load_default_modules
+        self.shell_prompt = shell_prompt
+        self.conn_cls = conn_cls
+        self.logger = logging.getLogger(self.__class__.__name__)
+        self._installed_binaries = {}
+        self._installed_modules = {}
+        self._cache = {}
+        self._connections = {}
+        self.busybox = None
+
+        if load_default_modules:
+            module_lists = [self.default_modules]
+        else:
+            module_lists = []
+        module_lists += [self.modules, self.platform.modules]
+        self.modules = merge_lists(*module_lists, duplicates='first')
+        self._update_modules('early')
+        if connect:
+            self.connect()
+
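+    # Construction sketch (illustrative): Target itself is effectively
+    # abstract; a concrete subclass such as LinuxTarget (defined below) would
+    # typically be created like this. The host address and credentials are
+    # assumptions.
+    #
+    #     target = LinuxTarget(connection_settings={'host': '192.168.0.10',
+    #                                               'username': 'root',
+    #                                               'password': 'root'},
+    #                          modules=['cpufreq', 'hotplug'])
+    #
+    # The modules listed here are merged with default_modules and the
+    # platform's modules (duplicates resolved in favour of the first
+    # occurrence) and installed in stages: 'early', 'connected' and 'setup'.
+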
+    # connection and initialization
+
+    def connect(self, timeout=None):
+        self.platform.init_target_connection(self)
+        tid = id(threading.current_thread())
+        self._connections[tid] = self.get_connection(timeout=timeout)
+        self._resolve_paths()
+        self.busybox = self.get_installed('busybox')
+        self.platform.update_from_target(self)
+        self._update_modules('connected')
+        if self.platform.big_core and self.load_default_modules:
+            self._install_module(get_module('bl'))
+
+    def disconnect(self):
+        for conn in self._connections.itervalues():
+            conn.close()
+        self._connections = {}
+
+    def get_connection(self, timeout=None):
+        if self.conn_cls is None:
+            raise ValueError('Connection class not specified on Target creation.')
+        return self.conn_cls(timeout=timeout, **self.connection_settings)  # pylint: disable=not-callable
+
+    def setup(self, executables=None):
+        self.execute('mkdir -p {}'.format(self.working_directory))
+        self.execute('mkdir -p {}'.format(self.executables_directory))
+        self.busybox = self.install(os.path.join(PACKAGE_BIN_DIRECTORY, self.abi, 'busybox'))
+
+        # Setup shutils script for the target
+        shutils_ifile = os.path.join(PACKAGE_BIN_DIRECTORY, 'scripts', 'shutils.in')
+        shutils_ofile = os.path.join(PACKAGE_BIN_DIRECTORY, 'scripts', 'shutils')
+        shell_path = '/bin/sh'
+        if self.os == 'android':
+            shell_path = '/system/bin/sh'
+        with open(shutils_ifile) as fh:
+            lines = fh.readlines()
+        with open(shutils_ofile, 'w') as ofile:
+            for line in lines:
+                line = line.replace("__DEVLIB_SHELL__", shell_path)
+                line = line.replace("__DEVLIB_BUSYBOX__", self.busybox)
+                ofile.write(line)
+        self.shutils = self.install(os.path.join(PACKAGE_BIN_DIRECTORY, 'scripts', 'shutils'))
+
+        for host_exe in (executables or []):  # pylint: disable=superfluous-parens
+            self.install(host_exe)
+
+        # Check for platform dependent setup procedures
+        self.platform.setup(self)
+
+        # Initialize modules which require busybox (e.g. shutils-dependent tasks)
+        self._update_modules('setup')
+
+    def reboot(self, hard=False, connect=True, timeout=180):
+        if hard:
+            if not self.has('hard_reset'):
+                raise TargetError('Hard reset not supported for this target.')
+            self.hard_reset()  # pylint: disable=no-member
+        else:
+            if not self.is_connected:
+                message = 'Cannot reboot target because it is disconnected. ' +\
+                          'Either connect() first, or specify hard=True ' +\
+                          '(in which case, a hard_reset module must be installed)'
+                raise TargetError(message)
+            self.reset()
+            # Wait a fixed delay before polling to give the target time to shut
+            # down; otherwise we might create the connection while it is still
+            # shutting down, causing the subsequent connection attempt to fail.
+            self.logger.debug('Waiting for target to power down...')
+            reset_delay = 20
+            time.sleep(reset_delay)
+            timeout = max(timeout - reset_delay, 10)
+        if self.has('boot'):
+            self.boot()  # pylint: disable=no-member
+        self._connected_as_root = None
+        if connect:
+            self.connect(timeout=timeout)
+
+    # file transfer
+
+    def push(self, source, dest, timeout=None):
+        return self.conn.push(source, dest, timeout=timeout)
+
+    def pull(self, source, dest, timeout=None):
+        return self.conn.pull(source, dest, timeout=timeout)
+
+    # execution
+
+    def execute(self, command, timeout=None, check_exit_code=True, as_root=False):
+        return self.conn.execute(command, timeout, check_exit_code, as_root)
+
+    def background(self, command, stdout=subprocess.PIPE, stderr=subprocess.PIPE, as_root=False):
+        return self.conn.background(command, stdout, stderr, as_root)
+
+    def invoke(self, binary, args=None, in_directory=None, on_cpus=None,
+               as_root=False, timeout=30):
+        """
+        Executes the specified binary under the specified conditions.
+
+        :binary: binary to execute. Must be present and executable on the device.
+        :args: arguments to be passed to the binary. These can be either a list
+               or a string.
+        :in_directory:  execute the binary in the specified directory. This must
+                        be an absolute path.
+        :on_cpus:  taskset the binary to these CPUs. This may be a single ``int`` (in which
+                   case, it will be interpreted as the mask), a list of ``ints``, in which
+                   case this will be interpreted as the list of cpus, or a string, which
+                   will be interpreted as a comma-separated list of cpu ranges, e.g.
+                   ``"0,4-7"``.
+        :as_root: Specify whether the command should be run as root
+        :timeout: If the invocation does not terminate within this number of seconds,
+                  a ``TimeoutError`` exception will be raised. Set to ``None`` if the
+                  invocation should not timeout.
+
+        :returns: output of command.
+        """
+        command = binary
+        if args:
+            if isiterable(args):
+                args = ' '.join(args)
+            command = '{} {}'.format(command, args)
+        if on_cpus:
+            on_cpus = bitmask(on_cpus)
+            command = '{} taskset 0x{:x} {}'.format(self.busybox, on_cpus, command)
+        if in_directory:
+            command = 'cd {} && {}'.format(in_directory, command)
+        return self.execute(command, as_root=as_root, timeout=timeout)
+
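+    # Usage sketch (illustrative): assuming a connected target with busybox
+    # installed, and a 'dhrystone' binary (and its '-r' flag) that are
+    # assumptions, already installed on the target:
+    #
+    #     output = target.invoke('dhrystone', args='-r 10',
+    #                            on_cpus=[0, 1],
+    #                            in_directory=target.working_directory)
+    #
+    # on_cpus=[0, 1] is converted into a taskset mask (0x3 here) before the
+    # command is run.
+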
+    def kick_off(self, command, as_root=False):
+        raise NotImplementedError()
+
+    # sysfs interaction
+
+    def read_value(self, path, kind=None):
+        output = self.execute('cat \'{}\''.format(path), as_root=self.needs_su).strip()  # pylint: disable=E1103
+        if kind:
+            return kind(output)
+        else:
+            return output
+
+    def read_int(self, path):
+        return self.read_value(path, kind=integer)
+
+    def read_bool(self, path):
+        return self.read_value(path, kind=boolean)
+
+    def write_value(self, path, value, verify=True):
+        value = str(value)
+        self.execute('echo {} > \'{}\''.format(value, path), check_exit_code=False, as_root=True)
+        if verify:
+            output = self.read_value(path)
+            if output != value:
+                message = 'Could not set the value of {} to "{}" (read "{}")'.format(path, value, output)
+                raise TargetError(message)
+
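+    # Example (illustrative): reading and writing sysfs attributes. The cpufreq
+    # paths below are common but not guaranteed to exist on every target.
+    #
+    #     cur = target.read_int('/sys/devices/system/cpu/cpu0/cpufreq/scaling_cur_freq')
+    #     target.write_value('/sys/devices/system/cpu/cpu0/cpufreq/scaling_max_freq',
+    #                        1200000)
+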
+    def reset(self):
+        try:
+            self.execute('reboot', as_root=self.needs_su, timeout=2)
+        except (TargetError, TimeoutError, subprocess.CalledProcessError):
+            # on some targets "reboot" doesn't return gracefully
+            pass
+        self._connected_as_root = None
+
+    def check_responsive(self):
+        try:
+            self.conn.execute('ls /', timeout=5)
+        except (TimeoutError, subprocess.CalledProcessError):
+            raise TargetNotRespondingError(self.conn.name)
+
+    # process management
+
+    def kill(self, pid, signal=None, as_root=False):
+        signal_string = '-s {}'.format(signal) if signal else ''
+        self.execute('kill {} {}'.format(signal_string, pid), as_root=as_root)
+
+    def killall(self, process_name, signal=None, as_root=False):
+        for pid in self.get_pids_of(process_name):
+            try:
+                self.kill(pid, signal=signal, as_root=as_root)
+            except TargetError:
+                pass
+
+    def get_pids_of(self, process_name):
+        raise NotImplementedError()
+
+    def ps(self, **kwargs):
+        raise NotImplementedError()
+
+    # files
+
+    def file_exists(self, filepath):
+        command = 'if [ -e \'{}\' ]; then echo 1; else echo 0; fi'
+        output = self.execute(command.format(filepath), as_root=self.is_rooted)
+        return boolean(output.strip())
+
+    def directory_exists(self, filepath):
+        output = self.execute('if [ -d \'{}\' ]; then echo 1; else echo 0; fi'.format(filepath))
+        # output from ssh may contain part of the expression in the buffer;
+        # split out everything except the last word.
+        return boolean(output.split()[-1])  # pylint: disable=maybe-no-member
+
+    def list_file_systems(self):
+        output = self.execute('mount')
+        fstab = []
+        for line in output.split('\n'):
+            line = line.strip()
+            if not line:
+                continue
+            match = FSTAB_ENTRY_REGEX.search(line)
+            if match:
+                fstab.append(FstabEntry(match.group(1), match.group(2),
+                                        match.group(3), match.group(4),
+                                        None, None))
+            else:  # assume pre-M Android
+                fstab.append(FstabEntry(*line.split()))
+        return fstab
+
+    def list_directory(self, path, as_root=False):
+        raise NotImplementedError()
+
+    def get_workpath(self, name):
+        return self.path.join(self.working_directory, name)
+
+    def tempfile(self, prefix='', suffix=''):
+        names = tempfile._get_candidate_names()  # pylint: disable=W0212
+        for _ in xrange(tempfile.TMP_MAX):
+            name = names.next()
+            path = self.get_workpath(prefix + name + suffix)
+            if not self.file_exists(path):
+                return path
+        raise IOError('No usable temporary filename found')
+
+    def remove(self, path, as_root=False):
+        self.execute('rm -rf {}'.format(path), as_root=as_root)
+
+    # misc
+    def core_cpus(self, core):
+        return [i for i, c in enumerate(self.core_names) if c == core]
+
+    def list_online_cpus(self, core=None):
+        path = self.path.join('/sys/devices/system/cpu/online')
+        output = self.read_value(path)
+        all_online = ranges_to_list(output)
+        if core:
+            cpus = self.core_cpus(core)
+            if not cpus:
+                raise ValueError(core)
+            return [o for o in all_online if o in cpus]
+        else:
+            return all_online
+
+    def list_offline_cpus(self):
+        online = self.list_online_cpus()
+        return [c for c in xrange(self.number_of_cpus)
+                if c not in online]
+
+    def getenv(self, variable):
+        return self.execute('echo ${}'.format(variable)).rstrip('\r\n')
+
+    def capture_screen(self, filepath):
+        raise NotImplementedError()
+
+    def install(self, filepath, timeout=None, with_name=None):
+        raise NotImplementedError()
+
+    def uninstall(self, name):
+        raise NotImplementedError()
+
+    def get_installed(self, name, search_system_binaries=True):
+        # Check user installed binaries first
+        if self.file_exists(self.executables_directory):
+            if name in self.list_directory(self.executables_directory):
+                return self.path.join(self.executables_directory, name)
+        # Fall back to binaries in PATH
+        if search_system_binaries:
+            for path in self.getenv('PATH').split(self.path.pathsep):
+                try:
+                    if name in self.list_directory(path):
+                        return self.path.join(path, name)
+                except TargetError:
+                    pass  # directory does not exist or no execute permissions
+
+    which = get_installed
+
+    def install_if_needed(self, host_path, search_system_binaries=True):
+
+        binary_path = self.get_installed(os.path.split(host_path)[1],
+                                         search_system_binaries=search_system_binaries)
+        if not binary_path:
+            binary_path = self.install(host_path)
+        return binary_path
+
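+    # Example (illustrative): deploy a host-side binary only if the target does
+    # not already provide one; the host path is an assumption.
+    #
+    #     perf_bin = target.install_if_needed('/home/user/bin/arm64/perf')
+    #     print target.execute('{} --version'.format(perf_bin))
+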
+    def is_installed(self, name):
+        return bool(self.get_installed(name))
+
+    def bin(self, name):
+        return self._installed_binaries.get(name, name)
+
+    def has(self, modname):
+        return hasattr(self, identifier(modname))
+
+    def lsmod(self):
+        lines = self.execute('lsmod').splitlines()
+        entries = []
+        for line in lines[1:]:  # first line is the header
+            if not line.strip():
+                continue
+            parts = line.split()
+            name = parts[0]
+            size = int(parts[1])
+            use_count = int(parts[2])
+            if len(parts) > 3:
+                used_by = ''.join(parts[3:]).split(',')
+            else:
+                used_by = []
+            entries.append(LsmodEntry(name, size, use_count, used_by))
+        return entries
+
+    def insmod(self, path):
+        target_path = self.get_workpath(os.path.basename(path))
+        self.push(path, target_path)
+        self.execute('insmod {}'.format(target_path), as_root=True)
+
+
+    def extract(self, path, dest=None):
+        """
+        Extract the specified on-target file. The extraction method to be used
+        (unzip, gunzip, bunzip2, or tar) will be based on the file's extension.
+        If ``dest`` is specified, it must be an existing directory on target;
+        the extracted contents will be placed there.
+
+        Note that, depending on the archive file format (and therefore the
+        extraction method used), the original archive file may or may not exist
+        after the extraction.
+
+        The return value is the path to the extracted contents.  In case of
+        gunzip and bunzip2, this will be the path to the extracted file; for tar
+        and unzip, this will be the directory with the extracted file(s)
+        (``dest`` if it was specified; otherwise, the directory that contained
+        the archive).
+
+        """
+        for ending in ['.tar.gz', '.tar.bz', '.tar.bz2',
+                       '.tgz', '.tbz', '.tbz2']:
+            if path.endswith(ending):
+                return self._extract_archive(path, 'tar xf {} -C {}', dest)
+
+        ext = self.path.splitext(path)[1]
+        if ext in ['.bz', '.bz2']:
+            return self._extract_file(path, 'bunzip2 -f {}', dest)
+        elif ext == '.gz':
+            return self._extract_file(path, 'gunzip -f {}', dest)
+        elif ext == '.zip':
+            return self._extract_archive(path, 'unzip {} -d {}', dest)
+        else:
+            raise ValueError('Unknown compression format: {}'.format(ext))
+
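+    # Example (illustrative): extract an archive that is already present on the
+    # target; the file name is an assumption.
+    #
+    #     results_dir = target.extract(target.get_workpath('results.tar.gz'))
+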
+    def sleep(self, duration):
+        timeout = duration + 10
+        self.execute('sleep {}'.format(duration), timeout=timeout)
+
+    # internal methods
+
+    def _execute_util(self, command, timeout=None, check_exit_code=True, as_root=False):
+        command = '{} {}'.format(self.shutils, command)
+        return self.conn.execute(command, timeout, check_exit_code, as_root)
+
+    def _extract_archive(self, path, cmd, dest=None):
+        cmd = '{} ' + cmd  # busybox
+        if dest:
+            extracted = dest
+        else:
+            extracted = self.path.dirname(path)
+        cmdtext = cmd.format(self.busybox, path, extracted)
+        self.execute(cmdtext)
+        return extracted
+
+    def _extract_file(self, path, cmd, dest=None):
+        cmd = '{} ' + cmd  # busybox
+        cmdtext = cmd.format(self.busybox, path)
+        self.execute(cmdtext)
+        extracted = self.path.splitext(path)[0]
+        if dest:
+            self.execute('mv -f {} {}'.format(extracted, dest))
+            if dest.endswith('/'):
+                extracted = self.path.join(dest, self.path.basename(extracted))
+            else:
+                extracted = dest
+        return extracted
+
+    def _update_modules(self, stage):
+        for mod in self.modules:
+            if isinstance(mod, dict):
+                mod, params = mod.items()[0]
+            else:
+                params = {}
+            mod = get_module(mod)
+            if not mod.stage == stage:
+                continue
+            if mod.probe(self):
+                self._install_module(mod, **params)
+            else:
+                msg = 'Module {} is not supported by the target'.format(mod.name)
+                if self.load_default_modules:
+                    self.logger.debug(msg)
+                else:
+                    self.logger.warning(msg)
+
+    def _install_module(self, mod, **params):
+        if mod.name not in self._installed_modules:
+            self.logger.debug('Installing module {}'.format(mod.name))
+            mod.install(self, **params)
+            self._installed_modules[mod.name] = mod
+        else:
+            self.logger.debug('Module {} is already installed.'.format(mod.name))
+
+    def _resolve_paths(self):
+        raise NotImplementedError()
+
+
+class LinuxTarget(Target):
+
+    path = posixpath
+    os = 'linux'
+
+    @property
+    @memoized
+    def abi(self):
+        value = self.execute('uname -m').strip()
+        for abi, architectures in ABI_MAP.iteritems():
+            if value in architectures:
+                result = abi
+                break
+        else:
+            result = value
+        return result
+
+    @property
+    @memoized
+    def os_version(self):
+        os_version = {}
+        command = 'ls /etc/*-release /etc/*-version /etc/*_release /etc/*_version 2>/dev/null'
+        version_files = self.execute(command, check_exit_code=False).strip().split()
+        for vf in version_files:
+            name = self.path.basename(vf)
+            output = self.read_value(vf)
+            os_version[name] = output.strip().replace('\n', ' ')
+        return os_version
+
+    @property
+    @memoized
+    # There is currently no better way to do this cross platform.
+    # ARM does not have dmidecode
+    def model(self):
+        if self.file_exists("/proc/device-tree/model"):
+            raw_model = self.execute("cat /proc/device-tree/model")
+            return '_'.join(raw_model.split()[:2])
+        return None
+
+    def __init__(self,
+                 connection_settings=None,
+                 platform=None,
+                 working_directory=None,
+                 executables_directory=None,
+                 connect=True,
+                 modules=None,
+                 load_default_modules=True,
+                 shell_prompt=DEFAULT_SHELL_PROMPT,
+                 conn_cls=SshConnection,
+                 ):
+        super(LinuxTarget, self).__init__(connection_settings=connection_settings,
+                                          platform=platform,
+                                          working_directory=working_directory,
+                                          executables_directory=executables_directory,
+                                          connect=connect,
+                                          modules=modules,
+                                          load_default_modules=load_default_modules,
+                                          shell_prompt=shell_prompt,
+                                          conn_cls=conn_cls)
+
+    def connect(self, timeout=None):
+        super(LinuxTarget, self).connect(timeout=timeout)
+
+    def kick_off(self, command, as_root=False):
+        command = 'sh -c "{}" 1>/dev/null 2>/dev/null &'.format(escape_double_quotes(command))
+        return self.conn.execute(command, as_root=as_root)
+
+    def get_pids_of(self, process_name):
+        """Returns a list of PIDs of all processes with the specified name."""
+        # result should be a column of PIDs with the first row as "PID" header
+        result = self.execute('ps -C {} -o pid'.format(process_name),  # NOQA
+                              check_exit_code=False).strip().split()
+        if len(result) >= 2:  # at least one row besides the header
+            return map(int, result[1:])
+        else:
+            return []
+
+    def ps(self, **kwargs):
+        command = 'ps -eo user,pid,ppid,vsize,rss,wchan,pcpu,state,fname'
+        lines = iter(convert_new_lines(self.execute(command)).split('\n'))
+        lines.next()  # header
+
+        result = []
+        for line in lines:
+            parts = re.split(r'\s+', line, maxsplit=8)
+            if parts and parts != ['']:
+                result.append(PsEntry(*(parts[0:1] + map(int, parts[1:5]) + parts[5:])))
+
+        if not kwargs:
+            return result
+        else:
+            filtered_result = []
+            for entry in result:
+                if all(getattr(entry, k) == v for k, v in kwargs.iteritems()):
+                    filtered_result.append(entry)
+            return filtered_result
+
+    def list_directory(self, path, as_root=False):
+        contents = self.execute('ls -1 {}'.format(path), as_root=as_root)
+        return [x.strip() for x in contents.split('\n') if x.strip()]
+
+    def install(self, filepath, timeout=None, with_name=None):  # pylint: disable=W0221
+        destpath = self.path.join(self.executables_directory,
+                                  with_name or self.path.basename(filepath))
+        self.push(filepath, destpath)
+        self.execute('chmod a+x {}'.format(destpath), timeout=timeout)
+        self._installed_binaries[self.path.basename(destpath)] = destpath
+        return destpath
+
+    def uninstall(self, name):
+        path = self.path.join(self.executables_directory, name)
+        self.remove(path)
+
+    def capture_screen(self, filepath):
+        if not self.is_installed('scrot'):
+            self.logger.debug('Could not take screenshot as scrot is not installed.')
+            return
+        try:
+            tmpfile = self.tempfile()
+            self.execute('DISPLAY=:0.0 scrot {}'.format(tmpfile))
+            self.pull(tmpfile, filepath)
+            self.remove(tmpfile)
+        except TargetError as e:
+            if "Can't open X dispay." not in e.message:
+                raise e
+            message = e.message.split('OUTPUT:', 1)[1].strip()  # pylint: disable=no-member
+            self.logger.debug('Could not take screenshot: {}'.format(message))
+
+    def _resolve_paths(self):
+        if self.working_directory is None:
+            if self.connected_as_root:
+                self.working_directory = '/root/devlib-target'
+            else:
+                self.working_directory = '/home/{}/devlib-target'.format(self.user)
+        if self.executables_directory is None:
+            self.executables_directory = self.path.join(self.working_directory, 'bin')
+
+
+class AndroidTarget(Target):
+
+    path = posixpath
+    os = 'android'
+    ls_command = ''
+
+    @property
+    @memoized
+    def abi(self):
+        return self.getprop()['ro.product.cpu.abi'].split('-')[0]
+
+    @property
+    @memoized
+    def os_version(self):
+        os_version = {}
+        for k, v in self.getprop().iteritems():
+            if k.startswith('ro.build.version'):
+                part = k.split('.')[-1]
+                os_version[part] = v
+        return os_version
+
+    @property
+    def adb_name(self):
+        return self.conn.device
+
+    @property
+    @memoized
+    def android_id(self):
+        """
+        Get the device's ANDROID_ID, which is
+
+            "A 64-bit number (as a hex string) that is randomly generated when the user
+            first sets up the device and should remain constant for the lifetime of the
+            user's device."
+
+        .. note:: This will get reset on userdata erasure.
+
+        """
+        output = self.execute('content query --uri content://settings/secure --projection value --where "name=\'android_id\'"').strip()
+        return output.split('value=')[-1]
+
+    @property
+    @memoized
+    def model(self):
+        try:
+            return self.getprop(prop='ro.product.device')
+        except KeyError:
+            return None
+
+    @property
+    @memoized
+    def screen_resolution(self):
+        output = self.execute('dumpsys window')
+        match = ANDROID_SCREEN_RESOLUTION_REGEX.search(output)
+        if match:
+            return (int(match.group('width')),
+                    int(match.group('height')))
+        else:
+            return (0, 0)
+
+    def __init__(self,
+                 connection_settings=None,
+                 platform=None,
+                 working_directory=None,
+                 executables_directory=None,
+                 connect=True,
+                 modules=None,
+                 load_default_modules=True,
+                 shell_prompt=DEFAULT_SHELL_PROMPT,
+                 conn_cls=AdbConnection,
+                 package_data_directory="/data/data",
+                 ):
+        super(AndroidTarget, self).__init__(connection_settings=connection_settings,
+                                            platform=platform,
+                                            working_directory=working_directory,
+                                            executables_directory=executables_directory,
+                                            connect=connect,
+                                            modules=modules,
+                                            load_default_modules=load_default_modules,
+                                            shell_prompt=shell_prompt,
+                                            conn_cls=conn_cls)
+        self.package_data_directory = package_data_directory
+
+    def reset(self, fastboot=False):  # pylint: disable=arguments-differ
+        try:
+            self.execute('reboot {}'.format(fastboot and 'fastboot' or ''),
+                         as_root=self.needs_su, timeout=2)
+        except (TargetError, TimeoutError, subprocess.CalledProcessError):
+            # on some targets "reboot" doesn't return gracefully
+            pass
+        self._connected_as_root = None
+
+    def connect(self, timeout=10, check_boot_completed=True):  # pylint: disable=arguments-differ
+        start = time.time()
+        device = self.connection_settings.get('device')
+        if device and ':' in device:
+            # ADB does not automatically remove a network device from its
+            # devices list when the connection is broken by the remote, so the
+            # adb connection may have gone "stale", resulting in adb blocking
+            # indefinitely when making calls to the device. To avoid this,
+            # always disconnect first.
+            adb_disconnect(device)
+        super(AndroidTarget, self).connect(timeout=timeout)
+
+        if check_boot_completed:
+            boot_completed = boolean(self.getprop('sys.boot_completed'))
+            while not boot_completed and timeout >= time.time() - start:
+                time.sleep(5)
+                boot_completed = boolean(self.getprop('sys.boot_completed'))
+            if not boot_completed:
+                raise TargetError('Connected but Android did not fully boot.')
+
+    def setup(self, executables=None):
+        super(AndroidTarget, self).setup(executables)
+        self.execute('mkdir -p {}'.format(self._file_transfer_cache))
+
+    def kick_off(self, command, as_root=None):
+        """
+        Like execute but closes adb session and returns immediately, leaving the command running on the
+        device (this is different from execute(background=True) which keeps adb connection open and returns
+        a subprocess object).
+        """
+        if as_root is None:
+            as_root = self.needs_su
+        try:
+            command = 'cd {} && {} nohup {} &'.format(self.working_directory, self.busybox, command)
+            output = self.execute(command, timeout=1, as_root=as_root)
+        except TimeoutError:
+            pass
+
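+    # Example (illustrative): start a long-running command and return
+    # immediately, leaving it running on the device under nohup.
+    #
+    #     target.kick_off('dd if=/dev/zero of=/dev/null')
+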
+    def __setup_list_directory(self):
+        # In at least Linaro Android 16.09 (which was their first Android 7 release) and maybe
+        # AOSP 7.0 as well, the ls command was changed.
+        # Previous versions default to a single column listing, which is nice and easy to parse.
+        # Newer versions default to a multi-column listing, which is not, but it does support
+        # a '-1' option to get into single column mode. Older versions do not support this option
+        # so we try the new version, and if it fails we use the old version.
+        self.ls_command = 'ls -1'
+        try:
+            self.execute('ls -1 {}'.format(self.working_directory), as_root=False)
+        except TargetError:
+            self.ls_command = 'ls'
+
+    def list_directory(self, path, as_root=False):
+        if self.ls_command == '':
+            self.__setup_list_directory()
+        contents = self.execute('{} {}'.format(self.ls_command, path), as_root=as_root)
+        return [x.strip() for x in contents.split('\n') if x.strip()]
+
+    def install(self, filepath, timeout=None, with_name=None):  # pylint: disable=W0221
+        ext = os.path.splitext(filepath)[1].lower()
+        if ext == '.apk':
+            return self.install_apk(filepath, timeout)
+        else:
+            return self.install_executable(filepath, with_name)
+
+    def uninstall(self, name):
+        if self.package_is_installed(name):
+            self.uninstall_package(name)
+        else:
+            self.uninstall_executable(name)
+
+    def get_pids_of(self, process_name):
+        result = self.execute('ps {}'.format(process_name[-15:]), check_exit_code=False).strip()
+        if result and 'not found' not in result:
+            return [int(x.split()[1]) for x in result.split('\n')[1:]]
+        else:
+            return []
+
+    def ps(self, **kwargs):
+        lines = iter(convert_new_lines(self.execute('ps')).split('\n'))
+        lines.next()  # header
+        result = []
+        for line in lines:
+            parts = line.split(None, 8)
+            if parts:
+                result.append(PsEntry(*(parts[0:1] + map(int, parts[1:5]) + parts[5:])))
+        if not kwargs:
+            return result
+        else:
+            filtered_result = []
+            for entry in result:
+                if all(getattr(entry, k) == v for k, v in kwargs.iteritems()):
+                    filtered_result.append(entry)
+            return filtered_result
+
+    def capture_screen(self, filepath):
+        on_device_file = self.path.join(self.working_directory, 'screen_capture.png')
+        self.execute('screencap -p {}'.format(on_device_file))
+        self.pull(on_device_file, filepath)
+        self.remove(on_device_file)
+
+    def push(self, source, dest, as_root=False, timeout=None):  # pylint: disable=arguments-differ
+        if not as_root:
+            self.conn.push(source, dest, timeout=timeout)
+        else:
+            device_tempfile = self.path.join(self._file_transfer_cache, source.lstrip(self.path.sep))
+            self.execute("mkdir -p '{}'".format(self.path.dirname(device_tempfile)))
+            self.conn.push(source, device_tempfile, timeout=timeout)
+            self.execute("cp '{}' '{}'".format(device_tempfile, dest), as_root=True)
+
+    def pull(self, source, dest, as_root=False, timeout=None):  # pylint: disable=arguments-differ
+        if not as_root:
+            self.conn.pull(source, dest, timeout=timeout)
+        else:
+            device_tempfile = self.path.join(self._file_transfer_cache, source.lstrip(self.path.sep))
+            self.execute("mkdir -p '{}'".format(self.path.dirname(device_tempfile)))
+            self.execute("cp '{}' '{}'".format(source, device_tempfile), as_root=True)
+            self.execute("chmod 0644 '{}'".format(device_tempfile), as_root=True)
+            self.conn.pull(device_tempfile, dest, timeout=timeout)
+
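+    # Example (illustrative): pulling a root-only file routes it through the
+    # on-device file-transfer cache, so adb itself never needs root.
+    #
+    #     target.pull('/data/system/packages.xml', './packages.xml', as_root=True)
+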
+    # Android-specific
+
+    def swipe_to_unlock(self, direction="horizontal"):
+        width, height = self.screen_resolution
+        command = 'input swipe {} {} {} {}'
+        if direction == "horizontal":
+            swipe_heigh = height * 2 // 3
+            start = 100
+            stop = width - start
+            self.execute(command.format(start, swipe_heigh, stop, swipe_heigh))
+        if direction == "vertical":
+            swipe_middle = height / 2
+            swipe_heigh = height * 2 // 3
+            self.execute(command.format(swipe_middle, swipe_heigh, swipe_middle, 0))
+        else:
+            raise DeviceError("Invalid swipe direction: {}".format(self.swipe_to_unlock))
+
+    def getprop(self, prop=None):
+        props = AndroidProperties(self.execute('getprop'))
+        if prop:
+            return props[prop]
+        return props
+
+    def is_installed(self, name):
+        return super(AndroidTarget, self).is_installed(name) or self.package_is_installed(name)
+
+    def package_is_installed(self, package_name):
+        return package_name in self.list_packages()
+
+    def list_packages(self):
+        output = self.execute('pm list packages')
+        output = output.replace('package:', '')
+        return output.split()
+
+    def get_package_version(self, package):
+        output = self.execute('dumpsys package {}'.format(package))
+        for line in convert_new_lines(output).split('\n'):
+            if 'versionName' in line:
+                return line.split('=', 1)[1]
+        return None
+
+    def install_apk(self, filepath, timeout=None):  # pylint: disable=W0221
+        ext = os.path.splitext(filepath)[1].lower()
+        if ext == '.apk':
+            return adb_command(self.adb_name, "install '{}'".format(filepath), timeout=timeout)
+        else:
+            raise TargetError('Can\'t install {}: unsupported format.'.format(filepath))
+
+    def install_executable(self, filepath, with_name=None):
+        self._ensure_executables_directory_is_writable()
+        executable_name = with_name or os.path.basename(filepath)
+        on_device_file = self.path.join(self.working_directory, executable_name)
+        on_device_executable = self.path.join(self.executables_directory, executable_name)
+        self.push(filepath, on_device_file)
+        if on_device_file != on_device_executable:
+            self.execute('cp {} {}'.format(on_device_file, on_device_executable), as_root=self.needs_su)
+            self.remove(on_device_file, as_root=self.needs_su)
+        self.execute("chmod 0777 '{}'".format(on_device_executable), as_root=self.needs_su)
+        self._installed_binaries[executable_name] = on_device_executable
+        return on_device_executable
+
+    def uninstall_package(self, package):
+        adb_command(self.adb_name, "uninstall {}".format(package), timeout=30)
+
+    def uninstall_executable(self, executable_name):
+        on_device_executable = self.path.join(self.executables_directory, executable_name)
+        self._ensure_executables_directory_is_writable()
+        self.remove(on_device_executable, as_root=self.needs_su)
+
+    def dump_logcat(self, filepath, filter=None, append=False, timeout=30):  # pylint: disable=redefined-builtin
+        op = '>>' if append else '>'
+        filtstr = ' -s {}'.format(filter) if filter else ''
+        command = 'logcat -d{} {} {}'.format(filtstr, op, filepath)
+        adb_command(self.adb_name, command, timeout=timeout)
+
+    def clear_logcat(self):
+        adb_command(self.adb_name, 'logcat -c', timeout=30)
+
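+    # Example (illustrative): dump only one logcat tag to a host-side file,
+    # then clear the buffer; the tag name is an assumption.
+    #
+    #     target.dump_logcat('./am.log', filter='ActivityManager')
+    #     target.clear_logcat()
+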
+    def adb_reboot_bootloader(self, timeout=30):
+        adb_command(self.adb_name, 'reboot-bootloader', timeout)
+
+    def adb_root(self, enable=True, force=False):
+        if enable:
+            if self._connected_as_root and not force:
+                return
+            adb_command(self.adb_name, 'root', timeout=30)
+            self._connected_as_root = True
+            return
+        adb_command(self.adb_name, 'unroot', timeout=30)
+        self._connected_as_root = False
+
+    def is_screen_on(self):
+        output = self.execute('dumpsys power')
+        match = ANDROID_SCREEN_STATE_REGEX.search(output)
+        if match:
+            return boolean(match.group(1))
+        else:
+            raise TargetError('Could not establish screen state.')
+
+    def ensure_screen_is_on(self):
+        if not self.is_screen_on():
+            self.execute('input keyevent 26')
+
+    def ensure_screen_is_off(self):
+        if self.is_screen_on():
+            self.execute('input keyevent 26')
+
+    def homescreen(self):
+        self.execute('am start -a android.intent.action.MAIN -c android.intent.category.HOME')
+
+    def _resolve_paths(self):
+        if self.working_directory is None:
+            self.working_directory = '/data/local/tmp/devlib-target'
+        self._file_transfer_cache = self.path.join(self.working_directory, '.file-cache')
+        if self.executables_directory is None:
+            self.executables_directory = '/data/local/tmp/bin'
+
+    def _ensure_executables_directory_is_writable(self):
+        matched = []
+        for entry in self.list_file_systems():
+            if self.executables_directory.rstrip('/').startswith(entry.mount_point):
+                matched.append(entry)
+        if matched:
+            entry = sorted(matched, key=lambda x: len(x.mount_point))[-1]
+            if 'rw' not in entry.options:
+                self.execute('mount -o rw,remount {} {}'.format(entry.device,
+                                                                entry.mount_point),
+                             as_root=True)
+        else:
+            message = 'Could not find mount point for executables directory {}'
+            raise TargetError(message.format(self.executables_directory))
+
+    _charging_enabled_path = '/sys/class/power_supply/battery/charging_enabled'
+
+    @property
+    def charging_enabled(self):
+        """
+        Whether drawing power to charge the battery is enabled
+
+        Not all devices have the ability to enable/disable battery charging
+        (e.g. because they don't have a battery). In that case,
+        ``charging_enabled`` is None.
+        """
+        if not self.file_exists(self._charging_enabled_path):
+            return None
+        return self.read_bool(self._charging_enabled_path)
+
+    @charging_enabled.setter
+    def charging_enabled(self, enabled):
+        """
+        Enable/disable drawing power to charge the battery
+
+        Not all devices have this facility. In that case, do nothing.
+        """
+        if not self.file_exists(self._charging_enabled_path):
+            return
+        self.write_value(self._charging_enabled_path, int(bool(enabled)))
+
+FstabEntry = namedtuple('FstabEntry', ['device', 'mount_point', 'fs_type', 'options', 'dump_freq', 'pass_num'])
+PsEntry = namedtuple('PsEntry', 'user pid ppid vsize rss wchan pc state name')
+LsmodEntry = namedtuple('LsmodEntry', ['name', 'size', 'use_count', 'used_by'])
+
+
+class Cpuinfo(object):
+
+    @property
+    @memoized
+    def architecture(self):
+        for section in self.sections:
+            if 'CPU architecture' in section:
+                return section['CPU architecture']
+            if 'architecture' in section:
+                return section['architecture']
+
+    @property
+    @memoized
+    def cpu_names(self):
+        cpu_names = []
+        global_name = None
+        for section in self.sections:
+            if 'processor' in section:
+                if 'CPU part' in section:
+                    cpu_names.append(_get_part_name(section))
+                elif 'model name' in section:
+                    cpu_names.append(_get_model_name(section))
+                else:
+                    cpu_names.append(None)
+            elif 'CPU part' in section:
+                global_name = _get_part_name(section)
+        return [caseless_string(c or global_name) for c in cpu_names]
+
+    def __init__(self, text):
+        self.sections = None
+        self.text = None
+        self.parse(text)
+
+    @memoized
+    def get_cpu_features(self, cpuid=0):
+        global_features = []
+        for section in self.sections:
+            if 'processor' in section:
+                if int(section.get('processor')) != cpuid:
+                    continue
+                if 'Features' in section:
+                    return section.get('Features').split()
+                elif 'flags' in section:
+                    return section.get('flags').split()
+            elif 'Features' in section:
+                global_features = section.get('Features').split()
+            elif 'flags' in section:
+                global_features = section.get('flags').split()
+        return global_features
+
+    def parse(self, text):
+        self.sections = []
+        current_section = {}
+        self.text = text.strip()
+        for line in self.text.split('\n'):
+            line = line.strip()
+            if line:
+                key, value = line.split(':', 1)
+                current_section[key.strip()] = value.strip()
+            else:  # not line
+                self.sections.append(current_section)
+                current_section = {}
+        self.sections.append(current_section)
+
+    def __str__(self):
+        return 'CpuInfo({})'.format(self.cpu_names)
+
+    __repr__ = __str__
+
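+# Illustrative use of ``Cpuinfo`` (a sketch; assumes ``target`` is a connected
+# devlib ``Target``):
+#
+#     cpuinfo = Cpuinfo(target.execute('cat /proc/cpuinfo'))
+#     cpuinfo.cpu_names             # e.g. ['A53', 'A53', 'A57', 'A57']
+#     cpuinfo.get_cpu_features(0)
+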
+
+class KernelVersion(object):
+    """
+    Class representing the version of a target kernel
+
+    Not expected to work for very old (pre-3.0) kernel version numbers.
+
+    :ivar release: Version number/revision string. Typical output of
+                   ``uname -r``
+    :type release: str
+    :ivar version: Extra version info (aside from ``release``) reported by
+                   ``uname``
+    :type version: str
+    :ivar version_number: Main version number (e.g. 3 for Linux 3.18)
+    :type version_number: int
+    :ivar major: Major version number (e.g. 18 for Linux 3.18)
+    :type major: int
+    :ivar minor: Minor version number for stable kernels (e.g. 9 for 4.9.9). May
+                 be None
+    :type minor: int
+    :ivar rc: Release candidate number (e.g. 3 for Linux 4.9-rc3). May be None.
+    :type rc: int
+    :ivar sha1: Kernel git revision hash, if available (otherwise None)
+    :type sha1: str
+
+    :ivar parts: Tuple of version number components. Can be used for
+                 lexicographically comparing kernel versions.
+    :type parts: tuple(int)
+    """
+    def __init__(self, version_string):
+        if ' #' in version_string:
+            release, version = version_string.split(' #')
+            self.release = release
+            self.version = version
+        elif version_string.startswith('#'):
+            self.release = ''
+            self.version = version_string
+        else:
+            self.release = version_string
+            self.version = ''
+
+        self.version_number = None
+        self.major = None
+        self.minor = None
+        self.sha1 = None
+        self.rc = None
+        match = KVERSION_REGEX.match(version_string)
+        if match:
+            groups = match.groupdict()
+            self.version_number = int(groups['version'])
+            self.major = int(groups['major'])
+            if groups['minor'] is not None:
+                self.minor = int(groups['minor'])
+            if groups['rc'] is not None:
+                self.rc = int(groups['rc'])
+            if groups['sha1'] is not None:
+                self.sha1 = match.group('sha1')
+
+        self.parts = (self.version_number, self.major, self.minor)
+
+    def __str__(self):
+        return '{} {}'.format(self.release, self.version)
+
+    __repr__ = __str__
+
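+# Illustrative example (a sketch, using a made-up ``uname`` string; the exact
+# fields extracted depend on the module-level KVERSION_REGEX):
+#
+#     kver = KernelVersion('4.9.35-g1234abc #1 SMP PREEMPT')
+#     kver.release          # '4.9.35-g1234abc'
+#     kver.parts            # expected to be (4, 9, 35)
+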
+
+class KernelConfig(object):
+
+    not_set_regex = re.compile(r'# (\S+) is not set')
+
+    @staticmethod
+    def get_config_name(name):
+        name = name.upper()
+        if not name.startswith('CONFIG_'):
+            name = 'CONFIG_' + name
+        return name
+
+    def iteritems(self):
+        return self._config.iteritems()
+
+    def __init__(self, text):
+        self.text = text
+        self._config = {}
+        for line in text.split('\n'):
+            line = line.strip()
+            if line.startswith('#'):
+                match = self.not_set_regex.search(line)
+                if match:
+                    self._config[match.group(1)] = 'n'
+            elif '=' in line:
+                name, value = line.split('=', 1)
+                self._config[name.strip()] = value.strip()
+
+    def get(self, name):
+        return self._config.get(self.get_config_name(name))
+
+    def like(self, name):
+        regex = re.compile(name, re.I)
+        result = {}
+        for k, v in self._config.iteritems():
+            if regex.search(k):
+                result[k] = v
+        return result
+
+    def is_enabled(self, name):
+        return self.get(name) == 'y'
+
+    def is_module(self, name):
+        return self.get(name) == 'm'
+
+    def is_not_set(self, name):
+        return self.get(name) == 'n'
+
+    def has(self, name):
+        return self.get(name) in ['m', 'y']
+
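+# Illustrative use of ``KernelConfig`` (a sketch; assumes the target exposes
+# its configuration via /proc/config.gz):
+#
+#     config = KernelConfig(target.execute('zcat /proc/config.gz'))
+#     config.is_enabled('CPU_FREQ')   # same as config.get('CONFIG_CPU_FREQ') == 'y'
+#     config.like('SCHED')            # dict of all options whose name matches 'SCHED'
+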
+
+class LocalLinuxTarget(LinuxTarget):
+
+    def __init__(self,
+                 connection_settings=None,
+                 platform=None,
+                 working_directory=None,
+                 executables_directory=None,
+                 connect=True,
+                 modules=None,
+                 load_default_modules=True,
+                 shell_prompt=DEFAULT_SHELL_PROMPT,
+                 conn_cls=LocalConnection,
+                 ):
+        super(LocalLinuxTarget, self).__init__(connection_settings=connection_settings,
+                                               platform=platform,
+                                               working_directory=working_directory,
+                                               executables_directory=executables_directory,
+                                               connect=connect,
+                                               modules=modules,
+                                               load_default_modules=load_default_modules,
+                                               shell_prompt=shell_prompt,
+                                               conn_cls=conn_cls)
+
+    def _resolve_paths(self):
+        if self.working_directory is None:
+            self.working_directory = '/tmp'
+        if self.executables_directory is None:
+            self.executables_directory = '/tmp'
+
+
+def _get_model_name(section):
+    name_string = section['model name']
+    parts = name_string.split('@')[0].strip().split()
+    return ' '.join([p for p in parts
+                     if '(' not in p and p != 'CPU'])
+
+
+def _get_part_name(section):
+    implementer = section.get('CPU implementer', '0x0')
+    part = section['CPU part']
+    variant = section.get('CPU variant', '0x0')
+    name = get_cpu_name(*map(integer, [implementer, part, variant]))
+    if name is None:
+        name = '{}/{}/{}'.format(implementer, part, variant)
+    return name
diff --git a/devlib/trace/__init__.py b/devlib/trace/__init__.py
new file mode 100644
index 0000000..5eeb939
--- /dev/null
+++ b/devlib/trace/__init__.py
@@ -0,0 +1,20 @@
+import logging
+
+
+class TraceCollector(object):
+
+    def __init__(self, target):
+        self.target = target
+        self.logger = logging.getLogger(self.__class__.__name__)
+
+    def reset(self):
+        pass
+
+    def start(self):
+        pass
+
+    def stop(self):
+        pass
+
+    def get_trace(self, outfile):
+        pass
diff --git a/devlib/trace/ftrace.py b/devlib/trace/ftrace.py
new file mode 100644
index 0000000..d4e37e1
--- /dev/null
+++ b/devlib/trace/ftrace.py
@@ -0,0 +1,347 @@
+#    Copyright 2015 ARM Limited
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+from __future__ import division
+import os
+import json
+import time
+import re
+import subprocess
+
+from devlib.trace import TraceCollector
+from devlib.host import PACKAGE_BIN_DIRECTORY
+from devlib.exception import TargetError, HostError
+from devlib.utils.misc import check_output, which
+
+
+TRACE_MARKER_START = 'TRACE_MARKER_START'
+TRACE_MARKER_STOP = 'TRACE_MARKER_STOP'
+OUTPUT_TRACE_FILE = 'trace.dat'
+OUTPUT_PROFILE_FILE = 'trace_stat.dat'
+DEFAULT_EVENTS = [
+    'cpu_frequency',
+    'cpu_idle',
+    'sched_migrate_task',
+    'sched_process_exec',
+    'sched_process_fork',
+    'sched_stat_iowait',
+    'sched_switch',
+    'sched_wakeup',
+    'sched_wakeup_new',
+]
+TIMEOUT = 180
+
+# Regexps for parsing of function profiling data
+CPU_RE = re.compile(r'  Function \(CPU([0-9]+)\)')
+STATS_RE = re.compile(r'([^ ]*) +([0-9]+) +([0-9.]+) us +([0-9.]+) us +([0-9.]+) us')
+
+class FtraceCollector(TraceCollector):
+
+    def __init__(self, target,
+                 events=None,
+                 functions=None,
+                 buffer_size=None,
+                 buffer_size_step=1000,
+                 tracing_path='/sys/kernel/debug/tracing',
+                 automark=True,
+                 autoreport=True,
+                 autoview=False,
+                 no_install=False,
+                 strict=False,
+                 report_on_target=False,
+                 ):
+        super(FtraceCollector, self).__init__(target)
+        self.events = events if events is not None else DEFAULT_EVENTS
+        self.functions = functions
+        self.buffer_size = buffer_size
+        self.buffer_size_step = buffer_size_step
+        self.tracing_path = tracing_path
+        self.automark = automark
+        self.autoreport = autoreport
+        self.autoview = autoview
+        self.report_on_target = report_on_target
+        self.target_output_file = target.path.join(self.target.working_directory, OUTPUT_TRACE_FILE)
+        text_file_name = target.path.splitext(OUTPUT_TRACE_FILE)[0] + '.txt'
+        self.target_text_file = target.path.join(self.target.working_directory, text_file_name)
+        self.target_binary = None
+        self.host_binary = None
+        self.start_time = None
+        self.stop_time = None
+        self.event_string = None
+        self.function_string = None
+        self._reset_needed = True
+
+        # Setup tracing paths
+        self.available_events_file    = self.target.path.join(self.tracing_path, 'available_events')
+        self.available_functions_file = self.target.path.join(self.tracing_path, 'available_filter_functions')
+        self.buffer_size_file         = self.target.path.join(self.tracing_path, 'buffer_size_kb')
+        self.current_tracer_file      = self.target.path.join(self.tracing_path, 'current_tracer')
+        self.function_profile_file    = self.target.path.join(self.tracing_path, 'function_profile_enabled')
+        self.marker_file              = self.target.path.join(self.tracing_path, 'trace_marker')
+        self.ftrace_filter_file       = self.target.path.join(self.tracing_path, 'set_ftrace_filter')
+
+        self.host_binary = which('trace-cmd')
+        self.kernelshark = which('kernelshark')
+
+        if not self.target.is_rooted:
+            raise TargetError('trace-cmd instrument cannot be used on an unrooted device.')
+        if self.autoreport and not self.report_on_target and self.host_binary is None:
+            raise HostError('trace-cmd binary must be installed on the host if autoreport=True.')
+        if self.autoview and self.kernelshark is None:
+            raise HostError('kernelshark binary must be installed on the host if autoview=True.')
+        if not no_install:
+            host_file = os.path.join(PACKAGE_BIN_DIRECTORY, self.target.abi, 'trace-cmd')
+            self.target_binary = self.target.install(host_file)
+        else:
+            if not self.target.is_installed('trace-cmd'):
+                raise TargetError('No trace-cmd found on device and no_install=True is specified.')
+            self.target_binary = 'trace-cmd'
+
+        # Validate required events to be traced
+        available_events = self.target.execute(
+                'cat {}'.format(self.available_events_file),
+                as_root=True).splitlines()
+        selected_events = []
+        for event in self.events:
+            # Convert the globs accepted by ftrace into regular expressions
+            _event = event
+            if event[0] != '*':
+                _event = '*' + event
+            event_re = re.compile(_event.replace('*', '.*'))
+            # Select events matching the required ones
+            if len(filter(event_re.match, available_events)) == 0:
+                message = 'Event [{}] not available for tracing'.format(event)
+                if strict:
+                    raise TargetError(message)
+                self.target.logger.warning(message)
+            else:
+                selected_events.append(event)
+        # If function profiling is enabled we always need at least one event.
+        # Thus, if no other events have been specified, fall back to a
+        # tracepoint which is always available and only rarely triggered.
+        if self.functions and len(selected_events) == 0:
+            selected_events = ['sched_wakeup_new']
+        self.event_string = _build_trace_events(selected_events)
+
+        # Check for function tracing support
+        if self.functions:
+            if not self.target.file_exists(self.function_profile_file):
+                raise TargetError('Function profiling not supported. '
+                                  'A kernel build with CONFIG_FUNCTION_PROFILER enabled is required.')
+            # Validate required functions to be traced
+            available_functions = self.target.execute(
+                    'cat {}'.format(self.available_functions_file),
+                    as_root=True).splitlines()
+            selected_functions = []
+            for function in self.functions:
+                if function not in available_functions:
+                    message = 'Function [{}] not available for profiling'.format(function)
+                    if strict:
+                        raise TargetError(message)
+                    self.target.logger.warning(message)
+                else:
+                    selected_functions.append(function)
+            self.function_string = _build_trace_functions(selected_functions)
+
+    def reset(self):
+        if self.buffer_size:
+            self._set_buffer_size()
+        self.target.execute('{} reset'.format(self.target_binary),
+                            as_root=True, timeout=TIMEOUT)
+        self._reset_needed = False
+
+    def start(self):
+        self.start_time = time.time()
+        if self._reset_needed:
+            self.reset()
+        self.target.execute('{} start {}'.format(self.target_binary, self.event_string),
+                            as_root=True)
+        if self.automark:
+            self.mark_start()
+        if 'cpufreq' in self.target.modules:
+            self.logger.debug('Trace CPUFreq frequencies')
+            self.target.cpufreq.trace_frequencies()
+        if 'cpuidle' in self.target.modules:
+            self.logger.debug('Trace CPUIdle states')
+            self.target.cpuidle.perturb_cpus()
+        # Enable kernel function profiling
+        if self.functions:
+            self.target.execute('echo nop > {}'.format(self.current_tracer_file),
+                                as_root=True)
+            self.target.execute('echo 0 > {}'.format(self.function_profile_file),
+                                as_root=True)
+            self.target.execute('echo {} > {}'.format(self.function_string, self.ftrace_filter_file),
+                                as_root=True)
+            self.target.execute('echo 1 > {}'.format(self.function_profile_file),
+                                as_root=True)
+
+
+    def stop(self):
+        # Disable kernel function profiling
+        if self.functions:
+            self.target.execute('echo 0 > {}'.format(self.function_profile_file),
+                                as_root=True)
+        if 'cpufreq' in self.target.modules:
+            self.logger.debug('Trace CPUFreq frequencies')
+            self.target.cpufreq.trace_frequencies()
+        self.stop_time = time.time()
+        if self.automark:
+            self.mark_stop()
+        self.target.execute('{} stop'.format(self.target_binary),
+                            timeout=TIMEOUT, as_root=True)
+        self._reset_needed = True
+
+    def get_trace(self, outfile):
+        if os.path.isdir(outfile):
+            outfile = os.path.join(outfile, os.path.basename(self.target_output_file))
+        self.target.execute('{0} extract -o {1}; chmod 666 {1}'.format(self.target_binary,
+                                                                       self.target_output_file),
+                            timeout=TIMEOUT, as_root=True)
+
+        # The size of trace.dat will depend on how long trace-cmd was running.
+        # Therefore the timeout for the pull command must also be adjusted
+        # accordingly.
+        pull_timeout = 5 * (self.stop_time - self.start_time)
+        self.target.pull(self.target_output_file, outfile, timeout=pull_timeout)
+        if not os.path.isfile(outfile):
+            self.logger.warning('Binary trace not pulled from device.')
+        else:
+            if self.autoreport:
+                textfile = os.path.splitext(outfile)[0] + '.txt'
+                if self.report_on_target:
+                    self.generate_report_on_target()
+                    self.target.pull(self.target_text_file,
+                                     textfile, timeout=pull_timeout)
+                else:
+                    self.report(outfile, textfile)
+            if self.autoview:
+                self.view(outfile)
+
+    def get_stats(self, outfile):
+        if not self.functions:
+            return
+
+        if os.path.isdir(outfile):
+            outfile = os.path.join(outfile, OUTPUT_PROFILE_FILE)
+        output = self.target._execute_util('ftrace_get_function_stats',
+                                            as_root=True)
+
+        function_stats = {}
+        for line in output.splitlines():
+            # Match a new CPU dataset
+            match = CPU_RE.search(line)
+            if match:
+                cpu_id = int(match.group(1))
+                function_stats[cpu_id] = {}
+                self.logger.debug("Processing stats for CPU%d...", cpu_id)
+                continue
+            # Match a new function dataset
+            match = STATS_RE.search(line)
+            if match:
+                fname = match.group(1)
+                function_stats[cpu_id][fname] = {
+                        'hits' : int(match.group(2)),
+                        'time' : float(match.group(3)),
+                        'avg'  : float(match.group(4)),
+                        's_2'  : float(match.group(5)),
+                    }
+                self.logger.debug(" %s: %s",
+                                  fname, function_stats[cpu_id][fname])
+
+        self.logger.debug("FTrace stats output [%s]...", outfile)
+        with open(outfile, 'w') as fh:
+            json.dump(function_stats, fh, indent=4)
+        self.logger.debug("FTrace function stats saved to [%s]", outfile)
+
+        return function_stats
+
+    def report(self, binfile, destfile):
+        # Generating the text report requires trace-cmd to be installed on the
+        # host. This is done host-side because the generated file is very large.
+        try:
+            command = '{} report {} > {}'.format(self.host_binary, binfile, destfile)
+            self.logger.debug(command)
+            process = subprocess.Popen(command, stderr=subprocess.PIPE, shell=True)
+            _, error = process.communicate()
+            if process.returncode:
+                raise TargetError('trace-cmd returned non-zero exit code {}'.format(process.returncode))
+            if error:
+                # logged at debug level, as trace-cmd always outputs some
+                # errors that seem benign.
+                self.logger.debug(error)
+            if os.path.isfile(destfile):
+                self.logger.debug('Verifying traces.')
+                with open(destfile) as fh:
+                    for line in fh:
+                        if 'EVENTS DROPPED' in line:
+                            self.logger.warning('Dropped events detected.')
+                            break
+                    else:
+                        self.logger.debug('Trace verified.')
+            else:
+                self.logger.warning('Could not generate trace.txt.')
+        except OSError:
+            raise HostError('Could not find trace-cmd. Please make sure it is installed and is in PATH.')
+
+    def generate_report_on_target(self):
+        command = '{} report {} > {}'.format(self.target_binary,
+                                             self.target_output_file,
+                                             self.target_text_file)
+        self.target.execute(command, timeout=TIMEOUT)
+
+    def view(self, binfile):
+        check_output('{} {}'.format(self.kernelshark, binfile), shell=True)
+
+    def teardown(self):
+        self.target.remove(self.target.path.join(self.target.working_directory, OUTPUT_TRACE_FILE))
+
+    def mark_start(self):
+        self.target.write_value(self.marker_file, TRACE_MARKER_START, verify=False)
+
+    def mark_stop(self):
+        self.target.write_value(self.marker_file, TRACE_MARKER_STOP, verify=False)
+
+    def _set_buffer_size(self):
+        target_buffer_size = self.buffer_size
+        attempt_buffer_size = target_buffer_size
+        buffer_size = 0
+        floor = 1000 if target_buffer_size > 1000 else target_buffer_size
+        while attempt_buffer_size >= floor:
+            self.target.write_value(self.buffer_size_file, attempt_buffer_size, verify=False)
+            buffer_size = self.target.read_int(self.buffer_size_file)
+            if buffer_size == attempt_buffer_size:
+                break
+            else:
+                attempt_buffer_size -= self.buffer_size_step
+        if buffer_size == target_buffer_size:
+            return
+        while attempt_buffer_size < target_buffer_size:
+            attempt_buffer_size += self.buffer_size_step
+            self.target.write_value(self.buffer_size_file, attempt_buffer_size, verify=False)
+            buffer_size = self.target.read_int(self.buffer_size_file)
+            if attempt_buffer_size != buffer_size:
+                message = 'Failed to set trace buffer size to {}, value set was {}'
+                self.logger.warning(message.format(target_buffer_size, buffer_size))
+                break
+
+
+def _build_trace_events(events):
+    event_string = ' '.join(['-e {}'.format(e) for e in events])
+    return event_string
+
+def _build_trace_functions(functions):
+    function_string = " ".join(functions)
+    return function_string
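+
+
+# Illustrative FtraceCollector usage (a sketch; assumes ``target`` is a rooted
+# devlib ``Target`` and trace-cmd is available in PACKAGE_BIN_DIRECTORY):
+#
+#     collector = FtraceCollector(target, events=['sched_switch', 'cpu_frequency'])
+#     collector.start()
+#     # ... run the workload of interest ...
+#     collector.stop()
+#     collector.get_trace('trace.dat')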
diff --git a/devlib/utils/__init__.py b/devlib/utils/__init__.py
new file mode 100644
index 0000000..cd5d64d
--- /dev/null
+++ b/devlib/utils/__init__.py
@@ -0,0 +1,16 @@
+#    Copyright 2013-2015 ARM Limited
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+
diff --git a/devlib/utils/android.py b/devlib/utils/android.py
new file mode 100644
index 0000000..be8b960
--- /dev/null
+++ b/devlib/utils/android.py
@@ -0,0 +1,496 @@
+#    Copyright 2013-2015 ARM Limited
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+
+"""
+Utility functions for working with Android devices through adb.
+
+"""
+# pylint: disable=E1103
+import os
+import time
+import subprocess
+import logging
+import re
+from collections import defaultdict
+
+from devlib.exception import TargetError, HostError, DevlibError
+from devlib.utils.misc import check_output, which, memoized
+from devlib.utils.misc import escape_single_quotes, escape_double_quotes
+
+
+logger = logging.getLogger('android')
+
+MAX_ATTEMPTS = 5
+AM_START_ERROR = re.compile(r"Error: Activity class {[\w|.|/]*} does not exist")
+
+# See:
+# http://developer.android.com/guide/topics/manifest/uses-sdk-element.html#ApiLevels
+ANDROID_VERSION_MAP = {
+    23: 'MARSHMALLOW',
+    22: 'LOLLYPOP_MR1',
+    21: 'LOLLYPOP',
+    20: 'KITKAT_WATCH',
+    19: 'KITKAT',
+    18: 'JELLY_BEAN_MR2',
+    17: 'JELLY_BEAN_MR1',
+    16: 'JELLY_BEAN',
+    15: 'ICE_CREAM_SANDWICH_MR1',
+    14: 'ICE_CREAM_SANDWICH',
+    13: 'HONEYCOMB_MR2',
+    12: 'HONEYCOMB_MR1',
+    11: 'HONEYCOMB',
+    10: 'GINGERBREAD_MR1',
+    9: 'GINGERBREAD',
+    8: 'FROYO',
+    7: 'ECLAIR_MR1',
+    6: 'ECLAIR_0_1',
+    5: 'ECLAIR',
+    4: 'DONUT',
+    3: 'CUPCAKE',
+    2: 'BASE_1_1',
+    1: 'BASE',
+}
+
+
+# Initialized in functions near the bottom of the file
+android_home = None
+platform_tools = None
+adb = None
+aapt = None
+fastboot = None
+
+
+class AndroidProperties(object):
+
+    def __init__(self, text):
+        self._properties = {}
+        self.parse(text)
+
+    def parse(self, text):
+        self._properties = dict(re.findall(r'\[(.*?)\]:\s+\[(.*?)\]', text))
+
+    def iteritems(self):
+        return self._properties.iteritems()
+
+    def __iter__(self):
+        return iter(self._properties)
+
+    def __getattr__(self, name):
+        return self._properties.get(name)
+
+    __getitem__ = __getattr__
+
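+# Illustrative use of ``AndroidProperties`` (a sketch), parsing the output of
+# ``getprop``:
+#
+#     props = AndroidProperties(adb_command(device, 'shell getprop'))
+#     props['ro.build.version.release']    # e.g. '7.1.2'
+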
+
+class AdbDevice(object):
+
+    def __init__(self, name, status):
+        self.name = name
+        self.status = status
+
+    def __cmp__(self, other):
+        if isinstance(other, AdbDevice):
+            return cmp(self.name, other.name)
+        else:
+            return cmp(self.name, other)
+
+    def __str__(self):
+        return 'AdbDevice({}, {})'.format(self.name, self.status)
+
+    __repr__ = __str__
+
+
+class ApkInfo(object):
+
+    version_regex = re.compile(r"name='(?P<name>[^']+)' versionCode='(?P<vcode>[^']+)' versionName='(?P<vname>[^']+)'")
+    name_regex = re.compile(r"name='(?P<name>[^']+)'")
+
+    def __init__(self, path=None):
+        self.path = path
+        self.package = None
+        self.activity = None
+        self.label = None
+        self.version_name = None
+        self.version_code = None
+        self.parse(path)
+
+    def parse(self, apk_path):
+        _check_env()
+        command = [aapt, 'dump', 'badging', apk_path]
+        logger.debug(' '.join(command))
+        output = subprocess.check_output(command)
+        for line in output.split('\n'):
+            if line.startswith('application-label:'):
+                self.label = line.split(':')[1].strip().replace('\'', '')
+            elif line.startswith('package:'):
+                match = self.version_regex.search(line)
+                if match:
+                    self.package = match.group('name')
+                    self.version_code = match.group('vcode')
+                    self.version_name = match.group('vname')
+            elif line.startswith('launchable-activity:'):
+                match = self.name_regex.search(line)
+                self.activity = match.group('name')
+            else:
+                pass  # not interested
+
+
+class AdbConnection(object):
+
+    # maintains the count of parallel active connections to a device, so that
+    # adb disconnect is not invoked until all connections are closed
+    active_connections = defaultdict(int)
+    default_timeout = 10
+    ls_command = 'ls'
+
+    @property
+    def name(self):
+        return self.device
+
+    @property
+    @memoized
+    def newline_separator(self):
+        output = adb_command(self.device,
+                             "shell '({}); echo \"\n$?\"'".format(self.ls_command))
+        if output.endswith('\r\n'):
+            return '\r\n'
+        elif output.endswith('\n'):
+            return '\n'
+        else:
+            raise DevlibError("Unknown line ending")
+
+    # We need to handle boards where the default output format from ls is
+    # single-column *and* boards where the default output is multi-column.
+    # This is necessary because the '-1' option causes errors on older
+    # versions of the ls tool in Android pre-v7.
+    def _setup_ls(self):
+        command = "shell '(ls -1); echo \"\n$?\"'"
+        try:
+            output = adb_command(self.device, command, timeout=self.timeout)
+        except subprocess.CalledProcessError as e:
+            raise HostError(
+                'Failed to set up ls command on Android device. Output:\n'
+                + e.output)
+        lines = output.splitlines()
+        retval = lines[-1].strip()
+        if int(retval) == 0:
+            self.ls_command = 'ls -1'
+        else:
+            self.ls_command = 'ls'
+        logger.debug("ls command is set to {}".format(self.ls_command))
+
+    def __init__(self, device=None, timeout=None, platform=None):
+        self.timeout = timeout if timeout is not None else self.default_timeout
+        if device is None:
+            device = adb_get_device(timeout=timeout)
+        self.device = device
+        adb_connect(self.device)
+        AdbConnection.active_connections[self.device] += 1
+        self._setup_ls()
+
+    def push(self, source, dest, timeout=None):
+        if timeout is None:
+            timeout = self.timeout
+        command = "push '{}' '{}'".format(source, dest)
+        if not os.path.exists(source):
+            raise HostError('No such file "{}"'.format(source))
+        return adb_command(self.device, command, timeout=timeout)
+
+    def pull(self, source, dest, timeout=None):
+        if timeout is None:
+            timeout = self.timeout
+        # Pull all files matching a wildcard expression
+        if os.path.isdir(dest) and \
+           ('*' in source or '?' in source):
+            command = 'shell {} {}'.format(self.ls_command, source)
+            output = adb_command(self.device, command, timeout=timeout)
+            for line in output.splitlines():
+                command = "pull '{}' '{}'".format(line.strip(), dest)
+                adb_command(self.device, command, timeout=timeout)
+            return
+        command = "pull '{}' '{}'".format(source, dest)
+        return adb_command(self.device, command, timeout=timeout)
+
+    def execute(self, command, timeout=None, check_exit_code=False,
+                as_root=False, strip_colors=True):
+        return adb_shell(self.device, command, timeout, check_exit_code,
+                         as_root, self.newline_separator)
+
+    def background(self, command, stdout=subprocess.PIPE, stderr=subprocess.PIPE, as_root=False):
+        return adb_background_shell(self.device, command, stdout, stderr, as_root)
+
+    def close(self):
+        AdbConnection.active_connections[self.device] -= 1
+        if AdbConnection.active_connections[self.device] <= 0:
+            adb_disconnect(self.device)
+            del AdbConnection.active_connections[self.device]
+
+    def cancel_running_command(self):
+        # adbd multiplexes commands so that they don't interfere with each
+        # other, so there is no need to explicitly cancel a running command
+        # before the next one can be issued.
+        pass
+
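+# Illustrative AdbConnection usage (a sketch; the serial number is a
+# placeholder):
+#
+#     conn = AdbConnection(device='0123456789ABCDEF')
+#     conn.push('./local_file', '/data/local/tmp/local_file')
+#     print(conn.execute('ls /data/local/tmp'))
+#     conn.close()
+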
+
+def fastboot_command(command, timeout=None, device=None):
+    _check_env()
+    target = '-s {}'.format(device) if device else ''
+    full_command = 'fastboot {} {}'.format(target, command)
+    logger.debug(full_command)
+    output, _ = check_output(full_command, timeout, shell=True)
+    return output
+
+
+def fastboot_flash_partition(partition, path_to_image):
+    command = 'flash {} {}'.format(partition, path_to_image)
+    fastboot_command(command)
+
+
+def adb_get_device(timeout=None):
+    """
+    Returns the serial number of a connected Android device.
+
+    If more than one device is connected to the machine, or no connected device
+    could be found, a :class:`devlib.exception.HostError` is raised.
+    """
+    # TODO this is a hacky way to issue an adb command to all listed devices
+
+    # The output of calling adb devices consists of a heading line, then
+    # a list of the devices separated by newlines, and finally a blank line.
+    # In other words, if at least one device is found, the output length is
+    # 2 + (1 for each device).
+    start = time.time()
+    while True:
+        output = adb_command(None, "devices").splitlines()  # pylint: disable=E1103
+        output_length = len(output)
+        if output_length == 3:
+            # output[1] is the second line of the output, which contains the
+            # device entry. Splitting the line by '\t' gives a two-element
+            # list with the device serial at index 0 and its state at index 1.
+            return output[1].split('\t')[0]
+        elif output_length > 3:
+            message = '{} Android devices found; either explicitly specify ' +\
+                      'the device you want, or make sure only one is connected.'
+            raise HostError(message.format(output_length - 2))
+        else:
+            if timeout < time.time() - start:
+                raise HostError('No device is connected and available')
+            time.sleep(1)
+
+
+def adb_connect(device, timeout=None, attempts=MAX_ATTEMPTS):
+    _check_env()
+    # Connect is required only for ADB-over-IP
+    if "." not in device:
+        logger.debug('Device connected via USB, connect not required')
+        return
+    tries = 0
+    output = None
+    while tries <= attempts:
+        tries += 1
+        if device:
+            command = 'adb connect {}'.format(device)
+            logger.debug(command)
+            output, _ = check_output(command, shell=True, timeout=timeout)
+        if _ping(device):
+            break
+        time.sleep(10)
+    else:  # did not connect to the device
+        message = 'Could not connect to {}'.format(device or 'a device')
+        if output:
+            message += '; got: "{}"'.format(output)
+        raise HostError(message)
+
+
+def adb_disconnect(device):
+    _check_env()
+    if not device:
+        return
+    if ":" in device and device in adb_list_devices():
+        command = "adb disconnect " + device
+        logger.debug(command)
+        retval = subprocess.call(command, stdout=open(os.devnull, 'wb'), shell=True)
+        if retval:
+            raise TargetError('"{}" returned {}'.format(command, retval))
+
+
+def _ping(device):
+    _check_env()
+    device_string = ' -s {}'.format(device) if device else ''
+    command = "adb{} shell \"ls / > /dev/null\"".format(device_string)
+    logger.debug(command)
+    result = subprocess.call(command, stderr=subprocess.PIPE, shell=True)
+    if not result:
+        return True
+    else:
+        return False
+
+
+def adb_shell(device, command, timeout=None, check_exit_code=False,
+              as_root=False, newline_separator='\r\n'):  # NOQA
+    _check_env()
+    if as_root:
+        command = 'echo \'{}\' | su'.format(escape_single_quotes(command))
+    device_part = ['-s', device] if device else []
+
+    # On older combinations of ADB/Android versions, the adb host command always
+    # exits with 0 if it was able to run the command on the target, even if the
+    # command failed (https://code.google.com/p/android/issues/detail?id=3254).
+    # Homogenise this behaviour by running the command then echoing the exit
+    # code.
+    adb_shell_command = '({}); echo \"\n$?\"'.format(command)
+    actual_command = ['adb'] + device_part + ['shell', adb_shell_command]
+    logger.debug('adb {} shell {}'.format(' '.join(device_part), command))
+    raw_output, error = check_output(actual_command, timeout, shell=False)
+    if raw_output:
+        try:
+            output, exit_code, _ = raw_output.rsplit(newline_separator, 2)
+        except ValueError:
+            exit_code, _ = raw_output.rsplit(newline_separator, 1)
+            output = ''
+    else:  # raw_output is empty
+        exit_code = '969696'  # just because
+        output = ''
+
+    if check_exit_code:
+        exit_code = exit_code.strip()
+        if exit_code.isdigit():
+            if int(exit_code):
+                message = ('Got exit code {}\nfrom target command: {}\n'
+                           'STDOUT: {}\nSTDERR: {}')
+                raise TargetError(message.format(exit_code, command, output, error))
+            elif AM_START_ERROR.findall(output):
+                message = 'Could not start activity; got the following:'
+                message += '\n{}'.format(AM_START_ERROR.findall(output)[0])
+                raise TargetError(message)
+        else:  # not all digits
+            if AM_START_ERROR.findall(output):
+                message = 'Could not start activity; got the following:\n{}'
+                raise TargetError(message.format(AM_START_ERROR.findall(output)[0]))
+            else:
+                message = 'adb has returned early; did not get an exit code. '\
+                          'Was kill-server invoked?'
+                raise TargetError(message)
+
+    return output
+
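+# For example (a sketch; the serial number is a placeholder):
+#
+#     adb_shell('0123456789ABCDEF', 'dumpsys battery', check_exit_code=True)
+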
+
+def adb_background_shell(device, command,
+                         stdout=subprocess.PIPE,
+                         stderr=subprocess.PIPE,
+                         as_root=False):
+    """Runs the specified command in a subprocess, returning the Popen object."""
+    _check_env()
+    if as_root:
+        command = 'echo \'{}\' | su'.format(escape_single_quotes(command))
+    device_string = ' -s {}'.format(device) if device else ''
+    full_command = 'adb{} shell "{}"'.format(device_string, escape_double_quotes(command))
+    logger.debug(full_command)
+    return subprocess.Popen(full_command, stdout=stdout, stderr=stderr, shell=True)
+
+
+def adb_list_devices():
+    output = adb_command(None, 'devices')
+    devices = []
+    for line in output.splitlines():
+        parts = [p.strip() for p in line.split()]
+        if len(parts) == 2:
+            devices.append(AdbDevice(*parts))
+    return devices
+
+
+def adb_command(device, command, timeout=None):
+    _check_env()
+    device_string = ' -s {}'.format(device) if device else ''
+    full_command = "adb{} {}".format(device_string, command)
+    logger.debug(full_command)
+    output, _ = check_output(full_command, timeout, shell=True)
+    return output
+
+
+# Messy environment initialisation stuff...
+
+class _AndroidEnvironment(object):
+
+    def __init__(self):
+        self.android_home = None
+        self.platform_tools = None
+        self.adb = None
+        self.aapt = None
+        self.fastboot = None
+
+
+def _initialize_with_android_home(env):
+    logger.debug('Using ANDROID_HOME from the environment.')
+    env.android_home = android_home
+    env.platform_tools = os.path.join(android_home, 'platform-tools')
+    os.environ['PATH'] = env.platform_tools + os.pathsep + os.environ['PATH']
+    _init_common(env)
+    return env
+
+
+def _initialize_without_android_home(env):
+    adb_full_path = which('adb')
+    if adb_full_path:
+        env.adb = 'adb'
+    else:
+        raise HostError('ANDROID_HOME is not set and adb is not in PATH. '
+                        'Have you installed Android SDK?')
+    logger.debug('Discovering ANDROID_HOME from adb path.')
+    env.platform_tools = os.path.dirname(adb_full_path)
+    env.android_home = os.path.dirname(env.platform_tools)
+    try:
+        _init_common(env)
+    except:
+        env.aapt = which('aapt')
+        if env.aapt:
+            logger.info('Using aapt: ' + env.aapt)
+        else:
+            raise RuntimeError('aapt not found; try setting ANDROID_HOME to the '
+                               'Android SDK location, or run LISA from an Android '
+                               'build environment')
+    return env
+
+
+def _init_common(env):
+    logger.debug('ANDROID_HOME: {}'.format(env.android_home))
+    build_tools_directory = os.path.join(env.android_home, 'build-tools')
+    if not os.path.isdir(build_tools_directory):
+        msg = ('ANDROID_HOME ({}) does not appear to contain a valid Android '
+               'SDK install (cannot find build-tools)')
+        raise HostError(msg.format(env.android_home))
+    versions = os.listdir(build_tools_directory)
+    for version in reversed(sorted(versions)):
+        aapt_path = os.path.join(build_tools_directory, version, 'aapt')
+        if os.path.isfile(aapt_path):
+            logger.debug('Using aapt for version {}'.format(version))
+            env.aapt = aapt_path
+            break
+    else:
+        raise HostError('aapt not found. Please make sure at least one Android '
+                        'platform is installed.')
+
+
+def _check_env():
+    global android_home, platform_tools, adb, aapt  # pylint: disable=W0603
+    if not android_home:
+        android_home = os.getenv('ANDROID_HOME')
+        if android_home:
+            _env = _initialize_with_android_home(_AndroidEnvironment())
+        else:
+            _env = _initialize_without_android_home(_AndroidEnvironment())
+        android_home = _env.android_home
+        platform_tools = _env.platform_tools
+        adb = _env.adb
+        aapt = _env.aapt
diff --git a/devlib/utils/misc.py b/devlib/utils/misc.py
new file mode 100644
index 0000000..b8626aa
--- /dev/null
+++ b/devlib/utils/misc.py
@@ -0,0 +1,601 @@
+#    Copyright 2013-2015 ARM Limited
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+
+"""
+Miscellaneous functions that don't fit anywhere else.
+
+"""
+from __future__ import division
+import os
+import sys
+import re
+import string
+import threading
+import signal
+import subprocess
+import pkgutil
+import logging
+import random
+import ctypes
+from operator import itemgetter
+from itertools import groupby
+from functools import partial
+
+import wrapt
+
+from devlib.exception import HostError, TimeoutError
+
+
+# ABI --> architectures list
+ABI_MAP = {
+    'armeabi': ['armeabi', 'armv7', 'armv7l', 'armv7el', 'armv7lh'],
+    'arm64': ['arm64', 'armv8', 'arm64-v8a', 'aarch64'],
+}
+
+# Vendor ID --> CPU part ID --> CPU variant ID --> Core Name
+# None means variant is not used.
+CPU_PART_MAP = {
+    0x41: {  # ARM
+        0x926: {None: 'ARM926'},
+        0x946: {None: 'ARM946'},
+        0x966: {None: 'ARM966'},
+        0xb02: {None: 'ARM11MPCore'},
+        0xb36: {None: 'ARM1136'},
+        0xb56: {None: 'ARM1156'},
+        0xb76: {None: 'ARM1176'},
+        0xc05: {None: 'A5'},
+        0xc07: {None: 'A7'},
+        0xc08: {None: 'A8'},
+        0xc09: {None: 'A9'},
+        0xc0e: {None: 'A17'},
+        0xc0f: {None: 'A15'},
+        0xc14: {None: 'R4'},
+        0xc15: {None: 'R5'},
+        0xc17: {None: 'R7'},
+        0xc18: {None: 'R8'},
+        0xc20: {None: 'M0'},
+        0xc60: {None: 'M0+'},
+        0xc21: {None: 'M1'},
+        0xc23: {None: 'M3'},
+        0xc24: {None: 'M4'},
+        0xc27: {None: 'M7'},
+        0xd01: {None: 'A32'},
+        0xd03: {None: 'A53'},
+        0xd04: {None: 'A35'},
+        0xd07: {None: 'A57'},
+        0xd08: {None: 'A72'},
+        0xd09: {None: 'A73'},
+    },
+    0x4e: {  # Nvidia
+        0x0: {None: 'Denver'},
+    },
+    0x51: {  # Qualcomm
+        0x02d: {None: 'Scorpion'},
+        0x04d: {None: 'MSM8960'},
+        0x06f: {  # Krait
+            0x2: 'Krait400',
+            0x3: 'Krait450',
+        },
+        0x205: {0x1: 'KryoSilver'},
+        0x211: {0x1: 'KryoGold'},
+    },
+    0x56: {  # Marvell
+        0x131: {
+            0x2: 'Feroceon 88F6281',
+        }
+    },
+}
+
+
+def get_cpu_name(implementer, part, variant):
+    part_data = CPU_PART_MAP.get(implementer, {}).get(part, {})
+    if None in part_data:  # variant does not determine core name for this vendor
+        name = part_data[None]
+    else:
+        name = part_data.get(variant)
+    return name
+
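+# For example, get_cpu_name(0x41, 0xd03, 0x0) returns 'A53', while an unknown
+# implementer/part combination returns None.
+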
+
+def preexec_function():
+    # Ignore the SIGINT signal by setting the handler to the standard
+    # signal handler SIG_IGN.
+    signal.signal(signal.SIGINT, signal.SIG_IGN)
+    # Change process group in case we have to kill the subprocess and all of
+    # its children later.
+    # TODO: this is Unix-specific; would be good to find an OS-agnostic way
+    #       to do this in case we want to port WA to Windows.
+    os.setpgrp()
+
+
+check_output_logger = logging.getLogger('check_output')
+
+
+def check_output(command, timeout=None, ignore=None, inputtext=None, **kwargs):
+    """This is a version of subprocess.check_output that adds a timeout parameter to kill
+    the subprocess if it does not return within the specified time."""
+    # pylint: disable=too-many-branches
+    if ignore is None:
+        ignore = []
+    elif isinstance(ignore, int):
+        ignore = [ignore]
+    elif not isinstance(ignore, list) and ignore != 'all':
+        message = 'Invalid value for ignore parameter: "{}"; must be an int or a list'
+        raise ValueError(message.format(ignore))
+    if 'stdout' in kwargs:
+        raise ValueError('stdout argument not allowed, it will be overridden.')
+
+    def callback(pid):
+        try:
+            check_output_logger.debug('{} timed out; sending SIGKILL'.format(pid))
+            os.killpg(pid, signal.SIGKILL)
+        except OSError:
+            pass  # process may have already terminated.
+
+    process = subprocess.Popen(command, stdout=subprocess.PIPE, stderr=subprocess.PIPE,
+                               stdin=subprocess.PIPE,
+                               preexec_fn=preexec_function, **kwargs)
+
+    if timeout:
+        timer = threading.Timer(timeout, callback, [process.pid, ])
+        timer.start()
+
+    try:
+        output, error = process.communicate(inputtext)
+    finally:
+        if timeout:
+            timer.cancel()
+
+    retcode = process.poll()
+    if retcode:
+        if retcode == -9:  # killed, assume due to timeout callback
+            raise TimeoutError(command, output='\n'.join([output, error]))
+        elif ignore != 'all' and retcode not in ignore:
+            raise subprocess.CalledProcessError(retcode, command, output='\n'.join([output, error]))
+    return output, error
+
+
+def walk_modules(path):
+    """
+    Given a package name, return a list of all modules (including submodules,
+    etc.) in that package.
+
+    :raises HostError: if an exception is raised while trying to import one of the
+                       modules under ``path``. The exception will have additional
+                       attributes set: ``module`` will be set to the qualified name
+                       of the originating module, and ``orig_exc`` will contain
+                       the original exception.
+
+    """
+
+    def __try_import(path):
+        try:
+            return __import__(path, {}, {}, [''])
+        except Exception as e:
+            he = HostError('Could not load {}: {}'.format(path, str(e)))
+            he.module = path
+            he.exc_info = sys.exc_info()
+            he.orig_exc = e
+            raise he
+
+    root_mod = __try_import(path)
+    mods = [root_mod]
+    if not hasattr(root_mod, '__path__'):
+        # root is a module not a package -- nothing to walk
+        return mods
+    for _, name, ispkg in pkgutil.iter_modules(root_mod.__path__):
+        submod_path = '.'.join([path, name])
+        if ispkg:
+            mods.extend(walk_modules(submod_path))
+        else:
+            submod = __try_import(submod_path)
+            mods.append(submod)
+    return mods
+
+
+def ensure_directory_exists(dirpath):
+    """A filter for directory paths to ensure they exist."""
+    if not os.path.isdir(dirpath):
+        os.makedirs(dirpath)
+    return dirpath
+
+
+def ensure_file_directory_exists(filepath):
+    """
+    A filter for file paths to ensure the directory of the
+    file exists and the file can be created there. The file
+    itself is *not* going to be created if it doesn't already
+    exist.
+
+    """
+    ensure_directory_exists(os.path.dirname(filepath))
+    return filepath
+
+
+def merge_dicts(*args, **kwargs):
+    if not len(args) >= 2:
+        raise ValueError('Must specify at least two dicts to merge.')
+    func = partial(_merge_two_dicts, **kwargs)
+    return reduce(func, args)
+
+
+def _merge_two_dicts(base, other, list_duplicates='all', match_types=False,  # pylint: disable=R0912,R0914
+                     dict_type=dict, should_normalize=True, should_merge_lists=True):
+    """Merge dicts normalizing their keys."""
+    merged = dict_type()
+    base_keys = base.keys()
+    other_keys = other.keys()
+    norm = normalize if should_normalize else lambda x, y: x
+
+    base_only = []
+    other_only = []
+    both = []
+    union = []
+    for k in base_keys:
+        if k in other_keys:
+            both.append(k)
+        else:
+            base_only.append(k)
+            union.append(k)
+    for k in other_keys:
+        if k in base_keys:
+            union.append(k)
+        else:
+            union.append(k)
+            other_only.append(k)
+
+    for k in union:
+        if k in base_only:
+            merged[k] = norm(base[k], dict_type)
+        elif k in other_only:
+            merged[k] = norm(other[k], dict_type)
+        elif k in both:
+            base_value = base[k]
+            other_value = other[k]
+            base_type = type(base_value)
+            other_type = type(other_value)
+            if (match_types and (base_type != other_type) and
+                    (base_value is not None) and (other_value is not None)):
+                raise ValueError('Type mismatch for {} got {} ({}) and {} ({})'.format(k, base_value, base_type,
+                                                                                       other_value, other_type))
+            if isinstance(base_value, dict):
+                merged[k] = _merge_two_dicts(base_value, other_value, list_duplicates, match_types, dict_type)
+            elif isinstance(base_value, list):
+                if should_merge_lists:
+                    merged[k] = _merge_two_lists(base_value, other_value, list_duplicates, dict_type)
+                else:
+                    merged[k] = _merge_two_lists([], other_value, list_duplicates, dict_type)
+
+            elif isinstance(base_value, set):
+                merged[k] = norm(base_value.union(other_value), dict_type)
+            else:
+                merged[k] = norm(other_value, dict_type)
+        else:  # Should never get here
+            raise AssertionError('Unexpected merge key: {}'.format(k))
+
+    return merged
+
+
+def merge_lists(*args, **kwargs):
+    if not len(args) >= 2:
+        raise ValueError('Must specify at least two lists to merge.')
+    func = partial(_merge_two_lists, **kwargs)
+    return reduce(func, args)
+
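+# Expected behaviour (a sketch):
+#
+#     merge_lists(['a', 'b'], ['b', 'c'], duplicates='all')    # ['a', 'b', 'b', 'c']
+#     merge_lists(['a', 'b'], ['b', 'c'], duplicates='first')  # ['a', 'b', 'c']
+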
+
+def _merge_two_lists(base, other, duplicates='all', dict_type=dict):  # pylint: disable=R0912
+    """
+    Merge lists, normalizing their entries.
+
+    parameters:
+
+        :base, other: the two lists to be merged. ``other`` will be merged on
+                      top of base.
+        :duplicates: Indicates the strategy of handling entries that appear
+                     in both lists. ``all`` will keep occurrences from both
+                     lists; ``first`` will only keep occurrences from
+                     ``base``; ``last`` will only keep occurrences from
+                     ``other``;
+
+                     .. note:: duplicate entries that appear in the *same* list
+                               will never be removed.
+
+    """
+    if not isiterable(base):
+        base = [base]
+    if not isiterable(other):
+        other = [other]
+    if duplicates == 'all':
+        merged_list = []
+        for v in normalize(base, dict_type) + normalize(other, dict_type):
+            if not _check_remove_item(merged_list, v):
+                merged_list.append(v)
+        return merged_list
+    elif duplicates == 'first':
+        base_norm = normalize(base, dict_type)
+        merged_list = normalize(base, dict_type)
+        for v in base_norm:
+            _check_remove_item(merged_list, v)
+        for v in normalize(other, dict_type):
+            if not _check_remove_item(merged_list, v):
+                if v not in base_norm:
+                    merged_list.append(v)  # pylint: disable=no-member
+        return merged_list
+    elif duplicates == 'last':
+        other_norm = normalize(other, dict_type)
+        merged_list = []
+        for v in normalize(base, dict_type):
+            if not _check_remove_item(merged_list, v):
+                if v not in other_norm:
+                    merged_list.append(v)
+        for v in other_norm:
+            if not _check_remove_item(merged_list, v):
+                merged_list.append(v)
+        return merged_list
+    else:
+        raise ValueError('Unexpected value for list duplicates argument: {}. '.format(duplicates) +
+                         'Must be in {"all", "first", "last"}.')
+
+
+def _check_remove_item(the_list, item):
+    """Helper function for merge_lists that checks whether an item is a removal
+    marker (a string prefixed with '~') and, if so, removes the corresponding
+    entry from the list. Returns ``True`` if the item was a removal marker and
+    ``False`` otherwise."""
+    if not isinstance(item, basestring):
+        return False
+    if not item.startswith('~'):
+        return False
+    actual_item = item[1:]
+    if actual_item in the_list:
+        del the_list[the_list.index(actual_item)]
+    return True
+
+
+def normalize(value, dict_type=dict):
+    """Normalize values. Recursively normalizes dict keys to be lower case,
+    no surrounding whitespace, underscore-delimited strings."""
+    if isinstance(value, dict):
+        normalized = dict_type()
+        for k, v in value.iteritems():
+            key = k.strip().lower().replace(' ', '_')
+            normalized[key] = normalize(v, dict_type)
+        return normalized
+    elif isinstance(value, list):
+        return [normalize(v, dict_type) for v in value]
+    elif isinstance(value, tuple):
+        return tuple([normalize(v, dict_type) for v in value])
+    else:
+        return value
+
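+# Illustrative sketch (example only, not part of the original code):
+# normalize() makes dict keys safe to compare regardless of how they were
+# typed, e.g. in a configuration file.
+def _normalize_example():
+    raw = {'Big Endian ': True, 'cpu Names': ['A53', 'A57']}
+    assert normalize(raw) == {'big_endian': True, 'cpu_names': ['A53', 'A57']}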
+
+def convert_new_lines(text):
+    """ Convert new lines to a common format.  """
+    return text.replace('\r\n', '\n').replace('\r', '\n')
+
+
+def escape_quotes(text):
+    """Escape quotes, and escaped quotes, in the specified text."""
+    return re.sub(r'\\("|\')', r'\\\\\1', text).replace('\'', '\\\'').replace('\"', '\\\"')
+
+
+def escape_single_quotes(text):
+    """Escape single quotes, and escaped single quotes, in the specified text."""
+    return re.sub(r'\\("|\')', r'\\\\\1', text).replace('\'', '\'\\\'\'')
+
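+# Illustrative sketch (example only, not part of the original code): this
+# escaping is what allows a command to be embedded in a single-quoted shell
+# string, as done for "sudo -- sh -c '...'" in devlib.utils.ssh.
+def _escape_single_quotes_example():
+    wrapped = "sh -c '{}'".format(escape_single_quotes("echo 'hi'"))
+    assert wrapped == "sh -c 'echo '\\''hi'\\'''"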
+
+def escape_double_quotes(text):
+    """Escape double quotes, and escaped double quotes, in the specified text."""
+    return re.sub(r'\\("|\')', r'\\\\\1', text).replace('\"', '\\\"')
+
+
+def getch(count=1):
+    """Read ``count`` characters from standard input."""
+    if os.name == 'nt':
+        import msvcrt  # pylint: disable=F0401
+        return ''.join([msvcrt.getch() for _ in xrange(count)])
+    else:  # assume Unix
+        import tty  # NOQA
+        import termios  # NOQA
+        fd = sys.stdin.fileno()
+        old_settings = termios.tcgetattr(fd)
+        try:
+            tty.setraw(sys.stdin.fileno())
+            ch = sys.stdin.read(count)
+        finally:
+            termios.tcsetattr(fd, termios.TCSADRAIN, old_settings)
+        return ch
+
+
+def isiterable(obj):
+    """Returns ``True`` if the specified object is iterable and
+    *is not a string type*, ``False`` otherwise."""
+    return hasattr(obj, '__iter__') and not isinstance(obj, basestring)
+
+
+def as_relative(path):
+    """Convert path to relative by stripping away the leading '/' on UNIX or
+    the equivalent on other platforms."""
+    path = os.path.splitdrive(path)[1]
+    return path.lstrip(os.sep)
+
+
+def get_cpu_mask(cores):
+    """Return a string with the hex for the cpu mask for the specified core numbers."""
+    mask = 0
+    for i in cores:
+        mask |= 1 << i
+    return '0x{0:x}'.format(mask)
+
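+# Illustrative sketch (example only, not part of the original code): the
+# returned string is the kind of hex mask accepted by e.g. taskset.
+def _get_cpu_mask_example():
+    assert get_cpu_mask([0, 4, 5]) == '0x31'  # 0b110001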
+
+def which(name):
+    """Platform-independent version of UNIX which utility."""
+    if os.name == 'nt':
+        paths = os.getenv('PATH').split(os.pathsep)
+        exts = os.getenv('PATHEXT').split(os.pathsep)
+        for path in paths:
+            testpath = os.path.join(path, name)
+            if os.path.isfile(testpath):
+                return testpath
+            for ext in exts:
+                testpathext = testpath + ext
+                if os.path.isfile(testpathext):
+                    return testpathext
+        return None
+    else:  # assume UNIX-like
+        try:
+            return check_output(['which', name])[0].strip()  # pylint: disable=E1103
+        except subprocess.CalledProcessError:
+            return None
+
+
+_bash_color_regex = re.compile('\x1b\\[[0-9;]+m')
+
+
+def strip_bash_colors(text):
+    return _bash_color_regex.sub('', text)
+
+
+def get_random_string(length):
+    """Returns a random ASCII string of the specified length)."""
+    return ''.join(random.choice(string.ascii_letters + string.digits) for _ in xrange(length))
+
+
+class LoadSyntaxError(Exception):
+
+    def __init__(self, message, filepath, lineno):
+        super(LoadSyntaxError, self).__init__(message)
+        self.filepath = filepath
+        self.lineno = lineno
+
+    def __str__(self):
+        message = 'Syntax Error in {}, line {}:\n\t{}'
+        return message.format(self.filepath, self.lineno, self.message)
+
+
+RAND_MOD_NAME_LEN = 30
+BAD_CHARS = string.punctuation + string.whitespace
+TRANS_TABLE = string.maketrans(BAD_CHARS, '_' * len(BAD_CHARS))
+
+
+def to_identifier(text):
+    """Converts text to a valid Python identifier by replacing all
+    whitespace and punctuation."""
+    return re.sub('_+', '_', text.translate(TRANS_TABLE))
+
+
+def unique(alist):
+    """
+    Returns a list containing only unique elements from the input list (but preserves
+    order, unlike sets).
+
+    """
+    result = []
+    for item in alist:
+        if item not in result:
+            result.append(item)
+    return result
+
+
+def ranges_to_list(ranges_string):
+    """Converts a sysfs-style ranges string, e.g. ``"0,2-4"``, into a list ,e.g ``[0,2,3,4]``"""
+    values = []
+    for rg in ranges_string.split(','):
+        if '-' in rg:
+            first, last = map(int, rg.split('-'))
+            values.extend(xrange(first, last + 1))
+        else:
+            values.append(int(rg))
+    return values
+
+
+def list_to_ranges(values):
+    """Converts a list, e.g ``[0,2,3,4]``, into a sysfs-style ranges string, e.g. ``"0,2-4"``"""
+    range_groups = []
+    for _, g in groupby(enumerate(values), lambda (i, x): i - x):
+        range_groups.append(map(itemgetter(1), g))
+    range_strings = []
+    for group in range_groups:
+        if len(group) == 1:
+            range_strings.append(str(group[0]))
+        else:
+            range_strings.append('{}-{}'.format(group[0], group[-1]))
+    return ','.join(range_strings)
+
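+# Illustrative round-trip sketch (example only, not part of the original code)
+# for the sysfs-style range helpers above.
+def _ranges_example():
+    assert ranges_to_list('0,2-4') == [0, 2, 3, 4]
+    assert list_to_ranges([0, 2, 3, 4]) == '0,2-4'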
+
+def list_to_mask(values, base=0x0):
+    """Converts the specified list of integer values into
+    a bit mask for those values. Optionally, the list can be
+    applied to an existing mask."""
+    for v in values:
+        base |= (1 << v)
+    return base
+
+
+def mask_to_list(mask):
+    """Converts the specfied integer bitmask into a list of
+    indexes of bits that are set in the mask."""
+    size = len(bin(mask)) - 2  # because of "0b"
+    return [size - i - 1 for i in xrange(size)
+            if mask & (1 << size - i - 1)]
+
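+# Illustrative sketch (example only, not part of the original code): a CPU
+# list converted to a bitmask and back; note that mask_to_list() returns the
+# highest set bit first.
+def _mask_example():
+    assert list_to_mask([0, 2, 3]) == 0b1101
+    assert mask_to_list(0b1101) == [3, 2, 0]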
+
+__memo_cache = {}
+
+
+def reset_memo_cache():
+    __memo_cache.clear()
+
+
+def __get_memo_id(obj):
+    """
+    An object's id() may be re-used after an object is freed, so it's not
+    sufficiently unique to identify params for the memo cache (two different
+    params may end up with the same id). This attempts to generate a more unique
+    ID string.
+    """
+    obj_id = id(obj)
+    try:
+        return '{}/{}'.format(obj_id, hash(obj))
+    except TypeError:  # obj is not hashable
+        obj_pyobj = ctypes.cast(obj_id, ctypes.py_object)
+        # TODO: Note: there is still a possibility of a clash here, if two
+        # different objects get assigned the same ID, are large, and are
+        # identical in the first thirty-two bytes. This shouldn't be much of an
+        # issue in the current application of memoizing Target calls, as it's very
+        # unlikely that a target will get passed large params; but it may cause
+        # problems in other applications, e.g. when memoizing results of operations
+        # on large arrays. I can't really think of a good way around that apart
+        # from, e.g., md5 hashing the entire raw object, which would have an
+        # undesirable impact on performance.
+        num_bytes = min(ctypes.sizeof(obj_pyobj), 32)
+        obj_bytes = ctypes.string_at(ctypes.addressof(obj_pyobj), num_bytes)
+        return '{}/{}'.format(obj_id, obj_bytes)
+
+
+@wrapt.decorator
+def memoized(wrapped, instance, args, kwargs):
+    """A decorator for memoizing functions and methods."""
+    func_id = repr(wrapped)
+
+    def memoize_wrapper(*args, **kwargs):
+        id_string = func_id + ','.join([__get_memo_id(a) for a in args])
+        id_string += ','.join('{}={}'.format(k, v)
+                              for k, v in kwargs.iteritems())
+        if id_string not in __memo_cache:
+            __memo_cache[id_string] = wrapped(*args, **kwargs)
+        return __memo_cache[id_string]
+
+    return memoize_wrapper(*args, **kwargs)
+
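+# Illustrative sketch (example only, not part of the original code): @memoized
+# caches results keyed on the function and its arguments, so the wrapped
+# function only runs once per distinct call.
+def _memoized_example():
+    calls = []
+
+    @memoized
+    def double(x):
+        calls.append(x)
+        return x * 2
+
+    assert double(21) == 42
+    assert double(21) == 42  # served from the cache
+    assert calls == [21]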
diff --git a/devlib/utils/serial_port.py b/devlib/utils/serial_port.py
new file mode 100644
index 0000000..d1410a4
--- /dev/null
+++ b/devlib/utils/serial_port.py
@@ -0,0 +1,113 @@
+#    Copyright 2013-2015 ARM Limited
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+
+import time
+from contextlib import contextmanager
+from logging import Logger
+
+import serial
+
+import pexpect
+from distutils.version import StrictVersion as V
+if V(pexpect.__version__) < V('4.0.0'):
+    import fdpexpect
+else:
+    from pexpect import fdpexpect
+# Adding pexpect exceptions into this module's namespace
+from pexpect import EOF, TIMEOUT  # NOQA pylint: disable=W0611
+
+from devlib.exception import HostError
+
+
+def pulse_dtr(conn, state=True, duration=0.1):
+    """Set the DTR line of the specified serial connection to the specified state
+    for the specified duration (note: the initial state of the line is *not* checked)."""
+    conn.setDTR(state)
+    time.sleep(duration)
+    conn.setDTR(not state)
+
+
+def get_connection(timeout, init_dtr=None, logcls=Logger,
+                   *args, **kwargs):
+    if init_dtr is not None:
+        kwargs['dsrdtr'] = True
+    try:
+        conn = serial.Serial(*args, **kwargs)
+    except serial.SerialException as e:
+        raise HostError(e.message)
+    if init_dtr is not None:
+        conn.setDTR(init_dtr)
+    conn.nonblocking()
+    conn.flushOutput()
+    target = fdpexpect.fdspawn(conn.fileno(), timeout=timeout)
+    target.logfile_read = logcls('read')
+    target.logfile_send = logcls('send')
+
+    # Monkey-patching sendline to introduce a short delay after
+    # characters are sent to the serial. If two sendlines are issued
+    # one after another, the second one might start putting characters
+    # into the serial device before the first one has finished, causing
+    # corruption. The delay prevents that.
+    tsln = target.sendline
+
+    def sendline(x):
+        tsln(x)
+        time.sleep(0.1)
+
+    target.sendline = sendline
+    return target, conn
+
+
+def write_characters(conn, line, delay=0.05):
+    """Write a single line out to serial charcter-by-character. This will ensure that nothing will
+    be dropped for longer lines."""
+    line = line.rstrip('\r\n')
+    for c in line:
+        conn.send(c)
+        time.sleep(delay)
+    conn.sendline('')
+
+
+@contextmanager
+def open_serial_connection(timeout, get_conn=False, init_dtr=None,
+                           logcls=Logger, *args, **kwargs):
+    """
+    Opens a serial connection to a device.
+
+    :param timeout: timeout for the fdpexpect spawn object.
+    :param get_conn: ``bool`` that specifies whether the underlying connection
+                     object should be yielded as well.
+    :param init_dtr: specifies the initial DTR state that should be set.
+
+    All arguments are passed into the __init__ of serial.Serial. See
+    pyserial documentation for details:
+
+        http://pyserial.sourceforge.net/pyserial_api.html#serial.Serial
+
+    :returns: a pexpect spawn object connected to the device.
+              See: http://pexpect.sourceforge.net/pexpect.html
+
+    """
+    target, conn = get_connection(timeout, init_dtr=init_dtr,
+                                  logcls=logcls, *args, **kwargs)
+    if get_conn:
+        yield target, conn
+    else:
+        yield target
+
+    target.close()  # Closes the file descriptor used by the conn.
+    del conn
+
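+# Illustrative usage sketch (example only, not part of the original code): the
+# device path and baud rate below are hypothetical and are passed straight
+# through to serial.Serial(); the logcls object is used as a pexpect logfile,
+# so it needs write() and flush() methods.
+class _NullSerialLog(object):
+
+    def __init__(self, name):
+        self.name = name
+
+    def write(self, data):
+        pass
+
+    def flush(self):
+        pass
+
+
+def _open_serial_connection_example():
+    with open_serial_connection(timeout=30, logcls=_NullSerialLog,
+                                port='/dev/ttyUSB0', baudrate=115200) as target:
+        target.sendline('')       # nudge the console
+        target.expect(r'[#$] ')   # wait for a shell prompt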
diff --git a/devlib/utils/ssh.py b/devlib/utils/ssh.py
new file mode 100644
index 0000000..f87746a
--- /dev/null
+++ b/devlib/utils/ssh.py
@@ -0,0 +1,843 @@
+#    Copyright 2014-2015 ARM Limited
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+
+import os
+import stat
+import logging
+import subprocess
+import re
+import threading
+import tempfile
+import shutil
+import socket
+import time
+
+import pexpect
+from distutils.version import StrictVersion as V
+if V(pexpect.__version__) < V('4.0.0'):
+    import pxssh
+else:
+    from pexpect import pxssh
+from pexpect import EOF, TIMEOUT, spawn
+
+from devlib.exception import HostError, TargetError, TimeoutError
+from devlib.utils.misc import which, strip_bash_colors, escape_single_quotes, check_output
+from devlib.utils.types import boolean
+
+
+ssh = None
+scp = None
+sshpass = None
+
+
+logger = logging.getLogger('ssh')
+gem5_logger = logging.getLogger('gem5-connection')
+
+def ssh_get_shell(host, username, password=None, keyfile=None, port=None, timeout=10, telnet=False, original_prompt=None):
+    _check_env()
+    start_time = time.time()
+    while True:
+        if telnet:
+            if keyfile:
+                raise ValueError('keyfile may not be used with a telnet connection.')
+            conn = TelnetPxssh(original_prompt=original_prompt)
+        else:  # ssh
+            conn = pxssh.pxssh()
+
+        try:
+            if keyfile:
+                conn.login(host, username, ssh_key=keyfile, port=port, login_timeout=timeout)
+            else:
+                conn.login(host, username, password, port=port, login_timeout=timeout)
+            break
+        except EOF:
+            timeout -= time.time() - start_time
+            if timeout <= 0:
+                message = 'Could not connect to {}; is the host name correct?'
+                raise TargetError(message.format(host))
+            time.sleep(5)
+
+    conn.setwinsize(500,200)
+    conn.sendline('')
+    conn.prompt()
+    conn.setecho(False)
+    return conn
+
+
+class TelnetPxssh(pxssh.pxssh):
+    # pylint: disable=arguments-differ
+
+    def __init__(self, original_prompt):
+        super(TelnetPxssh, self).__init__()
+        self.original_prompt = original_prompt or r'[#$]'
+
+    def login(self, server, username, password='', login_timeout=10,
+              auto_prompt_reset=True, sync_multiplier=1, port=23):
+        args = ['telnet']
+        if username is not None:
+            args += ['-l', username]
+        args += [server, str(port)]
+        cmd = ' '.join(args)
+
+        spawn._spawn(self, cmd)  # pylint: disable=protected-access
+
+        try:
+            i = self.expect('(?i)(?:password)', timeout=login_timeout)
+            if i == 0:
+                self.sendline(password)
+                i = self.expect([self.original_prompt, 'Login incorrect'], timeout=login_timeout)
+            if i:
+                raise pxssh.ExceptionPxssh('could not log in: password was incorrect')
+        except TIMEOUT:
+            if not password:
+                # No password prompt before TIMEOUT & no password provided
+                # so assume everything is okay
+                pass
+            else:
+                raise pxssh.ExceptionPxssh('could not log in: did not see a password prompt')
+
+        if not self.sync_original_prompt(sync_multiplier):
+            self.close()
+            raise pxssh.ExceptionPxssh('could not synchronize with original prompt')
+
+        if auto_prompt_reset:
+            if not self.set_unique_prompt():
+                self.close()
+                message = 'could not set shell prompt (received: {}, expected: {}).'
+                raise pxssh.ExceptionPxssh(message.format(self.before, self.PROMPT))
+        return True
+
+
+def check_keyfile(keyfile):
+    """
+    keyfile must have the right access permissions in order to be usable. If the specified
+    file doesn't, create a temporary copy and set the right permissions for that.
+
+    Returns either the ``keyfile`` (if the permissions on it are correct) or the path to a
+    temporary copy with the right permissions.
+    """
+    desired_mask = stat.S_IWUSR | stat.S_IRUSR
+    actual_mask = os.stat(keyfile).st_mode & 0o777  # permission bits only
+    if actual_mask != desired_mask:
+        tmp_file = os.path.join(tempfile.gettempdir(), os.path.basename(keyfile))
+        shutil.copy(keyfile, tmp_file)
+        os.chmod(tmp_file, desired_mask)
+        return tmp_file
+    else:  # permissions on keyfile are OK
+        return keyfile
+
+
+class SshConnection(object):
+
+    default_password_prompt = '[sudo] password'
+    max_cancel_attempts = 5
+    default_timeout = 10
+
+    @property
+    def name(self):
+        return self.host
+
+    def __init__(self,
+                 host,
+                 username,
+                 password=None,
+                 keyfile=None,
+                 port=None,
+                 timeout=None,
+                 telnet=False,
+                 password_prompt=None,
+                 original_prompt=None,
+                 platform=None
+                 ):
+        self.host = host
+        self.username = username
+        self.password = password
+        self.keyfile = check_keyfile(keyfile) if keyfile else keyfile
+        self.port = port
+        self.lock = threading.Lock()
+        self.password_prompt = password_prompt if password_prompt is not None else self.default_password_prompt
+        logger.debug('Logging in {}@{}'.format(username, host))
+        timeout = timeout if timeout is not None else self.default_timeout
+        self.conn = ssh_get_shell(host, username, password, self.keyfile, port, timeout, False, None)
+
+    def push(self, source, dest, timeout=30):
+        dest = '{}@{}:{}'.format(self.username, self.host, dest)
+        return self._scp(source, dest, timeout)
+
+    def pull(self, source, dest, timeout=30):
+        source = '{}@{}:{}'.format(self.username, self.host, source)
+        return self._scp(source, dest, timeout)
+
+    def execute(self, command, timeout=None, check_exit_code=True,
+                as_root=False, strip_colors=True): #pylint: disable=unused-argument
+        if command == '':
+            # Empty command is valid but the __devlib_ec stuff below will
+            # produce a syntax error with bash. Treat as a special case.
+            return ''
+        try:
+            with self.lock:
+                _command = '({}); __devlib_ec=$?; echo; echo $__devlib_ec'.format(command)
+                raw_output = self._execute_and_wait_for_prompt(
+                    _command, timeout, as_root, strip_colors)
+                output, exit_code_text, _ = raw_output.rsplit('\r\n', 2)
+                if check_exit_code:
+                    try:
+                        exit_code = int(exit_code_text)
+                        if exit_code:
+                            message = 'Got exit code {}\nfrom: {}\nOUTPUT: {}'
+                            raise TargetError(message.format(exit_code, command, output))
+                    except (ValueError, IndexError):
+                        logger.warning(
+                            'Could not get exit code for "{}",\ngot: "{}"'\
+                            .format(command, exit_code_text))
+                return output
+        except EOF:
+            raise TargetError('Connection lost.')
+
+    def background(self, command, stdout=subprocess.PIPE, stderr=subprocess.PIPE, as_root=False):
+        try:
+            port_string = '-p {}'.format(self.port) if self.port else ''
+            keyfile_string = '-i {}'.format(self.keyfile) if self.keyfile else ''
+            if as_root:
+                command = "sudo -- sh -c '{}'".format(command)
+            command = '{} {} {} {}@{} {}'.format(ssh, keyfile_string, port_string, self.username, self.host, command)
+            logger.debug(command)
+            if self.password:
+                command = _give_password(self.password, command)
+            return subprocess.Popen(command, stdout=stdout, stderr=stderr, shell=True)
+        except EOF:
+            raise TargetError('Connection lost.')
+
+    def close(self):
+        logger.debug('Logging out {}@{}'.format(self.username, self.host))
+        self.conn.logout()
+
+    def cancel_running_command(self):
+        # simulate impatiently hitting ^C until command prompt appears
+        logger.debug('Sending ^C')
+        for _ in xrange(self.max_cancel_attempts):
+            self.conn.sendline(chr(3))
+            if self.conn.prompt(0.1):
+                return True
+        return False
+
+    def _execute_and_wait_for_prompt(self, command, timeout=None, as_root=False, strip_colors=True, log=True):
+        self.conn.prompt(0.1)  # clear an existing prompt if there is one.
+        if self.username == 'root':
+            # As we're already root, there is no need to use sudo.
+            as_root = False
+        if as_root:
+            command = "sudo -- sh -c '{}'".format(escape_single_quotes(command))
+            if log:
+                logger.debug(command)
+            self.conn.sendline(command)
+            if self.password:
+                index = self.conn.expect_exact([self.password_prompt, TIMEOUT], timeout=0.5)
+                if index == 0:
+                    self.conn.sendline(self.password)
+        else:  # not as_root
+            if log:
+                logger.debug(command)
+            self.conn.sendline(command)
+        timed_out = self._wait_for_prompt(timeout)
+        # the regex removes line breaks potentially introduced when writing
+        # the command to the shell.
+        output = process_backspaces(self.conn.before)
+        output = re.sub(r'\r([^\n])', r'\1', output)
+        if '\r\n' in output: # strip the echoed command
+            output = output.split('\r\n', 1)[1]
+        if timed_out:
+            self.cancel_running_command()
+            raise TimeoutError(command, output)
+        if strip_colors:
+            output = strip_bash_colors(output)
+        return output
+
+    def _wait_for_prompt(self, timeout=None):
+        if timeout:
+            return not self.conn.prompt(timeout)
+        else:  # cannot timeout; wait forever
+            while not self.conn.prompt(1):
+                pass
+            return False
+
+    def _scp(self, source, dest, timeout=30):
+        # NOTE: the version of scp in Ubuntu 12.04 occasionally (and bizarrely)
+        # fails to connect to a device if port is explicitly specified using -P
+        # option, even if it is the default port, 22. To minimize this problem,
+        # only specify -P for scp if the port is *not* the default.
+        port_string = '-P {}'.format(self.port) if (self.port and self.port != 22) else ''
+        keyfile_string = '-i {}'.format(self.keyfile) if self.keyfile else ''
+        command = '{} -r {} {} {} {}'.format(scp, keyfile_string, port_string, source, dest)
+        pass_string = ''
+        logger.debug(command)
+        if self.password:
+            command = _give_password(self.password, command)
+        try:
+            check_output(command, timeout=timeout, shell=True)
+        except subprocess.CalledProcessError as e:
+            raise subprocess.CalledProcessError(e.returncode, e.cmd.replace(pass_string, ''), e.output)
+        except TimeoutError as e:
+            raise TimeoutError(e.command.replace(pass_string, ''), e.output)
+
+
+class TelnetConnection(SshConnection):
+
+    def __init__(self,
+                 host,
+                 username,
+                 password=None,
+                 port=None,
+                 timeout=None,
+                 password_prompt=None,
+                 original_prompt=None,
+                 platform=None):
+        self.host = host
+        self.username = username
+        self.password = password
+        self.port = port
+        self.keyfile = None
+        self.lock = threading.Lock()
+        self.password_prompt = password_prompt if password_prompt is not None else self.default_password_prompt
+        logger.debug('Logging in {}@{}'.format(username, host))
+        timeout = timeout if timeout is not None else self.default_timeout
+        self.conn = ssh_get_shell(host, username, password, None, port, timeout, True, original_prompt)
+
+
+class Gem5Connection(TelnetConnection):
+
+    def __init__(self,
+                 platform,
+                 host=None,
+                 username=None,
+                 password=None,
+                 port=None,
+                 timeout=None,
+                 password_prompt=None,
+                 original_prompt=None,
+                 ):
+        if host is not None:
+            host_system = socket.gethostname()
+            if host_system != host:
+                raise TargetError("Gem5Connection can only connect to gem5 "
+                                   "simulations on your current host, which "
+                                   "differs from the one given {}!"
+                                   .format(host_system, host))
+        if username is not None and username != 'root':
+            raise ValueError('User should be root in gem5!')
+        if password is not None and password != '':
+            raise ValueError('No password needed in gem5!')
+        self.username = 'root'
+        self.is_rooted = True
+        self.password = None
+        self.port = None
+        # Long timeouts to account for gem5 being slow
+        # Can be overridden if the given timeout is longer
+        self.default_timeout = 3600
+        if timeout is not None:
+            if timeout > self.default_timeout:
+                logger.info('Overwriting the default timeout of gem5 ({})'
+                                 ' to {}'.format(self.default_timeout, timeout))
+                self.default_timeout = timeout
+            else:
+                logger.info('Ignoring the given timeout --> gem5 needs longer timeouts')
+        self.ready_timeout = self.default_timeout * 3
+        # Counterpart in gem5_interact_dir
+        self.gem5_input_dir = '/mnt/host/'
+        # Location of m5 binary in the gem5 simulated system
+        self.m5_path = None
+        # Actual telnet connection to gem5 simulation
+        self.conn = None
+        # Flag to indicate the gem5 device is ready to interact with the
+        # outer world
+        self.ready = False
+        # Lock file to prevent multiple connections to same gem5 simulation
+        # (gem5 does not allow this)
+        self.lock_directory = '/tmp/'
+        self.lock_file_name = None # Will be set once connected to gem5
+
+        # These parameters will be set by either the method to connect to the
+        # gem5 platform or directly to the gem5 simulation
+        # Intermediate directory to push things to gem5 using VirtIO
+        self.gem5_interact_dir = None
+        # Directory to store output from gem5 on the host
+        self.gem5_out_dir = None
+        # Actual gem5 simulation
+        self.gem5simulation = None
+
+        # Connect to gem5
+        if platform:
+            self._connect_gem5_platform(platform)
+
+        # Wait for boot
+        self._wait_for_boot()
+
+        # Mount the virtIO to transfer files in/out gem5 system
+        self._mount_virtio()
+
+    def set_hostinteractdir(self, indir):
+        logger.info('Setting hostinteractdir from {} to {}'
+                    .format(self.gem5_input_dir, indir))
+        self.gem5_input_dir = indir
+
+    def push(self, source, dest, timeout=None):
+        """
+        Push a file to the gem5 device using VirtIO
+
+        The file to push to the device is copied to the temporary directory on
+        the host, before being copied within the simulation to the destination.
+        Checks, in the form of 'ls' with error code checking, are performed to
+        ensure that the file is copied to the destination.
+        """
+        # First check if the connection is set up to interact with gem5
+        self._check_ready()
+
+        filename = os.path.basename(source)
+        logger.debug("Pushing {} to device.".format(source))
+        logger.debug("gem5interactdir: {}".format(self.gem5_interact_dir))
+        logger.debug("dest: {}".format(dest))
+        logger.debug("filename: {}".format(filename))
+
+        # We need to copy the file to the temporary directory on the host first
+        self._move_to_temp_dir(source)
+
+        # Dest in gem5 world is a file rather than directory
+        if os.path.basename(dest) != filename:
+            dest = os.path.join(dest, filename)
+        # Back to the gem5 world
+        self._gem5_shell("ls -al {}{}".format(self.gem5_input_dir, filename))
+        self._gem5_shell("cat '{}''{}' > '{}'".format(self.gem5_input_dir,
+                                                     filename,
+                                                     dest))
+        self._gem5_shell("sync")
+        self._gem5_shell("ls -al {}".format(dest))
+        self._gem5_shell("ls -al {}".format(self.gem5_input_dir))
+        logger.debug("Push complete.")
+
+    def pull(self, source, dest, timeout=0): #pylint: disable=unused-argument
+        """
+        Pull a file from the gem5 device using m5 writefile
+
+        The file is copied to the local directory within the guest as the m5
+        writefile command assumes that the file is local. The file is then
+        written out to the host system using writefile, prior to being moved to
+        the destination on the host.
+        """
+        # First check if the connection is set up to interact with gem5
+        self._check_ready()
+
+        filename = os.path.basename(source)
+
+        logger.debug("pull_file {} {}".format(source, filename))
+        # We don't check the exit code here because it is non-zero if the source
+        # and destination are the same. The ls below will cause an error if the
+        # file was not where we expected it to be.
+        if os.path.dirname(source) != os.getcwd():
+            self._gem5_shell("cat '{}' > '{}'".format(source, filename))
+        self._gem5_shell("sync")
+        self._gem5_shell("ls -la {}".format(filename))
+        logger.debug('Finished the copy in the simulator')
+        self._gem5_util("writefile {}".format(filename))
+
+        if 'cpu' not in filename:
+            while not os.path.exists(os.path.join(self.gem5_out_dir, filename)):
+                time.sleep(1)
+
+        # Perform the local move
+        shutil.move(os.path.join(self.gem5_out_dir, filename), dest)
+        logger.debug("Pull complete.")
+
+    def execute(self, command, timeout=1000, check_exit_code=True,
+                as_root=False, strip_colors=True):
+        """
+        Execute a command on the gem5 platform
+        """
+        # First check if the connection is set up to interact with gem5
+        self._check_ready()
+
+        output = self._gem5_shell(command,
+                                  check_exit_code=check_exit_code,
+                                  as_root=as_root)
+        if strip_colors:
+            output = strip_bash_colors(output)
+        return output
+
+    def background(self, command, stdout=subprocess.PIPE,
+                   stderr=subprocess.PIPE, as_root=False):
+        # First check if the connection is set up to interact with gem5
+        self._check_ready()
+
+        # Create the logfile for stderr/stdout redirection
+        command_name = command.split(' ')[0].split('/')[-1]
+        redirection_file = 'BACKGROUND_{}.log'.format(command_name)
+        trial = 0
+        while os.path.isfile(redirection_file):
+            # Log file already exists so add to name
+            redirection_file = 'BACKGROUND_{}{}.log'.format(command_name, trial)
+            trial += 1
+
+        # Create the command to pass on to gem5 shell
+        complete_command = '{} >> {} 2>&1 &'.format(command, redirection_file)
+        output = self._gem5_shell(complete_command, as_root=as_root)
+        output = strip_bash_colors(output)
+        gem5_logger.info('STDERR/STDOUT of background command will be '
+                         'redirected to {}. Use target.pull() to '
+                         'get this file'.format(redirection_file))
+        return output
+
+    def close(self):
+        """
+        Close and disconnect from the gem5 simulation. Additionally, we remove
+        the temporary directory used to pass files into the simulation.
+        """
+        gem5_logger.info("Gracefully terminating the gem5 simulation.")
+        try:
+            self._gem5_util("exit")
+            self.gem5simulation.wait()
+        except EOF:
+            pass
+        gem5_logger.info("Removing the temporary directory")
+        try:
+            shutil.rmtree(self.gem5_interact_dir)
+        except OSError:
+            gem5_logger.warn("Failed to remove the temporary directory!")
+
+        # Delete the lock file
+        os.remove(self.lock_file_name)
+
+    # Functions only to be called by the Gem5 connection itself
+    def _connect_gem5_platform(self, platform):
+        port = platform.gem5_port
+        gem5_simulation = platform.gem5
+        gem5_interact_dir = platform.gem5_interact_dir
+        gem5_out_dir = platform.gem5_out_dir
+
+        self.connect_gem5(port, gem5_simulation, gem5_interact_dir, gem5_out_dir)
+
+    # This function connects to the gem5 simulation
+    def connect_gem5(self, port, gem5_simulation, gem5_interact_dir,
+                      gem5_out_dir):
+        """
+        Connect to the telnet port of the gem5 simulation.
+
+        We connect, and wait for the prompt to be found. We do not use a timeout
+        for this, and wait for the prompt in a while loop as the gem5 simulation
+        can take many hours to reach a prompt when booting the system. We also
+        inject some newlines periodically to try and force gem5 to show a
+        prompt. Once the prompt has been found, we replace it with a unique
+        prompt to ensure that we are able to match it properly. We also disable
+        the echo as this simplifies parsing the output when executing commands
+        on the device.
+        """
+        host = socket.gethostname()
+        gem5_logger.info("Connecting to the gem5 simulation on port {}".format(port))
+
+        # Check if there is no on-going connection yet
+        lock_file_name = '{}{}_{}.LOCK'.format(self.lock_directory, host, port)
+        if os.path.isfile(lock_file_name):
+            # There is already a connection to this gem5 simulation
+            raise TargetError('There is already a connection to the gem5 '
+                              'simulation using port {} on {}!'
+                              .format(port, host))
+
+        # Connect to the gem5 telnet port. Use a short timeout here.
+        attempts = 0
+        while attempts < 10:
+            attempts += 1
+            try:
+                self.conn = TelnetPxssh(original_prompt=None)
+                self.conn.login(host, self.username, port=port,
+                                login_timeout=10, auto_prompt_reset=False)
+                break
+            except pxssh.ExceptionPxssh:
+                pass
+        else:
+            gem5_simulation.kill()
+            raise TargetError("Failed to connect to the gem5 telnet session.")
+
+        gem5_logger.info("Connected! Waiting for prompt...")
+
+        # Create the lock file
+        self.lock_file_name = lock_file_name
+        open(self.lock_file_name, 'w').close() # Similar to touch
+        gem5_logger.info("Created lock file {} to prevent reconnecting to "
+                         "same simulation".format(self.lock_file_name))
+
+        # We need to find the prompt. It might be different if we are resuming
+        # from a checkpoint. Therefore, we test multiple options here.
+        prompt_found = False
+        while not prompt_found:
+            try:
+                self._login_to_device()
+            except TIMEOUT:
+                pass
+            try:
+                # Try and force a prompt to be shown
+                self.conn.send('\n')
+                self.conn.expect([r'# ', self.conn.UNIQUE_PROMPT, r'\[PEXPECT\][\\\$\#]+ '], timeout=60)
+                prompt_found = True
+            except TIMEOUT:
+                pass
+
+        gem5_logger.info("Successfully logged in")
+        gem5_logger.info("Setting unique prompt...")
+
+        self.conn.set_unique_prompt()
+        self.conn.prompt()
+        gem5_logger.info("Prompt found and replaced with a unique string")
+
+        # We check that the prompt is what we think it should be. If not, we
+        # need to update the regex we use to match.
+        self._find_prompt()
+
+        self.conn.setecho(False)
+        self._sync_gem5_shell()
+
+        # Fully connected to gem5 simulation
+        self.gem5_interact_dir = gem5_interact_dir
+        self.gem5_out_dir = gem5_out_dir
+        self.gem5simulation = gem5_simulation
+
+        # Ready for interaction now
+        self.ready = True
+
+    def _login_to_device(self):
+        """
+        Login to device, will be overwritten if there is an actual login
+        """
+        pass
+
+    def _find_prompt(self):
+        prompt = r'\[PEXPECT\][\\\$\#]+ '
+        synced = False
+        while not synced:
+            self.conn.send('\n')
+            i = self.conn.expect([prompt, self.conn.UNIQUE_PROMPT, r'[\$\#] '], timeout=self.default_timeout)
+            if i == 0:
+                synced = True
+            elif i == 1:
+                prompt = self.conn.UNIQUE_PROMPT
+                synced = True
+            else:
+                prompt = re.sub(r'\$', r'\\\$', self.conn.before.strip() + self.conn.after.strip())
+                prompt = re.sub(r'\#', r'\\\#', prompt)
+                prompt = re.sub(r'\[', r'\[', prompt)
+                prompt = re.sub(r'\]', r'\]', prompt)
+
+        self.conn.PROMPT = prompt
+
+    def _sync_gem5_shell(self):
+        """
+        Synchronise with the gem5 shell.
+
+        Write some unique text to the gem5 device to allow us to synchronise
+        with the shell output. We actually get two prompts so we need to match
+        both of these.
+        """
+        gem5_logger.debug("Sending Sync")
+        self.conn.send("echo \*\*sync\*\*\n")
+        self.conn.expect(r"\*\*sync\*\*", timeout=self.default_timeout)
+        self.conn.expect([self.conn.UNIQUE_PROMPT, self.conn.PROMPT], timeout=self.default_timeout)
+        self.conn.expect([self.conn.UNIQUE_PROMPT, self.conn.PROMPT], timeout=self.default_timeout)
+
+    def _gem5_util(self, command):
+        """ Execute a gem5 utility command using the m5 binary on the device """
+        if self.m5_path is None:
+            raise TargetError('Path to m5 binary on simulated system is not set!')
+        self._gem5_shell('{} {}'.format(self.m5_path, command))
+
+    def _gem5_shell(self, command, as_root=False, timeout=None, check_exit_code=True, sync=True):  # pylint: disable=R0912
+        """
+        Execute a command in the gem5 shell
+
+        This wraps the telnet connection to gem5 and processes the raw output.
+
+        This method waits for the shell to return, and then will try and
+        separate the output from the command from the command itself. If this
+        fails, warn, but continue with the potentially wrong output.
+
+        The exit code is also checked by default, and non-zero exit codes will
+        raise a TargetError.
+        """
+        if sync:
+            self._sync_gem5_shell()
+
+        gem5_logger.debug("gem5_shell command: {}".format(command))
+
+        # Send the actual command
+        self.conn.send("{}\n".format(command))
+
+        # Wait for the response. We just sit here and wait for the prompt to
+        # appear, as gem5 might take a long time to provide the output. This
+        # avoids timeout issues.
+        command_index = -1
+        while command_index == -1:
+            if self.conn.prompt():
+                output = re.sub(r' \r([^\n])', r'\1', self.conn.before)
+                output = re.sub(r'[\b]', r'', output)
+                # Deal with line wrapping
+                output = re.sub(r'[\r].+?<', r'', output)
+                command_index = output.find(command)
+
+                # If we have -1, then we cannot match the command, but the
+                # prompt has returned. Hence, we have a bit of an issue. We
+                # warn, and return the whole output.
+                if command_index == -1:
+                    gem5_logger.warn("gem5_shell: Unable to match command in "
+                                     "command output. Expect parsing errors!")
+                    command_index = 0
+
+        output = output[command_index + len(command):].strip()
+
+        # It is possible that gem5 will echo the command. Therefore, we need to
+        # remove that too!
+        command_index = output.find(command)
+        if command_index != -1:
+            output = output[command_index + len(command):].strip()
+
+        gem5_logger.debug("gem5_shell output: {}".format(output))
+
+        # We get a second prompt. Hence, we need to eat one to make sure that we
+        # stay in sync. If we do not do this, we risk getting out of sync for
+        # slower simulations.
+        self.conn.expect([self.conn.UNIQUE_PROMPT, self.conn.PROMPT], timeout=self.default_timeout)
+
+        if check_exit_code:
+            exit_code_text = self._gem5_shell('echo $?', as_root=as_root,
+                                             timeout=timeout, check_exit_code=False,
+                                             sync=False)
+            try:
+                exit_code = int(exit_code_text.split()[0])
+                if exit_code:
+                    message = 'Got exit code {}\nfrom: {}\nOUTPUT: {}'
+                    raise TargetError(message.format(exit_code, command, output))
+            except (ValueError, IndexError):
+                gem5_logger.warning('Could not get exit code for "{}",\ngot: "{}"'.format(command, exit_code_text))
+
+        return output
+
+    def _mount_virtio(self):
+        """
+        Mount the VirtIO device in the simulated system.
+        """
+        gem5_logger.info("Mounting VirtIO device in simulated system")
+
+        self._gem5_shell('su -c "mkdir -p {}" root'.format(self.gem5_input_dir))
+        mount_command = "mount -t 9p -o trans=virtio,version=9p2000.L,aname={} gem5 {}".format(self.gem5_interact_dir, self.gem5_input_dir)
+        self._gem5_shell(mount_command)
+
+    def _move_to_temp_dir(self, source):
+        """
+        Move a file to the temporary directory on the host for copying to the
+        gem5 device
+        """
+        command = "cp {} {}".format(source, self.gem5_interact_dir)
+        gem5_logger.debug("Local copy command: {}".format(command))
+        subprocess.call(command.split())
+        subprocess.call("sync".split())
+
+    def _check_ready(self):
+        """
+        Check if the gem5 platform is ready
+        """
+        if not self.ready:
+            raise TargetError('Gem5 is not ready to interact yet')
+
+    def _wait_for_boot(self):
+        pass
+
+    def _probe_file(self, filepath):
+        """
+        Internal method to check if the target has a certain file
+        """
+        command = 'if [ -e \'{}\' ]; then echo 1; else echo 0; fi'
+        output = self.execute(command.format(filepath), as_root=self.is_rooted)
+        return boolean(output.strip())
+
+
+class LinuxGem5Connection(Gem5Connection):
+
+    def _login_to_device(self):
+        gem5_logger.info("Trying to log in to gem5 device")
+        login_prompt = ['login:', 'AEL login:', 'username:', 'aarch64-gem5 login:']
+        login_password_prompt = ['password:']
+        # Wait for the login prompt
+        prompt = login_prompt + [self.conn.UNIQUE_PROMPT]
+        i = self.conn.expect(prompt, timeout=10)
+        # Check if we are already at a prompt, or if we need to log in.
+        if i < len(prompt) - 1:
+            self.conn.sendline("{}".format(self.username))
+            password_prompt = login_password_prompt + [r'# ', self.conn.UNIQUE_PROMPT]
+            j = self.conn.expect(password_prompt, timeout=self.default_timeout)
+            if j < len(password_prompt) - 2:
+                self.conn.sendline("{}".format(self.password))
+                self.conn.expect([r'# ', self.conn.UNIQUE_PROMPT], timeout=self.default_timeout)
+
+
+
+class AndroidGem5Connection(Gem5Connection):
+
+    def _wait_for_boot(self):
+        """
+        Wait for the system to boot
+
+        We monitor the sys.boot_completed and service.bootanim.exit system
+        properties to determine when the system has finished booting. In the
+        event that we cannot coerce the result of service.bootanim.exit to an
+        integer, we assume that the boot animation was disabled and do not wait
+        for it to finish.
+
+        """
+        gem5_logger.info("Waiting for Android to boot...")
+        while True:
+            booted = False
+            anim_finished = True  # assume boot animation was disabled if conversion below fails
+            try:
+                booted = (int('0' + self._gem5_shell('getprop sys.boot_completed', check_exit_code=False).strip()) == 1)
+                anim_finished = (int(self._gem5_shell('getprop service.bootanim.exit', check_exit_code=False).strip()) == 1)
+            except ValueError:
+                pass
+            if booted and anim_finished:
+                break
+            time.sleep(60)
+
+        gem5_logger.info("Android booted")
+
+def _give_password(password, command):
+    if not sshpass:
+        raise HostError('Must have sshpass installed on the host in order to use password-based auth.')
+    pass_string = "sshpass -p '{}' ".format(password)
+    return pass_string + command
+
+
+def _check_env():
+    global ssh, scp, sshpass  # pylint: disable=global-statement
+    if not ssh:
+        ssh = which('ssh')
+        scp = which('scp')
+        sshpass = which('sshpass')
+    if not (ssh and scp):
+        raise HostError('OpenSSH must be installed on the host.')
+
+
+def process_backspaces(text):
+    chars = []
+    for c in text:
+        if c == chr(8) and chars:  # backspace
+            chars.pop()
+        else:
+            chars.append(c)
+    return ''.join(chars)
diff --git a/devlib/utils/types.py b/devlib/utils/types.py
new file mode 100644
index 0000000..be30bfc
--- /dev/null
+++ b/devlib/utils/types.py
@@ -0,0 +1,113 @@
+#    Copyright 2014-2015 ARM Limited
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+
+"""
+Routines for doing various type conversions. These usually embody some higher-level
+semantics than are present in standard Python types (e.g. ``boolean`` will convert the
+string ``"false"`` to ``False``, where as non-empty strings are usually considered to be
+``True``).
+
+A lot of these are intended to specify type conversions declaratively in places like
+``Parameter``'s ``kind`` argument. These are basically "hacks" around the fact that Python
+is not the best language to use for configuration.
+
+"""
+import math
+
+from devlib.utils.misc import isiterable, to_identifier, ranges_to_list, list_to_mask
+
+
+def identifier(text):
+    """Converts text to a valid Python identifier by replacing all
+    whitespace and punctuation."""
+    return to_identifier(text)
+
+
+def boolean(value):
+    """
+    Returns bool represented by the value. This is different from
+    calling the builtin bool() in that it will interpret string representations.
+    e.g. boolean('0') and boolean('false') will both yield False.
+
+    """
+    false_strings = ['', '0', 'n', 'no', 'off']
+    if isinstance(value, basestring):
+        value = value.lower()
+        if value in false_strings or 'false'.startswith(value):
+            return False
+    return bool(value)
+
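+# Illustrative sketch (example only, not part of the original code): string
+# spellings of "false" are mapped to False, unlike the builtin bool().
+def _boolean_example():
+    assert boolean('false') is False
+    assert boolean('NO') is False
+    assert boolean('1') is True
+    assert bool('false') is True  # the builtin treats any non-empty string as True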
+
+def integer(value):
+    """Handles conversions for string respresentations of binary, octal and hex."""
+    if isinstance(value, basestring):
+        return int(value, 0)
+    else:
+        return int(value)
+
+
+def numeric(value):
+    """
+    Returns the value as number (int if possible, or float otherwise), or
+    raises ``ValueError`` if the specified ``value`` does not have a
+    straightforward numeric conversion.
+
+    """
+    if isinstance(value, int):
+        return value
+    try:
+        fvalue = float(value)
+    except ValueError:
+        raise ValueError('Not numeric: {}'.format(value))
+    if not math.isnan(fvalue) and not math.isinf(fvalue):
+        ivalue = int(fvalue)
+        if ivalue == fvalue:  # yeah, yeah, I know. Whatever. This is best-effort.
+            return ivalue
+    return fvalue
+
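+# Illustrative sketch (example only, not part of the original code) of the
+# numeric conversions above.
+def _numeric_example():
+    assert integer('0x10') == 16  # base is inferred from the prefix
+    assert numeric('4.0') == 4 and isinstance(numeric('4.0'), int)
+    assert numeric('4.5') == 4.5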
+
+class caseless_string(str):
+    """
+    Just like built-in Python string except case-insensitive on comparisons. However, the
+    case is preserved otherwise.
+
+    """
+
+    def __eq__(self, other):
+        if isinstance(other, basestring):
+            other = other.lower()
+        return self.lower() == other
+
+    def __ne__(self, other):
+        return not self.__eq__(other)
+
+    def __cmp__(self, other):
+        if isinstance(other, basestring):
+            other = other.lower()
+        return cmp(self.lower(), other)
+
+    def format(self, *args, **kwargs):
+        return caseless_string(super(caseless_string, self).format(*args, **kwargs))
+
+
+def bitmask(value):
+    if isinstance(value, basestring):
+        value = ranges_to_list(value)
+    if isiterable(value):
+        value = list_to_mask(value)
+    if not isinstance(value, int):
+        raise ValueError(value)
+    return value
diff --git a/devlib/utils/uboot.py b/devlib/utils/uboot.py
new file mode 100644
index 0000000..f85ece3
--- /dev/null
+++ b/devlib/utils/uboot.py
@@ -0,0 +1,116 @@
+#
+#    Copyright 2015 ARM Limited
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+import re
+import time
+import logging
+
+from devlib.utils.serial_port import TIMEOUT
+
+
+logger = logging.getLogger('U-Boot')
+
+
+class UbootMenu(object):
+    """
+    Allows navigating Das U-boot menu over serial (it relies on a pexpect connection).
+
+    """
+
+    option_regex = re.compile(r'^\[(\d+)\]\s+([^\r]+)\r\n', re.M)
+    prompt_regex = re.compile(r'^([^\r\n]+):\s*', re.M)
+    invalid_regex = re.compile(r'Invalid input \(max (\d+)\)', re.M)
+
+    load_delay = 1  # seconds
+    default_timeout = 60  # seconds
+
+    def __init__(self, conn, start_prompt='Hit any key to stop autoboot'):
+        """
+        :param conn: A serial connection as returned by ``pexpect.spawn()``.
+        :param start_prompt: The starting prompt to wait for during ``open()``.
+
+        """
+        self.conn = conn
+        self.conn.crlf = '\n\r'  # TODO: this has *got* to be a bug in U-Boot...
+        self.start_prompt = start_prompt
+        self.options = {}
+        self.prompt = None
+
+    def open(self, timeout=default_timeout):
+        """
+        "Open" the UEFI menu by sending an interrupt on STDIN after seeing the
+        starting prompt (configurable upon creation of the ``UefiMenu`` object.
+
+        """
+        self.conn.expect(self.start_prompt, timeout)
+        self.conn.sendline('')
+        time.sleep(self.load_delay)
+        self.conn.readline()  # garbage
+        self.conn.sendline('')
+        self.prompt = self.conn.readline().strip()
+
+    def getenv(self):
+        output = self.enter('printenv')
+        result = {}
+        for line in output.split('\n'):
+            if '=' in line:
+                variable, value = line.split('=', 1)
+                result[variable.strip()] = value.strip()
+        return result
+
+    def setenv(self, variable, value, force=False):
+        force_str = ' -f' if force else ''
+        if value is not None:
+            command = 'setenv{} {} {}'.format(force_str, variable, value)
+        else:
+            command = 'setenv{} {}'.format(force_str, variable)
+        return self.enter(command)
+
+    def boot(self):
+        self.write_characters('boot')
+
+    def nudge(self):
+        """Send a little nudge to ensure there is something to read. This is useful when you're not
+        sure if all output from the serial has been read already."""
+        self.enter('')
+
+    def enter(self, value, delay=load_delay):
+        """Like ``select()`` except no resolution is performed -- the value is sent directly
+        to the serial connection."""
+        # Empty the buffer first, so that only response to the input about to
+        # be sent will be processed by subsequent commands.
+        value = str(value)
+        self.empty_buffer()
+        self.write_characters(value)
+        self.conn.expect(self.prompt, timeout=delay)
+        return self.conn.before
+
+    def write_characters(self, line):
+        line = line.rstrip('\r\n')
+        for c in line:
+            self.conn.send(c)
+            time.sleep(0.05)
+        self.conn.sendline('')
+
+    def empty_buffer(self):
+        try:
+            while True:
+                time.sleep(0.1)
+                self.conn.read_nonblocking(size=1024, timeout=0.1)
+        except TIMEOUT:
+            pass
+        self.conn.buffer = ''
+
diff --git a/devlib/utils/uefi.py b/devlib/utils/uefi.py
new file mode 100644
index 0000000..08d10d9
--- /dev/null
+++ b/devlib/utils/uefi.py
@@ -0,0 +1,239 @@
+#    Copyright 2014-2015 ARM Limited
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+
+import re
+import time
+import logging
+from copy import copy
+
+from devlib.utils.serial_port import write_characters, TIMEOUT
+from devlib.utils.types import boolean
+
+
+logger = logging.getLogger('UEFI')
+
+
+class UefiConfig(object):
+
+    def __init__(self, config_dict):
+        if isinstance(config_dict, UefiConfig):
+            self.__dict__ = copy(config_dict.__dict__)
+        else:
+            try:
+                self.image_name = config_dict['image_name']
+                self.image_args = config_dict['image_args']
+                self.fdt_support = boolean(config_dict['fdt_support'])
+            except KeyError as e:
+                raise ValueError('Missing mandatory parameter for UEFI entry config: "{}"'.format(e))
+            self.initrd = config_dict.get('initrd')
+            self.fdt_path = config_dict.get('fdt_path')
+            if self.fdt_path and not self.fdt_support:
+                raise ValueError('FDT path has been specified for UEFI entry, when FDT support is "False"')
+
+
+class UefiMenu(object):
+    """
+    Allows navigating UEFI menu over serial (it relies on a pexpect connection).
+
+    """
+
+    option_regex = re.compile(r'^\[(\d+)\]\s+([^\r]+)\r\n', re.M)
+    prompt_regex = re.compile(r'^(\S[^\r\n]+):\s*', re.M)
+    invalid_regex = re.compile(r'Invalid input \(max (\d+)\)', re.M)
+
+    load_delay = 1  # seconds
+    default_timeout = 60  # seconds
+
+    def __init__(self, conn, prompt='The default boot selection will start in'):
+        """
+        :param conn: A serial connection as returned by ``pexpect.spawn()``.
+        :param prompt: The starting prompt to wait for during ``open()``.
+
+        """
+        self.conn = conn
+        self.start_prompt = prompt
+        self.options = {}
+        self.prompt = None
+        self.attempting_invalid_retry = False
+
+    def wait(self, timeout=default_timeout):
+        """
+        "Open" the UEFI menu by sending an interrupt on STDIN after seeing the
+        starting prompt (configurable upon creation of the ``UefiMenu`` object).
+
+        """
+        self.conn.expect(self.start_prompt, timeout)
+        self.connect()
+
+    def connect(self, timeout=default_timeout):
+        self.nudge()
+        time.sleep(self.load_delay)
+        self.read_menu(timeout=timeout)
+
+    def create_entry(self, name, config):
+        """Create a new UEFI entry using the parameters. The menu is assumed
+        to be at the top level. Upon return, the menu will be at the top level."""
+        logger.debug('Creating UEFI entry {}'.format(name))
+        self.nudge()
+        self.select('Boot Manager')
+        self.select('Add Boot Device Entry')
+        self.select('NOR Flash')
+        self.enter(config.image_name)
+        self.enter('y' if config.fdt_support else 'n')
+        if config.initrd:
+            self.enter('y')
+            self.enter(config.initrd)
+        else:
+            self.enter('n')
+        self.enter(config.image_args)
+        self.enter(name)
+
+        if config.fdt_path:
+            self.select('Update FDT path')
+            self.enter(config.fdt_path)
+
+        self.select('Return to main menu')
+
+    def delete_entry(self, name):
+        """Delete the specified UEFI entry. The menu is assumed
+        to be at the top level. Upon return, the menu will be at the top level."""
+        logger.debug('Removing UEFI entry {}'.format(name))
+        self.nudge()
+        self.select('Boot Manager')
+        self.select('Remove Boot Device Entry')
+        self.select(name)
+        self.select('Return to main menu')
+
+    def select(self, option, timeout=default_timeout):
+        """
+        Select the specified option from the current menu.
+
+        :param option: Could be an ``int`` index of the option, or a string/regex to
+                       match option text against.
+        :param timeout: If a non-``int`` option is specified, the option list may
+                        need to be parsed (if it hasn't been already); this may block,
+                        and the timeout is used to cap that, resulting in a ``TIMEOUT``
+                        exception.
+        :param delay: A fixed delay to wait after sending the input to the serial connection.
+                      This should be set if this action is known to result in a
+                      long-running operation.
+
+        """
+        if isinstance(option, basestring):
+            option = self.get_option_index(option, timeout)
+        self.enter(option)
+
+    def enter(self, value, delay=load_delay):
+        """Like ``select()`` except no resolution is performed -- the value is sent directly
+        to the serial connection."""
+        # Empty the buffer first, so that only the response to the input about to
+        # be sent will be processed by subsequent commands.
+        value = str(value)
+        self._reset()
+        write_characters(self.conn, value)
+        # TODO: in case the value is long and complicated, things may get
+        # screwed up (e.g. there may be line breaks injected); additionally,
+        # special chars might cause the regex to fail. To avoid these issues,
+        # I'm only matching against the first 5 chars of the value. This is
+        # entirely arbitrary and I'll probably have to find a better way of
+        # doing this at some point.
+        self.conn.expect(value[:5], timeout=delay)
+        time.sleep(self.load_delay)
+
+    def read_menu(self, timeout=default_timeout):
+        """Parse serial output to get the menu options and the following prompt."""
+        attempting_timeout_retry = False
+        self.attempting_invalid_retry = False
+        while True:
+            index = self.conn.expect([self.option_regex, self.prompt_regex, self.invalid_regex, TIMEOUT],
+                                     timeout=timeout)
+            match = self.conn.match
+            if index == 0:  # matched menu option
+                self.options[match.group(1)] = match.group(2)
+            elif index == 1:  # matched prompt
+                self.prompt = match.group(1)
+                break
+            elif index == 2:  # matched invalid selection
+                # We've sent an invalid input (which includes an empty line) at
+                # the top-level menu. To get back the menu options, it seems we
+                # need to enter what the error reports as the max + 1, so...
+                if not self.attempting_invalid_retry:
+                    self.attempting_invalid_retry = True
+                    val = int(match.group(1))
+                    self.empty_buffer()
+                    self.enter(val)
+                    self.select('Return to main menu')
+                    self.attempting_invalid_retry = False
+                else:   # OK, that didn't work; panic!
+                    raise RuntimeError('Could not read menu entries; stuck on "{}" prompt'.format(self.prompt))
+            elif index == 3:  # timed out
+                if not attempting_timeout_retry:
+                    attempting_timeout_retry = True
+                    self.nudge()
+                else:  # Didn't help. Run away!
+                    raise RuntimeError('Did not see a valid UEFI menu.')
+            else:
+                raise AssertionError('Unexpected response waiting for UEFI menu')  # should never get here
+
+    def list_options(self, timeout=default_timeout):
+        """Returns a list of (index, text) pairs for the options in the current menu,
+        parsing the menu first if it has not been read yet."""
+        if not self.prompt:
+            self.read_menu(timeout)
+        return self.options.items()
+
+    def get_option_index(self, text, timeout=default_timeout):
+        """Returns the menu index of the specified option text (uses regex matching). If the option
+        is not in the current menu, ``LookupError`` will be raised."""
+        if not self.prompt:
+            self.read_menu(timeout)
+        for k, v in self.options.iteritems():
+            if re.search(text, v):
+                return k
+        raise LookupError(text)
+
+    def has_option(self, text, timeout=default_timeout):
+        """Returns ``True`` if at least one of the options in the current menu has
+        matched (using regex) the specified text."""
+        try:
+            self.get_option_index(text, timeout)
+            return True
+        except LookupError:
+            return False
+
+    def nudge(self):
+        """Send a little nudge to ensure there is something to read. This is useful when you're not
+        sure if all output from the serial has been read already."""
+        self.enter('')
+
+    def empty_buffer(self):
+        """Read everything from the serial and clear the internal pexpect buffer. This ensures
+        that the next ``expect()`` call will time out (unless further input is sent to the
+        serial beforehand). This is used to create a "known" state and avoid unexpected matches."""
+        try:
+            while True:
+                time.sleep(0.1)
+                self.conn.read_nonblocking(size=1024, timeout=0.1)
+        except TIMEOUT:
+            pass
+        self.conn.buffer = ''
+
+    def _reset(self):
+        self.options = {}
+        self.prompt = None
+        self.empty_buffer()
+
+
diff --git a/doc/Makefile b/doc/Makefile
new file mode 100644
index 0000000..24b22ac
--- /dev/null
+++ b/doc/Makefile
@@ -0,0 +1,192 @@
+# Makefile for Sphinx documentation
+#
+
+# You can set these variables from the command line.
+SPHINXOPTS    =
+SPHINXBUILD   = sphinx-build
+PAPER         =
+BUILDDIR      = _build
+
+# User-friendly check for sphinx-build
+ifeq ($(shell which $(SPHINXBUILD) >/dev/null 2>&1; echo $$?), 1)
+$(error The '$(SPHINXBUILD)' command was not found. Make sure you have Sphinx installed, then set the SPHINXBUILD environment variable to point to the full path of the '$(SPHINXBUILD)' executable. Alternatively you can add the directory with the executable to your PATH. If you don't have Sphinx installed, grab it from http://sphinx-doc.org/)
+endif
+
+# Internal variables.
+PAPEROPT_a4     = -D latex_paper_size=a4
+PAPEROPT_letter = -D latex_paper_size=letter
+ALLSPHINXOPTS   = -d $(BUILDDIR)/doctrees $(PAPEROPT_$(PAPER)) $(SPHINXOPTS) .
+# the i18n builder cannot share the environment and doctrees with the others
+I18NSPHINXOPTS  = $(PAPEROPT_$(PAPER)) $(SPHINXOPTS) .
+
+.PHONY: help clean html dirhtml singlehtml pickle json htmlhelp qthelp devhelp epub latex latexpdf text man changes linkcheck doctest coverage gettext
+
+help:
+	@echo "Please use \`make <target>' where <target> is one of"
+	@echo "  html       to make standalone HTML files"
+	@echo "  dirhtml    to make HTML files named index.html in directories"
+	@echo "  singlehtml to make a single large HTML file"
+	@echo "  pickle     to make pickle files"
+	@echo "  json       to make JSON files"
+	@echo "  htmlhelp   to make HTML files and a HTML help project"
+	@echo "  qthelp     to make HTML files and a qthelp project"
+	@echo "  applehelp  to make an Apple Help Book"
+	@echo "  devhelp    to make HTML files and a Devhelp project"
+	@echo "  epub       to make an epub"
+	@echo "  latex      to make LaTeX files, you can set PAPER=a4 or PAPER=letter"
+	@echo "  latexpdf   to make LaTeX files and run them through pdflatex"
+	@echo "  latexpdfja to make LaTeX files and run them through platex/dvipdfmx"
+	@echo "  text       to make text files"
+	@echo "  man        to make manual pages"
+	@echo "  texinfo    to make Texinfo files"
+	@echo "  info       to make Texinfo files and run them through makeinfo"
+	@echo "  gettext    to make PO message catalogs"
+	@echo "  changes    to make an overview of all changed/added/deprecated items"
+	@echo "  xml        to make Docutils-native XML files"
+	@echo "  pseudoxml  to make pseudoxml-XML files for display purposes"
+	@echo "  linkcheck  to check all external links for integrity"
+	@echo "  doctest    to run all doctests embedded in the documentation (if enabled)"
+	@echo "  coverage   to run coverage check of the documentation (if enabled)"
+
+clean:
+	rm -rf $(BUILDDIR)/*
+
+html:
+	$(SPHINXBUILD) -b html $(ALLSPHINXOPTS) $(BUILDDIR)/html
+	@echo
+	@echo "Build finished. The HTML pages are in $(BUILDDIR)/html."
+
+dirhtml:
+	$(SPHINXBUILD) -b dirhtml $(ALLSPHINXOPTS) $(BUILDDIR)/dirhtml
+	@echo
+	@echo "Build finished. The HTML pages are in $(BUILDDIR)/dirhtml."
+
+singlehtml:
+	$(SPHINXBUILD) -b singlehtml $(ALLSPHINXOPTS) $(BUILDDIR)/singlehtml
+	@echo
+	@echo "Build finished. The HTML page is in $(BUILDDIR)/singlehtml."
+
+pickle:
+	$(SPHINXBUILD) -b pickle $(ALLSPHINXOPTS) $(BUILDDIR)/pickle
+	@echo
+	@echo "Build finished; now you can process the pickle files."
+
+json:
+	$(SPHINXBUILD) -b json $(ALLSPHINXOPTS) $(BUILDDIR)/json
+	@echo
+	@echo "Build finished; now you can process the JSON files."
+
+htmlhelp:
+	$(SPHINXBUILD) -b htmlhelp $(ALLSPHINXOPTS) $(BUILDDIR)/htmlhelp
+	@echo
+	@echo "Build finished; now you can run HTML Help Workshop with the" \
+	      ".hhp project file in $(BUILDDIR)/htmlhelp."
+
+qthelp:
+	$(SPHINXBUILD) -b qthelp $(ALLSPHINXOPTS) $(BUILDDIR)/qthelp
+	@echo
+	@echo "Build finished; now you can run "qcollectiongenerator" with the" \
+	      ".qhcp project file in $(BUILDDIR)/qthelp, like this:"
+	@echo "# qcollectiongenerator $(BUILDDIR)/qthelp/devlib.qhcp"
+	@echo "To view the help file:"
+	@echo "# assistant -collectionFile $(BUILDDIR)/qthelp/devlib.qhc"
+
+applehelp:
+	$(SPHINXBUILD) -b applehelp $(ALLSPHINXOPTS) $(BUILDDIR)/applehelp
+	@echo
+	@echo "Build finished. The help book is in $(BUILDDIR)/applehelp."
+	@echo "N.B. You won't be able to view it unless you put it in" \
+	      "~/Library/Documentation/Help or install it in your application" \
+	      "bundle."
+
+devhelp:
+	$(SPHINXBUILD) -b devhelp $(ALLSPHINXOPTS) $(BUILDDIR)/devhelp
+	@echo
+	@echo "Build finished."
+	@echo "To view the help file:"
+	@echo "# mkdir -p $$HOME/.local/share/devhelp/devlib"
+	@echo "# ln -s $(BUILDDIR)/devhelp $$HOME/.local/share/devhelp/devlib"
+	@echo "# devhelp"
+
+epub:
+	$(SPHINXBUILD) -b epub $(ALLSPHINXOPTS) $(BUILDDIR)/epub
+	@echo
+	@echo "Build finished. The epub file is in $(BUILDDIR)/epub."
+
+latex:
+	$(SPHINXBUILD) -b latex $(ALLSPHINXOPTS) $(BUILDDIR)/latex
+	@echo
+	@echo "Build finished; the LaTeX files are in $(BUILDDIR)/latex."
+	@echo "Run \`make' in that directory to run these through (pdf)latex" \
+	      "(use \`make latexpdf' here to do that automatically)."
+
+latexpdf:
+	$(SPHINXBUILD) -b latex $(ALLSPHINXOPTS) $(BUILDDIR)/latex
+	@echo "Running LaTeX files through pdflatex..."
+	$(MAKE) -C $(BUILDDIR)/latex all-pdf
+	@echo "pdflatex finished; the PDF files are in $(BUILDDIR)/latex."
+
+latexpdfja:
+	$(SPHINXBUILD) -b latex $(ALLSPHINXOPTS) $(BUILDDIR)/latex
+	@echo "Running LaTeX files through platex and dvipdfmx..."
+	$(MAKE) -C $(BUILDDIR)/latex all-pdf-ja
+	@echo "pdflatex finished; the PDF files are in $(BUILDDIR)/latex."
+
+text:
+	$(SPHINXBUILD) -b text $(ALLSPHINXOPTS) $(BUILDDIR)/text
+	@echo
+	@echo "Build finished. The text files are in $(BUILDDIR)/text."
+
+man:
+	$(SPHINXBUILD) -b man $(ALLSPHINXOPTS) $(BUILDDIR)/man
+	@echo
+	@echo "Build finished. The manual pages are in $(BUILDDIR)/man."
+
+texinfo:
+	$(SPHINXBUILD) -b texinfo $(ALLSPHINXOPTS) $(BUILDDIR)/texinfo
+	@echo
+	@echo "Build finished. The Texinfo files are in $(BUILDDIR)/texinfo."
+	@echo "Run \`make' in that directory to run these through makeinfo" \
+	      "(use \`make info' here to do that automatically)."
+
+info:
+	$(SPHINXBUILD) -b texinfo $(ALLSPHINXOPTS) $(BUILDDIR)/texinfo
+	@echo "Running Texinfo files through makeinfo..."
+	make -C $(BUILDDIR)/texinfo info
+	@echo "makeinfo finished; the Info files are in $(BUILDDIR)/texinfo."
+
+gettext:
+	$(SPHINXBUILD) -b gettext $(I18NSPHINXOPTS) $(BUILDDIR)/locale
+	@echo
+	@echo "Build finished. The message catalogs are in $(BUILDDIR)/locale."
+
+changes:
+	$(SPHINXBUILD) -b changes $(ALLSPHINXOPTS) $(BUILDDIR)/changes
+	@echo
+	@echo "The overview file is in $(BUILDDIR)/changes."
+
+linkcheck:
+	$(SPHINXBUILD) -b linkcheck $(ALLSPHINXOPTS) $(BUILDDIR)/linkcheck
+	@echo
+	@echo "Link check complete; look for any errors in the above output " \
+	      "or in $(BUILDDIR)/linkcheck/output.txt."
+
+doctest:
+	$(SPHINXBUILD) -b doctest $(ALLSPHINXOPTS) $(BUILDDIR)/doctest
+	@echo "Testing of doctests in the sources finished, look at the " \
+	      "results in $(BUILDDIR)/doctest/output.txt."
+
+coverage:
+	$(SPHINXBUILD) -b coverage $(ALLSPHINXOPTS) $(BUILDDIR)/coverage
+	@echo "Testing of coverage in the sources finished, look at the " \
+	      "results in $(BUILDDIR)/coverage/python.txt."
+
+xml:
+	$(SPHINXBUILD) -b xml $(ALLSPHINXOPTS) $(BUILDDIR)/xml
+	@echo
+	@echo "Build finished. The XML files are in $(BUILDDIR)/xml."
+
+pseudoxml:
+	$(SPHINXBUILD) -b pseudoxml $(ALLSPHINXOPTS) $(BUILDDIR)/pseudoxml
+	@echo
+	@echo "Build finished. The pseudo-XML files are in $(BUILDDIR)/pseudoxml."
diff --git a/doc/conf.py b/doc/conf.py
new file mode 100644
index 0000000..3212c4d
--- /dev/null
+++ b/doc/conf.py
@@ -0,0 +1,287 @@
+# -*- coding: utf-8 -*-
+#
+# devlib documentation build configuration file, created by
+# sphinx-quickstart on Tue Aug 11 17:37:27 2015.
+#
+# This file is execfile()d with the current directory set to its
+# containing dir.
+#
+# Note that not all possible configuration values are present in this
+# autogenerated file.
+#
+# All configuration values have a default; values that are commented out
+# serve to show the default.
+
+import sys
+import os
+import shlex
+
+# If extensions (or modules to document with autodoc) are in another directory,
+# add these directories to sys.path here. If the directory is relative to the
+# documentation root, use os.path.abspath to make it absolute, like shown here.
+#sys.path.insert(0, os.path.abspath('.'))
+
+# -- General configuration ------------------------------------------------
+
+# If your documentation needs a minimal Sphinx version, state it here.
+#needs_sphinx = '1.0'
+
+# Add any Sphinx extension module names here, as strings. They can be
+# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
+# ones.
+extensions = [
+    'sphinx.ext.autodoc',
+    'sphinx.ext.viewcode',
+]
+
+# Add any paths that contain templates here, relative to this directory.
+templates_path = ['static/templates']
+
+# The suffix(es) of source filenames.
+# You can specify multiple suffix as a list of string:
+# source_suffix = ['.rst', '.md']
+source_suffix = '.rst'
+
+# The encoding of source files.
+#source_encoding = 'utf-8-sig'
+
+# The master toctree document.
+master_doc = 'index'
+
+# General information about the project.
+project = u'devlib'
+copyright = u'2015, ARM Limited'
+author = u'ARM Limited'
+
+# The version info for the project you're documenting, acts as replacement for
+# |version| and |release|, also used in various other places throughout the
+# built documents.
+#
+# The short X.Y version.
+version = '0.1'
+# The full version, including alpha/beta/rc tags.
+release = '0.1'
+
+# The language for content autogenerated by Sphinx. Refer to documentation
+# for a list of supported languages.
+#
+# This is also used if you do content translation via gettext catalogs.
+# Usually you set "language" from the command line for these cases.
+language = None
+
+# There are two options for replacing |today|: either, you set today to some
+# non-false value, then it is used:
+#today = ''
+# Else, today_fmt is used as the format for a strftime call.
+#today_fmt = '%B %d, %Y'
+
+# List of patterns, relative to source directory, that match files and
+# directories to ignore when looking for source files.
+exclude_patterns = ['../build']
+
+# The reST default role (used for this markup: `text`) to use for all
+# documents.
+#default_role = None
+
+# If true, '()' will be appended to :func: etc. cross-reference text.
+#add_function_parentheses = True
+
+# If true, the current module name will be prepended to all description
+# unit titles (such as .. function::).
+#add_module_names = True
+
+# If true, sectionauthor and moduleauthor directives will be shown in the
+# output. They are ignored by default.
+#show_authors = False
+
+# The name of the Pygments (syntax highlighting) style to use.
+pygments_style = 'sphinx'
+
+# A list of ignored prefixes for module index sorting.
+#modindex_common_prefix = []
+
+# If true, keep warnings as "system message" paragraphs in the built documents.
+#keep_warnings = False
+
+# If true, `todo` and `todoList` produce output, else they produce nothing.
+todo_include_todos = False
+
+
+# -- Options for HTML output ----------------------------------------------
+
+# The theme to use for HTML and HTML Help pages.  See the documentation for
+# a list of builtin themes.
+html_theme = 'sphinx_rtd_theme'
+
+# Theme options are theme-specific and customize the look and feel of a theme
+# further.  For a list of options available for each theme, see the
+# documentation.
+#html_theme_options = {}
+
+# Add any paths that contain custom themes here, relative to this directory.
+#html_theme_path = []
+
+# The name for this set of Sphinx documents.  If None, it defaults to
+# "<project> v<release> documentation".
+#html_title = None
+
+# A shorter title for the navigation bar.  Default is the same as html_title.
+#html_short_title = None
+
+# The name of an image file (relative to this directory) to place at the top
+# of the sidebar.
+#html_logo = None
+
+# The name of an image file (within the static path) to use as favicon of the
+# docs.  This file should be a Windows icon file (.ico) being 16x16 or 32x32
+# pixels large.
+#html_favicon = None
+
+# Add any paths that contain custom static files (such as style sheets) here,
+# relative to this directory. They are copied after the builtin static files,
+# so a file named "default.css" will overwrite the builtin "default.css".
+html_static_path = ['static']
+
+# Add any extra paths that contain custom files (such as robots.txt or
+# .htaccess) here, relative to this directory. These files are copied
+# directly to the root of the documentation.
+#html_extra_path = []
+
+# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
+# using the given strftime format.
+#html_last_updated_fmt = '%b %d, %Y'
+
+# If true, SmartyPants will be used to convert quotes and dashes to
+# typographically correct entities.
+#html_use_smartypants = True
+
+# Custom sidebar templates, maps document names to template names.
+#html_sidebars = {}
+
+# Additional templates that should be rendered to pages, maps page names to
+# template names.
+#html_additional_pages = {}
+
+# If false, no module index is generated.
+#html_domain_indices = True
+
+# If false, no index is generated.
+#html_use_index = True
+
+# If true, the index is split into individual pages for each letter.
+#html_split_index = False
+
+# If true, links to the reST sources are added to the pages.
+#html_show_sourcelink = True
+
+# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
+#html_show_sphinx = True
+
+# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
+#html_show_copyright = True
+
+# If true, an OpenSearch description file will be output, and all pages will
+# contain a <link> tag referring to it.  The value of this option must be the
+# base URL from which the finished HTML is served.
+#html_use_opensearch = ''
+
+# This is the file name suffix for HTML files (e.g. ".xhtml").
+#html_file_suffix = None
+
+# Language to be used for generating the HTML full-text search index.
+# Sphinx supports the following languages:
+#   'da', 'de', 'en', 'es', 'fi', 'fr', 'hu', 'it', 'ja'
+#   'nl', 'no', 'pt', 'ro', 'ru', 'sv', 'tr'
+#html_search_language = 'en'
+
+# A dictionary with options for the search language support, empty by default.
+# Now only 'ja' uses this config value
+#html_search_options = {'type': 'default'}
+
+# The name of a javascript file (relative to the configuration directory) that
+# implements a search results scorer. If empty, the default will be used.
+#html_search_scorer = 'scorer.js'
+
+# Output file base name for HTML help builder.
+htmlhelp_basename = 'devlibdoc'
+
+# -- Options for LaTeX output ---------------------------------------------
+
+latex_elements = {
+# The paper size ('letterpaper' or 'a4paper').
+#'papersize': 'letterpaper',
+
+# The font size ('10pt', '11pt' or '12pt').
+#'pointsize': '10pt',
+
+# Additional stuff for the LaTeX preamble.
+#'preamble': '',
+
+# Latex figure (float) alignment
+#'figure_align': 'htbp',
+}
+
+# Grouping the document tree into LaTeX files. List of tuples
+# (source start file, target name, title,
+#  author, documentclass [howto, manual, or own class]).
+latex_documents = [
+  (master_doc, 'devlib.tex', u'devlib Documentation',
+   u'ARM Limited', 'manual'),
+]
+
+# The name of an image file (relative to this directory) to place at the top of
+# the title page.
+#latex_logo = None
+
+# For "manual" documents, if this is true, then toplevel headings are parts,
+# not chapters.
+#latex_use_parts = False
+
+# If true, show page references after internal links.
+#latex_show_pagerefs = False
+
+# If true, show URL addresses after external links.
+#latex_show_urls = False
+
+# Documents to append as an appendix to all manuals.
+#latex_appendices = []
+
+# If false, no module index is generated.
+#latex_domain_indices = True
+
+
+# -- Options for manual page output ---------------------------------------
+
+# One entry per manual page. List of tuples
+# (source start file, name, description, authors, manual section).
+man_pages = [
+    (master_doc, 'devlib', u'devlib Documentation',
+     [author], 1)
+]
+
+# If true, show URL addresses after external links.
+#man_show_urls = False
+
+
+# -- Options for Texinfo output -------------------------------------------
+
+# Grouping the document tree into Texinfo files. List of tuples
+# (source start file, target name, title, author,
+#  dir menu entry, description, category)
+texinfo_documents = [
+  (master_doc, 'devlib', u'devlib Documentation',
+   author, 'devlib', 'One line description of project.',
+   'Miscellaneous'),
+]
+
+# Documents to append as an appendix to all manuals.
+#texinfo_appendices = []
+
+# If false, no module index is generated.
+#texinfo_domain_indices = True
+
+# How to display URL addresses: 'footnote', 'no', or 'inline'.
+#texinfo_show_urls = 'footnote'
+
+# If true, do not generate a @detailmenu in the "Top" node's menu.
+#texinfo_no_detailmenu = False
diff --git a/doc/connection.rst b/doc/connection.rst
new file mode 100644
index 0000000..1d8f098
--- /dev/null
+++ b/doc/connection.rst
@@ -0,0 +1,240 @@
+Connection
+==========
+
+A :class:`Connection` abstracts an actual physical connection to a device. The
+first connection is created when :func:`Target.connect` method is called. If a
+:class:`Target` is used in a multi-threaded environment, it will maintain a
+connection for each thread in which it is invoked. This allows the same target
+object to be used in parallel in multiple threads.
+
+:class:`Connection`\ s will be automatically created and managed by
+:class:`Target`\ s, so there is usually no reason to create one manually.
+Instead, configuration for a :class:`Connection` is passed as the
+`connection_settings` parameter when creating a :class:`Target`. The type of
+connection to be used by the target is also specified on instantiation via the
+`conn_cls` parameter, though all concrete :class:`Target` implementations will
+set an appropriate default, so there is typically no need to specify this
+explicitly.
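+
+For example, a minimal sketch of configuring the connection through a
+:class:`Target` (the ``device`` value below is illustrative):
+
+.. code:: python
+
+   from devlib import AndroidTarget
+
+   # connection_settings is forwarded to the underlying AdbConnection
+   target = AndroidTarget(connection_settings={'device': 'emulator-5554'})
+   target.connect()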
+
+:class:`Connection` classes are not a part of an inheritance hierarchy, i.e.
+they do not derive from a common base. Instead, a :class:`Connection` is any
+class that implements the following methods.
+
+
+.. method:: push(self, source, dest, timeout=None)
+
+   Transfer a file from the host machine to the connected device.
+
+   :param source: path to the file on the host
+   :param dest: path to the file on the connected device.
+   :param timeout: timeout (in seconds) for the transfer; if the transfer does
+       not complete within this period, an exception will be raised.
+
+.. method:: pull(self, source, dest, timeout=None)
+
+   Transfer a file, or files matching a glob pattern, from the connected device
+   to the host machine.
+
+   :param source: path to the file on the connected device. If ``dest`` is a
+       directory, may be a glob pattern.
+   :param dest: path to the file on the host
+   :param timeout: timeout (in seconds) for the transfer; if the transfer does
+       not complete within this period, an exception will be raised.
+
+.. method:: execute(self, command, timeout=None, check_exit_code=False, as_root=False)
+
+   Execute the specified command on the connected device and return its output.
+
+   :param command: The command to be executed.
+   :param timeout: Timeout (in seconds) for the execution of the command. If
+       specified, an exception will be raised if execution does not complete
+       within the specified period.
+   :param check_exit_code: If ``True`` the exit code (on connected device)
+       from execution of the command will be checked, and an exception will be
+       raised if it is not ``0``.
+   :param as_root: The command will be executed as root. This will fail on
+       unrooted connected devices.
+
+.. method:: background(self, command, stdout=subprocess.PIPE, stderr=subprocess.PIPE, as_root=False)
+
+   Execute the command on the connected device, invoking it via subprocess on the host.
+   This will return a :class:`subprocess.Popen` instance for the command.
+
+   :param command: The command to be executed.
+   :param stdout: By default, standard output will be piped from the subprocess;
+      this may be used to redirect it to an alternative file handle.
+   :param stderr: By default, standard error will be piped from the subprocess;
+      this may be used to redirect it to an alternative file handle.
+   :param as_root: The command will be executed as root. This will fail on
+       unrooted connected devices.
+
+   .. note:: This **will block the connection** until the command completes.
+
+.. note:: The above methods are directly wrapped by :class:`Target` methods,
+          however note that some of the defaults are different.
+
+.. method:: cancel_running_command(self)
+
+   Cancel a running command (previously started with :func:`background`) and free up the connection.
+   It is valid to call this if the command has already terminated (or if no
+   command was issued), in which case this is a no-op.
+
+.. method:: close(self)
+
+   Close the connection to the device. The :class:`Connection` object should not
+   be used after this method is called. There is no way to reopen a previously
+   closed connection, a new connection object should be created instead.
+
+.. note:: There is no :func:`open` method, as the connection is assumed to be
+          opened on instantiation.
+
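+As noted above, these methods are normally reached through the owning
+:class:`Target` rather than called on a connection directly. A short sketch
+(paths and commands are illustrative):
+
+.. code:: python
+
+   # each of these Target methods delegates to the underlying Connection
+   target.push('payload.bin', '/tmp/payload.bin')
+   output = target.execute('ls /tmp', timeout=30)
+   target.pull('/tmp/output.log', '.')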
+
+.. _connection-types:
+
+Connection Types
+----------------
+
+.. class:: AdbConnection(device=None, timeout=None)
+
+    A connection to an android device via ``adb`` (Android Debug Bridge).
+    ``adb`` is part of the Android SDK (though stand-alone versions are also
+    available).
+
+    :param device: The name of the adb device. This is usually a unique hex
+                   string for USB-connected devices, or an ip address/port
+                   combination. To see connected devices, you can run ``adb
+                   devices`` on the host.
+    :param timeout: Connection timeout in seconds. If a connection to the device
+                    is not established within this period, :class:`HostError`
+                    is raised.
+
+
+.. class:: SshConnection(host, username, password=None, keyfile=None, port=None,\
+                         timeout=None, password_prompt=None)
+
+    A connection to a device on the network over SSH.
+
+    :param host: SSH host to which to connect
+    :param username: username for SSH login
+    :param password: password for the SSH connection
+
+                     .. note:: In order to use password-based authentication,
+                               the ``sshpass`` utility must be installed on the
+                               system.
+
+    :param keyfile: Path to the SSH private key to be used for the connection.
+
+                    .. note:: ``keyfile`` and ``password`` can't be specified
+                              at the same time.
+
+    :param port: TCP port on which the SSH server is listening on the remote device.
+                 Omit to use the default port.
+    :param timeout: Timeout for the connection in seconds. If a connection
+                    cannot be established within this time, an error will be
+                    raised.
+    :param password_prompt: A string with the password prompt used by
+                            ``sshpass``. Set this if your version of ``sshpass``
+                            uses something other than ``"[sudo] password"``.
+
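+    A typical way of ending up with an ``SshConnection`` is to pass the
+    corresponding settings to a :class:`LinuxTarget` (the values below are
+    illustrative):
+
+    .. code:: python
+
+        from devlib import LinuxTarget
+
+        target = LinuxTarget(connection_settings={'host': '192.168.0.10',
+                                                  'username': 'root',
+                                                  'password': 'secret'})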
+
+.. class:: TelnetConnection(host, username, password=None, port=None,\
+                            timeout=None, password_prompt=None,\
+                            original_prompt=None)
+
+    A connection to a device on the network over Telnet.
+
+    .. note:: Since the Telnet protocol does not support file transfer, scp is
+              used for that purpose.
+
+    :param host: SSH host to which to connect
+    :param username: username for SSH login
+    :param password: password for the SSH connection
+
+                     .. note:: In order to use password-based authentication,
+                               the ``sshpass`` utility must be installed on the
+                               system.
+
+    :param port: TCP port on which the SSH server is listening on the remote device.
+                 Omit to use the default port.
+    :param timeout: Timeout for the connection in seconds. If a connection
+                    cannot be established within this time, an error will be
+                    raised.
+    :param password_prompt: A string with the password prompt used by
+                            ``sshpass``. Set this if your version of ``sshpass``
+                            uses something other than ``"[sudo] password"``.
+    :param original_prompt: A regex for the shell prompt presented in the Telnet
+                            connection (the prompt will be reset to a
+                            randomly-generated pattern for the duration of the
+                            connection to reduce the possibility of clashes).
+                            This parameter is ignored for SSH connections.
+
+
+.. class:: LocalConnection(keep_password=True, unrooted=False, password=None)
+
+    A connection to the local host allowing it to be treated as a Target.
+
+
+    :param keep_password: If this is ``True`` (the default) user's password will
+                          be cached in memory after it is first requested.
+    :param unrooted: If set to ``True``, the platform will be assumed to be
+                     unrooted without testing for root. This is useful to avoid
+                     blocking on password request in scripts.
+    :param password: Specify password on connection creation rather than
+                     prompting for it.
+
+
+.. class:: Gem5Connection(platform, host=None, username=None, password=None,\
+                          timeout=None, password_prompt=None,\
+                          original_prompt=None)
+
+    A connection to a gem5 simulation using a local Telnet connection.
+
+    .. note:: Some of the following input parameters are optional and will be ignored during
+              initialisation. They were kept to maintain the analogy with a :class:`TelnetConnection`
+              (i.e. ``host``, ``username``, ``password``, ``port``,
+              ``password_prompt`` and ``original_prompt``).
+
+
+    :param host: Host on which the gem5 simulation is running
+
+                     .. note:: Even though the ``host`` input parameter will be
+                               ignored, the gem5 simulation needs to be running
+                               on the same host the user is currently on, so if
+                               the host given as an input parameter is not the
+                               same as the actual host, a ``TargetError`` will
+                               be raised to prevent confusion.
+
+    :param username: Username in the simulated system
+    :param password: No password is required for gem5, so this does not need to be set
+    :param port: Telnet port to connect to gem5. This does not need to be set
+                 at initialisation as this will either be determined by the
+                 :class:`Gem5SimulationPlatform` or can be set using the
+                 :func:`connect_gem5` method
+    :param timeout: Timeout for the connection in seconds. Gem5 has high
+                    latencies so unless the timeout given by the user via
+                    this input parameter is higher than the default one
+                    (3600 seconds), this input parameter will be ignored.
+    :param password_prompt: A string with password prompt
+    :param original_prompt: A regex for the shell prompt
+
+There are two classes that inherit from :class:`Gem5Connection`:
+:class:`AndroidGem5Connection` and :class:`LinuxGem5Connection`.
+They inherit *almost* all methods from the parent class, without altering them.
+The only methods discussed below are those that are overridden by the
+:class:`LinuxGem5Connection` and :class:`AndroidGem5Connection` respectively.
+
+.. class:: LinuxGem5Connection
+
+    A connection to a gem5 simulation that emulates a Linux system.
+
+.. method:: _login_to_device(self)
+
+    Login to the gem5 simulated system.
+
+.. class:: AndroidGem5Connection
+
+    A connection to a gem5 simulation that emulates an Android system.
+
+.. method:: _wait_for_boot(self)
+
+    Wait for the gem5 simulated system to have booted and finished the booting animation.
diff --git a/doc/index.rst b/doc/index.rst
new file mode 100644
index 0000000..2c6d72f
--- /dev/null
+++ b/doc/index.rst
@@ -0,0 +1,31 @@
+.. devlib documentation master file, created by
+   sphinx-quickstart on Tue Aug 11 17:37:27 2015.
+   You can adapt this file completely to your liking, but it should at least
+   contain the root `toctree` directive.
+
+Welcome to devlib documentation
+===============================
+
+devlib provides an interface for interacting with remote targets, such as
+development boards, mobile devices, etc. It also provides means of collecting
+various measurements and traces from such targets.
+
+Contents:
+
+.. toctree::
+   :maxdepth: 2
+
+   overview
+   target
+   modules
+   instrumentation
+   platform
+   connection
+
+Indices and tables
+==================
+
+* :ref:`genindex`
+* :ref:`modindex`
+* :ref:`search`
+
diff --git a/doc/instrumentation.rst b/doc/instrumentation.rst
new file mode 100644
index 0000000..7beec79
--- /dev/null
+++ b/doc/instrumentation.rst
@@ -0,0 +1,244 @@
+Instrumentation
+===============
+
+The ``Instrument`` API provides a consistent way of collecting measurements from
+a target. Measurements are collected via an instance of a class derived from
+:class:`Instrument`. An ``Instrument`` allows collection of measurements from one
+or more channels. An ``Instrument`` may support ``INSTANTANEOUS`` or
+``CONTINUOUS`` collection, or both.
+
+Example
+-------
+
+The following example shows how to use an instrument to read temperature from an
+Android target.
+
+.. code-block:: ipython
+
+    # import and instantiate the Target and the instrument
+    # (note: this assumes exactly one android target connected
+    #  to the host machine).
+    In [1]: from devlib import AndroidTarget, HwmonInstrument
+
+    In [2]: t = AndroidTarget()
+
+    In [3]: i = HwmonInstrument(t)
+
+    # Set up the instrument on the Target. In case of HWMON, this is
+    # a no-op, but is included here for completeness.
+    In [4]: i.setup()
+
+    # Find out what the instrument is capable collecting from the
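+    A typical (illustrative) interaction, assuming ``conn`` is a
+    ``pexpect.spawn`` handle attached to the target's UART::
+
+        menu = UefiMenu(conn)
+        menu.wait(timeout=120)       # wait for the default-boot countdown prompt
+        menu.create_entry('devlib', entry_config)   # entry_config is a UefiConfig
+        menu.select('devlib')
+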
+    # target.
+    In [5]: i.list_channels()
+    Out[5]:
+    [CHAN(battery/temp1, battery_temperature),
+     CHAN(exynos-therm/temp1, exynos-therm_temperature)]
+
+    # Set up a new measurement session, and specify what is to be
+    # collected.
+    In [6]: i.reset(sites=['exynos-therm'])
+
+    # HWMON instrument supports INSTANTANEOUS collection, so invoking
+    # take_measurement() will return a list of measurements taken from
+    # each of the channels configured during reset()
+    In [7]: i.take_measurement()
+    Out[7]: [exynos-therm_temperature: 36.0 degrees]
+
+API
+---
+
+Instrument
+~~~~~~~~~~
+
+.. class:: Instrument(target, **kwargs)
+
+   An ``Instrument`` allows collection of measurements from one or more
+   channels. An ``Instrument`` may support ``INSTANTANEOUS`` or ``CONTINUOUS``
+   collection, or both.
+
+.. attribute:: Instrument.mode
+
+   A bit mask that indicates collection modes that are supported by this
+   instrument. Possible values are:
+
+   :INSTANTANEOUS: The instrument supports taking a single sample via
+                   ``take_measurement()``.
+   :CONTINUOUS: The instrument supports collecting measurements over a
+                period of time via ``start()``, ``stop()``, and
+                ``get_data()`` methods.
+
+   .. note:: It's possible for one instrument to support more than a single
+             mode.
+
+.. attribute:: Instrument.active_channels
+
+   Channels that have been activated via ``reset()``. Measurements will only be
+   collected for these channels.
+
+.. method:: Instrument.list_channels()
+
+   Returns a list of :class:`InstrumentChannel` instances that describe what
+   this instrument can measure on the current target. A channel is a combination
+   of a ``kind`` of measurement (power, temperature, etc) and a ``site`` that
+   indicates where on the target the measurement will be collected from.
+
+.. method:: Instrument.get_channels(measure)
+
+   Returns channels for a particular ``measure`` type. A ``measure`` can be
+   either a string (e.g. ``"power"``) or a :class:`MeasurementType` instance.
+
+.. method::  Instrument.setup(*args, **kwargs)
+
+   This will set up the instrument on the target. Parameters this method takes
+   are particular to subclasses (see documentation for specific instruments
+   below).  What actions are performed by this method are also
+   below). The actions performed by this method are also
+   instrument-specific. Usually these will be things like installing
+   needs to be invoked at most once per reboot of the target (unless
+   ``teardown()`` has been called), but see documentation for the instrument
+   you're interested in.
+
+.. method:: Instrument.reset([sites, [kinds]])
+
+   This is used to configure an instrument for collection. This must be invoked
+   before ``start()`` is called to begin collection. ``sites`` and ``kinds``
+   parameters may be used to specify which channels measurements should be
+   collected from (if omitted, then measurements will be collected for all
+   available sites/kinds). This method sets the ``active_channels`` attribute
+   of the ``Instrument``.
+
+.. method:: Instrument.take_measurement()
+
+   Take a single measurement from ``active_channels``. Returns a list of
+   :class:`Measurement` objects (one for each active channel).
+
+   .. note:: This method is only implemented by :class:`Instrument`\ s that
+             support ``INSTANTANEOUS`` measurement.
+
+.. method:: Instrument.start()
+
+   Starts collecting measurements from ``active_channels``.
+
+   .. note:: This method is only implemented by :class:`Instrument`\ s that
+             support ``CONTINUOUS`` measurement.
+
+.. method:: Instrument.stop()
+
+   Stops collecting measurements from ``active_channels``. Must be called after
+   :func:`start()`.
+
+   .. note:: This method is only implemented by :class:`Instrument`\ s that
+             support ``CONTINUOUS`` measurement.
+
+.. method:: Instrument.get_data(outfile)
+
+   Write collected data into ``outfile``. Must be called after :func:`stop()`.
+   Data will be written in CSV format with a column for each channel and a row
+   for each sample. Column heading will be channel, labels in the form
+   for each sample. Column headings will be channel labels in the form
+   ``<site>_<kind>`` (see :class:`InstrumentChannel`). The order of the columns
+
+   This returns a :class:`MeasurementsCsv` instance associated with the outfile
+   that can be used to stream :class:`Measurement`\ s lists (similar to what is
+   returned by ``take_measurement()``).
+
+   .. note:: This method is only implemented by :class:`Instrument`\ s that
+             support ``CONTINUOUS`` measurement.
+
+.. attribute:: Instrument.sample_rate_hz
+
+   Sample rate of the instrument in Hz. Assumed to be the same for all channels.
+
+   .. note:: This attribute is only provided by :class:`Instrument`\ s that
+             support ``CONTINUOUS`` measurement.
+
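+For ``CONTINUOUS`` instruments, a typical collection session looks like the
+sketch below (``instrument`` stands for any already-constructed instrument that
+supports ``CONTINUOUS`` collection; the workload call is a placeholder):
+
+.. code-block:: python
+
+    instrument.setup()
+    instrument.reset(kinds=['power'])    # selects active_channels
+    instrument.start()
+    # ... run the workload being measured ...
+    instrument.stop()
+    csv = instrument.get_data('measurements.csv')   # returns a MeasurementsCsv
+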
+Instrument Channel
+~~~~~~~~~~~~~~~~~~
+
+.. class:: InstrumentChannel(name, site, measurement_type, **attrs)
+
+   An :class:`InstrumentChannel` describes a single type of measurement that may
+   be collected by an :class:`Instrument`. A channel is primarily defined by a
+   ``site`` and a ``measurement_type``.
+
+   A ``site`` indicates where on the target a measurement is collected from
+   (e.g. a voltage rail or the location of a sensor).
+
+   A ``measurement_type`` is an instance of :class:`MeasurementType` that
+   describes what sort of measurement this is (power, temperature, etc). Each
+   measurement type has a standard unit it is reported in, regardless of the
+   instrument used to collect it.
+
+   A channel (i.e. site/measurement_type combination) is unique per instrument,
+   however there may be more than one channel associated with one site (e.g. for
+   both voltage and power).
+
+   It should not be assumed that any site/measurement_type combination is valid.
+   The list of available channels can be queried with
+   :func:`Instrument.list_channels()`.
+
+.. attribute:: InstrumentChannel.site
+
+   The name of the "site" from which the measurements are collected (e.g. voltage
+   rail, sensor, etc).
+
+.. attribute:: InstrumentChannel.kind
+
+   A string indicating the type of measurement that will be collected. This is
+   the ``name`` of the :class:`MeasurementType` associated with this channel.
+
+.. attribute:: InstrumentChannel.units
+
+   Units in which measurements will be reported. This is determined by the
+   underlying :class:`MeasurementType`.
+
+.. attribute:: InstrumentChannel.label
+
+   A label that can be attached to measurements associated with this channel.
+   This is constructed with ::
+
+       '{}_{}'.format(self.site, self.kind)
+
+
+Measurement Types
+~~~~~~~~~~~~~~~~~
+
+In order to make instruments easier to use, and to make it easier to swap them
+out when necessary (e.g. to change the method of collecting power), a number of
+standard measurement types are defined. This way, for example, power will always
+be reported as "power" in Watts, and never as "pwr" in milliWatts. The currently
+defined measurement types are:
+
+
++-------------+---------+---------------+
+| name        | units   | category      |
++=============+=========+===============+
+| time        | seconds |               |
++-------------+---------+---------------+
+| temperature | degrees |               |
++-------------+---------+---------------+
+| power       | watts   | power/energy  |
++-------------+---------+---------------+
+| voltage     | volts   | power/energy  |
++-------------+---------+---------------+
+| current     | amps    | power/energy  |
++-------------+---------+---------------+
+| energy      | joules  | power/energy  |
++-------------+---------+---------------+
+| tx          | bytes   | data transfer |
++-------------+---------+---------------+
+| rx          | bytes   | data transfer |
++-------------+---------+---------------+
+| tx/rx       | bytes   | data transfer |
++-------------+---------+---------------+
+
+
+.. _instruments:
+
+Available Instruments
+---------------------
+
+This section lists instruments that are currently part of devlib.
+
+TODO
diff --git a/doc/make.bat b/doc/make.bat
new file mode 100644
index 0000000..6471ff3
--- /dev/null
+++ b/doc/make.bat
@@ -0,0 +1,263 @@
+@ECHO OFF

+

+REM Command file for Sphinx documentation

+

+if "%SPHINXBUILD%" == "" (

+	set SPHINXBUILD=sphinx-build

+)

+set BUILDDIR=_build

+set ALLSPHINXOPTS=-d %BUILDDIR%/doctrees %SPHINXOPTS% .

+set I18NSPHINXOPTS=%SPHINXOPTS% .

+if NOT "%PAPER%" == "" (

+	set ALLSPHINXOPTS=-D latex_paper_size=%PAPER% %ALLSPHINXOPTS%

+	set I18NSPHINXOPTS=-D latex_paper_size=%PAPER% %I18NSPHINXOPTS%

+)

+

+if "%1" == "" goto help

+

+if "%1" == "help" (

+	:help

+	echo.Please use `make ^<target^>` where ^<target^> is one of

+	echo.  html       to make standalone HTML files

+	echo.  dirhtml    to make HTML files named index.html in directories

+	echo.  singlehtml to make a single large HTML file

+	echo.  pickle     to make pickle files

+	echo.  json       to make JSON files

+	echo.  htmlhelp   to make HTML files and a HTML help project

+	echo.  qthelp     to make HTML files and a qthelp project

+	echo.  devhelp    to make HTML files and a Devhelp project

+	echo.  epub       to make an epub

+	echo.  latex      to make LaTeX files, you can set PAPER=a4 or PAPER=letter

+	echo.  text       to make text files

+	echo.  man        to make manual pages

+	echo.  texinfo    to make Texinfo files

+	echo.  gettext    to make PO message catalogs

+	echo.  changes    to make an overview over all changed/added/deprecated items

+	echo.  xml        to make Docutils-native XML files

+	echo.  pseudoxml  to make pseudoxml-XML files for display purposes

+	echo.  linkcheck  to check all external links for integrity

+	echo.  doctest    to run all doctests embedded in the documentation if enabled

+	echo.  coverage   to run coverage check of the documentation if enabled

+	goto end

+)

+

+if "%1" == "clean" (

+	for /d %%i in (%BUILDDIR%\*) do rmdir /q /s %%i

+	del /q /s %BUILDDIR%\*

+	goto end

+)

+

+

+REM Check if sphinx-build is available and fallback to Python version if any

+%SPHINXBUILD% 2> nul

+if errorlevel 9009 goto sphinx_python

+goto sphinx_ok

+

+:sphinx_python

+

+set SPHINXBUILD=python -m sphinx.__init__

+%SPHINXBUILD% 2> nul

+if errorlevel 9009 (

+	echo.

+	echo.The 'sphinx-build' command was not found. Make sure you have Sphinx

+	echo.installed, then set the SPHINXBUILD environment variable to point

+	echo.to the full path of the 'sphinx-build' executable. Alternatively you

+	echo.may add the Sphinx directory to PATH.

+	echo.

+	echo.If you don't have Sphinx installed, grab it from

+	echo.http://sphinx-doc.org/

+	exit /b 1

+)

+

+:sphinx_ok

+

+

+if "%1" == "html" (

+	%SPHINXBUILD% -b html %ALLSPHINXOPTS% %BUILDDIR%/html

+	if errorlevel 1 exit /b 1

+	echo.

+	echo.Build finished. The HTML pages are in %BUILDDIR%/html.

+	goto end

+)

+

+if "%1" == "dirhtml" (

+	%SPHINXBUILD% -b dirhtml %ALLSPHINXOPTS% %BUILDDIR%/dirhtml

+	if errorlevel 1 exit /b 1

+	echo.

+	echo.Build finished. The HTML pages are in %BUILDDIR%/dirhtml.

+	goto end

+)

+

+if "%1" == "singlehtml" (

+	%SPHINXBUILD% -b singlehtml %ALLSPHINXOPTS% %BUILDDIR%/singlehtml

+	if errorlevel 1 exit /b 1

+	echo.

+	echo.Build finished. The HTML pages are in %BUILDDIR%/singlehtml.

+	goto end

+)

+

+if "%1" == "pickle" (

+	%SPHINXBUILD% -b pickle %ALLSPHINXOPTS% %BUILDDIR%/pickle

+	if errorlevel 1 exit /b 1

+	echo.

+	echo.Build finished; now you can process the pickle files.

+	goto end

+)

+

+if "%1" == "json" (

+	%SPHINXBUILD% -b json %ALLSPHINXOPTS% %BUILDDIR%/json

+	if errorlevel 1 exit /b 1

+	echo.

+	echo.Build finished; now you can process the JSON files.

+	goto end

+)

+

+if "%1" == "htmlhelp" (

+	%SPHINXBUILD% -b htmlhelp %ALLSPHINXOPTS% %BUILDDIR%/htmlhelp

+	if errorlevel 1 exit /b 1

+	echo.

+	echo.Build finished; now you can run HTML Help Workshop with the ^

+.hhp project file in %BUILDDIR%/htmlhelp.

+	goto end

+)

+

+if "%1" == "qthelp" (

+	%SPHINXBUILD% -b qthelp %ALLSPHINXOPTS% %BUILDDIR%/qthelp

+	if errorlevel 1 exit /b 1

+	echo.

+	echo.Build finished; now you can run "qcollectiongenerator" with the ^

+.qhcp project file in %BUILDDIR%/qthelp, like this:

+	echo.^> qcollectiongenerator %BUILDDIR%\qthelp\devlib.qhcp

+	echo.To view the help file:

+	echo.^> assistant -collectionFile %BUILDDIR%\qthelp\devlib.ghc

+	goto end

+)

+

+if "%1" == "devhelp" (

+	%SPHINXBUILD% -b devhelp %ALLSPHINXOPTS% %BUILDDIR%/devhelp

+	if errorlevel 1 exit /b 1

+	echo.

+	echo.Build finished.

+	goto end

+)

+

+if "%1" == "epub" (

+	%SPHINXBUILD% -b epub %ALLSPHINXOPTS% %BUILDDIR%/epub

+	if errorlevel 1 exit /b 1

+	echo.

+	echo.Build finished. The epub file is in %BUILDDIR%/epub.

+	goto end

+)

+

+if "%1" == "latex" (

+	%SPHINXBUILD% -b latex %ALLSPHINXOPTS% %BUILDDIR%/latex

+	if errorlevel 1 exit /b 1

+	echo.

+	echo.Build finished; the LaTeX files are in %BUILDDIR%/latex.

+	goto end

+)

+

+if "%1" == "latexpdf" (

+	%SPHINXBUILD% -b latex %ALLSPHINXOPTS% %BUILDDIR%/latex

+	cd %BUILDDIR%/latex

+	make all-pdf

+	cd %~dp0

+	echo.

+	echo.Build finished; the PDF files are in %BUILDDIR%/latex.

+	goto end

+)

+

+if "%1" == "latexpdfja" (

+	%SPHINXBUILD% -b latex %ALLSPHINXOPTS% %BUILDDIR%/latex

+	cd %BUILDDIR%/latex

+	make all-pdf-ja

+	cd %~dp0

+	echo.

+	echo.Build finished; the PDF files are in %BUILDDIR%/latex.

+	goto end

+)

+

+if "%1" == "text" (

+	%SPHINXBUILD% -b text %ALLSPHINXOPTS% %BUILDDIR%/text

+	if errorlevel 1 exit /b 1

+	echo.

+	echo.Build finished. The text files are in %BUILDDIR%/text.

+	goto end

+)

+

+if "%1" == "man" (

+	%SPHINXBUILD% -b man %ALLSPHINXOPTS% %BUILDDIR%/man

+	if errorlevel 1 exit /b 1

+	echo.

+	echo.Build finished. The manual pages are in %BUILDDIR%/man.

+	goto end

+)

+

+if "%1" == "texinfo" (

+	%SPHINXBUILD% -b texinfo %ALLSPHINXOPTS% %BUILDDIR%/texinfo

+	if errorlevel 1 exit /b 1

+	echo.

+	echo.Build finished. The Texinfo files are in %BUILDDIR%/texinfo.

+	goto end

+)

+

+if "%1" == "gettext" (

+	%SPHINXBUILD% -b gettext %I18NSPHINXOPTS% %BUILDDIR%/locale

+	if errorlevel 1 exit /b 1

+	echo.

+	echo.Build finished. The message catalogs are in %BUILDDIR%/locale.

+	goto end

+)

+

+if "%1" == "changes" (

+	%SPHINXBUILD% -b changes %ALLSPHINXOPTS% %BUILDDIR%/changes

+	if errorlevel 1 exit /b 1

+	echo.

+	echo.The overview file is in %BUILDDIR%/changes.

+	goto end

+)

+

+if "%1" == "linkcheck" (

+	%SPHINXBUILD% -b linkcheck %ALLSPHINXOPTS% %BUILDDIR%/linkcheck

+	if errorlevel 1 exit /b 1

+	echo.

+	echo.Link check complete; look for any errors in the above output ^

+or in %BUILDDIR%/linkcheck/output.txt.

+	goto end

+)

+

+if "%1" == "doctest" (

+	%SPHINXBUILD% -b doctest %ALLSPHINXOPTS% %BUILDDIR%/doctest

+	if errorlevel 1 exit /b 1

+	echo.

+	echo.Testing of doctests in the sources finished, look at the ^

+results in %BUILDDIR%/doctest/output.txt.

+	goto end

+)

+

+if "%1" == "coverage" (

+	%SPHINXBUILD% -b coverage %ALLSPHINXOPTS% %BUILDDIR%/coverage

+	if errorlevel 1 exit /b 1

+	echo.

+	echo.Testing of coverage in the sources finished, look at the ^

+results in %BUILDDIR%/coverage/python.txt.

+	goto end

+)

+

+if "%1" == "xml" (

+	%SPHINXBUILD% -b xml %ALLSPHINXOPTS% %BUILDDIR%/xml

+	if errorlevel 1 exit /b 1

+	echo.

+	echo.Build finished. The XML files are in %BUILDDIR%/xml.

+	goto end

+)

+

+if "%1" == "pseudoxml" (

+	%SPHINXBUILD% -b pseudoxml %ALLSPHINXOPTS% %BUILDDIR%/pseudoxml

+	if errorlevel 1 exit /b 1

+	echo.

+	echo.Build finished. The pseudo-XML files are in %BUILDDIR%/pseudoxml.

+	goto end

+)

+

+:end

diff --git a/doc/modules.rst b/doc/modules.rst
new file mode 100644
index 0000000..ac75f99
--- /dev/null
+++ b/doc/modules.rst
@@ -0,0 +1,362 @@
+.. _modules:
+
+Modules
+=======
+
+Modules add additional functionality to the core :class:`Target` interface.
+Usually, it is support for specific subsystems on the target. Modules are
+instantiated as attributes of the :class:`Target` instance.
+
+hotplug
+-------
+
+The kernel ``hotplug`` subsystem allows offlining ("removing") cores from the
+system, and onlining them back in. The ``devlib`` module exposes a simple
+interface to this subsystem:
+
+.. code:: python
+
+   from devlib import LocalLinuxTarget
+   target = LocalLinuxTarget()
+
+   # offline cpus 2 and 3, "removing" them from the system
+   target.hotplug.offline(2, 3)
+
+   # bring CPU 2 back in
+   target.hotplug.online(2)
+
+   # Make sure all cpus are online
+   target.hotplug.online_all()
+
+cpufreq
+-------
+
+``cpufreq`` is the kernel subsystem for managing DVFS (Dynamic Voltage and
+Frequency Scaling). It allows controlling frequency ranges and switching
+policies (governors). The ``devlib`` module exposes the following interface
+
+.. note:: On ARM big.LITTLE systems, all cores on a cluster (usually all cores
+          of the same type) are in the same frequency domain, so setting
+          ``cpufreq`` state on one core on a cluster will affect all cores on
+          that cluster. Because of this, some devices only expose the cpufreq
+          sysfs interface (which is what is used by the ``devlib`` module) on
+          the first cpu in a cluster. So to keep your scripts portable, always
+          use the first (online) CPU in a cluster to set ``cpufreq`` state.
+
+.. method:: target.cpufreq.list_governors(cpu)
+
+   List cpufreq governors available for the specified cpu. Returns a list of
+   strings.
+
+   :param cpu: The cpu; could be a numeric or the corresponding string (e.g.
+               ``1`` or ``"cpu1"``).
+
+.. method:: target.cpufreq.list_governor_tunables(cpu)
+
+   List the tunables for the specified cpu's current governor.
+
+   :param cpu: The cpu; could be a numeric or the corresponding string (e.g.
+       ``1`` or ``"cpu1"``).
+
+
+.. method:: target.cpufreq.get_governor(cpu)
+
+   Returns the name of the currently set governor for the specified cpu.
+
+   :param cpu: The cpu; could be a numeric or the corresponding string (e.g.
+               ``1`` or ``"cpu1"``).
+
+.. method:: target.cpufreq.set_governor(cpu, governor, \*\*kwargs)
+
+   Sets the governor for the specified cpu.
+
+   :param cpu: The cpu; could be a numeric or the corresponding string (e.g.
+        ``1`` or ``"cpu1"``).
+   :param governor: The name of the governor. This must be one of the governors
+                supported by the CPU (as returned by ``list_governors()``).
+
+   Keyword arguments may be used to specify governor tunable values.
+
+
+.. method:: target.cpufreq.get_governor_tunables(cpu)
+
+   Return a dict with the tunables and their values for the specified CPU's
+   current governor.
+
+   :param cpu: The cpu; could be a numeric or the corresponding string (e.g.
+       ``1`` or ``"cpu1"``).
+
+.. method:: target.cpufreq.set_governor_tunables(cpu, \*\*kwargs)
+
+   Set the tunables for the current governor on the specified CPU.
+
+   :param cpu: The cpu; could be a numeric or the corresponding string (e.g.
+       ``1`` or ``"cpu1"``).
+
+   Keyword arguments should be used to specify tunable values.
+
+.. method:: target.cpufreq.list_frequencies(cpu)
+
+   List DVFS frequencies supported by the specified CPU. Returns a list of ints.
+
+   :param cpu: The cpu; could be a numeric or the corresponding string (e.g.
+       ``1`` or ``"cpu1"``).
+
+.. method:: target.cpufreq.get_min_frequency(cpu)
+            target.cpufreq.get_max_frequency(cpu)
+            target.cpufreq.set_min_frequency(cpu, frequency[, exact=True])
+            target.cpufreq.set_max_frequency(cpu, frequency[, exact=True])
+
+   Get and set min and max frequencies on the specified CPU. "set" functions are
+   available with all governors other than ``userspace``.
+
+   :param cpu: The cpu; could be a numeric or the corresponding string (e.g.
+       ``1`` or ``"cpu1"``).
+   :param frequency: Frequency to set.
+
+.. method:: target.cpufreq.get_frequency(cpu)
+            target.cpufreq.set_frequency(cpu, frequency[, exact=True])
+
+   Get and set current frequency on the specified CPU. ``set_frequency`` is only
+   available if the current governor is ``userspace``.
+
+   :param cpu: The cpu; could be a numeric or the corresponding string (e.g.
+       ``1`` or ``"cpu1"``).
+   :param frequency: Frequency to set.
+
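+Putting these together, a minimal usage sketch (assuming the ``userspace``
+governor is available on cpu0):
+
+.. code:: python
+
+   from devlib import LocalLinuxTarget
+   target = LocalLinuxTarget()
+
+   # Pick the lowest supported frequency on cpu0 and pin the CPU to it.
+   freqs = target.cpufreq.list_frequencies(0)
+   if freqs and 'userspace' in target.cpufreq.list_governors(0):
+       target.cpufreq.set_governor(0, 'userspace')
+       target.cpufreq.set_frequency(0, min(freqs))
+
+   # Read back the current frequency to confirm.
+   print(target.cpufreq.get_frequency(0))
+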
+cpuidle
+-------
+
+``cpuidle`` is the kernel subsystem for managing CPU low power (idle) states.
+
+.. method:: target.cpuidle.get_driver()
+
+   Return the name of the current cpuidle driver.
+
+.. method:: target.cpuidle.get_governor()
+
+   Return the name of the current cpuidle governor (policy).
+
+.. method:: target.cpuidle.get_states([cpu=0])
+
+   Return idle states (optionally, for the specified CPU). Returns a list of
+   :class:`CpuidleState` instances.
+
+.. method:: target.cpuidle.get_state(state[, cpu=0])
+
+   Return :class:`CpuidleState` instance (optionally, for the specified CPU)
+   representing the specified idle state. ``state`` can be either an integer
+   index of the state or a string with the states ``name`` or ``desc``.
+
+.. method:: target.cpuidle.enable(state[, cpu=0])
+            target.cpuidle.disable(state[, cpu=0])
+            target.cpuidle.enable_all([cpu=0])
+            target.cpuidle.disable_all([cpu=0])
+
+    Enable or disable the specified state, or all states, optionally on the
+    specified CPU.
+
+You can also call ``enable()`` or ``disable()`` on :class:`CpuidleState` objects
+returned by ``get_state()`` or ``get_states()``.
+
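+For example, a short sketch that lists the available idle states on cpu0 and
+disables the deepest one (assuming states expose a ``name`` attribute, as
+implied by ``get_state()`` above):
+
+.. code:: python
+
+   from devlib import LocalLinuxTarget
+   target = LocalLinuxTarget()
+
+   states = target.cpuidle.get_states(cpu=0)
+   for state in states:
+       print(state.name)
+
+   if states:
+       # The last state reported is typically the deepest.
+       target.cpuidle.disable(states[-1].name, cpu=0)
+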
+cgroups
+-------
+
+TODO
+
+hwmon
+-----
+
+TODO
+
+API
+---
+
+Generic Module API Description
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+Modules implement discrete, optional pieces of functionality ("optional" in the
+sense that the functionality may or may not be present on the target device, or
+that it may or may not be necessary for a particular application).
+
+Every module (ultimately) derives from :class:`Module` class.  A module must
+define the following class attributes:
+
+:name: A unique name for the module. This cannot clash with any of the existing
+       names and must be a valid Python identifier, but is otherwise free-form.
+:kind: This identifies the type of functionality a module implements, which in
+       turn determines the interface implemented by the module (all modules of
+       the same kind must expose a consistent interface). This must be a valid
+       Python identifier, but is otherwise free-form, though, where possible,
+       one should try to stick to an already-defined kind/interface, lest we end
+       up with a bunch of modules implementing similar functionality but
+       exposing slightly different interfaces.
+
+       .. note:: It is possible to omit ``kind`` when defining a module, in
+                 which case the module's ``name`` will be treated as its
+                 ``kind`` as well.
+
+:stage: This defines when the module will be installed into a :class:`Target`.
+        Currently, the following values are allowed:
+
+        :connected: The module is installed after a connection to the target has
+                    been established. This is the default.
+        :early: The module will be installed when a :class:`Target` is first
+                created. This should be used for modules that do not rely on a
+                live connection to the target.
+
+Additionally, a module must implement a static (or class) method :func:`probe`:
+
+.. method:: Module.probe(target)
+
+    This method takes a :class:`Target` instance and returns ``True`` if this
+    module is supported by that target, or ``False`` otherwise.
+
+    .. note:: If the module ``stage`` is ``"early"``, this method cannot assume
+              that a connection has been established (i.e. it can only access
+              attributes of the Target that do not rely on a connection).
+
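+To make this more concrete, here is a sketch of a minimal module definition
+(the ``example`` subsystem and the sysfs paths it probes for are made up purely
+for illustration):
+
+.. code:: python
+
+    from devlib import Module
+
+
+    class ExampleModule(Module):
+
+        name = 'example'
+        stage = 'connected'
+
+        @staticmethod
+        def probe(target):
+            # Only install the module if the (made-up) sysfs node is present.
+            return target.file_exists('/sys/kernel/example')
+
+        def read_example_value(self):
+            # The default install() makes the Target available as self.target.
+            return self.target.read_value('/sys/kernel/example/value')
+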
+Installation and invocation
+***************************
+
+The default installation method will create an instance of a module (the
+:class:`Target` instance being the sole argument) and assign it to the target
+instance attribute named after the module's ``kind`` (or ``name`` if ``kind`` is
+``None``).
+
+It is possible to change the installation procedure for a module by overriding
+the default :func:`install` method. The method must have the following
+signature:
+
+.. method:: Module.install(cls, target, \*\*kwargs)
+
+    Install the module into the target instance.
+
+
+Implementation and Usage Patterns
+*********************************
+
+There are two common ways to implement the above API, corresponding to the two
+common uses for modules:
+
+- If a module provides an interface to a particular set of functionality (e.g.
+  an OS subsystem), that module would typically derive directly from
+  :class:`Module` and would leave ``kind`` unassigned, so that it is accessed
+  by its name. Its instance's methods and attributes provide the interface for
+  interacting with its functionality. For examples of this type of module, see
+  the subsystem modules listed above (e.g. ``cpufreq``).
+- If a module provides a platform- or infrastructure-specific implementation of
+  a common function, the module would derive from one of the :class:`Module`
+  subclasses that define the interface for that function. In that case the
+  module would be accessible via the common ``kind`` defined by its superclass.
+  The module would typically implement :func:`__call__` and be invoked directly.
+  For examples of this type of module, see the common function interface
+  definitions below.
+
+
+Common Function Interfaces
+~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+This section documents :class:`Module` classes defining interface for common
+functions. Classes derived from them provide concrete implementations for
+specific platforms.
+
+
+HardResetModule
+***************
+
+.. attribute:: HardResetModule.kind
+
+    "hard_reset"
+
+.. method:: HardResetModule.__call__()
+
+    Must be implemented by derived classes.
+    
+    Implements hard reset for a target device. This is the equivalent of physically
+    power cycling the device.  This may be used by client code in situations
+    where the target becomes unresponsive and/or a regular reboot is not
+    possible.
+
+
+BootModule
+**********
+
+.. attribute:: BootModule.kind
+
+    "hard_reset"
+
+.. method:: BootModule.__call__()
+
+    Must be implemented by derived classes.
+
+    Implements a boot procedure. This takes the device from (hard or soft)
+    reset to a booted state where the device is ready to accept connections. For
+    a lot of commercial devices the process is entirely automatic, however some
+    devices (e.g. development boards), my require additional steps, such as
+    interactions with the bootloader, in order to boot into the OS.
+
+.. method:: BootModule.update(\*\*kwargs)
+
+    Update the boot settings. Some boot sequences allow specifying settings
+    that will be utilized during boot (e.g. linux kernel boot command line). The
+    default implementation will set each setting in ``kwargs`` as an attribute of
+    the boot module (or update the existing attribute).
+
+
+FlashModule
+***********
+
+.. attribute:: FlashModule.kind
+
+    "flash"
+
+.. method:: FlashModule.__call__(image_bundle=None, images=None, boot_config=None)
+
+    Must be implemented by derived classes.
+
+    Flash the target platform with the specified images.
+
+    :param image_bundle: A compressed bundle of image files with any associated
+                         metadata. The format of the bundle is specific to a
+                         particular implementation.
+    :param images: A dict mapping image names/identifiers to the path on the
+                   host file system of the corresponding image file. If both
+                   this and ``image_bundle`` are specified, individual images
+                   will override those in the bundle.
+    :param boot_config: Some platforms require specifying boot arguments at the
+                        time of flashing the images, rather than during each
+                        reboot. For other platforms, this will be ignored.
+
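+Assuming a ``flash`` module has been installed for the current platform, it
+would typically be invoked directly on the target instance; a sketch (the image
+names and paths below are made up for illustration):
+
+.. code:: python
+
+    target.flash(images={'kernel': '/path/to/Image',
+                         'dtb': '/path/to/board.dtb'})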
+
+Module Registration
+~~~~~~~~~~~~~~~~~~~
+
+Modules are specified on :class:`Target` or :class:`Platform` creation by name.
+In order to find the class associated with the name, the module needs to be
+registered with ``devlib``. This is accomplished by passing the module class
+to :func:`register_module` once it is defined.
+
+.. note:: If you're writing a module to be included as part of the ``devlib``
+          code base, you can place the file with the module class under
+          ``devlib/modules/`` in the source and it will be automatically
+          enumerated. There is no need to explicitly register it in that case.
+
+The code snippet below illustrates an implementation of a hard reset function
+for an "Acme" device.
+
+.. code:: python
+
+    import os
+    from devlib import HardResetModule, register_module
+
+
+    class AcmeHardReset(HardResetModule):
+
+        name = 'acme_hard_reset'
+
+        def __call__(self):
+            # Assuming Acme board comes with a "reset-acme-board" utility 
+            os.system('reset-acme-board {}'.format(self.target.name))
+
+    register_module(AcmeHardReset)
+
diff --git a/doc/overview.rst b/doc/overview.rst
new file mode 100644
index 0000000..421f053
--- /dev/null
+++ b/doc/overview.rst
@@ -0,0 +1,282 @@
+Overview
+========
+
+A :class:`Target` instance serves as the main interface to the target device. 
+There are currently three target interfaces:
+
+- :class:`LinuxTarget` for interacting with Linux devices over SSH.
+- :class:`AndroidTarget` for interacting with Android devices over adb.
+- :class:`LocalLinuxTarget`: for interacting with the local Linux host.
+
+They all work in more-or-less the same way, with the major difference being in
+how connection settings are specified; though there may also be a few APIs
+specific to a particular target type (e.g. :class:`AndroidTarget` exposes
+methods for working with logcat).
+
+
+Acquiring a Target
+------------------
+
+To create an interface to your device, you just need to instantiate one of the
+:class:`Target` derivatives listed above, and pass it the right
+``connection_settings``. Code snippet below gives a typical example of
+instantiating each of the three target types. 
+
+.. code:: python
+
+   from devlib import LocalLinuxTarget, LinuxTarget, AndroidTarget
+
+   # Local machine requires no special connection settings.
+   t1 = LocalLinuxTarget()
+
+   # For a Linux device, you will need to provide the normal SSH credentials.
+   # Both password-based and key-based authentication are supported (password
+   # authentication requires sshpass to be installed on your host machine).
+   t2 = LinuxTarget(connection_settings={'host': '192.168.0.5',
+                                        'username': 'root',
+                                        'password': 'sekrit',
+                                        # or
+                                        'keyfile': '/home/me/.ssh/id_rsa'})
+
+   # For an Android target, you will need to pass the device name as reported
+   # by "adb devices". If there is only one device visible to adb, you can omit
+   # this setting and instantiate similar to a local target.
+   t3 = AndroidTarget(connection_settings={'device': '0123456789abcde'})
+
+Instantiating a target may take a second or two as the remote device will be
+queried to initialize :class:`Target`'s internal state. If you would like to
+create a :class:`Target` instance but not immediately connect to the remote
+device, you can pass the ``connect=False`` parameter. If you do that, you will
+then have to explicitly call ``t.connect()`` before you can interact with the
+device.
+
+There are a few additional parameters you can pass at instantiation besides
+``connection_settings``, but they are usually unnecessary. Please see
+:class:`Target` API documentation for more details.
+
+Target Interface
+----------------
+
+This is a quick overview of the basic interface to the device. See
+:class:`Target` API documentation for the full list of supported methods and
+more detailed documentation.
+
+One-time Setup
+~~~~~~~~~~~~~~
+
+.. code:: python
+
+   from devlib import LocalLinuxTarget
+   t = LocalLinuxTarget()
+
+   t.setup()
+
+This sets up the target for ``devlib`` interaction. This includes creating
+working directories, deploying busybox, etc. It's usually enough to do this once
+for a new device, as the changes this makes will persist across reboots.
+However, there is no issue with calling this multiple times, so, to be on the
+safe side, it's a good idea to call this once at the beginning of your scripts.
+
+Command Execution
+~~~~~~~~~~~~~~~~~
+
+There are several ways to execute a command on the target. In each case, a
+:class:`TargetError` will be raised if something goes wrong. In every case, it is
+also possible to specify ``as_root=True`` if the specified command should be
+executed as root.
+
+.. code:: python
+
+   from devlib import LocalLinuxTarget
+   t = LocalLinuxTarget()
+
+   # Execute a command 
+   output = t.execute('echo $PWD')
+
+   # Execute command via a subprocess and return the corresponding Popen object.
+   # This will block the current connection to the device until the command
+   # completes.
+   p = t.background('echo $PWD')
+   output, error = p.communicate()
+
+   # Run the command in the background on the device and return immediately.
+   # This will not block the connection, allowing you to immediately execute another
+   # command. 
+   t.kick_off('echo $PWD')
+
+   # This is used to invoke an executable binary on the device. This allows some
+   # finer-grained control over the invocation, such as specifying the directory
+   # in which the executable will run; however you're limited to a single binary
+   # and cannot construct complex commands (e.g. this does not allow chaining or
+   # piping several commands together).
+   output = t.invoke('echo', args=['$PWD'], in_directory='/')
+
+File Transfer
+~~~~~~~~~~~~~
+
+.. code:: python
+
+   from devlib import LocalLinuxTarget
+   t = LocalLinuxTarget()
+
+   # "push" a file from the local machine onto the target device.
+   t.push('/path/to/local/file.txt', '/path/to/target/file.txt')
+
+   # "pull" a file from the target device into a location on the local machine
+   t.pull('/path/to/target/file.txt', '/path/to/local/file.txt')
+
+   # Install the specified binary on the target. This will deploy the file and
+   # ensure it's executable. This will *not* guarantee that the binary will be 
+   # in PATH. Instead the path to the binary will be returned; this should be
+   # used to call the binary henceforth.
+   target_bin = t.install('/path/to/local/bin.exe')
+   # Example invocation:
+   output = t.execute('{} --some-option'.format(target_bin))
+
+The usual access permission constraints on the user account (both on the target
+and the host) apply. 
+
+Process Control
+~~~~~~~~~~~~~~~
+
+.. code:: python
+
+   import signal
+   from devlib import LocalLinuxTarget
+   t = LocalLinuxTarget()
+
+   # return PIDs of all running instances of a process
+   pids = t.get_pids_of('sshd')
+
+   # kill a running process. This works the same ways as the kill command, so
+   # SIGTERM will be used by default.
+   t.kill(666, signal=signal.SIGKILL)
+
+   # kill all running instances of a process.
+   t.killall('badexe', signal=signal.SIGKILL)
+
+   # List processes running on the target. This returns a list of parsed
+   # PsEntry records.
+   entries = t.ps()
+   # e.g.  print virtual memory sizes of all running sshd processes:
+   print ', '.join(str(e.vsize) for e in entries if e.name == 'sshd')
+
+
+More...
+~~~~~~~
+
+As mentioned previously, the above is not intended to be exhaustive
+documentation of the :class:`Target` interface. Please refer to the API
+documentation for the full list of attributes and methods and their parameters.
+
+Super User Privileges
+---------------------
+
+It is not necessary for the account logged in on the target to have super user
+privileges, however the functionality will obviously be diminished, if that is
+not the case. ``devlib`` will determine whether the logged-in user has root
+privileges and the correct way to invoke commands as root. You should avoid
+including "sudo" directly in your commands; instead, specify ``as_root=True``
+where needed. This
+will make your scripts portable across multiple devices and OS's.
+
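+For example (a sketch; the file read here is only an illustration and may not
+exist on your target):
+
+.. code:: python
+
+   from devlib import LocalLinuxTarget
+   t = LocalLinuxTarget()
+
+   # Let devlib work out how to elevate privileges on this particular target.
+   output = t.execute('cat /sys/kernel/debug/sched_features', as_root=True)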
+
+On-Target Locations
+-------------------
+
+File system layouts vary wildly between devices and operating systems.
+Hard-coding absolute paths in your scripts will mean there is a good chance they
+will break if run on a different device.  To help with this, ``devlib`` defines
+a couple of "standard" locations and a means of working with them.
+
+working_directory
+        This is a directory on the target readable and writable by the account
+        used to log in. This should generally be used for all output generated
+        by your script on the device and as the destination for all
+        host-to-target file transfers. It may or may not permit execution so
+        executables should not be run directly from here.
+        
+executables_directory
+        This directory allows execution. This will be used by ``install()``.
+
+.. code:: python
+
+   from devlib import LocalLinuxTarget
+   t = LocalLinuxTarget()
+
+   # t.path is equivalent to the Python standard library's os.path, and should
+   # be used in the same way. This ensures that your scripts are portable across
+   # both target and host OS variations. e.g.
+   on_target_path = t.path.join(t.working_directory, 'assets.tar.gz')
+   t.push('/local/path/to/assets.tar.gz', on_target_path)
+
+   # Since working_directory is a common base path for on-target locations,
+   # there is a shorthand for the above:
+   t.push('/local/path/to/assets.tar.gz', t.get_workpath('assets.tar.gz'))
+
+
+Modules
+-------
+
+Additional functionality is exposed via modules. Modules are initialized as
+attributes of a target instance. By default, ``hotplug``, ``cpufreq``,
+``cpuidle``, ``cgroups`` and ``hwmon`` will attempt to load on target; additional
+modules may be specified when creating a :class:`Target` instance.
+
+A module will probe the target for support before attempting to load, so if the
+underlying platform does not support particular functionality (e.g. the kernel
+on the target device was built without hotplug support), the corresponding
+module will simply not be installed. To check whether a module has been
+successfully installed on a target, you can use the ``has()`` method, e.g.
+
+.. code:: python
+
+   from devlib import LocalLinuxTarget
+   t = LocalLinuxTarget()
+
+   cpu0_freqs = []
+   if t.has('cpufreq'):
+       cpu0_freqs = t.cpufreq.list_frequencies(0)
+
+
+Please see the modules documentation for more detail.
+
+
+Measurement and Trace
+---------------------
+
+You can collect traces (currently, just ftrace) using
+:class:`TraceCollector`\ s. For example
+
+.. code:: python
+
+   from devlib import LocalLinuxTarget, FtraceCollector
+   t = LocalLinuxTarget()
+  
+   # Initialize a collector specifying the events you want to collect and
+   # the buffer size to be used.
+   trace = FtraceCollector(t, events=['power*'], buffer_size=40000)
+
+   # clear ftrace buffer
+   trace.reset()
+
+   # start trace collection
+   trace.start()
+
+   # Perform the operations you want to trace here...
+   import time; time.sleep(5)
+
+   # stop trace collection
+   trace.stop()
+
+   # extract the trace file from the target into a local file
+   trace.get_trace('/tmp/trace.bin')
+
+   # View trace file using Kernelshark (must be installed on the host).
+   trace.view('/tmp/trace.bin')
+
+   # Convert binary trace into text format. This would normally be done
+   # automatically during get_trace(), unless autoreport is set to False during
+   # instantiation of the trace collector.
+   trace.report('/tmp/trace.bin', '/tmp/trace.txt')
+
+In a similar way, :class:`Instrument` instances may be used to collect
+measurements (such as power) from targets that support it. Please see
+instruments documentation for more details.
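+
+As a rough sketch (assuming the target supports ``hwmon`` and following the
+usual reset/start/stop/get_data cycle; the channels available, and whether this
+instrument is usable at all, depend on the device):
+
+.. code:: python
+
+   from devlib import LocalLinuxTarget, HwmonInstrument
+   t = LocalLinuxTarget()
+
+   instrument = HwmonInstrument(t)
+   instrument.reset()
+   instrument.start()
+   # ... run the workload of interest here ...
+   instrument.stop()
+
+   # Write the collected measurements into a CSV file on the host.
+   csv = instrument.get_data('/tmp/measurements.csv')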
diff --git a/doc/platform.rst b/doc/platform.rst
new file mode 100644
index 0000000..5270c09
--- /dev/null
+++ b/doc/platform.rst
@@ -0,0 +1,171 @@
+.. _platform:
+
+Platform
+========
+
+:class:`Platform`\ s describe the system underlying the OS. They encapsulate
+hardware- and firmware-specific details. In most cases, the generic
+:class:`Platform` class, which gets used if a platform is not explicitly
+specified on :class:`Target` creation, will be sufficient. It will automatically
+query as much platform information (such as CPU topology, hardware model, etc.)
+as it can if it was not specified explicitly by the user.
+
+
+.. class:: Platform(name=None, core_names=None, core_clusters=None,\
+                    big_core=None, model=None, modules=None)
+
+    :param name: A user-friendly identifier for the platform.
+    :param core_names: A list of CPU core names in the order they appear
+                       registered with the OS. If they are not specified,
+                       they will be queried at run time.
+    :param core_clusters: A list with the cluster id of each core (starting with
+                          0). If this is not specified, clusters will be
+                          inferred from core names (cores with the same name are
+                          assumed to be in a cluster).
+    :param big_core: The name of the big core in a big.LITTLE system. If this is
+                     not specified it will be inferred (on systems with exactly
+                     two clusters).
+    :param model: Model name of the hardware system. If this is not specified it
+                  will be queried at run time.
+    :param modules: Modules with additional functionality supported by the
+                    platform (e.g. for handling flashing, rebooting, etc). These
+                    would be added to the Target's modules. (See :ref:`modules`\ ).
+
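+For example, a sketch of explicitly describing a (made-up) four-core
+big.LITTLE system instead of relying on run-time discovery:
+
+.. code:: python
+
+    from devlib import LinuxTarget, Platform
+
+    # Illustrative values; adjust to match your hardware.
+    platform = Platform(name='my-board',
+                        core_names=['a53', 'a53', 'a72', 'a72'],
+                        core_clusters=[0, 0, 1, 1],
+                        big_core='a72')
+
+    t = LinuxTarget(platform=platform,
+                    connection_settings={'host': '192.168.0.5',
+                                         'username': 'root',
+                                         'password': 'sekrit'})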
+
+Versatile Express
+-----------------
+
+The generic platform may be extended to support hardware- or
+infrastructure-specific functionality. Platforms exist for ARM
+VersatileExpress-based :class:`Juno` and :class:`TC2` development boards. In
+addition to the standard :class:`Platform` parameters above, these platforms
+support additional configuration:
+
+
+.. class:: VersatileExpressPlatform
+
+    Normally, this would be instantiated via one of its derived classes
+    (:class:`Juno` or :class:`TC2`) that set appropriate defaults for some of
+    the parameters.
+
+    :param serial_port: Identifies the serial port (usually a /dev node) on which the
+                        device is connected.
+    :param baudrate: Baud rate for the serial connection. This defaults to
+                     ``115200`` for :class:`Juno` and ``38400`` for
+                     :class:`TC2`.
+    :param vemsd_mount: Mount point for the VEMSD (Versatile Express MicroSD card
+                        that is used for board configuration files and firmware
+                        images). This defaults to ``"/media/JUNO"`` for
+                        :class:`Juno` and ``"/media/VEMSD"`` for :class:`TC2`,
+                        though you would most likely need to change this for
+                        your setup as it would depend both on the file system
+                        label on the MicroSD card, and on how the card was
+                        mounted on the host system.
+    :param hard_reset_method: Specifies the method for hard-resetting the device
+                            (e.g. if it becomes unresponsive and the normal reboot
+                            method does not work). Currently supported methods
+                            are:
+
+                            :dtr: reboot by toggling DTR line on the serial
+                                  connection (this is enabled via a DIP switch
+                                  on the board).
+                            :reboottxt: reboot by writing a file called
+                                        ``reboot.txt`` to the root of the VEMSD
+                                        mount (this is enabled via board
+                                        configuration file).
+
+                            This defaults to ``dtr`` for :class:`Juno` and
+                            ``reboottxt`` for :class:`TC2`.
+    :param bootloader: Specifies the bootloader configuration used by the board.
+                      The following values are currently supported:
+
+                       :uefi: Boot via UEFI menu, by selecting the entry
+                              specified by the ``uefi_entry`` parameter. If this
+                              entry does not exist, it will be automatically
+                              created based on values provided for ``image``,
+                              ``initrd``, ``fdt``, and ``bootargs`` parameters.
+                       :uefi-shell: Boot by going via the UEFI shell.
+                       :u-boot: Boot using Das U-Boot.
+                       :bootmon: Boot directly via Versatile Express Bootmon
+                                 using the values provided for ``image``, 
+                                 ``initrd``, ``fdt``, and ``bootargs`` 
+                                 parameters.
+
+                      This defaults to ``u-boot`` for :class:`Juno` and
+                      ``bootmon`` for :class:`TC2`.
+    :param flash_method: Specifies how the device is flashed. Currently, only
+                        ``"vemsd"`` method is supported, which flashes by
+                        writing firmware images to an appropriate location on
+                        the VEMSD.
+    :param image: Specifies the kernel image name for ``uefi`` or ``bootmon`` boot.
+    :param fdt: Specifies the device tree blob for ``uefi`` or ``bootmon`` boot.
+    :param initrd: Specifies the ramdisk image for ``uefi`` or ``bootmon`` boot.
+    :param bootargs: Specifies the boot arguments that will be passed to the
+                     kernel by the bootloader.
+    :param uefi_entry: The name of the UEFI entry to be used/created by
+                       ``uefi`` bootloader.
+    :param ready_timeout: Timeout, in seconds, for the time it takes the
+                          platform to become ready to accept connections. Note:
+                          this does not mean that the system is fully booted;
+                          just that the services needed to establish a
+                          connection (e.g. sshd or adbd) are up.
+
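+A sketch of using one of these platforms (the serial device, mount point, and
+connection settings below are illustrative and will differ on your setup):
+
+.. code:: python
+
+    from devlib import LinuxTarget, Juno
+
+    platform = Juno(serial_port='/dev/ttyUSB0',
+                    vemsd_mount='/media/JUNO')
+
+    t = LinuxTarget(platform=platform,
+                    connection_settings={'host': '10.1.0.2',
+                                         'username': 'root'})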
+
+.. _gem5-platform:
+
+Gem5 Simulation Platform
+------------------------
+
+By initialising a :class:`Gem5SimulationPlatform`, devlib will start a gem5 simulation
+(based upon the arguments the user provided) and then connect to it using a
+:class:`Gem5Connection`. Using the methods discussed below, some methods of the
+:class:`Target` will be altered slightly to better suit gem5.
+
+.. class:: Gem5SimulationPlatform(name, host_output_dir, gem5_bin, gem5_args, gem5_virtio, gem5_telnet_port=None)
+
+    During initialisation the gem5 simulation will be kicked off (based upon the arguments
+    provided by the user) and the telnet port used by the gem5 simulation will be intercepted
+    and stored for use by the :class:`Gem5Connection`.
+
+    :param name: Platform name
+
+    :param host_output_dir: Path on the host where the gem5 outputs will be placed (e.g. stats file)
+
+    :param gem5_bin: gem5 binary
+
+    :param gem5_args: Arguments to be passed onto gem5 such as config file etc.
+
+    :param gem5_virtio: Arguments to be passed onto gem5 in terms of the virtIO device used
+                        to transfer files between the host and the gem5 simulated system.
+
+    :param gem5_telnet_port: Not yet in use as it would be used in future implementations
+                             of devlib in which the user could use the platform to pick
+                             up an existing and running simulation.
+
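+For example, a sketch of setting up a simulation-backed platform (all paths and
+arguments below are illustrative); the resulting object would then be passed as
+the ``platform`` of a :class:`LinuxTarget` or :class:`AndroidTarget`:
+
+.. code:: python
+
+    from devlib import Gem5SimulationPlatform
+
+    platform = Gem5SimulationPlatform(name='gem5',
+                                      host_output_dir='/tmp/gem5-out',
+                                      gem5_bin='/path/to/gem5.opt',
+                                      gem5_args='/path/to/config.py',
+                                      gem5_virtio='--disk-image=/path/to/image.img')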
+
+.. method:: Gem5SimulationPlatform.init_target_connection([target])
+
+    Based upon the OS defined in the :class:`Target`, the type of :class:`Gem5Connection`
+    will be set (:class:`LinuxGem5Connection` or :class:`AndroidGem5Connection`).
+
+.. method:: Gem5SimulationPlatform.update_from_target([target])
+
+    This method provides specific setup procedures for a gem5 simulation. First of all, the m5
+    binary will be installed on the guest (if it is not present). Secondly, three methods
+    in the :class:`Target` will be monkey-patched:
+
+            - **reboot**: this is not supported in gem5
+            - **reset**: this is not supported in gem5
+            - **capture_screen**: gem5 might already have screencaps so the
+              monkey-patched method will first try to
+              transfer the existing screencaps.
+              In case that does not work, it will fall back
+              to the original :class:`Target` implementation
+              of :func:`capture_screen`.
+
+    Finally, it will call the parent implementation of :func:`update_from_target`.
+
+.. method:: Gem5SimulationPlatform.setup([target])
+
+    The m5 binary will be installed, if it is not already present on the gem5 simulated system.
+    It will also resize the gem5 shell, to avoid line wrapping issues.
diff --git a/doc/target.rst b/doc/target.rst
new file mode 100644
index 0000000..d14942e
--- /dev/null
+++ b/doc/target.rst
@@ -0,0 +1,434 @@
+Target
+======
+
+
+.. class:: Target(connection_settings=None, platform=None, working_directory=None, executables_directory=None, connect=True, modules=None, load_default_modules=True, shell_prompt=DEFAULT_SHELL_PROMPT)
+   
+    :class:`Target` is the primary interface to the remote device. All interactions
+    with the device are performed via a :class:`Target` instance, either
+    directly, or via its modules or a wrapper interface (such as an
+    :class:`Instrument`).
+
+    :param connection_settings: A ``dict`` that specifies how to connect to the remote 
+       device. Its contents depend on the specific :class:`Target` type (see
+       :ref:`connection-types`\ ).
+
+    :param platform: A :class:`Target` defines interactions at Operating System level. A 
+        :class:`Platform` describes the underlying hardware (such as CPUs
+        available). If a :class:`Platform` instance is not specified on
+        :class:`Target` creation, one will be created automatically and it will
+        dynamically probe the device to discover as much about the underlying
+        hardware as it can. See also :ref:`platform`\ .
+
+    :param working_directory: This is the primary location for on-target file system
+        interactions performed by ``devlib``. This location *must* be readable and
+        writable directly (i.e. without sudo) by the connection's user account. 
+        It may or may not allow execution. This location will be created, 
+        if necessary, during ``setup()``.
+
+        If not explicitly specified, this will be set to a default value
+        depending on the type of :class:`Target`.
+
+    :param executables_directory: This is the location to which ``devlib`` will
+        install executable binaries (either during ``setup()`` or via an
+        explicit ``install()`` call). This location *must* support execution
+        (obviously). It should also be possible to write to this location,
+        possibly with elevated privileges (i.e. on a rooted Linux target, it
+        should be possible to write here with sudo, but not necessarily directly
+        by the connection's account). This location will be created, 
+        if necessary, during ``setup()``.
+
+        This location does *not* need to be the same as the system's executables
+        location. In fact, to prevent devlib from overwriting the system's defaults,
+        it is better if this is a separate location, if possible.
+
+        If not explicitly specified, this will be set to a default value
+        depending on the type of :class:`Target`.
+
+    :param connect: Specifies whether a connection should be established to the
+        target. If this is set to ``False``, then ``connect()`` must be
+        explicitly called later on before the :class:`Target` instance can be
+        used.
+
+    :param modules: a list of additional modules to be installed. Some modules will
+        try to install by default (if supported by the underlying target).
+        Current default modules are ``hotplug``, ``cpufreq``, ``cpuidle``, 
+        ``cgroups``, and ``hwmon`` (See :ref:`modules`\ ).
+
+        See modules documentation for more detail.
+
+    :param load_default_modules: If set to ``False``, the default modules listed
+         above will *not* attempt to load. This may be used either to speed up
+         target instantiation (probing for and initializing modules takes a bit of time)
+         or if there is an issue with one of the modules on a particular device
+         (the rest of the modules will then have to be explicitly specified in
+         the ``modules``).
+
+    :param shell_prompt: This is a regular expression that matches the shell
+         prompt on the target. This may be used by some modules that establish
+         auxiliary connections to a target over UART.
+
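+For example, a sketch of deferring the connection and limiting which modules
+get loaded (the device ID below is illustrative):
+
+.. code:: python
+
+    from devlib import AndroidTarget
+
+    t = AndroidTarget(connection_settings={'device': '0123456789abcde'},
+                      load_default_modules=False,
+                      modules=['cpufreq'],
+                      connect=False)
+
+    # ... later, once the device is ready:
+    t.connect()
+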
+.. attribute:: Target.core_names
+
+   This is a list containing names of CPU cores on the target, in the order in
+   which they are indexed by the kernel. This is obtained via the underlying
+   :class:`Platform`.
+
+.. attribute:: Target.core_clusters
+
+   Some devices feature heterogeneous core configurations (such as ARM
+   big.LITTLE).  This is a list that maps CPUs onto underlying clusters.
+   (Usually, but not always, clusters correspond to groups of CPUs with the same
+   name). This is obtained via the underlying :class:`Platform`.
+
+.. attribute:: Target.big_core
+
+   This is the name of the cores that are the "big"s in an ARM big.LITTLE
+   configuration. This is obtained via the underlying :class:`Platform`.
+
+.. attribute:: Target.little_core
+
+   This is the name of the cores that are the "little"s in an ARM big.LITTLE
+   configuration. This is obtained via the underlying :class:`Platform`.
+
+.. attribute:: Target.is_connected
+
+   A boolean value that indicates whether an active connection exists to the
+   target device. 
+
+.. attribute:: Target.connected_as_root
+
+   A boolean value that indicates whether the account that was used to connect to
+   the target device is "root" (uid=0).
+
+.. attribute:: Target.is_rooted
+
+   A boolean value that indicates whether the connected user has super user
+   privileges on the device (either it is root, or it is a sudoer).
+
+.. attribute:: Target.kernel_version
+
+   The version of the kernel on the target device. This returns a
+   :class:`KernelVersion` instance that has separate ``version`` and ``release``
+   fields.
+
+.. attribute:: Target.os_version
+
+   This is a dict that contains a mapping of OS version elements to their
+   values. This mapping is OS-specific.
+
+.. attribute:: Target.cpuinfo
+
+   This is a :class:`Cpuinfo` instance which contains parsed contents of
+   ``/proc/cpuinfo``.
+
+.. attribute:: Target.number_of_cpus
+
+   The total number of CPU cores on the target device.
+
+.. attribute:: Target.config
+
+   A :class:`KernelConfig` instance that contains parsed kernel config from the
+   target device. This may be ``None`` if kernel config could not be extracted.
+
+.. attribute:: Target.user
+
+   The name of the user logged in on the target device.
+
+.. attribute:: Target.conn
+
+   The underlying connection object. This will be ``None`` if an active
+   connection does not exist (e.g. if ``connect=False`` as passed on
+   initialization and ``connect()`` has not been called).
+
+   .. note:: a :class:`Target` will automatically create a connection per
+             thread. This will always be set to the connection for the current
+             thread.
+
+.. method:: Target.connect([timeout])
+   
+   Establish a connection to the target. It is usually not necessary to call
+   this explicitly, as a connection gets automatically established on
+   instantiation.
+
+.. method:: Target.disconnect()
+
+   Disconnect from target, closing all active connections to it.
+
+.. method:: Target.get_connection([timeout])
+
+   Get an additional connection to the target. A connection can be used to
+   execute one blocking command at a time. This will return a connection that can
+   be used to interact with a target in parallel while a blocking operation is
+   being executed.
+
+   This should *not* be used to establish an initial connection; use
+   ``connect()`` instead.
+
+   .. note:: :class:`Target` will automatically create a connection per
+             thread, so you don't normally need to use this explicitly in
+             threaded code. This is generally useful if you want to perform a
+             blocking operation (e.g. using ``background()``) while at the same
+             time doing something else in the same host-side thread.
+
+.. method:: Target.setup([executables])
+
+   This will perform an initial one-time set up of a device for devlib
+   interaction. This involves deployment of tools relied on by the :class:`Target`,
+   creation of working locations on the device, etc.
+
+   Usually, it is enough to call this method once per new device, as its effects
+   will persist across reboots. However, it is safe to call this method multiple
+   times. It may therefore be a good practice to always call it once at the
+   beginning of a script to ensure that subsequent interactions will succeed.
+
+   Optionally, this may also be used to deploy additional tools to the device
+   by specifying a list of binaries to install in the ``executables`` parameter.
+
+.. method:: Target.reboot([hard [, connect, [timeout]]])
+
+   Reboot the target device.
+
+   :param hard: A boolean value. If ``True`` a hard reset will be used instead
+        of the usual soft reset. Hard reset must be supported (usually via a
+        module) for this to work. Defaults to ``False``.
+   :param connect: A boolean value. If ``True``, a connection will be
+        automatically established to the target after reboot. Defaults to
+        ``True``.
+   :param timeout: If set, this will be used by various (platform-specific)
+        operations during reboot process to detect if the reboot has failed and
+        the device has hung.
+
+.. method:: Target.push(source, dest [, timeout])
+
+   Transfer a file from the host machine to the target device.
+
+   :param source: path to the file on the host
+   :param dest: path to the file on the target
+   :param timeout: timeout (in seconds) for the transfer; if the transfer does
+       not complete within this period, an exception will be raised.
+
+.. method:: Target.pull(source, dest [, timeout])
+
+   Transfer a file from the target device to the host machine.
+
+   :param source: path to the file on the target
+   :param dest: path to the file on the host
+   :param timeout: timeout (in seconds) for the transfer; if the transfer does
+       not complete within this period, an exception will be raised.
+
+.. method:: Target.execute(command [, timeout [, check_exit_code [, as_root]]])
+
+   Execute the specified command on the target device and return its output.
+
+   :param command: The command to be executed.
+   :param timeout: Timeout (in seconds) for the execution of the command. If
+       specified, an exception will be raised if execution does not complete
+       within the specified period.
+   :param check_exit_code: If ``True`` (the default) the exit code (on target) 
+       from execution of the command will be checked, and an exception will be
+       raised if it is not ``0``.
+   :param as_root: The command will be executed as root. This will fail on
+       unrooted targets.
+
+.. method:: Target.background(command [, stdout [, stderr [, as_root]]])
+
+   Execute the command on the target, invoking it via subprocess on the host.
+   This will return :class:`subprocess.Popen` instance for the command.
+
+   :param command: The command to be executed.
+   :param stdout: By default, standard output will be piped from the subprocess;
+      this may be used to redirect it to an alternative file handle.
+   :param stderr: By default, standard error will be piped from the subprocess;
+      this may be used to redirect it to an alternative file handle.
+   :param as_root: The command will be executed as root. This will fail on
+       unrooted targets.
+
+   .. note:: This **will block the connection** until the command completes.
+
+.. method:: Target.invoke(binary [, args [, in_directory [, on_cpus [, as_root [, timeout]]]]])
+
+   Execute the specified binary on target (must already be installed) under the
+   specified conditions and return the output.
+
+   :param binary: binary to execute. Must be present and executable on the device.
+   :param args: arguments to be passed to the binary. These can be either a list or
+          a string.
+   :param in_directory:  execute the binary in the  specified directory. This must
+                   be an absolute path.
+   :param on_cpus:  taskset the binary to these CPUs. This may be a single ``int`` (in which
+          case, it will be interpreted as the mask), a list of ``ints``, in which
+          case this will be interpreted as the list of cpus, or string, which
+          will be interpreted as a comma-separated list of cpu ranges, e.g.
+          ``"0,4-7"``.
+   :param as_root: Specify whether the command should be run as root
+   :param timeout: If this is specified and invocation does not terminate within this number 
+           of seconds, an exception will be raised.
+
+.. method:: Target.kick_off(command [, as_root])
+
+   Kick off the specified command on the target and return immediately. Unlike
+   ``background()`` this will not block the connection; on the other hand, there
+   is no way to know when the command finishes (apart from calling ``ps()``)
+   or to get its output (unless it is redirected into a file that can be pulled
+   later as part of the command).
+
+   :param command: The command to be executed.
+   :param as_root: The command will be executed as root. This will fail on
+       unrooted targets.
+
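+   For example, a sketch of capturing the output by redirecting it into a file
+   in the working directory and pulling it back later (``t`` is assumed to be a
+   connected :class:`Target`; the command and file name are illustrative):
+
+   .. code:: python
+
+      logfile = t.get_workpath('kickoff.log')
+      t.kick_off('sleep 5 && echo done > {}'.format(logfile))
+
+      # ... some time later ...
+      t.pull(logfile, 'kickoff.log')
+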
+.. method:: Target.read_value(path [,kind])
+
+   Read the value from the specified path. This is primarily intended for
+   sysfs/procfs/debugfs etc.
+
+   :param path: file to read
+   :param kind: Optionally, the read value will be converted into the specified
+        kind (which should be a callable that takes exactly one parameter).
+
+.. method:: Target.read_int(path)
+
+   Equivalent to ``Target.read_value(path, kind=devlib.utils.types.integer)``
+
+.. method:: Target.read_bool(path)
+
+   Equivalent to ``Target.read_value(path, kind=devlib.utils.types.boolean)``
+
+.. method:: Target.write_value(path, value [, verify])
+
+   Write the value to the specified path on the target. This is primarily 
+   intended for sysfs/procfs/debugfs etc.
+
+   :param path: file to write into
+   :param value: value to be written
+   :param verify: If ``True`` (the default) the value will be read back after
+       it is written to make sure it has been written successfully. This is due to
+       some sysfs entries silently failing to set the written value without
+       returning an error code.
+
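+For example, a sketch of reading and tweaking cpufreq sysfs attributes (``t`` is
+assumed to be a connected :class:`Target`; the paths assume a typical cpufreq
+sysfs layout and may differ on your target):
+
+.. code:: python
+
+   governor = t.read_value('/sys/devices/system/cpu/cpu0/cpufreq/scaling_governor')
+   max_freq = t.read_int('/sys/devices/system/cpu/cpu0/cpufreq/scaling_max_freq')
+
+   # Writing to sysfs will typically require root privileges on the target.
+   t.write_value('/sys/devices/system/cpu/cpu0/cpufreq/scaling_governor',
+                 'performance')
+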
+.. method:: Target.reset()
+
+   Soft reset the target. Typically, this means executing ``reboot`` on the
+   target.
+
+.. method:: Target.check_responsive()
+
+   Returns ``True`` if the target appears to be responsive and ``False``
+   otherwise.
+
+.. method:: Target.kill(pid[, signal[, as_root]])
+
+   Kill a process on the target.
+
+   :param pid: PID of the process to be killed.
+   :param signal: Signal to be used to kill the process. Defaults to
+       ``signal.SIGTERM``.
+   :param as_root: If set to ``True``, kill will be issued as root. This will
+       fail on unrooted targets.
+
+.. method:: Target.killall(name[, signal[, as_root]])
+
+   Kill all processes with the specified name on the target. Other parameters
+   are the same as for ``kill()``.
+
+.. method:: Target.get_pids_of(name)
+
+   Return a list of PIDs of all running instances of the specified process.
+
+.. method:: Target.ps()
+
+   Return a list of :class:`PsEntry` instances for all running processes on the
+   system.
+
+.. method:: Target.file_exists(filepath)
+
+   Returns ``True`` if the specified path exists on the target and ``False``
+   otherwise.
+
+.. method:: Target.list_file_systems()
+
+   Lists file systems mounted on the target. Returns a list of
+   :class:`FstabEntry`\ s.
+
+.. method:: Target.list_directory(path[, as_root])
+
+   List (optionally, as root) the contents of the specified directory. Returns a
+   list of strings.
+
+
+.. method:: Target.get_workpath(path)
+
+   Convert the specified path to an absolute path relative to
+   ``working_directory`` on the target. This is a shortcut for
+   ``t.path.join(t.working_directory, path)``
+
+.. method:: Target.tempfile([prefix [, suffix]])
+
+   Get a path to a temporary file (optionally, with the specified prefix and/or
+   suffix) on the target.
+
+.. method:: Target.remove(path[, as_root])
+
+   Delete the specified path on the target. Will work on files and directories.
+
+.. method:: Target.core_cpus(core)
+
+   Return a list of numeric cpu IDs corresponding to the specified core name.
+
+.. method:: Target.list_online_cpus([core])
+
+   Return a list of numeric cpu IDs for all online CPUs (optionally, only for
+   CPUs corresponding to the specified core).
+
+.. method:: Target.list_offline_cpus([core])
+
+   Return a list of numeric cpu IDs for all offline CPUs (optionally, only for
+   CPUs corresponding to the specified core).
+
+.. method:: Target.getenv(variable)
+
+   Return the value of the specified environment variable on the device
+
+.. method:: Target.capture_screen(filepath)
+
+   Take a screenshot on the device and save it to the specified file on the
+   host. This may not be supported by the target.
+
+.. method:: Target.install(filepath[, timeout[, with_name]])
+
+   Install an executable on the device.
+
+   :param filepath: path to the executable on the host
+   :param timeout: Optional timeout (in seconds) for the installation
+   :param with_name: This may be used to rename the executable on the target
+
+.. method:: Target.uninstall(name)
+
+   Uninstall the specified executable from the target
+
+.. method:: Target.get_installed(name)
+
+   Return the full installation path on the target for the specified executable,
+   or ``None`` if the executable is not installed.
+
+.. method:: Target.which(name)
+
+   Alias for ``get_installed()``
+
+.. method:: Target.is_installed(name)
+
+   Returns ``True`` if an executable with the specified name is installed on the
+   target and ``False`` otherwise.
+
+.. method:: Target.extract(path, dest=None)
+
+   Extracts the specified archive/file and returns the path to the extracted
+   contents. The extraction method is determined based on the file extension.
+   ``zip``, ``tar``, ``gzip``, and ``bzip2`` are supported.
+
+   :param dest: Specifies an on-target destination directory (which must exist)
+                for the extracted contents.
+
+   Returns the path to the extracted contents. In case of files (gzip and
+   bzip2), the path to the decompressed file is returned; for archives, the
+   path to the directory with the archive's contents is returned.
diff --git a/setup.py b/setup.py
new file mode 100644
index 0000000..47a4475
--- /dev/null
+++ b/setup.py
@@ -0,0 +1,90 @@
+#    Copyright 2013-2015 ARM Limited
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+import os
+import sys
+import warnings
+from itertools import chain
+
+try:
+    from setuptools import setup
+except ImportError:
+    from distutils.core import setup
+
+
+devlib_dir = os.path.join(os.path.dirname(__file__), 'devlib')
+
+sys.path.insert(0, os.path.join(devlib_dir, 'core'))
+
+# happens if falling back to distutils
+warnings.filterwarnings('ignore', "Unknown distribution option: 'install_requires'")
+warnings.filterwarnings('ignore', "Unknown distribution option: 'extras_require'")
+
+try:
+    os.remove('MANIFEST')
+except OSError:
+    pass
+
+packages = []
+data_files = {}
+source_dir = os.path.dirname(__file__)
+for root, dirs, files in os.walk(devlib_dir):
+    rel_dir = os.path.relpath(root, source_dir)
+    data = []
+    if '__init__.py' in files:
+        for f in files:
+            if os.path.splitext(f)[1] not in ['.py', '.pyc', '.pyo']:
+                data.append(f)
+        package_name = rel_dir.replace(os.sep, '.')
+        package_dir = root
+        packages.append(package_name)
+        data_files[package_name] = data
+    else:
+        # use previous package name
+        filepaths = [os.path.join(root, f) for f in files]
+        data_files[package_name].extend([os.path.relpath(f, package_dir) for f in filepaths])
+
+params = dict(
+    name='devlib',
+    description='A framework for automating workload execution and measurement collection on ARM devices.',
+    version='0.0.4',
+    packages=packages,
+    package_data=data_files,
+    url='N/A',
+    license='Apache v2',
+    maintainer='ARM Ltd.',
+    install_requires=[
+        'python-dateutil',  # converting between UTC and local time.
+        'pexpect>=3.3',  # Send/receive to/from device
+        'pyserial',  # Serial port interface
+        'wrapt',  # Basis for construction of decorator functions
+    ],
+    extras_require={
+        'daq': ['daqpower'],
+        'doc': ['sphinx'],
+    },
+    # https://pypi.python.org/pypi?%3Aaction=list_classifiers
+    classifiers=[
+        'Development Status :: 4 - Beta',
+        'License :: OSI Approved :: Apache Software License',
+        'Operating System :: POSIX :: Linux',
+        'Programming Language :: Python :: 2.7',
+    ],
+)
+
+all_extras = list(chain(params['extras_require'].itervalues()))
+params['extras_require']['full'] = all_extras
+
+setup(**params)
diff --git a/src/netstats/AndroidManifest.xml b/src/netstats/AndroidManifest.xml
new file mode 100644
index 0000000..1095a0c
--- /dev/null
+++ b/src/netstats/AndroidManifest.xml
@@ -0,0 +1,14 @@
+<?xml version="1.0" encoding="utf-8"?>
+<manifest xmlns:android="http://schemas.android.com/apk/res/android"
+      package="com.arm.devlib.netstats"
+      android:versionCode="1"
+      android:versionName="1.0">
+    <application android:label="@string/app_name" android:icon="@drawable/ic_launcher">
+        <service android:name="com.arm.devlib.netstats.TrafficMetricsService" android:exported="true" android:enabled="true">
+            <intent-filter>
+                <action android:name="com.arm.devlib.netstats.TrafficMetricsService" />
+            </intent-filter>
+        </service>
+    </application>
+    <uses-feature android:name="android.hardware.touchscreen" android:required="false" />
+</manifest>
diff --git a/src/netstats/build.sh b/src/netstats/build.sh
new file mode 100755
index 0000000..7e211e5
--- /dev/null
+++ b/src/netstats/build.sh
@@ -0,0 +1,8 @@
+set -e
+THIS_DIR=$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )
+pushd $THIS_DIR
+ant release
+jarsigner -verbose -keystore ~/.android/debug.keystore -storepass android -keypass android $THIS_DIR/bin/netstats-*.apk androiddebugkey
+cp $THIS_DIR/bin/netstats-*.apk $THIS_DIR/../../devlib/instrument/netstats/netstats.apk
+ant clean
+popd
diff --git a/src/netstats/build.xml b/src/netstats/build.xml
new file mode 100644
index 0000000..0a3dcc7
--- /dev/null
+++ b/src/netstats/build.xml
@@ -0,0 +1,92 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<project name="netstats" default="help">
+
+    <!-- The local.properties file is created and updated by the 'android' tool.
+         It contains the path to the SDK. It should *NOT* be checked into
+         Version Control Systems. -->
+    <property file="local.properties" />
+
+    <!-- The ant.properties file can be created by you. It is only edited by the
+         'android' tool to add properties to it.
+         This is the place to change some Ant specific build properties.
+         Here are some properties you may want to change/update:
+
+         source.dir
+             The name of the source directory. Default is 'src'.
+         out.dir
+             The name of the output directory. Default is 'bin'.
+
+         For other overridable properties, look at the beginning of the rules
+         files in the SDK, at tools/ant/build.xml
+
+         Properties related to the SDK location or the project target should
+         be updated using the 'android' tool with the 'update' action.
+
+         This file is an integral part of the build system for your
+         application and should be checked into Version Control Systems.
+
+         -->
+    <property file="ant.properties" />
+
+    <!-- if sdk.dir was not set from one of the property file, then
+         get it from the ANDROID_HOME env var.
+         This must be done before we load project.properties since
+         the proguard config can use sdk.dir -->
+    <property environment="env" />
+    <condition property="sdk.dir" value="${env.ANDROID_HOME}">
+        <isset property="env.ANDROID_HOME" />
+    </condition>
+
+    <!-- The project.properties file is created and updated by the 'android'
+         tool, as well as ADT.
+
+         This contains project specific properties such as project target, and library
+         dependencies. Lower level build properties are stored in ant.properties
+         (or in .classpath for Eclipse projects).
+
+         This file is an integral part of the build system for your
+         application and should be checked into Version Control Systems. -->
+    <loadproperties srcFile="project.properties" />
+
+    <!-- quick check on sdk.dir -->
+    <fail
+            message="sdk.dir is missing. Make sure to generate local.properties using 'android update project' or to inject it through the ANDROID_HOME environment variable."
+            unless="sdk.dir"
+    />
+
+    <!--
+        Import per project custom build rules if present at the root of the project.
+        This is the place to put custom intermediary targets such as:
+            -pre-build
+            -pre-compile
+            -post-compile (This is typically used for code obfuscation.
+                           Compiled code location: ${out.classes.absolute.dir}
+                           If this is not done in place, override ${out.dex.input.absolute.dir})
+            -post-package
+            -post-build
+            -pre-clean
+    -->
+    <import file="custom_rules.xml" optional="true" />
+
+    <!-- Import the actual build file.
+
+         To customize existing targets, there are two options:
+         - Customize only one target:
+             - copy/paste the target into this file, *before* the
+               <import> task.
+             - customize it to your needs.
+         - Customize the whole content of build.xml
+             - copy/paste the content of the rules files (minus the top node)
+               into this file, replacing the <import> task.
+             - customize to your needs.
+
+         ***********************
+         ****** IMPORTANT ******
+         ***********************
+         In all cases you must update the value of version-tag below to read 'custom' instead of an integer,
+         in order to avoid having your file be overridden by tools such as "android update project"
+    -->
+    <!-- version-tag: 1 -->
+    <import file="${sdk.dir}/tools/ant/build.xml" />
+
+</project>
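With the Ant wrapper above in place, the netstats apk can be rebuilt via build.sh. A minimal sketch, assuming Apache Ant and a legacy Android SDK (with the API 19 Google APIs target) are installed; the SDK path below is illustrative:

    export ANDROID_HOME=/opt/android-sdk-linux   # illustrative path; lets the Ant wrapper resolve sdk.dir
    # alternatively, generate local.properties as the <fail> message above suggests:
    # android update project --path src/netstats
    ./src/netstats/build.sh                       # ant release, debug-key signing, apk copied into devlib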
diff --git a/src/netstats/proguard-project.txt b/src/netstats/proguard-project.txt
new file mode 100644
index 0000000..f2fe155
--- /dev/null
+++ b/src/netstats/proguard-project.txt
@@ -0,0 +1,20 @@
+# To enable ProGuard in your project, edit project.properties
+# to define the proguard.config property as described in that file.
+#
+# Add project specific ProGuard rules here.
+# By default, the flags in this file are appended to flags specified
+# in ${sdk.dir}/tools/proguard/proguard-android.txt
+# You can edit the include path and order by changing the ProGuard
+# include property in project.properties.
+#
+# For more details, see
+#   http://developer.android.com/guide/developing/tools/proguard.html
+
+# Add any project specific keep options here:
+
+# If your project uses WebView with JS, uncomment the following
+# and specify the fully qualified class name to the JavaScript interface
+# class:
+#-keepclassmembers class fqcn.of.javascript.interface.for.webview {
+#   public *;
+#}
diff --git a/src/netstats/project.properties b/src/netstats/project.properties
new file mode 100644
index 0000000..3999a20
--- /dev/null
+++ b/src/netstats/project.properties
@@ -0,0 +1,14 @@
+# This file is automatically generated by Android Tools.
+# Do not modify this file -- YOUR CHANGES WILL BE ERASED!
+#
+# This file must be checked in Version Control Systems.
+#
+# To customize properties used by the Ant build system edit
+# "ant.properties", and override values to adapt the script to your
+# project structure.
+#
+# To enable ProGuard to shrink and obfuscate your code, uncomment this (available properties: sdk.dir, user.home):
+#proguard.config=${sdk.dir}/tools/proguard/proguard-android.txt:proguard-project.txt
+
+# Project target.
+target=Google Inc.:Google APIs (x86 System Image):19
diff --git a/src/netstats/res/drawable-hdpi/ic_launcher.png b/src/netstats/res/drawable-hdpi/ic_launcher.png
new file mode 100644
index 0000000..96a442e
--- /dev/null
+++ b/src/netstats/res/drawable-hdpi/ic_launcher.png
Binary files differ
diff --git a/src/netstats/res/drawable-ldpi/ic_launcher.png b/src/netstats/res/drawable-ldpi/ic_launcher.png
new file mode 100644
index 0000000..9923872
--- /dev/null
+++ b/src/netstats/res/drawable-ldpi/ic_launcher.png
Binary files differ
diff --git a/src/netstats/res/drawable-mdpi/ic_launcher.png b/src/netstats/res/drawable-mdpi/ic_launcher.png
new file mode 100644
index 0000000..359047d
--- /dev/null
+++ b/src/netstats/res/drawable-mdpi/ic_launcher.png
Binary files differ
diff --git a/src/netstats/res/drawable-xhdpi/ic_launcher.png b/src/netstats/res/drawable-xhdpi/ic_launcher.png
new file mode 100644
index 0000000..71c6d76
--- /dev/null
+++ b/src/netstats/res/drawable-xhdpi/ic_launcher.png
Binary files differ
diff --git a/src/netstats/res/layout/main.xml b/src/netstats/res/layout/main.xml
new file mode 100644
index 0000000..5f5f112
--- /dev/null
+++ b/src/netstats/res/layout/main.xml
@@ -0,0 +1,13 @@
+<?xml version="1.0" encoding="utf-8"?>
+<LinearLayout xmlns:android="http://schemas.android.com/apk/res/android"
+    android:orientation="vertical"
+    android:layout_width="fill_parent"
+    android:layout_height="fill_parent"
+    >
+<TextView
+    android:layout_width="fill_parent"
+    android:layout_height="wrap_content"
+    android:text="Hello World, NetStats"
+    />
+</LinearLayout>
+
diff --git a/src/netstats/res/values/strings.xml b/src/netstats/res/values/strings.xml
new file mode 100644
index 0000000..6676c4f
--- /dev/null
+++ b/src/netstats/res/values/strings.xml
@@ -0,0 +1,4 @@
+<?xml version="1.0" encoding="utf-8"?>
+<resources>
+    <string name="app_name">NetStats</string>
+</resources>
diff --git a/src/netstats/src/com/arm/devlib/trafficcatcher/TrafficMetricsService.java b/src/netstats/src/com/arm/devlib/trafficcatcher/TrafficMetricsService.java
new file mode 100644
index 0000000..9d3bb69
--- /dev/null
+++ b/src/netstats/src/com/arm/devlib/trafficcatcher/TrafficMetricsService.java
@@ -0,0 +1,124 @@
+package com.arm.devlib.netstats;
+
+import java.lang.InterruptedException;
+import java.lang.System;
+import java.lang.Thread;
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.List;
+import java.util.Map;
+import java.util.HashMap;
+
+import android.app.Activity;
+import android.app.IntentService;
+import android.content.Intent;
+import android.content.pm.ApplicationInfo;
+import android.content.pm.PackageManager;
+import android.net.TrafficStats;
+import android.os.Bundle;
+import android.util.Log;
+
+class TrafficPoller implements Runnable {
+
+    private String tag;
+    private int period;
+    private PackageManager pm;
+    private static String TAG = "TrafficMetrics";
+    private List<String> packageNames;
+    private Map<String, Map<String, Long>> previousValues;
+
+    public TrafficPoller(String tag, PackageManager pm, int period, List<String> packages) {
+        this.tag = tag;
+        this.pm = pm;
+        this.period = period;
+        this.packageNames = packages;
+        this.previousValues = new HashMap<String, Map<String, Long>>();
+    }
+
+    public void run() {
+        try {
+            while (true) {
+                Thread.sleep(this.period);
+                getPackagesInfo();
+                if (Thread.interrupted()) {
+                    throw new InterruptedException();
+                }
+            }
+        } catch (InterruptedException e) {
+        }
+    }
+
+    public void getPackagesInfo() {
+        List<ApplicationInfo> apps;
+        if (this.packageNames == null) {
+            apps = pm.getInstalledApplications(0);
+            // No package filter was supplied: monitor every installed application;
+            // the reporting loop below iterates over the full list.
+        } else {
+            apps = new ArrayList<ApplicationInfo>();
+            for (String packageName : packageNames) {
+                try {
+                    ApplicationInfo info = pm.getApplicationInfo(packageName,  0);
+                    apps.add(info);
+                } catch (PackageManager.NameNotFoundException e) {
+                    e.printStackTrace();
+                }
+            }
+        }
+
+        for (ApplicationInfo appInfo : apps) {
+            int uid = appInfo.uid;
+            String name = appInfo.packageName;
+            long time =  System.currentTimeMillis();
+            long received = TrafficStats.getUidRxBytes(uid);
+            long sent = TrafficStats.getUidTxBytes(uid);
+
+            if (!this.previousValues.containsKey(name)) {
+                this.previousValues.put(name, new HashMap<String, Long>());
+                this.previousValues.get(name).put("sent", sent);
+                this.previousValues.get(name).put("received", received);
+                Log.i(this.tag, String.format("INITIAL \"%s\" TX: %d RX: %d", 
+                                              name, sent, received));
+            } else {
+                long previousSent = this.previousValues.get(name).put("sent", sent);
+                long previousReceived = this.previousValues.get(name).put("received", received);
+                Log.i(this.tag, String.format("%d \"%s\" TX: %d RX: %d",
+                                              time, name,
+                                              sent - previousSent,
+                                              received - previousReceived));
+            }
+        }
+    }
+}
+
+public class TrafficMetricsService extends IntentService {
+
+    private static String TAG = "TrafficMetrics";
+    private Thread thread;
+    private static int defaultPollingPeriod = 5000;
+
+    public TrafficMetricsService() {
+        super("TrafficMetrics");
+    }
+
+    @Override
+    public void onHandleIntent(Intent intent) {
+        List<String> packages = null;
+        String runTag = intent.getStringExtra("tag");
+        if (runTag == null) {
+            runTag = TAG;
+        }
+        String packagesString = intent.getStringExtra("packages");
+        int pollingPeriod = intent.getIntExtra("period", defaultPollingPeriod);
+        if (packagesString != null) {
+            packages = new ArrayList<String>(Arrays.asList(packagesString.split(",")));
+        } 
+
+        if (this.thread != null) {
+            Log.e(runTag, "Attempting to start when monitoring is already in progress.");
+            return;
+        }
+        this.thread = new Thread(new TrafficPoller(runTag, getPackageManager(), pollingPeriod, packages));
+        this.thread.start();
+    }
+}
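The service above is controlled entirely through intent extras ("tag", "packages" and "period") and reports through logcat. A usage sketch from a host over adb, with illustrative package names and tag value; the exact am option syntax can vary between Android releases:

    # start monitoring two packages, polling every 2000 ms, logging under the tag "devlib.net"
    adb shell am startservice -n com.arm.devlib.netstats/.TrafficMetricsService \
        --es tag devlib.net --es packages com.android.chrome,com.example.app --ei period 2000
    # read back the per-package deltas (message formats follow the Log.i calls above)
    adb logcat -s devlib.net
    #   INITIAL "com.android.chrome" TX: 1024 RX: 4096
    #   1488829200000 "com.android.chrome" TX: 512 RX: 2048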
diff --git a/src/readenergy/Makefile b/src/readenergy/Makefile
new file mode 100644
index 0000000..dd5cae2
--- /dev/null
+++ b/src/readenergy/Makefile
@@ -0,0 +1,11 @@
+# To build:
+#
+# CROSS_COMPILE=aarch64-linux-gnu- make
+#
+CROSS_COMPILE?=aarch64-linux-gnu-
+CC=$(CROSS_COMPILE)gcc
+CFLAGS=-static -lc
+
+readenergy: readenergy.c
+	$(CC) $(CFLAGS) readenergy.c -o readenergy
+	mv readenergy ../../devlib/bin/arm64/readenergy
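The Makefile above links statically, so the resulting binary runs on the target without needing matching libraries, and the build output is moved straight into devlib's bundled arm64 binaries. A typical invocation, assuming an aarch64 cross toolchain is installed (the package name is a Debian/Ubuntu example):

    sudo apt-get install gcc-aarch64-linux-gnu                # any toolchain providing aarch64-linux-gnu-gcc will do
    make -C src/readenergy CROSS_COMPILE=aarch64-linux-gnu-   # result lands in devlib/bin/arm64/readenergy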
diff --git a/src/readenergy/readenergy.c b/src/readenergy/readenergy.c
new file mode 100644
index 0000000..6e4f35f
--- /dev/null
+++ b/src/readenergy/readenergy.c
@@ -0,0 +1,379 @@
+/*    Copyright 2014-2015 ARM Limited
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+*/
+
+
+/*
+ * readenergy.c 
+ *
+ * Reads APB energy registers in Juno and outputs the measurements (converted to appropriate units).
+ *
+*/
+#include <errno.h>
+#include <fcntl.h>
+#include <stdint.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <signal.h>
+#include <sys/mman.h>
+#include <sys/stat.h>
+#include <sys/types.h>
+#include <time.h>
+#include <unistd.h>
+
+// The following values obtained from Juno TRM 2014/03/04 section 4.5
+
+// Location of APB registers in memory
+#define APB_BASE_MEMORY 0x1C010000
+// APB energy counters start at offset 0xD0 from the base APB address.
+#define BASE_INDEX (0xD0 / 4)
+// One past the last APB counter, i.e. the size of the region to map.
+#define APB_SIZE 0x120
+
+// Masks specifying the bits that contain the actual counter values
+#define CMASK 0xFFF
+#define VMASK 0xFFF
+#define PMASK 0xFFFFFF
+
+// Scaling factors (divisors) for converting raw counter values into measured values
+#define SYS_ADC_CH0_PM1_SYS_SCALE 761
+#define SYS_ADC_CH1_PM2_A57_SCALE 381
+#define SYS_ADC_CH2_PM3_A53_SCALE 761
+#define SYS_ADC_CH3_PM4_GPU_SCALE 381
+#define SYS_ADC_CH4_VSYS_SCALE 1622
+#define SYS_ADC_CH5_VA57_SCALE 1622
+#define SYS_ADC_CH6_VA53_SCALE 1622
+#define SYS_ADC_CH7_VGPU_SCALE 1622
+#define SYS_POW_CH04_SYS_SCALE (SYS_ADC_CH0_PM1_SYS_SCALE * SYS_ADC_CH4_VSYS_SCALE)
+#define SYS_POW_CH15_A57_SCALE (SYS_ADC_CH1_PM2_A57_SCALE * SYS_ADC_CH5_VA57_SCALE)
+#define SYS_POW_CH26_A53_SCALE (SYS_ADC_CH2_PM3_A53_SCALE * SYS_ADC_CH6_VA53_SCALE)
+#define SYS_POW_CH37_GPU_SCALE (SYS_ADC_CH3_PM4_GPU_SCALE * SYS_ADC_CH7_VGPU_SCALE)
+#define SYS_ENM_CH0_SYS_SCALE 12348030000
+#define SYS_ENM_CH1_A57_SCALE 6174020000
+#define SYS_ENM_CH0_A53_SCALE 12348030000
+#define SYS_ENM_CH0_GPU_SCALE 6174020000
+
+// Original values prior to re-calibration.
+/*#define SYS_ADC_CH0_PM1_SYS_SCALE 819.2*/
+/*#define SYS_ADC_CH1_PM2_A57_SCALE 409.6*/
+/*#define SYS_ADC_CH2_PM3_A53_SCALE 819.2*/
+/*#define SYS_ADC_CH3_PM4_GPU_SCALE 409.6*/
+/*#define SYS_ADC_CH4_VSYS_SCALE 1638.4*/
+/*#define SYS_ADC_CH5_VA57_SCALE 1638.4*/
+/*#define SYS_ADC_CH6_VA53_SCALE 1638.4*/
+/*#define SYS_ADC_CH7_VGPU_SCALE 1638.4*/
+/*#define SYS_POW_CH04_SYS_SCALE (SYS_ADC_CH0_PM1_SYS_SCALE * SYS_ADC_CH4_VSYS_SCALE)*/
+/*#define SYS_POW_CH15_A57_SCALE (SYS_ADC_CH1_PM2_A57_SCALE * SYS_ADC_CH5_VA57_SCALE)*/
+/*#define SYS_POW_CH26_A53_SCALE (SYS_ADC_CH2_PM3_A53_SCALE * SYS_ADC_CH6_VA53_SCALE)*/
+/*#define SYS_POW_CH37_GPU_SCALE (SYS_ADC_CH3_PM4_GPU_SCALE * SYS_ADC_CH7_VGPU_SCALE)*/
+/*#define SYS_ENM_CH0_SYS_SCALE 13421772800.0*/
+/*#define SYS_ENM_CH1_A57_SCALE 6710886400.0*/
+/*#define SYS_ENM_CH0_A53_SCALE 13421772800.0*/
+/*#define SYS_ENM_CH0_GPU_SCALE 6710886400.0*/
+
+// Ignore individual write errors, but abort if too many accumulate.
+#define ERROR_THRESHOLD 10
+
+// Default counter poll period (in milliseconds).
+#define DEFAULT_PERIOD 100
+
+// Default duration for the instrument execution (in seconds); 0 means 'forever'
+#define DEFAULT_DURATION 0
+
+// A single reading from the energy meter. The values are the proper readings converted
+// to appropriate units (e.g. Watts for power); they are *not* raw counter values.
+struct reading
+{
+	double sys_adc_ch0_pm1_sys;
+	double sys_adc_ch1_pm2_a57;
+	double sys_adc_ch2_pm3_a53;
+	double sys_adc_ch3_pm4_gpu;
+	double sys_adc_ch4_vsys;
+	double sys_adc_ch5_va57;
+	double sys_adc_ch6_va53;
+	double sys_adc_ch7_vgpu;
+	double sys_pow_ch04_sys;
+	double sys_pow_ch15_a57;
+	double sys_pow_ch26_a53;
+	double sys_pow_ch37_gpu;
+	double sys_enm_ch0_sys;
+	double sys_enm_ch1_a57;
+	double sys_enm_ch0_a53;
+	double sys_enm_ch0_gpu;
+};
+
+static inline uint64_t join_64bit_register(uint32_t *buffer, int index)
+{
+	uint64_t result = 0;
+	result |= buffer[index];
+	result |= (uint64_t)(buffer[index+1]) << 32;
+	return result;
+}
+
+int nsleep(const struct timespec *req, struct timespec *rem)
+{
+	struct timespec temp_rem;
+	if (nanosleep(req, rem) == -1)
+	{
+		if (errno == EINTR)
+		{
+			return nsleep(rem, &temp_rem);
+		}
+		else
+		{
+			return errno;
+		}
+	}
+	else
+	{
+		return 0;
+	}
+}
+ 
+void print_help()
+{
+	fprintf(stderr, "Usage: readenergy [-t PERIOD] [-d DURATION] [-o OUTFILE]\n\n"
+			"Read Juno energy counters every PERIOD milliseconds, writing them\n"
+			"to OUTFILE in CSV format either until SIGTERM is received OR\n"
+			"till the specified duration elapsed.\n"
+			"If OUTFILE is not specified, a single reading is written to stdout.\n\n"
+			"Parameters:\n"
+			"	PERIOD is the counter poll period in milliseconds.\n"
+			"	       (Defaults to 100 milliseconds.)\n"
+			"	DURATION is the duration in seconds before execution terminates.\n"
+			"		(Defaults to 0, meaning run until the user\n"
+			"		terminates execution.)\n"
+			"	OUTFILE is the output file path\n");
+}
+
+// debugging only...
+static inline void dprint(char *msg)
+{
+	fprintf(stderr, "%s\n", msg);
+	sync();
+}
+
+// -------------------------------------- config ----------------------------------------------------
+
+struct config
+{
+	struct timespec period;
+	char *output_file;
+	long duration_in_sec;
+};
+
+void config_init_period_from_millis(struct config *this, long millis)
+{
+	this->period.tv_sec = (time_t)(millis / 1000);
+	this->period.tv_nsec = (millis % 1000) * 1000000;
+}
+
+void config_init(struct config *this, int argc, char *argv[])
+{
+	this->output_file = NULL;
+	config_init_period_from_millis(this, DEFAULT_PERIOD);
+	this->duration_in_sec = DEFAULT_DURATION;
+
+	int opt;
+	while ((opt = getopt(argc, argv, "ht:o:d:")) != -1)
+	{
+		switch(opt)
+		{
+			case 't':
+				config_init_period_from_millis(this, atol(optarg));
+				break;
+			case 'o':
+				this->output_file = optarg;
+				break;
+			case 'd':
+				this->duration_in_sec = atol(optarg);
+				break;
+			case 'h':
+				print_help();
+				exit(EXIT_SUCCESS);
+				break;
+			default:
+				fprintf(stderr, "ERROR: Unexpected option '%c'\n\n", opt);
+				print_help();
+				exit(EXIT_FAILURE);
+		}
+	}
+}
+
+// -------------------------------------- /config ---------------------------------------------------
+
+// -------------------------------------- emeter ----------------------------------------------------
+
+struct emeter
+{
+	int fd;
+	FILE *out;
+	void *mmap_base;
+};
+
+void emeter_init(struct emeter *this, char *outfile)
+{
+	if(outfile)
+	{
+		this->out = fopen(outfile, "w");
+		if (this->out == NULL)
+		{
+			fprintf(stderr, "ERROR: Could not open output file %s; got %s\n", outfile, strerror(errno));
+			exit(EXIT_FAILURE);
+		}
+	} else {
+		this->out = stdout;
+	}
+	this->fd = open("/dev/mem", O_RDONLY);
+	if(this->fd < 0)
+	{
+		fprintf(stderr, "ERROR: Can't open /dev/mem; got %s\n", strerror(errno));
+		fclose(this->out);
+		exit(EXIT_FAILURE);
+	}
+
+	this->mmap_base = mmap(NULL, APB_SIZE, PROT_READ, MAP_SHARED, this->fd, APB_BASE_MEMORY);
+	if (this->mmap_base == MAP_FAILED)
+	{
+		fprintf(stderr, "ERROR: mmap failed; got %s\n", strerror(errno));
+		close(this->fd);
+		fclose(this->out);
+		exit(EXIT_FAILURE);
+	}
+
+	if(this->out) {
+		fprintf(this->out, "sys_curr,a57_curr,a53_curr,gpu_curr,"
+				   "sys_volt,a57_volt,a53_volt,gpu_volt,"
+				   "sys_pow,a57_pow,a53_pow,gpu_pow,"
+				   "sys_cenr,a57_cenr,a53_cenr,gpu_cenr\n");
+	}
+}
+
+void emeter_read_measurements(struct emeter *this, struct reading *reading)
+{
+	uint32_t *buffer = (uint32_t *)this->mmap_base;
+	reading->sys_adc_ch0_pm1_sys = (double)(CMASK & buffer[BASE_INDEX+0]) / SYS_ADC_CH0_PM1_SYS_SCALE;
+	reading->sys_adc_ch1_pm2_a57 = (double)(CMASK & buffer[BASE_INDEX+1]) / SYS_ADC_CH1_PM2_A57_SCALE;
+	reading->sys_adc_ch2_pm3_a53 = (double)(CMASK & buffer[BASE_INDEX+2]) / SYS_ADC_CH2_PM3_A53_SCALE;
+	reading->sys_adc_ch3_pm4_gpu = (double)(CMASK & buffer[BASE_INDEX+3]) / SYS_ADC_CH3_PM4_GPU_SCALE;
+	reading->sys_adc_ch4_vsys = (double)(VMASK & buffer[BASE_INDEX+4]) / SYS_ADC_CH4_VSYS_SCALE;
+	reading->sys_adc_ch5_va57 = (double)(VMASK & buffer[BASE_INDEX+5]) / SYS_ADC_CH5_VA57_SCALE;
+	reading->sys_adc_ch6_va53 = (double)(VMASK & buffer[BASE_INDEX+6]) / SYS_ADC_CH6_VA53_SCALE;
+	reading->sys_adc_ch7_vgpu = (double)(VMASK & buffer[BASE_INDEX+7]) / SYS_ADC_CH7_VGPU_SCALE;
+	reading->sys_pow_ch04_sys = (double)(PMASK & buffer[BASE_INDEX+8]) / SYS_POW_CH04_SYS_SCALE;
+	reading->sys_pow_ch15_a57 = (double)(PMASK & buffer[BASE_INDEX+9]) / SYS_POW_CH15_A57_SCALE;
+	reading->sys_pow_ch26_a53 = (double)(PMASK & buffer[BASE_INDEX+10]) / SYS_POW_CH26_A53_SCALE;
+	reading->sys_pow_ch37_gpu = (double)(PMASK & buffer[BASE_INDEX+11]) / SYS_POW_CH37_GPU_SCALE;
+	reading->sys_enm_ch0_sys = (double)join_64bit_register(buffer, BASE_INDEX+12) / SYS_ENM_CH0_SYS_SCALE;
+	reading->sys_enm_ch1_a57 = (double)join_64bit_register(buffer, BASE_INDEX+14) / SYS_ENM_CH1_A57_SCALE;
+	reading->sys_enm_ch0_a53 = (double)join_64bit_register(buffer, BASE_INDEX+16) / SYS_ENM_CH0_A53_SCALE;
+	reading->sys_enm_ch0_gpu = (double)join_64bit_register(buffer, BASE_INDEX+18) / SYS_ENM_CH0_GPU_SCALE;
+}
+
+void emeter_take_reading(struct emeter *this)
+{
+	static struct reading reading;
+	static int error_count = 0;
+	emeter_read_measurements(this, &reading);
+	int ret = fprintf(this->out, "%f,%f,%f,%f,%f,%f,%f,%f,%f,%f,%f,%f,%f,%f,%f,%f\n",
+			reading.sys_adc_ch0_pm1_sys,
+			reading.sys_adc_ch1_pm2_a57,
+			reading.sys_adc_ch2_pm3_a53,
+			reading.sys_adc_ch3_pm4_gpu,
+			reading.sys_adc_ch4_vsys,
+			reading.sys_adc_ch5_va57,
+			reading.sys_adc_ch6_va53,
+			reading.sys_adc_ch7_vgpu,
+			reading.sys_pow_ch04_sys,
+			reading.sys_pow_ch15_a57,
+			reading.sys_pow_ch26_a53,
+			reading.sys_pow_ch37_gpu,
+			reading.sys_enm_ch0_sys,
+			reading.sys_enm_ch1_a57,
+			reading.sys_enm_ch0_a53,
+			reading.sys_enm_ch0_gpu);
+	if (ret < 0)
+	{
+		fprintf(stderr, "ERROR: while writing a meter reading: %s\n", strerror(errno));
+		if (++error_count > ERROR_THRESHOLD)
+			exit(EXIT_FAILURE);
+	}
+}
+
+void emeter_finalize(struct emeter *this)
+{
+	if (munmap(this->mmap_base, APB_SIZE) == -1) 
+	{
+		// Report the error but don't do anything else, as the emeter is not
+		// used after this point anyway.
+		fprintf(stderr, "ERROR: munmap failed; got %s\n", strerror(errno));
+	}
+	close(this->fd);
+	fclose(this->out);
+}
+
+// -------------------------------------- /emeter ----------------------------------------------------
+
+volatile sig_atomic_t done = 0;
+
+void term_handler(int signum)
+{
+	done = 1;
+}
+
+void sigalrm_handler(int signum)
+{
+	done = 1;
+}
+
+
+int main(int argc, char *argv[])
+{
+	struct sigaction action;
+	memset(&action, 0, sizeof(struct sigaction));
+	action.sa_handler = term_handler;
+	sigaction(SIGTERM, &action, NULL);
+
+	struct config config;
+	struct emeter emeter;
+	config_init(&config, argc, argv);
+	emeter_init(&emeter, config.output_file);
+
+	if (0 != config.duration_in_sec)
+	{
+		/* Arm the alarm only if a non-zero duration was specified; otherwise
+		   run until a SIGTERM is received from the user. */
+		/* Install the signal handler first... */
+		signal(SIGALRM, sigalrm_handler);
+		/* ...then set the alarm for the duration requested by the user. */
+		alarm(config.duration_in_sec);
+
+	}
+	/* With an output file, poll continuously; otherwise emit a single reading to stdout. */
+	if(config.output_file)
+	{
+		struct timespec remaining;
+		while (!done)
+		{
+			emeter_take_reading(&emeter);
+			nsleep(&config.period, &remaining);
+		}
+	} else {
+		emeter_take_reading(&emeter);
+	}
+
+	emeter_finalize(&emeter);
+	return EXIT_SUCCESS;
+}
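On the target, readenergy reads the energy registers through /dev/mem, so it must run as root; without -o it prints the CSV header plus a single reading to stdout and exits. A quick usage sketch with illustrative paths:

    ./readenergy                                    # one-shot reading to stdout
    ./readenergy -t 100 -d 30 -o /root/energy.csv   # sample every 100 ms for 30 s
    ./readenergy -t 100 -o /root/energy.csv &       # or sample until stopped...
    kill -TERM $!                                   # ...SIGTERM ends the loop and closes the file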