Support Galaxy Nexus, Nexus 4/5/9
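
This adds mock tests for four devices: Galaxy Nexus (TI OMAP 4460, 2x
Cortex-A9), Nexus 4 (Qualcomm APQ8064, 4x Krait), Nexus 5 (Qualcomm
MSM8974, 4x Krait), and Nexus 9 (NVIDIA Tegra K1, 2x Denver). Supporting
them requires three detection changes:

 - hard-coded cache parameters for the Denver microarchitecture in
   src/arm/cache.c;
 - recognition of the literal "CPU architecture: AArch64" string that
   early 64-bit kernels write to /proc/cpuinfo, plus hardening of the
   numeric architecture parser (and a processor_number -> processor_count
   rename in parse_line to match what the variable actually tracks);
 - forcing cpuinfo_isa.idiv on Qualcomm Krait cores, whose kernels may
   omit IDIV from the Features line even though the hardware supports it.

For reviewers unfamiliar with the public API that the new tests exercise,
a minimal consumer looks roughly like the sketch below. It is illustrative
only, not part of the patch, and uses only entry points that appear in the
tests:

	#include <cstdint>
	#include <cstdio>

	#include <cpuinfo.h>

	int main() {
		cpuinfo_initialize();
		std::printf("processors: %u\n", (unsigned) cpuinfo_processors_count);
		for (uint32_t i = 0; i < cpuinfo_processors_count; i++) {
			std::printf("  processor %u: vendor=%d, uarch=%d\n",
				(unsigned) i,
				(int) cpuinfo_processors[i].vendor,
				(int) cpuinfo_processors[i].uarch);
		}
		/* Cache queries return a count plus an array of instances. */
		const cpuinfo_caches l2 = cpuinfo_get_l2_cache();
		for (uint32_t k = 0; k < l2.count; k++) {
			std::printf("  L2 #%u: %u bytes, %u-way, %u-byte lines\n",
				(unsigned) k,
				(unsigned) l2.instances[k].size,
				(unsigned) l2.instances[k].associativity,
				(unsigned) l2.instances[k].line_size);
		}
		return 0;
	}
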
diff --git a/configure.py b/configure.py
index 7099360..032842a 100755
--- a/configure.py
+++ b/configure.py
@@ -68,6 +68,10 @@
                     build.unittest("jetson-tx1-test", build.cxx("jetson-tx1.cc"))
 
                     build.unittest("nexus-s-test", build.cxx("nexus-s.cc"))
+                    build.unittest("galaxy-nexus-test", build.cxx("galaxy-nexus.cc"))
+                    build.unittest("nexus4-test", build.cxx("nexus4.cc"))
+                    build.unittest("nexus5-test", build.cxx("nexus5.cc"))
+                    build.unittest("nexus9-test", build.cxx("nexus9.cc"))
 
     return build
 
diff --git a/src/arm/cache.c b/src/arm/cache.c
index 727cc7d..5615d63 100644
--- a/src/arm/cache.c
+++ b/src/arm/cache.c
@@ -524,6 +524,32 @@
 				.line_size = 64 /* assume same as Krait */
 			};
 			break;
+		case cpuinfo_uarch_denver:
+			/*
+			 * The Denver chip includes a 128KB, 4-way level 1 instruction cache and a 64KB, 4-way level 1 data cache
+			 * per core, plus a 2MB, 16-way level 2 cache that services both cores. [1]
+			 *
+			 * All the caches have 64-byte lines. [2]
+			 *
+			 * [1] http://www.pcworld.com/article/2463900/nvidia-reveals-pc-like-performance-for-denver-tegra-k1.html
+			 * [2] http://linleygroup.com/newsletters/newsletter_detail.php?num=5205&year=2014
+			 */
+			*l1i = (struct cpuinfo_cache) {
+				.size = 128 * 1024,
+				.associativity = 4,
+				.line_size = 64
+			};
+			*l1d = (struct cpuinfo_cache) {
+				.size = 64 * 1024,
+				.associativity = 4,
+				.line_size = 64
+			};
+			*l2 = (struct cpuinfo_cache) {
+				.size = 2 * 1024 * 1024,
+				.associativity = 16,
+				.line_size = 64
+			};
+			break;
 		case cpuinfo_uarch_mongoose:
 			/*
 			 * - "Moving past branch prediction we can see some elements of how the cache is set up for the L1 I$,
diff --git a/src/arm/linux/cpuinfo.c b/src/arm/linux/cpuinfo.c
index 3ead6c7..960cb7f 100644
--- a/src/arm/linux/cpuinfo.c
+++ b/src/arm/linux/cpuinfo.c
@@ -245,6 +245,15 @@
 	struct proc_cpuinfo proc_cpuinfo[restrict static 1])
 {
 	const size_t cpu_architecture_length = (size_t) (cpu_architecture_end - cpu_architecture_start);
+	/* Early AArch64 kernels report "CPU architecture: AArch64" instead of the numeric value 8 */
+	if (cpu_architecture_length == 7) {
+		if (memcmp(cpu_architecture_start, "AArch64", cpu_architecture_length) == 0) {
+			proc_cpuinfo->architecture.version = 8;
+			proc_cpuinfo->valid_mask |= PROC_CPUINFO_VALID_ARCHITECTURE;
+			return;
+		}
+	}
+
 
 	uint32_t architecture = 0;
 	const char* cpu_architecture_ptr = cpu_architecture_start;
@@ -259,31 +268,39 @@
 		architecture = architecture * 10 + digit;
 	}
 
-	if (architecture != 0) {
-		proc_cpuinfo->architecture.version = architecture;
-		proc_cpuinfo->valid_mask |= PROC_CPUINFO_VALID_ARCHITECTURE;
+	if (cpu_architecture_ptr == cpu_architecture_start) {
+		cpuinfo_log_warning("CPU architecture %.*s in /proc/cpuinfo is ignored due to non-digit at the beginning of the string",
+			(int) cpu_architecture_length, cpu_architecture_start);
+	} else {
+		if (architecture != 0) {
+			proc_cpuinfo->architecture.version = architecture;
+			proc_cpuinfo->valid_mask |= PROC_CPUINFO_VALID_ARCHITECTURE;
 
-		for (; cpu_architecture_ptr != cpu_architecture_end; cpu_architecture_ptr++) {
-			const char feature = *cpu_architecture_ptr;
-			switch (feature) {
-				case 'T':
-					proc_cpuinfo->architecture.flags |= PROC_CPUINFO_ARCH_T;
-					break;
-				case 'E':
-					proc_cpuinfo->architecture.flags |= PROC_CPUINFO_ARCH_E;
-					break;
-				case 'J':
-					proc_cpuinfo->architecture.flags |= PROC_CPUINFO_ARCH_J;
-					break;
-				case ' ':
-				case '\t':
-					/* Ignore whitespace at the end */
-					break;
-				default:
-					cpuinfo_log_warning("skipped unknown architectural feature '%c' for ARMv%"PRIu32,
-						feature, architecture);
-					break;
+			for (; cpu_architecture_ptr != cpu_architecture_end; cpu_architecture_ptr++) {
+				const char feature = *cpu_architecture_ptr;
+				switch (feature) {
+					case 'T':
+						proc_cpuinfo->architecture.flags |= PROC_CPUINFO_ARCH_T;
+						break;
+					case 'E':
+						proc_cpuinfo->architecture.flags |= PROC_CPUINFO_ARCH_E;
+						break;
+					case 'J':
+						proc_cpuinfo->architecture.flags |= PROC_CPUINFO_ARCH_J;
+						break;
+					case ' ':
+					case '\t':
+						/* Ignore whitespace at the end */
+						break;
+					default:
+						cpuinfo_log_warning("skipped unknown architectural feature '%c' for ARMv%"PRIu32,
+							feature, architecture);
+						break;
+				}
 			}
+		} else {
+			cpuinfo_log_warning("CPU architecture %.*s in /proc/cpuinfo is ignored due to invalid value (0)",
+				(int) cpu_architecture_length, cpu_architecture_start);
 		}
 	}
 }
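
With this change the parser covers the known shapes of the field; for
illustration (example values, not from a specific device):

	CPU architecture: 7        -> version 7
	CPU architecture: 5TEJ     -> version 5, flags T, E, J
	CPU architecture: AArch64  -> version 8 (early 64-bit kernels)
	CPU architecture: ?(13)    -> warned and ignored (non-digit start)
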
@@ -543,12 +560,12 @@
 static uint32_t parse_line(
 	const char* line_start,
 	const char* line_end,
-	uint32_t processor_number,
+	uint32_t processor_count,
 	struct proc_cpuinfo* proc_cpuinfo)
 {
 	/* Empty line. Skip. */
 	if (line_start == line_end) {
-		return processor_number;
+		return processor_count;
 	}
 	
 	/* Search for ':' on the line. */
@@ -562,7 +579,7 @@
 	if (separator == line_end) {
 		cpuinfo_log_warning("Line %.*s in /proc/cpuinfo is ignored: key/value separator ':' not found",
 			(int) (line_end - line_start), line_start);
-		return processor_number;
+		return processor_count;
 	}
 
 	/* Skip trailing spaces in key part. */
@@ -576,7 +593,7 @@
 	if (key_end == line_start) {
 		cpuinfo_log_warning("Line %.*s in /proc/cpuinfo is ignored: key contains only spaces",
 			(int) (line_end - line_start), line_start);
-		return processor_number;
+		return processor_count;
 	}
 
 	/* Skip leading spaces in value part. */
@@ -590,7 +607,7 @@
 	if (value_start == line_end) {
 		cpuinfo_log_warning("Line %.*s in /proc/cpuinfo is ignored: value contains only spaces",
 			(int) (line_end - line_start), line_start);
-		return processor_number;
+		return processor_count;
 	}
 
 	/* Skip trailing spaces in value part (if any) */
@@ -659,13 +676,13 @@
 				const uint32_t new_processor_number =
 					parse_processor_number(value_start, value_end, proc_cpuinfo);
 				const uint32_t new_processors_count = new_processor_number + 1;
-				if (new_processors_count <= processor_number && processor_number != 0) {
+				if (new_processor_number < processor_count) {
 					cpuinfo_log_warning("ignored unexpectedly low processor number %"PRIu32" following processor %"PRIu32" in /proc/cpuinfo",
-						new_processor_number, processor_number);
+						new_processor_number, processor_count - 1);
 				} else {
-					if (new_processors_count > processor_number + 1) {
+					if (new_processor_number > processor_count) {
 						cpuinfo_log_info("unexpectedly high processor number %"PRIu32" following processor %"PRIu32" in /proc/cpuinfo",
-							new_processor_number, processor_number);
+							new_processor_number, processor_count - 1);
 						return new_processors_count;
 					}
 					return new_processors_count;
@@ -724,7 +741,7 @@
 			cpuinfo_log_debug("unknown /proc/cpuinfo key: %.*s", (int) key_length, line_start);
 
 	}
-	return processor_number;
+	return processor_count;
 }
 
 struct proc_cpuinfo* cpuinfo_arm_linux_parse_proc_cpuinfo(uint32_t processors_count_ptr[restrict static 1]) {
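
The processor_number -> processor_count rename makes the invariant
explicit: the value threaded through parse_line is the number of
processors seen so far (highest processor number + 1), not the last
processor number. A hypothetical trace over a four-core log:

	input line       processor_count: before -> after
	processor : 0    0 -> 1
	processor : 1    1 -> 2
	processor : 3    2 -> 4   (gap logged as info, count jumps)
	processor : 2    4 -> 4   (low number logged as warning, ignored)
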
diff --git a/src/arm/linux/isa.c b/src/arm/linux/isa.c
index 9f061f2..2b27845 100644
--- a/src/arm/linux/isa.c
+++ b/src/arm/linux/isa.c
@@ -114,7 +114,17 @@
 
 		if ((features & PROC_CPUINFO_FEATURE_IDIV) == PROC_CPUINFO_FEATURE_IDIV) {
 			cpuinfo_isa.idiv = true;
+		} else {
+			/* Kernels on Qualcomm Krait may be misconfigured and fail to report IDIV support */
+			if (cpu_implementer == 'Q') {
+				switch (cpu_part) {
+					case 0x04D: /* Dual-core Krait */
+					case 0x06F: /* Quad-core Krait */
+						cpuinfo_isa.idiv = true;
+				}
+			}
 		}
+
 		const uint32_t vfp_mask = \
 			PROC_CPUINFO_FEATURE_VFP | PROC_CPUINFO_FEATURE_VFPV3 | PROC_CPUINFO_FEATURE_VFPV3D16 | \
 			PROC_CPUINFO_FEATURE_VFPD32 | PROC_CPUINFO_FEATURE_VFPV4 | PROC_CPUINFO_FEATURE_NEON;
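
cpu_implementer is compared against 'Q' because ASCII 'Q' is 0x51,
Qualcomm's ARM implementer code. On an affected device the relevant
/proc/cpuinfo fields look roughly like this (illustrative excerpt; note
that idiva/idivt are missing from Features even though Krait supports
them):

	Features        : swp half thumb fastmult vfp edsp neon vfpv3 tls vfpv4
	CPU implementer : 0x51
	CPU architecture: 7
	CPU part        : 0x06f
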
diff --git a/test/galaxy-nexus.cc b/test/galaxy-nexus.cc
new file mode 100644
index 0000000..d1c8df9
--- /dev/null
+++ b/test/galaxy-nexus.cc
@@ -0,0 +1,316 @@
+#include <gtest/gtest.h>
+
+#include <cpuinfo.h>
+#include <cpuinfo-mock.h>
+
+
+TEST(PROCESSORS, count) {
+	ASSERT_EQ(2, cpuinfo_processors_count);
+}
+
+TEST(PROCESSORS, non_null) {
+	ASSERT_TRUE(cpuinfo_processors);
+}
+
+TEST(PROCESSORS, vendor_arm) {
+	for (uint32_t i = 0; i < cpuinfo_processors_count; i++) {
+		ASSERT_EQ(cpuinfo_vendor_arm, cpuinfo_processors[i].vendor);
+	}
+}
+
+TEST(PROCESSORS, uarch_cortex_a9) {
+	for (uint32_t i = 0; i < cpuinfo_processors_count; i++) {
+		ASSERT_EQ(cpuinfo_uarch_cortex_a9, cpuinfo_processors[i].uarch);
+	}
+}
+
+TEST(ISA, thumb) {
+	ASSERT_TRUE(cpuinfo_isa.thumb);
+}
+
+TEST(ISA, thumb2) {
+	ASSERT_TRUE(cpuinfo_isa.thumb2);
+}
+
+TEST(ISA, thumbee) {
+	ASSERT_TRUE(cpuinfo_isa.thumbee);
+}
+
+TEST(ISA, jazelle) {
+	ASSERT_FALSE(cpuinfo_isa.jazelle);
+}
+
+TEST(ISA, armv5e) {
+	ASSERT_TRUE(cpuinfo_isa.armv5e);
+}
+
+TEST(ISA, armv6) {
+	ASSERT_TRUE(cpuinfo_isa.armv6);
+}
+
+TEST(ISA, armv6k) {
+	ASSERT_TRUE(cpuinfo_isa.armv6k);
+}
+
+TEST(ISA, armv7) {
+	ASSERT_TRUE(cpuinfo_isa.armv7);
+}
+
+TEST(ISA, armv7mp) {
+	ASSERT_TRUE(cpuinfo_isa.armv7mp);
+}
+
+TEST(ISA, idiv) {
+	ASSERT_FALSE(cpuinfo_isa.idiv);
+}
+
+TEST(ISA, vfpv2) {
+	ASSERT_FALSE(cpuinfo_isa.vfpv2);
+}
+
+TEST(ISA, vfpv3) {
+	ASSERT_TRUE(cpuinfo_isa.vfpv3);
+}
+
+TEST(ISA, d32) {
+	ASSERT_TRUE(cpuinfo_isa.d32);
+}
+
+TEST(ISA, fp16) {
+	ASSERT_TRUE(cpuinfo_isa.fp16);
+}
+
+TEST(ISA, fma) {
+	ASSERT_FALSE(cpuinfo_isa.fma);
+}
+
+TEST(ISA, wmmx) {
+	ASSERT_FALSE(cpuinfo_isa.wmmx);
+}
+
+TEST(ISA, wmmx2) {
+	ASSERT_FALSE(cpuinfo_isa.wmmx2);
+}
+
+TEST(ISA, neon) {
+	ASSERT_TRUE(cpuinfo_isa.neon);
+}
+
+TEST(ISA, aes) {
+	ASSERT_FALSE(cpuinfo_isa.aes);
+}
+
+TEST(ISA, sha1) {
+	ASSERT_FALSE(cpuinfo_isa.sha1);
+}
+
+TEST(ISA, sha2) {
+	ASSERT_FALSE(cpuinfo_isa.sha2);
+}
+
+TEST(ISA, pmull) {
+	ASSERT_FALSE(cpuinfo_isa.pmull);
+}
+
+TEST(ISA, crc32) {
+	ASSERT_FALSE(cpuinfo_isa.crc32);
+}
+
+TEST(L1I, count) {
+	cpuinfo_caches l1i = cpuinfo_get_l1i_cache();
+	ASSERT_EQ(2, l1i.count);
+}
+
+TEST(L1I, non_null) {
+	cpuinfo_caches l1i = cpuinfo_get_l1i_cache();
+	ASSERT_TRUE(l1i.instances);
+}
+
+TEST(L1I, size) {
+	cpuinfo_caches l1i = cpuinfo_get_l1i_cache();
+	for (uint32_t k = 0; k < l1i.count; k++) {
+		ASSERT_EQ(32 * 1024, l1i.instances[k].size);
+	}
+}
+
+TEST(L1I, associativity) {
+	cpuinfo_caches l1i = cpuinfo_get_l1i_cache();
+	for (uint32_t k = 0; k < l1i.count; k++) {
+		ASSERT_EQ(4, l1i.instances[k].associativity);
+	}
+}
+
+TEST(L1I, sets) {
+	cpuinfo_caches l1i = cpuinfo_get_l1i_cache();
+	for (uint32_t k = 0; k < l1i.count; k++) {
+		ASSERT_EQ(256, l1i.instances[k].sets);
+	}
+}
+
+TEST(L1I, partitions) {
+	cpuinfo_caches l1i = cpuinfo_get_l1i_cache();
+	for (uint32_t k = 0; k < l1i.count; k++) {
+		ASSERT_EQ(1, l1i.instances[k].partitions);
+	}
+}
+
+TEST(L1I, line_size) {
+	cpuinfo_caches l1i = cpuinfo_get_l1i_cache();
+	for (uint32_t k = 0; k < l1i.count; k++) {
+		ASSERT_EQ(32, l1i.instances[k].line_size);
+	}
+}
+
+TEST(L1I, flags) {
+	cpuinfo_caches l1i = cpuinfo_get_l1i_cache();
+	for (uint32_t k = 0; k < l1i.count; k++) {
+		ASSERT_EQ(0, l1i.instances[k].flags);
+	}
+}
+
+TEST(L1I, processors) {
+	cpuinfo_caches l1i = cpuinfo_get_l1i_cache();
+	for (uint32_t k = 0; k < l1i.count; k++) {
+		ASSERT_EQ(k, l1i.instances[k].thread_start);
+		ASSERT_EQ(1, l1i.instances[k].thread_count);
+	}
+}
+
+TEST(L1D, count) {
+	cpuinfo_caches l1d = cpuinfo_get_l1d_cache();
+	ASSERT_EQ(2, l1d.count);
+}
+
+TEST(L1D, non_null) {
+	cpuinfo_caches l1d = cpuinfo_get_l1d_cache();
+	ASSERT_TRUE(l1d.instances);
+}
+
+TEST(L1D, size) {
+	cpuinfo_caches l1d = cpuinfo_get_l1d_cache();
+	for (uint32_t k = 0; k < l1d.count; k++) {
+		ASSERT_EQ(32 * 1024, l1d.instances[k].size);
+	}
+}
+
+TEST(L1D, associativity) {
+	cpuinfo_caches l1d = cpuinfo_get_l1d_cache();
+	for (uint32_t k = 0; k < l1d.count; k++) {
+		ASSERT_EQ(4, l1d.instances[k].associativity);
+	}
+}
+
+TEST(L1D, sets) {
+	cpuinfo_caches l1d = cpuinfo_get_l1d_cache();
+	for (uint32_t k = 0; k < l1d.count; k++) {
+		ASSERT_EQ(256, l1d.instances[k].sets);
+	}
+}
+
+TEST(L1D, partitions) {
+	cpuinfo_caches l1d = cpuinfo_get_l1d_cache();
+	for (uint32_t k = 0; k < l1d.count; k++) {
+		ASSERT_EQ(1, l1d.instances[k].partitions);
+	}
+}
+
+TEST(L1D, line_size) {
+	cpuinfo_caches l1d = cpuinfo_get_l1d_cache();
+	for (uint32_t k = 0; k < l1d.count; k++) {
+		ASSERT_EQ(32, l1d.instances[k].line_size);
+	}
+}
+
+TEST(L1D, flags) {
+	cpuinfo_caches l1d = cpuinfo_get_l1d_cache();
+	for (uint32_t k = 0; k < l1d.count; k++) {
+		ASSERT_EQ(0, l1d.instances[k].flags);
+	}
+}
+
+TEST(L1D, processors) {
+	cpuinfo_caches l1d = cpuinfo_get_l1d_cache();
+	for (uint32_t k = 0; k < l1d.count; k++) {
+		ASSERT_EQ(k, l1d.instances[k].thread_start);
+		ASSERT_EQ(1, l1d.instances[k].thread_count);
+	}
+}
+
+TEST(L2, count) {
+	cpuinfo_caches l2 = cpuinfo_get_l2_cache();
+	ASSERT_EQ(1, l2.count);
+}
+
+TEST(L2, non_null) {
+	cpuinfo_caches l2 = cpuinfo_get_l2_cache();
+	ASSERT_TRUE(l2.instances);
+}
+
+TEST(L2, size) {
+	cpuinfo_caches l2 = cpuinfo_get_l2_cache();
+	for (uint32_t k = 0; k < l2.count; k++) {
+		ASSERT_EQ(1024 * 1024, l2.instances[k].size);
+	}
+}
+
+TEST(L2, associativity) {
+	cpuinfo_caches l2 = cpuinfo_get_l2_cache();
+	for (uint32_t k = 0; k < l2.count; k++) {
+		ASSERT_EQ(16, l2.instances[k].associativity);
+	}
+}
+
+TEST(L2, sets) {
+	cpuinfo_caches l2 = cpuinfo_get_l2_cache();
+	for (uint32_t k = 0; k < l2.count; k++) {
+		ASSERT_EQ(2048, l2.instances[k].sets);
+	}
+}
+
+TEST(L2, partitions) {
+	cpuinfo_caches l2 = cpuinfo_get_l2_cache();
+	for (uint32_t k = 0; k < l2.count; k++) {
+		ASSERT_EQ(1, l2.instances[k].partitions);
+	}
+}
+
+TEST(L2, line_size) {
+	cpuinfo_caches l2 = cpuinfo_get_l2_cache();
+	for (uint32_t k = 0; k < l2.count; k++) {
+		ASSERT_EQ(32, l2.instances[k].line_size);
+	}
+}
+
+TEST(L2, flags) {
+	cpuinfo_caches l2 = cpuinfo_get_l2_cache();
+	for (uint32_t k = 0; k < l2.count; k++) {
+		ASSERT_EQ(0, l2.instances[k].flags);
+	}
+}
+
+TEST(L2, processors) {
+	cpuinfo_caches l2 = cpuinfo_get_l2_cache();
+	for (uint32_t k = 0; k < l2.count; k++) {
+		ASSERT_EQ(0, l2.instances[k].thread_start);
+		ASSERT_EQ(2, l2.instances[k].thread_count);
+	}
+}
+
+TEST(L3, none) {
+	cpuinfo_caches l3 = cpuinfo_get_l3_cache();
+	ASSERT_EQ(0, l3.count);
+	ASSERT_FALSE(l3.instances);
+}
+
+TEST(L4, none) {
+	cpuinfo_caches l4 = cpuinfo_get_l4_cache();
+	ASSERT_EQ(0, l4.count);
+	ASSERT_FALSE(l4.instances);
+}
+
+int main(int argc, char* argv[]) {
+	cpuinfo_set_proc_cpuinfo_path("test/cpuinfo/galaxy-nexus.log");
+	cpuinfo_initialize();
+	::testing::InitGoogleTest(&argc, argv);
+	return RUN_ALL_TESTS();
+}
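
All four new test binaries follow this same template: cpuinfo-mock.h
exposes cpuinfo_set_proc_cpuinfo_path(), which points the parser at a
recorded /proc/cpuinfo dump instead of the live file, and the fixtures
then pin every value the library should detect for that device. The
galaxy-nexus.log dump itself is not part of this diff, but a capture from
an OMAP 4460 device looks roughly like this (illustrative excerpt,
consistent with the thumbee=true, idiv=false assertions above):

	Processor       : ARMv7 Processor rev 10 (v7l)
	processor       : 0
	processor       : 1
	Features        : swp half thumb fastmult vfp edsp thumbee neon vfpv3 tls
	CPU implementer : 0x41
	CPU architecture: 7
	CPU part        : 0xc09
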
diff --git a/test/nexus4.cc b/test/nexus4.cc
new file mode 100644
index 0000000..44a06fc
--- /dev/null
+++ b/test/nexus4.cc
@@ -0,0 +1,316 @@
+#include <gtest/gtest.h>
+
+#include <cpuinfo.h>
+#include <cpuinfo-mock.h>
+
+
+TEST(PROCESSORS, count) {
+	ASSERT_EQ(4, cpuinfo_processors_count);
+}
+
+TEST(PROCESSORS, non_null) {
+	ASSERT_TRUE(cpuinfo_processors);
+}
+
+TEST(PROCESSORS, vendor_qualcomm) {
+	for (uint32_t i = 0; i < cpuinfo_processors_count; i++) {
+		ASSERT_EQ(cpuinfo_vendor_qualcomm, cpuinfo_processors[i].vendor);
+	}
+}
+
+TEST(PROCESSORS, uarch_krait) {
+	for (uint32_t i = 0; i < cpuinfo_processors_count; i++) {
+		ASSERT_EQ(cpuinfo_uarch_krait, cpuinfo_processors[i].uarch);
+	}
+}
+
+TEST(ISA, thumb) {
+	ASSERT_TRUE(cpuinfo_isa.thumb);
+}
+
+TEST(ISA, thumb2) {
+	ASSERT_TRUE(cpuinfo_isa.thumb2);
+}
+
+TEST(ISA, thumbee) {
+	ASSERT_FALSE(cpuinfo_isa.thumbee);
+}
+
+TEST(ISA, jazelle) {
+	ASSERT_FALSE(cpuinfo_isa.jazelle);
+}
+
+TEST(ISA, armv5e) {
+	ASSERT_TRUE(cpuinfo_isa.armv5e);
+}
+
+TEST(ISA, armv6) {
+	ASSERT_TRUE(cpuinfo_isa.armv6);
+}
+
+TEST(ISA, armv6k) {
+	ASSERT_TRUE(cpuinfo_isa.armv6k);
+}
+
+TEST(ISA, armv7) {
+	ASSERT_TRUE(cpuinfo_isa.armv7);
+}
+
+TEST(ISA, armv7mp) {
+	ASSERT_TRUE(cpuinfo_isa.armv7mp);
+}
+
+TEST(ISA, idiv) {
+	ASSERT_TRUE(cpuinfo_isa.idiv);
+}
+
+TEST(ISA, vfpv2) {
+	ASSERT_FALSE(cpuinfo_isa.vfpv2);
+}
+
+TEST(ISA, vfpv3) {
+	ASSERT_TRUE(cpuinfo_isa.vfpv3);
+}
+
+TEST(ISA, d32) {
+	ASSERT_TRUE(cpuinfo_isa.d32);
+}
+
+TEST(ISA, fp16) {
+	ASSERT_TRUE(cpuinfo_isa.fp16);
+}
+
+TEST(ISA, fma) {
+	ASSERT_TRUE(cpuinfo_isa.fma);
+}
+
+TEST(ISA, wmmx) {
+	ASSERT_FALSE(cpuinfo_isa.wmmx);
+}
+
+TEST(ISA, wmmx2) {
+	ASSERT_FALSE(cpuinfo_isa.wmmx2);
+}
+
+TEST(ISA, neon) {
+	ASSERT_TRUE(cpuinfo_isa.neon);
+}
+
+TEST(ISA, aes) {
+	ASSERT_FALSE(cpuinfo_isa.aes);
+}
+
+TEST(ISA, sha1) {
+	ASSERT_FALSE(cpuinfo_isa.sha1);
+}
+
+TEST(ISA, sha2) {
+	ASSERT_FALSE(cpuinfo_isa.sha2);
+}
+
+TEST(ISA, pmull) {
+	ASSERT_FALSE(cpuinfo_isa.pmull);
+}
+
+TEST(ISA, crc32) {
+	ASSERT_FALSE(cpuinfo_isa.crc32);
+}
+
+TEST(L1I, count) {
+	cpuinfo_caches l1i = cpuinfo_get_l1i_cache();
+	ASSERT_EQ(4, l1i.count);
+}
+
+TEST(L1I, non_null) {
+	cpuinfo_caches l1i = cpuinfo_get_l1i_cache();
+	ASSERT_TRUE(l1i.instances);
+}
+
+TEST(L1I, size) {
+	cpuinfo_caches l1i = cpuinfo_get_l1i_cache();
+	for (uint32_t k = 0; k < l1i.count; k++) {
+		ASSERT_EQ(16 * 1024, l1i.instances[k].size);
+	}
+}
+
+TEST(L1I, associativity) {
+	cpuinfo_caches l1i = cpuinfo_get_l1i_cache();
+	for (uint32_t k = 0; k < l1i.count; k++) {
+		ASSERT_EQ(4, l1i.instances[k].associativity);
+	}
+}
+
+TEST(L1I, sets) {
+	cpuinfo_caches l1i = cpuinfo_get_l1i_cache();
+	for (uint32_t k = 0; k < l1i.count; k++) {
+		ASSERT_EQ(64, l1i.instances[k].sets);
+	}
+}
+
+TEST(L1I, partitions) {
+	cpuinfo_caches l1i = cpuinfo_get_l1i_cache();
+	for (uint32_t k = 0; k < l1i.count; k++) {
+		ASSERT_EQ(1, l1i.instances[k].partitions);
+	}
+}
+
+TEST(L1I, line_size) {
+	cpuinfo_caches l1i = cpuinfo_get_l1i_cache();
+	for (uint32_t k = 0; k < l1i.count; k++) {
+		ASSERT_EQ(64, l1i.instances[k].line_size);
+	}
+}
+
+TEST(L1I, flags) {
+	cpuinfo_caches l1i = cpuinfo_get_l1i_cache();
+	for (uint32_t k = 0; k < l1i.count; k++) {
+		ASSERT_EQ(0, l1i.instances[k].flags);
+	}
+}
+
+TEST(L1I, processors) {
+	cpuinfo_caches l1i = cpuinfo_get_l1i_cache();
+	for (uint32_t k = 0; k < l1i.count; k++) {
+		ASSERT_EQ(k, l1i.instances[k].thread_start);
+		ASSERT_EQ(1, l1i.instances[k].thread_count);
+	}
+}
+
+TEST(L1D, count) {
+	cpuinfo_caches l1d = cpuinfo_get_l1d_cache();
+	ASSERT_EQ(4, l1d.count);
+}
+
+TEST(L1D, non_null) {
+	cpuinfo_caches l1d = cpuinfo_get_l1d_cache();
+	ASSERT_TRUE(l1d.instances);
+}
+
+TEST(L1D, size) {
+	cpuinfo_caches l1d = cpuinfo_get_l1d_cache();
+	for (uint32_t k = 0; k < l1d.count; k++) {
+		ASSERT_EQ(16 * 1024, l1d.instances[k].size);
+	}
+}
+
+TEST(L1D, associativity) {
+	cpuinfo_caches l1d = cpuinfo_get_l1d_cache();
+	for (uint32_t k = 0; k < l1d.count; k++) {
+		ASSERT_EQ(4, l1d.instances[k].associativity);
+	}
+}
+
+TEST(L1D, sets) {
+	cpuinfo_caches l1d = cpuinfo_get_l1d_cache();
+	for (uint32_t k = 0; k < l1d.count; k++) {
+		ASSERT_EQ(64, l1d.instances[k].sets);
+	}
+}
+
+TEST(L1D, partitions) {
+	cpuinfo_caches l1d = cpuinfo_get_l1d_cache();
+	for (uint32_t k = 0; k < l1d.count; k++) {
+		ASSERT_EQ(1, l1d.instances[k].partitions);
+	}
+}
+
+TEST(L1D, line_size) {
+	cpuinfo_caches l1d = cpuinfo_get_l1d_cache();
+	for (uint32_t k = 0; k < l1d.count; k++) {
+		ASSERT_EQ(64, l1d.instances[k].line_size);
+	}
+}
+
+TEST(L1D, flags) {
+	cpuinfo_caches l1d = cpuinfo_get_l1d_cache();
+	for (uint32_t k = 0; k < l1d.count; k++) {
+		ASSERT_EQ(0, l1d.instances[k].flags);
+	}
+}
+
+TEST(L1D, processors) {
+	cpuinfo_caches l1d = cpuinfo_get_l1d_cache();
+	for (uint32_t k = 0; k < l1d.count; k++) {
+		ASSERT_EQ(k, l1d.instances[k].thread_start);
+		ASSERT_EQ(1, l1d.instances[k].thread_count);
+	}
+}
+
+TEST(L2, count) {
+	cpuinfo_caches l2 = cpuinfo_get_l2_cache();
+	ASSERT_EQ(1, l2.count);
+}
+
+TEST(L2, non_null) {
+	cpuinfo_caches l2 = cpuinfo_get_l2_cache();
+	ASSERT_TRUE(l2.instances);
+}
+
+TEST(L2, size) {
+	cpuinfo_caches l2 = cpuinfo_get_l2_cache();
+	for (uint32_t k = 0; k < l2.count; k++) {
+		ASSERT_EQ(2 * 1024 * 1024, l2.instances[k].size);
+	}
+}
+
+TEST(L2, associativity) {
+	cpuinfo_caches l2 = cpuinfo_get_l2_cache();
+	for (uint32_t k = 0; k < l2.count; k++) {
+		ASSERT_EQ(8, l2.instances[k].associativity);
+	}
+}
+
+TEST(L2, sets) {
+	cpuinfo_caches l2 = cpuinfo_get_l2_cache();
+	for (uint32_t k = 0; k < l2.count; k++) {
+		ASSERT_EQ(2048, l2.instances[k].sets);
+	}
+}
+
+TEST(L2, partitions) {
+	cpuinfo_caches l2 = cpuinfo_get_l2_cache();
+	for (uint32_t k = 0; k < l2.count; k++) {
+		ASSERT_EQ(1, l2.instances[k].partitions);
+	}
+}
+
+TEST(L2, line_size) {
+	cpuinfo_caches l2 = cpuinfo_get_l2_cache();
+	for (uint32_t k = 0; k < l2.count; k++) {
+		ASSERT_EQ(128, l2.instances[k].line_size);
+	}
+}
+
+TEST(L2, flags) {
+	cpuinfo_caches l2 = cpuinfo_get_l2_cache();
+	for (uint32_t k = 0; k < l2.count; k++) {
+		ASSERT_EQ(0, l2.instances[k].flags);
+	}
+}
+
+TEST(L2, processors) {
+	cpuinfo_caches l2 = cpuinfo_get_l2_cache();
+	for (uint32_t k = 0; k < l2.count; k++) {
+		ASSERT_EQ(0, l2.instances[k].thread_start);
+		ASSERT_EQ(4, l2.instances[k].thread_count);
+	}
+}
+
+TEST(L3, none) {
+	cpuinfo_caches l3 = cpuinfo_get_l3_cache();
+	ASSERT_EQ(0, l3.count);
+	ASSERT_FALSE(l3.instances);
+}
+
+TEST(L4, none) {
+	cpuinfo_caches l4 = cpuinfo_get_l4_cache();
+	ASSERT_EQ(0, l4.count);
+	ASSERT_FALSE(l4.instances);
+}
+
+int main(int argc, char* argv[]) {
+	cpuinfo_set_proc_cpuinfo_path("test/cpuinfo/nexus4.log");
+	cpuinfo_initialize();
+	::testing::InitGoogleTest(&argc, argv);
+	return RUN_ALL_TESTS();
+}
diff --git a/test/nexus5.cc b/test/nexus5.cc
new file mode 100644
index 0000000..ad65d56
--- /dev/null
+++ b/test/nexus5.cc
@@ -0,0 +1,316 @@
+#include <gtest/gtest.h>
+
+#include <cpuinfo.h>
+#include <cpuinfo-mock.h>
+
+
+TEST(PROCESSORS, count) {
+	ASSERT_EQ(4, cpuinfo_processors_count);
+}
+
+TEST(PROCESSORS, non_null) {
+	ASSERT_TRUE(cpuinfo_processors);
+}
+
+TEST(PROCESSORS, vendor_qualcomm) {
+	for (uint32_t i = 0; i < cpuinfo_processors_count; i++) {
+		ASSERT_EQ(cpuinfo_vendor_qualcomm, cpuinfo_processors[i].vendor);
+	}
+}
+
+TEST(PROCESSORS, uarch_krait) {
+	for (uint32_t i = 0; i < cpuinfo_processors_count; i++) {
+		ASSERT_EQ(cpuinfo_uarch_krait, cpuinfo_processors[i].uarch);
+	}
+}
+
+TEST(ISA, thumb) {
+	ASSERT_TRUE(cpuinfo_isa.thumb);
+}
+
+TEST(ISA, thumb2) {
+	ASSERT_TRUE(cpuinfo_isa.thumb2);
+}
+
+TEST(ISA, thumbee) {
+	ASSERT_FALSE(cpuinfo_isa.thumbee);
+}
+
+TEST(ISA, jazelle) {
+	ASSERT_FALSE(cpuinfo_isa.jazelle);
+}
+
+TEST(ISA, armv5e) {
+	ASSERT_TRUE(cpuinfo_isa.armv5e);
+}
+
+TEST(ISA, armv6) {
+	ASSERT_TRUE(cpuinfo_isa.armv6);
+}
+
+TEST(ISA, armv6k) {
+	ASSERT_TRUE(cpuinfo_isa.armv6k);
+}
+
+TEST(ISA, armv7) {
+	ASSERT_TRUE(cpuinfo_isa.armv7);
+}
+
+TEST(ISA, armv7mp) {
+	ASSERT_TRUE(cpuinfo_isa.armv7mp);
+}
+
+TEST(ISA, idiv) {
+	ASSERT_TRUE(cpuinfo_isa.idiv);
+}
+
+TEST(ISA, vfpv2) {
+	ASSERT_FALSE(cpuinfo_isa.vfpv2);
+}
+
+TEST(ISA, vfpv3) {
+	ASSERT_TRUE(cpuinfo_isa.vfpv3);
+}
+
+TEST(ISA, d32) {
+	ASSERT_TRUE(cpuinfo_isa.d32);
+}
+
+TEST(ISA, fp16) {
+	ASSERT_TRUE(cpuinfo_isa.fp16);
+}
+
+TEST(ISA, fma) {
+	ASSERT_TRUE(cpuinfo_isa.fma);
+}
+
+TEST(ISA, wmmx) {
+	ASSERT_FALSE(cpuinfo_isa.wmmx);
+}
+
+TEST(ISA, wmmx2) {
+	ASSERT_FALSE(cpuinfo_isa.wmmx2);
+}
+
+TEST(ISA, neon) {
+	ASSERT_TRUE(cpuinfo_isa.neon);
+}
+
+TEST(ISA, aes) {
+	ASSERT_FALSE(cpuinfo_isa.aes);
+}
+
+TEST(ISA, sha1) {
+	ASSERT_FALSE(cpuinfo_isa.sha1);
+}
+
+TEST(ISA, sha2) {
+	ASSERT_FALSE(cpuinfo_isa.sha2);
+}
+
+TEST(ISA, pmull) {
+	ASSERT_FALSE(cpuinfo_isa.pmull);
+}
+
+TEST(ISA, crc32) {
+	ASSERT_FALSE(cpuinfo_isa.crc32);
+}
+
+TEST(L1I, count) {
+	cpuinfo_caches l1i = cpuinfo_get_l1i_cache();
+	ASSERT_EQ(4, l1i.count);
+}
+
+TEST(L1I, non_null) {
+	cpuinfo_caches l1i = cpuinfo_get_l1i_cache();
+	ASSERT_TRUE(l1i.instances);
+}
+
+TEST(L1I, size) {
+	cpuinfo_caches l1i = cpuinfo_get_l1i_cache();
+	for (uint32_t k = 0; k < l1i.count; k++) {
+		ASSERT_EQ(16 * 1024, l1i.instances[k].size);
+	}
+}
+
+TEST(L1I, associativity) {
+	cpuinfo_caches l1i = cpuinfo_get_l1i_cache();
+	for (uint32_t k = 0; k < l1i.count; k++) {
+		ASSERT_EQ(4, l1i.instances[k].associativity);
+	}
+}
+
+TEST(L1I, sets) {
+	cpuinfo_caches l1i = cpuinfo_get_l1i_cache();
+	for (uint32_t k = 0; k < l1i.count; k++) {
+		ASSERT_EQ(64, l1i.instances[k].sets);
+	}
+}
+
+TEST(L1I, partitions) {
+	cpuinfo_caches l1i = cpuinfo_get_l1i_cache();
+	for (uint32_t k = 0; k < l1i.count; k++) {
+		ASSERT_EQ(1, l1i.instances[k].partitions);
+	}
+}
+
+TEST(L1I, line_size) {
+	cpuinfo_caches l1i = cpuinfo_get_l1i_cache();
+	for (uint32_t k = 0; k < l1i.count; k++) {
+		ASSERT_EQ(64, l1i.instances[k].line_size);
+	}
+}
+
+TEST(L1I, flags) {
+	cpuinfo_caches l1i = cpuinfo_get_l1i_cache();
+	for (uint32_t k = 0; k < l1i.count; k++) {
+		ASSERT_EQ(0, l1i.instances[k].flags);
+	}
+}
+
+TEST(L1I, processors) {
+	cpuinfo_caches l1i = cpuinfo_get_l1i_cache();
+	for (uint32_t k = 0; k < l1i.count; k++) {
+		ASSERT_EQ(k, l1i.instances[k].thread_start);
+		ASSERT_EQ(1, l1i.instances[k].thread_count);
+	}
+}
+
+TEST(L1D, count) {
+	cpuinfo_caches l1d = cpuinfo_get_l1d_cache();
+	ASSERT_EQ(4, l1d.count);
+}
+
+TEST(L1D, non_null) {
+	cpuinfo_caches l1d = cpuinfo_get_l1d_cache();
+	ASSERT_TRUE(l1d.instances);
+}
+
+TEST(L1D, size) {
+	cpuinfo_caches l1d = cpuinfo_get_l1d_cache();
+	for (uint32_t k = 0; k < l1d.count; k++) {
+		ASSERT_EQ(16 * 1024, l1d.instances[k].size);
+	}
+}
+
+TEST(L1D, associativity) {
+	cpuinfo_caches l1d = cpuinfo_get_l1d_cache();
+	for (uint32_t k = 0; k < l1d.count; k++) {
+		ASSERT_EQ(4, l1d.instances[k].associativity);
+	}
+}
+
+TEST(L1D, sets) {
+	cpuinfo_caches l1d = cpuinfo_get_l1d_cache();
+	for (uint32_t k = 0; k < l1d.count; k++) {
+		ASSERT_EQ(64, l1d.instances[k].sets);
+	}
+}
+
+TEST(L1D, partitions) {
+	cpuinfo_caches l1d = cpuinfo_get_l1d_cache();
+	for (uint32_t k = 0; k < l1d.count; k++) {
+		ASSERT_EQ(1, l1d.instances[k].partitions);
+	}
+}
+
+TEST(L1D, line_size) {
+	cpuinfo_caches l1d = cpuinfo_get_l1d_cache();
+	for (uint32_t k = 0; k < l1d.count; k++) {
+		ASSERT_EQ(64, l1d.instances[k].line_size);
+	}
+}
+
+TEST(L1D, flags) {
+	cpuinfo_caches l1d = cpuinfo_get_l1d_cache();
+	for (uint32_t k = 0; k < l1d.count; k++) {
+		ASSERT_EQ(0, l1d.instances[k].flags);
+	}
+}
+
+TEST(L1D, processors) {
+	cpuinfo_caches l1d = cpuinfo_get_l1d_cache();
+	for (uint32_t k = 0; k < l1d.count; k++) {
+		ASSERT_EQ(k, l1d.instances[k].thread_start);
+		ASSERT_EQ(1, l1d.instances[k].thread_count);
+	}
+}
+
+TEST(L2, count) {
+	cpuinfo_caches l2 = cpuinfo_get_l2_cache();
+	ASSERT_EQ(1, l2.count);
+}
+
+TEST(L2, non_null) {
+	cpuinfo_caches l2 = cpuinfo_get_l2_cache();
+	ASSERT_TRUE(l2.instances);
+}
+
+TEST(L2, size) {
+	cpuinfo_caches l2 = cpuinfo_get_l2_cache();
+	for (uint32_t k = 0; k < l2.count; k++) {
+		ASSERT_EQ(2 * 1024 * 1024, l2.instances[k].size);
+	}
+}
+
+TEST(L2, associativity) {
+	cpuinfo_caches l2 = cpuinfo_get_l2_cache();
+	for (uint32_t k = 0; k < l2.count; k++) {
+		ASSERT_EQ(8, l2.instances[k].associativity);
+	}
+}
+
+TEST(L2, sets) {
+	cpuinfo_caches l2 = cpuinfo_get_l2_cache();
+	for (uint32_t k = 0; k < l2.count; k++) {
+		ASSERT_EQ(2048, l2.instances[k].sets);
+	}
+}
+
+TEST(L2, partitions) {
+	cpuinfo_caches l2 = cpuinfo_get_l2_cache();
+	for (uint32_t k = 0; k < l2.count; k++) {
+		ASSERT_EQ(1, l2.instances[k].partitions);
+	}
+}
+
+TEST(L2, line_size) {
+	cpuinfo_caches l2 = cpuinfo_get_l2_cache();
+	for (uint32_t k = 0; k < l2.count; k++) {
+		ASSERT_EQ(128, l2.instances[k].line_size);
+	}
+}
+
+TEST(L2, flags) {
+	cpuinfo_caches l2 = cpuinfo_get_l2_cache();
+	for (uint32_t k = 0; k < l2.count; k++) {
+		ASSERT_EQ(0, l2.instances[k].flags);
+	}
+}
+
+TEST(L2, processors) {
+	cpuinfo_caches l2 = cpuinfo_get_l2_cache();
+	for (uint32_t k = 0; k < l2.count; k++) {
+		ASSERT_EQ(0, l2.instances[k].thread_start);
+		ASSERT_EQ(4, l2.instances[k].thread_count);
+	}
+}
+
+TEST(L3, none) {
+	cpuinfo_caches l3 = cpuinfo_get_l3_cache();
+	ASSERT_EQ(0, l3.count);
+	ASSERT_FALSE(l3.instances);
+}
+
+TEST(L4, none) {
+	cpuinfo_caches l4 = cpuinfo_get_l4_cache();
+	ASSERT_EQ(0, l4.count);
+	ASSERT_FALSE(l4.instances);
+}
+
+int main(int argc, char* argv[]) {
+	cpuinfo_set_proc_cpuinfo_path("test/cpuinfo/nexus5.log");
+	cpuinfo_initialize();
+	::testing::InitGoogleTest(&argc, argv);
+	return RUN_ALL_TESTS();
+}
diff --git a/test/nexus9.cc b/test/nexus9.cc
new file mode 100644
index 0000000..84ec9d8
--- /dev/null
+++ b/test/nexus9.cc
@@ -0,0 +1,316 @@
+#include <gtest/gtest.h>
+
+#include <cpuinfo.h>
+#include <cpuinfo-mock.h>
+
+
+TEST(PROCESSORS, count) {
+	ASSERT_EQ(2, cpuinfo_processors_count);
+}
+
+TEST(PROCESSORS, non_null) {
+	ASSERT_TRUE(cpuinfo_processors);
+}
+
+TEST(PROCESSORS, vendor_nvidia) {
+	for (uint32_t i = 0; i < cpuinfo_processors_count; i++) {
+		ASSERT_EQ(cpuinfo_vendor_nvidia, cpuinfo_processors[i].vendor);
+	}
+}
+
+TEST(PROCESSORS, uarch_denver) {
+	for (uint32_t i = 0; i < cpuinfo_processors_count; i++) {
+		ASSERT_EQ(cpuinfo_uarch_denver, cpuinfo_processors[i].uarch);
+	}
+}
+
+TEST(ISA, thumb) {
+	ASSERT_TRUE(cpuinfo_isa.thumb);
+}
+
+TEST(ISA, thumb2) {
+	ASSERT_TRUE(cpuinfo_isa.thumb2);
+}
+
+TEST(ISA, thumbee) {
+	ASSERT_FALSE(cpuinfo_isa.thumbee);
+}
+
+TEST(ISA, jazelle) {
+	ASSERT_FALSE(cpuinfo_isa.jazelle);
+}
+
+TEST(ISA, armv5e) {
+	ASSERT_TRUE(cpuinfo_isa.armv5e);
+}
+
+TEST(ISA, armv6) {
+	ASSERT_TRUE(cpuinfo_isa.armv6);
+}
+
+TEST(ISA, armv6k) {
+	ASSERT_TRUE(cpuinfo_isa.armv6k);
+}
+
+TEST(ISA, armv7) {
+	ASSERT_TRUE(cpuinfo_isa.armv7);
+}
+
+TEST(ISA, armv7mp) {
+	ASSERT_TRUE(cpuinfo_isa.armv7mp);
+}
+
+TEST(ISA, idiv) {
+	ASSERT_TRUE(cpuinfo_isa.idiv);
+}
+
+TEST(ISA, vfpv2) {
+	ASSERT_FALSE(cpuinfo_isa.vfpv2);
+}
+
+TEST(ISA, vfpv3) {
+	ASSERT_TRUE(cpuinfo_isa.vfpv3);
+}
+
+TEST(ISA, d32) {
+	ASSERT_TRUE(cpuinfo_isa.d32);
+}
+
+TEST(ISA, fp16) {
+	ASSERT_TRUE(cpuinfo_isa.fp16);
+}
+
+TEST(ISA, fma) {
+	ASSERT_TRUE(cpuinfo_isa.fma);
+}
+
+TEST(ISA, wmmx) {
+	ASSERT_FALSE(cpuinfo_isa.wmmx);
+}
+
+TEST(ISA, wmmx2) {
+	ASSERT_FALSE(cpuinfo_isa.wmmx2);
+}
+
+TEST(ISA, neon) {
+	ASSERT_TRUE(cpuinfo_isa.neon);
+}
+
+TEST(ISA, aes) {
+	ASSERT_TRUE(cpuinfo_isa.aes);
+}
+
+TEST(ISA, sha1) {
+	ASSERT_TRUE(cpuinfo_isa.sha1);
+}
+
+TEST(ISA, sha2) {
+	ASSERT_TRUE(cpuinfo_isa.sha2);
+}
+
+TEST(ISA, pmull) {
+	ASSERT_TRUE(cpuinfo_isa.pmull);
+}
+
+TEST(ISA, crc32) {
+	ASSERT_TRUE(cpuinfo_isa.crc32);
+}
+
+TEST(L1I, count) {
+	cpuinfo_caches l1i = cpuinfo_get_l1i_cache();
+	ASSERT_EQ(2, l1i.count);
+}
+
+TEST(L1I, non_null) {
+	cpuinfo_caches l1i = cpuinfo_get_l1i_cache();
+	ASSERT_TRUE(l1i.instances);
+}
+
+TEST(L1I, size) {
+	cpuinfo_caches l1i = cpuinfo_get_l1i_cache();
+	for (uint32_t k = 0; k < l1i.count; k++) {
+		ASSERT_EQ(128 * 1024, l1i.instances[k].size);
+	}
+}
+
+TEST(L1I, associativity) {
+	cpuinfo_caches l1i = cpuinfo_get_l1i_cache();
+	for (uint32_t k = 0; k < l1i.count; k++) {
+		ASSERT_EQ(4, l1i.instances[k].associativity);
+	}
+}
+
+TEST(L1I, sets) {
+	cpuinfo_caches l1i = cpuinfo_get_l1i_cache();
+	for (uint32_t k = 0; k < l1i.count; k++) {
+		ASSERT_EQ(512, l1i.instances[k].sets);
+	}
+}
+
+TEST(L1I, partitions) {
+	cpuinfo_caches l1i = cpuinfo_get_l1i_cache();
+	for (uint32_t k = 0; k < l1i.count; k++) {
+		ASSERT_EQ(1, l1i.instances[k].partitions);
+	}
+}
+
+TEST(L1I, line_size) {
+	cpuinfo_caches l1i = cpuinfo_get_l1i_cache();
+	for (uint32_t k = 0; k < l1i.count; k++) {
+		ASSERT_EQ(64, l1i.instances[k].line_size);
+	}
+}
+
+TEST(L1I, flags) {
+	cpuinfo_caches l1i = cpuinfo_get_l1i_cache();
+	for (uint32_t k = 0; k < l1i.count; k++) {
+		ASSERT_EQ(0, l1i.instances[k].flags);
+	}
+}
+
+TEST(L1I, processors) {
+	cpuinfo_caches l1i = cpuinfo_get_l1i_cache();
+	for (uint32_t k = 0; k < l1i.count; k++) {
+		ASSERT_EQ(k, l1i.instances[k].thread_start);
+		ASSERT_EQ(1, l1i.instances[k].thread_count);
+	}
+}
+
+TEST(L1D, count) {
+	cpuinfo_caches l1d = cpuinfo_get_l1d_cache();
+	ASSERT_EQ(2, l1d.count);
+}
+
+TEST(L1D, non_null) {
+	cpuinfo_caches l1d = cpuinfo_get_l1d_cache();
+	ASSERT_TRUE(l1d.instances);
+}
+
+TEST(L1D, size) {
+	cpuinfo_caches l1d = cpuinfo_get_l1d_cache();
+	for (uint32_t k = 0; k < l1d.count; k++) {
+		ASSERT_EQ(64 * 1024, l1d.instances[k].size);
+	}
+}
+
+TEST(L1D, associativity) {
+	cpuinfo_caches l1d = cpuinfo_get_l1d_cache();
+	for (uint32_t k = 0; k < l1d.count; k++) {
+		ASSERT_EQ(4, l1d.instances[k].associativity);
+	}
+}
+
+TEST(L1D, sets) {
+	cpuinfo_caches l1d = cpuinfo_get_l1d_cache();
+	for (uint32_t k = 0; k < l1d.count; k++) {
+		ASSERT_EQ(256, l1d.instances[k].sets);
+	}
+}
+
+TEST(L1D, partitions) {
+	cpuinfo_caches l1d = cpuinfo_get_l1d_cache();
+	for (uint32_t k = 0; k < l1d.count; k++) {
+		ASSERT_EQ(1, l1d.instances[k].partitions);
+	}
+}
+
+TEST(L1D, line_size) {
+	cpuinfo_caches l1d = cpuinfo_get_l1d_cache();
+	for (uint32_t k = 0; k < l1d.count; k++) {
+		ASSERT_EQ(64, l1d.instances[k].line_size);
+	}
+}
+
+TEST(L1D, flags) {
+	cpuinfo_caches l1d = cpuinfo_get_l1d_cache();
+	for (uint32_t k = 0; k < l1d.count; k++) {
+		ASSERT_EQ(0, l1d.instances[k].flags);
+	}
+}
+
+TEST(L1D, processors) {
+	cpuinfo_caches l1d = cpuinfo_get_l1d_cache();
+	for (uint32_t k = 0; k < l1d.count; k++) {
+		ASSERT_EQ(k, l1d.instances[k].thread_start);
+		ASSERT_EQ(1, l1d.instances[k].thread_count);
+	}
+}
+
+TEST(L2, count) {
+	cpuinfo_caches l2 = cpuinfo_get_l2_cache();
+	ASSERT_EQ(1, l2.count);
+}
+
+TEST(L2, non_null) {
+	cpuinfo_caches l2 = cpuinfo_get_l2_cache();
+	ASSERT_TRUE(l2.instances);
+}
+
+TEST(L2, size) {
+	cpuinfo_caches l2 = cpuinfo_get_l2_cache();
+	for (uint32_t k = 0; k < l2.count; k++) {
+		ASSERT_EQ(2 * 1024 * 1024, l2.instances[k].size);
+	}
+}
+
+TEST(L2, associativity) {
+	cpuinfo_caches l2 = cpuinfo_get_l2_cache();
+	for (uint32_t k = 0; k < l2.count; k++) {
+		ASSERT_EQ(16, l2.instances[k].associativity);
+	}
+}
+
+TEST(L2, sets) {
+	cpuinfo_caches l2 = cpuinfo_get_l2_cache();
+	for (uint32_t k = 0; k < l2.count; k++) {
+		ASSERT_EQ(2048, l2.instances[k].sets);
+	}
+}
+
+TEST(L2, partitions) {
+	cpuinfo_caches l2 = cpuinfo_get_l2_cache();
+	for (uint32_t k = 0; k < l2.count; k++) {
+		ASSERT_EQ(1, l2.instances[k].partitions);
+	}
+}
+
+TEST(L2, line_size) {
+	cpuinfo_caches l2 = cpuinfo_get_l2_cache();
+	for (uint32_t k = 0; k < l2.count; k++) {
+		ASSERT_EQ(64, l2.instances[k].line_size);
+	}
+}
+
+TEST(L2, flags) {
+	cpuinfo_caches l2 = cpuinfo_get_l2_cache();
+	for (uint32_t k = 0; k < l2.count; k++) {
+		ASSERT_EQ(0, l2.instances[k].flags);
+	}
+}
+
+TEST(L2, processors) {
+	cpuinfo_caches l2 = cpuinfo_get_l2_cache();
+	for (uint32_t k = 0; k < l2.count; k++) {
+		ASSERT_EQ(0, l2.instances[k].thread_start);
+		ASSERT_EQ(2, l2.instances[k].thread_count);
+	}
+}
+
+TEST(L3, none) {
+	cpuinfo_caches l3 = cpuinfo_get_l3_cache();
+	ASSERT_EQ(0, l3.count);
+	ASSERT_FALSE(l3.instances);
+}
+
+TEST(L4, none) {
+	cpuinfo_caches l4 = cpuinfo_get_l4_cache();
+	ASSERT_EQ(0, l4.count);
+	ASSERT_FALSE(l4.instances);
+}
+
+int main(int argc, char* argv[]) {
+	cpuinfo_set_proc_cpuinfo_path("test/cpuinfo/nexus9.log");
+	cpuinfo_initialize();
+	::testing::InitGoogleTest(&argc, argv);
+	return RUN_ALL_TESTS();
+}
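
To build and run the new tests locally, the project's usual confu + ninja
flow should work (commands are illustrative; binary locations may differ):

	confu setup
	python ./configure.py
	ninja
	./galaxy-nexus-test && ./nexus4-test && ./nexus5-test && ./nexus9-test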