Complete tests for cluster info in mock tests
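
Each mock below gains the same CLUSTERS test group, asserting on the fields of
struct cpuinfo_cluster (processor_start/count, core_start/count, cluster_id,
package, vendor, uarch, midr, frequency). For reference, a consumer walks those
same fields through the public API; the snippet below is a minimal sketch only,
not part of this change (the main() scaffolding and printf formatting are
illustrative, and the midr field is ARM-specific, which all of these mocks are):

    #include <stdio.h>
    #include <inttypes.h>
    #include <cpuinfo.h>

    int main(void) {
        /* Populate the global topology tables before querying clusters. */
        if (!cpuinfo_initialize()) {
            return 1;
        }
        for (uint32_t i = 0; i < cpuinfo_get_clusters_count(); i++) {
            const struct cpuinfo_cluster* cluster = cpuinfo_get_cluster(i);
            /* Same fields the CLUSTERS tests assert on. */
            printf("cluster %" PRIu32 ": processors [%" PRIu32 ", %" PRIu32 "), "
                   "cores [%" PRIu32 ", %" PRIu32 "), midr 0x%08" PRIX32 "\n",
                   cluster->cluster_id,
                   cluster->processor_start,
                   cluster->processor_start + cluster->processor_count,
                   cluster->core_start,
                   cluster->core_start + cluster->core_count,
                   cluster->midr);
        }
        cpuinfo_deinitialize();
        return 0;
    }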
diff --git a/test/mock/alcatel-revvl.cc b/test/mock/alcatel-revvl.cc
index 24fc615..1382888 100644
--- a/test/mock/alcatel-revvl.cc
+++ b/test/mock/alcatel-revvl.cc
@@ -171,6 +171,102 @@
 	}
 }
 
+TEST(CLUSTERS, count) {
+	ASSERT_EQ(2, cpuinfo_get_clusters_count());
+}
+
+TEST(CLUSTERS, non_null) {
+	ASSERT_TRUE(cpuinfo_get_clusters());
+}
+
+TEST(CLUSTERS, processor_start) {
+	for (uint32_t i = 0; i < cpuinfo_get_clusters_count(); i++) {
+		switch (i) {
+			case 0:
+				ASSERT_EQ(0, cpuinfo_get_cluster(i)->processor_start);
+				break;
+			case 1:
+				ASSERT_EQ(4, cpuinfo_get_cluster(i)->processor_start);
+				break;
+		}
+	}
+}
+
+TEST(CLUSTERS, processor_count) {
+	for (uint32_t i = 0; i < cpuinfo_get_clusters_count(); i++) {
+		switch (i) {
+			case 0:
+				ASSERT_EQ(4, cpuinfo_get_cluster(i)->processor_count);
+				break;
+			case 1:
+				ASSERT_EQ(1, cpuinfo_get_cluster(i)->processor_count);
+				break;
+		}
+	}
+}
+
+TEST(CLUSTERS, core_start) {
+	for (uint32_t i = 0; i < cpuinfo_get_clusters_count(); i++) {
+		switch (i) {
+			case 0:
+				ASSERT_EQ(0, cpuinfo_get_cluster(i)->core_start);
+				break;
+			case 1:
+				ASSERT_EQ(4, cpuinfo_get_cluster(i)->core_start);
+				break;
+		}
+	}
+}
+
+TEST(CLUSTERS, core_count) {
+	for (uint32_t i = 0; i < cpuinfo_get_clusters_count(); i++) {
+		switch (i) {
+			case 0:
+				ASSERT_EQ(4, cpuinfo_get_cluster(i)->core_count);
+				break;
+			case 1:
+				ASSERT_EQ(1, cpuinfo_get_cluster(i)->core_count);
+				break;
+		}
+	}
+}
+
+TEST(CLUSTERS, cluster_id) {
+	for (uint32_t i = 0; i < cpuinfo_get_clusters_count(); i++) {
+		ASSERT_EQ(i, cpuinfo_get_cluster(i)->cluster_id);
+	}
+}
+
+TEST(CLUSTERS, package) {
+	for (uint32_t i = 0; i < cpuinfo_get_clusters_count(); i++) {
+		ASSERT_EQ(cpuinfo_get_package(0), cpuinfo_get_cluster(i)->package);
+	}
+}
+
+TEST(CLUSTERS, vendor) {
+	for (uint32_t i = 0; i < cpuinfo_get_clusters_count(); i++) {
+		ASSERT_EQ(cpuinfo_vendor_arm, cpuinfo_get_cluster(i)->vendor);
+	}
+}
+
+TEST(CLUSTERS, uarch) {
+	for (uint32_t i = 0; i < cpuinfo_get_clusters_count(); i++) {
+		ASSERT_EQ(cpuinfo_uarch_cortex_a53, cpuinfo_get_cluster(i)->uarch);
+	}
+}
+
+TEST(CLUSTERS, midr) {
+	for (uint32_t i = 0; i < cpuinfo_get_clusters_count(); i++) {
+		ASSERT_EQ(UINT32_C(0x410FD032), cpuinfo_get_cluster(i)->midr);
+	}
+}
+
+TEST(CLUSTERS, DISABLED_frequency) {
+	for (uint32_t i = 0; i < cpuinfo_get_clusters_count(); i++) {
+		ASSERT_EQ(UINT64_C(1508000000), cpuinfo_get_cluster(i)->frequency);
+	}
+}
+
 TEST(PACKAGES, count) {
 	ASSERT_EQ(1, cpuinfo_get_packages_count());
 }
diff --git a/test/mock/atm7029b-tablet.cc b/test/mock/atm7029b-tablet.cc
index c4e49fb..c8462bc 100644
--- a/test/mock/atm7029b-tablet.cc
+++ b/test/mock/atm7029b-tablet.cc
@@ -134,6 +134,74 @@
 	}
 }
 
+TEST(CLUSTERS, count) {
+	ASSERT_EQ(1, cpuinfo_get_clusters_count());
+}
+
+TEST(CLUSTERS, non_null) {
+	ASSERT_TRUE(cpuinfo_get_clusters());
+}
+
+TEST(CLUSTERS, processor_start) {
+	for (uint32_t i = 0; i < cpuinfo_get_clusters_count(); i++) {
+		ASSERT_EQ(0, cpuinfo_get_cluster(i)->processor_start);
+	}
+}
+
+TEST(CLUSTERS, processor_count) {
+	for (uint32_t i = 0; i < cpuinfo_get_clusters_count(); i++) {
+		ASSERT_EQ(4, cpuinfo_get_cluster(i)->processor_count);
+	}
+}
+
+TEST(CLUSTERS, core_start) {
+	for (uint32_t i = 0; i < cpuinfo_get_clusters_count(); i++) {
+		ASSERT_EQ(0, cpuinfo_get_cluster(i)->core_start);
+	}
+}
+
+TEST(CLUSTERS, core_count) {
+	for (uint32_t i = 0; i < cpuinfo_get_clusters_count(); i++) {
+		ASSERT_EQ(4, cpuinfo_get_cluster(i)->core_count);
+	}
+}
+
+TEST(CLUSTERS, cluster_id) {
+	for (uint32_t i = 0; i < cpuinfo_get_clusters_count(); i++) {
+		ASSERT_EQ(i, cpuinfo_get_cluster(i)->cluster_id);
+	}
+}
+
+TEST(CLUSTERS, package) {
+	for (uint32_t i = 0; i < cpuinfo_get_clusters_count(); i++) {
+		ASSERT_EQ(cpuinfo_get_package(0), cpuinfo_get_cluster(i)->package);
+	}
+}
+
+TEST(CLUSTERS, vendor) {
+	for (uint32_t i = 0; i < cpuinfo_get_clusters_count(); i++) {
+		ASSERT_EQ(cpuinfo_vendor_arm, cpuinfo_get_cluster(i)->vendor);
+	}
+}
+
+TEST(CLUSTERS, uarch) {
+	for (uint32_t i = 0; i < cpuinfo_get_clusters_count(); i++) {
+		ASSERT_EQ(cpuinfo_uarch_cortex_a5, cpuinfo_get_cluster(i)->uarch);
+	}
+}
+
+TEST(CLUSTERS, midr) {
+	for (uint32_t i = 0; i < cpuinfo_get_clusters_count(); i++) {
+		ASSERT_EQ(UINT32_C(0x410FC051), cpuinfo_get_cluster(i)->midr);
+	}
+}
+
+TEST(CLUSTERS, DISABLED_frequency) {
+	for (uint32_t i = 0; i < cpuinfo_get_clusters_count(); i++) {
+		ASSERT_EQ(UINT64_C(1320000000), cpuinfo_get_cluster(i)->frequency);
+	}
+}
+
 TEST(PACKAGES, count) {
 	ASSERT_EQ(1, cpuinfo_get_packages_count());
 }
diff --git a/test/mock/blu-r1-hd.cc b/test/mock/blu-r1-hd.cc
index a30dea6..120edc5 100644
--- a/test/mock/blu-r1-hd.cc
+++ b/test/mock/blu-r1-hd.cc
@@ -134,6 +134,74 @@
 	}
 }
 
+TEST(CLUSTERS, count) {
+	ASSERT_EQ(1, cpuinfo_get_clusters_count());
+}
+
+TEST(CLUSTERS, non_null) {
+	ASSERT_TRUE(cpuinfo_get_clusters());
+}
+
+TEST(CLUSTERS, processor_start) {
+	for (uint32_t i = 0; i < cpuinfo_get_clusters_count(); i++) {
+		ASSERT_EQ(0, cpuinfo_get_cluster(i)->processor_start);
+	}
+}
+
+TEST(CLUSTERS, processor_count) {
+	for (uint32_t i = 0; i < cpuinfo_get_clusters_count(); i++) {
+		ASSERT_EQ(4, cpuinfo_get_cluster(i)->processor_count);
+	}
+}
+
+TEST(CLUSTERS, core_start) {
+	for (uint32_t i = 0; i < cpuinfo_get_clusters_count(); i++) {
+		ASSERT_EQ(0, cpuinfo_get_cluster(i)->core_start);
+	}
+}
+
+TEST(CLUSTERS, core_count) {
+	for (uint32_t i = 0; i < cpuinfo_get_clusters_count(); i++) {
+		ASSERT_EQ(4, cpuinfo_get_cluster(i)->core_count);
+	}
+}
+
+TEST(CLUSTERS, cluster_id) {
+	for (uint32_t i = 0; i < cpuinfo_get_clusters_count(); i++) {
+		ASSERT_EQ(i, cpuinfo_get_cluster(i)->cluster_id);
+	}
+}
+
+TEST(CLUSTERS, package) {
+	for (uint32_t i = 0; i < cpuinfo_get_clusters_count(); i++) {
+		ASSERT_EQ(cpuinfo_get_package(0), cpuinfo_get_cluster(i)->package);
+	}
+}
+
+TEST(CLUSTERS, vendor) {
+	for (uint32_t i = 0; i < cpuinfo_get_clusters_count(); i++) {
+		ASSERT_EQ(cpuinfo_vendor_arm, cpuinfo_get_cluster(i)->vendor);
+	}
+}
+
+TEST(CLUSTERS, uarch) {
+	for (uint32_t i = 0; i < cpuinfo_get_clusters_count(); i++) {
+		ASSERT_EQ(cpuinfo_uarch_cortex_a53, cpuinfo_get_cluster(i)->uarch);
+	}
+}
+
+TEST(CLUSTERS, midr) {
+	for (uint32_t i = 0; i < cpuinfo_get_clusters_count(); i++) {
+		ASSERT_EQ(UINT32_C(0x410FD033), cpuinfo_get_cluster(i)->midr);
+	}
+}
+
+TEST(CLUSTERS, DISABLED_frequency) {
+	for (uint32_t i = 0; i < cpuinfo_get_clusters_count(); i++) {
+		ASSERT_EQ(UINT64_C(1300000000), cpuinfo_get_cluster(i)->frequency);
+	}
+}
+
 TEST(PACKAGES, count) {
 	ASSERT_EQ(1, cpuinfo_get_packages_count());
 }
diff --git a/test/mock/galaxy-a3-2016-eu.cc b/test/mock/galaxy-a3-2016-eu.cc
index 5c70164..3f0a4ce 100644
--- a/test/mock/galaxy-a3-2016-eu.cc
+++ b/test/mock/galaxy-a3-2016-eu.cc
@@ -134,6 +134,74 @@
 	}
 }
 
+TEST(CLUSTERS, count) {
+	ASSERT_EQ(1, cpuinfo_get_clusters_count());
+}
+
+TEST(CLUSTERS, non_null) {
+	ASSERT_TRUE(cpuinfo_get_clusters());
+}
+
+TEST(CLUSTERS, processor_start) {
+	for (uint32_t i = 0; i < cpuinfo_get_clusters_count(); i++) {
+		ASSERT_EQ(0, cpuinfo_get_cluster(i)->processor_start);
+	}
+}
+
+TEST(CLUSTERS, processor_count) {
+	for (uint32_t i = 0; i < cpuinfo_get_clusters_count(); i++) {
+		ASSERT_EQ(4, cpuinfo_get_cluster(i)->processor_count);
+	}
+}
+
+TEST(CLUSTERS, core_start) {
+	for (uint32_t i = 0; i < cpuinfo_get_clusters_count(); i++) {
+		ASSERT_EQ(0, cpuinfo_get_cluster(i)->core_start);
+	}
+}
+
+TEST(CLUSTERS, core_count) {
+	for (uint32_t i = 0; i < cpuinfo_get_clusters_count(); i++) {
+		ASSERT_EQ(4, cpuinfo_get_cluster(i)->core_count);
+	}
+}
+
+TEST(CLUSTERS, cluster_id) {
+	for (uint32_t i = 0; i < cpuinfo_get_clusters_count(); i++) {
+		ASSERT_EQ(i, cpuinfo_get_cluster(i)->cluster_id);
+	}
+}
+
+TEST(CLUSTERS, package) {
+	for (uint32_t i = 0; i < cpuinfo_get_clusters_count(); i++) {
+		ASSERT_EQ(cpuinfo_get_package(0), cpuinfo_get_cluster(i)->package);
+	}
+}
+
+TEST(CLUSTERS, vendor) {
+	for (uint32_t i = 0; i < cpuinfo_get_clusters_count(); i++) {
+		ASSERT_EQ(cpuinfo_vendor_arm, cpuinfo_get_cluster(i)->vendor);
+	}
+}
+
+TEST(CLUSTERS, uarch) {
+	for (uint32_t i = 0; i < cpuinfo_get_clusters_count(); i++) {
+		ASSERT_EQ(cpuinfo_uarch_cortex_a53, cpuinfo_get_cluster(i)->uarch);
+	}
+}
+
+TEST(CLUSTERS, midr) {
+	for (uint32_t i = 0; i < cpuinfo_get_clusters_count(); i++) {
+		ASSERT_EQ(UINT32_C(0x410FD033), cpuinfo_get_cluster(i)->midr);
+	}
+}
+
+TEST(CLUSTERS, DISABLED_frequency) {
+	for (uint32_t i = 0; i < cpuinfo_get_clusters_count(); i++) {
+		ASSERT_EQ(UINT64_C(1500000000), cpuinfo_get_cluster(i)->frequency);
+	}
+}
+
 TEST(PACKAGES, count) {
 	ASSERT_EQ(1, cpuinfo_get_packages_count());
 }
diff --git a/test/mock/galaxy-a8-2016-duos.cc b/test/mock/galaxy-a8-2016-duos.cc
index 1f37a24..1a3058e 100644
--- a/test/mock/galaxy-a8-2016-duos.cc
+++ b/test/mock/galaxy-a8-2016-duos.cc
@@ -199,6 +199,95 @@
 	}
 }
 
+TEST(CLUSTERS, count) {
+	ASSERT_EQ(2, cpuinfo_get_clusters_count());
+}
+
+TEST(CLUSTERS, non_null) {
+	ASSERT_TRUE(cpuinfo_get_clusters());
+}
+
+TEST(CLUSTERS, processor_start) {
+	for (uint32_t i = 0; i < cpuinfo_get_clusters_count(); i++) {
+		switch (i) {
+			case 0:
+				ASSERT_EQ(0, cpuinfo_get_cluster(i)->processor_start);
+				break;
+			case 1:
+				ASSERT_EQ(4, cpuinfo_get_cluster(i)->processor_start);
+				break;
+		}
+	}
+}
+
+TEST(CLUSTERS, processor_count) {
+	for (uint32_t i = 0; i < cpuinfo_get_clusters_count(); i++) {
+		ASSERT_EQ(4, cpuinfo_get_cluster(i)->processor_count);
+	}
+}
+
+TEST(CLUSTERS, core_start) {
+	for (uint32_t i = 0; i < cpuinfo_get_clusters_count(); i++) {
+		switch (i) {
+			case 0:
+				ASSERT_EQ(0, cpuinfo_get_cluster(i)->core_start);
+				break;
+			case 1:
+				ASSERT_EQ(4, cpuinfo_get_cluster(i)->core_start);
+				break;
+		}
+	}
+}
+
+TEST(CLUSTERS, core_count) {
+	for (uint32_t i = 0; i < cpuinfo_get_clusters_count(); i++) {
+		ASSERT_EQ(4, cpuinfo_get_cluster(i)->core_count);
+	}
+}
+
+TEST(CLUSTERS, cluster_id) {
+	for (uint32_t i = 0; i < cpuinfo_get_clusters_count(); i++) {
+		ASSERT_EQ(i, cpuinfo_get_cluster(i)->cluster_id);
+	}
+}
+
+TEST(CLUSTERS, package) {
+	for (uint32_t i = 0; i < cpuinfo_get_clusters_count(); i++) {
+		ASSERT_EQ(cpuinfo_get_package(0), cpuinfo_get_cluster(i)->package);
+	}
+}
+
+TEST(CLUSTERS, vendor) {
+	for (uint32_t i = 0; i < cpuinfo_get_clusters_count(); i++) {
+		ASSERT_EQ(cpuinfo_vendor_arm, cpuinfo_get_cluster(i)->vendor);
+	}
+}
+
+TEST(CLUSTERS, uarch) {
+	for (uint32_t i = 0; i < cpuinfo_get_clusters_count(); i++) {
+		ASSERT_EQ(cpuinfo_uarch_cortex_a53, cpuinfo_get_cluster(i)->uarch);
+	}
+}
+
+TEST(CLUSTERS, midr) {
+	for (uint32_t i = 0; i < cpuinfo_get_clusters_count(); i++) {
+		ASSERT_EQ(UINT32_C(0x410FD031), cpuinfo_get_cluster(i)->midr);
+	}
+}
+
+TEST(CLUSTERS, DISABLED_frequency) {
+	for (uint32_t i = 0; i < cpuinfo_get_clusters_count(); i++) {
+		switch (i) {
+			case 0:
+				ASSERT_EQ(UINT64_C(1459200000), cpuinfo_get_cluster(i)->frequency);
+				break;
+			case 1:
+				ASSERT_EQ(UINT64_C(1113600000), cpuinfo_get_cluster(i)->frequency);
+				break;
+		}
+	}
+}
+
 TEST(PACKAGES, count) {
 	ASSERT_EQ(1, cpuinfo_get_packages_count());
 }
diff --git a/test/mock/galaxy-a8-2018.cc b/test/mock/galaxy-a8-2018.cc
index 4b06bc3..4bcefb1 100644
--- a/test/mock/galaxy-a8-2018.cc
+++ b/test/mock/galaxy-a8-2018.cc
@@ -225,6 +225,123 @@
 	}
 }
 
+TEST(CLUSTERS, count) {
+	ASSERT_EQ(2, cpuinfo_get_clusters_count());
+}
+
+TEST(CLUSTERS, non_null) {
+	ASSERT_TRUE(cpuinfo_get_clusters());
+}
+
+TEST(CLUSTERS, processor_start) {
+	for (uint32_t i = 0; i < cpuinfo_get_clusters_count(); i++) {
+		switch (i) {
+			case 0:
+				ASSERT_EQ(0, cpuinfo_get_cluster(i)->processor_start);
+				break;
+			case 1:
+				ASSERT_EQ(2, cpuinfo_get_cluster(i)->processor_start);
+				break;
+		}
+	}
+}
+
+TEST(CLUSTERS, processor_count) {
+	for (uint32_t i = 0; i < cpuinfo_get_clusters_count(); i++) {
+		switch (i) {
+			case 0:
+				ASSERT_EQ(2, cpuinfo_get_cluster(i)->processor_count);
+				break;
+			case 1:
+				ASSERT_EQ(6, cpuinfo_get_cluster(i)->processor_count);
+				break;
+		}
+	}
+}
+
+TEST(CLUSTERS, core_start) {
+	for (uint32_t i = 0; i < cpuinfo_get_clusters_count(); i++) {
+		switch (i) {
+			case 0:
+				ASSERT_EQ(0, cpuinfo_get_cluster(i)->core_start);
+				break;
+			case 1:
+				ASSERT_EQ(2, cpuinfo_get_cluster(i)->core_start);
+				break;
+		}
+	}
+}
+
+TEST(CLUSTERS, core_count) {
+	for (uint32_t i = 0; i < cpuinfo_get_clusters_count(); i++) {
+		switch (i) {
+			case 0:
+				ASSERT_EQ(2, cpuinfo_get_cluster(i)->core_count);
+				break;
+			case 1:
+				ASSERT_EQ(6, cpuinfo_get_cluster(i)->core_count);
+				break;
+		}
+	}
+}
+
+TEST(CLUSTERS, cluster_id) {
+	for (uint32_t i = 0; i < cpuinfo_get_clusters_count(); i++) {
+		ASSERT_EQ(i, cpuinfo_get_cluster(i)->cluster_id);
+	}
+}
+
+TEST(CLUSTERS, package) {
+	for (uint32_t i = 0; i < cpuinfo_get_clusters_count(); i++) {
+		ASSERT_EQ(cpuinfo_get_package(0), cpuinfo_get_cluster(i)->package);
+	}
+}
+
+TEST(CLUSTERS, vendor) {
+	for (uint32_t i = 0; i < cpuinfo_get_clusters_count(); i++) {
+		ASSERT_EQ(cpuinfo_vendor_arm, cpuinfo_get_cluster(i)->vendor);
+	}
+}
+
+TEST(CLUSTERS, uarch) {
+	for (uint32_t i = 0; i < cpuinfo_get_clusters_count(); i++) {
+		switch (i) {
+			case 0:
+				ASSERT_EQ(cpuinfo_uarch_cortex_a73, cpuinfo_get_cluster(i)->uarch);
+				break;
+			case 1:
+				ASSERT_EQ(cpuinfo_uarch_cortex_a53, cpuinfo_get_cluster(i)->uarch);
+				break;
+		}
+	}
+}
+
+TEST(CLUSTERS, midr) {
+	for (uint32_t i = 0; i < cpuinfo_get_clusters_count(); i++) {
+		switch (i) {
+			case 0:
+				ASSERT_EQ(UINT32_C(0x410FD092), cpuinfo_get_cluster(i)->midr);
+				break;
+			case 1:
+				ASSERT_EQ(UINT32_C(0x410FD034), cpuinfo_get_cluster(i)->midr);
+				break;
+		}
+	}
+}
+
+TEST(CLUSTERS, DISABLED_frequency) {
+	for (uint32_t i = 0; i < cpuinfo_get_clusters_count(); i++) {
+		switch (i) {
+			case 0:
+				ASSERT_EQ(UINT64_C(2184000000), cpuinfo_get_cluster(i)->frequency);
+				break;
+			case 1:
+				ASSERT_EQ(UINT64_C(1586000000), cpuinfo_get_cluster(i)->frequency);
+				break;
+		}
+	}
+}
+
 TEST(PACKAGES, count) {
 	ASSERT_EQ(1, cpuinfo_get_packages_count());
 }
diff --git a/test/mock/galaxy-c9-pro.cc b/test/mock/galaxy-c9-pro.cc
index 4c45c98..44a628a 100644
--- a/test/mock/galaxy-c9-pro.cc
+++ b/test/mock/galaxy-c9-pro.cc
@@ -225,6 +225,109 @@
 	}
 }
 
+TEST(CLUSTERS, count) {
+	ASSERT_EQ(2, cpuinfo_get_clusters_count());
+}
+
+TEST(CLUSTERS, non_null) {
+	ASSERT_TRUE(cpuinfo_get_clusters());
+}
+
+TEST(CLUSTERS, processor_start) {
+	for (uint32_t i = 0; i < cpuinfo_get_clusters_count(); i++) {
+		switch (i) {
+			case 0:
+				ASSERT_EQ(0, cpuinfo_get_cluster(i)->processor_start);
+				break;
+			case 1:
+				ASSERT_EQ(4, cpuinfo_get_cluster(i)->processor_start);
+				break;
+		}
+	}
+}
+
+TEST(CLUSTERS, processor_count) {
+	for (uint32_t i = 0; i < cpuinfo_get_clusters_count(); i++) {
+		ASSERT_EQ(4, cpuinfo_get_cluster(i)->processor_count);
+	}
+}
+
+TEST(CLUSTERS, core_start) {
+	for (uint32_t i = 0; i < cpuinfo_get_clusters_count(); i++) {
+		switch (i) {
+			case 0:
+				ASSERT_EQ(0, cpuinfo_get_cluster(i)->core_start);
+				break;
+			case 1:
+				ASSERT_EQ(4, cpuinfo_get_cluster(i)->core_start);
+				break;
+		}
+	}
+}
+
+TEST(CLUSTERS, core_count) {
+	for (uint32_t i = 0; i < cpuinfo_get_clusters_count(); i++) {
+		ASSERT_EQ(4, cpuinfo_get_cluster(i)->core_count);
+	}
+}
+
+TEST(CLUSTERS, cluster_id) {
+	for (uint32_t i = 0; i < cpuinfo_get_clusters_count(); i++) {
+		ASSERT_EQ(i, cpuinfo_get_cluster(i)->cluster_id);
+	}
+}
+
+TEST(CLUSTERS, package) {
+	for (uint32_t i = 0; i < cpuinfo_get_clusters_count(); i++) {
+		ASSERT_EQ(cpuinfo_get_package(0), cpuinfo_get_cluster(i)->package);
+	}
+}
+
+TEST(CLUSTERS, vendor) {
+	for (uint32_t i = 0; i < cpuinfo_get_clusters_count(); i++) {
+		ASSERT_EQ(cpuinfo_vendor_arm, cpuinfo_get_cluster(i)->vendor);
+	}
+}
+
+TEST(CLUSTERS, uarch) {
+	for (uint32_t i = 0; i < cpuinfo_get_clusters_count(); i++) {
+		switch (i) {
+			case 0:
+				ASSERT_EQ(cpuinfo_uarch_cortex_a72, cpuinfo_get_cluster(i)->uarch);
+				break;
+			case 1:
+				ASSERT_EQ(cpuinfo_uarch_cortex_a53, cpuinfo_get_cluster(i)->uarch);
+				break;
+		}
+	}
+}
+
+TEST(CLUSTERS, midr) {
+	for (uint32_t i = 0; i < cpuinfo_get_clusters_count(); i++) {
+		switch (i) {
+			case 0:
+				ASSERT_EQ(UINT32_C(0x410FD080), cpuinfo_get_cluster(i)->midr);
+				break;
+			case 1:
+				ASSERT_EQ(UINT32_C(0x410FD034), cpuinfo_get_cluster(i)->midr);
+				break;
+		}
+	}
+}
+
+TEST(CLUSTERS, DISABLED_frequency) {
+	for (uint32_t i = 0; i < cpuinfo_get_clusters_count(); i++) {
+		switch (i) {
+			case 0:
+				ASSERT_EQ(UINT64_C(1958400000), cpuinfo_get_cluster(i)->frequency);
+				break;
+			case 1:
+				ASSERT_EQ(UINT64_C(1401600000), cpuinfo_get_cluster(i)->frequency);
+				break;
+		}
+	}
+}
+
 TEST(PACKAGES, count) {
 	ASSERT_EQ(1, cpuinfo_get_packages_count());
 }
diff --git a/test/mock/galaxy-grand-prime-value-edition.cc b/test/mock/galaxy-grand-prime-value-edition.cc
index 6ca2e1e..ece2820 100644
--- a/test/mock/galaxy-grand-prime-value-edition.cc
+++ b/test/mock/galaxy-grand-prime-value-edition.cc
@@ -134,6 +134,74 @@
 	}
 }
 
+TEST(CLUSTERS, count) {
+	ASSERT_EQ(1, cpuinfo_get_clusters_count());
+}
+
+TEST(CLUSTERS, non_null) {
+	ASSERT_TRUE(cpuinfo_get_clusters());
+}
+
+TEST(CLUSTERS, processor_start) {
+	for (uint32_t i = 0; i < cpuinfo_get_clusters_count(); i++) {
+		ASSERT_EQ(0, cpuinfo_get_cluster(i)->processor_start);
+	}
+}
+
+TEST(CLUSTERS, processor_count) {
+	for (uint32_t i = 0; i < cpuinfo_get_clusters_count(); i++) {
+		ASSERT_EQ(4, cpuinfo_get_cluster(i)->processor_count);
+	}
+}
+
+TEST(CLUSTERS, core_start) {
+	for (uint32_t i = 0; i < cpuinfo_get_clusters_count(); i++) {
+		ASSERT_EQ(0, cpuinfo_get_cluster(i)->core_start);
+	}
+}
+
+TEST(CLUSTERS, core_count) {
+	for (uint32_t i = 0; i < cpuinfo_get_clusters_count(); i++) {
+		ASSERT_EQ(4, cpuinfo_get_cluster(i)->core_count);
+	}
+}
+
+TEST(CLUSTERS, cluster_id) {
+	for (uint32_t i = 0; i < cpuinfo_get_clusters_count(); i++) {
+		ASSERT_EQ(i, cpuinfo_get_cluster(i)->cluster_id);
+	}
+}
+
+TEST(CLUSTERS, package) {
+	for (uint32_t i = 0; i < cpuinfo_get_clusters_count(); i++) {
+		ASSERT_EQ(cpuinfo_get_package(0), cpuinfo_get_cluster(i)->package);
+	}
+}
+
+TEST(CLUSTERS, vendor) {
+	for (uint32_t i = 0; i < cpuinfo_get_clusters_count(); i++) {
+		ASSERT_EQ(cpuinfo_vendor_arm, cpuinfo_get_cluster(i)->vendor);
+	}
+}
+
+TEST(CLUSTERS, uarch) {
+	for (uint32_t i = 0; i < cpuinfo_get_clusters_count(); i++) {
+		ASSERT_EQ(cpuinfo_uarch_cortex_a7, cpuinfo_get_cluster(i)->uarch);
+	}
+}
+
+TEST(CLUSTERS, midr) {
+	for (uint32_t i = 0; i < cpuinfo_get_clusters_count(); i++) {
+		ASSERT_EQ(UINT32_C(0x410FC075), cpuinfo_get_cluster(i)->midr);
+	}
+}
+
+TEST(CLUSTERS, DISABLED_frequency) {
+	for (uint32_t i = 0; i < cpuinfo_get_clusters_count(); i++) {
+		ASSERT_EQ(UINT64_C(1300000000), cpuinfo_get_cluster(i)->frequency);
+	}
+}
+
 TEST(PACKAGES, count) {
 	ASSERT_EQ(1, cpuinfo_get_packages_count());
 }
diff --git a/test/mock/galaxy-j1-2016.cc b/test/mock/galaxy-j1-2016.cc
index f2268ab..303b973 100644
--- a/test/mock/galaxy-j1-2016.cc
+++ b/test/mock/galaxy-j1-2016.cc
@@ -134,6 +134,74 @@
 	}
 }
 
+TEST(CLUSTERS, count) {
+	ASSERT_EQ(1, cpuinfo_get_clusters_count());
+}
+
+TEST(CLUSTERS, non_null) {
+	ASSERT_TRUE(cpuinfo_get_clusters());
+}
+
+TEST(CLUSTERS, processor_start) {
+	for (uint32_t i = 0; i < cpuinfo_get_clusters_count(); i++) {
+		ASSERT_EQ(0, cpuinfo_get_cluster(i)->processor_start);
+	}
+}
+
+TEST(CLUSTERS, processor_count) {
+	for (uint32_t i = 0; i < cpuinfo_get_clusters_count(); i++) {
+		ASSERT_EQ(4, cpuinfo_get_cluster(i)->processor_count);
+	}
+}
+
+TEST(CLUSTERS, core_start) {
+	for (uint32_t i = 0; i < cpuinfo_get_clusters_count(); i++) {
+		ASSERT_EQ(0, cpuinfo_get_cluster(i)->core_start);
+	}
+}
+
+TEST(CLUSTERS, core_count) {
+	for (uint32_t i = 0; i < cpuinfo_get_clusters_count(); i++) {
+		ASSERT_EQ(4, cpuinfo_get_cluster(i)->core_count);
+	}
+}
+
+TEST(CLUSTERS, cluster_id) {
+	for (uint32_t i = 0; i < cpuinfo_get_clusters_count(); i++) {
+		ASSERT_EQ(i, cpuinfo_get_cluster(i)->cluster_id);
+	}
+}
+
+TEST(CLUSTERS, package) {
+	for (uint32_t i = 0; i < cpuinfo_get_clusters_count(); i++) {
+		ASSERT_EQ(cpuinfo_get_package(0), cpuinfo_get_cluster(i)->package);
+	}
+}
+
+TEST(CLUSTERS, vendor) {
+	for (uint32_t i = 0; i < cpuinfo_get_clusters_count(); i++) {
+		ASSERT_EQ(cpuinfo_vendor_arm, cpuinfo_get_cluster(i)->vendor);
+	}
+}
+
+TEST(CLUSTERS, uarch) {
+	for (uint32_t i = 0; i < cpuinfo_get_clusters_count(); i++) {
+		ASSERT_EQ(cpuinfo_uarch_cortex_a7, cpuinfo_get_cluster(i)->uarch);
+	}
+}
+
+TEST(CLUSTERS, midr) {
+	for (uint32_t i = 0; i < cpuinfo_get_clusters_count(); i++) {
+		ASSERT_EQ(UINT32_C(0x410FC075), cpuinfo_get_cluster(i)->midr);
+	}
+}
+
+TEST(CLUSTERS, DISABLED_frequency) {
+	for (uint32_t i = 0; i < cpuinfo_get_clusters_count(); i++) {
+		ASSERT_EQ(UINT64_C(1200000000), cpuinfo_get_cluster(i)->frequency);
+	}
+}
+
 TEST(PACKAGES, count) {
 	ASSERT_EQ(1, cpuinfo_get_packages_count());
 }
diff --git a/test/mock/galaxy-j5.cc b/test/mock/galaxy-j5.cc
index ebdeeb6..fd509b8 100644
--- a/test/mock/galaxy-j5.cc
+++ b/test/mock/galaxy-j5.cc
@@ -134,6 +134,74 @@
 	}
 }
 
+TEST(CLUSTERS, count) {
+	ASSERT_EQ(1, cpuinfo_get_clusters_count());
+}
+
+TEST(CLUSTERS, non_null) {
+	ASSERT_TRUE(cpuinfo_get_clusters());
+}
+
+TEST(CLUSTERS, processor_start) {
+	for (uint32_t i = 0; i < cpuinfo_get_clusters_count(); i++) {
+		ASSERT_EQ(0, cpuinfo_get_cluster(i)->processor_start);
+	}
+}
+
+TEST(CLUSTERS, processor_count) {
+	for (uint32_t i = 0; i < cpuinfo_get_clusters_count(); i++) {
+		ASSERT_EQ(4, cpuinfo_get_cluster(i)->processor_count);
+	}
+}
+
+TEST(CLUSTERS, core_start) {
+	for (uint32_t i = 0; i < cpuinfo_get_clusters_count(); i++) {
+		ASSERT_EQ(0, cpuinfo_get_cluster(i)->core_start);
+	}
+}
+
+TEST(CLUSTERS, core_count) {
+	for (uint32_t i = 0; i < cpuinfo_get_clusters_count(); i++) {
+		ASSERT_EQ(4, cpuinfo_get_cluster(i)->core_count);
+	}
+}
+
+TEST(CLUSTERS, cluster_id) {
+	for (uint32_t i = 0; i < cpuinfo_get_clusters_count(); i++) {
+		ASSERT_EQ(i, cpuinfo_get_cluster(i)->cluster_id);
+	}
+}
+
+TEST(CLUSTERS, package) {
+	for (uint32_t i = 0; i < cpuinfo_get_clusters_count(); i++) {
+		ASSERT_EQ(cpuinfo_get_package(0), cpuinfo_get_cluster(i)->package);
+	}
+}
+
+TEST(CLUSTERS, vendor) {
+	for (uint32_t i = 0; i < cpuinfo_get_clusters_count(); i++) {
+		ASSERT_EQ(cpuinfo_vendor_arm, cpuinfo_get_cluster(i)->vendor);
+	}
+}
+
+TEST(CLUSTERS, uarch) {
+	for (uint32_t i = 0; i < cpuinfo_get_clusters_count(); i++) {
+		ASSERT_EQ(cpuinfo_uarch_cortex_a53, cpuinfo_get_cluster(i)->uarch);
+	}
+}
+
+TEST(CLUSTERS, midr) {
+	for (uint32_t i = 0; i < cpuinfo_get_clusters_count(); i++) {
+		ASSERT_EQ(UINT32_C(0x410FD030), cpuinfo_get_cluster(i)->midr);
+	}
+}
+
+TEST(CLUSTERS, DISABLED_frequency) {
+	for (uint32_t i = 0; i < cpuinfo_get_clusters_count(); i++) {
+		ASSERT_EQ(UINT64_C(1190400000), cpuinfo_get_cluster(i)->frequency);
+	}
+}
+
 TEST(PACKAGES, count) {
 	ASSERT_EQ(1, cpuinfo_get_packages_count());
 }
diff --git a/test/mock/galaxy-j7-prime.cc b/test/mock/galaxy-j7-prime.cc
index 7c624dc..97572d1 100644
--- a/test/mock/galaxy-j7-prime.cc
+++ b/test/mock/galaxy-j7-prime.cc
@@ -186,6 +186,88 @@
 	}
 }
 
+TEST(CLUSTERS, count) {
+	ASSERT_EQ(2, cpuinfo_get_clusters_count());
+}
+
+TEST(CLUSTERS, non_null) {
+	ASSERT_TRUE(cpuinfo_get_clusters());
+}
+
+TEST(CLUSTERS, processor_start) {
+	for (uint32_t i = 0; i < cpuinfo_get_clusters_count(); i++) {
+		switch (i) {
+			case 0:
+				ASSERT_EQ(0, cpuinfo_get_cluster(i)->processor_start);
+				break;
+			case 1:
+				ASSERT_EQ(4, cpuinfo_get_cluster(i)->processor_start);
+				break;
+		}
+	}
+}
+
+TEST(CLUSTERS, processor_count) {
+	for (uint32_t i = 0; i < cpuinfo_get_clusters_count(); i++) {
+		ASSERT_EQ(4, cpuinfo_get_cluster(i)->processor_count);
+	}
+}
+
+TEST(CLUSTERS, core_start) {
+	for (uint32_t i = 0; i < cpuinfo_get_clusters_count(); i++) {
+		switch (i) {
+			case 0:
+				ASSERT_EQ(0, cpuinfo_get_cluster(i)->core_start);
+				break;
+			case 1:
+				ASSERT_EQ(4, cpuinfo_get_cluster(i)->core_start);
+				break;
+		}
+	}
+}
+
+TEST(CLUSTERS, core_count) {
+	for (uint32_t i = 0; i < cpuinfo_get_clusters_count(); i++) {
+		ASSERT_EQ(4, cpuinfo_get_cluster(i)->core_count);
+	}
+}
+
+TEST(CLUSTERS, cluster_id) {
+	for (uint32_t i = 0; i < cpuinfo_get_clusters_count(); i++) {
+		ASSERT_EQ(i, cpuinfo_get_cluster(i)->cluster_id);
+	}
+}
+
+TEST(CLUSTERS, package) {
+	for (uint32_t i = 0; i < cpuinfo_get_clusters_count(); i++) {
+		ASSERT_EQ(cpuinfo_get_package(0), cpuinfo_get_cluster(i)->package);
+	}
+}
+
+TEST(CLUSTERS, vendor) {
+	for (uint32_t i = 0; i < cpuinfo_get_clusters_count(); i++) {
+		ASSERT_EQ(cpuinfo_vendor_arm, cpuinfo_get_cluster(i)->vendor);
+	}
+}
+
+TEST(CLUSTERS, uarch) {
+	for (uint32_t i = 0; i < cpuinfo_get_clusters_count(); i++) {
+		ASSERT_EQ(cpuinfo_uarch_cortex_a53, cpuinfo_get_cluster(i)->uarch);
+	}
+}
+
+TEST(CLUSTERS, midr) {
+	for (uint32_t i = 0; i < cpuinfo_get_clusters_count(); i++) {
+		ASSERT_EQ(UINT32_C(0x410FD034), cpuinfo_get_cluster(i)->midr);
+	}
+}
+
+TEST(CLUSTERS, DISABLED_frequency) {
+	for (uint32_t i = 0; i < cpuinfo_get_clusters_count(); i++) {
+		ASSERT_EQ(UINT64_C(1586000000), cpuinfo_get_cluster(i)->frequency);
+	}
+}
+
 TEST(PACKAGES, count) {
 	ASSERT_EQ(1, cpuinfo_get_packages_count());
 }
diff --git a/test/mock/galaxy-j7-tmobile.cc b/test/mock/galaxy-j7-tmobile.cc
index 3a41f1f..7248165 100644
--- a/test/mock/galaxy-j7-tmobile.cc
+++ b/test/mock/galaxy-j7-tmobile.cc
@@ -186,6 +186,88 @@
 	}
 }
 
+TEST(CLUSTERS, count) {
+	ASSERT_EQ(2, cpuinfo_get_clusters_count());
+}
+
+TEST(CLUSTERS, non_null) {
+	ASSERT_TRUE(cpuinfo_get_clusters());
+}
+
+TEST(CLUSTERS, processor_start) {
+	for (uint32_t i = 0; i < cpuinfo_get_clusters_count(); i++) {
+		switch (i) {
+			case 0:
+				ASSERT_EQ(0, cpuinfo_get_cluster(i)->processor_start);
+				break;
+			case 1:
+				ASSERT_EQ(4, cpuinfo_get_cluster(i)->processor_start);
+				break;
+		}
+	}
+}
+
+TEST(CLUSTERS, processor_count) {
+	for (uint32_t i = 0; i < cpuinfo_get_clusters_count(); i++) {
+		ASSERT_EQ(4, cpuinfo_get_cluster(i)->processor_count);
+	}
+}
+
+TEST(CLUSTERS, core_start) {
+	for (uint32_t i = 0; i < cpuinfo_get_clusters_count(); i++) {
+		switch (i) {
+			case 0:
+				ASSERT_EQ(0, cpuinfo_get_cluster(i)->core_start);
+				break;
+			case 1:
+				ASSERT_EQ(4, cpuinfo_get_cluster(i)->core_start);
+				break;
+		}
+	}
+}
+
+TEST(CLUSTERS, core_count) {
+	for (uint32_t i = 0; i < cpuinfo_get_clusters_count(); i++) {
+		ASSERT_EQ(4, cpuinfo_get_cluster(i)->core_count);
+	}
+}
+
+TEST(CLUSTERS, cluster_id) {
+	for (uint32_t i = 0; i < cpuinfo_get_clusters_count(); i++) {
+		ASSERT_EQ(i, cpuinfo_get_cluster(i)->cluster_id);
+	}
+}
+
+TEST(CLUSTERS, package) {
+	for (uint32_t i = 0; i < cpuinfo_get_clusters_count(); i++) {
+		ASSERT_EQ(cpuinfo_get_package(0), cpuinfo_get_cluster(i)->package);
+	}
+}
+
+TEST(CLUSTERS, vendor) {
+	for (uint32_t i = 0; i < cpuinfo_get_clusters_count(); i++) {
+		ASSERT_EQ(cpuinfo_vendor_arm, cpuinfo_get_cluster(i)->vendor);
+	}
+}
+
+TEST(CLUSTERS, uarch) {
+	for (uint32_t i = 0; i < cpuinfo_get_clusters_count(); i++) {
+		ASSERT_EQ(cpuinfo_uarch_cortex_a53, cpuinfo_get_cluster(i)->uarch);
+	}
+}
+
+TEST(CLUSTERS, midr) {
+	for (uint32_t i = 0; i < cpuinfo_get_clusters_count(); i++) {
+		ASSERT_EQ(UINT32_C(0x410FD033), cpuinfo_get_cluster(i)->midr);
+	}
+}
+
+TEST(CLUSTERS, DISABLED_frequency) {
+	for (uint32_t i = 0; i < cpuinfo_get_clusters_count(); i++) {
+		ASSERT_EQ(UINT64_C(1500000000), cpuinfo_get_cluster(i)->frequency);
+	}
+}
+
 TEST(PACKAGES, count) {
 	ASSERT_EQ(1, cpuinfo_get_packages_count());
 }
diff --git a/test/mock/galaxy-j7-uae.cc b/test/mock/galaxy-j7-uae.cc
index 37344d0..934e792 100644
--- a/test/mock/galaxy-j7-uae.cc
+++ b/test/mock/galaxy-j7-uae.cc
@@ -186,6 +186,88 @@
 	}
 }
 
+TEST(CLUSTERS, count) {
+	ASSERT_EQ(2, cpuinfo_get_clusters_count());
+}
+
+TEST(CLUSTERS, non_null) {
+	ASSERT_TRUE(cpuinfo_get_clusters());
+}
+
+TEST(CLUSTERS, processor_start) {
+	for (uint32_t i = 0; i < cpuinfo_get_clusters_count(); i++) {
+		switch (i) {
+			case 0:
+				ASSERT_EQ(0, cpuinfo_get_cluster(i)->processor_start);
+				break;
+			case 1:
+				ASSERT_EQ(4, cpuinfo_get_cluster(i)->processor_start);
+				break;
+		}
+	}
+}
+
+TEST(CLUSTERS, processor_count) {
+	for (uint32_t i = 0; i < cpuinfo_get_clusters_count(); i++) {
+		ASSERT_EQ(4, cpuinfo_get_cluster(i)->processor_count);
+	}
+}
+
+TEST(CLUSTERS, core_start) {
+	for (uint32_t i = 0; i < cpuinfo_get_clusters_count(); i++) {
+		switch (i) {
+			case 0:
+				ASSERT_EQ(0, cpuinfo_get_cluster(i)->core_start);
+				break;
+			case 1:
+				ASSERT_EQ(4, cpuinfo_get_cluster(i)->core_start);
+				break;
+		}
+	}
+}
+
+TEST(CLUSTERS, core_count) {
+	for (uint32_t i = 0; i < cpuinfo_get_clusters_count(); i++) {
+		ASSERT_EQ(4, cpuinfo_get_cluster(i)->core_count);
+	}
+}
+
+TEST(CLUSTERS, cluster_id) {
+	for (uint32_t i = 0; i < cpuinfo_get_clusters_count(); i++) {
+		ASSERT_EQ(i, cpuinfo_get_cluster(i)->cluster_id);
+	}
+}
+
+TEST(CLUSTERS, package) {
+	for (uint32_t i = 0; i < cpuinfo_get_clusters_count(); i++) {
+		ASSERT_EQ(cpuinfo_get_package(0), cpuinfo_get_cluster(i)->package);
+	}
+}
+
+TEST(CLUSTERS, vendor) {
+	for (uint32_t i = 0; i < cpuinfo_get_clusters_count(); i++) {
+		ASSERT_EQ(cpuinfo_vendor_arm, cpuinfo_get_cluster(i)->vendor);
+	}
+}
+
+TEST(CLUSTERS, uarch) {
+	for (uint32_t i = 0; i < cpuinfo_get_clusters_count(); i++) {
+		ASSERT_EQ(cpuinfo_uarch_cortex_a53, cpuinfo_get_cluster(i)->uarch);
+	}
+}
+
+TEST(CLUSTERS, midr) {
+	for (uint32_t i = 0; i < cpuinfo_get_clusters_count(); i++) {
+		ASSERT_EQ(UINT32_C(0x410FD033), cpuinfo_get_cluster(i)->midr);
+	}
+}
+
+TEST(CLUSTERS, DISABLED_frequency) {
+	for (uint32_t i = 0; i < cpuinfo_get_clusters_count(); i++) {
+		ASSERT_EQ(UINT64_C(1500000000), cpuinfo_get_cluster(i)->frequency);
+	}
+}
+
 TEST(PACKAGES, count) {
 	ASSERT_EQ(1, cpuinfo_get_packages_count());
 }
diff --git a/test/mock/galaxy-s3-us.cc b/test/mock/galaxy-s3-us.cc
index 9990424..52d6130 100644
--- a/test/mock/galaxy-s3-us.cc
+++ b/test/mock/galaxy-s3-us.cc
@@ -134,6 +134,74 @@
 	}
 }
 
+TEST(CLUSTERS, count) {
+	ASSERT_EQ(1, cpuinfo_get_clusters_count());
+}
+
+TEST(CLUSTERS, non_null) {
+	ASSERT_TRUE(cpuinfo_get_clusters());
+}
+
+TEST(CLUSTERS, processor_start) {
+	for (uint32_t i = 0; i < cpuinfo_get_clusters_count(); i++) {
+		ASSERT_EQ(0, cpuinfo_get_cluster(i)->processor_start);
+	}
+}
+
+TEST(CLUSTERS, processor_count) {
+	for (uint32_t i = 0; i < cpuinfo_get_clusters_count(); i++) {
+		ASSERT_EQ(2, cpuinfo_get_cluster(i)->processor_count);
+	}
+}
+
+TEST(CLUSTERS, core_start) {
+	for (uint32_t i = 0; i < cpuinfo_get_clusters_count(); i++) {
+		ASSERT_EQ(0, cpuinfo_get_cluster(i)->core_start);
+	}
+}
+
+TEST(CLUSTERS, core_count) {
+	for (uint32_t i = 0; i < cpuinfo_get_clusters_count(); i++) {
+		ASSERT_EQ(2, cpuinfo_get_cluster(i)->core_count);
+	}
+}
+
+TEST(CLUSTERS, cluster_id) {
+	for (uint32_t i = 0; i < cpuinfo_get_clusters_count(); i++) {
+		ASSERT_EQ(i, cpuinfo_get_cluster(i)->cluster_id);
+	}
+}
+
+TEST(CLUSTERS, package) {
+	for (uint32_t i = 0; i < cpuinfo_get_clusters_count(); i++) {
+		ASSERT_EQ(cpuinfo_get_package(0), cpuinfo_get_cluster(i)->package);
+	}
+}
+
+TEST(CLUSTERS, vendor) {
+	for (uint32_t i = 0; i < cpuinfo_get_clusters_count(); i++) {
+		ASSERT_EQ(cpuinfo_vendor_qualcomm, cpuinfo_get_cluster(i)->vendor);
+	}
+}
+
+TEST(CLUSTERS, uarch) {
+	for (uint32_t i = 0; i < cpuinfo_get_clusters_count(); i++) {
+		ASSERT_EQ(cpuinfo_uarch_krait, cpuinfo_get_cluster(i)->uarch);
+	}
+}
+
+TEST(CLUSTERS, midr) {
+	for (uint32_t i = 0; i < cpuinfo_get_clusters_count(); i++) {
+		ASSERT_EQ(UINT32_C(0x511F04D4), cpuinfo_get_cluster(i)->midr);
+	}
+}
+
+TEST(CLUSTERS, DISABLED_frequency) {
+	for (uint32_t i = 0; i < cpuinfo_get_clusters_count(); i++) {
+		ASSERT_EQ(UINT64_C(1512000000), cpuinfo_get_cluster(i)->frequency);
+	}
+}
+
 TEST(PACKAGES, count) {
 	ASSERT_EQ(1, cpuinfo_get_packages_count());
 }
diff --git a/test/mock/galaxy-s4-us.cc b/test/mock/galaxy-s4-us.cc
index f017fd6..9f0fac3 100644
--- a/test/mock/galaxy-s4-us.cc
+++ b/test/mock/galaxy-s4-us.cc
@@ -134,6 +134,74 @@
 	}
 }
 
+TEST(CLUSTERS, count) {
+	ASSERT_EQ(1, cpuinfo_get_clusters_count());
+}
+
+TEST(CLUSTERS, non_null) {
+	ASSERT_TRUE(cpuinfo_get_clusters());
+}
+
+TEST(CLUSTERS, processor_start) {
+	for (uint32_t i = 0; i < cpuinfo_get_clusters_count(); i++) {
+		ASSERT_EQ(0, cpuinfo_get_cluster(i)->processor_start);
+	}
+}
+
+TEST(CLUSTERS, processor_count) {
+	for (uint32_t i = 0; i < cpuinfo_get_clusters_count(); i++) {
+		ASSERT_EQ(4, cpuinfo_get_cluster(i)->processor_count);
+	}
+}
+
+TEST(CLUSTERS, core_start) {
+	for (uint32_t i = 0; i < cpuinfo_get_clusters_count(); i++) {
+		ASSERT_EQ(0, cpuinfo_get_cluster(i)->core_start);
+	}
+}
+
+TEST(CLUSTERS, core_count) {
+	for (uint32_t i = 0; i < cpuinfo_get_clusters_count(); i++) {
+		ASSERT_EQ(4, cpuinfo_get_cluster(i)->core_count);
+	}
+}
+
+TEST(CLUSTERS, cluster_id) {
+	for (uint32_t i = 0; i < cpuinfo_get_clusters_count(); i++) {
+		ASSERT_EQ(i, cpuinfo_get_cluster(i)->cluster_id);
+	}
+}
+
+TEST(CLUSTERS, package) {
+	for (uint32_t i = 0; i < cpuinfo_get_clusters_count(); i++) {
+		ASSERT_EQ(cpuinfo_get_package(0), cpuinfo_get_cluster(i)->package);
+	}
+}
+
+TEST(CLUSTERS, vendor) {
+	for (uint32_t i = 0; i < cpuinfo_get_clusters_count(); i++) {
+		ASSERT_EQ(cpuinfo_vendor_qualcomm, cpuinfo_get_cluster(i)->vendor);
+	}
+}
+
+TEST(CLUSTERS, uarch) {
+	for (uint32_t i = 0; i < cpuinfo_get_clusters_count(); i++) {
+		ASSERT_EQ(cpuinfo_uarch_krait, cpuinfo_get_cluster(i)->uarch);
+	}
+}
+
+TEST(CLUSTERS, midr) {
+	for (uint32_t i = 0; i < cpuinfo_get_clusters_count(); i++) {
+		ASSERT_EQ(UINT32_C(0x511F06F0), cpuinfo_get_cluster(i)->midr);
+	}
+}
+
+TEST(CLUSTERS, DISABLED_frequency) {
+	for (uint32_t i = 0; i < cpuinfo_get_clusters_count(); i++) {
+		ASSERT_EQ(UINT64_C(1890000000), cpuinfo_get_cluster(i)->frequency);
+	}
+}
+
 TEST(PACKAGES, count) {
 	ASSERT_EQ(1, cpuinfo_get_packages_count());
 }
diff --git a/test/mock/galaxy-s5-global.cc b/test/mock/galaxy-s5-global.cc
index 480d473..bfc628e 100644
--- a/test/mock/galaxy-s5-global.cc
+++ b/test/mock/galaxy-s5-global.cc
@@ -225,6 +225,109 @@
 	}
 }
 
+TEST(CLUSTERS, count) {
+	ASSERT_EQ(2, cpuinfo_get_clusters_count());
+}
+
+TEST(CLUSTERS, non_null) {
+	ASSERT_TRUE(cpuinfo_get_clusters());
+}
+
+TEST(CLUSTERS, processor_start) {
+	for (uint32_t i = 0; i < cpuinfo_get_clusters_count(); i++) {
+		switch (i) {
+			case 0:
+				ASSERT_EQ(0, cpuinfo_get_cluster(i)->processor_start);
+				break;
+			case 1:
+				ASSERT_EQ(4, cpuinfo_get_cluster(i)->processor_start);
+				break;
+		}
+	}
+}
+
+TEST(CLUSTERS, processor_count) {
+	for (uint32_t i = 0; i < cpuinfo_get_clusters_count(); i++) {
+		ASSERT_EQ(4, cpuinfo_get_cluster(i)->processor_count);
+	}
+}
+
+TEST(CLUSTERS, core_start) {
+	for (uint32_t i = 0; i < cpuinfo_get_clusters_count(); i++) {
+		switch (i) {
+			case 0:
+				ASSERT_EQ(0, cpuinfo_get_cluster(i)->core_start);
+				break;
+			case 1:
+				ASSERT_EQ(4, cpuinfo_get_cluster(i)->core_start);
+				break;
+		}
+	}
+}
+
+TEST(CLUSTERS, core_count) {
+	for (uint32_t i = 0; i < cpuinfo_get_clusters_count(); i++) {
+		ASSERT_EQ(4, cpuinfo_get_cluster(i)->core_count);
+	}
+}
+
+TEST(CLUSTERS, cluster_id) {
+	for (uint32_t i = 0; i < cpuinfo_get_clusters_count(); i++) {
+		ASSERT_EQ(i, cpuinfo_get_cluster(i)->cluster_id);
+	}
+}
+
+TEST(CLUSTERS, package) {
+	for (uint32_t i = 0; i < cpuinfo_get_clusters_count(); i++) {
+		ASSERT_EQ(cpuinfo_get_package(0), cpuinfo_get_cluster(i)->package);
+	}
+}
+
+TEST(CLUSTERS, vendor) {
+	for (uint32_t i = 0; i < cpuinfo_get_clusters_count(); i++) {
+		ASSERT_EQ(cpuinfo_vendor_arm, cpuinfo_get_cluster(i)->vendor);
+	}
+}
+
+TEST(CLUSTERS, uarch) {
+	for (uint32_t i = 0; i < cpuinfo_get_clusters_count(); i++) {
+		switch (i) {
+			case 0:
+				ASSERT_EQ(cpuinfo_uarch_cortex_a15, cpuinfo_get_cluster(i)->uarch);
+				break;
+			case 1:
+				ASSERT_EQ(cpuinfo_uarch_cortex_a7, cpuinfo_get_cluster(i)->uarch);
+				break;
+		}
+	}
+}
+
+TEST(CLUSTERS, midr) {
+	for (uint32_t i = 0; i < cpuinfo_get_clusters_count(); i++) {
+		switch (i) {
+			case 0:
+				ASSERT_EQ(UINT32_C(0x412FC0F3), cpuinfo_get_cluster(i)->midr);
+				break;
+			case 1:
+				ASSERT_EQ(UINT32_C(0x410FC073), cpuinfo_get_cluster(i)->midr);
+				break;
+		}
+	}
+}
+
+TEST(CLUSTERS, DISABLED_frequency) {
+	for (uint32_t i = 0; i < cpuinfo_get_clusters_count(); i++) {
+		switch (i) {
+			case 0:
+				ASSERT_EQ(UINT64_C(1900000000), cpuinfo_get_cluster(i)->frequency);
+				break;
+			case 1:
+				ASSERT_EQ(UINT64_C(1300000000), cpuinfo_get_cluster(i)->frequency);
+				break;
+		}
+	}
+}
+
 TEST(PACKAGES, count) {
 	ASSERT_EQ(1, cpuinfo_get_packages_count());
 }
diff --git a/test/mock/galaxy-s5-us.cc b/test/mock/galaxy-s5-us.cc
index 21a8b8a..3b6cc5b 100644
--- a/test/mock/galaxy-s5-us.cc
+++ b/test/mock/galaxy-s5-us.cc
@@ -134,6 +134,74 @@
 	}
 }
 
+TEST(CLUSTERS, count) {
+	ASSERT_EQ(1, cpuinfo_get_clusters_count());
+}
+
+TEST(CLUSTERS, non_null) {
+	ASSERT_TRUE(cpuinfo_get_clusters());
+}
+
+TEST(CLUSTERS, processor_start) {
+	for (uint32_t i = 0; i < cpuinfo_get_clusters_count(); i++) {
+		ASSERT_EQ(0, cpuinfo_get_cluster(i)->processor_start);
+	}
+}
+
+TEST(CLUSTERS, processor_count) {
+	for (uint32_t i = 0; i < cpuinfo_get_clusters_count(); i++) {
+		ASSERT_EQ(4, cpuinfo_get_cluster(i)->processor_count);
+	}
+}
+
+TEST(CLUSTERS, core_start) {
+	for (uint32_t i = 0; i < cpuinfo_get_clusters_count(); i++) {
+		ASSERT_EQ(0, cpuinfo_get_cluster(i)->core_start);
+	}
+}
+
+TEST(CLUSTERS, core_count) {
+	for (uint32_t i = 0; i < cpuinfo_get_clusters_count(); i++) {
+		ASSERT_EQ(4, cpuinfo_get_cluster(i)->core_count);
+	}
+}
+
+TEST(CLUSTERS, cluster_id) {
+	for (uint32_t i = 0; i < cpuinfo_get_clusters_count(); i++) {
+		ASSERT_EQ(i, cpuinfo_get_cluster(i)->cluster_id);
+	}
+}
+
+TEST(CLUSTERS, package) {
+	for (uint32_t i = 0; i < cpuinfo_get_clusters_count(); i++) {
+		ASSERT_EQ(cpuinfo_get_package(0), cpuinfo_get_cluster(i)->package);
+	}
+}
+
+TEST(CLUSTERS, vendor) {
+	for (uint32_t i = 0; i < cpuinfo_get_clusters_count(); i++) {
+		ASSERT_EQ(cpuinfo_vendor_qualcomm, cpuinfo_get_cluster(i)->vendor);
+	}
+}
+
+TEST(CLUSTERS, uarch) {
+	for (uint32_t i = 0; i < cpuinfo_get_clusters_count(); i++) {
+		ASSERT_EQ(cpuinfo_uarch_krait, cpuinfo_get_cluster(i)->uarch);
+	}
+}
+
+TEST(CLUSTERS, midr) {
+	for (uint32_t i = 0; i < cpuinfo_get_clusters_count(); i++) {
+		ASSERT_EQ(UINT32_C(0x512F06F1), cpuinfo_get_cluster(i)->midr);
+	}
+}
+
+TEST(CLUSTERS, DISABLED_frequency) {
+	for (uint32_t i = 0; i < cpuinfo_get_clusters_count(); i++) {
+		ASSERT_EQ(UINT64_C(2457600000), cpuinfo_get_cluster(i)->frequency);
+	}
+}
+
 TEST(PACKAGES, count) {
 	ASSERT_EQ(1, cpuinfo_get_packages_count());
 }
diff --git a/test/mock/galaxy-s6.cc b/test/mock/galaxy-s6.cc
index d6d4f7b..2f8ac96 100644
--- a/test/mock/galaxy-s6.cc
+++ b/test/mock/galaxy-s6.cc
@@ -225,6 +225,109 @@
 	}
 }
 
+TEST(CLUSTERS, count) {
+	ASSERT_EQ(2, cpuinfo_get_clusters_count());
+}
+
+TEST(CLUSTERS, non_null) {
+	ASSERT_TRUE(cpuinfo_get_clusters());
+}
+
+TEST(CLUSTERS, processor_start) {
+	for (uint32_t i = 0; i < cpuinfo_get_clusters_count(); i++) {
+		switch (i) {
+			case 0:
+				ASSERT_EQ(0, cpuinfo_get_cluster(i)->processor_start);
+				break;
+			case 1:
+				ASSERT_EQ(4, cpuinfo_get_cluster(i)->processor_start);
+				break;
+		}
+	}
+}
+
+TEST(CLUSTERS, processor_count) {
+	for (uint32_t i = 0; i < cpuinfo_get_clusters_count(); i++) {
+		ASSERT_EQ(4, cpuinfo_get_cluster(i)->processor_count);
+	}
+}
+
+TEST(CLUSTERS, core_start) {
+	for (uint32_t i = 0; i < cpuinfo_get_clusters_count(); i++) {
+		switch (i) {
+			case 0:
+				ASSERT_EQ(0, cpuinfo_get_cluster(i)->core_start);
+				break;
+			case 1:
+				ASSERT_EQ(4, cpuinfo_get_cluster(i)->core_start);
+				break;
+		}
+	}
+}
+
+TEST(CLUSTERS, core_count) {
+	for (uint32_t i = 0; i < cpuinfo_get_clusters_count(); i++) {
+		ASSERT_EQ(4, cpuinfo_get_cluster(i)->core_count);
+	}
+}
+
+TEST(CLUSTERS, cluster_id) {
+	for (uint32_t i = 0; i < cpuinfo_get_clusters_count(); i++) {
+		ASSERT_EQ(i, cpuinfo_get_cluster(i)->cluster_id);
+	}
+}
+
+TEST(CLUSTERS, package) {
+	for (uint32_t i = 0; i < cpuinfo_get_clusters_count(); i++) {
+		ASSERT_EQ(cpuinfo_get_package(0), cpuinfo_get_cluster(i)->package);
+	}
+}
+
+TEST(CLUSTERS, vendor) {
+	for (uint32_t i = 0; i < cpuinfo_get_clusters_count(); i++) {
+		ASSERT_EQ(cpuinfo_vendor_arm, cpuinfo_get_cluster(i)->vendor);
+	}
+}
+
+TEST(CLUSTERS, uarch) {
+	for (uint32_t i = 0; i < cpuinfo_get_clusters_count(); i++) {
+		switch (i) {
+			case 0:
+				ASSERT_EQ(cpuinfo_uarch_cortex_a57, cpuinfo_get_cluster(i)->uarch);
+				break;
+			case 1:
+				ASSERT_EQ(cpuinfo_uarch_cortex_a53, cpuinfo_get_cluster(i)->uarch);
+				break;
+		}
+	}
+}
+
+TEST(CLUSTERS, midr) {
+	for (uint32_t i = 0; i < cpuinfo_get_clusters_count(); i++) {
+		switch (i) {
+			case 0:
+				ASSERT_EQ(UINT32_C(0x411FD070), cpuinfo_get_cluster(i)->midr);
+				break;
+			case 1:
+				ASSERT_EQ(UINT32_C(0x410FD032), cpuinfo_get_cluster(i)->midr);
+				break;
+		}
+	}
+}
+
+TEST(CLUSTERS, DISABLED_frequency) {
+	for (uint32_t i = 0; i < cpuinfo_get_clusters_count(); i++) {
+		switch (i) {
+			case 0:
+				ASSERT_EQ(UINT64_C(2100000000), cpuinfo_get_cluster(i)->frequency);
+				break;
+			case 1:
+				ASSERT_EQ(UINT64_C(1500000000), cpuinfo_get_cluster(i)->frequency);
+				break;
+		}
+	}
+}
+
 TEST(PACKAGES, count) {
 	ASSERT_EQ(1, cpuinfo_get_packages_count());
 }
diff --git a/test/mock/galaxy-s7-global.cc b/test/mock/galaxy-s7-global.cc
index 34217b9..f0945f5 100644
--- a/test/mock/galaxy-s7-global.cc
+++ b/test/mock/galaxy-s7-global.cc
@@ -238,6 +238,116 @@
 	}
 }
 
+TEST(CLUSTERS, count) {
+	ASSERT_EQ(2, cpuinfo_get_clusters_count());
+}
+
+TEST(CLUSTERS, non_null) {
+	ASSERT_TRUE(cpuinfo_get_clusters());
+}
+
+TEST(CLUSTERS, processor_start) {
+	for (uint32_t i = 0; i < cpuinfo_get_clusters_count(); i++) {
+		switch (i) {
+			case 0:
+				ASSERT_EQ(0, cpuinfo_get_cluster(i)->processor_start);
+				break;
+			case 1:
+				ASSERT_EQ(4, cpuinfo_get_cluster(i)->processor_start);
+				break;
+		}
+	}
+}
+
+TEST(CLUSTERS, processor_count) {
+	for (uint32_t i = 0; i < cpuinfo_get_clusters_count(); i++) {
+		ASSERT_EQ(4, cpuinfo_get_cluster(i)->processor_count);
+	}
+}
+
+TEST(CLUSTERS, core_start) {
+	for (uint32_t i = 0; i < cpuinfo_get_clusters_count(); i++) {
+		switch (i) {
+			case 0:
+				ASSERT_EQ(0, cpuinfo_get_cluster(i)->core_start);
+				break;
+			case 1:
+				ASSERT_EQ(4, cpuinfo_get_cluster(i)->core_start);
+				break;
+		}
+	}
+}
+
+TEST(CLUSTERS, core_count) {
+	for (uint32_t i = 0; i < cpuinfo_get_clusters_count(); i++) {
+		ASSERT_EQ(4, cpuinfo_get_cluster(i)->core_count);
+	}
+}
+
+TEST(CLUSTERS, cluster_id) {
+	for (uint32_t i = 0; i < cpuinfo_get_clusters_count(); i++) {
+		ASSERT_EQ(i, cpuinfo_get_cluster(i)->cluster_id);
+	}
+}
+
+TEST(CLUSTERS, package) {
+	for (uint32_t i = 0; i < cpuinfo_get_clusters_count(); i++) {
+		ASSERT_EQ(cpuinfo_get_package(0), cpuinfo_get_cluster(i)->package);
+	}
+}
+
+TEST(CLUSTERS, vendor) {
+	for (uint32_t i = 0; i < cpuinfo_get_clusters_count(); i++) {
+		switch (i) {
+			case 0:
+				ASSERT_EQ(cpuinfo_vendor_samsung, cpuinfo_get_cluster(i)->vendor);
+				break;
+			case 1:
+				ASSERT_EQ(cpuinfo_vendor_arm, cpuinfo_get_cluster(i)->vendor);
+				break;
+		}
+	}
+}
+
+TEST(CLUSTERS, uarch) {
+	for (uint32_t i = 0; i < cpuinfo_get_clusters_count(); i++) {
+		switch (i) {
+			case 0:
+				ASSERT_EQ(cpuinfo_uarch_mongoose_m1, cpuinfo_get_cluster(i)->uarch);
+				break;
+			case 1:
+				ASSERT_EQ(cpuinfo_uarch_cortex_a53, cpuinfo_get_cluster(i)->uarch);
+				break;
+		}
+	}
+}
+
+TEST(CLUSTERS, midr) {
+	for (uint32_t i = 0; i < cpuinfo_get_clusters_count(); i++) {
+		switch (i) {
+			case 0:
+				ASSERT_EQ(UINT32_C(0x531F0011), cpuinfo_get_cluster(i)->midr);
+				break;
+			case 1:
+				ASSERT_EQ(UINT32_C(0x410FD034), cpuinfo_get_cluster(i)->midr);
+				break;
+		}
+	}
+}
+
+TEST(CLUSTERS, DISABLED_frequency) {
+	for (uint32_t i = 0; i < cpuinfo_get_clusters_count(); i++) {
+		switch (i) {
+			case 0:
+				ASSERT_EQ(UINT64_C(2600000000), cpuinfo_get_cluster(i)->frequency);
+				break;
+			case 1:
+				ASSERT_EQ(UINT64_C(1586000000), cpuinfo_get_cluster(i)->frequency);
+				break;
+		}
+	}
+}
+
 TEST(PACKAGES, count) {
 	ASSERT_EQ(1, cpuinfo_get_packages_count());
 }
diff --git a/test/mock/galaxy-s7-us.cc b/test/mock/galaxy-s7-us.cc
index 2da531b..080b623 100644
--- a/test/mock/galaxy-s7-us.cc
+++ b/test/mock/galaxy-s7-us.cc
@@ -188,6 +188,102 @@
 	}
 }
 
+TEST(CLUSTERS, count) {
+	ASSERT_EQ(2, cpuinfo_get_clusters_count());
+}
+
+TEST(CLUSTERS, non_null) {
+	ASSERT_TRUE(cpuinfo_get_clusters());
+}
+
+TEST(CLUSTERS, processor_start) {
+	for (uint32_t i = 0; i < cpuinfo_get_clusters_count(); i++) {
+		switch (i) {
+			case 0:
+				ASSERT_EQ(0, cpuinfo_get_cluster(i)->processor_start);
+				break;
+			case 1:
+				ASSERT_EQ(2, cpuinfo_get_cluster(i)->processor_start);
+				break;
+		}
+	}
+}
+
+TEST(CLUSTERS, processor_count) {
+	for (uint32_t i = 0; i < cpuinfo_get_clusters_count(); i++) {
+		ASSERT_EQ(2, cpuinfo_get_cluster(i)->processor_count);
+	}
+}
+
+TEST(CLUSTERS, core_start) {
+	for (uint32_t i = 0; i < cpuinfo_get_clusters_count(); i++) {
+		switch (i) {
+			case 0:
+				ASSERT_EQ(0, cpuinfo_get_cluster(i)->core_start);
+				break;
+			case 1:
+				ASSERT_EQ(2, cpuinfo_get_cluster(i)->core_start);
+				break;
+		}
+	}
+}
+
+TEST(CLUSTERS, core_count) {
+	for (uint32_t i = 0; i < cpuinfo_get_clusters_count(); i++) {
+		ASSERT_EQ(2, cpuinfo_get_cluster(i)->core_count);
+	}
+}
+
+TEST(CLUSTERS, cluster_id) {
+	for (uint32_t i = 0; i < cpuinfo_get_clusters_count(); i++) {
+		ASSERT_EQ(i, cpuinfo_get_cluster(i)->cluster_id);
+	}
+}
+
+TEST(CLUSTERS, package) {
+	for (uint32_t i = 0; i < cpuinfo_get_clusters_count(); i++) {
+		ASSERT_EQ(cpuinfo_get_package(0), cpuinfo_get_cluster(i)->package);
+	}
+}
+
+TEST(CLUSTERS, vendor) {
+	for (uint32_t i = 0; i < cpuinfo_get_clusters_count(); i++) {
+		ASSERT_EQ(cpuinfo_vendor_qualcomm, cpuinfo_get_cluster(i)->vendor);
+	}
+}
+
+TEST(CLUSTERS, uarch) {
+	for (uint32_t i = 0; i < cpuinfo_get_clusters_count(); i++) {
+		ASSERT_EQ(cpuinfo_uarch_kryo, cpuinfo_get_cluster(i)->uarch);
+	}
+}
+
+TEST(CLUSTERS, midr) {
+	for (uint32_t i = 0; i < cpuinfo_get_clusters_count(); i++) {
+		switch (i) {
+			case 0:
+				ASSERT_EQ(UINT32_C(0x511F2052), cpuinfo_get_cluster(i)->midr);
+				break;
+			case 1:
+				ASSERT_EQ(UINT32_C(0x511F2112), cpuinfo_get_cluster(i)->midr);
+				break;
+		}
+	}
+}
+
+TEST(CLUSTERS, DISABLED_frequency) {
+	for (uint32_t i = 0; i < cpuinfo_get_clusters_count(); i++) {
+		switch (i) {
+			case 0:
+				ASSERT_EQ(UINT64_C(2150400000), cpuinfo_get_cluster(i)->frequency);
+				break;
+			case 1:
+				ASSERT_EQ(UINT64_C(1593600000), cpuinfo_get_cluster(i)->frequency);
+				break;
+		}
+	}
+}
+
 TEST(PACKAGES, count) {
 	ASSERT_EQ(1, cpuinfo_get_packages_count());
 }
diff --git a/test/mock/galaxy-s8-global.cc b/test/mock/galaxy-s8-global.cc
index 91c4cfd..3052c06 100644
--- a/test/mock/galaxy-s8-global.cc
+++ b/test/mock/galaxy-s8-global.cc
@@ -238,6 +238,116 @@
 	}
 }
 
+TEST(CLUSTERS, count) {
+	ASSERT_EQ(2, cpuinfo_get_clusters_count());
+}
+
+TEST(CLUSTERS, non_null) {
+	ASSERT_TRUE(cpuinfo_get_clusters());
+}
+
+TEST(CLUSTERS, processor_start) {
+	for (uint32_t i = 0; i < cpuinfo_get_clusters_count(); i++) {
+		switch (i) {
+			case 0:
+				ASSERT_EQ(0, cpuinfo_get_cluster(i)->processor_start);
+				break;
+			case 1:
+				ASSERT_EQ(4, cpuinfo_get_cluster(i)->processor_start);
+				break;
+		}
+	}
+}
+
+TEST(CLUSTERS, processor_count) {
+	for (uint32_t i = 0; i < cpuinfo_get_clusters_count(); i++) {
+		ASSERT_EQ(4, cpuinfo_get_cluster(i)->processor_count);
+	}
+}
+
+TEST(CLUSTERS, core_start) {
+	for (uint32_t i = 0; i < cpuinfo_get_clusters_count(); i++) {
+		switch (i) {
+			case 0:
+				ASSERT_EQ(0, cpuinfo_get_cluster(i)->core_start);
+				break;
+			case 1:
+				ASSERT_EQ(4, cpuinfo_get_cluster(i)->core_start);
+				break;
+		}
+	}
+}
+
+TEST(CLUSTERS, core_count) {
+	for (uint32_t i = 0; i < cpuinfo_get_clusters_count(); i++) {
+		ASSERT_EQ(4, cpuinfo_get_cluster(i)->core_count);
+	}
+}
+
+TEST(CLUSTERS, cluster_id) {
+	for (uint32_t i = 0; i < cpuinfo_get_clusters_count(); i++) {
+		ASSERT_EQ(i, cpuinfo_get_cluster(i)->cluster_id);
+	}
+}
+
+TEST(CLUSTERS, package) {
+	for (uint32_t i = 0; i < cpuinfo_get_clusters_count(); i++) {
+		ASSERT_EQ(cpuinfo_get_package(0), cpuinfo_get_cluster(i)->package);
+	}
+}
+
+TEST(CLUSTERS, vendor) {
+	for (uint32_t i = 0; i < cpuinfo_get_clusters_count(); i++) {
+		switch (i) {
+			case 0:
+				ASSERT_EQ(cpuinfo_vendor_samsung, cpuinfo_get_cluster(i)->vendor);
+				break;
+			case 1:
+				ASSERT_EQ(cpuinfo_vendor_arm, cpuinfo_get_cluster(i)->vendor);
+				break;
+		}
+	}
+}
+
+TEST(CLUSTERS, uarch) {
+	for (uint32_t i = 0; i < cpuinfo_get_clusters_count(); i++) {
+		switch (i) {
+			case 0:
+				ASSERT_EQ(cpuinfo_uarch_mongoose_m2, cpuinfo_get_cluster(i)->uarch);
+				break;
+			case 1:
+				ASSERT_EQ(cpuinfo_uarch_cortex_a53, cpuinfo_get_cluster(i)->uarch);
+				break;
+		}
+	}
+}
+
+TEST(CLUSTERS, midr) {
+	for (uint32_t i = 0; i < cpuinfo_get_clusters_count(); i++) {
+		switch (i) {
+			case 0:
+				ASSERT_EQ(UINT32_C(0x534F0010), cpuinfo_get_cluster(i)->midr);
+				break;
+			case 1:
+				ASSERT_EQ(UINT32_C(0x410FD034), cpuinfo_get_cluster(i)->midr);
+				break;
+		}
+	}
+}
+
+TEST(CLUSTERS, DISABLED_frequency) {
+	for (uint32_t i = 0; i < cpuinfo_get_clusters_count(); i++) {
+		switch (i) {
+			case 0:
+				ASSERT_EQ(UINT64_C(2314000000), cpuinfo_get_cluster(i)->frequency);
+				break;
+			case 1:
+				ASSERT_EQ(UINT64_C(1690000000), cpuinfo_get_cluster(i)->frequency);
+				break;
+		}
+	}
+}
+
 TEST(PACKAGES, count) {
 	ASSERT_EQ(1, cpuinfo_get_packages_count());
 }
diff --git a/test/mock/galaxy-s8-us.cc b/test/mock/galaxy-s8-us.cc
index 74aceb4..f5cd0c6 100644
--- a/test/mock/galaxy-s8-us.cc
+++ b/test/mock/galaxy-s8-us.cc
@@ -225,6 +225,109 @@
 	}
 }
 
+TEST(CLUSTERS, count) {
+	ASSERT_EQ(2, cpuinfo_get_clusters_count());
+}
+
+TEST(CLUSTERS, non_null) {
+	ASSERT_TRUE(cpuinfo_get_clusters());
+}
+
+TEST(CLUSTERS, processor_start) {
+	for (uint32_t i = 0; i < cpuinfo_get_clusters_count(); i++) {
+		switch (i) {
+			case 0:
+				ASSERT_EQ(0, cpuinfo_get_cluster(i)->processor_start);
+				break;
+			case 1:
+				ASSERT_EQ(4, cpuinfo_get_cluster(i)->processor_start);
+				break;
+		}
+	}
+}
+
+TEST(CLUSTERS, processor_count) {
+	for (uint32_t i = 0; i < cpuinfo_get_clusters_count(); i++) {
+		ASSERT_EQ(4, cpuinfo_get_cluster(i)->processor_count);
+	}
+}
+
+TEST(CLUSTERS, core_start) {
+	for (uint32_t i = 0; i < cpuinfo_get_clusters_count(); i++) {
+		switch (i) {
+			case 0:
+				ASSERT_EQ(0, cpuinfo_get_cluster(i)->core_start);
+				break;
+			case 1:
+				ASSERT_EQ(4, cpuinfo_get_cluster(i)->core_start);
+				break;
+		}
+	}
+}
+
+TEST(CLUSTERS, core_count) {
+	for (uint32_t i = 0; i < cpuinfo_get_clusters_count(); i++) {
+		ASSERT_EQ(4, cpuinfo_get_cluster(i)->core_count);
+	}
+}
+
+TEST(CLUSTERS, cluster_id) {
+	for (uint32_t i = 0; i < cpuinfo_get_clusters_count(); i++) {
+		ASSERT_EQ(i, cpuinfo_get_cluster(i)->cluster_id);
+	}
+}
+
+TEST(CLUSTERS, package) {
+	for (uint32_t i = 0; i < cpuinfo_get_clusters_count(); i++) {
+		ASSERT_EQ(cpuinfo_get_package(0), cpuinfo_get_cluster(i)->package);
+	}
+}
+
+TEST(CLUSTERS, vendor) {
+	for (uint32_t i = 0; i < cpuinfo_get_clusters_count(); i++) {
+		ASSERT_EQ(cpuinfo_vendor_arm, cpuinfo_get_cluster(i)->vendor);
+	}
+}
+
+TEST(CLUSTERS, uarch) {
+	for (uint32_t i = 0; i < cpuinfo_get_clusters_count(); i++) {
+		switch (i) {
+			case 0:
+				ASSERT_EQ(cpuinfo_uarch_cortex_a73, cpuinfo_get_cluster(i)->uarch);
+				break;
+			case 1:
+				ASSERT_EQ(cpuinfo_uarch_cortex_a53, cpuinfo_get_cluster(i)->uarch);
+				break;
+		}
+	}
+}
+
+TEST(CLUSTERS, midr) {
+	for (uint32_t i = 0; i < cpuinfo_get_clusters_count(); i++) {
+		switch (i) {
+			case 0:
+				ASSERT_EQ(UINT32_C(0x51AF8001), cpuinfo_get_cluster(i)->midr);
+				break;
+			case 1:
+				ASSERT_EQ(UINT32_C(0x51AF8014), cpuinfo_get_cluster(i)->midr);
+				break;
+		}
+	}
+}
+
+TEST(CLUSTERS, DISABLED_frequency) {
+	for (uint32_t i = 0; i < cpuinfo_get_clusters_count(); i++) {
+		switch (i) {
+			case 0:
+				ASSERT_EQ(UINT64_C(2361600000), cpuinfo_get_cluster(i)->frequency);
+				break;
+			case 1:
+				ASSERT_EQ(UINT64_C(1900800000), cpuinfo_get_cluster(i)->frequency);
+				break;
+		}
+	}
+}
+
 TEST(PACKAGES, count) {
 	ASSERT_EQ(1, cpuinfo_get_packages_count());
 }
diff --git a/test/mock/galaxy-s9-us.cc b/test/mock/galaxy-s9-us.cc
index 9ab392f..6904a4b 100644
--- a/test/mock/galaxy-s9-us.cc
+++ b/test/mock/galaxy-s9-us.cc
@@ -212,6 +212,109 @@
 	}
 }
 
+TEST(CLUSTERS, count) {
+	ASSERT_EQ(2, cpuinfo_get_clusters_count());
+}
+
+TEST(CLUSTERS, non_null) {
+	ASSERT_TRUE(cpuinfo_get_clusters());
+}
+
+TEST(CLUSTERS, processor_start) {
+	for (uint32_t i = 0; i < cpuinfo_get_clusters_count(); i++) {
+		switch (i) {
+			case 0:
+				ASSERT_EQ(0, cpuinfo_get_cluster(i)->processor_start);
+				break;
+			case 1:
+				ASSERT_EQ(4, cpuinfo_get_cluster(i)->processor_start);
+				break;
+		}
+	}
+}
+
+TEST(CLUSTERS, processor_count) {
+	for (uint32_t i = 0; i < cpuinfo_get_clusters_count(); i++) {
+		ASSERT_EQ(4, cpuinfo_get_cluster(i)->processor_count);
+	}
+}
+
+TEST(CLUSTERS, core_start) {
+	for (uint32_t i = 0; i < cpuinfo_get_clusters_count(); i++) {
+		switch (i) {
+			case 0:
+				ASSERT_EQ(0, cpuinfo_get_cluster(i)->core_start);
+				break;
+			case 1:
+				ASSERT_EQ(4, cpuinfo_get_cluster(i)->core_start);
+				break;
+		}
+	}
+}
+
+TEST(CLUSTERS, core_count) {
+	for (uint32_t i = 0; i < cpuinfo_get_clusters_count(); i++) {
+		ASSERT_EQ(4, cpuinfo_get_cluster(i)->core_count);
+	}
+}
+
+TEST(CLUSTERS, cluster_id) {
+	for (uint32_t i = 0; i < cpuinfo_get_clusters_count(); i++) {
+		ASSERT_EQ(i, cpuinfo_get_cluster(i)->cluster_id);
+	}
+}
+
+TEST(CLUSTERS, package) {
+	for (uint32_t i = 0; i < cpuinfo_get_clusters_count(); i++) {
+		ASSERT_EQ(cpuinfo_get_package(0), cpuinfo_get_cluster(i)->package);
+	}
+}
+
+TEST(CLUSTERS, vendor) {
+	for (uint32_t i = 0; i < cpuinfo_get_clusters_count(); i++) {
+		ASSERT_EQ(cpuinfo_vendor_arm, cpuinfo_get_cluster(i)->vendor);
+	}
+}
+
+TEST(CLUSTERS, uarch) {
+	for (uint32_t i = 0; i < cpuinfo_get_clusters_count(); i++) {
+		switch (i) {
+			case 0:
+				ASSERT_EQ(cpuinfo_uarch_cortex_a75, cpuinfo_get_cluster(i)->uarch);
+				break;
+			case 1:
+				ASSERT_EQ(cpuinfo_uarch_cortex_a55, cpuinfo_get_cluster(i)->uarch);
+				break;
+		}
+	}
+}
+
+TEST(CLUSTERS, midr) {
+	for (uint32_t i = 0; i < cpuinfo_get_clusters_count(); i++) {
+		switch (i) {
+			case 0:
+				ASSERT_EQ(UINT32_C(0x516F802D), cpuinfo_get_cluster(i)->midr);
+				break;
+			case 1:
+				ASSERT_EQ(UINT32_C(0x517F803C), cpuinfo_get_cluster(i)->midr);
+				break;
+		}
+	}
+}
+
+TEST(CLUSTERS, DISABLED_frequency) {
+	for (uint32_t i = 0; i < cpuinfo_get_clusters_count(); i++) {
+		switch (i) {
+			case 0:
+				ASSERT_EQ(UINT64_C(2803200000), cpuinfo_get_cluster(i)->frequency);
+				break;
+			case 1:
+				ASSERT_EQ(UINT64_C(1766400000), cpuinfo_get_cluster(i)->frequency);
+				break;
+		}
+	}
+}
+
 TEST(PACKAGES, count) {
 	ASSERT_EQ(1, cpuinfo_get_packages_count());
 }
diff --git a/test/mock/galaxy-tab-3-7.0.cc b/test/mock/galaxy-tab-3-7.0.cc
index 51d83b3..2d42830 100644
--- a/test/mock/galaxy-tab-3-7.0.cc
+++ b/test/mock/galaxy-tab-3-7.0.cc
@@ -134,6 +134,74 @@
 	}
 }
 
+TEST(CLUSTERS, count) {
+	ASSERT_EQ(1, cpuinfo_get_clusters_count());
+}
+
+TEST(CLUSTERS, non_null) {
+	ASSERT_TRUE(cpuinfo_get_clusters());
+}
+
+TEST(CLUSTERS, processor_start) {
+	for (uint32_t i = 0; i < cpuinfo_get_clusters_count(); i++) {
+		ASSERT_EQ(0, cpuinfo_get_cluster(i)->processor_start);
+	}
+}
+
+TEST(CLUSTERS, processor_count) {
+	for (uint32_t i = 0; i < cpuinfo_get_clusters_count(); i++) {
+		ASSERT_EQ(2, cpuinfo_get_cluster(i)->processor_count);
+	}
+}
+
+TEST(CLUSTERS, core_start) {
+	for (uint32_t i = 0; i < cpuinfo_get_clusters_count(); i++) {
+		ASSERT_EQ(0, cpuinfo_get_cluster(i)->core_start);
+	}
+}
+
+TEST(CLUSTERS, core_count) {
+	for (uint32_t i = 0; i < cpuinfo_get_clusters_count(); i++) {
+		ASSERT_EQ(2, cpuinfo_get_cluster(i)->core_count);
+	}
+}
+
+TEST(CLUSTERS, cluster_id) {
+	for (uint32_t i = 0; i < cpuinfo_get_clusters_count(); i++) {
+		ASSERT_EQ(i, cpuinfo_get_cluster(i)->cluster_id);
+	}
+}
+
+TEST(CLUSTERS, package) {
+	for (uint32_t i = 0; i < cpuinfo_get_clusters_count(); i++) {
+		ASSERT_EQ(cpuinfo_get_package(0), cpuinfo_get_cluster(i)->package);
+	}
+}
+
+TEST(CLUSTERS, vendor) {
+	for (uint32_t i = 0; i < cpuinfo_get_clusters_count(); i++) {
+		ASSERT_EQ(cpuinfo_vendor_arm, cpuinfo_get_cluster(i)->vendor);
+	}
+}
+
+TEST(CLUSTERS, uarch) {
+	for (uint32_t i = 0; i < cpuinfo_get_clusters_count(); i++) {
+		ASSERT_EQ(cpuinfo_uarch_cortex_a9, cpuinfo_get_cluster(i)->uarch);
+	}
+}
+
+TEST(CLUSTERS, midr) {
+	for (uint32_t i = 0; i < cpuinfo_get_clusters_count(); i++) {
+		ASSERT_EQ(UINT32_C(0x413FC090), cpuinfo_get_cluster(i)->midr);
+	}
+}
+
+TEST(CLUSTERS, DISABLED_frequency) {
+	for (uint32_t i = 0; i < cpuinfo_get_clusters_count(); i++) {
+		ASSERT_EQ(UINT64_C(1205000000), cpuinfo_get_cluster(i)->frequency);
+	}
+}
+
 TEST(PACKAGES, count) {
 	ASSERT_EQ(1, cpuinfo_get_packages_count());
 }
diff --git a/test/mock/galaxy-tab-3-lite.cc b/test/mock/galaxy-tab-3-lite.cc
index 2d92243..c5d3d9b 100644
--- a/test/mock/galaxy-tab-3-lite.cc
+++ b/test/mock/galaxy-tab-3-lite.cc
@@ -134,6 +134,74 @@
 	}
 }
 
+TEST(CLUSTERS, count) {
+	ASSERT_EQ(1, cpuinfo_get_clusters_count());
+}
+
+TEST(CLUSTERS, non_null) {
+	ASSERT_TRUE(cpuinfo_get_clusters());
+}
+
+TEST(CLUSTERS, processor_start) {
+	for (uint32_t i = 0; i < cpuinfo_get_clusters_count(); i++) {
+		ASSERT_EQ(0, cpuinfo_get_cluster(i)->processor_start);
+	}
+}
+
+TEST(CLUSTERS, processor_count) {
+	for (uint32_t i = 0; i < cpuinfo_get_clusters_count(); i++) {
+		ASSERT_EQ(4, cpuinfo_get_cluster(i)->processor_count);
+	}
+}
+
+TEST(CLUSTERS, core_start) {
+	for (uint32_t i = 0; i < cpuinfo_get_clusters_count(); i++) {
+		ASSERT_EQ(0, cpuinfo_get_cluster(i)->core_start);
+	}
+}
+
+TEST(CLUSTERS, core_count) {
+	for (uint32_t i = 0; i < cpuinfo_get_clusters_count(); i++) {
+		ASSERT_EQ(4, cpuinfo_get_cluster(i)->core_count);
+	}
+}
+
+TEST(CLUSTERS, cluster_id) {
+	for (uint32_t i = 0; i < cpuinfo_get_clusters_count(); i++) {
+		ASSERT_EQ(i, cpuinfo_get_cluster(i)->cluster_id);
+	}
+}
+
+TEST(CLUSTERS, package) {
+	for (uint32_t i = 0; i < cpuinfo_get_clusters_count(); i++) {
+		ASSERT_EQ(cpuinfo_get_package(0), cpuinfo_get_cluster(i)->package);
+	}
+}
+
+TEST(CLUSTERS, vendor) {
+	for (uint32_t i = 0; i < cpuinfo_get_clusters_count(); i++) {
+		ASSERT_EQ(cpuinfo_vendor_arm, cpuinfo_get_cluster(i)->vendor);
+	}
+}
+
+TEST(CLUSTERS, uarch) {
+	for (uint32_t i = 0; i < cpuinfo_get_clusters_count(); i++) {
+		ASSERT_EQ(cpuinfo_uarch_cortex_a7, cpuinfo_get_cluster(i)->uarch);
+	}
+}
+
+TEST(CLUSTERS, midr) {
+	for (uint32_t i = 0; i < cpuinfo_get_clusters_count(); i++) {
+		ASSERT_EQ(UINT32_C(0x410FC075), cpuinfo_get_cluster(i)->midr);
+	}
+}
+
+TEST(CLUSTERS, DISABLED_frequency) {
+	for (uint32_t i = 0; i < cpuinfo_get_clusters_count(); i++) {
+		ASSERT_EQ(UINT64_C(1300000000), cpuinfo_get_cluster(i)->frequency);
+	}
+}
+
 TEST(PACKAGES, count) {
 	ASSERT_EQ(1, cpuinfo_get_packages_count());
 }
diff --git a/test/mock/galaxy-win-duos.cc b/test/mock/galaxy-win-duos.cc
index fa0095b..5cc857c 100644
--- a/test/mock/galaxy-win-duos.cc
+++ b/test/mock/galaxy-win-duos.cc
@@ -134,6 +134,74 @@
 	}
 }
 
+TEST(CLUSTERS, count) {
+	ASSERT_EQ(1, cpuinfo_get_clusters_count());
+}
+
+TEST(CLUSTERS, non_null) {
+	ASSERT_TRUE(cpuinfo_get_clusters());
+}
+
+TEST(CLUSTERS, processor_start) {
+	for (uint32_t i = 0; i < cpuinfo_get_clusters_count(); i++) {
+		ASSERT_EQ(0, cpuinfo_get_cluster(i)->processor_start);
+	}
+}
+
+TEST(CLUSTERS, processor_count) {
+	for (uint32_t i = 0; i < cpuinfo_get_clusters_count(); i++) {
+		ASSERT_EQ(4, cpuinfo_get_cluster(i)->processor_count);
+	}
+}
+
+TEST(CLUSTERS, core_start) {
+	for (uint32_t i = 0; i < cpuinfo_get_clusters_count(); i++) {
+		ASSERT_EQ(0, cpuinfo_get_cluster(i)->core_start);
+	}
+}
+
+TEST(CLUSTERS, core_count) {
+	for (uint32_t i = 0; i < cpuinfo_get_clusters_count(); i++) {
+		ASSERT_EQ(4, cpuinfo_get_cluster(i)->core_count);
+	}
+}
+
+TEST(CLUSTERS, cluster_id) {
+	for (uint32_t i = 0; i < cpuinfo_get_clusters_count(); i++) {
+		ASSERT_EQ(i, cpuinfo_get_cluster(i)->cluster_id);
+	}
+}
+
+TEST(CLUSTERS, package) {
+	for (uint32_t i = 0; i < cpuinfo_get_clusters_count(); i++) {
+		ASSERT_EQ(cpuinfo_get_package(0), cpuinfo_get_cluster(i)->package);
+	}
+}
+
+TEST(CLUSTERS, vendor) {
+	for (uint32_t i = 0; i < cpuinfo_get_clusters_count(); i++) {
+		ASSERT_EQ(cpuinfo_vendor_arm, cpuinfo_get_cluster(i)->vendor);
+	}
+}
+
+TEST(CLUSTERS, uarch) {
+	for (uint32_t i = 0; i < cpuinfo_get_clusters_count(); i++) {
+		ASSERT_EQ(cpuinfo_uarch_cortex_a5, cpuinfo_get_cluster(i)->uarch);
+	}
+}
+
+TEST(CLUSTERS, midr) {
+	for (uint32_t i = 0; i < cpuinfo_get_clusters_count(); i++) {
+		ASSERT_EQ(UINT32_C(0x410FC051), cpuinfo_get_cluster(i)->midr);
+	}
+}
+
+TEST(CLUSTERS, DISABLED_frequency) {
+	for (uint32_t i = 0; i < cpuinfo_get_clusters_count(); i++) {
+		ASSERT_EQ(UINT64_C(1209600000), cpuinfo_get_cluster(i)->frequency);
+	}
+}
+
 TEST(PACKAGES, count) {
 	ASSERT_EQ(1, cpuinfo_get_packages_count());
 }
diff --git a/test/mock/huawei-ascend-p7.cc b/test/mock/huawei-ascend-p7.cc
index 058a680..d4d8412 100644
--- a/test/mock/huawei-ascend-p7.cc
+++ b/test/mock/huawei-ascend-p7.cc
@@ -134,6 +134,74 @@
 	}
 }
 
+TEST(CLUSTERS, count) {
+	ASSERT_EQ(1, cpuinfo_get_clusters_count());
+}
+
+TEST(CLUSTERS, non_null) {
+	ASSERT_TRUE(cpuinfo_get_clusters());
+}
+
+TEST(CLUSTERS, processor_start) {
+	for (uint32_t i = 0; i < cpuinfo_get_clusters_count(); i++) {
+		ASSERT_EQ(0, cpuinfo_get_cluster(i)->processor_start);
+	}
+}
+
+TEST(CLUSTERS, processor_count) {
+	for (uint32_t i = 0; i < cpuinfo_get_clusters_count(); i++) {
+		ASSERT_EQ(4, cpuinfo_get_cluster(i)->processor_count);
+	}
+}
+
+TEST(CLUSTERS, core_start) {
+	for (uint32_t i = 0; i < cpuinfo_get_clusters_count(); i++) {
+		ASSERT_EQ(0, cpuinfo_get_cluster(i)->core_start);
+	}
+}
+
+TEST(CLUSTERS, core_count) {
+	for (uint32_t i = 0; i < cpuinfo_get_clusters_count(); i++) {
+		ASSERT_EQ(4, cpuinfo_get_cluster(i)->core_count);
+	}
+}
+
+TEST(CLUSTERS, cluster_id) {
+	for (uint32_t i = 0; i < cpuinfo_get_clusters_count(); i++) {
+		ASSERT_EQ(i, cpuinfo_get_cluster(i)->cluster_id);
+	}
+}
+
+TEST(CLUSTERS, package) {
+	for (uint32_t i = 0; i < cpuinfo_get_clusters_count(); i++) {
+		ASSERT_EQ(cpuinfo_get_package(0), cpuinfo_get_cluster(i)->package);
+	}
+}
+
+TEST(CLUSTERS, vendor) {
+	for (uint32_t i = 0; i < cpuinfo_get_clusters_count(); i++) {
+		ASSERT_EQ(cpuinfo_vendor_arm, cpuinfo_get_cluster(i)->vendor);
+	}
+}
+
+TEST(CLUSTERS, uarch) {
+	for (uint32_t i = 0; i < cpuinfo_get_clusters_count(); i++) {
+		ASSERT_EQ(cpuinfo_uarch_cortex_a9, cpuinfo_get_cluster(i)->uarch);
+	}
+}
+
+TEST(CLUSTERS, midr) {
+	for (uint32_t i = 0; i < cpuinfo_get_clusters_count(); i++) {
+		ASSERT_EQ(UINT32_C(0x414FC091), cpuinfo_get_cluster(i)->midr);
+	}
+}
+
+TEST(CLUSTERS, DISABLED_frequency) {
+	for (uint32_t i = 0; i < cpuinfo_get_clusters_count(); i++) {
+		ASSERT_EQ(UINT64_C(1795000000), cpuinfo_get_cluster(i)->frequency);
+	}
+}
+
 TEST(PACKAGES, count) {
 	ASSERT_EQ(1, cpuinfo_get_packages_count());
 }
diff --git a/test/mock/huawei-mate-10.cc b/test/mock/huawei-mate-10.cc
index f307a95..fb51bef 100644
--- a/test/mock/huawei-mate-10.cc
+++ b/test/mock/huawei-mate-10.cc
@@ -225,6 +225,109 @@
 	}
 }
 
+TEST(CLUSTERS, count) {
+	ASSERT_EQ(2, cpuinfo_get_clusters_count());
+}
+
+TEST(CLUSTERS, non_null) {
+	ASSERT_TRUE(cpuinfo_get_clusters());
+}
+
+TEST(CLUSTERS, processor_start) {
+	for (uint32_t i = 0; i < cpuinfo_get_clusters_count(); i++) {
+		switch (i) {
+			case 0:
+				ASSERT_EQ(0, cpuinfo_get_cluster(i)->processor_start);
+				break;
+			case 1:
+				ASSERT_EQ(4, cpuinfo_get_cluster(i)->processor_start);
+				break;
+		}
+	}
+}
+
+TEST(CLUSTERS, processor_count) {
+	for (uint32_t i = 0; i < cpuinfo_get_clusters_count(); i++) {
+		ASSERT_EQ(4, cpuinfo_get_cluster(i)->processor_count);
+	}
+}
+
+TEST(CLUSTERS, core_start) {
+	for (uint32_t i = 0; i < cpuinfo_get_clusters_count(); i++) {
+		switch (i) {
+			case 0:
+				ASSERT_EQ(0, cpuinfo_get_cluster(i)->core_start);
+				break;
+			case 1:
+				ASSERT_EQ(4, cpuinfo_get_cluster(i)->core_start);
+				break;
+		}
+	}
+}
+
+TEST(CLUSTERS, core_count) {
+	for (uint32_t i = 0; i < cpuinfo_get_clusters_count(); i++) {
+		ASSERT_EQ(4, cpuinfo_get_cluster(i)->core_count);
+	}
+}
+
+TEST(CLUSTERS, cluster_id) {
+	for (uint32_t i = 0; i < cpuinfo_get_clusters_count(); i++) {
+		ASSERT_EQ(i, cpuinfo_get_cluster(i)->cluster_id);
+	}
+}
+
+TEST(CLUSTERS, package) {
+	for (uint32_t i = 0; i < cpuinfo_get_clusters_count(); i++) {
+		ASSERT_EQ(cpuinfo_get_package(0), cpuinfo_get_cluster(i)->package);
+	}
+}
+
+TEST(CLUSTERS, vendor) {
+	for (uint32_t i = 0; i < cpuinfo_get_clusters_count(); i++) {
+		ASSERT_EQ(cpuinfo_vendor_arm, cpuinfo_get_cluster(i)->vendor);
+	}
+}
+
+TEST(CLUSTERS, uarch) {
+	for (uint32_t i = 0; i < cpuinfo_get_clusters_count(); i++) {
+		switch (i) {
+			case 0:
+				ASSERT_EQ(cpuinfo_uarch_cortex_a73, cpuinfo_get_cluster(i)->uarch);
+				break;
+			case 1:
+				ASSERT_EQ(cpuinfo_uarch_cortex_a53, cpuinfo_get_cluster(i)->uarch);
+				break;
+		}
+	}
+}
+
+TEST(CLUSTERS, midr) {
+	for (uint32_t i = 0; i < cpuinfo_get_clusters_count(); i++) {
+		switch (i) {
+			case 0:
+				ASSERT_EQ(UINT32_C(0x410FD092), cpuinfo_get_cluster(i)->midr);
+				break;
+			case 1:
+				ASSERT_EQ(UINT32_C(0x410FD034), cpuinfo_get_cluster(i)->midr);
+				break;
+		}
+	}
+}
+
+TEST(CLUSTERS, DISABLED_frequency) {
+	for (uint32_t i = 0; i < cpuinfo_get_clusters_count(); i++) {
+		switch (i) {
+			case 0:
+				ASSERT_EQ(UINT64_C(2362000000), cpuinfo_get_cluster(i)->frequency);
+				break;
+			case 1:
+				ASSERT_EQ(UINT64_C(1844000000), cpuinfo_get_cluster(i)->frequency);
+				break;
+		}
+	}
+}
+
 TEST(PACKAGES, count) {
 	ASSERT_EQ(1, cpuinfo_get_packages_count());
 }
diff --git a/test/mock/huawei-mate-8.cc b/test/mock/huawei-mate-8.cc
index e563b59..902f686 100644
--- a/test/mock/huawei-mate-8.cc
+++ b/test/mock/huawei-mate-8.cc
@@ -225,6 +225,109 @@
 	}
 }
 
+TEST(CLUSTERS, count) {
+	ASSERT_EQ(2, cpuinfo_get_clusters_count());
+}
+
+TEST(CLUSTERS, non_null) {
+	ASSERT_TRUE(cpuinfo_get_clusters());
+}
+
+TEST(CLUSTERS, processor_start) {
+	for (uint32_t i = 0; i < cpuinfo_get_clusters_count(); i++) {
+		switch (i) {
+			case 0:
+				ASSERT_EQ(0, cpuinfo_get_cluster(i)->processor_start);
+				break;
+			case 1:
+				ASSERT_EQ(4, cpuinfo_get_cluster(i)->processor_start);
+				break;
+		}
+	}
+}
+
+TEST(CLUSTERS, processor_count) {
+	for (uint32_t i = 0; i < cpuinfo_get_clusters_count(); i++) {
+		ASSERT_EQ(4, cpuinfo_get_cluster(i)->processor_count);
+	}
+}
+
+TEST(CLUSTERS, core_start) {
+	for (uint32_t i = 0; i < cpuinfo_get_clusters_count(); i++) {
+		switch (i) {
+			case 0:
+				ASSERT_EQ(0, cpuinfo_get_cluster(i)->core_start);
+				break;
+			case 1:
+				ASSERT_EQ(4, cpuinfo_get_cluster(i)->core_start);
+				break;
+		}
+	}
+}
+
+TEST(CLUSTERS, core_count) {
+	for (uint32_t i = 0; i < cpuinfo_get_clusters_count(); i++) {
+		ASSERT_EQ(4, cpuinfo_get_cluster(i)->core_count);
+	}
+}
+
+TEST(CLUSTERS, cluster_id) {
+	for (uint32_t i = 0; i < cpuinfo_get_clusters_count(); i++) {
+		ASSERT_EQ(i, cpuinfo_get_cluster(i)->cluster_id);
+	}
+}
+
+TEST(CLUSTERS, package) {
+	for (uint32_t i = 0; i < cpuinfo_get_clusters_count(); i++) {
+		ASSERT_EQ(cpuinfo_get_package(0), cpuinfo_get_cluster(i)->package);
+	}
+}
+
+TEST(CLUSTERS, vendor) {
+	for (uint32_t i = 0; i < cpuinfo_get_clusters_count(); i++) {
+		ASSERT_EQ(cpuinfo_vendor_arm, cpuinfo_get_cluster(i)->vendor);
+	}
+}
+
+TEST(CLUSTERS, uarch) {
+	for (uint32_t i = 0; i < cpuinfo_get_clusters_count(); i++) {
+		switch (i) {
+			case 0:
+				ASSERT_EQ(cpuinfo_uarch_cortex_a72, cpuinfo_get_cluster(i)->uarch);
+				break;
+			case 1:
+				ASSERT_EQ(cpuinfo_uarch_cortex_a53, cpuinfo_get_cluster(i)->uarch);
+				break;
+		}
+	}
+}
+
+TEST(CLUSTERS, midr) {
+	for (uint32_t i = 0; i < cpuinfo_get_clusters_count(); i++) {
+		switch (i) {
+			case 0:
+				ASSERT_EQ(UINT32_C(0x410FD080), cpuinfo_get_cluster(i)->midr);
+				break;
+			case 1:
+				ASSERT_EQ(UINT32_C(0x410FD034), cpuinfo_get_cluster(i)->midr);
+				break;
+		}
+	}
+}
+
+TEST(CLUSTERS, DISABLED_frequency) {
+	for (uint32_t i = 0; i < cpuinfo_get_clusters_count(); i++) {
+		switch (i) {
+			case 0:
+				ASSERT_EQ(UINT64_C(2304000000), cpuinfo_get_cluster(i)->frequency);
+				break;
+			case 1:
+				ASSERT_EQ(UINT64_C(1805000000), cpuinfo_get_cluster(i)->frequency);
+				break;
+		}
+	}
+}
+
 TEST(PACKAGES, count) {
 	ASSERT_EQ(1, cpuinfo_get_packages_count());
 }
diff --git a/test/mock/huawei-mate-9.cc b/test/mock/huawei-mate-9.cc
index bb9f301..1730179 100644
--- a/test/mock/huawei-mate-9.cc
+++ b/test/mock/huawei-mate-9.cc
@@ -225,6 +225,109 @@
 	}
 }
 
+TEST(CLUSTERS, count) {
+	ASSERT_EQ(2, cpuinfo_get_clusters_count());
+}
+
+TEST(CLUSTERS, non_null) {
+	ASSERT_TRUE(cpuinfo_get_clusters());
+}
+
+TEST(CLUSTERS, processor_start) {
+	for (uint32_t i = 0; i < cpuinfo_get_clusters_count(); i++) {
+		switch (i) {
+			case 0:
+				ASSERT_EQ(0, cpuinfo_get_cluster(i)->processor_start);
+				break;
+			case 1:
+				ASSERT_EQ(4, cpuinfo_get_cluster(i)->processor_start);
+				break;
+		}
+	}
+}
+
+TEST(CLUSTERS, processor_count) {
+	for (uint32_t i = 0; i < cpuinfo_get_clusters_count(); i++) {
+		ASSERT_EQ(4, cpuinfo_get_cluster(i)->processor_count);
+	}
+}
+
+TEST(CLUSTERS, core_start) {
+	for (uint32_t i = 0; i < cpuinfo_get_clusters_count(); i++) {
+		switch (i) {
+			case 0:
+				ASSERT_EQ(0, cpuinfo_get_cluster(i)->core_start);
+				break;
+			case 1:
+				ASSERT_EQ(4, cpuinfo_get_cluster(i)->core_start);
+				break;
+		}
+	}
+}
+
+TEST(CLUSTERS, core_count) {
+	for (uint32_t i = 0; i < cpuinfo_get_clusters_count(); i++) {
+		ASSERT_EQ(4, cpuinfo_get_cluster(i)->core_count);
+	}
+}
+
+TEST(CLUSTERS, cluster_id) {
+	for (uint32_t i = 0; i < cpuinfo_get_clusters_count(); i++) {
+		ASSERT_EQ(i, cpuinfo_get_cluster(i)->cluster_id);
+	}
+}
+
+TEST(CLUSTERS, package) {
+	for (uint32_t i = 0; i < cpuinfo_get_clusters_count(); i++) {
+		ASSERT_EQ(cpuinfo_get_package(0), cpuinfo_get_cluster(i)->package);
+	}
+}
+
+TEST(CLUSTERS, vendor) {
+	for (uint32_t i = 0; i < cpuinfo_get_clusters_count(); i++) {
+		ASSERT_EQ(cpuinfo_vendor_arm, cpuinfo_get_cluster(i)->vendor);
+	}
+}
+
+TEST(CLUSTERS, uarch) {
+	for (uint32_t i = 0; i < cpuinfo_get_clusters_count(); i++) {
+		switch (i) {
+			case 0:
+				ASSERT_EQ(cpuinfo_uarch_cortex_a73, cpuinfo_get_cluster(i)->uarch);
+				break;
+			case 1:
+				ASSERT_EQ(cpuinfo_uarch_cortex_a53, cpuinfo_get_cluster(i)->uarch);
+				break;
+		}
+	}
+}
+
+TEST(CLUSTERS, midr) {
+	for (uint32_t i = 0; i < cpuinfo_get_clusters_count(); i++) {
+		switch (i) {
+			case 0:
+				ASSERT_EQ(UINT32_C(0x410FD091), cpuinfo_get_cluster(i)->midr);
+				break;
+			case 1:
+				ASSERT_EQ(UINT32_C(0x410FD034), cpuinfo_get_cluster(i)->midr);
+				break;
+		}
+	}
+}
+
+TEST(CLUSTERS, DISABLED_frequency) {
+	for (uint32_t i = 0; i < cpuinfo_get_clusters_count(); i++) {
+		switch (i) {
+			case 0:
+				ASSERT_EQ(UINT64_C(2362000000), cpuinfo_get_cluster(i)->frequency);
+				break;
+			case 1:
+				ASSERT_EQ(UINT64_C(1844000000), cpuinfo_get_cluster(i)->frequency);
+				break;
+		}
+	}
+}
+
 TEST(PACKAGES, count) {
 	ASSERT_EQ(1, cpuinfo_get_packages_count());
 }
diff --git a/test/mock/huawei-p8-lite.cc b/test/mock/huawei-p8-lite.cc
index 0734698..84664fb 100644
--- a/test/mock/huawei-p8-lite.cc
+++ b/test/mock/huawei-p8-lite.cc
@@ -173,6 +173,88 @@
 	}
 }
 
+TEST(CLUSTERS, count) {
+	ASSERT_EQ(2, cpuinfo_get_clusters_count());
+}
+
+TEST(CLUSTERS, non_null) {
+	ASSERT_TRUE(cpuinfo_get_clusters());
+}
+
+TEST(CLUSTERS, processor_start) {
+	for (uint32_t i = 0; i < cpuinfo_get_clusters_count(); i++) {
+		switch (i) {
+			case 0:
+				ASSERT_EQ(0, cpuinfo_get_cluster(i)->processor_start);
+				break;
+			case 1:
+				ASSERT_EQ(4, cpuinfo_get_cluster(i)->processor_start);
+				break;
+		}
+	}
+}
+
+TEST(CLUSTERS, processor_count) {
+	for (uint32_t i = 0; i < cpuinfo_get_clusters_count(); i++) {
+		ASSERT_EQ(4, cpuinfo_get_cluster(i)->processor_count);
+	}
+}
+
+TEST(CLUSTERS, core_start) {
+	for (uint32_t i = 0; i < cpuinfo_get_clusters_count(); i++) {
+		switch (i) {
+			case 0:
+				ASSERT_EQ(0, cpuinfo_get_cluster(i)->core_start);
+				break;
+			case 1:
+				ASSERT_EQ(4, cpuinfo_get_cluster(i)->core_start);
+				break;
+		}
+	}
+}
+
+TEST(CLUSTERS, core_count) {
+	for (uint32_t i = 0; i < cpuinfo_get_clusters_count(); i++) {
+		ASSERT_EQ(4, cpuinfo_get_cluster(i)->core_count);
+	}
+}
+
+TEST(CLUSTERS, cluster_id) {
+	for (uint32_t i = 0; i < cpuinfo_get_clusters_count(); i++) {
+		ASSERT_EQ(i, cpuinfo_get_cluster(i)->cluster_id);
+	}
+}
+
+TEST(CLUSTERS, package) {
+	for (uint32_t i = 0; i < cpuinfo_get_clusters_count(); i++) {
+		ASSERT_EQ(cpuinfo_get_package(0), cpuinfo_get_cluster(i)->package);
+	}
+}
+
+TEST(CLUSTERS, vendor) {
+	for (uint32_t i = 0; i < cpuinfo_get_clusters_count(); i++) {
+		ASSERT_EQ(cpuinfo_vendor_arm, cpuinfo_get_cluster(i)->vendor);
+	}
+}
+
+TEST(CLUSTERS, uarch) {
+	for (uint32_t i = 0; i < cpuinfo_get_clusters_count(); i++) {
+		ASSERT_EQ(cpuinfo_uarch_cortex_a53, cpuinfo_get_cluster(i)->uarch);
+	}
+}
+
+TEST(CLUSTERS, midr) {
+	for (uint32_t i = 0; i < cpuinfo_get_clusters_count(); i++) {
+		ASSERT_EQ(UINT32_C(0x410FD033), cpuinfo_get_cluster(i)->midr);
+	}
+}
+
+TEST(CLUSTERS, DISABLED_frequency) {
+	for (uint32_t i = 0; i < cpuinfo_get_clusters_count(); i++) {
+		ASSERT_EQ(UINT64_C(1200000000), cpuinfo_get_cluster(i)->frequency);
+	}
+}
+
 TEST(PACKAGES, count) {
 	ASSERT_EQ(1, cpuinfo_get_packages_count());
 }
diff --git a/test/mock/huawei-p9-lite.cc b/test/mock/huawei-p9-lite.cc
index 5be731a..f0332af 100644
--- a/test/mock/huawei-p9-lite.cc
+++ b/test/mock/huawei-p9-lite.cc
@@ -199,6 +199,95 @@
 	}
 }
 
+TEST(CLUSTERS, count) {
+	ASSERT_EQ(2, cpuinfo_get_clusters_count());
+}
+
+TEST(CLUSTERS, non_null) {
+	ASSERT_TRUE(cpuinfo_get_clusters());
+}
+
+TEST(CLUSTERS, processor_start) {
+	for (uint32_t i = 0; i < cpuinfo_get_clusters_count(); i++) {
+		switch (i) {
+			case 0:
+				ASSERT_EQ(0, cpuinfo_get_cluster(i)->processor_start);
+				break;
+			case 1:
+				ASSERT_EQ(4, cpuinfo_get_cluster(i)->processor_start);
+				break;
+		}
+	}
+}
+
+TEST(CLUSTERS, processor_count) {
+	for (uint32_t i = 0; i < cpuinfo_get_clusters_count(); i++) {
+		ASSERT_EQ(4, cpuinfo_get_cluster(i)->processor_count);
+	}
+}
+
+TEST(CLUSTERS, core_start) {
+	for (uint32_t i = 0; i < cpuinfo_get_clusters_count(); i++) {
+		switch (i) {
+			case 0:
+				ASSERT_EQ(0, cpuinfo_get_cluster(i)->core_start);
+				break;
+			case 1:
+				ASSERT_EQ(4, cpuinfo_get_cluster(i)->core_start);
+				break;
+		}
+	}
+}
+
+TEST(CLUSTERS, core_count) {
+	for (uint32_t i = 0; i < cpuinfo_get_clusters_count(); i++) {
+		ASSERT_EQ(4, cpuinfo_get_cluster(i)->core_count);
+	}
+}
+
+TEST(CLUSTERS, cluster_id) {
+	for (uint32_t i = 0; i < cpuinfo_get_clusters_count(); i++) {
+		ASSERT_EQ(i, cpuinfo_get_cluster(i)->cluster_id);
+	}
+}
+
+TEST(CLUSTERS, package) {
+	for (uint32_t i = 0; i < cpuinfo_get_clusters_count(); i++) {
+		ASSERT_EQ(cpuinfo_get_package(0), cpuinfo_get_cluster(i)->package);
+	}
+}
+
+TEST(CLUSTERS, vendor) {
+	for (uint32_t i = 0; i < cpuinfo_get_clusters_count(); i++) {
+		ASSERT_EQ(cpuinfo_vendor_arm, cpuinfo_get_cluster(i)->vendor);
+	}
+}
+
+TEST(CLUSTERS, uarch) {
+	for (uint32_t i = 0; i < cpuinfo_get_clusters_count(); i++) {
+		ASSERT_EQ(cpuinfo_uarch_cortex_a53, cpuinfo_get_cluster(i)->uarch);
+	}
+}
+
+TEST(CLUSTERS, midr) {
+	for (uint32_t i = 0; i < cpuinfo_get_clusters_count(); i++) {
+		ASSERT_EQ(UINT32_C(0x410FD034), cpuinfo_get_cluster(i)->midr);
+	}
+}
+
+TEST(CLUSTERS, DISABLED_frequency) {
+	for (uint32_t i = 0; i < cpuinfo_get_clusters_count(); i++) {
+		switch (i) {
+			case 0:
+				ASSERT_EQ(UINT64_C(2016000000), cpuinfo_get_cluster(i)->frequency);
+				break;
+			case 1:
+				ASSERT_EQ(UINT64_C(1709000000), cpuinfo_get_cluster(i)->frequency);
+				break;
+		}
+	}
+}
+
 TEST(PACKAGES, count) {
 	ASSERT_EQ(1, cpuinfo_get_packages_count());
 }
diff --git a/test/mock/iconia-one-10.cc b/test/mock/iconia-one-10.cc
index 9392cee..ab01658 100644
--- a/test/mock/iconia-one-10.cc
+++ b/test/mock/iconia-one-10.cc
@@ -134,6 +134,74 @@
 	}
 }
 
+TEST(CLUSTERS, count) {
+	ASSERT_EQ(1, cpuinfo_get_clusters_count());
+}
+
+TEST(CLUSTERS, non_null) {
+	ASSERT_TRUE(cpuinfo_get_clusters());
+}
+
+TEST(CLUSTERS, processor_start) {
+	for (uint32_t i = 0; i < cpuinfo_get_clusters_count(); i++) {
+		ASSERT_EQ(0, cpuinfo_get_cluster(i)->processor_start);
+	}
+}
+
+TEST(CLUSTERS, processor_count) {
+	for (uint32_t i = 0; i < cpuinfo_get_clusters_count(); i++) {
+		ASSERT_EQ(4, cpuinfo_get_cluster(i)->processor_count);
+	}
+}
+
+TEST(CLUSTERS, core_start) {
+	for (uint32_t i = 0; i < cpuinfo_get_clusters_count(); i++) {
+		ASSERT_EQ(0, cpuinfo_get_cluster(i)->core_start);
+	}
+}
+
+TEST(CLUSTERS, core_count) {
+	for (uint32_t i = 0; i < cpuinfo_get_clusters_count(); i++) {
+		ASSERT_EQ(4, cpuinfo_get_cluster(i)->core_count);
+	}
+}
+
+TEST(CLUSTERS, cluster_id) {
+	for (uint32_t i = 0; i < cpuinfo_get_clusters_count(); i++) {
+		ASSERT_EQ(i, cpuinfo_get_cluster(i)->cluster_id);
+	}
+}
+
+TEST(CLUSTERS, package) {
+	for (uint32_t i = 0; i < cpuinfo_get_clusters_count(); i++) {
+		ASSERT_EQ(cpuinfo_get_package(0), cpuinfo_get_cluster(i)->package);
+	}
+}
+
+TEST(CLUSTERS, vendor) {
+	for (uint32_t i = 0; i < cpuinfo_get_clusters_count(); i++) {
+		ASSERT_EQ(cpuinfo_vendor_arm, cpuinfo_get_cluster(i)->vendor);
+	}
+}
+
+TEST(CLUSTERS, uarch) {
+	for (uint32_t i = 0; i < cpuinfo_get_clusters_count(); i++) {
+		ASSERT_EQ(cpuinfo_uarch_cortex_a35, cpuinfo_get_cluster(i)->uarch);
+	}
+}
+
+TEST(CLUSTERS, midr) {
+	for (uint32_t i = 0; i < cpuinfo_get_clusters_count(); i++) {
+		ASSERT_EQ(UINT32_C(0x410FD041), cpuinfo_get_cluster(i)->midr);
+	}
+}
+
+TEST(CLUSTERS, DISABLED_frequency) {
+	for (uint32_t i = 0; i < cpuinfo_get_clusters_count(); i++) {
+		ASSERT_EQ(UINT64_C(1300000000), cpuinfo_get_cluster(i)->frequency);
+	}
+}
+
 TEST(PACKAGES, count) {
 	ASSERT_EQ(1, cpuinfo_get_packages_count());
 }
diff --git a/test/mock/lenovo-a6600-plus.cc b/test/mock/lenovo-a6600-plus.cc
index 8b28cb4..b07df25 100644
--- a/test/mock/lenovo-a6600-plus.cc
+++ b/test/mock/lenovo-a6600-plus.cc
@@ -134,6 +134,74 @@
 	}
 }
 
+TEST(CLUSTERS, count) {
+	ASSERT_EQ(1, cpuinfo_get_clusters_count());
+}
+
+TEST(CLUSTERS, non_null) {
+	ASSERT_TRUE(cpuinfo_get_clusters());
+}
+
+TEST(CLUSTERS, processor_start) {
+	for (uint32_t i = 0; i < cpuinfo_get_clusters_count(); i++) {
+		ASSERT_EQ(0, cpuinfo_get_cluster(i)->processor_start);
+	}
+}
+
+TEST(CLUSTERS, processor_count) {
+	for (uint32_t i = 0; i < cpuinfo_get_clusters_count(); i++) {
+		ASSERT_EQ(4, cpuinfo_get_cluster(i)->processor_count);
+	}
+}
+
+TEST(CLUSTERS, core_start) {
+	for (uint32_t i = 0; i < cpuinfo_get_clusters_count(); i++) {
+		ASSERT_EQ(0, cpuinfo_get_cluster(i)->core_start);
+	}
+}
+
+TEST(CLUSTERS, core_count) {
+	for (uint32_t i = 0; i < cpuinfo_get_clusters_count(); i++) {
+		ASSERT_EQ(4, cpuinfo_get_cluster(i)->core_count);
+	}
+}
+
+TEST(CLUSTERS, cluster_id) {
+	for (uint32_t i = 0; i < cpuinfo_get_clusters_count(); i++) {
+		ASSERT_EQ(i, cpuinfo_get_cluster(i)->cluster_id);
+	}
+}
+
+TEST(CLUSTERS, package) {
+	for (uint32_t i = 0; i < cpuinfo_get_clusters_count(); i++) {
+		ASSERT_EQ(cpuinfo_get_package(0), cpuinfo_get_cluster(i)->package);
+	}
+}
+
+TEST(CLUSTERS, vendor) {
+	for (uint32_t i = 0; i < cpuinfo_get_clusters_count(); i++) {
+		ASSERT_EQ(cpuinfo_vendor_arm, cpuinfo_get_cluster(i)->vendor);
+	}
+}
+
+TEST(CLUSTERS, uarch) {
+	for (uint32_t i = 0; i < cpuinfo_get_clusters_count(); i++) {
+		ASSERT_EQ(cpuinfo_uarch_cortex_a53, cpuinfo_get_cluster(i)->uarch);
+	}
+}
+
+TEST(CLUSTERS, midr) {
+	for (uint32_t i = 0; i < cpuinfo_get_clusters_count(); i++) {
+		ASSERT_EQ(UINT32_C(0x410FD034), cpuinfo_get_cluster(i)->midr);
+	}
+}
+
+TEST(CLUSTERS, DISABLED_frequency) {
+	for (uint32_t i = 0; i < cpuinfo_get_clusters_count(); i++) {
+		ASSERT_EQ(UINT64_C(988000000), cpuinfo_get_cluster(i)->frequency);
+	}
+}
+
 TEST(PACKAGES, count) {
 	ASSERT_EQ(1, cpuinfo_get_packages_count());
 }
diff --git a/test/mock/lenovo-vibe-x2.cc b/test/mock/lenovo-vibe-x2.cc
index f2a566c..d5e9e45 100644
--- a/test/mock/lenovo-vibe-x2.cc
+++ b/test/mock/lenovo-vibe-x2.cc
@@ -206,6 +206,109 @@
 	}
 }
 
+TEST(CLUSTERS, count) {
+	ASSERT_EQ(2, cpuinfo_get_clusters_count());
+}
+
+TEST(CLUSTERS, non_null) {
+	ASSERT_TRUE(cpuinfo_get_clusters());
+}
+
+TEST(CLUSTERS, processor_start) {
+	for (uint32_t i = 0; i < cpuinfo_get_clusters_count(); i++) {
+		switch (i) {
+			case 0:
+				ASSERT_EQ(0, cpuinfo_get_cluster(i)->processor_start);
+				break;
+			case 1:
+				ASSERT_EQ(4, cpuinfo_get_cluster(i)->processor_start);
+				break;
+		}
+	}
+}
+
+TEST(CLUSTERS, processor_count) {
+	for (uint32_t i = 0; i < cpuinfo_get_clusters_count(); i++) {
+		ASSERT_EQ(4, cpuinfo_get_cluster(i)->processor_count);
+	}
+}
+
+TEST(CLUSTERS, core_start) {
+	for (uint32_t i = 0; i < cpuinfo_get_clusters_count(); i++) {
+		switch (i) {
+			case 0:
+				ASSERT_EQ(0, cpuinfo_get_cluster(i)->core_start);
+				break;
+			case 1:
+				ASSERT_EQ(4, cpuinfo_get_cluster(i)->core_start);
+				break;
+		}
+	}
+}
+
+TEST(CLUSTERS, core_count) {
+	for (uint32_t i = 0; i < cpuinfo_get_clusters_count(); i++) {
+		ASSERT_EQ(4, cpuinfo_get_cluster(i)->core_count);
+	}
+}
+
+TEST(CLUSTERS, cluster_id) {
+	for (uint32_t i = 0; i < cpuinfo_get_clusters_count(); i++) {
+		ASSERT_EQ(i, cpuinfo_get_cluster(i)->cluster_id);
+	}
+}
+
+TEST(CLUSTERS, package) {
+	for (uint32_t i = 0; i < cpuinfo_get_clusters_count(); i++) {
+		ASSERT_EQ(cpuinfo_get_package(0), cpuinfo_get_cluster(i)->package);
+	}
+}
+
+TEST(CLUSTERS, vendor) {
+	for (uint32_t i = 0; i < cpuinfo_get_clusters_count(); i++) {
+		ASSERT_EQ(cpuinfo_vendor_arm, cpuinfo_get_cluster(i)->vendor);
+	}
+}
+
+TEST(CLUSTERS, uarch) {
+	for (uint32_t i = 0; i < cpuinfo_get_clusters_count(); i++) {
+		switch (i) {
+			case 0:
+				ASSERT_EQ(cpuinfo_uarch_cortex_a17, cpuinfo_get_cluster(i)->uarch);
+				break;
+			case 1:
+				ASSERT_EQ(cpuinfo_uarch_cortex_a7, cpuinfo_get_cluster(i)->uarch);
+				break;
+		}
+	}
+}
+
+TEST(CLUSTERS, midr) {
+	for (uint32_t i = 0; i < cpuinfo_get_clusters_count(); i++) {
+		switch (i) {
+			case 0:
+				ASSERT_EQ(UINT32_C(0x410FC0E0), cpuinfo_get_cluster(i)->midr);
+				break;
+			case 1:
+				ASSERT_EQ(UINT32_C(0x410FC075), cpuinfo_get_cluster(i)->midr);
+				break;
+		}
+	}
+}
+
+TEST(CLUSTERS, DISABLED_frequency) {
+	for (uint32_t i = 0; i < cpuinfo_get_clusters_count(); i++) {
+		switch (i) {
+			case 0:
+				ASSERT_EQ(UINT64_C(2002000000), cpuinfo_get_cluster(i)->frequency);
+				break;
+			case 1:
+				ASSERT_EQ(UINT64_C(1690000000), cpuinfo_get_cluster(i)->frequency);
+				break;
+		}
+	}
+}
+
 TEST(PACKAGES, count) {
 	ASSERT_EQ(1, cpuinfo_get_packages_count());
 }
diff --git a/test/mock/lg-k10-eu.cc b/test/mock/lg-k10-eu.cc
index 081186e..9392f6d 100644
--- a/test/mock/lg-k10-eu.cc
+++ b/test/mock/lg-k10-eu.cc
@@ -134,6 +134,74 @@
 	}
 }
 
+TEST(CLUSTERS, count) {
+	ASSERT_EQ(1, cpuinfo_get_clusters_count());
+}
+
+TEST(CLUSTERS, non_null) {
+	ASSERT_TRUE(cpuinfo_get_clusters());
+}
+
+TEST(CLUSTERS, processor_start) {
+	for (uint32_t i = 0; i < cpuinfo_get_clusters_count(); i++) {
+		ASSERT_EQ(0, cpuinfo_get_cluster(i)->processor_start);
+	}
+}
+
+TEST(CLUSTERS, processor_count) {
+	for (uint32_t i = 0; i < cpuinfo_get_clusters_count(); i++) {
+		ASSERT_EQ(4, cpuinfo_get_cluster(i)->processor_count);
+	}
+}
+
+TEST(CLUSTERS, core_start) {
+	for (uint32_t i = 0; i < cpuinfo_get_clusters_count(); i++) {
+		ASSERT_EQ(0, cpuinfo_get_cluster(i)->core_start);
+	}
+}
+
+TEST(CLUSTERS, core_count) {
+	for (uint32_t i = 0; i < cpuinfo_get_clusters_count(); i++) {
+		ASSERT_EQ(4, cpuinfo_get_cluster(i)->core_count);
+	}
+}
+
+TEST(CLUSTERS, cluster_id) {
+	for (uint32_t i = 0; i < cpuinfo_get_clusters_count(); i++) {
+		ASSERT_EQ(i, cpuinfo_get_cluster(i)->cluster_id);
+	}
+}
+
+TEST(CLUSTERS, package) {
+	for (uint32_t i = 0; i < cpuinfo_get_clusters_count(); i++) {
+		ASSERT_EQ(cpuinfo_get_package(0), cpuinfo_get_cluster(i)->package);
+	}
+}
+
+TEST(CLUSTERS, vendor) {
+	for (uint32_t i = 0; i < cpuinfo_get_clusters_count(); i++) {
+		ASSERT_EQ(cpuinfo_vendor_arm, cpuinfo_get_cluster(i)->vendor);
+	}
+}
+
+TEST(CLUSTERS, uarch) {
+	for (uint32_t i = 0; i < cpuinfo_get_clusters_count(); i++) {
+		ASSERT_EQ(cpuinfo_uarch_cortex_a53, cpuinfo_get_cluster(i)->uarch);
+	}
+}
+
+TEST(CLUSTERS, midr) {
+	for (uint32_t i = 0; i < cpuinfo_get_clusters_count(); i++) {
+		ASSERT_EQ(UINT32_C(0x410FD030), cpuinfo_get_cluster(i)->midr);
+	}
+}
+
+TEST(CLUSTERS, DISABLED_frequency) {
+	for (uint32_t i = 0; i < cpuinfo_get_clusters_count(); i++) {
+		ASSERT_EQ(UINT64_C(1209600000), cpuinfo_get_cluster(i)->frequency);
+	}
+}
+
 TEST(PACKAGES, count) {
 	ASSERT_EQ(1, cpuinfo_get_packages_count());
 }
diff --git a/test/mock/lg-optimus-g-pro.cc b/test/mock/lg-optimus-g-pro.cc
index a611933..b78785f 100644
--- a/test/mock/lg-optimus-g-pro.cc
+++ b/test/mock/lg-optimus-g-pro.cc
@@ -134,6 +134,74 @@
 	}
 }
 
+TEST(CLUSTERS, count) {
+	ASSERT_EQ(1, cpuinfo_get_clusters_count());
+}
+
+TEST(CLUSTERS, non_null) {
+	ASSERT_TRUE(cpuinfo_get_clusters());
+}
+
+TEST(CLUSTERS, processor_start) {
+	for (uint32_t i = 0; i < cpuinfo_get_clusters_count(); i++) {
+		ASSERT_EQ(0, cpuinfo_get_cluster(i)->processor_start);
+	}
+}
+
+TEST(CLUSTERS, processor_count) {
+	for (uint32_t i = 0; i < cpuinfo_get_clusters_count(); i++) {
+		ASSERT_EQ(4, cpuinfo_get_cluster(i)->processor_count);
+	}
+}
+
+TEST(CLUSTERS, core_start) {
+	for (uint32_t i = 0; i < cpuinfo_get_clusters_count(); i++) {
+		ASSERT_EQ(0, cpuinfo_get_cluster(i)->core_start);
+	}
+}
+
+TEST(CLUSTERS, core_count) {
+	for (uint32_t i = 0; i < cpuinfo_get_clusters_count(); i++) {
+		ASSERT_EQ(4, cpuinfo_get_cluster(i)->core_count);
+	}
+}
+
+TEST(CLUSTERS, cluster_id) {
+	for (uint32_t i = 0; i < cpuinfo_get_clusters_count(); i++) {
+		ASSERT_EQ(i, cpuinfo_get_cluster(i)->cluster_id);
+	}
+}
+
+TEST(CLUSTERS, package) {
+	for (uint32_t i = 0; i < cpuinfo_get_clusters_count(); i++) {
+		ASSERT_EQ(cpuinfo_get_package(0), cpuinfo_get_cluster(i)->package);
+	}
+}
+
+TEST(CLUSTERS, vendor) {
+	for (uint32_t i = 0; i < cpuinfo_get_clusters_count(); i++) {
+		ASSERT_EQ(cpuinfo_vendor_qualcomm, cpuinfo_get_cluster(i)->vendor);
+	}
+}
+
+TEST(CLUSTERS, uarch) {
+	for (uint32_t i = 0; i < cpuinfo_get_clusters_count(); i++) {
+		ASSERT_EQ(cpuinfo_uarch_krait, cpuinfo_get_cluster(i)->uarch);
+	}
+}
+
+TEST(CLUSTERS, midr) {
+	for (uint32_t i = 0; i < cpuinfo_get_clusters_count(); i++) {
+		ASSERT_EQ(UINT32_C(0x511F06F0), cpuinfo_get_cluster(i)->midr);
+	}
+}
+
+TEST(CLUSTERS, DISABLED_frequency) {
+	for (uint32_t i = 0; i < cpuinfo_get_clusters_count(); i++) {
+		ASSERT_EQ(UINT64_C(1728000000), cpuinfo_get_cluster(i)->frequency);
+	}
+}
+
 TEST(PACKAGES, count) {
 	ASSERT_EQ(1, cpuinfo_get_packages_count());
 }
diff --git a/test/mock/meizu-pro-6.cc b/test/mock/meizu-pro-6.cc
index 907f00a..7bed744 100644
--- a/test/mock/meizu-pro-6.cc
+++ b/test/mock/meizu-pro-6.cc
@@ -249,6 +249,140 @@
 	}
 }
 
+TEST(CLUSTERS, count) {
+	ASSERT_EQ(3, cpuinfo_get_clusters_count());
+}
+
+TEST(CLUSTERS, non_null) {
+	ASSERT_TRUE(cpuinfo_get_clusters());
+}
+
+TEST(CLUSTERS, processor_start) {
+	for (uint32_t i = 0; i < cpuinfo_get_clusters_count(); i++) {
+		switch (i) {
+			case 0:
+				ASSERT_EQ(0, cpuinfo_get_cluster(i)->processor_start);
+				break;
+			case 1:
+				ASSERT_EQ(2, cpuinfo_get_cluster(i)->processor_start);
+				break;
+			case 2:
+				ASSERT_EQ(6, cpuinfo_get_cluster(i)->processor_start);
+				break;
+		}
+	}
+}
+
+TEST(CLUSTERS, processor_count) {
+	for (uint32_t i = 0; i < cpuinfo_get_clusters_count(); i++) {
+		switch (i) {
+			case 0:
+				ASSERT_EQ(2, cpuinfo_get_cluster(i)->processor_count);
+				break;
+			case 1:
+				ASSERT_EQ(4, cpuinfo_get_cluster(i)->processor_count);
+				break;
+			case 2:
+				ASSERT_EQ(4, cpuinfo_get_cluster(i)->processor_count);
+				break;
+		}
+	}
+}
+
+TEST(CLUSTERS, core_start) {
+	for (uint32_t i = 0; i < cpuinfo_get_clusters_count(); i++) {
+		switch (i) {
+			case 0:
+				ASSERT_EQ(0, cpuinfo_get_cluster(i)->core_start);
+				break;
+			case 1:
+				ASSERT_EQ(2, cpuinfo_get_cluster(i)->core_start);
+				break;
+			case 2:
+				ASSERT_EQ(6, cpuinfo_get_cluster(i)->core_start);
+				break;
+		}
+	}
+}
+
+TEST(CLUSTERS, core_count) {
+	for (uint32_t i = 0; i < cpuinfo_get_clusters_count(); i++) {
+		switch (i) {
+			case 0:
+				ASSERT_EQ(2, cpuinfo_get_cluster(i)->core_count);
+				break;
+			case 1:
+				ASSERT_EQ(4, cpuinfo_get_cluster(i)->core_count);
+				break;
+			case 2:
+				ASSERT_EQ(4, cpuinfo_get_cluster(i)->core_count);
+				break;
+		}
+	}
+}
+
+TEST(CLUSTERS, cluster_id) {
+	for (uint32_t i = 0; i < cpuinfo_get_clusters_count(); i++) {
+		ASSERT_EQ(i, cpuinfo_get_cluster(i)->cluster_id);
+	}
+}
+
+TEST(CLUSTERS, package) {
+	for (uint32_t i = 0; i < cpuinfo_get_clusters_count(); i++) {
+		ASSERT_EQ(cpuinfo_get_package(0), cpuinfo_get_cluster(i)->package);
+	}
+}
+
+TEST(CLUSTERS, vendor) {
+	for (uint32_t i = 0; i < cpuinfo_get_clusters_count(); i++) {
+		ASSERT_EQ(cpuinfo_vendor_arm, cpuinfo_get_cluster(i)->vendor);
+	}
+}
+
+TEST(CLUSTERS, uarch) {
+	for (uint32_t i = 0; i < cpuinfo_get_clusters_count(); i++) {
+		switch (i) {
+			case 0:
+				ASSERT_EQ(cpuinfo_uarch_cortex_a72, cpuinfo_get_cluster(i)->uarch);
+				break;
+			case 1:
+			case 2:
+				ASSERT_EQ(cpuinfo_uarch_cortex_a53, cpuinfo_get_cluster(i)->uarch);
+				break;
+		}
+	}
+}
+
+TEST(CLUSTERS, midr) {
+	for (uint32_t i = 0; i < cpuinfo_get_clusters_count(); i++) {
+		switch (i) {
+			case 0:
+				ASSERT_EQ(UINT32_C(0x410FD081), cpuinfo_get_cluster(i)->midr);
+				break;
+			case 1:
+			case 2:
+				ASSERT_EQ(UINT32_C(0x410FD034), cpuinfo_get_cluster(i)->midr);
+				break;
+		}
+	}
+}
+
+TEST(CLUSTERS, DISABLED_frequency) {
+	for (uint32_t i = 0; i < cpuinfo_get_clusters_count(); i++) {
+		switch (i) {
+			case 0:
+				ASSERT_EQ(UINT64_C(2522000000), cpuinfo_get_cluster(i)->frequency);
+				break;
+			case 1:
+				ASSERT_EQ(UINT64_C(2002000000), cpuinfo_get_cluster(i)->frequency);
+				break;
+			case 2:
+				ASSERT_EQ(UINT64_C(1547000000), cpuinfo_get_cluster(i)->frequency);
+				break;
+		}
+	}
+}
+
 TEST(PACKAGES, count) {
 	ASSERT_EQ(1, cpuinfo_get_packages_count());
 }
diff --git a/test/mock/meizu-pro-6s.cc b/test/mock/meizu-pro-6s.cc
index 3735428..6184c64 100644
--- a/test/mock/meizu-pro-6s.cc
+++ b/test/mock/meizu-pro-6s.cc
@@ -249,6 +249,140 @@
 	}
 }
 
+TEST(CLUSTERS, count) {
+	ASSERT_EQ(3, cpuinfo_get_clusters_count());
+}
+
+TEST(CLUSTERS, non_null) {
+	ASSERT_TRUE(cpuinfo_get_clusters());
+}
+
+TEST(CLUSTERS, processor_start) {
+	for (uint32_t i = 0; i < cpuinfo_get_clusters_count(); i++) {
+		switch (i) {
+			case 0:
+				ASSERT_EQ(0, cpuinfo_get_cluster(i)->processor_start);
+				break;
+			case 1:
+				ASSERT_EQ(2, cpuinfo_get_cluster(i)->processor_start);
+				break;
+			case 2:
+				ASSERT_EQ(6, cpuinfo_get_cluster(i)->processor_start);
+				break;
+		}
+	}
+}
+
+TEST(CLUSTERS, processor_count) {
+	for (uint32_t i = 0; i < cpuinfo_get_clusters_count(); i++) {
+		switch (i) {
+			case 0:
+				ASSERT_EQ(2, cpuinfo_get_cluster(i)->processor_count);
+				break;
+			case 1:
+				ASSERT_EQ(4, cpuinfo_get_cluster(i)->processor_count);
+				break;
+			case 2:
+				ASSERT_EQ(4, cpuinfo_get_cluster(i)->processor_count);
+				break;
+		}
+	}
+}
+
+TEST(CLUSTERS, core_start) {
+	for (uint32_t i = 0; i < cpuinfo_get_clusters_count(); i++) {
+		switch (i) {
+			case 0:
+				ASSERT_EQ(0, cpuinfo_get_cluster(i)->core_start);
+				break;
+			case 1:
+				ASSERT_EQ(2, cpuinfo_get_cluster(i)->core_start);
+				break;
+			case 2:
+				ASSERT_EQ(6, cpuinfo_get_cluster(i)->core_start);
+				break;
+		}
+	}
+}
+
+TEST(CLUSTERS, core_count) {
+	for (uint32_t i = 0; i < cpuinfo_get_clusters_count(); i++) {
+		switch (i) {
+			case 0:
+				ASSERT_EQ(2, cpuinfo_get_cluster(i)->core_count);
+				break;
+			case 1:
+				ASSERT_EQ(4, cpuinfo_get_cluster(i)->core_count);
+				break;
+			case 2:
+				ASSERT_EQ(4, cpuinfo_get_cluster(i)->core_count);
+				break;
+		}
+	}
+}
+
+TEST(CLUSTERS, cluster_id) {
+	for (uint32_t i = 0; i < cpuinfo_get_clusters_count(); i++) {
+		ASSERT_EQ(i, cpuinfo_get_cluster(i)->cluster_id);
+	}
+}
+
+TEST(CLUSTERS, package) {
+	for (uint32_t i = 0; i < cpuinfo_get_clusters_count(); i++) {
+		ASSERT_EQ(cpuinfo_get_package(0), cpuinfo_get_cluster(i)->package);
+	}
+}
+
+TEST(CLUSTERS, vendor) {
+	for (uint32_t i = 0; i < cpuinfo_get_clusters_count(); i++) {
+		ASSERT_EQ(cpuinfo_vendor_arm, cpuinfo_get_cluster(i)->vendor);
+	}
+}
+
+TEST(CLUSTERS, uarch) {
+	for (uint32_t i = 0; i < cpuinfo_get_clusters_count(); i++) {
+		switch (i) {
+			case 0:
+				ASSERT_EQ(cpuinfo_uarch_cortex_a72, cpuinfo_get_cluster(i)->uarch);
+				break;
+			case 1:
+			case 2:
+				ASSERT_EQ(cpuinfo_uarch_cortex_a53, cpuinfo_get_cluster(i)->uarch);
+				break;
+		}
+	}
+}
+
+TEST(CLUSTERS, midr) {
+	for (uint32_t i = 0; i < cpuinfo_get_clusters_count(); i++) {
+		switch (i) {
+			case 0:
+				ASSERT_EQ(UINT32_C(0x410FD081), cpuinfo_get_cluster(i)->midr);
+				break;
+			case 1:
+			case 2:
+				ASSERT_EQ(UINT32_C(0x410FD034), cpuinfo_get_cluster(i)->midr);
+				break;
+		}
+	}
+}
+
+TEST(CLUSTERS, DISABLED_frequency) {
+	for (uint32_t i = 0; i < cpuinfo_get_clusters_count(); i++) {
+		switch (i) {
+			case 0:
+				ASSERT_EQ(UINT64_C(2522000000), cpuinfo_get_cluster(i)->frequency);
+				break;
+			case 1:
+				ASSERT_EQ(UINT64_C(2002000000), cpuinfo_get_cluster(i)->frequency);
+				break;
+			case 2:
+				ASSERT_EQ(UINT64_C(1547000000), cpuinfo_get_cluster(i)->frequency);
+				break;
+		}
+	}
+}
+
 TEST(PACKAGES, count) {
 	ASSERT_EQ(1, cpuinfo_get_packages_count());
 }
diff --git a/test/mock/meizu-pro-7-plus.cc b/test/mock/meizu-pro-7-plus.cc
index f23bc37..c690208 100644
--- a/test/mock/meizu-pro-7-plus.cc
+++ b/test/mock/meizu-pro-7-plus.cc
@@ -253,6 +253,144 @@
 	}
 }
 
+TEST(CLUSTERS, count) {
+	ASSERT_EQ(3, cpuinfo_get_clusters_count());
+}
+
+TEST(CLUSTERS, non_null) {
+	ASSERT_TRUE(cpuinfo_get_clusters());
+}
+
+TEST(CLUSTERS, processor_start) {
+	for (uint32_t i = 0; i < cpuinfo_get_clusters_count(); i++) {
+		switch (i) {
+			case 0:
+				ASSERT_EQ(0, cpuinfo_get_cluster(i)->processor_start);
+				break;
+			case 1:
+				ASSERT_EQ(2, cpuinfo_get_cluster(i)->processor_start);
+				break;
+			case 2:
+				ASSERT_EQ(6, cpuinfo_get_cluster(i)->processor_start);
+				break;
+		}
+	}
+}
+
+TEST(CLUSTERS, processor_count) {
+	for (uint32_t i = 0; i < cpuinfo_get_clusters_count(); i++) {
+		switch (i) {
+			case 0:
+				ASSERT_EQ(2, cpuinfo_get_cluster(i)->processor_count);
+				break;
+			case 1:
+				ASSERT_EQ(4, cpuinfo_get_cluster(i)->processor_count);
+				break;
+			case 2:
+				ASSERT_EQ(4, cpuinfo_get_cluster(i)->processor_count);
+				break;
+		}
+	}
+}
+
+TEST(CLUSTERS, core_start) {
+	for (uint32_t i = 0; i < cpuinfo_get_clusters_count(); i++) {
+		switch (i) {
+			case 0:
+				ASSERT_EQ(0, cpuinfo_get_cluster(i)->core_start);
+				break;
+			case 1:
+				ASSERT_EQ(2, cpuinfo_get_cluster(i)->core_start);
+				break;
+			case 2:
+				ASSERT_EQ(6, cpuinfo_get_cluster(i)->core_start);
+				break;
+		}
+	}
+}
+
+TEST(CLUSTERS, core_count) {
+	for (uint32_t i = 0; i < cpuinfo_get_clusters_count(); i++) {
+		switch (i) {
+			case 0:
+				ASSERT_EQ(2, cpuinfo_get_cluster(i)->core_count);
+				break;
+			case 1:
+				ASSERT_EQ(4, cpuinfo_get_cluster(i)->core_count);
+				break;
+			case 2:
+				ASSERT_EQ(4, cpuinfo_get_cluster(i)->core_count);
+				break;
+		}
+	}
+}
+
+TEST(CLUSTERS, cluster_id) {
+	for (uint32_t i = 0; i < cpuinfo_get_clusters_count(); i++) {
+		ASSERT_EQ(i, cpuinfo_get_cluster(i)->cluster_id);
+	}
+}
+
+TEST(CLUSTERS, package) {
+	for (uint32_t i = 0; i < cpuinfo_get_clusters_count(); i++) {
+		ASSERT_EQ(cpuinfo_get_package(0), cpuinfo_get_cluster(i)->package);
+	}
+}
+
+TEST(CLUSTERS, vendor) {
+	for (uint32_t i = 0; i < cpuinfo_get_clusters_count(); i++) {
+		ASSERT_EQ(cpuinfo_vendor_arm, cpuinfo_get_cluster(i)->vendor);
+	}
+}
+
+TEST(CLUSTERS, uarch) {
+	for (uint32_t i = 0; i < cpuinfo_get_clusters_count(); i++) {
+		switch (i) {
+			case 0:
+				ASSERT_EQ(cpuinfo_uarch_cortex_a73, cpuinfo_get_cluster(i)->uarch);
+				break;
+			case 1:
+				ASSERT_EQ(cpuinfo_uarch_cortex_a53, cpuinfo_get_cluster(i)->uarch);
+				break;
+			case 2:
+				ASSERT_EQ(cpuinfo_uarch_cortex_a35, cpuinfo_get_cluster(i)->uarch);
+				break;
+		}
+	}
+}
+
+TEST(CLUSTERS, midr) {
+	for (uint32_t i = 0; i < cpuinfo_get_clusters_count(); i++) {
+		switch (i) {
+			case 0:
+				ASSERT_EQ(UINT32_C(0x410FD092), cpuinfo_get_cluster(i)->midr);
+				break;
+			case 1:
+				ASSERT_EQ(UINT32_C(0x410FD034), cpuinfo_get_cluster(i)->midr);
+				break;
+			case 2:
+				ASSERT_EQ(UINT32_C(0x410FD041), cpuinfo_get_cluster(i)->midr);
+				break;
+		}
+	}
+}
+
+TEST(CLUSTERS, DISABLED_frequency) {
+	for (uint32_t i = 0; i < cpuinfo_get_clusters_count(); i++) {
+		switch (i) {
+			case 0:
+				ASSERT_EQ(UINT64_C(2600000000), cpuinfo_get_cluster(i)->frequency);
+				break;
+			case 1:
+				ASSERT_EQ(UINT64_C(2197000000), cpuinfo_get_cluster(i)->frequency);
+				break;
+			case 2:
+				ASSERT_EQ(UINT64_C(1898000000), cpuinfo_get_cluster(i)->frequency);
+				break;
+		}
+	}
+}
+
 TEST(PACKAGES, count) {
 	ASSERT_EQ(1, cpuinfo_get_packages_count());
 }
diff --git a/test/mock/moto-e-gen1.cc b/test/mock/moto-e-gen1.cc
index 1a48e87..d4853d5 100644
--- a/test/mock/moto-e-gen1.cc
+++ b/test/mock/moto-e-gen1.cc
@@ -134,6 +134,74 @@
 	}
 }
 
+TEST(CLUSTERS, count) {
+	ASSERT_EQ(1, cpuinfo_get_clusters_count());
+}
+
+TEST(CLUSTERS, non_null) {
+	ASSERT_TRUE(cpuinfo_get_clusters());
+}
+
+TEST(CLUSTERS, processor_start) {
+	for (uint32_t i = 0; i < cpuinfo_get_clusters_count(); i++) {
+		ASSERT_EQ(0, cpuinfo_get_cluster(i)->processor_start);
+	}
+}
+
+TEST(CLUSTERS, processor_count) {
+	for (uint32_t i = 0; i < cpuinfo_get_clusters_count(); i++) {
+		ASSERT_EQ(2, cpuinfo_get_cluster(i)->processor_count);
+	}
+}
+
+TEST(CLUSTERS, core_start) {
+	for (uint32_t i = 0; i < cpuinfo_get_clusters_count(); i++) {
+		ASSERT_EQ(0, cpuinfo_get_cluster(i)->core_start);
+	}
+}
+
+TEST(CLUSTERS, core_count) {
+	for (uint32_t i = 0; i < cpuinfo_get_clusters_count(); i++) {
+		ASSERT_EQ(2, cpuinfo_get_cluster(i)->core_count);
+	}
+}
+
+TEST(CLUSTERS, cluster_id) {
+	for (uint32_t i = 0; i < cpuinfo_get_clusters_count(); i++) {
+		ASSERT_EQ(i, cpuinfo_get_cluster(i)->cluster_id);
+	}
+}
+
+TEST(CLUSTERS, package) {
+	for (uint32_t i = 0; i < cpuinfo_get_clusters_count(); i++) {
+		ASSERT_EQ(cpuinfo_get_package(0), cpuinfo_get_cluster(i)->package);
+	}
+}
+
+TEST(CLUSTERS, vendor) {
+	for (uint32_t i = 0; i < cpuinfo_get_clusters_count(); i++) {
+		ASSERT_EQ(cpuinfo_vendor_arm, cpuinfo_get_cluster(i)->vendor);
+	}
+}
+
+TEST(CLUSTERS, uarch) {
+	for (uint32_t i = 0; i < cpuinfo_get_clusters_count(); i++) {
+		ASSERT_EQ(cpuinfo_uarch_cortex_a7, cpuinfo_get_cluster(i)->uarch);
+	}
+}
+
+TEST(CLUSTERS, midr) {
+	for (uint32_t i = 0; i < cpuinfo_get_clusters_count(); i++) {
+		ASSERT_EQ(UINT32_C(0x410FC073), cpuinfo_get_cluster(i)->midr);
+	}
+}
+
+TEST(CLUSTERS, DISABLED_frequency) {
+	for (uint32_t i = 0; i < cpuinfo_get_clusters_count(); i++) {
+		ASSERT_EQ(UINT64_C(1190400000), cpuinfo_get_cluster(i)->frequency);
+	}
+}
+
 TEST(PACKAGES, count) {
 	ASSERT_EQ(1, cpuinfo_get_packages_count());
 }
diff --git a/test/mock/moto-g-gen1.cc b/test/mock/moto-g-gen1.cc
index f8b36cf..cee88c4 100644
--- a/test/mock/moto-g-gen1.cc
+++ b/test/mock/moto-g-gen1.cc
@@ -134,6 +134,74 @@
 	}
 }
 
+TEST(CLUSTERS, count) {
+	ASSERT_EQ(1, cpuinfo_get_clusters_count());
+}
+
+TEST(CLUSTERS, non_null) {
+	ASSERT_TRUE(cpuinfo_get_clusters());
+}
+
+TEST(CLUSTERS, processor_start) {
+	for (uint32_t i = 0; i < cpuinfo_get_clusters_count(); i++) {
+		ASSERT_EQ(0, cpuinfo_get_cluster(i)->processor_start);
+	}
+}
+
+TEST(CLUSTERS, processor_count) {
+	for (uint32_t i = 0; i < cpuinfo_get_clusters_count(); i++) {
+		ASSERT_EQ(4, cpuinfo_get_cluster(i)->processor_count);
+	}
+}
+
+TEST(CLUSTERS, core_start) {
+	for (uint32_t i = 0; i < cpuinfo_get_clusters_count(); i++) {
+		ASSERT_EQ(0, cpuinfo_get_cluster(i)->core_start);
+	}
+}
+
+TEST(CLUSTERS, core_count) {
+	for (uint32_t i = 0; i < cpuinfo_get_clusters_count(); i++) {
+		ASSERT_EQ(4, cpuinfo_get_cluster(i)->core_count);
+	}
+}
+
+TEST(CLUSTERS, cluster_id) {
+	for (uint32_t i = 0; i < cpuinfo_get_clusters_count(); i++) {
+		ASSERT_EQ(i, cpuinfo_get_cluster(i)->cluster_id);
+	}
+}
+
+TEST(CLUSTERS, package) {
+	for (uint32_t i = 0; i < cpuinfo_get_clusters_count(); i++) {
+		ASSERT_EQ(cpuinfo_get_package(0), cpuinfo_get_cluster(i)->package);
+	}
+}
+
+TEST(CLUSTERS, vendor) {
+	for (uint32_t i = 0; i < cpuinfo_get_clusters_count(); i++) {
+		ASSERT_EQ(cpuinfo_vendor_arm, cpuinfo_get_cluster(i)->vendor);
+	}
+}
+
+TEST(CLUSTERS, uarch) {
+	for (uint32_t i = 0; i < cpuinfo_get_clusters_count(); i++) {
+		ASSERT_EQ(cpuinfo_uarch_cortex_a7, cpuinfo_get_cluster(i)->uarch);
+	}
+}
+
+TEST(CLUSTERS, midr) {
+	for (uint32_t i = 0; i < cpuinfo_get_clusters_count(); i++) {
+		ASSERT_EQ(UINT32_C(0x410FC073), cpuinfo_get_cluster(i)->midr);
+	}
+}
+
+TEST(CLUSTERS, DISABLED_frequency) {
+	for (uint32_t i = 0; i < cpuinfo_get_clusters_count(); i++) {
+		ASSERT_EQ(UINT64_C(1190400000), cpuinfo_get_cluster(i)->frequency);
+	}
+}
+
 TEST(PACKAGES, count) {
 	ASSERT_EQ(1, cpuinfo_get_packages_count());
 }
diff --git a/test/mock/moto-g-gen2.cc b/test/mock/moto-g-gen2.cc
index 0cf95e5..03ba04a 100644
--- a/test/mock/moto-g-gen2.cc
+++ b/test/mock/moto-g-gen2.cc
@@ -134,6 +134,74 @@
 	}
 }
 
+TEST(CLUSTERS, count) {
+	ASSERT_EQ(1, cpuinfo_get_clusters_count());
+}
+
+TEST(CLUSTERS, non_null) {
+	ASSERT_TRUE(cpuinfo_get_clusters());
+}
+
+TEST(CLUSTERS, processor_start) {
+	for (uint32_t i = 0; i < cpuinfo_get_clusters_count(); i++) {
+		ASSERT_EQ(0, cpuinfo_get_cluster(i)->processor_start);
+	}
+}
+
+TEST(CLUSTERS, processor_count) {
+	for (uint32_t i = 0; i < cpuinfo_get_clusters_count(); i++) {
+		ASSERT_EQ(4, cpuinfo_get_cluster(i)->processor_count);
+	}
+}
+
+TEST(CLUSTERS, core_start) {
+	for (uint32_t i = 0; i < cpuinfo_get_clusters_count(); i++) {
+		ASSERT_EQ(0, cpuinfo_get_cluster(i)->core_start);
+	}
+}
+
+TEST(CLUSTERS, core_count) {
+	for (uint32_t i = 0; i < cpuinfo_get_clusters_count(); i++) {
+		ASSERT_EQ(4, cpuinfo_get_cluster(i)->core_count);
+	}
+}
+
+TEST(CLUSTERS, cluster_id) {
+	for (uint32_t i = 0; i < cpuinfo_get_clusters_count(); i++) {
+		ASSERT_EQ(i, cpuinfo_get_cluster(i)->cluster_id);
+	}
+}
+
+TEST(CLUSTERS, package) {
+	for (uint32_t i = 0; i < cpuinfo_get_clusters_count(); i++) {
+		ASSERT_EQ(cpuinfo_get_package(0), cpuinfo_get_cluster(i)->package);
+	}
+}
+
+TEST(CLUSTERS, vendor) {
+	for (uint32_t i = 0; i < cpuinfo_get_clusters_count(); i++) {
+		ASSERT_EQ(cpuinfo_vendor_arm, cpuinfo_get_cluster(i)->vendor);
+	}
+}
+
+TEST(CLUSTERS, uarch) {
+	for (uint32_t i = 0; i < cpuinfo_get_clusters_count(); i++) {
+		ASSERT_EQ(cpuinfo_uarch_cortex_a7, cpuinfo_get_cluster(i)->uarch);
+	}
+}
+
+TEST(CLUSTERS, midr) {
+	for (uint32_t i = 0; i < cpuinfo_get_clusters_count(); i++) {
+		ASSERT_EQ(UINT32_C(0x410FC073), cpuinfo_get_cluster(i)->midr);
+	}
+}
+
+TEST(CLUSTERS, DISABLED_frequency) {
+	for (uint32_t i = 0; i < cpuinfo_get_clusters_count(); i++) {
+		ASSERT_EQ(UINT64_C(1190400000), cpuinfo_get_cluster(i)->frequency);
+	}
+}
+
 TEST(PACKAGES, count) {
 	ASSERT_EQ(1, cpuinfo_get_packages_count());
 }
diff --git a/test/mock/moto-g-gen3.cc b/test/mock/moto-g-gen3.cc
index add594b..602f6e1 100644
--- a/test/mock/moto-g-gen3.cc
+++ b/test/mock/moto-g-gen3.cc
@@ -134,6 +134,74 @@
 	}
 }
 
+TEST(CLUSTERS, count) {
+	ASSERT_EQ(1, cpuinfo_get_clusters_count());
+}
+
+TEST(CLUSTERS, non_null) {
+	ASSERT_TRUE(cpuinfo_get_clusters());
+}
+
+TEST(CLUSTERS, processor_start) {
+	for (uint32_t i = 0; i < cpuinfo_get_clusters_count(); i++) {
+		ASSERT_EQ(0, cpuinfo_get_cluster(i)->processor_start);
+	}
+}
+
+TEST(CLUSTERS, processor_count) {
+	for (uint32_t i = 0; i < cpuinfo_get_clusters_count(); i++) {
+		ASSERT_EQ(4, cpuinfo_get_cluster(i)->processor_count);
+	}
+}
+
+TEST(CLUSTERS, core_start) {
+	for (uint32_t i = 0; i < cpuinfo_get_clusters_count(); i++) {
+		ASSERT_EQ(0, cpuinfo_get_cluster(i)->core_start);
+	}
+}
+
+TEST(CLUSTERS, core_count) {
+	for (uint32_t i = 0; i < cpuinfo_get_clusters_count(); i++) {
+		ASSERT_EQ(4, cpuinfo_get_cluster(i)->core_count);
+	}
+}
+
+TEST(CLUSTERS, cluster_id) {
+	for (uint32_t i = 0; i < cpuinfo_get_clusters_count(); i++) {
+		ASSERT_EQ(i, cpuinfo_get_cluster(i)->cluster_id);
+	}
+}
+
+TEST(CLUSTERS, package) {
+	for (uint32_t i = 0; i < cpuinfo_get_clusters_count(); i++) {
+		ASSERT_EQ(cpuinfo_get_package(0), cpuinfo_get_cluster(i)->package);
+	}
+}
+
+TEST(CLUSTERS, vendor) {
+	for (uint32_t i = 0; i < cpuinfo_get_clusters_count(); i++) {
+		ASSERT_EQ(cpuinfo_vendor_arm, cpuinfo_get_cluster(i)->vendor);
+	}
+}
+
+TEST(CLUSTERS, uarch) {
+	for (uint32_t i = 0; i < cpuinfo_get_clusters_count(); i++) {
+		ASSERT_EQ(cpuinfo_uarch_cortex_a53, cpuinfo_get_cluster(i)->uarch);
+	}
+}
+
+TEST(CLUSTERS, midr) {
+	for (uint32_t i = 0; i < cpuinfo_get_clusters_count(); i++) {
+		ASSERT_EQ(UINT32_C(0x410FD030), cpuinfo_get_cluster(i)->midr);
+	}
+}
+
+TEST(CLUSTERS, DISABLED_frequency) {
+	for (uint32_t i = 0; i < cpuinfo_get_clusters_count(); i++) {
+		ASSERT_EQ(UINT64_C(1363200000), cpuinfo_get_cluster(i)->frequency);
+	}
+}
+
 TEST(PACKAGES, count) {
 	ASSERT_EQ(1, cpuinfo_get_packages_count());
 }
diff --git a/test/mock/moto-g-gen4.cc b/test/mock/moto-g-gen4.cc
index 2bd8899..f325f7e 100644
--- a/test/mock/moto-g-gen4.cc
+++ b/test/mock/moto-g-gen4.cc
@@ -186,6 +186,88 @@
 	}
 }
 
+TEST(CLUSTERS, count) {
+	ASSERT_EQ(2, cpuinfo_get_clusters_count());
+}
+
+TEST(CLUSTERS, non_null) {
+	ASSERT_TRUE(cpuinfo_get_clusters());
+}
+
+TEST(CLUSTERS, processor_start) {
+	for (uint32_t i = 0; i < cpuinfo_get_clusters_count(); i++) {
+		switch (i) {
+			case 0:
+				ASSERT_EQ(0, cpuinfo_get_cluster(i)->processor_start);
+				break;
+			case 1:
+				ASSERT_EQ(4, cpuinfo_get_cluster(i)->processor_start);
+				break;
+		}
+	}
+}
+
+TEST(CLUSTERS, processor_count) {
+	for (uint32_t i = 0; i < cpuinfo_get_clusters_count(); i++) {
+		ASSERT_EQ(4, cpuinfo_get_cluster(i)->processor_count);
+	}
+}
+
+TEST(CLUSTERS, core_start) {
+	for (uint32_t i = 0; i < cpuinfo_get_clusters_count(); i++) {
+		switch (i) {
+			case 0:
+				ASSERT_EQ(0, cpuinfo_get_cluster(i)->core_start);
+				break;
+			case 1:
+				ASSERT_EQ(4, cpuinfo_get_cluster(i)->core_start);
+				break;
+		}
+	}
+}
+
+TEST(CLUSTERS, core_count) {
+	for (uint32_t i = 0; i < cpuinfo_get_clusters_count(); i++) {
+		ASSERT_EQ(4, cpuinfo_get_cluster(i)->core_count);
+	}
+}
+
+TEST(CLUSTERS, cluster_id) {
+	for (uint32_t i = 0; i < cpuinfo_get_clusters_count(); i++) {
+		ASSERT_EQ(i, cpuinfo_get_cluster(i)->cluster_id);
+	}
+}
+
+TEST(CLUSTERS, package) {
+	for (uint32_t i = 0; i < cpuinfo_get_clusters_count(); i++) {
+		ASSERT_EQ(cpuinfo_get_package(0), cpuinfo_get_cluster(i)->package);
+	}
+}
+
+TEST(CLUSTERS, vendor) {
+	for (uint32_t i = 0; i < cpuinfo_get_clusters_count(); i++) {
+		ASSERT_EQ(cpuinfo_vendor_arm, cpuinfo_get_cluster(i)->vendor);
+	}
+}
+
+TEST(CLUSTERS, uarch) {
+	for (uint32_t i = 0; i < cpuinfo_get_clusters_count(); i++) {
+		ASSERT_EQ(cpuinfo_uarch_cortex_a53, cpuinfo_get_cluster(i)->uarch);
+	}
+}
+
+TEST(CLUSTERS, midr) {
+	for (uint32_t i = 0; i < cpuinfo_get_clusters_count(); i++) {
+		ASSERT_EQ(UINT32_C(0x410FD034), cpuinfo_get_cluster(i)->midr);
+	}
+}
+
+TEST(CLUSTERS, DISABLED_frequency) {
+	for (uint32_t i = 0; i < cpuinfo_get_clusters_count(); i++) {
+		ASSERT_EQ(UINT64_C(1516800000), cpuinfo_get_cluster(i)->frequency);
+	}
+}
+
 TEST(PACKAGES, count) {
 	ASSERT_EQ(1, cpuinfo_get_packages_count());
 }
diff --git a/test/mock/nexus-s.cc b/test/mock/nexus-s.cc
index a1efb52..d841acd 100644
--- a/test/mock/nexus-s.cc
+++ b/test/mock/nexus-s.cc
@@ -134,6 +134,74 @@
 	}
 }
 
+TEST(CLUSTERS, count) {
+	ASSERT_EQ(1, cpuinfo_get_clusters_count());
+}
+
+TEST(CLUSTERS, non_null) {
+	ASSERT_TRUE(cpuinfo_get_clusters());
+}
+
+TEST(CLUSTERS, processor_start) {
+	for (uint32_t i = 0; i < cpuinfo_get_clusters_count(); i++) {
+		ASSERT_EQ(0, cpuinfo_get_cluster(i)->processor_start);
+	}
+}
+
+TEST(CLUSTERS, processor_count) {
+	for (uint32_t i = 0; i < cpuinfo_get_clusters_count(); i++) {
+		ASSERT_EQ(1, cpuinfo_get_cluster(i)->processor_count);
+	}
+}
+
+TEST(CLUSTERS, core_start) {
+	for (uint32_t i = 0; i < cpuinfo_get_clusters_count(); i++) {
+		ASSERT_EQ(0, cpuinfo_get_cluster(i)->core_start);
+	}
+}
+
+TEST(CLUSTERS, core_count) {
+	for (uint32_t i = 0; i < cpuinfo_get_clusters_count(); i++) {
+		ASSERT_EQ(1, cpuinfo_get_cluster(i)->core_count);
+	}
+}
+
+TEST(CLUSTERS, cluster_id) {
+	for (uint32_t i = 0; i < cpuinfo_get_clusters_count(); i++) {
+		ASSERT_EQ(i, cpuinfo_get_cluster(i)->cluster_id);
+	}
+}
+
+TEST(CLUSTERS, package) {
+	for (uint32_t i = 0; i < cpuinfo_get_clusters_count(); i++) {
+		ASSERT_EQ(cpuinfo_get_package(0), cpuinfo_get_cluster(i)->package);
+	}
+}
+
+TEST(CLUSTERS, vendor) {
+	for (uint32_t i = 0; i < cpuinfo_get_clusters_count(); i++) {
+		ASSERT_EQ(cpuinfo_vendor_arm, cpuinfo_get_cluster(i)->vendor);
+	}
+}
+
+TEST(CLUSTERS, uarch) {
+	for (uint32_t i = 0; i < cpuinfo_get_clusters_count(); i++) {
+		ASSERT_EQ(cpuinfo_uarch_cortex_a8, cpuinfo_get_cluster(i)->uarch);
+	}
+}
+
+TEST(CLUSTERS, midr) {
+	for (uint32_t i = 0; i < cpuinfo_get_clusters_count(); i++) {
+		ASSERT_EQ(UINT32_C(0x412FC082), cpuinfo_get_cluster(i)->midr);
+	}
+}
+
+TEST(CLUSTERS, DISABLED_frequency) {
+	for (uint32_t i = 0; i < cpuinfo_get_clusters_count(); i++) {
+		ASSERT_EQ(UINT64_C(1000000000), cpuinfo_get_cluster(i)->frequency);
+	}
+}
+
 TEST(PACKAGES, count) {
 	ASSERT_EQ(1, cpuinfo_get_packages_count());
 }
diff --git a/test/mock/nexus10.cc b/test/mock/nexus10.cc
index d111c81..86ec36c 100644
--- a/test/mock/nexus10.cc
+++ b/test/mock/nexus10.cc
@@ -134,6 +134,74 @@
 	}
 }
 
+TEST(CLUSTERS, count) {
+	ASSERT_EQ(1, cpuinfo_get_clusters_count());
+}
+
+TEST(CLUSTERS, non_null) {
+	ASSERT_TRUE(cpuinfo_get_clusters());
+}
+
+TEST(CLUSTERS, processor_start) {
+	for (uint32_t i = 0; i < cpuinfo_get_clusters_count(); i++) {
+		ASSERT_EQ(0, cpuinfo_get_cluster(i)->processor_start);
+	}
+}
+
+TEST(CLUSTERS, processor_count) {
+	for (uint32_t i = 0; i < cpuinfo_get_clusters_count(); i++) {
+		ASSERT_EQ(2, cpuinfo_get_cluster(i)->processor_count);
+	}
+}
+
+TEST(CLUSTERS, core_start) {
+	for (uint32_t i = 0; i < cpuinfo_get_clusters_count(); i++) {
+		ASSERT_EQ(0, cpuinfo_get_cluster(i)->core_start);
+	}
+}
+
+TEST(CLUSTERS, core_count) {
+	for (uint32_t i = 0; i < cpuinfo_get_clusters_count(); i++) {
+		ASSERT_EQ(2, cpuinfo_get_cluster(i)->core_count);
+	}
+}
+
+TEST(CLUSTERS, cluster_id) {
+	for (uint32_t i = 0; i < cpuinfo_get_clusters_count(); i++) {
+		ASSERT_EQ(i, cpuinfo_get_cluster(i)->cluster_id);
+	}
+}
+
+TEST(CLUSTERS, package) {
+	for (uint32_t i = 0; i < cpuinfo_get_clusters_count(); i++) {
+		ASSERT_EQ(cpuinfo_get_package(0), cpuinfo_get_cluster(i)->package);
+	}
+}
+
+TEST(CLUSTERS, vendor) {
+	for (uint32_t i = 0; i < cpuinfo_get_clusters_count(); i++) {
+		ASSERT_EQ(cpuinfo_vendor_arm, cpuinfo_get_cluster(i)->vendor);
+	}
+}
+
+TEST(CLUSTERS, uarch) {
+	for (uint32_t i = 0; i < cpuinfo_get_clusters_count(); i++) {
+		ASSERT_EQ(cpuinfo_uarch_cortex_a15, cpuinfo_get_cluster(i)->uarch);
+	}
+}
+
+TEST(CLUSTERS, midr) {
+	for (uint32_t i = 0; i < cpuinfo_get_clusters_count(); i++) {
+		ASSERT_EQ(UINT32_C(0x410FC0F4), cpuinfo_get_cluster(i)->midr);
+	}
+}
+
+TEST(CLUSTERS, DISABLED_frequency) {
+	for (uint32_t i = 0; i < cpuinfo_get_clusters_count(); i++) {
+		ASSERT_EQ(UINT64_C(1700000000), cpuinfo_get_cluster(i)->frequency);
+	}
+}
+
 TEST(PACKAGES, count) {
 	ASSERT_EQ(1, cpuinfo_get_packages_count());
 }
diff --git a/test/mock/nexus4.cc b/test/mock/nexus4.cc
index 902e915..0aa3974 100644
--- a/test/mock/nexus4.cc
+++ b/test/mock/nexus4.cc
@@ -134,6 +134,74 @@
 	}
 }
 
+TEST(CLUSTERS, count) {
+	ASSERT_EQ(1, cpuinfo_get_clusters_count());
+}
+
+TEST(CLUSTERS, non_null) {
+	ASSERT_TRUE(cpuinfo_get_clusters());
+}
+
+TEST(CLUSTERS, processor_start) {
+	for (uint32_t i = 0; i < cpuinfo_get_clusters_count(); i++) {
+		ASSERT_EQ(0, cpuinfo_get_cluster(i)->processor_start);
+	}
+}
+
+TEST(CLUSTERS, processor_count) {
+	for (uint32_t i = 0; i < cpuinfo_get_clusters_count(); i++) {
+		ASSERT_EQ(4, cpuinfo_get_cluster(i)->processor_count);
+	}
+}
+
+TEST(CLUSTERS, core_start) {
+	for (uint32_t i = 0; i < cpuinfo_get_clusters_count(); i++) {
+		ASSERT_EQ(0, cpuinfo_get_cluster(i)->core_start);
+	}
+}
+
+TEST(CLUSTERS, core_count) {
+	for (uint32_t i = 0; i < cpuinfo_get_clusters_count(); i++) {
+		ASSERT_EQ(4, cpuinfo_get_cluster(i)->core_count);
+	}
+}
+
+TEST(CLUSTERS, cluster_id) {
+	for (uint32_t i = 0; i < cpuinfo_get_clusters_count(); i++) {
+		ASSERT_EQ(i, cpuinfo_get_cluster(i)->cluster_id);
+	}
+}
+
+TEST(CLUSTERS, package) {
+	for (uint32_t i = 0; i < cpuinfo_get_clusters_count(); i++) {
+		ASSERT_EQ(cpuinfo_get_package(0), cpuinfo_get_cluster(i)->package);
+	}
+}
+
+TEST(CLUSTERS, vendor) {
+	for (uint32_t i = 0; i < cpuinfo_get_clusters_count(); i++) {
+		ASSERT_EQ(cpuinfo_vendor_qualcomm, cpuinfo_get_cluster(i)->vendor);
+	}
+}
+
+TEST(CLUSTERS, uarch) {
+	for (uint32_t i = 0; i < cpuinfo_get_clusters_count(); i++) {
+		ASSERT_EQ(cpuinfo_uarch_krait, cpuinfo_get_cluster(i)->uarch);
+	}
+}
+
+TEST(CLUSTERS, midr) {
+	for (uint32_t i = 0; i < cpuinfo_get_clusters_count(); i++) {
+		ASSERT_EQ(UINT32_C(0x510F06F2), cpuinfo_get_cluster(i)->midr);
+	}
+}
+
+TEST(CLUSTERS, DISABLED_frequency) {
+	for (uint32_t i = 0; i < cpuinfo_get_clusters_count(); i++) {
+		ASSERT_EQ(UINT64_C(1512000000), cpuinfo_get_cluster(i)->frequency);
+	}
+}
+
 TEST(PACKAGES, count) {
 	ASSERT_EQ(1, cpuinfo_get_packages_count());
 }
diff --git a/test/mock/nexus5x.cc b/test/mock/nexus5x.cc
index 99c2752..ce6e029 100644
--- a/test/mock/nexus5x.cc
+++ b/test/mock/nexus5x.cc
@@ -211,6 +211,123 @@
 	}
 }
 
+TEST(CLUSTERS, count) {
+	ASSERT_EQ(2, cpuinfo_get_clusters_count());
+}
+
+TEST(CLUSTERS, non_null) {
+	ASSERT_TRUE(cpuinfo_get_clusters());
+}
+
+TEST(CLUSTERS, processor_start) {
+	for (uint32_t i = 0; i < cpuinfo_get_clusters_count(); i++) {
+		switch (i) {
+			case 0:
+				ASSERT_EQ(0, cpuinfo_get_cluster(i)->processor_start);
+				break;
+			case 1:
+				ASSERT_EQ(2, cpuinfo_get_cluster(i)->processor_start);
+				break;
+		}
+	}
+}
+
+TEST(CLUSTERS, processor_count) {
+	for (uint32_t i = 0; i < cpuinfo_get_clusters_count(); i++) {
+		switch (i) {
+			case 0:
+				ASSERT_EQ(2, cpuinfo_get_cluster(i)->processor_count);
+				break;
+			case 1:
+				ASSERT_EQ(4, cpuinfo_get_cluster(i)->processor_count);
+				break;
+		}
+	}
+}
+
+TEST(CLUSTERS, core_start) {
+	for (uint32_t i = 0; i < cpuinfo_get_clusters_count(); i++) {
+		switch (i) {
+			case 0:
+				ASSERT_EQ(0, cpuinfo_get_cluster(i)->core_start);
+				break;
+			case 1:
+				ASSERT_EQ(2, cpuinfo_get_cluster(i)->core_start);
+				break;
+		}
+	}
+}
+
+TEST(CLUSTERS, core_count) {
+	for (uint32_t i = 0; i < cpuinfo_get_clusters_count(); i++) {
+		switch (i) {
+			case 0:
+				ASSERT_EQ(2, cpuinfo_get_cluster(i)->core_count);
+				break;
+			case 1:
+				ASSERT_EQ(4, cpuinfo_get_cluster(i)->core_count);
+				break;
+		}
+	}
+}
+
+TEST(CLUSTERS, cluster_id) {
+	for (uint32_t i = 0; i < cpuinfo_get_clusters_count(); i++) {
+		ASSERT_EQ(i, cpuinfo_get_cluster(i)->cluster_id);
+	}
+}
+
+TEST(CLUSTERS, package) {
+	for (uint32_t i = 0; i < cpuinfo_get_clusters_count(); i++) {
+		ASSERT_EQ(cpuinfo_get_package(0), cpuinfo_get_cluster(i)->package);
+	}
+}
+
+TEST(CLUSTERS, vendor) {
+	for (uint32_t i = 0; i < cpuinfo_get_clusters_count(); i++) {
+		ASSERT_EQ(cpuinfo_vendor_arm, cpuinfo_get_cluster(i)->vendor);
+	}
+}
+
+TEST(CLUSTERS, uarch) {
+	for (uint32_t i = 0; i < cpuinfo_get_clusters_count(); i++) {
+		switch (i) {
+			case 0:
+				ASSERT_EQ(cpuinfo_uarch_cortex_a57, cpuinfo_get_cluster(i)->uarch);
+				break;
+			case 1:
+				ASSERT_EQ(cpuinfo_uarch_cortex_a53, cpuinfo_get_cluster(i)->uarch);
+				break;
+		}
+	}
+}
+
+TEST(CLUSTERS, midr) {
+	for (uint32_t i = 0; i < cpuinfo_get_clusters_count(); i++) {
+		switch (i) {
+			case 0:
+				ASSERT_EQ(UINT32_C(0x411FD072), cpuinfo_get_cluster(i)->midr);
+				break;
+			case 1:
+				ASSERT_EQ(UINT32_C(0x410FD033), cpuinfo_get_cluster(i)->midr);
+				break;
+		}
+	}
+}
+
+TEST(CLUSTERS, DISABLED_frequency) {
+	for (uint32_t i = 0; i < cpuinfo_get_clusters_count(); i++) {
+		switch (i) {
+			case 0:
+				ASSERT_EQ(UINT64_C(1824000000), cpuinfo_get_cluster(i)->frequency);
+				break;
+			case 1:
+				ASSERT_EQ(UINT64_C(1440000000), cpuinfo_get_cluster(i)->frequency);
+				break;
+		}
+	}
+}
+
 TEST(PACKAGES, count) {
 	ASSERT_EQ(1, cpuinfo_get_packages_count());
 }
diff --git a/test/mock/nexus6.cc b/test/mock/nexus6.cc
index 57fa1cc..e53d2dd 100644
--- a/test/mock/nexus6.cc
+++ b/test/mock/nexus6.cc
@@ -134,6 +134,74 @@
 	}
 }
 
+TEST(CLUSTERS, count) {
+	ASSERT_EQ(1, cpuinfo_get_clusters_count());
+}
+
+TEST(CLUSTERS, non_null) {
+	ASSERT_TRUE(cpuinfo_get_clusters());
+}
+
+TEST(CLUSTERS, processor_start) {
+	for (uint32_t i = 0; i < cpuinfo_get_clusters_count(); i++) {
+		ASSERT_EQ(0, cpuinfo_get_cluster(i)->processor_start);
+	}
+}
+
+TEST(CLUSTERS, processor_count) {
+	for (uint32_t i = 0; i < cpuinfo_get_clusters_count(); i++) {
+		ASSERT_EQ(4, cpuinfo_get_cluster(i)->processor_count);
+	}
+}
+
+TEST(CLUSTERS, core_start) {
+	for (uint32_t i = 0; i < cpuinfo_get_clusters_count(); i++) {
+		ASSERT_EQ(0, cpuinfo_get_cluster(i)->core_start);
+	}
+}
+
+TEST(CLUSTERS, core_count) {
+	for (uint32_t i = 0; i < cpuinfo_get_clusters_count(); i++) {
+		ASSERT_EQ(4, cpuinfo_get_cluster(i)->core_count);
+	}
+}
+
+TEST(CLUSTERS, cluster_id) {
+	for (uint32_t i = 0; i < cpuinfo_get_clusters_count(); i++) {
+		ASSERT_EQ(i, cpuinfo_get_cluster(i)->cluster_id);
+	}
+}
+
+TEST(CLUSTERS, package) {
+	for (uint32_t i = 0; i < cpuinfo_get_clusters_count(); i++) {
+		ASSERT_EQ(cpuinfo_get_package(0), cpuinfo_get_cluster(i)->package);
+	}
+}
+
+TEST(CLUSTERS, vendor) {
+	for (uint32_t i = 0; i < cpuinfo_get_clusters_count(); i++) {
+		ASSERT_EQ(cpuinfo_vendor_qualcomm, cpuinfo_get_cluster(i)->vendor);
+	}
+}
+
+TEST(CLUSTERS, uarch) {
+	for (uint32_t i = 0; i < cpuinfo_get_clusters_count(); i++) {
+		ASSERT_EQ(cpuinfo_uarch_krait, cpuinfo_get_cluster(i)->uarch);
+	}
+}
+
+TEST(CLUSTERS, midr) {
+	for (uint32_t i = 0; i < cpuinfo_get_clusters_count(); i++) {
+		ASSERT_EQ(UINT32_C(0x513F06F1), cpuinfo_get_cluster(i)->midr);
+	}
+}
+
+TEST(CLUSTERS, DISABLED_frequency) {
+	for (uint32_t i = 0; i < cpuinfo_get_clusters_count(); i++) {
+		ASSERT_EQ(UINT64_C(2649600000), cpuinfo_get_cluster(i)->frequency);
+	}
+}
+
 TEST(PACKAGES, count) {
 	ASSERT_EQ(1, cpuinfo_get_packages_count());
 }
diff --git a/test/mock/nexus6p.cc b/test/mock/nexus6p.cc
index 251e602..3e44545 100644
--- a/test/mock/nexus6p.cc
+++ b/test/mock/nexus6p.cc
@@ -225,6 +225,109 @@
 	}
 }
 
+TEST(CLUSTERS, count) {
+	ASSERT_EQ(2, cpuinfo_get_clusters_count());
+}
+
+TEST(CLUSTERS, non_null) {
+	ASSERT_TRUE(cpuinfo_get_clusters());
+}
+
+TEST(CLUSTERS, processor_start) {
+	for (uint32_t i = 0; i < cpuinfo_get_clusters_count(); i++) {
+		switch (i) {
+			case 0:
+				ASSERT_EQ(0, cpuinfo_get_cluster(i)->processor_start);
+				break;
+			case 1:
+				ASSERT_EQ(4, cpuinfo_get_cluster(i)->processor_start);
+				break;
+		}
+	}
+}
+
+TEST(CLUSTERS, processor_count) {
+	for (uint32_t i = 0; i < cpuinfo_get_clusters_count(); i++) {
+		ASSERT_EQ(4, cpuinfo_get_cluster(i)->processor_count);
+	}
+}
+
+TEST(CLUSTERS, core_start) {
+	for (uint32_t i = 0; i < cpuinfo_get_clusters_count(); i++) {
+		switch (i) {
+			case 0:
+				ASSERT_EQ(0, cpuinfo_get_cluster(i)->core_start);
+				break;
+			case 1:
+				ASSERT_EQ(4, cpuinfo_get_cluster(i)->core_start);
+				break;
+		}
+	}
+}
+
+TEST(CLUSTERS, core_count) {
+	for (uint32_t i = 0; i < cpuinfo_get_clusters_count(); i++) {
+		ASSERT_EQ(4, cpuinfo_get_cluster(i)->core_count);
+	}
+}
+
+TEST(CLUSTERS, cluster_id) {
+	for (uint32_t i = 0; i < cpuinfo_get_clusters_count(); i++) {
+		ASSERT_EQ(i, cpuinfo_get_cluster(i)->cluster_id);
+	}
+}
+
+TEST(CLUSTERS, package) {
+	for (uint32_t i = 0; i < cpuinfo_get_clusters_count(); i++) {
+		ASSERT_EQ(cpuinfo_get_package(0), cpuinfo_get_cluster(i)->package);
+	}
+}
+
+TEST(CLUSTERS, vendor) {
+	for (uint32_t i = 0; i < cpuinfo_get_clusters_count(); i++) {
+		ASSERT_EQ(cpuinfo_vendor_arm, cpuinfo_get_cluster(i)->vendor);
+	}
+}
+
+TEST(CLUSTERS, uarch) {
+	for (uint32_t i = 0; i < cpuinfo_get_clusters_count(); i++) {
+		switch (i) {
+			case 0:
+				ASSERT_EQ(cpuinfo_uarch_cortex_a57, cpuinfo_get_cluster(i)->uarch);
+				break;
+			case 1:
+				ASSERT_EQ(cpuinfo_uarch_cortex_a53, cpuinfo_get_cluster(i)->uarch);
+				break;
+		}
+	}
+}
+
+TEST(CLUSTERS, midr) {
+	for (uint32_t i = 0; i < cpuinfo_get_clusters_count(); i++) {
+		switch (i) {
+			case 0:
+				ASSERT_EQ(UINT32_C(0x411FD071), cpuinfo_get_cluster(i)->midr);
+				break;
+			case 1:
+				ASSERT_EQ(UINT32_C(0x410FD032), cpuinfo_get_cluster(i)->midr);
+				break;
+		}
+	}
+}
+
+TEST(CLUSTERS, DISABLED_frequency) {
+	for (uint32_t i = 0; i < cpuinfo_get_clusters_count(); i++) {
+		switch (i) {
+			case 0:
+				ASSERT_EQ(UINT64_C(1958400000), cpuinfo_get_cluster(i)->frequency);
+				break;
+			case 1:
+				ASSERT_EQ(UINT64_C(1555200000), cpuinfo_get_cluster(i)->frequency);
+				break;
+		}
+	}
+}
+
 TEST(PACKAGES, count) {
 	ASSERT_EQ(1, cpuinfo_get_packages_count());
 }
diff --git a/test/mock/nexus9.cc b/test/mock/nexus9.cc
index 90ade44..67d56ec 100644
--- a/test/mock/nexus9.cc
+++ b/test/mock/nexus9.cc
@@ -134,6 +134,74 @@
 	}
 }
 
+TEST(CLUSTERS, count) {
+	ASSERT_EQ(1, cpuinfo_get_clusters_count());
+}
+
+TEST(CLUSTERS, non_null) {
+	ASSERT_TRUE(cpuinfo_get_clusters());
+}
+
+TEST(CLUSTERS, processor_start) {
+	for (uint32_t i = 0; i < cpuinfo_get_clusters_count(); i++) {
+		ASSERT_EQ(0, cpuinfo_get_cluster(i)->processor_start);
+	}
+}
+
+TEST(CLUSTERS, processor_count) {
+	for (uint32_t i = 0; i < cpuinfo_get_clusters_count(); i++) {
+		ASSERT_EQ(2, cpuinfo_get_cluster(i)->processor_count);
+	}
+}
+
+TEST(CLUSTERS, core_start) {
+	for (uint32_t i = 0; i < cpuinfo_get_clusters_count(); i++) {
+		ASSERT_EQ(0, cpuinfo_get_cluster(i)->core_start);
+	}
+}
+
+TEST(CLUSTERS, core_count) {
+	for (uint32_t i = 0; i < cpuinfo_get_clusters_count(); i++) {
+		ASSERT_EQ(2, cpuinfo_get_cluster(i)->core_count);
+	}
+}
+
+TEST(CLUSTERS, cluster_id) {
+	for (uint32_t i = 0; i < cpuinfo_get_clusters_count(); i++) {
+		ASSERT_EQ(i, cpuinfo_get_cluster(i)->cluster_id);
+	}
+}
+
+TEST(CLUSTERS, package) {
+	for (uint32_t i = 0; i < cpuinfo_get_clusters_count(); i++) {
+		ASSERT_EQ(cpuinfo_get_package(0), cpuinfo_get_cluster(i)->package);
+	}
+}
+
+TEST(CLUSTERS, vendor) {
+	for (uint32_t i = 0; i < cpuinfo_get_clusters_count(); i++) {
+		ASSERT_EQ(cpuinfo_vendor_nvidia, cpuinfo_get_cluster(i)->vendor);
+	}
+}
+
+TEST(CLUSTERS, uarch) {
+	for (uint32_t i = 0; i < cpuinfo_get_clusters_count(); i++) {
+		ASSERT_EQ(cpuinfo_uarch_denver, cpuinfo_get_cluster(i)->uarch);
+	}
+}
+
+TEST(CLUSTERS, midr) {
+	for (uint32_t i = 0; i < cpuinfo_get_clusters_count(); i++) {
+		ASSERT_EQ(UINT32_C(0x4E0F0000), cpuinfo_get_cluster(i)->midr);
+	}
+}
+
+TEST(CLUSTERS, DISABLED_frequency) {
+	for (uint32_t i = 0; i < cpuinfo_get_clusters_count(); i++) {
+		ASSERT_EQ(UINT64_C(2499000000), cpuinfo_get_cluster(i)->frequency);
+	}
+}
+
 TEST(PACKAGES, count) {
 	ASSERT_EQ(1, cpuinfo_get_packages_count());
 }
diff --git a/test/mock/oneplus-3t.cc b/test/mock/oneplus-3t.cc
index 4f46d54..645cbd0 100644
--- a/test/mock/oneplus-3t.cc
+++ b/test/mock/oneplus-3t.cc
@@ -188,6 +188,102 @@
 	}
 }
 
+TEST(CLUSTERS, count) {
+	ASSERT_EQ(2, cpuinfo_get_clusters_count());
+}
+
+TEST(CLUSTERS, non_null) {
+	ASSERT_TRUE(cpuinfo_get_clusters());
+}
+
+TEST(CLUSTERS, processor_start) {
+	for (uint32_t i = 0; i < cpuinfo_get_clusters_count(); i++) {
+		switch (i) {
+			case 0:
+				ASSERT_EQ(0, cpuinfo_get_cluster(i)->processor_start);
+				break;
+			case 1:
+				ASSERT_EQ(2, cpuinfo_get_cluster(i)->processor_start);
+				break;
+		}
+	}
+}
+
+TEST(CLUSTERS, processor_count) {
+	for (uint32_t i = 0; i < cpuinfo_get_clusters_count(); i++) {
+		ASSERT_EQ(2, cpuinfo_get_cluster(i)->processor_count);
+	}
+}
+
+TEST(CLUSTERS, core_start) {
+	for (uint32_t i = 0; i < cpuinfo_get_clusters_count(); i++) {
+		switch (i) {
+			case 0:
+				ASSERT_EQ(0, cpuinfo_get_cluster(i)->core_start);
+				break;
+			case 1:
+				ASSERT_EQ(2, cpuinfo_get_cluster(i)->core_start);
+				break;
+		}
+	}
+}
+
+TEST(CLUSTERS, core_count) {
+	for (uint32_t i = 0; i < cpuinfo_get_clusters_count(); i++) {
+		ASSERT_EQ(2, cpuinfo_get_cluster(i)->core_count);
+	}
+}
+
+TEST(CLUSTERS, cluster_id) {
+	for (uint32_t i = 0; i < cpuinfo_get_clusters_count(); i++) {
+		ASSERT_EQ(i, cpuinfo_get_cluster(i)->cluster_id);
+	}
+}
+
+TEST(CLUSTERS, package) {
+	for (uint32_t i = 0; i < cpuinfo_get_clusters_count(); i++) {
+		ASSERT_EQ(cpuinfo_get_package(0), cpuinfo_get_cluster(i)->package);
+	}
+}
+
+TEST(CLUSTERS, vendor) {
+	for (uint32_t i = 0; i < cpuinfo_get_clusters_count(); i++) {
+		ASSERT_EQ(cpuinfo_vendor_qualcomm, cpuinfo_get_cluster(i)->vendor);
+	}
+}
+
+TEST(CLUSTERS, uarch) {
+	for (uint32_t i = 0; i < cpuinfo_get_clusters_count(); i++) {
+		ASSERT_EQ(cpuinfo_uarch_kryo, cpuinfo_get_cluster(i)->uarch);
+	}
+}
+
+TEST(CLUSTERS, midr) {
+	for (uint32_t i = 0; i < cpuinfo_get_clusters_count(); i++) {
+		switch (i) {
+			case 0:
+				ASSERT_EQ(UINT32_C(0x512F2051), cpuinfo_get_cluster(i)->midr);
+				break;
+			case 1:
+				ASSERT_EQ(UINT32_C(0x512F2011), cpuinfo_get_cluster(i)->midr);
+				break;
+		}
+	}
+}
+
+TEST(CLUSTERS, DISABLED_frequency) {
+	for (uint32_t i = 0; i < cpuinfo_get_clusters_count(); i++) {
+		switch (i) {
+			case 0:
+				ASSERT_EQ(UINT64_C(2342400000), cpuinfo_get_cluster(i)->frequency);
+				break;
+			case 1:
+				ASSERT_EQ(UINT64_C(2188800000), cpuinfo_get_cluster(i)->frequency);
+				break;
+		}
+	}
+}
+
 TEST(PACKAGES, count) {
 	ASSERT_EQ(1, cpuinfo_get_packages_count());
 }
diff --git a/test/mock/oneplus-5.cc b/test/mock/oneplus-5.cc
index 63d000c..b2c9f92 100644
--- a/test/mock/oneplus-5.cc
+++ b/test/mock/oneplus-5.cc
@@ -225,6 +225,109 @@
 	}
 }
 
+TEST(CLUSTERS, count) {
+	ASSERT_EQ(2, cpuinfo_get_clusters_count());
+}
+
+TEST(CLUSTERS, non_null) {
+	ASSERT_TRUE(cpuinfo_get_clusters());
+}
+
+TEST(CLUSTERS, processor_start) {
+	for (uint32_t i = 0; i < cpuinfo_get_clusters_count(); i++) {
+		switch (i) {
+			case 0:
+				ASSERT_EQ(0, cpuinfo_get_cluster(i)->processor_start);
+				break;
+			case 1:
+				ASSERT_EQ(4, cpuinfo_get_cluster(i)->processor_start);
+				break;
+		}
+	}
+}
+
+TEST(CLUSTERS, processor_count) {
+	for (uint32_t i = 0; i < cpuinfo_get_clusters_count(); i++) {
+		ASSERT_EQ(4, cpuinfo_get_cluster(i)->processor_count);
+	}
+}
+
+TEST(CLUSTERS, core_start) {
+	for (uint32_t i = 0; i < cpuinfo_get_clusters_count(); i++) {
+		switch (i) {
+			case 0:
+				ASSERT_EQ(0, cpuinfo_get_cluster(i)->core_start);
+				break;
+			case 1:
+				ASSERT_EQ(4, cpuinfo_get_cluster(i)->core_start);
+				break;
+		}
+	}
+}
+
+TEST(CLUSTERS, core_count) {
+	for (uint32_t i = 0; i < cpuinfo_get_clusters_count(); i++) {
+		ASSERT_EQ(4, cpuinfo_get_cluster(i)->core_count);
+	}
+}
+
+TEST(CLUSTERS, cluster_id) {
+	for (uint32_t i = 0; i < cpuinfo_get_clusters_count(); i++) {
+		ASSERT_EQ(i, cpuinfo_get_cluster(i)->cluster_id);
+	}
+}
+
+TEST(CLUSTERS, package) {
+	for (uint32_t i = 0; i < cpuinfo_get_clusters_count(); i++) {
+		ASSERT_EQ(cpuinfo_get_package(0), cpuinfo_get_cluster(i)->package);
+	}
+}
+
+TEST(CLUSTERS, vendor) {
+	for (uint32_t i = 0; i < cpuinfo_get_clusters_count(); i++) {
+		ASSERT_EQ(cpuinfo_vendor_arm, cpuinfo_get_cluster(i)->vendor);
+	}
+}
+
+TEST(CLUSTERS, uarch) {
+	for (uint32_t i = 0; i < cpuinfo_get_clusters_count(); i++) {
+		switch (i) {
+			case 0:
+				ASSERT_EQ(cpuinfo_uarch_cortex_a73, cpuinfo_get_cluster(i)->uarch);
+				break;
+			case 1:
+				ASSERT_EQ(cpuinfo_uarch_cortex_a53, cpuinfo_get_cluster(i)->uarch);
+				break;
+		}
+	}
+}
+
+TEST(CLUSTERS, midr) {
+	for (uint32_t i = 0; i < cpuinfo_get_clusters_count(); i++) {
+		switch (i) {
+			case 0:
+				ASSERT_EQ(UINT32_C(0x51AF8001), cpuinfo_get_cluster(i)->midr);
+				break;
+			case 1:
+				ASSERT_EQ(UINT32_C(0x51AF8014), cpuinfo_get_cluster(i)->midr);
+				break;
+		}
+	}
+}
+
+TEST(CLUSTERS, DISABLED_frequency) {
+	for (uint32_t i = 0; i < cpuinfo_get_clusters_count(); i++) {
+		switch (i) {
+			case 0:
+				ASSERT_EQ(UINT64_C(2457600000), cpuinfo_get_cluster(i)->frequency);
+				break;
+			case 1:
+				ASSERT_EQ(UINT64_C(1900800000), cpuinfo_get_cluster(i)->frequency);
+				break;
+		}
+	}
+}
+
 TEST(PACKAGES, count) {
 	ASSERT_EQ(1, cpuinfo_get_packages_count());
 }
diff --git a/test/mock/oneplus-5t.cc b/test/mock/oneplus-5t.cc
index b28ad42..5983f63 100644
--- a/test/mock/oneplus-5t.cc
+++ b/test/mock/oneplus-5t.cc
@@ -225,6 +225,109 @@
 	}
 }
 
+TEST(CLUSTERS, count) {
+	ASSERT_EQ(2, cpuinfo_get_clusters_count());
+}
+
+TEST(CLUSTERS, non_null) {
+	ASSERT_TRUE(cpuinfo_get_clusters());
+}
+
+TEST(CLUSTERS, processor_start) {
+	for (uint32_t i = 0; i < cpuinfo_get_clusters_count(); i++) {
+		switch (i) {
+			case 0:
+				ASSERT_EQ(0, cpuinfo_get_cluster(i)->processor_start);
+				break;
+			case 1:
+				ASSERT_EQ(4, cpuinfo_get_cluster(i)->processor_start);
+				break;
+		}
+	}
+}
+
+TEST(CLUSTERS, processor_count) {
+	for (uint32_t i = 0; i < cpuinfo_get_clusters_count(); i++) {
+		ASSERT_EQ(4, cpuinfo_get_cluster(i)->processor_count);
+	}
+}
+
+TEST(CLUSTERS, core_start) {
+	for (uint32_t i = 0; i < cpuinfo_get_clusters_count(); i++) {
+		switch (i) {
+			case 0:
+				ASSERT_EQ(0, cpuinfo_get_cluster(i)->core_start);
+				break;
+			case 1:
+				ASSERT_EQ(4, cpuinfo_get_cluster(i)->core_start);
+				break;
+		}
+	}
+}
+
+TEST(CLUSTERS, core_count) {
+	for (uint32_t i = 0; i < cpuinfo_get_clusters_count(); i++) {
+		ASSERT_EQ(4, cpuinfo_get_cluster(i)->core_count);
+	}
+}
+
+TEST(CLUSTERS, cluster_id) {
+	for (uint32_t i = 0; i < cpuinfo_get_clusters_count(); i++) {
+		ASSERT_EQ(i, cpuinfo_get_cluster(i)->cluster_id);
+	}
+}
+
+TEST(CLUSTERS, package) {
+	for (uint32_t i = 0; i < cpuinfo_get_clusters_count(); i++) {
+		ASSERT_EQ(cpuinfo_get_package(0), cpuinfo_get_cluster(i)->package);
+	}
+}
+
+TEST(CLUSTERS, vendor) {
+	for (uint32_t i = 0; i < cpuinfo_get_clusters_count(); i++) {
+		ASSERT_EQ(cpuinfo_vendor_arm, cpuinfo_get_cluster(i)->vendor);
+	}
+}
+
+TEST(CLUSTERS, uarch) {
+	for (uint32_t i = 0; i < cpuinfo_get_clusters_count(); i++) {
+		switch (i) {
+			case 0:
+				ASSERT_EQ(cpuinfo_uarch_cortex_a73, cpuinfo_get_cluster(i)->uarch);
+				break;
+			case 1:
+				ASSERT_EQ(cpuinfo_uarch_cortex_a53, cpuinfo_get_cluster(i)->uarch);
+				break;
+		}
+	}
+}
+
+TEST(CLUSTERS, midr) {
+	for (uint32_t i = 0; i < cpuinfo_get_clusters_count(); i++) {
+		switch (i) {
+			case 0:
+				ASSERT_EQ(UINT32_C(0x51AF8001), cpuinfo_get_cluster(i)->midr);
+				break;
+			case 1:
+				ASSERT_EQ(UINT32_C(0x51AF8014), cpuinfo_get_cluster(i)->midr);
+				break;
+		}
+	}
+}
+
+TEST(CLUSTERS, DISABLED_frequency) {
+	for (uint32_t i = 0; i < cpuinfo_get_clusters_count(); i++) {
+		switch (i) {
+			case 0:
+				ASSERT_EQ(UINT64_C(2457600000), cpuinfo_get_cluster(i)->frequency);
+				break;
+			case 1:
+				ASSERT_EQ(UINT64_C(1900800000), cpuinfo_get_cluster(i)->frequency);
+				break;
+		}
+	}
+}
+
 TEST(PACKAGES, count) {
 	ASSERT_EQ(1, cpuinfo_get_packages_count());
 }
diff --git a/test/mock/oppo-a37.cc b/test/mock/oppo-a37.cc
index 6f10e41..1beec85 100644
--- a/test/mock/oppo-a37.cc
+++ b/test/mock/oppo-a37.cc
@@ -134,6 +134,74 @@
 	}
 }
 
+TEST(CLUSTERS, count) {
+	ASSERT_EQ(1, cpuinfo_get_clusters_count());
+}
+
+TEST(CLUSTERS, non_null) {
+	ASSERT_TRUE(cpuinfo_get_clusters());
+}
+
+TEST(CLUSTERS, processor_start) {
+	for (uint32_t i = 0; i < cpuinfo_get_clusters_count(); i++) {
+		ASSERT_EQ(0, cpuinfo_get_cluster(i)->processor_start);
+	}
+}
+
+TEST(CLUSTERS, processor_count) {
+	for (uint32_t i = 0; i < cpuinfo_get_clusters_count(); i++) {
+		ASSERT_EQ(4, cpuinfo_get_cluster(i)->processor_count);
+	}
+}
+
+TEST(CLUSTERS, core_start) {
+	for (uint32_t i = 0; i < cpuinfo_get_clusters_count(); i++) {
+		ASSERT_EQ(0, cpuinfo_get_cluster(i)->core_start);
+	}
+}
+
+TEST(CLUSTERS, core_count) {
+	for (uint32_t i = 0; i < cpuinfo_get_clusters_count(); i++) {
+		ASSERT_EQ(4, cpuinfo_get_cluster(i)->core_count);
+	}
+}
+
+TEST(CLUSTERS, cluster_id) {
+	for (uint32_t i = 0; i < cpuinfo_get_clusters_count(); i++) {
+		ASSERT_EQ(i, cpuinfo_get_cluster(i)->cluster_id);
+	}
+}
+
+TEST(CLUSTERS, package) {
+	for (uint32_t i = 0; i < cpuinfo_get_clusters_count(); i++) {
+		ASSERT_EQ(cpuinfo_get_package(0), cpuinfo_get_cluster(i)->package);
+	}
+}
+
+TEST(CLUSTERS, vendor) {
+	for (uint32_t i = 0; i < cpuinfo_get_clusters_count(); i++) {
+		ASSERT_EQ(cpuinfo_vendor_arm, cpuinfo_get_cluster(i)->vendor);
+	}
+}
+
+TEST(CLUSTERS, uarch) {
+	for (uint32_t i = 0; i < cpuinfo_get_clusters_count(); i++) {
+		ASSERT_EQ(cpuinfo_uarch_cortex_a53, cpuinfo_get_cluster(i)->uarch);
+	}
+}
+
+TEST(CLUSTERS, midr) {
+	for (uint32_t i = 0; i < cpuinfo_get_clusters_count(); i++) {
+		ASSERT_EQ(UINT32_C(0x410FD030), cpuinfo_get_cluster(i)->midr);
+	}
+}
+
+TEST(CLUSTERS, DISABLED_frequency) {
+	for (uint32_t i = 0; i < cpuinfo_get_clusters_count(); i++) {
+		ASSERT_EQ(UINT64_C(1209600000), cpuinfo_get_cluster(i)->frequency);
+	}
+}
+
 TEST(PACKAGES, count) {
 	ASSERT_EQ(1, cpuinfo_get_packages_count());
 }
diff --git a/test/mock/oppo-r9.cc b/test/mock/oppo-r9.cc
index 965b5b4..dfefa57 100644
--- a/test/mock/oppo-r9.cc
+++ b/test/mock/oppo-r9.cc
@@ -199,6 +199,95 @@
 	}
 }
 
+TEST(CLUSTERS, count) {
+	ASSERT_EQ(2, cpuinfo_get_clusters_count());
+}
+
+TEST(CLUSTERS, non_null) {
+	ASSERT_TRUE(cpuinfo_get_clusters());
+}
+
+TEST(CLUSTERS, processor_start) {
+	for (uint32_t i = 0; i < cpuinfo_get_clusters_count(); i++) {
+		switch (i) {
+			case 0:
+				ASSERT_EQ(0, cpuinfo_get_cluster(i)->processor_start);
+				break;
+			case 1:
+				ASSERT_EQ(4, cpuinfo_get_cluster(i)->processor_start);
+				break;
+		}
+	}
+}
+
+TEST(CLUSTERS, processor_count) {
+	for (uint32_t i = 0; i < cpuinfo_get_clusters_count(); i++) {
+		ASSERT_EQ(4, cpuinfo_get_cluster(i)->processor_count);
+	}
+}
+
+TEST(CLUSTERS, core_start) {
+	for (uint32_t i = 0; i < cpuinfo_get_clusters_count(); i++) {
+		switch (i) {
+			case 0:
+				ASSERT_EQ(0, cpuinfo_get_cluster(i)->core_start);
+				break;
+			case 1:
+				ASSERT_EQ(4, cpuinfo_get_cluster(i)->core_start);
+				break;
+		}
+	}
+}
+
+TEST(CLUSTERS, core_count) {
+	for (uint32_t i = 0; i < cpuinfo_get_clusters_count(); i++) {
+		ASSERT_EQ(4, cpuinfo_get_cluster(i)->core_count);
+	}
+}
+
+TEST(CLUSTERS, cluster_id) {
+	for (uint32_t i = 0; i < cpuinfo_get_clusters_count(); i++) {
+		ASSERT_EQ(i, cpuinfo_get_cluster(i)->cluster_id);
+	}
+}
+
+TEST(CLUSTERS, package) {
+	for (uint32_t i = 0; i < cpuinfo_get_clusters_count(); i++) {
+		ASSERT_EQ(cpuinfo_get_package(0), cpuinfo_get_cluster(i)->package);
+	}
+}
+
+TEST(CLUSTERS, vendor) {
+	for (uint32_t i = 0; i < cpuinfo_get_clusters_count(); i++) {
+		ASSERT_EQ(cpuinfo_vendor_arm, cpuinfo_get_cluster(i)->vendor);
+	}
+}
+
+TEST(CLUSTERS, uarch) {
+	for (uint32_t i = 0; i < cpuinfo_get_clusters_count(); i++) {
+		ASSERT_EQ(cpuinfo_uarch_cortex_a53, cpuinfo_get_cluster(i)->uarch);
+	}
+}
+
+TEST(CLUSTERS, midr) {
+	for (uint32_t i = 0; i < cpuinfo_get_clusters_count(); i++) {
+		ASSERT_EQ(UINT32_C(0x410FD032), cpuinfo_get_cluster(i)->midr);
+	}
+}
+
+TEST(CLUSTERS, DISABLED_frequency) {
+	for (uint32_t i = 0; i < cpuinfo_get_clusters_count(); i++) {
+		switch (i) {
+			case 0:
+				ASSERT_EQ(UINT64_C(1950000000), cpuinfo_get_cluster(i)->frequency);
+				break;
+			case 1:
+				ASSERT_EQ(UINT64_C(1144000000), cpuinfo_get_cluster(i)->frequency);
+				break;
+		}
+	}
+}
+
 TEST(PACKAGES, count) {
 	ASSERT_EQ(1, cpuinfo_get_packages_count());
 }
diff --git a/test/mock/padcod-10.1.cc b/test/mock/padcod-10.1.cc
index 95d1bdf..4c59d07 100644
--- a/test/mock/padcod-10.1.cc
+++ b/test/mock/padcod-10.1.cc
@@ -134,6 +134,74 @@
 	}
 }
 
+TEST(CLUSTERS, count) {
+	ASSERT_EQ(1, cpuinfo_get_clusters_count());
+}
+
+TEST(CLUSTERS, non_null) {
+	ASSERT_TRUE(cpuinfo_get_clusters());
+}
+
+TEST(CLUSTERS, processor_start) {
+	for (uint32_t i = 0; i < cpuinfo_get_clusters_count(); i++) {
+		ASSERT_EQ(0, cpuinfo_get_cluster(i)->processor_start);
+	}
+}
+
+TEST(CLUSTERS, processor_count) {
+	for (uint32_t i = 0; i < cpuinfo_get_clusters_count(); i++) {
+		ASSERT_EQ(4, cpuinfo_get_cluster(i)->processor_count);
+	}
+}
+
+TEST(CLUSTERS, core_start) {
+	for (uint32_t i = 0; i < cpuinfo_get_clusters_count(); i++) {
+		ASSERT_EQ(0, cpuinfo_get_cluster(i)->core_start);
+	}
+}
+
+TEST(CLUSTERS, core_count) {
+	for (uint32_t i = 0; i < cpuinfo_get_clusters_count(); i++) {
+		ASSERT_EQ(4, cpuinfo_get_cluster(i)->core_count);
+	}
+}
+
+TEST(CLUSTERS, cluster_id) {
+	for (uint32_t i = 0; i < cpuinfo_get_clusters_count(); i++) {
+		ASSERT_EQ(i, cpuinfo_get_cluster(i)->cluster_id);
+	}
+}
+
+TEST(CLUSTERS, package) {
+	for (uint32_t i = 0; i < cpuinfo_get_clusters_count(); i++) {
+		ASSERT_EQ(cpuinfo_get_package(0), cpuinfo_get_cluster(i)->package);
+	}
+}
+
+TEST(CLUSTERS, vendor) {
+	for (uint32_t i = 0; i < cpuinfo_get_clusters_count(); i++) {
+		ASSERT_EQ(cpuinfo_vendor_arm, cpuinfo_get_cluster(i)->vendor);
+	}
+}
+
+TEST(CLUSTERS, uarch) {
+	for (uint32_t i = 0; i < cpuinfo_get_clusters_count(); i++) {
+		ASSERT_EQ(cpuinfo_uarch_cortex_a7, cpuinfo_get_cluster(i)->uarch);
+	}
+}
+
+TEST(CLUSTERS, midr) {
+	for (uint32_t i = 0; i < cpuinfo_get_clusters_count(); i++) {
+		ASSERT_EQ(UINT32_C(0x410FC075), cpuinfo_get_cluster(i)->midr);
+	}
+}
+
+TEST(CLUSTERS, DISABLED_frequency) {
+	for (uint32_t i = 0; i < cpuinfo_get_clusters_count(); i++) {
+		ASSERT_EQ(UINT64_C(1344000000), cpuinfo_get_cluster(i)->frequency);
+	}
+}
+
 TEST(PACKAGES, count) {
 	ASSERT_EQ(1, cpuinfo_get_packages_count());
 }
diff --git a/test/mock/pixel-2-xl.cc b/test/mock/pixel-2-xl.cc
index c129cf6..23bfb15 100644
--- a/test/mock/pixel-2-xl.cc
+++ b/test/mock/pixel-2-xl.cc
@@ -225,6 +225,109 @@
 	}
 }
 
+TEST(CLUSTERS, count) {
+	ASSERT_EQ(2, cpuinfo_get_clusters_count());
+}
+
+TEST(CLUSTERS, non_null) {
+	ASSERT_TRUE(cpuinfo_get_clusters());
+}
+
+TEST(CLUSTERS, processor_start) {
+	for (uint32_t i = 0; i < cpuinfo_get_clusters_count(); i++) {
+		switch (i) {
+			case 0:
+				ASSERT_EQ(0, cpuinfo_get_cluster(i)->processor_start);
+				break;
+			case 1:
+				ASSERT_EQ(4, cpuinfo_get_cluster(i)->processor_start);
+				break;
+		}
+	}
+}
+
+TEST(CLUSTERS, processor_count) {
+	for (uint32_t i = 0; i < cpuinfo_get_clusters_count(); i++) {
+		ASSERT_EQ(4, cpuinfo_get_cluster(i)->processor_count);
+	}
+}
+
+TEST(CLUSTERS, core_start) {
+	for (uint32_t i = 0; i < cpuinfo_get_clusters_count(); i++) {
+		switch (i) {
+			case 0:
+				ASSERT_EQ(0, cpuinfo_get_cluster(i)->core_start);
+				break;
+			case 1:
+				ASSERT_EQ(4, cpuinfo_get_cluster(i)->core_start);
+				break;
+		}
+	}
+}
+
+TEST(CLUSTERS, core_count) {
+	for (uint32_t i = 0; i < cpuinfo_get_clusters_count(); i++) {
+		ASSERT_EQ(4, cpuinfo_get_cluster(i)->core_count);
+	}
+}
+
+TEST(CLUSTERS, cluster_id) {
+	for (uint32_t i = 0; i < cpuinfo_get_clusters_count(); i++) {
+		ASSERT_EQ(i, cpuinfo_get_cluster(i)->cluster_id);
+	}
+}
+
+TEST(CLUSTERS, package) {
+	for (uint32_t i = 0; i < cpuinfo_get_clusters_count(); i++) {
+		ASSERT_EQ(cpuinfo_get_package(0), cpuinfo_get_cluster(i)->package);
+	}
+}
+
+TEST(CLUSTERS, vendor) {
+	for (uint32_t i = 0; i < cpuinfo_get_clusters_count(); i++) {
+		ASSERT_EQ(cpuinfo_vendor_arm, cpuinfo_get_cluster(i)->vendor);
+	}
+}
+
+TEST(CLUSTERS, uarch) {
+	for (uint32_t i = 0; i < cpuinfo_get_clusters_count(); i++) {
+		switch (i) {
+			case 0:
+				ASSERT_EQ(cpuinfo_uarch_cortex_a73, cpuinfo_get_cluster(i)->uarch);
+				break;
+			case 1:
+				ASSERT_EQ(cpuinfo_uarch_cortex_a53, cpuinfo_get_cluster(i)->uarch);
+				break;
+		}
+	}
+}
+
+TEST(CLUSTERS, midr) {
+	for (uint32_t i = 0; i < cpuinfo_get_clusters_count(); i++) {
+		switch (i) {
+			case 0:
+				ASSERT_EQ(UINT32_C(0x51AF8001), cpuinfo_get_cluster(i)->midr);
+				break;
+			case 1:
+				ASSERT_EQ(UINT32_C(0x51AF8014), cpuinfo_get_cluster(i)->midr);
+				break;
+		}
+	}
+}
+
+TEST(CLUSTERS, DISABLED_frequency) {
+	for (uint32_t i = 0; i < cpuinfo_get_clusters_count(); i++) {
+		switch (i) {
+			case 0:
+				ASSERT_EQ(UINT64_C(2457600000), cpuinfo_get_cluster(i)->frequency);
+				break;
+			case 1:
+				ASSERT_EQ(UINT64_C(1900800000), cpuinfo_get_cluster(i)->frequency);
+				break;
+		}
+	}
+}
+
 TEST(PACKAGES, count) {
 	ASSERT_EQ(1, cpuinfo_get_packages_count());
 }
diff --git a/test/mock/pixel-c.cc b/test/mock/pixel-c.cc
index 5f2a66d..07e7a0d 100644
--- a/test/mock/pixel-c.cc
+++ b/test/mock/pixel-c.cc
@@ -134,6 +134,74 @@
 	}
 }
 
+TEST(CLUSTERS, count) {
+	ASSERT_EQ(1, cpuinfo_get_clusters_count());
+}
+
+TEST(CLUSTERS, non_null) {
+	ASSERT_TRUE(cpuinfo_get_clusters());
+}
+
+TEST(CLUSTERS, processor_start) {
+	for (uint32_t i = 0; i < cpuinfo_get_clusters_count(); i++) {
+		ASSERT_EQ(0, cpuinfo_get_cluster(i)->processor_start);
+	}
+}
+
+TEST(CLUSTERS, processor_count) {
+	for (uint32_t i = 0; i < cpuinfo_get_clusters_count(); i++) {
+		ASSERT_EQ(4, cpuinfo_get_cluster(i)->processor_count);
+	}
+}
+
+TEST(CLUSTERS, core_start) {
+	for (uint32_t i = 0; i < cpuinfo_get_clusters_count(); i++) {
+		ASSERT_EQ(0, cpuinfo_get_cluster(i)->core_start);
+	}
+}
+
+TEST(CLUSTERS, core_count) {
+	for (uint32_t i = 0; i < cpuinfo_get_clusters_count(); i++) {
+		ASSERT_EQ(4, cpuinfo_get_cluster(i)->core_count);
+	}
+}
+
+TEST(CLUSTERS, cluster_id) {
+	for (uint32_t i = 0; i < cpuinfo_get_clusters_count(); i++) {
+		ASSERT_EQ(i, cpuinfo_get_cluster(i)->cluster_id);
+	}
+}
+
+TEST(CLUSTERS, package) {
+	for (uint32_t i = 0; i < cpuinfo_get_clusters_count(); i++) {
+		ASSERT_EQ(cpuinfo_get_package(0), cpuinfo_get_cluster(i)->package);
+	}
+}
+
+TEST(CLUSTERS, vendor) {
+	for (uint32_t i = 0; i < cpuinfo_get_clusters_count(); i++) {
+		ASSERT_EQ(cpuinfo_vendor_arm, cpuinfo_get_cluster(i)->vendor);
+	}
+}
+
+TEST(CLUSTERS, uarch) {
+	for (uint32_t i = 0; i < cpuinfo_get_clusters_count(); i++) {
+		ASSERT_EQ(cpuinfo_uarch_cortex_a57, cpuinfo_get_cluster(i)->uarch);
+	}
+}
+
+TEST(CLUSTERS, midr) {
+	for (uint32_t i = 0; i < cpuinfo_get_clusters_count(); i++) {
+		ASSERT_EQ(UINT32_C(0x411FD071), cpuinfo_get_cluster(i)->midr);
+	}
+}
+
+TEST(CLUSTERS, DISABLED_frequency) {
+	for (uint32_t i = 0; i < cpuinfo_get_clusters_count(); i++) {
+		ASSERT_EQ(UINT64_C(1912500000), cpuinfo_get_cluster(i)->frequency);
+	}
+}
+
 TEST(PACKAGES, count) {
 	ASSERT_EQ(1, cpuinfo_get_packages_count());
 }
diff --git a/test/mock/pixel-xl.cc b/test/mock/pixel-xl.cc
index 30bebbc..49a443b 100644
--- a/test/mock/pixel-xl.cc
+++ b/test/mock/pixel-xl.cc
@@ -188,6 +188,102 @@
 	}
 }
 
+TEST(CLUSTERS, count) {
+	ASSERT_EQ(2, cpuinfo_get_clusters_count());
+}
+
+TEST(CLUSTERS, non_null) {
+	ASSERT_TRUE(cpuinfo_get_clusters());
+}
+
+TEST(CLUSTERS, processor_start) {
+	for (uint32_t i = 0; i < cpuinfo_get_clusters_count(); i++) {
+		switch (i) {
+			case 0:
+				ASSERT_EQ(0, cpuinfo_get_cluster(i)->processor_start);
+				break;
+			case 1:
+				ASSERT_EQ(2, cpuinfo_get_cluster(i)->processor_start);
+				break;
+		}
+	}
+}
+
+TEST(CLUSTERS, processor_count) {
+	for (uint32_t i = 0; i < cpuinfo_get_clusters_count(); i++) {
+		ASSERT_EQ(2, cpuinfo_get_cluster(i)->processor_count);
+	}
+}
+
+TEST(CLUSTERS, core_start) {
+	for (uint32_t i = 0; i < cpuinfo_get_clusters_count(); i++) {
+		switch (i) {
+			case 0:
+				ASSERT_EQ(0, cpuinfo_get_cluster(i)->core_start);
+				break;
+			case 1:
+				ASSERT_EQ(2, cpuinfo_get_cluster(i)->core_start);
+				break;
+		}
+	}
+}
+
+TEST(CLUSTERS, core_count) {
+	for (uint32_t i = 0; i < cpuinfo_get_clusters_count(); i++) {
+		ASSERT_EQ(2, cpuinfo_get_cluster(i)->core_count);
+	}
+}
+
+TEST(CLUSTERS, cluster_id) {
+	for (uint32_t i = 0; i < cpuinfo_get_clusters_count(); i++) {
+		ASSERT_EQ(i, cpuinfo_get_cluster(i)->cluster_id);
+	}
+}
+
+TEST(CLUSTERS, package) {
+	for (uint32_t i = 0; i < cpuinfo_get_clusters_count(); i++) {
+		ASSERT_EQ(cpuinfo_get_package(0), cpuinfo_get_cluster(i)->package);
+	}
+}
+
+TEST(CLUSTERS, vendor) {
+	for (uint32_t i = 0; i < cpuinfo_get_clusters_count(); i++) {
+		ASSERT_EQ(cpuinfo_vendor_qualcomm, cpuinfo_get_cluster(i)->vendor);
+	}
+}
+
+TEST(CLUSTERS, uarch) {
+	for (uint32_t i = 0; i < cpuinfo_get_clusters_count(); i++) {
+		ASSERT_EQ(cpuinfo_uarch_kryo, cpuinfo_get_cluster(i)->uarch);
+	}
+}
+
+TEST(CLUSTERS, midr) {
+	for (uint32_t i = 0; i < cpuinfo_get_clusters_count(); i++) {
+		switch (i) {
+			case 0:
+				ASSERT_EQ(UINT32_C(0x512F2051), cpuinfo_get_cluster(i)->midr);
+				break;
+			case 1:
+				ASSERT_EQ(UINT32_C(0x512F2011), cpuinfo_get_cluster(i)->midr);
+				break;
+		}
+	}
+}
+
+TEST(CLUSTERS, DISABLED_frequency) {
+	for (uint32_t i = 0; i < cpuinfo_get_clusters_count(); i++) {
+		switch (i) {
+			case 0:
+				ASSERT_EQ(UINT64_C(2150400000), cpuinfo_get_cluster(i)->frequency);
+				break;
+			case 1:
+				ASSERT_EQ(UINT64_C(1593600000), cpuinfo_get_cluster(i)->frequency);
+				break;
+		}
+	}
+}
+
 TEST(PACKAGES, count) {
 	ASSERT_EQ(1, cpuinfo_get_packages_count());
 }
diff --git a/test/mock/pixel.cc b/test/mock/pixel.cc
index 8ee7017..f2c03b6 100644
--- a/test/mock/pixel.cc
+++ b/test/mock/pixel.cc
@@ -188,6 +188,102 @@
 	}
 }
 
+TEST(CLUSTERS, count) {
+	ASSERT_EQ(2, cpuinfo_get_clusters_count());
+}
+
+TEST(CLUSTERS, non_null) {
+	ASSERT_TRUE(cpuinfo_get_clusters());
+}
+
+TEST(CLUSTERS, processor_start) {
+	for (uint32_t i = 0; i < cpuinfo_get_clusters_count(); i++) {
+		switch (i) {
+			case 0:
+				ASSERT_EQ(0, cpuinfo_get_cluster(i)->processor_start);
+				break;
+			case 1:
+				ASSERT_EQ(2, cpuinfo_get_cluster(i)->processor_start);
+				break;
+		}
+	}
+}
+
+TEST(CLUSTERS, processor_count) {
+	for (uint32_t i = 0; i < cpuinfo_get_clusters_count(); i++) {
+		ASSERT_EQ(2, cpuinfo_get_cluster(i)->processor_count);
+	}
+}
+
+TEST(CLUSTERS, core_start) {
+	for (uint32_t i = 0; i < cpuinfo_get_clusters_count(); i++) {
+		switch (i) {
+			case 0:
+				ASSERT_EQ(0, cpuinfo_get_cluster(i)->core_start);
+				break;
+			case 1:
+				ASSERT_EQ(2, cpuinfo_get_cluster(i)->core_start);
+				break;
+		}
+	}
+}
+
+TEST(CLUSTERS, core_count) {
+	for (uint32_t i = 0; i < cpuinfo_get_clusters_count(); i++) {
+		ASSERT_EQ(2, cpuinfo_get_cluster(i)->core_count);
+	}
+}
+
+TEST(CLUSTERS, cluster_id) {
+	for (uint32_t i = 0; i < cpuinfo_get_clusters_count(); i++) {
+		ASSERT_EQ(i, cpuinfo_get_cluster(i)->cluster_id);
+	}
+}
+
+TEST(CLUSTERS, package) {
+	for (uint32_t i = 0; i < cpuinfo_get_clusters_count(); i++) {
+		ASSERT_EQ(cpuinfo_get_package(0), cpuinfo_get_cluster(i)->package);
+	}
+}
+
+TEST(CLUSTERS, vendor) {
+	for (uint32_t i = 0; i < cpuinfo_get_clusters_count(); i++) {
+		ASSERT_EQ(cpuinfo_vendor_qualcomm, cpuinfo_get_cluster(i)->vendor);
+	}
+}
+
+TEST(CLUSTERS, uarch) {
+	for (uint32_t i = 0; i < cpuinfo_get_clusters_count(); i++) {
+		ASSERT_EQ(cpuinfo_uarch_kryo, cpuinfo_get_cluster(i)->uarch);
+	}
+}
+
+TEST(CLUSTERS, midr) {
+	for (uint32_t i = 0; i < cpuinfo_get_clusters_count(); i++) {
+		switch (i) {
+			case 0:
+				ASSERT_EQ(UINT32_C(0x512F2051), cpuinfo_get_cluster(i)->midr);
+				break;
+			case 1:
+				ASSERT_EQ(UINT32_C(0x512F2011), cpuinfo_get_cluster(i)->midr);
+				break;
+		}
+	}
+}
+
+TEST(CLUSTERS, DISABLED_frequency) {
+	for (uint32_t i = 0; i < cpuinfo_get_clusters_count(); i++) {
+		switch (i) {
+			case 0:
+				ASSERT_EQ(UINT64_C(2150400000), cpuinfo_get_cluster(i)->frequency);
+				break;
+			case 1:
+				ASSERT_EQ(UINT64_C(1593600000), cpuinfo_get_cluster(i)->frequency);
+				break;
+		}
+	}
+}
+
 TEST(PACKAGES, count) {
 	ASSERT_EQ(1, cpuinfo_get_packages_count());
 }
diff --git a/test/mock/xiaomi-mi-5c.cc b/test/mock/xiaomi-mi-5c.cc
index fd3975c..cf435cf 100644
--- a/test/mock/xiaomi-mi-5c.cc
+++ b/test/mock/xiaomi-mi-5c.cc
@@ -199,6 +199,95 @@
 	}
 }
 
+TEST(CLUSTERS, count) {
+	ASSERT_EQ(2, cpuinfo_get_clusters_count());
+}
+
+TEST(CLUSTERS, non_null) {
+	ASSERT_TRUE(cpuinfo_get_clusters());
+}
+
+TEST(CLUSTERS, processor_start) {
+	for (uint32_t i = 0; i < cpuinfo_get_clusters_count(); i++) {
+		switch (i) {
+			case 0:
+				ASSERT_EQ(0, cpuinfo_get_cluster(i)->processor_start);
+				break;
+			case 1:
+				ASSERT_EQ(4, cpuinfo_get_cluster(i)->processor_start);
+				break;
+		}
+	}
+}
+
+TEST(CLUSTERS, processor_count) {
+	for (uint32_t i = 0; i < cpuinfo_get_clusters_count(); i++) {
+		ASSERT_EQ(4, cpuinfo_get_cluster(i)->processor_count);
+	}
+}
+
+TEST(CLUSTERS, core_start) {
+	for (uint32_t i = 0; i < cpuinfo_get_clusters_count(); i++) {
+		switch (i) {
+			case 0:
+				ASSERT_EQ(0, cpuinfo_get_cluster(i)->core_start);
+				break;
+			case 1:
+				ASSERT_EQ(4, cpuinfo_get_cluster(i)->core_start);
+				break;
+		}
+	}
+}
+
+TEST(CLUSTERS, core_count) {
+	for (uint32_t i = 0; i < cpuinfo_get_clusters_count(); i++) {
+		ASSERT_EQ(4, cpuinfo_get_cluster(i)->core_count);
+	}
+}
+
+TEST(CLUSTERS, cluster_id) {
+	for (uint32_t i = 0; i < cpuinfo_get_clusters_count(); i++) {
+		ASSERT_EQ(i, cpuinfo_get_cluster(i)->cluster_id);
+	}
+}
+
+TEST(CLUSTERS, package) {
+	for (uint32_t i = 0; i < cpuinfo_get_clusters_count(); i++) {
+		ASSERT_EQ(cpuinfo_get_package(0), cpuinfo_get_cluster(i)->package);
+	}
+}
+
+TEST(CLUSTERS, vendor) {
+	for (uint32_t i = 0; i < cpuinfo_get_clusters_count(); i++) {
+		ASSERT_EQ(cpuinfo_vendor_arm, cpuinfo_get_cluster(i)->vendor);
+	}
+}
+
+TEST(CLUSTERS, uarch) {
+	for (uint32_t i = 0; i < cpuinfo_get_clusters_count(); i++) {
+		ASSERT_EQ(cpuinfo_uarch_cortex_a53, cpuinfo_get_cluster(i)->uarch);
+	}
+}
+
+TEST(CLUSTERS, midr) {
+	for (uint32_t i = 0; i < cpuinfo_get_clusters_count(); i++) {
+		ASSERT_EQ(UINT32_C(0x410FD034), cpuinfo_get_cluster(i)->midr);
+	}
+}
+
+TEST(CLUSTERS, DISABLED_frequency) {
+	for (uint32_t i = 0; i < cpuinfo_get_clusters_count(); i++) {
+		switch (i) {
+			case 0:
+				ASSERT_EQ(UINT64_C(2158000000), cpuinfo_get_cluster(i)->frequency);
+				break;
+			case 1:
+				ASSERT_EQ(UINT64_C(1404000000), cpuinfo_get_cluster(i)->frequency);
+				break;
+		}
+	}
+}
+
 TEST(PACKAGES, count) {
 	ASSERT_EQ(1, cpuinfo_get_packages_count());
 }
diff --git a/test/mock/xiaomi-redmi-2a.cc b/test/mock/xiaomi-redmi-2a.cc
index 0020530..a60d4a3 100644
--- a/test/mock/xiaomi-redmi-2a.cc
+++ b/test/mock/xiaomi-redmi-2a.cc
@@ -164,6 +164,102 @@
 	}
 }
 
+TEST(CLUSTERS, count) {
+	ASSERT_EQ(2, cpuinfo_get_clusters_count());
+}
+
+TEST(CLUSTERS, non_null) {
+	ASSERT_TRUE(cpuinfo_get_clusters());
+}
+
+TEST(CLUSTERS, processor_start) {
+	for (uint32_t i = 0; i < cpuinfo_get_clusters_count(); i++) {
+		switch (i) {
+			case 0:
+				ASSERT_EQ(0, cpuinfo_get_cluster(i)->processor_start);
+				break;
+			case 1:
+				ASSERT_EQ(4, cpuinfo_get_cluster(i)->processor_start);
+				break;
+		}
+	}
+}
+
+TEST(CLUSTERS, processor_count) {
+	for (uint32_t i = 0; i < cpuinfo_get_clusters_count(); i++) {
+		switch (i) {
+			case 0:
+				ASSERT_EQ(4, cpuinfo_get_cluster(i)->processor_count);
+				break;
+			case 1:
+				ASSERT_EQ(1, cpuinfo_get_cluster(i)->processor_count);
+				break;
+		}
+	}
+}
+
+TEST(CLUSTERS, core_start) {
+	for (uint32_t i = 0; i < cpuinfo_get_clusters_count(); i++) {
+		switch (i) {
+			case 0:
+				ASSERT_EQ(0, cpuinfo_get_cluster(i)->core_start);
+				break;
+			case 1:
+				ASSERT_EQ(4, cpuinfo_get_cluster(i)->core_start);
+				break;
+		}
+	}
+}
+
+TEST(CLUSTERS, core_count) {
+	for (uint32_t i = 0; i < cpuinfo_get_clusters_count(); i++) {
+		switch (i) {
+			case 0:
+				ASSERT_EQ(4, cpuinfo_get_cluster(i)->core_count);
+				break;
+			case 1:
+				ASSERT_EQ(1, cpuinfo_get_cluster(i)->core_count);
+				break;
+		}
+	}
+}
+
+TEST(CLUSTERS, cluster_id) {
+	for (uint32_t i = 0; i < cpuinfo_get_clusters_count(); i++) {
+		ASSERT_EQ(i, cpuinfo_get_cluster(i)->cluster_id);
+	}
+}
+
+TEST(CLUSTERS, package) {
+	for (uint32_t i = 0; i < cpuinfo_get_clusters_count(); i++) {
+		ASSERT_EQ(cpuinfo_get_package(0), cpuinfo_get_cluster(i)->package);
+	}
+}
+
+TEST(CLUSTERS, vendor) {
+	for (uint32_t i = 0; i < cpuinfo_get_clusters_count(); i++) {
+		ASSERT_EQ(cpuinfo_vendor_arm, cpuinfo_get_cluster(i)->vendor);
+	}
+}
+
+TEST(CLUSTERS, uarch) {
+	for (uint32_t i = 0; i < cpuinfo_get_clusters_count(); i++) {
+		ASSERT_EQ(cpuinfo_uarch_cortex_a7, cpuinfo_get_cluster(i)->uarch);
+	}
+}
+
+TEST(CLUSTERS, midr) {
+	for (uint32_t i = 0; i < cpuinfo_get_clusters_count(); i++) {
+		ASSERT_EQ(UINT32_C(0x410FC075), cpuinfo_get_cluster(i)->midr);
+	}
+}
+
+TEST(CLUSTERS, DISABLED_frequency) {
+	for (uint32_t i = 0; i < cpuinfo_get_clusters_count(); i++) {
+		ASSERT_EQ(UINT64_C(1495000000), cpuinfo_get_cluster(i)->frequency);
+	}
+}
+
 TEST(PACKAGES, count) {
 	ASSERT_EQ(1, cpuinfo_get_packages_count());
 }
diff --git a/test/mock/xiaomi-redmi-note-3.cc b/test/mock/xiaomi-redmi-note-3.cc
index 628ba84..4cb0550 100644
--- a/test/mock/xiaomi-redmi-note-3.cc
+++ b/test/mock/xiaomi-redmi-note-3.cc
@@ -211,6 +211,123 @@
 	}
 }
 
+TEST(CLUSTERS, count) {
+	ASSERT_EQ(2, cpuinfo_get_clusters_count());
+}
+
+TEST(CLUSTERS, non_null) {
+	ASSERT_TRUE(cpuinfo_get_clusters());
+}
+
+TEST(CLUSTERS, processor_start) {
+	for (uint32_t i = 0; i < cpuinfo_get_clusters_count(); i++) {
+		switch (i) {
+			case 0:
+				ASSERT_EQ(0, cpuinfo_get_cluster(i)->processor_start);
+				break;
+			case 1:
+				ASSERT_EQ(2, cpuinfo_get_cluster(i)->processor_start);
+				break;
+		}
+	}
+}
+
+TEST(CLUSTERS, processor_count) {
+	for (uint32_t i = 0; i < cpuinfo_get_clusters_count(); i++) {
+		switch (i) {
+			case 0:
+				ASSERT_EQ(2, cpuinfo_get_cluster(i)->processor_count);
+				break;
+			case 1:
+				ASSERT_EQ(4, cpuinfo_get_cluster(i)->processor_count);
+				break;
+		}
+	}
+}
+
+TEST(CLUSTERS, core_start) {
+	for (uint32_t i = 0; i < cpuinfo_get_clusters_count(); i++) {
+		switch (i) {
+			case 0:
+				ASSERT_EQ(0, cpuinfo_get_cluster(i)->core_start);
+				break;
+			case 1:
+				ASSERT_EQ(2, cpuinfo_get_cluster(i)->core_start);
+				break;
+		}
+	}
+}
+
+TEST(CLUSTERS, core_count) {
+	for (uint32_t i = 0; i < cpuinfo_get_clusters_count(); i++) {
+		switch (i) {
+			case 0:
+				ASSERT_EQ(2, cpuinfo_get_cluster(i)->core_count);
+				break;
+			case 1:
+				ASSERT_EQ(4, cpuinfo_get_cluster(i)->core_count);
+				break;
+		}
+	}
+}
+
+TEST(CLUSTERS, cluster_id) {
+	for (uint32_t i = 0; i < cpuinfo_get_clusters_count(); i++) {
+		ASSERT_EQ(i, cpuinfo_get_cluster(i)->cluster_id);
+	}
+}
+
+TEST(CLUSTERS, package) {
+	for (uint32_t i = 0; i < cpuinfo_get_clusters_count(); i++) {
+		ASSERT_EQ(cpuinfo_get_package(0), cpuinfo_get_cluster(i)->package);
+	}
+}
+
+TEST(CLUSTERS, vendor) {
+	for (uint32_t i = 0; i < cpuinfo_get_clusters_count(); i++) {
+		ASSERT_EQ(cpuinfo_vendor_arm, cpuinfo_get_cluster(i)->vendor);
+	}
+}
+
+TEST(CLUSTERS, uarch) {
+	for (uint32_t i = 0; i < cpuinfo_get_clusters_count(); i++) {
+		switch (i) {
+			case 0:
+				ASSERT_EQ(cpuinfo_uarch_cortex_a72, cpuinfo_get_cluster(i)->uarch);
+				break;
+			case 1:
+				ASSERT_EQ(cpuinfo_uarch_cortex_a53, cpuinfo_get_cluster(i)->uarch);
+				break;
+		}
+	}
+}
+
+TEST(CLUSTERS, midr) {
+	for (uint32_t i = 0; i < cpuinfo_get_clusters_count(); i++) {
+		switch (i) {
+			case 0:
+				ASSERT_EQ(UINT32_C(0x410FD080), cpuinfo_get_cluster(i)->midr);
+				break;
+			case 1:
+				ASSERT_EQ(UINT32_C(0x410FD034), cpuinfo_get_cluster(i)->midr);
+				break;
+		}
+	}
+}
+
+TEST(CLUSTERS, DISABLED_frequency) {
+	for (uint32_t i = 0; i < cpuinfo_get_clusters_count(); i++) {
+		switch (i) {
+			case 0:
+				ASSERT_EQ(UINT64_C(1804800000), cpuinfo_get_cluster(i)->frequency);
+				break;
+			case 1:
+				ASSERT_EQ(UINT64_C(1401600000), cpuinfo_get_cluster(i)->frequency);
+				break;
+		}
+	}
+}
+
 TEST(PACKAGES, count) {
 	ASSERT_EQ(1, cpuinfo_get_packages_count());
 }
diff --git a/test/mock/xiaomi-redmi-note-4.cc b/test/mock/xiaomi-redmi-note-4.cc
index 609ab21..dc4f1de 100644
--- a/test/mock/xiaomi-redmi-note-4.cc
+++ b/test/mock/xiaomi-redmi-note-4.cc
@@ -186,6 +186,88 @@
 	}
 }
 
+TEST(CLUSTERS, count) {
+	ASSERT_EQ(2, cpuinfo_get_clusters_count());
+}
+
+TEST(CLUSTERS, non_null) {
+	ASSERT_TRUE(cpuinfo_get_clusters());
+}
+
+TEST(CLUSTERS, processor_start) {
+	for (uint32_t i = 0; i < cpuinfo_get_clusters_count(); i++) {
+		switch (i) {
+			case 0:
+				ASSERT_EQ(0, cpuinfo_get_cluster(i)->processor_start);
+				break;
+			case 1:
+				ASSERT_EQ(4, cpuinfo_get_cluster(i)->processor_start);
+				break;
+		}
+	}
+}
+
+TEST(CLUSTERS, processor_count) {
+	for (uint32_t i = 0; i < cpuinfo_get_clusters_count(); i++) {
+		ASSERT_EQ(4, cpuinfo_get_cluster(i)->processor_count);
+	}
+}
+
+TEST(CLUSTERS, core_start) {
+	for (uint32_t i = 0; i < cpuinfo_get_clusters_count(); i++) {
+		switch (i) {
+			case 0:
+				ASSERT_EQ(0, cpuinfo_get_cluster(i)->core_start);
+				break;
+			case 1:
+				ASSERT_EQ(4, cpuinfo_get_cluster(i)->core_start);
+				break;
+		}
+	}
+}
+
+TEST(CLUSTERS, core_count) {
+	for (uint32_t i = 0; i < cpuinfo_get_clusters_count(); i++) {
+		ASSERT_EQ(4, cpuinfo_get_cluster(i)->core_count);
+	}
+}
+
+TEST(CLUSTERS, cluster_id) {
+	for (uint32_t i = 0; i < cpuinfo_get_clusters_count(); i++) {
+		ASSERT_EQ(i, cpuinfo_get_cluster(i)->cluster_id);
+	}
+}
+
+TEST(CLUSTERS, package) {
+	for (uint32_t i = 0; i < cpuinfo_get_clusters_count(); i++) {
+		ASSERT_EQ(cpuinfo_get_package(0), cpuinfo_get_cluster(i)->package);
+	}
+}
+
+TEST(CLUSTERS, vendor) {
+	for (uint32_t i = 0; i < cpuinfo_get_clusters_count(); i++) {
+		ASSERT_EQ(cpuinfo_vendor_arm, cpuinfo_get_cluster(i)->vendor);
+	}
+}
+
+TEST(CLUSTERS, uarch) {
+	for (uint32_t i = 0; i < cpuinfo_get_clusters_count(); i++) {
+		ASSERT_EQ(cpuinfo_uarch_cortex_a53, cpuinfo_get_cluster(i)->uarch);
+	}
+}
+
+TEST(CLUSTERS, midr) {
+	for (uint32_t i = 0; i < cpuinfo_get_clusters_count(); i++) {
+		ASSERT_EQ(UINT32_C(0x410FD034), cpuinfo_get_cluster(i)->midr);
+	}
+}
+
+TEST(CLUSTERS, DISABLED_frequency) {
+	for (uint32_t i = 0; i < cpuinfo_get_clusters_count(); i++) {
+		ASSERT_EQ(UINT64_C(2016000000), cpuinfo_get_cluster(i)->frequency);
+	}
+}
+
 TEST(PACKAGES, count) {
 	ASSERT_EQ(1, cpuinfo_get_packages_count());
 }
diff --git a/test/mock/xperia-c4-dual.cc b/test/mock/xperia-c4-dual.cc
index 5ed0050..a2918f0 100644
--- a/test/mock/xperia-c4-dual.cc
+++ b/test/mock/xperia-c4-dual.cc
@@ -186,6 +186,88 @@
 	}
 }
 
+TEST(CLUSTERS, count) {
+	ASSERT_EQ(2, cpuinfo_get_clusters_count());
+}
+
+TEST(CLUSTERS, non_null) {
+	ASSERT_TRUE(cpuinfo_get_clusters());
+}
+
+TEST(CLUSTERS, processor_start) {
+	for (uint32_t i = 0; i < cpuinfo_get_clusters_count(); i++) {
+		switch (i) {
+			case 0:
+				ASSERT_EQ(0, cpuinfo_get_cluster(i)->processor_start);
+				break;
+			case 1:
+				ASSERT_EQ(4, cpuinfo_get_cluster(i)->processor_start);
+				break;
+		}
+	}
+}
+
+TEST(CLUSTERS, processor_count) {
+	for (uint32_t i = 0; i < cpuinfo_get_clusters_count(); i++) {
+		ASSERT_EQ(4, cpuinfo_get_cluster(i)->processor_count);
+	}
+}
+
+TEST(CLUSTERS, core_start) {
+	for (uint32_t i = 0; i < cpuinfo_get_clusters_count(); i++) {
+		switch (i) {
+			case 0:
+				ASSERT_EQ(0, cpuinfo_get_cluster(i)->core_start);
+				break;
+			case 1:
+				ASSERT_EQ(4, cpuinfo_get_cluster(i)->core_start);
+				break;
+		}
+	}
+}
+
+TEST(CLUSTERS, core_count) {
+	for (uint32_t i = 0; i < cpuinfo_get_clusters_count(); i++) {
+		ASSERT_EQ(4, cpuinfo_get_cluster(i)->core_count);
+	}
+}
+
+TEST(CLUSTERS, cluster_id) {
+	for (uint32_t i = 0; i < cpuinfo_get_clusters_count(); i++) {
+		ASSERT_EQ(i, cpuinfo_get_cluster(i)->cluster_id);
+	}
+}
+
+TEST(CLUSTERS, package) {
+	for (uint32_t i = 0; i < cpuinfo_get_clusters_count(); i++) {
+		ASSERT_EQ(cpuinfo_get_package(0), cpuinfo_get_cluster(i)->package);
+	}
+}
+
+TEST(CLUSTERS, vendor) {
+	for (uint32_t i = 0; i < cpuinfo_get_clusters_count(); i++) {
+		ASSERT_EQ(cpuinfo_vendor_arm, cpuinfo_get_cluster(i)->vendor);
+	}
+}
+
+TEST(CLUSTERS, uarch) {
+	for (uint32_t i = 0; i < cpuinfo_get_clusters_count(); i++) {
+		ASSERT_EQ(cpuinfo_uarch_cortex_a53, cpuinfo_get_cluster(i)->uarch);
+	}
+}
+
+TEST(CLUSTERS, midr) {
+	for (uint32_t i = 0; i < cpuinfo_get_clusters_count(); i++) {
+		ASSERT_EQ(UINT32_C(0x410FD032), cpuinfo_get_cluster(i)->midr);
+	}
+}
+
+TEST(CLUSTERS, DISABLED_frequency) {
+	for (uint32_t i = 0; i < cpuinfo_get_clusters_count(); i++) {
+		ASSERT_EQ(UINT64_C(1690000000), cpuinfo_get_cluster(i)->frequency);
+	}
+}
+
 TEST(PACKAGES, count) {
 	ASSERT_EQ(1, cpuinfo_get_packages_count());
 }
diff --git a/test/mock/xperia-sl.cc b/test/mock/xperia-sl.cc
index ec9d0ad..5a3cd73 100644
--- a/test/mock/xperia-sl.cc
+++ b/test/mock/xperia-sl.cc
@@ -134,6 +134,74 @@
 	}
 }
 
+TEST(CLUSTERS, count) {
+	ASSERT_EQ(1, cpuinfo_get_clusters_count());
+}
+
+TEST(CLUSTERS, non_null) {
+	ASSERT_TRUE(cpuinfo_get_clusters());
+}
+
+TEST(CLUSTERS, processor_start) {
+	for (uint32_t i = 0; i < cpuinfo_get_clusters_count(); i++) {
+		ASSERT_EQ(0, cpuinfo_get_cluster(i)->processor_start);
+	}
+}
+
+TEST(CLUSTERS, processor_count) {
+	for (uint32_t i = 0; i < cpuinfo_get_clusters_count(); i++) {
+		ASSERT_EQ(2, cpuinfo_get_cluster(i)->processor_count);
+	}
+}
+
+TEST(CLUSTERS, core_start) {
+	for (uint32_t i = 0; i < cpuinfo_get_clusters_count(); i++) {
+		ASSERT_EQ(0, cpuinfo_get_cluster(i)->core_start);
+	}
+}
+
+TEST(CLUSTERS, core_count) {
+	for (uint32_t i = 0; i < cpuinfo_get_clusters_count(); i++) {
+		ASSERT_EQ(2, cpuinfo_get_cluster(i)->core_count);
+	}
+}
+
+TEST(CLUSTERS, cluster_id) {
+	for (uint32_t i = 0; i < cpuinfo_get_clusters_count(); i++) {
+		ASSERT_EQ(i, cpuinfo_get_cluster(i)->cluster_id);
+	}
+}
+
+TEST(CLUSTERS, package) {
+	for (uint32_t i = 0; i < cpuinfo_get_clusters_count(); i++) {
+		ASSERT_EQ(cpuinfo_get_package(0), cpuinfo_get_cluster(i)->package);
+	}
+}
+
+TEST(CLUSTERS, vendor) {
+	for (uint32_t i = 0; i < cpuinfo_get_clusters_count(); i++) {
+		ASSERT_EQ(cpuinfo_vendor_qualcomm, cpuinfo_get_cluster(i)->vendor);
+	}
+}
+
+TEST(CLUSTERS, uarch) {
+	for (uint32_t i = 0; i < cpuinfo_get_clusters_count(); i++) {
+		ASSERT_EQ(cpuinfo_uarch_scorpion, cpuinfo_get_cluster(i)->uarch);
+	}
+}
+
+TEST(CLUSTERS, midr) {
+	for (uint32_t i = 0; i < cpuinfo_get_clusters_count(); i++) {
+		ASSERT_EQ(UINT32_C(0x510F02D4), cpuinfo_get_cluster(i)->midr);
+	}
+}
+
+TEST(CLUSTERS, DISABLED_frequency) {
+	for (uint32_t i = 0; i < cpuinfo_get_clusters_count(); i++) {
+		ASSERT_EQ(UINT64_C(1674000000), cpuinfo_get_cluster(i)->frequency);
+	}
+}
+
 TEST(PACKAGES, count) {
 	ASSERT_EQ(1, cpuinfo_get_packages_count());
 }