#include <gtest/gtest.h>

#include <cpuinfo.h>
#include <cpuinfo-mock.h>

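// Expected values below describe a Huawei Mate 8 (HiSilicon Kirin 950:
// four Cortex-A72 cores plus four Cortex-A53 cores), as captured in the
// mocked /proc and /sys snapshot provided by huawei-mate-8.h.
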
TEST(PROCESSORS, count) {
	ASSERT_EQ(8, cpuinfo_processors_count);
}

TEST(PROCESSORS, non_null) {
	ASSERT_TRUE(cpuinfo_processors);
}

TEST(PROCESSORS, vendor) {
	for (uint32_t i = 0; i < cpuinfo_processors_count; i++) {
		ASSERT_EQ(cpuinfo_vendor_arm, cpuinfo_processors[i].vendor);
	}
}

TEST(PROCESSORS, uarch) {
	for (uint32_t i = 0; i < cpuinfo_processors_count; i++) {
		switch (i) {
			case 0:
			case 1:
			case 2:
			case 3:
				ASSERT_EQ(cpuinfo_uarch_cortex_a72, cpuinfo_processors[i].uarch);
				break;
			case 4:
			case 5:
			case 6:
			case 7:
				ASSERT_EQ(cpuinfo_uarch_cortex_a53, cpuinfo_processors[i].uarch);
				break;
		}
	}
}

TEST(ISA, thumb) {
	ASSERT_TRUE(cpuinfo_isa.thumb);
}

TEST(ISA, thumb2) {
	ASSERT_TRUE(cpuinfo_isa.thumb2);
}

TEST(ISA, thumbee) {
	ASSERT_FALSE(cpuinfo_isa.thumbee);
}

TEST(ISA, jazelle) {
	ASSERT_FALSE(cpuinfo_isa.jazelle);
}

TEST(ISA, armv5e) {
	ASSERT_TRUE(cpuinfo_isa.armv5e);
}

TEST(ISA, armv6) {
	ASSERT_TRUE(cpuinfo_isa.armv6);
}

TEST(ISA, armv6k) {
	ASSERT_TRUE(cpuinfo_isa.armv6k);
}

TEST(ISA, armv7) {
	ASSERT_TRUE(cpuinfo_isa.armv7);
}

TEST(ISA, armv7mp) {
	ASSERT_TRUE(cpuinfo_isa.armv7mp);
}

TEST(ISA, idiv) {
	ASSERT_TRUE(cpuinfo_isa.idiv);
}

TEST(ISA, vfpv2) {
	ASSERT_FALSE(cpuinfo_isa.vfpv2);
}

TEST(ISA, vfpv3) {
	ASSERT_TRUE(cpuinfo_isa.vfpv3);
}

TEST(ISA, d32) {
	ASSERT_TRUE(cpuinfo_isa.d32);
}

TEST(ISA, fp16) {
	ASSERT_TRUE(cpuinfo_isa.fp16);
}

TEST(ISA, fma) {
	ASSERT_TRUE(cpuinfo_isa.fma);
}

TEST(ISA, wmmx) {
	ASSERT_FALSE(cpuinfo_isa.wmmx);
}

TEST(ISA, wmmx2) {
	ASSERT_FALSE(cpuinfo_isa.wmmx2);
}

TEST(ISA, neon) {
	ASSERT_TRUE(cpuinfo_isa.neon);
}

TEST(ISA, aes) {
	ASSERT_TRUE(cpuinfo_isa.aes);
}

TEST(ISA, sha1) {
	ASSERT_TRUE(cpuinfo_isa.sha1);
}

TEST(ISA, sha2) {
	ASSERT_TRUE(cpuinfo_isa.sha2);
}

TEST(ISA, pmull) {
	ASSERT_TRUE(cpuinfo_isa.pmull);
}

TEST(ISA, crc32) {
	ASSERT_TRUE(cpuinfo_isa.crc32);
}

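// For every cache level, the "sets" tests check the geometry identity
// size == sets * line_size * partitions * associativity
// against the values reported for each cache instance.
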
TEST(L1I, count) {
	cpuinfo_caches l1i = cpuinfo_get_l1i_cache();
	ASSERT_EQ(8, l1i.count);
}

TEST(L1I, non_null) {
	cpuinfo_caches l1i = cpuinfo_get_l1i_cache();
	ASSERT_TRUE(l1i.instances);
}

TEST(L1I, size) {
	cpuinfo_caches l1i = cpuinfo_get_l1i_cache();
	for (uint32_t k = 0; k < l1i.count; k++) {
		switch (k) {
			case 0:
			case 1:
			case 2:
			case 3:
				ASSERT_EQ(48 * 1024, l1i.instances[k].size);
				break;
			case 4:
			case 5:
			case 6:
			case 7:
				ASSERT_EQ(16 * 1024, l1i.instances[k].size);
				break;
		}
	}
}

TEST(L1I, associativity) {
	cpuinfo_caches l1i = cpuinfo_get_l1i_cache();
	for (uint32_t k = 0; k < l1i.count; k++) {
		switch (k) {
			case 0:
			case 1:
			case 2:
			case 3:
				ASSERT_EQ(3, l1i.instances[k].associativity);
				break;
			case 4:
			case 5:
			case 6:
			case 7:
				ASSERT_EQ(2, l1i.instances[k].associativity);
				break;
		}
	}
}

TEST(L1I, sets) {
	cpuinfo_caches l1i = cpuinfo_get_l1i_cache();
	for (uint32_t k = 0; k < l1i.count; k++) {
		ASSERT_EQ(l1i.instances[k].size,
			l1i.instances[k].sets * l1i.instances[k].line_size * l1i.instances[k].partitions * l1i.instances[k].associativity);
	}
}

TEST(L1I, partitions) {
	cpuinfo_caches l1i = cpuinfo_get_l1i_cache();
	for (uint32_t k = 0; k < l1i.count; k++) {
		ASSERT_EQ(1, l1i.instances[k].partitions);
	}
}

TEST(L1I, line_size) {
	cpuinfo_caches l1i = cpuinfo_get_l1i_cache();
	for (uint32_t k = 0; k < l1i.count; k++) {
		ASSERT_EQ(64, l1i.instances[k].line_size);
	}
}

TEST(L1I, flags) {
	cpuinfo_caches l1i = cpuinfo_get_l1i_cache();
	for (uint32_t k = 0; k < l1i.count; k++) {
		ASSERT_EQ(0, l1i.instances[k].flags);
	}
}

TEST(L1I, processors) {
	cpuinfo_caches l1i = cpuinfo_get_l1i_cache();
	for (uint32_t k = 0; k < l1i.count; k++) {
		ASSERT_EQ(k, l1i.instances[k].processor_start);
		ASSERT_EQ(1, l1i.instances[k].processor_count);
	}
}

TEST(L1D, count) {
	cpuinfo_caches l1d = cpuinfo_get_l1d_cache();
	ASSERT_EQ(8, l1d.count);
}

TEST(L1D, non_null) {
	cpuinfo_caches l1d = cpuinfo_get_l1d_cache();
	ASSERT_TRUE(l1d.instances);
}

TEST(L1D, size) {
	cpuinfo_caches l1d = cpuinfo_get_l1d_cache();
	for (uint32_t k = 0; k < l1d.count; k++) {
		switch (k) {
			case 0:
			case 1:
			case 2:
			case 3:
				ASSERT_EQ(32 * 1024, l1d.instances[k].size);
				break;
			case 4:
			case 5:
			case 6:
			case 7:
				ASSERT_EQ(16 * 1024, l1d.instances[k].size);
				break;
		}
	}
}

TEST(L1D, associativity) {
	cpuinfo_caches l1d = cpuinfo_get_l1d_cache();
	for (uint32_t k = 0; k < l1d.count; k++) {
		switch (k) {
			case 0:
			case 1:
			case 2:
			case 3:
				ASSERT_EQ(2, l1d.instances[k].associativity);
				break;
			case 4:
			case 5:
			case 6:
			case 7:
				ASSERT_EQ(4, l1d.instances[k].associativity);
				break;
		}
	}
}

TEST(L1D, sets) {
	cpuinfo_caches l1d = cpuinfo_get_l1d_cache();
	for (uint32_t k = 0; k < l1d.count; k++) {
		ASSERT_EQ(l1d.instances[k].size,
			l1d.instances[k].sets * l1d.instances[k].line_size * l1d.instances[k].partitions * l1d.instances[k].associativity);
	}
}

TEST(L1D, partitions) {
	cpuinfo_caches l1d = cpuinfo_get_l1d_cache();
	for (uint32_t k = 0; k < l1d.count; k++) {
		ASSERT_EQ(1, l1d.instances[k].partitions);
	}
}

TEST(L1D, line_size) {
	cpuinfo_caches l1d = cpuinfo_get_l1d_cache();
	for (uint32_t k = 0; k < l1d.count; k++) {
		ASSERT_EQ(64, l1d.instances[k].line_size);
	}
}

TEST(L1D, flags) {
	cpuinfo_caches l1d = cpuinfo_get_l1d_cache();
	for (uint32_t k = 0; k < l1d.count; k++) {
		ASSERT_EQ(0, l1d.instances[k].flags);
	}
}

TEST(L1D, processors) {
	cpuinfo_caches l1d = cpuinfo_get_l1d_cache();
	for (uint32_t k = 0; k < l1d.count; k++) {
		ASSERT_EQ(k, l1d.instances[k].processor_start);
		ASSERT_EQ(1, l1d.instances[k].processor_count);
	}
}

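// Each four-core cluster shares one L2 cache: 1 MB (flagged inclusive) on
// the Cortex-A72 cluster and 256 KB on the Cortex-A53 cluster, hence the
// per-index expectations in the tests below.
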
TEST(L2, count) {
	cpuinfo_caches l2 = cpuinfo_get_l2_cache();
	ASSERT_EQ(2, l2.count);
}

TEST(L2, non_null) {
	cpuinfo_caches l2 = cpuinfo_get_l2_cache();
	ASSERT_TRUE(l2.instances);
}

TEST(L2, size) {
	cpuinfo_caches l2 = cpuinfo_get_l2_cache();
	for (uint32_t k = 0; k < l2.count; k++) {
		switch (k) {
			case 0:
				ASSERT_EQ(1 * 1024 * 1024, l2.instances[k].size);
				break;
			case 1:
				ASSERT_EQ(256 * 1024, l2.instances[k].size);
				break;
		}
	}
}

TEST(L2, associativity) {
	cpuinfo_caches l2 = cpuinfo_get_l2_cache();
	for (uint32_t k = 0; k < l2.count; k++) {
		ASSERT_EQ(16, l2.instances[k].associativity);
	}
}

TEST(L2, sets) {
	cpuinfo_caches l2 = cpuinfo_get_l2_cache();
	for (uint32_t k = 0; k < l2.count; k++) {
		ASSERT_EQ(l2.instances[k].size,
			l2.instances[k].sets * l2.instances[k].line_size * l2.instances[k].partitions * l2.instances[k].associativity);
	}
}

TEST(L2, partitions) {
	cpuinfo_caches l2 = cpuinfo_get_l2_cache();
	for (uint32_t k = 0; k < l2.count; k++) {
		ASSERT_EQ(1, l2.instances[k].partitions);
	}
}

TEST(L2, line_size) {
	cpuinfo_caches l2 = cpuinfo_get_l2_cache();
	for (uint32_t k = 0; k < l2.count; k++) {
		ASSERT_EQ(64, l2.instances[k].line_size);
	}
}

TEST(L2, flags) {
	cpuinfo_caches l2 = cpuinfo_get_l2_cache();
	for (uint32_t k = 0; k < l2.count; k++) {
		switch (k) {
			case 0:
				ASSERT_EQ(CPUINFO_CACHE_INCLUSIVE, l2.instances[k].flags);
				break;
			case 1:
				ASSERT_EQ(0, l2.instances[k].flags);
				break;
		}
	}
}

TEST(L2, processors) {
	cpuinfo_caches l2 = cpuinfo_get_l2_cache();
	for (uint32_t k = 0; k < l2.count; k++) {
		switch (k) {
			case 0:
				ASSERT_EQ(0, l2.instances[k].processor_start);
				ASSERT_EQ(4, l2.instances[k].processor_count);
				break;
			case 1:
				ASSERT_EQ(4, l2.instances[k].processor_start);
				ASSERT_EQ(4, l2.instances[k].processor_count);
				break;
		}
	}
}

TEST(L3, none) {
	cpuinfo_caches l3 = cpuinfo_get_l3_cache();
	ASSERT_EQ(0, l3.count);
	ASSERT_FALSE(l3.instances);
}

TEST(L4, none) {
	cpuinfo_caches l4 = cpuinfo_get_l4_cache();
	ASSERT_EQ(0, l4.count);
	ASSERT_FALSE(l4.instances);
}

#include <huawei-mate-8.h>

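// The mocked filesystem snapshot must be installed before
// cpuinfo_initialize(), so that initialization parses the recorded
// /proc and /sys contents rather than the host system's.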
int main(int argc, char* argv[]) {
	cpuinfo_mock_filesystem(filesystem);
	cpuinfo_initialize();
	::testing::InitGoogleTest(&argc, argv);
	return RUN_ALL_TESTS();
}