#include <stdio.h>
#include <stdint.h>
#include <inttypes.h> /* PRIu32/SCNu32 format specifiers used below */
#include <stdlib.h>
#include <string.h>
#include <alloca.h>

#include <errno.h>
#include <sys/types.h>
#include <sys/sysctl.h>
#include <mach/host_info.h>
#include <mach/mach_host.h>
#include <mach/machine.h>

#include <cpuinfo.h>
#include <mach/api.h>
#include <gpu/api.h>
#include <api.h>
#include <log.h>

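/*
 * ISA features known at compile time:
 * 32-bit iOS targets are at least ARMv7 with VFPv3-D32 and NEON;
 * 64-bit targets are assumed to support the ARMv8 crypto and CRC32 extensions.
 * Chip-specific features are refined at runtime in cpuinfo_arm_mach_init.
 */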
struct cpuinfo_arm_isa cpuinfo_isa = {
#if CPUINFO_ARCH_ARM
    .thumb = true,
    .thumb2 = true,
    .thumbee = false,
    .jazelle = false,
    .armv5e = true,
    .armv6 = true,
    .armv6k = true,
    .armv7 = true,
    .vfpv2 = false,
    .vfpv3 = true,
    .d32 = true,
    .wmmx = false,
    .wmmx2 = false,
    .neon = true,
#endif
#if CPUINFO_ARCH_ARM64
    .aes = true,
    .sha1 = true,
    .sha2 = true,
    .pmull = true,
    .crc32 = true,
#endif
};

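/*
 * Query an integer value from the CTL_HW sysctl namespace.
 * Returns 0 and logs an error or warning if the lookup fails or the value
 * is not a 32-bit integer.
 */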
static uint32_t get_sys_info(int type_specifier, char* name) {
    size_t size = 0;
    uint32_t result = 0;
    int mib[2] = { CTL_HW, type_specifier };
    if (sysctl(mib, 2, NULL, &size, NULL, 0) != 0) {
        cpuinfo_log_error("sysctl(\"%s\") failed: %s", name, strerror(errno));
    } else if (size == sizeof(uint32_t)) {
        sysctl(mib, 2, &result, &size, NULL, 0);
        cpuinfo_log_debug("%s: %"PRIu32 ", size = %zu", name, result, size);
    } else {
        cpuinfo_log_warning("sysctl does not support non-integer lookup for (\"%s\")", name);
    }
    return result;
}

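/*
 * Same as get_sys_info, but looks up a named sysctl entry via sysctlbyname
 * rather than a CTL_HW selector.
 */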
static uint32_t get_sys_info_by_name(char* type_specifier) {
    size_t size = 0;
    uint32_t result = 0;
    if (sysctlbyname(type_specifier, NULL, &size, NULL, 0) != 0) {
        cpuinfo_log_error("sysctlbyname(\"%s\") failed: %s", type_specifier, strerror(errno));
    } else if (size == sizeof(uint32_t)) {
        sysctlbyname(type_specifier, &result, &size, NULL, 0);
        cpuinfo_log_debug("%s: %"PRIu32 ", size = %zu", type_specifier, result, size);
    } else {
        cpuinfo_log_warning("sysctl does not support non-integer lookup for (\"%s\")", type_specifier);
    }
    return result;
}

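/*
 * Map hw.cpufamily (with hw.cpusubtype as a fallback) to a cpuinfo
 * microarchitecture. core_index is used only for the heterogeneous A11,
 * where cores 0-1 are Monsoon (big) and cores 2-5 are Mistral (little).
 */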
static enum cpuinfo_uarch decode_uarch(uint32_t cpu_family, uint32_t cpu_subtype, uint32_t core_index) {
    switch (cpu_family) {
        case CPUFAMILY_ARM_SWIFT:
            return cpuinfo_uarch_swift;
        case CPUFAMILY_ARM_CYCLONE:
            return cpuinfo_uarch_cyclone;
        case CPUFAMILY_ARM_TYPHOON:
            return cpuinfo_uarch_typhoon;
        case CPUFAMILY_ARM_TWISTER:
            return cpuinfo_uarch_twister;
        case CPUFAMILY_ARM_HURRICANE:
            return cpuinfo_uarch_hurricane;
#ifdef CPUFAMILY_ARM_MONSOON_MISTRAL
        case CPUFAMILY_ARM_MONSOON_MISTRAL:
#else
        case 0xe81e7ef6:
            /* Hard-coded value for older SDKs which do not define CPUFAMILY_ARM_MONSOON_MISTRAL */
#endif
            /* 2x Monsoon + 4x Mistral cores */
            return core_index < 2 ? cpuinfo_uarch_monsoon : cpuinfo_uarch_mistral;
        default:
            /* Use hw.cpusubtype for detection */
            break;
    }

    switch (cpu_subtype) {
        case CPU_SUBTYPE_ARM_V7:
            return cpuinfo_uarch_cortex_a8;
        case CPU_SUBTYPE_ARM_V7F:
            return cpuinfo_uarch_cortex_a9;
        case CPU_SUBTYPE_ARM_V7K:
            return cpuinfo_uarch_cortex_a7;
        default:
            return cpuinfo_uarch_unknown;
    }
}

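/*
 * Derive a human-readable SoC name (e.g. "Apple A11") for the package from
 * the hw.machine device identifier (e.g. "iPhone10,3") using the known
 * mapping between device generations and Apple chip numbers.
 */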
static void decode_package_name(char* package_name) {
    size_t size;
    if (sysctlbyname("hw.machine", NULL, &size, NULL, 0) != 0) {
        cpuinfo_log_warning("sysctlbyname(\"hw.machine\") failed: %s", strerror(errno));
        return;
    }

    char *machine_name = alloca(size);
    if (sysctlbyname("hw.machine", machine_name, &size, NULL, 0) != 0) {
        cpuinfo_log_warning("sysctlbyname(\"hw.machine\") failed: %s", strerror(errno));
        return;
    }
    cpuinfo_log_debug("hw.machine: %s", machine_name);

    char name[10];
    uint32_t major = 0, minor = 0;
    if (sscanf(machine_name, "%9[^,0123456789]%"SCNu32",%"SCNu32, name, &major, &minor) != 3) {
        cpuinfo_log_warning("failed to parse \"hw.machine\" value \"%s\"", machine_name);
        return;
    }

    uint32_t chip_model = 0;
    char suffix = '\0';
    if (strcmp(name, "iPhone") == 0) {
        /*
         * iPhone 4 and up are supported:
         * - iPhone 4       [A4]:  iPhone3,1, iPhone3,2, iPhone3,3
         * - iPhone 4S      [A5]:  iPhone4,1
         * - iPhone 5       [A6]:  iPhone5,1, iPhone5,2
         * - iPhone 5c      [A6]:  iPhone5,3, iPhone5,4
         * - iPhone 5s      [A7]:  iPhone6,1, iPhone6,2
         * - iPhone 6       [A8]:  iPhone7,2
         * - iPhone 6 Plus  [A8]:  iPhone7,1
         * - iPhone 6s      [A9]:  iPhone8,1
         * - iPhone 6s Plus [A9]:  iPhone8,2
         * - iPhone SE      [A9]:  iPhone8,4
         * - iPhone 7       [A10]: iPhone9,1, iPhone9,3
         * - iPhone 7 Plus  [A10]: iPhone9,2, iPhone9,4
         * - iPhone 8       [A11]: iPhone10,1, iPhone10,4
         * - iPhone 8 Plus  [A11]: iPhone10,2, iPhone10,5
         * - iPhone X       [A11]: iPhone10,3, iPhone10,6
         */
        chip_model = major + 1;
    } else if (strcmp(name, "iPad") == 0) {
        switch (major) {
            /* iPad 2 and up are supported */
            case 2:
                /*
                 * iPad 2    [A5]: iPad2,1, iPad2,2, iPad2,3, iPad2,4
                 * iPad mini [A5]: iPad2,5, iPad2,6, iPad2,7
                 */
                chip_model = major + 3;
                break;
            case 3:
                /*
                 * iPad 3rd Gen [A5X]: iPad3,1, iPad3,2, iPad3,3
                 * iPad 4th Gen [A6X]: iPad3,4, iPad3,5, iPad3,6
                 */
                chip_model = (minor <= 3) ? 5 : 6;
                suffix = 'X';
                break;
            case 4:
                /*
                 * iPad Air         [A7]: iPad4,1, iPad4,2, iPad4,3
                 * iPad mini Retina [A7]: iPad4,4, iPad4,5, iPad4,6
                 * iPad mini 3      [A7]: iPad4,7, iPad4,8, iPad4,9
                 */
                chip_model = major + 3;
                break;
            case 5:
                /*
                 * iPad mini 4 [A8]:  iPad5,1, iPad5,2
                 * iPad Air 2  [A8X]: iPad5,3, iPad5,4
                 */
                chip_model = major + 3;
                suffix = (minor <= 2) ? '\0' : 'X';
                break;
            case 6:
                /*
                 * iPad Pro 9.7" [A9X]: iPad6,3, iPad6,4
                 * iPad Pro      [A9X]: iPad6,7, iPad6,8
                 * iPad 5th Gen  [A9]:  iPad6,11, iPad6,12
                 */
                chip_model = major + 3;
                suffix = minor <= 8 ? 'X' : '\0';
                break;
            case 7:
                /*
                 * iPad Pro 12.9" [A10X]: iPad7,1, iPad7,2
                 * iPad Pro 10.5" [A10X]: iPad7,3, iPad7,4
                 */
                chip_model = major + 3;
                suffix = 'X';
                break;
            default:
                cpuinfo_log_info("unknown iPad: %s", machine_name);
                break;
        }
    } else if (strcmp(name, "iPod") == 0) {
        switch (major) {
            case 5:
                /* iPod touch (5th Gen) [A5]: iPod5,1 */
                chip_model = 5;
                break;
            case 7:
                /* iPod touch (6th Gen, 2015) [A8]: iPod7,1 */
                chip_model = 8;
                break;
            default:
                cpuinfo_log_info("unknown iPod: %s", machine_name);
                break;
        }
    } else {
        cpuinfo_log_info("unknown device: %s", machine_name);
    }
    if (chip_model != 0) {
        snprintf(package_name, CPUINFO_PACKAGE_NAME_MAX, "Apple A%"PRIu32"%c", chip_model, suffix);
    }
}

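/*
 * Entry point for cpuinfo initialization on iOS/Mach: detects the processor
 * topology, decodes ISA features and microarchitectures, builds cache
 * descriptors, and publishes the results through the global cpuinfo_* tables.
 */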
void cpuinfo_arm_mach_init(void) {
    struct cpuinfo_processor* processors = NULL;
    struct cpuinfo_core* cores = NULL;
    struct cpuinfo_package* packages = NULL;
    struct cpuinfo_cache* l1i = NULL;
    struct cpuinfo_cache* l1d = NULL;
    struct cpuinfo_cache* l2 = NULL;
    struct cpuinfo_cache* l3 = NULL;

    struct cpuinfo_mach_topology mach_topology = cpuinfo_mach_detect_topology();
    processors = calloc(mach_topology.threads, sizeof(struct cpuinfo_processor));
    if (processors == NULL) {
        cpuinfo_log_error("failed to allocate %zu bytes for descriptions of %"PRIu32" logical processors",
            mach_topology.threads * sizeof(struct cpuinfo_processor), mach_topology.threads);
        goto cleanup;
    }
    cores = calloc(mach_topology.cores, sizeof(struct cpuinfo_core));
    if (cores == NULL) {
        cpuinfo_log_error("failed to allocate %zu bytes for descriptions of %"PRIu32" cores",
            mach_topology.cores * sizeof(struct cpuinfo_core), mach_topology.cores);
        goto cleanup;
    }
    packages = calloc(mach_topology.packages, sizeof(struct cpuinfo_package));
    if (packages == NULL) {
        cpuinfo_log_error("failed to allocate %zu bytes for descriptions of %"PRIu32" packages",
            mach_topology.packages * sizeof(struct cpuinfo_package), mach_topology.packages);
        goto cleanup;
    }

    const uint32_t threads_per_core = mach_topology.threads / mach_topology.cores;
    const uint32_t threads_per_package = mach_topology.threads / mach_topology.packages;
    const uint32_t cores_per_package = mach_topology.cores / mach_topology.packages;

    for (uint32_t i = 0; i < mach_topology.packages; i++) {
        packages[i] = (struct cpuinfo_package) {
            .processor_start = i * threads_per_package,
            .processor_count = threads_per_package,
            .core_start = i * cores_per_package,
            .core_count = cores_per_package,
        };
        decode_package_name(packages[i].name);
        cpuinfo_gpu_ios_query_gles2(packages[i].gpu_name);
    }

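    /*
     * Refine ISA features that depend on the particular chip: on 32-bit
     * builds the feature level (MP extensions, FMA, FP16) is inferred from
     * hw.cpusubtype via fall-through cases; ARMv8-class parts additionally
     * report the crypto and CRC32 extensions.
     */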
    const uint32_t cpu_family = get_sys_info_by_name("hw.cpufamily");
    const uint32_t cpu_type = get_sys_info_by_name("hw.cputype");
    const uint32_t cpu_subtype = get_sys_info_by_name("hw.cpusubtype");
    switch (cpu_type) {
        case CPU_TYPE_ARM64:
            cpuinfo_isa.aes = true;
            cpuinfo_isa.sha1 = true;
            cpuinfo_isa.sha2 = true;
            cpuinfo_isa.pmull = true;
            cpuinfo_isa.crc32 = true;
            break;
#if CPUINFO_ARCH_ARM
        case CPU_TYPE_ARM:
            switch (cpu_subtype) {
                case CPU_SUBTYPE_ARM_V8:
                    cpuinfo_isa.aes = true;
                    cpuinfo_isa.sha1 = true;
                    cpuinfo_isa.sha2 = true;
                    cpuinfo_isa.pmull = true;
                    cpuinfo_isa.crc32 = true;
                    /* Fall-through to add ARMv7S features */
                case CPU_SUBTYPE_ARM_V7S:
                case CPU_SUBTYPE_ARM_V7K:
                    cpuinfo_isa.fma = true;
                    /* Fall-through to add ARMv7F features */
                case CPU_SUBTYPE_ARM_V7F:
                    cpuinfo_isa.armv7mp = true;
                    cpuinfo_isa.fp16 = true;
                    /* Fall-through to add ARMv7 features */
                case CPU_SUBTYPE_ARM_V7:
                    break;
                default:
                    break;
            }
            break;
#endif
    }

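    /* Build per-core and per-logical-processor records from the detected topology. */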
    for (uint32_t i = 0; i < mach_topology.cores; i++) {
        cores[i] = (struct cpuinfo_core) {
            .processor_start = i * threads_per_core,
            .processor_count = threads_per_core,
            .core_id = i % cores_per_package,
            .package = packages + i / cores_per_package,
            .vendor = cpuinfo_vendor_apple,
            .uarch = decode_uarch(cpu_family, cpu_subtype, i),
        };
    }
    for (uint32_t i = 0; i < mach_topology.threads; i++) {
        const uint32_t smt_id = i % threads_per_core;
        const uint32_t core_id = i / threads_per_core;
        const uint32_t package_id = i / threads_per_package;

        processors[i].smt_id = smt_id;
        processors[i].core = &cores[core_id];
        processors[i].package = &packages[package_id];
    }

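    /*
     * Cache geometry: sizes and line size come from sysctl; associativity
     * and partition counts are hard-coded assumptions rather than values
     * queried from the system.
     */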
    const uint32_t cacheline_size = get_sys_info(HW_CACHELINE, "HW_CACHELINE");
    const uint32_t l1d_cache_size = get_sys_info(HW_L1DCACHESIZE, "HW_L1DCACHESIZE");
    const uint32_t l1i_cache_size = get_sys_info(HW_L1ICACHESIZE, "HW_L1ICACHESIZE");
    const uint32_t l2_cache_size = get_sys_info(HW_L2CACHESIZE, "HW_L2CACHESIZE");
    const uint32_t l3_cache_size = get_sys_info(HW_L3CACHESIZE, "HW_L3CACHESIZE");
    const uint32_t l1_cache_associativity = 4;
    const uint32_t l2_cache_associativity = 8;
    const uint32_t l3_cache_associativity = 16;
    const uint32_t cache_partitions = 1;
    const uint32_t cache_flags = 0;

    uint32_t threads_per_l1 = 0, l1_count = 0;
    if (l1i_cache_size != 0 || l1d_cache_size != 0) {
        /* Assume L1 caches are private to each core */
        threads_per_l1 = 1;
        l1_count = mach_topology.threads / threads_per_l1;
        cpuinfo_log_debug("detected %"PRIu32" L1 caches", l1_count);
    }

    uint32_t threads_per_l2 = 0, l2_count = 0;
    if (l2_cache_size != 0) {
        /* Assume L2 cache is shared between all cores */
        threads_per_l2 = mach_topology.cores;
        l2_count = 1;
        cpuinfo_log_debug("detected %"PRIu32" L2 caches", l2_count);
    }

    uint32_t threads_per_l3 = 0, l3_count = 0;
    if (l3_cache_size != 0) {
        /* Assume L3 cache is shared between all cores */
        threads_per_l3 = mach_topology.cores;
        l3_count = 1;
        cpuinfo_log_debug("detected %"PRIu32" L3 caches", l3_count);
    }

    if (l1i_cache_size != 0) {
        l1i = calloc(l1_count, sizeof(struct cpuinfo_cache));
        if (l1i == NULL) {
            cpuinfo_log_error("failed to allocate %zu bytes for descriptions of %"PRIu32" L1I caches",
                l1_count * sizeof(struct cpuinfo_cache), l1_count);
            goto cleanup;
        }
        for (uint32_t c = 0; c < l1_count; c++) {
            l1i[c] = (struct cpuinfo_cache) {
                .size = l1i_cache_size,
                .associativity = l1_cache_associativity,
                .sets = l1i_cache_size / (l1_cache_associativity * cacheline_size),
                .partitions = cache_partitions,
                .line_size = cacheline_size,
                .flags = cache_flags,
                .processor_start = c * threads_per_l1,
                .processor_count = threads_per_l1,
            };
        }
        for (uint32_t t = 0; t < mach_topology.threads; t++) {
            processors[t].cache.l1i = &l1i[t / threads_per_l1];
        }
    }

    if (l1d_cache_size != 0) {
        l1d = calloc(l1_count, sizeof(struct cpuinfo_cache));
        if (l1d == NULL) {
            cpuinfo_log_error("failed to allocate %zu bytes for descriptions of %"PRIu32" L1D caches",
                l1_count * sizeof(struct cpuinfo_cache), l1_count);
            goto cleanup;
        }
        for (uint32_t c = 0; c < l1_count; c++) {
            l1d[c] = (struct cpuinfo_cache) {
                .size = l1d_cache_size,
                .associativity = l1_cache_associativity,
                .sets = l1d_cache_size / (l1_cache_associativity * cacheline_size),
                .partitions = cache_partitions,
                .line_size = cacheline_size,
                .flags = cache_flags,
                .processor_start = c * threads_per_l1,
                .processor_count = threads_per_l1,
            };
        }
        for (uint32_t t = 0; t < mach_topology.threads; t++) {
            processors[t].cache.l1d = &l1d[t / threads_per_l1];
        }
    }

    if (l2_count != 0) {
        l2 = calloc(l2_count, sizeof(struct cpuinfo_cache));
        if (l2 == NULL) {
            cpuinfo_log_error("failed to allocate %zu bytes for descriptions of %"PRIu32" L2 caches",
                l2_count * sizeof(struct cpuinfo_cache), l2_count);
            goto cleanup;
        }
        for (uint32_t c = 0; c < l2_count; c++) {
            l2[c] = (struct cpuinfo_cache) {
                .size = l2_cache_size,
                .associativity = l2_cache_associativity,
                .sets = l2_cache_size / (l2_cache_associativity * cacheline_size),
                .partitions = cache_partitions,
                .line_size = cacheline_size,
                .flags = cache_flags,
                .processor_start = c * threads_per_l2,
                .processor_count = threads_per_l2,
            };
        }
        for (uint32_t t = 0; t < mach_topology.threads; t++) {
            processors[t].cache.l2 = &l2[0];
        }
    }

    if (l3_count != 0) {
        l3 = calloc(l3_count, sizeof(struct cpuinfo_cache));
        if (l3 == NULL) {
            cpuinfo_log_error("failed to allocate %zu bytes for descriptions of %"PRIu32" L3 caches",
                l3_count * sizeof(struct cpuinfo_cache), l3_count);
            goto cleanup;
        }
        for (uint32_t c = 0; c < l3_count; c++) {
            l3[c] = (struct cpuinfo_cache) {
                .size = l3_cache_size,
                .associativity = l3_cache_associativity,
                .sets = l3_cache_size / (l3_cache_associativity * cacheline_size),
                .partitions = cache_partitions,
                .line_size = cacheline_size,
                .flags = cache_flags,
                .processor_start = c * threads_per_l3,
                .processor_count = threads_per_l3,
            };
        }
        for (uint32_t t = 0; t < mach_topology.threads; t++) {
            processors[t].cache.l3 = &l3[0];
        }
    }

    /* Commit changes */
    cpuinfo_cache[cpuinfo_cache_level_1i] = l1i;
    cpuinfo_cache[cpuinfo_cache_level_1d] = l1d;
    cpuinfo_cache[cpuinfo_cache_level_2] = l2;
    cpuinfo_cache[cpuinfo_cache_level_3] = l3;

    cpuinfo_processors = processors;
    cpuinfo_cores = cores;
    cpuinfo_packages = packages;

    cpuinfo_cache_count[cpuinfo_cache_level_1i] = l1_count;
    cpuinfo_cache_count[cpuinfo_cache_level_1d] = l1_count;
    cpuinfo_cache_count[cpuinfo_cache_level_2] = l2_count;
    cpuinfo_cache_count[cpuinfo_cache_level_3] = l3_count;

    cpuinfo_processors_count = mach_topology.threads;
    cpuinfo_cores_count = mach_topology.cores;
    cpuinfo_packages_count = mach_topology.packages;

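    /*
     * Ownership of the allocations has been transferred to the globals above;
     * reset the local pointers so the cleanup path below does not free them.
     */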
    processors = NULL;
    cores = NULL;
    packages = NULL;
    l1i = l1d = l2 = l3 = NULL;

cleanup:
    free(processors);
    free(cores);
    free(packages);
    free(l1i);
    free(l1d);
    free(l2);
    free(l3);
}