blob: 61c028d0d864a3c339b2bd4404cc293f9883f6ab [file] [log] [blame]
#include <stdio.h>
#include <stdint.h>
#include <stdlib.h>
#include <string.h>
#include <alloca.h>
#include <inttypes.h>

#include <errno.h>
#include <sys/types.h>
#include <sys/sysctl.h>
#include <mach/host_info.h>
#include <mach/mach_host.h>
#include <mach/machine.h>

#include <cpuinfo.h>
#include <mach/api.h>
#include <api.h>
#include <log.h>
19
Marat Dukhan7b738882017-10-18 16:59:28 -070020struct cpuinfo_arm_isa cpuinfo_isa = { 0 };
21
Hao Lu922070c2017-10-18 16:29:02 -070022static uint32_t get_sys_info(int type_specifier, char* name) {
23 size_t size = 0;
24 uint32_t result = 0;
25 int mib[2] = { CTL_HW, type_specifier };
26 if (sysctl(mib, 2, NULL, &size, NULL, 0) != 0) {
27 cpuinfo_log_error("sysctl(\"%s\") failed: %s", name, strerror(errno));
28 } else if (size == sizeof(uint32_t)) {
29 sysctl(mib, 2, &result, &size, NULL, 0);
30 cpuinfo_log_debug("%s: %"PRIu32 ", size = %lu", name, result, size);
31 } else {
32 cpuinfo_log_warning("sysctl does not support non-integer lookup for (\"%s\")", name);
33 }
34 return result;
35}
36
37static uint64_t get_sys_info_by_name(char* type_specifier) {
38 size_t size = 0;
39 uint32_t result = 0;
40 if (sysctlbyname(type_specifier, NULL, &size, NULL, 0) != 0) {
41 cpuinfo_log_error("sysctlbyname(\"%s\") failed: %s", type_specifier, strerror(errno));
42 } else if (size == sizeof(uint32_t)) {
43 sysctlbyname(type_specifier, &result, &size, NULL, 0);
44 cpuinfo_log_debug("%s: %"PRIu32 ", size = %lu", type_specifier, result, size);
45 } else {
46 cpuinfo_log_warning("sysctl does not support non-integer lookup for (\"%s\")", type_specifier);
47 }
48 return result;
49}
50
51static enum cpuinfo_uarch decode_uarch(uint32_t cpu_family, uint32_t cpu_subtype, uint32_t core_index) {
52 switch (cpu_family) {
53 case CPUFAMILY_ARM_SWIFT:
54 return cpuinfo_uarch_swift;
55 case CPUFAMILY_ARM_CYCLONE:
56 return cpuinfo_uarch_cyclone;
57 case CPUFAMILY_ARM_TYPHOON:
58 return cpuinfo_uarch_typhoon;
59 case CPUFAMILY_ARM_TWISTER:
60 return cpuinfo_uarch_twister;
61 case CPUFAMILY_ARM_HURRICANE:
62 return cpuinfo_uarch_hurricane;
Marat Dukhan7b738882017-10-18 16:59:28 -070063#ifdef CPUFAMILY_ARM_MONSOON_MISTRAL
Hao Lu922070c2017-10-18 16:29:02 -070064 case CPUFAMILY_ARM_MONSOON_MISTRAL:
Marat Dukhan7b738882017-10-18 16:59:28 -070065#else
66 case 0xe81e7ef6:
67 /* Hard-coded value for older SDKs which do not define CPUFAMILY_ARM_MONSOON_MISTRAL */
68#endif
Hao Lu922070c2017-10-18 16:29:02 -070069 /* 2x Monsoon + 4x Mistral cores */
70 return core_index < 2 ? cpuinfo_uarch_monsoon : cpuinfo_uarch_mistral;
71 default:
72 /* Use hw.cpusubtype for detection */
73 break;
74 }
75
76 switch (cpu_subtype) {
77 case CPU_SUBTYPE_ARM_V7:
78 return cpuinfo_uarch_cortex_a8;
79 case CPU_SUBTYPE_ARM_V7F:
80 return cpuinfo_uarch_cortex_a9;
81 case CPU_SUBTYPE_ARM_V7K:
82 return cpuinfo_uarch_cortex_a7;
83 default:
84 return cpuinfo_uarch_unknown;
85 }
86}
87
88static void decode_package_name(char* package_name) {
89 size_t size;
90 if (sysctlbyname("hw.machine", NULL, &size, NULL, 0) != 0) {
91 cpuinfo_log_warning("sysctlbyname(\"hw.machine\") failed: %s", strerror(errno));
92 return;
93 }
94
95 char *machine_name = alloca(size);
96 if (sysctlbyname("hw.machine", machine_name, &size, NULL, 0) != 0) {
97 cpuinfo_log_warning("sysctlbyname(\"hw.machine\") failed: %s", strerror(errno));
98 return;
99 }
100 cpuinfo_log_debug("hw.machine: %s", machine_name);
101
102 char name[10];
103 uint32_t major = 0, minor = 0;
104 if (sscanf(machine_name, "%9[^,0123456789]%"SCNu32",%"SCNu32, name, &major, &minor) != 3) {
105 cpuinfo_log_warning("parsing \"hw.machine\" failed: %s", strerror(errno));
106 return;
107 }
108
109 uint32_t chip_model = 0;
110 char suffix = '\0';
111 if (strcmp(name, "iPhone") == 0) {
112 /*
113 * iPhone 4 and up are supported:
114 * - iPhone 4 [A4]: iPhone3,1, iPhone3,2, iPhone3,3
115 * - iPhone 4S [A5]: iPhone4,1
116 * - iPhone 5 [A6]: iPhone5,1, iPhone5,2
117 * - iPhone 5c [A6]: iPhone5,3, iPhone5,4
118 * - iPhone 5s [A7]: iPhone6,1, iPhone6,2
119 * - iPhone 6 [A8]: iPhone7,2
120 * - iPhone 6 Plus [A8]: iPhone7,1
121 * - iPhone 6s [A9]: iPhone8,1
122 * - iPhone 6s Plus [A9]: iPhone8,2
123 * - iPhone SE [A9]: iPhone8,4
124 * - iPhone 7 [A10]: iPhone9,1, iPhone9,3
125 * - iPhone 7 Plus [A10]: iPhone9,2, iPhone9,4
126 * - iPhone 8 [A11]: iPhone10,1, iPhone10,4
127 * - iPhone 8 Plus [A11]: iPhone10,2, iPhone10,5
128 * - iPhone X [A11]: iPhone10,3, iPhone10,6
129 */
130 chip_model = major + 1;
131 } else if (strcmp(name, "iPad") == 0) {
132 switch (major) {
133 /* iPad 2 and up are supported */
134 case 2:
135 /*
136 * iPad 2 [A5]: iPad2,1, iPad2,2, iPad2,3, iPad2,4
137 * iPad mini [A5]: iPad2,5, iPad2,6, iPad2,7
138 */
139 chip_model = major + 3;
140 break;
141 case 3:
142 /*
143 * iPad 3rd Gen [A5X]: iPad3,1, iPad3,2, iPad3,3
144 * iPad 4th Gen [A6X]: iPad3,4, iPad3,5, iPad3,6
145 */
146 chip_model = (minor <= 3) ? 5 : 6;
147 suffix = 'X';
148 break;
149 case 4:
150 /*
151 * iPad Air [A7]: iPad4,1, iPad4,2, iPad4,3
152 * iPad mini Retina [A7]: iPad4,4, iPad4,5, iPad4,6
153 * iPad mini 3 [A7]: iPad4,7, iPad4,8, iPad4,9
154 */
155 chip_model = major + 3;
156 break;
157 case 5:
158 /*
159 * iPad mini 4 [A8]: iPad5,1, iPad5,2
160 * iPad Air 2 [A8X]: iPad5,3, iPad5,4
161 */
162 chip_model = major + 3;
163 suffix = (minor <= 2) ? '\0' : 'X';
164 break;
165 case 6:
166 /*
167 * iPad Pro 9.7" [A9X]: iPad6,3, iPad6,4
168 * iPad Pro [A9X]: iPad6,7, iPad6,8
169 * iPad 5th Gen [A9]: iPad6,11, iPad6,12
170 */
171 chip_model = major + 3;
172 suffix = minor <= 8 ? 'X' : '\0';
173 break;
174 case 7:
175 /*
176 * iPad Pro 12.9" [A10X]: iPad7,1, iPad7,2
177 * iPad Pro 10.5" [A10X]: iPad7,3, iPad7,4
178 */
179 chip_model = major + 3;
180 suffix = 'X';
181 break;
182 default:
183 cpuinfo_log_info("unknown iPad: %s", machine_name);
184 break;
185 }
186 } else if (strcmp(name, "iPod") == 0) {
187 switch (major) {
188 case 5:
189 chip_model = 5;
190 break;
191 /* iPod touch (5th Gen) [A5]: iPod5,1 */
192 case 7:
193 /* iPod touch (6th Gen, 2015) [A8]: iPod7,1 */
194 chip_model = 8;
195 break;
196 default:
197 cpuinfo_log_info("unknown iPod: %s", machine_name);
198 break;
199 }
200 } else {
201 cpuinfo_log_info("unknown device: %s", machine_name);
202 }
203 if (chip_model != 0) {
204 snprintf(package_name, CPUINFO_PACKAGE_NAME_MAX, "Apple A%"PRIu32"%c", chip_model, suffix);
205 }
206}
207
208void cpuinfo_arm_mach_init(void) {
209 struct cpuinfo_processor* processors = NULL;
210 struct cpuinfo_core* cores = NULL;
211 struct cpuinfo_package* packages = NULL;
212 struct cpuinfo_cache* l1i = NULL;
213 struct cpuinfo_cache* l1d = NULL;
214 struct cpuinfo_cache* l2 = NULL;
215 struct cpuinfo_cache* l3 = NULL;
216
217 struct cpuinfo_mach_topology mach_topology = cpuinfo_mach_detect_topology();
218 processors = calloc(mach_topology.threads, sizeof(struct cpuinfo_processor));
219 if (processors == NULL) {
220 cpuinfo_log_error("failed to allocate %zu bytes for descriptions of %"PRIu32" logical processors",
221 mach_topology.threads * sizeof(struct cpuinfo_processor), mach_topology.threads);
222 goto cleanup;
223 }
224 cores = calloc(mach_topology.cores, sizeof(struct cpuinfo_core));
225 if (cores == NULL) {
226 cpuinfo_log_error("failed to allocate %zu bytes for descriptions of %"PRIu32" cores",
227 mach_topology.cores * sizeof(struct cpuinfo_core), mach_topology.cores);
228 goto cleanup;
229 }
230 packages = calloc(mach_topology.packages, sizeof(struct cpuinfo_package));
231 if (packages == NULL) {
232 cpuinfo_log_error("failed to allocate %zu bytes for descriptions of %"PRIu32" packages",
233 mach_topology.packages * sizeof(struct cpuinfo_package), mach_topology.packages);
234 goto cleanup;
235 }
236
237 const uint32_t threads_per_core = mach_topology.threads / mach_topology.cores;
238 const uint32_t threads_per_package = mach_topology.threads / mach_topology.packages;
239 const uint32_t cores_per_package = mach_topology.cores / mach_topology.packages;
240
241 for (uint32_t i = 0; i < mach_topology.packages; i++) {
242 packages[i] = (struct cpuinfo_package) {
243 .processor_start = i * threads_per_package,
244 .processor_count = threads_per_package,
245 .core_start = i * cores_per_package,
246 .core_count = cores_per_package,
247 };
248 decode_package_name(packages[i].name);
249 }
250
251 const uint32_t cpu_family = get_sys_info_by_name("hw.cpufamily");
252 const uint32_t cpu_subtype = get_sys_info_by_name("hw.cpusubtype");
253 for (uint32_t i = 0; i < mach_topology.cores; i++) {
254 cores[i] = (struct cpuinfo_core) {
255 .processor_start = i * threads_per_core,
256 .processor_count = threads_per_core,
257 .core_id = i % cores_per_package,
258 .package = packages + i / cores_per_package,
259 .vendor = cpuinfo_vendor_apple,
260 .uarch = decode_uarch(cpu_family, cpu_subtype, i),
261 };
262 }
263 for (uint32_t i = 0; i < mach_topology.threads; i++) {
264 const uint32_t smt_id = i % threads_per_core;
265 const uint32_t core_id = i / threads_per_core;
266 const uint32_t package_id = i / threads_per_package;
267
268 processors[i].smt_id = smt_id;
269 processors[i].core = cores + i / threads_per_core;
270 processors[i].package = packages + i / threads_per_package;
271 }
272
273 const uint32_t cacheline_size = get_sys_info(HW_CACHELINE, "HW_CACHELINE");
274 const uint32_t l1d_cache_size = get_sys_info(HW_L1DCACHESIZE, "HW_L1DCACHESIZE");
275 const uint32_t l1i_cache_size = get_sys_info(HW_L1ICACHESIZE, "HW_L1ICACHESIZE");
276 const uint32_t l2_cache_size = get_sys_info(HW_L2CACHESIZE, "HW_L2CACHESIZE");
277 const uint32_t l3_cache_size = get_sys_info(HW_L3CACHESIZE, "HW_L3CACHESIZE");
278 const uint32_t l1_cache_associativity = 4;
279 const uint32_t l2_cache_associativity = 8;
280 const uint32_t l3_cache_associativity = 16;
281 const uint32_t cache_partitions = 1;
282 const uint32_t cache_flags = 0;
283
284 uint32_t threads_per_l1 = 0, l1_count = 0;
285 if (l1i_cache_size != 0 || l1d_cache_size != 0) {
286 /* Assume that L1 caches are private to each core */
287 threads_per_l1 = 1;
288 l1_count = mach_topology.threads / threads_per_l1;
289 cpuinfo_log_debug("detected %"PRIu32" L1 caches", l1_count);
290 }
291
292 uint32_t threads_per_l2 = 0, l2_count = 0;
293 if (l2_cache_size != 0) {
294 /* L2 cache is shared between all cores */
295 threads_per_l2 = mach_topology.cores;
296 l2_count = 1;
297 cpuinfo_log_debug("detected %"PRIu32" L2 caches", l2_count);
298 }
299
300 uint32_t threads_per_l3 = 0, l3_count = 0;
301 if (l3_cache_size != 0) {
302 /* L3 cache is shared between all cores */
303 threads_per_l3 = mach_topology.cores;
304 l3_count = 1;
305 cpuinfo_log_debug("detected %"PRIu32" L3 caches", l3_count);
306 }
307
308 if (l1i_cache_size != 0) {
309 l1i = calloc(l1_count, sizeof(struct cpuinfo_cache));
310 if (l1i == NULL) {
311 cpuinfo_log_error("failed to allocate %zu bytes for descriptions of %"PRIu32" L1I caches",
312 l1_count * sizeof(struct cpuinfo_cache), l1_count);
313 goto cleanup;
314 }
315 for (uint32_t c = 0; c < l1_count; c++) {
316 l1i[c] = (struct cpuinfo_cache) {
317 .size = l1i_cache_size,
318 .associativity = l1_cache_associativity,
319 .sets = l1i_cache_size / (l1_cache_associativity * cacheline_size),
320 .partitions = cache_partitions,
321 .line_size = cacheline_size,
322 .flags = cache_flags,
323 .processor_start = c * threads_per_l1,
324 .processor_count = threads_per_l1,
325 };
326 }
327 for (uint32_t t = 0; t < mach_topology.threads; t++) {
328 processors[t].cache.l1i = &l1i[t / threads_per_l1];
329 }
330 }
331
332 if (l1d_cache_size != 0) {
333 l1d = calloc(l1_count, sizeof(struct cpuinfo_cache));
334 if (l1d == NULL) {
335 cpuinfo_log_error("failed to allocate %zu bytes for descriptions of %"PRIu32" L1D caches",
336 l1_count * sizeof(struct cpuinfo_cache), l1_count);
337 goto cleanup;
338 }
339 for (uint32_t c = 0; c < l1_count; c++) {
340 l1d[c] = (struct cpuinfo_cache) {
341 .size = l1d_cache_size,
342 .associativity = l1_cache_associativity,
343 .sets = l1d_cache_size / (l1_cache_associativity * cacheline_size),
344 .partitions = cache_partitions,
345 .line_size = cacheline_size,
346 .flags = cache_flags,
347 .processor_start = c * threads_per_l1,
348 .processor_count = threads_per_l1,
349 };
350 }
351 for (uint32_t t = 0; t < mach_topology.threads; t++) {
352 processors[t].cache.l1d = &l1d[t / threads_per_l1];
353 }
354 }
355
356 if (l2_count != 0) {
357 l2 = calloc(l2_count, sizeof(struct cpuinfo_cache));
358 if (l2 == NULL) {
359 cpuinfo_log_error("failed to allocate %zu bytes for descriptions of %"PRIu32" L2 caches",
360 l2_count * sizeof(struct cpuinfo_cache), l2_count);
361 goto cleanup;
362 }
363 for (uint32_t c = 0; c < l2_count; c++) {
364 l2[c] = (struct cpuinfo_cache) {
365 .size = l2_cache_size,
366 .associativity = l2_cache_associativity,
367 .sets = l2_cache_size / (l2_cache_associativity * cacheline_size),
368 .partitions = cache_partitions,
369 .line_size = cacheline_size,
370 .flags = cache_flags,
371 .processor_start = c * threads_per_l2,
372 .processor_count = threads_per_l2,
373 };
374 }
375 for (uint32_t t = 0; t < mach_topology.threads; t++) {
376 processors[t].cache.l2 = &l2[0];
377 }
378 }
379
380 if (l3_count != 0) {
381 l3 = calloc(l3_count, sizeof(struct cpuinfo_cache));
382 if (l3 == NULL) {
383 cpuinfo_log_error("failed to allocate %zu bytes for descriptions of %"PRIu32" L3 caches",
384 l3_count * sizeof(struct cpuinfo_cache), l3_count);
385 goto cleanup;
386 }
387 for (uint32_t c = 0; c < l3_count; c++) {
388 l3[c] = (struct cpuinfo_cache) {
389 .size = l3_cache_size,
390 .associativity = l3_cache_associativity,
391 .sets = l3_cache_size / (l3_cache_associativity * cacheline_size),
392 .partitions = cache_partitions,
393 .line_size = cacheline_size,
394 .flags = cache_flags,
395 .processor_start = c * threads_per_l3,
396 .processor_count = threads_per_l3,
397 };
398 }
399 for (uint32_t t = 0; t < mach_topology.threads; t++) {
400 processors[t].cache.l3 = &l3[0];
401 }
402 }
403
404 /* Commit changes */
405 cpuinfo_cache[cpuinfo_cache_level_1i] = l1i;
406 cpuinfo_cache[cpuinfo_cache_level_1d] = l1d;
407 cpuinfo_cache[cpuinfo_cache_level_2] = l2;
408 cpuinfo_cache[cpuinfo_cache_level_3] = l3;
409
410 cpuinfo_processors = processors;
411 cpuinfo_cores = cores;
412 cpuinfo_packages = packages;
413
414 cpuinfo_cache_count[cpuinfo_cache_level_1i] = l1_count;
415 cpuinfo_cache_count[cpuinfo_cache_level_1d] = l1_count;
416 cpuinfo_cache_count[cpuinfo_cache_level_2] = l2_count;
417 cpuinfo_cache_count[cpuinfo_cache_level_3] = l3_count;
418
419 cpuinfo_processors_count = mach_topology.threads;
420 cpuinfo_cores_count = mach_topology.cores;
421 cpuinfo_packages_count = mach_topology.packages;
422
423 processors = NULL;
424 cores = NULL;
425 packages = NULL;
426 l1i = l1d = l2 = l3 = NULL;
427
428cleanup:
429 free(processors);
430 free(cores);
431 free(packages);
432 free(l1i);
433 free(l1d);
434 free(l2);
435 free(l3);
436}