#include <stdio.h>
#include <stdint.h>
#include <inttypes.h>
#include <stdlib.h>
#include <string.h>
#include <alloca.h>

#include <errno.h>
#include <sys/types.h>
#include <sys/sysctl.h>
#include <mach/machine.h>

#include <cpuinfo.h>
#include <mach/api.h>
#include <gpu/api.h>
#include <api.h>
#include <log.h>

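/*
 * Statically known ISA features: every Apple device supported by this code
 * implements at least ARMv7 with VFPv3-D32 and NEON on 32-bit builds, and the
 * 64-bit build assumes the crypto (AES/SHA/PMULL) and CRC32 extensions.
 */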
struct cpuinfo_arm_isa cpuinfo_isa = {
#if CPUINFO_ARCH_ARM
	.thumb = true,
	.thumb2 = true,
	.thumbee = false,
	.jazelle = false,
	.armv5e = true,
	.armv6 = true,
	.armv6k = true,
	.armv7 = true,
	.vfpv2 = false,
	.vfpv3 = true,
	.d32 = true,
	.wmmx = false,
	.wmmx2 = false,
	.neon = true,
#endif
#if CPUINFO_ARCH_ARM64
	.aes = true,
	.sha1 = true,
	.sha2 = true,
	.pmull = true,
	.crc32 = true,
#endif
};

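/*
 * Query a 32-bit integer value from the CTL_HW sysctl namespace.
 * Returns 0 (after logging) if the lookup fails or the value is not a
 * 32-bit integer.
 */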
static uint32_t get_sys_info(int type_specifier, const char* name) {
	size_t size = 0;
	uint32_t result = 0;
	int mib[2] = { CTL_HW, type_specifier };
	if (sysctl(mib, 2, NULL, &size, NULL, 0) != 0) {
		cpuinfo_log_error("sysctl(\"%s\") failed: %s", name, strerror(errno));
	} else if (size == sizeof(uint32_t)) {
		sysctl(mib, 2, &result, &size, NULL, 0);
		cpuinfo_log_debug("%s: %"PRIu32 ", size = %lu", name, result, size);
	} else {
		cpuinfo_log_warning("sysctl does not support non-integer lookup for (\"%s\")", name);
	}
	return result;
}

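/*
 * Query a 32-bit integer value by sysctl name (e.g. "hw.cpufamily").
 * Returns 0 (after logging) if the lookup fails or the value is not a
 * 32-bit integer.
 */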
static uint32_t get_sys_info_by_name(const char* type_specifier) {
	size_t size = 0;
	uint32_t result = 0;
	if (sysctlbyname(type_specifier, NULL, &size, NULL, 0) != 0) {
		cpuinfo_log_error("sysctlbyname(\"%s\") failed: %s", type_specifier, strerror(errno));
	} else if (size == sizeof(uint32_t)) {
		sysctlbyname(type_specifier, &result, &size, NULL, 0);
		cpuinfo_log_debug("%s: %"PRIu32 ", size = %lu", type_specifier, result, size);
	} else {
		cpuinfo_log_warning("sysctl does not support non-integer lookup for (\"%s\")", type_specifier);
	}
	return result;
}

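/*
 * Map the hw.cpufamily value to a cpuinfo microarchitecture. For the
 * heterogeneous Monsoon + Mistral family the core index selects between the
 * big and little microarchitectures; families not listed here fall back to
 * decoding hw.cpusubtype.
 */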
static enum cpuinfo_uarch decode_uarch(uint32_t cpu_family, uint32_t cpu_subtype, uint32_t core_index) {
	switch (cpu_family) {
		case CPUFAMILY_ARM_SWIFT:
			return cpuinfo_uarch_swift;
		case CPUFAMILY_ARM_CYCLONE:
			return cpuinfo_uarch_cyclone;
		case CPUFAMILY_ARM_TYPHOON:
			return cpuinfo_uarch_typhoon;
		case CPUFAMILY_ARM_TWISTER:
			return cpuinfo_uarch_twister;
		case CPUFAMILY_ARM_HURRICANE:
			return cpuinfo_uarch_hurricane;
#ifdef CPUFAMILY_ARM_MONSOON_MISTRAL
		case CPUFAMILY_ARM_MONSOON_MISTRAL:
#else
		case 0xe81e7ef6:
			/* Hard-coded value for older SDKs which do not define CPUFAMILY_ARM_MONSOON_MISTRAL */
#endif
			/* 2x Monsoon + 4x Mistral cores */
			return core_index < 2 ? cpuinfo_uarch_monsoon : cpuinfo_uarch_mistral;
		default:
			/* Use hw.cpusubtype for detection */
			break;
	}

	switch (cpu_subtype) {
		case CPU_SUBTYPE_ARM_V7:
			return cpuinfo_uarch_cortex_a8;
		case CPU_SUBTYPE_ARM_V7F:
			return cpuinfo_uarch_cortex_a9;
		case CPU_SUBTYPE_ARM_V7K:
			return cpuinfo_uarch_cortex_a7;
		default:
			return cpuinfo_uarch_unknown;
	}
}

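/*
 * Derive the marketing package name ("Apple A<N>" with an optional "X"
 * suffix) from the hw.machine model identifier, e.g. "iPhone10,3" or
 * "iPad7,4". Unrecognized models leave package_name untouched.
 */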
static void decode_package_name(char* package_name) {
	size_t size;
	if (sysctlbyname("hw.machine", NULL, &size, NULL, 0) != 0) {
		cpuinfo_log_warning("sysctlbyname(\"hw.machine\") failed: %s", strerror(errno));
		return;
	}

	char *machine_name = alloca(size);
	if (sysctlbyname("hw.machine", machine_name, &size, NULL, 0) != 0) {
		cpuinfo_log_warning("sysctlbyname(\"hw.machine\") failed: %s", strerror(errno));
		return;
	}
	cpuinfo_log_debug("hw.machine: %s", machine_name);

	char name[10];
	uint32_t major = 0, minor = 0;
	if (sscanf(machine_name, "%9[^,0123456789]%"SCNu32",%"SCNu32, name, &major, &minor) != 3) {
		cpuinfo_log_warning("parsing \"hw.machine\" failed: %s", strerror(errno));
		return;
	}

	uint32_t chip_model = 0;
	char suffix = '\0';
	if (strcmp(name, "iPhone") == 0) {
		/*
		 * iPhone 4 and up are supported:
		 *  - iPhone 4       [A4]:  iPhone3,1, iPhone3,2, iPhone3,3
		 *  - iPhone 4S      [A5]:  iPhone4,1
		 *  - iPhone 5       [A6]:  iPhone5,1, iPhone5,2
		 *  - iPhone 5c      [A6]:  iPhone5,3, iPhone5,4
		 *  - iPhone 5s      [A7]:  iPhone6,1, iPhone6,2
		 *  - iPhone 6       [A8]:  iPhone7,2
		 *  - iPhone 6 Plus  [A8]:  iPhone7,1
		 *  - iPhone 6s      [A9]:  iPhone8,1
		 *  - iPhone 6s Plus [A9]:  iPhone8,2
		 *  - iPhone SE      [A9]:  iPhone8,4
		 *  - iPhone 7       [A10]: iPhone9,1, iPhone9,3
		 *  - iPhone 7 Plus  [A10]: iPhone9,2, iPhone9,4
		 *  - iPhone 8       [A11]: iPhone10,1, iPhone10,4
		 *  - iPhone 8 Plus  [A11]: iPhone10,2, iPhone10,5
		 *  - iPhone X       [A11]: iPhone10,3, iPhone10,6
		 *  - iPhone XS      [A12]: iPhone11,2
		 *  - iPhone XS Max  [A12]: iPhone11,4, iPhone11,6
		 *  - iPhone XR      [A12]: iPhone11,8
		 */
		chip_model = major + 1;
	} else if (strcmp(name, "iPad") == 0) {
		switch (major) {
			/* iPad 2 and up are supported */
			case 2:
				/*
				 * iPad 2    [A5]: iPad2,1, iPad2,2, iPad2,3, iPad2,4
				 * iPad mini [A5]: iPad2,5, iPad2,6, iPad2,7
				 */
				chip_model = major + 3;
				break;
			case 3:
				/*
				 * iPad 3rd Gen [A5X]: iPad3,1, iPad3,2, iPad3,3
				 * iPad 4th Gen [A6X]: iPad3,4, iPad3,5, iPad3,6
				 */
				chip_model = (minor <= 3) ? 5 : 6;
				suffix = 'X';
				break;
			case 4:
				/*
				 * iPad Air         [A7]: iPad4,1, iPad4,2, iPad4,3
				 * iPad mini Retina [A7]: iPad4,4, iPad4,5, iPad4,6
				 * iPad mini 3      [A7]: iPad4,7, iPad4,8, iPad4,9
				 */
				chip_model = major + 3;
				break;
			case 5:
				/*
				 * iPad mini 4 [A8]:  iPad5,1, iPad5,2
				 * iPad Air 2  [A8X]: iPad5,3, iPad5,4
				 */
				chip_model = major + 3;
				suffix = (minor <= 2) ? '\0' : 'X';
				break;
			case 6:
				/*
				 * iPad Pro 9.7" [A9X]: iPad6,3, iPad6,4
				 * iPad Pro      [A9X]: iPad6,7, iPad6,8
				 * iPad 5th Gen  [A9]:  iPad6,11, iPad6,12
				 */
				chip_model = major + 3;
				suffix = minor <= 8 ? 'X' : '\0';
				break;
			case 7:
				/*
				 * iPad Pro 12.9" [A10X]: iPad7,1, iPad7,2
				 * iPad Pro 10.5" [A10X]: iPad7,3, iPad7,4
				 * iPad 6th Gen   [A10]:  iPad7,5, iPad7,6
				 */
				chip_model = major + 3;
				suffix = minor <= 4 ? 'X' : '\0';
				break;
			default:
				cpuinfo_log_info("unknown iPad: %s", machine_name);
				break;
		}
	} else if (strcmp(name, "iPod") == 0) {
		switch (major) {
			case 5:
				/* iPod touch (5th Gen) [A5]: iPod5,1 */
				chip_model = 5;
				break;
			case 7:
				/* iPod touch (6th Gen, 2015) [A8]: iPod7,1 */
				chip_model = 8;
				break;
			default:
				cpuinfo_log_info("unknown iPod: %s", machine_name);
				break;
		}
	} else {
		cpuinfo_log_info("unknown device: %s", machine_name);
	}
	if (chip_model != 0) {
		snprintf(package_name, CPUINFO_PACKAGE_NAME_MAX, "Apple A%"PRIu32"%c", chip_model, suffix);
	}
}

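/*
 * Initialization entry point for Mach-based ARM targets (iOS devices):
 * detects the processor/core/package topology via Mach APIs, decodes the
 * microarchitecture and ISA features from sysctl, groups cores into clusters,
 * builds cache descriptions, and finally commits everything to the globals.
 */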
void cpuinfo_arm_mach_init(void) {
	struct cpuinfo_processor* processors = NULL;
	struct cpuinfo_core* cores = NULL;
	struct cpuinfo_cluster* clusters = NULL;
	struct cpuinfo_package* packages = NULL;
	struct cpuinfo_cache* l1i = NULL;
	struct cpuinfo_cache* l1d = NULL;
	struct cpuinfo_cache* l2 = NULL;
	struct cpuinfo_cache* l3 = NULL;

	struct cpuinfo_mach_topology mach_topology = cpuinfo_mach_detect_topology();
	processors = calloc(mach_topology.threads, sizeof(struct cpuinfo_processor));
	if (processors == NULL) {
		cpuinfo_log_error("failed to allocate %zu bytes for descriptions of %"PRIu32" logical processors",
			mach_topology.threads * sizeof(struct cpuinfo_processor), mach_topology.threads);
		goto cleanup;
	}
	cores = calloc(mach_topology.cores, sizeof(struct cpuinfo_core));
	if (cores == NULL) {
		cpuinfo_log_error("failed to allocate %zu bytes for descriptions of %"PRIu32" cores",
			mach_topology.cores * sizeof(struct cpuinfo_core), mach_topology.cores);
		goto cleanup;
	}
	packages = calloc(mach_topology.packages, sizeof(struct cpuinfo_package));
	if (packages == NULL) {
		cpuinfo_log_error("failed to allocate %zu bytes for descriptions of %"PRIu32" packages",
			mach_topology.packages * sizeof(struct cpuinfo_package), mach_topology.packages);
		goto cleanup;
	}

	const uint32_t threads_per_core = mach_topology.threads / mach_topology.cores;
	const uint32_t threads_per_package = mach_topology.threads / mach_topology.packages;
	const uint32_t cores_per_package = mach_topology.cores / mach_topology.packages;

	for (uint32_t i = 0; i < mach_topology.packages; i++) {
		packages[i] = (struct cpuinfo_package) {
			.processor_start = i * threads_per_package,
			.processor_count = threads_per_package,
			.core_start = i * cores_per_package,
			.core_count = cores_per_package,
		};
		decode_package_name(packages[i].name);
		cpuinfo_gpu_ios_query_gles2(packages[i].gpu_name);
	}

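	/*
	 * hw.cpufamily identifies the SoC generation, while hw.cputype and
	 * hw.cpusubtype identify the architecture level. CPU_TYPE_ARM64 (and
	 * CPU_SUBTYPE_ARM_V8 on 32-bit builds) is assumed to imply the crypto and
	 * CRC32 extensions; the ARMv7 subtype cases below fall through so that
	 * newer subtypes also pick up the features of the older ones.
	 */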
	const uint32_t cpu_family = get_sys_info_by_name("hw.cpufamily");
	const uint32_t cpu_type = get_sys_info_by_name("hw.cputype");
	const uint32_t cpu_subtype = get_sys_info_by_name("hw.cpusubtype");
	switch (cpu_type) {
		case CPU_TYPE_ARM64:
			cpuinfo_isa.aes = true;
			cpuinfo_isa.sha1 = true;
			cpuinfo_isa.sha2 = true;
			cpuinfo_isa.pmull = true;
			cpuinfo_isa.crc32 = true;
			break;
#if CPUINFO_ARCH_ARM
		case CPU_TYPE_ARM:
			switch (cpu_subtype) {
				case CPU_SUBTYPE_ARM_V8:
					cpuinfo_isa.aes = true;
					cpuinfo_isa.sha1 = true;
					cpuinfo_isa.sha2 = true;
					cpuinfo_isa.pmull = true;
					cpuinfo_isa.crc32 = true;
					/* Fall-through to add ARMv7S features */
				case CPU_SUBTYPE_ARM_V7S:
				case CPU_SUBTYPE_ARM_V7K:
					cpuinfo_isa.fma = true;
					/* Fall-through to add ARMv7F features */
				case CPU_SUBTYPE_ARM_V7F:
					cpuinfo_isa.armv7mp = true;
					cpuinfo_isa.fp16 = true;
					/* Fall-through to add ARMv7 features */
				case CPU_SUBTYPE_ARM_V7:
					break;
				default:
					break;
			}
			break;
#endif
	}

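	/*
	 * decode_uarch() assigns microarchitectures by core index, so cores of the
	 * same cluster are consecutive; a microarchitecture change between
	 * neighbouring cores therefore marks a cluster boundary (e.g. 2 Monsoon +
	 * 4 Mistral cores on the A11 yield two clusters).
	 */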
	uint32_t num_clusters = 1;
	for (uint32_t i = 0; i < mach_topology.cores; i++) {
		cores[i] = (struct cpuinfo_core) {
			.processor_start = i * threads_per_core,
			.processor_count = threads_per_core,
			.core_id = i % cores_per_package,
			.package = packages + i / cores_per_package,
			.vendor = cpuinfo_vendor_apple,
			.uarch = decode_uarch(cpu_family, cpu_subtype, i),
		};
		if (i != 0 && cores[i].uarch != cores[i - 1].uarch) {
			num_clusters++;
		}
	}
	for (uint32_t i = 0; i < mach_topology.threads; i++) {
		const uint32_t smt_id = i % threads_per_core;
		const uint32_t core_id = i / threads_per_core;
		const uint32_t package_id = i / threads_per_package;

		processors[i].smt_id = smt_id;
		processors[i].core = &cores[core_id];
		processors[i].package = &packages[package_id];
	}

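	/*
	 * Populate cluster descriptions: start a new cluster whenever the
	 * microarchitecture changes, otherwise extend the current one. The counts
	 * are grown one core (and one processor) at a time, matching Apple cores
	 * that expose a single hardware thread each.
	 */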
	clusters = calloc(num_clusters, sizeof(struct cpuinfo_cluster));
	if (clusters == NULL) {
		cpuinfo_log_error(
			"failed to allocate %zu bytes for descriptions of %"PRIu32" clusters",
			num_clusters * sizeof(struct cpuinfo_cluster), num_clusters);
		goto cleanup;
	}
	uint32_t cluster_idx = UINT32_MAX;
	for (uint32_t i = 0; i < mach_topology.cores; i++) {
		if (i == 0 || cores[i].uarch != cores[i - 1].uarch) {
			cluster_idx++;
			clusters[cluster_idx] = (struct cpuinfo_cluster) {
				.processor_start = i * threads_per_core,
				.processor_count = 1,
				.core_start = i,
				.core_count = 1,
				.cluster_id = cluster_idx,
				.package = cores[i].package,
				.vendor = cores[i].vendor,
				.uarch = cores[i].uarch,
			};
		} else {
			clusters[cluster_idx].processor_count++;
			clusters[cluster_idx].core_count++;
		}
		cores[i].cluster = &clusters[cluster_idx];
	}

	for (uint32_t i = 0; i < mach_topology.threads; i++) {
		const uint32_t core_id = i / threads_per_core;
		processors[i].cluster = cores[core_id].cluster;
	}

	for (uint32_t i = 0; i < mach_topology.packages; i++) {
		packages[i].cluster_start = 0;
		packages[i].cluster_count = num_clusters;
	}

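	/*
	 * Cache sizes and the line size are reported by sysctl, but associativity
	 * and sharing are not; fixed associativities are assumed, L1 caches are
	 * treated as private to each core, and L2/L3 as shared by all cores.
	 */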
	const uint32_t cacheline_size = get_sys_info(HW_CACHELINE, "HW_CACHELINE");
	const uint32_t l1d_cache_size = get_sys_info(HW_L1DCACHESIZE, "HW_L1DCACHESIZE");
	const uint32_t l1i_cache_size = get_sys_info(HW_L1ICACHESIZE, "HW_L1ICACHESIZE");
	const uint32_t l2_cache_size = get_sys_info(HW_L2CACHESIZE, "HW_L2CACHESIZE");
	const uint32_t l3_cache_size = get_sys_info(HW_L3CACHESIZE, "HW_L3CACHESIZE");
	const uint32_t l1_cache_associativity = 4;
	const uint32_t l2_cache_associativity = 8;
	const uint32_t l3_cache_associativity = 16;
	const uint32_t cache_partitions = 1;
	const uint32_t cache_flags = 0;

	uint32_t threads_per_l1 = 0, l1_count = 0;
	if (l1i_cache_size != 0 || l1d_cache_size != 0) {
		/* Assume L1 caches are private to each core */
		threads_per_l1 = 1;
		l1_count = mach_topology.threads / threads_per_l1;
		cpuinfo_log_debug("detected %"PRIu32" L1 caches", l1_count);
	}

	uint32_t threads_per_l2 = 0, l2_count = 0;
	if (l2_cache_size != 0) {
		/* Assume L2 cache is shared between all cores */
		threads_per_l2 = mach_topology.cores;
		l2_count = 1;
		cpuinfo_log_debug("detected %"PRIu32" L2 caches", l2_count);
	}

	uint32_t threads_per_l3 = 0, l3_count = 0;
	if (l3_cache_size != 0) {
		/* Assume L3 cache is shared between all cores */
		threads_per_l3 = mach_topology.cores;
		l3_count = 1;
		cpuinfo_log_debug("detected %"PRIu32" L3 caches", l3_count);
	}

	if (l1i_cache_size != 0) {
		l1i = calloc(l1_count, sizeof(struct cpuinfo_cache));
		if (l1i == NULL) {
			cpuinfo_log_error("failed to allocate %zu bytes for descriptions of %"PRIu32" L1I caches",
				l1_count * sizeof(struct cpuinfo_cache), l1_count);
			goto cleanup;
		}
		for (uint32_t c = 0; c < l1_count; c++) {
			l1i[c] = (struct cpuinfo_cache) {
				.size = l1i_cache_size,
				.associativity = l1_cache_associativity,
				.sets = l1i_cache_size / (l1_cache_associativity * cacheline_size),
				.partitions = cache_partitions,
				.line_size = cacheline_size,
				.flags = cache_flags,
				.processor_start = c * threads_per_l1,
				.processor_count = threads_per_l1,
			};
		}
		for (uint32_t t = 0; t < mach_topology.threads; t++) {
			processors[t].cache.l1i = &l1i[t / threads_per_l1];
		}
	}

	if (l1d_cache_size != 0) {
		l1d = calloc(l1_count, sizeof(struct cpuinfo_cache));
		if (l1d == NULL) {
			cpuinfo_log_error("failed to allocate %zu bytes for descriptions of %"PRIu32" L1D caches",
				l1_count * sizeof(struct cpuinfo_cache), l1_count);
			goto cleanup;
		}
		for (uint32_t c = 0; c < l1_count; c++) {
			l1d[c] = (struct cpuinfo_cache) {
				.size = l1d_cache_size,
				.associativity = l1_cache_associativity,
				.sets = l1d_cache_size / (l1_cache_associativity * cacheline_size),
				.partitions = cache_partitions,
				.line_size = cacheline_size,
				.flags = cache_flags,
				.processor_start = c * threads_per_l1,
				.processor_count = threads_per_l1,
			};
		}
		for (uint32_t t = 0; t < mach_topology.threads; t++) {
			processors[t].cache.l1d = &l1d[t / threads_per_l1];
		}
	}

	if (l2_count != 0) {
		l2 = calloc(l2_count, sizeof(struct cpuinfo_cache));
		if (l2 == NULL) {
			cpuinfo_log_error("failed to allocate %zu bytes for descriptions of %"PRIu32" L2 caches",
				l2_count * sizeof(struct cpuinfo_cache), l2_count);
			goto cleanup;
		}
		for (uint32_t c = 0; c < l2_count; c++) {
			l2[c] = (struct cpuinfo_cache) {
				.size = l2_cache_size,
				.associativity = l2_cache_associativity,
				.sets = l2_cache_size / (l2_cache_associativity * cacheline_size),
				.partitions = cache_partitions,
				.line_size = cacheline_size,
				.flags = cache_flags,
				.processor_start = c * threads_per_l2,
				.processor_count = threads_per_l2,
			};
		}
		for (uint32_t t = 0; t < mach_topology.threads; t++) {
			processors[t].cache.l2 = &l2[0];
		}
	}

	if (l3_count != 0) {
		l3 = calloc(l3_count, sizeof(struct cpuinfo_cache));
		if (l3 == NULL) {
			cpuinfo_log_error("failed to allocate %zu bytes for descriptions of %"PRIu32" L3 caches",
				l3_count * sizeof(struct cpuinfo_cache), l3_count);
			goto cleanup;
		}
		for (uint32_t c = 0; c < l3_count; c++) {
			l3[c] = (struct cpuinfo_cache) {
				.size = l3_cache_size,
				.associativity = l3_cache_associativity,
				.sets = l3_cache_size / (l3_cache_associativity * cacheline_size),
				.partitions = cache_partitions,
				.line_size = cacheline_size,
				.flags = cache_flags,
				.processor_start = c * threads_per_l3,
				.processor_count = threads_per_l3,
			};
		}
		for (uint32_t t = 0; t < mach_topology.threads; t++) {
			processors[t].cache.l3 = &l3[0];
		}
	}

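	/*
	 * All descriptors are fully constructed at this point. The stores below
	 * publish them to the globals, and __sync_synchronize() orders them before
	 * cpuinfo_is_initialized becomes visible to other threads. The local
	 * pointers are then cleared so the cleanup path does not free memory that
	 * is now owned by the globals.
	 */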
	/* Commit changes */
	cpuinfo_cache[cpuinfo_cache_level_1i] = l1i;
	cpuinfo_cache[cpuinfo_cache_level_1d] = l1d;
	cpuinfo_cache[cpuinfo_cache_level_2] = l2;
	cpuinfo_cache[cpuinfo_cache_level_3] = l3;

	cpuinfo_processors = processors;
	cpuinfo_cores = cores;
	cpuinfo_clusters = clusters;
	cpuinfo_packages = packages;

	cpuinfo_cache_count[cpuinfo_cache_level_1i] = l1_count;
	cpuinfo_cache_count[cpuinfo_cache_level_1d] = l1_count;
	cpuinfo_cache_count[cpuinfo_cache_level_2] = l2_count;
	cpuinfo_cache_count[cpuinfo_cache_level_3] = l3_count;

	cpuinfo_processors_count = mach_topology.threads;
	cpuinfo_cores_count = mach_topology.cores;
	cpuinfo_clusters_count = num_clusters;
	cpuinfo_packages_count = mach_topology.packages;

	__sync_synchronize();

	cpuinfo_is_initialized = true;

	processors = NULL;
	cores = NULL;
	clusters = NULL;
	packages = NULL;
	l1i = l1d = l2 = l3 = NULL;

cleanup:
	free(processors);
	free(cores);
	free(clusters);
	free(packages);
	free(l1i);
	free(l1d);
	free(l2);
	free(l3);
}