use nix::sched::{sched_getaffinity, sched_setaffinity, CpuSet};
use nix::unistd::Pid;

#[test]
fn test_sched_affinity() {
    // If pid is zero, then the mask of the calling process is returned.
    let initial_affinity = sched_getaffinity(Pid::from_raw(0)).unwrap();
    let mut at_least_one_cpu = false;
    // Remember the last CPU seen in the mask so we can pin to it below.
    let mut last_valid_cpu = 0;
    for field in 0..CpuSet::count() {
        if initial_affinity.is_set(field).unwrap() {
            at_least_one_cpu = true;
            last_valid_cpu = field;
        }
    }
    assert!(at_least_one_cpu);

    // Now restrict the process to a single CPU.
    let mut new_affinity = CpuSet::new();
    new_affinity.set(last_valid_cpu).unwrap();
    sched_setaffinity(Pid::from_raw(0), &new_affinity).unwrap();

    // Re-check the affinity, which should now contain only the CPU we set.
    let updated_affinity = sched_getaffinity(Pid::from_raw(0)).unwrap();
    for field in 0..CpuSet::count() {
        // Only the CPU we set above should be present in the mask.
        assert_eq!(updated_affinity.is_set(field).unwrap(), field == last_valid_cpu);
    }

    // Finally, restore the initial CPU set.
    sched_setaffinity(Pid::from_raw(0), &initial_affinity).unwrap();
}
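
// A minimal usage sketch (an addition, not part of the original test): the
// hypothetical helper `allowed_cpu_count` below illustrates the same
// sched_getaffinity/CpuSet API exercised above by counting the CPUs the
// calling process is currently allowed to run on.
#[allow(dead_code)]
fn allowed_cpu_count() -> usize {
    // As in the test, pid 0 means "the calling process".
    let affinity = sched_getaffinity(Pid::from_raw(0)).expect("sched_getaffinity failed");
    (0..CpuSet::count())
        .filter(|&cpu| affinity.is_set(cpu).unwrap_or(false))
        .count()
}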