/*
 * Stress userfaultfd syscall.
 *
 * Copyright (C) 2015 Red Hat, Inc.
 *
 * This work is licensed under the terms of the GNU GPL, version 2. See
 * the COPYING file in the top-level directory.
 *
 * This test allocates two virtual areas and bounces the physical
 * memory across the two virtual areas (from area_src to area_dst)
 * using userfaultfd.
 *
 * There are three threads running per CPU:
 *
 * 1) one per-CPU thread takes a per-page pthread_mutex in a random
 *    page of the area_dst (while the physical page may still be in
 *    area_src), and increments a per-page counter in the same page,
 *    and checks its value against a verification region.
 *
 * 2) another per-CPU thread handles the userfaults generated by
 *    thread 1 above. userfaultfd blocking reads or poll() modes are
 *    exercised in alternation.
 *
 * 3) one last per-CPU thread transfers the memory in the background
 *    at maximum bandwidth (if not already transferred by thread
 *    2). Each CPU thread takes care of transferring a portion of the
 *    area.
 *
 * When all threads of type 3 have completed the transfer, one bounce
 * is complete. area_src and area_dst are then swapped. All threads
 * are respawned and so the bounce is immediately restarted in the
 * opposite direction.
 *
 * The per-CPU threads of type 1, by triggering userfaults inside
 * pthread_mutex_lock, also verify the atomicity of the memory
 * transfer (UFFDIO_COPY).
 *
 * The program takes three parameters: the test type (anon, shmem or
 * hugetlb), the amount of physical memory in megabytes (MiB) of the
 * area, and the number of bounces to execute. The hugetlb test type
 * additionally takes the path of a hugetlbfs file to back the areas.
 *
 * # 100MiB 99999 bounces
 * ./userfaultfd anon 100 99999
 *
 * # 1GiB 99 bounces
 * ./userfaultfd anon 1000 99
 *
 * # 10MiB-~6GiB 999 bounces, continue forever unless an error triggers
 * while ./userfaultfd anon $[RANDOM % 6000 + 10] 999; do true; done
 */

#define _GNU_SOURCE
#include <stdio.h>
#include <errno.h>
#include <unistd.h>
#include <stdlib.h>
#include <sys/types.h>
#include <sys/stat.h>
#include <fcntl.h>
#include <time.h>
#include <signal.h>
#include <poll.h>
#include <string.h>
#include <sys/mman.h>
#include <sys/syscall.h>
#include <sys/ioctl.h>
#include <sys/wait.h>
#include <pthread.h>
#include <linux/userfaultfd.h>

#ifdef __NR_userfaultfd

static unsigned long nr_cpus, nr_pages, nr_pages_per_cpu, page_size;

#define BOUNCE_RANDOM		(1<<0)
#define BOUNCE_RACINGFAULTS	(1<<1)
#define BOUNCE_VERIFY		(1<<2)
#define BOUNCE_POLL		(1<<3)
static int bounces;

#define TEST_ANON	1
#define TEST_HUGETLB	2
#define TEST_SHMEM	3
static int test_type;

static int huge_fd;
static char *huge_fd_off0;
static unsigned long long *count_verify;
static int uffd, uffd_flags, finished, *pipefd;
static char *area_src, *area_dst;
static char *zeropage;
pthread_attr_t attr;

/* pthread_mutex_t starts at page offset 0 */
#define area_mutex(___area, ___nr)					\
	((pthread_mutex_t *) ((___area) + (___nr)*page_size))
/*
 * count is placed in the page right after pthread_mutex_t, naturally
 * aligned to avoid misaligned-access faults on non-x86 archs.
 */
#define area_count(___area, ___nr)					\
	((volatile unsigned long long *) ((unsigned long)		\
				 ((___area) + (___nr)*page_size +	\
				  sizeof(pthread_mutex_t) +		\
				  sizeof(unsigned long long) - 1) &	\
				 ~(unsigned long)(sizeof(unsigned long long) \
						  -  1)))

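/*
 * Illustrative per-page layout (struct sizes vary by libc and
 * architecture; the numbers below are only an example): with a
 * 40-byte pthread_mutex_t and 8-byte counters, the mutex occupies
 * bytes [0, 40) of each page, area_count() resolves to byte 40 (the
 * first 8-byte aligned slot after the mutex), and the always-non-zero
 * placeholder written at setup time occupies the following 8 bytes.
 */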
static int anon_release_pages(char *rel_area)
{
	int ret = 0;

	if (madvise(rel_area, nr_pages * page_size, MADV_DONTNEED)) {
		perror("madvise");
		ret = 1;
	}

	return ret;
}

static void anon_allocate_area(void **alloc_area)
{
	if (posix_memalign(alloc_area, page_size, nr_pages * page_size)) {
		fprintf(stderr, "out of memory\n");
		*alloc_area = NULL;
	}
}


/* HugeTLB memory */
static int hugetlb_release_pages(char *rel_area)
{
	int ret = 0;

	if (fallocate(huge_fd, FALLOC_FL_PUNCH_HOLE | FALLOC_FL_KEEP_SIZE,
		      rel_area == huge_fd_off0 ? 0 :
		      nr_pages * page_size,
		      nr_pages * page_size)) {
		perror("fallocate");
		ret = 1;
	}

	return ret;
}


static void hugetlb_allocate_area(void **alloc_area)
{
	*alloc_area = mmap(NULL, nr_pages * page_size, PROT_READ | PROT_WRITE,
			   MAP_PRIVATE | MAP_HUGETLB, huge_fd,
			   *alloc_area == area_src ? 0 :
			   nr_pages * page_size);
	if (*alloc_area == MAP_FAILED) {
		fprintf(stderr, "mmap of hugetlbfs file failed\n");
		*alloc_area = NULL;
	}

	if (*alloc_area == area_src)
		huge_fd_off0 = *alloc_area;
}

/* Shared memory */
static int shmem_release_pages(char *rel_area)
{
	int ret = 0;

	if (madvise(rel_area, nr_pages * page_size, MADV_REMOVE)) {
		perror("madvise");
		ret = 1;
	}

	return ret;
}

static void shmem_allocate_area(void **alloc_area)
{
	*alloc_area = mmap(NULL, nr_pages * page_size, PROT_READ | PROT_WRITE,
			   MAP_ANONYMOUS | MAP_SHARED, -1, 0);
	if (*alloc_area == MAP_FAILED) {
		fprintf(stderr, "shared memory mmap failed\n");
		*alloc_area = NULL;
	}
}

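/*
 * Per-memory-type hooks: the test type selected on the command line
 * picks one of the ops tables below, so the rest of the test can
 * allocate and release its areas without knowing whether it is
 * running against anonymous memory, shmem or hugetlbfs.
 */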
struct uffd_test_ops {
	unsigned long expected_ioctls;
	void (*allocate_area)(void **alloc_area);
	int (*release_pages)(char *rel_area);
};

#define ANON_EXPECTED_IOCTLS		((1 << _UFFDIO_WAKE) | \
					 (1 << _UFFDIO_COPY) | \
					 (1 << _UFFDIO_ZEROPAGE))

static struct uffd_test_ops anon_uffd_test_ops = {
	.expected_ioctls = ANON_EXPECTED_IOCTLS,
	.allocate_area	= anon_allocate_area,
	.release_pages	= anon_release_pages,
};

static struct uffd_test_ops shmem_uffd_test_ops = {
	.expected_ioctls = UFFD_API_RANGE_IOCTLS_BASIC,
	.allocate_area	= shmem_allocate_area,
	.release_pages	= shmem_release_pages,
};

static struct uffd_test_ops hugetlb_uffd_test_ops = {
	.expected_ioctls = UFFD_API_RANGE_IOCTLS_BASIC,
	.allocate_area	= hugetlb_allocate_area,
	.release_pages	= hugetlb_release_pages,
};

static struct uffd_test_ops *uffd_test_ops;

static int my_bcmp(char *str1, char *str2, size_t n)
{
	unsigned long i;
	for (i = 0; i < n; i++)
		if (str1[i] != str2[i])
			return 1;
	return 0;
}

static void *locking_thread(void *arg)
{
	unsigned long cpu = (unsigned long) arg;
	struct random_data rand;
	unsigned long page_nr = *(&(page_nr)); /* uninitialized warning */
	int32_t rand_nr;
	unsigned long long count;
	char randstate[64];
	unsigned int seed;
	time_t start;

	if (bounces & BOUNCE_RANDOM) {
		seed = (unsigned int) time(NULL) - bounces;
		if (!(bounces & BOUNCE_RACINGFAULTS))
			seed += cpu;
		bzero(&rand, sizeof(rand));
		bzero(&randstate, sizeof(randstate));
		if (initstate_r(seed, randstate, sizeof(randstate), &rand))
			fprintf(stderr, "srandom_r error\n"), exit(1);
	} else {
		page_nr = -bounces;
		if (!(bounces & BOUNCE_RACINGFAULTS))
			page_nr += cpu * nr_pages_per_cpu;
	}

	while (!finished) {
		if (bounces & BOUNCE_RANDOM) {
			if (random_r(&rand, &rand_nr))
				fprintf(stderr, "random_r 1 error\n"), exit(1);
			page_nr = rand_nr;
			if (sizeof(page_nr) > sizeof(rand_nr)) {
				if (random_r(&rand, &rand_nr))
					fprintf(stderr, "random_r 2 error\n"), exit(1);
				page_nr |= (((unsigned long) rand_nr) << 16) <<
					   16;
			}
		} else
			page_nr += 1;
		page_nr %= nr_pages;

		start = time(NULL);
		if (bounces & BOUNCE_VERIFY) {
			count = *area_count(area_dst, page_nr);
			if (!count)
				fprintf(stderr,
					"page_nr %lu wrong count %Lu %Lu\n",
					page_nr, count,
					count_verify[page_nr]), exit(1);


			/*
			 * We can't use bcmp (or memcmp) because that
			 * returns 0 erroneously if the memory is
			 * changing under it (even if the end of the
			 * page is never changing and always
			 * different).
			 */
#if 1
			if (!my_bcmp(area_dst + page_nr * page_size, zeropage,
				     page_size))
				fprintf(stderr,
					"my_bcmp page_nr %lu wrong count %Lu %Lu\n",
					page_nr, count,
					count_verify[page_nr]), exit(1);
#else
			unsigned long loops;

			loops = 0;
			/* uncomment the below line to test with mutex */
			/* pthread_mutex_lock(area_mutex(area_dst, page_nr)); */
			while (!bcmp(area_dst + page_nr * page_size, zeropage,
				     page_size)) {
				loops += 1;
				if (loops > 10)
					break;
			}
			/* uncomment below line to test with mutex */
			/* pthread_mutex_unlock(area_mutex(area_dst, page_nr)); */
			if (loops) {
				fprintf(stderr,
					"page_nr %lu all zero thread %lu %p %lu\n",
					page_nr, cpu, area_dst + page_nr * page_size,
					loops);
				if (loops > 10)
					exit(1);
			}
#endif
		}

		pthread_mutex_lock(area_mutex(area_dst, page_nr));
		count = *area_count(area_dst, page_nr);
		if (count != count_verify[page_nr]) {
			fprintf(stderr,
				"page_nr %lu memory corruption %Lu %Lu\n",
				page_nr, count,
				count_verify[page_nr]), exit(1);
		}
		count++;
		*area_count(area_dst, page_nr) = count_verify[page_nr] = count;
		pthread_mutex_unlock(area_mutex(area_dst, page_nr));

		if (time(NULL) - start > 1)
			fprintf(stderr,
				"userfault too slow %ld "
				"possible false positive with overcommit\n",
				time(NULL) - start);
	}

	return NULL;
}

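/*
 * Resolve one missing-page fault at the given byte offset with
 * UFFDIO_COPY.  Returns 1 if this call installed the page, 0 if
 * another thread raced us and the kernel reported -EEXIST.
 */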
static int copy_page(int ufd, unsigned long offset)
{
	struct uffdio_copy uffdio_copy;

	if (offset >= nr_pages * page_size)
		fprintf(stderr, "unexpected offset %lu\n",
			offset), exit(1);
	uffdio_copy.dst = (unsigned long) area_dst + offset;
	uffdio_copy.src = (unsigned long) area_src + offset;
	uffdio_copy.len = page_size;
	uffdio_copy.mode = 0;
	uffdio_copy.copy = 0;
	if (ioctl(ufd, UFFDIO_COPY, &uffdio_copy)) {
		/* real retval in uffdio_copy.copy */
		if (uffdio_copy.copy != -EEXIST)
			fprintf(stderr, "UFFDIO_COPY error %Ld\n",
				uffdio_copy.copy), exit(1);
	} else if (uffdio_copy.copy != page_size) {
		fprintf(stderr, "UFFDIO_COPY unexpected copy %Ld\n",
			uffdio_copy.copy), exit(1);
	} else
		return 1;
	return 0;
}

static void *uffd_poll_thread(void *arg)
{
	unsigned long cpu = (unsigned long) arg;
	struct pollfd pollfd[2];
	struct uffd_msg msg;
	struct uffdio_register uffd_reg;
	int ret;
	unsigned long offset;
	char tmp_chr;
	unsigned long userfaults = 0;

	pollfd[0].fd = uffd;
	pollfd[0].events = POLLIN;
	pollfd[1].fd = pipefd[cpu*2];
	pollfd[1].events = POLLIN;

	for (;;) {
		ret = poll(pollfd, 2, -1);
		if (!ret)
			fprintf(stderr, "poll error %d\n", ret), exit(1);
		if (ret < 0)
			perror("poll"), exit(1);
		if (pollfd[1].revents & POLLIN) {
			if (read(pollfd[1].fd, &tmp_chr, 1) != 1)
				fprintf(stderr, "read pipefd error\n"),
					exit(1);
			break;
		}
		if (!(pollfd[0].revents & POLLIN))
			fprintf(stderr, "pollfd[0].revents %d\n",
				pollfd[0].revents), exit(1);
		ret = read(uffd, &msg, sizeof(msg));
		if (ret < 0) {
			if (errno == EAGAIN)
				continue;
			perror("nonblocking read error"), exit(1);
		}
		switch (msg.event) {
		default:
			fprintf(stderr, "unexpected msg event %u\n",
				msg.event), exit(1);
			break;
		case UFFD_EVENT_PAGEFAULT:
			if (msg.arg.pagefault.flags & UFFD_PAGEFAULT_FLAG_WRITE)
				fprintf(stderr, "unexpected write fault\n"), exit(1);
			offset = (char *)(unsigned long)msg.arg.pagefault.address -
				 area_dst;
			offset &= ~(page_size-1);
			if (copy_page(uffd, offset))
				userfaults++;
			break;
		case UFFD_EVENT_FORK:
			uffd = msg.arg.fork.ufd;
			pollfd[0].fd = uffd;
			break;
		case UFFD_EVENT_REMOVE:
			uffd_reg.range.start = msg.arg.remove.start;
			uffd_reg.range.len = msg.arg.remove.end -
				msg.arg.remove.start;
			if (ioctl(uffd, UFFDIO_UNREGISTER, &uffd_reg.range))
				fprintf(stderr, "remove failure\n"), exit(1);
			break;
		case UFFD_EVENT_REMAP:
			area_dst = (char *)(unsigned long)msg.arg.remap.to;
			break;
		}
	}
	return (void *)userfaults;
}

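/*
 * Read-mode counterpart of uffd_poll_thread: uffd_read_thread below
 * blocks in read() on the userfaultfd instead of using poll() with a
 * non-blocking fd; which handler runs is chosen per bounce by
 * BOUNCE_POLL.  uffd_read_mutex makes stress() wait until each read
 * thread has latched its per-CPU counter pointer and reached the
 * point where pthread_cancel() is safe.
 */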
pthread_mutex_t uffd_read_mutex = PTHREAD_MUTEX_INITIALIZER;

static void *uffd_read_thread(void *arg)
{
	unsigned long *this_cpu_userfaults;
	struct uffd_msg msg;
	unsigned long offset;
	int ret;

	this_cpu_userfaults = (unsigned long *) arg;
	*this_cpu_userfaults = 0;

	pthread_mutex_unlock(&uffd_read_mutex);
	/* from here cancellation is ok */

	for (;;) {
		ret = read(uffd, &msg, sizeof(msg));
		if (ret != sizeof(msg)) {
			if (ret < 0)
				perror("blocking read error"), exit(1);
			else
				fprintf(stderr, "short read\n"), exit(1);
		}
		if (msg.event != UFFD_EVENT_PAGEFAULT)
			fprintf(stderr, "unexpected msg event %u\n",
				msg.event), exit(1);
		if (bounces & BOUNCE_VERIFY &&
		    msg.arg.pagefault.flags & UFFD_PAGEFAULT_FLAG_WRITE)
			fprintf(stderr, "unexpected write fault\n"), exit(1);
		offset = (char *)(unsigned long)msg.arg.pagefault.address -
			 area_dst;
		offset &= ~(page_size-1);
		if (copy_page(uffd, offset))
			(*this_cpu_userfaults)++;
	}
	return (void *)NULL;
}

static void *background_thread(void *arg)
{
	unsigned long cpu = (unsigned long) arg;
	unsigned long page_nr;

	for (page_nr = cpu * nr_pages_per_cpu;
	     page_nr < (cpu+1) * nr_pages_per_cpu;
	     page_nr++)
		copy_page(uffd, page_nr * page_size);

	return NULL;
}

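/*
 * One bounce: spawn the locking, fault-handling and background copy
 * threads, wait for the background copy to finish, zap area_src, stop
 * the fault handlers (pipe write in poll mode, pthread_cancel in read
 * mode) and finally join the locking threads.  Returns 0 on success.
 */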
static int stress(unsigned long *userfaults)
{
	unsigned long cpu;
	pthread_t locking_threads[nr_cpus];
	pthread_t uffd_threads[nr_cpus];
	pthread_t background_threads[nr_cpus];
	void **_userfaults = (void **) userfaults;

	finished = 0;
	for (cpu = 0; cpu < nr_cpus; cpu++) {
		if (pthread_create(&locking_threads[cpu], &attr,
				   locking_thread, (void *)cpu))
			return 1;
		if (bounces & BOUNCE_POLL) {
			if (pthread_create(&uffd_threads[cpu], &attr,
					   uffd_poll_thread, (void *)cpu))
				return 1;
		} else {
			if (pthread_create(&uffd_threads[cpu], &attr,
					   uffd_read_thread,
					   &_userfaults[cpu]))
				return 1;
			pthread_mutex_lock(&uffd_read_mutex);
		}
		if (pthread_create(&background_threads[cpu], &attr,
				   background_thread, (void *)cpu))
			return 1;
	}
	for (cpu = 0; cpu < nr_cpus; cpu++)
		if (pthread_join(background_threads[cpu], NULL))
			return 1;

	/*
	 * Be strict and immediately zap area_src, the whole area has
	 * been transferred already by the background threads. The
	 * area_src could then be faulted in in a racy way by still
	 * running uffd_threads reading zeropages after we zapped
	 * area_src (but they're guaranteed to get -EEXIST from
	 * UFFDIO_COPY without writing zero pages into area_dst
	 * because the background threads already completed).
	 */
	if (uffd_test_ops->release_pages(area_src))
		return 1;

	for (cpu = 0; cpu < nr_cpus; cpu++) {
		char c;
		if (bounces & BOUNCE_POLL) {
			if (write(pipefd[cpu*2+1], &c, 1) != 1) {
				fprintf(stderr, "pipefd write error\n");
				return 1;
			}
			if (pthread_join(uffd_threads[cpu], &_userfaults[cpu]))
				return 1;
		} else {
			if (pthread_cancel(uffd_threads[cpu]))
				return 1;
			if (pthread_join(uffd_threads[cpu], NULL))
				return 1;
		}
	}

	finished = 1;
	for (cpu = 0; cpu < nr_cpus; cpu++)
		if (pthread_join(locking_threads[cpu], NULL))
			return 1;

	return 0;
}

static int userfaultfd_open(int features)
{
	struct uffdio_api uffdio_api;

	uffd = syscall(__NR_userfaultfd, O_CLOEXEC | O_NONBLOCK);
	if (uffd < 0) {
		fprintf(stderr,
			"userfaultfd syscall not available in this kernel\n");
		return 1;
	}
	uffd_flags = fcntl(uffd, F_GETFD, NULL);

	uffdio_api.api = UFFD_API;
	uffdio_api.features = features;
	if (ioctl(uffd, UFFDIO_API, &uffdio_api)) {
		fprintf(stderr, "UFFDIO_API\n");
		return 1;
	}
	if (uffdio_api.api != UFFD_API) {
		fprintf(stderr, "UFFDIO_API error %Lu\n", uffdio_api.api);
		return 1;
	}

	return 0;
}

/*
 * For the non-cooperative userfaultfd test we fork() a process that
 * will generate pagefaults, will mremap the area monitored by the
 * userfaultfd and will finally release the monitored area.
 * For the anonymous and shared memory the area is divided into two
 * parts, the first part is accessed before mremap, and the second
 * part is accessed after mremap. Since hugetlbfs does not support
 * mremap, the entire monitored area is accessed in a single pass for
 * HUGETLB_TEST.
 * The release of the pages currently generates an event only for
 * shmem and anonymous memory (UFFD_EVENT_REMOVE), hence it is not
 * checked for hugetlb.
 */
static int faulting_process(void)
{
	unsigned long nr;
	unsigned long long count;
	unsigned long split_nr_pages;

	if (test_type != TEST_HUGETLB)
		split_nr_pages = (nr_pages + 1) / 2;
	else
		split_nr_pages = nr_pages;

	for (nr = 0; nr < split_nr_pages; nr++) {
		count = *area_count(area_dst, nr);
		if (count != count_verify[nr]) {
			fprintf(stderr,
				"nr %lu memory corruption %Lu %Lu\n",
				nr, count,
				count_verify[nr]), exit(1);
		}
	}

	if (test_type == TEST_HUGETLB)
		return 0;

	area_dst = mremap(area_dst, nr_pages * page_size, nr_pages * page_size,
			  MREMAP_MAYMOVE | MREMAP_FIXED, area_src);
	if (area_dst == MAP_FAILED)
		perror("mremap"), exit(1);

	for (; nr < nr_pages; nr++) {
		count = *area_count(area_dst, nr);
		if (count != count_verify[nr]) {
			fprintf(stderr,
				"nr %lu memory corruption %Lu %Lu\n",
				nr, count,
				count_verify[nr]), exit(1);
		}
	}

	if (uffd_test_ops->release_pages(area_dst))
		return 1;

	for (nr = 0; nr < nr_pages; nr++) {
		if (my_bcmp(area_dst + nr * page_size, zeropage, page_size))
			fprintf(stderr, "nr %lu is not zero\n", nr), exit(1);
	}

	return 0;
}

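/*
 * Request a zero page at the given byte offset with UFFDIO_ZEROPAGE.
 * For memory types that advertise the ioctl any failure is fatal and
 * a successful full-page fill returns 1; for the others the call is
 * expected to fail with -EINVAL and 0 is returned.
 */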
static int uffdio_zeropage(int ufd, unsigned long offset)
{
	struct uffdio_zeropage uffdio_zeropage;
	int ret;
	unsigned long has_zeropage;

	has_zeropage = uffd_test_ops->expected_ioctls & (1 << _UFFDIO_ZEROPAGE);

	if (offset >= nr_pages * page_size)
		fprintf(stderr, "unexpected offset %lu\n",
			offset), exit(1);
	uffdio_zeropage.range.start = (unsigned long) area_dst + offset;
	uffdio_zeropage.range.len = page_size;
	uffdio_zeropage.mode = 0;
	ret = ioctl(ufd, UFFDIO_ZEROPAGE, &uffdio_zeropage);
	if (ret) {
		/* real retval in uffdio_zeropage.zeropage */
		if (has_zeropage) {
			if (uffdio_zeropage.zeropage == -EEXIST)
				fprintf(stderr, "UFFDIO_ZEROPAGE -EEXIST\n"),
					exit(1);
			else
				fprintf(stderr, "UFFDIO_ZEROPAGE error %Ld\n",
					uffdio_zeropage.zeropage), exit(1);
		} else {
			if (uffdio_zeropage.zeropage != -EINVAL)
				fprintf(stderr,
					"UFFDIO_ZEROPAGE not -EINVAL %Ld\n",
					uffdio_zeropage.zeropage), exit(1);
		}
	} else if (has_zeropage) {
		if (uffdio_zeropage.zeropage != page_size) {
			fprintf(stderr, "UFFDIO_ZEROPAGE unexpected %Ld\n",
				uffdio_zeropage.zeropage), exit(1);
		} else
			return 1;
	} else {
		fprintf(stderr,
			"UFFDIO_ZEROPAGE succeeded %Ld\n",
			uffdio_zeropage.zeropage), exit(1);
	}

	return 0;
}

/* exercise UFFDIO_ZEROPAGE */
static int userfaultfd_zeropage_test(void)
{
	struct uffdio_register uffdio_register;
	unsigned long expected_ioctls;

	printf("testing UFFDIO_ZEROPAGE: ");
	fflush(stdout);

	if (uffd_test_ops->release_pages(area_dst))
		return 1;

	if (userfaultfd_open(0) < 0)
		return 1;
	uffdio_register.range.start = (unsigned long) area_dst;
	uffdio_register.range.len = nr_pages * page_size;
	uffdio_register.mode = UFFDIO_REGISTER_MODE_MISSING;
	if (ioctl(uffd, UFFDIO_REGISTER, &uffdio_register))
		fprintf(stderr, "register failure\n"), exit(1);

	expected_ioctls = uffd_test_ops->expected_ioctls;
	if ((uffdio_register.ioctls & expected_ioctls) !=
	    expected_ioctls)
		fprintf(stderr,
			"unexpected missing ioctl for anon memory\n"),
			exit(1);

	if (uffdio_zeropage(uffd, 0)) {
		if (my_bcmp(area_dst, zeropage, page_size))
			fprintf(stderr, "zeropage is not zero\n"), exit(1);
	}

	close(uffd);
	printf("done.\n");
	return 0;
}

static int userfaultfd_events_test(void)
{
	struct uffdio_register uffdio_register;
	unsigned long expected_ioctls;
	unsigned long userfaults;
	pthread_t uffd_mon;
	int err, features;
	pid_t pid;
	char c;

	printf("testing events (fork, remap, remove): ");
	fflush(stdout);

	if (uffd_test_ops->release_pages(area_dst))
		return 1;

	features = UFFD_FEATURE_EVENT_FORK | UFFD_FEATURE_EVENT_REMAP |
		UFFD_FEATURE_EVENT_REMOVE;
	if (userfaultfd_open(features) < 0)
		return 1;
	fcntl(uffd, F_SETFL, uffd_flags | O_NONBLOCK);

	uffdio_register.range.start = (unsigned long) area_dst;
	uffdio_register.range.len = nr_pages * page_size;
	uffdio_register.mode = UFFDIO_REGISTER_MODE_MISSING;
	if (ioctl(uffd, UFFDIO_REGISTER, &uffdio_register))
		fprintf(stderr, "register failure\n"), exit(1);

	expected_ioctls = uffd_test_ops->expected_ioctls;
	if ((uffdio_register.ioctls & expected_ioctls) !=
	    expected_ioctls)
		fprintf(stderr,
			"unexpected missing ioctl for anon memory\n"),
			exit(1);

	if (pthread_create(&uffd_mon, &attr, uffd_poll_thread, NULL))
		perror("uffd_poll_thread create"), exit(1);

	pid = fork();
	if (pid < 0)
		perror("fork"), exit(1);

	if (!pid)
		return faulting_process();

	waitpid(pid, &err, 0);
	if (err)
		fprintf(stderr, "faulting process failed\n"), exit(1);

	if (write(pipefd[1], &c, sizeof(c)) != sizeof(c))
		perror("pipe write"), exit(1);
	if (pthread_join(uffd_mon, (void **)&userfaults))
		return 1;

	close(uffd);
	printf("userfaults: %ld\n", userfaults);

	return userfaults != nr_pages;
}

static int userfaultfd_stress(void)
{
	void *area;
	char *tmp_area;
	unsigned long nr;
	struct uffdio_register uffdio_register;
	unsigned long cpu;
	int err;
	unsigned long userfaults[nr_cpus];

	uffd_test_ops->allocate_area((void **)&area_src);
	if (!area_src)
		return 1;
	uffd_test_ops->allocate_area((void **)&area_dst);
	if (!area_dst)
		return 1;

	if (userfaultfd_open(0) < 0)
		return 1;

	count_verify = malloc(nr_pages * sizeof(unsigned long long));
	if (!count_verify) {
		perror("count_verify");
		return 1;
	}

	for (nr = 0; nr < nr_pages; nr++) {
		*area_mutex(area_src, nr) = (pthread_mutex_t)
			PTHREAD_MUTEX_INITIALIZER;
		count_verify[nr] = *area_count(area_src, nr) = 1;
		/*
		 * In the transition between 255 to 256, powerpc will
		 * read out of order in my_bcmp and see both bytes as
		 * zero, so leave a placeholder below always non-zero
		 * after the count, to avoid my_bcmp to trigger false
		 * positives.
		 */
		*(area_count(area_src, nr) + 1) = 1;
	}

	pipefd = malloc(sizeof(int) * nr_cpus * 2);
	if (!pipefd) {
		perror("pipefd");
		return 1;
	}
	for (cpu = 0; cpu < nr_cpus; cpu++) {
		if (pipe2(&pipefd[cpu*2], O_CLOEXEC | O_NONBLOCK)) {
			perror("pipe");
			return 1;
		}
	}

	if (posix_memalign(&area, page_size, page_size)) {
		fprintf(stderr, "out of memory\n");
		return 1;
	}
	zeropage = area;
	bzero(zeropage, page_size);

	pthread_mutex_lock(&uffd_read_mutex);

	pthread_attr_init(&attr);
	pthread_attr_setstacksize(&attr, 16*1024*1024);

	err = 0;
	while (bounces--) {
		unsigned long expected_ioctls;

		printf("bounces: %d, mode:", bounces);
		if (bounces & BOUNCE_RANDOM)
			printf(" rnd");
		if (bounces & BOUNCE_RACINGFAULTS)
			printf(" racing");
		if (bounces & BOUNCE_VERIFY)
			printf(" ver");
		if (bounces & BOUNCE_POLL)
			printf(" poll");
		printf(", ");
		fflush(stdout);

		if (bounces & BOUNCE_POLL)
			fcntl(uffd, F_SETFL, uffd_flags | O_NONBLOCK);
		else
			fcntl(uffd, F_SETFL, uffd_flags & ~O_NONBLOCK);

		/* register */
		uffdio_register.range.start = (unsigned long) area_dst;
		uffdio_register.range.len = nr_pages * page_size;
		uffdio_register.mode = UFFDIO_REGISTER_MODE_MISSING;
		if (ioctl(uffd, UFFDIO_REGISTER, &uffdio_register)) {
			fprintf(stderr, "register failure\n");
			return 1;
		}
		expected_ioctls = uffd_test_ops->expected_ioctls;
		if ((uffdio_register.ioctls & expected_ioctls) !=
		    expected_ioctls) {
			fprintf(stderr,
				"unexpected missing ioctl for anon memory\n");
			return 1;
		}

		/*
		 * The madvise done previously isn't enough: some
		 * uffd_thread could have read userfaults (one of
		 * those already resolved by the background thread)
		 * and it may be in the process of calling
		 * UFFDIO_COPY. UFFDIO_COPY will read the zapped
		 * area_src and it would map a zero page in it (of
		 * course such a UFFDIO_COPY is perfectly safe as it'd
		 * return -EEXIST). The problem comes at the next
		 * bounce though: that racing UFFDIO_COPY would
		 * generate zeropages in the area_src, so invalidating
		 * the previous MADV_DONTNEED. Without this additional
		 * MADV_DONTNEED those zeropage leftovers in the
		 * area_src would lead to -EEXIST failure during the
		 * next bounce, effectively leaving a zeropage in the
		 * area_dst.
		 *
		 * Try to comment out this madvise to see the memory
		 * corruption being caught pretty quickly.
		 *
		 * khugepaged is also inhibited to collapse THP after
		 * MADV_DONTNEED only after the UFFDIO_REGISTER, so it's
		 * required to MADV_DONTNEED here.
		 */
		if (uffd_test_ops->release_pages(area_dst))
			return 1;

		/* bounce pass */
		if (stress(userfaults))
			return 1;

		/* unregister */
		if (ioctl(uffd, UFFDIO_UNREGISTER, &uffdio_register.range)) {
			fprintf(stderr, "unregister failure\n");
			return 1;
		}

		/* verification */
		if (bounces & BOUNCE_VERIFY) {
			for (nr = 0; nr < nr_pages; nr++) {
				if (*area_count(area_dst, nr) != count_verify[nr]) {
					fprintf(stderr,
						"error area_count %Lu %Lu %lu\n",
						*area_count(area_dst, nr),
						count_verify[nr],
						nr);
					err = 1;
					bounces = 0;
				}
			}
		}

		/* prepare next bounce */
		tmp_area = area_src;
		area_src = area_dst;
		area_dst = tmp_area;

		printf("userfaults:");
		for (cpu = 0; cpu < nr_cpus; cpu++)
			printf(" %lu", userfaults[cpu]);
		printf("\n");
	}

	if (err)
		return err;

	close(uffd);
	return userfaultfd_zeropage_test() || userfaultfd_events_test();
}

/*
 * Copied from mlock2-tests.c
 */
unsigned long default_huge_page_size(void)
{
	unsigned long hps = 0;
	char *line = NULL;
	size_t linelen = 0;
	FILE *f = fopen("/proc/meminfo", "r");

	if (!f)
		return 0;
	while (getline(&line, &linelen, f) > 0) {
		if (sscanf(line, "Hugepagesize: %lu kB", &hps) == 1) {
			hps <<= 10;
			break;
		}
	}

	free(line);
	fclose(f);
	return hps;
}

static void set_test_type(const char *type)
{
	if (!strcmp(type, "anon")) {
		test_type = TEST_ANON;
		uffd_test_ops = &anon_uffd_test_ops;
	} else if (!strcmp(type, "hugetlb")) {
		test_type = TEST_HUGETLB;
		uffd_test_ops = &hugetlb_uffd_test_ops;
	} else if (!strcmp(type, "shmem")) {
		test_type = TEST_SHMEM;
		uffd_test_ops = &shmem_uffd_test_ops;
	} else {
		fprintf(stderr, "Unknown test type: %s\n", type), exit(1);
	}

	if (test_type == TEST_HUGETLB)
		page_size = default_huge_page_size();
	else
		page_size = sysconf(_SC_PAGE_SIZE);

	if (!page_size)
		fprintf(stderr, "Unable to determine page size\n"),
				exit(2);
	if ((unsigned long) area_count(NULL, 0) + sizeof(unsigned long long) * 2
	    > page_size)
		fprintf(stderr, "Impossible to run this test\n"), exit(2);
}

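/*
 * Example invocations (illustrative; any multi-MiB size and bounce
 * count works):
 *
 *	./userfaultfd anon 100 99999
 *	./userfaultfd shmem 1000 99
 *	./userfaultfd hugetlb 256 50 /mnt/huge/userfaultfd_test
 *
 * The hugetlbfs file argument is only required for the hugetlb test
 * type; the path shown above is just an example mount point.
 */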
int main(int argc, char **argv)
{
	if (argc < 4)
		fprintf(stderr, "Usage: <test type> <MiB> <bounces> [hugetlbfs_file]\n"),
			exit(1);

	set_test_type(argv[1]);

	nr_cpus = sysconf(_SC_NPROCESSORS_ONLN);
	nr_pages_per_cpu = atol(argv[2]) * 1024*1024 / page_size /
		nr_cpus;
	if (!nr_pages_per_cpu) {
		fprintf(stderr, "invalid MiB\n");
		fprintf(stderr, "Usage: <test type> <MiB> <bounces>\n"), exit(1);
	}

	bounces = atoi(argv[3]);
	if (bounces <= 0) {
		fprintf(stderr, "invalid bounces\n");
		fprintf(stderr, "Usage: <test type> <MiB> <bounces>\n"), exit(1);
	}
	nr_pages = nr_pages_per_cpu * nr_cpus;

	if (test_type == TEST_HUGETLB) {
		if (argc < 5)
			fprintf(stderr, "Usage: hugetlb <MiB> <bounces> <hugetlbfs_file>\n"),
				exit(1);
		huge_fd = open(argv[4], O_CREAT | O_RDWR, 0755);
		if (huge_fd < 0) {
			fprintf(stderr, "Open of %s failed", argv[4]);
			perror("open");
			exit(1);
		}
		if (ftruncate(huge_fd, 0)) {
			fprintf(stderr, "ftruncate %s to size 0 failed", argv[4]);
			perror("ftruncate");
			exit(1);
		}
	}
	printf("nr_pages: %lu, nr_pages_per_cpu: %lu\n",
	       nr_pages, nr_pages_per_cpu);
	return userfaultfd_stress();
}

#else /* __NR_userfaultfd */

#warning "missing __NR_userfaultfd definition"

int main(void)
{
	printf("skip: Skipping userfaultfd test (missing __NR_userfaultfd)\n");
	return 0;
}

#endif /* __NR_userfaultfd */