/* Simple test of virtio code, entirely in userspace. */
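/*
 * Usage sketch (binary name assumed; the flags below are exactly the
 * ones parsed in main()):
 *
 *	vringh_test				basic single-process test
 *	vringh_test --parallel			separate host/guest processes
 *	vringh_test --indirect --eventidx --slow-range --fast-vringh
 */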
#define _GNU_SOURCE
#include <sched.h>
#include <err.h>
#include <linux/kernel.h>
#include <linux/err.h>
#include <linux/virtio.h>
#include <linux/vringh.h>
#include <linux/virtio_ring.h>
#include <linux/uaccess.h>
#include <sys/types.h>
#include <sys/stat.h>
#include <sys/mman.h>
#include <sys/wait.h>
#include <fcntl.h>

#define USER_MEM (1024*1024)
void *__user_addr_min, *__user_addr_max;
void *__kmalloc_fake, *__kfree_ignore_start, *__kfree_ignore_end;
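/* In --parallel mode host and guest mmap() the ring at different addresses;
 * this offset translates guest addresses into host ones. */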
static u64 user_addr_offset;

#define RINGSIZE 256
#define ALIGN 4096

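/* Used where a notify/callback should be impossible: bail out hard. */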
static void never_notify_host(struct virtqueue *vq)
{
	abort();
}

static void never_callback_guest(struct virtqueue *vq)
{
	abort();
}

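/* We return the whole userspace region as one range. */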
static bool getrange_iov(struct vringh *vrh, u64 addr, struct vringh_range *r)
{
	if (addr < (u64)(unsigned long)__user_addr_min - user_addr_offset)
		return false;
	if (addr >= (u64)(unsigned long)__user_addr_max - user_addr_offset)
		return false;

	r->start = (u64)(unsigned long)__user_addr_min - user_addr_offset;
	r->end_incl = (u64)(unsigned long)__user_addr_max - 1 - user_addr_offset;
	r->offset = user_addr_offset;
	return true;
}

/* We return single byte ranges. */
static bool getrange_slow(struct vringh *vrh, u64 addr, struct vringh_range *r)
{
	if (addr < (u64)(unsigned long)__user_addr_min - user_addr_offset)
		return false;
	if (addr >= (u64)(unsigned long)__user_addr_max - user_addr_offset)
		return false;

	r->start = addr;
	r->end_incl = r->start;
	r->offset = user_addr_offset;
	return true;
}

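/* Guest side of the parallel test: a virtio_device plus the pipe fd used
 * to ping the host, and a count of notifications sent. */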
struct guest_virtio_device {
	struct virtio_device vdev;
	int to_host_fd;
	unsigned long notifies;
};

static void parallel_notify_host(struct virtqueue *vq)
{
	struct guest_virtio_device *gvdev;

	gvdev = container_of(vq->vdev, struct guest_virtio_device, vdev);
	write(gvdev->to_host_fd, "", 1);
	gvdev->notifies++;
}

static void no_notify_host(struct virtqueue *vq)
{
}

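/* Buffers each side pushes through the ring in the parallel test. */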
#define NUM_XFERS (10000000)

/* We aim for two "distant" cpus. */
static void find_cpus(unsigned int *first, unsigned int *last)
{
	unsigned int i;

	*first = -1U;
	*last = 0;
	for (i = 0; i < 4096; i++) {
		cpu_set_t set;
		CPU_ZERO(&set);
		CPU_SET(i, &set);
		if (sched_setaffinity(getpid(), sizeof(set), &set) == 0) {
			if (i < *first)
				*first = i;
			if (i > *last)
				*last = i;
		}
	}
}

/* Open-coded version for fast mode: returns 1 if a head was fetched,
 * 0 if none are pending, otherwise the get_user() error. */
static inline int vringh_get_head(struct vringh *vrh, u16 *head)
{
	u16 avail_idx, i;
	int err;

	err = get_user(avail_idx, &vrh->vring.avail->idx);
	if (err)
		return err;

	if (vrh->last_avail_idx == avail_idx)
		return 0;

	/* Only get avail ring entries after they have been exposed by guest. */
	virtio_rmb(vrh->weak_barriers);

	i = vrh->last_avail_idx & (vrh->vring.num - 1);

	err = get_user(*head, &vrh->vring.avail->ring[i]);
	if (err)
		return err;

	vrh->last_avail_idx++;
	return 1;
}

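/* Fork a host (vringh) and a guest (virtqueue) process which pass NUM_XFERS
 * buffers through a ring mapped from a shared temporary file. */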
static int parallel_test(unsigned long features,
			 bool (*getrange)(struct vringh *vrh,
					  u64 addr, struct vringh_range *r),
			 bool fast_vringh)
{
	void *host_map, *guest_map;
	int fd, mapsize, to_guest[2], to_host[2];
	unsigned long xfers = 0, notifies = 0, receives = 0;
	unsigned int first_cpu, last_cpu;
	cpu_set_t cpu_set;
	char buf[128];

	/* Create real file to mmap. */
	fd = open("/tmp/vringh_test-file", O_RDWR|O_CREAT|O_TRUNC, 0600);
	if (fd < 0)
		err(1, "Opening /tmp/vringh_test-file");

	/* Extra room at the end for some data, and indirects */
	mapsize = vring_size(RINGSIZE, ALIGN)
		+ RINGSIZE * 2 * sizeof(int)
		+ RINGSIZE * 6 * sizeof(struct vring_desc);
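	/* Round up to a whole number of pages for mmap(). */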
	mapsize = (mapsize + getpagesize() - 1) & ~(getpagesize() - 1);
	ftruncate(fd, mapsize);

	/* Parent and child use separate addresses, to check our mapping logic! */
	host_map = mmap(NULL, mapsize, PROT_READ|PROT_WRITE, MAP_SHARED, fd, 0);
	guest_map = mmap(NULL, mapsize, PROT_READ|PROT_WRITE, MAP_SHARED, fd, 0);

	pipe(to_guest);
	pipe(to_host);

	CPU_ZERO(&cpu_set);
	find_cpus(&first_cpu, &last_cpu);
	printf("Using CPUS %u and %u\n", first_cpu, last_cpu);
	fflush(stdout);

	if (fork() != 0) {
		struct vringh vrh;
		int status, err, rlen = 0;
		char rbuf[5];

		/* We are the host: never access guest addresses! */
		munmap(guest_map, mapsize);

		__user_addr_min = host_map;
		__user_addr_max = __user_addr_min + mapsize;
		user_addr_offset = host_map - guest_map;
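		/* The two mappings must have landed at different addresses,
		 * otherwise the offset logic is never exercised. */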
		assert(user_addr_offset);

		close(to_guest[0]);
		close(to_host[1]);

		vring_init(&vrh.vring, RINGSIZE, host_map, ALIGN);
		vringh_init_user(&vrh, features, RINGSIZE, true,
				 vrh.vring.desc, vrh.vring.avail, vrh.vring.used);
		CPU_SET(first_cpu, &cpu_set);
		if (sched_setaffinity(getpid(), sizeof(cpu_set), &cpu_set))
			errx(1, "Could not set affinity to cpu %u", first_cpu);

		while (xfers < NUM_XFERS) {
			struct iovec host_riov[2], host_wiov[2];
			struct vringh_iov riov, wiov;
			u16 head, written;

			if (fast_vringh) {
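				/* Fast path: pull heads and complete them
				 * immediately; the data is never copied. */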
				for (;;) {
					err = vringh_get_head(&vrh, &head);
					if (err != 0)
						break;
					err = vringh_need_notify_user(&vrh);
					if (err < 0)
						errx(1, "vringh_need_notify_user: %i",
						     err);
					if (err) {
						write(to_guest[1], "", 1);
						notifies++;
					}
				}
				if (err != 1)
					errx(1, "vringh_get_head");
				written = 0;
				goto complete;
			} else {
				vringh_iov_init(&riov,
						host_riov,
						ARRAY_SIZE(host_riov));
				vringh_iov_init(&wiov,
						host_wiov,
						ARRAY_SIZE(host_wiov));

				err = vringh_getdesc_user(&vrh, &riov, &wiov,
							  getrange, &head);
			}
			if (err == 0) {
				err = vringh_need_notify_user(&vrh);
				if (err < 0)
					errx(1, "vringh_need_notify_user: %i",
					     err);
				if (err) {
					write(to_guest[1], "", 1);
					notifies++;
				}

				if (!vringh_notify_enable_user(&vrh))
					continue;

				/* Swallow all notifies at once. */
				if (read(to_host[0], buf, sizeof(buf)) < 1)
					break;

				vringh_notify_disable_user(&vrh);
				receives++;
				continue;
			}
			if (err != 1)
				errx(1, "vringh_getdesc_user: %i", err);

			/* We simply copy bytes. */
			if (riov.used) {
				rlen = vringh_iov_pull_user(&riov, rbuf,
							    sizeof(rbuf));
				if (rlen != 4)
					errx(1, "vringh_iov_pull_user: %i",
					     rlen);
				assert(riov.i == riov.used);
				written = 0;
			} else {
				err = vringh_iov_push_user(&wiov, rbuf, rlen);
				if (err != rlen)
					errx(1, "vringh_iov_push_user: %i",
					     err);
				assert(wiov.i == wiov.used);
				written = err;
			}
		complete:
			xfers++;

			err = vringh_complete_user(&vrh, head, written);
			if (err != 0)
				errx(1, "vringh_complete_user: %i", err);
		}

		err = vringh_need_notify_user(&vrh);
		if (err < 0)
			errx(1, "vringh_need_notify_user: %i", err);
		if (err) {
			write(to_guest[1], "", 1);
			notifies++;
		}
		wait(&status);
		if (!WIFEXITED(status))
			errx(1, "Child died with signal %i?", WTERMSIG(status));
		if (WEXITSTATUS(status) != 0)
			errx(1, "Child exited %i?", WEXITSTATUS(status));
		printf("Host: notified %lu, pinged %lu\n", notifies, receives);
		return 0;
	} else {
		struct guest_virtio_device gvdev;
		struct virtqueue *vq;
		unsigned int *data;
		struct vring_desc *indirects;
		unsigned int finished = 0;

		/* We pass sg[]s pointing into here, but we need RINGSIZE+1 */
		data = guest_map + vring_size(RINGSIZE, ALIGN);
		indirects = (void *)data + (RINGSIZE + 1) * 2 * sizeof(int);

		/* We are the guest. */
		munmap(host_map, mapsize);

		close(to_guest[1]);
		close(to_host[0]);

		gvdev.vdev.features[0] = features;
		gvdev.to_host_fd = to_host[1];
		gvdev.notifies = 0;

		CPU_SET(last_cpu, &cpu_set);
		if (sched_setaffinity(getpid(), sizeof(cpu_set), &cpu_set))
			err(1, "Could not set affinity to cpu %u", last_cpu);

		vq = vring_new_virtqueue(0, RINGSIZE, ALIGN, &gvdev.vdev, true,
					 guest_map, fast_vringh ? no_notify_host
					 : parallel_notify_host,
					 never_callback_guest, "guest vq");

		/* Don't kfree indirects. */
		__kfree_ignore_start = indirects;
		__kfree_ignore_end = indirects + RINGSIZE * 6;

		while (xfers < NUM_XFERS) {
			struct scatterlist sg[4];
			unsigned int num_sg, len;
			int *dbuf, err;
			bool output = !(xfers % 2);

			/* Consume bufs. */
			while ((dbuf = virtqueue_get_buf(vq, &len)) != NULL) {
				if (len == 4)
					assert(*dbuf == finished - 1);
				else if (!fast_vringh)
					assert(*dbuf == finished);
				finished++;
			}

			/* Produce a buffer. */
			dbuf = data + (xfers % (RINGSIZE + 1));

			if (output)
				*dbuf = xfers;
			else
				*dbuf = -1;

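			/* Rotate through 3-, 2-, 1- and 4-element layouts. */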
			switch ((xfers / sizeof(*dbuf)) % 4) {
			case 0:
				/* Nasty three-element sg list. */
				sg_init_table(sg, num_sg = 3);
				sg_set_buf(&sg[0], (void *)dbuf, 1);
				sg_set_buf(&sg[1], (void *)dbuf + 1, 2);
				sg_set_buf(&sg[2], (void *)dbuf + 3, 1);
				break;
			case 1:
				sg_init_table(sg, num_sg = 2);
				sg_set_buf(&sg[0], (void *)dbuf, 1);
				sg_set_buf(&sg[1], (void *)dbuf + 1, 3);
				break;
			case 2:
				sg_init_table(sg, num_sg = 1);
				sg_set_buf(&sg[0], (void *)dbuf, 4);
				break;
			case 3:
				sg_init_table(sg, num_sg = 4);
				sg_set_buf(&sg[0], (void *)dbuf, 1);
				sg_set_buf(&sg[1], (void *)dbuf + 1, 1);
				sg_set_buf(&sg[2], (void *)dbuf + 2, 1);
				sg_set_buf(&sg[3], (void *)dbuf + 3, 1);
				break;
			}

			/* May allocate an indirect, so force it to allocate
			 * user addr */
			__kmalloc_fake = indirects + (xfers % RINGSIZE) * 4;
			if (output)
				err = virtqueue_add_outbuf(vq, sg, num_sg, dbuf,
							   GFP_KERNEL);
			else
				err = virtqueue_add_inbuf(vq, sg, num_sg,
							  dbuf, GFP_KERNEL);

			if (err == -ENOSPC) {
				if (!virtqueue_enable_cb_delayed(vq))
					continue;
				/* Swallow all notifies at once. */
				if (read(to_guest[0], buf, sizeof(buf)) < 1)
					break;

				receives++;
				virtqueue_disable_cb(vq);
				continue;
			}

			if (err)
				errx(1, "virtqueue_add_buf: %i", err);

			xfers++;
			virtqueue_kick(vq);
		}

		/* Any extra? */
		while (finished != xfers) {
			int *dbuf;
			unsigned int len;

			/* Consume bufs. */
			dbuf = virtqueue_get_buf(vq, &len);
			if (dbuf) {
				if (len == 4)
					assert(*dbuf == finished - 1);
				else
					assert(len == 0);
				finished++;
				continue;
			}

			if (!virtqueue_enable_cb_delayed(vq))
				continue;
			if (read(to_guest[0], buf, sizeof(buf)) < 1)
				break;

			receives++;
			virtqueue_disable_cb(vq);
		}

		printf("Guest: notified %lu, pinged %lu\n",
		       gvdev.notifies, receives);
		vring_del_virtqueue(vq);
		return 0;
	}
}

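/* Single-process test: the same address space acts as guest (virtqueue)
 * and host (vringh) in lock-step. */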
int main(int argc, char *argv[])
{
	struct virtio_device vdev;
	struct virtqueue *vq;
	struct vringh vrh;
	struct scatterlist guest_sg[RINGSIZE];
	struct iovec host_riov[2], host_wiov[2];
	struct vringh_iov riov, wiov;
	struct vring_used_elem used[RINGSIZE];
	char buf[28];
	u16 head;
	int err;
	unsigned i;
	void *ret;
	bool (*getrange)(struct vringh *vrh, u64 addr, struct vringh_range *r);
	bool fast_vringh = false, parallel = false;

	getrange = getrange_iov;
	vdev.features[0] = 0;

	while (argv[1]) {
		if (strcmp(argv[1], "--indirect") == 0)
			vdev.features[0] |= (1 << VIRTIO_RING_F_INDIRECT_DESC);
		else if (strcmp(argv[1], "--eventidx") == 0)
			vdev.features[0] |= (1 << VIRTIO_RING_F_EVENT_IDX);
		else if (strcmp(argv[1], "--slow-range") == 0)
			getrange = getrange_slow;
		else if (strcmp(argv[1], "--fast-vringh") == 0)
			fast_vringh = true;
		else if (strcmp(argv[1], "--parallel") == 0)
			parallel = true;
		else
			errx(1, "Unknown arg %s", argv[1]);
		argv++;
	}

	if (parallel)
		return parallel_test(vdev.features[0], getrange, fast_vringh);

	if (posix_memalign(&__user_addr_min, PAGE_SIZE, USER_MEM) != 0)
		abort();
	__user_addr_max = __user_addr_min + USER_MEM;
	memset(__user_addr_min, 0, vring_size(RINGSIZE, ALIGN));

	/* Set up guest side. */
	vq = vring_new_virtqueue(0, RINGSIZE, ALIGN, &vdev, true,
				 __user_addr_min,
				 never_notify_host, never_callback_guest,
				 "guest vq");

	/* Set up host side. */
	vring_init(&vrh.vring, RINGSIZE, __user_addr_min, ALIGN);
	vringh_init_user(&vrh, vdev.features[0], RINGSIZE, true,
			 vrh.vring.desc, vrh.vring.avail, vrh.vring.used);

	/* No descriptor to get yet... */
	err = vringh_getdesc_user(&vrh, &riov, &wiov, getrange, &head);
	if (err != 0)
		errx(1, "vringh_getdesc_user: %i", err);

	/* Guest puts in a descriptor. */
	memcpy(__user_addr_max - 1, "a", 1);
	sg_init_table(guest_sg, 1);
	sg_set_buf(&guest_sg[0], __user_addr_max - 1, 1);
	sg_init_table(guest_sg + 1, 1);
	sg_set_buf(&guest_sg[1], __user_addr_max - 3, 2);

	/* May allocate an indirect, so force it to allocate user addr */
	__kmalloc_fake = __user_addr_min + vring_size(RINGSIZE, ALIGN);
	err = virtqueue_add_buf(vq, guest_sg, 1, 1, &err, GFP_KERNEL);
	if (err)
		errx(1, "virtqueue_add_buf: %i", err);
	__kmalloc_fake = NULL;

	/* Host retrieves it. */
	vringh_iov_init(&riov, host_riov, ARRAY_SIZE(host_riov));
	vringh_iov_init(&wiov, host_wiov, ARRAY_SIZE(host_wiov));

	err = vringh_getdesc_user(&vrh, &riov, &wiov, getrange, &head);
	if (err != 1)
		errx(1, "vringh_getdesc_user: %i", err);

	assert(riov.used == 1);
	assert(riov.iov[0].iov_base == __user_addr_max - 1);
	assert(riov.iov[0].iov_len == 1);
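	/* getrange_slow hands back single-byte ranges, so the 2-byte
	 * write buffer is split across two iov entries. */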
	if (getrange != getrange_slow) {
		assert(wiov.used == 1);
		assert(wiov.iov[0].iov_base == __user_addr_max - 3);
		assert(wiov.iov[0].iov_len == 2);
	} else {
		assert(wiov.used == 2);
		assert(wiov.iov[0].iov_base == __user_addr_max - 3);
		assert(wiov.iov[0].iov_len == 1);
		assert(wiov.iov[1].iov_base == __user_addr_max - 2);
		assert(wiov.iov[1].iov_len == 1);
	}

	err = vringh_iov_pull_user(&riov, buf, 5);
	if (err != 1)
		errx(1, "vringh_iov_pull_user: %i", err);
	assert(buf[0] == 'a');
	assert(riov.i == 1);
	assert(vringh_iov_pull_user(&riov, buf, 5) == 0);

	memcpy(buf, "bcdef", 5);
	err = vringh_iov_push_user(&wiov, buf, 5);
	if (err != 2)
		errx(1, "vringh_iov_push_user: %i", err);
	assert(memcmp(__user_addr_max - 3, "bc", 2) == 0);
	assert(wiov.i == wiov.used);
	assert(vringh_iov_push_user(&wiov, buf, 5) == 0);

	/* Host is done. */
	err = vringh_complete_user(&vrh, head, err);
	if (err != 0)
		errx(1, "vringh_complete_user: %i", err);

	/* Guest should see used token now. */
	__kfree_ignore_start = __user_addr_min + vring_size(RINGSIZE, ALIGN);
	__kfree_ignore_end = __kfree_ignore_start + 1;
	ret = virtqueue_get_buf(vq, &i);
	if (ret != &err)
		errx(1, "virtqueue_get_buf: %p", ret);
	assert(i == 2);

	/* Guest puts in a huge descriptor. */
	sg_init_table(guest_sg, RINGSIZE);
	for (i = 0; i < RINGSIZE; i++) {
		sg_set_buf(&guest_sg[i],
			   __user_addr_max - USER_MEM/4, USER_MEM/4);
	}

	/* Fill contents with recognisable garbage. */
	for (i = 0; i < USER_MEM/4; i++)
		((char *)__user_addr_max - USER_MEM/4)[i] = i;

	/* This will allocate an indirect, so force it to allocate user addr */
	__kmalloc_fake = __user_addr_min + vring_size(RINGSIZE, ALIGN);
	err = virtqueue_add_buf(vq, guest_sg, RINGSIZE, 0, &err, GFP_KERNEL);
	if (err)
		errx(1, "virtqueue_add_buf (large): %i", err);
	__kmalloc_fake = NULL;

	/* Host picks it up (allocates new iov). */
	vringh_iov_init(&riov, host_riov, ARRAY_SIZE(host_riov));
	vringh_iov_init(&wiov, host_wiov, ARRAY_SIZE(host_wiov));

	err = vringh_getdesc_user(&vrh, &riov, &wiov, getrange, &head);
	if (err != 1)
		errx(1, "vringh_getdesc_user: %i", err);

	assert(riov.max_num & VRINGH_IOV_ALLOCATED);
	assert(riov.iov != host_riov);
	if (getrange != getrange_slow)
		assert(riov.used == RINGSIZE);
	else
		assert(riov.used == RINGSIZE * USER_MEM/4);

	assert(!(wiov.max_num & VRINGH_IOV_ALLOCATED));
	assert(wiov.used == 0);

	/* Pull data back out (in odd chunks), should be as expected. */
	for (i = 0; i < RINGSIZE * USER_MEM/4; i += 3) {
		err = vringh_iov_pull_user(&riov, buf, 3);
		if (err != 3 && i + err != RINGSIZE * USER_MEM/4)
			errx(1, "vringh_iov_pull_user large: %i", err);
		assert(buf[0] == (char)i);
		assert(err < 2 || buf[1] == (char)(i + 1));
		assert(err < 3 || buf[2] == (char)(i + 2));
	}
	assert(riov.i == riov.used);
	vringh_iov_cleanup(&riov);
	vringh_iov_cleanup(&wiov);

	/* Complete using multi interface, just because we can. */
	used[0].id = head;
	used[0].len = 0;
	err = vringh_complete_multi_user(&vrh, used, 1);
	if (err)
		errx(1, "vringh_complete_multi_user(1): %i", err);

	/* Free up those descriptors. */
	ret = virtqueue_get_buf(vq, &i);
	if (ret != &err)
		errx(1, "virtqueue_get_buf: %p", ret);

	/* Add lots of descriptors. */
	sg_init_table(guest_sg, 1);
	sg_set_buf(&guest_sg[0], __user_addr_max - 1, 1);
	for (i = 0; i < RINGSIZE; i++) {
		err = virtqueue_add_buf(vq, guest_sg, 1, 0, &err, GFP_KERNEL);
		if (err)
			errx(1, "virtqueue_add_buf (multiple): %i", err);
	}

	/* Now get many, and consume them all at once. */
	vringh_iov_init(&riov, host_riov, ARRAY_SIZE(host_riov));
	vringh_iov_init(&wiov, host_wiov, ARRAY_SIZE(host_wiov));

	for (i = 0; i < RINGSIZE; i++) {
		err = vringh_getdesc_user(&vrh, &riov, &wiov, getrange, &head);
		if (err != 1)
			errx(1, "vringh_getdesc_user: %i", err);
		used[i].id = head;
		used[i].len = 0;
	}
	/* Make sure it wraps around ring, to test! */
	assert(vrh.vring.used->idx % RINGSIZE != 0);
	err = vringh_complete_multi_user(&vrh, used, RINGSIZE);
	if (err)
		errx(1, "vringh_complete_multi_user: %i", err);

	/* Free those buffers. */
	for (i = 0; i < RINGSIZE; i++) {
		unsigned len;
		assert(virtqueue_get_buf(vq, &len) != NULL);
	}

	/* Test weird (but legal!) indirect. */
	if (vdev.features[0] & (1 << VIRTIO_RING_F_INDIRECT_DESC)) {
		char *data = __user_addr_max - USER_MEM/4;
		struct vring_desc *d = __user_addr_max - USER_MEM/2;
		struct vring vring;

		/* Force creation of direct, which we modify. */
		vdev.features[0] &= ~(1 << VIRTIO_RING_F_INDIRECT_DESC);
		vq = vring_new_virtqueue(0, RINGSIZE, ALIGN, &vdev, true,
					 __user_addr_min,
					 never_notify_host,
					 never_callback_guest,
					 "guest vq");

		sg_init_table(guest_sg, 4);
		sg_set_buf(&guest_sg[0], d, sizeof(*d)*2);
		sg_set_buf(&guest_sg[1], d + 2, sizeof(*d)*1);
		sg_set_buf(&guest_sg[2], data + 6, 4);
		sg_set_buf(&guest_sg[3], d + 3, sizeof(*d)*3);

		err = virtqueue_add_buf(vq, guest_sg, 4, 0, &err, GFP_KERNEL);
		if (err)
			errx(1, "virtqueue_add_buf (indirect): %i", err);

		vring_init(&vring, RINGSIZE, __user_addr_min, ALIGN);

		/* They're used in order, but double-check... */
		assert(vring.desc[0].addr == (unsigned long)d);
		assert(vring.desc[1].addr == (unsigned long)(d+2));
		assert(vring.desc[2].addr == (unsigned long)data + 6);
		assert(vring.desc[3].addr == (unsigned long)(d+3));
		vring.desc[0].flags |= VRING_DESC_F_INDIRECT;
		vring.desc[1].flags |= VRING_DESC_F_INDIRECT;
		vring.desc[3].flags |= VRING_DESC_F_INDIRECT;

		/* First indirect */
		d[0].addr = (unsigned long)data;
		d[0].len = 1;
		d[0].flags = VRING_DESC_F_NEXT;
		d[0].next = 1;
		d[1].addr = (unsigned long)data + 1;
		d[1].len = 2;
		d[1].flags = 0;

		/* Second indirect */
		d[2].addr = (unsigned long)data + 3;
		d[2].len = 3;
		d[2].flags = 0;

		/* Third indirect */
		d[3].addr = (unsigned long)data + 10;
		d[3].len = 5;
		d[3].flags = VRING_DESC_F_NEXT;
		d[3].next = 1;
		d[4].addr = (unsigned long)data + 15;
		d[4].len = 6;
		d[4].flags = VRING_DESC_F_NEXT;
		d[4].next = 2;
		d[5].addr = (unsigned long)data + 21;
		d[5].len = 7;
		d[5].flags = 0;

		/* Host picks it up (allocates new iov). */
		vringh_iov_init(&riov, host_riov, ARRAY_SIZE(host_riov));
		vringh_iov_init(&wiov, host_wiov, ARRAY_SIZE(host_wiov));

		err = vringh_getdesc_user(&vrh, &riov, &wiov, getrange, &head);
		if (err != 1)
			errx(1, "vringh_getdesc_user: %i", err);

		if (head != 0)
			errx(1, "vringh_getdesc_user: head %i not 0", head);

		assert(riov.max_num & VRINGH_IOV_ALLOCATED);
		if (getrange != getrange_slow)
			assert(riov.used == 7);
		else
			assert(riov.used == 28);
		err = vringh_iov_pull_user(&riov, buf, 29);
		assert(err == 28);

		/* Data should be linear. */
		for (i = 0; i < err; i++)
			assert(buf[i] == i);
		vringh_iov_cleanup(&riov);
	}

	/* Don't leak memory... */
	vring_del_virtqueue(vq);
	free(__user_addr_min);

	return 0;
}