/*
 * KVM coalesced MMIO
 *
 * Copyright (c) 2008 Bull S.A.S.
 * Copyright 2009 Red Hat, Inc. and/or its affiliates.
 *
 * Author: Laurent Vivier <Laurent.Vivier@bull.net>
 *
 */

#include "iodev.h"

#include <linux/kvm_host.h>
#include <linux/slab.h>
#include <linux/kvm.h>

#include "coalesced_mmio.h"

static inline struct kvm_coalesced_mmio_dev *to_mmio(struct kvm_io_device *dev)
{
	return container_of(dev, struct kvm_coalesced_mmio_dev, dev);
}

static int coalesced_mmio_in_range(struct kvm_coalesced_mmio_dev *dev,
				   gpa_t addr, int len)
{
	/* Is the access in a batchable area?
	 * (addr, len) must be fully included in
	 * (zone->addr, zone->size).
	 */
	if (len < 0)
		return 0;
	if (addr + len < addr)
		return 0;
	if (addr < dev->zone.addr)
		return 0;
	if (addr + len > dev->zone.addr + dev->zone.size)
		return 0;
	return 1;
}

static int coalesced_mmio_has_room(struct kvm_coalesced_mmio_dev *dev)
{
	struct kvm_coalesced_mmio_ring *ring;
	unsigned avail;

	/* Can we batch this access? */

	/* 'last' is the first free entry:
	 * check that it does not catch up with the first used entry.
	 * One entry is always left unused in the buffer.
	 */
	ring = dev->kvm->coalesced_mmio_ring;
	avail = (ring->first - ring->last - 1) % KVM_COALESCED_MMIO_MAX;
	if (avail == 0) {
		/* full */
		return 0;
	}

	return 1;
}

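/*
 * Write handler called for guest MMIO writes on KVM_MMIO_BUS.  If the access
 * lies inside this device's zone and the ring has room, the write is recorded
 * in the shared ring and the vcpu keeps running; otherwise -EOPNOTSUPP tells
 * the caller to fall back to a normal MMIO exit to userspace.
 */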
static int coalesced_mmio_write(struct kvm_io_device *this,
				gpa_t addr, int len, const void *val)
{
	struct kvm_coalesced_mmio_dev *dev = to_mmio(this);
	struct kvm_coalesced_mmio_ring *ring = dev->kvm->coalesced_mmio_ring;

	if (!coalesced_mmio_in_range(dev, addr, len))
		return -EOPNOTSUPP;

	spin_lock(&dev->kvm->ring_lock);

	if (!coalesced_mmio_has_room(dev)) {
		spin_unlock(&dev->kvm->ring_lock);
		return -EOPNOTSUPP;
	}

	/* copy the data into the first free entry of the ring */

	ring->coalesced_mmio[ring->last].phys_addr = addr;
	ring->coalesced_mmio[ring->last].len = len;
	memcpy(ring->coalesced_mmio[ring->last].data, val, len);
	smp_wmb();
	ring->last = (ring->last + 1) % KVM_COALESCED_MMIO_MAX;
	spin_unlock(&dev->kvm->ring_lock);
	return 0;
}

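/*
 * Called through kvm_iodevice_destructor() when a zone is unregistered or the
 * bus itself is destroyed: unlink the device from kvm->coalesced_zones and
 * free it.
 */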
static void coalesced_mmio_destructor(struct kvm_io_device *this)
{
	struct kvm_coalesced_mmio_dev *dev = to_mmio(this);

	list_del(&dev->list);

	kfree(dev);
}

static const struct kvm_io_device_ops coalesced_mmio_ops = {
	.write = coalesced_mmio_write,
	.destructor = coalesced_mmio_destructor,
};

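/*
 * Allocate the single page that backs the coalesced MMIO ring shared with
 * userspace (the VMM maps it through the vcpu mmap region at
 * KVM_COALESCED_MMIO_PAGE_OFFSET on architectures that define it) and set up
 * the zone list and its lock.
 */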
int kvm_coalesced_mmio_init(struct kvm *kvm)
{
	struct page *page;
	int ret;

	ret = -ENOMEM;
	page = alloc_page(GFP_KERNEL | __GFP_ZERO);
	if (!page)
		goto out_err;

	ret = 0;
	kvm->coalesced_mmio_ring = page_address(page);

	/*
	 * We're using this spinlock to sync access to the coalesced ring.
	 * The list doesn't need its own lock since device registration and
	 * unregistration should only happen when kvm->slots_lock is held.
	 */
	spin_lock_init(&kvm->ring_lock);
	INIT_LIST_HEAD(&kvm->coalesced_zones);

out_err:
	return ret;
}

void kvm_coalesced_mmio_free(struct kvm *kvm)
{
	if (kvm->coalesced_mmio_ring)
		free_page((unsigned long)kvm->coalesced_mmio_ring);
}

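/*
 * KVM_REGISTER_COALESCED_MMIO: each registered zone gets its own
 * kvm_io_device on KVM_MMIO_BUS and is linked onto kvm->coalesced_zones so it
 * can be found again at unregister time.
 */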
int kvm_vm_ioctl_register_coalesced_mmio(struct kvm *kvm,
					 struct kvm_coalesced_mmio_zone *zone)
{
	int ret;
	struct kvm_coalesced_mmio_dev *dev;

	dev = kzalloc(sizeof(struct kvm_coalesced_mmio_dev), GFP_KERNEL);
	if (!dev)
		return -ENOMEM;

	kvm_iodevice_init(&dev->dev, &coalesced_mmio_ops);
	dev->kvm = kvm;
	dev->zone = *zone;

	mutex_lock(&kvm->slots_lock);
	ret = kvm_io_bus_register_dev(kvm, KVM_MMIO_BUS, zone->addr,
				      zone->size, &dev->dev);
	if (ret < 0)
		goto out_free_dev;
	list_add_tail(&dev->list, &kvm->coalesced_zones);
	mutex_unlock(&kvm->slots_lock);

	return 0;

out_free_dev:
	mutex_unlock(&kvm->slots_lock);
	kfree(dev);

	return ret;
}

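/*
 * KVM_UNREGISTER_COALESCED_MMIO: remove every registered device whose zone
 * covers the requested range; the destructor unlinks and frees it.
 */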
int kvm_vm_ioctl_unregister_coalesced_mmio(struct kvm *kvm,
					   struct kvm_coalesced_mmio_zone *zone)
{
	struct kvm_coalesced_mmio_dev *dev, *tmp;

	mutex_lock(&kvm->slots_lock);

	list_for_each_entry_safe(dev, tmp, &kvm->coalesced_zones, list)
		if (coalesced_mmio_in_range(dev, zone->addr, zone->size)) {
			kvm_io_bus_unregister_dev(kvm, KVM_MMIO_BUS, &dev->dev);
			kvm_iodevice_destructor(&dev->dev);
		}

	mutex_unlock(&kvm->slots_lock);

	return 0;
}
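
/*
 * Illustrative userspace sketch (not part of this file): how a VMM might
 * register a coalesced zone and drain the ring after KVM_RUN returns.  The
 * ioctl, struct kvm_coalesced_mmio_zone, struct kvm_coalesced_mmio_ring and
 * KVM_COALESCED_MMIO_PAGE_OFFSET come from the KVM userspace ABI; vm_fd,
 * vcpu_run_mmap (the pointer returned by mmap() on the vcpu fd),
 * handle_mmio_write() and the 0xa0000 VGA window are made-up placeholders,
 * and PAGE_SIZE/smp_wmb() stand in for the VMM's own page-size and
 * write-barrier primitives.
 *
 *	struct kvm_coalesced_mmio_zone zone = {
 *		.addr = 0xa0000,
 *		.size = 0x20000,
 *	};
 *	struct kvm_coalesced_mmio_ring *ring;
 *
 *	ioctl(vm_fd, KVM_REGISTER_COALESCED_MMIO, &zone);
 *
 *	ring = (void *)((char *)vcpu_run_mmap +
 *			KVM_COALESCED_MMIO_PAGE_OFFSET * PAGE_SIZE);
 *
 *	// after KVM_RUN: replay the batched writes in order
 *	while (ring->first != ring->last) {
 *		struct kvm_coalesced_mmio *ent = &ring->coalesced_mmio[ring->first];
 *
 *		handle_mmio_write(ent->phys_addr, ent->data, ent->len);
 *		smp_wmb();	// finish using the entry before advancing 'first'
 *		ring->first = (ring->first + 1) % KVM_COALESCED_MMIO_MAX;
 *	}
 */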