/*
 * KVM coalesced MMIO
 *
 * Copyright (c) 2008 Bull S.A.S.
 *
 * Author: Laurent Vivier <Laurent.Vivier@bull.net>
 *
 */

#include "iodev.h"

#include <linux/kvm_host.h>
#include <linux/kvm.h>

#include "coalesced_mmio.h"

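/* Recover the coalesced-MMIO device from its embedded kvm_io_device. */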
static inline struct kvm_coalesced_mmio_dev *to_mmio(struct kvm_io_device *dev)
{
	return container_of(dev, struct kvm_coalesced_mmio_dev, dev);
}

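/*
 * A write can be coalesced only if it targets a registered zone and the
 * ring buffer still has a free slot; reads are never coalesced.
 */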
static int coalesced_mmio_in_range(struct kvm_io_device *this,
				   gpa_t addr, int len, int is_write)
{
	struct kvm_coalesced_mmio_dev *dev = to_mmio(this);
	struct kvm_coalesced_mmio_zone *zone;
	int next;
	int i;

	if (!is_write)
		return 0;

	/* kvm->lock is taken by the caller and must not be released
	 * before dev.read/write
	 */

	/* Are we able to batch it? */

	/* last is the first free entry;
	 * check that we do not run into the first used entry.
	 * There is always one unused entry in the buffer.
	 */

	next = (dev->kvm->coalesced_mmio_ring->last + 1) %
						KVM_COALESCED_MMIO_MAX;
	if (next == dev->kvm->coalesced_mmio_ring->first) {
		/* full */
		return 0;
	}

	/* Is it in a batchable area? */

	for (i = 0; i < dev->nb_zones; i++) {
		zone = &dev->zone[i];

		/* (addr, len) must be fully included in
		 * (zone->addr, zone->size)
		 */

		if (zone->addr <= addr &&
		    addr + len <= zone->addr + zone->size)
			return 1;
	}
	return 0;
}

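/*
 * Append the write to the ring shared with userspace.  The smp_wmb()
 * ensures the entry is fully written before 'last' is advanced, so
 * userspace never observes a half-filled entry.
 */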
static void coalesced_mmio_write(struct kvm_io_device *this,
				 gpa_t addr, int len, const void *val)
{
	struct kvm_coalesced_mmio_dev *dev = to_mmio(this);
	struct kvm_coalesced_mmio_ring *ring = dev->kvm->coalesced_mmio_ring;

	/* kvm->lock must be taken by the caller before the call to in_range() */

	/* copy data into the first free entry of the ring */

	ring->coalesced_mmio[ring->last].phys_addr = addr;
	ring->coalesced_mmio[ring->last].len = len;
	memcpy(ring->coalesced_mmio[ring->last].data, val, len);
	smp_wmb();
	ring->last = (ring->last + 1) % KVM_COALESCED_MMIO_MAX;
}

static void coalesced_mmio_destructor(struct kvm_io_device *this)
{
	struct kvm_coalesced_mmio_dev *dev = to_mmio(this);

	kfree(dev);
}

static const struct kvm_io_device_ops coalesced_mmio_ops = {
	.write = coalesced_mmio_write,
	.in_range = coalesced_mmio_in_range,
	.destructor = coalesced_mmio_destructor,
};

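/*
 * Allocate the coalesced-MMIO device and register it on the VM's MMIO
 * bus so it gets a chance to claim every guest MMIO write.
 */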
int kvm_coalesced_mmio_init(struct kvm *kvm)
{
	struct kvm_coalesced_mmio_dev *dev;

	dev = kzalloc(sizeof(struct kvm_coalesced_mmio_dev), GFP_KERNEL);
	if (!dev)
		return -ENOMEM;
	kvm_iodevice_init(&dev->dev, &coalesced_mmio_ops);
	dev->kvm = kvm;
	kvm->coalesced_mmio_dev = dev;
	kvm_io_bus_register_dev(&kvm->mmio_bus, &dev->dev);

	return 0;
}

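/*
 * Handler for the KVM_REGISTER_COALESCED_MMIO ioctl: add a guest
 * physical address range to the set of zones whose writes may be
 * coalesced.
 */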
int kvm_vm_ioctl_register_coalesced_mmio(struct kvm *kvm,
					 struct kvm_coalesced_mmio_zone *zone)
{
	struct kvm_coalesced_mmio_dev *dev = kvm->coalesced_mmio_dev;

	if (dev == NULL)
		return -EINVAL;

	mutex_lock(&kvm->lock);
	if (dev->nb_zones >= KVM_COALESCED_MMIO_ZONE_MAX) {
		mutex_unlock(&kvm->lock);
		return -ENOBUFS;
	}

	dev->zone[dev->nb_zones] = *zone;
	dev->nb_zones++;

	mutex_unlock(&kvm->lock);
	return 0;
}

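/*
 * Handler for the KVM_UNREGISTER_COALESCED_MMIO ioctl: drop every
 * registered zone that is fully contained in the given range.  Freed
 * slots are refilled by swapping in the last zone of the array.
 */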
int kvm_vm_ioctl_unregister_coalesced_mmio(struct kvm *kvm,
					   struct kvm_coalesced_mmio_zone *zone)
{
	int i;
	struct kvm_coalesced_mmio_dev *dev = kvm->coalesced_mmio_dev;
	struct kvm_coalesced_mmio_zone *z;

	if (dev == NULL)
		return -EINVAL;

	mutex_lock(&kvm->lock);

	i = dev->nb_zones;
	while (i) {
		z = &dev->zone[i - 1];

		/* unregister all zones fully included in
		 * (zone->addr, zone->size)
		 */

		if (zone->addr <= z->addr &&
		    z->addr + z->size <= zone->addr + zone->size) {
			dev->nb_zones--;
			*z = dev->zone[dev->nb_zones];
		}
		i--;
	}

	mutex_unlock(&kvm->lock);

	return 0;
}