/*
 * KVM coalesced MMIO
 *
 * Copyright (c) 2008 Bull S.A.S.
 * Copyright 2009 Red Hat, Inc. and/or its affiliates.
 *
 * Author: Laurent Vivier <Laurent.Vivier@bull.net>
 *
 */

#include "iodev.h"

#include <linux/kvm_host.h>
#include <linux/slab.h>
#include <linux/kvm.h>

#include "coalesced_mmio.h"

static inline struct kvm_coalesced_mmio_dev *to_mmio(struct kvm_io_device *dev)
{
	return container_of(dev, struct kvm_coalesced_mmio_dev, dev);
}

static int coalesced_mmio_in_range(struct kvm_coalesced_mmio_dev *dev,
				   gpa_t addr, int len)
{
	struct kvm_coalesced_mmio_zone *zone;
	int i;

	/* Is the access within a batchable (coalesced) zone? */

	for (i = 0; i < dev->nb_zones; i++) {
		zone = &dev->zone[i];

		/* (addr, len) must be fully contained in
		 * (zone->addr, zone->size).
		 */

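		/* For instance, a 4-byte access ending exactly at
		 * zone->addr + zone->size is batched; an access that
		 * crosses the end of the zone is not.
		 */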
39 if (zone->addr <= addr &&
40 addr + len <= zone->addr + zone->size)
41 return 1;
42 }
43 return 0;
44}
45
static int coalesced_mmio_has_room(struct kvm_coalesced_mmio_dev *dev)
{
	struct kvm_coalesced_mmio_ring *ring;
	unsigned avail;

	/* Can we batch another entry?
	 *
	 * ring->last is the first free entry; check that we do not run
	 * into the first used entry (ring->first).  One entry is always
	 * left unused, so that a full ring can be distinguished from an
	 * empty one (first == last).
	 */
	ring = dev->kvm->coalesced_mmio_ring;
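	/* ring->first is advanced by userspace as it drains the ring, so
	 * a stale value here only makes us under-estimate the available
	 * room, which is the safe direction.
	 */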
	avail = (ring->first - ring->last - 1) % KVM_COALESCED_MMIO_MAX;
	if (avail == 0) {
		/* full */
		return 0;
	}

	return 1;
}

static int coalesced_mmio_write(struct kvm_io_device *this,
				gpa_t addr, int len, const void *val)
{
	struct kvm_coalesced_mmio_dev *dev = to_mmio(this);
	struct kvm_coalesced_mmio_ring *ring = dev->kvm->coalesced_mmio_ring;

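	/* Not a batchable access: -EOPNOTSUPP lets the caller fall back
	 * to the ordinary (non-coalesced) MMIO handling path.
	 */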
	if (!coalesced_mmio_in_range(dev, addr, len))
		return -EOPNOTSUPP;

	spin_lock(&dev->lock);

	if (!coalesced_mmio_has_room(dev)) {
		spin_unlock(&dev->lock);
		return -EOPNOTSUPP;
	}

	/* copy the data into the first free entry of the ring */

	ring->coalesced_mmio[ring->last].phys_addr = addr;
	ring->coalesced_mmio[ring->last].len = len;
	memcpy(ring->coalesced_mmio[ring->last].data, val, len);
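	/* Publish the entry before advancing 'last': the barrier makes
	 * sure the reader can only observe the new index after the entry
	 * contents are visible.
	 */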
	smp_wmb();
	ring->last = (ring->last + 1) % KVM_COALESCED_MMIO_MAX;
	spin_unlock(&dev->lock);
	return 0;
}

static void coalesced_mmio_destructor(struct kvm_io_device *this)
{
	struct kvm_coalesced_mmio_dev *dev = to_mmio(this);

	kfree(dev);
}

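/* No .read handler here: only writes can be coalesced, since a read
 * must return its result before the vcpu can continue.
 */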
static const struct kvm_io_device_ops coalesced_mmio_ops = {
	.write      = coalesced_mmio_write,
	.destructor = coalesced_mmio_destructor,
};

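/* Allocate the shared ring page and register the device on the MMIO
 * bus.  Userspace reaches the same page through the vcpu mmap area
 * (the page offset is advertised by KVM_CHECK_EXTENSION on
 * KVM_CAP_COALESCED_MMIO) and drains the ring from there; see the
 * sketch at the end of this file.
 */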
int kvm_coalesced_mmio_init(struct kvm *kvm)
{
	struct kvm_coalesced_mmio_dev *dev;
	struct page *page;
	int ret;

	ret = -ENOMEM;
	page = alloc_page(GFP_KERNEL | __GFP_ZERO);
	if (!page)
		goto out_err;
	kvm->coalesced_mmio_ring = page_address(page);

	ret = -ENOMEM;
	dev = kzalloc(sizeof(struct kvm_coalesced_mmio_dev), GFP_KERNEL);
	if (!dev)
		goto out_free_page;
	spin_lock_init(&dev->lock);
	kvm_iodevice_init(&dev->dev, &coalesced_mmio_ops);
	dev->kvm = kvm;
	kvm->coalesced_mmio_dev = dev;

	mutex_lock(&kvm->slots_lock);
	ret = kvm_io_bus_register_dev(kvm, KVM_MMIO_BUS, &dev->dev);
	mutex_unlock(&kvm->slots_lock);
	if (ret < 0)
		goto out_free_dev;

	return ret;

out_free_dev:
	kvm->coalesced_mmio_dev = NULL;
	kfree(dev);
out_free_page:
	kvm->coalesced_mmio_ring = NULL;
	__free_page(page);
out_err:
	return ret;
}

void kvm_coalesced_mmio_free(struct kvm *kvm)
{
	if (kvm->coalesced_mmio_ring)
		free_page((unsigned long)kvm->coalesced_mmio_ring);
}

int kvm_vm_ioctl_register_coalesced_mmio(struct kvm *kvm,
					 struct kvm_coalesced_mmio_zone *zone)
{
	struct kvm_coalesced_mmio_dev *dev = kvm->coalesced_mmio_dev;

	if (dev == NULL)
		return -ENXIO;

	mutex_lock(&kvm->slots_lock);
	if (dev->nb_zones >= KVM_COALESCED_MMIO_ZONE_MAX) {
		mutex_unlock(&kvm->slots_lock);
		return -ENOBUFS;
	}

	dev->zone[dev->nb_zones] = *zone;
	dev->nb_zones++;

	mutex_unlock(&kvm->slots_lock);
	return 0;
}

int kvm_vm_ioctl_unregister_coalesced_mmio(struct kvm *kvm,
					   struct kvm_coalesced_mmio_zone *zone)
{
	int i;
	struct kvm_coalesced_mmio_dev *dev = kvm->coalesced_mmio_dev;
	struct kvm_coalesced_mmio_zone *z;

	if (dev == NULL)
		return -ENXIO;

	mutex_lock(&kvm->slots_lock);

	i = dev->nb_zones;
	while (i) {
		z = &dev->zone[i - 1];

		/* Unregister every zone that is fully contained in
		 * (zone->addr, zone->size).
		 */

		if (zone->addr <= z->addr &&
		    z->addr + z->size <= zone->addr + zone->size) {
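			/* Remove by overwriting with the last zone;
			 * the ordering of dev->zone[] is not preserved.
			 */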
			dev->nb_zones--;
			*z = dev->zone[dev->nb_zones];
		}
		i--;
	}

	mutex_unlock(&kvm->slots_lock);

	return 0;
}
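
/*
 * For reference, a minimal sketch of the userspace side that pairs with
 * the producer above.  This is not part of this file: 'vm_fd', 'run'
 * (the mmapped vcpu region), 'ring_page_offset' (as returned by
 * KVM_CHECK_EXTENSION(KVM_CAP_COALESCED_MMIO)) and 'handle_write()'
 * are placeholders, and error handling is omitted.
 *
 *	struct kvm_coalesced_mmio_zone zone = {
 *		.addr = 0xa0000,	// hypothetical MMIO region
 *		.size = 0x10000,
 *	};
 *	ioctl(vm_fd, KVM_REGISTER_COALESCED_MMIO, &zone);
 *
 *	struct kvm_coalesced_mmio_ring *ring =
 *		(void *)((char *)run + ring_page_offset * PAGE_SIZE);
 *
 *	// Drain the ring after each KVM_RUN exit, before emulating
 *	// whatever I/O caused the exit.
 *	while (ring->first != ring->last) {
 *		struct kvm_coalesced_mmio *ent =
 *			&ring->coalesced_mmio[ring->first];
 *		handle_write(ent->phys_addr, ent->data, ent->len);
 *		// Finish reading the entry before freeing the slot
 *		// (a memory barrier on weakly ordered hosts), then
 *		// publish the new index.
 *		ring->first = (ring->first + 1) % KVM_COALESCED_MMIO_MAX;
 *	}
 */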