/*
 * KVM coalesced MMIO
 *
 * Copyright (c) 2008 Bull S.A.S.
 * Copyright 2009 Red Hat, Inc. and/or its affiliates.
 *
 *  Author: Laurent Vivier <Laurent.Vivier@bull.net>
 *
 */

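/*
 * How this fits together (see the KVM API documentation for the
 * authoritative userspace contract; the sketch below is illustrative only):
 *
 * Coalesced MMIO lets a guest write to selected MMIO regions without taking
 * a heavyweight exit to userspace for every access.  Userspace registers
 * guest-physical zones with the KVM_REGISTER_COALESCED_MMIO ioctl; writes
 * hitting a registered zone are appended by coalesced_mmio_write() to a
 * single shared, zeroed page laid out as a struct kvm_coalesced_mmio_ring.
 * Userspace typically maps that page through the vcpu fd at offset
 * KVM_COALESCED_MMIO_PAGE_OFFSET * PAGE_SIZE and drains it on the next
 * exit, roughly as follows (handle_write() is a placeholder for the
 * device emulation, error handling omitted):
 *
 *	while (ring->first != ring->last) {
 *		struct kvm_coalesced_mmio *ent = &ring->coalesced_mmio[ring->first];
 *		handle_write(ent->phys_addr, ent->data, ent->len);
 *		ring->first = (ring->first + 1) % KVM_COALESCED_MMIO_MAX;
 *	}
 */
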
#include "iodev.h"

#include <linux/kvm_host.h>
#include <linux/slab.h>
#include <linux/kvm.h>

#include "coalesced_mmio.h"

static inline struct kvm_coalesced_mmio_dev *to_mmio(struct kvm_io_device *dev)
{
	return container_of(dev, struct kvm_coalesced_mmio_dev, dev);
}

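/*
 * Decide whether a write can be coalesced: there must be room in the ring
 * and (addr, len) must fall entirely inside a registered zone.  Note that
 * this check runs before dev->lock is taken in coalesced_mmio_write(), so
 * it conservatively requires KVM_MAX_VCPUS free slots - apparently so that
 * every vcpu can be racing to add an entry at once without overflowing
 * the ring.
 */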
static int coalesced_mmio_in_range(struct kvm_coalesced_mmio_dev *dev,
				   gpa_t addr, int len)
{
	struct kvm_coalesced_mmio_zone *zone;
	struct kvm_coalesced_mmio_ring *ring;
	unsigned avail;
	int i;

	/* Are we able to batch it? */

	/*
	 * ring->last is the first free entry; check that we do not run into
	 * the first used entry (ring->first).  One entry is always left
	 * unused so that a full ring can be told apart from an empty one.
	 */
	ring = dev->kvm->coalesced_mmio_ring;
	avail = (ring->first - ring->last - 1) % KVM_COALESCED_MMIO_MAX;
	if (avail < KVM_MAX_VCPUS) {
		/* full */
		return 0;
	}

	/* Is it in a batchable area? */

	for (i = 0; i < dev->nb_zones; i++) {
		zone = &dev->zone[i];

		/*
		 * (addr, len) must be fully included in
		 * (zone->addr, zone->size).
		 */
		if (zone->addr <= addr &&
		    addr + len <= zone->addr + zone->size)
			return 1;
	}
	return 0;
}

static int coalesced_mmio_write(struct kvm_io_device *this,
				gpa_t addr, int len, const void *val)
{
	struct kvm_coalesced_mmio_dev *dev = to_mmio(this);
	struct kvm_coalesced_mmio_ring *ring = dev->kvm->coalesced_mmio_ring;

	if (!coalesced_mmio_in_range(dev, addr, len))
		return -EOPNOTSUPP;

	spin_lock(&dev->lock);

	/* copy data into the first free entry of the ring */

	ring->coalesced_mmio[ring->last].phys_addr = addr;
	ring->coalesced_mmio[ring->last].len = len;
	memcpy(ring->coalesced_mmio[ring->last].data, val, len);

	/* make the entry visible before publishing it via ring->last */
	smp_wmb();
	ring->last = (ring->last + 1) % KVM_COALESCED_MMIO_MAX;
	spin_unlock(&dev->lock);
	return 0;
}

static void coalesced_mmio_destructor(struct kvm_io_device *this)
{
	struct kvm_coalesced_mmio_dev *dev = to_mmio(this);

	kfree(dev);
}

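/*
 * Only writes are coalesced.  No .read handler is provided, so a read to a
 * registered zone is not satisfied by this device and is handled through
 * the ordinary MMIO path instead.
 */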
static const struct kvm_io_device_ops coalesced_mmio_ops = {
	.write      = coalesced_mmio_write,
	.destructor = coalesced_mmio_destructor,
};

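/*
 * Set up coalesced MMIO for a VM: allocate the zeroed ring page that is
 * shared with userspace, create the device and register it on the MMIO
 * bus.  Called once while the VM is being created.
 */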
int kvm_coalesced_mmio_init(struct kvm *kvm)
{
	struct kvm_coalesced_mmio_dev *dev;
	struct page *page;
	int ret;

	ret = -ENOMEM;
	page = alloc_page(GFP_KERNEL | __GFP_ZERO);
	if (!page)
		goto out_err;
	kvm->coalesced_mmio_ring = page_address(page);

	ret = -ENOMEM;
	dev = kzalloc(sizeof(struct kvm_coalesced_mmio_dev), GFP_KERNEL);
	if (!dev)
		goto out_free_page;
	spin_lock_init(&dev->lock);
	kvm_iodevice_init(&dev->dev, &coalesced_mmio_ops);
	dev->kvm = kvm;
	kvm->coalesced_mmio_dev = dev;

	mutex_lock(&kvm->slots_lock);
	ret = kvm_io_bus_register_dev(kvm, KVM_MMIO_BUS, &dev->dev);
	mutex_unlock(&kvm->slots_lock);
	if (ret < 0)
		goto out_free_dev;

	return ret;

out_free_dev:
	kvm->coalesced_mmio_dev = NULL;
	kfree(dev);
out_free_page:
	kvm->coalesced_mmio_ring = NULL;
	__free_page(page);
out_err:
	return ret;
}

void kvm_coalesced_mmio_free(struct kvm *kvm)
{
	if (kvm->coalesced_mmio_ring)
		free_page((unsigned long)kvm->coalesced_mmio_ring);
}

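/*
 * Handler for the KVM_REGISTER_COALESCED_MMIO ioctl: record one more zone
 * in which writes may be coalesced.  A minimal userspace call might look
 * like this (sketch only; vm_fd is the VM file descriptor and the values
 * are hypothetical examples):
 *
 *	struct kvm_coalesced_mmio_zone zone = {
 *		.addr = 0xfebf0000,
 *		.size = 0x1000,
 *	};
 *	ioctl(vm_fd, KVM_REGISTER_COALESCED_MMIO, &zone);
 */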
int kvm_vm_ioctl_register_coalesced_mmio(struct kvm *kvm,
					 struct kvm_coalesced_mmio_zone *zone)
{
	struct kvm_coalesced_mmio_dev *dev = kvm->coalesced_mmio_dev;

	if (dev == NULL)
		return -ENXIO;

	mutex_lock(&kvm->slots_lock);
	if (dev->nb_zones >= KVM_COALESCED_MMIO_ZONE_MAX) {
		mutex_unlock(&kvm->slots_lock);
		return -ENOBUFS;
	}

	dev->zone[dev->nb_zones] = *zone;
	dev->nb_zones++;

	mutex_unlock(&kvm->slots_lock);
	return 0;
}

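/*
 * Handler for the KVM_UNREGISTER_COALESCED_MMIO ioctl: drop every
 * registered zone that is fully contained in the (addr, size) range passed
 * by userspace.  Removal swaps the last zone into the freed slot, so the
 * order of dev->zone[] is not preserved.
 */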
int kvm_vm_ioctl_unregister_coalesced_mmio(struct kvm *kvm,
					   struct kvm_coalesced_mmio_zone *zone)
{
	int i;
	struct kvm_coalesced_mmio_dev *dev = kvm->coalesced_mmio_dev;
	struct kvm_coalesced_mmio_zone *z;

	if (dev == NULL)
		return -ENXIO;

	mutex_lock(&kvm->slots_lock);

	i = dev->nb_zones;
	while (i) {
		z = &dev->zone[i - 1];

		/*
		 * Unregister all zones included in
		 * (zone->addr, zone->size).
		 */
		if (zone->addr <= z->addr &&
		    z->addr + z->size <= zone->addr + zone->size) {
			dev->nb_zones--;
			*z = dev->zone[dev->nb_zones];
		}
		i--;
	}

	mutex_unlock(&kvm->slots_lock);

	return 0;
}