// SPDX-License-Identifier: GPL-2.0
/*
 * KVM coalesced MMIO
 *
 * Copyright (c) 2008 Bull S.A.S.
 * Copyright 2009 Red Hat, Inc. and/or its affiliates.
 *
 *  Author: Laurent Vivier <Laurent.Vivier@bull.net>
 *
 */

#include <kvm/iodev.h>

#include <linux/kvm_host.h>
#include <linux/slab.h>
#include <linux/kvm.h>

#include "coalesced_mmio.h"

static inline struct kvm_coalesced_mmio_dev *to_mmio(struct kvm_io_device *dev)
{
	return container_of(dev, struct kvm_coalesced_mmio_dev, dev);
}

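/*
 * Check whether the access (addr, len) lies entirely within this
 * device's registered zone.  The "addr + len < addr" test rejects
 * lengths that would wrap around the end of the address space.
 */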
static int coalesced_mmio_in_range(struct kvm_coalesced_mmio_dev *dev,
				   gpa_t addr, int len)
{
	/* Is the access within a batchable area?
	 * (addr, len) must be fully contained in
	 * (zone->addr, zone->size).
	 */
	if (len < 0)
		return 0;
	if (addr + len < addr)
		return 0;
	if (addr < dev->zone.addr)
		return 0;
	if (addr + len > dev->zone.addr + dev->zone.size)
		return 0;
	return 1;
}

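/*
 * The ring deliberately keeps one slot unused so that first == last
 * always means "empty", never "full".  The unsigned arithmetic below
 * handles wraparound: e.g. with a capacity of 8, first == 5 and
 * last == 4 gives avail == (5 - 4 - 1) % 8 == 0, i.e. no room left.
 */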
static int coalesced_mmio_has_room(struct kvm_coalesced_mmio_dev *dev)
{
	struct kvm_coalesced_mmio_ring *ring;
	unsigned avail;

	/* Are we able to batch it? */

	/* last is the first free entry;
	 * check that we don't run into the first used entry.
	 * There is always one unused entry in the buffer.
	 */
	ring = dev->kvm->coalesced_mmio_ring;
	avail = (ring->first - ring->last - 1) % KVM_COALESCED_MMIO_MAX;
	if (avail == 0) {
		/* full */
		return 0;
	}

	return 1;
}

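/*
 * Producer side of the ring.  The spinlock serializes concurrent vCPU
 * writers; the smp_wmb() orders the entry's payload before the update
 * of ring->last that publishes it to the userspace consumer.
 */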
static int coalesced_mmio_write(struct kvm_vcpu *vcpu,
				struct kvm_io_device *this, gpa_t addr,
				int len, const void *val)
{
	struct kvm_coalesced_mmio_dev *dev = to_mmio(this);
	struct kvm_coalesced_mmio_ring *ring = dev->kvm->coalesced_mmio_ring;

	if (!coalesced_mmio_in_range(dev, addr, len))
		return -EOPNOTSUPP;

	spin_lock(&dev->kvm->ring_lock);

	if (!coalesced_mmio_has_room(dev)) {
		spin_unlock(&dev->kvm->ring_lock);
		return -EOPNOTSUPP;
	}

	/* copy data into the first free entry of the ring */

	ring->coalesced_mmio[ring->last].phys_addr = addr;
	ring->coalesced_mmio[ring->last].len = len;
	memcpy(ring->coalesced_mmio[ring->last].data, val, len);
	smp_wmb();
	ring->last = (ring->last + 1) % KVM_COALESCED_MMIO_MAX;
	spin_unlock(&dev->kvm->ring_lock);
	return 0;
}

static void coalesced_mmio_destructor(struct kvm_io_device *this)
{
	struct kvm_coalesced_mmio_dev *dev = to_mmio(this);

	list_del(&dev->list);

	kfree(dev);
}

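/*
 * No .read handler: only writes are coalesced.  A read to a registered
 * zone is not claimed by this device and takes the normal MMIO exit to
 * userspace.
 */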
static const struct kvm_io_device_ops coalesced_mmio_ops = {
	.write      = coalesced_mmio_write,
	.destructor = coalesced_mmio_destructor,
};

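/*
 * The ring occupies a single zeroed page; userspace maps it through the
 * vcpu mmap area at KVM_COALESCED_MMIO_PAGE_OFFSET and consumes entries
 * between ring->first and ring->last.
 */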
int kvm_coalesced_mmio_init(struct kvm *kvm)
{
	struct page *page;
	int ret;

	ret = -ENOMEM;
	page = alloc_page(GFP_KERNEL | __GFP_ZERO);
	if (!page)
		goto out_err;

	ret = 0;
	kvm->coalesced_mmio_ring = page_address(page);

	/*
	 * We're using this spinlock to sync access to the coalesced ring.
	 * The list doesn't need its own lock since device registration and
	 * unregistration should only happen when kvm->slots_lock is held.
	 */
	spin_lock_init(&kvm->ring_lock);
	INIT_LIST_HEAD(&kvm->coalesced_zones);

out_err:
	return ret;
}

void kvm_coalesced_mmio_free(struct kvm *kvm)
{
	if (kvm->coalesced_mmio_ring)
		free_page((unsigned long)kvm->coalesced_mmio_ring);
}

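/*
 * Zones are added with the KVM_REGISTER_COALESCED_MMIO vm ioctl.  A
 * minimal userspace sketch (vm fd and guest-physical range are
 * illustrative only; error handling omitted):
 *
 *	struct kvm_coalesced_mmio_zone zone = {
 *		.addr = 0xf0000000,	// hypothetical device range
 *		.size = 0x1000,
 *	};
 *	ioctl(vm_fd, KVM_REGISTER_COALESCED_MMIO, &zone);
 */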
int kvm_vm_ioctl_register_coalesced_mmio(struct kvm *kvm,
					 struct kvm_coalesced_mmio_zone *zone)
{
	int ret;
	struct kvm_coalesced_mmio_dev *dev;

	dev = kzalloc(sizeof(struct kvm_coalesced_mmio_dev), GFP_KERNEL);
	if (!dev)
		return -ENOMEM;

	kvm_iodevice_init(&dev->dev, &coalesced_mmio_ops);
	dev->kvm = kvm;
	dev->zone = *zone;

	mutex_lock(&kvm->slots_lock);
	ret = kvm_io_bus_register_dev(kvm, KVM_MMIO_BUS, zone->addr,
				      zone->size, &dev->dev);
	if (ret < 0)
		goto out_free_dev;
	list_add_tail(&dev->list, &kvm->coalesced_zones);
	mutex_unlock(&kvm->slots_lock);

	return 0;

out_free_dev:
	mutex_unlock(&kvm->slots_lock);
	kfree(dev);

	return ret;
}

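/*
 * KVM_UNREGISTER_COALESCED_MMIO: a device is removed when the range
 * passed from userspace is fully contained in that device's zone,
 * which is why coalesced_mmio_in_range() is reused below.
 */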
int kvm_vm_ioctl_unregister_coalesced_mmio(struct kvm *kvm,
					   struct kvm_coalesced_mmio_zone *zone)
{
	struct kvm_coalesced_mmio_dev *dev, *tmp;

	mutex_lock(&kvm->slots_lock);

	list_for_each_entry_safe(dev, tmp, &kvm->coalesced_zones, list)
		if (coalesced_mmio_in_range(dev, zone->addr, zone->size)) {
			kvm_io_bus_unregister_dev(kvm, KVM_MMIO_BUS, &dev->dev);
			kvm_iodevice_destructor(&dev->dev);
		}

	mutex_unlock(&kvm->slots_lock);

	return 0;
}