/*
 * KVM coalesced MMIO
 *
 * Copyright (c) 2008 Bull S.A.S.
 * Copyright 2009 Red Hat, Inc. and/or its affiliates.
 *
 * Author: Laurent Vivier <Laurent.Vivier@bull.net>
 *
 */
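
/*
 * Coalesced MMIO batches guest writes to registered MMIO zones into a ring
 * buffer shared with userspace instead of exiting to userspace for every
 * write; userspace drains the ring on its next return from KVM_RUN.
 */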

#include <kvm/iodev.h>

#include <linux/kvm_host.h>
#include <linux/slab.h>
#include <linux/kvm.h>

#include "coalesced_mmio.h"

static inline struct kvm_coalesced_mmio_dev *to_mmio(struct kvm_io_device *dev)
{
        return container_of(dev, struct kvm_coalesced_mmio_dev, dev);
}

static int coalesced_mmio_in_range(struct kvm_coalesced_mmio_dev *dev,
                                   gpa_t addr, int len)
{
        /* Is it in a batchable area?
         * (addr, len) must be fully contained in
         * (zone->addr, zone->size).
         */
        if (len < 0)
                return 0;
        if (addr + len < addr)
                return 0;
        if (addr < dev->zone.addr)
                return 0;
        if (addr + len > dev->zone.addr + dev->zone.size)
                return 0;
        return 1;
}

static int coalesced_mmio_has_room(struct kvm_coalesced_mmio_dev *dev, u32 last)
{
        struct kvm_coalesced_mmio_ring *ring;
        unsigned avail;

        /* Are we able to batch it? */

        /* last is the first free entry;
         * check that we don't run into the first used entry.
         * There is always one unused entry in the buffer.
         */
        ring = dev->kvm->coalesced_mmio_ring;
        avail = (ring->first - last - 1) % KVM_COALESCED_MMIO_MAX;
        if (avail == 0) {
                /* full */
                return 0;
        }

        return 1;
}

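/*
 * Write handler for the coalesced MMIO device on KVM_MMIO_BUS.  If the
 * access falls inside this device's zone, append it to the shared ring and
 * report it as handled; otherwise return -EOPNOTSUPP so the bus falls back
 * to ordinary MMIO handling.
 */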
static int coalesced_mmio_write(struct kvm_vcpu *vcpu,
                                struct kvm_io_device *this, gpa_t addr,
                                int len, const void *val)
{
        struct kvm_coalesced_mmio_dev *dev = to_mmio(this);
        struct kvm_coalesced_mmio_ring *ring = dev->kvm->coalesced_mmio_ring;
        __u32 insert;

        if (!coalesced_mmio_in_range(dev, addr, len))
                return -EOPNOTSUPP;

        spin_lock(&dev->kvm->ring_lock);

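        /*
         * The ring page is shared with (and updated by) userspace, so
         * ring->last cannot be trusted: read it only once and bounds-check
         * it before using it as an index.
         */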
        insert = READ_ONCE(ring->last);
        if (!coalesced_mmio_has_room(dev, insert) ||
            insert >= KVM_COALESCED_MMIO_MAX) {
                spin_unlock(&dev->kvm->ring_lock);
                return -EOPNOTSUPP;
        }

        /* copy data into the first free entry of the ring */

        ring->coalesced_mmio[insert].phys_addr = addr;
        ring->coalesced_mmio[insert].len = len;
        memcpy(ring->coalesced_mmio[insert].data, val, len);
        /* make the entry visible before publishing the new ring->last */
        smp_wmb();
        ring->last = (insert + 1) % KVM_COALESCED_MMIO_MAX;
        spin_unlock(&dev->kvm->ring_lock);
        return 0;
}

static void coalesced_mmio_destructor(struct kvm_io_device *this)
{
        struct kvm_coalesced_mmio_dev *dev = to_mmio(this);

        list_del(&dev->list);

        kfree(dev);
}

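/*
 * Only a write handler is provided: reads are never coalesced, so reads to
 * a registered zone take the normal MMIO path.
 */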
static const struct kvm_io_device_ops coalesced_mmio_ops = {
        .write      = coalesced_mmio_write,
        .destructor = coalesced_mmio_destructor,
};

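/*
 * Allocate the page backing the coalesced MMIO ring and initialize the
 * lock and zone list.  Userspace maps this page (elsewhere, at
 * KVM_COALESCED_MMIO_PAGE_OFFSET of the vCPU mmap area) to drain the ring.
 */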
int kvm_coalesced_mmio_init(struct kvm *kvm)
{
        struct page *page;
        int ret;

        ret = -ENOMEM;
        page = alloc_page(GFP_KERNEL | __GFP_ZERO);
        if (!page)
                goto out_err;

        ret = 0;
        kvm->coalesced_mmio_ring = page_address(page);

        /*
         * We're using this spinlock to synchronize access to the coalesced
         * ring.  The list doesn't need its own lock since device registration
         * and unregistration should only happen when kvm->slots_lock is held.
         */
        spin_lock_init(&kvm->ring_lock);
        INIT_LIST_HEAD(&kvm->coalesced_zones);

out_err:
        return ret;
}

void kvm_coalesced_mmio_free(struct kvm *kvm)
{
        if (kvm->coalesced_mmio_ring)
                free_page((unsigned long)kvm->coalesced_mmio_ring);
}

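/*
 * KVM_REGISTER_COALESCED_MMIO: create an I/O device covering the requested
 * zone and attach it to the MMIO bus so that writes to it are coalesced.
 */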
int kvm_vm_ioctl_register_coalesced_mmio(struct kvm *kvm,
                                         struct kvm_coalesced_mmio_zone *zone)
{
        int ret;
        struct kvm_coalesced_mmio_dev *dev;

        dev = kzalloc(sizeof(struct kvm_coalesced_mmio_dev), GFP_KERNEL);
        if (!dev)
                return -ENOMEM;

        kvm_iodevice_init(&dev->dev, &coalesced_mmio_ops);
        dev->kvm = kvm;
        dev->zone = *zone;

        mutex_lock(&kvm->slots_lock);
        ret = kvm_io_bus_register_dev(kvm, KVM_MMIO_BUS, zone->addr,
                                      zone->size, &dev->dev);
        if (ret < 0)
                goto out_free_dev;
        list_add_tail(&dev->list, &kvm->coalesced_zones);
        mutex_unlock(&kvm->slots_lock);

        return 0;

out_free_dev:
        mutex_unlock(&kvm->slots_lock);
        kfree(dev);

        return ret;
}

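/*
 * KVM_UNREGISTER_COALESCED_MMIO: remove every registered device whose zone
 * fully contains the requested (addr, size) range.
 */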
int kvm_vm_ioctl_unregister_coalesced_mmio(struct kvm *kvm,
                                           struct kvm_coalesced_mmio_zone *zone)
{
        struct kvm_coalesced_mmio_dev *dev, *tmp;

        mutex_lock(&kvm->slots_lock);

        list_for_each_entry_safe(dev, tmp, &kvm->coalesced_zones, list)
                if (coalesced_mmio_in_range(dev, zone->addr, zone->size)) {
                        kvm_io_bus_unregister_dev(kvm, KVM_MMIO_BUS, &dev->dev);
                        kvm_iodevice_destructor(&dev->dev);
                }

        mutex_unlock(&kvm->slots_lock);

        return 0;
}