/*
 * Copyright (c) 2006, Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
 * Place - Suite 330, Boston, MA 02111-1307 USA.
 *
 * Copyright (C) 2006-2008 Intel Corporation
 * Copyright IBM Corporation, 2008
 * Copyright 2010 Red Hat, Inc. and/or its affiliates.
 *
 * Author: Allen M. Kay <allen.m.kay@intel.com>
 * Author: Weidong Han <weidong.han@intel.com>
 * Author: Ben-Ami Yassour <benami@il.ibm.com>
 */

#include <linux/list.h>
#include <linux/kvm_host.h>
#include <linux/pci.h>
#include <linux/dmar.h>
#include <linux/iommu.h>
#include <linux/intel-iommu.h>

static int kvm_iommu_unmap_memslots(struct kvm *kvm);
static void kvm_iommu_put_pages(struct kvm *kvm,
				gfn_t base_gfn, unsigned long npages);

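/*
 * Pin the host pages backing [gfn, gfn + size) in @slot.  Each
 * gfn_to_pfn_memslot() call takes a reference on the page; only the
 * first lookup is checked for errors, and its pfn is returned.
 */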
static pfn_t kvm_pin_pages(struct kvm *kvm, struct kvm_memory_slot *slot,
			   gfn_t gfn, unsigned long size)
{
	gfn_t end_gfn;
	pfn_t pfn;

	pfn     = gfn_to_pfn_memslot(kvm, slot, gfn);
	end_gfn = gfn + (size >> PAGE_SHIFT);
	gfn    += 1;

	if (is_error_pfn(pfn))
		return pfn;

	while (gfn < end_gfn)
		gfn_to_pfn_memslot(kvm, slot, gfn++);

	return pfn;
}

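/*
 * Map a memslot's guest physical range 1:1 into the IOMMU domain (the
 * gpa is used directly as the IO virtual address).  Each chunk is
 * mapped with the largest host page size that stays inside the memslot
 * and keeps the gfn aligned, and is pinned first because teardown
 * unmaps and unpins in 4kb steps.
 */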
int kvm_iommu_map_pages(struct kvm *kvm, struct kvm_memory_slot *slot)
{
	gfn_t gfn, end_gfn;
	pfn_t pfn;
	int r = 0;
	struct iommu_domain *domain = kvm->arch.iommu_domain;
	int flags;

	/* check if iommu exists and is in use */
	if (!domain)
		return 0;

	gfn     = slot->base_gfn;
	end_gfn = gfn + slot->npages;

	flags = IOMMU_READ | IOMMU_WRITE;
	if (kvm->arch.iommu_flags & KVM_IOMMU_CACHE_COHERENCY)
		flags |= IOMMU_CACHE;

	while (gfn < end_gfn) {
		unsigned long page_size;

		/* Check if already mapped */
		if (iommu_iova_to_phys(domain, gfn_to_gpa(gfn))) {
			gfn += 1;
			continue;
		}

		/* Get the page size we could use to map */
		page_size = kvm_host_page_size(kvm, gfn);

		/* Make sure the page_size does not exceed the memslot */
		while ((gfn + (page_size >> PAGE_SHIFT)) > end_gfn)
			page_size >>= 1;

		/* Make sure gfn is aligned to the page size we want to map */
		while ((gfn << PAGE_SHIFT) & (page_size - 1))
			page_size >>= 1;

		/*
		 * Pin all pages we are about to map in memory. This is
		 * important because we unmap and unpin in 4kb steps later.
		 */
		pfn = kvm_pin_pages(kvm, slot, gfn, page_size);
		if (is_error_pfn(pfn)) {
			gfn += 1;
			continue;
		}

		/* Map into IO address space */
		r = iommu_map(domain, gfn_to_gpa(gfn), pfn_to_hpa(pfn),
			      get_order(page_size), flags);
		if (r) {
			printk(KERN_ERR "kvm_iommu_map_address: "
			       "iommu failed to map pfn=%lx\n", pfn);
			goto unmap_pages;
		}

		gfn += page_size >> PAGE_SHIFT;
	}

	return 0;

unmap_pages:
	kvm_iommu_put_pages(kvm, slot->base_gfn, gfn);
	return r;
}

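/*
 * Map every memslot into the IOMMU domain, stopping at the first
 * failure.
 */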
static int kvm_iommu_map_memslots(struct kvm *kvm)
{
	int i, r = 0;
	struct kvm_memslots *slots;

	slots = kvm_memslots(kvm);

	for (i = 0; i < slots->nmemslots; i++) {
		r = kvm_iommu_map_pages(kvm, &slots->memslots[i]);
		if (r)
			break;
	}

	return r;
}

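/*
 * Attach an assigned PCI device to the VM's IOMMU domain.  If the
 * domain turns out to support cache coherency while the memslots were
 * mapped without IOMMU_CACHE, all mappings are rebuilt with the new
 * flags.
 */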
int kvm_assign_device(struct kvm *kvm,
		      struct kvm_assigned_dev_kernel *assigned_dev)
{
	struct pci_dev *pdev = NULL;
	struct iommu_domain *domain = kvm->arch.iommu_domain;
	int r, last_flags;

	/* check if iommu exists and is in use */
	if (!domain)
		return 0;

	pdev = assigned_dev->dev;
	if (pdev == NULL)
		return -ENODEV;

	r = iommu_attach_device(domain, &pdev->dev);
	if (r) {
		printk(KERN_ERR "assign device %x:%x:%x.%x failed\n",
			pci_domain_nr(pdev->bus),
			pdev->bus->number,
			PCI_SLOT(pdev->devfn),
			PCI_FUNC(pdev->devfn));
		return r;
	}

	last_flags = kvm->arch.iommu_flags;
	if (iommu_domain_has_cap(kvm->arch.iommu_domain,
				 IOMMU_CAP_CACHE_COHERENCY))
		kvm->arch.iommu_flags |= KVM_IOMMU_CACHE_COHERENCY;

	/* Check if we need to update the IOMMU page table for guest memory */
	if ((last_flags ^ kvm->arch.iommu_flags) ==
			KVM_IOMMU_CACHE_COHERENCY) {
		kvm_iommu_unmap_memslots(kvm);
		r = kvm_iommu_map_memslots(kvm);
		if (r)
			goto out_unmap;
	}

	printk(KERN_DEBUG "assign device %x:%x:%x.%x\n",
		assigned_dev->host_segnr,
		assigned_dev->host_busnr,
		PCI_SLOT(assigned_dev->host_devfn),
		PCI_FUNC(assigned_dev->host_devfn));

	return 0;
out_unmap:
	kvm_iommu_unmap_memslots(kvm);
	return r;
}

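/* Detach an assigned PCI device from the VM's IOMMU domain. */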
int kvm_deassign_device(struct kvm *kvm,
			struct kvm_assigned_dev_kernel *assigned_dev)
{
	struct iommu_domain *domain = kvm->arch.iommu_domain;
	struct pci_dev *pdev = NULL;

	/* check if iommu exists and is in use */
	if (!domain)
		return 0;

	pdev = assigned_dev->dev;
	if (pdev == NULL)
		return -ENODEV;

	iommu_detach_device(domain, &pdev->dev);

	printk(KERN_DEBUG "deassign device %x:%x:%x.%x\n",
		assigned_dev->host_segnr,
		assigned_dev->host_busnr,
		PCI_SLOT(assigned_dev->host_devfn),
		PCI_FUNC(assigned_dev->host_devfn));

	return 0;
}

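/*
 * Allocate the VM's IOMMU domain and map all existing memslots into it,
 * so that assigned devices can DMA to guest memory.
 */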
int kvm_iommu_map_guest(struct kvm *kvm)
{
	int r;

	if (!iommu_found()) {
		printk(KERN_ERR "%s: iommu not found\n", __func__);
		return -ENODEV;
	}

	kvm->arch.iommu_domain = iommu_domain_alloc();
	if (!kvm->arch.iommu_domain)
		return -ENOMEM;

	r = kvm_iommu_map_memslots(kvm);
	if (r)
		goto out_unmap;

	return 0;

out_unmap:
	kvm_iommu_unmap_memslots(kvm);
	return r;
}

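/* Release the references that kvm_pin_pages() took on @npages pages. */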
static void kvm_unpin_pages(struct kvm *kvm, pfn_t pfn, unsigned long npages)
{
	unsigned long i;

	for (i = 0; i < npages; ++i)
		kvm_release_pfn_clean(pfn + i);
}

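/*
 * Unmap [base_gfn, base_gfn + npages) from the IOMMU domain and unpin
 * the backing host pages.  iommu_unmap() returns the order of the page
 * it actually unmapped, so the loop advances by a whole (possibly
 * large) page at a time.
 */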
static void kvm_iommu_put_pages(struct kvm *kvm,
				gfn_t base_gfn, unsigned long npages)
{
	struct iommu_domain *domain;
	gfn_t end_gfn, gfn;
	pfn_t pfn;
	u64 phys;

	domain  = kvm->arch.iommu_domain;
	end_gfn = base_gfn + npages;
	gfn     = base_gfn;

	/* check if iommu exists and is in use */
	if (!domain)
		return;

	while (gfn < end_gfn) {
		unsigned long unmap_pages;
		int order;

		/* Get physical address */
		phys = iommu_iova_to_phys(domain, gfn_to_gpa(gfn));
		pfn  = phys >> PAGE_SHIFT;

		/* Unmap address from IO address space */
		order       = iommu_unmap(domain, gfn_to_gpa(gfn), 0);
		unmap_pages = 1ULL << order;

		/* Unpin all pages we just unmapped to not leak any memory */
		kvm_unpin_pages(kvm, pfn, unmap_pages);

		gfn += unmap_pages;
	}
}

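/* Unmap and unpin the pages of every memslot. */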
static int kvm_iommu_unmap_memslots(struct kvm *kvm)
{
	int i;
	struct kvm_memslots *slots;

	slots = kvm_memslots(kvm);

	for (i = 0; i < slots->nmemslots; i++) {
		kvm_iommu_put_pages(kvm, slots->memslots[i].base_gfn,
				    slots->memslots[i].npages);
	}

	return 0;
}

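/*
 * Tear down the VM's IOMMU state: unmap all memslots, then free the
 * domain itself.
 */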
int kvm_iommu_unmap_guest(struct kvm *kvm)
{
	struct iommu_domain *domain = kvm->arch.iommu_domain;

	/* check if iommu exists and is in use */
	if (!domain)
		return 0;

	kvm_iommu_unmap_memslots(kvm);
	iommu_domain_free(domain);
	return 0;
}