/*
 * Copyright 2011 Paul Mackerras, IBM Corp. <paulus@au1.ibm.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2, as
 * published by the Free Software Foundation.
 */

#include <linux/cpu.h>
#include <linux/kvm_host.h>
#include <linux/preempt.h>
#include <linux/export.h>
#include <linux/sched.h>
#include <linux/spinlock.h>
#include <linux/bootmem.h>
#include <linux/init.h>
#include <linux/memblock.h>
#include <linux/sizes.h>
#include <linux/cma.h>

#include <asm/cputable.h>
#include <asm/kvm_ppc.h>
#include <asm/kvm_book3s.h>

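/*
 * Granularity of the KVM CMA area: KVM_CMA_CHUNK_ORDER - PAGE_SHIFT is
 * handed to cma_declare_contiguous() in kvm_cma_reserve() below, making
 * the CMA bitmap track this area in 256kB chunks.
 */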
#define KVM_CMA_CHUNK_ORDER	18

/*
 * Hash page table alignment on newer CPUs (those with CPU_FTR_ARCH_206)
 * need only be a power of 2; we use a fixed 256kB alignment.  Older CPUs
 * require the HPT to be aligned to a multiple of its size instead.
 */
#define HPT_ALIGN_PAGES		((1 << 18) >> PAGE_SHIFT) /* 256k */
/*
 * By default we reserve 5% of memory for hash page table allocation.
 */
static unsigned long kvm_cma_resv_ratio = 5;
/*
 * We allocate RMAs (real mode areas) for KVM guests from the KVM CMA area.
 * Each RMA has to be physically contiguous, a power of 2 in size, and of
 * a size that the hardware supports.  PPC970 and POWER7 support 64MB,
 * 128MB and 256MB, and other larger sizes.  Since we are unlikely to be
 * able to allocate that much physically contiguous memory after the
 * system is up and running, we preallocate a set of RMAs in early boot
 * using CMA.
 */
unsigned long kvm_rma_pages = (1 << 27) >> PAGE_SHIFT;	/* 128MB */
EXPORT_SYMBOL_GPL(kvm_rma_pages);

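/* CMA area, set up in kvm_cma_reserve(), backing RMA and HPT allocations. */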
static struct cma *kvm_cma;

/* Work out RMLS (real mode limit selector) field value for a given RMA size.
   Assumes POWER7 or PPC970. */
static inline int lpcr_rmls(unsigned long rma_size)
{
	switch (rma_size) {
	case 32ul << 20:	/* 32 MB */
		if (cpu_has_feature(CPU_FTR_ARCH_206))
			return 8;	/* only supported on POWER7 */
		return -1;
	case 64ul << 20:	/* 64 MB */
		return 3;
	case 128ul << 20:	/* 128 MB */
		return 7;
	case 256ul << 20:	/* 256 MB */
		return 4;
	case 1ul << 30:		/* 1 GB */
		return 2;
	case 16ul << 30:	/* 16 GB */
		return 1;
	case 256ul << 30:	/* 256 GB */
		return 0;
	default:
		return -1;
	}
}

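/*
 * Parse the kvm_rma_size=<size> boot parameter (memparse() accepts
 * suffixes, e.g. kvm_rma_size=256M), rejecting any size that lpcr_rmls()
 * cannot encode.
 */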
static int __init early_parse_rma_size(char *p)
{
	unsigned long kvm_rma_size;

	pr_debug("%s(%s)\n", __func__, p);
	if (!p)
		return -EINVAL;
	kvm_rma_size = memparse(p, &p);
	/*
	 * Check that the requested size is one supported in hardware.
	 */
	if (lpcr_rmls(kvm_rma_size) < 0) {
		pr_err("RMA size of 0x%lx not supported\n", kvm_rma_size);
		return -EINVAL;
	}
	kvm_rma_pages = kvm_rma_size >> PAGE_SHIFT;
	return 0;
}
early_param("kvm_rma_size", early_parse_rma_size);

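/*
 * Allocate one of the preallocated RMAs (kvm_rma_pages in size) from the
 * CMA area and return a refcounted descriptor for it, or NULL on failure.
 * A sketch of the expected calling pattern (the real callers live in the
 * HV KVM code and may differ):
 *
 *	struct kvm_rma_info *ri = kvm_alloc_rma();
 *	if (!ri)
 *		return -ENOMEM;
 *	...use ri->base_pfn to map the RMA into the guest...
 *	kvm_release_rma(ri);
 */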
struct kvm_rma_info *kvm_alloc_rma(void)
{
	struct page *page;
	struct kvm_rma_info *ri;

	ri = kmalloc(sizeof(struct kvm_rma_info), GFP_KERNEL);
	if (!ri)
		return NULL;
	page = cma_alloc(kvm_cma, kvm_rma_pages, get_order(kvm_rma_pages));
	if (!page)
		goto err_out;
	atomic_set(&ri->use_count, 1);
	ri->base_pfn = page_to_pfn(page);
	return ri;
err_out:
	kfree(ri);
	return NULL;
}
EXPORT_SYMBOL_GPL(kvm_alloc_rma);

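/* Drop a reference to an RMA; the last put releases it back to the CMA area. */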
void kvm_release_rma(struct kvm_rma_info *ri)
{
	if (atomic_dec_and_test(&ri->use_count)) {
		cma_release(kvm_cma, pfn_to_page(ri->base_pfn), kvm_rma_pages);
		kfree(ri);
	}
}
EXPORT_SYMBOL_GPL(kvm_release_rma);

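/*
 * Parse the kvm_cma_resv_ratio=<n> boot parameter: the percentage of
 * memory to reserve for the KVM CMA area (5 by default).
 */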
static int __init early_parse_kvm_cma_resv(char *p)
{
	pr_debug("%s(%s)\n", __func__, p);
	if (!p)
		return -EINVAL;
	return kstrtoul(p, 0, &kvm_cma_resv_ratio);
}
early_param("kvm_cma_resv_ratio", early_parse_kvm_cma_resv);

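/*
 * Allocate an HPT of nr_pages from the CMA area, aligned to HPT_ALIGN_PAGES
 * on CPU_FTR_ARCH_206 CPUs and to its own size on older ones.
 */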
struct page *kvm_alloc_hpt(unsigned long nr_pages)
{
	unsigned long align_pages = HPT_ALIGN_PAGES;

	VM_BUG_ON(get_order(nr_pages) < KVM_CMA_CHUNK_ORDER - PAGE_SHIFT);

	/* Old CPUs require HPT aligned on a multiple of its size */
	if (!cpu_has_feature(CPU_FTR_ARCH_206))
		align_pages = nr_pages;
	return cma_alloc(kvm_cma, nr_pages, get_order(align_pages));
}
EXPORT_SYMBOL_GPL(kvm_alloc_hpt);

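/* Return an HPT previously obtained from kvm_alloc_hpt() to the CMA area. */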
void kvm_release_hpt(struct page *page, unsigned long nr_pages)
{
	cma_release(kvm_cma, page, nr_pages);
}
EXPORT_SYMBOL_GPL(kvm_release_hpt);

/**
 * kvm_cma_reserve() - reserve area for KVM hash page table
 *
 * This function reserves memory from the early allocator. It should be
 * called by arch-specific code once the early allocator (memblock or bootmem)
 * has been activated and all other subsystems have already allocated/reserved
 * memory.
 */
void __init kvm_cma_reserve(void)
{
	unsigned long align_size;
	struct memblock_region *reg;
	phys_addr_t selected_size = 0;
	/*
	 * We cannot use memblock_phys_mem_size() here, because
	 * memblock_analyze() has not been called yet.
	 */
	for_each_memblock(memory, reg)
		selected_size += memblock_region_memory_end_pfn(reg) -
				 memblock_region_memory_base_pfn(reg);

	selected_size = (selected_size * kvm_cma_resv_ratio / 100) << PAGE_SHIFT;
	if (selected_size) {
		pr_debug("%s: reserving %ld MiB for global area\n", __func__,
			 (unsigned long)selected_size / SZ_1M);
		/*
		 * Old CPUs require the HPT to be aligned to a multiple of its
		 * size, so for them make the alignment the maximum size we
		 * could request.
		 */
		if (!cpu_has_feature(CPU_FTR_ARCH_206))
			align_size = __rounddown_pow_of_two(selected_size);
		else
			align_size = HPT_ALIGN_PAGES << PAGE_SHIFT;

		align_size = max(kvm_rma_pages << PAGE_SHIFT, align_size);
		cma_declare_contiguous(selected_size, 0, 0, align_size,
			KVM_CMA_CHUNK_ORDER - PAGE_SHIFT, &kvm_cma, false);
	}
}

/*
 * When running HV mode KVM we need to block certain operations while KVM VMs
 * exist in the system. We use a counter of VMs to track this.
 *
 * One of the operations we need to block is onlining of secondaries, so we
 * protect hv_vm_count with get/put_online_cpus().
 */
static atomic_t hv_vm_count;

void kvm_hv_vm_activated(void)
{
	get_online_cpus();
	atomic_inc(&hv_vm_count);
	put_online_cpus();
}
EXPORT_SYMBOL_GPL(kvm_hv_vm_activated);

void kvm_hv_vm_deactivated(void)
{
	get_online_cpus();
	atomic_dec(&hv_vm_count);
	put_online_cpus();
}
EXPORT_SYMBOL_GPL(kvm_hv_vm_deactivated);

bool kvm_hv_mode_active(void)
{
	return atomic_read(&hv_vm_count) != 0;
}