/*
 * Copyright 2011 Paul Mackerras, IBM Corp. <paulus@au1.ibm.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2, as
 * published by the Free Software Foundation.
 */

#include <linux/kvm_host.h>
#include <linux/preempt.h>
#include <linux/export.h>
#include <linux/sched.h>
#include <linux/spinlock.h>
#include <linux/bootmem.h>
#include <linux/init.h>
#include <linux/memblock.h>
#include <linux/sizes.h>

#include <asm/cputable.h>
#include <asm/kvm_ppc.h>
#include <asm/kvm_book3s.h>

#include "book3s_hv_cma.h"

#define KVM_LINEAR_RMA		0
#define KVM_LINEAR_HPT		1

static void __init kvm_linear_init_one(ulong size, int count, int type);
static struct kvmppc_linear_info *kvm_alloc_linear(int type);
static void kvm_release_linear(struct kvmppc_linear_info *ri);
/*
 * On newer CPUs (CPU_FTR_ARCH_206) the hash page table alignment
 * must be a power of 2.
 */
#define HPT_ALIGN_PAGES		((1 << 18) >> PAGE_SHIFT) /* 256k */
/*
 * By default we reserve 5% of memory for hash pagetable allocation.
 */
static unsigned long kvm_cma_resv_ratio = 5;

/*************** RMA *************/

/*
 * This maintains a list of RMAs (real mode areas) for KVM guests to use.
 * Each RMA has to be physically contiguous and of a size that the
 * hardware supports.  PPC970 and POWER7 support 64MB, 128MB and 256MB,
 * and other larger sizes.  Since we are unlikely to be able to allocate
 * that much physically contiguous memory after the system is up and
 * running, we preallocate a set of RMAs in early boot for KVM to use.
 */
static unsigned long kvm_rma_size = 64 << 20;	/* 64MB */
static unsigned long kvm_rma_count;

/* Work out RMLS (real mode limit selector) field value for a given RMA size.
   Assumes POWER7 or PPC970. */
static inline int lpcr_rmls(unsigned long rma_size)
{
	switch (rma_size) {
	case 32ul << 20:	/* 32 MB */
		if (cpu_has_feature(CPU_FTR_ARCH_206))
			return 8;	/* only supported on POWER7 */
		return -1;
	case 64ul << 20:	/* 64 MB */
		return 3;
	case 128ul << 20:	/* 128 MB */
		return 7;
	case 256ul << 20:	/* 256 MB */
		return 4;
	case 1ul << 30:		/* 1 GB */
		return 2;
	case 16ul << 30:	/* 16 GB */
		return 1;
	case 256ul << 30:	/* 256 GB */
		return 0;
	default:
		return -1;
	}
}
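
/*
 * A return value of -1 marks an unsupported size; kvm_linear_init()
 * below checks for that before preallocating RMAs.  The encoded value
 * is destined for the hardware's RMLS field (LPCR[RMLS] on POWER7).
 */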

static int __init early_parse_rma_size(char *p)
{
	if (!p)
		return 1;

	kvm_rma_size = memparse(p, &p);

	return 0;
}
early_param("kvm_rma_size", early_parse_rma_size);

static int __init early_parse_rma_count(char *p)
{
	if (!p)
		return 1;

	kvm_rma_count = simple_strtoul(p, NULL, 0);

	return 0;
}
early_param("kvm_rma_count", early_parse_rma_count);
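
/*
 * Illustrative usage (not from this file): booting with
 * "kvm_rma_size=128M kvm_rma_count=4" preallocates four 128MB RMAs.
 * memparse() accepts the usual K/M/G size suffixes.
 */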

struct kvmppc_linear_info *kvm_alloc_rma(void)
{
	return kvm_alloc_linear(KVM_LINEAR_RMA);
}
EXPORT_SYMBOL_GPL(kvm_alloc_rma);

void kvm_release_rma(struct kvmppc_linear_info *ri)
{
	kvm_release_linear(ri);
}
EXPORT_SYMBOL_GPL(kvm_release_rma);

static int __init early_parse_kvm_cma_resv(char *p)
{
	pr_debug("%s(%s)\n", __func__, p);
	if (!p)
		return -EINVAL;
	return kstrtoul(p, 0, &kvm_cma_resv_ratio);
}
early_param("kvm_cma_resv_ratio", early_parse_kvm_cma_resv);
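
/* e.g. booting with "kvm_cma_resv_ratio=10" reserves 10% of memory instead. */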

struct page *kvm_alloc_hpt(unsigned long nr_pages)
{
	unsigned long align_pages = HPT_ALIGN_PAGES;

	/* Old CPUs require HPT aligned on a multiple of its size */
	if (!cpu_has_feature(CPU_FTR_ARCH_206))
		align_pages = nr_pages;
	return kvm_alloc_cma(nr_pages, align_pages);
}
EXPORT_SYMBOL_GPL(kvm_alloc_hpt);

void kvm_release_hpt(struct page *page, unsigned long nr_pages)
{
	kvm_release_cma(page, nr_pages);
}
EXPORT_SYMBOL_GPL(kvm_release_hpt);

/*************** generic *************/

static LIST_HEAD(free_linears);
static DEFINE_SPINLOCK(linear_lock);

static void __init kvm_linear_init_one(ulong size, int count, int type)
{
	unsigned long i;
	unsigned long j, npages;
	void *linear;
	struct page *pg;
	const char *typestr;
	struct kvmppc_linear_info *linear_info;

	if (!count)
		return;

	typestr = (type == KVM_LINEAR_RMA) ? "RMA" : "HPT";

	npages = size >> PAGE_SHIFT;
	linear_info = alloc_bootmem(count * sizeof(struct kvmppc_linear_info));
	for (i = 0; i < count; ++i) {
		linear = alloc_bootmem_align(size, size);
		pr_debug("Allocated KVM %s at %p (%ld MB)\n", typestr, linear,
			 size >> 20);
		linear_info[i].base_virt = linear;
		linear_info[i].base_pfn = __pa(linear) >> PAGE_SHIFT;
		linear_info[i].npages = npages;
		linear_info[i].type = type;
		list_add_tail(&linear_info[i].list, &free_linears);
		atomic_set(&linear_info[i].use_count, 0);

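		/*
		 * Mark every backing page as in use so this preallocated
		 * region can never be freed back to the allocator, e.g.
		 * by a later put_page().
		 */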
		pg = pfn_to_page(linear_info[i].base_pfn);
		for (j = 0; j < npages; ++j) {
			atomic_inc(&pg->_count);
			++pg;
		}
	}
}

static struct kvmppc_linear_info *kvm_alloc_linear(int type)
{
	struct kvmppc_linear_info *ri, *ret;

	ret = NULL;
	spin_lock(&linear_lock);
	list_for_each_entry(ri, &free_linears, list) {
		if (ri->type != type)
			continue;

		list_del(&ri->list);
		atomic_inc(&ri->use_count);
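		/* Zero the area so nothing leaks from its previous user. */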
		memset(ri->base_virt, 0, ri->npages << PAGE_SHIFT);
		ret = ri;
		break;
	}
	spin_unlock(&linear_lock);
	return ret;
}

static void kvm_release_linear(struct kvmppc_linear_info *ri)
{
	if (atomic_dec_and_test(&ri->use_count)) {
		spin_lock(&linear_lock);
		list_add_tail(&ri->list, &free_linears);
		spin_unlock(&linear_lock);
	}
}

/*
 * Called at boot time while the bootmem allocator is active,
 * to allocate contiguous physical memory for the hash page
 * tables for guests.
 */
void __init kvm_linear_init(void)
{
	/* RMA */
	/* Only do this on PPC970 in HV mode */
	if (!cpu_has_feature(CPU_FTR_HVMODE) ||
	    !cpu_has_feature(CPU_FTR_ARCH_201))
		return;

	if (!kvm_rma_size || !kvm_rma_count)
		return;

	/* Check that the requested size is one supported in hardware */
	if (lpcr_rmls(kvm_rma_size) < 0) {
		pr_err("RMA size of 0x%lx not supported\n", kvm_rma_size);
		return;
	}

	kvm_linear_init_one(kvm_rma_size, kvm_rma_count, KVM_LINEAR_RMA);
}
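
/*
 * On newer CPUs (CPU_FTR_ARCH_206) no RMA is preallocated here; guest
 * hash page tables are instead carved out of the CMA region reserved
 * by kvm_cma_reserve() below and handed out by kvm_alloc_hpt().
 */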

/**
 * kvm_cma_reserve() - reserve area for kvm hash pagetable
 *
 * This function reserves memory from the early allocator. It should be
 * called by arch-specific code once the early allocator (memblock or bootmem)
 * has been activated and all other subsystems have already allocated/reserved
 * memory.
 */
void __init kvm_cma_reserve(void)
{
	unsigned long align_size;
	struct memblock_region *reg;
	phys_addr_t selected_size = 0;
	/*
	 * We cannot use memblock_phys_mem_size() here, because
	 * memblock_analyze() has not been called yet.
	 */
	for_each_memblock(memory, reg)
		selected_size += memblock_region_memory_end_pfn(reg) -
				 memblock_region_memory_base_pfn(reg);

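	/*
	 * selected_size so far counts pages; scale it by the reserve
	 * ratio and convert it to bytes.
	 */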
	selected_size = (selected_size * kvm_cma_resv_ratio / 100) << PAGE_SHIFT;
	if (selected_size) {
		pr_debug("%s: reserving %ld MiB for global area\n", __func__,
			 (unsigned long)selected_size / SZ_1M);
		/*
		 * Old CPUs require the HPT to be aligned on a multiple of
		 * its size, so for them make the alignment the maximum
		 * size we could request.
		 */
		if (!cpu_has_feature(CPU_FTR_ARCH_206))
			align_size = __rounddown_pow_of_two(selected_size);
		else
			align_size = HPT_ALIGN_PAGES << PAGE_SHIFT;
		kvm_cma_declare_contiguous(selected_size, align_size);
	}
}