/*
 * Copyright 2011 Paul Mackerras, IBM Corp. <paulus@au1.ibm.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2, as
 * published by the Free Software Foundation.
 */

#include <linux/cpu.h>
#include <linux/kvm_host.h>
#include <linux/preempt.h>
#include <linux/export.h>
#include <linux/sched.h>
#include <linux/spinlock.h>
#include <linux/bootmem.h>
#include <linux/init.h>
#include <linux/memblock.h>
#include <linux/sizes.h>

#include <asm/cputable.h>
#include <asm/kvm_ppc.h>
#include <asm/kvm_book3s.h>

#include "book3s_hv_cma.h"

/*
 * Hash page table alignment on newer CPUs (CPU_FTR_ARCH_206)
 * should be a power of 2; we use 256kB.
 */
#define HPT_ALIGN_PAGES ((1 << 18) >> PAGE_SHIFT) /* 256k */

/*
 * By default we reserve 5% of memory for hash pagetable allocation.
 */
static unsigned long kvm_cma_resv_ratio = 5;

/*
 * We allocate RMAs (real mode areas) for KVM guests from the KVM CMA area.
 * Each RMA has to be physically contiguous and of a size that the
 * hardware supports.  PPC970 and POWER7 support 64MB, 128MB and 256MB,
 * and other larger sizes.  Since we are unlikely to be able to allocate
 * that much physically contiguous memory after the system is up and
 * running, we preallocate a set of RMAs in early boot using CMA.
 * kvm_rma_pages should be a power of 2.
 */
unsigned long kvm_rma_pages = (1 << 27) >> PAGE_SHIFT; /* 128MB */
EXPORT_SYMBOL_GPL(kvm_rma_pages);
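
/*
 * Illustrative arithmetic (assuming a 64kB base page size, i.e.
 * PAGE_SHIFT == 16): the default (1 << 27) >> 16 is 2048 pages, a
 * 128MB RMA; with 4kB pages (PAGE_SHIFT == 12) the same RMA is
 * 32768 pages.
 */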

/*
 * Work out RMLS (real mode limit selector) field value for a given
 * RMA size.  Assumes POWER7 or PPC970.
 */
static inline int lpcr_rmls(unsigned long rma_size)
{
	switch (rma_size) {
	case 32ul << 20:	/* 32 MB */
		if (cpu_has_feature(CPU_FTR_ARCH_206))
			return 8;	/* only supported on POWER7 */
		return -1;
	case 64ul << 20:	/* 64 MB */
		return 3;
	case 128ul << 20:	/* 128 MB */
		return 7;
	case 256ul << 20:	/* 256 MB */
		return 4;
	case 1ul << 30:		/* 1 GB */
		return 2;
	case 16ul << 30:	/* 16 GB */
		return 1;
	case 256ul << 30:	/* 256 GB */
		return 0;
	default:
		return -1;
	}
}

static int __init early_parse_rma_size(char *p)
{
	unsigned long kvm_rma_size;

	pr_debug("%s(%s)\n", __func__, p);
	if (!p)
		return -EINVAL;
	kvm_rma_size = memparse(p, &p);
	/*
	 * Check that the requested size is one supported in hardware.
	 */
	if (lpcr_rmls(kvm_rma_size) < 0) {
		pr_err("RMA size of 0x%lx not supported\n", kvm_rma_size);
		return -EINVAL;
	}
	kvm_rma_pages = kvm_rma_size >> PAGE_SHIFT;
	return 0;
}
early_param("kvm_rma_size", early_parse_rma_size);
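
/*
 * Example: booting with "kvm_rma_size=256M" on POWER7 selects a 256MB
 * RMA (RMLS value 4 per lpcr_rmls() above), while an unsupported size
 * such as "kvm_rma_size=96M" is rejected with -EINVAL.
 */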

struct kvm_rma_info *kvm_alloc_rma(void)
{
	struct page *page;
	struct kvm_rma_info *ri;

	ri = kmalloc(sizeof(struct kvm_rma_info), GFP_KERNEL);
	if (!ri)
		return NULL;
	page = kvm_alloc_cma(kvm_rma_pages, kvm_rma_pages);
	if (!page)
		goto err_out;
	atomic_set(&ri->use_count, 1);
	ri->base_pfn = page_to_pfn(page);
	return ri;
err_out:
	kfree(ri);
	return NULL;
}
EXPORT_SYMBOL_GPL(kvm_alloc_rma);

void kvm_release_rma(struct kvm_rma_info *ri)
{
	if (atomic_dec_and_test(&ri->use_count)) {
		kvm_release_cma(pfn_to_page(ri->base_pfn), kvm_rma_pages);
		kfree(ri);
	}
}
EXPORT_SYMBOL_GPL(kvm_release_rma);
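
/*
 * Illustrative caller pattern (a sketch, not code from this file): a
 * VM ioctl handler might allocate an RMA, expose the page range at
 * ri->base_pfn to the guest, and drop its reference when done:
 *
 *	struct kvm_rma_info *ri = kvm_alloc_rma();
 *
 *	if (!ri)
 *		return -ENOMEM;
 *	...
 *	kvm_release_rma(ri);
 */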

static int __init early_parse_kvm_cma_resv(char *p)
{
	pr_debug("%s(%s)\n", __func__, p);
	if (!p)
		return -EINVAL;
	return kstrtoul(p, 0, &kvm_cma_resv_ratio);
}
early_param("kvm_cma_resv_ratio", early_parse_kvm_cma_resv);
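
/*
 * Example: booting with "kvm_cma_resv_ratio=10" reserves 10% of memory
 * for the KVM CMA area instead of the default 5%.
 */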

struct page *kvm_alloc_hpt(unsigned long nr_pages)
{
	unsigned long align_pages = HPT_ALIGN_PAGES;

	/* Old CPUs require the HPT to be aligned on a multiple of its size */
	if (!cpu_has_feature(CPU_FTR_ARCH_206))
		align_pages = nr_pages;
	return kvm_alloc_cma(nr_pages, align_pages);
}
EXPORT_SYMBOL_GPL(kvm_alloc_hpt);

void kvm_release_hpt(struct page *page, unsigned long nr_pages)
{
	kvm_release_cma(page, nr_pages);
}
EXPORT_SYMBOL_GPL(kvm_release_hpt);
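
/*
 * Illustrative caller pattern (a sketch, not code from this file):
 * allocating pages for a 16MB HPT and releasing them again:
 *
 *	unsigned long nr = (1ul << 24) >> PAGE_SHIFT;
 *	struct page *pg = kvm_alloc_hpt(nr);
 *
 *	if (pg)
 *		kvm_release_hpt(pg, nr);
 */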

/**
 * kvm_cma_reserve() - reserve area for kvm hash pagetable
 *
 * This function reserves memory from the early allocator.  It should be
 * called by arch-specific code once the early allocator (memblock or
 * bootmem) has been activated and all other subsystems have already
 * allocated/reserved memory.
 */
void __init kvm_cma_reserve(void)
{
	unsigned long align_size;
	struct memblock_region *reg;
	phys_addr_t selected_size = 0;

	/*
	 * We cannot use memblock_phys_mem_size() here, because
	 * memblock_analyze() has not been called yet.
	 */
	for_each_memblock(memory, reg)
		selected_size += memblock_region_memory_end_pfn(reg) -
				 memblock_region_memory_base_pfn(reg);

	selected_size = (selected_size * kvm_cma_resv_ratio / 100) << PAGE_SHIFT;
	if (selected_size) {
		pr_debug("%s: reserving %ld MiB for global area\n", __func__,
			 (unsigned long)selected_size / SZ_1M);
		/*
		 * Old CPUs require the HPT to be aligned on a multiple of
		 * its size, so for them make the alignment the maximum
		 * size we could request.
		 */
		if (!cpu_has_feature(CPU_FTR_ARCH_206))
			align_size = __rounddown_pow_of_two(selected_size);
		else
			align_size = HPT_ALIGN_PAGES << PAGE_SHIFT;

		align_size = max(kvm_rma_pages << PAGE_SHIFT, align_size);
		kvm_cma_declare_contiguous(selected_size, align_size);
	}
}
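
/*
 * Worked example (a sketch, assuming a POWER7 machine with 16GB of RAM
 * and the default kvm_cma_resv_ratio of 5): selected_size comes to
 * roughly 819MB; align_size starts at HPT_ALIGN_PAGES << PAGE_SHIFT
 * (256kB) and is then raised to kvm_rma_pages << PAGE_SHIFT (128MB),
 * so a ~819MB CMA area is declared with 128MB alignment.
 */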

/*
 * When running HV mode KVM we need to block certain operations while KVM VMs
 * exist in the system.  We use a counter of VMs to track this.
 *
 * One of the operations we need to block is onlining of secondaries, so we
 * protect hv_vm_count with get/put_online_cpus().
 */
static atomic_t hv_vm_count;

void kvm_hv_vm_activated(void)
{
	get_online_cpus();
	atomic_inc(&hv_vm_count);
	put_online_cpus();
}
EXPORT_SYMBOL_GPL(kvm_hv_vm_activated);

void kvm_hv_vm_deactivated(void)
{
	get_online_cpus();
	atomic_dec(&hv_vm_count);
	put_online_cpus();
}
EXPORT_SYMBOL_GPL(kvm_hv_vm_deactivated);

bool kvm_hv_mode_active(void)
{
	return atomic_read(&hv_vm_count) != 0;
}
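
/*
 * Illustrative use (a sketch; the real call sites live elsewhere in the
 * arch code): the secondary-CPU online path can consult this counter
 * and refuse to bring a thread online while any HV guest exists:
 *
 *	if (kvm_hv_mode_active())
 *		return 0;	// hypothetical check, not from this file
 */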