/*
 * Copyright 2011 Paul Mackerras, IBM Corp. <paulus@au1.ibm.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2, as
 * published by the Free Software Foundation.
 */

#include <linux/cpu.h>
#include <linux/kvm_host.h>
#include <linux/preempt.h>
#include <linux/export.h>
#include <linux/sched.h>
#include <linux/spinlock.h>
#include <linux/init.h>
#include <linux/memblock.h>
#include <linux/sizes.h>
#include <linux/cma.h>
#include <linux/bitops.h>

#include <asm/cputable.h>
#include <asm/kvm_ppc.h>
#include <asm/kvm_book3s.h>
#include <asm/archrandom.h>
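
/*
 * Granularity of the CMA reservation's accounting: KVM_CMA_CHUNK_ORDER
 * of 18 means 256 KiB chunks.  KVM_CMA_CHUNK_ORDER - PAGE_SHIFT is
 * passed to cma_declare_contiguous() below as the per-bit order, so
 * each bit of the CMA bitmap covers one such chunk.
 */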
#define KVM_CMA_CHUNK_ORDER     18

/*
 * Hash page table alignment on newer CPUs (CPU_FTR_ARCH_206)
 * should be a power of 2.
 */
#define HPT_ALIGN_PAGES         ((1 << 18) >> PAGE_SHIFT) /* 256k */
/*
 * By default we reserve 5% of memory for hash pagetable allocation.
 */
static unsigned long kvm_cma_resv_ratio = 5;

static struct cma *kvm_cma;

static int __init early_parse_kvm_cma_resv(char *p)
{
        pr_debug("%s(%s)\n", __func__, p);
        if (!p)
                return -EINVAL;
        return kstrtoul(p, 0, &kvm_cma_resv_ratio);
}
early_param("kvm_cma_resv_ratio", early_parse_kvm_cma_resv);
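
/*
 * The reservation ratio can be overridden on the kernel command line,
 * e.g. booting with "kvm_cma_resv_ratio=10" reserves 10% of memory for
 * the hash page table area instead of the default 5%.
 */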

struct page *kvm_alloc_hpt(unsigned long nr_pages)
{
        VM_BUG_ON(order_base_2(nr_pages) < KVM_CMA_CHUNK_ORDER - PAGE_SHIFT);

        return cma_alloc(kvm_cma, nr_pages, order_base_2(HPT_ALIGN_PAGES));
}
EXPORT_SYMBOL_GPL(kvm_alloc_hpt);

void kvm_release_hpt(struct page *page, unsigned long nr_pages)
{
        cma_release(kvm_cma, page, nr_pages);
}
EXPORT_SYMBOL_GPL(kvm_release_hpt);
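
/*
 * Illustrative sketch only, not part of the original file: a caller
 * (for example the HPT setup path) might try a preferred size first
 * and fall back to smaller powers of 2 when the CMA area is too
 * fragmented.  The helper name and the fallback policy are made up
 * for illustration.  The loop stops at KVM_CMA_CHUNK_ORDER so the
 * VM_BUG_ON() in kvm_alloc_hpt() above can never trigger.
 */
static struct page * __maybe_unused example_alloc_hpt(unsigned long *order)
{
        unsigned long try;
        struct page *page;

        for (try = *order; try >= KVM_CMA_CHUNK_ORDER; try--) {
                page = kvm_alloc_hpt(1ul << (try - PAGE_SHIFT));
                if (page) {
                        *order = try;   /* report the order actually used */
                        return page;
                }
        }
        return NULL;
}
/*
 * The matching release for a successful example_alloc_hpt() would be
 * kvm_release_hpt(page, 1ul << (*order - PAGE_SHIFT)).
 */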

/**
 * kvm_cma_reserve() - reserve area for kvm hash pagetable
 *
 * This function reserves memory from the early allocator. It should be
 * called by arch-specific code once the memblock allocator has been
 * activated and all other subsystems have already allocated/reserved
 * memory.
 */
void __init kvm_cma_reserve(void)
{
        unsigned long align_size;
        struct memblock_region *reg;
        phys_addr_t selected_size = 0;

        /*
         * We need CMA reservation only when we are in HV mode
         */
        if (!cpu_has_feature(CPU_FTR_HVMODE))
                return;
        /*
         * We cannot use memblock_phys_mem_size() here, because
         * memblock_analyze() has not been called yet.
         */
        for_each_memblock(memory, reg)
                selected_size += memblock_region_memory_end_pfn(reg) -
                                 memblock_region_memory_base_pfn(reg);

        selected_size = (selected_size * kvm_cma_resv_ratio / 100) << PAGE_SHIFT;
        if (selected_size) {
                pr_debug("%s: reserving %ld MiB for global area\n", __func__,
                         (unsigned long)selected_size / SZ_1M);
                align_size = HPT_ALIGN_PAGES << PAGE_SHIFT;
                cma_declare_contiguous(0, selected_size, 0, align_size,
                        KVM_CMA_CHUNK_ORDER - PAGE_SHIFT, false, &kvm_cma);
        }
}
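
/*
 * Worked example (illustrative): on a machine with 16 GiB of memory and
 * the default kvm_cma_resv_ratio of 5, selected_size comes out to
 * 16 GiB * 5 / 100, i.e. roughly 819 MiB, which is then declared as a
 * CMA area aligned to HPT_ALIGN_PAGES and accounted in 256 KiB chunks.
 */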

/*
 * Real-mode H_CONFER implementation.
 * We check if we are the only vcpu out of this virtual core
 * still running in the guest and not ceded. If so, we pop up
 * to the virtual-mode implementation; if not, just return to
 * the guest.
 */
long int kvmppc_rm_h_confer(struct kvm_vcpu *vcpu, int target,
                            unsigned int yield_count)
{
        struct kvmppc_vcore *vc = vcpu->arch.vcore;
        int threads_running;
        int threads_ceded;
        int threads_conferring;
        u64 stop = get_tb() + 10 * tb_ticks_per_usec;
        int rv = H_SUCCESS; /* => don't yield */

        set_bit(vcpu->arch.ptid, &vc->conferring_threads);
        while ((get_tb() < stop) && (VCORE_EXIT_COUNT(vc) == 0)) {
                threads_running = VCORE_ENTRY_COUNT(vc);
                threads_ceded = hweight32(vc->napping_threads);
                threads_conferring = hweight32(vc->conferring_threads);
                if (threads_ceded + threads_conferring >= threads_running) {
                        rv = H_TOO_HARD; /* => do yield */
                        break;
                }
        }
        clear_bit(vcpu->arch.ptid, &vc->conferring_threads);
        return rv;
}
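
/*
 * Worked example (illustrative): with 4 threads of the vcore in the
 * guest (VCORE_ENTRY_COUNT == 4), 2 of them napping after H_CEDE and
 * one other thread also conferring, this thread's own conferring bit
 * brings the count to 2 ceded + 2 conferring >= 4 running, so it
 * returns H_TOO_HARD and the confer is completed by the virtual-mode
 * handler instead.
 */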

/*
 * When running HV mode KVM we need to block certain operations while KVM VMs
 * exist in the system. We use a counter of VMs to track this.
 *
 * One of the operations we need to block is onlining of secondaries, so we
 * protect hv_vm_count with get/put_online_cpus().
 */
static atomic_t hv_vm_count;

void kvm_hv_vm_activated(void)
{
        get_online_cpus();
        atomic_inc(&hv_vm_count);
        put_online_cpus();
}
EXPORT_SYMBOL_GPL(kvm_hv_vm_activated);

void kvm_hv_vm_deactivated(void)
{
        get_online_cpus();
        atomic_dec(&hv_vm_count);
        put_online_cpus();
}
EXPORT_SYMBOL_GPL(kvm_hv_vm_deactivated);

bool kvm_hv_mode_active(void)
{
        return atomic_read(&hv_vm_count) != 0;
}
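
/*
 * Illustrative sketch only (hypothetical consumer, not part of the
 * original file): paths that hold the CPU hotplug lock for writing,
 * such as the CPU online code, are excluded from the get_online_cpus()
 * sections above and so can test the counter without racing against a
 * VM being activated.  The helper below is made up for illustration.
 */
static int __maybe_unused example_cpu_online_gate(void)
{
        /* Assumed to be called with the hotplug lock held for writing */
        if (kvm_hv_mode_active())
                return -EBUSY;  /* HV guests exist; refuse the operation */

        return 0;
}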

extern int hcall_real_table[], hcall_real_table_end[];

int kvmppc_hcall_impl_hv_realmode(unsigned long cmd)
{
        cmd /= 4;
        if (cmd < hcall_real_table_end - hcall_real_table &&
            hcall_real_table[cmd])
                return 1;

        return 0;
}
EXPORT_SYMBOL_GPL(kvmppc_hcall_impl_hv_realmode);
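
/*
 * PAPR hypercall numbers are multiples of 4, so cmd / 4 indexes the
 * real-mode dispatch table (hcall_real_table, populated in assembly);
 * a non-zero entry means the hcall has a real-mode fast path.  For
 * example, kvmppc_hcall_impl_hv_realmode(H_ENTER) returns 1 because
 * H_ENTER is handled without exiting to the host.
 */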

int kvmppc_hwrng_present(void)
{
        return powernv_hwrng_present();
}
EXPORT_SYMBOL_GPL(kvmppc_hwrng_present);
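
/*
 * Handle the H_RANDOM hypercall in real mode.  Per the hypercall ABI
 * the status is returned to the guest in GPR3 (the return value here)
 * and output values follow in GPR4, which is why the random number is
 * written straight into vcpu->arch.gpr[4].
 */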
long kvmppc_h_random(struct kvm_vcpu *vcpu)
{
        if (powernv_get_random_real_mode(&vcpu->arch.gpr[4]))
                return H_SUCCESS;

        return H_HARDWARE;
}