/* Support for MMIO probes.
 * Benefits from a lot of code borrowed from kprobes.
 * (C) 2002 Louis Zhuang <louis.zhuang@intel.com>.
 *     2007 Alexander Eichner
 *     2008 Pekka Paalanen <pq@iki.fi>
 */
7
Pekka Paalanen0fd0e3d2008-05-12 21:20:57 +02008#include <linux/list.h>
Ingo Molnar668a6c32008-05-19 13:35:24 +02009#include <linux/rculist.h>
Pekka Paalanen8b7d89d2008-05-12 21:20:56 +020010#include <linux/spinlock.h>
11#include <linux/hash.h>
12#include <linux/init.h>
13#include <linux/module.h>
Pekka Paalanen8b7d89d2008-05-12 21:20:56 +020014#include <linux/kernel.h>
Pekka Paalanen8b7d89d2008-05-12 21:20:56 +020015#include <linux/uaccess.h>
16#include <linux/ptrace.h>
17#include <linux/preempt.h>
Pekka Paalanenf5136382008-05-12 21:20:57 +020018#include <linux/percpu.h>
Pekka Paalanen0fd0e3d2008-05-12 21:20:57 +020019#include <linux/kdebug.h>
Pekka Paalanend61fc442008-05-12 21:20:57 +020020#include <linux/mutex.h>
Pekka Paalanen970e6fa2008-05-12 21:21:03 +020021#include <linux/io.h>
Pekka Paalanen8b7d89d2008-05-12 21:20:56 +020022#include <asm/cacheflush.h>
Pekka Paalanen8b7d89d2008-05-12 21:20:56 +020023#include <asm/tlbflush.h>
Pekka Paalanen970e6fa2008-05-12 21:21:03 +020024#include <linux/errno.h>
Pekka Paalanen13829532008-05-12 21:20:58 +020025#include <asm/debugreg.h>
Pekka Paalanen0fd0e3d2008-05-12 21:20:57 +020026#include <linux/mmiotrace.h>
Pekka Paalanen8b7d89d2008-05-12 21:20:56 +020027
#define KMMIO_PAGE_HASH_BITS 4
#define KMMIO_PAGE_TABLE_SIZE (1 << KMMIO_PAGE_HASH_BITS)

struct kmmio_fault_page {
	struct list_head list;
	struct kmmio_fault_page *release_next;
	unsigned long page; /* location of the fault page */
	bool old_presence; /* page presence prior to arming */
	bool armed;

	/*
	 * Number of times this page has been registered as a part
	 * of a probe. If zero, page is disarmed and this may be freed.
	 * Used only by writers (RCU).
	 */
	int count;
};

struct kmmio_delayed_release {
	struct rcu_head rcu;
	struct kmmio_fault_page *release_list;
};

struct kmmio_context {
	struct kmmio_fault_page *fpage;
	struct kmmio_probe *probe;
	unsigned long saved_flags;
	unsigned long addr;
	int active;
};

static DEFINE_SPINLOCK(kmmio_lock);

/* Protected by kmmio_lock */
unsigned int kmmio_count;

/* Read-protected by RCU, write-protected by kmmio_lock. */
static struct list_head kmmio_page_table[KMMIO_PAGE_TABLE_SIZE];
static LIST_HEAD(kmmio_probes);

static struct list_head *kmmio_page_list(unsigned long page)
{
	return &kmmio_page_table[hash_long(page, KMMIO_PAGE_HASH_BITS)];
}

/* Accessed per-cpu */
static DEFINE_PER_CPU(struct kmmio_context, kmmio_ctx);

/*
 * Probe lookup is basically a dynamic stabbing problem: given a point
 * (address), find all stored intervals (probes) containing it. The linear
 * scan below could use the existing prio tree code, or possibly better
 * structures such as:
 * "The Interval Skip List: A Data Structure for Finding All Intervals
 * That Overlap a Point" (might be simple), or
 * "Space Efficient Dynamic Stabbing with Fast Queries" - Mikkel Thorup.
 */
/* Get the kmmio at this addr (if any). You must be holding RCU read lock. */
static struct kmmio_probe *get_kmmio_probe(unsigned long addr)
{
	struct kmmio_probe *p;
	list_for_each_entry_rcu(p, &kmmio_probes, list) {
		/* the probed region is [addr, addr + len) */
		if (addr >= p->addr && addr < (p->addr + p->len))
			return p;
	}
	return NULL;
}

/* You must be holding RCU read lock. */
static struct kmmio_fault_page *get_kmmio_fault_page(unsigned long page)
{
	struct list_head *head;
	struct kmmio_fault_page *p;

	page &= PAGE_MASK;
	head = kmmio_page_list(page);
	list_for_each_entry_rcu(p, head, list) {
		if (p->page == page)
			return p;
	}
	return NULL;
}

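/*
 * Arming is implemented by clearing _PAGE_PRESENT in the pte (or the pmd
 * for 2M pages): the next access to the page then raises a page fault,
 * which do_page_fault() hands to kmmio_handler() below. The helpers also
 * report the previous presence bit, so that disarming can restore it.
 */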
static void set_pmd_presence(pmd_t *pmd, bool present, bool *old)
{
	pmdval_t v = pmd_val(*pmd);
	*old = !!(v & _PAGE_PRESENT);
	v &= ~_PAGE_PRESENT;
	if (present)
		v |= _PAGE_PRESENT;
	set_pmd(pmd, __pmd(v));
}

static void set_pte_presence(pte_t *pte, bool present, bool *old)
{
	pteval_t v = pte_val(*pte);
	*old = !!(v & _PAGE_PRESENT);
	v &= ~_PAGE_PRESENT;
	if (present)
		v |= _PAGE_PRESENT;
	set_pte_atomic(pte, __pte(v));
}

static int set_page_presence(unsigned long addr, bool present, bool *old)
{
	unsigned int level;
	pte_t *pte = lookup_address(addr, &level);

	if (!pte) {
		pr_err("kmmio: no pte for page 0x%08lx\n", addr);
		return -1;
	}

	switch (level) {
	case PG_LEVEL_2M:
		set_pmd_presence((pmd_t *)pte, present, old);
		break;
	case PG_LEVEL_4K:
		set_pte_presence(pte, present, old);
		break;
	default:
		pr_err("kmmio: unexpected page level 0x%x.\n", level);
		return -1;
	}

	__flush_tlb_one(addr);
	return 0;
}

/*
 * Mark the given page as not present. Access to it will trigger a fault.
 *
 * Struct kmmio_fault_page is protected by RCU and kmmio_lock, but the
 * protection is ignored here. RCU read lock is assumed held, so the struct
 * will not disappear unexpectedly. Furthermore, the caller must guarantee
 * that double arming the same virtual address (page) cannot occur.
 *
 * Double disarming on the other hand is allowed, and may occur when a fault
 * and mmiotrace shutdown happen simultaneously.
 */
static int arm_kmmio_fault_page(struct kmmio_fault_page *f)
{
	int ret;
	WARN_ONCE(f->armed, KERN_ERR "kmmio page already armed.\n");
	if (f->armed) {
		pr_warning("kmmio double-arm: page 0x%08lx, ref %d, old %d\n",
					f->page, f->count, f->old_presence);
	}
	ret = set_page_presence(f->page, false, &f->old_presence);
	WARN_ONCE(ret < 0, KERN_ERR "kmmio arming 0x%08lx failed.\n", f->page);
	f->armed = true;
	return ret;
}

/* Restore the given page to saved presence state. */
static void disarm_kmmio_fault_page(struct kmmio_fault_page *f)
{
	bool tmp;
	int ret = set_page_presence(f->page, f->old_presence, &tmp);
	WARN_ONCE(ret < 0,
			KERN_ERR "kmmio disarming 0x%08lx failed.\n", f->page);
	f->armed = false;
}

/*
 * This is being called from do_page_fault().
 *
 * We may be in an interrupt or a critical section. Also prefetching may
 * trigger a page fault. We may be in the middle of a process switch.
 * We cannot take any locks, because we could already be executing inside
 * a kmmio critical section.
 *
 * Local interrupts are disabled, so preemption cannot happen.
 * Do not enable interrupts, do not sleep, and watch out for other CPUs.
 */
/*
 * Interrupts are disabled on entry as the page fault is taken through an
 * interrupt gate, and they remain disabled throughout this function.
 */
int kmmio_handler(struct pt_regs *regs, unsigned long addr)
{
	struct kmmio_context *ctx;
	struct kmmio_fault_page *faultpage;
	int ret = 0; /* default to fault not handled */

	/*
	 * Preemption is now disabled to prevent process switch during
	 * single stepping. We can only handle one active kmmio trace
	 * per cpu, so ensure that we finish it before something else
	 * gets to run. We also hold the RCU read lock over single
	 * stepping to avoid looking up the probe and kmmio_fault_page
	 * again.
	 */
	preempt_disable();
	rcu_read_lock();

	faultpage = get_kmmio_fault_page(addr);
	if (!faultpage) {
		/*
		 * Either this page fault is not caused by kmmio, or
		 * another CPU just pulled the kmmio probe from under
		 * our feet. The latter case should not be possible.
		 */
		goto no_kmmio;
	}

	ctx = &get_cpu_var(kmmio_ctx);
	if (ctx->active) {
		disarm_kmmio_fault_page(faultpage);
		if (addr == ctx->addr) {
			/*
			 * On SMP we sometimes get recursive probe hits on the
			 * same address. Context is already saved, fall out.
			 */
			pr_debug("kmmio: duplicate probe hit on CPU %d, for "
					"address 0x%08lx.\n",
					smp_processor_id(), addr);
			ret = 1;
			goto no_kmmio_ctx;
		}
		/*
		 * Prevent overwriting an already in-flight context.
		 * This should not happen, let's hope disarming at least
		 * prevents a panic.
		 */
		pr_emerg("kmmio: recursive probe hit on CPU %d, "
				"for address 0x%08lx. Ignoring.\n",
				smp_processor_id(), addr);
		pr_emerg("kmmio: previous hit was at 0x%08lx.\n",
				ctx->addr);
		goto no_kmmio_ctx;
	}
	ctx->active++;

	ctx->fpage = faultpage;
	ctx->probe = get_kmmio_probe(addr);
	ctx->saved_flags = (regs->flags & (X86_EFLAGS_TF | X86_EFLAGS_IF));
	ctx->addr = addr;

	if (ctx->probe && ctx->probe->pre_handler)
		ctx->probe->pre_handler(ctx->probe, regs, addr);

	/*
	 * Enable single-stepping and disable interrupts for the faulting
	 * context. Local interrupts must not get enabled during stepping.
	 */
	regs->flags |= X86_EFLAGS_TF;
	regs->flags &= ~X86_EFLAGS_IF;

	/* Now set the present bit in the PTE and single step. */
	disarm_kmmio_fault_page(ctx->fpage);

	/*
	 * If another cpu accesses the same page while we are stepping,
	 * the access will not be caught. It will simply succeed and the
	 * only downside is we lose the event. If this becomes a problem,
	 * the user should drop to single cpu before tracing.
	 */

	put_cpu_var(kmmio_ctx);
	return 1; /* fault handled */

no_kmmio_ctx:
	put_cpu_var(kmmio_ctx);
no_kmmio:
	rcu_read_unlock();
	preempt_enable_no_resched();
	return ret;
}

/*
 * Interrupts are disabled on entry as trap1 (the debug trap) is an
 * interrupt gate, and they remain disabled throughout this function.
 * This must always get called as the pair to kmmio_handler().
 */
static int post_kmmio_handler(unsigned long condition, struct pt_regs *regs)
{
	int ret = 0;
	struct kmmio_context *ctx = &get_cpu_var(kmmio_ctx);

	if (!ctx->active) {
		pr_debug("kmmio: spurious debug trap on CPU %d.\n",
						smp_processor_id());
		goto out;
	}

	if (ctx->probe && ctx->probe->post_handler)
		ctx->probe->post_handler(ctx->probe, condition, regs);

	arm_kmmio_fault_page(ctx->fpage);

	regs->flags &= ~X86_EFLAGS_TF;
	regs->flags |= ctx->saved_flags;

	/* These were acquired in kmmio_handler(). */
	ctx->active--;
	BUG_ON(ctx->active);
	rcu_read_unlock();
	preempt_enable_no_resched();

	/*
	 * If somebody else is single-stepping across a probe point, flags
	 * will have TF set, in which case, continue the remaining processing
	 * of do_debug, as if this is not a probe hit.
	 */
	if (!(regs->flags & X86_EFLAGS_TF))
		ret = 1;
out:
	put_cpu_var(kmmio_ctx);
	return ret;
}

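/*
 * Summary of one traced access on a single CPU, as implemented by the
 * two handlers above (an armed page has _PAGE_PRESENT cleared):
 *
 *	insn touches an armed page, the page fault fires
 *	-> kmmio_handler(): save flags, call pre_handler(),
 *	   disarm the page, set TF for single-stepping
 *	insn re-executes and completes, the debug trap fires
 *	-> post_kmmio_handler(): call post_handler(), re-arm the page,
 *	   restore the saved flags
 */
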
/* You must be holding kmmio_lock. */
static int add_kmmio_fault_page(unsigned long page)
{
	struct kmmio_fault_page *f;

	page &= PAGE_MASK;
	f = get_kmmio_fault_page(page);
	if (f) {
		if (!f->count)
			arm_kmmio_fault_page(f);
		f->count++;
		return 0;
	}

	f = kzalloc(sizeof(*f), GFP_ATOMIC);
	if (!f)
		return -1;

	f->count = 1;
	f->page = page;

	if (arm_kmmio_fault_page(f)) {
		kfree(f);
		return -1;
	}

	list_add_rcu(&f->list, kmmio_page_list(f->page));

	return 0;
}

/* You must be holding kmmio_lock. */
static void release_kmmio_fault_page(unsigned long page,
				struct kmmio_fault_page **release_list)
{
	struct kmmio_fault_page *f;

	page &= PAGE_MASK;
	f = get_kmmio_fault_page(page);
	if (!f)
		return;

	f->count--;
	BUG_ON(f->count < 0);
	if (!f->count) {
		disarm_kmmio_fault_page(f);
		f->release_next = *release_list;
		*release_list = f;
	}
}

/*
 * With page-unaligned ioremaps, one or two armed pages may contain
 * addresses from outside the intended mapping. Events for these addresses
 * are currently silently dropped. Such events can only result from
 * programming mistakes: accesses before the beginning or past the end of
 * a mapping.
 */
int register_kmmio_probe(struct kmmio_probe *p)
{
	unsigned long flags;
	int ret = 0;
	unsigned long size = 0;
	const unsigned long size_lim = p->len + (p->addr & ~PAGE_MASK);

	spin_lock_irqsave(&kmmio_lock, flags);
	if (get_kmmio_probe(p->addr)) {
		ret = -EEXIST;
		goto out;
	}
	kmmio_count++;
	list_add_rcu(&p->list, &kmmio_probes);
	while (size < size_lim) {
		if (add_kmmio_fault_page(p->addr + size))
			pr_err("kmmio: Unable to set page fault.\n");
		size += PAGE_SIZE;
	}
out:
	spin_unlock_irqrestore(&kmmio_lock, flags);
	/*
	 * XXX: What should I do here?
	 * Here was a call to global_flush_tlb(), but it does not exist
	 * anymore. It seems it's not needed after all.
	 */
	return ret;
}
EXPORT_SYMBOL(register_kmmio_probe);

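/*
 * A minimal usage sketch (hypothetical client code, not part of this
 * file; my_pre, my_post, my_probe, mmio_addr and mmio_len are made-up
 * names). The handler prototypes match the calls made from
 * kmmio_handler() and post_kmmio_handler() above:
 *
 *	static void my_pre(struct kmmio_probe *p, struct pt_regs *regs,
 *				unsigned long addr)
 *	{
 *		(runs from the page fault: no sleeping, no locks)
 *	}
 *
 *	static void my_post(struct kmmio_probe *p, unsigned long condition,
 *				struct pt_regs *regs)
 *	{
 *		(runs from the debug trap, same restrictions)
 *	}
 *
 *	static struct kmmio_probe my_probe = {
 *		.addr = mmio_addr,	(page-aligned for best results)
 *		.len = mmio_len,
 *		.pre_handler = my_pre,
 *		.post_handler = my_post,
 *	};
 *
 *	ret = register_kmmio_probe(&my_probe);
 */
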
static void rcu_free_kmmio_fault_pages(struct rcu_head *head)
{
	struct kmmio_delayed_release *dr = container_of(
						head,
						struct kmmio_delayed_release,
						rcu);
	struct kmmio_fault_page *p = dr->release_list;
	while (p) {
		struct kmmio_fault_page *next = p->release_next;
		BUG_ON(p->count);
		kfree(p);
		p = next;
	}
	kfree(dr);
}

static void remove_kmmio_fault_pages(struct rcu_head *head)
{
	struct kmmio_delayed_release *dr = container_of(
						head,
						struct kmmio_delayed_release,
						rcu);
	struct kmmio_fault_page *p = dr->release_list;
	struct kmmio_fault_page **prevp = &dr->release_list;
	unsigned long flags;
	spin_lock_irqsave(&kmmio_lock, flags);
	while (p) {
		if (!p->count) {
			list_del_rcu(&p->list);
			prevp = &p->release_next;
		} else {
			/*
			 * The page was re-registered during the grace
			 * period: unlink it from the release list so it
			 * is not freed, and do not advance prevp.
			 */
			*prevp = p->release_next;
		}
		p = p->release_next;
	}
	spin_unlock_irqrestore(&kmmio_lock, flags);
	/* This is the real RCU destroy call. */
	call_rcu(&dr->rcu, rcu_free_kmmio_fault_pages);
}

/*
 * Remove a kmmio probe. You have to synchronize_rcu() before you can be
 * sure that the callbacks will not be called anymore. Only after that
 * you may actually release your struct kmmio_probe.
 *
 * Unregistering a kmmio fault page has three steps:
 * 1. release_kmmio_fault_page()
 *    Disarm the page, wait a grace period to let all faults finish.
 * 2. remove_kmmio_fault_pages()
 *    Remove the pages from kmmio_page_table.
 * 3. rcu_free_kmmio_fault_pages()
 *    Actually free the kmmio_fault_page structs, from RCU callback
 *    context.
 */
void unregister_kmmio_probe(struct kmmio_probe *p)
{
	unsigned long flags;
	unsigned long size = 0;
	const unsigned long size_lim = p->len + (p->addr & ~PAGE_MASK);
	struct kmmio_fault_page *release_list = NULL;
	struct kmmio_delayed_release *drelease;

	spin_lock_irqsave(&kmmio_lock, flags);
	while (size < size_lim) {
		release_kmmio_fault_page(p->addr + size, &release_list);
		size += PAGE_SIZE;
	}
	list_del_rcu(&p->list);
	kmmio_count--;
	spin_unlock_irqrestore(&kmmio_lock, flags);

	drelease = kmalloc(sizeof(*drelease), GFP_ATOMIC);
	if (!drelease) {
		pr_crit("kmmio: leaking kmmio_fault_page objects.\n");
		return;
	}
	drelease->release_list = release_list;

	/*
	 * This is not really RCU here. We have just disarmed a set of
	 * pages so that they cannot trigger page faults anymore. However,
	 * we cannot remove the pages from kmmio_page_table,
	 * because a probe hit might be in flight on another CPU. The
	 * pages are collected into a list, and they will be removed from
	 * kmmio_page_table when it is certain that no probe hit related to
	 * these pages can be in flight. RCU grace period sounds like a
	 * good choice.
	 *
	 * If we removed the pages too early, the kmmio page fault handler
	 * might not find the respective kmmio_fault_page and would wrongly
	 * decide that the fault is not a kmmio fault, when it actually is.
	 * This would lead to madness.
	 */
	call_rcu(&drelease->rcu, remove_kmmio_fault_pages);
}
EXPORT_SYMBOL(unregister_kmmio_probe);

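/*
 * A minimal teardown sketch for the hypothetical my_probe shown after
 * register_kmmio_probe() above (restating the rule documented before
 * unregister_kmmio_probe()):
 *
 *	unregister_kmmio_probe(&my_probe);
 *	synchronize_rcu();
 *
 * Only after synchronize_rcu() returns is it safe to free my_probe or
 * unload the module containing the handlers.
 */
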
static int kmmio_die_notifier(struct notifier_block *nb, unsigned long val,
								void *args)
{
	struct die_args *arg = args;

	if (val == DIE_DEBUG && (arg->err & DR_STEP))
		if (post_kmmio_handler(arg->err, arg->regs) == 1)
			return NOTIFY_STOP;

	return NOTIFY_DONE;
}

static struct notifier_block nb_die = {
	.notifier_call = kmmio_die_notifier
};

static int __init init_kmmio(void)
{
	int i;
	for (i = 0; i < KMMIO_PAGE_TABLE_SIZE; i++)
		INIT_LIST_HEAD(&kmmio_page_table[i]);
	return register_die_notifier(&nb_die);
}
fs_initcall(init_kmmio); /* should be before device_initcall() */