/* Support for MMIO probes.
 * Benefits from much code borrowed from kprobes.
 * (C) 2002 Louis Zhuang <louis.zhuang@intel.com>.
 *     2007 Alexander Eichner
 *     2008 Pekka Paalanen <pq@iki.fi>
 */

#include <linux/list.h>
#include <linux/rculist.h>
#include <linux/spinlock.h>
#include <linux/hash.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/uaccess.h>
#include <linux/ptrace.h>
#include <linux/preempt.h>
#include <linux/percpu.h>
#include <linux/kdebug.h>
#include <linux/mutex.h>
#include <linux/io.h>
#include <asm/cacheflush.h>
#include <asm/tlbflush.h>
#include <linux/errno.h>
#include <asm/debugreg.h>
#include <linux/mmiotrace.h>

#define KMMIO_PAGE_HASH_BITS 4
#define KMMIO_PAGE_TABLE_SIZE (1 << KMMIO_PAGE_HASH_BITS)

struct kmmio_fault_page {
        struct list_head list;
        struct kmmio_fault_page *release_next;
        unsigned long page; /* location of the fault page */
        bool old_presence; /* page presence prior to arming */
        bool armed;

        /*
         * Number of times this page has been registered as a part
         * of a probe. If zero, page is disarmed and this may be freed.
         * Used only by writers (RCU) and post_kmmio_handler().
         * Protected by kmmio_lock, when linked into kmmio_page_table.
         */
        int count;
};

struct kmmio_delayed_release {
        struct rcu_head rcu;
        struct kmmio_fault_page *release_list;
};

struct kmmio_context {
        struct kmmio_fault_page *fpage;
        struct kmmio_probe *probe;
        unsigned long saved_flags;
        unsigned long addr;
        int active;
};

static DEFINE_SPINLOCK(kmmio_lock);

/* Protected by kmmio_lock */
unsigned int kmmio_count;

/* Read-protected by RCU, write-protected by kmmio_lock. */
static struct list_head kmmio_page_table[KMMIO_PAGE_TABLE_SIZE];
static LIST_HEAD(kmmio_probes);

static struct list_head *kmmio_page_list(unsigned long page)
{
        return &kmmio_page_table[hash_long(page, KMMIO_PAGE_HASH_BITS)];
}

/* Accessed per-cpu */
static DEFINE_PER_CPU(struct kmmio_context, kmmio_ctx);

/*
 * This is basically a dynamic stabbing problem:
 * we could use the existing prio tree code, or possibly better
 * implementations:
 * The Interval Skip List: A Data Structure for Finding All Intervals That
 * Overlap a Point (might be simple)
 * Space Efficient Dynamic Stabbing with Fast Queries - Mikkel Thorup
 */
/* Get the kmmio at this addr (if any). You must be holding RCU read lock. */
static struct kmmio_probe *get_kmmio_probe(unsigned long addr)
{
        struct kmmio_probe *p;
        list_for_each_entry_rcu(p, &kmmio_probes, list) {
                if (addr >= p->addr && addr <= (p->addr + p->len))
                        return p;
        }
        return NULL;
}

/* You must be holding RCU read lock. */
static struct kmmio_fault_page *get_kmmio_fault_page(unsigned long page)
{
        struct list_head *head;
        struct kmmio_fault_page *p;

        page &= PAGE_MASK;
        head = kmmio_page_list(page);
        list_for_each_entry_rcu(p, head, list) {
                if (p->page == page)
                        return p;
        }
        return NULL;
}

static void set_pmd_presence(pmd_t *pmd, bool present, bool *old)
{
        pmdval_t v = pmd_val(*pmd);
        *old = !!(v & _PAGE_PRESENT);
        v &= ~_PAGE_PRESENT;
        if (present)
                v |= _PAGE_PRESENT;
        set_pmd(pmd, __pmd(v));
}

static void set_pte_presence(pte_t *pte, bool present, bool *old)
{
        pteval_t v = pte_val(*pte);
        *old = !!(v & _PAGE_PRESENT);
        v &= ~_PAGE_PRESENT;
        if (present)
                v |= _PAGE_PRESENT;
        set_pte_atomic(pte, __pte(v));
}

static int set_page_presence(unsigned long addr, bool present, bool *old)
{
        unsigned int level;
        pte_t *pte = lookup_address(addr, &level);

        if (!pte) {
                pr_err("kmmio: no pte for page 0x%08lx\n", addr);
                return -1;
        }

        switch (level) {
        case PG_LEVEL_2M:
                set_pmd_presence((pmd_t *)pte, present, old);
                break;
        case PG_LEVEL_4K:
                set_pte_presence(pte, present, old);
                break;
        default:
                pr_err("kmmio: unexpected page level 0x%x.\n", level);
                return -1;
        }

        __flush_tlb_one(addr);
        return 0;
}

/*
 * Mark the given page as not present. Access to it will trigger a fault.
 *
 * Struct kmmio_fault_page is protected by RCU and kmmio_lock, but the
 * protection is ignored here. The RCU read lock is assumed held, so the
 * struct will not disappear unexpectedly. Furthermore, the caller must
 * guarantee that double arming of the same virtual address (page) cannot
 * occur.
 *
 * Double disarming, on the other hand, is allowed, and may occur when a
 * fault and an mmiotrace shutdown happen simultaneously.
 */
static int arm_kmmio_fault_page(struct kmmio_fault_page *f)
{
        int ret;
        WARN_ONCE(f->armed, KERN_ERR "kmmio page already armed.\n");
        if (f->armed) {
                pr_warning("kmmio double-arm: page 0x%08lx, ref %d, old %d\n",
                                        f->page, f->count, f->old_presence);
        }
        ret = set_page_presence(f->page, false, &f->old_presence);
        WARN_ONCE(ret < 0, KERN_ERR "kmmio arming 0x%08lx failed.\n", f->page);
        f->armed = true;
        return ret;
}

/** Restore the given page to saved presence state. */
static void disarm_kmmio_fault_page(struct kmmio_fault_page *f)
{
        bool tmp;
        int ret = set_page_presence(f->page, f->old_presence, &tmp);
        WARN_ONCE(ret < 0,
                        KERN_ERR "kmmio disarming 0x%08lx failed.\n", f->page);
        f->armed = false;
}

/*
 * This is being called from do_page_fault().
 *
 * We may be in an interrupt or a critical section. Also prefetching may
 * trigger a page fault. We may be in the middle of a process switch.
 * We cannot take any locks, because we could be executing especially
 * within a kmmio critical section.
 *
 * Local interrupts are disabled, so preemption cannot happen.
 * Do not enable interrupts, do not sleep, and watch out for other CPUs.
 */
/*
 * Interrupts are disabled on entry as trap3 is an interrupt gate
 * and they remain disabled throughout this function.
 */
int kmmio_handler(struct pt_regs *regs, unsigned long addr)
{
        struct kmmio_context *ctx;
        struct kmmio_fault_page *faultpage;
        int ret = 0; /* default to fault not handled */

        /*
         * Preemption is now disabled to prevent process switch during
         * single stepping. We can only handle one active kmmio trace
         * per cpu, so ensure that we finish it before something else
         * gets to run. We also hold the RCU read lock over single
         * stepping to avoid looking up the probe and kmmio_fault_page
         * again.
         */
        preempt_disable();
        rcu_read_lock();

        faultpage = get_kmmio_fault_page(addr);
        if (!faultpage) {
                /*
                 * Either this page fault is not caused by kmmio, or
                 * another CPU just pulled the kmmio probe from under
                 * our feet. The latter case should not be possible.
                 */
                goto no_kmmio;
        }

        ctx = &get_cpu_var(kmmio_ctx);
        if (ctx->active) {
                if (addr == ctx->addr) {
                        /*
                         * A second fault on the same page means some other
                         * condition needs handling by do_page_fault(); the
                         * page really not being present is the most common
                         * reason.
                         */
                        pr_debug("kmmio: secondary hit for 0x%08lx CPU %d.\n",
                                        addr, smp_processor_id());

                        if (!faultpage->old_presence)
                                pr_info("kmmio: unexpected secondary hit for "
                                        "address 0x%08lx on CPU %d.\n", addr,
                                        smp_processor_id());
                } else {
                        /*
                         * Prevent overwriting an already in-flight context.
                         * This should not happen; let's hope disarming at
                         * least prevents a panic.
                         */
                        pr_emerg("kmmio: recursive probe hit on CPU %d, "
                                        "for address 0x%08lx. Ignoring.\n",
                                        smp_processor_id(), addr);
                        pr_emerg("kmmio: previous hit was at 0x%08lx.\n",
                                        ctx->addr);
                        disarm_kmmio_fault_page(faultpage);
                }
                goto no_kmmio_ctx;
        }
        ctx->active++;

        ctx->fpage = faultpage;
        ctx->probe = get_kmmio_probe(addr);
        ctx->saved_flags = (regs->flags & (X86_EFLAGS_TF | X86_EFLAGS_IF));
        ctx->addr = addr;

        if (ctx->probe && ctx->probe->pre_handler)
                ctx->probe->pre_handler(ctx->probe, regs, addr);

        /*
         * Enable single-stepping and disable interrupts for the faulting
         * context. Local interrupts must not get enabled during stepping.
         */
        regs->flags |= X86_EFLAGS_TF;
        regs->flags &= ~X86_EFLAGS_IF;

        /* Now we set the present bit in the PTE and single step. */
        disarm_kmmio_fault_page(ctx->fpage);

        /*
         * If another cpu accesses the same page while we are stepping,
         * the access will not be caught. It will simply succeed and the
         * only downside is we lose the event. If this becomes a problem,
         * the user should drop to a single cpu before tracing.
         */

        put_cpu_var(kmmio_ctx);
        return 1; /* fault handled */

no_kmmio_ctx:
        put_cpu_var(kmmio_ctx);
no_kmmio:
        rcu_read_unlock();
        preempt_enable_no_resched();
        return ret;
}

/*
 * Interrupts are disabled on entry as trap1 is an interrupt gate
 * and they remain disabled throughout this function.
 * This must always get called as the pair to kmmio_handler().
 */
static int post_kmmio_handler(unsigned long condition, struct pt_regs *regs)
{
        int ret = 0;
        struct kmmio_context *ctx = &get_cpu_var(kmmio_ctx);

        if (!ctx->active) {
                pr_warning("kmmio: spurious debug trap on CPU %d.\n",
                                                smp_processor_id());
                goto out;
        }

        if (ctx->probe && ctx->probe->post_handler)
                ctx->probe->post_handler(ctx->probe, condition, regs);

        /* Prevent racing against release_kmmio_fault_page(). */
        spin_lock(&kmmio_lock);
        if (ctx->fpage->count)
                arm_kmmio_fault_page(ctx->fpage);
        spin_unlock(&kmmio_lock);

        regs->flags &= ~X86_EFLAGS_TF;
        regs->flags |= ctx->saved_flags;

        /* These were acquired in kmmio_handler(). */
        ctx->active--;
        BUG_ON(ctx->active);
        rcu_read_unlock();
        preempt_enable_no_resched();

        /*
         * If somebody else is single-stepping across a probe point, flags
         * will have TF set, in which case continue the remaining processing
         * of do_debug, as if this were not a probe hit.
         */
        if (!(regs->flags & X86_EFLAGS_TF))
                ret = 1;
out:
        put_cpu_var(kmmio_ctx);
        return ret;
}

/* You must be holding kmmio_lock. */
static int add_kmmio_fault_page(unsigned long page)
{
        struct kmmio_fault_page *f;

        page &= PAGE_MASK;
        f = get_kmmio_fault_page(page);
        if (f) {
                if (!f->count)
                        arm_kmmio_fault_page(f);
                f->count++;
                return 0;
        }

        f = kzalloc(sizeof(*f), GFP_ATOMIC);
        if (!f)
                return -1;

        f->count = 1;
        f->page = page;

        if (arm_kmmio_fault_page(f)) {
                kfree(f);
                return -1;
        }

        list_add_rcu(&f->list, kmmio_page_list(f->page));

        return 0;
}

/* You must be holding kmmio_lock. */
static void release_kmmio_fault_page(unsigned long page,
                                struct kmmio_fault_page **release_list)
{
        struct kmmio_fault_page *f;

        page &= PAGE_MASK;
        f = get_kmmio_fault_page(page);
        if (!f)
                return;

        f->count--;
        BUG_ON(f->count < 0);
        if (!f->count) {
                disarm_kmmio_fault_page(f);
                f->release_next = *release_list;
                *release_list = f;
        }
}

/*
 * With page-unaligned ioremaps, one or two armed pages may contain
 * addresses from outside the intended mapping. Events for these addresses
 * are currently silently dropped. The events may result only from programming
 * mistakes by accessing addresses before the beginning or past the end of a
 * mapping.
 */
int register_kmmio_probe(struct kmmio_probe *p)
{
        unsigned long flags;
        int ret = 0;
        unsigned long size = 0;
        const unsigned long size_lim = p->len + (p->addr & ~PAGE_MASK);

        spin_lock_irqsave(&kmmio_lock, flags);
        if (get_kmmio_probe(p->addr)) {
                ret = -EEXIST;
                goto out;
        }
        kmmio_count++;
        list_add_rcu(&p->list, &kmmio_probes);
        while (size < size_lim) {
                if (add_kmmio_fault_page(p->addr + size))
                        pr_err("kmmio: Unable to set page fault.\n");
                size += PAGE_SIZE;
        }
out:
        spin_unlock_irqrestore(&kmmio_lock, flags);
        /*
         * XXX: What should I do here?
         * Here was a call to global_flush_tlb(), but it does not exist
         * anymore. It seems it's not needed after all.
         */
        return ret;
}
EXPORT_SYMBOL(register_kmmio_probe);
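
/*
 * Example usage (an illustrative sketch only, not part of the original
 * file; the handler, the probe object, the physical address and the
 * ioremap below are hypothetical). The handler signature follows the
 * pre_handler call made in kmmio_handler() above.
 *
 *	static void my_pre(struct kmmio_probe *p, struct pt_regs *regs,
 *				unsigned long addr)
 *	{
 *		pr_info("mmio access at 0x%08lx\n", addr);
 *	}
 *
 *	static struct kmmio_probe my_probe = {
 *		.len = PAGE_SIZE,
 *		.pre_handler = my_pre,
 *	};
 *
 *	void __iomem *io = ioremap(phys_addr, PAGE_SIZE);
 *	my_probe.addr = (unsigned long)io;
 *	register_kmmio_probe(&my_probe); // 0 on success, -EEXIST if a
 *					 // probe already covers this addr
 */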

static void rcu_free_kmmio_fault_pages(struct rcu_head *head)
{
        struct kmmio_delayed_release *dr = container_of(
                                                head,
                                                struct kmmio_delayed_release,
                                                rcu);
        struct kmmio_fault_page *p = dr->release_list;
        while (p) {
                struct kmmio_fault_page *next = p->release_next;
                BUG_ON(p->count);
                kfree(p);
                p = next;
        }
        kfree(dr);
}

static void remove_kmmio_fault_pages(struct rcu_head *head)
{
        struct kmmio_delayed_release *dr =
                container_of(head, struct kmmio_delayed_release, rcu);
        struct kmmio_fault_page *p = dr->release_list;
        struct kmmio_fault_page **prevp = &dr->release_list;
        unsigned long flags;

        spin_lock_irqsave(&kmmio_lock, flags);
        while (p) {
                if (!p->count) {
                        list_del_rcu(&p->list);
                        prevp = &p->release_next;
                } else {
                        *prevp = p->release_next;
                }
                p = p->release_next;
        }
        spin_unlock_irqrestore(&kmmio_lock, flags);

        /* This is the real RCU destroy call. */
        call_rcu(&dr->rcu, rcu_free_kmmio_fault_pages);
}

/*
 * Remove a kmmio probe. You have to synchronize_rcu() before you can be
 * sure that the callbacks will not be called anymore. Only after that
 * you may actually release your struct kmmio_probe.
 *
 * Unregistering a kmmio fault page has three steps:
 * 1. release_kmmio_fault_page()
 *    Disarm the page, wait a grace period to let all faults finish.
 * 2. remove_kmmio_fault_pages()
 *    Remove the pages from kmmio_page_table.
 * 3. rcu_free_kmmio_fault_pages()
 *    Actually free the kmmio_fault_page structs, via RCU.
 */
void unregister_kmmio_probe(struct kmmio_probe *p)
{
        unsigned long flags;
        unsigned long size = 0;
        const unsigned long size_lim = p->len + (p->addr & ~PAGE_MASK);
        struct kmmio_fault_page *release_list = NULL;
        struct kmmio_delayed_release *drelease;

        spin_lock_irqsave(&kmmio_lock, flags);
        while (size < size_lim) {
                release_kmmio_fault_page(p->addr + size, &release_list);
                size += PAGE_SIZE;
        }
        list_del_rcu(&p->list);
        kmmio_count--;
        spin_unlock_irqrestore(&kmmio_lock, flags);

        drelease = kmalloc(sizeof(*drelease), GFP_ATOMIC);
        if (!drelease) {
                pr_crit("kmmio: leaking kmmio_fault_page objects.\n");
                return;
        }
        drelease->release_list = release_list;

        /*
         * This is not really RCU here. We have just disarmed a set of
         * pages so that they cannot trigger page faults anymore. However,
         * we cannot remove the pages from kmmio_page_table,
         * because a probe hit might be in flight on another CPU. The
         * pages are collected into a list, and they will be removed from
         * kmmio_page_table when it is certain that no probe hit related to
         * these pages can be in flight. An RCU grace period sounds like a
         * good choice.
         *
         * If we removed the pages too early, the kmmio page fault handler
         * might not find the respective kmmio_fault_page and determine it's
         * not a kmmio fault, when it actually is. This would lead to madness.
         */
        call_rcu(&drelease->rcu, remove_kmmio_fault_pages);
}
EXPORT_SYMBOL(unregister_kmmio_probe);
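
/*
 * Example teardown (an illustrative sketch only; reuses the hypothetical
 * my_probe and io from the registration example above). As the comment
 * before unregister_kmmio_probe() says, the probe may only be released
 * after a grace period:
 *
 *	unregister_kmmio_probe(&my_probe);
 *	synchronize_rcu();	// wait for in-flight handlers to finish
 *	iounmap(io);		// only now free/reuse my_probe and mapping
 */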

static int kmmio_die_notifier(struct notifier_block *nb, unsigned long val,
                                                                void *args)
{
        struct die_args *arg = args;

        if (val == DIE_DEBUG && (arg->err & DR_STEP))
                if (post_kmmio_handler(arg->err, arg->regs) == 1)
                        return NOTIFY_STOP;

        return NOTIFY_DONE;
}

static struct notifier_block nb_die = {
        .notifier_call = kmmio_die_notifier
};

static int __init init_kmmio(void)
{
        int i;
        for (i = 0; i < KMMIO_PAGE_TABLE_SIZE; i++)
                INIT_LIST_HEAD(&kmmio_page_table[i]);
        return register_die_notifier(&nb_die);
}
fs_initcall(init_kmmio); /* should be before device_initcall() */