Mike Dodd | 8cfa702 | 2010-11-17 11:12:26 -0800 | [diff] [blame] | 1 | /** |
| 2 | * @file compat22.h |
 * Compatibility functions for 2.2 kernels
| 4 | * |
| 5 | * @remark Copyright 2002 OProfile authors |
| 6 | * @remark Read the file COPYING |
| 7 | * |
| 8 | * @author John Levon |
| 9 | * @author Philippe Elie |
| 10 | */ |
| 11 | |
| 12 | #ifndef COMPAT22_H |
| 13 | #define COMPAT22_H |
| 14 | |
| 15 | #include <linux/smp_lock.h> |
| 16 | |
/* 2.4 renamed the IRQ on/off primitives; map the newer names onto the
 * 2.2 __cli()/__sti() built-ins used throughout this module. */
#define local_irq_disable() __cli()
#define local_irq_enable() __sti()
#define pte_page_address(a) pte_page(a)

/* 2.2's vm_area_struct stores the file offset as vm_offset. */
#define GET_VM_OFFSET(v) ((v)->vm_offset)
/* MODULE_LICENSE does not exist on 2.2; expand to nothing. */
#define MODULE_LICENSE(l)
/* Flag for the rest of the module: 2.2-style dentry walking is needed. */
#define NEED_2_2_DENTRIES
/* 2.2 modules must do explicit use counting. */
#define INC_USE_COUNT_MAYBE MOD_INC_USE_COUNT
#define DEC_USE_COUNT_MAYBE MOD_DEC_USE_COUNT
/* execve paths are serialized with the big kernel lock on 2.2. */
#define lock_execve lock_kernel
#define unlock_execve unlock_kernel
| 28 | |
/* The mmap lists are BKL-protected on 2.2 (no mmap_sem to take here),
 * so these lock/unlock hooks are deliberate no-ops. */
#define lock_mmap(mm) do {} while (0)
#define unlock_mmap(mm) do {} while (0)
| 32 | |
/* on 2.2 we use pid as tgid; thread separation is possible but
 * each thread is in its own thread group */
| 35 | static inline pid_t op_get_tgid(void) |
| 36 | { |
| 37 | return current->pid; |
| 38 | } |
| 39 | |
/* The 2.2 wake_up() path does not disable interrupts around wait-queue
 * manipulation, so force them off here to keep the queue consistent
 * against wakeups arriving from interrupt context.
 * NOTE(review): save_flags/cli only mask local interrupts — the 2.2-era
 * UP idiom; this does not serialize against other CPUs.
 */
static inline void oprof_wake_up(struct wait_queue **q)
{
	unsigned long flags;
	save_flags(flags);	/* remember current IRQ state */
	cli();			/* mask local interrupts */
	wake_up(q);
	restore_flags(flags);	/* restore saved IRQ state */
}
| 51 | |
/* 2.2 dentry walking is done by oprofile's own helpers (see
 * NEED_2_2_DENTRIES above); the macro wrappers drop the extra
 * vfsmount/root arguments that the 2.4 call sites pass. */
extern int wind_dentries_2_2(struct dentry * dentry);
extern uint do_path_hash_2_2(struct dentry * dentry);
#define wind_dentries(d, v, r, m) wind_dentries_2_2(d)
#define hash_path(f) do_path_hash_2_2((f)->f_dentry)
| 56 | |
| 57 | static inline void lock_out_mmap(void) |
| 58 | { |
| 59 | lock_kernel(); |
| 60 | down(¤t->mm->mmap_sem); |
| 61 | } |
| 62 | |
| 63 | static inline void unlock_out_mmap(void) |
| 64 | { |
| 65 | unlock_kernel(); |
| 66 | up(¤t->mm->mmap_sem); |
| 67 | } |
| 68 | |
/* 2.2's request_region returns void, so a compat wrapper (defined
 * elsewhere) provides a checkable return value. */
#define request_region_check compat_request_region
void * compat_request_region (unsigned long start, unsigned long n, char const * name);

/* no __exit section support on 2.2; discard the attribute */
#define __exit

/* 2.2 maps a virtual address to a page number via MAP_NR */
#define virt_to_page(va) MAP_NR(va)

/* 2.2 has no cpu_number_map on UP */
#ifdef CONFIG_SMP
#define op_cpu_id() cpu_number_map[smp_processor_id()]
#else
#define op_cpu_id() smp_processor_id()
#endif /* CONFIG_SMP */
| 83 | |
| 84 | /* provide a working smp_call_function */ |
| 85 | #if !defined(CONFIG_SMP) |
| 86 | |
| 87 | #undef smp_call_function |
/* UP stub: there are no other CPUs to run func on, so report success
 * without doing anything. */
static inline int smp_call_function (void (*func) (void * info), void * info,
				     int retry, int wait)
{
	return 0;
}
| 93 | |
| 94 | #endif /* !CONFIG_SMP */ |
| 95 | |
#if V_BEFORE(2, 2, 18)

/* 2.2.18 introduced module_init */
/* Not sure what version symbol aliases were introduced in, but they are
 * certainly present in gcc 2.91.66 (egcs). */
#if __GNUC__ > 2 || (__GNUC__ == 2 && __GNUC_MINOR__ >= 91)
#define module_init(x) int init_module(void) __attribute__((alias(#x)));
#define module_exit(x) void cleanup_module(void) __attribute__((alias(#x)));
#else
/* older gcc: emit real wrapper functions instead of symbol aliases */
#define module_init(x) int init_module(void) { return x(); }
#define module_exit(x) void cleanup_module(void) { x(); }
#endif

/* 2.2.18 introduced vmalloc_32 */
#define vmalloc_32 vmalloc

/* 2.2.18 added the doubly-linked-list wait queue and mutex initializers */
#define DECLARE_WAIT_QUEUE_HEAD(q) struct wait_queue * q = NULL
#define DECLARE_MUTEX(foo) struct semaphore foo = MUTEX

/* 2.2.18 added THIS_MODULE */
#define THIS_MODULE (&__this_module)

/* 2.2.18 added BUG().  FIXME: "ud2" is x86-specific; should this use a
 * portable trap such as *(char *)0 = 0 instead? */
#define BUG() __asm__ __volatile__("ud2\n");

#endif /* V_BEFORE(2,2,18) */
| 123 | |
/* 2.2.18 introduced the rtc lock; without it, fall back to masking
 * local interrupts while touching the RTC (UP-safe only). */
#ifdef RTC_LOCK
#define lock_rtc(f) spin_lock_irqsave(&rtc_lock, f)
#define unlock_rtc(f) spin_unlock_irqrestore(&rtc_lock, f)
#else
#define lock_rtc(f) do { save_flags(f); cli(); } while (0)
#define unlock_rtc(f) restore_flags(f)
#endif /* RTC_LOCK */
| 132 | |
/* 2.2.20 moved the single-step flag from task->flags (PF_DTRACE) to
 * task->ptrace (PT_DTRACE); clear whichever this kernel uses. */
#if V_AT_LEAST(2, 2, 20)
#define PTRACE_OFF(t) ((t)->ptrace &= ~PT_DTRACE)
#else
#define PTRACE_OFF(t) ((t)->flags &= ~PF_DTRACE)
#endif
| 138 | |
/* 2.2.21 introduced cpuid_edx */
#if V_BEFORE(2, 2, 21)
/* Return the EDX register from CPUID leaf 'op'.  The "0" input constraint
 * feeds op into EAX; CPUID also clobbers EBX/ECX, hence the "bx"/"cx"
 * clobber list.  EAX must be declared as an output even though its value
 * is unused. */
static inline unsigned int cpuid_edx(unsigned int op)
{
	unsigned int eax, edx;

	__asm__("cpuid"
		: "=a" (eax), "=d" (edx)
		: "0" (op)
		: "bx", "cx");
	return edx;
}
#endif
| 152 | |
| 153 | #endif /* COMPAT22_H */ |