/*
 * linux/arch/m68k/mm/sun3dvma.c
 *
 * Copyright (C) 2000 Sam Creasey
 *
 * Contains common routines for sun3/sun3x DVMA management.
 */

#include <linux/config.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/list.h>

#include <asm/page.h>
#include <asm/pgtable.h>
#include <asm/dvma.h>

#undef DVMA_DEBUG

#ifdef CONFIG_SUN3X
extern void dvma_unmap_iommu(unsigned long baddr, int len);
#else
static inline void dvma_unmap_iommu(unsigned long a, int b)
{
}
#endif

#ifdef CONFIG_SUN3
extern void sun3_dvma_init(void);
#endif

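/*
 * Per-page usage tracking for the DVMA region: dvma_entry_use(baddr)
 * holds the length of the allocation that starts at that bus address,
 * and is zero for pages that do not start an allocation.
 */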
unsigned long iommu_use[IOMMU_TOTAL_ENTRIES];

#define dvma_index(baddr) ((baddr - DVMA_START) >> DVMA_PAGE_SHIFT)

#define dvma_entry_use(baddr) (iommu_use[dvma_index(baddr)])

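/*
 * Free DVMA space is tracked as a list of holes.  hole_list holds the
 * holes that currently describe free space; hole_cache holds unused
 * hole descriptors, seeded from the static initholes[] array.
 */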
struct hole {
	unsigned long start;
	unsigned long end;
	unsigned long size;
	struct list_head list;
};

static struct list_head hole_list;
static struct list_head hole_cache;
static struct hole initholes[64];

#ifdef DVMA_DEBUG

static unsigned long dvma_allocs;
static unsigned long dvma_frees;
static unsigned long long dvma_alloc_bytes;
static unsigned long long dvma_free_bytes;

static void print_use(void)
{

	int i;
	int j = 0;

	printk("dvma entry usage:\n");

	for(i = 0; i < IOMMU_TOTAL_ENTRIES; i++) {
		if(!iommu_use[i])
			continue;

		j++;

		printk("dvma entry: %08lx len %08lx\n",
		       ( i << DVMA_PAGE_SHIFT) + DVMA_START,
		       iommu_use[i]);
	}

	printk("%d entries in use total\n", j);

	printk("allocation/free calls: %lu/%lu\n", dvma_allocs, dvma_frees);
	printk("allocation/free bytes: %Lx/%Lx\n", dvma_alloc_bytes,
	       dvma_free_bytes);
}

static void print_holes(struct list_head *holes)
{

	struct list_head *cur;
	struct hole *hole;

	printk("listing dvma holes\n");
	list_for_each(cur, holes) {
		hole = list_entry(cur, struct hole, list);

		if((hole->start == 0) && (hole->end == 0) && (hole->size == 0))
			continue;

		printk("hole: start %08lx end %08lx size %08lx\n", hole->start, hole->end, hole->size);
	}

	printk("end of hole listing...\n");

}
#endif /* DVMA_DEBUG */

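/*
 * Merge adjacent holes on hole_list and return the spare descriptors
 * to hole_cache.  Returns the number of descriptors recovered.
 */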
static inline int refill(void)
{

	struct hole *hole;
	struct hole *prev = NULL;
	struct list_head *cur;
	int ret = 0;

	list_for_each(cur, &hole_list) {
		hole = list_entry(cur, struct hole, list);

		if(!prev) {
			prev = hole;
			continue;
		}

		if(hole->end == prev->start) {
			hole->size += prev->size;
			hole->end = prev->end;
			list_del(&(prev->list));
			list_add(&(prev->list), &hole_cache);
			ret++;
		}

	}

	return ret;
}

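/*
 * Take a hole descriptor from hole_cache, merging holes to refill the
 * cache if it is empty.  BUG()s if no descriptor can be recovered.
 */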
static inline struct hole *rmcache(void)
{
	struct hole *ret;

	if(list_empty(&hole_cache)) {
		if(!refill()) {
			printk("out of dvma hole cache!\n");
			BUG();
		}
	}

	ret = list_entry(hole_cache.next, struct hole, list);
	list_del(&(ret->list));

	return ret;

}

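/*
 * Allocate len bytes of DVMA space, carving the allocation off the end
 * of the first hole that is large enough (or consuming the hole
 * entirely on an exact fit).  Alignments larger than a DVMA page are
 * handled by padding the request.  The length is recorded via
 * dvma_entry_use() so free_baddr() can find it later.  Returns the
 * bus address of the allocation.
 */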
static inline unsigned long get_baddr(int len, unsigned long align)
{

	struct list_head *cur;
	struct hole *hole;

	if(list_empty(&hole_list)) {
#ifdef DVMA_DEBUG
		printk("out of dvma holes! (printing hole cache)\n");
		print_holes(&hole_cache);
		print_use();
#endif
		BUG();
	}

	list_for_each(cur, &hole_list) {
		unsigned long newlen;

		hole = list_entry(cur, struct hole, list);

		if(align > DVMA_PAGE_SIZE)
			newlen = len + ((hole->end - len) & (align-1));
		else
			newlen = len;

		if(hole->size > newlen) {
			hole->end -= newlen;
			hole->size -= newlen;
			dvma_entry_use(hole->end) = newlen;
#ifdef DVMA_DEBUG
			dvma_allocs++;
			dvma_alloc_bytes += newlen;
#endif
			return hole->end;
		} else if(hole->size == newlen) {
			list_del(&(hole->list));
			list_add(&(hole->list), &hole_cache);
			dvma_entry_use(hole->start) = newlen;
#ifdef DVMA_DEBUG
			dvma_allocs++;
			dvma_alloc_bytes += newlen;
#endif
			return hole->start;
		}

	}

	printk("unable to find dvma hole!\n");
	BUG();
	return 0;
}

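/*
 * Return a DVMA range to the free list: look up its length in
 * iommu_use[], unmap it from the IOMMU, then grow an adjacent hole or
 * insert a fresh hole descriptor taken from the cache.
 */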
static inline int free_baddr(unsigned long baddr)
{

	unsigned long len;
	struct hole *hole;
	struct list_head *cur;
	unsigned long orig_baddr;

	orig_baddr = baddr;
	len = dvma_entry_use(baddr);
	dvma_entry_use(baddr) = 0;
	baddr &= DVMA_PAGE_MASK;
	dvma_unmap_iommu(baddr, len);

#ifdef DVMA_DEBUG
	dvma_frees++;
	dvma_free_bytes += len;
#endif

	list_for_each(cur, &hole_list) {
		hole = list_entry(cur, struct hole, list);

		if(hole->end == baddr) {
			hole->end += len;
			hole->size += len;
			return 0;
		} else if(hole->start == (baddr + len)) {
			hole->start = baddr;
			hole->size += len;
			return 0;
		}

	}

	hole = rmcache();

	hole->start = baddr;
	hole->end = baddr + len;
	hole->size = len;

//	list_add_tail(&(hole->list), cur);
	list_add(&(hole->list), cur);

	return 0;

}

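/*
 * Initialise the hole lists, seed the descriptor cache from
 * initholes[], describe the whole DVMA region as a single free hole
 * and clear out any stale IOMMU mappings.
 */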
void dvma_init(void)
{

	struct hole *hole;
	int i;

	INIT_LIST_HEAD(&hole_list);
	INIT_LIST_HEAD(&hole_cache);

	/* prepare the hole cache */
	for(i = 0; i < 64; i++)
		list_add(&(initholes[i].list), &hole_cache);

	hole = rmcache();
	hole->start = DVMA_START;
	hole->end = DVMA_END;
	hole->size = DVMA_SIZE;

	list_add(&(hole->list), &hole_list);

	memset(iommu_use, 0, sizeof(iommu_use));

	dvma_unmap_iommu(DVMA_START, DVMA_SIZE);

#ifdef CONFIG_SUN3
	sun3_dvma_init();
#endif

}

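/*
 * Map len bytes starting at kernel address kaddr into DVMA space with
 * the requested alignment (rounded up to DVMA pages).  Returns the bus
 * address of the mapping, or 0 if kaddr is invalid.
 */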
inline unsigned long dvma_map_align(unsigned long kaddr, int len, int align)
{

	unsigned long baddr;
	unsigned long off;

	if(!len)
		len = 0x800;

	if(!kaddr || !len) {
//		printk("error: kaddr %lx len %x\n", kaddr, len);
//		*(int *)4 = 0;
		return 0;
	}

#ifdef DEBUG
	printk("dvma_map request %08lx bytes from %08lx\n",
	       len, kaddr);
#endif
	off = kaddr & ~DVMA_PAGE_MASK;
	kaddr &= PAGE_MASK;
	len += off;
	len = ((len + (DVMA_PAGE_SIZE-1)) & DVMA_PAGE_MASK);

	if(align == 0)
		align = DVMA_PAGE_SIZE;
	else
		align = ((align + (DVMA_PAGE_SIZE-1)) & DVMA_PAGE_MASK);

	baddr = get_baddr(len, align);
//	printk("using baddr %lx\n", baddr);

	if(!dvma_map_iommu(kaddr, baddr, len))
		return (baddr + off);

	printk("dvma_map failed kaddr %lx baddr %lx len %x\n", kaddr, baddr, len);
	BUG();
	return 0;
}

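/*
 * Release a DVMA mapping given its bus address.  Addresses with no
 * bits set in 0x00f00000 are treated as VME mappings and have those
 * bits set before the range is freed.
 */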
void dvma_unmap(void *baddr)
{
	unsigned long addr;

	addr = (unsigned long)baddr;
	/* check if this is a vme mapping */
	if(!(addr & 0x00f00000))
		addr |= 0xf00000;

	free_baddr(addr);

	return;

}


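/*
 * Allocate len bytes of kernel memory and map it into DVMA space with
 * the requested alignment, also mapping it into the CPU's view of the
 * DVMA area.  Returns the CPU virtual address of the mapping, or NULL
 * on failure.
 */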
void *dvma_malloc_align(unsigned long len, unsigned long align)
{
	unsigned long kaddr;
	unsigned long baddr;
	unsigned long vaddr;

	if(!len)
		return NULL;

#ifdef DEBUG
	printk("dvma_malloc request %lx bytes\n", len);
#endif
	len = ((len + (DVMA_PAGE_SIZE-1)) & DVMA_PAGE_MASK);

	if((kaddr = __get_free_pages(GFP_ATOMIC, get_order(len))) == 0)
		return NULL;

	if((baddr = (unsigned long)dvma_map_align(kaddr, len, align)) == 0) {
		free_pages(kaddr, get_order(len));
		return NULL;
	}

	vaddr = dvma_btov(baddr);

	if(dvma_map_cpu(kaddr, vaddr, len) < 0) {
		dvma_unmap((void *)baddr);
		free_pages(kaddr, get_order(len));
		return NULL;
	}

#ifdef DEBUG
	printk("mapped %08lx bytes %08lx kern -> %08lx bus\n",
	       len, kaddr, baddr);
#endif

	return (void *)vaddr;

}

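/* DVMA allocations are never reclaimed; freeing is a no-op. */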
void dvma_free(void *vaddr)
{

	return;

}