/*
 * include/asm-sh/cpu-sh4/sq.h
 *
 * Copyright (C) 2001, 2002, 2003 Paul Mundt
 * Copyright (C) 2001, 2002 M. R. Brown
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 */
#ifndef __ASM_CPU_SH4_SQ_H
#define __ASM_CPU_SH4_SQ_H

#include <asm/addrspace.h>
#include <asm/page.h>

/*
 * Store queues range from e0000000-e3fffffc, allowing approx. 64MB to be
 * mapped to any physical address space. Since data is written (and aligned)
 * to 32-byte boundaries, we need to be sure that all allocations are aligned.
 */
#define SQ_SIZE			32
#define SQ_ALIGN_MASK		(~(SQ_SIZE - 1))
#define SQ_ALIGN(addr)		(((addr)+SQ_SIZE-1) & SQ_ALIGN_MASK)
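/*
 * Worked example (illustrative only, not from the original header):
 * SQ_ALIGN(0x61) evaluates to (0x61 + 31) & ~31 == 0x80, rounding the
 * address up to the next 32-byte boundary; an already-aligned address
 * is returned unchanged.
 */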

#define SQ_QACR0		(P4SEG_REG_BASE  + 0x38)
#define SQ_QACR1		(P4SEG_REG_BASE  + 0x3c)
#define SQ_ADDRMAX		(P4SEG_STORE_QUE + 0x04000000)

/* arch/sh/kernel/cpu/sh4/sq.c */
unsigned long sq_remap(unsigned long phys, unsigned int size,
		       const char *name, pgprot_t prot);
void sq_unmap(unsigned long vaddr);
void sq_flush_range(unsigned long start, unsigned int len);
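/*
 * Minimal usage sketch (illustrative only; error handling is omitted and
 * 'phys', 'src' and 'len' are hypothetical caller-supplied values): map a
 * physical region through the store queues, write through the returned
 * virtual address, flush the written data out, then unmap.
 *
 *	unsigned long sq_vaddr;
 *
 *	sq_vaddr = sq_remap(phys, len, "example", PAGE_SHARED);
 *	memcpy((void *)sq_vaddr, src, len);
 *	sq_flush_range(sq_vaddr, len);
 *	sq_unmap(sq_vaddr);
 */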

#endif /* __ASM_CPU_SH4_SQ_H */