/*
 * Copyright 2010 Tilera Corporation. All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation, version 2.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
 * NON INFRINGEMENT.  See the GNU General Public License for
 * more details.
 */

#ifndef _ASM_TILE_CACHE_H
#define _ASM_TILE_CACHE_H

#include <arch/chip.h>

/* bytes per L1 data cache line */
#define L1_CACHE_SHIFT		CHIP_L1D_LOG_LINE_SIZE()
#define L1_CACHE_BYTES		(1 << L1_CACHE_SHIFT)

/* bytes per L2 cache line */
#define L2_CACHE_SHIFT		CHIP_L2_LOG_LINE_SIZE()
#define L2_CACHE_BYTES		(1 << L2_CACHE_SHIFT)
#define L2_CACHE_ALIGN(x)	(((x)+(L2_CACHE_BYTES-1)) & -L2_CACHE_BYTES)
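
/*
 * Worked example (illustrative, not part of the original header):
 * assuming a 64-byte L2 line (L2_CACHE_SHIFT == 6), L2_CACHE_ALIGN(100)
 * evaluates to (100 + 63) & -64 == 128, i.e. a size is rounded up to
 * the next L2 cache line boundary.
 */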

/*
 * TILEPro I/O is not always coherent (networking typically uses coherent
 * I/O, but PCI traffic does not), and setting ARCH_DMA_MINALIGN to the
 * L2 cacheline size helps ensure that kernel heap allocations are aligned.
 * TILE-Gx I/O is always coherent when used on hash-for-home pages.
 *
 * However, it's possible at runtime to request not to use hash-for-home
 * for the kernel heap, in which case the kernel will use flush-and-inval
 * to manage coherence.  As a result, we use L2_CACHE_BYTES for the
 * DMA minimum alignment to avoid false sharing in the kernel heap.
 */
#define ARCH_DMA_MINALIGN	L2_CACHE_BYTES
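
/*
 * Sketch of the effect (illustrative; the variable names below are
 * hypothetical): with ARCH_DMA_MINALIGN set to the L2 line size, the
 * slab allocator returns kmalloc() buffers aligned to at least
 * L2_CACHE_BYTES, so a buffer used for non-coherent DMA never shares
 * an L2 line with a neighboring heap object:
 *
 *	void *dma_buf = kmalloc(len, GFP_KERNEL);
 *	// dma_buf is L2_CACHE_BYTES-aligned, so flush-and-inval on
 *	// it cannot clobber adjacent allocations.
 */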

/* use the cache line size for the L2, which is where it counts */
#define SMP_CACHE_BYTES_SHIFT	L2_CACHE_SHIFT
#define SMP_CACHE_BYTES		L2_CACHE_BYTES
#define INTERNODE_CACHE_SHIFT	L2_CACHE_SHIFT
#define INTERNODE_CACHE_BYTES	L2_CACHE_BYTES

/* Group together read-mostly things to avoid cache false sharing */
#define __read_mostly __attribute__((__section__(".data..read_mostly")))
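
/*
 * Hypothetical usage (not from the original file): data that is
 * written rarely but read on hot paths can be tagged so it lands in
 * .data..read_mostly, away from frequently-written variables:
 *
 *	static unsigned int poll_interval __read_mostly = 100;
 */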

/*
 * Originally we used small TLB pages for kernel data and grouped some
 * things together as "ro-after-init", enforcing the property at the end
 * of initialization by making those pages read-only and non-coherent.
 * This allowed better cache utilization, since cache inclusion did not
 * need to be maintained.  However, doing this requires an extra TLB
 * entry, which on balance is more of a performance hit than the
 * non-coherence is a performance gain, so we now just make "read
 * mostly" and "ro-after-init" be synonyms.  We keep the attribute
 * separate in case we change our minds at a future date.
 */
#define __ro_after_init __read_mostly
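
/*
 * Hypothetical usage (illustrative only): data initialized once during
 * boot and never written again; on this architecture it simply shares
 * the read-mostly section rather than being write-protected:
 *
 *	static unsigned long feature_mask __ro_after_init;
 */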

#endif /* _ASM_TILE_CACHE_H */