Eric Anholt | 673a394 | 2008-07-30 12:06:12 -0700 | [diff] [blame] | 1 | /************************************************************************** |
| 2 | * |
| 3 | * Copyright (c) 2006-2007 Tungsten Graphics, Inc., Cedar Park, TX., USA |
| 4 | * All Rights Reserved. |
| 5 | * |
| 6 | * Permission is hereby granted, free of charge, to any person obtaining a |
| 7 | * copy of this software and associated documentation files (the |
| 8 | * "Software"), to deal in the Software without restriction, including |
| 9 | * without limitation the rights to use, copy, modify, merge, publish, |
| 10 | * distribute, sub license, and/or sell copies of the Software, and to |
| 11 | * permit persons to whom the Software is furnished to do so, subject to |
| 12 | * the following conditions: |
| 13 | * |
| 14 | * The above copyright notice and this permission notice (including the |
| 15 | * next paragraph) shall be included in all copies or substantial portions |
| 16 | * of the Software. |
| 17 | * |
| 18 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR |
| 19 | * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, |
| 20 | * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL |
| 21 | * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, |
| 22 | * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR |
| 23 | * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE |
| 24 | * USE OR OTHER DEALINGS IN THE SOFTWARE. |
| 25 | * |
| 26 | **************************************************************************/ |
| 27 | /* |
| 28 | * Authors: Thomas Hellström <thomas-at-tungstengraphics-dot-com> |
| 29 | */ |
| 30 | |
Paul Gortmaker | 2d1a8a4 | 2011-08-30 18:16:33 -0400 | [diff] [blame] | 31 | #include <linux/export.h> |
David Howells | 760285e | 2012-10-02 18:01:07 +0100 | [diff] [blame] | 32 | #include <drm/drmP.h> |
Eric Anholt | 673a394 | 2008-07-30 12:06:12 -0700 | [diff] [blame] | 33 | |
| 34 | #if defined(CONFIG_X86) |
Ben Widawsky | b04d4a3 | 2014-12-15 12:26:46 -0800 | [diff] [blame] | 35 | #include <asm/smp.h> |
Ross Zwisler | 2a0c772 | 2014-02-26 12:06:51 -0700 | [diff] [blame] | 36 | |
| 37 | /* |
| 38 | * clflushopt is an unordered instruction which needs fencing with mfence or |
| 39 | * sfence to avoid ordering issues. For drm_clflush_page this fencing happens |
| 40 | * in the caller. |
| 41 | */ |
Eric Anholt | 673a394 | 2008-07-30 12:06:12 -0700 | [diff] [blame] | 42 | static void |
| 43 | drm_clflush_page(struct page *page) |
| 44 | { |
| 45 | uint8_t *page_virtual; |
| 46 | unsigned int i; |
Dave Airlie | 87229ad | 2012-09-19 11:12:41 +1000 | [diff] [blame] | 47 | const int size = boot_cpu_data.x86_clflush_size; |
Eric Anholt | 673a394 | 2008-07-30 12:06:12 -0700 | [diff] [blame] | 48 | |
| 49 | if (unlikely(page == NULL)) |
| 50 | return; |
| 51 | |
Cong Wang | 1c9c20f | 2011-11-25 23:14:20 +0800 | [diff] [blame] | 52 | page_virtual = kmap_atomic(page); |
Dave Airlie | 87229ad | 2012-09-19 11:12:41 +1000 | [diff] [blame] | 53 | for (i = 0; i < PAGE_SIZE; i += size) |
Ross Zwisler | 2a0c772 | 2014-02-26 12:06:51 -0700 | [diff] [blame] | 54 | clflushopt(page_virtual + i); |
Cong Wang | 1c9c20f | 2011-11-25 23:14:20 +0800 | [diff] [blame] | 55 | kunmap_atomic(page_virtual); |
Eric Anholt | 673a394 | 2008-07-30 12:06:12 -0700 | [diff] [blame] | 56 | } |
Eric Anholt | 673a394 | 2008-07-30 12:06:12 -0700 | [diff] [blame] | 57 | |
/*
 * Flush an array of pages with clflushopt, bracketing the whole run
 * with memory barriers to order the unordered clflushopt instructions.
 */
static void drm_cache_flush_clflush(struct page *pages[],
				    unsigned long num_pages)
{
	unsigned long idx;

	mb();
	for (idx = 0; idx < num_pages; idx++)
		drm_clflush_page(pages[idx]);
	mb();
}
Dave Airlie | c9c97b8 | 2009-08-27 09:53:47 +1000 | [diff] [blame] | 68 | #endif |
Dave Airlie | ed017d9 | 2009-09-02 09:41:13 +1000 | [diff] [blame] | 69 | |
/**
 * drm_clflush_pages - flush an array of pages out of the CPU caches
 * @pages: array of page pointers; NULL entries are skipped
 * @num_pages: number of entries in @pages
 *
 * On x86 this prefers per-cacheline clflush when available and falls
 * back to a full wbinvd on every CPU otherwise.  On powerpc each page
 * is mapped and its data cache range flushed.  Other architectures are
 * unsupported and only warn.
 */
void
drm_clflush_pages(struct page *pages[], unsigned long num_pages)
{

#if defined(CONFIG_X86)
	if (cpu_has_clflush) {
		drm_cache_flush_clflush(pages, num_pages);
		return;
	}

	/* No clflush: invalidate the whole cache on every CPU instead. */
	if (wbinvd_on_all_cpus())
		printk(KERN_ERR "Timed out waiting for cache flush.\n");

#elif defined(__powerpc__)
	unsigned long i;
	for (i = 0; i < num_pages; i++) {
		struct page *page = pages[i];
		void *page_virtual;

		/* Sparse page arrays are allowed; skip holes. */
		if (unlikely(page == NULL))
			continue;

		page_virtual = kmap_atomic(page);
		flush_dcache_range((unsigned long)page_virtual,
				   (unsigned long)page_virtual + PAGE_SIZE);
		kunmap_atomic(page_virtual);
	}
#else
	printk(KERN_ERR "Architecture has no drm_cache.c support\n");
	WARN_ON_ONCE(1);
#endif
}
EXPORT_SYMBOL(drm_clflush_pages);
Daniel Vetter | 6d5cd9c | 2012-03-25 19:47:30 +0200 | [diff] [blame] | 103 | |
| 104 | void |
Chris Wilson | 9da3da6 | 2012-06-01 15:20:22 +0100 | [diff] [blame] | 105 | drm_clflush_sg(struct sg_table *st) |
| 106 | { |
| 107 | #if defined(CONFIG_X86) |
| 108 | if (cpu_has_clflush) { |
Imre Deak | f5ddf69 | 2013-02-18 19:28:01 +0200 | [diff] [blame] | 109 | struct sg_page_iter sg_iter; |
Chris Wilson | 9da3da6 | 2012-06-01 15:20:22 +0100 | [diff] [blame] | 110 | |
| 111 | mb(); |
Imre Deak | f5ddf69 | 2013-02-18 19:28:01 +0200 | [diff] [blame] | 112 | for_each_sg_page(st->sgl, &sg_iter, st->nents, 0) |
Imre Deak | 2db76d7 | 2013-03-26 15:14:18 +0200 | [diff] [blame] | 113 | drm_clflush_page(sg_page_iter_page(&sg_iter)); |
Chris Wilson | 9da3da6 | 2012-06-01 15:20:22 +0100 | [diff] [blame] | 114 | mb(); |
| 115 | |
| 116 | return; |
| 117 | } |
| 118 | |
Ben Widawsky | b04d4a3 | 2014-12-15 12:26:46 -0800 | [diff] [blame] | 119 | if (wbinvd_on_all_cpus()) |
Chris Wilson | 9da3da6 | 2012-06-01 15:20:22 +0100 | [diff] [blame] | 120 | printk(KERN_ERR "Timed out waiting for cache flush.\n"); |
| 121 | #else |
| 122 | printk(KERN_ERR "Architecture has no drm_cache.c support\n"); |
| 123 | WARN_ON_ONCE(1); |
| 124 | #endif |
| 125 | } |
| 126 | EXPORT_SYMBOL(drm_clflush_sg); |
| 127 | |
| 128 | void |
Ville Syrjälä | c2d1535 | 2014-04-01 12:59:08 +0300 | [diff] [blame] | 129 | drm_clflush_virt_range(void *addr, unsigned long length) |
Daniel Vetter | 6d5cd9c | 2012-03-25 19:47:30 +0200 | [diff] [blame] | 130 | { |
| 131 | #if defined(CONFIG_X86) |
| 132 | if (cpu_has_clflush) { |
Chris Wilson | afcd950 | 2015-06-10 15:58:01 +0100 | [diff] [blame] | 133 | const int size = boot_cpu_data.x86_clflush_size; |
Ville Syrjälä | c2d1535 | 2014-04-01 12:59:08 +0300 | [diff] [blame] | 134 | void *end = addr + length; |
Chris Wilson | afcd950 | 2015-06-10 15:58:01 +0100 | [diff] [blame] | 135 | addr = (void *)(((unsigned long)addr) & -size); |
Daniel Vetter | 6d5cd9c | 2012-03-25 19:47:30 +0200 | [diff] [blame] | 136 | mb(); |
Chris Wilson | afcd950 | 2015-06-10 15:58:01 +0100 | [diff] [blame] | 137 | for (; addr < end; addr += size) |
Ross Zwisler | 7927096 | 2014-05-14 09:41:12 -0600 | [diff] [blame] | 138 | clflushopt(addr); |
Daniel Vetter | 6d5cd9c | 2012-03-25 19:47:30 +0200 | [diff] [blame] | 139 | mb(); |
| 140 | return; |
| 141 | } |
| 142 | |
Ben Widawsky | b04d4a3 | 2014-12-15 12:26:46 -0800 | [diff] [blame] | 143 | if (wbinvd_on_all_cpus()) |
Daniel Vetter | 6d5cd9c | 2012-03-25 19:47:30 +0200 | [diff] [blame] | 144 | printk(KERN_ERR "Timed out waiting for cache flush.\n"); |
| 145 | #else |
| 146 | printk(KERN_ERR "Architecture has no drm_cache.c support\n"); |
| 147 | WARN_ON_ONCE(1); |
| 148 | #endif |
| 149 | } |
| 150 | EXPORT_SYMBOL(drm_clflush_virt_range); |