/**************************************************************************
 * Copyright (c) 2007, Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc.,
 * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
 *
 **************************************************************************/
#include <drm/drmP.h>
#include "psb_drv.h"
#include "psb_reg.h"

/*
 * Code for the SGX MMU:
 */

/*
 * clflush on one processor only:
 * clflush should apparently flush the cache line on all processors in an
 * SMP system.
 */

/*
 * kmap atomic:
 * The usage of the slots must be completely encapsulated within a spinlock,
 * and no other functions that may be using the lock for other purposes may
 * be called from within the locked region.
 * Since the slots are per processor, this will guarantee that we are the only
 * user.
 */
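
/*
 * In code, the rule above reads roughly like the following sketch (not a
 * driver entry point; "page" stands for any page table page):
 *
 *	spin_lock(&driver->lock);
 *	v = kmap_atomic(page);
 *	... modify the page through v, calling nothing that may take
 *	    the lock for another purpose ...
 *	kunmap_atomic(v);
 *	spin_unlock(&driver->lock);
 */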

/*
 * TODO: Inserting ptes from an interrupt handler:
 * This may be desirable for some SGX functionality where the GPU can fault in
 * needed pages. For that, we need to make an atomic insert_pages function that
 * may fail.
 * If it fails, the caller needs to insert the page using a workqueue function,
 * but on average it should be fast.
 */
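
/*
 * A hypothetical shape for that helper (nothing below is implemented in
 * this file): allocate page tables without sleeping and report failure
 * instead of blocking, e.g.
 *
 *	int psb_mmu_insert_pages_atomic(struct psb_mmu_pd *pd,
 *					struct page **pages,
 *					unsigned long address,
 *					uint32_t num_pages, int type)
 *	{
 *		...
 *		pt = psb_mmu_pt_alloc_map_lock_atomic(pd, addr);
 *		if (!pt)
 *			return -EBUSY;	caller retries from a workqueue
 *		...
 *	}
 */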

struct psb_mmu_driver {
	/* protects driver- and pd structures. Always take in read mode
	 * before taking the page table spinlock.
	 */
	struct rw_semaphore sem;

	/* protects page tables, directory tables and pt structures.
	 */
	spinlock_t lock;

	atomic_t needs_tlbflush;
	atomic_t *msvdx_mmu_invaldc;
	struct psb_mmu_pd *default_pd;
	uint32_t bif_ctrl;
	int has_clflush;
	int clflush_add;
	unsigned long clflush_mask;

	struct drm_device *dev;
};

struct psb_mmu_pd;

struct psb_mmu_pt {
	struct psb_mmu_pd *pd;
	uint32_t index;
	uint32_t count;
	struct page *p;
	uint32_t *v;
};

struct psb_mmu_pd {
	struct psb_mmu_driver *driver;
	int hw_context;
	struct psb_mmu_pt **tables;
	struct page *p;
	struct page *dummy_pt;
	struct page *dummy_page;
	uint32_t pd_mask;
	uint32_t invalid_pde;
	uint32_t invalid_pte;
};

static inline uint32_t psb_mmu_pt_index(uint32_t offset)
{
	return (offset >> PSB_PTE_SHIFT) & 0x3FF;
}

static inline uint32_t psb_mmu_pd_index(uint32_t offset)
{
	return offset >> PSB_PDE_SHIFT;
}
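
/*
 * Worked example (a sketch, assuming the usual two-level 4 KiB layout,
 * i.e. PSB_PDE_SHIFT == 22 and PSB_PTE_SHIFT == 12): for the device
 * virtual address 0x12345678, psb_mmu_pd_index() returns 0x48 and
 * psb_mmu_pt_index() returns 0x345.
 */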

#if defined(CONFIG_X86)
static inline void psb_clflush(void *addr)
{
	__asm__ __volatile__("clflush (%0)\n" : : "r"(addr) : "memory");
}

static inline void psb_mmu_clflush(struct psb_mmu_driver *driver, void *addr)
{
	if (!driver->has_clflush)
		return;

	mb();
	psb_clflush(addr);
	mb();
}
#else

static inline void psb_mmu_clflush(struct psb_mmu_driver *driver, void *addr)
{
}

#endif

static void psb_mmu_flush_pd_locked(struct psb_mmu_driver *driver, int force)
{
	struct drm_device *dev = driver->dev;
	struct drm_psb_private *dev_priv = dev->dev_private;

	if (atomic_read(&driver->needs_tlbflush) || force) {
		uint32_t val = PSB_RSGX32(PSB_CR_BIF_CTRL);
		PSB_WSGX32(val | _PSB_CB_CTRL_INVALDC, PSB_CR_BIF_CTRL);

		/* Make sure the invalidate write has reached the MMU
		   before deasserting it */
		wmb();
		PSB_WSGX32(val & ~_PSB_CB_CTRL_INVALDC, PSB_CR_BIF_CTRL);
		(void)PSB_RSGX32(PSB_CR_BIF_CTRL);
		if (driver->msvdx_mmu_invaldc)
			atomic_set(driver->msvdx_mmu_invaldc, 1);
	}
	atomic_set(&driver->needs_tlbflush, 0);
}

#if 0
static void psb_mmu_flush_pd(struct psb_mmu_driver *driver, int force)
{
	down_write(&driver->sem);
	psb_mmu_flush_pd_locked(driver, force);
	up_write(&driver->sem);
}
#endif

void psb_mmu_flush(struct psb_mmu_driver *driver)
{
	struct drm_device *dev = driver->dev;
	struct drm_psb_private *dev_priv = dev->dev_private;
	uint32_t val;

	down_write(&driver->sem);
	val = PSB_RSGX32(PSB_CR_BIF_CTRL);
	if (atomic_read(&driver->needs_tlbflush))
		PSB_WSGX32(val | _PSB_CB_CTRL_INVALDC, PSB_CR_BIF_CTRL);
	else
		PSB_WSGX32(val | _PSB_CB_CTRL_FLUSH, PSB_CR_BIF_CTRL);

	/* Make sure data cache is turned off and MMU is flushed before
	   restoring bank interface control register */
	wmb();
	PSB_WSGX32(val & ~(_PSB_CB_CTRL_FLUSH | _PSB_CB_CTRL_INVALDC),
		   PSB_CR_BIF_CTRL);
	(void)PSB_RSGX32(PSB_CR_BIF_CTRL);

	atomic_set(&driver->needs_tlbflush, 0);
	if (driver->msvdx_mmu_invaldc)
		atomic_set(driver->msvdx_mmu_invaldc, 1);
	up_write(&driver->sem);
}

void psb_mmu_set_pd_context(struct psb_mmu_pd *pd, int hw_context)
{
	struct drm_device *dev = pd->driver->dev;
	struct drm_psb_private *dev_priv = dev->dev_private;
	uint32_t offset = (hw_context == 0) ? PSB_CR_BIF_DIR_LIST_BASE0 :
			  PSB_CR_BIF_DIR_LIST_BASE1 + hw_context * 4;

	down_write(&pd->driver->sem);
	PSB_WSGX32(page_to_pfn(pd->p) << PAGE_SHIFT, offset);
	wmb();
	psb_mmu_flush_pd_locked(pd->driver, 1);
	pd->hw_context = hw_context;
	up_write(&pd->driver->sem);
}

static inline unsigned long psb_pd_addr_end(unsigned long addr,
					    unsigned long end)
{
	addr = (addr + PSB_PDE_MASK + 1) & ~PSB_PDE_MASK;
	return (addr < end) ? addr : end;
}
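
/*
 * Example (a sketch, assuming PSB_PDE_MASK == 0x3FFFFF, i.e. one page
 * directory entry spans 4 MiB): psb_pd_addr_end(0x00123000, 0x01000000)
 * returns 0x00400000, the start of the next directory-sized chunk.
 */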

static inline uint32_t psb_mmu_mask_pte(uint32_t pfn, int type)
{
	uint32_t mask = PSB_PTE_VALID;

	if (type & PSB_MMU_CACHED_MEMORY)
		mask |= PSB_PTE_CACHED;
	if (type & PSB_MMU_RO_MEMORY)
		mask |= PSB_PTE_RO;
	if (type & PSB_MMU_WO_MEMORY)
		mask |= PSB_PTE_WO;

	return (pfn << PAGE_SHIFT) | mask;
}
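
/*
 * For instance, psb_mmu_mask_pte(pfn, PSB_MMU_CACHED_MEMORY |
 * PSB_MMU_RO_MEMORY) evaluates to
 * (pfn << PAGE_SHIFT) | PSB_PTE_VALID | PSB_PTE_CACHED | PSB_PTE_RO.
 */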

struct psb_mmu_pd *psb_mmu_alloc_pd(struct psb_mmu_driver *driver,
				    int trap_pagefaults, int invalid_type)
{
	struct psb_mmu_pd *pd = kmalloc(sizeof(*pd), GFP_KERNEL);
	uint32_t *v;
	int i;

	if (!pd)
		return NULL;

	pd->p = alloc_page(GFP_DMA32);
	if (!pd->p)
		goto out_err1;
	pd->dummy_pt = alloc_page(GFP_DMA32);
	if (!pd->dummy_pt)
		goto out_err2;
	pd->dummy_page = alloc_page(GFP_DMA32);
	if (!pd->dummy_page)
		goto out_err3;

	if (!trap_pagefaults) {
		pd->invalid_pde = psb_mmu_mask_pte(page_to_pfn(pd->dummy_pt),
						   invalid_type);
		pd->invalid_pte = psb_mmu_mask_pte(page_to_pfn(pd->dummy_page),
						   invalid_type);
	} else {
		pd->invalid_pde = 0;
		pd->invalid_pte = 0;
	}

	v = kmap(pd->dummy_pt);
	for (i = 0; i < (PAGE_SIZE / sizeof(uint32_t)); ++i)
		v[i] = pd->invalid_pte;

	kunmap(pd->dummy_pt);

	v = kmap(pd->p);
	for (i = 0; i < (PAGE_SIZE / sizeof(uint32_t)); ++i)
		v[i] = pd->invalid_pde;

	kunmap(pd->p);

	clear_page(kmap(pd->dummy_page));
	kunmap(pd->dummy_page);

	pd->tables = vmalloc_user(sizeof(struct psb_mmu_pt *) * 1024);
	if (!pd->tables)
		goto out_err4;

	pd->hw_context = -1;
	pd->pd_mask = PSB_PTE_VALID;
	pd->driver = driver;

	return pd;

out_err4:
	__free_page(pd->dummy_page);
out_err3:
	__free_page(pd->dummy_pt);
out_err2:
	__free_page(pd->p);
out_err1:
	kfree(pd);
	return NULL;
}

static void psb_mmu_free_pt(struct psb_mmu_pt *pt)
{
	__free_page(pt->p);
	kfree(pt);
}

void psb_mmu_free_pagedir(struct psb_mmu_pd *pd)
{
	struct psb_mmu_driver *driver = pd->driver;
	struct drm_device *dev = driver->dev;
	struct drm_psb_private *dev_priv = dev->dev_private;
	struct psb_mmu_pt *pt;
	int i;

	down_write(&driver->sem);
	if (pd->hw_context != -1) {
		PSB_WSGX32(0, PSB_CR_BIF_DIR_LIST_BASE0 + pd->hw_context * 4);
		psb_mmu_flush_pd_locked(driver, 1);
	}

	/* Should take the spinlock here, but we don't need to do that
	   since we have the semaphore in write mode. */

	for (i = 0; i < 1024; ++i) {
		pt = pd->tables[i];
		if (pt)
			psb_mmu_free_pt(pt);
	}

	vfree(pd->tables);
	__free_page(pd->dummy_page);
	__free_page(pd->dummy_pt);
	__free_page(pd->p);
	kfree(pd);
	up_write(&driver->sem);
}

static struct psb_mmu_pt *psb_mmu_alloc_pt(struct psb_mmu_pd *pd)
{
	struct psb_mmu_pt *pt = kmalloc(sizeof(*pt), GFP_KERNEL);
	void *v;
	uint32_t clflush_add = pd->driver->clflush_add >> PAGE_SHIFT;
	uint32_t clflush_count = PAGE_SIZE / clflush_add;
	spinlock_t *lock = &pd->driver->lock;
	uint8_t *clf;
	uint32_t *ptes;
	int i;

	if (!pt)
		return NULL;

	pt->p = alloc_page(GFP_DMA32);
	if (!pt->p) {
		kfree(pt);
		return NULL;
	}

	spin_lock(lock);

	v = kmap_atomic(pt->p);
	clf = (uint8_t *) v;
	ptes = (uint32_t *) v;
	for (i = 0; i < (PAGE_SIZE / sizeof(uint32_t)); ++i)
		*ptes++ = pd->invalid_pte;

#if defined(CONFIG_X86)
	if (pd->driver->has_clflush && pd->hw_context != -1) {
		mb();
		for (i = 0; i < clflush_count; ++i) {
			psb_clflush(clf);
			clf += clflush_add;
		}
		mb();
	}
#endif
	kunmap_atomic(v);
	spin_unlock(lock);

	pt->count = 0;
	pt->pd = pd;
	pt->index = 0;

	return pt;
}

struct psb_mmu_pt *psb_mmu_pt_alloc_map_lock(struct psb_mmu_pd *pd,
					     unsigned long addr)
{
	uint32_t index = psb_mmu_pd_index(addr);
	struct psb_mmu_pt *pt;
	uint32_t *v;
	spinlock_t *lock = &pd->driver->lock;

	spin_lock(lock);
	pt = pd->tables[index];
	while (!pt) {
		spin_unlock(lock);
		pt = psb_mmu_alloc_pt(pd);
		if (!pt)
			return NULL;
		spin_lock(lock);

		if (pd->tables[index]) {
			spin_unlock(lock);
			psb_mmu_free_pt(pt);
			spin_lock(lock);
			pt = pd->tables[index];
			continue;
		}

		v = kmap_atomic(pd->p);
		pd->tables[index] = pt;
		v[index] = (page_to_pfn(pt->p) << 12) | pd->pd_mask;
		pt->index = index;
		kunmap_atomic((void *) v);

		if (pd->hw_context != -1) {
			psb_mmu_clflush(pd->driver, (void *)&v[index]);
			atomic_set(&pd->driver->needs_tlbflush, 1);
		}
	}
	pt->v = kmap_atomic(pt->p);
	return pt;
}

static struct psb_mmu_pt *psb_mmu_pt_map_lock(struct psb_mmu_pd *pd,
					      unsigned long addr)
{
	uint32_t index = psb_mmu_pd_index(addr);
	struct psb_mmu_pt *pt;
	spinlock_t *lock = &pd->driver->lock;

	spin_lock(lock);
	pt = pd->tables[index];
	if (!pt) {
		spin_unlock(lock);
		return NULL;
	}
	pt->v = kmap_atomic(pt->p);
	return pt;
}

static void psb_mmu_pt_unmap_unlock(struct psb_mmu_pt *pt)
{
	struct psb_mmu_pd *pd = pt->pd;
	uint32_t *v;

	kunmap_atomic(pt->v);
	if (pt->count == 0) {
		v = kmap_atomic(pd->p);
		v[pt->index] = pd->invalid_pde;
		pd->tables[pt->index] = NULL;

		if (pd->hw_context != -1) {
			psb_mmu_clflush(pd->driver, (void *)&v[pt->index]);
			atomic_set(&pd->driver->needs_tlbflush, 1);
		}
		/* Unmap the directory page mapped above, not the already
		   unmapped pt->v */
		kunmap_atomic(v);
		spin_unlock(&pd->driver->lock);
		psb_mmu_free_pt(pt);
		return;
	}
	spin_unlock(&pd->driver->lock);
}

static inline void psb_mmu_set_pte(struct psb_mmu_pt *pt, unsigned long addr,
				   uint32_t pte)
{
	pt->v[psb_mmu_pt_index(addr)] = pte;
}

static inline void psb_mmu_invalidate_pte(struct psb_mmu_pt *pt,
					  unsigned long addr)
{
	pt->v[psb_mmu_pt_index(addr)] = pt->pd->invalid_pte;
}

struct psb_mmu_pd *psb_mmu_get_default_pd(struct psb_mmu_driver *driver)
{
	struct psb_mmu_pd *pd;

	down_read(&driver->sem);
	pd = driver->default_pd;
	up_read(&driver->sem);

	return pd;
}

/* Returns the physical address of the PD shared by sgx/msvdx */
uint32_t psb_get_default_pd_addr(struct psb_mmu_driver *driver)
{
	struct psb_mmu_pd *pd;

	pd = psb_mmu_get_default_pd(driver);
	return page_to_pfn(pd->p) << PAGE_SHIFT;
}

void psb_mmu_driver_takedown(struct psb_mmu_driver *driver)
{
	struct drm_device *dev = driver->dev;
	struct drm_psb_private *dev_priv = dev->dev_private;

	PSB_WSGX32(driver->bif_ctrl, PSB_CR_BIF_CTRL);
	psb_mmu_free_pagedir(driver->default_pd);
	kfree(driver);
}

struct psb_mmu_driver *psb_mmu_driver_init(struct drm_device *dev,
					   int trap_pagefaults,
					   int invalid_type,
					   atomic_t *msvdx_mmu_invaldc)
{
	struct psb_mmu_driver *driver;
	struct drm_psb_private *dev_priv = dev->dev_private;

	driver = kmalloc(sizeof(*driver), GFP_KERNEL);

	if (!driver)
		return NULL;

	driver->dev = dev;
	driver->default_pd = psb_mmu_alloc_pd(driver, trap_pagefaults,
					      invalid_type);
	if (!driver->default_pd)
		goto out_err1;

	spin_lock_init(&driver->lock);
	init_rwsem(&driver->sem);
	down_write(&driver->sem);
	atomic_set(&driver->needs_tlbflush, 1);
	driver->msvdx_mmu_invaldc = msvdx_mmu_invaldc;

	driver->bif_ctrl = PSB_RSGX32(PSB_CR_BIF_CTRL);
	PSB_WSGX32(driver->bif_ctrl | _PSB_CB_CTRL_CLEAR_FAULT,
		   PSB_CR_BIF_CTRL);
	PSB_WSGX32(driver->bif_ctrl & ~_PSB_CB_CTRL_CLEAR_FAULT,
		   PSB_CR_BIF_CTRL);

	driver->has_clflush = 0;

#if defined(CONFIG_X86)
	if (boot_cpu_has(X86_FEATURE_CLFLSH)) {
		uint32_t tfms, misc, cap0, cap4, clflush_size;

		/*
		 * clflush size is determined at kernel setup for x86_64 but
		 * not for i386. We have to do it here.
		 */

		cpuid(0x00000001, &tfms, &misc, &cap0, &cap4);
		clflush_size = ((misc >> 8) & 0xff) * 8;
		driver->has_clflush = 1;
		driver->clflush_add =
		    PAGE_SIZE * clflush_size / sizeof(uint32_t);
		driver->clflush_mask = driver->clflush_add - 1;
		driver->clflush_mask = ~driver->clflush_mask;
	}
#endif

	up_write(&driver->sem);
	return driver;

out_err1:
	kfree(driver);
	return NULL;
}

#if defined(CONFIG_X86)
static void psb_mmu_flush_ptes(struct psb_mmu_pd *pd, unsigned long address,
			       uint32_t num_pages, uint32_t desired_tile_stride,
			       uint32_t hw_tile_stride)
{
	struct psb_mmu_pt *pt;
	uint32_t rows = 1;
	uint32_t i;
	unsigned long addr;
	unsigned long end;
	unsigned long next;
	unsigned long add;
	unsigned long row_add;
	unsigned long clflush_add = pd->driver->clflush_add;
	unsigned long clflush_mask = pd->driver->clflush_mask;

	if (!pd->driver->has_clflush)
		return;

	if (hw_tile_stride)
		rows = num_pages / desired_tile_stride;
	else
		desired_tile_stride = num_pages;

	add = desired_tile_stride << PAGE_SHIFT;
	row_add = hw_tile_stride << PAGE_SHIFT;
	mb();
	for (i = 0; i < rows; ++i) {

		addr = address;
		end = addr + add;

		do {
			next = psb_pd_addr_end(addr, end);
			pt = psb_mmu_pt_map_lock(pd, addr);
			if (!pt)
				continue;
			do {
				psb_clflush(&pt->v[psb_mmu_pt_index(addr)]);
			} while (addr += clflush_add,
				 (addr & clflush_mask) < next);

			psb_mmu_pt_unmap_unlock(pt);
		} while (addr = next, next != end);
		address += row_add;
	}
	mb();
}
#else
static void psb_mmu_flush_ptes(struct psb_mmu_pd *pd, unsigned long address,
			       uint32_t num_pages, uint32_t desired_tile_stride,
			       uint32_t hw_tile_stride)
{
	drm_ttm_cache_flush();
}
#endif

void psb_mmu_remove_pfn_sequence(struct psb_mmu_pd *pd,
				 unsigned long address, uint32_t num_pages)
{
	struct psb_mmu_pt *pt;
	unsigned long addr;
	unsigned long end;
	unsigned long next;
	unsigned long f_address = address;

	down_read(&pd->driver->sem);

	addr = address;
	end = addr + (num_pages << PAGE_SHIFT);

	do {
		next = psb_pd_addr_end(addr, end);
		pt = psb_mmu_pt_alloc_map_lock(pd, addr);
		if (!pt)
			goto out;
		do {
			psb_mmu_invalidate_pte(pt, addr);
			--pt->count;
		} while (addr += PAGE_SIZE, addr < next);
		psb_mmu_pt_unmap_unlock(pt);

	} while (addr = next, next != end);

out:
	if (pd->hw_context != -1)
		psb_mmu_flush_ptes(pd, f_address, num_pages, 1, 1);

	up_read(&pd->driver->sem);

	if (pd->hw_context != -1)
		psb_mmu_flush(pd->driver);

	return;
}

void psb_mmu_remove_pages(struct psb_mmu_pd *pd, unsigned long address,
			  uint32_t num_pages, uint32_t desired_tile_stride,
			  uint32_t hw_tile_stride)
{
	struct psb_mmu_pt *pt;
	uint32_t rows = 1;
	uint32_t i;
	unsigned long addr;
	unsigned long end;
	unsigned long next;
	unsigned long add;
	unsigned long row_add;
	unsigned long f_address = address;

	if (hw_tile_stride)
		rows = num_pages / desired_tile_stride;
	else
		desired_tile_stride = num_pages;

	add = desired_tile_stride << PAGE_SHIFT;
	row_add = hw_tile_stride << PAGE_SHIFT;

	down_read(&pd->driver->sem);

	/* Make sure we only need to flush this processor's cache */

	for (i = 0; i < rows; ++i) {

		addr = address;
		end = addr + add;

		do {
			next = psb_pd_addr_end(addr, end);
			pt = psb_mmu_pt_map_lock(pd, addr);
			if (!pt)
				continue;
			do {
				psb_mmu_invalidate_pte(pt, addr);
				--pt->count;

			} while (addr += PAGE_SIZE, addr < next);
			psb_mmu_pt_unmap_unlock(pt);

		} while (addr = next, next != end);
		address += row_add;
	}
	if (pd->hw_context != -1)
		psb_mmu_flush_ptes(pd, f_address, num_pages,
				   desired_tile_stride, hw_tile_stride);

	up_read(&pd->driver->sem);

	if (pd->hw_context != -1)
		psb_mmu_flush(pd->driver);
}

int psb_mmu_insert_pfn_sequence(struct psb_mmu_pd *pd, uint32_t start_pfn,
				unsigned long address, uint32_t num_pages,
				int type)
{
	struct psb_mmu_pt *pt;
	uint32_t pte;
	unsigned long addr;
	unsigned long end;
	unsigned long next;
	unsigned long f_address = address;
	int ret = -ENOMEM;

	down_read(&pd->driver->sem);

	addr = address;
	end = addr + (num_pages << PAGE_SHIFT);

	do {
		next = psb_pd_addr_end(addr, end);
		pt = psb_mmu_pt_alloc_map_lock(pd, addr);
		if (!pt) {
			ret = -ENOMEM;
			goto out;
		}
		do {
			pte = psb_mmu_mask_pte(start_pfn++, type);
			psb_mmu_set_pte(pt, addr, pte);
			pt->count++;
		} while (addr += PAGE_SIZE, addr < next);
		psb_mmu_pt_unmap_unlock(pt);

	} while (addr = next, next != end);
	ret = 0;

out:
	if (pd->hw_context != -1)
		psb_mmu_flush_ptes(pd, f_address, num_pages, 1, 1);

	up_read(&pd->driver->sem);

	if (pd->hw_context != -1)
		psb_mmu_flush(pd->driver);

	/* Propagate the -ENOMEM from the allocation path instead of
	   unconditionally returning 0 */
	return ret;
}

int psb_mmu_insert_pages(struct psb_mmu_pd *pd, struct page **pages,
			 unsigned long address, uint32_t num_pages,
			 uint32_t desired_tile_stride, uint32_t hw_tile_stride,
			 int type)
{
	struct psb_mmu_pt *pt;
	uint32_t rows = 1;
	uint32_t i;
	uint32_t pte;
	unsigned long addr;
	unsigned long end;
	unsigned long next;
	unsigned long add;
	unsigned long row_add;
	unsigned long f_address = address;
	int ret = -ENOMEM;

	if (hw_tile_stride) {
		if (num_pages % desired_tile_stride != 0)
			return -EINVAL;
		rows = num_pages / desired_tile_stride;
	} else {
		desired_tile_stride = num_pages;
	}

	add = desired_tile_stride << PAGE_SHIFT;
	row_add = hw_tile_stride << PAGE_SHIFT;

	down_read(&pd->driver->sem);

	for (i = 0; i < rows; ++i) {

		addr = address;
		end = addr + add;

		do {
			next = psb_pd_addr_end(addr, end);
			pt = psb_mmu_pt_alloc_map_lock(pd, addr);
			if (!pt)
				goto out;
			do {
				pte = psb_mmu_mask_pte(page_to_pfn(*pages++),
						       type);
				psb_mmu_set_pte(pt, addr, pte);
				pt->count++;
			} while (addr += PAGE_SIZE, addr < next);
			psb_mmu_pt_unmap_unlock(pt);

		} while (addr = next, next != end);

		address += row_add;
	}

	ret = 0;
out:
	if (pd->hw_context != -1)
		psb_mmu_flush_ptes(pd, f_address, num_pages,
				   desired_tile_stride, hw_tile_stride);

	up_read(&pd->driver->sem);

	if (pd->hw_context != -1)
		psb_mmu_flush(pd->driver);

	return ret;
}

int psb_mmu_virtual_to_pfn(struct psb_mmu_pd *pd, uint32_t virtual,
			   unsigned long *pfn)
{
	int ret;
	struct psb_mmu_pt *pt;
	uint32_t tmp;
	spinlock_t *lock = &pd->driver->lock;

	down_read(&pd->driver->sem);
	pt = psb_mmu_pt_map_lock(pd, virtual);
	if (!pt) {
		uint32_t *v;

		spin_lock(lock);
		v = kmap_atomic(pd->p);
		tmp = v[psb_mmu_pd_index(virtual)];
		kunmap_atomic(v);
		spin_unlock(lock);

		if (tmp != pd->invalid_pde || !(tmp & PSB_PTE_VALID) ||
		    !(pd->invalid_pte & PSB_PTE_VALID)) {
			ret = -EINVAL;
			goto out;
		}
		ret = 0;
		*pfn = pd->invalid_pte >> PAGE_SHIFT;
		goto out;
	}
	tmp = pt->v[psb_mmu_pt_index(virtual)];
	if (!(tmp & PSB_PTE_VALID)) {
		ret = -EINVAL;
	} else {
		ret = 0;
		*pfn = tmp >> PAGE_SHIFT;
	}
	psb_mmu_pt_unmap_unlock(pt);
out:
	up_read(&pd->driver->sem);
	return ret;
}