/*
 * This file contains ioremap and related functions for 64-bit machines.
 *
 * Derived from arch/ppc64/mm/init.c
 *   Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
 *
 *  Modifications by Paul Mackerras (PowerMac) (paulus@samba.org)
 *  and Cort Dougan (PReP) (cort@cs.nmt.edu)
 *    Copyright (C) 1996 Paul Mackerras
 *
 *  Derived from "arch/i386/mm/init.c"
 *    Copyright (C) 1991, 1992, 1993, 1994  Linus Torvalds
 *
 *  Dave Engebretsen <engebret@us.ibm.com>
 *      Rework for PPC64 port.
 *
 *  This program is free software; you can redistribute it and/or
 *  modify it under the terms of the GNU General Public License
 *  as published by the Free Software Foundation; either version
 *  2 of the License, or (at your option) any later version.
 *
 */

#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/export.h>
#include <linux/types.h>
#include <linux/mman.h>
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/stddef.h>
#include <linux/vmalloc.h>
#include <linux/memblock.h>
#include <linux/slab.h>
#include <linux/hugetlb.h>

#include <asm/pgalloc.h>
#include <asm/page.h>
#include <asm/prom.h>
#include <asm/io.h>
#include <asm/mmu_context.h>
#include <asm/pgtable.h>
#include <asm/mmu.h>
#include <asm/smp.h>
#include <asm/machdep.h>
#include <asm/tlb.h>
#include <asm/processor.h>
#include <asm/cputable.h>
#include <asm/sections.h>
#include <asm/firmware.h>
#include <asm/dma.h>

#include "mmu_decl.h"

#ifdef CONFIG_PPC_STD_MMU_64
#if TASK_SIZE_USER64 > (1UL << (ESID_BITS + SID_SHIFT))
#error TASK_SIZE_USER64 exceeds user VSID range
#endif
#endif

#ifdef CONFIG_PPC_BOOK3S_64
/*
 * partition table and process table for ISA 3.0
 */
struct prtb_entry *process_tb;
struct patb_entry *partition_tb;
/*
 * page table size
 */
unsigned long __pte_index_size;
EXPORT_SYMBOL(__pte_index_size);
unsigned long __pmd_index_size;
EXPORT_SYMBOL(__pmd_index_size);
unsigned long __pud_index_size;
EXPORT_SYMBOL(__pud_index_size);
unsigned long __pgd_index_size;
EXPORT_SYMBOL(__pgd_index_size);
unsigned long __pmd_cache_index;
EXPORT_SYMBOL(__pmd_cache_index);
unsigned long __pte_table_size;
EXPORT_SYMBOL(__pte_table_size);
unsigned long __pmd_table_size;
EXPORT_SYMBOL(__pmd_table_size);
unsigned long __pud_table_size;
EXPORT_SYMBOL(__pud_table_size);
unsigned long __pgd_table_size;
EXPORT_SYMBOL(__pgd_table_size);
unsigned long __pmd_val_bits;
EXPORT_SYMBOL(__pmd_val_bits);
unsigned long __pud_val_bits;
EXPORT_SYMBOL(__pud_val_bits);
unsigned long __pgd_val_bits;
EXPORT_SYMBOL(__pgd_val_bits);
unsigned long __kernel_virt_start;
EXPORT_SYMBOL(__kernel_virt_start);
unsigned long __kernel_virt_size;
EXPORT_SYMBOL(__kernel_virt_size);
unsigned long __vmalloc_start;
EXPORT_SYMBOL(__vmalloc_start);
unsigned long __vmalloc_end;
EXPORT_SYMBOL(__vmalloc_end);
struct page *vmemmap;
EXPORT_SYMBOL(vmemmap);
unsigned long __pte_frag_nr;
EXPORT_SYMBOL(__pte_frag_nr);
unsigned long __pte_frag_size_shift;
EXPORT_SYMBOL(__pte_frag_size_shift);
unsigned long ioremap_bot;
#else /* !CONFIG_PPC_BOOK3S_64 */
unsigned long ioremap_bot = IOREMAP_BASE;
#endif

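/*
 * These runtime variables are what let a single kernel image support
 * both the hash and radix MMUs, whose page table geometries differ.
 * A sketch of how the headers wire this up (the usual compile-time
 * constants become aliases; see asm/book3s/64/pgtable.h for the real
 * definitions):
 *
 *	#define PTE_INDEX_SIZE	__pte_index_size
 *	#define PMD_INDEX_SIZE	__pmd_index_size
 *
 * Early MMU init then assigns either the hash (H_*) or radix (RADIX_*)
 * values to these variables before any page table code runs.
 */
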
/**
 * __ioremap_at - Low level function to establish the page tables
 *                for an IO mapping
 */
void __iomem * __ioremap_at(phys_addr_t pa, void *ea, unsigned long size,
			    unsigned long flags)
{
	unsigned long i;

	/* Make sure we have the base flags */
	if ((flags & _PAGE_PRESENT) == 0)
		flags |= pgprot_val(PAGE_KERNEL);

	/* We don't support the 4K PFN hack with ioremap */
	if (flags & H_PAGE_4K_PFN)
		return NULL;

	WARN_ON(pa & ~PAGE_MASK);
	WARN_ON(((unsigned long)ea) & ~PAGE_MASK);
	WARN_ON(size & ~PAGE_MASK);

	for (i = 0; i < size; i += PAGE_SIZE)
		if (map_kernel_page((unsigned long)ea + i, pa + i, flags))
			return NULL;

	return (void __iomem *)ea;
}

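/*
 * Example (a hypothetical sketch, not an in-tree caller): code that
 * manages its own virtual space, such as PCI host bridge setup, can
 * bolt an IO region at a virtual address of its choosing:
 *
 *	void __iomem *regs;
 *
 *	regs = __ioremap_at(0xf0000000ul,	  // physical base (made up)
 *			    (void *)PHB_IO_BASE,  // chosen virtual base
 *			    0x10000,		  // size, page aligned
 *			    pgprot_val(pgprot_noncached(__pgprot(0))));
 *	if (!regs)
 *		// handle the failure
 *
 * This is why the low level entry point exists alongside ioremap(),
 * which picks the virtual address itself.
 */
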
/**
 * __iounmap_at - Low level function to tear down the page tables
 *                for an IO mapping. This is used for mappings that
 *                are manipulated manually, like partial unmapping of
 *                PCI IOs or ISA space.
 */
void __iounmap_at(void *ea, unsigned long size)
{
	WARN_ON(((unsigned long)ea) & ~PAGE_MASK);
	WARN_ON(size & ~PAGE_MASK);

	unmap_kernel_range((unsigned long)ea, size);
}

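/*
 * Example (hypothetical sketch): tearing down only part of a region
 * established with __ioremap_at(), something plain iounmap() cannot
 * do:
 *
 *	// unmap the second 64K of a previously mapped region
 *	__iounmap_at(ea + 0x10000, 0x10000);
 *
 * Both address and size must stay page aligned, as the WARN_ONs above
 * enforce.
 */
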
void __iomem * __ioremap_caller(phys_addr_t addr, unsigned long size,
				unsigned long flags, void *caller)
{
	phys_addr_t paligned;
	void __iomem *ret;

	/*
	 * Choose an address to map it to. Once the vmalloc system is
	 * running, we use it. Before that, we map addresses going up
	 * from ioremap_bot; the vmalloc area spans ioremap_bot through
	 * IOREMAP_END.
	 */
	paligned = addr & PAGE_MASK;
	size = PAGE_ALIGN(addr + size) - paligned;

	if ((size == 0) || (paligned == 0))
		return NULL;

	if (slab_is_available()) {
		struct vm_struct *area;

		area = __get_vm_area_caller(size, VM_IOREMAP,
					    ioremap_bot, IOREMAP_END,
					    caller);
		if (area == NULL)
			return NULL;

		area->phys_addr = paligned;
		ret = __ioremap_at(paligned, area->addr, size, flags);
		if (!ret)
			vunmap(area->addr);
	} else {
		ret = __ioremap_at(paligned, (void *)ioremap_bot, size, flags);
		if (ret)
			ioremap_bot += size;
	}

	if (ret)
		ret += addr & ~PAGE_MASK;
	return ret;
}

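/*
 * Worked example of the alignment arithmetic above, assuming 4K pages:
 * for a request of addr = 0x100000104 and size = 8,
 *
 *	paligned = 0x100000104 & PAGE_MASK            = 0x100000000
 *	size     = PAGE_ALIGN(0x10000010c) - paligned = 0x1000
 *
 * so one full page is mapped, and the final "ret += addr & ~PAGE_MASK"
 * puts the 0x104 byte offset back so the caller gets a pointer to the
 * exact byte requested.
 */
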
void __iomem * __ioremap(phys_addr_t addr, unsigned long size,
			 unsigned long flags)
{
	return __ioremap_caller(addr, size, flags, __builtin_return_address(0));
}

void __iomem * ioremap(phys_addr_t addr, unsigned long size)
{
	unsigned long flags = pgprot_val(pgprot_noncached(__pgprot(0)));
	void *caller = __builtin_return_address(0);

	if (ppc_md.ioremap)
		return ppc_md.ioremap(addr, size, flags, caller);
	return __ioremap_caller(addr, size, flags, caller);
}

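/*
 * Typical driver usage (a minimal sketch; CTRL_REG and STAT_REG are
 * made-up register offsets):
 *
 *	void __iomem *regs = ioremap(res->start, resource_size(res));
 *
 *	if (!regs)
 *		return -ENOMEM;
 *	writel(1, regs + CTRL_REG);
 *	status = readl(regs + STAT_REG);
 *	iounmap(regs);
 *
 * The mapping is non-cached and guarded, which is what MMIO register
 * blocks want; ioremap_wc() below is the write-combining variant for
 * things like frame buffers.
 */
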
void __iomem * ioremap_wc(phys_addr_t addr, unsigned long size)
{
	unsigned long flags = pgprot_val(pgprot_noncached_wc(__pgprot(0)));
	void *caller = __builtin_return_address(0);

	if (ppc_md.ioremap)
		return ppc_md.ioremap(addr, size, flags, caller);
	return __ioremap_caller(addr, size, flags, caller);
}

void __iomem * ioremap_prot(phys_addr_t addr, unsigned long size,
			    unsigned long flags)
{
	void *caller = __builtin_return_address(0);

	/* writeable implies dirty for kernel addresses */
	if (flags & _PAGE_WRITE)
		flags |= _PAGE_DIRTY;

	/* we don't want to let _PAGE_EXEC leak out */
	flags &= ~_PAGE_EXEC;
	/*
	 * Force kernel mapping.
	 */
#if defined(CONFIG_PPC_BOOK3S_64)
	flags |= _PAGE_PRIVILEGED;
#else
	flags &= ~_PAGE_USER;
#endif

#ifdef _PAGE_BAP_SR
	/*
	 * _PAGE_USER contains _PAGE_BAP_SR on BookE using the new PTE
	 * format, which means we just cleared supervisor access... oops ;-)
	 * This restores it.
	 */
	flags |= _PAGE_BAP_SR;
#endif

	if (ppc_md.ioremap)
		return ppc_md.ioremap(addr, size, flags, caller);
	return __ioremap_caller(addr, size, flags, caller);
}


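/*
 * Example (a hedged sketch): a caller that wants different protection
 * bits, say a cacheable mapping, can pass them explicitly:
 *
 *	p = ioremap_prot(addr, size,
 *			 pgprot_val(pgprot_cached(__pgprot(0))));
 *
 * Whatever flags come in, the sanitizing above guarantees the result
 * is a kernel-only mapping with execute permission removed.
 */
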
/*
 * Unmap an IO region and remove it from the vmalloc'd mappings.
 * Access to IO memory should be serialized by the driver.
 */
void __iounmap(volatile void __iomem *token)
{
	void *addr;

	if (!slab_is_available())
		return;

	addr = (void *) ((unsigned long __force)
			 PCI_FIX_ADDR(token) & PAGE_MASK);
	if ((unsigned long)addr < ioremap_bot) {
		printk(KERN_WARNING "Attempt to iounmap early bolted mapping"
		       " at 0x%p\n", addr);
		return;
	}
	vunmap(addr);
}

void iounmap(volatile void __iomem *token)
{
	if (ppc_md.iounmap)
		ppc_md.iounmap(token);
	else
		__iounmap(token);
}

EXPORT_SYMBOL(ioremap);
EXPORT_SYMBOL(ioremap_wc);
EXPORT_SYMBOL(ioremap_prot);
EXPORT_SYMBOL(__ioremap);
EXPORT_SYMBOL(__ioremap_at);
EXPORT_SYMBOL(iounmap);
EXPORT_SYMBOL(__iounmap);
EXPORT_SYMBOL(__iounmap_at);

#ifndef __PAGETABLE_PUD_FOLDED
/* 4 level page table */
struct page *pgd_page(pgd_t pgd)
{
	if (pgd_huge(pgd))
		return pte_page(pgd_pte(pgd));
	return virt_to_page(pgd_page_vaddr(pgd));
}
#endif

struct page *pud_page(pud_t pud)
{
	if (pud_huge(pud))
		return pte_page(pud_pte(pud));
	return virt_to_page(pud_page_vaddr(pud));
}

/*
 * For a hugepage we have the pfn in the pmd itself, using PTE_RPN_SHIFT
 * bits for flags. For a PTE page we have a PTE_FRAG_SIZE (4K) aligned
 * virtual address instead.
 */
struct page *pmd_page(pmd_t pmd)
{
	if (pmd_trans_huge(pmd) || pmd_huge(pmd))
		return pte_page(pmd_pte(pmd));
	return virt_to_page(pmd_page_vaddr(pmd));
}

#ifdef CONFIG_PPC_64K_PAGES
static pte_t *get_from_cache(struct mm_struct *mm)
{
	void *pte_frag, *ret;

	spin_lock(&mm->page_table_lock);
	ret = mm->context.pte_frag;
	if (ret) {
		pte_frag = ret + PTE_FRAG_SIZE;
		/*
		 * If we have taken up all the fragments mark PTE page NULL
		 */
		if (((unsigned long)pte_frag & ~PAGE_MASK) == 0)
			pte_frag = NULL;
		mm->context.pte_frag = pte_frag;
	}
	spin_unlock(&mm->page_table_lock);
	return (pte_t *)ret;
}

static pte_t *__alloc_for_cache(struct mm_struct *mm, int kernel)
{
	void *ret = NULL;
	struct page *page = alloc_page(GFP_KERNEL | __GFP_NOTRACK | __GFP_ZERO);
	if (!page)
		return NULL;
	if (!kernel && !pgtable_page_ctor(page)) {
		__free_page(page);
		return NULL;
	}

	ret = page_address(page);
	spin_lock(&mm->page_table_lock);
	/*
	 * If we raced and mm->context.pte_frag is already set, return
	 * the allocated page with a single fragment count.
	 */
	if (likely(!mm->context.pte_frag)) {
		set_page_count(page, PTE_FRAG_NR);
		mm->context.pte_frag = ret + PTE_FRAG_SIZE;
	}
	spin_unlock(&mm->page_table_lock);

	return (pte_t *)ret;
}

pte_t *pte_fragment_alloc(struct mm_struct *mm, unsigned long vmaddr, int kernel)
{
	pte_t *pte;

	pte = get_from_cache(mm);
	if (pte)
		return pte;

	return __alloc_for_cache(mm, kernel);
}
#endif /* CONFIG_PPC_64K_PAGES */

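/*
 * Worked example of the fragment scheme: with 64K pages and the 4K
 * PTE_FRAG_SIZE noted above pmd_page(), PTE_FRAG_NR is 16. The first
 * allocation for an mm takes a fresh page, sets its refcount to 16,
 * hands out fragment 0 and caches the address of fragment 1 in
 * mm->context.pte_frag. The next 15 allocations are satisfied from
 * the cache; when "ret + PTE_FRAG_SIZE" reaches a page boundary the
 * cache is cleared because the page is fully handed out. Each
 * pte_fragment_free() below drops the refcount, and the page is only
 * returned to the allocator when the last fragment goes.
 */
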
void pte_fragment_free(unsigned long *table, int kernel)
{
	struct page *page = virt_to_page(table);
	if (put_page_testzero(page)) {
		if (!kernel)
			pgtable_page_dtor(page);
		free_hot_cold_page(page, 0);
	}
}

#ifdef CONFIG_SMP
void pgtable_free_tlb(struct mmu_gather *tlb, void *table, int shift)
{
	unsigned long pgf = (unsigned long)table;

	BUG_ON(shift > MAX_PGTABLE_INDEX_SIZE);
	pgf |= shift;
	tlb_remove_table(tlb, (void *)pgf);
}

void __tlb_remove_table(void *_table)
{
	void *table = (void *)((unsigned long)_table & ~MAX_PGTABLE_INDEX_SIZE);
	unsigned shift = (unsigned long)_table & MAX_PGTABLE_INDEX_SIZE;

	if (!shift)
		/* PTE page needs special handling */
		pte_fragment_free(table, 0);
	else {
		BUG_ON(shift > MAX_PGTABLE_INDEX_SIZE);
		kmem_cache_free(PGT_CACHE(shift), table);
	}
}
#else
void pgtable_free_tlb(struct mmu_gather *tlb, void *table, int shift)
{
	if (!shift) {
		/* PTE page needs special handling */
		pte_fragment_free(table, 0);
	} else {
		BUG_ON(shift > MAX_PGTABLE_INDEX_SIZE);
		kmem_cache_free(PGT_CACHE(shift), table);
	}
}
#endif
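
/*
 * Worked example of the pointer tagging used above: page tables are
 * allocated with enough alignment that the low bits of their address
 * are zero, so the index size can ride along in those bits across the
 * deferred tlb_remove_table() call. For, say,
 * table = 0xc000000012340000 and shift = 9:
 *
 *	pgf   = 0xc000000012340000 | 9        = 0xc000000012340009
 *	table = pgf & ~MAX_PGTABLE_INDEX_SIZE = 0xc000000012340000
 *	shift = pgf &  MAX_PGTABLE_INDEX_SIZE = 9
 *
 * A shift of 0 is reserved to mean "PTE fragment", which is why that
 * case takes the pte_fragment_free() path rather than a kmem cache.
 */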