/*
 * PPC Huge TLB Page Support for Kernel.
 *
 * Copyright (C) 2003 David Gibson, IBM Corporation.
 * Copyright (C) 2011 Becky Bruce, Freescale Semiconductor
 *
 * Based on the IA-32 version:
 * Copyright (C) 2002, Rohit Seth <rohit.seth@intel.com>
 */

#include <linux/mm.h>
#include <linux/io.h>
#include <linux/slab.h>
#include <linux/hugetlb.h>
#include <linux/export.h>
#include <linux/of_fdt.h>
#include <linux/memblock.h>
#include <linux/bootmem.h>
#include <linux/moduleparam.h>
#include <linux/swap.h>
#include <linux/swapops.h>
#include <asm/pgtable.h>
#include <asm/pgalloc.h>
#include <asm/tlb.h>
#include <asm/setup.h>
#include <asm/hugetlb.h>
#include <asm/pte-walk.h>

#ifdef CONFIG_HUGETLB_PAGE

#define PAGE_SHIFT_64K  16
#define PAGE_SHIFT_512K 19
#define PAGE_SHIFT_8M   23
#define PAGE_SHIFT_16M  24
#define PAGE_SHIFT_16G  34

unsigned int HPAGE_SHIFT;
EXPORT_SYMBOL(HPAGE_SHIFT);

#define hugepd_none(hpd)        (hpd_val(hpd) == 0)

pte_t *huge_pte_offset(struct mm_struct *mm, unsigned long addr, unsigned long sz)
{
        /*
         * Only called for hugetlbfs pages, hence can ignore THP and the
         * irq disabled walk.
         */
        return __find_linux_pte(mm->pgd, addr, NULL, NULL);
}

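/*
 * Allocate a hugepte page table and make the hugepd entry (or entries, on
 * FSL Book3E/8xx) at hpdp point to it.  Returns 0 on success or -ENOMEM.
 */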
static int __hugepte_alloc(struct mm_struct *mm, hugepd_t *hpdp,
                           unsigned long address, unsigned pdshift, unsigned pshift)
{
        struct kmem_cache *cachep;
        pte_t *new;
        int i;
        int num_hugepd;

        if (pshift >= pdshift) {
                cachep = hugepte_cache;
                num_hugepd = 1 << (pshift - pdshift);
        } else {
                cachep = PGT_CACHE(pdshift - pshift);
                num_hugepd = 1;
        }

        new = kmem_cache_zalloc(cachep, pgtable_gfp_flags(mm, GFP_KERNEL));

        BUG_ON(pshift > HUGEPD_SHIFT_MASK);
        BUG_ON((unsigned long)new & HUGEPD_SHIFT_MASK);

        if (!new)
                return -ENOMEM;

        /*
         * Make sure other cpus find the hugepd set only after a
         * properly initialized page table is visible to them.
         * For more details look for comment in __pte_alloc().
         */
        smp_wmb();

        spin_lock(&mm->page_table_lock);

        /*
         * We have multiple higher-level entries that point to the same
         * actual pte location.  Fill in each as we go and backtrack on error.
         * We need all of these so the DTLB pgtable walk code can find the
         * right higher-level entry without knowing if it's a hugepage or not.
         */
        for (i = 0; i < num_hugepd; i++, hpdp++) {
                if (unlikely(!hugepd_none(*hpdp)))
                        break;
                else {
#ifdef CONFIG_PPC_BOOK3S_64
                        *hpdp = __hugepd(__pa(new) |
                                         (shift_to_mmu_psize(pshift) << 2));
#elif defined(CONFIG_PPC_8xx)
                        *hpdp = __hugepd(__pa(new) | _PMD_USER |
                                         (pshift == PAGE_SHIFT_8M ? _PMD_PAGE_8M :
                                          _PMD_PAGE_512K) | _PMD_PRESENT);
#else
                        /* We use the old format for PPC_FSL_BOOK3E */
                        *hpdp = __hugepd(((unsigned long)new & ~PD_HUGE) | pshift);
#endif
                }
        }
        /* If we bailed from the for loop early, an error occurred, clean up */
        if (i < num_hugepd) {
                for (i = i - 1; i >= 0; i--, hpdp--)
                        *hpdp = __hugepd(0);
                kmem_cache_free(cachep, new);
        }
        spin_unlock(&mm->page_table_lock);
        return 0;
}

/*
 * These macros define how to determine which level of the page table holds
 * the hpdp.
 */
#if defined(CONFIG_PPC_FSL_BOOK3E) || defined(CONFIG_PPC_8xx)
#define HUGEPD_PGD_SHIFT PGDIR_SHIFT
#define HUGEPD_PUD_SHIFT PUD_SHIFT
#else
#define HUGEPD_PGD_SHIFT PUD_SHIFT
#define HUGEPD_PUD_SHIFT PMD_SHIFT
#endif

/*
 * At this point we do the placement change only for BOOK3S 64. This would
 * possibly work on other subarchs.
 */
pte_t *huge_pte_alloc(struct mm_struct *mm, unsigned long addr, unsigned long sz)
{
        pgd_t *pg;
        pud_t *pu;
        pmd_t *pm;
        hugepd_t *hpdp = NULL;
        unsigned pshift = __ffs(sz);
        unsigned pdshift = PGDIR_SHIFT;

        addr &= ~(sz - 1);
        pg = pgd_offset(mm, addr);

#ifdef CONFIG_PPC_BOOK3S_64
        if (pshift == PGDIR_SHIFT)
                /* 16GB huge page */
                return (pte_t *)pg;
        else if (pshift > PUD_SHIFT)
                /*
                 * We need to use hugepd table
                 */
                hpdp = (hugepd_t *)pg;
        else {
                pdshift = PUD_SHIFT;
                pu = pud_alloc(mm, pg, addr);
                if (pshift == PUD_SHIFT)
                        return (pte_t *)pu;
                else if (pshift > PMD_SHIFT)
                        hpdp = (hugepd_t *)pu;
                else {
                        pdshift = PMD_SHIFT;
                        pm = pmd_alloc(mm, pu, addr);
                        if (pshift == PMD_SHIFT)
                                /* 16MB hugepage */
                                return (pte_t *)pm;
                        else
                                hpdp = (hugepd_t *)pm;
                }
        }
#else
        if (pshift >= HUGEPD_PGD_SHIFT) {
                hpdp = (hugepd_t *)pg;
        } else {
                pdshift = PUD_SHIFT;
                pu = pud_alloc(mm, pg, addr);
                if (pshift >= HUGEPD_PUD_SHIFT) {
                        hpdp = (hugepd_t *)pu;
                } else {
                        pdshift = PMD_SHIFT;
                        pm = pmd_alloc(mm, pu, addr);
                        hpdp = (hugepd_t *)pm;
                }
        }
#endif
        if (!hpdp)
                return NULL;

        BUG_ON(!hugepd_none(*hpdp) && !hugepd_ok(*hpdp));

        if (hugepd_none(*hpdp) && __hugepte_alloc(mm, hpdp, addr, pdshift, pshift))
                return NULL;

        return hugepte_offset(*hpdp, addr, pdshift);
}

#ifdef CONFIG_PPC_BOOK3S_64
/*
 * Tracks gpages after the device tree is scanned and before the
 * huge_boot_pages list is ready on pseries.
 */
#define MAX_NUMBER_GPAGES       1024
__initdata static u64 gpage_freearray[MAX_NUMBER_GPAGES];
__initdata static unsigned nr_gpages;

/*
 * Build list of addresses of gigantic pages.  This function is used in early
 * boot before the buddy allocator is set up.
 */
void __init pseries_add_gpage(u64 addr, u64 page_size, unsigned long number_of_pages)
{
        if (!addr)
                return;
        while (number_of_pages > 0) {
                gpage_freearray[nr_gpages] = addr;
                nr_gpages++;
                number_of_pages--;
                addr += page_size;
        }
}

int __init pseries_alloc_bootmem_huge_page(struct hstate *hstate)
{
        struct huge_bootmem_page *m;

        if (nr_gpages == 0)
                return 0;
        m = phys_to_virt(gpage_freearray[--nr_gpages]);
        gpage_freearray[nr_gpages] = 0;
        list_add(&m->list, &huge_boot_pages);
        m->hstate = hstate;
        return 1;
}
#endif

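/*
 * Allocate a huge page at boot time.  On pseries LPARs running the hash MMU
 * the page comes from the gigantic-page pool populated from the device tree;
 * otherwise fall back to the generic bootmem allocator.
 */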
int __init alloc_bootmem_huge_page(struct hstate *h)
{
#ifdef CONFIG_PPC_BOOK3S_64
        if (firmware_has_feature(FW_FEATURE_LPAR) && !radix_enabled())
                return pseries_alloc_bootmem_huge_page(h);
#endif
        return __alloc_bootmem_huge_page(h);
}

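/*
 * On FSL Book3E and 8xx, hugepte tables freed while other threads may still
 * be walking them are queued on a per-cpu freelist and released via
 * call_rcu_sched() once the batch fills up (see hugepd_free() below).
 */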
#if defined(CONFIG_PPC_FSL_BOOK3E) || defined(CONFIG_PPC_8xx)
#define HUGEPD_FREELIST_SIZE \
        ((PAGE_SIZE - sizeof(struct hugepd_freelist)) / sizeof(pte_t))

struct hugepd_freelist {
        struct rcu_head rcu;
        unsigned int index;
        void *ptes[0];
};

static DEFINE_PER_CPU(struct hugepd_freelist *, hugepd_freelist_cur);

static void hugepd_free_rcu_callback(struct rcu_head *head)
{
        struct hugepd_freelist *batch =
                container_of(head, struct hugepd_freelist, rcu);
        unsigned int i;

        for (i = 0; i < batch->index; i++)
                kmem_cache_free(hugepte_cache, batch->ptes[i]);

        free_page((unsigned long)batch);
}

static void hugepd_free(struct mmu_gather *tlb, void *hugepte)
{
        struct hugepd_freelist **batchp;

        batchp = &get_cpu_var(hugepd_freelist_cur);

        if (atomic_read(&tlb->mm->mm_users) < 2 ||
            mm_is_thread_local(tlb->mm)) {
                kmem_cache_free(hugepte_cache, hugepte);
                put_cpu_var(hugepd_freelist_cur);
                return;
        }

        if (*batchp == NULL) {
                *batchp = (struct hugepd_freelist *)__get_free_page(GFP_ATOMIC);
                (*batchp)->index = 0;
        }

        (*batchp)->ptes[(*batchp)->index++] = hugepte;
        if ((*batchp)->index == HUGEPD_FREELIST_SIZE) {
                call_rcu_sched(&(*batchp)->rcu, hugepd_free_rcu_callback);
                *batchp = NULL;
        }
        put_cpu_var(hugepd_freelist_cur);
}
#else
static inline void hugepd_free(struct mmu_gather *tlb, void *hugepte) {}
#endif

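/*
 * Clear the hugepd entry (or entries, on fsl) at hpdp and free the hugepte
 * table they point to, subject to the usual floor/ceiling checks used by
 * free_pgd_range().
 */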
static void free_hugepd_range(struct mmu_gather *tlb, hugepd_t *hpdp, int pdshift,
                              unsigned long start, unsigned long end,
                              unsigned long floor, unsigned long ceiling)
{
        pte_t *hugepte = hugepd_page(*hpdp);
        int i;

        unsigned long pdmask = ~((1UL << pdshift) - 1);
        unsigned int num_hugepd = 1;
        unsigned int shift = hugepd_shift(*hpdp);

        /* Note: On fsl the hpdp may be the first of several */
        if (shift > pdshift)
                num_hugepd = 1 << (shift - pdshift);

        start &= pdmask;
        if (start < floor)
                return;
        if (ceiling) {
                ceiling &= pdmask;
                if (!ceiling)
                        return;
        }
        if (end - 1 > ceiling - 1)
                return;

        for (i = 0; i < num_hugepd; i++, hpdp++)
                *hpdp = __hugepd(0);

        if (shift >= pdshift)
                hugepd_free(tlb, hugepte);
        else
                pgtable_free_tlb(tlb, hugepte, pdshift - shift);
}

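/*
 * Walk the pmd entries in one pud-sized region, freeing any hugepd tables
 * found there, then free the pmd page itself if the whole region is gone.
 */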
static void hugetlb_free_pmd_range(struct mmu_gather *tlb, pud_t *pud,
                                   unsigned long addr, unsigned long end,
                                   unsigned long floor, unsigned long ceiling)
{
        pmd_t *pmd;
        unsigned long next;
        unsigned long start;

        start = addr;
        do {
                unsigned long more;

                pmd = pmd_offset(pud, addr);
                next = pmd_addr_end(addr, end);
                if (!is_hugepd(__hugepd(pmd_val(*pmd)))) {
                        /*
                         * If it is not a hugepd pointer, we should already
                         * find it cleared.
                         */
                        WARN_ON(!pmd_none_or_clear_bad(pmd));
                        continue;
                }
                /*
                 * Increment next by the size of the huge mapping since
                 * there may be more than one entry at this level for a
                 * single hugepage, but all of them point to
                 * the same kmem cache that holds the hugepte.
                 */
                more = addr + (1 << hugepd_shift(*(hugepd_t *)pmd));
                if (more > next)
                        next = more;

                free_hugepd_range(tlb, (hugepd_t *)pmd, PMD_SHIFT,
                                  addr, next, floor, ceiling);
        } while (addr = next, addr != end);

        start &= PUD_MASK;
        if (start < floor)
                return;
        if (ceiling) {
                ceiling &= PUD_MASK;
                if (!ceiling)
                        return;
        }
        if (end - 1 > ceiling - 1)
                return;

        pmd = pmd_offset(pud, start);
        pud_clear(pud);
        pmd_free_tlb(tlb, pmd, start);
        mm_dec_nr_pmds(tlb->mm);
}

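/*
 * Same as hugetlb_free_pmd_range(), one level up: walk the pud entries in a
 * pgd-sized region, freeing hugepd tables and lower page table pages.
 */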
static void hugetlb_free_pud_range(struct mmu_gather *tlb, pgd_t *pgd,
                                   unsigned long addr, unsigned long end,
                                   unsigned long floor, unsigned long ceiling)
{
        pud_t *pud;
        unsigned long next;
        unsigned long start;

        start = addr;
        do {
                pud = pud_offset(pgd, addr);
                next = pud_addr_end(addr, end);
                if (!is_hugepd(__hugepd(pud_val(*pud)))) {
                        if (pud_none_or_clear_bad(pud))
                                continue;
                        hugetlb_free_pmd_range(tlb, pud, addr, next, floor,
                                               ceiling);
                } else {
                        unsigned long more;
                        /*
                         * Increment next by the size of the huge mapping since
                         * there may be more than one entry at this level for a
                         * single hugepage, but all of them point to
                         * the same kmem cache that holds the hugepte.
                         */
                        more = addr + (1 << hugepd_shift(*(hugepd_t *)pud));
                        if (more > next)
                                next = more;

                        free_hugepd_range(tlb, (hugepd_t *)pud, PUD_SHIFT,
                                          addr, next, floor, ceiling);
                }
        } while (addr = next, addr != end);

        start &= PGDIR_MASK;
        if (start < floor)
                return;
        if (ceiling) {
                ceiling &= PGDIR_MASK;
                if (!ceiling)
                        return;
        }
        if (end - 1 > ceiling - 1)
                return;

        pud = pud_offset(pgd, start);
        pgd_clear(pgd);
        pud_free_tlb(tlb, pud, start);
        mm_dec_nr_puds(tlb->mm);
}

/*
 * This function frees user-level page tables of a process.
 */
void hugetlb_free_pgd_range(struct mmu_gather *tlb,
                            unsigned long addr, unsigned long end,
                            unsigned long floor, unsigned long ceiling)
{
        pgd_t *pgd;
        unsigned long next;

        /*
         * Because there are a number of different possible pagetable
         * layouts for hugepage ranges, we limit knowledge of how
         * things should be laid out to the allocation path
         * (huge_pte_alloc(), above).  Everything else works out the
         * structure as it goes from information in the hugepd
         * pointers.  That means that we can't here use the
         * optimization used in the normal page free_pgd_range(), of
         * checking whether we're actually covering a large enough
         * range to have to do anything at the top level of the walk
         * instead of at the bottom.
         *
         * To make sense of this, you should probably go read the big
         * block comment at the top of the normal free_pgd_range(),
         * too.
         */

        do {
                next = pgd_addr_end(addr, end);
                pgd = pgd_offset(tlb->mm, addr);
                if (!is_hugepd(__hugepd(pgd_val(*pgd)))) {
                        if (pgd_none_or_clear_bad(pgd))
                                continue;
                        hugetlb_free_pud_range(tlb, pgd, addr, next, floor, ceiling);
                } else {
                        unsigned long more;
                        /*
                         * Increment next by the size of the huge mapping since
                         * there may be more than one entry at the pgd level
                         * for a single hugepage, but all of them point to the
                         * same kmem cache that holds the hugepte.
                         */
                        more = addr + (1 << hugepd_shift(*(hugepd_t *)pgd));
                        if (more > next)
                                next = more;

                        free_hugepd_range(tlb, (hugepd_t *)pgd, PGDIR_SHIFT,
                                          addr, next, floor, ceiling);
                }
        } while (addr = next, addr != end);
}

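/*
 * follow_page() helper for hugepd-mapped huge pages: look up the page
 * backing 'address', taking a reference if FOLL_GET is set and waiting for
 * migration entries to resolve before retrying.
 */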
struct page *follow_huge_pd(struct vm_area_struct *vma,
                            unsigned long address, hugepd_t hpd,
                            int flags, int pdshift)
{
        pte_t *ptep;
        spinlock_t *ptl;
        struct page *page = NULL;
        unsigned long mask;
        int shift = hugepd_shift(hpd);
        struct mm_struct *mm = vma->vm_mm;

retry:
        ptl = &mm->page_table_lock;
        spin_lock(ptl);

        ptep = hugepte_offset(hpd, address, pdshift);
        if (pte_present(*ptep)) {
                mask = (1UL << shift) - 1;
                page = pte_page(*ptep);
                page += ((address & mask) >> PAGE_SHIFT);
                if (flags & FOLL_GET)
                        get_page(page);
        } else {
                if (is_hugetlb_entry_migration(*ptep)) {
                        spin_unlock(ptl);
                        __migration_entry_wait(mm, ptep, ptl);
                        goto retry;
                }
        }
        spin_unlock(ptl);
        return page;
}

static unsigned long hugepte_addr_end(unsigned long addr, unsigned long end,
                                      unsigned long sz)
{
        unsigned long __boundary = (addr + sz) & ~(sz - 1);
        return (__boundary - 1 < end - 1) ? __boundary : end;
}

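/*
 * get_user_pages_fast() helper: walk every hugepte under one hugepd and
 * grab references on the pages.  Returns 1 on success, 0 to make the caller
 * fall back to the slow path.
 */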
int gup_huge_pd(hugepd_t hugepd, unsigned long addr, unsigned pdshift,
                unsigned long end, int write, struct page **pages, int *nr)
{
        pte_t *ptep;
        unsigned long sz = 1UL << hugepd_shift(hugepd);
        unsigned long next;

        ptep = hugepte_offset(hugepd, addr, pdshift);
        do {
                next = hugepte_addr_end(addr, end, sz);
                if (!gup_hugepte(ptep, sz, addr, end, write, pages, nr))
                        return 0;
        } while (ptep++, addr = next, addr != end);

        return 1;
}

#ifdef CONFIG_PPC_MM_SLICES
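/*
 * Find a free area for a hugetlbfs mapping: use the radix variant when the
 * radix MMU is active, otherwise go through the slice allocator.
 */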
unsigned long hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
                                        unsigned long len, unsigned long pgoff,
                                        unsigned long flags)
{
        struct hstate *hstate = hstate_file(file);
        int mmu_psize = shift_to_mmu_psize(huge_page_shift(hstate));

        if (radix_enabled())
                return radix__hugetlb_get_unmapped_area(file, addr, len,
                                                        pgoff, flags);
        return slice_get_unmapped_area(addr, len, flags, mmu_psize, 1);
}
#endif

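/*
 * Return the page size the MMU uses for a given vma, which for hugetlb vmas
 * may differ from the base PAGE_SIZE.
 */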
unsigned long vma_mmu_pagesize(struct vm_area_struct *vma)
{
#ifdef CONFIG_PPC_MM_SLICES
        unsigned int psize = get_slice_psize(vma->vm_mm, vma->vm_start);
        /* With radix we don't use slices, so derive the size from the vma */
        if (!radix_enabled())
                return 1UL << mmu_psize_to_shift(psize);
#endif
        if (!is_vm_hugetlb_page(vma))
                return PAGE_SIZE;

        return huge_page_size(hstate_vma(vma));
}

static inline bool is_power_of_4(unsigned long x)
{
        if (is_power_of_2(x))
                return (__ilog2(x) % 2) ? false : true;
        return false;
}

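/*
 * Register one huge page size with the generic hugetlb code, after checking
 * that the hardware and the page table layout can actually support it.
 */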
static int __init add_huge_page_size(unsigned long long size)
{
        int shift = __ffs(size);
        int mmu_psize;

        /* Check that it is a page size supported by the hardware and
         * that it fits within pagetable and slice limits. */
        if (size <= PAGE_SIZE)
                return -EINVAL;
#if defined(CONFIG_PPC_FSL_BOOK3E)
        if (!is_power_of_4(size))
                return -EINVAL;
#elif !defined(CONFIG_PPC_8xx)
        if (!is_power_of_2(size) || (shift > SLICE_HIGH_SHIFT))
                return -EINVAL;
#endif

        if ((mmu_psize = shift_to_mmu_psize(shift)) < 0)
                return -EINVAL;

#ifdef CONFIG_PPC_BOOK3S_64
        /*
         * We need to make sure that for different page sizes reported by
         * firmware we only add hugetlb support for page sizes that can be
         * supported by linux page table layout.
         * For now we have
         * Radix: 2M
         * Hash: 16M and 16G
         */
        if (radix_enabled()) {
                if (mmu_psize != MMU_PAGE_2M) {
                        if (cpu_has_feature(CPU_FTR_POWER9_DD1) ||
                            (mmu_psize != MMU_PAGE_1G))
                                return -EINVAL;
                }
        } else {
                if (mmu_psize != MMU_PAGE_16M && mmu_psize != MMU_PAGE_16G)
                        return -EINVAL;
        }
#endif

        BUG_ON(mmu_psize_defs[mmu_psize].shift != shift);

        /* Return if huge page size has already been set up */
        if (size_to_hstate(size))
                return 0;

        hugetlb_add_hstate(shift - PAGE_SHIFT);

        return 0;
}

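/*
 * Parse the "hugepagesz=" kernel command line option and register the
 * requested size, warning if it is not supported.
 */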
static int __init hugepage_setup_sz(char *str)
{
        unsigned long long size;

        size = memparse(str, &str);

        if (add_huge_page_size(size) != 0) {
                hugetlb_bad_size();
                pr_err("Invalid huge page size specified (%llu)\n", size);
        }

        return 1;
}
__setup("hugepagesz=", hugepage_setup_sz);

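/*
 * Boot-time initialisation: register every huge page size the MMU supports,
 * set up the page table caches backing hugepd entries and pick the default
 * huge page size (HPAGE_SHIFT).
 */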
struct kmem_cache *hugepte_cache;
static int __init hugetlbpage_init(void)
{
        int psize;

#if !defined(CONFIG_PPC_FSL_BOOK3E) && !defined(CONFIG_PPC_8xx)
        if (!radix_enabled() && !mmu_has_feature(MMU_FTR_16M_PAGE))
                return -ENODEV;
#endif
        for (psize = 0; psize < MMU_PAGE_COUNT; ++psize) {
                unsigned shift;
                unsigned pdshift;

                if (!mmu_psize_defs[psize].shift)
                        continue;

                shift = mmu_psize_to_shift(psize);

                if (add_huge_page_size(1ULL << shift) < 0)
                        continue;

                if (shift < HUGEPD_PUD_SHIFT)
                        pdshift = PMD_SHIFT;
                else if (shift < HUGEPD_PGD_SHIFT)
                        pdshift = PUD_SHIFT;
                else
                        pdshift = PGDIR_SHIFT;
                /*
                 * If pdshift and shift are the same, we don't use the
                 * pgtable cache for hugepd.
                 */
                if (pdshift > shift)
                        pgtable_cache_add(pdshift - shift, NULL);
#if defined(CONFIG_PPC_FSL_BOOK3E) || defined(CONFIG_PPC_8xx)
                else if (!hugepte_cache) {
                        /*
                         * Create a kmem cache for hugeptes. The bottom bits in
                         * the pte have size information encoded in them, so
                         * align them to allow this
                         */
                        hugepte_cache = kmem_cache_create("hugepte-cache",
                                                          sizeof(pte_t),
                                                          HUGEPD_SHIFT_MASK + 1,
                                                          0, NULL);
                        if (hugepte_cache == NULL)
                                panic("%s: Unable to create kmem cache "
                                      "for hugeptes\n", __func__);
                }
#endif
        }

#if defined(CONFIG_PPC_FSL_BOOK3E) || defined(CONFIG_PPC_8xx)
        /* Default hpage size = 4M on FSL_BOOK3E and 512k on 8xx */
        if (mmu_psize_defs[MMU_PAGE_4M].shift)
                HPAGE_SHIFT = mmu_psize_defs[MMU_PAGE_4M].shift;
        else if (mmu_psize_defs[MMU_PAGE_512K].shift)
                HPAGE_SHIFT = mmu_psize_defs[MMU_PAGE_512K].shift;
#else
        /* Set default large page size. Currently, we pick 16M, 1M or 2M
         * depending on what is available
         */
        if (mmu_psize_defs[MMU_PAGE_16M].shift)
                HPAGE_SHIFT = mmu_psize_defs[MMU_PAGE_16M].shift;
        else if (mmu_psize_defs[MMU_PAGE_1M].shift)
                HPAGE_SHIFT = mmu_psize_defs[MMU_PAGE_1M].shift;
        else if (mmu_psize_defs[MMU_PAGE_2M].shift)
                HPAGE_SHIFT = mmu_psize_defs[MMU_PAGE_2M].shift;
#endif
        return 0;
}

arch_initcall(hugetlbpage_init);

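/*
 * Flush the data and instruction caches for every sub-page of a huge page,
 * mapping highmem pages temporarily where needed.
 */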
void flush_dcache_icache_hugepage(struct page *page)
{
        int i;
        void *start;

        BUG_ON(!PageCompound(page));

        for (i = 0; i < (1UL << compound_order(page)); i++) {
                if (!PageHighMem(page)) {
                        __flush_dcache_icache(page_address(page + i));
                } else {
                        start = kmap_atomic(page + i);
                        __flush_dcache_icache(start);
                        kunmap_atomic(start);
                }
        }
}

#endif /* CONFIG_HUGETLB_PAGE */

/*
 * We have 4 cases for pgds and pmds:
 * (1) invalid (all zeroes)
 * (2) pointer to next table, as normal; bottom 6 bits == 0
 * (3) leaf pte for huge page, _PAGE_PTE set
 * (4) hugepd pointer, _PAGE_PTE = 0 and bits [2..6] indicate size of table
 *
 * So long as we atomically load page table pointers we are safe against
 * teardown, and we can follow the address down to the page and take a ref
 * on it.  This function needs to be called with interrupts disabled.  We
 * use this variant when we have MSR[EE] = 0 but the
 * paca->irq_soft_mask = IRQS_ENABLED.
 */
pte_t *__find_linux_pte(pgd_t *pgdir, unsigned long ea,
                        bool *is_thp, unsigned *hpage_shift)
{
        pgd_t pgd, *pgdp;
        pud_t pud, *pudp;
        pmd_t pmd, *pmdp;
        pte_t *ret_pte;
        hugepd_t *hpdp = NULL;
        unsigned pdshift = PGDIR_SHIFT;

        if (hpage_shift)
                *hpage_shift = 0;

        if (is_thp)
                *is_thp = false;

        pgdp = pgdir + pgd_index(ea);
        pgd  = READ_ONCE(*pgdp);
        /*
         * Always operate on the local stack value.  This makes sure the
         * value doesn't get updated by a parallel THP split/collapse,
         * page fault or a page unmap.  The returned pte_t * is still not
         * stable, so it should be checked there for the above conditions.
         */
        if (pgd_none(pgd))
                return NULL;
        else if (pgd_huge(pgd)) {
                ret_pte = (pte_t *)pgdp;
                goto out;
        } else if (is_hugepd(__hugepd(pgd_val(pgd))))
                hpdp = (hugepd_t *)&pgd;
        else {
                /*
                 * Even if we end up with an unmap, the pgtable will not
                 * be freed, because we do an rcu free and here we are
                 * irq disabled.
                 */
                pdshift = PUD_SHIFT;
                pudp = pud_offset(&pgd, ea);
                pud  = READ_ONCE(*pudp);

                if (pud_none(pud))
                        return NULL;
                else if (pud_huge(pud)) {
                        ret_pte = (pte_t *)pudp;
                        goto out;
                } else if (is_hugepd(__hugepd(pud_val(pud))))
                        hpdp = (hugepd_t *)&pud;
                else {
                        pdshift = PMD_SHIFT;
                        pmdp = pmd_offset(&pud, ea);
                        pmd  = READ_ONCE(*pmdp);
                        /*
                         * A hugepage collapse is captured by pmd_none, because
                         * it marks the pmd none and does a hpte invalidate.
                         */
                        if (pmd_none(pmd))
                                return NULL;

                        if (pmd_trans_huge(pmd) || pmd_devmap(pmd)) {
                                if (is_thp)
                                        *is_thp = true;
                                ret_pte = (pte_t *)pmdp;
                                goto out;
                        }

                        if (pmd_huge(pmd)) {
                                ret_pte = (pte_t *)pmdp;
                                goto out;
                        } else if (is_hugepd(__hugepd(pmd_val(pmd))))
                                hpdp = (hugepd_t *)&pmd;
                        else
                                return pte_offset_kernel(&pmd, ea);
                }
        }
        if (!hpdp)
                return NULL;

        ret_pte = hugepte_offset(*hpdp, ea, pdshift);
        pdshift = hugepd_shift(*hpdp);
out:
        if (hpage_shift)
                *hpage_shift = pdshift;
        return ret_pte;
}
EXPORT_SYMBOL_GPL(__find_linux_pte);

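/*
 * Fast-GUP walker for a single hugepte: check access permissions, take a
 * speculative reference on the compound head and collect the sub-pages,
 * backing out if the pte changed underneath us.
 */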
int gup_hugepte(pte_t *ptep, unsigned long sz, unsigned long addr,
                unsigned long end, int write, struct page **pages, int *nr)
{
        unsigned long pte_end;
        struct page *head, *page;
        pte_t pte;
        int refs;

        pte_end = (addr + sz) & ~(sz - 1);
        if (pte_end < end)
                end = pte_end;

        pte = READ_ONCE(*ptep);

        if (!pte_access_permitted(pte, write))
                return 0;

        /* hugepages are never "special" */
        VM_BUG_ON(!pfn_valid(pte_pfn(pte)));

        refs = 0;
        head = pte_page(pte);

        page = head + ((addr & (sz - 1)) >> PAGE_SHIFT);
        do {
                VM_BUG_ON(compound_head(page) != head);
                pages[*nr] = page;
                (*nr)++;
                page++;
                refs++;
        } while (addr += PAGE_SIZE, addr != end);

        if (!page_cache_add_speculative(head, refs)) {
                *nr -= refs;
                return 0;
        }

        if (unlikely(pte_val(pte) != pte_val(*ptep))) {
                /* Could be optimized better */
                *nr -= refs;
                while (refs--)
                        put_page(head);
                return 0;
        }

        return 1;
}