// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * This file contains common routines for dealing with the freeing of page
 * tables, along with common page table handling code.
 *
 * Derived from arch/powerpc/mm/tlb_64.c:
 *    Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
 *
 * Modifications by Paul Mackerras (PowerMac) (paulus@cs.anu.edu.au)
 * and Cort Dougan (PReP) (cort@cs.nmt.edu)
 * Copyright (C) 1996 Paul Mackerras
 *
 * Derived from "arch/i386/mm/init.c"
 *    Copyright (C) 1991, 1992, 1993, 1994  Linus Torvalds
 *
 * Dave Engebretsen <engebret@us.ibm.com>
 *      Rework for PPC64 port.
 */

#include <linux/kernel.h>
#include <linux/gfp.h>
#include <linux/mm.h>
#include <linux/percpu.h>
#include <linux/hardirq.h>
#include <linux/hugetlb.h>
#include <asm/pgalloc.h>
#include <asm/tlbflush.h>
#include <asm/tlb.h>
#include <asm/hugetlb.h>

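/*
 * Report whether the fault currently being handled is an instruction
 * (execute) fault: trap 0x400 is the instruction storage/access fault
 * in the kernel's trap encoding.
 */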
static inline int is_exec_fault(void)
{
	return current->thread.regs && TRAP(current->thread.regs) == 0x400;
}

/* We only try to do i/d cache coherency on stuff that looks like
 * reasonably "normal" PTEs. We currently require a PTE to be present,
 * and we avoid _PAGE_SPECIAL and cache-inhibited PTEs. We also only do
 * that on userspace PTEs.
 */
static inline int pte_looks_normal(pte_t pte)
{
	if (pte_present(pte) && !pte_special(pte)) {
		if (pte_ci(pte))
			return 0;
		if (pte_user(pte))
			return 1;
	}
	return 0;
}

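/*
 * Translate a PTE to its struct page, or NULL if the pfn is invalid or
 * the page is reserved (in which case there is no cache state to manage).
 */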
static struct page *maybe_pte_to_page(pte_t pte)
{
	unsigned long pfn = pte_pfn(pte);
	struct page *page;

	if (unlikely(!pfn_valid(pfn)))
		return NULL;
	page = pfn_to_page(pfn);
	if (PageReserved(page))
		return NULL;
	return page;
}

#ifdef CONFIG_PPC_BOOK3S

/* Server-style MMU handles coherency when hashing if HW exec permission
 * is supported per page (currently 64-bit only). If not, then we always
 * flush the cache for valid PTEs in set_pte. Embedded CPUs without HW exec
 * support fall into the same category.
 */

static pte_t set_pte_filter_hash(pte_t pte)
{
	if (radix_enabled())
		return pte;

	pte = __pte(pte_val(pte) & ~_PAGE_HPTEFLAGS);
	if (pte_looks_normal(pte) && !(cpu_has_feature(CPU_FTR_COHERENT_ICACHE) ||
				       cpu_has_feature(CPU_FTR_NOEXECUTE))) {
		struct page *pg = maybe_pte_to_page(pte);
		if (!pg)
			return pte;
		if (!test_bit(PG_arch_1, &pg->flags)) {
			flush_dcache_icache_page(pg);
			set_bit(PG_arch_1, &pg->flags);
		}
	}
	return pte;
}

#else /* CONFIG_PPC_BOOK3S */

static pte_t set_pte_filter_hash(pte_t pte) { return pte; }

#endif /* CONFIG_PPC_BOOK3S */

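/*
 * Note on PG_arch_1: as used in this file, the bit being set means the
 * page is known clean with respect to the I-cache, i.e.
 * flush_dcache_icache_page() has been called on it (or no flush was
 * needed), so it is safe to execute from.
 */
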
/* Embedded-type MMU with HW exec support. This is a bit more complicated
 * as we don't have two bits to spare for _PAGE_EXEC and _PAGE_HWEXEC, so
 * instead we "filter out" the exec permission for non-clean pages.
 */
static pte_t set_pte_filter(pte_t pte)
{
	struct page *pg;

	if (mmu_has_feature(MMU_FTR_HPTE_TABLE))
		return set_pte_filter_hash(pte);

	/* No exec permission in the first place, move on */
	if (!pte_exec(pte) || !pte_looks_normal(pte))
		return pte;

	/* If you set _PAGE_EXEC on weird pages you're on your own */
	pg = maybe_pte_to_page(pte);
	if (unlikely(!pg))
		return pte;

	/* If the page is clean, we move on */
	if (test_bit(PG_arch_1, &pg->flags))
		return pte;

	/* If it's an exec fault, we flush the cache and make it clean */
	if (is_exec_fault()) {
		flush_dcache_icache_page(pg);
		set_bit(PG_arch_1, &pg->flags);
		return pte;
	}

	/* Else, we filter out _PAGE_EXEC */
	return pte_exprotect(pte);
}

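/*
 * Filter applied when updating the access flags of an existing PTE: on an
 * exec fault against a page whose PTE lacks _PAGE_EXEC, restore the exec
 * permission, flushing the page to the I-cache first if it is not yet clean.
 */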
static pte_t set_access_flags_filter(pte_t pte, struct vm_area_struct *vma,
				     int dirty)
{
	struct page *pg;

	if (mmu_has_feature(MMU_FTR_HPTE_TABLE))
		return pte;

	/* So here, we only care about exec faults, as we use them
	 * to recover lost _PAGE_EXEC and perform I$/D$ coherency
	 * if necessary. Also if _PAGE_EXEC is already set, same deal,
	 * we just bail out.
	 */
	if (dirty || pte_exec(pte) || !is_exec_fault())
		return pte;

#ifdef CONFIG_DEBUG_VM
	/* So this is an exec fault, _PAGE_EXEC is not set. If it was
	 * an error we would have bailed out earlier in do_page_fault(),
	 * but let's make sure of it.
	 */
	if (WARN_ON(!(vma->vm_flags & VM_EXEC)))
		return pte;
#endif /* CONFIG_DEBUG_VM */

	/* If you set _PAGE_EXEC on weird pages you're on your own */
	pg = maybe_pte_to_page(pte);
	if (unlikely(!pg))
		goto bail;

	/* If the page is already clean, we move on */
	if (test_bit(PG_arch_1, &pg->flags))
		goto bail;

	/* Clean the page and set PG_arch_1 */
	flush_dcache_icache_page(pg);
	set_bit(PG_arch_1, &pg->flags);

bail:
	return pte_mkexec(pte);
}

/*
 * set_pte stores a linux PTE into the linux page table.
 */
void set_pte_at(struct mm_struct *mm, unsigned long addr, pte_t *ptep,
		pte_t pte)
{
	/*
	 * Make sure the hardware valid bit is not set. We don't do
	 * a TLB flush for this update.
	 */
	VM_WARN_ON(pte_hw_valid(*ptep) && !pte_protnone(*ptep));

	/* Add the _PAGE_PTE bit (where supported) when setting a PTE */
	pte = pte_mkpte(pte);

	/* Note: mm->context.id might not yet have been assigned as
	 * this context might not have been activated yet when this
	 * is called.
	 */
	pte = set_pte_filter(pte);

	/* Perform the setting of the PTE */
	__set_pte_at(mm, addr, ptep, pte, 0);
}

/*
 * This is called when relaxing access to a PTE. It's also called in the page
 * fault path when we don't hit any of the major fault cases, i.e. a minor
 * update of _PAGE_ACCESSED, _PAGE_DIRTY, etc... The generic code will have
 * handled those two for us; we additionally deal with missing execute
 * permission here on some processors.
 */
int ptep_set_access_flags(struct vm_area_struct *vma, unsigned long address,
			  pte_t *ptep, pte_t entry, int dirty)
{
	int changed;
	entry = set_access_flags_filter(entry, vma, dirty);
	changed = !pte_same(*(ptep), entry);
	if (changed) {
		assert_pte_locked(vma->vm_mm, address);
		__ptep_set_access_flags(vma, ptep, entry,
					address, mmu_virtual_psize);
	}
	return changed;
}

#ifdef CONFIG_HUGETLB_PAGE
int huge_ptep_set_access_flags(struct vm_area_struct *vma,
			       unsigned long addr, pte_t *ptep,
			       pte_t pte, int dirty)
{
#ifdef HUGETLB_NEED_PRELOAD
	/*
	 * The "return 1" forces a call of update_mmu_cache, which will write a
	 * TLB entry. Without this, platforms that don't do a write of the TLB
	 * entry in the TLB miss handler asm will fault ad infinitum.
	 */
	ptep_set_access_flags(vma, addr, ptep, pte, dirty);
	return 1;
#else
	int changed, psize;

	pte = set_access_flags_filter(pte, vma, dirty);
	changed = !pte_same(*(ptep), pte);
	if (changed) {
#ifdef CONFIG_PPC_BOOK3S_64
		struct hstate *h = hstate_vma(vma);

		psize = hstate_get_psize(h);
#ifdef CONFIG_DEBUG_VM
		assert_spin_locked(huge_pte_lockptr(h, vma->vm_mm, ptep));
#endif
#else
		/*
		 * Not used on non-book3s64 platforms. But 8xx
		 * can possibly use tsize derived from hstate.
		 */
		psize = 0;
#endif
		__ptep_set_access_flags(vma, ptep, pte, addr, psize);
	}
	return changed;
#endif
}
#endif /* CONFIG_HUGETLB_PAGE */

#ifdef CONFIG_DEBUG_VM
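/*
 * Debug-only check that the page table lock protecting the PTE that maps
 * @addr in @mm is actually held. Skipped for init_mm, whose kernel page
 * tables are not protected by the PTE locks.
 */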
void assert_pte_locked(struct mm_struct *mm, unsigned long addr)
{
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;

	if (mm == &init_mm)
		return;
	pgd = mm->pgd + pgd_index(addr);
	BUG_ON(pgd_none(*pgd));
	pud = pud_offset(pgd, addr);
	BUG_ON(pud_none(*pud));
	pmd = pmd_offset(pud, addr);
	/*
	 * When khugepaged collapses normal pages into a hugepage, it first
	 * sets the pmd to none to force page fault/gup to take mmap_sem.
	 * After the pmd is set to none, it does a pte_clear, which ends up
	 * in this assertion, so if we find the pmd none, just return.
	 */
	if (pmd_none(*pmd))
		return;
	BUG_ON(!pmd_present(*pmd));
	assert_spin_locked(pte_lockptr(mm, pmd));
}
#endif /* CONFIG_DEBUG_VM */

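/*
 * Translate a vmalloc (or otherwise page-mapped) kernel virtual address
 * to its physical address, including the offset within the page.
 */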
unsigned long vmalloc_to_phys(void *va)
{
	unsigned long pfn = vmalloc_to_pfn(va);

	BUG_ON(!pfn);
	return __pa(pfn_to_kaddr(pfn)) + offset_in_page(va);
}
EXPORT_SYMBOL_GPL(vmalloc_to_phys);

/*
 * We have 4 cases for pgds and pmds:
 * (1) invalid (all zeroes)
 * (2) pointer to next table, as normal; bottom 6 bits == 0
 * (3) leaf pte for huge page, _PAGE_PTE set
 * (4) hugepd pointer, _PAGE_PTE = 0 and bits [2..6] indicate size of table
 *
 * So long as we atomically load page table pointers we are safe against
 * teardown, and we can follow the address down to the page and take a ref
 * on it. This function needs to be called with interrupts disabled. We use
 * this variant when we have MSR[EE] = 0 but paca->irq_soft_mask =
 * IRQS_ENABLED.
 */
pte_t *__find_linux_pte(pgd_t *pgdir, unsigned long ea,
			bool *is_thp, unsigned *hpage_shift)
{
	pgd_t pgd, *pgdp;
	pud_t pud, *pudp;
	pmd_t pmd, *pmdp;
	pte_t *ret_pte;
	hugepd_t *hpdp = NULL;
	unsigned pdshift = PGDIR_SHIFT;

	if (hpage_shift)
		*hpage_shift = 0;

	if (is_thp)
		*is_thp = false;

	pgdp = pgdir + pgd_index(ea);
	pgd = READ_ONCE(*pgdp);
	/*
	 * Always operate on the local stack value. This makes sure the
	 * value doesn't get updated by a parallel THP split/collapse,
	 * page fault or page unmap. The returned pte_t * is still not
	 * stable, so the caller should re-check for those conditions.
	 */
	if (pgd_none(pgd))
		return NULL;

	if (pgd_huge(pgd)) {
		ret_pte = (pte_t *)pgdp;
		goto out;
	}
	if (is_hugepd(__hugepd(pgd_val(pgd)))) {
		hpdp = (hugepd_t *)&pgd;
		goto out_huge;
	}

	/*
	 * Even if we end up with an unmap, the pgtable will not
	 * be freed, because we do an RCU free and here we have
	 * interrupts disabled.
	 */
	pdshift = PUD_SHIFT;
	pudp = pud_offset(&pgd, ea);
	pud = READ_ONCE(*pudp);

	if (pud_none(pud))
		return NULL;

	if (pud_huge(pud)) {
		ret_pte = (pte_t *)pudp;
		goto out;
	}
	if (is_hugepd(__hugepd(pud_val(pud)))) {
		hpdp = (hugepd_t *)&pud;
		goto out_huge;
	}
	pdshift = PMD_SHIFT;
	pmdp = pmd_offset(&pud, ea);
	pmd = READ_ONCE(*pmdp);

	/*
	 * A hugepage collapse is captured by this condition, see
	 * pmdp_collapse_flush.
	 */
	if (pmd_none(pmd))
		return NULL;

#ifdef CONFIG_PPC_BOOK3S_64
	/*
	 * A hugepage split is captured by this condition, see
	 * pmdp_invalidate.
	 *
	 * Huge page modification can be caught here too.
	 */
	if (pmd_is_serializing(pmd))
		return NULL;
#endif

	if (pmd_trans_huge(pmd) || pmd_devmap(pmd)) {
		if (is_thp)
			*is_thp = true;
		ret_pte = (pte_t *)pmdp;
		goto out;
	}
	/*
	 * The pmd_large() check below also handles the swap pmd case;
	 * we need to do both checks because they are config-dependent.
	 */
	if (pmd_huge(pmd) || pmd_large(pmd)) {
		ret_pte = (pte_t *)pmdp;
		goto out;
	}
	if (is_hugepd(__hugepd(pmd_val(pmd)))) {
		hpdp = (hugepd_t *)&pmd;
		goto out_huge;
	}

	return pte_offset_kernel(&pmd, ea);

out_huge:
	if (!hpdp)
		return NULL;

	ret_pte = hugepte_offset(*hpdp, ea, pdshift);
	pdshift = hugepd_shift(*hpdp);
out:
	if (hpage_shift)
		*hpage_shift = pdshift;
	return ret_pte;
}
EXPORT_SYMBOL_GPL(__find_linux_pte);