/*
 *  S390 version
 *    Copyright IBM Corp. 1999, 2000
 *    Author(s): Hartmut Penner (hp@de.ibm.com)
 *               Ulrich Weigand (weigand@de.ibm.com)
 *               Martin Schwidefsky (schwidefsky@de.ibm.com)
 *
 *  Derived from "include/asm-i386/pgtable.h"
 */

#ifndef _ASM_S390_PGTABLE_H
#define _ASM_S390_PGTABLE_H

/*
 * The Linux memory management assumes a three-level page table setup.
 * For s390 64 bit we use up to four of the five levels the hardware
 * provides (region first tables are not used).
 *
 * The "pgd_xxx()" functions are trivial for a folded two-level
 * setup: the pgd is never bad, and a pmd always exists (as it's folded
 * into the pgd entry).
 *
 * This file contains the functions and defines necessary to modify and use
 * the S390 page table tree.
 */
#ifndef __ASSEMBLY__
#include <linux/sched.h>
#include <linux/mm_types.h>
#include <linux/page-flags.h>
#include <linux/radix-tree.h>
#include <asm/bug.h>
#include <asm/page.h>

extern pgd_t swapper_pg_dir[] __attribute__ ((aligned (4096)));
extern void paging_init(void);
extern void vmem_map_init(void);

/*
 * The S390 doesn't have any external MMU info: the kernel page
 * tables contain all the necessary information.
 */
#define update_mmu_cache(vma, address, ptep)     do { } while (0)
#define update_mmu_cache_pmd(vma, address, ptep) do { } while (0)

/*
 * ZERO_PAGE is a global shared page that is always zero; used
 * for zero-mapped memory areas etc.
 */

extern unsigned long empty_zero_page;
extern unsigned long zero_page_mask;

#define ZERO_PAGE(vaddr) \
	(virt_to_page((void *)(empty_zero_page + \
	 (((unsigned long)(vaddr)) & zero_page_mask))))
#define __HAVE_COLOR_ZERO_PAGE
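
/*
 * A minimal sketch of how the colored zero page selection works (the
 * concrete numbers below are illustrative assumptions, not taken from
 * this file): with e.g. four zero pages, zero_page_mask would be 0x3000
 * and ZERO_PAGE() picks one of them based on the faulting virtual
 * address, so reads from different zero mappings spread over cache sets:
 *
 *	unsigned long vaddr = 0x5000;
 *	unsigned long page = empty_zero_page + (vaddr & zero_page_mask);
 *	// vaddr & 0x3000 == 0x1000 -> the second of the four zero pages
 */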

/* TODO: s390 cannot support io_remap_pfn_range... */
#endif /* !__ASSEMBLY__ */

/*
 * PMD_SHIFT determines the size of the area a second-level page
 * table can map
 * PGDIR_SHIFT determines what a third-level page table entry can map
 */
#define PMD_SHIFT	20
#define PUD_SHIFT	31
#define PGDIR_SHIFT	42

#define PMD_SIZE	(1UL << PMD_SHIFT)
#define PMD_MASK	(~(PMD_SIZE-1))
#define PUD_SIZE	(1UL << PUD_SHIFT)
#define PUD_MASK	(~(PUD_SIZE-1))
#define PGDIR_SIZE	(1UL << PGDIR_SHIFT)
#define PGDIR_MASK	(~(PGDIR_SIZE-1))
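
/*
 * Worked out, these shifts give the following mapping granularities
 * (derived directly from the defines above):
 *
 *	PMD_SIZE   = 1UL << 20 = 1 MB  (one segment table entry)
 *	PUD_SIZE   = 1UL << 31 = 2 GB  (one region third table entry)
 *	PGDIR_SIZE = 1UL << 42 = 4 TB  (one region second table entry)
 */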

/*
 * entries per page directory level: the S390 is two-level, so
 * we don't really have any PMD directory physically.
 * For S390, segment-table entries are combined into one PGD,
 * which leads to 1024 ptes per pgd.
 */
#define PTRS_PER_PTE	256
#define PTRS_PER_PMD	2048
#define PTRS_PER_PUD	2048
#define PTRS_PER_PGD	2048

#define FIRST_USER_ADDRESS  0UL

#define pte_ERROR(e) \
	printk("%s:%d: bad pte %p.\n", __FILE__, __LINE__, (void *) pte_val(e))
#define pmd_ERROR(e) \
	printk("%s:%d: bad pmd %p.\n", __FILE__, __LINE__, (void *) pmd_val(e))
#define pud_ERROR(e) \
	printk("%s:%d: bad pud %p.\n", __FILE__, __LINE__, (void *) pud_val(e))
#define pgd_ERROR(e) \
	printk("%s:%d: bad pgd %p.\n", __FILE__, __LINE__, (void *) pgd_val(e))

#ifndef __ASSEMBLY__
/*
 * The vmalloc and module area will always be on the topmost area of the
 * kernel mapping. We reserve 128GB (64bit) for vmalloc and modules.
 * On 64 bit kernels we have a 2GB area at the top of the vmalloc area where
 * modules will reside. That makes sure that inter module branches always
 * happen without trampolines and in addition the placement within a 2GB frame
 * is branch prediction unit friendly.
 */
extern unsigned long VMALLOC_START;
extern unsigned long VMALLOC_END;
extern struct page *vmemmap;

#define VMEM_MAX_PHYS ((unsigned long) vmemmap)

extern unsigned long MODULES_VADDR;
extern unsigned long MODULES_END;
#define MODULES_VADDR	MODULES_VADDR
#define MODULES_END	MODULES_END
#define MODULES_LEN	(1UL << 31)

static inline int is_module_addr(void *addr)
{
	BUILD_BUG_ON(MODULES_LEN > (1UL << 31));
	if (addr < (void *)MODULES_VADDR)
		return 0;
	if (addr > (void *)MODULES_END)
		return 0;
	return 1;
}
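
/*
 * A minimal usage sketch (the caller below is hypothetical, not part of
 * this file):
 *
 *	void *p = some_text_address();
 *	if (is_module_addr(p))
 *		pr_debug("%p lies in the module area\n", p);
 *
 * Note that the check is inclusive of both MODULES_VADDR and MODULES_END.
 */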

/*
 * A 64 bit page table entry of S390 has the following format:
 * |			 PFRA			      |0IPC|  OS  |
 * 0000000000111111111122222222223333333333444444444455555555556666
 * 0123456789012345678901234567890123456789012345678901234567890123
 *
 * I Page-Invalid Bit:    Page is not available for address-translation
 * P Page-Protection Bit: Store access not possible for page
 * C Change-bit override: HW is not required to set change bit
 *
 * A 64 bit segment table entry of S390 has the following format:
 * |	    P-table origin			      |	 TT
 * 0000000000111111111122222222223333333333444444444455555555556666
 * 0123456789012345678901234567890123456789012345678901234567890123
 *
 * I Segment-Invalid Bit:    Segment is not available for address-translation
 * C Common-Segment Bit:     Segment is not private (PoP 3-30)
 * P Page-Protection Bit:    Store access not possible for page
 * TT Type 00
 *
 * A 64 bit region table entry of S390 has the following format:
 * |	    S-table origin			     |	 TF  TTTL
 * 0000000000111111111122222222223333333333444444444455555555556666
 * 0123456789012345678901234567890123456789012345678901234567890123
 *
 * I Segment-Invalid Bit:    Segment is not available for address-translation
 * TT Type 01
 * TF
 * TL Table length
 *
 * The 64 bit region table origin of S390 has the following format:
 * |      region table origin			       |       DTTL
 * 0000000000111111111122222222223333333333444444444455555555556666
 * 0123456789012345678901234567890123456789012345678901234567890123
 *
 * X Space-Switch event:
 * G Segment-Invalid Bit:
 * P Private-Space Bit:
 * S Storage-Alteration:
 * R Real space
 * TL Table-Length:
 *
 * A storage key has the following format:
 * | ACC |F|R|C|0|
 *  0   3 4 5 6 7
 * ACC: access key
 * F  : fetch protection bit
 * R  : referenced bit
 * C  : changed bit
 */

/* Hardware bits in the page table entry */
#define _PAGE_PROTECT	0x200		/* HW read-only bit  */
#define _PAGE_INVALID	0x400		/* HW invalid bit    */
#define _PAGE_LARGE	0x800		/* Bit to mark a large pte */

/* Software bits in the page table entry */
#define _PAGE_PRESENT	0x001		/* SW pte present bit */
#define _PAGE_YOUNG	0x004		/* SW pte young bit */
#define _PAGE_DIRTY	0x008		/* SW pte dirty bit */
#define _PAGE_READ	0x010		/* SW pte read bit */
#define _PAGE_WRITE	0x020		/* SW pte write bit */
#define _PAGE_SPECIAL	0x040		/* SW associated with special page */
#define _PAGE_UNUSED	0x080		/* SW bit for pgste usage state */
#define __HAVE_ARCH_PTE_SPECIAL

#ifdef CONFIG_MEM_SOFT_DIRTY
#define _PAGE_SOFT_DIRTY 0x002		/* SW pte soft dirty bit */
#else
#define _PAGE_SOFT_DIRTY 0x000
#endif

/* Set of bits not changed in pte_modify */
#define _PAGE_CHG_MASK		(PAGE_MASK | _PAGE_SPECIAL | _PAGE_DIRTY | \
				 _PAGE_YOUNG | _PAGE_SOFT_DIRTY)

/*
 * handle_pte_fault uses pte_present and pte_none to find out the pte type
 * WITHOUT holding the page table lock. The _PAGE_PRESENT bit is used to
 * distinguish present from not-present ptes. It is changed only with the page
 * table lock held.
 *
 * The following table gives the different possible bit combinations for
 * the pte hardware and software bits in the last 12 bits of a pte
 * (. unassigned bit, x don't care, t swap type):
 *
 *				842100000000
 *				000084210000
 *				000000008421
 *				.IR.uswrdy.p
 * empty			.10.00000000
 * swap				.11..ttttt.0
 * prot-none, clean, old	.11.xx0000.1
 * prot-none, clean, young	.11.xx0001.1
 * prot-none, dirty, old	.10.xx0010.1
 * prot-none, dirty, young	.10.xx0011.1
 * read-only, clean, old	.11.xx0100.1
 * read-only, clean, young	.01.xx0101.1
 * read-only, dirty, old	.11.xx0110.1
 * read-only, dirty, young	.01.xx0111.1
 * read-write, clean, old	.11.xx1100.1
 * read-write, clean, young	.01.xx1101.1
 * read-write, dirty, old	.10.xx1110.1
 * read-write, dirty, young	.00.xx1111.1
 * HW-bits: R read-only, I invalid
 * SW-bits: p present, y young, d dirty, r read, w write, s special,
 *	    u unused, l large
 *
 * pte_none    is true for the bit pattern .10.00000000, pte == 0x400
 * pte_swap    is true for the bit pattern .11..ttttt.0, (pte & 0x201) == 0x200
 * pte_present is true for the bit pattern .xx.xxxxxx.1, (pte & 0x001) == 0x001
 */
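
/*
 * A worked example, derived from the bit values defined above: a
 * read-write, dirty, young pte carries _PAGE_PRESENT | _PAGE_READ |
 * _PAGE_WRITE | _PAGE_DIRTY | _PAGE_YOUNG in its low bits:
 *
 *	0x001 | 0x010 | 0x020 | 0x008 | 0x004 = 0x03d
 *
 * Neither _PAGE_INVALID (0x400) nor _PAGE_PROTECT (0x200) is set, which
 * matches the ".00.xx1111.1" row of the table above.
 */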

/* Bits in the segment/region table address-space-control-element */
#define _ASCE_ORIGIN		~0xfffUL /* segment table origin	    */
#define _ASCE_PRIVATE_SPACE	0x100	/* private space control	    */
#define _ASCE_ALT_EVENT		0x80	/* storage alteration event control */
#define _ASCE_SPACE_SWITCH	0x40	/* space switch event		    */
#define _ASCE_REAL_SPACE	0x20	/* real space control		    */
#define _ASCE_TYPE_MASK		0x0c	/* asce table type mask		    */
#define _ASCE_TYPE_REGION1	0x0c	/* region first table type	    */
#define _ASCE_TYPE_REGION2	0x08	/* region second table type	    */
#define _ASCE_TYPE_REGION3	0x04	/* region third table type	    */
#define _ASCE_TYPE_SEGMENT	0x00	/* segment table type		    */
#define _ASCE_TABLE_LENGTH	0x03	/* region table length		    */

/* Bits in the region table entry */
#define _REGION_ENTRY_ORIGIN	~0xfffUL /* region/segment table origin	    */
#define _REGION_ENTRY_PROTECT	0x200	/* region protection bit	    */
#define _REGION_ENTRY_INVALID	0x20	/* invalid region table entry	    */
#define _REGION_ENTRY_TYPE_MASK	0x0c	/* region/segment table type mask   */
#define _REGION_ENTRY_TYPE_R1	0x0c	/* region first table type	    */
#define _REGION_ENTRY_TYPE_R2	0x08	/* region second table type	    */
#define _REGION_ENTRY_TYPE_R3	0x04	/* region third table type	    */
#define _REGION_ENTRY_LENGTH	0x03	/* region third length		    */

#define _REGION1_ENTRY		(_REGION_ENTRY_TYPE_R1 | _REGION_ENTRY_LENGTH)
#define _REGION1_ENTRY_EMPTY	(_REGION_ENTRY_TYPE_R1 | _REGION_ENTRY_INVALID)
#define _REGION2_ENTRY		(_REGION_ENTRY_TYPE_R2 | _REGION_ENTRY_LENGTH)
#define _REGION2_ENTRY_EMPTY	(_REGION_ENTRY_TYPE_R2 | _REGION_ENTRY_INVALID)
#define _REGION3_ENTRY		(_REGION_ENTRY_TYPE_R3 | _REGION_ENTRY_LENGTH)
#define _REGION3_ENTRY_EMPTY	(_REGION_ENTRY_TYPE_R3 | _REGION_ENTRY_INVALID)

#define _REGION3_ENTRY_LARGE	0x400	/* RTTE-format control, large page  */
#define _REGION3_ENTRY_RO	0x200	/* page protection bit		    */

/* Bits in the segment table entry */
#define _SEGMENT_ENTRY_BITS	0xfffffffffffffe33UL
#define _SEGMENT_ENTRY_BITS_LARGE 0xfffffffffff0ff33UL
#define _SEGMENT_ENTRY_ORIGIN_LARGE ~0xfffffUL /* large page address	    */
#define _SEGMENT_ENTRY_ORIGIN	~0x7ffUL /* segment table origin	    */
#define _SEGMENT_ENTRY_PROTECT	0x200	/* page protection bit		    */
#define _SEGMENT_ENTRY_INVALID	0x20	/* invalid segment table entry	    */

#define _SEGMENT_ENTRY		(0)
#define _SEGMENT_ENTRY_EMPTY	(_SEGMENT_ENTRY_INVALID)

#define _SEGMENT_ENTRY_DIRTY	0x2000	/* SW segment dirty bit */
#define _SEGMENT_ENTRY_YOUNG	0x1000	/* SW segment young bit */
#define _SEGMENT_ENTRY_SPLIT	0x0800	/* THP splitting bit */
#define _SEGMENT_ENTRY_LARGE	0x0400	/* STE-format control, large page */
#define _SEGMENT_ENTRY_READ	0x0002	/* SW segment read bit */
#define _SEGMENT_ENTRY_WRITE	0x0001	/* SW segment write bit */

#ifdef CONFIG_MEM_SOFT_DIRTY
#define _SEGMENT_ENTRY_SOFT_DIRTY 0x4000 /* SW segment soft dirty bit */
#else
#define _SEGMENT_ENTRY_SOFT_DIRTY 0x0000 /* SW segment soft dirty bit */
#endif

/*
 * Segment table entry encoding (R = read-only, I = invalid, y = young bit):
 *				dy..R...I...wr
 * prot-none, clean, old	00..1...1...00
 * prot-none, clean, young	01..1...1...00
 * prot-none, dirty, old	10..1...1...00
 * prot-none, dirty, young	11..1...1...00
 * read-only, clean, old	00..1...1...01
 * read-only, clean, young	01..1...0...01
 * read-only, dirty, old	10..1...1...01
 * read-only, dirty, young	11..1...0...01
 * read-write, clean, old	00..1...1...11
 * read-write, clean, young	01..1...0...11
 * read-write, dirty, old	10..0...1...11
 * read-write, dirty, young	11..0...0...11
 * The segment table origin is used to distinguish empty (origin==0) from
 * read-write, old segment table entries (origin!=0)
 * HW-bits: R read-only, I invalid
 * SW-bits: y young, d dirty, r read, w write
 */
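
/*
 * A worked example, derived from the segment bit values defined above:
 * a read-write, dirty, young entry, the "11..0...0...11" row, carries
 *
 *	_SEGMENT_ENTRY_DIRTY | _SEGMENT_ENTRY_YOUNG |
 *	_SEGMENT_ENTRY_READ  | _SEGMENT_ENTRY_WRITE
 *	= 0x2000 | 0x1000 | 0x0002 | 0x0001 = 0x3003
 *
 * in addition to the page table (or large page) origin; neither
 * _SEGMENT_ENTRY_PROTECT (0x200) nor _SEGMENT_ENTRY_INVALID (0x20) is set.
 */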

#define _SEGMENT_ENTRY_SPLIT_BIT 11	/* THP splitting bit number */

/* Page status table bits for virtualization */
#define PGSTE_ACC_BITS	0xf000000000000000UL
#define PGSTE_FP_BIT	0x0800000000000000UL
#define PGSTE_PCL_BIT	0x0080000000000000UL
#define PGSTE_HR_BIT	0x0040000000000000UL
#define PGSTE_HC_BIT	0x0020000000000000UL
#define PGSTE_GR_BIT	0x0004000000000000UL
#define PGSTE_GC_BIT	0x0002000000000000UL
#define PGSTE_UC_BIT	0x0000800000000000UL	/* user dirty (migration) */
#define PGSTE_IN_BIT	0x0000400000000000UL	/* IPTE notify bit */

/* Guest Page State used for virtualization */
#define _PGSTE_GPS_ZERO		0x0000000080000000UL
#define _PGSTE_GPS_USAGE_MASK	0x0000000003000000UL
#define _PGSTE_GPS_USAGE_STABLE 0x0000000000000000UL
#define _PGSTE_GPS_USAGE_UNUSED 0x0000000001000000UL

/*
 * A user page table pointer has the space-switch-event bit, the
 * private-space-control bit and the storage-alteration-event-control
 * bit set. A kernel page table pointer doesn't need them.
 */
#define _ASCE_USER_BITS		(_ASCE_SPACE_SWITCH | _ASCE_PRIVATE_SPACE | \
				 _ASCE_ALT_EVENT)

/*
 * Page protection definitions.
 */
#define PAGE_NONE	__pgprot(_PAGE_PRESENT | _PAGE_INVALID)
#define PAGE_READ	__pgprot(_PAGE_PRESENT | _PAGE_READ | \
				 _PAGE_INVALID | _PAGE_PROTECT)
#define PAGE_WRITE	__pgprot(_PAGE_PRESENT | _PAGE_READ | _PAGE_WRITE | \
				 _PAGE_INVALID | _PAGE_PROTECT)

#define PAGE_SHARED	__pgprot(_PAGE_PRESENT | _PAGE_READ | _PAGE_WRITE | \
				 _PAGE_YOUNG | _PAGE_DIRTY)
#define PAGE_KERNEL	__pgprot(_PAGE_PRESENT | _PAGE_READ | _PAGE_WRITE | \
				 _PAGE_YOUNG | _PAGE_DIRTY)
#define PAGE_KERNEL_RO	__pgprot(_PAGE_PRESENT | _PAGE_READ | _PAGE_YOUNG | \
				 _PAGE_PROTECT)

/*
 * On s390 the page table entry has an invalid bit and a read-only bit.
 * Read permission implies execute permission and write permission
 * implies read permission.
 */
	 /*xwr*/
#define __P000	PAGE_NONE
#define __P001	PAGE_READ
#define __P010	PAGE_READ
#define __P011	PAGE_READ
#define __P100	PAGE_READ
#define __P101	PAGE_READ
#define __P110	PAGE_READ
#define __P111	PAGE_READ

#define __S000	PAGE_NONE
#define __S001	PAGE_READ
#define __S010	PAGE_WRITE
#define __S011	PAGE_WRITE
#define __S100	PAGE_READ
#define __S101	PAGE_READ
#define __S110	PAGE_WRITE
#define __S111	PAGE_WRITE
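
/*
 * Note how every writable __Pxxx (private) combination maps to PAGE_READ
 * while the writable __Sxxx (shared) combinations map to PAGE_WRITE:
 * private writable mappings start out read-only so that the first store
 * faults and can be resolved with copy-on-write.
 */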

/*
 * Segment entry (large page) protection definitions.
 */
#define SEGMENT_NONE	__pgprot(_SEGMENT_ENTRY_INVALID | \
				 _SEGMENT_ENTRY_PROTECT)
#define SEGMENT_READ	__pgprot(_SEGMENT_ENTRY_PROTECT | \
				 _SEGMENT_ENTRY_READ)
#define SEGMENT_WRITE	__pgprot(_SEGMENT_ENTRY_READ | \
				 _SEGMENT_ENTRY_WRITE)

static inline int mm_has_pgste(struct mm_struct *mm)
{
#ifdef CONFIG_PGSTE
	if (unlikely(mm->context.has_pgste))
		return 1;
#endif
	return 0;
}

static inline int mm_alloc_pgste(struct mm_struct *mm)
{
#ifdef CONFIG_PGSTE
	if (unlikely(mm->context.alloc_pgste))
		return 1;
#endif
	return 0;
}

/*
 * In the case that a guest uses storage keys,
 * faults should no longer be backed by zero pages.
 */
#define mm_forbids_zeropage mm_use_skey
static inline int mm_use_skey(struct mm_struct *mm)
{
#ifdef CONFIG_PGSTE
	if (mm->context.use_skey)
		return 1;
#endif
	return 0;
}

/*
 * pgd/pmd/pte query functions
 */
static inline int pgd_present(pgd_t pgd)
{
	if ((pgd_val(pgd) & _REGION_ENTRY_TYPE_MASK) < _REGION_ENTRY_TYPE_R2)
		return 1;
	return (pgd_val(pgd) & _REGION_ENTRY_ORIGIN) != 0UL;
}

static inline int pgd_none(pgd_t pgd)
{
	if ((pgd_val(pgd) & _REGION_ENTRY_TYPE_MASK) < _REGION_ENTRY_TYPE_R2)
		return 0;
	return (pgd_val(pgd) & _REGION_ENTRY_INVALID) != 0UL;
}

static inline int pgd_bad(pgd_t pgd)
{
	/*
	 * With dynamic page table levels the pgd can be a region table
	 * entry or a segment table entry. Check for the bits that are
	 * invalid for either table entry.
	 */
	unsigned long mask =
		~_SEGMENT_ENTRY_ORIGIN & ~_REGION_ENTRY_INVALID &
		~_REGION_ENTRY_TYPE_MASK & ~_REGION_ENTRY_LENGTH;
	return (pgd_val(pgd) & mask) != 0;
}
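
/*
 * Worked out with the values defined above, that mask evaluates to
 *
 *	0x7ff & ~0x20 & ~0x0c & ~0x03 = 0x7d0
 *
 * i.e. pgd_bad() (and pud_bad() below) flag an entry in which any bit
 * is set that belongs neither to the origin, the invalid bit, the type
 * field nor the table length.
 */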

static inline int pud_present(pud_t pud)
{
	if ((pud_val(pud) & _REGION_ENTRY_TYPE_MASK) < _REGION_ENTRY_TYPE_R3)
		return 1;
	return (pud_val(pud) & _REGION_ENTRY_ORIGIN) != 0UL;
}

static inline int pud_none(pud_t pud)
{
	if ((pud_val(pud) & _REGION_ENTRY_TYPE_MASK) < _REGION_ENTRY_TYPE_R3)
		return 0;
	return (pud_val(pud) & _REGION_ENTRY_INVALID) != 0UL;
}

static inline int pud_large(pud_t pud)
{
	if ((pud_val(pud) & _REGION_ENTRY_TYPE_MASK) != _REGION_ENTRY_TYPE_R3)
		return 0;
	return !!(pud_val(pud) & _REGION3_ENTRY_LARGE);
}

static inline int pud_bad(pud_t pud)
{
	/*
	 * With dynamic page table levels the pud can be a region table
	 * entry or a segment table entry. Check for the bits that are
	 * invalid for either table entry.
	 */
	unsigned long mask =
		~_SEGMENT_ENTRY_ORIGIN & ~_REGION_ENTRY_INVALID &
		~_REGION_ENTRY_TYPE_MASK & ~_REGION_ENTRY_LENGTH;
	return (pud_val(pud) & mask) != 0;
}

static inline int pmd_present(pmd_t pmd)
{
	return pmd_val(pmd) != _SEGMENT_ENTRY_INVALID;
}

static inline int pmd_none(pmd_t pmd)
{
	return pmd_val(pmd) == _SEGMENT_ENTRY_INVALID;
}

static inline int pmd_large(pmd_t pmd)
{
	return (pmd_val(pmd) & _SEGMENT_ENTRY_LARGE) != 0;
}

static inline unsigned long pmd_pfn(pmd_t pmd)
{
	unsigned long origin_mask;

	origin_mask = _SEGMENT_ENTRY_ORIGIN;
	if (pmd_large(pmd))
		origin_mask = _SEGMENT_ENTRY_ORIGIN_LARGE;
	return (pmd_val(pmd) & origin_mask) >> PAGE_SHIFT;
}
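
/*
 * Example (the address is illustrative): a large pmd mapping the 1 MB
 * segment at physical address 0x40000000 keeps only the bits above the
 * 20-bit large page offset (_SEGMENT_ENTRY_ORIGIN_LARGE is ~0xfffffUL),
 * so with 4K pages pmd_pfn() yields 0x40000000 >> PAGE_SHIFT = 0x40000.
 */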

static inline int pmd_bad(pmd_t pmd)
{
	if (pmd_large(pmd))
		return (pmd_val(pmd) & ~_SEGMENT_ENTRY_BITS_LARGE) != 0;
	return (pmd_val(pmd) & ~_SEGMENT_ENTRY_BITS) != 0;
}

#define __HAVE_ARCH_PMDP_SPLITTING_FLUSH
extern void pmdp_splitting_flush(struct vm_area_struct *vma,
				 unsigned long addr, pmd_t *pmdp);

#define __HAVE_ARCH_PMDP_SET_ACCESS_FLAGS
extern int pmdp_set_access_flags(struct vm_area_struct *vma,
				 unsigned long address, pmd_t *pmdp,
				 pmd_t entry, int dirty);

#define __HAVE_ARCH_PMDP_CLEAR_YOUNG_FLUSH
extern int pmdp_clear_flush_young(struct vm_area_struct *vma,
				  unsigned long address, pmd_t *pmdp);

#define __HAVE_ARCH_PMD_WRITE
static inline int pmd_write(pmd_t pmd)
{
	return (pmd_val(pmd) & _SEGMENT_ENTRY_WRITE) != 0;
}

static inline int pmd_dirty(pmd_t pmd)
{
	int dirty = 1;
	if (pmd_large(pmd))
		dirty = (pmd_val(pmd) & _SEGMENT_ENTRY_DIRTY) != 0;
	return dirty;
}

static inline int pmd_young(pmd_t pmd)
{
	int young = 1;
	if (pmd_large(pmd))
		young = (pmd_val(pmd) & _SEGMENT_ENTRY_YOUNG) != 0;
	return young;
}

static inline int pte_present(pte_t pte)
{
	/* Bit pattern: (pte & 0x001) == 0x001 */
	return (pte_val(pte) & _PAGE_PRESENT) != 0;
}

static inline int pte_none(pte_t pte)
{
	/* Bit pattern: pte == 0x400 */
	return pte_val(pte) == _PAGE_INVALID;
}

static inline int pte_swap(pte_t pte)
{
	/* Bit pattern: (pte & 0x201) == 0x200 */
	return (pte_val(pte) & (_PAGE_PROTECT | _PAGE_PRESENT))
		== _PAGE_PROTECT;
}

static inline int pte_special(pte_t pte)
{
	return (pte_val(pte) & _PAGE_SPECIAL);
}

#define __HAVE_ARCH_PTE_SAME
static inline int pte_same(pte_t a, pte_t b)
{
	return pte_val(a) == pte_val(b);
}

#ifdef CONFIG_NUMA_BALANCING
static inline int pte_protnone(pte_t pte)
{
	return pte_present(pte) && !(pte_val(pte) & _PAGE_READ);
}

static inline int pmd_protnone(pmd_t pmd)
{
	/* pmd_large(pmd) implies pmd_present(pmd) */
	return pmd_large(pmd) && !(pmd_val(pmd) & _SEGMENT_ENTRY_READ);
}
#endif

static inline int pte_soft_dirty(pte_t pte)
{
	return pte_val(pte) & _PAGE_SOFT_DIRTY;
}
#define pte_swp_soft_dirty pte_soft_dirty

static inline pte_t pte_mksoft_dirty(pte_t pte)
{
	pte_val(pte) |= _PAGE_SOFT_DIRTY;
	return pte;
}
#define pte_swp_mksoft_dirty pte_mksoft_dirty

static inline pte_t pte_clear_soft_dirty(pte_t pte)
{
	pte_val(pte) &= ~_PAGE_SOFT_DIRTY;
	return pte;
}
#define pte_swp_clear_soft_dirty pte_clear_soft_dirty

static inline int pmd_soft_dirty(pmd_t pmd)
{
	return pmd_val(pmd) & _SEGMENT_ENTRY_SOFT_DIRTY;
}

static inline pmd_t pmd_mksoft_dirty(pmd_t pmd)
{
	pmd_val(pmd) |= _SEGMENT_ENTRY_SOFT_DIRTY;
	return pmd;
}

static inline pmd_t pmd_clear_soft_dirty(pmd_t pmd)
{
	pmd_val(pmd) &= ~_SEGMENT_ENTRY_SOFT_DIRTY;
	return pmd;
}

static inline pgste_t pgste_get_lock(pte_t *ptep)
{
	unsigned long new = 0;
#ifdef CONFIG_PGSTE
	unsigned long old;

	preempt_disable();
	asm(
		"	lg	%0,%2\n"
		"0:	lgr	%1,%0\n"
		"	nihh	%0,0xff7f\n"	/* clear PCL bit in old */
		"	oihh	%1,0x0080\n"	/* set PCL bit in new */
		"	csg	%0,%1,%2\n"
		"	jl	0b\n"
		: "=&d" (old), "=&d" (new), "=Q" (ptep[PTRS_PER_PTE])
		: "Q" (ptep[PTRS_PER_PTE]) : "cc", "memory");
#endif
	return __pgste(new);
}
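
/*
 * Roughly equivalent C for the csg loop above (a sketch for readability
 * only; the real code must stay in inline assembly): the PCL bit acts
 * as a spinlock on the pgste, which sits PTRS_PER_PTE entries behind
 * the pte.
 *
 *	unsigned long *pgste = (unsigned long *) (ptep + PTRS_PER_PTE);
 *	unsigned long old, new;
 *	do {
 *		old = *pgste & ~PGSTE_PCL_BIT;	// expect the lock free
 *		new = old | PGSTE_PCL_BIT;	// take the lock
 *	} while (cmpxchg(pgste, old, new) != old);
 */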

static inline void pgste_set_unlock(pte_t *ptep, pgste_t pgste)
{
#ifdef CONFIG_PGSTE
	asm(
		"	nihh	%1,0xff7f\n"	/* clear PCL bit */
		"	stg	%1,%0\n"
		: "=Q" (ptep[PTRS_PER_PTE])
		: "d" (pgste_val(pgste)), "Q" (ptep[PTRS_PER_PTE])
		: "cc", "memory");
	preempt_enable();
#endif
}

static inline pgste_t pgste_get(pte_t *ptep)
{
	unsigned long pgste = 0;
#ifdef CONFIG_PGSTE
	pgste = *(unsigned long *)(ptep + PTRS_PER_PTE);
#endif
	return __pgste(pgste);
}

static inline void pgste_set(pte_t *ptep, pgste_t pgste)
{
#ifdef CONFIG_PGSTE
	*(pgste_t *)(ptep + PTRS_PER_PTE) = pgste;
#endif
}

static inline pgste_t pgste_update_all(pte_t *ptep, pgste_t pgste,
				       struct mm_struct *mm)
{
#ifdef CONFIG_PGSTE
	unsigned long address, bits, skey;

	if (!mm_use_skey(mm) || pte_val(*ptep) & _PAGE_INVALID)
		return pgste;
	address = pte_val(*ptep) & PAGE_MASK;
	skey = (unsigned long) page_get_storage_key(address);
	bits = skey & (_PAGE_CHANGED | _PAGE_REFERENCED);
	/* Transfer page changed & referenced bit to guest bits in pgste */
	pgste_val(pgste) |= bits << 48;		/* GR bit & GC bit */
	/* Copy page access key and fetch protection bit to pgste */
	pgste_val(pgste) &= ~(PGSTE_ACC_BITS | PGSTE_FP_BIT);
	pgste_val(pgste) |= (skey & (_PAGE_ACC_BITS | _PAGE_FP_BIT)) << 56;
#endif
	return pgste;
}

static inline void pgste_set_key(pte_t *ptep, pgste_t pgste, pte_t entry,
				 struct mm_struct *mm)
{
#ifdef CONFIG_PGSTE
	unsigned long address;
	unsigned long nkey;

	if (!mm_use_skey(mm) || pte_val(entry) & _PAGE_INVALID)
		return;
	VM_BUG_ON(!(pte_val(*ptep) & _PAGE_INVALID));
	address = pte_val(entry) & PAGE_MASK;
	/*
	 * Set page access key and fetch protection bit from pgste.
	 * The guest C/R information is still in the PGSTE, set real
	 * key C/R to 0.
	 */
	nkey = (pgste_val(pgste) & (PGSTE_ACC_BITS | PGSTE_FP_BIT)) >> 56;
	nkey |= (pgste_val(pgste) & (PGSTE_GR_BIT | PGSTE_GC_BIT)) >> 48;
	page_set_storage_key(address, nkey, 0);
#endif
}

static inline pgste_t pgste_set_pte(pte_t *ptep, pgste_t pgste, pte_t entry)
{
	if ((pte_val(entry) & _PAGE_PRESENT) &&
	    (pte_val(entry) & _PAGE_WRITE) &&
	    !(pte_val(entry) & _PAGE_INVALID)) {
		if (!MACHINE_HAS_ESOP) {
			/*
			 * Without enhanced suppression-on-protection force
			 * the dirty bit on for all writable ptes.
			 */
			pte_val(entry) |= _PAGE_DIRTY;
			pte_val(entry) &= ~_PAGE_PROTECT;
		}
		if (!(pte_val(entry) & _PAGE_PROTECT))
			/* This pte allows write access, set user-dirty */
			pgste_val(pgste) |= PGSTE_UC_BIT;
	}
	*ptep = entry;
	return pgste;
}

/**
 * struct gmap - guest address space
 * @crst_list: list of all crst tables used in the guest address space
 * @mm: pointer to the parent mm_struct
 * @guest_to_host: radix tree with guest to host address translation
 * @host_to_guest: radix tree with pointer to segment table entries
 * @guest_table_lock: spinlock to protect all entries in the guest page table
 * @table: pointer to the page directory
 * @asce: address space control element for gmap page table
 * @pfault_enabled: defines if pfaults are applicable for the guest
 */
struct gmap {
	struct list_head list;
	struct list_head crst_list;
	struct mm_struct *mm;
	struct radix_tree_root guest_to_host;
	struct radix_tree_root host_to_guest;
	spinlock_t guest_table_lock;
	unsigned long *table;
	unsigned long asce;
	unsigned long asce_end;
	void *private;
	bool pfault_enabled;
};

/**
 * struct gmap_notifier - notify function block for page invalidation
 * @notifier_call: address of callback function
 */
struct gmap_notifier {
	struct list_head list;
	void (*notifier_call)(struct gmap *gmap, unsigned long gaddr);
};

struct gmap *gmap_alloc(struct mm_struct *mm, unsigned long limit);
void gmap_free(struct gmap *gmap);
void gmap_enable(struct gmap *gmap);
void gmap_disable(struct gmap *gmap);
int gmap_map_segment(struct gmap *gmap, unsigned long from,
		     unsigned long to, unsigned long len);
int gmap_unmap_segment(struct gmap *gmap, unsigned long to, unsigned long len);
unsigned long __gmap_translate(struct gmap *, unsigned long gaddr);
unsigned long gmap_translate(struct gmap *, unsigned long gaddr);
int __gmap_link(struct gmap *gmap, unsigned long gaddr, unsigned long vmaddr);
int gmap_fault(struct gmap *, unsigned long gaddr, unsigned int fault_flags);
void gmap_discard(struct gmap *, unsigned long from, unsigned long to);
void __gmap_zap(struct gmap *, unsigned long gaddr);
bool gmap_test_and_clear_dirty(unsigned long address, struct gmap *);

void gmap_register_ipte_notifier(struct gmap_notifier *);
void gmap_unregister_ipte_notifier(struct gmap_notifier *);
int gmap_ipte_notify(struct gmap *, unsigned long start, unsigned long len);
void gmap_do_ipte_notify(struct mm_struct *, unsigned long addr, pte_t *);
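
/*
 * A minimal usage sketch of the gmap API (hypothetical host code with
 * error handling elided; the limit and addresses are assumptions):
 * create a guest address space on top of the current mm, back a 1 MB
 * guest segment with a host range, and tear everything down again.
 *
 *	struct gmap *gmap = gmap_alloc(current->mm, (1UL << 44) - 1);
 *	gmap_map_segment(gmap, host_from, guest_to, 1UL << 20);
 *	...
 *	gmap_unmap_segment(gmap, guest_to, 1UL << 20);
 *	gmap_free(gmap);
 */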
Martin Schwidefsky | d338363 | 2013-04-17 10:53:39 +0200 | [diff] [blame] | 808 | |
| 809 | static inline pgste_t pgste_ipte_notify(struct mm_struct *mm, |
Martin Schwidefsky | 55dbbdd | 2014-04-30 14:44:44 +0200 | [diff] [blame] | 810 | unsigned long addr, |
Martin Schwidefsky | d338363 | 2013-04-17 10:53:39 +0200 | [diff] [blame] | 811 | pte_t *ptep, pgste_t pgste) |
| 812 | { |
| 813 | #ifdef CONFIG_PGSTE |
Martin Schwidefsky | 0d0dafc | 2013-05-17 14:41:33 +0200 | [diff] [blame] | 814 | if (pgste_val(pgste) & PGSTE_IN_BIT) { |
| 815 | pgste_val(pgste) &= ~PGSTE_IN_BIT; |
Martin Schwidefsky | 9da4e38 | 2014-04-30 14:46:26 +0200 | [diff] [blame] | 816 | gmap_do_ipte_notify(mm, addr, ptep); |
Martin Schwidefsky | d338363 | 2013-04-17 10:53:39 +0200 | [diff] [blame] | 817 | } |
| 818 | #endif |
| 819 | return pgste; |
| 820 | } |
| 821 | |
Martin Schwidefsky | b2fa47e | 2011-05-23 10:24:40 +0200 | [diff] [blame] | 822 | /* |
| 823 | * Certain architectures need to do special things when PTEs |
| 824 | * within a page table are directly modified. Thus, the following |
| 825 | * hook is made available. |
| 826 | */ |
| 827 | static inline void set_pte_at(struct mm_struct *mm, unsigned long addr, |
| 828 | pte_t *ptep, pte_t entry) |
| 829 | { |
| 830 | pgste_t pgste; |
| 831 | |
| 832 | if (mm_has_pgste(mm)) { |
| 833 | pgste = pgste_get_lock(ptep); |
Konstantin Weitz | b31288f | 2013-04-17 17:36:29 +0200 | [diff] [blame] | 834 | pgste_val(pgste) &= ~_PGSTE_GPS_ZERO; |
Dominik Dingel | 65eef335 | 2014-01-14 15:02:11 +0100 | [diff] [blame] | 835 | pgste_set_key(ptep, pgste, entry, mm); |
Martin Schwidefsky | 0a61b22 | 2013-10-18 12:03:41 +0200 | [diff] [blame] | 836 | pgste = pgste_set_pte(ptep, pgste, entry); |
Martin Schwidefsky | b2fa47e | 2011-05-23 10:24:40 +0200 | [diff] [blame] | 837 | pgste_set_unlock(ptep, pgste); |
Martin Schwidefsky | abf09be | 2012-11-07 13:17:37 +0100 | [diff] [blame] | 838 | } else { |
Martin Schwidefsky | b2fa47e | 2011-05-23 10:24:40 +0200 | [diff] [blame] | 839 | *ptep = entry; |
Martin Schwidefsky | abf09be | 2012-11-07 13:17:37 +0100 | [diff] [blame] | 840 | } |
Christian Borntraeger | 5b7baf0 | 2008-03-25 18:47:12 +0100 | [diff] [blame] | 841 | } |
| 842 | |
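/*
 * A minimal usage sketch (editor's illustration, assuming a page 'page'
 * and a located pte slot 'ptep'; see mk_pte() further below):
 *
 *	pte_t pte = mk_pte(page, PAGE_READ);
 *	set_pte_at(mm, addr, ptep, pte);
 *
 * With CONFIG_PGSTE the hook also keeps the guest storage key state in
 * sync; otherwise it degenerates to a plain store of the pte.
 */
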
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 843 | /* |
| 844 |  * The query functions pte_write/pte_dirty/pte_young only work if
| 845 |  * pte_present() is true. Undefined behaviour if not.
| 846 | */ |
Adrian Bunk | 4448aaf | 2005-11-08 21:34:42 -0800 | [diff] [blame] | 847 | static inline int pte_write(pte_t pte) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 848 | { |
Martin Schwidefsky | e509861 | 2013-07-23 20:57:57 +0200 | [diff] [blame] | 849 | return (pte_val(pte) & _PAGE_WRITE) != 0; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 850 | } |
| 851 | |
Adrian Bunk | 4448aaf | 2005-11-08 21:34:42 -0800 | [diff] [blame] | 852 | static inline int pte_dirty(pte_t pte) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 853 | { |
Martin Schwidefsky | e509861 | 2013-07-23 20:57:57 +0200 | [diff] [blame] | 854 | return (pte_val(pte) & _PAGE_DIRTY) != 0; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 855 | } |
| 856 | |
Adrian Bunk | 4448aaf | 2005-11-08 21:34:42 -0800 | [diff] [blame] | 857 | static inline int pte_young(pte_t pte) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 858 | { |
Martin Schwidefsky | 0944fe3 | 2013-07-23 22:11:42 +0200 | [diff] [blame] | 859 | return (pte_val(pte) & _PAGE_YOUNG) != 0; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 860 | } |
| 861 | |
Konstantin Weitz | b31288f | 2013-04-17 17:36:29 +0200 | [diff] [blame] | 862 | #define __HAVE_ARCH_PTE_UNUSED |
| 863 | static inline int pte_unused(pte_t pte) |
| 864 | { |
| 865 | return pte_val(pte) & _PAGE_UNUSED; |
| 866 | } |
| 867 | |
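/*
 * Editor's sketch of the intended use of the query functions above;
 * pte_present() (defined earlier in this file) must be checked first:
 *
 *	pte_t pte = *ptep;
 *	if (pte_present(pte) && pte_dirty(pte))
 *		handle_dirty_page();	// hypothetical helper
 */
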
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 868 | /* |
| 869 | * pgd/pmd/pte modification functions |
| 870 | */ |
| 871 | |
Martin Schwidefsky | b2fa47e | 2011-05-23 10:24:40 +0200 | [diff] [blame] | 872 | static inline void pgd_clear(pgd_t *pgd) |
Martin Schwidefsky | 5a216a2 | 2008-02-09 18:24:36 +0100 | [diff] [blame] | 873 | { |
Martin Schwidefsky | 6252d70 | 2008-02-09 18:24:37 +0100 | [diff] [blame] | 874 | if ((pgd_val(*pgd) & _REGION_ENTRY_TYPE_MASK) == _REGION_ENTRY_TYPE_R2) |
| 875 | pgd_val(*pgd) = _REGION2_ENTRY_EMPTY; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 876 | } |
| 877 | |
Martin Schwidefsky | 6252d70 | 2008-02-09 18:24:37 +0100 | [diff] [blame] | 878 | static inline void pud_clear(pud_t *pud) |
Gerald Schaefer | c1821c2 | 2007-02-05 21:18:17 +0100 | [diff] [blame] | 879 | { |
Martin Schwidefsky | b2fa47e | 2011-05-23 10:24:40 +0200 | [diff] [blame] | 880 | if ((pud_val(*pud) & _REGION_ENTRY_TYPE_MASK) == _REGION_ENTRY_TYPE_R3) |
| 881 | pud_val(*pud) = _REGION3_ENTRY_EMPTY; |
Gerald Schaefer | c1821c2 | 2007-02-05 21:18:17 +0100 | [diff] [blame] | 882 | } |
Martin Schwidefsky | 146e4b3 | 2008-02-09 18:24:35 +0100 | [diff] [blame] | 883 | |
Martin Schwidefsky | b2fa47e | 2011-05-23 10:24:40 +0200 | [diff] [blame] | 884 | static inline void pmd_clear(pmd_t *pmdp) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 885 | { |
Martin Schwidefsky | e509861 | 2013-07-23 20:57:57 +0200 | [diff] [blame] | 886 | pmd_val(*pmdp) = _SEGMENT_ENTRY_INVALID; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 887 | } |
| 888 | |
Adrian Bunk | 4448aaf | 2005-11-08 21:34:42 -0800 | [diff] [blame] | 889 | static inline void pte_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 890 | { |
Martin Schwidefsky | e509861 | 2013-07-23 20:57:57 +0200 | [diff] [blame] | 891 | pte_val(*ptep) = _PAGE_INVALID; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 892 | } |
| 893 | |
| 894 | /* |
| 895 | * The following pte modification functions only work if |
| 896 |  * pte_present() is true. Undefined behaviour if not.
| 897 | */ |
Adrian Bunk | 4448aaf | 2005-11-08 21:34:42 -0800 | [diff] [blame] | 898 | static inline pte_t pte_modify(pte_t pte, pgprot_t newprot) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 899 | { |
Nick Piggin | 138c902 | 2008-07-08 11:31:06 +0200 | [diff] [blame] | 900 | pte_val(pte) &= _PAGE_CHG_MASK; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 901 | pte_val(pte) |= pgprot_val(newprot); |
Martin Schwidefsky | 0944fe3 | 2013-07-23 22:11:42 +0200 | [diff] [blame] | 902 | /* |
| 903 | * newprot for PAGE_NONE, PAGE_READ and PAGE_WRITE has the |
| 904 |  * invalid bit set; clear it again for readable, young pages.
| 905 | */ |
| 906 | if ((pte_val(pte) & _PAGE_YOUNG) && (pte_val(pte) & _PAGE_READ)) |
| 907 | pte_val(pte) &= ~_PAGE_INVALID; |
| 908 | /* |
| 909 | * newprot for PAGE_READ and PAGE_WRITE has the page protection |
| 910 |  * bit set; clear it again for writable, dirty pages.
| 911 | */ |
Martin Schwidefsky | e509861 | 2013-07-23 20:57:57 +0200 | [diff] [blame] | 912 | if ((pte_val(pte) & _PAGE_DIRTY) && (pte_val(pte) & _PAGE_WRITE)) |
| 913 | pte_val(pte) &= ~_PAGE_PROTECT; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 914 | return pte; |
| 915 | } |
| 916 | |
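/*
 * For illustration (editor's sketch of the common-code protection change
 * sequence, cf. change_pte_range(); the start/commit helpers are defined
 * further below):
 *
 *	pte_t pte = ptep_modify_prot_start(mm, addr, ptep);
 *	pte = pte_modify(pte, newprot);
 *	ptep_modify_prot_commit(mm, addr, ptep, pte);
 */
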
Adrian Bunk | 4448aaf | 2005-11-08 21:34:42 -0800 | [diff] [blame] | 917 | static inline pte_t pte_wrprotect(pte_t pte) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 918 | { |
Martin Schwidefsky | e509861 | 2013-07-23 20:57:57 +0200 | [diff] [blame] | 919 | pte_val(pte) &= ~_PAGE_WRITE; |
| 920 | pte_val(pte) |= _PAGE_PROTECT; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 921 | return pte; |
| 922 | } |
| 923 | |
Adrian Bunk | 4448aaf | 2005-11-08 21:34:42 -0800 | [diff] [blame] | 924 | static inline pte_t pte_mkwrite(pte_t pte) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 925 | { |
Martin Schwidefsky | e509861 | 2013-07-23 20:57:57 +0200 | [diff] [blame] | 926 | pte_val(pte) |= _PAGE_WRITE; |
| 927 | if (pte_val(pte) & _PAGE_DIRTY) |
| 928 | pte_val(pte) &= ~_PAGE_PROTECT; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 929 | return pte; |
| 930 | } |
| 931 | |
Adrian Bunk | 4448aaf | 2005-11-08 21:34:42 -0800 | [diff] [blame] | 932 | static inline pte_t pte_mkclean(pte_t pte) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 933 | { |
Martin Schwidefsky | e509861 | 2013-07-23 20:57:57 +0200 | [diff] [blame] | 934 | pte_val(pte) &= ~_PAGE_DIRTY; |
| 935 | pte_val(pte) |= _PAGE_PROTECT; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 936 | return pte; |
| 937 | } |
| 938 | |
Adrian Bunk | 4448aaf | 2005-11-08 21:34:42 -0800 | [diff] [blame] | 939 | static inline pte_t pte_mkdirty(pte_t pte) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 940 | { |
Martin Schwidefsky | 5614dd9 | 2015-04-22 14:47:42 +0200 | [diff] [blame^] | 941 | pte_val(pte) |= _PAGE_DIRTY | _PAGE_SOFT_DIRTY; |
Martin Schwidefsky | e509861 | 2013-07-23 20:57:57 +0200 | [diff] [blame] | 942 | if (pte_val(pte) & _PAGE_WRITE) |
| 943 | pte_val(pte) &= ~_PAGE_PROTECT; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 944 | return pte; |
| 945 | } |
| 946 | |
Adrian Bunk | 4448aaf | 2005-11-08 21:34:42 -0800 | [diff] [blame] | 947 | static inline pte_t pte_mkold(pte_t pte) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 948 | { |
Martin Schwidefsky | e509861 | 2013-07-23 20:57:57 +0200 | [diff] [blame] | 949 | pte_val(pte) &= ~_PAGE_YOUNG; |
Martin Schwidefsky | 0944fe3 | 2013-07-23 22:11:42 +0200 | [diff] [blame] | 950 | pte_val(pte) |= _PAGE_INVALID; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 951 | return pte; |
| 952 | } |
| 953 | |
Adrian Bunk | 4448aaf | 2005-11-08 21:34:42 -0800 | [diff] [blame] | 954 | static inline pte_t pte_mkyoung(pte_t pte) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 955 | { |
Martin Schwidefsky | 0944fe3 | 2013-07-23 22:11:42 +0200 | [diff] [blame] | 956 | pte_val(pte) |= _PAGE_YOUNG; |
| 957 | if (pte_val(pte) & _PAGE_READ) |
| 958 | pte_val(pte) &= ~_PAGE_INVALID; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 959 | return pte; |
| 960 | } |
| 961 | |
Nick Piggin | 7e67513 | 2008-04-28 02:13:00 -0700 | [diff] [blame] | 962 | static inline pte_t pte_mkspecial(pte_t pte) |
| 963 | { |
Nick Piggin | a08cb62 | 2008-04-28 02:13:03 -0700 | [diff] [blame] | 964 | pte_val(pte) |= _PAGE_SPECIAL; |
Nick Piggin | 7e67513 | 2008-04-28 02:13:00 -0700 | [diff] [blame] | 965 | return pte; |
| 966 | } |
| 967 | |
Heiko Carstens | 84afdce | 2010-10-25 16:10:36 +0200 | [diff] [blame] | 968 | #ifdef CONFIG_HUGETLB_PAGE |
| 969 | static inline pte_t pte_mkhuge(pte_t pte) |
| 970 | { |
Martin Schwidefsky | e509861 | 2013-07-23 20:57:57 +0200 | [diff] [blame] | 971 | pte_val(pte) |= _PAGE_LARGE; |
Heiko Carstens | 84afdce | 2010-10-25 16:10:36 +0200 | [diff] [blame] | 972 | return pte; |
| 973 | } |
| 974 | #endif |
| 975 | |
Gerald Schaefer | 9282ed9 | 2006-09-20 15:59:37 +0200 | [diff] [blame] | 976 | static inline void __ptep_ipte(unsigned long address, pte_t *ptep) |
| 977 | { |
Martin Schwidefsky | 53e857f | 2012-09-10 13:00:09 +0200 | [diff] [blame] | 978 | unsigned long pto = (unsigned long) ptep; |
| 979 | |
Martin Schwidefsky | 53e857f | 2012-09-10 13:00:09 +0200 | [diff] [blame] | 980 | /* Invalidation + global TLB flush for the pte */ |
| 981 | asm volatile( |
| 982 | " ipte %2,%3" |
| 983 | : "=m" (*ptep) : "m" (*ptep), "a" (pto), "a" (address)); |
| 984 | } |
| 985 | |
Martin Schwidefsky | 1b948d6 | 2014-04-03 13:55:01 +0200 | [diff] [blame] | 986 | static inline void __ptep_ipte_local(unsigned long address, pte_t *ptep) |
| 987 | { |
| 988 | unsigned long pto = (unsigned long) ptep; |
| 989 | |
Martin Schwidefsky | 1b948d6 | 2014-04-03 13:55:01 +0200 | [diff] [blame] | 990 | /* Invalidation + local TLB flush for the pte */ |
| 991 | asm volatile( |
| 992 | " .insn rrf,0xb2210000,%2,%3,0,1" |
| 993 | : "=m" (*ptep) : "m" (*ptep), "a" (pto), "a" (address)); |
| 994 | } |
| 995 | |
Heiko Carstens | cfb0b24 | 2014-09-23 21:29:20 +0200 | [diff] [blame] | 996 | static inline void __ptep_ipte_range(unsigned long address, int nr, pte_t *ptep) |
| 997 | { |
| 998 | unsigned long pto = (unsigned long) ptep; |
| 999 | |
Heiko Carstens | cfb0b24 | 2014-09-23 21:29:20 +0200 | [diff] [blame] | 1000 | /* Invalidate a range of ptes + global TLB flush of the ptes */ |
| 1001 | do { |
| 1002 | asm volatile( |
| 1003 | " .insn rrf,0xb2210000,%2,%0,%1,0" |
| 1004 | : "+a" (address), "+a" (nr) : "a" (pto) : "memory"); |
| 1005 | } while (nr != 255); |
| 1006 | } |
| 1007 | |
Martin Schwidefsky | 53e857f | 2012-09-10 13:00:09 +0200 | [diff] [blame] | 1008 | static inline void ptep_flush_direct(struct mm_struct *mm, |
| 1009 | unsigned long address, pte_t *ptep) |
| 1010 | { |
Martin Schwidefsky | 1b948d6 | 2014-04-03 13:55:01 +0200 | [diff] [blame] | 1011 | int active, count; |
| 1012 | |
Martin Schwidefsky | 53e857f | 2012-09-10 13:00:09 +0200 | [diff] [blame] | 1013 | if (pte_val(*ptep) & _PAGE_INVALID) |
| 1014 | return; |
Martin Schwidefsky | 1b948d6 | 2014-04-03 13:55:01 +0200 | [diff] [blame] | 1015 | active = (mm == current->active_mm) ? 1 : 0; |
| 1016 | count = atomic_add_return(0x10000, &mm->context.attach_count); |
| 1017 | if (MACHINE_HAS_TLB_LC && (count & 0xffff) <= active && |
| 1018 | cpumask_equal(mm_cpumask(mm), cpumask_of(smp_processor_id()))) |
| 1019 | __ptep_ipte_local(address, ptep); |
| 1020 | else |
| 1021 | __ptep_ipte(address, ptep); |
| 1022 | atomic_sub(0x10000, &mm->context.attach_count); |
Gerald Schaefer | 9282ed9 | 2006-09-20 15:59:37 +0200 | [diff] [blame] | 1023 | } |
| 1024 | |
Martin Schwidefsky | 5c474a1 | 2013-08-16 13:31:40 +0200 | [diff] [blame] | 1025 | static inline void ptep_flush_lazy(struct mm_struct *mm, |
| 1026 | unsigned long address, pte_t *ptep) |
| 1027 | { |
Martin Schwidefsky | 53e857f | 2012-09-10 13:00:09 +0200 | [diff] [blame] | 1028 | int active, count; |
Martin Schwidefsky | 5c474a1 | 2013-08-16 13:31:40 +0200 | [diff] [blame] | 1029 | |
Martin Schwidefsky | 53e857f | 2012-09-10 13:00:09 +0200 | [diff] [blame] | 1030 | if (pte_val(*ptep) & _PAGE_INVALID) |
| 1031 | return; |
| 1032 | active = (mm == current->active_mm) ? 1 : 0; |
| 1033 | count = atomic_add_return(0x10000, &mm->context.attach_count); |
| 1034 | if ((count & 0xffff) <= active) { |
| 1035 | pte_val(*ptep) |= _PAGE_INVALID; |
Martin Schwidefsky | 5c474a1 | 2013-08-16 13:31:40 +0200 | [diff] [blame] | 1036 | mm->context.flush_mm = 1; |
Martin Schwidefsky | 53e857f | 2012-09-10 13:00:09 +0200 | [diff] [blame] | 1037 | } else |
| 1038 | __ptep_ipte(address, ptep); |
| 1039 | atomic_sub(0x10000, &mm->context.attach_count); |
Martin Schwidefsky | 5c474a1 | 2013-08-16 13:31:40 +0200 | [diff] [blame] | 1040 | } |
| 1041 | |
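/*
 * Editor's note on the two flush helpers above (an inference from this
 * file alone): the lower 16 bits of mm->context.attach_count track how
 * many CPUs have the mm attached, and adding 0x10000 marks a flush in
 * progress. If at most the current CPU uses the mm, ptep_flush_direct()
 * may use the local IPTE variant, and ptep_flush_lazy() may skip the
 * hardware invalidation entirely, deferring it via mm->context.flush_mm.
 */
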
Martin Schwidefsky | 0a61b22 | 2013-10-18 12:03:41 +0200 | [diff] [blame] | 1042 | /* |
| 1043 | * Get (and clear) the user dirty bit for a pte. |
| 1044 | */ |
| 1045 | static inline int ptep_test_and_clear_user_dirty(struct mm_struct *mm, |
| 1046 | unsigned long addr, |
| 1047 | pte_t *ptep) |
| 1048 | { |
| 1049 | pgste_t pgste; |
| 1050 | pte_t pte; |
| 1051 | int dirty; |
| 1052 | |
| 1053 | if (!mm_has_pgste(mm)) |
| 1054 | return 0; |
| 1055 | pgste = pgste_get_lock(ptep); |
| 1056 | dirty = !!(pgste_val(pgste) & PGSTE_UC_BIT); |
| 1057 | pgste_val(pgste) &= ~PGSTE_UC_BIT; |
| 1058 | pte = *ptep; |
| 1059 | if (dirty && (pte_val(pte) & _PAGE_PRESENT)) { |
Martin Schwidefsky | 55dbbdd | 2014-04-30 14:44:44 +0200 | [diff] [blame] | 1060 | pgste = pgste_ipte_notify(mm, addr, ptep, pgste); |
Martin Schwidefsky | 0a61b22 | 2013-10-18 12:03:41 +0200 | [diff] [blame] | 1061 | __ptep_ipte(addr, ptep); |
| 1062 | if (MACHINE_HAS_ESOP || !(pte_val(pte) & _PAGE_WRITE)) |
| 1063 | pte_val(pte) |= _PAGE_PROTECT; |
| 1064 | else |
| 1065 | pte_val(pte) |= _PAGE_INVALID; |
| 1066 | *ptep = pte; |
| 1067 | } |
| 1068 | pgste_set_unlock(ptep, pgste); |
| 1069 | return dirty; |
| 1070 | } |
| 1071 | |
Martin Schwidefsky | 0944fe3 | 2013-07-23 22:11:42 +0200 | [diff] [blame] | 1072 | #define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_YOUNG |
| 1073 | static inline int ptep_test_and_clear_young(struct vm_area_struct *vma, |
| 1074 | unsigned long addr, pte_t *ptep) |
| 1075 | { |
| 1076 | pgste_t pgste; |
Christian Borntraeger | 3e03d4c | 2014-08-28 21:21:41 +0200 | [diff] [blame] | 1077 | pte_t pte, oldpte; |
Martin Schwidefsky | 0944fe3 | 2013-07-23 22:11:42 +0200 | [diff] [blame] | 1078 | int young; |
| 1079 | |
| 1080 | if (mm_has_pgste(vma->vm_mm)) { |
| 1081 | pgste = pgste_get_lock(ptep); |
Martin Schwidefsky | 55dbbdd | 2014-04-30 14:44:44 +0200 | [diff] [blame] | 1082 | pgste = pgste_ipte_notify(vma->vm_mm, addr, ptep, pgste); |
Martin Schwidefsky | 0944fe3 | 2013-07-23 22:11:42 +0200 | [diff] [blame] | 1083 | } |
| 1084 | |
Christian Borntraeger | 3e03d4c | 2014-08-28 21:21:41 +0200 | [diff] [blame] | 1085 | oldpte = pte = *ptep; |
Martin Schwidefsky | 53e857f | 2012-09-10 13:00:09 +0200 | [diff] [blame] | 1086 | ptep_flush_direct(vma->vm_mm, addr, ptep); |
Martin Schwidefsky | 0944fe3 | 2013-07-23 22:11:42 +0200 | [diff] [blame] | 1087 | young = pte_young(pte); |
| 1088 | pte = pte_mkold(pte); |
| 1089 | |
| 1090 | if (mm_has_pgste(vma->vm_mm)) { |
Christian Borntraeger | 3e03d4c | 2014-08-28 21:21:41 +0200 | [diff] [blame] | 1091 | pgste = pgste_update_all(&oldpte, pgste, vma->vm_mm); |
Martin Schwidefsky | 0a61b22 | 2013-10-18 12:03:41 +0200 | [diff] [blame] | 1092 | pgste = pgste_set_pte(ptep, pgste, pte); |
Martin Schwidefsky | 0944fe3 | 2013-07-23 22:11:42 +0200 | [diff] [blame] | 1093 | pgste_set_unlock(ptep, pgste); |
| 1094 | } else |
| 1095 | *ptep = pte; |
| 1096 | |
| 1097 | return young; |
| 1098 | } |
| 1099 | |
| 1100 | #define __HAVE_ARCH_PTEP_CLEAR_YOUNG_FLUSH |
| 1101 | static inline int ptep_clear_flush_young(struct vm_area_struct *vma, |
| 1102 | unsigned long address, pte_t *ptep) |
| 1103 | { |
| 1104 | return ptep_test_and_clear_young(vma, address, ptep); |
| 1105 | } |
| 1106 | |
Martin Schwidefsky | ba8a922 | 2007-10-22 12:52:44 +0200 | [diff] [blame] | 1107 | /* |
| 1108 |  * Note that ptep_get_and_clear and ptep_clear_flush both
| 1109 |  * clear the TLB for the unmapped pte. The reason is that
| 1110 | * ptep_get_and_clear is used in common code (e.g. change_pte_range) |
| 1111 | * to modify an active pte. The sequence is |
| 1112 | * 1) ptep_get_and_clear |
| 1113 | * 2) set_pte_at |
| 1114 | * 3) flush_tlb_range |
| 1115 |  * On s390 the TLB needs to get flushed with the modification of the pte
| 1116 |  * if the pte is active. The only way this can be implemented is to
| 1117 |  * have ptep_get_and_clear do the TLB flush. In exchange flush_tlb_range
| 1118 |  * is a nop; a short sketch follows ptep_get_and_clear() below.
| 1119 | */ |
| 1120 | #define __HAVE_ARCH_PTEP_GET_AND_CLEAR |
Martin Schwidefsky | b2fa47e | 2011-05-23 10:24:40 +0200 | [diff] [blame] | 1121 | static inline pte_t ptep_get_and_clear(struct mm_struct *mm, |
| 1122 | unsigned long address, pte_t *ptep) |
| 1123 | { |
| 1124 | pgste_t pgste; |
| 1125 | pte_t pte; |
| 1126 | |
Martin Schwidefsky | d338363 | 2013-04-17 10:53:39 +0200 | [diff] [blame] | 1127 | if (mm_has_pgste(mm)) { |
Martin Schwidefsky | b2fa47e | 2011-05-23 10:24:40 +0200 | [diff] [blame] | 1128 | pgste = pgste_get_lock(ptep); |
Martin Schwidefsky | 55dbbdd | 2014-04-30 14:44:44 +0200 | [diff] [blame] | 1129 | pgste = pgste_ipte_notify(mm, address, ptep, pgste); |
Martin Schwidefsky | d338363 | 2013-04-17 10:53:39 +0200 | [diff] [blame] | 1130 | } |
Martin Schwidefsky | b2fa47e | 2011-05-23 10:24:40 +0200 | [diff] [blame] | 1131 | |
| 1132 | pte = *ptep; |
Martin Schwidefsky | 5c474a1 | 2013-08-16 13:31:40 +0200 | [diff] [blame] | 1133 | ptep_flush_lazy(mm, address, ptep); |
Martin Schwidefsky | e509861 | 2013-07-23 20:57:57 +0200 | [diff] [blame] | 1134 | pte_val(*ptep) = _PAGE_INVALID; |
Martin Schwidefsky | b2fa47e | 2011-05-23 10:24:40 +0200 | [diff] [blame] | 1135 | |
| 1136 | if (mm_has_pgste(mm)) { |
Dominik Dingel | 65eef335 | 2014-01-14 15:02:11 +0100 | [diff] [blame] | 1137 | pgste = pgste_update_all(&pte, pgste, mm); |
Martin Schwidefsky | b2fa47e | 2011-05-23 10:24:40 +0200 | [diff] [blame] | 1138 | pgste_set_unlock(ptep, pgste); |
| 1139 | } |
| 1140 | return pte; |
| 1141 | } |
| 1142 | |
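/*
 * Sketch of the common-code sequence described above (editor's
 * illustration):
 *
 *	pte = ptep_get_and_clear(mm, addr, ptep);	// TLB flushed here
 *	set_pte_at(mm, addr, ptep, newpte);
 *	flush_tlb_range(vma, start, end);		// nop on s390
 */
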
| 1143 | #define __HAVE_ARCH_PTEP_MODIFY_PROT_TRANSACTION |
| 1144 | static inline pte_t ptep_modify_prot_start(struct mm_struct *mm, |
| 1145 | unsigned long address, |
| 1146 | pte_t *ptep) |
| 1147 | { |
Martin Schwidefsky | d338363 | 2013-04-17 10:53:39 +0200 | [diff] [blame] | 1148 | pgste_t pgste; |
Martin Schwidefsky | b2fa47e | 2011-05-23 10:24:40 +0200 | [diff] [blame] | 1149 | pte_t pte; |
| 1150 | |
Martin Schwidefsky | d338363 | 2013-04-17 10:53:39 +0200 | [diff] [blame] | 1151 | if (mm_has_pgste(mm)) { |
| 1152 | pgste = pgste_get_lock(ptep); |
Martin Schwidefsky | 55dbbdd | 2014-04-30 14:44:44 +0200 | [diff] [blame] | 1153 | pgste_ipte_notify(mm, address, ptep, pgste); |
Martin Schwidefsky | d338363 | 2013-04-17 10:53:39 +0200 | [diff] [blame] | 1154 | } |
Martin Schwidefsky | b2fa47e | 2011-05-23 10:24:40 +0200 | [diff] [blame] | 1155 | |
| 1156 | pte = *ptep; |
Martin Schwidefsky | 5c474a1 | 2013-08-16 13:31:40 +0200 | [diff] [blame] | 1157 | ptep_flush_lazy(mm, address, ptep); |
Christian Borntraeger | b56433c | 2013-05-27 16:19:55 +0200 | [diff] [blame] | 1158 | |
Christian Borntraeger | 3a82603 | 2013-06-05 09:22:33 +0200 | [diff] [blame] | 1159 | if (mm_has_pgste(mm)) { |
Dominik Dingel | 65eef335 | 2014-01-14 15:02:11 +0100 | [diff] [blame] | 1160 | pgste = pgste_update_all(&pte, pgste, mm); |
Christian Borntraeger | 3a82603 | 2013-06-05 09:22:33 +0200 | [diff] [blame] | 1161 | pgste_set(ptep, pgste); |
| 1162 | } |
Martin Schwidefsky | b2fa47e | 2011-05-23 10:24:40 +0200 | [diff] [blame] | 1163 | return pte; |
| 1164 | } |
| 1165 | |
| 1166 | static inline void ptep_modify_prot_commit(struct mm_struct *mm, |
| 1167 | unsigned long address, |
| 1168 | pte_t *ptep, pte_t pte) |
| 1169 | { |
Christian Borntraeger | b56433c | 2013-05-27 16:19:55 +0200 | [diff] [blame] | 1170 | pgste_t pgste; |
| 1171 | |
Martin Schwidefsky | abf09be | 2012-11-07 13:17:37 +0100 | [diff] [blame] | 1172 | if (mm_has_pgste(mm)) { |
Martin Schwidefsky | d56c893 | 2013-07-19 11:15:54 +0200 | [diff] [blame] | 1173 | pgste = pgste_get(ptep); |
Dominik Dingel | 65eef335 | 2014-01-14 15:02:11 +0100 | [diff] [blame] | 1174 | pgste_set_key(ptep, pgste, pte, mm); |
Martin Schwidefsky | 0a61b22 | 2013-10-18 12:03:41 +0200 | [diff] [blame] | 1175 | pgste = pgste_set_pte(ptep, pgste, pte); |
Christian Borntraeger | b56433c | 2013-05-27 16:19:55 +0200 | [diff] [blame] | 1176 | pgste_set_unlock(ptep, pgste); |
Martin Schwidefsky | abf09be | 2012-11-07 13:17:37 +0100 | [diff] [blame] | 1177 | } else |
| 1178 | *ptep = pte; |
Martin Schwidefsky | b2fa47e | 2011-05-23 10:24:40 +0200 | [diff] [blame] | 1179 | } |
Martin Schwidefsky | ba8a922 | 2007-10-22 12:52:44 +0200 | [diff] [blame] | 1180 | |
| 1181 | #define __HAVE_ARCH_PTEP_CLEAR_FLUSH |
Martin Schwidefsky | f0e47c2 | 2007-07-17 04:03:03 -0700 | [diff] [blame] | 1182 | static inline pte_t ptep_clear_flush(struct vm_area_struct *vma, |
| 1183 | unsigned long address, pte_t *ptep) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1184 | { |
Martin Schwidefsky | b2fa47e | 2011-05-23 10:24:40 +0200 | [diff] [blame] | 1185 | pgste_t pgste; |
| 1186 | pte_t pte; |
| 1187 | |
Martin Schwidefsky | d338363 | 2013-04-17 10:53:39 +0200 | [diff] [blame] | 1188 | if (mm_has_pgste(vma->vm_mm)) { |
Martin Schwidefsky | b2fa47e | 2011-05-23 10:24:40 +0200 | [diff] [blame] | 1189 | pgste = pgste_get_lock(ptep); |
Martin Schwidefsky | 55dbbdd | 2014-04-30 14:44:44 +0200 | [diff] [blame] | 1190 | pgste = pgste_ipte_notify(vma->vm_mm, address, ptep, pgste); |
Martin Schwidefsky | d338363 | 2013-04-17 10:53:39 +0200 | [diff] [blame] | 1191 | } |
Martin Schwidefsky | b2fa47e | 2011-05-23 10:24:40 +0200 | [diff] [blame] | 1192 | |
| 1193 | pte = *ptep; |
Martin Schwidefsky | 53e857f | 2012-09-10 13:00:09 +0200 | [diff] [blame] | 1194 | ptep_flush_direct(vma->vm_mm, address, ptep); |
Martin Schwidefsky | e509861 | 2013-07-23 20:57:57 +0200 | [diff] [blame] | 1195 | pte_val(*ptep) = _PAGE_INVALID; |
Martin Schwidefsky | b2fa47e | 2011-05-23 10:24:40 +0200 | [diff] [blame] | 1196 | |
| 1197 | if (mm_has_pgste(vma->vm_mm)) { |
Konstantin Weitz | b31288f | 2013-04-17 17:36:29 +0200 | [diff] [blame] | 1198 | if ((pgste_val(pgste) & _PGSTE_GPS_USAGE_MASK) == |
| 1199 | _PGSTE_GPS_USAGE_UNUSED) |
| 1200 | pte_val(pte) |= _PAGE_UNUSED; |
Dominik Dingel | 65eef335 | 2014-01-14 15:02:11 +0100 | [diff] [blame] | 1201 | pgste = pgste_update_all(&pte, pgste, vma->vm_mm); |
Martin Schwidefsky | b2fa47e | 2011-05-23 10:24:40 +0200 | [diff] [blame] | 1202 | pgste_set_unlock(ptep, pgste); |
| 1203 | } |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1204 | return pte; |
| 1205 | } |
| 1206 | |
Martin Schwidefsky | ba8a922 | 2007-10-22 12:52:44 +0200 | [diff] [blame] | 1207 | /* |
| 1208 | * The batched pte unmap code uses ptep_get_and_clear_full to clear the |
| 1209 |  * ptes. Here an optimization is possible: tlb_gather_mmu flushes all
| 1210 |  * TLBs of an mm if it can guarantee that the ptes of the mm_struct
| 1211 | * cannot be accessed while the batched unmap is running. In this case |
| 1212 | * full==1 and a simple pte_clear is enough. See tlb.h. |
| 1213 | */ |
| 1214 | #define __HAVE_ARCH_PTEP_GET_AND_CLEAR_FULL |
| 1215 | static inline pte_t ptep_get_and_clear_full(struct mm_struct *mm, |
Martin Schwidefsky | b2fa47e | 2011-05-23 10:24:40 +0200 | [diff] [blame] | 1216 | unsigned long address, |
Martin Schwidefsky | ba8a922 | 2007-10-22 12:52:44 +0200 | [diff] [blame] | 1217 | pte_t *ptep, int full) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1218 | { |
Martin Schwidefsky | b2fa47e | 2011-05-23 10:24:40 +0200 | [diff] [blame] | 1219 | pgste_t pgste; |
| 1220 | pte_t pte; |
Martin Schwidefsky | ba8a922 | 2007-10-22 12:52:44 +0200 | [diff] [blame] | 1221 | |
Martin Schwidefsky | a055f66 | 2013-07-19 10:31:55 +0200 | [diff] [blame] | 1222 | if (!full && mm_has_pgste(mm)) { |
Martin Schwidefsky | b2fa47e | 2011-05-23 10:24:40 +0200 | [diff] [blame] | 1223 | pgste = pgste_get_lock(ptep); |
Martin Schwidefsky | 55dbbdd | 2014-04-30 14:44:44 +0200 | [diff] [blame] | 1224 | pgste = pgste_ipte_notify(mm, address, ptep, pgste); |
Martin Schwidefsky | d338363 | 2013-04-17 10:53:39 +0200 | [diff] [blame] | 1225 | } |
Martin Schwidefsky | b2fa47e | 2011-05-23 10:24:40 +0200 | [diff] [blame] | 1226 | |
| 1227 | pte = *ptep; |
| 1228 | if (!full) |
Martin Schwidefsky | 5c474a1 | 2013-08-16 13:31:40 +0200 | [diff] [blame] | 1229 | ptep_flush_lazy(mm, address, ptep); |
Martin Schwidefsky | e509861 | 2013-07-23 20:57:57 +0200 | [diff] [blame] | 1230 | pte_val(*ptep) = _PAGE_INVALID; |
Martin Schwidefsky | b2fa47e | 2011-05-23 10:24:40 +0200 | [diff] [blame] | 1231 | |
Martin Schwidefsky | a055f66 | 2013-07-19 10:31:55 +0200 | [diff] [blame] | 1232 | if (!full && mm_has_pgste(mm)) { |
Dominik Dingel | 65eef335 | 2014-01-14 15:02:11 +0100 | [diff] [blame] | 1233 | pgste = pgste_update_all(&pte, pgste, mm); |
Martin Schwidefsky | b2fa47e | 2011-05-23 10:24:40 +0200 | [diff] [blame] | 1234 | pgste_set_unlock(ptep, pgste); |
| 1235 | } |
Martin Schwidefsky | ba8a922 | 2007-10-22 12:52:44 +0200 | [diff] [blame] | 1236 | return pte; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1237 | } |
| 1238 | |
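/*
 * Editor's sketch: the batched unmap path (see tlb.h) ends up doing
 * roughly
 *
 *	pte = ptep_get_and_clear_full(mm, addr, ptep, tlb->fullmm);
 *
 * where full==1 lets the pte be cleared without an individual flush,
 * because the whole mm is flushed once when the gather completes.
 */
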
Martin Schwidefsky | ba8a922 | 2007-10-22 12:52:44 +0200 | [diff] [blame] | 1239 | #define __HAVE_ARCH_PTEP_SET_WRPROTECT |
Martin Schwidefsky | b2fa47e | 2011-05-23 10:24:40 +0200 | [diff] [blame] | 1240 | static inline pte_t ptep_set_wrprotect(struct mm_struct *mm, |
| 1241 | unsigned long address, pte_t *ptep) |
| 1242 | { |
| 1243 | pgste_t pgste; |
| 1244 | pte_t pte = *ptep; |
| 1245 | |
| 1246 | if (pte_write(pte)) { |
Martin Schwidefsky | d338363 | 2013-04-17 10:53:39 +0200 | [diff] [blame] | 1247 | if (mm_has_pgste(mm)) { |
Martin Schwidefsky | b2fa47e | 2011-05-23 10:24:40 +0200 | [diff] [blame] | 1248 | pgste = pgste_get_lock(ptep); |
Martin Schwidefsky | 55dbbdd | 2014-04-30 14:44:44 +0200 | [diff] [blame] | 1249 | pgste = pgste_ipte_notify(mm, address, ptep, pgste); |
Martin Schwidefsky | d338363 | 2013-04-17 10:53:39 +0200 | [diff] [blame] | 1250 | } |
Martin Schwidefsky | b2fa47e | 2011-05-23 10:24:40 +0200 | [diff] [blame] | 1251 | |
Martin Schwidefsky | 5c474a1 | 2013-08-16 13:31:40 +0200 | [diff] [blame] | 1252 | ptep_flush_lazy(mm, address, ptep); |
Martin Schwidefsky | abf09be | 2012-11-07 13:17:37 +0100 | [diff] [blame] | 1253 | pte = pte_wrprotect(pte); |
Martin Schwidefsky | b2fa47e | 2011-05-23 10:24:40 +0200 | [diff] [blame] | 1254 | |
Martin Schwidefsky | abf09be | 2012-11-07 13:17:37 +0100 | [diff] [blame] | 1255 | if (mm_has_pgste(mm)) { |
Martin Schwidefsky | 0a61b22 | 2013-10-18 12:03:41 +0200 | [diff] [blame] | 1256 | pgste = pgste_set_pte(ptep, pgste, pte); |
Martin Schwidefsky | b2fa47e | 2011-05-23 10:24:40 +0200 | [diff] [blame] | 1257 | pgste_set_unlock(ptep, pgste); |
Martin Schwidefsky | abf09be | 2012-11-07 13:17:37 +0100 | [diff] [blame] | 1258 | } else |
| 1259 | *ptep = pte; |
Martin Schwidefsky | b2fa47e | 2011-05-23 10:24:40 +0200 | [diff] [blame] | 1260 | } |
| 1261 | return pte; |
| 1262 | } |
Martin Schwidefsky | ba8a922 | 2007-10-22 12:52:44 +0200 | [diff] [blame] | 1263 | |
| 1264 | #define __HAVE_ARCH_PTEP_SET_ACCESS_FLAGS |
Martin Schwidefsky | b2fa47e | 2011-05-23 10:24:40 +0200 | [diff] [blame] | 1265 | static inline int ptep_set_access_flags(struct vm_area_struct *vma, |
| 1266 | unsigned long address, pte_t *ptep, |
| 1267 | pte_t entry, int dirty) |
| 1268 | { |
| 1269 | pgste_t pgste; |
| 1270 | |
| 1271 | if (pte_same(*ptep, entry)) |
| 1272 | return 0; |
Martin Schwidefsky | d338363 | 2013-04-17 10:53:39 +0200 | [diff] [blame] | 1273 | if (mm_has_pgste(vma->vm_mm)) { |
Martin Schwidefsky | b2fa47e | 2011-05-23 10:24:40 +0200 | [diff] [blame] | 1274 | pgste = pgste_get_lock(ptep); |
Martin Schwidefsky | 55dbbdd | 2014-04-30 14:44:44 +0200 | [diff] [blame] | 1275 | pgste = pgste_ipte_notify(vma->vm_mm, address, ptep, pgste); |
Martin Schwidefsky | d338363 | 2013-04-17 10:53:39 +0200 | [diff] [blame] | 1276 | } |
Martin Schwidefsky | b2fa47e | 2011-05-23 10:24:40 +0200 | [diff] [blame] | 1277 | |
Martin Schwidefsky | 53e857f | 2012-09-10 13:00:09 +0200 | [diff] [blame] | 1278 | ptep_flush_direct(vma->vm_mm, address, ptep); |
Martin Schwidefsky | b2fa47e | 2011-05-23 10:24:40 +0200 | [diff] [blame] | 1279 | |
Martin Schwidefsky | abf09be | 2012-11-07 13:17:37 +0100 | [diff] [blame] | 1280 | if (mm_has_pgste(vma->vm_mm)) { |
Christian Borntraeger | 1951497 | 2014-08-28 23:44:57 +0200 | [diff] [blame] | 1281 | pgste_set_key(ptep, pgste, entry, vma->vm_mm); |
Martin Schwidefsky | 0a61b22 | 2013-10-18 12:03:41 +0200 | [diff] [blame] | 1282 | pgste = pgste_set_pte(ptep, pgste, entry); |
Martin Schwidefsky | b2fa47e | 2011-05-23 10:24:40 +0200 | [diff] [blame] | 1283 | pgste_set_unlock(ptep, pgste); |
Martin Schwidefsky | abf09be | 2012-11-07 13:17:37 +0100 | [diff] [blame] | 1284 | } else |
| 1285 | *ptep = entry; |
Martin Schwidefsky | b2fa47e | 2011-05-23 10:24:40 +0200 | [diff] [blame] | 1286 | return 1; |
| 1287 | } |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1288 | |
| 1289 | /* |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1290 | * Conversion functions: convert a page and protection to a page entry, |
| 1291 | * and a page entry and page directory to the page they refer to. |
| 1292 | */ |
| 1293 | static inline pte_t mk_pte_phys(unsigned long physpage, pgprot_t pgprot) |
| 1294 | { |
| 1295 | pte_t __pte; |
| 1296 | pte_val(__pte) = physpage + pgprot_val(pgprot); |
Martin Schwidefsky | 0944fe3 | 2013-07-23 22:11:42 +0200 | [diff] [blame] | 1297 | return pte_mkyoung(__pte); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1298 | } |
| 1299 | |
Heiko Carstens | 2dcea57 | 2006-09-29 01:58:41 -0700 | [diff] [blame] | 1300 | static inline pte_t mk_pte(struct page *page, pgprot_t pgprot) |
| 1301 | { |
Heiko Carstens | 0b2b6e1d | 2006-10-04 20:02:23 +0200 | [diff] [blame] | 1302 | unsigned long physpage = page_to_phys(page); |
Martin Schwidefsky | abf09be | 2012-11-07 13:17:37 +0100 | [diff] [blame] | 1303 | pte_t __pte = mk_pte_phys(physpage, pgprot); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1304 | |
Martin Schwidefsky | e509861 | 2013-07-23 20:57:57 +0200 | [diff] [blame] | 1305 | if (pte_write(__pte) && PageDirty(page)) |
| 1306 | __pte = pte_mkdirty(__pte); |
Martin Schwidefsky | abf09be | 2012-11-07 13:17:37 +0100 | [diff] [blame] | 1307 | return __pte; |
Heiko Carstens | 2dcea57 | 2006-09-29 01:58:41 -0700 | [diff] [blame] | 1308 | } |
| 1309 | |
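/*
 * A small usage sketch (editor's example; names are illustrative):
 *
 *	struct page *page = alloc_page(GFP_KERNEL);
 *	pte_t pte = mk_pte(page, PAGE_READ);
 *
 * mk_pte_phys() is the variant for callers that only have a physical
 * address, e.g. when mapping I/O memory.
 */
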
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1310 | #define pgd_index(address) (((address) >> PGDIR_SHIFT) & (PTRS_PER_PGD-1)) |
Martin Schwidefsky | 190a1d7 | 2007-10-22 12:52:48 +0200 | [diff] [blame] | 1311 | #define pud_index(address) (((address) >> PUD_SHIFT) & (PTRS_PER_PUD-1)) |
| 1312 | #define pmd_index(address) (((address) >> PMD_SHIFT) & (PTRS_PER_PMD-1)) |
| 1313 | #define pte_index(address) (((address) >> PAGE_SHIFT) & (PTRS_PER_PTE-1)) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1314 | |
Martin Schwidefsky | 190a1d7 | 2007-10-22 12:52:48 +0200 | [diff] [blame] | 1315 | #define pgd_offset(mm, address) ((mm)->pgd + pgd_index(address)) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1316 | #define pgd_offset_k(address) pgd_offset(&init_mm, address) |
| 1317 | |
Martin Schwidefsky | 190a1d7 | 2007-10-22 12:52:48 +0200 | [diff] [blame] | 1318 | #define pmd_deref(pmd) (pmd_val(pmd) & _SEGMENT_ENTRY_ORIGIN) |
| 1319 | #define pud_deref(pud) (pud_val(pud) & _REGION_ENTRY_ORIGIN) |
Martin Schwidefsky | 5a216a2 | 2008-02-09 18:24:36 +0100 | [diff] [blame] | 1320 | #define pgd_deref(pgd) (pgd_val(pgd) & _REGION_ENTRY_ORIGIN) |
Martin Schwidefsky | 190a1d7 | 2007-10-22 12:52:48 +0200 | [diff] [blame] | 1321 | |
Martin Schwidefsky | 5a216a2 | 2008-02-09 18:24:36 +0100 | [diff] [blame] | 1322 | static inline pud_t *pud_offset(pgd_t *pgd, unsigned long address) |
| 1323 | { |
Martin Schwidefsky | 6252d70 | 2008-02-09 18:24:37 +0100 | [diff] [blame] | 1324 | pud_t *pud = (pud_t *) pgd; |
| 1325 | if ((pgd_val(*pgd) & _REGION_ENTRY_TYPE_MASK) == _REGION_ENTRY_TYPE_R2) |
| 1326 | pud = (pud_t *) pgd_deref(*pgd); |
Martin Schwidefsky | 5a216a2 | 2008-02-09 18:24:36 +0100 | [diff] [blame] | 1327 | return pud + pud_index(address); |
| 1328 | } |
Martin Schwidefsky | 190a1d7 | 2007-10-22 12:52:48 +0200 | [diff] [blame] | 1329 | |
| 1330 | static inline pmd_t *pmd_offset(pud_t *pud, unsigned long address) |
| 1331 | { |
Martin Schwidefsky | 6252d70 | 2008-02-09 18:24:37 +0100 | [diff] [blame] | 1332 | pmd_t *pmd = (pmd_t *) pud; |
| 1333 | if ((pud_val(*pud) & _REGION_ENTRY_TYPE_MASK) == _REGION_ENTRY_TYPE_R3) |
| 1334 | pmd = (pmd_t *) pud_deref(*pud); |
Martin Schwidefsky | 190a1d7 | 2007-10-22 12:52:48 +0200 | [diff] [blame] | 1335 | return pmd + pmd_index(address); |
| 1336 | } |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1337 | |
Martin Schwidefsky | 190a1d7 | 2007-10-22 12:52:48 +0200 | [diff] [blame] | 1338 | #define pfn_pte(pfn,pgprot) mk_pte_phys(__pa((pfn) << PAGE_SHIFT),(pgprot)) |
| 1339 | #define pte_pfn(x) (pte_val(x) >> PAGE_SHIFT) |
| 1340 | #define pte_page(x) pfn_to_page(pte_pfn(x)) |
| 1341 | |
Martin Schwidefsky | 152125b | 2014-07-24 11:03:41 +0200 | [diff] [blame] | 1342 | #define pmd_page(pmd) pfn_to_page(pmd_pfn(pmd)) |
Martin Schwidefsky | 190a1d7 | 2007-10-22 12:52:48 +0200 | [diff] [blame] | 1343 | |
| 1344 | /* Find an entry in the lowest level page table. */
| 1345 | #define pte_offset(pmd, addr) ((pte_t *) pmd_deref(*(pmd)) + pte_index(addr)) |
| 1346 | #define pte_offset_kernel(pmd, address) pte_offset(pmd,address) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1347 | #define pte_offset_map(pmd, address) pte_offset_kernel(pmd, address) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1348 | #define pte_unmap(pte) do { } while (0) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1349 | |
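/*
 * Putting the lookup macros together (editor's illustration): resolving
 * the pte for an address in a fully populated table:
 *
 *	pgd_t *pgd = pgd_offset(mm, addr);
 *	pud_t *pud = pud_offset(pgd, addr);
 *	pmd_t *pmd = pmd_offset(pud, addr);
 *	pte_t *pte = pte_offset_map(pmd, addr);
 *	...
 *	pte_unmap(pte);
 *
 * Real walkers also check pgd_none()/pud_none()/pmd_none() and
 * pmd_large() at each level before descending.
 */
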
Gerald Schaefer | 106c992 | 2013-04-29 15:07:23 -0700 | [diff] [blame] | 1350 | #if defined(CONFIG_TRANSPARENT_HUGEPAGE) || defined(CONFIG_HUGETLB_PAGE) |
Gerald Schaefer | 1ae1c1d | 2012-10-08 16:30:24 -0700 | [diff] [blame] | 1351 | static inline unsigned long massage_pgprot_pmd(pgprot_t pgprot) |
| 1352 | { |
Gerald Schaefer | d8e7a33 | 2012-10-25 17:42:50 +0200 | [diff] [blame] | 1353 | /* |
Martin Schwidefsky | e509861 | 2013-07-23 20:57:57 +0200 | [diff] [blame] | 1354 | * pgprot is PAGE_NONE, PAGE_READ, or PAGE_WRITE (see __Pxxx / __Sxxx) |
Gerald Schaefer | d8e7a33 | 2012-10-25 17:42:50 +0200 | [diff] [blame] | 1355 | * Convert to segment table entry format. |
| 1356 | */ |
| 1357 | if (pgprot_val(pgprot) == pgprot_val(PAGE_NONE)) |
| 1358 | return pgprot_val(SEGMENT_NONE); |
Martin Schwidefsky | e509861 | 2013-07-23 20:57:57 +0200 | [diff] [blame] | 1359 | if (pgprot_val(pgprot) == pgprot_val(PAGE_READ)) |
| 1360 | return pgprot_val(SEGMENT_READ); |
| 1361 | return pgprot_val(SEGMENT_WRITE); |
Gerald Schaefer | 1ae1c1d | 2012-10-08 16:30:24 -0700 | [diff] [blame] | 1362 | } |
| 1363 | |
Martin Schwidefsky | 152125b | 2014-07-24 11:03:41 +0200 | [diff] [blame] | 1364 | static inline pmd_t pmd_wrprotect(pmd_t pmd) |
| 1365 | { |
| 1366 | pmd_val(pmd) &= ~_SEGMENT_ENTRY_WRITE; |
| 1367 | pmd_val(pmd) |= _SEGMENT_ENTRY_PROTECT; |
| 1368 | return pmd; |
| 1369 | } |
| 1370 | |
| 1371 | static inline pmd_t pmd_mkwrite(pmd_t pmd) |
| 1372 | { |
| 1373 | pmd_val(pmd) |= _SEGMENT_ENTRY_WRITE; |
| 1374 | if (pmd_large(pmd) && !(pmd_val(pmd) & _SEGMENT_ENTRY_DIRTY)) |
| 1375 | return pmd; |
| 1376 | pmd_val(pmd) &= ~_SEGMENT_ENTRY_PROTECT; |
| 1377 | return pmd; |
| 1378 | } |
| 1379 | |
| 1380 | static inline pmd_t pmd_mkclean(pmd_t pmd) |
| 1381 | { |
| 1382 | if (pmd_large(pmd)) { |
| 1383 | pmd_val(pmd) &= ~_SEGMENT_ENTRY_DIRTY; |
| 1384 | pmd_val(pmd) |= _SEGMENT_ENTRY_PROTECT; |
| 1385 | } |
| 1386 | return pmd; |
| 1387 | } |
| 1388 | |
| 1389 | static inline pmd_t pmd_mkdirty(pmd_t pmd) |
| 1390 | { |
| 1391 | if (pmd_large(pmd)) { |
Martin Schwidefsky | 5614dd9 | 2015-04-22 14:47:42 +0200 | [diff] [blame^] | 1392 | pmd_val(pmd) |= _SEGMENT_ENTRY_DIRTY | |
| 1393 | _SEGMENT_ENTRY_SOFT_DIRTY; |
Martin Schwidefsky | 152125b | 2014-07-24 11:03:41 +0200 | [diff] [blame] | 1394 | if (pmd_val(pmd) & _SEGMENT_ENTRY_WRITE) |
| 1395 | pmd_val(pmd) &= ~_SEGMENT_ENTRY_PROTECT; |
| 1396 | } |
| 1397 | return pmd; |
| 1398 | } |
| 1399 | |
Martin Schwidefsky | 0944fe3 | 2013-07-23 22:11:42 +0200 | [diff] [blame] | 1400 | static inline pmd_t pmd_mkyoung(pmd_t pmd) |
| 1401 | { |
Martin Schwidefsky | 152125b | 2014-07-24 11:03:41 +0200 | [diff] [blame] | 1402 | if (pmd_large(pmd)) { |
Martin Schwidefsky | 0944fe3 | 2013-07-23 22:11:42 +0200 | [diff] [blame] | 1403 | pmd_val(pmd) |= _SEGMENT_ENTRY_YOUNG; |
Martin Schwidefsky | 152125b | 2014-07-24 11:03:41 +0200 | [diff] [blame] | 1404 | if (pmd_val(pmd) & _SEGMENT_ENTRY_READ) |
| 1405 | pmd_val(pmd) &= ~_SEGMENT_ENTRY_INVALID; |
Martin Schwidefsky | 0944fe3 | 2013-07-23 22:11:42 +0200 | [diff] [blame] | 1406 | } |
Martin Schwidefsky | 0944fe3 | 2013-07-23 22:11:42 +0200 | [diff] [blame] | 1407 | return pmd; |
| 1408 | } |
| 1409 | |
| 1410 | static inline pmd_t pmd_mkold(pmd_t pmd) |
| 1411 | { |
Martin Schwidefsky | 152125b | 2014-07-24 11:03:41 +0200 | [diff] [blame] | 1412 | if (pmd_large(pmd)) { |
Martin Schwidefsky | 0944fe3 | 2013-07-23 22:11:42 +0200 | [diff] [blame] | 1413 | pmd_val(pmd) &= ~_SEGMENT_ENTRY_YOUNG; |
| 1414 | pmd_val(pmd) |= _SEGMENT_ENTRY_INVALID; |
| 1415 | } |
Martin Schwidefsky | 0944fe3 | 2013-07-23 22:11:42 +0200 | [diff] [blame] | 1416 | return pmd; |
| 1417 | } |
| 1418 | |
Gerald Schaefer | 1ae1c1d | 2012-10-08 16:30:24 -0700 | [diff] [blame] | 1419 | static inline pmd_t pmd_modify(pmd_t pmd, pgprot_t newprot) |
| 1420 | { |
Martin Schwidefsky | 152125b | 2014-07-24 11:03:41 +0200 | [diff] [blame] | 1421 | if (pmd_large(pmd)) { |
| 1422 | pmd_val(pmd) &= _SEGMENT_ENTRY_ORIGIN_LARGE | |
| 1423 | _SEGMENT_ENTRY_DIRTY | _SEGMENT_ENTRY_YOUNG | |
Martin Schwidefsky | 5614dd9 | 2015-04-22 14:47:42 +0200 | [diff] [blame^] | 1424 | _SEGMENT_ENTRY_LARGE | _SEGMENT_ENTRY_SPLIT | |
| 1425 | _SEGMENT_ENTRY_SOFT_DIRTY; |
Martin Schwidefsky | 152125b | 2014-07-24 11:03:41 +0200 | [diff] [blame] | 1426 | pmd_val(pmd) |= massage_pgprot_pmd(newprot); |
| 1427 | if (!(pmd_val(pmd) & _SEGMENT_ENTRY_DIRTY)) |
| 1428 | pmd_val(pmd) |= _SEGMENT_ENTRY_PROTECT; |
| 1429 | if (!(pmd_val(pmd) & _SEGMENT_ENTRY_YOUNG)) |
| 1430 | pmd_val(pmd) |= _SEGMENT_ENTRY_INVALID; |
| 1431 | return pmd; |
| 1432 | } |
| 1433 | pmd_val(pmd) &= _SEGMENT_ENTRY_ORIGIN; |
Gerald Schaefer | 1ae1c1d | 2012-10-08 16:30:24 -0700 | [diff] [blame] | 1434 | pmd_val(pmd) |= massage_pgprot_pmd(newprot); |
| 1435 | return pmd; |
| 1436 | } |
| 1437 | |
Gerald Schaefer | 106c992 | 2013-04-29 15:07:23 -0700 | [diff] [blame] | 1438 | static inline pmd_t mk_pmd_phys(unsigned long physpage, pgprot_t pgprot) |
Gerald Schaefer | 1ae1c1d | 2012-10-08 16:30:24 -0700 | [diff] [blame] | 1439 | { |
Gerald Schaefer | 106c992 | 2013-04-29 15:07:23 -0700 | [diff] [blame] | 1440 | pmd_t __pmd; |
| 1441 | pmd_val(__pmd) = physpage + massage_pgprot_pmd(pgprot); |
Martin Schwidefsky | 152125b | 2014-07-24 11:03:41 +0200 | [diff] [blame] | 1442 | return __pmd; |
Gerald Schaefer | 1ae1c1d | 2012-10-08 16:30:24 -0700 | [diff] [blame] | 1443 | } |
| 1444 | |
Gerald Schaefer | 106c992 | 2013-04-29 15:07:23 -0700 | [diff] [blame] | 1445 | #endif /* CONFIG_TRANSPARENT_HUGEPAGE || CONFIG_HUGETLB_PAGE */ |
| 1446 | |
Martin Schwidefsky | 1b948d6 | 2014-04-03 13:55:01 +0200 | [diff] [blame] | 1447 | static inline void __pmdp_csp(pmd_t *pmdp) |
| 1448 | { |
| 1449 | register unsigned long reg2 asm("2") = pmd_val(*pmdp); |
| 1450 | register unsigned long reg3 asm("3") = pmd_val(*pmdp) | |
| 1451 | _SEGMENT_ENTRY_INVALID; |
| 1452 | register unsigned long reg4 asm("4") = ((unsigned long) pmdp) + 5; |
| 1453 | |
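/* Compare-and-swap-and-purge: swap in the invalidated entry, then purge the TLB (all CPUs) */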
| 1454 | asm volatile( |
| 1455 | " csp %1,%3" |
| 1456 | : "=m" (*pmdp) |
| 1457 | : "d" (reg2), "d" (reg3), "d" (reg4), "m" (*pmdp) : "cc"); |
| 1458 | } |
| 1459 | |
| 1460 | static inline void __pmdp_idte(unsigned long address, pmd_t *pmdp) |
| 1461 | { |
| 1462 | unsigned long sto; |
| 1463 | |
| 1464 | sto = (unsigned long) pmdp - pmd_index(address) * sizeof(pmd_t); |
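/* IDTE: invalidate the segment table entry + global TLB flush */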
| 1465 | asm volatile( |
| 1466 | " .insn rrf,0xb98e0000,%2,%3,0,0" |
| 1467 | : "=m" (*pmdp) |
| 1468 | : "m" (*pmdp), "a" (sto), "a" ((address & HPAGE_MASK)) |
| 1469 | : "cc" ); |
| 1470 | } |
| 1471 | |
| 1472 | static inline void __pmdp_idte_local(unsigned long address, pmd_t *pmdp) |
| 1473 | { |
| 1474 | unsigned long sto; |
| 1475 | |
| 1476 | sto = (unsigned long) pmdp - pmd_index(address) * sizeof(pmd_t); |
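/* IDTE with local-clearing control: invalidation + local TLB flush */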
| 1477 | asm volatile( |
| 1478 | " .insn rrf,0xb98e0000,%2,%3,0,1" |
| 1479 | : "=m" (*pmdp) |
| 1480 | : "m" (*pmdp), "a" (sto), "a" ((address & HPAGE_MASK)) |
| 1481 | : "cc" ); |
| 1482 | } |
| 1483 | |
| 1484 | static inline void pmdp_flush_direct(struct mm_struct *mm, |
| 1485 | unsigned long address, pmd_t *pmdp) |
| 1486 | { |
| 1487 | int active, count; |
| 1488 | |
| 1489 | if (pmd_val(*pmdp) & _SEGMENT_ENTRY_INVALID) |
| 1490 | return; |
| 1491 | if (!MACHINE_HAS_IDTE) { |
| 1492 | __pmdp_csp(pmdp); |
| 1493 | return; |
| 1494 | } |
| 1495 | active = (mm == current->active_mm) ? 1 : 0; |
| 1496 | count = atomic_add_return(0x10000, &mm->context.attach_count); |
| 1497 | if (MACHINE_HAS_TLB_LC && (count & 0xffff) <= active && |
| 1498 | cpumask_equal(mm_cpumask(mm), cpumask_of(smp_processor_id()))) |
| 1499 | __pmdp_idte_local(address, pmdp); |
| 1500 | else |
| 1501 | __pmdp_idte(address, pmdp); |
| 1502 | atomic_sub(0x10000, &mm->context.attach_count); |
| 1503 | } |
| 1504 | |
Martin Schwidefsky | 3eabaee | 2013-07-26 15:04:02 +0200 | [diff] [blame] | 1505 | static inline void pmdp_flush_lazy(struct mm_struct *mm, |
| 1506 | unsigned long address, pmd_t *pmdp) |
| 1507 | { |
Martin Schwidefsky | 53e857f | 2012-09-10 13:00:09 +0200 | [diff] [blame] | 1508 | int active, count; |
Martin Schwidefsky | 3eabaee | 2013-07-26 15:04:02 +0200 | [diff] [blame] | 1509 | |
Martin Schwidefsky | 1b948d6 | 2014-04-03 13:55:01 +0200 | [diff] [blame] | 1510 | if (pmd_val(*pmdp) & _SEGMENT_ENTRY_INVALID) |
| 1511 | return; |
Martin Schwidefsky | 53e857f | 2012-09-10 13:00:09 +0200 | [diff] [blame] | 1512 | active = (mm == current->active_mm) ? 1 : 0; |
| 1513 | count = atomic_add_return(0x10000, &mm->context.attach_count); |
| 1514 | if ((count & 0xffff) <= active) { |
| 1515 | pmd_val(*pmdp) |= _SEGMENT_ENTRY_INVALID; |
Martin Schwidefsky | 3eabaee | 2013-07-26 15:04:02 +0200 | [diff] [blame] | 1516 | mm->context.flush_mm = 1; |
Martin Schwidefsky | 1b948d6 | 2014-04-03 13:55:01 +0200 | [diff] [blame] | 1517 | } else if (MACHINE_HAS_IDTE) |
| 1518 | __pmdp_idte(address, pmdp); |
| 1519 | else |
| 1520 | __pmdp_csp(pmdp); |
Martin Schwidefsky | 53e857f | 2012-09-10 13:00:09 +0200 | [diff] [blame] | 1521 | atomic_sub(0x10000, &mm->context.attach_count); |
Martin Schwidefsky | 3eabaee | 2013-07-26 15:04:02 +0200 | [diff] [blame] | 1522 | } |
| 1523 | |
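/*
 * Editor's note: pmdp_flush_direct()/pmdp_flush_lazy() mirror the pte
 * helpers above, with IDTE taking the role of IPTE for segment table
 * entries and CSP serving as the fallback on machines without the IDTE
 * facility.
 */
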
Gerald Schaefer | 106c992 | 2013-04-29 15:07:23 -0700 | [diff] [blame] | 1524 | #ifdef CONFIG_TRANSPARENT_HUGEPAGE |
| 1525 | |
| 1526 | #define __HAVE_ARCH_PGTABLE_DEPOSIT |
Aneesh Kumar K.V | 6b0b50b | 2013-06-05 17:14:02 -0700 | [diff] [blame] | 1527 | extern void pgtable_trans_huge_deposit(struct mm_struct *mm, pmd_t *pmdp, |
| 1528 | pgtable_t pgtable); |
Gerald Schaefer | 106c992 | 2013-04-29 15:07:23 -0700 | [diff] [blame] | 1529 | |
| 1530 | #define __HAVE_ARCH_PGTABLE_WITHDRAW |
Aneesh Kumar K.V | 6b0b50b | 2013-06-05 17:14:02 -0700 | [diff] [blame] | 1531 | extern pgtable_t pgtable_trans_huge_withdraw(struct mm_struct *mm, pmd_t *pmdp); |
Gerald Schaefer | 106c992 | 2013-04-29 15:07:23 -0700 | [diff] [blame] | 1532 | |
| 1533 | static inline int pmd_trans_splitting(pmd_t pmd) |
| 1534 | { |
Martin Schwidefsky | 152125b | 2014-07-24 11:03:41 +0200 | [diff] [blame] | 1535 | return (pmd_val(pmd) & _SEGMENT_ENTRY_LARGE) && |
| 1536 | (pmd_val(pmd) & _SEGMENT_ENTRY_SPLIT); |
Gerald Schaefer | 106c992 | 2013-04-29 15:07:23 -0700 | [diff] [blame] | 1537 | } |
| 1538 | |
| 1539 | static inline void set_pmd_at(struct mm_struct *mm, unsigned long addr, |
| 1540 | pmd_t *pmdp, pmd_t entry) |
| 1541 | { |
Gerald Schaefer | 106c992 | 2013-04-29 15:07:23 -0700 | [diff] [blame] | 1542 | *pmdp = entry; |
| 1543 | } |
| 1544 | |
| 1545 | static inline pmd_t pmd_mkhuge(pmd_t pmd) |
| 1546 | { |
| 1547 | pmd_val(pmd) |= _SEGMENT_ENTRY_LARGE; |
Martin Schwidefsky | 152125b | 2014-07-24 11:03:41 +0200 | [diff] [blame] | 1548 | pmd_val(pmd) |= _SEGMENT_ENTRY_YOUNG; |
| 1549 | pmd_val(pmd) |= _SEGMENT_ENTRY_PROTECT; |
Gerald Schaefer | 1ae1c1d | 2012-10-08 16:30:24 -0700 | [diff] [blame] | 1550 | return pmd; |
| 1551 | } |
| 1552 | |
Gerald Schaefer | 1ae1c1d | 2012-10-08 16:30:24 -0700 | [diff] [blame] | 1553 | #define __HAVE_ARCH_PMDP_TEST_AND_CLEAR_YOUNG |
| 1554 | static inline int pmdp_test_and_clear_young(struct vm_area_struct *vma, |
| 1555 | unsigned long address, pmd_t *pmdp) |
| 1556 | { |
Martin Schwidefsky | 0944fe3 | 2013-07-23 22:11:42 +0200 | [diff] [blame] | 1557 | pmd_t pmd; |
Gerald Schaefer | 1ae1c1d | 2012-10-08 16:30:24 -0700 | [diff] [blame] | 1558 | |
Martin Schwidefsky | 0944fe3 | 2013-07-23 22:11:42 +0200 | [diff] [blame] | 1559 | pmd = *pmdp; |
Martin Schwidefsky | 1b948d6 | 2014-04-03 13:55:01 +0200 | [diff] [blame] | 1560 | pmdp_flush_direct(vma->vm_mm, address, pmdp); |
Martin Schwidefsky | 0944fe3 | 2013-07-23 22:11:42 +0200 | [diff] [blame] | 1561 | *pmdp = pmd_mkold(pmd); |
| 1562 | return pmd_young(pmd); |
Gerald Schaefer | 1ae1c1d | 2012-10-08 16:30:24 -0700 | [diff] [blame] | 1563 | } |
| 1564 | |
Aneesh Kumar K.V | 8809aa2 | 2015-06-24 16:57:44 -0700 | [diff] [blame] | 1565 | #define __HAVE_ARCH_PMDP_HUGE_GET_AND_CLEAR |
| 1566 | static inline pmd_t pmdp_huge_get_and_clear(struct mm_struct *mm, |
| 1567 | unsigned long address, pmd_t *pmdp) |
Gerald Schaefer | 1ae1c1d | 2012-10-08 16:30:24 -0700 | [diff] [blame] | 1568 | { |
| 1569 | pmd_t pmd = *pmdp; |
| 1570 | |
Martin Schwidefsky | 1b948d6 | 2014-04-03 13:55:01 +0200 | [diff] [blame] | 1571 | pmdp_flush_direct(mm, address, pmdp); |
Gerald Schaefer | 1ae1c1d | 2012-10-08 16:30:24 -0700 | [diff] [blame] | 1572 | pmd_clear(pmdp); |
| 1573 | return pmd; |
| 1574 | } |
| 1575 | |
Aneesh Kumar K.V | 8809aa2 | 2015-06-24 16:57:44 -0700 | [diff] [blame] | 1576 | #define __HAVE_ARCH_PMDP_HUGE_GET_AND_CLEAR_FULL |
| 1577 | static inline pmd_t pmdp_huge_get_and_clear_full(struct mm_struct *mm, |
| 1578 | unsigned long address, |
| 1579 | pmd_t *pmdp, int full) |
Martin Schwidefsky | fcbe08d6 | 2014-10-24 10:52:29 +0200 | [diff] [blame] | 1580 | { |
| 1581 | pmd_t pmd = *pmdp; |
| 1582 | |
| 1583 | if (!full) |
| 1584 | pmdp_flush_lazy(mm, address, pmdp); |
| 1585 | pmd_clear(pmdp); |
| 1586 | return pmd; |
| 1587 | } |
| 1588 | |
Aneesh Kumar K.V | 8809aa2 | 2015-06-24 16:57:44 -0700 | [diff] [blame] | 1589 | #define __HAVE_ARCH_PMDP_HUGE_CLEAR_FLUSH |
| 1590 | static inline pmd_t pmdp_huge_clear_flush(struct vm_area_struct *vma, |
| 1591 | unsigned long address, pmd_t *pmdp) |
Gerald Schaefer | 1ae1c1d | 2012-10-08 16:30:24 -0700 | [diff] [blame] | 1592 | { |
Aneesh Kumar K.V | 8809aa2 | 2015-06-24 16:57:44 -0700 | [diff] [blame] | 1593 | return pmdp_huge_get_and_clear(vma->vm_mm, address, pmdp); |
Gerald Schaefer | 1ae1c1d | 2012-10-08 16:30:24 -0700 | [diff] [blame] | 1594 | } |
| 1595 | |
| 1596 | #define __HAVE_ARCH_PMDP_INVALIDATE |
| 1597 | static inline void pmdp_invalidate(struct vm_area_struct *vma, |
| 1598 | unsigned long address, pmd_t *pmdp) |
| 1599 | { |
Martin Schwidefsky | 1b948d6 | 2014-04-03 13:55:01 +0200 | [diff] [blame] | 1600 | pmdp_flush_direct(vma->vm_mm, address, pmdp); |
Gerald Schaefer | 1ae1c1d | 2012-10-08 16:30:24 -0700 | [diff] [blame] | 1601 | } |
| 1602 | |
Gerald Schaefer | be32865 | 2013-01-21 16:48:07 +0100 | [diff] [blame] | 1603 | #define __HAVE_ARCH_PMDP_SET_WRPROTECT |
| 1604 | static inline void pmdp_set_wrprotect(struct mm_struct *mm, |
| 1605 | unsigned long address, pmd_t *pmdp) |
| 1606 | { |
| 1607 | pmd_t pmd = *pmdp; |
| 1608 | |
| 1609 | if (pmd_write(pmd)) { |
Martin Schwidefsky | 1b948d6 | 2014-04-03 13:55:01 +0200 | [diff] [blame] | 1610 | pmdp_flush_direct(mm, address, pmdp); |
Gerald Schaefer | be32865 | 2013-01-21 16:48:07 +0100 | [diff] [blame] | 1611 | set_pmd_at(mm, address, pmdp, pmd_wrprotect(pmd)); |
| 1612 | } |
| 1613 | } |
| 1614 | |
Aneesh Kumar K.V | f28b6ff | 2015-06-24 16:57:42 -0700 | [diff] [blame] | 1615 | static inline pmd_t pmdp_collapse_flush(struct vm_area_struct *vma, |
| 1616 | unsigned long address, |
| 1617 | pmd_t *pmdp) |
| 1618 | { |
Aneesh Kumar K.V | 8809aa2 | 2015-06-24 16:57:44 -0700 | [diff] [blame] | 1619 | return pmdp_huge_get_and_clear(vma->vm_mm, address, pmdp); |
Aneesh Kumar K.V | f28b6ff | 2015-06-24 16:57:42 -0700 | [diff] [blame] | 1620 | } |
| 1621 | #define pmdp_collapse_flush pmdp_collapse_flush |
| 1622 | |
Gerald Schaefer | 1ae1c1d | 2012-10-08 16:30:24 -0700 | [diff] [blame] | 1623 | #define pfn_pmd(pfn, pgprot) mk_pmd_phys(__pa((pfn) << PAGE_SHIFT), (pgprot)) |
| 1624 | #define mk_pmd(page, pgprot) pfn_pmd(page_to_pfn(page), (pgprot)) |
| 1625 | |
| 1626 | static inline int pmd_trans_huge(pmd_t pmd) |
| 1627 | { |
| 1628 | return pmd_val(pmd) & _SEGMENT_ENTRY_LARGE; |
| 1629 | } |
| 1630 | |
| 1631 | static inline int has_transparent_hugepage(void) |
| 1632 | { |
| 1633 | return MACHINE_HAS_HPAGE ? 1 : 0; |
| 1634 | } |
Gerald Schaefer | 75077af | 2012-10-08 16:30:15 -0700 | [diff] [blame] | 1635 | #endif /* CONFIG_TRANSPARENT_HUGEPAGE */ |
| 1636 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1637 | /* |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1638 | * 64 bit swap entry format: |
| 1639 | * A page-table entry has some bits we have to treat in a special way. |
Geert Uytterhoeven | 4e0a641 | 2015-05-21 14:00:47 +0200 | [diff] [blame] | 1640 |  * Bits 52 and 55 have to be zero, otherwise a specification
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1641 |  * exception will occur instead of a page translation exception. The
Geert Uytterhoeven | 4e0a641 | 2015-05-21 14:00:47 +0200 | [diff] [blame] | 1642 |  * specification exception has the bad habit of not storing the
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1643 |  * necessary information in the lowcore.
Martin Schwidefsky | a1c843b | 2015-04-22 13:55:59 +0200 | [diff] [blame] | 1644 | * Bits 54 and 63 are used to indicate the page type. |
| 1645 | * A swap pte is indicated by bit pattern (pte & 0x201) == 0x200 |
| 1646 | * This leaves the bits 0-51 and bits 56-62 to store type and offset. |
| 1647 | * We use the 5 bits from 57-61 for the type and the 52 bits from 0-51 |
| 1648 | * for the offset. |
| 1649 | * | offset |01100|type |00| |
| 1650 | * |0000000000111111111122222222223333333333444444444455|55555|55566|66| |
| 1651 | * |0123456789012345678901234567890123456789012345678901|23456|78901|23| |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1652 | */ |
Heiko Carstens | 5a79859a | 2015-02-12 13:08:27 +0100 | [diff] [blame] | 1653 | |
Martin Schwidefsky | a1c843b | 2015-04-22 13:55:59 +0200 | [diff] [blame] | 1654 | #define __SWP_OFFSET_MASK ((1UL << 52) - 1) |
| 1655 | #define __SWP_OFFSET_SHIFT 12 |
| 1656 | #define __SWP_TYPE_MASK ((1UL << 5) - 1) |
| 1657 | #define __SWP_TYPE_SHIFT 2 |
Heiko Carstens | 5a79859a | 2015-02-12 13:08:27 +0100 | [diff] [blame] | 1658 | |
Adrian Bunk | 4448aaf | 2005-11-08 21:34:42 -0800 | [diff] [blame] | 1659 | static inline pte_t mk_swap_pte(unsigned long type, unsigned long offset) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1660 | { |
| 1661 | pte_t pte; |
Martin Schwidefsky | a1c843b | 2015-04-22 13:55:59 +0200 | [diff] [blame] | 1662 | |
| 1663 | pte_val(pte) = _PAGE_INVALID | _PAGE_PROTECT; |
| 1664 | pte_val(pte) |= (offset & __SWP_OFFSET_MASK) << __SWP_OFFSET_SHIFT; |
| 1665 | pte_val(pte) |= (type & __SWP_TYPE_MASK) << __SWP_TYPE_SHIFT; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1666 | return pte; |
| 1667 | } |
| 1668 | |
Martin Schwidefsky | a1c843b | 2015-04-22 13:55:59 +0200 | [diff] [blame] | 1669 | static inline unsigned long __swp_type(swp_entry_t entry) |
| 1670 | { |
| 1671 | return (entry.val >> __SWP_TYPE_SHIFT) & __SWP_TYPE_MASK; |
| 1672 | } |
| 1673 | |
| 1674 | static inline unsigned long __swp_offset(swp_entry_t entry) |
| 1675 | { |
| 1676 | return (entry.val >> __SWP_OFFSET_SHIFT) & __SWP_OFFSET_MASK; |
| 1677 | } |
| 1678 | |
| 1679 | static inline swp_entry_t __swp_entry(unsigned long type, unsigned long offset) |
| 1680 | { |
| 1681 | return (swp_entry_t) { pte_val(mk_swap_pte(type, offset)) }; |
| 1682 | } |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1683 | |
| 1684 | #define __pte_to_swp_entry(pte) ((swp_entry_t) { pte_val(pte) }) |
| 1685 | #define __swp_entry_to_pte(x) ((pte_t) { (x).val }) |
| 1686 | |
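/*
 * Round-trip sketch (editor's example) using the helpers above:
 *
 *	swp_entry_t entry = __swp_entry(type, offset);
 *	pte_t pte = __swp_entry_to_pte(entry);
 *	BUG_ON(__swp_type(__pte_to_swp_entry(pte)) != type);
 *	BUG_ON(__swp_offset(__pte_to_swp_entry(pte)) != offset);
 */
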
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1687 | #endif /* !__ASSEMBLY__ */ |
| 1688 | |
| 1689 | #define kern_addr_valid(addr) (1) |
| 1690 | |
Heiko Carstens | 17f3458 | 2008-04-30 13:38:47 +0200 | [diff] [blame] | 1691 | extern int vmem_add_mapping(unsigned long start, unsigned long size); |
| 1692 | extern int vmem_remove_mapping(unsigned long start, unsigned long size); |
Carsten Otte | 402b086 | 2008-03-25 18:47:10 +0100 | [diff] [blame] | 1693 | extern int s390_enable_sie(void); |
Dominik Dingel | 3ac8e38 | 2014-10-23 12:09:17 +0200 | [diff] [blame] | 1694 | extern int s390_enable_skey(void); |
Dominik Dingel | a13cff3 | 2014-10-23 12:07:14 +0200 | [diff] [blame] | 1695 | extern void s390_reset_cmma(struct mm_struct *mm); |
Heiko Carstens | f4eb07c | 2006-12-08 15:56:07 +0100 | [diff] [blame] | 1696 | |
Martin Schwidefsky | 1f6b83e | 2015-01-14 17:51:17 +0100 | [diff] [blame] | 1697 | /* s390 has a private copy of get_unmapped_area() to deal with cache synonyms */
| 1698 | #define HAVE_ARCH_UNMAPPED_AREA |
| 1699 | #define HAVE_ARCH_UNMAPPED_AREA_TOPDOWN |
| 1700 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1701 | /* |
| 1702 | * No page table caches to initialise |
| 1703 | */ |
Heiko Carstens | 765a0ca | 2013-03-23 10:29:01 +0100 | [diff] [blame] | 1704 | static inline void pgtable_cache_init(void) { } |
| 1705 | static inline void check_pgt_cache(void) { } |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1706 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1707 | #include <asm-generic/pgtable.h> |
| 1708 | |
| 1709 | #endif /* _ASM_S390_PGTABLE_H */