/* SPDX-License-Identifier: GPL-2.0 */
/*
 *  S390 version
 *    Copyright IBM Corp. 1999, 2000
 *    Author(s): Hartmut Penner (hp@de.ibm.com)
 *               Ulrich Weigand (weigand@de.ibm.com)
 *               Martin Schwidefsky (schwidefsky@de.ibm.com)
 *
 *  Derived from "include/asm-i386/pgtable.h"
 */

#ifndef _ASM_S390_PGTABLE_H
#define _ASM_S390_PGTABLE_H

#include <linux/sched.h>
#include <linux/mm_types.h>
#include <linux/page-flags.h>
#include <linux/radix-tree.h>
#include <linux/atomic.h>
#include <asm/bug.h>
#include <asm/page.h>

extern pgd_t swapper_pg_dir[];
extern void paging_init(void);

enum {
	PG_DIRECT_MAP_4K = 0,
	PG_DIRECT_MAP_1M,
	PG_DIRECT_MAP_2G,
	PG_DIRECT_MAP_MAX
};

extern atomic_long_t direct_pages_count[PG_DIRECT_MAP_MAX];

static inline void update_page_count(int level, long count)
{
	if (IS_ENABLED(CONFIG_PROC_FS))
		atomic_long_add(count, &direct_pages_count[level]);
}

struct seq_file;
void arch_report_meminfo(struct seq_file *m);

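/*
 * Illustrative sketch, not part of this header (the helper name is
 * hypothetical): how a caller that splits one 1M segment mapping of
 * the direct map into 256 4K page mappings would keep the counters
 * above consistent.
 */
static inline void __example_split_1m_to_4k(void)
{
	update_page_count(PG_DIRECT_MAP_1M, -1);
	update_page_count(PG_DIRECT_MAP_4K, 256);
}
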
/*
 * The S390 doesn't have any external MMU info: the kernel page
 * tables contain all the necessary information.
 */
#define update_mmu_cache(vma, address, ptep)     do { } while (0)
#define update_mmu_cache_pmd(vma, address, ptep) do { } while (0)

/*
 * ZERO_PAGE is a global shared page that is always zero; used
 * for zero-mapped memory areas etc.
 */

extern unsigned long empty_zero_page;
extern unsigned long zero_page_mask;

#define ZERO_PAGE(vaddr) \
	(virt_to_page((void *)(empty_zero_page + \
	 (((unsigned long)(vaddr)) & zero_page_mask))))
#define __HAVE_COLOR_ZERO_PAGE

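/*
 * Illustrative sketch, not part of this header (the helper name is
 * hypothetical): ZERO_PAGE() picks one of several physical zero pages
 * based on low bits of the faulting address, so two different virtual
 * addresses may be backed by differently "colored" zero pages.
 */
static inline struct page *__example_zero_page_for(unsigned long addr)
{
	return ZERO_PAGE(addr);
}
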
/* TODO: s390 cannot support io_remap_pfn_range... */

#define FIRST_USER_ADDRESS  0UL

#define pte_ERROR(e) \
	printk("%s:%d: bad pte %p.\n", __FILE__, __LINE__, (void *) pte_val(e))
#define pmd_ERROR(e) \
	printk("%s:%d: bad pmd %p.\n", __FILE__, __LINE__, (void *) pmd_val(e))
#define pud_ERROR(e) \
	printk("%s:%d: bad pud %p.\n", __FILE__, __LINE__, (void *) pud_val(e))
#define p4d_ERROR(e) \
	printk("%s:%d: bad p4d %p.\n", __FILE__, __LINE__, (void *) p4d_val(e))
#define pgd_ERROR(e) \
	printk("%s:%d: bad pgd %p.\n", __FILE__, __LINE__, (void *) pgd_val(e))

/*
 * The vmalloc and module area will always be on the topmost area of the
 * kernel mapping. We reserve 128GB (64bit) for vmalloc and modules.
 * On 64 bit kernels we have a 2GB area at the top of the vmalloc area where
 * modules will reside. That makes sure that inter module branches always
 * happen without trampolines and in addition the placement within a 2GB frame
 * is branch prediction unit friendly.
 */
extern unsigned long VMALLOC_START;
extern unsigned long VMALLOC_END;
extern struct page *vmemmap;

#define VMEM_MAX_PHYS ((unsigned long) vmemmap)

extern unsigned long MODULES_VADDR;
extern unsigned long MODULES_END;
#define MODULES_VADDR	MODULES_VADDR
#define MODULES_END	MODULES_END
#define MODULES_LEN	(1UL << 31)

static inline int is_module_addr(void *addr)
{
	BUILD_BUG_ON(MODULES_LEN > (1UL << 31));
	if (addr < (void *)MODULES_VADDR)
		return 0;
	if (addr > (void *)MODULES_END)
		return 0;
	return 1;
}

/*
 * A 64 bit page table entry of S390 has the following format:
 * |                        PFRA                      |0IPC|  OS  |
 * 0000000000111111111122222222223333333333444444444455555555556666
 * 0123456789012345678901234567890123456789012345678901234567890123
 *
 * I Page-Invalid Bit:    Page is not available for address-translation
 * P Page-Protection Bit: Store access not possible for page
 * C Change-bit override: HW is not required to set change bit
 *
 * A 64 bit segment table entry of S390 has the following format:
 * |        P-table origin                            |  TT
 * 0000000000111111111122222222223333333333444444444455555555556666
 * 0123456789012345678901234567890123456789012345678901234567890123
 *
 * I  Segment-Invalid Bit: Segment is not available for address-translation
 * C  Common-Segment Bit:  Segment is not private (PoP 3-30)
 * P  Page-Protection Bit: Store access not possible for page
 * TT Type 00
 *
 * A 64 bit region table entry of S390 has the following format:
 * |      S-table origin                         |  TF  TTTL
 * 0000000000111111111122222222223333333333444444444455555555556666
 * 0123456789012345678901234567890123456789012345678901234567890123
 *
 * I  Segment-Invalid Bit: Segment is not available for address-translation
 * TT Type 01
 * TF Table offset
 * TL Table length
 *
 * The 64 bit region table origin of S390 has the following format:
 * |      region table origin                         |  DTTL
 * 0000000000111111111122222222223333333333444444444455555555556666
 * 0123456789012345678901234567890123456789012345678901234567890123
 *
 * X Space-Switch event:
 * G Segment-Invalid Bit:
 * P Private-Space Bit:
 * S Storage-Alteration:
 * R Real space
 * TL Table-Length:
 *
 * A storage key has the following format:
 * | ACC |F|R|C|0|
 *  0   3 4 5 6 7
 * ACC: access key
 * F  : fetch protection bit
 * R  : referenced bit
 * C  : changed bit
 */

/* Hardware bits in the page table entry */
#define _PAGE_NOEXEC	0x100		/* HW no-execute bit  */
#define _PAGE_PROTECT	0x200		/* HW read-only bit  */
#define _PAGE_INVALID	0x400		/* HW invalid bit    */
#define _PAGE_LARGE	0x800		/* Bit to mark a large pte */

/* Software bits in the page table entry */
#define _PAGE_PRESENT	0x001		/* SW pte present bit */
#define _PAGE_YOUNG	0x004		/* SW pte young bit */
#define _PAGE_DIRTY	0x008		/* SW pte dirty bit */
#define _PAGE_READ	0x010		/* SW pte read bit */
#define _PAGE_WRITE	0x020		/* SW pte write bit */
#define _PAGE_SPECIAL	0x040		/* SW associated with special page */
#define _PAGE_UNUSED	0x080		/* SW bit for pgste usage state */

#ifdef CONFIG_MEM_SOFT_DIRTY
#define _PAGE_SOFT_DIRTY 0x002		/* SW pte soft dirty bit */
#else
#define _PAGE_SOFT_DIRTY 0x000
#endif

/* Set of bits not changed in pte_modify */
#define _PAGE_CHG_MASK		(PAGE_MASK | _PAGE_SPECIAL | _PAGE_DIRTY | \
				 _PAGE_YOUNG | _PAGE_SOFT_DIRTY)

/*
 * handle_pte_fault uses pte_present and pte_none to find out the pte type
 * WITHOUT holding the page table lock. The _PAGE_PRESENT bit is used to
 * distinguish present from not-present ptes. It is changed only with the page
 * table lock held.
 *
 * The following table gives the different possible bit combinations for
 * the pte hardware and software bits in the last 12 bits of a pte
 * (. unassigned bit, x don't care, t swap type):
 *
 *				842100000000
 *				000084210000
 *				000000008421
 *				.IR.uswrdy.p
 * empty			.10.00000000
 * swap				.11..ttttt.0
 * prot-none, clean, old	.11.xx0000.1
 * prot-none, clean, young	.11.xx0001.1
 * prot-none, dirty, old	.11.xx0010.1
 * prot-none, dirty, young	.11.xx0011.1
 * read-only, clean, old	.11.xx0100.1
 * read-only, clean, young	.01.xx0101.1
 * read-only, dirty, old	.11.xx0110.1
 * read-only, dirty, young	.01.xx0111.1
 * read-write, clean, old	.11.xx1100.1
 * read-write, clean, young	.01.xx1101.1
 * read-write, dirty, old	.10.xx1110.1
 * read-write, dirty, young	.00.xx1111.1
 * HW-bits: R read-only, I invalid
 * SW-bits: p present, y young, d dirty, r read, w write, s special,
 *	    u unused, l large
 *
 * pte_none    is true for the bit pattern .10.00000000, pte == 0x400
 * pte_swap    is true for the bit pattern .11..ttttt.0, (pte & 0x201) == 0x200
 * pte_present is true for the bit pattern .xx.xxxxxx.1, (pte & 0x001) == 0x001
 */

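/*
 * Illustrative sketch, not part of this header (the helper names are
 * hypothetical): the three predicates described above, written out
 * against the raw bit patterns. The real implementations further down
 * use the _PAGE_* defines instead of the hex constants.
 */
static inline int __example_pte_none(unsigned long pteval)
{
	return pteval == 0x400;			/* .10.00000000 */
}

static inline int __example_pte_swap(unsigned long pteval)
{
	return (pteval & 0x201) == 0x200;	/* .11..ttttt.0 */
}

static inline int __example_pte_present(unsigned long pteval)
{
	return (pteval & 0x001) == 0x001;	/* .xx.xxxxxx.1 */
}
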
/* Bits in the segment/region table address-space-control-element */
#define _ASCE_ORIGIN		~0xfffUL/* region/segment table origin	    */
#define _ASCE_PRIVATE_SPACE	0x100	/* private space control	    */
#define _ASCE_ALT_EVENT		0x80	/* storage alteration event control */
#define _ASCE_SPACE_SWITCH	0x40	/* space switch event		    */
#define _ASCE_REAL_SPACE	0x20	/* real space control		    */
#define _ASCE_TYPE_MASK		0x0c	/* asce table type mask		    */
#define _ASCE_TYPE_REGION1	0x0c	/* region first table type	    */
#define _ASCE_TYPE_REGION2	0x08	/* region second table type	    */
#define _ASCE_TYPE_REGION3	0x04	/* region third table type	    */
#define _ASCE_TYPE_SEGMENT	0x00	/* segment table type		    */
#define _ASCE_TABLE_LENGTH	0x03	/* region table length		    */

/* Bits in the region table entry */
#define _REGION_ENTRY_ORIGIN	~0xfffUL/* region/segment table origin	    */
#define _REGION_ENTRY_PROTECT	0x200	/* region protection bit	    */
#define _REGION_ENTRY_NOEXEC	0x100	/* region no-execute bit	    */
#define _REGION_ENTRY_OFFSET	0xc0	/* region table offset		    */
#define _REGION_ENTRY_INVALID	0x20	/* invalid region table entry	    */
#define _REGION_ENTRY_TYPE_MASK	0x0c	/* region/segment table type mask   */
#define _REGION_ENTRY_TYPE_R1	0x0c	/* region first table type	    */
#define _REGION_ENTRY_TYPE_R2	0x08	/* region second table type	    */
#define _REGION_ENTRY_TYPE_R3	0x04	/* region third table type	    */
#define _REGION_ENTRY_LENGTH	0x03	/* region table length		    */

#define _REGION1_ENTRY		(_REGION_ENTRY_TYPE_R1 | _REGION_ENTRY_LENGTH)
#define _REGION1_ENTRY_EMPTY	(_REGION_ENTRY_TYPE_R1 | _REGION_ENTRY_INVALID)
#define _REGION2_ENTRY		(_REGION_ENTRY_TYPE_R2 | _REGION_ENTRY_LENGTH)
#define _REGION2_ENTRY_EMPTY	(_REGION_ENTRY_TYPE_R2 | _REGION_ENTRY_INVALID)
#define _REGION3_ENTRY		(_REGION_ENTRY_TYPE_R3 | _REGION_ENTRY_LENGTH)
#define _REGION3_ENTRY_EMPTY	(_REGION_ENTRY_TYPE_R3 | _REGION_ENTRY_INVALID)

#define _REGION3_ENTRY_ORIGIN_LARGE ~0x7fffffffUL /* large page address	    */
#define _REGION3_ENTRY_DIRTY	0x2000	/* SW region dirty bit */
#define _REGION3_ENTRY_YOUNG	0x1000	/* SW region young bit */
#define _REGION3_ENTRY_LARGE	0x0400	/* RTTE-format control, large page  */
#define _REGION3_ENTRY_READ	0x0002	/* SW region read bit */
#define _REGION3_ENTRY_WRITE	0x0001	/* SW region write bit */

#ifdef CONFIG_MEM_SOFT_DIRTY
#define _REGION3_ENTRY_SOFT_DIRTY 0x4000 /* SW region soft dirty bit */
#else
#define _REGION3_ENTRY_SOFT_DIRTY 0x0000 /* SW region soft dirty bit */
#endif

#define _REGION_ENTRY_BITS	 0xfffffffffffff22fUL
#define _REGION_ENTRY_BITS_LARGE 0xffffffff8000fe2fUL

/* Bits in the segment table entry */
#define _SEGMENT_ENTRY_BITS			0xfffffffffffffe33UL
#define _SEGMENT_ENTRY_BITS_LARGE		0xfffffffffff0ff33UL
#define _SEGMENT_ENTRY_HARDWARE_BITS		0xfffffffffffffe30UL
#define _SEGMENT_ENTRY_HARDWARE_BITS_LARGE	0xfffffffffff00730UL
#define _SEGMENT_ENTRY_ORIGIN_LARGE ~0xfffffUL /* large page address	    */
#define _SEGMENT_ENTRY_ORIGIN	~0x7ffUL/* page table origin		    */
#define _SEGMENT_ENTRY_PROTECT	0x200	/* segment protection bit	    */
#define _SEGMENT_ENTRY_NOEXEC	0x100	/* segment no-execute bit	    */
#define _SEGMENT_ENTRY_INVALID	0x20	/* invalid segment table entry	    */

#define _SEGMENT_ENTRY		(0)
#define _SEGMENT_ENTRY_EMPTY	(_SEGMENT_ENTRY_INVALID)

#define _SEGMENT_ENTRY_DIRTY	0x2000	/* SW segment dirty bit */
#define _SEGMENT_ENTRY_YOUNG	0x1000	/* SW segment young bit */
#define _SEGMENT_ENTRY_LARGE	0x0400	/* STE-format control, large page */
#define _SEGMENT_ENTRY_WRITE	0x0002	/* SW segment write bit */
#define _SEGMENT_ENTRY_READ	0x0001	/* SW segment read bit */

#ifdef CONFIG_MEM_SOFT_DIRTY
#define _SEGMENT_ENTRY_SOFT_DIRTY 0x4000 /* SW segment soft dirty bit */
#else
#define _SEGMENT_ENTRY_SOFT_DIRTY 0x0000 /* SW segment soft dirty bit */
#endif

#define _CRST_ENTRIES	2048	/* number of region/segment table entries */
#define _PAGE_ENTRIES	256	/* number of page table entries */

#define _CRST_TABLE_SIZE (_CRST_ENTRIES * 8)
#define _PAGE_TABLE_SIZE (_PAGE_ENTRIES * 8)

#define _REGION1_SHIFT	53
#define _REGION2_SHIFT	42
#define _REGION3_SHIFT	31
#define _SEGMENT_SHIFT	20

#define _REGION1_INDEX	(0x7ffUL << _REGION1_SHIFT)
#define _REGION2_INDEX	(0x7ffUL << _REGION2_SHIFT)
#define _REGION3_INDEX	(0x7ffUL << _REGION3_SHIFT)
#define _SEGMENT_INDEX	(0x7ffUL << _SEGMENT_SHIFT)
#define _PAGE_INDEX	(0xffUL  << _PAGE_SHIFT)

#define _REGION1_SIZE	(1UL << _REGION1_SHIFT)
#define _REGION2_SIZE	(1UL << _REGION2_SHIFT)
#define _REGION3_SIZE	(1UL << _REGION3_SHIFT)
#define _SEGMENT_SIZE	(1UL << _SEGMENT_SHIFT)

#define _REGION1_MASK	(~(_REGION1_SIZE - 1))
#define _REGION2_MASK	(~(_REGION2_SIZE - 1))
#define _REGION3_MASK	(~(_REGION3_SIZE - 1))
#define _SEGMENT_MASK	(~(_SEGMENT_SIZE - 1))

#define PMD_SHIFT	_SEGMENT_SHIFT
#define PUD_SHIFT	_REGION3_SHIFT
#define P4D_SHIFT	_REGION2_SHIFT
#define PGDIR_SHIFT	_REGION1_SHIFT

#define PMD_SIZE	_SEGMENT_SIZE
#define PUD_SIZE	_REGION3_SIZE
#define P4D_SIZE	_REGION2_SIZE
#define PGDIR_SIZE	_REGION1_SIZE

#define PMD_MASK	_SEGMENT_MASK
#define PUD_MASK	_REGION3_MASK
#define P4D_MASK	_REGION2_MASK
#define PGDIR_MASK	_REGION1_MASK

#define PTRS_PER_PTE	_PAGE_ENTRIES
#define PTRS_PER_PMD	_CRST_ENTRIES
#define PTRS_PER_PUD	_CRST_ENTRIES
#define PTRS_PER_P4D	_CRST_ENTRIES
#define PTRS_PER_PGD	_CRST_ENTRIES

#define MAX_PTRS_PER_P4D	PTRS_PER_P4D

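/*
 * Illustrative sketch, not part of this header (the helper name is
 * hypothetical): the _INDEX masks above select the table index bits of
 * a virtual address; shifting back down yields an index in the range
 * 0.._CRST_ENTRIES-1, here for the region-first table.
 */
static inline unsigned long __example_region1_index(unsigned long addr)
{
	return (addr & _REGION1_INDEX) >> _REGION1_SHIFT;
}
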
/*
 * Segment table and region3 table entry encoding
 * (R = read-only, I = invalid, y = young bit):
 *				dy..R...I...wr
 * prot-none, clean, old	00..1...1...00
 * prot-none, clean, young	01..1...1...00
 * prot-none, dirty, old	10..1...1...00
 * prot-none, dirty, young	11..1...1...00
 * read-only, clean, old	00..1...1...01
 * read-only, clean, young	01..1...0...01
 * read-only, dirty, old	10..1...1...01
 * read-only, dirty, young	11..1...0...01
 * read-write, clean, old	00..1...1...11
 * read-write, clean, young	01..1...0...11
 * read-write, dirty, old	10..0...1...11
 * read-write, dirty, young	11..0...0...11
 * The segment table origin is used to distinguish empty (origin==0) from
 * read-write, old segment table entries (origin!=0)
 * HW-bits: R read-only, I invalid
 * SW-bits: y young, d dirty, r read, w write
 */

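/*
 * Illustrative sketch, not part of this header (the helper names are
 * hypothetical): reading the encoding table above for a read-write
 * segment entry. A clean entry keeps the protection bit set so that
 * the first write faults; an old entry keeps the invalid bit set so
 * that the first reference faults.
 */
static inline int __example_segment_clean(unsigned long entry)
{
	return (entry & _SEGMENT_ENTRY_DIRTY) == 0;
}

static inline int __example_segment_old(unsigned long entry)
{
	return (entry & _SEGMENT_ENTRY_YOUNG) == 0;
}
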
/* Page status table bits for virtualization */
#define PGSTE_ACC_BITS	0xf000000000000000UL
#define PGSTE_FP_BIT	0x0800000000000000UL
#define PGSTE_PCL_BIT	0x0080000000000000UL
#define PGSTE_HR_BIT	0x0040000000000000UL
#define PGSTE_HC_BIT	0x0020000000000000UL
#define PGSTE_GR_BIT	0x0004000000000000UL
#define PGSTE_GC_BIT	0x0002000000000000UL
#define PGSTE_UC_BIT	0x0000800000000000UL	/* user dirty (migration) */
#define PGSTE_IN_BIT	0x0000400000000000UL	/* IPTE notify bit */
#define PGSTE_VSIE_BIT	0x0000200000000000UL	/* ref'd in a shadow table */

/* Guest Page State used for virtualization */
#define _PGSTE_GPS_ZERO			0x0000000080000000UL
#define _PGSTE_GPS_NODAT		0x0000000040000000UL
#define _PGSTE_GPS_USAGE_MASK		0x0000000003000000UL
#define _PGSTE_GPS_USAGE_STABLE		0x0000000000000000UL
#define _PGSTE_GPS_USAGE_UNUSED		0x0000000001000000UL
#define _PGSTE_GPS_USAGE_POT_VOLATILE	0x0000000002000000UL
#define _PGSTE_GPS_USAGE_VOLATILE	_PGSTE_GPS_USAGE_MASK

/*
 * A user page table pointer has the space-switch-event bit, the
 * private-space-control bit and the storage-alteration-event-control
 * bit set. A kernel page table pointer doesn't need them.
 */
#define _ASCE_USER_BITS		(_ASCE_SPACE_SWITCH | _ASCE_PRIVATE_SPACE | \
				 _ASCE_ALT_EVENT)

/*
 * Page protection definitions.
 */
#define PAGE_NONE	__pgprot(_PAGE_PRESENT | _PAGE_INVALID | _PAGE_PROTECT)
#define PAGE_RO		__pgprot(_PAGE_PRESENT | _PAGE_READ | \
				 _PAGE_NOEXEC  | _PAGE_INVALID | _PAGE_PROTECT)
#define PAGE_RX		__pgprot(_PAGE_PRESENT | _PAGE_READ | \
				 _PAGE_INVALID | _PAGE_PROTECT)
#define PAGE_RW		__pgprot(_PAGE_PRESENT | _PAGE_READ | _PAGE_WRITE | \
				 _PAGE_NOEXEC  | _PAGE_INVALID | _PAGE_PROTECT)
#define PAGE_RWX	__pgprot(_PAGE_PRESENT | _PAGE_READ | _PAGE_WRITE | \
				 _PAGE_INVALID | _PAGE_PROTECT)

#define PAGE_SHARED	__pgprot(_PAGE_PRESENT | _PAGE_READ | _PAGE_WRITE | \
				 _PAGE_YOUNG | _PAGE_DIRTY | _PAGE_NOEXEC)
#define PAGE_KERNEL	__pgprot(_PAGE_PRESENT | _PAGE_READ | _PAGE_WRITE | \
				 _PAGE_YOUNG | _PAGE_DIRTY | _PAGE_NOEXEC)
#define PAGE_KERNEL_RO	__pgprot(_PAGE_PRESENT | _PAGE_READ | _PAGE_YOUNG | \
				 _PAGE_PROTECT | _PAGE_NOEXEC)
#define PAGE_KERNEL_EXEC __pgprot(_PAGE_PRESENT | _PAGE_READ | _PAGE_WRITE | \
				  _PAGE_YOUNG | _PAGE_DIRTY)

/*
 * On s390 the page table entry has an invalid bit and a read-only bit.
 * Read permission implies execute permission and write permission
 * implies read permission.
 */
	 /*xwr*/
#define __P000	PAGE_NONE
#define __P001	PAGE_RO
#define __P010	PAGE_RO
#define __P011	PAGE_RO
#define __P100	PAGE_RX
#define __P101	PAGE_RX
#define __P110	PAGE_RX
#define __P111	PAGE_RX

#define __S000	PAGE_NONE
#define __S001	PAGE_RO
#define __S010	PAGE_RW
#define __S011	PAGE_RW
#define __S100	PAGE_RX
#define __S101	PAGE_RX
#define __S110	PAGE_RWX
#define __S111	PAGE_RWX

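/*
 * Illustrative sketch, not part of this header (the helper name is
 * hypothetical): because write implies read and read implies execute,
 * a private PROT_WRITE mapping (__P010) starts out as PAGE_RO and only
 * becomes writable via the copy-on-write fault.
 */
static inline pgprot_t __example_private_write_prot(void)
{
	return __P010;	/* PAGE_RO until the COW fault */
}
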
/*
 * Segment entry (large page) protection definitions.
 */
#define SEGMENT_NONE	__pgprot(_SEGMENT_ENTRY_INVALID | \
				 _SEGMENT_ENTRY_PROTECT)
#define SEGMENT_RO	__pgprot(_SEGMENT_ENTRY_PROTECT | \
				 _SEGMENT_ENTRY_READ | \
				 _SEGMENT_ENTRY_NOEXEC)
#define SEGMENT_RX	__pgprot(_SEGMENT_ENTRY_PROTECT | \
				 _SEGMENT_ENTRY_READ)
#define SEGMENT_RW	__pgprot(_SEGMENT_ENTRY_READ | \
				 _SEGMENT_ENTRY_WRITE | \
				 _SEGMENT_ENTRY_NOEXEC)
#define SEGMENT_RWX	__pgprot(_SEGMENT_ENTRY_READ | \
				 _SEGMENT_ENTRY_WRITE)
#define SEGMENT_KERNEL	__pgprot(_SEGMENT_ENTRY |	\
				 _SEGMENT_ENTRY_LARGE |	\
				 _SEGMENT_ENTRY_READ |	\
				 _SEGMENT_ENTRY_WRITE |	\
				 _SEGMENT_ENTRY_YOUNG |	\
				 _SEGMENT_ENTRY_DIRTY |	\
				 _SEGMENT_ENTRY_NOEXEC)
#define SEGMENT_KERNEL_RO __pgprot(_SEGMENT_ENTRY |	\
				 _SEGMENT_ENTRY_LARGE |	\
				 _SEGMENT_ENTRY_READ |	\
				 _SEGMENT_ENTRY_YOUNG |	\
				 _SEGMENT_ENTRY_PROTECT | \
				 _SEGMENT_ENTRY_NOEXEC)
#define SEGMENT_KERNEL_EXEC __pgprot(_SEGMENT_ENTRY |	\
				 _SEGMENT_ENTRY_LARGE |	\
				 _SEGMENT_ENTRY_READ |	\
				 _SEGMENT_ENTRY_WRITE |	\
				 _SEGMENT_ENTRY_YOUNG |	\
				 _SEGMENT_ENTRY_DIRTY)

/*
 * Region3 entry (large page) protection definitions.
 */

#define REGION3_KERNEL	__pgprot(_REGION_ENTRY_TYPE_R3 | \
				 _REGION3_ENTRY_LARGE |	 \
				 _REGION3_ENTRY_READ |	 \
				 _REGION3_ENTRY_WRITE |	 \
				 _REGION3_ENTRY_YOUNG |	 \
				 _REGION3_ENTRY_DIRTY |	 \
				 _REGION_ENTRY_NOEXEC)
#define REGION3_KERNEL_RO __pgprot(_REGION_ENTRY_TYPE_R3 | \
				   _REGION3_ENTRY_LARGE |  \
				   _REGION3_ENTRY_READ |   \
				   _REGION3_ENTRY_YOUNG |  \
				   _REGION_ENTRY_PROTECT | \
				   _REGION_ENTRY_NOEXEC)

static inline bool mm_p4d_folded(struct mm_struct *mm)
{
	return mm->context.asce_limit <= _REGION1_SIZE;
}
#define mm_p4d_folded(mm) mm_p4d_folded(mm)

static inline bool mm_pud_folded(struct mm_struct *mm)
{
	return mm->context.asce_limit <= _REGION2_SIZE;
}
#define mm_pud_folded(mm) mm_pud_folded(mm)

static inline bool mm_pmd_folded(struct mm_struct *mm)
{
	return mm->context.asce_limit <= _REGION3_SIZE;
}
#define mm_pmd_folded(mm) mm_pmd_folded(mm)

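/*
 * Illustrative sketch, not part of this header (the helper name is
 * hypothetical, and the default asce_limit is an assumption): with a
 * three-level ASCE (asce_limit == _REGION2_SIZE, i.e. 4TB) the p4d and
 * pud levels are folded while the pmd level is not.
 */
static inline bool __example_three_level_asce(struct mm_struct *mm)
{
	return mm_p4d_folded(mm) && mm_pud_folded(mm) && !mm_pmd_folded(mm);
}
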
static inline int mm_has_pgste(struct mm_struct *mm)
{
#ifdef CONFIG_PGSTE
	if (unlikely(mm->context.has_pgste))
		return 1;
#endif
	return 0;
}

static inline int mm_alloc_pgste(struct mm_struct *mm)
{
#ifdef CONFIG_PGSTE
	if (unlikely(mm->context.alloc_pgste))
		return 1;
#endif
	return 0;
}

/*
 * In the case that a guest uses storage keys,
 * faults should no longer be backed by zero pages
 */
#define mm_forbids_zeropage mm_has_pgste
static inline int mm_uses_skeys(struct mm_struct *mm)
{
#ifdef CONFIG_PGSTE
	if (mm->context.uses_skeys)
		return 1;
#endif
	return 0;
}

static inline void csp(unsigned int *ptr, unsigned int old, unsigned int new)
{
	register unsigned long reg2 asm("2") = old;
	register unsigned long reg3 asm("3") = new;
	unsigned long address = (unsigned long)ptr | 1;

	asm volatile(
		"	csp	%0,%3"
		: "+d" (reg2), "+m" (*ptr)
		: "d" (reg3), "d" (address)
		: "cc");
}

static inline void cspg(unsigned long *ptr, unsigned long old, unsigned long new)
{
	register unsigned long reg2 asm("2") = old;
	register unsigned long reg3 asm("3") = new;
	unsigned long address = (unsigned long)ptr | 1;

	asm volatile(
		"	.insn	rre,0xb98a0000,%0,%3"
		: "+d" (reg2), "+m" (*ptr)
		: "d" (reg3), "d" (address)
		: "cc");
}

#define CRDTE_DTT_PAGE		0x00UL
#define CRDTE_DTT_SEGMENT	0x10UL
#define CRDTE_DTT_REGION3	0x14UL
#define CRDTE_DTT_REGION2	0x18UL
#define CRDTE_DTT_REGION1	0x1cUL

static inline void crdte(unsigned long old, unsigned long new,
			 unsigned long table, unsigned long dtt,
			 unsigned long address, unsigned long asce)
{
	register unsigned long reg2 asm("2") = old;
	register unsigned long reg3 asm("3") = new;
	register unsigned long reg4 asm("4") = table | dtt;
	register unsigned long reg5 asm("5") = address;

	asm volatile(".insn rrf,0xb98f0000,%0,%2,%4,0"
		     : "+d" (reg2)
		     : "d" (reg3), "d" (reg4), "d" (reg5), "a" (asce)
		     : "memory", "cc");
}

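/*
 * Illustrative sketch, not part of this header and simplified from the
 * way the mm code drives crdte (the helper name is hypothetical):
 * compare-and-replace one pte and flush it, assuming the CRDTE
 * facility is installed. The table origin is the pte pointer rounded
 * down to the start of its 2K page table.
 */
static inline void __example_crdte_pte(pte_t *ptep, pte_t old, pte_t new,
				       unsigned long addr, unsigned long asce)
{
	unsigned long table = (unsigned long) ptep & ~(_PAGE_TABLE_SIZE - 1);

	crdte(pte_val(old), pte_val(new), table, CRDTE_DTT_PAGE, addr, asce);
}
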
/*
 * pgd/p4d/pud/pmd/pte query functions
 */
static inline int pgd_folded(pgd_t pgd)
{
	return (pgd_val(pgd) & _REGION_ENTRY_TYPE_MASK) < _REGION_ENTRY_TYPE_R1;
}

static inline int pgd_present(pgd_t pgd)
{
	if (pgd_folded(pgd))
		return 1;
	return (pgd_val(pgd) & _REGION_ENTRY_ORIGIN) != 0UL;
}

static inline int pgd_none(pgd_t pgd)
{
	if (pgd_folded(pgd))
		return 0;
	return (pgd_val(pgd) & _REGION_ENTRY_INVALID) != 0UL;
}

static inline int pgd_bad(pgd_t pgd)
{
	/*
	 * With dynamic page table levels the pgd can be a region table
	 * entry or a segment table entry. Check for the bits that are
	 * invalid for either table entry.
	 */
	unsigned long mask =
		~_SEGMENT_ENTRY_ORIGIN & ~_REGION_ENTRY_INVALID &
		~_REGION_ENTRY_TYPE_MASK & ~_REGION_ENTRY_LENGTH;
	return (pgd_val(pgd) & mask) != 0;
}

static inline unsigned long pgd_pfn(pgd_t pgd)
{
	unsigned long origin_mask;

	origin_mask = _REGION_ENTRY_ORIGIN;
	return (pgd_val(pgd) & origin_mask) >> PAGE_SHIFT;
}

static inline int p4d_folded(p4d_t p4d)
{
	return (p4d_val(p4d) & _REGION_ENTRY_TYPE_MASK) < _REGION_ENTRY_TYPE_R2;
}

static inline int p4d_present(p4d_t p4d)
{
	if (p4d_folded(p4d))
		return 1;
	return (p4d_val(p4d) & _REGION_ENTRY_ORIGIN) != 0UL;
}

static inline int p4d_none(p4d_t p4d)
{
	if (p4d_folded(p4d))
		return 0;
	return p4d_val(p4d) == _REGION2_ENTRY_EMPTY;
}

static inline unsigned long p4d_pfn(p4d_t p4d)
{
	unsigned long origin_mask;

	origin_mask = _REGION_ENTRY_ORIGIN;
	return (p4d_val(p4d) & origin_mask) >> PAGE_SHIFT;
}

static inline int pud_folded(pud_t pud)
{
	return (pud_val(pud) & _REGION_ENTRY_TYPE_MASK) < _REGION_ENTRY_TYPE_R3;
}

static inline int pud_present(pud_t pud)
{
	if (pud_folded(pud))
		return 1;
	return (pud_val(pud) & _REGION_ENTRY_ORIGIN) != 0UL;
}

static inline int pud_none(pud_t pud)
{
	if (pud_folded(pud))
		return 0;
	return pud_val(pud) == _REGION3_ENTRY_EMPTY;
}

static inline int pud_large(pud_t pud)
{
	if ((pud_val(pud) & _REGION_ENTRY_TYPE_MASK) != _REGION_ENTRY_TYPE_R3)
		return 0;
	return !!(pud_val(pud) & _REGION3_ENTRY_LARGE);
}

static inline unsigned long pud_pfn(pud_t pud)
{
	unsigned long origin_mask;

	origin_mask = _REGION_ENTRY_ORIGIN;
	if (pud_large(pud))
		origin_mask = _REGION3_ENTRY_ORIGIN_LARGE;
	return (pud_val(pud) & origin_mask) >> PAGE_SHIFT;
}

static inline int pmd_large(pmd_t pmd)
{
	return (pmd_val(pmd) & _SEGMENT_ENTRY_LARGE) != 0;
}

static inline int pmd_bad(pmd_t pmd)
{
	if (pmd_large(pmd))
		return (pmd_val(pmd) & ~_SEGMENT_ENTRY_BITS_LARGE) != 0;
	return (pmd_val(pmd) & ~_SEGMENT_ENTRY_BITS) != 0;
}

static inline int pud_bad(pud_t pud)
{
	if ((pud_val(pud) & _REGION_ENTRY_TYPE_MASK) < _REGION_ENTRY_TYPE_R3)
		return pmd_bad(__pmd(pud_val(pud)));
	if (pud_large(pud))
		return (pud_val(pud) & ~_REGION_ENTRY_BITS_LARGE) != 0;
	return (pud_val(pud) & ~_REGION_ENTRY_BITS) != 0;
}

static inline int p4d_bad(p4d_t p4d)
{
	if ((p4d_val(p4d) & _REGION_ENTRY_TYPE_MASK) < _REGION_ENTRY_TYPE_R2)
		return pud_bad(__pud(p4d_val(p4d)));
	return (p4d_val(p4d) & ~_REGION_ENTRY_BITS) != 0;
}

static inline int pmd_present(pmd_t pmd)
{
	return pmd_val(pmd) != _SEGMENT_ENTRY_EMPTY;
}

static inline int pmd_none(pmd_t pmd)
{
	return pmd_val(pmd) == _SEGMENT_ENTRY_EMPTY;
}

static inline unsigned long pmd_pfn(pmd_t pmd)
{
	unsigned long origin_mask;

	origin_mask = _SEGMENT_ENTRY_ORIGIN;
	if (pmd_large(pmd))
		origin_mask = _SEGMENT_ENTRY_ORIGIN_LARGE;
	return (pmd_val(pmd) & origin_mask) >> PAGE_SHIFT;
}

#define pmd_write pmd_write
static inline int pmd_write(pmd_t pmd)
{
	return (pmd_val(pmd) & _SEGMENT_ENTRY_WRITE) != 0;
}

static inline int pmd_dirty(pmd_t pmd)
{
	int dirty = 1;
	if (pmd_large(pmd))
		dirty = (pmd_val(pmd) & _SEGMENT_ENTRY_DIRTY) != 0;
	return dirty;
}

static inline int pmd_young(pmd_t pmd)
{
	int young = 1;
	if (pmd_large(pmd))
		young = (pmd_val(pmd) & _SEGMENT_ENTRY_YOUNG) != 0;
	return young;
}

static inline int pte_present(pte_t pte)
{
	/* Bit pattern: (pte & 0x001) == 0x001 */
	return (pte_val(pte) & _PAGE_PRESENT) != 0;
}

static inline int pte_none(pte_t pte)
{
	/* Bit pattern: pte == 0x400 */
	return pte_val(pte) == _PAGE_INVALID;
}

static inline int pte_swap(pte_t pte)
{
	/* Bit pattern: (pte & 0x201) == 0x200 */
	return (pte_val(pte) & (_PAGE_PROTECT | _PAGE_PRESENT))
		== _PAGE_PROTECT;
}

static inline int pte_special(pte_t pte)
{
	return (pte_val(pte) & _PAGE_SPECIAL);
}

#define __HAVE_ARCH_PTE_SAME
static inline int pte_same(pte_t a, pte_t b)
{
	return pte_val(a) == pte_val(b);
}

#ifdef CONFIG_NUMA_BALANCING
static inline int pte_protnone(pte_t pte)
{
	return pte_present(pte) && !(pte_val(pte) & _PAGE_READ);
}

static inline int pmd_protnone(pmd_t pmd)
{
	/* pmd_large(pmd) implies pmd_present(pmd) */
	return pmd_large(pmd) && !(pmd_val(pmd) & _SEGMENT_ENTRY_READ);
}
#endif

static inline int pte_soft_dirty(pte_t pte)
{
	return pte_val(pte) & _PAGE_SOFT_DIRTY;
}
#define pte_swp_soft_dirty pte_soft_dirty

static inline pte_t pte_mksoft_dirty(pte_t pte)
{
	pte_val(pte) |= _PAGE_SOFT_DIRTY;
	return pte;
}
#define pte_swp_mksoft_dirty pte_mksoft_dirty

static inline pte_t pte_clear_soft_dirty(pte_t pte)
{
	pte_val(pte) &= ~_PAGE_SOFT_DIRTY;
	return pte;
}
#define pte_swp_clear_soft_dirty pte_clear_soft_dirty

static inline int pmd_soft_dirty(pmd_t pmd)
{
	return pmd_val(pmd) & _SEGMENT_ENTRY_SOFT_DIRTY;
}

static inline pmd_t pmd_mksoft_dirty(pmd_t pmd)
{
	pmd_val(pmd) |= _SEGMENT_ENTRY_SOFT_DIRTY;
	return pmd;
}

static inline pmd_t pmd_clear_soft_dirty(pmd_t pmd)
{
	pmd_val(pmd) &= ~_SEGMENT_ENTRY_SOFT_DIRTY;
	return pmd;
}

/*
 * query functions pte_write/pte_dirty/pte_young only work if
 * pte_present() is true. Undefined behaviour if not.
 */
static inline int pte_write(pte_t pte)
{
	return (pte_val(pte) & _PAGE_WRITE) != 0;
}

static inline int pte_dirty(pte_t pte)
{
	return (pte_val(pte) & _PAGE_DIRTY) != 0;
}

static inline int pte_young(pte_t pte)
{
	return (pte_val(pte) & _PAGE_YOUNG) != 0;
}

#define __HAVE_ARCH_PTE_UNUSED
static inline int pte_unused(pte_t pte)
{
	return pte_val(pte) & _PAGE_UNUSED;
}

/*
 * pgd/pmd/pte modification functions
 */

static inline void pgd_clear(pgd_t *pgd)
{
	if ((pgd_val(*pgd) & _REGION_ENTRY_TYPE_MASK) == _REGION_ENTRY_TYPE_R1)
		pgd_val(*pgd) = _REGION1_ENTRY_EMPTY;
}

static inline void p4d_clear(p4d_t *p4d)
{
	if ((p4d_val(*p4d) & _REGION_ENTRY_TYPE_MASK) == _REGION_ENTRY_TYPE_R2)
		p4d_val(*p4d) = _REGION2_ENTRY_EMPTY;
}

static inline void pud_clear(pud_t *pud)
{
	if ((pud_val(*pud) & _REGION_ENTRY_TYPE_MASK) == _REGION_ENTRY_TYPE_R3)
		pud_val(*pud) = _REGION3_ENTRY_EMPTY;
}

static inline void pmd_clear(pmd_t *pmdp)
{
	pmd_val(*pmdp) = _SEGMENT_ENTRY_EMPTY;
}

static inline void pte_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
{
	pte_val(*ptep) = _PAGE_INVALID;
}

/*
 * The following pte modification functions only work if
 * pte_present() is true. Undefined behaviour if not.
 */
static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
{
	pte_val(pte) &= _PAGE_CHG_MASK;
	pte_val(pte) |= pgprot_val(newprot);
	/*
	 * newprot for PAGE_NONE, PAGE_RO, PAGE_RX, PAGE_RW and PAGE_RWX
	 * has the invalid bit set, clear it again for readable, young pages
	 */
	if ((pte_val(pte) & _PAGE_YOUNG) && (pte_val(pte) & _PAGE_READ))
		pte_val(pte) &= ~_PAGE_INVALID;
	/*
	 * newprot for PAGE_RO, PAGE_RX, PAGE_RW and PAGE_RWX has the page
	 * protection bit set, clear it again for writable, dirty pages
	 */
	if ((pte_val(pte) & _PAGE_DIRTY) && (pte_val(pte) & _PAGE_WRITE))
		pte_val(pte) &= ~_PAGE_PROTECT;
	return pte;
}

static inline pte_t pte_wrprotect(pte_t pte)
{
	pte_val(pte) &= ~_PAGE_WRITE;
	pte_val(pte) |= _PAGE_PROTECT;
	return pte;
}

static inline pte_t pte_mkwrite(pte_t pte)
{
	pte_val(pte) |= _PAGE_WRITE;
	if (pte_val(pte) & _PAGE_DIRTY)
		pte_val(pte) &= ~_PAGE_PROTECT;
	return pte;
}

static inline pte_t pte_mkclean(pte_t pte)
{
	pte_val(pte) &= ~_PAGE_DIRTY;
	pte_val(pte) |= _PAGE_PROTECT;
	return pte;
}

static inline pte_t pte_mkdirty(pte_t pte)
{
	pte_val(pte) |= _PAGE_DIRTY | _PAGE_SOFT_DIRTY;
	if (pte_val(pte) & _PAGE_WRITE)
		pte_val(pte) &= ~_PAGE_PROTECT;
	return pte;
}

static inline pte_t pte_mkold(pte_t pte)
{
	pte_val(pte) &= ~_PAGE_YOUNG;
	pte_val(pte) |= _PAGE_INVALID;
	return pte;
}

static inline pte_t pte_mkyoung(pte_t pte)
{
	pte_val(pte) |= _PAGE_YOUNG;
	if (pte_val(pte) & _PAGE_READ)
		pte_val(pte) &= ~_PAGE_INVALID;
	return pte;
}

static inline pte_t pte_mkspecial(pte_t pte)
{
	pte_val(pte) |= _PAGE_SPECIAL;
	return pte;
}

#ifdef CONFIG_HUGETLB_PAGE
static inline pte_t pte_mkhuge(pte_t pte)
{
	pte_val(pte) |= _PAGE_LARGE;
	return pte;
}
#endif

#define IPTE_GLOBAL	0
#define IPTE_LOCAL	1

#define IPTE_NODAT	0x400
#define IPTE_GUEST_ASCE	0x800

static inline void __ptep_ipte(unsigned long address, pte_t *ptep,
			       unsigned long opt, unsigned long asce,
			       int local)
{
	unsigned long pto = (unsigned long) ptep;

	if (__builtin_constant_p(opt) && opt == 0) {
		/* Invalidation + TLB flush for the pte */
		asm volatile(
			"	.insn	rrf,0xb2210000,%[r1],%[r2],0,%[m4]"
			: "+m" (*ptep) : [r1] "a" (pto), [r2] "a" (address),
			  [m4] "i" (local));
		return;
	}

	/* Invalidate ptes with options + TLB flush of the ptes */
	opt = opt | (asce & _ASCE_ORIGIN);
	asm volatile(
		"	.insn	rrf,0xb2210000,%[r1],%[r2],%[r3],%[m4]"
		: [r2] "+a" (address), [r3] "+a" (opt)
		: [r1] "a" (pto), [m4] "i" (local) : "memory");
}

static inline void __ptep_ipte_range(unsigned long address, int nr,
				     pte_t *ptep, int local)
{
	unsigned long pto = (unsigned long) ptep;

	/* Invalidate a range of ptes + TLB flush of the ptes */
	do {
		asm volatile(
			"	.insn rrf,0xb2210000,%[r1],%[r2],%[r3],%[m4]"
			: [r2] "+a" (address), [r3] "+a" (nr)
			: [r1] "a" (pto), [m4] "i" (local) : "memory");
	} while (nr != 255);
}

/*
 * This is hard to understand. ptep_get_and_clear and ptep_clear_flush
 * both clear the TLB for the unmapped pte. The reason is that
 * ptep_get_and_clear is used in common code (e.g. change_pte_range)
 * to modify an active pte. The sequence is
 *   1) ptep_get_and_clear
 *   2) set_pte_at
 *   3) flush_tlb_range
 * On s390 the tlb needs to get flushed with the modification of the pte
 * if the pte is active. The only way this can be implemented is to
 * have ptep_get_and_clear do the tlb flush. In exchange flush_tlb_range
 * is a nop.
 */
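/*
 * Example (illustrative only; ptep_get_and_clear and set_pte_at are
 * defined further down, flush_tlb_range elsewhere): the common-code
 * sequence above, spelled out. On s390 step 1 has already flushed the
 * TLB, which is why step 3 can be a nop.
 *
 *	pte_t old = ptep_get_and_clear(mm, addr, ptep);	// flushes the TLB
 *	set_pte_at(mm, addr, ptep, newpte);
 *	flush_tlb_range(vma, start, end);		// nop on s390
 */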
Martin Schwidefskyebde7652016-03-08 11:08:09 +01001044pte_t ptep_xchg_direct(struct mm_struct *, unsigned long, pte_t *, pte_t);
1045pte_t ptep_xchg_lazy(struct mm_struct *, unsigned long, pte_t *, pte_t);
1046
1047#define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_YOUNG
1048static inline int ptep_test_and_clear_young(struct vm_area_struct *vma,
1049 unsigned long addr, pte_t *ptep)
1050{
1051 pte_t pte = *ptep;
1052
1053 pte = ptep_xchg_direct(vma->vm_mm, addr, ptep, pte_mkold(pte));
1054 return pte_young(pte);
1055}
1056
1057#define __HAVE_ARCH_PTEP_CLEAR_YOUNG_FLUSH
1058static inline int ptep_clear_flush_young(struct vm_area_struct *vma,
1059 unsigned long address, pte_t *ptep)
1060{
1061 return ptep_test_and_clear_young(vma, address, ptep);
1062}
1063
Martin Schwidefskyba8a9222007-10-22 12:52:44 +02001064#define __HAVE_ARCH_PTEP_GET_AND_CLEAR
Martin Schwidefskyb2fa47e2011-05-23 10:24:40 +02001065static inline pte_t ptep_get_and_clear(struct mm_struct *mm,
Martin Schwidefskyebde7652016-03-08 11:08:09 +01001066 unsigned long addr, pte_t *ptep)
Martin Schwidefskyb2fa47e2011-05-23 10:24:40 +02001067{
Martin Schwidefskyebde7652016-03-08 11:08:09 +01001068 return ptep_xchg_lazy(mm, addr, ptep, __pte(_PAGE_INVALID));
Martin Schwidefskyb2fa47e2011-05-23 10:24:40 +02001069}
1070
1071#define __HAVE_ARCH_PTEP_MODIFY_PROT_TRANSACTION
Aneesh Kumar K.V0cbe3e22019-03-05 15:46:26 -08001072pte_t ptep_modify_prot_start(struct vm_area_struct *, unsigned long, pte_t *);
Aneesh Kumar K.V04a86452019-03-05 15:46:29 -08001073void ptep_modify_prot_commit(struct vm_area_struct *, unsigned long,
1074 pte_t *, pte_t, pte_t);
Martin Schwidefskyba8a9222007-10-22 12:52:44 +02001075
1076#define __HAVE_ARCH_PTEP_CLEAR_FLUSH
Martin Schwidefskyf0e47c22007-07-17 04:03:03 -07001077static inline pte_t ptep_clear_flush(struct vm_area_struct *vma,
Martin Schwidefskyebde7652016-03-08 11:08:09 +01001078 unsigned long addr, pte_t *ptep)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001079{
Martin Schwidefskyebde7652016-03-08 11:08:09 +01001080 return ptep_xchg_direct(vma->vm_mm, addr, ptep, __pte(_PAGE_INVALID));
Linus Torvalds1da177e2005-04-16 15:20:36 -07001081}
1082
Martin Schwidefskyba8a9222007-10-22 12:52:44 +02001083/*
1084 * The batched pte unmap code uses ptep_get_and_clear_full to clear the
1085 * ptes. Here an optimization is possible. tlb_gather_mmu flushes all
1086 * tlbs of an mm if it can guarantee that the ptes of the mm_struct
1087 * cannot be accessed while the batched unmap is running. In this case
1088 * full==1 and a simple pte_clear is enough. See tlb.h.
1089 */
1090#define __HAVE_ARCH_PTEP_GET_AND_CLEAR_FULL
1091static inline pte_t ptep_get_and_clear_full(struct mm_struct *mm,
Martin Schwidefskyebde7652016-03-08 11:08:09 +01001092 unsigned long addr,
Martin Schwidefskyba8a9222007-10-22 12:52:44 +02001093 pte_t *ptep, int full)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001094{
Martin Schwidefskyebde7652016-03-08 11:08:09 +01001095 if (full) {
1096 pte_t pte = *ptep;
1097 *ptep = __pte(_PAGE_INVALID);
1098 return pte;
Martin Schwidefskyd3383632013-04-17 10:53:39 +02001099 }
Martin Schwidefskyebde7652016-03-08 11:08:09 +01001100 return ptep_xchg_lazy(mm, addr, ptep, __pte(_PAGE_INVALID));
Linus Torvalds1da177e2005-04-16 15:20:36 -07001101}
1102
Martin Schwidefskyba8a9222007-10-22 12:52:44 +02001103#define __HAVE_ARCH_PTEP_SET_WRPROTECT
Martin Schwidefskyebde7652016-03-08 11:08:09 +01001104static inline void ptep_set_wrprotect(struct mm_struct *mm,
1105 unsigned long addr, pte_t *ptep)
Martin Schwidefskyb2fa47e2011-05-23 10:24:40 +02001106{
Martin Schwidefskyb2fa47e2011-05-23 10:24:40 +02001107 pte_t pte = *ptep;
1108
Martin Schwidefskyebde7652016-03-08 11:08:09 +01001109 if (pte_write(pte))
1110 ptep_xchg_lazy(mm, addr, ptep, pte_wrprotect(pte));
Martin Schwidefskyb2fa47e2011-05-23 10:24:40 +02001111}
Martin Schwidefskyba8a9222007-10-22 12:52:44 +02001112
1113#define __HAVE_ARCH_PTEP_SET_ACCESS_FLAGS
Martin Schwidefskyb2fa47e2011-05-23 10:24:40 +02001114static inline int ptep_set_access_flags(struct vm_area_struct *vma,
Martin Schwidefskyebde7652016-03-08 11:08:09 +01001115 unsigned long addr, pte_t *ptep,
Martin Schwidefskyb2fa47e2011-05-23 10:24:40 +02001116 pte_t entry, int dirty)
1117{
Martin Schwidefskyebde7652016-03-08 11:08:09 +01001118 if (pte_same(*ptep, entry))
Martin Schwidefskyb2fa47e2011-05-23 10:24:40 +02001119 return 0;
Martin Schwidefskyebde7652016-03-08 11:08:09 +01001120 ptep_xchg_direct(vma->vm_mm, addr, ptep, entry);
Martin Schwidefskyb2fa47e2011-05-23 10:24:40 +02001121 return 1;
1122}

/*
 * Additional functions to handle KVM guest page tables
 */
void ptep_set_pte_at(struct mm_struct *mm, unsigned long addr,
		     pte_t *ptep, pte_t entry);
void ptep_set_notify(struct mm_struct *mm, unsigned long addr, pte_t *ptep);
void ptep_notify(struct mm_struct *mm, unsigned long addr,
		 pte_t *ptep, unsigned long bits);
int ptep_force_prot(struct mm_struct *mm, unsigned long gaddr,
		    pte_t *ptep, int prot, unsigned long bit);
void ptep_zap_unused(struct mm_struct *mm, unsigned long addr,
		     pte_t *ptep, int reset);
void ptep_zap_key(struct mm_struct *mm, unsigned long addr, pte_t *ptep);
int ptep_shadow_pte(struct mm_struct *mm, unsigned long saddr,
		    pte_t *sptep, pte_t *tptep, pte_t pte);
void ptep_unshadow_pte(struct mm_struct *mm, unsigned long saddr, pte_t *ptep);

bool ptep_test_and_clear_uc(struct mm_struct *mm, unsigned long address,
			    pte_t *ptep);
int set_guest_storage_key(struct mm_struct *mm, unsigned long addr,
			  unsigned char key, bool nq);
int cond_set_guest_storage_key(struct mm_struct *mm, unsigned long addr,
			       unsigned char key, unsigned char *oldkey,
			       bool nq, bool mr, bool mc);
int reset_guest_reference_bit(struct mm_struct *mm, unsigned long addr);
int get_guest_storage_key(struct mm_struct *mm, unsigned long addr,
			  unsigned char *key);

int set_pgste_bits(struct mm_struct *mm, unsigned long addr,
		   unsigned long bits, unsigned long value);
int get_pgste(struct mm_struct *mm, unsigned long hva, unsigned long *pgstep);
int pgste_perform_essa(struct mm_struct *mm, unsigned long hva, int orc,
		       unsigned long *oldpte, unsigned long *oldpgste);
void gmap_pmdp_csp(struct mm_struct *mm, unsigned long vmaddr);
void gmap_pmdp_invalidate(struct mm_struct *mm, unsigned long vmaddr);
void gmap_pmdp_idte_local(struct mm_struct *mm, unsigned long vmaddr);
void gmap_pmdp_idte_global(struct mm_struct *mm, unsigned long vmaddr);

/*
 * Certain architectures need to do special things when PTEs
 * within a page table are directly modified. Thus, the following
 * hook is made available.
 */
static inline void set_pte_at(struct mm_struct *mm, unsigned long addr,
			      pte_t *ptep, pte_t entry)
{
	if (!MACHINE_HAS_NX)
		pte_val(entry) &= ~_PAGE_NOEXEC;
	if (pte_present(entry))
		pte_val(entry) &= ~_PAGE_UNUSED;
	if (mm_has_pgste(mm))
		ptep_set_pte_at(mm, addr, ptep, entry);
	else
		*ptep = entry;
}
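
/*
 * Illustrative sketch (hypothetical helper): stores to user page tables
 * should go through set_pte_at() rather than a direct pointer
 * assignment, so that pgste-enabled (KVM) mms take the notify path.
 */
static inline void example_install_pte(struct mm_struct *mm, unsigned long addr,
				       pte_t *ptep, pte_t pte)
{
	set_pte_at(mm, addr, ptep, pte);	/* not: *ptep = pte */
}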

/*
 * Conversion functions: convert a page and protection to a page entry,
 * and a page entry and page directory to the page they refer to.
 */
static inline pte_t mk_pte_phys(unsigned long physpage, pgprot_t pgprot)
{
	pte_t __pte;
	pte_val(__pte) = physpage + pgprot_val(pgprot);
	return pte_mkyoung(__pte);
}

static inline pte_t mk_pte(struct page *page, pgprot_t pgprot)
{
	unsigned long physpage = page_to_phys(page);
	pte_t __pte = mk_pte_phys(physpage, pgprot);

	if (pte_write(__pte) && PageDirty(page))
		__pte = pte_mkdirty(__pte);
	return __pte;
}
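
/*
 * Illustrative sketch (hypothetical helper): building an entry for a
 * writable mapping; mk_pte() propagates PageDirty() into the pte when
 * the protection allows writing.
 */
static inline pte_t example_mk_writable_pte(struct page *page)
{
	return mk_pte(page, PAGE_RW);	/* dirty iff PageDirty(page) */
}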

#define pgd_index(address) (((address) >> PGDIR_SHIFT) & (PTRS_PER_PGD-1))
#define p4d_index(address) (((address) >> P4D_SHIFT) & (PTRS_PER_P4D-1))
#define pud_index(address) (((address) >> PUD_SHIFT) & (PTRS_PER_PUD-1))
#define pmd_index(address) (((address) >> PMD_SHIFT) & (PTRS_PER_PMD-1))
#define pte_index(address) (((address) >> PAGE_SHIFT) & (PTRS_PER_PTE-1))

#define pgd_offset(mm, address) ((mm)->pgd + pgd_index(address))
#define pgd_offset_k(address) pgd_offset(&init_mm, address)
#define pgd_offset_raw(pgd, addr) ((pgd) + pgd_index(addr))

#define pmd_deref(pmd) (pmd_val(pmd) & _SEGMENT_ENTRY_ORIGIN)
#define pud_deref(pud) (pud_val(pud) & _REGION_ENTRY_ORIGIN)
#define p4d_deref(p4d) (p4d_val(p4d) & _REGION_ENTRY_ORIGIN)
#define pgd_deref(pgd) (pgd_val(pgd) & _REGION_ENTRY_ORIGIN)

static inline p4d_t *p4d_offset(pgd_t *pgd, unsigned long address)
{
	p4d_t *p4d = (p4d_t *) pgd;

	if ((pgd_val(*pgd) & _REGION_ENTRY_TYPE_MASK) == _REGION_ENTRY_TYPE_R1)
		p4d = (p4d_t *) pgd_deref(*pgd);
	return p4d + p4d_index(address);
}

static inline pud_t *pud_offset(p4d_t *p4d, unsigned long address)
{
	pud_t *pud = (pud_t *) p4d;

	if ((p4d_val(*p4d) & _REGION_ENTRY_TYPE_MASK) == _REGION_ENTRY_TYPE_R2)
		pud = (pud_t *) p4d_deref(*p4d);
	return pud + pud_index(address);
}

static inline pmd_t *pmd_offset(pud_t *pud, unsigned long address)
{
	pmd_t *pmd = (pmd_t *) pud;

	if ((pud_val(*pud) & _REGION_ENTRY_TYPE_MASK) == _REGION_ENTRY_TYPE_R3)
		pmd = (pmd_t *) pud_deref(*pud);
	return pmd + pmd_index(address);
}

#define pfn_pte(pfn, pgprot)	mk_pte_phys(__pa((pfn) << PAGE_SHIFT), (pgprot))
#define pte_pfn(x)	(pte_val(x) >> PAGE_SHIFT)
#define pte_page(x)	pfn_to_page(pte_pfn(x))

#define pmd_page(pmd)	pfn_to_page(pmd_pfn(pmd))
#define pud_page(pud)	pfn_to_page(pud_pfn(pud))
#define p4d_page(p4d)	pfn_to_page(p4d_pfn(p4d))
#define pgd_page(pgd)	pfn_to_page(pgd_pfn(pgd))

/* Find an entry in the lowest level page table. */
#define pte_offset(pmd, addr) ((pte_t *) pmd_deref(*(pmd)) + pte_index(addr))
#define pte_offset_kernel(pmd, address) pte_offset(pmd, address)
#define pte_offset_map(pmd, address) pte_offset_kernel(pmd, address)
#define pte_unmap(pte) do { } while (0)
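
/*
 * Illustrative sketch (hypothetical helper), assuming the presence
 * checks defined earlier in this header: a full walk from an mm to a
 * pte using the offset macros above. Large pud/pmd mappings are
 * ignored for brevity; real callers must check pud_large()/pmd_large()
 * before descending.
 */
static inline pte_t *example_walk_to_pte(struct mm_struct *mm,
					 unsigned long addr)
{
	pgd_t *pgd = pgd_offset(mm, addr);
	p4d_t *p4d;
	pud_t *pud;
	pmd_t *pmd;

	if (pgd_none(*pgd))
		return NULL;
	p4d = p4d_offset(pgd, addr);
	if (p4d_none(*p4d))
		return NULL;
	pud = pud_offset(p4d, addr);
	if (pud_none(*pud))
		return NULL;
	pmd = pmd_offset(pud, addr);
	if (pmd_none(*pmd))
		return NULL;
	return pte_offset_map(pmd, addr);
}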

static inline pmd_t pmd_wrprotect(pmd_t pmd)
{
	pmd_val(pmd) &= ~_SEGMENT_ENTRY_WRITE;
	pmd_val(pmd) |= _SEGMENT_ENTRY_PROTECT;
	return pmd;
}

static inline pmd_t pmd_mkwrite(pmd_t pmd)
{
	pmd_val(pmd) |= _SEGMENT_ENTRY_WRITE;
	if (pmd_large(pmd) && !(pmd_val(pmd) & _SEGMENT_ENTRY_DIRTY))
		return pmd;
	pmd_val(pmd) &= ~_SEGMENT_ENTRY_PROTECT;
	return pmd;
}

static inline pmd_t pmd_mkclean(pmd_t pmd)
{
	if (pmd_large(pmd)) {
		pmd_val(pmd) &= ~_SEGMENT_ENTRY_DIRTY;
		pmd_val(pmd) |= _SEGMENT_ENTRY_PROTECT;
	}
	return pmd;
}

static inline pmd_t pmd_mkdirty(pmd_t pmd)
{
	if (pmd_large(pmd)) {
		pmd_val(pmd) |= _SEGMENT_ENTRY_DIRTY |
				_SEGMENT_ENTRY_SOFT_DIRTY;
		if (pmd_val(pmd) & _SEGMENT_ENTRY_WRITE)
			pmd_val(pmd) &= ~_SEGMENT_ENTRY_PROTECT;
	}
	return pmd;
}
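
/*
 * Illustrative note, not in the original header: s390 has no hardware
 * dirty bit, so clean entries stay write-protected and the first write
 * faults. A hypothetical helper showing the intended transition:
 */
static inline pmd_t example_make_pmd_writable_dirty(pmd_t pmd)
{
	pmd = pmd_mkwrite(pmd);	/* WRITE set; PROTECT kept while clean */
	pmd = pmd_mkdirty(pmd);	/* DIRTY set; PROTECT now cleared */
	return pmd;
}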

static inline pud_t pud_wrprotect(pud_t pud)
{
	pud_val(pud) &= ~_REGION3_ENTRY_WRITE;
	pud_val(pud) |= _REGION_ENTRY_PROTECT;
	return pud;
}

static inline pud_t pud_mkwrite(pud_t pud)
{
	pud_val(pud) |= _REGION3_ENTRY_WRITE;
	if (pud_large(pud) && !(pud_val(pud) & _REGION3_ENTRY_DIRTY))
		return pud;
	pud_val(pud) &= ~_REGION_ENTRY_PROTECT;
	return pud;
}

static inline pud_t pud_mkclean(pud_t pud)
{
	if (pud_large(pud)) {
		pud_val(pud) &= ~_REGION3_ENTRY_DIRTY;
		pud_val(pud) |= _REGION_ENTRY_PROTECT;
	}
	return pud;
}

static inline pud_t pud_mkdirty(pud_t pud)
{
	if (pud_large(pud)) {
		pud_val(pud) |= _REGION3_ENTRY_DIRTY |
				_REGION3_ENTRY_SOFT_DIRTY;
		if (pud_val(pud) & _REGION3_ENTRY_WRITE)
			pud_val(pud) &= ~_REGION_ENTRY_PROTECT;
	}
	return pud;
}

#if defined(CONFIG_TRANSPARENT_HUGEPAGE) || defined(CONFIG_HUGETLB_PAGE)
static inline unsigned long massage_pgprot_pmd(pgprot_t pgprot)
{
	/*
	 * pgprot is PAGE_NONE, PAGE_RO, PAGE_RX, PAGE_RW or PAGE_RWX
	 * (see __Pxxx / __Sxxx). Convert to segment table entry format.
	 */
	if (pgprot_val(pgprot) == pgprot_val(PAGE_NONE))
		return pgprot_val(SEGMENT_NONE);
	if (pgprot_val(pgprot) == pgprot_val(PAGE_RO))
		return pgprot_val(SEGMENT_RO);
	if (pgprot_val(pgprot) == pgprot_val(PAGE_RX))
		return pgprot_val(SEGMENT_RX);
	if (pgprot_val(pgprot) == pgprot_val(PAGE_RW))
		return pgprot_val(SEGMENT_RW);
	return pgprot_val(SEGMENT_RWX);
}

static inline pmd_t pmd_mkyoung(pmd_t pmd)
{
	if (pmd_large(pmd)) {
		pmd_val(pmd) |= _SEGMENT_ENTRY_YOUNG;
		if (pmd_val(pmd) & _SEGMENT_ENTRY_READ)
			pmd_val(pmd) &= ~_SEGMENT_ENTRY_INVALID;
	}
	return pmd;
}

static inline pmd_t pmd_mkold(pmd_t pmd)
{
	if (pmd_large(pmd)) {
		pmd_val(pmd) &= ~_SEGMENT_ENTRY_YOUNG;
		pmd_val(pmd) |= _SEGMENT_ENTRY_INVALID;
	}
	return pmd;
}

static inline pmd_t pmd_modify(pmd_t pmd, pgprot_t newprot)
{
	if (pmd_large(pmd)) {
		pmd_val(pmd) &= _SEGMENT_ENTRY_ORIGIN_LARGE |
			_SEGMENT_ENTRY_DIRTY | _SEGMENT_ENTRY_YOUNG |
			_SEGMENT_ENTRY_LARGE | _SEGMENT_ENTRY_SOFT_DIRTY;
		pmd_val(pmd) |= massage_pgprot_pmd(newprot);
		if (!(pmd_val(pmd) & _SEGMENT_ENTRY_DIRTY))
			pmd_val(pmd) |= _SEGMENT_ENTRY_PROTECT;
		if (!(pmd_val(pmd) & _SEGMENT_ENTRY_YOUNG))
			pmd_val(pmd) |= _SEGMENT_ENTRY_INVALID;
		return pmd;
	}
	pmd_val(pmd) &= _SEGMENT_ENTRY_ORIGIN;
	pmd_val(pmd) |= massage_pgprot_pmd(newprot);
	return pmd;
}

static inline pmd_t mk_pmd_phys(unsigned long physpage, pgprot_t pgprot)
{
	pmd_t __pmd;
	pmd_val(__pmd) = physpage + massage_pgprot_pmd(pgprot);
	return __pmd;
}

#endif /* CONFIG_TRANSPARENT_HUGEPAGE || CONFIG_HUGETLB_PAGE */

static inline void __pmdp_csp(pmd_t *pmdp)
{
	csp((unsigned int *)pmdp + 1, pmd_val(*pmdp),
	    pmd_val(*pmdp) | _SEGMENT_ENTRY_INVALID);
}

#define IDTE_GLOBAL	0
#define IDTE_LOCAL	1

#define IDTE_PTOA	0x0800
#define IDTE_NODAT	0x1000
#define IDTE_GUEST_ASCE	0x2000

static inline void __pmdp_idte(unsigned long addr, pmd_t *pmdp,
			       unsigned long opt, unsigned long asce,
			       int local)
{
	unsigned long sto;

	sto = (unsigned long) pmdp - pmd_index(addr) * sizeof(pmd_t);
	if (__builtin_constant_p(opt) && opt == 0) {
		/* flush without guest asce */
		asm volatile(
			"	.insn	rrf,0xb98e0000,%[r1],%[r2],0,%[m4]"
			: "+m" (*pmdp)
			: [r1] "a" (sto), [r2] "a" ((addr & HPAGE_MASK)),
			  [m4] "i" (local)
			: "cc");
	} else {
		/* flush with guest asce */
		asm volatile(
			"	.insn	rrf,0xb98e0000,%[r1],%[r2],%[r3],%[m4]"
			: "+m" (*pmdp)
			: [r1] "a" (sto), [r2] "a" ((addr & HPAGE_MASK) | opt),
			  [r3] "a" (asce), [m4] "i" (local)
			: "cc");
	}
}

static inline void __pudp_idte(unsigned long addr, pud_t *pudp,
			       unsigned long opt, unsigned long asce,
			       int local)
{
	unsigned long r3o;

	r3o = (unsigned long) pudp - pud_index(addr) * sizeof(pud_t);
	r3o |= _ASCE_TYPE_REGION3;
	if (__builtin_constant_p(opt) && opt == 0) {
		/* flush without guest asce */
		asm volatile(
			"	.insn	rrf,0xb98e0000,%[r1],%[r2],0,%[m4]"
			: "+m" (*pudp)
			: [r1] "a" (r3o), [r2] "a" ((addr & PUD_MASK)),
			  [m4] "i" (local)
			: "cc");
	} else {
		/* flush with guest asce */
		asm volatile(
			"	.insn	rrf,0xb98e0000,%[r1],%[r2],%[r3],%[m4]"
			: "+m" (*pudp)
			: [r1] "a" (r3o), [r2] "a" ((addr & PUD_MASK) | opt),
			  [r3] "a" (asce), [m4] "i" (local)
			: "cc");
	}
}
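
/*
 * Illustrative sketch of how the IDTE wrappers above are typically
 * driven (mirrors the pattern used by the flush helpers in
 * arch/s390/mm/pgtable.c; the helper name is hypothetical): prefer
 * IDTE when the machine has it, fall back to CSP otherwise.
 */
static inline void example_pmdp_flush_global(struct mm_struct *mm,
					     unsigned long addr, pmd_t *pmdp)
{
	if (MACHINE_HAS_TLB_GUEST)
		__pmdp_idte(addr, pmdp, IDTE_NODAT | IDTE_GUEST_ASCE,
			    mm->context.asce, IDTE_GLOBAL);
	else if (MACHINE_HAS_IDTE)
		__pmdp_idte(addr, pmdp, 0, 0, IDTE_GLOBAL);
	else
		__pmdp_csp(pmdp);
}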

pmd_t pmdp_xchg_direct(struct mm_struct *, unsigned long, pmd_t *, pmd_t);
pmd_t pmdp_xchg_lazy(struct mm_struct *, unsigned long, pmd_t *, pmd_t);
pud_t pudp_xchg_direct(struct mm_struct *, unsigned long, pud_t *, pud_t);

#ifdef CONFIG_TRANSPARENT_HUGEPAGE

#define __HAVE_ARCH_PGTABLE_DEPOSIT
void pgtable_trans_huge_deposit(struct mm_struct *mm, pmd_t *pmdp,
				pgtable_t pgtable);

#define __HAVE_ARCH_PGTABLE_WITHDRAW
pgtable_t pgtable_trans_huge_withdraw(struct mm_struct *mm, pmd_t *pmdp);

#define __HAVE_ARCH_PMDP_SET_ACCESS_FLAGS
static inline int pmdp_set_access_flags(struct vm_area_struct *vma,
					unsigned long addr, pmd_t *pmdp,
					pmd_t entry, int dirty)
{
	VM_BUG_ON(addr & ~HPAGE_MASK);

	entry = pmd_mkyoung(entry);
	if (dirty)
		entry = pmd_mkdirty(entry);
	if (pmd_val(*pmdp) == pmd_val(entry))
		return 0;
	pmdp_xchg_direct(vma->vm_mm, addr, pmdp, entry);
	return 1;
}

#define __HAVE_ARCH_PMDP_TEST_AND_CLEAR_YOUNG
static inline int pmdp_test_and_clear_young(struct vm_area_struct *vma,
					    unsigned long addr, pmd_t *pmdp)
{
	pmd_t pmd = *pmdp;

	pmd = pmdp_xchg_direct(vma->vm_mm, addr, pmdp, pmd_mkold(pmd));
	return pmd_young(pmd);
}

#define __HAVE_ARCH_PMDP_CLEAR_YOUNG_FLUSH
static inline int pmdp_clear_flush_young(struct vm_area_struct *vma,
					 unsigned long addr, pmd_t *pmdp)
{
	VM_BUG_ON(addr & ~HPAGE_MASK);
	return pmdp_test_and_clear_young(vma, addr, pmdp);
}

static inline void set_pmd_at(struct mm_struct *mm, unsigned long addr,
			      pmd_t *pmdp, pmd_t entry)
{
	if (!MACHINE_HAS_NX)
		pmd_val(entry) &= ~_SEGMENT_ENTRY_NOEXEC;
	*pmdp = entry;
}

static inline pmd_t pmd_mkhuge(pmd_t pmd)
{
	pmd_val(pmd) |= _SEGMENT_ENTRY_LARGE;
	pmd_val(pmd) |= _SEGMENT_ENTRY_YOUNG;
	pmd_val(pmd) |= _SEGMENT_ENTRY_PROTECT;
	return pmd;
}

#define __HAVE_ARCH_PMDP_HUGE_GET_AND_CLEAR
static inline pmd_t pmdp_huge_get_and_clear(struct mm_struct *mm,
					    unsigned long addr, pmd_t *pmdp)
{
	return pmdp_xchg_direct(mm, addr, pmdp, __pmd(_SEGMENT_ENTRY_EMPTY));
}

#define __HAVE_ARCH_PMDP_HUGE_GET_AND_CLEAR_FULL
static inline pmd_t pmdp_huge_get_and_clear_full(struct mm_struct *mm,
						 unsigned long addr,
						 pmd_t *pmdp, int full)
{
	if (full) {
		pmd_t pmd = *pmdp;
		*pmdp = __pmd(_SEGMENT_ENTRY_EMPTY);
		return pmd;
	}
	return pmdp_xchg_lazy(mm, addr, pmdp, __pmd(_SEGMENT_ENTRY_EMPTY));
}

#define __HAVE_ARCH_PMDP_HUGE_CLEAR_FLUSH
static inline pmd_t pmdp_huge_clear_flush(struct vm_area_struct *vma,
					  unsigned long addr, pmd_t *pmdp)
{
	return pmdp_huge_get_and_clear(vma->vm_mm, addr, pmdp);
}

#define __HAVE_ARCH_PMDP_INVALIDATE
static inline pmd_t pmdp_invalidate(struct vm_area_struct *vma,
				    unsigned long addr, pmd_t *pmdp)
{
	pmd_t pmd = __pmd(pmd_val(*pmdp) | _SEGMENT_ENTRY_INVALID);

	return pmdp_xchg_direct(vma->vm_mm, addr, pmdp, pmd);
}
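
/*
 * Illustrative sketch (hypothetical helper, assuming pmd_dirty() from
 * earlier in this header): pmdp_invalidate() returns the old entry, so
 * callers such as the generic THP split code can still inspect the
 * pre-invalidation state.
 */
static inline int example_pmd_was_dirty(struct vm_area_struct *vma,
					unsigned long addr, pmd_t *pmdp)
{
	pmd_t old = pmdp_invalidate(vma, addr, pmdp);

	return pmd_dirty(old);
}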

#define __HAVE_ARCH_PMDP_SET_WRPROTECT
static inline void pmdp_set_wrprotect(struct mm_struct *mm,
				      unsigned long addr, pmd_t *pmdp)
{
	pmd_t pmd = *pmdp;

	if (pmd_write(pmd))
		pmd = pmdp_xchg_lazy(mm, addr, pmdp, pmd_wrprotect(pmd));
}

static inline pmd_t pmdp_collapse_flush(struct vm_area_struct *vma,
					unsigned long address,
					pmd_t *pmdp)
{
	return pmdp_huge_get_and_clear(vma->vm_mm, address, pmdp);
}
#define pmdp_collapse_flush pmdp_collapse_flush

#define pfn_pmd(pfn, pgprot)	mk_pmd_phys(__pa((pfn) << PAGE_SHIFT), (pgprot))
#define mk_pmd(page, pgprot)	pfn_pmd(page_to_pfn(page), (pgprot))

static inline int pmd_trans_huge(pmd_t pmd)
{
	return pmd_val(pmd) & _SEGMENT_ENTRY_LARGE;
}

#define has_transparent_hugepage has_transparent_hugepage
static inline int has_transparent_hugepage(void)
{
	return MACHINE_HAS_EDAT1 ? 1 : 0;
}
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */

/*
 * 64 bit swap entry format:
 * A page-table entry has some bits we have to treat in a special way.
 * Bits 52 and 55 have to be zero, otherwise a specification
 * exception will occur instead of a page translation exception. The
 * specification exception has the bad habit of not storing the
 * necessary information in the lowcore.
 * Bits 54 and 63 are used to indicate the page type.
 * A swap pte is indicated by the bit pattern (pte & 0x201) == 0x200.
 * This leaves the bits 0-51 and bits 56-62 to store type and offset.
 * We use the 5 bits from 57-61 for the type and the 52 bits from 0-51
 * for the offset.
 * |                          offset                    |01100|type |00|
 * |0000000000111111111122222222223333333333444444444455|55555|55566|66|
 * |0123456789012345678901234567890123456789012345678901|23456|78901|23|
 */

#define __SWP_OFFSET_MASK	((1UL << 52) - 1)
#define __SWP_OFFSET_SHIFT	12
#define __SWP_TYPE_MASK		((1UL << 5) - 1)
#define __SWP_TYPE_SHIFT	2

static inline pte_t mk_swap_pte(unsigned long type, unsigned long offset)
{
	pte_t pte;

	pte_val(pte) = _PAGE_INVALID | _PAGE_PROTECT;
	pte_val(pte) |= (offset & __SWP_OFFSET_MASK) << __SWP_OFFSET_SHIFT;
	pte_val(pte) |= (type & __SWP_TYPE_MASK) << __SWP_TYPE_SHIFT;
	return pte;
}

static inline unsigned long __swp_type(swp_entry_t entry)
{
	return (entry.val >> __SWP_TYPE_SHIFT) & __SWP_TYPE_MASK;
}

static inline unsigned long __swp_offset(swp_entry_t entry)
{
	return (entry.val >> __SWP_OFFSET_SHIFT) & __SWP_OFFSET_MASK;
}

static inline swp_entry_t __swp_entry(unsigned long type, unsigned long offset)
{
	return (swp_entry_t) { pte_val(mk_swap_pte(type, offset)) };
}

#define __pte_to_swp_entry(pte)	((swp_entry_t) { pte_val(pte) })
#define __swp_entry_to_pte(x)	((pte_t) { (x).val })
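
/*
 * Worked example (illustrative, not from the original source):
 * __swp_entry(5, 0x1000) produces
 *	_PAGE_INVALID | _PAGE_PROTECT	-> 0x0000000000000600
 *	0x1000 << __SWP_OFFSET_SHIFT	-> 0x0000000001000000
 *	5 << __SWP_TYPE_SHIFT		-> 0x0000000000000014
 *	pte_val				=  0x0000000001000614
 * which satisfies the swap-pte test (pte & 0x201) == 0x200.
 */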

#define kern_addr_valid(addr) (1)

extern int vmem_add_mapping(unsigned long start, unsigned long size);
extern int vmem_remove_mapping(unsigned long start, unsigned long size);
extern int s390_enable_sie(void);
extern int s390_enable_skey(void);
extern void s390_reset_cmma(struct mm_struct *mm);

/* s390 has a private copy of get unmapped area to deal with cache synonyms */
#define HAVE_ARCH_UNMAPPED_AREA
#define HAVE_ARCH_UNMAPPED_AREA_TOPDOWN

/*
 * No page table caches to initialise
 */
static inline void pgtable_cache_init(void) { }
static inline void check_pgt_cache(void) { }

#include <asm-generic/pgtable.h>

#endif /* _ASM_S390_PGTABLE_H */