/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _S390_TLB_H
#define _S390_TLB_H

/*
 * TLB flushing on s390 is complicated. The following requirement
 * from the principles of operation is the most arduous:
 *
 * "A valid table entry must not be changed while it is attached
 * to any CPU and may be used for translation by that CPU except to
 * (1) invalidate the entry by using INVALIDATE PAGE TABLE ENTRY,
 * or INVALIDATE DAT TABLE ENTRY, (2) alter bits 56-63 of a page
 * table entry, or (3) make a change by means of a COMPARE AND SWAP
 * AND PURGE instruction that purges the TLB."
 *
 * The modification of a pte of an active mm struct therefore is
 * a two step process: i) invalidate the pte, ii) store the new pte.
 * This is true for the page protection bit as well.
 * The only possible optimization is to flush at the beginning of
 * a tlb_gather_mmu cycle if the mm_struct is currently not in use.
 *
 * Pages used for the page tables are a different story. FIXME: more
 */
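
/*
 * A minimal sketch of that two step update, written against the generic
 * page table helpers (illustrative only, not part of this header). On
 * s390 ptep_get_and_clear() invalidates the entry, e.g. via IPTE, before
 * the new value is stored:
 *
 *	pte_t old;
 *
 *	old = ptep_get_and_clear(mm, addr, ptep);	// i) invalidate
 *	set_pte_at(mm, addr, ptep, new_pte);		// ii) store new pte
 */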

#include <linux/mm.h>
#include <linux/pagemap.h>
#include <linux/swap.h>
#include <asm/processor.h>
#include <asm/pgalloc.h>
#include <asm/tlbflush.h>

struct mmu_gather {
	struct mm_struct *mm;
	struct mmu_table_batch *batch;
	unsigned int fullmm;
	unsigned long start, end;
};

struct mmu_table_batch {
	struct rcu_head rcu;
	unsigned int nr;
	void *tables[0];
};

#define MAX_TABLE_BATCH \
	((PAGE_SIZE - sizeof(struct mmu_table_batch)) / sizeof(void *))
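
/*
 * Rough worked example: with 4KB pages, a 16 byte rcu_head and the
 * counter padded out to 24 bytes of header in total, MAX_TABLE_BATCH
 * comes to (4096 - 24) / 8 = 509 table pointers per batch page. The
 * exact value depends on the struct layout for the configuration.
 */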

extern void tlb_table_flush(struct mmu_gather *tlb);
extern void tlb_remove_table(struct mmu_gather *tlb, void *table);

static inline void
arch_tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm,
		    unsigned long start, unsigned long end)
{
	tlb->mm = mm;
	tlb->start = start;
	tlb->end = end;
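	/* start == 0 and end == -1UL denote a flush of the whole mm */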
	tlb->fullmm = !(start | (end+1));
	tlb->batch = NULL;
}

static inline void tlb_flush_mmu_tlbonly(struct mmu_gather *tlb)
{
	__tlb_flush_mm_lazy(tlb->mm);
}

static inline void tlb_flush_mmu_free(struct mmu_gather *tlb)
{
	tlb_table_flush(tlb);
}

static inline void tlb_flush_mmu(struct mmu_gather *tlb)
{
	tlb_flush_mmu_tlbonly(tlb);
	tlb_flush_mmu_free(tlb);
}
78
Minchan Kim56236a52017-08-10 15:24:05 -070079static inline void
80arch_tlb_finish_mmu(struct mmu_gather *tlb,
Minchan Kim99baac22017-08-10 15:24:12 -070081 unsigned long start, unsigned long end, bool force)
Martin Schwidefskyba8a9222007-10-22 12:52:44 +020082{
Minchan Kim99baac22017-08-10 15:24:12 -070083 if (force) {
84 tlb->start = start;
85 tlb->end = end;
86 }
87
Martin Schwidefsky5c474a12013-08-16 13:31:40 +020088 tlb_flush_mmu(tlb);
Martin Schwidefskyba8a9222007-10-22 12:52:44 +020089}
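
/*
 * Minimal usage sketch (assumed caller pattern; in-tree callers go
 * through the generic tlb_gather_mmu()/tlb_finish_mmu() wrappers):
 *
 *	struct mmu_gather tlb;
 *
 *	arch_tlb_gather_mmu(&tlb, mm, start, end);
 *	... clear ptes, then queue pages and page tables with
 *	    tlb_remove_page() and tlb_remove_table() ...
 *	arch_tlb_finish_mmu(&tlb, start, end, false);
 */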

/*
 * Release the page cache reference for a pte removed by
 * tlb_ptep_clear_flush. In both flush modes the tlb entry for a
 * page cache page has already been flushed, so just do
 * free_page_and_swap_cache.
 */
static inline bool __tlb_remove_page(struct mmu_gather *tlb, struct page *page)
{
	free_page_and_swap_cache(page);
	return false; /* avoid calling tlb_flush_mmu */
}

static inline void tlb_remove_page(struct mmu_gather *tlb, struct page *page)
{
	free_page_and_swap_cache(page);
}

static inline bool __tlb_remove_page_size(struct mmu_gather *tlb,
					  struct page *page, int page_size)
{
	return __tlb_remove_page(tlb, page);
}

static inline void tlb_remove_page_size(struct mmu_gather *tlb,
					struct page *page, int page_size)
{
	return tlb_remove_page(tlb, page);
}

/*
 * pte_free_tlb frees a pte table and clears the CRSTE for the
 * page table from the tlb.
 */
static inline void pte_free_tlb(struct mmu_gather *tlb, pgtable_t pte,
				unsigned long address)
{
	page_table_free_rcu(tlb, (unsigned long *) pte, address);
}

/*
 * pmd_free_tlb frees a pmd table and clears the CRSTE for the
 * segment table entry from the tlb.
 * If the mm uses a two level page table the single pmd is freed
 * as the pgd. mm_pmd_folded checks the asce_limit against 2GB
 * to avoid the double free of the pmd in this case.
 */
static inline void pmd_free_tlb(struct mmu_gather *tlb, pmd_t *pmd,
				unsigned long address)
{
	if (mm_pmd_folded(tlb->mm))
		return;
	pgtable_pmd_page_dtor(virt_to_page(pmd));
	tlb_remove_table(tlb, pmd);
}

/*
 * p4d_free_tlb frees a p4d table and clears the CRSTE for the
 * region second table entry from the tlb.
 * If the mm uses a four level page table the single p4d is freed
 * as the pgd. mm_p4d_folded checks the asce_limit against 8PB
 * to avoid the double free of the p4d in this case.
 */
static inline void p4d_free_tlb(struct mmu_gather *tlb, p4d_t *p4d,
				unsigned long address)
{
	if (mm_p4d_folded(tlb->mm))
		return;
	tlb_remove_table(tlb, p4d);
}

/*
 * pud_free_tlb frees a pud table and clears the CRSTE for the
 * region third table entry from the tlb.
 * If the mm uses a three level page table the single pud is freed
 * as the pgd. mm_pud_folded checks the asce_limit against 4TB
 * to avoid the double free of the pud in this case.
 */
static inline void pud_free_tlb(struct mmu_gather *tlb, pud_t *pud,
				unsigned long address)
{
	if (mm_pud_folded(tlb->mm))
		return;
	tlb_remove_table(tlb, pud);
}
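
/*
 * Summary of the folding checks above, derived from the asce_limit
 * values in the comments (assuming the standard s390 table layouts):
 *
 *	asce_limit <= 2GB	two level	pmd, pud and p4d folded
 *	asce_limit <= 4TB	three level	pud and p4d folded
 *	asce_limit <= 8PB	four level	p4d folded
 *
 * A folded level is freed as the pgd, hence the early returns.
 */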

#define tlb_start_vma(tlb, vma)			do { } while (0)
#define tlb_end_vma(tlb, vma)			do { } while (0)
#define tlb_remove_tlb_entry(tlb, ptep, addr)	do { } while (0)
#define tlb_remove_pmd_tlb_entry(tlb, pmdp, addr)	do { } while (0)
#define tlb_migrate_finish(mm)			do { } while (0)
#define tlb_remove_huge_tlb_entry(h, tlb, ptep, address)	\
	tlb_remove_tlb_entry(tlb, ptep, address)

#define tlb_remove_check_page_size_change tlb_remove_check_page_size_change
static inline void tlb_remove_check_page_size_change(struct mmu_gather *tlb,
						     unsigned int page_size)
{
}

#endif /* _S390_TLB_H */